Diffstat (limited to 'arch/mips')
-rw-r--r--arch/mips/Kbuild27
-rw-r--r--arch/mips/Kbuild.platforms41
-rw-r--r--arch/mips/Kconfig3344
-rw-r--r--arch/mips/Kconfig.debug161
-rw-r--r--arch/mips/Makefile567
-rw-r--r--arch/mips/Makefile.postlink44
-rw-r--r--arch/mips/alchemy/Kconfig36
-rw-r--r--arch/mips/alchemy/Makefile4
-rw-r--r--arch/mips/alchemy/Platform35
-rw-r--r--arch/mips/alchemy/board-gpr.c292
-rw-r--r--arch/mips/alchemy/board-mtx1.c285
-rw-r--r--arch/mips/alchemy/board-xxs1500.c125
-rw-r--r--arch/mips/alchemy/common/Makefile10
-rw-r--r--arch/mips/alchemy/common/clock.c1117
-rw-r--r--arch/mips/alchemy/common/dbdma.c1089
-rw-r--r--arch/mips/alchemy/common/dma.c265
-rw-r--r--arch/mips/alchemy/common/gpiolib.c174
-rw-r--r--arch/mips/alchemy/common/irq.c996
-rw-r--r--arch/mips/alchemy/common/platform.c458
-rw-r--r--arch/mips/alchemy/common/power.c132
-rw-r--r--arch/mips/alchemy/common/prom.c149
-rw-r--r--arch/mips/alchemy/common/setup.c103
-rw-r--r--arch/mips/alchemy/common/sleeper.S266
-rw-r--r--arch/mips/alchemy/common/time.c155
-rw-r--r--arch/mips/alchemy/common/usb.c627
-rw-r--r--arch/mips/alchemy/common/vss.c85
-rw-r--r--arch/mips/alchemy/devboards/Makefile7
-rw-r--r--arch/mips/alchemy/devboards/bcsr.c147
-rw-r--r--arch/mips/alchemy/devboards/db1000.c562
-rw-r--r--arch/mips/alchemy/devboards/db1200.c943
-rw-r--r--arch/mips/alchemy/devboards/db1300.c888
-rw-r--r--arch/mips/alchemy/devboards/db1550.c628
-rw-r--r--arch/mips/alchemy/devboards/db1xxx.c124
-rw-r--r--arch/mips/alchemy/devboards/platform.c248
-rw-r--r--arch/mips/alchemy/devboards/platform.h22
-rw-r--r--arch/mips/alchemy/devboards/pm.c254
-rw-r--r--arch/mips/ar7/Makefile11
-rw-r--r--arch/mips/ar7/Platform5
-rw-r--r--arch/mips/ar7/clock.c494
-rw-r--r--arch/mips/ar7/gpio.c331
-rw-r--r--arch/mips/ar7/irq.c165
-rw-r--r--arch/mips/ar7/memory.c56
-rw-r--r--arch/mips/ar7/platform.c722
-rw-r--r--arch/mips/ar7/prom.c256
-rw-r--r--arch/mips/ar7/setup.c93
-rw-r--r--arch/mips/ar7/time.c31
-rw-r--r--arch/mips/ath25/Kconfig17
-rw-r--r--arch/mips/ath25/Makefile16
-rw-r--r--arch/mips/ath25/Platform5
-rw-r--r--arch/mips/ath25/ar2315.c362
-rw-r--r--arch/mips/ath25/ar2315.h23
-rw-r--r--arch/mips/ath25/ar2315_regs.h410
-rw-r--r--arch/mips/ath25/ar5312.c391
-rw-r--r--arch/mips/ath25/ar5312.h23
-rw-r--r--arch/mips/ath25/ar5312_regs.h224
-rw-r--r--arch/mips/ath25/board.c236
-rw-r--r--arch/mips/ath25/devices.c128
-rw-r--r--arch/mips/ath25/devices.h44
-rw-r--r--arch/mips/ath25/early_printk.c45
-rw-r--r--arch/mips/ath25/prom.c26
-rw-r--r--arch/mips/ath79/Kconfig48
-rw-r--r--arch/mips/ath79/Makefile11
-rw-r--r--arch/mips/ath79/Platform6
-rw-r--r--arch/mips/ath79/clock.c673
-rw-r--r--arch/mips/ath79/common.c149
-rw-r--r--arch/mips/ath79/common.h21
-rw-r--r--arch/mips/ath79/early_printk.c146
-rw-r--r--arch/mips/ath79/prom.c39
-rw-r--r--arch/mips/ath79/setup.c279
-rw-r--r--arch/mips/bcm47xx/Kconfig39
-rw-r--r--arch/mips/bcm47xx/Makefile8
-rw-r--r--arch/mips/bcm47xx/Platform7
-rw-r--r--arch/mips/bcm47xx/bcm47xx_private.h26
-rw-r--r--arch/mips/bcm47xx/board.c362
-rw-r--r--arch/mips/bcm47xx/buttons.c718
-rw-r--r--arch/mips/bcm47xx/irq.c98
-rw-r--r--arch/mips/bcm47xx/leds.c793
-rw-r--r--arch/mips/bcm47xx/prom.c182
-rw-r--r--arch/mips/bcm47xx/serial.c93
-rw-r--r--arch/mips/bcm47xx/setup.c280
-rw-r--r--arch/mips/bcm47xx/time.c81
-rw-r--r--arch/mips/bcm47xx/workarounds.c35
-rw-r--r--arch/mips/bcm63xx/Kconfig45
-rw-r--r--arch/mips/bcm63xx/Makefile8
-rw-r--r--arch/mips/bcm63xx/Platform6
-rw-r--r--arch/mips/bcm63xx/boards/Kconfig11
-rw-r--r--arch/mips/bcm63xx/boards/Makefile2
-rw-r--r--arch/mips/bcm63xx/boards/board_bcm963xx.c911
-rw-r--r--arch/mips/bcm63xx/clk.c579
-rw-r--r--arch/mips/bcm63xx/cpu.c385
-rw-r--r--arch/mips/bcm63xx/cs.c145
-rw-r--r--arch/mips/bcm63xx/dev-enet.c327
-rw-r--r--arch/mips/bcm63xx/dev-flash.c131
-rw-r--r--arch/mips/bcm63xx/dev-hsspi.c47
-rw-r--r--arch/mips/bcm63xx/dev-pcmcia.c144
-rw-r--r--arch/mips/bcm63xx/dev-rng.c40
-rw-r--r--arch/mips/bcm63xx/dev-spi.c60
-rw-r--r--arch/mips/bcm63xx/dev-uart.c76
-rw-r--r--arch/mips/bcm63xx/dev-usb-usbd.c65
-rw-r--r--arch/mips/bcm63xx/dev-wdt.c37
-rw-r--r--arch/mips/bcm63xx/early_printk.c30
-rw-r--r--arch/mips/bcm63xx/gpio.c151
-rw-r--r--arch/mips/bcm63xx/irq.c553
-rw-r--r--arch/mips/bcm63xx/nvram.c98
-rw-r--r--arch/mips/bcm63xx/prom.c100
-rw-r--r--arch/mips/bcm63xx/reset.c219
-rw-r--r--arch/mips/bcm63xx/setup.c170
-rw-r--r--arch/mips/bcm63xx/timer.c206
-rw-r--r--arch/mips/bmips/Kconfig83
-rw-r--r--arch/mips/bmips/Makefile2
-rw-r--r--arch/mips/bmips/Platform6
-rw-r--r--arch/mips/bmips/dma.c128
-rw-r--r--arch/mips/bmips/irq.c42
-rw-r--r--arch/mips/bmips/setup.c212
-rw-r--r--arch/mips/boot/.gitignore8
-rw-r--r--arch/mips/boot/Makefile173
-rw-r--r--arch/mips/boot/compressed/.gitignore3
-rw-r--r--arch/mips/boot/compressed/Makefile155
-rw-r--r--arch/mips/boot/compressed/calc_vmlinuz_load_addr.c54
-rw-r--r--arch/mips/boot/compressed/dbg.c37
-rw-r--r--arch/mips/boot/compressed/decompress.c132
-rw-r--r--arch/mips/boot/compressed/dummy.c4
-rw-r--r--arch/mips/boot/compressed/head.S56
-rw-r--r--arch/mips/boot/compressed/ld.script57
-rw-r--r--arch/mips/boot/compressed/string.c46
-rw-r--r--arch/mips/boot/compressed/uart-16550.c64
-rw-r--r--arch/mips/boot/compressed/uart-alchemy.c7
-rw-r--r--arch/mips/boot/compressed/uart-prom.c7
-rw-r--r--arch/mips/boot/dts/Makefile19
-rw-r--r--arch/mips/boot/dts/brcm/Makefile37
-rw-r--r--arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts23
-rw-r--r--arch/mips/boot/dts/brcm/bcm3368.dtsi110
-rw-r--r--arch/mips/boot/dts/brcm/bcm3384_viper.dtsi109
-rw-r--r--arch/mips/boot/dts/brcm/bcm3384_zephyr.dtsi127
-rw-r--r--arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts109
-rw-r--r--arch/mips/boot/dts/brcm/bcm63268.dtsi149
-rw-r--r--arch/mips/boot/dts/brcm/bcm6328.dtsi138
-rw-r--r--arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts48
-rw-r--r--arch/mips/boot/dts/brcm/bcm6358.dtsi139
-rw-r--r--arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts23
-rw-r--r--arch/mips/boot/dts/brcm/bcm6362.dtsi149
-rw-r--r--arch/mips/boot/dts/brcm/bcm6368.dtsi132
-rw-r--r--arch/mips/boot/dts/brcm/bcm7125.dtsi281
-rw-r--r--arch/mips/boot/dts/brcm/bcm7346.dtsi549
-rw-r--r--arch/mips/boot/dts/brcm/bcm7358.dtsi383
-rw-r--r--arch/mips/boot/dts/brcm/bcm7360.dtsi468
-rw-r--r--arch/mips/boot/dts/brcm/bcm7362.dtsi464
-rw-r--r--arch/mips/boot/dts/brcm/bcm7420.dtsi342
-rw-r--r--arch/mips/boot/dts/brcm/bcm7425.dtsi587
-rw-r--r--arch/mips/boot/dts/brcm/bcm7435.dtsi602
-rw-r--r--arch/mips/boot/dts/brcm/bcm93384wvg.dts26
-rw-r--r--arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts26
-rw-r--r--arch/mips/boot/dts/brcm/bcm96368mvwg.dts32
-rw-r--r--arch/mips/boot/dts/brcm/bcm97125cbmb.dts68
-rw-r--r--arch/mips/boot/dts/brcm/bcm97346dbsmb.dts124
-rw-r--r--arch/mips/boot/dts/brcm/bcm97358svmb.dts116
-rw-r--r--arch/mips/boot/dts/brcm/bcm97360svmb.dts119
-rw-r--r--arch/mips/boot/dts/brcm/bcm97362svmb.dts88
-rw-r--r--arch/mips/boot/dts/brcm/bcm97420c.dts90
-rw-r--r--arch/mips/boot/dts/brcm/bcm97425svmb.dts154
-rw-r--r--arch/mips/boot/dts/brcm/bcm97435svmb.dts130
-rw-r--r--arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch24.dtsi26
-rw-r--r--arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch4.dtsi26
-rw-r--r--arch/mips/boot/dts/brcm/bcm9ejtagprb.dts23
-rw-r--r--arch/mips/boot/dts/cavium-octeon/Makefile4
-rw-r--r--arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts48
-rw-r--r--arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n-1000n.dtsi55
-rw-r--r--arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n.dts37
-rw-r--r--arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts406
-rw-r--r--arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dtsi232
-rw-r--r--arch/mips/boot/dts/cavium-octeon/octeon_68xx.dts626
-rw-r--r--arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts62
-rw-r--r--arch/mips/boot/dts/img/Makefile5
-rw-r--r--arch/mips/boot/dts/img/boston.dts237
-rw-r--r--arch/mips/boot/dts/img/pistachio.dtsi920
-rw-r--r--arch/mips/boot/dts/img/pistachio_marduk.dts160
-rw-r--r--arch/mips/boot/dts/ingenic/Makefile9
-rw-r--r--arch/mips/boot/dts/ingenic/ci20.dts495
-rw-r--r--arch/mips/boot/dts/ingenic/cu1000-neo.dts168
-rw-r--r--arch/mips/boot/dts/ingenic/cu1830-neo.dts168
-rw-r--r--arch/mips/boot/dts/ingenic/gcw0.dts547
-rw-r--r--arch/mips/boot/dts/ingenic/gcw0_proto.dts13
-rw-r--r--arch/mips/boot/dts/ingenic/jz4725b.dtsi378
-rw-r--r--arch/mips/boot/dts/ingenic/jz4740.dtsi334
-rw-r--r--arch/mips/boot/dts/ingenic/jz4770.dtsi471
-rw-r--r--arch/mips/boot/dts/ingenic/jz4780.dtsi497
-rw-r--r--arch/mips/boot/dts/ingenic/qi_lb60.dts363
-rw-r--r--arch/mips/boot/dts/ingenic/rs90.dts315
-rw-r--r--arch/mips/boot/dts/ingenic/x1000.dtsi326
-rw-r--r--arch/mips/boot/dts/ingenic/x1830.dtsi314
-rw-r--r--arch/mips/boot/dts/lantiq/Makefile4
-rw-r--r--arch/mips/boot/dts/lantiq/danube.dtsi106
-rw-r--r--arch/mips/boot/dts/lantiq/easy50712.dts115
-rw-r--r--arch/mips/boot/dts/loongson/Makefile8
-rw-r--r--arch/mips/boot/dts/loongson/loongson64c-package.dtsi64
-rw-r--r--arch/mips/boot/dts/loongson/loongson64c_4core_ls7a.dts37
-rw-r--r--arch/mips/boot/dts/loongson/loongson64c_4core_rs780e.dts25
-rw-r--r--arch/mips/boot/dts/loongson/loongson64c_8core_rs780e.dts25
-rw-r--r--arch/mips/boot/dts/loongson/loongson64g-package.dtsi61
-rw-r--r--arch/mips/boot/dts/loongson/loongson64g_4core_ls7a.dts41
-rw-r--r--arch/mips/boot/dts/loongson/loongson64v_4core_virtio.dts102
-rw-r--r--arch/mips/boot/dts/loongson/ls7a-pch.dtsi417
-rw-r--r--arch/mips/boot/dts/loongson/rs780e-pch.dtsi43
-rw-r--r--arch/mips/boot/dts/mscc/Makefile4
-rw-r--r--arch/mips/boot/dts/mscc/ocelot.dtsi267
-rw-r--r--arch/mips/boot/dts/mscc/ocelot_pcb120.dts117
-rw-r--r--arch/mips/boot/dts/mscc/ocelot_pcb123.dts63
-rw-r--r--arch/mips/boot/dts/mti/Makefile5
-rw-r--r--arch/mips/boot/dts/mti/malta.dts117
-rw-r--r--arch/mips/boot/dts/mti/sead3.dts259
-rw-r--r--arch/mips/boot/dts/netlogic/Makefile8
-rw-r--r--arch/mips/boot/dts/netlogic/xlp_evp.dts131
-rw-r--r--arch/mips/boot/dts/netlogic/xlp_fvp.dts131
-rw-r--r--arch/mips/boot/dts/netlogic/xlp_gvp.dts89
-rw-r--r--arch/mips/boot/dts/netlogic/xlp_rvp.dts89
-rw-r--r--arch/mips/boot/dts/netlogic/xlp_svp.dts131
-rw-r--r--arch/mips/boot/dts/ni/169445.dts100
-rw-r--r--arch/mips/boot/dts/ni/Makefile2
-rw-r--r--arch/mips/boot/dts/pic32/Makefile7
-rw-r--r--arch/mips/boot/dts/pic32/pic32mzda.dtsi298
-rw-r--r--arch/mips/boot/dts/pic32/pic32mzda_sk.dts148
-rw-r--r--arch/mips/boot/dts/qca/Makefile7
-rw-r--r--arch/mips/boot/dts/qca/ar9132.dtsi171
-rw-r--r--arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts112
-rw-r--r--arch/mips/boot/dts/qca/ar9331.dtsi299
-rw-r--r--arch/mips/boot/dts/qca/ar9331_dpt_module.dts101
-rw-r--r--arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts102
-rw-r--r--arch/mips/boot/dts/qca/ar9331_omega.dts78
-rw-r--r--arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts118
-rw-r--r--arch/mips/boot/dts/ralink/Makefile9
-rw-r--r--arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts205
-rw-r--r--arch/mips/boot/dts/ralink/mt7620a.dtsi59
-rw-r--r--arch/mips/boot/dts/ralink/mt7620a_eval.dts18
-rw-r--r--arch/mips/boot/dts/ralink/mt7628a.dtsi298
-rw-r--r--arch/mips/boot/dts/ralink/omega2p.dts18
-rw-r--r--arch/mips/boot/dts/ralink/rt2880.dtsi59
-rw-r--r--arch/mips/boot/dts/ralink/rt2880_eval.dts48
-rw-r--r--arch/mips/boot/dts/ralink/rt3050.dtsi69
-rw-r--r--arch/mips/boot/dts/ralink/rt3052_eval.dts52
-rw-r--r--arch/mips/boot/dts/ralink/rt3883.dtsi59
-rw-r--r--arch/mips/boot/dts/ralink/rt3883_eval.dts18
-rw-r--r--arch/mips/boot/dts/ralink/vocore2.dts18
-rw-r--r--arch/mips/boot/dts/xilfpga/Makefile2
-rw-r--r--arch/mips/boot/dts/xilfpga/microAptiv.dtsi22
-rw-r--r--arch/mips/boot/dts/xilfpga/nexys4ddr.dts118
-rw-r--r--arch/mips/boot/ecoff.h65
-rw-r--r--arch/mips/boot/elf2ecoff.c621
-rw-r--r--arch/mips/boot/tools/.gitignore2
-rw-r--r--arch/mips/boot/tools/Makefile9
-rw-r--r--arch/mips/boot/tools/relocs.c681
-rw-r--r--arch/mips/boot/tools/relocs.h46
-rw-r--r--arch/mips/boot/tools/relocs_32.c18
-rw-r--r--arch/mips/boot/tools/relocs_64.c31
-rw-r--r--arch/mips/boot/tools/relocs_main.c85
-rw-r--r--arch/mips/cavium-octeon/Kconfig79
-rw-r--r--arch/mips/cavium-octeon/Makefile21
-rw-r--r--arch/mips/cavium-octeon/Platform6
-rw-r--r--arch/mips/cavium-octeon/cpu.c50
-rw-r--r--arch/mips/cavium-octeon/crypto/Makefile11
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-crypto.c69
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-crypto.h224
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-md5.c206
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-sha1.c236
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-sha256.c274
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-sha512.c271
-rw-r--r--arch/mips/cavium-octeon/csrc-octeon.c214
-rw-r--r--arch/mips/cavium-octeon/dma-octeon.c250
-rw-r--r--arch/mips/cavium-octeon/executive/Makefile19
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-boot-vector.c167
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-bootmem.c796
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c307
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-board.c348
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-errata.c73
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c144
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-loop.c85
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-npi.c101
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c451
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c515
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-spi.c202
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-util.c363
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c321
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper.c1176
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c371
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c140
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-l2c.c920
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-pko.c646
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-spi.c668
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-sysinfo.c53
-rw-r--r--arch/mips/cavium-octeon/executive/octeon-model.c511
-rw-r--r--arch/mips/cavium-octeon/flash_setup.c143
-rw-r--r--arch/mips/cavium-octeon/oct_ilm.c182
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c2989
-rw-r--r--arch/mips/cavium-octeon/octeon-memcpy.S482
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c1141
-rw-r--r--arch/mips/cavium-octeon/octeon-usb.c557
-rw-r--r--arch/mips/cavium-octeon/octeon_boot.h81
-rw-r--r--arch/mips/cavium-octeon/setup.c1271
-rw-r--r--arch/mips/cavium-octeon/smp.c523
-rw-r--r--arch/mips/cobalt/Makefile8
-rw-r--r--arch/mips/cobalt/Platform5
-rw-r--r--arch/mips/cobalt/buttons.c41
-rw-r--r--arch/mips/cobalt/irq.c64
-rw-r--r--arch/mips/cobalt/lcd.c42
-rw-r--r--arch/mips/cobalt/led.c49
-rw-r--r--arch/mips/cobalt/mtd.c47
-rw-r--r--arch/mips/cobalt/pci.c48
-rw-r--r--arch/mips/cobalt/reset.c52
-rw-r--r--arch/mips/cobalt/rtc.c51
-rw-r--r--arch/mips/cobalt/serial.c73
-rw-r--r--arch/mips/cobalt/setup.c124
-rw-r--r--arch/mips/cobalt/time.c41
-rw-r--r--arch/mips/configs/ar7_defconfig124
-rw-r--r--arch/mips/configs/ath25_defconfig115
-rw-r--r--arch/mips/configs/ath79_defconfig99
-rw-r--r--arch/mips/configs/bcm47xx_defconfig81
-rw-r--r--arch/mips/configs/bcm63xx_defconfig69
-rw-r--r--arch/mips/configs/bigsur_defconfig261
-rw-r--r--arch/mips/configs/bmips_be_defconfig79
-rw-r--r--arch/mips/configs/bmips_stb_defconfig90
-rw-r--r--arch/mips/configs/capcella_defconfig91
-rw-r--r--arch/mips/configs/cavium_octeon_defconfig168
-rw-r--r--arch/mips/configs/ci20_defconfig196
-rw-r--r--arch/mips/configs/cobalt_defconfig74
-rw-r--r--arch/mips/configs/cu1000-neo_defconfig109
-rw-r--r--arch/mips/configs/cu1830-neo_defconfig112
-rw-r--r--arch/mips/configs/db1xxx_defconfig226
-rw-r--r--arch/mips/configs/decstation_64_defconfig225
-rw-r--r--arch/mips/configs/decstation_defconfig221
-rw-r--r--arch/mips/configs/decstation_r4k_defconfig221
-rw-r--r--arch/mips/configs/e55_defconfig37
-rw-r--r--arch/mips/configs/fuloong2e_defconfig235
-rw-r--r--arch/mips/configs/gcw0_defconfig152
-rw-r--r--arch/mips/configs/generic/32r1.config2
-rw-r--r--arch/mips/configs/generic/32r2.config3
-rw-r--r--arch/mips/configs/generic/32r6.config4
-rw-r--r--arch/mips/configs/generic/64r1.config4
-rw-r--r--arch/mips/configs/generic/64r2.config5
-rw-r--r--arch/mips/configs/generic/64r6.config6
-rw-r--r--arch/mips/configs/generic/board-boston.config48
-rw-r--r--arch/mips/configs/generic/board-ni169445.config29
-rw-r--r--arch/mips/configs/generic/board-ocelot.config51
-rw-r--r--arch/mips/configs/generic/board-ranchu.config30
-rw-r--r--arch/mips/configs/generic/board-sead-3.config34
-rw-r--r--arch/mips/configs/generic/board-xilfpga.config22
-rw-r--r--arch/mips/configs/generic/eb.config1
-rw-r--r--arch/mips/configs/generic/el.config1
-rw-r--r--arch/mips/configs/generic/micro32r2.config4
-rw-r--r--arch/mips/configs/generic_defconfig92
-rw-r--r--arch/mips/configs/gpr_defconfig311
-rw-r--r--arch/mips/configs/ip22_defconfig345
-rw-r--r--arch/mips/configs/ip27_defconfig345
-rw-r--r--arch/mips/configs/ip28_defconfig69
-rw-r--r--arch/mips/configs/ip32_defconfig190
-rw-r--r--arch/mips/configs/jazz_defconfig96
-rw-r--r--arch/mips/configs/jmr3927_defconfig50
-rw-r--r--arch/mips/configs/lemote2f_defconfig351
-rw-r--r--arch/mips/configs/loongson1b_defconfig124
-rw-r--r--arch/mips/configs/loongson1c_defconfig125
-rw-r--r--arch/mips/configs/loongson3_defconfig411
-rw-r--r--arch/mips/configs/malta_defconfig424
-rw-r--r--arch/mips/configs/malta_kvm_defconfig437
-rw-r--r--arch/mips/configs/malta_kvm_guest_defconfig436
-rw-r--r--arch/mips/configs/malta_qemu_32r6_defconfig187
-rw-r--r--arch/mips/configs/maltaaprp_defconfig188
-rw-r--r--arch/mips/configs/maltasmvp_defconfig189
-rw-r--r--arch/mips/configs/maltasmvp_eva_defconfig191
-rw-r--r--arch/mips/configs/maltaup_defconfig187
-rw-r--r--arch/mips/configs/maltaup_xpa_defconfig434
-rw-r--r--arch/mips/configs/mpc30x_defconfig53
-rw-r--r--arch/mips/configs/mtx1_defconfig705
-rw-r--r--arch/mips/configs/nlm_xlp_defconfig556
-rw-r--r--arch/mips/configs/nlm_xlr_defconfig507
-rw-r--r--arch/mips/configs/omega2p_defconfig127
-rw-r--r--arch/mips/configs/pic32mzda_defconfig88
-rw-r--r--arch/mips/configs/pistachio_defconfig316
-rw-r--r--arch/mips/configs/qi_lb60_defconfig177
-rw-r--r--arch/mips/configs/rb532_defconfig166
-rw-r--r--arch/mips/configs/rbtx49xx_defconfig98
-rw-r--r--arch/mips/configs/rm200_defconfig413
-rw-r--r--arch/mips/configs/rs90_defconfig183
-rw-r--r--arch/mips/configs/rt305x_defconfig150
-rw-r--r--arch/mips/configs/sb1250_swarm_defconfig107
-rw-r--r--arch/mips/configs/tb0219_defconfig77
-rw-r--r--arch/mips/configs/tb0226_defconfig72
-rw-r--r--arch/mips/configs/tb0287_defconfig85
-rw-r--r--arch/mips/configs/vocore2_defconfig127
-rw-r--r--arch/mips/configs/workpad_defconfig65
-rw-r--r--arch/mips/configs/xway_defconfig156
-rw-r--r--arch/mips/crypto/Makefile24
-rw-r--r--arch/mips/crypto/chacha-core.S497
-rw-r--r--arch/mips/crypto/chacha-glue.c152
-rw-r--r--arch/mips/crypto/crc32-mips.c346
-rw-r--r--arch/mips/crypto/poly1305-glue.c191
-rw-r--r--arch/mips/crypto/poly1305-mips.pl1273
-rw-r--r--arch/mips/dec/Makefile10
-rw-r--r--arch/mips/dec/Platform7
-rw-r--r--arch/mips/dec/ecc-berr.c274
-rw-r--r--arch/mips/dec/int-handler.S311
-rw-r--r--arch/mips/dec/ioasic-irq.c112
-rw-r--r--arch/mips/dec/kn01-berr.c196
-rw-r--r--arch/mips/dec/kn02-irq.c75
-rw-r--r--arch/mips/dec/kn02xa-berr.c135
-rw-r--r--arch/mips/dec/platform.c40
-rw-r--r--arch/mips/dec/prom/Makefile9
-rw-r--r--arch/mips/dec/prom/cmdline.c40
-rw-r--r--arch/mips/dec/prom/console.c41
-rw-r--r--arch/mips/dec/prom/dectypes.h15
-rw-r--r--arch/mips/dec/prom/identify.c187
-rw-r--r--arch/mips/dec/prom/init.c137
-rw-r--r--arch/mips/dec/prom/locore.S30
-rw-r--r--arch/mips/dec/prom/memory.c117
-rw-r--r--arch/mips/dec/reset.c41
-rw-r--r--arch/mips/dec/setup.c784
-rw-r--r--arch/mips/dec/tc.c95
-rw-r--r--arch/mips/dec/time.c172
-rw-r--r--arch/mips/dec/wbflush.c92
-rw-r--r--arch/mips/fw/arc/Makefile15
-rw-r--r--arch/mips/fw/arc/arc_con.c54
-rw-r--r--arch/mips/fw/arc/cmdline.c110
-rw-r--r--arch/mips/fw/arc/env.c21
-rw-r--r--arch/mips/fw/arc/file.c25
-rw-r--r--arch/mips/fw/arc/identify.c111
-rw-r--r--arch/mips/fw/arc/init.c51
-rw-r--r--arch/mips/fw/arc/memory.c192
-rw-r--r--arch/mips/fw/arc/misc.c36
-rw-r--r--arch/mips/fw/arc/promlib.c61
-rw-r--r--arch/mips/fw/cfe/Makefile6
-rw-r--r--arch/mips/fw/cfe/cfe_api.c414
-rw-r--r--arch/mips/fw/cfe/cfe_api_int.h135
-rw-r--r--arch/mips/fw/lib/Makefile8
-rw-r--r--arch/mips/fw/lib/call_o32.S104
-rw-r--r--arch/mips/fw/lib/cmdline.c103
-rw-r--r--arch/mips/fw/sni/Makefile6
-rw-r--r--arch/mips/fw/sni/sniprom.c153
-rw-r--r--arch/mips/generic/Kconfig92
-rw-r--r--arch/mips/generic/Makefile15
-rw-r--r--arch/mips/generic/Platform23
-rw-r--r--arch/mips/generic/board-boston.its.S22
-rw-r--r--arch/mips/generic/board-ingenic.c120
-rw-r--r--arch/mips/generic/board-ni169445.its.S22
-rw-r--r--arch/mips/generic/board-ocelot.c78
-rw-r--r--arch/mips/generic/board-ocelot.its.S40
-rw-r--r--arch/mips/generic/board-ranchu.c89
-rw-r--r--arch/mips/generic/board-sead3.c220
-rw-r--r--arch/mips/generic/board-xilfpga.its.S22
-rw-r--r--arch/mips/generic/init.c208
-rw-r--r--arch/mips/generic/irq.c61
-rw-r--r--arch/mips/generic/proc.c30
-rw-r--r--arch/mips/generic/vmlinux.its.S32
-rw-r--r--arch/mips/generic/yamon-dt.c232
-rw-r--r--arch/mips/include/asm/Kbuild13
-rw-r--r--arch/mips/include/asm/abi.h32
-rw-r--r--arch/mips/include/asm/addrspace.h144
-rw-r--r--arch/mips/include/asm/amon.h12
-rw-r--r--arch/mips/include/asm/arch_hweight.h38
-rw-r--r--arch/mips/include/asm/asm-eva.h190
-rw-r--r--arch/mips/include/asm/asm-offsets.h1
-rw-r--r--arch/mips/include/asm/asm-prototypes.h8
-rw-r--r--arch/mips/include/asm/asm.h331
-rw-r--r--arch/mips/include/asm/asmmacro-32.h91
-rw-r--r--arch/mips/include/asm/asmmacro-64.h44
-rw-r--r--arch/mips/include/asm/asmmacro.h658
-rw-r--r--arch/mips/include/asm/atomic.h267
-rw-r--r--arch/mips/include/asm/barrier.h142
-rw-r--r--arch/mips/include/asm/bcache.h87
-rw-r--r--arch/mips/include/asm/bitops.h463
-rw-r--r--arch/mips/include/asm/bitrev.h31
-rw-r--r--arch/mips/include/asm/bmips-spaces.h8
-rw-r--r--arch/mips/include/asm/bmips.h128
-rw-r--r--arch/mips/include/asm/bootinfo.h166
-rw-r--r--arch/mips/include/asm/branch.h103
-rw-r--r--arch/mips/include/asm/break.h26
-rw-r--r--arch/mips/include/asm/bug.h44
-rw-r--r--arch/mips/include/asm/bugs.h36
-rw-r--r--arch/mips/include/asm/cache.h19
-rw-r--r--arch/mips/include/asm/cacheflush.h153
-rw-r--r--arch/mips/include/asm/cacheops.h116
-rw-r--r--arch/mips/include/asm/cdmm.h109
-rw-r--r--arch/mips/include/asm/cevt-r4k.h29
-rw-r--r--arch/mips/include/asm/checksum.h253
-rw-r--r--arch/mips/include/asm/clocksource.h11
-rw-r--r--arch/mips/include/asm/cmp.h18
-rw-r--r--arch/mips/include/asm/cmpxchg.h327
-rw-r--r--arch/mips/include/asm/compat-signal.h29
-rw-r--r--arch/mips/include/asm/compat.h191
-rw-r--r--arch/mips/include/asm/compiler.h73
-rw-r--r--arch/mips/include/asm/cop2.h72
-rw-r--r--arch/mips/include/asm/cpu-features.h756
-rw-r--r--arch/mips/include/asm/cpu-info.h219
-rw-r--r--arch/mips/include/asm/cpu-type.h223
-rw-r--r--arch/mips/include/asm/cpu.h451
-rw-r--r--arch/mips/include/asm/cpufeature.h22
-rw-r--r--arch/mips/include/asm/debug.h18
-rw-r--r--arch/mips/include/asm/dec/ecc.h51
-rw-r--r--arch/mips/include/asm/dec/interrupts.h126
-rw-r--r--arch/mips/include/asm/dec/ioasic.h34
-rw-r--r--arch/mips/include/asm/dec/ioasic_addrs.h152
-rw-r--r--arch/mips/include/asm/dec/ioasic_ints.h74
-rw-r--r--arch/mips/include/asm/dec/kn01.h89
-rw-r--r--arch/mips/include/asm/dec/kn02.h91
-rw-r--r--arch/mips/include/asm/dec/kn02ba.h63
-rw-r--r--arch/mips/include/asm/dec/kn02ca.h75
-rw-r--r--arch/mips/include/asm/dec/kn02xa.h84
-rw-r--r--arch/mips/include/asm/dec/kn03.h74
-rw-r--r--arch/mips/include/asm/dec/kn05.h81
-rw-r--r--arch/mips/include/asm/dec/kn230.h22
-rw-r--r--arch/mips/include/asm/dec/machtype.h27
-rw-r--r--arch/mips/include/asm/dec/prom.h165
-rw-r--r--arch/mips/include/asm/dec/system.h15
-rw-r--r--arch/mips/include/asm/delay.h32
-rw-r--r--arch/mips/include/asm/div64.h91
-rw-r--r--arch/mips/include/asm/dma-coherence.h38
-rw-r--r--arch/mips/include/asm/dma-direct.h8
-rw-r--r--arch/mips/include/asm/dma-mapping.h18
-rw-r--r--arch/mips/include/asm/dma.h318
-rw-r--r--arch/mips/include/asm/dmi.h20
-rw-r--r--arch/mips/include/asm/ds1287.h14
-rw-r--r--arch/mips/include/asm/dsemul.h115
-rw-r--r--arch/mips/include/asm/dsp.h81
-rw-r--r--arch/mips/include/asm/edac.h38
-rw-r--r--arch/mips/include/asm/elf.h538
-rw-r--r--arch/mips/include/asm/errno.h17
-rw-r--r--arch/mips/include/asm/eva.h43
-rw-r--r--arch/mips/include/asm/exec.h17
-rw-r--r--arch/mips/include/asm/extable.h14
-rw-r--r--arch/mips/include/asm/fb.h19
-rw-r--r--arch/mips/include/asm/fixmap.h79
-rw-r--r--arch/mips/include/asm/floppy.h56
-rw-r--r--arch/mips/include/asm/fpregdef.h113
-rw-r--r--arch/mips/include/asm/fpu.h328
-rw-r--r--arch/mips/include/asm/fpu_emulator.h187
-rw-r--r--arch/mips/include/asm/ftrace.h90
-rw-r--r--arch/mips/include/asm/futex.h204
-rw-r--r--arch/mips/include/asm/fw/arc/hinv.h176
-rw-r--r--arch/mips/include/asm/fw/arc/types.h86
-rw-r--r--arch/mips/include/asm/fw/cfe/cfe_api.h109
-rw-r--r--arch/mips/include/asm/fw/cfe/cfe_error.h67
-rw-r--r--arch/mips/include/asm/fw/fw.h31
-rw-r--r--arch/mips/include/asm/ginvt.h56
-rw-r--r--arch/mips/include/asm/gio_device.h53
-rw-r--r--arch/mips/include/asm/gt64120.h566
-rw-r--r--arch/mips/include/asm/hardirq.h18
-rw-r--r--arch/mips/include/asm/hazards.h422
-rw-r--r--arch/mips/include/asm/highmem.h59
-rw-r--r--arch/mips/include/asm/hpet.h74
-rw-r--r--arch/mips/include/asm/hugetlb.h86
-rw-r--r--arch/mips/include/asm/hw_irq.h20
-rw-r--r--arch/mips/include/asm/i8259.h93
-rw-r--r--arch/mips/include/asm/ide.h13
-rw-r--r--arch/mips/include/asm/idle.h32
-rw-r--r--arch/mips/include/asm/inst.h88
-rw-r--r--arch/mips/include/asm/io.h562
-rw-r--r--arch/mips/include/asm/ip32/crime.h158
-rw-r--r--arch/mips/include/asm/ip32/ip32_ints.h114
-rw-r--r--arch/mips/include/asm/ip32/mace.h365
-rw-r--r--arch/mips/include/asm/irq.h85
-rw-r--r--arch/mips/include/asm/irq_cpu.h22
-rw-r--r--arch/mips/include/asm/irq_gt641xx.h47
-rw-r--r--arch/mips/include/asm/irq_regs.h28
-rw-r--r--arch/mips/include/asm/irqflags.h185
-rw-r--r--arch/mips/include/asm/isa-rev.h24
-rw-r--r--arch/mips/include/asm/isadep.h35
-rw-r--r--arch/mips/include/asm/jazz.h310
-rw-r--r--arch/mips/include/asm/jazzdma.h88
-rw-r--r--arch/mips/include/asm/jump_label.h75
-rw-r--r--arch/mips/include/asm/kdebug.h20
-rw-r--r--arch/mips/include/asm/kexec.h51
-rw-r--r--arch/mips/include/asm/kgdb.h45
-rw-r--r--arch/mips/include/asm/kmap_types.h13
-rw-r--r--arch/mips/include/asm/kprobes.h78
-rw-r--r--arch/mips/include/asm/kvm_host.h1158
-rw-r--r--arch/mips/include/asm/kvm_types.h7
-rw-r--r--arch/mips/include/asm/linkage.h13
-rw-r--r--arch/mips/include/asm/llsc.h39
-rw-r--r--arch/mips/include/asm/local.h206
-rw-r--r--arch/mips/include/asm/maar.h127
-rw-r--r--arch/mips/include/asm/mach-ar7/ar7.h197
-rw-r--r--arch/mips/include/asm/mach-ar7/irq.h16
-rw-r--r--arch/mips/include/asm/mach-ar7/prom.h12
-rw-r--r--arch/mips/include/asm/mach-ar7/spaces.h22
-rw-r--r--arch/mips/include/asm/mach-ath25/ath25_platform.h74
-rw-r--r--arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h60
-rw-r--r--arch/mips/include/asm/mach-ath79/ar71xx_regs.h1321
-rw-r--r--arch/mips/include/asm/mach-ath79/ar933x_uart.h64
-rw-r--r--arch/mips/include/asm/mach-ath79/ath79.h178
-rw-r--r--arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h56
-rw-r--r--arch/mips/include/asm/mach-ath79/irq.h32
-rw-r--r--arch/mips/include/asm/mach-ath79/kernel-entry-init.h28
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1000.h1211
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1000_dma.h453
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1100_mmc.h210
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1200fb.h15
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1550_spi.h16
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1550nd.h17
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h388
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1xxx_eth.h19
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1xxx_psc.h466
-rw-r--r--arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h94
-rw-r--r--arch/mips/include/asm/mach-au1x00/gpio-au1000.h532
-rw-r--r--arch/mips/include/asm/mach-au1x00/gpio-au1300.h123
-rw-r--r--arch/mips/include/asm/mach-au1x00/prom.h13
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/bcm47xx.h38
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h140
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/cpu-feature-overrides.h83
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_board.h13
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h1068
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_cs.h11
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h126
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_flash.h13
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_hsspi.h9
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_pci.h7
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_pcmcia.h14
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h11
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h7
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_usb_usbd.h18
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h35
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h104
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_irq.h14
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_iudma.h39
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h36
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h1431
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_reset.h22
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_timer.h12
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h54
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h54
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/ioremap.h44
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/irq.h8
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/spaces.h17
-rw-r--r--arch/mips/include/asm/mach-bmips/cpu-feature-overrides.h15
-rw-r--r--arch/mips/include/asm/mach-bmips/ioremap.h29
-rw-r--r--arch/mips/include/asm/mach-bmips/spaces.h18
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h80
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/irq.h58
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h160
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/mangle-port.h64
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/spaces.h24
-rw-r--r--arch/mips/include/asm/mach-cobalt/cobalt.h22
-rw-r--r--arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h57
-rw-r--r--arch/mips/include/asm/mach-cobalt/irq.h57
-rw-r--r--arch/mips/include/asm/mach-cobalt/mach-gt64120.h14
-rw-r--r--arch/mips/include/asm/mach-db1x00/bcsr.h261
-rw-r--r--arch/mips/include/asm/mach-db1x00/irq.h23
-rw-r--r--arch/mips/include/asm/mach-dec/cpu-feature-overrides.h97
-rw-r--r--arch/mips/include/asm/mach-dec/mc146818rtc.h39
-rw-r--r--arch/mips/include/asm/mach-generic/cpu-feature-overrides.h13
-rw-r--r--arch/mips/include/asm/mach-generic/floppy.h133
-rw-r--r--arch/mips/include/asm/mach-generic/ide.h138
-rw-r--r--arch/mips/include/asm/mach-generic/ioremap.h21
-rw-r--r--arch/mips/include/asm/mach-generic/irq.h39
-rw-r--r--arch/mips/include/asm/mach-generic/kernel-entry-init.h25
-rw-r--r--arch/mips/include/asm/mach-generic/kmalloc.h13
-rw-r--r--arch/mips/include/asm/mach-generic/mangle-port.h52
-rw-r--r--arch/mips/include/asm/mach-generic/mc146818rtc.h36
-rw-r--r--arch/mips/include/asm/mach-generic/spaces.h110
-rw-r--r--arch/mips/include/asm/mach-generic/topology.h1
-rw-r--r--arch/mips/include/asm/mach-ingenic/cpu-feature-overrides.h50
-rw-r--r--arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h51
-rw-r--r--arch/mips/include/asm/mach-ip22/spaces.h17
-rw-r--r--arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h76
-rw-r--r--arch/mips/include/asm/mach-ip27/irq.h24
-rw-r--r--arch/mips/include/asm/mach-ip27/kernel-entry-init.h96
-rw-r--r--arch/mips/include/asm/mach-ip27/mangle-port.h25
-rw-r--r--arch/mips/include/asm/mach-ip27/mmzone.h28
-rw-r--r--arch/mips/include/asm/mach-ip27/spaces.h35
-rw-r--r--arch/mips/include/asm/mach-ip27/topology.h31
-rw-r--r--arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h54
-rw-r--r--arch/mips/include/asm/mach-ip28/spaces.h18
-rw-r--r--arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h79
-rw-r--r--arch/mips/include/asm/mach-ip30/kernel-entry-init.h13
-rw-r--r--arch/mips/include/asm/mach-ip30/mangle-port.h22
-rw-r--r--arch/mips/include/asm/mach-ip30/spaces.h20
-rw-r--r--arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h51
-rw-r--r--arch/mips/include/asm/mach-ip32/kmalloc.h12
-rw-r--r--arch/mips/include/asm/mach-ip32/mangle-port.h26
-rw-r--r--arch/mips/include/asm/mach-jazz/floppy.h133
-rw-r--r--arch/mips/include/asm/mach-jazz/mc146818rtc.h38
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/cpu-feature-overrides.h53
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h21
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/irq.h16
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h69
-rw-r--r--arch/mips/include/asm/mach-lantiq/lantiq.h55
-rw-r--r--arch/mips/include/asm/mach-lantiq/lantiq_platform.h18
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/irq.h16
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h22
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h106
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/xway_dma.h50
-rw-r--r--arch/mips/include/asm/mach-loongson2ef/cpu-feature-overrides.h44
-rw-r--r--arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536.h306
-rw-r--r--arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_mfgpt.h36
-rw-r--r--arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_pci.h153
-rw-r--r--arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_vsm.h32
-rw-r--r--arch/mips/include/asm/mach-loongson2ef/loongson.h327
-rw-r--r--arch/mips/include/asm/mach-loongson2ef/machine.h23
-rw-r--r--arch/mips/include/asm/mach-loongson2ef/mem.h37
-rw-r--r--arch/mips/include/asm/mach-loongson2ef/pci.h46
-rw-r--r--arch/mips/include/asm/mach-loongson2ef/spaces.h10
-rw-r--r--arch/mips/include/asm/mach-loongson32/cpufreq.h18
-rw-r--r--arch/mips/include/asm/mach-loongson32/dma.h21
-rw-r--r--arch/mips/include/asm/mach-loongson32/irq.h107
-rw-r--r--arch/mips/include/asm/mach-loongson32/loongson1.h54
-rw-r--r--arch/mips/include/asm/mach-loongson32/nand.h26
-rw-r--r--arch/mips/include/asm/mach-loongson32/platform.h28
-rw-r--r--arch/mips/include/asm/mach-loongson32/regs-clk.h81
-rw-r--r--arch/mips/include/asm/mach-loongson32/regs-mux.h124
-rw-r--r--arch/mips/include/asm/mach-loongson32/regs-pwm.h25
-rw-r--r--arch/mips/include/asm/mach-loongson32/regs-rtc.h19
-rw-r--r--arch/mips/include/asm/mach-loongson32/regs-wdt.h15
-rw-r--r--arch/mips/include/asm/mach-loongson64/boot_param.h236
-rw-r--r--arch/mips/include/asm/mach-loongson64/builtin_dtbs.h16
-rw-r--r--arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h51
-rw-r--r--arch/mips/include/asm/mach-loongson64/cpucfg-emul.h74
-rw-r--r--arch/mips/include/asm/mach-loongson64/irq.h15
-rw-r--r--arch/mips/include/asm/mach-loongson64/kernel-entry-init.h86
-rw-r--r--arch/mips/include/asm/mach-loongson64/loongson.h241
-rw-r--r--arch/mips/include/asm/mach-loongson64/loongson_hwmon.h56
-rw-r--r--arch/mips/include/asm/mach-loongson64/loongson_regs.h246
-rw-r--r--arch/mips/include/asm/mach-loongson64/mmzone.h24
-rw-r--r--arch/mips/include/asm/mach-loongson64/pci.h19
-rw-r--r--arch/mips/include/asm/mach-loongson64/spaces.h17
-rw-r--r--arch/mips/include/asm/mach-loongson64/topology.h25
-rw-r--r--arch/mips/include/asm/mach-loongson64/workarounds.h8
-rw-r--r--arch/mips/include/asm/mach-malta/cpu-feature-overrides.h70
-rw-r--r--arch/mips/include/asm/mach-malta/irq.h10
-rw-r--r--arch/mips/include/asm/mach-malta/kernel-entry-init.h145
-rw-r--r--arch/mips/include/asm/mach-malta/mach-gt64120.h20
-rw-r--r--arch/mips/include/asm/mach-malta/mc146818rtc.h36
-rw-r--r--arch/mips/include/asm/mach-malta/spaces.h46
-rw-r--r--arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h57
-rw-r--r--arch/mips/include/asm/mach-netlogic/irq.h17
-rw-r--r--arch/mips/include/asm/mach-netlogic/multi-node.h74
-rw-r--r--arch/mips/include/asm/mach-pic32/cpu-feature-overrides.h32
-rw-r--r--arch/mips/include/asm/mach-pic32/irq.h14
-rw-r--r--arch/mips/include/asm/mach-pic32/pic32.h36
-rw-r--r--arch/mips/include/asm/mach-pic32/spaces.h15
-rw-r--r--arch/mips/include/asm/mach-pistachio/irq.h15
-rw-r--r--arch/mips/include/asm/mach-ralink/irq.h10
-rw-r--r--arch/mips/include/asm/mach-ralink/mt7620.h138
-rw-r--r--arch/mips/include/asm/mach-ralink/mt7620/cpu-feature-overrides.h52
-rw-r--r--arch/mips/include/asm/mach-ralink/mt7621.h34
-rw-r--r--arch/mips/include/asm/mach-ralink/mt7621/cpu-feature-overrides.h60
-rw-r--r--arch/mips/include/asm/mach-ralink/pinmux.h52
-rw-r--r--arch/mips/include/asm/mach-ralink/ralink_regs.h62
-rw-r--r--arch/mips/include/asm/mach-ralink/rt288x.h51
-rw-r--r--arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h51
-rw-r--r--arch/mips/include/asm/mach-ralink/rt305x.h160
-rw-r--r--arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h51
-rw-r--r--arch/mips/include/asm/mach-ralink/rt3883.h251
-rw-r--r--arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h50
-rw-r--r--arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h63
-rw-r--r--arch/mips/include/asm/mach-rc32434/ddr.h141
-rw-r--r--arch/mips/include/asm/mach-rc32434/dma.h104
-rw-r--r--arch/mips/include/asm/mach-rc32434/dma_v.h53
-rw-r--r--arch/mips/include/asm/mach-rc32434/eth.h220
-rw-r--r--arch/mips/include/asm/mach-rc32434/gpio.h79
-rw-r--r--arch/mips/include/asm/mach-rc32434/integ.h59
-rw-r--r--arch/mips/include/asm/mach-rc32434/irq.h37
-rw-r--r--arch/mips/include/asm/mach-rc32434/pci.h478
-rw-r--r--arch/mips/include/asm/mach-rc32434/prom.h40
-rw-r--r--arch/mips/include/asm/mach-rc32434/rb.h75
-rw-r--r--arch/mips/include/asm/mach-rc32434/rc32434.h20
-rw-r--r--arch/mips/include/asm/mach-rc32434/timer.h65
-rw-r--r--arch/mips/include/asm/mach-rm/cpu-feature-overrides.h42
-rw-r--r--arch/mips/include/asm/mach-rm/mc146818rtc.h21
-rw-r--r--arch/mips/include/asm/mach-sibyte/cpu-feature-overrides.h49
-rw-r--r--arch/mips/include/asm/mach-tx39xx/ioremap.h25
-rw-r--r--arch/mips/include/asm/mach-tx39xx/mangle-port.h24
-rw-r--r--arch/mips/include/asm/mach-tx39xx/spaces.h17
-rw-r--r--arch/mips/include/asm/mach-tx49xx/cpu-feature-overrides.h26
-rw-r--r--arch/mips/include/asm/mach-tx49xx/ioremap.h30
-rw-r--r--arch/mips/include/asm/mach-tx49xx/kmalloc.h7
-rw-r--r--arch/mips/include/asm/mach-tx49xx/mangle-port.h27
-rw-r--r--arch/mips/include/asm/mach-tx49xx/spaces.h17
-rw-r--r--arch/mips/include/asm/mach-vr41xx/irq.h9
-rw-r--r--arch/mips/include/asm/machine.h90
-rw-r--r--arch/mips/include/asm/mc146818-time.h119
-rw-r--r--arch/mips/include/asm/mc146818rtc.h16
-rw-r--r--arch/mips/include/asm/mips-boards/bonito64.h430
-rw-r--r--arch/mips/include/asm/mips-boards/generic.h79
-rw-r--r--arch/mips/include/asm/mips-boards/launch.h36
-rw-r--r--arch/mips/include/asm/mips-boards/malta.h97
-rw-r--r--arch/mips/include/asm/mips-boards/maltaint.h63
-rw-r--r--arch/mips/include/asm/mips-boards/msc01_pci.h258
-rw-r--r--arch/mips/include/asm/mips-boards/piix4.h58
-rw-r--r--arch/mips/include/asm/mips-boards/sead3-addr.h83
-rw-r--r--arch/mips/include/asm/mips-boards/sim.h27
-rw-r--r--arch/mips/include/asm/mips-cm.h459
-rw-r--r--arch/mips/include/asm/mips-cpc.h179
-rw-r--r--arch/mips/include/asm/mips-cps.h236
-rw-r--r--arch/mips/include/asm/mips-gic.h373
-rw-r--r--arch/mips/include/asm/mips-r2-to-r6-emul.h101
-rw-r--r--arch/mips/include/asm/mips_mt.h31
-rw-r--r--arch/mips/include/asm/mipsmtregs.h423
-rw-r--r--arch/mips/include/asm/mipsprom.h77
-rw-r--r--arch/mips/include/asm/mipsregs.h2927
-rw-r--r--arch/mips/include/asm/mmiowb.h11
-rw-r--r--arch/mips/include/asm/mmu.h25
-rw-r--r--arch/mips/include/asm/mmu_context.h240
-rw-r--r--arch/mips/include/asm/mmzone.h29
-rw-r--r--arch/mips/include/asm/module.h86
-rw-r--r--arch/mips/include/asm/msa.h278
-rw-r--r--arch/mips/include/asm/msc01_ic.h147
-rw-r--r--arch/mips/include/asm/netlogic/common.h132
-rw-r--r--arch/mips/include/asm/netlogic/haldefs.h171
-rw-r--r--arch/mips/include/asm/netlogic/interrupt.h45
-rw-r--r--arch/mips/include/asm/netlogic/mips-extns.h301
-rw-r--r--arch/mips/include/asm/netlogic/psb-bootinfo.h95
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/bridge.h186
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h89
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/iomap.h214
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/pcibus.h113
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/pic.h366
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/sys.h213
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/uart.h192
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/xlp.h119
-rw-r--r--arch/mips/include/asm/netlogic/xlr/bridge.h104
-rw-r--r--arch/mips/include/asm/netlogic/xlr/flash.h55
-rw-r--r--arch/mips/include/asm/netlogic/xlr/fmn.h365
-rw-r--r--arch/mips/include/asm/netlogic/xlr/gpio.h74
-rw-r--r--arch/mips/include/asm/netlogic/xlr/iomap.h109
-rw-r--r--arch/mips/include/asm/netlogic/xlr/msidef.h84
-rw-r--r--arch/mips/include/asm/netlogic/xlr/pic.h306
-rw-r--r--arch/mips/include/asm/netlogic/xlr/xlr.h59
-rw-r--r--arch/mips/include/asm/octeon/cvmx-address.h341
-rw-r--r--arch/mips/include/asm/octeon/cvmx-agl-defs.h1759
-rw-r--r--arch/mips/include/asm/octeon/cvmx-asm.h139
-rw-r--r--arch/mips/include/asm/octeon/cvmx-asxx-defs.h566
-rw-r--r--arch/mips/include/asm/octeon/cvmx-boot-vector.h53
-rw-r--r--arch/mips/include/asm/octeon/cvmx-bootinfo.h424
-rw-r--r--arch/mips/include/asm/octeon/cvmx-bootmem.h341
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ciu-defs.h176
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ciu2-defs.h48
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ciu3-defs.h353
-rw-r--r--arch/mips/include/asm/octeon/cvmx-cmd-queue.h619
-rw-r--r--arch/mips/include/asm/octeon/cvmx-config.h169
-rw-r--r--arch/mips/include/asm/octeon/cvmx-coremask.h89
-rw-r--r--arch/mips/include/asm/octeon/cvmx-dbg-defs.h101
-rw-r--r--arch/mips/include/asm/octeon/cvmx-dpi-defs.h874
-rw-r--r--arch/mips/include/asm/octeon/cvmx-fau.h619
-rw-r--r--arch/mips/include/asm/octeon/cvmx-fpa-defs.h1252
-rw-r--r--arch/mips/include/asm/octeon/cvmx-fpa.h308
-rw-r--r--arch/mips/include/asm/octeon/cvmx-gmxx-defs.h2259
-rw-r--r--arch/mips/include/asm/octeon/cvmx-gpio-defs.h399
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-board.h124
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-errata.h33
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-jtag.h43
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-loop.h60
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-npi.h61
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-rgmii.h93
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-sgmii.h87
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-spi.h84
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-util.h192
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-xaui.h87
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper.h178
-rw-r--r--arch/mips/include/asm/octeon/cvmx-iob-defs.h903
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ipd-defs.h1472
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ipd.h339
-rw-r--r--arch/mips/include/asm/octeon/cvmx-l2c-defs.h239
-rw-r--r--arch/mips/include/asm/octeon/cvmx-l2c.h364
-rw-r--r--arch/mips/include/asm/octeon/cvmx-l2d-defs.h60
-rw-r--r--arch/mips/include/asm/octeon/cvmx-l2t-defs.h143
-rw-r--r--arch/mips/include/asm/octeon/cvmx-led-defs.h214
-rw-r--r--arch/mips/include/asm/octeon/cvmx-lmcx-defs.h2943
-rw-r--r--arch/mips/include/asm/octeon/cvmx-mio-defs.h4396
-rw-r--r--arch/mips/include/asm/octeon/cvmx-mixx-defs.h430
-rw-r--r--arch/mips/include/asm/octeon/cvmx-npei-defs.h3925
-rw-r--r--arch/mips/include/asm/octeon/cvmx-npi-defs.h2514
-rw-r--r--arch/mips/include/asm/octeon/cvmx-packet.h69
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pci-defs.h2037
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pciercx-defs.h368
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pcsx-defs.h826
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h664
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pemx-defs.h651
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pescx-defs.h579
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pexp-defs.h224
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pip-defs.h2734
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pip.h524
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pko-defs.h2205
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pko.h643
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pow-defs.h1001
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pow.h2215
-rw-r--r--arch/mips/include/asm/octeon/cvmx-rnm-defs.h171
-rw-r--r--arch/mips/include/asm/octeon/cvmx-rst-defs.h278
-rw-r--r--arch/mips/include/asm/octeon/cvmx-scratch.h139
-rw-r--r--arch/mips/include/asm/octeon/cvmx-sli-defs.h129
-rw-r--r--arch/mips/include/asm/octeon/cvmx-spi.h269
-rw-r--r--arch/mips/include/asm/octeon/cvmx-spinlock.h232
-rw-r--r--arch/mips/include/asm/octeon/cvmx-spxx-defs.h446
-rw-r--r--arch/mips/include/asm/octeon/cvmx-sriox-defs.h1614
-rw-r--r--arch/mips/include/asm/octeon/cvmx-srxx-defs.h140
-rw-r--r--arch/mips/include/asm/octeon/cvmx-stxx-defs.h330
-rw-r--r--arch/mips/include/asm/octeon/cvmx-sysinfo.h125
-rw-r--r--arch/mips/include/asm/octeon/cvmx-uctlx-defs.h386
-rw-r--r--arch/mips/include/asm/octeon/cvmx-wqe.h658
-rw-r--r--arch/mips/include/asm/octeon/cvmx.h495
-rw-r--r--arch/mips/include/asm/octeon/octeon-feature.h213
-rw-r--r--arch/mips/include/asm/octeon/octeon-model.h409
-rw-r--r--arch/mips/include/asm/octeon/octeon.h366
-rw-r--r--arch/mips/include/asm/octeon/pci-octeon.h69
-rw-r--r--arch/mips/include/asm/paccess.h114
-rw-r--r--arch/mips/include/asm/page.h261
-rw-r--r--arch/mips/include/asm/pci.h149
-rw-r--r--arch/mips/include/asm/pci/bridge.h825
-rw-r--r--arch/mips/include/asm/perf_event.h12
-rw-r--r--arch/mips/include/asm/pgalloc.h112
-rw-r--r--arch/mips/include/asm/pgtable-32.h248
-rw-r--r--arch/mips/include/asm/pgtable-64.h346
-rw-r--r--arch/mips/include/asm/pgtable-bits.h285
-rw-r--r--arch/mips/include/asm/pgtable.h744
-rw-r--r--arch/mips/include/asm/pm-cps.h49
-rw-r--r--arch/mips/include/asm/pm.h155
-rw-r--r--arch/mips/include/asm/prefetch.h87
-rw-r--r--arch/mips/include/asm/processor.h425
-rw-r--r--arch/mips/include/asm/prom.h30
-rw-r--r--arch/mips/include/asm/ptrace.h189
-rw-r--r--arch/mips/include/asm/r4k-timer.h30
-rw-r--r--arch/mips/include/asm/r4kcache.h379
-rw-r--r--arch/mips/include/asm/reboot.h15
-rw-r--r--arch/mips/include/asm/reg.h1
-rw-r--r--arch/mips/include/asm/regdef.h106
-rw-r--r--arch/mips/include/asm/rtlx.h88
-rw-r--r--arch/mips/include/asm/seccomp.h35
-rw-r--r--arch/mips/include/asm/setup.h31
-rw-r--r--arch/mips/include/asm/sgi/gio.h86
-rw-r--r--arch/mips/include/asm/sgi/heart.h323
-rw-r--r--arch/mips/include/asm/sgi/hpc3.h317
-rw-r--r--arch/mips/include/asm/sgi/ioc.h200
-rw-r--r--arch/mips/include/asm/sgi/ip22.h80
-rw-r--r--arch/mips/include/asm/sgi/mc.h231
-rw-r--r--arch/mips/include/asm/sgi/pi1.h72
-rw-r--r--arch/mips/include/asm/sgi/seeq.h21
-rw-r--r--arch/mips/include/asm/sgi/wd.h20
-rw-r--r--arch/mips/include/asm/sgialib.h61
-rw-r--r--arch/mips/include/asm/sgiarcs.h505
-rw-r--r--arch/mips/include/asm/shmparam.h13
-rw-r--r--arch/mips/include/asm/sibyte/bcm1480_int.h299
-rw-r--r--arch/mips/include/asm/sibyte/bcm1480_l2c.h163
-rw-r--r--arch/mips/include/asm/sibyte/bcm1480_mc.h971
-rw-r--r--arch/mips/include/asm/sibyte/bcm1480_regs.h889
-rw-r--r--arch/mips/include/asm/sibyte/bcm1480_scd.h393
-rw-r--r--arch/mips/include/asm/sibyte/bigsur.h35
-rw-r--r--arch/mips/include/asm/sibyte/board.h55
-rw-r--r--arch/mips/include/asm/sibyte/carmel.h45
-rw-r--r--arch/mips/include/asm/sibyte/sb1250.h55
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_defs.h246
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_dma.h581
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_genbus.h461
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_int.h235
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_l2c.h118
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_ldt.h409
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_mac.h643
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_mc.h537
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_regs.h880
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_scd.h641
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_smbus.h191
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_syncser.h133
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_uart.h349
-rw-r--r--arch/mips/include/asm/sibyte/sentosa.h27
-rw-r--r--arch/mips/include/asm/sibyte/swarm.h51
-rw-r--r--arch/mips/include/asm/sigcontext.h37
-rw-r--r--arch/mips/include/asm/signal.h35
-rw-r--r--arch/mips/include/asm/sim.h70
-rw-r--r--arch/mips/include/asm/smp-cps.h48
-rw-r--r--arch/mips/include/asm/smp-ops.h121
-rw-r--r--arch/mips/include/asm/smp.h138
-rw-r--r--arch/mips/include/asm/sn/addrs.h377
-rw-r--r--arch/mips/include/asm/sn/agent.h45
-rw-r--r--arch/mips/include/asm/sn/arch.h28
-rw-r--r--arch/mips/include/asm/sn/fru.h44
-rw-r--r--arch/mips/include/asm/sn/gda.h105
-rw-r--r--arch/mips/include/asm/sn/intr.h112
-rw-r--r--arch/mips/include/asm/sn/io.h59
-rw-r--r--arch/mips/include/asm/sn/ioc3.h606
-rw-r--r--arch/mips/include/asm/sn/irq_alloc.h11
-rw-r--r--arch/mips/include/asm/sn/klconfig.h894
-rw-r--r--arch/mips/include/asm/sn/kldir.h36
-rw-r--r--arch/mips/include/asm/sn/klkernvars.h29
-rw-r--r--arch/mips/include/asm/sn/launch.h106
-rw-r--r--arch/mips/include/asm/sn/mapped_kernel.h55
-rw-r--r--arch/mips/include/asm/sn/nmi.h125
-rw-r--r--arch/mips/include/asm/sn/sn0/addrs.h283
-rw-r--r--arch/mips/include/asm/sn/sn0/arch.h56
-rw-r--r--arch/mips/include/asm/sn/sn0/hub.h62
-rw-r--r--arch/mips/include/asm/sn/sn0/hubio.h972
-rw-r--r--arch/mips/include/asm/sn/sn0/hubmd.h789
-rw-r--r--arch/mips/include/asm/sn/sn0/hubni.h263
-rw-r--r--arch/mips/include/asm/sn/sn0/hubpi.h409
-rw-r--r--arch/mips/include/asm/sn/sn0/kldir.h186
-rw-r--r--arch/mips/include/asm/sn/types.h25
-rw-r--r--arch/mips/include/asm/sni.h246
-rw-r--r--arch/mips/include/asm/socket.h50
-rw-r--r--arch/mips/include/asm/sparsemem.h18
-rw-r--r--arch/mips/include/asm/spinlock.h31
-rw-r--r--arch/mips/include/asm/spinlock_types.h8
-rw-r--r--arch/mips/include/asm/spram.h11
-rw-r--r--arch/mips/include/asm/stackframe.h492
-rw-r--r--arch/mips/include/asm/stackprotector.h41
-rw-r--r--arch/mips/include/asm/stacktrace.h89
-rw-r--r--arch/mips/include/asm/string.h22
-rw-r--r--arch/mips/include/asm/switch_to.h142
-rw-r--r--arch/mips/include/asm/sync.h209
-rw-r--r--arch/mips/include/asm/syscall.h160
-rw-r--r--arch/mips/include/asm/termios.h105
-rw-r--r--arch/mips/include/asm/thread_info.h199
-rw-r--r--arch/mips/include/asm/time.h73
-rw-r--r--arch/mips/include/asm/timex.h102
-rw-r--r--arch/mips/include/asm/tlb.h26
-rw-r--r--arch/mips/include/asm/tlbdebug.h17
-rw-r--r--arch/mips/include/asm/tlbex.h36
-rw-r--r--arch/mips/include/asm/tlbflush.h49
-rw-r--r--arch/mips/include/asm/tlbmisc.h11
-rw-r--r--arch/mips/include/asm/topology.h21
-rw-r--r--arch/mips/include/asm/traps.h38
-rw-r--r--arch/mips/include/asm/txx9/boards.h14
-rw-r--r--arch/mips/include/asm/txx9/dmac.h48
-rw-r--r--arch/mips/include/asm/txx9/generic.h98
-rw-r--r--arch/mips/include/asm/txx9/jmr3927.h179
-rw-r--r--arch/mips/include/asm/txx9/pci.h39
-rw-r--r--arch/mips/include/asm/txx9/rbtx4927.h92
-rw-r--r--arch/mips/include/asm/txx9/rbtx4938.h145
-rw-r--r--arch/mips/include/asm/txx9/rbtx4939.h142
-rw-r--r--arch/mips/include/asm/txx9/smsc_fdc37m81x.h68
-rw-r--r--arch/mips/include/asm/txx9/spi.h34
-rw-r--r--arch/mips/include/asm/txx9/tx3927.h341
-rw-r--r--arch/mips/include/asm/txx9/tx4927.h273
-rw-r--r--arch/mips/include/asm/txx9/tx4927pcic.h203
-rw-r--r--arch/mips/include/asm/txx9/tx4938.h312
-rw-r--r--arch/mips/include/asm/txx9/tx4939.h524
-rw-r--r--arch/mips/include/asm/txx9irq.h34
-rw-r--r--arch/mips/include/asm/txx9pio.h29
-rw-r--r--arch/mips/include/asm/txx9tmr.h67
-rw-r--r--arch/mips/include/asm/types.h17
-rw-r--r--arch/mips/include/asm/uaccess.h791
-rw-r--r--arch/mips/include/asm/uasm.h325
-rw-r--r--arch/mips/include/asm/unaligned-emul.h779
-rw-r--r--arch/mips/include/asm/unistd.h62
-rw-r--r--arch/mips/include/asm/unroll.h75
-rw-r--r--arch/mips/include/asm/uprobes.h45
-rw-r--r--arch/mips/include/asm/vdso.h58
-rw-r--r--arch/mips/include/asm/vdso/clocksource.h9
-rw-r--r--arch/mips/include/asm/vdso/gettimeofday.h219
-rw-r--r--arch/mips/include/asm/vdso/processor.h27
-rw-r--r--arch/mips/include/asm/vdso/vdso.h75
-rw-r--r--arch/mips/include/asm/vdso/vsyscall.h27
-rw-r--r--arch/mips/include/asm/vermagic.h72
-rw-r--r--arch/mips/include/asm/vga.h56
-rw-r--r--arch/mips/include/asm/vmalloc.h4
-rw-r--r--arch/mips/include/asm/vpe.h129
-rw-r--r--arch/mips/include/asm/vr41xx/capcella.h30
-rw-r--r--arch/mips/include/asm/vr41xx/giu.h41
-rw-r--r--arch/mips/include/asm/vr41xx/irq.h97
-rw-r--r--arch/mips/include/asm/vr41xx/mpc30x.h24
-rw-r--r--arch/mips/include/asm/vr41xx/pci.h77
-rw-r--r--arch/mips/include/asm/vr41xx/siu.h45
-rw-r--r--arch/mips/include/asm/vr41xx/tb0219.h29
-rw-r--r--arch/mips/include/asm/vr41xx/tb0226.h30
-rw-r--r--arch/mips/include/asm/vr41xx/tb0287.h30
-rw-r--r--arch/mips/include/asm/vr41xx/vr41xx.h148
-rw-r--r--arch/mips/include/asm/war.h73
-rw-r--r--arch/mips/include/asm/watch.h32
-rw-r--r--arch/mips/include/asm/wbflush.h34
-rw-r--r--arch/mips/include/asm/xtalk/xtalk.h52
-rw-r--r--arch/mips/include/asm/xtalk/xwidget.h279
-rw-r--r--arch/mips/include/asm/yamon-dt.h60
-rw-r--r--arch/mips/include/uapi/asm/Kbuild9
-rw-r--r--arch/mips/include/uapi/asm/auxvec.h20
-rw-r--r--arch/mips/include/uapi/asm/bitfield.h30
-rw-r--r--arch/mips/include/uapi/asm/bitsperlong.h9
-rw-r--r--arch/mips/include/uapi/asm/break.h32
-rw-r--r--arch/mips/include/uapi/asm/byteorder.h20
-rw-r--r--arch/mips/include/uapi/asm/cachectl.h27
-rw-r--r--arch/mips/include/uapi/asm/errno.h130
-rw-r--r--arch/mips/include/uapi/asm/fcntl.h80
-rw-r--r--arch/mips/include/uapi/asm/hwcap.h22
-rw-r--r--arch/mips/include/uapi/asm/inst.h1141
-rw-r--r--arch/mips/include/uapi/asm/ioctl.h28
-rw-r--r--arch/mips/include/uapi/asm/ioctls.h119
-rw-r--r--arch/mips/include/uapi/asm/kvm.h227
-rw-r--r--arch/mips/include/uapi/asm/mman.h109
-rw-r--r--arch/mips/include/uapi/asm/msgbuf.h68
-rw-r--r--arch/mips/include/uapi/asm/param.h17
-rw-r--r--arch/mips/include/uapi/asm/poll.h10
-rw-r--r--arch/mips/include/uapi/asm/posix_types.h26
-rw-r--r--arch/mips/include/uapi/asm/ptrace.h109
-rw-r--r--arch/mips/include/uapi/asm/reg.h207
-rw-r--r--arch/mips/include/uapi/asm/resource.h36
-rw-r--r--arch/mips/include/uapi/asm/sembuf.h36
-rw-r--r--arch/mips/include/uapi/asm/setup.h8
-rw-r--r--arch/mips/include/uapi/asm/sgidefs.h37
-rw-r--r--arch/mips/include/uapi/asm/shmbuf.h58
-rw-r--r--arch/mips/include/uapi/asm/sigcontext.h91
-rw-r--r--arch/mips/include/uapi/asm/siginfo.h32
-rw-r--r--arch/mips/include/uapi/asm/signal.h120
-rw-r--r--arch/mips/include/uapi/asm/socket.h162
-rw-r--r--arch/mips/include/uapi/asm/sockios.h27
-rw-r--r--arch/mips/include/uapi/asm/stat.h133
-rw-r--r--arch/mips/include/uapi/asm/statfs.h101
-rw-r--r--arch/mips/include/uapi/asm/swab.h71
-rw-r--r--arch/mips/include/uapi/asm/sysmips.h26
-rw-r--r--arch/mips/include/uapi/asm/termbits.h228
-rw-r--r--arch/mips/include/uapi/asm/termios.h81
-rw-r--r--arch/mips/include/uapi/asm/types.h31
-rw-r--r--arch/mips/include/uapi/asm/ucontext.h66
-rw-r--r--arch/mips/include/uapi/asm/unistd.h39
-rw-r--r--arch/mips/ingenic/Kconfig76
-rw-r--r--arch/mips/jazz/Kconfig34
-rw-r--r--arch/mips/jazz/Makefile6
-rw-r--r--arch/mips/jazz/Platform5
-rw-r--r--arch/mips/jazz/irq.c149
-rw-r--r--arch/mips/jazz/jazzdma.c623
-rw-r--r--arch/mips/jazz/reset.c57
-rw-r--r--arch/mips/jazz/setup.c212
-rw-r--r--arch/mips/kernel/.gitignore2
-rw-r--r--arch/mips/kernel/Makefile119
-rw-r--r--arch/mips/kernel/asm-offsets.c405
-rw-r--r--arch/mips/kernel/binfmt_elfn32.c113
-rw-r--r--arch/mips/kernel/binfmt_elfo32.c116
-rw-r--r--arch/mips/kernel/bmips_5xxx_init.S747
-rw-r--r--arch/mips/kernel/bmips_vec.S322
-rw-r--r--arch/mips/kernel/branch.c908
-rw-r--r--arch/mips/kernel/cacheinfo.c100
-rw-r--r--arch/mips/kernel/cevt-bcm1480.c138
-rw-r--r--arch/mips/kernel/cevt-ds1287.c121
-rw-r--r--arch/mips/kernel/cevt-gt641xx.c146
-rw-r--r--arch/mips/kernel/cevt-r4k.c345
-rw-r--r--arch/mips/kernel/cevt-sb1250.c138
-rw-r--r--arch/mips/kernel/cevt-txx9.c220
-rw-r--r--arch/mips/kernel/cmpxchg.c104
-rw-r--r--arch/mips/kernel/cps-vec-ns16550.S212
-rw-r--r--arch/mips/kernel/cps-vec.S630
-rw-r--r--arch/mips/kernel/cpu-probe.c2170
-rw-r--r--arch/mips/kernel/cpu-r3k-probe.c171
-rw-r--r--arch/mips/kernel/crash.c103
-rw-r--r--arch/mips/kernel/crash_dump.c67
-rw-r--r--arch/mips/kernel/csrc-bcm1480.c48
-rw-r--r--arch/mips/kernel/csrc-ioasic.c65
-rw-r--r--arch/mips/kernel/csrc-r4k.c130
-rw-r--r--arch/mips/kernel/csrc-sb1250.c71
-rw-r--r--arch/mips/kernel/early_printk.c41
-rw-r--r--arch/mips/kernel/early_printk_8250.c54
-rw-r--r--arch/mips/kernel/elf.c343
-rw-r--r--arch/mips/kernel/entry.S186
-rw-r--r--arch/mips/kernel/fpu-probe.c321
-rw-r--r--arch/mips/kernel/fpu-probe.h40
-rw-r--r--arch/mips/kernel/ftrace.c414
-rw-r--r--arch/mips/kernel/genex.S682
-rw-r--r--arch/mips/kernel/gpio_txx9.c86
-rw-r--r--arch/mips/kernel/head.S185
-rw-r--r--arch/mips/kernel/i8253.c38
-rw-r--r--arch/mips/kernel/idle.c272
-rw-r--r--arch/mips/kernel/irq-gt641xx.c118
-rw-r--r--arch/mips/kernel/irq-msc01.c156
-rw-r--r--arch/mips/kernel/irq-rm7000.c45
-rw-r--r--arch/mips/kernel/irq.c109
-rw-r--r--arch/mips/kernel/irq_txx9.c191
-rw-r--r--arch/mips/kernel/jump_label.c90
-rw-r--r--arch/mips/kernel/kgdb.c415
-rw-r--r--arch/mips/kernel/kprobes.c518
-rw-r--r--arch/mips/kernel/linux32.c133
-rw-r--r--arch/mips/kernel/machine_kexec.c264
-rw-r--r--arch/mips/kernel/mcount.S220
-rw-r--r--arch/mips/kernel/mips-cm.c514
-rw-r--r--arch/mips/kernel/mips-cpc.c122
-rw-r--r--arch/mips/kernel/mips-mt-fpaff.c219
-rw-r--r--arch/mips/kernel/mips-mt.c246
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c2363
-rw-r--r--arch/mips/kernel/module.c457
-rw-r--r--arch/mips/kernel/octeon_switch.S554
-rw-r--r--arch/mips/kernel/perf_event.c67
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c2138
-rw-r--r--arch/mips/kernel/pm-cps.c738
-rw-r--r--arch/mips/kernel/pm.c95
-rw-r--r--arch/mips/kernel/probes-common.h79
-rw-r--r--arch/mips/kernel/proc.c193
-rw-r--r--arch/mips/kernel/process.c896
-rw-r--r--arch/mips/kernel/prom.c67
-rw-r--r--arch/mips/kernel/ptrace.c1382
-rw-r--r--arch/mips/kernel/ptrace32.c317
-rw-r--r--arch/mips/kernel/r2300_fpu.S130
-rw-r--r--arch/mips/kernel/r2300_switch.S65
-rw-r--r--arch/mips/kernel/r4k-bugs64.c322
-rw-r--r--arch/mips/kernel/r4k_fpu.S417
-rw-r--r--arch/mips/kernel/r4k_switch.S59
-rw-r--r--arch/mips/kernel/relocate.c446
-rw-r--r--arch/mips/kernel/relocate_kernel.S190
-rw-r--r--arch/mips/kernel/reset.c125
-rw-r--r--arch/mips/kernel/rtlx-cmp.c122
-rw-r--r--arch/mips/kernel/rtlx-mt.c147
-rw-r--r--arch/mips/kernel/rtlx.c409
-rw-r--r--arch/mips/kernel/scall32-o32.S225
-rw-r--r--arch/mips/kernel/scall64-n32.S108
-rw-r--r--arch/mips/kernel/scall64-n64.S117
-rw-r--r--arch/mips/kernel/scall64-o32.S221
-rw-r--r--arch/mips/kernel/segment.c104
-rw-r--r--arch/mips/kernel/setup.c844
-rw-r--r--arch/mips/kernel/signal-common.h43
-rw-r--r--arch/mips/kernel/signal.c962
-rw-r--r--arch/mips/kernel/signal32.c78
-rw-r--r--arch/mips/kernel/signal_n32.c149
-rw-r--r--arch/mips/kernel/signal_o32.c290
-rw-r--r--arch/mips/kernel/smp-bmips.c667
-rw-r--r--arch/mips/kernel/smp-cmp.c148
-rw-r--r--arch/mips/kernel/smp-cps.c647
-rw-r--r--arch/mips/kernel/smp-mt.c240
-rw-r--r--arch/mips/kernel/smp-up.c79
-rw-r--r--arch/mips/kernel/smp.c721
-rw-r--r--arch/mips/kernel/spinlock_test.c127
-rw-r--r--arch/mips/kernel/spram.c220
-rw-r--r--arch/mips/kernel/stacktrace.c93
-rw-r--r--arch/mips/kernel/sync-r4k.c122
-rw-r--r--arch/mips/kernel/syscall.c242
-rw-r--r--arch/mips/kernel/syscalls/Makefile96
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl381
-rw-r--r--arch/mips/kernel/syscalls/syscall_n64.tbl357
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl430
-rw-r--r--arch/mips/kernel/syscalls/syscallhdr.sh36
-rw-r--r--arch/mips/kernel/syscalls/syscallnr.sh28
-rw-r--r--arch/mips/kernel/syscalls/syscalltbl.sh36
-rw-r--r--arch/mips/kernel/sysrq.c66
-rw-r--r--arch/mips/kernel/time.c167
-rw-r--r--arch/mips/kernel/topology.c33
-rw-r--r--arch/mips/kernel/traps.c2571
-rw-r--r--arch/mips/kernel/unaligned.c1610
-rw-r--r--arch/mips/kernel/uprobes.c262
-rw-r--r--arch/mips/kernel/vdso.c192
-rw-r--r--arch/mips/kernel/vmlinux.lds.S230
-rw-r--r--arch/mips/kernel/vpe-cmp.c180
-rw-r--r--arch/mips/kernel/vpe-mt.c520
-rw-r--r--arch/mips/kernel/vpe.c933
-rw-r--r--arch/mips/kernel/watch.c211
-rw-r--r--arch/mips/kvm/Kconfig77
-rw-r--r--arch/mips/kvm/Makefile27
-rw-r--r--arch/mips/kvm/callback.c14
-rw-r--r--arch/mips/kvm/commpage.c32
-rw-r--r--arch/mips/kvm/commpage.h24
-rw-r--r--arch/mips/kvm/dyntrans.c143
-rw-r--r--arch/mips/kvm/emulate.c3292
-rw-r--r--arch/mips/kvm/entry.c955
-rw-r--r--arch/mips/kvm/fpu.S125
-rw-r--r--arch/mips/kvm/hypcall.c53
-rw-r--r--arch/mips/kvm/interrupt.c175
-rw-r--r--arch/mips/kvm/interrupt.h59
-rw-r--r--arch/mips/kvm/loongson_ipi.c214
-rw-r--r--arch/mips/kvm/mips.c1701
-rw-r--r--arch/mips/kvm/mmu.c1235
-rw-r--r--arch/mips/kvm/msa.S161
-rw-r--r--arch/mips/kvm/stats.c63
-rw-r--r--arch/mips/kvm/tlb.c700
-rw-r--r--arch/mips/kvm/trace.h346
-rw-r--r--arch/mips/kvm/trap_emul.c1306
-rw-r--r--arch/mips/kvm/vz.c3331
-rw-r--r--arch/mips/lantiq/Kconfig55
-rw-r--r--arch/mips/lantiq/Makefile10
-rw-r--r--arch/mips/lantiq/Platform8
-rw-r--r--arch/mips/lantiq/clk.c201
-rw-r--r--arch/mips/lantiq/clk.h94
-rw-r--r--arch/mips/lantiq/early_printk.c31
-rw-r--r--arch/mips/lantiq/falcon/Makefile2
-rw-r--r--arch/mips/lantiq/falcon/prom.c90
-rw-r--r--arch/mips/lantiq/falcon/reset.c75
-rw-r--r--arch/mips/lantiq/falcon/sysctrl.c265
-rw-r--r--arch/mips/lantiq/irq.c433
-rw-r--r--arch/mips/lantiq/prom.c107
-rw-r--r--arch/mips/lantiq/prom.h27
-rw-r--r--arch/mips/lantiq/xway/Makefile4
-rw-r--r--arch/mips/lantiq/xway/clk.c351
-rw-r--r--arch/mips/lantiq/xway/dcdc.c60
-rw-r--r--arch/mips/lantiq/xway/dma.c271
-rw-r--r--arch/mips/lantiq/xway/gptu.c208
-rw-r--r--arch/mips/lantiq/xway/prom.c144
-rw-r--r--arch/mips/lantiq/xway/sysctrl.c588
-rw-r--r--arch/mips/lantiq/xway/vmmc.c64
-rw-r--r--arch/mips/lib/Makefile19
-rw-r--r--arch/mips/lib/bitops.c148
-rw-r--r--arch/mips/lib/bswapdi.c17
-rw-r--r--arch/mips/lib/bswapsi.c13
-rw-r--r--arch/mips/lib/csum_partial.S754
-rw-r--r--arch/mips/lib/delay.c69
-rw-r--r--arch/mips/lib/dump_tlb.c199
-rw-r--r--arch/mips/lib/iomap-pci.c46
-rw-r--r--arch/mips/lib/iomap_copy.c29
-rw-r--r--arch/mips/lib/libgcc.h43
-rw-r--r--arch/mips/lib/memcpy.S709
-rw-r--r--arch/mips/lib/memset.S328
-rw-r--r--arch/mips/lib/mips-atomic.c113
-rw-r--r--arch/mips/lib/multi3.c54
-rw-r--r--arch/mips/lib/r3k_dump_tlb.c75
-rw-r--r--arch/mips/lib/strncpy_user.S85
-rw-r--r--arch/mips/lib/strnlen_user.S84
-rw-r--r--arch/mips/lib/uncached.c82
-rw-r--r--arch/mips/loongson2ef/Kconfig94
-rw-r--r--arch/mips/loongson2ef/Makefile18
-rw-r--r--arch/mips/loongson2ef/Platform57
-rw-r--r--arch/mips/loongson2ef/common/Makefile26
-rw-r--r--arch/mips/loongson2ef/common/bonito-irq.c46
-rw-r--r--arch/mips/loongson2ef/common/cs5536/Makefile12
-rw-r--r--arch/mips/loongson2ef/common/cs5536/cs5536_acc.c136
-rw-r--r--arch/mips/loongson2ef/common/cs5536/cs5536_ehci.c156
-rw-r--r--arch/mips/loongson2ef/common/cs5536/cs5536_ide.c188
-rw-r--r--arch/mips/loongson2ef/common/cs5536/cs5536_isa.c326
-rw-r--r--arch/mips/loongson2ef/common/cs5536/cs5536_mfgpt.c203
-rw-r--r--arch/mips/loongson2ef/common/cs5536/cs5536_ohci.c145
-rw-r--r--arch/mips/loongson2ef/common/cs5536/cs5536_pci.c84
-rw-r--r--arch/mips/loongson2ef/common/env.c53
-rw-r--r--arch/mips/loongson2ef/common/init.c52
-rw-r--r--arch/mips/loongson2ef/common/irq.c63
-rw-r--r--arch/mips/loongson2ef/common/machtype.c62
-rw-r--r--arch/mips/loongson2ef/common/mem.c54
-rw-r--r--arch/mips/loongson2ef/common/pci.c89
-rw-r--r--arch/mips/loongson2ef/common/platform.c27
-rw-r--r--arch/mips/loongson2ef/common/pm.c158
-rw-r--r--arch/mips/loongson2ef/common/reset.c73
-rw-r--r--arch/mips/loongson2ef/common/rtc.c39
-rw-r--r--arch/mips/loongson2ef/common/serial.c86
-rw-r--r--arch/mips/loongson2ef/common/setup.c30
-rw-r--r--arch/mips/loongson2ef/common/time.c28
-rw-r--r--arch/mips/loongson2ef/common/uart_base.c43
-rw-r--r--arch/mips/loongson2ef/fuloong-2e/Makefile6
-rw-r--r--arch/mips/loongson2ef/fuloong-2e/dma.c12
-rw-r--r--arch/mips/loongson2ef/fuloong-2e/irq.c65
-rw-r--r--arch/mips/loongson2ef/fuloong-2e/reset.c19
-rw-r--r--arch/mips/loongson2ef/lemote-2f/Makefile12
-rw-r--r--arch/mips/loongson2ef/lemote-2f/clock.c51
-rw-r--r--arch/mips/loongson2ef/lemote-2f/dma.c14
-rw-r--r--arch/mips/loongson2ef/lemote-2f/ec_kb3310b.c125
-rw-r--r--arch/mips/loongson2ef/lemote-2f/ec_kb3310b.h184
-rw-r--r--arch/mips/loongson2ef/lemote-2f/irq.c118
-rw-r--r--arch/mips/loongson2ef/lemote-2f/machtype.c41
-rw-r--r--arch/mips/loongson2ef/lemote-2f/pm.c145
-rw-r--r--arch/mips/loongson2ef/lemote-2f/reset.c155
-rw-r--r--arch/mips/loongson32/Kconfig75
-rw-r--r--arch/mips/loongson32/Makefile18
-rw-r--r--arch/mips/loongson32/Platform3
-rw-r--r--arch/mips/loongson32/common/Makefile6
-rw-r--r--arch/mips/loongson32/common/irq.c191
-rw-r--r--arch/mips/loongson32/common/platform.c311
-rw-r--r--arch/mips/loongson32/common/prom.c46
-rw-r--r--arch/mips/loongson32/common/reset.c51
-rw-r--r--arch/mips/loongson32/common/setup.c26
-rw-r--r--arch/mips/loongson32/common/time.c232
-rw-r--r--arch/mips/loongson32/ls1b/Makefile6
-rw-r--r--arch/mips/loongson32/ls1b/board.c58
-rw-r--r--arch/mips/loongson32/ls1c/Makefile6
-rw-r--r--arch/mips/loongson32/ls1c/board.c23
-rw-r--r--arch/mips/loongson64/Kconfig15
-rw-r--r--arch/mips/loongson64/Makefile13
-rw-r--r--arch/mips/loongson64/Platform37
-rw-r--r--arch/mips/loongson64/cop2-ex.c341
-rw-r--r--arch/mips/loongson64/cpucfg-emul.c227
-rw-r--r--arch/mips/loongson64/dma.c28
-rw-r--r--arch/mips/loongson64/env.c223
-rw-r--r--arch/mips/loongson64/hpet.c285
-rw-r--r--arch/mips/loongson64/init.c154
-rw-r--r--arch/mips/loongson64/numa.c252
-rw-r--r--arch/mips/loongson64/platform.c42
-rw-r--r--arch/mips/loongson64/pm.c104
-rw-r--r--arch/mips/loongson64/reset.c59
-rw-r--r--arch/mips/loongson64/setup.c46
-rw-r--r--arch/mips/loongson64/smp.c806
-rw-r--r--arch/mips/loongson64/smp.h31
-rw-r--r--arch/mips/loongson64/time.c23
-rw-r--r--arch/mips/loongson64/vbios_quirk.c29
-rw-r--r--arch/mips/math-emu/Makefile19
-rw-r--r--arch/mips/math-emu/cp1emu.c2948
-rw-r--r--arch/mips/math-emu/dp_2008class.c52
-rw-r--r--arch/mips/math-emu/dp_add.c165
-rw-r--r--arch/mips/math-emu/dp_cmp.c47
-rw-r--r--arch/mips/math-emu/dp_div.c143
-rw-r--r--arch/mips/math-emu/dp_fint.c44
-rw-r--r--arch/mips/math-emu/dp_flong.c53
-rw-r--r--arch/mips/math-emu/dp_fmax.c252
-rw-r--r--arch/mips/math-emu/dp_fmin.c252
-rw-r--r--arch/mips/math-emu/dp_fsp.c63
-rw-r--r--arch/mips/math-emu/dp_maddf.c358
-rw-r--r--arch/mips/math-emu/dp_mul.c159
-rw-r--r--arch/mips/math-emu/dp_rint.c78
-rw-r--r--arch/mips/math-emu/dp_simple.c49
-rw-r--r--arch/mips/math-emu/dp_sqrt.c152
-rw-r--r--arch/mips/math-emu/dp_sub.c172
-rw-r--r--arch/mips/math-emu/dp_tint.c96
-rw-r--r--arch/mips/math-emu/dp_tlong.c100
-rw-r--r--arch/mips/math-emu/dsemul.c308
-rw-r--r--arch/mips/math-emu/ieee754.c83
-rw-r--r--arch/mips/math-emu/ieee754.h311
-rw-r--r--arch/mips/math-emu/ieee754d.c99
-rw-r--r--arch/mips/math-emu/ieee754dp.c197
-rw-r--r--arch/mips/math-emu/ieee754dp.h73
-rw-r--r--arch/mips/math-emu/ieee754int.h149
-rw-r--r--arch/mips/math-emu/ieee754sp.c196
-rw-r--r--arch/mips/math-emu/ieee754sp.h77
-rw-r--r--arch/mips/math-emu/me-debugfs.c353
-rw-r--r--arch/mips/math-emu/sp_2008class.c52
-rw-r--r--arch/mips/math-emu/sp_add.c164
-rw-r--r--arch/mips/math-emu/sp_cmp.c47
-rw-r--r--arch/mips/math-emu/sp_div.c142
-rw-r--r--arch/mips/math-emu/sp_fdp.c73
-rw-r--r--arch/mips/math-emu/sp_fint.c53
-rw-r--r--arch/mips/math-emu/sp_flong.c52
-rw-r--r--arch/mips/math-emu/sp_fmax.c252
-rw-r--r--arch/mips/math-emu/sp_fmin.c252
-rw-r--r--arch/mips/math-emu/sp_maddf.c278
-rw-r--r--arch/mips/math-emu/sp_mul.c154
-rw-r--r--arch/mips/math-emu/sp_rint.c79
-rw-r--r--arch/mips/math-emu/sp_simple.c49
-rw-r--r--arch/mips/math-emu/sp_sqrt.c103
-rw-r--r--arch/mips/math-emu/sp_sub.c168
-rw-r--r--arch/mips/math-emu/sp_tint.c100
-rw-r--r--arch/mips/math-emu/sp_tlong.c96
-rw-r--r--arch/mips/mm/Makefile42
-rw-r--r--arch/mips/mm/c-octeon.c352
-rw-r--r--arch/mips/mm/c-r3k.c319
-rw-r--r--arch/mips/mm/c-r4k.c2009
-rw-r--r--arch/mips/mm/c-tx39.c414
-rw-r--r--arch/mips/mm/cache.c242
-rw-r--r--arch/mips/mm/cerr-sb1.c569
-rw-r--r--arch/mips/mm/cex-gen.S42
-rw-r--r--arch/mips/mm/cex-oct.S70
-rw-r--r--arch/mips/mm/cex-sb1.S157
-rw-r--r--arch/mips/mm/context.c291
-rw-r--r--arch/mips/mm/dma-noncoherent.c145
-rw-r--r--arch/mips/mm/extable.c25
-rw-r--r--arch/mips/mm/fault.c332
-rw-r--r--arch/mips/mm/highmem.c94
-rw-r--r--arch/mips/mm/hugetlbpage.c81
-rw-r--r--arch/mips/mm/init.c581
-rw-r--r--arch/mips/mm/ioremap.c119
-rw-r--r--arch/mips/mm/ioremap64.c23
-rw-r--r--arch/mips/mm/mmap.c129
-rw-r--r--arch/mips/mm/page-funcs.S53
-rw-r--r--arch/mips/mm/page.c681
-rw-r--r--arch/mips/mm/pgtable-32.c91
-rw-r--r--arch/mips/mm/pgtable-64.c125
-rw-r--r--arch/mips/mm/pgtable.c25
-rw-r--r--arch/mips/mm/sc-debugfs.c61
-rw-r--r--arch/mips/mm/sc-ip22.c190
-rw-r--r--arch/mips/mm/sc-mips.c264
-rw-r--r--arch/mips/mm/sc-r5k.c107
-rw-r--r--arch/mips/mm/sc-rm7k.c270
-rw-r--r--arch/mips/mm/tlb-funcs.S40
-rw-r--r--arch/mips/mm/tlb-r3k.c284
-rw-r--r--arch/mips/mm/tlb-r4k.c583
-rw-r--r--arch/mips/mm/tlbex-fault.S28
-rw-r--r--arch/mips/mm/tlbex.c2661
-rw-r--r--arch/mips/mm/uasm-micromips.c232
-rw-r--r--arch/mips/mm/uasm-mips.c290
-rw-r--r--arch/mips/mm/uasm.c643
-rw-r--r--arch/mips/mti-malta/Makefile20
-rw-r--r--arch/mips/mti-malta/Platform10
-rw-r--r--arch/mips/mti-malta/malta-amon.c88
-rw-r--r--arch/mips/mti-malta/malta-dt.c15
-rw-r--r--arch/mips/mti-malta/malta-dtshim.c333
-rw-r--r--arch/mips/mti-malta/malta-init.c298
-rw-r--r--arch/mips/mti-malta/malta-int.c223
-rw-r--r--arch/mips/mti-malta/malta-memory.c48
-rw-r--r--arch/mips/mti-malta/malta-platform.c76
-rw-r--r--arch/mips/mti-malta/malta-setup.c249
-rw-r--r--arch/mips/mti-malta/malta-time.c256
-rw-r--r--arch/mips/net/Makefile5
-rw-r--r--arch/mips/net/bpf_jit.c1299
-rw-r--r--arch/mips/net/bpf_jit.h81
-rw-r--r--arch/mips/net/bpf_jit_asm.S285
-rw-r--r--arch/mips/net/ebpf_jit.c1933
-rw-r--r--arch/mips/netlogic/Kconfig86
-rw-r--r--arch/mips/netlogic/Makefile4
-rw-r--r--arch/mips/netlogic/Platform16
-rw-r--r--arch/mips/netlogic/common/Makefile5
-rw-r--r--arch/mips/netlogic/common/earlycons.c63
-rw-r--r--arch/mips/netlogic/common/irq.c354
-rw-r--r--arch/mips/netlogic/common/reset.S299
-rw-r--r--arch/mips/netlogic/common/smp.c285
-rw-r--r--arch/mips/netlogic/common/smpboot.S141
-rw-r--r--arch/mips/netlogic/common/time.c110
-rw-r--r--arch/mips/netlogic/xlp/Makefile11
-rw-r--r--arch/mips/netlogic/xlp/ahci-init-xlp2.c390
-rw-r--r--arch/mips/netlogic/xlp/ahci-init.c209
-rw-r--r--arch/mips/netlogic/xlp/cop2-ex.c121
-rw-r--r--arch/mips/netlogic/xlp/dt.c95
-rw-r--r--arch/mips/netlogic/xlp/nlm_hal.c508
-rw-r--r--arch/mips/netlogic/xlp/setup.c179
-rw-r--r--arch/mips/netlogic/xlp/usb-init-xlp2.c288
-rw-r--r--arch/mips/netlogic/xlp/usb-init.c149
-rw-r--r--arch/mips/netlogic/xlp/wakeup.c212
-rw-r--r--arch/mips/netlogic/xlr/Makefile3
-rw-r--r--arch/mips/netlogic/xlr/fmn-config.c293
-rw-r--r--arch/mips/netlogic/xlr/fmn.c199
-rw-r--r--arch/mips/netlogic/xlr/platform-flash.c216
-rw-r--r--arch/mips/netlogic/xlr/platform.c250
-rw-r--r--arch/mips/netlogic/xlr/setup.c211
-rw-r--r--arch/mips/netlogic/xlr/wakeup.c85
-rw-r--r--arch/mips/oprofile/Makefile18
-rw-r--r--arch/mips/oprofile/backtrace.c177
-rw-r--r--arch/mips/oprofile/common.c147
-rw-r--r--arch/mips/oprofile/op_impl.h41
-rw-r--r--arch/mips/oprofile/op_model_loongson2.c161
-rw-r--r--arch/mips/oprofile/op_model_loongson3.c213
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c479
-rw-r--r--arch/mips/pci/Makefile65
-rw-r--r--arch/mips/pci/fixup-ath79.c18
-rw-r--r--arch/mips/pci/fixup-bcm63xx.c21
-rw-r--r--arch/mips/pci/fixup-capcella.c37
-rw-r--r--arch/mips/pci/fixup-cobalt.c192
-rw-r--r--arch/mips/pci/fixup-fuloong2e.c221
-rw-r--r--arch/mips/pci/fixup-ip32.c52
-rw-r--r--arch/mips/pci/fixup-jmr3927.c79
-rw-r--r--arch/mips/pci/fixup-lantiq.c27
-rw-r--r--arch/mips/pci/fixup-lemote2f.c156
-rw-r--r--arch/mips/pci/fixup-malta.c159
-rw-r--r--arch/mips/pci/fixup-mpc30x.c36
-rw-r--r--arch/mips/pci/fixup-rbtx4927.c73
-rw-r--r--arch/mips/pci/fixup-rbtx4938.c53
-rw-r--r--arch/mips/pci/fixup-rc32434.c69
-rw-r--r--arch/mips/pci/fixup-sb1250.c91
-rw-r--r--arch/mips/pci/fixup-sni.c169
-rw-r--r--arch/mips/pci/fixup-tb0219.c38
-rw-r--r--arch/mips/pci/fixup-tb0226.c73
-rw-r--r--arch/mips/pci/fixup-tb0287.c52
-rw-r--r--arch/mips/pci/msi-octeon.c438
-rw-r--r--arch/mips/pci/msi-xlp.c571
-rw-r--r--arch/mips/pci/ops-bcm63xx.c528
-rw-r--r--arch/mips/pci/ops-bonito64.c148
-rw-r--r--arch/mips/pci/ops-gt64xxx_pci0.c140
-rw-r--r--arch/mips/pci/ops-lantiq.c113
-rw-r--r--arch/mips/pci/ops-loongson2.c213
-rw-r--r--arch/mips/pci/ops-mace.c100
-rw-r--r--arch/mips/pci/ops-msc.c134
-rw-r--r--arch/mips/pci/ops-rc32434.c206
-rw-r--r--arch/mips/pci/ops-sni.c164
-rw-r--r--arch/mips/pci/ops-tx3927.c231
-rw-r--r--arch/mips/pci/ops-tx4927.c524
-rw-r--r--arch/mips/pci/ops-vr41xx.c113
-rw-r--r--arch/mips/pci/pci-alchemy.c537
-rw-r--r--arch/mips/pci/pci-ar2315.c522
-rw-r--r--arch/mips/pci/pci-ar71xx.c401
-rw-r--r--arch/mips/pci/pci-ar724x.c447
-rw-r--r--arch/mips/pci/pci-bcm1480.c255
-rw-r--r--arch/mips/pci/pci-bcm1480ht.c203
-rw-r--r--arch/mips/pci/pci-bcm47xx.c104
-rw-r--r--arch/mips/pci/pci-bcm63xx.c351
-rw-r--r--arch/mips/pci/pci-bcm63xx.h33
-rw-r--r--arch/mips/pci/pci-generic.c48
-rw-r--r--arch/mips/pci/pci-ip27.c42
-rw-r--r--arch/mips/pci/pci-ip32.c147
-rw-r--r--arch/mips/pci/pci-lantiq.c251
-rw-r--r--arch/mips/pci/pci-lantiq.h16
-rw-r--r--arch/mips/pci/pci-legacy.c315
-rw-r--r--arch/mips/pci/pci-malta.c242
-rw-r--r--arch/mips/pci/pci-mt7620.c421
-rw-r--r--arch/mips/pci/pci-octeon.c711
-rw-r--r--arch/mips/pci/pci-rc32434.c231
-rw-r--r--arch/mips/pci/pci-rt2880.c291
-rw-r--r--arch/mips/pci/pci-rt3883.c590
-rw-r--r--arch/mips/pci/pci-sb1250.c279
-rw-r--r--arch/mips/pci/pci-tx4927.c91
-rw-r--r--arch/mips/pci/pci-tx4938.c142
-rw-r--r--arch/mips/pci/pci-tx4939.c107
-rw-r--r--arch/mips/pci/pci-vr41xx.c307
-rw-r--r--arch/mips/pci/pci-vr41xx.h141
-rw-r--r--arch/mips/pci/pci-xlp.c332
-rw-r--r--arch/mips/pci/pci-xlr.c368
-rw-r--r--arch/mips/pci/pci-xtalk-bridge.c759
-rw-r--r--arch/mips/pci/pci.c55
-rw-r--r--arch/mips/pci/pcie-octeon.c2088
-rw-r--r--arch/mips/pic32/Kconfig51
-rw-r--r--arch/mips/pic32/Makefile7
-rw-r--r--arch/mips/pic32/Platform6
-rw-r--r--arch/mips/pic32/common/Makefile6
-rw-r--r--arch/mips/pic32/common/irq.c13
-rw-r--r--arch/mips/pic32/common/reset.c54
-rw-r--r--arch/mips/pic32/pic32mzda/Makefile9
-rw-r--r--arch/mips/pic32/pic32mzda/config.c118
-rw-r--r--arch/mips/pic32/pic32mzda/early_clk.c98
-rw-r--r--arch/mips/pic32/pic32mzda/early_console.c161
-rw-r--r--arch/mips/pic32/pic32mzda/early_pin.c267
-rw-r--r--arch/mips/pic32/pic32mzda/early_pin.h233
-rw-r--r--arch/mips/pic32/pic32mzda/init.c147
-rw-r--r--arch/mips/pic32/pic32mzda/pic32mzda.h21
-rw-r--r--arch/mips/pic32/pic32mzda/time.c60
-rw-r--r--arch/mips/pistachio/Kconfig14
-rw-r--r--arch/mips/pistachio/Makefile2
-rw-r--r--arch/mips/pistachio/Platform8
-rw-r--r--arch/mips/pistachio/init.c131
-rw-r--r--arch/mips/pistachio/irq.c24
-rw-r--r--arch/mips/pistachio/time.c55
-rw-r--r--arch/mips/power/Makefile2
-rw-r--r--arch/mips/power/cpu.c42
-rw-r--r--arch/mips/power/hibernate.c11
-rw-r--r--arch/mips/power/hibernate_asm.S60
-rw-r--r--arch/mips/ralink/Kconfig103
-rw-r--r--arch/mips/ralink/Makefile28
-rw-r--r--arch/mips/ralink/Platform33
-rw-r--r--arch/mips/ralink/bootrom.c40
-rw-r--r--arch/mips/ralink/cevt-rt3352.c153
-rw-r--r--arch/mips/ralink/clk.c87
-rw-r--r--arch/mips/ralink/common.h34
-rw-r--r--arch/mips/ralink/early_printk.c88
-rw-r--r--arch/mips/ralink/ill_acc.c88
-rw-r--r--arch/mips/ralink/irq-gic.c23
-rw-r--r--arch/mips/ralink/irq.c204
-rw-r--r--arch/mips/ralink/mt7620.c718
-rw-r--r--arch/mips/ralink/mt7621.c211
-rw-r--r--arch/mips/ralink/of.c106
-rw-r--r--arch/mips/ralink/prom.c72
-rw-r--r--arch/mips/ralink/reset.c104
-rw-r--r--arch/mips/ralink/rt288x.c111
-rw-r--r--arch/mips/ralink/rt305x.c278
-rw-r--r--arch/mips/ralink/rt3883.c148
-rw-r--r--arch/mips/ralink/timer-gic.c22
-rw-r--r--arch/mips/ralink/timer.c151
-rw-r--r--arch/mips/rb532/Makefile8
-rw-r--r--arch/mips/rb532/Platform6
-rw-r--r--arch/mips/rb532/devices.c322
-rw-r--r--arch/mips/rb532/gpio.c207
-rw-r--r--arch/mips/rb532/irq.c234
-rw-r--r--arch/mips/rb532/prom.c130
-rw-r--r--arch/mips/rb532/serial.c54
-rw-r--r--arch/mips/rb532/setup.c81
-rw-r--r--arch/mips/rb532/time.c54
-rw-r--r--arch/mips/sgi-ip22/Makefile12
-rw-r--r--arch/mips/sgi-ip22/Platform32
-rw-r--r--arch/mips/sgi-ip22/ip22-berr.c116
-rw-r--r--arch/mips/sgi-ip22/ip22-eisa.c139
-rw-r--r--arch/mips/sgi-ip22/ip22-gio.c431
-rw-r--r--arch/mips/sgi-ip22/ip22-hpc.c64
-rw-r--r--arch/mips/sgi-ip22/ip22-int.c320
-rw-r--r--arch/mips/sgi-ip22/ip22-mc.c203
-rw-r--r--arch/mips/sgi-ip22/ip22-nvram.c122
-rw-r--r--arch/mips/sgi-ip22/ip22-platform.c223
-rw-r--r--arch/mips/sgi-ip22/ip22-reset.c203
-rw-r--r--arch/mips/sgi-ip22/ip22-setup.c78
-rw-r--r--arch/mips/sgi-ip22/ip22-time.c131
-rw-r--r--arch/mips/sgi-ip22/ip28-berr.c488
-rw-r--r--arch/mips/sgi-ip27/Kconfig40
-rw-r--r--arch/mips/sgi-ip27/Makefile11
-rw-r--r--arch/mips/sgi-ip27/Platform16
-rw-r--r--arch/mips/sgi-ip27/TODO19
-rw-r--r--arch/mips/sgi-ip27/ip27-berr.c96
-rw-r--r--arch/mips/sgi-ip27/ip27-common.h20
-rw-r--r--arch/mips/sgi-ip27/ip27-console.c42
-rw-r--r--arch/mips/sgi-ip27/ip27-hubio.c185
-rw-r--r--arch/mips/sgi-ip27/ip27-init.c147
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c314
-rw-r--r--arch/mips/sgi-ip27/ip27-klconfig.c74
-rw-r--r--arch/mips/sgi-ip27/ip27-klnuma.c129
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c429
-rw-r--r--arch/mips/sgi-ip27/ip27-nmi.c240
-rw-r--r--arch/mips/sgi-ip27/ip27-reset.c81
-rw-r--r--arch/mips/sgi-ip27/ip27-smp.c189
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c160
-rw-r--r--arch/mips/sgi-ip27/ip27-xtalk.c242
-rw-r--r--arch/mips/sgi-ip30/Makefile9
-rw-r--r--arch/mips/sgi-ip30/Platform5
-rw-r--r--arch/mips/sgi-ip30/ip30-common.h23
-rw-r--r--arch/mips/sgi-ip30/ip30-console.c23
-rw-r--r--arch/mips/sgi-ip30/ip30-irq.c331
-rw-r--r--arch/mips/sgi-ip30/ip30-power.c41
-rw-r--r--arch/mips/sgi-ip30/ip30-setup.c138
-rw-r--r--arch/mips/sgi-ip30/ip30-smp.c149
-rw-r--r--arch/mips/sgi-ip30/ip30-timer.c63
-rw-r--r--arch/mips/sgi-ip30/ip30-xtalk.c152
-rw-r--r--arch/mips/sgi-ip32/Makefile8
-rw-r--r--arch/mips/sgi-ip32/Platform10
-rw-r--r--arch/mips/sgi-ip32/crime.c103
-rw-r--r--arch/mips/sgi-ip32/ip32-berr.c38
-rw-r--r--arch/mips/sgi-ip32/ip32-dma.c37
-rw-r--r--arch/mips/sgi-ip32/ip32-irq.c499
-rw-r--r--arch/mips/sgi-ip32/ip32-memory.c47
-rw-r--r--arch/mips/sgi-ip32/ip32-platform.c138
-rw-r--r--arch/mips/sgi-ip32/ip32-reset.c152
-rw-r--r--arch/mips/sgi-ip32/ip32-setup.c100
-rw-r--r--arch/mips/sibyte/Kconfig167
-rw-r--r--arch/mips/sibyte/Makefile29
-rw-r--r--arch/mips/sibyte/Platform40
-rw-r--r--arch/mips/sibyte/bcm1480/Makefile4
-rw-r--r--arch/mips/sibyte/bcm1480/irq.c348
-rw-r--r--arch/mips/sibyte/bcm1480/setup.c128
-rw-r--r--arch/mips/sibyte/bcm1480/smp.c179
-rw-r--r--arch/mips/sibyte/bcm1480/time.c14
-rw-r--r--arch/mips/sibyte/common/Makefile6
-rw-r--r--arch/mips/sibyte/common/bus_watcher.c228
-rw-r--r--arch/mips/sibyte/common/cfe.c330
-rw-r--r--arch/mips/sibyte/common/cfe_console.c81
-rw-r--r--arch/mips/sibyte/common/dma.c14
-rw-r--r--arch/mips/sibyte/common/sb_tbprof.c595
-rw-r--r--arch/mips/sibyte/sb1250/Makefile4
-rw-r--r--arch/mips/sibyte/sb1250/irq.c324
-rw-r--r--arch/mips/sibyte/sb1250/setup.c234
-rw-r--r--arch/mips/sibyte/sb1250/smp.c168
-rw-r--r--arch/mips/sibyte/sb1250/time.c14
-rw-r--r--arch/mips/sibyte/swarm/Makefile5
-rw-r--r--arch/mips/sibyte/swarm/platform.c140
-rw-r--r--arch/mips/sibyte/swarm/rtc_m41t81.c228
-rw-r--r--arch/mips/sibyte/swarm/rtc_xicor1241.c206
-rw-r--r--arch/mips/sibyte/swarm/setup.c171
-rw-r--r--arch/mips/sibyte/swarm/swarm-i2c.c31
-rw-r--r--arch/mips/sni/Makefile7
-rw-r--r--arch/mips/sni/Platform10
-rw-r--r--arch/mips/sni/a20r.c256
-rw-r--r--arch/mips/sni/eisa.c49
-rw-r--r--arch/mips/sni/irq.c76
-rw-r--r--arch/mips/sni/pcimt.c332
-rw-r--r--arch/mips/sni/pcit.c295
-rw-r--r--arch/mips/sni/reset.c48
-rw-r--r--arch/mips/sni/rm200.c485
-rw-r--r--arch/mips/sni/setup.c263
-rw-r--r--arch/mips/sni/time.c167
-rw-r--r--arch/mips/tools/.gitignore3
-rw-r--r--arch/mips/tools/Makefile10
-rw-r--r--arch/mips/tools/elf-entry.c103
-rwxr-xr-xarch/mips/tools/generic-board-config.sh84
-rw-r--r--arch/mips/tools/loongson3-llsc-check.c309
-rw-r--r--arch/mips/txx9/Kconfig128
-rw-r--r--arch/mips/txx9/Makefile18
-rw-r--r--arch/mips/txx9/Platform7
-rw-r--r--arch/mips/txx9/generic/7segled.c123
-rw-r--r--arch/mips/txx9/generic/Makefile14
-rw-r--r--arch/mips/txx9/generic/irq_tx3927.c25
-rw-r--r--arch/mips/txx9/generic/irq_tx4927.c49
-rw-r--r--arch/mips/txx9/generic/irq_tx4938.c37
-rw-r--r--arch/mips/txx9/generic/irq_tx4939.c216
-rw-r--r--arch/mips/txx9/generic/mem_tx4927.c75
-rw-r--r--arch/mips/txx9/generic/pci.c432
-rw-r--r--arch/mips/txx9/generic/setup.c965
-rw-r--r--arch/mips/txx9/generic/setup_tx3927.c136
-rw-r--r--arch/mips/txx9/generic/setup_tx4927.c337
-rw-r--r--arch/mips/txx9/generic/setup_tx4938.c485
-rw-r--r--arch/mips/txx9/generic/setup_tx4939.c568
-rw-r--r--arch/mips/txx9/generic/smsc_fdc37m81x.c169
-rw-r--r--arch/mips/txx9/generic/spi_eeprom.c104
-rw-r--r--arch/mips/txx9/jmr3927/Makefile6
-rw-r--r--arch/mips/txx9/jmr3927/irq.c128
-rw-r--r--arch/mips/txx9/jmr3927/prom.c52
-rw-r--r--arch/mips/txx9/jmr3927/setup.c223
-rw-r--r--arch/mips/txx9/rbtx4927/Makefile2
-rw-r--r--arch/mips/txx9/rbtx4927/irq.c198
-rw-r--r--arch/mips/txx9/rbtx4927/prom.c42
-rw-r--r--arch/mips/txx9/rbtx4927/setup.c380
-rw-r--r--arch/mips/txx9/rbtx4938/Makefile2
-rw-r--r--arch/mips/txx9/rbtx4938/irq.c157
-rw-r--r--arch/mips/txx9/rbtx4938/prom.c22
-rw-r--r--arch/mips/txx9/rbtx4938/setup.c372
-rw-r--r--arch/mips/txx9/rbtx4939/Makefile2
-rw-r--r--arch/mips/txx9/rbtx4939/irq.c95
-rw-r--r--arch/mips/txx9/rbtx4939/prom.c29
-rw-r--r--arch/mips/txx9/rbtx4939/setup.c554
-rw-r--r--arch/mips/vdso/.gitignore5
-rw-r--r--arch/mips/vdso/Kconfig18
-rw-r--r--arch/mips/vdso/Makefile208
-rw-r--r--arch/mips/vdso/config-n32-o32-env.c19
-rw-r--r--arch/mips/vdso/elf.S62
-rw-r--r--arch/mips/vdso/genvdso.c312
-rw-r--r--arch/mips/vdso/genvdso.h132
-rw-r--r--arch/mips/vdso/sigreturn.S35
-rw-r--r--arch/mips/vdso/vdso.lds.S105
-rw-r--r--arch/mips/vdso/vgettimeofday.c78
-rw-r--r--arch/mips/vr41xx/Kconfig104
-rw-r--r--arch/mips/vr41xx/Makefile5
-rw-r--r--arch/mips/vr41xx/Platform29
-rw-r--r--arch/mips/vr41xx/casio-e55/Makefile6
-rw-r--r--arch/mips/vr41xx/casio-e55/setup.c27
-rw-r--r--arch/mips/vr41xx/common/Makefile6
-rw-r--r--arch/mips/vr41xx/common/bcu.c210
-rw-r--r--arch/mips/vr41xx/common/cmu.c244
-rw-r--r--arch/mips/vr41xx/common/giu.c110
-rw-r--r--arch/mips/vr41xx/common/icu.c714
-rw-r--r--arch/mips/vr41xx/common/init.c64
-rw-r--r--arch/mips/vr41xx/common/irq.c106
-rw-r--r--arch/mips/vr41xx/common/pmu.c123
-rw-r--r--arch/mips/vr41xx/common/rtc.c105
-rw-r--r--arch/mips/vr41xx/common/siu.c142
-rw-r--r--arch/mips/vr41xx/common/type.c11
-rw-r--r--arch/mips/vr41xx/ibm-workpad/Makefile6
-rw-r--r--arch/mips/vr41xx/ibm-workpad/setup.c27
1771 files changed, 362017 insertions, 0 deletions
diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
new file mode 100644
index 000000000..d5d6ef9bb
--- /dev/null
+++ b/arch/mips/Kbuild
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0
+# Fail on warnings - also for files referenced in subdirs
+# -Werror can be disabled for specific files using:
+# CFLAGS_<file.o> := -Wno-error
+ifeq ($(W),)
+subdir-ccflags-y := -Werror
+endif
+
+# platform specific definitions
+include arch/mips/Kbuild.platforms
+obj-y := $(platform-y)
+
+# make clean traverses $(obj-) without having included .config, so
+# everything ends up here
+obj- := $(platform-y)
+
+# mips object files
+# The object files are linked as core-y files would be linked
+
+obj-y += kernel/
+obj-y += mm/
+obj-y += net/
+obj-y += vdso/
+
+ifdef CONFIG_KVM
+obj-y += kvm/
+endif
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
new file mode 100644
index 000000000..5483e38b5
--- /dev/null
+++ b/arch/mips/Kbuild.platforms
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0
+# All platforms listed in alphabetic order
+
+platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/
+platform-$(CONFIG_AR7) += ar7/
+platform-$(CONFIG_ATH25) += ath25/
+platform-$(CONFIG_ATH79) += ath79/
+platform-$(CONFIG_BCM47XX) += bcm47xx/
+platform-$(CONFIG_BCM63XX) += bcm63xx/
+platform-$(CONFIG_BMIPS_GENERIC) += bmips/
+platform-$(CONFIG_CAVIUM_OCTEON_SOC) += cavium-octeon/
+platform-$(CONFIG_MIPS_COBALT) += cobalt/
+platform-$(CONFIG_MACH_DECSTATION) += dec/
+platform-$(CONFIG_MIPS_GENERIC) += generic/
+platform-$(CONFIG_MACH_JAZZ) += jazz/
+platform-$(CONFIG_LANTIQ) += lantiq/
+platform-$(CONFIG_MACH_LOONGSON2EF) += loongson2ef/
+platform-$(CONFIG_MACH_LOONGSON32) += loongson32/
+platform-$(CONFIG_MACH_LOONGSON64) += loongson64/
+platform-$(CONFIG_MIPS_MALTA) += mti-malta/
+platform-$(CONFIG_NLM_COMMON) += netlogic/
+platform-$(CONFIG_PIC32MZDA) += pic32/
+platform-$(CONFIG_MACH_PISTACHIO) += pistachio/
+platform-$(CONFIG_RALINK) += ralink/
+platform-$(CONFIG_MIKROTIK_RB532) += rb532/
+platform-$(CONFIG_SGI_IP22) += sgi-ip22/
+platform-$(CONFIG_SGI_IP27) += sgi-ip27/
+platform-$(CONFIG_SGI_IP28) += sgi-ip22/
+platform-$(CONFIG_SGI_IP30) += sgi-ip30/
+platform-$(CONFIG_SGI_IP32) += sgi-ip32/
+platform-$(CONFIG_SIBYTE_BCM112X) += sibyte/
+platform-$(CONFIG_SIBYTE_SB1250) += sibyte/
+platform-$(CONFIG_SIBYTE_BCM1x55) += sibyte/
+platform-$(CONFIG_SIBYTE_BCM1x80) += sibyte/
+platform-$(CONFIG_SNI_RM) += sni/
+platform-$(CONFIG_MACH_TX39XX) += txx9/
+platform-$(CONFIG_MACH_TX49XX) += txx9/
+platform-$(CONFIG_MACH_VR41XX) += vr41xx/
+
+# include the platform specific files
+include $(patsubst %, $(srctree)/arch/mips/%/Platform, $(platform-y))
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
new file mode 100644
index 000000000..57839f630
--- /dev/null
+++ b/arch/mips/Kconfig
@@ -0,0 +1,3344 @@
+# SPDX-License-Identifier: GPL-2.0
+config MIPS
+ bool
+ default y
+ select ARCH_32BIT_OFF_T if !64BIT
+ select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
+ select ARCH_HAS_CPU_FINALIZE_INIT
+ select ARCH_HAS_FORTIFY_SOURCE
+ select ARCH_HAS_KCOV
+ select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA
+ select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI)
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_SUPPORTS_UPROBES
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
+ select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
+ select ARCH_WANT_IPC_PARSE_VERSION
+ select BUILDTIME_TABLE_SORT
+ select CLONE_BACKWARDS
+ select CPU_NO_EFFICIENT_FFS if (TARGET_ISA_REV < 1)
+ select CPU_PM if CPU_IDLE
+ select GENERIC_ATOMIC64 if !64BIT
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CMOS_UPDATE
+ select GENERIC_CPU_AUTOPROBE
+ select GENERIC_GETTIMEOFDAY
+ select GENERIC_IOMAP
+ select GENERIC_IRQ_PROBE
+ select GENERIC_IRQ_SHOW
+ select GENERIC_ISA_DMA if EISA
+ select GENERIC_LIB_ASHLDI3
+ select GENERIC_LIB_ASHRDI3
+ select GENERIC_LIB_CMPDI2
+ select GENERIC_LIB_LSHRDI3
+ select GENERIC_LIB_UCMPDI2
+ select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC
+ select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_TIME_VSYSCALL
+ select GUP_GET_PTE_LOW_HIGH if CPU_MIPS32 && PHYS_ADDR_T_64BIT
+ select HANDLE_DOMAIN_IRQ
+ select HAVE_ARCH_COMPILER_H
+ select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
+ select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES
+ select HAVE_ASM_MODVERSIONS
+ select HAVE_CBPF_JIT if !64BIT && !CPU_MICROMIPS
+ select HAVE_CONTEXT_TRACKING
+ select HAVE_TIF_NOHZ
+ select HAVE_C_RECORDMCOUNT
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_DMA_CONTIGUOUS
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2
+ select HAVE_EXIT_THREAD
+ select HAVE_FAST_GUP
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER
+ select HAVE_GCC_PLUGINS
+ select HAVE_GENERIC_VDSO
+ select HAVE_IDE
+ select HAVE_IOREMAP_PROT
+ select HAVE_IRQ_EXIT_ON_IRQ_STACK
+ select HAVE_IRQ_TIME_ACCOUNTING
+ select HAVE_KPROBES
+ select HAVE_KRETPROBES
+ select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
+ select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_NMI
+ select HAVE_OPROFILE
+ select HAVE_PERF_EVENTS
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RSEQ
+ select HAVE_SPARSE_SYSCALL_NR
+ select HAVE_STACKPROTECTOR
+ select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
+ select IRQ_FORCED_THREADING
+ select ISA if EISA
+ select MODULES_USE_ELF_REL if MODULES
+ select MODULES_USE_ELF_RELA if MODULES && 64BIT
+ select PERF_USE_VMALLOC
+ select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
+ select RTC_LIB
+ select SET_FS
+ select SYSCTL_EXCEPTION_TRACE
+ select VIRT_TO_BUS
+
+config MIPS_FIXUP_BIGPHYS_ADDR
+ bool
+
+config MIPS_GENERIC
+ bool
+
+config MACH_INGENIC
+ bool
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_ZBOOT
+ select DMA_NONCOHERENT
+ select IRQ_MIPS_CPU
+ select PINCTRL
+ select GPIOLIB
+ select COMMON_CLK
+ select GENERIC_IRQ_CHIP
+ select BUILTIN_DTB if MIPS_NO_APPENDED_DTB
+ select USE_OF
+ select CPU_SUPPORTS_CPUFREQ
+ select MIPS_EXTERNAL_TIMER
+
+menu "Machine selection"
+
+choice
+ prompt "System type"
+ default MIPS_GENERIC_KERNEL
+
+config MIPS_GENERIC_KERNEL
+ bool "Generic board-agnostic MIPS kernel"
+ select MIPS_GENERIC
+ select BOOT_RAW
+ select BUILTIN_DTB
+ select CEVT_R4K
+ select CLKSRC_MIPS_GIC
+ select COMMON_CLK
+ select CPU_MIPSR2_IRQ_EI
+ select CPU_MIPSR2_IRQ_VI
+ select CSRC_R4K
+ select DMA_PERDEV_COHERENT
+ select HAVE_PCI
+ select IRQ_MIPS_CPU
+ select MIPS_AUTO_PFN_OFFSET
+ select MIPS_CPU_SCACHE
+ select MIPS_GIC
+ select MIPS_L1_CACHE_SHIFT_7
+ select NO_EXCEPT_FILL
+ select PCI_DRIVERS_GENERIC
+ select SMP_UP if SMP
+ select SWAP_IO_SPACE
+ select SYS_HAS_CPU_MIPS32_R1
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_HAS_CPU_MIPS32_R6
+ select SYS_HAS_CPU_MIPS64_R1
+ select SYS_HAS_CPU_MIPS64_R2
+ select SYS_HAS_CPU_MIPS64_R6
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_MICROMIPS
+ select SYS_SUPPORTS_MIPS16
+ select SYS_SUPPORTS_MIPS_CPS
+ select SYS_SUPPORTS_MULTITHREADING
+ select SYS_SUPPORTS_RELOCATABLE
+ select SYS_SUPPORTS_SMARTMIPS
+ select SYS_SUPPORTS_ZBOOT
+ select UHI_BOOT
+ select USB_EHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
+ select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
+ select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
+ select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
+ select USB_UHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
+ select USB_UHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
+ select USE_OF
+ help
+ Select this to build a kernel which aims to support multiple boards,
+ generally using a flattened device tree passed from the bootloader
+ using the boot protocol defined in the UHI (Unified Hosting
+ Interface) specification.
+
+config MIPS_ALCHEMY
+ bool "Alchemy processor based machines"
+ select PHYS_ADDR_T_64BIT
+ select CEVT_R4K
+ select CSRC_R4K
+ select IRQ_MIPS_CPU
+ select DMA_MAYBE_COHERENT # Au1000,1500,1100 aren't, rest is
+ select MIPS_FIXUP_BIGPHYS_ADDR if PCI
+ select SYS_HAS_CPU_MIPS32_R1
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_APM_EMULATION
+ select GPIOLIB
+ select SYS_SUPPORTS_ZBOOT
+ select COMMON_CLK
+
+config AR7
+ bool "Texas Instruments AR7"
+ select BOOT_ELF32
+ select DMA_NONCOHERENT
+ select CEVT_R4K
+ select CSRC_R4K
+ select IRQ_MIPS_CPU
+ select NO_EXCEPT_FILL
+ select SWAP_IO_SPACE
+ select SYS_HAS_CPU_MIPS32_R1
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_MIPS16
+ select SYS_SUPPORTS_ZBOOT_UART16550
+ select GPIOLIB
+ select VLYNQ
+ select HAVE_LEGACY_CLK
+ help
+ Support for the Texas Instruments AR7 System-on-a-Chip
+ family: TNETD7100, 7200 and 7300.
+
+config ATH25
+ bool "Atheros AR231x/AR531x SoC support"
+ select CEVT_R4K
+ select CSRC_R4K
+ select DMA_NONCOHERENT
+ select IRQ_MIPS_CPU
+ select IRQ_DOMAIN
+ select SYS_HAS_CPU_MIPS32_R1
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_HAS_EARLY_PRINTK
+ help
+ Support for Atheros AR231x and Atheros AR531x based boards
+
+config ATH79
+ bool "Atheros AR71XX/AR724X/AR913X based boards"
+ select ARCH_HAS_RESET_CONTROLLER
+ select BOOT_RAW
+ select CEVT_R4K
+ select CSRC_R4K
+ select DMA_NONCOHERENT
+ select GPIOLIB
+ select PINCTRL
+ select COMMON_CLK
+ select IRQ_MIPS_CPU
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_MIPS16
+ select SYS_SUPPORTS_ZBOOT_UART_PROM
+ select USE_OF
+ select USB_EHCI_ROOT_HUB_TT if USB_EHCI_HCD_PLATFORM
+ help
+ Support for the Atheros AR71XX/AR724X/AR913X SoCs.
+
+config BMIPS_GENERIC
+ bool "Broadcom Generic BMIPS kernel"
+ select ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+ select ARCH_HAS_PHYS_TO_DMA
+ select BOOT_RAW
+ select NO_EXCEPT_FILL
+ select USE_OF
+ select CEVT_R4K
+ select CSRC_R4K
+ select SYNC_R4K
+ select COMMON_CLK
+ select BCM6345_L1_IRQ
+ select BCM7038_L1_IRQ
+ select BCM7120_L2_IRQ
+ select BRCMSTB_L2_IRQ
+ select IRQ_MIPS_CPU
+ select DMA_NONCOHERENT
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_HAS_CPU_BMIPS32_3300
+ select SYS_HAS_CPU_BMIPS4350
+ select SYS_HAS_CPU_BMIPS4380
+ select SYS_HAS_CPU_BMIPS5000
+ select SWAP_IO_SPACE
+ select USB_EHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
+ select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
+ select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
+ select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
+ select HARDIRQS_SW_RESEND
+ help
+ Build a generic DT-based kernel image that boots on select
+ BCM33xx cable modem chips, BCM63xx DSL chips, and BCM7xxx set-top
+ box chips. Note that CONFIG_CPU_BIG_ENDIAN/CONFIG_CPU_LITTLE_ENDIAN
+ must be set appropriately for your board.
+
+config BCM47XX
+ bool "Broadcom BCM47XX based boards"
+ select BOOT_RAW
+ select CEVT_R4K
+ select CSRC_R4K
+ select DMA_NONCOHERENT
+ select HAVE_PCI
+ select IRQ_MIPS_CPU
+ select SYS_HAS_CPU_MIPS32_R1
+ select NO_EXCEPT_FILL
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_MIPS16
+ select SYS_SUPPORTS_ZBOOT
+ select SYS_HAS_EARLY_PRINTK
+ select USE_GENERIC_EARLY_PRINTK_8250
+ select GPIOLIB
+ select LEDS_GPIO_REGISTER
+ select BCM47XX_NVRAM
+ select BCM47XX_SPROM
+ select BCM47XX_SSB if !BCM47XX_BCMA
+ help
+ Support for BCM47XX based boards
+
+config BCM63XX
+ bool "Broadcom BCM63XX based boards"
+ select BOOT_RAW
+ select CEVT_R4K
+ select CSRC_R4K
+ select SYNC_R4K
+ select DMA_NONCOHERENT
+ select IRQ_MIPS_CPU
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_HAS_CPU_BMIPS32_3300
+ select SYS_HAS_CPU_BMIPS4350
+ select SYS_HAS_CPU_BMIPS4380
+ select SWAP_IO_SPACE
+ select GPIOLIB
+ select MIPS_L1_CACHE_SHIFT_4
+ select CLKDEV_LOOKUP
+ select HAVE_LEGACY_CLK
+ help
+ Support for BCM63XX based boards
+
+config MIPS_COBALT
+ bool "Cobalt Server"
+ select CEVT_R4K
+ select CSRC_R4K
+ select CEVT_GT641XX
+ select DMA_NONCOHERENT
+ select FORCE_PCI
+ select I8253
+ select I8259
+ select IRQ_MIPS_CPU
+ select IRQ_GT641XX
+ select PCI_GT64XXX_PCI0
+ select SYS_HAS_CPU_NEVADA
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select USE_GENERIC_EARLY_PRINTK_8250
+
+config MACH_DECSTATION
+ bool "DECstations"
+ select BOOT_ELF32
+ select CEVT_DS1287
+ select CEVT_R4K if CPU_R4X00
+ select CSRC_IOASIC
+ select CSRC_R4K if CPU_R4X00
+ select CPU_DADDI_WORKAROUNDS if 64BIT
+ select CPU_R4000_WORKAROUNDS if 64BIT
+ select CPU_R4400_WORKAROUNDS if 64BIT
+ select DMA_NONCOHERENT
+ select NO_IOPORT_MAP
+ select IRQ_MIPS_CPU
+ select SYS_HAS_CPU_R3000
+ select SYS_HAS_CPU_R4X00
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_128HZ
+ select SYS_SUPPORTS_256HZ
+ select SYS_SUPPORTS_1024HZ
+ select MIPS_L1_CACHE_SHIFT_4
+ help
+ This enables support for DEC's MIPS based workstations. For details
+ see the Linux/MIPS FAQ on <http://www.linux-mips.org/> and the
+ DECstation porting pages on <http://decstation.unix-ag.org/>.
+
+ If you have one of the following DECstation Models you definitely
+ want to choose R4xx0 for the CPU Type:
+
+ DECstation 5000/50
+ DECstation 5000/150
+ DECstation 5000/260
+ DECsystem 5900/260
+
+ otherwise choose R3000.
+
+config MACH_JAZZ
+ bool "Jazz family of machines"
+ select ARC_MEMORY
+ select ARC_PROMLIB
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select DMA_OPS
+ select FW_ARC
+ select FW_ARC32
+ select ARCH_MAY_HAVE_PC_FDC
+ select CEVT_R4K
+ select CSRC_R4K
+ select DEFAULT_SGI_PARTITION if CPU_BIG_ENDIAN
+ select GENERIC_ISA_DMA
+ select HAVE_PCSPKR_PLATFORM
+ select IRQ_MIPS_CPU
+ select I8253
+ select I8259
+ select ISA
+ select SYS_HAS_CPU_R4X00
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_100HZ
+ help
+ This is a family of machines based on the MIPS R4030 chipset which was
+ used by several vendors to build RISC/os and Windows NT workstations.
+ Members include the Acer PICA, MIPS Magnum 4000, MIPS Millennium and
+ Olivetti M700-10 workstations.
+
+config MACH_INGENIC_SOC
+ bool "Ingenic SoC based machines"
+ select MIPS_GENERIC
+ select MACH_INGENIC
+ select SYS_SUPPORTS_ZBOOT_UART16550
+ select CPU_SUPPORTS_CPUFREQ
+ select MIPS_EXTERNAL_TIMER
+
+config LANTIQ
+ bool "Lantiq based platforms"
+ select DMA_NONCOHERENT
+ select IRQ_MIPS_CPU
+ select CEVT_R4K
+ select CSRC_R4K
+ select SYS_HAS_CPU_MIPS32_R1
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_MIPS16
+ select SYS_SUPPORTS_MULTITHREADING
+ select SYS_SUPPORTS_VPE_LOADER
+ select SYS_HAS_EARLY_PRINTK
+ select GPIOLIB
+ select SWAP_IO_SPACE
+ select BOOT_RAW
+ select CLKDEV_LOOKUP
+ select HAVE_LEGACY_CLK
+ select USE_OF
+ select PINCTRL
+ select PINCTRL_LANTIQ
+ select ARCH_HAS_RESET_CONTROLLER
+ select RESET_CONTROLLER
+
+config MACH_LOONGSON32
+ bool "Loongson 32-bit family of machines"
+ select SYS_SUPPORTS_ZBOOT
+ help
+ This enables support for the Loongson-1 family of machines.
+
+ Loongson-1 is a family of 32-bit MIPS-compatible SoCs developed by
+ the Institute of Computing Technology (ICT), Chinese Academy of
+ Sciences (CAS).
+
+config MACH_LOONGSON2EF
+ bool "Loongson-2E/F family of machines"
+ select SYS_SUPPORTS_ZBOOT
+ help
+ This enables support for the early Loongson-2E/F family of machines.
+
+config MACH_LOONGSON64
+ bool "Loongson 64-bit family of machines"
+ select ARCH_SPARSEMEM_ENABLE
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select GENERIC_ISA_DMA_SUPPORT_BROKEN
+ select BOOT_ELF32
+ select BOARD_SCACHE
+ select CSRC_R4K
+ select CEVT_R4K
+ select CPU_HAS_WB
+ select FORCE_PCI
+ select ISA
+ select I8259
+ select IRQ_MIPS_CPU
+ select NO_EXCEPT_FILL
+ select NR_CPUS_DEFAULT_64
+ select USE_GENERIC_EARLY_PRINTK_8250
+ select PCI_DRIVERS_GENERIC
+ select SYS_HAS_CPU_LOONGSON64
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_SUPPORTS_SMP
+ select SYS_SUPPORTS_HOTPLUG_CPU
+ select SYS_SUPPORTS_NUMA
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_ZBOOT
+ select ZONE_DMA32
+ select NUMA
+ select SMP
+ select COMMON_CLK
+ select USE_OF
+ select BUILTIN_DTB
+ select PCI_HOST_GENERIC
+ help
+ This enables support for the Loongson-2/3 family of machines.
+
+ Loongson-2 and Loongson-3 are 64-bit general-purpose processors with
+ GS264/GS464/GS464E/GS464V microarchitecture (except old Loongson-2E
+ and Loongson-2F which will be removed), developed by the Institute
+ of Computing Technology (ICT), Chinese Academy of Sciences (CAS).
+
+config MACH_PISTACHIO
+ bool "IMG Pistachio SoC based boards"
+ select BOOT_ELF32
+ select BOOT_RAW
+ select CEVT_R4K
+ select CLKSRC_MIPS_GIC
+ select COMMON_CLK
+ select CSRC_R4K
+ select DMA_NONCOHERENT
+ select GPIOLIB
+ select IRQ_MIPS_CPU
+ select MFD_SYSCON
+ select MIPS_CPU_SCACHE
+ select MIPS_GIC
+ select PINCTRL
+ select REGULATOR
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_MIPS_CPS
+ select SYS_SUPPORTS_MULTITHREADING
+ select SYS_SUPPORTS_RELOCATABLE
+ select SYS_SUPPORTS_ZBOOT
+ select SYS_HAS_EARLY_PRINTK
+ select USE_GENERIC_EARLY_PRINTK_8250
+ select USE_OF
+ help
+ This enables support for the IMG Pistachio SoC platform.
+
+config MIPS_MALTA
+ bool "MIPS Malta board"
+ select ARCH_MAY_HAVE_PC_FDC
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select BOOT_ELF32
+ select BOOT_RAW
+ select BUILTIN_DTB
+ select CEVT_R4K
+ select CLKSRC_MIPS_GIC
+ select COMMON_CLK
+ select CSRC_R4K
+ select DMA_MAYBE_COHERENT
+ select GENERIC_ISA_DMA
+ select HAVE_PCSPKR_PLATFORM
+ select HAVE_PCI
+ select I8253
+ select I8259
+ select IRQ_MIPS_CPU
+ select MIPS_BONITO64
+ select MIPS_CPU_SCACHE
+ select MIPS_GIC
+ select MIPS_L1_CACHE_SHIFT_6
+ select MIPS_MSC
+ select PCI_GT64XXX_PCI0
+ select SMP_UP if SMP
+ select SWAP_IO_SPACE
+ select SYS_HAS_CPU_MIPS32_R1
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_HAS_CPU_MIPS32_R3_5
+ select SYS_HAS_CPU_MIPS32_R5
+ select SYS_HAS_CPU_MIPS32_R6
+ select SYS_HAS_CPU_MIPS64_R1
+ select SYS_HAS_CPU_MIPS64_R2
+ select SYS_HAS_CPU_MIPS64_R6
+ select SYS_HAS_CPU_NEVADA
+ select SYS_HAS_CPU_RM7000
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_MICROMIPS
+ select SYS_SUPPORTS_MIPS16
+ select SYS_SUPPORTS_MIPS_CMP
+ select SYS_SUPPORTS_MIPS_CPS
+ select SYS_SUPPORTS_MULTITHREADING
+ select SYS_SUPPORTS_RELOCATABLE
+ select SYS_SUPPORTS_SMARTMIPS
+ select SYS_SUPPORTS_VPE_LOADER
+ select SYS_SUPPORTS_ZBOOT
+ select USE_OF
+ select WAR_ICACHE_REFILLS
+ select ZONE_DMA32 if 64BIT
+ help
+ This enables support for the MIPS Technologies Malta evaluation
+ board.
+
+config MACH_PIC32
+ bool "Microchip PIC32 Family"
+ help
+ This enables support for the Microchip PIC32 family of platforms.
+
+ Microchip PIC32 is a family of general-purpose 32-bit MIPS core
+ microcontrollers.
+
+config MACH_VR41XX
+ bool "NEC VR4100 series based machines"
+ select CEVT_R4K
+ select CSRC_R4K
+ select SYS_HAS_CPU_VR41XX
+ select SYS_SUPPORTS_MIPS16
+ select GPIOLIB
+
+config RALINK
+ bool "Ralink based machines"
+ select CEVT_R4K
+ select CSRC_R4K
+ select BOOT_RAW
+ select DMA_NONCOHERENT
+ select IRQ_MIPS_CPU
+ select USE_OF
+ select SYS_HAS_CPU_MIPS32_R1
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_MIPS16
+ select SYS_SUPPORTS_ZBOOT
+ select SYS_HAS_EARLY_PRINTK
+ select CLKDEV_LOOKUP
+ select ARCH_HAS_RESET_CONTROLLER
+ select RESET_CONTROLLER
+
+config SGI_IP22
+ bool "SGI IP22 (Indy/Indigo2)"
+ select ARC_MEMORY
+ select ARC_PROMLIB
+ select FW_ARC
+ select FW_ARC32
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select BOOT_ELF32
+ select CEVT_R4K
+ select CSRC_R4K
+ select DEFAULT_SGI_PARTITION
+ select DMA_NONCOHERENT
+ select HAVE_EISA
+ select I8253
+ select I8259
+ select IP22_CPU_SCACHE
+ select IRQ_MIPS_CPU
+ select GENERIC_ISA_DMA_SUPPORT_BROKEN
+ select SGI_HAS_I8042
+ select SGI_HAS_INDYDOG
+ select SGI_HAS_HAL2
+ select SGI_HAS_SEEQ
+ select SGI_HAS_WD93
+ select SGI_HAS_ZILOG
+ select SWAP_IO_SPACE
+ select SYS_HAS_CPU_R4X00
+ select SYS_HAS_CPU_R5000
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select WAR_R4600_V1_INDEX_ICACHEOP
+ select WAR_R4600_V1_HIT_CACHEOP
+ select WAR_R4600_V2_HIT_CACHEOP
+ select MIPS_L1_CACHE_SHIFT_7
+ help
+ These are the SGI Indy, Challenge S and Indigo2, as well as certain
+ OEM variants like the Tandem CMN B006S. To compile a Linux kernel
+ that runs on these, say Y here.
+
+config SGI_IP27
+ bool "SGI IP27 (Origin200/2000)"
+ select ARCH_HAS_PHYS_TO_DMA
+ select ARCH_SPARSEMEM_ENABLE
+ select FW_ARC
+ select FW_ARC64
+ select ARC_CMDLINE_ONLY
+ select BOOT_ELF64
+ select DEFAULT_SGI_PARTITION
+ select SYS_HAS_EARLY_PRINTK
+ select HAVE_PCI
+ select IRQ_MIPS_CPU
+ select IRQ_DOMAIN_HIERARCHY
+ select NR_CPUS_DEFAULT_64
+ select PCI_DRIVERS_GENERIC
+ select PCI_XTALK_BRIDGE
+ select SYS_HAS_CPU_R10000
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_NUMA
+ select SYS_SUPPORTS_SMP
+ select WAR_R10000_LLSC
+ select MIPS_L1_CACHE_SHIFT_7
+ select NUMA
+ help
+ These are the SGI Origin 200, Origin 2000 and Onyx 2 Graphics
+ workstations. To compile a Linux kernel that runs on these, say Y
+ here.
+
+config SGI_IP28
+ bool "SGI IP28 (Indigo2 R10k)"
+ select ARC_MEMORY
+ select ARC_PROMLIB
+ select FW_ARC
+ select FW_ARC64
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select BOOT_ELF64
+ select CEVT_R4K
+ select CSRC_R4K
+ select DEFAULT_SGI_PARTITION
+ select DMA_NONCOHERENT
+ select GENERIC_ISA_DMA_SUPPORT_BROKEN
+ select IRQ_MIPS_CPU
+ select HAVE_EISA
+ select I8253
+ select I8259
+ select SGI_HAS_I8042
+ select SGI_HAS_INDYDOG
+ select SGI_HAS_HAL2
+ select SGI_HAS_SEEQ
+ select SGI_HAS_WD93
+ select SGI_HAS_ZILOG
+ select SWAP_IO_SPACE
+ select SYS_HAS_CPU_R10000
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select WAR_R10000_LLSC
+ select MIPS_L1_CACHE_SHIFT_7
+ help
+ This is the SGI Indigo2 with an R10000 processor. To compile a Linux
+ kernel that runs on it, say Y here.
+
+config SGI_IP30
+ bool "SGI IP30 (Octane/Octane2)"
+ select ARCH_HAS_PHYS_TO_DMA
+ select FW_ARC
+ select FW_ARC64
+ select BOOT_ELF64
+ select CEVT_R4K
+ select CSRC_R4K
+ select SYNC_R4K if SMP
+ select ZONE_DMA32
+ select HAVE_PCI
+ select IRQ_MIPS_CPU
+ select IRQ_DOMAIN_HIERARCHY
+ select NR_CPUS_DEFAULT_2
+ select PCI_DRIVERS_GENERIC
+ select PCI_XTALK_BRIDGE
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_HAS_CPU_R10000
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_SMP
+ select WAR_R10000_LLSC
+ select MIPS_L1_CACHE_SHIFT_7
+ select ARC_MEMORY
+ help
+ These are the SGI Octane and Octane2 graphics workstations. To
+ compile a Linux kernel that runs on these, say Y here.
+
+config SGI_IP32
+ bool "SGI IP32 (O2)"
+ select ARC_MEMORY
+ select ARC_PROMLIB
+ select ARCH_HAS_PHYS_TO_DMA
+ select FW_ARC
+ select FW_ARC32
+ select BOOT_ELF32
+ select CEVT_R4K
+ select CSRC_R4K
+ select DMA_NONCOHERENT
+ select HAVE_PCI
+ select IRQ_MIPS_CPU
+ select R5000_CPU_SCACHE
+ select RM7000_CPU_SCACHE
+ select SYS_HAS_CPU_R5000
+ select SYS_HAS_CPU_R10000 if BROKEN
+ select SYS_HAS_CPU_RM7000
+ select SYS_HAS_CPU_NEVADA
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select WAR_ICACHE_REFILLS
+ help
+ If you want this kernel to run on an SGI O2 workstation, say Y here.
+
+config SIBYTE_CRHINE
+ bool "Sibyte BCM91120C-CRhine"
+ select BOOT_ELF32
+ select SIBYTE_BCM1120
+ select SWAP_IO_SPACE
+ select SYS_HAS_CPU_SB1
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+
+config SIBYTE_CARMEL
+ bool "Sibyte BCM91120x-Carmel"
+ select BOOT_ELF32
+ select SIBYTE_BCM1120
+ select SWAP_IO_SPACE
+ select SYS_HAS_CPU_SB1
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+
+config SIBYTE_CRHONE
+ bool "Sibyte BCM91125C-CRhone"
+ select BOOT_ELF32
+ select SIBYTE_BCM1125
+ select SWAP_IO_SPACE
+ select SYS_HAS_CPU_SB1
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+
+config SIBYTE_RHONE
+ bool "Sibyte BCM91125E-Rhone"
+ select BOOT_ELF32
+ select SIBYTE_BCM1125H
+ select SWAP_IO_SPACE
+ select SYS_HAS_CPU_SB1
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+
+config SIBYTE_SWARM
+ bool "Sibyte BCM91250A-SWARM"
+ select BOOT_ELF32
+ select HAVE_PATA_PLATFORM
+ select SIBYTE_SB1250
+ select SWAP_IO_SPACE
+ select SYS_HAS_CPU_SB1
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select ZONE_DMA32 if 64BIT
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
+
+config SIBYTE_LITTLESUR
+ bool "Sibyte BCM91250C2-LittleSur"
+ select BOOT_ELF32
+ select HAVE_PATA_PLATFORM
+ select SIBYTE_SB1250
+ select SWAP_IO_SPACE
+ select SYS_HAS_CPU_SB1
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select ZONE_DMA32 if 64BIT
+
+config SIBYTE_SENTOSA
+ bool "Sibyte BCM91250E-Sentosa"
+ select BOOT_ELF32
+ select SIBYTE_SB1250
+ select SWAP_IO_SPACE
+ select SYS_HAS_CPU_SB1
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
+
+config SIBYTE_BIGSUR
+ bool "Sibyte BCM91480B-BigSur"
+ select BOOT_ELF32
+ select NR_CPUS_DEFAULT_4
+ select SIBYTE_BCM1x80
+ select SWAP_IO_SPACE
+ select SYS_HAS_CPU_SB1
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select ZONE_DMA32 if 64BIT
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
+
+config SNI_RM
+ bool "SNI RM200/300/400"
+ select ARC_MEMORY
+ select ARC_PROMLIB
+ select FW_ARC if CPU_LITTLE_ENDIAN
+ select FW_ARC32 if CPU_LITTLE_ENDIAN
+ select FW_SNIPROM if CPU_BIG_ENDIAN
+ select ARCH_MAY_HAVE_PC_FDC
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select BOOT_ELF32
+ select CEVT_R4K
+ select CSRC_R4K
+ select DEFAULT_SGI_PARTITION if CPU_BIG_ENDIAN
+ select DMA_NONCOHERENT
+ select GENERIC_ISA_DMA
+ select HAVE_EISA
+ select HAVE_PCSPKR_PLATFORM
+ select HAVE_PCI
+ select IRQ_MIPS_CPU
+ select I8253
+ select I8259
+ select ISA
+ select MIPS_L1_CACHE_SHIFT_6
+ select SWAP_IO_SPACE if CPU_BIG_ENDIAN
+ select SYS_HAS_CPU_R4X00
+ select SYS_HAS_CPU_R5000
+ select SYS_HAS_CPU_R10000
+ select R5000_CPU_SCACHE
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select WAR_R4600_V2_HIT_CACHEOP
+ help
+ The SNI RM200/300/400 are MIPS-based machines manufactured by
+ Siemens Nixdorf Informationssysteme (SNI), parent company of Pyramid
+ Technology and now in turn merged with Fujitsu. Say Y here to
+ support this machine type.
+
+config MACH_TX39XX
+ bool "Toshiba TX39 series based machines"
+
+config MACH_TX49XX
+ bool "Toshiba TX49 series based machines"
+ select WAR_TX49XX_ICACHE_INDEX_INV
+
+config MIKROTIK_RB532
+ bool "Mikrotik RB532 boards"
+ select CEVT_R4K
+ select CSRC_R4K
+ select DMA_NONCOHERENT
+ select HAVE_PCI
+ select IRQ_MIPS_CPU
+ select SYS_HAS_CPU_MIPS32_R1
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SWAP_IO_SPACE
+ select BOOT_RAW
+ select GPIOLIB
+ select MIPS_L1_CACHE_SHIFT_4
+ help
+ Support the Mikrotik(tm) RouterBoard 532 series,
+ based on the IDT RC32434 SoC.
+
+config CAVIUM_OCTEON_SOC
+ bool "Cavium Networks Octeon SoC based boards"
+ select CEVT_R4K
+ select ARCH_HAS_PHYS_TO_DMA
+ select HAVE_RAPIDIO
+ select PHYS_ADDR_T_64BIT
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select EDAC_SUPPORT
+ select EDAC_ATOMIC_SCRUB
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_HOTPLUG_CPU if CPU_BIG_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_HAS_CPU_CAVIUM_OCTEON
+ select HAVE_PCI
+ select HAVE_PLAT_DELAY
+ select HAVE_PLAT_FW_INIT_CMDLINE
+ select HAVE_PLAT_MEMCPY
+ select ZONE_DMA32
+ select HOLES_IN_ZONE
+ select GPIOLIB
+ select USE_OF
+ select ARCH_SPARSEMEM_ENABLE
+ select SYS_SUPPORTS_SMP
+ select NR_CPUS_DEFAULT_64
+ select MIPS_NR_CPU_NR_MAP_1024
+ select BUILTIN_DTB
+ select MTD_COMPLEX_MAPPINGS
+ select SWIOTLB
+ select SYS_SUPPORTS_RELOCATABLE
+ help
+ This option supports all of the Octeon reference boards from Cavium
+ Networks. It builds a kernel that dynamically determines the Octeon
+ CPU type and supports all known board reference implementations.
+ Some of the supported boards are:
+ EBT3000
+ EBH3000
+ EBH3100
+ Thunder
+ Kodama
+ Hikari
+ Say Y here for most Octeon reference boards.
+
+config NLM_XLR_BOARD
+ bool "Netlogic XLR/XLS based systems"
+ select BOOT_ELF32
+ select NLM_COMMON
+ select SYS_HAS_CPU_XLR
+ select SYS_SUPPORTS_SMP
+ select HAVE_PCI
+ select SWAP_IO_SPACE
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select PHYS_ADDR_T_64BIT
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select NR_CPUS_DEFAULT_32
+ select CEVT_R4K
+ select CSRC_R4K
+ select IRQ_MIPS_CPU
+ select ZONE_DMA32 if 64BIT
+ select SYNC_R4K
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_SUPPORTS_ZBOOT
+ select SYS_SUPPORTS_ZBOOT_UART16550
+ help
+ Support for systems based on Netlogic XLR and XLS processors.
+ Say Y here if you have an XLR or XLS based board.
+
+config NLM_XLP_BOARD
+ bool "Netlogic XLP based systems"
+ select BOOT_ELF32
+ select NLM_COMMON
+ select SYS_HAS_CPU_XLP
+ select SYS_SUPPORTS_SMP
+ select HAVE_PCI
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select PHYS_ADDR_T_64BIT
+ select GPIOLIB
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select NR_CPUS_DEFAULT_32
+ select CEVT_R4K
+ select CSRC_R4K
+ select IRQ_MIPS_CPU
+ select ZONE_DMA32 if 64BIT
+ select SYNC_R4K
+ select SYS_HAS_EARLY_PRINTK
+ select USE_OF
+ select SYS_SUPPORTS_ZBOOT
+ select SYS_SUPPORTS_ZBOOT_UART16550
+ help
+ Support for boards based on the Netlogic XLP processor.
+ Say Y here if you have an XLP based board.
+
+endchoice
+
+source "arch/mips/alchemy/Kconfig"
+source "arch/mips/ath25/Kconfig"
+source "arch/mips/ath79/Kconfig"
+source "arch/mips/bcm47xx/Kconfig"
+source "arch/mips/bcm63xx/Kconfig"
+source "arch/mips/bmips/Kconfig"
+source "arch/mips/generic/Kconfig"
+source "arch/mips/ingenic/Kconfig"
+source "arch/mips/jazz/Kconfig"
+source "arch/mips/lantiq/Kconfig"
+source "arch/mips/pic32/Kconfig"
+source "arch/mips/pistachio/Kconfig"
+source "arch/mips/ralink/Kconfig"
+source "arch/mips/sgi-ip27/Kconfig"
+source "arch/mips/sibyte/Kconfig"
+source "arch/mips/txx9/Kconfig"
+source "arch/mips/vr41xx/Kconfig"
+source "arch/mips/cavium-octeon/Kconfig"
+source "arch/mips/loongson2ef/Kconfig"
+source "arch/mips/loongson32/Kconfig"
+source "arch/mips/loongson64/Kconfig"
+source "arch/mips/netlogic/Kconfig"
+
+endmenu
+
+config GENERIC_HWEIGHT
+ bool
+ default y
+
+config GENERIC_CALIBRATE_DELAY
+ bool
+ default y
+
+config SCHED_OMIT_FRAME_POINTER
+ bool
+ default y
+
+#
+# Select some configuration options automatically based on user selections.
+#
+config FW_ARC
+ bool
+
+config ARCH_MAY_HAVE_PC_FDC
+ bool
+
+config BOOT_RAW
+ bool
+
+config CEVT_BCM1480
+ bool
+
+config CEVT_DS1287
+ bool
+
+config CEVT_GT641XX
+ bool
+
+config CEVT_R4K
+ bool
+
+config CEVT_SB1250
+ bool
+
+config CEVT_TXX9
+ bool
+
+config CSRC_BCM1480
+ bool
+
+config CSRC_IOASIC
+ bool
+
+config CSRC_R4K
+ select CLOCKSOURCE_WATCHDOG if CPU_FREQ
+ bool
+
+config CSRC_SB1250
+ bool
+
+config MIPS_CLOCK_VSYSCALL
+ def_bool CSRC_R4K || CLKSRC_MIPS_GIC
+
+config GPIO_TXX9
+ select GPIOLIB
+ bool
+
+config FW_CFE
+ bool
+
+config ARCH_SUPPORTS_UPROBES
+ bool
+
+config DMA_MAYBE_COHERENT
+ select ARCH_HAS_DMA_COHERENCE_H
+ select DMA_NONCOHERENT
+ bool
+
+config DMA_PERDEV_COHERENT
+ bool
+ select ARCH_HAS_SETUP_DMA_OPS
+ select DMA_NONCOHERENT
+
+config DMA_NONCOHERENT
+ bool
+ #
+ # MIPS allows mixing "slightly different" Cacheability and Coherency
+ # Attribute bits. It is believed that the uncached access through
+ # KSEG1 and the implementation specific "uncached accelerated" used
+ # by pgprot_writecombine can be mixed, and the latter sometimes provides
+ # significant advantages.
+ #
+ select ARCH_HAS_DMA_WRITE_COMBINE
+ select ARCH_HAS_DMA_PREP_COHERENT
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_HAS_DMA_SET_UNCACHED
+ select DMA_NONCOHERENT_MMAP
+ select NEED_DMA_MAP_STATE
+
+config SYS_HAS_EARLY_PRINTK
+ bool
+
+config SYS_SUPPORTS_HOTPLUG_CPU
+ bool
+
+config MIPS_BONITO64
+ bool
+
+config MIPS_MSC
+ bool
+
+config SYNC_R4K
+ bool
+
+config NO_IOPORT_MAP
+ def_bool n
+
+config GENERIC_CSUM
+ def_bool CPU_NO_LOAD_STORE_LR
+
+config GENERIC_ISA_DMA
+ bool
+ select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n
+ select ISA_DMA_API
+
+config GENERIC_ISA_DMA_SUPPORT_BROKEN
+ bool
+ select GENERIC_ISA_DMA
+
+config HAVE_PLAT_DELAY
+ bool
+
+config HAVE_PLAT_FW_INIT_CMDLINE
+ bool
+
+config HAVE_PLAT_MEMCPY
+ bool
+
+config ISA_DMA_API
+ bool
+
+config HOLES_IN_ZONE
+ bool
+
+config SYS_SUPPORTS_RELOCATABLE
+ bool
+ help
+ Selected if the platform supports relocating the kernel.
+ The platform must provide plat_get_fdt() if it selects CONFIG_USE_OF
+ to allow access to command line and entropy sources.
+
+config MIPS_CBPF_JIT
+ def_bool y
+ depends on BPF_JIT && HAVE_CBPF_JIT
+
+config MIPS_EBPF_JIT
+ def_bool y
+ depends on BPF_JIT && HAVE_EBPF_JIT
+
+
+#
+# Endianness selection. This is sufficiently obscure that many users don't
+# know what to answer, so we try hard to limit the available choices. Also,
+# the use of a choice statement should be more obvious to the user.
+#
+choice
+ prompt "Endianness selection"
+ help
+ Some MIPS machines can be configured for either little or big endian
+ byte order. These modes require different kernels and a different
+ Linux distribution. In general there is one preferred byte order for
+ a particular system, but some systems are just as commonly used in
+ either endianness.
+
+config CPU_BIG_ENDIAN
+ bool "Big endian"
+ depends on SYS_SUPPORTS_BIG_ENDIAN
+
+config CPU_LITTLE_ENDIAN
+ bool "Little endian"
+ depends on SYS_SUPPORTS_LITTLE_ENDIAN
+
+endchoice
+
+config EXPORT_UASM
+ bool
+
+config SYS_SUPPORTS_APM_EMULATION
+ bool
+
+config SYS_SUPPORTS_BIG_ENDIAN
+ bool
+
+config SYS_SUPPORTS_LITTLE_ENDIAN
+ bool
+
+config SYS_SUPPORTS_HUGETLBFS
+ bool
+ depends on CPU_SUPPORTS_HUGEPAGES
+ default y
+
+config MIPS_HUGE_TLB_SUPPORT
+ def_bool HUGETLB_PAGE || TRANSPARENT_HUGEPAGE
+
+config IRQ_CPU_RM7K
+ bool
+
+config IRQ_MSP_SLP
+ bool
+
+config IRQ_MSP_CIC
+ bool
+
+config IRQ_TXX9
+ bool
+
+config IRQ_GT641XX
+ bool
+
+config PCI_GT64XXX_PCI0
+ bool
+
+config PCI_XTALK_BRIDGE
+ bool
+
+config NO_EXCEPT_FILL
+ bool
+
+config MIPS_SPRAM
+ bool
+
+config SWAP_IO_SPACE
+ bool
+
+config SGI_HAS_INDYDOG
+ bool
+
+config SGI_HAS_HAL2
+ bool
+
+config SGI_HAS_SEEQ
+ bool
+
+config SGI_HAS_WD93
+ bool
+
+config SGI_HAS_ZILOG
+ bool
+
+config SGI_HAS_I8042
+ bool
+
+config DEFAULT_SGI_PARTITION
+ bool
+
+config FW_ARC32
+ bool
+
+config FW_SNIPROM
+ bool
+
+config BOOT_ELF32
+ bool
+
+config MIPS_L1_CACHE_SHIFT_4
+ bool
+
+config MIPS_L1_CACHE_SHIFT_5
+ bool
+
+config MIPS_L1_CACHE_SHIFT_6
+ bool
+
+config MIPS_L1_CACHE_SHIFT_7
+ bool
+
+config MIPS_L1_CACHE_SHIFT
+ int
+ default "7" if MIPS_L1_CACHE_SHIFT_7
+ default "6" if MIPS_L1_CACHE_SHIFT_6
+ default "5" if MIPS_L1_CACHE_SHIFT_5
+ default "4" if MIPS_L1_CACHE_SHIFT_4
+ default "5"
+
+config ARC_CMDLINE_ONLY
+ bool
+
+config ARC_CONSOLE
+ bool "ARC console support"
+ depends on SGI_IP22 || SGI_IP28 || (SNI_RM && CPU_LITTLE_ENDIAN)
+
+config ARC_MEMORY
+ bool
+
+config ARC_PROMLIB
+ bool
+
+config FW_ARC64
+ bool
+
+config BOOT_ELF64
+ bool
+
+menu "CPU selection"
+
+choice
+ prompt "CPU type"
+ default CPU_R4X00
+
+config CPU_LOONGSON64
+ bool "Loongson 64-bit CPU"
+ depends on SYS_HAS_CPU_LOONGSON64
+ select ARCH_HAS_PHYS_TO_DMA
+ select CPU_MIPSR2
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
+ select CPU_SUPPORTS_MSA
+ select CPU_DIEI_BROKEN if !LOONGSON3_ENHANCEMENT
+ select CPU_MIPSR2_IRQ_VI
+ select WEAK_ORDERING
+ select WEAK_REORDERING_BEYOND_LLSC
+ select MIPS_ASID_BITS_VARIABLE
+ select MIPS_PGD_C0_CONTEXT
+ select MIPS_L1_CACHE_SHIFT_6
+ select MIPS_FP_SUPPORT
+ select GPIOLIB
+ select SWIOTLB
+ select HAVE_KVM
+ help
+ The Loongson GSx64 (GS264/GS464/GS464E/GS464V) series of processor
+ cores implements the MIPS64R2 instruction set with many extensions
+ and covers most 64-bit Loongson-2 (2H, 2K) and Loongson-3 (3A1000,
+ 3B1000, 3B1500, 3A2000, 3A3000 and 3A4000) processors. The older
+ Loongson-2E/2F is not covered here and will be removed in the future.
+
+config LOONGSON3_ENHANCEMENT
+ bool "New Loongson-3 CPU Enhancements"
+ default n
+ depends on CPU_LOONGSON64
+ help
+ New Loongson-3 cores (since Loongson-3A R2, as opposed to Loongson-3A
+ R1, Loongson-3B R1 and Loongson-3B R2) have many enhancements, such as
+ FTLB, L1-VCache, EI/DI/Wait/Prefetch instruction, DSP/DSPr2 ASE, User
+ Local register, Read-Inhibit/Execute-Inhibit, SFB (Store Fill Buffer),
+ Fast TLB refill support, etc.
+
+ This option enables those enhancements that are not probed at run
+ time. If you want a generic kernel to run on all Loongson 3 machines,
+ please say 'N' here. If you want a high-performance kernel to run on
+ new Loongson-3 machines only, please say 'Y' here.
+
+config CPU_LOONGSON3_WORKAROUNDS
+ bool "Old Loongson-3 LLSC Workarounds"
+ default y if SMP
+ depends on CPU_LOONGSON64
+ help
+ Loongson-3 processors have ll/sc issues that require workarounds.
+ Without workarounds the system may hang unexpectedly.
+
+ Newer Loongson-3 processors fix these issues, so no workarounds are
+ needed for them. The workarounds have no significant side effect on
+ such systems but may decrease performance, so this option should be
+ disabled unless the kernel is intended to be run on old systems.
+
+ If unsure, please say Y.
+
+config CPU_LOONGSON3_CPUCFG_EMULATION
+ bool "Emulate the CPUCFG instruction on older Loongson cores"
+ default y
+ depends on CPU_LOONGSON64
+ help
+ Loongson-3A R4 and newer have the CPUCFG instruction available for
+ userland to query CPU capabilities, much like CPUID on x86. This
+ option provides emulation of the instruction on older Loongson
+ cores, back to Loongson-3A1000.
+
+ If unsure, please say Y.
+
+config CPU_LOONGSON2E
+ bool "Loongson 2E"
+ depends on SYS_HAS_CPU_LOONGSON2E
+ select CPU_LOONGSON2EF
+ help
+ The Loongson 2E processor implements the MIPS III instruction set
+ with many extensions.
+
+ It has an internal FPGA northbridge, which is compatible with the
+ Bonito64.
+
+config CPU_LOONGSON2F
+ bool "Loongson 2F"
+ depends on SYS_HAS_CPU_LOONGSON2F
+ select CPU_LOONGSON2EF
+ select GPIOLIB
+ help
+ The Loongson 2F processor implements the MIPS III instruction set
+ with many extensions.
+
+ The Loongson 2F has built-in DDR2 and PCI-X controllers. The PCI-X
+ controller has a programming interface similar to that of the FPGA
+ northbridge used in the Loongson 2E.
+
+config CPU_LOONGSON1B
+ bool "Loongson 1B"
+ depends on SYS_HAS_CPU_LOONGSON1B
+ select CPU_LOONGSON32
+ select LEDS_GPIO_REGISTER
+ help
+ The Loongson 1B is a 32-bit SoC, which implements the MIPS32
+ Release 1 instruction set and part of the MIPS32 Release 2
+ instruction set.
+
+config CPU_LOONGSON1C
+ bool "Loongson 1C"
+ depends on SYS_HAS_CPU_LOONGSON1C
+ select CPU_LOONGSON32
+ select LEDS_GPIO_REGISTER
+ help
+ The Loongson 1C is a 32-bit SoC, which implements the MIPS32
+ Release 1 instruction set and part of the MIPS32 Release 2
+ instruction set.
+
+config CPU_MIPS32_R1
+ bool "MIPS32 Release 1"
+ depends on SYS_HAS_CPU_MIPS32_R1
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ help
+ Choose this option to build a kernel for release 1 or later of the
+ MIPS32 architecture. Most modern embedded systems with a 32-bit
+ MIPS processor are based on a MIPS32 processor. If you know the
+ specific type of processor in your system, choose that one;
+ otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system.
+ Release 2 of the MIPS32 architecture has been available for several
+ years, so chances are you even have a MIPS32 Release 2 processor,
+ in which case you should choose CPU_MIPS32_R2 instead for better
+ performance.
+
+config CPU_MIPS32_R2
+ bool "MIPS32 Release 2"
+ depends on SYS_HAS_CPU_MIPS32_R2
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_MSA
+ select HAVE_KVM
+ help
+ Choose this option to build a kernel for release 2 or later of the
+ MIPS32 architecture. Most modern embedded systems with a 32-bit
+ MIPS processor are based on a MIPS32 processor. If you know the
+ specific type of processor in your system, choose that one;
+ otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system.
+
+config CPU_MIPS32_R5
+ bool "MIPS32 Release 5"
+ depends on SYS_HAS_CPU_MIPS32_R5
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_MSA
+ select HAVE_KVM
+ select MIPS_O32_FP64_SUPPORT
+ help
+ Choose this option to build a kernel for release 5 or later of the
+ MIPS32 architecture. New MIPS processors, starting with the Warrior
+ family, are based on a MIPS32r5 processor. If you own an older
+ processor, you probably need to select MIPS32r1 or MIPS32r2 instead.
+
+config CPU_MIPS32_R6
+ bool "MIPS32 Release 6"
+ depends on SYS_HAS_CPU_MIPS32_R6
+ select CPU_HAS_PREFETCH
+ select CPU_NO_LOAD_STORE_LR
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_MSA
+ select HAVE_KVM
+ select MIPS_O32_FP64_SUPPORT
+ help
+ Choose this option to build a kernel for release 6 or later of the
+ MIPS32 architecture. New MIPS processors, starting with the Warrior
+ family, are based on a MIPS32r6 processor. If you own an older
+ processor, you probably need to select MIPS32r1 or MIPS32r2 instead.
+
+config CPU_MIPS64_R1
+ bool "MIPS64 Release 1"
+ depends on SYS_HAS_CPU_MIPS64_R1
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
+ help
+ Choose this option to build a kernel for release 1 or later of the
+ MIPS64 architecture. Many modern embedded systems with a 64-bit
+ MIPS processor are based on a MIPS64 processor. If you know the
+ specific type of processor in your system, choose that one;
+ otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
+ Release 2 of the MIPS64 architecture has been available for several
+ years, so chances are you even have a MIPS64 Release 2 processor,
+ in which case you should choose CPU_MIPS64_R2 instead for better
+ performance.
+
+config CPU_MIPS64_R2
+ bool "MIPS64 Release 2"
+ depends on SYS_HAS_CPU_MIPS64_R2
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
+ select CPU_SUPPORTS_MSA
+ select HAVE_KVM
+ help
+ Choose this option to build a kernel for release 2 or later of the
+ MIPS64 architecture. Many modern embedded systems with a 64-bit
+ MIPS processor are based on a MIPS64 processor. If you know the
+ specific type of processor in your system, choose that one;
+ otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
+
+config CPU_MIPS64_R5
+ bool "MIPS64 Release 5"
+ depends on SYS_HAS_CPU_MIPS64_R5
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
+ select CPU_SUPPORTS_MSA
+ select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
+ select HAVE_KVM
+ help
+ Choose this option to build a kernel for release 5 or later of the
+ MIPS64 architecture. This is an intermediate MIPS architecture
+ release that partly implements release 6 features, though no
+ hardware is known to be based on this release.
+
+config CPU_MIPS64_R6
+ bool "MIPS64 Release 6"
+ depends on SYS_HAS_CPU_MIPS64_R6
+ select CPU_HAS_PREFETCH
+ select CPU_NO_LOAD_STORE_LR
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
+ select CPU_SUPPORTS_MSA
+ select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
+ select HAVE_KVM
+ help
+ Choose this option to build a kernel for release 6 or later of the
+ MIPS64 architecture. New MIPS processors, starting with the Warrior
+ family, are based on a MIPS64r6 processor. If you own an older
+ processor, you probably need to select MIPS64r1 or MIPS64r2 instead.
+
+config CPU_P5600
+ bool "MIPS Warrior P5600"
+ depends on SYS_HAS_CPU_P5600
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_MSA
+ select CPU_SUPPORTS_CPUFREQ
+ select CPU_MIPSR2_IRQ_VI
+ select CPU_MIPSR2_IRQ_EI
+ select HAVE_KVM
+ select MIPS_O32_FP64_SUPPORT
+ help
+ Choose this option to build a kernel for MIPS Warrior P5600 CPU.
+ It's based on MIPS32r5 ISA with XPA, EVA, dual/quad issue exec pipes,
+ MMU with two-level TLB, UCA, MSA, MDU core level features and system
+ level features like up to six P5600 calculation cores, CM2 with L2
+ cache, IOCU/IOMMU (though might be unused depending on the system-
+ specific IP core configuration), GIC, CPC, virtualisation module,
+ eJTAG and PDtrace.
+
+config CPU_R3000
+ bool "R3000"
+ depends on SYS_HAS_CPU_R3000
+ select CPU_HAS_WB
+ select CPU_R3K_TLB
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ help
+ Please make sure to pick the right CPU type. Linux/MIPS is not
+ designed to be generic, i.e. kernels compiled for R3000 CPUs will
+ *not* work on R4000 machines and vice versa. However, since most
+ of the supported machines have an R4000 (or similar) CPU, R4x00
+ might be a safe bet. If the resulting kernel does not work,
+ try to recompile with R3000.
+
+config CPU_TX39XX
+ bool "R39XX"
+ depends on SYS_HAS_CPU_TX39XX
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_R3K_TLB
+
+config CPU_VR41XX
+ bool "R41xx"
+ depends on SYS_HAS_CPU_VR41XX
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ help
+ This option selects support for the NEC VR4100 series of processors.
+ Only choose this option if you have one of these processors, as a
+ kernel built with this option will not run on any other type of
+ processor, and vice versa.
+
+config CPU_R4X00
+ bool "R4x00"
+ depends on SYS_HAS_CPU_R4X00
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HUGEPAGES
+ help
+ MIPS Technologies R4000-series processors other than the R4300,
+ including the R4000, R4400, R4600 and R4700.
+
+config CPU_TX49XX
+ bool "R49XX"
+ depends on SYS_HAS_CPU_TX49XX
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HUGEPAGES
+
+config CPU_R5000
+ bool "R5000"
+ depends on SYS_HAS_CPU_R5000
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HUGEPAGES
+ help
+ MIPS Technologies R5000-series processors other than the Nevada.
+
+config CPU_R5500
+ bool "R5500"
+ depends on SYS_HAS_CPU_R5500
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HUGEPAGES
+ help
+ NEC VR5500 and VR5500A series processors implement the 64-bit MIPS IV
+ instruction set.
+
+config CPU_NEVADA
+ bool "RM52xx"
+ depends on SYS_HAS_CPU_NEVADA
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HUGEPAGES
+ help
+ QED / PMC-Sierra RM52xx-series ("Nevada") processors.
+
+config CPU_R10000
+ bool "R10000"
+ depends on SYS_HAS_CPU_R10000
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
+ help
+ MIPS Technologies R10000-series processors.
+
+config CPU_RM7000
+ bool "RM7000"
+ depends on SYS_HAS_CPU_RM7000
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
+
+config CPU_SB1
+ bool "SB1"
+ depends on SYS_HAS_CPU_SB1
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
+ select WEAK_ORDERING
+
+config CPU_CAVIUM_OCTEON
+ bool "Cavium Octeon processor"
+ depends on SYS_HAS_CPU_CAVIUM_OCTEON
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select WEAK_ORDERING
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
+ select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
+ select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
+ select MIPS_L1_CACHE_SHIFT_7
+ select HAVE_KVM
+ help
+ The Cavium Octeon processor is a highly integrated chip containing
+ many Ethernet hardware widgets for networking tasks. The processor
+ can have up to 16 MIPS64v2 cores and 8 integrated gigabit Ethernet
+ ports. Full details can be found at http://www.caviumnetworks.com.
+
+config CPU_BMIPS
+ bool "Broadcom BMIPS"
+ depends on SYS_HAS_CPU_BMIPS
+ select CPU_MIPS32
+ select CPU_BMIPS32_3300 if SYS_HAS_CPU_BMIPS32_3300
+ select CPU_BMIPS4350 if SYS_HAS_CPU_BMIPS4350
+ select CPU_BMIPS4380 if SYS_HAS_CPU_BMIPS4380
+ select CPU_BMIPS5000 if SYS_HAS_CPU_BMIPS5000
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select DMA_NONCOHERENT
+ select IRQ_MIPS_CPU
+ select SWAP_IO_SPACE
+ select WEAK_ORDERING
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_CPUFREQ
+ select MIPS_EXTERNAL_TIMER
+ help
+ Support for BMIPS32/3300/4350/4380 and BMIPS5000 processors.
+
+config CPU_XLR
+ bool "Netlogic XLR SoC"
+ depends on SYS_HAS_CPU_XLR
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
+ select WEAK_ORDERING
+ select WEAK_REORDERING_BEYOND_LLSC
+ help
+ Netlogic Microsystems XLR/XLS processors.
+
+config CPU_XLP
+ bool "Netlogic XLP SoC"
+ depends on SYS_HAS_CPU_XLP
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select WEAK_ORDERING
+ select WEAK_REORDERING_BEYOND_LLSC
+ select CPU_HAS_PREFETCH
+ select CPU_MIPSR2
+ select CPU_SUPPORTS_HUGEPAGES
+ select MIPS_ASID_BITS_VARIABLE
+ help
+ Netlogic Microsystems XLP processors.
+endchoice
+
+config CPU_MIPS32_3_5_FEATURES
+ bool "MIPS32 Release 3.5 Features"
+ depends on SYS_HAS_CPU_MIPS32_R3_5
+ depends on CPU_MIPS32_R2 || CPU_MIPS32_R5 || CPU_MIPS32_R6 || \
+ CPU_P5600
+ help
+ Choose this option to build a kernel for release 2 or later of the
+ MIPS32 architecture including features from the 3.5 release such as
+ support for Enhanced Virtual Addressing (EVA).
+
+config CPU_MIPS32_3_5_EVA
+ bool "Enhanced Virtual Addressing (EVA)"
+ depends on CPU_MIPS32_3_5_FEATURES
+ select EVA
+ default y
+ help
+ Choose this option if you want to enable the Enhanced Virtual
+ Addressing (EVA) on your MIPS32 core (such as proAptiv).
+ One of its primary benefits is an increase in the maximum size
+ of lowmem (up to 3GB). If unsure, say 'N' here.
+
+config CPU_MIPS32_R5_FEATURES
+ bool "MIPS32 Release 5 Features"
+ depends on SYS_HAS_CPU_MIPS32_R5
+ depends on CPU_MIPS32_R2 || CPU_MIPS32_R5 || CPU_P5600
+ help
+ Choose this option to build a kernel for release 2 or later of the
+ MIPS32 architecture including features from release 5 such as
+ support for Extended Physical Addressing (XPA).
+
+config CPU_MIPS32_R5_XPA
+ bool "Extended Physical Addressing (XPA)"
+ depends on CPU_MIPS32_R5_FEATURES
+ depends on !EVA
+ depends on !PAGE_SIZE_4KB
+ depends on SYS_SUPPORTS_HIGHMEM
+ select XPA
+ select HIGHMEM
+ select PHYS_ADDR_T_64BIT
+ default n
+ help
+ Choose this option if you want to enable the Extended Physical
+ Addressing (XPA) on your MIPS32 core (such as P5600 series). The
+ benefit is to extend physical addressing to 40 bits or more. Note
+ that this has the side effect of turning on 64-bit addressing, which
+ in turn makes the PTEs 64-bit in size.
+ If unsure, say 'N' here.
+
+if CPU_LOONGSON2F
+config CPU_NOP_WORKAROUNDS
+ bool
+
+config CPU_JUMP_WORKAROUNDS
+ bool
+
+config CPU_LOONGSON2F_WORKAROUNDS
+ bool "Loongson 2F Workarounds"
+ default y
+ select CPU_NOP_WORKAROUNDS
+ select CPU_JUMP_WORKAROUNDS
+ help
+ Loongson 2F01 / 2F02 processors have the NOP & JUMP issues which
+ require workarounds. Without workarounds the system may hang
+ unexpectedly. For more information please refer to the gas
+ -mfix-loongson2f-nop and -mfix-loongson2f-jump options.
+
+ Loongson 2F03 and later have fixed these issues and no workarounds
+ are needed. The workarounds have no significant side effect on them
+ but may decrease the performance of the system so this option should
+ be disabled unless the kernel is intended to be run on 2F01 or 2F02
+ systems.
+
+ If unsure, please say Y.
+endif # CPU_LOONGSON2F
+
+config SYS_SUPPORTS_ZBOOT
+ bool
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_BZIP2
+ select HAVE_KERNEL_LZ4
+ select HAVE_KERNEL_LZMA
+ select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_XZ
+ select HAVE_KERNEL_ZSTD
+
+config SYS_SUPPORTS_ZBOOT_UART16550
+ bool
+ select SYS_SUPPORTS_ZBOOT
+
+config SYS_SUPPORTS_ZBOOT_UART_PROM
+ bool
+ select SYS_SUPPORTS_ZBOOT
+
+config CPU_LOONGSON2EF
+ bool
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
+ select ARCH_HAS_PHYS_TO_DMA
+
+config CPU_LOONGSON32
+ bool
+ select CPU_MIPS32
+ select CPU_MIPSR2
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_CPUFREQ
+
+config CPU_BMIPS32_3300
+ select SMP_UP if SMP
+ bool
+
+config CPU_BMIPS4350
+ bool
+ select SYS_SUPPORTS_SMP
+ select SYS_SUPPORTS_HOTPLUG_CPU
+
+config CPU_BMIPS4380
+ bool
+ select MIPS_L1_CACHE_SHIFT_6
+ select SYS_SUPPORTS_SMP
+ select SYS_SUPPORTS_HOTPLUG_CPU
+ select CPU_HAS_RIXI
+
+config CPU_BMIPS5000
+ bool
+ select MIPS_CPU_SCACHE
+ select MIPS_L1_CACHE_SHIFT_7
+ select SYS_SUPPORTS_SMP
+ select SYS_SUPPORTS_HOTPLUG_CPU
+ select CPU_HAS_RIXI
+
+config SYS_HAS_CPU_LOONGSON64
+ bool
+ select CPU_SUPPORTS_CPUFREQ
+ select CPU_HAS_RIXI
+
+config SYS_HAS_CPU_LOONGSON2E
+ bool
+
+config SYS_HAS_CPU_LOONGSON2F
+ bool
+ select CPU_SUPPORTS_CPUFREQ
+ select CPU_SUPPORTS_ADDRWINCFG if 64BIT
+
+config SYS_HAS_CPU_LOONGSON1B
+ bool
+
+config SYS_HAS_CPU_LOONGSON1C
+ bool
+
+config SYS_HAS_CPU_MIPS32_R1
+ bool
+
+config SYS_HAS_CPU_MIPS32_R2
+ bool
+
+config SYS_HAS_CPU_MIPS32_R3_5
+ bool
+
+config SYS_HAS_CPU_MIPS32_R5
+ bool
+ select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
+
+config SYS_HAS_CPU_MIPS32_R6
+ bool
+ select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
+
+config SYS_HAS_CPU_MIPS64_R1
+ bool
+
+config SYS_HAS_CPU_MIPS64_R2
+ bool
+
+config SYS_HAS_CPU_MIPS64_R5
+ bool
+ select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
+
+config SYS_HAS_CPU_MIPS64_R6
+ bool
+ select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
+
+config SYS_HAS_CPU_P5600
+ bool
+ select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
+
+config SYS_HAS_CPU_R3000
+ bool
+
+config SYS_HAS_CPU_TX39XX
+ bool
+
+config SYS_HAS_CPU_VR41XX
+ bool
+
+config SYS_HAS_CPU_R4X00
+ bool
+
+config SYS_HAS_CPU_TX49XX
+ bool
+
+config SYS_HAS_CPU_R5000
+ bool
+
+config SYS_HAS_CPU_R5500
+ bool
+
+config SYS_HAS_CPU_NEVADA
+ bool
+
+config SYS_HAS_CPU_R10000
+ bool
+ select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
+
+config SYS_HAS_CPU_RM7000
+ bool
+
+config SYS_HAS_CPU_SB1
+ bool
+
+config SYS_HAS_CPU_CAVIUM_OCTEON
+ bool
+
+config SYS_HAS_CPU_BMIPS
+ bool
+
+config SYS_HAS_CPU_BMIPS32_3300
+ bool
+ select SYS_HAS_CPU_BMIPS
+
+config SYS_HAS_CPU_BMIPS4350
+ bool
+ select SYS_HAS_CPU_BMIPS
+
+config SYS_HAS_CPU_BMIPS4380
+ bool
+ select SYS_HAS_CPU_BMIPS
+
+config SYS_HAS_CPU_BMIPS5000
+ bool
+ select SYS_HAS_CPU_BMIPS
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
+
+config SYS_HAS_CPU_XLR
+ bool
+
+config SYS_HAS_CPU_XLP
+ bool
+
+#
+# CPU may reorder R->R, R->W, W->R, W->W
+# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
+#
+config WEAK_ORDERING
+ bool
+
+#
+# CPU may reorder reads and writes beyond LL/SC
+# CPU may reorder R->LL, R->LL, W->LL, W->LL, R->SC, R->SC, W->SC, W->SC
+#
+config WEAK_REORDERING_BEYOND_LLSC
+ bool
+endmenu
+
+#
+# These two indicate any level of the MIPS32 and MIPS64 architecture
+#
+config CPU_MIPS32
+ bool
+ default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R5 || \
+ CPU_MIPS32_R6 || CPU_P5600
+
+config CPU_MIPS64
+ bool
+ default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R5 || \
+ CPU_MIPS64_R6
+
+#
+# These indicate the revision of the architecture
+#
+config CPU_MIPSR1
+ bool
+ default y if CPU_MIPS32_R1 || CPU_MIPS64_R1
+
+config CPU_MIPSR2
+ bool
+ default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
+ select CPU_HAS_RIXI
+ select CPU_HAS_DIEI if !CPU_DIEI_BROKEN
+ select MIPS_SPRAM
+
+config CPU_MIPSR5
+ bool
+ default y if CPU_MIPS32_R5 || CPU_MIPS64_R5 || CPU_P5600
+ select CPU_HAS_RIXI
+ select CPU_HAS_DIEI if !CPU_DIEI_BROKEN
+ select MIPS_SPRAM
+
+config CPU_MIPSR6
+ bool
+ default y if CPU_MIPS32_R6 || CPU_MIPS64_R6
+ select CPU_HAS_RIXI
+ select CPU_HAS_DIEI if !CPU_DIEI_BROKEN
+ select HAVE_ARCH_BITREVERSE
+ select MIPS_ASID_BITS_VARIABLE
+ select MIPS_CRC_SUPPORT
+ select MIPS_SPRAM
+
+config TARGET_ISA_REV
+ int
+ default 1 if CPU_MIPSR1
+ default 2 if CPU_MIPSR2
+ default 5 if CPU_MIPSR5
+ default 6 if CPU_MIPSR6
+ default 0
+ help
+ Reflects the ISA revision being targeted by the kernel build. This
+ is effectively the Kconfig equivalent of MIPS_ISA_REV.
+
+config EVA
+ bool
+
+config XPA
+ bool
+
+config SYS_SUPPORTS_32BIT_KERNEL
+ bool
+config SYS_SUPPORTS_64BIT_KERNEL
+ bool
+config CPU_SUPPORTS_32BIT_KERNEL
+ bool
+config CPU_SUPPORTS_64BIT_KERNEL
+ bool
+config CPU_SUPPORTS_CPUFREQ
+ bool
+config CPU_SUPPORTS_ADDRWINCFG
+ bool
+config CPU_SUPPORTS_HUGEPAGES
+ bool
+ depends on !(32BIT && (PHYS_ADDR_T_64BIT || EVA))
+config MIPS_PGD_C0_CONTEXT
+ bool
+ default y if 64BIT && (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP
+
+#
+# Set to y for ptrace access to watch registers.
+#
+config HARDWARE_WATCHPOINTS
+ bool
+ default y if CPU_MIPSR1 || CPU_MIPSR2 || CPU_MIPSR6
+
+menu "Kernel type"
+
+choice
+ prompt "Kernel code model"
+ help
+ You should only select this option if you have a workload that
+ actually benefits from 64-bit processing or if your machine has
+ large memory. You will only be presented a single option in this
+ menu if your system does not support both 32-bit and 64-bit kernels.
+
+config 32BIT
+ bool "32-bit kernel"
+ depends on CPU_SUPPORTS_32BIT_KERNEL && SYS_SUPPORTS_32BIT_KERNEL
+ select TRAD_SIGNALS
+ help
+ Select this option if you want to build a 32-bit kernel.
+
+config 64BIT
+ bool "64-bit kernel"
+ depends on CPU_SUPPORTS_64BIT_KERNEL && SYS_SUPPORTS_64BIT_KERNEL
+ help
+ Select this option if you want to build a 64-bit kernel.
+
+endchoice
+
+config KVM_GUEST
+ bool "KVM Guest Kernel"
+ depends on CPU_MIPS32_R2
+ depends on BROKEN_ON_SMP
+ help
+ Select this option if building a guest kernel for KVM (Trap & Emulate)
+ mode.
+
+config KVM_GUEST_TIMER_FREQ
+ int "Count/Compare Timer Frequency (MHz)"
+ depends on KVM_GUEST
+ default 100
+ help
+ Set this to non-zero if building a guest kernel for KVM to skip RTC
+ emulation when determining the guest CPU frequency. Instead, the guest's
+ timer frequency is specified directly.
+
+config MIPS_VA_BITS_48
+ bool "48 bits virtual memory"
+ depends on 64BIT
+ help
+ Support at least 48 bits of application virtual address space.
+ Default is 40 bits or less, depending on the CPU.
+ For page sizes 16k and above, this option results in a small
+ memory overhead for page tables. For 4k page size, a fourth
+ level of page tables is added which imposes both a memory
+ overhead as well as slower TLB fault handling.
+
+ If unsure, say N.
+
+choice
+ prompt "Kernel page size"
+ default PAGE_SIZE_4KB
+
+config PAGE_SIZE_4KB
+ bool "4kB"
+ depends on !CPU_LOONGSON2EF && !CPU_LOONGSON64
+ help
+ This option selects the standard 4kB Linux page size. On some
+ R3000-family processors this is the only available page size. Using
+ 4kB page size will minimize memory consumption and is therefore
+ recommended for low memory systems.
+
+config PAGE_SIZE_8KB
+ bool "8kB"
+ depends on CPU_CAVIUM_OCTEON
+ depends on !MIPS_VA_BITS_48
+ help
+ Using 8kB page size will result in higher performance kernel at
+ the price of higher memory consumption. This option is available
+ only on cnMIPS processors. Note that you will need a suitable Linux
+ distribution to support this.
+
+config PAGE_SIZE_16KB
+ bool "16kB"
+ depends on !CPU_R3000 && !CPU_TX39XX
+ help
+ Using 16kB page size will result in higher performance kernel at
+ the price of higher memory consumption. This option is available on
+ all non-R3000 family processors. Note that you will need a suitable
+ Linux distribution to support this.
+
+config PAGE_SIZE_32KB
+ bool "32kB"
+ depends on CPU_CAVIUM_OCTEON
+ depends on !MIPS_VA_BITS_48
+ help
+ Using 32kB page size will result in higher performance kernel at
+ the price of higher memory consumption. This option is available
+ only on cnMIPS cores. Note that you will need a suitable Linux
+ distribution to support this.
+
+config PAGE_SIZE_64KB
+ bool "64kB"
+ depends on !CPU_R3000 && !CPU_TX39XX
+ help
+ Using 64kB page size will result in higher performance kernel at
+ the price of higher memory consumption. This option is available on
+ all non-R3000 family processors. Note that at the time of this
+ writing this option is still highly experimental.
+
+endchoice
+
+config FORCE_MAX_ZONEORDER
+ int "Maximum zone order"
+ range 14 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
+ default "14" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
+ range 13 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
+ default "13" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
+ range 12 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
+ default "12" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
+ range 0 64
+ default "11"
+ help
+ The kernel memory allocator divides physically contiguous memory
+ blocks into "zones", where each zone is a power of two number of
+ pages. This option selects the largest power of two that the kernel
+ keeps in the memory allocator. If you need to allocate very large
+ blocks of physically contiguous memory, then you may need to
+ increase this value.
+
+ This config option is actually maximum order plus one. For example,
+ a value of 11 means that the largest free memory block is 2^10 pages.
+
+ The page size is not necessarily 4KB. Keep this in mind
+ when choosing a value for this option.
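+
+ As a worked example, the default of 11 with 4kB pages gives a largest
+ block of 2^10 * 4kB = 4MB, while the default of 12 used with 16kB
+ pages gives 2^11 * 16kB = 32MB.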
+
+config BOARD_SCACHE
+ bool
+
+config IP22_CPU_SCACHE
+ bool
+ select BOARD_SCACHE
+
+#
+# Support for a MIPS32 / MIPS64 style S-caches
+#
+config MIPS_CPU_SCACHE
+ bool
+ select BOARD_SCACHE
+
+config R5000_CPU_SCACHE
+ bool
+ select BOARD_SCACHE
+
+config RM7000_CPU_SCACHE
+ bool
+ select BOARD_SCACHE
+
+config SIBYTE_DMA_PAGEOPS
+ bool "Use DMA to clear/copy pages"
+ depends on CPU_SB1
+ help
+ Instead of using the CPU to zero and copy pages, use a Data Mover
+ channel. These DMA channels are otherwise unused by the standard
+ SiByte Linux port. Seems to give a small performance benefit.
+
+config CPU_HAS_PREFETCH
+ bool
+
+config CPU_GENERIC_DUMP_TLB
+ bool
+ default y if !(CPU_R3000 || CPU_TX39XX)
+
+config MIPS_FP_SUPPORT
+ bool "Floating Point support" if EXPERT
+ default y
+ help
+ Select y to include support for floating point in the kernel
+ including initialization of FPU hardware, FP context save & restore
+ and emulation of an FPU where necessary. Without this support any
+ userland program attempting to use floating point instructions will
+ receive a SIGILL.
+
+ If you know that your userland will not attempt to use floating point
+ instructions then you can say n here to shrink the kernel a little.
+
+ If unsure, say y.
+
+config CPU_R2300_FPU
+ bool
+ depends on MIPS_FP_SUPPORT
+ default y if CPU_R3000 || CPU_TX39XX
+
+config CPU_R3K_TLB
+ bool
+
+config CPU_R4K_FPU
+ bool
+ depends on MIPS_FP_SUPPORT
+ default y if !CPU_R2300_FPU
+
+config CPU_R4K_CACHE_TLB
+ bool
+ default y if !(CPU_R3K_TLB || CPU_SB1 || CPU_CAVIUM_OCTEON)
+
+config MIPS_MT_SMP
+ bool "MIPS MT SMP support (1 TC on each available VPE)"
+ default y
+ depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 && !CPU_MICROMIPS
+ select CPU_MIPSR2_IRQ_VI
+ select CPU_MIPSR2_IRQ_EI
+ select SYNC_R4K
+ select MIPS_MT
+ select SMP
+ select SMP_UP
+ select SYS_SUPPORTS_SMP
+ select SYS_SUPPORTS_SCHED_SMT
+ select MIPS_PERF_SHARED_TC_COUNTERS
+ help
+ This is a kernel model which is known as SMVP. This is supported
+ on cores with the MT ASE and uses the available VPEs to implement
+ virtual processors which support SMP. This is similar to the
+ Intel Hyperthreading feature. For further information go to
+ <http://www.imgtec.com/mips/mips-multithreading.asp>.
+
+config MIPS_MT
+ bool
+
+config SCHED_SMT
+ bool "SMT (multithreading) scheduler support"
+ depends on SYS_SUPPORTS_SCHED_SMT
+ default n
+ help
+ SMT scheduler support improves the CPU scheduler's decision making
+ when dealing with MIPS MT enabled cores at a cost of slightly
+ increased overhead in some places. If unsure say N here.
+
+config SYS_SUPPORTS_SCHED_SMT
+ bool
+
+config SYS_SUPPORTS_MULTITHREADING
+ bool
+
+config MIPS_MT_FPAFF
+ bool "Dynamic FPU affinity for FP-intensive threads"
+ default y
+ depends on MIPS_MT_SMP
+
+config MIPSR2_TO_R6_EMULATOR
+ bool "MIPS R2-to-R6 emulator"
+ depends on CPU_MIPSR6
+ depends on MIPS_FP_SUPPORT
+ default y
+ help
+ Choose this option if you want to run non-R6 MIPS userland code.
+ Even if you say 'Y' here, the emulator will still be disabled by
+ default. You can enable it using the 'mipsr2emu' kernel option.
+ The only reason this is a build-time option is to save ~14K from the
+ final kernel image.
+
+config SYS_SUPPORTS_VPE_LOADER
+ bool
+ depends on SYS_SUPPORTS_MULTITHREADING
+ help
+ Indicates that the platform supports the VPE loader, and provides
+ physical_memsize.
+
+config MIPS_VPE_LOADER
+ bool "VPE loader support."
+ depends on SYS_SUPPORTS_VPE_LOADER && MODULES
+ select CPU_MIPSR2_IRQ_VI
+ select CPU_MIPSR2_IRQ_EI
+ select MIPS_MT
+ help
+ Includes a loader for loading an ELF relocatable object
+ onto another VPE and running it.
+
+config MIPS_VPE_LOADER_CMP
+ bool
+ default "y"
+ depends on MIPS_VPE_LOADER && MIPS_CMP
+
+config MIPS_VPE_LOADER_MT
+ bool
+ default "y"
+ depends on MIPS_VPE_LOADER && !MIPS_CMP
+
+config MIPS_VPE_LOADER_TOM
+ bool "Load VPE program into memory hidden from linux"
+ depends on MIPS_VPE_LOADER
+ default y
+ help
+ The loader can use memory that is present but has been hidden from
+ Linux using the kernel command line option "mem=xxMB". It's up to
+ you to ensure that the amount you put in the option plus the space
+ your program requires is less than or equal to the amount physically
+ present.
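+
+ For example (sizes assumed purely for illustration), booting a board
+ that has 256MB of RAM with "mem=240MB" leaves the top 16MB hidden
+ from Linux and available for loading the VPE program.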
+
+config MIPS_VPE_APSP_API
+ bool "Enable support for AP/SP API (RTLX)"
+ depends on MIPS_VPE_LOADER
+
+config MIPS_VPE_APSP_API_CMP
+ bool
+ default "y"
+ depends on MIPS_VPE_APSP_API && MIPS_CMP
+
+config MIPS_VPE_APSP_API_MT
+ bool
+ default "y"
+ depends on MIPS_VPE_APSP_API && !MIPS_CMP
+
+config MIPS_CMP
+ bool "MIPS CMP framework support (DEPRECATED)"
+ depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6
+ select SMP
+ select SYNC_R4K
+ select SYS_SUPPORTS_SMP
+ select WEAK_ORDERING
+ default n
+ help
+ Select this if you are using a bootloader which implements the "CMP
+ framework" protocol (i.e. YAMON) and want your kernel to make use of
+ its ability to start secondary CPUs.
+
+ Unless you have a specific need, you should use CONFIG_MIPS_CPS
+ instead of this.
+
+config MIPS_CPS
+ bool "MIPS Coherent Processing System support"
+ depends on SYS_SUPPORTS_MIPS_CPS
+ select MIPS_CM
+ select MIPS_CPS_PM if HOTPLUG_CPU
+ select SMP
+ select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
+ select SYS_SUPPORTS_HOTPLUG_CPU
+ select SYS_SUPPORTS_SCHED_SMT if CPU_MIPSR6
+ select SYS_SUPPORTS_SMP
+ select WEAK_ORDERING
+ help
+ Select this if you wish to run an SMP kernel across multiple cores
+ within a MIPS Coherent Processing System. When this option is
+ enabled the kernel will probe for other cores and boot them with
+ no external assistance. It is safe to enable this when hardware
+ support is unavailable.
+
+config MIPS_CPS_PM
+ depends on MIPS_CPS
+ bool
+
+config MIPS_CM
+ bool
+ select MIPS_CPC
+
+config MIPS_CPC
+ bool
+
+config SB1_PASS_2_WORKAROUNDS
+ bool
+ depends on CPU_SB1 && (CPU_SB1_PASS_2_2 || CPU_SB1_PASS_2)
+ default y
+
+config SB1_PASS_2_1_WORKAROUNDS
+ bool
+ depends on CPU_SB1 && CPU_SB1_PASS_2
+ default y
+
+choice
+ prompt "SmartMIPS or microMIPS ASE support"
+
+config CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS
+ bool "None"
+ help
+ Select this if you want neither microMIPS nor SmartMIPS support.
+
+config CPU_HAS_SMARTMIPS
+ depends on SYS_SUPPORTS_SMARTMIPS
+ bool "SmartMIPS"
+ help
+ SmartMIPS is an extension of the MIPS32 architecture aimed at
+ increased security at both the hardware and software level for
+ smartcards. Enabling this option will allow proper use of the
+ SmartMIPS instructions by Linux applications. However, a kernel built
+ with this option will not work on a MIPS core without SmartMIPS
+ support. If you don't know, you probably don't have SmartMIPS and
+ should say N here.
+
+config CPU_MICROMIPS
+ depends on 32BIT && SYS_SUPPORTS_MICROMIPS && !CPU_MIPSR6
+ bool "microMIPS"
+ help
+ When this option is enabled the kernel will be built using the
+ microMIPS ISA.
+
+endchoice
+
+config CPU_HAS_MSA
+ bool "Support for the MIPS SIMD Architecture"
+ depends on CPU_SUPPORTS_MSA
+ depends on MIPS_FP_SUPPORT
+ depends on 64BIT || MIPS_O32_FP64_SUPPORT
+ help
+ MIPS SIMD Architecture (MSA) introduces 128 bit wide vector registers
+ and a set of SIMD instructions to operate on them. When this option
+ is enabled the kernel will support allocating & switching MSA
+ vector register contexts. If you know that your kernel will only be
+ running on CPUs which do not support MSA or that your userland will
+ not be making use of it then you may wish to say N here to reduce
+ the size & complexity of your kernel.
+
+ If unsure, say Y.
+
+config CPU_HAS_WB
+ bool
+
+config XKS01
+ bool
+
+config CPU_HAS_DIEI
+ depends on !CPU_DIEI_BROKEN
+ bool
+
+config CPU_DIEI_BROKEN
+ bool
+
+config CPU_HAS_RIXI
+ bool
+
+config CPU_NO_LOAD_STORE_LR
+ bool
+ help
+ CPU lacks support for unaligned load and store instructions:
+ LWL, LWR, SWL, SWR (Load/store word left/right).
+ LDL, LDR, SDL, SDR (Load/store doubleword left/right, for 64-bit
+ systems).
+
+#
+# Vectored interrupt mode is an R2 feature
+#
+config CPU_MIPSR2_IRQ_VI
+ bool
+
+#
+# Extended interrupt mode is an R2 feature
+#
+config CPU_MIPSR2_IRQ_EI
+ bool
+
+config CPU_HAS_SYNC
+ bool
+ depends on !CPU_R3000
+ default y
+
+#
+# CPU non-features
+#
+config CPU_DADDI_WORKAROUNDS
+ bool
+
+config CPU_R4000_WORKAROUNDS
+ bool
+ select CPU_R4400_WORKAROUNDS
+
+config CPU_R4400_WORKAROUNDS
+ bool
+
+config CPU_R4X00_BUGS64
+ bool
+ default y if SYS_HAS_CPU_R4X00 && 64BIT && (TARGET_ISA_REV < 1)
+
+config MIPS_ASID_SHIFT
+ int
+ default 6 if CPU_R3000 || CPU_TX39XX
+ default 0
+
+config MIPS_ASID_BITS
+ int
+ default 0 if MIPS_ASID_BITS_VARIABLE
+ default 6 if CPU_R3000 || CPU_TX39XX
+ default 8
+
+config MIPS_ASID_BITS_VARIABLE
+ bool
+
+config MIPS_CRC_SUPPORT
+ bool
+
+# R4600 erratum. Due to the lack of errata information the exact
+# technical details aren't known. I've experimentally found that disabling
+# interrupts during indexed I-cache flushes seems to be sufficient to deal
+# with the issue.
+config WAR_R4600_V1_INDEX_ICACHEOP
+ bool
+
+# Pleasures of the R4600 V1.x. Cite from the IDT R4600 V1.7 errata:
+#
+# 18. The CACHE instructions Hit_Writeback_Invalidate_D, Hit_Writeback_D,
+# Hit_Invalidate_D and Create_Dirty_Excl_D should only be
+# executed if there is no other dcache activity. If the dcache is
+# accessed for another instruction immediately preceding when these
+# cache instructions are executing, it is possible that the dcache
+# tag match outputs used by these cache instructions will be
+# incorrect. These cache instructions should be preceded by at least
+# four instructions that are not any kind of load or store
+# instruction.
+#
+# This is not allowed: lw
+# nop
+# nop
+# nop
+# cache Hit_Writeback_Invalidate_D
+#
+# This is allowed: lw
+# nop
+# nop
+# nop
+# nop
+# cache Hit_Writeback_Invalidate_D
+config WAR_R4600_V1_HIT_CACHEOP
+ bool
+
+# Writeback and invalidate the primary cache dcache before DMA.
+#
+# R4600 v2.0 bug: "The CACHE instructions Hit_Writeback_Inv_D,
+# Hit_Writeback_D, Hit_Invalidate_D and Create_Dirty_Exclusive_D will only
+# operate correctly if the internal data cache refill buffer is empty. These
+# CACHE instructions should be separated from any potential data cache miss
+# by a load instruction to an uncached address to empty the response buffer."
+# (Revision 2.0 device errata from IDT available on https://www.idt.com/
+# in .pdf format.)
+config WAR_R4600_V2_HIT_CACHEOP
+ bool
+
+# From TX49/H2 manual: "If the instruction (i.e. CACHE) is issued for
+# the line which this instruction itself exists, the following
+# operation is not guaranteed."
+#
+# Workaround: do two phase flushing for Index_Invalidate_I
+config WAR_TX49XX_ICACHE_INDEX_INV
+ bool
+
+# The RM7000 processors and the E9000 cores have a bug (though PMC-Sierra
+# opposes it being called that) where invalid instructions in the same
+# I-cache line worth of instructions being fetched may cause spurious
+# exceptions.
+config WAR_ICACHE_REFILLS
+ bool
+
+# On the R10000 up to version 2.6 (not sure about 2.7) there is a bug that
+# may cause ll / sc and lld / scd sequences to execute non-atomically.
+config WAR_R10000_LLSC
+ bool
+
+# 34K core erratum: "Problems Executing the TLBR Instruction"
+config WAR_MIPS34K_MISSED_ITLB
+ bool
+
+#
+# - Highmem only makes sense for the 32-bit kernel.
+# - The current highmem code will only work properly on physically indexed
+# caches such as R3000, SB1, R7000 or those that look like they're virtually
+# indexed such as R4000/R4400 SC and MC versions or R10000. So for the
+# moment we protect the user and offer the highmem option only on machines
+# where it's known to be safe. This will not offer highmem on a few systems
+# such as MIPS32 and MIPS64 CPUs which may have virtually and physically
+# indexed caches, but we're playing safe.
+# - We use SYS_SUPPORTS_HIGHMEM to offer highmem only for systems where we
+# know they might have memory configurations that could make use of highmem
+# support.
+#
+config HIGHMEM
+ bool "High Memory Support"
+ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
+
+config CPU_SUPPORTS_HIGHMEM
+ bool
+
+config SYS_SUPPORTS_HIGHMEM
+ bool
+
+config SYS_SUPPORTS_SMARTMIPS
+ bool
+
+config SYS_SUPPORTS_MICROMIPS
+ bool
+
+config SYS_SUPPORTS_MIPS16
+ bool
+ help
+ This option must be set if a kernel might be executed on a MIPS16-
+ enabled CPU even if MIPS16 is not actually being used. In other
+ words, it makes the kernel MIPS16-tolerant.
+
+config CPU_SUPPORTS_MSA
+ bool
+
+config ARCH_FLATMEM_ENABLE
+ def_bool y
+ depends on !NUMA && !CPU_LOONGSON2EF
+
+config ARCH_SPARSEMEM_ENABLE
+ bool
+ select SPARSEMEM_STATIC if !SGI_IP27
+
+config NUMA
+ bool "NUMA Support"
+ depends on SYS_SUPPORTS_NUMA
+ help
+ Say Y to compile the kernel to support NUMA (Non-Uniform Memory
+ Access). This option improves performance on systems with more
+ than two nodes; on two node systems it is generally better to
+ leave it disabled; on single node systems leave this option
+ disabled.
+
+config SYS_SUPPORTS_NUMA
+ bool
+
+config HAVE_SETUP_PER_CPU_AREA
+ def_bool y
+ depends on NUMA
+
+config NEED_PER_CPU_EMBED_FIRST_CHUNK
+ def_bool y
+ depends on NUMA
+
+config RELOCATABLE
+ bool "Relocatable kernel"
+ depends on SYS_SUPPORTS_RELOCATABLE
+ depends on CPU_MIPS32_R2 || CPU_MIPS64_R2 || \
+ CPU_MIPS32_R5 || CPU_MIPS64_R5 || \
+ CPU_MIPS32_R6 || CPU_MIPS64_R6 || \
+ CPU_P5600 || CAVIUM_OCTEON_SOC
+ help
+ This builds a kernel image that retains relocation information
+ so it can be loaded someplace besides the default 1MB.
+ The relocations make the kernel binary about 15% larger,
+ but are discarded at runtime.
+
+config RELOCATION_TABLE_SIZE
+ hex "Relocation table size"
+ depends on RELOCATABLE
+ range 0x0 0x01000000
+ default "0x00100000"
+ help
+ A table of relocation data will be appended to the kernel binary
+ and parsed at boot to fix up the relocated kernel.
+
+ This option allows the amount of space reserved for the table to be
+ adjusted, although the default of 1MB should be OK in most cases.
+
+ The build will fail and suggest a valid size if this is too small.
+
+ If unsure, leave at the default value.
+
+config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image"
+ depends on RELOCATABLE
+ help
+ Randomizes the physical and virtual address at which the
+ kernel image is loaded, as a security feature that
+ deters exploit attempts relying on knowledge of the location
+ of kernel internals.
+
+ Entropy is generated using any coprocessor 0 registers available.
+
+ The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET.
+
+ If unsure, say N.
+
+config RANDOMIZE_BASE_MAX_OFFSET
+ hex "Maximum kASLR offset" if EXPERT
+ depends on RANDOMIZE_BASE
+ range 0x0 0x40000000 if EVA || 64BIT
+ range 0x0 0x08000000
+ default "0x01000000"
+ help
+ When kASLR is active, this provides the maximum offset that will
+ be applied to the kernel image. It should be set according to the
+ amount of physical RAM available in the target system minus
+ PHYSICAL_START and must be a power of 2.
+
+ This is limited by the size of KSEG0: 256MB on 32-bit, or 1GB with
+ EVA or 64-bit. The default is 16MB.
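+
+ For example, a value of 0x04000000 allows the kernel image to be
+ offset by up to 64MB from its default location.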
+
+config NODES_SHIFT
+ int
+ default "6"
+ depends on NEED_MULTIPLE_NODES
+
+config HW_PERF_EVENTS
+ bool "Enable hardware performance counter support for perf events"
+ depends on PERF_EVENTS && !OPROFILE && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP || CPU_LOONGSON64)
+ default y
+ help
+ Enable hardware performance counter support for perf events. If
+ disabled, perf events will use software events only.
+
+config DMI
+ bool "Enable DMI scanning"
+ depends on MACH_LOONGSON64
+ select DMI_SCAN_MACHINE_NON_EFI_FALLBACK
+ default y
+ help
+ Enable scanning of DMI to identify machine quirks. Say Y
+ here unless you have verified that your setup is not
+ affected by entries in the DMI blacklist. Required by PNP
+ BIOS code.
+
+config SMP
+ bool "Multi-Processing support"
+ depends on SYS_SUPPORTS_SMP
+ help
+ This enables support for systems with more than one CPU. If you have
+ a system with only one CPU, say N. If you have a system with more
+ than one CPU, say Y.
+
+ If you say N here, the kernel will run on uni- and multiprocessor
+ machines, but will use only one CPU of a multiprocessor machine. If
+ you say Y here, the kernel will run on many, but not all,
+ uniprocessor machines. On a uniprocessor machine, the kernel
+ will run faster if you say N here.
+
+ People using multiprocessor machines who say Y here should also say
+ Y to "Enhanced Real Time Clock Support", below.
+
+ See also the SMP-HOWTO available at
+ <https://www.tldp.org/docs.html#howto>.
+
+ If you don't know what to do here, say N.
+
+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs"
+ depends on SMP && SYS_SUPPORTS_HOTPLUG_CPU
+ help
+ Say Y here to allow turning CPUs off and on. CPUs can be
+ controlled through /sys/devices/system/cpu.
+ (Note: power management support will enable this option
+ automatically on SMP systems.)
+ Say N if you want to disable CPU hotplug.
+
+config SMP_UP
+ bool
+
+config SYS_SUPPORTS_MIPS_CMP
+ bool
+
+config SYS_SUPPORTS_MIPS_CPS
+ bool
+
+config SYS_SUPPORTS_SMP
+ bool
+
+config NR_CPUS_DEFAULT_4
+ bool
+
+config NR_CPUS_DEFAULT_8
+ bool
+
+config NR_CPUS_DEFAULT_16
+ bool
+
+config NR_CPUS_DEFAULT_32
+ bool
+
+config NR_CPUS_DEFAULT_64
+ bool
+
+config NR_CPUS
+ int "Maximum number of CPUs (2-256)"
+ range 2 256
+ depends on SMP
+ default "4" if NR_CPUS_DEFAULT_4
+ default "8" if NR_CPUS_DEFAULT_8
+ default "16" if NR_CPUS_DEFAULT_16
+ default "32" if NR_CPUS_DEFAULT_32
+ default "64" if NR_CPUS_DEFAULT_64
+ help
+ This allows you to specify the maximum number of CPUs which this
+ kernel will support. The maximum supported value is 32 for 32-bit
+ kernels and 64 for 64-bit kernels; the minimum value which makes
+ sense is 1 for Qemu (useful only for kernel debugging purposes)
+ and 2 for all others.
+
+ This is purely to save memory - each supported CPU adds
+ approximately eight kilobytes to the kernel image. For best
+ performance you should round up your number of processors to the next
+ power of two.
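+
+ As a rough illustration of the cost, NR_CPUS=64 adds on the order of
+ 64 * 8kB = 512kB to the kernel image compared with a minimal
+ configuration.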
+
+config MIPS_PERF_SHARED_TC_COUNTERS
+ bool
+
+config MIPS_NR_CPU_NR_MAP_1024
+ bool
+
+config MIPS_NR_CPU_NR_MAP
+ int
+ depends on SMP
+ default 1024 if MIPS_NR_CPU_NR_MAP_1024
+ default NR_CPUS if !MIPS_NR_CPU_NR_MAP_1024
+
+#
+# Timer Interrupt Frequency Configuration
+#
+
+choice
+ prompt "Timer frequency"
+ default HZ_250
+ help
+ Allows the configuration of the timer frequency.
+
+ config HZ_24
+ bool "24 HZ" if SYS_SUPPORTS_24HZ || SYS_SUPPORTS_ARBIT_HZ
+
+ config HZ_48
+ bool "48 HZ" if SYS_SUPPORTS_48HZ || SYS_SUPPORTS_ARBIT_HZ
+
+ config HZ_100
+ bool "100 HZ" if SYS_SUPPORTS_100HZ || SYS_SUPPORTS_ARBIT_HZ
+
+ config HZ_128
+ bool "128 HZ" if SYS_SUPPORTS_128HZ || SYS_SUPPORTS_ARBIT_HZ
+
+ config HZ_250
+ bool "250 HZ" if SYS_SUPPORTS_250HZ || SYS_SUPPORTS_ARBIT_HZ
+
+ config HZ_256
+ bool "256 HZ" if SYS_SUPPORTS_256HZ || SYS_SUPPORTS_ARBIT_HZ
+
+ config HZ_1000
+ bool "1000 HZ" if SYS_SUPPORTS_1000HZ || SYS_SUPPORTS_ARBIT_HZ
+
+ config HZ_1024
+ bool "1024 HZ" if SYS_SUPPORTS_1024HZ || SYS_SUPPORTS_ARBIT_HZ
+
+endchoice
+
+config SYS_SUPPORTS_24HZ
+ bool
+
+config SYS_SUPPORTS_48HZ
+ bool
+
+config SYS_SUPPORTS_100HZ
+ bool
+
+config SYS_SUPPORTS_128HZ
+ bool
+
+config SYS_SUPPORTS_250HZ
+ bool
+
+config SYS_SUPPORTS_256HZ
+ bool
+
+config SYS_SUPPORTS_1000HZ
+ bool
+
+config SYS_SUPPORTS_1024HZ
+ bool
+
+config SYS_SUPPORTS_ARBIT_HZ
+ bool
+ default y if !SYS_SUPPORTS_24HZ && \
+ !SYS_SUPPORTS_48HZ && \
+ !SYS_SUPPORTS_100HZ && \
+ !SYS_SUPPORTS_128HZ && \
+ !SYS_SUPPORTS_250HZ && \
+ !SYS_SUPPORTS_256HZ && \
+ !SYS_SUPPORTS_1000HZ && \
+ !SYS_SUPPORTS_1024HZ
+
+config HZ
+ int
+ default 24 if HZ_24
+ default 48 if HZ_48
+ default 100 if HZ_100
+ default 128 if HZ_128
+ default 250 if HZ_250
+ default 256 if HZ_256
+ default 1000 if HZ_1000
+ default 1024 if HZ_1024
+
+config SCHED_HRTICK
+ def_bool HIGH_RES_TIMERS
+
+config KEXEC
+ bool "Kexec system call"
+ select KEXEC_CORE
+ help
+ kexec is a system call that implements the ability to shut down your
+ current kernel, and to start another kernel. It is like a reboot
+ but it is independent of the system firmware. And like a reboot
+ you can start any kernel with it, not just Linux.
+
+ The name comes from the similarity to the exec system call.
+
+ It is an ongoing process to be certain the hardware in a machine
+ is properly shut down, so do not be surprised if this code does not
+ initially work for you. As of this writing the exact hardware
+ interface is strongly in flux, so no good recommendation can be
+ made.
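+
+ As an illustration only (the path and arguments are assumptions, not
+ recommendations), a replacement kernel is typically loaded and started
+ with the kexec-tools userspace utility:
+
+     kexec -l /boot/vmlinux --append="console=ttyS0"
+     kexec -e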
+
+config CRASH_DUMP
+ bool "Kernel crash dumps"
+ help
+ Generate crash dump after being started by kexec.
+ This should normally only be set in special crash dump kernels
+ which are loaded in the main kernel with kexec-tools into
+ a specially reserved region and then later executed after
+ a crash by kdump/kexec. The crash dump kernel must be compiled
+ to a memory address not used by the main kernel or firmware using
+ PHYSICAL_START.
+
+config PHYSICAL_START
+ hex "Physical address where the kernel is loaded"
+ default "0xffffffff84000000"
+ depends on CRASH_DUMP
+ help
+ This gives the CKSEG0 or KSEG0 address where the kernel is loaded.
+ If you plan to use this kernel for capturing the crash dump, change
+ this value to the start of the reserved region (the "X" value as
+ specified in the "crashkernel=YM@XM" command line boot parameter
+ passed to the panicked kernel).
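+
+ As a worked example (values assumed purely for illustration), booting
+ the production kernel with "crashkernel=32M@64M" reserves 32MB
+ starting at physical address 64MB, so the capture kernel would keep
+ the default PHYSICAL_START of 0xffffffff84000000 (CKSEG0 + 64MB).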
+
+config MIPS_O32_FP64_SUPPORT
+ bool "Support for O32 binaries using 64-bit FP" if !CPU_MIPSR6
+ depends on 32BIT || MIPS32_O32
+ help
+ When this is enabled, the kernel will support use of 64-bit floating
+ point registers with binaries using the O32 ABI along with the
+ EF_MIPS_FP64 ELF header flag (typically built with -mfp64). On
+ 32-bit MIPS systems this support is at the cost of increasing the
+ size and complexity of the compiled FPU emulator. Thus if you are
+ running a MIPS32 system and know that none of your userland binaries
+ will require 64-bit floating point, you may wish to reduce the size
+ of your kernel & potentially improve FP emulation performance by
+ saying N here.
+
+ Although binutils currently supports use of this flag the details
+ concerning its effect upon the O32 ABI in userland are still being
+ worked on. In order to avoid userland becoming dependent upon current
+ behaviour before the details have been finalised, this option should
+ be considered experimental and only enabled by those working upon
+ said details.
+
+ If unsure, say N.
+
+config USE_OF
+ bool
+ select OF
+ select OF_EARLY_FLATTREE
+ select IRQ_DOMAIN
+
+config UHI_BOOT
+ bool
+
+config BUILTIN_DTB
+ bool
+
+choice
+ prompt "Kernel appended dtb support" if USE_OF
+ default MIPS_NO_APPENDED_DTB
+
+ config MIPS_NO_APPENDED_DTB
+ bool "None"
+ help
+ Do not enable appended dtb support.
+
+ config MIPS_ELF_APPENDED_DTB
+ bool "vmlinux"
+ help
+ With this option, the boot code will look for a device tree binary
+ (DTB) included in the vmlinux ELF section .appended_dtb. By default
+ it is empty and the DTB can be appended using the binutils command
+ objcopy:
+
+ objcopy --update-section .appended_dtb=<filename>.dtb vmlinux
+
+ This is meant as a backward compatibility convenience for those
+ systems with a bootloader that can't be upgraded to accommodate
+ the documented boot protocol using a device tree.
+
+ config MIPS_RAW_APPENDED_DTB
+ bool "vmlinux.bin or vmlinuz.bin"
+ help
+ With this option, the boot code will look for a device tree binary
+ (DTB) appended to raw vmlinux.bin or vmlinuz.bin.
+ (e.g. cat vmlinux.bin <filename>.dtb > vmlinux_w_dtb).
+
+ This is meant as a backward compatibility convenience for those
+ systems with a bootloader that can't be upgraded to accommodate
+ the documented boot protocol using a device tree.
+
+ Beware that there is very little in terms of protection against
+ this option being confused by leftover garbage in memory that might
+ look like a DTB header after a reboot if no actual DTB is appended
+ to vmlinux.bin. Do not leave this option active in a production kernel
+ if you don't intend to always append a DTB.
+endchoice
+
+choice
+ prompt "Kernel command line type" if !CMDLINE_OVERRIDE
+ default MIPS_CMDLINE_FROM_DTB if USE_OF && !ATH79 && !MACH_INGENIC && \
+ !MACH_LOONGSON64 && !MIPS_MALTA && \
+ !CAVIUM_OCTEON_SOC
+ default MIPS_CMDLINE_FROM_BOOTLOADER
+
+ config MIPS_CMDLINE_FROM_DTB
+ depends on USE_OF
+ bool "Dtb kernel arguments if available"
+
+ config MIPS_CMDLINE_DTB_EXTEND
+ depends on USE_OF
+ bool "Extend dtb kernel arguments with bootloader arguments"
+
+ config MIPS_CMDLINE_FROM_BOOTLOADER
+ bool "Bootloader kernel arguments if available"
+
+ config MIPS_CMDLINE_BUILTIN_EXTEND
+ depends on CMDLINE_BOOL
+ bool "Extend builtin kernel arguments with bootloader arguments"
+endchoice
+
+endmenu
+
+config LOCKDEP_SUPPORT
+ bool
+ default y
+
+config STACKTRACE_SUPPORT
+ bool
+ default y
+
+config PGTABLE_LEVELS
+ int
+ default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
+ default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
+ default 2
+
+config MIPS_AUTO_PFN_OFFSET
+ bool
+
+menu "Bus options (PCI, PCMCIA, EISA, ISA, TC)"
+
+config PCI_DRIVERS_GENERIC
+ select PCI_DOMAINS_GENERIC if PCI
+ bool
+
+config PCI_DRIVERS_LEGACY
+ def_bool !PCI_DRIVERS_GENERIC
+ select NO_GENERIC_PCI_IOPORT_MAP
+ select PCI_DOMAINS if PCI
+
+#
+# ISA support is now enabled via select. Too many systems still have one
+# or the other ISA chip on the board that users don't know about, so don't
+# expect users to choose the right thing ...
+#
+config ISA
+ bool
+
+config TC
+ bool "TURBOchannel support"
+ depends on MACH_DECSTATION
+ help
+ TURBOchannel is a DEC (now Compaq (now HP)) bus for Alpha and MIPS
+ processors. TURBOchannel programming specifications are available
+ at:
+ <ftp://ftp.hp.com/pub/alphaserver/archive/triadd/>
+ and:
+ <http://www.computer-refuge.org/classiccmp/ftp.digital.com/pub/DEC/TriAdd/>
+ Linux driver support status is documented at:
+ <http://www.linux-mips.org/wiki/DECstation>
+
+config MMU
+ bool
+ default y
+
+config ARCH_MMAP_RND_BITS_MIN
+ default 12 if 64BIT
+ default 8
+
+config ARCH_MMAP_RND_BITS_MAX
+ default 18 if 64BIT
+ default 15
+
+config ARCH_MMAP_RND_COMPAT_BITS_MIN
+ default 8
+
+config ARCH_MMAP_RND_COMPAT_BITS_MAX
+ default 15
+
+config I8253
+ bool
+ select CLKSRC_I8253
+ select CLKEVT_I8253
+ select MIPS_EXTERNAL_TIMER
+
+config ZONE_DMA
+ bool
+
+config ZONE_DMA32
+ bool
+
+endmenu
+
+config TRAD_SIGNALS
+ bool
+
+config MIPS32_COMPAT
+ bool
+
+config COMPAT
+ bool
+
+config SYSVIPC_COMPAT
+ bool
+
+config MIPS32_O32
+ bool "Kernel support for o32 binaries"
+ depends on 64BIT
+ select ARCH_WANT_OLD_COMPAT_IPC
+ select COMPAT
+ select MIPS32_COMPAT
+ select SYSVIPC_COMPAT if SYSVIPC
+ help
+ Select this option if you want to run o32 binaries. These are pure
+ 32-bit binaries as used by the 32-bit Linux/MIPS port. Most
+ existing binaries are in this format.
+
+ If unsure, say Y.
+
+config MIPS32_N32
+ bool "Kernel support for n32 binaries"
+ depends on 64BIT
+ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+ select COMPAT
+ select MIPS32_COMPAT
+ select SYSVIPC_COMPAT if SYSVIPC
+ help
+ Select this option if you want to run n32 binaries. These are
+ 64-bit binaries using 32-bit quantities for addressing and certain
+ data that would normally be 64-bit. They are used in special
+ cases.
+
+ If unsure, say N.
+
+config BINFMT_ELF32
+ bool
+ default y if MIPS32_O32 || MIPS32_N32
+ select ELFCORE
+
+menu "Power management options"
+
+config ARCH_HIBERNATION_POSSIBLE
+ def_bool y
+ depends on SYS_SUPPORTS_HOTPLUG_CPU || !SMP
+
+config ARCH_SUSPEND_POSSIBLE
+ def_bool y
+ depends on SYS_SUPPORTS_HOTPLUG_CPU || !SMP
+
+source "kernel/power/Kconfig"
+
+endmenu
+
+config MIPS_EXTERNAL_TIMER
+ bool
+
+menu "CPU Power Management"
+
+if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
+source "drivers/cpufreq/Kconfig"
+endif
+
+source "drivers/cpuidle/Kconfig"
+
+endmenu
+
+source "drivers/firmware/Kconfig"
+
+source "arch/mips/kvm/Kconfig"
+
+source "arch/mips/vdso/Kconfig"
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
new file mode 100644
index 000000000..7a8d94cdd
--- /dev/null
+++ b/arch/mips/Kconfig.debug
@@ -0,0 +1,161 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config TRACE_IRQFLAGS_SUPPORT
+ bool
+ default y
+
+config EARLY_PRINTK
+ bool "Early printk" if EXPERT
+ depends on SYS_HAS_EARLY_PRINTK
+ default y
+ help
+ This option enables special console drivers which allow the kernel
+ to print messages very early in the bootup process.
+
+ This is useful for kernel debugging when your machine crashes very
+ early before the console code is initialized. For normal operation,
+ it is not recommended because it looks ugly on some machines and
+ doesn't cooperate with an X server. You should normally say N here,
+ unless you want to debug such a crash.
+
+config EARLY_PRINTK_8250
+ bool
+ depends on EARLY_PRINTK && USE_GENERIC_EARLY_PRINTK_8250
+ default y
+ help
+ "8250/16550 and compatible serial early printk driver"
+ If you say Y here, it will be possible to use a 8250/16550 serial
+ port as the boot console.
+
+config USE_GENERIC_EARLY_PRINTK_8250
+ bool
+
+config CMDLINE_BOOL
+ bool "Built-in kernel command line"
+ help
+ For most systems, it is the firmware or second-stage bootloader that
+ by default specifies the kernel command line options. However,
+ it might be necessary or advantageous to either override the
+ default kernel command line or add a few extra options to it.
+ For such cases, this option allows you to hardcode your own
+ command line options directly into the kernel. For that, you
+ should choose 'Y' here, and fill in the extra boot arguments
+ in CONFIG_CMDLINE.
+
+ The built-in options will be concatenated to the default command
+ line if CMDLINE_OVERRIDE is set to 'N'. Otherwise, the default
+ command line will be ignored and replaced by the built-in string.
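+
+ For example, a built-in string such as "console=ttyS0,115200" is
+ appended to the firmware-provided arguments by default; with
+ CMDLINE_OVERRIDE also enabled it replaces them entirely.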
+
+ Most MIPS systems will normally expect 'N' here and rely upon
+ the command line from the firmware or the second-stage bootloader.
+
+config CMDLINE
+ string "Default kernel command string"
+ depends on CMDLINE_BOOL
+ help
+ On some platforms, there is currently no way for the boot loader to
+ pass arguments to the kernel. For these platforms, and for the cases
+ when you want to add some extra options to the command line or ignore
+ the default command line, you can supply some command-line options at
+ build time by entering them here. In other cases you can specify
+ kernel args so that you don't have to set them up in board prom
+ initialization routines.
+
+ For more information, see the CMDLINE_BOOL and CMDLINE_OVERRIDE
+ options.
+
+config CMDLINE_OVERRIDE
+ bool "Built-in command line overrides firmware arguments"
+ depends on CMDLINE_BOOL
+ help
+ By setting this option to 'Y' you will have your kernel ignore
+ command line arguments from firmware or second stage bootloader.
+ Instead, the built-in command line will be used exclusively.
+
+ Normally, you will choose 'N' here.
+
+config SB1XXX_CORELIS
+ bool "Corelis Debugger"
+ depends on SIBYTE_SB1xxx_SOC
+ select DEBUG_INFO if !COMPILE_TEST
+ help
+ Select compile flags that produce code that can be processed by the
+ Corelis mksym utility and UDB Emulator.
+
+config DEBUG_ZBOOT
+ bool "Enable compressed kernel support debugging"
+ depends on DEBUG_KERNEL && SYS_SUPPORTS_ZBOOT
+ default n
+ help
+ If you want to add compressed kernel support to a new board, and the
+ board supports a uart16550-compatible serial port, please select
+ SYS_SUPPORTS_ZBOOT_UART16550 for your board and enable this option to
+ debug it.
+
+ If your board doesn't support a uart16550-compatible serial port, you
+ can try to select SYS_SUPPORTS_ZBOOT and use other methods to
+ debug it. For example, add new serial port support just as
+ arch/mips/boot/compressed/uart-16550.c does.
+
+ After the compressed kernel support works, please disable this option
+ to reduce the kernel image size and speed up the booting procedure a
+ little.
+
+config SPINLOCK_TEST
+ bool "Enable spinlock timing tests in debugfs"
+ depends on DEBUG_FS
+ default n
+ help
+ Add several files to the debugfs to test spinlock speed.
+
+config SCACHE_DEBUGFS
+ bool "L2 cache debugfs entries"
+ depends on DEBUG_FS
+ help
+ Enable this to allow parts of the L2 cache configuration, such as
+ whether or not prefetching is enabled, to be exposed to userland
+ via debugfs.
+
+ If unsure, say N.
+
+menuconfig MIPS_CPS_NS16550_BOOL
+ bool "CPS SMP NS16550 UART output"
+ depends on MIPS_CPS
+ help
+ Output debug information via an ns16550 compatible UART if exceptions
+ occur early in the boot process of a secondary core.
+
+if MIPS_CPS_NS16550_BOOL
+
+config MIPS_CPS_NS16550
+ def_bool MIPS_CPS_NS16550_BASE != 0
+
+config MIPS_CPS_NS16550_BASE
+ hex "UART Base Address"
+ default 0x1b0003f8 if MIPS_MALTA
+ default 0
+ help
+ The base address of the ns16550 compatible UART on which to output
+ debug information from the early stages of core startup.
+
+ This is only used if non-zero.
+
+config MIPS_CPS_NS16550_SHIFT
+ int "UART Register Shift"
+ default 0
+ help
+ The number of bits to shift ns16550 register indices by in order to
+ form their addresses. That is, log base 2 of the span between
+ adjacent ns16550 registers in the system.
+
+config MIPS_CPS_NS16550_WIDTH
+ int "UART Register Width"
+ default 1
+ help
+ ns16550 register width. UART register IO access methods are
+ selected in accordance with this parameter. Setting it to 1, 2 or
+ 4 causes UART registers to be accessed with lb/sb, lh/sh or lw/sw
+ instructions respectively. Any other value falls back to lb/sb
+ instructions.
+
+endif # MIPS_CPS_NS16550_BOOL
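As a rough illustration of how the three MIPS_CPS_NS16550_* options above fit
together, here is a minimal, self-contained C sketch (helper and macro names
are hypothetical, not code taken from the kernel): register idx of the UART
sits at BASE + (idx << SHIFT), and WIDTH selects the load/store size used to
access it.

	#include <stdint.h>

	#define UART_BASE  0x1b0003f8UL	/* MIPS_CPS_NS16550_BASE (Malta default) */
	#define UART_SHIFT 0		/* MIPS_CPS_NS16550_SHIFT */
	#define UART_WIDTH 1		/* MIPS_CPS_NS16550_WIDTH */

	/* Address of ns16550 register 'idx': base + (idx << shift).
	 * On real MIPS hardware this physical address would be accessed
	 * through an uncached mapping such as KSEG1. */
	static volatile void *uart_reg(unsigned int idx)
	{
		return (volatile void *)(UART_BASE +
					 ((unsigned long)idx << UART_SHIFT));
	}

	/* WIDTH picks the access size: 1 -> lb/sb, 2 -> lh/sh, 4 -> lw/sw. */
	static void uart_write(unsigned int idx, uint32_t val)
	{
		switch (UART_WIDTH) {
		case 2: *(volatile uint16_t *)uart_reg(idx) = val; break;
		case 4: *(volatile uint32_t *)uart_reg(idx) = val; break;
		default: *(volatile uint8_t *)uart_reg(idx) = val; break;
		}
	}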
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
new file mode 100644
index 000000000..acab8018a
--- /dev/null
+++ b/arch/mips/Makefile
@@ -0,0 +1,567 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994, 95, 96, 2003 by Ralf Baechle
+# DECStation modifications by Paul M. Antoine, 1996
+# Copyright (C) 2002, 2003, 2004 Maciej W. Rozycki
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" to clean up for this architecture.
+#
+
+archscripts: scripts_basic
+ $(Q)$(MAKE) $(build)=arch/mips/tools elf-entry
+ifeq ($(CONFIG_CPU_LOONGSON3_WORKAROUNDS),y)
+ $(Q)$(MAKE) $(build)=arch/mips/tools loongson3-llsc-check
+endif
+ $(Q)$(MAKE) $(build)=arch/mips/boot/tools relocs
+
+KBUILD_DEFCONFIG := 32r2el_defconfig
+KBUILD_DTBS := dtbs
+
+#
+# Select the object file format to substitute into the linker script.
+#
+ifdef CONFIG_CPU_LITTLE_ENDIAN
+32bit-tool-archpref = mipsel
+64bit-tool-archpref = mips64el
+32bit-bfd = elf32-tradlittlemips
+64bit-bfd = elf64-tradlittlemips
+32bit-emul = elf32ltsmip
+64bit-emul = elf64ltsmip
+else
+32bit-tool-archpref = mips
+64bit-tool-archpref = mips64
+32bit-bfd = elf32-tradbigmips
+64bit-bfd = elf64-tradbigmips
+32bit-emul = elf32btsmip
+64bit-emul = elf64btsmip
+endif
+
+ifdef CONFIG_32BIT
+tool-archpref = $(32bit-tool-archpref)
+UTS_MACHINE := mips
+endif
+ifdef CONFIG_64BIT
+tool-archpref = $(64bit-tool-archpref)
+UTS_MACHINE := mips64
+endif
+
+ifneq ($(SUBARCH),$(ARCH))
+ ifeq ($(CROSS_COMPILE),)
+ CROSS_COMPILE := $(call cc-cross-prefix, $(tool-archpref)-linux- $(tool-archpref)-linux-gnu- $(tool-archpref)-unknown-linux-gnu-)
+ endif
+endif
+
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ ifndef KBUILD_MCOUNT_RA_ADDRESS
+ ifeq ($(call cc-option-yn,-mmcount-ra-address), y)
+ cflags-y += -mmcount-ra-address -DKBUILD_MCOUNT_RA_ADDRESS
+ endif
+ endif
+endif
+cflags-y += $(call cc-option, -mno-check-zero-division)
+
+ifdef CONFIG_32BIT
+ld-emul = $(32bit-emul)
+vmlinux-32 = vmlinux
+vmlinux-64 = vmlinux.64
+
+cflags-y += -mabi=32
+endif
+
+ifdef CONFIG_64BIT
+ld-emul = $(64bit-emul)
+vmlinux-32 = vmlinux.32
+vmlinux-64 = vmlinux
+
+cflags-y += -mabi=64
+endif
+
+all-$(CONFIG_BOOT_ELF32) := $(vmlinux-32)
+all-$(CONFIG_BOOT_ELF64) := $(vmlinux-64)
+all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlinuz
+
+#
+# GCC uses -G 0 -mabicalls -fpic as default. We don't want PIC in the kernel
+# code since it only slows down the whole thing. At some point we might make
+# use of global pointer optimizations but their use of $28 conflicts with
+# the current pointer optimization.
+#
+# The DECStation requires an ECOFF kernel for remote booting, other MIPS
+# machines may also. Since BFD is incredibly buggy with respect to
+# crossformat linking we rely on the elf2ecoff tool for format conversion.
+#
+cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
+cflags-y += -msoft-float
+LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
+KBUILD_AFLAGS_MODULE += -mlong-calls
+KBUILD_CFLAGS_MODULE += -mlong-calls
+
+ifeq ($(CONFIG_RELOCATABLE),y)
+LDFLAGS_vmlinux += --emit-relocs
+endif
+
+#
+# Pass -msoft-float to GAS if it supports it. However on newer binutils
+# (specifically newer than 2.24.51.20140728) we then also need to explicitly
+# set ".set hardfloat" in all files which manipulate floating point registers.
+#
+ifneq ($(call as-option,-Wa$(comma)-msoft-float,),)
+ cflags-y += -DGAS_HAS_SET_HARDFLOAT -Wa,-msoft-float
+endif
+
+cflags-y += -ffreestanding
+
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
+cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -EL
+
+cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
+ -fno-omit-frame-pointer
+
+# Some distribution-specific toolchains might pass the -fstack-check
+# option during the build, which adds a simple stack-probe at the beginning
+# of every function. This stack probe is to ensure that there is enough
+# stack space, else a SEGV is generated. This is not desirable for MIPS
+# as kernel stacks are small, placed in unmapped virtual memory, and do not
+# grow when overflowed. Especially on SGI IP27 platforms, this check will
+# lead to a NULL pointer dereference in _raw_spin_lock_irq.
+#
+# In disassembly, this stack probe appears at the top of a function as:
+# sd zero,<offset>(sp)
+# Where <offset> is a negative value.
+#
+cflags-y += -fno-stack-check
+
+# binutils from v2.35, when built with --enable-mips-fix-loongson3-llsc=yes,
+# supports an -mfix-loongson3-llsc flag which emits a sync prior to each ll
+# instruction to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h
+# for a description).
+#
+# We disable this in order to prevent the assembler meddling with the
+# instruction that labels refer to, ie. if we label an ll instruction:
+#
+# 1: ll v0, 0(a0)
+#
+# ...then with the assembler fix applied the label may actually point at a sync
+# instruction inserted by the assembler, and if we were using the label in an
+# exception table the table would no longer contain the address of the ll
+# instruction.
+#
+# Avoid this by explicitly disabling that assembler behaviour.
+#
+cflags-y += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
+
+#
+# CPU-dependent compiler/assembler options for optimization.
+#
+cflags-$(CONFIG_CPU_R3000) += -march=r3000
+cflags-$(CONFIG_CPU_TX39XX) += -march=r3900
+cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap
+cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap
+cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R5) += -march=mips32r5 -Wa,--trap -modd-spreg
+cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap -modd-spreg
+cflags-$(CONFIG_CPU_MIPS64_R1) += -march=mips64 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R2) += -march=mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R5) += -march=mips64r5 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap
+cflags-$(CONFIG_CPU_P5600) += -march=p5600 -Wa,--trap -modd-spreg
+cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap
+cflags-$(CONFIG_CPU_R5500) += $(call cc-option,-march=r5500,-march=r5000) \
+ -Wa,--trap
+cflags-$(CONFIG_CPU_NEVADA) += $(call cc-option,-march=rm5200,-march=r5000) \
+ -Wa,--trap
+cflags-$(CONFIG_CPU_RM7000) += $(call cc-option,-march=rm7000,-march=r5000) \
+ -Wa,--trap
+cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1,-march=r5000) \
+ -Wa,--trap
+cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-mno-mdmx)
+cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-mno-mips3d)
+cflags-$(CONFIG_CPU_R10000) += $(call cc-option,-march=r10000,-march=r8000) \
+ -Wa,--trap
+cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += $(call cc-option,-march=octeon) -Wa,--trap
+ifeq (,$(findstring march=octeon, $(cflags-$(CONFIG_CPU_CAVIUM_OCTEON))))
+cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -Wa,-march=octeon
+endif
+cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1
+cflags-$(CONFIG_CPU_BMIPS) += -march=mips32 -Wa,-mips32 -Wa,--trap
+
+cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
+cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,)
+cflags-$(CONFIG_CPU_DADDI_WORKAROUNDS) += $(call cc-option,-mno-daddi,)
+
+# For smartmips configurations, there are hundreds of warnings due to ISA overrides
+# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
+# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
+# similar directives in the kernel will spam the build logs with the following warnings:
+# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
+# or
+# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
+# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
+# been fixed properly.
+mips-cflags := $(cflags-y)
+ifeq ($(CONFIG_CPU_HAS_SMARTMIPS),y)
+smartmips-ase := $(call cc-option-yn,$(mips-cflags) -msmartmips)
+cflags-$(smartmips-ase) += -msmartmips -Wa,--no-warn
+endif
+ifeq ($(CONFIG_CPU_MICROMIPS),y)
+micromips-ase := $(call cc-option-yn,$(mips-cflags) -mmicromips)
+cflags-$(micromips-ase) += -mmicromips
+endif
+ifeq ($(CONFIG_CPU_HAS_MSA),y)
+toolchain-msa := $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$(comma)-mmsa)
+cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
+endif
+toolchain-virt := $(call cc-option-yn,$(mips-cflags) -mvirt)
+cflags-$(toolchain-virt) += -DTOOLCHAIN_SUPPORTS_VIRT
+# For -mmicromips, use -Wa,-fatal-warnings to catch unsupported -mxpa which
+# only warns
+xpa-cflags-y := $(mips-cflags)
+xpa-cflags-$(micromips-ase) += -mmicromips -Wa$(comma)-fatal-warnings
+toolchain-xpa := $(call cc-option-yn,$(xpa-cflags-y) -mxpa)
+cflags-$(toolchain-xpa) += -DTOOLCHAIN_SUPPORTS_XPA
+toolchain-crc := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mcrc)
+cflags-$(toolchain-crc) += -DTOOLCHAIN_SUPPORTS_CRC
+toolchain-dsp := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mdsp)
+cflags-$(toolchain-dsp) += -DTOOLCHAIN_SUPPORTS_DSP
+toolchain-ginv := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mginv)
+cflags-$(toolchain-ginv) += -DTOOLCHAIN_SUPPORTS_GINV
+
+#
+# Firmware support
+#
+libs-$(CONFIG_FW_ARC) += arch/mips/fw/arc/
+libs-$(CONFIG_FW_CFE) += arch/mips/fw/cfe/
+libs-$(CONFIG_FW_SNIPROM) += arch/mips/fw/sni/
+libs-y += arch/mips/fw/lib/
+
+#
+# Kernel compression
+#
+ifdef CONFIG_SYS_SUPPORTS_ZBOOT
+COMPRESSION_FNAME = vmlinuz
+else
+COMPRESSION_FNAME = vmlinux
+endif
+
+#
+# Board-dependent options and extra files
+#
+include arch/mips/Kbuild.platforms
+
+ifdef CONFIG_PHYSICAL_START
+load-y = $(CONFIG_PHYSICAL_START)
+endif
+
+entry-y = $(shell $(objtree)/arch/mips/tools/elf-entry vmlinux)
+cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
+drivers-$(CONFIG_PCI) += arch/mips/pci/
+
+#
+# Automatically detect the build format. By default we choose
+# the ELF format according to the load address.
+# We can always force a build with a 64-bit symbol format by
+# passing the 'KBUILD_SYM32=no' option on the make command line.
+#
+ifdef CONFIG_64BIT
+ ifndef KBUILD_SYM32
+ ifeq ($(shell expr $(load-y) \< 0xffffffff80000000), 0)
+ KBUILD_SYM32 = y
+ endif
+ endif
+
+ ifeq ($(KBUILD_SYM32)$(call cc-option-yn,-msym32), yy)
+ cflags-y += -msym32 -DKBUILD_64BIT_SYM32
+ else
+ ifeq ($(CONFIG_CPU_DADDI_WORKAROUNDS), y)
+ $(error CONFIG_CPU_DADDI_WORKAROUNDS unsupported without -msym32)
+ endif
+ endif
+endif
+
+# When linking a 32-bit executable the LLVM linker cannot cope with a
+# 32-bit load address that has been sign-extended to 64 bits. Simply
+# remove the upper 32 bits then, as it is safe to do so with other
+# linkers.
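+# For example, a load-y of 0xffffffff80100000 becomes a load-ld of
+# 0x80100000 for the 32-bit link.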
+ifdef CONFIG_64BIT
+ load-ld = $(load-y)
+else
+ load-ld = $(subst 0xffffffff,0x,$(load-y))
+endif
+
+KBUILD_AFLAGS += $(cflags-y)
+KBUILD_CFLAGS += $(cflags-y)
+KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) -DLINKER_LOAD_ADDRESS=$(load-ld)
+KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
+
+bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \
+ LINKER_LOAD_ADDRESS=$(load-ld) \
+ VMLINUX_ENTRY_ADDRESS=$(entry-y) \
+ PLATFORM="$(platform-y)" \
+ ITS_INPUTS="$(its-y)"
+ifdef CONFIG_32BIT
+bootvars-y += ADDR_BITS=32
+endif
+ifdef CONFIG_64BIT
+bootvars-y += ADDR_BITS=64
+endif
+
+# This is required to get dwarf unwinding tables into .debug_frame
+# instead of .eh_frame so we don't discard them.
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+
+KBUILD_LDFLAGS += -m $(ld-emul)
+
+ifdef CONFIG_MIPS
+CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
+ egrep -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
+ sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
+endif
+
+OBJCOPYFLAGS += --remove-section=.reginfo
+
+head-y := arch/mips/kernel/head.o
+
+libs-y += arch/mips/lib/
+libs-$(CONFIG_MIPS_FP_SUPPORT) += arch/mips/math-emu/
+
+# See arch/mips/Kbuild for content of core part of the kernel
+core-y += arch/mips/
+
+drivers-y += arch/mips/crypto/
+drivers-$(CONFIG_OPROFILE) += arch/mips/oprofile/
+
+# suspend and hibernation support
+drivers-$(CONFIG_PM) += arch/mips/power/
+
+# boot image targets (arch/mips/boot/)
+boot-y := vmlinux.bin
+boot-y += vmlinux.ecoff
+boot-y += vmlinux.srec
+ifeq ($(shell expr $(load-y) \< 0xffffffff80000000 2> /dev/null), 0)
+boot-y += uImage
+boot-y += uImage.bin
+boot-y += uImage.bz2
+boot-y += uImage.gz
+boot-y += uImage.lzma
+boot-y += uImage.lzo
+endif
+boot-y += vmlinux.itb
+boot-y += vmlinux.gz.itb
+boot-y += vmlinux.bz2.itb
+boot-y += vmlinux.lzma.itb
+boot-y += vmlinux.lzo.itb
+
+# compressed boot image targets (arch/mips/boot/compressed/)
+bootz-y := vmlinuz
+bootz-y += vmlinuz.bin
+bootz-y += vmlinuz.ecoff
+bootz-y += vmlinuz.srec
+ifeq ($(shell expr $(zload-y) \< 0xffffffff80000000 2> /dev/null), 0)
+bootz-y += uzImage.bin
+endif
+
+#
+# Some machines like the Indy need 32-bit ELF binaries for booting purposes.
+# Others need ECOFF, so we build a 32-bit ELF binary for them which we then
+# convert to ECOFF using elf2ecoff.
+#
+quiet_cmd_32 = OBJCOPY $@
+ cmd_32 = $(OBJCOPY) -O $(32bit-bfd) $(OBJCOPYFLAGS) $< $@
+vmlinux.32: vmlinux
+ $(call cmd,32)
+
+#
+# The 64-bit ELF tools are pretty broken so at this time we generate 64-bit
+# ELF files from 32-bit files by conversion.
+#
+quiet_cmd_64 = OBJCOPY $@
+ cmd_64 = $(OBJCOPY) -O $(64bit-bfd) $(OBJCOPYFLAGS) $< $@
+vmlinux.64: vmlinux
+ $(call cmd,64)
+
+all: $(all-y) $(KBUILD_DTBS)
+
+# boot
+$(boot-y): $(vmlinux-32) FORCE
+ $(Q)$(MAKE) $(build)=arch/mips/boot VMLINUX=$(vmlinux-32) \
+ $(bootvars-y) arch/mips/boot/$@
+
+ifdef CONFIG_SYS_SUPPORTS_ZBOOT
+# boot/compressed
+$(bootz-y): $(vmlinux-32) FORCE
+ $(Q)$(MAKE) $(build)=arch/mips/boot/compressed \
+ $(bootvars-y) 32bit-bfd=$(32bit-bfd) $@
+else
+vmlinuz: FORCE
+ @echo ' CONFIG_SYS_SUPPORTS_ZBOOT is not enabled'
+ /bin/false
+endif
+
+
+CLEAN_FILES += vmlinux.32 vmlinux.64
+
+# device-trees
+core-y += arch/mips/boot/dts/
+
+archprepare:
+ifdef CONFIG_MIPS32_N32
+ @$(kecho) ' Checking missing-syscalls for N32'
+ $(Q)$(MAKE) $(build)=. missing-syscalls missing_syscalls_flags="-mabi=n32"
+endif
+ifdef CONFIG_MIPS32_O32
+ @$(kecho) ' Checking missing-syscalls for O32'
+ $(Q)$(MAKE) $(build)=. missing-syscalls missing_syscalls_flags="-mabi=32"
+endif
+
+install:
+ $(Q)install -D -m 755 vmlinux $(INSTALL_PATH)/vmlinux-$(KERNELRELEASE)
+ifdef CONFIG_SYS_SUPPORTS_ZBOOT
+ $(Q)install -D -m 755 vmlinuz $(INSTALL_PATH)/vmlinuz-$(KERNELRELEASE)
+endif
+ $(Q)install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE)
+ $(Q)install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE)
+
+archclean:
+ $(Q)$(MAKE) $(clean)=arch/mips/boot
+ $(Q)$(MAKE) $(clean)=arch/mips/boot/compressed
+ $(Q)$(MAKE) $(clean)=arch/mips/boot/tools
+
+archheaders:
+ $(Q)$(MAKE) $(build)=arch/mips/kernel/syscalls all
+
+define archhelp
+ echo ' install - install kernel into $(INSTALL_PATH)'
+ echo ' vmlinux.ecoff - ECOFF boot image'
+ echo ' vmlinux.bin - Raw binary boot image'
+ echo ' vmlinux.srec - SREC boot image'
+ echo ' vmlinux.32 - 64-bit boot image wrapped in 32bits (IP22/IP32)'
+ echo ' vmlinuz - Compressed boot (zboot) image'
+ echo ' vmlinuz.ecoff - ECOFF zboot image'
+ echo ' vmlinuz.bin - Raw binary zboot image'
+ echo ' vmlinuz.srec - SREC zboot image'
+ echo ' uImage - U-Boot image'
+ echo ' uImage.bin - U-Boot image (uncompressed)'
+ echo ' uImage.bz2 - U-Boot image (bz2)'
+ echo ' uImage.gz - U-Boot image (gzip)'
+ echo ' uImage.lzma - U-Boot image (lzma)'
+ echo ' uImage.lzo - U-Boot image (lzo)'
+ echo ' uzImage.bin - U-Boot image (self-extracting)'
+ echo
+ echo ' These will be built by default as appropriate for a configured platform.'
+ echo
+ echo ' If you are targeting a system supported by generic kernels you may'
+ echo ' configure the kernel for a given architecture target like so:'
+ echo
+ echo ' {micro32,32,64}{r1,r2,r6}{el,}_defconfig <BOARDS="list of boards">'
+ echo
+ echo ' Where BOARDS is some subset of the following:'
+ for board in $(sort $(BOARDS)); do echo " $${board}"; done
+ echo
+ echo ' Specifically the following generic default configurations are'
+ echo ' supported:'
+ echo
+ $(foreach cfg,$(generic_defconfigs),
+ printf " %-24s - Build generic kernel for $(call describe_generic_defconfig,$(cfg))\n" $(cfg);)
+ echo
+ echo ' The following legacy default configurations have been converted to'
+ echo ' generic and can still be used:'
+ echo
+ $(foreach cfg,$(sort $(legacy_defconfigs)),
+ printf " %-24s - Build $($(cfg)-y)\n" $(cfg);)
+ echo
+ echo ' Otherwise, the following default configurations are available:'
+endef
+
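+# For instance, "make 32r2el_defconfig BOARDS=ocelot" produces the same
+# configuration that the legacy ocelot_defconfig wrapper near the end of
+# this file expands to.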
+generic_config_dir = $(srctree)/arch/$(ARCH)/configs/generic
+generic_defconfigs :=
+
+#
+# If the user generates a generic kernel configuration without specifying a
+# list of boards to include the config fragments for, default to including all
+# available board config fragments.
+#
+ifeq ($(BOARDS),)
+BOARDS = $(patsubst board-%.config,%,$(notdir $(wildcard $(generic_config_dir)/board-*.config)))
+endif
+
+#
+# Generic kernel configurations which merge generic_defconfig with the
+# appropriate config fragments from arch/mips/configs/generic/, resulting in
+# the ability to easily configure the kernel for a given architecture,
+# endianness & set of boards without duplicating the needed configuration in
+# hundreds of defconfig files.
+#
+define gen_generic_defconfigs
+$(foreach bits,$(1),$(foreach rev,$(2),$(foreach endian,$(3),
+target := $(bits)$(rev)$(filter el,$(endian))_defconfig
+generic_defconfigs += $$(target)
+$$(target): $(generic_config_dir)/$(bits)$(rev).config
+$$(target): $(generic_config_dir)/$(endian).config
+)))
+endef
+
+$(eval $(call gen_generic_defconfigs,32 64,r1 r2 r6,eb el))
+$(eval $(call gen_generic_defconfigs,micro32,r2,eb el))
+
+define describe_generic_defconfig
+$(subst 32r,MIPS32 r,$(subst 64r,MIPS64 r,$(subst el, little endian,$(patsubst %_defconfig,%,$(1)))))
+endef
+
+.PHONY: $(generic_defconfigs)
+$(generic_defconfigs):
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
+ -m -O $(objtree) $(srctree)/arch/$(ARCH)/configs/generic_defconfig $^ | \
+ grep -Ev '^#'
+ $(Q)cp $(KCONFIG_CONFIG) $(objtree)/.config.$@
+ $(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig \
+ KCONFIG_CONFIG=$(objtree)/.config.$@ >/dev/null
+ $(Q)$(CONFIG_SHELL) $(srctree)/arch/$(ARCH)/tools/generic-board-config.sh \
+ $(srctree) $(objtree) $(objtree)/.config.$@ $(KCONFIG_CONFIG) \
+ "$(origin BOARDS)" $(BOARDS)
+ $(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
+
+#
+# Prevent generic merge_config rules attempting to merge single fragments
+#
+$(generic_config_dir)/%.config: ;
+
+#
+# Prevent direct use of generic_defconfig, which is intended to be used as the
+# basis of the various ISA-specific targets generated above.
+#
+.PHONY: generic_defconfig
+generic_defconfig:
+ $(Q)echo "generic_defconfig is not intended for direct use, but should instead be"
+ $(Q)echo "used via an ISA-specific target from the following list:"
+ $(Q)echo
+ $(Q)for cfg in $(generic_defconfigs); do echo " $${cfg}"; done
+ $(Q)echo
+ $(Q)false
+
+#
+# Legacy defconfig compatibility - these targets used to be real defconfigs but
+# now that the boards have been converted to use the generic kernel they are
+# wrappers around the generic rules above.
+#
+legacy_defconfigs += ocelot_defconfig
+ocelot_defconfig-y := 32r2el_defconfig BOARDS=ocelot
+
+legacy_defconfigs += sead3_defconfig
+sead3_defconfig-y := 32r2el_defconfig BOARDS=sead-3
+
+legacy_defconfigs += sead3micro_defconfig
+sead3micro_defconfig-y := micro32r2el_defconfig BOARDS=sead-3
+
+legacy_defconfigs += xilfpga_defconfig
+xilfpga_defconfig-y := 32r2el_defconfig BOARDS=xilfpga
+
+.PHONY: $(legacy_defconfigs)
+$(legacy_defconfigs):
+ $(Q)$(MAKE) -f $(srctree)/Makefile $($@-y)
diff --git a/arch/mips/Makefile.postlink b/arch/mips/Makefile.postlink
new file mode 100644
index 000000000..4b1d3ba3a
--- /dev/null
+++ b/arch/mips/Makefile.postlink
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0
+# ===========================================================================
+# Post-link MIPS pass
+# ===========================================================================
+#
+# 1. Check that Loongson3 LL/SC workarounds are applied correctly
+# 2. Insert relocations into vmlinux
+
+PHONY := __archpost
+__archpost:
+
+-include include/config/auto.conf
+include scripts/Kbuild.include
+
+CMD_LS3_LLSC = arch/mips/tools/loongson3-llsc-check
+quiet_cmd_ls3_llsc = LLSCCHK $@
+ cmd_ls3_llsc = $(CMD_LS3_LLSC) $@
+
+CMD_RELOCS = arch/mips/boot/tools/relocs
+quiet_cmd_relocs = RELOCS $@
+ cmd_relocs = $(CMD_RELOCS) $@
+
+# `@true` prevents complaint when there is nothing to be done
+
+vmlinux: FORCE
+ @true
+ifeq ($(CONFIG_CPU_LOONGSON3_WORKAROUNDS),y)
+ $(call if_changed,ls3_llsc)
+endif
+ifeq ($(CONFIG_RELOCATABLE),y)
+ $(call if_changed,relocs)
+endif
+
+%.ko: FORCE
+ @true
+
+clean:
+ @true
+
+PHONY += FORCE clean
+
+FORCE:
+
+.PHONY: $(PHONY)
diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig
new file mode 100644
index 000000000..69734120a
--- /dev/null
+++ b/arch/mips/alchemy/Kconfig
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0
+choice
+ prompt "Machine type"
+ depends on MIPS_ALCHEMY
+ default MIPS_DB1XXX
+
+config MIPS_MTX1
+ bool "4G Systems MTX-1 board"
+ select HAVE_PCI
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
+
+config MIPS_DB1XXX
+ bool "Alchemy DB1XXX / PB1XXX boards"
+ select GPIOLIB
+ select HAVE_PCI
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
+ help
+ Select this option if you have one of the following Alchemy
+ development boards: DB1000 DB1500 DB1100 DB1550 DB1200 DB1300
+ PB1500 PB1100 PB1550 PB1200
+ Board type is autodetected during boot.
+
+config MIPS_XXS1500
+ bool "MyCable XXS1500 board"
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
+
+config MIPS_GPR
+ bool "Trapeze ITS GPR board"
+ select HAVE_PCI
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
+
+endchoice
diff --git a/arch/mips/alchemy/Makefile b/arch/mips/alchemy/Makefile
new file mode 100644
index 000000000..fabbc7019
--- /dev/null
+++ b/arch/mips/alchemy/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MIPS_GPR) += board-gpr.o
+obj-$(CONFIG_MIPS_MTX1) += board-mtx1.o
+obj-$(CONFIG_MIPS_XXS1500) += board-xxs1500.o
diff --git a/arch/mips/alchemy/Platform b/arch/mips/alchemy/Platform
new file mode 100644
index 000000000..c8cff50b0
--- /dev/null
+++ b/arch/mips/alchemy/Platform
@@ -0,0 +1,35 @@
+#
+# Core Alchemy code
+#
+platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/common/
+
+
+#
+# AMD Alchemy Db1000/Db1500/Pb1500/Db1100/Pb1100
+# Db1550/Pb1550/Db1200/Pb1200/Db1300
+#
+platform-$(CONFIG_MIPS_DB1XXX) += alchemy/devboards/
+cflags-$(CONFIG_MIPS_DB1XXX) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
+load-$(CONFIG_MIPS_DB1XXX) += 0xffffffff80100000
+
+#
+# 4G-Systems MTX-1 "MeshCube" wireless router
+#
+load-$(CONFIG_MIPS_MTX1) += 0xffffffff80100000
+
+#
+# MyCable eval board
+#
+load-$(CONFIG_MIPS_XXS1500) += 0xffffffff80100000
+
+#
+# Trapeze ITS GPR board
+#
+load-$(CONFIG_MIPS_GPR) += 0xffffffff80100000
+
+# boards can specify their own <gpio.h> in one of their include dirs.
+# If they do, placing this line here at the end will make sure the
+# compiler picks the board one. If they don't, it will make sure
+# the alchemy generic gpio header is picked up.
+
+cflags-$(CONFIG_MIPS_ALCHEMY) += -I$(srctree)/arch/mips/include/asm/mach-au1x00
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
new file mode 100644
index 000000000..f587c40b6
--- /dev/null
+++ b/arch/mips/alchemy/board-gpr.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * GPR board platform device registration (Au1550)
+ *
+ * Copyright (C) 2010 Wolfgang Grandegger <wg@denx.de>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/platform_data/i2c-gpio.h>
+#include <linux/gpio/machine.h>
+#include <asm/bootinfo.h>
+#include <asm/idle.h>
+#include <asm/reboot.h>
+#include <asm/setup.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
+#include <prom.h>
+
+const char *get_system_type(void)
+{
+ return "GPR";
+}
+
+void prom_putchar(char c)
+{
+ alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
+}
+
+static void gpr_reset(char *c)
+{
+ /* switch System-LED to orange (red# and green# on) */
+ alchemy_gpio_direction_output(4, 0);
+ alchemy_gpio_direction_output(5, 0);
+
+ /* trigger watchdog to reset board in 200ms */
+ printk(KERN_EMERG "Triggering watchdog soft reset...\n");
+ raw_local_irq_disable();
+ alchemy_gpio_direction_output(1, 0);
+ udelay(1);
+ alchemy_gpio_set_value(1, 1);
+ while (1)
+ cpu_wait();
+}
+
+static void gpr_power_off(void)
+{
+ while (1)
+ cpu_wait();
+}
+
+void __init board_setup(void)
+{
+ printk(KERN_INFO "Trapeze ITS GPR board\n");
+
+ pm_power_off = gpr_power_off;
+ _machine_halt = gpr_power_off;
+ _machine_restart = gpr_reset;
+
+ /* Enable UART1/3 */
+ alchemy_uart_enable(AU1000_UART3_PHYS_ADDR);
+ alchemy_uart_enable(AU1000_UART1_PHYS_ADDR);
+
+ /* Take away Reset of UMTS-card */
+ alchemy_gpio_direction_output(215, 1);
+}
+
+/*
+ * Watchdog
+ */
+static struct resource gpr_wdt_resource[] = {
+ [0] = {
+ .start = 1,
+ .end = 1,
+ .name = "gpr-adm6320-wdt",
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device gpr_wdt_device = {
+ .name = "adm6320-wdt",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(gpr_wdt_resource),
+ .resource = gpr_wdt_resource,
+};
+
+/*
+ * FLASH
+ *
+ * 0x00000000-0x00200000 : "kernel"
+ * 0x00200000-0x00a00000 : "rootfs"
+ * 0x01d00000-0x01f00000 : "config"
+ * 0x01c00000-0x01d00000 : "yamon"
+ * 0x01d00000-0x01d40000 : "yamon env vars"
+ * 0x00000000-0x00a00000 : "kernel+rootfs"
+ */
+static struct mtd_partition gpr_mtd_partitions[] = {
+ {
+ .name = "kernel",
+ .size = 0x00200000,
+ .offset = 0,
+ },
+ {
+ .name = "rootfs",
+ .size = 0x00800000,
+ .offset = MTDPART_OFS_APPEND,
+ .mask_flags = MTD_WRITEABLE,
+ },
+ {
+ .name = "config",
+ .size = 0x00200000,
+ .offset = 0x01d00000,
+ },
+ {
+ .name = "yamon",
+ .size = 0x00100000,
+ .offset = 0x01c00000,
+ },
+ {
+ .name = "yamon env vars",
+ .size = 0x00040000,
+ .offset = MTDPART_OFS_APPEND,
+ },
+ {
+ .name = "kernel+rootfs",
+ .size = 0x00a00000,
+ .offset = 0,
+ },
+};
+
+static struct physmap_flash_data gpr_flash_data = {
+ .width = 4,
+ .nr_parts = ARRAY_SIZE(gpr_mtd_partitions),
+ .parts = gpr_mtd_partitions,
+};
+
+static struct resource gpr_mtd_resource = {
+ .start = 0x1e000000,
+ .end = 0x1fffffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device gpr_mtd_device = {
+ .name = "physmap-flash",
+ .dev = {
+ .platform_data = &gpr_flash_data,
+ },
+ .num_resources = 1,
+ .resource = &gpr_mtd_resource,
+};
+
+/*
+ * LEDs
+ */
+static const struct gpio_led gpr_gpio_leds[] = {
+ { /* green */
+ .name = "gpr:green",
+ .gpio = 4,
+ .active_low = 1,
+ },
+ { /* red */
+ .name = "gpr:red",
+ .gpio = 5,
+ .active_low = 1,
+ }
+};
+
+static struct gpio_led_platform_data gpr_led_data = {
+ .num_leds = ARRAY_SIZE(gpr_gpio_leds),
+ .leds = gpr_gpio_leds,
+};
+
+static struct platform_device gpr_led_devices = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &gpr_led_data,
+ }
+};
+
+/*
+ * I2C
+ */
+static struct gpiod_lookup_table gpr_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ /*
+ * This should be on "GPIO2" which has base at 200 so
+ * the global numbers 209 and 210 should correspond to
+ * local offsets 9 and 10.
+ */
+ GPIO_LOOKUP_IDX("alchemy-gpio2", 9, NULL, 0,
+ GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("alchemy-gpio2", 10, NULL, 1,
+ GPIO_ACTIVE_HIGH),
+ },
+};
+
+static struct i2c_gpio_platform_data gpr_i2c_data = {
+ /*
+ * The open drain mode is hardwired somewhere or an electrical
+ * property of the alchemy GPIO controller.
+ */
+ .sda_is_open_drain = 1,
+ .scl_is_open_drain = 1,
+ .udelay = 2, /* ~100 kHz */
+ .timeout = HZ,
+};
+
+static struct platform_device gpr_i2c_device = {
+ .name = "i2c-gpio",
+ .id = -1,
+ .dev.platform_data = &gpr_i2c_data,
+};
+
+static struct i2c_board_info gpr_i2c_info[] __initdata = {
+ {
+ I2C_BOARD_INFO("lm83", 0x18),
+ }
+};
+
+
+
+static struct resource alchemy_pci_host_res[] = {
+ [0] = {
+ .start = AU1500_PCI_PHYS_ADDR,
+ .end = AU1500_PCI_PHYS_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static int gpr_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin)
+{
+ if ((slot == 0) && (pin == 1))
+ return AU1550_PCI_INTA;
+ else if ((slot == 0) && (pin == 2))
+ return AU1550_PCI_INTB;
+
+ return 0xff;
+}
+
+static struct alchemy_pci_platdata gpr_pci_pd = {
+ .board_map_irq = gpr_map_pci_irq,
+ .pci_cfg_set = PCI_CONFIG_AEN | PCI_CONFIG_R2H | PCI_CONFIG_R1H |
+ PCI_CONFIG_CH |
+#if defined(__MIPSEB__)
+ PCI_CONFIG_SIC_HWA_DAT | PCI_CONFIG_SM,
+#else
+ 0,
+#endif
+};
+
+static struct platform_device gpr_pci_host_dev = {
+ .dev.platform_data = &gpr_pci_pd,
+ .name = "alchemy-pci",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(alchemy_pci_host_res),
+ .resource = alchemy_pci_host_res,
+};
+
+static struct platform_device *gpr_devices[] __initdata = {
+ &gpr_wdt_device,
+ &gpr_mtd_device,
+ &gpr_i2c_device,
+ &gpr_led_devices,
+};
+
+static int __init gpr_pci_init(void)
+{
+ return platform_device_register(&gpr_pci_host_dev);
+}
+/* must be arch_initcall; MIPS PCI scans busses in a subsys_initcall */
+arch_initcall(gpr_pci_init);
+
+
+static int __init gpr_dev_init(void)
+{
+ gpiod_add_lookup_table(&gpr_i2c_gpiod_table);
+ i2c_register_board_info(0, gpr_i2c_info, ARRAY_SIZE(gpr_i2c_info));
+
+ return platform_add_devices(gpr_devices, ARRAY_SIZE(gpr_devices));
+}
+device_initcall(gpr_dev_init);
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
new file mode 100644
index 000000000..68ea57511
--- /dev/null
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MTX-1 platform devices registration (Au1500)
+ *
+ * Copyright (C) 2007-2009, Florian Fainelli <florian@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <mtd/mtd-abi.h>
+#include <asm/bootinfo.h>
+#include <asm/reboot.h>
+#include <asm/setup.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
+#include <asm/mach-au1x00/au1xxx_eth.h>
+#include <prom.h>
+
+const char *get_system_type(void)
+{
+ return "MTX-1";
+}
+
+void prom_putchar(char c)
+{
+ alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
+}
+
+static void mtx1_reset(char *c)
+{
+ /* Jump to the reset vector */
+ __asm__ __volatile__("jr\t%0" : : "r"(0xbfc00000));
+}
+
+static void mtx1_power_off(void)
+{
+ while (1)
+ asm volatile (
+ " .set mips32 \n"
+ " wait \n"
+ " .set mips0 \n");
+}
+
+void __init board_setup(void)
+{
+#if IS_ENABLED(CONFIG_USB_OHCI_HCD)
+ /* Enable USB power switch */
+ alchemy_gpio_direction_output(204, 0);
+#endif /* IS_ENABLED(CONFIG_USB_OHCI_HCD) */
+
+ /* Initialize sys_pinfunc */
+ alchemy_wrsys(SYS_PF_NI2, AU1000_SYS_PINFUNC);
+
+ /* Initialize GPIO */
+ alchemy_wrsys(~0, AU1000_SYS_TRIOUTCLR);
+ alchemy_gpio_direction_output(0, 0); /* Disable M66EN (PCI 66MHz) */
+ alchemy_gpio_direction_output(3, 1); /* Disable PCI CLKRUN# */
+ alchemy_gpio_direction_output(1, 1); /* Enable EXT_IO3 */
+ alchemy_gpio_direction_output(5, 0); /* Disable eth PHY TX_ER */
+
+ /* Enable LED and set it to green */
+ alchemy_gpio_direction_output(211, 1); /* green on */
+ alchemy_gpio_direction_output(212, 0); /* red off */
+
+ pm_power_off = mtx1_power_off;
+ _machine_halt = mtx1_power_off;
+ _machine_restart = mtx1_reset;
+
+ printk(KERN_INFO "4G Systems MTX-1 Board\n");
+}
+
+/******************************************************************************/
+
+static struct gpio_keys_button mtx1_gpio_button[] = {
+ {
+ .gpio = 207,
+ .code = BTN_0,
+ .desc = "System button",
+ }
+};
+
+static struct gpio_keys_platform_data mtx1_buttons_data = {
+ .buttons = mtx1_gpio_button,
+ .nbuttons = ARRAY_SIZE(mtx1_gpio_button),
+};
+
+static struct platform_device mtx1_button = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &mtx1_buttons_data,
+ }
+};
+
+static struct gpiod_lookup_table mtx1_wdt_gpio_table = {
+ .dev_id = "mtx1-wdt.0",
+ .table = {
+ /* Global number 215 is offset 15 on Alchemy GPIO 2 */
+ GPIO_LOOKUP("alchemy-gpio2", 15, NULL, GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
+static struct platform_device mtx1_wdt = {
+ .name = "mtx1-wdt",
+ .id = 0,
+};
+
+static const struct gpio_led default_leds[] = {
+ {
+ .name = "mtx1:green",
+ .gpio = 211,
+ }, {
+ .name = "mtx1:red",
+ .gpio = 212,
+ },
+};
+
+static struct gpio_led_platform_data mtx1_led_data = {
+ .num_leds = ARRAY_SIZE(default_leds),
+ .leds = default_leds,
+};
+
+static struct platform_device mtx1_gpio_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &mtx1_led_data,
+ }
+};
+
+static struct mtd_partition mtx1_mtd_partitions[] = {
+ {
+ .name = "filesystem",
+ .size = 0x01C00000,
+ .offset = 0,
+ },
+ {
+ .name = "yamon",
+ .size = 0x00100000,
+ .offset = MTDPART_OFS_APPEND,
+ .mask_flags = MTD_WRITEABLE,
+ },
+ {
+ .name = "kernel",
+ .size = 0x002c0000,
+ .offset = MTDPART_OFS_APPEND,
+ },
+ {
+ .name = "yamon env",
+ .size = 0x00040000,
+ .offset = MTDPART_OFS_APPEND,
+ },
+};
+
+static struct physmap_flash_data mtx1_flash_data = {
+ .width = 4,
+ .nr_parts = 4,
+ .parts = mtx1_mtd_partitions,
+};
+
+static struct resource mtx1_mtd_resource = {
+ .start = 0x1e000000,
+ .end = 0x1fffffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device mtx1_mtd = {
+ .name = "physmap-flash",
+ .dev = {
+ .platform_data = &mtx1_flash_data,
+ },
+ .num_resources = 1,
+ .resource = &mtx1_mtd_resource,
+};
+
+static struct resource alchemy_pci_host_res[] = {
+ [0] = {
+ .start = AU1500_PCI_PHYS_ADDR,
+ .end = AU1500_PCI_PHYS_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static int mtx1_pci_idsel(unsigned int devsel, int assert)
+{
+ /* This function is only necessary to support a proprietary Cardbus
+ * adapter on the mtx-1 "singleboard" variant. It triggers a custom
+ * logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals.
+ */
+ udelay(1);
+
+ if (assert && devsel != 0)
+ /* Suppress signal to Cardbus */
+ alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */
+ else
+ alchemy_gpio_set_value(1, 1); /* set EXT_IO3 ON */
+
+ udelay(1);
+ return 1;
+}
+
+static const char mtx1_irqtab[][5] = {
+ [0] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 00 - AdapterA-Slot0 (top) */
+ [1] = { -1, AU1500_PCI_INTB, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 01 - AdapterA-Slot1 (bottom) */
+ [2] = { -1, AU1500_PCI_INTC, AU1500_PCI_INTD, 0xff, 0xff }, /* IDSEL 02 - AdapterB-Slot0 (top) */
+ [3] = { -1, AU1500_PCI_INTD, AU1500_PCI_INTC, 0xff, 0xff }, /* IDSEL 03 - AdapterB-Slot1 (bottom) */
+ [4] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, /* IDSEL 04 - AdapterC-Slot0 (top) */
+ [5] = { -1, AU1500_PCI_INTB, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 05 - AdapterC-Slot1 (bottom) */
+ [6] = { -1, AU1500_PCI_INTC, AU1500_PCI_INTD, 0xff, 0xff }, /* IDSEL 06 - AdapterD-Slot0 (top) */
+ [7] = { -1, AU1500_PCI_INTD, AU1500_PCI_INTC, 0xff, 0xff }, /* IDSEL 07 - AdapterD-Slot1 (bottom) */
+};
+
+static int mtx1_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin)
+{
+ return mtx1_irqtab[slot][pin];
+}
+
+static struct alchemy_pci_platdata mtx1_pci_pd = {
+ .board_map_irq = mtx1_map_pci_irq,
+ .board_pci_idsel = mtx1_pci_idsel,
+ .pci_cfg_set = PCI_CONFIG_AEN | PCI_CONFIG_R2H | PCI_CONFIG_R1H |
+ PCI_CONFIG_CH |
+#if defined(__MIPSEB__)
+ PCI_CONFIG_SIC_HWA_DAT | PCI_CONFIG_SM,
+#else
+ 0,
+#endif
+};
+
+static struct platform_device mtx1_pci_host = {
+ .dev.platform_data = &mtx1_pci_pd,
+ .name = "alchemy-pci",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(alchemy_pci_host_res),
+ .resource = alchemy_pci_host_res,
+};
+
+static struct platform_device *mtx1_devs[] __initdata = {
+ &mtx1_pci_host,
+ &mtx1_gpio_leds,
+ &mtx1_wdt,
+ &mtx1_button,
+ &mtx1_mtd,
+};
+
+static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = {
+ .phy_search_highest_addr = 1,
+ .phy1_search_mac0 = 1,
+};
+
+static int __init mtx1_register_devices(void)
+{
+ int rc;
+
+ irq_set_irq_type(AU1500_GPIO204_INT, IRQ_TYPE_LEVEL_HIGH);
+ irq_set_irq_type(AU1500_GPIO201_INT, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(AU1500_GPIO202_INT, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(AU1500_GPIO203_INT, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(AU1500_GPIO205_INT, IRQ_TYPE_LEVEL_LOW);
+
+ au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata);
+
+ rc = gpio_request(mtx1_gpio_button[0].gpio,
+ mtx1_gpio_button[0].desc);
+ if (rc < 0) {
+ printk(KERN_INFO "mtx1: failed to request %d\n",
+ mtx1_gpio_button[0].gpio);
+ goto out;
+ }
+ gpio_direction_input(mtx1_gpio_button[0].gpio);
+out:
+ gpiod_add_lookup_table(&mtx1_wdt_gpio_table);
+ return platform_add_devices(mtx1_devs, ARRAY_SIZE(mtx1_devs));
+}
+arch_initcall(mtx1_register_devices);
diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c
new file mode 100644
index 000000000..f175bce29
--- /dev/null
+++ b/arch/mips/alchemy/board-xxs1500.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * BRIEF MODULE DESCRIPTION
+ * MyCable XXS1500 board support
+ *
+ * Copyright 2003, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <asm/bootinfo.h>
+#include <asm/reboot.h>
+#include <asm/setup.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
+#include <prom.h>
+
+const char *get_system_type(void)
+{
+ return "XXS1500";
+}
+
+void prom_putchar(char c)
+{
+ alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
+}
+
+static void xxs1500_reset(char *c)
+{
+ /* Jump to the reset vector */
+ __asm__ __volatile__("jr\t%0" : : "r"(0xbfc00000));
+}
+
+static void xxs1500_power_off(void)
+{
+ while (1)
+ asm volatile (
+ " .set mips32 \n"
+ " wait \n"
+ " .set mips0 \n");
+}
+
+void __init board_setup(void)
+{
+ u32 pin_func;
+
+ pm_power_off = xxs1500_power_off;
+ _machine_halt = xxs1500_power_off;
+ _machine_restart = xxs1500_reset;
+
+ alchemy_gpio1_input_enable();
+ alchemy_gpio2_enable();
+
+ /* Set multiple use pins (UART3/GPIO) to UART (it's used as UART too) */
+ pin_func = alchemy_rdsys(AU1000_SYS_PINFUNC) & ~SYS_PF_UR3;
+ pin_func |= SYS_PF_UR3;
+ alchemy_wrsys(pin_func, AU1000_SYS_PINFUNC);
+
+ /* Enable UART */
+ alchemy_uart_enable(AU1000_UART3_PHYS_ADDR);
+ /* Enable DTR (MCR bit 0) = USB power up */
+ __raw_writel(1, (void __iomem *)KSEG1ADDR(AU1000_UART3_PHYS_ADDR + 0x18));
+ wmb();
+}
+
+/******************************************************************************/
+
+static struct resource xxs1500_pcmcia_res[] = {
+ {
+ .name = "pcmcia-io",
+ .flags = IORESOURCE_MEM,
+ .start = AU1000_PCMCIA_IO_PHYS_ADDR,
+ .end = AU1000_PCMCIA_IO_PHYS_ADDR + 0x000400000 - 1,
+ },
+ {
+ .name = "pcmcia-attr",
+ .flags = IORESOURCE_MEM,
+ .start = AU1000_PCMCIA_ATTR_PHYS_ADDR,
+ .end = AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1,
+ },
+ {
+ .name = "pcmcia-mem",
+ .flags = IORESOURCE_MEM,
+ .start = AU1000_PCMCIA_MEM_PHYS_ADDR,
+ .end = AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
+ },
+};
+
+static struct platform_device xxs1500_pcmcia_dev = {
+ .name = "xxs1500_pcmcia",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(xxs1500_pcmcia_res),
+ .resource = xxs1500_pcmcia_res,
+};
+
+static struct platform_device *xxs1500_devs[] __initdata = {
+ &xxs1500_pcmcia_dev,
+};
+
+static int __init xxs1500_dev_init(void)
+{
+ irq_set_irq_type(AU1500_GPIO204_INT, IRQ_TYPE_LEVEL_HIGH);
+ irq_set_irq_type(AU1500_GPIO201_INT, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(AU1500_GPIO202_INT, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(AU1500_GPIO203_INT, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(AU1500_GPIO205_INT, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(AU1500_GPIO207_INT, IRQ_TYPE_LEVEL_LOW);
+
+ irq_set_irq_type(AU1500_GPIO0_INT, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(AU1500_GPIO1_INT, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(AU1500_GPIO2_INT, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(AU1500_GPIO3_INT, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(AU1500_GPIO4_INT, IRQ_TYPE_LEVEL_LOW); /* CF irq */
+ irq_set_irq_type(AU1500_GPIO5_INT, IRQ_TYPE_LEVEL_LOW);
+
+ return platform_add_devices(xxs1500_devs,
+ ARRAY_SIZE(xxs1500_devs));
+}
+device_initcall(xxs1500_dev_init);
diff --git a/arch/mips/alchemy/common/Makefile b/arch/mips/alchemy/common/Makefile
new file mode 100644
index 000000000..a0e94388d
--- /dev/null
+++ b/arch/mips/alchemy/common/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright 2000, 2008 MontaVista Software Inc.
+# Author: MontaVista Software, Inc. <source@mvista.com>
+#
+# Makefile for the Alchemy Au1xx0 CPUs, generic files.
+#
+
+obj-y += prom.o time.o clock.o platform.o power.o gpiolib.o \
+ setup.o sleeper.o dma.o dbdma.o vss.o irq.o usb.o
diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
new file mode 100644
index 000000000..f0c830337
--- /dev/null
+++ b/arch/mips/alchemy/common/clock.c
@@ -0,0 +1,1117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Alchemy clocks.
+ *
+ * Exposes all configurable internal clock sources to the clk framework.
+ *
+ * We have:
+ * - Root source, usually 12MHz supplied by an external crystal
+ * - 3 PLLs which generate multiples of root rate [AUX, CPU, AUX2]
+ *
+ * Dividers:
+ * - 6 clock dividers with:
+ * * selectable source [one of the PLLs],
+ * * output divided between [2 .. 512 in steps of 2] (!Au1300)
+ * or [1 .. 256 in steps of 1] (Au1300),
+ * * can be enabled individually.
+ *
+ * - up to 6 "internal" (fixed) consumers which:
+ * * take either AUXPLL or one of the above 6 dividers as input,
+ * * divide this input by 1, 2, or 4 (and 3 on Au1300).
+ * * can be disabled separately.
+ *
+ * Misc clocks:
+ * - sysbus clock: CPU core clock (CPUPLL) divided by 2, 3 or 4.
+ * depends on board design and should be set by bootloader, read-only.
+ * - peripheral clock: half the rate of sysbus clock, source for a lot
+ * of peripheral blocks, read-only.
+ * - memory clock: clk rate to main memory chips, depends on board
+ * design and is read-only,
+ * - lrclk: the static bus clock signal for synchronous operation.
+ * depends on board design, must be set by bootloader,
+ * but may be required to correctly configure devices attached to
+ * the static bus. The Au1000/1500/1100 manuals call it LCLK, on
+ * later models it's called RCLK.
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/* Base clock: 12MHz is the default in all databooks, and I haven't
+ * found any board yet which uses a different rate.
+ */
+#define ALCHEMY_ROOTCLK_RATE 12000000
+
+/*
+ * the internal sources which can be driven by the PLLs and dividers.
+ * Names taken from the databooks, refer to them for more information,
+ * especially which ones are share a clock line.
+ */
+static const char * const alchemy_au1300_intclknames[] = {
+ "lcd_intclk", "gpemgp_clk", "maempe_clk", "maebsa_clk",
+ "EXTCLK0", "EXTCLK1"
+};
+
+static const char * const alchemy_au1200_intclknames[] = {
+ "lcd_intclk", NULL, NULL, NULL, "EXTCLK0", "EXTCLK1"
+};
+
+static const char * const alchemy_au1550_intclknames[] = {
+ "usb_clk", "psc0_intclk", "psc1_intclk", "pci_clko",
+ "EXTCLK0", "EXTCLK1"
+};
+
+static const char * const alchemy_au1100_intclknames[] = {
+ "usb_clk", "lcd_intclk", NULL, "i2s_clk", "EXTCLK0", "EXTCLK1"
+};
+
+static const char * const alchemy_au1500_intclknames[] = {
+ NULL, "usbd_clk", "usbh_clk", "pci_clko", "EXTCLK0", "EXTCLK1"
+};
+
+static const char * const alchemy_au1000_intclknames[] = {
+ "irda_clk", "usbd_clk", "usbh_clk", "i2s_clk", "EXTCLK0",
+ "EXTCLK1"
+};
+
+/* aliases for a few on-chip sources which are either shared
+ * or have gone through name changes.
+ */
+static struct clk_aliastable {
+ char *alias;
+ char *base;
+ int cputype;
+} alchemy_clk_aliases[] __initdata = {
+ { "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
+ { "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
+ { "irda_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
+ { "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1550 },
+ { "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1550 },
+ { "psc2_intclk", "usb_clk", ALCHEMY_CPU_AU1550 },
+ { "psc3_intclk", "EXTCLK0", ALCHEMY_CPU_AU1550 },
+ { "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1200 },
+ { "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1200 },
+ { "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
+ { "psc2_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
+ { "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },
+ { "psc3_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },
+
+ { NULL, NULL, 0 },
+};
+
+#define IOMEM(x) ((void __iomem *)(KSEG1ADDR(CPHYSADDR(x))))
+
+/* access locks to SYS_FREQCTRL0/1 and SYS_CLKSRC registers */
+static spinlock_t alchemy_clk_fg0_lock;
+static spinlock_t alchemy_clk_fg1_lock;
+static spinlock_t alchemy_clk_csrc_lock;
+
+/* CPU Core clock *****************************************************/
+
+static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned long t;
+
+ /*
+ * On early Au1000, sys_cpupll was write-only. Since these
+ * silicon versions of Au1000 are not sold, we don't bend
+ * over backwards trying to determine the frequency.
+ */
+ if (unlikely(au1xxx_cpu_has_pll_wo()))
+ t = 396000000;
+ else {
+ t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
+ if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
+ t &= 0x3f;
+ t *= parent_rate;
+ }
+
+ return t;
+}
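+
+/* Illustration (hypothetical register value): a sys_cpupll multiplier
+ * field of 33 with the 12MHz root gives 33 * 12MHz = 396MHz, which is
+ * also the fallback used above for the write-only PLL case.
+ */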
+
+void __init alchemy_set_lpj(void)
+{
+ preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE);
+ preset_lpj /= 2 * HZ;
+}
+
+static const struct clk_ops alchemy_clkops_cpu = {
+ .recalc_rate = alchemy_clk_cpu_recalc,
+};
+
+static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name,
+ int ctype)
+{
+ struct clk_init_data id;
+ struct clk_hw *h;
+ struct clk *clk;
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return ERR_PTR(-ENOMEM);
+
+ id.name = ALCHEMY_CPU_CLK;
+ id.parent_names = &parent_name;
+ id.num_parents = 1;
+ id.flags = 0;
+ id.ops = &alchemy_clkops_cpu;
+ h->init = &id;
+
+ clk = clk_register(NULL, h);
+ if (IS_ERR(clk)) {
+ pr_err("failed to register clock\n");
+ kfree(h);
+ }
+
+ return clk;
+}
+
+/* AUXPLLs ************************************************************/
+
+struct alchemy_auxpll_clk {
+ struct clk_hw hw;
+	unsigned long reg;	/* Au1300 also has AUXPLL2 */
+ int maxmult; /* max multiplier */
+};
+#define to_auxpll_clk(x) container_of(x, struct alchemy_auxpll_clk, hw)
+
+static unsigned long alchemy_clk_aux_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
+
+ return (alchemy_rdsys(a->reg) & 0xff) * parent_rate;
+}
+
+static int alchemy_clk_aux_setr(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
+ unsigned long d = rate;
+
+ if (rate)
+ d /= parent_rate;
+ else
+ d = 0;
+
+ /* minimum is 84MHz, max is 756-1032 depending on variant */
+ if (((d < 7) && (d != 0)) || (d > a->maxmult))
+ return -EINVAL;
+
+ alchemy_wrsys(d, a->reg);
+ return 0;
+}
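+
+/* Example (hypothetical request): asking for 96MHz with the 12MHz root
+ * as parent yields a multiplier of 8, which passes the range check and
+ * is written to the PLL register; anything below 84MHz (multiplier < 7)
+ * is rejected with -EINVAL.
+ */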
+
+static long alchemy_clk_aux_roundr(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
+ unsigned long mult;
+
+ if (!rate || !*parent_rate)
+ return 0;
+
+ mult = rate / (*parent_rate);
+
+ if (mult && (mult < 7))
+ mult = 7;
+ if (mult > a->maxmult)
+ mult = a->maxmult;
+
+ return (*parent_rate) * mult;
+}
+
+static const struct clk_ops alchemy_clkops_aux = {
+ .recalc_rate = alchemy_clk_aux_recalc,
+ .set_rate = alchemy_clk_aux_setr,
+ .round_rate = alchemy_clk_aux_roundr,
+};
+
+static struct clk __init *alchemy_clk_setup_aux(const char *parent_name,
+ char *name, int maxmult,
+ unsigned long reg)
+{
+ struct clk_init_data id;
+ struct clk *c;
+ struct alchemy_auxpll_clk *a;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return ERR_PTR(-ENOMEM);
+
+ id.name = name;
+ id.parent_names = &parent_name;
+ id.num_parents = 1;
+ id.flags = CLK_GET_RATE_NOCACHE;
+ id.ops = &alchemy_clkops_aux;
+
+ a->reg = reg;
+ a->maxmult = maxmult;
+ a->hw.init = &id;
+
+ c = clk_register(NULL, &a->hw);
+ if (!IS_ERR(c))
+ clk_register_clkdev(c, name, NULL);
+ else
+ kfree(a);
+
+ return c;
+}
+
+/* sysbus_clk *********************************************************/
+
+static struct clk __init *alchemy_clk_setup_sysbus(const char *pn)
+{
+ unsigned long v = (alchemy_rdsys(AU1000_SYS_POWERCTRL) & 3) + 2;
+ struct clk *c;
+
+ c = clk_register_fixed_factor(NULL, ALCHEMY_SYSBUS_CLK,
+ pn, 0, 1, v);
+ if (!IS_ERR(c))
+ clk_register_clkdev(c, ALCHEMY_SYSBUS_CLK, NULL);
+ return c;
+}
+
+/* Peripheral Clock ***************************************************/
+
+static struct clk __init *alchemy_clk_setup_periph(const char *pn)
+{
+ /* Peripheral clock runs at half the rate of sysbus clk */
+ struct clk *c;
+
+ c = clk_register_fixed_factor(NULL, ALCHEMY_PERIPH_CLK,
+ pn, 0, 1, 2);
+ if (!IS_ERR(c))
+ clk_register_clkdev(c, ALCHEMY_PERIPH_CLK, NULL);
+ return c;
+}
+
+/* mem clock **********************************************************/
+
+static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct)
+{
+ void __iomem *addr = IOMEM(AU1000_MEM_PHYS_ADDR);
+ unsigned long v;
+ struct clk *c;
+ int div;
+
+ switch (ct) {
+ case ALCHEMY_CPU_AU1550:
+ case ALCHEMY_CPU_AU1200:
+ v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
+ div = (v & (1 << 15)) ? 1 : 2;
+ break;
+ case ALCHEMY_CPU_AU1300:
+ v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
+ div = (v & (1 << 31)) ? 1 : 2;
+ break;
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ default:
+ div = 2;
+ break;
+ }
+
+ c = clk_register_fixed_factor(NULL, ALCHEMY_MEM_CLK, pn,
+ 0, 1, div);
+ if (!IS_ERR(c))
+ clk_register_clkdev(c, ALCHEMY_MEM_CLK, NULL);
+ return c;
+}
+
+/* lrclk: external synchronous static bus clock ***********************/
+
+static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
+{
+ /* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5,
+ * otherwise lrclk=pclk/4.
+ * All other variants: MEM_STCFG0[15:13] = divisor.
+ * L/RCLK = periph_clk / (divisor + 1)
+ * On Au1000, Au1500, Au1100 it's called LCLK,
+ * on later models it's called RCLK, but it's the same thing.
+ */
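+	/* Example (hypothetical register contents): on an Au1550 with
+	 * MEM_STCFG0[15:13] == 3 the divisor below works out to 4, so a
+	 * 48MHz peripheral clock would give a 12MHz L/RCLK.
+	 */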
+ struct clk *c;
+ unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0);
+
+ switch (t) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ v = 4 + ((v >> 11) & 1);
+ break;
+ default: /* all other models */
+ v = ((v >> 13) & 7) + 1;
+ }
+ c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
+ pn, 0, 1, v);
+ if (!IS_ERR(c))
+ clk_register_clkdev(c, ALCHEMY_LR_CLK, NULL);
+ return c;
+}
+
+/* Clock dividers and muxes *******************************************/
+
+/* data for fgen and csrc mux-dividers */
+struct alchemy_fgcs_clk {
+ struct clk_hw hw;
+ spinlock_t *reglock; /* register lock */
+ unsigned long reg; /* SYS_FREQCTRL0/1 */
+ int shift; /* offset in register */
+ int parent; /* parent before disable [Au1300] */
+ int isen; /* is it enabled? */
+ int *dt; /* dividertable for csrc */
+};
+#define to_fgcs_clk(x) container_of(x, struct alchemy_fgcs_clk, hw)
+
+static long alchemy_calc_div(unsigned long rate, unsigned long prate,
+ int scale, int maxdiv, unsigned long *rv)
+{
+ long div1, div2;
+
+ div1 = prate / rate;
+ if ((prate / div1) > rate)
+ div1++;
+
+ if (scale == 2) { /* only div-by-multiple-of-2 possible */
+ if (div1 & 1)
+ div1++; /* stay <=prate */
+ }
+
+ div2 = (div1 / scale) - 1; /* value to write to register */
+
+ if (div2 > maxdiv)
+ div2 = maxdiv;
+ if (rv)
+ *rv = div2;
+
+ div1 = ((div2 + 1) * scale);
+ return div1;
+}
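+
+/* Worked example (hypothetical request): rate=10MHz from a 96MHz parent
+ * with scale=2 and maxdiv=512: div1 starts at 9, is bumped to 10 to stay
+ * at or below the requested rate, div2 = (10/2)-1 = 4 is the register
+ * value, and the returned effective divider is 10 (i.e. 9.6MHz).
+ */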
+
+static int alchemy_clk_fgcs_detr(struct clk_hw *hw,
+ struct clk_rate_request *req,
+ int scale, int maxdiv)
+{
+ struct clk_hw *pc, *bpc, *free;
+ long tdv, tpr, pr, nr, br, bpr, diff, lastdiff;
+ int j;
+
+ lastdiff = INT_MAX;
+ bpr = 0;
+ bpc = NULL;
+ br = -EINVAL;
+ free = NULL;
+
+ /* look at the rates each enabled parent supplies and select
+ * the one that gets closest to but not over the requested rate.
+ */
+ for (j = 0; j < 7; j++) {
+ pc = clk_hw_get_parent_by_index(hw, j);
+ if (!pc)
+ break;
+
+ /* if this parent is currently unused, remember it.
+ * XXX: we would actually want clk_has_active_children()
+ * but this is a good-enough approximation for now.
+ */
+ if (!clk_hw_is_prepared(pc)) {
+ if (!free)
+ free = pc;
+ }
+
+ pr = clk_hw_get_rate(pc);
+ if (pr < req->rate)
+ continue;
+
+ /* what can hardware actually provide */
+ tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL);
+ nr = pr / tdv;
+ diff = req->rate - nr;
+ if (nr > req->rate)
+ continue;
+
+ if (diff < lastdiff) {
+ lastdiff = diff;
+ bpr = pr;
+ bpc = pc;
+ br = nr;
+ }
+ if (diff == 0)
+ break;
+ }
+
+ /* if we couldn't get the exact rate we wanted from the enabled
+ * parents, maybe we can tell an available disabled/inactive one
+ * to give us a rate we can divide down to the requested rate.
+ */
+ if (lastdiff && free) {
+ for (j = (maxdiv == 4) ? 1 : scale; j <= maxdiv; j += scale) {
+ tpr = req->rate * j;
+ if (tpr < 0)
+ break;
+ pr = clk_hw_round_rate(free, tpr);
+
+ tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv,
+ NULL);
+ nr = pr / tdv;
+ diff = req->rate - nr;
+ if (nr > req->rate)
+ continue;
+ if (diff < lastdiff) {
+ lastdiff = diff;
+ bpr = pr;
+ bpc = free;
+ br = nr;
+ }
+ if (diff == 0)
+ break;
+ }
+ }
+
+ if (br < 0)
+ return br;
+
+ req->best_parent_rate = bpr;
+ req->best_parent_hw = bpc;
+ req->rate = br;
+
+ return 0;
+}
+
+static int alchemy_clk_fgv1_en(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v, flags;
+
+ spin_lock_irqsave(c->reglock, flags);
+ v = alchemy_rdsys(c->reg);
+ v |= (1 << 1) << c->shift;
+ alchemy_wrsys(v, c->reg);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static int alchemy_clk_fgv1_isen(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 1);
+
+ return v & 1;
+}
+
+static void alchemy_clk_fgv1_dis(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v, flags;
+
+ spin_lock_irqsave(c->reglock, flags);
+ v = alchemy_rdsys(c->reg);
+ v &= ~((1 << 1) << c->shift);
+ alchemy_wrsys(v, c->reg);
+ spin_unlock_irqrestore(c->reglock, flags);
+}
+
+static int alchemy_clk_fgv1_setp(struct clk_hw *hw, u8 index)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v, flags;
+
+ spin_lock_irqsave(c->reglock, flags);
+ v = alchemy_rdsys(c->reg);
+ if (index)
+ v |= (1 << c->shift);
+ else
+ v &= ~(1 << c->shift);
+ alchemy_wrsys(v, c->reg);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static u8 alchemy_clk_fgv1_getp(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+
+ return (alchemy_rdsys(c->reg) >> c->shift) & 1;
+}
+
+static int alchemy_clk_fgv1_setr(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long div, v, flags, ret;
+ int sh = c->shift + 2;
+
+ if (!rate || !parent_rate || rate > (parent_rate / 2))
+ return -EINVAL;
+ ret = alchemy_calc_div(rate, parent_rate, 2, 512, &div);
+ spin_lock_irqsave(c->reglock, flags);
+ v = alchemy_rdsys(c->reg);
+ v &= ~(0xff << sh);
+ v |= div << sh;
+ alchemy_wrsys(v, c->reg);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 2);
+
+ v = ((v & 0xff) + 1) * 2;
+ return parent_rate / v;
+}
+
+static int alchemy_clk_fgv1_detr(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return alchemy_clk_fgcs_detr(hw, req, 2, 512);
+}
+
+/* Au1000, Au1100, Au15x0, Au12x0 */
+static const struct clk_ops alchemy_clkops_fgenv1 = {
+ .recalc_rate = alchemy_clk_fgv1_recalc,
+ .determine_rate = alchemy_clk_fgv1_detr,
+ .set_rate = alchemy_clk_fgv1_setr,
+ .set_parent = alchemy_clk_fgv1_setp,
+ .get_parent = alchemy_clk_fgv1_getp,
+ .enable = alchemy_clk_fgv1_en,
+ .disable = alchemy_clk_fgv1_dis,
+ .is_enabled = alchemy_clk_fgv1_isen,
+};
+
+static void __alchemy_clk_fgv2_en(struct alchemy_fgcs_clk *c)
+{
+ unsigned long v = alchemy_rdsys(c->reg);
+
+ v &= ~(3 << c->shift);
+ v |= (c->parent & 3) << c->shift;
+ alchemy_wrsys(v, c->reg);
+ c->isen = 1;
+}
+
+static int alchemy_clk_fgv2_en(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long flags;
+
+ /* enable by setting the previous parent clock */
+ spin_lock_irqsave(c->reglock, flags);
+ __alchemy_clk_fgv2_en(c);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static int alchemy_clk_fgv2_isen(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+
+ return ((alchemy_rdsys(c->reg) >> c->shift) & 3) != 0;
+}
+
+static void alchemy_clk_fgv2_dis(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v, flags;
+
+ spin_lock_irqsave(c->reglock, flags);
+ v = alchemy_rdsys(c->reg);
+ v &= ~(3 << c->shift); /* set input mux to "disabled" state */
+ alchemy_wrsys(v, c->reg);
+ c->isen = 0;
+ spin_unlock_irqrestore(c->reglock, flags);
+}
+
+static int alchemy_clk_fgv2_setp(struct clk_hw *hw, u8 index)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(c->reglock, flags);
+ c->parent = index + 1; /* value to write to register */
+ if (c->isen)
+ __alchemy_clk_fgv2_en(c);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static u8 alchemy_clk_fgv2_getp(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long flags, v;
+
+ spin_lock_irqsave(c->reglock, flags);
+ v = c->parent - 1;
+ spin_unlock_irqrestore(c->reglock, flags);
+ return v;
+}
+
+/* fg0-2 and fg4-6 share a "scale"-bit. With this bit cleared, the
+ * dividers behave exactly as on previous models (dividers are multiples
+ * of 2); with the bit set, dividers are multiples of 1, halving their
+ * range but also making them much more flexible.
+ */
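+/* Illustration (hypothetical register contents): with the 8-bit divider
+ * field holding 5, a cleared scale bit gives parent/((5+1)*2) = parent/12,
+ * while a set scale bit gives parent/(5+1) = parent/6.
+ */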
+static int alchemy_clk_fgv2_setr(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ int sh = c->shift + 2;
+ unsigned long div, v, flags, ret;
+
+ if (!rate || !parent_rate || rate > parent_rate)
+ return -EINVAL;
+
+ v = alchemy_rdsys(c->reg) & (1 << 30); /* test "scale" bit */
+ ret = alchemy_calc_div(rate, parent_rate, v ? 1 : 2,
+ v ? 256 : 512, &div);
+
+ spin_lock_irqsave(c->reglock, flags);
+ v = alchemy_rdsys(c->reg);
+ v &= ~(0xff << sh);
+ v |= (div & 0xff) << sh;
+ alchemy_wrsys(v, c->reg);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ int sh = c->shift + 2;
+ unsigned long v, t;
+
+ v = alchemy_rdsys(c->reg);
+ t = parent_rate / (((v >> sh) & 0xff) + 1);
+ if ((v & (1 << 30)) == 0) /* test scale bit */
+ t /= 2;
+
+ return t;
+}
+
+static int alchemy_clk_fgv2_detr(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ int scale, maxdiv;
+
+ if (alchemy_rdsys(c->reg) & (1 << 30)) {
+ scale = 1;
+ maxdiv = 256;
+ } else {
+ scale = 2;
+ maxdiv = 512;
+ }
+
+ return alchemy_clk_fgcs_detr(hw, req, scale, maxdiv);
+}
+
+/* Au1300 larger input mux, no separate disable bit, flexible divider */
+static const struct clk_ops alchemy_clkops_fgenv2 = {
+ .recalc_rate = alchemy_clk_fgv2_recalc,
+ .determine_rate = alchemy_clk_fgv2_detr,
+ .set_rate = alchemy_clk_fgv2_setr,
+ .set_parent = alchemy_clk_fgv2_setp,
+ .get_parent = alchemy_clk_fgv2_getp,
+ .enable = alchemy_clk_fgv2_en,
+ .disable = alchemy_clk_fgv2_dis,
+ .is_enabled = alchemy_clk_fgv2_isen,
+};
+
+static const char * const alchemy_clk_fgv1_parents[] = {
+ ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
+};
+
+static const char * const alchemy_clk_fgv2_parents[] = {
+ ALCHEMY_AUXPLL2_CLK, ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
+};
+
+static const char * const alchemy_clk_fgen_names[] = {
+ ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
+ ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK };
+
+static int __init alchemy_clk_init_fgens(int ctype)
+{
+ struct clk *c;
+ struct clk_init_data id;
+ struct alchemy_fgcs_clk *a;
+ unsigned long v;
+ int i, ret;
+
+ switch (ctype) {
+ case ALCHEMY_CPU_AU1000...ALCHEMY_CPU_AU1200:
+ id.ops = &alchemy_clkops_fgenv1;
+ id.parent_names = alchemy_clk_fgv1_parents;
+ id.num_parents = 2;
+ break;
+ case ALCHEMY_CPU_AU1300:
+ id.ops = &alchemy_clkops_fgenv2;
+ id.parent_names = alchemy_clk_fgv2_parents;
+ id.num_parents = 3;
+ break;
+ default:
+ return -ENODEV;
+ }
+ id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
+
+ a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ spin_lock_init(&alchemy_clk_fg0_lock);
+ spin_lock_init(&alchemy_clk_fg1_lock);
+ ret = 0;
+ for (i = 0; i < 6; i++) {
+ id.name = alchemy_clk_fgen_names[i];
+ a->shift = 10 * (i < 3 ? i : i - 3);
+ if (i > 2) {
+ a->reg = AU1000_SYS_FREQCTRL1;
+ a->reglock = &alchemy_clk_fg1_lock;
+ } else {
+ a->reg = AU1000_SYS_FREQCTRL0;
+ a->reglock = &alchemy_clk_fg0_lock;
+ }
+
+ /* default to first parent if bootloader has set
+ * the mux to disabled state.
+ */
+ if (ctype == ALCHEMY_CPU_AU1300) {
+ v = alchemy_rdsys(a->reg);
+ a->parent = (v >> a->shift) & 3;
+ if (!a->parent) {
+ a->parent = 1;
+ a->isen = 0;
+ } else
+ a->isen = 1;
+ }
+
+ a->hw.init = &id;
+ c = clk_register(NULL, &a->hw);
+ if (IS_ERR(c))
+ ret++;
+ else
+ clk_register_clkdev(c, id.name, NULL);
+ a++;
+ }
+
+ return ret;
+}
+
+/* internal sources muxes *********************************************/
+
+static int alchemy_clk_csrc_isen(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v = alchemy_rdsys(c->reg);
+
+ return (((v >> c->shift) >> 2) & 7) != 0;
+}
+
+static void __alchemy_clk_csrc_en(struct alchemy_fgcs_clk *c)
+{
+ unsigned long v = alchemy_rdsys(c->reg);
+
+ v &= ~((7 << 2) << c->shift);
+ v |= ((c->parent & 7) << 2) << c->shift;
+ alchemy_wrsys(v, c->reg);
+ c->isen = 1;
+}
+
+static int alchemy_clk_csrc_en(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long flags;
+
+ /* enable by setting the previous parent clock */
+ spin_lock_irqsave(c->reglock, flags);
+ __alchemy_clk_csrc_en(c);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static void alchemy_clk_csrc_dis(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v, flags;
+
+ spin_lock_irqsave(c->reglock, flags);
+ v = alchemy_rdsys(c->reg);
+ v &= ~((3 << 2) << c->shift); /* mux to "disabled" state */
+ alchemy_wrsys(v, c->reg);
+ c->isen = 0;
+ spin_unlock_irqrestore(c->reglock, flags);
+}
+
+static int alchemy_clk_csrc_setp(struct clk_hw *hw, u8 index)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(c->reglock, flags);
+ c->parent = index + 1; /* value to write to register */
+ if (c->isen)
+ __alchemy_clk_csrc_en(c);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static u8 alchemy_clk_csrc_getp(struct clk_hw *hw)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+
+ return c->parent - 1;
+}
+
+static unsigned long alchemy_clk_csrc_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long v = (alchemy_rdsys(c->reg) >> c->shift) & 3;
+
+ return parent_rate / c->dt[v];
+}
+
+static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ unsigned long d, v, flags;
+ int i;
+
+ if (!rate || !parent_rate || rate > parent_rate)
+ return -EINVAL;
+
+ d = (parent_rate + (rate / 2)) / rate;
+ if (d > 4)
+ return -EINVAL;
+ if ((d == 3) && (c->dt[2] != 3))
+ d = 4;
+
+ for (i = 0; i < 4; i++)
+ if (c->dt[i] == d)
+ break;
+
+ if (i >= 4)
+ return -EINVAL; /* oops */
+
+ spin_lock_irqsave(c->reglock, flags);
+ v = alchemy_rdsys(c->reg);
+ v &= ~(3 << c->shift);
+ v |= (i & 3) << c->shift;
+ alchemy_wrsys(v, c->reg);
+ spin_unlock_irqrestore(c->reglock, flags);
+
+ return 0;
+}
+
+static int alchemy_clk_csrc_detr(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
+ int scale = c->dt[2] == 3 ? 1 : 2; /* au1300 check */
+
+ return alchemy_clk_fgcs_detr(hw, req, scale, 4);
+}
+
+static const struct clk_ops alchemy_clkops_csrc = {
+ .recalc_rate = alchemy_clk_csrc_recalc,
+ .determine_rate = alchemy_clk_csrc_detr,
+ .set_rate = alchemy_clk_csrc_setr,
+ .set_parent = alchemy_clk_csrc_setp,
+ .get_parent = alchemy_clk_csrc_getp,
+ .enable = alchemy_clk_csrc_en,
+ .disable = alchemy_clk_csrc_dis,
+ .is_enabled = alchemy_clk_csrc_isen,
+};
+
+static const char * const alchemy_clk_csrc_parents[] = {
+ /* disabled at index 0 */ ALCHEMY_AUXPLL_CLK,
+ ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
+ ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK
+};
+
+/* divider tables */
+static int alchemy_csrc_dt1[] = { 1, 4, 1, 2 }; /* rest */
+static int alchemy_csrc_dt2[] = { 1, 4, 3, 2 }; /* Au1300 */
+
+static int __init alchemy_clk_setup_imux(int ctype)
+{
+ struct alchemy_fgcs_clk *a;
+ const char * const *names;
+ struct clk_init_data id;
+ unsigned long v;
+ int i, ret, *dt;
+ struct clk *c;
+
+ id.ops = &alchemy_clkops_csrc;
+ id.parent_names = alchemy_clk_csrc_parents;
+ id.num_parents = 7;
+ id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
+
+ dt = alchemy_csrc_dt1;
+ switch (ctype) {
+ case ALCHEMY_CPU_AU1000:
+ names = alchemy_au1000_intclknames;
+ break;
+ case ALCHEMY_CPU_AU1500:
+ names = alchemy_au1500_intclknames;
+ break;
+ case ALCHEMY_CPU_AU1100:
+ names = alchemy_au1100_intclknames;
+ break;
+ case ALCHEMY_CPU_AU1550:
+ names = alchemy_au1550_intclknames;
+ break;
+ case ALCHEMY_CPU_AU1200:
+ names = alchemy_au1200_intclknames;
+ break;
+ case ALCHEMY_CPU_AU1300:
+ dt = alchemy_csrc_dt2;
+ names = alchemy_au1300_intclknames;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ a = kcalloc(6, sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ spin_lock_init(&alchemy_clk_csrc_lock);
+ ret = 0;
+
+ for (i = 0; i < 6; i++) {
+ id.name = names[i];
+ if (!id.name)
+ goto next;
+
+ a->shift = i * 5;
+ a->reg = AU1000_SYS_CLKSRC;
+ a->reglock = &alchemy_clk_csrc_lock;
+ a->dt = dt;
+
+ /* default to first parent clock if mux is initially
+ * set to disabled state.
+ */
+ v = alchemy_rdsys(a->reg);
+ a->parent = ((v >> a->shift) >> 2) & 7;
+ if (!a->parent) {
+ a->parent = 1;
+ a->isen = 0;
+ } else
+ a->isen = 1;
+
+ a->hw.init = &id;
+ c = clk_register(NULL, &a->hw);
+ if (IS_ERR(c))
+ ret++;
+ else
+ clk_register_clkdev(c, id.name, NULL);
+next:
+ a++;
+ }
+
+ return ret;
+}
+
+
+/**********************************************************************/
+
+
+#define ERRCK(x) \
+ if (IS_ERR(x)) { \
+ ret = PTR_ERR(x); \
+ goto out; \
+ }
+
+static int __init alchemy_clk_init(void)
+{
+ int ctype = alchemy_get_cputype(), ret, i;
+ struct clk_aliastable *t = alchemy_clk_aliases;
+ struct clk *c;
+
+ /* Root of the Alchemy clock tree: external 12MHz crystal osc */
+ c = clk_register_fixed_rate(NULL, ALCHEMY_ROOT_CLK, NULL,
+ 0, ALCHEMY_ROOTCLK_RATE);
+ ERRCK(c)
+
+ /* CPU core clock */
+ c = alchemy_clk_setup_cpu(ALCHEMY_ROOT_CLK, ctype);
+ ERRCK(c)
+
+ /* AUXPLLs: max 1GHz on Au1300, 748MHz on older models */
+ i = (ctype == ALCHEMY_CPU_AU1300) ? 84 : 63;
+ c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK, ALCHEMY_AUXPLL_CLK,
+ i, AU1000_SYS_AUXPLL);
+ ERRCK(c)
+
+ if (ctype == ALCHEMY_CPU_AU1300) {
+ c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK,
+ ALCHEMY_AUXPLL2_CLK, i,
+ AU1300_SYS_AUXPLL2);
+ ERRCK(c)
+ }
+
+ /* sysbus clock: cpu core clock divided by 2, 3 or 4 */
+ c = alchemy_clk_setup_sysbus(ALCHEMY_CPU_CLK);
+ ERRCK(c)
+
+ /* peripheral clock: runs at half rate of sysbus clk */
+ c = alchemy_clk_setup_periph(ALCHEMY_SYSBUS_CLK);
+ ERRCK(c)
+
+ /* SDR/DDR memory clock */
+ c = alchemy_clk_setup_mem(ALCHEMY_SYSBUS_CLK, ctype);
+ ERRCK(c)
+
+ /* L/RCLK: external static bus clock for synchronous mode */
+ c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype);
+ ERRCK(c)
+
+ /* Frequency dividers 0-5 */
+ ret = alchemy_clk_init_fgens(ctype);
+ if (ret) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+	/* divider muxes for internal sources */
+ ret = alchemy_clk_setup_imux(ctype);
+ if (ret) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* set up aliases drivers might look for */
+ while (t->base) {
+ if (t->cputype == ctype)
+ clk_add_alias(t->alias, NULL, t->base, NULL);
+ t++;
+ }
+
+ pr_info("Alchemy clocktree installed\n");
+ return 0;
+
+out:
+ return ret;
+}
+postcore_initcall(alchemy_clk_init);
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
new file mode 100644
index 000000000..4ca2c2887
--- /dev/null
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -0,0 +1,1089 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ * The Descriptor Based DMA channel manager that first appeared
+ * on the Au1550. I started with dma.c, but I think all that is
+ * left is this initial comment :-)
+ *
+ * Copyright 2004 Embedded Edge, LLC
+ * dan@embeddededge.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/export.h>
+#include <linux/syscore_ops.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
+
+/*
+ * The Descriptor Based DMA supports up to 16 channels.
+ *
+ * There are 32 devices defined. We keep an internal structure
+ * of devices using these channels, along with additional
+ * information.
+ *
+ * We allocate the descriptors and allow access to them through various
+ * functions. The drivers allocate the data buffers and assign them
+ * to the descriptors.
+ */
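+/* A rough sketch of the intended calling sequence for a driver using this
+ * API (illustrative only, the variable names below are made up):
+ *
+ *	chanid = au1xxx_dbdma_chan_alloc(srcid, destid, my_cb, my_arg);
+ *	au1xxx_dbdma_set_devwidth(chanid, 8);
+ *	au1xxx_dbdma_ring_alloc(chanid, entries);
+ *	au1xxx_dbdma_put_source(chanid, buf, nbytes, DDMA_FLAGS_IE);
+ *	au1xxx_dbdma_start(chanid);
+ */
+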
+static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);
+
+/* I couldn't find a macro that did this... */
+#define ALIGN_ADDR(x, a) ((((u32)(x)) + (a-1)) & ~(a-1))
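+/* e.g. ALIGN_ADDR(0x1234, 32) == 0x1240 (values chosen for illustration) */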
+
+static dbdma_global_t *dbdma_gptr =
+ (dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
+static int dbdma_initialized;
+
+static dbdev_tab_t *dbdev_tab;
+
+static dbdev_tab_t au1550_dbdev_tab[] __initdata = {
+ /* UARTS */
+ { AU1550_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
+ { AU1550_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 },
+ { AU1550_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x11400004, 0, 0 },
+ { AU1550_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN, 0, 8, 0x11400000, 0, 0 },
+
+ /* EXT DMA */
+ { AU1550_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
+ { AU1550_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
+ { AU1550_DSCR_CMD0_DMA_REQ2, 0, 0, 0, 0x00000000, 0, 0 },
+ { AU1550_DSCR_CMD0_DMA_REQ3, 0, 0, 0, 0x00000000, 0, 0 },
+
+ /* USB DEV */
+ { AU1550_DSCR_CMD0_USBDEV_RX0, DEV_FLAGS_IN, 4, 8, 0x10200000, 0, 0 },
+ { AU1550_DSCR_CMD0_USBDEV_TX0, DEV_FLAGS_OUT, 4, 8, 0x10200004, 0, 0 },
+ { AU1550_DSCR_CMD0_USBDEV_TX1, DEV_FLAGS_OUT, 4, 8, 0x10200008, 0, 0 },
+ { AU1550_DSCR_CMD0_USBDEV_TX2, DEV_FLAGS_OUT, 4, 8, 0x1020000c, 0, 0 },
+ { AU1550_DSCR_CMD0_USBDEV_RX3, DEV_FLAGS_IN, 4, 8, 0x10200010, 0, 0 },
+ { AU1550_DSCR_CMD0_USBDEV_RX4, DEV_FLAGS_IN, 4, 8, 0x10200014, 0, 0 },
+
+ /* PSCs */
+ { AU1550_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 0, 0x11a0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 0, 0x11b0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 0, 0x11b0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 0, 0x10a0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN, 0, 0, 0x10a0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 0, 0x10b0001c, 0, 0 },
+ { AU1550_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN, 0, 0, 0x10b0001c, 0, 0 },
+
+ { AU1550_DSCR_CMD0_PCI_WRITE, 0, 0, 0, 0x00000000, 0, 0 }, /* PCI */
+ { AU1550_DSCR_CMD0_NAND_FLASH, 0, 0, 0, 0x00000000, 0, 0 }, /* NAND */
+
+ /* MAC 0 */
+ { AU1550_DSCR_CMD0_MAC0_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
+ { AU1550_DSCR_CMD0_MAC0_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },
+
+ /* MAC 1 */
+ { AU1550_DSCR_CMD0_MAC1_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
+ { AU1550_DSCR_CMD0_MAC1_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },
+
+ { DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+};
+
+static dbdev_tab_t au1200_dbdev_tab[] __initdata = {
+ { AU1200_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
+ { AU1200_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 },
+ { AU1200_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x11200004, 0, 0 },
+ { AU1200_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN, 0, 8, 0x11200000, 0, 0 },
+
+ { AU1200_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
+ { AU1200_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1200_DSCR_CMD0_MAE_BE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { AU1200_DSCR_CMD0_MAE_FE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { AU1200_DSCR_CMD0_MAE_BOTH, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { AU1200_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1200_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
+ { AU1200_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 4, 8, 0x10600004, 0, 0 },
+ { AU1200_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 4, 8, 0x10680000, 0, 0 },
+ { AU1200_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 4, 8, 0x10680004, 0, 0 },
+
+ { AU1200_DSCR_CMD0_AES_RX, DEV_FLAGS_IN , 4, 32, 0x10300008, 0, 0 },
+ { AU1200_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },
+
+ { AU1200_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 16, 0x11a0001c, 0, 0 },
+ { AU1200_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 16, 0x11a0001c, 0, 0 },
+ { AU1200_DSCR_CMD0_PSC0_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { AU1200_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 16, 0x11b0001c, 0, 0 },
+ { AU1200_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 16, 0x11b0001c, 0, 0 },
+ { AU1200_DSCR_CMD0_PSC1_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1200_DSCR_CMD0_CIM_RXA, DEV_FLAGS_IN, 0, 32, 0x14004020, 0, 0 },
+ { AU1200_DSCR_CMD0_CIM_RXB, DEV_FLAGS_IN, 0, 32, 0x14004040, 0, 0 },
+ { AU1200_DSCR_CMD0_CIM_RXC, DEV_FLAGS_IN, 0, 32, 0x14004060, 0, 0 },
+ { AU1200_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1200_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
+
+ { DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+};
+
+static dbdev_tab_t au1300_dbdev_tab[] __initdata = {
+ { AU1300_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x10100004, 0, 0 },
+ { AU1300_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x10100000, 0, 0 },
+ { AU1300_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x10101004, 0, 0 },
+ { AU1300_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN, 0, 8, 0x10101000, 0, 0 },
+ { AU1300_DSCR_CMD0_UART2_TX, DEV_FLAGS_OUT, 0, 8, 0x10102004, 0, 0 },
+ { AU1300_DSCR_CMD0_UART2_RX, DEV_FLAGS_IN, 0, 8, 0x10102000, 0, 0 },
+ { AU1300_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x10103004, 0, 0 },
+ { AU1300_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN, 0, 8, 0x10103000, 0, 0 },
+
+ { AU1300_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
+ { AU1300_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 4, 8, 0x10600004, 0, 0 },
+ { AU1300_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 8, 8, 0x10601000, 0, 0 },
+ { AU1300_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 8, 8, 0x10601004, 0, 0 },
+
+ { AU1300_DSCR_CMD0_AES_RX, DEV_FLAGS_IN , 4, 32, 0x10300008, 0, 0 },
+ { AU1300_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },
+
+ { AU1300_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 16, 0x10a0001c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 16, 0x10a0001c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 16, 0x10a0101c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 16, 0x10a0101c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 16, 0x10a0201c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN, 0, 16, 0x10a0201c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 16, 0x10a0301c, 0, 0 },
+ { AU1300_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN, 0, 16, 0x10a0301c, 0, 0 },
+
+ { AU1300_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { AU1300_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1300_DSCR_CMD0_SDMS_TX2, DEV_FLAGS_OUT, 4, 8, 0x10602000, 0, 0 },
+ { AU1300_DSCR_CMD0_SDMS_RX2, DEV_FLAGS_IN, 4, 8, 0x10602004, 0, 0 },
+
+ { AU1300_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+
+ { AU1300_DSCR_CMD0_UDMA, DEV_FLAGS_ANYUSE, 0, 32, 0x14001810, 0, 0 },
+
+ { AU1300_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
+ { AU1300_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
+
+ { DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+ { DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+};
+
+/* 32 predefined plus 32 custom */
+#define DBDEV_TAB_SIZE 64
+
+static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];
+
+static dbdev_tab_t *find_dbdev_id(u32 id)
+{
+ int i;
+ dbdev_tab_t *p;
+ for (i = 0; i < DBDEV_TAB_SIZE; ++i) {
+ p = &dbdev_tab[i];
+ if (p->dev_id == id)
+ return p;
+ }
+ return NULL;
+}
+
+void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp)
+{
+ return phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+}
+EXPORT_SYMBOL(au1xxx_ddma_get_nextptr_virt);
+
+u32 au1xxx_ddma_add_device(dbdev_tab_t *dev)
+{
+ u32 ret = 0;
+ dbdev_tab_t *p;
+ static u16 new_id = 0x1000;
+
+ p = find_dbdev_id(~0);
+ if (NULL != p) {
+ memcpy(p, dev, sizeof(dbdev_tab_t));
+ p->dev_id = DSCR_DEV2CUSTOM_ID(new_id, dev->dev_id);
+ ret = p->dev_id;
+ new_id++;
+#if 0
+ printk(KERN_DEBUG "add_device: id:%x flags:%x padd:%x\n",
+ p->dev_id, p->dev_flags, p->dev_physaddr);
+#endif
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(au1xxx_ddma_add_device);
+
+void au1xxx_ddma_del_device(u32 devid)
+{
+ dbdev_tab_t *p = find_dbdev_id(devid);
+
+ if (p != NULL) {
+ memset(p, 0, sizeof(dbdev_tab_t));
+ p->dev_id = ~0;
+ }
+}
+EXPORT_SYMBOL(au1xxx_ddma_del_device);
+
+/* Allocate a channel and return a non-zero descriptor if successful. */
+u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
+ void (*callback)(int, void *), void *callparam)
+{
+ unsigned long flags;
+ u32 used, chan;
+ u32 dcp;
+ int i;
+ dbdev_tab_t *stp, *dtp;
+ chan_tab_t *ctp;
+ au1x_dma_chan_t *cp;
+
+ /*
+ * We do the initialization on the first channel allocation.
+ * We have to wait because of the interrupt handler initialization
+ * which can't be done successfully during board set up.
+ */
+ if (!dbdma_initialized)
+ return 0;
+
+ stp = find_dbdev_id(srcid);
+ if (stp == NULL)
+ return 0;
+ dtp = find_dbdev_id(destid);
+ if (dtp == NULL)
+ return 0;
+
+ used = 0;
+
+ /* Check to see if we can get both channels. */
+ spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
+ if (!(stp->dev_flags & DEV_FLAGS_INUSE) ||
+ (stp->dev_flags & DEV_FLAGS_ANYUSE)) {
+ /* Got source */
+ stp->dev_flags |= DEV_FLAGS_INUSE;
+ if (!(dtp->dev_flags & DEV_FLAGS_INUSE) ||
+ (dtp->dev_flags & DEV_FLAGS_ANYUSE)) {
+ /* Got destination */
+ dtp->dev_flags |= DEV_FLAGS_INUSE;
+ } else {
+ /* Can't get dest. Release src. */
+ stp->dev_flags &= ~DEV_FLAGS_INUSE;
+ used++;
+ }
+ } else
+ used++;
+ spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
+
+ if (used)
+ return 0;
+
+ /* Let's see if we can allocate a channel for it. */
+ ctp = NULL;
+ chan = 0;
+ spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
+ for (i = 0; i < NUM_DBDMA_CHANS; i++)
+ if (chan_tab_ptr[i] == NULL) {
+ /*
+ * If kmalloc fails, it is caught below same
+ * as a channel not available.
+ */
+ ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
+ chan_tab_ptr[i] = ctp;
+ break;
+ }
+ spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
+
+ if (ctp != NULL) {
+ memset(ctp, 0, sizeof(chan_tab_t));
+ ctp->chan_index = chan = i;
+ dcp = KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
+ dcp += (0x0100 * chan);
+ ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
+ cp = (au1x_dma_chan_t *)dcp;
+ ctp->chan_src = stp;
+ ctp->chan_dest = dtp;
+ ctp->chan_callback = callback;
+ ctp->chan_callparam = callparam;
+
+ /* Initialize channel configuration. */
+ i = 0;
+ if (stp->dev_intlevel)
+ i |= DDMA_CFG_SED;
+ if (stp->dev_intpolarity)
+ i |= DDMA_CFG_SP;
+ if (dtp->dev_intlevel)
+ i |= DDMA_CFG_DED;
+ if (dtp->dev_intpolarity)
+ i |= DDMA_CFG_DP;
+ if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
+ (dtp->dev_flags & DEV_FLAGS_SYNC))
+ i |= DDMA_CFG_SYNC;
+ cp->ddma_cfg = i;
+ wmb(); /* drain writebuffer */
+
+ /*
+ * Return a non-zero value that can be used to find the channel
+ * information in subsequent operations.
+ */
+ return (u32)(&chan_tab_ptr[chan]);
+ }
+
+ /* Release devices */
+ stp->dev_flags &= ~DEV_FLAGS_INUSE;
+ dtp->dev_flags &= ~DEV_FLAGS_INUSE;
+
+ return 0;
+}
+EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc);
+
+/*
+ * Set the device width if source or destination is a FIFO.
+ * Should be 8, 16, or 32 bits.
+ */
+u32 au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
+{
+ u32 rv;
+ chan_tab_t *ctp;
+ dbdev_tab_t *stp, *dtp;
+
+ ctp = *((chan_tab_t **)chanid);
+ stp = ctp->chan_src;
+ dtp = ctp->chan_dest;
+ rv = 0;
+
+ if (stp->dev_flags & DEV_FLAGS_IN) { /* Source in fifo */
+ rv = stp->dev_devwidth;
+ stp->dev_devwidth = bits;
+ }
+ if (dtp->dev_flags & DEV_FLAGS_OUT) { /* Destination out fifo */
+ rv = dtp->dev_devwidth;
+ dtp->dev_devwidth = bits;
+ }
+
+ return rv;
+}
+EXPORT_SYMBOL(au1xxx_dbdma_set_devwidth);
+
+/* Allocate a descriptor ring, initializing as much as possible. */
+u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
+{
+ int i;
+ u32 desc_base, srcid, destid;
+ u32 cmd0, cmd1, src1, dest1;
+ u32 src0, dest0;
+ chan_tab_t *ctp;
+ dbdev_tab_t *stp, *dtp;
+ au1x_ddma_desc_t *dp;
+
+ /*
+ * I guess we could check this to be within the
+ * range of the table......
+ */
+ ctp = *((chan_tab_t **)chanid);
+ stp = ctp->chan_src;
+ dtp = ctp->chan_dest;
+
+ /*
+ * The descriptors must be 32-byte aligned. There is a
+ * possibility the allocation will give us such an address,
+	 * and if we try that first we can likely avoid wasting a larger
+	 * slab of memory.
+ */
+ desc_base = (u32)kmalloc_array(entries, sizeof(au1x_ddma_desc_t),
+ GFP_KERNEL|GFP_DMA);
+ if (desc_base == 0)
+ return 0;
+
+ if (desc_base & 0x1f) {
+ /*
+ * Lost....do it again, allocate extra, and round
+ * the address base.
+ */
+ kfree((const void *)desc_base);
+ i = entries * sizeof(au1x_ddma_desc_t);
+ i += (sizeof(au1x_ddma_desc_t) - 1);
+ desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA);
+ if (desc_base == 0)
+ return 0;
+
+ ctp->cdb_membase = desc_base;
+ desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
+ } else
+ ctp->cdb_membase = desc_base;
+
+ dp = (au1x_ddma_desc_t *)desc_base;
+
+ /* Keep track of the base descriptor. */
+ ctp->chan_desc_base = dp;
+
+ /* Initialize the rings with as much information as we know. */
+ srcid = stp->dev_id;
+ destid = dtp->dev_id;
+
+ cmd0 = cmd1 = src1 = dest1 = 0;
+ src0 = dest0 = 0;
+
+ cmd0 |= DSCR_CMD0_SID(srcid);
+ cmd0 |= DSCR_CMD0_DID(destid);
+ cmd0 |= DSCR_CMD0_IE | DSCR_CMD0_CV;
+ cmd0 |= DSCR_CMD0_ST(DSCR_CMD0_ST_NOCHANGE);
+
+ /* Is it mem to mem transfer? */
+ if (((DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_THROTTLE) ||
+ (DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_ALWAYS)) &&
+ ((DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_THROTTLE) ||
+ (DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_ALWAYS)))
+ cmd0 |= DSCR_CMD0_MEM;
+
+ switch (stp->dev_devwidth) {
+ case 8:
+ cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_BYTE);
+ break;
+ case 16:
+ cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_HALFWORD);
+ break;
+ case 32:
+ default:
+ cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_WORD);
+ break;
+ }
+
+ switch (dtp->dev_devwidth) {
+ case 8:
+ cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_BYTE);
+ break;
+ case 16:
+ cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_HALFWORD);
+ break;
+ case 32:
+ default:
+ cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_WORD);
+ break;
+ }
+
+ /*
+ * If the device is marked as an in/out FIFO, ensure it is
+ * set non-coherent.
+ */
+ if (stp->dev_flags & DEV_FLAGS_IN)
+ cmd0 |= DSCR_CMD0_SN; /* Source in FIFO */
+ if (dtp->dev_flags & DEV_FLAGS_OUT)
+ cmd0 |= DSCR_CMD0_DN; /* Destination out FIFO */
+
+ /*
+ * Set up source1. For now, assume no stride and increment.
+ * A channel attribute update can change this later.
+ */
+ switch (stp->dev_tsize) {
+ case 1:
+ src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE1);
+ break;
+ case 2:
+ src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE2);
+ break;
+ case 4:
+ src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE4);
+ break;
+ case 8:
+ default:
+ src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE8);
+ break;
+ }
+
+ /* If source input is FIFO, set static address. */
+ if (stp->dev_flags & DEV_FLAGS_IN) {
+ if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
+ src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
+ else
+ src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
+ }
+
+ if (stp->dev_physaddr)
+ src0 = stp->dev_physaddr;
+
+ /*
+ * Set up dest1. For now, assume no stride and increment.
+ * A channel attribute update can change this later.
+ */
+ switch (dtp->dev_tsize) {
+ case 1:
+ dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE1);
+ break;
+ case 2:
+ dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE2);
+ break;
+ case 4:
+ dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE4);
+ break;
+ case 8:
+ default:
+ dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE8);
+ break;
+ }
+
+ /* If destination output is FIFO, set static address. */
+ if (dtp->dev_flags & DEV_FLAGS_OUT) {
+ if (dtp->dev_flags & DEV_FLAGS_BURSTABLE)
+ dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST);
+ else
+ dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
+ }
+
+ if (dtp->dev_physaddr)
+ dest0 = dtp->dev_physaddr;
+
+#if 0
+ printk(KERN_DEBUG "did:%x sid:%x cmd0:%x cmd1:%x source0:%x "
+ "source1:%x dest0:%x dest1:%x\n",
+ dtp->dev_id, stp->dev_id, cmd0, cmd1, src0,
+ src1, dest0, dest1);
+#endif
+ for (i = 0; i < entries; i++) {
+ dp->dscr_cmd0 = cmd0;
+ dp->dscr_cmd1 = cmd1;
+ dp->dscr_source0 = src0;
+ dp->dscr_source1 = src1;
+ dp->dscr_dest0 = dest0;
+ dp->dscr_dest1 = dest1;
+ dp->dscr_stat = 0;
+ dp->sw_context = 0;
+ dp->sw_status = 0;
+ dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(dp + 1));
+ dp++;
+ }
+
+	/* Make the last descriptor point to the first. */
+ dp--;
+ dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
+ ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
+
+ return (u32)ctp->chan_desc_base;
+}
+EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);
+
+/*
+ * Put a source buffer into the DMA ring.
+ * This updates the source pointer and byte count. Normally used
+ * for memory to fifo transfers.
+ */
+u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+
+ /*
+ * I guess we could check this to be within the
+ * range of the table......
+ */
+ ctp = *(chan_tab_t **)chanid;
+
+ /*
+ * We should have multiple callers for a particular channel,
+ * an interrupt doesn't affect this pointer nor the descriptor,
+ * so no locking should be needed.
+ */
+ dp = ctp->put_ptr;
+
+ /*
+ * If the descriptor is valid, we are way ahead of the DMA
+ * engine, so just return an error condition.
+ */
+ if (dp->dscr_cmd0 & DSCR_CMD0_V)
+ return 0;
+
+ /* Load up buffer address and byte count. */
+ dp->dscr_source0 = buf & ~0UL;
+ dp->dscr_cmd1 = nbytes;
+ /* Check flags */
+ if (flags & DDMA_FLAGS_IE)
+ dp->dscr_cmd0 |= DSCR_CMD0_IE;
+ if (flags & DDMA_FLAGS_NOIE)
+ dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
+
+ /*
+ * There is an errata on the Au1200/Au1550 parts that could result
+ * in "stale" data being DMA'ed. It has to do with the snoop logic on
+ * the cache eviction buffer. DMA_NONCOHERENT is on by default for
+ * these parts. If it is fixed in the future, these dma_cache_inv will
+ * just be nothing more than empty macros. See io.h.
+ */
+ dma_cache_wback_inv((unsigned long)buf, nbytes);
+ dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */
+ wmb(); /* drain writebuffer */
+ dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
+ ctp->chan_ptr->ddma_dbell = 0;
+
+ /* Get next descriptor pointer. */
+ ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+
+ /* Return something non-zero. */
+ return nbytes;
+}
+EXPORT_SYMBOL(au1xxx_dbdma_put_source);
+
+/* Put a destination buffer into the DMA ring.
+ * This updates the destination pointer and byte count. Normally used
+ * to place an empty buffer into the ring for fifo to memory transfers.
+ */
+u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+
+ /* I guess we could check this to be within the
+ * range of the table......
+ */
+ ctp = *((chan_tab_t **)chanid);
+
+ /* We should have multiple callers for a particular channel,
+ * an interrupt doesn't affect this pointer nor the descriptor,
+ * so no locking should be needed.
+ */
+ dp = ctp->put_ptr;
+
+ /* If the descriptor is valid, we are way ahead of the DMA
+ * engine, so just return an error condition.
+ */
+ if (dp->dscr_cmd0 & DSCR_CMD0_V)
+ return 0;
+
+ /* Load up buffer address and byte count */
+
+ /* Check flags */
+ if (flags & DDMA_FLAGS_IE)
+ dp->dscr_cmd0 |= DSCR_CMD0_IE;
+ if (flags & DDMA_FLAGS_NOIE)
+ dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
+
+ dp->dscr_dest0 = buf & ~0UL;
+ dp->dscr_cmd1 = nbytes;
+#if 0
+ printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
+ dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
+ dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
+#endif
+ /*
+ * There is an errata on the Au1200/Au1550 parts that could result in
+ * "stale" data being DMA'ed. It has to do with the snoop logic on the
+ * cache eviction buffer. DMA_NONCOHERENT is on by default for these
+ * parts. If it is fixed in the future, these dma_cache_inv will just
+ * be nothing more than empty macros. See io.h.
+ */
+ dma_cache_inv((unsigned long)buf, nbytes);
+ dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */
+ wmb(); /* drain writebuffer */
+ dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
+ ctp->chan_ptr->ddma_dbell = 0;
+
+ /* Get next descriptor pointer. */
+ ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+
+ /* Return something non-zero. */
+ return nbytes;
+}
+EXPORT_SYMBOL(au1xxx_dbdma_put_dest);
+
+/*
+ * Get a destination buffer from the DMA ring.
+ * Normally used to get a full buffer from the ring during fifo
+ * to memory transfers. This does not set the valid bit, you will
+ * have to put another destination buffer to keep the DMA going.
+ */
+u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+ u32 rv;
+
+ /*
+ * I guess we could check this to be within the
+ * range of the table......
+ */
+ ctp = *((chan_tab_t **)chanid);
+
+ /*
+ * We should have multiple callers for a particular channel,
+ * an interrupt doesn't affect this pointer nor the descriptor,
+ * so no locking should be needed.
+ */
+ dp = ctp->get_ptr;
+
+ /*
+ * If the descriptor is valid, we are way ahead of the DMA
+ * engine, so just return an error condition.
+ */
+ if (dp->dscr_cmd0 & DSCR_CMD0_V)
+ return 0;
+
+ /* Return buffer address and byte count. */
+ *buf = (void *)(phys_to_virt(dp->dscr_dest0));
+ *nbytes = dp->dscr_cmd1;
+ rv = dp->dscr_stat;
+
+ /* Get next descriptor pointer. */
+ ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+
+ /* Return something non-zero. */
+ return rv;
+}
+EXPORT_SYMBOL_GPL(au1xxx_dbdma_get_dest);
+
+void au1xxx_dbdma_stop(u32 chanid)
+{
+ chan_tab_t *ctp;
+ au1x_dma_chan_t *cp;
+ int halt_timeout = 0;
+
+ ctp = *((chan_tab_t **)chanid);
+
+ cp = ctp->chan_ptr;
+ cp->ddma_cfg &= ~DDMA_CFG_EN; /* Disable channel */
+ wmb(); /* drain writebuffer */
+ while (!(cp->ddma_stat & DDMA_STAT_H)) {
+ udelay(1);
+ halt_timeout++;
+ if (halt_timeout > 100) {
+ printk(KERN_WARNING "warning: DMA channel won't halt\n");
+ break;
+ }
+ }
+ /* clear current desc valid and doorbell */
+ cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V);
+ wmb(); /* drain writebuffer */
+}
+EXPORT_SYMBOL(au1xxx_dbdma_stop);
+
+/*
+ * Start using the current descriptor pointer. If the DBDMA encounters
+ * a non-valid descriptor, it will stop. In this case, we can just
+ * continue by adding a buffer to the list and starting again.
+ */
+void au1xxx_dbdma_start(u32 chanid)
+{
+ chan_tab_t *ctp;
+ au1x_dma_chan_t *cp;
+
+ ctp = *((chan_tab_t **)chanid);
+ cp = ctp->chan_ptr;
+ cp->ddma_desptr = virt_to_phys(ctp->cur_ptr);
+ cp->ddma_cfg |= DDMA_CFG_EN; /* Enable channel */
+ wmb(); /* drain writebuffer */
+ cp->ddma_dbell = 0;
+ wmb(); /* drain writebuffer */
+}
+EXPORT_SYMBOL(au1xxx_dbdma_start);
+
+void au1xxx_dbdma_reset(u32 chanid)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+
+ au1xxx_dbdma_stop(chanid);
+
+ ctp = *((chan_tab_t **)chanid);
+ ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
+
+ /* Run through the descriptors and reset the valid indicator. */
+ dp = ctp->chan_desc_base;
+
+ do {
+ dp->dscr_cmd0 &= ~DSCR_CMD0_V;
+ /*
+ * Reset our software status -- this is used to determine
+		 * if a descriptor is in use by upper level software, since
+		 * posting can reset the 'V' bit.
+ */
+ dp->sw_status = 0;
+ dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+ } while (dp != ctp->chan_desc_base);
+}
+EXPORT_SYMBOL(au1xxx_dbdma_reset);
+
+u32 au1xxx_get_dma_residue(u32 chanid)
+{
+ chan_tab_t *ctp;
+ au1x_dma_chan_t *cp;
+ u32 rv;
+
+ ctp = *((chan_tab_t **)chanid);
+ cp = ctp->chan_ptr;
+
+ /* This is only valid if the channel is stopped. */
+ rv = cp->ddma_bytecnt;
+ wmb(); /* drain writebuffer */
+
+ return rv;
+}
+EXPORT_SYMBOL_GPL(au1xxx_get_dma_residue);
+
+void au1xxx_dbdma_chan_free(u32 chanid)
+{
+ chan_tab_t *ctp;
+ dbdev_tab_t *stp, *dtp;
+
+ ctp = *((chan_tab_t **)chanid);
+ stp = ctp->chan_src;
+ dtp = ctp->chan_dest;
+
+ au1xxx_dbdma_stop(chanid);
+
+ kfree((void *)ctp->cdb_membase);
+
+ stp->dev_flags &= ~DEV_FLAGS_INUSE;
+ dtp->dev_flags &= ~DEV_FLAGS_INUSE;
+ chan_tab_ptr[ctp->chan_index] = NULL;
+
+ kfree(ctp);
+}
+EXPORT_SYMBOL(au1xxx_dbdma_chan_free);
+
+static irqreturn_t dbdma_interrupt(int irq, void *dev_id)
+{
+ u32 intstat;
+ u32 chan_index;
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+ au1x_dma_chan_t *cp;
+
+ intstat = dbdma_gptr->ddma_intstat;
+ wmb(); /* drain writebuffer */
+ chan_index = __ffs(intstat);
+
+ ctp = chan_tab_ptr[chan_index];
+ cp = ctp->chan_ptr;
+ dp = ctp->cur_ptr;
+
+ /* Reset interrupt. */
+ cp->ddma_irq = 0;
+ wmb(); /* drain writebuffer */
+
+ if (ctp->chan_callback)
+ ctp->chan_callback(irq, ctp->chan_callparam);
+
+ ctp->cur_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+ return IRQ_RETVAL(1);
+}
+
+void au1xxx_dbdma_dump(u32 chanid)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+ dbdev_tab_t *stp, *dtp;
+ au1x_dma_chan_t *cp;
+ u32 i = 0;
+
+ ctp = *((chan_tab_t **)chanid);
+ stp = ctp->chan_src;
+ dtp = ctp->chan_dest;
+ cp = ctp->chan_ptr;
+
+ printk(KERN_DEBUG "Chan %x, stp %x (dev %d) dtp %x (dev %d)\n",
+ (u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp,
+ dtp - dbdev_tab);
+ printk(KERN_DEBUG "desc base %x, get %x, put %x, cur %x\n",
+ (u32)(ctp->chan_desc_base), (u32)(ctp->get_ptr),
+ (u32)(ctp->put_ptr), (u32)(ctp->cur_ptr));
+
+ printk(KERN_DEBUG "dbdma chan %x\n", (u32)cp);
+ printk(KERN_DEBUG "cfg %08x, desptr %08x, statptr %08x\n",
+ cp->ddma_cfg, cp->ddma_desptr, cp->ddma_statptr);
+ printk(KERN_DEBUG "dbell %08x, irq %08x, stat %08x, bytecnt %08x\n",
+ cp->ddma_dbell, cp->ddma_irq, cp->ddma_stat,
+ cp->ddma_bytecnt);
+
+ /* Run through the descriptors */
+ dp = ctp->chan_desc_base;
+
+ do {
+ printk(KERN_DEBUG "Dp[%d]= %08x, cmd0 %08x, cmd1 %08x\n",
+ i++, (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
+ printk(KERN_DEBUG "src0 %08x, src1 %08x, dest0 %08x, dest1 %08x\n",
+ dp->dscr_source0, dp->dscr_source1,
+ dp->dscr_dest0, dp->dscr_dest1);
+ printk(KERN_DEBUG "stat %08x, nxtptr %08x\n",
+ dp->dscr_stat, dp->dscr_nxtptr);
+ dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+ } while (dp != ctp->chan_desc_base);
+}
+
+/* Put a descriptor into the DMA ring.
+ * This updates the source/destination pointers and byte count.
+ */
+u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
+{
+ chan_tab_t *ctp;
+ au1x_ddma_desc_t *dp;
+ u32 nbytes = 0;
+
+ /*
+ * I guess we could check this to be within the
+ * range of the table......
+ */
+ ctp = *((chan_tab_t **)chanid);
+
+ /*
+ * We should have multiple callers for a particular channel,
+ * an interrupt doesn't affect this pointer nor the descriptor,
+ * so no locking should be needed.
+ */
+ dp = ctp->put_ptr;
+
+ /*
+ * If the descriptor is valid, we are way ahead of the DMA
+ * engine, so just return an error condition.
+ */
+ if (dp->dscr_cmd0 & DSCR_CMD0_V)
+ return 0;
+
+ /* Load up buffer addresses and byte count. */
+ dp->dscr_dest0 = dscr->dscr_dest0;
+ dp->dscr_source0 = dscr->dscr_source0;
+ dp->dscr_dest1 = dscr->dscr_dest1;
+ dp->dscr_source1 = dscr->dscr_source1;
+ dp->dscr_cmd1 = dscr->dscr_cmd1;
+ nbytes = dscr->dscr_cmd1;
+ /* Allow the caller to specify if an interrupt is generated */
+ dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
+ dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
+ ctp->chan_ptr->ddma_dbell = 0;
+
+ /* Get next descriptor pointer. */
+ ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+
+ /* Return something non-zero. */
+ return nbytes;
+}
+
+
+static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6];
+
+static int alchemy_dbdma_suspend(void)
+{
+ int i;
+ void __iomem *addr;
+
+ addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
+ alchemy_dbdma_pm_data[0][0] = __raw_readl(addr + 0x00);
+ alchemy_dbdma_pm_data[0][1] = __raw_readl(addr + 0x04);
+ alchemy_dbdma_pm_data[0][2] = __raw_readl(addr + 0x08);
+ alchemy_dbdma_pm_data[0][3] = __raw_readl(addr + 0x0c);
+
+ /* save channel configurations */
+ addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
+ for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
+ alchemy_dbdma_pm_data[i][0] = __raw_readl(addr + 0x00);
+ alchemy_dbdma_pm_data[i][1] = __raw_readl(addr + 0x04);
+ alchemy_dbdma_pm_data[i][2] = __raw_readl(addr + 0x08);
+ alchemy_dbdma_pm_data[i][3] = __raw_readl(addr + 0x0c);
+ alchemy_dbdma_pm_data[i][4] = __raw_readl(addr + 0x10);
+ alchemy_dbdma_pm_data[i][5] = __raw_readl(addr + 0x14);
+
+ /* halt channel */
+ __raw_writel(alchemy_dbdma_pm_data[i][0] & ~1, addr + 0x00);
+ wmb();
+ while (!(__raw_readl(addr + 0x14) & 1))
+ wmb();
+
+ addr += 0x100; /* next channel base */
+ }
+ /* disable channel interrupts */
+ addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
+ __raw_writel(0, addr + 0x0c);
+ wmb();
+
+ return 0;
+}
+
+static void alchemy_dbdma_resume(void)
+{
+ int i;
+ void __iomem *addr;
+
+ addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
+ __raw_writel(alchemy_dbdma_pm_data[0][0], addr + 0x00);
+ __raw_writel(alchemy_dbdma_pm_data[0][1], addr + 0x04);
+ __raw_writel(alchemy_dbdma_pm_data[0][2], addr + 0x08);
+ __raw_writel(alchemy_dbdma_pm_data[0][3], addr + 0x0c);
+
+ /* restore channel configurations */
+ addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
+ for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
+ __raw_writel(alchemy_dbdma_pm_data[i][0], addr + 0x00);
+ __raw_writel(alchemy_dbdma_pm_data[i][1], addr + 0x04);
+ __raw_writel(alchemy_dbdma_pm_data[i][2], addr + 0x08);
+ __raw_writel(alchemy_dbdma_pm_data[i][3], addr + 0x0c);
+ __raw_writel(alchemy_dbdma_pm_data[i][4], addr + 0x10);
+ __raw_writel(alchemy_dbdma_pm_data[i][5], addr + 0x14);
+ wmb();
+ addr += 0x100; /* next channel base */
+ }
+}
+
+static struct syscore_ops alchemy_dbdma_syscore_ops = {
+ .suspend = alchemy_dbdma_suspend,
+ .resume = alchemy_dbdma_resume,
+};
+
+static int __init dbdma_setup(unsigned int irq, dbdev_tab_t *idtable)
+{
+ int ret;
+
+ dbdev_tab = kcalloc(DBDEV_TAB_SIZE, sizeof(dbdev_tab_t), GFP_KERNEL);
+ if (!dbdev_tab)
+ return -ENOMEM;
+
+ memcpy(dbdev_tab, idtable, 32 * sizeof(dbdev_tab_t));
+ for (ret = 32; ret < DBDEV_TAB_SIZE; ret++)
+ dbdev_tab[ret].dev_id = ~0;
+
+ dbdma_gptr->ddma_config = 0;
+ dbdma_gptr->ddma_throttle = 0;
+ dbdma_gptr->ddma_inten = 0xffff;
+ wmb(); /* drain writebuffer */
+
+ ret = request_irq(irq, dbdma_interrupt, 0, "dbdma", (void *)dbdma_gptr);
+ if (ret)
+ printk(KERN_ERR "Cannot grab DBDMA interrupt!\n");
+ else {
+ dbdma_initialized = 1;
+ register_syscore_ops(&alchemy_dbdma_syscore_ops);
+ }
+
+ return ret;
+}
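+
+/*
+ * Editorial note: dbdma_setup() seeds the first 32 entries of dbdev_tab[]
+ * from the per-SoC id table and marks the remaining slots unused
+ * (dev_id = ~0), resets the global config/throttle registers, enables
+ * interrupt generation for all channels and hooks the shared DBDMA
+ * interrupt before declaring the controller initialized.
+ */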
+
+static int __init alchemy_dbdma_init(void)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1550:
+ return dbdma_setup(AU1550_DDMA_INT, au1550_dbdev_tab);
+ case ALCHEMY_CPU_AU1200:
+ return dbdma_setup(AU1200_DDMA_INT, au1200_dbdev_tab);
+ case ALCHEMY_CPU_AU1300:
+ return dbdma_setup(AU1300_DDMA_INT, au1300_dbdev_tab);
+ }
+ return 0;
+}
+subsys_initcall(alchemy_dbdma_init);
diff --git a/arch/mips/alchemy/common/dma.c b/arch/mips/alchemy/common/dma.c
new file mode 100644
index 000000000..973049b5b
--- /dev/null
+++ b/arch/mips/alchemy/common/dma.c
@@ -0,0 +1,265 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ * A DMA channel allocator for Au1x00. The API is modeled loosely on
+ * linux/kernel/dma.c.
+ *
+ * Copyright 2000, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1000_dma.h>
+
+/*
+ * A note on resource allocation:
+ *
+ * All drivers needing DMA channels should allocate and release them
+ * through the public routines `request_dma()' and `free_dma()'.
+ *
+ * In order to avoid problems, all processes should allocate resources in
+ * the same sequence and release them in the reverse order.
+ *
+ * So, when allocating DMAs and IRQs, first allocate the DMA, then the IRQ.
+ * When releasing them, first release the IRQ, then release the DMA. The
+ * main reason for this order is that, if you are requesting the DMA buffer
+ * done interrupt, you won't know the irq number until the DMA channel is
+ * returned from request_dma.
+ */
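+
+/*
+ * Illustrative sketch of the ordering described above (names are
+ * placeholders, not part of this file): a driver that manages the
+ * completion IRQ itself would do roughly
+ *
+ *	ch = request_au1000_dma(dev_id, "mydrv", NULL, 0, NULL);
+ *	if (ch >= 0)
+ *		ret = request_irq(get_dma_chan(ch)->irq, my_isr, 0,
+ *				  "mydrv", my_cookie);
+ *	...
+ *	free_irq(get_dma_chan(ch)->irq, my_cookie);
+ *	free_au1000_dma(ch);
+ *
+ * Most users instead pass their handler straight to request_au1000_dma()
+ * and let it request the IRQ on their behalf.
+ */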
+
+/* DMA Channel register block spacing */
+#define DMA_CHANNEL_LEN 0x00000100
+
+DEFINE_SPINLOCK(au1000_dma_spin_lock);
+
+struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = {
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,},
+ {.dev_id = -1,}
+};
+EXPORT_SYMBOL(au1000_dma_table);
+
+/* Device FIFO addresses and default DMA modes */
+static const struct dma_dev {
+ unsigned int fifo_addr;
+ unsigned int dma_mode;
+} dma_dev_table[DMA_NUM_DEV] = {
+ { AU1000_UART0_PHYS_ADDR + 0x04, DMA_DW8 }, /* UART0_TX */
+ { AU1000_UART0_PHYS_ADDR + 0x00, DMA_DW8 | DMA_DR }, /* UART0_RX */
+ { 0, 0 }, /* DMA_REQ0 */
+ { 0, 0 }, /* DMA_REQ1 */
+ { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 }, /* AC97 TX c */
+ { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 | DMA_DR }, /* AC97 RX c */
+ { AU1000_UART3_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* UART3_TX */
+ { AU1000_UART3_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* UART3_RX */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* EP0RD */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* EP0WR */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x08, DMA_DW8 | DMA_NC }, /* EP2WR */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x0c, DMA_DW8 | DMA_NC }, /* EP3WR */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x10, DMA_DW8 | DMA_NC | DMA_DR }, /* EP4RD */
+ { AU1000_USB_UDC_PHYS_ADDR + 0x14, DMA_DW8 | DMA_NC | DMA_DR }, /* EP5RD */
+ /* on Au1500, these 2 are DMA_REQ2/3 (GPIO208/209) instead! */
+ { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC}, /* I2S TX */
+ { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC | DMA_DR}, /* I2S RX */
+};
+
+int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
+ int length, int *eof, void *data)
+{
+ int i, len = 0;
+ struct dma_chan *chan;
+
+ for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++) {
+ chan = get_dma_chan(i);
+ if (chan != NULL)
+ len += sprintf(buf + len, "%2d: %s\n",
+ i, chan->dev_str);
+ }
+
+ if (fpos >= len) {
+ *start = buf;
+ *eof = 1;
+ return 0;
+ }
+ *start = buf + fpos;
+ len -= fpos;
+ if (len > length)
+ return length;
+ *eof = 1;
+ return len;
+}
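+
+/*
+ * Editorial note: the *start/*eof handling above follows the legacy
+ * procfs read_proc convention - the whole channel table is formatted
+ * into 'buf', then the slice starting at 'fpos' (at most 'length'
+ * bytes) is handed back and *eof is set once the end of the text has
+ * been reached.
+ */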
+
+/* Device FIFO addresses and default DMA modes - 2nd bank */
+static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = {
+ { AU1100_SD0_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */
+ { AU1100_SD0_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR }, /* coherent */
+ { AU1100_SD1_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */
+ { AU1100_SD1_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR } /* coherent */
+};
+
+void dump_au1000_dma_channel(unsigned int dmanr)
+{
+ struct dma_chan *chan;
+
+ if (dmanr >= NUM_AU1000_DMA_CHANNELS)
+ return;
+ chan = &au1000_dma_table[dmanr];
+
+ printk(KERN_INFO "Au1000 DMA%d Register Dump:\n", dmanr);
+ printk(KERN_INFO " mode = 0x%08x\n",
+ __raw_readl(chan->io + DMA_MODE_SET));
+ printk(KERN_INFO " addr = 0x%08x\n",
+ __raw_readl(chan->io + DMA_PERIPHERAL_ADDR));
+ printk(KERN_INFO " start0 = 0x%08x\n",
+ __raw_readl(chan->io + DMA_BUFFER0_START));
+ printk(KERN_INFO " start1 = 0x%08x\n",
+ __raw_readl(chan->io + DMA_BUFFER1_START));
+ printk(KERN_INFO " count0 = 0x%08x\n",
+ __raw_readl(chan->io + DMA_BUFFER0_COUNT));
+ printk(KERN_INFO " count1 = 0x%08x\n",
+ __raw_readl(chan->io + DMA_BUFFER1_COUNT));
+}
+
+/*
+ * Finds a free channel, and binds the requested device to it.
+ * Returns the allocated channel number, or negative on error.
+ * Requests the DMA done IRQ if irqhandler != NULL.
+ */
+int request_au1000_dma(int dev_id, const char *dev_str,
+ irq_handler_t irqhandler,
+ unsigned long irqflags,
+ void *irq_dev_id)
+{
+ struct dma_chan *chan;
+ const struct dma_dev *dev;
+ int i, ret;
+
+ if (alchemy_get_cputype() == ALCHEMY_CPU_AU1100) {
+ if (dev_id < 0 || dev_id >= (DMA_NUM_DEV + DMA_NUM_DEV_BANK2))
+ return -EINVAL;
+ } else {
+ if (dev_id < 0 || dev_id >= DMA_NUM_DEV)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
+ if (au1000_dma_table[i].dev_id < 0)
+ break;
+
+ if (i == NUM_AU1000_DMA_CHANNELS)
+ return -ENODEV;
+
+ chan = &au1000_dma_table[i];
+
+ if (dev_id >= DMA_NUM_DEV) {
+ dev_id -= DMA_NUM_DEV;
+ dev = &dma_dev_table_bank2[dev_id];
+ } else
+ dev = &dma_dev_table[dev_id];
+
+ if (irqhandler) {
+ chan->irq_dev = irq_dev_id;
+ ret = request_irq(chan->irq, irqhandler, irqflags, dev_str,
+ chan->irq_dev);
+ if (ret) {
+ chan->irq_dev = NULL;
+ return ret;
+ }
+ } else {
+ chan->irq_dev = NULL;
+ }
+
+ /* fill it in */
+ chan->io = (void __iomem *)(KSEG1ADDR(AU1000_DMA_PHYS_ADDR) +
+ i * DMA_CHANNEL_LEN);
+ chan->dev_id = dev_id;
+ chan->dev_str = dev_str;
+ chan->fifo_addr = dev->fifo_addr;
+ chan->mode = dev->dma_mode;
+
+ /* initialize the channel before returning */
+ init_dma(i);
+
+ return i;
+}
+EXPORT_SYMBOL(request_au1000_dma);
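+
+/*
+ * Usage sketch (illustrative only, names are made up): most drivers let
+ * request_au1000_dma() grab the completion IRQ for them:
+ *
+ *	int ch = request_au1000_dma(dev_id, "mydrv", my_dma_isr, 0, my_dev);
+ *	if (ch < 0)
+ *		return ch;
+ *	... program buffers and start transfers ...
+ *	free_au1000_dma(ch);
+ *
+ * dev_id is an index into dma_dev_table[] (or, on Au1100, the bank2
+ * table); "mydrv", my_dma_isr and my_dev are placeholders.
+ */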
+
+void free_au1000_dma(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan) {
+ printk(KERN_ERR "Error trying to free DMA%d\n", dmanr);
+ return;
+ }
+
+ disable_dma(dmanr);
+ if (chan->irq_dev)
+ free_irq(chan->irq, chan->irq_dev);
+
+ chan->irq_dev = NULL;
+ chan->dev_id = -1;
+}
+EXPORT_SYMBOL(free_au1000_dma);
+
+static int __init au1000_dma_init(void)
+{
+ int base, i;
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ base = AU1000_DMA_INT_BASE;
+ break;
+ case ALCHEMY_CPU_AU1500:
+ base = AU1500_DMA_INT_BASE;
+ break;
+ case ALCHEMY_CPU_AU1100:
+ base = AU1100_DMA_INT_BASE;
+ break;
+ default:
+ goto out;
+ }
+
+ for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
+ au1000_dma_table[i].irq = base + i;
+
+ printk(KERN_INFO "Alchemy DMA initialized\n");
+
+out:
+ return 0;
+}
+arch_initcall(au1000_dma_init);
diff --git a/arch/mips/alchemy/common/gpiolib.c b/arch/mips/alchemy/common/gpiolib.c
new file mode 100644
index 000000000..7d5da5edd
--- /dev/null
+++ b/arch/mips/alchemy/common/gpiolib.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2007-2009, OpenWrt.org, Florian Fainelli <florian@openwrt.org>
+ * GPIOLIB support for Alchemy chips.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Notes :
+ * This file must ONLY be built when CONFIG_GPIOLIB=y and
+ * CONFIG_ALCHEMY_GPIO_INDIRECT=n, otherwise compilation will fail!
+ * The Au1000 SoC has only one GPIO block: GPIO1.
+ * Au1100, Au15x0 and Au12x0 have a second one: GPIO2.
+ * Au1300 is totally different: one block with up to 128 GPIOs.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/gpio.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
+#include <asm/mach-au1x00/gpio-au1300.h>
+
+static int gpio2_get(struct gpio_chip *chip, unsigned offset)
+{
+ return !!alchemy_gpio2_get_value(offset + ALCHEMY_GPIO2_BASE);
+}
+
+static void gpio2_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ alchemy_gpio2_set_value(offset + ALCHEMY_GPIO2_BASE, value);
+}
+
+static int gpio2_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return alchemy_gpio2_direction_input(offset + ALCHEMY_GPIO2_BASE);
+}
+
+static int gpio2_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ return alchemy_gpio2_direction_output(offset + ALCHEMY_GPIO2_BASE,
+ value);
+}
+
+static int gpio2_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return alchemy_gpio2_to_irq(offset + ALCHEMY_GPIO2_BASE);
+}
+
+
+static int gpio1_get(struct gpio_chip *chip, unsigned offset)
+{
+ return !!alchemy_gpio1_get_value(offset + ALCHEMY_GPIO1_BASE);
+}
+
+static void gpio1_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ alchemy_gpio1_set_value(offset + ALCHEMY_GPIO1_BASE, value);
+}
+
+static int gpio1_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return alchemy_gpio1_direction_input(offset + ALCHEMY_GPIO1_BASE);
+}
+
+static int gpio1_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ return alchemy_gpio1_direction_output(offset + ALCHEMY_GPIO1_BASE,
+ value);
+}
+
+static int gpio1_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return alchemy_gpio1_to_irq(offset + ALCHEMY_GPIO1_BASE);
+}
+
+struct gpio_chip alchemy_gpio_chip[] = {
+ [0] = {
+ .label = "alchemy-gpio1",
+ .direction_input = gpio1_direction_input,
+ .direction_output = gpio1_direction_output,
+ .get = gpio1_get,
+ .set = gpio1_set,
+ .to_irq = gpio1_to_irq,
+ .base = ALCHEMY_GPIO1_BASE,
+ .ngpio = ALCHEMY_GPIO1_NUM,
+ },
+ [1] = {
+ .label = "alchemy-gpio2",
+ .direction_input = gpio2_direction_input,
+ .direction_output = gpio2_direction_output,
+ .get = gpio2_get,
+ .set = gpio2_set,
+ .to_irq = gpio2_to_irq,
+ .base = ALCHEMY_GPIO2_BASE,
+ .ngpio = ALCHEMY_GPIO2_NUM,
+ },
+};
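+
+/*
+ * Editorial note: since .base equals ALCHEMY_GPIO1_BASE/ALCHEMY_GPIO2_BASE
+ * and the callbacks above add that same base to the chip-relative offset,
+ * the Linux GPIO numbers seen by gpiolib match the numbers expected by the
+ * low-level alchemy_gpioN_* helpers.  For example (illustrative only),
+ * gpio_request(ALCHEMY_GPIO1_BASE + 5, "foo") refers to pin 5 of GPIO1.
+ */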
+
+static int alchemy_gpic_get(struct gpio_chip *chip, unsigned int off)
+{
+ return !!au1300_gpio_get_value(off + AU1300_GPIO_BASE);
+}
+
+static void alchemy_gpic_set(struct gpio_chip *chip, unsigned int off, int v)
+{
+ au1300_gpio_set_value(off + AU1300_GPIO_BASE, v);
+}
+
+static int alchemy_gpic_dir_input(struct gpio_chip *chip, unsigned int off)
+{
+ return au1300_gpio_direction_input(off + AU1300_GPIO_BASE);
+}
+
+static int alchemy_gpic_dir_output(struct gpio_chip *chip, unsigned int off,
+ int v)
+{
+ return au1300_gpio_direction_output(off + AU1300_GPIO_BASE, v);
+}
+
+static int alchemy_gpic_gpio_to_irq(struct gpio_chip *chip, unsigned int off)
+{
+ return au1300_gpio_to_irq(off + AU1300_GPIO_BASE);
+}
+
+static struct gpio_chip au1300_gpiochip = {
+ .label = "alchemy-gpic",
+ .direction_input = alchemy_gpic_dir_input,
+ .direction_output = alchemy_gpic_dir_output,
+ .get = alchemy_gpic_get,
+ .set = alchemy_gpic_set,
+ .to_irq = alchemy_gpic_gpio_to_irq,
+ .base = AU1300_GPIO_BASE,
+ .ngpio = AU1300_GPIO_NUM,
+};
+
+static int __init alchemy_gpiochip_init(void)
+{
+ int ret = 0;
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ ret = gpiochip_add_data(&alchemy_gpio_chip[0], NULL);
+ break;
+ case ALCHEMY_CPU_AU1500 ... ALCHEMY_CPU_AU1200:
+ ret = gpiochip_add_data(&alchemy_gpio_chip[0], NULL);
+ ret |= gpiochip_add_data(&alchemy_gpio_chip[1], NULL);
+ break;
+ case ALCHEMY_CPU_AU1300:
+ ret = gpiochip_add_data(&au1300_gpiochip, NULL);
+ break;
+ }
+ return ret;
+}
+arch_initcall(alchemy_gpiochip_init);
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
new file mode 100644
index 000000000..da9f92200
--- /dev/null
+++ b/arch/mips/alchemy/common/irq.c
@@ -0,0 +1,996 @@
+/*
+ * Copyright 2001, 2007-2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1300.h>
+
+/* Interrupt Controller register offsets */
+#define IC_CFG0RD 0x40
+#define IC_CFG0SET 0x40
+#define IC_CFG0CLR 0x44
+#define IC_CFG1RD 0x48
+#define IC_CFG1SET 0x48
+#define IC_CFG1CLR 0x4C
+#define IC_CFG2RD 0x50
+#define IC_CFG2SET 0x50
+#define IC_CFG2CLR 0x54
+#define IC_REQ0INT 0x54
+#define IC_SRCRD 0x58
+#define IC_SRCSET 0x58
+#define IC_SRCCLR 0x5C
+#define IC_REQ1INT 0x5C
+#define IC_ASSIGNRD 0x60
+#define IC_ASSIGNSET 0x60
+#define IC_ASSIGNCLR 0x64
+#define IC_WAKERD 0x68
+#define IC_WAKESET 0x68
+#define IC_WAKECLR 0x6C
+#define IC_MASKRD 0x70
+#define IC_MASKSET 0x70
+#define IC_MASKCLR 0x74
+#define IC_RISINGRD 0x78
+#define IC_RISINGCLR 0x78
+#define IC_FALLINGRD 0x7C
+#define IC_FALLINGCLR 0x7C
+#define IC_TESTBIT 0x80
+
+/* per-processor fixed function irqs */
+struct alchemy_irqmap {
+ int irq; /* linux IRQ number */
+ int type; /* IRQ_TYPE_ */
+ int prio; /* irq priority, 0 highest, 3 lowest */
+ int internal; /* GPIC: internal source (no ext. pin)? */
+};
+
+static int au1x_ic_settype(struct irq_data *d, unsigned int type);
+static int au1300_gpic_settype(struct irq_data *d, unsigned int type);
+
+
+/* NOTE on interrupt priorities: The original writers of this code said:
+ *
+ * Because of the tight timing of SETUP token to reply transactions,
+ * the USB devices-side packet complete interrupt (USB_DEV_REQ_INT)
+ * needs the highest priority.
+ */
+struct alchemy_irqmap au1000_irqmap[] __initdata = {
+ { AU1000_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
+ { AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1000_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { -1, },
+};
+
+struct alchemy_irqmap au1500_irqmap[] __initdata = {
+ { AU1500_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
+ { AU1500_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1500_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1500_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { -1, },
+};
+
+struct alchemy_irqmap au1100_irqmap[] __initdata = {
+ { AU1100_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_SD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
+ { AU1100_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1100_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1100_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1100_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { -1, },
+};
+
+struct alchemy_irqmap au1550_irqmap[] __initdata = {
+ { AU1550_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_CRYPTO_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_PCI_RST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
+ { AU1550_NAND_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1550_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1550_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { -1, },
+};
+
+struct alchemy_irqmap au1200_irqmap[] __initdata = {
+ { AU1200_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_SWT_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_SD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_MAE_BE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_MAE_FE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_AES_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_CAMERA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
+ { AU1200_NAND_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
+ { AU1200_USB_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_MAE_BOTH_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { -1, },
+};
+
+static struct alchemy_irqmap au1300_irqmap[] __initdata = {
+ /* multifunction: gpio pin or device */
+ { AU1300_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_SD1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_SD2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ { AU1300_NAND_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
+ /* au1300 internal */
+ { AU1300_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_MMU_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_MPU_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_GPU_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_UDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 1, },
+ { AU1300_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_SD0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_USB_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_BSA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_MPE_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
+ { AU1300_ITE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_AES_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { AU1300_CIM_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
+ { -1, }, /* terminator */
+};
+
+/******************************************************************************/
+
+static void au1x_ic0_unmask(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_MASKSET);
+ __raw_writel(1 << bit, base + IC_WAKESET);
+ wmb();
+}
+
+static void au1x_ic1_unmask(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_MASKSET);
+ __raw_writel(1 << bit, base + IC_WAKESET);
+ wmb();
+}
+
+static void au1x_ic0_mask(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_MASKCLR);
+ __raw_writel(1 << bit, base + IC_WAKECLR);
+ wmb();
+}
+
+static void au1x_ic1_mask(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_MASKCLR);
+ __raw_writel(1 << bit, base + IC_WAKECLR);
+ wmb();
+}
+
+static void au1x_ic0_ack(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+
+ /*
+ * This may assume that we don't get interrupts from
+ * both edges at once, or if we do, that we don't care.
+ */
+ __raw_writel(1 << bit, base + IC_FALLINGCLR);
+ __raw_writel(1 << bit, base + IC_RISINGCLR);
+ wmb();
+}
+
+static void au1x_ic1_ack(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+
+ /*
+ * This may assume that we don't get interrupts from
+ * both edges at once, or if we do, that we don't care.
+ */
+ __raw_writel(1 << bit, base + IC_FALLINGCLR);
+ __raw_writel(1 << bit, base + IC_RISINGCLR);
+ wmb();
+}
+
+static void au1x_ic0_maskack(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_WAKECLR);
+ __raw_writel(1 << bit, base + IC_MASKCLR);
+ __raw_writel(1 << bit, base + IC_RISINGCLR);
+ __raw_writel(1 << bit, base + IC_FALLINGCLR);
+ wmb();
+}
+
+static void au1x_ic1_maskack(struct irq_data *d)
+{
+ unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+
+ __raw_writel(1 << bit, base + IC_WAKECLR);
+ __raw_writel(1 << bit, base + IC_MASKCLR);
+ __raw_writel(1 << bit, base + IC_RISINGCLR);
+ __raw_writel(1 << bit, base + IC_FALLINGCLR);
+ wmb();
+}
+
+static int au1x_ic1_setwake(struct irq_data *d, unsigned int on)
+{
+ int bit = d->irq - AU1000_INTC1_INT_BASE;
+ unsigned long wakemsk, flags;
+
+ /* Only GPIOs 0-7 can act as a wakeup source. Fortunately these
+ * are wired up identically on all supported variants.
+ */
+ if ((bit < 0) || (bit > 7))
+ return -EINVAL;
+
+ local_irq_save(flags);
+ wakemsk = alchemy_rdsys(AU1000_SYS_WAKEMSK);
+ if (on)
+ wakemsk |= 1 << bit;
+ else
+ wakemsk &= ~(1 << bit);
+ alchemy_wrsys(wakemsk, AU1000_SYS_WAKEMSK);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+/*
+ * irq_chips for both ICs; this way the mask handlers can be
+ * as short as possible.
+ */
+static struct irq_chip au1x_ic0_chip = {
+ .name = "Alchemy-IC0",
+ .irq_ack = au1x_ic0_ack,
+ .irq_mask = au1x_ic0_mask,
+ .irq_mask_ack = au1x_ic0_maskack,
+ .irq_unmask = au1x_ic0_unmask,
+ .irq_set_type = au1x_ic_settype,
+};
+
+static struct irq_chip au1x_ic1_chip = {
+ .name = "Alchemy-IC1",
+ .irq_ack = au1x_ic1_ack,
+ .irq_mask = au1x_ic1_mask,
+ .irq_mask_ack = au1x_ic1_maskack,
+ .irq_unmask = au1x_ic1_unmask,
+ .irq_set_type = au1x_ic_settype,
+ .irq_set_wake = au1x_ic1_setwake,
+};
+
+static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type)
+{
+ struct irq_chip *chip;
+ unsigned int bit, irq = d->irq;
+ irq_flow_handler_t handler = NULL;
+ const char *name = NULL;
+ void __iomem *base;
+ int ret;
+
+ if (irq >= AU1000_INTC1_INT_BASE) {
+ bit = irq - AU1000_INTC1_INT_BASE;
+ chip = &au1x_ic1_chip;
+ base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+ } else {
+ bit = irq - AU1000_INTC0_INT_BASE;
+ chip = &au1x_ic0_chip;
+ base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+ }
+
+ if (bit > 31)
+ return -EINVAL;
+
+ ret = 0;
+
+ switch (flow_type) { /* cfgregs 2:1:0 */
+ case IRQ_TYPE_EDGE_RISING: /* 0:0:1 */
+ __raw_writel(1 << bit, base + IC_CFG2CLR);
+ __raw_writel(1 << bit, base + IC_CFG1CLR);
+ __raw_writel(1 << bit, base + IC_CFG0SET);
+ handler = handle_edge_irq;
+ name = "riseedge";
+ break;
+ case IRQ_TYPE_EDGE_FALLING: /* 0:1:0 */
+ __raw_writel(1 << bit, base + IC_CFG2CLR);
+ __raw_writel(1 << bit, base + IC_CFG1SET);
+ __raw_writel(1 << bit, base + IC_CFG0CLR);
+ handler = handle_edge_irq;
+ name = "falledge";
+ break;
+ case IRQ_TYPE_EDGE_BOTH: /* 0:1:1 */
+ __raw_writel(1 << bit, base + IC_CFG2CLR);
+ __raw_writel(1 << bit, base + IC_CFG1SET);
+ __raw_writel(1 << bit, base + IC_CFG0SET);
+ handler = handle_edge_irq;
+ name = "bothedge";
+ break;
+ case IRQ_TYPE_LEVEL_HIGH: /* 1:0:1 */
+ __raw_writel(1 << bit, base + IC_CFG2SET);
+ __raw_writel(1 << bit, base + IC_CFG1CLR);
+ __raw_writel(1 << bit, base + IC_CFG0SET);
+ handler = handle_level_irq;
+ name = "hilevel";
+ break;
+ case IRQ_TYPE_LEVEL_LOW: /* 1:1:0 */
+ __raw_writel(1 << bit, base + IC_CFG2SET);
+ __raw_writel(1 << bit, base + IC_CFG1SET);
+ __raw_writel(1 << bit, base + IC_CFG0CLR);
+ handler = handle_level_irq;
+ name = "lowlevel";
+ break;
+ case IRQ_TYPE_NONE: /* 0:0:0 */
+ __raw_writel(1 << bit, base + IC_CFG2CLR);
+ __raw_writel(1 << bit, base + IC_CFG1CLR);
+ __raw_writel(1 << bit, base + IC_CFG0CLR);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ irq_set_chip_handler_name_locked(d, chip, handler, name);
+
+ wmb();
+
+ return ret;
+}
+
+/******************************************************************************/
+
+/*
+ * au1300_gpic_chgcfg - change PIN configuration.
+ * @gpio: pin to change (0-based GPIO number from datasheet).
+ * @clr: clear all bits set in 'clr'.
+ * @set: set these bits.
+ *
+ * Modifies a pin's configuration register: bits set in @clr are
+ * cleared in the register, bits set in @set are set.
+ */
+static inline void au1300_gpic_chgcfg(unsigned int gpio,
+ unsigned long clr,
+ unsigned long set)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long l;
+
+ r += gpio * 4; /* offset into pin config array */
+ l = __raw_readl(r + AU1300_GPIC_PINCFG);
+ l &= ~clr;
+ l |= set;
+ __raw_writel(l, r + AU1300_GPIC_PINCFG);
+ wmb();
+}
+
+/*
+ * au1300_pinfunc_to_gpio - assign a pin as GPIO input (GPIO ctrl).
+ * @pin: pin (0-based GPIO number from datasheet).
+ *
+ * Assigns a GPIO pin to the GPIO controller, so its level can either
+ * be read or set through the generic GPIO functions.
+ * If you need a GPOUT, use au1300_gpio_set_value(pin, 0/1).
+ * REVISIT: is this function really necessary?
+ */
+void au1300_pinfunc_to_gpio(enum au1300_multifunc_pins gpio)
+{
+ au1300_gpio_direction_input(gpio + AU1300_GPIO_BASE);
+}
+EXPORT_SYMBOL_GPL(au1300_pinfunc_to_gpio);
+
+/*
+ * au1300_pinfunc_to_dev - assign a pin to the device function.
+ * @pin: pin (0-based GPIO number from datasheet).
+ *
+ * Assigns a GPIO pin to its associated device function; the pin will be
+ * driven by the device and not through GPIO functions.
+ */
+void au1300_pinfunc_to_dev(enum au1300_multifunc_pins gpio)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long bit;
+
+ r += GPIC_GPIO_BANKOFF(gpio);
+ bit = GPIC_GPIO_TO_BIT(gpio);
+ __raw_writel(bit, r + AU1300_GPIC_DEVSEL);
+ wmb();
+}
+EXPORT_SYMBOL_GPL(au1300_pinfunc_to_dev);
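+
+/*
+ * Illustrative board-code sketch: a multifunction pin is handed either to
+ * gpiolib or to its peripheral, where 'pin' stands for a member of
+ * enum au1300_multifunc_pins from gpio-au1300.h:
+ *
+ *	au1300_pinfunc_to_gpio(pin);	read/drive it via gpiolib
+ *	au1300_pinfunc_to_dev(pin);	let the peripheral drive it
+ */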
+
+/*
+ * au1300_set_irq_priority - set internal priority of IRQ.
+ * @irq: irq to set priority (linux irq number).
+ * @p: priority (0 = highest, 3 = lowest).
+ */
+void au1300_set_irq_priority(unsigned int irq, int p)
+{
+ irq -= ALCHEMY_GPIC_INT_BASE;
+ au1300_gpic_chgcfg(irq, GPIC_CFG_IL_MASK, GPIC_CFG_IL_SET(p));
+}
+EXPORT_SYMBOL_GPL(au1300_set_irq_priority);
+
+/*
+ * au1300_set_dbdma_gpio - assign a gpio to one of the DBDMA triggers.
+ * @dchan: dbdma trigger select (0, 1).
+ * @gpio: pin to assign as trigger.
+ *
+ * DBDMA controller has 2 external trigger sources; this function
+ * assigns a GPIO to the selected trigger.
+ */
+void au1300_set_dbdma_gpio(int dchan, unsigned int gpio)
+{
+ unsigned long r;
+
+ if ((dchan >= 0) && (dchan <= 1)) {
+ r = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_DMASEL);
+ r &= ~(0xff << (8 * dchan));
+ r |= (gpio & 0x7f) << (8 * dchan);
+ __raw_writel(r, AU1300_GPIC_ADDR + AU1300_GPIC_DMASEL);
+ wmb();
+ }
+}
+
+static inline void gpic_pin_set_idlewake(unsigned int gpio, int allow)
+{
+ au1300_gpic_chgcfg(gpio, GPIC_CFG_IDLEWAKE,
+ allow ? GPIC_CFG_IDLEWAKE : 0);
+}
+
+static void au1300_gpic_mask(struct irq_data *d)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long bit, irq = d->irq;
+
+ irq -= ALCHEMY_GPIC_INT_BASE;
+ r += GPIC_GPIO_BANKOFF(irq);
+ bit = GPIC_GPIO_TO_BIT(irq);
+ __raw_writel(bit, r + AU1300_GPIC_IDIS);
+ wmb();
+
+ gpic_pin_set_idlewake(irq, 0);
+}
+
+static void au1300_gpic_unmask(struct irq_data *d)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long bit, irq = d->irq;
+
+ irq -= ALCHEMY_GPIC_INT_BASE;
+
+ gpic_pin_set_idlewake(irq, 1);
+
+ r += GPIC_GPIO_BANKOFF(irq);
+ bit = GPIC_GPIO_TO_BIT(irq);
+ __raw_writel(bit, r + AU1300_GPIC_IEN);
+ wmb();
+}
+
+static void au1300_gpic_maskack(struct irq_data *d)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long bit, irq = d->irq;
+
+ irq -= ALCHEMY_GPIC_INT_BASE;
+ r += GPIC_GPIO_BANKOFF(irq);
+ bit = GPIC_GPIO_TO_BIT(irq);
+ __raw_writel(bit, r + AU1300_GPIC_IPEND); /* ack */
+ __raw_writel(bit, r + AU1300_GPIC_IDIS); /* mask */
+ wmb();
+
+ gpic_pin_set_idlewake(irq, 0);
+}
+
+static void au1300_gpic_ack(struct irq_data *d)
+{
+ void __iomem *r = AU1300_GPIC_ADDR;
+ unsigned long bit, irq = d->irq;
+
+ irq -= ALCHEMY_GPIC_INT_BASE;
+ r += GPIC_GPIO_BANKOFF(irq);
+ bit = GPIC_GPIO_TO_BIT(irq);
+ __raw_writel(bit, r + AU1300_GPIC_IPEND); /* ack */
+ wmb();
+}
+
+static struct irq_chip au1300_gpic = {
+ .name = "GPIOINT",
+ .irq_ack = au1300_gpic_ack,
+ .irq_mask = au1300_gpic_mask,
+ .irq_mask_ack = au1300_gpic_maskack,
+ .irq_unmask = au1300_gpic_unmask,
+ .irq_set_type = au1300_gpic_settype,
+};
+
+static int au1300_gpic_settype(struct irq_data *d, unsigned int type)
+{
+ unsigned long s;
+ const char *name = NULL;
+ irq_flow_handler_t hdl = NULL;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ s = GPIC_CFG_IC_LEVEL_HIGH;
+ name = "high";
+ hdl = handle_level_irq;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ s = GPIC_CFG_IC_LEVEL_LOW;
+ name = "low";
+ hdl = handle_level_irq;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ s = GPIC_CFG_IC_EDGE_RISE;
+ name = "posedge";
+ hdl = handle_edge_irq;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ s = GPIC_CFG_IC_EDGE_FALL;
+ name = "negedge";
+ hdl = handle_edge_irq;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ s = GPIC_CFG_IC_EDGE_BOTH;
+ name = "bothedge";
+ hdl = handle_edge_irq;
+ break;
+ case IRQ_TYPE_NONE:
+ s = GPIC_CFG_IC_OFF;
+ name = "disabled";
+ hdl = handle_level_irq;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irq_set_chip_handler_name_locked(d, &au1300_gpic, hdl, name);
+
+ au1300_gpic_chgcfg(d->irq - ALCHEMY_GPIC_INT_BASE, GPIC_CFG_IC_MASK, s);
+
+ return 0;
+}
+
+/******************************************************************************/
+
+static inline void ic_init(void __iomem *base)
+{
+ /* initialize interrupt controller to a safe state */
+ __raw_writel(0xffffffff, base + IC_CFG0CLR);
+ __raw_writel(0xffffffff, base + IC_CFG1CLR);
+ __raw_writel(0xffffffff, base + IC_CFG2CLR);
+ __raw_writel(0xffffffff, base + IC_MASKCLR);
+ __raw_writel(0xffffffff, base + IC_ASSIGNCLR);
+ __raw_writel(0xffffffff, base + IC_WAKECLR);
+ __raw_writel(0xffffffff, base + IC_SRCSET);
+ __raw_writel(0xffffffff, base + IC_FALLINGCLR);
+ __raw_writel(0xffffffff, base + IC_RISINGCLR);
+ __raw_writel(0x00000000, base + IC_TESTBIT);
+ wmb();
+}
+
+static unsigned long alchemy_gpic_pmdata[ALCHEMY_GPIC_INT_NUM + 6];
+
+static inline void alchemy_ic_suspend_one(void __iomem *base, unsigned long *d)
+{
+ d[0] = __raw_readl(base + IC_CFG0RD);
+ d[1] = __raw_readl(base + IC_CFG1RD);
+ d[2] = __raw_readl(base + IC_CFG2RD);
+ d[3] = __raw_readl(base + IC_SRCRD);
+ d[4] = __raw_readl(base + IC_ASSIGNRD);
+ d[5] = __raw_readl(base + IC_WAKERD);
+ d[6] = __raw_readl(base + IC_MASKRD);
+ ic_init(base); /* shut it up too while at it */
+}
+
+static inline void alchemy_ic_resume_one(void __iomem *base, unsigned long *d)
+{
+ ic_init(base);
+
+ __raw_writel(d[0], base + IC_CFG0SET);
+ __raw_writel(d[1], base + IC_CFG1SET);
+ __raw_writel(d[2], base + IC_CFG2SET);
+ __raw_writel(d[3], base + IC_SRCSET);
+ __raw_writel(d[4], base + IC_ASSIGNSET);
+ __raw_writel(d[5], base + IC_WAKESET);
+ wmb();
+
+ __raw_writel(d[6], base + IC_MASKSET);
+ wmb();
+}
+
+static int alchemy_ic_suspend(void)
+{
+ alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR),
+ alchemy_gpic_pmdata);
+ alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR),
+ &alchemy_gpic_pmdata[7]);
+ return 0;
+}
+
+static void alchemy_ic_resume(void)
+{
+ alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR),
+ &alchemy_gpic_pmdata[7]);
+ alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR),
+ alchemy_gpic_pmdata);
+}
+
+static int alchemy_gpic_suspend(void)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1300_GPIC_PHYS_ADDR);
+ int i;
+
+ /* save 4 interrupt mask status registers */
+ alchemy_gpic_pmdata[0] = __raw_readl(base + AU1300_GPIC_IEN + 0x0);
+ alchemy_gpic_pmdata[1] = __raw_readl(base + AU1300_GPIC_IEN + 0x4);
+ alchemy_gpic_pmdata[2] = __raw_readl(base + AU1300_GPIC_IEN + 0x8);
+ alchemy_gpic_pmdata[3] = __raw_readl(base + AU1300_GPIC_IEN + 0xc);
+
+ /* save misc register(s) */
+ alchemy_gpic_pmdata[4] = __raw_readl(base + AU1300_GPIC_DMASEL);
+
+ /* keep it very quiet: mask all interrupt sources */
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x0);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x4);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x8);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0xc);
+ wmb();
+
+ /* save pin/int-type configuration */
+ base += AU1300_GPIC_PINCFG;
+ for (i = 0; i < ALCHEMY_GPIC_INT_NUM; i++)
+ alchemy_gpic_pmdata[i + 5] = __raw_readl(base + (i << 2));
+
+ wmb();
+
+ return 0;
+}
+
+static void alchemy_gpic_resume(void)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1300_GPIC_PHYS_ADDR);
+ int i;
+
+ /* disable all first */
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x0);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x4);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x8);
+ __raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0xc);
+ wmb();
+
+ /* restore pin/int-type configurations */
+ base += AU1300_GPIC_PINCFG;
+ for (i = 0; i < ALCHEMY_GPIC_INT_NUM; i++)
+ __raw_writel(alchemy_gpic_pmdata[i + 5], base + (i << 2));
+ wmb();
+
+ /* restore misc register(s) */
+ base = (void __iomem *)KSEG1ADDR(AU1300_GPIC_PHYS_ADDR);
+ __raw_writel(alchemy_gpic_pmdata[4], base + AU1300_GPIC_DMASEL);
+ wmb();
+
+ /* finally restore masks */
+ __raw_writel(alchemy_gpic_pmdata[0], base + AU1300_GPIC_IEN + 0x0);
+ __raw_writel(alchemy_gpic_pmdata[1], base + AU1300_GPIC_IEN + 0x4);
+ __raw_writel(alchemy_gpic_pmdata[2], base + AU1300_GPIC_IEN + 0x8);
+ __raw_writel(alchemy_gpic_pmdata[3], base + AU1300_GPIC_IEN + 0xc);
+ wmb();
+}
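+
+/*
+ * Editorial summary of alchemy_gpic_pmdata[] as used above: entries 0..3
+ * hold the four interrupt-enable words, entry 4 holds DMASEL, and
+ * entries 5.. hold one PINCFG word per GPIC source.  Resume restores the
+ * pin configuration and DMASEL before re-enabling the saved masks.
+ */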
+
+static struct syscore_ops alchemy_ic_pmops = {
+ .suspend = alchemy_ic_suspend,
+ .resume = alchemy_ic_resume,
+};
+
+static struct syscore_ops alchemy_gpic_pmops = {
+ .suspend = alchemy_gpic_suspend,
+ .resume = alchemy_gpic_resume,
+};
+
+/******************************************************************************/
+
+/* create chained handlers for the 4 IC requests to the MIPS IRQ ctrl */
+#define DISP(name, base, addr) \
+static void au1000_##name##_dispatch(struct irq_desc *d) \
+{ \
+ unsigned long r = __raw_readl((void __iomem *)KSEG1ADDR(addr)); \
+ if (likely(r)) \
+ generic_handle_irq(base + __ffs(r)); \
+ else \
+ spurious_interrupt(); \
+}
+
+DISP(ic0r0, AU1000_INTC0_INT_BASE, AU1000_IC0_PHYS_ADDR + IC_REQ0INT)
+DISP(ic0r1, AU1000_INTC0_INT_BASE, AU1000_IC0_PHYS_ADDR + IC_REQ1INT)
+DISP(ic1r0, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ0INT)
+DISP(ic1r1, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ1INT)
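+
+/*
+ * Editorial note: each dispatcher generated above reads the controller's
+ * REQ0INT/REQ1INT pending word and forwards the lowest set bit as
+ * (base + __ffs(r)) to the generic IRQ layer; e.g. au1000_ic0r0_dispatch()
+ * serves request-0 interrupts of IC0 starting at AU1000_INTC0_INT_BASE.
+ */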
+
+static void alchemy_gpic_dispatch(struct irq_desc *d)
+{
+ int i = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_PRIENC);
+ generic_handle_irq(ALCHEMY_GPIC_INT_BASE + i);
+}
+
+/******************************************************************************/
+
+static void __init au1000_init_irq(struct alchemy_irqmap *map)
+{
+ unsigned int bit, irq_nr;
+ void __iomem *base;
+
+ ic_init((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR));
+ ic_init((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR));
+ register_syscore_ops(&alchemy_ic_pmops);
+ mips_cpu_irq_init();
+
+ /* register all 64 possible IC0+IC1 irq sources as type "none".
+ * Use set_irq_type() to set edge/level behaviour at runtime.
+ */
+ for (irq_nr = AU1000_INTC0_INT_BASE;
+ (irq_nr < AU1000_INTC0_INT_BASE + 32); irq_nr++)
+ au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE);
+
+ for (irq_nr = AU1000_INTC1_INT_BASE;
+ (irq_nr < AU1000_INTC1_INT_BASE + 32); irq_nr++)
+ au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE);
+
+ /*
+ * Initialize the per-processor fixed interrupt sources (in IC0/IC1).
+ */
+ while (map->irq != -1) {
+ irq_nr = map->irq;
+
+ if (irq_nr >= AU1000_INTC1_INT_BASE) {
+ bit = irq_nr - AU1000_INTC1_INT_BASE;
+ base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
+ } else {
+ bit = irq_nr - AU1000_INTC0_INT_BASE;
+ base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
+ }
+ if (map->prio == 0)
+ __raw_writel(1 << bit, base + IC_ASSIGNSET);
+
+ au1x_ic_settype(irq_get_irq_data(irq_nr), map->type);
+ ++map;
+ }
+
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 2, au1000_ic0r0_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 3, au1000_ic0r1_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 4, au1000_ic1r0_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 5, au1000_ic1r1_dispatch);
+}
+
+static void __init alchemy_gpic_init_irq(const struct alchemy_irqmap *dints)
+{
+ int i;
+ void __iomem *bank_base;
+
+ register_syscore_ops(&alchemy_gpic_pmops);
+ mips_cpu_irq_init();
+
+ /* disable & ack all possible interrupt sources */
+ for (i = 0; i < 4; i++) {
+ bank_base = AU1300_GPIC_ADDR + (i * 4);
+ __raw_writel(~0UL, bank_base + AU1300_GPIC_IDIS);
+ wmb();
+ __raw_writel(~0UL, bank_base + AU1300_GPIC_IPEND);
+ wmb();
+ }
+
+ /* register an irq_chip for them, with 2nd highest priority */
+ for (i = ALCHEMY_GPIC_INT_BASE; i <= ALCHEMY_GPIC_INT_LAST; i++) {
+ au1300_set_irq_priority(i, 1);
+ au1300_gpic_settype(irq_get_irq_data(i), IRQ_TYPE_NONE);
+ }
+
+ /* setup known on-chip sources */
+ while ((i = dints->irq) != -1) {
+ au1300_gpic_settype(irq_get_irq_data(i), dints->type);
+ au1300_set_irq_priority(i, dints->prio);
+
+ if (dints->internal)
+ au1300_pinfunc_to_dev(i - ALCHEMY_GPIC_INT_BASE);
+
+ dints++;
+ }
+
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 2, alchemy_gpic_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 3, alchemy_gpic_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 4, alchemy_gpic_dispatch);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 5, alchemy_gpic_dispatch);
+}
+
+/******************************************************************************/
+
+void __init arch_init_irq(void)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ au1000_init_irq(au1000_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1500:
+ au1000_init_irq(au1500_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1100:
+ au1000_init_irq(au1100_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1550:
+ au1000_init_irq(au1550_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1200:
+ au1000_init_irq(au1200_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1300:
+ alchemy_gpic_init_irq(au1300_irqmap);
+ break;
+ default:
+ pr_err("unknown Alchemy IRQ core\n");
+ break;
+ }
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned long r = (read_c0_status() & read_c0_cause()) >> 8;
+ do_IRQ(MIPS_CPU_IRQ_BASE + __ffs(r & 0xff));
+}
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
new file mode 100644
index 000000000..b8f3397c5
--- /dev/null
+++ b/arch/mips/alchemy/common/platform.c
@@ -0,0 +1,458 @@
+/*
+ * Platform device support for Au1x00 SoCs.
+ *
+ * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
+ *
+ * (C) Copyright Embedded Alley Solutions, Inc 2005
+ * Author: Pantelis Antoniou <pantelis@embeddedalley.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+#include <linux/slab.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/ohci_pdriver.h>
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
+#include <asm/mach-au1x00/au1100_mmc.h>
+#include <asm/mach-au1x00/au1xxx_eth.h>
+
+#include <prom.h>
+
+static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
+ unsigned int old_state)
+{
+#ifdef CONFIG_SERIAL_8250
+ switch (state) {
+ case 0:
+ alchemy_uart_enable(CPHYSADDR(port->membase));
+ serial8250_do_pm(port, state, old_state);
+ break;
+ case 3: /* power off */
+ serial8250_do_pm(port, state, old_state);
+ alchemy_uart_disable(CPHYSADDR(port->membase));
+ break;
+ default:
+ serial8250_do_pm(port, state, old_state);
+ break;
+ }
+#endif
+}
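+
+/*
+ * Editorial note: state 0 is the serial core's "powered on" state and
+ * state 3 is "powered off"; the callback above brackets serial8250_do_pm()
+ * so the UART block is enabled before the core touches the port and is
+ * disabled again only after the core has powered it down.
+ */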
+
+#define PORT(_base, _irq) \
+ { \
+ .mapbase = _base, \
+ .irq = _irq, \
+ .regshift = 2, \
+ .iotype = UPIO_AU, \
+ .flags = UPF_SKIP_TEST | UPF_IOREMAP | \
+ UPF_FIXED_TYPE, \
+ .type = PORT_16550A, \
+ .pm = alchemy_8250_pm, \
+ }
+
+static struct plat_serial8250_port au1x00_uart_data[][4] __initdata = {
+ [ALCHEMY_CPU_AU1000] = {
+ PORT(AU1000_UART0_PHYS_ADDR, AU1000_UART0_INT),
+ PORT(AU1000_UART1_PHYS_ADDR, AU1000_UART1_INT),
+ PORT(AU1000_UART2_PHYS_ADDR, AU1000_UART2_INT),
+ PORT(AU1000_UART3_PHYS_ADDR, AU1000_UART3_INT),
+ },
+ [ALCHEMY_CPU_AU1500] = {
+ PORT(AU1000_UART0_PHYS_ADDR, AU1500_UART0_INT),
+ PORT(AU1000_UART3_PHYS_ADDR, AU1500_UART3_INT),
+ },
+ [ALCHEMY_CPU_AU1100] = {
+ PORT(AU1000_UART0_PHYS_ADDR, AU1100_UART0_INT),
+ PORT(AU1000_UART1_PHYS_ADDR, AU1100_UART1_INT),
+ PORT(AU1000_UART3_PHYS_ADDR, AU1100_UART3_INT),
+ },
+ [ALCHEMY_CPU_AU1550] = {
+ PORT(AU1000_UART0_PHYS_ADDR, AU1550_UART0_INT),
+ PORT(AU1000_UART1_PHYS_ADDR, AU1550_UART1_INT),
+ PORT(AU1000_UART3_PHYS_ADDR, AU1550_UART3_INT),
+ },
+ [ALCHEMY_CPU_AU1200] = {
+ PORT(AU1000_UART0_PHYS_ADDR, AU1200_UART0_INT),
+ PORT(AU1000_UART1_PHYS_ADDR, AU1200_UART1_INT),
+ },
+ [ALCHEMY_CPU_AU1300] = {
+ PORT(AU1300_UART0_PHYS_ADDR, AU1300_UART0_INT),
+ PORT(AU1300_UART1_PHYS_ADDR, AU1300_UART1_INT),
+ PORT(AU1300_UART2_PHYS_ADDR, AU1300_UART2_INT),
+ PORT(AU1300_UART3_PHYS_ADDR, AU1300_UART3_INT),
+ },
+};
+
+static struct platform_device au1xx0_uart_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_AU1X00,
+};
+
+static void __init alchemy_setup_uarts(int ctype)
+{
+ long uartclk;
+ int s = sizeof(struct plat_serial8250_port);
+ int c = alchemy_get_uarts(ctype);
+ struct plat_serial8250_port *ports;
+ struct clk *clk = clk_get(NULL, ALCHEMY_PERIPH_CLK);
+
+ if (IS_ERR(clk))
+ return;
+ if (clk_prepare_enable(clk)) {
+ clk_put(clk);
+ return;
+ }
+ uartclk = clk_get_rate(clk);
+ clk_put(clk);
+
+ ports = kcalloc(c + 1, s, GFP_KERNEL);
+ if (!ports) {
+ printk(KERN_INFO "Alchemy: no memory for UART data\n");
+ return;
+ }
+ memcpy(ports, au1x00_uart_data[ctype], s * c);
+ au1xx0_uart_device.dev.platform_data = ports;
+
+ /* Fill up uartclk. */
+ for (s = 0; s < c; s++)
+ ports[s].uartclk = uartclk;
+ if (platform_device_register(&au1xx0_uart_device))
+ printk(KERN_INFO "Alchemy: failed to register UARTs\n");
+}
+
+
+static u64 alchemy_all_dmamask = DMA_BIT_MASK(32);
+
+/* Power on callback for the ehci platform driver */
+static int alchemy_ehci_power_on(struct platform_device *pdev)
+{
+ return alchemy_usb_control(ALCHEMY_USB_EHCI0, 1);
+}
+
+/* Power off/suspend callback for the ehci platform driver */
+static void alchemy_ehci_power_off(struct platform_device *pdev)
+{
+ alchemy_usb_control(ALCHEMY_USB_EHCI0, 0);
+}
+
+static struct usb_ehci_pdata alchemy_ehci_pdata = {
+ .no_io_watchdog = 1,
+ .power_on = alchemy_ehci_power_on,
+ .power_off = alchemy_ehci_power_off,
+ .power_suspend = alchemy_ehci_power_off,
+};
+
+/* Power on callback for the ohci platform driver */
+static int alchemy_ohci_power_on(struct platform_device *pdev)
+{
+ int unit;
+
+ unit = (pdev->id == 1) ?
+ ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0;
+
+ return alchemy_usb_control(unit, 1);
+}
+
+/* Power off/suspend callback for the ohci platform driver */
+static void alchemy_ohci_power_off(struct platform_device *pdev)
+{
+ int unit;
+
+ unit = (pdev->id == 1) ?
+ ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0;
+
+ alchemy_usb_control(unit, 0);
+}
+
+static struct usb_ohci_pdata alchemy_ohci_pdata = {
+ .power_on = alchemy_ohci_power_on,
+ .power_off = alchemy_ohci_power_off,
+ .power_suspend = alchemy_ohci_power_off,
+};
+
+static unsigned long alchemy_ohci_data[][2] __initdata = {
+ [ALCHEMY_CPU_AU1000] = { AU1000_USB_OHCI_PHYS_ADDR, AU1000_USB_HOST_INT },
+ [ALCHEMY_CPU_AU1500] = { AU1000_USB_OHCI_PHYS_ADDR, AU1500_USB_HOST_INT },
+ [ALCHEMY_CPU_AU1100] = { AU1000_USB_OHCI_PHYS_ADDR, AU1100_USB_HOST_INT },
+ [ALCHEMY_CPU_AU1550] = { AU1550_USB_OHCI_PHYS_ADDR, AU1550_USB_HOST_INT },
+ [ALCHEMY_CPU_AU1200] = { AU1200_USB_OHCI_PHYS_ADDR, AU1200_USB_INT },
+ [ALCHEMY_CPU_AU1300] = { AU1300_USB_OHCI0_PHYS_ADDR, AU1300_USB_INT },
+};
+
+static unsigned long alchemy_ehci_data[][2] __initdata = {
+ [ALCHEMY_CPU_AU1200] = { AU1200_USB_EHCI_PHYS_ADDR, AU1200_USB_INT },
+ [ALCHEMY_CPU_AU1300] = { AU1300_USB_EHCI_PHYS_ADDR, AU1300_USB_INT },
+};
+
+static int __init _new_usbres(struct resource **r, struct platform_device **d)
+{
+ *r = kcalloc(2, sizeof(struct resource), GFP_KERNEL);
+ if (!*r)
+ return -ENOMEM;
+ *d = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
+ if (!*d) {
+ kfree(*r);
+ return -ENOMEM;
+ }
+
+ (*d)->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ (*d)->num_resources = 2;
+ (*d)->resource = *r;
+
+ return 0;
+}
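+
+/*
+ * Editorial note: _new_usbres() only allocates the two-element resource
+ * array and a bare platform_device with a 32-bit coherent DMA mask;
+ * alchemy_setup_usb() below fills in the per-SoC addresses/IRQs and
+ * registers the device.
+ */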
+
+static void __init alchemy_setup_usb(int ctype)
+{
+ struct resource *res;
+ struct platform_device *pdev;
+
+ /* setup OHCI0. Every variant has one */
+ if (_new_usbres(&res, &pdev))
+ return;
+
+ res[0].start = alchemy_ohci_data[ctype][0];
+ res[0].end = res[0].start + 0x100 - 1;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = alchemy_ohci_data[ctype][1];
+ res[1].end = res[1].start;
+ res[1].flags = IORESOURCE_IRQ;
+ pdev->name = "ohci-platform";
+ pdev->id = 0;
+ pdev->dev.dma_mask = &alchemy_all_dmamask;
+ pdev->dev.platform_data = &alchemy_ohci_pdata;
+
+ if (platform_device_register(pdev))
+ printk(KERN_INFO "Alchemy USB: cannot add OHCI0\n");
+
+
+ /* setup EHCI0: Au1200/Au1300 */
+ if ((ctype == ALCHEMY_CPU_AU1200) || (ctype == ALCHEMY_CPU_AU1300)) {
+ if (_new_usbres(&res, &pdev))
+ return;
+
+ res[0].start = alchemy_ehci_data[ctype][0];
+ res[0].end = res[0].start + 0x100 - 1;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = alchemy_ehci_data[ctype][1];
+ res[1].end = res[1].start;
+ res[1].flags = IORESOURCE_IRQ;
+ pdev->name = "ehci-platform";
+ pdev->id = 0;
+ pdev->dev.dma_mask = &alchemy_all_dmamask;
+ pdev->dev.platform_data = &alchemy_ehci_pdata;
+
+ if (platform_device_register(pdev))
+ printk(KERN_INFO "Alchemy USB: cannot add EHCI0\n");
+ }
+
+ /* Au1300: OHCI1 */
+ if (ctype == ALCHEMY_CPU_AU1300) {
+ if (_new_usbres(&res, &pdev))
+ return;
+
+ res[0].start = AU1300_USB_OHCI1_PHYS_ADDR;
+ res[0].end = res[0].start + 0x100 - 1;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = AU1300_USB_INT;
+ res[1].end = res[1].start;
+ res[1].flags = IORESOURCE_IRQ;
+ pdev->name = "ohci-platform";
+ pdev->id = 1;
+ pdev->dev.dma_mask = &alchemy_all_dmamask;
+ pdev->dev.platform_data = &alchemy_ohci_pdata;
+
+ if (platform_device_register(pdev))
+ printk(KERN_INFO "Alchemy USB: cannot add OHCI1\n");
+ }
+}
+
+/* Macro to help defining the Ethernet MAC resources */
+#define MAC_RES_COUNT 4 /* MAC regs, MAC en, MAC INT, MACDMA regs */
+#define MAC_RES(_base, _enable, _irq, _macdma) \
+ { \
+ .start = _base, \
+ .end = _base + 0xffff, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = _enable, \
+ .end = _enable + 0x3, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = _irq, \
+ .end = _irq, \
+ .flags = IORESOURCE_IRQ \
+ }, \
+ { \
+ .start = _macdma, \
+ .end = _macdma + 0x1ff, \
+ .flags = IORESOURCE_MEM, \
+ }
+
+static struct resource au1xxx_eth0_resources[][MAC_RES_COUNT] __initdata = {
+ [ALCHEMY_CPU_AU1000] = {
+ MAC_RES(AU1000_MAC0_PHYS_ADDR,
+ AU1000_MACEN_PHYS_ADDR,
+ AU1000_MAC0_DMA_INT,
+ AU1000_MACDMA0_PHYS_ADDR)
+ },
+ [ALCHEMY_CPU_AU1500] = {
+ MAC_RES(AU1500_MAC0_PHYS_ADDR,
+ AU1500_MACEN_PHYS_ADDR,
+ AU1500_MAC0_DMA_INT,
+ AU1000_MACDMA0_PHYS_ADDR)
+ },
+ [ALCHEMY_CPU_AU1100] = {
+ MAC_RES(AU1000_MAC0_PHYS_ADDR,
+ AU1000_MACEN_PHYS_ADDR,
+ AU1100_MAC0_DMA_INT,
+ AU1000_MACDMA0_PHYS_ADDR)
+ },
+ [ALCHEMY_CPU_AU1550] = {
+ MAC_RES(AU1000_MAC0_PHYS_ADDR,
+ AU1000_MACEN_PHYS_ADDR,
+ AU1550_MAC0_DMA_INT,
+ AU1000_MACDMA0_PHYS_ADDR)
+ },
+};
+
+static struct au1000_eth_platform_data au1xxx_eth0_platform_data = {
+ .phy1_search_mac0 = 1,
+};
+
+static struct platform_device au1xxx_eth0_device = {
+ .name = "au1000-eth",
+ .id = 0,
+ .num_resources = MAC_RES_COUNT,
+ .dev = {
+ .dma_mask = &alchemy_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &au1xxx_eth0_platform_data,
+ },
+};
+
+static struct resource au1xxx_eth1_resources[][MAC_RES_COUNT] __initdata = {
+ [ALCHEMY_CPU_AU1000] = {
+ MAC_RES(AU1000_MAC1_PHYS_ADDR,
+ AU1000_MACEN_PHYS_ADDR + 4,
+ AU1000_MAC1_DMA_INT,
+ AU1000_MACDMA1_PHYS_ADDR)
+ },
+ [ALCHEMY_CPU_AU1500] = {
+ MAC_RES(AU1500_MAC1_PHYS_ADDR,
+ AU1500_MACEN_PHYS_ADDR + 4,
+ AU1500_MAC1_DMA_INT,
+ AU1000_MACDMA1_PHYS_ADDR)
+ },
+ [ALCHEMY_CPU_AU1550] = {
+ MAC_RES(AU1000_MAC1_PHYS_ADDR,
+ AU1000_MACEN_PHYS_ADDR + 4,
+ AU1550_MAC1_DMA_INT,
+ AU1000_MACDMA1_PHYS_ADDR)
+ },
+};
+
+static struct au1000_eth_platform_data au1xxx_eth1_platform_data = {
+ .phy1_search_mac0 = 1,
+};
+
+static struct platform_device au1xxx_eth1_device = {
+ .name = "au1000-eth",
+ .id = 1,
+ .num_resources = MAC_RES_COUNT,
+ .dev = {
+ .dma_mask = &alchemy_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &au1xxx_eth1_platform_data,
+ },
+};
+
+void __init au1xxx_override_eth_cfg(unsigned int port,
+ struct au1000_eth_platform_data *eth_data)
+{
+ if (!eth_data || port > 1)
+ return;
+
+ if (port == 0)
+ memcpy(&au1xxx_eth0_platform_data, eth_data,
+ sizeof(struct au1000_eth_platform_data));
+ else
+ memcpy(&au1xxx_eth1_platform_data, eth_data,
+ sizeof(struct au1000_eth_platform_data));
+}
+
+static void __init alchemy_setup_macs(int ctype)
+{
+ int ret, i;
+ unsigned char ethaddr[6];
+ struct resource *macres;
+
+ /* Handle 1st MAC */
+ if (alchemy_get_macs(ctype) < 1)
+ return;
+
+ macres = kmemdup(au1xxx_eth0_resources[ctype],
+ sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL);
+ if (!macres) {
+ printk(KERN_INFO "Alchemy: no memory for MAC0 resources\n");
+ return;
+ }
+ au1xxx_eth0_device.resource = macres;
+
+ i = prom_get_ethernet_addr(ethaddr);
+ if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac))
+ memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6);
+
+ ret = platform_device_register(&au1xxx_eth0_device);
+ if (ret)
+ printk(KERN_INFO "Alchemy: failed to register MAC0\n");
+
+
+ /* Handle 2nd MAC */
+ if (alchemy_get_macs(ctype) < 2)
+ return;
+
+ macres = kmemdup(au1xxx_eth1_resources[ctype],
+ sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL);
+ if (!macres) {
+ printk(KERN_INFO "Alchemy: no memory for MAC1 resources\n");
+ return;
+ }
+ au1xxx_eth1_device.resource = macres;
+
+ ethaddr[5] += 1; /* next addr for 2nd MAC */
+ if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac))
+ memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6);
+
+ /* Register second MAC if enabled in pinfunc */
+ if (!(alchemy_rdsys(AU1000_SYS_PINFUNC) & SYS_PF_NI2)) {
+ ret = platform_device_register(&au1xxx_eth1_device);
+ if (ret)
+ printk(KERN_INFO "Alchemy: failed to register MAC1\n");
+ }
+}
+
+static int __init au1xxx_platform_init(void)
+{
+ int ctype = alchemy_get_cputype();
+
+ alchemy_setup_uarts(ctype);
+ alchemy_setup_macs(ctype);
+ alchemy_setup_usb(ctype);
+
+ return 0;
+}
+
+arch_initcall(au1xxx_platform_init);
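A board file can replace the default Ethernet platform data through au1xxx_override_eth_cfg() before the au1000-eth driver probes the MAC devices registered above. The sketch below is illustrative only: the board init function name and the header that provides struct au1000_eth_platform_data are assumptions, while the fields used (phy1_search_mac0, mac[]) appear in this file.

#include <linux/init.h>
#include <asm/mach-au1x00/au1000.h>	/* assumed to pull in the eth pdata type */

static int __init myboard_eth_setup(void)
{
	static struct au1000_eth_platform_data pd = {
		.phy1_search_mac0 = 0,
		.mac = { 0x00, 0x50, 0xc2, 0x11, 0x22, 0x33 },
	};

	/* copies over the defaults kept in platform.c for MAC0 */
	au1xxx_override_eth_cfg(0, &pd);
	return 0;
}
arch_initcall(myboard_eth_setup);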
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
new file mode 100644
index 000000000..303257b69
--- /dev/null
+++ b/arch/mips/alchemy/common/power.c
@@ -0,0 +1,132 @@
+/*
+ * BRIEF MODULE DESCRIPTION
+ * Au1xx0 Power Management routines.
+ *
+ * Copyright 2001, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * Some of the routines are right out of init/main.c, whose
+ * copyrights apply here.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/pm.h>
+#include <linux/sysctl.h>
+#include <linux/jiffies.h>
+
+#include <linux/uaccess.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/*
+ * We need to save/restore a bunch of core registers that are
+ * either volatile or reset to some state across a processor sleep.
+ * If reading a register doesn't provide a proper result for a
+ * later restore, we have to provide a function for loading that
+ * register and save a copy.
+ *
+ * We only have to save/restore registers that aren't otherwise
+ * done as part of a driver pm_* function.
+ */
+static unsigned int sleep_sys_clocks[5];
+static unsigned int sleep_sys_pinfunc;
+static unsigned int sleep_static_memctlr[4][3];
+
+
+static void save_core_regs(void)
+{
+ /* Clocks and PLLs. */
+ sleep_sys_clocks[0] = alchemy_rdsys(AU1000_SYS_FREQCTRL0);
+ sleep_sys_clocks[1] = alchemy_rdsys(AU1000_SYS_FREQCTRL1);
+ sleep_sys_clocks[2] = alchemy_rdsys(AU1000_SYS_CLKSRC);
+ sleep_sys_clocks[3] = alchemy_rdsys(AU1000_SYS_CPUPLL);
+ sleep_sys_clocks[4] = alchemy_rdsys(AU1000_SYS_AUXPLL);
+
+ /* pin mux config */
+ sleep_sys_pinfunc = alchemy_rdsys(AU1000_SYS_PINFUNC);
+
+ /* Save the static memory controller configuration. */
+ sleep_static_memctlr[0][0] = alchemy_rdsmem(AU1000_MEM_STCFG0);
+ sleep_static_memctlr[0][1] = alchemy_rdsmem(AU1000_MEM_STTIME0);
+ sleep_static_memctlr[0][2] = alchemy_rdsmem(AU1000_MEM_STADDR0);
+ sleep_static_memctlr[1][0] = alchemy_rdsmem(AU1000_MEM_STCFG1);
+ sleep_static_memctlr[1][1] = alchemy_rdsmem(AU1000_MEM_STTIME1);
+ sleep_static_memctlr[1][2] = alchemy_rdsmem(AU1000_MEM_STADDR1);
+ sleep_static_memctlr[2][0] = alchemy_rdsmem(AU1000_MEM_STCFG2);
+ sleep_static_memctlr[2][1] = alchemy_rdsmem(AU1000_MEM_STTIME2);
+ sleep_static_memctlr[2][2] = alchemy_rdsmem(AU1000_MEM_STADDR2);
+ sleep_static_memctlr[3][0] = alchemy_rdsmem(AU1000_MEM_STCFG3);
+ sleep_static_memctlr[3][1] = alchemy_rdsmem(AU1000_MEM_STTIME3);
+ sleep_static_memctlr[3][2] = alchemy_rdsmem(AU1000_MEM_STADDR3);
+}
+
+static void restore_core_regs(void)
+{
+ /* restore clock configuration. Writing CPUPLL last will
+ * stall a bit and stabilize other clocks (unless this is
+ * one of those Au1000s with a write-only PLL, where we don't
+ * have a valid value)
+ */
+ alchemy_wrsys(sleep_sys_clocks[0], AU1000_SYS_FREQCTRL0);
+ alchemy_wrsys(sleep_sys_clocks[1], AU1000_SYS_FREQCTRL1);
+ alchemy_wrsys(sleep_sys_clocks[2], AU1000_SYS_CLKSRC);
+ alchemy_wrsys(sleep_sys_clocks[4], AU1000_SYS_AUXPLL);
+ if (!au1xxx_cpu_has_pll_wo())
+ alchemy_wrsys(sleep_sys_clocks[3], AU1000_SYS_CPUPLL);
+
+ alchemy_wrsys(sleep_sys_pinfunc, AU1000_SYS_PINFUNC);
+
+ /* Restore the static memory controller configuration. */
+ alchemy_wrsmem(sleep_static_memctlr[0][0], AU1000_MEM_STCFG0);
+ alchemy_wrsmem(sleep_static_memctlr[0][1], AU1000_MEM_STTIME0);
+ alchemy_wrsmem(sleep_static_memctlr[0][2], AU1000_MEM_STADDR0);
+ alchemy_wrsmem(sleep_static_memctlr[1][0], AU1000_MEM_STCFG1);
+ alchemy_wrsmem(sleep_static_memctlr[1][1], AU1000_MEM_STTIME1);
+ alchemy_wrsmem(sleep_static_memctlr[1][2], AU1000_MEM_STADDR1);
+ alchemy_wrsmem(sleep_static_memctlr[2][0], AU1000_MEM_STCFG2);
+ alchemy_wrsmem(sleep_static_memctlr[2][1], AU1000_MEM_STTIME2);
+ alchemy_wrsmem(sleep_static_memctlr[2][2], AU1000_MEM_STADDR2);
+ alchemy_wrsmem(sleep_static_memctlr[3][0], AU1000_MEM_STCFG3);
+ alchemy_wrsmem(sleep_static_memctlr[3][1], AU1000_MEM_STTIME3);
+ alchemy_wrsmem(sleep_static_memctlr[3][2], AU1000_MEM_STADDR3);
+}
+
+void au_sleep(void)
+{
+ save_core_regs();
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ alchemy_sleep_au1000();
+ break;
+ case ALCHEMY_CPU_AU1550:
+ case ALCHEMY_CPU_AU1200:
+ alchemy_sleep_au1550();
+ break;
+ case ALCHEMY_CPU_AU1300:
+ alchemy_sleep_au1300();
+ break;
+ }
+
+ restore_core_regs();
+}
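au_sleep() above is self-contained: it saves the SYS/MEM core registers, runs the SoC-specific low-level sleep routine, and restores the registers on wake-up. A board PM backend would typically call it from its suspend enter hook; a minimal sketch (the function name and suspend_ops wiring are assumptions, not part of this patch):

#include <linux/suspend.h>

static int myboard_pm_enter(suspend_state_t state)
{
	/* board peripherals are assumed to be quiesced by their drivers */
	au_sleep();	/* save core regs, sleep, restore on wake */
	return 0;
}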
diff --git a/arch/mips/alchemy/common/prom.c b/arch/mips/alchemy/common/prom.c
new file mode 100644
index 000000000..d910c0a64
--- /dev/null
+++ b/arch/mips/alchemy/common/prom.c
@@ -0,0 +1,149 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ * PROM library initialisation code, supports YAMON and U-Boot.
+ *
+ * Copyright 2000-2001, 2006, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file was derived from Carsten Langgaard's
+ * arch/mips/mips-boards/xx files.
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/sizes.h>
+#include <linux/string.h>
+
+#include <asm/bootinfo.h>
+
+int prom_argc;
+char **prom_argv;
+char **prom_envp;
+
+void __init prom_init_cmdline(void)
+{
+ int i;
+
+ for (i = 1; i < prom_argc; i++) {
+ strlcat(arcs_cmdline, prom_argv[i], COMMAND_LINE_SIZE);
+ if (i < (prom_argc - 1))
+ strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
+ }
+}
+
+char *prom_getenv(char *envname)
+{
+ /*
+ * Return a pointer to the given environment variable.
+ * YAMON uses "name", "value" pairs, while U-Boot uses "name=value".
+ */
+
+ char **env = prom_envp;
+ int i = strlen(envname);
+ int yamon = (*env && strchr(*env, '=') == NULL);
+
+ while (*env) {
+ if (yamon) {
+ if (strcmp(envname, *env++) == 0)
+ return *env;
+ } else if (strncmp(envname, *env, i) == 0 && (*env)[i] == '=')
+ return *env + i + 1;
+ env++;
+ }
+
+ return NULL;
+}
+
+void __init prom_init(void)
+{
+ unsigned char *memsize_str;
+ unsigned long memsize;
+
+ prom_argc = (int)fw_arg0;
+ prom_argv = (char **)fw_arg1;
+ prom_envp = (char **)fw_arg2;
+
+ prom_init_cmdline();
+
+ memsize_str = prom_getenv("memsize");
+ if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
+ memsize = SZ_64M; /* minimum memsize is 64MB RAM */
+
+ memblock_add(0, memsize);
+}
+
+static inline unsigned char str2hexnum(unsigned char c)
+{
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 10;
+
+ return 0; /* not a valid hex digit */
+}
+
+static inline void str2eaddr(unsigned char *ea, unsigned char *str)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ unsigned char num;
+
+ if ((*str == '.') || (*str == ':'))
+ str++;
+ num = str2hexnum(*str++) << 4;
+ num |= str2hexnum(*str++);
+ ea[i] = num;
+ }
+}
+
+int __init prom_get_ethernet_addr(char *ethernet_addr)
+{
+ char *ethaddr_str;
+
+ /* Check the environment variables first */
+ ethaddr_str = prom_getenv("ethaddr");
+ if (!ethaddr_str) {
+ /* Check command line */
+ ethaddr_str = strstr(arcs_cmdline, "ethaddr=");
+ if (!ethaddr_str)
+ return -1;
+
+ ethaddr_str += strlen("ethaddr=");
+ }
+
+ str2eaddr(ethernet_addr, ethaddr_str);
+
+ return 0;
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
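prom_getenv() above accepts both firmware environment layouts: YAMON hands over alternating "name", "value" string pairs, while U-Boot uses single "name=value" strings. A small usage sketch mirroring what prom_init() does with "memsize" (the helper name is illustrative):

#include <linux/kernel.h>
#include <linux/sizes.h>

static unsigned long __init board_get_memsize(void)
{
	unsigned long sz;
	char *ms = prom_getenv("memsize");	/* works for YAMON and U-Boot */

	if (!ms || kstrtoul(ms, 0, &sz))
		sz = SZ_64M;			/* fall back to 64MB */
	return sz;
}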
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
new file mode 100644
index 000000000..0f60efe04
--- /dev/null
+++ b/arch/mips/alchemy/common/setup.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2000, 2007-2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * Updates to 2.6, Pete Popov, Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+
+#include <asm/dma-coherence.h>
+#include <asm/mipsregs.h>
+
+#include <au1000.h>
+
+extern void __init board_setup(void);
+extern void __init alchemy_set_lpj(void);
+
+void __init plat_mem_setup(void)
+{
+ alchemy_set_lpj();
+
+ if (au1xxx_cpu_needs_config_od())
+ /* Various early Au1xx0 errata corrected by this */
+ set_c0_config(1 << 19); /* Set Config[OD] */
+ else
+ /* Clear to obtain best system bus performance */
+ clear_c0_config(1 << 19); /* Clear Config[OD] */
+
+ hw_coherentio = 0;
+ coherentio = IO_COHERENCE_ENABLED;
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ coherentio = IO_COHERENCE_DISABLED;
+ break;
+ case ALCHEMY_CPU_AU1200:
+ /* Au1200 AB USB does not support coherent memory */
+ if (0 == (read_c0_prid() & PRID_REV_MASK))
+ coherentio = IO_COHERENCE_DISABLED;
+ break;
+ }
+
+ board_setup(); /* board specific setup */
+
+ /* IO/MEM resources. */
+ set_io_port_base(0);
+ ioport_resource.start = IOPORT_RESOURCE_START;
+ ioport_resource.end = IOPORT_RESOURCE_END;
+ iomem_resource.start = IOMEM_RESOURCE_START;
+ iomem_resource.end = IOMEM_RESOURCE_END;
+}
+
+#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
+/* This routine should be valid for all Au1x based boards */
+phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
+{
+ unsigned long start = ALCHEMY_PCI_MEMWIN_START;
+ unsigned long end = ALCHEMY_PCI_MEMWIN_END;
+
+ /* Don't fixup 36-bit addresses */
+ if ((phys_addr >> 32) != 0)
+ return phys_addr;
+
+ /* Check for PCI memory window */
+ if (phys_addr >= start && (phys_addr + size - 1) <= end)
+ return (phys_addr_t)(AU1500_PCI_MEM_PHYS_ADDR + phys_addr);
+
+ /* default nop */
+ return phys_addr;
+}
+
+int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ phys_addr_t phys_addr = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
+
+ return remap_pfn_range(vma, vaddr, phys_addr >> PAGE_SHIFT, size, prot);
+}
+EXPORT_SYMBOL(io_remap_pfn_range);
+#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */
diff --git a/arch/mips/alchemy/common/sleeper.S b/arch/mips/alchemy/common/sleeper.S
new file mode 100644
index 000000000..13586d224
--- /dev/null
+++ b/arch/mips/alchemy/common/sleeper.S
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2002 Embedded Edge, LLC
+ * Author: dan@embeddededge.com
+ *
+ * Sleep helper for Au1xxx sleep mode.
+ */
+
+#include <asm/asm.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+ .extern __flush_cache_all
+
+ .text
+ .set noreorder
+ .set noat
+ .align 5
+
+
+/* preparatory stuff */
+.macro SETUP_SLEEP
+ subu sp, PT_SIZE
+ sw $1, PT_R1(sp)
+ sw $2, PT_R2(sp)
+ sw $3, PT_R3(sp)
+ sw $4, PT_R4(sp)
+ sw $5, PT_R5(sp)
+ sw $6, PT_R6(sp)
+ sw $7, PT_R7(sp)
+ sw $16, PT_R16(sp)
+ sw $17, PT_R17(sp)
+ sw $18, PT_R18(sp)
+ sw $19, PT_R19(sp)
+ sw $20, PT_R20(sp)
+ sw $21, PT_R21(sp)
+ sw $22, PT_R22(sp)
+ sw $23, PT_R23(sp)
+ sw $26, PT_R26(sp)
+ sw $27, PT_R27(sp)
+ sw $28, PT_R28(sp)
+ sw $30, PT_R30(sp)
+ sw $31, PT_R31(sp)
+ mfc0 k0, CP0_STATUS
+ sw k0, 0x20(sp)
+ mfc0 k0, CP0_CONTEXT
+ sw k0, 0x1c(sp)
+ mfc0 k0, CP0_PAGEMASK
+ sw k0, 0x18(sp)
+ mfc0 k0, CP0_CONFIG
+ sw k0, 0x14(sp)
+
+ /* flush caches to make sure context is in memory */
+ la t1, __flush_cache_all
+ lw t0, 0(t1)
+ jalr t0
+ nop
+
+ /* Now set up the scratch registers so the boot rom will
+ * return to this point upon wakeup.
+ * sys_scratch0 : SP
+ * sys_scratch1 : RA
+ */
+ lui t3, 0xb190 /* sys_xxx */
+ sw sp, 0x0018(t3)
+ la k0, alchemy_sleep_wakeup /* resume path */
+ sw k0, 0x001c(t3)
+.endm
+
+.macro DO_SLEEP
+ /* put power supply and processor to sleep */
+ sw zero, 0x0078(t3) /* sys_slppwr */
+ sync
+ sw zero, 0x007c(t3) /* sys_sleep */
+ sync
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+.endm
+
+/* sleep code for Au1000/Au1100/Au1500 memory controller type */
+LEAF(alchemy_sleep_au1000)
+
+ SETUP_SLEEP
+
+ /* cache following instructions, as memory gets put to sleep */
+ la t0, 1f
+ .set arch=r4000
+ cache 0x14, 0(t0)
+ cache 0x14, 32(t0)
+ cache 0x14, 64(t0)
+ cache 0x14, 96(t0)
+ .set mips0
+
+1: lui a0, 0xb400 /* mem_xxx */
+ sw zero, 0x001c(a0) /* Precharge */
+ sync
+ sw zero, 0x0020(a0) /* Auto Refresh */
+ sync
+ sw zero, 0x0030(a0) /* Sleep */
+ sync
+
+ DO_SLEEP
+
+END(alchemy_sleep_au1000)
+
+/* sleep code for Au1550/Au1200 memory controller type */
+LEAF(alchemy_sleep_au1550)
+
+ SETUP_SLEEP
+
+ /* cache following instructions, as memory gets put to sleep */
+ la t0, 1f
+ .set arch=r4000
+ cache 0x14, 0(t0)
+ cache 0x14, 32(t0)
+ cache 0x14, 64(t0)
+ cache 0x14, 96(t0)
+ .set mips0
+
+1: lui a0, 0xb400 /* mem_xxx */
+ sw zero, 0x08c0(a0) /* Precharge */
+ sync
+ sw zero, 0x08d0(a0) /* Self Refresh */
+ sync
+
+ /* wait for sdram to enter self-refresh mode */
+ lui t0, 0x0100
+2: lw t1, 0x0850(a0) /* mem_sdstat */
+ and t2, t1, t0
+ beq t2, zero, 2b
+ nop
+
+ /* disable SDRAM clocks */
+ lui t0, 0xcfff
+ ori t0, t0, 0xffff
+ lw t1, 0x0840(a0) /* mem_sdconfiga */
+ and t1, t0, t1 /* clear CE[1:0] */
+ sw t1, 0x0840(a0) /* mem_sdconfiga */
+ sync
+
+ DO_SLEEP
+
+END(alchemy_sleep_au1550)
+
+/* sleep code for Au1300 memory controller type */
+LEAF(alchemy_sleep_au1300)
+
+ SETUP_SLEEP
+
+ /* cache following instructions, as memory gets put to sleep */
+ la t0, 2f
+ la t1, 4f
+ subu t2, t1, t0
+
+ .set arch=r4000
+
+1: cache 0x14, 0(t0)
+ subu t2, t2, 32
+ bgez t2, 1b
+ addu t0, t0, 32
+
+ .set mips0
+
+2: lui a0, 0xb400 /* mem_xxx */
+
+ /* disable all ports in mem_sdportcfga */
+ sw zero, 0x868(a0) /* mem_sdportcfga */
+ sync
+
+ /* disable ODT */
+ li t0, 0x03010000
+ sw t0, 0x08d8(a0) /* mem_sdcmd0 */
+ sw t0, 0x08dc(a0) /* mem_sdcmd1 */
+ sync
+
+ /* precharge */
+ li t0, 0x23000400
+ sw t0, 0x08dc(a0) /* mem_sdcmd1 */
+ sw t0, 0x08d8(a0) /* mem_sdcmd0 */
+ sync
+
+ /* auto refresh */
+ sw zero, 0x08c8(a0) /* mem_sdautoref */
+ sync
+
+ /* block access to the DDR */
+ lw t0, 0x0848(a0) /* mem_sdconfigb */
+ li t1, (1 << 7 | 0x3F)
+ or t0, t0, t1
+ sw t0, 0x0848(a0) /* mem_sdconfigb */
+ sync
+
+ /* issue the Self Refresh command */
+ li t0, 0x10000000
+ sw t0, 0x08dc(a0) /* mem_sdcmd1 */
+ sw t0, 0x08d8(a0) /* mem_sdcmd0 */
+ sync
+
+ /* wait for sdram to enter self-refresh mode */
+ lui t0, 0x0300
+3: lw t1, 0x0850(a0) /* mem_sdstat */
+ and t2, t1, t0
+ bne t2, t0, 3b
+ nop
+
+ /* disable SDRAM clocks */
+ li t0, ~(3<<28)
+ lw t1, 0x0840(a0) /* mem_sdconfiga */
+ and t1, t1, t0 /* clear CE[1:0] */
+ sw t1, 0x0840(a0) /* mem_sdconfiga */
+ sync
+
+ DO_SLEEP
+4:
+
+END(alchemy_sleep_au1300)
+
+
+ /* This is where we return upon wakeup.
+ * Reload all of the registers and return.
+ */
+LEAF(alchemy_sleep_wakeup)
+ lw k0, 0x20(sp)
+ mtc0 k0, CP0_STATUS
+ lw k0, 0x1c(sp)
+ mtc0 k0, CP0_CONTEXT
+ lw k0, 0x18(sp)
+ mtc0 k0, CP0_PAGEMASK
+ lw k0, 0x14(sp)
+ mtc0 k0, CP0_CONFIG
+
+ /* We need to catch the early Alchemy SOCs with
+ * the write-only Config[OD] bit and set it back to one...
+ */
+ jal au1x00_fixup_config_od
+ nop
+ lw $1, PT_R1(sp)
+ lw $2, PT_R2(sp)
+ lw $3, PT_R3(sp)
+ lw $4, PT_R4(sp)
+ lw $5, PT_R5(sp)
+ lw $6, PT_R6(sp)
+ lw $7, PT_R7(sp)
+ lw $16, PT_R16(sp)
+ lw $17, PT_R17(sp)
+ lw $18, PT_R18(sp)
+ lw $19, PT_R19(sp)
+ lw $20, PT_R20(sp)
+ lw $21, PT_R21(sp)
+ lw $22, PT_R22(sp)
+ lw $23, PT_R23(sp)
+ lw $26, PT_R26(sp)
+ lw $27, PT_R27(sp)
+ lw $28, PT_R28(sp)
+ lw $30, PT_R30(sp)
+ lw $31, PT_R31(sp)
+ jr ra
+ addiu sp, PT_SIZE
+END(alchemy_sleep_wakeup)
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
new file mode 100644
index 000000000..d794ffb67
--- /dev/null
+++ b/arch/mips/alchemy/common/time.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2008-2009 Manuel Lauss <manuel.lauss@gmail.com>
+ *
+ * Previous incarnations were:
+ * Copyright (C) 2001, 2006, 2008 MontaVista Software, <source@mvista.com>
+ * Copied and modified Carsten Langgaard's time.c
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * ########################################################################
+ *
+ * ########################################################################
+ *
+ * Clocksource/event using the 32.768kHz-clocked Counter1 ('RTC' in the
+ * databooks). Firmware/Board init code must enable the counters in the
+ * counter control register, otherwise the CP0 counter clocksource/event
+ * will be installed instead (and use of 'wait' instruction is prohibited).
+ */
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include <asm/idle.h>
+#include <asm/processor.h>
+#include <asm/time.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/* 32kHz clock enabled and detected */
+#define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)
+
+static u64 au1x_counter1_read(struct clocksource *cs)
+{
+ return alchemy_rdsys(AU1000_SYS_RTCREAD);
+}
+
+static struct clocksource au1x_counter1_clocksource = {
+ .name = "alchemy-counter1",
+ .read = au1x_counter1_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .rating = 1500,
+};
+
+static int au1x_rtcmatch2_set_next_event(unsigned long delta,
+ struct clock_event_device *cd)
+{
+ delta += alchemy_rdsys(AU1000_SYS_RTCREAD);
+ /* wait for register access */
+ while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_M21)
+ ;
+ alchemy_wrsys(delta, AU1000_SYS_RTCMATCH2);
+
+ return 0;
+}
+
+static irqreturn_t au1x_rtcmatch2_irq(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = dev_id;
+ cd->event_handler(cd);
+ return IRQ_HANDLED;
+}
+
+static struct clock_event_device au1x_rtcmatch2_clockdev = {
+ .name = "rtcmatch2",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 1500,
+ .set_next_event = au1x_rtcmatch2_set_next_event,
+ .cpumask = cpu_possible_mask,
+};
+
+static int __init alchemy_time_init(unsigned int m2int)
+{
+ struct clock_event_device *cd = &au1x_rtcmatch2_clockdev;
+ unsigned long t;
+
+ au1x_rtcmatch2_clockdev.irq = m2int;
+
+ /* Check if firmware (YAMON, ...) has enabled 32kHz and clock
+ * has been detected. If so, install the counter1 clocksource and
+ * rtcmatch2 clock event device, otherwise don't bother. Note that
+ * both bits being set is by
+ * no means a definite guarantee that the counters actually work
+ * (the 32S bit seems to be stuck set to 1 once a single clock-
+ * edge is detected, hence the timeouts).
+ */
+ if (CNTR_OK != (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & CNTR_OK))
+ goto cntr_err;
+
+ /*
+ * setup counter 1 (RTC) to tick at full speed
+ */
+ t = 0xffffff;
+ while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_T1S) && --t)
+ asm volatile ("nop");
+ if (!t)
+ goto cntr_err;
+
+ alchemy_wrsys(0, AU1000_SYS_RTCTRIM); /* 32.768 kHz */
+
+ t = 0xffffff;
+ while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C1S) && --t)
+ asm volatile ("nop");
+ if (!t)
+ goto cntr_err;
+ alchemy_wrsys(0, AU1000_SYS_RTCWRITE);
+
+ t = 0xffffff;
+ while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C1S) && --t)
+ asm volatile ("nop");
+ if (!t)
+ goto cntr_err;
+
+ /* register counter1 clocksource and event device */
+ clocksource_register_hz(&au1x_counter1_clocksource, 32768);
+
+ cd->shift = 32;
+ cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift);
+ cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd);
+ cd->max_delta_ticks = 0xffffffff;
+ cd->min_delta_ns = clockevent_delta2ns(9, cd);
+ cd->min_delta_ticks = 9; /* ~0.28ms */
+ clockevents_register_device(cd);
+ if (request_irq(m2int, au1x_rtcmatch2_irq, IRQF_TIMER, "timer",
+ &au1x_rtcmatch2_clockdev))
+ pr_err("Failed to register timer interrupt\n");
+
+ printk(KERN_INFO "Alchemy clocksource installed\n");
+
+ return 0;
+
+cntr_err:
+ return -1;
+}
+
+static int alchemy_m2inttab[] __initdata = {
+ AU1000_RTC_MATCH2_INT,
+ AU1500_RTC_MATCH2_INT,
+ AU1100_RTC_MATCH2_INT,
+ AU1550_RTC_MATCH2_INT,
+ AU1200_RTC_MATCH2_INT,
+ AU1300_RTC_MATCH2_INT,
+};
+
+void __init plat_time_init(void)
+{
+ int t;
+
+ t = alchemy_get_cputype();
+ if (t == ALCHEMY_CPU_UNKNOWN ||
+ alchemy_time_init(alchemy_m2inttab[t]))
+ cpu_wait = NULL; /* wait doesn't work with r4k timer */
+}
diff --git a/arch/mips/alchemy/common/usb.c b/arch/mips/alchemy/common/usb.c
new file mode 100644
index 000000000..5d618547e
--- /dev/null
+++ b/arch/mips/alchemy/common/usb.c
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * USB block power/access management abstraction.
+ *
+ * Au1000+: The OHCI block control register is at the far end of the OHCI memory
+ * area. The Au1550 has OHCI at a different base address. No need to handle
+ * UDC here.
+ * Au1200: one register to control access and clocks to O/EHCI, UDC and OTG
+ * as well as the PHY for EHCI and UDC.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+#include <asm/cpu.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/* control register offsets */
+#define AU1000_OHCICFG 0x7fffc
+#define AU1550_OHCICFG 0x07ffc
+#define AU1200_USBCFG 0x04
+
+/* Au1000 USB block config bits */
+#define USBHEN_RD (1 << 4) /* OHCI reset-done indicator */
+#define USBHEN_CE (1 << 3) /* OHCI block clock enable */
+#define USBHEN_E (1 << 2) /* OHCI block enable */
+#define USBHEN_C (1 << 1) /* OHCI block coherency bit */
+#define USBHEN_BE (1 << 0) /* OHCI Big-Endian */
+
+/* Au1200 USB config bits */
+#define USBCFG_PFEN (1 << 31) /* prefetch enable (undoc) */
+#define USBCFG_RDCOMB (1 << 30) /* read combining (undoc) */
+#define USBCFG_UNKNOWN (5 << 20) /* unknown, leave this way */
+#define USBCFG_SSD (1 << 23) /* serial short detect en */
+#define USBCFG_PPE (1 << 19) /* HS PHY PLL */
+#define USBCFG_UCE (1 << 18) /* UDC clock enable */
+#define USBCFG_ECE (1 << 17) /* EHCI clock enable */
+#define USBCFG_OCE (1 << 16) /* OHCI clock enable */
+#define USBCFG_FLA(x) (((x) & 0x3f) << 8)
+#define USBCFG_UCAM (1 << 7) /* coherent access (undoc) */
+#define USBCFG_GME (1 << 6) /* OTG mem access */
+#define USBCFG_DBE (1 << 5) /* UDC busmaster enable */
+#define USBCFG_DME (1 << 4) /* UDC mem enable */
+#define USBCFG_EBE (1 << 3) /* EHCI busmaster enable */
+#define USBCFG_EME (1 << 2) /* EHCI mem enable */
+#define USBCFG_OBE (1 << 1) /* OHCI busmaster enable */
+#define USBCFG_OME (1 << 0) /* OHCI mem enable */
+#define USBCFG_INIT_AU1200 (USBCFG_PFEN | USBCFG_RDCOMB | USBCFG_UNKNOWN |\
+ USBCFG_SSD | USBCFG_FLA(0x20) | USBCFG_UCAM | \
+ USBCFG_GME | USBCFG_DBE | USBCFG_DME | \
+ USBCFG_EBE | USBCFG_EME | USBCFG_OBE | \
+ USBCFG_OME)
+
+/* Au1300 USB config registers */
+#define USB_DWC_CTRL1 0x00
+#define USB_DWC_CTRL2 0x04
+#define USB_VBUS_TIMER 0x10
+#define USB_SBUS_CTRL 0x14
+#define USB_MSR_ERR 0x18
+#define USB_DWC_CTRL3 0x1C
+#define USB_DWC_CTRL4 0x20
+#define USB_OTG_STATUS 0x28
+#define USB_DWC_CTRL5 0x2C
+#define USB_DWC_CTRL6 0x30
+#define USB_DWC_CTRL7 0x34
+#define USB_PHY_STATUS 0xC0
+#define USB_INT_STATUS 0xC4
+#define USB_INT_ENABLE 0xC8
+
+#define USB_DWC_CTRL1_OTGD 0x04 /* set to DISable OTG */
+#define USB_DWC_CTRL1_HSTRS 0x02 /* set to ENable EHCI */
+#define USB_DWC_CTRL1_DCRS 0x01 /* set to ENable UDC */
+
+#define USB_DWC_CTRL2_PHY1RS 0x04 /* set to enable PHY1 */
+#define USB_DWC_CTRL2_PHY0RS 0x02 /* set to enable PHY0 */
+#define USB_DWC_CTRL2_PHYRS 0x01 /* set to enable PHY */
+
+#define USB_DWC_CTRL3_OHCI1_CKEN (1 << 19)
+#define USB_DWC_CTRL3_OHCI0_CKEN (1 << 18)
+#define USB_DWC_CTRL3_EHCI0_CKEN (1 << 17)
+#define USB_DWC_CTRL3_OTG0_CKEN (1 << 16)
+
+#define USB_SBUS_CTRL_SBCA 0x04 /* coherent access */
+
+#define USB_INTEN_FORCE 0x20
+#define USB_INTEN_PHY 0x10
+#define USB_INTEN_UDC 0x08
+#define USB_INTEN_EHCI 0x04
+#define USB_INTEN_OHCI1 0x02
+#define USB_INTEN_OHCI0 0x01
+
+static DEFINE_SPINLOCK(alchemy_usb_lock);
+
+static inline void __au1300_usb_phyctl(void __iomem *base, int enable)
+{
+ unsigned long r, s;
+
+ r = __raw_readl(base + USB_DWC_CTRL2);
+ s = __raw_readl(base + USB_DWC_CTRL3);
+
+ s &= USB_DWC_CTRL3_OHCI1_CKEN | USB_DWC_CTRL3_OHCI0_CKEN |
+ USB_DWC_CTRL3_EHCI0_CKEN | USB_DWC_CTRL3_OTG0_CKEN;
+
+ if (enable) {
+ /* simply enable all PHYs */
+ r |= USB_DWC_CTRL2_PHY1RS | USB_DWC_CTRL2_PHY0RS |
+ USB_DWC_CTRL2_PHYRS;
+ __raw_writel(r, base + USB_DWC_CTRL2);
+ wmb();
+ } else if (!s) {
+ /* no USB block active, so disable all PHYs */
+ r &= ~(USB_DWC_CTRL2_PHY1RS | USB_DWC_CTRL2_PHY0RS |
+ USB_DWC_CTRL2_PHYRS);
+ __raw_writel(r, base + USB_DWC_CTRL2);
+ wmb();
+ }
+}
+
+static inline void __au1300_ohci_control(void __iomem *base, int enable, int id)
+{
+ unsigned long r;
+
+ if (enable) {
+ __raw_writel(1, base + USB_DWC_CTRL7); /* start OHCI clock */
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL3); /* enable OHCI block */
+ r |= (id == 0) ? USB_DWC_CTRL3_OHCI0_CKEN
+ : USB_DWC_CTRL3_OHCI1_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable); /* power up the PHYs */
+
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r |= (id == 0) ? USB_INTEN_OHCI0 : USB_INTEN_OHCI1;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+
+ /* reset the OHCI start clock bit */
+ __raw_writel(0, base + USB_DWC_CTRL7);
+ wmb();
+ } else {
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r &= ~((id == 0) ? USB_INTEN_OHCI0 : USB_INTEN_OHCI1);
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r &= ~((id == 0) ? USB_DWC_CTRL3_OHCI0_CKEN
+ : USB_DWC_CTRL3_OHCI1_CKEN);
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ }
+}
+
+static inline void __au1300_ehci_control(void __iomem *base, int enable)
+{
+ unsigned long r;
+
+ if (enable) {
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r |= USB_DWC_CTRL3_EHCI0_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r |= USB_DWC_CTRL1_HSTRS;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r |= USB_INTEN_EHCI;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+ } else {
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r &= ~USB_INTEN_EHCI;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r &= ~USB_DWC_CTRL1_HSTRS;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r &= ~USB_DWC_CTRL3_EHCI0_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ }
+}
+
+static inline void __au1300_udc_control(void __iomem *base, int enable)
+{
+ unsigned long r;
+
+ if (enable) {
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r |= USB_DWC_CTRL1_DCRS;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r |= USB_INTEN_UDC;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+ } else {
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r &= ~USB_INTEN_UDC;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r &= ~USB_DWC_CTRL1_DCRS;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ }
+}
+
+static inline void __au1300_otg_control(void __iomem *base, int enable)
+{
+ unsigned long r;
+ if (enable) {
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r |= USB_DWC_CTRL3_OTG0_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r &= ~USB_DWC_CTRL1_OTGD;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ } else {
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r |= USB_DWC_CTRL1_OTGD;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r &= ~USB_DWC_CTRL3_OTG0_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ }
+}
+
+static inline int au1300_usb_control(int block, int enable)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR);
+ int ret = 0;
+
+ switch (block) {
+ case ALCHEMY_USB_OHCI0:
+ __au1300_ohci_control(base, enable, 0);
+ break;
+ case ALCHEMY_USB_OHCI1:
+ __au1300_ohci_control(base, enable, 1);
+ break;
+ case ALCHEMY_USB_EHCI0:
+ __au1300_ehci_control(base, enable);
+ break;
+ case ALCHEMY_USB_UDC0:
+ __au1300_udc_control(base, enable);
+ break;
+ case ALCHEMY_USB_OTG0:
+ __au1300_otg_control(base, enable);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ return ret;
+}
+
+static inline void au1300_usb_init(void)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR);
+
+ /* set some sane defaults. Note: we don't fiddle with DWC_CTRL4
+ * here at all: Port 2 routing (EHCI or UDC) must be set either
+ * by boot firmware or platform init code; I can't autodetect
+ * a sane setting.
+ */
+ __raw_writel(0, base + USB_INT_ENABLE); /* disable all USB irqs */
+ wmb();
+ __raw_writel(0, base + USB_DWC_CTRL3); /* disable all clocks */
+ wmb();
+ __raw_writel(~0, base + USB_MSR_ERR); /* clear all errors */
+ wmb();
+ __raw_writel(~0, base + USB_INT_STATUS); /* clear int status */
+ wmb();
+ /* set coherent access bit */
+ __raw_writel(USB_SBUS_CTRL_SBCA, base + USB_SBUS_CTRL);
+ wmb();
+}
+
+static inline void __au1200_ohci_control(void __iomem *base, int enable)
+{
+ unsigned long r = __raw_readl(base + AU1200_USBCFG);
+ if (enable) {
+ __raw_writel(r | USBCFG_OCE, base + AU1200_USBCFG);
+ wmb();
+ udelay(2000);
+ } else {
+ __raw_writel(r & ~USBCFG_OCE, base + AU1200_USBCFG);
+ wmb();
+ udelay(1000);
+ }
+}
+
+static inline void __au1200_ehci_control(void __iomem *base, int enable)
+{
+ unsigned long r = __raw_readl(base + AU1200_USBCFG);
+ if (enable) {
+ __raw_writel(r | USBCFG_ECE | USBCFG_PPE, base + AU1200_USBCFG);
+ wmb();
+ udelay(1000);
+ } else {
+ if (!(r & USBCFG_UCE)) /* UDC also off? */
+ r &= ~USBCFG_PPE; /* yes: disable HS PHY PLL */
+ __raw_writel(r & ~USBCFG_ECE, base + AU1200_USBCFG);
+ wmb();
+ udelay(1000);
+ }
+}
+
+static inline void __au1200_udc_control(void __iomem *base, int enable)
+{
+ unsigned long r = __raw_readl(base + AU1200_USBCFG);
+ if (enable) {
+ __raw_writel(r | USBCFG_UCE | USBCFG_PPE, base + AU1200_USBCFG);
+ wmb();
+ } else {
+ if (!(r & USBCFG_ECE)) /* EHCI also off? */
+ r &= ~USBCFG_PPE; /* yes: disable HS PHY PLL */
+ __raw_writel(r & ~USBCFG_UCE, base + AU1200_USBCFG);
+ wmb();
+ }
+}
+
+static inline int au1200_usb_control(int block, int enable)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR);
+
+ switch (block) {
+ case ALCHEMY_USB_OHCI0:
+ __au1200_ohci_control(base, enable);
+ break;
+ case ALCHEMY_USB_UDC0:
+ __au1200_udc_control(base, enable);
+ break;
+ case ALCHEMY_USB_EHCI0:
+ __au1200_ehci_control(base, enable);
+ break;
+ default:
+ return -ENODEV;
+ }
+ return 0;
+}
+
+
+/* initialize USB block(s) to a known working state */
+static inline void au1200_usb_init(void)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR);
+ __raw_writel(USBCFG_INIT_AU1200, base + AU1200_USBCFG);
+ wmb();
+ udelay(1000);
+}
+
+static inline int au1000_usb_init(unsigned long rb, int reg)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(rb + reg);
+ unsigned long r = __raw_readl(base);
+ struct clk *c;
+
+ /* 48MHz check. Don't init if no one can provide it */
+ c = clk_get(NULL, "usbh_clk");
+ if (IS_ERR(c))
+ return -ENODEV;
+ if (clk_round_rate(c, 48000000) != 48000000) {
+ clk_put(c);
+ return -ENODEV;
+ }
+ if (clk_set_rate(c, 48000000)) {
+ clk_put(c);
+ return -ENODEV;
+ }
+ clk_put(c);
+
+#if defined(__BIG_ENDIAN)
+ r |= USBHEN_BE;
+#endif
+ r |= USBHEN_C;
+
+ __raw_writel(r, base);
+ wmb();
+ udelay(1000);
+
+ return 0;
+}
+
+
+static inline void __au1xx0_ohci_control(int enable, unsigned long rb, int creg)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(rb);
+ unsigned long r = __raw_readl(base + creg);
+ struct clk *c = clk_get(NULL, "usbh_clk");
+
+ if (IS_ERR(c))
+ return;
+
+ if (enable) {
+ if (clk_prepare_enable(c))
+ goto out;
+
+ __raw_writel(r | USBHEN_CE, base + creg);
+ wmb();
+ udelay(1000);
+ __raw_writel(r | USBHEN_CE | USBHEN_E, base + creg);
+ wmb();
+ udelay(1000);
+
+ /* wait for reset complete (read reg twice: au1500 erratum) */
+ while (__raw_readl(base + creg),
+ !(__raw_readl(base + creg) & USBHEN_RD))
+ udelay(1000);
+ } else {
+ __raw_writel(r & ~(USBHEN_CE | USBHEN_E), base + creg);
+ wmb();
+ clk_disable_unprepare(c);
+ }
+out:
+ clk_put(c);
+}
+
+static inline int au1000_usb_control(int block, int enable, unsigned long rb,
+ int creg)
+{
+ int ret = 0;
+
+ switch (block) {
+ case ALCHEMY_USB_OHCI0:
+ __au1xx0_ohci_control(enable, rb, creg);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ return ret;
+}
+
+/*
+ * alchemy_usb_control - control Alchemy on-chip USB blocks
+ * @block: USB block to target
+ * @enable: set 1 to enable a block, 0 to disable
+ */
+int alchemy_usb_control(int block, int enable)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&alchemy_usb_lock, flags);
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ ret = au1000_usb_control(block, enable,
+ AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG);
+ break;
+ case ALCHEMY_CPU_AU1550:
+ ret = au1000_usb_control(block, enable,
+ AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG);
+ break;
+ case ALCHEMY_CPU_AU1200:
+ ret = au1200_usb_control(block, enable);
+ break;
+ case ALCHEMY_CPU_AU1300:
+ ret = au1300_usb_control(block, enable);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ spin_unlock_irqrestore(&alchemy_usb_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(alchemy_usb_control);
+
+
+static unsigned long alchemy_usb_pmdata[2];
+
+static void au1000_usb_pm(unsigned long br, int creg, int susp)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(br);
+
+ if (susp) {
+ alchemy_usb_pmdata[0] = __raw_readl(base + creg);
+ /* There appears to be some undocumented reset register.... */
+ __raw_writel(0, base + 0x04);
+ wmb();
+ __raw_writel(0, base + creg);
+ wmb();
+ } else {
+ __raw_writel(alchemy_usb_pmdata[0], base + creg);
+ wmb();
+ }
+}
+
+static void au1200_usb_pm(int susp)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1200_USB_OTG_PHYS_ADDR);
+ if (susp) {
+ /* save OTG_CAP/MUX registers which indicate port routing */
+ /* FIXME: write an OTG driver to do that */
+ alchemy_usb_pmdata[0] = __raw_readl(base + 0x00);
+ alchemy_usb_pmdata[1] = __raw_readl(base + 0x04);
+ } else {
+ /* restore access to all MMIO areas */
+ au1200_usb_init();
+
+ /* restore OTG_CAP/MUX registers */
+ __raw_writel(alchemy_usb_pmdata[0], base + 0x00);
+ __raw_writel(alchemy_usb_pmdata[1], base + 0x04);
+ wmb();
+ }
+}
+
+static void au1300_usb_pm(int susp)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR);
+ /* remember Port2 routing */
+ if (susp) {
+ alchemy_usb_pmdata[0] = __raw_readl(base + USB_DWC_CTRL4);
+ } else {
+ au1300_usb_init();
+ __raw_writel(alchemy_usb_pmdata[0], base + USB_DWC_CTRL4);
+ wmb();
+ }
+}
+
+static void alchemy_usb_pm(int susp)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ au1000_usb_pm(AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG, susp);
+ break;
+ case ALCHEMY_CPU_AU1550:
+ au1000_usb_pm(AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG, susp);
+ break;
+ case ALCHEMY_CPU_AU1200:
+ au1200_usb_pm(susp);
+ break;
+ case ALCHEMY_CPU_AU1300:
+ au1300_usb_pm(susp);
+ break;
+ }
+}
+
+static int alchemy_usb_suspend(void)
+{
+ alchemy_usb_pm(1);
+ return 0;
+}
+
+static void alchemy_usb_resume(void)
+{
+ alchemy_usb_pm(0);
+}
+
+static struct syscore_ops alchemy_usb_pm_ops = {
+ .suspend = alchemy_usb_suspend,
+ .resume = alchemy_usb_resume,
+};
+
+static int __init alchemy_usb_init(void)
+{
+ int ret = 0;
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ ret = au1000_usb_init(AU1000_USB_OHCI_PHYS_ADDR,
+ AU1000_OHCICFG);
+ break;
+ case ALCHEMY_CPU_AU1550:
+ ret = au1000_usb_init(AU1550_USB_OHCI_PHYS_ADDR,
+ AU1550_OHCICFG);
+ break;
+ case ALCHEMY_CPU_AU1200:
+ au1200_usb_init();
+ break;
+ case ALCHEMY_CPU_AU1300:
+ au1300_usb_init();
+ break;
+ }
+
+ if (!ret)
+ register_syscore_ops(&alchemy_usb_pm_ops);
+
+ return ret;
+}
+arch_initcall(alchemy_usb_init);
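alchemy_usb_control() is the single entry point board code and host-glue drivers use to gate clocks and PHY power for the individual USB blocks; the per-SoC register details above stay hidden behind it. A hedged sketch of power hooks (the function names are illustrative, error handling elided):

static int myglue_usb_power_on(void)
{
	/* enables clocks/PHY for the first OHCI block on any Alchemy SoC */
	return alchemy_usb_control(ALCHEMY_USB_OHCI0, 1);
}

static void myglue_usb_power_off(void)
{
	alchemy_usb_control(ALCHEMY_USB_OHCI0, 0);
}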
diff --git a/arch/mips/alchemy/common/vss.c b/arch/mips/alchemy/common/vss.c
new file mode 100644
index 000000000..3d0d468d9
--- /dev/null
+++ b/arch/mips/alchemy/common/vss.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Au1300 media block power gating (VSS)
+ *
+ * This is a stop-gap solution until I have the clock framework integration
+ * ready. This stuff here really must be handled transparently when clocks
+ * for various media blocks are enabled/disabled.
+ */
+
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <asm/mach-au1x00/au1000.h>
+
+#define VSS_GATE 0x00 /* gate wait timers */
+#define VSS_CLKRST 0x04 /* clock/block control */
+#define VSS_FTR 0x08 /* footers */
+
+#define VSS_ADDR(blk) (KSEG1ADDR(AU1300_VSS_PHYS_ADDR) + (blk * 0x0c))
+
+static DEFINE_SPINLOCK(au1300_vss_lock);
+
+/* enable a block as outlined in the databook */
+static inline void __enable_block(int block)
+{
+ void __iomem *base = (void __iomem *)VSS_ADDR(block);
+
+ __raw_writel(3, base + VSS_CLKRST); /* enable clock, assert reset */
+ wmb();
+
+ __raw_writel(0x01fffffe, base + VSS_GATE); /* maximum setup time */
+ wmb();
+
+ /* enable footers in sequence */
+ __raw_writel(0x01, base + VSS_FTR);
+ wmb();
+ __raw_writel(0x03, base + VSS_FTR);
+ wmb();
+ __raw_writel(0x07, base + VSS_FTR);
+ wmb();
+ __raw_writel(0x0f, base + VSS_FTR);
+ wmb();
+
+ __raw_writel(0x01ffffff, base + VSS_GATE); /* start FSM too */
+ wmb();
+
+ __raw_writel(2, base + VSS_CLKRST); /* deassert reset */
+ wmb();
+
+ __raw_writel(0x1f, base + VSS_FTR); /* enable isolation cells */
+ wmb();
+}
+
+/* disable a block as outlined in the databook */
+static inline void __disable_block(int block)
+{
+ void __iomem *base = (void __iomem *)VSS_ADDR(block);
+
+ __raw_writel(0x0f, base + VSS_FTR); /* disable isolation cells */
+ wmb();
+ __raw_writel(0, base + VSS_GATE); /* disable FSM */
+ wmb();
+ __raw_writel(3, base + VSS_CLKRST); /* assert reset */
+ wmb();
+ __raw_writel(1, base + VSS_CLKRST); /* disable clock */
+ wmb();
+ __raw_writel(0, base + VSS_FTR); /* disable all footers */
+ wmb();
+}
+
+void au1300_vss_block_control(int block, int enable)
+{
+ unsigned long flags;
+
+ if (alchemy_get_cputype() != ALCHEMY_CPU_AU1300)
+ return;
+
+ /* only one block at a time */
+ spin_lock_irqsave(&au1300_vss_lock, flags);
+ if (enable)
+ __enable_block(block);
+ else
+ __disable_block(block);
+ spin_unlock_irqrestore(&au1300_vss_lock, flags);
+}
+EXPORT_SYMBOL_GPL(au1300_vss_block_control);
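au1300_vss_block_control() takes a block index (each block occupies a 0x0c register stride, see VSS_ADDR above) and a power state. A hedged sketch; the numeric block index is a placeholder, since the real block IDs live in the au1300 header and are not part of this patch:

static void myboard_media_block_power(int on)
{
	/* block 0 used as a placeholder index */
	au1300_vss_block_control(0, on);
}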
diff --git a/arch/mips/alchemy/devboards/Makefile b/arch/mips/alchemy/devboards/Makefile
new file mode 100644
index 000000000..10a52283f
--- /dev/null
+++ b/arch/mips/alchemy/devboards/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Alchemy Devboards
+#
+
+obj-y += bcsr.o platform.o db1000.o db1200.o db1300.o db1550.o db1xxx.o
+obj-$(CONFIG_PM) += pm.o
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c
new file mode 100644
index 000000000..8df0ccdc9
--- /dev/null
+++ b/arch/mips/alchemy/devboards/bcsr.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * bcsr.c -- Db1xxx/Pb1xxx Devboard CPLD registers ("BCSR") abstraction.
+ *
+ * All Alchemy development boards (except, of course, the weird PB1000)
+ * have a few registers in a CPLD with standardised layout; they mostly
+ * only differ in base address.
+ * All registers are 16 bits wide with 32-bit spacing.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/mach-db1x00/bcsr.h>
+
+static struct bcsr_reg {
+ void __iomem *raddr;
+ spinlock_t lock;
+} bcsr_regs[BCSR_CNT];
+
+static void __iomem *bcsr_virt; /* KSEG1 addr of BCSR base */
+static int bcsr_csc_base; /* linux-irq of first cascaded irq */
+
+void __init bcsr_init(unsigned long bcsr1_phys, unsigned long bcsr2_phys)
+{
+ int i;
+
+ bcsr1_phys = KSEG1ADDR(CPHYSADDR(bcsr1_phys));
+ bcsr2_phys = KSEG1ADDR(CPHYSADDR(bcsr2_phys));
+
+ bcsr_virt = (void __iomem *)bcsr1_phys;
+
+ for (i = 0; i < BCSR_CNT; i++) {
+ if (i >= BCSR_HEXLEDS)
+ bcsr_regs[i].raddr = (void __iomem *)bcsr2_phys +
+ (0x04 * (i - BCSR_HEXLEDS));
+ else
+ bcsr_regs[i].raddr = (void __iomem *)bcsr1_phys +
+ (0x04 * i);
+
+ spin_lock_init(&bcsr_regs[i].lock);
+ }
+}
+
+unsigned short bcsr_read(enum bcsr_id reg)
+{
+ unsigned short r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
+ r = __raw_readw(bcsr_regs[reg].raddr);
+ spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
+ return r;
+}
+EXPORT_SYMBOL_GPL(bcsr_read);
+
+void bcsr_write(enum bcsr_id reg, unsigned short val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
+ __raw_writew(val, bcsr_regs[reg].raddr);
+ wmb();
+ spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
+}
+EXPORT_SYMBOL_GPL(bcsr_write);
+
+void bcsr_mod(enum bcsr_id reg, unsigned short clr, unsigned short set)
+{
+ unsigned short r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
+ r = __raw_readw(bcsr_regs[reg].raddr);
+ r &= ~clr;
+ r |= set;
+ __raw_writew(r, bcsr_regs[reg].raddr);
+ wmb();
+ spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
+}
+EXPORT_SYMBOL_GPL(bcsr_mod);
+
+/*
+ * DB1200/PB1200 CPLD IRQ muxer
+ */
+static void bcsr_csc_handler(struct irq_desc *d)
+{
+ unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
+ struct irq_chip *chip = irq_desc_get_chip(d);
+
+ chained_irq_enter(chip, d);
+ generic_handle_irq(bcsr_csc_base + __ffs(bisr));
+ chained_irq_exit(chip, d);
+}
+
+static void bcsr_irq_mask(struct irq_data *d)
+{
+ unsigned short v = 1 << (d->irq - bcsr_csc_base);
+ __raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
+ wmb();
+}
+
+static void bcsr_irq_maskack(struct irq_data *d)
+{
+ unsigned short v = 1 << (d->irq - bcsr_csc_base);
+ __raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
+ __raw_writew(v, bcsr_virt + BCSR_REG_INTSTAT); /* ack */
+ wmb();
+}
+
+static void bcsr_irq_unmask(struct irq_data *d)
+{
+ unsigned short v = 1 << (d->irq - bcsr_csc_base);
+ __raw_writew(v, bcsr_virt + BCSR_REG_MASKSET);
+ wmb();
+}
+
+static struct irq_chip bcsr_irq_type = {
+ .name = "CPLD",
+ .irq_mask = bcsr_irq_mask,
+ .irq_mask_ack = bcsr_irq_maskack,
+ .irq_unmask = bcsr_irq_unmask,
+};
+
+void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq)
+{
+ unsigned int irq;
+
+ /* mask & enable & ack all */
+ __raw_writew(0xffff, bcsr_virt + BCSR_REG_MASKCLR);
+ __raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSET);
+ __raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSTAT);
+ wmb();
+
+ bcsr_csc_base = csc_start;
+
+ for (irq = csc_start; irq <= csc_end; irq++)
+ irq_set_chip_and_handler_name(irq, &bcsr_irq_type,
+ handle_level_irq, "level");
+
+ irq_set_chained_handler(hook_irq, bcsr_csc_handler);
+}
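Board code accesses the CPLD exclusively through bcsr_read()/bcsr_write()/bcsr_mod(), which serialize each register behind its own spinlock. The read-modify-write pattern below matches how the devboard files later in this patch toggle SD card power (the function name is illustrative):

static void myboard_sd0_power(int on)
{
	if (on)
		bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_SD0PWR);
	else
		bcsr_mod(BCSR_BOARD, BCSR_BOARD_SD0PWR, 0);
}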
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
new file mode 100644
index 000000000..3183df60a
--- /dev/null
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DBAu1000/1500/1100 PBAu1100/1500 board support
+ *
+ * Copyright 2000, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/leds.h>
+#include <linux/mmc/host.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_gpio.h>
+#include <linux/spi/ads7846.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
+#include <asm/mach-au1x00/au1000_dma.h>
+#include <asm/mach-au1x00/au1100_mmc.h>
+#include <asm/mach-db1x00/bcsr.h>
+#include <asm/reboot.h>
+#include <prom.h>
+#include "platform.h"
+
+#define F_SWAPPED (bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1000_SWAPBOOT)
+
+const char *get_system_type(void);
+
+int __init db1000_board_setup(void)
+{
+ /* initialize board register space */
+ bcsr_init(DB1000_BCSR_PHYS_ADDR,
+ DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS);
+
+ switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
+ case BCSR_WHOAMI_DB1000:
+ case BCSR_WHOAMI_DB1500:
+ case BCSR_WHOAMI_DB1100:
+ case BCSR_WHOAMI_PB1500:
+ case BCSR_WHOAMI_PB1500R2:
+ case BCSR_WHOAMI_PB1100:
+ pr_info("AMD Alchemy %s Board\n", get_system_type());
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static int db1500_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin)
+{
+ if ((slot < 12) || (slot > 13) || pin == 0)
+ return -1;
+ if (slot == 12)
+ return (pin == 1) ? AU1500_PCI_INTA : 0xff;
+ if (slot == 13) {
+ switch (pin) {
+ case 1: return AU1500_PCI_INTA;
+ case 2: return AU1500_PCI_INTB;
+ case 3: return AU1500_PCI_INTC;
+ case 4: return AU1500_PCI_INTD;
+ }
+ }
+ return -1;
+}
+
+static u64 au1xxx_all_dmamask = DMA_BIT_MASK(32);
+
+static struct resource alchemy_pci_host_res[] = {
+ [0] = {
+ .start = AU1500_PCI_PHYS_ADDR,
+ .end = AU1500_PCI_PHYS_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct alchemy_pci_platdata db1500_pci_pd = {
+ .board_map_irq = db1500_map_pci_irq,
+};
+
+static struct platform_device db1500_pci_host_dev = {
+ .dev.platform_data = &db1500_pci_pd,
+ .name = "alchemy-pci",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(alchemy_pci_host_res),
+ .resource = alchemy_pci_host_res,
+};
+
+int __init db1500_pci_setup(void)
+{
+ return platform_device_register(&db1500_pci_host_dev);
+}
+
+static struct resource au1100_lcd_resources[] = {
+ [0] = {
+ .start = AU1100_LCD_PHYS_ADDR,
+ .end = AU1100_LCD_PHYS_ADDR + 0x800 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1100_LCD_INT,
+ .end = AU1100_LCD_INT,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device au1100_lcd_device = {
+ .name = "au1100-lcd",
+ .id = 0,
+ .dev = {
+ .dma_mask = &au1xxx_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(au1100_lcd_resources),
+ .resource = au1100_lcd_resources,
+};
+
+static struct resource alchemy_ac97c_res[] = {
+ [0] = {
+ .start = AU1000_AC97_PHYS_ADDR,
+ .end = AU1000_AC97_PHYS_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMA_ID_AC97C_TX,
+ .end = DMA_ID_AC97C_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMA_ID_AC97C_RX,
+ .end = DMA_ID_AC97C_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct platform_device alchemy_ac97c_dev = {
+ .name = "alchemy-ac97c",
+ .id = -1,
+ .resource = alchemy_ac97c_res,
+ .num_resources = ARRAY_SIZE(alchemy_ac97c_res),
+};
+
+static struct platform_device alchemy_ac97c_dma_dev = {
+ .name = "alchemy-pcm-dma",
+ .id = 0,
+};
+
+static struct platform_device db1x00_codec_dev = {
+ .name = "ac97-codec",
+ .id = -1,
+};
+
+static struct platform_device db1x00_audio_dev = {
+ .name = "db1000-audio",
+ .dev = {
+ .dma_mask = &au1xxx_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+/******************************************************************************/
+
+#ifdef CONFIG_MMC_AU1X
+static irqreturn_t db1100_mmc_cd(int irq, void *ptr)
+{
+ mmc_detect_change(ptr, msecs_to_jiffies(500));
+ return IRQ_HANDLED;
+}
+
+static int db1100_mmc_cd_setup(void *mmc_host, int en)
+{
+ int ret = 0, irq;
+
+ if (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI)) == BCSR_WHOAMI_DB1100)
+ irq = AU1100_GPIO19_INT;
+ else
+ irq = AU1100_GPIO14_INT; /* PB1100 SD0 CD# */
+
+ if (en) {
+ irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
+ ret = request_irq(irq, db1100_mmc_cd, 0,
+ "sd0_cd", mmc_host);
+ } else
+ free_irq(irq, mmc_host);
+ return ret;
+}
+
+static int db1100_mmc1_cd_setup(void *mmc_host, int en)
+{
+ int ret = 0, irq;
+
+ if (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI)) == BCSR_WHOAMI_DB1100)
+ irq = AU1100_GPIO20_INT;
+ else
+ irq = AU1100_GPIO15_INT; /* PB1100 SD1 CD# */
+
+ if (en) {
+ irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
+ ret = request_irq(irq, db1100_mmc_cd, 0,
+ "sd1_cd", mmc_host);
+ } else
+ free_irq(irq, mmc_host);
+ return ret;
+}
+
+static int db1100_mmc_card_readonly(void *mmc_host)
+{
+ /* testing suggests that this bit is inverted */
+ return (bcsr_read(BCSR_STATUS) & BCSR_STATUS_SD0WP) ? 0 : 1;
+}
+
+static int db1100_mmc_card_inserted(void *mmc_host)
+{
+ return !alchemy_gpio_get_value(19);
+}
+
+static void db1100_mmc_set_power(void *mmc_host, int state)
+{
+ int bit;
+
+ if (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI)) == BCSR_WHOAMI_DB1100)
+ bit = BCSR_BOARD_SD0PWR;
+ else
+ bit = BCSR_BOARD_PB1100_SD0PWR;
+
+ if (state) {
+ bcsr_mod(BCSR_BOARD, 0, bit);
+ msleep(400); /* stabilization time */
+ } else
+ bcsr_mod(BCSR_BOARD, bit, 0);
+}
+
+static void db1100_mmcled_set(struct led_classdev *led, enum led_brightness b)
+{
+ if (b != LED_OFF)
+ bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED0, 0);
+ else
+ bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED0);
+}
+
+static struct led_classdev db1100_mmc_led = {
+ .brightness_set = db1100_mmcled_set,
+};
+
+static int db1100_mmc1_card_readonly(void *mmc_host)
+{
+ return (bcsr_read(BCSR_BOARD) & BCSR_BOARD_SD1WP) ? 1 : 0;
+}
+
+static int db1100_mmc1_card_inserted(void *mmc_host)
+{
+ return !alchemy_gpio_get_value(20);
+}
+
+static void db1100_mmc1_set_power(void *mmc_host, int state)
+{
+ int bit;
+
+ if (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI)) == BCSR_WHOAMI_DB1100)
+ bit = BCSR_BOARD_SD1PWR;
+ else
+ bit = BCSR_BOARD_PB1100_SD1PWR;
+
+ if (state) {
+ bcsr_mod(BCSR_BOARD, 0, bit);
+ msleep(400); /* stabilization time */
+ } else
+ bcsr_mod(BCSR_BOARD, bit, 0);
+}
+
+static void db1100_mmc1led_set(struct led_classdev *led, enum led_brightness b)
+{
+ if (b != LED_OFF)
+ bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED1, 0);
+ else
+ bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED1);
+}
+
+static struct led_classdev db1100_mmc1_led = {
+ .brightness_set = db1100_mmc1led_set,
+};
+
+static struct au1xmmc_platform_data db1100_mmc_platdata[2] = {
+ [0] = {
+ .cd_setup = db1100_mmc_cd_setup,
+ .set_power = db1100_mmc_set_power,
+ .card_inserted = db1100_mmc_card_inserted,
+ .card_readonly = db1100_mmc_card_readonly,
+ .led = &db1100_mmc_led,
+ },
+ [1] = {
+ .cd_setup = db1100_mmc1_cd_setup,
+ .set_power = db1100_mmc1_set_power,
+ .card_inserted = db1100_mmc1_card_inserted,
+ .card_readonly = db1100_mmc1_card_readonly,
+ .led = &db1100_mmc1_led,
+ },
+};
+
+static struct resource au1100_mmc0_resources[] = {
+ [0] = {
+ .start = AU1100_SD0_PHYS_ADDR,
+ .end = AU1100_SD0_PHYS_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1100_SD_INT,
+ .end = AU1100_SD_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = DMA_ID_SD0_TX,
+ .end = DMA_ID_SD0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = DMA_ID_SD0_RX,
+ .end = DMA_ID_SD0_RX,
+ .flags = IORESOURCE_DMA,
+ }
+};
+
+static struct platform_device db1100_mmc0_dev = {
+ .name = "au1xxx-mmc",
+ .id = 0,
+ .dev = {
+ .dma_mask = &au1xxx_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &db1100_mmc_platdata[0],
+ },
+ .num_resources = ARRAY_SIZE(au1100_mmc0_resources),
+ .resource = au1100_mmc0_resources,
+};
+
+static struct resource au1100_mmc1_res[] = {
+ [0] = {
+ .start = AU1100_SD1_PHYS_ADDR,
+ .end = AU1100_SD1_PHYS_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1100_SD_INT,
+ .end = AU1100_SD_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = DMA_ID_SD1_TX,
+ .end = DMA_ID_SD1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = DMA_ID_SD1_RX,
+ .end = DMA_ID_SD1_RX,
+ .flags = IORESOURCE_DMA,
+ }
+};
+
+static struct platform_device db1100_mmc1_dev = {
+ .name = "au1xxx-mmc",
+ .id = 1,
+ .dev = {
+ .dma_mask = &au1xxx_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &db1100_mmc_platdata[1],
+ },
+ .num_resources = ARRAY_SIZE(au1100_mmc1_res),
+ .resource = au1100_mmc1_res,
+};
+#endif /* CONFIG_MMC_AU1X */
+
+/******************************************************************************/
+
+static struct ads7846_platform_data db1100_touch_pd = {
+ .model = 7846,
+ .vref_mv = 3300,
+ .gpio_pendown = 21,
+};
+
+static struct spi_gpio_platform_data db1100_spictl_pd = {
+ .num_chipselect = 1,
+};
+
+static struct spi_board_info db1100_spi_info[] __initdata = {
+ [0] = {
+ .modalias = "ads7846",
+ .max_speed_hz = 3250000,
+ .bus_num = 0,
+ .chip_select = 0,
+ .mode = 0,
+ .irq = AU1100_GPIO21_INT,
+ .platform_data = &db1100_touch_pd,
+ },
+};
+
+static struct platform_device db1100_spi_dev = {
+ .name = "spi_gpio",
+ .id = 0,
+ .dev = {
+ .platform_data = &db1100_spictl_pd,
+ .dma_mask = &au1xxx_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+/*
+ * Alchemy GPIO bank 2 has its base at 200, so GPIO lines
+ * 207 through 210 correspond to offsets 7 through 10 on this chip.
+ */
+static struct gpiod_lookup_table db1100_spi_gpiod_table = {
+ .dev_id = "spi_gpio",
+ .table = {
+ GPIO_LOOKUP("alchemy-gpio2", 9,
+ "sck", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("alchemy-gpio2", 8,
+ "mosi", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("alchemy-gpio2", 7,
+ "miso", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("alchemy-gpio2", 10,
+ "cs", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
+static struct platform_device *db1x00_devs[] = {
+ &db1x00_codec_dev,
+ &alchemy_ac97c_dma_dev,
+ &alchemy_ac97c_dev,
+ &db1x00_audio_dev,
+};
+
+static struct platform_device *db1100_devs[] = {
+ &au1100_lcd_device,
+#ifdef CONFIG_MMC_AU1X
+ &db1100_mmc0_dev,
+ &db1100_mmc1_dev,
+#endif
+};
+
+int __init db1000_dev_setup(void)
+{
+ int board = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
+ int c0, c1, d0, d1, s0, s1, flashsize = 32, twosocks = 1;
+ unsigned long pfc;
+ struct clk *c, *p;
+
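+ /* c0/c1: PCMCIA card IRQs; d0/d1: card-detect GPIO numbers;
+  * s0/s1: status-change IRQs (configured below but not passed to the
+  * socket driver).
+  */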
+ if (board == BCSR_WHOAMI_DB1500) {
+ c0 = AU1500_GPIO2_INT;
+ c1 = AU1500_GPIO5_INT;
+ d0 = 0; /* GPIO number, NOT irq! */
+ d1 = 3; /* GPIO number, NOT irq! */
+ s0 = AU1500_GPIO1_INT;
+ s1 = AU1500_GPIO4_INT;
+ } else if (board == BCSR_WHOAMI_DB1100) {
+ c0 = AU1100_GPIO2_INT;
+ c1 = AU1100_GPIO5_INT;
+ d0 = 0; /* GPIO number, NOT irq! */
+ d1 = 3; /* GPIO number, NOT irq! */
+ s0 = AU1100_GPIO1_INT;
+ s1 = AU1100_GPIO4_INT;
+
+ gpio_request(19, "sd0_cd");
+ gpio_request(20, "sd1_cd");
+ gpio_direction_input(19); /* sd0 cd# */
+ gpio_direction_input(20); /* sd1 cd# */
+
+ /* spi_gpio on SSI0 pins */
+ pfc = alchemy_rdsys(AU1000_SYS_PINFUNC);
+ pfc |= (1 << 0); /* SSI0 pins as GPIOs */
+ alchemy_wrsys(pfc, AU1000_SYS_PINFUNC);
+
+ spi_register_board_info(db1100_spi_info,
+ ARRAY_SIZE(db1100_spi_info));
+
+ /* link LCD clock to AUXPLL */
+ p = clk_get(NULL, "auxpll_clk");
+ c = clk_get(NULL, "lcd_intclk");
+ if (!IS_ERR(c) && !IS_ERR(p)) {
+ clk_set_parent(c, p);
+ clk_set_rate(c, clk_get_rate(p));
+ }
+ if (!IS_ERR(c))
+ clk_put(c);
+ if (!IS_ERR(p))
+ clk_put(p);
+
+ platform_add_devices(db1100_devs, ARRAY_SIZE(db1100_devs));
+ gpiod_add_lookup_table(&db1100_spi_gpiod_table);
+ platform_device_register(&db1100_spi_dev);
+ } else if (board == BCSR_WHOAMI_DB1000) {
+ c0 = AU1000_GPIO2_INT;
+ c1 = AU1000_GPIO5_INT;
+ d0 = 0; /* GPIO number, NOT irq! */
+ d1 = 3; /* GPIO number, NOT irq! */
+ s0 = AU1000_GPIO1_INT;
+ s1 = AU1000_GPIO4_INT;
+ } else if ((board == BCSR_WHOAMI_PB1500) ||
+ (board == BCSR_WHOAMI_PB1500R2)) {
+ c0 = AU1500_GPIO203_INT;
+ d0 = 1; /* GPIO number, NOT irq! */
+ s0 = AU1500_GPIO202_INT;
+ twosocks = 0;
+ flashsize = 64;
+ /* RTC and daughtercard irqs */
+ irq_set_irq_type(AU1500_GPIO204_INT, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(AU1500_GPIO205_INT, IRQ_TYPE_LEVEL_LOW);
+ /* EPSON S1D13806 0x1b000000
+ * SRAM 1MB/2MB 0x1a000000
+ * DS1693 RTC 0x0c000000
+ */
+ } else if (board == BCSR_WHOAMI_PB1100) {
+ c0 = AU1100_GPIO11_INT;
+ d0 = 9; /* GPIO number, NOT irq! */
+ s0 = AU1100_GPIO10_INT;
+ twosocks = 0;
+ flashsize = 64;
+ /* pendown, rtc, daughtercard irqs */
+ irq_set_irq_type(AU1100_GPIO8_INT, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(AU1100_GPIO12_INT, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(AU1100_GPIO13_INT, IRQ_TYPE_LEVEL_LOW);
+ /* EPSON S1D13806 0x1b000000
+ * SRAM 1MB/2MB 0x1a000000
+ * DiskOnChip 0x0d000000
+ * DS1693 RTC 0x0c000000
+ */
+ platform_add_devices(db1100_devs, ARRAY_SIZE(db1100_devs));
+ } else
+ return 0; /* unknown board, no further dev setup to do */
+
+ irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW);
+
+ db1x_register_pcmcia_socket(
+ AU1000_PCMCIA_ATTR_PHYS_ADDR,
+ AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1,
+ AU1000_PCMCIA_MEM_PHYS_ADDR,
+ AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
+ AU1000_PCMCIA_IO_PHYS_ADDR,
+ AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
+ c0, d0, /*s0*/0, 0, 0);
+
+ if (twosocks) {
+ irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW);
+
+ db1x_register_pcmcia_socket(
+ AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004000000,
+ AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004400000 - 1,
+ AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004000000,
+ AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
+ AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000,
+ AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
+ c1, d1, /*s1*/0, 0, 1);
+ }
+
+ platform_add_devices(db1x00_devs, ARRAY_SIZE(db1x00_devs));
+ db1x_register_norflash(flashsize << 20, 4 /* 32bit */, F_SWAPPED);
+ return 0;
+}
diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
new file mode 100644
index 000000000..9ad26215b
--- /dev/null
+++ b/arch/mips/alchemy/devboards/db1200.c
@@ -0,0 +1,943 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DBAu1200/PBAu1200 board platform device registration
+ *
+ * Copyright (C) 2008-2011 Manuel Lauss
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/mmc/host.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/platnand.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/smc91x.h>
+#include <linux/ata_platform.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1100_mmc.h>
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
+#include <asm/mach-au1x00/au1xxx_psc.h>
+#include <asm/mach-au1x00/au1200fb.h>
+#include <asm/mach-au1x00/au1550_spi.h>
+#include <asm/mach-db1x00/bcsr.h>
+
+#include "platform.h"
+
+#define BCSR_INT_IDE 0x0001
+#define BCSR_INT_ETH 0x0002
+#define BCSR_INT_PC0 0x0004
+#define BCSR_INT_PC0STSCHG 0x0008
+#define BCSR_INT_PC1 0x0010
+#define BCSR_INT_PC1STSCHG 0x0020
+#define BCSR_INT_DC 0x0040
+#define BCSR_INT_FLASHBUSY 0x0080
+#define BCSR_INT_PC0INSERT 0x0100
+#define BCSR_INT_PC0EJECT 0x0200
+#define BCSR_INT_PC1INSERT 0x0400
+#define BCSR_INT_PC1EJECT 0x0800
+#define BCSR_INT_SD0INSERT 0x1000
+#define BCSR_INT_SD0EJECT 0x2000
+#define BCSR_INT_SD1INSERT 0x4000
+#define BCSR_INT_SD1EJECT 0x8000
+
+#define DB1200_IDE_PHYS_ADDR 0x18800000
+#define DB1200_IDE_REG_SHIFT 5
+#define DB1200_IDE_PHYS_LEN (16 << DB1200_IDE_REG_SHIFT)
+#define DB1200_ETH_PHYS_ADDR 0x19000300
+#define DB1200_NAND_PHYS_ADDR 0x20000000
+
+#define PB1200_IDE_PHYS_ADDR 0x0C800000
+#define PB1200_ETH_PHYS_ADDR 0x0D000300
+#define PB1200_NAND_PHYS_ADDR 0x1C000000
+
+#define DB1200_INT_BEGIN (AU1000_MAX_INTR + 1)
+#define DB1200_IDE_INT (DB1200_INT_BEGIN + 0)
+#define DB1200_ETH_INT (DB1200_INT_BEGIN + 1)
+#define DB1200_PC0_INT (DB1200_INT_BEGIN + 2)
+#define DB1200_PC0_STSCHG_INT (DB1200_INT_BEGIN + 3)
+#define DB1200_PC1_INT (DB1200_INT_BEGIN + 4)
+#define DB1200_PC1_STSCHG_INT (DB1200_INT_BEGIN + 5)
+#define DB1200_DC_INT (DB1200_INT_BEGIN + 6)
+#define DB1200_FLASHBUSY_INT (DB1200_INT_BEGIN + 7)
+#define DB1200_PC0_INSERT_INT (DB1200_INT_BEGIN + 8)
+#define DB1200_PC0_EJECT_INT (DB1200_INT_BEGIN + 9)
+#define DB1200_PC1_INSERT_INT (DB1200_INT_BEGIN + 10)
+#define DB1200_PC1_EJECT_INT (DB1200_INT_BEGIN + 11)
+#define DB1200_SD0_INSERT_INT (DB1200_INT_BEGIN + 12)
+#define DB1200_SD0_EJECT_INT (DB1200_INT_BEGIN + 13)
+#define PB1200_SD1_INSERT_INT (DB1200_INT_BEGIN + 14)
+#define PB1200_SD1_EJECT_INT (DB1200_INT_BEGIN + 15)
+#define DB1200_INT_END (DB1200_INT_BEGIN + 15)
+
+const char *get_system_type(void);
+
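+/*
+ * Board detection probes the BCSR hex LED register: write the inverted
+ * value and read it back; only a live CPLD makes the write stick.
+ * Returns 0 when a DB1200/PB1200 BCSR was found, 1 otherwise.
+ */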
+static int __init db1200_detect_board(void)
+{
+ int bid;
+
+ /* try the DB1200 first */
+ bcsr_init(DB1200_BCSR_PHYS_ADDR,
+ DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS);
+ if (BCSR_WHOAMI_DB1200 == BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
+ unsigned short t = bcsr_read(BCSR_HEXLEDS);
+ bcsr_write(BCSR_HEXLEDS, ~t);
+ if (bcsr_read(BCSR_HEXLEDS) != t) {
+ bcsr_write(BCSR_HEXLEDS, t);
+ return 0;
+ }
+ }
+
+ /* okay, try the PB1200 then */
+ bcsr_init(PB1200_BCSR_PHYS_ADDR,
+ PB1200_BCSR_PHYS_ADDR + PB1200_BCSR_HEXLED_OFS);
+ bid = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
+ if ((bid == BCSR_WHOAMI_PB1200_DDR1) ||
+ (bid == BCSR_WHOAMI_PB1200_DDR2)) {
+ unsigned short t = bcsr_read(BCSR_HEXLEDS);
+ bcsr_write(BCSR_HEXLEDS, ~t);
+ if (bcsr_read(BCSR_HEXLEDS) != t) {
+ bcsr_write(BCSR_HEXLEDS, t);
+ return 0;
+ }
+ }
+
+ return 1; /* it's neither */
+}
+
+int __init db1200_board_setup(void)
+{
+ unsigned short whoami;
+
+ if (db1200_detect_board())
+ return -ENODEV;
+
+ whoami = bcsr_read(BCSR_WHOAMI);
+ switch (BCSR_WHOAMI_BOARD(whoami)) {
+ case BCSR_WHOAMI_PB1200_DDR1:
+ case BCSR_WHOAMI_PB1200_DDR2:
+ case BCSR_WHOAMI_DB1200:
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO "Alchemy/AMD/RMI %s Board, CPLD Rev %d"
+ " Board-ID %d Daughtercard ID %d\n", get_system_type(),
+ (whoami >> 4) & 0xf, (whoami >> 8) & 0xf, whoami & 0xf);
+
+ return 0;
+}
+
+/******************************************************************************/
+
+static u64 au1200_all_dmamask = DMA_BIT_MASK(32);
+
+static struct mtd_partition db1200_spiflash_parts[] = {
+ {
+ .name = "spi_flash",
+ .offset = 0,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct flash_platform_data db1200_spiflash_data = {
+ .name = "s25fl001",
+ .parts = db1200_spiflash_parts,
+ .nr_parts = ARRAY_SIZE(db1200_spiflash_parts),
+ .type = "m25p10",
+};
+
+static struct spi_board_info db1200_spi_devs[] __initdata = {
+ {
+ /* TI TMP121AIDBVR temp sensor */
+ .modalias = "tmp121",
+ .max_speed_hz = 2000000,
+ .bus_num = 0,
+ .chip_select = 0,
+ .mode = 0,
+ },
+ {
+ /* Spansion S25FL001D0FMA SPI flash */
+ .modalias = "m25p80",
+ .max_speed_hz = 50000000,
+ .bus_num = 0,
+ .chip_select = 1,
+ .mode = 0,
+ .platform_data = &db1200_spiflash_data,
+ },
+};
+
+static struct i2c_board_info db1200_i2c_devs[] __initdata = {
+ { I2C_BOARD_INFO("24c04", 0x52), }, /* AT24C04-10 I2C eeprom */
+ { I2C_BOARD_INFO("ne1619", 0x2d), }, /* adm1025-compat hwmon */
+ { I2C_BOARD_INFO("wm8731", 0x1b), }, /* I2S audio codec WM8731 */
+};
+
+/**********************************************************************/
+
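+/* NAND hangs off the static bus: command, address and data cycles are
+ * selected by writing to different offsets within the chip's window.
+ */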
+static void au1200_nand_cmd_ctrl(struct nand_chip *this, int cmd,
+ unsigned int ctrl)
+{
+ unsigned long ioaddr = (unsigned long)this->legacy.IO_ADDR_W;
+
+ ioaddr &= 0xffffff00;
+
+ if (ctrl & NAND_CLE) {
+ ioaddr += MEM_STNAND_CMD;
+ } else if (ctrl & NAND_ALE) {
+ ioaddr += MEM_STNAND_ADDR;
+ } else {
+ /* assume we want to r/w real data by default */
+ ioaddr += MEM_STNAND_DATA;
+ }
+ this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W = (void __iomem *)ioaddr;
+ if (cmd != NAND_CMD_NONE) {
+ __raw_writeb(cmd, this->legacy.IO_ADDR_W);
+ wmb();
+ }
+}
+
+static int au1200_nand_device_ready(struct nand_chip *this)
+{
+ return alchemy_rdsmem(AU1000_MEM_STSTAT) & 1;
+}
+
+static struct mtd_partition db1200_nand_parts[] = {
+ {
+ .name = "NAND FS 0",
+ .offset = 0,
+ .size = 8 * 1024 * 1024,
+ },
+ {
+ .name = "NAND FS 1",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL
+ },
+};
+
+struct platform_nand_data db1200_nand_platdata = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_offset = 0,
+ .nr_partitions = ARRAY_SIZE(db1200_nand_parts),
+ .partitions = db1200_nand_parts,
+ .chip_delay = 20,
+ },
+ .ctrl = {
+ .dev_ready = au1200_nand_device_ready,
+ .cmd_ctrl = au1200_nand_cmd_ctrl,
+ },
+};
+
+static struct resource db1200_nand_res[] = {
+ [0] = {
+ .start = DB1200_NAND_PHYS_ADDR,
+ .end = DB1200_NAND_PHYS_ADDR + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device db1200_nand_dev = {
+ .name = "gen_nand",
+ .num_resources = ARRAY_SIZE(db1200_nand_res),
+ .resource = db1200_nand_res,
+ .id = -1,
+ .dev = {
+ .platform_data = &db1200_nand_platdata,
+ }
+};
+
+/**********************************************************************/
+
+static struct smc91x_platdata db1200_eth_data = {
+ .flags = SMC91X_NOWAIT | SMC91X_USE_16BIT,
+ .leda = RPC_LED_100_10,
+ .ledb = RPC_LED_TX_RX,
+};
+
+static struct resource db1200_eth_res[] = {
+ [0] = {
+ .start = DB1200_ETH_PHYS_ADDR,
+ .end = DB1200_ETH_PHYS_ADDR + 0xf,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DB1200_ETH_INT,
+ .end = DB1200_ETH_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device db1200_eth_dev = {
+ .dev = {
+ .platform_data = &db1200_eth_data,
+ },
+ .name = "smc91x",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(db1200_eth_res),
+ .resource = db1200_eth_res,
+};
+
+/**********************************************************************/
+
+static struct pata_platform_info db1200_ide_info = {
+ .ioport_shift = DB1200_IDE_REG_SHIFT,
+};
+
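+/* the ATA alternate-status/device-control block starts at register offset 14 */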
+#define IDE_ALT_START (14 << DB1200_IDE_REG_SHIFT)
+static struct resource db1200_ide_res[] = {
+ [0] = {
+ .start = DB1200_IDE_PHYS_ADDR,
+ .end = DB1200_IDE_PHYS_ADDR + IDE_ALT_START - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DB1200_IDE_PHYS_ADDR + IDE_ALT_START,
+ .end = DB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = DB1200_IDE_INT,
+ .end = DB1200_IDE_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device db1200_ide_dev = {
+ .name = "pata_platform",
+ .id = 0,
+ .dev = {
+ .dma_mask = &au1200_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &db1200_ide_info,
+ },
+ .num_resources = ARRAY_SIZE(db1200_ide_res),
+ .resource = db1200_ide_res,
+};
+
+/**********************************************************************/
+
+#ifdef CONFIG_MMC_AU1X
+/* SD carddetects: they're supposed to be edge-triggered, but ack
+ * doesn't seem to work (CPLD Rev 2). Instead, the screaming one
+ * is disabled and its counterpart enabled. The 200ms timeout is used
+ * because the carddetect usually triggers twice after debounce.
+ */
+static irqreturn_t db1200_mmc_cd(int irq, void *ptr)
+{
+ disable_irq_nosync(irq);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t db1200_mmc_cdfn(int irq, void *ptr)
+{
+ mmc_detect_change(ptr, msecs_to_jiffies(200));
+
+ msleep(100); /* debounce */
+ if (irq == DB1200_SD0_INSERT_INT)
+ enable_irq(DB1200_SD0_EJECT_INT);
+ else
+ enable_irq(DB1200_SD0_INSERT_INT);
+
+ return IRQ_HANDLED;
+}
+
+static int db1200_mmc_cd_setup(void *mmc_host, int en)
+{
+ int ret;
+
+ if (en) {
+ ret = request_threaded_irq(DB1200_SD0_INSERT_INT, db1200_mmc_cd,
+ db1200_mmc_cdfn, 0, "sd_insert", mmc_host);
+ if (ret)
+ goto out;
+
+ ret = request_threaded_irq(DB1200_SD0_EJECT_INT, db1200_mmc_cd,
+ db1200_mmc_cdfn, 0, "sd_eject", mmc_host);
+ if (ret) {
+ free_irq(DB1200_SD0_INSERT_INT, mmc_host);
+ goto out;
+ }
+
+ if (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD0INSERT)
+ enable_irq(DB1200_SD0_EJECT_INT);
+ else
+ enable_irq(DB1200_SD0_INSERT_INT);
+
+ } else {
+ free_irq(DB1200_SD0_INSERT_INT, mmc_host);
+ free_irq(DB1200_SD0_EJECT_INT, mmc_host);
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+static void db1200_mmc_set_power(void *mmc_host, int state)
+{
+ if (state) {
+ bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_SD0PWR);
+ msleep(400); /* stabilization time */
+ } else
+ bcsr_mod(BCSR_BOARD, BCSR_BOARD_SD0PWR, 0);
+}
+
+static int db1200_mmc_card_readonly(void *mmc_host)
+{
+ return (bcsr_read(BCSR_STATUS) & BCSR_STATUS_SD0WP) ? 1 : 0;
+}
+
+static int db1200_mmc_card_inserted(void *mmc_host)
+{
+ return (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD0INSERT) ? 1 : 0;
+}
+
+static void db1200_mmcled_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ if (brightness != LED_OFF)
+ bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED0, 0);
+ else
+ bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED0);
+}
+
+static struct led_classdev db1200_mmc_led = {
+ .brightness_set = db1200_mmcled_set,
+};
+
+/* -- */
+
+static irqreturn_t pb1200_mmc1_cd(int irq, void *ptr)
+{
+ disable_irq_nosync(irq);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t pb1200_mmc1_cdfn(int irq, void *ptr)
+{
+ mmc_detect_change(ptr, msecs_to_jiffies(200));
+
+ msleep(100); /* debounce */
+ if (irq == PB1200_SD1_INSERT_INT)
+ enable_irq(PB1200_SD1_EJECT_INT);
+ else
+ enable_irq(PB1200_SD1_INSERT_INT);
+
+ return IRQ_HANDLED;
+}
+
+static int pb1200_mmc1_cd_setup(void *mmc_host, int en)
+{
+ int ret;
+
+ if (en) {
+ ret = request_threaded_irq(PB1200_SD1_INSERT_INT, pb1200_mmc1_cd,
+ pb1200_mmc1_cdfn, 0, "sd1_insert", mmc_host);
+ if (ret)
+ goto out;
+
+ ret = request_threaded_irq(PB1200_SD1_EJECT_INT, pb1200_mmc1_cd,
+ pb1200_mmc1_cdfn, 0, "sd1_eject", mmc_host);
+ if (ret) {
+ free_irq(PB1200_SD1_INSERT_INT, mmc_host);
+ goto out;
+ }
+
+ if (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD1INSERT)
+ enable_irq(PB1200_SD1_EJECT_INT);
+ else
+ enable_irq(PB1200_SD1_INSERT_INT);
+
+ } else {
+ free_irq(PB1200_SD1_INSERT_INT, mmc_host);
+ free_irq(PB1200_SD1_EJECT_INT, mmc_host);
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+static void pb1200_mmc1led_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ if (brightness != LED_OFF)
+ bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED1, 0);
+ else
+ bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED1);
+}
+
+static struct led_classdev pb1200_mmc1_led = {
+ .brightness_set = pb1200_mmc1led_set,
+};
+
+static void pb1200_mmc1_set_power(void *mmc_host, int state)
+{
+ if (state) {
+ bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_SD1PWR);
+ msleep(400); /* stabilization time */
+ } else
+ bcsr_mod(BCSR_BOARD, BCSR_BOARD_SD1PWR, 0);
+}
+
+static int pb1200_mmc1_card_readonly(void *mmc_host)
+{
+ return (bcsr_read(BCSR_STATUS) & BCSR_STATUS_SD1WP) ? 1 : 0;
+}
+
+static int pb1200_mmc1_card_inserted(void *mmc_host)
+{
+ return (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD1INSERT) ? 1 : 0;
+}
+
+
+static struct au1xmmc_platform_data db1200_mmc_platdata[2] = {
+ [0] = {
+ .cd_setup = db1200_mmc_cd_setup,
+ .set_power = db1200_mmc_set_power,
+ .card_inserted = db1200_mmc_card_inserted,
+ .card_readonly = db1200_mmc_card_readonly,
+ .led = &db1200_mmc_led,
+ },
+ [1] = {
+ .cd_setup = pb1200_mmc1_cd_setup,
+ .set_power = pb1200_mmc1_set_power,
+ .card_inserted = pb1200_mmc1_card_inserted,
+ .card_readonly = pb1200_mmc1_card_readonly,
+ .led = &pb1200_mmc1_led,
+ },
+};
+
+static struct resource au1200_mmc0_resources[] = {
+ [0] = {
+ .start = AU1100_SD0_PHYS_ADDR,
+ .end = AU1100_SD0_PHYS_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1200_SD_INT,
+ .end = AU1200_SD_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = AU1200_DSCR_CMD0_SDMS_TX0,
+ .end = AU1200_DSCR_CMD0_SDMS_TX0,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = AU1200_DSCR_CMD0_SDMS_RX0,
+ .end = AU1200_DSCR_CMD0_SDMS_RX0,
+ .flags = IORESOURCE_DMA,
+ }
+};
+
+static struct platform_device db1200_mmc0_dev = {
+ .name = "au1xxx-mmc",
+ .id = 0,
+ .dev = {
+ .dma_mask = &au1200_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &db1200_mmc_platdata[0],
+ },
+ .num_resources = ARRAY_SIZE(au1200_mmc0_resources),
+ .resource = au1200_mmc0_resources,
+};
+
+static struct resource au1200_mmc1_res[] = {
+ [0] = {
+ .start = AU1100_SD1_PHYS_ADDR,
+ .end = AU1100_SD1_PHYS_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1200_SD_INT,
+ .end = AU1200_SD_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = AU1200_DSCR_CMD0_SDMS_TX1,
+ .end = AU1200_DSCR_CMD0_SDMS_TX1,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = AU1200_DSCR_CMD0_SDMS_RX1,
+ .end = AU1200_DSCR_CMD0_SDMS_RX1,
+ .flags = IORESOURCE_DMA,
+ }
+};
+
+static struct platform_device pb1200_mmc1_dev = {
+ .name = "au1xxx-mmc",
+ .id = 1,
+ .dev = {
+ .dma_mask = &au1200_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &db1200_mmc_platdata[1],
+ },
+ .num_resources = ARRAY_SIZE(au1200_mmc1_res),
+ .resource = au1200_mmc1_res,
+};
+#endif /* CONFIG_MMC_AU1X */
+
+/**********************************************************************/
+
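+/* the LCD panel type is selected via DIP switches; read it from
+ * bits [11:8] of the BCSR switches register.
+ */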
+static int db1200fb_panel_index(void)
+{
+ return (bcsr_read(BCSR_SWITCHES) >> 8) & 0x0f;
+}
+
+static int db1200fb_panel_init(void)
+{
+ /* Apply power */
+ bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD |
+ BCSR_BOARD_LCDBL);
+ return 0;
+}
+
+static int db1200fb_panel_shutdown(void)
+{
+ /* Remove power */
+ bcsr_mod(BCSR_BOARD, BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD |
+ BCSR_BOARD_LCDBL, 0);
+ return 0;
+}
+
+static struct au1200fb_platdata db1200fb_pd = {
+ .panel_index = db1200fb_panel_index,
+ .panel_init = db1200fb_panel_init,
+ .panel_shutdown = db1200fb_panel_shutdown,
+};
+
+static struct resource au1200_lcd_res[] = {
+ [0] = {
+ .start = AU1200_LCD_PHYS_ADDR,
+ .end = AU1200_LCD_PHYS_ADDR + 0x800 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1200_LCD_INT,
+ .end = AU1200_LCD_INT,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device au1200_lcd_dev = {
+ .name = "au1200-lcd",
+ .id = 0,
+ .dev = {
+ .dma_mask = &au1200_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &db1200fb_pd,
+ },
+ .num_resources = ARRAY_SIZE(au1200_lcd_res),
+ .resource = au1200_lcd_res,
+};
+
+/**********************************************************************/
+
+static struct resource au1200_psc0_res[] = {
+ [0] = {
+ .start = AU1550_PSC0_PHYS_ADDR,
+ .end = AU1550_PSC0_PHYS_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1200_PSC0_INT,
+ .end = AU1200_PSC0_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = AU1200_DSCR_CMD0_PSC0_TX,
+ .end = AU1200_DSCR_CMD0_PSC0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = AU1200_DSCR_CMD0_PSC0_RX,
+ .end = AU1200_DSCR_CMD0_PSC0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct platform_device db1200_i2c_dev = {
+ .name = "au1xpsc_smbus",
+ .id = 0, /* bus number */
+ .num_resources = ARRAY_SIZE(au1200_psc0_res),
+ .resource = au1200_psc0_res,
+};
+
+static void db1200_spi_cs_en(struct au1550_spi_info *spi, int cs, int pol)
+{
+ if (cs)
+ bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_SPISEL);
+ else
+ bcsr_mod(BCSR_RESETS, BCSR_RESETS_SPISEL, 0);
+}
+
+static struct au1550_spi_info db1200_spi_platdata = {
+ .mainclk_hz = 50000000, /* PSC0 clock */
+ .num_chipselect = 2,
+ .activate_cs = db1200_spi_cs_en,
+};
+
+static struct platform_device db1200_spi_dev = {
+ .dev = {
+ .dma_mask = &au1200_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &db1200_spi_platdata,
+ },
+ .name = "au1550-spi",
+ .id = 0, /* bus number */
+ .num_resources = ARRAY_SIZE(au1200_psc0_res),
+ .resource = au1200_psc0_res,
+};
+
+static struct resource au1200_psc1_res[] = {
+ [0] = {
+ .start = AU1550_PSC1_PHYS_ADDR,
+ .end = AU1550_PSC1_PHYS_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1200_PSC1_INT,
+ .end = AU1200_PSC1_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = AU1200_DSCR_CMD0_PSC1_TX,
+ .end = AU1200_DSCR_CMD0_PSC1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = AU1200_DSCR_CMD0_PSC1_RX,
+ .end = AU1200_DSCR_CMD0_PSC1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+/* AC97 or I2S device */
+static struct platform_device db1200_audio_dev = {
+ /* name assigned later based on switch setting */
+ .id = 1, /* PSC ID */
+ .num_resources = ARRAY_SIZE(au1200_psc1_res),
+ .resource = au1200_psc1_res,
+};
+
+/* DB1200 ASoC card device */
+static struct platform_device db1200_sound_dev = {
+ /* name assigned later based on switch setting */
+ .id = 1, /* PSC ID */
+ .dev = {
+ .dma_mask = &au1200_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static struct platform_device db1200_stac_dev = {
+ .name = "ac97-codec",
+ .id = 1, /* on PSC1 */
+};
+
+static struct platform_device db1200_audiodma_dev = {
+ .name = "au1xpsc-pcm",
+ .id = 1, /* PSC ID */
+};
+
+static struct platform_device *db1200_devs[] __initdata = {
+ NULL, /* PSC0, selected by S6.8 */
+ &db1200_ide_dev,
+#ifdef CONFIG_MMC_AU1X
+ &db1200_mmc0_dev,
+#endif
+ &au1200_lcd_dev,
+ &db1200_eth_dev,
+ &db1200_nand_dev,
+ &db1200_audiodma_dev,
+ &db1200_audio_dev,
+ &db1200_stac_dev,
+ &db1200_sound_dev,
+};
+
+static struct platform_device *pb1200_devs[] __initdata = {
+#ifdef CONFIG_MMC_AU1X
+ &pb1200_mmc1_dev,
+#endif
+};
+
+/* Some peripheral base addresses differ on the PB1200 */
+static int __init pb1200_res_fixup(void)
+{
+ /* CPLD Revs earlier than 4 cause problems */
+ if (BCSR_WHOAMI_CPLD(bcsr_read(BCSR_WHOAMI)) <= 3) {
+ printk(KERN_ERR "WARNING!!!\n");
+ printk(KERN_ERR "WARNING!!!\n");
+ printk(KERN_ERR "PB1200 must be at CPLD rev 4. Please have\n");
+ printk(KERN_ERR "the board updated to latest revisions.\n");
+ printk(KERN_ERR "This software will not work reliably\n");
+ printk(KERN_ERR "on anything older than CPLD rev 4.!\n");
+ printk(KERN_ERR "WARNING!!!\n");
+ printk(KERN_ERR "WARNING!!!\n");
+ return 1;
+ }
+
+ db1200_nand_res[0].start = PB1200_NAND_PHYS_ADDR;
+ db1200_nand_res[0].end = PB1200_NAND_PHYS_ADDR + 0xff;
+ db1200_ide_res[0].start = PB1200_IDE_PHYS_ADDR;
+ db1200_ide_res[0].end = PB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1;
+ db1200_eth_res[0].start = PB1200_ETH_PHYS_ADDR;
+ db1200_eth_res[0].end = PB1200_ETH_PHYS_ADDR + 0xff;
+ return 0;
+}
+
+int __init db1200_dev_setup(void)
+{
+ unsigned long pfc;
+ unsigned short sw;
+ int swapped, bid;
+ struct clk *c;
+
+ bid = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
+ if ((bid == BCSR_WHOAMI_PB1200_DDR1) ||
+ (bid == BCSR_WHOAMI_PB1200_DDR2)) {
+ if (pb1200_res_fixup())
+ return -ENODEV;
+ }
+
+ /* GPIO7 is low-level triggered CPLD cascade */
+ irq_set_irq_type(AU1200_GPIO7_INT, IRQ_TYPE_LEVEL_LOW);
+ bcsr_init_irq(DB1200_INT_BEGIN, DB1200_INT_END, AU1200_GPIO7_INT);
+
+ /* SMBus/SPI on PSC0, Audio on PSC1 */
+ pfc = alchemy_rdsys(AU1000_SYS_PINFUNC);
+ pfc &= ~(SYS_PINFUNC_P0A | SYS_PINFUNC_P0B);
+ pfc &= ~(SYS_PINFUNC_P1A | SYS_PINFUNC_P1B | SYS_PINFUNC_FS3);
+ pfc |= SYS_PINFUNC_P1C; /* SPI is configured later */
+ alchemy_wrsys(pfc, AU1000_SYS_PINFUNC);
+
+ /* get 50MHz for I2C driver on PSC0 */
+ c = clk_get(NULL, "psc0_intclk");
+ if (!IS_ERR(c)) {
+ pfc = clk_round_rate(c, 50000000);
+ if ((pfc < 1) || (abs(50000000 - pfc) > 2500000))
+ pr_warn("DB1200: cant get I2C close to 50MHz\n");
+ else
+ clk_set_rate(c, pfc);
+ clk_prepare_enable(c);
+ clk_put(c);
+ }
+
+ /* insert/eject pairs: one of the two is always screaming. To avoid
+ * issues they must not be enabled automatically when initially
+ * requested.
+ */
+ irq_set_status_flags(DB1200_SD0_INSERT_INT, IRQ_NOAUTOEN);
+ irq_set_status_flags(DB1200_SD0_EJECT_INT, IRQ_NOAUTOEN);
+ irq_set_status_flags(DB1200_PC0_INSERT_INT, IRQ_NOAUTOEN);
+ irq_set_status_flags(DB1200_PC0_EJECT_INT, IRQ_NOAUTOEN);
+ irq_set_status_flags(DB1200_PC1_INSERT_INT, IRQ_NOAUTOEN);
+ irq_set_status_flags(DB1200_PC1_EJECT_INT, IRQ_NOAUTOEN);
+
+ i2c_register_board_info(0, db1200_i2c_devs,
+ ARRAY_SIZE(db1200_i2c_devs));
+ spi_register_board_info(db1200_spi_devs,
+ ARRAY_SIZE(db1200_spi_devs));
+
+ /* SWITCHES: S6.8 I2C/SPI selector (OFF=I2C ON=SPI)
+ * S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)
+ * or S12 on the PB1200.
+ */
+
+ /* NOTE: GPIO215 controls OTG VBUS supply. In SPI mode however
+ * this pin is claimed by PSC0 (unused there, but the pinmux doesn't
+ * allow freeing it without crippling the SPI interface).
+ * As a result, OTG simply won't work in SPI mode (PSC0 uses
+ * it as an input pin, which is pulled high on the boards).
+ */
+ pfc = alchemy_rdsys(AU1000_SYS_PINFUNC) & ~SYS_PINFUNC_P0A;
+
+ /* switch off OTG VBUS supply */
+ gpio_request(215, "otg-vbus");
+ gpio_direction_output(215, 1);
+
+ printk(KERN_INFO "%s device configuration:\n", get_system_type());
+
+ sw = bcsr_read(BCSR_SWITCHES);
+ if (sw & BCSR_SWITCHES_DIP_8) {
+ db1200_devs[0] = &db1200_i2c_dev;
+ bcsr_mod(BCSR_RESETS, BCSR_RESETS_PSC0MUX, 0);
+
+ pfc |= (2 << 17); /* GPIO2 block owns GPIO215 */
+
+ printk(KERN_INFO " S6.8 OFF: PSC0 mode I2C\n");
+ printk(KERN_INFO " OTG port VBUS supply available!\n");
+ } else {
+ db1200_devs[0] = &db1200_spi_dev;
+ bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_PSC0MUX);
+
+ pfc |= (1 << 17); /* PSC0 owns GPIO215 */
+
+ printk(KERN_INFO " S6.8 ON : PSC0 mode SPI\n");
+ printk(KERN_INFO " OTG port VBUS supply disabled\n");
+ }
+ alchemy_wrsys(pfc, AU1000_SYS_PINFUNC);
+
+ /* Audio: DIP7 selects I2S(0)/AC97(1), but I2S also needs I2C!
+ * So: DIP7=1 || DIP8=0 => AC97; DIP7=0 && DIP8=1 => I2S.
+ */
+ sw &= BCSR_SWITCHES_DIP_8 | BCSR_SWITCHES_DIP_7;
+ if (sw == BCSR_SWITCHES_DIP_8) {
+ bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_PSC1MUX);
+ db1200_audio_dev.name = "au1xpsc_i2s";
+ db1200_sound_dev.name = "db1200-i2s";
+ printk(KERN_INFO " S6.7 ON : PSC1 mode I2S\n");
+ } else {
+ bcsr_mod(BCSR_RESETS, BCSR_RESETS_PSC1MUX, 0);
+ db1200_audio_dev.name = "au1xpsc_ac97";
+ db1200_sound_dev.name = "db1200-ac97";
+ printk(KERN_INFO " S6.7 OFF: PSC1 mode AC97\n");
+ }
+
+ /* Audio PSC clock is supplied externally. (FIXME: platdata!!) */
+ __raw_writel(PSC_SEL_CLK_SERCLK,
+ (void __iomem *)KSEG1ADDR(AU1550_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET);
+ wmb();
+
+ db1x_register_pcmcia_socket(
+ AU1000_PCMCIA_ATTR_PHYS_ADDR,
+ AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1,
+ AU1000_PCMCIA_MEM_PHYS_ADDR,
+ AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
+ AU1000_PCMCIA_IO_PHYS_ADDR,
+ AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
+ DB1200_PC0_INT, DB1200_PC0_INSERT_INT,
+ /*DB1200_PC0_STSCHG_INT*/0, DB1200_PC0_EJECT_INT, 0);
+
+ db1x_register_pcmcia_socket(
+ AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004000000,
+ AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004400000 - 1,
+ AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004000000,
+ AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
+ AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000,
+ AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
+ DB1200_PC1_INT, DB1200_PC1_INSERT_INT,
+ /*DB1200_PC1_STSCHG_INT*/0, DB1200_PC1_EJECT_INT, 1);
+
+ swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1200_SWAPBOOT;
+ db1x_register_norflash(64 << 20, 2, swapped);
+
+ platform_add_devices(db1200_devs, ARRAY_SIZE(db1200_devs));
+
+ /* PB1200 is a DB1200 with a 2nd MMC and Camera connector */
+ if ((bid == BCSR_WHOAMI_PB1200_DDR1) ||
+ (bid == BCSR_WHOAMI_PB1200_DDR2))
+ platform_add_devices(pb1200_devs, ARRAY_SIZE(pb1200_devs));
+
+ return 0;
+}
diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
new file mode 100644
index 000000000..c965d0007
--- /dev/null
+++ b/arch/mips/alchemy/devboards/db1300.c
@@ -0,0 +1,888 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DBAu1300 init and platform device setup.
+ *
+ * (c) 2009 Manuel Lauss <manuel.lauss@googlemail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/init.h>
+#include <linux/input.h> /* KEY_* codes */
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/interrupt.h>
+#include <linux/ata_platform.h>
+#include <linux/mmc/host.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/platnand.h>
+#include <linux/platform_device.h>
+#include <linux/smsc911x.h>
+#include <linux/wm97xx.h>
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1300.h>
+#include <asm/mach-au1x00/au1100_mmc.h>
+#include <asm/mach-au1x00/au1200fb.h>
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
+#include <asm/mach-au1x00/au1xxx_psc.h>
+#include <asm/mach-db1x00/bcsr.h>
+#include <asm/mach-au1x00/prom.h>
+
+#include "platform.h"
+
+/* FPGA (external mux) interrupt sources */
+#define DB1300_FIRST_INT (ALCHEMY_GPIC_INT_LAST + 1)
+#define DB1300_IDE_INT (DB1300_FIRST_INT + 0)
+#define DB1300_ETH_INT (DB1300_FIRST_INT + 1)
+#define DB1300_CF_INT (DB1300_FIRST_INT + 2)
+#define DB1300_VIDEO_INT (DB1300_FIRST_INT + 4)
+#define DB1300_HDMI_INT (DB1300_FIRST_INT + 5)
+#define DB1300_DC_INT (DB1300_FIRST_INT + 6)
+#define DB1300_FLASH_INT (DB1300_FIRST_INT + 7)
+#define DB1300_CF_INSERT_INT (DB1300_FIRST_INT + 8)
+#define DB1300_CF_EJECT_INT (DB1300_FIRST_INT + 9)
+#define DB1300_AC97_INT (DB1300_FIRST_INT + 10)
+#define DB1300_AC97_PEN_INT (DB1300_FIRST_INT + 11)
+#define DB1300_SD1_INSERT_INT (DB1300_FIRST_INT + 12)
+#define DB1300_SD1_EJECT_INT (DB1300_FIRST_INT + 13)
+#define DB1300_OTG_VBUS_OC_INT (DB1300_FIRST_INT + 14)
+#define DB1300_HOST_VBUS_OC_INT (DB1300_FIRST_INT + 15)
+#define DB1300_LAST_INT (DB1300_FIRST_INT + 15)
+
+/* SMSC9210 CS */
+#define DB1300_ETH_PHYS_ADDR 0x19000000
+#define DB1300_ETH_PHYS_END 0x197fffff
+
+/* ATA CS */
+#define DB1300_IDE_PHYS_ADDR 0x18800000
+#define DB1300_IDE_REG_SHIFT 5
+#define DB1300_IDE_PHYS_LEN (16 << DB1300_IDE_REG_SHIFT)
+
+/* NAND CS */
+#define DB1300_NAND_PHYS_ADDR 0x20000000
+#define DB1300_NAND_PHYS_END 0x20000fff
+
+
+static struct i2c_board_info db1300_i2c_devs[] __initdata = {
+ { I2C_BOARD_INFO("wm8731", 0x1b), }, /* I2S audio codec */
+ { I2C_BOARD_INFO("ne1619", 0x2d), }, /* adm1025-compat hwmon */
+};
+
+/* multifunction pins to assign to GPIO controller */
+static int db1300_gpio_pins[] __initdata = {
+ AU1300_PIN_LCDPWM0, AU1300_PIN_PSC2SYNC1, AU1300_PIN_WAKE1,
+ AU1300_PIN_WAKE2, AU1300_PIN_WAKE3, AU1300_PIN_FG3AUX,
+ AU1300_PIN_EXTCLK1,
+ -1, /* terminator */
+};
+
+/* multifunction pins to assign to device functions */
+static int db1300_dev_pins[] __initdata = {
+ /* wake-from-str pins 0-3 */
+ AU1300_PIN_WAKE0,
+ /* external clock sources for PSC0 */
+ AU1300_PIN_EXTCLK0,
+ /* 8bit MMC interface on SD0: 6-9 */
+ AU1300_PIN_SD0DAT4, AU1300_PIN_SD0DAT5, AU1300_PIN_SD0DAT6,
+ AU1300_PIN_SD0DAT7,
+ /* UART1 pins: 11-18 */
+ AU1300_PIN_U1RI, AU1300_PIN_U1DCD, AU1300_PIN_U1DSR,
+ AU1300_PIN_U1CTS, AU1300_PIN_U1RTS, AU1300_PIN_U1DTR,
+ AU1300_PIN_U1RX, AU1300_PIN_U1TX,
+ /* UART0 pins: 19-24 */
+ AU1300_PIN_U0RI, AU1300_PIN_U0DCD, AU1300_PIN_U0DSR,
+ AU1300_PIN_U0CTS, AU1300_PIN_U0RTS, AU1300_PIN_U0DTR,
+ /* UART2: 25-26 */
+ AU1300_PIN_U2RX, AU1300_PIN_U2TX,
+ /* UART3: 27-28 */
+ AU1300_PIN_U3RX, AU1300_PIN_U3TX,
+ /* LCD controller PWMs, ext pixclock: 30-31 */
+ AU1300_PIN_LCDPWM1, AU1300_PIN_LCDCLKIN,
+ /* SD1 interface: 32-37 */
+ AU1300_PIN_SD1DAT0, AU1300_PIN_SD1DAT1, AU1300_PIN_SD1DAT2,
+ AU1300_PIN_SD1DAT3, AU1300_PIN_SD1CMD, AU1300_PIN_SD1CLK,
+ /* SD2 interface: 38-43 */
+ AU1300_PIN_SD2DAT0, AU1300_PIN_SD2DAT1, AU1300_PIN_SD2DAT2,
+ AU1300_PIN_SD2DAT3, AU1300_PIN_SD2CMD, AU1300_PIN_SD2CLK,
+ /* PSC0/1 clocks: 44-45 */
+ AU1300_PIN_PSC0CLK, AU1300_PIN_PSC1CLK,
+ /* PSCs: 46-49/50-53/54-57/58-61 */
+ AU1300_PIN_PSC0SYNC0, AU1300_PIN_PSC0SYNC1, AU1300_PIN_PSC0D0,
+ AU1300_PIN_PSC0D1,
+ AU1300_PIN_PSC1SYNC0, AU1300_PIN_PSC1SYNC1, AU1300_PIN_PSC1D0,
+ AU1300_PIN_PSC1D1,
+ AU1300_PIN_PSC2SYNC0, AU1300_PIN_PSC2D0,
+ AU1300_PIN_PSC2D1,
+ AU1300_PIN_PSC3SYNC0, AU1300_PIN_PSC3SYNC1, AU1300_PIN_PSC3D0,
+ AU1300_PIN_PSC3D1,
+ /* PCMCIA interface: 62-70 */
+ AU1300_PIN_PCE2, AU1300_PIN_PCE1, AU1300_PIN_PIOS16,
+ AU1300_PIN_PIOR, AU1300_PIN_PWE, AU1300_PIN_PWAIT,
+ AU1300_PIN_PREG, AU1300_PIN_POE, AU1300_PIN_PIOW,
+ /* camera interface H/V sync inputs: 71-72 */
+ AU1300_PIN_CIMLS, AU1300_PIN_CIMFS,
+ /* PSC2/3 clocks: 73-74 */
+ AU1300_PIN_PSC2CLK, AU1300_PIN_PSC3CLK,
+ -1, /* terminator */
+};
+
+static void __init db1300_gpio_config(void)
+{
+ int *i;
+
+ i = &db1300_dev_pins[0];
+ while (*i != -1)
+ au1300_pinfunc_to_dev(*i++);
+
+ i = &db1300_gpio_pins[0];
+ while (*i != -1)
+ au1300_gpio_direction_input(*i++); /* implies pin_to_gpio */
+
+ au1300_set_dbdma_gpio(1, AU1300_PIN_FG3AUX);
+}
+
+/**********************************************************************/
+
+static u64 au1300_all_dmamask = DMA_BIT_MASK(32);
+
+static void au1300_nand_cmd_ctrl(struct nand_chip *this, int cmd,
+ unsigned int ctrl)
+{
+ unsigned long ioaddr = (unsigned long)this->legacy.IO_ADDR_W;
+
+ ioaddr &= 0xffffff00;
+
+ if (ctrl & NAND_CLE) {
+ ioaddr += MEM_STNAND_CMD;
+ } else if (ctrl & NAND_ALE) {
+ ioaddr += MEM_STNAND_ADDR;
+ } else {
+ /* assume we want to r/w real data by default */
+ ioaddr += MEM_STNAND_DATA;
+ }
+ this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W = (void __iomem *)ioaddr;
+ if (cmd != NAND_CMD_NONE) {
+ __raw_writeb(cmd, this->legacy.IO_ADDR_W);
+ wmb();
+ }
+}
+
+static int au1300_nand_device_ready(struct nand_chip *this)
+{
+ return alchemy_rdsmem(AU1000_MEM_STSTAT) & 1;
+}
+
+static struct mtd_partition db1300_nand_parts[] = {
+ {
+ .name = "NAND FS 0",
+ .offset = 0,
+ .size = 8 * 1024 * 1024,
+ },
+ {
+ .name = "NAND FS 1",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL
+ },
+};
+
+struct platform_nand_data db1300_nand_platdata = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_offset = 0,
+ .nr_partitions = ARRAY_SIZE(db1300_nand_parts),
+ .partitions = db1300_nand_parts,
+ .chip_delay = 20,
+ },
+ .ctrl = {
+ .dev_ready = au1300_nand_device_ready,
+ .cmd_ctrl = au1300_nand_cmd_ctrl,
+ },
+};
+
+static struct resource db1300_nand_res[] = {
+ [0] = {
+ .start = DB1300_NAND_PHYS_ADDR,
+ .end = DB1300_NAND_PHYS_ADDR + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device db1300_nand_dev = {
+ .name = "gen_nand",
+ .num_resources = ARRAY_SIZE(db1300_nand_res),
+ .resource = db1300_nand_res,
+ .id = -1,
+ .dev = {
+ .platform_data = &db1300_nand_platdata,
+ }
+};
+
+/**********************************************************************/
+
+static struct resource db1300_eth_res[] = {
+ [0] = {
+ .start = DB1300_ETH_PHYS_ADDR,
+ .end = DB1300_ETH_PHYS_END,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DB1300_ETH_INT,
+ .end = DB1300_ETH_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct smsc911x_platform_config db1300_eth_config = {
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+ .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
+ .flags = SMSC911X_USE_32BIT,
+};
+
+static struct platform_device db1300_eth_dev = {
+ .name = "smsc911x",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(db1300_eth_res),
+ .resource = db1300_eth_res,
+ .dev = {
+ .platform_data = &db1300_eth_config,
+ },
+};
+
+/**********************************************************************/
+
+static struct resource au1300_psc1_res[] = {
+ [0] = {
+ .start = AU1300_PSC1_PHYS_ADDR,
+ .end = AU1300_PSC1_PHYS_ADDR + 0x0fff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1300_PSC1_INT,
+ .end = AU1300_PSC1_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = AU1300_DSCR_CMD0_PSC1_TX,
+ .end = AU1300_DSCR_CMD0_PSC1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = AU1300_DSCR_CMD0_PSC1_RX,
+ .end = AU1300_DSCR_CMD0_PSC1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct platform_device db1300_ac97_dev = {
+ .name = "au1xpsc_ac97",
+ .id = 1, /* PSC ID. match with AC97 codec ID! */
+ .num_resources = ARRAY_SIZE(au1300_psc1_res),
+ .resource = au1300_psc1_res,
+};
+
+/**********************************************************************/
+
+static struct resource au1300_psc2_res[] = {
+ [0] = {
+ .start = AU1300_PSC2_PHYS_ADDR,
+ .end = AU1300_PSC2_PHYS_ADDR + 0x0fff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1300_PSC2_INT,
+ .end = AU1300_PSC2_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = AU1300_DSCR_CMD0_PSC2_TX,
+ .end = AU1300_DSCR_CMD0_PSC2_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = AU1300_DSCR_CMD0_PSC2_RX,
+ .end = AU1300_DSCR_CMD0_PSC2_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct platform_device db1300_i2s_dev = {
+ .name = "au1xpsc_i2s",
+ .id = 2, /* PSC ID */
+ .num_resources = ARRAY_SIZE(au1300_psc2_res),
+ .resource = au1300_psc2_res,
+};
+
+/**********************************************************************/
+
+static struct resource au1300_psc3_res[] = {
+ [0] = {
+ .start = AU1300_PSC3_PHYS_ADDR,
+ .end = AU1300_PSC3_PHYS_ADDR + 0x0fff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1300_PSC3_INT,
+ .end = AU1300_PSC3_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = AU1300_DSCR_CMD0_PSC3_TX,
+ .end = AU1300_DSCR_CMD0_PSC3_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = AU1300_DSCR_CMD0_PSC3_RX,
+ .end = AU1300_DSCR_CMD0_PSC3_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct platform_device db1300_i2c_dev = {
+ .name = "au1xpsc_smbus",
+ .id = 0, /* bus number */
+ .num_resources = ARRAY_SIZE(au1300_psc3_res),
+ .resource = au1300_psc3_res,
+};
+
+/**********************************************************************/
+
+/* Key assignments are correct when facing the LCD panel. For assignments
+ * matching the schematics, swap up with down and left with right.
+ * The 5-way switch is used here to emulate the arrow keys of a keyboard.
+ */
+static struct gpio_keys_button db1300_5waysw_arrowkeys[] = {
+ {
+ .code = KEY_DOWN,
+ .gpio = AU1300_PIN_LCDPWM0,
+ .type = EV_KEY,
+ .debounce_interval = 1,
+ .active_low = 1,
+ .desc = "5waysw-down",
+ },
+ {
+ .code = KEY_UP,
+ .gpio = AU1300_PIN_PSC2SYNC1,
+ .type = EV_KEY,
+ .debounce_interval = 1,
+ .active_low = 1,
+ .desc = "5waysw-up",
+ },
+ {
+ .code = KEY_RIGHT,
+ .gpio = AU1300_PIN_WAKE3,
+ .type = EV_KEY,
+ .debounce_interval = 1,
+ .active_low = 1,
+ .desc = "5waysw-right",
+ },
+ {
+ .code = KEY_LEFT,
+ .gpio = AU1300_PIN_WAKE2,
+ .type = EV_KEY,
+ .debounce_interval = 1,
+ .active_low = 1,
+ .desc = "5waysw-left",
+ },
+ {
+ .code = KEY_ENTER,
+ .gpio = AU1300_PIN_WAKE1,
+ .type = EV_KEY,
+ .debounce_interval = 1,
+ .active_low = 1,
+ .desc = "5waysw-push",
+ },
+};
+
+static struct gpio_keys_platform_data db1300_5waysw_data = {
+ .buttons = db1300_5waysw_arrowkeys,
+ .nbuttons = ARRAY_SIZE(db1300_5waysw_arrowkeys),
+ .rep = 1,
+ .name = "db1300-5wayswitch",
+};
+
+static struct platform_device db1300_5waysw_dev = {
+ .name = "gpio-keys",
+ .dev = {
+ .platform_data = &db1300_5waysw_data,
+ },
+};
+
+/**********************************************************************/
+
+static struct pata_platform_info db1300_ide_info = {
+ .ioport_shift = DB1300_IDE_REG_SHIFT,
+};
+
+#define IDE_ALT_START (14 << DB1300_IDE_REG_SHIFT)
+static struct resource db1300_ide_res[] = {
+ [0] = {
+ .start = DB1300_IDE_PHYS_ADDR,
+ .end = DB1300_IDE_PHYS_ADDR + IDE_ALT_START - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DB1300_IDE_PHYS_ADDR + IDE_ALT_START,
+ .end = DB1300_IDE_PHYS_ADDR + DB1300_IDE_PHYS_LEN - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = DB1300_IDE_INT,
+ .end = DB1300_IDE_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device db1300_ide_dev = {
+ .dev = {
+ .dma_mask = &au1300_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &db1300_ide_info,
+ },
+ .name = "pata_platform",
+ .resource = db1300_ide_res,
+ .num_resources = ARRAY_SIZE(db1300_ide_res),
+};
+
+/**********************************************************************/
+
+#ifdef CONFIG_MMC_AU1X
+static irqreturn_t db1300_mmc_cd(int irq, void *ptr)
+{
+ disable_irq_nosync(irq);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t db1300_mmc_cdfn(int irq, void *ptr)
+{
+ mmc_detect_change(ptr, msecs_to_jiffies(200));
+
+ msleep(100); /* debounce */
+ if (irq == DB1300_SD1_INSERT_INT)
+ enable_irq(DB1300_SD1_EJECT_INT);
+ else
+ enable_irq(DB1300_SD1_INSERT_INT);
+
+ return IRQ_HANDLED;
+}
+
+static int db1300_mmc_card_readonly(void *mmc_host)
+{
+ /* it uses the SD1 interface, but the DB1200's SD0 write-protect bit in the CPLD */
+ return bcsr_read(BCSR_STATUS) & BCSR_STATUS_SD0WP;
+}
+
+static int db1300_mmc_card_inserted(void *mmc_host)
+{
+ return bcsr_read(BCSR_SIGSTAT) & (1 << 12); /* insertion irq signal */
+}
+
+static int db1300_mmc_cd_setup(void *mmc_host, int en)
+{
+ int ret;
+
+ if (en) {
+ ret = request_threaded_irq(DB1300_SD1_INSERT_INT, db1300_mmc_cd,
+ db1300_mmc_cdfn, 0, "sd_insert", mmc_host);
+ if (ret)
+ goto out;
+
+ ret = request_threaded_irq(DB1300_SD1_EJECT_INT, db1300_mmc_cd,
+ db1300_mmc_cdfn, 0, "sd_eject", mmc_host);
+ if (ret) {
+ free_irq(DB1300_SD1_INSERT_INT, mmc_host);
+ goto out;
+ }
+
+ if (db1300_mmc_card_inserted(mmc_host))
+ enable_irq(DB1300_SD1_EJECT_INT);
+ else
+ enable_irq(DB1300_SD1_INSERT_INT);
+
+ } else {
+ free_irq(DB1300_SD1_INSERT_INT, mmc_host);
+ free_irq(DB1300_SD1_EJECT_INT, mmc_host);
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+static void db1300_mmcled_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ if (brightness != LED_OFF)
+ bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED0, 0);
+ else
+ bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED0);
+}
+
+static struct led_classdev db1300_mmc_led = {
+ .brightness_set = db1300_mmcled_set,
+};
+
+struct au1xmmc_platform_data db1300_sd1_platdata = {
+ .cd_setup = db1300_mmc_cd_setup,
+ .card_inserted = db1300_mmc_card_inserted,
+ .card_readonly = db1300_mmc_card_readonly,
+ .led = &db1300_mmc_led,
+};
+
+static struct resource au1300_sd1_res[] = {
+ [0] = {
+ .start = AU1300_SD1_PHYS_ADDR,
+ .end = AU1300_SD1_PHYS_ADDR,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1300_SD1_INT,
+ .end = AU1300_SD1_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = AU1300_DSCR_CMD0_SDMS_TX1,
+ .end = AU1300_DSCR_CMD0_SDMS_TX1,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = AU1300_DSCR_CMD0_SDMS_RX1,
+ .end = AU1300_DSCR_CMD0_SDMS_RX1,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct platform_device db1300_sd1_dev = {
+ .dev = {
+ .dma_mask = &au1300_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &db1300_sd1_platdata,
+ },
+ .name = "au1xxx-mmc",
+ .id = 1,
+ .resource = au1300_sd1_res,
+ .num_resources = ARRAY_SIZE(au1300_sd1_res),
+};
+
+/**********************************************************************/
+
+static int db1300_movinand_inserted(void *mmc_host)
+{
+ return 0; /* disable for now, it doesn't work yet */
+}
+
+static int db1300_movinand_readonly(void *mmc_host)
+{
+ return 0;
+}
+
+static void db1300_movinand_led_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ if (brightness != LED_OFF)
+ bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED1, 0);
+ else
+ bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED1);
+}
+
+static struct led_classdev db1300_movinand_led = {
+ .brightness_set = db1300_movinand_led_set,
+};
+
+struct au1xmmc_platform_data db1300_sd0_platdata = {
+ .card_inserted = db1300_movinand_inserted,
+ .card_readonly = db1300_movinand_readonly,
+ .led = &db1300_movinand_led,
+ .mask_host_caps = MMC_CAP_NEEDS_POLL,
+};
+
+static struct resource au1300_sd0_res[] = {
+ [0] = {
+ .start = AU1100_SD0_PHYS_ADDR,
+ .end = AU1100_SD0_PHYS_ADDR,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1300_SD0_INT,
+ .end = AU1300_SD0_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = AU1300_DSCR_CMD0_SDMS_TX0,
+ .end = AU1300_DSCR_CMD0_SDMS_TX0,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = AU1300_DSCR_CMD0_SDMS_RX0,
+ .end = AU1300_DSCR_CMD0_SDMS_RX0,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct platform_device db1300_sd0_dev = {
+ .dev = {
+ .dma_mask = &au1300_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &db1300_sd0_platdata,
+ },
+ .name = "au1xxx-mmc",
+ .id = 0,
+ .resource = au1300_sd0_res,
+ .num_resources = ARRAY_SIZE(au1300_sd0_res),
+};
+#endif /* CONFIG_MMC_AU1X */
+
+/**********************************************************************/
+
+static struct platform_device db1300_wm9715_dev = {
+ .name = "wm9712-codec",
+ .id = 1, /* ID of PSC for AC97 audio, see asoc glue! */
+};
+
+static struct platform_device db1300_ac97dma_dev = {
+ .name = "au1xpsc-pcm",
+ .id = 1, /* PSC ID */
+};
+
+static struct platform_device db1300_i2sdma_dev = {
+ .name = "au1xpsc-pcm",
+ .id = 2, /* PSC ID */
+};
+
+static struct platform_device db1300_sndac97_dev = {
+ .name = "db1300-ac97",
+ .dev = {
+ .dma_mask = &au1300_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static struct platform_device db1300_sndi2s_dev = {
+ .name = "db1300-i2s",
+ .dev = {
+ .dma_mask = &au1300_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+/**********************************************************************/
+
+static int db1300fb_panel_index(void)
+{
+ return 9; /* DB1300_800x480 */
+}
+
+static int db1300fb_panel_init(void)
+{
+ /* Apply power (Vee/Vdd logic is inverted on Panel DB1300_800x480) */
+ bcsr_mod(BCSR_BOARD, BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD,
+ BCSR_BOARD_LCDBL);
+ return 0;
+}
+
+static int db1300fb_panel_shutdown(void)
+{
+ /* Remove power (Vee/Vdd logic is inverted on Panel DB1300_800x480) */
+ bcsr_mod(BCSR_BOARD, BCSR_BOARD_LCDBL,
+ BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD);
+ return 0;
+}
+
+static struct au1200fb_platdata db1300fb_pd = {
+ .panel_index = db1300fb_panel_index,
+ .panel_init = db1300fb_panel_init,
+ .panel_shutdown = db1300fb_panel_shutdown,
+};
+
+static struct resource au1300_lcd_res[] = {
+ [0] = {
+ .start = AU1200_LCD_PHYS_ADDR,
+ .end = AU1200_LCD_PHYS_ADDR + 0x800 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1300_LCD_INT,
+ .end = AU1300_LCD_INT,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+
+static struct platform_device db1300_lcd_dev = {
+ .name = "au1200-lcd",
+ .id = 0,
+ .dev = {
+ .dma_mask = &au1300_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &db1300fb_pd,
+ },
+ .num_resources = ARRAY_SIZE(au1300_lcd_res),
+ .resource = au1300_lcd_res,
+};
+
+/**********************************************************************/
+
+#if IS_ENABLED(CONFIG_TOUCHSCREEN_WM97XX)
+static void db1300_wm97xx_irqen(struct wm97xx *wm, int enable)
+{
+ if (enable)
+ enable_irq(DB1300_AC97_PEN_INT);
+ else
+ disable_irq_nosync(DB1300_AC97_PEN_INT);
+}
+
+static struct wm97xx_mach_ops db1300_wm97xx_ops = {
+ .irq_enable = db1300_wm97xx_irqen,
+ .irq_gpio = WM97XX_GPIO_3,
+};
+
+static int db1300_wm97xx_probe(struct platform_device *pdev)
+{
+ struct wm97xx *wm = platform_get_drvdata(pdev);
+
+ /* external pendown indicator */
+ wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN,
+ WM97XX_GPIO_POL_LOW, WM97XX_GPIO_STICKY,
+ WM97XX_GPIO_WAKE);
+
+ /* internal "virtual" pendown gpio */
+ wm97xx_config_gpio(wm, WM97XX_GPIO_3, WM97XX_GPIO_OUT,
+ WM97XX_GPIO_POL_LOW, WM97XX_GPIO_NOTSTICKY,
+ WM97XX_GPIO_NOWAKE);
+
+ wm->pen_irq = DB1300_AC97_PEN_INT;
+
+ return wm97xx_register_mach_ops(wm, &db1300_wm97xx_ops);
+}
+#else
+static int db1300_wm97xx_probe(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+#endif
+
+static struct platform_driver db1300_wm97xx_driver = {
+ .driver.name = "wm97xx-touch",
+ .driver.owner = THIS_MODULE,
+ .probe = db1300_wm97xx_probe,
+};
+
+/**********************************************************************/
+
+static struct platform_device *db1300_dev[] __initdata = {
+ &db1300_eth_dev,
+ &db1300_i2c_dev,
+ &db1300_5waysw_dev,
+ &db1300_nand_dev,
+ &db1300_ide_dev,
+#ifdef CONFIG_MMC_AU1X
+ &db1300_sd0_dev,
+ &db1300_sd1_dev,
+#endif
+ &db1300_lcd_dev,
+ &db1300_ac97_dev,
+ &db1300_i2s_dev,
+ &db1300_wm9715_dev,
+ &db1300_ac97dma_dev,
+ &db1300_i2sdma_dev,
+ &db1300_sndac97_dev,
+ &db1300_sndi2s_dev,
+};
+
+int __init db1300_dev_setup(void)
+{
+ int swapped, cpldirq;
+ struct clk *c;
+
+ /* setup CPLD IRQ muxer */
+ cpldirq = au1300_gpio_to_irq(AU1300_PIN_EXTCLK1);
+ irq_set_irq_type(cpldirq, IRQ_TYPE_LEVEL_HIGH);
+ bcsr_init_irq(DB1300_FIRST_INT, DB1300_LAST_INT, cpldirq);
+
+ /* insert/eject IRQs: one of them always triggers, so don't enable
+ * them automatically in request_irq(). The DB1200 has this bug too.
+ */
+ irq_set_status_flags(DB1300_SD1_INSERT_INT, IRQ_NOAUTOEN);
+ irq_set_status_flags(DB1300_SD1_EJECT_INT, IRQ_NOAUTOEN);
+ irq_set_status_flags(DB1300_CF_INSERT_INT, IRQ_NOAUTOEN);
+ irq_set_status_flags(DB1300_CF_EJECT_INT, IRQ_NOAUTOEN);
+
+ /*
+ * setup board
+ */
+ prom_get_ethernet_addr(&db1300_eth_config.mac[0]);
+
+ i2c_register_board_info(0, db1300_i2c_devs,
+ ARRAY_SIZE(db1300_i2c_devs));
+
+ if (platform_driver_register(&db1300_wm97xx_driver))
+ pr_warn("DB1300: failed to init touch pen irq support!\n");
+
+ /* Audio PSC clock is supplied by codecs (PSC1, 2) */
+ __raw_writel(PSC_SEL_CLK_SERCLK,
+ (void __iomem *)KSEG1ADDR(AU1300_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET);
+ wmb();
+ __raw_writel(PSC_SEL_CLK_SERCLK,
+ (void __iomem *)KSEG1ADDR(AU1300_PSC2_PHYS_ADDR) + PSC_SEL_OFFSET);
+ wmb();
+ /* I2C driver wants 50MHz, get as close as possible */
+ c = clk_get(NULL, "psc3_intclk");
+ if (!IS_ERR(c)) {
+ clk_set_rate(c, 50000000);
+ clk_prepare_enable(c);
+ clk_put(c);
+ }
+ __raw_writel(PSC_SEL_CLK_INTCLK,
+ (void __iomem *)KSEG1ADDR(AU1300_PSC3_PHYS_ADDR) + PSC_SEL_OFFSET);
+ wmb();
+
+ /* enable power to USB ports */
+ bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_USBHPWR | BCSR_RESETS_OTGPWR);
+
+ /* Although this is socket #0, it uses the CPLD bits which previous
+ * boards used for socket #1.
+ */
+ db1x_register_pcmcia_socket(
+ AU1000_PCMCIA_ATTR_PHYS_ADDR,
+ AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x00400000 - 1,
+ AU1000_PCMCIA_MEM_PHYS_ADDR,
+ AU1000_PCMCIA_MEM_PHYS_ADDR + 0x00400000 - 1,
+ AU1000_PCMCIA_IO_PHYS_ADDR,
+ AU1000_PCMCIA_IO_PHYS_ADDR + 0x00010000 - 1,
+ DB1300_CF_INT, DB1300_CF_INSERT_INT, 0, DB1300_CF_EJECT_INT, 1);
+
+ swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1200_SWAPBOOT;
+ db1x_register_norflash(64 << 20, 2, swapped);
+
+ return platform_add_devices(db1300_dev, ARRAY_SIZE(db1300_dev));
+}
+
+
+int __init db1300_board_setup(void)
+{
+ unsigned short whoami;
+
+ bcsr_init(DB1300_BCSR_PHYS_ADDR,
+ DB1300_BCSR_PHYS_ADDR + DB1300_BCSR_HEXLED_OFS);
+
+ whoami = bcsr_read(BCSR_WHOAMI);
+ if (BCSR_WHOAMI_BOARD(whoami) != BCSR_WHOAMI_DB1300)
+ return -ENODEV;
+
+ db1300_gpio_config();
+
+ printk(KERN_INFO "NetLogic DBAu1300 Development Platform.\n\t"
+ "BoardID %d CPLD Rev %d DaughtercardID %d\n",
+ BCSR_WHOAMI_BOARD(whoami), BCSR_WHOAMI_CPLD(whoami),
+ BCSR_WHOAMI_DCID(whoami));
+
+ /* enable UARTs, YAMON only enables #2 */
+ alchemy_uart_enable(AU1300_UART0_PHYS_ADDR);
+ alchemy_uart_enable(AU1300_UART1_PHYS_ADDR);
+ alchemy_uart_enable(AU1300_UART3_PHYS_ADDR);
+
+ return 0;
+}
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
new file mode 100644
index 000000000..06811a5db
--- /dev/null
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Alchemy Db1550/Pb1550 board support
+ *
+ * (c) 2011 Manuel Lauss <manuel.lauss@googlemail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/platnand.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <asm/bootinfo.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
+#include <asm/mach-au1x00/au1xxx_eth.h>
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
+#include <asm/mach-au1x00/au1xxx_psc.h>
+#include <asm/mach-au1x00/au1550_spi.h>
+#include <asm/mach-au1x00/au1550nd.h>
+#include <asm/mach-db1x00/bcsr.h>
+#include <prom.h>
+#include "platform.h"
+
+static void __init db1550_hw_setup(void)
+{
+ void __iomem *base;
+ unsigned long v;
+
+ /* complete pin setup: assign GPIO16 to PSC0_SYNC1 (SPI cs# line)
+ * as well as PSC1_SYNC for AC97 on PB1550.
+ */
+ v = alchemy_rdsys(AU1000_SYS_PINFUNC);
+ alchemy_wrsys(v | 1 | SYS_PF_PSC1_S1, AU1000_SYS_PINFUNC);
+
+ /* reset the AC97 codec now, the reset time in the psc-ac97 driver
+ * is apparently too short although it's ridiculous as it is.
+ */
+ base = (void __iomem *)KSEG1ADDR(AU1550_PSC1_PHYS_ADDR);
+ __raw_writel(PSC_SEL_CLK_SERCLK | PSC_SEL_PS_AC97MODE,
+ base + PSC_SEL_OFFSET);
+ __raw_writel(PSC_CTRL_DISABLE, base + PSC_CTRL_OFFSET);
+ wmb();
+ __raw_writel(PSC_AC97RST_RST, base + PSC_AC97RST_OFFSET);
+ wmb();
+}
+
+int __init db1550_board_setup(void)
+{
+ unsigned short whoami;
+
+ bcsr_init(DB1550_BCSR_PHYS_ADDR,
+ DB1550_BCSR_PHYS_ADDR + DB1550_BCSR_HEXLED_OFS);
+
+ whoami = bcsr_read(BCSR_WHOAMI); /* PB1550 hexled offset differs */
+ switch (BCSR_WHOAMI_BOARD(whoami)) {
+ case BCSR_WHOAMI_PB1550_SDR:
+ case BCSR_WHOAMI_PB1550_DDR:
+ bcsr_init(PB1550_BCSR_PHYS_ADDR,
+ PB1550_BCSR_PHYS_ADDR + PB1550_BCSR_HEXLED_OFS);
+ fallthrough;
+ case BCSR_WHOAMI_DB1550:
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ pr_info("Alchemy/AMD %s Board, CPLD Rev %d Board-ID %d " \
+ "Daughtercard ID %d\n", get_system_type(),
+ (whoami >> 4) & 0xf, (whoami >> 8) & 0xf, whoami & 0xf);
+
+ db1550_hw_setup();
+ return 0;
+}
+
+/*****************************************************************************/
+
+static u64 au1550_all_dmamask = DMA_BIT_MASK(32);
+
+static struct mtd_partition db1550_spiflash_parts[] = {
+ {
+ .name = "spi_flash",
+ .offset = 0,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct flash_platform_data db1550_spiflash_data = {
+ .name = "s25fl010",
+ .parts = db1550_spiflash_parts,
+ .nr_parts = ARRAY_SIZE(db1550_spiflash_parts),
+ .type = "m25p10",
+};
+
+static struct spi_board_info db1550_spi_devs[] __initdata = {
+ {
+ /* TI TMP121AIDBVR temp sensor */
+ .modalias = "tmp121",
+ .max_speed_hz = 2400000,
+ .bus_num = 0,
+ .chip_select = 0,
+ .mode = SPI_MODE_0,
+ },
+ {
+ /* Spansion S25FL001D0FMA SPI flash */
+ .modalias = "m25p80",
+ .max_speed_hz = 2400000,
+ .bus_num = 0,
+ .chip_select = 1,
+ .mode = SPI_MODE_0,
+ .platform_data = &db1550_spiflash_data,
+ },
+};
+
+static struct i2c_board_info db1550_i2c_devs[] __initdata = {
+ { I2C_BOARD_INFO("24c04", 0x52),}, /* AT24C04-10 I2C eeprom */
+ { I2C_BOARD_INFO("ne1619", 0x2d),}, /* adm1025-compat hwmon */
+ { I2C_BOARD_INFO("wm8731", 0x1b),}, /* I2S audio codec WM8731 */
+};
+
+/**********************************************************************/
+
+static void au1550_nand_cmd_ctrl(struct nand_chip *this, int cmd,
+ unsigned int ctrl)
+{
+ unsigned long ioaddr = (unsigned long)this->legacy.IO_ADDR_W;
+
+ ioaddr &= 0xffffff00;
+
+ if (ctrl & NAND_CLE) {
+ ioaddr += MEM_STNAND_CMD;
+ } else if (ctrl & NAND_ALE) {
+ ioaddr += MEM_STNAND_ADDR;
+ } else {
+ /* assume we want to r/w real data by default */
+ ioaddr += MEM_STNAND_DATA;
+ }
+ this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W = (void __iomem *)ioaddr;
+ if (cmd != NAND_CMD_NONE) {
+ __raw_writeb(cmd, this->legacy.IO_ADDR_W);
+ wmb();
+ }
+}
+
+static int au1550_nand_device_ready(struct nand_chip *this)
+{
+ return alchemy_rdsmem(AU1000_MEM_STSTAT) & 1;
+}
+
+static struct mtd_partition db1550_nand_parts[] = {
+ {
+ .name = "NAND FS 0",
+ .offset = 0,
+ .size = 8 * 1024 * 1024,
+ },
+ {
+ .name = "NAND FS 1",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL
+ },
+};
+
+struct platform_nand_data db1550_nand_platdata = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_offset = 0,
+ .nr_partitions = ARRAY_SIZE(db1550_nand_parts),
+ .partitions = db1550_nand_parts,
+ .chip_delay = 20,
+ },
+ .ctrl = {
+ .dev_ready = au1550_nand_device_ready,
+ .cmd_ctrl = au1550_nand_cmd_ctrl,
+ },
+};
+
+static struct resource db1550_nand_res[] = {
+ [0] = {
+ .start = 0x20000000,
+ .end = 0x200000ff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device db1550_nand_dev = {
+ .name = "gen_nand",
+ .num_resources = ARRAY_SIZE(db1550_nand_res),
+ .resource = db1550_nand_res,
+ .id = -1,
+ .dev = {
+ .platform_data = &db1550_nand_platdata,
+ }
+};
+
+static struct au1550nd_platdata pb1550_nand_pd = {
+ .parts = db1550_nand_parts,
+ .num_parts = ARRAY_SIZE(db1550_nand_parts),
+ .devwidth = 0, /* x8 NAND default, needs fixing up */
+};
+
+static struct platform_device pb1550_nand_dev = {
+ .name = "au1550-nand",
+ .id = -1,
+ .resource = db1550_nand_res,
+ .num_resources = ARRAY_SIZE(db1550_nand_res),
+ .dev = {
+ .platform_data = &pb1550_nand_pd,
+ },
+};
+
+static void __init pb1550_nand_setup(void)
+{
+ int boot_swapboot = (alchemy_rdsmem(AU1000_MEM_STSTAT) & (0x7 << 1)) |
+ ((bcsr_read(BCSR_STATUS) >> 6) & 0x1);
+
+ gpio_direction_input(206); /* de-assert NAND CS# */
+ switch (boot_swapboot) {
+ case 0: case 2: case 8: case 0xC: case 0xD:
+ /* x16 NAND Flash */
+ pb1550_nand_pd.devwidth = 1;
+ fallthrough;
+ case 1: case 3: case 9: case 0xE: case 0xF:
+ /* x8 NAND, already set up */
+ platform_device_register(&pb1550_nand_dev);
+ }
+}
+
+/**********************************************************************/
+
+static struct resource au1550_psc0_res[] = {
+ [0] = {
+ .start = AU1550_PSC0_PHYS_ADDR,
+ .end = AU1550_PSC0_PHYS_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1550_PSC0_INT,
+ .end = AU1550_PSC0_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = AU1550_DSCR_CMD0_PSC0_TX,
+ .end = AU1550_DSCR_CMD0_PSC0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = AU1550_DSCR_CMD0_PSC0_RX,
+ .end = AU1550_DSCR_CMD0_PSC0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static void db1550_spi_cs_en(struct au1550_spi_info *spi, int cs, int pol)
+{
+ if (cs)
+ bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_SPISEL);
+ else
+ bcsr_mod(BCSR_BOARD, BCSR_BOARD_SPISEL, 0);
+}
+
+static struct au1550_spi_info db1550_spi_platdata = {
+ .mainclk_hz = 48000000, /* PSC0 clock: max. 2.4MHz SPI clk */
+ .num_chipselect = 2,
+ .activate_cs = db1550_spi_cs_en,
+};
+
+
+static struct platform_device db1550_spi_dev = {
+ .dev = {
+ .dma_mask = &au1550_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &db1550_spi_platdata,
+ },
+ .name = "au1550-spi",
+ .id = 0, /* bus number */
+ .num_resources = ARRAY_SIZE(au1550_psc0_res),
+ .resource = au1550_psc0_res,
+};
+
+/**********************************************************************/
+
+static struct resource au1550_psc1_res[] = {
+ [0] = {
+ .start = AU1550_PSC1_PHYS_ADDR,
+ .end = AU1550_PSC1_PHYS_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1550_PSC1_INT,
+ .end = AU1550_PSC1_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = AU1550_DSCR_CMD0_PSC1_TX,
+ .end = AU1550_DSCR_CMD0_PSC1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = AU1550_DSCR_CMD0_PSC1_RX,
+ .end = AU1550_DSCR_CMD0_PSC1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct platform_device db1550_ac97_dev = {
+ .name = "au1xpsc_ac97",
+ .id = 1, /* PSC ID */
+ .num_resources = ARRAY_SIZE(au1550_psc1_res),
+ .resource = au1550_psc1_res,
+};
+
+
+static struct resource au1550_psc2_res[] = {
+ [0] = {
+ .start = AU1550_PSC2_PHYS_ADDR,
+ .end = AU1550_PSC2_PHYS_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1550_PSC2_INT,
+ .end = AU1550_PSC2_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = AU1550_DSCR_CMD0_PSC2_TX,
+ .end = AU1550_DSCR_CMD0_PSC2_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = AU1550_DSCR_CMD0_PSC2_RX,
+ .end = AU1550_DSCR_CMD0_PSC2_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct platform_device db1550_i2c_dev = {
+ .name = "au1xpsc_smbus",
+ .id = 0, /* bus number */
+ .num_resources = ARRAY_SIZE(au1550_psc2_res),
+ .resource = au1550_psc2_res,
+};
+
+/**********************************************************************/
+
+static struct resource au1550_psc3_res[] = {
+ [0] = {
+ .start = AU1550_PSC3_PHYS_ADDR,
+ .end = AU1550_PSC3_PHYS_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1550_PSC3_INT,
+ .end = AU1550_PSC3_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = AU1550_DSCR_CMD0_PSC3_TX,
+ .end = AU1550_DSCR_CMD0_PSC3_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = AU1550_DSCR_CMD0_PSC3_RX,
+ .end = AU1550_DSCR_CMD0_PSC3_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct platform_device db1550_i2s_dev = {
+ .name = "au1xpsc_i2s",
+ .id = 3, /* PSC ID */
+ .num_resources = ARRAY_SIZE(au1550_psc3_res),
+ .resource = au1550_psc3_res,
+};
+
+/**********************************************************************/
+
+static struct platform_device db1550_stac_dev = {
+ .name = "ac97-codec",
+ .id = 1, /* on PSC1 */
+};
+
+static struct platform_device db1550_ac97dma_dev = {
+ .name = "au1xpsc-pcm",
+ .id = 1, /* on PSC1 */
+};
+
+static struct platform_device db1550_i2sdma_dev = {
+ .name = "au1xpsc-pcm",
+ .id = 3, /* on PSC3 */
+};
+
+static struct platform_device db1550_sndac97_dev = {
+ .name = "db1550-ac97",
+ .dev = {
+ .dma_mask = &au1550_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static struct platform_device db1550_sndi2s_dev = {
+ .name = "db1550-i2s",
+ .dev = {
+ .dma_mask = &au1550_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+/**********************************************************************/
+
+static int db1550_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin)
+{
+ if ((slot < 11) || (slot > 13) || pin == 0)
+ return -1;
+ if (slot == 11)
+ return (pin == 1) ? AU1550_PCI_INTC : 0xff;
+ if (slot == 12) {
+ switch (pin) {
+ case 1: return AU1550_PCI_INTB;
+ case 2: return AU1550_PCI_INTC;
+ case 3: return AU1550_PCI_INTD;
+ case 4: return AU1550_PCI_INTA;
+ }
+ }
+ if (slot == 13) {
+ switch (pin) {
+ case 1: return AU1550_PCI_INTA;
+ case 2: return AU1550_PCI_INTB;
+ case 3: return AU1550_PCI_INTC;
+ case 4: return AU1550_PCI_INTD;
+ }
+ }
+ return -1;
+}
+
+static int pb1550_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin)
+{
+ if ((slot < 12) || (slot > 13) || pin == 0)
+ return -1;
+ if (slot == 12) {
+ switch (pin) {
+ case 1: return AU1500_PCI_INTB;
+ case 2: return AU1500_PCI_INTC;
+ case 3: return AU1500_PCI_INTD;
+ case 4: return AU1500_PCI_INTA;
+ }
+ }
+ if (slot == 13) {
+ switch (pin) {
+ case 1: return AU1500_PCI_INTA;
+ case 2: return AU1500_PCI_INTB;
+ case 3: return AU1500_PCI_INTC;
+ case 4: return AU1500_PCI_INTD;
+ }
+ }
+ return -1;
+}
+
+static struct resource alchemy_pci_host_res[] = {
+ [0] = {
+ .start = AU1500_PCI_PHYS_ADDR,
+ .end = AU1500_PCI_PHYS_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct alchemy_pci_platdata db1550_pci_pd = {
+ .board_map_irq = db1550_map_pci_irq,
+};
+
+static struct platform_device db1550_pci_host_dev = {
+ .dev.platform_data = &db1550_pci_pd,
+ .name = "alchemy-pci",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(alchemy_pci_host_res),
+ .resource = alchemy_pci_host_res,
+};
+
+/**********************************************************************/
+
+static struct platform_device *db1550_devs[] __initdata = {
+ &db1550_i2c_dev,
+ &db1550_ac97_dev,
+ &db1550_spi_dev,
+ &db1550_i2s_dev,
+ &db1550_stac_dev,
+ &db1550_ac97dma_dev,
+ &db1550_i2sdma_dev,
+ &db1550_sndac97_dev,
+ &db1550_sndi2s_dev,
+};
+
+/* must be arch_initcall; MIPS PCI scans busses in a subsys_initcall */
+int __init db1550_pci_setup(int id)
+{
+ if (id)
+ db1550_pci_pd.board_map_irq = pb1550_map_pci_irq;
+ return platform_device_register(&db1550_pci_host_dev);
+}
+
+static void __init db1550_devices(void)
+{
+ alchemy_gpio_direction_output(203, 0); /* red led on */
+
+ irq_set_irq_type(AU1550_GPIO0_INT, IRQ_TYPE_EDGE_BOTH); /* CD0# */
+ irq_set_irq_type(AU1550_GPIO1_INT, IRQ_TYPE_EDGE_BOTH); /* CD1# */
+ irq_set_irq_type(AU1550_GPIO3_INT, IRQ_TYPE_LEVEL_LOW); /* CARD0# */
+ irq_set_irq_type(AU1550_GPIO5_INT, IRQ_TYPE_LEVEL_LOW); /* CARD1# */
+ irq_set_irq_type(AU1550_GPIO21_INT, IRQ_TYPE_LEVEL_LOW); /* STSCHG0# */
+ irq_set_irq_type(AU1550_GPIO22_INT, IRQ_TYPE_LEVEL_LOW); /* STSCHG1# */
+
+ db1x_register_pcmcia_socket(
+ AU1000_PCMCIA_ATTR_PHYS_ADDR,
+ AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1,
+ AU1000_PCMCIA_MEM_PHYS_ADDR,
+ AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
+ AU1000_PCMCIA_IO_PHYS_ADDR,
+ AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
+ AU1550_GPIO3_INT, 0,
+ /*AU1550_GPIO21_INT*/0, 0, 0);
+
+ db1x_register_pcmcia_socket(
+ AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004000000,
+ AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004400000 - 1,
+ AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004000000,
+ AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
+ AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000,
+ AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
+ AU1550_GPIO5_INT, 1,
+ /*AU1550_GPIO22_INT*/0, 0, 1);
+
+ platform_device_register(&db1550_nand_dev);
+
+ alchemy_gpio_direction_output(202, 0); /* green led on */
+}
+
+static void __init pb1550_devices(void)
+{
+ irq_set_irq_type(AU1550_GPIO0_INT, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(AU1550_GPIO1_INT, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(AU1550_GPIO201_205_INT, IRQ_TYPE_LEVEL_HIGH);
+
+ /* enable both PCMCIA card irqs in the shared line */
+ alchemy_gpio2_enable_int(201); /* socket 0 card irq */
+ alchemy_gpio2_enable_int(202); /* socket 1 card irq */
+
+ /* Pb1550, like all others, also has statuschange irqs; however they're
+ * wired up to the Au1550's shared GPIO201_205 line, which also services
+ * the PCMCIA card interrupts. So we ignore statuschange and use
+ * GPIO201_205 exclusively for card interrupts, since a) pcmcia drivers
+ * are used to shared irqs and b) statuschange isn't really useful anyway.
+ */
+ db1x_register_pcmcia_socket(
+ AU1000_PCMCIA_ATTR_PHYS_ADDR,
+ AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1,
+ AU1000_PCMCIA_MEM_PHYS_ADDR,
+ AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
+ AU1000_PCMCIA_IO_PHYS_ADDR,
+ AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
+ AU1550_GPIO201_205_INT, AU1550_GPIO0_INT, 0, 0, 0);
+
+ db1x_register_pcmcia_socket(
+ AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x008000000,
+ AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x008400000 - 1,
+ AU1000_PCMCIA_MEM_PHYS_ADDR + 0x008000000,
+ AU1000_PCMCIA_MEM_PHYS_ADDR + 0x008400000 - 1,
+ AU1000_PCMCIA_IO_PHYS_ADDR + 0x008000000,
+ AU1000_PCMCIA_IO_PHYS_ADDR + 0x008010000 - 1,
+ AU1550_GPIO201_205_INT, AU1550_GPIO1_INT, 0, 0, 1);
+
+ pb1550_nand_setup();
+}
+
+int __init db1550_dev_setup(void)
+{
+ int swapped, id;
+ struct clk *c;
+
+ id = (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI)) != BCSR_WHOAMI_DB1550);
+
+ i2c_register_board_info(0, db1550_i2c_devs,
+ ARRAY_SIZE(db1550_i2c_devs));
+ spi_register_board_info(db1550_spi_devs,
+ ARRAY_SIZE(db1550_spi_devs));
+
+ c = clk_get(NULL, "psc0_intclk");
+ if (!IS_ERR(c)) {
+ clk_set_rate(c, 50000000);
+ clk_prepare_enable(c);
+ clk_put(c);
+ }
+ c = clk_get(NULL, "psc2_intclk");
+ if (!IS_ERR(c)) {
+ clk_set_rate(c, db1550_spi_platdata.mainclk_hz);
+ clk_prepare_enable(c);
+ clk_put(c);
+ }
+
+ /* Audio PSC clock is supplied by codecs (PSC1, 3) FIXME: platdata!! */
+ __raw_writel(PSC_SEL_CLK_SERCLK,
+ (void __iomem *)KSEG1ADDR(AU1550_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET);
+ wmb();
+ __raw_writel(PSC_SEL_CLK_SERCLK,
+ (void __iomem *)KSEG1ADDR(AU1550_PSC3_PHYS_ADDR) + PSC_SEL_OFFSET);
+ wmb();
+ /* SPI/I2C use internally supplied 50MHz source */
+ __raw_writel(PSC_SEL_CLK_INTCLK,
+ (void __iomem *)KSEG1ADDR(AU1550_PSC0_PHYS_ADDR) + PSC_SEL_OFFSET);
+ wmb();
+ __raw_writel(PSC_SEL_CLK_INTCLK,
+ (void __iomem *)KSEG1ADDR(AU1550_PSC2_PHYS_ADDR) + PSC_SEL_OFFSET);
+ wmb();
+
+ id ? pb1550_devices() : db1550_devices();
+
+ swapped = bcsr_read(BCSR_STATUS) &
+ (id ? BCSR_STATUS_PB1550_SWAPBOOT : BCSR_STATUS_DB1000_SWAPBOOT);
+ db1x_register_norflash(128 << 20, 4, swapped);
+
+ return platform_add_devices(db1550_devs, ARRAY_SIZE(db1550_devs));
+}
diff --git a/arch/mips/alchemy/devboards/db1xxx.c b/arch/mips/alchemy/devboards/db1xxx.c
new file mode 100644
index 000000000..e6d25aad8
--- /dev/null
+++ b/arch/mips/alchemy/devboards/db1xxx.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Alchemy DB/PB1xxx board support.
+ */
+
+#include <asm/prom.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-db1x00/bcsr.h>
+
+int __init db1000_board_setup(void);
+int __init db1000_dev_setup(void);
+int __init db1500_pci_setup(void);
+int __init db1200_board_setup(void);
+int __init db1200_dev_setup(void);
+int __init db1300_board_setup(void);
+int __init db1300_dev_setup(void);
+int __init db1550_board_setup(void);
+int __init db1550_dev_setup(void);
+int __init db1550_pci_setup(int);
+
+static const char *board_type_str(void)
+{
+ switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
+ case BCSR_WHOAMI_DB1000:
+ return "DB1000";
+ case BCSR_WHOAMI_DB1500:
+ return "DB1500";
+ case BCSR_WHOAMI_DB1100:
+ return "DB1100";
+ case BCSR_WHOAMI_PB1500:
+ case BCSR_WHOAMI_PB1500R2:
+ return "PB1500";
+ case BCSR_WHOAMI_PB1100:
+ return "PB1100";
+ case BCSR_WHOAMI_PB1200_DDR1:
+ case BCSR_WHOAMI_PB1200_DDR2:
+ return "PB1200";
+ case BCSR_WHOAMI_DB1200:
+ return "DB1200";
+ case BCSR_WHOAMI_DB1300:
+ return "DB1300";
+ case BCSR_WHOAMI_DB1550:
+ return "DB1550";
+ case BCSR_WHOAMI_PB1550_SDR:
+ case BCSR_WHOAMI_PB1550_DDR:
+ return "PB1550";
+ default:
+ return "(unknown)";
+ }
+}
+
+const char *get_system_type(void)
+{
+ return board_type_str();
+}
+
+void __init board_setup(void)
+{
+ int ret;
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ ret = db1000_board_setup();
+ break;
+ case ALCHEMY_CPU_AU1550:
+ ret = db1550_board_setup();
+ break;
+ case ALCHEMY_CPU_AU1200:
+ ret = db1200_board_setup();
+ break;
+ case ALCHEMY_CPU_AU1300:
+ ret = db1300_board_setup();
+ break;
+ default:
+ pr_err("unsupported CPU on board\n");
+ ret = -ENODEV;
+ }
+ if (ret)
+ panic("cannot initialize board support");
+}
+
+static int __init db1xxx_arch_init(void)
+{
+ int id = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
+ if (id == BCSR_WHOAMI_DB1550)
+ return db1550_pci_setup(0);
+ else if ((id == BCSR_WHOAMI_PB1550_SDR) ||
+ (id == BCSR_WHOAMI_PB1550_DDR))
+ return db1550_pci_setup(1);
+ else if ((id == BCSR_WHOAMI_DB1500) || (id == BCSR_WHOAMI_PB1500) ||
+ (id == BCSR_WHOAMI_PB1500R2))
+ return db1500_pci_setup();
+
+ return 0;
+}
+arch_initcall(db1xxx_arch_init);
+
+static int __init db1xxx_dev_init(void)
+{
+ mips_set_machine_name(board_type_str());
+ switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
+ case BCSR_WHOAMI_DB1000:
+ case BCSR_WHOAMI_DB1500:
+ case BCSR_WHOAMI_DB1100:
+ case BCSR_WHOAMI_PB1500:
+ case BCSR_WHOAMI_PB1500R2:
+ case BCSR_WHOAMI_PB1100:
+ return db1000_dev_setup();
+ case BCSR_WHOAMI_PB1200_DDR1:
+ case BCSR_WHOAMI_PB1200_DDR2:
+ case BCSR_WHOAMI_DB1200:
+ return db1200_dev_setup();
+ case BCSR_WHOAMI_DB1300:
+ return db1300_dev_setup();
+ case BCSR_WHOAMI_DB1550:
+ case BCSR_WHOAMI_PB1550_SDR:
+ case BCSR_WHOAMI_PB1550_DDR:
+ return db1550_dev_setup();
+ }
+ return 0;
+}
+device_initcall(db1xxx_dev_init);
diff --git a/arch/mips/alchemy/devboards/platform.c b/arch/mips/alchemy/devboards/platform.c
new file mode 100644
index 000000000..754bdd2ca
--- /dev/null
+++ b/arch/mips/alchemy/devboards/platform.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * devboard misc stuff.
+ */
+
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/physmap.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+#include <asm/bootinfo.h>
+#include <asm/idle.h>
+#include <asm/reboot.h>
+#include <asm/setup.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-db1x00/bcsr.h>
+
+#include <prom.h>
+
+void prom_putchar(char c)
+{
+ if (alchemy_get_cputype() == ALCHEMY_CPU_AU1300)
+ alchemy_uart_putchar(AU1300_UART2_PHYS_ADDR, c);
+ else
+ alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
+}
+
+
+static struct platform_device db1x00_rtc_dev = {
+ .name = "rtc-au1xxx",
+ .id = -1,
+};
+
+
+static void db1x_power_off(void)
+{
+ bcsr_write(BCSR_RESETS, 0);
+ bcsr_write(BCSR_SYSTEM, BCSR_SYSTEM_PWROFF | BCSR_SYSTEM_RESET);
+ while (1) /* sit and spin */
+ cpu_wait();
+}
+
+static void db1x_reset(char *c)
+{
+ bcsr_write(BCSR_RESETS, 0);
+ bcsr_write(BCSR_SYSTEM, 0);
+}
+
+static int __init db1x_late_setup(void)
+{
+ if (!pm_power_off)
+ pm_power_off = db1x_power_off;
+ if (!_machine_halt)
+ _machine_halt = db1x_power_off;
+ if (!_machine_restart)
+ _machine_restart = db1x_reset;
+
+ platform_device_register(&db1x00_rtc_dev);
+
+ return 0;
+}
+device_initcall(db1x_late_setup);
+
+/* register a pcmcia socket */
+int __init db1x_register_pcmcia_socket(phys_addr_t pcmcia_attr_start,
+ phys_addr_t pcmcia_attr_end,
+ phys_addr_t pcmcia_mem_start,
+ phys_addr_t pcmcia_mem_end,
+ phys_addr_t pcmcia_io_start,
+ phys_addr_t pcmcia_io_end,
+ int card_irq,
+ int cd_irq,
+ int stschg_irq,
+ int eject_irq,
+ int id)
+{
+ int cnt, i, ret;
+ struct resource *sr;
+ struct platform_device *pd;
+
+ cnt = 5;
+ if (eject_irq)
+ cnt++;
+ if (stschg_irq)
+ cnt++;
+
+ sr = kcalloc(cnt, sizeof(struct resource), GFP_KERNEL);
+ if (!sr)
+ return -ENOMEM;
+
+ pd = platform_device_alloc("db1xxx_pcmcia", id);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ sr[0].name = "pcmcia-attr";
+ sr[0].flags = IORESOURCE_MEM;
+ sr[0].start = pcmcia_attr_start;
+ sr[0].end = pcmcia_attr_end;
+
+ sr[1].name = "pcmcia-mem";
+ sr[1].flags = IORESOURCE_MEM;
+ sr[1].start = pcmcia_mem_start;
+ sr[1].end = pcmcia_mem_end;
+
+ sr[2].name = "pcmcia-io";
+ sr[2].flags = IORESOURCE_MEM;
+ sr[2].start = pcmcia_io_start;
+ sr[2].end = pcmcia_io_end;
+
+ sr[3].name = "insert";
+ sr[3].flags = IORESOURCE_IRQ;
+ sr[3].start = sr[3].end = cd_irq;
+
+ sr[4].name = "card";
+ sr[4].flags = IORESOURCE_IRQ;
+ sr[4].start = sr[4].end = card_irq;
+
+ i = 5;
+ if (stschg_irq) {
+ sr[i].name = "stschg";
+ sr[i].flags = IORESOURCE_IRQ;
+ sr[i].start = sr[i].end = stschg_irq;
+ i++;
+ }
+ if (eject_irq) {
+ sr[i].name = "eject";
+ sr[i].flags = IORESOURCE_IRQ;
+ sr[i].start = sr[i].end = eject_irq;
+ }
+
+ pd->resource = sr;
+ pd->num_resources = cnt;
+
+ ret = platform_device_add(pd);
+ if (!ret)
+ return 0;
+
+ platform_device_put(pd);
+out:
+ kfree(sr);
+ return ret;
+}
+
+#define YAMON_SIZE 0x00100000
+#define YAMON_ENV_SIZE 0x00040000
+
+int __init db1x_register_norflash(unsigned long size, int width,
+ int swapped)
+{
+ struct physmap_flash_data *pfd;
+ struct platform_device *pd;
+ struct mtd_partition *parts;
+ struct resource *res;
+ int ret, i;
+
+ if (size < (8 * 1024 * 1024))
+ return -EINVAL;
+
+ ret = -ENOMEM;
+ parts = kcalloc(5, sizeof(struct mtd_partition), GFP_KERNEL);
+ if (!parts)
+ goto out;
+
+ res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+ if (!res)
+ goto out1;
+
+ pfd = kzalloc(sizeof(struct physmap_flash_data), GFP_KERNEL);
+ if (!pfd)
+ goto out2;
+
+ pd = platform_device_alloc("physmap-flash", 0);
+ if (!pd)
+ goto out3;
+
+ /* NOR flash ends at 0x20000000, regardless of size */
+ res->start = 0x20000000 - size;
+ res->end = 0x20000000 - 1;
+ res->flags = IORESOURCE_MEM;
+
+ /* partition setup. Most devboards have a switch which allows
+ * swapping the physical locations of the 2 NOR flash banks.
+ */
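+ /* For example (sizes derived from the assignments below), a 64MB
+ * flash with the banks not swapped ends up partitioned as:
+ *   User FS    32MB
+ *   User FS 2  28MB  (32MB minus the 4MB boot area)
+ *   YAMON       1MB  (read-only)
+ *   raw kernel  2.75MB
+ *   YAMON Env   0.25MB (read-only)
+ * With the banks swapped, "User FS" is placed last instead.
+ */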
+ i = 0;
+ if (!swapped) {
+ /* first NOR chip */
+ parts[i].offset = 0;
+ parts[i].name = "User FS";
+ parts[i].size = size / 2;
+ i++;
+ }
+
+ parts[i].offset = MTDPART_OFS_APPEND;
+ parts[i].name = "User FS 2";
+ parts[i].size = (size / 2) - (0x20000000 - 0x1fc00000);
+ i++;
+
+ parts[i].offset = MTDPART_OFS_APPEND;
+ parts[i].name = "YAMON";
+ parts[i].size = YAMON_SIZE;
+ parts[i].mask_flags = MTD_WRITEABLE;
+ i++;
+
+ parts[i].offset = MTDPART_OFS_APPEND;
+ parts[i].name = "raw kernel";
+ parts[i].size = 0x00400000 - YAMON_SIZE - YAMON_ENV_SIZE;
+ i++;
+
+ parts[i].offset = MTDPART_OFS_APPEND;
+ parts[i].name = "YAMON Env";
+ parts[i].size = YAMON_ENV_SIZE;
+ parts[i].mask_flags = MTD_WRITEABLE;
+ i++;
+
+ if (swapped) {
+ parts[i].offset = MTDPART_OFS_APPEND;
+ parts[i].name = "User FS";
+ parts[i].size = size / 2;
+ i++;
+ }
+
+ pfd->width = width;
+ pfd->parts = parts;
+ pfd->nr_parts = 5;
+
+ pd->dev.platform_data = pfd;
+ pd->resource = res;
+ pd->num_resources = 1;
+
+ ret = platform_device_add(pd);
+ if (!ret)
+ return ret;
+
+ platform_device_put(pd);
+out3:
+ kfree(pfd);
+out2:
+ kfree(res);
+out1:
+ kfree(parts);
+out:
+ return ret;
+}
diff --git a/arch/mips/alchemy/devboards/platform.h b/arch/mips/alchemy/devboards/platform.h
new file mode 100644
index 000000000..23d98fc09
--- /dev/null
+++ b/arch/mips/alchemy/devboards/platform.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DEVBOARD_PLATFORM_H_
+#define _DEVBOARD_PLATFORM_H_
+
+#include <linux/init.h>
+
+int __init db1x_register_pcmcia_socket(phys_addr_t pcmcia_attr_start,
+ phys_addr_t pcmcia_attr_end,
+ phys_addr_t pcmcia_mem_start,
+ phys_addr_t pcmcia_mem_end,
+ phys_addr_t pcmcia_io_start,
+ phys_addr_t pcmcia_io_end,
+ int card_irq,
+ int cd_irq,
+ int stschg_irq,
+ int eject_irq,
+ int id);
+
+int __init db1x_register_norflash(unsigned long size, int width,
+ int swapped);
+
+#endif
diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
new file mode 100644
index 000000000..73c778146
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pm.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Alchemy Development Board example suspend userspace interface.
+ *
+ * (c) 2008 Manuel Lauss <mano@roarinelk.homelinux.net>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/suspend.h>
+#include <linux/sysfs.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
+#include <asm/mach-db1x00/bcsr.h>
+
+/*
+ * Generic suspend userspace interface for Alchemy development boards.
+ * This code exports a few sysfs nodes under /sys/power/db1x/ which
+ * can be used by userspace to en/disable all au1x-provided wakeup
+ * sources and configure the timeout after which the TOYMATCH2 irq
+ * is to trigger a wakeup.
+ */
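+
+/*
+ * Rough usage sketch (illustrative, from userspace): with the attributes
+ * defined at the bottom of this file, arming a 60 second timer wakeup
+ * and suspending could look like
+ *
+ *   echo 60  > /sys/power/db1x/timer_timeout
+ *   echo 1   > /sys/power/db1x/timer
+ *   echo mem > /sys/power/state
+ *
+ * and /sys/power/db1x/wakesrc holds the wakeup cause after resume.
+ */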
+
+
+static unsigned long db1x_pm_sleep_secs;
+static unsigned long db1x_pm_wakemsk;
+static unsigned long db1x_pm_last_wakesrc;
+
+static int db1x_pm_enter(suspend_state_t state)
+{
+ unsigned short bcsrs[16];
+ int i, j, hasint;
+
+ /* save CPLD regs */
+ hasint = bcsr_read(BCSR_WHOAMI);
+ hasint = BCSR_WHOAMI_BOARD(hasint) >= BCSR_WHOAMI_DB1200;
+ j = (hasint) ? BCSR_MASKSET : BCSR_SYSTEM;
+
+ for (i = BCSR_STATUS; i <= j; i++)
+ bcsrs[i] = bcsr_read(i);
+
+ /* shut off hexleds */
+ bcsr_write(BCSR_HEXCLEAR, 3);
+
+ /* enable GPIO based wakeup */
+ alchemy_gpio1_input_enable();
+
+ /* clear and setup wake cause and source */
+ alchemy_wrsys(0, AU1000_SYS_WAKEMSK);
+ alchemy_wrsys(0, AU1000_SYS_WAKESRC);
+
+ alchemy_wrsys(db1x_pm_wakemsk, AU1000_SYS_WAKEMSK);
+
+ /* setup 1Hz-timer-based wakeup: wait for reg access */
+ while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_M20)
+ asm volatile ("nop");
+
+ alchemy_wrsys(alchemy_rdsys(AU1000_SYS_TOYREAD) + db1x_pm_sleep_secs,
+ AU1000_SYS_TOYMATCH2);
+
+ /* wait for value to really hit the register */
+ while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_M20)
+ asm volatile ("nop");
+
+ /* ...and now the sandman can come! */
+ au_sleep();
+
+
+ /* restore CPLD regs */
+ for (i = BCSR_STATUS; i <= BCSR_SYSTEM; i++)
+ bcsr_write(i, bcsrs[i]);
+
+ /* restore CPLD int registers */
+ if (hasint) {
+ bcsr_write(BCSR_INTCLR, 0xffff);
+ bcsr_write(BCSR_MASKCLR, 0xffff);
+ bcsr_write(BCSR_INTSTAT, 0xffff);
+ bcsr_write(BCSR_INTSET, bcsrs[BCSR_INTSET]);
+ bcsr_write(BCSR_MASKSET, bcsrs[BCSR_MASKSET]);
+ }
+
+ /* light up hexleds */
+ bcsr_write(BCSR_HEXCLEAR, 0);
+
+ return 0;
+}
+
+static int db1x_pm_begin(suspend_state_t state)
+{
+ if (!db1x_pm_wakemsk) {
+ printk(KERN_ERR "db1x: no wakeup source activated!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void db1x_pm_end(void)
+{
+ /* read and store wakeup source, then clear the register. To
+ * be able to clear it, WAKEMSK must be cleared first.
+ */
+ db1x_pm_last_wakesrc = alchemy_rdsys(AU1000_SYS_WAKESRC);
+
+ alchemy_wrsys(0, AU1000_SYS_WAKEMSK);
+ alchemy_wrsys(0, AU1000_SYS_WAKESRC);
+}
+
+static const struct platform_suspend_ops db1x_pm_ops = {
+ .valid = suspend_valid_only_mem,
+ .begin = db1x_pm_begin,
+ .enter = db1x_pm_enter,
+ .end = db1x_pm_end,
+};
+
+#define ATTRCMP(x) (0 == strcmp(attr->attr.name, #x))
+
+static ssize_t db1x_pmattr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ int idx;
+
+ if (ATTRCMP(timer_timeout))
+ return sprintf(buf, "%lu\n", db1x_pm_sleep_secs);
+
+ else if (ATTRCMP(timer))
+ return sprintf(buf, "%u\n",
+ !!(db1x_pm_wakemsk & SYS_WAKEMSK_M2));
+
+ else if (ATTRCMP(wakesrc))
+ return sprintf(buf, "%lu\n", db1x_pm_last_wakesrc);
+
+ else if (ATTRCMP(gpio0) || ATTRCMP(gpio1) || ATTRCMP(gpio2) ||
+ ATTRCMP(gpio3) || ATTRCMP(gpio4) || ATTRCMP(gpio5) ||
+ ATTRCMP(gpio6) || ATTRCMP(gpio7)) {
+ idx = (attr->attr.name)[4] - '0';
+ return sprintf(buf, "%d\n",
+ !!(db1x_pm_wakemsk & SYS_WAKEMSK_GPIO(idx)));
+
+ } else if (ATTRCMP(wakemsk)) {
+ return sprintf(buf, "%08lx\n", db1x_pm_wakemsk);
+ }
+
+ return -ENOENT;
+}
+
+static ssize_t db1x_pmattr_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *instr,
+ size_t bytes)
+{
+ unsigned long l;
+ int tmp;
+
+ if (ATTRCMP(timer_timeout)) {
+ tmp = kstrtoul(instr, 0, &l);
+ if (tmp)
+ return tmp;
+
+ db1x_pm_sleep_secs = l;
+
+ } else if (ATTRCMP(timer)) {
+ if (instr[0] != '0')
+ db1x_pm_wakemsk |= SYS_WAKEMSK_M2;
+ else
+ db1x_pm_wakemsk &= ~SYS_WAKEMSK_M2;
+
+ } else if (ATTRCMP(gpio0) || ATTRCMP(gpio1) || ATTRCMP(gpio2) ||
+ ATTRCMP(gpio3) || ATTRCMP(gpio4) || ATTRCMP(gpio5) ||
+ ATTRCMP(gpio6) || ATTRCMP(gpio7)) {
+ tmp = (attr->attr.name)[4] - '0';
+ if (instr[0] != '0') {
+ db1x_pm_wakemsk |= SYS_WAKEMSK_GPIO(tmp);
+ } else {
+ db1x_pm_wakemsk &= ~SYS_WAKEMSK_GPIO(tmp);
+ }
+
+ } else if (ATTRCMP(wakemsk)) {
+ tmp = kstrtoul(instr, 0, &l);
+ if (tmp)
+ return tmp;
+
+ db1x_pm_wakemsk = l & 0x0000003f;
+
+ } else
+ bytes = -ENOENT;
+
+ return bytes;
+}
+
+#define ATTR(x) \
+ static struct kobj_attribute x##_attribute = \
+ __ATTR(x, 0664, db1x_pmattr_show, \
+ db1x_pmattr_store);
+
+ATTR(gpio0) /* GPIO-based wakeup enable */
+ATTR(gpio1)
+ATTR(gpio2)
+ATTR(gpio3)
+ATTR(gpio4)
+ATTR(gpio5)
+ATTR(gpio6)
+ATTR(gpio7)
+ATTR(timer) /* TOYMATCH2-based wakeup enable */
+ATTR(timer_timeout) /* timer-based wakeup timeout value, in seconds */
+ATTR(wakesrc) /* contents of SYS_WAKESRC after last wakeup */
+ATTR(wakemsk) /* direct access to SYS_WAKEMSK */
+
+#define ATTR_LIST(x) & x ## _attribute.attr
+static struct attribute *db1x_pmattrs[] = {
+ ATTR_LIST(gpio0),
+ ATTR_LIST(gpio1),
+ ATTR_LIST(gpio2),
+ ATTR_LIST(gpio3),
+ ATTR_LIST(gpio4),
+ ATTR_LIST(gpio5),
+ ATTR_LIST(gpio6),
+ ATTR_LIST(gpio7),
+ ATTR_LIST(timer),
+ ATTR_LIST(timer_timeout),
+ ATTR_LIST(wakesrc),
+ ATTR_LIST(wakemsk),
+ NULL, /* terminator */
+};
+
+static struct attribute_group db1x_pmattr_group = {
+ .name = "db1x",
+ .attrs = db1x_pmattrs,
+};
+
+/*
+ * Initialize suspend interface
+ */
+static int __init pm_init(void)
+{
+ /* init TOY to tick at 1Hz if not already done. No need to wait
+ * for confirmation since there's plenty of time from here to
+ * the next suspend cycle.
+ */
+ if (alchemy_rdsys(AU1000_SYS_TOYTRIM) != 32767)
+ alchemy_wrsys(32767, AU1000_SYS_TOYTRIM);
+
+ db1x_pm_last_wakesrc = alchemy_rdsys(AU1000_SYS_WAKESRC);
+
+ alchemy_wrsys(0, AU1000_SYS_WAKESRC);
+ alchemy_wrsys(0, AU1000_SYS_WAKEMSK);
+
+ suspend_set_ops(&db1x_pm_ops);
+
+ return sysfs_create_group(power_kobj, &db1x_pmattr_group);
+}
+
+late_initcall(pm_init);
diff --git a/arch/mips/ar7/Makefile b/arch/mips/ar7/Makefile
new file mode 100644
index 000000000..cd51c6c6e
--- /dev/null
+++ b/arch/mips/ar7/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-y := \
+ prom.o \
+ setup.o \
+ memory.o \
+ irq.o \
+ time.o \
+ platform.o \
+ gpio.o \
+ clock.o
diff --git a/arch/mips/ar7/Platform b/arch/mips/ar7/Platform
new file mode 100644
index 000000000..a9257cc01
--- /dev/null
+++ b/arch/mips/ar7/Platform
@@ -0,0 +1,5 @@
+#
+# Texas Instruments AR7
+#
+cflags-$(CONFIG_AR7) += -I$(srctree)/arch/mips/include/asm/mach-ar7
+load-$(CONFIG_AR7) += 0xffffffff94100000
diff --git a/arch/mips/ar7/clock.c b/arch/mips/ar7/clock.c
new file mode 100644
index 000000000..95def949c
--- /dev/null
+++ b/arch/mips/ar7/clock.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
+ * Copyright (C) 2009 Florian Fainelli <florian@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/gcd.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+
+#include <asm/addrspace.h>
+#include <asm/mach-ar7/ar7.h>
+
+#define BOOT_PLL_SOURCE_MASK 0x3
+#define CPU_PLL_SOURCE_SHIFT 16
+#define BUS_PLL_SOURCE_SHIFT 14
+#define USB_PLL_SOURCE_SHIFT 18
+#define DSP_PLL_SOURCE_SHIFT 22
+#define BOOT_PLL_SOURCE_AFE 0
+#define BOOT_PLL_SOURCE_BUS 0
+#define BOOT_PLL_SOURCE_REF 1
+#define BOOT_PLL_SOURCE_XTAL 2
+#define BOOT_PLL_SOURCE_CPU 3
+#define BOOT_PLL_BYPASS 0x00000020
+#define BOOT_PLL_ASYNC_MODE 0x02000000
+#define BOOT_PLL_2TO1_MODE 0x00008000
+
+#define TNETD7200_CLOCK_ID_CPU 0
+#define TNETD7200_CLOCK_ID_DSP 1
+#define TNETD7200_CLOCK_ID_USB 2
+
+#define TNETD7200_DEF_CPU_CLK 211000000
+#define TNETD7200_DEF_DSP_CLK 125000000
+#define TNETD7200_DEF_USB_CLK 48000000
+
+struct tnetd7300_clock {
+ u32 ctrl;
+#define PREDIV_MASK 0x001f0000
+#define PREDIV_SHIFT 16
+#define POSTDIV_MASK 0x0000001f
+ u32 unused1[3];
+ u32 pll;
+#define MUL_MASK 0x0000f000
+#define MUL_SHIFT 12
+#define PLL_MODE_MASK 0x00000001
+#define PLL_NDIV 0x00000800
+#define PLL_DIV 0x00000002
+#define PLL_STATUS 0x00000001
+ u32 unused2[3];
+};
+
+struct tnetd7300_clocks {
+ struct tnetd7300_clock bus;
+ struct tnetd7300_clock cpu;
+ struct tnetd7300_clock usb;
+ struct tnetd7300_clock dsp;
+};
+
+struct tnetd7200_clock {
+ u32 ctrl;
+ u32 unused1[3];
+#define DIVISOR_ENABLE_MASK 0x00008000
+ u32 mul;
+ u32 prediv;
+ u32 postdiv;
+ u32 postdiv2;
+ u32 unused2[6];
+ u32 cmd;
+ u32 status;
+ u32 cmden;
+ u32 padding[15];
+};
+
+struct tnetd7200_clocks {
+ struct tnetd7200_clock cpu;
+ struct tnetd7200_clock dsp;
+ struct tnetd7200_clock usb;
+};
+
+static struct clk bus_clk = {
+ .rate = 125000000,
+};
+
+static struct clk cpu_clk = {
+ .rate = 150000000,
+};
+
+static struct clk dsp_clk;
+static struct clk vbus_clk;
+
+static void approximate(int base, int target, int *prediv,
+ int *postdiv, int *mul)
+{
+ int i, j, k, freq, res = target;
+ for (i = 1; i <= 16; i++)
+ for (j = 1; j <= 32; j++)
+ for (k = 1; k <= 32; k++) {
+ freq = abs(base / j * i / k - target);
+ if (freq < res) {
+ res = freq;
+ *mul = i;
+ *prediv = j;
+ *postdiv = k;
+ }
+ }
+}
+
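+/*
+ * calculate() picks PLL dividers so that base / prediv * mul / postdiv
+ * hits the target exactly (via a GCD search), falling back to the
+ * brute-force approximate() above when no exact combination exists.
+ * Worked example, assuming the 25MHz AR7_REF_CLOCK as base and the
+ * 125MHz default DSP clock as target: prediv = 1 gives
+ * gcd(125MHz, 25MHz) = 25MHz, hence mul = 5 and postdiv = 1, i.e.
+ * 25MHz / 1 * 5 / 1 = 125MHz with no adjustment needed.
+ */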
+static void calculate(int base, int target, int *prediv, int *postdiv,
+ int *mul)
+{
+ int tmp_gcd, tmp_base, tmp_freq;
+
+ for (*prediv = 1; *prediv <= 32; (*prediv)++) {
+ tmp_base = base / *prediv;
+ tmp_gcd = gcd(target, tmp_base);
+ *mul = target / tmp_gcd;
+ *postdiv = tmp_base / tmp_gcd;
+ if ((*mul < 1) || (*mul >= 16))
+ continue;
+ if ((*postdiv > 0) && (*postdiv <= 32))
+ break;
+ }
+
+ if (base / *prediv * *mul / *postdiv != target) {
+ approximate(base, target, prediv, postdiv, mul);
+ tmp_freq = base / *prediv * *mul / *postdiv;
+ printk(KERN_WARNING
+ "Adjusted requested frequency %d to %d\n",
+ target, tmp_freq);
+ }
+
+ printk(KERN_DEBUG "Clocks: prediv: %d, postdiv: %d, mul: %d\n",
+ *prediv, *postdiv, *mul);
+}
+
+static int tnetd7300_dsp_clock(void)
+{
+ u32 didr1, didr2;
+ u8 rev = ar7_chip_rev();
+ didr1 = readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x18));
+ didr2 = readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x1c));
+ if (didr2 & (1 << 23))
+ return 0;
+ if ((rev >= 0x23) && (rev != 0x57))
+ return 250000000;
+ if ((((didr2 & 0x1fff) << 10) | ((didr1 & 0xffc00000) >> 22))
+ > 4208000)
+ return 250000000;
+ return 0;
+}
+
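+/*
+ * Sketch of the readback below: in normal PLL mode the returned rate is
+ * base_clock * mul / (prediv * postdiv); BOOT_PLL_BYPASS and the
+ * divide-only PLL modes reduce to base_clock / (prediv * postdiv)
+ * (possibly shifted further down), and base_clock itself is selected by
+ * the boot config register's PLL source bits.
+ */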
+static int tnetd7300_get_clock(u32 shift, struct tnetd7300_clock *clock,
+ u32 *bootcr, u32 bus_clock)
+{
+ int product;
+ int base_clock = AR7_REF_CLOCK;
+ u32 ctrl = readl(&clock->ctrl);
+ u32 pll = readl(&clock->pll);
+ int prediv = ((ctrl & PREDIV_MASK) >> PREDIV_SHIFT) + 1;
+ int postdiv = (ctrl & POSTDIV_MASK) + 1;
+ int divisor = prediv * postdiv;
+ int mul = ((pll & MUL_MASK) >> MUL_SHIFT) + 1;
+
+ switch ((*bootcr & (BOOT_PLL_SOURCE_MASK << shift)) >> shift) {
+ case BOOT_PLL_SOURCE_BUS:
+ base_clock = bus_clock;
+ break;
+ case BOOT_PLL_SOURCE_REF:
+ base_clock = AR7_REF_CLOCK;
+ break;
+ case BOOT_PLL_SOURCE_XTAL:
+ base_clock = AR7_XTAL_CLOCK;
+ break;
+ case BOOT_PLL_SOURCE_CPU:
+ base_clock = cpu_clk.rate;
+ break;
+ }
+
+ if (*bootcr & BOOT_PLL_BYPASS)
+ return base_clock / divisor;
+
+ if ((pll & PLL_MODE_MASK) == 0)
+ return (base_clock >> (mul / 16 + 1)) / divisor;
+
+ if ((pll & (PLL_NDIV | PLL_DIV)) == (PLL_NDIV | PLL_DIV)) {
+ product = (mul & 1) ?
+ (base_clock * mul) >> 1 :
+ (base_clock * (mul - 1)) >> 2;
+ return product / divisor;
+ }
+
+ if (mul == 16)
+ return base_clock / divisor;
+
+ return base_clock * mul / divisor;
+}
+
+static void tnetd7300_set_clock(u32 shift, struct tnetd7300_clock *clock,
+ u32 *bootcr, u32 frequency)
+{
+ int prediv, postdiv, mul;
+ int base_clock = bus_clk.rate;
+
+ switch ((*bootcr & (BOOT_PLL_SOURCE_MASK << shift)) >> shift) {
+ case BOOT_PLL_SOURCE_BUS:
+ base_clock = bus_clk.rate;
+ break;
+ case BOOT_PLL_SOURCE_REF:
+ base_clock = AR7_REF_CLOCK;
+ break;
+ case BOOT_PLL_SOURCE_XTAL:
+ base_clock = AR7_XTAL_CLOCK;
+ break;
+ case BOOT_PLL_SOURCE_CPU:
+ base_clock = cpu_clk.rate;
+ break;
+ }
+
+ calculate(base_clock, frequency, &prediv, &postdiv, &mul);
+
+ writel(((prediv - 1) << PREDIV_SHIFT) | (postdiv - 1), &clock->ctrl);
+ mdelay(1);
+ writel(4, &clock->pll);
+ while (readl(&clock->pll) & PLL_STATUS)
+ ;
+ writel(((mul - 1) << MUL_SHIFT) | (0xff << 3) | 0x0e, &clock->pll);
+ mdelay(75);
+}
+
+static void __init tnetd7300_init_clocks(void)
+{
+ u32 *bootcr = (u32 *)ioremap(AR7_REGS_DCL, 4);
+ struct tnetd7300_clocks *clocks =
+ ioremap(UR8_REGS_CLOCKS,
+ sizeof(struct tnetd7300_clocks));
+
+ bus_clk.rate = tnetd7300_get_clock(BUS_PLL_SOURCE_SHIFT,
+ &clocks->bus, bootcr, AR7_AFE_CLOCK);
+
+ if (*bootcr & BOOT_PLL_ASYNC_MODE)
+ cpu_clk.rate = tnetd7300_get_clock(CPU_PLL_SOURCE_SHIFT,
+ &clocks->cpu, bootcr, AR7_AFE_CLOCK);
+ else
+ cpu_clk.rate = bus_clk.rate;
+
+ if (dsp_clk.rate == 250000000)
+ tnetd7300_set_clock(DSP_PLL_SOURCE_SHIFT, &clocks->dsp,
+ bootcr, dsp_clk.rate);
+
+ iounmap(clocks);
+ iounmap(bootcr);
+}
+
+static void tnetd7200_set_clock(int base, struct tnetd7200_clock *clock,
+ int prediv, int postdiv, int postdiv2, int mul, u32 frequency)
+{
+ printk(KERN_INFO
+ "Clocks: base = %d, frequency = %u, prediv = %d, "
+ "postdiv = %d, postdiv2 = %d, mul = %d\n",
+ base, frequency, prediv, postdiv, postdiv2, mul);
+
+ writel(0, &clock->ctrl);
+ writel(DIVISOR_ENABLE_MASK | ((prediv - 1) & 0x1F), &clock->prediv);
+ writel((mul - 1) & 0xF, &clock->mul);
+
+ while (readl(&clock->status) & 0x1)
+ ; /* nop */
+
+ writel(DIVISOR_ENABLE_MASK | ((postdiv - 1) & 0x1F), &clock->postdiv);
+
+ writel(readl(&clock->cmden) | 1, &clock->cmden);
+ writel(readl(&clock->cmd) | 1, &clock->cmd);
+
+ while (readl(&clock->status) & 0x1)
+ ; /* nop */
+
+ writel(DIVISOR_ENABLE_MASK | ((postdiv2 - 1) & 0x1F), &clock->postdiv2);
+
+ writel(readl(&clock->cmden) | 1, &clock->cmden);
+ writel(readl(&clock->cmd) | 1, &clock->cmd);
+
+ while (readl(&clock->status) & 0x1)
+ ; /* nop */
+
+ writel(readl(&clock->ctrl) | 1, &clock->ctrl);
+}
+
+static int tnetd7200_get_clock_base(int clock_id, u32 *bootcr)
+{
+ if (*bootcr & BOOT_PLL_ASYNC_MODE)
+ /* Async */
+ switch (clock_id) {
+ case TNETD7200_CLOCK_ID_DSP:
+ return AR7_REF_CLOCK;
+ default:
+ return AR7_AFE_CLOCK;
+ }
+ else
+ /* Sync */
+ if (*bootcr & BOOT_PLL_2TO1_MODE)
+ /* 2:1 */
+ switch (clock_id) {
+ case TNETD7200_CLOCK_ID_DSP:
+ return AR7_REF_CLOCK;
+ default:
+ return AR7_AFE_CLOCK;
+ }
+ else
+ /* 1:1 */
+ return AR7_REF_CLOCK;
+}
+
+
+static void __init tnetd7200_init_clocks(void)
+{
+ u32 *bootcr = (u32 *)ioremap(AR7_REGS_DCL, 4);
+ struct tnetd7200_clocks *clocks =
+ ioremap(AR7_REGS_CLOCKS,
+ sizeof(struct tnetd7200_clocks));
+ int cpu_base, cpu_mul, cpu_prediv, cpu_postdiv;
+ int dsp_base, dsp_mul, dsp_prediv, dsp_postdiv;
+ int usb_base, usb_mul, usb_prediv, usb_postdiv;
+
+ cpu_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_CPU, bootcr);
+ dsp_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_DSP, bootcr);
+
+ if (*bootcr & BOOT_PLL_ASYNC_MODE) {
+ printk(KERN_INFO "Clocks: Async mode\n");
+
+ printk(KERN_INFO "Clocks: Setting DSP clock\n");
+ calculate(dsp_base, TNETD7200_DEF_DSP_CLK,
+ &dsp_prediv, &dsp_postdiv, &dsp_mul);
+ bus_clk.rate =
+ ((dsp_base / dsp_prediv) * dsp_mul) / dsp_postdiv;
+ tnetd7200_set_clock(dsp_base, &clocks->dsp,
+ dsp_prediv, dsp_postdiv * 2, dsp_postdiv, dsp_mul * 2,
+ bus_clk.rate);
+
+ printk(KERN_INFO "Clocks: Setting CPU clock\n");
+ calculate(cpu_base, TNETD7200_DEF_CPU_CLK, &cpu_prediv,
+ &cpu_postdiv, &cpu_mul);
+ cpu_clk.rate =
+ ((cpu_base / cpu_prediv) * cpu_mul) / cpu_postdiv;
+ tnetd7200_set_clock(cpu_base, &clocks->cpu,
+ cpu_prediv, cpu_postdiv, -1, cpu_mul,
+ cpu_clk.rate);
+
+ } else
+ if (*bootcr & BOOT_PLL_2TO1_MODE) {
+ printk(KERN_INFO "Clocks: Sync 2:1 mode\n");
+
+ printk(KERN_INFO "Clocks: Setting CPU clock\n");
+ calculate(cpu_base, TNETD7200_DEF_CPU_CLK, &cpu_prediv,
+ &cpu_postdiv, &cpu_mul);
+ cpu_clk.rate = ((cpu_base / cpu_prediv) * cpu_mul)
+ / cpu_postdiv;
+ tnetd7200_set_clock(cpu_base, &clocks->cpu,
+ cpu_prediv, cpu_postdiv, -1, cpu_mul,
+ cpu_clk.rate);
+
+ printk(KERN_INFO "Clocks: Setting DSP clock\n");
+ calculate(dsp_base, TNETD7200_DEF_DSP_CLK, &dsp_prediv,
+ &dsp_postdiv, &dsp_mul);
+ bus_clk.rate = cpu_clk.rate / 2;
+ tnetd7200_set_clock(dsp_base, &clocks->dsp,
+ dsp_prediv, dsp_postdiv * 2, dsp_postdiv,
+ dsp_mul * 2, bus_clk.rate);
+ } else {
+ printk(KERN_INFO "Clocks: Sync 1:1 mode\n");
+
+ printk(KERN_INFO "Clocks: Setting DSP clock\n");
+ calculate(dsp_base, TNETD7200_DEF_DSP_CLK, &dsp_prediv,
+ &dsp_postdiv, &dsp_mul);
+ bus_clk.rate = ((dsp_base / dsp_prediv) * dsp_mul)
+ / dsp_postdiv;
+ tnetd7200_set_clock(dsp_base, &clocks->dsp,
+ dsp_prediv, dsp_postdiv * 2, dsp_postdiv,
+ dsp_mul * 2, bus_clk.rate);
+
+ cpu_clk.rate = bus_clk.rate;
+ }
+
+ printk(KERN_INFO "Clocks: Setting USB clock\n");
+ usb_base = bus_clk.rate;
+ calculate(usb_base, TNETD7200_DEF_USB_CLK, &usb_prediv,
+ &usb_postdiv, &usb_mul);
+ tnetd7200_set_clock(usb_base, &clocks->usb,
+ usb_prediv, usb_postdiv, -1, usb_mul,
+ TNETD7200_DEF_USB_CLK);
+
+ dsp_clk.rate = cpu_clk.rate;
+
+ iounmap(clocks);
+ iounmap(bootcr);
+}
+
+/*
+ * Linux clock API
+ */
+int clk_enable(struct clk *clk)
+{
+ return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_disable);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ if (!clk)
+ return 0;
+
+ return clk->rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+struct clk *clk_get(struct device *dev, const char *id)
+{
+ if (!strcmp(id, "bus"))
+ return &bus_clk;
+ /* cpmac and vbus share the same rate */
+ if (!strcmp(id, "cpmac"))
+ return &vbus_clk;
+ if (!strcmp(id, "cpu"))
+ return &cpu_clk;
+ if (!strcmp(id, "dsp"))
+ return &dsp_clk;
+ if (!strcmp(id, "vbus"))
+ return &vbus_clk;
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(clk_get);
+
+void clk_put(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_put);
+
+void __init ar7_init_clocks(void)
+{
+ switch (ar7_chip_id()) {
+ case AR7_CHIP_7100:
+ case AR7_CHIP_7200:
+ tnetd7200_init_clocks();
+ break;
+ case AR7_CHIP_7300:
+ dsp_clk.rate = tnetd7300_dsp_clock();
+ tnetd7300_init_clocks();
+ break;
+ default:
+ break;
+ }
+ /* adjust vbus clock rate */
+ vbus_clk.rate = bus_clk.rate / 2;
+}
+
+/* dummy functions, should not be called */
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+ WARN_ON(clk);
+ return NULL;
+}
+EXPORT_SYMBOL(clk_get_parent);
diff --git a/arch/mips/ar7/gpio.c b/arch/mips/ar7/gpio.c
new file mode 100644
index 000000000..8b006addd
--- /dev/null
+++ b/arch/mips/ar7/gpio.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
+ * Copyright (C) 2009-2010 Florian Fainelli <florian@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/gpio.h>
+
+#include <asm/mach-ar7/ar7.h>
+
+#define AR7_GPIO_MAX 32
+#define TITAN_GPIO_MAX 51
+
+struct ar7_gpio_chip {
+ void __iomem *regs;
+ struct gpio_chip chip;
+};
+
+static int ar7_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
+ void __iomem *gpio_in = gpch->regs + AR7_GPIO_INPUT;
+
+ return !!(readl(gpio_in) & (1 << gpio));
+}
+
+static int titan_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
+ void __iomem *gpio_in0 = gpch->regs + TITAN_GPIO_INPUT_0;
+ void __iomem *gpio_in1 = gpch->regs + TITAN_GPIO_INPUT_1;
+
+ return readl(gpio >> 5 ? gpio_in1 : gpio_in0) & (1 << (gpio & 0x1f));
+}
+
+static void ar7_gpio_set_value(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
+ void __iomem *gpio_out = gpch->regs + AR7_GPIO_OUTPUT;
+ unsigned tmp;
+
+ tmp = readl(gpio_out) & ~(1 << gpio);
+ if (value)
+ tmp |= 1 << gpio;
+ writel(tmp, gpio_out);
+}
+
+static void titan_gpio_set_value(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
+ void __iomem *gpio_out0 = gpch->regs + TITAN_GPIO_OUTPUT_0;
+ void __iomem *gpio_out1 = gpch->regs + TITAN_GPIO_OUTPUT_1;
+ unsigned tmp;
+
+ tmp = readl(gpio >> 5 ? gpio_out1 : gpio_out0) & ~(1 << (gpio & 0x1f));
+ if (value)
+ tmp |= 1 << (gpio & 0x1f);
+ writel(tmp, gpio >> 5 ? gpio_out1 : gpio_out0);
+}
+
+static int ar7_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
+ void __iomem *gpio_dir = gpch->regs + AR7_GPIO_DIR;
+
+ writel(readl(gpio_dir) | (1 << gpio), gpio_dir);
+
+ return 0;
+}
+
+static int titan_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
+ void __iomem *gpio_dir0 = gpch->regs + TITAN_GPIO_DIR_0;
+ void __iomem *gpio_dir1 = gpch->regs + TITAN_GPIO_DIR_1;
+
+ if (gpio >= TITAN_GPIO_MAX)
+ return -EINVAL;
+
+ writel(readl(gpio >> 5 ? gpio_dir1 : gpio_dir0) | (1 << (gpio & 0x1f)),
+ gpio >> 5 ? gpio_dir1 : gpio_dir0);
+ return 0;
+}
+
+static int ar7_gpio_direction_output(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
+ void __iomem *gpio_dir = gpch->regs + AR7_GPIO_DIR;
+
+ ar7_gpio_set_value(chip, gpio, value);
+ writel(readl(gpio_dir) & ~(1 << gpio), gpio_dir);
+
+ return 0;
+}
+
+static int titan_gpio_direction_output(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
+ void __iomem *gpio_dir0 = gpch->regs + TITAN_GPIO_DIR_0;
+ void __iomem *gpio_dir1 = gpch->regs + TITAN_GPIO_DIR_1;
+
+ if (gpio >= TITAN_GPIO_MAX)
+ return -EINVAL;
+
+ titan_gpio_set_value(chip, gpio, value);
+ writel(readl(gpio >> 5 ? gpio_dir1 : gpio_dir0) & ~(1 <<
+ (gpio & 0x1f)), gpio >> 5 ? gpio_dir1 : gpio_dir0);
+
+ return 0;
+}
+
+static struct ar7_gpio_chip ar7_gpio_chip = {
+ .chip = {
+ .label = "ar7-gpio",
+ .direction_input = ar7_gpio_direction_input,
+ .direction_output = ar7_gpio_direction_output,
+ .set = ar7_gpio_set_value,
+ .get = ar7_gpio_get_value,
+ .base = 0,
+ .ngpio = AR7_GPIO_MAX,
+ }
+};
+
+static struct ar7_gpio_chip titan_gpio_chip = {
+ .chip = {
+ .label = "titan-gpio",
+ .direction_input = titan_gpio_direction_input,
+ .direction_output = titan_gpio_direction_output,
+ .set = titan_gpio_set_value,
+ .get = titan_gpio_get_value,
+ .base = 0,
+ .ngpio = TITAN_GPIO_MAX,
+ }
+};
+
+static inline int ar7_gpio_enable_ar7(unsigned gpio)
+{
+ void __iomem *gpio_en = ar7_gpio_chip.regs + AR7_GPIO_ENABLE;
+
+ writel(readl(gpio_en) | (1 << gpio), gpio_en);
+
+ return 0;
+}
+
+static inline int ar7_gpio_enable_titan(unsigned gpio)
+{
+ void __iomem *gpio_en0 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_0;
+ void __iomem *gpio_en1 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_1;
+
+ writel(readl(gpio >> 5 ? gpio_en1 : gpio_en0) | (1 << (gpio & 0x1f)),
+ gpio >> 5 ? gpio_en1 : gpio_en0);
+
+ return 0;
+}
+
+int ar7_gpio_enable(unsigned gpio)
+{
+ return ar7_is_titan() ? ar7_gpio_enable_titan(gpio) :
+ ar7_gpio_enable_ar7(gpio);
+}
+EXPORT_SYMBOL(ar7_gpio_enable);
+
+static inline int ar7_gpio_disable_ar7(unsigned gpio)
+{
+ void __iomem *gpio_en = ar7_gpio_chip.regs + AR7_GPIO_ENABLE;
+
+ writel(readl(gpio_en) & ~(1 << gpio), gpio_en);
+
+ return 0;
+}
+
+static inline int ar7_gpio_disable_titan(unsigned gpio)
+{
+ void __iomem *gpio_en0 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_0;
+ void __iomem *gpio_en1 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_1;
+
+ writel(readl(gpio >> 5 ? gpio_en1 : gpio_en0) & ~(1 << (gpio & 0x1f)),
+ gpio >> 5 ? gpio_en1 : gpio_en0);
+
+ return 0;
+}
+
+int ar7_gpio_disable(unsigned gpio)
+{
+ return ar7_is_titan() ? ar7_gpio_disable_titan(gpio) :
+ ar7_gpio_disable_ar7(gpio);
+}
+EXPORT_SYMBOL(ar7_gpio_disable);
+
+struct titan_gpio_cfg {
+ u32 reg;
+ u32 shift;
+ u32 func;
+};
+
+static const struct titan_gpio_cfg titan_gpio_table[] = {
+ /* reg, start bit, mux value */
+ {4, 24, 1},
+ {4, 26, 1},
+ {4, 28, 1},
+ {4, 30, 1},
+ {5, 6, 1},
+ {5, 8, 1},
+ {5, 10, 1},
+ {5, 12, 1},
+ {7, 14, 3},
+ {7, 16, 3},
+ {7, 18, 3},
+ {7, 20, 3},
+ {7, 22, 3},
+ {7, 26, 3},
+ {7, 28, 3},
+ {7, 30, 3},
+ {8, 0, 3},
+ {8, 2, 3},
+ {8, 4, 3},
+ {8, 10, 3},
+ {8, 14, 3},
+ {8, 16, 3},
+ {8, 18, 3},
+ {8, 20, 3},
+ {9, 8, 3},
+ {9, 10, 3},
+ {9, 12, 3},
+ {9, 14, 3},
+ {9, 18, 3},
+ {9, 20, 3},
+ {9, 24, 3},
+ {9, 26, 3},
+ {9, 28, 3},
+ {9, 30, 3},
+ {10, 0, 3},
+ {10, 2, 3},
+ {10, 8, 3},
+ {10, 10, 3},
+ {10, 12, 3},
+ {10, 14, 3},
+ {13, 12, 3},
+ {13, 14, 3},
+ {13, 16, 3},
+ {13, 18, 3},
+ {13, 24, 3},
+ {13, 26, 3},
+ {13, 28, 3},
+ {13, 30, 3},
+ {14, 2, 3},
+ {14, 6, 3},
+ {14, 8, 3},
+ {14, 12, 3}
+};
+
+static int titan_gpio_pinsel(unsigned gpio)
+{
+ struct titan_gpio_cfg gpio_cfg;
+ u32 mux_status, pin_sel_reg, tmp;
+ void __iomem *pin_sel = (void __iomem *)KSEG1ADDR(AR7_REGS_PINSEL);
+
+ if (gpio >= ARRAY_SIZE(titan_gpio_table))
+ return -EINVAL;
+
+ gpio_cfg = titan_gpio_table[gpio];
+ pin_sel_reg = gpio_cfg.reg - 1;
+
+ mux_status = (readl(pin_sel + pin_sel_reg) >> gpio_cfg.shift) & 0x3;
+
+ /* Check the mux status */
+ if (!((mux_status == 0) || (mux_status == gpio_cfg.func)))
+ return 0;
+
+ /* Set the pin sel value */
+ tmp = readl(pin_sel + pin_sel_reg);
+ tmp |= ((gpio_cfg.func & 0x3) << gpio_cfg.shift);
+ writel(tmp, pin_sel + pin_sel_reg);
+
+ return 0;
+}
+
+/* Perform minimal Titan GPIO configuration */
+static void titan_gpio_init(void)
+{
+ unsigned i;
+
+ for (i = 44; i < 48; i++) {
+ titan_gpio_pinsel(i);
+ ar7_gpio_enable_titan(i);
+ titan_gpio_direction_input(&titan_gpio_chip.chip, i);
+ }
+}
+
+int __init ar7_gpio_init(void)
+{
+ int ret;
+ struct ar7_gpio_chip *gpch;
+ unsigned size;
+
+ if (!ar7_is_titan()) {
+ gpch = &ar7_gpio_chip;
+ size = 0x10;
+ } else {
+ gpch = &titan_gpio_chip;
+ size = 0x1f;
+ }
+
+ gpch->regs = ioremap(AR7_REGS_GPIO, size);
+ if (!gpch->regs) {
+ printk(KERN_ERR "%s: failed to ioremap regs\n",
+ gpch->chip.label);
+ return -ENOMEM;
+ }
+
+ ret = gpiochip_add_data(&gpch->chip, gpch);
+ if (ret) {
+ printk(KERN_ERR "%s: failed to add gpiochip\n",
+ gpch->chip.label);
+ return ret;
+ }
+ printk(KERN_INFO "%s: registered %d GPIOs\n",
+ gpch->chip.label, gpch->chip.ngpio);
+
+ if (ar7_is_titan())
+ titan_gpio_init();
+
+ return ret;
+}
diff --git a/arch/mips/ar7/irq.c b/arch/mips/ar7/irq.c
new file mode 100644
index 000000000..f0a7942d3
--- /dev/null
+++ b/arch/mips/ar7/irq.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2006,2007 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2006,2007 Eugene Konev <ejka@openwrt.org>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/mach-ar7/ar7.h>
+
+#define EXCEPT_OFFSET 0x80
+#define PACE_OFFSET 0xA0
+#define CHNLS_OFFSET 0x200
+
+#define REG_OFFSET(irq, reg) ((irq) / 32 * 0x4 + reg * 0x10)
+#define SEC_REG_OFFSET(reg) (EXCEPT_OFFSET + reg * 0x8)
+#define SEC_SR_OFFSET (SEC_REG_OFFSET(0)) /* 0x80 */
+#define CR_OFFSET(irq) (REG_OFFSET(irq, 1)) /* 0x10 */
+#define SEC_CR_OFFSET (SEC_REG_OFFSET(1)) /* 0x88 */
+#define ESR_OFFSET(irq) (REG_OFFSET(irq, 2)) /* 0x20 */
+#define SEC_ESR_OFFSET (SEC_REG_OFFSET(2)) /* 0x90 */
+#define ECR_OFFSET(irq) (REG_OFFSET(irq, 3)) /* 0x30 */
+#define SEC_ECR_OFFSET (SEC_REG_OFFSET(3)) /* 0x98 */
+#define PIR_OFFSET (0x40)
+#define MSR_OFFSET (0x44)
+#define PM_OFFSET(irq) (REG_OFFSET(irq, 5)) /* 0x50 */
+#define TM_OFFSET(irq) (REG_OFFSET(irq, 6)) /* 0x60 */
+
+#define REG(addr) ((u32 *)(KSEG1ADDR(AR7_REGS_IRQ) + addr))
+
+#define CHNL_OFFSET(chnl) (CHNLS_OFFSET + (chnl * 4))
+
+static int ar7_irq_base;
+
+static void ar7_unmask_irq(struct irq_data *d)
+{
+ writel(1 << ((d->irq - ar7_irq_base) % 32),
+ REG(ESR_OFFSET(d->irq - ar7_irq_base)));
+}
+
+static void ar7_mask_irq(struct irq_data *d)
+{
+ writel(1 << ((d->irq - ar7_irq_base) % 32),
+ REG(ECR_OFFSET(d->irq - ar7_irq_base)));
+}
+
+static void ar7_ack_irq(struct irq_data *d)
+{
+ writel(1 << ((d->irq - ar7_irq_base) % 32),
+ REG(CR_OFFSET(d->irq - ar7_irq_base)));
+}
+
+static void ar7_unmask_sec_irq(struct irq_data *d)
+{
+ writel(1 << (d->irq - ar7_irq_base - 40), REG(SEC_ESR_OFFSET));
+}
+
+static void ar7_mask_sec_irq(struct irq_data *d)
+{
+ writel(1 << (d->irq - ar7_irq_base - 40), REG(SEC_ECR_OFFSET));
+}
+
+static void ar7_ack_sec_irq(struct irq_data *d)
+{
+ writel(1 << (d->irq - ar7_irq_base - 40), REG(SEC_CR_OFFSET));
+}
+
+static struct irq_chip ar7_irq_type = {
+ .name = "AR7",
+ .irq_unmask = ar7_unmask_irq,
+ .irq_mask = ar7_mask_irq,
+ .irq_ack = ar7_ack_irq
+};
+
+static struct irq_chip ar7_sec_irq_type = {
+ .name = "AR7",
+ .irq_unmask = ar7_unmask_sec_irq,
+ .irq_mask = ar7_mask_sec_irq,
+ .irq_ack = ar7_ack_sec_irq,
+};
+
+static void __init ar7_irq_init(int base)
+{
+ int i;
+ /*
+ * Disable interrupts and clear pending
+ */
+ writel(0xffffffff, REG(ECR_OFFSET(0)));
+ writel(0xff, REG(ECR_OFFSET(32)));
+ writel(0xffffffff, REG(SEC_ECR_OFFSET));
+ writel(0xffffffff, REG(CR_OFFSET(0)));
+ writel(0xff, REG(CR_OFFSET(32)));
+ writel(0xffffffff, REG(SEC_CR_OFFSET));
+
+ ar7_irq_base = base;
+
+ for (i = 0; i < 40; i++) {
+ writel(i, REG(CHNL_OFFSET(i)));
+ /* Primary IRQs */
+ irq_set_chip_and_handler(base + i, &ar7_irq_type,
+ handle_level_irq);
+ /* Secondary IRQs */
+ if (i < 32)
+ irq_set_chip_and_handler(base + i + 40,
+ &ar7_sec_irq_type,
+ handle_level_irq);
+ }
+
+ if (request_irq(2, no_action, IRQF_NO_THREAD, "AR7 cascade interrupt",
+ NULL))
+ pr_err("Failed to request irq 2 (AR7 cascade interrupt)\n");
+ if (request_irq(ar7_irq_base, no_action, IRQF_NO_THREAD,
+ "AR7 cascade interrupt", NULL)) {
+ pr_err("Failed to request irq %d (AR7 cascade interrupt)\n",
+ ar7_irq_base);
+ }
+ set_c0_status(IE_IRQ0);
+}
+
+void __init arch_init_irq(void)
+{
+ mips_cpu_irq_init();
+ ar7_irq_init(8);
+}
+
+static void ar7_cascade(void)
+{
+ u32 status;
+ int i, irq;
+
+ /* Primary IRQs */
+ irq = readl(REG(PIR_OFFSET)) & 0x3f;
+ if (irq) {
+ do_IRQ(ar7_irq_base + irq);
+ return;
+ }
+
+ /* Secondary IRQs are cascaded through primary IRQ 0 */
+ writel(1, REG(CR_OFFSET(irq)));
+ status = readl(REG(SEC_SR_OFFSET));
+ for (i = 0; i < 32; i++) {
+ if (status & 1) {
+ do_IRQ(ar7_irq_base + i + 40);
+ return;
+ }
+ status >>= 1;
+ }
+
+ spurious_interrupt();
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
+ if (pending & STATUSF_IP7) /* cpu timer */
+ do_IRQ(7);
+ else if (pending & STATUSF_IP2) /* int0 hardware line */
+ ar7_cascade();
+ else
+ spurious_interrupt();
+}
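The interrupt controller keeps one status/set/clear word per group of 32 primary interrupts, which is what the REG_OFFSET() arithmetic at the top of the file encodes. A worked example for a hypothetical hardware irq 35, using the macros defined above (the number 35 is only for illustration):

/* Illustrative only -- not part of the patch. */
static void example_unmask_hwirq35(void)
{
	unsigned int hwirq = 35;		/* hypothetical, second bank */
	unsigned int off = ESR_OFFSET(hwirq);	/* (35/32) * 0x4 + 0x20 = 0x24 */

	writel(1 << (hwirq % 32), REG(off));	/* bit 3 of the second enable-set word */
}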
diff --git a/arch/mips/ar7/memory.c b/arch/mips/ar7/memory.c
new file mode 100644
index 000000000..787716c5e
--- /dev/null
+++ b/arch/mips/ar7/memory.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
+ */
+#include <linux/memblock.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/pfn.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/swap.h>
+
+#include <asm/bootinfo.h>
+#include <asm/page.h>
+#include <asm/sections.h>
+
+#include <asm/mach-ar7/ar7.h>
+
+static int __init memsize(void)
+{
+ u32 size = (64 << 20);
+ u32 *addr = (u32 *)KSEG1ADDR(AR7_SDRAM_BASE + size - 4);
+ u32 *kernel_end = (u32 *)KSEG1ADDR(CPHYSADDR((u32)&_end));
+ u32 *tmpaddr = addr;
+
+ while (tmpaddr > kernel_end) {
+ *tmpaddr = (u32)tmpaddr;
+ size >>= 1;
+ tmpaddr -= size >> 2;
+ }
+
+ do {
+ tmpaddr += size >> 2;
+ if (*tmpaddr != (u32)tmpaddr)
+ break;
+ size <<= 1;
+ } while (size < (64 << 20));
+
+ writel((u32)tmpaddr, &addr);
+
+ return size;
+}
+
+void __init prom_meminit(void)
+{
+ unsigned long pages;
+
+ pages = memsize() >> PAGE_SHIFT;
+ memblock_add(PHYS_OFFSET, pages << PAGE_SHIFT);
+}
+
+void __init prom_free_prom_memory(void)
+{
+ /* Nothing to free */
+}
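memsize() finds the amount of SDRAM by tagging the last word of each candidate size with its own address on the way down, then walking back up until a tag no longer reads back: on a smaller, address-aliased module the later, lower writes overwrite the earlier, higher ones. The sketch below restates the technique in isolation; the 4 KiB floor and the 'win' argument are made up, and the real code bounds the downward pass by the kernel image end instead.

/* Illustrative only -- not part of the patch. */
static unsigned long probe_ram(volatile unsigned long *win, unsigned long max)
{
	volatile unsigned long *p = win + max / sizeof(*p) - 1;
	unsigned long step, size;

	/* Downward pass: tag the last word of every candidate size. */
	for (step = max / 2; step >= 4096; step /= 2) {
		*p = (unsigned long)p;
		p -= step / sizeof(*p);
	}

	/* Upward pass: the first clobbered tag marks where addresses wrap. */
	for (size = 4096; size < max; size *= 2) {
		p += size / sizeof(*p);
		if (*p != (unsigned long)p)
			break;
	}
	return size;
}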
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
new file mode 100644
index 000000000..215149a85
--- /dev/null
+++ b/arch/mips/ar7/platform.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2006,2007 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2006,2007 Eugene Konev <ejka@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/physmap.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/vlynq.h>
+#include <linux/leds.h>
+#include <linux/string.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+
+#include <asm/addrspace.h>
+#include <asm/mach-ar7/ar7.h>
+#include <asm/mach-ar7/prom.h>
+
+/*****************************************************************************
+ * VLYNQ Bus
+ ****************************************************************************/
+struct plat_vlynq_data {
+ struct plat_vlynq_ops ops;
+ int gpio_bit;
+ int reset_bit;
+};
+
+static int vlynq_on(struct vlynq_device *dev)
+{
+ int ret;
+ struct plat_vlynq_data *pdata = dev->dev.platform_data;
+
+ ret = gpio_request(pdata->gpio_bit, "vlynq");
+ if (ret)
+ goto out;
+
+ ar7_device_reset(pdata->reset_bit);
+
+ ret = ar7_gpio_disable(pdata->gpio_bit);
+ if (ret)
+ goto out_enabled;
+
+ ret = ar7_gpio_enable(pdata->gpio_bit);
+ if (ret)
+ goto out_enabled;
+
+ ret = gpio_direction_output(pdata->gpio_bit, 0);
+ if (ret)
+ goto out_gpio_enabled;
+
+ msleep(50);
+
+ gpio_set_value(pdata->gpio_bit, 1);
+
+ msleep(50);
+
+ return 0;
+
+out_gpio_enabled:
+ ar7_gpio_disable(pdata->gpio_bit);
+out_enabled:
+ ar7_device_disable(pdata->reset_bit);
+ gpio_free(pdata->gpio_bit);
+out:
+ return ret;
+}
+
+static void vlynq_off(struct vlynq_device *dev)
+{
+ struct plat_vlynq_data *pdata = dev->dev.platform_data;
+
+ ar7_gpio_disable(pdata->gpio_bit);
+ gpio_free(pdata->gpio_bit);
+ ar7_device_disable(pdata->reset_bit);
+}
+
+static struct resource vlynq_low_res[] = {
+ {
+ .name = "regs",
+ .flags = IORESOURCE_MEM,
+ .start = AR7_REGS_VLYNQ0,
+ .end = AR7_REGS_VLYNQ0 + 0xff,
+ },
+ {
+ .name = "irq",
+ .flags = IORESOURCE_IRQ,
+ .start = 29,
+ .end = 29,
+ },
+ {
+ .name = "mem",
+ .flags = IORESOURCE_MEM,
+ .start = 0x04000000,
+ .end = 0x04ffffff,
+ },
+ {
+ .name = "devirq",
+ .flags = IORESOURCE_IRQ,
+ .start = 80,
+ .end = 111,
+ },
+};
+
+static struct resource vlynq_high_res[] = {
+ {
+ .name = "regs",
+ .flags = IORESOURCE_MEM,
+ .start = AR7_REGS_VLYNQ1,
+ .end = AR7_REGS_VLYNQ1 + 0xff,
+ },
+ {
+ .name = "irq",
+ .flags = IORESOURCE_IRQ,
+ .start = 33,
+ .end = 33,
+ },
+ {
+ .name = "mem",
+ .flags = IORESOURCE_MEM,
+ .start = 0x0c000000,
+ .end = 0x0cffffff,
+ },
+ {
+ .name = "devirq",
+ .flags = IORESOURCE_IRQ,
+ .start = 112,
+ .end = 143,
+ },
+};
+
+static struct plat_vlynq_data vlynq_low_data = {
+ .ops = {
+ .on = vlynq_on,
+ .off = vlynq_off,
+ },
+ .reset_bit = 20,
+ .gpio_bit = 18,
+};
+
+static struct plat_vlynq_data vlynq_high_data = {
+ .ops = {
+ .on = vlynq_on,
+ .off = vlynq_off,
+ },
+ .reset_bit = 16,
+ .gpio_bit = 19,
+};
+
+static struct platform_device vlynq_low = {
+ .id = 0,
+ .name = "vlynq",
+ .dev = {
+ .platform_data = &vlynq_low_data,
+ },
+ .resource = vlynq_low_res,
+ .num_resources = ARRAY_SIZE(vlynq_low_res),
+};
+
+static struct platform_device vlynq_high = {
+ .id = 1,
+ .name = "vlynq",
+ .dev = {
+ .platform_data = &vlynq_high_data,
+ },
+ .resource = vlynq_high_res,
+ .num_resources = ARRAY_SIZE(vlynq_high_res),
+};
+
+/*****************************************************************************
+ * Flash
+ ****************************************************************************/
+static struct resource physmap_flash_resource = {
+ .name = "mem",
+ .flags = IORESOURCE_MEM,
+ .start = 0x10000000,
+ .end = 0x107fffff,
+};
+
+static const char *ar7_probe_types[] = { "ar7part", NULL };
+
+static struct physmap_flash_data physmap_flash_data = {
+ .width = 2,
+ .part_probe_types = ar7_probe_types,
+};
+
+static struct platform_device physmap_flash = {
+ .name = "physmap-flash",
+ .dev = {
+ .platform_data = &physmap_flash_data,
+ },
+ .resource = &physmap_flash_resource,
+ .num_resources = 1,
+};
+
+/*****************************************************************************
+ * Ethernet
+ ****************************************************************************/
+static struct resource cpmac_low_res[] = {
+ {
+ .name = "regs",
+ .flags = IORESOURCE_MEM,
+ .start = AR7_REGS_MAC0,
+ .end = AR7_REGS_MAC0 + 0x7ff,
+ },
+ {
+ .name = "irq",
+ .flags = IORESOURCE_IRQ,
+ .start = 27,
+ .end = 27,
+ },
+};
+
+static struct resource cpmac_high_res[] = {
+ {
+ .name = "regs",
+ .flags = IORESOURCE_MEM,
+ .start = AR7_REGS_MAC1,
+ .end = AR7_REGS_MAC1 + 0x7ff,
+ },
+ {
+ .name = "irq",
+ .flags = IORESOURCE_IRQ,
+ .start = 41,
+ .end = 41,
+ },
+};
+
+static struct fixed_phy_status fixed_phy_status __initdata = {
+ .link = 1,
+ .speed = 100,
+ .duplex = 1,
+};
+
+static struct plat_cpmac_data cpmac_low_data = {
+ .reset_bit = 17,
+ .power_bit = 20,
+ .phy_mask = 0x80000000,
+};
+
+static struct plat_cpmac_data cpmac_high_data = {
+ .reset_bit = 21,
+ .power_bit = 22,
+ .phy_mask = 0x7fffffff,
+};
+
+static u64 cpmac_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device cpmac_low = {
+ .id = 0,
+ .name = "cpmac",
+ .dev = {
+ .dma_mask = &cpmac_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &cpmac_low_data,
+ },
+ .resource = cpmac_low_res,
+ .num_resources = ARRAY_SIZE(cpmac_low_res),
+};
+
+static struct platform_device cpmac_high = {
+ .id = 1,
+ .name = "cpmac",
+ .dev = {
+ .dma_mask = &cpmac_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &cpmac_high_data,
+ },
+ .resource = cpmac_high_res,
+ .num_resources = ARRAY_SIZE(cpmac_high_res),
+};
+
+static void __init cpmac_get_mac(int instance, unsigned char *dev_addr)
+{
+ char name[5], *mac;
+
+ sprintf(name, "mac%c", 'a' + instance);
+ mac = prom_getenv(name);
+ if (!mac && instance) {
+ sprintf(name, "mac%c", 'a');
+ mac = prom_getenv(name);
+ }
+
+ if (mac) {
+ if (!mac_pton(mac, dev_addr)) {
+ pr_warn("cannot parse mac address, using random address\n");
+ eth_random_addr(dev_addr);
+ }
+ } else
+ eth_random_addr(dev_addr);
+}
+
+/*****************************************************************************
+ * USB
+ ****************************************************************************/
+static struct resource usb_res[] = {
+ {
+ .name = "regs",
+ .flags = IORESOURCE_MEM,
+ .start = AR7_REGS_USB,
+ .end = AR7_REGS_USB + 0xff,
+ },
+ {
+ .name = "irq",
+ .flags = IORESOURCE_IRQ,
+ .start = 32,
+ .end = 32,
+ },
+ {
+ .name = "mem",
+ .flags = IORESOURCE_MEM,
+ .start = 0x03400000,
+ .end = 0x03401fff,
+ },
+};
+
+static struct platform_device ar7_udc = {
+ .name = "ar7_udc",
+ .resource = usb_res,
+ .num_resources = ARRAY_SIZE(usb_res),
+};
+
+/*****************************************************************************
+ * LEDs
+ ****************************************************************************/
+static const struct gpio_led default_leds[] = {
+ {
+ .name = "status",
+ .gpio = 8,
+ .active_low = 1,
+ },
+};
+
+static const struct gpio_led titan_leds[] = {
+ { .name = "status", .gpio = 8, .active_low = 1, },
+ { .name = "wifi", .gpio = 13, .active_low = 1, },
+};
+
+static const struct gpio_led dsl502t_leds[] = {
+ {
+ .name = "status",
+ .gpio = 9,
+ .active_low = 1,
+ },
+ {
+ .name = "ethernet",
+ .gpio = 7,
+ .active_low = 1,
+ },
+ {
+ .name = "usb",
+ .gpio = 12,
+ .active_low = 1,
+ },
+};
+
+static const struct gpio_led dg834g_leds[] = {
+ {
+ .name = "ppp",
+ .gpio = 6,
+ .active_low = 1,
+ },
+ {
+ .name = "status",
+ .gpio = 7,
+ .active_low = 1,
+ },
+ {
+ .name = "adsl",
+ .gpio = 8,
+ .active_low = 1,
+ },
+ {
+ .name = "wifi",
+ .gpio = 12,
+ .active_low = 1,
+ },
+ {
+ .name = "power",
+ .gpio = 14,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+};
+
+static const struct gpio_led fb_sl_leds[] = {
+ {
+ .name = "1",
+ .gpio = 7,
+ },
+ {
+ .name = "2",
+ .gpio = 13,
+ .active_low = 1,
+ },
+ {
+ .name = "3",
+ .gpio = 10,
+ .active_low = 1,
+ },
+ {
+ .name = "4",
+ .gpio = 12,
+ .active_low = 1,
+ },
+ {
+ .name = "5",
+ .gpio = 9,
+ .active_low = 1,
+ },
+};
+
+static const struct gpio_led fb_fon_leds[] = {
+ {
+ .name = "1",
+ .gpio = 8,
+ },
+ {
+ .name = "2",
+ .gpio = 3,
+ .active_low = 1,
+ },
+ {
+ .name = "3",
+ .gpio = 5,
+ },
+ {
+ .name = "4",
+ .gpio = 4,
+ .active_low = 1,
+ },
+ {
+ .name = "5",
+ .gpio = 11,
+ .active_low = 1,
+ },
+};
+
+static const struct gpio_led gt701_leds[] = {
+ {
+ .name = "inet:green",
+ .gpio = 13,
+ .active_low = 1,
+ },
+ {
+ .name = "usb",
+ .gpio = 12,
+ .active_low = 1,
+ },
+ {
+ .name = "inet:red",
+ .gpio = 9,
+ .active_low = 1,
+ },
+ {
+ .name = "power:red",
+ .gpio = 7,
+ .active_low = 1,
+ },
+ {
+ .name = "power:green",
+ .gpio = 8,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "ethernet",
+ .gpio = 10,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_led_platform_data ar7_led_data;
+
+static struct platform_device ar7_gpio_leds = {
+ .name = "leds-gpio",
+ .dev = {
+ .platform_data = &ar7_led_data,
+ }
+};
+
+static void __init detect_leds(void)
+{
+ char *prid, *usb_prod;
+
+ /* Default LEDs */
+ ar7_led_data.num_leds = ARRAY_SIZE(default_leds);
+ ar7_led_data.leds = default_leds;
+
+ /* FIXME: the whole thing is unreliable */
+ prid = prom_getenv("ProductID");
+ usb_prod = prom_getenv("usb_prod");
+
+ /* If we can't get the product id from PROM, use the default LEDs */
+ if (!prid)
+ return;
+
+ if (strstr(prid, "Fritz_Box_FON")) {
+ ar7_led_data.num_leds = ARRAY_SIZE(fb_fon_leds);
+ ar7_led_data.leds = fb_fon_leds;
+ } else if (strstr(prid, "Fritz_Box_")) {
+ ar7_led_data.num_leds = ARRAY_SIZE(fb_sl_leds);
+ ar7_led_data.leds = fb_sl_leds;
+ } else if ((!strcmp(prid, "AR7RD") || !strcmp(prid, "AR7DB"))
+ && usb_prod != NULL && strstr(usb_prod, "DSL-502T")) {
+ ar7_led_data.num_leds = ARRAY_SIZE(dsl502t_leds);
+ ar7_led_data.leds = dsl502t_leds;
+ } else if (strstr(prid, "DG834")) {
+ ar7_led_data.num_leds = ARRAY_SIZE(dg834g_leds);
+ ar7_led_data.leds = dg834g_leds;
+ } else if (strstr(prid, "CYWM") || strstr(prid, "CYWL")) {
+ ar7_led_data.num_leds = ARRAY_SIZE(titan_leds);
+ ar7_led_data.leds = titan_leds;
+ } else if (strstr(prid, "GT701")) {
+ ar7_led_data.num_leds = ARRAY_SIZE(gt701_leds);
+ ar7_led_data.leds = gt701_leds;
+ }
+}
+
+/*****************************************************************************
+ * Watchdog
+ ****************************************************************************/
+static struct resource ar7_wdt_res = {
+ .name = "regs",
+ .flags = IORESOURCE_MEM,
+ .start = -1, /* Filled at runtime */
+ .end = -1, /* Filled at runtime */
+};
+
+static struct platform_device ar7_wdt = {
+ .name = "ar7_wdt",
+ .resource = &ar7_wdt_res,
+ .num_resources = 1,
+};
+
+/*****************************************************************************
+ * Init
+ ****************************************************************************/
+static int __init ar7_register_uarts(void)
+{
+#ifdef CONFIG_SERIAL_8250
+ static struct uart_port uart_port __initdata;
+ struct clk *bus_clk;
+ int res;
+
+ memset(&uart_port, 0, sizeof(struct uart_port));
+
+ bus_clk = clk_get(NULL, "bus");
+ if (IS_ERR(bus_clk))
+ panic("unable to get bus clk");
+
+ uart_port.type = PORT_AR7;
+ uart_port.uartclk = clk_get_rate(bus_clk) / 2;
+ uart_port.iotype = UPIO_MEM32;
+ uart_port.flags = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
+ uart_port.regshift = 2;
+
+ uart_port.line = 0;
+ uart_port.irq = AR7_IRQ_UART0;
+ uart_port.mapbase = AR7_REGS_UART0;
+ uart_port.membase = ioremap(uart_port.mapbase, 256);
+
+ res = early_serial_setup(&uart_port);
+ if (res)
+ return res;
+
+ /* Only TNETD73xx have a second serial port */
+ if (ar7_has_second_uart()) {
+ uart_port.line = 1;
+ uart_port.irq = AR7_IRQ_UART1;
+ uart_port.mapbase = UR8_REGS_UART1;
+ uart_port.membase = ioremap(uart_port.mapbase, 256);
+
+ res = early_serial_setup(&uart_port);
+ if (res)
+ return res;
+ }
+#endif
+
+ return 0;
+}
+
+static void __init titan_fixup_devices(void)
+{
+ /* Set vlynq0 data */
+ vlynq_low_data.reset_bit = 15;
+ vlynq_low_data.gpio_bit = 14;
+
+ /* Set vlynq1 data */
+ vlynq_high_data.reset_bit = 16;
+ vlynq_high_data.gpio_bit = 7;
+
+ /* Set vlynq0 resources */
+ vlynq_low_res[0].start = TITAN_REGS_VLYNQ0;
+ vlynq_low_res[0].end = TITAN_REGS_VLYNQ0 + 0xff;
+ vlynq_low_res[1].start = 33;
+ vlynq_low_res[1].end = 33;
+ vlynq_low_res[2].start = 0x0c000000;
+ vlynq_low_res[2].end = 0x0fffffff;
+ vlynq_low_res[3].start = 80;
+ vlynq_low_res[3].end = 111;
+
+ /* Set vlynq1 resources */
+ vlynq_high_res[0].start = TITAN_REGS_VLYNQ1;
+ vlynq_high_res[0].end = TITAN_REGS_VLYNQ1 + 0xff;
+ vlynq_high_res[1].start = 34;
+ vlynq_high_res[1].end = 34;
+ vlynq_high_res[2].start = 0x40000000;
+ vlynq_high_res[2].end = 0x43ffffff;
+ vlynq_high_res[3].start = 112;
+ vlynq_high_res[3].end = 143;
+
+ /* Set cpmac0 data */
+ cpmac_low_data.phy_mask = 0x40000000;
+
+ /* Set cpmac1 data */
+ cpmac_high_data.phy_mask = 0x80000000;
+
+ /* Set cpmac0 resources */
+ cpmac_low_res[0].start = TITAN_REGS_MAC0;
+ cpmac_low_res[0].end = TITAN_REGS_MAC0 + 0x7ff;
+
+ /* Set cpmac1 resources */
+ cpmac_high_res[0].start = TITAN_REGS_MAC1;
+ cpmac_high_res[0].end = TITAN_REGS_MAC1 + 0x7ff;
+}
+
+static int __init ar7_register_devices(void)
+{
+ void __iomem *bootcr;
+ u32 val;
+ int res;
+
+ res = ar7_gpio_init();
+ if (res)
+ pr_warn("unable to register gpios: %d\n", res);
+
+ res = ar7_register_uarts();
+ if (res)
+ pr_err("unable to setup uart(s): %d\n", res);
+
+ res = platform_device_register(&physmap_flash);
+ if (res)
+ pr_warn("unable to register physmap-flash: %d\n", res);
+
+ if (ar7_is_titan())
+ titan_fixup_devices();
+
+ ar7_device_disable(vlynq_low_data.reset_bit);
+ res = platform_device_register(&vlynq_low);
+ if (res)
+ pr_warn("unable to register vlynq-low: %d\n", res);
+
+ if (ar7_has_high_vlynq()) {
+ ar7_device_disable(vlynq_high_data.reset_bit);
+ res = platform_device_register(&vlynq_high);
+ if (res)
+ pr_warn("unable to register vlynq-high: %d\n", res);
+ }
+
+ if (ar7_has_high_cpmac()) {
+ res = fixed_phy_add(PHY_POLL, cpmac_high.id,
+ &fixed_phy_status);
+ if (!res) {
+ cpmac_get_mac(1, cpmac_high_data.dev_addr);
+
+ res = platform_device_register(&cpmac_high);
+ if (res)
+ pr_warn("unable to register cpmac-high: %d\n",
+ res);
+ } else
+ pr_warn("unable to add cpmac-high phy: %d\n", res);
+ } else
+ cpmac_low_data.phy_mask = 0xffffffff;
+
+ res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status);
+ if (!res) {
+ cpmac_get_mac(0, cpmac_low_data.dev_addr);
+ res = platform_device_register(&cpmac_low);
+ if (res)
+ pr_warn("unable to register cpmac-low: %d\n", res);
+ } else
+ pr_warn("unable to add cpmac-low phy: %d\n", res);
+
+ detect_leds();
+ res = platform_device_register(&ar7_gpio_leds);
+ if (res)
+ pr_warn("unable to register leds: %d\n", res);
+
+ res = platform_device_register(&ar7_udc);
+ if (res)
+ pr_warn("unable to register usb slave: %d\n", res);
+
+ /* Register watchdog only if enabled in hardware */
+ bootcr = ioremap(AR7_REGS_DCL, 4);
+ val = readl(bootcr);
+ iounmap(bootcr);
+ if (val & AR7_WDT_HW_ENA) {
+ if (ar7_has_high_vlynq())
+ ar7_wdt_res.start = UR8_REGS_WDT;
+ else
+ ar7_wdt_res.start = AR7_REGS_WDT;
+
+ ar7_wdt_res.end = ar7_wdt_res.start + 0x20;
+ res = platform_device_register(&ar7_wdt);
+ if (res)
+ pr_warn("unable to register watchdog: %d\n", res);
+ }
+
+ return 0;
+}
+device_initcall(ar7_register_devices);
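cpmac_get_mac() builds the environment variable name from the instance ("maca" for cpmac0, "macb" for cpmac1, falling back to "maca" when the second variable is missing) and leaves the parsing to mac_pton(). A minimal sketch of that path with a made-up environment value:

/* Illustrative only -- not part of the patch; the string is hypothetical. */
u8 addr[ETH_ALEN];

if (mac_pton("00:11:22:33:44:55", addr))
	;	/* addr[] now holds { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } */
else
	eth_random_addr(addr);	/* same fallback the board code uses */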
diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
new file mode 100644
index 000000000..5810d3993
--- /dev/null
+++ b/arch/mips/ar7/prom.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Putting things on the screen/serial line using YAMON's facilities.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/serial_reg.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <asm/bootinfo.h>
+#include <asm/setup.h>
+
+#include <asm/mach-ar7/ar7.h>
+#include <asm/mach-ar7/prom.h>
+
+#define MAX_ENTRY 80
+
+struct env_var {
+ char *name;
+ char *value;
+};
+
+static struct env_var adam2_env[MAX_ENTRY];
+
+char *prom_getenv(const char *name)
+{
+ int i;
+
+ for (i = 0; (i < MAX_ENTRY) && adam2_env[i].name; i++)
+ if (!strcmp(name, adam2_env[i].name))
+ return adam2_env[i].value;
+
+ return NULL;
+}
+EXPORT_SYMBOL(prom_getenv);
+
+static void __init ar7_init_cmdline(int argc, char *argv[])
+{
+ int i;
+
+ for (i = 1; i < argc; i++) {
+ strlcat(arcs_cmdline, argv[i], COMMAND_LINE_SIZE);
+ if (i < (argc - 1))
+ strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
+ }
+}
+
+struct psbl_rec {
+ u32 psbl_size;
+ u32 env_base;
+ u32 env_size;
+ u32 ffs_base;
+ u32 ffs_size;
+};
+
+static const char psp_env_version[] __initconst = "TIENV0.8";
+
+struct psp_env_chunk {
+ u8 num;
+ u8 ctrl;
+ u16 csum;
+ u8 len;
+ char data[11];
+} __packed;
+
+struct psp_var_map_entry {
+ u8 num;
+ char *value;
+};
+
+static const struct psp_var_map_entry psp_var_map[] = {
+ { 1, "cpufrequency" },
+ { 2, "memsize" },
+ { 3, "flashsize" },
+ { 4, "modetty0" },
+ { 5, "modetty1" },
+ { 8, "maca" },
+ { 9, "macb" },
+ { 28, "sysfrequency" },
+ { 38, "mipsfrequency" },
+};
+
+/*
+
+Well-known variable (num is looked up in table above for matching variable name)
+Example: cpufrequency=211968000
++----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
+| 01 |CTRL|CHECKSUM | 01 | _2 | _1 | _1 | _9 | _6 | _8 | _0 | _0 | _0 | \0 | FF
++----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
+
+Name=Value pair in a single chunk
+Example: NAME=VALUE
++----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
+| 00 |CTRL|CHECKSUM | 01 | _N | _A | _M | _E | _0 | _V | _A | _L | _U | _E | \0
++----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
+
+Name=Value pair in 2 chunks (len is the number of chunks)
+Example: bootloaderVersion=1.3.7.15
++----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
+| 00 |CTRL|CHECKSUM | 02 | _b | _o | _o | _t | _l | _o | _a | _d | _e | _r | _V
++----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
+| _e | _r | _s | _i | _o | _n | \0 | _1 | _. | _3 | _. | _7 | _. | _1 | _5 | \0
++----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
+
+Data is padded with 0xFF
+
+*/
+
+#define PSP_ENV_SIZE 4096
+
+static char psp_env_data[PSP_ENV_SIZE] = { 0, };
+
+static char * __init lookup_psp_var_map(u8 num)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(psp_var_map); i++)
+ if (psp_var_map[i].num == num)
+ return psp_var_map[i].value;
+
+ return NULL;
+}
+
+static void __init add_adam2_var(char *name, char *value)
+{
+ int i;
+
+ for (i = 0; i < MAX_ENTRY; i++) {
+ if (!adam2_env[i].name) {
+ adam2_env[i].name = name;
+ adam2_env[i].value = value;
+ return;
+ } else if (!strcmp(adam2_env[i].name, name)) {
+ adam2_env[i].value = value;
+ return;
+ }
+ }
+}
+
+static int __init parse_psp_env(void *psp_env_base)
+{
+ int i, n;
+ char *name, *value;
+ struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data;
+
+ memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE);
+
+ i = 1;
+ n = PSP_ENV_SIZE / sizeof(struct psp_env_chunk);
+ while (i < n) {
+ if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n))
+ break;
+ value = chunks[i].data;
+ if (chunks[i].num) {
+ name = lookup_psp_var_map(chunks[i].num);
+ } else {
+ name = value;
+ value += strlen(name) + 1;
+ }
+ if (name)
+ add_adam2_var(name, value);
+ i += chunks[i].len;
+ }
+ return 0;
+}
+
+static void __init ar7_init_env(struct env_var *env)
+{
+ int i;
+ struct psbl_rec *psbl = (struct psbl_rec *)(KSEG1ADDR(0x14000300));
+ void *psp_env = (void *)KSEG1ADDR(psbl->env_base);
+
+ if (strcmp(psp_env, psp_env_version) == 0) {
+ parse_psp_env(psp_env);
+ } else {
+ for (i = 0; i < MAX_ENTRY; i++, env++)
+ if (env->name)
+ add_adam2_var(env->name, env->value);
+ }
+}
+
+static void __init console_config(void)
+{
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ char console_string[40];
+ int baud = 0;
+ char parity = '\0', bits = '\0', flow = '\0';
+ char *s, *p;
+
+ if (strstr(arcs_cmdline, "console="))
+ return;
+
+ s = prom_getenv("modetty0");
+ if (s) {
+ baud = simple_strtoul(s, &p, 10);
+ s = p;
+ if (*s == ',')
+ s++;
+ if (*s)
+ parity = *s++;
+ if (*s == ',')
+ s++;
+ if (*s)
+ bits = *s++;
+ if (*s == ',')
+ s++;
+ if (*s == 'h')
+ flow = 'r';
+ }
+
+ if (baud == 0)
+ baud = 38400;
+ if (parity != 'n' && parity != 'o' && parity != 'e')
+ parity = 'n';
+ if (bits != '7' && bits != '8')
+ bits = '8';
+
+ if (flow == 'r')
+ sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
+ parity, bits, flow);
+ else
+ sprintf(console_string, " console=ttyS0,%d%c%c", baud, parity,
+ bits);
+ strlcat(arcs_cmdline, console_string, COMMAND_LINE_SIZE);
+#endif
+}
+
+void __init prom_init(void)
+{
+ ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
+ ar7_init_env((struct env_var *)fw_arg2);
+ console_config();
+}
+
+#define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))
+static inline unsigned int serial_in(int offset)
+{
+ return readl((void *)PORT(offset));
+}
+
+static inline void serial_out(int offset, int value)
+{
+ writel(value, (void *)PORT(offset));
+}
+
+void prom_putchar(char c)
+{
+ while ((serial_in(UART_LSR) & UART_LSR_TEMT) == 0)
+ ;
+ serial_out(UART_TX, c);
+}
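console_config() turns the bootloader's modetty0 setting into a console= argument. For a hypothetical modetty0 of "38400,n,8,hw" the parser above yields baud 38400, parity 'n', 8 data bits and flow 'r' (hardware handshake), so the code would effectively do:

/* Illustrative only -- not part of the patch; "38400,n,8,hw" is a made-up
 * modetty0 value. */
strlcat(arcs_cmdline, " console=ttyS0,38400n8r", COMMAND_LINE_SIZE);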
diff --git a/arch/mips/ar7/setup.c b/arch/mips/ar7/setup.c
new file mode 100644
index 000000000..352d5dbc7
--- /dev/null
+++ b/arch/mips/ar7/setup.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/pm.h>
+#include <linux/time.h>
+
+#include <asm/reboot.h>
+#include <asm/mach-ar7/ar7.h>
+#include <asm/mach-ar7/prom.h>
+
+static void ar7_machine_restart(char *command)
+{
+ u32 *softres_reg = ioremap(AR7_REGS_RESET + AR7_RESET_SOFTWARE, 1);
+
+ writel(1, softres_reg);
+}
+
+static void ar7_machine_halt(void)
+{
+ while (1)
+ ;
+}
+
+static void ar7_machine_power_off(void)
+{
+ u32 *power_reg = (u32 *)ioremap(AR7_REGS_POWER, 1);
+ u32 power_state = readl(power_reg) | (3 << 30);
+
+ writel(power_state, power_reg);
+ ar7_machine_halt();
+}
+
+const char *get_system_type(void)
+{
+ u16 chip_id = ar7_chip_id();
+ u16 titan_variant_id = titan_chip_id();
+
+ switch (chip_id) {
+ case AR7_CHIP_7100:
+ return "TI AR7 (TNETD7100)";
+ case AR7_CHIP_7200:
+ return "TI AR7 (TNETD7200)";
+ case AR7_CHIP_7300:
+ return "TI AR7 (TNETD7300)";
+ case AR7_CHIP_TITAN:
+ switch (titan_variant_id) {
+ case TITAN_CHIP_1050:
+ return "TI AR7 (TNETV1050)";
+ case TITAN_CHIP_1055:
+ return "TI AR7 (TNETV1055)";
+ case TITAN_CHIP_1056:
+ return "TI AR7 (TNETV1056)";
+ case TITAN_CHIP_1060:
+ return "TI AR7 (TNETV1060)";
+ }
+ fallthrough;
+ default:
+ return "TI AR7 (unknown)";
+ }
+}
+
+static int __init ar7_init_console(void)
+{
+ return 0;
+}
+console_initcall(ar7_init_console);
+
+/*
+ * Initializes basic routines and structure pointers, memory size (as
+ * given by the bios) and saves the command line.
+ */
+void __init plat_mem_setup(void)
+{
+ unsigned long io_base;
+
+ _machine_restart = ar7_machine_restart;
+ _machine_halt = ar7_machine_halt;
+ pm_power_off = ar7_machine_power_off;
+
+ io_base = (unsigned long)ioremap(AR7_REGS_BASE, 0x10000);
+ if (!io_base)
+ panic("Can't remap IO base!");
+ set_io_port_base(io_base);
+
+ prom_meminit();
+
+ printk(KERN_INFO "%s, ID: 0x%04x, Revision: 0x%02x\n",
+ get_system_type(), ar7_chip_id(), ar7_chip_rev());
+}
diff --git a/arch/mips/ar7/time.c b/arch/mips/ar7/time.c
new file mode 100644
index 000000000..72aa77d70
--- /dev/null
+++ b/arch/mips/ar7/time.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Setting up the clock on the MIPS boards.
+ */
+
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+
+#include <asm/time.h>
+#include <asm/mach-ar7/ar7.h>
+
+void __init plat_time_init(void)
+{
+ struct clk *cpu_clk;
+
+ /* Initialize ar7 clocks so the CPU clock frequency is correct */
+ ar7_init_clocks();
+
+ cpu_clk = clk_get(NULL, "cpu");
+ if (IS_ERR(cpu_clk)) {
+ printk(KERN_ERR "unable to get cpu clock\n");
+ return;
+ }
+
+ mips_hpt_frequency = clk_get_rate(cpu_clk) / 2;
+}
diff --git a/arch/mips/ath25/Kconfig b/arch/mips/ath25/Kconfig
new file mode 100644
index 000000000..3014c80cf
--- /dev/null
+++ b/arch/mips/ath25/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+config SOC_AR5312
+ bool "Atheros AR5312/AR2312+ SoC support"
+ depends on ATH25
+ default y
+
+config SOC_AR2315
+ bool "Atheros AR2315+ SoC support"
+ depends on ATH25
+ default y
+
+config PCI_AR2315
+ bool "Atheros AR2315 PCI controller support"
+ depends on SOC_AR2315
+ select ARCH_HAS_PHYS_TO_DMA
+ select FORCE_PCI
+ default y
diff --git a/arch/mips/ath25/Makefile b/arch/mips/ath25/Makefile
new file mode 100644
index 000000000..eabad7da4
--- /dev/null
+++ b/arch/mips/ath25/Makefile
@@ -0,0 +1,16 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2006 FON Technology, SL.
+# Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+# Copyright (C) 2006-2009 Felix Fietkau <nbd@openwrt.org>
+#
+
+obj-y += board.o prom.o devices.o
+
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+obj-$(CONFIG_SOC_AR5312) += ar5312.o
+obj-$(CONFIG_SOC_AR2315) += ar2315.o
diff --git a/arch/mips/ath25/Platform b/arch/mips/ath25/Platform
new file mode 100644
index 000000000..aef098b6f
--- /dev/null
+++ b/arch/mips/ath25/Platform
@@ -0,0 +1,5 @@
+#
+# Atheros AR531X/AR231X WiSoC
+#
+cflags-$(CONFIG_ATH25) += -I$(srctree)/arch/mips/include/asm/mach-ath25
+load-$(CONFIG_ATH25) += 0xffffffff80041000
diff --git a/arch/mips/ath25/ar2315.c b/arch/mips/ath25/ar2315.c
new file mode 100644
index 000000000..9dbed7b5e
--- /dev/null
+++ b/arch/mips/ath25/ar2315.c
@@ -0,0 +1,362 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
+ * Copyright (C) 2006 FON Technology, SL.
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2012 Alexandros C. Couloumbis <alex@ozo.com>
+ */
+
+/*
+ * Platform devices for Atheros AR2315 SoCs
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/memblock.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <asm/bootinfo.h>
+#include <asm/reboot.h>
+#include <asm/time.h>
+
+#include <ath25_platform.h>
+
+#include "devices.h"
+#include "ar2315.h"
+#include "ar2315_regs.h"
+
+static void __iomem *ar2315_rst_base;
+static struct irq_domain *ar2315_misc_irq_domain;
+
+static inline u32 ar2315_rst_reg_read(u32 reg)
+{
+ return __raw_readl(ar2315_rst_base + reg);
+}
+
+static inline void ar2315_rst_reg_write(u32 reg, u32 val)
+{
+ __raw_writel(val, ar2315_rst_base + reg);
+}
+
+static inline void ar2315_rst_reg_mask(u32 reg, u32 mask, u32 val)
+{
+ u32 ret = ar2315_rst_reg_read(reg);
+
+ ret &= ~mask;
+ ret |= val;
+ ar2315_rst_reg_write(reg, ret);
+}
+
+static irqreturn_t ar2315_ahb_err_handler(int cpl, void *dev_id)
+{
+ ar2315_rst_reg_write(AR2315_AHB_ERR0, AR2315_AHB_ERROR_DET);
+ ar2315_rst_reg_read(AR2315_AHB_ERR1);
+
+ pr_emerg("AHB fatal error\n");
+ machine_restart("AHB error"); /* Catastrophic failure */
+
+ return IRQ_HANDLED;
+}
+
+static void ar2315_misc_irq_handler(struct irq_desc *desc)
+{
+ u32 pending = ar2315_rst_reg_read(AR2315_ISR) &
+ ar2315_rst_reg_read(AR2315_IMR);
+ unsigned nr, misc_irq = 0;
+
+ if (pending) {
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+
+ nr = __ffs(pending);
+ misc_irq = irq_find_mapping(domain, nr);
+ }
+
+ if (misc_irq) {
+ if (nr == AR2315_MISC_IRQ_GPIO)
+ ar2315_rst_reg_write(AR2315_ISR, AR2315_ISR_GPIO);
+ else if (nr == AR2315_MISC_IRQ_WATCHDOG)
+ ar2315_rst_reg_write(AR2315_ISR, AR2315_ISR_WD);
+ generic_handle_irq(misc_irq);
+ } else {
+ spurious_interrupt();
+ }
+}
+
+static void ar2315_misc_irq_unmask(struct irq_data *d)
+{
+ ar2315_rst_reg_mask(AR2315_IMR, 0, BIT(d->hwirq));
+}
+
+static void ar2315_misc_irq_mask(struct irq_data *d)
+{
+ ar2315_rst_reg_mask(AR2315_IMR, BIT(d->hwirq), 0);
+}
+
+static struct irq_chip ar2315_misc_irq_chip = {
+ .name = "ar2315-misc",
+ .irq_unmask = ar2315_misc_irq_unmask,
+ .irq_mask = ar2315_misc_irq_mask,
+};
+
+static int ar2315_misc_irq_map(struct irq_domain *d, unsigned irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &ar2315_misc_irq_chip, handle_level_irq);
+ return 0;
+}
+
+static struct irq_domain_ops ar2315_misc_irq_domain_ops = {
+ .map = ar2315_misc_irq_map,
+};
+
+/*
+ * Called when an interrupt is received, this function
+ * determines exactly which interrupt it was, and it
+ * invokes the appropriate handler.
+ *
+ * Implicitly, we also define interrupt priority by
+ * choosing which to dispatch first.
+ */
+static void ar2315_irq_dispatch(void)
+{
+ u32 pending = read_c0_status() & read_c0_cause();
+
+ if (pending & CAUSEF_IP3)
+ do_IRQ(AR2315_IRQ_WLAN0);
+#ifdef CONFIG_PCI_AR2315
+ else if (pending & CAUSEF_IP5)
+ do_IRQ(AR2315_IRQ_LCBUS_PCI);
+#endif
+ else if (pending & CAUSEF_IP2)
+ do_IRQ(AR2315_IRQ_MISC);
+ else if (pending & CAUSEF_IP7)
+ do_IRQ(ATH25_IRQ_CPU_CLOCK);
+ else
+ spurious_interrupt();
+}
+
+void __init ar2315_arch_init_irq(void)
+{
+ struct irq_domain *domain;
+ unsigned irq;
+
+ ath25_irq_dispatch = ar2315_irq_dispatch;
+
+ domain = irq_domain_add_linear(NULL, AR2315_MISC_IRQ_COUNT,
+ &ar2315_misc_irq_domain_ops, NULL);
+ if (!domain)
+ panic("Failed to add IRQ domain");
+
+ irq = irq_create_mapping(domain, AR2315_MISC_IRQ_AHB);
+ if (request_irq(irq, ar2315_ahb_err_handler, 0, "ar2315-ahb-error",
+ NULL))
+ pr_err("Failed to register ar2315-ahb-error interrupt\n");
+
+ irq_set_chained_handler_and_data(AR2315_IRQ_MISC,
+ ar2315_misc_irq_handler, domain);
+
+ ar2315_misc_irq_domain = domain;
+}
+
+void __init ar2315_init_devices(void)
+{
+ /* Find board configuration */
+ ath25_find_config(AR2315_SPI_READ_BASE, AR2315_SPI_READ_SIZE);
+
+ ath25_add_wmac(0, AR2315_WLAN0_BASE, AR2315_IRQ_WLAN0);
+}
+
+static void ar2315_restart(char *command)
+{
+ void (*mips_reset_vec)(void) = (void *)0xbfc00000;
+
+ local_irq_disable();
+
+ /* try reset the system via reset control */
+ ar2315_rst_reg_write(AR2315_COLD_RESET, AR2317_RESET_SYSTEM);
+
+ /* Cold reset does not work on the AR2315/6, use the GPIO reset bits
+ * as a workaround. Give it some time to attempt a gpio based hardware
+ * reset (atheros reference design workaround) */
+
+ /* TODO: implement the GPIO reset workaround */
+
+ /* Some boards (e.g. Senao EOC-2610) don't implement the reset logic
+ * workaround. Attempt to jump to the mips reset location -
+ * the boot loader itself might be able to recover the system */
+ mips_reset_vec();
+}
+
+/*
+ * This table is indexed by bits 5..4 of the CLOCKCTL1 register
+ * to determine the predevisor value.
+ */
+static int clockctl1_predivide_table[4] __initdata = { 1, 2, 4, 5 };
+static int pllc_divide_table[5] __initdata = { 2, 3, 4, 6, 3 };
+
+static unsigned __init ar2315_sys_clk(u32 clock_ctl)
+{
+ unsigned int pllc_ctrl, cpu_div;
+ unsigned int pllc_out, refdiv, fdiv, divby2;
+ unsigned int clk_div;
+
+ pllc_ctrl = ar2315_rst_reg_read(AR2315_PLLC_CTL);
+ refdiv = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_REF_DIV);
+ refdiv = clockctl1_predivide_table[refdiv];
+ fdiv = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_FDBACK_DIV);
+ divby2 = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_ADD_FDBACK_DIV) + 1;
+ pllc_out = (40000000 / refdiv) * (2 * divby2) * fdiv;
+
+ /* clkm input selected */
+ switch (clock_ctl & AR2315_CPUCLK_CLK_SEL_M) {
+ case 0:
+ case 1:
+ clk_div = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_CLKM_DIV);
+ clk_div = pllc_divide_table[clk_div];
+ break;
+ case 2:
+ clk_div = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_CLKC_DIV);
+ clk_div = pllc_divide_table[clk_div];
+ break;
+ default:
+ pllc_out = 40000000;
+ clk_div = 1;
+ break;
+ }
+
+ cpu_div = ATH25_REG_MS(clock_ctl, AR2315_CPUCLK_CLK_DIV);
+ cpu_div = cpu_div * 2 ?: 1;
+
+ return pllc_out / (clk_div * cpu_div);
+}
+
+static inline unsigned ar2315_cpu_frequency(void)
+{
+ return ar2315_sys_clk(ar2315_rst_reg_read(AR2315_CPUCLK));
+}
+
+static inline unsigned ar2315_apb_frequency(void)
+{
+ return ar2315_sys_clk(ar2315_rst_reg_read(AR2315_AMBACLK));
+}
+
+void __init ar2315_plat_time_init(void)
+{
+ mips_hpt_frequency = ar2315_cpu_frequency() / 2;
+}
+
+void __init ar2315_plat_mem_setup(void)
+{
+ void __iomem *sdram_base;
+ u32 memsize, memcfg;
+ u32 devid;
+ u32 config;
+
+ /* Detect memory size */
+ sdram_base = ioremap(AR2315_SDRAMCTL_BASE,
+ AR2315_SDRAMCTL_SIZE);
+ memcfg = __raw_readl(sdram_base + AR2315_MEM_CFG);
+ memsize = 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_DATA_WIDTH);
+ memsize <<= 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_COL_WIDTH);
+ memsize <<= 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_ROW_WIDTH);
+ memsize <<= 3;
+ memblock_add(0, memsize);
+ iounmap(sdram_base);
+
+ ar2315_rst_base = ioremap(AR2315_RST_BASE, AR2315_RST_SIZE);
+
+ /* Detect the hardware based on the device ID */
+ devid = ar2315_rst_reg_read(AR2315_SREV) & AR2315_REV_CHIP;
+ switch (devid) {
+ case 0x91: /* Need to check */
+ ath25_soc = ATH25_SOC_AR2318;
+ break;
+ case 0x90:
+ ath25_soc = ATH25_SOC_AR2317;
+ break;
+ case 0x87:
+ ath25_soc = ATH25_SOC_AR2316;
+ break;
+ case 0x86:
+ default:
+ ath25_soc = ATH25_SOC_AR2315;
+ break;
+ }
+ ath25_board.devid = devid;
+
+ /* Clear any lingering AHB errors */
+ config = read_c0_config();
+ write_c0_config(config & ~0x3);
+ ar2315_rst_reg_write(AR2315_AHB_ERR0, AR2315_AHB_ERROR_DET);
+ ar2315_rst_reg_read(AR2315_AHB_ERR1);
+ ar2315_rst_reg_write(AR2315_WDT_CTRL, AR2315_WDT_CTRL_IGNORE);
+
+ _machine_restart = ar2315_restart;
+}
+
+#ifdef CONFIG_PCI_AR2315
+static struct resource ar2315_pci_res[] = {
+ {
+ .name = "ar2315-pci-ctrl",
+ .flags = IORESOURCE_MEM,
+ .start = AR2315_PCI_BASE,
+ .end = AR2315_PCI_BASE + AR2315_PCI_SIZE - 1,
+ },
+ {
+ .name = "ar2315-pci-ext",
+ .flags = IORESOURCE_MEM,
+ .start = AR2315_PCI_EXT_BASE,
+ .end = AR2315_PCI_EXT_BASE + AR2315_PCI_EXT_SIZE - 1,
+ },
+ {
+ .name = "ar2315-pci",
+ .flags = IORESOURCE_IRQ,
+ .start = AR2315_IRQ_LCBUS_PCI,
+ .end = AR2315_IRQ_LCBUS_PCI,
+ },
+};
+#endif
+
+void __init ar2315_arch_init(void)
+{
+ unsigned irq = irq_create_mapping(ar2315_misc_irq_domain,
+ AR2315_MISC_IRQ_UART0);
+
+ ath25_serial_setup(AR2315_UART0_BASE, irq, ar2315_apb_frequency());
+
+#ifdef CONFIG_PCI_AR2315
+ if (ath25_soc == ATH25_SOC_AR2315) {
+ /* Reset PCI DMA logic */
+ ar2315_rst_reg_mask(AR2315_RESET, 0, AR2315_RESET_PCIDMA);
+ msleep(20);
+ ar2315_rst_reg_mask(AR2315_RESET, AR2315_RESET_PCIDMA, 0);
+ msleep(20);
+
+ /* Configure endians */
+ ar2315_rst_reg_mask(AR2315_ENDIAN_CTL, 0, AR2315_CONFIG_PCIAHB |
+ AR2315_CONFIG_PCIAHB_BRIDGE);
+
+ /* Configure as PCI host with DMA */
+ ar2315_rst_reg_write(AR2315_PCICLK, AR2315_PCICLK_PLLC_CLKM |
+ (AR2315_PCICLK_IN_FREQ_DIV_6 <<
+ AR2315_PCICLK_DIV_S));
+ ar2315_rst_reg_mask(AR2315_AHB_ARB_CTL, 0, AR2315_ARB_PCI);
+ ar2315_rst_reg_mask(AR2315_IF_CTL, AR2315_IF_PCI_CLK_MASK |
+ AR2315_IF_MASK, AR2315_IF_PCI |
+ AR2315_IF_PCI_HOST | AR2315_IF_PCI_INTR |
+ (AR2315_IF_PCI_CLK_OUTPUT_CLK <<
+ AR2315_IF_PCI_CLK_SHIFT));
+
+ platform_device_register_simple("ar2315-pci", -1,
+ ar2315_pci_res,
+ ARRAY_SIZE(ar2315_pci_res));
+ }
+#endif
+}
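ar2315_sys_clk() derives every clock from the 40 MHz reference: the PLLC output is (40 MHz / refdiv) * 2 * divby2 * fdiv, which is then divided by the selected clkm/clkc divider and the CPU divider. With hypothetical field values (refdiv code 1 -> /2, fdiv 23, add_fdback 0 -> x2, clkm divider code 0 -> /2, CPU divider code 1 -> /2) the arithmetic works out as:

/* Illustrative only -- every register field value below is made up. */
unsigned int pllc_out = (40000000 / 2) * 2 * 23;	/* 920 MHz */
unsigned int cpu_clk = pllc_out / (2 * 2);		/* 230 MHz */
unsigned int hpt = cpu_clk / 2;				/* 115 MHz -> mips_hpt_frequency */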
diff --git a/arch/mips/ath25/ar2315.h b/arch/mips/ath25/ar2315.h
new file mode 100644
index 000000000..fccc64f6d
--- /dev/null
+++ b/arch/mips/ath25/ar2315.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __AR2315_H
+#define __AR2315_H
+
+#ifdef CONFIG_SOC_AR2315
+
+void ar2315_arch_init_irq(void);
+void ar2315_init_devices(void);
+void ar2315_plat_time_init(void);
+void ar2315_plat_mem_setup(void);
+void ar2315_arch_init(void);
+
+#else
+
+static inline void ar2315_arch_init_irq(void) {}
+static inline void ar2315_init_devices(void) {}
+static inline void ar2315_plat_time_init(void) {}
+static inline void ar2315_plat_mem_setup(void) {}
+static inline void ar2315_arch_init(void) {}
+
+#endif
+
+#endif /* __AR2315_H */
diff --git a/arch/mips/ath25/ar2315_regs.h b/arch/mips/ath25/ar2315_regs.h
new file mode 100644
index 000000000..16e86149c
--- /dev/null
+++ b/arch/mips/ath25/ar2315_regs.h
@@ -0,0 +1,410 @@
+/*
+ * Register definitions for AR2315+
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
+ * Copyright (C) 2006 FON Technology, SL.
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006-2008 Felix Fietkau <nbd@openwrt.org>
+ */
+
+#ifndef __ASM_MACH_ATH25_AR2315_REGS_H
+#define __ASM_MACH_ATH25_AR2315_REGS_H
+
+/*
+ * IRQs
+ */
+#define AR2315_IRQ_MISC (MIPS_CPU_IRQ_BASE + 2) /* C0_CAUSE: 0x0400 */
+#define AR2315_IRQ_WLAN0 (MIPS_CPU_IRQ_BASE + 3) /* C0_CAUSE: 0x0800 */
+#define AR2315_IRQ_ENET0 (MIPS_CPU_IRQ_BASE + 4) /* C0_CAUSE: 0x1000 */
+#define AR2315_IRQ_LCBUS_PCI (MIPS_CPU_IRQ_BASE + 5) /* C0_CAUSE: 0x2000 */
+#define AR2315_IRQ_WLAN0_POLL (MIPS_CPU_IRQ_BASE + 6) /* C0_CAUSE: 0x4000 */
+
+/*
+ * Miscellaneous interrupts, which share IP2.
+ */
+#define AR2315_MISC_IRQ_UART0 0
+#define AR2315_MISC_IRQ_I2C_RSVD 1
+#define AR2315_MISC_IRQ_SPI 2
+#define AR2315_MISC_IRQ_AHB 3
+#define AR2315_MISC_IRQ_APB 4
+#define AR2315_MISC_IRQ_TIMER 5
+#define AR2315_MISC_IRQ_GPIO 6
+#define AR2315_MISC_IRQ_WATCHDOG 7
+#define AR2315_MISC_IRQ_IR_RSVD 8
+#define AR2315_MISC_IRQ_COUNT 9
+
+/*
+ * Address map
+ */
+#define AR2315_SPI_READ_BASE 0x08000000 /* SPI flash */
+#define AR2315_SPI_READ_SIZE 0x01000000
+#define AR2315_WLAN0_BASE 0x10000000 /* Wireless MMR */
+#define AR2315_PCI_BASE 0x10100000 /* PCI MMR */
+#define AR2315_PCI_SIZE 0x00001000
+#define AR2315_SDRAMCTL_BASE 0x10300000 /* SDRAM MMR */
+#define AR2315_SDRAMCTL_SIZE 0x00000020
+#define AR2315_LOCAL_BASE 0x10400000 /* Local bus MMR */
+#define AR2315_ENET0_BASE 0x10500000 /* Ethernet MMR */
+#define AR2315_RST_BASE 0x11000000 /* Reset control MMR */
+#define AR2315_RST_SIZE 0x00000100
+#define AR2315_UART0_BASE 0x11100000 /* UART MMR */
+#define AR2315_SPI_MMR_BASE 0x11300000 /* SPI flash MMR */
+#define AR2315_SPI_MMR_SIZE 0x00000010
+#define AR2315_PCI_EXT_BASE 0x80000000 /* PCI external */
+#define AR2315_PCI_EXT_SIZE 0x40000000
+
+/*
+ * Configuration registers
+ */
+
+/* Cold reset register */
+#define AR2315_COLD_RESET 0x0000
+
+#define AR2315_RESET_COLD_AHB 0x00000001
+#define AR2315_RESET_COLD_APB 0x00000002
+#define AR2315_RESET_COLD_CPU 0x00000004
+#define AR2315_RESET_COLD_CPUWARM 0x00000008
+#define AR2315_RESET_SYSTEM (AR2315_RESET_COLD_CPU |\
+ AR2315_RESET_COLD_APB |\
+ AR2315_RESET_COLD_AHB) /* full system */
+#define AR2317_RESET_SYSTEM 0x00000010
+
+/* Reset register */
+#define AR2315_RESET 0x0004
+
+#define AR2315_RESET_WARM_WLAN0_MAC 0x00000001 /* warm reset WLAN0 MAC */
+#define AR2315_RESET_WARM_WLAN0_BB 0x00000002 /* warm reset WLAN0 BB */
+#define AR2315_RESET_MPEGTS_RSVD 0x00000004 /* warm reset MPEG-TS */
+#define AR2315_RESET_PCIDMA 0x00000008 /* warm reset PCI ahb/dma */
+#define AR2315_RESET_MEMCTL 0x00000010 /* warm reset mem control */
+#define AR2315_RESET_LOCAL 0x00000020 /* warm reset local bus */
+#define AR2315_RESET_I2C_RSVD 0x00000040 /* warm reset I2C bus */
+#define AR2315_RESET_SPI 0x00000080 /* warm reset SPI iface */
+#define AR2315_RESET_UART0 0x00000100 /* warm reset UART0 */
+#define AR2315_RESET_IR_RSVD 0x00000200 /* warm reset IR iface */
+#define AR2315_RESET_EPHY0 0x00000400 /* cold reset ENET0 phy */
+#define AR2315_RESET_ENET0 0x00000800 /* cold reset ENET0 MAC */
+
+/* AHB master arbitration control */
+#define AR2315_AHB_ARB_CTL 0x0008
+
+#define AR2315_ARB_CPU 0x00000001 /* CPU, default */
+#define AR2315_ARB_WLAN 0x00000002 /* WLAN */
+#define AR2315_ARB_MPEGTS_RSVD 0x00000004 /* MPEG-TS */
+#define AR2315_ARB_LOCAL 0x00000008 /* Local bus */
+#define AR2315_ARB_PCI 0x00000010 /* PCI bus */
+#define AR2315_ARB_ETHERNET 0x00000020 /* Ethernet */
+#define AR2315_ARB_RETRY 0x00000100 /* Retry policy (debug) */
+
+/* Config Register */
+#define AR2315_ENDIAN_CTL 0x000c
+
+#define AR2315_CONFIG_AHB 0x00000001 /* EC-AHB bridge endian */
+#define AR2315_CONFIG_WLAN 0x00000002 /* WLAN byteswap */
+#define AR2315_CONFIG_MPEGTS_RSVD 0x00000004 /* MPEG-TS byteswap */
+#define AR2315_CONFIG_PCI 0x00000008 /* PCI byteswap */
+#define AR2315_CONFIG_MEMCTL 0x00000010 /* Mem controller endian */
+#define AR2315_CONFIG_LOCAL 0x00000020 /* Local bus byteswap */
+#define AR2315_CONFIG_ETHERNET 0x00000040 /* Ethernet byteswap */
+#define AR2315_CONFIG_MERGE 0x00000200 /* CPU write buffer merge */
+#define AR2315_CONFIG_CPU 0x00000400 /* CPU big endian */
+#define AR2315_CONFIG_BIG 0x00000400
+#define AR2315_CONFIG_PCIAHB 0x00000800
+#define AR2315_CONFIG_PCIAHB_BRIDGE 0x00001000
+#define AR2315_CONFIG_SPI 0x00008000 /* SPI byteswap */
+#define AR2315_CONFIG_CPU_DRAM 0x00010000
+#define AR2315_CONFIG_CPU_PCI 0x00020000
+#define AR2315_CONFIG_CPU_MMR 0x00040000
+
+/* NMI control */
+#define AR2315_NMI_CTL 0x0010
+
+#define AR2315_NMI_EN 1
+
+/* Revision Register - Initial value is 0x3010 (WMAC 3.0, AR231X 1.0). */
+#define AR2315_SREV 0x0014
+
+#define AR2315_REV_MAJ 0x000000f0
+#define AR2315_REV_MAJ_S 4
+#define AR2315_REV_MIN 0x0000000f
+#define AR2315_REV_MIN_S 0
+#define AR2315_REV_CHIP (AR2315_REV_MAJ | AR2315_REV_MIN)
+
+/* Interface Enable */
+#define AR2315_IF_CTL 0x0018
+
+#define AR2315_IF_MASK 0x00000007
+#define AR2315_IF_DISABLED 0 /* Disable all */
+#define AR2315_IF_PCI 1 /* PCI */
+#define AR2315_IF_TS_LOCAL 2 /* Local bus */
+#define AR2315_IF_ALL 3 /* Emulation only */
+#define AR2315_IF_LOCAL_HOST 0x00000008
+#define AR2315_IF_PCI_HOST 0x00000010
+#define AR2315_IF_PCI_INTR 0x00000020
+#define AR2315_IF_PCI_CLK_MASK 0x00030000
+#define AR2315_IF_PCI_CLK_INPUT 0
+#define AR2315_IF_PCI_CLK_OUTPUT_LOW 1
+#define AR2315_IF_PCI_CLK_OUTPUT_CLK 2
+#define AR2315_IF_PCI_CLK_OUTPUT_HIGH 3
+#define AR2315_IF_PCI_CLK_SHIFT 16
+
+/* APB Interrupt control */
+#define AR2315_ISR 0x0020
+#define AR2315_IMR 0x0024
+#define AR2315_GISR 0x0028
+
+#define AR2315_ISR_UART0 0x00000001 /* high speed UART */
+#define AR2315_ISR_I2C_RSVD 0x00000002 /* I2C bus */
+#define AR2315_ISR_SPI 0x00000004 /* SPI bus */
+#define AR2315_ISR_AHB 0x00000008 /* AHB error */
+#define AR2315_ISR_APB 0x00000010 /* APB error */
+#define AR2315_ISR_TIMER 0x00000020 /* Timer */
+#define AR2315_ISR_GPIO 0x00000040 /* GPIO */
+#define AR2315_ISR_WD 0x00000080 /* Watchdog */
+#define AR2315_ISR_IR_RSVD 0x00000100 /* IR */
+
+#define AR2315_GISR_MISC 0x00000001 /* Misc */
+#define AR2315_GISR_WLAN0 0x00000002 /* WLAN0 */
+#define AR2315_GISR_MPEGTS_RSVD 0x00000004 /* MPEG-TS */
+#define AR2315_GISR_LOCALPCI 0x00000008 /* Local/PCI bus */
+#define AR2315_GISR_WMACPOLL 0x00000010
+#define AR2315_GISR_TIMER 0x00000020
+#define AR2315_GISR_ETHERNET 0x00000040 /* Ethernet */
+
+/* Generic timer */
+#define AR2315_TIMER 0x0030
+#define AR2315_RELOAD 0x0034
+
+/* Watchdog timer */
+#define AR2315_WDT_TIMER 0x0038
+#define AR2315_WDT_CTRL 0x003c
+
+#define AR2315_WDT_CTRL_IGNORE 0x00000000 /* ignore expiration */
+#define AR2315_WDT_CTRL_NMI 0x00000001 /* NMI on watchdog */
+#define AR2315_WDT_CTRL_RESET 0x00000002 /* reset on watchdog */
+
+/* CPU Performance Counters */
+#define AR2315_PERFCNT0 0x0048
+#define AR2315_PERFCNT1 0x004c
+
+#define AR2315_PERF0_DATAHIT 0x00000001 /* Count Data Cache Hits */
+#define AR2315_PERF0_DATAMISS 0x00000002 /* Count Data Cache Misses */
+#define AR2315_PERF0_INSTHIT 0x00000004 /* Count Instruction Cache Hits */
+#define AR2315_PERF0_INSTMISS 0x00000008 /* Count Instruction Cache Misses */
+#define AR2315_PERF0_ACTIVE 0x00000010 /* Count Active Processor Cycles */
+#define AR2315_PERF0_WBHIT 0x00000020 /* Count CPU Write Buffer Hits */
+#define AR2315_PERF0_WBMISS 0x00000040 /* Count CPU Write Buffer Misses */
+
+#define AR2315_PERF1_EB_ARDY 0x00000001 /* Count EB_ARdy signal */
+#define AR2315_PERF1_EB_AVALID 0x00000002 /* Count EB_AValid signal */
+#define AR2315_PERF1_EB_WDRDY 0x00000004 /* Count EB_WDRdy signal */
+#define AR2315_PERF1_EB_RDVAL 0x00000008 /* Count EB_RdVal signal */
+#define AR2315_PERF1_VRADDR 0x00000010 /* Count valid read address cycles*/
+#define AR2315_PERF1_VWADDR 0x00000020 /* Count valid write address cycl.*/
+#define AR2315_PERF1_VWDATA 0x00000040 /* Count valid write data cycles */
+
+/* AHB Error Reporting */
+#define AR2315_AHB_ERR0 0x0050 /* error */
+#define AR2315_AHB_ERR1 0x0054 /* haddr */
+#define AR2315_AHB_ERR2 0x0058 /* hwdata */
+#define AR2315_AHB_ERR3 0x005c /* hrdata */
+#define AR2315_AHB_ERR4 0x0060 /* status */
+
+#define AR2315_AHB_ERROR_DET 1 /* AHB Error has been detected, */
+ /* write 1 to clear all bits in ERR0 */
+#define AR2315_AHB_ERROR_OVR 2 /* AHB Error overflow has been detected */
+#define AR2315_AHB_ERROR_WDT 4 /* AHB Error due to wdt instead of hresp */
+
+#define AR2315_PROCERR_HMAST 0x0000000f
+#define AR2315_PROCERR_HMAST_DFLT 0
+#define AR2315_PROCERR_HMAST_WMAC 1
+#define AR2315_PROCERR_HMAST_ENET 2
+#define AR2315_PROCERR_HMAST_PCIENDPT 3
+#define AR2315_PROCERR_HMAST_LOCAL 4
+#define AR2315_PROCERR_HMAST_CPU 5
+#define AR2315_PROCERR_HMAST_PCITGT 6
+#define AR2315_PROCERR_HMAST_S 0
+#define AR2315_PROCERR_HWRITE 0x00000010
+#define AR2315_PROCERR_HSIZE 0x00000060
+#define AR2315_PROCERR_HSIZE_S 5
+#define AR2315_PROCERR_HTRANS 0x00000180
+#define AR2315_PROCERR_HTRANS_S 7
+#define AR2315_PROCERR_HBURST 0x00000e00
+#define AR2315_PROCERR_HBURST_S 9
+
+/* Clock Control */
+#define AR2315_PLLC_CTL 0x0064
+#define AR2315_PLLV_CTL 0x0068
+#define AR2315_CPUCLK 0x006c
+#define AR2315_AMBACLK 0x0070
+#define AR2315_SYNCCLK 0x0074
+#define AR2315_DSL_SLEEP_CTL 0x0080
+#define AR2315_DSL_SLEEP_DUR 0x0084
+
+/* PLLc Control fields */
+#define AR2315_PLLC_REF_DIV_M 0x00000003
+#define AR2315_PLLC_REF_DIV_S 0
+#define AR2315_PLLC_FDBACK_DIV_M 0x0000007c
+#define AR2315_PLLC_FDBACK_DIV_S 2
+#define AR2315_PLLC_ADD_FDBACK_DIV_M 0x00000080
+#define AR2315_PLLC_ADD_FDBACK_DIV_S 7
+#define AR2315_PLLC_CLKC_DIV_M 0x0001c000
+#define AR2315_PLLC_CLKC_DIV_S 14
+#define AR2315_PLLC_CLKM_DIV_M 0x00700000
+#define AR2315_PLLC_CLKM_DIV_S 20
+
+/* CPU CLK Control fields */
+#define AR2315_CPUCLK_CLK_SEL_M 0x00000003
+#define AR2315_CPUCLK_CLK_SEL_S 0
+#define AR2315_CPUCLK_CLK_DIV_M 0x0000000c
+#define AR2315_CPUCLK_CLK_DIV_S 2
+
+/* AMBA CLK Control fields */
+#define AR2315_AMBACLK_CLK_SEL_M 0x00000003
+#define AR2315_AMBACLK_CLK_SEL_S 0
+#define AR2315_AMBACLK_CLK_DIV_M 0x0000000c
+#define AR2315_AMBACLK_CLK_DIV_S 2
+
+/* PCI Clock Control */
+#define AR2315_PCICLK 0x00a4
+
+#define AR2315_PCICLK_INPUT_M 0x00000003
+#define AR2315_PCICLK_INPUT_S 0
+#define AR2315_PCICLK_PLLC_CLKM 0
+#define AR2315_PCICLK_PLLC_CLKM1 1
+#define AR2315_PCICLK_PLLC_CLKC 2
+#define AR2315_PCICLK_REF_CLK 3
+#define AR2315_PCICLK_DIV_M 0x0000000c
+#define AR2315_PCICLK_DIV_S 2
+#define AR2315_PCICLK_IN_FREQ 0
+#define AR2315_PCICLK_IN_FREQ_DIV_6 1
+#define AR2315_PCICLK_IN_FREQ_DIV_8 2
+#define AR2315_PCICLK_IN_FREQ_DIV_10 3
+
+/* Observation Control Register */
+#define AR2315_OCR 0x00b0
+
+#define AR2315_OCR_GPIO0_IRIN 0x00000040
+#define AR2315_OCR_GPIO1_IROUT 0x00000080
+#define AR2315_OCR_GPIO3_RXCLR 0x00000200
+
+/* General Clock Control */
+#define AR2315_MISCCLK 0x00b4
+
+#define AR2315_MISCCLK_PLLBYPASS_EN 0x00000001
+#define AR2315_MISCCLK_PROCREFCLK 0x00000002
+
+/*
+ * SDRAM Controller
+ * - No read or write buffers are included.
+ */
+#define AR2315_MEM_CFG 0x0000
+#define AR2315_MEM_CTRL 0x000c
+#define AR2315_MEM_REF 0x0010
+
+#define AR2315_MEM_CFG_DATA_WIDTH_M 0x00006000
+#define AR2315_MEM_CFG_DATA_WIDTH_S 13
+#define AR2315_MEM_CFG_COL_WIDTH_M 0x00001e00
+#define AR2315_MEM_CFG_COL_WIDTH_S 9
+#define AR2315_MEM_CFG_ROW_WIDTH_M 0x000001e0
+#define AR2315_MEM_CFG_ROW_WIDTH_S 5
+#define AR2315_MEM_CFG_BANKADDR_BITS_M 0x00000018
+#define AR2315_MEM_CFG_BANKADDR_BITS_S 3
+
+/*
+ * Local Bus Interface Registers
+ */
+#define AR2315_LB_CONFIG 0x0000
+
+#define AR2315_LBCONF_OE 0x00000001 /* =1 OE is low-true */
+#define AR2315_LBCONF_CS0 0x00000002 /* =1 first CS is low-true */
+#define AR2315_LBCONF_CS1 0x00000004 /* =1 2nd CS is low-true */
+#define AR2315_LBCONF_RDY 0x00000008 /* =1 RDY is low-true */
+#define AR2315_LBCONF_WE 0x00000010 /* =1 Write En is low-true */
+#define AR2315_LBCONF_WAIT 0x00000020 /* =1 WAIT is low-true */
+#define AR2315_LBCONF_ADS 0x00000040 /* =1 Adr Strobe is low-true */
+#define AR2315_LBCONF_MOT 0x00000080 /* =0 Intel, =1 Motorola */
+#define AR2315_LBCONF_8CS 0x00000100 /* =1 8 bits CS, 0= 16bits */
+#define AR2315_LBCONF_8DS 0x00000200 /* =1 8 bits Data S, 0=16bits */
+#define AR2315_LBCONF_ADS_EN 0x00000400 /* =1 Enable ADS */
+#define AR2315_LBCONF_ADR_OE 0x00000800 /* =1 Adr cap on OE, WE or DS */
+#define AR2315_LBCONF_ADDT_MUX 0x00001000 /* =1 Adr and Data share bus */
+#define AR2315_LBCONF_DATA_OE 0x00002000 /* =1 Data cap on OE, WE, DS */
+#define AR2315_LBCONF_16DATA 0x00004000 /* =1 Data is 16 bits wide */
+#define AR2315_LBCONF_SWAPDT 0x00008000 /* =1 Byte swap data */
+#define AR2315_LBCONF_SYNC 0x00010000 /* =1 Bus synchronous to clk */
+#define AR2315_LBCONF_INT 0x00020000 /* =1 Intr is low true */
+#define AR2315_LBCONF_INT_CTR0 0x00000000 /* GND high-Z, Vdd is high-Z */
+#define AR2315_LBCONF_INT_CTR1 0x00040000 /* GND drive, Vdd is high-Z */
+#define AR2315_LBCONF_INT_CTR2 0x00080000 /* GND high-Z, Vdd drive */
+#define AR2315_LBCONF_INT_CTR3 0x000c0000 /* GND drive, Vdd drive */
+#define AR2315_LBCONF_RDY_WAIT 0x00100000 /* =1 RDY is negative of WAIT */
+#define AR2315_LBCONF_INT_PULSE 0x00200000 /* =1 Interrupt is a pulse */
+#define AR2315_LBCONF_ENABLE 0x00400000 /* =1 Falcon respond to LB */
+
+#define AR2315_LB_CLKSEL 0x0004
+
+#define AR2315_LBCLK_EXT 0x00000001 /* use external clk for lb */
+
+#define AR2315_LB_1MS 0x0008
+
+#define AR2315_LB1MS_MASK 0x0003ffff /* # of AHB clk cycles in 1ms */
+
+#define AR2315_LB_MISCCFG 0x000c
+
+#define AR2315_LBM_TXD_EN 0x00000001 /* Enable TXD for fragments */
+#define AR2315_LBM_RX_INTEN 0x00000002 /* Enable LB ints on RX ready */
+#define AR2315_LBM_MBOXWR_INTEN 0x00000004 /* Enable LB ints on mbox wr */
+#define AR2315_LBM_MBOXRD_INTEN 0x00000008 /* Enable LB ints on mbox rd */
+#define AR2315_LMB_DESCSWAP_EN 0x00000010 /* Byte swap desc enable */
+#define AR2315_LBM_TIMEOUT_M 0x00ffff80
+#define AR2315_LBM_TIMEOUT_S 7
+#define AR2315_LBM_PORTMUX 0x07000000
+
+#define AR2315_LB_RXTSOFF 0x0010
+
+#define AR2315_LB_TX_CHAIN_EN 0x0100
+
+#define AR2315_LB_TXEN_0 0x00000001
+#define AR2315_LB_TXEN_1 0x00000002
+#define AR2315_LB_TXEN_2 0x00000004
+#define AR2315_LB_TXEN_3 0x00000008
+
+#define AR2315_LB_TX_CHAIN_DIS 0x0104
+#define AR2315_LB_TX_DESC_PTR 0x0200
+
+#define AR2315_LB_RX_CHAIN_EN 0x0400
+
+#define AR2315_LB_RXEN 0x00000001
+
+#define AR2315_LB_RX_CHAIN_DIS 0x0404
+#define AR2315_LB_RX_DESC_PTR 0x0408
+
+#define AR2315_LB_INT_STATUS 0x0500
+
+#define AR2315_LB_INT_TX_DESC 0x00000001
+#define AR2315_LB_INT_TX_OK 0x00000002
+#define AR2315_LB_INT_TX_ERR 0x00000004
+#define AR2315_LB_INT_TX_EOF 0x00000008
+#define AR2315_LB_INT_RX_DESC 0x00000010
+#define AR2315_LB_INT_RX_OK 0x00000020
+#define AR2315_LB_INT_RX_ERR 0x00000040
+#define AR2315_LB_INT_RX_EOF 0x00000080
+#define AR2315_LB_INT_TX_TRUNC 0x00000100
+#define AR2315_LB_INT_TX_STARVE 0x00000200
+#define AR2315_LB_INT_LB_TIMEOUT 0x00000400
+#define AR2315_LB_INT_LB_ERR 0x00000800
+#define AR2315_LB_INT_MBOX_WR 0x00001000
+#define AR2315_LB_INT_MBOX_RD 0x00002000
+
+/* Bit definitions for INT MASK are the same as INT_STATUS */
+#define AR2315_LB_INT_MASK 0x0504
+
+#define AR2315_LB_INT_EN 0x0508
+#define AR2315_LB_MBOX 0x0600
+
+#endif /* __ASM_MACH_ATH25_AR2315_REGS_H */
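The *_M/*_S pairs defined above are consumed through the ATH25_REG_MS() helper used in ar2315.c. Its real definition lives in the mach-ath25 headers, which are not part of this diff; it is presumably a plain mask-and-shift of the form sketched here:

/* Presumed shape only -- check the asm/mach-ath25 headers for the real macro. */
#define ATH25_REG_MS(_val, _field)	(((_val) & _field##_M) >> _field##_S)

/* e.g. ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_FDBACK_DIV)
 *	== (pllc_ctrl & 0x0000007c) >> 2 */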
diff --git a/arch/mips/ath25/ar5312.c b/arch/mips/ath25/ar5312.c
new file mode 100644
index 000000000..23c879f4b
--- /dev/null
+++ b/arch/mips/ath25/ar5312.c
@@ -0,0 +1,391 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
+ * Copyright (C) 2006 FON Technology, SL.
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006-2009 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2012 Alexandros C. Couloumbis <alex@ozo.com>
+ */
+
+/*
+ * Platform devices for Atheros AR5312 SoCs
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/memblock.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/physmap.h>
+#include <linux/reboot.h>
+#include <asm/bootinfo.h>
+#include <asm/reboot.h>
+#include <asm/time.h>
+
+#include <ath25_platform.h>
+
+#include "devices.h"
+#include "ar5312.h"
+#include "ar5312_regs.h"
+
+static void __iomem *ar5312_rst_base;
+static struct irq_domain *ar5312_misc_irq_domain;
+
+static inline u32 ar5312_rst_reg_read(u32 reg)
+{
+ return __raw_readl(ar5312_rst_base + reg);
+}
+
+static inline void ar5312_rst_reg_write(u32 reg, u32 val)
+{
+ __raw_writel(val, ar5312_rst_base + reg);
+}
+
+static inline void ar5312_rst_reg_mask(u32 reg, u32 mask, u32 val)
+{
+ u32 ret = ar5312_rst_reg_read(reg);
+
+ ret &= ~mask;
+ ret |= val;
+ ar5312_rst_reg_write(reg, ret);
+}
+
+static irqreturn_t ar5312_ahb_err_handler(int cpl, void *dev_id)
+{
+ u32 proc1 = ar5312_rst_reg_read(AR5312_PROC1);
+ u32 proc_addr = ar5312_rst_reg_read(AR5312_PROCADDR); /* clears error */
+ u32 dma1 = ar5312_rst_reg_read(AR5312_DMA1);
+ u32 dma_addr = ar5312_rst_reg_read(AR5312_DMAADDR); /* clears error */
+
+ pr_emerg("AHB interrupt: PROCADDR=0x%8.8x PROC1=0x%8.8x DMAADDR=0x%8.8x DMA1=0x%8.8x\n",
+ proc_addr, proc1, dma_addr, dma1);
+
+ machine_restart("AHB error"); /* Catastrophic failure */
+ return IRQ_HANDLED;
+}
+
+static void ar5312_misc_irq_handler(struct irq_desc *desc)
+{
+ u32 pending = ar5312_rst_reg_read(AR5312_ISR) &
+ ar5312_rst_reg_read(AR5312_IMR);
+ unsigned nr, misc_irq = 0;
+
+ if (pending) {
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+
+ nr = __ffs(pending);
+ misc_irq = irq_find_mapping(domain, nr);
+ }
+
+ if (misc_irq) {
+ generic_handle_irq(misc_irq);
+ if (nr == AR5312_MISC_IRQ_TIMER)
+ ar5312_rst_reg_read(AR5312_TIMER); /* ack the timer interrupt */
+ } else {
+ spurious_interrupt();
+ }
+}
+
+/* Enable the specified AR5312_MISC_IRQ interrupt */
+static void ar5312_misc_irq_unmask(struct irq_data *d)
+{
+ ar5312_rst_reg_mask(AR5312_IMR, 0, BIT(d->hwirq));
+}
+
+/* Disable the specified AR5312_MISC_IRQ interrupt */
+static void ar5312_misc_irq_mask(struct irq_data *d)
+{
+ ar5312_rst_reg_mask(AR5312_IMR, BIT(d->hwirq), 0);
+ ar5312_rst_reg_read(AR5312_IMR); /* flush write buffer */
+}
+
+static struct irq_chip ar5312_misc_irq_chip = {
+ .name = "ar5312-misc",
+ .irq_unmask = ar5312_misc_irq_unmask,
+ .irq_mask = ar5312_misc_irq_mask,
+};
+
+static int ar5312_misc_irq_map(struct irq_domain *d, unsigned irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &ar5312_misc_irq_chip, handle_level_irq);
+ return 0;
+}
+
+static struct irq_domain_ops ar5312_misc_irq_domain_ops = {
+ .map = ar5312_misc_irq_map,
+};
+
+static void ar5312_irq_dispatch(void)
+{
+ u32 pending = read_c0_status() & read_c0_cause();
+
+ if (pending & CAUSEF_IP2)
+ do_IRQ(AR5312_IRQ_WLAN0);
+ else if (pending & CAUSEF_IP5)
+ do_IRQ(AR5312_IRQ_WLAN1);
+ else if (pending & CAUSEF_IP6)
+ do_IRQ(AR5312_IRQ_MISC);
+ else if (pending & CAUSEF_IP7)
+ do_IRQ(ATH25_IRQ_CPU_CLOCK);
+ else
+ spurious_interrupt();
+}
+
+void __init ar5312_arch_init_irq(void)
+{
+ struct irq_domain *domain;
+ unsigned irq;
+
+ ath25_irq_dispatch = ar5312_irq_dispatch;
+
+ domain = irq_domain_add_linear(NULL, AR5312_MISC_IRQ_COUNT,
+ &ar5312_misc_irq_domain_ops, NULL);
+ if (!domain)
+ panic("Failed to add IRQ domain");
+
+ irq = irq_create_mapping(domain, AR5312_MISC_IRQ_AHB_PROC);
+ if (request_irq(irq, ar5312_ahb_err_handler, 0, "ar5312-ahb-error",
+ NULL))
+ pr_err("Failed to register ar5312-ahb-error interrupt\n");
+
+ irq_set_chained_handler_and_data(AR5312_IRQ_MISC,
+ ar5312_misc_irq_handler, domain);
+
+ ar5312_misc_irq_domain = domain;
+}
+
+static struct physmap_flash_data ar5312_flash_data = {
+ .width = 2,
+};
+
+static struct resource ar5312_flash_resource = {
+ .start = AR5312_FLASH_BASE,
+ .end = AR5312_FLASH_BASE + AR5312_FLASH_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device ar5312_physmap_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev.platform_data = &ar5312_flash_data,
+ .resource = &ar5312_flash_resource,
+ .num_resources = 1,
+};
+
+static void __init ar5312_flash_init(void)
+{
+ void __iomem *flashctl_base;
+ u32 ctl;
+
+ flashctl_base = ioremap(AR5312_FLASHCTL_BASE,
+ AR5312_FLASHCTL_SIZE);
+
+ ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL0);
+ ctl &= AR5312_FLASHCTL_MW;
+
+ /* fixup flash width */
+ switch (ctl) {
+ case AR5312_FLASHCTL_MW16:
+ ar5312_flash_data.width = 2;
+ break;
+ case AR5312_FLASHCTL_MW8:
+ default:
+ ar5312_flash_data.width = 1;
+ break;
+ }
+
+ /*
+ * Configure flash bank 0.
+ * Assume 8M window size. Flash will be aliased if it's smaller
+ */
+ ctl |= AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC_8M | AR5312_FLASHCTL_RBLE;
+ ctl |= 0x01 << AR5312_FLASHCTL_IDCY_S;
+ ctl |= 0x07 << AR5312_FLASHCTL_WST1_S;
+ ctl |= 0x07 << AR5312_FLASHCTL_WST2_S;
+ __raw_writel(ctl, flashctl_base + AR5312_FLASHCTL0);
+
+ /* Disable other flash banks */
+ ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL1);
+ ctl &= ~(AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC);
+ __raw_writel(ctl, flashctl_base + AR5312_FLASHCTL1);
+ ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL2);
+ ctl &= ~(AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC);
+ __raw_writel(ctl, flashctl_base + AR5312_FLASHCTL2);
+
+ iounmap(flashctl_base);
+}
+
+void __init ar5312_init_devices(void)
+{
+ struct ath25_boarddata *config;
+
+ ar5312_flash_init();
+
+ /* Locate board/radio config data */
+ ath25_find_config(AR5312_FLASH_BASE, AR5312_FLASH_SIZE);
+ config = ath25_board.config;
+
+ /* AR2313 has CPU minor rev. 10 */
+ if ((current_cpu_data.processor_id & 0xff) == 0x0a)
+ ath25_soc = ATH25_SOC_AR2313;
+
+ /* AR2312 shares the same Silicon ID as AR5312 */
+ else if (config->flags & BD_ISCASPER)
+ ath25_soc = ATH25_SOC_AR2312;
+
+ /* Everything else is probably AR5312 or compatible */
+ else
+ ath25_soc = ATH25_SOC_AR5312;
+
+ platform_device_register(&ar5312_physmap_flash);
+
+ switch (ath25_soc) {
+ case ATH25_SOC_AR5312:
+ if (!ath25_board.radio)
+ return;
+
+ if (!(config->flags & BD_WLAN0))
+ break;
+
+ ath25_add_wmac(0, AR5312_WLAN0_BASE, AR5312_IRQ_WLAN0);
+ break;
+ case ATH25_SOC_AR2312:
+ case ATH25_SOC_AR2313:
+ if (!ath25_board.radio)
+ return;
+ break;
+ default:
+ break;
+ }
+
+ if (config->flags & BD_WLAN1)
+ ath25_add_wmac(1, AR5312_WLAN1_BASE, AR5312_IRQ_WLAN1);
+}
+
+static void ar5312_restart(char *command)
+{
+ /* reset the system */
+ local_irq_disable();
+ while (1)
+ ar5312_rst_reg_write(AR5312_RESET, AR5312_RESET_SYSTEM);
+}
+
+/*
+ * This table is indexed by bits 5..4 of the CLOCKCTL1 register
+ * to determine the predivisor value.
+ */
+static unsigned clockctl1_predivide_table[4] __initdata = { 1, 2, 4, 5 };
+
+static unsigned __init ar5312_cpu_frequency(void)
+{
+ u32 scratch, devid, clock_ctl1;
+ u32 predivide_mask, multiplier_mask, doubler_mask;
+ unsigned predivide_shift, multiplier_shift;
+ unsigned predivide_select, predivisor, multiplier;
+
+ /* Trust the bootrom's idea of cpu frequency. */
+ scratch = ar5312_rst_reg_read(AR5312_SCRATCH);
+ if (scratch)
+ return scratch;
+
+ devid = ar5312_rst_reg_read(AR5312_REV);
+ devid = (devid & AR5312_REV_MAJ) >> AR5312_REV_MAJ_S;
+ if (devid == AR5312_REV_MAJ_AR2313) {
+ predivide_mask = AR2313_CLOCKCTL1_PREDIVIDE_MASK;
+ predivide_shift = AR2313_CLOCKCTL1_PREDIVIDE_SHIFT;
+ multiplier_mask = AR2313_CLOCKCTL1_MULTIPLIER_MASK;
+ multiplier_shift = AR2313_CLOCKCTL1_MULTIPLIER_SHIFT;
+ doubler_mask = AR2313_CLOCKCTL1_DOUBLER_MASK;
+ } else { /* AR5312 and AR2312 */
+ predivide_mask = AR5312_CLOCKCTL1_PREDIVIDE_MASK;
+ predivide_shift = AR5312_CLOCKCTL1_PREDIVIDE_SHIFT;
+ multiplier_mask = AR5312_CLOCKCTL1_MULTIPLIER_MASK;
+ multiplier_shift = AR5312_CLOCKCTL1_MULTIPLIER_SHIFT;
+ doubler_mask = AR5312_CLOCKCTL1_DOUBLER_MASK;
+ }
+
+ /*
+ * Clocking is derived from a fixed 40MHz input clock.
+ *
+ * cpu_freq = (input_clock / predivisor) * MULT (MULT is the PLL multiplier)
+ * sys_freq = cpu_freq / 4 (used for APB clock, serial,
+ * flash, Timer, Watchdog Timer)
+ *
+ * cnt_freq = cpu_freq / 2 (use for CPU count/compare)
+ *
+ * So, for example, with a PLL multiplier of 5, we have
+ *
+ * cpu_freq = 200MHz
+ * sys_freq = 50MHz
+ * cnt_freq = 100MHz
+ *
+ * We compute the CPU frequency, based on PLL settings.
+ */
+
+ clock_ctl1 = ar5312_rst_reg_read(AR5312_CLOCKCTL1);
+ predivide_select = (clock_ctl1 & predivide_mask) >> predivide_shift;
+ predivisor = clockctl1_predivide_table[predivide_select];
+ multiplier = (clock_ctl1 & multiplier_mask) >> multiplier_shift;
+
+ if (clock_ctl1 & doubler_mask)
+ multiplier <<= 1;
+
+ return (40000000 / predivisor) * multiplier;
+}
+
+static inline unsigned ar5312_sys_frequency(void)
+{
+ return ar5312_cpu_frequency() / 4;
+}
+
+void __init ar5312_plat_time_init(void)
+{
+ mips_hpt_frequency = ar5312_cpu_frequency() / 2;
+}
+
+void __init ar5312_plat_mem_setup(void)
+{
+ void __iomem *sdram_base;
+ u32 memsize, memcfg, bank0_ac, bank1_ac;
+ u32 devid;
+
+ /* Detect memory size */
+ sdram_base = ioremap(AR5312_SDRAMCTL_BASE,
+ AR5312_SDRAMCTL_SIZE);
+ memcfg = __raw_readl(sdram_base + AR5312_MEM_CFG1);
+ bank0_ac = ATH25_REG_MS(memcfg, AR5312_MEM_CFG1_AC0);
+ bank1_ac = ATH25_REG_MS(memcfg, AR5312_MEM_CFG1_AC1);
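+ /*
+ * Each populated bank contributes 2^(AC + 1) MB of SDRAM; e.g. an
+ * address-check value of 4 on bank 0 with bank 1 empty would give
+ * 32 MB (illustrative figures).
+ */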
+ memsize = (bank0_ac ? (1 << (bank0_ac + 1)) : 0) +
+ (bank1_ac ? (1 << (bank1_ac + 1)) : 0);
+ memsize <<= 20;
+ memblock_add(0, memsize);
+ iounmap(sdram_base);
+
+ ar5312_rst_base = ioremap(AR5312_RST_BASE, AR5312_RST_SIZE);
+
+ devid = ar5312_rst_reg_read(AR5312_REV);
+ devid >>= AR5312_REV_WMAC_MIN_S;
+ devid &= AR5312_REV_CHIP;
+ ath25_board.devid = (u16)devid;
+
+ /* Clear any lingering AHB errors */
+ ar5312_rst_reg_read(AR5312_PROCADDR);
+ ar5312_rst_reg_read(AR5312_DMAADDR);
+ ar5312_rst_reg_write(AR5312_WDT_CTRL, AR5312_WDT_CTRL_IGNORE);
+
+ _machine_restart = ar5312_restart;
+}
+
+void __init ar5312_arch_init(void)
+{
+ unsigned irq = irq_create_mapping(ar5312_misc_irq_domain,
+ AR5312_MISC_IRQ_UART0);
+
+ ath25_serial_setup(AR5312_UART0_BASE, irq, ar5312_sys_frequency());
+}
diff --git a/arch/mips/ath25/ar5312.h b/arch/mips/ath25/ar5312.h
new file mode 100644
index 000000000..67518a59a
--- /dev/null
+++ b/arch/mips/ath25/ar5312.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __AR5312_H
+#define __AR5312_H
+
+#ifdef CONFIG_SOC_AR5312
+
+void ar5312_arch_init_irq(void);
+void ar5312_init_devices(void);
+void ar5312_plat_time_init(void);
+void ar5312_plat_mem_setup(void);
+void ar5312_arch_init(void);
+
+#else
+
+static inline void ar5312_arch_init_irq(void) {}
+static inline void ar5312_init_devices(void) {}
+static inline void ar5312_plat_time_init(void) {}
+static inline void ar5312_plat_mem_setup(void) {}
+static inline void ar5312_arch_init(void) {}
+
+#endif
+
+#endif /* __AR5312_H */
diff --git a/arch/mips/ath25/ar5312_regs.h b/arch/mips/ath25/ar5312_regs.h
new file mode 100644
index 000000000..4b947f967
--- /dev/null
+++ b/arch/mips/ath25/ar5312_regs.h
@@ -0,0 +1,224 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ */
+
+#ifndef __ASM_MACH_ATH25_AR5312_REGS_H
+#define __ASM_MACH_ATH25_AR5312_REGS_H
+
+/*
+ * IRQs
+ */
+#define AR5312_IRQ_WLAN0 (MIPS_CPU_IRQ_BASE + 2) /* C0_CAUSE: 0x0400 */
+#define AR5312_IRQ_ENET0 (MIPS_CPU_IRQ_BASE + 3) /* C0_CAUSE: 0x0800 */
+#define AR5312_IRQ_ENET1 (MIPS_CPU_IRQ_BASE + 4) /* C0_CAUSE: 0x1000 */
+#define AR5312_IRQ_WLAN1 (MIPS_CPU_IRQ_BASE + 5) /* C0_CAUSE: 0x2000 */
+#define AR5312_IRQ_MISC (MIPS_CPU_IRQ_BASE + 6) /* C0_CAUSE: 0x4000 */
+
+/*
+ * Miscellaneous interrupts, which share IP6.
+ */
+#define AR5312_MISC_IRQ_TIMER 0
+#define AR5312_MISC_IRQ_AHB_PROC 1
+#define AR5312_MISC_IRQ_AHB_DMA 2
+#define AR5312_MISC_IRQ_GPIO 3
+#define AR5312_MISC_IRQ_UART0 4
+#define AR5312_MISC_IRQ_UART0_DMA 5
+#define AR5312_MISC_IRQ_WATCHDOG 6
+#define AR5312_MISC_IRQ_LOCAL 7
+#define AR5312_MISC_IRQ_SPI 8
+#define AR5312_MISC_IRQ_COUNT 9
+
+/*
+ * Address Map
+ *
+ * The AR5312 supports 2 enet MACs, even though many reference boards only
+ * actually use 1 of them (i.e. only MAC 0 is actually connected to an enet
+ * PHY or PHY switch). The AR2312 supports 1 enet MAC.
+ */
+#define AR5312_WLAN0_BASE 0x18000000
+#define AR5312_ENET0_BASE 0x18100000
+#define AR5312_ENET1_BASE 0x18200000
+#define AR5312_SDRAMCTL_BASE 0x18300000
+#define AR5312_SDRAMCTL_SIZE 0x00000010
+#define AR5312_FLASHCTL_BASE 0x18400000
+#define AR5312_FLASHCTL_SIZE 0x00000010
+#define AR5312_WLAN1_BASE 0x18500000
+#define AR5312_UART0_BASE 0x1c000000 /* UART MMR */
+#define AR5312_GPIO_BASE 0x1c002000
+#define AR5312_GPIO_SIZE 0x00000010
+#define AR5312_RST_BASE 0x1c003000
+#define AR5312_RST_SIZE 0x00000100
+#define AR5312_FLASH_BASE 0x1e000000
+#define AR5312_FLASH_SIZE 0x00800000
+
+/*
+ * Need these defines to determine true number of ethernet MACs
+ */
+#define AR5312_AR5312_REV2 0x0052 /* AR5312 WMAC (AP31) */
+#define AR5312_AR5312_REV7 0x0057 /* AR5312 WMAC (AP30-040) */
+#define AR5312_AR2313_REV8 0x0058 /* AR2313 WMAC (AP43-030) */
+
+/* Reset/Timer Block Address Map */
+#define AR5312_TIMER 0x0000 /* countdown timer */
+#define AR5312_RELOAD 0x0004 /* timer reload value */
+#define AR5312_WDT_CTRL 0x0008 /* watchdog cntrl */
+#define AR5312_WDT_TIMER 0x000c /* watchdog timer */
+#define AR5312_ISR 0x0010 /* Intr Status Reg */
+#define AR5312_IMR 0x0014 /* Intr Mask Reg */
+#define AR5312_RESET 0x0020
+#define AR5312_CLOCKCTL1 0x0064
+#define AR5312_SCRATCH 0x006c
+#define AR5312_PROCADDR 0x0070
+#define AR5312_PROC1 0x0074
+#define AR5312_DMAADDR 0x0078
+#define AR5312_DMA1 0x007c
+#define AR5312_ENABLE 0x0080 /* interface enb */
+#define AR5312_REV 0x0090 /* revision */
+
+/* AR5312_WDT_CTRL register bit field definitions */
+#define AR5312_WDT_CTRL_IGNORE 0x00000000 /* ignore expiration */
+#define AR5312_WDT_CTRL_NMI 0x00000001
+#define AR5312_WDT_CTRL_RESET 0x00000002
+
+/* AR5312_ISR register bit field definitions */
+#define AR5312_ISR_TIMER 0x00000001
+#define AR5312_ISR_AHBPROC 0x00000002
+#define AR5312_ISR_AHBDMA 0x00000004
+#define AR5312_ISR_GPIO 0x00000008
+#define AR5312_ISR_UART0 0x00000010
+#define AR5312_ISR_UART0DMA 0x00000020
+#define AR5312_ISR_WD 0x00000040
+#define AR5312_ISR_LOCAL 0x00000080
+
+/* AR5312_RESET register bit field definitions */
+#define AR5312_RESET_SYSTEM 0x00000001 /* cold reset full system */
+#define AR5312_RESET_PROC 0x00000002 /* cold reset MIPS core */
+#define AR5312_RESET_WLAN0 0x00000004 /* cold reset WLAN MAC/BB */
+#define AR5312_RESET_EPHY0 0x00000008 /* cold reset ENET0 phy */
+#define AR5312_RESET_EPHY1 0x00000010 /* cold reset ENET1 phy */
+#define AR5312_RESET_ENET0 0x00000020 /* cold reset ENET0 MAC */
+#define AR5312_RESET_ENET1 0x00000040 /* cold reset ENET1 MAC */
+#define AR5312_RESET_UART0 0x00000100 /* cold reset UART0 */
+#define AR5312_RESET_WLAN1 0x00000200 /* cold reset WLAN MAC/BB */
+#define AR5312_RESET_APB 0x00000400 /* cold reset APB ar5312 */
+#define AR5312_RESET_WARM_PROC 0x00001000 /* warm reset MIPS core */
+#define AR5312_RESET_WARM_WLAN0_MAC 0x00002000 /* warm reset WLAN0 MAC */
+#define AR5312_RESET_WARM_WLAN0_BB 0x00004000 /* warm reset WLAN0 BB */
+#define AR5312_RESET_NMI 0x00010000 /* send an NMI to the CPU */
+#define AR5312_RESET_WARM_WLAN1_MAC 0x00020000 /* warm reset WLAN1 MAC */
+#define AR5312_RESET_WARM_WLAN1_BB 0x00040000 /* warm reset WLAN1 BB */
+#define AR5312_RESET_LOCAL_BUS 0x00080000 /* reset local bus */
+#define AR5312_RESET_WDOG 0x00100000 /* last reset was a wdt */
+
+#define AR5312_RESET_WMAC0_BITS (AR5312_RESET_WLAN0 |\
+ AR5312_RESET_WARM_WLAN0_MAC |\
+ AR5312_RESET_WARM_WLAN0_BB)
+
+#define AR5312_RESET_WMAC1_BITS (AR5312_RESET_WLAN1 |\
+ AR5312_RESET_WARM_WLAN1_MAC |\
+ AR5312_RESET_WARM_WLAN1_BB)
+
+/* AR5312_CLOCKCTL1 register bit field definitions */
+
+/* Valid for AR5312 and AR2312 */
+#define AR5312_CLOCKCTL1_PREDIVIDE_MASK 0x00000030
+#define AR5312_CLOCKCTL1_PREDIVIDE_SHIFT 4
+#define AR5312_CLOCKCTL1_MULTIPLIER_MASK 0x00001f00
+#define AR5312_CLOCKCTL1_MULTIPLIER_SHIFT 8
+#define AR5312_CLOCKCTL1_DOUBLER_MASK 0x00010000
+
+/* Valid for AR2313 */
+#define AR2313_CLOCKCTL1_PREDIVIDE_MASK 0x00003000
+#define AR2313_CLOCKCTL1_PREDIVIDE_SHIFT 12
+#define AR2313_CLOCKCTL1_MULTIPLIER_MASK 0x001f0000
+#define AR2313_CLOCKCTL1_MULTIPLIER_SHIFT 16
+#define AR2313_CLOCKCTL1_DOUBLER_MASK 0x00000000
+
+/* AR5312_ENABLE register bit field definitions */
+#define AR5312_ENABLE_WLAN0 0x00000001
+#define AR5312_ENABLE_ENET0 0x00000002
+#define AR5312_ENABLE_ENET1 0x00000004
+#define AR5312_ENABLE_UART_AND_WLAN1_PIO 0x00000008 /* UART & WLAN1 PIO */
+#define AR5312_ENABLE_WLAN1_DMA 0x00000010 /* WLAN1 DMAs */
+#define AR5312_ENABLE_WLAN1 (AR5312_ENABLE_UART_AND_WLAN1_PIO |\
+ AR5312_ENABLE_WLAN1_DMA)
+
+/* AR5312_REV register bit field definitions */
+#define AR5312_REV_WMAC_MAJ 0x0000f000
+#define AR5312_REV_WMAC_MAJ_S 12
+#define AR5312_REV_WMAC_MIN 0x00000f00
+#define AR5312_REV_WMAC_MIN_S 8
+#define AR5312_REV_MAJ 0x000000f0
+#define AR5312_REV_MAJ_S 4
+#define AR5312_REV_MIN 0x0000000f
+#define AR5312_REV_MIN_S 0
+#define AR5312_REV_CHIP (AR5312_REV_MAJ|AR5312_REV_MIN)
+
+/* Major revision numbers, bits 7..4 of Revision ID register */
+#define AR5312_REV_MAJ_AR5312 0x4
+#define AR5312_REV_MAJ_AR2313 0x5
+
+/* Minor revision numbers, bits 3..0 of Revision ID register */
+#define AR5312_REV_MIN_DUAL 0x0 /* Dual WLAN version */
+#define AR5312_REV_MIN_SINGLE 0x1 /* Single WLAN version */
+
+/*
+ * ARM Flash Controller -- 3 flash banks with either x8 or x16 devices
+ */
+#define AR5312_FLASHCTL0 0x0000
+#define AR5312_FLASHCTL1 0x0004
+#define AR5312_FLASHCTL2 0x0008
+
+/* AR5312_FLASHCTL register bit field definitions */
+#define AR5312_FLASHCTL_IDCY 0x0000000f /* Idle cycle turnaround time */
+#define AR5312_FLASHCTL_IDCY_S 0
+#define AR5312_FLASHCTL_WST1 0x000003e0 /* Wait state 1 */
+#define AR5312_FLASHCTL_WST1_S 5
+#define AR5312_FLASHCTL_RBLE 0x00000400 /* Read byte lane enable */
+#define AR5312_FLASHCTL_WST2 0x0000f800 /* Wait state 2 */
+#define AR5312_FLASHCTL_WST2_S 11
+#define AR5312_FLASHCTL_AC 0x00070000 /* Flash addr check (added) */
+#define AR5312_FLASHCTL_AC_S 16
+#define AR5312_FLASHCTL_AC_128K 0x00000000
+#define AR5312_FLASHCTL_AC_256K 0x00010000
+#define AR5312_FLASHCTL_AC_512K 0x00020000
+#define AR5312_FLASHCTL_AC_1M 0x00030000
+#define AR5312_FLASHCTL_AC_2M 0x00040000
+#define AR5312_FLASHCTL_AC_4M 0x00050000
+#define AR5312_FLASHCTL_AC_8M 0x00060000
+#define AR5312_FLASHCTL_AC_RES 0x00070000 /* 16MB is not supported */
+#define AR5312_FLASHCTL_E 0x00080000 /* Flash bank enable (added) */
+#define AR5312_FLASHCTL_BUSERR 0x01000000 /* Bus transfer error flag */
+#define AR5312_FLASHCTL_WPERR 0x02000000 /* Write protect error flag */
+#define AR5312_FLASHCTL_WP 0x04000000 /* Write protect */
+#define AR5312_FLASHCTL_BM 0x08000000 /* Burst mode */
+#define AR5312_FLASHCTL_MW 0x30000000 /* Mem width */
+#define AR5312_FLASHCTL_MW8 0x00000000 /* Mem width x8 */
+#define AR5312_FLASHCTL_MW16 0x10000000 /* Mem width x16 */
+#define AR5312_FLASHCTL_MW32 0x20000000 /* Mem width x32 (not supp) */
+#define AR5312_FLASHCTL_ATNR 0x00000000 /* Access == no retry */
+#define AR5312_FLASHCTL_ATR 0x80000000 /* Access == retry every */
+#define AR5312_FLASHCTL_ATR4 0xc0000000 /* Access == retry every 4 */
+
+/*
+ * ARM SDRAM Controller -- just enough to determine memory size
+ */
+#define AR5312_MEM_CFG1 0x0004
+
+#define AR5312_MEM_CFG1_AC0_M 0x00000700 /* bank 0: SDRAM addr check */
+#define AR5312_MEM_CFG1_AC0_S 8
+#define AR5312_MEM_CFG1_AC1_M 0x00007000 /* bank 1: SDRAM addr check */
+#define AR5312_MEM_CFG1_AC1_S 12
+
+#endif /* __ASM_MACH_ATH25_AR5312_REGS_H */
diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c
new file mode 100644
index 000000000..cb99f9739
--- /dev/null
+++ b/arch/mips/ath25/board.c
@@ -0,0 +1,236 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
+ * Copyright (C) 2006 FON Technology, SL.
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006-2009 Felix Fietkau <nbd@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/irq_cpu.h>
+#include <asm/reboot.h>
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+
+#include <ath25_platform.h>
+#include "devices.h"
+#include "ar5312.h"
+#include "ar2315.h"
+
+void (*ath25_irq_dispatch)(void);
+
+static inline bool check_radio_magic(const void __iomem *addr)
+{
+ addr += 0x7a; /* offset for flash magic */
+ return (__raw_readb(addr) == 0x5a) && (__raw_readb(addr + 1) == 0xa5);
+}
+
+static inline bool check_notempty(const void __iomem *addr)
+{
+ return __raw_readl(addr) != 0xffffffff;
+}
+
+static inline bool check_board_data(const void __iomem *addr, bool broken)
+{
+ /* config magic found */
+ if (__raw_readl(addr) == ATH25_BD_MAGIC)
+ return true;
+
+ if (!broken)
+ return false;
+
+ /* broken board data detected, use radio data to find the
+ * offset, user will fix this */
+
+ if (check_radio_magic(addr + 0x1000))
+ return true;
+ if (check_radio_magic(addr + 0xf8))
+ return true;
+
+ return false;
+}
+
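+/*
+ * Scan backwards from the end of flash in 0x1000 steps (over the last
+ * 0x30000 bytes) looking for the board configuration signature.
+ */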
+static const void __iomem * __init find_board_config(const void __iomem *limit,
+ const bool broken)
+{
+ const void __iomem *addr;
+ const void __iomem *begin = limit - 0x1000;
+ const void __iomem *end = limit - 0x30000;
+
+ for (addr = begin; addr >= end; addr -= 0x1000)
+ if (check_board_data(addr, broken))
+ return addr;
+
+ return NULL;
+}
+
+static const void __iomem * __init find_radio_config(const void __iomem *limit,
+ const void __iomem *bcfg)
+{
+ const void __iomem *rcfg, *begin, *end;
+
+ /*
+ * Now find the start of Radio Configuration data, using heuristics:
+ * Search forward from Board Configuration data by 0x1000 bytes
+ * at a time until we find non-0xffffffff.
+ */
+ begin = bcfg + 0x1000;
+ end = limit;
+ for (rcfg = begin; rcfg < end; rcfg += 0x1000)
+ if (check_notempty(rcfg) && check_radio_magic(rcfg))
+ return rcfg;
+
+ /* AR2316 relocates radio config to new location */
+ begin = bcfg + 0xf8;
+ end = limit - 0x1000 + 0xf8;
+ for (rcfg = begin; rcfg < end; rcfg += 0x1000)
+ if (check_notempty(rcfg) && check_radio_magic(rcfg))
+ return rcfg;
+
+ return NULL;
+}
+
+/*
+ * NB: Search region size could be larger than the actual flash size,
+ * but this shouldn't be a problem here, because the flash
+ * will simply be mapped multiple times.
+ */
+int __init ath25_find_config(phys_addr_t base, unsigned long size)
+{
+ const void __iomem *flash_base, *flash_limit;
+ struct ath25_boarddata *config;
+ unsigned int rcfg_size;
+ int broken_boarddata = 0;
+ const void __iomem *bcfg, *rcfg;
+ u8 *board_data;
+ u8 *radio_data;
+ u8 *mac_addr;
+ u32 offset;
+
+ flash_base = ioremap(base, size);
+ flash_limit = flash_base + size;
+
+ ath25_board.config = NULL;
+ ath25_board.radio = NULL;
+
+ /* Copy the board and radio data to RAM, because accessing the mapped
+ * memory of the flash directly after booting is not safe */
+
+ /* Try to find valid board and radio data */
+ bcfg = find_board_config(flash_limit, false);
+
+ /* If that fails, try to at least find valid radio data */
+ if (!bcfg) {
+ bcfg = find_board_config(flash_limit, true);
+ broken_boarddata = 1;
+ }
+
+ if (!bcfg) {
+ pr_warn("WARNING: No board configuration data found!\n");
+ goto error;
+ }
+
+ board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
+ if (!board_data)
+ goto error;
+ ath25_board.config = (struct ath25_boarddata *)board_data;
+ memcpy_fromio(board_data, bcfg, 0x100);
+ if (broken_boarddata) {
+ pr_warn("WARNING: broken board data detected\n");
+ config = ath25_board.config;
+ if (is_zero_ether_addr(config->enet0_mac)) {
+ pr_info("Fixing up empty mac addresses\n");
+ config->reset_config_gpio = 0xffff;
+ config->sys_led_gpio = 0xffff;
+ eth_random_addr(config->wlan0_mac);
+ config->wlan0_mac[0] &= ~0x06;
+ eth_random_addr(config->enet0_mac);
+ eth_random_addr(config->enet1_mac);
+ }
+ }
+
+ /* Radio config starts 0x100 bytes after board config, regardless
+ * of what the physical layout on the flash chip looks like */
+
+ rcfg = find_radio_config(flash_limit, bcfg);
+ if (!rcfg) {
+ pr_warn("WARNING: Could not find Radio Configuration data\n");
+ goto error;
+ }
+
+ radio_data = board_data + 0x100 + ((rcfg - bcfg) & 0xfff);
+ ath25_board.radio = radio_data;
+ offset = radio_data - board_data;
+ pr_info("Radio config found at offset 0x%x (0x%x)\n", rcfg - bcfg,
+ offset);
+ rcfg_size = BOARD_CONFIG_BUFSZ - offset;
+ memcpy_fromio(radio_data, rcfg, rcfg_size);
+
+ mac_addr = &radio_data[0x1d * 2];
+ if (is_broadcast_ether_addr(mac_addr)) {
+ pr_info("Radio MAC is blank; using board-data\n");
+ ether_addr_copy(mac_addr, ath25_board.config->wlan0_mac);
+ }
+
+ iounmap(flash_base);
+
+ return 0;
+
+error:
+ iounmap(flash_base);
+ return -ENODEV;
+}
+
+static void ath25_halt(void)
+{
+ local_irq_disable();
+ unreachable();
+}
+
+void __init plat_mem_setup(void)
+{
+ _machine_halt = ath25_halt;
+ pm_power_off = ath25_halt;
+
+ if (is_ar5312())
+ ar5312_plat_mem_setup();
+ else
+ ar2315_plat_mem_setup();
+
+ /* Disable data watchpoints */
+ write_c0_watchlo0(0);
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ ath25_irq_dispatch();
+}
+
+void __init plat_time_init(void)
+{
+ if (is_ar5312())
+ ar5312_plat_time_init();
+ else
+ ar2315_plat_time_init();
+}
+
+unsigned int get_c0_compare_int(void)
+{
+ return CP0_LEGACY_COMPARE_IRQ;
+}
+
+void __init arch_init_irq(void)
+{
+ clear_c0_status(ST0_IM);
+ mips_cpu_irq_init();
+
+ /* Initialize interrupt controllers */
+ if (is_ar5312())
+ ar5312_arch_init_irq();
+ else
+ ar2315_arch_init_irq();
+}
diff --git a/arch/mips/ath25/devices.c b/arch/mips/ath25/devices.c
new file mode 100644
index 000000000..301a90282
--- /dev/null
+++ b/arch/mips/ath25/devices.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+#include <linux/platform_device.h>
+#include <asm/bootinfo.h>
+
+#include <ath25_platform.h>
+#include "devices.h"
+#include "ar5312.h"
+#include "ar2315.h"
+
+struct ar231x_board_config ath25_board;
+enum ath25_soc_type ath25_soc = ATH25_SOC_UNKNOWN;
+
+static struct resource ath25_wmac0_res[] = {
+ {
+ .name = "wmac0_membase",
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "wmac0_irq",
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct resource ath25_wmac1_res[] = {
+ {
+ .name = "wmac1_membase",
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "wmac1_irq",
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device ath25_wmac[] = {
+ {
+ .id = 0,
+ .name = "ar231x-wmac",
+ .resource = ath25_wmac0_res,
+ .num_resources = ARRAY_SIZE(ath25_wmac0_res),
+ .dev.platform_data = &ath25_board,
+ },
+ {
+ .id = 1,
+ .name = "ar231x-wmac",
+ .resource = ath25_wmac1_res,
+ .num_resources = ARRAY_SIZE(ath25_wmac1_res),
+ .dev.platform_data = &ath25_board,
+ },
+};
+
+static const char * const soc_type_strings[] = {
+ [ATH25_SOC_AR5312] = "Atheros AR5312",
+ [ATH25_SOC_AR2312] = "Atheros AR2312",
+ [ATH25_SOC_AR2313] = "Atheros AR2313",
+ [ATH25_SOC_AR2315] = "Atheros AR2315",
+ [ATH25_SOC_AR2316] = "Atheros AR2316",
+ [ATH25_SOC_AR2317] = "Atheros AR2317",
+ [ATH25_SOC_AR2318] = "Atheros AR2318",
+ [ATH25_SOC_UNKNOWN] = "Atheros (unknown)",
+};
+
+const char *get_system_type(void)
+{
+ if ((ath25_soc >= ARRAY_SIZE(soc_type_strings)) ||
+ !soc_type_strings[ath25_soc])
+ return soc_type_strings[ATH25_SOC_UNKNOWN];
+ return soc_type_strings[ath25_soc];
+}
+
+void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
+{
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ struct uart_port s;
+
+ memset(&s, 0, sizeof(s));
+
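+ /* The SoC UART is an 8250 with 32-bit registers spaced 4 bytes apart */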
+ s.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP;
+ s.iotype = UPIO_MEM32;
+ s.irq = irq;
+ s.regshift = 2;
+ s.mapbase = mapbase;
+ s.uartclk = uartclk;
+
+ early_serial_setup(&s);
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
+}
+
+int __init ath25_add_wmac(int nr, u32 base, int irq)
+{
+ struct resource *res;
+
+ ath25_wmac[nr].dev.platform_data = &ath25_board;
+ res = &ath25_wmac[nr].resource[0];
+ res->start = base;
+ res->end = base + 0x10000 - 1;
+ res++;
+ res->start = irq;
+ res->end = irq;
+ return platform_device_register(&ath25_wmac[nr]);
+}
+
+static int __init ath25_register_devices(void)
+{
+ if (is_ar5312())
+ ar5312_init_devices();
+ else
+ ar2315_init_devices();
+
+ return 0;
+}
+
+device_initcall(ath25_register_devices);
+
+static int __init ath25_arch_init(void)
+{
+ if (is_ar5312())
+ ar5312_arch_init();
+ else
+ ar2315_arch_init();
+
+ return 0;
+}
+
+arch_initcall(ath25_arch_init);
diff --git a/arch/mips/ath25/devices.h b/arch/mips/ath25/devices.h
new file mode 100644
index 000000000..44cf69063
--- /dev/null
+++ b/arch/mips/ath25/devices.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ATH25_DEVICES_H
+#define __ATH25_DEVICES_H
+
+#include <linux/cpu.h>
+
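+/*
+ * Extract a register bit field using the matching _M (mask) and _S (shift)
+ * macros, e.g. ATH25_REG_MS(memcfg, AR5312_MEM_CFG1_AC0).
+ */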
+#define ATH25_REG_MS(_val, _field) (((_val) & _field##_M) >> _field##_S)
+
+#define ATH25_IRQ_CPU_CLOCK (MIPS_CPU_IRQ_BASE + 7) /* C0_CAUSE: 0x8000 */
+
+enum ath25_soc_type {
+ /* handled by ar5312.c */
+ ATH25_SOC_AR2312,
+ ATH25_SOC_AR2313,
+ ATH25_SOC_AR5312,
+
+ /* handled by ar2315.c */
+ ATH25_SOC_AR2315,
+ ATH25_SOC_AR2316,
+ ATH25_SOC_AR2317,
+ ATH25_SOC_AR2318,
+
+ ATH25_SOC_UNKNOWN
+};
+
+extern enum ath25_soc_type ath25_soc;
+extern struct ar231x_board_config ath25_board;
+extern void (*ath25_irq_dispatch)(void);
+
+int ath25_find_config(phys_addr_t offset, unsigned long size);
+void ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk);
+int ath25_add_wmac(int nr, u32 base, int irq);
+
+static inline bool is_ar2315(void)
+{
+ return (current_cpu_data.cputype == CPU_4KEC);
+}
+
+static inline bool is_ar5312(void)
+{
+ return !is_ar2315();
+}
+
+#endif
diff --git a/arch/mips/ath25/early_printk.c b/arch/mips/ath25/early_printk.c
new file mode 100644
index 000000000..d534761e9
--- /dev/null
+++ b/arch/mips/ath25/early_printk.c
@@ -0,0 +1,45 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/serial_reg.h>
+#include <asm/setup.h>
+
+#include "devices.h"
+#include "ar2315_regs.h"
+#include "ar5312_regs.h"
+
+static inline void prom_uart_wr(void __iomem *base, unsigned reg,
+ unsigned char ch)
+{
+ __raw_writel(ch, base + 4 * reg);
+}
+
+static inline unsigned char prom_uart_rr(void __iomem *base, unsigned reg)
+{
+ return __raw_readl(base + 4 * reg);
+}
+
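+/*
+ * Busy-wait until the UART transmit holding register is empty, emit the
+ * character, then wait for it to drain before returning.
+ */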
+void prom_putchar(char ch)
+{
+ static void __iomem *base;
+
+ if (unlikely(base == NULL)) {
+ if (is_ar2315())
+ base = (void __iomem *)(KSEG1ADDR(AR2315_UART0_BASE));
+ else
+ base = (void __iomem *)(KSEG1ADDR(AR5312_UART0_BASE));
+ }
+
+ while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0)
+ ;
+ prom_uart_wr(base, UART_TX, (unsigned char)ch);
+ while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0)
+ ;
+}
diff --git a/arch/mips/ath25/prom.c b/arch/mips/ath25/prom.c
new file mode 100644
index 000000000..edf82be88
--- /dev/null
+++ b/arch/mips/ath25/prom.c
@@ -0,0 +1,26 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright MontaVista Software Inc
+ * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
+ * Copyright (C) 2006 FON Technology, SL.
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ */
+
+/*
+ * Prom setup file for AR5312/AR231x SoCs
+ */
+
+#include <linux/init.h>
+#include <asm/bootinfo.h>
+
+void __init prom_init(void)
+{
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/ath79/Kconfig b/arch/mips/ath79/Kconfig
new file mode 100644
index 000000000..736741664
--- /dev/null
+++ b/arch/mips/ath79/Kconfig
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0
+if ATH79
+
+config SOC_AR71XX
+ select HAVE_PCI
+ def_bool n
+
+config SOC_AR724X
+ select HAVE_PCI
+ select PCI_AR724X if PCI
+ def_bool n
+
+config SOC_AR913X
+ def_bool n
+
+config SOC_AR933X
+ def_bool n
+
+config SOC_AR934X
+ select HAVE_PCI
+ select PCI_AR724X if PCI
+ def_bool n
+
+config SOC_QCA955X
+ select HAVE_PCI
+ select PCI_AR724X if PCI
+ def_bool n
+
+config PCI_AR724X
+ def_bool n
+
+config ATH79_DEV_GPIO_BUTTONS
+ def_bool n
+
+config ATH79_DEV_LEDS_GPIO
+ def_bool n
+
+config ATH79_DEV_SPI
+ def_bool n
+
+config ATH79_DEV_USB
+ def_bool n
+
+config ATH79_DEV_WMAC
+ depends on (SOC_AR913X || SOC_AR933X || SOC_AR934X || SOC_QCA955X)
+ def_bool n
+
+endif
diff --git a/arch/mips/ath79/Makefile b/arch/mips/ath79/Makefile
new file mode 100644
index 000000000..0fb3aaf42
--- /dev/null
+++ b/arch/mips/ath79/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Atheros AR71XX/AR724X/AR913X specific parts of the kernel
+#
+# Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+# Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+#
+
+obj-y := prom.o setup.o common.o clock.o
+
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
diff --git a/arch/mips/ath79/Platform b/arch/mips/ath79/Platform
new file mode 100644
index 000000000..57744472e
--- /dev/null
+++ b/arch/mips/ath79/Platform
@@ -0,0 +1,6 @@
+#
+# Atheros AR71xx/AR724x/AR913x
+#
+
+cflags-$(CONFIG_ATH79) += -I$(srctree)/arch/mips/include/asm/mach-ath79
+load-$(CONFIG_ATH79) = 0xffffffff80060000
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
new file mode 100644
index 000000000..050f6553f
--- /dev/null
+++ b/arch/mips/ath79/clock.c
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Atheros AR71XX/AR724X/AR913X common routines
+ *
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <dt-bindings/clock/ath79-clk.h>
+
+#include <asm/div64.h>
+
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include "common.h"
+
+#define AR71XX_BASE_FREQ 40000000
+#define AR724X_BASE_FREQ 40000000
+
+static struct clk *clks[ATH79_CLK_END];
+static struct clk_onecell_data clk_data = {
+ .clks = clks,
+ .clk_num = ARRAY_SIZE(clks),
+};
+
+static const char * const clk_names[ATH79_CLK_END] = {
+ [ATH79_CLK_CPU] = "cpu",
+ [ATH79_CLK_DDR] = "ddr",
+ [ATH79_CLK_AHB] = "ahb",
+ [ATH79_CLK_REF] = "ref",
+ [ATH79_CLK_MDIO] = "mdio",
+};
+
+static const char * __init ath79_clk_name(int type)
+{
+ BUG_ON(type >= ARRAY_SIZE(clk_names) || !clk_names[type]);
+ return clk_names[type];
+}
+
+static void __init __ath79_set_clk(int type, const char *name, struct clk *clk)
+{
+ if (IS_ERR(clk))
+ panic("failed to allocate %s clock structure", clk_names[type]);
+
+ clks[type] = clk;
+ clk_register_clkdev(clk, name, NULL);
+}
+
+static struct clk * __init ath79_set_clk(int type, unsigned long rate)
+{
+ const char *name = ath79_clk_name(type);
+ struct clk *clk;
+
+ clk = clk_register_fixed_rate(NULL, name, NULL, 0, rate);
+ __ath79_set_clk(type, name, clk);
+ return clk;
+}
+
+static struct clk * __init ath79_set_ff_clk(int type, const char *parent,
+ unsigned int mult, unsigned int div)
+{
+ const char *name = ath79_clk_name(type);
+ struct clk *clk;
+
+ clk = clk_register_fixed_factor(NULL, name, parent, 0, mult, div);
+ __ath79_set_clk(type, name, clk);
+ return clk;
+}
+
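+/*
+ * Use the rate of a reference clock already provided via DT when present,
+ * otherwise register a fixed-rate "ref" clock at the detected rate.
+ */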
+static unsigned long __init ath79_setup_ref_clk(unsigned long rate)
+{
+ struct clk *clk = clks[ATH79_CLK_REF];
+
+ if (clk)
+ rate = clk_get_rate(clk);
+ else
+ clk = ath79_set_clk(ATH79_CLK_REF, rate);
+
+ return rate;
+}
+
+static void __init ar71xx_clocks_init(void __iomem *pll_base)
+{
+ unsigned long ref_rate;
+ unsigned long cpu_rate;
+ unsigned long ddr_rate;
+ unsigned long ahb_rate;
+ u32 pll;
+ u32 freq;
+ u32 div;
+
+ ref_rate = ath79_setup_ref_clk(AR71XX_BASE_FREQ);
+
+ pll = __raw_readl(pll_base + AR71XX_PLL_REG_CPU_CONFIG);
+
+ div = ((pll >> AR71XX_PLL_FB_SHIFT) & AR71XX_PLL_FB_MASK) + 1;
+ freq = div * ref_rate;
+
+ div = ((pll >> AR71XX_CPU_DIV_SHIFT) & AR71XX_CPU_DIV_MASK) + 1;
+ cpu_rate = freq / div;
+
+ div = ((pll >> AR71XX_DDR_DIV_SHIFT) & AR71XX_DDR_DIV_MASK) + 1;
+ ddr_rate = freq / div;
+
+ div = (((pll >> AR71XX_AHB_DIV_SHIFT) & AR71XX_AHB_DIV_MASK) + 1) * 2;
+ ahb_rate = cpu_rate / div;
+
+ ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
+ ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
+ ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
+}
+
+static void __init ar724x_clocks_init(void __iomem *pll_base)
+{
+ u32 mult, div, ddr_div, ahb_div;
+ u32 pll;
+
+ ath79_setup_ref_clk(AR71XX_BASE_FREQ);
+
+ pll = __raw_readl(pll_base + AR724X_PLL_REG_CPU_CONFIG);
+
+ mult = ((pll >> AR724X_PLL_FB_SHIFT) & AR724X_PLL_FB_MASK);
+ div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK) * 2;
+
+ ddr_div = ((pll >> AR724X_DDR_DIV_SHIFT) & AR724X_DDR_DIV_MASK) + 1;
+ ahb_div = (((pll >> AR724X_AHB_DIV_SHIFT) & AR724X_AHB_DIV_MASK) + 1) * 2;
+
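+ /*
+ * Register the derived clocks as fixed factors of the reference clock:
+ * cpu = ref * mult / div, ddr = cpu / ddr_div, ahb = cpu / ahb_div.
+ */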
+ ath79_set_ff_clk(ATH79_CLK_CPU, "ref", mult, div);
+ ath79_set_ff_clk(ATH79_CLK_DDR, "ref", mult, div * ddr_div);
+ ath79_set_ff_clk(ATH79_CLK_AHB, "ref", mult, div * ahb_div);
+}
+
+static void __init ar933x_clocks_init(void __iomem *pll_base)
+{
+ unsigned long ref_rate;
+ u32 clock_ctrl;
+ u32 ref_div;
+ u32 ninit_mul;
+ u32 out_div;
+
+ u32 cpu_div;
+ u32 ddr_div;
+ u32 ahb_div;
+ u32 t;
+
+ t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
+ if (t & AR933X_BOOTSTRAP_REF_CLK_40)
+ ref_rate = (40 * 1000 * 1000);
+ else
+ ref_rate = (25 * 1000 * 1000);
+
+ ath79_setup_ref_clk(ref_rate);
+
+ clock_ctrl = __raw_readl(pll_base + AR933X_PLL_CLOCK_CTRL_REG);
+ if (clock_ctrl & AR933X_PLL_CLOCK_CTRL_BYPASS) {
+ ref_div = 1;
+ ninit_mul = 1;
+ out_div = 1;
+
+ cpu_div = 1;
+ ddr_div = 1;
+ ahb_div = 1;
+ } else {
+ u32 cpu_config;
+ u32 t;
+
+ cpu_config = __raw_readl(pll_base + AR933X_PLL_CPU_CONFIG_REG);
+
+ t = (cpu_config >> AR933X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+ AR933X_PLL_CPU_CONFIG_REFDIV_MASK;
+ ref_div = t;
+
+ ninit_mul = (cpu_config >> AR933X_PLL_CPU_CONFIG_NINT_SHIFT) &
+ AR933X_PLL_CPU_CONFIG_NINT_MASK;
+
+ t = (cpu_config >> AR933X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+ AR933X_PLL_CPU_CONFIG_OUTDIV_MASK;
+ if (t == 0)
+ t = 1;
+
+ out_div = (1 << t);
+
+ cpu_div = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_CPU_DIV_SHIFT) &
+ AR933X_PLL_CLOCK_CTRL_CPU_DIV_MASK) + 1;
+
+ ddr_div = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_DDR_DIV_SHIFT) &
+ AR933X_PLL_CLOCK_CTRL_DDR_DIV_MASK) + 1;
+
+ ahb_div = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_AHB_DIV_SHIFT) &
+ AR933X_PLL_CLOCK_CTRL_AHB_DIV_MASK) + 1;
+ }
+
+ ath79_set_ff_clk(ATH79_CLK_CPU, "ref", ninit_mul,
+ ref_div * out_div * cpu_div);
+ ath79_set_ff_clk(ATH79_CLK_DDR, "ref", ninit_mul,
+ ref_div * out_div * ddr_div);
+ ath79_set_ff_clk(ATH79_CLK_AHB, "ref", ninit_mul,
+ ref_div * out_div * ahb_div);
+}
+
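+/*
+ * The PLL output is ref / ref_div * (nint + nfrac / frac), divided down by
+ * 2^out_div; e.g. ref = 40 MHz, ref_div = 1, nint = 14, nfrac = 0 and
+ * out_div = 0 would give 560 MHz (illustrative values).
+ */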
+static u32 __init ar934x_get_pll_freq(u32 ref, u32 ref_div, u32 nint, u32 nfrac,
+ u32 frac, u32 out_div)
+{
+ u64 t;
+ u32 ret;
+
+ t = ref;
+ t *= nint;
+ do_div(t, ref_div);
+ ret = t;
+
+ t = ref;
+ t *= nfrac;
+ do_div(t, ref_div * frac);
+ ret += t;
+
+ ret /= (1 << out_div);
+ return ret;
+}
+
+static void __init ar934x_clocks_init(void __iomem *pll_base)
+{
+ unsigned long ref_rate;
+ unsigned long cpu_rate;
+ unsigned long ddr_rate;
+ unsigned long ahb_rate;
+ u32 pll, out_div, ref_div, nint, nfrac, frac, clk_ctrl, postdiv;
+ u32 cpu_pll, ddr_pll;
+ u32 bootstrap;
+ void __iomem *dpll_base;
+
+ dpll_base = ioremap(AR934X_SRIF_BASE, AR934X_SRIF_SIZE);
+
+ bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & AR934X_BOOTSTRAP_REF_CLK_40)
+ ref_rate = 40 * 1000 * 1000;
+ else
+ ref_rate = 25 * 1000 * 1000;
+
+ ref_rate = ath79_setup_ref_clk(ref_rate);
+
+ pll = __raw_readl(dpll_base + AR934X_SRIF_CPU_DPLL2_REG);
+ if (pll & AR934X_SRIF_DPLL2_LOCAL_PLL) {
+ out_div = (pll >> AR934X_SRIF_DPLL2_OUTDIV_SHIFT) &
+ AR934X_SRIF_DPLL2_OUTDIV_MASK;
+ pll = __raw_readl(dpll_base + AR934X_SRIF_CPU_DPLL1_REG);
+ nint = (pll >> AR934X_SRIF_DPLL1_NINT_SHIFT) &
+ AR934X_SRIF_DPLL1_NINT_MASK;
+ nfrac = pll & AR934X_SRIF_DPLL1_NFRAC_MASK;
+ ref_div = (pll >> AR934X_SRIF_DPLL1_REFDIV_SHIFT) &
+ AR934X_SRIF_DPLL1_REFDIV_MASK;
+ frac = 1 << 18;
+ } else {
+ pll = __raw_readl(pll_base + AR934X_PLL_CPU_CONFIG_REG);
+ out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+ AR934X_PLL_CPU_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+ AR934X_PLL_CPU_CONFIG_REFDIV_MASK;
+ nint = (pll >> AR934X_PLL_CPU_CONFIG_NINT_SHIFT) &
+ AR934X_PLL_CPU_CONFIG_NINT_MASK;
+ nfrac = (pll >> AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+ AR934X_PLL_CPU_CONFIG_NFRAC_MASK;
+ frac = 1 << 6;
+ }
+
+ cpu_pll = ar934x_get_pll_freq(ref_rate, ref_div, nint,
+ nfrac, frac, out_div);
+
+ pll = __raw_readl(dpll_base + AR934X_SRIF_DDR_DPLL2_REG);
+ if (pll & AR934X_SRIF_DPLL2_LOCAL_PLL) {
+ out_div = (pll >> AR934X_SRIF_DPLL2_OUTDIV_SHIFT) &
+ AR934X_SRIF_DPLL2_OUTDIV_MASK;
+ pll = __raw_readl(dpll_base + AR934X_SRIF_DDR_DPLL1_REG);
+ nint = (pll >> AR934X_SRIF_DPLL1_NINT_SHIFT) &
+ AR934X_SRIF_DPLL1_NINT_MASK;
+ nfrac = pll & AR934X_SRIF_DPLL1_NFRAC_MASK;
+ ref_div = (pll >> AR934X_SRIF_DPLL1_REFDIV_SHIFT) &
+ AR934X_SRIF_DPLL1_REFDIV_MASK;
+ frac = 1 << 18;
+ } else {
+ pll = __raw_readl(pll_base + AR934X_PLL_DDR_CONFIG_REG);
+ out_div = (pll >> AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+ AR934X_PLL_DDR_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+ AR934X_PLL_DDR_CONFIG_REFDIV_MASK;
+ nint = (pll >> AR934X_PLL_DDR_CONFIG_NINT_SHIFT) &
+ AR934X_PLL_DDR_CONFIG_NINT_MASK;
+ nfrac = (pll >> AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+ AR934X_PLL_DDR_CONFIG_NFRAC_MASK;
+ frac = 1 << 10;
+ }
+
+ ddr_pll = ar934x_get_pll_freq(ref_rate, ref_div, nint,
+ nfrac, frac, out_div);
+
+ clk_ctrl = __raw_readl(pll_base + AR934X_PLL_CPU_DDR_CLK_CTRL_REG);
+
+ postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+ AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK;
+
+ if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_PLL_BYPASS)
+ cpu_rate = ref_rate;
+ else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_CPUCLK_FROM_CPUPLL)
+ cpu_rate = cpu_pll / (postdiv + 1);
+ else
+ cpu_rate = ddr_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+ AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_MASK;
+
+ if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_PLL_BYPASS)
+ ddr_rate = ref_rate;
+ else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL)
+ ddr_rate = ddr_pll / (postdiv + 1);
+ else
+ ddr_rate = cpu_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+ AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_MASK;
+
+ if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_PLL_BYPASS)
+ ahb_rate = ref_rate;
+ else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+ ahb_rate = ddr_pll / (postdiv + 1);
+ else
+ ahb_rate = cpu_pll / (postdiv + 1);
+
+ ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
+ ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
+ ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
+
+ clk_ctrl = __raw_readl(pll_base + AR934X_PLL_SWITCH_CLOCK_CONTROL_REG);
+ if (clk_ctrl & AR934X_PLL_SWITCH_CLOCK_CONTROL_MDIO_CLK_SEL)
+ ath79_set_clk(ATH79_CLK_MDIO, 100 * 1000 * 1000);
+
+ iounmap(dpll_base);
+}
+
+static void __init qca953x_clocks_init(void __iomem *pll_base)
+{
+ unsigned long ref_rate;
+ unsigned long cpu_rate;
+ unsigned long ddr_rate;
+ unsigned long ahb_rate;
+ u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
+ u32 cpu_pll, ddr_pll;
+ u32 bootstrap;
+
+ bootstrap = ath79_reset_rr(QCA953X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & QCA953X_BOOTSTRAP_REF_CLK_40)
+ ref_rate = 40 * 1000 * 1000;
+ else
+ ref_rate = 25 * 1000 * 1000;
+
+ ref_rate = ath79_setup_ref_clk(ref_rate);
+
+ pll = __raw_readl(pll_base + QCA953X_PLL_CPU_CONFIG_REG);
+ out_div = (pll >> QCA953X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+ QCA953X_PLL_CPU_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA953X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+ QCA953X_PLL_CPU_CONFIG_REFDIV_MASK;
+ nint = (pll >> QCA953X_PLL_CPU_CONFIG_NINT_SHIFT) &
+ QCA953X_PLL_CPU_CONFIG_NINT_MASK;
+ frac = (pll >> QCA953X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+ QCA953X_PLL_CPU_CONFIG_NFRAC_MASK;
+
+ cpu_pll = nint * ref_rate / ref_div;
+ cpu_pll += frac * (ref_rate >> 6) / ref_div;
+ cpu_pll /= (1 << out_div);
+
+ pll = __raw_readl(pll_base + QCA953X_PLL_DDR_CONFIG_REG);
+ out_div = (pll >> QCA953X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+ QCA953X_PLL_DDR_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA953X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+ QCA953X_PLL_DDR_CONFIG_REFDIV_MASK;
+ nint = (pll >> QCA953X_PLL_DDR_CONFIG_NINT_SHIFT) &
+ QCA953X_PLL_DDR_CONFIG_NINT_MASK;
+ frac = (pll >> QCA953X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+ QCA953X_PLL_DDR_CONFIG_NFRAC_MASK;
+
+ ddr_pll = nint * ref_rate / ref_div;
+ ddr_pll += frac * (ref_rate >> 6) / (ref_div << 4);
+ ddr_pll /= (1 << out_div);
+
+ clk_ctrl = __raw_readl(pll_base + QCA953X_PLL_CLK_CTRL_REG);
+
+ postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+ QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA953X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
+ cpu_rate = ref_rate;
+ else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL)
+ cpu_rate = cpu_pll / (postdiv + 1);
+ else
+ cpu_rate = ddr_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+ QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA953X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
+ ddr_rate = ref_rate;
+ else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL)
+ ddr_rate = ddr_pll / (postdiv + 1);
+ else
+ ddr_rate = cpu_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+ QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA953X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
+ ahb_rate = ref_rate;
+ else if (clk_ctrl & QCA953X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+ ahb_rate = ddr_pll / (postdiv + 1);
+ else
+ ahb_rate = cpu_pll / (postdiv + 1);
+
+ ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
+ ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
+ ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
+}
+
+static void __init qca955x_clocks_init(void __iomem *pll_base)
+{
+ unsigned long ref_rate;
+ unsigned long cpu_rate;
+ unsigned long ddr_rate;
+ unsigned long ahb_rate;
+ u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
+ u32 cpu_pll, ddr_pll;
+ u32 bootstrap;
+
+ bootstrap = ath79_reset_rr(QCA955X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & QCA955X_BOOTSTRAP_REF_CLK_40)
+ ref_rate = 40 * 1000 * 1000;
+ else
+ ref_rate = 25 * 1000 * 1000;
+
+ ref_rate = ath79_setup_ref_clk(ref_rate);
+
+ pll = __raw_readl(pll_base + QCA955X_PLL_CPU_CONFIG_REG);
+ out_div = (pll >> QCA955X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+ QCA955X_PLL_CPU_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA955X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+ QCA955X_PLL_CPU_CONFIG_REFDIV_MASK;
+ nint = (pll >> QCA955X_PLL_CPU_CONFIG_NINT_SHIFT) &
+ QCA955X_PLL_CPU_CONFIG_NINT_MASK;
+ frac = (pll >> QCA955X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+ QCA955X_PLL_CPU_CONFIG_NFRAC_MASK;
+
+ cpu_pll = nint * ref_rate / ref_div;
+ cpu_pll += frac * ref_rate / (ref_div * (1 << 6));
+ cpu_pll /= (1 << out_div);
+
+ pll = __raw_readl(pll_base + QCA955X_PLL_DDR_CONFIG_REG);
+ out_div = (pll >> QCA955X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+ QCA955X_PLL_DDR_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA955X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+ QCA955X_PLL_DDR_CONFIG_REFDIV_MASK;
+ nint = (pll >> QCA955X_PLL_DDR_CONFIG_NINT_SHIFT) &
+ QCA955X_PLL_DDR_CONFIG_NINT_MASK;
+ frac = (pll >> QCA955X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+ QCA955X_PLL_DDR_CONFIG_NFRAC_MASK;
+
+ ddr_pll = nint * ref_rate / ref_div;
+ ddr_pll += frac * ref_rate / (ref_div * (1 << 10));
+ ddr_pll /= (1 << out_div);
+
+ clk_ctrl = __raw_readl(pll_base + QCA955X_PLL_CLK_CTRL_REG);
+
+ postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+ QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA955X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
+ cpu_rate = ref_rate;
+ else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL)
+ cpu_rate = ddr_pll / (postdiv + 1);
+ else
+ cpu_rate = cpu_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+ QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA955X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
+ ddr_rate = ref_rate;
+ else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL)
+ ddr_rate = cpu_pll / (postdiv + 1);
+ else
+ ddr_rate = ddr_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+ QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA955X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
+ ahb_rate = ref_rate;
+ else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+ ahb_rate = ddr_pll / (postdiv + 1);
+ else
+ ahb_rate = cpu_pll / (postdiv + 1);
+
+ ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
+ ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
+ ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
+}
+
+static void __init qca956x_clocks_init(void __iomem *pll_base)
+{
+ unsigned long ref_rate;
+ unsigned long cpu_rate;
+ unsigned long ddr_rate;
+ unsigned long ahb_rate;
+ u32 pll, out_div, ref_div, nint, hfrac, lfrac, clk_ctrl, postdiv;
+ u32 cpu_pll, ddr_pll;
+ u32 bootstrap;
+
+ /*
+ * The QCA956x timer init workaround has to be applied right before
+ * setting up the clocks; otherwise there will be no jiffies.
+ */
+ u32 misc;
+
+ misc = ath79_reset_rr(AR71XX_RESET_REG_MISC_INT_ENABLE);
+ misc |= MISC_INT_MIPS_SI_TIMERINT_MASK;
+ ath79_reset_wr(AR71XX_RESET_REG_MISC_INT_ENABLE, misc);
+
+ bootstrap = ath79_reset_rr(QCA956X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & QCA956X_BOOTSTRAP_REF_CLK_40)
+ ref_rate = 40 * 1000 * 1000;
+ else
+ ref_rate = 25 * 1000 * 1000;
+
+ ref_rate = ath79_setup_ref_clk(ref_rate);
+
+ pll = __raw_readl(pll_base + QCA956X_PLL_CPU_CONFIG_REG);
+ out_div = (pll >> QCA956X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA956X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG_REFDIV_MASK;
+
+ pll = __raw_readl(pll_base + QCA956X_PLL_CPU_CONFIG1_REG);
+ nint = (pll >> QCA956X_PLL_CPU_CONFIG1_NINT_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG1_NINT_MASK;
+ hfrac = (pll >> QCA956X_PLL_CPU_CONFIG1_NFRAC_H_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG1_NFRAC_H_MASK;
+ lfrac = (pll >> QCA956X_PLL_CPU_CONFIG1_NFRAC_L_SHIFT) &
+ QCA956X_PLL_CPU_CONFIG1_NFRAC_L_MASK;
+
+ cpu_pll = nint * ref_rate / ref_div;
+ cpu_pll += (lfrac * ref_rate) / ((ref_div * 25) << 13);
+ cpu_pll += (hfrac >> 13) * ref_rate / ref_div;
+ cpu_pll /= (1 << out_div);
+
+ pll = __raw_readl(pll_base + QCA956X_PLL_DDR_CONFIG_REG);
+ out_div = (pll >> QCA956X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA956X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG_REFDIV_MASK;
+ pll = __raw_readl(pll_base + QCA956X_PLL_DDR_CONFIG1_REG);
+ nint = (pll >> QCA956X_PLL_DDR_CONFIG1_NINT_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG1_NINT_MASK;
+ hfrac = (pll >> QCA956X_PLL_DDR_CONFIG1_NFRAC_H_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG1_NFRAC_H_MASK;
+ lfrac = (pll >> QCA956X_PLL_DDR_CONFIG1_NFRAC_L_SHIFT) &
+ QCA956X_PLL_DDR_CONFIG1_NFRAC_L_MASK;
+
+ ddr_pll = nint * ref_rate / ref_div;
+ ddr_pll += (lfrac * ref_rate) / ((ref_div * 25) << 13);
+ ddr_pll += (hfrac >> 13) * ref_rate / ref_div;
+ ddr_pll /= (1 << out_div);
+
+ clk_ctrl = __raw_readl(pll_base + QCA956X_PLL_CLK_CTRL_REG);
+
+ postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+ QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
+ cpu_rate = ref_rate;
+ else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_CPUPLL)
+ cpu_rate = ddr_pll / (postdiv + 1);
+ else
+ cpu_rate = cpu_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+ QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA956X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
+ ddr_rate = ref_rate;
+ else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_DDRPLL)
+ ddr_rate = cpu_pll / (postdiv + 1);
+ else
+ ddr_rate = ddr_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+ QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA956X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
+ ahb_rate = ref_rate;
+ else if (clk_ctrl & QCA956X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+ ahb_rate = ddr_pll / (postdiv + 1);
+ else
+ ahb_rate = cpu_pll / (postdiv + 1);
+
+ ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
+ ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
+ ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
+}
+
+static void __init ath79_clocks_init_dt(struct device_node *np)
+{
+ struct clk *ref_clk;
+ void __iomem *pll_base;
+
+ ref_clk = of_clk_get(np, 0);
+ if (!IS_ERR(ref_clk))
+ clks[ATH79_CLK_REF] = ref_clk;
+
+ pll_base = of_iomap(np, 0);
+ if (!pll_base) {
+ pr_err("%pOF: can't map pll registers\n", np);
+ goto err_clk;
+ }
+
+ if (of_device_is_compatible(np, "qca,ar7100-pll"))
+ ar71xx_clocks_init(pll_base);
+ else if (of_device_is_compatible(np, "qca,ar7240-pll") ||
+ of_device_is_compatible(np, "qca,ar9130-pll"))
+ ar724x_clocks_init(pll_base);
+ else if (of_device_is_compatible(np, "qca,ar9330-pll"))
+ ar933x_clocks_init(pll_base);
+ else if (of_device_is_compatible(np, "qca,ar9340-pll"))
+ ar934x_clocks_init(pll_base);
+ else if (of_device_is_compatible(np, "qca,qca9530-pll"))
+ qca953x_clocks_init(pll_base);
+ else if (of_device_is_compatible(np, "qca,qca9550-pll"))
+ qca955x_clocks_init(pll_base);
+ else if (of_device_is_compatible(np, "qca,qca9560-pll"))
+ qca956x_clocks_init(pll_base);
+
+ if (!clks[ATH79_CLK_MDIO])
+ clks[ATH79_CLK_MDIO] = clks[ATH79_CLK_REF];
+
+ if (of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data)) {
+ pr_err("%pOF: could not register clk provider\n", np);
+ goto err_iounmap;
+ }
+
+ return;
+
+err_iounmap:
+ iounmap(pll_base);
+
+err_clk:
+ clk_put(ref_clk);
+}
+
+CLK_OF_DECLARE(ar7100_clk, "qca,ar7100-pll", ath79_clocks_init_dt);
+CLK_OF_DECLARE(ar7240_clk, "qca,ar7240-pll", ath79_clocks_init_dt);
+CLK_OF_DECLARE(ar9130_clk, "qca,ar9130-pll", ath79_clocks_init_dt);
+CLK_OF_DECLARE(ar9330_clk, "qca,ar9330-pll", ath79_clocks_init_dt);
+CLK_OF_DECLARE(ar9340_clk, "qca,ar9340-pll", ath79_clocks_init_dt);
+CLK_OF_DECLARE(ar9530_clk, "qca,qca9530-pll", ath79_clocks_init_dt);
+CLK_OF_DECLARE(ar9550_clk, "qca,qca9550-pll", ath79_clocks_init_dt);
+CLK_OF_DECLARE(ar9560_clk, "qca,qca9560-pll", ath79_clocks_init_dt);
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
new file mode 100644
index 000000000..137abbc65
--- /dev/null
+++ b/arch/mips/ath79/common.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Atheros AR71XX/AR724X/AR913X common routines
+ *
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include "common.h"
+
+static DEFINE_SPINLOCK(ath79_device_reset_lock);
+
+u32 ath79_cpu_freq;
+EXPORT_SYMBOL_GPL(ath79_cpu_freq);
+
+u32 ath79_ahb_freq;
+EXPORT_SYMBOL_GPL(ath79_ahb_freq);
+
+u32 ath79_ddr_freq;
+EXPORT_SYMBOL_GPL(ath79_ddr_freq);
+
+enum ath79_soc_type ath79_soc;
+unsigned int ath79_soc_rev;
+
+void __iomem *ath79_pll_base;
+void __iomem *ath79_reset_base;
+EXPORT_SYMBOL_GPL(ath79_reset_base);
+static void __iomem *ath79_ddr_base;
+static void __iomem *ath79_ddr_wb_flush_base;
+static void __iomem *ath79_ddr_pci_win_base;
+
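+/*
+ * Map the DDR controller; the write-buffer flush and PCI window registers
+ * sit at SoC-specific offsets within it.
+ */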
+void ath79_ddr_ctrl_init(void)
+{
+ ath79_ddr_base = ioremap(AR71XX_DDR_CTRL_BASE,
+ AR71XX_DDR_CTRL_SIZE);
+ if (soc_is_ar913x() || soc_is_ar724x() || soc_is_ar933x()) {
+ ath79_ddr_wb_flush_base = ath79_ddr_base + 0x7c;
+ ath79_ddr_pci_win_base = NULL;
+ } else {
+ ath79_ddr_wb_flush_base = ath79_ddr_base + 0x9c;
+ ath79_ddr_pci_win_base = ath79_ddr_base + 0x7c;
+ }
+}
+EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
+
+void ath79_ddr_wb_flush(u32 reg)
+{
+ void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
+
+ /* Flush the DDR write buffer. */
+ __raw_writel(0x1, flush_reg);
+ while (__raw_readl(flush_reg) & 0x1)
+ ;
+
+ /* It must be run twice. */
+ __raw_writel(0x1, flush_reg);
+ while (__raw_readl(flush_reg) & 0x1)
+ ;
+}
+EXPORT_SYMBOL_GPL(ath79_ddr_wb_flush);
+
+void ath79_ddr_set_pci_windows(void)
+{
+ BUG_ON(!ath79_ddr_pci_win_base);
+
+ __raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0x0);
+ __raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 0x4);
+ __raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 0x8);
+ __raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 0xc);
+ __raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 0x10);
+ __raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 0x14);
+ __raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 0x18);
+ __raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 0x1c);
+}
+EXPORT_SYMBOL_GPL(ath79_ddr_set_pci_windows);
+
+void ath79_device_reset_set(u32 mask)
+{
+ unsigned long flags;
+ u32 reg;
+ u32 t;
+
+ if (soc_is_ar71xx())
+ reg = AR71XX_RESET_REG_RESET_MODULE;
+ else if (soc_is_ar724x())
+ reg = AR724X_RESET_REG_RESET_MODULE;
+ else if (soc_is_ar913x())
+ reg = AR913X_RESET_REG_RESET_MODULE;
+ else if (soc_is_ar933x())
+ reg = AR933X_RESET_REG_RESET_MODULE;
+ else if (soc_is_ar934x())
+ reg = AR934X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca953x())
+ reg = QCA953X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca955x())
+ reg = QCA955X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca956x() || soc_is_tp9343())
+ reg = QCA956X_RESET_REG_RESET_MODULE;
+ else
+ BUG();
+
+ spin_lock_irqsave(&ath79_device_reset_lock, flags);
+ t = ath79_reset_rr(reg);
+ ath79_reset_wr(reg, t | mask);
+ spin_unlock_irqrestore(&ath79_device_reset_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ath79_device_reset_set);
+
+void ath79_device_reset_clear(u32 mask)
+{
+ unsigned long flags;
+ u32 reg;
+ u32 t;
+
+ if (soc_is_ar71xx())
+ reg = AR71XX_RESET_REG_RESET_MODULE;
+ else if (soc_is_ar724x())
+ reg = AR724X_RESET_REG_RESET_MODULE;
+ else if (soc_is_ar913x())
+ reg = AR913X_RESET_REG_RESET_MODULE;
+ else if (soc_is_ar933x())
+ reg = AR933X_RESET_REG_RESET_MODULE;
+ else if (soc_is_ar934x())
+ reg = AR934X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca953x())
+ reg = QCA953X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca955x())
+ reg = QCA955X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca956x() || soc_is_tp9343())
+ reg = QCA956X_RESET_REG_RESET_MODULE;
+ else
+ BUG();
+
+ spin_lock_irqsave(&ath79_device_reset_lock, flags);
+ t = ath79_reset_rr(reg);
+ ath79_reset_wr(reg, t & ~mask);
+ spin_unlock_irqrestore(&ath79_device_reset_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ath79_device_reset_clear);
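The two helpers above are how platform code pulses a block reset. A minimal sketch, with the reset mask and the settle delay left as assumptions (they are not taken from this patch):

	/* Minimal sketch: pulse one of the AR71XX_RESET_* bits.
	 * Assumes <linux/delay.h>; the 100 us settle time is an assumption. */
	static void example_pulse_reset(u32 mask)
	{
		ath79_device_reset_set(mask);
		udelay(100);
		ath79_device_reset_clear(mask);
	}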
diff --git a/arch/mips/ath79/common.h b/arch/mips/ath79/common.h
new file mode 100644
index 000000000..47fb66d7b
--- /dev/null
+++ b/arch/mips/ath79/common.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Atheros AR71XX/AR724X/AR913X common definitions
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15 BSP
+ */
+
+#ifndef __ATH79_COMMON_H
+#define __ATH79_COMMON_H
+
+#include <linux/types.h>
+
+#define ATH79_MEM_SIZE_MIN (2 * 1024 * 1024)
+#define ATH79_MEM_SIZE_MAX (256 * 1024 * 1024)
+
+void ath79_ddr_ctrl_init(void);
+
+#endif /* __ATH79_COMMON_H */
diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
new file mode 100644
index 000000000..782732cd1
--- /dev/null
+++ b/arch/mips/ath79/early_printk.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Atheros AR7XXX/AR9XXX SoC early printk support
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/serial_reg.h>
+#include <asm/addrspace.h>
+#include <asm/setup.h>
+
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/ar933x_uart.h>
+
+static void (*_prom_putchar)(char);
+
+static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
+{
+ u32 t;
+
+ do {
+ t = __raw_readl(reg);
+ if ((t & mask) == val)
+ break;
+ } while (1);
+}
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+static void prom_putchar_ar71xx(char ch)
+{
+ void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE));
+
+ prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
+ __raw_writel((unsigned char)ch, base + UART_TX * 4);
+ prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
+}
+
+static void prom_putchar_ar933x(char ch)
+{
+ void __iomem *base = (void __iomem *)(KSEG1ADDR(AR933X_UART_BASE));
+
+ prom_putchar_wait(base + AR933X_UART_DATA_REG, AR933X_UART_DATA_TX_CSR,
+ AR933X_UART_DATA_TX_CSR);
+ __raw_writel(AR933X_UART_DATA_TX_CSR | (unsigned char)ch,
+ base + AR933X_UART_DATA_REG);
+ prom_putchar_wait(base + AR933X_UART_DATA_REG, AR933X_UART_DATA_TX_CSR,
+ AR933X_UART_DATA_TX_CSR);
+}
+
+static void prom_putchar_dummy(char ch)
+{
+ /* nothing to do */
+}
+
+static void prom_enable_uart(u32 id)
+{
+ void __iomem *gpio_base;
+ u32 uart_en;
+ u32 t;
+
+ switch (id) {
+ case REV_ID_MAJOR_AR71XX:
+ uart_en = AR71XX_GPIO_FUNC_UART_EN;
+ break;
+
+ case REV_ID_MAJOR_AR7240:
+ case REV_ID_MAJOR_AR7241:
+ case REV_ID_MAJOR_AR7242:
+ uart_en = AR724X_GPIO_FUNC_UART_EN;
+ break;
+
+ case REV_ID_MAJOR_AR913X:
+ uart_en = AR913X_GPIO_FUNC_UART_EN;
+ break;
+
+ case REV_ID_MAJOR_AR9330:
+ case REV_ID_MAJOR_AR9331:
+ uart_en = AR933X_GPIO_FUNC_UART_EN;
+ break;
+
+ case REV_ID_MAJOR_AR9341:
+ case REV_ID_MAJOR_AR9342:
+ case REV_ID_MAJOR_AR9344:
+ /* TODO */
+ default:
+ return;
+ }
+
+ gpio_base = (void __iomem *)KSEG1ADDR(AR71XX_GPIO_BASE);
+ t = __raw_readl(gpio_base + AR71XX_GPIO_REG_FUNC);
+ t |= uart_en;
+ __raw_writel(t, gpio_base + AR71XX_GPIO_REG_FUNC);
+}
+
+static void prom_putchar_init(void)
+{
+ void __iomem *base;
+ u32 id;
+
+ base = (void __iomem *)(KSEG1ADDR(AR71XX_RESET_BASE));
+ id = __raw_readl(base + AR71XX_RESET_REG_REV_ID);
+ id &= REV_ID_MAJOR_MASK;
+
+ switch (id) {
+ case REV_ID_MAJOR_AR71XX:
+ case REV_ID_MAJOR_AR7240:
+ case REV_ID_MAJOR_AR7241:
+ case REV_ID_MAJOR_AR7242:
+ case REV_ID_MAJOR_AR913X:
+ case REV_ID_MAJOR_AR9341:
+ case REV_ID_MAJOR_AR9342:
+ case REV_ID_MAJOR_AR9344:
+ case REV_ID_MAJOR_QCA9533:
+ case REV_ID_MAJOR_QCA9533_V2:
+ case REV_ID_MAJOR_QCA9556:
+ case REV_ID_MAJOR_QCA9558:
+ case REV_ID_MAJOR_TP9343:
+ case REV_ID_MAJOR_QCA956X:
+ _prom_putchar = prom_putchar_ar71xx;
+ break;
+
+ case REV_ID_MAJOR_AR9330:
+ case REV_ID_MAJOR_AR9331:
+ _prom_putchar = prom_putchar_ar933x;
+ break;
+
+ default:
+ _prom_putchar = prom_putchar_dummy;
+ return;
+ }
+
+ prom_enable_uart(id);
+}
+
+void prom_putchar(char ch)
+{
+ if (!_prom_putchar)
+ prom_putchar_init();
+
+ _prom_putchar(ch);
+}
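prom_putchar() above is the single primitive the generic MIPS early console builds on; a minimal string helper, shown only as a sketch, would wrap it like this:

	/* Minimal sketch: emit a string through the early UART, adding the
	 * carriage return most terminals expect before a newline. */
	static void example_early_puts(const char *s)
	{
		while (*s) {
			if (*s == '\n')
				prom_putchar('\r');
			prom_putchar(*s++);
		}
	}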
diff --git a/arch/mips/ath79/prom.c b/arch/mips/ath79/prom.c
new file mode 100644
index 000000000..25724b4e9
--- /dev/null
+++ b/arch/mips/ath79/prom.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Atheros AR71XX/AR724X/AR913X specific prom routines
+ *
+ * Copyright (C) 2015 Laurent Fasnacht <l@libres.ch>
+ * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/initrd.h>
+
+#include <asm/bootinfo.h>
+#include <asm/addrspace.h>
+#include <asm/fw/fw.h>
+
+#include "common.h"
+
+void __init prom_init(void)
+{
+ fw_init_cmdline();
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* Read the initrd address from the firmware environment */
+ initrd_start = fw_getenvl("initrd_start");
+ if (initrd_start) {
+ initrd_start = KSEG0ADDR(initrd_start);
+ initrd_end = initrd_start + fw_getenvl("initrd_size");
+ }
+#endif
+}
+
+void __init prom_free_prom_memory(void)
+{
+ /* We do not have any prom memory to free */
+}
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
new file mode 100644
index 000000000..7e7bf9c2a
--- /dev/null
+++ b/arch/mips/ath79/setup.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Atheros AR71XX/AR724X/AR913X specific setup
+ *
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/of_clk.h>
+#include <linux/of_fdt.h>
+#include <linux/irqchip.h>
+
+#include <asm/bootinfo.h>
+#include <asm/idle.h>
+#include <asm/time.h> /* for mips_hpt_frequency */
+#include <asm/reboot.h> /* for _machine_{restart,halt} */
+#include <asm/prom.h>
+#include <asm/fw/fw.h>
+
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include "common.h"
+
+#define ATH79_SYS_TYPE_LEN 64
+
+static char ath79_sys_type[ATH79_SYS_TYPE_LEN];
+
+static void ath79_restart(char *command)
+{
+ local_irq_disable();
+ ath79_device_reset_set(AR71XX_RESET_FULL_CHIP);
+ for (;;)
+ if (cpu_wait)
+ cpu_wait();
+}
+
+static void ath79_halt(void)
+{
+ while (1)
+ cpu_wait();
+}
+
+static void __init ath79_detect_sys_type(void)
+{
+ char *chip = "????";
+ u32 id;
+ u32 major;
+ u32 minor;
+ u32 rev = 0;
+ u32 ver = 1;
+
+ id = ath79_reset_rr(AR71XX_RESET_REG_REV_ID);
+ major = id & REV_ID_MAJOR_MASK;
+
+ switch (major) {
+ case REV_ID_MAJOR_AR71XX:
+ minor = id & AR71XX_REV_ID_MINOR_MASK;
+ rev = id >> AR71XX_REV_ID_REVISION_SHIFT;
+ rev &= AR71XX_REV_ID_REVISION_MASK;
+ switch (minor) {
+ case AR71XX_REV_ID_MINOR_AR7130:
+ ath79_soc = ATH79_SOC_AR7130;
+ chip = "7130";
+ break;
+
+ case AR71XX_REV_ID_MINOR_AR7141:
+ ath79_soc = ATH79_SOC_AR7141;
+ chip = "7141";
+ break;
+
+ case AR71XX_REV_ID_MINOR_AR7161:
+ ath79_soc = ATH79_SOC_AR7161;
+ chip = "7161";
+ break;
+ }
+ break;
+
+ case REV_ID_MAJOR_AR7240:
+ ath79_soc = ATH79_SOC_AR7240;
+ chip = "7240";
+ rev = id & AR724X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_AR7241:
+ ath79_soc = ATH79_SOC_AR7241;
+ chip = "7241";
+ rev = id & AR724X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_AR7242:
+ ath79_soc = ATH79_SOC_AR7242;
+ chip = "7242";
+ rev = id & AR724X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_AR913X:
+ minor = id & AR913X_REV_ID_MINOR_MASK;
+ rev = id >> AR913X_REV_ID_REVISION_SHIFT;
+ rev &= AR913X_REV_ID_REVISION_MASK;
+ switch (minor) {
+ case AR913X_REV_ID_MINOR_AR9130:
+ ath79_soc = ATH79_SOC_AR9130;
+ chip = "9130";
+ break;
+
+ case AR913X_REV_ID_MINOR_AR9132:
+ ath79_soc = ATH79_SOC_AR9132;
+ chip = "9132";
+ break;
+ }
+ break;
+
+ case REV_ID_MAJOR_AR9330:
+ ath79_soc = ATH79_SOC_AR9330;
+ chip = "9330";
+ rev = id & AR933X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_AR9331:
+ ath79_soc = ATH79_SOC_AR9331;
+ chip = "9331";
+ rev = id & AR933X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_AR9341:
+ ath79_soc = ATH79_SOC_AR9341;
+ chip = "9341";
+ rev = id & AR934X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_AR9342:
+ ath79_soc = ATH79_SOC_AR9342;
+ chip = "9342";
+ rev = id & AR934X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_AR9344:
+ ath79_soc = ATH79_SOC_AR9344;
+ chip = "9344";
+ rev = id & AR934X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_QCA9533_V2:
+ ver = 2;
+ ath79_soc_rev = 2;
+ fallthrough;
+ case REV_ID_MAJOR_QCA9533:
+ ath79_soc = ATH79_SOC_QCA9533;
+ chip = "9533";
+ rev = id & QCA953X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_QCA9556:
+ ath79_soc = ATH79_SOC_QCA9556;
+ chip = "9556";
+ rev = id & QCA955X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_QCA9558:
+ ath79_soc = ATH79_SOC_QCA9558;
+ chip = "9558";
+ rev = id & QCA955X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_QCA956X:
+ ath79_soc = ATH79_SOC_QCA956X;
+ chip = "956X";
+ rev = id & QCA956X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_TP9343:
+ ath79_soc = ATH79_SOC_TP9343;
+ chip = "9343";
+ rev = id & QCA956X_REV_ID_REVISION_MASK;
+ break;
+
+ default:
+ panic("ath79: unknown SoC, id:0x%08x", id);
+ }
+
+ if (ver == 1)
+ ath79_soc_rev = rev;
+
+ if (soc_is_qca953x() || soc_is_qca955x() || soc_is_qca956x())
+ sprintf(ath79_sys_type, "Qualcomm Atheros QCA%s ver %u rev %u",
+ chip, ver, rev);
+ else if (soc_is_tp9343())
+ sprintf(ath79_sys_type, "Qualcomm Atheros TP%s rev %u",
+ chip, rev);
+ else
+ sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev);
+ pr_info("SoC: %s\n", ath79_sys_type);
+}
+
+const char *get_system_type(void)
+{
+ return ath79_sys_type;
+}
+
+unsigned int get_c0_compare_int(void)
+{
+ return CP0_LEGACY_COMPARE_IRQ;
+}
+
+void __init plat_mem_setup(void)
+{
+ unsigned long fdt_start;
+
+ set_io_port_base(KSEG1);
+
+ /* Get the position of the FDT passed by the bootloader */
+ fdt_start = fw_getenvl("fdt_start");
+ if (fdt_start)
+ __dt_setup_arch((void *)KSEG0ADDR(fdt_start));
+ else if (fw_passed_dtb)
+ __dt_setup_arch((void *)KSEG0ADDR(fw_passed_dtb));
+
+ ath79_reset_base = ioremap(AR71XX_RESET_BASE,
+ AR71XX_RESET_SIZE);
+ ath79_pll_base = ioremap(AR71XX_PLL_BASE,
+ AR71XX_PLL_SIZE);
+ ath79_detect_sys_type();
+ ath79_ddr_ctrl_init();
+
+ detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
+
+ _machine_restart = ath79_restart;
+ _machine_halt = ath79_halt;
+ pm_power_off = ath79_halt;
+}
+
+void __init plat_time_init(void)
+{
+ struct device_node *np;
+ struct clk *clk;
+ unsigned long cpu_clk_rate;
+
+ of_clk_init(NULL);
+
+ np = of_get_cpu_node(0, NULL);
+ if (!np) {
+ pr_err("Failed to get CPU node\n");
+ return;
+ }
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk));
+ return;
+ }
+
+ cpu_clk_rate = clk_get_rate(clk);
+
+ pr_info("CPU clock: %lu.%03lu MHz\n",
+ cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000);
+
+ mips_hpt_frequency = cpu_clk_rate / 2;
+
+ clk_put(clk);
+}
+
+void __init arch_init_irq(void)
+{
+ irqchip_init();
+}
+
+void __init device_tree_init(void)
+{
+ unflatten_and_copy_device_tree();
+}
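plat_time_init() above programs the CP0 timer at half the CPU clock; as a worked example of that ratio (values assumed, not taken from the patch), a 400 MHz CPU clock gives a 200 MHz counter, i.e. 5 ns per tick. A hypothetical conversion helper might look like:

	/* Minimal sketch: convert CP0 count ticks to nanoseconds using the
	 * half-rate mips_hpt_frequency set up above.  Assumes <linux/math64.h>;
	 * the naive multiply overflows for intervals past a few tens of seconds. */
	static inline u64 example_ticks_to_ns(u64 ticks)
	{
		return div64_u64(ticks * NSEC_PER_SEC, mips_hpt_frequency);
	}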
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
new file mode 100644
index 000000000..490bb6da7
--- /dev/null
+++ b/arch/mips/bcm47xx/Kconfig
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0
+if BCM47XX
+
+config BCM47XX_SSB
+ bool "SSB Support for Broadcom BCM47XX"
+ select SYS_HAS_CPU_BMIPS32_3300
+ select SSB
+ select SSB_HOST_SOC
+ select SSB_DRIVER_MIPS
+ select SSB_DRIVER_EXTIF
+ select SSB_EMBEDDED
+ select SSB_B43_PCI_BRIDGE if PCI
+ select SSB_DRIVER_PCICORE if PCI
+ select SSB_PCICORE_HOSTMODE if PCI
+ select SSB_DRIVER_GPIO
+ default y
+ help
+ Add support for old Broadcom BCM47xx boards using the Sonics Silicon Backplane (SSB).
+
+ This will generate an image with support for SSB and MIPS32 R1 instruction set.
+
+config BCM47XX_BCMA
+ bool "BCMA Support for Broadcom BCM47XX"
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_SUPPORTS_HIGHMEM
+ select CPU_MIPSR2_IRQ_VI
+ select BCMA
+ select BCMA_HOST_SOC
+ select BCMA_DRIVER_MIPS
+ select BCMA_DRIVER_PCI if PCI
+ select BCMA_DRIVER_PCI_HOSTMODE if PCI
+ select BCMA_DRIVER_GPIO
+ default y
+ help
+ Add support for newer Broadcom BCM47xx boards using the Broadcom-specific Advanced Microcontroller Bus (BCMA).
+
+ This will generate an image with support for BCMA and MIPS32 R2 instruction set.
+
+endif
diff --git a/arch/mips/bcm47xx/Makefile b/arch/mips/bcm47xx/Makefile
new file mode 100644
index 000000000..c7438a410
--- /dev/null
+++ b/arch/mips/bcm47xx/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the BCM47XX specific kernel interface routines
+# under Linux.
+#
+
+obj-y += irq.o prom.o serial.o setup.o time.o
+obj-y += board.o buttons.o leds.o workarounds.o
diff --git a/arch/mips/bcm47xx/Platform b/arch/mips/bcm47xx/Platform
new file mode 100644
index 000000000..833b204fe
--- /dev/null
+++ b/arch/mips/bcm47xx/Platform
@@ -0,0 +1,7 @@
+#
+# Broadcom BCM47XX boards
+#
+cflags-$(CONFIG_BCM47XX) += \
+ -I$(srctree)/arch/mips/include/asm/mach-bcm47xx
+load-$(CONFIG_BCM47XX) := 0xffffffff80001000
+zload-$(CONFIG_BCM47XX) += 0xffffffff80400000
diff --git a/arch/mips/bcm47xx/bcm47xx_private.h b/arch/mips/bcm47xx/bcm47xx_private.h
new file mode 100644
index 000000000..bb96743bb
--- /dev/null
+++ b/arch/mips/bcm47xx/bcm47xx_private.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_BCM47XX_PRIVATE_H_
+#define LINUX_BCM47XX_PRIVATE_H_
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) "bcm47xx: " fmt
+#endif
+
+#include <linux/kernel.h>
+
+/* prom.c */
+void __init bcm47xx_prom_highmem_init(void);
+
+/* buttons.c */
+int __init bcm47xx_buttons_register(void);
+
+/* leds.c */
+void __init bcm47xx_leds_register(void);
+
+/* setup.c */
+void __init bcm47xx_bus_setup(void);
+
+/* workarounds.c */
+void __init bcm47xx_workarounds(void);
+
+#endif
diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
new file mode 100644
index 000000000..35266a70e
--- /dev/null
+++ b/arch/mips/bcm47xx/board.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <bcm47xx.h>
+#include <bcm47xx_board.h>
+
+struct bcm47xx_board_type {
+ const enum bcm47xx_board board;
+ const char *name;
+};
+
+struct bcm47xx_board_type_list1 {
+ struct bcm47xx_board_type board;
+ const char *value1;
+};
+
+struct bcm47xx_board_type_list2 {
+ struct bcm47xx_board_type board;
+ const char *value1;
+ const char *value2;
+};
+
+struct bcm47xx_board_type_list3 {
+ struct bcm47xx_board_type board;
+ const char *value1;
+ const char *value2;
+ const char *value3;
+};
+
+struct bcm47xx_board_store {
+ enum bcm47xx_board board;
+ char name[BCM47XX_BOARD_MAX_NAME];
+};
+
+/* model_name */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_model_name[] __initconst = {
+ {{BCM47XX_BOARD_DLINK_DIR130, "D-Link DIR-130"}, "DIR-130"},
+ {{BCM47XX_BOARD_DLINK_DIR330, "D-Link DIR-330"}, "DIR-330"},
+ { {0}, NULL},
+};
+
+/* hardware_version */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_hardware_version[] __initconst = {
+ {{BCM47XX_BOARD_ASUS_RTN10U, "Asus RT-N10U"}, "RTN10U"},
+ {{BCM47XX_BOARD_ASUS_RTN10D, "Asus RT-N10D"}, "RTN10D"},
+ {{BCM47XX_BOARD_ASUS_RTN12, "Asus RT-N12"}, "RT-N12"},
+ {{BCM47XX_BOARD_ASUS_RTN12B1, "Asus RT-N12B1"}, "RTN12B1"},
+ {{BCM47XX_BOARD_ASUS_RTN12C1, "Asus RT-N12C1"}, "RTN12C1"},
+ {{BCM47XX_BOARD_ASUS_RTN12D1, "Asus RT-N12D1"}, "RTN12D1"},
+ {{BCM47XX_BOARD_ASUS_RTN12HP, "Asus RT-N12HP"}, "RTN12HP"},
+ {{BCM47XX_BOARD_ASUS_RTN16, "Asus RT-N16"}, "RT-N16-"},
+ {{BCM47XX_BOARD_ASUS_WL320GE, "Asus WL320GE"}, "WL320G-"},
+ {{BCM47XX_BOARD_ASUS_WL330GE, "Asus WL330GE"}, "WL330GE-"},
+ {{BCM47XX_BOARD_ASUS_WL500GD, "Asus WL500GD"}, "WL500gd-"},
+ {{BCM47XX_BOARD_ASUS_WL500GPV1, "Asus WL500GP V1"}, "WL500gp-"},
+ {{BCM47XX_BOARD_ASUS_WL500GPV2, "Asus WL500GP V2"}, "WL500GPV2-"},
+ {{BCM47XX_BOARD_ASUS_WL500W, "Asus WL500W"}, "WL500gW-"},
+ {{BCM47XX_BOARD_ASUS_WL520GC, "Asus WL520GC"}, "WL520GC-"},
+ {{BCM47XX_BOARD_ASUS_WL520GU, "Asus WL520GU"}, "WL520GU-"},
+ {{BCM47XX_BOARD_BELKIN_F7D3301, "Belkin F7D3301"}, "F7D3301"},
+ {{BCM47XX_BOARD_BELKIN_F7D3302, "Belkin F7D3302"}, "F7D3302"},
+ {{BCM47XX_BOARD_BELKIN_F7D4301, "Belkin F7D4301"}, "F7D4301"},
+ {{BCM47XX_BOARD_BELKIN_F7D4302, "Belkin F7D4302"}, "F7D4302"},
+ {{BCM47XX_BOARD_BELKIN_F7D4401, "Belkin F7D4401"}, "F7D4401"},
+ { {0}, NULL},
+};
+
+/* hardware_version, boardnum */
+static const
+struct bcm47xx_board_type_list2 bcm47xx_board_list_hw_version_num[] __initconst = {
+ {{BCM47XX_BOARD_MICROSOFT_MN700, "Microsoft MN-700"}, "WL500-", "mn700"},
+ {{BCM47XX_BOARD_ASUS_WL500G, "Asus WL500G"}, "WL500-", "asusX"},
+ { {0}, NULL},
+};
+
+/* productid */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_productid[] __initconst = {
+ {{BCM47XX_BOARD_ASUS_RTAC66U, "Asus RT-AC66U"}, "RT-AC66U"},
+ {{BCM47XX_BOARD_ASUS_RTN10, "Asus RT-N10"}, "RT-N10"},
+ {{BCM47XX_BOARD_ASUS_RTN10D, "Asus RT-N10D"}, "RT-N10D"},
+ {{BCM47XX_BOARD_ASUS_RTN15U, "Asus RT-N15U"}, "RT-N15U"},
+ {{BCM47XX_BOARD_ASUS_RTN16, "Asus RT-N16"}, "RT-N16"},
+ {{BCM47XX_BOARD_ASUS_RTN53, "Asus RT-N53"}, "RT-N53"},
+ {{BCM47XX_BOARD_ASUS_RTN66U, "Asus RT-N66U"}, "RT-N66U"},
+ {{BCM47XX_BOARD_ASUS_WL300G, "Asus WL300G"}, "WL300g"},
+ {{BCM47XX_BOARD_ASUS_WLHDD, "Asus WLHDD"}, "WLHDD"},
+ { {0}, NULL},
+};
+
+/* ModelId */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_ModelId[] __initconst = {
+ {{BCM47XX_BOARD_DELL_TM2300, "Dell TrueMobile 2300"}, "WX-5565"},
+ {{BCM47XX_BOARD_MOTOROLA_WE800G, "Motorola WE800G"}, "WE800G"},
+ {{BCM47XX_BOARD_MOTOROLA_WR850GP, "Motorola WR850GP"}, "WR850GP"},
+ {{BCM47XX_BOARD_MOTOROLA_WR850GV2V3, "Motorola WR850G"}, "WR850G"},
+ { {0}, NULL},
+};
+
+/* melco_id or buf1falo_id */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_melco_id[] __initconst = {
+ {{BCM47XX_BOARD_BUFFALO_WBR2_G54, "Buffalo WBR2-G54"}, "29bb0332"},
+ {{BCM47XX_BOARD_BUFFALO_WHR2_A54G54, "Buffalo WHR2-A54G54"}, "290441dd"},
+ {{BCM47XX_BOARD_BUFFALO_WHR_G125, "Buffalo WHR-G125"}, "32093"},
+ {{BCM47XX_BOARD_BUFFALO_WHR_G54S, "Buffalo WHR-G54S"}, "30182"},
+ {{BCM47XX_BOARD_BUFFALO_WHR_HP_G54, "Buffalo WHR-HP-G54"}, "30189"},
+ {{BCM47XX_BOARD_BUFFALO_WLA2_G54L, "Buffalo WLA2-G54L"}, "29129"},
+ {{BCM47XX_BOARD_BUFFALO_WZR_G300N, "Buffalo WZR-G300N"}, "31120"},
+ {{BCM47XX_BOARD_BUFFALO_WZR_RS_G54, "Buffalo WZR-RS-G54"}, "30083"},
+ {{BCM47XX_BOARD_BUFFALO_WZR_RS_G54HP, "Buffalo WZR-RS-G54HP"}, "30103"},
+ { {0}, NULL},
+};
+
+/* boot_hw_model, boot_hw_ver */
+static const
+struct bcm47xx_board_type_list2 bcm47xx_board_list_boot_hw[] __initconst = {
+ /* like WRT160N v3.0 */
+ {{BCM47XX_BOARD_CISCO_M10V1, "Cisco M10"}, "M10", "1.0"},
+ /* like WRT310N v2.0 */
+ {{BCM47XX_BOARD_CISCO_M20V1, "Cisco M20"}, "M20", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_E900V1, "Linksys E900 V1"}, "E900", "1.0"},
+ /* like WRT160N v3.0 */
+ {{BCM47XX_BOARD_LINKSYS_E1000V1, "Linksys E1000 V1"}, "E100", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_E1000V2, "Linksys E1000 V2"}, "E1000", "2.0"},
+ {{BCM47XX_BOARD_LINKSYS_E1000V21, "Linksys E1000 V2.1"}, "E1000", "2.1"},
+ {{BCM47XX_BOARD_LINKSYS_E1200V2, "Linksys E1200 V2"}, "E1200", "2.0"},
+ {{BCM47XX_BOARD_LINKSYS_E2000V1, "Linksys E2000 V1"}, "Linksys E2000", "1.0"},
+ /* like WRT610N v2.0 */
+ {{BCM47XX_BOARD_LINKSYS_E3000V1, "Linksys E3000 V1"}, "E300", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_E3200V1, "Linksys E3200 V1"}, "E3200", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_E4200V1, "Linksys E4200 V1"}, "E4200", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_WRT150NV11, "Linksys WRT150N V1.1"}, "WRT150N", "1.1"},
+ {{BCM47XX_BOARD_LINKSYS_WRT150NV1, "Linksys WRT150N V1"}, "WRT150N", "1"},
+ {{BCM47XX_BOARD_LINKSYS_WRT160NV1, "Linksys WRT160N V1"}, "WRT160N", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_WRT160NV3, "Linksys WRT160N V3"}, "WRT160N", "3.0"},
+ {{BCM47XX_BOARD_LINKSYS_WRT300NV11, "Linksys WRT300N V1.1"}, "WRT300N", "1.1"},
+ {{BCM47XX_BOARD_LINKSYS_WRT310NV1, "Linksys WRT310N V1"}, "WRT310N", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_WRT310NV2, "Linksys WRT310N V2"}, "WRT310N", "2.0"},
+ {{BCM47XX_BOARD_LINKSYS_WRT54G3GV2, "Linksys WRT54G3GV2-VF"}, "WRT54G3GV2-VF", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_WRT610NV1, "Linksys WRT610N V1"}, "WRT610N", "1.0"},
+ {{BCM47XX_BOARD_LINKSYS_WRT610NV2, "Linksys WRT610N V2"}, "WRT610N", "2.0"},
+ { {0}, NULL},
+};
+
+/* board_id */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_board_id[] __initconst = {
+ {{BCM47XX_BOARD_LUXUL_ABR_4400_V1, "Luxul ABR-4400 V1"}, "luxul_abr4400_v1"},
+ {{BCM47XX_BOARD_LUXUL_XAP_310_V1, "Luxul XAP-310 V1"}, "luxul_xap310_v1"},
+ {{BCM47XX_BOARD_LUXUL_XAP_1210_V1, "Luxul XAP-1210 V1"}, "luxul_xap1210_v1"},
+ {{BCM47XX_BOARD_LUXUL_XAP_1230_V1, "Luxul XAP-1230 V1"}, "luxul_xap1230_v1"},
+ {{BCM47XX_BOARD_LUXUL_XAP_1240_V1, "Luxul XAP-1240 V1"}, "luxul_xap1240_v1"},
+ {{BCM47XX_BOARD_LUXUL_XAP_1500_V1, "Luxul XAP-1500 V1"}, "luxul_xap1500_v1"},
+ {{BCM47XX_BOARD_LUXUL_XBR_4400_V1, "Luxul XBR-4400 V1"}, "luxul_xbr4400_v1"},
+ {{BCM47XX_BOARD_LUXUL_XVW_P30_V1, "Luxul XVW-P30 V1"}, "luxul_xvwp30_v1"},
+ {{BCM47XX_BOARD_LUXUL_XWR_600_V1, "Luxul XWR-600 V1"}, "luxul_xwr600_v1"},
+ {{BCM47XX_BOARD_LUXUL_XWR_1750_V1, "Luxul XWR-1750 V1"}, "luxul_xwr1750_v1"},
+ {{BCM47XX_BOARD_NETGEAR_R6200_V1, "Netgear R6200 V1"}, "U12H192T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WGR614V8, "Netgear WGR614 V8"}, "U12H072T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WGR614V9, "Netgear WGR614 V9"}, "U12H094T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WGR614_V10, "Netgear WGR614 V10"}, "U12H139T01_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR3300, "Netgear WNDR3300"}, "U12H093T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR3400V1, "Netgear WNDR3400 V1"}, "U12H155T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR3400V2, "Netgear WNDR3400 V2"}, "U12H187T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR3400_V3, "Netgear WNDR3400 V3"}, "U12H208T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR3400VCNA, "Netgear WNDR3400 Vcna"}, "U12H155T01_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR3700V3, "Netgear WNDR3700 V3"}, "U12H194T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR4000, "Netgear WNDR4000"}, "U12H181T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR4500V1, "Netgear WNDR4500 V1"}, "U12H189T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNDR4500V2, "Netgear WNDR4500 V2"}, "U12H224T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNR1000_V3, "Netgear WNR1000 V3"}, "U12H139T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNR1000_V3, "Netgear WNR1000 V3"}, "U12H139T50_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNR2000, "Netgear WNR2000"}, "U12H114T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNR3500L, "Netgear WNR3500L"}, "U12H136T99_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNR3500U, "Netgear WNR3500U"}, "U12H136T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNR3500V2, "Netgear WNR3500 V2"}, "U12H127T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNR3500V2VC, "Netgear WNR3500 V2vc"}, "U12H127T70_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNR834BV2, "Netgear WNR834B V2"}, "U12H081T00_NETGEAR"},
+ { {0}, NULL},
+};
+
+/* boardtype, boardnum, boardrev */
+static const
+struct bcm47xx_board_type_list3 bcm47xx_board_list_board[] __initconst = {
+ {{BCM47XX_BOARD_HUAWEI_E970, "Huawei E970"}, "0x048e", "0x5347", "0x11"},
+ {{BCM47XX_BOARD_PHICOMM_M1, "Phicomm M1"}, "0x0590", "80", "0x1104"},
+ {{BCM47XX_BOARD_ZTE_H218N, "ZTE H218N"}, "0x053d", "1234", "0x1305"},
+ {{BCM47XX_BOARD_NETGEAR_WNR3500L, "Netgear WNR3500L"}, "0x04CF", "3500", "02"},
+ {{BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0101, "Linksys WRT54G/GS/GL"}, "0x0101", "42", "0x10"},
+ {{BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0467, "Linksys WRT54G/GS/GL"}, "0x0467", "42", "0x10"},
+ {{BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0708, "Linksys WRT54G/GS/GL"}, "0x0708", "42", "0x10"},
+ { {0}, NULL},
+};
+
+/* boardtype, boardrev */
+static const
+struct bcm47xx_board_type_list2 bcm47xx_board_list_board_type_rev[] __initconst = {
+ {{BCM47XX_BOARD_SIEMENS_SE505V2, "Siemens SE505 V2"}, "0x0101", "0x10"},
+ { {0}, NULL},
+};
+
+/*
+ * Some devices don't use any common NVRAM entry for identification and they
+ * have only one model-specific variable.
+ * They don't deserve their own arrays, so group them here in a key-value array.
+ */
+static const
+struct bcm47xx_board_type_list2 bcm47xx_board_list_key_value[] __initconst = {
+ {{BCM47XX_BOARD_ASUS_WL700GE, "Asus WL700"}, "model_no", "WL700"},
+ {{BCM47XX_BOARD_LINKSYS_WRT300N_V1, "Linksys WRT300N V1"}, "router_name", "WRT300N"},
+ {{BCM47XX_BOARD_LINKSYS_WRT600N_V11, "Linksys WRT600N V1.1"}, "Model_Name", "WRT600N"},
+ {{BCM47XX_BOARD_LINKSYS_WRTSL54GS, "Linksys WRTSL54GS"}, "machine_name", "WRTSL54GS"},
+ { {0}, NULL},
+};
+
+static const
+struct bcm47xx_board_type bcm47xx_board_unknown[] __initconst = {
+ {BCM47XX_BOARD_UNKNOWN, "Unknown Board"},
+};
+
+static struct bcm47xx_board_store bcm47xx_board = {BCM47XX_BOARD_NO, "Unknown Board"};
+
+static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void)
+{
+ char buf1[30];
+ char buf2[30];
+ char buf3[30];
+ const struct bcm47xx_board_type_list1 *e1;
+ const struct bcm47xx_board_type_list2 *e2;
+ const struct bcm47xx_board_type_list3 *e3;
+
+ if (bcm47xx_nvram_getenv("model_name", buf1, sizeof(buf1)) >= 0) {
+ for (e1 = bcm47xx_board_list_model_name; e1->value1; e1++) {
+ if (!strcmp(buf1, e1->value1))
+ return &e1->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("hardware_version", buf1, sizeof(buf1)) >= 0) {
+ for (e1 = bcm47xx_board_list_hardware_version; e1->value1; e1++) {
+ if (strstarts(buf1, e1->value1))
+ return &e1->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("hardware_version", buf1, sizeof(buf1)) >= 0 &&
+ bcm47xx_nvram_getenv("boardnum", buf2, sizeof(buf2)) >= 0) {
+ for (e2 = bcm47xx_board_list_hw_version_num; e2->value1; e2++) {
+ if (!strstarts(buf1, e2->value1) &&
+ !strcmp(buf2, e2->value2))
+ return &e2->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("productid", buf1, sizeof(buf1)) >= 0) {
+ for (e1 = bcm47xx_board_list_productid; e1->value1; e1++) {
+ if (!strcmp(buf1, e1->value1))
+ return &e1->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("ModelId", buf1, sizeof(buf1)) >= 0) {
+ for (e1 = bcm47xx_board_list_ModelId; e1->value1; e1++) {
+ if (!strcmp(buf1, e1->value1))
+ return &e1->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("melco_id", buf1, sizeof(buf1)) >= 0 ||
+ bcm47xx_nvram_getenv("buf1falo_id", buf1, sizeof(buf1)) >= 0) {
+ /* buffalo hardware, check id for specific hardware matches */
+ for (e1 = bcm47xx_board_list_melco_id; e1->value1; e1++) {
+ if (!strcmp(buf1, e1->value1))
+ return &e1->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("boot_hw_model", buf1, sizeof(buf1)) >= 0 &&
+ bcm47xx_nvram_getenv("boot_hw_ver", buf2, sizeof(buf2)) >= 0) {
+ for (e2 = bcm47xx_board_list_boot_hw; e2->value1; e2++) {
+ if (!strcmp(buf1, e2->value1) &&
+ !strcmp(buf2, e2->value2))
+ return &e2->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("board_id", buf1, sizeof(buf1)) >= 0) {
+ for (e1 = bcm47xx_board_list_board_id; e1->value1; e1++) {
+ if (!strcmp(buf1, e1->value1))
+ return &e1->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("boardtype", buf1, sizeof(buf1)) >= 0 &&
+ bcm47xx_nvram_getenv("boardnum", buf2, sizeof(buf2)) >= 0 &&
+ bcm47xx_nvram_getenv("boardrev", buf3, sizeof(buf3)) >= 0) {
+ for (e3 = bcm47xx_board_list_board; e3->value1; e3++) {
+ if (!strcmp(buf1, e3->value1) &&
+ !strcmp(buf2, e3->value2) &&
+ !strcmp(buf3, e3->value3))
+ return &e3->board;
+ }
+ }
+
+ if (bcm47xx_nvram_getenv("boardtype", buf1, sizeof(buf1)) >= 0 &&
+ bcm47xx_nvram_getenv("boardrev", buf2, sizeof(buf2)) >= 0 &&
+ bcm47xx_nvram_getenv("boardnum", buf3, sizeof(buf3)) == -ENOENT) {
+ for (e2 = bcm47xx_board_list_board_type_rev; e2->value1; e2++) {
+ if (!strcmp(buf1, e2->value1) &&
+ !strcmp(buf2, e2->value2))
+ return &e2->board;
+ }
+ }
+
+ for (e2 = bcm47xx_board_list_key_value; e2->value1; e2++) {
+ if (bcm47xx_nvram_getenv(e2->value1, buf1, sizeof(buf1)) >= 0) {
+ if (!strcmp(buf1, e2->value2))
+ return &e2->board;
+ }
+ }
+
+ return bcm47xx_board_unknown;
+}
+
+void __init bcm47xx_board_detect(void)
+{
+ int err;
+ char buf[10];
+ const struct bcm47xx_board_type *board_detected;
+
+ if (bcm47xx_board.board != BCM47XX_BOARD_NO)
+ return;
+
+ /* check if the nvram is available */
+ err = bcm47xx_nvram_getenv("boardtype", buf, sizeof(buf));
+
+ /* init of nvram failed, probably too early now */
+ if (err == -ENXIO)
+ return;
+
+ board_detected = bcm47xx_board_get_nvram();
+ bcm47xx_board.board = board_detected->board;
+ strlcpy(bcm47xx_board.name, board_detected->name,
+ BCM47XX_BOARD_MAX_NAME);
+}
+
+enum bcm47xx_board bcm47xx_board_get(void)
+{
+ return bcm47xx_board.board;
+}
+EXPORT_SYMBOL(bcm47xx_board_get);
+
+const char *bcm47xx_board_get_name(void)
+{
+ return bcm47xx_board.name;
+}
+EXPORT_SYMBOL(bcm47xx_board_get_name);
diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c
new file mode 100644
index 000000000..535d84add
--- /dev/null
+++ b/arch/mips/bcm47xx/buttons.c
@@ -0,0 +1,718 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcm47xx_private.h"
+
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+#include <linux/interrupt.h>
+#include <bcm47xx_board.h>
+#include <bcm47xx.h>
+
+/**************************************************
+ * Database
+ **************************************************/
+
+#define BCM47XX_GPIO_KEY(_gpio, _code) \
+ { \
+ .code = _code, \
+ .gpio = _gpio, \
+ .active_low = 1, \
+ }
+
+#define BCM47XX_GPIO_KEY_H(_gpio, _code) \
+ { \
+ .code = _code, \
+ .gpio = _gpio, \
+ }
+
+/* Asus */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_rtn12[] __initconst = {
+ BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(1, KEY_RESTART),
+ BCM47XX_GPIO_KEY(4, BTN_0), /* Router mode */
+ BCM47XX_GPIO_KEY(5, BTN_1), /* Repeater mode */
+ BCM47XX_GPIO_KEY(6, BTN_2), /* AP mode */
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_rtn16[] __initconst = {
+ BCM47XX_GPIO_KEY(6, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(8, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_rtn66u[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(9, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl300g[] __initconst = {
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl320ge[] __initconst = {
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl330ge[] __initconst = {
+ BCM47XX_GPIO_KEY(2, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl500g[] __initconst = {
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl500gd[] __initconst = {
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl500gpv1[] __initconst = {
+ BCM47XX_GPIO_KEY(0, KEY_RESTART),
+ BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl500gpv2[] __initconst = {
+ BCM47XX_GPIO_KEY(2, KEY_RESTART),
+ BCM47XX_GPIO_KEY(3, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl500w[] __initconst = {
+ BCM47XX_GPIO_KEY_H(6, KEY_RESTART),
+ BCM47XX_GPIO_KEY_H(7, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl520gc[] __initconst = {
+ BCM47XX_GPIO_KEY(2, KEY_RESTART),
+ BCM47XX_GPIO_KEY(3, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl520gu[] __initconst = {
+ BCM47XX_GPIO_KEY(2, KEY_RESTART),
+ BCM47XX_GPIO_KEY(3, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wl700ge[] __initconst = {
+ BCM47XX_GPIO_KEY(0, KEY_POWER), /* Hard disk power switch */
+ BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON), /* EZSetup */
+ BCM47XX_GPIO_KEY(6, KEY_COPY), /* Copy data from USB to internal disk */
+ BCM47XX_GPIO_KEY(7, KEY_RESTART), /* Hard reset */
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_asus_wlhdd[] __initconst = {
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+/* Huawei */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_huawei_e970[] __initconst = {
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+/* Belkin */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_belkin_f7d4301[] __initconst = {
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+ BCM47XX_GPIO_KEY(8, KEY_WPS_BUTTON),
+};
+
+/* Buffalo */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_buffalo_whr2_a54g54[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_buffalo_whr_g125[] __initconst = {
+ BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(4, KEY_RESTART),
+ BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode switch */
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_buffalo_whr_g54s[] __initconst = {
+ BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY_H(4, KEY_RESTART),
+ BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode switch */
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_buffalo_whr_hp_g54[] __initconst = {
+ BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(4, KEY_RESTART),
+ BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode switch */
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_buffalo_wzr_g300n[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_buffalo_wzr_rs_g54[] __initconst = {
+ BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(4, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_buffalo_wzr_rs_g54hp[] __initconst = {
+ BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(4, KEY_RESTART),
+};
+
+/* Dell */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_dell_tm2300[] __initconst = {
+ BCM47XX_GPIO_KEY(0, KEY_RESTART),
+};
+
+/* D-Link */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_dlink_dir130[] __initconst = {
+ BCM47XX_GPIO_KEY(3, KEY_RESTART),
+ BCM47XX_GPIO_KEY(7, KEY_UNKNOWN),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_dlink_dir330[] __initconst = {
+ BCM47XX_GPIO_KEY(3, KEY_RESTART),
+ BCM47XX_GPIO_KEY(7, KEY_UNKNOWN),
+};
+
+/* Linksys */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_e1000v1[] __initconst = {
+ BCM47XX_GPIO_KEY(5, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_e1000v21[] __initconst = {
+ BCM47XX_GPIO_KEY(9, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(10, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_e2000v1[] __initconst = {
+ BCM47XX_GPIO_KEY(5, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(8, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_e3000v1[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_e3200v1[] __initconst = {
+ BCM47XX_GPIO_KEY(5, KEY_RESTART),
+ BCM47XX_GPIO_KEY(8, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_e4200v1[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt150nv1[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt150nv11[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt160nv1[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt160nv3[] __initconst = {
+ BCM47XX_GPIO_KEY(5, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt300n_v1[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt300nv11[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_UNKNOWN),
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt310nv1[] __initconst = {
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+ BCM47XX_GPIO_KEY(8, KEY_UNKNOWN),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt54g3gv2[] __initconst = {
+ BCM47XX_GPIO_KEY(5, KEY_WIMAX),
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt54g_generic[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt610nv1[] __initconst = {
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+ BCM47XX_GPIO_KEY(8, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt610nv2[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrtsl54gs[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+/* Luxul */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_luxul_abr_4400_v1[] = {
+ BCM47XX_GPIO_KEY(14, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_luxul_xap_310_v1[] = {
+ BCM47XX_GPIO_KEY(20, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_luxul_xap_1210_v1[] = {
+ BCM47XX_GPIO_KEY(8, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_luxul_xap_1230_v1[] = {
+ BCM47XX_GPIO_KEY(8, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_luxul_xap_1240_v1[] = {
+ BCM47XX_GPIO_KEY(8, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_luxul_xap_1500_v1[] = {
+ BCM47XX_GPIO_KEY(14, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_luxul_xbr_4400_v1[] = {
+ BCM47XX_GPIO_KEY(14, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_luxul_xvw_p30_v1[] = {
+ BCM47XX_GPIO_KEY(20, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_luxul_xwr_600_v1[] = {
+ BCM47XX_GPIO_KEY(8, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_luxul_xwr_1750_v1[] = {
+ BCM47XX_GPIO_KEY(14, KEY_RESTART),
+};
+
+/* Microsoft */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_microsoft_nm700[] __initconst = {
+ BCM47XX_GPIO_KEY(7, KEY_RESTART),
+};
+
+/* Motorola */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_motorola_we800g[] __initconst = {
+ BCM47XX_GPIO_KEY(0, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_motorola_wr850gp[] __initconst = {
+ BCM47XX_GPIO_KEY(5, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_motorola_wr850gv2v3[] __initconst = {
+ BCM47XX_GPIO_KEY(5, KEY_RESTART),
+};
+
+/* Netgear */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_netgear_r6200_v1[] __initconst = {
+ BCM47XX_GPIO_KEY(2, KEY_RFKILL),
+ BCM47XX_GPIO_KEY(3, KEY_RESTART),
+ BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_netgear_wndr3400v1[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_RESTART),
+ BCM47XX_GPIO_KEY(6, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(8, KEY_RFKILL),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_netgear_wndr3400_v3[] __initconst = {
+ BCM47XX_GPIO_KEY(12, KEY_RESTART),
+ BCM47XX_GPIO_KEY(23, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_netgear_wndr3700v3[] __initconst = {
+ BCM47XX_GPIO_KEY(2, KEY_RFKILL),
+ BCM47XX_GPIO_KEY(3, KEY_RESTART),
+ BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_netgear_wndr4500v1[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(5, KEY_RFKILL),
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_netgear_wnr1000_v3[] __initconst = {
+ BCM47XX_GPIO_KEY(2, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(3, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_netgear_wnr3500lv1[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_RESTART),
+ BCM47XX_GPIO_KEY(6, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_netgear_wnr834bv2[] __initconst = {
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+/* SimpleTech */
+
+static const struct gpio_keys_button
+bcm47xx_buttons_simpletech_simpleshare[] __initconst = {
+ BCM47XX_GPIO_KEY(0, KEY_RESTART),
+};
+
+/**************************************************
+ * Init
+ **************************************************/
+
+static struct gpio_keys_platform_data bcm47xx_button_pdata;
+
+static struct platform_device bcm47xx_buttons_gpio_keys = {
+ .name = "gpio-keys",
+ .dev = {
+ .platform_data = &bcm47xx_button_pdata,
+ }
+};
+
+/* Copy data from __initconst */
+static int __init bcm47xx_buttons_copy(const struct gpio_keys_button *buttons,
+ size_t nbuttons)
+{
+ size_t size = nbuttons * sizeof(*buttons);
+
+ bcm47xx_button_pdata.buttons = kmemdup(buttons, size, GFP_KERNEL);
+ if (!bcm47xx_button_pdata.buttons)
+ return -ENOMEM;
+ bcm47xx_button_pdata.nbuttons = nbuttons;
+
+ return 0;
+}
+
+#define bcm47xx_copy_bdata(dev_buttons) \
+ bcm47xx_buttons_copy(dev_buttons, ARRAY_SIZE(dev_buttons));
+
+int __init bcm47xx_buttons_register(void)
+{
+ enum bcm47xx_board board = bcm47xx_board_get();
+ int err;
+
+ switch (board) {
+ case BCM47XX_BOARD_ASUS_RTN12:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_rtn12);
+ break;
+ case BCM47XX_BOARD_ASUS_RTN16:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_rtn16);
+ break;
+ case BCM47XX_BOARD_ASUS_RTN66U:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_rtn66u);
+ break;
+ case BCM47XX_BOARD_ASUS_WL300G:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl300g);
+ break;
+ case BCM47XX_BOARD_ASUS_WL320GE:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl320ge);
+ break;
+ case BCM47XX_BOARD_ASUS_WL330GE:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl330ge);
+ break;
+ case BCM47XX_BOARD_ASUS_WL500G:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl500g);
+ break;
+ case BCM47XX_BOARD_ASUS_WL500GD:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl500gd);
+ break;
+ case BCM47XX_BOARD_ASUS_WL500GPV1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl500gpv1);
+ break;
+ case BCM47XX_BOARD_ASUS_WL500GPV2:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl500gpv2);
+ break;
+ case BCM47XX_BOARD_ASUS_WL500W:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl500w);
+ break;
+ case BCM47XX_BOARD_ASUS_WL520GC:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl520gc);
+ break;
+ case BCM47XX_BOARD_ASUS_WL520GU:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl520gu);
+ break;
+ case BCM47XX_BOARD_ASUS_WL700GE:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wl700ge);
+ break;
+ case BCM47XX_BOARD_ASUS_WLHDD:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wlhdd);
+ break;
+
+ case BCM47XX_BOARD_BELKIN_F7D3301:
+ case BCM47XX_BOARD_BELKIN_F7D3302:
+ case BCM47XX_BOARD_BELKIN_F7D4301:
+ case BCM47XX_BOARD_BELKIN_F7D4302:
+ case BCM47XX_BOARD_BELKIN_F7D4401:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_belkin_f7d4301);
+ break;
+
+ case BCM47XX_BOARD_BUFFALO_WHR2_A54G54:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_buffalo_whr2_a54g54);
+ break;
+ case BCM47XX_BOARD_BUFFALO_WHR_G125:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_buffalo_whr_g125);
+ break;
+ case BCM47XX_BOARD_BUFFALO_WHR_G54S:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_buffalo_whr_g54s);
+ break;
+ case BCM47XX_BOARD_BUFFALO_WHR_HP_G54:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_buffalo_whr_hp_g54);
+ break;
+ case BCM47XX_BOARD_BUFFALO_WZR_G300N:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_buffalo_wzr_g300n);
+ break;
+ case BCM47XX_BOARD_BUFFALO_WZR_RS_G54:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_buffalo_wzr_rs_g54);
+ break;
+ case BCM47XX_BOARD_BUFFALO_WZR_RS_G54HP:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_buffalo_wzr_rs_g54hp);
+ break;
+
+ case BCM47XX_BOARD_DELL_TM2300:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_dell_tm2300);
+ break;
+
+ case BCM47XX_BOARD_DLINK_DIR130:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_dlink_dir130);
+ break;
+ case BCM47XX_BOARD_DLINK_DIR330:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_dlink_dir330);
+ break;
+
+ case BCM47XX_BOARD_HUAWEI_E970:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_huawei_e970);
+ break;
+
+ case BCM47XX_BOARD_LINKSYS_E1000V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_e1000v1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_E1000V21:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_e1000v21);
+ break;
+ case BCM47XX_BOARD_LINKSYS_E2000V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_e2000v1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_E3000V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_e3000v1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_E3200V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_e3200v1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_E4200V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_e4200v1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT150NV1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt150nv1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT150NV11:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt150nv11);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT160NV1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt160nv1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT160NV3:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt160nv3);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT300N_V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt300n_v1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT300NV11:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt300nv11);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT310NV1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt310nv1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT54G3GV2:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt54g3gv2);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0101:
+ case BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0467:
+ case BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0708:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt54g_generic);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT610NV1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt610nv1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT610NV2:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt610nv2);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRTSL54GS:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrtsl54gs);
+ break;
+
+ case BCM47XX_BOARD_LUXUL_ABR_4400_V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_luxul_abr_4400_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XAP_310_V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_luxul_xap_310_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XAP_1210_V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_luxul_xap_1210_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XAP_1230_V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_luxul_xap_1230_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XAP_1240_V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_luxul_xap_1240_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XAP_1500_V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_luxul_xap_1500_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XBR_4400_V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_luxul_xbr_4400_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XVW_P30_V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_luxul_xvw_p30_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XWR_600_V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_luxul_xwr_600_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XWR_1750_V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_luxul_xwr_1750_v1);
+ break;
+
+ case BCM47XX_BOARD_MICROSOFT_MN700:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_microsoft_nm700);
+ break;
+
+ case BCM47XX_BOARD_MOTOROLA_WE800G:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_motorola_we800g);
+ break;
+ case BCM47XX_BOARD_MOTOROLA_WR850GP:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_motorola_wr850gp);
+ break;
+ case BCM47XX_BOARD_MOTOROLA_WR850GV2V3:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_motorola_wr850gv2v3);
+ break;
+
+ case BCM47XX_BOARD_NETGEAR_R6200_V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_r6200_v1);
+ break;
+ case BCM47XX_BOARD_NETGEAR_WNDR3400V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wndr3400v1);
+ break;
+ case BCM47XX_BOARD_NETGEAR_WNDR3400_V3:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wndr3400_v3);
+ break;
+ case BCM47XX_BOARD_NETGEAR_WNDR3700V3:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wndr3700v3);
+ break;
+ case BCM47XX_BOARD_NETGEAR_WNDR4500V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wndr4500v1);
+ break;
+ case BCM47XX_BOARD_NETGEAR_WNR1000_V3:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wnr1000_v3);
+ break;
+ case BCM47XX_BOARD_NETGEAR_WNR3500L:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wnr3500lv1);
+ break;
+ case BCM47XX_BOARD_NETGEAR_WNR834BV2:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wnr834bv2);
+ break;
+
+ case BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_simpletech_simpleshare);
+ break;
+
+ default:
+ pr_debug("No buttons configuration found for this device\n");
+ return -ENOTSUPP;
+ }
+
+ if (err)
+ return -ENOMEM;
+
+ err = platform_device_register(&bcm47xx_buttons_gpio_keys);
+ if (err) {
+ pr_err("Failed to register platform device: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
diff --git a/arch/mips/bcm47xx/irq.c b/arch/mips/bcm47xx/irq.c
new file mode 100644
index 000000000..21b4497f0
--- /dev/null
+++ b/arch/mips/bcm47xx/irq.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "bcm47xx_private.h"
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/setup.h>
+#include <asm/irq_cpu.h>
+#include <bcm47xx.h>
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ u32 cause;
+
+ cause = read_c0_cause() & read_c0_status() & CAUSEF_IP;
+
+ clear_c0_status(cause);
+
+ if (cause & CAUSEF_IP7)
+ do_IRQ(7);
+ if (cause & CAUSEF_IP2)
+ do_IRQ(2);
+ if (cause & CAUSEF_IP3)
+ do_IRQ(3);
+ if (cause & CAUSEF_IP4)
+ do_IRQ(4);
+ if (cause & CAUSEF_IP5)
+ do_IRQ(5);
+ if (cause & CAUSEF_IP6)
+ do_IRQ(6);
+}
+
+#define DEFINE_HWx_IRQDISPATCH(x) \
+ static void bcm47xx_hw ## x ## _irqdispatch(void) \
+ { \
+ do_IRQ(x); \
+ }
+DEFINE_HWx_IRQDISPATCH(2)
+DEFINE_HWx_IRQDISPATCH(3)
+DEFINE_HWx_IRQDISPATCH(4)
+DEFINE_HWx_IRQDISPATCH(5)
+DEFINE_HWx_IRQDISPATCH(6)
+DEFINE_HWx_IRQDISPATCH(7)
+
+void __init arch_init_irq(void)
+{
+ /*
+ * This is the first arch callback after mm_init (we can use kmalloc),
+ * so let's finish bus initialization now.
+ */
+ bcm47xx_bus_setup();
+
+#ifdef CONFIG_BCM47XX_BCMA
+ if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
+ bcma_write32(bcm47xx_bus.bcma.bus.drv_mips.core,
+ BCMA_MIPS_MIPS74K_INTMASK(5), 1 << 31);
+ /*
+ * the kernel reads the timer IRQ from a core register and sees #5,
+ * but we offset it by 2 and route it to #7
+ */
+ cp0_compare_irq = 7;
+ }
+#endif
+ mips_cpu_irq_init();
+
+ if (cpu_has_vint) {
+ pr_info("Setting up vectored interrupts\n");
+ set_vi_handler(2, bcm47xx_hw2_irqdispatch);
+ set_vi_handler(3, bcm47xx_hw3_irqdispatch);
+ set_vi_handler(4, bcm47xx_hw4_irqdispatch);
+ set_vi_handler(5, bcm47xx_hw5_irqdispatch);
+ set_vi_handler(6, bcm47xx_hw6_irqdispatch);
+ set_vi_handler(7, bcm47xx_hw7_irqdispatch);
+ }
+}
diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
new file mode 100644
index 000000000..167c42c71
--- /dev/null
+++ b/arch/mips/bcm47xx/leds.c
@@ -0,0 +1,793 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcm47xx_private.h"
+
+#include <linux/leds.h>
+#include <bcm47xx_board.h>
+
+/**************************************************
+ * Database
+ **************************************************/
+
+#define BCM47XX_GPIO_LED(_gpio, _color, _function, _active_low, \
+ _default_state) \
+ { \
+ .name = "bcm47xx:" _color ":" _function, \
+ .gpio = _gpio, \
+ .active_low = _active_low, \
+ .default_state = _default_state, \
+ }
+
+#define BCM47XX_GPIO_LED_TRIGGER(_gpio, _color, _function, _active_low, \
+ _default_trigger) \
+ { \
+ .name = "bcm47xx:" _color ":" _function, \
+ .gpio = _gpio, \
+ .active_low = _active_low, \
+ .default_state = LEDS_GPIO_DEFSTATE_OFF, \
+ .default_trigger = _default_trigger, \
+ }
+
+/* Asus */
+
+static const struct gpio_led
+bcm47xx_leds_asus_rtn12[] __initconst = {
+ BCM47XX_GPIO_LED(2, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(7, "unk", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_rtn15u[] __initconst = {
+ /* TODO: Add "wlan" LED */
+ BCM47XX_GPIO_LED(3, "blue", "wan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(4, "blue", "lan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(6, "blue", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(9, "blue", "usb", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_rtn16[] __initconst = {
+ BCM47XX_GPIO_LED(1, "blue", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(7, "blue", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_rtn66u[] __initconst = {
+ BCM47XX_GPIO_LED(12, "blue", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(15, "blue", "usb", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl300g[] __initconst = {
+ BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl320ge[] __initconst = {
+ BCM47XX_GPIO_LED(0, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(2, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(11, "unk", "link", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl330ge[] __initconst = {
+ BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl500g[] __initconst = {
+ BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl500gd[] __initconst = {
+ BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl500gpv1[] __initconst = {
+ BCM47XX_GPIO_LED(1, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl500gpv2[] __initconst = {
+ BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(1, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl500w[] __initconst = {
+ BCM47XX_GPIO_LED(5, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl520gc[] __initconst = {
+ BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(1, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl520gu[] __initconst = {
+ BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(1, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wl700ge[] __initconst = {
+ BCM47XX_GPIO_LED(1, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON), /* Labeled "READY" (there is no "power" LED). Originally ON, flashing on USB activity. */
+};
+
+static const struct gpio_led
+bcm47xx_leds_asus_wlhdd[] __initconst = {
+ BCM47XX_GPIO_LED(0, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(2, "unk", "usb", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Belkin */
+
+static const struct gpio_led
+bcm47xx_leds_belkin_f7d4301[] __initconst = {
+ BCM47XX_GPIO_LED(10, "green", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(11, "amber", "power", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(12, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(13, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(14, "unk", "usb0", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(15, "unk", "usb1", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Buffalo */
+
+static const struct gpio_led
+bcm47xx_leds_buffalo_whr2_a54g54[] __initconst = {
+ BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_buffalo_whr_g125[] __initconst = {
+ BCM47XX_GPIO_LED(1, "unk", "bridge", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(2, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(3, "unk", "internal", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(6, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_buffalo_whr_g54s[] __initconst = {
+ BCM47XX_GPIO_LED(1, "green", "bridge", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(2, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(3, "green", "internal", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(6, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(7, "red", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_buffalo_whr_hp_g54[] __initconst = {
+ BCM47XX_GPIO_LED(1, "unk", "bridge", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(2, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(3, "unk", "internal", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(6, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_buffalo_wzr_g300n[] __initconst = {
+ BCM47XX_GPIO_LED(1, "unk", "bridge", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(6, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_buffalo_wzr_rs_g54[] __initconst = {
+ BCM47XX_GPIO_LED(6, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "unk", "vpn", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_buffalo_wzr_rs_g54hp[] __initconst = {
+ BCM47XX_GPIO_LED(6, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "unk", "vpn", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Dell */
+
+static const struct gpio_led
+bcm47xx_leds_dell_tm2300[] __initconst = {
+ BCM47XX_GPIO_LED(6, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(7, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+/* D-Link */
+
+static const struct gpio_led
+bcm47xx_leds_dlink_dir130[] __initconst = {
+ BCM47XX_GPIO_LED_TRIGGER(0, "green", "status", 1, "timer"), /* Originally blinking when device is ready, separated from "power" LED */
+ BCM47XX_GPIO_LED(6, "blue", "unk", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_dlink_dir330[] __initconst = {
+ BCM47XX_GPIO_LED_TRIGGER(0, "green", "status", 1, "timer"), /* Originally blinking when device is ready, separated from "power" LED */
+ BCM47XX_GPIO_LED(4, "unk", "usb", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(6, "blue", "unk", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Huawei */
+
+static const struct gpio_led
+bcm47xx_leds_huawei_e970[] __initconst = {
+ BCM47XX_GPIO_LED(0, "unk", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Linksys */
+
+static const struct gpio_led
+bcm47xx_leds_linksys_e1000v1[] __initconst = {
+ BCM47XX_GPIO_LED(0, "blue", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "blue", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(2, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(4, "blue", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_e1000v21[] __initconst = {
+ BCM47XX_GPIO_LED(5, "blue", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(6, "blue", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(7, "amber", "wps", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(8, "blue", "wps", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_e2000v1[] __initconst = {
+ BCM47XX_GPIO_LED(1, "blue", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(2, "blue", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(3, "blue", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(4, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_e3000v1[] __initconst = {
+ BCM47XX_GPIO_LED(0, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "unk", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(3, "blue", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(5, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(7, "unk", "usb", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_e3200v1[] __initconst = {
+ BCM47XX_GPIO_LED(3, "green", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_e4200v1[] __initconst = {
+ BCM47XX_GPIO_LED(5, "white", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt150nv1[] __initconst = {
+ BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(3, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(5, "green", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt150nv11[] __initconst = {
+ BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(3, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(5, "green", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt160nv1[] __initconst = {
+ BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(3, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(5, "blue", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt160nv3[] __initconst = {
+ BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(2, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(4, "blue", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt300n_v1[] __initconst = {
+ BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(3, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(5, "green", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt300nv11[] __initconst = {
+ BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(3, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(5, "green", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt310nv1[] __initconst = {
+ BCM47XX_GPIO_LED(1, "blue", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(3, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(9, "blue", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt54g_generic[] __initconst = {
+ BCM47XX_GPIO_LED(0, "unk", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(5, "white", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(7, "orange", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = {
+ BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(2, "green", "3g", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(3, "blue", "3g", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Verified on: WRT54GS V1.0 */
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = {
+ BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Verified on: WRT54GL V1.1 */
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt54g_type_0467[] __initconst = {
+ BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(2, "white", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(3, "orange", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt610nv1[] __initconst = {
+ BCM47XX_GPIO_LED(0, "unk", "usb", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(3, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(9, "blue", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrt610nv2[] __initconst = {
+ BCM47XX_GPIO_LED(0, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "unk", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(3, "blue", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(5, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(7, "unk", "usb", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_linksys_wrtsl54gs[] __initconst = {
+ BCM47XX_GPIO_LED(0, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(5, "white", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(7, "orange", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Luxul */
+
+static const struct gpio_led
+bcm47xx_leds_luxul_abr_4400_v1[] __initconst = {
+ BCM47XX_GPIO_LED(12, "green", "usb", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED_TRIGGER(15, "green", "status", 0, "timer"),
+};
+
+static const struct gpio_led
+bcm47xx_leds_luxul_xap_310_v1[] __initconst = {
+ BCM47XX_GPIO_LED_TRIGGER(6, "green", "status", 1, "timer"),
+};
+
+static const struct gpio_led
+bcm47xx_leds_luxul_xap_1210_v1[] __initconst = {
+ BCM47XX_GPIO_LED_TRIGGER(6, "green", "status", 1, "timer"),
+};
+
+static const struct gpio_led
+bcm47xx_leds_luxul_xap_1230_v1[] __initconst = {
+ BCM47XX_GPIO_LED(3, "blue", "2ghz", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(4, "green", "bridge", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED_TRIGGER(6, "green", "status", 1, "timer"),
+};
+
+static const struct gpio_led
+bcm47xx_leds_luxul_xap_1240_v1[] __initconst = {
+ BCM47XX_GPIO_LED(3, "blue", "2ghz", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(4, "green", "bridge", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED_TRIGGER(6, "green", "status", 1, "timer"),
+};
+
+static const struct gpio_led
+bcm47xx_leds_luxul_xap_1500_v1[] __initconst = {
+ BCM47XX_GPIO_LED_TRIGGER(13, "green", "status", 1, "timer"),
+};
+
+static const struct gpio_led
+bcm47xx_leds_luxul_xap1500_v1_extra[] __initconst = {
+ BCM47XX_GPIO_LED(44, "green", "5ghz", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(76, "green", "2ghz", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_luxul_xbr_4400_v1[] __initconst = {
+ BCM47XX_GPIO_LED(12, "green", "usb", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED_TRIGGER(15, "green", "status", 0, "timer"),
+};
+
+static const struct gpio_led
+bcm47xx_leds_luxul_xvw_p30_v1[] __initconst = {
+ BCM47XX_GPIO_LED_TRIGGER(0, "blue", "status", 1, "timer"),
+ BCM47XX_GPIO_LED(1, "green", "link", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_luxul_xwr_600_v1[] __initconst = {
+ BCM47XX_GPIO_LED(3, "green", "wps", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED_TRIGGER(6, "green", "status", 1, "timer"),
+ BCM47XX_GPIO_LED(9, "green", "usb", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_luxul_xwr_1750_v1[] __initconst = {
+ BCM47XX_GPIO_LED(5, "green", "5ghz", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(12, "green", "usb", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED_TRIGGER(13, "green", "status", 0, "timer"),
+ BCM47XX_GPIO_LED(15, "green", "wps", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_luxul_xwr1750_v1_extra[] __initconst = {
+ BCM47XX_GPIO_LED(76, "green", "2ghz", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Microsoft */
+
+static const struct gpio_led
+bcm47xx_leds_microsoft_mn700[] __initconst = {
+ BCM47XX_GPIO_LED(6, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+};
+
+/* Motorola */
+
+static const struct gpio_led
+bcm47xx_leds_motorola_we800g[] __initconst = {
+ BCM47XX_GPIO_LED(1, "amber", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(2, "unk", "unk", 1, LEDS_GPIO_DEFSTATE_OFF), /* There are only 3 LEDs: Power, Wireless and Device (ethernet) */
+ BCM47XX_GPIO_LED(4, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+};
+
+static const struct gpio_led
+bcm47xx_leds_motorola_wr850gp[] __initconst = {
+ BCM47XX_GPIO_LED(0, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(6, "unk", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_motorola_wr850gv2v3[] __initconst = {
+ BCM47XX_GPIO_LED(0, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Netgear */
+
+static const struct gpio_led
+bcm47xx_leds_netgear_wndr3400v1[] __initconst = {
+ BCM47XX_GPIO_LED(2, "green", "usb", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(3, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(7, "amber", "power", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_netgear_wndr4500v1[] __initconst = {
+ BCM47XX_GPIO_LED(1, "green", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(2, "green", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(3, "amber", "power", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(8, "green", "usb1", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(9, "green", "2ghz", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(11, "blue", "5ghz", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(14, "green", "usb2", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_netgear_wnr1000_v3[] __initconst = {
+ BCM47XX_GPIO_LED(0, "blue", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "green", "wps", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_netgear_wnr3500lv1[] __initconst = {
+ BCM47XX_GPIO_LED(0, "blue", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "green", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(2, "green", "wan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(3, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(7, "amber", "power", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
+bcm47xx_leds_netgear_wnr834bv2[] __initconst = {
+ BCM47XX_GPIO_LED(2, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(3, "amber", "power", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(7, "unk", "connected", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+/* Siemens */
+static const struct gpio_led
+bcm47xx_leds_siemens_se505v2[] __initconst = {
+ BCM47XX_GPIO_LED(0, "unk", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(3, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(5, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+};
+
+/* SimpleTech */
+
+static const struct gpio_led
+bcm47xx_leds_simpletech_simpleshare[] __initconst = {
+ BCM47XX_GPIO_LED(1, "unk", "status", 1, LEDS_GPIO_DEFSTATE_OFF), /* "Ready" LED */
+};
+
+/**************************************************
+ * Init
+ **************************************************/
+
+static struct gpio_led_platform_data bcm47xx_leds_pdata __initdata;
+
+#define bcm47xx_set_pdata(dev_leds) do { \
+ bcm47xx_leds_pdata.leds = dev_leds; \
+ bcm47xx_leds_pdata.num_leds = ARRAY_SIZE(dev_leds); \
+} while (0)
+
+static struct gpio_led_platform_data bcm47xx_leds_pdata_extra __initdata = {};
+#define bcm47xx_set_pdata_extra(dev_leds) do { \
+ bcm47xx_leds_pdata_extra.leds = dev_leds; \
+ bcm47xx_leds_pdata_extra.num_leds = ARRAY_SIZE(dev_leds); \
+} while (0)
+
+void __init bcm47xx_leds_register(void)
+{
+ enum bcm47xx_board board = bcm47xx_board_get();
+
+ switch (board) {
+ case BCM47XX_BOARD_ASUS_RTN12:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_rtn12);
+ break;
+ case BCM47XX_BOARD_ASUS_RTN15U:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_rtn15u);
+ break;
+ case BCM47XX_BOARD_ASUS_RTN16:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_rtn16);
+ break;
+ case BCM47XX_BOARD_ASUS_RTN66U:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_rtn66u);
+ break;
+ case BCM47XX_BOARD_ASUS_WL300G:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_wl300g);
+ break;
+ case BCM47XX_BOARD_ASUS_WL320GE:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_wl320ge);
+ break;
+ case BCM47XX_BOARD_ASUS_WL330GE:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_wl330ge);
+ break;
+ case BCM47XX_BOARD_ASUS_WL500G:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_wl500g);
+ break;
+ case BCM47XX_BOARD_ASUS_WL500GD:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_wl500gd);
+ break;
+ case BCM47XX_BOARD_ASUS_WL500GPV1:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_wl500gpv1);
+ break;
+ case BCM47XX_BOARD_ASUS_WL500GPV2:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_wl500gpv2);
+ break;
+ case BCM47XX_BOARD_ASUS_WL500W:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_wl500w);
+ break;
+ case BCM47XX_BOARD_ASUS_WL520GC:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_wl520gc);
+ break;
+ case BCM47XX_BOARD_ASUS_WL520GU:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_wl520gu);
+ break;
+ case BCM47XX_BOARD_ASUS_WL700GE:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_wl700ge);
+ break;
+ case BCM47XX_BOARD_ASUS_WLHDD:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_wlhdd);
+ break;
+
+ case BCM47XX_BOARD_BELKIN_F7D3301:
+ case BCM47XX_BOARD_BELKIN_F7D3302:
+ case BCM47XX_BOARD_BELKIN_F7D4301:
+ case BCM47XX_BOARD_BELKIN_F7D4302:
+ case BCM47XX_BOARD_BELKIN_F7D4401:
+ bcm47xx_set_pdata(bcm47xx_leds_belkin_f7d4301);
+ break;
+
+ case BCM47XX_BOARD_BUFFALO_WHR2_A54G54:
+ bcm47xx_set_pdata(bcm47xx_leds_buffalo_whr2_a54g54);
+ break;
+ case BCM47XX_BOARD_BUFFALO_WHR_G125:
+ bcm47xx_set_pdata(bcm47xx_leds_buffalo_whr_g125);
+ break;
+ case BCM47XX_BOARD_BUFFALO_WHR_G54S:
+ bcm47xx_set_pdata(bcm47xx_leds_buffalo_whr_g54s);
+ break;
+ case BCM47XX_BOARD_BUFFALO_WHR_HP_G54:
+ bcm47xx_set_pdata(bcm47xx_leds_buffalo_whr_hp_g54);
+ break;
+ case BCM47XX_BOARD_BUFFALO_WZR_G300N:
+ bcm47xx_set_pdata(bcm47xx_leds_buffalo_wzr_g300n);
+ break;
+ case BCM47XX_BOARD_BUFFALO_WZR_RS_G54:
+ bcm47xx_set_pdata(bcm47xx_leds_buffalo_wzr_rs_g54);
+ break;
+ case BCM47XX_BOARD_BUFFALO_WZR_RS_G54HP:
+ bcm47xx_set_pdata(bcm47xx_leds_buffalo_wzr_rs_g54hp);
+ break;
+
+ case BCM47XX_BOARD_DELL_TM2300:
+ bcm47xx_set_pdata(bcm47xx_leds_dell_tm2300);
+ break;
+
+ case BCM47XX_BOARD_DLINK_DIR130:
+ bcm47xx_set_pdata(bcm47xx_leds_dlink_dir130);
+ break;
+ case BCM47XX_BOARD_DLINK_DIR330:
+ bcm47xx_set_pdata(bcm47xx_leds_dlink_dir330);
+ break;
+
+ case BCM47XX_BOARD_HUAWEI_E970:
+ bcm47xx_set_pdata(bcm47xx_leds_huawei_e970);
+ break;
+
+ case BCM47XX_BOARD_LINKSYS_E1000V1:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_e1000v1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_E1000V21:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_e1000v21);
+ break;
+ case BCM47XX_BOARD_LINKSYS_E2000V1:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_e2000v1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_E3000V1:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_e3000v1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_E3200V1:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_e3200v1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_E4200V1:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_e4200v1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT150NV1:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt150nv1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT150NV11:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt150nv11);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT160NV1:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt160nv1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT160NV3:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt160nv3);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT300N_V1:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt300n_v1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT300NV11:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt300nv11);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT310NV1:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt310nv1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT54G3GV2:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt54g3gv2);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0101:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt54g_type_0101);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0467:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt54g_type_0467);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0708:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt54g_generic);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT610NV1:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt610nv1);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT610NV2:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt610nv2);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRTSL54GS:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_wrtsl54gs);
+ break;
+
+ case BCM47XX_BOARD_LUXUL_ABR_4400_V1:
+ bcm47xx_set_pdata(bcm47xx_leds_luxul_abr_4400_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XAP_310_V1:
+ bcm47xx_set_pdata(bcm47xx_leds_luxul_xap_310_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XAP_1210_V1:
+ bcm47xx_set_pdata(bcm47xx_leds_luxul_xap_1210_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XAP_1230_V1:
+ bcm47xx_set_pdata(bcm47xx_leds_luxul_xap_1230_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XAP_1240_V1:
+ bcm47xx_set_pdata(bcm47xx_leds_luxul_xap_1240_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XAP_1500_V1:
+ bcm47xx_set_pdata(bcm47xx_leds_luxul_xap_1500_v1);
+ bcm47xx_set_pdata_extra(bcm47xx_leds_luxul_xap1500_v1_extra);
+ break;
+ case BCM47XX_BOARD_LUXUL_XBR_4400_V1:
+ bcm47xx_set_pdata(bcm47xx_leds_luxul_xbr_4400_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XVW_P30_V1:
+ bcm47xx_set_pdata(bcm47xx_leds_luxul_xvw_p30_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XWR_600_V1:
+ bcm47xx_set_pdata(bcm47xx_leds_luxul_xwr_600_v1);
+ break;
+ case BCM47XX_BOARD_LUXUL_XWR_1750_V1:
+ bcm47xx_set_pdata(bcm47xx_leds_luxul_xwr_1750_v1);
+ bcm47xx_set_pdata_extra(bcm47xx_leds_luxul_xwr1750_v1_extra);
+ break;
+
+ case BCM47XX_BOARD_MICROSOFT_MN700:
+		bcm47xx_set_pdata(bcm47xx_leds_microsoft_mn700);
+ break;
+
+ case BCM47XX_BOARD_MOTOROLA_WE800G:
+ bcm47xx_set_pdata(bcm47xx_leds_motorola_we800g);
+ break;
+ case BCM47XX_BOARD_MOTOROLA_WR850GP:
+ bcm47xx_set_pdata(bcm47xx_leds_motorola_wr850gp);
+ break;
+ case BCM47XX_BOARD_MOTOROLA_WR850GV2V3:
+ bcm47xx_set_pdata(bcm47xx_leds_motorola_wr850gv2v3);
+ break;
+
+ case BCM47XX_BOARD_NETGEAR_WNDR3400V1:
+ bcm47xx_set_pdata(bcm47xx_leds_netgear_wndr3400v1);
+ break;
+ case BCM47XX_BOARD_NETGEAR_WNDR4500V1:
+ bcm47xx_set_pdata(bcm47xx_leds_netgear_wndr4500v1);
+ break;
+ case BCM47XX_BOARD_NETGEAR_WNR1000_V3:
+ bcm47xx_set_pdata(bcm47xx_leds_netgear_wnr1000_v3);
+ break;
+ case BCM47XX_BOARD_NETGEAR_WNR3500L:
+ bcm47xx_set_pdata(bcm47xx_leds_netgear_wnr3500lv1);
+ break;
+ case BCM47XX_BOARD_NETGEAR_WNR834BV2:
+ bcm47xx_set_pdata(bcm47xx_leds_netgear_wnr834bv2);
+ break;
+
+ case BCM47XX_BOARD_SIEMENS_SE505V2:
+ bcm47xx_set_pdata(bcm47xx_leds_siemens_se505v2);
+ break;
+
+ case BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE:
+ bcm47xx_set_pdata(bcm47xx_leds_simpletech_simpleshare);
+ break;
+
+ default:
+ pr_debug("No LEDs configuration found for this device\n");
+ return;
+ }
+
+ gpio_led_register_device(-1, &bcm47xx_leds_pdata);
+ if (bcm47xx_leds_pdata_extra.num_leds)
+ gpio_led_register_device(0, &bcm47xx_leds_pdata_extra);
+}
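
To illustrate the helpers used throughout this file: an entry such as
BCM47XX_GPIO_LED(2, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON) expands to an
ordinary struct gpio_led initializer, and bcm47xx_set_pdata() just points the
shared platform data at the selected table:

/* BCM47XX_GPIO_LED(2, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON) becomes: */
{
	.name		= "bcm47xx:unk:power",
	.gpio		= 2,
	.active_low	= 1,
	.default_state	= LEDS_GPIO_DEFSTATE_ON,
},

/* bcm47xx_set_pdata(bcm47xx_leds_asus_rtn12) becomes: */
bcm47xx_leds_pdata.leds = bcm47xx_leds_asus_rtn12;
bcm47xx_leds_pdata.num_leds = ARRAY_SIZE(bcm47xx_leds_asus_rtn12);
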
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
new file mode 100644
index 000000000..22509b5fa
--- /dev/null
+++ b/arch/mips/bcm47xx/prom.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org>
+ * Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net>
+ * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/spinlock.h>
+#include <linux/ssb/ssb_driver_chipcommon.h>
+#include <linux/ssb/ssb_regs.h>
+#include <linux/smp.h>
+#include <asm/bootinfo.h>
+#include <bcm47xx.h>
+#include <bcm47xx_board.h>
+
+static char bcm47xx_system_type[20] = "Broadcom BCM47XX";
+
+const char *get_system_type(void)
+{
+ return bcm47xx_system_type;
+}
+
+__init void bcm47xx_set_system_type(u16 chip_id)
+{
+ snprintf(bcm47xx_system_type, sizeof(bcm47xx_system_type),
+ (chip_id > 0x9999) ? "Broadcom BCM%d" :
+ "Broadcom BCM%04X",
+ chip_id);
+}
+
+static unsigned long lowmem __initdata;
+
+static __init void prom_init_mem(void)
+{
+ unsigned long mem;
+ unsigned long max;
+ unsigned long off;
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ /* Figure out memory size by finding aliases.
+ *
+ * We should theoretically use the mapping from CFE using cfe_enummem().
+	 * However, as the BCM47XX is mostly used on low-memory systems, we
+	 * want to reuse the memory used by CFE (around 4MB). That means the
+	 * cfe_* functions stop working at some point during boot, so we should
+	 * only call them at the very beginning of the boot process.
+	 *
+	 * BCM47XX uses 128MB for addressing the RAM; if the system contains
+	 * less than that amount of RAM, it remaps the RAM more often into the
+ * available space.
+ */
+
+ /* Physical address, without mapping to any kernel segment */
+ off = CPHYSADDR((unsigned long)prom_init);
+
+ /* Accessing memory after 128 MiB will cause an exception */
+ max = 128 << 20;
+
+ for (mem = 1 << 20; mem < max; mem += 1 << 20) {
+		/* The loop condition may not be enough; off may be over 1 MiB */
+ if (off + mem >= max) {
+ mem = max;
+ pr_debug("Assume 128MB RAM\n");
+ break;
+ }
+ if (!memcmp((void *)prom_init, (void *)prom_init + mem, 32))
+ break;
+ }
+ lowmem = mem;
+
+	/* Ignore the last page when the DDR size is 128M. Cached
+	 * accesses to the last page cause the processor to prefetch
+	 * from addresses above 128M, stepping out of the DDR address
+ * space.
+ */
+ if (c->cputype == CPU_74K && (mem == (128 << 20)))
+ mem -= 0x1000;
+ memblock_add(0, mem);
+}
+
+/*
+ * This is the first serial port on the ChipCommon core; it is at this
+ * position for both the sb (ssb) and ai (bcma) buses.
+ */
+#define BCM47XX_SERIAL_ADDR (SSB_ENUM_BASE + SSB_CHIPCO_UART0_DATA)
+
+void __init prom_init(void)
+{
+ prom_init_mem();
+ setup_8250_early_printk_port(CKSEG1ADDR(BCM47XX_SERIAL_ADDR), 0, 0);
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
+
+#if defined(CONFIG_BCM47XX_BCMA) && defined(CONFIG_HIGHMEM)
+
+#define EXTVBASE 0xc0000000
+#define ENTRYLO(x) ((pte_val(pfn_pte((x) >> _PFN_SHIFT, PAGE_KERNEL_UNCACHED)) >> 6) | 1)
+
+#include <asm/tlbflush.h>
+
+/* Stripped version of tlb_init, with the call to build_tlb_refill_handler
+ * dropped. Calling it at this stage causes a hang.
+ */
+void early_tlb_init(void)
+{
+ write_c0_pagemask(PM_DEFAULT_MASK);
+ write_c0_wired(0);
+ temp_tlb_entry = current_cpu_data.tlbsize - 1;
+ local_flush_tlb_all();
+}
+
+void __init bcm47xx_prom_highmem_init(void)
+{
+ unsigned long off = (unsigned long)prom_init;
+ unsigned long extmem = 0;
+ bool highmem_region = false;
+
+ if (WARN_ON(bcm47xx_bus_type != BCM47XX_BUS_TYPE_BCMA))
+ return;
+
+ if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
+ highmem_region = true;
+
+ if (lowmem != 128 << 20 || !highmem_region)
+ return;
+
+ early_tlb_init();
+
+ /* Add one temporary TLB entry to map SDRAM Region 2.
+ * Physical Virtual
+ * 0x80000000 0xc0000000 (1st: 256MB)
+ * 0x90000000 0xd0000000 (2nd: 256MB)
+ */
+ add_temporary_entry(ENTRYLO(0x80000000),
+ ENTRYLO(0x80000000 + (256 << 20)),
+ EXTVBASE, PM_256M);
+
+ off = EXTVBASE + __pa(off);
+ for (extmem = 128 << 20; extmem < 512 << 20; extmem <<= 1) {
+ if (!memcmp((void *)prom_init, (void *)(off + extmem), 16))
+ break;
+ }
+ extmem -= lowmem;
+
+ early_tlb_init();
+
+ if (!extmem)
+ return;
+
+	pr_warn("Found %lu MiB of extra memory, but highmem is not supported yet!\n",
+ extmem >> 20);
+
+ /* TODO: Register extra memory */
+}
+
+#endif /* defined(CONFIG_BCM47XX_BCMA) && defined(CONFIG_HIGHMEM) */
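
As a worked example of the format selection in bcm47xx_set_system_type()
(the chip IDs here are chosen only for illustration): values up to 0x9999 are
printed in hex, larger values as decimal model numbers.

bcm47xx_set_system_type(0x5354);	/* -> "Broadcom BCM5354"  ("%04X" branch) */
bcm47xx_set_system_type(47162);		/* -> "Broadcom BCM47162" ("%d" branch)   */
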
diff --git a/arch/mips/bcm47xx/serial.c b/arch/mips/bcm47xx/serial.c
new file mode 100644
index 000000000..e3c9872a4
--- /dev/null
+++ b/arch/mips/bcm47xx/serial.c
@@ -0,0 +1,93 @@
+/*
+ * 8250 UART probe driver for the BCM47XX platforms
+ * Author: Aurelien Jarno
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net>
+ */
+
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+#include <linux/ssb/ssb.h>
+#include <bcm47xx.h>
+
+static struct plat_serial8250_port uart8250_data[5];
+
+static struct platform_device uart8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = uart8250_data,
+ },
+};
+
+#ifdef CONFIG_BCM47XX_SSB
+static int __init uart8250_init_ssb(void)
+{
+ int i;
+ struct ssb_mipscore *mcore = &(bcm47xx_bus.ssb.mipscore);
+
+ memset(&uart8250_data, 0, sizeof(uart8250_data));
+
+ for (i = 0; i < mcore->nr_serial_ports &&
+ i < ARRAY_SIZE(uart8250_data) - 1; i++) {
+ struct plat_serial8250_port *p = &(uart8250_data[i]);
+ struct ssb_serial_port *ssb_port = &(mcore->serial_ports[i]);
+
+ p->mapbase = (unsigned int)ssb_port->regs;
+ p->membase = (void *)ssb_port->regs;
+ p->irq = ssb_port->irq + 2;
+ p->uartclk = ssb_port->baud_base;
+ p->regshift = ssb_port->reg_shift;
+ p->iotype = UPIO_MEM;
+ p->flags = UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
+ }
+ return platform_device_register(&uart8250_device);
+}
+#endif
+
+#ifdef CONFIG_BCM47XX_BCMA
+static int __init uart8250_init_bcma(void)
+{
+ int i;
+ struct bcma_drv_cc *cc = &(bcm47xx_bus.bcma.bus.drv_cc);
+
+ memset(&uart8250_data, 0, sizeof(uart8250_data));
+
+ for (i = 0; i < cc->nr_serial_ports &&
+ i < ARRAY_SIZE(uart8250_data) - 1; i++) {
+ struct plat_serial8250_port *p = &(uart8250_data[i]);
+ struct bcma_serial_port *bcma_port;
+ bcma_port = &(cc->serial_ports[i]);
+
+ p->mapbase = (unsigned int)bcma_port->regs;
+ p->membase = (void *)bcma_port->regs;
+ p->irq = bcma_port->irq;
+ p->uartclk = bcma_port->baud_base;
+ p->regshift = bcma_port->reg_shift;
+ p->iotype = UPIO_MEM;
+ p->flags = UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
+ }
+ return platform_device_register(&uart8250_device);
+}
+#endif
+
+static int __init uart8250_init(void)
+{
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ return uart8250_init_ssb();
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ return uart8250_init_bcma();
+#endif
+ }
+ return -EINVAL;
+}
+device_initcall(uart8250_init);
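
The loops above deliberately stop one entry short of ARRAY_SIZE(uart8250_data)
so that the last slot stays zeroed. A rough sketch of the consumer side (an
assumption about how the serial8250 platform driver walks the data, not part
of this file):

struct plat_serial8250_port *p;

for (p = dev_get_platdata(&pdev->dev); p && p->flags != 0; p++)
	/* register one 8250 port described by *p; the zeroed slot ends the walk */
	;
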
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
new file mode 100644
index 000000000..94bf83957
--- /dev/null
+++ b/arch/mips/bcm47xx/setup.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org>
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2006 Michael Buesch <m@bues.ch>
+ * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org>
+ * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "bcm47xx_private.h"
+
+#include <linux/bcm47xx_sprom.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/ssb/ssb.h>
+#include <linux/ssb/ssb_embedded.h>
+#include <linux/bcma/bcma_soc.h>
+#include <asm/bootinfo.h>
+#include <asm/idle.h>
+#include <asm/prom.h>
+#include <asm/reboot.h>
+#include <asm/time.h>
+#include <bcm47xx.h>
+#include <bcm47xx_board.h>
+
+union bcm47xx_bus bcm47xx_bus;
+EXPORT_SYMBOL(bcm47xx_bus);
+
+enum bcm47xx_bus_type bcm47xx_bus_type;
+EXPORT_SYMBOL(bcm47xx_bus_type);
+
+static void bcm47xx_machine_restart(char *command)
+{
+ pr_alert("Please stand by while rebooting the system...\n");
+ local_irq_disable();
+ /* Set the watchdog timer to reset immediately */
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ if (bcm47xx_bus.ssb.chip_id == 0x4785)
+ write_c0_diag4(1 << 22);
+ ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 1);
+ if (bcm47xx_bus.ssb.chip_id == 0x4785) {
+ __asm__ __volatile__(
+ ".set\tmips3\n\t"
+ "sync\n\t"
+ "wait\n\t"
+ ".set\tmips0");
+ }
+ break;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc, 1);
+ break;
+#endif
+ }
+ while (1)
+ cpu_relax();
+}
+
+static void bcm47xx_machine_halt(void)
+{
+ /* Disable interrupts and watchdog and spin forever */
+ local_irq_disable();
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 0);
+ break;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc, 0);
+ break;
+#endif
+ }
+ while (1)
+ cpu_relax();
+}
+
+#ifdef CONFIG_BCM47XX_SSB
+static void __init bcm47xx_register_ssb(void)
+{
+ int err;
+ char buf[100];
+ struct ssb_mipscore *mcore;
+
+ err = ssb_bus_host_soc_register(&bcm47xx_bus.ssb, SSB_ENUM_BASE);
+ if (err)
+ panic("Failed to initialize SSB bus (err %d)", err);
+
+ mcore = &bcm47xx_bus.ssb.mipscore;
+ if (bcm47xx_nvram_getenv("kernel_args", buf, sizeof(buf)) >= 0) {
+ if (strstr(buf, "console=ttyS1")) {
+ struct ssb_serial_port port;
+
+ pr_debug("Swapping serial ports!\n");
+ /* swap serial ports */
+ memcpy(&port, &mcore->serial_ports[0], sizeof(port));
+ memcpy(&mcore->serial_ports[0], &mcore->serial_ports[1],
+ sizeof(port));
+ memcpy(&mcore->serial_ports[1], &port, sizeof(port));
+ }
+ }
+}
+#endif
+
+#ifdef CONFIG_BCM47XX_BCMA
+static void __init bcm47xx_register_bcma(void)
+{
+ int err;
+
+ err = bcma_host_soc_register(&bcm47xx_bus.bcma);
+ if (err)
+ panic("Failed to register BCMA bus (err %d)", err);
+}
+#endif
+
+/*
+ * Memory setup is done in the early part of MIPS's arch_mem_init. It's supposed
+ * to detect memory and record it with memblock_add.
+ * Any extra initialization performed here must not use kmalloc or bootmem.
+ */
+void __init plat_mem_setup(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ if (c->cputype == CPU_74K) {
+ pr_info("Using bcma bus\n");
+#ifdef CONFIG_BCM47XX_BCMA
+ bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA;
+ bcm47xx_register_bcma();
+ bcm47xx_set_system_type(bcm47xx_bus.bcma.bus.chipinfo.id);
+#ifdef CONFIG_HIGHMEM
+ bcm47xx_prom_highmem_init();
+#endif
+#endif
+ } else {
+ pr_info("Using ssb bus\n");
+#ifdef CONFIG_BCM47XX_SSB
+ bcm47xx_bus_type = BCM47XX_BUS_TYPE_SSB;
+ bcm47xx_sprom_register_fallbacks();
+ bcm47xx_register_ssb();
+ bcm47xx_set_system_type(bcm47xx_bus.ssb.chip_id);
+#endif
+ }
+
+ _machine_restart = bcm47xx_machine_restart;
+ _machine_halt = bcm47xx_machine_halt;
+ pm_power_off = bcm47xx_machine_halt;
+}
+
+#ifdef CONFIG_BCM47XX_BCMA
+static struct device * __init bcm47xx_setup_device(void)
+{
+ struct device *dev;
+ int err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ err = dev_set_name(dev, "bcm47xx_soc");
+ if (err) {
+ pr_err("Failed to set SoC device name: %d\n", err);
+ kfree(dev);
+ return NULL;
+ }
+
+ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (err)
+ pr_err("Failed to set SoC DMA mask: %d\n", err);
+
+ return dev;
+}
+#endif
+
+/*
+ * This finishes bus initialization, doing things that were not possible without
+ * kmalloc. Make sure to call it late enough (after mm_init).
+ */
+void __init bcm47xx_bus_setup(void)
+{
+#ifdef CONFIG_BCM47XX_BCMA
+ if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
+ int err;
+
+ bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
+ if (!bcm47xx_bus.bcma.dev)
+ panic("Failed to setup SoC device\n");
+
+ err = bcma_host_soc_init(&bcm47xx_bus.bcma);
+ if (err)
+ panic("Failed to initialize BCMA bus (err %d)", err);
+ }
+#endif
+
+ /* With bus initialized we can access NVRAM and detect the board */
+ bcm47xx_board_detect();
+ mips_set_machine_name(bcm47xx_board_get_name());
+}
+
+static int __init bcm47xx_cpu_fixes(void)
+{
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ /* Nothing to do */
+ break;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ /* The BCM4706 has a problem with the CPU wait instruction.
+		 * When r4k_wait or r4k_wait_irqoff is used, the CPU will just
+		 * hang and not return from an msleep(). Removing the cpu_wait
+ * functionality is a workaround for this problem. The BCM4716
+ * does not have this problem.
+ */
+ if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
+ cpu_wait = NULL;
+ break;
+#endif
+ }
+ return 0;
+}
+arch_initcall(bcm47xx_cpu_fixes);
+
+static struct fixed_phy_status bcm47xx_fixed_phy_status __initdata = {
+ .link = 1,
+ .speed = SPEED_100,
+ .duplex = DUPLEX_FULL,
+};
+
+static int __init bcm47xx_register_bus_complete(void)
+{
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ /* Nothing to do */
+ break;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ if (device_register(bcm47xx_bus.bcma.dev))
+ pr_err("Failed to register SoC device\n");
+ bcma_bus_register(&bcm47xx_bus.bcma.bus);
+ break;
+#endif
+ }
+ bcm47xx_buttons_register();
+ bcm47xx_leds_register();
+ bcm47xx_workarounds();
+
+ fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status);
+ return 0;
+}
+device_initcall(bcm47xx_register_bus_complete);
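
For orientation, a rough summary of how the pieces in setup.c are ordered by
the standard kernel init sequence (a sketch, not a definitive call graph):

/*
 * plat_mem_setup()        - early, from the MIPS arch_mem_init path; picks the
 *                           bus type and sets the restart/halt hooks
 * arch_init_irq()         - calls bcm47xx_bus_setup() once kmalloc is usable
 * bcm47xx_cpu_fixes()     - arch_initcall, runs before the device initcalls
 * bcm47xx_register_bus_complete(), uart8250_init()
 *                         - device_initcalls; register the SoC device, buttons,
 *                           LEDs, workarounds and the 8250 UARTs
 */
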
diff --git a/arch/mips/bcm47xx/time.c b/arch/mips/bcm47xx/time.c
new file mode 100644
index 000000000..74224cf2e
--- /dev/null
+++ b/arch/mips/bcm47xx/time.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/ssb/ssb.h>
+#include <asm/time.h>
+#include <bcm47xx.h>
+#include <bcm47xx_board.h>
+
+void __init plat_time_init(void)
+{
+ unsigned long hz = 0;
+ u16 chip_id = 0;
+ char buf[10];
+ int len;
+ enum bcm47xx_board board = bcm47xx_board_get();
+
+ /*
+	 * Use deterministic values for the initial counter interrupt
+	 * so that calibrate_delay() avoids encountering a counter wrap.
+ */
+ write_c0_count(0);
+ write_c0_compare(0xffff);
+
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ hz = ssb_cpu_clock(&bcm47xx_bus.ssb.mipscore) / 2;
+ chip_id = bcm47xx_bus.ssb.chip_id;
+ break;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ hz = bcma_cpu_clock(&bcm47xx_bus.bcma.bus.drv_mips) / 2;
+ chip_id = bcm47xx_bus.bcma.bus.chipinfo.id;
+ break;
+#endif
+ }
+
+ if (chip_id == 0x5354) {
+ len = bcm47xx_nvram_getenv("clkfreq", buf, sizeof(buf));
+ if (len >= 0 && !strncmp(buf, "200", 4))
+ hz = 100000000;
+ }
+
+ switch (board) {
+ case BCM47XX_BOARD_ASUS_WL520GC:
+ case BCM47XX_BOARD_ASUS_WL520GU:
+ hz = 100000000;
+ break;
+ default:
+ break;
+ }
+
+ if (!hz)
+ hz = 100000000;
+
+ /* Set MIPS counter frequency for fixed_rate_gettimeoffset() */
+ mips_hpt_frequency = hz;
+}
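
A concrete example of the special casing above (a sketch based only on the
checks in this function): on a BCM5354 with the nvram variable clkfreq=200 the
CPU runs at 200 MHz, and the CP0 Count register is assumed to tick at half the
CPU clock, so the code forces:

hz = 100000000;			/* 200 MHz CPU, counter at half rate */
mips_hpt_frequency = hz;	/* i.e. 10 ns per counter tick */
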
diff --git a/arch/mips/bcm47xx/workarounds.c b/arch/mips/bcm47xx/workarounds.c
new file mode 100644
index 000000000..0ab95dd43
--- /dev/null
+++ b/arch/mips/bcm47xx/workarounds.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcm47xx_private.h"
+
+#include <linux/gpio.h>
+#include <bcm47xx_board.h>
+#include <bcm47xx.h>
+
+static void __init bcm47xx_workarounds_enable_usb_power(int usb_power)
+{
+ int err;
+
+ err = gpio_request_one(usb_power, GPIOF_OUT_INIT_HIGH, "usb_power");
+ if (err)
+ pr_err("Failed to request USB power gpio: %d\n", err);
+ else
+ gpio_free(usb_power);
+}
+
+void __init bcm47xx_workarounds(void)
+{
+ enum bcm47xx_board board = bcm47xx_board_get();
+
+ switch (board) {
+ case BCM47XX_BOARD_NETGEAR_WNR3500L:
+ bcm47xx_workarounds_enable_usb_power(12);
+ break;
+ case BCM47XX_BOARD_NETGEAR_WNDR3400V2:
+ case BCM47XX_BOARD_NETGEAR_WNDR3400_V3:
+ bcm47xx_workarounds_enable_usb_power(21);
+ break;
+ default:
+ /* No workaround(s) needed */
+ break;
+ }
+}
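
The helper above is effectively a one-shot assertion of the board's USB
power-enable line; e.g. for the WNR3500L it boils down to the following,
assuming gpio_free() leaves the configured output state untouched:

gpio_request_one(12, GPIOF_OUT_INIT_HIGH, "usb_power");
gpio_free(12);	/* the pin keeps driving high, so the USB port stays powered */
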
diff --git a/arch/mips/bcm63xx/Kconfig b/arch/mips/bcm63xx/Kconfig
new file mode 100644
index 000000000..837f6e5a2
--- /dev/null
+++ b/arch/mips/bcm63xx/Kconfig
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "CPU support"
+ depends on BCM63XX
+
+config BCM63XX_CPU_3368
+ bool "support 3368 CPU"
+ select SYS_HAS_CPU_BMIPS4350
+ select HAVE_PCI
+
+config BCM63XX_CPU_6328
+ bool "support 6328 CPU"
+ select SYS_HAS_CPU_BMIPS4350
+ select HAVE_PCI
+
+config BCM63XX_CPU_6338
+ bool "support 6338 CPU"
+ select SYS_HAS_CPU_BMIPS32_3300
+ select HAVE_PCI
+
+config BCM63XX_CPU_6345
+ bool "support 6345 CPU"
+ select SYS_HAS_CPU_BMIPS32_3300
+
+config BCM63XX_CPU_6348
+ bool "support 6348 CPU"
+ select SYS_HAS_CPU_BMIPS32_3300
+ select HAVE_PCI
+
+config BCM63XX_CPU_6358
+ bool "support 6358 CPU"
+ select SYS_HAS_CPU_BMIPS4350
+ select HAVE_PCI
+
+config BCM63XX_CPU_6362
+ bool "support 6362 CPU"
+ select SYS_HAS_CPU_BMIPS4350
+ select HAVE_PCI
+
+config BCM63XX_CPU_6368
+ bool "support 6368 CPU"
+ select SYS_HAS_CPU_BMIPS4350
+ select HAVE_PCI
+endmenu
+
+source "arch/mips/bcm63xx/boards/Kconfig"
diff --git a/arch/mips/bcm63xx/Makefile b/arch/mips/bcm63xx/Makefile
new file mode 100644
index 000000000..d89651e53
--- /dev/null
+++ b/arch/mips/bcm63xx/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += clk.o cpu.o cs.o gpio.o irq.o nvram.o prom.o reset.o \
+ setup.o timer.o dev-enet.o dev-flash.o dev-pcmcia.o \
+ dev-rng.o dev-spi.o dev-hsspi.o dev-uart.o dev-wdt.o \
+ dev-usb-usbd.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+obj-y += boards/
diff --git a/arch/mips/bcm63xx/Platform b/arch/mips/bcm63xx/Platform
new file mode 100644
index 000000000..882dc40f4
--- /dev/null
+++ b/arch/mips/bcm63xx/Platform
@@ -0,0 +1,6 @@
+#
+# Broadcom BCM63XX boards
+#
+cflags-$(CONFIG_BCM63XX) += \
+ -I$(srctree)/arch/mips/include/asm/mach-bcm63xx/
+load-$(CONFIG_BCM63XX) := 0xffffffff80010000
diff --git a/arch/mips/bcm63xx/boards/Kconfig b/arch/mips/bcm63xx/boards/Kconfig
new file mode 100644
index 000000000..492c3bd00
--- /dev/null
+++ b/arch/mips/bcm63xx/boards/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+choice
+ prompt "Board support"
+ depends on BCM63XX
+ default BOARD_BCM963XX
+
+config BOARD_BCM963XX
+ bool "Generic Broadcom 963xx boards"
+ select SSB
+
+endchoice
diff --git a/arch/mips/bcm63xx/boards/Makefile b/arch/mips/bcm63xx/boards/Makefile
new file mode 100644
index 000000000..a74b9c8d0
--- /dev/null
+++ b/arch/mips/bcm63xx/boards/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_BOARD_BCM963XX) += board_bcm963xx.o
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
new file mode 100644
index 000000000..01aff80a5
--- /dev/null
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -0,0 +1,911 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/ssb/ssb.h>
+#include <asm/addrspace.h>
+#include <bcm63xx_board.h>
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_dev_uart.h>
+#include <bcm63xx_regs.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_nvram.h>
+#include <bcm63xx_dev_pci.h>
+#include <bcm63xx_dev_enet.h>
+#include <bcm63xx_dev_flash.h>
+#include <bcm63xx_dev_hsspi.h>
+#include <bcm63xx_dev_pcmcia.h>
+#include <bcm63xx_dev_spi.h>
+#include <bcm63xx_dev_usb_usbd.h>
+#include <board_bcm963xx.h>
+
+#include <uapi/linux/bcm933xx_hcs.h>
+
+#define HCS_OFFSET_128K 0x20000
+
+static struct board_info board;
+
+/*
+ * known 3368 boards
+ */
+#ifdef CONFIG_BCM63XX_CPU_3368
+static struct board_info __initdata board_cvg834g = {
+ .name = "CVG834G_E15R3921",
+ .expected_cpu_id = 0x3368,
+
+ .ephy_reset_gpio = 36,
+ .ephy_reset_gpio_flags = GPIOF_INIT_HIGH,
+ .has_pci = 1,
+ .has_uart0 = 1,
+ .has_uart1 = 1,
+
+ .has_enet0 = 1,
+ .enet0 = {
+ .has_phy = 1,
+ .use_internal_phy = 1,
+ },
+
+ .leds = {
+ {
+ .name = "CVG834G:green:power",
+ .gpio = 37,
+			.default_trigger = "default-on",
+ },
+ },
+};
+#endif /* CONFIG_BCM63XX_CPU_3368 */
+
+/*
+ * known 6328 boards
+ */
+#ifdef CONFIG_BCM63XX_CPU_6328
+static struct board_info __initdata board_96328avng = {
+ .name = "96328avng",
+ .expected_cpu_id = 0x6328,
+
+ .has_pci = 1,
+ .has_uart0 = 1,
+
+ .has_usbd = 0,
+ .usbd = {
+ .use_fullspeed = 0,
+ .port_no = 0,
+ },
+
+ .leds = {
+ {
+ .name = "96328avng::ppp-fail",
+ .gpio = 2,
+ .active_low = 1,
+ },
+ {
+ .name = "96328avng::power",
+ .gpio = 4,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "96328avng::power-fail",
+ .gpio = 8,
+ .active_low = 1,
+ },
+ {
+ .name = "96328avng::wps",
+ .gpio = 9,
+ .active_low = 1,
+ },
+ {
+ .name = "96328avng::ppp",
+ .gpio = 11,
+ .active_low = 1,
+ },
+ },
+};
+#endif /* CONFIG_BCM63XX_CPU_6328 */
+
+/*
+ * known 6338 boards
+ */
+#ifdef CONFIG_BCM63XX_CPU_6338
+static struct board_info __initdata board_96338gw = {
+ .name = "96338GW",
+ .expected_cpu_id = 0x6338,
+
+ .has_ohci0 = 1,
+ .has_uart0 = 1,
+
+ .has_enet0 = 1,
+ .enet0 = {
+ .force_speed_100 = 1,
+ .force_duplex_full = 1,
+ },
+
+ .leds = {
+ {
+ .name = "adsl",
+ .gpio = 3,
+ .active_low = 1,
+ },
+ {
+ .name = "ses",
+ .gpio = 5,
+ .active_low = 1,
+ },
+ {
+ .name = "ppp-fail",
+ .gpio = 4,
+ .active_low = 1,
+ },
+ {
+ .name = "power",
+ .gpio = 0,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "stop",
+ .gpio = 1,
+ .active_low = 1,
+ }
+ },
+};
+
+static struct board_info __initdata board_96338w = {
+ .name = "96338W",
+ .expected_cpu_id = 0x6338,
+
+ .has_uart0 = 1,
+
+ .has_enet0 = 1,
+ .enet0 = {
+ .force_speed_100 = 1,
+ .force_duplex_full = 1,
+ },
+
+ .leds = {
+ {
+ .name = "adsl",
+ .gpio = 3,
+ .active_low = 1,
+ },
+ {
+ .name = "ses",
+ .gpio = 5,
+ .active_low = 1,
+ },
+ {
+ .name = "ppp-fail",
+ .gpio = 4,
+ .active_low = 1,
+ },
+ {
+ .name = "power",
+ .gpio = 0,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "stop",
+ .gpio = 1,
+ .active_low = 1,
+ },
+ },
+};
+#endif /* CONFIG_BCM63XX_CPU_6338 */
+
+/*
+ * known 6345 boards
+ */
+#ifdef CONFIG_BCM63XX_CPU_6345
+static struct board_info __initdata board_96345gw2 = {
+ .name = "96345GW2",
+ .expected_cpu_id = 0x6345,
+
+ .has_uart0 = 1,
+};
+#endif /* CONFIG_BCM63XX_CPU_6345 */
+
+/*
+ * known 6348 boards
+ */
+#ifdef CONFIG_BCM63XX_CPU_6348
+static struct board_info __initdata board_96348r = {
+ .name = "96348R",
+ .expected_cpu_id = 0x6348,
+
+ .has_pci = 1,
+ .has_uart0 = 1,
+
+ .has_enet0 = 1,
+ .enet0 = {
+ .has_phy = 1,
+ .use_internal_phy = 1,
+ },
+
+ .leds = {
+ {
+ .name = "adsl-fail",
+ .gpio = 2,
+ .active_low = 1,
+ },
+ {
+ .name = "ppp",
+ .gpio = 3,
+ .active_low = 1,
+ },
+ {
+ .name = "ppp-fail",
+ .gpio = 4,
+ .active_low = 1,
+ },
+ {
+ .name = "power",
+ .gpio = 0,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "stop",
+ .gpio = 1,
+ .active_low = 1,
+ },
+ },
+};
+
+static struct board_info __initdata board_96348gw_10 = {
+ .name = "96348GW-10",
+ .expected_cpu_id = 0x6348,
+
+ .has_ohci0 = 1,
+ .has_pccard = 1,
+ .has_pci = 1,
+ .has_uart0 = 1,
+
+ .has_enet0 = 1,
+ .enet0 = {
+ .has_phy = 1,
+ .use_internal_phy = 1,
+ },
+
+ .has_enet1 = 1,
+ .enet1 = {
+ .force_speed_100 = 1,
+ .force_duplex_full = 1,
+ },
+
+ .leds = {
+ {
+ .name = "adsl-fail",
+ .gpio = 2,
+ .active_low = 1,
+ },
+ {
+ .name = "ppp",
+ .gpio = 3,
+ .active_low = 1,
+ },
+ {
+ .name = "ppp-fail",
+ .gpio = 4,
+ .active_low = 1,
+ },
+ {
+ .name = "power",
+ .gpio = 0,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "stop",
+ .gpio = 1,
+ .active_low = 1,
+ },
+ },
+};
+
+static struct board_info __initdata board_96348gw_11 = {
+ .name = "96348GW-11",
+ .expected_cpu_id = 0x6348,
+
+ .has_ohci0 = 1,
+ .has_pccard = 1,
+ .has_pci = 1,
+ .has_uart0 = 1,
+
+ .has_enet0 = 1,
+ .enet0 = {
+ .has_phy = 1,
+ .use_internal_phy = 1,
+ },
+
+ .has_enet1 = 1,
+ .enet1 = {
+ .force_speed_100 = 1,
+ .force_duplex_full = 1,
+ },
+
+ .leds = {
+ {
+ .name = "adsl-fail",
+ .gpio = 2,
+ .active_low = 1,
+ },
+ {
+ .name = "ppp",
+ .gpio = 3,
+ .active_low = 1,
+ },
+ {
+ .name = "ppp-fail",
+ .gpio = 4,
+ .active_low = 1,
+ },
+ {
+ .name = "power",
+ .gpio = 0,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "stop",
+ .gpio = 1,
+ .active_low = 1,
+ },
+ },
+};
+
+static struct board_info __initdata board_96348gw = {
+ .name = "96348GW",
+ .expected_cpu_id = 0x6348,
+
+ .has_ohci0 = 1,
+ .has_pci = 1,
+ .has_uart0 = 1,
+
+ .has_enet0 = 1,
+ .enet0 = {
+ .has_phy = 1,
+ .use_internal_phy = 1,
+ },
+
+ .has_enet1 = 1,
+ .enet1 = {
+ .force_speed_100 = 1,
+ .force_duplex_full = 1,
+ },
+
+ .leds = {
+ {
+ .name = "adsl-fail",
+ .gpio = 2,
+ .active_low = 1,
+ },
+ {
+ .name = "ppp",
+ .gpio = 3,
+ .active_low = 1,
+ },
+ {
+ .name = "ppp-fail",
+ .gpio = 4,
+ .active_low = 1,
+ },
+ {
+ .name = "power",
+ .gpio = 0,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "stop",
+ .gpio = 1,
+ .active_low = 1,
+ },
+ },
+};
+
+static struct board_info __initdata board_FAST2404 = {
+ .name = "F@ST2404",
+ .expected_cpu_id = 0x6348,
+
+ .has_ohci0 = 1,
+ .has_pccard = 1,
+ .has_pci = 1,
+ .has_uart0 = 1,
+
+ .has_enet0 = 1,
+ .enet0 = {
+ .has_phy = 1,
+ .use_internal_phy = 1,
+ },
+
+ .has_enet1 = 1,
+ .enet1 = {
+ .force_speed_100 = 1,
+ .force_duplex_full = 1,
+ },
+};
+
+static struct board_info __initdata board_rta1025w_16 = {
+ .name = "RTA1025W_16",
+ .expected_cpu_id = 0x6348,
+
+ .has_pci = 1,
+
+ .has_enet0 = 1,
+ .enet0 = {
+ .has_phy = 1,
+ .use_internal_phy = 1,
+ },
+
+ .has_enet1 = 1,
+ .enet1 = {
+ .force_speed_100 = 1,
+ .force_duplex_full = 1,
+ },
+};
+
+static struct board_info __initdata board_DV201AMR = {
+ .name = "DV201AMR",
+ .expected_cpu_id = 0x6348,
+
+ .has_ohci0 = 1,
+ .has_pci = 1,
+ .has_uart0 = 1,
+
+ .has_enet0 = 1,
+ .enet0 = {
+ .has_phy = 1,
+ .use_internal_phy = 1,
+ },
+
+ .has_enet1 = 1,
+ .enet1 = {
+ .force_speed_100 = 1,
+ .force_duplex_full = 1,
+ },
+};
+
+static struct board_info __initdata board_96348gw_a = {
+ .name = "96348GW-A",
+ .expected_cpu_id = 0x6348,
+
+ .has_ohci0 = 1,
+ .has_pci = 1,
+ .has_uart0 = 1,
+
+ .has_enet0 = 1,
+ .enet0 = {
+ .has_phy = 1,
+ .use_internal_phy = 1,
+ },
+
+ .has_enet1 = 1,
+ .enet1 = {
+ .force_speed_100 = 1,
+ .force_duplex_full = 1,
+ },
+};
+#endif /* CONFIG_BCM63XX_CPU_6348 */
+
+/*
+ * known 6358 boards
+ */
+#ifdef CONFIG_BCM63XX_CPU_6358
+static struct board_info __initdata board_96358vw = {
+ .name = "96358VW",
+ .expected_cpu_id = 0x6358,
+
+ .has_ehci0 = 1,
+ .has_ohci0 = 1,
+ .has_pccard = 1,
+ .has_pci = 1,
+ .has_uart0 = 1,
+
+ .has_enet0 = 1,
+ .enet0 = {
+ .has_phy = 1,
+ .use_internal_phy = 1,
+ },
+
+ .has_enet1 = 1,
+ .enet1 = {
+ .force_speed_100 = 1,
+ .force_duplex_full = 1,
+ },
+
+ .leds = {
+ {
+ .name = "adsl-fail",
+ .gpio = 15,
+ .active_low = 1,
+ },
+ {
+ .name = "ppp",
+ .gpio = 22,
+ .active_low = 1,
+ },
+ {
+ .name = "ppp-fail",
+ .gpio = 23,
+ .active_low = 1,
+ },
+ {
+ .name = "power",
+ .gpio = 4,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "stop",
+ .gpio = 5,
+ },
+ },
+};
+
+static struct board_info __initdata board_96358vw2 = {
+ .name = "96358VW2",
+ .expected_cpu_id = 0x6358,
+
+ .has_ehci0 = 1,
+ .has_ohci0 = 1,
+ .has_pccard = 1,
+ .has_pci = 1,
+ .has_uart0 = 1,
+
+ .has_enet0 = 1,
+ .enet0 = {
+ .has_phy = 1,
+ .use_internal_phy = 1,
+ },
+
+ .has_enet1 = 1,
+ .enet1 = {
+ .force_speed_100 = 1,
+ .force_duplex_full = 1,
+ },
+
+ .leds = {
+ {
+ .name = "adsl",
+ .gpio = 22,
+ .active_low = 1,
+ },
+ {
+ .name = "ppp-fail",
+ .gpio = 23,
+ },
+ {
+ .name = "power",
+ .gpio = 5,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "stop",
+ .gpio = 4,
+ .active_low = 1,
+ },
+ },
+};
+
+static struct board_info __initdata board_AGPFS0 = {
+ .name = "AGPF-S0",
+ .expected_cpu_id = 0x6358,
+
+ .has_ehci0 = 1,
+ .has_ohci0 = 1,
+ .has_pci = 1,
+ .has_uart0 = 1,
+
+ .has_enet0 = 1,
+ .enet0 = {
+ .has_phy = 1,
+ .use_internal_phy = 1,
+ },
+
+ .has_enet1 = 1,
+ .enet1 = {
+ .force_speed_100 = 1,
+ .force_duplex_full = 1,
+ },
+};
+
+static struct board_info __initdata board_DWVS0 = {
+ .name = "DWV-S0",
+ .expected_cpu_id = 0x6358,
+
+ .has_ehci0 = 1,
+ .has_ohci0 = 1,
+ .has_pci = 1,
+
+ .has_enet0 = 1,
+ .enet0 = {
+ .has_phy = 1,
+ .use_internal_phy = 1,
+ },
+
+ .has_enet1 = 1,
+ .enet1 = {
+ .force_speed_100 = 1,
+ .force_duplex_full = 1,
+ },
+};
+#endif /* CONFIG_BCM63XX_CPU_6358 */
+
+/*
+ * all boards
+ */
+static const struct board_info __initconst *bcm963xx_boards[] = {
+#ifdef CONFIG_BCM63XX_CPU_3368
+ &board_cvg834g,
+#endif /* CONFIG_BCM63XX_CPU_3368 */
+#ifdef CONFIG_BCM63XX_CPU_6328
+ &board_96328avng,
+#endif /* CONFIG_BCM63XX_CPU_6328 */
+#ifdef CONFIG_BCM63XX_CPU_6338
+ &board_96338gw,
+ &board_96338w,
+#endif /* CONFIG_BCM63XX_CPU_6338 */
+#ifdef CONFIG_BCM63XX_CPU_6345
+ &board_96345gw2,
+#endif /* CONFIG_BCM63XX_CPU_6345 */
+#ifdef CONFIG_BCM63XX_CPU_6348
+ &board_96348r,
+ &board_96348gw,
+ &board_96348gw_10,
+ &board_96348gw_11,
+ &board_FAST2404,
+ &board_DV201AMR,
+ &board_96348gw_a,
+ &board_rta1025w_16,
+#endif /* CONFIG_BCM63XX_CPU_6348 */
+#ifdef CONFIG_BCM63XX_CPU_6358
+ &board_96358vw,
+ &board_96358vw2,
+ &board_AGPFS0,
+ &board_DWVS0,
+#endif /* CONFIG_BCM63XX_CPU_6358 */
+};
+
+/*
+ * Register a sane SPROMv2 to make the on-board
+ * bcm4318 WLAN work
+ */
+#ifdef CONFIG_SSB_PCIHOST
+static struct ssb_sprom bcm63xx_sprom = {
+ .revision = 0x02,
+ .board_rev = 0x17,
+ .country_code = 0x0,
+ .ant_available_bg = 0x3,
+ .pa0b0 = 0x15ae,
+ .pa0b1 = 0xfa85,
+ .pa0b2 = 0xfe8d,
+ .pa1b0 = 0xffff,
+ .pa1b1 = 0xffff,
+ .pa1b2 = 0xffff,
+ .gpio0 = 0xff,
+ .gpio1 = 0xff,
+ .gpio2 = 0xff,
+ .gpio3 = 0xff,
+ .maxpwr_bg = 0x004c,
+ .itssi_bg = 0x00,
+ .boardflags_lo = 0x2848,
+ .boardflags_hi = 0x0000,
+};
+
+int bcm63xx_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
+{
+ if (bus->bustype == SSB_BUSTYPE_PCI) {
+ memcpy(out, &bcm63xx_sprom, sizeof(struct ssb_sprom));
+ return 0;
+ } else {
+ pr_err("unable to fill SPROM for given bustype\n");
+ return -EINVAL;
+ }
+}
+#endif /* CONFIG_SSB_PCIHOST */
+
+/*
+ * return board name for /proc/cpuinfo
+ */
+const char *board_get_name(void)
+{
+ return board.name;
+}
+
+/*
+ * early init callback, read nvram data from flash and checksum it
+ */
+void __init board_prom_init(void)
+{
+ unsigned int i;
+ u8 *boot_addr, *cfe;
+ char cfe_version[32];
+ char *board_name = NULL;
+ u32 val;
+ struct bcm_hcs *hcs;
+
+ /* read base address of boot chip select (0)
+ * 6328/6362 do not have MPI but boot from a fixed address
+ */
+ if (BCMCPU_IS_6328() || BCMCPU_IS_6362()) {
+ val = 0x18000000;
+ } else {
+ val = bcm_mpi_readl(MPI_CSBASE_REG(0));
+ val &= MPI_CSBASE_BASE_MASK;
+ }
+ boot_addr = (u8 *)KSEG1ADDR(val);
+
+ /* dump cfe version */
+ cfe = boot_addr + BCM963XX_CFE_VERSION_OFFSET;
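+ /* the version is stored either as a raw string or as numeric bytes
+ * following the "cfe-v" prefix */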
+ if (strstarts(cfe, "cfe-")) {
+ if (cfe[4] == 'v') {
+ if (cfe[5] == 'd')
+ snprintf(cfe_version, 11, "%s",
+ (char *) &cfe[5]);
+ else if (cfe[10] > 0)
+ snprintf(cfe_version, sizeof(cfe_version),
+ "%u.%u.%u-%u.%u-%u", cfe[5], cfe[6],
+ cfe[7], cfe[8], cfe[9], cfe[10]);
+ else
+ snprintf(cfe_version, sizeof(cfe_version),
+ "%u.%u.%u-%u.%u", cfe[5], cfe[6],
+ cfe[7], cfe[8], cfe[9]);
+ } else {
+ snprintf(cfe_version, 12, "%s", (char *) &cfe[4]);
+ }
+ } else {
+ strcpy(cfe_version, "unknown");
+ }
+ pr_info("CFE version: %s\n", cfe_version);
+
+ bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET);
+
+ if (BCMCPU_IS_3368()) {
+ hcs = (struct bcm_hcs *)boot_addr;
+ board_name = hcs->filename;
+ } else {
+ board_name = bcm63xx_nvram_get_name();
+ }
+ /* find board by name */
+ for (i = 0; i < ARRAY_SIZE(bcm963xx_boards); i++) {
+ if (strncmp(board_name, bcm963xx_boards[i]->name, 16))
+ continue;
+ /* copy, board desc array is marked initdata */
+ memcpy(&board, bcm963xx_boards[i], sizeof(board));
+ break;
+ }
+
+ /* bail out if board is not found, will complain later */
+ if (!board.name[0]) {
+ char name[17];
+ memcpy(name, board_name, 16);
+ name[16] = 0;
+ pr_err("unknown bcm963xx board: %s\n", name);
+ return;
+ }
+
+ /* set up pin multiplexing depending on the devices enabled on the
+ * board; this has to be done this early since PCI init is done
+ * inside an arch_initcall */
+ val = 0;
+
+#ifdef CONFIG_PCI
+ if (board.has_pci) {
+ bcm63xx_pci_enabled = 1;
+ if (BCMCPU_IS_6348())
+ val |= GPIO_MODE_6348_G2_PCI;
+ }
+#endif /* CONFIG_PCI */
+
+ if (board.has_pccard) {
+ if (BCMCPU_IS_6348())
+ val |= GPIO_MODE_6348_G1_MII_PCCARD;
+ }
+
+ if (board.has_enet0 && !board.enet0.use_internal_phy) {
+ if (BCMCPU_IS_6348())
+ val |= GPIO_MODE_6348_G3_EXT_MII |
+ GPIO_MODE_6348_G0_EXT_MII;
+ }
+
+ if (board.has_enet1 && !board.enet1.use_internal_phy) {
+ if (BCMCPU_IS_6348())
+ val |= GPIO_MODE_6348_G3_EXT_MII |
+ GPIO_MODE_6348_G0_EXT_MII;
+ }
+
+ bcm_gpio_writel(val, GPIO_MODE_REG);
+}
+
+/*
+ * second stage init callback; a good time to panic if we couldn't
+ * identify which board we're running on, since early printk is working
+ */
+void __init board_setup(void)
+{
+ if (!board.name[0])
+ panic("unable to detect bcm963xx board");
+ pr_info("board name: %s\n", board.name);
+
+ /* make sure we're running on expected cpu */
+ if (bcm63xx_get_cpu_id() != board.expected_cpu_id)
+ panic("unexpected CPU for bcm963xx board");
+}
+
+static struct gpio_led_platform_data bcm63xx_led_data;
+
+static struct platform_device bcm63xx_gpio_leds = {
+ .name = "leds-gpio",
+ .id = 0,
+ .dev.platform_data = &bcm63xx_led_data,
+};
+
+/*
+ * third stage init callback, register all board devices.
+ */
+int __init board_register_devices(void)
+{
+ if (board.has_uart0)
+ bcm63xx_uart_register(0);
+
+ if (board.has_uart1)
+ bcm63xx_uart_register(1);
+
+ if (board.has_pccard)
+ bcm63xx_pcmcia_register();
+
+ if (board.has_enet0 &&
+ !bcm63xx_nvram_get_mac_address(board.enet0.mac_addr))
+ bcm63xx_enet_register(0, &board.enet0);
+
+ if (board.has_enet1 &&
+ !bcm63xx_nvram_get_mac_address(board.enet1.mac_addr))
+ bcm63xx_enet_register(1, &board.enet1);
+
+ if (board.has_enetsw &&
+ !bcm63xx_nvram_get_mac_address(board.enetsw.mac_addr))
+ bcm63xx_enetsw_register(&board.enetsw);
+
+ if (board.has_usbd)
+ bcm63xx_usbd_register(&board.usbd);
+
+ /* Generate MAC address for WLAN and register our SPROM,
+ * do this after registering enet devices
+ */
+#ifdef CONFIG_SSB_PCIHOST
+ if (!bcm63xx_nvram_get_mac_address(bcm63xx_sprom.il0mac)) {
+ memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN);
+ memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN);
+ if (ssb_arch_register_fallback_sprom(
+ &bcm63xx_get_fallback_sprom) < 0)
+ pr_err("failed to register fallback SPROM\n");
+ }
+#endif /* CONFIG_SSB_PCIHOST */
+
+ bcm63xx_spi_register();
+
+ bcm63xx_hsspi_register();
+
+ bcm63xx_flash_register();
+
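+ /* board LEDs are handled by the generic leds-gpio driver */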
+ bcm63xx_led_data.num_leds = ARRAY_SIZE(board.leds);
+ bcm63xx_led_data.leds = board.leds;
+
+ platform_device_register(&bcm63xx_gpio_leds);
+
+ if (board.ephy_reset_gpio && board.ephy_reset_gpio_flags)
+ gpio_request_one(board.ephy_reset_gpio,
+ board.ephy_reset_gpio_flags, "ephy-reset");
+
+ return 0;
+}
diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
new file mode 100644
index 000000000..f183c4550
--- /dev/null
+++ b/arch/mips/bcm63xx/clk.c
@@ -0,0 +1,579 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_regs.h>
+#include <bcm63xx_reset.h>
+
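+/*
+ * minimal clock object: optional gate callback, fixed rate and a
+ * reference count protected by clocks_mutex
+ */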
+struct clk {
+ void (*set)(struct clk *, int);
+ unsigned int rate;
+ unsigned int usage;
+ int id;
+};
+
+static DEFINE_MUTEX(clocks_mutex);
+
+
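+/* refcounted gating: switch the hardware clock only on the first enable
+ * and the last disable */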
+static void clk_enable_unlocked(struct clk *clk)
+{
+ if (clk->set && (clk->usage++) == 0)
+ clk->set(clk, 1);
+}
+
+static void clk_disable_unlocked(struct clk *clk)
+{
+ if (clk->set && (--clk->usage) == 0)
+ clk->set(clk, 0);
+}
+
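+/* set or clear clock gating bits in the PERF_CKCTL register */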
+static void bcm_hwclock_set(u32 mask, int enable)
+{
+ u32 reg;
+
+ reg = bcm_perf_readl(PERF_CKCTL_REG);
+ if (enable)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ bcm_perf_writel(reg, PERF_CKCTL_REG);
+}
+
+/*
+ * Ethernet MAC "misc" clock: dma clocks and main clock on 6348
+ */
+static void enet_misc_set(struct clk *clk, int enable)
+{
+ u32 mask;
+
+ if (BCMCPU_IS_6338())
+ mask = CKCTL_6338_ENET_EN;
+ else if (BCMCPU_IS_6345())
+ mask = CKCTL_6345_ENET_EN;
+ else if (BCMCPU_IS_6348())
+ mask = CKCTL_6348_ENET_EN;
+ else
+ /* BCMCPU_IS_6358 */
+ mask = CKCTL_6358_EMUSB_EN;
+ bcm_hwclock_set(mask, enable);
+}
+
+static struct clk clk_enet_misc = {
+ .set = enet_misc_set,
+};
+
+/*
+ * Ethernet MAC clocks: only relevant on 3368/6358; silently enable
+ * the misc clocks
+ */
+static void enetx_set(struct clk *clk, int enable)
+{
+ if (enable)
+ clk_enable_unlocked(&clk_enet_misc);
+ else
+ clk_disable_unlocked(&clk_enet_misc);
+
+ if (BCMCPU_IS_3368() || BCMCPU_IS_6358()) {
+ u32 mask;
+
+ if (clk->id == 0)
+ mask = CKCTL_6358_ENET0_EN;
+ else
+ mask = CKCTL_6358_ENET1_EN;
+ bcm_hwclock_set(mask, enable);
+ }
+}
+
+static struct clk clk_enet0 = {
+ .id = 0,
+ .set = enetx_set,
+};
+
+static struct clk clk_enet1 = {
+ .id = 1,
+ .set = enetx_set,
+};
+
+/*
+ * Ethernet PHY clock
+ */
+static void ephy_set(struct clk *clk, int enable)
+{
+ if (BCMCPU_IS_3368() || BCMCPU_IS_6358())
+ bcm_hwclock_set(CKCTL_6358_EPHY_EN, enable);
+}
+
+
+static struct clk clk_ephy = {
+ .set = ephy_set,
+};
+
+/*
+ * Ethernet switch SAR clock
+ */
+static void swpkt_sar_set(struct clk *clk, int enable)
+{
+ if (BCMCPU_IS_6368())
+ bcm_hwclock_set(CKCTL_6368_SWPKT_SAR_EN, enable);
+ else
+ return;
+}
+
+static struct clk clk_swpkt_sar = {
+ .set = swpkt_sar_set,
+};
+
+/*
+ * Ethernet switch USB clock
+ */
+static void swpkt_usb_set(struct clk *clk, int enable)
+{
+ if (BCMCPU_IS_6368())
+ bcm_hwclock_set(CKCTL_6368_SWPKT_USB_EN, enable);
+ else
+ return;
+}
+
+static struct clk clk_swpkt_usb = {
+ .set = swpkt_usb_set,
+};
+
+/*
+ * Ethernet switch clock
+ */
+static void enetsw_set(struct clk *clk, int enable)
+{
+ if (BCMCPU_IS_6328()) {
+ bcm_hwclock_set(CKCTL_6328_ROBOSW_EN, enable);
+ } else if (BCMCPU_IS_6362()) {
+ bcm_hwclock_set(CKCTL_6362_ROBOSW_EN, enable);
+ } else if (BCMCPU_IS_6368()) {
+ if (enable) {
+ clk_enable_unlocked(&clk_swpkt_sar);
+ clk_enable_unlocked(&clk_swpkt_usb);
+ } else {
+ clk_disable_unlocked(&clk_swpkt_usb);
+ clk_disable_unlocked(&clk_swpkt_sar);
+ }
+ bcm_hwclock_set(CKCTL_6368_ROBOSW_EN, enable);
+ } else {
+ return;
+ }
+
+ if (enable) {
+ /* reset switch core after clock change */
+ bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1);
+ msleep(10);
+ bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 0);
+ msleep(10);
+ }
+}
+
+static struct clk clk_enetsw = {
+ .set = enetsw_set,
+};
+
+/*
+ * PCM clock
+ */
+static void pcm_set(struct clk *clk, int enable)
+{
+ if (BCMCPU_IS_3368())
+ bcm_hwclock_set(CKCTL_3368_PCM_EN, enable);
+ if (BCMCPU_IS_6358())
+ bcm_hwclock_set(CKCTL_6358_PCM_EN, enable);
+}
+
+static struct clk clk_pcm = {
+ .set = pcm_set,
+};
+
+/*
+ * USB host clock
+ */
+static void usbh_set(struct clk *clk, int enable)
+{
+ if (BCMCPU_IS_6328())
+ bcm_hwclock_set(CKCTL_6328_USBH_EN, enable);
+ else if (BCMCPU_IS_6348())
+ bcm_hwclock_set(CKCTL_6348_USBH_EN, enable);
+ else if (BCMCPU_IS_6362())
+ bcm_hwclock_set(CKCTL_6362_USBH_EN, enable);
+ else if (BCMCPU_IS_6368())
+ bcm_hwclock_set(CKCTL_6368_USBH_EN, enable);
+}
+
+static struct clk clk_usbh = {
+ .set = usbh_set,
+};
+
+/*
+ * USB device clock
+ */
+static void usbd_set(struct clk *clk, int enable)
+{
+ if (BCMCPU_IS_6328())
+ bcm_hwclock_set(CKCTL_6328_USBD_EN, enable);
+ else if (BCMCPU_IS_6362())
+ bcm_hwclock_set(CKCTL_6362_USBD_EN, enable);
+ else if (BCMCPU_IS_6368())
+ bcm_hwclock_set(CKCTL_6368_USBD_EN, enable);
+}
+
+static struct clk clk_usbd = {
+ .set = usbd_set,
+};
+
+/*
+ * SPI clock
+ */
+static void spi_set(struct clk *clk, int enable)
+{
+ u32 mask;
+
+ if (BCMCPU_IS_6338())
+ mask = CKCTL_6338_SPI_EN;
+ else if (BCMCPU_IS_6348())
+ mask = CKCTL_6348_SPI_EN;
+ else if (BCMCPU_IS_3368() || BCMCPU_IS_6358())
+ mask = CKCTL_6358_SPI_EN;
+ else if (BCMCPU_IS_6362())
+ mask = CKCTL_6362_SPI_EN;
+ else
+ /* BCMCPU_IS_6368 */
+ mask = CKCTL_6368_SPI_EN;
+ bcm_hwclock_set(mask, enable);
+}
+
+static struct clk clk_spi = {
+ .set = spi_set,
+};
+
+/*
+ * HSSPI clock
+ */
+static void hsspi_set(struct clk *clk, int enable)
+{
+ u32 mask;
+
+ if (BCMCPU_IS_6328())
+ mask = CKCTL_6328_HSSPI_EN;
+ else if (BCMCPU_IS_6362())
+ mask = CKCTL_6362_HSSPI_EN;
+ else
+ return;
+
+ bcm_hwclock_set(mask, enable);
+}
+
+static struct clk clk_hsspi = {
+ .set = hsspi_set,
+};
+
+/*
+ * HSSPI PLL
+ */
+static struct clk clk_hsspi_pll;
+
+/*
+ * XTM clock
+ */
+static void xtm_set(struct clk *clk, int enable)
+{
+ if (!BCMCPU_IS_6368())
+ return;
+
+ if (enable)
+ clk_enable_unlocked(&clk_swpkt_sar);
+ else
+ clk_disable_unlocked(&clk_swpkt_sar);
+
+ bcm_hwclock_set(CKCTL_6368_SAR_EN, enable);
+
+ if (enable) {
+ /* reset sar core after clock change */
+ bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 1);
+ mdelay(1);
+ bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 0);
+ mdelay(1);
+ }
+}
+
+
+static struct clk clk_xtm = {
+ .set = xtm_set,
+};
+
+/*
+ * IPsec clock
+ */
+static void ipsec_set(struct clk *clk, int enable)
+{
+ if (BCMCPU_IS_6362())
+ bcm_hwclock_set(CKCTL_6362_IPSEC_EN, enable);
+ else if (BCMCPU_IS_6368())
+ bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
+}
+
+static struct clk clk_ipsec = {
+ .set = ipsec_set,
+};
+
+/*
+ * PCIe clock
+ */
+
+static void pcie_set(struct clk *clk, int enable)
+{
+ if (BCMCPU_IS_6328())
+ bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
+ else if (BCMCPU_IS_6362())
+ bcm_hwclock_set(CKCTL_6362_PCIE_EN, enable);
+}
+
+static struct clk clk_pcie = {
+ .set = pcie_set,
+};
+
+/*
+ * Internal peripheral clock
+ */
+static struct clk clk_periph = {
+ .rate = (50 * 1000 * 1000),
+};
+
+
+/*
+ * Linux clock API implementation
+ */
+int clk_enable(struct clk *clk)
+{
+ if (!clk)
+ return 0;
+ mutex_lock(&clocks_mutex);
+ clk_enable_unlocked(clk);
+ mutex_unlock(&clocks_mutex);
+ return 0;
+}
+
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+ if (!clk)
+ return;
+
+ mutex_lock(&clocks_mutex);
+ clk_disable_unlocked(clk);
+ mutex_unlock(&clocks_mutex);
+}
+
+EXPORT_SYMBOL(clk_disable);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+ return NULL;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ if (!clk)
+ return 0;
+
+ return clk->rate;
+}
+
+EXPORT_SYMBOL(clk_get_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(clk_set_rate);
+
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(clk_round_rate);
+
+static struct clk_lookup bcm3368_clks[] = {
+ /* fixed rate clocks */
+ CLKDEV_INIT(NULL, "periph", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
+ /* gated clocks */
+ CLKDEV_INIT(NULL, "enet0", &clk_enet0),
+ CLKDEV_INIT(NULL, "enet1", &clk_enet1),
+ CLKDEV_INIT(NULL, "ephy", &clk_ephy),
+ CLKDEV_INIT(NULL, "usbh", &clk_usbh),
+ CLKDEV_INIT(NULL, "usbd", &clk_usbd),
+ CLKDEV_INIT(NULL, "spi", &clk_spi),
+ CLKDEV_INIT(NULL, "pcm", &clk_pcm),
+ CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet0),
+ CLKDEV_INIT("bcm63xx_enet.1", "enet", &clk_enet1),
+};
+
+static struct clk_lookup bcm6328_clks[] = {
+ /* fixed rate clocks */
+ CLKDEV_INIT(NULL, "periph", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
+ CLKDEV_INIT("bcm63xx-hsspi.0", "pll", &clk_hsspi_pll),
+ /* gated clocks */
+ CLKDEV_INIT(NULL, "enetsw", &clk_enetsw),
+ CLKDEV_INIT(NULL, "usbh", &clk_usbh),
+ CLKDEV_INIT(NULL, "usbd", &clk_usbd),
+ CLKDEV_INIT(NULL, "hsspi", &clk_hsspi),
+ CLKDEV_INIT(NULL, "pcie", &clk_pcie),
+};
+
+static struct clk_lookup bcm6338_clks[] = {
+ /* fixed rate clocks */
+ CLKDEV_INIT(NULL, "periph", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
+ /* gated clocks */
+ CLKDEV_INIT(NULL, "enet0", &clk_enet0),
+ CLKDEV_INIT(NULL, "enet1", &clk_enet1),
+ CLKDEV_INIT(NULL, "ephy", &clk_ephy),
+ CLKDEV_INIT(NULL, "usbh", &clk_usbh),
+ CLKDEV_INIT(NULL, "usbd", &clk_usbd),
+ CLKDEV_INIT(NULL, "spi", &clk_spi),
+ CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet_misc),
+};
+
+static struct clk_lookup bcm6345_clks[] = {
+ /* fixed rate clocks */
+ CLKDEV_INIT(NULL, "periph", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
+ /* gated clocks */
+ CLKDEV_INIT(NULL, "enet0", &clk_enet0),
+ CLKDEV_INIT(NULL, "enet1", &clk_enet1),
+ CLKDEV_INIT(NULL, "ephy", &clk_ephy),
+ CLKDEV_INIT(NULL, "usbh", &clk_usbh),
+ CLKDEV_INIT(NULL, "usbd", &clk_usbd),
+ CLKDEV_INIT(NULL, "spi", &clk_spi),
+ CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet_misc),
+};
+
+static struct clk_lookup bcm6348_clks[] = {
+ /* fixed rate clocks */
+ CLKDEV_INIT(NULL, "periph", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
+ /* gated clocks */
+ CLKDEV_INIT(NULL, "enet0", &clk_enet0),
+ CLKDEV_INIT(NULL, "enet1", &clk_enet1),
+ CLKDEV_INIT(NULL, "ephy", &clk_ephy),
+ CLKDEV_INIT(NULL, "usbh", &clk_usbh),
+ CLKDEV_INIT(NULL, "usbd", &clk_usbd),
+ CLKDEV_INIT(NULL, "spi", &clk_spi),
+ CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet_misc),
+ CLKDEV_INIT("bcm63xx_enet.1", "enet", &clk_enet_misc),
+};
+
+static struct clk_lookup bcm6358_clks[] = {
+ /* fixed rate clocks */
+ CLKDEV_INIT(NULL, "periph", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
+ /* gated clocks */
+ CLKDEV_INIT(NULL, "enet0", &clk_enet0),
+ CLKDEV_INIT(NULL, "enet1", &clk_enet1),
+ CLKDEV_INIT(NULL, "ephy", &clk_ephy),
+ CLKDEV_INIT(NULL, "usbh", &clk_usbh),
+ CLKDEV_INIT(NULL, "usbd", &clk_usbd),
+ CLKDEV_INIT(NULL, "spi", &clk_spi),
+ CLKDEV_INIT(NULL, "pcm", &clk_pcm),
+ CLKDEV_INIT(NULL, "swpkt_sar", &clk_swpkt_sar),
+ CLKDEV_INIT(NULL, "swpkt_usb", &clk_swpkt_usb),
+ CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet0),
+ CLKDEV_INIT("bcm63xx_enet.1", "enet", &clk_enet1),
+};
+
+static struct clk_lookup bcm6362_clks[] = {
+ /* fixed rate clocks */
+ CLKDEV_INIT(NULL, "periph", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
+ CLKDEV_INIT("bcm63xx-hsspi.0", "pll", &clk_hsspi_pll),
+ /* gated clocks */
+ CLKDEV_INIT(NULL, "enetsw", &clk_enetsw),
+ CLKDEV_INIT(NULL, "usbh", &clk_usbh),
+ CLKDEV_INIT(NULL, "usbd", &clk_usbd),
+ CLKDEV_INIT(NULL, "spi", &clk_spi),
+ CLKDEV_INIT(NULL, "hsspi", &clk_hsspi),
+ CLKDEV_INIT(NULL, "pcie", &clk_pcie),
+ CLKDEV_INIT(NULL, "ipsec", &clk_ipsec),
+};
+
+static struct clk_lookup bcm6368_clks[] = {
+ /* fixed rate clocks */
+ CLKDEV_INIT(NULL, "periph", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
+ /* gated clocks */
+ CLKDEV_INIT(NULL, "enetsw", &clk_enetsw),
+ CLKDEV_INIT(NULL, "usbh", &clk_usbh),
+ CLKDEV_INIT(NULL, "usbd", &clk_usbd),
+ CLKDEV_INIT(NULL, "spi", &clk_spi),
+ CLKDEV_INIT(NULL, "xtm", &clk_xtm),
+ CLKDEV_INIT(NULL, "ipsec", &clk_ipsec),
+};
+
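+/* HSSPI PLL reference rate differs between the 6328 and the 6362 */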
+#define HSSPI_PLL_HZ_6328 133333333
+#define HSSPI_PLL_HZ_6362 400000000
+
+static int __init bcm63xx_clk_init(void)
+{
+ switch (bcm63xx_get_cpu_id()) {
+ case BCM3368_CPU_ID:
+ clkdev_add_table(bcm3368_clks, ARRAY_SIZE(bcm3368_clks));
+ break;
+ case BCM6328_CPU_ID:
+ clk_hsspi_pll.rate = HSSPI_PLL_HZ_6328;
+ clkdev_add_table(bcm6328_clks, ARRAY_SIZE(bcm6328_clks));
+ break;
+ case BCM6338_CPU_ID:
+ clkdev_add_table(bcm6338_clks, ARRAY_SIZE(bcm6338_clks));
+ break;
+ case BCM6345_CPU_ID:
+ clkdev_add_table(bcm6345_clks, ARRAY_SIZE(bcm6345_clks));
+ break;
+ case BCM6348_CPU_ID:
+ clkdev_add_table(bcm6348_clks, ARRAY_SIZE(bcm6348_clks));
+ break;
+ case BCM6358_CPU_ID:
+ clkdev_add_table(bcm6358_clks, ARRAY_SIZE(bcm6358_clks));
+ break;
+ case BCM6362_CPU_ID:
+ clk_hsspi_pll.rate = HSSPI_PLL_HZ_6362;
+ clkdev_add_table(bcm6362_clks, ARRAY_SIZE(bcm6362_clks));
+ break;
+ case BCM6368_CPU_ID:
+ clkdev_add_table(bcm6368_clks, ARRAY_SIZE(bcm6368_clks));
+ break;
+ }
+
+ return 0;
+}
+arch_initcall(bcm63xx_clk_init);
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c
new file mode 100644
index 000000000..8e3e199dd
--- /dev/null
+++ b/arch/mips/bcm63xx/cpu.c
@@ -0,0 +1,385 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ * Copyright (C) 2009 Florian Fainelli <florian@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/cpu.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/mipsregs.h>
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_regs.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_irq.h>
+
+const unsigned long *bcm63xx_regs_base;
+EXPORT_SYMBOL(bcm63xx_regs_base);
+
+const int *bcm63xx_irqs;
+EXPORT_SYMBOL(bcm63xx_irqs);
+
+u16 bcm63xx_cpu_id __read_mostly;
+EXPORT_SYMBOL(bcm63xx_cpu_id);
+
+static u8 bcm63xx_cpu_rev;
+static unsigned int bcm63xx_cpu_freq;
+static unsigned int bcm63xx_memory_size;
+
+static const unsigned long bcm3368_regs_base[] = {
+ __GEN_CPU_REGS_TABLE(3368)
+};
+
+static const int bcm3368_irqs[] = {
+ __GEN_CPU_IRQ_TABLE(3368)
+};
+
+static const unsigned long bcm6328_regs_base[] = {
+ __GEN_CPU_REGS_TABLE(6328)
+};
+
+static const int bcm6328_irqs[] = {
+ __GEN_CPU_IRQ_TABLE(6328)
+};
+
+static const unsigned long bcm6338_regs_base[] = {
+ __GEN_CPU_REGS_TABLE(6338)
+};
+
+static const int bcm6338_irqs[] = {
+ __GEN_CPU_IRQ_TABLE(6338)
+};
+
+static const unsigned long bcm6345_regs_base[] = {
+ __GEN_CPU_REGS_TABLE(6345)
+};
+
+static const int bcm6345_irqs[] = {
+ __GEN_CPU_IRQ_TABLE(6345)
+};
+
+static const unsigned long bcm6348_regs_base[] = {
+ __GEN_CPU_REGS_TABLE(6348)
+};
+
+static const int bcm6348_irqs[] = {
+ __GEN_CPU_IRQ_TABLE(6348)
+
+};
+
+static const unsigned long bcm6358_regs_base[] = {
+ __GEN_CPU_REGS_TABLE(6358)
+};
+
+static const int bcm6358_irqs[] = {
+ __GEN_CPU_IRQ_TABLE(6358)
+
+};
+
+static const unsigned long bcm6362_regs_base[] = {
+ __GEN_CPU_REGS_TABLE(6362)
+};
+
+static const int bcm6362_irqs[] = {
+ __GEN_CPU_IRQ_TABLE(6362)
+
+};
+
+static const unsigned long bcm6368_regs_base[] = {
+ __GEN_CPU_REGS_TABLE(6368)
+};
+
+static const int bcm6368_irqs[] = {
+ __GEN_CPU_IRQ_TABLE(6368)
+
+};
+
+u8 bcm63xx_get_cpu_rev(void)
+{
+ return bcm63xx_cpu_rev;
+}
+
+EXPORT_SYMBOL(bcm63xx_get_cpu_rev);
+
+unsigned int bcm63xx_get_cpu_freq(void)
+{
+ return bcm63xx_cpu_freq;
+}
+
+unsigned int bcm63xx_get_memory_size(void)
+{
+ return bcm63xx_memory_size;
+}
+
+static unsigned int detect_cpu_clock(void)
+{
+ u16 cpu_id = bcm63xx_get_cpu_id();
+
+ switch (cpu_id) {
+ case BCM3368_CPU_ID:
+ return 300000000;
+
+ case BCM6328_CPU_ID:
+ {
+ unsigned int tmp, mips_pll_fcvo;
+
+ tmp = bcm_misc_readl(MISC_STRAPBUS_6328_REG);
+ mips_pll_fcvo = (tmp & STRAPBUS_6328_FCVO_MASK)
+ >> STRAPBUS_6328_FCVO_SHIFT;
+
+ switch (mips_pll_fcvo) {
+ case 0x12:
+ case 0x14:
+ case 0x19:
+ return 160000000;
+ case 0x1c:
+ return 192000000;
+ case 0x13:
+ case 0x15:
+ return 200000000;
+ case 0x1a:
+ return 384000000;
+ case 0x16:
+ return 400000000;
+ default:
+ return 320000000;
+ }
+
+ }
+ case BCM6338_CPU_ID:
+ /* BCM6338 has a fixed 240 MHz frequency */
+ return 240000000;
+
+ case BCM6345_CPU_ID:
+ /* BCM6345 has a fixed 140 MHz frequency */
+ return 140000000;
+
+ case BCM6348_CPU_ID:
+ {
+ unsigned int tmp, n1, n2, m1;
+
+ /* 16MHz * (N1 + 1) * (N2 + 2) / (M1_CPU + 1) */
+ tmp = bcm_perf_readl(PERF_MIPSPLLCTL_REG);
+ n1 = (tmp & MIPSPLLCTL_N1_MASK) >> MIPSPLLCTL_N1_SHIFT;
+ n2 = (tmp & MIPSPLLCTL_N2_MASK) >> MIPSPLLCTL_N2_SHIFT;
+ m1 = (tmp & MIPSPLLCTL_M1CPU_MASK) >> MIPSPLLCTL_M1CPU_SHIFT;
+ n1 += 1;
+ n2 += 2;
+ m1 += 1;
+ return (16 * 1000000 * n1 * n2) / m1;
+ }
+
+ case BCM6358_CPU_ID:
+ {
+ unsigned int tmp, n1, n2, m1;
+
+ /* 16MHz * N1 * N2 / M1_CPU */
+ tmp = bcm_ddr_readl(DDR_DMIPSPLLCFG_REG);
+ n1 = (tmp & DMIPSPLLCFG_N1_MASK) >> DMIPSPLLCFG_N1_SHIFT;
+ n2 = (tmp & DMIPSPLLCFG_N2_MASK) >> DMIPSPLLCFG_N2_SHIFT;
+ m1 = (tmp & DMIPSPLLCFG_M1_MASK) >> DMIPSPLLCFG_M1_SHIFT;
+ return (16 * 1000000 * n1 * n2) / m1;
+ }
+
+ case BCM6362_CPU_ID:
+ {
+ unsigned int tmp, mips_pll_fcvo;
+
+ tmp = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
+ mips_pll_fcvo = (tmp & STRAPBUS_6362_FCVO_MASK)
+ >> STRAPBUS_6362_FCVO_SHIFT;
+ switch (mips_pll_fcvo) {
+ case 0x03:
+ case 0x0b:
+ case 0x13:
+ case 0x1b:
+ return 240000000;
+ case 0x04:
+ case 0x0c:
+ case 0x14:
+ case 0x1c:
+ return 160000000;
+ case 0x05:
+ case 0x0e:
+ case 0x16:
+ case 0x1e:
+ case 0x1f:
+ return 400000000;
+ case 0x06:
+ return 440000000;
+ case 0x07:
+ case 0x17:
+ return 384000000;
+ case 0x15:
+ case 0x1d:
+ return 200000000;
+ default:
+ return 320000000;
+ }
+ }
+ case BCM6368_CPU_ID:
+ {
+ unsigned int tmp, p1, p2, ndiv, m1;
+
+ /* (64MHz / P1) * P2 * NDIV / M1_CPU */
+ tmp = bcm_ddr_readl(DDR_DMIPSPLLCFG_6368_REG);
+
+ p1 = (tmp & DMIPSPLLCFG_6368_P1_MASK) >>
+ DMIPSPLLCFG_6368_P1_SHIFT;
+
+ p2 = (tmp & DMIPSPLLCFG_6368_P2_MASK) >>
+ DMIPSPLLCFG_6368_P2_SHIFT;
+
+ ndiv = (tmp & DMIPSPLLCFG_6368_NDIV_MASK) >>
+ DMIPSPLLCFG_6368_NDIV_SHIFT;
+
+ tmp = bcm_ddr_readl(DDR_DMIPSPLLDIV_6368_REG);
+ m1 = (tmp & DMIPSPLLDIV_6368_MDIV_MASK) >>
+ DMIPSPLLDIV_6368_MDIV_SHIFT;
+
+ return (((64 * 1000000) / p1) * p2 * ndiv) / m1;
+ }
+
+ default:
+ panic("Failed to detect clock for CPU with id=%04X\n", cpu_id);
+ }
+}
+
+/*
+ * attempt to detect the amount of memory installed
+ */
+static unsigned int detect_memory_size(void)
+{
+ unsigned int cols = 0, rows = 0, is_32bits = 0, banks = 0;
+ u32 val;
+
+ if (BCMCPU_IS_6328() || BCMCPU_IS_6362())
+ return bcm_ddr_readl(DDR_CSEND_REG) << 24;
+
+ if (BCMCPU_IS_6345()) {
+ val = bcm_sdram_readl(SDRAM_MBASE_REG);
+ return val * 8 * 1024 * 1024;
+ }
+
+ if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
+ val = bcm_sdram_readl(SDRAM_CFG_REG);
+ rows = (val & SDRAM_CFG_ROW_MASK) >> SDRAM_CFG_ROW_SHIFT;
+ cols = (val & SDRAM_CFG_COL_MASK) >> SDRAM_CFG_COL_SHIFT;
+ is_32bits = (val & SDRAM_CFG_32B_MASK) ? 1 : 0;
+ banks = (val & SDRAM_CFG_BANK_MASK) ? 2 : 1;
+ }
+
+ if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
+ val = bcm_memc_readl(MEMC_CFG_REG);
+ rows = (val & MEMC_CFG_ROW_MASK) >> MEMC_CFG_ROW_SHIFT;
+ cols = (val & MEMC_CFG_COL_MASK) >> MEMC_CFG_COL_SHIFT;
+ is_32bits = (val & MEMC_CFG_32B_MASK) ? 0 : 1;
+ banks = 2;
+ }
+
+ /* 0 => 11 address bits ... 2 => 13 address bits */
+ rows += 11;
+
+ /* 0 => 8 address bits ... 2 => 10 address bits */
+ cols += 8;
+
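+ /* combine address bits, bus width and banks, all as powers of two */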
+ return 1 << (cols + rows + (is_32bits + 1) + banks);
+}
+
+void __init bcm63xx_cpu_init(void)
+{
+ unsigned int tmp;
+ unsigned int cpu = smp_processor_id();
+ u32 chipid_reg;
+
+ /* SoC register locations depend on the CPU type */
+ chipid_reg = 0;
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS3300:
+ if ((read_c0_prid() & PRID_IMP_MASK) != PRID_IMP_BMIPS3300_ALT)
+ __cpu_name[cpu] = "Broadcom BCM6338";
+ fallthrough;
+ case CPU_BMIPS32:
+ chipid_reg = BCM_6345_PERF_BASE;
+ break;
+ case CPU_BMIPS4350:
+ switch ((read_c0_prid() & PRID_REV_MASK)) {
+ case 0x04:
+ chipid_reg = BCM_3368_PERF_BASE;
+ break;
+ case 0x10:
+ chipid_reg = BCM_6345_PERF_BASE;
+ break;
+ default:
+ chipid_reg = BCM_6368_PERF_BASE;
+ break;
+ }
+ break;
+ }
+
+ /*
+ * it is really early to panic, but delaying the panic would not help
+ * since we would never get a working console
+ */
+ if (!chipid_reg)
+ panic("unsupported Broadcom CPU");
+
+ /* read out CPU type */
+ tmp = bcm_readl(chipid_reg);
+ bcm63xx_cpu_id = (tmp & REV_CHIPID_MASK) >> REV_CHIPID_SHIFT;
+ bcm63xx_cpu_rev = (tmp & REV_REVID_MASK) >> REV_REVID_SHIFT;
+
+ switch (bcm63xx_cpu_id) {
+ case BCM3368_CPU_ID:
+ bcm63xx_regs_base = bcm3368_regs_base;
+ bcm63xx_irqs = bcm3368_irqs;
+ break;
+ case BCM6328_CPU_ID:
+ bcm63xx_regs_base = bcm6328_regs_base;
+ bcm63xx_irqs = bcm6328_irqs;
+ break;
+ case BCM6338_CPU_ID:
+ bcm63xx_regs_base = bcm6338_regs_base;
+ bcm63xx_irqs = bcm6338_irqs;
+ break;
+ case BCM6345_CPU_ID:
+ bcm63xx_regs_base = bcm6345_regs_base;
+ bcm63xx_irqs = bcm6345_irqs;
+ break;
+ case BCM6348_CPU_ID:
+ bcm63xx_regs_base = bcm6348_regs_base;
+ bcm63xx_irqs = bcm6348_irqs;
+ break;
+ case BCM6358_CPU_ID:
+ bcm63xx_regs_base = bcm6358_regs_base;
+ bcm63xx_irqs = bcm6358_irqs;
+ break;
+ case BCM6362_CPU_ID:
+ bcm63xx_regs_base = bcm6362_regs_base;
+ bcm63xx_irqs = bcm6362_irqs;
+ break;
+ case BCM6368_CPU_ID:
+ bcm63xx_regs_base = bcm6368_regs_base;
+ bcm63xx_irqs = bcm6368_irqs;
+ break;
+ default:
+ panic("unsupported broadcom CPU %x", bcm63xx_cpu_id);
+ break;
+ }
+
+ bcm63xx_cpu_freq = detect_cpu_clock();
+ bcm63xx_memory_size = detect_memory_size();
+
+ pr_info("Detected Broadcom 0x%04x CPU revision %02x\n",
+ bcm63xx_cpu_id, bcm63xx_cpu_rev);
+ pr_info("CPU frequency is %u MHz\n",
+ bcm63xx_cpu_freq / 1000000);
+ pr_info("%uMB of RAM installed\n",
+ bcm63xx_memory_size >> 20);
+}
diff --git a/arch/mips/bcm63xx/cs.c b/arch/mips/bcm63xx/cs.c
new file mode 100644
index 000000000..29205badc
--- /dev/null
+++ b/arch/mips/bcm63xx/cs.c
@@ -0,0 +1,145 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/log2.h>
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_regs.h>
+#include <bcm63xx_cs.h>
+
+static DEFINE_SPINLOCK(bcm63xx_cs_lock);
+
+/*
+ * check if given chip select exists
+ */
+static int is_valid_cs(unsigned int cs)
+{
+ if (cs > 6)
+ return 0;
+ return 1;
+}
+
+/*
+ * Configure chipselect base address and size (bytes).
+ * Size must be a power of two between 8k and 256M.
+ */
+int bcm63xx_set_cs_base(unsigned int cs, u32 base, unsigned int size)
+{
+ unsigned long flags;
+ u32 val;
+
+ if (!is_valid_cs(cs))
+ return -EINVAL;
+
+ /* sanity check on size */
+ if (size != roundup_pow_of_two(size))
+ return -EINVAL;
+
+ if (size < 8 * 1024 || size > 256 * 1024 * 1024)
+ return -EINVAL;
+
+ val = (base & MPI_CSBASE_BASE_MASK);
+ /* 8k => 0 - 256M => 15 */
+ val |= (ilog2(size) - ilog2(8 * 1024)) << MPI_CSBASE_SIZE_SHIFT;
+
+ spin_lock_irqsave(&bcm63xx_cs_lock, flags);
+ bcm_mpi_writel(val, MPI_CSBASE_REG(cs));
+ spin_unlock_irqrestore(&bcm63xx_cs_lock, flags);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(bcm63xx_set_cs_base);
+
+/*
+ * configure chipselect timing (ns)
+ */
+int bcm63xx_set_cs_timing(unsigned int cs, unsigned int wait,
+ unsigned int setup, unsigned int hold)
+{
+ unsigned long flags;
+ u32 val;
+
+ if (!is_valid_cs(cs))
+ return -EINVAL;
+
+ spin_lock_irqsave(&bcm63xx_cs_lock, flags);
+ val = bcm_mpi_readl(MPI_CSCTL_REG(cs));
+ val &= ~(MPI_CSCTL_WAIT_MASK);
+ val &= ~(MPI_CSCTL_SETUP_MASK);
+ val &= ~(MPI_CSCTL_HOLD_MASK);
+ val |= wait << MPI_CSCTL_WAIT_SHIFT;
+ val |= setup << MPI_CSCTL_SETUP_SHIFT;
+ val |= hold << MPI_CSCTL_HOLD_SHIFT;
+ bcm_mpi_writel(val, MPI_CSCTL_REG(cs));
+ spin_unlock_irqrestore(&bcm63xx_cs_lock, flags);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(bcm63xx_set_cs_timing);
+
+/*
+ * configure other chipselect parameters (data bus size, ...)
+ */
+int bcm63xx_set_cs_param(unsigned int cs, u32 params)
+{
+ unsigned long flags;
+ u32 val;
+
+ if (!is_valid_cs(cs))
+ return -EINVAL;
+
+ /* none of these fields apply to PCMCIA */
+ if (cs == MPI_CS_PCMCIA_COMMON ||
+ cs == MPI_CS_PCMCIA_ATTR ||
+ cs == MPI_CS_PCMCIA_IO)
+ return -EINVAL;
+
+ spin_lock_irqsave(&bcm63xx_cs_lock, flags);
+ val = bcm_mpi_readl(MPI_CSCTL_REG(cs));
+ val &= ~(MPI_CSCTL_DATA16_MASK);
+ val &= ~(MPI_CSCTL_SYNCMODE_MASK);
+ val &= ~(MPI_CSCTL_TSIZE_MASK);
+ val &= ~(MPI_CSCTL_ENDIANSWAP_MASK);
+ val |= params;
+ bcm_mpi_writel(val, MPI_CSCTL_REG(cs));
+ spin_unlock_irqrestore(&bcm63xx_cs_lock, flags);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(bcm63xx_set_cs_param);
+
+/*
+ * set cs status (enable/disable)
+ */
+int bcm63xx_set_cs_status(unsigned int cs, int enable)
+{
+ unsigned long flags;
+ u32 val;
+
+ if (!is_valid_cs(cs))
+ return -EINVAL;
+
+ spin_lock_irqsave(&bcm63xx_cs_lock, flags);
+ val = bcm_mpi_readl(MPI_CSCTL_REG(cs));
+ if (enable)
+ val |= MPI_CSCTL_ENABLE_MASK;
+ else
+ val &= ~MPI_CSCTL_ENABLE_MASK;
+ bcm_mpi_writel(val, MPI_CSCTL_REG(cs));
+ spin_unlock_irqrestore(&bcm63xx_cs_lock, flags);
+ return 0;
+}
+
+EXPORT_SYMBOL(bcm63xx_set_cs_status);
diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c
new file mode 100644
index 000000000..8e73d65f3
--- /dev/null
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -0,0 +1,327 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/export.h>
+#include <bcm63xx_dev_enet.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_regs.h>
+
+static const unsigned long bcm6348_regs_enetdmac[] = {
+ [ENETDMAC_CHANCFG] = ENETDMAC_CHANCFG_REG,
+ [ENETDMAC_IR] = ENETDMAC_IR_REG,
+ [ENETDMAC_IRMASK] = ENETDMAC_IRMASK_REG,
+ [ENETDMAC_MAXBURST] = ENETDMAC_MAXBURST_REG,
+};
+
+static const unsigned long bcm6345_regs_enetdmac[] = {
+ [ENETDMAC_CHANCFG] = ENETDMA_6345_CHANCFG_REG,
+ [ENETDMAC_IR] = ENETDMA_6345_IR_REG,
+ [ENETDMAC_IRMASK] = ENETDMA_6345_IRMASK_REG,
+ [ENETDMAC_MAXBURST] = ENETDMA_6345_MAXBURST_REG,
+ [ENETDMAC_BUFALLOC] = ENETDMA_6345_BUFALLOC_REG,
+ [ENETDMAC_RSTART] = ENETDMA_6345_RSTART_REG,
+ [ENETDMAC_FC] = ENETDMA_6345_FC_REG,
+ [ENETDMAC_LEN] = ENETDMA_6345_LEN_REG,
+};
+
+const unsigned long *bcm63xx_regs_enetdmac;
+EXPORT_SYMBOL(bcm63xx_regs_enetdmac);
+
+static __init void bcm63xx_enetdmac_regs_init(void)
+{
+ if (BCMCPU_IS_6345())
+ bcm63xx_regs_enetdmac = bcm6345_regs_enetdmac;
+ else
+ bcm63xx_regs_enetdmac = bcm6348_regs_enetdmac;
+}
+
+static struct resource shared_res[] = {
+ {
+ .start = -1, /* filled at runtime */
+ .end = -1, /* filled at runtime */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = -1, /* filled at runtime */
+ .end = -1, /* filled at runtime */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = -1, /* filled at runtime */
+ .end = -1, /* filled at runtime */
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device bcm63xx_enet_shared_device = {
+ .name = "bcm63xx_enet_shared",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(shared_res),
+ .resource = shared_res,
+};
+
+static int shared_device_registered;
+
+static u64 enet_dmamask = DMA_BIT_MASK(32);
+
+static struct resource enet0_res[] = {
+ {
+ .start = -1, /* filled at runtime */
+ .end = -1, /* filled at runtime */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = -1, /* filled at runtime */
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = -1, /* filled at runtime */
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = -1, /* filled at runtime */
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct bcm63xx_enet_platform_data enet0_pd;
+
+static struct platform_device bcm63xx_enet0_device = {
+ .name = "bcm63xx_enet",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(enet0_res),
+ .resource = enet0_res,
+ .dev = {
+ .platform_data = &enet0_pd,
+ .dma_mask = &enet_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static struct resource enet1_res[] = {
+ {
+ .start = -1, /* filled at runtime */
+ .end = -1, /* filled at runtime */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = -1, /* filled at runtime */
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = -1, /* filled at runtime */
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = -1, /* filled at runtime */
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct bcm63xx_enet_platform_data enet1_pd;
+
+static struct platform_device bcm63xx_enet1_device = {
+ .name = "bcm63xx_enet",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(enet1_res),
+ .resource = enet1_res,
+ .dev = {
+ .platform_data = &enet1_pd,
+ .dma_mask = &enet_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static struct resource enetsw_res[] = {
+ {
+ /* start & end filled at runtime */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* start filled at runtime */
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* start filled at runtime */
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct bcm63xx_enetsw_platform_data enetsw_pd;
+
+static struct platform_device bcm63xx_enetsw_device = {
+ .name = "bcm63xx_enetsw",
+ .num_resources = ARRAY_SIZE(enetsw_res),
+ .resource = enetsw_res,
+ .dev = {
+ .platform_data = &enetsw_pd,
+ .dma_mask = &enet_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static int __init register_shared(void)
+{
+ int ret, chan_count;
+
+ if (shared_device_registered)
+ return 0;
+
+ bcm63xx_enetdmac_regs_init();
+
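+ /* resource 0: global DMA regs, 1: per-channel regs, 2: channel state RAM */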
+ shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
+ shared_res[0].end = shared_res[0].start;
+ if (BCMCPU_IS_6345())
+ shared_res[0].end += (RSET_6345_ENETDMA_SIZE) - 1;
+ else
+ shared_res[0].end += (RSET_ENETDMA_SIZE) - 1;
+
+ if (BCMCPU_IS_6328() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
+ chan_count = 32;
+ else if (BCMCPU_IS_6345())
+ chan_count = 8;
+ else
+ chan_count = 16;
+
+ shared_res[1].start = bcm63xx_regset_address(RSET_ENETDMAC);
+ shared_res[1].end = shared_res[1].start;
+ shared_res[1].end += RSET_ENETDMAC_SIZE(chan_count) - 1;
+
+ shared_res[2].start = bcm63xx_regset_address(RSET_ENETDMAS);
+ shared_res[2].end = shared_res[2].start;
+ shared_res[2].end += RSET_ENETDMAS_SIZE(chan_count) - 1;
+
+ ret = platform_device_register(&bcm63xx_enet_shared_device);
+ if (ret)
+ return ret;
+ shared_device_registered = 1;
+
+ return 0;
+}
+
+int __init bcm63xx_enet_register(int unit,
+ const struct bcm63xx_enet_platform_data *pd)
+{
+ struct platform_device *pdev;
+ struct bcm63xx_enet_platform_data *dpd;
+ int ret;
+
+ if (unit > 1)
+ return -ENODEV;
+
+ if (unit == 1 && (BCMCPU_IS_6338() || BCMCPU_IS_6345()))
+ return -ENODEV;
+
+ ret = register_shared();
+ if (ret)
+ return ret;
+
+ if (unit == 0) {
+ enet0_res[0].start = bcm63xx_regset_address(RSET_ENET0);
+ enet0_res[0].end = enet0_res[0].start;
+ enet0_res[0].end += RSET_ENET_SIZE - 1;
+ enet0_res[1].start = bcm63xx_get_irq_number(IRQ_ENET0);
+ enet0_res[2].start = bcm63xx_get_irq_number(IRQ_ENET0_RXDMA);
+ enet0_res[3].start = bcm63xx_get_irq_number(IRQ_ENET0_TXDMA);
+ pdev = &bcm63xx_enet0_device;
+ } else {
+ enet1_res[0].start = bcm63xx_regset_address(RSET_ENET1);
+ enet1_res[0].end = enet1_res[0].start;
+ enet1_res[0].end += RSET_ENET_SIZE - 1;
+ enet1_res[1].start = bcm63xx_get_irq_number(IRQ_ENET1);
+ enet1_res[2].start = bcm63xx_get_irq_number(IRQ_ENET1_RXDMA);
+ enet1_res[3].start = bcm63xx_get_irq_number(IRQ_ENET1_TXDMA);
+ pdev = &bcm63xx_enet1_device;
+ }
+
+ /* copy given platform data */
+ dpd = pdev->dev.platform_data;
+ memcpy(dpd, pd, sizeof(*pd));
+
+ /* adjust them in case internal phy is used */
+ if (dpd->use_internal_phy) {
+
+ /* internal phy only exists for enet0 */
+ if (unit == 1)
+ return -ENODEV;
+
+ dpd->phy_id = 1;
+ dpd->has_phy_interrupt = 1;
+ dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY);
+ }
+
+ dpd->dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
+ dpd->dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;
+ if (BCMCPU_IS_6345()) {
+ dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_CHAINING_MASK;
+ dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_WRAP_EN_MASK;
+ dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_FLOWC_EN_MASK;
+ dpd->dma_chan_int_mask |= ENETDMA_IR_BUFDONE_MASK;
+ dpd->dma_chan_int_mask |= ENETDMA_IR_NOTOWNER_MASK;
+ dpd->dma_chan_width = ENETDMA_6345_CHAN_WIDTH;
+ dpd->dma_desc_shift = ENETDMA_6345_DESC_SHIFT;
+ } else {
+ dpd->dma_has_sram = true;
+ dpd->dma_chan_width = ENETDMA_CHAN_WIDTH;
+ }
+
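+ /* enet0 uses DMA channels 0/1, enet1 uses channels 2/3 */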
+ if (unit == 0) {
+ dpd->rx_chan = 0;
+ dpd->tx_chan = 1;
+ } else {
+ dpd->rx_chan = 2;
+ dpd->tx_chan = 3;
+ }
+
+ ret = platform_device_register(pdev);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+int __init
+bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd)
+{
+ int ret;
+
+ if (!BCMCPU_IS_6328() && !BCMCPU_IS_6362() && !BCMCPU_IS_6368())
+ return -ENODEV;
+
+ ret = register_shared();
+ if (ret)
+ return ret;
+
+ enetsw_res[0].start = bcm63xx_regset_address(RSET_ENETSW);
+ enetsw_res[0].end = enetsw_res[0].start;
+ enetsw_res[0].end += RSET_ENETSW_SIZE - 1;
+ enetsw_res[1].start = bcm63xx_get_irq_number(IRQ_ENETSW_RXDMA0);
+ enetsw_res[2].start = bcm63xx_get_irq_number(IRQ_ENETSW_TXDMA0);
+ if (!enetsw_res[2].start)
+ enetsw_res[2].start = -1;
+
+ memcpy(bcm63xx_enetsw_device.dev.platform_data, pd, sizeof(*pd));
+
+ if (BCMCPU_IS_6328())
+ enetsw_pd.num_ports = ENETSW_PORTS_6328;
+ else if (BCMCPU_IS_6362() || BCMCPU_IS_6368())
+ enetsw_pd.num_ports = ENETSW_PORTS_6368;
+
+ enetsw_pd.dma_has_sram = true;
+ enetsw_pd.dma_chan_width = ENETDMA_CHAN_WIDTH;
+ enetsw_pd.dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
+ enetsw_pd.dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;
+
+ ret = platform_device_register(&bcm63xx_enetsw_device);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/arch/mips/bcm63xx/dev-flash.c b/arch/mips/bcm63xx/dev-flash.c
new file mode 100644
index 000000000..f9cc015d3
--- /dev/null
+++ b/arch/mips/bcm63xx/dev-flash.c
@@ -0,0 +1,131 @@
+/*
+ * Broadcom BCM63xx flash registration
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2012 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_dev_flash.h>
+#include <bcm63xx_regs.h>
+#include <bcm63xx_io.h>
+
+static struct mtd_partition mtd_partitions[] = {
+ {
+ .name = "cfe",
+ .offset = 0x0,
+ .size = 0x40000,
+ }
+};
+
+static const char *bcm63xx_part_types[] = { "bcm63xxpart", NULL };
+
+static struct physmap_flash_data flash_data = {
+ .width = 2,
+ .parts = mtd_partitions,
+ .part_probe_types = bcm63xx_part_types,
+};
+
+static struct resource mtd_resources[] = {
+ {
+ .start = 0, /* filled at runtime */
+ .end = 0, /* filled at runtime */
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct platform_device mtd_dev = {
+ .name = "physmap-flash",
+ .resource = mtd_resources,
+ .num_resources = ARRAY_SIZE(mtd_resources),
+ .dev = {
+ .platform_data = &flash_data,
+ },
+};
+
+static int __init bcm63xx_detect_flash_type(void)
+{
+ u32 val;
+
+ switch (bcm63xx_get_cpu_id()) {
+ case BCM6328_CPU_ID:
+ val = bcm_misc_readl(MISC_STRAPBUS_6328_REG);
+ if (val & STRAPBUS_6328_BOOT_SEL_SERIAL)
+ return BCM63XX_FLASH_TYPE_SERIAL;
+ else
+ return BCM63XX_FLASH_TYPE_NAND;
+ case BCM6338_CPU_ID:
+ case BCM6345_CPU_ID:
+ case BCM6348_CPU_ID:
+ /* no way to auto-detect, so assume parallel flash */
+ return BCM63XX_FLASH_TYPE_PARALLEL;
+ case BCM3368_CPU_ID:
+ case BCM6358_CPU_ID:
+ val = bcm_gpio_readl(GPIO_STRAPBUS_REG);
+ if (val & STRAPBUS_6358_BOOT_SEL_PARALLEL)
+ return BCM63XX_FLASH_TYPE_PARALLEL;
+ else
+ return BCM63XX_FLASH_TYPE_SERIAL;
+ case BCM6362_CPU_ID:
+ val = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
+ if (val & STRAPBUS_6362_BOOT_SEL_SERIAL)
+ return BCM63XX_FLASH_TYPE_SERIAL;
+ else
+ return BCM63XX_FLASH_TYPE_NAND;
+ case BCM6368_CPU_ID:
+ val = bcm_gpio_readl(GPIO_STRAPBUS_REG);
+ switch (val & STRAPBUS_6368_BOOT_SEL_MASK) {
+ case STRAPBUS_6368_BOOT_SEL_NAND:
+ return BCM63XX_FLASH_TYPE_NAND;
+ case STRAPBUS_6368_BOOT_SEL_SERIAL:
+ return BCM63XX_FLASH_TYPE_SERIAL;
+ case STRAPBUS_6368_BOOT_SEL_PARALLEL:
+ return BCM63XX_FLASH_TYPE_PARALLEL;
+ }
+ fallthrough;
+ default:
+ return -EINVAL;
+ }
+}
+
+int __init bcm63xx_flash_register(void)
+{
+ int flash_type;
+ u32 val;
+
+ flash_type = bcm63xx_detect_flash_type();
+
+ switch (flash_type) {
+ case BCM63XX_FLASH_TYPE_PARALLEL:
+ /* read base address of boot chip select (0) */
+ val = bcm_mpi_readl(MPI_CSBASE_REG(0));
+ val &= MPI_CSBASE_BASE_MASK;
+
+ mtd_resources[0].start = val;
+ mtd_resources[0].end = 0x1FFFFFFF;
+
+ return platform_device_register(&mtd_dev);
+ case BCM63XX_FLASH_TYPE_SERIAL:
+ pr_warn("unsupported serial flash detected\n");
+ return -ENODEV;
+ case BCM63XX_FLASH_TYPE_NAND:
+ pr_warn("unsupported NAND flash detected\n");
+ return -ENODEV;
+ default:
+ pr_err("flash detection failed for BCM%x: %d\n",
+ bcm63xx_get_cpu_id(), flash_type);
+ return -ENODEV;
+ }
+}
diff --git a/arch/mips/bcm63xx/dev-hsspi.c b/arch/mips/bcm63xx/dev-hsspi.c
new file mode 100644
index 000000000..696abc48e
--- /dev/null
+++ b/arch/mips/bcm63xx/dev-hsspi.c
@@ -0,0 +1,47 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_dev_hsspi.h>
+#include <bcm63xx_regs.h>
+
+static struct resource spi_resources[] = {
+ {
+ .start = -1, /* filled at runtime */
+ .end = -1, /* filled at runtime */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = -1, /* filled at runtime */
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device bcm63xx_hsspi_device = {
+ .name = "bcm63xx-hsspi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(spi_resources),
+ .resource = spi_resources,
+};
+
+int __init bcm63xx_hsspi_register(void)
+{
+ if (!BCMCPU_IS_6328() && !BCMCPU_IS_6362())
+ return -ENODEV;
+
+ spi_resources[0].start = bcm63xx_regset_address(RSET_HSSPI);
+ spi_resources[0].end = spi_resources[0].start;
+ spi_resources[0].end += RSET_HSSPI_SIZE - 1;
+ spi_resources[1].start = bcm63xx_get_irq_number(IRQ_HSSPI);
+
+ return platform_device_register(&bcm63xx_hsspi_device);
+}
diff --git a/arch/mips/bcm63xx/dev-pcmcia.c b/arch/mips/bcm63xx/dev-pcmcia.c
new file mode 100644
index 000000000..9496cd236
--- /dev/null
+++ b/arch/mips/bcm63xx/dev-pcmcia.c
@@ -0,0 +1,144 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/bootinfo.h>
+#include <linux/platform_device.h>
+#include <bcm63xx_cs.h>
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_dev_pcmcia.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_regs.h>
+
+static struct resource pcmcia_resources[] = {
+ /* pcmcia registers */
+ {
+ /* start & end filled at runtime */
+ .flags = IORESOURCE_MEM,
+ },
+
+ /* pcmcia memory zone resources */
+ {
+ .start = BCM_PCMCIA_COMMON_BASE_PA,
+ .end = BCM_PCMCIA_COMMON_END_PA,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = BCM_PCMCIA_ATTR_BASE_PA,
+ .end = BCM_PCMCIA_ATTR_END_PA,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = BCM_PCMCIA_IO_BASE_PA,
+ .end = BCM_PCMCIA_IO_END_PA,
+ .flags = IORESOURCE_MEM,
+ },
+
+ /* PCMCIA irq */
+ {
+ /* start filled at runtime */
+ .flags = IORESOURCE_IRQ,
+ },
+
+ /* declare PCMCIA IO resource also */
+ {
+ .start = BCM_PCMCIA_IO_BASE_PA,
+ .end = BCM_PCMCIA_IO_END_PA,
+ .flags = IORESOURCE_IO,
+ },
+};
+
+static struct bcm63xx_pcmcia_platform_data pd;
+
+static struct platform_device bcm63xx_pcmcia_device = {
+ .name = "bcm63xx_pcmcia",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(pcmcia_resources),
+ .resource = pcmcia_resources,
+ .dev = {
+ .platform_data = &pd,
+ },
+};
+
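+/* disable the chip select, program its base and size, then re-enable it */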
+static int __init config_pcmcia_cs(unsigned int cs,
+ u32 base, unsigned int size)
+{
+ int ret;
+
+ ret = bcm63xx_set_cs_status(cs, 0);
+ if (!ret)
+ ret = bcm63xx_set_cs_base(cs, base, size);
+ if (!ret)
+ ret = bcm63xx_set_cs_status(cs, 1);
+ return ret;
+}
+
+static const struct {
+ unsigned int cs;
+ unsigned int base;
+ unsigned int size;
+} pcmcia_cs[3] __initconst = {
+ {
+ .cs = MPI_CS_PCMCIA_COMMON,
+ .base = BCM_PCMCIA_COMMON_BASE_PA,
+ .size = BCM_PCMCIA_COMMON_SIZE
+ },
+ {
+ .cs = MPI_CS_PCMCIA_ATTR,
+ .base = BCM_PCMCIA_ATTR_BASE_PA,
+ .size = BCM_PCMCIA_ATTR_SIZE
+ },
+ {
+ .cs = MPI_CS_PCMCIA_IO,
+ .base = BCM_PCMCIA_IO_BASE_PA,
+ .size = BCM_PCMCIA_IO_SIZE
+ },
+};
+
+int __init bcm63xx_pcmcia_register(void)
+{
+ int ret, i;
+
+ if (!BCMCPU_IS_6348() && !BCMCPU_IS_6358())
+ return 0;
+
+ /* use correct pcmcia ready gpio depending on processor */
+ switch (bcm63xx_get_cpu_id()) {
+ case BCM6348_CPU_ID:
+ pd.ready_gpio = 22;
+ break;
+
+ case BCM6358_CPU_ID:
+ pd.ready_gpio = 18;
+ break;
+
+ default:
+ return -ENODEV;
+ }
+
+ pcmcia_resources[0].start = bcm63xx_regset_address(RSET_PCMCIA);
+ pcmcia_resources[0].end = pcmcia_resources[0].start +
+ RSET_PCMCIA_SIZE - 1;
+ pcmcia_resources[4].start = bcm63xx_get_irq_number(IRQ_PCMCIA);
+
+ /* configure pcmcia chip selects */
+ for (i = 0; i < 3; i++) {
+ ret = config_pcmcia_cs(pcmcia_cs[i].cs,
+ pcmcia_cs[i].base,
+ pcmcia_cs[i].size);
+ if (ret)
+ goto out_err;
+ }
+
+ return platform_device_register(&bcm63xx_pcmcia_device);
+
+out_err:
+ pr_err("unable to set pcmcia chip select\n");
+ return ret;
+}
diff --git a/arch/mips/bcm63xx/dev-rng.c b/arch/mips/bcm63xx/dev-rng.c
new file mode 100644
index 000000000..d277b4dc6
--- /dev/null
+++ b/arch/mips/bcm63xx/dev-rng.c
@@ -0,0 +1,40 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 Florian Fainelli <florian@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <bcm63xx_cpu.h>
+
+static struct resource rng_resources[] = {
+ {
+ .start = -1, /* filled at runtime */
+ .end = -1, /* filled at runtime */
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device bcm63xx_rng_device = {
+ .name = "bcm63xx-rng",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rng_resources),
+ .resource = rng_resources,
+};
+
+int __init bcm63xx_rng_register(void)
+{
+ if (!BCMCPU_IS_6368())
+ return -ENODEV;
+
+ rng_resources[0].start = bcm63xx_regset_address(RSET_RNG);
+ rng_resources[0].end = rng_resources[0].start;
+ rng_resources[0].end += RSET_RNG_SIZE - 1;
+
+ return platform_device_register(&bcm63xx_rng_device);
+}
+arch_initcall(bcm63xx_rng_register);
diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c
new file mode 100644
index 000000000..232385441
--- /dev/null
+++ b/arch/mips/bcm63xx/dev-spi.c
@@ -0,0 +1,60 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009-2011 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2010 Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_dev_spi.h>
+#include <bcm63xx_regs.h>
+
+static struct resource spi_resources[] = {
+ {
+ .start = -1, /* filled at runtime */
+ .end = -1, /* filled at runtime */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = -1, /* filled at runtime */
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device bcm63xx_spi_device = {
+ .id = -1,
+ .num_resources = ARRAY_SIZE(spi_resources),
+ .resource = spi_resources,
+};
+
+int __init bcm63xx_spi_register(void)
+{
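+ /* neither the 6328 nor the 6345 has this legacy SPI block */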
+ if (BCMCPU_IS_6328() || BCMCPU_IS_6345())
+ return -ENODEV;
+
+ spi_resources[0].start = bcm63xx_regset_address(RSET_SPI);
+ spi_resources[0].end = spi_resources[0].start;
+ spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
+
+ if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
+ bcm63xx_spi_device.name = "bcm6348-spi",
+ spi_resources[0].end += BCM_6348_RSET_SPI_SIZE - 1;
+ }
+
+ if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || BCMCPU_IS_6362() ||
+ BCMCPU_IS_6368()) {
+ bcm63xx_spi_device.name = "bcm6358-spi",
+ spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
+ }
+
+ return platform_device_register(&bcm63xx_spi_device);
+}
diff --git a/arch/mips/bcm63xx/dev-uart.c b/arch/mips/bcm63xx/dev-uart.c
new file mode 100644
index 000000000..3bc7f3bfc
--- /dev/null
+++ b/arch/mips/bcm63xx/dev-uart.c
@@ -0,0 +1,76 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <bcm63xx_cpu.h>
+
+static struct resource uart0_resources[] = {
+ {
+ /* start & end filled at runtime */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* start filled at runtime */
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource uart1_resources[] = {
+ {
+ /* start & end filled at runtime */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* start filled at runtime */
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device bcm63xx_uart_devices[] = {
+ {
+ .name = "bcm63xx_uart",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(uart0_resources),
+ .resource = uart0_resources,
+ },
+
+ {
+ .name = "bcm63xx_uart",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(uart1_resources),
+ .resource = uart1_resources,
+ }
+};
+
+int __init bcm63xx_uart_register(unsigned int id)
+{
+ if (id >= ARRAY_SIZE(bcm63xx_uart_devices))
+ return -ENODEV;
+
+ if (id == 1 && (!BCMCPU_IS_3368() && !BCMCPU_IS_6358() &&
+ !BCMCPU_IS_6368()))
+ return -ENODEV;
+
+ if (id == 0) {
+ uart0_resources[0].start = bcm63xx_regset_address(RSET_UART0);
+ uart0_resources[0].end = uart0_resources[0].start +
+ RSET_UART_SIZE - 1;
+ uart0_resources[1].start = bcm63xx_get_irq_number(IRQ_UART0);
+ }
+
+ if (id == 1) {
+ uart1_resources[0].start = bcm63xx_regset_address(RSET_UART1);
+ uart1_resources[0].end = uart1_resources[0].start +
+ RSET_UART_SIZE - 1;
+ uart1_resources[1].start = bcm63xx_get_irq_number(IRQ_UART1);
+ }
+
+ return platform_device_register(&bcm63xx_uart_devices[id]);
+}
diff --git a/arch/mips/bcm63xx/dev-usb-usbd.c b/arch/mips/bcm63xx/dev-usb-usbd.c
new file mode 100644
index 000000000..508bd9d8d
--- /dev/null
+++ b/arch/mips/bcm63xx/dev-usb-usbd.c
@@ -0,0 +1,65 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
+ * Copyright (C) 2012 Broadcom Corporation
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_dev_usb_usbd.h>
+
+#define NUM_MMIO 2
+#define NUM_IRQ 7
+
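+/* two MMIO regions followed by the seven IRQ lines, filled in at registration */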
+static struct resource usbd_resources[NUM_MMIO + NUM_IRQ];
+
+static u64 usbd_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device bcm63xx_usbd_device = {
+ .name = "bcm63xx_udc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(usbd_resources),
+ .resource = usbd_resources,
+ .dev = {
+ .dma_mask = &usbd_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+int __init bcm63xx_usbd_register(const struct bcm63xx_usbd_platform_data *pd)
+{
+ const int irq_list[NUM_IRQ] = { IRQ_USBD,
+ IRQ_USBD_RXDMA0, IRQ_USBD_TXDMA0,
+ IRQ_USBD_RXDMA1, IRQ_USBD_TXDMA1,
+ IRQ_USBD_RXDMA2, IRQ_USBD_TXDMA2 };
+ int i;
+
+ if (!BCMCPU_IS_6328() && !BCMCPU_IS_6368())
+ return 0;
+
+ usbd_resources[0].start = bcm63xx_regset_address(RSET_USBD);
+ usbd_resources[0].end = usbd_resources[0].start + RSET_USBD_SIZE - 1;
+ usbd_resources[0].flags = IORESOURCE_MEM;
+
+ usbd_resources[1].start = bcm63xx_regset_address(RSET_USBDMA);
+ usbd_resources[1].end = usbd_resources[1].start + RSET_USBDMA_SIZE - 1;
+ usbd_resources[1].flags = IORESOURCE_MEM;
+
+ for (i = 0; i < NUM_IRQ; i++) {
+ struct resource *r = &usbd_resources[NUM_MMIO + i];
+
+ r->start = r->end = bcm63xx_get_irq_number(irq_list[i]);
+ r->flags = IORESOURCE_IRQ;
+ }
+
+ platform_device_add_data(&bcm63xx_usbd_device, pd, sizeof(*pd));
+
+ return platform_device_register(&bcm63xx_usbd_device);
+}
diff --git a/arch/mips/bcm63xx/dev-wdt.c b/arch/mips/bcm63xx/dev-wdt.c
new file mode 100644
index 000000000..2a2346a99
--- /dev/null
+++ b/arch/mips/bcm63xx/dev-wdt.c
@@ -0,0 +1,37 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <bcm63xx_cpu.h>
+
+static struct resource wdt_resources[] = {
+ {
+ .start = -1, /* filled at runtime */
+ .end = -1, /* filled at runtime */
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device bcm63xx_wdt_device = {
+ .name = "bcm63xx-wdt",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(wdt_resources),
+ .resource = wdt_resources,
+};
+
+int __init bcm63xx_wdt_register(void)
+{
+ wdt_resources[0].start = bcm63xx_regset_address(RSET_WDT);
+ wdt_resources[0].end = wdt_resources[0].start;
+ wdt_resources[0].end += RSET_WDT_SIZE - 1;
+
+ return platform_device_register(&bcm63xx_wdt_device);
+}
+arch_initcall(bcm63xx_wdt_register);
diff --git a/arch/mips/bcm63xx/early_printk.c b/arch/mips/bcm63xx/early_printk.c
new file mode 100644
index 000000000..9e9ec27c2
--- /dev/null
+++ b/arch/mips/bcm63xx/early_printk.c
@@ -0,0 +1,30 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <bcm63xx_io.h>
+#include <linux/serial_bcm63xx.h>
+#include <asm/setup.h>
+
+static void wait_xfered(void)
+{
+ unsigned int val;
+
+ /* wait for any previous char to be transmitted */
+ do {
+ val = bcm_uart0_readl(UART_IR_REG);
+ if (val & UART_IR_STAT(UART_IR_TXEMPTY))
+ break;
+ } while (1);
+}
+
+void prom_putchar(char c)
+{
+ wait_xfered();
+ bcm_uart0_writel(c, UART_FIFO_REG);
+ wait_xfered();
+}
diff --git a/arch/mips/bcm63xx/gpio.c b/arch/mips/bcm63xx/gpio.c
new file mode 100644
index 000000000..16f353ac3
--- /dev/null
+++ b/arch/mips/bcm63xx/gpio.c
@@ -0,0 +1,151 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ * Copyright (C) 2008-2011 Florian Fainelli <florian@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/driver.h>
+
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_gpio.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_regs.h>
+
+static u32 gpio_out_low_reg;
+
+static void bcm63xx_gpio_out_low_reg_init(void)
+{
+ switch (bcm63xx_get_cpu_id()) {
+ case BCM6345_CPU_ID:
+ gpio_out_low_reg = GPIO_DATA_LO_REG_6345;
+ break;
+ default:
+ gpio_out_low_reg = GPIO_DATA_LO_REG;
+ break;
+ }
+}
+
+static DEFINE_SPINLOCK(bcm63xx_gpio_lock);
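+/* shadow copies of the GPIO output registers, protected by bcm63xx_gpio_lock */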
+static u32 gpio_out_low, gpio_out_high;
+
+static void bcm63xx_gpio_set(struct gpio_chip *chip,
+ unsigned gpio, int val)
+{
+ u32 reg;
+ u32 mask;
+ u32 *v;
+ unsigned long flags;
+
+ if (gpio >= chip->ngpio)
+ BUG();
+
+ if (gpio < 32) {
+ reg = gpio_out_low_reg;
+ mask = 1 << gpio;
+ v = &gpio_out_low;
+ } else {
+ reg = GPIO_DATA_HI_REG;
+ mask = 1 << (gpio - 32);
+ v = &gpio_out_high;
+ }
+
+ spin_lock_irqsave(&bcm63xx_gpio_lock, flags);
+ if (val)
+ *v |= mask;
+ else
+ *v &= ~mask;
+ bcm_gpio_writel(*v, reg);
+ spin_unlock_irqrestore(&bcm63xx_gpio_lock, flags);
+}
+
+static int bcm63xx_gpio_get(struct gpio_chip *chip, unsigned gpio)
+{
+ u32 reg;
+ u32 mask;
+
+ if (gpio >= chip->ngpio)
+ BUG();
+
+ if (gpio < 32) {
+ reg = gpio_out_low_reg;
+ mask = 1 << gpio;
+ } else {
+ reg = GPIO_DATA_HI_REG;
+ mask = 1 << (gpio - 32);
+ }
+
+ return !!(bcm_gpio_readl(reg) & mask);
+}
+
+static int bcm63xx_gpio_set_direction(struct gpio_chip *chip,
+ unsigned gpio, int dir)
+{
+ u32 reg;
+ u32 mask;
+ u32 tmp;
+ unsigned long flags;
+
+ if (gpio >= chip->ngpio)
+ BUG();
+
+ if (gpio < 32) {
+ reg = GPIO_CTL_LO_REG;
+ mask = 1 << gpio;
+ } else {
+ reg = GPIO_CTL_HI_REG;
+ mask = 1 << (gpio - 32);
+ }
+
+ spin_lock_irqsave(&bcm63xx_gpio_lock, flags);
+ tmp = bcm_gpio_readl(reg);
+ if (dir == BCM63XX_GPIO_DIR_IN)
+ tmp &= ~mask;
+ else
+ tmp |= mask;
+ bcm_gpio_writel(tmp, reg);
+ spin_unlock_irqrestore(&bcm63xx_gpio_lock, flags);
+
+ return 0;
+}
+
+static int bcm63xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ return bcm63xx_gpio_set_direction(chip, gpio, BCM63XX_GPIO_DIR_IN);
+}
+
+static int bcm63xx_gpio_direction_output(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ bcm63xx_gpio_set(chip, gpio, value);
+ return bcm63xx_gpio_set_direction(chip, gpio, BCM63XX_GPIO_DIR_OUT);
+}
+
+static struct gpio_chip bcm63xx_gpio_chip = {
+ .label = "bcm63xx-gpio",
+ .direction_input = bcm63xx_gpio_direction_input,
+ .direction_output = bcm63xx_gpio_direction_output,
+ .get = bcm63xx_gpio_get,
+ .set = bcm63xx_gpio_set,
+ .base = 0,
+};
+
+int __init bcm63xx_gpio_init(void)
+{
+ bcm63xx_gpio_out_low_reg_init();
+
+ gpio_out_low = bcm_gpio_readl(gpio_out_low_reg);
+ if (!BCMCPU_IS_6345())
+ gpio_out_high = bcm_gpio_readl(GPIO_DATA_HI_REG);
+ bcm63xx_gpio_chip.ngpio = bcm63xx_gpio_count();
+ pr_info("registering %d GPIOs\n", bcm63xx_gpio_chip.ngpio);
+
+ return gpiochip_add_data(&bcm63xx_gpio_chip, NULL);
+}
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
new file mode 100644
index 000000000..254801344
--- /dev/null
+++ b/arch/mips/bcm63xx/irq.c
@@ -0,0 +1,553 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_regs.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_irq.h>
+
+
+static DEFINE_SPINLOCK(ipic_lock);
+static DEFINE_SPINLOCK(epic_lock);
+
+static u32 irq_stat_addr[2];
+static u32 irq_mask_addr[2];
+static void (*dispatch_internal)(int cpu);
+static int is_ext_irq_cascaded;
+static unsigned int ext_irq_count;
+static unsigned int ext_irq_start, ext_irq_end;
+static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
+static void (*internal_irq_mask)(struct irq_data *d);
+static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);
+
+
+static inline u32 get_ext_irq_perf_reg(int irq)
+{
+ if (irq < 4)
+ return ext_irq_cfg_reg1;
+ return ext_irq_cfg_reg2;
+}
+
+static inline void handle_internal(int intbit)
+{
+ if (is_ext_irq_cascaded &&
+ intbit >= ext_irq_start && intbit <= ext_irq_end)
+ do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
+ else
+ do_IRQ(intbit + IRQ_INTERNAL_BASE);
+}
+
+static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
+ const struct cpumask *m)
+{
+ bool enable = cpu_online(cpu);
+
+#ifdef CONFIG_SMP
+ if (m)
+ enable &= cpumask_test_cpu(cpu, m);
+ else if (irqd_affinity_was_set(d))
+ enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
+#endif
+ return enable;
+}
+
+/*
+ * Dispatch internal device IRQs (uart, enet, watchdog, ...); do not
+ * prioritize any interrupt relative to another. The static counter
+ * resumes the loop where it stopped the last time we left this
+ * function. The body is generated by BUILD_IPIC_INTERNAL for both the
+ * 32- and 64-bit wide interrupt controllers.
+ */
+
+#define BUILD_IPIC_INTERNAL(width) \
+void __dispatch_internal_##width(int cpu) \
+{ \
+ u32 pending[width / 32]; \
+ unsigned int src, tgt; \
+ bool irqs_pending = false; \
+ static unsigned int i[2]; \
+ unsigned int *next = &i[cpu]; \
+ unsigned long flags; \
+ \
+ /* read registers in reverse order */ \
+ spin_lock_irqsave(&ipic_lock, flags); \
+ for (src = 0, tgt = (width / 32); src < (width / 32); src++) { \
+ u32 val; \
+ \
+ val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
+ val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
+ pending[--tgt] = val; \
+ \
+ if (val) \
+ irqs_pending = true; \
+ } \
+ spin_unlock_irqrestore(&ipic_lock, flags); \
+ \
+ if (!irqs_pending) \
+ return; \
+ \
+ while (1) { \
+ unsigned int to_call = *next; \
+ \
+ *next = (*next + 1) & (width - 1); \
+ if (pending[to_call / 32] & (1 << (to_call & 0x1f))) { \
+ handle_internal(to_call); \
+ break; \
+ } \
+ } \
+} \
+ \
+static void __internal_irq_mask_##width(struct irq_data *d) \
+{ \
+ u32 val; \
+ unsigned irq = d->irq - IRQ_INTERNAL_BASE; \
+ unsigned reg = (irq / 32) ^ (width/32 - 1); \
+ unsigned bit = irq & 0x1f; \
+ unsigned long flags; \
+ int cpu; \
+ \
+ spin_lock_irqsave(&ipic_lock, flags); \
+ for_each_present_cpu(cpu) { \
+ if (!irq_mask_addr[cpu]) \
+ break; \
+ \
+ val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
+ val &= ~(1 << bit); \
+ bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
+ } \
+ spin_unlock_irqrestore(&ipic_lock, flags); \
+} \
+ \
+static void __internal_irq_unmask_##width(struct irq_data *d, \
+ const struct cpumask *m) \
+{ \
+ u32 val; \
+ unsigned irq = d->irq - IRQ_INTERNAL_BASE; \
+ unsigned reg = (irq / 32) ^ (width/32 - 1); \
+ unsigned bit = irq & 0x1f; \
+ unsigned long flags; \
+ int cpu; \
+ \
+ spin_lock_irqsave(&ipic_lock, flags); \
+ for_each_present_cpu(cpu) { \
+ if (!irq_mask_addr[cpu]) \
+ break; \
+ \
+ val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
+ if (enable_irq_for_cpu(cpu, d, m)) \
+ val |= (1 << bit); \
+ else \
+ val &= ~(1 << bit); \
+ bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
+ } \
+ spin_unlock_irqrestore(&ipic_lock, flags); \
+}
+
+BUILD_IPIC_INTERNAL(32);
+BUILD_IPIC_INTERNAL(64);
+
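+/*
+ * top-level CP0 dispatch: service every pending, unmasked cause bit and
+ * loop until none remain.
+ */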
+asmlinkage void plat_irq_dispatch(void)
+{
+ u32 cause;
+
+ do {
+ cause = read_c0_cause() & read_c0_status() & ST0_IM;
+
+ if (!cause)
+ break;
+
+ if (cause & CAUSEF_IP7)
+ do_IRQ(7);
+ if (cause & CAUSEF_IP0)
+ do_IRQ(0);
+ if (cause & CAUSEF_IP1)
+ do_IRQ(1);
+ if (cause & CAUSEF_IP2)
+ dispatch_internal(0);
+ if (is_ext_irq_cascaded) {
+ if (cause & CAUSEF_IP3)
+ dispatch_internal(1);
+ } else {
+ if (cause & CAUSEF_IP3)
+ do_IRQ(IRQ_EXT_0);
+ if (cause & CAUSEF_IP4)
+ do_IRQ(IRQ_EXT_1);
+ if (cause & CAUSEF_IP5)
+ do_IRQ(IRQ_EXT_2);
+ if (cause & CAUSEF_IP6)
+ do_IRQ(IRQ_EXT_3);
+ }
+ } while (1);
+}
+
+/*
+ * internal IRQ operations: only mask/unmask in the PERF irq mask
+ * register.
+ */
+static void bcm63xx_internal_irq_mask(struct irq_data *d)
+{
+ internal_irq_mask(d);
+}
+
+static void bcm63xx_internal_irq_unmask(struct irq_data *d)
+{
+ internal_irq_unmask(d, NULL);
+}
+
+/*
+ * external IRQ operations: mask/unmask and clear via the PERF external
+ * irq control register.
+ */
+static void bcm63xx_external_irq_mask(struct irq_data *d)
+{
+ unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
+ u32 reg, regaddr;
+ unsigned long flags;
+
+ regaddr = get_ext_irq_perf_reg(irq);
+ spin_lock_irqsave(&epic_lock, flags);
+ reg = bcm_perf_readl(regaddr);
+
+ if (BCMCPU_IS_6348())
+ reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
+ else
+ reg &= ~EXTIRQ_CFG_MASK(irq % 4);
+
+ bcm_perf_writel(reg, regaddr);
+ spin_unlock_irqrestore(&epic_lock, flags);
+
+ if (is_ext_irq_cascaded)
+ internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
+}
+
+static void bcm63xx_external_irq_unmask(struct irq_data *d)
+{
+ unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
+ u32 reg, regaddr;
+ unsigned long flags;
+
+ regaddr = get_ext_irq_perf_reg(irq);
+ spin_lock_irqsave(&epic_lock, flags);
+ reg = bcm_perf_readl(regaddr);
+
+ if (BCMCPU_IS_6348())
+ reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
+ else
+ reg |= EXTIRQ_CFG_MASK(irq % 4);
+
+ bcm_perf_writel(reg, regaddr);
+ spin_unlock_irqrestore(&epic_lock, flags);
+
+ if (is_ext_irq_cascaded)
+ internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
+ NULL);
+}
+
+static void bcm63xx_external_irq_clear(struct irq_data *d)
+{
+ unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
+ u32 reg, regaddr;
+ unsigned long flags;
+
+ regaddr = get_ext_irq_perf_reg(irq);
+ spin_lock_irqsave(&epic_lock, flags);
+ reg = bcm_perf_readl(regaddr);
+
+ if (BCMCPU_IS_6348())
+ reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
+ else
+ reg |= EXTIRQ_CFG_CLEAR(irq % 4);
+
+ bcm_perf_writel(reg, regaddr);
+ spin_unlock_irqrestore(&epic_lock, flags);
+}
+
+static int bcm63xx_external_irq_set_type(struct irq_data *d,
+ unsigned int flow_type)
+{
+ unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
+ u32 reg, regaddr;
+ int levelsense, sense, bothedge;
+ unsigned long flags;
+
+ flow_type &= IRQ_TYPE_SENSE_MASK;
+
+ if (flow_type == IRQ_TYPE_NONE)
+ flow_type = IRQ_TYPE_LEVEL_LOW;
+
+ levelsense = sense = bothedge = 0;
+ switch (flow_type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ bothedge = 1;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ sense = 1;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ levelsense = 1;
+ sense = 1;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ levelsense = 1;
+ break;
+
+ default:
+		pr_err("bogus flow type combination given!\n");
+ return -EINVAL;
+ }
+
+ regaddr = get_ext_irq_perf_reg(irq);
+ spin_lock_irqsave(&epic_lock, flags);
+ reg = bcm_perf_readl(regaddr);
+ irq %= 4;
+
+ switch (bcm63xx_get_cpu_id()) {
+ case BCM6348_CPU_ID:
+ if (levelsense)
+ reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
+ else
+ reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
+ if (sense)
+ reg |= EXTIRQ_CFG_SENSE_6348(irq);
+ else
+ reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
+ if (bothedge)
+ reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
+ else
+ reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
+ break;
+
+ case BCM3368_CPU_ID:
+ case BCM6328_CPU_ID:
+ case BCM6338_CPU_ID:
+ case BCM6345_CPU_ID:
+ case BCM6358_CPU_ID:
+ case BCM6362_CPU_ID:
+ case BCM6368_CPU_ID:
+ if (levelsense)
+ reg |= EXTIRQ_CFG_LEVELSENSE(irq);
+ else
+ reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
+ if (sense)
+ reg |= EXTIRQ_CFG_SENSE(irq);
+ else
+ reg &= ~EXTIRQ_CFG_SENSE(irq);
+ if (bothedge)
+ reg |= EXTIRQ_CFG_BOTHEDGE(irq);
+ else
+ reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
+ break;
+ default:
+ BUG();
+ }
+
+ bcm_perf_writel(reg, regaddr);
+ spin_unlock_irqrestore(&epic_lock, flags);
+
+ irqd_set_trigger_type(d, flow_type);
+ if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ irq_set_handler_locked(d, handle_level_irq);
+ else
+ irq_set_handler_locked(d, handle_edge_irq);
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+#ifdef CONFIG_SMP
+static int bcm63xx_internal_set_affinity(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
+{
+ if (!irqd_irq_disabled(data))
+ internal_irq_unmask(data, dest);
+
+ return 0;
+}
+#endif
+
+static struct irq_chip bcm63xx_internal_irq_chip = {
+ .name = "bcm63xx_ipic",
+ .irq_mask = bcm63xx_internal_irq_mask,
+ .irq_unmask = bcm63xx_internal_irq_unmask,
+};
+
+static struct irq_chip bcm63xx_external_irq_chip = {
+ .name = "bcm63xx_epic",
+ .irq_ack = bcm63xx_external_irq_clear,
+
+ .irq_mask = bcm63xx_external_irq_mask,
+ .irq_unmask = bcm63xx_external_irq_unmask,
+
+ .irq_set_type = bcm63xx_external_irq_set_type,
+};
+
+static void bcm63xx_init_irq(void)
+{
+ int irq_bits;
+
+ irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
+ irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
+ irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
+ irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);
+
+ switch (bcm63xx_get_cpu_id()) {
+ case BCM3368_CPU_ID:
+ irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
+ irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
+ irq_stat_addr[1] = 0;
+ irq_mask_addr[1] = 0;
+ irq_bits = 32;
+ ext_irq_count = 4;
+ ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
+ break;
+ case BCM6328_CPU_ID:
+ irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
+ irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
+ irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
+ irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
+ irq_bits = 64;
+ ext_irq_count = 4;
+ is_ext_irq_cascaded = 1;
+ ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
+ ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
+ ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
+ break;
+ case BCM6338_CPU_ID:
+ irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
+ irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
+ irq_stat_addr[1] = 0;
+ irq_mask_addr[1] = 0;
+ irq_bits = 32;
+ ext_irq_count = 4;
+ ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
+ break;
+ case BCM6345_CPU_ID:
+ irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
+ irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
+ irq_stat_addr[1] = 0;
+ irq_mask_addr[1] = 0;
+ irq_bits = 32;
+ ext_irq_count = 4;
+ ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
+ break;
+ case BCM6348_CPU_ID:
+ irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
+ irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
+ irq_stat_addr[1] = 0;
+ irq_mask_addr[1] = 0;
+ irq_bits = 32;
+ ext_irq_count = 4;
+ ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
+ break;
+ case BCM6358_CPU_ID:
+ irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
+ irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
+ irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
+ irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
+ irq_bits = 32;
+ ext_irq_count = 4;
+ is_ext_irq_cascaded = 1;
+ ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
+ ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
+ ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
+ break;
+ case BCM6362_CPU_ID:
+ irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
+ irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
+ irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
+ irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
+ irq_bits = 64;
+ ext_irq_count = 4;
+ is_ext_irq_cascaded = 1;
+ ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
+ ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
+ ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
+ break;
+ case BCM6368_CPU_ID:
+ irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
+ irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
+ irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
+ irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
+ irq_bits = 64;
+ ext_irq_count = 6;
+ is_ext_irq_cascaded = 1;
+ ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
+ ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
+ ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
+ ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
+ break;
+ default:
+ BUG();
+ }
+
+ if (irq_bits == 32) {
+ dispatch_internal = __dispatch_internal_32;
+ internal_irq_mask = __internal_irq_mask_32;
+ internal_irq_unmask = __internal_irq_unmask_32;
+ } else {
+ dispatch_internal = __dispatch_internal_64;
+ internal_irq_mask = __internal_irq_mask_64;
+ internal_irq_unmask = __internal_irq_unmask_64;
+ }
+}
+
+void __init arch_init_irq(void)
+{
+ int i, irq;
+
+ bcm63xx_init_irq();
+ mips_cpu_irq_init();
+ for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
+ irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
+ handle_level_irq);
+
+ for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
+ irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
+ handle_edge_irq);
+
+ if (!is_ext_irq_cascaded) {
+ for (i = 3; i < 3 + ext_irq_count; ++i) {
+ irq = MIPS_CPU_IRQ_BASE + i;
+ if (request_irq(irq, no_action, IRQF_NO_THREAD,
+ "cascade_extirq", NULL)) {
+ pr_err("Failed to request irq %d (cascade_extirq)\n",
+ irq);
+ }
+ }
+ }
+
+ irq = MIPS_CPU_IRQ_BASE + 2;
+ if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade_ip2", NULL))
+ pr_err("Failed to request irq %d (cascade_ip2)\n", irq);
+#ifdef CONFIG_SMP
+ if (is_ext_irq_cascaded) {
+ irq = MIPS_CPU_IRQ_BASE + 3;
+ if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade_ip3",
+ NULL))
+ pr_err("Failed to request irq %d (cascade_ip3)\n", irq);
+ bcm63xx_internal_irq_chip.irq_set_affinity =
+ bcm63xx_internal_set_affinity;
+
+ cpumask_clear(irq_default_affinity);
+ cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+ }
+#endif
+}
diff --git a/arch/mips/bcm63xx/nvram.c b/arch/mips/bcm63xx/nvram.c
new file mode 100644
index 000000000..05757aed0
--- /dev/null
+++ b/arch/mips/bcm63xx/nvram.c
@@ -0,0 +1,98 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2012 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#define pr_fmt(fmt) "bcm63xx_nvram: " fmt
+
+#include <linux/bcm963xx_nvram.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/if_ether.h>
+
+#include <bcm63xx_nvram.h>
+
+#define BCM63XX_DEFAULT_PSI_SIZE 64
+
+static struct bcm963xx_nvram nvram;
+static int mac_addr_used;
+
+void __init bcm63xx_nvram_init(void *addr)
+{
+ u32 crc, expected_crc;
+ u8 hcs_mac_addr[ETH_ALEN] = { 0x00, 0x10, 0x18, 0xff, 0xff, 0xff };
+
+ /* extract nvram data */
+ memcpy(&nvram, addr, BCM963XX_NVRAM_V5_SIZE);
+
+ /* check checksum before using data */
+ if (bcm963xx_nvram_checksum(&nvram, &expected_crc, &crc))
+ pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
+ expected_crc, crc);
+
+	/* Cable modems have a different NVRAM, which is embedded in the eCos
+	 * firmware and not easily extractable, so provide at least a MAC
+	 * address pool.
+	 */
+ if (BCMCPU_IS_3368()) {
+ memcpy(nvram.mac_addr_base, hcs_mac_addr, ETH_ALEN);
+ nvram.mac_addr_count = 2;
+ }
+}
+
+u8 *bcm63xx_nvram_get_name(void)
+{
+ return nvram.name;
+}
+EXPORT_SYMBOL(bcm63xx_nvram_get_name);
+
+int bcm63xx_nvram_get_mac_address(u8 *mac)
+{
+ u8 *oui;
+ int count;
+
+ if (mac_addr_used >= nvram.mac_addr_count) {
+ pr_err("not enough mac addresses\n");
+ return -ENODEV;
+ }
+
+ memcpy(mac, nvram.mac_addr_base, ETH_ALEN);
+ oui = mac + ETH_ALEN/2 - 1;
+ count = mac_addr_used;
+
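+	/*
+	 * derive the next free address by incrementing the base address
+	 * 'count' times, propagating the carry through the device-specific
+	 * bytes only; fail if the increment would spill into the OUI.
+	 */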
+ while (count--) {
+ u8 *p = mac + ETH_ALEN - 1;
+
+ do {
+ (*p)++;
+ if (*p != 0)
+ break;
+ p--;
+ } while (p != oui);
+
+ if (p == oui) {
+ pr_err("unable to fetch mac address\n");
+ return -ENODEV;
+ }
+ }
+
+ mac_addr_used++;
+ return 0;
+}
+EXPORT_SYMBOL(bcm63xx_nvram_get_mac_address);
+
+int bcm63xx_nvram_get_psi_size(void)
+{
+ if (nvram.psi_size > 0)
+ return nvram.psi_size;
+
+ return BCM63XX_DEFAULT_PSI_SIZE;
+}
+EXPORT_SYMBOL(bcm63xx_nvram_get_psi_size);
diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
new file mode 100644
index 000000000..df69eaa45
--- /dev/null
+++ b/arch/mips/bcm63xx/prom.c
@@ -0,0 +1,100 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
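+	/* only BCM3368, BCM6358 and BCM6368 have a second UART */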
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/smp.h>
+#include <asm/bootinfo.h>
+#include <asm/bmips.h>
+#include <asm/smp-ops.h>
+#include <asm/mipsregs.h>
+#include <bcm63xx_board.h>
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_regs.h>
+
+void __init prom_init(void)
+{
+ u32 reg, mask;
+
+ bcm63xx_cpu_init();
+
+ /* stop any running watchdog */
+ bcm_wdt_writel(WDT_STOP_1, WDT_CTL_REG);
+ bcm_wdt_writel(WDT_STOP_2, WDT_CTL_REG);
+
+	/* disable all hardware block clocks for now */
+ if (BCMCPU_IS_3368())
+ mask = CKCTL_3368_ALL_SAFE_EN;
+ else if (BCMCPU_IS_6328())
+ mask = CKCTL_6328_ALL_SAFE_EN;
+ else if (BCMCPU_IS_6338())
+ mask = CKCTL_6338_ALL_SAFE_EN;
+ else if (BCMCPU_IS_6345())
+ mask = CKCTL_6345_ALL_SAFE_EN;
+ else if (BCMCPU_IS_6348())
+ mask = CKCTL_6348_ALL_SAFE_EN;
+ else if (BCMCPU_IS_6358())
+ mask = CKCTL_6358_ALL_SAFE_EN;
+ else if (BCMCPU_IS_6362())
+ mask = CKCTL_6362_ALL_SAFE_EN;
+ else if (BCMCPU_IS_6368())
+ mask = CKCTL_6368_ALL_SAFE_EN;
+ else
+ mask = 0;
+
+ reg = bcm_perf_readl(PERF_CKCTL_REG);
+ reg &= ~mask;
+ bcm_perf_writel(reg, PERF_CKCTL_REG);
+
+ /* do low level board init */
+ board_prom_init();
+
+ /* set up SMP */
+ if (!register_bmips_smp_ops()) {
+ /*
+ * BCM6328 might not have its second CPU enabled, while BCM3368
+ * and BCM6358 need special handling for their shared TLB, so
+ * disable SMP for now.
+ */
+ if (BCMCPU_IS_6328()) {
+ reg = bcm_readl(BCM_6328_OTP_BASE +
+ OTP_USER_BITS_6328_REG(3));
+
+ if (reg & OTP_6328_REG3_TP1_DISABLED)
+ bmips_smp_enabled = 0;
+ } else if (BCMCPU_IS_3368() || BCMCPU_IS_6358()) {
+ bmips_smp_enabled = 0;
+ }
+
+ if (!bmips_smp_enabled)
+ return;
+
+ /*
+ * The bootloader has set up the CPU1 reset vector at
+ * 0xa000_0200.
+ * This conflicts with the special interrupt vector (IV).
+ * The bootloader has also set up CPU1 to respond to the wrong
+ * IPI interrupt.
+ * Here we will start up CPU1 in the background and ask it to
+ * reconfigure itself then go back to sleep.
+ */
+ memcpy((void *)0xa0000200, bmips_smp_movevec, 0x20);
+ __sync();
+ set_c0_cause(C_SW0);
+ cpumask_set_cpu(1, &bmips_booted_mask);
+
+ /*
+ * FIXME: we really should have some sort of hazard barrier here
+ */
+ }
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/bcm63xx/reset.c b/arch/mips/bcm63xx/reset.c
new file mode 100644
index 000000000..64574e74c
--- /dev/null
+++ b/arch/mips/bcm63xx/reset.c
@@ -0,0 +1,219 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_regs.h>
+#include <bcm63xx_reset.h>
+
+#define __GEN_RESET_BITS_TABLE(__cpu) \
+ [BCM63XX_RESET_SPI] = BCM## __cpu ##_RESET_SPI, \
+ [BCM63XX_RESET_ENET] = BCM## __cpu ##_RESET_ENET, \
+ [BCM63XX_RESET_USBH] = BCM## __cpu ##_RESET_USBH, \
+ [BCM63XX_RESET_USBD] = BCM## __cpu ##_RESET_USBD, \
+ [BCM63XX_RESET_DSL] = BCM## __cpu ##_RESET_DSL, \
+ [BCM63XX_RESET_SAR] = BCM## __cpu ##_RESET_SAR, \
+ [BCM63XX_RESET_EPHY] = BCM## __cpu ##_RESET_EPHY, \
+ [BCM63XX_RESET_ENETSW] = BCM## __cpu ##_RESET_ENETSW, \
+ [BCM63XX_RESET_PCM] = BCM## __cpu ##_RESET_PCM, \
+ [BCM63XX_RESET_MPI] = BCM## __cpu ##_RESET_MPI, \
+ [BCM63XX_RESET_PCIE] = BCM## __cpu ##_RESET_PCIE, \
+ [BCM63XX_RESET_PCIE_EXT] = BCM## __cpu ##_RESET_PCIE_EXT,
+
+#define BCM3368_RESET_SPI SOFTRESET_3368_SPI_MASK
+#define BCM3368_RESET_ENET SOFTRESET_3368_ENET_MASK
+#define BCM3368_RESET_USBH 0
+#define BCM3368_RESET_USBD SOFTRESET_3368_USBS_MASK
+#define BCM3368_RESET_DSL 0
+#define BCM3368_RESET_SAR 0
+#define BCM3368_RESET_EPHY SOFTRESET_3368_EPHY_MASK
+#define BCM3368_RESET_ENETSW 0
+#define BCM3368_RESET_PCM SOFTRESET_3368_PCM_MASK
+#define BCM3368_RESET_MPI SOFTRESET_3368_MPI_MASK
+#define BCM3368_RESET_PCIE 0
+#define BCM3368_RESET_PCIE_EXT 0
+
+#define BCM6328_RESET_SPI SOFTRESET_6328_SPI_MASK
+#define BCM6328_RESET_ENET 0
+#define BCM6328_RESET_USBH SOFTRESET_6328_USBH_MASK
+#define BCM6328_RESET_USBD SOFTRESET_6328_USBS_MASK
+#define BCM6328_RESET_DSL 0
+#define BCM6328_RESET_SAR SOFTRESET_6328_SAR_MASK
+#define BCM6328_RESET_EPHY SOFTRESET_6328_EPHY_MASK
+#define BCM6328_RESET_ENETSW SOFTRESET_6328_ENETSW_MASK
+#define BCM6328_RESET_PCM SOFTRESET_6328_PCM_MASK
+#define BCM6328_RESET_MPI 0
+#define BCM6328_RESET_PCIE \
+ (SOFTRESET_6328_PCIE_MASK | \
+ SOFTRESET_6328_PCIE_CORE_MASK | \
+ SOFTRESET_6328_PCIE_HARD_MASK)
+#define BCM6328_RESET_PCIE_EXT SOFTRESET_6328_PCIE_EXT_MASK
+
+#define BCM6338_RESET_SPI SOFTRESET_6338_SPI_MASK
+#define BCM6338_RESET_ENET SOFTRESET_6338_ENET_MASK
+#define BCM6338_RESET_USBH SOFTRESET_6338_USBH_MASK
+#define BCM6338_RESET_USBD SOFTRESET_6338_USBS_MASK
+#define BCM6338_RESET_DSL SOFTRESET_6338_ADSL_MASK
+#define BCM6338_RESET_SAR SOFTRESET_6338_SAR_MASK
+#define BCM6338_RESET_EPHY 0
+#define BCM6338_RESET_ENETSW 0
+#define BCM6338_RESET_PCM 0
+#define BCM6338_RESET_MPI 0
+#define BCM6338_RESET_PCIE 0
+#define BCM6338_RESET_PCIE_EXT 0
+
+#define BCM6348_RESET_SPI SOFTRESET_6348_SPI_MASK
+#define BCM6348_RESET_ENET SOFTRESET_6348_ENET_MASK
+#define BCM6348_RESET_USBH SOFTRESET_6348_USBH_MASK
+#define BCM6348_RESET_USBD SOFTRESET_6348_USBS_MASK
+#define BCM6348_RESET_DSL SOFTRESET_6348_ADSL_MASK
+#define BCM6348_RESET_SAR SOFTRESET_6348_SAR_MASK
+#define BCM6348_RESET_EPHY 0
+#define BCM6348_RESET_ENETSW 0
+#define BCM6348_RESET_PCM 0
+#define BCM6348_RESET_MPI 0
+#define BCM6348_RESET_PCIE 0
+#define BCM6348_RESET_PCIE_EXT 0
+
+#define BCM6358_RESET_SPI SOFTRESET_6358_SPI_MASK
+#define BCM6358_RESET_ENET SOFTRESET_6358_ENET_MASK
+#define BCM6358_RESET_USBH SOFTRESET_6358_USBH_MASK
+#define BCM6358_RESET_USBD 0
+#define BCM6358_RESET_DSL SOFTRESET_6358_ADSL_MASK
+#define BCM6358_RESET_SAR SOFTRESET_6358_SAR_MASK
+#define BCM6358_RESET_EPHY SOFTRESET_6358_EPHY_MASK
+#define BCM6358_RESET_ENETSW 0
+#define BCM6358_RESET_PCM SOFTRESET_6358_PCM_MASK
+#define BCM6358_RESET_MPI SOFTRESET_6358_MPI_MASK
+#define BCM6358_RESET_PCIE 0
+#define BCM6358_RESET_PCIE_EXT 0
+
+#define BCM6362_RESET_SPI SOFTRESET_6362_SPI_MASK
+#define BCM6362_RESET_ENET 0
+#define BCM6362_RESET_USBH SOFTRESET_6362_USBH_MASK
+#define BCM6362_RESET_USBD SOFTRESET_6362_USBS_MASK
+#define BCM6362_RESET_DSL 0
+#define BCM6362_RESET_SAR SOFTRESET_6362_SAR_MASK
+#define BCM6362_RESET_EPHY SOFTRESET_6362_EPHY_MASK
+#define BCM6362_RESET_ENETSW SOFTRESET_6362_ENETSW_MASK
+#define BCM6362_RESET_PCM SOFTRESET_6362_PCM_MASK
+#define BCM6362_RESET_MPI 0
+#define BCM6362_RESET_PCIE (SOFTRESET_6362_PCIE_MASK | \
+ SOFTRESET_6362_PCIE_CORE_MASK)
+#define BCM6362_RESET_PCIE_EXT SOFTRESET_6362_PCIE_EXT_MASK
+
+#define BCM6368_RESET_SPI SOFTRESET_6368_SPI_MASK
+#define BCM6368_RESET_ENET 0
+#define BCM6368_RESET_USBH SOFTRESET_6368_USBH_MASK
+#define BCM6368_RESET_USBD SOFTRESET_6368_USBS_MASK
+#define BCM6368_RESET_DSL 0
+#define BCM6368_RESET_SAR SOFTRESET_6368_SAR_MASK
+#define BCM6368_RESET_EPHY SOFTRESET_6368_EPHY_MASK
+#define BCM6368_RESET_ENETSW SOFTRESET_6368_ENETSW_MASK
+#define BCM6368_RESET_PCM SOFTRESET_6368_PCM_MASK
+#define BCM6368_RESET_MPI SOFTRESET_6368_MPI_MASK
+#define BCM6368_RESET_PCIE 0
+#define BCM6368_RESET_PCIE_EXT 0
+
+/*
+ * per-SoC core reset bits, indexed by enum bcm63xx_core_reset; a zero
+ * entry means the SoC has no reset line for that block.
+ */
+static const u32 bcm3368_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(3368)
+};
+
+static const u32 bcm6328_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6328)
+};
+
+static const u32 bcm6338_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6338)
+};
+
+static const u32 bcm6348_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6348)
+};
+
+static const u32 bcm6358_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6358)
+};
+
+static const u32 bcm6362_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6362)
+};
+
+static const u32 bcm6368_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6368)
+};
+
+static const u32 *bcm63xx_reset_bits;
+static int reset_reg;
+
+static int __init bcm63xx_reset_bits_init(void)
+{
+ if (BCMCPU_IS_3368()) {
+ reset_reg = PERF_SOFTRESET_6358_REG;
+ bcm63xx_reset_bits = bcm3368_reset_bits;
+ } else if (BCMCPU_IS_6328()) {
+ reset_reg = PERF_SOFTRESET_6328_REG;
+ bcm63xx_reset_bits = bcm6328_reset_bits;
+ } else if (BCMCPU_IS_6338()) {
+ reset_reg = PERF_SOFTRESET_REG;
+ bcm63xx_reset_bits = bcm6338_reset_bits;
+ } else if (BCMCPU_IS_6348()) {
+ reset_reg = PERF_SOFTRESET_REG;
+ bcm63xx_reset_bits = bcm6348_reset_bits;
+ } else if (BCMCPU_IS_6358()) {
+ reset_reg = PERF_SOFTRESET_6358_REG;
+ bcm63xx_reset_bits = bcm6358_reset_bits;
+ } else if (BCMCPU_IS_6362()) {
+ reset_reg = PERF_SOFTRESET_6362_REG;
+ bcm63xx_reset_bits = bcm6362_reset_bits;
+ } else if (BCMCPU_IS_6368()) {
+ reset_reg = PERF_SOFTRESET_6368_REG;
+ bcm63xx_reset_bits = bcm6368_reset_bits;
+ }
+
+ return 0;
+}
+
+static DEFINE_SPINLOCK(reset_mutex);
+
+static void __bcm63xx_core_set_reset(u32 mask, int enable)
+{
+ unsigned long flags;
+ u32 val;
+
+ if (!mask)
+ return;
+
+ spin_lock_irqsave(&reset_mutex, flags);
+ val = bcm_perf_readl(reset_reg);
+
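+	/* reset bits are active low: clear the bit to assert, set it to release */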
+ if (enable)
+ val &= ~mask;
+ else
+ val |= mask;
+
+ bcm_perf_writel(val, reset_reg);
+ spin_unlock_irqrestore(&reset_mutex, flags);
+}
+
+void bcm63xx_core_set_reset(enum bcm63xx_core_reset core, int reset)
+{
+ __bcm63xx_core_set_reset(bcm63xx_reset_bits[core], reset);
+}
+EXPORT_SYMBOL(bcm63xx_core_set_reset);
+
+postcore_initcall(bcm63xx_reset_bits_init);
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
new file mode 100644
index 000000000..d811e3e03
--- /dev/null
+++ b/arch/mips/bcm63xx/setup.c
@@ -0,0 +1,170 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/memblock.h>
+#include <linux/ioport.h>
+#include <linux/pm.h>
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+#include <asm/reboot.h>
+#include <asm/cacheflush.h>
+#include <bcm63xx_board.h>
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_regs.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_gpio.h>
+
+void bcm63xx_machine_halt(void)
+{
+ pr_info("System halted\n");
+ while (1)
+ ;
+}
+
+static void bcm6348_a1_reboot(void)
+{
+ u32 reg;
+
+ /* soft reset all blocks */
+ pr_info("soft-resetting all blocks ...\n");
+ reg = bcm_perf_readl(PERF_SOFTRESET_REG);
+ reg &= ~SOFTRESET_6348_ALL;
+ bcm_perf_writel(reg, PERF_SOFTRESET_REG);
+ mdelay(10);
+
+ reg = bcm_perf_readl(PERF_SOFTRESET_REG);
+ reg |= SOFTRESET_6348_ALL;
+ bcm_perf_writel(reg, PERF_SOFTRESET_REG);
+ mdelay(10);
+
+ /* Jump to the power on address. */
+ pr_info("jumping to reset vector.\n");
+	/* set high vectors (base at 0xbfc00000) */
+ set_c0_status(ST0_BEV | ST0_ERL);
+ /* run uncached in kseg0 */
+ change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
+ __flush_cache_all();
+ /* remove all wired TLB entries */
+ write_c0_wired(0);
+ __asm__ __volatile__(
+ "jr\t%0"
+ :
+ : "r" (0xbfc00000));
+ while (1)
+ ;
+}
+
+void bcm63xx_machine_reboot(void)
+{
+ u32 reg, perf_regs[2] = { 0, 0 };
+ unsigned int i;
+
+ /* mask and clear all external irq */
+ switch (bcm63xx_get_cpu_id()) {
+ case BCM3368_CPU_ID:
+ perf_regs[0] = PERF_EXTIRQ_CFG_REG_3368;
+ break;
+ case BCM6328_CPU_ID:
+ perf_regs[0] = PERF_EXTIRQ_CFG_REG_6328;
+ break;
+ case BCM6338_CPU_ID:
+ perf_regs[0] = PERF_EXTIRQ_CFG_REG_6338;
+ break;
+ case BCM6345_CPU_ID:
+ perf_regs[0] = PERF_EXTIRQ_CFG_REG_6345;
+ break;
+ case BCM6348_CPU_ID:
+ perf_regs[0] = PERF_EXTIRQ_CFG_REG_6348;
+ break;
+ case BCM6358_CPU_ID:
+ perf_regs[0] = PERF_EXTIRQ_CFG_REG_6358;
+ break;
+ case BCM6362_CPU_ID:
+ perf_regs[0] = PERF_EXTIRQ_CFG_REG_6362;
+ break;
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (!perf_regs[i])
+ break;
+
+ reg = bcm_perf_readl(perf_regs[i]);
+ if (BCMCPU_IS_6348()) {
+ reg &= ~EXTIRQ_CFG_MASK_ALL_6348;
+ reg |= EXTIRQ_CFG_CLEAR_ALL_6348;
+ } else {
+ reg &= ~EXTIRQ_CFG_MASK_ALL;
+ reg |= EXTIRQ_CFG_CLEAR_ALL;
+ }
+ bcm_perf_writel(reg, perf_regs[i]);
+ }
+
+ if (BCMCPU_IS_6348() && (bcm63xx_get_cpu_rev() == 0xa1))
+ bcm6348_a1_reboot();
+
+ pr_info("triggering watchdog soft-reset...\n");
+ if (BCMCPU_IS_6328()) {
+ bcm_wdt_writel(1, WDT_SOFTRESET_REG);
+ } else {
+ reg = bcm_perf_readl(PERF_SYS_PLL_CTL_REG);
+ reg |= SYS_PLL_SOFT_RESET;
+ bcm_perf_writel(reg, PERF_SYS_PLL_CTL_REG);
+ }
+ while (1)
+ ;
+}
+
+static void __bcm63xx_machine_reboot(char *p)
+{
+ bcm63xx_machine_reboot();
+}
+
+/*
+ * return system type in /proc/cpuinfo
+ */
+const char *get_system_type(void)
+{
+	static char buf[128];
+
+	snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%02X)",
+ board_get_name(),
+ bcm63xx_get_cpu_id(), bcm63xx_get_cpu_rev());
+ return buf;
+}
+
+void __init plat_time_init(void)
+{
+ mips_hpt_frequency = bcm63xx_get_cpu_freq() / 2;
+}
+
+void __init plat_mem_setup(void)
+{
+ memblock_add(0, bcm63xx_get_memory_size());
+
+ _machine_halt = bcm63xx_machine_halt;
+ _machine_restart = __bcm63xx_machine_reboot;
+ pm_power_off = bcm63xx_machine_halt;
+
+ set_io_port_base(0);
+ ioport_resource.start = 0;
+ ioport_resource.end = ~0;
+
+ board_setup();
+}
+
+int __init bcm63xx_register_devices(void)
+{
+ /* register gpiochip */
+ bcm63xx_gpio_init();
+
+ return board_register_devices();
+}
+
+arch_initcall(bcm63xx_register_devices);
diff --git a/arch/mips/bcm63xx/timer.c b/arch/mips/bcm63xx/timer.c
new file mode 100644
index 000000000..a86065854
--- /dev/null
+++ b/arch/mips/bcm63xx/timer.c
@@ -0,0 +1,206 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_timer.h>
+#include <bcm63xx_regs.h>
+
+static DEFINE_RAW_SPINLOCK(timer_reg_lock);
+static DEFINE_RAW_SPINLOCK(timer_data_lock);
+static struct clk *periph_clk;
+
+static struct timer_data {
+ void (*cb)(void *);
+ void *data;
+} timer_data[BCM63XX_TIMER_COUNT];
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ u32 stat;
+ int i;
+
+ raw_spin_lock(&timer_reg_lock);
+ stat = bcm_timer_readl(TIMER_IRQSTAT_REG);
+ bcm_timer_writel(stat, TIMER_IRQSTAT_REG);
+ raw_spin_unlock(&timer_reg_lock);
+
+ for (i = 0; i < BCM63XX_TIMER_COUNT; i++) {
+ if (!(stat & TIMER_IRQSTAT_TIMER_CAUSE(i)))
+ continue;
+
+ raw_spin_lock(&timer_data_lock);
+ if (!timer_data[i].cb) {
+ raw_spin_unlock(&timer_data_lock);
+ continue;
+ }
+
+ timer_data[i].cb(timer_data[i].data);
+ raw_spin_unlock(&timer_data_lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int bcm63xx_timer_enable(int id)
+{
+ u32 reg;
+ unsigned long flags;
+
+ if (id >= BCM63XX_TIMER_COUNT)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&timer_reg_lock, flags);
+
+ reg = bcm_timer_readl(TIMER_CTLx_REG(id));
+ reg |= TIMER_CTL_ENABLE_MASK;
+ bcm_timer_writel(reg, TIMER_CTLx_REG(id));
+
+ reg = bcm_timer_readl(TIMER_IRQSTAT_REG);
+ reg |= TIMER_IRQSTAT_TIMER_IR_EN(id);
+ bcm_timer_writel(reg, TIMER_IRQSTAT_REG);
+
+ raw_spin_unlock_irqrestore(&timer_reg_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(bcm63xx_timer_enable);
+
+int bcm63xx_timer_disable(int id)
+{
+ u32 reg;
+ unsigned long flags;
+
+ if (id >= BCM63XX_TIMER_COUNT)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&timer_reg_lock, flags);
+
+ reg = bcm_timer_readl(TIMER_CTLx_REG(id));
+ reg &= ~TIMER_CTL_ENABLE_MASK;
+ bcm_timer_writel(reg, TIMER_CTLx_REG(id));
+
+ reg = bcm_timer_readl(TIMER_IRQSTAT_REG);
+ reg &= ~TIMER_IRQSTAT_TIMER_IR_EN(id);
+ bcm_timer_writel(reg, TIMER_IRQSTAT_REG);
+
+ raw_spin_unlock_irqrestore(&timer_reg_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(bcm63xx_timer_disable);
+
+int bcm63xx_timer_register(int id, void (*callback)(void *data), void *data)
+{
+ unsigned long flags;
+ int ret;
+
+ if (id >= BCM63XX_TIMER_COUNT || !callback)
+ return -EINVAL;
+
+ ret = 0;
+ raw_spin_lock_irqsave(&timer_data_lock, flags);
+ if (timer_data[id].cb) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ timer_data[id].cb = callback;
+ timer_data[id].data = data;
+
+out:
+ raw_spin_unlock_irqrestore(&timer_data_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(bcm63xx_timer_register);
+
+void bcm63xx_timer_unregister(int id)
+{
+ unsigned long flags;
+
+ if (id >= BCM63XX_TIMER_COUNT)
+ return;
+
+ raw_spin_lock_irqsave(&timer_data_lock, flags);
+ timer_data[id].cb = NULL;
+ raw_spin_unlock_irqrestore(&timer_data_lock, flags);
+}
+EXPORT_SYMBOL(bcm63xx_timer_unregister);
+
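+/* convert a duration in microseconds to peripheral clock ticks */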
+unsigned int bcm63xx_timer_countdown(unsigned int countdown_us)
+{
+ return (clk_get_rate(periph_clk) / (1000 * 1000)) * countdown_us;
+}
+EXPORT_SYMBOL(bcm63xx_timer_countdown);
+
+int bcm63xx_timer_set(int id, int monotonic, unsigned int countdown_us)
+{
+ u32 reg, countdown;
+ unsigned long flags;
+
+ if (id >= BCM63XX_TIMER_COUNT)
+ return -EINVAL;
+
+ countdown = bcm63xx_timer_countdown(countdown_us);
+ if (countdown & ~TIMER_CTL_COUNTDOWN_MASK)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&timer_reg_lock, flags);
+ reg = bcm_timer_readl(TIMER_CTLx_REG(id));
+
+ if (monotonic)
+ reg &= ~TIMER_CTL_MONOTONIC_MASK;
+ else
+ reg |= TIMER_CTL_MONOTONIC_MASK;
+
+ reg &= ~TIMER_CTL_COUNTDOWN_MASK;
+ reg |= countdown;
+ bcm_timer_writel(reg, TIMER_CTLx_REG(id));
+
+ raw_spin_unlock_irqrestore(&timer_reg_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(bcm63xx_timer_set);
+
+int bcm63xx_timer_init(void)
+{
+ int ret, irq;
+ u32 reg;
+
+ reg = bcm_timer_readl(TIMER_IRQSTAT_REG);
+ reg &= ~TIMER_IRQSTAT_TIMER0_IR_EN;
+ reg &= ~TIMER_IRQSTAT_TIMER1_IR_EN;
+ reg &= ~TIMER_IRQSTAT_TIMER2_IR_EN;
+ bcm_timer_writel(reg, TIMER_IRQSTAT_REG);
+
+ periph_clk = clk_get(NULL, "periph");
+ if (IS_ERR(periph_clk))
+ return -ENODEV;
+
+ irq = bcm63xx_get_irq_number(IRQ_TIMER);
+ ret = request_irq(irq, timer_interrupt, 0, "bcm63xx_timer", NULL);
+ if (ret) {
+ pr_err("%s: failed to register irq\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+arch_initcall(bcm63xx_timer_init);
diff --git a/arch/mips/bmips/Kconfig b/arch/mips/bmips/Kconfig
new file mode 100644
index 000000000..63dfc6950
--- /dev/null
+++ b/arch/mips/bmips/Kconfig
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: GPL-2.0
+if BMIPS_GENERIC
+
+choice
+ prompt "Built-in device tree"
+ help
+ Legacy bootloaders do not pass a DTB pointer to the kernel, so
+ if a "wrapper" is not being used, the kernel will need to include
+ a device tree that matches the target board.
+
+ The builtin DTB will only be used if the firmware does not supply
+ a valid DTB.
+
+config DT_NONE
+ bool "None"
+
+config DT_BCM93384WVG
+ bool "BCM93384WVG Zephyr CPU"
+ select BUILTIN_DTB
+
+config DT_BCM93384WVG_VIPER
+ bool "BCM93384WVG Viper CPU (EXPERIMENTAL)"
+ select BUILTIN_DTB
+
+config DT_BCM96368MVWG
+ bool "BCM96368MVWG"
+ select BUILTIN_DTB
+
+config DT_BCM9EJTAGPRB
+ bool "BCM9EJTAGPRB"
+ select BUILTIN_DTB
+
+config DT_BCM97125CBMB
+ bool "BCM97125CBMB"
+ select BUILTIN_DTB
+
+config DT_BCM97346DBSMB
+ bool "BCM97346DBSMB"
+ select BUILTIN_DTB
+
+config DT_BCM97358SVMB
+ bool "BCM97358SVMB"
+ select BUILTIN_DTB
+
+config DT_BCM97360SVMB
+ bool "BCM97360SVMB"
+ select BUILTIN_DTB
+
+config DT_BCM97362SVMB
+ bool "BCM97362SVMB"
+ select BUILTIN_DTB
+
+config DT_BCM97420C
+ bool "BCM97420C"
+ select BUILTIN_DTB
+
+config DT_BCM97425SVMB
+ bool "BCM97425SVMB"
+ select BUILTIN_DTB
+
+config DT_BCM97435SVMB
+ bool "BCM97435SVMB"
+ select BUILTIN_DTB
+
+config DT_COMTREND_VR3032U
+ bool "Comtrend VR-3032u"
+ select BUILTIN_DTB
+
+config DT_NETGEAR_CVG834G
+ bool "NETGEAR CVG834G"
+ select BUILTIN_DTB
+
+config DT_SFR_NEUFBOX4_SERCOMM
+ bool "SFR Neufbox 4 (Sercomm)"
+ select BUILTIN_DTB
+
+config DT_SFR_NEUFBOX6_SERCOMM
+ bool "SFR Neufbox 6 (Sercomm)"
+ select BUILTIN_DTB
+
+endchoice
+
+endif
diff --git a/arch/mips/bmips/Makefile b/arch/mips/bmips/Makefile
new file mode 100644
index 000000000..1165bf2ef
--- /dev/null
+++ b/arch/mips/bmips/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += setup.o irq.o dma.o
diff --git a/arch/mips/bmips/Platform b/arch/mips/bmips/Platform
new file mode 100644
index 000000000..1434ea31c
--- /dev/null
+++ b/arch/mips/bmips/Platform
@@ -0,0 +1,6 @@
+#
+# Broadcom Generic BMIPS kernel
+#
+cflags-$(CONFIG_BMIPS_GENERIC) += \
+ -I$(srctree)/arch/mips/include/asm/mach-bmips/
+load-$(CONFIG_BMIPS_GENERIC) := 0xffffffff80010000
diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c
new file mode 100644
index 000000000..daef44f68
--- /dev/null
+++ b/arch/mips/bmips/dma.c
@@ -0,0 +1,128 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com>
+ */
+
+#define pr_fmt(fmt) "bmips-dma: " fmt
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <asm/bmips.h>
+
+/*
+ * BCM338x has configurable address translation windows which allow the
+ * peripherals' DMA addresses to be different from the Zephyr-visible
+ * physical addresses. e.g. usb_dma_addr = zephyr_pa ^ 0x08000000
+ *
+ * If the "brcm,ubus" node has a "dma-ranges" property we will enable this
+ * translation globally using the provided information. This implements a
+ * very limited subset of "dma-ranges" support and it will probably be
+ * replaced by a more generic version later.
+ */
+
+struct bmips_dma_range {
+ u32 child_addr;
+ u32 parent_addr;
+ u32 size;
+};
+
+static struct bmips_dma_range *bmips_dma_ranges;
+
+#define FLUSH_RAC 0x100
+
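+/*
+ * translate between CPU physical and bus DMA addresses by walking the
+ * "dma-ranges" table; the walk stops at the zero-sized sentinel entry and
+ * addresses outside every range are mapped 1:1.
+ */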
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t pa)
+{
+ struct bmips_dma_range *r;
+
+ for (r = bmips_dma_ranges; r && r->size; r++) {
+ if (pa >= r->child_addr &&
+ pa < (r->child_addr + r->size))
+ return pa - r->child_addr + r->parent_addr;
+ }
+ return pa;
+}
+
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ struct bmips_dma_range *r;
+
+ for (r = bmips_dma_ranges; r && r->size; r++) {
+ if (dma_addr >= r->parent_addr &&
+ dma_addr < (r->parent_addr + r->size))
+ return dma_addr - r->parent_addr + r->child_addr;
+ }
+ return dma_addr;
+}
+
+bool bmips_rac_flush_disable;
+
+void arch_sync_dma_for_cpu_all(void)
+{
+ void __iomem *cbr = BMIPS_GET_CBR();
+ u32 cfg;
+
+ if (boot_cpu_type() != CPU_BMIPS3300 &&
+ boot_cpu_type() != CPU_BMIPS4350 &&
+ boot_cpu_type() != CPU_BMIPS4380)
+ return;
+
+ if (unlikely(bmips_rac_flush_disable))
+ return;
+
+ /* Flush stale data out of the readahead cache */
+ cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+	__raw_writel(cfg | FLUSH_RAC, cbr + BMIPS_RAC_CONFIG);
+ __raw_readl(cbr + BMIPS_RAC_CONFIG);
+}
+
+static int __init bmips_init_dma_ranges(void)
+{
+ struct device_node *np =
+ of_find_compatible_node(NULL, NULL, "brcm,ubus");
+ const __be32 *data;
+ struct bmips_dma_range *r;
+ int len;
+
+ if (!np)
+ return 0;
+
+ data = of_get_property(np, "dma-ranges", &len);
+ if (!data)
+ goto out_good;
+
+ len /= sizeof(*data) * 3;
+ if (!len)
+ goto out_bad;
+
+ /* add a dummy (zero) entry at the end as a sentinel */
+ bmips_dma_ranges = kcalloc(len + 1, sizeof(struct bmips_dma_range),
+ GFP_KERNEL);
+ if (!bmips_dma_ranges)
+ goto out_bad;
+
+ for (r = bmips_dma_ranges; len; len--, r++) {
+ r->child_addr = be32_to_cpup(data++);
+ r->parent_addr = be32_to_cpup(data++);
+ r->size = be32_to_cpup(data++);
+ }
+
+out_good:
+ of_node_put(np);
+ return 0;
+
+out_bad:
+ pr_err("error parsing dma-ranges property\n");
+ of_node_put(np);
+ return -EINVAL;
+}
+arch_initcall(bmips_init_dma_ranges);
diff --git a/arch/mips/bmips/irq.c b/arch/mips/bmips/irq.c
new file mode 100644
index 000000000..c4daa590b
--- /dev/null
+++ b/arch/mips/bmips/irq.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ * Author: Kevin Cernekee <cernekee@gmail.com>
+ */
+
+#include <linux/of.h>
+#include <linux/irqchip.h>
+
+#include <asm/bmips.h>
+#include <asm/irq.h>
+#include <asm/irq_cpu.h>
+#include <asm/time.h>
+
+static const struct of_device_id smp_intc_dt_match[] = {
+ { .compatible = "brcm,bcm7038-l1-intc" },
+ { .compatible = "brcm,bcm6345-l1-intc" },
+ {}
+};
+
+unsigned int get_c0_compare_int(void)
+{
+ return CP0_LEGACY_COMPARE_IRQ;
+}
+
+void __init arch_init_irq(void)
+{
+ struct device_node *dn;
+
+ /* Only these controllers support SMP IRQ affinity */
+ dn = of_find_matching_node(NULL, smp_intc_dt_match);
+ if (dn)
+ of_node_put(dn);
+ else
+ bmips_tp1_irqs = 0;
+
+ irqchip_init();
+}
+
+IRQCHIP_DECLARE(mips_cpu_intc, "mti,cpu-interrupt-controller",
+ mips_cpu_irq_of_init);
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
new file mode 100644
index 000000000..16063081d
--- /dev/null
+++ b/arch/mips/bmips/setup.c
@@ -0,0 +1,212 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com>
+ */
+
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/memblock.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_clk.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/libfdt.h>
+#include <linux/smp.h>
+#include <asm/addrspace.h>
+#include <asm/bmips.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu-type.h>
+#include <asm/mipsregs.h>
+#include <asm/prom.h>
+#include <asm/smp-ops.h>
+#include <asm/time.h>
+#include <asm/traps.h>
+
+#define RELO_NORMAL_VEC BIT(18)
+
+#define REG_BCM6328_OTP ((void __iomem *)CKSEG1ADDR(0x1000062c))
+#define BCM6328_TP1_DISABLED BIT(9)
+
+extern bool bmips_rac_flush_disable;
+
+static const unsigned long kbase = VMLINUX_LOAD_ADDRESS & 0xfff00000;
+
+struct bmips_quirk {
+ const char *compatible;
+ void (*quirk_fn)(void);
+};
+
+static void kbase_setup(void)
+{
+ __raw_writel(kbase | RELO_NORMAL_VEC,
+ BMIPS_GET_CBR() + BMIPS_RELO_VECTOR_CONTROL_1);
+ ebase = kbase;
+}
+
+static void bcm3384_viper_quirks(void)
+{
+ /*
+ * Some experimental CM boxes are set up to let CM own the Viper TP0
+ * and let Linux own TP1. This requires moving the kernel
+ * load address to a non-conflicting region (e.g. via
+ * CONFIG_PHYSICAL_START) and supplying an alternate DTB.
+ * If we detect this condition, we need to move the MIPS exception
+ * vectors up to an area that we own.
+ *
+ * This is distinct from the OTHER special case mentioned in
+ * smp-bmips.c (boot on TP1, but enable SMP, then TP0 becomes our
+ * logical CPU#1). For the Viper TP1 case, SMP is off limits.
+ *
+ * Also note that many BMIPS435x CPUs do not have a
+ * BMIPS_RELO_VECTOR_CONTROL_1 register, so it isn't safe to just
+ * write VMLINUX_LOAD_ADDRESS into that register on every SoC.
+ */
+ board_ebase_setup = &kbase_setup;
+ bmips_smp_enabled = 0;
+}
+
+static void bcm63xx_fixup_cpu1(void)
+{
+ /*
+ * The bootloader has set up the CPU1 reset vector at
+ * 0xa000_0200.
+ * This conflicts with the special interrupt vector (IV).
+ * The bootloader has also set up CPU1 to respond to the wrong
+ * IPI interrupt.
+ * Here we will start up CPU1 in the background and ask it to
+ * reconfigure itself then go back to sleep.
+ */
+ memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20);
+ __sync();
+ set_c0_cause(C_SW0);
+ cpumask_set_cpu(1, &bmips_booted_mask);
+}
+
+static void bcm6328_quirks(void)
+{
+ /* Check CPU1 status in OTP (it is usually disabled) */
+ if (__raw_readl(REG_BCM6328_OTP) & BCM6328_TP1_DISABLED)
+ bmips_smp_enabled = 0;
+ else
+ bcm63xx_fixup_cpu1();
+}
+
+static void bcm6358_quirks(void)
+{
+ /*
+ * BCM3368/BCM6358 need special handling for their shared TLB, so
+ * disable SMP for now
+ */
+ bmips_smp_enabled = 0;
+
+ /*
+ * RAC flush causes kernel panics on BCM6358 when booting from TP1
+ * because the bootloader is not initializing it properly.
+ */
+ bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
+}
+
+static void bcm6368_quirks(void)
+{
+ bcm63xx_fixup_cpu1();
+}
+
+static const struct bmips_quirk bmips_quirk_list[] = {
+ { "brcm,bcm3368", &bcm6358_quirks },
+ { "brcm,bcm3384-viper", &bcm3384_viper_quirks },
+ { "brcm,bcm33843-viper", &bcm3384_viper_quirks },
+ { "brcm,bcm6328", &bcm6328_quirks },
+ { "brcm,bcm6358", &bcm6358_quirks },
+ { "brcm,bcm6362", &bcm6368_quirks },
+ { "brcm,bcm6368", &bcm6368_quirks },
+ { "brcm,bcm63168", &bcm6368_quirks },
+ { "brcm,bcm63268", &bcm6368_quirks },
+ { },
+};
+
+void __init prom_init(void)
+{
+ bmips_cpu_setup();
+ register_bmips_smp_ops();
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
+
+const char *get_system_type(void)
+{
+ return "Generic BMIPS kernel";
+}
+
+void __init plat_time_init(void)
+{
+ struct device_node *np;
+ u32 freq;
+
+ np = of_find_node_by_name(NULL, "cpus");
+ if (!np)
+ panic("missing 'cpus' DT node");
+ if (of_property_read_u32(np, "mips-hpt-frequency", &freq) < 0)
+ panic("missing 'mips-hpt-frequency' property");
+ of_node_put(np);
+
+ mips_hpt_frequency = freq;
+}
+
+void __init plat_mem_setup(void)
+{
+ void *dtb;
+ const struct bmips_quirk *q;
+
+ set_io_port_base(0);
+ ioport_resource.start = 0;
+ ioport_resource.end = ~0;
+
+ /* intended to somewhat resemble ARM; see Documentation/arm/booting.rst */
+ if (fw_arg0 == 0 && fw_arg1 == 0xffffffff)
+ dtb = phys_to_virt(fw_arg2);
+ else if (fw_passed_dtb) /* UHI interface or appended dtb */
+ dtb = (void *)fw_passed_dtb;
+ else if (&__dtb_start != &__dtb_end)
+ dtb = (void *)__dtb_start;
+ else
+ panic("no dtb found");
+
+ __dt_setup_arch(dtb);
+
+ for (q = bmips_quirk_list; q->quirk_fn; q++) {
+ if (of_flat_dt_is_compatible(of_get_flat_dt_root(),
+ q->compatible)) {
+ q->quirk_fn();
+ }
+ }
+}
+
+void __init device_tree_init(void)
+{
+ struct device_node *np;
+
+ unflatten_and_copy_device_tree();
+
+ /* Disable SMP boot unless both CPUs are listed in DT and !disabled */
+ np = of_find_node_by_name(NULL, "cpus");
+ if (np && of_get_available_child_count(np) <= 1)
+ bmips_smp_enabled = 0;
+ of_node_put(np);
+}
+
+static int __init plat_dev_init(void)
+{
+ of_clk_init(NULL);
+ return 0;
+}
+
+device_initcall(plat_dev_init);
diff --git a/arch/mips/boot/.gitignore b/arch/mips/boot/.gitignore
new file mode 100644
index 000000000..2adc8581a
--- /dev/null
+++ b/arch/mips/boot/.gitignore
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+mkboot
+elf2ecoff
+vmlinux.*
+zImage
+zImage.tmp
+calc_vmlinuz_load_addr
+uImage
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
new file mode 100644
index 000000000..a3da2c5d6
--- /dev/null
+++ b/arch/mips/boot/Makefile
@@ -0,0 +1,173 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995, 1998, 2001, 2002 by Ralf Baechle
+# Copyright (C) 2004 Maciej W. Rozycki
+#
+
+#
+# Some DECstations need all possible sections of an ECOFF executable
+#
+ifdef CONFIG_MACH_DECSTATION
+ e2eflag := -a
+endif
+
+#
+# Drop some uninteresting sections in the kernel.
+# This is only relevant for ELF kernels but doesn't hurt a.out
+#
+drop-sections := .reginfo .mdebug .comment .note .pdr .options .MIPS.options
+strip-flags := $(addprefix --remove-section=,$(drop-sections))
+
+hostprogs := elf2ecoff
+
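+# Payload suffix used for the uImage link target, chosen from the configured
+# kernel compression method (defaults to an uncompressed .bin payload).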
+suffix-y := bin
+suffix-$(CONFIG_KERNEL_BZIP2) := bz2
+suffix-$(CONFIG_KERNEL_GZIP) := gz
+suffix-$(CONFIG_KERNEL_LZMA) := lzma
+suffix-$(CONFIG_KERNEL_LZO) := lzo
+
+targets := vmlinux.ecoff
+quiet_cmd_ecoff = ECOFF $@
+ cmd_ecoff = $(obj)/elf2ecoff $(VMLINUX) $@ $(e2eflag)
+$(obj)/vmlinux.ecoff: $(obj)/elf2ecoff $(VMLINUX) FORCE
+ $(call if_changed,ecoff)
+
+targets += vmlinux.bin
+quiet_cmd_bin = OBJCOPY $@
+ cmd_bin = $(OBJCOPY) -O binary $(strip-flags) $(VMLINUX) $@
+$(obj)/vmlinux.bin: $(VMLINUX) FORCE
+ $(call if_changed,bin)
+
+targets += vmlinux.srec
+quiet_cmd_srec = OBJCOPY $@
+ cmd_srec = $(OBJCOPY) -S -O srec $(strip-flags) $(VMLINUX) $@
+$(obj)/vmlinux.srec: $(VMLINUX) FORCE
+ $(call if_changed,srec)
+
+UIMAGE_LOADADDR = $(VMLINUX_LOAD_ADDRESS)
+UIMAGE_ENTRYADDR = $(VMLINUX_ENTRY_ADDRESS)
+
+#
+# Compressed vmlinux images
+#
+
+extra-y += vmlinux.bin.bz2
+extra-y += vmlinux.bin.gz
+extra-y += vmlinux.bin.lzma
+extra-y += vmlinux.bin.lzo
+
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,bzip2)
+
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,gzip)
+
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lzma)
+
+$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lzo)
+
+#
+# Compressed u-boot images
+#
+
+targets += uImage
+targets += uImage.bin
+targets += uImage.bz2
+targets += uImage.gz
+targets += uImage.lzma
+targets += uImage.lzo
+
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,uimage,none)
+
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
+ $(call if_changed,uimage,bzip2)
+
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
+ $(call if_changed,uimage,gzip)
+
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
+ $(call if_changed,uimage,lzma)
+
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
+ $(call if_changed,uimage,lzo)
+
+$(obj)/uImage: $(obj)/uImage.$(suffix-y)
+ @ln -sf $(notdir $<) $@
+ @echo ' Image $@ is ready'
+
+#
+# Flattened Image Tree (.itb) images
+#
+
+ifeq ($(ADDR_BITS),32)
+itb_addr_cells = 1
+endif
+ifeq ($(ADDR_BITS),64)
+itb_addr_cells = 2
+endif
+
+targets += vmlinux.its.S
+
+quiet_cmd_its_cat = CAT $@
+ cmd_its_cat = cat $(real-prereqs) >$@
+
+$(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS)) FORCE
+ $(call if_changed,its_cat)
+
+targets += vmlinux.its
+targets += vmlinux.gz.its
+targets += vmlinux.bz2.its
+targets += vmlinux.lzma.its
+targets += vmlinux.lzo.its
+
+quiet_cmd_cpp_its_S = ITS $@
+ cmd_cpp_its_S = $(CPP) -P -C -o $@ $< \
+ -DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \
+ -DVMLINUX_BINARY="\"$(3)\"" \
+ -DVMLINUX_COMPRESSION="\"$(2)\"" \
+ -DVMLINUX_LOAD_ADDRESS=$(VMLINUX_LOAD_ADDRESS) \
+ -DVMLINUX_ENTRY_ADDRESS=$(VMLINUX_ENTRY_ADDRESS) \
+ -DADDR_BITS=$(ADDR_BITS) \
+ -DADDR_CELLS=$(itb_addr_cells)
+
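+# Each rule below preprocesses the .its source once per compression type,
+# substituting the payload file name and compression method into the FIT
+# image description.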
+$(obj)/vmlinux.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
+ $(call if_changed,cpp_its_S,none,vmlinux.bin)
+
+$(obj)/vmlinux.gz.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
+ $(call if_changed,cpp_its_S,gzip,vmlinux.bin.gz)
+
+$(obj)/vmlinux.bz2.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
+ $(call if_changed,cpp_its_S,bzip2,vmlinux.bin.bz2)
+
+$(obj)/vmlinux.lzma.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
+ $(call if_changed,cpp_its_S,lzma,vmlinux.bin.lzma)
+
+$(obj)/vmlinux.lzo.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE
+ $(call if_changed,cpp_its_S,lzo,vmlinux.bin.lzo)
+
+targets += vmlinux.itb
+targets += vmlinux.gz.itb
+targets += vmlinux.bz2.itb
+targets += vmlinux.lzma.itb
+targets += vmlinux.lzo.itb
+
+quiet_cmd_itb-image = ITB $@
+ cmd_itb-image = \
+ env PATH="$(objtree)/scripts/dtc:$(PATH)" \
+ $(BASH) $(MKIMAGE) \
+ -D "-I dts -O dtb -p 500 \
+ --include $(objtree)/arch/mips \
+ --warning no-unit_address_vs_reg" \
+ -f $(2) $@
+
+$(obj)/vmlinux.itb: $(obj)/vmlinux.its $(obj)/vmlinux.bin FORCE
+ $(call if_changed,itb-image,$<)
+
+$(obj)/vmlinux.%.itb: $(obj)/vmlinux.%.its $(obj)/vmlinux.bin.% FORCE
+ $(call if_changed,itb-image,$<)
diff --git a/arch/mips/boot/compressed/.gitignore b/arch/mips/boot/compressed/.gitignore
new file mode 100644
index 000000000..d35839561
--- /dev/null
+++ b/arch/mips/boot/compressed/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ashldi3.c
+bswapsi.c
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
new file mode 100644
index 000000000..eae0fad30
--- /dev/null
+++ b/arch/mips/boot/compressed/Makefile
@@ -0,0 +1,155 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.
+#
+# Adapted for MIPS by Pete Popov, Dan Malek
+#
+# Copyright (C) 1994 by Linus Torvalds
+# Adapted for PowerPC by Gary Thomas
+# modified by Cort (cort@cs.nmt.edu)
+#
+# Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University
+# Author: Wu Zhangjin <wuzhangjin@gmail.com>
+#
+
+include $(srctree)/arch/mips/Kbuild.platforms
+
+# set the default size of the malloc area used for decompression
+BOOT_HEAP_SIZE := 0x400000
+
+# Disable Function Tracer
+KBUILD_CFLAGS := $(filter-out -pg, $(KBUILD_CFLAGS))
+
+KBUILD_CFLAGS := $(filter-out -fstack-protector, $(KBUILD_CFLAGS))
+
+# Disable lq/sq in zboot
+ifdef CONFIG_CPU_LOONGSON64
+KBUILD_CFLAGS := $(filter-out -march=loongson3a, $(KBUILD_CFLAGS)) -march=mips64r2
+endif
+
+KBUILD_CFLAGS := $(KBUILD_CFLAGS) -D__KERNEL__ -D__DISABLE_EXPORTS \
+ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) -D"VMLINUX_LOAD_ADDRESS_ULL=$(VMLINUX_LOAD_ADDRESS)ull"
+
+KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
+ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
+ -DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS)
+
+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT := n
+UBSAN_SANITIZE := n
+
+# decompressor objects (linked with vmlinuz)
+vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o $(obj)/bswapsi.o
+
+ifdef CONFIG_DEBUG_ZBOOT
+vmlinuzobjs-$(CONFIG_DEBUG_ZBOOT) += $(obj)/dbg.o
+vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART16550) += $(obj)/uart-16550.o
+vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART_PROM) += $(obj)/uart-prom.o
+vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY) += $(obj)/uart-alchemy.o
+vmlinuzobjs-$(CONFIG_ATH79) += $(obj)/uart-ath79.o
+endif
+
+extra-y += uart-ath79.c
+$(obj)/uart-ath79.c: $(srctree)/arch/mips/ath79/early_printk.c
+ $(call cmd,shipped)
+
+vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
+
+extra-y += ashldi3.c
+$(obj)/ashldi3.c: $(obj)/%.c: $(srctree)/lib/%.c FORCE
+ $(call if_changed,shipped)
+
+extra-y += bswapsi.c
+$(obj)/bswapsi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c FORCE
+ $(call if_changed,shipped)
+
+targets := $(notdir $(vmlinuzobjs-y))
+
+targets += vmlinux.bin
+OBJCOPYFLAGS_vmlinux.bin := $(OBJCOPYFLAGS) -O binary -R .comment -S
+$(obj)/vmlinux.bin: $(KBUILD_IMAGE) FORCE
+ $(call if_changed,objcopy)
+
+tool_$(CONFIG_KERNEL_GZIP) = gzip
+tool_$(CONFIG_KERNEL_BZIP2) = bzip2
+tool_$(CONFIG_KERNEL_LZ4) = lz4
+tool_$(CONFIG_KERNEL_LZMA) = lzma
+tool_$(CONFIG_KERNEL_LZO) = lzo
+tool_$(CONFIG_KERNEL_XZ) = xzkern
+tool_$(CONFIG_KERNEL_ZSTD) = zstd22
+
+targets += vmlinux.bin.z
+$(obj)/vmlinux.bin.z: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,$(tool_y))
+
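+# Wrap the compressed payload into an object file: objcopy adds vmlinux.bin.z
+# as a .image section of the dummy object, which the decompressor later finds
+# between __image_begin and __image_end (see ld.script).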
+targets += piggy.o dummy.o
+OBJCOPYFLAGS_piggy.o := --add-section=.image=$(obj)/vmlinux.bin.z \
+ --set-section-flags=.image=contents,alloc,load,readonly,data
+$(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
+ $(call if_changed,objcopy)
+
+HOSTCFLAGS_calc_vmlinuz_load_addr.o += $(LINUXINCLUDE)
+
+# Calculate the load address of the compressed kernel image
+hostprogs := calc_vmlinuz_load_addr
+
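+# Use the platform-supplied zload-y address when set; otherwise place vmlinuz
+# just past the end of the uncompressed image, as computed by
+# calc_vmlinuz_load_addr.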
+ifneq ($(zload-y),)
+VMLINUZ_LOAD_ADDRESS := $(zload-y)
+else
+VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
+ $(obj)/vmlinux.bin $(LINKER_LOAD_ADDRESS))
+endif
+UIMAGE_LOADADDR = $(VMLINUZ_LOAD_ADDRESS)
+
+vmlinuzobjs-y += $(obj)/piggy.o
+
+quiet_cmd_zld = LD $@
+ cmd_zld = $(LD) $(KBUILD_LDFLAGS) -Ttext $(VMLINUZ_LOAD_ADDRESS) -T $< $(vmlinuzobjs-y) -o $@
+quiet_cmd_strip = STRIP $@
+ cmd_strip = $(STRIP) -s $@
+vmlinuz: $(src)/ld.script $(vmlinuzobjs-y) $(obj)/calc_vmlinuz_load_addr
+ $(call cmd,zld)
+ $(call cmd,strip)
+
+#
+# Some DECstations need all possible sections of an ECOFF executable
+#
+ifdef CONFIG_MACH_DECSTATION
+ e2eflag := -a
+endif
+
+# elf2ecoff can only handle 32-bit images
+hostprogs += ../elf2ecoff
+
+ifdef CONFIG_32BIT
+ VMLINUZ = vmlinuz
+else
+ VMLINUZ = vmlinuz.32
+endif
+
+quiet_cmd_32 = OBJCOPY $@
+ cmd_32 = $(OBJCOPY) -O $(32bit-bfd) $(OBJCOPYFLAGS) $< $@
+vmlinuz.32: vmlinuz
+ $(call cmd,32)
+
+quiet_cmd_ecoff = ECOFF $@
+ cmd_ecoff = $< $(VMLINUZ) $@ $(e2eflag)
+vmlinuz.ecoff: $(obj)/../elf2ecoff $(VMLINUZ)
+ $(call cmd,ecoff)
+
+OBJCOPYFLAGS_vmlinuz.bin := $(OBJCOPYFLAGS) -O binary
+vmlinuz.bin: vmlinuz
+ $(call cmd,objcopy)
+
+OBJCOPYFLAGS_vmlinuz.srec := $(OBJCOPYFLAGS) -S -O srec
+vmlinuz.srec: vmlinuz
+ $(call cmd,objcopy)
+
+uzImage.bin: vmlinuz.bin FORCE
+ $(call if_changed,uimage,none)
+
+clean-files += $(objtree)/vmlinuz
+clean-files += $(objtree)/vmlinuz.32
+clean-files += $(objtree)/vmlinuz.ecoff
+clean-files += $(objtree)/vmlinuz.bin
+clean-files += $(objtree)/vmlinuz.srec
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
new file mode 100644
index 000000000..080b926d2
--- /dev/null
+++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2010 "Wu Zhangjin" <wuzhangjin@gmail.com>
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/sizes.h>
+
+int main(int argc, char *argv[])
+{
+ unsigned long long vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr;
+ struct stat sb;
+
+ if (argc != 3) {
+ fprintf(stderr, "Usage: %s <pathname> <vmlinux_load_addr>\n",
+ argv[0]);
+ return EXIT_FAILURE;
+ }
+
+ if (stat(argv[1], &sb) == -1) {
+ perror("stat");
+ return EXIT_FAILURE;
+ }
+
+ /* Parse the hexadecimal load address string into a number */
+ errno = 0;
+ if (sscanf(argv[2], "%llx", &vmlinux_load_addr) != 1) {
+ if (errno != 0)
+ perror("sscanf");
+ else
+ fprintf(stderr, "No matching characters\n");
+
+ return EXIT_FAILURE;
+ }
+
+ vmlinux_size = (uint64_t)sb.st_size;
+ vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size;
+
+ /*
+ * Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE,
+ * which may be as large as 64KB depending on the kernel configuration.
+ */
+
+ vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K);
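+
+ /*
+ * Worked example (hypothetical numbers): a 0x2345-byte vmlinux.bin
+ * loaded at 0x80100000 gives a sum of 0x80102345 above, which the
+ * rounding here bumps up to a vmlinuz load address of 0x80110000.
+ */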
+
+ printf("0x%llx\n", vmlinuz_load_addr);
+
+ return EXIT_SUCCESS;
+}
diff --git a/arch/mips/boot/compressed/dbg.c b/arch/mips/boot/compressed/dbg.c
new file mode 100644
index 000000000..f6728a8fd
--- /dev/null
+++ b/arch/mips/boot/compressed/dbg.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MIPS-specific debug support for pre-boot environment
+ *
+ * NOTE: putc() is board specific; if your board has a 16550-compatible UART,
+ * please select SYS_SUPPORTS_ZBOOT_UART16550 for your machine. Otherwise, you
+ * need to implement your own putc().
+ */
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+void __weak putc(char c)
+{
+}
+
+void puts(const char *s)
+{
+ char c;
+ while ((c = *s++) != '\0') {
+ putc(c);
+ if (c == '\n')
+ putc('\r');
+ }
+}
+
+void puthex(unsigned long long val)
+{
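+ /* Prints only the low 32 bits of val, as eight hex digits */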
+
+ unsigned char buf[10];
+ int i;
+ for (i = 7; i >= 0; i--) {
+ buf[i] = "0123456789ABCDEF"[val & 0x0F];
+ val >>= 4;
+ }
+ buf[8] = '\0';
+ puts(buf);
+}
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
new file mode 100644
index 000000000..1e91155be
--- /dev/null
+++ b/arch/mips/boot/compressed/decompress.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: Matt Porter <mporter@mvista.com>
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/libfdt.h>
+
+#include <asm/addrspace.h>
+#include <asm/unaligned.h>
+
+/*
+ * These two variables specify the free memory region
+ * that can be used as a temporary malloc area.
+ */
+unsigned long free_mem_ptr;
+unsigned long free_mem_end_ptr;
+
+/* The linker tells us where the image is. */
+extern unsigned char __image_begin, __image_end;
+
+/* debug interfaces */
+#ifdef CONFIG_DEBUG_ZBOOT
+extern void puts(const char *s);
+extern void puthex(unsigned long long val);
+#else
+#define puts(s) do {} while (0)
+#define puthex(val) do {} while (0)
+#endif
+
+extern char __appended_dtb[];
+
+void error(char *x)
+{
+ puts("\n\n");
+ puts(x);
+ puts("\n\n -- System halted");
+
+ while (1)
+ ; /* Halt */
+}
+
+/* activate the code for pre-boot environment */
+#define STATIC static
+
+#ifdef CONFIG_KERNEL_GZIP
+#include "../../../../lib/decompress_inflate.c"
+#endif
+
+#ifdef CONFIG_KERNEL_BZIP2
+#include "../../../../lib/decompress_bunzip2.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZ4
+#include "../../../../lib/decompress_unlz4.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZMA
+#include "../../../../lib/decompress_unlzma.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZO
+#include "../../../../lib/decompress_unlzo.c"
+#endif
+
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
+#ifdef CONFIG_KERNEL_ZSTD
+#include "../../../../lib/decompress_unzstd.c"
+#endif
+
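+/*
+ * Minimal stack-protector support for the decompressor: a fixed canary
+ * value (its bytes are NUL, LF, CR and 0xff, a common terminator-canary
+ * choice) and a handler that simply reports the corruption and halts.
+ */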
+const unsigned long __stack_chk_guard = 0x000a0dff;
+
+void __stack_chk_fail(void)
+{
+ error("stack-protector: Kernel stack is corrupted\n");
+}
+
+void decompress_kernel(unsigned long boot_heap_start)
+{
+ unsigned long zimage_start, zimage_size;
+
+ zimage_start = (unsigned long)(&__image_begin);
+ zimage_size = (unsigned long)(&__image_end) -
+ (unsigned long)(&__image_begin);
+
+ puts("zimage at: ");
+ puthex(zimage_start);
+ puts(" ");
+ puthex(zimage_size + zimage_start);
+ puts("\n");
+
+ /* This area is used as the malloc heap while decompressing */
+ free_mem_ptr = boot_heap_start;
+ free_mem_end_ptr = boot_heap_start + BOOT_HEAP_SIZE;
+
+ /* Display standard Linux/MIPS boot prompt */
+ puts("Uncompressing Linux at load address ");
+ puthex(VMLINUX_LOAD_ADDRESS_ULL);
+ puts("\n");
+
+ /* Decompress the kernel with the selected algorithm */
+ __decompress((char *)zimage_start, zimage_size, 0, 0,
+ (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error);
+
+ if (IS_ENABLED(CONFIG_MIPS_RAW_APPENDED_DTB) &&
+ fdt_magic((void *)&__appended_dtb) == FDT_MAGIC) {
+ unsigned int image_size, dtb_size;
+
+ dtb_size = fdt_totalsize((void *)&__appended_dtb);
+
+ /* the last four bytes are always the image size, in little endian */
+ image_size = get_unaligned_le32((void *)&__image_end - 4);
+
+ /* copy dtb to where the booted kernel will expect it */
+ memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size,
+ __appended_dtb, dtb_size);
+ }
+
+ /* FIXME: should we flush cache here? */
+ puts("Now, booting the kernel...\n");
+}
diff --git a/arch/mips/boot/compressed/dummy.c b/arch/mips/boot/compressed/dummy.c
new file mode 100644
index 000000000..31dbf45bf
--- /dev/null
+++ b/arch/mips/boot/compressed/dummy.c
@@ -0,0 +1,4 @@
+int main(void)
+{
+ return 0;
+}
diff --git a/arch/mips/boot/compressed/head.S b/arch/mips/boot/compressed/head.S
new file mode 100644
index 000000000..409cb483a
--- /dev/null
+++ b/arch/mips/boot/compressed/head.S
@@ -0,0 +1,56 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995 Waldorf Electronics
+ * Written by Ralf Baechle and Andreas Busse
+ * Copyright (C) 1995 - 1999 Ralf Baechle
+ * Copyright (C) 1996 Paul M. Antoine
+ * Modified for DECStation and hence R3000 support by Paul M. Antoine
+ * Further modifications by David S. Miller and Harald Koerfgen
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+ .set noreorder
+ .cprestore
+ LEAF(start)
+start:
+ /* Save boot rom start args */
+ move s0, a0
+ move s1, a1
+ move s2, a2
+ move s3, a3
+
+ /* Clear BSS */
+ PTR_LA a0, _edata
+ PTR_LA a2, _end
+1: sw zero, 0(a0)
+ bne a2, a0, 1b
+ addiu a0, a0, 4
+
+ PTR_LA a0, (.heap) /* heap address */
+ PTR_LA sp, (.stack + 8192) /* stack address */
+
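+ /* Call decompress_kernel(a0 = boot heap) and return to label 2 below */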
+ PTR_LA ra, 2f
+ PTR_LA k0, decompress_kernel
+ jr k0
+ nop
+2:
+ move a0, s0
+ move a1, s1
+ move a2, s2
+ move a3, s3
+ PTR_LI k0, KERNEL_ENTRY
+ jr k0
+ nop
+3:
+ b 3b
+ nop
+ END(start)
+
+ .comm .heap,BOOT_HEAP_SIZE,4
+ .comm .stack,4096*2,4
diff --git a/arch/mips/boot/compressed/ld.script b/arch/mips/boot/compressed/ld.script
new file mode 100644
index 000000000..2ed08fbef
--- /dev/null
+++ b/arch/mips/boot/compressed/ld.script
@@ -0,0 +1,57 @@
+/*
+ * Linker script for the compressed MIPS kernel (vmlinuz)
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin <wuzhanjing@gmail.com>
+ * Copyright (C) 2010 "Wu Zhangjin" <wuzhanjing@gmail.com>
+ */
+
+OUTPUT_ARCH(mips)
+ENTRY(start)
+PHDRS {
+ text PT_LOAD FLAGS(7); /* RWX */
+}
+SECTIONS
+{
+ /* Text and read-only data */
+ /* . = VMLINUZ_LOAD_ADDRESS; */
+ .text : {
+ *(.text)
+ *(.rodata)
+ }: text
+ /* End of text section */
+
+ /* Writable data */
+ .data : {
+ *(.data)
+ /* Put the compressed image here */
+ __image_begin = .;
+ *(.image)
+ __image_end = .;
+ CONSTRUCTORS
+ . = ALIGN(16);
+ }
+ __appended_dtb = .;
+ /* leave space for appended DTB */
+ . += 0x100000;
+
+ _edata = .;
+ /* End of data section */
+
+ /* BSS */
+ .bss : {
+ *(.bss)
+ }
+ . = ALIGN(16);
+ _end = .;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.MIPS.options)
+ *(.options)
+ *(.pdr)
+ *(.reginfo)
+ *(.comment)
+ *(.note)
+ }
+}
diff --git a/arch/mips/boot/compressed/string.c b/arch/mips/boot/compressed/string.c
new file mode 100644
index 000000000..0b593b709
--- /dev/null
+++ b/arch/mips/boot/compressed/string.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arch/mips/boot/compressed/string.c
+ *
+ * Very small subset of simple string routines
+ */
+
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+void *memcpy(void *dest, const void *src, size_t n)
+{
+ int i;
+ const char *s = src;
+ char *d = dest;
+
+ for (i = 0; i < n; i++)
+ d[i] = s[i];
+ return dest;
+}
+
+void *memset(void *s, int c, size_t n)
+{
+ int i;
+ char *ss = s;
+
+ for (i = 0; i < n; i++)
+ ss[i] = c;
+ return s;
+}
+
+void * __weak memmove(void *dest, const void *src, size_t n)
+{
+ unsigned int i;
+ const char *s = src;
+ char *d = dest;
+
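+ /*
+ * Make the copy safe for overlapping buffers: copy forwards when the
+ * destination lies below the source, backwards otherwise.
+ */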
+ if ((uintptr_t)dest < (uintptr_t)src) {
+ for (i = 0; i < n; i++)
+ d[i] = s[i];
+ } else {
+ for (i = n; i > 0; i--)
+ d[i - 1] = s[i - 1];
+ }
+ return dest;
+}
diff --git a/arch/mips/boot/compressed/uart-16550.c b/arch/mips/boot/compressed/uart-16550.c
new file mode 100644
index 000000000..aee8d7b8f
--- /dev/null
+++ b/arch/mips/boot/compressed/uart-16550.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * 16550-compatible UART-based serial debug support for zboot
+ */
+
+#include <linux/types.h>
+#include <linux/serial_reg.h>
+
+#include <asm/addrspace.h>
+
+#if defined(CONFIG_MACH_LOONGSON64) || defined(CONFIG_MIPS_MALTA)
+#define UART_BASE 0x1fd003f8
+#define PORT(offset) (CKSEG1ADDR(UART_BASE) + (offset))
+#endif
+
+#ifdef CONFIG_AR7
+#include <ar7.h>
+#define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset))
+#endif
+
+#ifdef CONFIG_MACH_INGENIC
+#define INGENIC_UART0_BASE_ADDR 0x10030000
+#define PORT(offset) (CKSEG1ADDR(INGENIC_UART0_BASE_ADDR) + (4 * offset))
+#endif
+
+#ifdef CONFIG_CPU_XLR
+#define UART0_BASE 0x1EF14000
+#define PORT(offset) (CKSEG1ADDR(UART0_BASE) + (4 * offset))
+#define IOTYPE unsigned int
+#endif
+
+#ifdef CONFIG_CPU_XLP
+#define UART0_BASE 0x18030100
+#define PORT(offset) (CKSEG1ADDR(UART0_BASE) + (4 * offset))
+#define IOTYPE unsigned int
+#endif
+
+#ifndef IOTYPE
+#define IOTYPE char
+#endif
+
+#ifndef PORT
+#error please define the serial port address for your own machine
+#endif
+
+static inline unsigned int serial_in(int offset)
+{
+ return *((volatile IOTYPE *)PORT(offset)) & 0xFF;
+}
+
+static inline void serial_out(int offset, int value)
+{
+ *((volatile IOTYPE *)PORT(offset)) = value & 0xFF;
+}
+
+void putc(char c)
+{
+ int timeout = 1000000;
+
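+ /* Spin (bounded by the timeout) until the transmit holding register is empty */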
+ while (((serial_in(UART_LSR) & UART_LSR_THRE) == 0) && (timeout-- > 0))
+ ;
+
+ serial_out(UART_TX, c);
+}
diff --git a/arch/mips/boot/compressed/uart-alchemy.c b/arch/mips/boot/compressed/uart-alchemy.c
new file mode 100644
index 000000000..8ec63011e
--- /dev/null
+++ b/arch/mips/boot/compressed/uart-alchemy.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <asm/mach-au1x00/au1000.h>
+
+void putc(char c)
+{
+ alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
+}
diff --git a/arch/mips/boot/compressed/uart-prom.c b/arch/mips/boot/compressed/uart-prom.c
new file mode 100644
index 000000000..a8a0a32e0
--- /dev/null
+++ b/arch/mips/boot/compressed/uart-prom.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <asm/setup.h>
+
+void putc(char c)
+{
+ prom_putchar(c);
+}
diff --git a/arch/mips/boot/dts/Makefile b/arch/mips/boot/dts/Makefile
new file mode 100644
index 000000000..19027129a
--- /dev/null
+++ b/arch/mips/boot/dts/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+subdir-$(CONFIG_BMIPS_GENERIC) += brcm
+subdir-$(CONFIG_CAVIUM_OCTEON_SOC) += cavium-octeon
+subdir-$(CONFIG_MACH_PISTACHIO) += img
+subdir-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += img
+subdir-$(CONFIG_MACH_INGENIC) += ingenic
+subdir-$(CONFIG_LANTIQ) += lantiq
+subdir-$(CONFIG_MACH_LOONGSON64) += loongson
+subdir-$(CONFIG_MSCC_OCELOT) += mscc
+subdir-$(CONFIG_MIPS_MALTA) += mti
+subdir-$(CONFIG_LEGACY_BOARD_SEAD3) += mti
+subdir-$(CONFIG_NLM_XLP_BOARD) += netlogic
+subdir-$(CONFIG_FIT_IMAGE_FDT_NI169445) += ni
+subdir-$(CONFIG_MACH_PIC32) += pic32
+subdir-$(CONFIG_ATH79) += qca
+subdir-$(CONFIG_RALINK) += ralink
+subdir-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += xilfpga
+
+obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y))
diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile
new file mode 100644
index 000000000..d85f446cc
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/Makefile
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_DT_BCM93384WVG) += bcm93384wvg.dtb
+dtb-$(CONFIG_DT_BCM93384WVG_VIPER) += bcm93384wvg_viper.dtb
+dtb-$(CONFIG_DT_BCM96368MVWG) += bcm96368mvwg.dtb
+dtb-$(CONFIG_DT_BCM9EJTAGPRB) += bcm9ejtagprb.dtb
+dtb-$(CONFIG_DT_BCM97125CBMB) += bcm97125cbmb.dtb
+dtb-$(CONFIG_DT_BCM97346DBSMB) += bcm97346dbsmb.dtb
+dtb-$(CONFIG_DT_BCM97358SVMB) += bcm97358svmb.dtb
+dtb-$(CONFIG_DT_BCM97360SVMB) += bcm97360svmb.dtb
+dtb-$(CONFIG_DT_BCM97362SVMB) += bcm97362svmb.dtb
+dtb-$(CONFIG_DT_BCM97420C) += bcm97420c.dtb
+dtb-$(CONFIG_DT_BCM97425SVMB) += bcm97425svmb.dtb
+dtb-$(CONFIG_DT_BCM97435SVMB) += bcm97435svmb.dtb
+dtb-$(CONFIG_DT_COMTREND_VR3032U) += bcm63268-comtrend-vr-3032u.dtb
+dtb-$(CONFIG_DT_NETGEAR_CVG834G) += bcm3368-netgear-cvg834g.dtb
+dtb-$(CONFIG_DT_SFR_NEUFBOX4_SERCOMM) += bcm6358-neufbox4-sercomm.dtb
+dtb-$(CONFIG_DT_SFR_NEUFBOX6_SERCOMM) += bcm6362-neufbox6-sercomm.dtb
+
+dtb-$(CONFIG_DT_NONE) += \
+ bcm3368-netgear-cvg834g.dtb \
+ bcm6358-neufbox4-sercomm.dtb \
+ bcm6362-neufbox6-sercomm.dtb \
+ bcm63268-comtrend-vr-3032u.dtb \
+ bcm93384wvg.dtb \
+ bcm93384wvg_viper.dtb \
+ bcm96368mvwg.dtb \
+ bcm9ejtagprb.dtb \
+ bcm97125cbmb.dtb \
+ bcm97346dbsmb.dtb \
+ bcm97358svmb.dtb \
+ bcm97360svmb.dtb \
+ bcm97362svmb.dtb \
+ bcm97420c.dtb \
+ bcm97425svmb.dtb \
+ bcm97435svmb.dtb
+
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts b/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts
new file mode 100644
index 000000000..ed6023a91
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "bcm3368.dtsi"
+
+/ {
+ compatible = "netgear,cvg834g", "brcm,bcm3368";
+ model = "NETGEAR CVG834G";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x02000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm3368.dtsi b/arch/mips/boot/dts/brcm/bcm3368.dtsi
new file mode 100644
index 000000000..d4b2b430d
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm3368.dtsi
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm3368";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <150000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ clocks {
+ periph_clk: periph-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+ };
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ ubus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges;
+
+ clkctl: clock-controller@fff8c004 {
+ compatible = "brcm,bcm3368-clocks";
+ reg = <0xfff8c004 0x4>;
+ #clock-cells = <1>;
+ };
+
+ periph_cntl: syscon@fff8c008 {
+ compatible = "syscon";
+ reg = <0xfff8c008 0x4>;
+ native-endian;
+ };
+
+ reboot: syscon-reboot@fff8c008 {
+ compatible = "syscon-reboot";
+ regmap = <&periph_cntl>;
+ offset = <0x0>;
+ mask = <0x1>;
+ };
+
+ periph_intc: interrupt-controller@fff8c00c {
+ compatible = "brcm,bcm6345-l1-intc";
+ reg = <0xfff8c00c 0x8>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>;
+ };
+
+ uart0: serial@fff8c100 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0xfff8c100 0x18>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <2>;
+
+ clocks = <&periph_clk>;
+ clock-names = "refclk";
+
+ status = "disabled";
+ };
+
+ uart1: serial@fff8c120 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0xfff8c120 0x18>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <3>;
+
+ clocks = <&periph_clk>;
+ clock-names = "refclk";
+
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm3384_viper.dtsi b/arch/mips/boot/dts/brcm/bcm3384_viper.dtsi
new file mode 100644
index 000000000..eb2a9c6ed
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm3384_viper.dtsi
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm3384-viper", "brcm,bcm33843-viper";
+
+ memory@0 {
+ device_type = "memory";
+
+ /* Typical ranges. The bootloader should fill these in. */
+ reg = <0x06000000 0x02000000>,
+ <0x0e000000 0x02000000>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* 1/2 of the CPU core clock (standard MIPS behavior) */
+ mips-hpt-frequency = <300000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <0>;
+ };
+ };
+
+ cpu_intc: cpu_intc {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ periph_clk: periph_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <54000000>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ ubus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "brcm,ubus", "simple-bus";
+ ranges;
+ /* No dma-ranges on Viper. */
+
+ periph_intc: periph_intc@14e00048 {
+ compatible = "brcm,bcm3380-l2-intc";
+ reg = <0x14e00048 0x4 0x14e0004c 0x4>,
+ <0x14e00350 0x4 0x14e00354 0x4>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <4>;
+ };
+
+ cmips_intc: cmips_intc@151f8048 {
+ compatible = "brcm,bcm3380-l2-intc";
+ reg = <0x151f8048 0x4 0x151f804c 0x4>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <30>;
+ brcm,int-map-mask = <0xffffffff>;
+ };
+
+ uart0: serial@14e00520 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x14e00520 0x18>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <2>;
+ clocks = <&periph_clk>;
+ status = "disabled";
+ };
+
+ ehci0: usb@15400300 {
+ compatible = "brcm,bcm3384-ehci", "generic-ehci";
+ reg = <0x15400300 0x100>;
+ big-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <41>;
+ status = "disabled";
+ };
+
+ ohci0: usb@15400400 {
+ compatible = "brcm,bcm3384-ohci", "generic-ohci";
+ reg = <0x15400400 0x100>;
+ big-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <40>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm3384_zephyr.dtsi b/arch/mips/boot/dts/brcm/bcm3384_zephyr.dtsi
new file mode 100644
index 000000000..d7ad769a4
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm3384_zephyr.dtsi
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm3384", "brcm,bcm33843";
+
+ memory@0 {
+ device_type = "memory";
+
+ /* Typical range. The bootloader should fill this in. */
+ reg = <0x0 0x08000000>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* On BMIPS5000 this is 1/8th of the CPU core clock */
+ mips-hpt-frequency = <100000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ cpu_intc: cpu_intc {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ periph_clk: periph_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <54000000>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ ubus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "brcm,ubus", "simple-bus";
+ ranges;
+ dma-ranges = <0x00000000 0x08000000 0x08000000>,
+ <0x08000000 0x00000000 0x08000000>;
+
+ periph_intc: periph_intc@14e00038 {
+ compatible = "brcm,bcm3380-l2-intc";
+ reg = <0x14e00038 0x4 0x14e0003c 0x4>,
+ <0x14e00340 0x4 0x14e00344 0x4>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <4>;
+ };
+
+ zmips_intc: zmips_intc@104b0060 {
+ compatible = "brcm,bcm3380-l2-intc";
+ reg = <0x104b0060 0x4 0x104b0064 0x4>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <29>;
+ brcm,int-map-mask = <0xffffffff>;
+ };
+
+ iop_intc: iop_intc@14e00058 {
+ compatible = "brcm,bcm3380-l2-intc";
+ reg = <0x14e00058 0x4 0x14e0005c 0x4>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <6>;
+ brcm,int-map-mask = <0xffffffff>;
+ };
+
+ uart0: serial@14e00520 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x14e00520 0x18>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <2>;
+ clocks = <&periph_clk>;
+ status = "disabled";
+ };
+
+ ehci0: usb@15400300 {
+ compatible = "brcm,bcm3384-ehci", "generic-ehci";
+ reg = <0x15400300 0x100>;
+ big-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <41>;
+ status = "disabled";
+ };
+
+ ohci0: usb@15400400 {
+ compatible = "brcm,bcm3384-ohci", "generic-ohci";
+ reg = <0x15400400 0x100>;
+ big-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <40>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts b/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts
new file mode 100644
index 000000000..8d010b919
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "bcm63268.dtsi"
+
+/ {
+ compatible = "comtrend,vr-3032u", "brcm,bcm63268";
+ model = "Comtrend VR-3032u";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x04000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&leds0 {
+ status = "okay";
+ brcm,serial-leds;
+ brcm,serial-dat-low;
+ brcm,serial-shift-inv;
+
+ led@0 {
+ reg = <0>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <0>;
+ /* GPHY0 Speed 0 */
+ };
+ led@1 {
+ reg = <1>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <1>;
+ /* GPHY0 Speed 1 */
+ };
+ led@2 {
+ reg = <2>;
+ active-low;
+ label = "vr-3032u:red:inet";
+ };
+ led@3 {
+ reg = <3>;
+ active-low;
+ label = "vr-3032u:green:dsl";
+ };
+ led@4 {
+ reg = <4>;
+ active-low;
+ label = "vr-3032u:green:usb";
+ };
+ led@7 {
+ reg = <7>;
+ active-low;
+ label = "vr-3032u:green:wps";
+ };
+ led@8 {
+ reg = <8>;
+ active-low;
+ label = "vr-3032u:green:inet";
+ };
+ led@9 {
+ reg = <9>;
+ brcm,hardware-controlled;
+ /* EPHY0 Activity */
+ };
+ led@10 {
+ reg = <10>;
+ brcm,hardware-controlled;
+ /* EPHY1 Activity */
+ };
+ led@11 {
+ reg = <11>;
+ brcm,hardware-controlled;
+ /* EPHY2 Activity */
+ };
+ led@12 {
+ reg = <12>;
+ brcm,hardware-controlled;
+ /* GPHY0 Activity */
+ };
+ led@13 {
+ reg = <13>;
+ brcm,hardware-controlled;
+ /* EPHY0 Speed */
+ };
+ led@14 {
+ reg = <14>;
+ brcm,hardware-controlled;
+ /* EPHY1 Speed */
+ };
+ led@15 {
+ reg = <15>;
+ brcm,hardware-controlled;
+ /* EPHY2 Speed */
+ };
+ led@20 {
+ reg = <20>;
+ active-low;
+ label = "vr-3032u:green:power";
+ default-state = "on";
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi
new file mode 100644
index 000000000..365fa75cd
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm63268";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <200000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ clocks {
+ periph_clk: periph-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+ };
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ ubus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges;
+
+ clkctl: clock-controller@10000004 {
+ compatible = "brcm,bcm63268-clocks";
+ reg = <0x10000004 0x4>;
+ #clock-cells = <1>;
+ };
+
+ periph_cntl: syscon@10000008 {
+ compatible = "syscon";
+ reg = <0x10000008 0x4>;
+ native-endian;
+ };
+
+ reboot: syscon-reboot@10000008 {
+ compatible = "syscon-reboot";
+ regmap = <&periph_cntl>;
+ offset = <0x0>;
+ mask = <0x1>;
+ };
+
+ periph_intc: interrupt-controller@10000020 {
+ compatible = "brcm,bcm6345-l1-intc";
+ reg = <0x10000020 0x20>,
+ <0x10000040 0x20>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ uart0: serial@10000180 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x10000180 0x18>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <5>;
+
+ clocks = <&periph_clk>;
+ clock-names = "refclk";
+
+ status = "disabled";
+ };
+
+ uart1: serial@100001a0 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x100001a0 0x18>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <34>;
+
+ clocks = <&periph_clk>;
+ clock-names = "refclk";
+
+ status = "disabled";
+ };
+
+ leds0: led-controller@10001900 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm6328-leds";
+ reg = <0x10001900 0x24>;
+
+ status = "disabled";
+ };
+
+ periph_pwr: power-controller@1000184c {
+ compatible = "brcm,bcm6328-power-controller";
+ reg = <0x1000184c 0x4>;
+ #power-domain-cells = <1>;
+ };
+
+ ehci: usb@10002500 {
+ compatible = "brcm,bcm63268-ehci", "generic-ehci";
+ reg = <0x10002500 0x100>;
+ big-endian;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <10>;
+
+ status = "disabled";
+ };
+
+ ohci: usb@10002600 {
+ compatible = "brcm,bcm63268-ohci", "generic-ohci";
+ reg = <0x10002600 0x100>;
+ big-endian;
+ no-big-frame-no;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <9>;
+
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm6328.dtsi b/arch/mips/boot/dts/brcm/bcm6328.dtsi
new file mode 100644
index 000000000..1f9edd710
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm6328.dtsi
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm6328";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <160000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ clocks {
+ periph_clk: periph-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+ };
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ ubus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges;
+
+ clkctl: clock-controller@10000004 {
+ compatible = "brcm,bcm6328-clocks";
+ reg = <0x10000004 0x4>;
+ #clock-cells = <1>;
+ };
+
+ periph_intc: interrupt-controller@10000020 {
+ compatible = "brcm,bcm6345-l1-intc";
+ reg = <0x10000020 0x10>,
+ <0x10000030 0x10>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ uart0: serial@10000100 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x10000100 0x18>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <28>;
+ clocks = <&periph_clk>;
+ clock-names = "refclk";
+ status = "disabled";
+ };
+
+ uart1: serial@10000120 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x10000120 0x18>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <39>;
+ clocks = <&periph_clk>;
+ clock-names = "refclk";
+ status = "disabled";
+ };
+
+ timer: syscon@10000040 {
+ compatible = "syscon";
+ reg = <0x10000040 0x2c>;
+ native-endian;
+ };
+
+ reboot: syscon-reboot@10000068 {
+ compatible = "syscon-reboot";
+ regmap = <&timer>;
+ offset = <0x28>;
+ mask = <0x1>;
+ };
+
+ leds0: led-controller@10000800 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm6328-leds";
+ reg = <0x10000800 0x24>;
+ status = "disabled";
+ };
+
+ periph_pwr: power-controller@10001848 {
+ compatible = "brcm,bcm6328-power-controller";
+ reg = <0x10001848 0x4>;
+ #power-domain-cells = <1>;
+ };
+
+ ehci: usb@10002500 {
+ compatible = "brcm,bcm6328-ehci", "generic-ehci";
+ reg = <0x10002500 0x100>;
+ big-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <42>;
+ status = "disabled";
+ };
+
+ ohci: usb@10002600 {
+ compatible = "brcm,bcm6328-ohci", "generic-ohci";
+ reg = <0x10002600 0x100>;
+ big-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <41>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts b/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts
new file mode 100644
index 000000000..53e57cc29
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "bcm6358.dtsi"
+
+/ {
+ compatible = "sfr,nb4-ser", "brcm,bcm6358";
+ model = "SFR Neufbox 4 (Sercomm)";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x02000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&leds0 {
+ status = "okay";
+
+ led@0 {
+ reg = <0>;
+ active-low;
+ label = "nb4-ser:white:alarm";
+ };
+ led@2 {
+ reg = <2>;
+ active-low;
+ label = "nb4-ser:white:tv";
+ };
+ led@3 {
+ reg = <3>;
+ active-low;
+ label = "nb4-ser:white:tel";
+ };
+ led@4 {
+ reg = <4>;
+ active-low;
+ label = "nb4-ser:white:adsl";
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm6358.dtsi b/arch/mips/boot/dts/brcm/bcm6358.dtsi
new file mode 100644
index 000000000..89a3107ca
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm6358.dtsi
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm6358";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <150000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ clocks {
+ periph_clk: periph-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+ };
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ ubus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges;
+
+ clkctl: clock-controller@fffe0004 {
+ compatible = "brcm,bcm6358-clocks";
+ reg = <0xfffe0004 0x4>;
+ #clock-cells = <1>;
+ };
+
+ periph_cntl: syscon@fffe0008 {
+ compatible = "syscon";
+ reg = <0xfffe0008 0x4>;
+ native-endian;
+ };
+
+ reboot: syscon-reboot@fffe0008 {
+ compatible = "syscon-reboot";
+ regmap = <&periph_cntl>;
+ offset = <0x0>;
+ mask = <0x1>;
+ };
+
+ periph_intc: interrupt-controller@fffe000c {
+ compatible = "brcm,bcm6345-l1-intc";
+ reg = <0xfffe000c 0x8>,
+ <0xfffe0038 0x8>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ leds0: led-controller@fffe00d0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm6358-leds";
+ reg = <0xfffe00d0 0x8>;
+
+ status = "disabled";
+ };
+
+ uart0: serial@fffe0100 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0xfffe0100 0x18>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <2>;
+
+ clocks = <&periph_clk>;
+ clock-names = "refclk";
+
+ status = "disabled";
+ };
+
+ uart1: serial@fffe0120 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0xfffe0120 0x18>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <3>;
+
+ clocks = <&periph_clk>;
+ clock-names = "refclk";
+
+ status = "disabled";
+ };
+
+ ehci: usb@fffe1300 {
+ compatible = "brcm,bcm6358-ehci", "generic-ehci";
+ reg = <0xfffe1300 0x100>;
+ big-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <10>;
+ status = "disabled";
+ };
+
+ ohci: usb@fffe1400 {
+ compatible = "brcm,bcm6358-ohci", "generic-ohci";
+ reg = <0xfffe1400 0x100>;
+ big-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <5>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts b/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts
new file mode 100644
index 000000000..3e83bee5b
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "bcm6362.dtsi"
+
+/ {
+ compatible = "sfr,nb6-ser", "brcm,bcm6362";
+ model = "SFR NeufBox 6 (Sercomm)";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x08000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm6362.dtsi b/arch/mips/boot/dts/brcm/bcm6362.dtsi
new file mode 100644
index 000000000..0b2adefd7
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm6362.dtsi
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm6362";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <200000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ clocks {
+ periph_clk: periph-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+ };
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ ubus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges;
+
+ clkctl: clock-controller@10000004 {
+ compatible = "brcm,bcm6362-clocks";
+ reg = <0x10000004 0x4>;
+ #clock-cells = <1>;
+ };
+
+ periph_cntl: syscon@10000008 {
+ compatible = "syscon";
+ reg = <0x10000008 0x4>;
+ native-endian;
+ };
+
+ reboot: syscon-reboot@10000008 {
+ compatible = "syscon-reboot";
+ regmap = <&periph_cntl>;
+ offset = <0x0>;
+ mask = <0x1>;
+ };
+
+ periph_intc: interrupt-controller@10000020 {
+ compatible = "brcm,bcm6345-l1-intc";
+ reg = <0x10000020 0x10>,
+ <0x10000030 0x10>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ uart0: serial@10000100 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x10000100 0x18>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <3>;
+
+ clocks = <&periph_clk>;
+ clock-names = "refclk";
+
+ status = "disabled";
+ };
+
+ uart1: serial@10000120 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x10000120 0x18>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <4>;
+
+ clocks = <&periph_clk>;
+ clock-names = "refclk";
+
+ status = "disabled";
+ };
+
+ periph_pwr: power-controller@10001848 {
+ compatible = "brcm,bcm6362-power-controller";
+ reg = <0x10001848 0x4>;
+ #power-domain-cells = <1>;
+ };
+
+ leds0: led-controller@10001900 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm6328-leds";
+ reg = <0x10001900 0x24>;
+
+ status = "disabled";
+ };
+
+ ehci: usb@10002500 {
+ compatible = "brcm,bcm6362-ehci", "generic-ehci";
+ reg = <0x10002500 0x100>;
+ big-endian;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <10>;
+
+ status = "disabled";
+ };
+
+ ohci: usb@10002600 {
+ compatible = "brcm,bcm6362-ohci", "generic-ohci";
+ reg = <0x10002600 0x100>;
+ big-endian;
+ no-big-frame-no;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <9>;
+
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi
new file mode 100644
index 000000000..b84a3bfe8
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm6368";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <200000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ clocks {
+ periph_clk: periph-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+ };
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ ubus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges;
+
+ clkctl: clock-controller@10000004 {
+ compatible = "brcm,bcm6368-clocks";
+ reg = <0x10000004 0x4>;
+ #clock-cells = <1>;
+ };
+
+ periph_cntl: syscon@10000008 {
+ compatible = "syscon";
+ reg = <0x10000008 0x4>;
+ native-endian;
+ };
+
+ reboot: syscon-reboot@10000008 {
+ compatible = "syscon-reboot";
+ regmap = <&periph_cntl>;
+ offset = <0x0>;
+ mask = <0x1>;
+ };
+
+ periph_intc: interrupt-controller@10000020 {
+ compatible = "brcm,bcm6345-l1-intc";
+ reg = <0x10000020 0x10>,
+ <0x10000030 0x10>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ leds0: led-controller@100000d0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm6358-leds";
+ reg = <0x100000d0 0x8>;
+ status = "disabled";
+ };
+
+ uart0: serial@10000100 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x10000100 0x18>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <2>;
+ clocks = <&periph_clk>;
+ clock-names = "refclk";
+ status = "disabled";
+ };
+
+ uart1: serial@10000120 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x10000120 0x18>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <3>;
+ clocks = <&periph_clk>;
+ clock-names = "refclk";
+ status = "disabled";
+ };
+
+ ehci: usb@10001500 {
+ compatible = "brcm,bcm6368-ehci", "generic-ehci";
+ reg = <0x10001500 0x100>;
+ big-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <7>;
+ status = "disabled";
+ };
+
+ ohci: usb@10001600 {
+ compatible = "brcm,bcm6368-ohci", "generic-ohci";
+ reg = <0x10001600 0x100>;
+ big-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <5>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7125.dtsi b/arch/mips/boot/dts/brcm/bcm7125.dtsi
new file mode 100644
index 000000000..5bf77b6fc
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm7125.dtsi
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm7125";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <202500000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4380";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips4380";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ uart_clk: uart_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <81000000>;
+ };
+
+ upg_clk: upg_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+ };
+
+ rdb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges = <0 0x10000000 0x01000000>;
+
+ periph_intc: interrupt-controller@441400 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x441400 0x30>, <0x441600 0x30>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ sun_l2_intc: interrupt-controller@401800 {
+ compatible = "brcm,l2-intc";
+ reg = <0x401800 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <23>;
+ };
+
+ gisb-arb@400000 {
+ compatible = "brcm,bcm7400-gisb-arb";
+ reg = <0x400000 0xdc>;
+ native-endian;
+ interrupt-parent = <&sun_l2_intc>;
+ interrupts = <0>, <2>;
+ brcm,gisb-arb-master-mask = <0x2f7>;
+ brcm,gisb-arb-master-names = "ssp_0", "cpu_0", "pci_0",
+ "bsp_0", "rdc_0", "rptd_0",
+ "avd_0", "jtag_0";
+ };
+
+ upg_irq0_intc: interrupt-controller@406780 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x406780 0x8>;
+
+ brcm,int-map-mask = <0x44>, <0xf000000>, <0x100000>;
+ brcm,int-fwd-mask = <0x70000>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <18>, <19>, <20>;
+ interrupt-names = "upg_main", "upg_bsc", "upg_spi";
+ };
+
+ sun_top_ctrl: syscon@404000 {
+ compatible = "brcm,bcm7125-sun-top-ctrl", "syscon";
+ reg = <0x404000 0x60c>;
+ native-endian;
+ };
+
+ reboot {
+ compatible = "brcm,bcm7038-reboot";
+ syscon = <&sun_top_ctrl 0x8 0x14>;
+ };
+
+ uart0: serial@406b00 {
+ compatible = "ns16550a";
+ reg = <0x406b00 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <21>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart1: serial@406b40 {
+ compatible = "ns16550a";
+ reg = <0x406b40 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <64>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart2: serial@406b80 {
+ compatible = "ns16550a";
+ reg = <0x406b80 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <65>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ bsca: i2c@406200 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406200 0x58>;
+ interrupts = <24>;
+ interrupt-names = "upg_bsca";
+ status = "disabled";
+ };
+
+ bscb: i2c@406280 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406280 0x58>;
+ interrupts = <25>;
+ interrupt-names = "upg_bscb";
+ status = "disabled";
+ };
+
+ bscc: i2c@406300 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406300 0x58>;
+ interrupts = <26>;
+ interrupt-names = "upg_bscc";
+ status = "disabled";
+ };
+
+ bscd: i2c@406380 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406380 0x58>;
+ interrupts = <27>;
+ interrupt-names = "upg_bscd";
+ status = "disabled";
+ };
+
+ pwma: pwm@406580 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406580 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ watchdog: watchdog@4067e8 {
+ clocks = <&upg_clk>;
+ compatible = "brcm,bcm7038-wdt";
+ reg = <0x4067e8 0x14>;
+ status = "disabled";
+ };
+
+ upg_gio: gpio@406700 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x406700 0x80>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupts = <6>;
+ brcm,gpio-bank-widths = <32 32 32 18>;
+ };
+
+ ehci0: usb@488300 {
+ compatible = "brcm,bcm7125-ehci", "generic-ehci";
+ reg = <0x488300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <60>;
+ status = "disabled";
+ };
+
+ ohci0: usb@488400 {
+ compatible = "brcm,bcm7125-ohci", "generic-ohci";
+ reg = <0x488400 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <61>;
+ status = "disabled";
+ };
+
+ spi_l2_intc: interrupt-controller@411d00 {
+ compatible = "brcm,l2-intc";
+ reg = <0x411d00 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <79>;
+ };
+
+ qspi: spi@443000 {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ compatible = "brcm,spi-bcm-qspi",
+ "brcm,spi-brcmstb-qspi";
+ clocks = <&upg_clk>;
+ reg = <0x440920 0x4 0x443200 0x188 0x443000 0x50>;
+ reg-names = "cs_reg", "hif_mspi", "bspi";
+ interrupts = <0x0 0x1 0x2 0x3 0x4 0x5 0x6>;
+ interrupt-parent = <&spi_l2_intc>;
+ interrupt-names = "spi_lr_fullness_reached",
+ "spi_lr_session_aborted",
+ "spi_lr_impatient",
+ "spi_lr_session_done",
+ "spi_lr_overread",
+ "mspi_done",
+ "mspi_halted";
+ status = "disabled";
+ };
+
+ mspi: spi@406400 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,spi-bcm-qspi",
+ "brcm,spi-brcmstb-mspi";
+ clocks = <&upg_clk>;
+ reg = <0x406400 0x180>;
+ reg-names = "mspi";
+ interrupts = <0x14>;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupt-names = "mspi_done";
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7346.dtsi b/arch/mips/boot/dts/brcm/bcm7346.dtsi
new file mode 100644
index 000000000..2afa0dada
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm7346.dtsi
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm7346";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <163125000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ uart_clk: uart_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <81000000>;
+ };
+
+ upg_clk: upg_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+ };
+
+ rdb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges = <0 0x10000000 0x01000000>;
+
+ periph_intc: interrupt-controller@411400 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x411400 0x30>, <0x411600 0x30>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ sun_l2_intc: interrupt-controller@403000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x403000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <51>;
+ };
+
+ gisb-arb@400000 {
+ compatible = "brcm,bcm7400-gisb-arb";
+ reg = <0x400000 0xdc>;
+ native-endian;
+ interrupt-parent = <&sun_l2_intc>;
+ interrupts = <0>, <2>;
+ brcm,gisb-arb-master-mask = <0x673>;
+ brcm,gisb-arb-master-names = "ssp_0", "cpu_0", "bsp_0",
+ "rdc_0", "raaga_0",
+ "jtag_0", "svd_0";
+ };
+
+ upg_irq0_intc: interrupt-controller@406780 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x406780 0x8>;
+
+ brcm,int-map-mask = <0x44>, <0xf000000>;
+ brcm,int-fwd-mask = <0x70000>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <59>, <57>;
+ interrupt-names = "upg_main", "upg_bsc";
+ };
+
+ upg_aon_irq0_intc: interrupt-controller@408b80 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x408b80 0x8>;
+
+ brcm,int-map-mask = <0x40>, <0x8000000>, <0x100000>;
+ brcm,int-fwd-mask = <0>;
+ brcm,irq-can-wake;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <60>, <58>, <62>;
+ interrupt-names = "upg_main_aon", "upg_bsc_aon",
+ "upg_spi";
+ };
+
+ sun_top_ctrl: syscon@404000 {
+ compatible = "brcm,bcm7346-sun-top-ctrl", "syscon";
+ reg = <0x404000 0x51c>;
+ native-endian;
+ };
+
+ reboot {
+ compatible = "brcm,brcmstb-reboot";
+ syscon = <&sun_top_ctrl 0x304 0x308>;
+ };
+
+ uart0: serial@406900 {
+ compatible = "ns16550a";
+ reg = <0x406900 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <64>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart1: serial@406940 {
+ compatible = "ns16550a";
+ reg = <0x406940 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <65>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart2: serial@406980 {
+ compatible = "ns16550a";
+ reg = <0x406980 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <66>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ bsca: i2c@406200 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406200 0x58>;
+ interrupts = <24>;
+ interrupt-names = "upg_bsca";
+ status = "disabled";
+ };
+
+ bscb: i2c@406280 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406280 0x58>;
+ interrupts = <25>;
+ interrupt-names = "upg_bscb";
+ status = "disabled";
+ };
+
+ bscc: i2c@406300 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406300 0x58>;
+ interrupts = <26>;
+ interrupt-names = "upg_bscc";
+ status = "disabled";
+ };
+
+ bscd: i2c@406380 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406380 0x58>;
+ interrupts = <27>;
+ interrupt-names = "upg_bscd";
+ status = "disabled";
+ };
+
+ bsce: i2c@408980 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ reg = <0x408980 0x58>;
+ interrupts = <27>;
+ interrupt-names = "upg_bsce";
+ status = "disabled";
+ };
+
+ pwma: pwm@406580 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406580 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ pwmb: pwm@406800 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406800 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ watchdog: watchdog@4067e8 {
+ clocks = <&upg_clk>;
+ compatible = "brcm,bcm7038-wdt";
+ reg = <0x4067e8 0x14>;
+ status = "disabled";
+ };
+
+ aon_pm_l2_intc: interrupt-controller@408440 {
+ compatible = "brcm,l2-intc";
+ reg = <0x408440 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <53>;
+ brcm,irq-can-wake;
+ };
+
+ aon_ctrl: syscon@408000 {
+ compatible = "brcm,brcmstb-aon-ctrl";
+ reg = <0x408000 0x100>, <0x408200 0x200>;
+ reg-names = "aon-ctrl", "aon-sram";
+ };
+
+ timers: timer@4067c0 {
+ compatible = "brcm,brcmstb-timers";
+ reg = <0x4067c0 0x40>;
+ };
+
+ upg_gio: gpio@406700 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x406700 0x60>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupts = <6>;
+ brcm,gpio-bank-widths = <32 32 16>;
+ };
+
+ upg_gio_aon: gpio@408c00 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x408c00 0x60>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupts = <6>;
+ interrupts-extended = <&upg_aon_irq0_intc 6>,
+ <&aon_pm_l2_intc 5>;
+ wakeup-source;
+ brcm,gpio-bank-widths = <27 32 2>;
+ };
+
+ enet0: ethernet@430000 {
+ phy-mode = "internal";
+ phy-handle = <&phy1>;
+ mac-address = [ 00 10 18 36 23 1a ];
+ compatible = "brcm,genet-v2";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0x430000 0x4c8c>;
+ interrupts = <24>, <25>;
+ interrupt-parent = <&periph_intc>;
+ status = "disabled";
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v2";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy1: ethernet-phy@1 {
+ max-speed = <100>;
+ reg = <0x1>;
+ compatible = "brcm,40nm-ephy",
+ "ethernet-phy-ieee802.3-c22";
+ };
+ };
+ };
+
+ ehci0: usb@480300 {
+ compatible = "brcm,bcm7346-ehci", "generic-ehci";
+ reg = <0x480300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <68>;
+ status = "disabled";
+ };
+
+ ohci0: usb@480400 {
+ compatible = "brcm,bcm7346-ohci", "generic-ohci";
+ reg = <0x480400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <70>;
+ status = "disabled";
+ };
+
+ ehci1: usb@480500 {
+ compatible = "brcm,bcm7346-ehci", "generic-ehci";
+ reg = <0x480500 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <69>;
+ status = "disabled";
+ };
+
+ ohci1: usb@480600 {
+ compatible = "brcm,bcm7346-ohci", "generic-ohci";
+ reg = <0x480600 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <71>;
+ status = "disabled";
+ };
+
+ ehci2: usb@490300 {
+ compatible = "brcm,bcm7346-ehci", "generic-ehci";
+ reg = <0x490300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <73>;
+ status = "disabled";
+ };
+
+ ohci2: usb@490400 {
+ compatible = "brcm,bcm7346-ohci", "generic-ohci";
+ reg = <0x490400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <75>;
+ status = "disabled";
+ };
+
+ ehci3: usb@490500 {
+ compatible = "brcm,bcm7346-ehci", "generic-ehci";
+ reg = <0x490500 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <74>;
+ status = "disabled";
+ };
+
+ ohci3: usb@490600 {
+ compatible = "brcm,bcm7346-ohci", "generic-ohci";
+ reg = <0x490600 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <76>;
+ status = "disabled";
+ };
+
+ hif_l2_intc: interrupt-controller@411000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x411000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <30>;
+ };
+
+ nand: nand@412800 {
+ compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "nand";
+ reg = <0x412800 0x400>;
+ interrupt-parent = <&hif_l2_intc>;
+ interrupts = <24>;
+ status = "disabled";
+ };
+
+ sata: sata@181000 {
+ compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
+ reg-names = "ahci", "top-ctrl";
+ reg = <0x181000 0xa9c>, <0x180020 0x1c>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <40>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata0: sata-port@0 {
+ reg = <0>;
+ phys = <&sata_phy0>;
+ };
+
+ sata1: sata-port@1 {
+ reg = <1>;
+ phys = <&sata_phy1>;
+ };
+ };
+
+ sata_phy: sata-phy@180100 {
+ compatible = "brcm,bcm7425-sata-phy", "brcm,phy-sata3";
+ reg = <0x180100 0x0eff>;
+ reg-names = "phy";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata_phy0: sata-phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ };
+
+ sata_phy1: sata-phy@1 {
+ reg = <1>;
+ #phy-cells = <0>;
+ };
+ };
+
+ sdhci0: sdhci@413500 {
+ compatible = "brcm,bcm7425-sdhci";
+ reg = <0x413500 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <85>;
+ status = "disabled";
+ };
+
+ spi_l2_intc: interrupt-controller@411d00 {
+ compatible = "brcm,l2-intc";
+ reg = <0x411d00 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <31>;
+ };
+
+ qspi: spi@413000 {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ compatible = "brcm,spi-bcm-qspi",
+ "brcm,spi-brcmstb-qspi";
+ clocks = <&upg_clk>;
+ reg = <0x410920 0x4 0x413200 0x188 0x413000 0x50>;
+ reg-names = "cs_reg", "hif_mspi", "bspi";
+ interrupts = <0x0 0x1 0x2 0x3 0x4 0x5 0x6>;
+ interrupt-parent = <&spi_l2_intc>;
+ interrupt-names = "spi_lr_fullness_reached",
+ "spi_lr_session_aborted",
+ "spi_lr_impatient",
+ "spi_lr_session_done",
+ "spi_lr_overread",
+ "mspi_done",
+ "mspi_halted";
+ status = "disabled";
+ };
+
+ mspi: spi@408a00 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,spi-bcm-qspi",
+ "brcm,spi-brcmstb-mspi";
+ clocks = <&upg_clk>;
+ reg = <0x408a00 0x180>;
+ reg-names = "mspi";
+ interrupts = <0x14>;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupt-names = "mspi_done";
+ status = "disabled";
+ };
+
+ waketimer: waketimer@408e80 {
+ compatible = "brcm,brcmstb-waketimer";
+ reg = <0x408e80 0x14>;
+ interrupts = <0x3>;
+ interrupt-parent = <&aon_pm_l2_intc>;
+ interrupt-names = "timer";
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+ };
+
+ memory_controllers {
+ compatible = "simple-bus";
+ ranges = <0x0 0x103b0000 0xa000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory-controller@0 {
+ compatible = "brcm,brcmstb-memc", "simple-bus";
+ ranges = <0x0 0x0 0xa000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memc-arb@1000 {
+ compatible = "brcm,brcmstb-memc-arb";
+ reg = <0x1000 0x248>;
+ };
+
+ memc-ddr@2000 {
+ compatible = "brcm,brcmstb-memc-ddr";
+ reg = <0x2000 0x300>;
+ };
+
+ ddr-phy@6000 {
+ compatible = "brcm,brcmstb-ddr-phy";
+ reg = <0x6000 0xc8>;
+ };
+
+ shimphy@8000 {
+ compatible = "brcm,brcmstb-ddr-shimphy";
+ reg = <0x8000 0x13c>;
+ };
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7358.dtsi b/arch/mips/boot/dts/brcm/bcm7358.dtsi
new file mode 100644
index 000000000..6375fc77f
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm7358.dtsi
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm7358";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <375000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips3300";
+ device_type = "cpu";
+ reg = <0>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ uart_clk: uart_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <81000000>;
+ };
+
+ upg_clk: upg_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+ };
+
+ rdb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges = <0 0x10000000 0x01000000>;
+
+ periph_intc: interrupt-controller@411400 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x411400 0x30>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>;
+ };
+
+ sun_l2_intc: interrupt-controller@403000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x403000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <48>;
+ };
+
+ gisb-arb@400000 {
+ compatible = "brcm,bcm7400-gisb-arb";
+ reg = <0x400000 0xdc>;
+ native-endian;
+ interrupt-parent = <&sun_l2_intc>;
+ interrupts = <0>, <2>;
+ brcm,gisb-arb-master-mask = <0x2f3>;
+ brcm,gisb-arb-master-names = "ssp_0", "cpu_0", "bsp_0",
+ "rdc_0", "raaga_0",
+ "avd_0", "jtag_0";
+ };
+
+ upg_irq0_intc: interrupt-controller@406600 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x406600 0x8>;
+
+ brcm,int-map-mask = <0x44>, <0x7000000>;
+ brcm,int-fwd-mask = <0x70000>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <56>, <54>;
+ interrupt-names = "upg_main", "upg_bsc";
+ };
+
+ upg_aon_irq0_intc: interrupt-controller@408b80 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x408b80 0x8>;
+
+ brcm,int-map-mask = <0x40>, <0x8000000>, <0x100000>;
+ brcm,int-fwd-mask = <0>;
+ brcm,irq-can-wake;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <57>, <55>, <59>;
+ interrupt-names = "upg_main_aon", "upg_bsc_aon",
+ "upg_spi";
+ };
+
+ sun_top_ctrl: syscon@404000 {
+ compatible = "brcm,bcm7358-sun-top-ctrl", "syscon";
+ reg = <0x404000 0x51c>;
+ native-endian;
+ };
+
+ reboot {
+ compatible = "brcm,brcmstb-reboot";
+ syscon = <&sun_top_ctrl 0x304 0x308>;
+ };
+
+ uart0: serial@406800 {
+ compatible = "ns16550a";
+ reg = <0x406800 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <61>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart1: serial@406840 {
+ compatible = "ns16550a";
+ reg = <0x406840 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <62>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart2: serial@406880 {
+ compatible = "ns16550a";
+ reg = <0x406880 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <63>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ bsca: i2c@406200 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406200 0x58>;
+ interrupts = <24>;
+ interrupt-names = "upg_bsca";
+ status = "disabled";
+ };
+
+ bscb: i2c@406280 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406280 0x58>;
+ interrupts = <25>;
+ interrupt-names = "upg_bscb";
+ status = "disabled";
+ };
+
+ bscc: i2c@406300 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406300 0x58>;
+ interrupts = <26>;
+ interrupt-names = "upg_bscc";
+ status = "disabled";
+ };
+
+ bscd: i2c@408980 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ reg = <0x408980 0x58>;
+ interrupts = <27>;
+ interrupt-names = "upg_bscd";
+ status = "disabled";
+ };
+
+ pwma: pwm@406400 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406400 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ pwmb: pwm@406700 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406700 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ watchdog: watchdog@4066a8 {
+ clocks = <&upg_clk>;
+ compatible = "brcm,bcm7038-wdt";
+ reg = <0x4066a8 0x14>;
+ status = "disabled";
+ };
+
+ aon_pm_l2_intc: interrupt-controller@408240 {
+ compatible = "brcm,l2-intc";
+ reg = <0x408240 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <50>;
+ brcm,irq-can-wake;
+ };
+
+ upg_gio: gpio@406500 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x406500 0xa0>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupts = <6>;
+ brcm,gpio-bank-widths = <32 32 32 29 4>;
+ };
+
+ upg_gio_aon: gpio@408c00 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x408c00 0x60>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupts = <6>;
+ interrupts-extended = <&upg_aon_irq0_intc 6>,
+ <&aon_pm_l2_intc 5>;
+ wakeup-source;
+ brcm,gpio-bank-widths = <21 32 2>;
+ };
+
+ enet0: ethernet@430000 {
+ phy-mode = "internal";
+ phy-handle = <&phy1>;
+ mac-address = [ 00 10 18 36 23 1a ];
+ compatible = "brcm,genet-v2";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0x430000 0x4c8c>;
+ interrupts = <24>, <25>;
+ interrupt-parent = <&periph_intc>;
+ status = "disabled";
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v2";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy1: ethernet-phy@1 {
+ max-speed = <100>;
+ reg = <0x1>;
+ compatible = "brcm,40nm-ephy",
+ "ethernet-phy-ieee802.3-c22";
+ };
+ };
+ };
+
+ ehci0: usb@480300 {
+ compatible = "brcm,bcm7358-ehci", "generic-ehci";
+ reg = <0x480300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <65>;
+ status = "disabled";
+ };
+
+ ohci0: usb@480400 {
+ compatible = "brcm,bcm7358-ohci", "generic-ohci";
+ reg = <0x480400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <66>;
+ status = "disabled";
+ };
+
+ hif_l2_intc: interrupt-controller@411000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x411000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <30>;
+ };
+
+ nand: nand@412800 {
+ compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "nand";
+ reg = <0x412800 0x400>;
+ interrupt-parent = <&hif_l2_intc>;
+ interrupts = <24>;
+ status = "disabled";
+ };
+
+ spi_l2_intc: interrupt-controller@411d00 {
+ compatible = "brcm,l2-intc";
+ reg = <0x411d00 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <31>;
+ };
+
+ qspi: spi@413000 {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ compatible = "brcm,spi-bcm-qspi",
+ "brcm,spi-brcmstb-qspi";
+ clocks = <&upg_clk>;
+ reg = <0x410920 0x4 0x413200 0x188 0x413000 0x50>;
+ reg-names = "cs_reg", "hif_mspi", "bspi";
+ interrupts = <0x0 0x1 0x2 0x3 0x4 0x5 0x6>;
+ interrupt-parent = <&spi_l2_intc>;
+ interrupt-names = "spi_lr_fullness_reached",
+ "spi_lr_session_aborted",
+ "spi_lr_impatient",
+ "spi_lr_session_done",
+ "spi_lr_overread",
+ "mspi_done",
+ "mspi_halted";
+ status = "disabled";
+ };
+
+ mspi: spi@408a00 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,spi-bcm-qspi",
+ "brcm,spi-brcmstb-mspi";
+ clocks = <&upg_clk>;
+ reg = <0x408a00 0x180>;
+ reg-names = "mspi";
+ interrupts = <0x14>;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupt-names = "mspi_done";
+ status = "disabled";
+ };
+
+ waketimer: waketimer@408e80 {
+ compatible = "brcm,brcmstb-waketimer";
+ reg = <0x408e80 0x14>;
+ interrupts = <0x3>;
+ interrupt-parent = <&aon_pm_l2_intc>;
+ interrupt-names = "timer";
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7360.dtsi b/arch/mips/boot/dts/brcm/bcm7360.dtsi
new file mode 100644
index 000000000..a57cacea9
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm7360.dtsi
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm7360";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <375000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips3300";
+ device_type = "cpu";
+ reg = <0>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ uart_clk: uart_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <81000000>;
+ };
+
+ upg_clk: upg_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+ };
+
+ rdb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges = <0 0x10000000 0x01000000>;
+
+ periph_intc: interrupt-controller@411400 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x411400 0x30>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>;
+ };
+
+ sun_l2_intc: interrupt-controller@403000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x403000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <48>;
+ };
+
+ gisb-arb@400000 {
+ compatible = "brcm,bcm7400-gisb-arb";
+ reg = <0x400000 0xdc>;
+ native-endian;
+ interrupt-parent = <&sun_l2_intc>;
+ interrupts = <0>, <2>;
+ brcm,gisb-arb-master-mask = <0x2f3>;
+ brcm,gisb-arb-master-names = "ssp_0", "cpu_0", "bsp_0",
+ "rdc_0", "raaga_0",
+ "avd_0", "jtag_0";
+ };
+
+ upg_irq0_intc: interrupt-controller@406600 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x406600 0x8>;
+
+ brcm,int-map-mask = <0x44>, <0x7000000>;
+ brcm,int-fwd-mask = <0x70000>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <56>, <54>;
+ interrupt-names = "upg_main", "upg_bsc";
+ };
+
+ upg_aon_irq0_intc: interrupt-controller@408b80 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x408b80 0x8>;
+
+ brcm,int-map-mask = <0x40>, <0x8000000>, <0x100000>;
+ brcm,int-fwd-mask = <0>;
+ brcm,irq-can-wake;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <57>, <55>, <59>;
+ interrupt-names = "upg_main_aon", "upg_bsc_aon",
+ "upg_spi";
+ };
+
+ sun_top_ctrl: syscon@404000 {
+ compatible = "brcm,bcm7360-sun-top-ctrl", "syscon";
+ reg = <0x404000 0x51c>;
+ native-endian;
+ };
+
+ reboot {
+ compatible = "brcm,brcmstb-reboot";
+ syscon = <&sun_top_ctrl 0x304 0x308>;
+ };
+
+ uart0: serial@406800 {
+ compatible = "ns16550a";
+ reg = <0x406800 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <61>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart1: serial@406840 {
+ compatible = "ns16550a";
+ reg = <0x406840 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <62>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart2: serial@406880 {
+ compatible = "ns16550a";
+ reg = <0x406880 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <63>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ bsca: i2c@406200 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406200 0x58>;
+ interrupts = <24>;
+ interrupt-names = "upg_bsca";
+ status = "disabled";
+ };
+
+ bscb: i2c@406280 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406280 0x58>;
+ interrupts = <25>;
+ interrupt-names = "upg_bscb";
+ status = "disabled";
+ };
+
+ bscc: i2c@406300 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406300 0x58>;
+ interrupts = <26>;
+ interrupt-names = "upg_bscc";
+ status = "disabled";
+ };
+
+ bscd: i2c@408980 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ reg = <0x408980 0x58>;
+ interrupts = <27>;
+ interrupt-names = "upg_bscd";
+ status = "disabled";
+ };
+
+ pwma: pwm@406400 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406400 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ watchdog: watchdog@4066a8 {
+ clocks = <&upg_clk>;
+ compatible = "brcm,bcm7038-wdt";
+ reg = <0x4066a8 0x14>;
+ status = "disabled";
+ };
+
+ aon_pm_l2_intc: interrupt-controller@408440 {
+ compatible = "brcm,l2-intc";
+ reg = <0x408440 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <50>;
+ brcm,irq-can-wake;
+ };
+
+ aon_ctrl: syscon@408000 {
+ compatible = "brcm,brcmstb-aon-ctrl";
+ reg = <0x408000 0x100>, <0x408200 0x200>;
+ reg-names = "aon-ctrl", "aon-sram";
+ };
+
+ timers: timer@406680 {
+ compatible = "brcm,brcmstb-timers";
+ reg = <0x406680 0x40>;
+ };
+
+ upg_gio: gpio@406500 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x406500 0xa0>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupts = <6>;
+ brcm,gpio-bank-widths = <32 32 32 29 4>;
+ };
+
+ upg_gio_aon: gpio@408c00 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x408c00 0x60>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupts = <6>;
+ interrupts-extended = <&upg_aon_irq0_intc 6>,
+ <&aon_pm_l2_intc 5>;
+ wakeup-source;
+ brcm,gpio-bank-widths = <21 32 2>;
+ };
+
+ enet0: ethernet@430000 {
+ phy-mode = "internal";
+ phy-handle = <&phy1>;
+ mac-address = [ 00 10 18 36 23 1a ];
+ compatible = "brcm,genet-v2";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0x430000 0x4c8c>;
+ interrupts = <24>, <25>;
+ interrupt-parent = <&periph_intc>;
+ status = "disabled";
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v2";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy1: ethernet-phy@1 {
+ max-speed = <100>;
+ reg = <0x1>;
+ compatible = "brcm,40nm-ephy",
+ "ethernet-phy-ieee802.3-c22";
+ };
+ };
+ };
+
+ ehci0: usb@480300 {
+ compatible = "brcm,bcm7360-ehci", "generic-ehci";
+ reg = <0x480300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <65>;
+ status = "disabled";
+ };
+
+ ohci0: usb@480400 {
+ compatible = "brcm,bcm7360-ohci", "generic-ohci";
+ reg = <0x480400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <66>;
+ status = "disabled";
+ };
+
+ hif_l2_intc: interrupt-controller@411000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x411000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <30>;
+ };
+
+ nand: nand@412800 {
+ compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "nand";
+ reg = <0x412800 0x400>;
+ interrupt-parent = <&hif_l2_intc>;
+ interrupts = <24>;
+ status = "disabled";
+ };
+
+ sata: sata@181000 {
+ compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
+ reg-names = "ahci", "top-ctrl";
+ reg = <0x181000 0xa9c>, <0x180020 0x1c>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <86>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata0: sata-port@0 {
+ reg = <0>;
+ phys = <&sata_phy0>;
+ };
+
+ sata1: sata-port@1 {
+ reg = <1>;
+ phys = <&sata_phy1>;
+ };
+ };
+
+ sata_phy: sata-phy@180100 {
+ compatible = "brcm,bcm7425-sata-phy", "brcm,phy-sata3";
+ reg = <0x180100 0x0eff>;
+ reg-names = "phy";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata_phy0: sata-phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ };
+
+ sata_phy1: sata-phy@1 {
+ reg = <1>;
+ #phy-cells = <0>;
+ };
+ };
+
+ sdhci0: sdhci@410000 {
+ compatible = "brcm,bcm7425-sdhci";
+ reg = <0x410000 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <82>;
+ status = "disabled";
+ };
+
+ spi_l2_intc: interrupt-controller@411d00 {
+ compatible = "brcm,l2-intc";
+ reg = <0x411d00 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <31>;
+ };
+
+ qspi: spi@413000 {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ compatible = "brcm,spi-bcm-qspi",
+ "brcm,spi-brcmstb-qspi";
+ clocks = <&upg_clk>;
+ reg = <0x410920 0x4 0x413200 0x188 0x413000 0x50>;
+ reg-names = "cs_reg", "hif_mspi", "bspi";
+ interrupts = <0x0 0x1 0x2 0x3 0x4 0x5 0x6>;
+ interrupt-parent = <&spi_l2_intc>;
+ interrupt-names = "spi_lr_fullness_reached",
+ "spi_lr_session_aborted",
+ "spi_lr_impatient",
+ "spi_lr_session_done",
+ "spi_lr_overread",
+ "mspi_done",
+ "mspi_halted";
+ status = "disabled";
+ };
+
+ mspi: spi@408a00 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,spi-bcm-qspi",
+ "brcm,spi-brcmstb-mspi";
+ clocks = <&upg_clk>;
+ reg = <0x408a00 0x180>;
+ reg-names = "mspi";
+ interrupts = <0x14>;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupt-names = "mspi_done";
+ status = "disabled";
+ };
+
+ waketimer: waketimer@408e80 {
+ compatible = "brcm,brcmstb-waketimer";
+ reg = <0x408e80 0x14>;
+ interrupts = <0x3>;
+ interrupt-parent = <&aon_pm_l2_intc>;
+ interrupt-names = "timer";
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+ };
+
+ memory_controllers {
+ compatible = "simple-bus";
+ ranges = <0x0 0x103b0000 0xa000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory-controller@0 {
+ compatible = "brcm,brcmstb-memc", "simple-bus";
+ ranges = <0x0 0x0 0xa000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memc-arb@1000 {
+ compatible = "brcm,brcmstb-memc-arb";
+ reg = <0x1000 0x248>;
+ };
+
+ memc-ddr@2000 {
+ compatible = "brcm,brcmstb-memc-ddr";
+ reg = <0x2000 0x300>;
+ };
+
+ ddr-phy@6000 {
+ compatible = "brcm,brcmstb-ddr-phy";
+ reg = <0x6000 0xc8>;
+ };
+
+ shimphy@8000 {
+ compatible = "brcm,brcmstb-ddr-shimphy";
+ reg = <0x8000 0x13c>;
+ };
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7362.dtsi b/arch/mips/boot/dts/brcm/bcm7362.dtsi
new file mode 100644
index 000000000..728b9e9f8
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm7362.dtsi
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm7362";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <375000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4380";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips4380";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ uart_clk: uart_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <81000000>;
+ };
+
+ upg_clk: upg_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+ };
+
+ rdb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges = <0 0x10000000 0x01000000>;
+
+ periph_intc: interrupt-controller@411400 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x411400 0x30>, <0x411600 0x30>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ sun_l2_intc: interrupt-controller@403000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x403000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <48>;
+ };
+
+ gisb-arb@400000 {
+ compatible = "brcm,bcm7400-gisb-arb";
+ reg = <0x400000 0xdc>;
+ native-endian;
+ interrupt-parent = <&sun_l2_intc>;
+ interrupts = <0>, <2>;
+ brcm,gisb-arb-master-mask = <0x2f3>;
+ brcm,gisb-arb-master-names = "ssp_0", "cpu_0", "bsp_0",
+ "rdc_0", "raaga_0",
+ "avd_0", "jtag_0";
+ };
+
+ upg_irq0_intc: interrupt-controller@406600 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x406600 0x8>;
+
+ brcm,int-map-mask = <0x44>, <0x7000000>;
+ brcm,int-fwd-mask = <0x70000>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <56>, <54>;
+ interrupt-names = "upg_main", "upg_bsc";
+ };
+
+ upg_aon_irq0_intc: interrupt-controller@408b80 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x408b80 0x8>;
+
+ brcm,int-map-mask = <0x40>, <0x8000000>, <0x100000>;
+ brcm,int-fwd-mask = <0>;
+ brcm,irq-can-wake;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <57>, <55>, <59>;
+ interrupt-names = "upg_main_aon", "upg_bsc_aon",
+ "upg_spi";
+ };
+
+ sun_top_ctrl: syscon@404000 {
+ compatible = "brcm,bcm7362-sun-top-ctrl", "syscon";
+ reg = <0x404000 0x51c>;
+ native-endian;
+ };
+
+ reboot {
+ compatible = "brcm,brcmstb-reboot";
+ syscon = <&sun_top_ctrl 0x304 0x308>;
+ };
+
+ uart0: serial@406800 {
+ compatible = "ns16550a";
+ reg = <0x406800 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <61>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart1: serial@406840 {
+ compatible = "ns16550a";
+ reg = <0x406840 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <62>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart2: serial@406880 {
+ compatible = "ns16550a";
+ reg = <0x406880 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <63>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ bsca: i2c@406200 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406200 0x58>;
+ interrupts = <24>;
+ interrupt-names = "upg_bsca";
+ status = "disabled";
+ };
+
+ bscb: i2c@406280 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406280 0x58>;
+ interrupts = <25>;
+ interrupt-names = "upg_bscb";
+ status = "disabled";
+ };
+
+ bscd: i2c@408980 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ reg = <0x408980 0x58>;
+ interrupts = <27>;
+ interrupt-names = "upg_bscd";
+ status = "disabled";
+ };
+
+ pwma: pwm@406400 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406400 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ watchdog: watchdog@4066a8 {
+ clocks = <&upg_clk>;
+ compatible = "brcm,bcm7038-wdt";
+ reg = <0x4066a8 0x14>;
+ status = "disabled";
+ };
+
+ aon_pm_l2_intc: interrupt-controller@408440 {
+ compatible = "brcm,l2-intc";
+ reg = <0x408440 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <50>;
+ brcm,irq-can-wake;
+ };
+
+ aon_ctrl: syscon@408000 {
+ compatible = "brcm,brcmstb-aon-ctrl";
+ reg = <0x408000 0x100>, <0x408200 0x200>;
+ reg-names = "aon-ctrl", "aon-sram";
+ };
+
+ timers: timer@406680 {
+ compatible = "brcm,brcmstb-timers";
+ reg = <0x406680 0x40>;
+ };
+
+ upg_gio: gpio@406500 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x406500 0xa0>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupts = <6>;
+ brcm,gpio-bank-widths = <32 32 32 29 4>;
+ };
+
+ upg_gio_aon: gpio@408c00 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x408c00 0x60>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupts = <6>;
+ interrupts-extended = <&upg_aon_irq0_intc 6>,
+ <&aon_pm_l2_intc 5>;
+ wakeup-source;
+ brcm,gpio-bank-widths = <21 32 2>;
+ };
+
+ enet0: ethernet@430000 {
+ phy-mode = "internal";
+ phy-handle = <&phy1>;
+ mac-address = [ 00 10 18 36 23 1a ];
+ compatible = "brcm,genet-v2";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0x430000 0x4c8c>;
+ interrupts = <24>, <25>;
+ interrupt-parent = <&periph_intc>;
+ status = "disabled";
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v2";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy1: ethernet-phy@1 {
+ max-speed = <100>;
+ reg = <0x1>;
+ compatible = "brcm,40nm-ephy",
+ "ethernet-phy-ieee802.3-c22";
+ };
+ };
+ };
+
+ ehci0: usb@480300 {
+ compatible = "brcm,bcm7362-ehci", "generic-ehci";
+ reg = <0x480300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <65>;
+ status = "disabled";
+ };
+
+ ohci0: usb@480400 {
+ compatible = "brcm,bcm7362-ohci", "generic-ohci";
+ reg = <0x480400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <66>;
+ status = "disabled";
+ };
+
+ hif_l2_intc: interrupt-controller@411000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x411000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <30>;
+ };
+
+ nand: nand@412800 {
+ compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "nand";
+ reg = <0x412800 0x400>;
+ interrupt-parent = <&hif_l2_intc>;
+ interrupts = <24>;
+ status = "disabled";
+ };
+
+ sata: sata@181000 {
+ compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
+ reg-names = "ahci", "top-ctrl";
+ reg = <0x181000 0xa9c>, <0x180020 0x1c>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <86>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata0: sata-port@0 {
+ reg = <0>;
+ phys = <&sata_phy0>;
+ };
+
+ sata1: sata-port@1 {
+ reg = <1>;
+ phys = <&sata_phy1>;
+ };
+ };
+
+ sata_phy: sata-phy@180100 {
+ compatible = "brcm,bcm7425-sata-phy", "brcm,phy-sata3";
+ reg = <0x180100 0x0eff>;
+ reg-names = "phy";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata_phy0: sata-phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ };
+
+ sata_phy1: sata-phy@1 {
+ reg = <1>;
+ #phy-cells = <0>;
+ };
+ };
+
+ sdhci0: sdhci@410000 {
+ compatible = "brcm,bcm7425-sdhci";
+ reg = <0x410000 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <82>;
+ status = "disabled";
+ };
+
+ spi_l2_intc: interrupt-controller@411d00 {
+ compatible = "brcm,l2-intc";
+ reg = <0x411d00 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <31>;
+ };
+
+ qspi: spi@413000 {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ compatible = "brcm,spi-bcm-qspi",
+ "brcm,spi-brcmstb-qspi";
+ clocks = <&upg_clk>;
+ reg = <0x410920 0x4 0x413200 0x188 0x413000 0x50>;
+ reg-names = "cs_reg", "hif_mspi", "bspi";
+ interrupts = <0x0 0x1 0x2 0x3 0x4 0x5 0x6>;
+ interrupt-parent = <&spi_l2_intc>;
+ interrupt-names = "spi_lr_fullness_reached",
+ "spi_lr_session_aborted",
+ "spi_lr_impatient",
+ "spi_lr_session_done",
+ "spi_lr_overread",
+ "mspi_done",
+ "mspi_halted";
+ status = "disabled";
+ };
+
+ mspi: spi@408a00 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,spi-bcm-qspi",
+ "brcm,spi-brcmstb-mspi";
+ clocks = <&upg_clk>;
+ reg = <0x408a00 0x180>;
+ reg-names = "mspi";
+ interrupts = <0x14>;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupt-names = "mspi_done";
+ status = "disabled";
+ };
+
+ waketimer: waketimer@408e80 {
+ compatible = "brcm,brcmstb-waketimer";
+ reg = <0x408e80 0x14>;
+ interrupts = <0x3>;
+ interrupt-parent = <&aon_pm_l2_intc>;
+ interrupt-names = "timer";
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+ };
+
+ memory_controllers {
+ compatible = "simple-bus";
+ ranges = <0x0 0x103b0000 0xa000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory-controller@0 {
+ compatible = "brcm,brcmstb-memc", "simple-bus";
+ ranges = <0x0 0x0 0xa000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memc-arb@1000 {
+ compatible = "brcm,brcmstb-memc-arb";
+ reg = <0x1000 0x248>;
+ };
+
+ memc-ddr@2000 {
+ compatible = "brcm,brcmstb-memc-ddr";
+ reg = <0x2000 0x300>;
+ };
+
+ ddr-phy@6000 {
+ compatible = "brcm,brcmstb-ddr-phy";
+ reg = <0x6000 0xc8>;
+ };
+
+ shimphy@8000 {
+ compatible = "brcm,brcmstb-ddr-shimphy";
+ reg = <0x8000 0x13c>;
+ };
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7420.dtsi b/arch/mips/boot/dts/brcm/bcm7420.dtsi
new file mode 100644
index 000000000..9540c27f1
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm7420.dtsi
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm7420";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <93750000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ uart_clk: uart_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <81000000>;
+ };
+
+ upg_clk: upg_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+ };
+
+ rdb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges = <0 0x10000000 0x01000000>;
+
+ periph_intc: interrupt-controller@441400 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x441400 0x30>, <0x441600 0x30>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ sun_l2_intc: interrupt-controller@401800 {
+ compatible = "brcm,l2-intc";
+ reg = <0x401800 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <23>;
+ };
+
+ gisb-arb@400000 {
+ compatible = "brcm,bcm7400-gisb-arb";
+ reg = <0x400000 0xdc>;
+ native-endian;
+ interrupt-parent = <&sun_l2_intc>;
+ interrupts = <0>, <2>;
+ brcm,gisb-arb-master-mask = <0x3ff>;
+ brcm,gisb-arb-master-names = "ssp_0", "cpu_0", "pci_0",
+ "pcie_0", "bsp_0", "rdc_0",
+ "rptd_0", "avd_0", "avd_1",
+ "jtag_0";
+ };
+
+ upg_irq0_intc: interrupt-controller@406780 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x406780 0x8>;
+
+ brcm,int-map-mask = <0x44>, <0x1f000000>, <0x100000>;
+ brcm,int-fwd-mask = <0x70000>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <18>, <19>, <20>;
+ interrupt-names = "upg_main", "upg_bsc", "upg_spi";
+ };
+
+ sun_top_ctrl: syscon@404000 {
+ compatible = "brcm,bcm7420-sun-top-ctrl", "syscon";
+ reg = <0x404000 0x60c>;
+ native-endian;
+ };
+
+ reboot {
+ compatible = "brcm,bcm7038-reboot";
+ syscon = <&sun_top_ctrl 0x8 0x14>;
+ };
+
+ uart0: serial@406b00 {
+ compatible = "ns16550a";
+ reg = <0x406b00 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <21>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart1: serial@406b40 {
+ compatible = "ns16550a";
+ reg = <0x406b40 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <64>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart2: serial@406b80 {
+ compatible = "ns16550a";
+ reg = <0x406b80 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <65>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ bsca: i2c@406200 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406200 0x58>;
+ interrupts = <24>;
+ interrupt-names = "upg_bsca";
+ status = "disabled";
+ };
+
+ bscb: i2c@406280 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406280 0x58>;
+ interrupts = <25>;
+ interrupt-names = "upg_bscb";
+ status = "disabled";
+ };
+
+ bscc: i2c@406300 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406300 0x58>;
+ interrupts = <26>;
+ interrupt-names = "upg_bscc";
+ status = "disabled";
+ };
+
+ bscd: i2c@406380 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406380 0x58>;
+ interrupts = <27>;
+ interrupt-names = "upg_bscd";
+ status = "disabled";
+ };
+
+ bsce: i2c@406800 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406800 0x58>;
+ interrupts = <28>;
+ interrupt-names = "upg_bsce";
+ status = "disabled";
+ };
+
+ pwma: pwm@406580 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406580 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ pwmb: pwm@406880 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406880 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ watchdog: watchdog@4067e8 {
+ clocks = <&upg_clk>;
+ compatible = "brcm,bcm7038-wdt";
+ reg = <0x4067e8 0x14>;
+ status = "disabled";
+ };
+
+ upg_gio: gpio@406700 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x406700 0x80>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupts = <6>;
+ brcm,gpio-bank-widths = <32 32 32 27>;
+ };
+
+ enet0: ethernet@468000 {
+ phy-mode = "internal";
+ phy-handle = <&phy1>;
+ mac-address = [ 00 10 18 36 23 1a ];
+ compatible = "brcm,genet-v1";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0x468000 0x3c8c>;
+ interrupts = <69>, <79>;
+ interrupt-parent = <&periph_intc>;
+ status = "disabled";
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v1";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy1: ethernet-phy@1 {
+ max-speed = <100>;
+ reg = <0x1>;
+ compatible = "brcm,65nm-ephy",
+ "ethernet-phy-ieee802.3-c22";
+ };
+ };
+ };
+
+ ehci0: usb@488300 {
+ compatible = "brcm,bcm7420-ehci", "generic-ehci";
+ reg = <0x488300 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <60>;
+ status = "disabled";
+ };
+
+ ohci0: usb@488400 {
+ compatible = "brcm,bcm7420-ohci", "generic-ohci";
+ reg = <0x488400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <61>;
+ status = "disabled";
+ };
+
+ ehci1: usb@488500 {
+ compatible = "brcm,bcm7420-ehci", "generic-ehci";
+ reg = <0x488500 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <55>;
+ status = "disabled";
+ };
+
+ ohci1: usb@488600 {
+ compatible = "brcm,bcm7420-ohci", "generic-ohci";
+ reg = <0x488600 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <62>;
+ status = "disabled";
+ };
+
+ spi_l2_intc: interrupt-controller@411d00 {
+ compatible = "brcm,l2-intc";
+ reg = <0x411d00 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <78>;
+ };
+
+ qspi: spi@443000 {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ compatible = "brcm,spi-bcm-qspi",
+ "brcm,spi-brcmstb-qspi";
+ clocks = <&upg_clk>;
+ reg = <0x440920 0x4 0x443200 0x188 0x443000 0x50>;
+ reg-names = "cs_reg", "hif_mspi", "bspi";
+ interrupts = <0x0 0x1 0x2 0x3 0x4 0x5 0x6>;
+ interrupt-parent = <&spi_l2_intc>;
+ interrupt-names = "spi_lr_fullness_reached",
+ "spi_lr_session_aborted",
+ "spi_lr_impatient",
+ "spi_lr_session_done",
+ "spi_lr_overread",
+ "mspi_done",
+ "mspi_halted";
+ status = "disabled";
+ };
+
+ mspi: spi@406400 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,spi-bcm-qspi",
+ "brcm,spi-brcmstb-mspi";
+ clocks = <&upg_clk>;
+ reg = <0x406400 0x180>;
+ reg-names = "mspi";
+ interrupts = <0x14>;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupt-names = "mspi_done";
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7425.dtsi b/arch/mips/boot/dts/brcm/bcm7425.dtsi
new file mode 100644
index 000000000..aa0b2d39c
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm7425.dtsi
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm7425";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <163125000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips5000";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ uart_clk: uart_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <81000000>;
+ };
+
+ upg_clk: upg_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+ };
+
+ rdb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges = <0 0x10000000 0x01000000>;
+
+ periph_intc: interrupt-controller@41a400 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x41a400 0x30>, <0x41a600 0x30>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ sun_l2_intc: interrupt-controller@403000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x403000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <47>;
+ };
+
+ gisb-arb@400000 {
+ compatible = "brcm,bcm7400-gisb-arb";
+ reg = <0x400000 0xdc>;
+ native-endian;
+ interrupt-parent = <&sun_l2_intc>;
+ interrupts = <0>, <2>;
+ brcm,gisb-arb-master-mask = <0x177b>;
+ brcm,gisb-arb-master-names = "ssp_0", "cpu_0", "pcie_0",
+ "bsp_0", "rdc_0",
+ "raaga_0", "avd_1",
+ "jtag_0", "svd_0",
+ "vice_0";
+ };
+
+ upg_irq0_intc: interrupt-controller@406780 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x406780 0x8>;
+
+ brcm,int-map-mask = <0x44>, <0x7000000>;
+ brcm,int-fwd-mask = <0x70000>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <55>, <53>;
+ interrupt-names = "upg_main", "upg_bsc";
+ };
+
+ upg_aon_irq0_intc: interrupt-controller@409480 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x409480 0x8>;
+
+ brcm,int-map-mask = <0x40>, <0x18000000>, <0x100000>;
+ brcm,int-fwd-mask = <0>;
+ brcm,irq-can-wake;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <56>, <54>, <59>;
+ interrupt-names = "upg_main_aon", "upg_bsc_aon",
+ "upg_spi";
+ };
+
+ sun_top_ctrl: syscon@404000 {
+ compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
+ reg = <0x404000 0x51c>;
+ native-endian;
+ };
+
+ reboot {
+ compatible = "brcm,brcmstb-reboot";
+ syscon = <&sun_top_ctrl 0x304 0x308>;
+ };
+
+ uart0: serial@406b00 {
+ compatible = "ns16550a";
+ reg = <0x406b00 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <61>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart1: serial@406b40 {
+ compatible = "ns16550a";
+ reg = <0x406b40 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <62>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart2: serial@406b80 {
+ compatible = "ns16550a";
+ reg = <0x406b80 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <63>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ bsca: i2c@409180 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ reg = <0x409180 0x58>;
+ interrupts = <27>;
+ interrupt-names = "upg_bsca";
+ status = "disabled";
+ };
+
+ bscb: i2c@409400 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ reg = <0x409400 0x58>;
+ interrupts = <28>;
+ interrupt-names = "upg_bscb";
+ status = "disabled";
+ };
+
+ bscc: i2c@406200 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406200 0x58>;
+ interrupts = <24>;
+ interrupt-names = "upg_bscc";
+ status = "disabled";
+ };
+
+ bscd: i2c@406280 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406280 0x58>;
+ interrupts = <25>;
+ interrupt-names = "upg_bscd";
+ status = "disabled";
+ };
+
+ bsce: i2c@406300 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406300 0x58>;
+ interrupts = <26>;
+ interrupt-names = "upg_bsce";
+ status = "disabled";
+ };
+
+ pwma: pwm@406580 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406580 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ pwmb: pwm@406800 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406800 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ watchdog: watchdog@4067e8 {
+ clocks = <&upg_clk>;
+ compatible = "brcm,bcm7038-wdt";
+ reg = <0x4067e8 0x14>;
+ status = "disabled";
+ };
+
+ aon_pm_l2_intc: interrupt-controller@408440 {
+ compatible = "brcm,l2-intc";
+ reg = <0x408440 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <49>;
+ brcm,irq-can-wake;
+ };
+
+ aon_ctrl: syscon@408000 {
+ compatible = "brcm,brcmstb-aon-ctrl";
+ reg = <0x408000 0x100>, <0x408200 0x200>;
+ reg-names = "aon-ctrl", "aon-sram";
+ };
+
+ timers: timer@4067c0 {
+ compatible = "brcm,brcmstb-timers";
+ reg = <0x4067c0 0x40>;
+ };
+
+ upg_gio: gpio@406700 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x406700 0x80>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupts = <6>;
+ brcm,gpio-bank-widths = <32 32 32 21>;
+ };
+
+ upg_gio_aon: gpio@4094c0 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x4094c0 0x40>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupts = <6>;
+ interrupts-extended = <&upg_aon_irq0_intc 6>,
+ <&aon_pm_l2_intc 5>;
+ wakeup-source;
+ brcm,gpio-bank-widths = <18 4>;
+ };
+
+ enet0: ethernet@b80000 {
+ phy-mode = "internal";
+ phy-handle = <&phy1>;
+ mac-address = [ 00 10 18 36 23 1a ];
+ compatible = "brcm,genet-v3";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0xb80000 0x11c88>;
+ interrupts = <17>, <18>;
+ interrupt-parent = <&periph_intc>;
+ status = "disabled";
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v3";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy1: ethernet-phy@1 {
+ max-speed = <100>;
+ reg = <0x1>;
+ compatible = "brcm,40nm-ephy",
+ "ethernet-phy-ieee802.3-c22";
+ };
+ };
+ };
+
+ ehci0: usb@480300 {
+ compatible = "brcm,bcm7425-ehci", "generic-ehci";
+ reg = <0x480300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <65>;
+ status = "disabled";
+ };
+
+ ohci0: usb@480400 {
+ compatible = "brcm,bcm7425-ohci", "generic-ohci";
+ reg = <0x480400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <67>;
+ status = "disabled";
+ };
+
+ ehci1: usb@480500 {
+ compatible = "brcm,bcm7425-ehci", "generic-ehci";
+ reg = <0x480500 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <66>;
+ status = "disabled";
+ };
+
+ ohci1: usb@480600 {
+ compatible = "brcm,bcm7425-ohci", "generic-ohci";
+ reg = <0x480600 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <68>;
+ status = "disabled";
+ };
+
+ ehci2: usb@490300 {
+ compatible = "brcm,bcm7425-ehci", "generic-ehci";
+ reg = <0x490300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <70>;
+ status = "disabled";
+ };
+
+ ohci2: usb@490400 {
+ compatible = "brcm,bcm7425-ohci", "generic-ohci";
+ reg = <0x490400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <72>;
+ status = "disabled";
+ };
+
+ ehci3: usb@490500 {
+ compatible = "brcm,bcm7425-ehci", "generic-ehci";
+ reg = <0x490500 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <71>;
+ status = "disabled";
+ };
+
+ ohci3: usb@490600 {
+ compatible = "brcm,bcm7425-ohci", "generic-ohci";
+ reg = <0x490600 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <73>;
+ status = "disabled";
+ };
+
+ hif_l2_intc: interrupt-controller@41a000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x41a000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <24>;
+ };
+
+ nand: nand@41b800 {
+ compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "nand", "flash-edu";
+ reg = <0x41b800 0x400>, <0x41bc00 0x24>;
+ interrupt-parent = <&hif_l2_intc>;
+ interrupts = <24>;
+ status = "disabled";
+ };
+
+ sata: sata@181000 {
+ compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
+ reg-names = "ahci", "top-ctrl";
+ reg = <0x181000 0xa9c>, <0x180020 0x1c>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <41>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata0: sata-port@0 {
+ reg = <0>;
+ phys = <&sata_phy0>;
+ };
+
+ sata1: sata-port@1 {
+ reg = <1>;
+ phys = <&sata_phy1>;
+ };
+ };
+
+ sata_phy: sata-phy@180100 {
+ compatible = "brcm,bcm7425-sata-phy", "brcm,phy-sata3";
+ reg = <0x180100 0x0eff>;
+ reg-names = "phy";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata_phy0: sata-phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ };
+
+ sata_phy1: sata-phy@1 {
+ reg = <1>;
+ #phy-cells = <0>;
+ };
+ };
+
+ sdhci0: sdhci@419000 {
+ compatible = "brcm,bcm7425-sdhci";
+ reg = <0x419000 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <43>;
+ sd-uhs-sdr50;
+ mmc-hs200-1_8v;
+ status = "disabled";
+ };
+
+ sdhci1: sdhci@419200 {
+ compatible = "brcm,bcm7425-sdhci";
+ reg = <0x419200 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <44>;
+ sd-uhs-sdr50;
+ mmc-hs200-1_8v;
+ status = "disabled";
+ };
+
+ spi_l2_intc: interrupt-controller@41ad00 {
+ compatible = "brcm,l2-intc";
+ reg = <0x41ad00 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <25>;
+ };
+
+ qspi: spi@41c000 {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ compatible = "brcm,spi-bcm-qspi",
+ "brcm,spi-brcmstb-qspi";
+ clocks = <&upg_clk>;
+ reg = <0x419920 0x4 0x41c200 0x188 0x41c000 0x50>;
+ reg-names = "cs_reg", "hif_mspi", "bspi";
+ interrupts = <0x0 0x1 0x2 0x3 0x4 0x5 0x6>;
+ interrupt-parent = <&spi_l2_intc>;
+ interrupt-names = "spi_lr_fullness_reached",
+ "spi_lr_session_aborted",
+ "spi_lr_impatient",
+ "spi_lr_session_done",
+ "spi_lr_overread",
+ "mspi_done",
+ "mspi_halted";
+ status = "disabled";
+ };
+
+ mspi: spi@409200 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,spi-bcm-qspi",
+ "brcm,spi-brcmstb-mspi";
+ clocks = <&upg_clk>;
+ reg = <0x409200 0x180>;
+ reg-names = "mspi";
+ interrupts = <0x14>;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupt-names = "mspi_done";
+ status = "disabled";
+ };
+
+ waketimer: waketimer@409580 {
+ compatible = "brcm,brcmstb-waketimer";
+ reg = <0x409580 0x14>;
+ interrupts = <0x3>;
+ interrupt-parent = <&aon_pm_l2_intc>;
+ interrupt-names = "timer";
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+ };
+
+ memory_controllers {
+ compatible = "simple-bus";
+ ranges = <0x0 0x103b0000 0x1a000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory-controller@0 {
+ compatible = "brcm,brcmstb-memc", "simple-bus";
+ ranges = <0x0 0x0 0xa000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memc-arb@1000 {
+ compatible = "brcm,brcmstb-memc-arb";
+ reg = <0x1000 0x248>;
+ };
+
+ memc-ddr@2000 {
+ compatible = "brcm,brcmstb-memc-ddr";
+ reg = <0x2000 0x300>;
+ };
+
+ ddr-phy@6000 {
+ compatible = "brcm,brcmstb-ddr-phy";
+ reg = <0x6000 0xc8>;
+ };
+
+ shimphy@8000 {
+ compatible = "brcm,brcmstb-ddr-shimphy";
+ reg = <0x8000 0x13c>;
+ };
+ };
+
+ memory-controller@1 {
+ compatible = "brcm,brcmstb-memc", "simple-bus";
+ ranges = <0x0 0x10000 0xa000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memc-arb@1000 {
+ compatible = "brcm,brcmstb-memc-arb";
+ reg = <0x1000 0x248>;
+ };
+
+ memc-ddr@2000 {
+ compatible = "brcm,brcmstb-memc-ddr";
+ reg = <0x2000 0x300>;
+ };
+
+ ddr-phy@6000 {
+ compatible = "brcm,brcmstb-ddr-phy";
+ reg = <0x6000 0xc8>;
+ };
+
+ shimphy@8000 {
+ compatible = "brcm,brcmstb-ddr-shimphy";
+ reg = <0x8000 0x13c>;
+ };
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7435.dtsi b/arch/mips/boot/dts/brcm/bcm7435.dtsi
new file mode 100644
index 000000000..8398b7f68
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm7435.dtsi
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm7435";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <175625000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips5200";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips5200";
+ device_type = "cpu";
+ reg = <1>;
+ };
+
+ cpu@2 {
+ compatible = "brcm,bmips5200";
+ device_type = "cpu";
+ reg = <2>;
+ };
+
+ cpu@3 {
+ compatible = "brcm,bmips5200";
+ device_type = "cpu";
+ reg = <3>;
+ };
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ clocks {
+ uart_clk: uart_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <81000000>;
+ };
+
+ upg_clk: upg_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+ };
+
+ rdb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges = <0 0x10000000 0x01000000>;
+
+ periph_intc: interrupt-controller@41b500 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x41b500 0x40>, <0x41b600 0x40>,
+ <0x41b700 0x40>, <0x41b800 0x40>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>, <2>, <3>;
+ };
+
+ sun_l2_intc: interrupt-controller@403000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x403000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <52>;
+ };
+
+ gisb-arb@400000 {
+ compatible = "brcm,bcm7435-gisb-arb";
+ reg = <0x400000 0xdc>;
+ native-endian;
+ interrupt-parent = <&sun_l2_intc>;
+ interrupts = <0>, <2>;
+ brcm,gisb-arb-master-mask = <0xf77f>;
+ brcm,gisb-arb-master-names = "ssp_0", "cpu_0", "webcpu_0",
+ "pcie_0", "bsp_0",
+ "rdc_0", "raaga_0",
+ "avd_1", "jtag_0",
+ "svd_0", "vice_0",
+ "vice_1", "raaga_1",
+ "scpu";
+ };
+
+ upg_irq0_intc: interrupt-controller@406780 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x406780 0x8>;
+
+ brcm,int-map-mask = <0x44>, <0x7000000>;
+ brcm,int-fwd-mask = <0x70000>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <60>, <58>;
+ interrupt-names = "upg_main", "upg_bsc";
+ };
+
+ upg_aon_irq0_intc: interrupt-controller@409480 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x409480 0x8>;
+
+ brcm,int-map-mask = <0x40>, <0x18000000>, <0x100000>;
+ brcm,int-fwd-mask = <0>;
+ brcm,irq-can-wake;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <61>, <59>, <64>;
+ interrupt-names = "upg_main_aon", "upg_bsc_aon",
+ "upg_spi";
+ };
+
+ sun_top_ctrl: syscon@404000 {
+ compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
+ reg = <0x404000 0x51c>;
+ native-endian;
+ };
+
+ reboot {
+ compatible = "brcm,brcmstb-reboot";
+ syscon = <&sun_top_ctrl 0x304 0x308>;
+ };
+
+ uart0: serial@406b00 {
+ compatible = "ns16550a";
+ reg = <0x406b00 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <66>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart1: serial@406b40 {
+ compatible = "ns16550a";
+ reg = <0x406b40 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <67>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart2: serial@406b80 {
+ compatible = "ns16550a";
+ reg = <0x406b80 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <68>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ bsca: i2c@406300 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406300 0x58>;
+ interrupts = <26>;
+ interrupt-names = "upg_bsca";
+ status = "disabled";
+ };
+
+ bscb: i2c@409400 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ reg = <0x409400 0x58>;
+ interrupts = <28>;
+ interrupt-names = "upg_bscb";
+ status = "disabled";
+ };
+
+ bscc: i2c@406200 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406200 0x58>;
+ interrupts = <24>;
+ interrupt-names = "upg_bscc";
+ status = "disabled";
+ };
+
+ bscd: i2c@406280 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406280 0x58>;
+ interrupts = <25>;
+ interrupt-names = "upg_bscd";
+ status = "disabled";
+ };
+
+ bsce: i2c@409180 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ reg = <0x409180 0x58>;
+ interrupts = <27>;
+ interrupt-names = "upg_bsce";
+ status = "disabled";
+ };
+
+ pwma: pwm@406580 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406580 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ pwmb: pwm@406800 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406800 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ watchdog: watchdog@4067e8 {
+ clocks = <&upg_clk>;
+ compatible = "brcm,bcm7038-wdt";
+ reg = <0x4067e8 0x14>;
+ status = "disabled";
+ };
+
+ aon_pm_l2_intc: interrupt-controller@408440 {
+ compatible = "brcm,l2-intc";
+ reg = <0x408440 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <54>;
+ brcm,irq-can-wake;
+ };
+
+ aon_ctrl: syscon@408000 {
+ compatible = "brcm,brcmstb-aon-ctrl";
+ reg = <0x408000 0x100>, <0x408200 0x200>;
+ reg-names = "aon-ctrl", "aon-sram";
+ };
+
+ timers: timer@4067c0 {
+ compatible = "brcm,brcmstb-timers";
+ reg = <0x4067c0 0x40>;
+ };
+
+ upg_gio: gpio@406700 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x406700 0x80>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupts = <6>;
+ brcm,gpio-bank-widths = <32 32 32 21>;
+ };
+
+ upg_gio_aon: gpio@4094c0 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x4094c0 0x40>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupts = <6>;
+ interrupts-extended = <&upg_aon_irq0_intc 6>,
+ <&aon_pm_l2_intc 5>;
+ wakeup-source;
+ brcm,gpio-bank-widths = <18 4>;
+ };
+
+ enet0: ethernet@b80000 {
+ phy-mode = "internal";
+ phy-handle = <&phy1>;
+ mac-address = [ 00 10 18 36 23 1a ];
+ compatible = "brcm,genet-v3";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0xb80000 0x11c88>;
+ interrupts = <17>, <18>;
+ interrupt-parent = <&periph_intc>;
+ status = "disabled";
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v3";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy1: ethernet-phy@1 {
+ max-speed = <100>;
+ reg = <0x1>;
+ compatible = "brcm,40nm-ephy",
+ "ethernet-phy-ieee802.3-c22";
+ };
+ };
+ };
+
+ ehci0: usb@480300 {
+ compatible = "brcm,bcm7435-ehci", "generic-ehci";
+ reg = <0x480300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <70>;
+ status = "disabled";
+ };
+
+ ohci0: usb@480400 {
+ compatible = "brcm,bcm7435-ohci", "generic-ohci";
+ reg = <0x480400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <72>;
+ status = "disabled";
+ };
+
+ ehci1: usb@480500 {
+ compatible = "brcm,bcm7435-ehci", "generic-ehci";
+ reg = <0x480500 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <71>;
+ status = "disabled";
+ };
+
+ ohci1: usb@480600 {
+ compatible = "brcm,bcm7435-ohci", "generic-ohci";
+ reg = <0x480600 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <73>;
+ status = "disabled";
+ };
+
+ ehci2: usb@490300 {
+ compatible = "brcm,bcm7435-ehci", "generic-ehci";
+ reg = <0x490300 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <75>;
+ status = "disabled";
+ };
+
+ ohci2: usb@490400 {
+ compatible = "brcm,bcm7435-ohci", "generic-ohci";
+ reg = <0x490400 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <77>;
+ status = "disabled";
+ };
+
+ ehci3: usb@490500 {
+ compatible = "brcm,bcm7435-ehci", "generic-ehci";
+ reg = <0x490500 0x100>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <76>;
+ status = "disabled";
+ };
+
+ ohci3: usb@490600 {
+ compatible = "brcm,bcm7435-ohci", "generic-ohci";
+ reg = <0x490600 0x100>;
+ native-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <78>;
+ status = "disabled";
+ };
+
+ hif_l2_intc: interrupt-controller@41b000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x41b000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <24>;
+ };
+
+ nand: nand@41c800 {
+ compatible = "brcm,brcmnand-v6.2", "brcm,brcmnand";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "nand", "flash-dma";
+ reg = <0x41c800 0x600>, <0x41d000 0x100>;
+ interrupt-parent = <&hif_l2_intc>;
+ interrupts = <24>, <4>;
+ status = "disabled";
+ };
+
+ sata: sata@181000 {
+ compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
+ reg-names = "ahci", "top-ctrl";
+ reg = <0x181000 0xa9c>, <0x180020 0x1c>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <45>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata0: sata-port@0 {
+ reg = <0>;
+ phys = <&sata_phy0>;
+ };
+
+ sata1: sata-port@1 {
+ reg = <1>;
+ phys = <&sata_phy1>;
+ };
+ };
+
+ sata_phy: sata-phy@180100 {
+ compatible = "brcm,bcm7425-sata-phy", "brcm,phy-sata3";
+ reg = <0x180100 0x0eff>;
+ reg-names = "phy";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata_phy0: sata-phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ };
+
+ sata_phy1: sata-phy@1 {
+ reg = <1>;
+ #phy-cells = <0>;
+ };
+ };
+
+ sdhci0: sdhci@41a000 {
+ compatible = "brcm,bcm7425-sdhci";
+ reg = <0x41a000 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <47>;
+ sd-uhs-sdr50;
+ mmc-hs200-1_8v;
+ status = "disabled";
+ };
+
+ sdhci1: sdhci@41a200 {
+ compatible = "brcm,bcm7425-sdhci";
+ reg = <0x41a200 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <48>;
+ sd-uhs-sdr50;
+ mmc-hs200-1_8v;
+ status = "disabled";
+ };
+
+ spi_l2_intc: interrupt-controller@41bd00 {
+ compatible = "brcm,l2-intc";
+ reg = <0x41bd00 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <25>;
+ };
+
+ qspi: spi@41d200 {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ compatible = "brcm,spi-bcm-qspi",
+ "brcm,spi-brcmstb-qspi";
+ clocks = <&upg_clk>;
+ reg = <0x41a920 0x4 0x41d400 0x188 0x41d200 0x50>;
+ reg-names = "cs_reg", "hif_mspi", "bspi";
+ interrupts = <0x0 0x1 0x2 0x3 0x4 0x5 0x6>;
+ interrupt-parent = <&spi_l2_intc>;
+ interrupt-names = "spi_lr_fullness_reached",
+ "spi_lr_session_aborted",
+ "spi_lr_impatient",
+ "spi_lr_session_done",
+ "spi_lr_overread",
+ "mspi_done",
+ "mspi_halted";
+ status = "disabled";
+ };
+
+ mspi: spi@409200 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,spi-bcm-qspi",
+ "brcm,spi-brcmstb-mspi";
+ clocks = <&upg_clk>;
+ reg = <0x409200 0x180>;
+ reg-names = "mspi";
+ interrupts = <0x14>;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupt-names = "mspi_done";
+ status = "disabled";
+ };
+
+ waketimer: waketimer@409580 {
+ compatible = "brcm,brcmstb-waketimer";
+ reg = <0x409580 0x14>;
+ interrupts = <0x3>;
+ interrupt-parent = <&aon_pm_l2_intc>;
+ interrupt-names = "timer";
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+ };
+
+ memory_controllers {
+ compatible = "simple-bus";
+ ranges = <0x0 0x103b0000 0x1a000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory-controller@0 {
+ compatible = "brcm,brcmstb-memc", "simple-bus";
+ ranges = <0x0 0x0 0xa000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memc-arb@1000 {
+ compatible = "brcm,brcmstb-memc-arb";
+ reg = <0x1000 0x248>;
+ };
+
+ memc-ddr@2000 {
+ compatible = "brcm,brcmstb-memc-ddr";
+ reg = <0x2000 0x300>;
+ };
+
+ ddr-phy@6000 {
+ compatible = "brcm,brcmstb-ddr-phy";
+ reg = <0x6000 0xc8>;
+ };
+
+ shimphy@8000 {
+ compatible = "brcm,brcmstb-ddr-shimphy";
+ reg = <0x8000 0x13c>;
+ };
+ };
+
+ memory-controller@1 {
+ compatible = "brcm,brcmstb-memc", "simple-bus";
+ ranges = <0x0 0x10000 0xa000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memc-arb@1000 {
+ compatible = "brcm,brcmstb-memc-arb";
+ reg = <0x1000 0x248>;
+ };
+
+ memc-ddr@2000 {
+ compatible = "brcm,brcmstb-memc-ddr";
+ reg = <0x2000 0x300>;
+ };
+
+ ddr-phy@6000 {
+ compatible = "brcm,brcmstb-ddr-phy";
+ reg = <0x6000 0xc8>;
+ };
+
+ shimphy@8000 {
+ compatible = "brcm,brcmstb-ddr-shimphy";
+ reg = <0x8000 0x13c>;
+ };
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm93384wvg.dts b/arch/mips/boot/dts/brcm/bcm93384wvg.dts
new file mode 100644
index 000000000..601e4d929
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm93384wvg.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "bcm3384_zephyr.dtsi"
+
+/ {
+ compatible = "brcm,bcm93384wvg", "brcm,bcm3384";
+ model = "Broadcom BCM93384WVG";
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts b/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts
new file mode 100644
index 000000000..938a8e661
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "bcm3384_viper.dtsi"
+
+/ {
+ compatible = "brcm,bcm93384wvg-viper", "brcm,bcm3384-viper";
+ model = "Broadcom BCM93384WVG-viper";
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm96368mvwg.dts b/arch/mips/boot/dts/brcm/bcm96368mvwg.dts
new file mode 100644
index 000000000..6d772c394
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm96368mvwg.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "bcm6368.dtsi"
+
+/ {
+ compatible = "brcm,bcm96368mvwg", "brcm,bcm6368";
+ model = "Broadcom BCM96368MVWG";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x04000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+/* FIXME: need to set up USB_CTRL registers first */
+&ehci {
+ status = "disabled";
+};
+
+&ohci {
+ status = "disabled";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97125cbmb.dts b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
new file mode 100644
index 000000000..79e9769f7
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "bcm7125.dtsi"
+
+/ {
+ compatible = "brcm,bcm97125cbmb", "brcm,bcm7125";
+ model = "Broadcom BCM97125CBMB";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&bsca {
+ status = "okay";
+};
+
+&bscb {
+ status = "okay";
+};
+
+&bscc {
+ status = "okay";
+};
+
+&bscd {
+ status = "okay";
+};
+
+&pwma {
+ status = "okay";
+};
+
+&watchdog {
+ status = "okay";
+};
+
+/* FIXME: USB is wonky; disable it for now */
+&ehci0 {
+ status = "disabled";
+};
+
+&ohci0 {
+ status = "disabled";
+};
+
+&mspi {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts b/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts
new file mode 100644
index 000000000..28370ff77
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "bcm7346.dtsi"
+/include/ "bcm97xxx-nand-cs1-bch24.dtsi"
+
+/ {
+ compatible = "brcm,bcm97346dbsmb", "brcm,bcm7346";
+ model = "Broadcom BCM97346DBSMB";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>, <0x20000000 0x30000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&bsca {
+ status = "okay";
+};
+
+&bscb {
+ status = "okay";
+};
+
+&bscc {
+ status = "okay";
+};
+
+&bscd {
+ status = "okay";
+};
+
+&bsce {
+ status = "okay";
+};
+
+&pwma {
+ status = "okay";
+};
+
+&pwmb {
+ status = "okay";
+};
+
+&watchdog {
+ status = "okay";
+};
+
+&enet0 {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&ehci2 {
+ status = "okay";
+};
+
+&ohci2 {
+ status = "okay";
+};
+
+&ehci3 {
+ status = "okay";
+};
+
+&ohci3 {
+ status = "okay";
+};
+
+&nand {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+};
+
+&sata_phy {
+ status = "okay";
+};
+
+&sdhci0 {
+ status = "okay";
+};
+
+&mspi {
+ status = "okay";
+};
+
+&waketimer {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97358svmb.dts b/arch/mips/boot/dts/brcm/bcm97358svmb.dts
new file mode 100644
index 000000000..41c1b510c
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97358svmb.dts
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "bcm7358.dtsi"
+/include/ "bcm97xxx-nand-cs1-bch4.dtsi"
+
+/ {
+ compatible = "brcm,bcm97358svmb", "brcm,bcm7358";
+ model = "Broadcom BCM97358SVMB";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&bsca {
+ status = "okay";
+};
+
+&bscb {
+ status = "okay";
+};
+
+&bscc {
+ status = "okay";
+};
+
+&bscd {
+ status = "okay";
+};
+
+&pwma {
+ status = "okay";
+};
+
+&pwmb {
+ status = "okay";
+};
+
+&watchdog {
+ status = "okay";
+};
+
+&enet0 {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&nand {
+ status = "okay";
+};
+
+&qspi {
+ status = "okay";
+
+ m25p80@0 {
+ compatible = "m25p80";
+ reg = <0>;
+ spi-max-frequency = <40000000>;
+ spi-cpol;
+ spi-cpha;
+ use-bspi;
+ m25p,fast-read;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ flash0.cfe@0 {
+ reg = <0x0 0x200000>;
+ };
+
+ flash0.mac@200000 {
+ reg = <0x200000 0x40000>;
+ };
+
+ flash0.nvram@240000 {
+ reg = <0x240000 0x10000>;
+ };
+ };
+ };
+};
+
+&mspi {
+ status = "okay";
+};
+
+&waketimer {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97360svmb.dts b/arch/mips/boot/dts/brcm/bcm97360svmb.dts
new file mode 100644
index 000000000..9f6c6c9b7
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97360svmb.dts
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "bcm7360.dtsi"
+
+/ {
+ compatible = "brcm,bcm97360svmb", "brcm,bcm7360";
+ model = "Broadcom BCM97360SVMB";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&bsca {
+ status = "okay";
+};
+
+&bscb {
+ status = "okay";
+};
+
+&bscc {
+ status = "okay";
+};
+
+&bscd {
+ status = "okay";
+};
+
+&pwma {
+ status = "okay";
+};
+
+&watchdog {
+ status = "okay";
+};
+
+&enet0 {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+};
+
+&sata_phy {
+ status = "okay";
+};
+
+&sdhci0 {
+ status = "okay";
+};
+
+&qspi {
+ status = "okay";
+
+ m25p80@0 {
+ compatible = "m25p80";
+ reg = <0>;
+ spi-max-frequency = <40000000>;
+ spi-cpol;
+ spi-cpha;
+ use-bspi;
+ m25p,fast-read;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ flash0.cfe@0 {
+ reg = <0x0 0x200000>;
+ };
+
+ flash0.mac@200000 {
+ reg = <0x200000 0x40000>;
+ };
+
+ flash0.nvram@240000 {
+ reg = <0x240000 0x10000>;
+ };
+ };
+ };
+};
+
+&mspi {
+ status = "okay";
+};
+
+&waketimer {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97362svmb.dts b/arch/mips/boot/dts/brcm/bcm97362svmb.dts
new file mode 100644
index 000000000..df8b755c3
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97362svmb.dts
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "bcm7362.dtsi"
+/include/ "bcm97xxx-nand-cs1-bch4.dtsi"
+
+/ {
+ compatible = "brcm,bcm97362svmb", "brcm,bcm7362";
+ model = "Broadcom BCM97362SVMB";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>, <0x20000000 0x30000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&bsca {
+ status = "okay";
+};
+
+&bscb {
+ status = "okay";
+};
+
+&bscd {
+ status = "okay";
+};
+
+&pwma {
+ status = "okay";
+};
+
+&watchdog {
+ status = "okay";
+};
+
+&enet0 {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&nand {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+};
+
+&sata_phy {
+ status = "okay";
+};
+
+&sdhci0 {
+ status = "okay";
+};
+
+&mspi {
+ status = "okay";
+};
+
+&waketimer {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97420c.dts b/arch/mips/boot/dts/brcm/bcm97420c.dts
new file mode 100644
index 000000000..086faeaa3
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97420c.dts
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "bcm7420.dtsi"
+
+/ {
+ compatible = "brcm,bcm97420c", "brcm,bcm7420";
+ model = "Broadcom BCM97420C";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>,
+ <0x20000000 0x30000000>,
+ <0x60000000 0x10000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&bsca {
+ status = "okay";
+};
+
+&bscb {
+ status = "okay";
+};
+
+&bscc {
+ status = "okay";
+};
+
+&bscd {
+ status = "okay";
+};
+
+&bsce {
+ status = "okay";
+};
+
+&pwma {
+ status = "okay";
+};
+
+&pwmb {
+ status = "okay";
+};
+
+&watchdog {
+ status = "okay";
+};
+
+/* FIXME: MAC driver comes up but cannot attach to PHY */
+&enet0 {
+ status = "disabled";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&mspi {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97425svmb.dts b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
new file mode 100644
index 000000000..0ed22217b
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "bcm7425.dtsi"
+/include/ "bcm97xxx-nand-cs1-bch24.dtsi"
+
+/ {
+ compatible = "brcm,bcm97425svmb", "brcm,bcm7425";
+ model = "Broadcom BCM97425SVMB";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>,
+ <0x20000000 0x30000000>,
+ <0x90000000 0x40000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&bsca {
+ status = "okay";
+};
+
+&bscb {
+ status = "okay";
+};
+
+&bscc {
+ status = "okay";
+};
+
+&bscd {
+ status = "okay";
+};
+
+&bsce {
+ status = "okay";
+};
+
+&pwma {
+ status = "okay";
+};
+
+&pwmb {
+ status = "okay";
+};
+
+&watchdog {
+ status = "okay";
+};
+
+&enet0 {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&ehci2 {
+ status = "okay";
+};
+
+&ohci2 {
+ status = "okay";
+};
+
+&ehci3 {
+ status = "okay";
+};
+
+&ohci3 {
+ status = "okay";
+};
+
+&nand {
+ status = "okay";
+};
+
+&sdhci0 {
+ status = "okay";
+};
+
+&sdhci1 {
+ status = "okay";
+};
+
+&qspi {
+ status = "okay";
+
+ m25p80@0 {
+ compatible = "m25p80";
+ reg = <0>;
+ spi-max-frequency = <40000000>;
+ spi-cpol;
+ spi-cpha;
+ use-bspi;
+ m25p,fast-read;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ flash0.cfe@0 {
+ reg = <0x0 0x200000>;
+ };
+
+ flash0.mac@200000 {
+ reg = <0x200000 0x40000>;
+ };
+
+ flash0.nvram@240000 {
+ reg = <0x240000 0x10000>;
+ };
+ };
+ };
+};
+
+&mspi {
+ status = "okay";
+};
+
+&waketimer {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97435svmb.dts b/arch/mips/boot/dts/brcm/bcm97435svmb.dts
new file mode 100644
index 000000000..2c145a883
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97435svmb.dts
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "bcm7435.dtsi"
+/include/ "bcm97xxx-nand-cs1-bch24.dtsi"
+
+/ {
+ compatible = "brcm,bcm97435svmb", "brcm,bcm7435";
+ model = "Broadcom BCM97435SVMB";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>,
+ <0x20000000 0x30000000>,
+ <0x90000000 0x40000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&bsca {
+ status = "okay";
+};
+
+&bscb {
+ status = "okay";
+};
+
+&bscc {
+ status = "okay";
+};
+
+&bscd {
+ status = "okay";
+};
+
+&bsce {
+ status = "okay";
+};
+
+&pwma {
+ status = "okay";
+};
+
+&pwmb {
+ status = "okay";
+};
+
+&watchdog {
+ status = "okay";
+};
+
+&enet0 {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&ehci2 {
+ status = "okay";
+};
+
+&ohci2 {
+ status = "okay";
+};
+
+&ehci3 {
+ status = "okay";
+};
+
+&ohci3 {
+ status = "okay";
+};
+
+&nand {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+};
+
+&sata_phy {
+ status = "okay";
+};
+
+&sdhci0 {
+ status = "okay";
+};
+
+&sdhci1 {
+ status = "okay";
+};
+
+&mspi {
+ status = "okay";
+};
+
+&waketimer {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch24.dtsi b/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch24.dtsi
new file mode 100644
index 000000000..96c30d857
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch24.dtsi
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+&nand {
+ nandcs@1 {
+ compatible = "brcm,nandcs";
+ reg = <1>;
+ nand-on-flash-bbt;
+
+ nand-ecc-strength = <24>;
+ nand-ecc-step-size = <1024>;
+ brcm,nand-oob-sector-size = <27>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ flash1.rootfs@0 {
+ reg = <0x0 0x10000000>;
+ };
+
+ flash1.kernel@10000000 {
+ reg = <0x10000000 0x400000>;
+ };
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch4.dtsi b/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch4.dtsi
new file mode 100644
index 000000000..7b5afefbb
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch4.dtsi
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+&nand {
+ nandcs@1 {
+ compatible = "brcm,nandcs";
+ reg = <1>;
+ nand-on-flash-bbt;
+
+ nand-ecc-strength = <4>;
+ nand-ecc-step-size = <512>;
+ brcm,nand-oob-sector-size = <16>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ flash1.rootfs@0 {
+ reg = <0x0 0x10000000>;
+ };
+
+ flash1.kernel@10000000 {
+ reg = <0x10000000 0x400000>;
+ };
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts b/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts
new file mode 100644
index 000000000..8d58c1971
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "bcm6328.dtsi"
+
+/ {
+ compatible = "brcm,bcm9ejtagprb", "brcm,bcm6328";
+ model = "Broadcom BCM9EJTAGPRB";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x08000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/cavium-octeon/Makefile b/arch/mips/boot/dts/cavium-octeon/Makefile
new file mode 100644
index 000000000..17aef35f3
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_CAVIUM_OCTEON_SOC) += octeon_3xxx.dtb octeon_68xx.dtb
+
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
new file mode 100644
index 000000000..2fdb4baad
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Device tree source for D-Link DSR-1000N.
+ *
+ * Written by: Aaro Koskinen <aaro.koskinen@iki.fi>
+ */
+
+/include/ "dlink_dsr-500n-1000n.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "dlink,dsr-1000n";
+
+ soc@0 {
+ uart0: serial@1180000000800 {
+ clock-frequency = <500000000>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ usb1 {
+ label = "usb1";
+ gpios = <&gpio 9 GPIO_ACTIVE_LOW>;
+ };
+
+ usb2 {
+ label = "usb2";
+ gpios = <&gpio 10 GPIO_ACTIVE_LOW>;
+ };
+
+ wps {
+ label = "wps";
+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+ };
+
+ wireless1 {
+ label = "5g";
+ gpios = <&gpio 17 GPIO_ACTIVE_LOW>;
+ };
+
+ wireless2 {
+ label = "2.4g";
+ gpios = <&gpio 18 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n-1000n.dtsi b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n-1000n.dtsi
new file mode 100644
index 000000000..b4acdb26a
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n-1000n.dtsi
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Device tree source for D-Link DSR-500N/1000N (common parts).
+ *
+ * Written by: Aaro Koskinen <aaro.koskinen@iki.fi>
+ */
+
+/include/ "octeon_3xxx.dtsi"
+
+/ {
+ soc@0 {
+ smi0: mdio@1180000001800 {
+ phy8: ethernet-phy@8 {
+ reg = <8>;
+ compatible = "ethernet-phy-ieee802.3-c22";
+ };
+ };
+
+ pip: pip@11800a0000000 {
+ interface@0 {
+ ethernet@0 {
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ ethernet@1 {
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ ethernet@2 {
+ phy-handle = <&phy8>;
+ };
+ };
+ };
+
+ twsi0: i2c@1180000001000 {
+ rtc@68 {
+ compatible = "dallas,ds1337";
+ reg = <0x68>;
+ };
+ };
+
+ usbn: usbn@1180068000000 {
+ refclk-frequency = <12000000>;
+ refclk-type = "crystal";
+ };
+ };
+
+ aliases {
+ pip = &pip;
+ };
+};
diff --git a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n.dts b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n.dts
new file mode 100644
index 000000000..e04237281
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n.dts
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Device tree source for D-Link DSR-500N.
+ *
+ * Written by: Aaro Koskinen <aaro.koskinen@iki.fi>
+ */
+
+/include/ "dlink_dsr-500n-1000n.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "dlink,dsr-500n";
+ compatible = "dlink,dsr-500n", "cavium,octeon-3860";
+
+ soc@0 {
+ uart0: serial@1180000000800 {
+ clock-frequency = <300000000>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ usb {
+ gpios = <&gpio 9 GPIO_ACTIVE_LOW>;
+ };
+
+ wps {
+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+ };
+
+ wireless {
+ label = "2.4g";
+ gpios = <&gpio 18 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
new file mode 100644
index 000000000..dda0559ce
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OCTEON 3XXX, 5XXX, 63XX device tree skeleton.
+ *
+ * This device tree is pruned and patched by early boot code before
+ * use. Because of this, it contains a super-set of the available
+ * devices and properties.
+ */
+
+/include/ "octeon_3xxx.dtsi"
+
+/ {
+ soc@0 {
+ smi0: mdio@1180000001800 {
+ phy0: ethernet-phy@0 {
+ compatible = "marvell,88e1118";
+ marvell,reg-init =
+ /* Fix rx and tx clock transition timing */
+ <2 0x15 0xffcf 0>, /* Reg 2,21 Clear bits 4, 5 */
+ /* Adjust LED drive. */
+ <3 0x11 0 0x442a>, /* Reg 3,17 <- 0x442a */
+ /* irq, blink-activity, blink-link */
+ <3 0x10 0 0x0242>; /* Reg 3,16 <- 0x0242 */
+ reg = <0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ compatible = "marvell,88e1118";
+ marvell,reg-init =
+ /* Fix rx and tx clock transition timing */
+ <2 0x15 0xffcf 0>, /* Reg 2,21 Clear bits 4, 5 */
+ /* Adjust LED drive. */
+ <3 0x11 0 0x442a>, /* Reg 3,17 <- 0x442a */
+ /* irq, blink-activity, blink-link */
+ <3 0x10 0 0x0242>; /* Reg 3,16 <- 0x0242 */
+ reg = <1>;
+ };
+
+ phy2: ethernet-phy@2 {
+ reg = <2>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy3: ethernet-phy@3 {
+ reg = <3>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy4: ethernet-phy@4 {
+ reg = <4>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy5: ethernet-phy@5 {
+ reg = <5>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+
+ phy6: ethernet-phy@6 {
+ reg = <6>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy7: ethernet-phy@7 {
+ reg = <7>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy8: ethernet-phy@8 {
+ reg = <8>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy9: ethernet-phy@9 {
+ reg = <9>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ };
+
+ smi1: mdio@1180000001900 {
+ compatible = "cavium,octeon-3860-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0x00001900 0x0 0x40>;
+
+ phy100: ethernet-phy@1 {
+ reg = <1>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ interrupt-parent = <&gpio>;
+ interrupts = <12 8>; /* Pin 12, active low */
+ };
+ phy101: ethernet-phy@2 {
+ reg = <2>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ interrupt-parent = <&gpio>;
+ interrupts = <12 8>; /* Pin 12, active low */
+ };
+ phy102: ethernet-phy@3 {
+ reg = <3>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ interrupt-parent = <&gpio>;
+ interrupts = <12 8>; /* Pin 12, active low */
+ };
+ phy103: ethernet-phy@4 {
+ reg = <4>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ interrupt-parent = <&gpio>;
+ interrupts = <12 8>; /* Pin 12, active low */
+ };
+ };
+
+ mix0: ethernet@1070000100000 {
+ compatible = "cavium,octeon-5750-mix";
+ reg = <0x10700 0x00100000 0x0 0x100>, /* MIX */
+ <0x11800 0xE0000000 0x0 0x300>, /* AGL */
+ <0x11800 0xE0000400 0x0 0x400>, /* AGL_SHARED */
+ <0x11800 0xE0002000 0x0 0x8>; /* AGL_PRT_CTL */
+ cell-index = <0>;
+ interrupts = <0 62>, <1 46>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy0>;
+ };
+
+ mix1: ethernet@1070000100800 {
+ compatible = "cavium,octeon-5750-mix";
+ reg = <0x10700 0x00100800 0x0 0x100>, /* MIX */
+ <0x11800 0xE0000800 0x0 0x300>, /* AGL */
+ <0x11800 0xE0000400 0x0 0x400>, /* AGL_SHARED */
+ <0x11800 0xE0002008 0x0 0x8>; /* AGL_PRT_CTL */
+ cell-index = <1>;
+ interrupts = <1 18>, <1 46>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy1>;
+ };
+
+ pip: pip@11800a0000000 {
+ interface@0 {
+ ethernet@0 {
+ phy-handle = <&phy2>;
+ cavium,alt-phy-handle = <&phy100>;
+ rx-delay = <0>;
+ tx-delay = <0>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ ethernet@1 {
+ phy-handle = <&phy3>;
+ cavium,alt-phy-handle = <&phy101>;
+ rx-delay = <0>;
+ tx-delay = <0>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ ethernet@2 {
+ phy-handle = <&phy4>;
+ cavium,alt-phy-handle = <&phy102>;
+ rx-delay = <0>;
+ tx-delay = <0>;
+ };
+ ethernet@3 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x3>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy5>;
+ cavium,alt-phy-handle = <&phy103>;
+ };
+ ethernet@4 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x4>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@5 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x5>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@6 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x6>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@7 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x7>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@8 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x8>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@9 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x9>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@a {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0xa>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@b {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0xb>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@c {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0xc>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@d {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0xd>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@e {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0xe>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@f {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0xf>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ };
+
+ interface@1 {
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy6>;
+ };
+ ethernet@1 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x1>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy7>;
+ };
+ ethernet@2 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x2>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy8>;
+ };
+ ethernet@3 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x3>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy9>;
+ };
+ };
+ };
+
+ twsi0: i2c@1180000001000 {
+ rtc@68 {
+ compatible = "dallas,ds1337";
+ reg = <0x68>;
+ };
+ tmp@4c {
+ compatible = "ti,tmp421";
+ reg = <0x4c>;
+ };
+ };
+
+ twsi1: i2c@1180000001200 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "cavium,octeon-3860-twsi";
+ reg = <0x11800 0x00001200 0x0 0x200>;
+ interrupts = <0 59>;
+ clock-frequency = <100000>;
+ };
+
+ uart1: serial@1180000000c00 {
+ compatible = "cavium,octeon-3860-uart","ns16550";
+ reg = <0x11800 0x00000c00 0x0 0x400>;
+ clock-frequency = <0>;
+ current-speed = <115200>;
+ reg-shift = <3>;
+ interrupts = <0 35>;
+ };
+
+ uart2: serial@1180000000400 {
+ compatible = "cavium,octeon-3860-uart","ns16550";
+ reg = <0x11800 0x00000400 0x0 0x400>;
+ clock-frequency = <0>;
+ current-speed = <115200>;
+ reg-shift = <3>;
+ interrupts = <1 16>;
+ };
+
+ bootbus: bootbus@1180000000000 {
+ led0: led-display@4,0 {
+ compatible = "avago,hdsp-253x";
+ reg = <4 0x20 0x20>, <4 0 0x20>;
+ };
+
+ cf0: compact-flash@5,0 {
+ compatible = "cavium,ebt3000-compact-flash";
+ reg = <5 0 0x10000>, <6 0 0x10000>;
+ cavium,bus-width = <16>;
+ cavium,true-ide;
+ cavium,dma-engine-handle = <&dma0>;
+ };
+ };
+
+ uctl: uctl@118006f000000 {
+ compatible = "cavium,octeon-6335-uctl";
+ reg = <0x11800 0x6f000000 0x0 0x100>;
+ ranges; /* Direct mapping */
+ #address-cells = <2>;
+ #size-cells = <2>;
+ /* 12MHz, 24MHz and 48MHz allowed */
+ refclk-frequency = <12000000>;
+ /* Either "crystal" or "external" */
+ refclk-type = "crystal";
+
+ ehci@16f0000000000 {
+ compatible = "cavium,octeon-6335-ehci","usb-ehci";
+ reg = <0x16f00 0x00000000 0x0 0x100>;
+ interrupts = <0 56>;
+ big-endian-regs;
+ };
+ ohci@16f0000000400 {
+ compatible = "cavium,octeon-6335-ohci","usb-ohci";
+ reg = <0x16f00 0x00000400 0x0 0x100>;
+ interrupts = <0 56>;
+ big-endian-regs;
+ };
+ };
+
+ usbn: usbn@1180068000000 {
+ /* 12MHz, 24MHz and 48MHz allowed */
+ refclk-frequency = <12000000>;
+ /* Either "crystal" or "external" */
+ refclk-type = "crystal";
+ };
+ };
+
+ aliases {
+ mix0 = &mix0;
+ mix1 = &mix1;
+ pip = &pip;
+ smi0 = &smi0;
+ smi1 = &smi1;
+ twsi0 = &twsi0;
+ twsi1 = &twsi1;
+ uart0 = &uart0;
+ uart1 = &uart1;
+ uart2 = &uart2;
+ flash0 = &flash0;
+ cf0 = &cf0;
+ uctl = &uctl;
+ usbn = &usbn;
+ led0 = &led0;
+ };
+ };
diff --git a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dtsi b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dtsi
new file mode 100644
index 000000000..3c296623d
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dtsi
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OCTEON 3XXX DTS common parts. */
+
+/dts-v1/;
+
+/ {
+ compatible = "cavium,octeon-3860";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&ciu>;
+
+ soc@0 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges; /* Direct mapping */
+
+ ciu: interrupt-controller@1070000000000 {
+ compatible = "cavium,octeon-3860-ciu";
+ interrupt-controller;
+ /* Interrupts are specified by two parts:
+ * 1) Controller register (0 or 1)
+ * 2) Bit within the register (0..63)
+ */
+ #interrupt-cells = <2>;
+ reg = <0x10700 0x00000000 0x0 0x7000>;
+ };
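+
+ /* For example, with the two-cell specifier described above, the uart0
+  * node later in this file selects CIU controller register 0, bit 34:
+  *
+  *	interrupts = <0 34>;
+  */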
+
+ gpio: gpio-controller@1070000000800 {
+ #gpio-cells = <2>;
+ compatible = "cavium,octeon-3860-gpio";
+ reg = <0x10700 0x00000800 0x0 0x100>;
+ gpio-controller;
+ /* Interrupts are specified by two parts:
+ * 1) GPIO pin number (0..15)
+ * 2) Triggering (1 - edge rising
+ * 2 - edge falling
+ * 4 - level active high
+ * 8 - level active low)
+ */
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ /* The GPIO pins connect to 16 consecutive CIU bits */
+ interrupts = <0 16>, <0 17>, <0 18>, <0 19>,
+ <0 20>, <0 21>, <0 22>, <0 23>,
+ <0 24>, <0 25>, <0 26>, <0 27>,
+ <0 28>, <0 29>, <0 30>, <0 31>;
+ };
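+
+ /* For instance, a device wired to GPIO pin 12 with an active-low level
+  * interrupt (trigger value 8) is described elsewhere in this directory
+  * as:
+  *
+  *	interrupt-parent = <&gpio>;
+  *	interrupts = <12 8>;	(pin 12, active low)
+  */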
+
+ smi0: mdio@1180000001800 {
+ compatible = "cavium,octeon-3860-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0x00001800 0x0 0x40>;
+ };
+
+ pip: pip@11800a0000000 {
+ compatible = "cavium,octeon-3860-pip";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0xa0000000 0x0 0x2000>;
+
+ interface@0 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>; /* interface */
+
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@1 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x1>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@2 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x2>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ };
+
+ interface@1 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>; /* interface */
+ };
+ };
+
+ twsi0: i2c@1180000001000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "cavium,octeon-3860-twsi";
+ reg = <0x11800 0x00001000 0x0 0x200>;
+ interrupts = <0 45>;
+ clock-frequency = <100000>;
+ };
+
+ uart0: serial@1180000000800 {
+ compatible = "cavium,octeon-3860-uart","ns16550";
+ reg = <0x11800 0x00000800 0x0 0x400>;
+ clock-frequency = <0>;
+ current-speed = <115200>;
+ reg-shift = <3>;
+ interrupts = <0 34>;
+ };
+
+ bootbus: bootbus@1180000000000 {
+ compatible = "cavium,octeon-3860-bootbus";
+ reg = <0x11800 0x00000000 0x0 0x200>;
+ /* The chip select number and offset */
+ #address-cells = <2>;
+ /* The size of the chip select region */
+ #size-cells = <1>;
+ ranges = <0 0 0x0 0x1f400000 0xc00000>,
+ <1 0 0x10000 0x30000000 0>,
+ <2 0 0x10000 0x40000000 0>,
+ <3 0 0x10000 0x50000000 0>,
+ <4 0 0x0 0x1d020000 0x10000>,
+ <5 0 0x0 0x1d040000 0x10000>,
+ <6 0 0x0 0x1d050000 0x10000>,
+ <7 0 0x10000 0x90000000 0>;
+
+ cavium,cs-config@0 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <0>;
+ cavium,t-adr = <20>;
+ cavium,t-ce = <60>;
+ cavium,t-oe = <60>;
+ cavium,t-we = <45>;
+ cavium,t-rd-hld = <35>;
+ cavium,t-wr-hld = <45>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <0>;
+ cavium,t-page = <35>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,bus-width = <8>;
+ };
+ cavium,cs-config@4 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <4>;
+ cavium,t-adr = <320>;
+ cavium,t-ce = <320>;
+ cavium,t-oe = <320>;
+ cavium,t-we = <320>;
+ cavium,t-rd-hld = <320>;
+ cavium,t-wr-hld = <320>;
+ cavium,t-pause = <320>;
+ cavium,t-wait = <320>;
+ cavium,t-page = <320>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,bus-width = <8>;
+ };
+ cavium,cs-config@5 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <5>;
+ cavium,t-adr = <5>;
+ cavium,t-ce = <300>;
+ cavium,t-oe = <125>;
+ cavium,t-we = <150>;
+ cavium,t-rd-hld = <100>;
+ cavium,t-wr-hld = <30>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <30>;
+ cavium,t-page = <320>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,bus-width = <16>;
+ };
+ cavium,cs-config@6 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <6>;
+ cavium,t-adr = <5>;
+ cavium,t-ce = <300>;
+ cavium,t-oe = <270>;
+ cavium,t-we = <150>;
+ cavium,t-rd-hld = <100>;
+ cavium,t-wr-hld = <70>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <0>;
+ cavium,t-page = <320>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,wait-mode;
+ cavium,bus-width = <16>;
+ };
+
+ flash0: nor@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0 0x800000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
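+
+ /* Worked example of the ranges mapping above: chip select 0 is
+  * translated by <0 0 0x0 0x1f400000 0xc00000>, so the flash0 node's
+  * reg = <0 0 0x800000> (CS 0, offset 0, 8 MiB) resolves to physical
+  * addresses 0x1f400000..0x1fbfffff.
+  */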
+
+ dma0: dma-engine@1180000000100 {
+ compatible = "cavium,octeon-5750-bootbus-dma";
+ reg = <0x11800 0x00000100 0x0 0x8>;
+ interrupts = <0 63>;
+ };
+
+ dma1: dma-engine@1180000000108 {
+ compatible = "cavium,octeon-5750-bootbus-dma";
+ reg = <0x11800 0x00000108 0x0 0x8>;
+ interrupts = <0 63>;
+ };
+
+ usbn: usbn@1180068000000 {
+ compatible = "cavium,octeon-5750-usbn";
+ reg = <0x11800 0x68000000 0x0 0x1000>;
+ ranges; /* Direct mapping */
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ usbc@16f0010000000 {
+ compatible = "cavium,octeon-5750-usbc";
+ reg = <0x16f00 0x10000000 0x0 0x80000>;
+ interrupts = <0 56>;
+ };
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/cavium-octeon/octeon_68xx.dts b/arch/mips/boot/dts/cavium-octeon/octeon_68xx.dts
new file mode 100644
index 000000000..3d0acbb2e
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/octeon_68xx.dts
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/*
+ * OCTEON 68XX device tree skeleton.
+ *
+ * This device tree is pruned and patched by early boot code before
+ * use. Because of this, it contains a super-set of the available
+ * devices and properties.
+ */
+/ {
+ compatible = "cavium,octeon-6880";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&ciu2>;
+
+ soc@0 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges; /* Direct mapping */
+
+ ciu2: interrupt-controller@1070100000000 {
+ compatible = "cavium,octeon-6880-ciu2";
+ interrupt-controller;
+ /* Interrupts are specified by two parts:
+ * 1) Controller register (0 or 7)
+ * 2) Bit within the register (0..63)
+ */
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x10701 0x00000000 0x0 0x4000000>;
+ };
+
+ gpio: gpio-controller@1070000000800 {
+ #gpio-cells = <2>;
+ compatible = "cavium,octeon-3860-gpio";
+ reg = <0x10700 0x00000800 0x0 0x100>;
+ gpio-controller;
+ /* Interrupts are specified by two parts:
+ * 1) GPIO pin number (0..15)
+ * 2) Triggering (1 - edge rising
+ * 2 - edge falling
+ * 4 - level active high
+ * 8 - level active low)
+ */
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ /* The GPIO pins connect to 16 consecutive CIU bits */
+ interrupts = <7 0>, <7 1>, <7 2>, <7 3>,
+ <7 4>, <7 5>, <7 6>, <7 7>,
+ <7 8>, <7 9>, <7 10>, <7 11>,
+ <7 12>, <7 13>, <7 14>, <7 15>;
+ };
+
+ smi0: mdio@1180000003800 {
+ compatible = "cavium,octeon-3860-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0x00003800 0x0 0x40>;
+
+ phy0: ethernet-phy@6 {
+ compatible = "marvell,88e1118";
+ marvell,reg-init =
+ /* Fix rx and tx clock transition timing */
+ <2 0x15 0xffcf 0>, /* Reg 2,21 Clear bits 4, 5 */
+ /* Adjust LED drive. */
+ <3 0x11 0 0x442a>, /* Reg 3,17 <- 0x442a */
+ /* irq, blink-activity, blink-link */
+ <3 0x10 0 0x0242>; /* Reg 3,16 <- 0x0242 */
+ reg = <6>;
+ };
+
+ phy1: ethernet-phy@1 {
+ cavium,qlm-trim = "4,sgmii";
+ reg = <1>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy2: ethernet-phy@2 {
+ cavium,qlm-trim = "4,sgmii";
+ reg = <2>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy3: ethernet-phy@3 {
+ cavium,qlm-trim = "4,sgmii";
+ reg = <3>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy4: ethernet-phy@4 {
+ cavium,qlm-trim = "4,sgmii";
+ reg = <4>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ };
+
+ smi1: mdio@1180000003880 {
+ compatible = "cavium,octeon-3860-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0x00003880 0x0 0x40>;
+
+ phy41: ethernet-phy@1 {
+ cavium,qlm-trim = "0,sgmii";
+ reg = <1>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy42: ethernet-phy@2 {
+ cavium,qlm-trim = "0,sgmii";
+ reg = <2>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy43: ethernet-phy@3 {
+ cavium,qlm-trim = "0,sgmii";
+ reg = <3>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy44: ethernet-phy@4 {
+ cavium,qlm-trim = "0,sgmii";
+ reg = <4>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ };
+
+ smi2: mdio@1180000003900 {
+ compatible = "cavium,octeon-3860-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0x00003900 0x0 0x40>;
+
+ phy21: ethernet-phy@1 {
+ cavium,qlm-trim = "2,sgmii";
+ reg = <1>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy22: ethernet-phy@2 {
+ cavium,qlm-trim = "2,sgmii";
+ reg = <2>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy23: ethernet-phy@3 {
+ cavium,qlm-trim = "2,sgmii";
+ reg = <3>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy24: ethernet-phy@4 {
+ cavium,qlm-trim = "2,sgmii";
+ reg = <4>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ };
+
+ smi3: mdio@1180000003980 {
+ compatible = "cavium,octeon-3860-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0x00003980 0x0 0x40>;
+
+ phy11: ethernet-phy@1 {
+ cavium,qlm-trim = "3,sgmii";
+ reg = <1>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy12: ethernet-phy@2 {
+ cavium,qlm-trim = "3,sgmii";
+ reg = <2>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy13: ethernet-phy@3 {
+ cavium,qlm-trim = "3,sgmii";
+ reg = <3>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy14: ethernet-phy@4 {
+ cavium,qlm-trim = "3,sgmii";
+ reg = <4>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ };
+
+ mix0: ethernet@1070000100000 {
+ compatible = "cavium,octeon-5750-mix";
+ reg = <0x10700 0x00100000 0x0 0x100>, /* MIX */
+ <0x11800 0xE0000000 0x0 0x300>, /* AGL */
+ <0x11800 0xE0000400 0x0 0x400>, /* AGL_SHARED */
+ <0x11800 0xE0002000 0x0 0x8>; /* AGL_PRT_CTL */
+ cell-index = <0>;
+ interrupts = <6 40>, <6 32>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy0>;
+ };
+
+ pip: pip@11800a0000000 {
+ compatible = "cavium,octeon-3860-pip";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0xa0000000 0x0 0x2000>;
+
+ interface@4 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x4>; /* interface */
+
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy1>;
+ };
+ ethernet@1 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x1>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy2>;
+ };
+ ethernet@2 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x2>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy3>;
+ };
+ ethernet@3 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x3>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy4>;
+ };
+ };
+
+ interface@3 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x3>; /* interface */
+
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy11>;
+ };
+ ethernet@1 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x1>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy12>;
+ };
+ ethernet@2 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x2>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy13>;
+ };
+ ethernet@3 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x3>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy14>;
+ };
+ };
+
+ interface@2 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2>; /* interface */
+
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy21>;
+ };
+ ethernet@1 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x1>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy22>;
+ };
+ ethernet@2 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x2>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy23>;
+ };
+ ethernet@3 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x3>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy24>;
+ };
+ };
+
+ interface@1 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>; /* interface */
+
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ };
+
+ interface@0 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>; /* interface */
+
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy41>;
+ };
+ ethernet@1 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x1>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy42>;
+ };
+ ethernet@2 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x2>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy43>;
+ };
+ ethernet@3 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x3>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy44>;
+ };
+ };
+ };
+
+ twsi0: i2c@1180000001000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "cavium,octeon-3860-twsi";
+ reg = <0x11800 0x00001000 0x0 0x200>;
+ interrupts = <3 32>;
+ clock-frequency = <100000>;
+
+ rtc@68 {
+ compatible = "dallas,ds1337";
+ reg = <0x68>;
+ };
+ tmp@4c {
+ compatible = "ti,tmp421";
+ reg = <0x4c>;
+ };
+ };
+
+ twsi1: i2c@1180000001200 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "cavium,octeon-3860-twsi";
+ reg = <0x11800 0x00001200 0x0 0x200>;
+ interrupts = <3 33>;
+ clock-frequency = <100000>;
+ };
+
+ uart0: serial@1180000000800 {
+ compatible = "cavium,octeon-3860-uart","ns16550";
+ reg = <0x11800 0x00000800 0x0 0x400>;
+ clock-frequency = <0>;
+ current-speed = <115200>;
+ reg-shift = <3>;
+ interrupts = <3 36>;
+ };
+
+ uart1: serial@1180000000c00 {
+ compatible = "cavium,octeon-3860-uart","ns16550";
+ reg = <0x11800 0x00000c00 0x0 0x400>;
+ clock-frequency = <0>;
+ current-speed = <115200>;
+ reg-shift = <3>;
+ interrupts = <3 37>;
+ };
+
+ bootbus: bootbus@1180000000000 {
+ compatible = "cavium,octeon-3860-bootbus";
+ reg = <0x11800 0x00000000 0x0 0x200>;
+ /* The chip select number and offset */
+ #address-cells = <2>;
+ /* The size of the chip select region */
+ #size-cells = <1>;
+ ranges = <0 0 0 0x1f400000 0xc00000>,
+ <1 0 0x10000 0x30000000 0>,
+ <2 0 0x10000 0x40000000 0>,
+ <3 0 0x10000 0x50000000 0>,
+ <4 0 0 0x1d020000 0x10000>,
+ <5 0 0 0x1d040000 0x10000>,
+ <6 0 0 0x1d050000 0x10000>,
+ <7 0 0x10000 0x90000000 0>;
+
+ cavium,cs-config@0 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <0>;
+ cavium,t-adr = <10>;
+ cavium,t-ce = <50>;
+ cavium,t-oe = <50>;
+ cavium,t-we = <35>;
+ cavium,t-rd-hld = <25>;
+ cavium,t-wr-hld = <35>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <300>;
+ cavium,t-page = <25>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,bus-width = <8>;
+ };
+ cavium,cs-config@4 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <4>;
+ cavium,t-adr = <320>;
+ cavium,t-ce = <320>;
+ cavium,t-oe = <320>;
+ cavium,t-we = <320>;
+ cavium,t-rd-hld = <320>;
+ cavium,t-wr-hld = <320>;
+ cavium,t-pause = <320>;
+ cavium,t-wait = <320>;
+ cavium,t-page = <320>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,bus-width = <8>;
+ };
+ cavium,cs-config@5 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <5>;
+ cavium,t-adr = <0>;
+ cavium,t-ce = <300>;
+ cavium,t-oe = <125>;
+ cavium,t-we = <150>;
+ cavium,t-rd-hld = <100>;
+ cavium,t-wr-hld = <300>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <300>;
+ cavium,t-page = <310>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,bus-width = <16>;
+ };
+ cavium,cs-config@6 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <6>;
+ cavium,t-adr = <0>;
+ cavium,t-ce = <30>;
+ cavium,t-oe = <125>;
+ cavium,t-we = <150>;
+ cavium,t-rd-hld = <100>;
+ cavium,t-wr-hld = <30>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <30>;
+ cavium,t-page = <310>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,wait-mode;
+ cavium,bus-width = <16>;
+ };
+
+ flash0: nor@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0 0x800000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "bootloader";
+ reg = <0 0x200000>;
+ read-only;
+ };
+ partition@200000 {
+ label = "kernel";
+ reg = <0x200000 0x200000>;
+ };
+ partition@400000 {
+ label = "cramfs";
+ reg = <0x400000 0x3fe000>;
+ };
+ partition@7fe000 {
+ label = "environment";
+ reg = <0x7fe000 0x2000>;
+ read-only;
+ };
+ };
+
+ led0: led-display@4,0 {
+ compatible = "avago,hdsp-253x";
+ reg = <4 0x20 0x20>, <4 0 0x20>;
+ };
+
+ compact-flash@5,0 {
+ compatible = "cavium,ebt3000-compact-flash";
+ reg = <5 0 0x10000>, <6 0 0x10000>;
+ cavium,bus-width = <16>;
+ cavium,true-ide;
+ cavium,dma-engine-handle = <&dma0>;
+ };
+ };
+
+ dma0: dma-engine@1180000000100 {
+ compatible = "cavium,octeon-5750-bootbus-dma";
+ reg = <0x11800 0x00000100 0x0 0x8>;
+ interrupts = <0 63>;
+ };
+ dma1: dma-engine@1180000000108 {
+ compatible = "cavium,octeon-5750-bootbus-dma";
+ reg = <0x11800 0x00000108 0x0 0x8>;
+ interrupts = <0 63>;
+ };
+
+ uctl: uctl@118006f000000 {
+ compatible = "cavium,octeon-6335-uctl";
+ reg = <0x11800 0x6f000000 0x0 0x100>;
+ ranges; /* Direct mapping */
+ #address-cells = <2>;
+ #size-cells = <2>;
+ /* 12MHz, 24MHz and 48MHz allowed */
+ refclk-frequency = <12000000>;
+ /* Either "crystal" or "external" */
+ refclk-type = "crystal";
+
+ ehci@16f0000000000 {
+ compatible = "cavium,octeon-6335-ehci","usb-ehci";
+ reg = <0x16f00 0x00000000 0x0 0x100>;
+ interrupts = <3 44>;
+ big-endian-regs;
+ };
+ ohci@16f0000000400 {
+ compatible = "cavium,octeon-6335-ohci","usb-ohci";
+ reg = <0x16f00 0x00000400 0x0 0x100>;
+ interrupts = <3 44>;
+ big-endian-regs;
+ };
+ };
+ };
+
+ aliases {
+ mix0 = &mix0;
+ pip = &pip;
+ smi0 = &smi0;
+ smi1 = &smi1;
+ smi2 = &smi2;
+ smi3 = &smi3;
+ twsi0 = &twsi0;
+ twsi1 = &twsi1;
+ uart0 = &uart0;
+ uart1 = &uart1;
+ uctl = &uctl;
+ led0 = &led0;
+ flash0 = &flash0;
+ };
+ };
diff --git a/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts b/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts
new file mode 100644
index 000000000..cb219b730
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Device tree source for EdgeRouter Lite.
+ *
+ * Written by: Aaro Koskinen <aaro.koskinen@iki.fi>
+ */
+
+/include/ "octeon_3xxx.dtsi"
+
+/ {
+ model = "ubnt,e100";
+
+ soc@0 {
+ smi0: mdio@1180000001800 {
+ phy5: ethernet-phy@5 {
+ reg = <5>;
+ compatible = "ethernet-phy-ieee802.3-c22";
+ };
+ phy6: ethernet-phy@6 {
+ reg = <6>;
+ compatible = "ethernet-phy-ieee802.3-c22";
+ };
+ phy7: ethernet-phy@7 {
+ reg = <7>;
+ compatible = "ethernet-phy-ieee802.3-c22";
+ };
+ };
+
+ pip: pip@11800a0000000 {
+ interface@0 {
+ ethernet@0 {
+ phy-handle = <&phy7>;
+ rx-delay = <0>;
+ tx-delay = <0x10>;
+ };
+ ethernet@1 {
+ phy-handle = <&phy6>;
+ rx-delay = <0>;
+ tx-delay = <0x10>;
+ };
+ ethernet@2 {
+ phy-handle = <&phy5>;
+ rx-delay = <0>;
+ tx-delay = <0x10>;
+ };
+ };
+ };
+
+ uart0: serial@1180000000800 {
+ clock-frequency = <500000000>;
+ };
+
+ usbn: usbn@1180068000000 {
+ refclk-frequency = <12000000>;
+ refclk-type = "crystal";
+ };
+ };
+
+ aliases {
+ pip = &pip;
+ };
+};
diff --git a/arch/mips/boot/dts/img/Makefile b/arch/mips/boot/dts/img/Makefile
new file mode 100644
index 000000000..441a3c16e
--- /dev/null
+++ b/arch/mips/boot/dts/img/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += boston.dtb
+
+dtb-$(CONFIG_MACH_PISTACHIO) += pistachio_marduk.dtb
+obj-$(CONFIG_MACH_PISTACHIO) += pistachio_marduk.dtb.o
diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts
new file mode 100644
index 000000000..84328afa3
--- /dev/null
+++ b/arch/mips/boot/dts/img/boston.dts
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include <dt-bindings/clock/boston-clock.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/mips-gic.h>
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "img,boston";
+
+ chosen {
+ stdout-path = "uart0:115200";
+ };
+
+ aliases {
+ uart0 = &uart0;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "img,mips";
+ reg = <0>;
+ clocks = <&clk_boston BOSTON_CLK_CPU>;
+ };
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>;
+ };
+
+ pci0: pci@10000000 {
+ compatible = "xlnx,axi-pcie-host-1.00.a";
+ device_type = "pci";
+ reg = <0x10000000 0x2000000>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 2 IRQ_TYPE_LEVEL_HIGH>;
+
+ ranges = <0x02000000 0 0x40000000
+ 0x40000000 0 0x40000000>;
+
+ bus-range = <0x00 0xff>;
+
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pci0_intc 1>,
+ <0 0 0 2 &pci0_intc 2>,
+ <0 0 0 3 &pci0_intc 3>,
+ <0 0 0 4 &pci0_intc 4>;
+
+ pci0_intc: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ pci1: pci@12000000 {
+ compatible = "xlnx,axi-pcie-host-1.00.a";
+ device_type = "pci";
+ reg = <0x12000000 0x2000000>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 1 IRQ_TYPE_LEVEL_HIGH>;
+
+ ranges = <0x02000000 0 0x20000000
+ 0x20000000 0 0x20000000>;
+
+ bus-range = <0x00 0xff>;
+
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pci1_intc 1>,
+ <0 0 0 2 &pci1_intc 2>,
+ <0 0 0 3 &pci1_intc 3>,
+ <0 0 0 4 &pci1_intc 4>;
+
+ pci1_intc: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ pci2: pci@14000000 {
+ compatible = "xlnx,axi-pcie-host-1.00.a";
+ device_type = "pci";
+ reg = <0x14000000 0x2000000>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 0 IRQ_TYPE_LEVEL_HIGH>;
+
+ ranges = <0x02000000 0 0x16000000
+ 0x16000000 0 0x100000>;
+
+ bus-range = <0x00 0xff>;
+
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pci2_intc 1>,
+ <0 0 0 2 &pci2_intc 2>,
+ <0 0 0 3 &pci2_intc 3>,
+ <0 0 0 4 &pci2_intc 4>;
+
+ pci2_intc: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+
+ pci2_root@0,0,0 {
+ compatible = "pci10ee,7021";
+ reg = <0x00000000 0 0 0 0>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ eg20t_bridge@1,0,0 {
+ compatible = "pci8086,8800";
+ reg = <0x00010000 0 0 0 0>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ eg20t_phub@2,0,0 {
+ compatible = "pci8086,8801";
+ reg = <0x00020000 0 0 0 0>;
+ intel,eg20t-prefetch = <0>;
+ };
+
+ eg20t_mac@2,0,1 {
+ compatible = "pci8086,8802";
+ reg = <0x00020100 0 0 0 0>;
+ phy-reset-gpios = <&eg20t_gpio 6
+ GPIO_ACTIVE_LOW>;
+ };
+
+ eg20t_gpio: eg20t_gpio@2,0,2 {
+ compatible = "pci8086,8803";
+ reg = <0x00020200 0 0 0 0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ eg20t_i2c@2,12,2 {
+ compatible = "pci8086,8817";
+ reg = <0x00026200 0 0 0 0>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@68 {
+ compatible = "st,m41t81s";
+ reg = <0x68>;
+ };
+ };
+ };
+ };
+ };
+
+ gic: interrupt-controller@16120000 {
+ compatible = "mti,gic";
+ reg = <0x16120000 0x20000>;
+
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ timer {
+ compatible = "mti,gic-timer";
+ interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
+ clocks = <&clk_boston BOSTON_CLK_CPU>;
+ };
+ };
+
+ cdmm@16140000 {
+ compatible = "mti,mips-cdmm";
+ reg = <0x16140000 0x8000>;
+ };
+
+ cpc@16200000 {
+ compatible = "mti,mips-cpc";
+ reg = <0x16200000 0x8000>;
+ };
+
+ plat_regs: system-controller@17ffd000 {
+ compatible = "img,boston-platform-regs", "syscon";
+ reg = <0x17ffd000 0x1000>;
+
+ clk_boston: clock {
+ compatible = "img,boston-clock";
+ #clock-cells = <1>;
+ };
+ };
+
+ reboot: syscon-reboot {
+ compatible = "syscon-reboot";
+ regmap = <&plat_regs>;
+ offset = <0x10>;
+ mask = <0x10>;
+ };
+
+ uart0: uart@17ffe000 {
+ compatible = "ns16550a";
+ reg = <0x17ffe000 0x1000>;
+ reg-shift = <2>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 3 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&clk_boston BOSTON_CLK_SYS>;
+ };
+
+ lcd: lcd@17fff000 {
+ compatible = "img,boston-lcd";
+ reg = <0x17fff000 0x8>;
+ };
+};
diff --git a/arch/mips/boot/dts/img/pistachio.dtsi b/arch/mips/boot/dts/img/pistachio.dtsi
new file mode 100644
index 000000000..dc3b7909d
--- /dev/null
+++ b/arch/mips/boot/dts/img/pistachio.dtsi
@@ -0,0 +1,920 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015, 2016 Imagination Technologies Ltd.
+ * Copyright (C) 2015 Google, Inc.
+ */
+
+#include <dt-bindings/clock/pistachio-clk.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/mips-gic.h>
+#include <dt-bindings/reset/pistachio-resets.h>
+
+/ {
+ compatible = "img,pistachio";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ interrupt-parent = <&gic>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "mti,interaptiv";
+ reg = <0>;
+ clocks = <&clk_core CLK_MIPS_PLL>;
+ clock-names = "cpu";
+ clock-latency = <1000>;
+ operating-points = <
+ /* kHz uV(dummy) */
+ 546000 1150000
+ 520000 1100000
+ 494000 1000000
+ 468000 950000
+ 442000 900000
+ 416000 800000
+ >;
+ };
+ };
+
+ i2c0: i2c@18100000 {
+ compatible = "img,scb-i2c";
+ reg = <0x18100000 0x200>;
+ interrupts = <GIC_SHARED 2 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_periph PERIPH_CLK_I2C0>,
+ <&cr_periph SYS_CLK_I2C0>;
+ clock-names = "scb", "sys";
+ assigned-clocks = <&clk_periph PERIPH_CLK_I2C0_PRE_DIV>,
+ <&clk_periph PERIPH_CLK_I2C0_DIV>;
+ assigned-clock-rates = <100000000>, <33333334>;
+ status = "disabled";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c1: i2c@18100200 {
+ compatible = "img,scb-i2c";
+ reg = <0x18100200 0x200>;
+ interrupts = <GIC_SHARED 3 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_periph PERIPH_CLK_I2C1>,
+ <&cr_periph SYS_CLK_I2C1>;
+ clock-names = "scb", "sys";
+ assigned-clocks = <&clk_periph PERIPH_CLK_I2C1_PRE_DIV>,
+ <&clk_periph PERIPH_CLK_I2C1_DIV>;
+ assigned-clock-rates = <100000000>, <33333334>;
+ status = "disabled";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c2: i2c@18100400 {
+ compatible = "img,scb-i2c";
+ reg = <0x18100400 0x200>;
+ interrupts = <GIC_SHARED 4 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_periph PERIPH_CLK_I2C2>,
+ <&cr_periph SYS_CLK_I2C2>;
+ clock-names = "scb", "sys";
+ assigned-clocks = <&clk_periph PERIPH_CLK_I2C2_PRE_DIV>,
+ <&clk_periph PERIPH_CLK_I2C2_DIV>;
+ assigned-clock-rates = <100000000>, <33333334>;
+ status = "disabled";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c3: i2c@18100600 {
+ compatible = "img,scb-i2c";
+ reg = <0x18100600 0x200>;
+ interrupts = <GIC_SHARED 5 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_periph PERIPH_CLK_I2C3>,
+ <&cr_periph SYS_CLK_I2C3>;
+ clock-names = "scb", "sys";
+ assigned-clocks = <&clk_periph PERIPH_CLK_I2C3_PRE_DIV>,
+ <&clk_periph PERIPH_CLK_I2C3_DIV>;
+ assigned-clock-rates = <100000000>, <33333334>;
+ status = "disabled";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_pins>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2s_in: i2s-in@18100800 {
+ compatible = "img,i2s-in";
+ reg = <0x18100800 0x200>;
+ interrupts = <GIC_SHARED 7 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&mdc 30 0xffffffff 0>;
+ dma-names = "rx";
+ clocks = <&cr_periph SYS_CLK_I2S_IN>;
+ clock-names = "sys";
+ img,i2s-channels = <6>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s_in_pins>;
+ status = "disabled";
+
+ #sound-dai-cells = <0>;
+ };
+
+ i2s_out: i2s-out@18100a00 {
+ compatible = "img,i2s-out";
+ reg = <0x18100a00 0x200>;
+ interrupts = <GIC_SHARED 13 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&mdc 23 0xffffffff 0>;
+ dma-names = "tx";
+ clocks = <&cr_periph SYS_CLK_I2S_OUT>,
+ <&clk_core CLK_I2S>;
+ clock-names = "sys", "ref";
+ assigned-clocks = <&clk_core CLK_I2S_DIV>;
+ assigned-clock-rates = <12288000>;
+ img,i2s-channels = <6>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s_out_pins>;
+ status = "disabled";
+ resets = <&pistachio_reset PISTACHIO_RESET_I2S_OUT>;
+ reset-names = "rst";
+ #sound-dai-cells = <0>;
+ };
+
+ parallel_out: parallel-audio-out@18100c00 {
+ compatible = "img,parallel-out";
+ reg = <0x18100c00 0x100>;
+ interrupts = <GIC_SHARED 19 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&mdc 16 0xffffffff 0>;
+ dma-names = "tx";
+ clocks = <&cr_periph SYS_CLK_PAUD_OUT>,
+ <&clk_core CLK_AUDIO_DAC>;
+ clock-names = "sys", "ref";
+ assigned-clocks = <&clk_core CLK_AUDIO_DAC_DIV>;
+ assigned-clock-rates = <12288000>;
+ status = "disabled";
+ resets = <&pistachio_reset PISTACHIO_RESET_PRL_OUT>;
+ reset-names = "rst";
+ #sound-dai-cells = <0>;
+ };
+
+ spdif_out: spdif-out@18100d00 {
+ compatible = "img,spdif-out";
+ reg = <0x18100d00 0x100>;
+ interrupts = <GIC_SHARED 21 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&mdc 14 0xffffffff 0>;
+ dma-names = "tx";
+ clocks = <&cr_periph SYS_CLK_SPDIF_OUT>,
+ <&clk_core CLK_SPDIF>;
+ clock-names = "sys", "ref";
+ assigned-clocks = <&clk_core CLK_SPDIF_DIV>;
+ assigned-clock-rates = <12288000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spdif_out_pin>;
+ status = "disabled";
+ resets = <&pistachio_reset PISTACHIO_RESET_SPDIF_OUT>;
+ reset-names = "rst";
+ #sound-dai-cells = <0>;
+ };
+
+ spdif_in: spdif-in@18100e00 {
+ compatible = "img,spdif-in";
+ reg = <0x18100e00 0x100>;
+ interrupts = <GIC_SHARED 20 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&mdc 15 0xffffffff 0>;
+ dma-names = "rx";
+ clocks = <&cr_periph SYS_CLK_SPDIF_IN>;
+ clock-names = "sys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spdif_in_pin>;
+ status = "disabled";
+
+ #sound-dai-cells = <0>;
+ };
+
+ internal_dac: internal-dac {
+ compatible = "img,pistachio-internal-dac";
+ img,cr-top = <&cr_top>;
+ img,voltage-select = <1>;
+
+ #sound-dai-cells = <0>;
+ };
+
+ spfi0: spi@18100f00 {
+ compatible = "img,spfi";
+ reg = <0x18100f00 0x100>;
+ interrupts = <GIC_SHARED 22 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_core CLK_SPI0>, <&cr_periph SYS_CLK_SPI0_MASTER>;
+ clock-names = "sys", "spfi";
+ dmas = <&mdc 9 0xffffffff 0>, <&mdc 10 0xffffffff 0>;
+ dma-names = "rx", "tx";
+ spfi-max-frequency = <50000000>;
+ status = "disabled";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ spfi1: spi@18101000 {
+ compatible = "img,spfi";
+ reg = <0x18101000 0x100>;
+ interrupts = <GIC_SHARED 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_core CLK_SPI1>, <&cr_periph SYS_CLK_SPI1>;
+ clock-names = "sys", "spfi";
+ dmas = <&mdc 1 0xffffffff 0>, <&mdc 2 0xffffffff 0>;
+ dma-names = "rx", "tx";
+ img,supports-quad-mode;
+ spfi-max-frequency = <50000000>;
+ status = "disabled";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ pwm: pwm@18101300 {
+ compatible = "img,pistachio-pwm";
+ reg = <0x18101300 0x100>;
+ clocks = <&clk_periph PERIPH_CLK_PWM>,
+ <&cr_periph SYS_CLK_PWM>;
+ clock-names = "pwm", "sys";
+ img,cr-periph = <&cr_periph>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ uart0: uart@18101400 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x18101400 0x100>;
+ interrupts = <GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_core CLK_UART0>, <&cr_periph SYS_CLK_UART0>;
+ clock-names = "baudclk", "apb_pclk";
+ assigned-clocks = <&clk_core CLK_UART0_INTERNAL_DIV>,
+ <&clk_core CLK_UART0_DIV>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ pinctrl-0 = <&uart0_pins>, <&uart0_rts_cts_pins>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ uart1: uart@18101500 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x18101500 0x100>;
+ interrupts = <GIC_SHARED 25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_core CLK_UART1>, <&cr_periph SYS_CLK_UART1>;
+ clock-names = "baudclk", "apb_pclk";
+ assigned-clocks = <&clk_core CLK_UART1_INTERNAL_DIV>,
+ <&clk_core CLK_UART1_DIV>;
+ assigned-clock-rates = <114278400>, <1843200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ pinctrl-0 = <&uart1_pins>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ adc: adc@18101600 {
+ compatible = "cosmic,10001-adc";
+ reg = <0x18101600 0x24>;
+ adc-reserved-channels = <0x30>;
+ clocks = <&clk_core CLK_AUX_ADC>;
+ clock-names = "adc";
+ assigned-clocks = <&clk_core CLK_AUX_ADC_INTERNAL_DIV>,
+ <&clk_core CLK_AUX_ADC_DIV>;
+ assigned-clock-rates = <100000000>, <1000000>;
+ status = "disabled";
+
+ #io-channel-cells = <1>;
+ };
+
+ pinctrl: pinctrl@18101c00 {
+ compatible = "img,pistachio-system-pinctrl";
+ reg = <0x18101c00 0x400>;
+
+ gpio0: gpio0 {
+ interrupts = <GIC_SHARED 71 IRQ_TYPE_LEVEL_HIGH>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 0 16>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio1 {
+ interrupts = <GIC_SHARED 72 IRQ_TYPE_LEVEL_HIGH>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 16 16>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio2 {
+ interrupts = <GIC_SHARED 73 IRQ_TYPE_LEVEL_HIGH>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 32 16>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio3 {
+ interrupts = <GIC_SHARED 74 IRQ_TYPE_LEVEL_HIGH>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 48 16>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio4 {
+ interrupts = <GIC_SHARED 75 IRQ_TYPE_LEVEL_HIGH>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 64 16>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio5: gpio5 {
+ interrupts = <GIC_SHARED 76 IRQ_TYPE_LEVEL_HIGH>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 80 10>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ i2c0_pins: i2c0-pins {
+ pin_i2c0: i2c0 {
+ pins = "mfio28", "mfio29";
+ function = "i2c0";
+ drive-strength = <4>;
+ };
+ };
+
+ i2c1_pins: i2c1-pins {
+ pin_i2c1: i2c1 {
+ pins = "mfio30", "mfio31";
+ function = "i2c1";
+ drive-strength = <4>;
+ };
+ };
+
+ i2c2_pins: i2c2-pins {
+ pin_i2c2: i2c2 {
+ pins = "mfio32", "mfio33";
+ function = "i2c2";
+ drive-strength = <4>;
+ };
+ };
+
+ i2c3_pins: i2c3-pins {
+ pin_i2c3: i2c3 {
+ pins = "mfio34", "mfio35";
+ function = "i2c3";
+ drive-strength = <4>;
+ };
+ };
+
+ spim0_pins: spim0-pins {
+ pin_spim0: spim0 {
+ pins = "mfio9", "mfio10";
+ function = "spim0";
+ drive-strength = <4>;
+ };
+ spim0_clk: spim0-clk {
+ pins = "mfio8";
+ function = "spim0";
+ drive-strength = <4>;
+ };
+ };
+
+ spim0_cs0_alt_pin: spim0-cs0-alt-pin {
+ spim0-cs0 {
+ pins = "mfio2";
+ drive-strength = <2>;
+ };
+ };
+
+ spim0_cs1_pin: spim0-cs1-pin {
+ spim0-cs1 {
+ pins = "mfio1";
+ drive-strength = <2>;
+ };
+ };
+
+ spim0_cs2_pin: spim0-cs2-pin {
+ spim0-cs2 {
+ pins = "mfio55";
+ drive-strength = <2>;
+ };
+ };
+
+ spim0_cs2_alt_pin: spim0-cs2-alt-pin {
+ spim0-cs2 {
+ pins = "mfio28";
+ drive-strength = <2>;
+ };
+ };
+
+ spim0_cs3_pin: spim0-cs3-pin {
+ spim0-cs3 {
+ pins = "mfio56";
+ drive-strength = <2>;
+ };
+ };
+
+ spim0_cs3_alt_pin: spim0-cs3-alt-pin {
+ spim0-cs3 {
+ pins = "mfio29";
+ drive-strength = <2>;
+ };
+ };
+
+ spim0_cs4_pin: spim0-cs4-pin {
+ spim0-cs4 {
+ pins = "mfio57";
+ drive-strength = <2>;
+ };
+ };
+
+ spim0_cs4_alt_pin: spim0-cs4-alt-pin {
+ spim0-cs4 {
+ pins = "mfio30";
+ drive-strength = <2>;
+ };
+ };
+
+ spim1_pins: spim1-pins {
+ spim1 {
+ pins = "mfio3", "mfio4", "mfio5";
+ function = "spim1";
+ drive-strength = <2>;
+ };
+ };
+
+ spim1_quad_pins: spim1-quad-pins {
+ spim1-quad {
+ pins = "mfio6", "mfio7";
+ function = "spim1";
+ drive-strength = <2>;
+ };
+ };
+
+ spim1_cs0_pin: spim1-cs0-pins {
+ spim1-cs0 {
+ pins = "mfio0";
+ function = "spim1";
+ drive-strength = <2>;
+ };
+ };
+
+ spim1_cs1_pin: spim1-cs1-pin {
+ spim1-cs1 {
+ pins = "mfio1";
+ function = "spim1";
+ drive-strength = <2>;
+ };
+ };
+
+ spim1_cs1_alt_pin: spim1-cs1-alt-pin {
+ spim1-cs1 {
+ pins = "mfio58";
+ function = "spim1";
+ drive-strength = <2>;
+ };
+ };
+
+ spim1_cs2_pin: spim1-cs2-pin {
+ spim1-cs2 {
+ pins = "mfio2";
+ function = "spim1";
+ drive-strength = <2>;
+ };
+ };
+
+ spim1_cs2_alt0_pin: spim1-cs2-alt0-pin {
+ spim1-cs2 {
+ pins = "mfio31";
+ function = "spim1";
+ drive-strength = <2>;
+ };
+ };
+
+ spim1_cs2_alt1_pin: spim1-cs2-alt1-pin {
+ spim1-cs2 {
+ pins = "mfio55";
+ function = "spim1";
+ drive-strength = <2>;
+ };
+ };
+
+ spim1_cs3_pin: spim1-cs3-pin {
+ spim1-cs3 {
+ pins = "mfio56";
+ function = "spim1";
+ drive-strength = <2>;
+ };
+ };
+
+ spim1_cs4_pin: spim1-cs4-pin {
+ spim1-cs4 {
+ pins = "mfio57";
+ function = "spim1";
+ drive-strength = <2>;
+ };
+ };
+
+ uart0_pins: uart0-pins {
+ uart0 {
+ pins = "mfio55", "mfio56";
+ function = "uart0";
+ drive-strength = <2>;
+ };
+ };
+
+ uart0_rts_cts_pins: uart0-rts-cts-pins {
+ uart0-rts-cts {
+ pins = "mfio57", "mfio58";
+ function = "uart0";
+ drive-strength = <2>;
+ };
+ };
+
+ uart1_pins: uart1-pins {
+ uart1 {
+ pins = "mfio59", "mfio60";
+ function = "uart1";
+ drive-strength = <2>;
+ };
+ };
+
+ uart1_rts_cts_pins: uart1-rts-cts-pins {
+ uart1-rts-cts {
+ pins = "mfio1", "mfio2";
+ function = "uart1";
+ drive-strength = <2>;
+ };
+ };
+
+ enet_pins: enet-pins {
+ pin_enet: enet {
+ pins = "mfio63", "mfio64", "mfio65", "mfio66",
+ "mfio67", "mfio68", "mfio69", "mfio70";
+ function = "eth";
+ slew-rate = <1>;
+ drive-strength = <4>;
+ };
+ pin_enet_phy_clk: enet-phy-clk {
+ pins = "mfio71";
+ function = "eth";
+ slew-rate = <1>;
+ drive-strength = <8>;
+ };
+ };
+
+ sdhost_pins: sdhost-pins {
+ pin_sdhost_clk: sdhost-clk {
+ pins = "mfio15";
+ function = "sdhost";
+ slew-rate = <1>;
+ drive-strength = <4>;
+ };
+ pin_sdhost_cmd: sdhost-cmd {
+ pins = "mfio16";
+ function = "sdhost";
+ slew-rate = <1>;
+ drive-strength = <4>;
+ };
+ pin_sdhost_data: sdhost-data {
+ pins = "mfio17", "mfio18", "mfio19", "mfio20",
+ "mfio21", "mfio22", "mfio23", "mfio24";
+ function = "sdhost";
+ slew-rate = <1>;
+ drive-strength = <4>;
+ };
+ pin_sdhost_power_select: sdhost-power-select {
+ pins = "mfio25";
+ function = "sdhost";
+ slew-rate = <1>;
+ drive-strength = <2>;
+ };
+ pin_sdhost_card_detect: sdhost-card-detect {
+ pins = "mfio26";
+ function = "sdhost";
+ drive-strength = <2>;
+ };
+ pin_sdhost_write_protect: sdhost-write-protect {
+ pins = "mfio27";
+ function = "sdhost";
+ drive-strength = <2>;
+ };
+ };
+
+ ir_pin: ir-pin {
+ ir-data {
+ pins = "mfio72";
+ function = "ir";
+ drive-strength = <2>;
+ };
+ };
+
+ pwmpdm0_pin: pwmpdm0-pin {
+ pwmpdm0 {
+ pins = "mfio73";
+ function = "pwmpdm";
+ drive-strength = <2>;
+ };
+ };
+
+ pwmpdm1_pin: pwmpdm1-pin {
+ pwmpdm1 {
+ pins = "mfio74";
+ function = "pwmpdm";
+ drive-strength = <2>;
+ };
+ };
+
+ pwmpdm2_pin: pwmpdm2-pin {
+ pwmpdm2 {
+ pins = "mfio75";
+ function = "pwmpdm";
+ drive-strength = <2>;
+ };
+ };
+
+ pwmpdm3_pin: pwmpdm3-pin {
+ pwmpdm3 {
+ pins = "mfio76";
+ function = "pwmpdm";
+ drive-strength = <2>;
+ };
+ };
+
+ dac_clk_pin: dac-clk-pin {
+ pin_dac_clk: dac-clk {
+ pins = "mfio45";
+ function = "i2s_dac_clk";
+ drive-strength = <4>;
+ };
+ };
+
+ i2s_mclk_pin: i2s-mclk-pin {
+ pin_i2s_mclk: i2s-mclk {
+ pins = "mfio36";
+ function = "i2s_out";
+ drive-strength = <4>;
+ };
+ };
+
+ spdif_out_pin: spdif-out-pin {
+ spdif-out {
+ pins = "mfio61";
+ function = "spdif_out";
+ slew-rate = <1>;
+ drive-strength = <2>;
+ };
+ };
+
+ spdif_in_pin: spdif-in-pin {
+ spdif-in {
+ pins = "mfio62";
+ function = "spdif_in";
+ drive-strength = <2>;
+ };
+ };
+
+ i2s_out_pins: i2s-out-pins {
+ pins_i2s_out_clk: i2s-out-clk {
+ pins = "mfio37", "mfio38";
+ function = "i2s_out";
+ drive-strength = <4>;
+ };
+ pins_i2s_out: i2s-out {
+ pins = "mfio39", "mfio40",
+ "mfio41", "mfio42",
+ "mfio43", "mfio44";
+ function = "i2s_out";
+ drive-strength = <2>;
+ };
+ };
+
+ i2s_in_pins: i2s-in-pins {
+ i2s-in {
+ pins = "mfio47", "mfio48", "mfio49",
+ "mfio50", "mfio51", "mfio52",
+ "mfio53", "mfio54";
+ function = "i2s_in";
+ drive-strength = <2>;
+ };
+ };
+ };
+
+ timer: timer@18102000 {
+ compatible = "img,pistachio-gptimer";
+ reg = <0x18102000 0x100>;
+ interrupts = <GIC_SHARED 60 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_periph PERIPH_CLK_COUNTER_FAST>,
+ <&cr_periph SYS_CLK_TIMER>;
+ clock-names = "fast", "sys";
+ img,cr-periph = <&cr_periph>;
+ };
+
+ wdt: watchdog@18102100 {
+ compatible = "img,pdc-wdt";
+ reg = <0x18102100 0x100>;
+ interrupts = <GIC_SHARED 52 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_periph PERIPH_CLK_WD>, <&cr_periph SYS_CLK_WD>;
+ clock-names = "wdt", "sys";
+ assigned-clocks = <&clk_periph PERIPH_CLK_WD_PRE_DIV>,
+ <&clk_periph PERIPH_CLK_WD_DIV>;
+ assigned-clock-rates = <4000000>, <32768>;
+ };
+
+ ir: ir@18102200 {
+ compatible = "img,ir-rev1";
+ reg = <0x18102200 0x100>;
+ interrupts = <GIC_SHARED 51 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_periph PERIPH_CLK_IR>, <&cr_periph SYS_CLK_IR>;
+ clock-names = "core", "sys";
+ assigned-clocks = <&clk_periph PERIPH_CLK_IR_PRE_DIV>,
+ <&clk_periph PERIPH_CLK_IR_DIV>;
+ assigned-clock-rates = <4000000>, <32768>;
+ pinctrl-0 = <&ir_pin>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ usb: usb@18120000 {
+ compatible = "snps,dwc2";
+ reg = <0x18120000 0x1c000>;
+ interrupts = <GIC_SHARED 49 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&usb_phy>;
+ phy-names = "usb2-phy";
+ g-tx-fifo-size = <256 256 256 256>;
+ status = "disabled";
+ };
+
+ enet: ethernet@18140000 {
+ compatible = "snps,dwmac";
+ reg = <0x18140000 0x2000>;
+ interrupts = <GIC_SHARED 50 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clocks = <&clk_core CLK_ENET>, <&cr_periph SYS_CLK_ENET>;
+ clock-names = "stmmaceth", "pclk";
+ assigned-clocks = <&clk_core CLK_ENET_MUX>,
+ <&clk_core CLK_ENET_DIV>;
+ assigned-clock-parents = <&clk_core CLK_SYS_INTERNAL_DIV>;
+ assigned-clock-rates = <0>, <50000000>;
+ pinctrl-0 = <&enet_pins>;
+ pinctrl-names = "default";
+ phy-mode = "rmii";
+ status = "disabled";
+ };
+
+ sdhost: mmc@18142000 {
+ compatible = "img,pistachio-dw-mshc";
+ reg = <0x18142000 0x400>;
+ interrupts = <GIC_SHARED 39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_core CLK_SD_HOST>, <&cr_periph SYS_CLK_SD_HOST>;
+ clock-names = "ciu", "biu";
+ pinctrl-0 = <&sdhost_pins>;
+ pinctrl-names = "default";
+ fifo-depth = <0x20>;
+ clock-frequency = <50000000>;
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ status = "disabled";
+ };
+
+ sram: sram@1b000000 {
+ compatible = "mmio-sram";
+ reg = <0x1b000000 0x10000>;
+ };
+
+ mdc: dma-controller@18143000 {
+ compatible = "img,pistachio-mdc-dma";
+ reg = <0x18143000 0x1000>;
+ interrupts = <GIC_SHARED 27 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 28 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 29 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 30 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 31 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 32 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 33 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 34 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 35 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 36 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 37 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SHARED 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cr_periph SYS_CLK_MDC>;
+ clock-names = "sys";
+
+ img,max-burst-multiplier = <16>;
+ img,cr-periph = <&cr_periph>;
+
+ #dma-cells = <3>;
+ };
+
+ clk_core: clk@18144000 {
+ compatible = "img,pistachio-clk", "syscon";
+ clocks = <&xtal>, <&cr_top EXT_CLK_AUDIO_IN>,
+ <&cr_top EXT_CLK_ENET_IN>;
+ clock-names = "xtal", "audio_refclk_ext_gate",
+ "ext_enet_in_gate";
+ reg = <0x18144000 0x800>;
+ #clock-cells = <1>;
+ };
+
+ clk_periph: clk@18144800 {
+ compatible = "img,pistachio-clk-periph";
+ reg = <0x18144800 0x1000>;
+ clocks = <&clk_core CLK_PERIPH_SYS>;
+ clock-names = "periph_sys_core";
+ #clock-cells = <1>;
+ };
+
+ cr_periph: clk@18148000 {
+ compatible = "img,pistachio-cr-periph", "syscon", "simple-bus";
+ reg = <0x18148000 0x1000>;
+ clocks = <&clk_periph PERIPH_CLK_SYS>;
+ clock-names = "sys";
+ #clock-cells = <1>;
+
+ pistachio_reset: reset-controller {
+ compatible = "img,pistachio-reset";
+ #reset-cells = <1>;
+ };
+ };
+
+ cr_top: clk@18149000 {
+ compatible = "img,pistachio-cr-top", "syscon";
+ reg = <0x18149000 0x200>;
+ #clock-cells = <1>;
+ };
+
+ hash: hash@18149600 {
+ compatible = "img,hash-accelerator";
+ reg = <0x18149600 0x100>, <0x18101100 0x4>;
+ interrupts = <GIC_SHARED 59 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&mdc 8 0xffffffff 0>;
+ dma-names = "tx";
+ clocks = <&cr_periph SYS_CLK_HASH>,
+ <&clk_periph PERIPH_CLK_ROM>;
+ clock-names = "sys", "hash";
+ };
+
+ gic: interrupt-controller@1bdc0000 {
+ compatible = "mti,gic";
+ reg = <0x1bdc0000 0x20000>;
+
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ timer {
+ compatible = "mti,gic-timer";
+ interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
+ clocks = <&clk_core CLK_MIPS>;
+ };
+ };
+
+ usb_phy: usb-phy {
+ compatible = "img,pistachio-usb-phy";
+ clocks = <&clk_core CLK_USB_PHY>;
+ clock-names = "usb_phy";
+ assigned-clocks = <&clk_core CLK_USB_PHY_DIV>;
+ assigned-clock-rates = <50000000>;
+ img,refclk = <0x2>;
+ img,cr-top = <&cr_top>;
+ #phy-cells = <0>;
+ };
+
+ xtal: xtal {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <52000000>;
+ clock-output-names = "xtal";
+ };
+};
diff --git a/arch/mips/boot/dts/img/pistachio_marduk.dts b/arch/mips/boot/dts/img/pistachio_marduk.dts
new file mode 100644
index 000000000..bf69da96d
--- /dev/null
+++ b/arch/mips/boot/dts/img/pistachio_marduk.dts
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015, 2016 Imagination Technologies Ltd.
+ *
+ * IMG Marduk board is also known as Creator Ci40.
+ */
+
+/dts-v1/;
+
+#include "pistachio.dtsi"
+
+/ {
+ model = "IMG Marduk (Creator Ci40)";
+ compatible = "img,pistachio-marduk", "img,pistachio";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ ethernet0 = &enet;
+ spi0 = &spfi0;
+ spi1 = &spfi1;
+ };
+
+ chosen {
+ bootargs = "root=/dev/sda1 rootwait ro lpj=723968";
+ stdout-path = "serial1:115200";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>;
+ };
+
+ reg_1v8: fixed-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "aux_adc_vref";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ };
+
+ internal_dac_supply: internal-dac-supply {
+ compatible = "regulator-fixed";
+ regulator-name = "internal_dac_supply";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ leds {
+ compatible = "pwm-leds";
+ heartbeat {
+ label = "marduk:red:heartbeat";
+ pwms = <&pwm 3 300000>;
+ max-brightness = <255>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ keys {
+ compatible = "gpio-keys";
+ button@1 {
+ label = "Button 1";
+ linux,code = <0x101>; /* BTN_1 */
+ gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
+ };
+ button@2 {
+ label = "Button 2";
+ linux,code = <0x102>; /* BTN_2 */
+ gpios = <&gpio2 14 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&internal_dac {
+ VDD-supply = <&internal_dac_supply>;
+};
+
+&spfi1 {
+ status = "okay";
+
+ pinctrl-0 = <&spim1_pins>, <&spim1_quad_pins>, <&spim1_cs0_pin>,
+ <&spim1_cs1_pin>;
+ pinctrl-names = "default";
+ cs-gpios = <&gpio0 0 GPIO_ACTIVE_HIGH>, <&gpio0 1 GPIO_ACTIVE_HIGH>;
+
+ flash@0 {
+ compatible = "spansion,s25fl016k", "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+ };
+};
+
+&uart0 {
+ status = "okay";
+ assigned-clock-rates = <114278400>, <1843200>;
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&usb {
+ status = "okay";
+};
+
+&enet {
+ status = "okay";
+};
+
+&pin_enet {
+ drive-strength = <2>;
+};
+
+&pin_enet_phy_clk {
+ drive-strength = <2>;
+};
+
+&sdhost {
+ status = "okay";
+ bus-width = <4>;
+ disable-wp;
+};
+
+&pin_sdhost_cmd {
+ drive-strength = <2>;
+};
+
+&pin_sdhost_data {
+ drive-strength = <2>;
+};
+
+&pwm {
+ status = "okay";
+
+ pinctrl-0 = <&pwmpdm0_pin>, <&pwmpdm1_pin>, <&pwmpdm2_pin>,
+ <&pwmpdm3_pin>;
+ pinctrl-names = "default";
+};
+
+&adc {
+ status = "okay";
+ vref-supply = <&reg_1v8>;
+ adc-reserved-channels = <0x10>;
+};
+
+&i2c2 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ tpm@20 {
+ compatible = "infineon,slb9645tt";
+ reg = <0x20>;
+ };
+
+};
+
+&i2c3 {
+ status = "okay";
+ clock-frequency = <400000>;
+};
diff --git a/arch/mips/boot/dts/ingenic/Makefile b/arch/mips/boot/dts/ingenic/Makefile
new file mode 100644
index 000000000..54aa0c4e6
--- /dev/null
+++ b/arch/mips/boot/dts/ingenic/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_JZ4740_QI_LB60) += qi_lb60.dtb
+dtb-$(CONFIG_JZ4740_RS90) += rs90.dtb
+dtb-$(CONFIG_JZ4770_GCW0) += gcw0.dtb
+dtb-$(CONFIG_JZ4780_CI20) += ci20.dtb
+dtb-$(CONFIG_X1000_CU1000_NEO) += cu1000-neo.dtb
+dtb-$(CONFIG_X1830_CU1830_NEO) += cu1830-neo.dtb
+
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
new file mode 100644
index 000000000..75f5bfbf2
--- /dev/null
+++ b/arch/mips/boot/dts/ingenic/ci20.dts
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include "jz4780.dtsi"
+#include <dt-bindings/clock/ingenic,tcu.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/regulator/active-semi,8865-regulator.h>
+
+/ {
+ compatible = "img,ci20", "ingenic,jz4780";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial3 = &uart3;
+ serial4 = &uart4;
+ };
+
+ chosen {
+ stdout-path = &uart4;
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x10000000
+ 0x30000000 0x30000000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ sw1 {
+ label = "ci20:sw1";
+ linux,code = <KEY_F13>;
+ gpios = <&gpd 17 GPIO_ACTIVE_HIGH>;
+ wakeup-source;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led0 {
+ label = "ci20:red:led0";
+ gpios = <&gpc 3 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "none";
+ };
+
+ led1 {
+ label = "ci20:red:led1";
+ gpios = <&gpc 2 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "nand-disk";
+ };
+
+ led2 {
+ label = "ci20:red:led2";
+ gpios = <&gpc 1 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "cpu1";
+ };
+
+ led3 {
+ label = "ci20:red:led3";
+ gpios = <&gpc 0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "cpu0";
+ };
+ };
+
+ eth0_power: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "eth0_power";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpb 25 GPIO_ACTIVE_LOW>;
+ enable-active-high;
+ };
+
+ ir: ir {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpe 3 GPIO_ACTIVE_LOW>;
+ };
+
+ wlan0_power: fixedregulator@1 {
+ compatible = "regulator-fixed";
+ regulator-name = "wlan0_power";
+ gpio = <&gpb 19 GPIO_ACTIVE_LOW>;
+ enable-active-high;
+ };
+};
+
+&ext {
+ clock-frequency = <48000000>;
+};
+
+&mmc0 {
+ status = "okay";
+
+ bus-width = <4>;
+ max-frequency = <50000000>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_mmc0>;
+
+ cd-gpios = <&gpf 20 GPIO_ACTIVE_LOW>;
+};
+
+&mmc1 {
+ status = "okay";
+
+ bus-width = <4>;
+ max-frequency = <50000000>;
+ non-removable;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_mmc1>;
+
+ brcmf: wifi@1 {
+/* reg = <4>;*/
+ compatible = "brcm,bcm4330-fmac";
+ vcc-supply = <&wlan0_power>;
+ device-wakeup-gpios = <&gpd 9 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpf 7 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&uart0 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_uart0>;
+};
+
+&uart1 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_uart1>;
+};
+
+&uart2 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_uart2>;
+ uart-has-rtscts;
+
+ bluetooth {
+ compatible = "brcm,bcm4330-bt";
+ reset-gpios = <&gpf 8 GPIO_ACTIVE_HIGH>;
+ vcc-supply = <&wlan0_power>;
+ device-wakeup-gpios = <&gpf 5 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpf 6 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpf 4 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&uart3 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_uart3>;
+};
+
+&uart4 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_uart4>;
+};
+
+&i2c0 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c0>;
+
+ clock-frequency = <400000>;
+
+ act8600: act8600@5a {
+ compatible = "active-semi,act8600";
+ reg = <0x5a>;
+ status = "okay";
+
+ regulators {
+ vddcore: SUDCDC1 {
+ regulator-name = "DCDC_REG1";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ };
+ vddmem: SUDCDC2 {
+ regulator-name = "DCDC_REG2";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ };
+ vcc_33: SUDCDC3 {
+ regulator-name = "DCDC_REG3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ vcc_50: SUDCDC4 {
+ regulator-name = "SUDCDC_REG4";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+ vcc_25: LDO_REG5 {
+ regulator-name = "LDO_REG5";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+ wifi_io: LDO_REG6 {
+ regulator-name = "LDO_REG6";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+ vcc_28: LDO_REG7 {
+ regulator-name = "LDO_REG7";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ };
+ vcc_15: LDO_REG8 {
+ regulator-name = "LDO_REG8";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ };
+ vrtc_18: LDO_REG9 {
+ regulator-name = "LDO_REG9";
+ /* Despite the datasheet stating 3.3V
+ * for REG9 and the driver expecting that,
+ * REG9 outputs 1.8V.
+ * Likely the CI20 uses a proprietary
+ * factory programmed chip variant.
+ * Since this is a simple on/off LDO the
+ * exact values do not matter.
+ */
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ vcc_11: LDO_REG10 {
+ regulator-name = "LDO_REG10";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&i2c1 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c1>;
+
+};
+
+&i2c2 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c2>;
+
+};
+
+&i2c3 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c3>;
+
+};
+
+&i2c4 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c4>;
+
+ clock-frequency = <400000>;
+
+ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+
+ interrupt-parent = <&gpf>;
+ interrupts = <30 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&nemc {
+ status = "okay";
+
+ nandc: nand-controller@1 {
+ compatible = "ingenic,jz4780-nand";
+ reg = <1 0 0x1000000>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ingenic,bch-controller = <&bch>;
+
+ ingenic,nemc-tAS = <10>;
+ ingenic,nemc-tAH = <5>;
+ ingenic,nemc-tBP = <10>;
+ ingenic,nemc-tAW = <15>;
+ ingenic,nemc-tSTRV = <100>;
+
+ /*
+ * Only CLE/ALE are needed for the devices that are connected, rather
+ * than the full address line set.
+ */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_nemc>;
+
+ nand@1 {
+ reg = <1>;
+
+ nand-ecc-step-size = <1024>;
+ nand-ecc-strength = <24>;
+ nand-ecc-mode = "hw";
+ nand-on-flash-bbt;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_nemc_cs1>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ partition@0 {
+ label = "u-boot-spl";
+ reg = <0x0 0x0 0x0 0x800000>;
+ };
+
+ partition@800000 {
+ label = "u-boot";
+ reg = <0x0 0x800000 0x0 0x200000>;
+ };
+
+ partition@a00000 {
+ label = "u-boot-env";
+ reg = <0x0 0xa00000 0x0 0x200000>;
+ };
+
+ partition@c00000 {
+ label = "boot";
+ reg = <0x0 0xc00000 0x0 0x4000000>;
+ };
+
+ partition@4c00000 {
+ label = "system";
+ reg = <0x0 0x4c00000 0x1 0xfb400000>;
+ };
+ };
+ };
+ };
+
+ dm9000@6 {
+ compatible = "davicom,dm9000";
+ davicom,no-eeprom;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_nemc_cs6>;
+
+ reg = <6 0 1 /* addr */
+ 6 2 1>; /* data */
+
+ ingenic,nemc-tAS = <15>;
+ ingenic,nemc-tAH = <10>;
+ ingenic,nemc-tBP = <20>;
+ ingenic,nemc-tAW = <50>;
+ ingenic,nemc-tSTRV = <100>;
+
+ reset-gpios = <&gpf 12 GPIO_ACTIVE_HIGH>;
+ vcc-supply = <&eth0_power>;
+
+ interrupt-parent = <&gpe>;
+ interrupts = <19 4>;
+
+ nvmem-cells = <&eth0_addr>;
+ nvmem-cell-names = "mac-address";
+ };
+};
+
+&bch {
+ status = "okay";
+};
+
+&pinctrl {
+ pins_uart0: uart0 {
+ function = "uart0";
+ groups = "uart0-data";
+ bias-disable;
+ };
+
+ pins_uart1: uart1 {
+ function = "uart1";
+ groups = "uart1-data";
+ bias-disable;
+ };
+
+ pins_uart2: uart2 {
+ function = "uart2";
+ groups = "uart2-data", "uart2-hwflow";
+ bias-disable;
+ };
+
+ pins_uart3: uart3 {
+ function = "uart3";
+ groups = "uart3-data", "uart3-hwflow";
+ bias-disable;
+ };
+
+ pins_uart4: uart4 {
+ function = "uart4";
+ groups = "uart4-data";
+ bias-disable;
+ };
+
+ pins_i2c0: i2c0 {
+ function = "i2c0";
+ groups = "i2c0-data";
+ bias-disable;
+ };
+
+ pins_i2c1: i2c1 {
+ function = "i2c1";
+ groups = "i2c1-data";
+ bias-disable;
+ };
+
+ pins_i2c2: i2c2 {
+ function = "i2c2";
+ groups = "i2c2-data";
+ bias-disable;
+ };
+
+ pins_i2c3: i2c3 {
+ function = "i2c3";
+ groups = "i2c3-data";
+ bias-disable;
+ };
+
+ pins_i2c4: i2c4 {
+ function = "i2c4";
+ groups = "i2c4-data-e";
+ bias-disable;
+ };
+
+ pins_nemc: nemc {
+ function = "nemc";
+ groups = "nemc-data", "nemc-cle-ale", "nemc-rd-we", "nemc-frd-fwe";
+ bias-disable;
+ };
+
+ pins_nemc_cs1: nemc-cs1 {
+ function = "nemc-cs1";
+ groups = "nemc-cs1";
+ bias-disable;
+ };
+
+ pins_nemc_cs6: nemc-cs6 {
+ function = "nemc-cs6";
+ groups = "nemc-cs6";
+ bias-disable;
+ };
+
+ pins_mmc0: mmc0 {
+ function = "mmc0";
+ groups = "mmc0-1bit-e", "mmc0-4bit-e";
+ bias-disable;
+ };
+
+ pins_mmc1: mmc1 {
+ function = "mmc1";
+ groups = "mmc1-1bit-d", "mmc1-4bit-d";
+ bias-disable;
+ };
+};
+
+&tcu {
+ /* 3 MHz for the system timer and clocksource */
+ assigned-clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER1>;
+ assigned-clock-rates = <3000000>, <3000000>;
+};
diff --git a/arch/mips/boot/dts/ingenic/cu1000-neo.dts b/arch/mips/boot/dts/ingenic/cu1000-neo.dts
new file mode 100644
index 000000000..22a1066d6
--- /dev/null
+++ b/arch/mips/boot/dts/ingenic/cu1000-neo.dts
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include "x1000.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/ingenic,tcu.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ compatible = "yna,cu1000-neo", "ingenic,x1000e";
+ model = "YSH & ATIL General Board CU1000-Neo";
+
+ aliases {
+ serial2 = &uart2;
+ };
+
+ chosen {
+ stdout-path = "serial2:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x04000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led-0 {
+ gpios = <&gpb 21 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+ };
+ };
+
+ wlan_pwrseq: msc1-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+
+ reset-gpios = <&gpc 17 GPIO_ACTIVE_LOW>;
+ post-power-on-delay-ms = <200>;
+ };
+};
+
+&exclk {
+ clock-frequency = <24000000>;
+};
+
+&tcu {
+ /* 1500 kHz for the system timer and clocksource */
+ assigned-clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER2>;
+ assigned-clock-rates = <1500000>, <1500000>;
+
+ /* Use channel #0 for the system timer and channel #2 for the clocksource */
+ ingenic,pwm-channels-mask = <0xfa>;
+};
+
+&uart2 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_uart2>;
+};
+
+&i2c0 {
+ status = "okay";
+
+ clock-frequency = <400000>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c0>;
+
+ ads7830: adc@48 {
+ compatible = "ti,ads7830";
+ reg = <0x48>;
+ };
+};
+
+&msc0 {
+ status = "okay";
+
+ bus-width = <8>;
+ max-frequency = <50000000>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_msc0>;
+
+ non-removable;
+};
+
+&msc1 {
+ status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bus-width = <4>;
+ max-frequency = <50000000>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_msc1>;
+
+ non-removable;
+
+ mmc-pwrseq = <&wlan_pwrseq>;
+
+ ap6212a: wifi@1 {
+ compatible = "brcm,bcm4329-fmac";
+ reg = <1>;
+
+ interrupt-parent = <&gpc>;
+ interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-names = "host-wake";
+
+ brcm,drive-strength = <10>;
+ };
+};
+
+&mac {
+ status = "okay";
+
+ phy-mode = "rmii";
+ phy-handle = <&lan8720a>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_mac>;
+
+ snps,reset-gpio = <&gpc 23 GPIO_ACTIVE_LOW>; /* PC23 */
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 30000>;
+};
+
+&mdio {
+ status = "okay";
+
+ lan8720a: ethernet-phy@0 {
+ compatible = "ethernet-phy-id0007.c0f0", "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+};
+
+&pinctrl {
+ pins_uart2: uart2 {
+ function = "uart2";
+ groups = "uart2-data-d";
+ bias-pull-up;
+ };
+
+ pins_i2c0: i2c0 {
+ function = "i2c0";
+ groups = "i2c0-data";
+ bias-pull-up;
+ };
+
+ pins_msc0: msc0 {
+ function = "mmc0";
+ groups = "mmc0-1bit", "mmc0-4bit", "mmc0-8bit";
+ bias-disable;
+ };
+
+ pins_msc1: msc1 {
+ function = "mmc1";
+ groups = "mmc1-1bit", "mmc1-4bit";
+ bias-disable;
+ };
+
+ pins_mac: mac {
+ function = "mac";
+ groups = "mac";
+ bias-disable;
+ };
+};
diff --git a/arch/mips/boot/dts/ingenic/cu1830-neo.dts b/arch/mips/boot/dts/ingenic/cu1830-neo.dts
new file mode 100644
index 000000000..640f96c00
--- /dev/null
+++ b/arch/mips/boot/dts/ingenic/cu1830-neo.dts
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include "x1830.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/ingenic,tcu.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ compatible = "yna,cu1830-neo", "ingenic,x1830";
+ model = "YSH & ATIL General Board CU1830-Neo";
+
+ aliases {
+ serial1 = &uart1;
+ };
+
+ chosen {
+ stdout-path = "serial1:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x08000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led-0 {
+ gpios = <&gpc 17 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+ };
+ };
+
+ wlan_pwrseq: msc1-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+
+ reset-gpios = <&gpc 13 GPIO_ACTIVE_LOW>;
+ post-power-on-delay-ms = <200>;
+ };
+};
+
+&exclk {
+ clock-frequency = <24000000>;
+};
+
+&tcu {
+ /* 1500 kHz for the system timer and clocksource */
+ assigned-clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER2>;
+ assigned-clock-rates = <1500000>, <1500000>;
+
+ /* Use channel #0 for the system timer, channel #2 for the clocksource */
+ ingenic,pwm-channels-mask = <0xfa>;
+};
+
+&uart1 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_uart1>;
+};
+
+&i2c0 {
+ status = "okay";
+
+ clock-frequency = <400000>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c0>;
+
+ ads7830: adc@48 {
+ compatible = "ti,ads7830";
+ reg = <0x48>;
+ };
+};
+
+&msc0 {
+ status = "okay";
+
+ bus-width = <4>;
+ max-frequency = <50000000>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_msc0>;
+
+ non-removable;
+};
+
+&msc1 {
+ status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bus-width = <4>;
+ max-frequency = <50000000>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_msc1>;
+
+ non-removable;
+
+ mmc-pwrseq = <&wlan_pwrseq>;
+
+ ap6212a: wifi@1 {
+ compatible = "brcm,bcm4329-fmac";
+ reg = <1>;
+
+ interrupt-parent = <&gpc>;
+ interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-names = "host-wake";
+
+ brcm,drive-strength = <10>;
+ };
+};
+
+&mac {
+ status = "okay";
+
+ phy-mode = "rmii";
+ phy-handle = <&ip101gr>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_mac>;
+
+ snps,reset-gpio = <&gpb 28 GPIO_ACTIVE_LOW>; /* PB28 */
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 30000>;
+};
+
+&mdio {
+ status = "okay";
+
+ ip101gr: ethernet-phy@0 {
+ compatible = "ethernet-phy-id0243.0c54", "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+};
+
+&pinctrl {
+ pins_uart1: uart1 {
+ function = "uart1";
+ groups = "uart1-data";
+ bias-pull-up;
+ };
+
+ pins_i2c0: i2c0 {
+ function = "i2c0";
+ groups = "i2c0-data";
+ bias-pull-up;
+ };
+
+ pins_msc0: msc0 {
+ function = "mmc0";
+ groups = "mmc0-1bit", "mmc0-4bit";
+ bias-disable;
+ };
+
+ pins_msc1: msc1 {
+ function = "mmc1";
+ groups = "mmc1-1bit", "mmc1-4bit";
+ bias-disable;
+ };
+
+ pins_mac: mac {
+ function = "mac";
+ groups = "mac";
+ bias-disable;
+ };
+};
diff --git a/arch/mips/boot/dts/ingenic/gcw0.dts b/arch/mips/boot/dts/ingenic/gcw0.dts
new file mode 100644
index 000000000..bc72304a2
--- /dev/null
+++ b/arch/mips/boot/dts/ingenic/gcw0.dts
@@ -0,0 +1,547 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include "jz4770.dtsi"
+#include <dt-bindings/clock/ingenic,tcu.h>
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/iio/adc/ingenic,adc.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ compatible = "gcw,zero", "ingenic,jz4770";
+ model = "GCW Zero";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ serial3 = &uart3;
+ };
+
+ memory: memory {
+ device_type = "memory";
+ reg = <0x0 0x10000000>,
+ <0x30000000 0x10000000>;
+ };
+
+ chosen {
+ stdout-path = "serial2:57600n8";
+ };
+
+ vcc: regulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc";
+
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ mmc1_power: regulator@1 {
+ compatible = "regulator-fixed";
+ regulator-name = "mmc1_vcc";
+ gpio = <&gpe 9 0>;
+
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc>;
+ };
+
+ headphones_amp: analog-amplifier@0 {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpf 3 GPIO_ACTIVE_LOW>;
+ enable-delay-ms = <50>;
+
+ VCC-supply = <&ldo5>;
+ sound-name-prefix = "Headphones Amp";
+ };
+
+ speaker_amp: analog-amplifier@1 {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpf 20 GPIO_ACTIVE_HIGH>;
+
+ VCC-supply = <&ldo5>;
+ sound-name-prefix = "Speaker Amp";
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+
+ simple-audio-card,name = "gcw0-audio";
+ simple-audio-card,format = "i2s";
+
+ simple-audio-card,widgets =
+ "Speaker", "Speaker",
+ "Headphone", "Headphones",
+ "Line", "FM Radio",
+ "Microphone", "Built-in Mic";
+ simple-audio-card,routing =
+ "Headphones Amp INL", "LHPOUT",
+ "Headphones Amp INR", "RHPOUT",
+ "Headphones", "Headphones Amp OUTL",
+ "Headphones", "Headphones Amp OUTR",
+ "Speaker Amp INL", "LOUT",
+ "Speaker Amp INR", "ROUT",
+ "Speaker", "Speaker Amp OUTL",
+ "Speaker", "Speaker Amp OUTR",
+ "LLINEIN", "FM Radio",
+ "RLINEIN", "FM Radio",
+ "Built-in Mic", "MICBIAS",
+ "MIC1P", "Built-in Mic",
+ "MIC1N", "Built-in Mic";
+ simple-audio-card,pin-switches = "Speaker", "Headphones";
+
+ simple-audio-card,hp-det-gpio = <&gpf 21 GPIO_ACTIVE_LOW>;
+ simple-audio-card,aux-devs = <&speaker_amp>, <&headphones_amp>;
+
+ simple-audio-card,bitclock-master = <&dai_codec>;
+ simple-audio-card,frame-master = <&dai_codec>;
+
+ dai_cpu: simple-audio-card,cpu {
+ sound-dai = <&aic>;
+ };
+
+ dai_codec: simple-audio-card,codec {
+ sound-dai = <&codec>;
+ };
+ };
+
+ rumble {
+ compatible = "pwm-vibrator";
+ pwms = <&pwm 4 2000000 0>;
+ pwm-names = "enable";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_pwm4>;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 1 40000 0>;
+ power-supply = <&vcc>;
+
+ brightness-levels = <0 16 32 48 64 80 96 112 128
+ 144 160 176 192 208 224 240 255>;
+ default-brightness-level = <12>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_pwm1>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ autorepeat;
+
+ button@0 {
+ label = "D-pad up";
+ linux,code = <KEY_UP>;
+ linux,can-disable;
+ gpios = <&gpe 21 GPIO_ACTIVE_LOW>;
+ };
+
+ button@1 {
+ label = "D-pad down";
+ linux,code = <KEY_DOWN>;
+ linux,can-disable;
+ gpios = <&gpe 25 GPIO_ACTIVE_LOW>;
+ };
+
+ button@2 {
+ label = "D-pad left";
+ linux,code = <KEY_LEFT>;
+ linux,can-disable;
+ gpios = <&gpe 23 GPIO_ACTIVE_LOW>;
+ };
+
+ button@3 {
+ label = "D-pad right";
+ linux,code = <KEY_RIGHT>;
+ linux,can-disable;
+ gpios = <&gpe 24 GPIO_ACTIVE_LOW>;
+ };
+
+ button@4 {
+ label = "Button A";
+ linux,code = <KEY_LEFTCTRL>;
+ linux,can-disable;
+ gpios = <&gpe 29 GPIO_ACTIVE_LOW>;
+ };
+
+ button@5 {
+ label = "Button B";
+ linux,code = <KEY_LEFTALT>;
+ linux,can-disable;
+ gpios = <&gpe 20 GPIO_ACTIVE_LOW>;
+ };
+
+ button@6 {
+ label = "Button Y";
+ linux,code = <KEY_SPACE>;
+ linux,can-disable;
+ gpios = <&gpe 27 GPIO_ACTIVE_LOW>;
+ };
+
+ button@7 {
+ label = "Button X";
+ linux,code = <KEY_LEFTSHIFT>;
+ linux,can-disable;
+ gpios = <&gpe 28 GPIO_ACTIVE_LOW>;
+ };
+
+ button@8 {
+ label = "Left shoulder button";
+ linux,code = <KEY_TAB>;
+ linux,can-disable;
+ gpios = <&gpb 20 GPIO_ACTIVE_LOW>;
+ };
+
+ button@9 {
+ label = "Right shoulder button";
+ linux,code = <KEY_BACKSPACE>;
+ linux,can-disable;
+ gpios = <&gpe 26 GPIO_ACTIVE_LOW>;
+ };
+
+ button@10 {
+ label = "Start button";
+ linux,code = <KEY_ENTER>;
+ linux,can-disable;
+ gpios = <&gpb 21 GPIO_ACTIVE_LOW>;
+ };
+
+ button@11 {
+ label = "Select button";
+ linux,code = <KEY_ESC>;
+ linux,can-disable;
+ /*
+ * This is the only button that is active high,
+ * since it doubles as BOOT_SEL1.
+ */
+ gpios = <&gpd 18 GPIO_ACTIVE_HIGH>;
+ };
+
+ button@12 {
+ label = "Power slider";
+ linux,code = <KEY_POWER>;
+ linux,can-disable;
+ gpios = <&gpa 30 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+
+ button@13 {
+ label = "Power hold";
+ linux,code = <KEY_PAUSE>;
+ linux,can-disable;
+ gpios = <&gpf 11 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ i2c3: i2c-controller@3 {
+ compatible = "i2c-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sda-gpios = <&gpd 5 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&gpd 4 GPIO_ACTIVE_HIGH>;
+ i2c-gpio,delay-us = <2>; /* 250 kHz */
+
+ act8600: pmic@5a {
+ compatible = "active-semi,act8600";
+ reg = <0x5a>;
+
+ regulators {
+ /* USB OTG */
+ otg_vbus: SUDCDC_REG4 {
+ /*
+ * 5.3V instead of 5.0V to compensate
+ * for the voltage drop of a diode
+ * between the regulator and the
+ * connector.
+ */
+ regulator-min-microvolt = <5300000>;
+ regulator-max-microvolt = <5300000>;
+ inl-supply = <&vcc>;
+ };
+
+ /*
+ * When this is off, there is no sound, but also
+ * no USB networking.
+ */
+ ldo5: LDO5 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ inl-supply = <&vcc>;
+ };
+
+ /* LCD panel and FM radio */
+ ldo6: LDO6 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ inl-supply = <&vcc>;
+ };
+
+ /* ??? */
+ LDO7 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ /*regulator-always-on;*/
+ inl-supply = <&vcc>;
+ };
+
+ /*
+ * The colors on the LCD are wrong when this is
+ * off. Which is strange, since the LCD panel
+ * data sheet only mentions a 3.3V input.
+ */
+ LDO8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ inl-supply = <&vcc>;
+ };
+
+ /* RTC fixed 3.3V */
+ LDO_REG9 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ inl-supply = <&vcc>;
+ };
+
+ /* Unused fixed 1.2V */
+ LDO_REG10 {
+ inl-supply = <&vcc>;
+ };
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led {
+ gpios = <&gpb 30 GPIO_ACTIVE_LOW>;
+ default-state = "on";
+ };
+ };
+
+ spi {
+ compatible = "spi-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sck-gpios = <&gpe 15 GPIO_ACTIVE_HIGH>;
+ mosi-gpios = <&gpe 17 GPIO_ACTIVE_HIGH>;
+ cs-gpios = <&gpe 16 GPIO_ACTIVE_HIGH>;
+ num-chipselects = <1>;
+
+ nt39016@0 {
+ compatible = "kingdisplay,kd035g6-54nt";
+ reg = <0>;
+
+ spi-max-frequency = <3125000>;
+ spi-3wire;
+ spi-cs-high;
+
+ reset-gpios = <&gpe 2 GPIO_ACTIVE_LOW>;
+
+ backlight = <&backlight>;
+ power-supply = <&ldo6>;
+
+ port {
+ panel_input: endpoint {
+ remote-endpoint = <&panel_output>;
+ };
+ };
+ };
+ };
+
+ connector {
+ compatible = "gpio-usb-b-connector", "usb-b-connector";
+ label = "mini-USB";
+ type = "mini";
+
+ /*
+ * USB OTG is not yet working reliably, the ID detection
+ * mechanism tends to fry easily for unknown reasons.
+ * Until this is fixed, disable OTG by not providing the
+ * ID GPIO to the driver.
+ */
+ //id-gpios = <&gpf 18 GPIO_ACTIVE_LOW>;
+
+ vbus-gpios = <&gpb 5 GPIO_ACTIVE_HIGH>;
+ vbus-supply = <&otg_vbus>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_otg>;
+
+ port {
+ usb_ep: endpoint {
+ remote-endpoint = <&usb_otg_ep>;
+ };
+ };
+ };
+};
+
+&ext {
+ clock-frequency = <12000000>;
+};
+
+&pinctrl {
+ pins_lcd: lcd {
+ function = "lcd";
+ groups = "lcd-24bit";
+ };
+
+ pins_uart2: uart2 {
+ function = "uart2";
+ groups = "uart2-data";
+ };
+
+ pins_mmc0: mmc0 {
+ function = "mmc0";
+ groups = "mmc0-1bit-a", "mmc0-4bit-a";
+ };
+
+ pins_mmc1: mmc1 {
+ function = "mmc1";
+ groups = "mmc1-1bit-d", "mmc1-4bit-d";
+ };
+
+ pins_otg: otg {
+ otg-vbus-pin {
+ function = "otg";
+ groups = "otg-vbus";
+ };
+
+ vbus-pin {
+ pins = "PB5";
+ bias-disable;
+ };
+ };
+
+ pins_pwm1: pwm1 {
+ function = "pwm1";
+ groups = "pwm1";
+ };
+
+ pins_pwm4: pwm4 {
+ function = "pwm4";
+ groups = "pwm4";
+ };
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_uart2>;
+
+ status = "okay";
+};
+
+&cgu {
+ /*
+ * Put high-speed peripherals under PLL1, such that we can change the
+ * PLL0 frequency on demand without having to suspend peripherals.
+ * We use a rate of 432 MHz, which is the least common multiple of
+ * 27 MHz (required by TV encoder) and 48 MHz (required by USB host).
+ * Put the GPU under PLL0 since we want a higher frequency.
+ * Use the 32 kHz oscillator as the parent of the RTC for a higher
+ * precision.
+ */
+ assigned-clocks =
+ <&cgu JZ4770_CLK_PLL1>,
+ <&cgu JZ4770_CLK_GPU>,
+ <&cgu JZ4770_CLK_RTC>,
+ <&cgu JZ4770_CLK_UHC>,
+ <&cgu JZ4770_CLK_LPCLK_MUX>,
+ <&cgu JZ4770_CLK_MMC0_MUX>,
+ <&cgu JZ4770_CLK_MMC1_MUX>;
+ assigned-clock-parents =
+ <0>,
+ <&cgu JZ4770_CLK_PLL0>,
+ <&cgu JZ4770_CLK_OSC32K>,
+ <&cgu JZ4770_CLK_PLL1>,
+ <&cgu JZ4770_CLK_PLL1>,
+ <&cgu JZ4770_CLK_PLL1>,
+ <&cgu JZ4770_CLK_PLL1>;
+ assigned-clock-rates =
+ <432000000>,
+ <600000000>;
+};
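
A quick arithmetic check of the 432 MHz figure from the comment above, as a minimal Python sketch (standard library only):

    from math import gcd

    tv, usb = 27_000_000, 48_000_000
    pll1 = tv * usb // gcd(tv, usb)
    print(pll1)   # 432000000, i.e. 27 MHz x 16 and 48 MHz x 9, so both
                  # peripherals can derive their rate by integer division
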
+
+&uhc {
+ /* The WiFi module is connected to the UHC. */
+ status = "okay";
+};
+
+&tcu {
+ /*
+ * 750 kHz for the system timer and clocksource, 12 MHz for the OST,
+ * and use RTC as the parent for the watchdog clock
+ */
+ assigned-clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER2>,
+ <&tcu TCU_CLK_OST>, <&tcu TCU_CLK_WDT>;
+ assigned-clock-parents = <0>, <0>, <0>, <&cgu JZ4770_CLK_RTC>;
+ assigned-clock-rates = <750000>, <750000>, <12000000>;
+
+ /* PWM1 is in use, so use channel #2 for the clocksource */
+ ingenic,pwm-channels-mask = <0xfa>;
+};
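
The 750 kHz and 12 MHz rates follow from the 12 MHz EXT crystal set earlier in this file. A minimal Python sketch of the achievable rates, assuming the usual JZ47xx TCU prescaler set of 1, 4, 16, 64, 256 and 1024 (an assumption, not stated in the patch):

    ext = 12_000_000
    for div in (1, 4, 16, 64, 256, 1024):
        print(div, ext // div)   # div=16 -> 750000 (timer/clocksource), div=1 -> 12000000 (OST)
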
+
+&usb_otg {
+ port {
+ usb_otg_ep: endpoint {
+ remote-endpoint = <&usb_ep>;
+ };
+ };
+};
+
+&otg_phy {
+ vcc-supply = <&ldo5>;
+};
+
+&rtc {
+ clocks = <&cgu JZ4770_CLK_RTC>;
+ clock-names = "rtc";
+
+ system-power-controller;
+};
+
+&mmc0 {
+ status = "okay";
+
+ bus-width = <4>;
+ max-frequency = <48000000>;
+ vmmc-supply = <&vcc>;
+ non-removable;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_mmc0>;
+};
+
+&mmc1 {
+ status = "okay";
+
+ bus-width = <4>;
+ max-frequency = <48000000>;
+ cd-gpios = <&gpb 2 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&mmc1_power>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_mmc1>;
+};
+
+&lcd {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_lcd>;
+
+ port {
+ panel_output: endpoint {
+ remote-endpoint = <&panel_input>;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/ingenic/gcw0_proto.dts b/arch/mips/boot/dts/ingenic/gcw0_proto.dts
new file mode 100644
index 000000000..02df22f8a
--- /dev/null
+++ b/arch/mips/boot/dts/ingenic/gcw0_proto.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include "gcw0.dts"
+
+/ {
+ model = "GCW Zero Prototype";
+};
+
+&memory {
+ /* Prototype has only 256 MiB of RAM */
+ reg = <0x0 0x10000000>;
+};
diff --git a/arch/mips/boot/dts/ingenic/jz4725b.dtsi b/arch/mips/boot/dts/ingenic/jz4725b.dtsi
new file mode 100644
index 000000000..a1f0b71c9
--- /dev/null
+++ b/arch/mips/boot/dts/ingenic/jz4725b.dtsi
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <dt-bindings/clock/jz4725b-cgu.h>
+#include <dt-bindings/clock/ingenic,tcu.h>
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ingenic,jz4725b";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "ingenic,xburst-mxu1.0";
+ reg = <0>;
+
+ clocks = <&cgu JZ4725B_CLK_CCLK>;
+ clock-names = "cpu";
+ };
+ };
+
+ cpuintc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ intc: interrupt-controller@10001000 {
+ compatible = "ingenic,jz4725b-intc", "ingenic,jz4740-intc";
+ reg = <0x10001000 0x14>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
+
+ ext: ext {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+
+ osc32k: osc32k {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ cgu: clock-controller@10000000 {
+ compatible = "ingenic,jz4725b-cgu";
+ reg = <0x10000000 0x100>;
+
+ clocks = <&ext>, <&osc32k>;
+ clock-names = "ext", "osc32k";
+
+ #clock-cells = <1>;
+ };
+
+ tcu: timer@10002000 {
+ compatible = "ingenic,jz4725b-tcu", "simple-mfd";
+ reg = <0x10002000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x10002000 0x1000>;
+
+ #clock-cells = <1>;
+
+ clocks = <&cgu JZ4725B_CLK_RTC>,
+ <&cgu JZ4725B_CLK_EXT>,
+ <&cgu JZ4725B_CLK_PCLK>,
+ <&cgu JZ4725B_CLK_TCU>;
+ clock-names = "rtc", "ext", "pclk", "tcu";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <23>, <22>, <21>;
+
+ watchdog: watchdog@0 {
+ compatible = "ingenic,jz4725b-watchdog", "ingenic,jz4740-watchdog";
+ reg = <0x0 0xc>;
+
+ clocks = <&tcu TCU_CLK_WDT>;
+ clock-names = "wdt";
+ };
+
+ pwm: pwm@60 {
+ compatible = "ingenic,jz4725b-pwm";
+ reg = <0x60 0x40>;
+
+ #pwm-cells = <3>;
+
+ clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER1>,
+ <&tcu TCU_CLK_TIMER2>, <&tcu TCU_CLK_TIMER3>,
+ <&tcu TCU_CLK_TIMER4>, <&tcu TCU_CLK_TIMER5>;
+ clock-names = "timer0", "timer1", "timer2",
+ "timer3", "timer4", "timer5";
+ };
+
+ ost: timer@e0 {
+ compatible = "ingenic,jz4725b-ost";
+ reg = <0xe0 0x20>;
+
+ clocks = <&tcu TCU_CLK_OST>;
+ clock-names = "ost";
+
+ interrupts = <15>;
+ };
+ };
+
+ rtc_dev: rtc@10003000 {
+ compatible = "ingenic,jz4725b-rtc", "ingenic,jz4740-rtc";
+ reg = <0x10003000 0x40>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <6>;
+
+ clocks = <&cgu JZ4725B_CLK_RTC>;
+ clock-names = "rtc";
+ };
+
+ pinctrl: pinctrl@10010000 {
+ compatible = "ingenic,jz4725b-pinctrl";
+ reg = <0x10010000 0x400>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpa: gpio@0 {
+ compatible = "ingenic,jz4725b-gpio";
+ reg = <0>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <16>;
+ };
+
+ gpb: gpio@1 {
+ compatible = "ingenic,jz4725b-gpio";
+ reg = <1>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 32 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <15>;
+ };
+
+ gpc: gpio@2 {
+ compatible = "ingenic,jz4725b-gpio";
+ reg = <2>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 64 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <14>;
+ };
+
+ gpd: gpio@3 {
+ compatible = "ingenic,jz4725b-gpio";
+ reg = <3>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 96 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <13>;
+ };
+ };
+
+ aic: audio-controller@10020000 {
+ compatible = "ingenic,jz4725b-i2s", "ingenic,jz4740-i2s";
+ reg = <0x10020000 0x38>;
+
+ #sound-dai-cells = <0>;
+
+ clocks = <&cgu JZ4725B_CLK_AIC>,
+ <&cgu JZ4725B_CLK_I2S>,
+ <&cgu JZ4725B_CLK_EXT>,
+ <&cgu JZ4725B_CLK_PLL_HALF>;
+ clock-names = "aic", "i2s", "ext", "pll half";
+
+ interrupt-parent = <&intc>;
+ interrupts = <10>;
+
+ dmas = <&dmac 25 0xffffffff>, <&dmac 24 0xffffffff>;
+ dma-names = "rx", "tx";
+ };
+
+ codec: audio-codec@100200a4 {
+ compatible = "ingenic,jz4725b-codec";
+ reg = <0x100200a4 0x8>;
+
+ #sound-dai-cells = <0>;
+
+ clocks = <&cgu JZ4725B_CLK_AIC>;
+ clock-names = "aic";
+ };
+
+ mmc0: mmc@10021000 {
+ compatible = "ingenic,jz4725b-mmc";
+ reg = <0x10021000 0x1000>;
+
+ clocks = <&cgu JZ4725B_CLK_MMC0>;
+ clock-names = "mmc";
+
+ interrupt-parent = <&intc>;
+ interrupts = <25>;
+
+ dmas = <&dmac 27 0xffffffff>, <&dmac 26 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cap-sdio-irq;
+ };
+
+ mmc1: mmc@10022000 {
+ compatible = "ingenic,jz4725b-mmc";
+ reg = <0x10022000 0x1000>;
+
+ clocks = <&cgu JZ4725B_CLK_MMC1>;
+ clock-names = "mmc";
+
+ interrupt-parent = <&intc>;
+ interrupts = <24>;
+
+ dmas = <&dmac 31 0xffffffff>, <&dmac 30 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cap-sdio-irq;
+ };
+
+ uart: serial@10030000 {
+ compatible = "ingenic,jz4725b-uart", "ingenic,jz4740-uart";
+ reg = <0x10030000 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <9>;
+
+ clocks = <&ext>, <&cgu JZ4725B_CLK_UART>;
+ clock-names = "baud", "module";
+ };
+
+ adc: adc@10070000 {
+ compatible = "ingenic,jz4725b-adc";
+ #io-channel-cells = <1>;
+
+ reg = <0x10070000 0x30>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x10070000 0x30>;
+
+ clocks = <&cgu JZ4725B_CLK_ADC>;
+ clock-names = "adc";
+
+ interrupt-parent = <&intc>;
+ interrupts = <18>;
+ };
+
+ nemc: memory-controller@13010000 {
+ compatible = "ingenic,jz4725b-nemc", "ingenic,jz4740-nemc";
+ reg = <0x13010000 0x10000>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <1 0 0x18000000 0x4000000>, <2 0 0x14000000 0x4000000>,
+ <3 0 0x0c000000 0x4000000>, <4 0 0x08000000 0x4000000>;
+
+ clocks = <&cgu JZ4725B_CLK_MCLK>;
+ };
+
+ dmac: dma-controller@13020000 {
+ compatible = "ingenic,jz4725b-dma";
+ reg = <0x13020000 0xd8>, <0x13020300 0x14>;
+
+ #dma-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <29>;
+
+ clocks = <&cgu JZ4725B_CLK_DMA>;
+ };
+
+ udc: usb@13040000 {
+ compatible = "ingenic,jz4725b-musb", "ingenic,jz4740-musb";
+ reg = <0x13040000 0x10000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <27>;
+ interrupt-names = "mc";
+
+ clocks = <&cgu JZ4725B_CLK_UDC>;
+ clock-names = "udc";
+ };
+
+ lcd: lcd-controller@13050000 {
+ compatible = "ingenic,jz4725b-lcd";
+ reg = <0x13050000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <31>;
+
+ clocks = <&cgu JZ4725B_CLK_LCD>;
+ clock-names = "lcd_pclk";
+
+ lcd_ports: ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@8 {
+ reg = <8>;
+
+ ipu_output: endpoint {
+ remote-endpoint = <&ipu_input>;
+ };
+ };
+ };
+ };
+
+ ipu: ipu@13080000 {
+ compatible = "ingenic,jz4725b-ipu";
+ reg = <0x13080000 0x64>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <30>;
+
+ clocks = <&cgu JZ4725B_CLK_IPU>;
+ clock-names = "ipu";
+
+ port {
+ ipu_input: endpoint {
+ remote-endpoint = <&ipu_output>;
+ };
+ };
+ };
+
+ bch: ecc-controller@130d0000 {
+ compatible = "ingenic,jz4725b-bch";
+ reg = <0x130d0000 0x44>;
+
+ clocks = <&cgu JZ4725B_CLK_BCH>;
+ };
+
+ rom: memory@1fc00000 {
+ compatible = "mtd-rom";
+ probe-type = "map_rom";
+ reg = <0x1fc00000 0x2000>;
+
+ bank-width = <4>;
+ device-width = <1>;
+ };
+};
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
new file mode 100644
index 000000000..eee523678
--- /dev/null
+++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <dt-bindings/clock/jz4740-cgu.h>
+#include <dt-bindings/clock/ingenic,tcu.h>
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ingenic,jz4740";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "ingenic,xburst-mxu1.0";
+ reg = <0>;
+
+ clocks = <&cgu JZ4740_CLK_CCLK>;
+ clock-names = "cpu";
+ };
+ };
+
+ cpuintc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ intc: interrupt-controller@10001000 {
+ compatible = "ingenic,jz4740-intc";
+ reg = <0x10001000 0x14>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
+
+ ext: ext {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+
+ rtc: rtc {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ cgu: jz4740-cgu@10000000 {
+ compatible = "ingenic,jz4740-cgu";
+ reg = <0x10000000 0x100>;
+
+ clocks = <&ext>, <&rtc>;
+ clock-names = "ext", "rtc";
+
+ #clock-cells = <1>;
+ };
+
+ tcu: timer@10002000 {
+ compatible = "ingenic,jz4740-tcu", "simple-mfd";
+ reg = <0x10002000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x10002000 0x1000>;
+
+ #clock-cells = <1>;
+
+ clocks = <&cgu JZ4740_CLK_RTC>,
+ <&cgu JZ4740_CLK_EXT>,
+ <&cgu JZ4740_CLK_PCLK>,
+ <&cgu JZ4740_CLK_TCU>;
+ clock-names = "rtc", "ext", "pclk", "tcu";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <23 22 21>;
+
+ watchdog: watchdog@0 {
+ compatible = "ingenic,jz4740-watchdog";
+ reg = <0x0 0xc>;
+
+ clocks = <&tcu TCU_CLK_WDT>;
+ clock-names = "wdt";
+ };
+
+ pwm: pwm@40 {
+ compatible = "ingenic,jz4740-pwm";
+ reg = <0x40 0x80>;
+
+ #pwm-cells = <3>;
+
+ clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER1>,
+ <&tcu TCU_CLK_TIMER2>, <&tcu TCU_CLK_TIMER3>,
+ <&tcu TCU_CLK_TIMER4>, <&tcu TCU_CLK_TIMER5>,
+ <&tcu TCU_CLK_TIMER6>, <&tcu TCU_CLK_TIMER7>;
+ clock-names = "timer0", "timer1", "timer2", "timer3",
+ "timer4", "timer5", "timer6", "timer7";
+ };
+ };
+
+ rtc_dev: rtc@10003000 {
+ compatible = "ingenic,jz4740-rtc";
+ reg = <0x10003000 0x40>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <15>;
+
+ clocks = <&cgu JZ4740_CLK_RTC>;
+ clock-names = "rtc";
+ };
+
+ pinctrl: pin-controller@10010000 {
+ compatible = "ingenic,jz4740-pinctrl";
+ reg = <0x10010000 0x400>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpa: gpio@0 {
+ compatible = "ingenic,jz4740-gpio";
+ reg = <0>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <28>;
+ };
+
+ gpb: gpio@1 {
+ compatible = "ingenic,jz4740-gpio";
+ reg = <1>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 32 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <27>;
+ };
+
+ gpc: gpio@2 {
+ compatible = "ingenic,jz4740-gpio";
+ reg = <2>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 64 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <26>;
+ };
+
+ gpd: gpio@3 {
+ compatible = "ingenic,jz4740-gpio";
+ reg = <3>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 96 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <25>;
+ };
+ };
+
+ aic: audio-controller@10020000 {
+ compatible = "ingenic,jz4740-i2s";
+ reg = <0x10020000 0x38>;
+
+ #sound-dai-cells = <0>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <18>;
+
+ clocks = <&cgu JZ4740_CLK_AIC>,
+ <&cgu JZ4740_CLK_I2S>,
+ <&cgu JZ4740_CLK_EXT>,
+ <&cgu JZ4740_CLK_PLL_HALF>;
+ clock-names = "aic", "i2s", "ext", "pll half";
+
+ dmas = <&dmac 25 0xffffffff>, <&dmac 24 0xffffffff>;
+ dma-names = "rx", "tx";
+ };
+
+ codec: audio-codec@10020080 {
+ compatible = "ingenic,jz4740-codec";
+ reg = <0x10020080 0x8>;
+
+ #sound-dai-cells = <0>;
+
+ clocks = <&cgu JZ4740_CLK_AIC>;
+ clock-names = "aic";
+ };
+
+ mmc: mmc@10021000 {
+ compatible = "ingenic,jz4740-mmc";
+ reg = <0x10021000 0x1000>;
+
+ clocks = <&cgu JZ4740_CLK_MMC>;
+ clock-names = "mmc";
+
+ interrupt-parent = <&intc>;
+ interrupts = <14>;
+
+ dmas = <&dmac 27 0xffffffff>, <&dmac 26 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cap-sdio-irq;
+ };
+
+ uart0: serial@10030000 {
+ compatible = "ingenic,jz4740-uart";
+ reg = <0x10030000 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <9>;
+
+ clocks = <&ext>, <&cgu JZ4740_CLK_UART0>;
+ clock-names = "baud", "module";
+ };
+
+ uart1: serial@10031000 {
+ compatible = "ingenic,jz4740-uart";
+ reg = <0x10031000 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <8>;
+
+ clocks = <&ext>, <&cgu JZ4740_CLK_UART1>;
+ clock-names = "baud", "module";
+ };
+
+ adc: adc@10070000 {
+ compatible = "ingenic,jz4740-adc";
+ reg = <0x10070000 0x30>;
+ #io-channel-cells = <1>;
+
+ clocks = <&cgu JZ4740_CLK_ADC>;
+ clock-names = "adc";
+
+ interrupt-parent = <&intc>;
+ interrupts = <12>;
+ };
+
+ nemc: memory-controller@13010000 {
+ compatible = "ingenic,jz4740-nemc";
+ reg = <0x13010000 0x54>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <1 0 0x18000000 0x4000000>,
+ <2 0 0x14000000 0x4000000>,
+ <3 0 0x0c000000 0x4000000>,
+ <4 0 0x08000000 0x4000000>;
+
+ clocks = <&cgu JZ4740_CLK_MCLK>;
+ };
+
+ ecc: ecc-controller@13010100 {
+ compatible = "ingenic,jz4740-ecc";
+ reg = <0x13010100 0x2C>;
+
+ clocks = <&cgu JZ4740_CLK_MCLK>;
+ };
+
+ dmac: dma-controller@13020000 {
+ compatible = "ingenic,jz4740-dma";
+ reg = <0x13020000 0xbc>, <0x13020300 0x14>;
+ #dma-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <20>;
+
+ clocks = <&cgu JZ4740_CLK_DMA>;
+ };
+
+ uhc: uhc@13030000 {
+ compatible = "ingenic,jz4740-ohci", "generic-ohci";
+ reg = <0x13030000 0x1000>;
+
+ clocks = <&cgu JZ4740_CLK_UHC>;
+ assigned-clocks = <&cgu JZ4740_CLK_UHC>;
+ assigned-clock-rates = <48000000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <3>;
+
+ status = "disabled";
+ };
+
+ udc: usb@13040000 {
+ compatible = "ingenic,jz4740-musb";
+ reg = <0x13040000 0x10000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <24>;
+ interrupt-names = "mc";
+
+ clocks = <&cgu JZ4740_CLK_UDC>;
+ clock-names = "udc";
+ };
+
+ lcd: lcd-controller@13050000 {
+ compatible = "ingenic,jz4740-lcd";
+ reg = <0x13050000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <30>;
+
+ clocks = <&cgu JZ4740_CLK_LCD_PCLK>, <&cgu JZ4740_CLK_LCD>;
+ clock-names = "lcd_pclk", "lcd";
+ };
+};
diff --git a/arch/mips/boot/dts/ingenic/jz4770.dtsi b/arch/mips/boot/dts/ingenic/jz4770.dtsi
new file mode 100644
index 000000000..018721a9e
--- /dev/null
+++ b/arch/mips/boot/dts/ingenic/jz4770.dtsi
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <dt-bindings/clock/jz4770-cgu.h>
+#include <dt-bindings/clock/ingenic,tcu.h>
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ingenic,jz4770";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "ingenic,xburst-fpu1.0-mxu1.1";
+ reg = <0>;
+
+ clocks = <&cgu JZ4770_CLK_CCLK>;
+ clock-names = "cpu";
+ };
+ };
+
+ cpuintc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ intc: interrupt-controller@10001000 {
+ compatible = "ingenic,jz4770-intc";
+ reg = <0x10001000 0x40>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
+
+ ext: ext {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+
+ osc32k: osc32k {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ cgu: jz4770-cgu@10000000 {
+ compatible = "ingenic,jz4770-cgu", "simple-mfd";
+ reg = <0x10000000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x10000000 0x100>;
+
+ clocks = <&ext>, <&osc32k>;
+ clock-names = "ext", "osc32k";
+
+ #clock-cells = <1>;
+
+ otg_phy: usb-phy@3c {
+ compatible = "ingenic,jz4770-phy";
+ reg = <0x3c 0x10>;
+
+ clocks = <&cgu JZ4770_CLK_OTG_PHY>;
+
+ #phy-cells = <0>;
+ };
+ };
+
+ tcu: timer@10002000 {
+ compatible = "ingenic,jz4770-tcu", "simple-mfd";
+ reg = <0x10002000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x10002000 0x1000>;
+
+ #clock-cells = <1>;
+
+ clocks = <&cgu JZ4770_CLK_RTC>,
+ <&cgu JZ4770_CLK_EXT>,
+ <&cgu JZ4770_CLK_PCLK>;
+ clock-names = "rtc", "ext", "pclk";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <27 26 25>;
+
+ watchdog: watchdog@0 {
+ compatible = "ingenic,jz4770-watchdog",
+ "ingenic,jz4740-watchdog";
+ reg = <0x0 0xc>;
+
+ clocks = <&tcu TCU_CLK_WDT>;
+ clock-names = "wdt";
+ };
+
+ pwm: pwm@40 {
+ compatible = "ingenic,jz4770-pwm", "ingenic,jz4740-pwm";
+ reg = <0x40 0x80>;
+
+ #pwm-cells = <3>;
+
+ clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER1>,
+ <&tcu TCU_CLK_TIMER2>, <&tcu TCU_CLK_TIMER3>,
+ <&tcu TCU_CLK_TIMER4>, <&tcu TCU_CLK_TIMER5>,
+ <&tcu TCU_CLK_TIMER6>, <&tcu TCU_CLK_TIMER7>;
+ clock-names = "timer0", "timer1", "timer2", "timer3",
+ "timer4", "timer5", "timer6", "timer7";
+ };
+
+ ost: timer@e0 {
+ compatible = "ingenic,jz4770-ost";
+ reg = <0xe0 0x20>;
+
+ clocks = <&tcu TCU_CLK_OST>;
+ clock-names = "ost";
+
+ interrupts = <15>;
+ };
+ };
+
+ rtc: rtc@10003000 {
+ compatible = "ingenic,jz4770-rtc", "ingenic,jz4760-rtc";
+ reg = <0x10003000 0x40>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <32>;
+ };
+
+ pinctrl: pin-controller@10010000 {
+ compatible = "ingenic,jz4770-pinctrl";
+ reg = <0x10010000 0x600>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpa: gpio@0 {
+ compatible = "ingenic,jz4770-gpio";
+ reg = <0>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <17>;
+ };
+
+ gpb: gpio@1 {
+ compatible = "ingenic,jz4770-gpio";
+ reg = <1>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 32 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <16>;
+ };
+
+ gpc: gpio@2 {
+ compatible = "ingenic,jz4770-gpio";
+ reg = <2>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 64 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <15>;
+ };
+
+ gpd: gpio@3 {
+ compatible = "ingenic,jz4770-gpio";
+ reg = <3>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 96 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <14>;
+ };
+
+ gpe: gpio@4 {
+ compatible = "ingenic,jz4770-gpio";
+ reg = <4>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 128 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <13>;
+ };
+
+ gpf: gpio@5 {
+ compatible = "ingenic,jz4770-gpio";
+ reg = <5>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 160 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <12>;
+ };
+ };
+
+ aic: audio-controller@10020000 {
+ compatible = "ingenic,jz4770-i2s";
+ reg = <0x10020000 0x94>;
+
+ #sound-dai-cells = <0>;
+
+ clocks = <&cgu JZ4770_CLK_AIC>, <&cgu JZ4770_CLK_I2S>,
+ <&cgu JZ4770_CLK_EXT>, <&cgu JZ4770_CLK_PLL0>;
+ clock-names = "aic", "i2s", "ext", "pll half";
+
+ interrupt-parent = <&intc>;
+ interrupts = <34>;
+
+ dmas = <&dmac0 25 0xffffffff>, <&dmac0 24 0xffffffff>;
+ dma-names = "rx", "tx";
+ };
+
+ codec: audio-codec@100200a4 {
+ compatible = "ingenic,jz4770-codec";
+ reg = <0x100200a4 0x8>;
+
+ #sound-dai-cells = <0>;
+
+ clocks = <&cgu JZ4770_CLK_AIC>;
+ clock-names = "aic";
+ };
+
+ mmc0: mmc@10021000 {
+ compatible = "ingenic,jz4770-mmc", "ingenic,jz4760-mmc";
+ reg = <0x10021000 0x1000>;
+
+ clocks = <&cgu JZ4770_CLK_MMC0>;
+ clock-names = "mmc";
+
+ interrupt-parent = <&intc>;
+ interrupts = <37>;
+
+ dmas = <&dmac1 27 0xffffffff>, <&dmac1 26 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cap-sdio-irq;
+
+ status = "disabled";
+ };
+
+ mmc1: mmc@10022000 {
+ compatible = "ingenic,jz4770-mmc", "ingenic,jz4760-mmc";
+ reg = <0x10022000 0x1000>;
+
+ clocks = <&cgu JZ4770_CLK_MMC1>;
+ clock-names = "mmc";
+
+ interrupt-parent = <&intc>;
+ interrupts = <36>;
+
+ dmas = <&dmac1 31 0xffffffff>, <&dmac1 30 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cap-sdio-irq;
+
+ status = "disabled";
+ };
+
+ mmc2: mmc@10023000 {
+ compatible = "ingenic,jz4770-mmc", "ingenic,jz4760-mmc";
+ reg = <0x10023000 0x1000>;
+
+ clocks = <&cgu JZ4770_CLK_MMC2>;
+ clock-names = "mmc";
+
+ interrupt-parent = <&intc>;
+ interrupts = <35>;
+
+ dmas = <&dmac1 37 0xffffffff>, <&dmac1 36 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cap-sdio-irq;
+
+ status = "disabled";
+ };
+
+ uart0: serial@10030000 {
+ compatible = "ingenic,jz4770-uart";
+ reg = <0x10030000 0x100>;
+
+ clocks = <&ext>, <&cgu JZ4770_CLK_UART0>;
+ clock-names = "baud", "module";
+
+ interrupt-parent = <&intc>;
+ interrupts = <5>;
+
+ status = "disabled";
+ };
+
+ uart1: serial@10031000 {
+ compatible = "ingenic,jz4770-uart";
+ reg = <0x10031000 0x100>;
+
+ clocks = <&ext>, <&cgu JZ4770_CLK_UART1>;
+ clock-names = "baud", "module";
+
+ interrupt-parent = <&intc>;
+ interrupts = <4>;
+
+ status = "disabled";
+ };
+
+ uart2: serial@10032000 {
+ compatible = "ingenic,jz4770-uart";
+ reg = <0x10032000 0x100>;
+
+ clocks = <&ext>, <&cgu JZ4770_CLK_UART2>;
+ clock-names = "baud", "module";
+
+ interrupt-parent = <&intc>;
+ interrupts = <3>;
+
+ status = "disabled";
+ };
+
+ uart3: serial@10033000 {
+ compatible = "ingenic,jz4770-uart";
+ reg = <0x10033000 0x100>;
+
+ clocks = <&ext>, <&cgu JZ4770_CLK_UART3>;
+ clock-names = "baud", "module";
+
+ interrupt-parent = <&intc>;
+ interrupts = <2>;
+
+ status = "disabled";
+ };
+
+ adc: adc@10070000 {
+ compatible = "ingenic,jz4770-adc";
+ reg = <0x10070000 0x30>;
+
+ #io-channel-cells = <1>;
+
+ clocks = <&cgu JZ4770_CLK_ADC>;
+ clock-names = "adc";
+
+ interrupt-parent = <&intc>;
+ interrupts = <18>;
+ };
+
+ gpu: gpu@13040000 {
+ compatible = "vivante,gc";
+ reg = <0x13040000 0x10000>;
+
+ clocks = <&cgu JZ4770_CLK_GPU>,
+ <&cgu JZ4770_CLK_GPU>,
+ <&cgu JZ4770_CLK_GPU>;
+ clock-names = "bus", "core", "shader";
+
+ interrupt-parent = <&intc>;
+ interrupts = <6>;
+ };
+
+ lcd: lcd-controller@13050000 {
+ compatible = "ingenic,jz4770-lcd";
+ reg = <0x13050000 0x300>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <31>;
+
+ clocks = <&cgu JZ4770_CLK_LPCLK_MUX>;
+ clock-names = "lcd_pclk";
+ };
+
+ dmac0: dma-controller@13420000 {
+ compatible = "ingenic,jz4770-dma";
+ reg = <0x13420000 0xC0>, <0x13420300 0x20>;
+
+ #dma-cells = <2>;
+
+ clocks = <&cgu JZ4770_CLK_DMA>;
+ interrupt-parent = <&intc>;
+ interrupts = <24>;
+ };
+
+ dmac1: dma-controller@13420100 {
+ compatible = "ingenic,jz4770-dma";
+ reg = <0x13420100 0xC0>, <0x13420400 0x20>;
+
+ #dma-cells = <2>;
+
+ clocks = <&cgu JZ4770_CLK_DMA>;
+ interrupt-parent = <&intc>;
+ interrupts = <23>;
+ };
+
+ uhc: uhc@13430000 {
+ compatible = "generic-ohci";
+ reg = <0x13430000 0x1000>;
+
+ clocks = <&cgu JZ4770_CLK_UHC>, <&cgu JZ4770_CLK_UHC_PHY>;
+ assigned-clocks = <&cgu JZ4770_CLK_UHC>;
+ assigned-clock-rates = <48000000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <20>;
+
+ status = "disabled";
+ };
+
+ usb_otg: usb@13440000 {
+ compatible = "ingenic,jz4770-musb";
+ reg = <0x13440000 0x10000>;
+
+ clocks = <&cgu JZ4770_CLK_OTG>;
+ clock-names = "udc";
+
+ interrupt-parent = <&intc>;
+ interrupts = <21>;
+ interrupt-names = "mc";
+
+ phys = <&otg_phy>;
+
+ usb-role-switch;
+ };
+
+ rom: memory@1fc00000 {
+ compatible = "mtd-rom";
+ probe-type = "map_rom";
+ reg = <0x1fc00000 0x2000>;
+
+ bank-width = <4>;
+ device-width = <1>;
+ };
+};
diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
new file mode 100644
index 000000000..830e5dd35
--- /dev/null
+++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <dt-bindings/clock/jz4780-cgu.h>
+#include <dt-bindings/clock/ingenic,tcu.h>
+#include <dt-bindings/dma/jz4780-dma.h>
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ingenic,jz4780";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "ingenic,xburst-fpu1.0-mxu1.1";
+ reg = <0>;
+
+ clocks = <&cgu JZ4780_CLK_CPU>;
+ clock-names = "cpu";
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "ingenic,xburst-fpu1.0-mxu1.1";
+ reg = <1>;
+
+ clocks = <&cgu JZ4780_CLK_CORE1>;
+ clock-names = "cpu";
+ };
+ };
+
+ cpuintc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ intc: interrupt-controller@10001000 {
+ compatible = "ingenic,jz4780-intc";
+ reg = <0x10001000 0x50>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
+
+ ext: ext {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+
+ rtc: rtc {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ cgu: jz4780-cgu@10000000 {
+ compatible = "ingenic,jz4780-cgu";
+ reg = <0x10000000 0x100>;
+
+ clocks = <&ext>, <&rtc>;
+ clock-names = "ext", "rtc";
+
+ #clock-cells = <1>;
+ };
+
+ tcu: timer@10002000 {
+ compatible = "ingenic,jz4780-tcu",
+ "ingenic,jz4770-tcu",
+ "simple-mfd";
+ reg = <0x10002000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x10002000 0x1000>;
+
+ #clock-cells = <1>;
+
+ clocks = <&cgu JZ4780_CLK_RTCLK>,
+ <&cgu JZ4780_CLK_EXCLK>,
+ <&cgu JZ4780_CLK_PCLK>;
+ clock-names = "rtc", "ext", "pclk";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <27 26 25>;
+
+ watchdog: watchdog@0 {
+ compatible = "ingenic,jz4780-watchdog";
+ reg = <0x0 0xc>;
+
+ clocks = <&tcu TCU_CLK_WDT>;
+ clock-names = "wdt";
+ };
+
+ pwm: pwm@40 {
+ compatible = "ingenic,jz4780-pwm", "ingenic,jz4740-pwm";
+ reg = <0x40 0x80>;
+
+ #pwm-cells = <3>;
+
+ clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER1>,
+ <&tcu TCU_CLK_TIMER2>, <&tcu TCU_CLK_TIMER3>,
+ <&tcu TCU_CLK_TIMER4>, <&tcu TCU_CLK_TIMER5>,
+ <&tcu TCU_CLK_TIMER6>, <&tcu TCU_CLK_TIMER7>;
+ clock-names = "timer0", "timer1", "timer2", "timer3",
+ "timer4", "timer5", "timer6", "timer7";
+ };
+
+ ost: timer@e0 {
+ compatible = "ingenic,jz4780-ost", "ingenic,jz4770-ost";
+ reg = <0xe0 0x20>;
+
+ clocks = <&tcu TCU_CLK_OST>;
+ clock-names = "ost";
+
+ interrupts = <15>;
+ };
+ };
+
+ rtc_dev: rtc@10003000 {
+ compatible = "ingenic,jz4780-rtc";
+ reg = <0x10003000 0x4c>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <32>;
+
+ clocks = <&cgu JZ4780_CLK_RTCLK>;
+ clock-names = "rtc";
+ };
+
+ pinctrl: pin-controller@10010000 {
+ compatible = "ingenic,jz4780-pinctrl";
+ reg = <0x10010000 0x600>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpa: gpio@0 {
+ compatible = "ingenic,jz4780-gpio";
+ reg = <0>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <17>;
+ };
+
+ gpb: gpio@1 {
+ compatible = "ingenic,jz4780-gpio";
+ reg = <1>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 32 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <16>;
+ };
+
+ gpc: gpio@2 {
+ compatible = "ingenic,jz4780-gpio";
+ reg = <2>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 64 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <15>;
+ };
+
+ gpd: gpio@3 {
+ compatible = "ingenic,jz4780-gpio";
+ reg = <3>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 96 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <14>;
+ };
+
+ gpe: gpio@4 {
+ compatible = "ingenic,jz4780-gpio";
+ reg = <4>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 128 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <13>;
+ };
+
+ gpf: gpio@5 {
+ compatible = "ingenic,jz4780-gpio";
+ reg = <5>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 160 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <12>;
+ };
+ };
+
+ spi_gpio {
+ compatible = "spi-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ num-chipselects = <2>;
+
+ gpio-miso = <&gpe 14 0>;
+ gpio-sck = <&gpe 15 0>;
+ gpio-mosi = <&gpe 17 0>;
+ cs-gpios = <&gpe 16 0>, <&gpe 18 0>;
+
+ spidev@0 {
+ compatible = "spidev";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ uart0: serial@10030000 {
+ compatible = "ingenic,jz4780-uart";
+ reg = <0x10030000 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <51>;
+
+ clocks = <&ext>, <&cgu JZ4780_CLK_UART0>;
+ clock-names = "baud", "module";
+
+ status = "disabled";
+ };
+
+ uart1: serial@10031000 {
+ compatible = "ingenic,jz4780-uart";
+ reg = <0x10031000 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <50>;
+
+ clocks = <&ext>, <&cgu JZ4780_CLK_UART1>;
+ clock-names = "baud", "module";
+
+ status = "disabled";
+ };
+
+ uart2: serial@10032000 {
+ compatible = "ingenic,jz4780-uart";
+ reg = <0x10032000 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <49>;
+
+ clocks = <&ext>, <&cgu JZ4780_CLK_UART2>;
+ clock-names = "baud", "module";
+
+ status = "disabled";
+ };
+
+ uart3: serial@10033000 {
+ compatible = "ingenic,jz4780-uart";
+ reg = <0x10033000 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <48>;
+
+ clocks = <&ext>, <&cgu JZ4780_CLK_UART3>;
+ clock-names = "baud", "module";
+
+ status = "disabled";
+ };
+
+ uart4: serial@10034000 {
+ compatible = "ingenic,jz4780-uart";
+ reg = <0x10034000 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <34>;
+
+ clocks = <&ext>, <&cgu JZ4780_CLK_UART4>;
+ clock-names = "baud", "module";
+
+ status = "disabled";
+ };
+
+ i2c0: i2c@10050000 {
+ compatible = "ingenic,jz4780-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <0x10050000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <60>;
+
+ clocks = <&cgu JZ4780_CLK_SMB0>;
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c0_data>;
+
+ status = "disabled";
+ };
+
+ i2c1: i2c@10051000 {
+ compatible = "ingenic,jz4780-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10051000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <59>;
+
+ clocks = <&cgu JZ4780_CLK_SMB1>;
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c1_data>;
+
+ status = "disabled";
+ };
+
+ i2c2: i2c@10052000 {
+ compatible = "ingenic,jz4780-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10052000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <58>;
+
+ clocks = <&cgu JZ4780_CLK_SMB2>;
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c2_data>;
+
+ status = "disabled";
+ };
+
+ i2c3: i2c@10053000 {
+ compatible = "ingenic,jz4780-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10053000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <57>;
+
+ clocks = <&cgu JZ4780_CLK_SMB3>;
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c3_data>;
+
+ status = "disabled";
+ };
+
+ i2c4: i2c@10054000 {
+ compatible = "ingenic,jz4780-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10054000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <56>;
+
+ clocks = <&cgu JZ4780_CLK_SMB4>;
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c4_data>;
+
+ status = "disabled";
+ };
+
+ nemc: nemc@13410000 {
+ compatible = "ingenic,jz4780-nemc", "simple-mfd";
+ reg = <0x13410000 0x10000>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0x13410000 0x10000>,
+ <1 0 0x1b000000 0x1000000>,
+ <2 0 0x1a000000 0x1000000>,
+ <3 0 0x19000000 0x1000000>,
+ <4 0 0x18000000 0x1000000>,
+ <5 0 0x17000000 0x1000000>,
+ <6 0 0x16000000 0x1000000>;
+
+ clocks = <&cgu JZ4780_CLK_NEMC>;
+
+ status = "disabled";
+
+ efuse: efuse@d0 {
+ reg = <0 0xd0 0x30>;
+ compatible = "ingenic,jz4780-efuse";
+
+ clocks = <&cgu JZ4780_CLK_AHB2>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ eth0_addr: eth-mac-addr@22 {
+ reg = <0x22 0x6>;
+ };
+ };
+ };
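
For reference, the ranges entries above use the NEMC's two-cell child addressing of <chip-select offset>: for example <1 0 0x1b000000 0x1000000> maps chip select 1, offset 0, to a 16 MiB window at physical 0x1b000000, and the <0 0 0x13410000 0x10000> entry exposes the controller's own register block, so the efuse child at <0 0xd0 0x30> lands at 0x134100d0. This reading follows standard devicetree ranges semantics rather than anything specific to this patch.
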
+
+ dma: dma@13420000 {
+ compatible = "ingenic,jz4780-dma";
+ reg = <0x13420000 0x400>, <0x13421000 0x40>;
+ #dma-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <10>;
+
+ clocks = <&cgu JZ4780_CLK_PDMA>;
+ };
+
+ mmc0: mmc@13450000 {
+ compatible = "ingenic,jz4780-mmc";
+ reg = <0x13450000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <37>;
+
+ clocks = <&cgu JZ4780_CLK_MSC0>;
+ clock-names = "mmc";
+
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cap-sdio-irq;
+ dmas = <&dma JZ4780_DMA_MSC0_RX 0xffffffff>,
+ <&dma JZ4780_DMA_MSC0_TX 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ status = "disabled";
+ };
+
+ mmc1: mmc@13460000 {
+ compatible = "ingenic,jz4780-mmc";
+ reg = <0x13460000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <36>;
+
+ clocks = <&cgu JZ4780_CLK_MSC1>;
+ clock-names = "mmc";
+
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cap-sdio-irq;
+ dmas = <&dma JZ4780_DMA_MSC1_RX 0xffffffff>,
+ <&dma JZ4780_DMA_MSC1_TX 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ status = "disabled";
+ };
+
+ bch: bch@134d0000 {
+ compatible = "ingenic,jz4780-bch";
+ reg = <0x134d0000 0x10000>;
+
+ clocks = <&cgu JZ4780_CLK_BCH>;
+
+ status = "disabled";
+ };
+};
diff --git a/arch/mips/boot/dts/ingenic/qi_lb60.dts b/arch/mips/boot/dts/ingenic/qi_lb60.dts
new file mode 100644
index 000000000..ba0218971
--- /dev/null
+++ b/arch/mips/boot/dts/ingenic/qi_lb60.dts
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include "jz4740.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/iio/adc/ingenic,adc.h>
+#include <dt-bindings/clock/ingenic,tcu.h>
+#include <dt-bindings/input/input.h>
+
+#define KEY_QI_QI KEY_F13
+#define KEY_QI_UPRED KEY_RIGHTALT
+#define KEY_QI_VOLUP KEY_VOLUMEUP
+#define KEY_QI_VOLDOWN KEY_VOLUMEDOWN
+#define KEY_QI_FN KEY_LEFTCTRL
+
+/ {
+ compatible = "qi,lb60", "ingenic,jz4740";
+ model = "Ben Nanonote";
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x2000000>;
+ };
+
+ chosen {
+ stdout-path = &uart0;
+ };
+
+ vcc: regulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc";
+
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ mmc_power: regulator@1 {
+ compatible = "regulator-fixed";
+ regulator-name = "mmc_vcc";
+ gpio = <&gpd 2 0>;
+
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ amp_supply: regulator@2 {
+ compatible = "regulator-fixed";
+ regulator-name = "amp_supply";
+ gpio = <&gpd 4 0>;
+ enable-active-high;
+
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ amp: analog-amplifier {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpb 29 GPIO_ACTIVE_HIGH>;
+ VCC-supply = <&amp_supply>;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+
+ simple-audio-card,name = "QI LB60";
+ simple-audio-card,format = "i2s";
+
+ simple-audio-card,widgets =
+ "Speaker", "Speaker",
+ "Microphone", "Mic";
+ simple-audio-card,routing =
+ "MIC", "Mic",
+ "Speaker", "OUTL",
+ "Speaker", "OUTR",
+ "INL", "LOUT",
+ "INR", "ROUT";
+
+ simple-audio-card,aux-devs = <&amp>;
+
+ simple-audio-card,bitclock-master = <&dai_codec>;
+ simple-audio-card,frame-master = <&dai_codec>;
+
+ dai_cpu: simple-audio-card,cpu {
+ sound-dai = <&aic>;
+ };
+
+ dai_codec: simple-audio-card,codec {
+ sound-dai = <&codec>;
+ };
+ };
+
+ keys {
+ compatible = "gpio-keys";
+
+ key {
+ label = "Power";
+ wakeup-source;
+ linux,code = <KEY_POWER>;
+ gpios = <&gpd 29 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ keyboard {
+ compatible = "gpio-matrix-keypad";
+
+ col-scan-delay-us = <10>;
+ debounce-delay-ms = <10>;
+ wakeup-source;
+
+ row-gpios = <&gpd 18 0>, <&gpd 19 0>, <&gpd 20 0>, <&gpd 21 0>,
+ <&gpd 22 0>, <&gpd 23 0>, <&gpd 24 0>, <&gpd 26 0>;
+ col-gpios = <&gpc 10 0>, <&gpc 11 0>, <&gpc 12 0>, <&gpc 13 0>,
+ <&gpc 14 0>, <&gpc 15 0>, <&gpc 16 0>, <&gpc 17 0>;
+ gpio-activelow;
+
+ linux,keymap =
+ <MATRIX_KEY(0, 0, KEY_F1)>, /* S2 */
+ <MATRIX_KEY(0, 1, KEY_F2)>, /* S3 */
+ <MATRIX_KEY(0, 2, KEY_F3)>, /* S4 */
+ <MATRIX_KEY(0, 3, KEY_F4)>, /* S5 */
+ <MATRIX_KEY(0, 4, KEY_F5)>, /* S6 */
+ <MATRIX_KEY(0, 5, KEY_F6)>, /* S7 */
+ <MATRIX_KEY(0, 6, KEY_F7)>, /* S8 */
+
+ <MATRIX_KEY(1, 0, KEY_Q)>, /* S10 */
+ <MATRIX_KEY(1, 1, KEY_W)>, /* S11 */
+ <MATRIX_KEY(1, 2, KEY_E)>, /* S12 */
+ <MATRIX_KEY(1, 3, KEY_R)>, /* S13 */
+ <MATRIX_KEY(1, 4, KEY_T)>, /* S14 */
+ <MATRIX_KEY(1, 5, KEY_Y)>, /* S15 */
+ <MATRIX_KEY(1, 6, KEY_U)>, /* S16 */
+ <MATRIX_KEY(1, 7, KEY_I)>, /* S17 */
+ <MATRIX_KEY(2, 0, KEY_A)>, /* S18 */
+ <MATRIX_KEY(2, 1, KEY_S)>, /* S19 */
+ <MATRIX_KEY(2, 2, KEY_D)>, /* S20 */
+ <MATRIX_KEY(2, 3, KEY_F)>, /* S21 */
+ <MATRIX_KEY(2, 4, KEY_G)>, /* S22 */
+ <MATRIX_KEY(2, 5, KEY_H)>, /* S23 */
+ <MATRIX_KEY(2, 6, KEY_J)>, /* S24 */
+ <MATRIX_KEY(2, 7, KEY_K)>, /* S25 */
+ <MATRIX_KEY(3, 0, KEY_ESC)>, /* S26 */
+ <MATRIX_KEY(3, 1, KEY_Z)>, /* S27 */
+ <MATRIX_KEY(3, 2, KEY_X)>, /* S28 */
+ <MATRIX_KEY(3, 3, KEY_C)>, /* S29 */
+ <MATRIX_KEY(3, 4, KEY_V)>, /* S30 */
+ <MATRIX_KEY(3, 5, KEY_B)>, /* S31 */
+ <MATRIX_KEY(3, 6, KEY_N)>, /* S32 */
+ <MATRIX_KEY(3, 7, KEY_M)>, /* S33 */
+ <MATRIX_KEY(4, 0, KEY_TAB)>, /* S34 */
+ <MATRIX_KEY(4, 1, KEY_CAPSLOCK)>, /* S35 */
+ <MATRIX_KEY(4, 2, KEY_BACKSLASH)>, /* S36 */
+ <MATRIX_KEY(4, 3, KEY_APOSTROPHE)>, /* S37 */
+ <MATRIX_KEY(4, 4, KEY_COMMA)>, /* S38 */
+ <MATRIX_KEY(4, 5, KEY_DOT)>, /* S39 */
+ <MATRIX_KEY(4, 6, KEY_SLASH)>, /* S40 */
+ <MATRIX_KEY(4, 7, KEY_UP)>, /* S41 */
+ <MATRIX_KEY(5, 0, KEY_O)>, /* S42 */
+ <MATRIX_KEY(5, 1, KEY_L)>, /* S43 */
+ <MATRIX_KEY(5, 2, KEY_EQUAL)>, /* S44 */
+ <MATRIX_KEY(5, 3, KEY_QI_UPRED)>, /* S45 */
+ <MATRIX_KEY(5, 4, KEY_SPACE)>, /* S46 */
+ <MATRIX_KEY(5, 5, KEY_QI_QI)>, /* S47 */
+ <MATRIX_KEY(5, 6, KEY_RIGHTCTRL)>, /* S48 */
+ <MATRIX_KEY(5, 7, KEY_LEFT)>, /* S49 */
+ <MATRIX_KEY(6, 0, KEY_F8)>, /* S50 */
+ <MATRIX_KEY(6, 1, KEY_P)>, /* S51 */
+ <MATRIX_KEY(6, 2, KEY_BACKSPACE)>,/* S52 */
+ <MATRIX_KEY(6, 3, KEY_ENTER)>, /* S53 */
+ <MATRIX_KEY(6, 4, KEY_QI_VOLUP)>, /* S54 */
+ <MATRIX_KEY(6, 5, KEY_QI_VOLDOWN)>, /* S55 */
+ <MATRIX_KEY(6, 6, KEY_DOWN)>, /* S56 */
+ <MATRIX_KEY(6, 7, KEY_RIGHT)>, /* S57 */
+
+ <MATRIX_KEY(7, 0, KEY_LEFTSHIFT)>, /* S58 */
+ <MATRIX_KEY(7, 1, KEY_LEFTALT)>, /* S59 */
+ <MATRIX_KEY(7, 2, KEY_QI_FN)>; /* S60 */
+ };
+
+ spi {
+ compatible = "spi-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sck-gpios = <&gpc 23 GPIO_ACTIVE_HIGH>;
+ mosi-gpios = <&gpc 22 GPIO_ACTIVE_HIGH>;
+ cs-gpios = <&gpc 21 GPIO_ACTIVE_LOW>;
+ num-chipselects = <1>;
+ };
+
+ usb_charger: charger {
+ compatible = "gpio-charger";
+ charger-type = "usb-sdp";
+ gpios = <&gpd 28 GPIO_ACTIVE_LOW>;
+ status-gpios = <&gpc 27 GPIO_ACTIVE_LOW>;
+ };
+
+ simple_battery: battery {
+ compatible = "simple-battery";
+ voltage-min-design-microvolt = <3600000>;
+ voltage-max-design-microvolt = <4200000>;
+ };
+
+ pmu {
+ compatible = "ingenic,jz4740-battery";
+ io-channels = <&adc INGENIC_ADC_BATTERY>;
+ io-channel-names = "battery";
+ power-supplies = <&usb_charger>;
+ monitored-battery = <&simple_battery>;
+ };
+
+ hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc INGENIC_ADC_AUX>;
+ };
+
+ panel: panel {
+ compatible = "giantplus,gpm940b0";
+
+ power-supply = <&vcc>;
+
+ port {
+ panel_input: endpoint {
+ remote-endpoint = <&panel_output>;
+ };
+ };
+ };
+
+ usb_phy: usb-phy {
+ compatible = "usb-nop-xceiv";
+ #phy-cells = <0>;
+
+ vcc-supply = <&vcc>;
+ };
+};
+
+&ext {
+ clock-frequency = <12000000>;
+};
+
+&rtc_dev {
+ system-power-controller;
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_uart0>;
+};
+
+&uart1 {
+ status = "disabled";
+};
+
+&nemc {
+ nandc: nand-controller@1 {
+ compatible = "ingenic,jz4740-nand";
+ reg = <1 0 0x4000000>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ecc-engine = <&ecc>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_nemc>;
+
+ rb-gpios = <&gpc 30 GPIO_ACTIVE_HIGH>;
+
+ nand@1 {
+ reg = <1>;
+
+ nand-ecc-step-size = <512>;
+ nand-ecc-strength = <4>;
+ nand-ecc-mode = "hw";
+ nand-is-boot-medium;
+ nand-on-flash-bbt;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "boot";
+ reg = <0x0 0x400000>;
+ };
+
+ partition@400000 {
+ label = "kernel";
+ reg = <0x400000 0x400000>;
+ };
+
+ partition@800000 {
+ label = "rootfs";
+ reg = <0x800000 0x0>;
+ };
+ };
+ };
+ };
+};
+
+&lcd {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_lcd>;
+
+ port {
+ panel_output: endpoint {
+ remote-endpoint = <&panel_input>;
+ };
+ };
+};
+
+&udc {
+ phys = <&usb_phy>;
+};
+
+&pinctrl {
+ pins_lcd: lcd {
+ function = "lcd";
+ groups = "lcd-8bit";
+ };
+
+ pins_nemc: nemc {
+ function = "nand";
+ groups = "nand-fre-fwe", "nand-cs1";
+ };
+
+ pins_uart0: uart0 {
+ function = "uart0";
+ groups = "uart0-data";
+ bias-disable;
+ };
+
+ pins_mmc: mmc {
+ mmc {
+ function = "mmc";
+ groups = "mmc-1bit", "mmc-4bit";
+ bias-disable;
+ };
+
+ mmc-gpios {
+ pins = "PD0", "PD2";
+ bias-disable;
+ };
+ };
+};
+
+&mmc {
+ bus-width = <4>;
+ max-frequency = <24000000>;
+ cd-gpios = <&gpd 0 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&mmc_power>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_mmc>;
+};
+
+&tcu {
+ /* 750 kHz for the system timer and clocksource */
+ assigned-clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER1>;
+ assigned-clock-rates = <750000>, <750000>;
+};
diff --git a/arch/mips/boot/dts/ingenic/rs90.dts b/arch/mips/boot/dts/ingenic/rs90.dts
new file mode 100644
index 000000000..4eb1edbfc
--- /dev/null
+++ b/arch/mips/boot/dts/ingenic/rs90.dts
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include "jz4725b.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/iio/adc/ingenic,adc.h>
+#include <dt-bindings/input/linux-event-codes.h>
+
+/ {
+ compatible = "ylm,rs90", "ingenic,jz4725b";
+ model = "RS-90";
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x2000000>;
+ };
+
+ vcc: regulator {
+ compatible = "regulator-fixed";
+
+ regulator-name = "vcc";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 3 40000 0>;
+
+ brightness-levels = <0 16 32 48 64 80 112 144 192 255>;
+ default-brightness-level = <8>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_pwm3>;
+
+ power-supply = <&vcc>;
+ };
+
+ keys@0 {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ key@0 {
+ label = "D-pad up";
+ linux,code = <KEY_UP>;
+ gpios = <&gpc 10 GPIO_ACTIVE_LOW>;
+ };
+
+ key@1 {
+ label = "D-pad down";
+ linux,code = <KEY_DOWN>;
+ gpios = <&gpc 11 GPIO_ACTIVE_LOW>;
+ };
+
+ key@2 {
+ label = "D-pad left";
+ linux,code = <KEY_LEFT>;
+ gpios = <&gpb 31 GPIO_ACTIVE_LOW>;
+ };
+
+ key@3 {
+ label = "D-pad right";
+ linux,code = <KEY_RIGHT>;
+ gpios = <&gpd 21 GPIO_ACTIVE_LOW>;
+ };
+
+ key@4 {
+ label = "Button A";
+ linux,code = <KEY_LEFTCTRL>;
+ gpios = <&gpc 31 GPIO_ACTIVE_LOW>;
+ };
+
+ key@5 {
+ label = "Button B";
+ linux,code = <KEY_LEFTALT>;
+ gpios = <&gpc 30 GPIO_ACTIVE_LOW>;
+ };
+
+ key@6 {
+ label = "Right shoulder button";
+ linux,code = <KEY_BACKSPACE>;
+ gpios = <&gpc 12 GPIO_ACTIVE_LOW>;
+ debounce-interval = <10>;
+ };
+
+ key@7 {
+ label = "Start button";
+ linux,code = <KEY_ENTER>;
+ gpios = <&gpd 17 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ keys@1 {
+ compatible = "adc-keys";
+ io-channels = <&adc INGENIC_ADC_AUX>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1400000>;
+ poll-interval = <30>;
+
+ key@0 {
+ label = "Left shoulder button";
+ linux,code = <KEY_TAB>;
+ press-threshold-microvolt = <800000>;
+ };
+
+ key@1 {
+ label = "Select button";
+ linux,code = <KEY_ESC>;
+ press-threshold-microvolt = <1100000>;
+ };
+ };
+
+ amp: analog-amplifier {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpc 15 GPIO_ACTIVE_HIGH>;
+
+ VCC-supply = <&vcc>;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+
+ simple-audio-card,name = "rs90-audio";
+ simple-audio-card,format = "i2s";
+
+ simple-audio-card,widgets =
+ "Speaker", "Speaker",
+ "Headphone", "Headphones";
+ simple-audio-card,routing =
+ "INL", "LHPOUT",
+ "INR", "RHPOUT",
+ "Headphones", "LHPOUT",
+ "Headphones", "RHPOUT",
+ "Speaker", "OUTL",
+ "Speaker", "OUTR";
+ simple-audio-card,pin-switches = "Speaker";
+
+ simple-audio-card,hp-det-gpio = <&gpd 16 GPIO_ACTIVE_LOW>;
+ simple-audio-card,aux-devs = <&amp>;
+
+ simple-audio-card,bitclock-master = <&dai_codec>;
+ simple-audio-card,frame-master = <&dai_codec>;
+
+ dai_cpu: simple-audio-card,cpu {
+ sound-dai = <&aic>;
+ };
+
+ dai_codec: simple-audio-card,codec {
+ sound-dai = <&codec>;
+ };
+
+ };
+
+ usb_phy: usb-phy {
+ compatible = "usb-nop-xceiv";
+ #phy-cells = <0>;
+
+ clocks = <&cgu JZ4725B_CLK_UDC_PHY>;
+ clock-names = "main_clk";
+ vcc-supply = <&vcc>;
+ };
+
+ panel {
+ compatible = "sharp,ls020b1dd01d";
+
+ backlight = <&backlight>;
+ power-supply = <&vcc>;
+
+ port {
+ panel_input: endpoint {
+ remote-endpoint = <&panel_output>;
+ };
+ };
+ };
+};
+
+&ext {
+ clock-frequency = <12000000>;
+};
+
+&rtc_dev {
+ system-power-controller;
+};
+
+&udc {
+ phys = <&usb_phy>;
+};
+
+&pinctrl {
+ pins_mmc1: mmc1 {
+ function = "mmc1";
+ groups = "mmc1-1bit";
+ };
+
+ pins_nemc: nemc {
+ function = "nand";
+ groups = "nand-cs1", "nand-cle-ale", "nand-fre-fwe";
+ };
+
+ pins_pwm3: pwm3 {
+ function = "pwm3";
+ groups = "pwm3";
+ bias-disable;
+ };
+
+ pins_lcd: lcd {
+ function = "lcd";
+ groups = "lcd-8bit", "lcd-16bit", "lcd-special";
+ };
+};
+
+&mmc0 {
+ status = "disabled";
+};
+
+&mmc1 {
+ bus-width = <1>;
+ max-frequency = <48000000>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_mmc1>;
+
+ cd-gpios = <&gpc 20 GPIO_ACTIVE_LOW>;
+};
+
+&uart {
+ /*
+ * The pins for RX/TX are used for the right shoulder button and
+ * backlight PWM.
+ */
+ status = "disabled";
+};
+
+&nemc {
+ nandc: nand-controller@1 {
+ compatible = "ingenic,jz4725b-nand";
+ reg = <1 0 0x4000000>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ecc-engine = <&bch>;
+
+ ingenic,nemc-tAS = <10>;
+ ingenic,nemc-tAH = <5>;
+ ingenic,nemc-tBP = <10>;
+ ingenic,nemc-tAW = <15>;
+ ingenic,nemc-tSTRV = <100>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_nemc>;
+
+ rb-gpios = <&gpc 27 GPIO_ACTIVE_HIGH>;
+
+ nand@1 {
+ reg = <1>;
+
+ nand-ecc-step-size = <512>;
+ nand-ecc-strength = <8>;
+ nand-ecc-mode = "hw";
+ nand-is-boot-medium;
+ nand-on-flash-bbt;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "bootloader";
+ reg = <0x0 0x20000>;
+ };
+
+ partition@20000 {
+ label = "system";
+ reg = <0x20000 0x0>;
+ };
+ };
+ };
+ };
+};
+
+&cgu {
+ /* Use 32kHz oscillator as the parent of the RTC clock */
+ assigned-clocks = <&cgu JZ4725B_CLK_RTC>;
+ assigned-clock-parents = <&cgu JZ4725B_CLK_OSC32K>;
+};
+
+&tcu {
+ /*
+ * 750 kHz for the system timer and clocksource, and use RTC as the
+ * parent for the watchdog clock.
+ */
+ assigned-clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER1>, <&tcu TCU_CLK_WDT>;
+ assigned-clock-parents = <0>, <0>, <&cgu JZ4725B_CLK_RTC>;
+ assigned-clock-rates = <750000>, <750000>;
+};
+
+&lcd {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_lcd>;
+};
+
+&lcd_ports {
+ port@0 {
+ reg = <0>;
+
+ panel_output: endpoint {
+ remote-endpoint = <&panel_input>;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/ingenic/x1000.dtsi b/arch/mips/boot/dts/ingenic/x1000.dtsi
new file mode 100644
index 000000000..1f1f896dd
--- /dev/null
+++ b/arch/mips/boot/dts/ingenic/x1000.dtsi
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <dt-bindings/clock/ingenic,tcu.h>
+#include <dt-bindings/clock/x1000-cgu.h>
+#include <dt-bindings/dma/x1000-dma.h>
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ingenic,x1000", "ingenic,x1000e";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "ingenic,xburst-fpu1.0-mxu1.1";
+ reg = <0>;
+
+ clocks = <&cgu X1000_CLK_CPU>;
+ clock-names = "cpu";
+ };
+ };
+
+ cpuintc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ intc: interrupt-controller@10001000 {
+ compatible = "ingenic,x1000-intc", "ingenic,jz4780-intc";
+ reg = <0x10001000 0x50>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
+
+ exclk: ext {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+
+ rtclk: rtc {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ cgu: x1000-cgu@10000000 {
+ compatible = "ingenic,x1000-cgu";
+ reg = <0x10000000 0x100>;
+
+ #clock-cells = <1>;
+
+ clocks = <&exclk>, <&rtclk>;
+ clock-names = "ext", "rtc";
+ };
+
+ tcu: timer@10002000 {
+ compatible = "ingenic,x1000-tcu", "simple-mfd";
+ reg = <0x10002000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x10002000 0x1000>;
+
+ #clock-cells = <1>;
+
+ clocks = <&cgu X1000_CLK_RTCLK>,
+ <&cgu X1000_CLK_EXCLK>,
+ <&cgu X1000_CLK_PCLK>;
+ clock-names = "rtc", "ext", "pclk";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <27 26 25>;
+
+ wdt: watchdog@0 {
+ compatible = "ingenic,x1000-watchdog", "ingenic,jz4780-watchdog";
+ reg = <0x0 0x10>;
+
+ clocks = <&tcu TCU_CLK_WDT>;
+ clock-names = "wdt";
+ };
+ };
+
+ rtc: rtc@10003000 {
+ compatible = "ingenic,x1000-rtc", "ingenic,jz4780-rtc";
+ reg = <0x10003000 0x4c>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <32>;
+
+ clocks = <&cgu X1000_CLK_RTCLK>;
+ clock-names = "rtc";
+ };
+
+ pinctrl: pin-controller@10010000 {
+ compatible = "ingenic,x1000-pinctrl";
+ reg = <0x10010000 0x800>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpa: gpio@0 {
+ compatible = "ingenic,x1000-gpio";
+ reg = <0>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <17>;
+ };
+
+ gpb: gpio@1 {
+ compatible = "ingenic,x1000-gpio";
+ reg = <1>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 32 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <16>;
+ };
+
+ gpc: gpio@2 {
+ compatible = "ingenic,x1000-gpio";
+ reg = <2>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 64 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <15>;
+ };
+
+ gpd: gpio@3 {
+ compatible = "ingenic,x1000-gpio";
+ reg = <3>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 96 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <14>;
+ };
+ };
+
+ uart0: serial@10030000 {
+ compatible = "ingenic,x1000-uart";
+ reg = <0x10030000 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <51>;
+
+ clocks = <&exclk>, <&cgu X1000_CLK_UART0>;
+ clock-names = "baud", "module";
+
+ status = "disabled";
+ };
+
+ uart1: serial@10031000 {
+ compatible = "ingenic,x1000-uart";
+ reg = <0x10031000 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <50>;
+
+ clocks = <&exclk>, <&cgu X1000_CLK_UART1>;
+ clock-names = "baud", "module";
+
+ status = "disabled";
+ };
+
+ uart2: serial@10032000 {
+ compatible = "ingenic,x1000-uart";
+ reg = <0x10032000 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <49>;
+
+ clocks = <&exclk>, <&cgu X1000_CLK_UART2>;
+ clock-names = "baud", "module";
+
+ status = "disabled";
+ };
+
+ i2c0: i2c-controller@10050000 {
+ compatible = "ingenic,x1000-i2c";
+ reg = <0x10050000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <60>;
+
+ clocks = <&cgu X1000_CLK_I2C0>;
+
+ status = "disabled";
+ };
+
+ i2c1: i2c-controller@10051000 {
+ compatible = "ingenic,x1000-i2c";
+ reg = <0x10051000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <59>;
+
+ clocks = <&cgu X1000_CLK_I2C1>;
+
+ status = "disabled";
+ };
+
+ i2c2: i2c-controller@10052000 {
+ compatible = "ingenic,x1000-i2c";
+ reg = <0x10052000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <58>;
+
+ clocks = <&cgu X1000_CLK_I2C2>;
+
+ status = "disabled";
+ };
+
+ pdma: dma-controller@13420000 {
+ compatible = "ingenic,x1000-dma";
+ reg = <0x13420000 0x400>, <0x13421000 0x40>;
+ #dma-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <10>;
+
+ clocks = <&cgu X1000_CLK_PDMA>;
+ };
+
+ msc0: mmc@13450000 {
+ compatible = "ingenic,x1000-mmc";
+ reg = <0x13450000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <37>;
+
+ clocks = <&cgu X1000_CLK_MSC0>;
+ clock-names = "mmc";
+
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cap-sdio-irq;
+
+ dmas = <&pdma X1000_DMA_MSC0_RX 0xffffffff>,
+ <&pdma X1000_DMA_MSC0_TX 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ status = "disabled";
+ };
+
+ msc1: mmc@13460000 {
+ compatible = "ingenic,x1000-mmc";
+ reg = <0x13460000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <36>;
+
+ clocks = <&cgu X1000_CLK_MSC1>;
+ clock-names = "mmc";
+
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cap-sdio-irq;
+
+ dmas = <&pdma X1000_DMA_MSC1_RX 0xffffffff>,
+ <&pdma X1000_DMA_MSC1_TX 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ status = "disabled";
+ };
+
+ mac: ethernet@134b0000 {
+ compatible = "ingenic,x1000-mac", "snps,dwmac";
+ reg = <0x134b0000 0x2000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <55>;
+ interrupt-names = "macirq";
+
+ clocks = <&cgu X1000_CLK_MAC>;
+ clock-names = "stmmaceth";
+
+ status = "disabled";
+
+ mdio: mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/ingenic/x1830.dtsi b/arch/mips/boot/dts/ingenic/x1830.dtsi
new file mode 100644
index 000000000..b05dac3ae
--- /dev/null
+++ b/arch/mips/boot/dts/ingenic/x1830.dtsi
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <dt-bindings/clock/ingenic,tcu.h>
+#include <dt-bindings/clock/x1830-cgu.h>
+#include <dt-bindings/dma/x1830-dma.h>
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ingenic,x1830";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "ingenic,xburst-fpu2.0-mxu2.0";
+ reg = <0>;
+
+ clocks = <&cgu X1830_CLK_CPU>;
+ clock-names = "cpu";
+ };
+ };
+
+ cpuintc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ intc: interrupt-controller@10001000 {
+ compatible = "ingenic,x1830-intc", "ingenic,jz4780-intc";
+ reg = <0x10001000 0x50>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
+
+ exclk: ext {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+
+ rtclk: rtc {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ cgu: x1830-cgu@10000000 {
+ compatible = "ingenic,x1830-cgu";
+ reg = <0x10000000 0x100>;
+
+ #clock-cells = <1>;
+
+ clocks = <&exclk>, <&rtclk>;
+ clock-names = "ext", "rtc";
+ };
+
+ tcu: timer@10002000 {
+ compatible = "ingenic,x1830-tcu", "ingenic,x1000-tcu", "simple-mfd";
+ reg = <0x10002000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x10002000 0x1000>;
+
+ #clock-cells = <1>;
+
+ clocks = <&cgu X1830_CLK_RTCLK
+ &cgu X1830_CLK_EXCLK
+ &cgu X1830_CLK_PCLK>;
+ clock-names = "rtc", "ext", "pclk";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <27 26 25>;
+
+ wdt: watchdog@0 {
+ compatible = "ingenic,x1830-watchdog", "ingenic,jz4780-watchdog";
+ reg = <0x0 0x10>;
+
+ clocks = <&tcu TCU_CLK_WDT>;
+ clock-names = "wdt";
+ };
+ };
+
+ rtc: rtc@10003000 {
+ compatible = "ingenic,x1830-rtc", "ingenic,jz4780-rtc";
+ reg = <0x10003000 0x4c>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <32>;
+
+ clocks = <&cgu X1830_CLK_RTCLK>;
+ clock-names = "rtc";
+ };
+
+ pinctrl: pin-controller@10010000 {
+ compatible = "ingenic,x1830-pinctrl";
+ reg = <0x10010000 0x800>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpa: gpio@0 {
+ compatible = "ingenic,x1830-gpio";
+ reg = <0>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <17>;
+ };
+
+ gpb: gpio@1 {
+ compatible = "ingenic,x1830-gpio";
+ reg = <1>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 32 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <16>;
+ };
+
+ gpc: gpio@2 {
+ compatible = "ingenic,x1830-gpio";
+ reg = <2>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 64 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <15>;
+ };
+
+ gpd: gpio@3 {
+ compatible = "ingenic,x1830-gpio";
+ reg = <3>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 96 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <14>;
+ };
+ };
+
+ uart0: serial@10030000 {
+ compatible = "ingenic,x1830-uart", "ingenic,x1000-uart";
+ reg = <0x10030000 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <51>;
+
+ clocks = <&exclk>, <&cgu X1830_CLK_UART0>;
+ clock-names = "baud", "module";
+
+ status = "disabled";
+ };
+
+ uart1: serial@10031000 {
+ compatible = "ingenic,x1830-uart", "ingenic,x1000-uart";
+ reg = <0x10031000 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <50>;
+
+ clocks = <&exclk>, <&cgu X1830_CLK_UART1>;
+ clock-names = "baud", "module";
+
+ status = "disabled";
+ };
+
+ i2c0: i2c-controller@10050000 {
+ compatible = "ingenic,x1830-i2c", "ingenic,x1000-i2c";
+ reg = <0x10050000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <60>;
+
+ clocks = <&cgu X1830_CLK_SMB0>;
+
+ status = "disabled";
+ };
+
+ i2c1: i2c-controller@10051000 {
+ compatible = "ingenic,x1830-i2c", "ingenic,x1000-i2c";
+ reg = <0x10051000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <59>;
+
+ clocks = <&cgu X1830_CLK_SMB1>;
+
+ status = "disabled";
+ };
+
+ i2c2: i2c-controller@10052000 {
+ compatible = "ingenic,x1830-i2c", "ingenic,x1000-i2c";
+ reg = <0x10052000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <58>;
+
+ clocks = <&cgu X1830_CLK_SMB2>;
+
+ status = "disabled";
+ };
+
+ pdma: dma-controller@13420000 {
+ compatible = "ingenic,x1830-dma";
+ reg = <0x13420000 0x400
+ 0x13421000 0x40>;
+ #dma-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <10>;
+
+ clocks = <&cgu X1830_CLK_PDMA>;
+ };
+
+ msc0: mmc@13450000 {
+ compatible = "ingenic,x1830-mmc", "ingenic,x1000-mmc";
+ reg = <0x13450000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <37>;
+
+ clocks = <&cgu X1830_CLK_MSC0>;
+ clock-names = "mmc";
+
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cap-sdio-irq;
+
+ dmas = <&pdma X1830_DMA_MSC0_RX 0xffffffff>,
+ <&pdma X1830_DMA_MSC0_TX 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ status = "disabled";
+ };
+
+ msc1: mmc@13460000 {
+ compatible = "ingenic,x1830-mmc", "ingenic,x1000-mmc";
+ reg = <0x13460000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <36>;
+
+ clocks = <&cgu X1830_CLK_MSC1>;
+ clock-names = "mmc";
+
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cap-sdio-irq;
+
+ dmas = <&pdma X1830_DMA_MSC1_RX 0xffffffff>,
+ <&pdma X1830_DMA_MSC1_TX 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ status = "disabled";
+ };
+
+ mac: ethernet@134b0000 {
+ compatible = "ingenic,x1830-mac", "snps,dwmac";
+ reg = <0x134b0000 0x2000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <55>;
+ interrupt-names = "macirq";
+
+ clocks = <&cgu X1830_CLK_MAC>;
+ clock-names = "stmmaceth";
+
+ status = "disabled";
+
+ mdio: mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/lantiq/Makefile b/arch/mips/boot/dts/lantiq/Makefile
new file mode 100644
index 000000000..f5dfc0624
--- /dev/null
+++ b/arch/mips/boot/dts/lantiq/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_DT_EASY50712) += easy50712.dtb
+
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/lantiq/danube.dtsi b/arch/mips/boot/dts/lantiq/danube.dtsi
new file mode 100644
index 000000000..510be63c8
--- /dev/null
+++ b/arch/mips/boot/dts/lantiq/danube.dtsi
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "lantiq,xway", "lantiq,danube";
+
+ cpus {
+ cpu@0 {
+ compatible = "mips,mips24Kc";
+ };
+ };
+
+ biu@1f800000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "lantiq,biu", "simple-bus";
+ reg = <0x1f800000 0x800000>;
+ ranges = <0x0 0x1f800000 0x7fffff>;
+
+ icu0: icu@80200 {
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "lantiq,icu";
+ reg = <0x80200 0x120>;
+ };
+
+ watchdog@803f0 {
+ compatible = "lantiq,wdt";
+ reg = <0x803f0 0x10>;
+ };
+ };
+
+ sram@1f000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "lantiq,sram";
+ reg = <0x1f000000 0x800000>;
+ ranges = <0x0 0x1f000000 0x7fffff>;
+
+ eiu0: eiu@101000 {
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ interrupt-parent;
+ compatible = "lantiq,eiu-xway";
+ reg = <0x101000 0x1000>;
+ };
+
+ pmu0: pmu@102000 {
+ compatible = "lantiq,pmu-xway";
+ reg = <0x102000 0x1000>;
+ };
+
+ cgu0: cgu@103000 {
+ compatible = "lantiq,cgu-xway";
+ reg = <0x103000 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ rcu0: rcu@203000 {
+ compatible = "lantiq,rcu-xway";
+ reg = <0x203000 0x1000>;
+ };
+ };
+
+ fpi@10000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "lantiq,fpi", "simple-bus";
+ ranges = <0x0 0x10000000 0xeefffff>;
+ reg = <0x10000000 0xef00000>;
+
+ gptu@e100a00 {
+ compatible = "lantiq,gptu-xway";
+ reg = <0xe100a00 0x100>;
+ };
+
+ serial@e100c00 {
+ compatible = "lantiq,asc";
+ reg = <0xe100c00 0x400>;
+ interrupt-parent = <&icu0>;
+ interrupts = <112 113 114>;
+ };
+
+ dma0: dma@e104100 {
+ compatible = "lantiq,dma-xway";
+ reg = <0xe104100 0x800>;
+ };
+
+ ebu0: ebu@e105300 {
+ compatible = "lantiq,ebu-xway";
+ reg = <0xe105300 0x100>;
+ };
+
+ pci0: pci@e105400 {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ compatible = "lantiq,pci-xway";
+ bus-range = <0x0 0x0>;
+ ranges = <0x2000000 0 0x8000000 0x8000000 0 0x2000000 /* pci memory */
+ 0x1000000 0 0x00000000 0xae00000 0 0x200000>; /* io space */
+ reg = <0x7000000 0x8000 /* config space */
+ 0xe105400 0x400>; /* pci bridge */
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/lantiq/easy50712.dts b/arch/mips/boot/dts/lantiq/easy50712.dts
new file mode 100644
index 000000000..1ce20b7d0
--- /dev/null
+++ b/arch/mips/boot/dts/lantiq/easy50712.dts
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "danube.dtsi"
+
+/ {
+ chosen {
+ bootargs = "console=ttyLTQ0,115200 init=/etc/preinit";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x2000000>;
+ };
+
+ fpi@10000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ localbus@0 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0x0 0x3ffffff /* addrsel0 */
+ 1 0 0x4000000 0x4000010>; /* addrsel1 */
+ compatible = "lantiq,localbus", "simple-bus";
+
+ nor-boot@0 {
+ compatible = "lantiq,nor";
+ bank-width = <2>;
+ reg = <0 0x0 0x2000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x00000 0x10000>; /* 64 KB */
+ };
+
+ partition@10000 {
+ label = "uboot_env";
+ reg = <0x10000 0x10000>; /* 64 KB */
+ };
+
+ partition@20000 {
+ label = "linux";
+ reg = <0x20000 0x3d0000>;
+ };
+
+ partition@400000 {
+ label = "rootfs";
+ reg = <0x400000 0x400000>;
+ };
+ };
+ };
+
+ gpio: pinmux@e100b10 {
+ compatible = "lantiq,danube-pinctrl";
+ pinctrl-names = "default";
+ pinctrl-0 = <&state_default>;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+ reg = <0xe100b10 0xa0>;
+
+ state_default: pinmux {
+ stp {
+ lantiq,groups = "stp";
+ lantiq,function = "stp";
+ };
+ exin {
+ lantiq,groups = "exin1";
+ lantiq,function = "exin";
+ };
+ pci {
+ lantiq,groups = "gnt1";
+ lantiq,function = "pci";
+ };
+ conf_out {
+ lantiq,pins = "io4", "io5", "io6"; /* stp */
+ lantiq,open-drain;
+ lantiq,pull = <0>;
+ };
+ };
+ };
+
+ etop@e180000 {
+ compatible = "lantiq,etop-xway";
+ reg = <0xe180000 0x40000>;
+ interrupt-parent = <&icu0>;
+ interrupts = <73 78>;
+ phy-mode = "rmii";
+ mac-address = [ 00 11 22 33 44 55 ];
+ };
+
+ stp0: stp@e100bb0 {
+ #gpio-cells = <2>;
+ compatible = "lantiq,gpio-stp-xway";
+ gpio-controller;
+ reg = <0xe100bb0 0x40>;
+
+ lantiq,shadow = <0xfff>;
+ lantiq,groups = <0x3>;
+ };
+
+ pci@e105400 {
+ lantiq,bus-clock = <33333333>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ 0x7000 0 0 1 &icu0 29 1 // slot 14, irq 29
+ >;
+ gpios-reset = <&gpio 21 0>;
+ req-mask = <0x1>; /* GNT1 */
+ };
+
+ };
+};
diff --git a/arch/mips/boot/dts/loongson/Makefile b/arch/mips/boot/dts/loongson/Makefile
new file mode 100644
index 000000000..8fd0efb37
--- /dev/null
+++ b/arch/mips/boot/dts/loongson/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_4core_ls7a.dtb
+dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_4core_rs780e.dtb
+dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_8core_rs780e.dtb
+dtb-$(CONFIG_MACH_LOONGSON64) += loongson64g_4core_ls7a.dtb
+dtb-$(CONFIG_MACH_LOONGSON64) += loongson64v_4core_virtio.dtb
+
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/loongson/loongson64c-package.dtsi b/arch/mips/boot/dts/loongson/loongson64c-package.dtsi
new file mode 100644
index 000000000..5bb876a4d
--- /dev/null
+++ b/arch/mips/boot/dts/loongson/loongson64c-package.dtsi
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpuintc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ package0: bus@1fe00000 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0x1fe00000 0 0x1fe00000 0x100000
+ 0 0x3ff00000 0 0x3ff00000 0x100000
+ /* 3A HT Config Space */
+ 0xefd 0xfb000000 0xefd 0xfb000000 0x10000000
+ /* 3B HT Config Space */
+ 0x1efd 0xfb000000 0x1efd 0xfb000000 0x10000000>;
+
+ liointc: interrupt-controller@3ff01400 {
+ compatible = "loongson,liointc-1.0";
+ reg = <0 0x3ff01400 0x64>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>, <3>;
+ interrupt-names = "int0", "int1";
+
+ loongson,parent_int_map = <0xf0ffffff>, /* int0 */
+ <0x0f000000>, /* int1 */
+ <0x00000000>, /* int2 */
+ <0x00000000>; /* int3 */
+
+ };
+
+ cpu_uart0: serial@1fe001e0 {
+ compatible = "ns16550a";
+ reg = <0 0x1fe001e0 0x8>;
+ clock-frequency = <33000000>;
+ interrupt-parent = <&liointc>;
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH>;
+ no-loopback-test;
+ };
+
+ cpu_uart1: serial@1fe001e8 {
+ status = "disabled";
+ compatible = "ns16550a";
+ reg = <0 0x1fe001e8 0x8>;
+ clock-frequency = <33000000>;
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc>;
+ no-loopback-test;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/loongson/loongson64c_4core_ls7a.dts b/arch/mips/boot/dts/loongson/loongson64c_4core_ls7a.dts
new file mode 100644
index 000000000..c7ea4f1c0
--- /dev/null
+++ b/arch/mips/boot/dts/loongson/loongson64c_4core_ls7a.dts
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/dts-v1/;
+
+#include "loongson64c-package.dtsi"
+#include "ls7a-pch.dtsi"
+
+/ {
+ compatible = "loongson,loongson64c-4core-ls7a";
+};
+
+&package0 {
+ htvec: interrupt-controller@efdfb000080 {
+ compatible = "loongson,htvec-1.0";
+ reg = <0xefd 0xfb000080 0x40>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&liointc>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>,
+ <25 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>;
+ };
+};
+
+&pch {
+ msi: msi-controller@2ff00000 {
+ compatible = "loongson,pch-msi-1.0";
+ reg = <0 0x2ff00000 0 0x8>;
+ interrupt-controller;
+ msi-controller;
+ loongson,msi-base-vec = <64>;
+ loongson,msi-num-vecs = <64>;
+ interrupt-parent = <&htvec>;
+ };
+};
diff --git a/arch/mips/boot/dts/loongson/loongson64c_4core_rs780e.dts b/arch/mips/boot/dts/loongson/loongson64c_4core_rs780e.dts
new file mode 100644
index 000000000..d681a295d
--- /dev/null
+++ b/arch/mips/boot/dts/loongson/loongson64c_4core_rs780e.dts
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/dts-v1/;
+
+#include "loongson64c-package.dtsi"
+#include "rs780e-pch.dtsi"
+
+/ {
+ compatible = "loongson,loongson64c-4core-rs780e";
+};
+
+&package0 {
+ htpic: interrupt-controller@efdfb000080 {
+ compatible = "loongson,htpic-1.0";
+ reg = <0xefd 0xfb000080 0x40>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&liointc>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>,
+ <25 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>;
+ };
+};
diff --git a/arch/mips/boot/dts/loongson/loongson64c_8core_rs780e.dts b/arch/mips/boot/dts/loongson/loongson64c_8core_rs780e.dts
new file mode 100644
index 000000000..3c2044142
--- /dev/null
+++ b/arch/mips/boot/dts/loongson/loongson64c_8core_rs780e.dts
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/dts-v1/;
+
+#include "loongson64c-package.dtsi"
+#include "rs780e-pch.dtsi"
+
+/ {
+ compatible = "loongson,loongson64c-8core-rs780e";
+};
+
+&package0 {
+ htpic: interrupt-controller@1efdfb000080 {
+ compatible = "loongson,htpic-1.0";
+ reg = <0x1efd 0xfb000080 0x40>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&liointc>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>,
+ <25 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>;
+ };
+};
diff --git a/arch/mips/boot/dts/loongson/loongson64g-package.dtsi b/arch/mips/boot/dts/loongson/loongson64g-package.dtsi
new file mode 100644
index 000000000..38abc570c
--- /dev/null
+++ b/arch/mips/boot/dts/loongson/loongson64g-package.dtsi
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpuintc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ package0: bus@1fe00000 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0x1fe00000 0 0x1fe00000 0x100000
+ 0 0x3ff00000 0 0x3ff00000 0x100000
+ 0xefd 0xfb000000 0xefd 0xfb000000 0x10000000>;
+
+ liointc: interrupt-controller@3ff01400 {
+ compatible = "loongson,liointc-1.0";
+ reg = <0 0x3ff01400 0x64>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>, <3>;
+ interrupt-names = "int0", "int1";
+
+ loongson,parent_int_map = <0x00ffffff>, /* int0 */
+ <0xff000000>, /* int1 */
+ <0x00000000>, /* int2 */
+ <0x00000000>; /* int3 */
+
+ };
+
+ cpu_uart0: serial@1fe001e0 {
+ compatible = "ns16550a";
+ reg = <0 0x1fe00100 0x10>;
+ clock-frequency = <100000000>;
+ interrupt-parent = <&liointc>;
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH>;
+ no-loopback-test;
+ };
+
+ cpu_uart1: serial@1fe001e8 {
+ status = "disabled";
+ compatible = "ns16550a";
+ reg = <0 0x1fe00110 0x10>;
+ clock-frequency = <100000000>;
+ interrupts = <15 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc>;
+ no-loopback-test;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/loongson/loongson64g_4core_ls7a.dts b/arch/mips/boot/dts/loongson/loongson64g_4core_ls7a.dts
new file mode 100644
index 000000000..c945f8565
--- /dev/null
+++ b/arch/mips/boot/dts/loongson/loongson64g_4core_ls7a.dts
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/dts-v1/;
+
+#include "loongson64g-package.dtsi"
+#include "ls7a-pch.dtsi"
+
+/ {
+ compatible = "loongson,loongson64g-4core-ls7a";
+};
+
+&package0 {
+ htvec: interrupt-controller@efdfb000080 {
+ compatible = "loongson,htvec-1.0";
+ reg = <0xefd 0xfb000080 0x40>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&liointc>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>,
+ <25 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <28 IRQ_TYPE_LEVEL_HIGH>,
+ <29 IRQ_TYPE_LEVEL_HIGH>,
+ <30 IRQ_TYPE_LEVEL_HIGH>,
+ <31 IRQ_TYPE_LEVEL_HIGH>;
+ };
+};
+
+&pch {
+ msi: msi-controller@2ff00000 {
+ compatible = "loongson,pch-msi-1.0";
+ reg = <0 0x2ff00000 0 0x8>;
+ interrupt-controller;
+ msi-controller;
+ loongson,msi-base-vec = <64>;
+ loongson,msi-num-vecs = <192>;
+ interrupt-parent = <&htvec>;
+ };
+};
diff --git a/arch/mips/boot/dts/loongson/loongson64v_4core_virtio.dts b/arch/mips/boot/dts/loongson/loongson64v_4core_virtio.dts
new file mode 100644
index 000000000..41f0b110d
--- /dev/null
+++ b/arch/mips/boot/dts/loongson/loongson64v_4core_virtio.dts
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/dts-v1/;
+/ {
+ compatible = "loongson,loongson64v-4core-virtio";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpuintc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ package0: bus@1fe00000 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0x1fe00000 0 0x1fe00000 0x100000
+ 0 0x3ff00000 0 0x3ff00000 0x100000
+ 0xefd 0xfb000000 0xefd 0xfb000000 0x10000000>;
+
+ liointc: interrupt-controller@3ff01400 {
+ compatible = "loongson,liointc-1.0";
+ reg = <0 0x3ff01400 0x64>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>, <3>;
+ interrupt-names = "int0", "int1";
+
+ loongson,parent_int_map = <0x00000001>, /* int0 */
+ <0xfffffffe>, /* int1 */
+ <0x00000000>, /* int2 */
+ <0x00000000>; /* int3 */
+
+ };
+
+ cpu_uart0: serial@1fe001e0 {
+ compatible = "ns16550a";
+ reg = <0 0x1fe001e0 0x8>;
+ clock-frequency = <33000000>;
+ interrupt-parent = <&liointc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ no-loopback-test;
+ };
+ };
+
+ bus@10000000 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0 0x10000000 0 0x10000000 0 0x10000000 /* PIO & CONF & APB */
+ 0 0x40000000 0 0x40000000 0 0x40000000>; /* PCI MEM */
+
+ rtc0: rtc@10081000 {
+ compatible = "google,goldfish-rtc";
+ reg = <0 0x10081000 0 0x1000>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc>;
+ };
+
+ pci@1a000000 {
+ compatible = "pci-host-ecam-generic";
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ bus-range = <0x0 0x1f>;
+ reg = <0 0x1a000000 0 0x02000000>;
+
+ ranges = <0x01000000 0x0 0x00004000 0x0 0x18004000 0x0 0x0000c000>,
+ <0x02000000 0x0 0x40000000 0x0 0x40000000 0x0 0x40000000>;
+
+ interrupt-map = <
+ 0x0000 0x0 0x0 0x1 &liointc 0x2 IRQ_TYPE_LEVEL_HIGH
+ 0x0800 0x0 0x0 0x1 &liointc 0x3 IRQ_TYPE_LEVEL_HIGH
+ 0x1000 0x0 0x0 0x1 &liointc 0x4 IRQ_TYPE_LEVEL_HIGH
+ 0x1800 0x0 0x0 0x1 &liointc 0x5 IRQ_TYPE_LEVEL_HIGH
+ >;
+
+ interrupt-map-mask = <0x1800 0x0 0x0 0x7>;
+ };
+
+ isa {
+ compatible = "isa";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <1 0 0 0x18000000 0x4000>;
+ };
+ };
+
+ hypervisor {
+ compatible = "linux,kvm";
+ };
+};
diff --git a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
new file mode 100644
index 000000000..f99a7a11f
--- /dev/null
+++ b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/ {
+ pch: bus@10000000 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0 0x10000000 0 0x10000000 0 0x10000000 /* PIO & CONF & APB */
+ 0 0x20000000 0 0x20000000 0 0x10000000
+ 0 0x40000000 0 0x40000000 0 0x40000000 /* PCI MEM */
+ 0xe00 0x00000000 0xe00 0x00000000 0x100 0x0000000>;
+
+ pic: interrupt-controller@10000000 {
+ compatible = "loongson,pch-pic-1.0";
+ reg = <0 0x10000000 0 0x400>;
+ interrupt-controller;
+ interrupt-parent = <&htvec>;
+ loongson,pic-base-vec = <0>;
+ #interrupt-cells = <2>;
+ };
+
+ ls7a_uart0: serial@10080000 {
+ compatible = "ns16550a";
+ reg = <0 0x10080000 0 0x100>;
+ clock-frequency = <50000000>;
+ interrupt-parent = <&pic>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ no-loopback-test;
+ };
+
+ ls7a_uart1: serial@10080100 {
+ status = "disabled";
+ compatible = "ns16550a";
+ reg = <0 0x10080100 0 0x100>;
+ clock-frequency = <50000000>;
+ interrupt-parent = <&pic>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ no-loopback-test;
+ };
+
+ ls7a_uart2: serial@10080200 {
+ status = "disabled";
+ compatible = "ns16550a";
+ reg = <0 0x10080200 0 0x100>;
+ clock-frequency = <50000000>;
+ interrupt-parent = <&pic>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ no-loopback-test;
+ };
+
+ ls7a_uart3: serial@10080300 {
+ status = "disabled";
+ compatible = "ns16550a";
+ reg = <0 0x10080300 0 0x100>;
+ clock-frequency = <50000000>;
+ interrupt-parent = <&pic>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ no-loopback-test;
+ };
+
+ pci@1a000000 {
+ compatible = "loongson,ls7a-pci";
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <2>;
+ msi-parent = <&msi>;
+
+ reg = <0 0x1a000000 0 0x02000000>,
+ <0xefe 0x00000000 0 0x20000000>;
+
+ ranges = <0x01000000 0x0 0x00020000 0x0 0x18020000 0x0 0x00020000>,
+ <0x02000000 0x0 0x40000000 0x0 0x40000000 0x0 0x40000000>;
+
+ ohci@4,0 {
+ compatible = "pci0014,7a24.0",
+ "pci0014,7a24",
+ "pciclass0c0310",
+ "pciclass0c03";
+
+ reg = <0x2000 0x0 0x0 0x0 0x0>;
+ interrupts = <49 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ };
+
+ ehci@4,1 {
+ compatible = "pci0014,7a14.0",
+ "pci0014,7a14",
+ "pciclass0c0320",
+ "pciclass0c03";
+
+ reg = <0x2100 0x0 0x0 0x0 0x0>;
+ interrupts = <48 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ };
+
+ ohci@5,0 {
+ compatible = "pci0014,7a24.0",
+ "pci0014,7a24",
+ "pciclass0c0310",
+ "pciclass0c03";
+
+ reg = <0x2800 0x0 0x0 0x0 0x0>;
+ interrupts = <51 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ };
+
+ ehci@5,1 {
+ compatible = "pci0014,7a14.0",
+ "pci0014,7a14",
+ "pciclass0c0320",
+ "pciclass0c03";
+
+ reg = <0x2900 0x0 0x0 0x0 0x0>;
+ interrupts = <50 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ };
+
+ sata@8,0 {
+ compatible = "pci0014,7a08.0",
+ "pci0014,7a08",
+ "pciclass010601",
+ "pciclass0106";
+
+ reg = <0x4000 0x0 0x0 0x0 0x0>;
+ interrupts = <16 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ };
+
+ sata@8,1 {
+ compatible = "pci0014,7a08.0",
+ "pci0014,7a08",
+ "pciclass010601",
+ "pciclass0106";
+
+ reg = <0x4100 0x0 0x0 0x0 0x0>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ };
+
+ sata@8,2 {
+ compatible = "pci0014,7a08.0",
+ "pci0014,7a08",
+ "pciclass010601",
+ "pciclass0106";
+
+ reg = <0x4200 0x0 0x0 0x0 0x0>;
+ interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ };
+
+ gpu@6,0 {
+ compatible = "pci0014,7a15.0",
+ "pci0014,7a15",
+ "pciclass030200",
+ "pciclass0302";
+
+ reg = <0x3000 0x0 0x0 0x0 0x0>;
+ interrupts = <29 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ };
+
+ dc@6,1 {
+ compatible = "pci0014,7a06.0",
+ "pci0014,7a06",
+ "pciclass030000",
+ "pciclass0300";
+
+ reg = <0x3100 0x0 0x0 0x0 0x0>;
+ interrupts = <28 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ };
+
+ hda@7,0 {
+ compatible = "pci0014,7a07.0",
+ "pci0014,7a07",
+ "pciclass040300",
+ "pciclass0403";
+
+ reg = <0x3800 0x0 0x0 0x0 0x0>;
+ interrupts = <58 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ };
+
+ gmac@3,0 {
+ compatible = "pci0014,7a03.0",
+ "pci0014,7a03",
+ "pciclass020000",
+ "pciclass0200";
+
+ reg = <0x1800 0x0 0x0 0x0 0x0>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
+ <13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
+ interrupt-parent = <&pic>;
+ phy-mode = "rgmii";
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+ };
+
+ gmac@3,1 {
+ compatible = "pci0014,7a03.0",
+ "pci0014,7a03",
+ "pciclass020000",
+ "pciclass0200";
+
+ reg = <0x1900 0x0 0x0 0x0 0x0>;
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
+ <15 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
+ interrupt-parent = <&pic>;
+ phy-mode = "rgmii";
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy1: ethernet-phy@1 {
+ reg = <0>;
+ };
+ };
+ };
+
+ pci_bridge@9,0 {
+ compatible = "pci0014,7a19.1",
+ "pci0014,7a19",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x4800 0x0 0x0 0x0 0x0>;
+ interrupts = <32 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &pic 32 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pci_bridge@a,0 {
+ compatible = "pci0014,7a09.1",
+ "pci0014,7a09",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x5000 0x0 0x0 0x0 0x0>;
+ interrupts = <33 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &pic 33 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pci_bridge@b,0 {
+ compatible = "pci0014,7a09.1",
+ "pci0014,7a09",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x5800 0x0 0x0 0x0 0x0>;
+ interrupts = <34 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &pic 34 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pci_bridge@c,0 {
+ compatible = "pci0014,7a09.1",
+ "pci0014,7a09",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x6000 0x0 0x0 0x0 0x0>;
+ interrupts = <35 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &pic 35 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pci_bridge@d,0 {
+ compatible = "pci0014,7a19.1",
+ "pci0014,7a19",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x6800 0x0 0x0 0x0 0x0>;
+ interrupts = <36 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &pic 36 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pci_bridge@e,0 {
+ compatible = "pci0014,7a09.1",
+ "pci0014,7a09",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x7000 0x0 0x0 0x0 0x0>;
+ interrupts = <37 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &pic 37 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pci_bridge@f,0 {
+ compatible = "pci0014,7a29.1",
+ "pci0014,7a29",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x7800 0x0 0x0 0x0 0x0>;
+ interrupts = <40 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &pic 40 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pci_bridge@10,0 {
+ compatible = "pci0014,7a19.1",
+ "pci0014,7a19",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x8000 0x0 0x0 0x0 0x0>;
+ interrupts = <41 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &pic 41 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pci_bridge@11,0 {
+ compatible = "pci0014,7a29.1",
+ "pci0014,7a29",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x8800 0x0 0x0 0x0 0x0>;
+ interrupts = <42 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &pic 42 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pci_bridge@12,0 {
+ compatible = "pci0014,7a19.1",
+ "pci0014,7a19",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x9000 0x0 0x0 0x0 0x0>;
+ interrupts = <43 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &pic 43 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pci_bridge@13,0 {
+ compatible = "pci0014,7a29.1",
+ "pci0014,7a29",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x9800 0x0 0x0 0x0 0x0>;
+ interrupts = <38 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &pic 38 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pci_bridge@14,0 {
+ compatible = "pci0014,7a19.1",
+ "pci0014,7a19",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0xa000 0x0 0x0 0x0 0x0>;
+ interrupts = <39 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &pic 39 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ isa {
+ compatible = "isa";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <1 0 0 0x18000000 0x20000>;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/loongson/rs780e-pch.dtsi b/arch/mips/boot/dts/loongson/rs780e-pch.dtsi
new file mode 100644
index 000000000..871c866e0
--- /dev/null
+++ b/arch/mips/boot/dts/loongson/rs780e-pch.dtsi
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/ {
+ bus@10000000 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0 0x10000000 0 0x10000000 0 0x10000000
+ 0 0x40000000 0 0x40000000 0 0x40000000
+ 0xfd 0xfe000000 0xfd 0xfe000000 0 0x2000000 /* PCI Config Space */>;
+
+ pci@1a000000 {
+ compatible = "loongson,rs780e-pci";
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ reg = <0 0x1a000000 0 0x02000000>;
+
+ ranges = <0x01000000 0 0x00004000 0 0x18004000 0 0x0000c000>,
+ <0x02000000 0 0x40000000 0 0x40000000 0 0x40000000>;
+ };
+
+ isa {
+ compatible = "isa";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <1 0 0 0x18000000 0x4000>;
+
+ rtc0: rtc@70 {
+ compatible = "motorola,mc146818";
+ reg = <1 0x70 0x8>;
+ interrupts = <8>;
+ interrupt-parent = <&htpic>;
+ };
+
+ acpi@800 {
+ compatible = "loongson,rs780e-acpi";
+ reg = <1 0x800 0x100>;
+ };
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/mscc/Makefile b/arch/mips/boot/dts/mscc/Makefile
new file mode 100644
index 000000000..eb7151587
--- /dev/null
+++ b/arch/mips/boot/dts/mscc/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+dtb-$(CONFIG_MSCC_OCELOT) += ocelot_pcb123.dtb ocelot_pcb120.dtb
+
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/mscc/ocelot.dtsi b/arch/mips/boot/dts/mscc/ocelot.dtsi
new file mode 100644
index 000000000..535a98284
--- /dev/null
+++ b/arch/mips/boot/dts/mscc/ocelot.dtsi
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2017 Microsemi Corporation */
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "mscc,ocelot";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "mips,mips24KEc";
+ device_type = "cpu";
+ clocks = <&cpu_clk>;
+ reg = <0>;
+ };
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ cpuintc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ cpu_clk: cpu-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <500000000>;
+ };
+
+ ahb_clk: ahb-clk {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <0>;
+ clocks = <&cpu_clk>;
+ clock-div = <2>;
+ clock-mult = <1>;
+ };
+
+ ahb@70000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x70000000 0x2000000>;
+
+ interrupt-parent = <&intc>;
+
+ cpu_ctrl: syscon@0 {
+ compatible = "mscc,ocelot-cpu-syscon", "syscon";
+ reg = <0x0 0x2c>;
+ };
+
+ intc: interrupt-controller@70 {
+ compatible = "mscc,ocelot-icpu-intr";
+ reg = <0x70 0x70>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
+
+ uart0: serial@100000 {
+ pinctrl-0 = <&uart_pins>;
+ pinctrl-names = "default";
+ compatible = "ns16550a";
+ reg = <0x100000 0x20>;
+ interrupts = <6>;
+ clocks = <&ahb_clk>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+
+ status = "disabled";
+ };
+
+ i2c: i2c@100400 {
+ compatible = "mscc,ocelot-i2c", "snps,designware-i2c";
+ pinctrl-0 = <&i2c_pins>;
+ pinctrl-names = "default";
+ reg = <0x100400 0x100>, <0x198 0x8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <8>;
+ clocks = <&ahb_clk>;
+
+ status = "disabled";
+ };
+
+ uart2: serial@100800 {
+ pinctrl-0 = <&uart2_pins>;
+ pinctrl-names = "default";
+ compatible = "ns16550a";
+ reg = <0x100800 0x20>;
+ interrupts = <7>;
+ clocks = <&ahb_clk>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+
+ status = "disabled";
+ };
+
+ spi: spi@101000 {
+ compatible = "mscc,ocelot-spi", "snps,dw-apb-ssi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x101000 0x100>, <0x3c 0x18>;
+ interrupts = <9>;
+ clocks = <&ahb_clk>;
+
+ status = "disabled";
+ };
+
+ switch@1010000 {
+ compatible = "mscc,vsc7514-switch";
+ reg = <0x1010000 0x10000>,
+ <0x1030000 0x10000>,
+ <0x1080000 0x100>,
+ <0x10e0000 0x10000>,
+ <0x11e0000 0x100>,
+ <0x11f0000 0x100>,
+ <0x1200000 0x100>,
+ <0x1210000 0x100>,
+ <0x1220000 0x100>,
+ <0x1230000 0x100>,
+ <0x1240000 0x100>,
+ <0x1250000 0x100>,
+ <0x1260000 0x100>,
+ <0x1270000 0x100>,
+ <0x1280000 0x100>,
+ <0x1800000 0x80000>,
+ <0x1880000 0x10000>,
+ <0x1040000 0x10000>,
+ <0x1050000 0x10000>,
+ <0x1060000 0x10000>;
+ reg-names = "sys", "rew", "qs", "ptp", "port0", "port1",
+ "port2", "port3", "port4", "port5", "port6",
+ "port7", "port8", "port9", "port10", "qsys",
+ "ana", "s0", "s1", "s2";
+ interrupts = <18 21 22>;
+ interrupt-names = "ptp_rdy", "xtr", "inj";
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port0: port@0 {
+ reg = <0>;
+ };
+ port1: port@1 {
+ reg = <1>;
+ };
+ port2: port@2 {
+ reg = <2>;
+ };
+ port3: port@3 {
+ reg = <3>;
+ };
+ port4: port@4 {
+ reg = <4>;
+ };
+ port5: port@5 {
+ reg = <5>;
+ };
+ port6: port@6 {
+ reg = <6>;
+ };
+ port7: port@7 {
+ reg = <7>;
+ };
+ port8: port@8 {
+ reg = <8>;
+ };
+ port9: port@9 {
+ reg = <9>;
+ };
+ port10: port@10 {
+ reg = <10>;
+ };
+ };
+ };
+
+ reset@1070008 {
+ compatible = "mscc,ocelot-chip-reset";
+ reg = <0x1070008 0x4>;
+ };
+
+ gpio: pinctrl@1070034 {
+ compatible = "mscc,ocelot-pinctrl";
+ reg = <0x1070034 0x68>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&gpio 0 0 22>;
+ interrupt-controller;
+ interrupts = <13>;
+ #interrupt-cells = <2>;
+
+ i2c_pins: i2c-pins {
+ pins = "GPIO_16", "GPIO_17";
+ function = "twi";
+ };
+
+ uart_pins: uart-pins {
+ pins = "GPIO_6", "GPIO_7";
+ function = "uart";
+ };
+
+ uart2_pins: uart2-pins {
+ pins = "GPIO_12", "GPIO_13";
+ function = "uart2";
+ };
+
+ miim1: miim1 {
+ pins = "GPIO_14", "GPIO_15";
+ function = "miim";
+ };
+
+ };
+
+ mdio0: mdio@107009c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "mscc,ocelot-miim";
+ reg = <0x107009c 0x24>, <0x10700f0 0x8>;
+ interrupts = <14>;
+ status = "disabled";
+
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ phy2: ethernet-phy@2 {
+ reg = <2>;
+ };
+ phy3: ethernet-phy@3 {
+ reg = <3>;
+ };
+ };
+
+ mdio1: mdio@10700c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "mscc,ocelot-miim";
+ reg = <0x10700c0 0x24>;
+ interrupts = <15>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&miim1>;
+ status = "disabled";
+ };
+
+ hsio: syscon@10d0000 {
+ compatible = "mscc,ocelot-hsio", "syscon", "simple-mfd";
+ reg = <0x10d0000 0x10000>;
+
+ serdes: serdes {
+ compatible = "mscc,vsc7514-serdes";
+ #phy-cells = <2>;
+ };
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/mscc/ocelot_pcb120.dts b/arch/mips/boot/dts/mscc/ocelot_pcb120.dts
new file mode 100644
index 000000000..897de5025
--- /dev/null
+++ b/arch/mips/boot/dts/mscc/ocelot_pcb120.dts
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2017 Microsemi Corporation */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/phy/phy-ocelot-serdes.h>
+#include "ocelot.dtsi"
+
+/ {
+ compatible = "mscc,ocelot-pcb120", "mscc,ocelot";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0e000000>;
+ };
+};
+
+&gpio {
+ phy_int_pins: phy_int_pins {
+ pins = "GPIO_4";
+ function = "gpio";
+ };
+
+ phy_load_save_pins: phy_load_save_pins {
+ pins = "GPIO_10";
+ function = "ptp2";
+ };
+};
+
+&mdio0 {
+ status = "okay";
+};
+
+&mdio1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&miim1>, <&phy_int_pins>, <&phy_load_save_pins>;
+
+ phy7: ethernet-phy@0 {
+ reg = <0>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gpio>;
+ load-save-gpios = <&gpio 10 GPIO_ACTIVE_HIGH>;
+ };
+ phy6: ethernet-phy@1 {
+ reg = <1>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gpio>;
+ load-save-gpios = <&gpio 10 GPIO_ACTIVE_HIGH>;
+ };
+ phy5: ethernet-phy@2 {
+ reg = <2>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gpio>;
+ load-save-gpios = <&gpio 10 GPIO_ACTIVE_HIGH>;
+ };
+ phy4: ethernet-phy@3 {
+ reg = <3>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gpio>;
+ load-save-gpios = <&gpio 10 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&port0 {
+ phy-handle = <&phy0>;
+};
+
+&port1 {
+ phy-handle = <&phy1>;
+};
+
+&port2 {
+ phy-handle = <&phy2>;
+};
+
+&port3 {
+ phy-handle = <&phy3>;
+};
+
+&port4 {
+ phy-handle = <&phy7>;
+ phy-mode = "sgmii";
+ phys = <&serdes 4 SERDES1G(2)>;
+};
+
+&port5 {
+ phy-handle = <&phy4>;
+ phy-mode = "sgmii";
+ phys = <&serdes 5 SERDES1G(5)>;
+};
+
+&port6 {
+ phy-handle = <&phy6>;
+ phy-mode = "sgmii";
+ phys = <&serdes 6 SERDES1G(3)>;
+};
+
+&port9 {
+ phy-handle = <&phy5>;
+ phy-mode = "sgmii";
+ phys = <&serdes 9 SERDES1G(4)>;
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/mscc/ocelot_pcb123.dts b/arch/mips/boot/dts/mscc/ocelot_pcb123.dts
new file mode 100644
index 000000000..ef852f382
--- /dev/null
+++ b/arch/mips/boot/dts/mscc/ocelot_pcb123.dts
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2017 Microsemi Corporation */
+
+/dts-v1/;
+
+#include "ocelot.dtsi"
+
+/ {
+ compatible = "mscc,ocelot-pcb123", "mscc,ocelot";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0e000000>;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&spi {
+ status = "okay";
+
+ flash@0 {
+ compatible = "macronix,mx25l25635f", "jedec,spi-nor";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ };
+};
+
+&i2c {
+ clock-frequency = <100000>;
+ i2c-sda-hold-time-ns = <300>;
+ status = "okay";
+};
+
+&mdio0 {
+ status = "okay";
+};
+
+&port0 {
+ phy-handle = <&phy0>;
+};
+
+&port1 {
+ phy-handle = <&phy1>;
+};
+
+&port2 {
+ phy-handle = <&phy2>;
+};
+
+&port3 {
+ phy-handle = <&phy3>;
+};
diff --git a/arch/mips/boot/dts/mti/Makefile b/arch/mips/boot/dts/mti/Makefile
new file mode 100644
index 000000000..b5f742699
--- /dev/null
+++ b/arch/mips/boot/dts/mti/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_MIPS_MALTA) += malta.dtb
+dtb-$(CONFIG_LEGACY_BOARD_SEAD3) += sead3.dtb
+
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/mti/malta.dts b/arch/mips/boot/dts/mti/malta.dts
new file mode 100644
index 000000000..f03279b1c
--- /dev/null
+++ b/arch/mips/boot/dts/mti/malta.dts
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/mips-gic.h>
+
+/memreserve/ 0x00000000 0x00001000; /* YAMON exception vectors */
+/memreserve/ 0x00001000 0x000ef000; /* YAMON */
+/memreserve/ 0x000f0000 0x00010000; /* PIIX4 ISA memory */
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "mti,malta";
+
+ cpu_intc: interrupt-controller {
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gic: interrupt-controller@1bdc0000 {
+ compatible = "mti,gic";
+ reg = <0x1bdc0000 0x20000>;
+
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ /*
+ * Declare the interrupt-parent even though the mti,gic
+ * binding doesn't require it, such that the kernel can
+ * figure out that cpu_intc is the root interrupt
+ * controller & should be probed first.
+ */
+ interrupt-parent = <&cpu_intc>;
+
+ timer {
+ compatible = "mti,gic-timer";
+ interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
+ };
+ };
+
+ i8259: interrupt-controller@20 {
+ compatible = "intel,i8259";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 3 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ flash@1e000000 {
+ compatible = "intel,dt28f160", "cfi-flash";
+ reg = <0x1e000000 0x400000>;
+ bank-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ yamon@0 {
+ label = "YAMON";
+ reg = <0x0 0x100000>;
+ read-only;
+ };
+
+ user-fs@100000 {
+ label = "User FS";
+ reg = <0x100000 0x2e0000>;
+ };
+
+ board-config@3e0000 {
+ label = "Board Config";
+ reg = <0x3e0000 0x20000>;
+ read-only;
+ };
+ };
+ };
+
+ fpga_regs: system-controller@1f000000 {
+ compatible = "mti,malta-fpga", "syscon", "simple-mfd";
+ reg = <0x1f000000 0x1000>;
+ native-endian;
+
+ lcd@410 {
+ compatible = "mti,malta-lcd";
+ offset = <0x410>;
+ };
+
+ reboot {
+ compatible = "syscon-reboot";
+ regmap = <&fpga_regs>;
+ offset = <0x500>;
+ mask = <0x42>;
+ };
+ };
+
+ isa {
+ compatible = "isa";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <1 0 0 0x1000>;
+
+ rtc@70 {
+ compatible = "motorola,mc146818";
+ reg = <1 0x70 0x8>;
+
+ interrupt-parent = <&i8259>;
+ interrupts = <8>;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/mti/sead3.dts b/arch/mips/boot/dts/mti/sead3.dts
new file mode 100644
index 000000000..192c26ff1
--- /dev/null
+++ b/arch/mips/boot/dts/mti/sead3.dts
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/memreserve/ 0x00000000 0x00001000; // reserved
+/memreserve/ 0x00001000 0x000ef000; // ROM data
+/memreserve/ 0x000f0000 0x004cc000; // reserved
+
+#include <dt-bindings/interrupt-controller/mips-gic.h>
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "mti,sead-3";
+ model = "MIPS SEAD-3";
+
+ chosen {
+ stdout-path = "serial1:115200";
+ };
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
+ cpus {
+ cpu@0 {
+ compatible = "mti,mips14KEc", "mti,mips14Kc";
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x08000000>;
+ };
+
+ cpu_intc: interrupt-controller {
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gic: interrupt-controller@1b1c0000 {
+ compatible = "mti,gic";
+ reg = <0x1b1c0000 0x20000>;
+
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ /*
+ * Declare the interrupt-parent even though the mti,gic
+ * binding doesn't require it, such that the kernel can
+ * figure out that cpu_intc is the root interrupt
+ * controller & should be probed first.
+ */
+ interrupt-parent = <&cpu_intc>;
+ };
+
+ ehci@1b200000 {
+ compatible = "generic-ehci";
+ reg = <0x1b200000 0x1000>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 0 IRQ_TYPE_LEVEL_HIGH>; /* GIC 0 or CPU 6 */
+
+ has-transaction-translator;
+ };
+
+ flash@1c000000 {
+ compatible = "intel,28f128j3", "cfi-flash";
+ reg = <0x1c000000 0x2000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ bank-width = <4>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ user-fs@0 {
+ label = "User FS";
+ reg = <0x0 0x1fc0000>;
+ };
+
+ board-config@3e0000 {
+ label = "Board Config";
+ reg = <0x1fc0000 0x40000>;
+ };
+ };
+ };
+
+ fpga_regs: system-controller@1f000000 {
+ compatible = "mti,sead3-fpga", "syscon", "simple-mfd";
+ reg = <0x1f000000 0x200>;
+
+ reboot {
+ compatible = "syscon-reboot";
+ regmap = <&fpga_regs>;
+ offset = <0x50>;
+ mask = <0x4d>;
+ };
+
+ poweroff {
+ compatible = "restart-poweroff";
+ };
+ };
+
+ system-controller@1f000200 {
+ compatible = "mti,sead3-cpld", "syscon", "simple-mfd";
+ reg = <0x1f000200 0x300>;
+
+ led@10.0 {
+ compatible = "register-bit-led";
+ offset = <0x10>;
+ mask = <0x1>;
+ label = "pled0";
+ };
+ led@10.1 {
+ compatible = "register-bit-led";
+ offset = <0x10>;
+ mask = <0x2>;
+ label = "pled1";
+ };
+ led@10.2 {
+ compatible = "register-bit-led";
+ offset = <0x10>;
+ mask = <0x4>;
+ label = "pled2";
+ };
+ led@10.3 {
+ compatible = "register-bit-led";
+ offset = <0x10>;
+ mask = <0x8>;
+ label = "pled3";
+ };
+ led@10.4 {
+ compatible = "register-bit-led";
+ offset = <0x10>;
+ mask = <0x10>;
+ label = "pled4";
+ };
+ led@10.5 {
+ compatible = "register-bit-led";
+ offset = <0x10>;
+ mask = <0x20>;
+ label = "pled5";
+ };
+ led@10.6 {
+ compatible = "register-bit-led";
+ offset = <0x10>;
+ mask = <0x40>;
+ label = "pled6";
+ };
+ led@10.7 {
+ compatible = "register-bit-led";
+ offset = <0x10>;
+ mask = <0x80>;
+ label = "pled7";
+ };
+
+ led@18.0 {
+ compatible = "register-bit-led";
+ offset = <0x18>;
+ mask = <0x1>;
+ label = "fled0";
+ };
+ led@18.1 {
+ compatible = "register-bit-led";
+ offset = <0x18>;
+ mask = <0x2>;
+ label = "fled1";
+ };
+ led@18.2 {
+ compatible = "register-bit-led";
+ offset = <0x18>;
+ mask = <0x4>;
+ label = "fled2";
+ };
+ led@18.3 {
+ compatible = "register-bit-led";
+ offset = <0x18>;
+ mask = <0x8>;
+ label = "fled3";
+ };
+ led@18.4 {
+ compatible = "register-bit-led";
+ offset = <0x18>;
+ mask = <0x10>;
+ label = "fled4";
+ };
+ led@18.5 {
+ compatible = "register-bit-led";
+ offset = <0x18>;
+ mask = <0x20>;
+ label = "fled5";
+ };
+ led@18.6 {
+ compatible = "register-bit-led";
+ offset = <0x18>;
+ mask = <0x40>;
+ label = "fled6";
+ };
+ led@18.7 {
+ compatible = "register-bit-led";
+ offset = <0x18>;
+ mask = <0x80>;
+ label = "fled7";
+ };
+
+ lcd@200 {
+ compatible = "mti,sead3-lcd";
+ offset = <0x200>;
+ };
+ };
+
+ /* UART connected to FTDI & miniUSB socket */
+ uart0: uart@1f000900 {
+ compatible = "ns16550a";
+ reg = <0x1f000900 0x20>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+
+ clock-frequency = <14745600>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 3 IRQ_TYPE_LEVEL_HIGH>; /* GIC 3 or CPU 4 */
+
+ no-loopback-test;
+ };
+
+ /* UART connected to RS232 socket */
+ uart1: uart@1f000800 {
+ compatible = "ns16550a";
+ reg = <0x1f000800 0x20>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+
+ clock-frequency = <14745600>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 2 IRQ_TYPE_LEVEL_HIGH>; /* GIC 2 or CPU 4 */
+
+ no-loopback-test;
+ };
+
+ eth@1f010000 {
+ compatible = "smsc,lan9115";
+ reg = <0x1f010000 0x10000>;
+ reg-io-width = <4>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 0 IRQ_TYPE_LEVEL_HIGH>; /* GIC 0 or CPU 6 */
+
+ phy-mode = "mii";
+ smsc,irq-push-pull;
+ smsc,save-mac-address;
+ };
+};
diff --git a/arch/mips/boot/dts/netlogic/Makefile b/arch/mips/boot/dts/netlogic/Makefile
new file mode 100644
index 000000000..45af42244
--- /dev/null
+++ b/arch/mips/boot/dts/netlogic/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_DT_XLP_EVP) += xlp_evp.dtb
+dtb-$(CONFIG_DT_XLP_SVP) += xlp_svp.dtb
+dtb-$(CONFIG_DT_XLP_FVP) += xlp_fvp.dtb
+dtb-$(CONFIG_DT_XLP_GVP) += xlp_gvp.dtb
+dtb-$(CONFIG_DT_XLP_RVP) += xlp_rvp.dtb
+
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/netlogic/xlp_evp.dts b/arch/mips/boot/dts/netlogic/xlp_evp.dts
new file mode 100644
index 000000000..e63e55926
--- /dev/null
+++ b/arch/mips/boot/dts/netlogic/xlp_evp.dts
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * XLP8XX Device Tree Source for EVP boards
+ */
+
+/dts-v1/;
+/ {
+ model = "netlogic,XLP-EVP";
+ compatible = "netlogic,xlp";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
+ 1 0 0 0x16000000 0x02000000>; // GBU chipselects
+
+ serial0: serial@30000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0 0x30100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <133333333>;
+ interrupt-parent = <&pic>;
+ interrupts = <17>;
+ };
+ serial1: serial@31000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0 0x31100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <133333333>;
+ interrupt-parent = <&pic>;
+ interrupts = <18>;
+ };
+ i2c0: ocores@32000 {
+ compatible = "opencores,i2c-ocores";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0 0x32100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <32000000>;
+ interrupt-parent = <&pic>;
+ interrupts = <30>;
+ };
+ i2c1: ocores@33000 {
+ compatible = "opencores,i2c-ocores";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0 0x33100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <32000000>;
+ interrupt-parent = <&pic>;
+ interrupts = <31>;
+
+ rtc@68 {
+ compatible = "dallas,ds1374";
+ reg = <0x68>;
+ };
+
+ dtt@4c {
+ compatible = "national,lm90";
+ reg = <0x4c>;
+ };
+ };
+ pic: pic@4000 {
+ compatible = "netlogic,xlp-pic";
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0 0x4000 0x200>;
+ interrupt-controller;
+ };
+
+ nor_flash@1,0 {
+ compatible = "cfi-flash";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ bank-width = <2>;
+ reg = <1 0 0x1000000>;
+
+ partition@0 {
+ label = "x-loader";
+ reg = <0x0 0x100000>; /* 1M */
+ read-only;
+ };
+
+ partition@100000 {
+ label = "u-boot";
+ reg = <0x100000 0x100000>; /* 1M */
+ };
+
+ partition@200000 {
+ label = "kernel";
+ reg = <0x200000 0x500000>; /* 5M */
+ };
+
+ partition@700000 {
+ label = "rootfs";
+ reg = <0x700000 0x800000>; /* 8M */
+ };
+
+ partition@f00000 {
+ label = "env";
+ reg = <0xf00000 0x100000>; /* 1M */
+ read-only;
+ };
+ };
+
+ gpio: xlp_gpio@34100 {
+ compatible = "netlogic,xlp832-gpio";
+ reg = <0 0x34100 0x1000>;
+ #gpio-cells = <2>;
+ gpio-controller;
+
+ #interrupt-cells = <2>;
+ interrupt-parent = <&pic>;
+ interrupts = <39>;
+ interrupt-controller;
+ };
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200 rdinit=/sbin/init";
+ };
+};
diff --git a/arch/mips/boot/dts/netlogic/xlp_fvp.dts b/arch/mips/boot/dts/netlogic/xlp_fvp.dts
new file mode 100644
index 000000000..d05abf13f
--- /dev/null
+++ b/arch/mips/boot/dts/netlogic/xlp_fvp.dts
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * XLP2XX Device Tree Source for FVP boards
+ */
+
+/dts-v1/;
+/ {
+ model = "netlogic,XLP-FVP";
+ compatible = "netlogic,xlp";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
+ 1 0 0 0x16000000 0x02000000>; // GBU chipselects
+
+ serial0: serial@30000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0 0x30100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <133333333>;
+ interrupt-parent = <&pic>;
+ interrupts = <17>;
+ };
+ serial1: serial@31000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0 0x31100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <133333333>;
+ interrupt-parent = <&pic>;
+ interrupts = <18>;
+ };
+ i2c0: ocores@37100 {
+ compatible = "opencores,i2c-ocores";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0 0x37100 0x20>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <32000000>;
+ interrupt-parent = <&pic>;
+ interrupts = <30>;
+ };
+ i2c1: ocores@37120 {
+ compatible = "opencores,i2c-ocores";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0 0x37120 0x20>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <32000000>;
+ interrupt-parent = <&pic>;
+ interrupts = <31>;
+
+ rtc@68 {
+ compatible = "dallas,ds1374";
+ reg = <0x68>;
+ };
+
+ dtt@4c {
+ compatible = "national,lm90";
+ reg = <0x4c>;
+ };
+ };
+ pic: pic@4000 {
+ compatible = "netlogic,xlp-pic";
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0 0x4000 0x200>;
+ interrupt-controller;
+ };
+
+ nor_flash@1,0 {
+ compatible = "cfi-flash";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ bank-width = <2>;
+ reg = <1 0 0x1000000>;
+
+ partition@0 {
+ label = "x-loader";
+ reg = <0x0 0x100000>; /* 1M */
+ read-only;
+ };
+
+ partition@100000 {
+ label = "u-boot";
+ reg = <0x100000 0x100000>; /* 1M */
+ };
+
+ partition@200000 {
+ label = "kernel";
+ reg = <0x200000 0x500000>; /* 5M */
+ };
+
+ partition@700000 {
+ label = "rootfs";
+ reg = <0x700000 0x800000>; /* 8M */
+ };
+
+ partition@f00000 {
+ label = "env";
+ reg = <0xf00000 0x100000>; /* 1M */
+ read-only;
+ };
+ };
+
+ gpio: xlp_gpio@34100 {
+ compatible = "netlogic,xlp208-gpio";
+ reg = <0 0x34100 0x1000>;
+ #gpio-cells = <2>;
+ gpio-controller;
+
+ #interrupt-cells = <2>;
+ interrupt-parent = <&pic>;
+ interrupts = <39>;
+ interrupt-controller;
+ };
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200 rdinit=/sbin/init";
+ };
+};
diff --git a/arch/mips/boot/dts/netlogic/xlp_gvp.dts b/arch/mips/boot/dts/netlogic/xlp_gvp.dts
new file mode 100644
index 000000000..d47de4851
--- /dev/null
+++ b/arch/mips/boot/dts/netlogic/xlp_gvp.dts
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * XLP9XX Device Tree Source for GVP boards
+ */
+
+/dts-v1/;
+/ {
+ model = "netlogic,XLP-GVP";
+ compatible = "netlogic,xlp";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
+ 1 0 0 0x16000000 0x02000000>; // GBU chipselects
+
+ serial0: serial@30000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0 0x112100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <125000000>;
+ interrupt-parent = <&pic>;
+ interrupts = <17>;
+ };
+ pic: pic@110000 {
+ compatible = "netlogic,xlp-pic";
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0 0x110000 0x200>;
+ interrupt-controller;
+ };
+
+ nor_flash@1,0 {
+ compatible = "cfi-flash";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ bank-width = <2>;
+ reg = <1 0 0x1000000>;
+
+ partition@0 {
+ label = "x-loader";
+ reg = <0x0 0x100000>; /* 1M */
+ read-only;
+ };
+
+ partition@100000 {
+ label = "u-boot";
+ reg = <0x100000 0x100000>; /* 1M */
+ };
+
+ partition@200000 {
+ label = "kernel";
+ reg = <0x200000 0x500000>; /* 5M */
+ };
+
+ partition@700000 {
+ label = "rootfs";
+ reg = <0x700000 0x800000>; /* 8M */
+ };
+
+ partition@f00000 {
+ label = "env";
+ reg = <0xf00000 0x100000>; /* 1M */
+ read-only;
+ };
+ };
+
+ gpio: xlp_gpio@114100 {
+ compatible = "netlogic,xlp980-gpio";
+ reg = <0 0x114100 0x1000>;
+ #gpio-cells = <2>;
+ gpio-controller;
+
+ #interrupt-cells = <2>;
+ interrupt-parent = <&pic>;
+ interrupts = <39>;
+ interrupt-controller;
+ };
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200 rdinit=/sbin/init";
+ };
+};
diff --git a/arch/mips/boot/dts/netlogic/xlp_rvp.dts b/arch/mips/boot/dts/netlogic/xlp_rvp.dts
new file mode 100644
index 000000000..aa0faee19
--- /dev/null
+++ b/arch/mips/boot/dts/netlogic/xlp_rvp.dts
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * XLP5XX Device Tree Source for RVP boards
+ */
+
+/dts-v1/;
+/ {
+ model = "netlogic,XLP-RVP";
+ compatible = "netlogic,xlp";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
+ 1 0 0 0x16000000 0x02000000>; // GBU chipselects
+
+ serial0: serial@30000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0 0x112100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <125000000>;
+ interrupt-parent = <&pic>;
+ interrupts = <17>;
+ };
+ pic: pic@110000 {
+ compatible = "netlogic,xlp-pic";
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0 0x110000 0x200>;
+ interrupt-controller;
+ };
+
+ nor_flash@1,0 {
+ compatible = "cfi-flash";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ bank-width = <2>;
+ reg = <1 0 0x1000000>;
+
+ partition@0 {
+ label = "x-loader";
+ reg = <0x0 0x100000>; /* 1M */
+ read-only;
+ };
+
+ partition@100000 {
+ label = "u-boot";
+ reg = <0x100000 0x100000>; /* 1M */
+ };
+
+ partition@200000 {
+ label = "kernel";
+ reg = <0x200000 0x500000>; /* 5M */
+ };
+
+ partition@700000 {
+ label = "rootfs";
+ reg = <0x700000 0x800000>; /* 8M */
+ };
+
+ partition@f00000 {
+ label = "env";
+ reg = <0xf00000 0x100000>; /* 1M */
+ read-only;
+ };
+ };
+
+ gpio: xlp_gpio@114100 {
+ compatible = "netlogic,xlp532-gpio";
+ reg = <0 0x114100 0x1000>;
+ #gpio-cells = <2>;
+ gpio-controller;
+
+ #interrupt-cells = <2>;
+ interrupt-parent = <&pic>;
+ interrupts = <39>;
+ interrupt-controller;
+ };
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200 rdinit=/sbin/init";
+ };
+};
diff --git a/arch/mips/boot/dts/netlogic/xlp_svp.dts b/arch/mips/boot/dts/netlogic/xlp_svp.dts
new file mode 100644
index 000000000..3bb0b2e08
--- /dev/null
+++ b/arch/mips/boot/dts/netlogic/xlp_svp.dts
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * XLP3XX Device Tree Source for SVP boards
+ */
+
+/dts-v1/;
+/ {
+ model = "netlogic,XLP-SVP";
+ compatible = "netlogic,xlp";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
+ 1 0 0 0x16000000 0x02000000>; // GBU chipselects
+
+ serial0: serial@30000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0 0x30100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <133333333>;
+ interrupt-parent = <&pic>;
+ interrupts = <17>;
+ };
+ serial1: serial@31000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0 0x31100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <133333333>;
+ interrupt-parent = <&pic>;
+ interrupts = <18>;
+ };
+ i2c0: ocores@32000 {
+ compatible = "opencores,i2c-ocores";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0 0x32100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <32000000>;
+ interrupt-parent = <&pic>;
+ interrupts = <30>;
+ };
+ i2c1: ocores@33000 {
+ compatible = "opencores,i2c-ocores";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0 0x33100 0xa00>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <32000000>;
+ interrupt-parent = <&pic>;
+ interrupts = <31>;
+
+ rtc@68 {
+ compatible = "dallas,ds1374";
+ reg = <0x68>;
+ };
+
+ dtt@4c {
+ compatible = "national,lm90";
+ reg = <0x4c>;
+ };
+ };
+ pic: pic@4000 {
+ compatible = "netlogic,xlp-pic";
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0 0x4000 0x200>;
+ interrupt-controller;
+ };
+
+ nor_flash@1,0 {
+ compatible = "cfi-flash";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ bank-width = <2>;
+ reg = <1 0 0x1000000>;
+
+ partition@0 {
+ label = "x-loader";
+ reg = <0x0 0x100000>; /* 1M */
+ read-only;
+ };
+
+ partition@100000 {
+ label = "u-boot";
+ reg = <0x100000 0x100000>; /* 1M */
+ };
+
+ partition@200000 {
+ label = "kernel";
+ reg = <0x200000 0x500000>; /* 5M */
+ };
+
+ partition@700000 {
+ label = "rootfs";
+ reg = <0x700000 0x800000>; /* 8M */
+ };
+
+ partition@f00000 {
+ label = "env";
+ reg = <0xf00000 0x100000>; /* 1M */
+ read-only;
+ };
+ };
+
+ gpio: xlp_gpio@34100 {
+ compatible = "netlogic,xlp316-gpio";
+ reg = <0 0x34100 0x1000>;
+ #gpio-cells = <2>;
+ gpio-controller;
+
+ #interrupt-cells = <2>;
+ interrupt-parent = <&pic>;
+ interrupts = <39>;
+ interrupt-controller;
+ };
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200 rdinit=/sbin/init";
+ };
+};
diff --git a/arch/mips/boot/dts/ni/169445.dts b/arch/mips/boot/dts/ni/169445.dts
new file mode 100644
index 000000000..5389ef46c
--- /dev/null
+++ b/arch/mips/boot/dts/ni/169445.dts
@@ -0,0 +1,100 @@
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ni,169445";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "mti,mips14KEc";
+ clocks = <&baseclk>;
+ reg = <0>;
+ };
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x10000000>;
+ };
+
+ baseclk: baseclock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ ahb@1f300000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1f300000 0x80FFF>;
+
+ gpio1: gpio@10 {
+ compatible = "ni,169445-nand-gpio";
+ reg = <0x10 0x4>;
+ reg-names = "dat";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio2: gpio@14 {
+ compatible = "ni,169445-nand-gpio";
+ reg = <0x14 0x4>;
+ reg-names = "dat";
+ gpio-controller;
+ #gpio-cells = <2>;
+ no-output;
+ };
+
+ nand@0 {
+ compatible = "gpio-control-nand";
+ nand-on-flash-bbt;
+ nand-ecc-mode = "soft_bch";
+ nand-ecc-step-size = <512>;
+ nand-ecc-strength = <4>;
+ reg = <0x0 4>;
+ gpios = <&gpio2 0 0>, /* rdy */
+ <&gpio1 1 0>, /* nce */
+ <&gpio1 2 0>, /* ale */
+ <&gpio1 3 0>, /* cle */
+ <&gpio1 4 0>; /* nwp */
+ };
+
+ serial@80000 {
+ compatible = "ns16550a";
+ reg = <0x80000 0x1000>;
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <6>;
+ clocks = <&baseclk>;
+ reg-shift = <0>;
+ };
+
+ ethernet@40000 {
+ compatible = "snps,dwmac-4.10a";
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <5>;
+ interrupt-names = "macirq";
+ reg = <0x40000 0x2000>;
+ clock-names = "stmmaceth", "pclk";
+ clocks = <&baseclk>, <&baseclk>;
+
+ phy-mode = "rgmii";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/ni/Makefile b/arch/mips/boot/dts/ni/Makefile
new file mode 100644
index 000000000..93867e1a5
--- /dev/null
+++ b/arch/mips/boot/dts/ni/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+dtb-$(CONFIG_FIT_IMAGE_FDT_NI169445) += 169445.dtb
diff --git a/arch/mips/boot/dts/pic32/Makefile b/arch/mips/boot/dts/pic32/Makefile
new file mode 100644
index 000000000..fb57f3632
--- /dev/null
+++ b/arch/mips/boot/dts/pic32/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_DTB_PIC32_MZDA_SK) += pic32mzda_sk.dtb
+
+dtb-$(CONFIG_DTB_PIC32_NONE) += \
+ pic32mzda_sk.dtb
+
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/pic32/pic32mzda.dtsi b/arch/mips/boot/dts/pic32/pic32mzda.dtsi
new file mode 100644
index 000000000..f1e3dad6b
--- /dev/null
+++ b/arch/mips/boot/dts/pic32/pic32mzda.dtsi
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ */
+#include <dt-bindings/clock/microchip,pic32-clock.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&evic>;
+
+ aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
+ gpio2 = &gpio2;
+ gpio3 = &gpio3;
+ gpio4 = &gpio4;
+ gpio5 = &gpio5;
+ gpio6 = &gpio6;
+ gpio7 = &gpio7;
+ gpio8 = &gpio8;
+ gpio9 = &gpio9;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ serial4 = &uart5;
+ serial5 = &uart6;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "mti,mips14KEc";
+ device_type = "cpu";
+ };
+ };
+
+ soc {
+ compatible = "microchip,pic32mzda-infra";
+ interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ /* external clock input on TxCLKI pin */
+ txcki: txcki_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <4000000>;
+ status = "disabled";
+ };
+
+ /* external input on REFCLKIx pin */
+ refix: refix_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ status = "disabled";
+ };
+
+ rootclk: clock-controller@1f801200 {
+ compatible = "microchip,pic32mzda-clk";
+ reg = <0x1f801200 0x200>;
+ #clock-cells = <1>;
+ microchip,pic32mzda-sosc;
+ };
+
+ evic: interrupt-controller@1f810000 {
+ compatible = "microchip,pic32mzda-evic";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x1f810000 0x1000>;
+ microchip,external-irqs = <3 8 13 18 23>;
+ };
+
+	pic32_pinctrl: pinctrl@1f801400 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "microchip,pic32mzda-pinctrl";
+ reg = <0x1f801400 0x400>;
+ clocks = <&rootclk PB1CLK>;
+ };
+
+ /* PORTA */
+ gpio0: gpio0@1f860000 {
+ compatible = "microchip,pic32mzda-gpio";
+ reg = <0x1f860000 0x100>;
+ interrupts = <118 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&rootclk PB4CLK>;
+ microchip,gpio-bank = <0>;
+ gpio-ranges = <&pic32_pinctrl 0 0 16>;
+ };
+
+ /* PORTB */
+ gpio1: gpio1@1f860100 {
+ compatible = "microchip,pic32mzda-gpio";
+ reg = <0x1f860100 0x100>;
+ interrupts = <119 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&rootclk PB4CLK>;
+ microchip,gpio-bank = <1>;
+ gpio-ranges = <&pic32_pinctrl 0 16 16>;
+ };
+
+ /* PORTC */
+ gpio2: gpio2@1f860200 {
+ compatible = "microchip,pic32mzda-gpio";
+ reg = <0x1f860200 0x100>;
+ interrupts = <120 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&rootclk PB4CLK>;
+ microchip,gpio-bank = <2>;
+ gpio-ranges = <&pic32_pinctrl 0 32 16>;
+ };
+
+ /* PORTD */
+ gpio3: gpio3@1f860300 {
+ compatible = "microchip,pic32mzda-gpio";
+ reg = <0x1f860300 0x100>;
+ interrupts = <121 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&rootclk PB4CLK>;
+ microchip,gpio-bank = <3>;
+ gpio-ranges = <&pic32_pinctrl 0 48 16>;
+ };
+
+ /* PORTE */
+ gpio4: gpio4@1f860400 {
+ compatible = "microchip,pic32mzda-gpio";
+ reg = <0x1f860400 0x100>;
+ interrupts = <122 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&rootclk PB4CLK>;
+ microchip,gpio-bank = <4>;
+ gpio-ranges = <&pic32_pinctrl 0 64 16>;
+ };
+
+ /* PORTF */
+ gpio5: gpio5@1f860500 {
+ compatible = "microchip,pic32mzda-gpio";
+ reg = <0x1f860500 0x100>;
+ interrupts = <123 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&rootclk PB4CLK>;
+ microchip,gpio-bank = <5>;
+ gpio-ranges = <&pic32_pinctrl 0 80 16>;
+ };
+
+ /* PORTG */
+ gpio6: gpio6@1f860600 {
+ compatible = "microchip,pic32mzda-gpio";
+ reg = <0x1f860600 0x100>;
+ interrupts = <124 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&rootclk PB4CLK>;
+ microchip,gpio-bank = <6>;
+ gpio-ranges = <&pic32_pinctrl 0 96 16>;
+ };
+
+ /* PORTH */
+ gpio7: gpio7@1f860700 {
+ compatible = "microchip,pic32mzda-gpio";
+ reg = <0x1f860700 0x100>;
+ interrupts = <125 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&rootclk PB4CLK>;
+ microchip,gpio-bank = <7>;
+ gpio-ranges = <&pic32_pinctrl 0 112 16>;
+ };
+
+ /* PORTI does not exist */
+
+ /* PORTJ */
+ gpio8: gpio8@1f860800 {
+ compatible = "microchip,pic32mzda-gpio";
+ reg = <0x1f860800 0x100>;
+ interrupts = <126 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&rootclk PB4CLK>;
+ microchip,gpio-bank = <8>;
+ gpio-ranges = <&pic32_pinctrl 0 128 16>;
+ };
+
+ /* PORTK */
+ gpio9: gpio9@1f860900 {
+ compatible = "microchip,pic32mzda-gpio";
+ reg = <0x1f860900 0x100>;
+ interrupts = <127 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&rootclk PB4CLK>;
+ microchip,gpio-bank = <9>;
+ gpio-ranges = <&pic32_pinctrl 0 144 16>;
+ };
+
+ sdhci: sdhci@1f8ec000 {
+ compatible = "microchip,pic32mzda-sdhci";
+ reg = <0x1f8ec000 0x100>;
+ interrupts = <191 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rootclk REF4CLK>, <&rootclk PB5CLK>;
+ clock-names = "base_clk", "sys_clk";
+ bus-width = <4>;
+ cap-sd-highspeed;
+ status = "disabled";
+ };
+
+ uart1: serial@1f822000 {
+ compatible = "microchip,pic32mzda-uart";
+ reg = <0x1f822000 0x50>;
+ interrupts = <112 IRQ_TYPE_LEVEL_HIGH>,
+ <113 IRQ_TYPE_LEVEL_HIGH>,
+ <114 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rootclk PB2CLK>;
+ status = "disabled";
+ };
+
+ uart2: serial@1f822200 {
+ compatible = "microchip,pic32mzda-uart";
+ reg = <0x1f822200 0x50>;
+ interrupts = <145 IRQ_TYPE_LEVEL_HIGH>,
+ <146 IRQ_TYPE_LEVEL_HIGH>,
+ <147 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rootclk PB2CLK>;
+ status = "disabled";
+ };
+
+ uart3: serial@1f822400 {
+ compatible = "microchip,pic32mzda-uart";
+ reg = <0x1f822400 0x50>;
+ interrupts = <157 IRQ_TYPE_LEVEL_HIGH>,
+ <158 IRQ_TYPE_LEVEL_HIGH>,
+ <159 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rootclk PB2CLK>;
+ status = "disabled";
+ };
+
+ uart4: serial@1f822600 {
+ compatible = "microchip,pic32mzda-uart";
+ reg = <0x1f822600 0x50>;
+ interrupts = <170 IRQ_TYPE_LEVEL_HIGH>,
+ <171 IRQ_TYPE_LEVEL_HIGH>,
+ <172 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rootclk PB2CLK>;
+ status = "disabled";
+ };
+
+ uart5: serial@1f822800 {
+ compatible = "microchip,pic32mzda-uart";
+ reg = <0x1f822800 0x50>;
+ interrupts = <179 IRQ_TYPE_LEVEL_HIGH>,
+ <180 IRQ_TYPE_LEVEL_HIGH>,
+ <181 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rootclk PB2CLK>;
+ status = "disabled";
+ };
+
+ uart6: serial@1f822A00 {
+ compatible = "microchip,pic32mzda-uart";
+ reg = <0x1f822A00 0x50>;
+ interrupts = <188 IRQ_TYPE_LEVEL_HIGH>,
+ <189 IRQ_TYPE_LEVEL_HIGH>,
+ <190 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rootclk PB2CLK>;
+ status = "disabled";
+ };
+};
diff --git a/arch/mips/boot/dts/pic32/pic32mzda_sk.dts b/arch/mips/boot/dts/pic32/pic32mzda_sk.dts
new file mode 100644
index 000000000..d7fa5d55d
--- /dev/null
+++ b/arch/mips/boot/dts/pic32/pic32mzda_sk.dts
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#include "pic32mzda.dtsi"
+
+/ {
+ compatible = "microchip,pic32mzda-sk", "microchip,pic32mzda";
+ model = "Microchip PIC32MZDA Starter Kit";
+
+ memory {
+ device_type = "memory";
+ reg = <0x08000000 0x08000000>;
+ };
+
+ chosen {
+ bootargs = "earlyprintk=ttyPIC1,115200n8r console=ttyPIC1,115200n8";
+ };
+
+ leds0 {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&user_leds_s0>;
+
+ led@1 {
+ label = "pic32mzda_sk:red:led1";
+ gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ led@2 {
+ label = "pic32mzda_sk:yellow:led2";
+ gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+ };
+
+ led@3 {
+ label = "pic32mzda_sk:green:led3";
+ gpios = <&gpio7 2 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
+
+ keys0 {
+ compatible = "gpio-keys";
+ pinctrl-0 = <&user_buttons_s0>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@sw1 {
+ label = "ESC";
+ linux,code = <1>;
+ gpios = <&gpio1 12 0>;
+ };
+
+ button@sw2 {
+ label = "Home";
+ linux,code = <102>;
+ gpios = <&gpio1 13 0>;
+ };
+
+ button@sw3 {
+ label = "Menu";
+ linux,code = <139>;
+ gpios = <&gpio1 14 0>;
+ };
+ };
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
+&sdhci {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhc1>;
+ status = "okay";
+ assigned-clocks = <&rootclk REF2CLK>, <&rootclk REF4CLK>,
+ <&rootclk REF5CLK>;
+ assigned-clock-rates = <50000000>, <25000000>, <40000000>;
+};
+
+&pic32_pinctrl {
+
+ pinctrl_sdhc1: sdhc1_pins0 {
+ pins = "A6", "D4", "G13", "G12", "G14", "A7", "A0";
+ microchip,digital;
+ };
+
+ user_leds_s0: user_leds_s0 {
+ pins = "H0", "H1", "H2";
+ output-low;
+ microchip,digital;
+ };
+
+ user_buttons_s0: user_buttons_s0 {
+ pins = "B12", "B13", "B14";
+ microchip,digital;
+ input-enable;
+ bias-pull-up;
+ };
+
+ pinctrl_uart2: pinctrl_uart2 {
+ uart2-tx {
+ pins = "G9";
+ function = "U2TX";
+ microchip,digital;
+ output-high;
+ };
+ uart2-rx {
+ pins = "B0";
+ function = "U2RX";
+ microchip,digital;
+ input-enable;
+ };
+ };
+
+ pinctrl_uart4: uart4-0 {
+ uart4-tx {
+ pins = "C3";
+ function = "U4TX";
+ microchip,digital;
+ output-high;
+ };
+ uart4-rx {
+ pins = "E8";
+ function = "U4RX";
+ microchip,digital;
+ input-enable;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/qca/Makefile b/arch/mips/boot/dts/qca/Makefile
new file mode 100644
index 000000000..4451cf45b
--- /dev/null
+++ b/arch/mips/boot/dts/qca/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+# All DTBs
+dtb-$(CONFIG_ATH79) += ar9132_tl_wr1043nd_v1.dtb
+dtb-$(CONFIG_ATH79) += ar9331_dpt_module.dtb
+dtb-$(CONFIG_ATH79) += ar9331_dragino_ms14.dtb
+dtb-$(CONFIG_ATH79) += ar9331_omega.dtb
+dtb-$(CONFIG_ATH79) += ar9331_tl_mr3020.dtb
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi
new file mode 100644
index 000000000..61dcfa5b6
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9132.dtsi
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <dt-bindings/clock/ath79-clk.h>
+
+/ {
+ compatible = "qca,ar9132";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "mips,mips24Kc";
+ clocks = <&pll ATH79_CLK_CPU>;
+ reg = <0>;
+ };
+ };
+
+ cpuintc: interrupt-controller {
+ compatible = "qca,ar9132-cpu-intc", "qca,ar7100-cpu-intc";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ qca,ddr-wb-channel-interrupts = <2>, <3>, <4>, <5>;
+ qca,ddr-wb-channels = <&ddr_ctrl 3>, <&ddr_ctrl 2>,
+ <&ddr_ctrl 0>, <&ddr_ctrl 1>;
+ };
+
+ ahb {
+ compatible = "simple-bus";
+ ranges;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+
+ apb {
+ compatible = "simple-bus";
+ ranges;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ interrupt-parent = <&miscintc>;
+
+ ddr_ctrl: memory-controller@18000000 {
+ compatible = "qca,ar9132-ddr-controller",
+ "qca,ar7240-ddr-controller";
+ reg = <0x18000000 0x100>;
+
+ #qca,ddr-wb-channel-cells = <1>;
+ };
+
+ uart: uart@18020000 {
+ compatible = "ns8250";
+ reg = <0x18020000 0x20>;
+ interrupts = <3>;
+
+ clocks = <&pll ATH79_CLK_AHB>;
+ clock-names = "uart";
+
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ no-loopback-test;
+
+ status = "disabled";
+ };
+
+ gpio: gpio@18040000 {
+ compatible = "qca,ar9132-gpio",
+ "qca,ar7100-gpio";
+ reg = <0x18040000 0x30>;
+ interrupts = <2>;
+
+ ngpios = <22>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ pll: pll-controller@18050000 {
+ compatible = "qca,ar9132-pll",
+ "qca,ar9130-pll";
+ reg = <0x18050000 0x20>;
+
+ clock-names = "ref";
+			/* The board must provide the ref clock */
+
+ #clock-cells = <1>;
+ clock-output-names = "cpu", "ddr", "ahb";
+ };
+
+ wdt: wdt@18060008 {
+ compatible = "qca,ar7130-wdt";
+ reg = <0x18060008 0x8>;
+
+ interrupts = <4>;
+
+ clocks = <&pll ATH79_CLK_AHB>;
+ clock-names = "wdt";
+ };
+
+ miscintc: interrupt-controller@18060010 {
+ compatible = "qca,ar9132-misc-intc",
+ "qca,ar7100-misc-intc";
+ reg = <0x18060010 0x8>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <6>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ rst: reset-controller@1806001c {
+ compatible = "qca,ar9132-reset",
+ "qca,ar7100-reset";
+ reg = <0x1806001c 0x4>;
+
+ #reset-cells = <1>;
+ };
+ };
+
+ usb: usb@1b000100 {
+ compatible = "qca,ar7100-ehci", "generic-ehci";
+ reg = <0x1b000100 0x100>;
+
+ interrupts = <3>;
+ resets = <&rst 5>;
+
+ has-transaction-translator;
+
+ phy-names = "usb";
+ phys = <&usb_phy>;
+
+ status = "disabled";
+ };
+
+ spi: spi@1f000000 {
+ compatible = "qca,ar9132-spi", "qca,ar7100-spi";
+ reg = <0x1f000000 0x10>;
+
+ clocks = <&pll ATH79_CLK_AHB>;
+ clock-names = "ahb";
+
+ status = "disabled";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ usb_phy: usb-phy {
+ compatible = "qca,ar7100-usb-phy";
+
+ reset-names = "phy", "suspend-override";
+ resets = <&rst 4>, <&rst 3>;
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+};
diff --git a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
new file mode 100644
index 000000000..7fccf6357
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+#include "ar9132.dtsi"
+
+/ {
+ compatible = "tplink,tl-wr1043nd-v1", "qca,ar9132";
+ model = "TP-Link TL-WR1043ND Version 1";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x2000000>;
+ };
+
+ extosc: ref {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <40000000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@0 {
+ label = "reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&gpio 3 GPIO_ACTIVE_LOW>;
+ debounce-interval = <60>;
+ };
+
+ button@1 {
+ label = "qss";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&gpio 7 GPIO_ACTIVE_LOW>;
+ debounce-interval = <60>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led@0 {
+ label = "tp-link:green:usb";
+ gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
+ };
+
+ led@1 {
+ label = "tp-link:green:system";
+ gpios = <&gpio 2 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ led@2 {
+ label = "tp-link:green:qss";
+ gpios = <&gpio 5 GPIO_ACTIVE_HIGH>;
+ };
+
+ led@3 {
+ label = "tp-link:green:wlan";
+ gpios = <&gpio 9 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&uart {
+ status = "okay";
+};
+
+&pll {
+ clocks = <&extosc>;
+};
+
+&usb {
+ status = "okay";
+};
+
+&usb_phy {
+ status = "okay";
+};
+
+&spi {
+ status = "okay";
+ num-cs = <1>;
+
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "s25sl064a";
+ reg = <0>;
+ spi-max-frequency = <25000000>;
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x000000 0x020000>;
+ };
+
+ partition@1 {
+ label = "firmware";
+ reg = <0x020000 0x7D0000>;
+ };
+
+ partition@2 {
+ label = "art";
+ reg = <0x7F0000 0x010000>;
+ read-only;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
new file mode 100644
index 000000000..83b3c0ce1
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331.dtsi
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <dt-bindings/clock/ath79-clk.h>
+
+/ {
+ compatible = "qca,ar9331";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "mips,mips24Kc";
+ clocks = <&pll ATH79_CLK_CPU>;
+ reg = <0>;
+ };
+ };
+
+ cpuintc: interrupt-controller {
+ compatible = "qca,ar7100-cpu-intc";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ qca,ddr-wb-channel-interrupts = <2>, <3>;
+ qca,ddr-wb-channels = <&ddr_ctrl 3>, <&ddr_ctrl 2>;
+ };
+
+ ref: ref {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+
+ ahb {
+ compatible = "simple-bus";
+ ranges;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+
+ apb {
+ compatible = "simple-bus";
+ ranges;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ interrupt-parent = <&miscintc>;
+
+ ddr_ctrl: memory-controller@18000000 {
+ compatible = "qca,ar7240-ddr-controller";
+ reg = <0x18000000 0x100>;
+
+ #qca,ddr-wb-channel-cells = <1>;
+ };
+
+ uart: serial@18020000 {
+ compatible = "qca,ar9330-uart";
+ reg = <0x18020000 0x14>;
+
+ interrupts = <3>;
+
+ clocks = <&ref>;
+ clock-names = "uart";
+
+ status = "disabled";
+ };
+
+ gpio: gpio@18040000 {
+ compatible = "qca,ar7100-gpio";
+ reg = <0x18040000 0x34>;
+ interrupts = <2>;
+
+ ngpios = <30>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ status = "disabled";
+ };
+
+ pll: pll-controller@18050000 {
+ compatible = "qca,ar9330-pll";
+ reg = <0x18050000 0x100>;
+
+ clocks = <&ref>;
+ clock-names = "ref";
+
+ #clock-cells = <1>;
+ };
+
+ miscintc: interrupt-controller@18060010 {
+ compatible = "qca,ar7240-misc-intc";
+ reg = <0x18060010 0x8>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <6>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ rst: reset-controller@1806001c {
+ compatible = "qca,ar7100-reset";
+ reg = <0x1806001c 0x4>;
+
+ #reset-cells = <1>;
+ };
+ };
+
+ eth0: ethernet@19000000 {
+ compatible = "qca,ar9330-eth";
+ reg = <0x19000000 0x200>;
+ interrupts = <4>;
+
+ resets = <&rst 9>, <&rst 22>;
+ reset-names = "mac", "mdio";
+ clocks = <&pll ATH79_CLK_AHB>, <&pll ATH79_CLK_AHB>;
+ clock-names = "eth", "mdio";
+
+ phy-mode = "mii";
+ phy-handle = <&phy_port4>;
+
+ status = "disabled";
+ };
+
+ eth1: ethernet@1a000000 {
+ compatible = "qca,ar9330-eth";
+ reg = <0x1a000000 0x200>;
+ interrupts = <5>;
+ resets = <&rst 13>, <&rst 23>;
+ reset-names = "mac", "mdio";
+ clocks = <&pll ATH79_CLK_AHB>, <&pll ATH79_CLK_AHB>;
+ clock-names = "eth", "mdio";
+
+ phy-mode = "gmii";
+
+ status = "disabled";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch10: switch@10 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "qca,ar9331-switch";
+ reg = <0x10>;
+ resets = <&rst 8>;
+ reset-names = "switch";
+
+ interrupt-parent = <&miscintc>;
+ interrupts = <12>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch_port0: port@0 {
+ reg = <0x0>;
+ label = "cpu";
+ ethernet = <&eth1>;
+
+ phy-mode = "gmii";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ switch_port1: port@1 {
+ reg = <0x1>;
+ phy-handle = <&phy_port0>;
+ phy-mode = "internal";
+
+ status = "disabled";
+ };
+
+ switch_port2: port@2 {
+ reg = <0x2>;
+ phy-handle = <&phy_port1>;
+ phy-mode = "internal";
+
+ status = "disabled";
+ };
+
+ switch_port3: port@3 {
+ reg = <0x3>;
+ phy-handle = <&phy_port2>;
+ phy-mode = "internal";
+
+ status = "disabled";
+ };
+
+ switch_port4: port@4 {
+ reg = <0x4>;
+ phy-handle = <&phy_port3>;
+ phy-mode = "internal";
+
+ status = "disabled";
+ };
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&switch10>;
+
+ phy_port0: phy@0 {
+ reg = <0x0>;
+ interrupts = <0>;
+ status = "disabled";
+ };
+
+ phy_port1: phy@1 {
+ reg = <0x1>;
+ interrupts = <0>;
+ status = "disabled";
+ };
+
+ phy_port2: phy@2 {
+ reg = <0x2>;
+ interrupts = <0>;
+ status = "disabled";
+ };
+
+ phy_port3: phy@3 {
+ reg = <0x3>;
+ interrupts = <0>;
+ status = "disabled";
+ };
+
+ phy_port4: phy@4 {
+ reg = <0x4>;
+ interrupts = <0>;
+ status = "disabled";
+ };
+ };
+ };
+ };
+ };
+
+ usb: usb@1b000100 {
+ compatible = "chipidea,usb2";
+ reg = <0x1b000000 0x200>;
+
+ interrupts = <3>;
+ resets = <&rst 5>;
+
+ phy-names = "usb-phy";
+ phys = <&usb_phy>;
+
+ status = "disabled";
+ };
+
+ spi: spi@1f000000 {
+ compatible = "qca,ar7100-spi";
+ reg = <0x1f000000 0x10>;
+
+ clocks = <&pll ATH79_CLK_AHB>;
+ clock-names = "ahb";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+ };
+
+ usb_phy: usb-phy {
+ compatible = "qca,ar7100-usb-phy";
+
+ reset-names = "phy", "suspend-override";
+ resets = <&rst 4>, <&rst 3>;
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+};
diff --git a/arch/mips/boot/dts/qca/ar9331_dpt_module.dts b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
new file mode 100644
index 000000000..7695d326d
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+#include "ar9331.dtsi"
+
+/ {
+ model = "DPTechnics DPT-Module";
+ compatible = "dptechnics,dpt-module";
+
+ aliases {
+ serial0 = &uart;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x4000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@0 {
+ label = "reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&ref {
+ clock-frequency = <25000000>;
+};
+
+&uart {
+ status = "okay";
+};
+
+&gpio {
+ status = "okay";
+};
+
+&usb {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb_phy {
+ status = "okay";
+};
+
+&spi {
+ num-chipselects = <1>;
+ status = "okay";
+
+ /* Winbond 25Q128FVSG SPI flash */
+ spiflash: w25q128@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "winbond,w25q128", "jedec,spi-nor";
+ spi-max-frequency = <104000000>;
+ reg = <0>;
+ };
+};
+
+&eth0 {
+ status = "okay";
+};
+
+&eth1 {
+ status = "okay";
+};
+
+&switch_port1 {
+ label = "lan0";
+ status = "okay";
+};
+
+&phy_port0 {
+ status = "okay";
+};
+
+&phy_port4 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
new file mode 100644
index 000000000..d38aa73f1
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+#include "ar9331.dtsi"
+
+/ {
+ model = "Dragino MS14 (Dragino 2)";
+ compatible = "dragino,ms14";
+
+ aliases {
+ serial0 = &uart;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x4000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ wlan {
+ label = "dragino2:red:wlan";
+ gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ lan {
+ label = "dragino2:red:lan";
+ gpios = <&gpio 13 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ wan {
+ label = "dragino2:red:wan";
+ gpios = <&gpio 17 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ system {
+ label = "dragino2:red:system";
+ gpios = <&gpio 28 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@0 {
+ label = "jumpstart";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+ };
+
+ button@1 {
+ label = "reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&gpio 12 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&ref {
+ clock-frequency = <25000000>;
+};
+
+&uart {
+ status = "okay";
+};
+
+&gpio {
+ status = "okay";
+};
+
+&usb {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb_phy {
+ status = "okay";
+};
+
+&spi {
+ num-chipselects = <1>;
+ status = "okay";
+
+ /* Winbond 25Q128BVFG SPI flash */
+ spiflash: w25q128@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "winbond,w25q128", "jedec,spi-nor";
+ spi-max-frequency = <104000000>;
+ reg = <0>;
+ };
+};
diff --git a/arch/mips/boot/dts/qca/ar9331_omega.dts b/arch/mips/boot/dts/qca/ar9331_omega.dts
new file mode 100644
index 000000000..11778abac
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331_omega.dts
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+#include "ar9331.dtsi"
+
+/ {
+ model = "Onion Omega";
+ compatible = "onion,omega";
+
+ aliases {
+ serial0 = &uart;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x4000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ system {
+ label = "onion:amber:system";
+ gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@0 {
+ label = "reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&gpio 11 GPIO_ACTIVE_HIGH>;
+ };
+ };
+};
+
+&ref {
+ clock-frequency = <25000000>;
+};
+
+&uart {
+ status = "okay";
+};
+
+&gpio {
+ status = "okay";
+};
+
+&usb {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb_phy {
+ status = "okay";
+};
+
+&spi {
+ num-chipselects = <1>;
+ status = "okay";
+
+ /* Winbond 25Q128FVSG SPI flash */
+ spiflash: w25q128@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "winbond,w25q128", "jedec,spi-nor";
+ spi-max-frequency = <104000000>;
+ reg = <0>;
+ };
+};
diff --git a/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
new file mode 100644
index 000000000..c8290d36c
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+#include "ar9331.dtsi"
+
+/ {
+ model = "TP-Link TL-MR3020";
+ compatible = "tplink,tl-mr3020";
+
+ aliases {
+ serial0 = &uart;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x2000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ wlan {
+ label = "tp-link:green:wlan";
+ gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ lan {
+ label = "tp-link:green:lan";
+ gpios = <&gpio 17 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ wps {
+ label = "tp-link:green:wps";
+ gpios = <&gpio 26 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led3g {
+ label = "tp-link:green:3g";
+ gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@0 {
+ label = "wps";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&gpio 11 GPIO_ACTIVE_HIGH>;
+ };
+
+ button@1 {
+ label = "sw1";
+ linux,code = <BTN_0>;
+ gpios = <&gpio 18 GPIO_ACTIVE_HIGH>;
+ };
+
+ button@2 {
+ label = "sw2";
+ linux,code = <BTN_1>;
+ gpios = <&gpio 20 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ reg_usb_vbus: reg_usb_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&ref {
+ clock-frequency = <25000000>;
+};
+
+&uart {
+ status = "okay";
+};
+
+&gpio {
+ status = "okay";
+};
+
+&usb {
+ dr_mode = "host";
+ vbus-supply = <&reg_usb_vbus>;
+ status = "okay";
+};
+
+&usb_phy {
+ status = "okay";
+};
+
+&spi {
+ num-chipselects = <1>;
+ status = "okay";
+
+ /* Spansion S25FL032PIF SPI flash */
+ spiflash: s25sl032p@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25sl032p", "jedec,spi-nor";
+ spi-max-frequency = <104000000>;
+ reg = <0>;
+ };
+};
diff --git a/arch/mips/boot/dts/ralink/Makefile b/arch/mips/boot/dts/ralink/Makefile
new file mode 100644
index 000000000..6c26dfa0a
--- /dev/null
+++ b/arch/mips/boot/dts/ralink/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_DTB_RT2880_EVAL) += rt2880_eval.dtb
+dtb-$(CONFIG_DTB_RT305X_EVAL) += rt3052_eval.dtb
+dtb-$(CONFIG_DTB_RT3883_EVAL) += rt3883_eval.dtb
+dtb-$(CONFIG_DTB_MT7620A_EVAL) += mt7620a_eval.dtb
+dtb-$(CONFIG_DTB_OMEGA2P) += omega2p.dtb
+dtb-$(CONFIG_DTB_VOCORE2) += vocore2.dtb
+
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts b/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
new file mode 100644
index 000000000..6069b33cf
--- /dev/null
+++ b/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Stefan Roese <sr@denx.de>
+ */
+
+/dts-v1/;
+
+/include/ "mt7628a.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ compatible = "gardena,smart-gateway-mt7688", "ralink,mt7688a-soc",
+ "ralink,mt7628a-soc";
+ model = "GARDENA smart Gateway (MT7688)";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x8000000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_gpio_gpio>; /* GPIO11 */
+
+ user_btn1 {
+ label = "USER_BTN1";
+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_PROG1>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_pwm0_gpio>, /* GPIO18 */
+ <&pinmux_pwm1_gpio>, /* GPIO19 */
+ <&pinmux_sdmode_gpio>, /* GPIO22..29 */
+ <&pinmux_p0led_an_gpio>; /* GPIO43 */
+ /*
+ * <&pinmux_i2s_gpio> (covers GPIO0..3) is needed here as
+ * well for GPIO3. But this is already claimed for uart1
+ * (see below). So we can't include it in this LED node.
+ */
+
+ power_blue {
+ label = "smartgw:power:blue";
+ gpios = <&gpio 18 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ power_green {
+ label = "smartgw:power:green";
+ gpios = <&gpio 19 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ power_red {
+ label = "smartgw:power:red";
+ gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ radio_blue {
+ label = "smartgw:radio:blue";
+ gpios = <&gpio 23 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ radio_green {
+ label = "smartgw:radio:green";
+ gpios = <&gpio 24 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ radio_red {
+ label = "smartgw:radio:red";
+ gpios = <&gpio 25 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ internet_blue {
+ label = "smartgw:internet:blue";
+ gpios = <&gpio 26 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ internet_green {
+ label = "smartgw:internet:green";
+ gpios = <&gpio 27 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ internet_red {
+ label = "smartgw:internet:red";
+ gpios = <&gpio 28 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ ethernet_link {
+ label = "smartgw:eth:link";
+ gpios = <&gpio 3 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "netdev";
+ };
+
+ ethernet_activity {
+ label = "smartgw:eth:act";
+ gpios = <&gpio 43 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "netdev";
+ };
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+};
+
+&i2c {
+ status = "okay";
+};
+
+&spi {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_spi_spi>, <&pinmux_spi_cs1_cs>;
+
+ m25p80@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <40000000>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x0 0xa0000>;
+ read-only;
+ };
+
+ partition@a0000 {
+ label = "uboot_env0";
+ reg = <0xa0000 0x10000>;
+ };
+
+ partition@b0000 {
+ label = "uboot_env1";
+ reg = <0xb0000 0x10000>;
+ };
+
+ factory: partition@c0000 {
+ label = "factory";
+ reg = <0xc0000 0x10000>;
+ read-only;
+ };
+ };
+ };
+
+ nand_flash@1 {
+ compatible = "spi-nand";
+ linux,mtd-name = "gd5f";
+ reg = <1>;
+ spi-max-frequency = <40000000>;
+ };
+};
+
+&uart1 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_i2s_gpio>; /* GPIO0..3 */
+
+ fifo-size = <8>;
+ tx-threshold = <8>;
+
+ rts-gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
+ cts-gpios = <&gpio 2 GPIO_ACTIVE_LOW>;
+};
+
+&uart2 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_p2led_an_gpio>, /* GPIO41 */
+ <&pinmux_p3led_an_gpio>; /* GPIO40 */
+
+ rts-gpios = <&gpio 40 GPIO_ACTIVE_LOW>;
+ cts-gpios = <&gpio 41 GPIO_ACTIVE_LOW>;
+};
+
+&watchdog {
+ status = "okay";
+};
+
+&wmac {
+ status = "okay";
+ mediatek,mtd-eeprom = <&factory 0x0000>;
+};
diff --git a/arch/mips/boot/dts/ralink/mt7620a.dtsi b/arch/mips/boot/dts/ralink/mt7620a.dtsi
new file mode 100644
index 000000000..1f6e5320f
--- /dev/null
+++ b/arch/mips/boot/dts/ralink/mt7620a.dtsi
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ralink,mtk7620a-soc";
+
+ cpus {
+ cpu@0 {
+ compatible = "mips,mips24KEc";
+ };
+ };
+
+ cpuintc: cpuintc {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ palmbus@10000000 {
+ compatible = "palmbus";
+ reg = <0x10000000 0x200000>;
+ ranges = <0x0 0x10000000 0x1FFFFF>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ sysc@0 {
+ compatible = "ralink,mt7620a-sysc";
+ reg = <0x0 0x100>;
+ };
+
+ intc: intc@200 {
+ compatible = "ralink,mt7620a-intc", "ralink,rt2880-intc";
+ reg = <0x200 0x100>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
+
+ memc@300 {
+ compatible = "ralink,mt7620a-memc", "ralink,rt3050-memc";
+ reg = <0x300 0x100>;
+ };
+
+ uartlite@c00 {
+ compatible = "ralink,mt7620a-uart", "ralink,rt2880-uart", "ns16550a";
+ reg = <0xc00 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <12>;
+
+ reg-shift = <2>;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/ralink/mt7620a_eval.dts b/arch/mips/boot/dts/ralink/mt7620a_eval.dts
new file mode 100644
index 000000000..8de8f89f3
--- /dev/null
+++ b/arch/mips/boot/dts/ralink/mt7620a_eval.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "mt7620a.dtsi"
+
+/ {
+ compatible = "ralink,mt7620a-eval-board", "ralink,mt7620a-soc";
+ model = "Ralink MT7620A evaluation board";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x2000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,57600";
+ };
+};
diff --git a/arch/mips/boot/dts/ralink/mt7628a.dtsi b/arch/mips/boot/dts/ralink/mt7628a.dtsi
new file mode 100644
index 000000000..892e8ab86
--- /dev/null
+++ b/arch/mips/boot/dts/ralink/mt7628a.dtsi
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ralink,mt7628a-soc";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "mti,mips24KEc";
+ device_type = "cpu";
+ reg = <0>;
+ };
+ };
+
+ resetc: reset-controller {
+ compatible = "ralink,rt2880-reset";
+ #reset-cells = <1>;
+ };
+
+ cpuintc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ palmbus@10000000 {
+ compatible = "palmbus";
+ reg = <0x10000000 0x200000>;
+ ranges = <0x0 0x10000000 0x1FFFFF>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ sysc: system-controller@0 {
+ compatible = "ralink,mt7620a-sysc", "syscon";
+ reg = <0x0 0x60>;
+ };
+
+ pinmux: pinmux@60 {
+ compatible = "pinctrl-single";
+ reg = <0x60 0x8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #pinctrl-cells = <2>;
+ pinctrl-single,bit-per-mux;
+ pinctrl-single,register-width = <32>;
+ pinctrl-single,function-mask = <0x1>;
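+ /* Each entry below is a <register-offset value mask> triplet: e.g.
+ * pinmux_i2s_gpio writes 0x40 into the bits selected by mask 0xc0 of
+ * the register at offset 0x0, muxing the I2S pins as GPIOs.
+ */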
+
+ pinmux_gpio_gpio: pinmux_gpio_gpio {
+ pinctrl-single,bits = <0x0 0x0 0x3>;
+ };
+
+ pinmux_spi_cs1_cs: pinmux_spi_cs1_cs {
+ pinctrl-single,bits = <0x0 0x0 0x30>;
+ };
+
+ pinmux_i2s_gpio: pinmux_i2s_gpio {
+ pinctrl-single,bits = <0x0 0x40 0xc0>;
+ };
+
+ pinmux_uart0_uart: pinmux_uart0_uart0 {
+ pinctrl-single,bits = <0x0 0x0 0x300>;
+ };
+
+ pinmux_sdmode_sdxc: pinmux_sdmode_sdxc {
+ pinctrl-single,bits = <0x0 0x0 0xc00>;
+ };
+
+ pinmux_sdmode_gpio: pinmux_sdmode_gpio {
+ pinctrl-single,bits = <0x0 0x400 0xc00>;
+ };
+
+ pinmux_spi_spi: pinmux_spi_spi {
+ pinctrl-single,bits = <0x0 0x0 0x1000>;
+ };
+
+ pinmux_refclk_gpio: pinmux_refclk_gpio {
+ pinctrl-single,bits = <0x0 0x40000 0x40000>;
+ };
+
+ pinmux_i2c_i2c: pinmux_i2c_i2c {
+ pinctrl-single,bits = <0x0 0x0 0x300000>;
+ };
+
+ pinmux_uart1_uart: pinmux_uart1_uart1 {
+ pinctrl-single,bits = <0x0 0x0 0x3000000>;
+ };
+
+ pinmux_uart2_uart: pinmux_uart2_uart {
+ pinctrl-single,bits = <0x0 0x0 0xc000000>;
+ };
+
+ pinmux_pwm0_pwm: pinmux_pwm0_pwm {
+ pinctrl-single,bits = <0x0 0x0 0x30000000>;
+ };
+
+ pinmux_pwm0_gpio: pinmux_pwm0_gpio {
+ pinctrl-single,bits = <0x0 0x10000000
+ 0x30000000>;
+ };
+
+ pinmux_pwm1_pwm: pinmux_pwm1_pwm {
+ pinctrl-single,bits = <0x0 0x0 0xc0000000>;
+ };
+
+ pinmux_pwm1_gpio: pinmux_pwm1_gpio {
+ pinctrl-single,bits = <0x0 0x40000000
+ 0xc0000000>;
+ };
+
+ pinmux_p0led_an_gpio: pinmux_p0led_an_gpio {
+ pinctrl-single,bits = <0x4 0x4 0xc>;
+ };
+
+ pinmux_p1led_an_gpio: pinmux_p1led_an_gpio {
+ pinctrl-single,bits = <0x4 0x10 0x30>;
+ };
+
+ pinmux_p2led_an_gpio: pinmux_p2led_an_gpio {
+ pinctrl-single,bits = <0x4 0x40 0xc0>;
+ };
+
+ pinmux_p3led_an_gpio: pinmux_p3led_an_gpio {
+ pinctrl-single,bits = <0x4 0x100 0x300>;
+ };
+
+ pinmux_p4led_an_gpio: pinmux_p4led_an_gpio {
+ pinctrl-single,bits = <0x4 0x400 0xc00>;
+ };
+ };
+
+ watchdog: watchdog@100 {
+ compatible = "mediatek,mt7621-wdt";
+ reg = <0x100 0x30>;
+
+ resets = <&resetc 8>;
+ reset-names = "wdt";
+
+ interrupt-parent = <&intc>;
+ interrupts = <24>;
+
+ status = "disabled";
+ };
+
+ intc: interrupt-controller@200 {
+ compatible = "ralink,rt2880-intc";
+ reg = <0x200 0x100>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ resets = <&resetc 9>;
+ reset-names = "intc";
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+
+ ralink,intc-registers = <0x9c 0xa0
+ 0x6c 0xa4
+ 0x80 0x78>;
+ };
+
+ memory-controller@300 {
+ compatible = "ralink,mt7620a-memc";
+ reg = <0x300 0x100>;
+ };
+
+ gpio: gpio@600 {
+ compatible = "mediatek,mt7621-gpio";
+ reg = <0x600 0x100>;
+
+ gpio-controller;
+ interrupt-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <6>;
+ };
+
+ spi: spi@b00 {
+ compatible = "ralink,mt7621-spi";
+ reg = <0xb00 0x100>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_spi_spi>;
+
+ resets = <&resetc 18>;
+ reset-names = "spi";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c: i2c@900 {
+ compatible = "mediatek,mt7621-i2c";
+ reg = <0x900 0x100>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_i2c_i2c>;
+
+ resets = <&resetc 16>;
+ reset-names = "i2c";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ uart0: uartlite@c00 {
+ compatible = "ns16550a";
+ reg = <0xc00 0x100>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_uart0_uart>;
+
+ resets = <&resetc 12>;
+ reset-names = "uart0";
+
+ interrupt-parent = <&intc>;
+ interrupts = <20>;
+
+ reg-shift = <2>;
+ };
+
+ uart1: uart1@d00 {
+ compatible = "ns16550a";
+ reg = <0xd00 0x100>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_uart1_uart>;
+
+ resets = <&resetc 19>;
+ reset-names = "uart1";
+
+ interrupt-parent = <&intc>;
+ interrupts = <21>;
+
+ reg-shift = <2>;
+ };
+
+ uart2: uart2@e00 {
+ compatible = "ns16550a";
+ reg = <0xe00 0x100>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinmux_uart2_uart>;
+
+ resets = <&resetc 20>;
+ reset-names = "uart2";
+
+ interrupt-parent = <&intc>;
+ interrupts = <22>;
+
+ reg-shift = <2>;
+ };
+ };
+
+ usb_phy: usb-phy@10120000 {
+ compatible = "mediatek,mt7628-usbphy";
+ reg = <0x10120000 0x1000>;
+
+ #phy-cells = <0>;
+
+ ralink,sysctl = <&sysc>;
+ resets = <&resetc 22 &resetc 25>;
+ reset-names = "host", "device";
+ };
+
+ ehci@101c0000 {
+ compatible = "generic-ehci";
+ reg = <0x101c0000 0x1000>;
+
+ phys = <&usb_phy>;
+ phy-names = "usb";
+
+ interrupt-parent = <&intc>;
+ interrupts = <18>;
+ };
+
+ wmac: wmac@10300000 {
+ compatible = "mediatek,mt7628-wmac";
+ reg = <0x10300000 0x100000>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <6>;
+
+ status = "disabled";
+ };
+};
diff --git a/arch/mips/boot/dts/ralink/omega2p.dts b/arch/mips/boot/dts/ralink/omega2p.dts
new file mode 100644
index 000000000..5884fd48f
--- /dev/null
+++ b/arch/mips/boot/dts/ralink/omega2p.dts
@@ -0,0 +1,18 @@
+/dts-v1/;
+
+/include/ "mt7628a.dtsi"
+
+/ {
+ compatible = "onion,omega2+", "ralink,mt7688a-soc", "ralink,mt7628a-soc";
+ model = "Onion Omega2+";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x8000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
diff --git a/arch/mips/boot/dts/ralink/rt2880.dtsi b/arch/mips/boot/dts/ralink/rt2880.dtsi
new file mode 100644
index 000000000..8fc1987d9
--- /dev/null
+++ b/arch/mips/boot/dts/ralink/rt2880.dtsi
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ralink,rt2880-soc";
+
+ cpus {
+ cpu@0 {
+ compatible = "mips,mips4KEc";
+ };
+ };
+
+ cpuintc: cpuintc {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ palmbus@300000 {
+ compatible = "palmbus";
+ reg = <0x300000 0x200000>;
+ ranges = <0x0 0x300000 0x1FFFFF>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ sysc@0 {
+ compatible = "ralink,rt2880-sysc";
+ reg = <0x0 0x100>;
+ };
+
+ intc: intc@200 {
+ compatible = "ralink,rt2880-intc";
+ reg = <0x200 0x100>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
+
+ memc@300 {
+ compatible = "ralink,rt2880-memc";
+ reg = <0x300 0x100>;
+ };
+
+ uartlite@c00 {
+ compatible = "ralink,rt2880-uart", "ns16550a";
+ reg = <0xc00 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <8>;
+
+ reg-shift = <2>;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/ralink/rt2880_eval.dts b/arch/mips/boot/dts/ralink/rt2880_eval.dts
new file mode 100644
index 000000000..759bc1dd5
--- /dev/null
+++ b/arch/mips/boot/dts/ralink/rt2880_eval.dts
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "rt2880.dtsi"
+
+/ {
+ compatible = "ralink,rt2880-eval-board", "ralink,rt2880-soc";
+ model = "Ralink RT2880 evaluation board";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x8000000 0x2000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,57600";
+ };
+
+ cfi@1f000000 {
+ compatible = "cfi-flash";
+ reg = <0x1f000000 0x400000>;
+
+ bank-width = <2>;
+ device-width = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x0 0x30000>;
+ read-only;
+ };
+ partition@30000 {
+ label = "uboot-env";
+ reg = <0x30000 0x10000>;
+ read-only;
+ };
+ partition@40000 {
+ label = "calibration";
+ reg = <0x40000 0x10000>;
+ read-only;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x3b0000>;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/ralink/rt3050.dtsi b/arch/mips/boot/dts/ralink/rt3050.dtsi
new file mode 100644
index 000000000..23062333a
--- /dev/null
+++ b/arch/mips/boot/dts/ralink/rt3050.dtsi
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ralink,rt3050-soc", "ralink,rt3052-soc", "ralink,rt3350-soc";
+
+ cpus {
+ cpu@0 {
+ compatible = "mips,mips24KEc";
+ };
+ };
+
+ cpuintc: cpuintc {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ palmbus@10000000 {
+ compatible = "palmbus";
+ reg = <0x10000000 0x200000>;
+ ranges = <0x0 0x10000000 0x1FFFFF>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ sysc@0 {
+ compatible = "ralink,rt3052-sysc", "ralink,rt3050-sysc";
+ reg = <0x0 0x100>;
+ };
+
+ intc: intc@200 {
+ compatible = "ralink,rt3052-intc", "ralink,rt2880-intc";
+ reg = <0x200 0x100>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
+
+ memc@300 {
+ compatible = "ralink,rt3052-memc", "ralink,rt3050-memc";
+ reg = <0x300 0x100>;
+ };
+
+ uartlite@c00 {
+ compatible = "ralink,rt3052-uart", "ralink,rt2880-uart", "ns16550a";
+ reg = <0xc00 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <12>;
+
+ reg-shift = <2>;
+ };
+ };
+
+ usb@101c0000 {
+ compatible = "ralink,rt3050-usb", "snps,dwc2";
+ reg = <0x101c0000 0x40000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <18>;
+
+ status = "disabled";
+ };
+};
diff --git a/arch/mips/boot/dts/ralink/rt3052_eval.dts b/arch/mips/boot/dts/ralink/rt3052_eval.dts
new file mode 100644
index 000000000..6408ff629
--- /dev/null
+++ b/arch/mips/boot/dts/ralink/rt3052_eval.dts
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include "rt3050.dtsi"
+
+/ {
+ compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc";
+ model = "Ralink RT3052 evaluation board";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x2000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,57600";
+ };
+
+ cfi@1f000000 {
+ compatible = "cfi-flash";
+ reg = <0x1f000000 0x800000>;
+
+ bank-width = <2>;
+ device-width = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x0 0x30000>;
+ read-only;
+ };
+ partition@30000 {
+ label = "uboot-env";
+ reg = <0x30000 0x10000>;
+ read-only;
+ };
+ partition@40000 {
+ label = "calibration";
+ reg = <0x40000 0x10000>;
+ read-only;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x7b0000>;
+ };
+ };
+
+ usb@101c0000 {
+ status = "okay";
+ };
+};
diff --git a/arch/mips/boot/dts/ralink/rt3883.dtsi b/arch/mips/boot/dts/ralink/rt3883.dtsi
new file mode 100644
index 000000000..61132cf15
--- /dev/null
+++ b/arch/mips/boot/dts/ralink/rt3883.dtsi
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ralink,rt3883-soc";
+
+ cpus {
+ cpu@0 {
+ compatible = "mips,mips74Kc";
+ };
+ };
+
+ cpuintc: cpuintc {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ palmbus@10000000 {
+ compatible = "palmbus";
+ reg = <0x10000000 0x200000>;
+ ranges = <0x0 0x10000000 0x1FFFFF>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ sysc@0 {
+ compatible = "ralink,rt3883-sysc", "ralink,rt3050-sysc";
+ reg = <0x0 0x100>;
+ };
+
+ intc: intc@200 {
+ compatible = "ralink,rt3883-intc", "ralink,rt2880-intc";
+ reg = <0x200 0x100>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
+
+ memc@300 {
+ compatible = "ralink,rt3883-memc", "ralink,rt3050-memc";
+ reg = <0x300 0x100>;
+ };
+
+ uartlite@c00 {
+ compatible = "ralink,rt3883-uart", "ralink,rt2880-uart", "ns16550a";
+ reg = <0xc00 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <12>;
+
+ reg-shift = <2>;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/ralink/rt3883_eval.dts b/arch/mips/boot/dts/ralink/rt3883_eval.dts
new file mode 100644
index 000000000..c22bc84df
--- /dev/null
+++ b/arch/mips/boot/dts/ralink/rt3883_eval.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/include/ "rt3883.dtsi"
+
+/ {
+ compatible = "ralink,rt3883-eval-board", "ralink,rt3883-soc";
+ model = "Ralink RT3883 evaluation board";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x2000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,57600";
+ };
+};
diff --git a/arch/mips/boot/dts/ralink/vocore2.dts b/arch/mips/boot/dts/ralink/vocore2.dts
new file mode 100644
index 000000000..fa8a5f8f2
--- /dev/null
+++ b/arch/mips/boot/dts/ralink/vocore2.dts
@@ -0,0 +1,18 @@
+/dts-v1/;
+
+#include "mt7628a.dtsi"
+
+/ {
+ compatible = "vocore,vocore2", "ralink,mt7628a-soc";
+ model = "VoCore2";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x8000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS2,115200";
+ stdout-path = &uart2;
+ };
+};
diff --git a/arch/mips/boot/dts/xilfpga/Makefile b/arch/mips/boot/dts/xilfpga/Makefile
new file mode 100644
index 000000000..69ca00590
--- /dev/null
+++ b/arch/mips/boot/dts/xilfpga/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += nexys4ddr.dtb
diff --git a/arch/mips/boot/dts/xilfpga/microAptiv.dtsi b/arch/mips/boot/dts/xilfpga/microAptiv.dtsi
new file mode 100644
index 000000000..87b2b1f9a
--- /dev/null
+++ b/arch/mips/boot/dts/xilfpga/microAptiv.dtsi
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "img,xilfpga";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "mips,m14Kc";
+ clocks = <&ext>;
+ reg = <0>;
+ };
+ };
+
+ ext: ext {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+};
diff --git a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
new file mode 100644
index 000000000..cc8dbea09
--- /dev/null
+++ b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include "microAptiv.dtsi"
+
+/ {
+ compatible = "digilent,nexys4ddr";
+
+ aliases {
+ serial0 = &axi_uart16550;
+ };
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x08000000>;
+ };
+
+ cpuintc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ axi_intc: interrupt-controller@10200000 {
+ #interrupt-cells = <1>;
+ compatible = "xlnx,xps-intc-1.00.a";
+ interrupt-controller;
+ reg = <0x10200000 0x10000>;
+ xlnx,kind-of-intr = <0x0>;
+ xlnx,num-intr-inputs = <0x6>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <6>;
+ };
+
+ axi_gpio: gpio@10600000 {
+ #gpio-cells = <1>;
+ compatible = "xlnx,xps-gpio-1.00.a";
+ gpio-controller;
+ reg = <0x10600000 0x10000>;
+ xlnx,all-inputs = <0x0>;
+ xlnx,dout-default = <0x0>;
+ xlnx,gpio-width = <0x16>;
+ xlnx,interrupt-present = <0x0>;
+ xlnx,is-dual = <0x0>;
+ xlnx,tri-default = <0xffffffff>;
+ };
+
+ axi_ethernetlite: ethernet@10e00000 {
+ compatible = "xlnx,xps-ethernetlite-3.00.a";
+ device_type = "network";
+ interrupt-parent = <&axi_intc>;
+ interrupts = <1>;
+ phy-handle = <&phy0>;
+ reg = <0x10e00000 0x10000>;
+ xlnx,duplex = <0x1>;
+ xlnx,include-global-buffers = <0x1>;
+ xlnx,include-internal-loopback = <0x0>;
+ xlnx,include-mdio = <0x1>;
+ xlnx,instance = "axi_ethernetlite_inst";
+ xlnx,rx-ping-pong = <0x1>;
+ xlnx,s-axi-id-width = <0x1>;
+ xlnx,tx-ping-pong = <0x1>;
+ xlnx,use-internal = <0x0>;
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy0: phy@1 {
+ device_type = "ethernet-phy";
+ reg = <1>;
+ };
+ };
+ };
+
+ axi_uart16550: serial@10400000 {
+ compatible = "ns16550a";
+ reg = <0x10400000 0x10000>;
+
+ reg-shift = <2>;
+ reg-offset = <0x1000>;
+
+ clocks = <&ext>;
+
+ interrupt-parent = <&axi_intc>;
+ interrupts = <0>;
+ };
+
+ axi_i2c: i2c@10a00000 {
+ compatible = "xlnx,xps-iic-2.00.a";
+ interrupt-parent = <&axi_intc>;
+ interrupts = <4>;
+ reg = <0x10a00000 0x10000>;
+ clocks = <&ext>;
+ xlnx,clk-freq = <0x5f5e100>;
+ xlnx,family = "Artix7";
+ xlnx,gpo-width = <0x1>;
+ xlnx,iic-freq = <0x186a0>;
+ xlnx,scl-inertial-delay = <0x0>;
+ xlnx,sda-inertial-delay = <0x0>;
+ xlnx,ten-bit-adr = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ad7420@4b {
+ compatible = "adi,adt7420";
+ reg = <0x4b>;
+ };
+ };
+};
+
+&ext {
+ clock-frequency = <50000000>;
+};
diff --git a/arch/mips/boot/ecoff.h b/arch/mips/boot/ecoff.h
new file mode 100644
index 000000000..5be79ebfc
--- /dev/null
+++ b/arch/mips/boot/ecoff.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Some ECOFF definitions.
+ */
+
+#include <stdint.h>
+
+typedef struct filehdr {
+ uint16_t f_magic; /* magic number */
+ uint16_t f_nscns; /* number of sections */
+ int32_t f_timdat; /* time & date stamp */
+ int32_t f_symptr; /* file pointer to symbolic header */
+ int32_t f_nsyms; /* sizeof(symbolic hdr) */
+ uint16_t f_opthdr; /* sizeof(optional hdr) */
+ uint16_t f_flags; /* flags */
+} FILHDR;
+#define FILHSZ sizeof(FILHDR)
+
+#define MIPSEBMAGIC 0x160
+#define MIPSELMAGIC 0x162
+
+typedef struct scnhdr {
+ char s_name[8]; /* section name */
+ int32_t s_paddr; /* physical address, aliased s_nlib */
+ int32_t s_vaddr; /* virtual address */
+ int32_t s_size; /* section size */
+ int32_t s_scnptr; /* file ptr to raw data for section */
+ int32_t s_relptr; /* file ptr to relocation */
+ int32_t s_lnnoptr; /* file ptr to gp histogram */
+ uint16_t s_nreloc; /* number of relocation entries */
+ uint16_t s_nlnno; /* number of gp histogram entries */
+ int32_t s_flags; /* flags */
+} SCNHDR;
+#define SCNHSZ sizeof(SCNHDR)
+#define SCNROUND ((int32_t)16)
+
+typedef struct aouthdr {
+ int16_t magic; /* see above */
+ int16_t vstamp; /* version stamp */
+ int32_t tsize; /* text size in bytes, padded to DW bdry*/
+ int32_t dsize; /* initialized data " " */
+ int32_t bsize; /* uninitialized data " " */
+ int32_t entry; /* entry pt. */
+ int32_t text_start; /* base of text used for this file */
+ int32_t data_start; /* base of data used for this file */
+ int32_t bss_start; /* base of bss used for this file */
+ int32_t gprmask; /* general purpose register mask */
+ int32_t cprmask[4]; /* co-processor register masks */
+ int32_t gp_value; /* the gp value used for this object */
+} AOUTHDR;
+#define AOUTHSZ sizeof(AOUTHDR)
+
+#define OMAGIC 0407
+#define NMAGIC 0410
+#define ZMAGIC 0413
+#define SMAGIC 0411
+#define LIBMAGIC 0443
+
+#define N_TXTOFF(f, a) \
+ ((a).magic == ZMAGIC || (a).magic == LIBMAGIC ? 0 : \
+ ((a).vstamp < 23 ? \
+ ((FILHSZ + AOUTHSZ + (f).f_nscns * SCNHSZ + 7) & 0xfffffff8) : \
+ ((FILHSZ + AOUTHSZ + (f).f_nscns * SCNHSZ + SCNROUND-1) & ~(SCNROUND-1)) ) )
+#define N_DATOFF(f, a) \
+ (N_TXTOFF(f, a) + (a).tsize)
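+
+/*
+ * Worked example (assuming no structure padding, which holds for the
+ * naturally aligned fields above): FILHSZ = 20, AOUTHSZ = 56 and
+ * SCNHSZ = 40 bytes, so for a stripped image with three sections and
+ * vstamp >= 23, N_TXTOFF = (20 + 56 + 3*40 + 15) & ~15 = 208 (0xd0):
+ * the .text raw data starts at the first 16-byte boundary after the
+ * headers.
+ */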
diff --git a/arch/mips/boot/elf2ecoff.c b/arch/mips/boot/elf2ecoff.c
new file mode 100644
index 000000000..6972b9723
--- /dev/null
+++ b/arch/mips/boot/elf2ecoff.c
@@ -0,0 +1,621 @@
+/*
+ * Copyright (c) 1995
+ * Ted Lemon (hereinafter referred to as the author)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* elf2ecoff.c
+
+ This program converts an elf executable to an ECOFF executable.
+ No symbol table is retained. This is useful primarily in building
+ net-bootable kernels for machines (e.g., DECstation and Alpha) which
+ only support the ECOFF object file format. */
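+
+/* A typical invocation (for illustration) is
+ *	elf2ecoff vmlinux vmlinux.ecoff
+ * with an optional trailing -a to also emit empty .rdata, .sdata and
+ * .sbss section headers; see the usage message and addflag below. */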
+
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <elf.h>
+#include <limits.h>
+#include <netinet/in.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ecoff.h"
+
+/*
+ * Some extra ELF definitions
+ */
+#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */
+#define PT_MIPS_ABIFLAGS 0x70000003 /* Records ABI related flags */
+
+/* -------------------------------------------------------------------- */
+
+struct sect {
+ uint32_t vaddr;
+ uint32_t len;
+};
+
+int *symTypeTable;
+int must_convert_endian;
+int format_bigendian;
+
+static void copy(int out, int in, off_t offset, off_t size)
+{
+ char ibuf[4096];
+ int remaining, cur, count;
+
+ /* Go to the start of the ELF symbol table... */
+ if (lseek(in, offset, SEEK_SET) < 0) {
+ perror("copy: lseek");
+ exit(1);
+ }
+
+ remaining = size;
+ while (remaining) {
+ cur = remaining;
+ if (cur > sizeof ibuf)
+ cur = sizeof ibuf;
+ remaining -= cur;
+ if ((count = read(in, ibuf, cur)) != cur) {
+ fprintf(stderr, "copy: read: %s\n",
+ count ? strerror(errno) :
+ "premature end of file");
+ exit(1);
+ }
+ if ((count = write(out, ibuf, cur)) != cur) {
+ perror("copy: write");
+ exit(1);
+ }
+ }
+}
+
+/*
+ * Combine two segments, which must be contiguous. If pad is true, it's
+ * okay for there to be padding between.
+ */
+static void combine(struct sect *base, struct sect *new, int pad)
+{
+ if (!base->len)
+ *base = *new;
+ else if (new->len) {
+ if (base->vaddr + base->len != new->vaddr) {
+ if (pad)
+ base->len = new->vaddr - base->vaddr;
+ else {
+ fprintf(stderr,
+ "Non-contiguous data can't be converted.\n");
+ exit(1);
+ }
+ }
+ base->len += new->len;
+ }
+}
+
+static int phcmp(const void *v1, const void *v2)
+{
+ const Elf32_Phdr *h1 = v1;
+ const Elf32_Phdr *h2 = v2;
+
+ if (h1->p_vaddr > h2->p_vaddr)
+ return 1;
+ else if (h1->p_vaddr < h2->p_vaddr)
+ return -1;
+ else
+ return 0;
+}
+
+static char *saveRead(int file, off_t offset, off_t len, char *name)
+{
+ char *tmp;
+ int count;
+ off_t off;
+ if ((off = lseek(file, offset, SEEK_SET)) < 0) {
+ fprintf(stderr, "%s: fseek: %s\n", name, strerror(errno));
+ exit(1);
+ }
+ if (!(tmp = (char *) malloc(len))) {
+ fprintf(stderr, "%s: Can't allocate %ld bytes.\n", name,
+ len);
+ exit(1);
+ }
+ count = read(file, tmp, len);
+ if (count != len) {
+ fprintf(stderr, "%s: read: %s.\n",
+ name,
+ count ? strerror(errno) : "End of file reached");
+ exit(1);
+ }
+ return tmp;
+}
+
+#define swab16(x) \
+ ((uint16_t)( \
+ (((uint16_t)(x) & (uint16_t)0x00ffU) << 8) | \
+ (((uint16_t)(x) & (uint16_t)0xff00U) >> 8) ))
+
+#define swab32(x) \
+ ((unsigned int)( \
+ (((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
+ (((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \
+ (((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \
+ (((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24) ))
+
+static void convert_elf_hdr(Elf32_Ehdr * e)
+{
+ e->e_type = swab16(e->e_type);
+ e->e_machine = swab16(e->e_machine);
+ e->e_version = swab32(e->e_version);
+ e->e_entry = swab32(e->e_entry);
+ e->e_phoff = swab32(e->e_phoff);
+ e->e_shoff = swab32(e->e_shoff);
+ e->e_flags = swab32(e->e_flags);
+ e->e_ehsize = swab16(e->e_ehsize);
+ e->e_phentsize = swab16(e->e_phentsize);
+ e->e_phnum = swab16(e->e_phnum);
+ e->e_shentsize = swab16(e->e_shentsize);
+ e->e_shnum = swab16(e->e_shnum);
+ e->e_shstrndx = swab16(e->e_shstrndx);
+}
+
+static void convert_elf_phdrs(Elf32_Phdr * p, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++, p++) {
+ p->p_type = swab32(p->p_type);
+ p->p_offset = swab32(p->p_offset);
+ p->p_vaddr = swab32(p->p_vaddr);
+ p->p_paddr = swab32(p->p_paddr);
+ p->p_filesz = swab32(p->p_filesz);
+ p->p_memsz = swab32(p->p_memsz);
+ p->p_flags = swab32(p->p_flags);
+ p->p_align = swab32(p->p_align);
+ }
+
+}
+
+static void convert_elf_shdrs(Elf32_Shdr * s, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++, s++) {
+ s->sh_name = swab32(s->sh_name);
+ s->sh_type = swab32(s->sh_type);
+ s->sh_flags = swab32(s->sh_flags);
+ s->sh_addr = swab32(s->sh_addr);
+ s->sh_offset = swab32(s->sh_offset);
+ s->sh_size = swab32(s->sh_size);
+ s->sh_link = swab32(s->sh_link);
+ s->sh_info = swab32(s->sh_info);
+ s->sh_addralign = swab32(s->sh_addralign);
+ s->sh_entsize = swab32(s->sh_entsize);
+ }
+}
+
+static void convert_ecoff_filehdr(struct filehdr *f)
+{
+ f->f_magic = swab16(f->f_magic);
+ f->f_nscns = swab16(f->f_nscns);
+ f->f_timdat = swab32(f->f_timdat);
+ f->f_symptr = swab32(f->f_symptr);
+ f->f_nsyms = swab32(f->f_nsyms);
+ f->f_opthdr = swab16(f->f_opthdr);
+ f->f_flags = swab16(f->f_flags);
+}
+
+static void convert_ecoff_aouthdr(struct aouthdr *a)
+{
+ a->magic = swab16(a->magic);
+ a->vstamp = swab16(a->vstamp);
+ a->tsize = swab32(a->tsize);
+ a->dsize = swab32(a->dsize);
+ a->bsize = swab32(a->bsize);
+ a->entry = swab32(a->entry);
+ a->text_start = swab32(a->text_start);
+ a->data_start = swab32(a->data_start);
+ a->bss_start = swab32(a->bss_start);
+ a->gprmask = swab32(a->gprmask);
+ a->cprmask[0] = swab32(a->cprmask[0]);
+ a->cprmask[1] = swab32(a->cprmask[1]);
+ a->cprmask[2] = swab32(a->cprmask[2]);
+ a->cprmask[3] = swab32(a->cprmask[3]);
+ a->gp_value = swab32(a->gp_value);
+}
+
+static void convert_ecoff_esecs(struct scnhdr *s, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++, s++) {
+ s->s_paddr = swab32(s->s_paddr);
+ s->s_vaddr = swab32(s->s_vaddr);
+ s->s_size = swab32(s->s_size);
+ s->s_scnptr = swab32(s->s_scnptr);
+ s->s_relptr = swab32(s->s_relptr);
+ s->s_lnnoptr = swab32(s->s_lnnoptr);
+ s->s_nreloc = swab16(s->s_nreloc);
+ s->s_nlnno = swab16(s->s_nlnno);
+ s->s_flags = swab32(s->s_flags);
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ Elf32_Ehdr ex;
+ Elf32_Phdr *ph;
+ Elf32_Shdr *sh;
+ int i, pad;
+ struct sect text, data, bss;
+ struct filehdr efh;
+ struct aouthdr eah;
+ struct scnhdr esecs[6];
+ int infile, outfile;
+ uint32_t cur_vma = UINT32_MAX;
+ int addflag = 0;
+ int nosecs;
+
+ text.len = data.len = bss.len = 0;
+ text.vaddr = data.vaddr = bss.vaddr = 0;
+
+ /* Check args... */
+ if (argc < 3 || argc > 4) {
+ usage:
+ fprintf(stderr,
+ "usage: elf2ecoff <elf executable> <ecoff executable> [-a]\n");
+ exit(1);
+ }
+ if (argc == 4) {
+ if (strcmp(argv[3], "-a"))
+ goto usage;
+ addflag = 1;
+ }
+
+ /* Try the input file... */
+ if ((infile = open(argv[1], O_RDONLY)) < 0) {
+ fprintf(stderr, "Can't open %s for read: %s\n",
+ argv[1], strerror(errno));
+ exit(1);
+ }
+
+ /* Read the header, which is at the beginning of the file... */
+ i = read(infile, &ex, sizeof ex);
+ if (i != sizeof ex) {
+ fprintf(stderr, "ex: %s: %s.\n",
+ argv[1],
+ i ? strerror(errno) : "End of file reached");
+ exit(1);
+ }
+
+ if (ex.e_ident[EI_DATA] == ELFDATA2MSB)
+ format_bigendian = 1;
+
+ if (ntohs(0xaa55) == 0xaa55) {
+ if (!format_bigendian)
+ must_convert_endian = 1;
+ } else {
+ if (format_bigendian)
+ must_convert_endian = 1;
+ }
+ if (must_convert_endian)
+ convert_elf_hdr(&ex);
+
+ /* Read the program headers... */
+ ph = (Elf32_Phdr *) saveRead(infile, ex.e_phoff,
+ ex.e_phnum * sizeof(Elf32_Phdr),
+ "ph");
+ if (must_convert_endian)
+ convert_elf_phdrs(ph, ex.e_phnum);
+ /* Read the section headers... */
+ sh = (Elf32_Shdr *) saveRead(infile, ex.e_shoff,
+ ex.e_shnum * sizeof(Elf32_Shdr),
+ "sh");
+ if (must_convert_endian)
+ convert_elf_shdrs(sh, ex.e_shnum);
+
+ /* Figure out if we can cram the program header into an ECOFF
+ header... Basically, we can't handle anything but loadable
+ segments, but we can ignore some kinds of segments. We can't
+ handle holes in the address space. Segments may be out of order,
+ so we sort them first. */
+
+ qsort(ph, ex.e_phnum, sizeof(Elf32_Phdr), phcmp);
+
+ for (i = 0; i < ex.e_phnum; i++) {
+ /* Section types we can ignore... */
+ switch (ph[i].p_type) {
+ case PT_NULL:
+ case PT_NOTE:
+ case PT_PHDR:
+ case PT_MIPS_REGINFO:
+ case PT_MIPS_ABIFLAGS:
+ continue;
+
+ case PT_LOAD:
+ /* Writable (data) segment? */
+ if (ph[i].p_flags & PF_W) {
+ struct sect ndata, nbss;
+
+ ndata.vaddr = ph[i].p_vaddr;
+ ndata.len = ph[i].p_filesz;
+ nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz;
+ nbss.len = ph[i].p_memsz - ph[i].p_filesz;
+
+ combine(&data, &ndata, 0);
+ combine(&bss, &nbss, 1);
+ } else {
+ struct sect ntxt;
+
+ ntxt.vaddr = ph[i].p_vaddr;
+ ntxt.len = ph[i].p_filesz;
+
+ combine(&text, &ntxt, 0);
+ }
+ /* Remember the lowest segment start address. */
+ if (ph[i].p_vaddr < cur_vma)
+ cur_vma = ph[i].p_vaddr;
+ break;
+
+ default:
+ /* Section types we can't handle... */
+ fprintf(stderr,
+ "Program header %d type %d can't be converted.\n",
+ ex.e_phnum, ph[i].p_type);
+ exit(1);
+ }
+ }
+
+ /* Sections must be in order to be converted... */
+ if (text.vaddr > data.vaddr || data.vaddr > bss.vaddr ||
+ text.vaddr + text.len > data.vaddr
+ || data.vaddr + data.len > bss.vaddr) {
+ fprintf(stderr,
+ "Sections ordering prevents a.out conversion.\n");
+ exit(1);
+ }
+
+ /* If there's a data section but no text section, then the loader
+ combined everything into one section. That needs to be the
+ text section, so just make the data section zero length following
+ text. */
+ if (data.len && !text.len) {
+ text = data;
+ data.vaddr = text.vaddr + text.len;
+ data.len = 0;
+ }
+
+ /* If there is a gap between text and data, we'll fill it when we copy
+ the data, so update the length of the text segment as represented in
+ a.out to reflect that, since a.out doesn't allow gaps in the program
+ address space. */
+ if (text.vaddr + text.len < data.vaddr)
+ text.len = data.vaddr - text.vaddr;
+
+ /* We now have enough information to cons up an a.out header... */
+ eah.magic = OMAGIC;
+ eah.vstamp = 200;
+ eah.tsize = text.len;
+ eah.dsize = data.len;
+ eah.bsize = bss.len;
+ eah.entry = ex.e_entry;
+ eah.text_start = text.vaddr;
+ eah.data_start = data.vaddr;
+ eah.bss_start = bss.vaddr;
+ eah.gprmask = 0xf3fffffe;
+ memset(&eah.cprmask, '\0', sizeof eah.cprmask);
+ eah.gp_value = 0; /* unused. */
+
+ if (format_bigendian)
+ efh.f_magic = MIPSEBMAGIC;
+ else
+ efh.f_magic = MIPSELMAGIC;
+ if (addflag)
+ nosecs = 6;
+ else
+ nosecs = 3;
+ efh.f_nscns = nosecs;
+ efh.f_timdat = 0; /* bogus */
+ efh.f_symptr = 0;
+ efh.f_nsyms = 0;
+ efh.f_opthdr = sizeof eah;
+ efh.f_flags = 0x100f; /* Stripped, not sharable. */
+
+ memset(esecs, 0, sizeof esecs);
+ strcpy(esecs[0].s_name, ".text");
+ strcpy(esecs[1].s_name, ".data");
+ strcpy(esecs[2].s_name, ".bss");
+ if (addflag) {
+ strcpy(esecs[3].s_name, ".rdata");
+ strcpy(esecs[4].s_name, ".sdata");
+ strcpy(esecs[5].s_name, ".sbss");
+ }
+ esecs[0].s_paddr = esecs[0].s_vaddr = eah.text_start;
+ esecs[1].s_paddr = esecs[1].s_vaddr = eah.data_start;
+ esecs[2].s_paddr = esecs[2].s_vaddr = eah.bss_start;
+ if (addflag) {
+ esecs[3].s_paddr = esecs[3].s_vaddr = 0;
+ esecs[4].s_paddr = esecs[4].s_vaddr = 0;
+ esecs[5].s_paddr = esecs[5].s_vaddr = 0;
+ }
+ esecs[0].s_size = eah.tsize;
+ esecs[1].s_size = eah.dsize;
+ esecs[2].s_size = eah.bsize;
+ if (addflag) {
+ esecs[3].s_size = 0;
+ esecs[4].s_size = 0;
+ esecs[5].s_size = 0;
+ }
+ esecs[0].s_scnptr = N_TXTOFF(efh, eah);
+ esecs[1].s_scnptr = N_DATOFF(efh, eah);
+#define ECOFF_SEGMENT_ALIGNMENT(a) 0x10
+#define ECOFF_ROUND(s, a) (((s)+(a)-1)&~((a)-1))
+ esecs[2].s_scnptr = esecs[1].s_scnptr +
+ ECOFF_ROUND(esecs[1].s_size, ECOFF_SEGMENT_ALIGNMENT(&eah));
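+ /* ECOFF_ROUND(s, a) rounds s up to a multiple of a, e.g.
+ * ECOFF_ROUND(0x19, 0x10) == 0x20, so the .bss file pointer lands
+ * on the next 16-byte boundary after the .data contents. */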
+ if (addflag) {
+ esecs[3].s_scnptr = 0;
+ esecs[4].s_scnptr = 0;
+ esecs[5].s_scnptr = 0;
+ }
+ esecs[0].s_relptr = esecs[1].s_relptr = esecs[2].s_relptr = 0;
+ esecs[0].s_lnnoptr = esecs[1].s_lnnoptr = esecs[2].s_lnnoptr = 0;
+ esecs[0].s_nreloc = esecs[1].s_nreloc = esecs[2].s_nreloc = 0;
+ esecs[0].s_nlnno = esecs[1].s_nlnno = esecs[2].s_nlnno = 0;
+ if (addflag) {
+ esecs[3].s_relptr = esecs[4].s_relptr
+ = esecs[5].s_relptr = 0;
+ esecs[3].s_lnnoptr = esecs[4].s_lnnoptr
+ = esecs[5].s_lnnoptr = 0;
+ esecs[3].s_nreloc = esecs[4].s_nreloc = esecs[5].s_nreloc =
+ 0;
+ esecs[3].s_nlnno = esecs[4].s_nlnno = esecs[5].s_nlnno = 0;
+ }
+ esecs[0].s_flags = 0x20;
+ esecs[1].s_flags = 0x40;
+ esecs[2].s_flags = 0x82;
+ if (addflag) {
+ esecs[3].s_flags = 0x100;
+ esecs[4].s_flags = 0x200;
+ esecs[5].s_flags = 0x400;
+ }
+
+ /* Make the output file... */
+ if ((outfile = open(argv[2], O_WRONLY | O_CREAT, 0777)) < 0) {
+ fprintf(stderr, "Unable to create %s: %s\n", argv[2],
+ strerror(errno));
+ exit(1);
+ }
+
+ if (must_convert_endian)
+ convert_ecoff_filehdr(&efh);
+ /* Write the headers... */
+ i = write(outfile, &efh, sizeof efh);
+ if (i != sizeof efh) {
+ perror("efh: write");
+ exit(1);
+
+ for (i = 0; i < nosecs; i++) {
+ printf
+ ("Section %d: %s phys %"PRIx32" size %"PRIx32"\t file offset %"PRIx32"\n",
+ i, esecs[i].s_name, esecs[i].s_paddr,
+ esecs[i].s_size, esecs[i].s_scnptr);
+ }
+ }
+ fprintf(stderr, "wrote %d byte file header.\n", i);
+
+ if (must_convert_endian)
+ convert_ecoff_aouthdr(&eah);
+ i = write(outfile, &eah, sizeof eah);
+ if (i != sizeof eah) {
+ perror("eah: write");
+ exit(1);
+ }
+ fprintf(stderr, "wrote %d byte a.out header.\n", i);
+
+ if (must_convert_endian)
+ convert_ecoff_esecs(&esecs[0], nosecs);
+ i = write(outfile, &esecs, nosecs * sizeof(struct scnhdr));
+ if (i != nosecs * sizeof(struct scnhdr)) {
+ perror("esecs: write");
+ exit(1);
+ }
+ fprintf(stderr, "wrote %d bytes of section headers.\n", i);
+
+ pad = (sizeof(efh) + sizeof(eah) + nosecs * sizeof(struct scnhdr)) & 15;
+ if (pad) {
+ pad = 16 - pad;
+ i = write(outfile, "\0\0\0\0\0\0\0\0\0\0\0\0\0\0", pad);
+ if (i < 0) {
+ perror("ipad: write");
+ exit(1);
+ }
+ fprintf(stderr, "wrote %d byte pad.\n", i);
+ }
+
+ /*
+ * Copy the loadable sections. Zero-fill any gaps less than 64k;
+ * complain about any zero-filling, and die if we're asked to zero-fill
+ * more than 64k.
+ */
+ for (i = 0; i < ex.e_phnum; i++) {
+ /* Unprocessable sections were handled above, so just verify that
+ the section can be loaded before copying. */
+ if (ph[i].p_type == PT_LOAD && ph[i].p_filesz) {
+ if (cur_vma != ph[i].p_vaddr) {
+ uint32_t gap = ph[i].p_vaddr - cur_vma;
+ char obuf[1024];
+ if (gap > 65536) {
+ fprintf(stderr,
+ "Intersegment gap (%"PRId32" bytes) too large.\n",
+ gap);
+ exit(1);
+ }
+ fprintf(stderr,
+ "Warning: %d byte intersegment gap.\n",
+ gap);
+ memset(obuf, 0, sizeof obuf);
+ while (gap) {
+ int count =
+ write(outfile, obuf,
+ (gap >
+ sizeof obuf ? sizeof
+ obuf : gap));
+ if (count < 0) {
+ fprintf(stderr,
+ "Error writing gap: %s\n",
+ strerror(errno));
+ exit(1);
+ }
+ gap -= count;
+ }
+ }
+ fprintf(stderr, "writing %d bytes...\n",
+ ph[i].p_filesz);
+ copy(outfile, infile, ph[i].p_offset,
+ ph[i].p_filesz);
+ cur_vma = ph[i].p_vaddr + ph[i].p_filesz;
+ }
+ }
+
+ /*
+ * Write a page of padding for boot PROMS that read entire pages.
+ * Without this, they may attempt to read past the end of the
+ * data section, incur an error, and refuse to boot.
+ */
+ {
+ char obuf[4096];
+ memset(obuf, 0, sizeof obuf);
+ if (write(outfile, obuf, sizeof(obuf)) != sizeof(obuf)) {
+ fprintf(stderr, "Error writing PROM padding: %s\n",
+ strerror(errno));
+ exit(1);
+ }
+ }
+
+ /* Looks like we won... */
+ exit(0);
+}
diff --git a/arch/mips/boot/tools/.gitignore b/arch/mips/boot/tools/.gitignore
new file mode 100644
index 000000000..d36dc7cf9
--- /dev/null
+++ b/arch/mips/boot/tools/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+relocs
diff --git a/arch/mips/boot/tools/Makefile b/arch/mips/boot/tools/Makefile
new file mode 100644
index 000000000..592e05a51
--- /dev/null
+++ b/arch/mips/boot/tools/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+hostprogs += relocs
+relocs-objs += relocs_32.o
+relocs-objs += relocs_64.o
+relocs-objs += relocs_main.o
+PHONY += relocs
+relocs: $(obj)/relocs
+ @:
diff --git a/arch/mips/boot/tools/relocs.c b/arch/mips/boot/tools/relocs.c
new file mode 100644
index 000000000..1bf53f352
--- /dev/null
+++ b/arch/mips/boot/tools/relocs.c
@@ -0,0 +1,681 @@
+// SPDX-License-Identifier: GPL-2.0
+/* This is included from relocs_32/64.c */
+
+#define ElfW(type) _ElfW(ELF_BITS, type)
+#define _ElfW(bits, type) __ElfW(bits, type)
+#define __ElfW(bits, type) Elf##bits##_##type
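+/* e.g. with ELF_BITS defined as 32 by the including file, ElfW(Shdr)
+ * expands to Elf32_Shdr */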
+
+#define Elf_Addr ElfW(Addr)
+#define Elf_Ehdr ElfW(Ehdr)
+#define Elf_Phdr ElfW(Phdr)
+#define Elf_Shdr ElfW(Shdr)
+#define Elf_Sym ElfW(Sym)
+
+static Elf_Ehdr ehdr;
+
+struct relocs {
+ uint32_t *offset;
+ unsigned long count;
+ unsigned long size;
+};
+
+static struct relocs relocs;
+
+struct section {
+ Elf_Shdr shdr;
+ struct section *link;
+ Elf_Sym *symtab;
+ Elf_Rel *reltab;
+ char *strtab;
+ long shdr_offset;
+};
+static struct section *secs;
+
+static const char * const regex_sym_kernel = {
+/* Symbols matching these regexes should never be relocated */
+ "^(__crc_)",
+};
+
+static regex_t sym_regex_c;
+
+static int regex_skip_reloc(const char *sym_name)
+{
+ return !regexec(&sym_regex_c, sym_name, 0, NULL, 0);
+}
+
+static void regex_init(void)
+{
+ char errbuf[128];
+ int err;
+
+ err = regcomp(&sym_regex_c, regex_sym_kernel,
+ REG_EXTENDED|REG_NOSUB);
+
+ if (err) {
+ regerror(err, &sym_regex_c, errbuf, sizeof(errbuf));
+ die("%s", errbuf);
+ }
+}
+
+static const char *rel_type(unsigned type)
+{
+ static const char * const type_name[] = {
+#define REL_TYPE(X)[X] = #X
+ REL_TYPE(R_MIPS_NONE),
+ REL_TYPE(R_MIPS_16),
+ REL_TYPE(R_MIPS_32),
+ REL_TYPE(R_MIPS_REL32),
+ REL_TYPE(R_MIPS_26),
+ REL_TYPE(R_MIPS_HI16),
+ REL_TYPE(R_MIPS_LO16),
+ REL_TYPE(R_MIPS_GPREL16),
+ REL_TYPE(R_MIPS_LITERAL),
+ REL_TYPE(R_MIPS_GOT16),
+ REL_TYPE(R_MIPS_PC16),
+ REL_TYPE(R_MIPS_CALL16),
+ REL_TYPE(R_MIPS_GPREL32),
+ REL_TYPE(R_MIPS_64),
+ REL_TYPE(R_MIPS_HIGHER),
+ REL_TYPE(R_MIPS_HIGHEST),
+ REL_TYPE(R_MIPS_PC21_S2),
+ REL_TYPE(R_MIPS_PC26_S2),
+#undef REL_TYPE
+ };
+ const char *name = "unknown type rel type name";
+
+ if (type < ARRAY_SIZE(type_name) && type_name[type])
+ name = type_name[type];
+ return name;
+}
+
+static const char *sec_name(unsigned shndx)
+{
+ const char *sec_strtab;
+ const char *name;
+
+ sec_strtab = secs[ehdr.e_shstrndx].strtab;
+ if (shndx < ehdr.e_shnum)
+ name = sec_strtab + secs[shndx].shdr.sh_name;
+ else if (shndx == SHN_ABS)
+ name = "ABSOLUTE";
+ else if (shndx == SHN_COMMON)
+ name = "COMMON";
+ else
+ name = "<noname>";
+ return name;
+}
+
+static struct section *sec_lookup(const char *secname)
+{
+ int i;
+
+ for (i = 0; i < ehdr.e_shnum; i++)
+ if (strcmp(secname, sec_name(i)) == 0)
+ return &secs[i];
+
+ return NULL;
+}
+
+static const char *sym_name(const char *sym_strtab, Elf_Sym *sym)
+{
+ const char *name;
+
+ if (sym->st_name)
+ name = sym_strtab + sym->st_name;
+ else
+ name = sec_name(sym->st_shndx);
+ return name;
+}
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define le16_to_cpu(val) (val)
+#define le32_to_cpu(val) (val)
+#define le64_to_cpu(val) (val)
+#define be16_to_cpu(val) bswap_16(val)
+#define be32_to_cpu(val) bswap_32(val)
+#define be64_to_cpu(val) bswap_64(val)
+
+#define cpu_to_le16(val) (val)
+#define cpu_to_le32(val) (val)
+#define cpu_to_le64(val) (val)
+#define cpu_to_be16(val) bswap_16(val)
+#define cpu_to_be32(val) bswap_32(val)
+#define cpu_to_be64(val) bswap_64(val)
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+#define le16_to_cpu(val) bswap_16(val)
+#define le32_to_cpu(val) bswap_32(val)
+#define le64_to_cpu(val) bswap_64(val)
+#define be16_to_cpu(val) (val)
+#define be32_to_cpu(val) (val)
+#define be64_to_cpu(val) (val)
+
+#define cpu_to_le16(val) bswap_16(val)
+#define cpu_to_le32(val) bswap_32(val)
+#define cpu_to_le64(val) bswap_64(val)
+#define cpu_to_be16(val) (val)
+#define cpu_to_be32(val) (val)
+#define cpu_to_be64(val) (val)
+#endif
+
+static uint16_t elf16_to_cpu(uint16_t val)
+{
+ if (ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+ return le16_to_cpu(val);
+ else
+ return be16_to_cpu(val);
+}
+
+static uint32_t elf32_to_cpu(uint32_t val)
+{
+ if (ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+ return le32_to_cpu(val);
+ else
+ return be32_to_cpu(val);
+}
+
+static uint32_t cpu_to_elf32(uint32_t val)
+{
+ if (ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+ return cpu_to_le32(val);
+ else
+ return cpu_to_be32(val);
+}
+
+#define elf_half_to_cpu(x) elf16_to_cpu(x)
+#define elf_word_to_cpu(x) elf32_to_cpu(x)
+
+#if ELF_BITS == 64
+static uint64_t elf64_to_cpu(uint64_t val)
+{
+ if (ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+ return le64_to_cpu(val);
+ else
+ return be64_to_cpu(val);
+}
+#define elf_addr_to_cpu(x) elf64_to_cpu(x)
+#define elf_off_to_cpu(x) elf64_to_cpu(x)
+#define elf_xword_to_cpu(x) elf64_to_cpu(x)
+#else
+#define elf_addr_to_cpu(x) elf32_to_cpu(x)
+#define elf_off_to_cpu(x) elf32_to_cpu(x)
+#define elf_xword_to_cpu(x) elf32_to_cpu(x)
+#endif
+
+static void read_ehdr(FILE *fp)
+{
+ if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
+ die("Cannot read ELF header: %s\n", strerror(errno));
+
+ if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0)
+ die("No ELF magic\n");
+
+ if (ehdr.e_ident[EI_CLASS] != ELF_CLASS)
+ die("Not a %d bit executable\n", ELF_BITS);
+
+ if ((ehdr.e_ident[EI_DATA] != ELFDATA2LSB) &&
+ (ehdr.e_ident[EI_DATA] != ELFDATA2MSB))
+ die("Unknown ELF Endianness\n");
+
+ if (ehdr.e_ident[EI_VERSION] != EV_CURRENT)
+ die("Unknown ELF version\n");
+
+ /* Convert the fields to native endian */
+ ehdr.e_type = elf_half_to_cpu(ehdr.e_type);
+ ehdr.e_machine = elf_half_to_cpu(ehdr.e_machine);
+ ehdr.e_version = elf_word_to_cpu(ehdr.e_version);
+ ehdr.e_entry = elf_addr_to_cpu(ehdr.e_entry);
+ ehdr.e_phoff = elf_off_to_cpu(ehdr.e_phoff);
+ ehdr.e_shoff = elf_off_to_cpu(ehdr.e_shoff);
+ ehdr.e_flags = elf_word_to_cpu(ehdr.e_flags);
+ ehdr.e_ehsize = elf_half_to_cpu(ehdr.e_ehsize);
+ ehdr.e_phentsize = elf_half_to_cpu(ehdr.e_phentsize);
+ ehdr.e_phnum = elf_half_to_cpu(ehdr.e_phnum);
+ ehdr.e_shentsize = elf_half_to_cpu(ehdr.e_shentsize);
+ ehdr.e_shnum = elf_half_to_cpu(ehdr.e_shnum);
+ ehdr.e_shstrndx = elf_half_to_cpu(ehdr.e_shstrndx);
+
+ if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN))
+ die("Unsupported ELF header type\n");
+
+ if (ehdr.e_machine != ELF_MACHINE)
+ die("Not for %s\n", ELF_MACHINE_NAME);
+
+ if (ehdr.e_version != EV_CURRENT)
+ die("Unknown ELF version\n");
+
+ if (ehdr.e_ehsize != sizeof(Elf_Ehdr))
+ die("Bad Elf header size\n");
+
+ if (ehdr.e_phentsize != sizeof(Elf_Phdr))
+ die("Bad program header entry\n");
+
+ if (ehdr.e_shentsize != sizeof(Elf_Shdr))
+ die("Bad section header entry\n");
+
+ if (ehdr.e_shstrndx >= ehdr.e_shnum)
+ die("String table index out of bounds\n");
+}
+
+static void read_shdrs(FILE *fp)
+{
+ int i;
+ Elf_Shdr shdr;
+
+ secs = calloc(ehdr.e_shnum, sizeof(struct section));
+ if (!secs)
+ die("Unable to allocate %d section headers\n", ehdr.e_shnum);
+
+ if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0)
+ die("Seek to %d failed: %s\n", ehdr.e_shoff, strerror(errno));
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+
+ sec->shdr_offset = ftell(fp);
+ if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
+ die("Cannot read ELF section headers %d/%d: %s\n",
+ i, ehdr.e_shnum, strerror(errno));
+ sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name);
+ sec->shdr.sh_type = elf_word_to_cpu(shdr.sh_type);
+ sec->shdr.sh_flags = elf_xword_to_cpu(shdr.sh_flags);
+ sec->shdr.sh_addr = elf_addr_to_cpu(shdr.sh_addr);
+ sec->shdr.sh_offset = elf_off_to_cpu(shdr.sh_offset);
+ sec->shdr.sh_size = elf_xword_to_cpu(shdr.sh_size);
+ sec->shdr.sh_link = elf_word_to_cpu(shdr.sh_link);
+ sec->shdr.sh_info = elf_word_to_cpu(shdr.sh_info);
+ sec->shdr.sh_addralign = elf_xword_to_cpu(shdr.sh_addralign);
+ sec->shdr.sh_entsize = elf_xword_to_cpu(shdr.sh_entsize);
+ if (sec->shdr.sh_link < ehdr.e_shnum)
+ sec->link = &secs[sec->shdr.sh_link];
+ }
+}
+
+static void read_strtabs(FILE *fp)
+{
+ int i;
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+
+ if (sec->shdr.sh_type != SHT_STRTAB)
+ continue;
+
+ sec->strtab = malloc(sec->shdr.sh_size);
+ if (!sec->strtab)
+ die("malloc of %d bytes for strtab failed\n",
+ sec->shdr.sh_size);
+
+ if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0)
+ die("Seek to %d failed: %s\n",
+ sec->shdr.sh_offset, strerror(errno));
+
+ if (fread(sec->strtab, 1, sec->shdr.sh_size, fp) !=
+ sec->shdr.sh_size)
+ die("Cannot read symbol table: %s\n", strerror(errno));
+ }
+}
+
+static void read_symtabs(FILE *fp)
+{
+ int i, j;
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_SYMTAB)
+ continue;
+
+ sec->symtab = malloc(sec->shdr.sh_size);
+ if (!sec->symtab)
+ die("malloc of %d bytes for symtab failed\n",
+ sec->shdr.sh_size);
+
+ if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0)
+ die("Seek to %d failed: %s\n",
+ sec->shdr.sh_offset, strerror(errno));
+
+ if (fread(sec->symtab, 1, sec->shdr.sh_size, fp) !=
+ sec->shdr.sh_size)
+ die("Cannot read symbol table: %s\n", strerror(errno));
+
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Sym); j++) {
+ Elf_Sym *sym = &sec->symtab[j];
+
+ sym->st_name = elf_word_to_cpu(sym->st_name);
+ sym->st_value = elf_addr_to_cpu(sym->st_value);
+ sym->st_size = elf_xword_to_cpu(sym->st_size);
+ sym->st_shndx = elf_half_to_cpu(sym->st_shndx);
+ }
+ }
+}
+
+static void read_relocs(FILE *fp)
+{
+ static unsigned long base = 0;
+ int i, j;
+
+ if (!base) {
+ struct section *sec = sec_lookup(".text");
+
+ if (!sec)
+ die("Could not find .text section\n");
+
+ base = sec->shdr.sh_addr;
+ }
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+
+ if (sec->shdr.sh_type != SHT_REL_TYPE)
+ continue;
+
+ sec->reltab = malloc(sec->shdr.sh_size);
+ if (!sec->reltab)
+ die("malloc of %d bytes for relocs failed\n",
+ sec->shdr.sh_size);
+
+ if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0)
+ die("Seek to %d failed: %s\n",
+ sec->shdr.sh_offset, strerror(errno));
+
+ if (fread(sec->reltab, 1, sec->shdr.sh_size, fp) !=
+ sec->shdr.sh_size)
+ die("Cannot read symbol table: %s\n", strerror(errno));
+
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
+ Elf_Rel *rel = &sec->reltab[j];
+
+ rel->r_offset = elf_addr_to_cpu(rel->r_offset);
+ /* Set offset into kernel image */
+ rel->r_offset -= base;
+#if (ELF_BITS == 32)
+ rel->r_info = elf_xword_to_cpu(rel->r_info);
+#else
+ /* Convert MIPS64 RELA format - only the symbol
+ * index needs converting to native endianness
+ */
+ rel->r_info = rel->r_info;
+ ELF_R_SYM(rel->r_info) = elf32_to_cpu(ELF_R_SYM(rel->r_info));
+#endif
+#if (SHT_REL_TYPE == SHT_RELA)
+ rel->r_addend = elf_xword_to_cpu(rel->r_addend);
+#endif
+ }
+ }
+}
+
+static void remove_relocs(FILE *fp)
+{
+ int i;
+ Elf_Shdr shdr;
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+
+ if (sec->shdr.sh_type != SHT_REL_TYPE)
+ continue;
+
+ if (fseek(fp, sec->shdr_offset, SEEK_SET) < 0)
+ die("Seek to %d failed: %s\n",
+ sec->shdr_offset, strerror(errno));
+
+ if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
+ die("Cannot read ELF section headers %d/%d: %s\n",
+ i, ehdr.e_shnum, strerror(errno));
+
+ /* Set relocation section size to 0, effectively removing it.
+ * This is necessary due to lack of support for relocations
+ * in objcopy when creating 32bit elf from 64bit elf.
+ */
+ shdr.sh_size = 0;
+
+ if (fseek(fp, sec->shdr_offset, SEEK_SET) < 0)
+ die("Seek to %d failed: %s\n",
+ sec->shdr_offset, strerror(errno));
+
+ if (fwrite(&shdr, sizeof(shdr), 1, fp) != 1)
+ die("Cannot write ELF section headers %d/%d: %s\n",
+ i, ehdr.e_shnum, strerror(errno));
+ }
+}
+
+static void add_reloc(struct relocs *r, uint32_t offset, unsigned type)
+{
+ /* Relocation representation in binary table:
+ * |76543210|76543210|76543210|76543210|
+ * | Type | offset from _text >> 2 |
+ */
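+ /* For example, an R_MIPS_HI16 (type 5) at offset 0x1234 from _text
+ * is stored as (5 << 24) | (0x1234 >> 2) == 0x0500048d.
+ */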
+ offset >>= 2;
+ if (offset > 0x00FFFFFF)
+ die("Kernel image exceeds maximum size for relocation!\n");
+
+ offset = (offset & 0x00FFFFFF) | ((type & 0xFF) << 24);
+
+ if (r->count == r->size) {
+ unsigned long newsize = r->size + 50000;
+ void *mem = realloc(r->offset, newsize * sizeof(r->offset[0]));
+
+ if (!mem)
+ die("realloc failed\n");
+
+ r->offset = mem;
+ r->size = newsize;
+ }
+ r->offset[r->count++] = offset;
+}
+
+static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
+ Elf_Sym *sym, const char *symname))
+{
+ int i;
+
+ /* Walk through the relocations */
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ char *sym_strtab;
+ Elf_Sym *sh_symtab;
+ struct section *sec_applies, *sec_symtab;
+ int j;
+ struct section *sec = &secs[i];
+
+ if (sec->shdr.sh_type != SHT_REL_TYPE)
+ continue;
+
+ sec_symtab = sec->link;
+ sec_applies = &secs[sec->shdr.sh_info];
+ if (!(sec_applies->shdr.sh_flags & SHF_ALLOC))
+ continue;
+
+ sh_symtab = sec_symtab->symtab;
+ sym_strtab = sec_symtab->link->strtab;
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
+ Elf_Rel *rel = &sec->reltab[j];
+ Elf_Sym *sym = &sh_symtab[ELF_R_SYM(rel->r_info)];
+ const char *symname = sym_name(sym_strtab, sym);
+
+ process(sec, rel, sym, symname);
+ }
+ }
+}
+
+static int do_reloc(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
+ const char *symname)
+{
+ unsigned r_type = ELF_R_TYPE(rel->r_info);
+ unsigned bind = ELF_ST_BIND(sym->st_info);
+
+ if ((bind == STB_WEAK) && (sym->st_value == 0)) {
+ /* Don't relocate weak symbols without a target */
+ return 0;
+ }
+
+ if (regex_skip_reloc(symname))
+ return 0;
+
+ switch (r_type) {
+ case R_MIPS_NONE:
+ case R_MIPS_REL32:
+ case R_MIPS_PC16:
+ case R_MIPS_PC21_S2:
+ case R_MIPS_PC26_S2:
+ /*
+ * NONE can be ignored and PC relative relocations don't
+ * need to be adjusted.
+ */
+ case R_MIPS_HIGHEST:
+ case R_MIPS_HIGHER:
+ /* We support relocating within the same 4Gb segment only,
+ * thus leaving the top 32bits unchanged
+ */
+ case R_MIPS_LO16:
+ /* We support relocating by 64k jumps only
+ * thus leaving the bottom 16bits unchanged
+ */
+ break;
+
+ case R_MIPS_64:
+ case R_MIPS_32:
+ case R_MIPS_26:
+ case R_MIPS_HI16:
+ add_reloc(&relocs, rel->r_offset, r_type);
+ break;
+
+ default:
+ die("Unsupported relocation type: %s (%d)\n",
+ rel_type(r_type), r_type);
+ break;
+ }
+
+ return 0;
+}
+
+static int write_reloc_as_bin(uint32_t v, FILE *f)
+{
+ unsigned char buf[4];
+
+ v = cpu_to_elf32(v);
+
+ memcpy(buf, &v, sizeof(uint32_t));
+ return fwrite(buf, 1, 4, f);
+}
+
+static int write_reloc_as_text(uint32_t v, FILE *f)
+{
+ int res;
+
+ res = fprintf(f, "\t.long 0x%08"PRIx32"\n", v);
+ if (res < 0)
+ return res;
+ else
+ return sizeof(uint32_t);
+}
+
+static void emit_relocs(int as_text, int as_bin, FILE *outf)
+{
+ int i;
+ int (*write_reloc)(uint32_t, FILE *) = write_reloc_as_bin;
+ int size = 0;
+ int size_reserved;
+ struct section *sec_reloc;
+
+ sec_reloc = sec_lookup(".data.reloc");
+ if (!sec_reloc)
+ die("Could not find relocation section\n");
+
+ size_reserved = sec_reloc->shdr.sh_size;
+
+ /* Collect up the relocations */
+ walk_relocs(do_reloc);
+
+ /* Print the relocations */
+ if (as_text) {
+ /* Print the relocations in a form that
+ * gas will accept.
+ */
+ printf(".section \".data.reloc\",\"a\"\n");
+ printf(".balign 4\n");
+ /* Output text to stdout */
+ write_reloc = write_reloc_as_text;
+ outf = stdout;
+ } else if (as_bin) {
+ /* Output raw binary to stdout */
+ outf = stdout;
+ } else {
+ /* Seek to offset of the relocation section.
+ * Each relocation is then written into the
+ * vmlinux kernel image.
+ */
+ if (fseek(outf, sec_reloc->shdr.sh_offset, SEEK_SET) < 0) {
+ die("Seek to %d failed: %s\n",
+ sec_reloc->shdr.sh_offset, strerror(errno));
+ }
+ }
+
+ for (i = 0; i < relocs.count; i++)
+ size += write_reloc(relocs.offset[i], outf);
+
+ /* Print a stop, but only if we've actually written some relocs */
+ if (size)
+ size += write_reloc(0, outf);
+
+ if (size > size_reserved)
+ /* Die, but suggest a value for CONFIG_RELOCATION_TABLE_SIZE
+ * which will fix this problem and allow a bit of headroom
+ * if more kernel features are enabled
+ */
+ die("Relocations overflow available space!\n" \
+ "Please adjust CONFIG_RELOCATION_TABLE_SIZE " \
+ "to at least 0x%08x\n", (size + 0x1000) & ~0xFFF);
+}
+
+/*
+ * As an aid to debugging problems with different linkers
+ * print summary information about the relocs.
+ * Since different linkers tend to emit the sections in
+ * different orders we use the section names in the output.
+ */
+static int do_reloc_info(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
+ const char *symname)
+{
+ printf("%16s 0x%08x %16s %40s %16s\n",
+ sec_name(sec->shdr.sh_info),
+ (unsigned int)rel->r_offset,
+ rel_type(ELF_R_TYPE(rel->r_info)),
+ symname,
+ sec_name(sym->st_shndx));
+ return 0;
+}
+
+static void print_reloc_info(void)
+{
+ printf("%16s %10s %16s %40s %16s\n",
+ "reloc section",
+ "offset",
+ "reloc type",
+ "symbol",
+ "symbol section");
+ walk_relocs(do_reloc_info);
+}
+
+#if ELF_BITS == 64
+# define process process_64
+#else
+# define process process_32
+#endif
+
+void process(FILE *fp, int as_text, int as_bin,
+ int show_reloc_info, int keep_relocs)
+{
+ regex_init();
+ read_ehdr(fp);
+ read_shdrs(fp);
+ read_strtabs(fp);
+ read_symtabs(fp);
+ read_relocs(fp);
+ if (show_reloc_info) {
+ print_reloc_info();
+ return;
+ }
+ emit_relocs(as_text, as_bin, fp);
+ if (!keep_relocs)
+ remove_relocs(fp);
+}
diff --git a/arch/mips/boot/tools/relocs.h b/arch/mips/boot/tools/relocs.h
new file mode 100644
index 000000000..607ff0103
--- /dev/null
+++ b/arch/mips/boot/tools/relocs.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef RELOCS_H
+#define RELOCS_H
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <elf.h>
+#include <byteswap.h>
+#define USE_BSD
+#include <endian.h>
+#include <regex.h>
+
+void die(char *fmt, ...);
+
+/*
+ * Introduced for MIPSr6
+ */
+#ifndef R_MIPS_PC21_S2
+#define R_MIPS_PC21_S2 60
+#endif
+
+#ifndef R_MIPS_PC26_S2
+#define R_MIPS_PC26_S2 61
+#endif
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+enum symtype {
+ S_ABS,
+ S_REL,
+ S_SEG,
+ S_LIN,
+ S_NSYMTYPES
+};
+
+void process_32(FILE *fp, int as_text, int as_bin,
+ int show_reloc_info, int keep_relocs);
+void process_64(FILE *fp, int as_text, int as_bin,
+ int show_reloc_info, int keep_relocs);
+#endif /* RELOCS_H */
diff --git a/arch/mips/boot/tools/relocs_32.c b/arch/mips/boot/tools/relocs_32.c
new file mode 100644
index 000000000..428bea489
--- /dev/null
+++ b/arch/mips/boot/tools/relocs_32.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "relocs.h"
+
+#define ELF_BITS 32
+
+#define ELF_MACHINE EM_MIPS
+#define ELF_MACHINE_NAME "MIPS"
+#define SHT_REL_TYPE SHT_REL
+#define Elf_Rel ElfW(Rel)
+
+#define ELF_CLASS ELFCLASS32
+#define ELF_R_SYM(val) ELF32_R_SYM(val)
+#define ELF_R_TYPE(val) ELF32_R_TYPE(val)
+#define ELF_ST_TYPE(o) ELF32_ST_TYPE(o)
+#define ELF_ST_BIND(o) ELF32_ST_BIND(o)
+#define ELF_ST_VISIBILITY(o) ELF32_ST_VISIBILITY(o)
+
+#include "relocs.c"
diff --git a/arch/mips/boot/tools/relocs_64.c b/arch/mips/boot/tools/relocs_64.c
new file mode 100644
index 000000000..154015d74
--- /dev/null
+++ b/arch/mips/boot/tools/relocs_64.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "relocs.h"
+
+#define ELF_BITS 64
+
+#define ELF_MACHINE EM_MIPS
+#define ELF_MACHINE_NAME "MIPS64"
+#define SHT_REL_TYPE SHT_RELA
+#define Elf_Rel Elf64_Rela
+
+typedef uint8_t Elf64_Byte;
+
+typedef union {
+ struct {
+ Elf64_Word r_sym; /* Symbol index. */
+ Elf64_Byte r_ssym; /* Special symbol. */
+ Elf64_Byte r_type3; /* Third relocation. */
+ Elf64_Byte r_type2; /* Second relocation. */
+ Elf64_Byte r_type; /* First relocation. */
+ } fields;
+ Elf64_Xword unused;
+} Elf64_Mips_Rela;
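+
+/* ELF_R_SYM()/ELF_R_TYPE() below reinterpret the 64-bit r_info field
+ * through this layout, so the symbol index and primary relocation type
+ * can be read and written in place without manual shifting.
+ */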
+
+#define ELF_CLASS ELFCLASS64
+#define ELF_R_SYM(val) (((Elf64_Mips_Rela *)(&val))->fields.r_sym)
+#define ELF_R_TYPE(val) (((Elf64_Mips_Rela *)(&val))->fields.r_type)
+#define ELF_ST_TYPE(o) ELF64_ST_TYPE(o)
+#define ELF_ST_BIND(o) ELF64_ST_BIND(o)
+#define ELF_ST_VISIBILITY(o) ELF64_ST_VISIBILITY(o)
+
+#include "relocs.c"
diff --git a/arch/mips/boot/tools/relocs_main.c b/arch/mips/boot/tools/relocs_main.c
new file mode 100644
index 000000000..e2453a564
--- /dev/null
+++ b/arch/mips/boot/tools/relocs_main.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <endian.h>
+#include <elf.h>
+
+#include "relocs.h"
+
+void die(char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+ exit(1);
+}
+
+static void usage(void)
+{
+ die("relocs [--reloc-info|--text|--bin|--keep] vmlinux\n");
+}
+
+int main(int argc, char **argv)
+{
+ int show_reloc_info, as_text, as_bin, keep_relocs;
+ const char *fname;
+ FILE *fp;
+ int i;
+ unsigned char e_ident[EI_NIDENT];
+
+ show_reloc_info = 0;
+ as_text = 0;
+ as_bin = 0;
+ keep_relocs = 0;
+ fname = NULL;
+ for (i = 1; i < argc; i++) {
+ char *arg = argv[i];
+
+ if (*arg == '-') {
+ if (strcmp(arg, "--reloc-info") == 0) {
+ show_reloc_info = 1;
+ continue;
+ }
+ if (strcmp(arg, "--text") == 0) {
+ as_text = 1;
+ continue;
+ }
+ if (strcmp(arg, "--bin") == 0) {
+ as_bin = 1;
+ continue;
+ }
+ if (strcmp(arg, "--keep") == 0) {
+ keep_relocs = 1;
+ continue;
+ }
+ } else if (!fname) {
+ fname = arg;
+ continue;
+ }
+ usage();
+ }
+ if (!fname)
+ usage();
+
+ fp = fopen(fname, "r+");
+ if (!fp)
+ die("Cannot open %s: %s\n", fname, strerror(errno));
+
+ if (fread(&e_ident, 1, EI_NIDENT, fp) != EI_NIDENT)
+ die("Cannot read %s: %s", fname, strerror(errno));
+
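+ /*
+ * Peek at the ELF identification bytes to choose the 32-bit or 64-bit
+ * handler, then rewind so process_*() can re-read the file from the
+ * start.
+ */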
+ rewind(fp);
+ if (e_ident[EI_CLASS] == ELFCLASS64)
+ process_64(fp, as_text, as_bin, show_reloc_info, keep_relocs);
+ else
+ process_32(fp, as_text, as_bin, show_reloc_info, keep_relocs);
+ fclose(fp);
+ return 0;
+}
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
new file mode 100644
index 000000000..4984e462b
--- /dev/null
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: GPL-2.0
+if CPU_CAVIUM_OCTEON
+
+config CAVIUM_CN63XXP1
+ bool "Enable CN63XXP1 errata workarounds"
+ default "n"
+ help
+ The CN63XXP1 chip requires build-time workarounds to
+ function reliably; select this option to enable them. These
+ workarounds cause a slight decrease in performance on
+ non-CN63XXP1 hardware, so it is recommended to select "n"
+ unless it is known that the workarounds are needed.
+
+config CAVIUM_OCTEON_CVMSEG_SIZE
+ int "Number of L1 cache lines reserved for CVMSEG memory"
+ range 0 54
+ default 1
+ help
+ CVMSEG LM is a segment that accesses portions of the dcache as a
+ local memory; the larger CVMSEG is, the smaller the cache is.
+ This selects the size of CVMSEG LM, which is in cache blocks. The
+ legal range is from zero to 54 cache blocks (i.e. CVMSEG LM is
+ between zero and 6192 bytes).
+
+endif # CPU_CAVIUM_OCTEON
+
+if CAVIUM_OCTEON_SOC
+
+config CAVIUM_OCTEON_LOCK_L2
+ bool "Lock often used kernel code in the L2"
+ default "y"
+ help
+ Enable locking parts of the kernel into the L2 cache.
+
+config CAVIUM_OCTEON_LOCK_L2_TLB
+ bool "Lock the TLB handler in L2"
+ depends on CAVIUM_OCTEON_LOCK_L2
+ default "y"
+ help
+ Lock the low level TLB fast path into L2.
+
+config CAVIUM_OCTEON_LOCK_L2_EXCEPTION
+ bool "Lock the exception handler in L2"
+ depends on CAVIUM_OCTEON_LOCK_L2
+ default "y"
+ help
+ Lock the low level exception handler into L2.
+
+config CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
+ bool "Lock the interrupt handler in L2"
+ depends on CAVIUM_OCTEON_LOCK_L2
+ default "y"
+ help
+ Lock the low level interrupt handler into L2.
+
+config CAVIUM_OCTEON_LOCK_L2_INTERRUPT
+ bool "Lock the 2nd level interrupt handler in L2"
+ depends on CAVIUM_OCTEON_LOCK_L2
+ default "y"
+ help
+ Lock the 2nd level interrupt handler in L2.
+
+config CAVIUM_OCTEON_LOCK_L2_MEMCPY
+ bool "Lock memcpy() in L2"
+ depends on CAVIUM_OCTEON_LOCK_L2
+ default "y"
+ help
+ Lock the kernel's implementation of memcpy() into L2.
+
+config OCTEON_ILM
+ tristate "Module to measure interrupt latency using Octeon CIU Timer"
+ help
+ This driver is a module to measure interrupt latency using the
+ CIU Timers on Octeon.
+
+ To compile this driver as a module, choose M here. The module
+ will be called octeon-ilm.
+
+endif # CAVIUM_OCTEON_SOC
diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile
new file mode 100644
index 000000000..7c02e5429
--- /dev/null
+++ b/arch/mips/cavium-octeon/Makefile
@@ -0,0 +1,21 @@
+#
+# Makefile for the Cavium Octeon specific kernel interface routines
+# under Linux.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2005-2009 Cavium Networks
+#
+
+obj-y := cpu.o setup.o octeon-platform.o octeon-irq.o csrc-octeon.o
+obj-y += dma-octeon.o
+obj-y += octeon-memcpy.o
+obj-y += executive/
+obj-y += crypto/
+
+obj-$(CONFIG_MTD) += flash_setup.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_OCTEON_ILM) += oct_ilm.o
+obj-$(CONFIG_USB) += octeon-usb.o
diff --git a/arch/mips/cavium-octeon/Platform b/arch/mips/cavium-octeon/Platform
new file mode 100644
index 000000000..4adef38de
--- /dev/null
+++ b/arch/mips/cavium-octeon/Platform
@@ -0,0 +1,6 @@
+#
+# Cavium Octeon
+#
+cflags-$(CONFIG_CAVIUM_OCTEON_SOC) += \
+ -I$(srctree)/arch/mips/include/asm/mach-cavium-octeon
+load-$(CONFIG_CAVIUM_OCTEON_SOC) += 0xffffffff81100000
diff --git a/arch/mips/cavium-octeon/cpu.c b/arch/mips/cavium-octeon/cpu.c
new file mode 100644
index 000000000..036d56cc4
--- /dev/null
+++ b/arch/mips/cavium-octeon/cpu.c
@@ -0,0 +1,50 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/notifier.h>
+#include <linux/prefetch.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/cop2.h>
+#include <asm/current.h>
+#include <asm/mipsregs.h>
+#include <asm/page.h>
+#include <asm/octeon/octeon.h>
+
+static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action,
+ void *data)
+{
+ unsigned long flags;
+ unsigned int status;
+
+ switch (action) {
+ case CU2_EXCEPTION:
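+ /*
+ * A COP2 instruction trapped because CU2 access was disabled for
+ * this task: mark CU2 usable in the task's saved status, restore its
+ * saved COP2 context into the hardware, and return NOTIFY_BAD so the
+ * default handler does not send a signal and the instruction is
+ * simply replayed.
+ */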
+ prefetch(&current->thread.cp2);
+ local_irq_save(flags);
+ KSTK_STATUS(current) |= ST0_CU2;
+ status = read_c0_status();
+ write_c0_status(status | ST0_CU2);
+ octeon_cop2_restore(&(current->thread.cp2));
+ write_c0_status(status & ~ST0_CU2);
+ local_irq_restore(flags);
+
+ return NOTIFY_BAD; /* Don't call default notifier */
+ }
+
+ return NOTIFY_OK; /* Let default notifier send signals */
+}
+
+static int __init cnmips_cu2_setup(void)
+{
+ return cu2_notifier(cnmips_cu2_call, 0);
+}
+early_initcall(cnmips_cu2_setup);
diff --git a/arch/mips/cavium-octeon/crypto/Makefile b/arch/mips/cavium-octeon/crypto/Makefile
new file mode 100644
index 000000000..db26c73fa
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# OCTEON-specific crypto modules.
+#
+
+obj-y += octeon-crypto.o
+
+obj-$(CONFIG_CRYPTO_MD5_OCTEON) += octeon-md5.o
+obj-$(CONFIG_CRYPTO_SHA1_OCTEON) += octeon-sha1.o
+obj-$(CONFIG_CRYPTO_SHA256_OCTEON) += octeon-sha256.o
+obj-$(CONFIG_CRYPTO_SHA512_OCTEON) += octeon-sha512.o
diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.c b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
new file mode 100644
index 000000000..cfb4a146c
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
@@ -0,0 +1,69 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2012 Cavium Networks
+ */
+
+#include <asm/cop2.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/sched/task_stack.h>
+
+#include "octeon-crypto.h"
+
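+/*
+ * Typical calling pattern (illustrative sketch; the hash drivers in this
+ * directory follow it):
+ *
+ *	struct octeon_cop2_state state;
+ *	unsigned long flags;
+ *
+ *	flags = octeon_crypto_enable(&state);
+ *	... issue COP2 (dmtc2/dmfc2) crypto operations ...
+ *	octeon_crypto_disable(&state, flags);
+ */
+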
+/**
+ * Enable access to Octeon's COP2 crypto hardware for kernel use. Wrap any
+ * crypto operations in calls to octeon_crypto_enable/disable in order to make
+ * sure the state of COP2 isn't corrupted if userspace is also performing
+ * hardware crypto operations. Allocate the state parameter on the stack.
+ * Returns with preemption disabled.
+ *
+ * @state: Pointer to state structure to store current COP2 state in.
+ *
+ * Returns: Flags to be passed to octeon_crypto_disable()
+ */
+unsigned long octeon_crypto_enable(struct octeon_cop2_state *state)
+{
+ int status;
+ unsigned long flags;
+
+ preempt_disable();
+ local_irq_save(flags);
+ status = read_c0_status();
+ write_c0_status(status | ST0_CU2);
+ if (KSTK_STATUS(current) & ST0_CU2) {
+ octeon_cop2_save(&(current->thread.cp2));
+ KSTK_STATUS(current) &= ~ST0_CU2;
+ status &= ~ST0_CU2;
+ } else if (status & ST0_CU2) {
+ octeon_cop2_save(state);
+ }
+ local_irq_restore(flags);
+ return status & ST0_CU2;
+}
+EXPORT_SYMBOL_GPL(octeon_crypto_enable);
+
+/**
+ * Disable access to Octeon's COP2 crypto hardware in the kernel. This must be
+ * called after octeon_crypto_enable() and before any context switch or return
+ * to userspace.
+ *
+ * @state: Pointer to COP2 state to restore
+ * @flags: Return value from octeon_crypto_enable()
+ */
+void octeon_crypto_disable(struct octeon_cop2_state *state,
+ unsigned long crypto_flags)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (crypto_flags & ST0_CU2)
+ octeon_cop2_restore(state);
+ else
+ write_c0_status(read_c0_status() & ~ST0_CU2);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(octeon_crypto_disable);
diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.h b/arch/mips/cavium-octeon/crypto/octeon-crypto.h
new file mode 100644
index 000000000..7315cc307
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.h
@@ -0,0 +1,224 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012-2013 Cavium Inc., All Rights Reserved.
+ *
+ * MD5/SHA1/SHA256/SHA512 instruction definitions added by
+ * Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ */
+#ifndef __LINUX_OCTEON_CRYPTO_H
+#define __LINUX_OCTEON_CRYPTO_H
+
+#include <linux/sched.h>
+#include <asm/mipsregs.h>
+
+#define OCTEON_CR_OPCODE_PRIORITY 300
+
+extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state);
+extern void octeon_crypto_disable(struct octeon_cop2_state *state,
+ unsigned long flags);
+
+/*
+ * Macros needed to implement MD5/SHA1/SHA256:
+ */
+
+/*
+ * The index can be 0-1 (MD5) or 0-2 (SHA1), 0-3 (SHA256).
+ */
+#define write_octeon_64bit_hash_dword(value, index) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x0048+" STR(index) \
+ : \
+ : [rt] "d" (cpu_to_be64(value))); \
+} while (0)
+
+/*
+ * The index can be 0-1 (MD5) or 0-2 (SHA1), 0-3 (SHA256).
+ */
+#define read_octeon_64bit_hash_dword(index) \
+({ \
+ u64 __value; \
+ \
+ __asm__ __volatile__ ( \
+ "dmfc2 %[rt],0x0048+" STR(index) \
+ : [rt] "=d" (__value) \
+ : ); \
+ \
+ be64_to_cpu(__value); \
+})
+
+/*
+ * The index can be 0-6.
+ */
+#define write_octeon_64bit_block_dword(value, index) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x0040+" STR(index) \
+ : \
+ : [rt] "d" (cpu_to_be64(value))); \
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_md5_start(value) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x4047" \
+ : \
+ : [rt] "d" (cpu_to_be64(value))); \
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_sha1_start(value) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x4057" \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_sha256_start(value) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x404f" \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+/*
+ * Macros needed to implement SHA512:
+ */
+
+/*
+ * The index can be 0-7.
+ */
+#define write_octeon_64bit_hash_sha512(value, index) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x0250+" STR(index) \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+/*
+ * The index can be 0-7.
+ */
+#define read_octeon_64bit_hash_sha512(index) \
+({ \
+ u64 __value; \
+ \
+ __asm__ __volatile__ ( \
+ "dmfc2 %[rt],0x0250+" STR(index) \
+ : [rt] "=d" (__value) \
+ : ); \
+ \
+ __value; \
+})
+
+/*
+ * The index can be 0-14.
+ */
+#define write_octeon_64bit_block_sha512(value, index) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x0240+" STR(index) \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+/*
+ * The value is the final block word (64-bit).
+ */
+#define octeon_sha512_start(value) \
+do { \
+ __asm__ __volatile__ ( \
+ "dmtc2 %[rt],0x424f" \
+ : \
+ : [rt] "d" (value)); \
+} while (0)
+
+#endif /* __LINUX_OCTEON_CRYPTO_H */
diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c
new file mode 100644
index 000000000..8c8ea1396
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-md5.c
@@ -0,0 +1,206 @@
+/*
+ * Cryptographic API.
+ *
+ * MD5 Message Digest Algorithm (RFC1321).
+ *
+ * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ * Based on crypto/md5.c, which is:
+ *
+ * Derived from cryptoapi implementation, originally based on the
+ * public domain implementation written by Colin Plumb in 1993.
+ *
+ * Copyright (c) Cryptoapi developers.
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <crypto/md5.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
+
+#include "octeon-crypto.h"
+
+/*
+ * We pass everything as 64-bit. OCTEON can handle misaligned data.
+ */
+
+static void octeon_md5_store_hash(struct md5_state *ctx)
+{
+ u64 *hash = (u64 *)ctx->hash;
+
+ write_octeon_64bit_hash_dword(hash[0], 0);
+ write_octeon_64bit_hash_dword(hash[1], 1);
+}
+
+static void octeon_md5_read_hash(struct md5_state *ctx)
+{
+ u64 *hash = (u64 *)ctx->hash;
+
+ hash[0] = read_octeon_64bit_hash_dword(0);
+ hash[1] = read_octeon_64bit_hash_dword(1);
+}
+
+static void octeon_md5_transform(const void *_block)
+{
+ const u64 *block = _block;
+
+ write_octeon_64bit_block_dword(block[0], 0);
+ write_octeon_64bit_block_dword(block[1], 1);
+ write_octeon_64bit_block_dword(block[2], 2);
+ write_octeon_64bit_block_dword(block[3], 3);
+ write_octeon_64bit_block_dword(block[4], 4);
+ write_octeon_64bit_block_dword(block[5], 5);
+ write_octeon_64bit_block_dword(block[6], 6);
+ octeon_md5_start(block[7]);
+}
+
+static int octeon_md5_init(struct shash_desc *desc)
+{
+ struct md5_state *mctx = shash_desc_ctx(desc);
+
+ mctx->hash[0] = cpu_to_le32(MD5_H0);
+ mctx->hash[1] = cpu_to_le32(MD5_H1);
+ mctx->hash[2] = cpu_to_le32(MD5_H2);
+ mctx->hash[3] = cpu_to_le32(MD5_H3);
+ mctx->byte_count = 0;
+
+ return 0;
+}
+
+static int octeon_md5_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct md5_state *mctx = shash_desc_ctx(desc);
+ const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ mctx->byte_count += len;
+
+ if (avail > len) {
+ memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
+ data, len);
+ return 0;
+ }
+
+ memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data,
+ avail);
+
+ flags = octeon_crypto_enable(&state);
+ octeon_md5_store_hash(mctx);
+
+ octeon_md5_transform(mctx->block);
+ data += avail;
+ len -= avail;
+
+ while (len >= sizeof(mctx->block)) {
+ octeon_md5_transform(data);
+ data += sizeof(mctx->block);
+ len -= sizeof(mctx->block);
+ }
+
+ octeon_md5_read_hash(mctx);
+ octeon_crypto_disable(&state, flags);
+
+ memcpy(mctx->block, data, len);
+
+ return 0;
+}
+
+static int octeon_md5_final(struct shash_desc *desc, u8 *out)
+{
+ struct md5_state *mctx = shash_desc_ctx(desc);
+ const unsigned int offset = mctx->byte_count & 0x3f;
+ char *p = (char *)mctx->block + offset;
+ int padding = 56 - (offset + 1);
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ *p++ = 0x80;
+
+ flags = octeon_crypto_enable(&state);
+ octeon_md5_store_hash(mctx);
+
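+ /*
+ * If the 0x80 terminator byte landed at offset 56 or later there is no
+ * room left for the 64-bit length in this block: process it and pad a
+ * fresh block instead.
+ */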
+ if (padding < 0) {
+ memset(p, 0x00, padding + sizeof(u64));
+ octeon_md5_transform(mctx->block);
+ p = (char *)mctx->block;
+ padding = 56;
+ }
+
+ memset(p, 0, padding);
+ mctx->block[14] = cpu_to_le32(mctx->byte_count << 3);
+ mctx->block[15] = cpu_to_le32(mctx->byte_count >> 29);
+ octeon_md5_transform(mctx->block);
+
+ octeon_md5_read_hash(mctx);
+ octeon_crypto_disable(&state, flags);
+
+ memcpy(out, mctx->hash, sizeof(mctx->hash));
+ memset(mctx, 0, sizeof(*mctx));
+
+ return 0;
+}
+
+static int octeon_md5_export(struct shash_desc *desc, void *out)
+{
+ struct md5_state *ctx = shash_desc_ctx(desc);
+
+ memcpy(out, ctx, sizeof(*ctx));
+ return 0;
+}
+
+static int octeon_md5_import(struct shash_desc *desc, const void *in)
+{
+ struct md5_state *ctx = shash_desc_ctx(desc);
+
+ memcpy(ctx, in, sizeof(*ctx));
+ return 0;
+}
+
+static struct shash_alg alg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .init = octeon_md5_init,
+ .update = octeon_md5_update,
+ .final = octeon_md5_final,
+ .export = octeon_md5_export,
+ .import = octeon_md5_import,
+ .descsize = sizeof(struct md5_state),
+ .statesize = sizeof(struct md5_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name= "octeon-md5",
+ .cra_priority = OCTEON_CR_OPCODE_PRIORITY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init md5_mod_init(void)
+{
+ if (!octeon_has_crypto())
+ return -ENOTSUPP;
+ return crypto_register_shash(&alg);
+}
+
+static void __exit md5_mod_fini(void)
+{
+ crypto_unregister_shash(&alg);
+}
+
+module_init(md5_mod_init);
+module_exit(md5_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MD5 Message Digest Algorithm (OCTEON)");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha1.c b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
new file mode 100644
index 000000000..75e79b47a
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cryptographic API.
+ *
+ * SHA1 Secure Hash Algorithm.
+ *
+ * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ * Based on crypto/sha1_generic.c, which is:
+ *
+ * Copyright (c) Alan Smithee.
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
+ */
+
+#include <linux/mm.h>
+#include <crypto/sha.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
+
+#include "octeon-crypto.h"
+
+/*
+ * We pass everything as 64-bit. OCTEON can handle misaligned data.
+ */
+
+static void octeon_sha1_store_hash(struct sha1_state *sctx)
+{
+ u64 *hash = (u64 *)sctx->state;
+ union {
+ u32 word[2];
+ u64 dword;
+ } hash_tail = { { sctx->state[4], } };
+
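+ /*
+ * The SHA-1 state is five 32-bit words: the first four pair up into two
+ * 64-bit dwords, and the fifth is carried in its own dword via hash_tail.
+ */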
+ write_octeon_64bit_hash_dword(hash[0], 0);
+ write_octeon_64bit_hash_dword(hash[1], 1);
+ write_octeon_64bit_hash_dword(hash_tail.dword, 2);
+ memzero_explicit(&hash_tail.word[0], sizeof(hash_tail.word[0]));
+}
+
+static void octeon_sha1_read_hash(struct sha1_state *sctx)
+{
+ u64 *hash = (u64 *)sctx->state;
+ union {
+ u32 word[2];
+ u64 dword;
+ } hash_tail;
+
+ hash[0] = read_octeon_64bit_hash_dword(0);
+ hash[1] = read_octeon_64bit_hash_dword(1);
+ hash_tail.dword = read_octeon_64bit_hash_dword(2);
+ sctx->state[4] = hash_tail.word[0];
+ memzero_explicit(&hash_tail.dword, sizeof(hash_tail.dword));
+}
+
+static void octeon_sha1_transform(const void *_block)
+{
+ const u64 *block = _block;
+
+ write_octeon_64bit_block_dword(block[0], 0);
+ write_octeon_64bit_block_dword(block[1], 1);
+ write_octeon_64bit_block_dword(block[2], 2);
+ write_octeon_64bit_block_dword(block[3], 3);
+ write_octeon_64bit_block_dword(block[4], 4);
+ write_octeon_64bit_block_dword(block[5], 5);
+ write_octeon_64bit_block_dword(block[6], 6);
+ octeon_sha1_start(block[7]);
+}
+
+static int octeon_sha1_init(struct shash_desc *desc)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA1_H0;
+ sctx->state[1] = SHA1_H1;
+ sctx->state[2] = SHA1_H2;
+ sctx->state[3] = SHA1_H3;
+ sctx->state[4] = SHA1_H4;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static void __octeon_sha1_update(struct sha1_state *sctx, const u8 *data,
+ unsigned int len)
+{
+ unsigned int partial;
+ unsigned int done;
+ const u8 *src;
+
+ partial = sctx->count % SHA1_BLOCK_SIZE;
+ sctx->count += len;
+ done = 0;
+ src = data;
+
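+ /*
+ * If a partial block is already buffered, top it up to a full block
+ * first; done starts at -partial so that done + SHA1_BLOCK_SIZE counts
+ * only the bytes taken from data.
+ */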
+ if ((partial + len) >= SHA1_BLOCK_SIZE) {
+ if (partial) {
+ done = -partial;
+ memcpy(sctx->buffer + partial, data,
+ done + SHA1_BLOCK_SIZE);
+ src = sctx->buffer;
+ }
+
+ do {
+ octeon_sha1_transform(src);
+ done += SHA1_BLOCK_SIZE;
+ src = data + done;
+ } while (done + SHA1_BLOCK_SIZE <= len);
+
+ partial = 0;
+ }
+ memcpy(sctx->buffer + partial, src, len - done);
+}
+
+static int octeon_sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ /*
+ * Small updates never reach the crypto engine, so the generic sha1 is
+ * faster because of the heavyweight octeon_crypto_enable() /
+ * octeon_crypto_disable().
+ */
+ if ((sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
+ return crypto_sha1_update(desc, data, len);
+
+ flags = octeon_crypto_enable(&state);
+ octeon_sha1_store_hash(sctx);
+
+ __octeon_sha1_update(sctx, data, len);
+
+ octeon_sha1_read_hash(sctx);
+ octeon_crypto_disable(&state, flags);
+
+ return 0;
+}
+
+static int octeon_sha1_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ static const u8 padding[64] = { 0x80, };
+ struct octeon_cop2_state state;
+ __be32 *dst = (__be32 *)out;
+ unsigned int pad_len;
+ unsigned long flags;
+ unsigned int index;
+ __be64 bits;
+ int i;
+
+ /* Save number of bits. */
+ bits = cpu_to_be64(sctx->count << 3);
+
+ /* Pad out to 56 mod 64. */
+ index = sctx->count & 0x3f;
+ pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
+
+ flags = octeon_crypto_enable(&state);
+ octeon_sha1_store_hash(sctx);
+
+ __octeon_sha1_update(sctx, padding, pad_len);
+
+ /* Append length (before padding). */
+ __octeon_sha1_update(sctx, (const u8 *)&bits, sizeof(bits));
+
+ octeon_sha1_read_hash(sctx);
+ octeon_crypto_disable(&state, flags);
+
+ /* Store state in digest */
+ for (i = 0; i < 5; i++)
+ dst[i] = cpu_to_be32(sctx->state[i]);
+
+ /* Zeroize sensitive information. */
+ memset(sctx, 0, sizeof(*sctx));
+
+ return 0;
+}
+
+static int octeon_sha1_export(struct shash_desc *desc, void *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int octeon_sha1_import(struct shash_desc *desc, const void *in)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
+static struct shash_alg octeon_sha1_alg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .init = octeon_sha1_init,
+ .update = octeon_sha1_update,
+ .final = octeon_sha1_final,
+ .export = octeon_sha1_export,
+ .import = octeon_sha1_import,
+ .descsize = sizeof(struct sha1_state),
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name= "octeon-sha1",
+ .cra_priority = OCTEON_CR_OPCODE_PRIORITY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init octeon_sha1_mod_init(void)
+{
+ if (!octeon_has_crypto())
+ return -ENOTSUPP;
+ return crypto_register_shash(&octeon_sha1_alg);
+}
+
+static void __exit octeon_sha1_mod_fini(void)
+{
+ crypto_unregister_shash(&octeon_sha1_alg);
+}
+
+module_init(octeon_sha1_mod_init);
+module_exit(octeon_sha1_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (OCTEON)");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha256.c b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
new file mode 100644
index 000000000..a682ce767
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cryptographic API.
+ *
+ * SHA-224 and SHA-256 Secure Hash Algorithm.
+ *
+ * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ * Based on crypto/sha256_generic.c, which is:
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
+ */
+
+#include <linux/mm.h>
+#include <crypto/sha.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
+
+#include "octeon-crypto.h"
+
+/*
+ * We pass everything as 64-bit. OCTEON can handle misaligned data.
+ */
+
+static void octeon_sha256_store_hash(struct sha256_state *sctx)
+{
+ u64 *hash = (u64 *)sctx->state;
+
+ write_octeon_64bit_hash_dword(hash[0], 0);
+ write_octeon_64bit_hash_dword(hash[1], 1);
+ write_octeon_64bit_hash_dword(hash[2], 2);
+ write_octeon_64bit_hash_dword(hash[3], 3);
+}
+
+static void octeon_sha256_read_hash(struct sha256_state *sctx)
+{
+ u64 *hash = (u64 *)sctx->state;
+
+ hash[0] = read_octeon_64bit_hash_dword(0);
+ hash[1] = read_octeon_64bit_hash_dword(1);
+ hash[2] = read_octeon_64bit_hash_dword(2);
+ hash[3] = read_octeon_64bit_hash_dword(3);
+}
+
+static void octeon_sha256_transform(const void *_block)
+{
+ const u64 *block = _block;
+
+ write_octeon_64bit_block_dword(block[0], 0);
+ write_octeon_64bit_block_dword(block[1], 1);
+ write_octeon_64bit_block_dword(block[2], 2);
+ write_octeon_64bit_block_dword(block[3], 3);
+ write_octeon_64bit_block_dword(block[4], 4);
+ write_octeon_64bit_block_dword(block[5], 5);
+ write_octeon_64bit_block_dword(block[6], 6);
+ octeon_sha256_start(block[7]);
+}
+
+static int octeon_sha224_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA224_H0;
+ sctx->state[1] = SHA224_H1;
+ sctx->state[2] = SHA224_H2;
+ sctx->state[3] = SHA224_H3;
+ sctx->state[4] = SHA224_H4;
+ sctx->state[5] = SHA224_H5;
+ sctx->state[6] = SHA224_H6;
+ sctx->state[7] = SHA224_H7;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static int octeon_sha256_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA256_H0;
+ sctx->state[1] = SHA256_H1;
+ sctx->state[2] = SHA256_H2;
+ sctx->state[3] = SHA256_H3;
+ sctx->state[4] = SHA256_H4;
+ sctx->state[5] = SHA256_H5;
+ sctx->state[6] = SHA256_H6;
+ sctx->state[7] = SHA256_H7;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static void __octeon_sha256_update(struct sha256_state *sctx, const u8 *data,
+ unsigned int len)
+{
+ unsigned int partial;
+ unsigned int done;
+ const u8 *src;
+
+ partial = sctx->count % SHA256_BLOCK_SIZE;
+ sctx->count += len;
+ done = 0;
+ src = data;
+
+ if ((partial + len) >= SHA256_BLOCK_SIZE) {
+ if (partial) {
+ done = -partial;
+ memcpy(sctx->buf + partial, data,
+ done + SHA256_BLOCK_SIZE);
+ src = sctx->buf;
+ }
+
+ do {
+ octeon_sha256_transform(src);
+ done += SHA256_BLOCK_SIZE;
+ src = data + done;
+ } while (done + SHA256_BLOCK_SIZE <= len);
+
+ partial = 0;
+ }
+ memcpy(sctx->buf + partial, src, len - done);
+}
+
+static int octeon_sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ /*
+ * Small updates never reach the crypto engine, so the generic sha256 is
+ * faster because of the heavyweight octeon_crypto_enable() /
+ * octeon_crypto_disable().
+ */
+ if ((sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
+ return crypto_sha256_update(desc, data, len);
+
+ flags = octeon_crypto_enable(&state);
+ octeon_sha256_store_hash(sctx);
+
+ __octeon_sha256_update(sctx, data, len);
+
+ octeon_sha256_read_hash(sctx);
+ octeon_crypto_disable(&state, flags);
+
+ return 0;
+}
+
+static int octeon_sha256_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ static const u8 padding[64] = { 0x80, };
+ struct octeon_cop2_state state;
+ __be32 *dst = (__be32 *)out;
+ unsigned int pad_len;
+ unsigned long flags;
+ unsigned int index;
+ __be64 bits;
+ int i;
+
+ /* Save number of bits. */
+ bits = cpu_to_be64(sctx->count << 3);
+
+ /* Pad out to 56 mod 64. */
+ index = sctx->count & 0x3f;
+ pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
+
+ flags = octeon_crypto_enable(&state);
+ octeon_sha256_store_hash(sctx);
+
+ __octeon_sha256_update(sctx, padding, pad_len);
+
+ /* Append length (before padding). */
+ __octeon_sha256_update(sctx, (const u8 *)&bits, sizeof(bits));
+
+ octeon_sha256_read_hash(sctx);
+ octeon_crypto_disable(&state, flags);
+
+ /* Store state in digest */
+ for (i = 0; i < 8; i++)
+ dst[i] = cpu_to_be32(sctx->state[i]);
+
+ /* Zeroize sensitive information. */
+ memset(sctx, 0, sizeof(*sctx));
+
+ return 0;
+}
+
+static int octeon_sha224_final(struct shash_desc *desc, u8 *hash)
+{
+ u8 D[SHA256_DIGEST_SIZE];
+
+ octeon_sha256_final(desc, D);
+
+ memcpy(hash, D, SHA224_DIGEST_SIZE);
+ memzero_explicit(D, SHA256_DIGEST_SIZE);
+
+ return 0;
+}
+
+static int octeon_sha256_export(struct shash_desc *desc, void *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int octeon_sha256_import(struct shash_desc *desc, const void *in)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
+static struct shash_alg octeon_sha256_algs[2] = { {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = octeon_sha256_init,
+ .update = octeon_sha256_update,
+ .final = octeon_sha256_final,
+ .export = octeon_sha256_export,
+ .import = octeon_sha256_import,
+ .descsize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name= "octeon-sha256",
+ .cra_priority = OCTEON_CR_OPCODE_PRIORITY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+}, {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = octeon_sha224_init,
+ .update = octeon_sha256_update,
+ .final = octeon_sha224_final,
+ .descsize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name= "octeon-sha224",
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+} };
+
+static int __init octeon_sha256_mod_init(void)
+{
+ if (!octeon_has_crypto())
+ return -ENOTSUPP;
+ return crypto_register_shashes(octeon_sha256_algs,
+ ARRAY_SIZE(octeon_sha256_algs));
+}
+
+static void __exit octeon_sha256_mod_fini(void)
+{
+ crypto_unregister_shashes(octeon_sha256_algs,
+ ARRAY_SIZE(octeon_sha256_algs));
+}
+
+module_init(octeon_sha256_mod_init);
+module_exit(octeon_sha256_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm (OCTEON)");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha512.c b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
new file mode 100644
index 000000000..50722a0cf
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cryptographic API.
+ *
+ * SHA-512 and SHA-384 Secure Hash Algorithm.
+ *
+ * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ * Based on crypto/sha512_generic.c, which is:
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2003 Kyle McMartin <kyle@debian.org>
+ */
+
+#include <linux/mm.h>
+#include <crypto/sha.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
+
+#include "octeon-crypto.h"
+
+/*
+ * We pass everything as 64-bit. OCTEON can handle misaligned data.
+ */
+
+static void octeon_sha512_store_hash(struct sha512_state *sctx)
+{
+ write_octeon_64bit_hash_sha512(sctx->state[0], 0);
+ write_octeon_64bit_hash_sha512(sctx->state[1], 1);
+ write_octeon_64bit_hash_sha512(sctx->state[2], 2);
+ write_octeon_64bit_hash_sha512(sctx->state[3], 3);
+ write_octeon_64bit_hash_sha512(sctx->state[4], 4);
+ write_octeon_64bit_hash_sha512(sctx->state[5], 5);
+ write_octeon_64bit_hash_sha512(sctx->state[6], 6);
+ write_octeon_64bit_hash_sha512(sctx->state[7], 7);
+}
+
+static void octeon_sha512_read_hash(struct sha512_state *sctx)
+{
+ sctx->state[0] = read_octeon_64bit_hash_sha512(0);
+ sctx->state[1] = read_octeon_64bit_hash_sha512(1);
+ sctx->state[2] = read_octeon_64bit_hash_sha512(2);
+ sctx->state[3] = read_octeon_64bit_hash_sha512(3);
+ sctx->state[4] = read_octeon_64bit_hash_sha512(4);
+ sctx->state[5] = read_octeon_64bit_hash_sha512(5);
+ sctx->state[6] = read_octeon_64bit_hash_sha512(6);
+ sctx->state[7] = read_octeon_64bit_hash_sha512(7);
+}
+
+static void octeon_sha512_transform(const void *_block)
+{
+ const u64 *block = _block;
+
+ write_octeon_64bit_block_sha512(block[0], 0);
+ write_octeon_64bit_block_sha512(block[1], 1);
+ write_octeon_64bit_block_sha512(block[2], 2);
+ write_octeon_64bit_block_sha512(block[3], 3);
+ write_octeon_64bit_block_sha512(block[4], 4);
+ write_octeon_64bit_block_sha512(block[5], 5);
+ write_octeon_64bit_block_sha512(block[6], 6);
+ write_octeon_64bit_block_sha512(block[7], 7);
+ write_octeon_64bit_block_sha512(block[8], 8);
+ write_octeon_64bit_block_sha512(block[9], 9);
+ write_octeon_64bit_block_sha512(block[10], 10);
+ write_octeon_64bit_block_sha512(block[11], 11);
+ write_octeon_64bit_block_sha512(block[12], 12);
+ write_octeon_64bit_block_sha512(block[13], 13);
+ write_octeon_64bit_block_sha512(block[14], 14);
+ octeon_sha512_start(block[15]);
+}
+
+static int octeon_sha512_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA512_H0;
+ sctx->state[1] = SHA512_H1;
+ sctx->state[2] = SHA512_H2;
+ sctx->state[3] = SHA512_H3;
+ sctx->state[4] = SHA512_H4;
+ sctx->state[5] = SHA512_H5;
+ sctx->state[6] = SHA512_H6;
+ sctx->state[7] = SHA512_H7;
+ sctx->count[0] = sctx->count[1] = 0;
+
+ return 0;
+}
+
+static int octeon_sha384_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA384_H0;
+ sctx->state[1] = SHA384_H1;
+ sctx->state[2] = SHA384_H2;
+ sctx->state[3] = SHA384_H3;
+ sctx->state[4] = SHA384_H4;
+ sctx->state[5] = SHA384_H5;
+ sctx->state[6] = SHA384_H6;
+ sctx->state[7] = SHA384_H7;
+ sctx->count[0] = sctx->count[1] = 0;
+
+ return 0;
+}
+
+static void __octeon_sha512_update(struct sha512_state *sctx, const u8 *data,
+ unsigned int len)
+{
+ unsigned int part_len;
+ unsigned int index;
+ unsigned int i;
+
+ /* Compute number of bytes mod 128. */
+ index = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+ /* Update number of bytes. */
+ if ((sctx->count[0] += len) < len)
+ sctx->count[1]++;
+
+ part_len = SHA512_BLOCK_SIZE - index;
+
+ /* Transform as many times as possible. */
+ if (len >= part_len) {
+ memcpy(&sctx->buf[index], data, part_len);
+ octeon_sha512_transform(sctx->buf);
+
+ for (i = part_len; i + SHA512_BLOCK_SIZE <= len;
+ i += SHA512_BLOCK_SIZE)
+ octeon_sha512_transform(&data[i]);
+
+ index = 0;
+ } else {
+ i = 0;
+ }
+
+ /* Buffer remaining input. */
+ memcpy(&sctx->buf[index], &data[i], len - i);
+}
+
+static int octeon_sha512_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct octeon_cop2_state state;
+ unsigned long flags;
+
+ /*
+ * Small updates never reach the crypto engine, so the generic sha512 is
+ * faster because of the heavyweight octeon_crypto_enable() /
+ * octeon_crypto_disable().
+ */
+ if ((sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
+ return crypto_sha512_update(desc, data, len);
+
+ flags = octeon_crypto_enable(&state);
+ octeon_sha512_store_hash(sctx);
+
+ __octeon_sha512_update(sctx, data, len);
+
+ octeon_sha512_read_hash(sctx);
+ octeon_crypto_disable(&state, flags);
+
+ return 0;
+}
+
+static int octeon_sha512_final(struct shash_desc *desc, u8 *hash)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ static const u8 padding[128] = { 0x80, };
+ struct octeon_cop2_state state;
+ __be64 *dst = (__be64 *)hash;
+ unsigned int pad_len;
+ unsigned long flags;
+ unsigned int index;
+ __be64 bits[2];
+ int i;
+
+ /* Save number of bits. */
+ bits[1] = cpu_to_be64(sctx->count[0] << 3);
+ bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
+
+ /* Pad out to 112 mod 128. */
+ index = sctx->count[0] & 0x7f;
+ pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
+
+ flags = octeon_crypto_enable(&state);
+ octeon_sha512_store_hash(sctx);
+
+ __octeon_sha512_update(sctx, padding, pad_len);
+
+ /* Append length (before padding). */
+ __octeon_sha512_update(sctx, (const u8 *)bits, sizeof(bits));
+
+ octeon_sha512_read_hash(sctx);
+ octeon_crypto_disable(&state, flags);
+
+ /* Store state in digest. */
+ for (i = 0; i < 8; i++)
+ dst[i] = cpu_to_be64(sctx->state[i]);
+
+ /* Zeroize sensitive information. */
+ memset(sctx, 0, sizeof(struct sha512_state));
+
+ return 0;
+}
+
+static int octeon_sha384_final(struct shash_desc *desc, u8 *hash)
+{
+ u8 D[64];
+
+ octeon_sha512_final(desc, D);
+
+ memcpy(hash, D, 48);
+ memzero_explicit(D, 64);
+
+ return 0;
+}
+
+static struct shash_alg octeon_sha512_algs[2] = { {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .init = octeon_sha512_init,
+ .update = octeon_sha512_update,
+ .final = octeon_sha512_final,
+ .descsize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name= "octeon-sha512",
+ .cra_priority = OCTEON_CR_OPCODE_PRIORITY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+}, {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .init = octeon_sha384_init,
+ .update = octeon_sha512_update,
+ .final = octeon_sha384_final,
+ .descsize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name= "octeon-sha384",
+ .cra_priority = OCTEON_CR_OPCODE_PRIORITY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+} };
+
+static int __init octeon_sha512_mod_init(void)
+{
+ if (!octeon_has_crypto())
+ return -ENOTSUPP;
+ return crypto_register_shashes(octeon_sha512_algs,
+ ARRAY_SIZE(octeon_sha512_algs));
+}
+
+static void __exit octeon_sha512_mod_fini(void)
+{
+ crypto_unregister_shashes(octeon_sha512_algs,
+ ARRAY_SIZE(octeon_sha512_algs));
+}
+
+module_init(octeon_sha512_mod_init);
+module_exit(octeon_sha512_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms (OCTEON)");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
new file mode 100644
index 000000000..124817609
--- /dev/null
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -0,0 +1,214 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 by Ralf Baechle
+ * Copyright (C) 2009, 2012 Cavium, Inc.
+ */
+#include <linux/clocksource.h>
+#include <linux/sched/clock.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+
+#include <asm/cpu-info.h>
+#include <asm/cpu-type.h>
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-ipd-defs.h>
+#include <asm/octeon/cvmx-mio-defs.h>
+#include <asm/octeon/cvmx-rst-defs.h>
+#include <asm/octeon/cvmx-fpa-defs.h>
+
+static u64 f;
+static u64 rdiv;
+static u64 sdiv;
+static u64 octeon_udelay_factor;
+static u64 octeon_ndelay_factor;
+
+void __init octeon_setup_delays(void)
+{
+ octeon_udelay_factor = octeon_get_clock_rate() / 1000000;
+ /*
+ * For __ndelay we divide by 2^16, so the factor is multiplied
+ * by the same amount.
+ */
+ octeon_ndelay_factor = (octeon_udelay_factor * 0x10000ull) / 1000ull;
+
+ preset_lpj = octeon_get_clock_rate() / HZ;
+
+ if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
+ union cvmx_mio_rst_boot rst_boot;
+
+ rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
+ rdiv = rst_boot.s.c_mul; /* CPU clock */
+ sdiv = rst_boot.s.pnr_mul; /* I/O clock */
+ f = (0x8000000000000000ull / sdiv) * 2;
+ } else if (current_cpu_type() == CPU_CAVIUM_OCTEON3) {
+ union cvmx_rst_boot rst_boot;
+
+ rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
+ rdiv = rst_boot.s.c_mul; /* CPU clock */
+ sdiv = rst_boot.s.pnr_mul; /* I/O clock */
+ f = (0x8000000000000000ull / sdiv) * 2;
+ }
+
+}
+
+/*
+ * Set the current core's cvmcount counter to the value of the
+ * IPD_CLK_COUNT. We do this on all cores as they are brought
+ * on-line. This allows for a read from a local cpu register to
+ * access a synchronized counter.
+ *
+ * On CPU_CAVIUM_OCTEON2 the IPD_CLK_COUNT is scaled by rdiv/sdiv.
+ */
+void octeon_init_cvmcount(void)
+{
+ u64 clk_reg;
+ unsigned long flags;
+ unsigned loops = 2;
+
+ clk_reg = octeon_has_feature(OCTEON_FEATURE_FPA3) ?
+ CVMX_FPA_CLK_COUNT : CVMX_IPD_CLK_COUNT;
+
+ /* Clobber loops so GCC will not unroll the following while loop. */
+ asm("" : "+r" (loops));
+
+ local_irq_save(flags);
+ /*
+ * Loop several times so we are executing from the cache,
+ * which should give more deterministic timing.
+ */
+ while (loops--) {
+ u64 clk_count = cvmx_read_csr(clk_reg);
+ if (rdiv != 0) {
+ clk_count *= rdiv;
+ if (f != 0) {
+ asm("dmultu\t%[cnt],%[f]\n\t"
+ "mfhi\t%[cnt]"
+ : [cnt] "+r" (clk_count)
+ : [f] "r" (f)
+ : "hi", "lo");
+ }
+ }
+ write_c0_cvmcount(clk_count);
+ }
+ local_irq_restore(flags);
+}
+
+static u64 octeon_cvmcount_read(struct clocksource *cs)
+{
+ return read_c0_cvmcount();
+}
+
+static struct clocksource clocksource_mips = {
+ .name = "OCTEON_CVMCOUNT",
+ .read = octeon_cvmcount_read,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+unsigned long long notrace sched_clock(void)
+{
+ /* 64-bit arithmetic can overflow, so use 128-bit. */
+ u64 t1, t2, t3;
+ unsigned long long rv;
+ u64 mult = clocksource_mips.mult;
+ u64 shift = clocksource_mips.shift;
+ u64 cnt = read_c0_cvmcount();
+
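+ /*
+ * Compute (cnt * mult) >> shift using the full 128-bit product: dmultu
+ * leaves the product in hi/lo, the low half is shifted right by shift,
+ * the high half left by (64 - shift), and the two are OR-ed together.
+ */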
+ asm (
+ "dmultu\t%[cnt],%[mult]\n\t"
+ "nor\t%[t1],$0,%[shift]\n\t"
+ "mfhi\t%[t2]\n\t"
+ "mflo\t%[t3]\n\t"
+ "dsll\t%[t2],%[t2],1\n\t"
+ "dsrlv\t%[rv],%[t3],%[shift]\n\t"
+ "dsllv\t%[t1],%[t2],%[t1]\n\t"
+ "or\t%[rv],%[t1],%[rv]\n\t"
+ : [rv] "=&r" (rv), [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3)
+ : [cnt] "r" (cnt), [mult] "r" (mult), [shift] "r" (shift)
+ : "hi", "lo");
+ return rv;
+}
+
+void __init plat_time_init(void)
+{
+ clocksource_mips.rating = 300;
+ clocksource_register_hz(&clocksource_mips, octeon_get_clock_rate());
+}
+
+void __udelay(unsigned long us)
+{
+ u64 cur, end, inc;
+
+ cur = read_c0_cvmcount();
+
+ inc = us * octeon_udelay_factor;
+ end = cur + inc;
+
+ while (end > cur)
+ cur = read_c0_cvmcount();
+}
+EXPORT_SYMBOL(__udelay);
+
+void __ndelay(unsigned long ns)
+{
+ u64 cur, end, inc;
+
+ cur = read_c0_cvmcount();
+
+ inc = ((ns * octeon_ndelay_factor) >> 16);
+ end = cur + inc;
+
+ while (end > cur)
+ cur = read_c0_cvmcount();
+}
+EXPORT_SYMBOL(__ndelay);
+
+void __delay(unsigned long loops)
+{
+ u64 cur, end;
+
+ cur = read_c0_cvmcount();
+ end = cur + loops;
+
+ while (end > cur)
+ cur = read_c0_cvmcount();
+}
+EXPORT_SYMBOL(__delay);
+
+
+/**
+ * octeon_io_clk_delay - wait for a given number of io clock cycles to pass.
+ *
+ * We scale the wait by the clock ratio, and then wait for the
+ * corresponding number of core clocks.
+ *
+ * @count: The number of clocks to wait.
+ */
+void octeon_io_clk_delay(unsigned long count)
+{
+ u64 cur, end;
+
+ cur = read_c0_cvmcount();
+ if (rdiv != 0) {
+ end = count * rdiv;
+ if (f != 0) {
+ asm("dmultu\t%[cnt],%[f]\n\t"
+ "mfhi\t%[cnt]"
+ : [cnt] "+r" (end)
+ : [f] "r" (f)
+ : "hi", "lo");
+ }
+ end = cur + end;
+ } else {
+ end = cur + count;
+ }
+ while (end > cur)
+ cur = read_c0_cvmcount();
+}
+EXPORT_SYMBOL(octeon_io_clk_delay);
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
new file mode 100644
index 000000000..df70308db
--- /dev/null
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -0,0 +1,250 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
+ * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
+ * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
+ * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
+ * IP32 changes by Ilya.
+ * Copyright (C) 2010 Cavium Networks, Inc.
+ */
+#include <linux/dma-direct.h>
+#include <linux/memblock.h>
+#include <linux/swiotlb.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#include <asm/bootinfo.h>
+
+#include <asm/octeon/octeon.h>
+
+#ifdef CONFIG_PCI
+#include <linux/pci.h>
+#include <asm/octeon/pci-octeon.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-pci-defs.h>
+
+struct octeon_dma_map_ops {
+ dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
+ phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
+};
+
+static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
+{
+ if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
+ return paddr - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
+ else
+ return paddr;
+}
+
+static phys_addr_t octeon_hole_dma_to_phys(dma_addr_t daddr)
+{
+ if (daddr >= CVMX_PCIE_BAR1_RC_BASE)
+ return daddr + CVMX_PCIE_BAR1_PHYS_BASE - CVMX_PCIE_BAR1_RC_BASE;
+ else
+ return daddr;
+}
+
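+/*
+ * Memory in the 0x410000000-0x420000000 alias is mapped down by 16GB so that
+ * PCI sees it in the 256MB-512MB window, then the BAR1 hole translation is
+ * applied on top.
+ */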
+static dma_addr_t octeon_gen1_phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+ paddr -= 0x400000000ull;
+ return octeon_hole_phys_to_dma(paddr);
+}
+
+static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ daddr = octeon_hole_dma_to_phys(daddr);
+
+ if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+ daddr += 0x400000000ull;
+
+ return daddr;
+}
+
+static const struct octeon_dma_map_ops octeon_gen1_ops = {
+ .phys_to_dma = octeon_gen1_phys_to_dma,
+ .dma_to_phys = octeon_gen1_dma_to_phys,
+};
+
+static dma_addr_t octeon_gen2_phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return octeon_hole_phys_to_dma(paddr);
+}
+
+static phys_addr_t octeon_gen2_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return octeon_hole_dma_to_phys(daddr);
+}
+
+static const struct octeon_dma_map_ops octeon_gen2_ops = {
+ .phys_to_dma = octeon_gen2_phys_to_dma,
+ .dma_to_phys = octeon_gen2_dma_to_phys,
+};
+
+static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+ paddr -= 0x400000000ull;
+
+ /* Anything in the BAR1 hole or above goes via BAR2 */
+ if (paddr >= 0xf0000000ull)
+ paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;
+
+ return paddr;
+}
+
+static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
+ daddr -= OCTEON_BAR2_PCI_ADDRESS;
+
+ if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+ daddr += 0x400000000ull;
+ return daddr;
+}
+
+static const struct octeon_dma_map_ops octeon_big_ops = {
+ .phys_to_dma = octeon_big_phys_to_dma,
+ .dma_to_phys = octeon_big_dma_to_phys,
+};
+
+static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
+ phys_addr_t paddr)
+{
+ if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+ paddr -= 0x400000000ull;
+
+ /* Anything not in the BAR1 range goes via BAR2 */
+ if (paddr >= octeon_bar1_pci_phys && paddr < octeon_bar1_pci_phys + 0x8000000ull)
+ paddr = paddr - octeon_bar1_pci_phys;
+ else
+ paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;
+
+ return paddr;
+}
+
+static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
+ dma_addr_t daddr)
+{
+ if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
+ daddr -= OCTEON_BAR2_PCI_ADDRESS;
+ else
+ daddr += octeon_bar1_pci_phys;
+
+ if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+ daddr += 0x400000000ull;
+ return daddr;
+}
+
+static const struct octeon_dma_map_ops octeon_small_ops = {
+ .phys_to_dma = octeon_small_phys_to_dma,
+ .dma_to_phys = octeon_small_dma_to_phys,
+};
+
+static const struct octeon_dma_map_ops *octeon_pci_dma_ops;
+
+void __init octeon_pci_dma_init(void)
+{
+ switch (octeon_dma_bar_type) {
+ case OCTEON_DMA_BAR_TYPE_PCIE:
+ octeon_pci_dma_ops = &octeon_gen1_ops;
+ break;
+ case OCTEON_DMA_BAR_TYPE_PCIE2:
+ octeon_pci_dma_ops = &octeon_gen2_ops;
+ break;
+ case OCTEON_DMA_BAR_TYPE_BIG:
+ octeon_pci_dma_ops = &octeon_big_ops;
+ break;
+ case OCTEON_DMA_BAR_TYPE_SMALL:
+ octeon_pci_dma_ops = &octeon_small_ops;
+ break;
+ default:
+ BUG();
+ }
+}
+#endif /* CONFIG_PCI */
+
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+#ifdef CONFIG_PCI
+ if (dev && dev_is_pci(dev))
+ return octeon_pci_dma_ops->phys_to_dma(dev, paddr);
+#endif
+ return paddr;
+}
+
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+#ifdef CONFIG_PCI
+ if (dev && dev_is_pci(dev))
+ return octeon_pci_dma_ops->dma_to_phys(dev, daddr);
+#endif
+ return daddr;
+}
+
+char *octeon_swiotlb;
+
+void __init plat_swiotlb_setup(void)
+{
+ phys_addr_t start, end;
+ phys_addr_t max_addr;
+ phys_addr_t addr_size;
+ size_t swiotlbsize;
+ unsigned long swiotlb_nslabs;
+ u64 i;
+
+ max_addr = 0;
+ addr_size = 0;
+
+ for_each_mem_range(i, &start, &end) {
+ /* These addresses map low for PCI. */
+ if (start > 0x410000000ull && !OCTEON_IS_OCTEON2())
+ continue;
+
+ addr_size += (end - start);
+
+ if (max_addr < end)
+ max_addr = end;
+ }
+
+ swiotlbsize = PAGE_SIZE;
+
+#ifdef CONFIG_PCI
+ /*
+ * For OCTEON_DMA_BAR_TYPE_SMALL, size the iotlb at 1/4 memory
+ * size to a maximum of 64MB
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+ || OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
+ swiotlbsize = addr_size / 4;
+ if (swiotlbsize > 64 * (1<<20))
+ swiotlbsize = 64 * (1<<20);
+ } else if (max_addr > 0xf0000000ul) {
+ /*
+ * Otherwise only allocate a big iotlb if there is
+ * memory past the BAR1 hole.
+ */
+ swiotlbsize = 64 * (1<<20);
+ }
+#endif
+#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
+ /* OCTEON II ohci is only 32-bit. */
+ if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
+ swiotlbsize = 64 * (1<<20);
+#endif
+ swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
+ swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
+ swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
+
+ octeon_swiotlb = memblock_alloc_low(swiotlbsize, PAGE_SIZE);
+ if (!octeon_swiotlb)
+ panic("%s: Failed to allocate %zu bytes align=%lx\n",
+ __func__, swiotlbsize, PAGE_SIZE);
+
+ if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
+ panic("Cannot allocate SWIOTLB buffer");
+}
diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile
new file mode 100644
index 000000000..50b427879
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for the Cavium Octeon specific kernel interface routines
+# under Linux.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2005-2008 Cavium Networks
+#
+
+obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
+obj-y += cvmx-pko.o cvmx-spi.o cvmx-cmd-queue.o \
+ cvmx-helper-board.o cvmx-helper.o cvmx-helper-xaui.o \
+ cvmx-helper-rgmii.o cvmx-helper-sgmii.o cvmx-helper-npi.o \
+ cvmx-helper-loop.o cvmx-helper-spi.o cvmx-helper-util.o \
+ cvmx-interrupt-decodes.o cvmx-interrupt-rsl.o
+
+obj-y += cvmx-helper-errata.o cvmx-helper-jtag.o cvmx-boot-vector.o
diff --git a/arch/mips/cavium-octeon/executive/cvmx-boot-vector.c b/arch/mips/cavium-octeon/executive/cvmx-boot-vector.c
new file mode 100644
index 000000000..b7019d218
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-boot-vector.c
@@ -0,0 +1,167 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2017 Cavium, Inc.
+ */
+
+
+/*
+ We install this program at the bootvector:
+------------------------------------
+ .set noreorder
+ .set nomacro
+ .set noat
+reset_vector:
+ dmtc0 $k0, $31, 0 # Save $k0 to DESAVE
+ dmtc0 $k1, $31, 3 # Save $k1 to KScratch2
+
+ mfc0 $k0, $12, 0 # Status
+ mfc0 $k1, $15, 1 # Ebase
+
+ ori $k0, 0x84 # Enable 64-bit addressing, set
+ # ERL (should already be set)
+ andi $k1, 0x3ff # mask out core ID
+
+ mtc0 $k0, $12, 0 # Status
+ sll $k1, 5
+
+ lui $k0, 0xbfc0
+ cache 17, 0($0) # Core-14345, clear L1 Dcache virtual
+ # tags if the core hit an NMI
+
+ ld $k0, 0x78($k0) # k0 <- (bfc00078) pointer to the reset vector
+ synci 0($0) # Invalidate ICache to get coherent
+ # view of target code.
+
+ daddu $k0, $k0, $k1
+ nop
+
+ ld $k0, 0($k0) # k0 <- core specific target address
+ dmfc0 $k1, $31, 3 # Restore $k1 from KScratch2
+
+ beqz $k0, wait_loop # Spin in wait loop
+ nop
+
+ jr $k0
+ nop
+
+ nop # NOPs needed here to fill delay slots
+ nop # on endian reversal of previous instructions
+
+wait_loop:
+ wait
+ nop
+
+ b wait_loop
+ nop
+
+ nop
+ nop
+------------------------------------
+
+0000000000000000 <reset_vector>:
+ 0: 40baf800 dmtc0 k0,c0_desave
+ 4: 40bbf803 dmtc0 k1,c0_kscratch2
+
+ 8: 401a6000 mfc0 k0,c0_status
+ c: 401b7801 mfc0 k1,c0_ebase
+
+ 10: 375a0084 ori k0,k0,0x84
+ 14: 337b03ff andi k1,k1,0x3ff
+
+ 18: 409a6000 mtc0 k0,c0_status
+ 1c: 001bd940 sll k1,k1,0x5
+
+ 20: 3c1abfc0 lui k0,0xbfc0
+ 24: bc110000 cache 0x11,0(zero)
+
+ 28: df5a0078 ld k0,120(k0)
+ 2c: 041f0000 synci 0(zero)
+
+ 30: 035bd02d daddu k0,k0,k1
+ 34: 00000000 nop
+
+ 38: df5a0000 ld k0,0(k0)
+ 3c: 403bf803 dmfc0 k1,c0_kscratch2
+
+ 40: 13400005 beqz k0,58 <wait_loop>
+ 44: 00000000 nop
+
+ 48: 03400008 jr k0
+ 4c: 00000000 nop
+
+ 50: 00000000 nop
+ 54: 00000000 nop
+
+0000000000000058 <wait_loop>:
+ 58: 42000020 wait
+ 5c: 00000000 nop
+
+ 60: 1000fffd b 58 <wait_loop>
+ 64: 00000000 nop
+
+ 68: 00000000 nop
+ 6c: 00000000 nop
+
+ */
+
+#include <asm/octeon/cvmx-boot-vector.h>
+
+static unsigned long long _cvmx_bootvector_data[16] = {
+	0x40baf80040bbf803ull, /* patch low order 8-bits if no KScratch */
+ 0x401a6000401b7801ull,
+ 0x375a0084337b03ffull,
+ 0x409a6000001bd940ull,
+ 0x3c1abfc0bc110000ull,
+ 0xdf5a0078041f0000ull,
+ 0x035bd02d00000000ull,
+	0xdf5a0000403bf803ull, /* patch low order 8-bits if no KScratch */
+ 0x1340000500000000ull,
+ 0x0340000800000000ull,
+ 0x0000000000000000ull,
+ 0x4200002000000000ull,
+ 0x1000fffd00000000ull,
+ 0x0000000000000000ull,
+ OCTEON_BOOT_MOVEABLE_MAGIC1,
+ 0 /* To be filled in with address of vector block*/
+};
+
+/* 2^10 CPUs */
+#define VECTOR_TABLE_SIZE (1024 * sizeof(struct cvmx_boot_vector_element))
+
+static void cvmx_boot_vector_init(void *mem)
+{
+ uint64_t kseg0_mem;
+ int i;
+
+ memset(mem, 0, VECTOR_TABLE_SIZE);
+ kseg0_mem = cvmx_ptr_to_phys(mem) | 0x8000000000000000ull;
+
+ for (i = 0; i < 15; i++) {
+ uint64_t v = _cvmx_bootvector_data[i];
+
+ if (OCTEON_IS_OCTEON1PLUS() && (i == 0 || i == 7))
+			v &= 0xffffffff00000000ull; /* KScratch not available. */
+ cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, i * 8);
+ cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, v);
+ }
+ cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, 15 * 8);
+ cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, kseg0_mem);
+ cvmx_write_csr(CVMX_MIO_BOOT_LOC_CFGX(0), 0x81fc0000);
+}
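+
+/*
+ * Illustrative sketch only (not part of the Cavium sources): the address
+ * arithmetic performed by the reset stub listed above.  The pointer stored
+ * at 0xbfc00078 locates the per-core table, each element is 1 << 5 == 32
+ * bytes (the "sll $k1, 5"), and the first doubleword of an element is
+ * assumed to hold the core-specific jump target (0 means spin in the wait
+ * loop).
+ */
+static inline uint64_t cvmx_boot_vector_target_sketch(uint64_t table_base,
+						       unsigned int core_id)
+{
+	uint64_t element = table_base + ((uint64_t)core_id << 5);
+
+	return *(volatile uint64_t *)(unsigned long)element;
+}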
+
+/**
+ * Get a pointer to the per-core table of reset vector pointers
+ *
+ */
+struct cvmx_boot_vector_element *cvmx_boot_vector_get(void)
+{
+ struct cvmx_boot_vector_element *ret;
+
+ ret = cvmx_bootmem_alloc_named_range_once(VECTOR_TABLE_SIZE, 0,
+ (1ull << 32) - 1, 8, "__boot_vector1__", cvmx_boot_vector_init);
+ return ret;
+}
+EXPORT_SYMBOL(cvmx_boot_vector_get);
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
new file mode 100644
index 000000000..e794b2d53
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -0,0 +1,796 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Simple allocate-only memory allocator. Used to allocate memory at
+ * application start time.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-spinlock.h>
+#include <asm/octeon/cvmx-bootmem.h>
+
+/*#define DEBUG */
+
+
+static struct cvmx_bootmem_desc *cvmx_bootmem_desc;
+
+/* See header file for descriptions of functions */
+
+/**
+ * This macro returns a member of the
+ * cvmx_bootmem_named_block_desc_t structure. These members can't
+ * be directly addressed as they might be in memory not directly
+ * reachable. In the case where bootmem is compiled with
+ * LINUX_HOST, the structure itself might be located on a remote
+ * Octeon. The argument "field" is the member name of the
+ * cvmx_bootmem_named_block_desc_t to read. Regardless of the type
+ * of the field, the return type is always a uint64_t. The "addr"
+ * parameter is the physical address of the structure.
+ */
+#define CVMX_BOOTMEM_NAMED_GET_FIELD(addr, field) \
+ __cvmx_bootmem_desc_get(addr, \
+ offsetof(struct cvmx_bootmem_named_block_desc, field), \
+ sizeof_field(struct cvmx_bootmem_named_block_desc, field))
+
+/**
+ * This function is the implementation of the get macros defined
+ * for individual structure members. The arguments are generated
+ * by the macros in order to read only the needed memory.
+ *
+ * @param base 64bit physical address of the complete structure
+ * @param offset Offset from the beginning of the structure to the member being
+ * accessed.
+ * @param size Size of the structure member.
+ *
+ * @return Value of the structure member promoted into a uint64_t.
+ */
+static inline uint64_t __cvmx_bootmem_desc_get(uint64_t base, int offset,
+ int size)
+{
+ base = (1ull << 63) | (base + offset);
+ switch (size) {
+ case 4:
+ return cvmx_read64_uint32(base);
+ case 8:
+ return cvmx_read64_uint64(base);
+ default:
+ return 0;
+ }
+}
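+
+/*
+ * Illustrative use of the accessor above (editorial sketch, not from the
+ * Cavium sources): given the physical address of a named block descriptor,
+ * fetch its base address and size without assuming the descriptor is
+ * directly addressable.
+ */
+static inline void __cvmx_bootmem_named_block_read_sketch(uint64_t desc_phys,
+							   uint64_t *base,
+							   uint64_t *size)
+{
+	*base = CVMX_BOOTMEM_NAMED_GET_FIELD(desc_phys, base_addr);
+	*size = CVMX_BOOTMEM_NAMED_GET_FIELD(desc_phys, size);
+}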
+
+/*
+ * Wrapper functions are provided for reading/writing the size and
+ * next block values as these may not be directly addressable (in 32
+ * bit applications, for instance). Offsets of data elements in the
+ * bootmem list must match cvmx_bootmem_block_header_t.
+ */
+#define NEXT_OFFSET 0
+#define SIZE_OFFSET 8
+
+static void cvmx_bootmem_phy_set_size(uint64_t addr, uint64_t size)
+{
+ cvmx_write64_uint64((addr + SIZE_OFFSET) | (1ull << 63), size);
+}
+
+static void cvmx_bootmem_phy_set_next(uint64_t addr, uint64_t next)
+{
+ cvmx_write64_uint64((addr + NEXT_OFFSET) | (1ull << 63), next);
+}
+
+static uint64_t cvmx_bootmem_phy_get_size(uint64_t addr)
+{
+ return cvmx_read64_uint64((addr + SIZE_OFFSET) | (1ull << 63));
+}
+
+static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr)
+{
+ return cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63));
+}
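+
+/*
+ * Editorial sketch of the implicit free-block header the wrappers above
+ * read and write: the first 16 bytes of every free block hold the physical
+ * address of the next free block and the block's size, matching
+ * NEXT_OFFSET and SIZE_OFFSET (field names here are illustrative; the real
+ * layout is cvmx_bootmem_block_header_t).
+ */
+struct cvmx_bootmem_free_block_sketch {
+	uint64_t next_block_addr;	/* offset 0: NEXT_OFFSET */
+	uint64_t size;			/* offset 8: SIZE_OFFSET */
+};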
+
+/**
+ * Allocate a block of memory from the free list that was
+ * passed to the application by the bootloader within a specified
+ * address range. This is an allocate-only algorithm, so
+ * freeing memory is not possible. Allocation will fail if
+ * memory cannot be allocated in the requested range.
+ *
+ * @size: Size in bytes of block to allocate
+ * @min_addr: defines the minimum address of the range
+ * @max_addr: defines the maximum address of the range
+ * @alignment: Alignment required - must be power of 2
+ * Returns pointer to block of memory, NULL on error
+ */
+static void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
+ uint64_t min_addr, uint64_t max_addr)
+{
+ int64_t address;
+ address =
+ cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, 0);
+
+ if (address > 0)
+ return cvmx_phys_to_ptr(address);
+ else
+ return NULL;
+}
+
+void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address,
+ uint64_t alignment)
+{
+ return cvmx_bootmem_alloc_range(size, alignment, address,
+ address + size);
+}
+
+void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr,
+ uint64_t max_addr, uint64_t align,
+ char *name)
+{
+ int64_t addr;
+
+ addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr,
+ align, name, 0);
+ if (addr >= 0)
+ return cvmx_phys_to_ptr(addr);
+ else
+ return NULL;
+}
+
+void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, char *name)
+{
+ return cvmx_bootmem_alloc_named_range(size, 0, 0, alignment, name);
+}
+EXPORT_SYMBOL(cvmx_bootmem_alloc_named);
+
+void cvmx_bootmem_lock(void)
+{
+ cvmx_spinlock_lock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
+}
+
+void cvmx_bootmem_unlock(void)
+{
+ cvmx_spinlock_unlock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
+}
+
+int cvmx_bootmem_init(void *mem_desc_ptr)
+{
+ /* Here we set the global pointer to the bootmem descriptor
+ * block. This pointer will be used directly, so we will set
+ * it up to be directly usable by the application. It is set
+ * up as follows for the various runtime/ABI combinations:
+ *
+ * Linux 64 bit: Set XKPHYS bit
+ * Linux 32 bit: use mmap to create mapping, use virtual address
+ * CVMX 64 bit: use physical address directly
+ * CVMX 32 bit: use physical address directly
+ *
+ * Note that the CVMX environment assumes the use of 1-1 TLB
+ * mappings so that the physical addresses can be used
+ * directly
+ */
+ if (!cvmx_bootmem_desc) {
+#if defined(CVMX_ABI_64)
+ /* Set XKPHYS bit */
+ cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));
+#else
+ cvmx_bootmem_desc = (struct cvmx_bootmem_desc *) mem_desc_ptr;
+#endif
+ }
+
+ return 0;
+}
+
+/*
+ * The cvmx_bootmem_phy* functions below return 64 bit physical
+ * addresses, and expose more features than the cvmx_bootmem functions
+ * above. These are required for full memory space access in 32 bit
+ * applications, as well as for using some advanced features. Most
+ * applications should not need to use these.
+ */
+
+int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
+ uint64_t address_max, uint64_t alignment,
+ uint32_t flags)
+{
+
+ uint64_t head_addr;
+ uint64_t ent_addr;
+	/* points to the previous list entry; 0 when the current entry is the head of the list */
+ uint64_t prev_addr = 0;
+ uint64_t new_ent_addr = 0;
+ uint64_t desired_min_addr;
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, "
+ "min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
+ (unsigned long long)req_size,
+ (unsigned long long)address_min,
+ (unsigned long long)address_max,
+ (unsigned long long)alignment);
+#endif
+
+ if (cvmx_bootmem_desc->major_version > 3) {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
+ "version: %d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version,
+ (int)cvmx_bootmem_desc->minor_version,
+ cvmx_bootmem_desc);
+ goto error_out;
+ }
+
+ /*
+ * Do a variety of checks to validate the arguments. The
+ * allocator code will later assume that these checks have
+ * been made. We validate that the requested constraints are
+ * not self-contradictory before we look through the list of
+ * available memory.
+ */
+
+ /* 0 is not a valid req_size for this allocator */
+ if (!req_size)
+ goto error_out;
+
+ /* Round req_size up to mult of minimum alignment bytes */
+ req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
+ ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
+
+ /*
+ * Convert !0 address_min and 0 address_max to special case of
+ * range that specifies an exact memory block to allocate. Do
+ * this before other checks and adjustments so that this
+	 * transformation will be validated.
+ */
+ if (address_min && !address_max)
+ address_max = address_min + req_size;
+ else if (!address_min && !address_max)
+ address_max = ~0ull; /* If no limits given, use max limits */
+
+
+ /*
+ * Enforce minimum alignment (this also keeps the minimum free block
+	 * req_size the same as the alignment req_size).
+ */
+ if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
+ alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;
+
+ /*
+ * Adjust address minimum based on requested alignment (round
+ * up to meet alignment). Do this here so we can reject
+ * impossible requests up front. (NOP for address_min == 0)
+ */
+ if (alignment)
+ address_min = ALIGN(address_min, alignment);
+
+ /*
+ * Reject inconsistent args. We have adjusted these, so this
+ * may fail due to our internal changes even if this check
+ * would pass for the values the user supplied.
+ */
+ if (req_size > address_max - address_min)
+ goto error_out;
+
+ /* Walk through the list entries - first fit found is returned */
+
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_lock();
+ head_addr = cvmx_bootmem_desc->head_addr;
+ ent_addr = head_addr;
+ for (; ent_addr;
+ prev_addr = ent_addr,
+ ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) {
+ uint64_t usable_base, usable_max;
+ uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);
+
+ if (cvmx_bootmem_phy_get_next(ent_addr)
+ && ent_addr > cvmx_bootmem_phy_get_next(ent_addr)) {
+ cvmx_dprintf("Internal bootmem_alloc() error: ent: "
+ "0x%llx, next: 0x%llx\n",
+ (unsigned long long)ent_addr,
+ (unsigned long long)
+ cvmx_bootmem_phy_get_next(ent_addr));
+ goto error_out;
+ }
+
+ /*
+		 * Determine if this is an entry that can satisfy the
+		 * request. Check to make sure the entry is large enough
+		 * to satisfy the request.
+ */
+ usable_base =
+ ALIGN(max(address_min, ent_addr), alignment);
+ usable_max = min(address_max, ent_addr + ent_size);
+ /*
+ * We should be able to allocate block at address
+ * usable_base.
+ */
+
+ desired_min_addr = usable_base;
+ /*
+ * Determine if request can be satisfied from the
+ * current entry.
+ */
+ if (!((ent_addr + ent_size) > usable_base
+ && ent_addr < address_max
+ && req_size <= usable_max - usable_base))
+ continue;
+ /*
+ * We have found an entry that has room to satisfy the
+		 * request, so allocate it from this entry. If
+		 * CVMX_BOOTMEM_FLAG_END_ALLOC is set, then allocate from
+ * the end of this block rather than the beginning.
+ */
+ if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC) {
+ desired_min_addr = usable_max - req_size;
+ /*
+ * Align desired address down to required
+ * alignment.
+ */
+ desired_min_addr &= ~(alignment - 1);
+ }
+
+ /* Match at start of entry */
+ if (desired_min_addr == ent_addr) {
+ if (req_size < ent_size) {
+ /*
+ * big enough to create a new block
+ * from top portion of block.
+ */
+ new_ent_addr = ent_addr + req_size;
+ cvmx_bootmem_phy_set_next(new_ent_addr,
+ cvmx_bootmem_phy_get_next(ent_addr));
+ cvmx_bootmem_phy_set_size(new_ent_addr,
+ ent_size -
+ req_size);
+
+ /*
+ * Adjust next pointer as following
+ * code uses this.
+ */
+ cvmx_bootmem_phy_set_next(ent_addr,
+ new_ent_addr);
+ }
+
+ /*
+ * adjust prev ptr or head to remove this
+ * entry from list.
+ */
+ if (prev_addr)
+ cvmx_bootmem_phy_set_next(prev_addr,
+ cvmx_bootmem_phy_get_next(ent_addr));
+ else
+ /*
+ * head of list being returned, so
+ * update head ptr.
+ */
+ cvmx_bootmem_desc->head_addr =
+ cvmx_bootmem_phy_get_next(ent_addr);
+
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_unlock();
+ return desired_min_addr;
+ }
+		/*
+		 * The block being returned doesn't start at the beginning
+		 * of this entry, so split the entry: shrink the current
+		 * entry to cover the low portion and create a new entry
+		 * for the high portion, which starts at the desired
+		 * address. The loop then continues and the allocation is
+		 * satisfied from the start of the new entry.
+		 */
+ new_ent_addr = desired_min_addr;
+ cvmx_bootmem_phy_set_next(new_ent_addr,
+ cvmx_bootmem_phy_get_next
+ (ent_addr));
+ cvmx_bootmem_phy_set_size(new_ent_addr,
+ cvmx_bootmem_phy_get_size
+ (ent_addr) -
+ (desired_min_addr -
+ ent_addr));
+ cvmx_bootmem_phy_set_size(ent_addr,
+ desired_min_addr - ent_addr);
+ cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
+ /* Loop again to handle actual alloc from new block */
+ }
+error_out:
+ /* We didn't find anything, so return error */
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_unlock();
+ return -1;
+}
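+
+/*
+ * Standalone editorial sketch (not from the Cavium sources) of the
+ * per-entry feasibility test used in the first-fit walk above: clamp the
+ * entry to [address_min, address_max], round the usable base up to the
+ * requested alignment, and check that enough room remains.
+ */
+static inline int __cvmx_bootmem_entry_fits_sketch(uint64_t ent_addr,
+						   uint64_t ent_size,
+						   uint64_t address_min,
+						   uint64_t address_max,
+						   uint64_t alignment,
+						   uint64_t req_size)
+{
+	uint64_t base = ent_addr > address_min ? ent_addr : address_min;
+	uint64_t usable_base = ALIGN(base, alignment);
+	uint64_t ent_end = ent_addr + ent_size;
+	uint64_t usable_max = ent_end < address_max ? ent_end : address_max;
+
+	return ent_end > usable_base && ent_addr < address_max &&
+	       req_size <= usable_max - usable_base;
+}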
+
+int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
+{
+ uint64_t cur_addr;
+ uint64_t prev_addr = 0; /* zero is invalid */
+ int retval = 0;
+
+#ifdef DEBUG
+ cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n",
+ (unsigned long long)phy_addr, (unsigned long long)size);
+#endif
+ if (cvmx_bootmem_desc->major_version > 3) {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
+ "version: %d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version,
+ (int)cvmx_bootmem_desc->minor_version,
+ cvmx_bootmem_desc);
+ return 0;
+ }
+
+ /* 0 is not a valid size for this allocator */
+ if (!size)
+ return 0;
+
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_lock();
+ cur_addr = cvmx_bootmem_desc->head_addr;
+ if (cur_addr == 0 || phy_addr < cur_addr) {
+ /* add at front of list - special case with changing head ptr */
+ if (cur_addr && phy_addr + size > cur_addr)
+ goto bootmem_free_done; /* error, overlapping section */
+ else if (phy_addr + size == cur_addr) {
+ /* Add to front of existing first block */
+ cvmx_bootmem_phy_set_next(phy_addr,
+ cvmx_bootmem_phy_get_next
+ (cur_addr));
+ cvmx_bootmem_phy_set_size(phy_addr,
+ cvmx_bootmem_phy_get_size
+ (cur_addr) + size);
+ cvmx_bootmem_desc->head_addr = phy_addr;
+
+ } else {
+ /* New block before first block. OK if cur_addr is 0 */
+ cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ cvmx_bootmem_desc->head_addr = phy_addr;
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ }
+
+ /* Find place in list to add block */
+ while (cur_addr && phy_addr > cur_addr) {
+ prev_addr = cur_addr;
+ cur_addr = cvmx_bootmem_phy_get_next(cur_addr);
+ }
+
+ if (!cur_addr) {
+ /*
+ * We have reached the end of the list, add on to end,
+ * checking to see if we need to combine with last
+ * block
+ */
+ if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
+ phy_addr) {
+ cvmx_bootmem_phy_set_size(prev_addr,
+ cvmx_bootmem_phy_get_size
+ (prev_addr) + size);
+ } else {
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ cvmx_bootmem_phy_set_next(phy_addr, 0);
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ } else {
+ /*
+ * insert between prev and cur nodes, checking for
+ * merge with either/both.
+ */
+ if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
+ phy_addr) {
+ /* Merge with previous */
+ cvmx_bootmem_phy_set_size(prev_addr,
+ cvmx_bootmem_phy_get_size
+ (prev_addr) + size);
+ if (phy_addr + size == cur_addr) {
+ /* Also merge with current */
+ cvmx_bootmem_phy_set_size(prev_addr,
+ cvmx_bootmem_phy_get_size(cur_addr) +
+ cvmx_bootmem_phy_get_size(prev_addr));
+ cvmx_bootmem_phy_set_next(prev_addr,
+ cvmx_bootmem_phy_get_next(cur_addr));
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ } else if (phy_addr + size == cur_addr) {
+ /* Merge with current */
+ cvmx_bootmem_phy_set_size(phy_addr,
+ cvmx_bootmem_phy_get_size
+ (cur_addr) + size);
+ cvmx_bootmem_phy_set_next(phy_addr,
+ cvmx_bootmem_phy_get_next
+ (cur_addr));
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+ retval = 1;
+ goto bootmem_free_done;
+ }
+
+ /* It is a standalone block, add in between prev and cur */
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+
+ }
+ retval = 1;
+
+bootmem_free_done:
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_unlock();
+ return retval;
+
+}
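+
+/*
+ * Editorial sketch of the two adjacency tests that drive the coalescing
+ * above: a freed region merges with its predecessor when it starts exactly
+ * where the predecessor ends, and with its successor when it ends exactly
+ * where the successor starts; both can hold at once, collapsing three
+ * blocks into one.
+ */
+static inline int __cvmx_bootmem_merges_prev_sketch(uint64_t prev_addr,
+						    uint64_t prev_size,
+						    uint64_t phy_addr)
+{
+	return prev_addr + prev_size == phy_addr;
+}
+
+static inline int __cvmx_bootmem_merges_next_sketch(uint64_t phy_addr,
+						    uint64_t size,
+						    uint64_t next_addr)
+{
+	return phy_addr + size == next_addr;
+}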
+
+/**
+ * Finds a named memory block by name.
+ * Also used for finding an unused entry in the named block table.
+ *
+ * @name: Name of memory block to find. If NULL pointer given, then
+ * finds unused descriptor, if available.
+ *
+ * @flags: Flags to control options for the allocation.
+ *
+ * Returns Pointer to memory block descriptor, NULL if not found.
+ * If NULL returned when name parameter is NULL, then no memory
+ * block descriptors are available.
+ */
+static struct cvmx_bootmem_named_block_desc *
+ cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags)
+{
+ unsigned int i;
+ struct cvmx_bootmem_named_block_desc *named_block_array_ptr;
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_find: %s\n", name);
+#endif
+ /*
+ * Lock the structure to make sure that it is not being
+ * changed while we are examining it.
+ */
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_lock();
+
+ /* Use XKPHYS for 64 bit linux */
+ named_block_array_ptr = (struct cvmx_bootmem_named_block_desc *)
+ cvmx_phys_to_ptr(cvmx_bootmem_desc->named_block_array_addr);
+
+#ifdef DEBUG
+ cvmx_dprintf
+ ("cvmx_bootmem_phy_named_block_find: named_block_array_ptr: %p\n",
+ named_block_array_ptr);
+#endif
+ if (cvmx_bootmem_desc->major_version == 3) {
+ for (i = 0;
+ i < cvmx_bootmem_desc->named_block_num_blocks; i++) {
+ if ((name && named_block_array_ptr[i].size
+ && !strncmp(name, named_block_array_ptr[i].name,
+ cvmx_bootmem_desc->named_block_name_len
+ - 1))
+ || (!name && !named_block_array_ptr[i].size)) {
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_unlock();
+
+ return &(named_block_array_ptr[i]);
+ }
+ }
+ } else {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
+ "version: %d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version,
+ (int)cvmx_bootmem_desc->minor_version,
+ cvmx_bootmem_desc);
+ }
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_unlock();
+
+ return NULL;
+}
+
+void *cvmx_bootmem_alloc_named_range_once(uint64_t size, uint64_t min_addr,
+ uint64_t max_addr, uint64_t align,
+ char *name,
+ void (*init) (void *))
+{
+ int64_t addr;
+ void *ptr;
+ uint64_t named_block_desc_addr;
+
+ named_block_desc_addr = (uint64_t)
+ cvmx_bootmem_phy_named_block_find(name,
+ (uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING);
+
+ if (named_block_desc_addr) {
+ addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_desc_addr,
+ base_addr);
+ return cvmx_phys_to_ptr(addr);
+ }
+
+ addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr,
+ align, name,
+ (uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING);
+
+ if (addr < 0)
+ return NULL;
+ ptr = cvmx_phys_to_ptr(addr);
+
+ if (init)
+ init(ptr);
+ else
+ memset(ptr, 0, size);
+
+ return ptr;
+}
+EXPORT_SYMBOL(cvmx_bootmem_alloc_named_range_once);
+
+struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name)
+{
+ return cvmx_bootmem_phy_named_block_find(name, 0);
+}
+EXPORT_SYMBOL(cvmx_bootmem_find_named_block);
+
+/**
+ * Frees a named block.
+ *
+ * @name: name of block to free
+ * @flags: flags for passing options
+ *
+ * Returns 0 on failure
+ * 1 on success
+ */
+static int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
+{
+ struct cvmx_bootmem_named_block_desc *named_block_ptr;
+
+ if (cvmx_bootmem_desc->major_version != 3) {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
+ "%d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version,
+ (int)cvmx_bootmem_desc->minor_version,
+ cvmx_bootmem_desc);
+ return 0;
+ }
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s\n", name);
+#endif
+
+ /*
+ * Take lock here, as name lookup/block free/name free need to
+ * be atomic.
+ */
+ cvmx_bootmem_lock();
+
+ named_block_ptr =
+ cvmx_bootmem_phy_named_block_find(name,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (named_block_ptr) {
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_free: "
+ "%s, base: 0x%llx, size: 0x%llx\n",
+ name,
+ (unsigned long long)named_block_ptr->base_addr,
+ (unsigned long long)named_block_ptr->size);
+#endif
+ __cvmx_bootmem_phy_free(named_block_ptr->base_addr,
+ named_block_ptr->size,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ named_block_ptr->size = 0;
+ /* Set size to zero to indicate block not used. */
+ }
+
+ cvmx_bootmem_unlock();
+ return named_block_ptr != NULL; /* 0 on failure, 1 on success */
+}
+
+int cvmx_bootmem_free_named(char *name)
+{
+ return cvmx_bootmem_phy_named_block_free(name, 0);
+}
+
+int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
+ uint64_t max_addr,
+ uint64_t alignment,
+ char *name,
+ uint32_t flags)
+{
+ int64_t addr_allocated;
+ struct cvmx_bootmem_named_block_desc *named_block_desc_ptr;
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: "
+ "0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n",
+ (unsigned long long)size,
+ (unsigned long long)min_addr,
+ (unsigned long long)max_addr,
+ (unsigned long long)alignment,
+ name);
+#endif
+ if (cvmx_bootmem_desc->major_version != 3) {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
+ "%d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version,
+ (int)cvmx_bootmem_desc->minor_version,
+ cvmx_bootmem_desc);
+ return -1;
+ }
+
+ /*
+ * Take lock here, as name lookup/block alloc/name add need to
+ * be atomic.
+ */
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+
+ /* Get pointer to first available named block descriptor */
+ named_block_desc_ptr =
+ cvmx_bootmem_phy_named_block_find(NULL,
+ flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+
+ /*
+ * Check to see if name already in use, return error if name
+ * not available or no more room for blocks.
+ */
+ if (cvmx_bootmem_phy_named_block_find(name,
+ flags | CVMX_BOOTMEM_FLAG_NO_LOCKING) || !named_block_desc_ptr) {
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ return -1;
+ }
+
+
+ /*
+	 * Round size up to a multiple of the minimum alignment bytes. We need
+ * the actual size allocated to allow for blocks to be
+ * coalesced when they are freed. The alloc routine does the
+ * same rounding up on all allocations.
+ */
+ size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE);
+
+ addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr,
+ alignment,
+ flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (addr_allocated >= 0) {
+ named_block_desc_ptr->base_addr = addr_allocated;
+ named_block_desc_ptr->size = size;
+ strncpy(named_block_desc_ptr->name, name,
+ cvmx_bootmem_desc->named_block_name_len);
+ named_block_desc_ptr->name[cvmx_bootmem_desc->named_block_name_len - 1] = 0;
+ }
+
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+ return addr_allocated;
+}
+
+struct cvmx_bootmem_desc *cvmx_bootmem_get_desc(void)
+{
+ return cvmx_bootmem_desc;
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c b/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
new file mode 100644
index 000000000..3839feba6
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
@@ -0,0 +1,307 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Support functions for managing command queues used for
+ * various hardware blocks.
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-fpa.h>
+#include <asm/octeon/cvmx-cmd-queue.h>
+
+#include <asm/octeon/cvmx-npei-defs.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include <asm/octeon/cvmx-pko-defs.h>
+
+/**
+ * This application uses this pointer to access the global queue
+ * state. It points to a bootmem named block.
+ */
+__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
+EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);
+
+/**
+ * Initialize the Global queue state pointer.
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
+{
+ char *alloc_name = "cvmx_cmd_queues";
+#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
+ extern uint64_t octeon_reserve32_memory;
+#endif
+
+ if (likely(__cvmx_cmd_queue_state_ptr))
+ return CVMX_CMD_QUEUE_SUCCESS;
+
+#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
+ if (octeon_reserve32_memory)
+ __cvmx_cmd_queue_state_ptr =
+ cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
+ octeon_reserve32_memory,
+ octeon_reserve32_memory +
+ (CONFIG_CAVIUM_RESERVE32 <<
+ 20) - 1, 128, alloc_name);
+ else
+#endif
+ __cvmx_cmd_queue_state_ptr =
+ cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
+ 128,
+ alloc_name);
+ if (__cvmx_cmd_queue_state_ptr)
+ memset(__cvmx_cmd_queue_state_ptr, 0,
+ sizeof(*__cvmx_cmd_queue_state_ptr));
+ else {
+ struct cvmx_bootmem_named_block_desc *block_desc =
+ cvmx_bootmem_find_named_block(alloc_name);
+ if (block_desc)
+ __cvmx_cmd_queue_state_ptr =
+ cvmx_phys_to_ptr(block_desc->base_addr);
+ else {
+ cvmx_dprintf
+ ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
+ alloc_name);
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ }
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Initialize a command queue for use. The initial FPA buffer is
+ * allocated and the hardware unit is configured to point to the
+ * new command queue.
+ *
+ * @queue_id: Hardware command queue to initialize.
+ * @max_depth: Maximum outstanding commands that can be queued.
+ * @fpa_pool: FPA pool the command queues should come from.
+ * @pool_size: Size of each buffer in the FPA pool (bytes)
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
+ int max_depth, int fpa_pool,
+ int pool_size)
+{
+ __cvmx_cmd_queue_state_t *qstate;
+ cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
+ if (result != CVMX_CMD_QUEUE_SUCCESS)
+ return result;
+
+ qstate = __cvmx_cmd_queue_get_state(queue_id);
+ if (qstate == NULL)
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+ /*
+ * We artificially limit max_depth to 1<<20 words. It is an
+ * arbitrary limit.
+ */
+ if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
+ if ((max_depth < 0) || (max_depth > 1 << 20))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ } else if (max_depth != 0)
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+ if ((fpa_pool < 0) || (fpa_pool > 7))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ if ((pool_size < 128) || (pool_size > 65536))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+ /* See if someone else has already initialized the queue */
+ if (qstate->base_ptr_div128) {
+ if (max_depth != (int)qstate->max_depth) {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+ "Queue already initialized with different "
+ "max_depth (%d).\n",
+ (int)qstate->max_depth);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ if (fpa_pool != qstate->fpa_pool) {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+ "Queue already initialized with different "
+ "FPA pool (%u).\n",
+ qstate->fpa_pool);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+ "Queue already initialized with different "
+ "FPA pool size (%u).\n",
+ (qstate->pool_size_m1 + 1) << 3);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ CVMX_SYNCWS;
+ return CVMX_CMD_QUEUE_ALREADY_SETUP;
+ } else {
+ union cvmx_fpa_ctl_status status;
+ void *buffer;
+
+ status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
+ if (!status.s.enb) {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+ "FPA is not enabled.\n");
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ buffer = cvmx_fpa_alloc(fpa_pool);
+ if (buffer == NULL) {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+ "Unable to allocate initial buffer.\n");
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+
+ memset(qstate, 0, sizeof(*qstate));
+ qstate->max_depth = max_depth;
+ qstate->fpa_pool = fpa_pool;
+ qstate->pool_size_m1 = (pool_size >> 3) - 1;
+ qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
+ /*
+ * We zeroed the now serving field so we need to also
+ * zero the ticket.
+ */
+ __cvmx_cmd_queue_state_ptr->
+ ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
+ CVMX_SYNCWS;
+ return CVMX_CMD_QUEUE_SUCCESS;
+ }
+}
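+
+/*
+ * Editorial sketch of the compact queue-state encoding used above: the
+ * command buffer's physical address is stored divided by 128 and the FPA
+ * buffer size is stored as (size / 8) - 1, so both fit in small bit-fields
+ * of the per-queue state.
+ */
+static inline uint64_t __cvmx_cmd_queue_base_encode_sketch(uint64_t phys)
+{
+	return phys / 128;		/* base_ptr_div128 */
+}
+
+static inline uint64_t __cvmx_cmd_queue_base_decode_sketch(uint64_t div128)
+{
+	return div128 << 7;		/* back to a physical address */
+}
+
+static inline unsigned int __cvmx_cmd_queue_pool_size_decode_sketch(unsigned int m1)
+{
+	return (m1 + 1) << 3;		/* pool_size_m1 back to bytes */
+}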
+
+/**
+ * Shut down a queue and free its command buffers to the FPA. The
+ * hardware connected to the queue must be stopped before this
+ * function is called.
+ *
+ * @queue_id: Queue to shutdown
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+ if (qptr == NULL) {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
+ "get queue information.\n");
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+
+ if (cvmx_cmd_queue_length(queue_id) > 0) {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
+ "has data in it.\n");
+ return CVMX_CMD_QUEUE_FULL;
+ }
+
+ __cvmx_cmd_queue_lock(queue_id, qptr);
+ if (qptr->base_ptr_div128) {
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ ((uint64_t) qptr->base_ptr_div128 << 7),
+ qptr->fpa_pool, 0);
+ qptr->base_ptr_div128 = 0;
+ }
+ __cvmx_cmd_queue_unlock(qptr);
+
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Return the number of command words pending in the queue. This
+ * function may be relatively slow for some hardware units.
+ *
+ * @queue_id: Hardware command queue to query
+ *
+ * Returns Number of outstanding commands
+ */
+int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
+{
+ if (CVMX_ENABLE_PARAMETER_CHECKING) {
+ if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+
+ /*
+	 * The cast is here so gcc will check that all values in the
+	 * cvmx_cmd_queue_id_t enumeration are handled.
+ */
+ switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
+ case CVMX_CMD_QUEUE_PKO_BASE:
+ /*
+ * FIXME: Need atomic lock on
+ * CVMX_PKO_REG_READ_IDX. Right now we are normally
+ * called with the queue lock, so that is a SLIGHT
+ * amount of protection.
+ */
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ union cvmx_pko_mem_debug9 debug9;
+ debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
+ return debug9.cn38xx.doorbell;
+ } else {
+ union cvmx_pko_mem_debug8 debug8;
+ debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
+ return debug8.cn50xx.doorbell;
+ }
+ case CVMX_CMD_QUEUE_ZIP:
+ case CVMX_CMD_QUEUE_DFA:
+ case CVMX_CMD_QUEUE_RAID:
+ /* FIXME: Implement other lengths */
+ return 0;
+ case CVMX_CMD_QUEUE_DMA_BASE:
+ {
+ union cvmx_npei_dmax_counts dmax_counts;
+ dmax_counts.u64 =
+ cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
+ (queue_id & 0x7));
+ return dmax_counts.s.dbell;
+ }
+ case CVMX_CMD_QUEUE_END:
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+}
+
+/**
+ * Return the command buffer to be written to. The purpose of this
+ * function is to allow CVMX routines access to the low level buffer
+ * for initial hardware setup. User applications should not call this
+ * function directly.
+ *
+ * @queue_id: Command queue to query
+ *
+ * Returns Command buffer or NULL on failure
+ */
+void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+ if (qptr && qptr->base_ptr_div128)
+ return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
+ else
+ return NULL;
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
new file mode 100644
index 000000000..9b791ccf8
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -0,0 +1,348 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Helper functions to abstract board specific data about
+ * network ports from the rest of the cvmx-helper files.
+ */
+
+#include <linux/bug.h>
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-bootinfo.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-util.h>
+#include <asm/octeon/cvmx-helper-board.h>
+
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-asxx-defs.h>
+
+/**
+ * Return the MII PHY address associated with the given IPD
+ * port. A result of -1 means there isn't a MII capable PHY
+ * connected to this port. On chips supporting multiple MII
+ * busses the bus number is encoded in bits <15:8>.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @ipd_port: Octeon IPD port to get the MII address for.
+ *
+ * Returns MII PHY address and bus number or -1.
+ */
+int cvmx_helper_board_get_mii_address(int ipd_port)
+{
+ switch (cvmx_sysinfo_get()->board_type) {
+ case CVMX_BOARD_TYPE_SIM:
+ /* Simulator doesn't have MII */
+ return -1;
+ case CVMX_BOARD_TYPE_EBT3000:
+ case CVMX_BOARD_TYPE_EBT5800:
+ case CVMX_BOARD_TYPE_THUNDER:
+ case CVMX_BOARD_TYPE_NICPRO2:
+ /* Interface 0 is SPI4, interface 1 is RGMII */
+ if ((ipd_port >= 16) && (ipd_port < 20))
+ return ipd_port - 16;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_KODAMA:
+ case CVMX_BOARD_TYPE_EBH3100:
+ case CVMX_BOARD_TYPE_HIKARI:
+ case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
+ case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+ case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
+ /*
+ * Port 0 is WAN connected to a PHY, Port 1 is GMII
+ * connected to a switch
+ */
+ if (ipd_port == 0)
+ return 4;
+ else if (ipd_port == 1)
+ return 9;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_NAC38:
+ /* Board has 8 RGMII ports PHYs are 0-7 */
+ if ((ipd_port >= 0) && (ipd_port < 4))
+ return ipd_port;
+ else if ((ipd_port >= 16) && (ipd_port < 20))
+ return ipd_port - 16 + 4;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_EBH3000:
+ /* Board has dual SPI4 and no PHYs */
+ return -1;
+ case CVMX_BOARD_TYPE_EBH5200:
+ case CVMX_BOARD_TYPE_EBH5201:
+ case CVMX_BOARD_TYPE_EBT5200:
+ /* Board has 2 management ports */
+ if ((ipd_port >= CVMX_HELPER_BOARD_MGMT_IPD_PORT) &&
+ (ipd_port < (CVMX_HELPER_BOARD_MGMT_IPD_PORT + 2)))
+ return ipd_port - CVMX_HELPER_BOARD_MGMT_IPD_PORT;
+ /*
+ * Board has 4 SGMII ports. The PHYs start right after the MII
+		 * ports: MII0 = 0, MII1 = 1, SGMII = 2-5.
+ */
+ if ((ipd_port >= 0) && (ipd_port < 4))
+ return ipd_port + 2;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_EBH5600:
+ case CVMX_BOARD_TYPE_EBH5601:
+ case CVMX_BOARD_TYPE_EBH5610:
+ /* Board has 1 management port */
+ if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
+ return 0;
+ /*
+ * Board has 8 SGMII ports. 4 connect out, two connect
+ * to a switch, and 2 loop to each other
+ */
+ if ((ipd_port >= 0) && (ipd_port < 4))
+ return ipd_port + 1;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_CUST_NB5:
+ if (ipd_port == 2)
+ return 4;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_NIC_XLE_4G:
+		/* Board has 4 SGMII ports connected to QLM3 (interface 1) */
+ if ((ipd_port >= 16) && (ipd_port < 20))
+ return ipd_port - 16 + 1;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_NIC_XLE_10G:
+ case CVMX_BOARD_TYPE_NIC10E:
+ return -1;
+ case CVMX_BOARD_TYPE_NIC4E:
+ if (ipd_port >= 0 && ipd_port <= 3)
+ return (ipd_port + 0x1f) & 0x1f;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_NIC2E:
+ if (ipd_port >= 0 && ipd_port <= 1)
+ return ipd_port + 1;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ /*
+ * No PHYs are connected to Octeon, everything is
+ * through switch.
+ */
+ return -1;
+
+ case CVMX_BOARD_TYPE_CUST_WSX16:
+ if (ipd_port >= 0 && ipd_port <= 3)
+ return ipd_port;
+ else if (ipd_port >= 16 && ipd_port <= 19)
+ return ipd_port - 16 + 4;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_UBNT_E100:
+ if (ipd_port >= 0 && ipd_port <= 2)
+ return 7 - ipd_port;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_KONTRON_S1901:
+ if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
+ return 1;
+ else
+ return -1;
+
+ }
+
+ /* Some unknown board. Somebody forgot to update this function... */
+ cvmx_dprintf
+ ("cvmx_helper_board_get_mii_address: Unknown board type %d\n",
+ cvmx_sysinfo_get()->board_type);
+ return -1;
+}
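+
+/*
+ * Editorial sketch of how a caller could split the value returned above on
+ * chips with multiple MII busses: the PHY address sits in the low byte and
+ * the bus number in bits <15:8>; -1 still means no MII-capable PHY.
+ */
+static inline int cvmx_helper_mii_phy_sketch(int mii_address)
+{
+	return mii_address & 0xff;
+}
+
+static inline int cvmx_helper_mii_bus_sketch(int mii_address)
+{
+	return (mii_address >> 8) & 0xff;
+}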
+
+/**
+ * This function is the board specific method of determining an
+ * Ethernet port's link speed. Most Octeon boards have Marvell PHYs
+ * and are handled by the fall through case. This function must be
+ * updated for boards that don't have the normal Marvell PHYs.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @ipd_port: IPD input port associated with the port we want to get link
+ * status for.
+ *
+ * Returns The ports link status. If the link isn't fully resolved, this must
+ * return zero.
+ */
+union cvmx_helper_link_info __cvmx_helper_board_link_get(int ipd_port)
+{
+ union cvmx_helper_link_info result;
+
+ WARN_ONCE(!octeon_is_simulation(),
+ "Using deprecated link status - please update your DT");
+
+ /* Unless we fix it later, all links are defaulted to down */
+ result.u64 = 0;
+
+ if (octeon_is_simulation()) {
+ /* The simulator gives you a simulated 1Gbps full duplex link */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ /*
+ * We don't have a PHY address, so attempt to use
+ * in-band status. It is really important that boards
+ * not supporting in-band status never get
+ * here. Reading broken in-band status tends to do bad
+ * things
+ */
+ union cvmx_gmxx_rxx_rx_inbnd inband_status;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ inband_status.u64 =
+ cvmx_read_csr(CVMX_GMXX_RXX_RX_INBND(index, interface));
+
+ result.s.link_up = inband_status.s.status;
+ result.s.full_duplex = inband_status.s.duplex;
+ switch (inband_status.s.speed) {
+ case 0: /* 10 Mbps */
+ result.s.speed = 10;
+ break;
+ case 1: /* 100 Mbps */
+ result.s.speed = 100;
+ break;
+ case 2: /* 1 Gbps */
+ result.s.speed = 1000;
+ break;
+ case 3: /* Illegal */
+ result.u64 = 0;
+ break;
+ }
+ } else {
+ /*
+ * We don't have a PHY address and we don't have
+ * in-band status. There is no way to determine the
+ * link speed. Return down assuming this port isn't
+ * wired
+ */
+ result.u64 = 0;
+ }
+
+ /* If link is down, return all fields as zero. */
+ if (!result.s.link_up)
+ result.u64 = 0;
+
+ return result;
+}
+
+/**
+ * This function is called by cvmx_helper_interface_probe() after it
+ * determines the number of ports Octeon can support on a specific
+ * interface. This function is the per board location to override
+ * this value. It is called with the number of ports Octeon might
+ * support and should return the number of actual ports on the
+ * board.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @interface: Interface to probe
+ * @supported_ports:
+ * Number of ports Octeon supports.
+ *
+ * Returns Number of ports the actual board supports. Many times this will
+ * simply be "supported_ports".
+ */
+int __cvmx_helper_board_interface_probe(int interface, int supported_ports)
+{
+ switch (cvmx_sysinfo_get()->board_type) {
+ case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+ if (interface == 0)
+ return 2;
+ break;
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ if (interface == 0)
+ return 2;
+ break;
+ case CVMX_BOARD_TYPE_NIC_XLE_4G:
+ if (interface == 0)
+ return 0;
+ break;
+	/* The 2nd interface on the EBH5600 is connected to the Marvell switch,
+ which we don't support. Disable ports connected to it */
+ case CVMX_BOARD_TYPE_EBH5600:
+ if (interface == 1)
+ return 0;
+ break;
+ }
+ return supported_ports;
+}
+
+/**
+ * Get the clock type used for the USB block based on board type.
+ * Used by the USB code for auto configuration of clock type.
+ *
+ * Return USB clock type enumeration
+ */
+enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(void)
+{
+ switch (cvmx_sysinfo_get()->board_type) {
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ case CVMX_BOARD_TYPE_LANAI2_A:
+ case CVMX_BOARD_TYPE_LANAI2_U:
+ case CVMX_BOARD_TYPE_LANAI2_G:
+ case CVMX_BOARD_TYPE_NIC10E_66:
+ case CVMX_BOARD_TYPE_UBNT_E100:
+ return USB_CLOCK_TYPE_CRYSTAL_12;
+ case CVMX_BOARD_TYPE_NIC10E:
+ return USB_CLOCK_TYPE_REF_12;
+ default:
+ break;
+ }
+ /* Most boards except NIC10e use a 12MHz crystal */
+ if (OCTEON_IS_OCTEON2())
+ return USB_CLOCK_TYPE_CRYSTAL_12;
+ return USB_CLOCK_TYPE_REF_48;
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c b/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c
new file mode 100644
index 000000000..4b26fedec
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c
@@ -0,0 +1,73 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * Fixes and workaround for Octeon chip errata. This file
+ * contains functions called by cvmx-helper to workaround known
+ * chip errata. For the most part, code doesn't need to call
+ * these functions directly.
+ *
+ */
+#include <linux/export.h>
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-helper-jtag.h>
+
+/**
+ * Due to errata G-720, the 2nd order CDR circuit on CN52XX pass
+ * 1 doesn't work properly. The following code disables 2nd order
+ * CDR for the specified QLM.
+ *
+ * @qlm: QLM to disable 2nd order CDR for.
+ */
+void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm)
+{
+ int lane;
+ cvmx_helper_qlm_jtag_init();
+ /* We need to load all four lanes of the QLM, a total of 1072 bits */
+ for (lane = 0; lane < 4; lane++) {
+ /*
+ * Each lane has 268 bits. We need to set
+ * cfg_cdr_incx<67:64> = 3 and cfg_cdr_secord<77> =
+ * 1. All other bits are zero. Bits go in LSB first,
+ * so start off with the zeros for bits <63:0>.
+ */
+ cvmx_helper_qlm_jtag_shift_zeros(qlm, 63 - 0 + 1);
+ /* cfg_cdr_incx<67:64>=3 */
+ cvmx_helper_qlm_jtag_shift(qlm, 67 - 64 + 1, 3);
+ /* Zeros for bits <76:68> */
+ cvmx_helper_qlm_jtag_shift_zeros(qlm, 76 - 68 + 1);
+ /* cfg_cdr_secord<77>=1 */
+ cvmx_helper_qlm_jtag_shift(qlm, 77 - 77 + 1, 1);
+ /* Zeros for bits <267:78> */
+ cvmx_helper_qlm_jtag_shift_zeros(qlm, 267 - 78 + 1);
+ }
+ cvmx_helper_qlm_jtag_update(qlm);
+}
+EXPORT_SYMBOL(__cvmx_helper_errata_qlm_disable_2nd_order_cdr);
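+
+/*
+ * Editorial sanity check for the bit budget used above: each lane shifts
+ * 64 zero bits, 4 bits of cfg_cdr_incx, 9 zero bits, 1 bit of
+ * cfg_cdr_secord and 190 trailing zero bits, i.e. 268 bits per lane and
+ * 4 * 268 = 1072 bits for the whole QLM JTAG chain.
+ */
+_Static_assert((63 - 0 + 1) + (67 - 64 + 1) + (76 - 68 + 1) +
+	       (77 - 77 + 1) + (267 - 78 + 1) == 268,
+	       "QLM JTAG lane shift widths must total 268 bits");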
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c b/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
new file mode 100644
index 000000000..607b4e659
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
@@ -0,0 +1,144 @@
+
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * Helper utilities for qlm_jtag.
+ *
+ */
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-helper-jtag.h>
+
+
+/**
+ * Initialize the internal QLM JTAG logic to allow programming
+ * of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions.
+ * These functions should only be used at the direction of Cavium
+ * Networks. Programming incorrect values into the JTAG chain
+ * can cause chip damage.
+ */
+void cvmx_helper_qlm_jtag_init(void)
+{
+ union cvmx_ciu_qlm_jtgc jtgc;
+ uint32_t clock_div = 0;
+ uint32_t divisor = cvmx_sysinfo_get()->cpu_clock_hz / (25 * 1000000);
+ divisor = (divisor - 1) >> 2;
+ /* Convert the divisor into a power of 2 shift */
+ while (divisor) {
+ clock_div++;
+ divisor = divisor >> 1;
+ }
+
+ /*
+ * Clock divider for QLM JTAG operations. eclk is divided by
+ * 2^(CLK_DIV + 2)
+ */
+ jtgc.u64 = 0;
+ jtgc.s.clk_div = clock_div;
+ jtgc.s.mux_sel = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX))
+ jtgc.s.bypass = 0x3;
+ else
+ jtgc.s.bypass = 0xf;
+ cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64);
+ cvmx_read_csr(CVMX_CIU_QLM_JTGC);
+}
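+
+/*
+ * Worked example (editorial, assuming an 800 MHz eclk) of the divider
+ * computation above: 800 MHz / 25 MHz = 32, (32 - 1) >> 2 = 7, which takes
+ * three right shifts to reach zero, so clk_div = 3 and the resulting JTAG
+ * clock is eclk / 2^(3 + 2) = 25 MHz.
+ */
+static uint32_t cvmx_helper_qlm_jtag_clk_div_sketch(uint64_t eclk_hz)
+{
+	uint32_t clock_div = 0;
+	uint32_t divisor = eclk_hz / (25 * 1000000);
+
+	divisor = (divisor - 1) >> 2;
+	while (divisor) {
+		clock_div++;
+		divisor >>= 1;
+	}
+	return clock_div;
+}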
+
+/**
+ * Write up to 32 bits into the QLM jtag chain. Bits are shifted
+ * into the MSB and out the LSB, so you should shift in the low
+ * order bits followed by the high order bits. The JTAG chain is
+ * 4 * 268 bits long, or 1072.
+ *
+ * @qlm: QLM to shift value into
+ * @bits: Number of bits to shift in (1-32).
+ * @data: Data to shift in. Bit 0 enters the chain first, followed by
+ * bit 1, etc.
+ *
+ * Returns The low order bits of the JTAG chain that shifted out of the
+ * chain.
+ */
+uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data)
+{
+ union cvmx_ciu_qlm_jtgd jtgd;
+ jtgd.u64 = 0;
+ jtgd.s.shift = 1;
+ jtgd.s.shft_cnt = bits - 1;
+ jtgd.s.shft_reg = data;
+ if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
+ jtgd.s.select = 1 << qlm;
+ cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
+ do {
+ jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
+ } while (jtgd.s.shift);
+ return jtgd.s.shft_reg >> (32 - bits);
+}
+
+/**
+ * Shift long sequences of zeros into the QLM JTAG chain. It is
+ * common to need to shift more than 32 bits of zeros into the
+ * chain. This function is a convenience wrapper around
+ * cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of
+ * zeros at a time.
+ *
+ * @qlm: QLM to shift zeros into
+ * @bits: Number of zero bits to shift in
+ */
+void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits)
+{
+ while (bits > 0) {
+ int n = bits;
+ if (n > 32)
+ n = 32;
+ cvmx_helper_qlm_jtag_shift(qlm, n, 0);
+ bits -= n;
+ }
+}
+
+/**
+ * Program the QLM JTAG chain into all lanes of the QLM. You must
+ * have already shifted in 268*4, or 1072 bits into the JTAG
+ * chain. Updating invalid values can possibly cause chip damage.
+ *
+ * @qlm: QLM to program
+ */
+void cvmx_helper_qlm_jtag_update(int qlm)
+{
+ union cvmx_ciu_qlm_jtgd jtgd;
+
+ /* Update the new data */
+ jtgd.u64 = 0;
+ jtgd.s.update = 1;
+ if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
+ jtgd.s.select = 1 << qlm;
+ cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
+ do {
+ jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
+ } while (jtgd.s.update);
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-loop.c b/arch/mips/cavium-octeon/executive/cvmx-helper-loop.c
new file mode 100644
index 000000000..bfbd46115
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-loop.c
@@ -0,0 +1,85 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for LOOP initialization, configuration,
+ * and monitoring.
+ */
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-pip-defs.h>
+
+/**
+ * Probe a LOOP interface and determine the number of ports
+ * connected to it. The LOOP interface should still be down
+ * after this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_loop_probe(int interface)
+{
+ union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
+ int num_ports = 4;
+ int port;
+
+	/* We need to disable length checking so packets < 64 bytes and jumbo
+ frames don't get errors */
+ for (port = 0; port < num_ports; port++) {
+ union cvmx_pip_prt_cfgx port_cfg;
+ int ipd_port = cvmx_helper_get_ipd_port(interface, port);
+ port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
+ port_cfg.s.maxerr_en = 0;
+ port_cfg.s.minerr_en = 0;
+ cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64);
+ }
+
+ /* Disable FCS stripping for loopback ports */
+ ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
+ ipd_sub_port_fcs.s.port_bit2 = 0;
+ cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
+ return num_ports;
+}
+
+/**
+ * Bringup and enable a LOOP interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_loop_enable(int interface)
+{
+ /* Do nothing. */
+ return 0;
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-npi.c b/arch/mips/cavium-octeon/executive/cvmx-helper-npi.c
new file mode 100644
index 000000000..cb210d2ef
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-npi.c
@@ -0,0 +1,101 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for NPI initialization, configuration,
+ * and monitoring.
+ */
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-helper.h>
+
+#include <asm/octeon/cvmx-pip-defs.h>
+
+/**
+ * Probe an NPI interface and determine the number of ports
+ * connected to it. The NPI interface should still be down
+ * after this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_npi_probe(int interface)
+{
+#if CVMX_PKO_QUEUES_PER_PORT_PCI > 0
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
+ return 4;
+ else if (OCTEON_IS_MODEL(OCTEON_CN56XX)
+ && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
+ /* The packet engines didn't exist before pass 2 */
+ return 4;
+ else if (OCTEON_IS_MODEL(OCTEON_CN52XX)
+ && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
+ /* The packet engines didn't exist before pass 2 */
+ return 4;
+#endif
+ return 0;
+}
+
+/**
+ * Bring up and enable an NPI interface. After this call, packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_npi_enable(int interface)
+{
+ /*
+ * On CN50XX, CN52XX, and CN56XX we need to disable length
+ * checking so packets < 64 bytes and jumbo frames don't get
+ * errors.
+ */
+ if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
+ !OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int port;
+ for (port = 0; port < num_ports; port++) {
+ union cvmx_pip_prt_cfgx port_cfg;
+ int ipd_port =
+ cvmx_helper_get_ipd_port(interface, port);
+ port_cfg.u64 =
+ cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
+ port_cfg.s.maxerr_en = 0;
+ port_cfg.s.minerr_en = 0;
+ cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port),
+ port_cfg.u64);
+ }
+ }
+
+ /* Enables are controlled by the remote host, so nothing to do here */
+ return 0;
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
new file mode 100644
index 000000000..c4b58598a
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
@@ -0,0 +1,451 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (C) 2003-2018 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for RGMII/GMII/MII initialization, configuration,
+ * and monitoring.
+ */
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-board.h>
+
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-asxx-defs.h>
+#include <asm/octeon/cvmx-dbg-defs.h>
+
+/**
+ * Probe RGMII ports and determine the number present
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of RGMII/GMII/MII ports (0-4).
+ */
+int __cvmx_helper_rgmii_probe(int interface)
+{
+ int num_ports = 0;
+ union cvmx_gmxx_inf_mode mode;
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ if (mode.s.type) {
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ cvmx_dprintf("ERROR: RGMII initialize called in "
+ "SPI interface\n");
+ } else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+ || OCTEON_IS_MODEL(OCTEON_CN30XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ /*
+ * On these chips "type" says we're in
+ * GMII/MII mode. This limits us to 2 ports
+ */
+ num_ports = 2;
+ } else {
+ cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
+ __func__);
+ }
+ } else {
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ num_ports = 4;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+ || OCTEON_IS_MODEL(OCTEON_CN30XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ num_ports = 3;
+ } else {
+ cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
+ __func__);
+ }
+ }
+ return num_ports;
+}
+
+/**
+ * Put an RGMII interface in loopback mode. Internal packets sent
+ * out will be received back again on the same port. Externally
+ * received packets will echo back out.
+ *
+ * @port: IPD port number to loop.
+ */
+void cvmx_helper_rgmii_internal_loopback(int port)
+{
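+ /* The IPD port number packs the interface into bit 4 and the port
+ index into the low bits; e.g. IPD port 18 (0x12) decodes to
+ interface 1, index 2. */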
+ int interface = (port >> 4) & 1;
+ int index = port & 0xf;
+ uint64_t tmp;
+
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ gmx_cfg.u64 = 0;
+ gmx_cfg.s.duplex = 1;
+ gmx_cfg.s.slottime = 1;
+ gmx_cfg.s.speed = 1;
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+ tmp = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
+ cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), (1 << index) | tmp);
+ tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
+ cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
+ tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
+ gmx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+}
+
+/**
+ * Workaround ASX setup errata with CN38XX pass1
+ *
+ * @interface: Interface to setup
+ * @port: Port to setup (0..3)
+ * @cpu_clock_hz:
+ * Chip frequency in Hertz
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_errata_asx_pass1(int interface, int port,
+ int cpu_clock_hz)
+{
+ /* Set hi water mark as per errata GMX-4 */
+ if (cpu_clock_hz >= 325000000 && cpu_clock_hz < 375000000)
+ cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 12);
+ else if (cpu_clock_hz >= 375000000 && cpu_clock_hz < 437000000)
+ cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 11);
+ else if (cpu_clock_hz >= 437000000 && cpu_clock_hz < 550000000)
+ cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 10);
+ else if (cpu_clock_hz >= 550000000 && cpu_clock_hz < 687000000)
+ cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 9);
+ else
+ cvmx_dprintf("Illegal clock frequency (%d). "
+ "CVMX_ASXX_TX_HI_WATERX not set\n", cpu_clock_hz);
+ return 0;
+}
+
+/**
+ * Configure all of the ASX, GMX, and PKO registers required
+ * to get RGMII to function on the supplied interface.
+ *
+ * @interface: PKO Interface to configure (0 or 1)
+ *
+ * Returns Zero on success
+ */
+int __cvmx_helper_rgmii_enable(int interface)
+{
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int port;
+ struct cvmx_sysinfo *sys_info_ptr = cvmx_sysinfo_get();
+ union cvmx_gmxx_inf_mode mode;
+ union cvmx_asxx_tx_prt_en asx_tx;
+ union cvmx_asxx_rx_prt_en asx_rx;
+
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ if (mode.s.en == 0)
+ return -1;
+ if ((OCTEON_IS_MODEL(OCTEON_CN38XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN58XX)) && mode.s.type == 1)
+ /* Ignore SPI interfaces */
+ return -1;
+
+ /* Configure the ASX registers needed to use the RGMII ports */
+ asx_tx.u64 = 0;
+ asx_tx.s.prt_en = cvmx_build_mask(num_ports);
+ cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);
+
+ asx_rx.u64 = 0;
+ asx_rx.s.prt_en = cvmx_build_mask(num_ports);
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);
+
+ /* Configure the GMX registers needed to use the RGMII ports */
+ for (port = 0; port < num_ports; port++) {
+ /* Setting of CVMX_GMXX_TXX_THRESH has been moved to
+ __cvmx_helper_setup_gmx() */
+
+ if (cvmx_octeon_is_pass1())
+ __cvmx_helper_errata_asx_pass1(interface, port,
+ sys_info_ptr->
+ cpu_clock_hz);
+ else {
+ /*
+ * Configure more flexible RGMII preamble
+ * checking. Pass 1 doesn't support this
+ * feature.
+ */
+ union cvmx_gmxx_rxx_frm_ctl frm_ctl;
+ frm_ctl.u64 =
+ cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
+ (port, interface));
+ /* New field, so must be compile time */
+ frm_ctl.s.pre_free = 1;
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface),
+ frm_ctl.u64);
+ }
+
+ /*
+ * Each pause frame transmitted will ask for about 10M
+ * bit times before resume. If buffer space comes
+ * available before that time has expired, an XON
+ * pause frame (0 time) will be transmitted to restart
+ * the flow.
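+ * (The time and interval values are presumably in units of 512 bit
+ * times, so 20000 corresponds to roughly 10.24M bit times, matching
+ * the figure above.)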
+ */
+ cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface),
+ 20000);
+ cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL
+ (port, interface), 19000);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
+ 16);
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
+ 16);
+ } else {
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
+ 24);
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
+ 24);
+ }
+ }
+
+ __cvmx_helper_setup_gmx(interface, num_ports);
+
+ /* enable the ports now */
+ for (port = 0; port < num_ports; port++) {
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+
+ gmx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
+ gmx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface),
+ gmx_cfg.u64);
+ }
+ __cvmx_interrupt_asxx_enable(interface);
+ __cvmx_interrupt_gmxx_enable(interface);
+
+ return 0;
+}
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+union cvmx_helper_link_info __cvmx_helper_rgmii_link_get(int ipd_port)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ union cvmx_asxx_prt_loop asxx_prt_loop;
+
+ asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
+ if (asxx_prt_loop.s.int_loop & (1 << index)) {
+ /* Force 1Gbps full duplex on internal loopback */
+ union cvmx_helper_link_info result;
+ result.u64 = 0;
+ result.s.full_duplex = 1;
+ result.s.link_up = 1;
+ result.s.speed = 1000;
+ return result;
+ } else
+ return __cvmx_helper_board_link_get(ipd_port);
+}
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get().
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_rgmii_link_set(int ipd_port,
+ union cvmx_helper_link_info link_info)
+{
+ int result = 0;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ union cvmx_gmxx_prtx_cfg original_gmx_cfg;
+ union cvmx_gmxx_prtx_cfg new_gmx_cfg;
+ union cvmx_pko_mem_queue_qos pko_mem_queue_qos;
+ union cvmx_pko_mem_queue_qos pko_mem_queue_qos_save[16];
+ union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp;
+ union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp_save;
+ int i;
+
+ /* Ignore speed sets in the simulator */
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
+ return 0;
+
+ /* Read the current settings so we know the current enable state */
+ original_gmx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ new_gmx_cfg = original_gmx_cfg;
+
+ /* Disable the lowest level RX */
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
+ cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) &
+ ~(1 << index));
+
+ memset(pko_mem_queue_qos_save, 0, sizeof(pko_mem_queue_qos_save));
+ /* Disable all queues so that TX should become idle */
+ for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
+ int queue = cvmx_pko_get_base_queue(ipd_port) + i;
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
+ pko_mem_queue_qos.u64 = cvmx_read_csr(CVMX_PKO_MEM_QUEUE_QOS);
+ pko_mem_queue_qos.s.pid = ipd_port;
+ pko_mem_queue_qos.s.qid = queue;
+ pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
+ pko_mem_queue_qos.s.qos_mask = 0;
+ cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
+ }
+
+ /* Disable backpressure */
+ gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
+ gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
+ gmx_tx_ovr_bp.s.bp &= ~(1 << index);
+ gmx_tx_ovr_bp.s.en |= 1 << index;
+ cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
+ cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
+
+ /*
+ * Poll the GMX state machine waiting for it to become
+ * idle. Preferably we should only change speed when it is
+ * idle. If it doesn't become idle, we will still do the speed
+ * change, but there is a slight chance that GMX will
+ * lock up.
+ */
+ cvmx_write_csr(CVMX_NPI_DBG_SELECT,
+ interface * 0x800 + index * 0x100 + 0x880);
+ CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 7,
+ ==, 0, 10000);
+ CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 0xf,
+ ==, 0, 10000);
+
+ /* Disable the port before we make any changes */
+ new_gmx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+
+ /* Set full/half duplex */
+ if (cvmx_octeon_is_pass1())
+ /* Half duplex is broken for 38XX Pass 1 */
+ new_gmx_cfg.s.duplex = 1;
+ else if (!link_info.s.link_up)
+ /* Force full duplex on down links */
+ new_gmx_cfg.s.duplex = 1;
+ else
+ new_gmx_cfg.s.duplex = link_info.s.full_duplex;
+
+ /* Set the link speed. Anything unknown is set to 1Gbps */
+ if (link_info.s.speed == 10) {
+ new_gmx_cfg.s.slottime = 0;
+ new_gmx_cfg.s.speed = 0;
+ } else if (link_info.s.speed == 100) {
+ new_gmx_cfg.s.slottime = 0;
+ new_gmx_cfg.s.speed = 0;
+ } else {
+ new_gmx_cfg.s.slottime = 1;
+ new_gmx_cfg.s.speed = 1;
+ }
+
+ /* Adjust the clocks */
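+ /* The TXX_CLK values apparently divide the 125 MHz RGMII reference
+ clock: 125/50 = 2.5 MHz for 10 Mbps, 125/5 = 25 MHz for 100 Mbps,
+ and 125/1 = 125 MHz for 1000 Mbps. */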
+ if (link_info.s.speed == 10) {
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 50);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ } else if (link_info.s.speed == 100) {
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 5);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ } else {
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ if ((link_info.s.speed == 10) || (link_info.s.speed == 100)) {
+ union cvmx_gmxx_inf_mode mode;
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ /*
+ * Port .en .type .p0mii Configuration
+ * ---- --- ----- ------ -----------------------------------------
+ * X 0 X X All links are disabled.
+ * 0 1 X 0 Port 0 is RGMII
+ * 0 1 X 1 Port 0 is MII
+ * 1 1 0 X Ports 1 and 2 are configured as RGMII ports.
+ * 1 1 1 X Port 1: GMII/MII; Port 2: disabled. GMII or
+ * MII port is selected by GMX_PRT1_CFG[SPEED].
+ */
+
+ /* In MII mode, CLK_CNT = 1. */
+ if (((index == 0) && (mode.s.p0mii == 1))
+ || ((index != 0) && (mode.s.type == 1))) {
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK
+ (index, interface), 1);
+ }
+ }
+ }
+
+ /* Do a read to make sure all setup stuff is complete */
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+
+ /* Save the new GMX setting without enabling the port */
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+
+ /* Enable the lowest level RX */
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
+ cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) | (1 <<
+ index));
+
+ /* Re-enable the TX path */
+ for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
+ int queue = cvmx_pko_get_base_queue(ipd_port) + i;
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
+ cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS,
+ pko_mem_queue_qos_save[i].u64);
+ }
+
+ /* Restore backpressure */
+ cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);
+
+ /* Restore the GMX enable state. Port config is complete */
+ new_gmx_cfg.s.en = original_gmx_cfg.s.en;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+
+ return result;
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
new file mode 100644
index 000000000..e07d8f15e
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
@@ -0,0 +1,515 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (C) 2003-2018 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for SGMII initialization, configuration,
+ * and monitoring.
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-board.h>
+
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-pcsx-defs.h>
+#include <asm/octeon/cvmx-pcsxx-defs.h>
+
+/**
+ * Perform initialization required only once for an SGMII port.
+ *
+ * @interface: Interface to init
+ * @index: Index of port on the interface
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init_one_time(int interface, int index)
+{
+ const uint64_t clock_mhz = cvmx_sysinfo_get()->cpu_clock_hz / 1000000;
+ union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
+ union cvmx_pcsx_linkx_timer_count_reg pcsx_linkx_timer_count_reg;
+ union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+
+ /* Disable GMX */
+ gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmxx_prtx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ /*
+ * Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the
+ * appropriate value. 1000BASE-X specifies a 10ms
+ * interval. SGMII specifies a 1.6ms interval.
+ */
+ pcs_misc_ctl_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ pcsx_linkx_timer_count_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface));
+ if (pcs_misc_ctl_reg.s.mode) {
+ /* 1000BASE-X */
+ pcsx_linkx_timer_count_reg.s.count =
+ (10000ull * clock_mhz) >> 10;
+ } else {
+ /* SGMII */
+ pcsx_linkx_timer_count_reg.s.count =
+ (1600ull * clock_mhz) >> 10;
+ }
+ cvmx_write_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface),
+ pcsx_linkx_timer_count_reg.u64);
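+ /*
+ * Illustrative arithmetic: the expressions above compute
+ * interval_us * clock_mhz / 1024, i.e. the count is presumably in
+ * units of 1024 core-clock cycles. At a 500 MHz core clock the
+ * SGMII case gives (1600 * 500) >> 10 = 781, roughly 1.6 ms.
+ */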
+
+ /*
+ * Write the advertisement register to be used as the
+ * tx_Config_Reg<D15:D0> of the autonegotiation. In
+ * 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG.
+ * In SGMII PHY mode, tx_Config_Reg<D15:D0> is
+ * PCS*_SGM*_AN_ADV_REG. In SGMII MAC mode,
+ * tx_Config_Reg<D15:D0> is the fixed value 0x4001, so this
+ * step can be skipped.
+ */
+ if (pcs_misc_ctl_reg.s.mode) {
+ /* 1000BASE-X */
+ union cvmx_pcsx_anx_adv_reg pcsx_anx_adv_reg;
+ pcsx_anx_adv_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_ANX_ADV_REG(index, interface));
+ pcsx_anx_adv_reg.s.rem_flt = 0;
+ pcsx_anx_adv_reg.s.pause = 3;
+ pcsx_anx_adv_reg.s.hfd = 1;
+ pcsx_anx_adv_reg.s.fd = 1;
+ cvmx_write_csr(CVMX_PCSX_ANX_ADV_REG(index, interface),
+ pcsx_anx_adv_reg.u64);
+ } else {
+ union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+ pcsx_miscx_ctl_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ if (pcsx_miscx_ctl_reg.s.mac_phy) {
+ /* PHY Mode */
+ union cvmx_pcsx_sgmx_an_adv_reg pcsx_sgmx_an_adv_reg;
+ pcsx_sgmx_an_adv_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_SGMX_AN_ADV_REG
+ (index, interface));
+ pcsx_sgmx_an_adv_reg.s.link = 1;
+ pcsx_sgmx_an_adv_reg.s.dup = 1;
+ pcsx_sgmx_an_adv_reg.s.speed = 2;
+ cvmx_write_csr(CVMX_PCSX_SGMX_AN_ADV_REG
+ (index, interface),
+ pcsx_sgmx_an_adv_reg.u64);
+ } else {
+ /* MAC Mode - Nothing to do */
+ }
+ }
+ return 0;
+}
+
+/**
+ * Initialize the SERDES link for the first time or after a loss
+ * of link.
+ *
+ * @interface: Interface to init
+ * @index: Index of port on the interface
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init_link(int interface, int index)
+{
+ union cvmx_pcsx_mrx_control_reg control_reg;
+
+ /*
+ * Take PCS through a reset sequence.
+ * PCS*_MR*_CONTROL_REG[PWR_DN] should be cleared to zero.
+ * Write PCS*_MR*_CONTROL_REG[RESET]=1 (while not changing the
+ * value of the other PCS*_MR*_CONTROL_REG bits). Read
+ * PCS*_MR*_CONTROL_REG[RESET] until it changes value to
+ * zero.
+ */
+ control_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+ if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
+ control_reg.s.reset = 1;
+ cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+ control_reg.u64);
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+ union cvmx_pcsx_mrx_control_reg, reset, ==, 0, 10000)) {
+ cvmx_dprintf("SGMII%d: Timeout waiting for port %d "
+ "to finish reset\n",
+ interface, index);
+ return -1;
+ }
+ }
+
+ /*
+ * Write PCS*_MR*_CONTROL_REG[RST_AN]=1 to ensure a fresh
+ * sgmii negotiation starts.
+ */
+ control_reg.s.rst_an = 1;
+ control_reg.s.an_en = 1;
+ control_reg.s.pwr_dn = 0;
+ cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+ control_reg.u64);
+
+ /*
+ * Wait for PCS*_MR*_STATUS_REG[AN_CPT] to be set, indicating
+ * that sgmii autonegotiation is complete. In MAC mode this
+ * isn't an ethernet link, but a link between Octeon and the
+ * PHY.
+ */
+ if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
+ CVMX_WAIT_FOR_FIELD64(CVMX_PCSX_MRX_STATUS_REG(index, interface),
+ union cvmx_pcsx_mrx_status_reg, an_cpt, ==, 1,
+ 10000)) {
+ /* cvmx_dprintf("SGMII%d: Port %d link timeout\n", interface, index); */
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Configure an SGMII link to the specified speed after the SERDES
+ * link is up.
+ *
+ * @interface: Interface to init
+ * @index: Index of port on the interface
+ * @link_info: Link state to configure
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init_link_speed(int interface,
+ int index,
+ union cvmx_helper_link_info
+ link_info)
+{
+ int is_enabled;
+ union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+ union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+
+ /* Disable GMX before we make any changes. Remember the enable state */
+ gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ is_enabled = gmxx_prtx_cfg.s.en;
+ gmxx_prtx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ /* Wait for GMX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_GMXX_PRTX_CFG(index, interface), union cvmx_gmxx_prtx_cfg,
+ rx_idle, ==, 1, 10000)
+ || CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
+ union cvmx_gmxx_prtx_cfg, tx_idle, ==, 1,
+ 10000)) {
+ cvmx_dprintf
+ ("SGMII%d: Timeout waiting for port %d to be idle\n",
+ interface, index);
+ return -1;
+ }
+
+ /* Read GMX CFG again to make sure the disable completed */
+ gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+
+ /*
+ * Get the misc control for PCS. We will need to set the
+ * duplication amount.
+ */
+ pcsx_miscx_ctl_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+
+ /*
+ * Use GMXENO to force the link down if the status we get says
+ * it should be down.
+ */
+ pcsx_miscx_ctl_reg.s.gmxeno = !link_info.s.link_up;
+
+ /* Only change the duplex setting if the link is up */
+ if (link_info.s.link_up)
+ gmxx_prtx_cfg.s.duplex = link_info.s.full_duplex;
+
+ /* Do speed based setting for GMX */
+ switch (link_info.s.speed) {
+ case 10:
+ gmxx_prtx_cfg.s.speed = 0;
+ gmxx_prtx_cfg.s.speed_msb = 1;
+ gmxx_prtx_cfg.s.slottime = 0;
+ /* Setting from GMX-603 */
+ pcsx_miscx_ctl_reg.s.samp_pt = 25;
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ break;
+ case 100:
+ gmxx_prtx_cfg.s.speed = 0;
+ gmxx_prtx_cfg.s.speed_msb = 0;
+ gmxx_prtx_cfg.s.slottime = 0;
+ pcsx_miscx_ctl_reg.s.samp_pt = 0x5;
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ break;
+ case 1000:
+ gmxx_prtx_cfg.s.speed = 1;
+ gmxx_prtx_cfg.s.speed_msb = 0;
+ gmxx_prtx_cfg.s.slottime = 1;
+ pcsx_miscx_ctl_reg.s.samp_pt = 1;
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 512);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 8192);
+ break;
+ default:
+ break;
+ }
+
+ /* Write the new misc control for PCS */
+ cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+ pcsx_miscx_ctl_reg.u64);
+
+ /* Write the new GMX settings with the port still disabled */
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ /* Read GMX CFG again to make sure the config completed */
+ gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+
+ /* Restore the enabled / disabled state */
+ gmxx_prtx_cfg.s.en = is_enabled;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ return 0;
+}
+
+/**
+ * Bring up the SGMII interface to be ready for packet I/O but
+ * leave I/O disabled using the GMX override. This function
+ * follows the bringup documented in 10.6.3 of the manual.
+ *
+ * @interface: Interface to bring up
+ * @num_ports: Number of ports on the interface
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init(int interface, int num_ports)
+{
+ int index;
+
+ __cvmx_helper_setup_gmx(interface, num_ports);
+
+ for (index = 0; index < num_ports; index++) {
+ int ipd_port = cvmx_helper_get_ipd_port(interface, index);
+ __cvmx_helper_sgmii_hardware_init_one_time(interface, index);
+ /* Linux kernel driver will call ....link_set with the
+ * proper link state. In the simulator there is no
+ * link state polling and hence it is set from
+ * here.
+ */
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
+ __cvmx_helper_sgmii_link_set(ipd_port,
+ __cvmx_helper_sgmii_link_get(ipd_port));
+ }
+
+ return 0;
+}
+
+int __cvmx_helper_sgmii_enumerate(int interface)
+{
+ return 4;
+}
+
+/**
+ * Probe an SGMII interface and determine the number of ports
+ * connected to it. The SGMII interface should still be down after
+ * this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_sgmii_probe(int interface)
+{
+ union cvmx_gmxx_inf_mode mode;
+
+ /*
+ * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
+ * interface needs to be enabled before IPD otherwise per port
+ * backpressure may not work properly
+ */
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+ mode.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
+ return __cvmx_helper_sgmii_enumerate(interface);
+}
+
+/**
+ * Bring up and enable an SGMII interface. After this call, packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_sgmii_enable(int interface)
+{
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int index;
+
+ __cvmx_helper_sgmii_hardware_init(interface, num_ports);
+
+ for (index = 0; index < num_ports; index++) {
+ union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+ gmxx_prtx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmxx_prtx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmxx_prtx_cfg.u64);
+ __cvmx_interrupt_pcsx_intx_en_reg_enable(index, interface);
+ }
+ __cvmx_interrupt_pcsxx_int_en_reg_enable(interface);
+ __cvmx_interrupt_gmxx_enable(interface);
+ return 0;
+}
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+union cvmx_helper_link_info __cvmx_helper_sgmii_link_get(int ipd_port)
+{
+ union cvmx_helper_link_info result;
+ union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
+
+ result.u64 = 0;
+
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
+ /* The simulator gives you a simulated 1Gbps full duplex link */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+ }
+
+ pcsx_mrx_control_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+ if (pcsx_mrx_control_reg.s.loopbck1) {
+ /* Force 1Gbps full duplex link for internal loopback */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+ }
+
+ pcs_misc_ctl_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ if (pcs_misc_ctl_reg.s.mode) {
+ /* 1000BASE-X */
+ /* FIXME */
+ } else {
+ union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+ pcsx_miscx_ctl_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ if (pcsx_miscx_ctl_reg.s.mac_phy) {
+ /* PHY Mode */
+ union cvmx_pcsx_mrx_status_reg pcsx_mrx_status_reg;
+ union cvmx_pcsx_anx_results_reg pcsx_anx_results_reg;
+
+ /*
+ * Don't bother continuing if the SERDES low-level
+ * link is down
+ */
+ pcsx_mrx_status_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG
+ (index, interface));
+ if (pcsx_mrx_status_reg.s.lnk_st == 0) {
+ if (__cvmx_helper_sgmii_hardware_init_link
+ (interface, index) != 0)
+ return result;
+ }
+
+ /* Read the autoneg results */
+ pcsx_anx_results_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_ANX_RESULTS_REG
+ (index, interface));
+ if (pcsx_anx_results_reg.s.an_cpt) {
+ /*
+ * Auto negotiation is complete. Set
+ * status accordingly.
+ */
+ result.s.full_duplex =
+ pcsx_anx_results_reg.s.dup;
+ result.s.link_up =
+ pcsx_anx_results_reg.s.link_ok;
+ switch (pcsx_anx_results_reg.s.spd) {
+ case 0:
+ result.s.speed = 10;
+ break;
+ case 1:
+ result.s.speed = 100;
+ break;
+ case 2:
+ result.s.speed = 1000;
+ break;
+ default:
+ result.s.speed = 0;
+ result.s.link_up = 0;
+ break;
+ }
+ } else {
+ /*
+ * Auto negotiation isn't
+ * complete. Return link down.
+ */
+ result.s.speed = 0;
+ result.s.link_up = 0;
+ }
+ } else { /* MAC Mode */
+
+ result = __cvmx_helper_board_link_get(ipd_port);
+ }
+ }
+ return result;
+}
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get().
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_sgmii_link_set(int ipd_port,
+ union cvmx_helper_link_info link_info)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ __cvmx_helper_sgmii_hardware_init_link(interface, index);
+ return __cvmx_helper_sgmii_hardware_init_link_speed(interface, index,
+ link_info);
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c b/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
new file mode 100644
index 000000000..525914e9b
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
@@ -0,0 +1,202 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (C) 2003-2018 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for SPI initialization, configuration,
+ * and monitoring.
+ */
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-spi.h>
+#include <asm/octeon/cvmx-helper.h>
+
+#include <asm/octeon/cvmx-pip-defs.h>
+#include <asm/octeon/cvmx-pko-defs.h>
+#include <asm/octeon/cvmx-spxx-defs.h>
+#include <asm/octeon/cvmx-stxx-defs.h>
+
+/*
+ * CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI
+ * initialization routines wait for SPI training. You can override the
+ * value using executive-config.h if necessary.
+ */
+#ifndef CVMX_HELPER_SPI_TIMEOUT
+#define CVMX_HELPER_SPI_TIMEOUT 10
+#endif
+
+int __cvmx_helper_spi_enumerate(int interface)
+{
+ if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
+ cvmx_spi4000_is_present(interface)) {
+ return 10;
+ } else {
+ return 16;
+ }
+}
+
+/**
+ * Probe an SPI interface and determine the number of ports
+ * connected to it. The SPI interface should still be down after
+ * this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_spi_probe(int interface)
+{
+ int num_ports = 0;
+
+ if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
+ cvmx_spi4000_is_present(interface)) {
+ num_ports = 10;
+ } else {
+ union cvmx_pko_reg_crc_enable enable;
+ num_ports = 16;
+ /*
+ * Unlike the SPI4000, most SPI devices don't
+ * automatically add the L2 CRC. For everything
+ * except the SPI4000, have PKO append the L2 CRC
+ * to the packet.
+ */
+ enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
+ enable.s.enable |= 0xffff << (interface * 16);
+ cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);
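+ /* Each SPI interface owns IPD/PKO ports interface*16 through
+ interface*16 + 15, hence the 16-bit slice of the enable mask. */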
+ }
+ __cvmx_helper_setup_gmx(interface, num_ports);
+ return num_ports;
+}
+
+/**
+ * Bring up and enable an SPI interface. After this call, packet I/O
+ * should be fully functional. This is called with IPD enabled but
+ * PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_spi_enable(int interface)
+{
+ /*
+ * Normally the ethernet L2 CRC is checked and stripped in the
+ * GMX block. When you are using SPI, this isn't the case and
+ * IPD needs to check the L2 CRC.
+ */
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int ipd_port;
+ for (ipd_port = interface * 16; ipd_port < interface * 16 + num_ports;
+ ipd_port++) {
+ union cvmx_pip_prt_cfgx port_config;
+ port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
+ port_config.s.crc_en = 1;
+ cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
+ }
+
+ if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
+ cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX,
+ CVMX_HELPER_SPI_TIMEOUT, num_ports);
+ if (cvmx_spi4000_is_present(interface))
+ cvmx_spi4000_initialize(interface);
+ }
+ __cvmx_interrupt_spxx_int_msk_enable(interface);
+ __cvmx_interrupt_stxx_int_msk_enable(interface);
+ __cvmx_interrupt_gmxx_enable(interface);
+ return 0;
+}
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+union cvmx_helper_link_info __cvmx_helper_spi_link_get(int ipd_port)
+{
+ union cvmx_helper_link_info result;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ result.u64 = 0;
+
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
+ /* The simulator gives you a simulated full duplex link */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 10000;
+ } else if (cvmx_spi4000_is_present(interface)) {
+ union cvmx_gmxx_rxx_rx_inbnd inband =
+ cvmx_spi4000_check_speed(interface, index);
+ result.s.link_up = inband.s.status;
+ result.s.full_duplex = inband.s.duplex;
+ switch (inband.s.speed) {
+ case 0: /* 10 Mbps */
+ result.s.speed = 10;
+ break;
+ case 1: /* 100 Mbps */
+ result.s.speed = 100;
+ break;
+ case 2: /* 1 Gbps */
+ result.s.speed = 1000;
+ break;
+ case 3: /* Illegal */
+ result.s.speed = 0;
+ result.s.link_up = 0;
+ break;
+ }
+ } else {
+ /* For generic SPI we can't determine the link state, so just return
+ some sane results */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 10000;
+ }
+ return result;
+}
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get().
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_spi_link_set(int ipd_port, union cvmx_helper_link_info link_info)
+{
+ /* Nothing to do. If we have an SPI4000 then the setup was already
+ performed by cvmx_spi4000_check_speed(). If not, there is no link
+ info to apply. */
+ return 0;
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
new file mode 100644
index 000000000..53b912745
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
@@ -0,0 +1,363 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Small helper utilities.
+ */
+#include <linux/kernel.h>
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-fpa.h>
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-ipd.h>
+#include <asm/octeon/cvmx-spi.h>
+
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-util.h>
+
+#include <asm/octeon/cvmx-ipd-defs.h>
+
+/**
+ * Convert an interface mode into a human-readable string
+ *
+ * @mode: Mode to convert
+ *
+ * Returns String
+ */
+const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t
+ mode)
+{
+ switch (mode) {
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ return "DISABLED";
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ return "RGMII";
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ return "GMII";
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ return "SPI";
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ return "PCIE";
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ return "XAUI";
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ return "SGMII";
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ return "PICMG";
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ return "NPI";
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ return "LOOP";
+ }
+ return "UNKNOWN";
+}
+
+/**
+ * Set up Random Early Drop on a specific input queue
+ *
+ * @queue: Input queue to setup RED on (0-7)
+ * @pass_thresh:
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
+ * @drop_thresh:
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
+ * Returns Zero on success. Negative on failure
+ */
+static int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
+ int drop_thresh)
+{
+ union cvmx_ipd_qosx_red_marks red_marks;
+ union cvmx_ipd_red_quex_param red_param;
+
+ /* Set RED to begin dropping packets when there are pass_thresh buffers
+ left. It will linearly drop more packets until reaching drop_thresh
+ buffers */
+ red_marks.u64 = 0;
+ red_marks.s.drop = drop_thresh;
+ red_marks.s.pass = pass_thresh;
+ cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64);
+
+ /* Use the actual queue 0 counter, not the average */
+ red_param.u64 = 0;
+ red_param.s.prb_con =
+ (255ul << 24) / (red_marks.s.pass - red_marks.s.drop);
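+ /* Note: prb_con spreads the maximum probability (255, scaled by 2^24)
+ across the pass-to-drop range, so the drop probability appears to
+ ramp linearly by one prb_con step per buffer below pass_thresh. */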
+ red_param.s.avg_con = 1;
+ red_param.s.new_con = 255;
+ red_param.s.use_pcnt = 1;
+ cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64);
+ return 0;
+}
+
+/**
+ * Set up Random Early Drop to automatically begin dropping packets.
+ *
+ * @pass_thresh:
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
+ * @drop_thresh:
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
+ * Returns Zero on success. Negative on failure
+ */
+int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
+{
+ union cvmx_ipd_portx_bp_page_cnt page_cnt;
+ union cvmx_ipd_bp_prt_red_end ipd_bp_prt_red_end;
+ union cvmx_ipd_red_port_enable red_port_enable;
+ int queue;
+ int interface;
+ int port;
+
+ /* Disable backpressure based on queued buffers. It needs SW support */
+ page_cnt.u64 = 0;
+ page_cnt.s.bp_enb = 0;
+ page_cnt.s.page_cnt = 100;
+ for (interface = 0; interface < 2; interface++) {
+ for (port = cvmx_helper_get_first_ipd_port(interface);
+ port < cvmx_helper_get_last_ipd_port(interface); port++)
+ cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port),
+ page_cnt.u64);
+ }
+
+ for (queue = 0; queue < 8; queue++)
+ cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh);
+
+ /* Shut off the dropping based on the per-port page count. SW isn't
+ decrementing it right now */
+ ipd_bp_prt_red_end.u64 = 0;
+ ipd_bp_prt_red_end.s.prt_enb = 0;
+ cvmx_write_csr(CVMX_IPD_BP_PRT_RED_END, ipd_bp_prt_red_end.u64);
+
+ red_port_enable.u64 = 0;
+ red_port_enable.s.prt_enb = 0xfffffffffull;
+ red_port_enable.s.avg_dly = 10000;
+ red_port_enable.s.prb_dly = 10000;
+ cvmx_write_csr(CVMX_IPD_RED_PORT_ENABLE, red_port_enable.u64);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_setup_red);
+
+/**
+ * Set up the common GMX settings that determine the number of
+ * ports. These settings apply to almost all configurations of all
+ * chips.
+ *
+ * @interface: Interface to configure
+ * @num_ports: Number of ports on the interface
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_setup_gmx(int interface, int num_ports)
+{
+ union cvmx_gmxx_tx_prts gmx_tx_prts;
+ union cvmx_gmxx_rx_prts gmx_rx_prts;
+ union cvmx_pko_reg_gmx_port_mode pko_mode;
+ union cvmx_gmxx_txx_thresh gmx_tx_thresh;
+ int index;
+
+ /* Tell GMX the number of TX ports on this interface */
+ gmx_tx_prts.u64 = cvmx_read_csr(CVMX_GMXX_TX_PRTS(interface));
+ gmx_tx_prts.s.prts = num_ports;
+ cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), gmx_tx_prts.u64);
+
+ /* Tell GMX the number of RX ports on this interface. This only
+ ** applies to *GMII and XAUI ports */
+ if (cvmx_helper_interface_get_mode(interface) ==
+ CVMX_HELPER_INTERFACE_MODE_RGMII
+ || cvmx_helper_interface_get_mode(interface) ==
+ CVMX_HELPER_INTERFACE_MODE_SGMII
+ || cvmx_helper_interface_get_mode(interface) ==
+ CVMX_HELPER_INTERFACE_MODE_GMII
+ || cvmx_helper_interface_get_mode(interface) ==
+ CVMX_HELPER_INTERFACE_MODE_XAUI) {
+ if (num_ports > 4) {
+ cvmx_dprintf("__cvmx_helper_setup_gmx: Illegal "
+ "num_ports\n");
+ return -1;
+ }
+
+ gmx_rx_prts.u64 = cvmx_read_csr(CVMX_GMXX_RX_PRTS(interface));
+ gmx_rx_prts.s.prts = num_ports;
+ cvmx_write_csr(CVMX_GMXX_RX_PRTS(interface), gmx_rx_prts.u64);
+ }
+
+ /* Skip setting CVMX_PKO_REG_GMX_PORT_MODE on 30XX, 31XX, and 50XX */
+ if (!OCTEON_IS_MODEL(OCTEON_CN30XX) && !OCTEON_IS_MODEL(OCTEON_CN31XX)
+ && !OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ /* Tell PKO the number of ports on this interface */
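+ /* The MODE fields appear to encode the port count as 16 >> MODE:
+ 4 -> 1 port, 3 -> 2, 2 -> 4, 1 -> 8, 0 -> 16 ports. */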
+ pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE);
+ if (interface == 0) {
+ if (num_ports == 1)
+ pko_mode.s.mode0 = 4;
+ else if (num_ports == 2)
+ pko_mode.s.mode0 = 3;
+ else if (num_ports <= 4)
+ pko_mode.s.mode0 = 2;
+ else if (num_ports <= 8)
+ pko_mode.s.mode0 = 1;
+ else
+ pko_mode.s.mode0 = 0;
+ } else {
+ if (num_ports == 1)
+ pko_mode.s.mode1 = 4;
+ else if (num_ports == 2)
+ pko_mode.s.mode1 = 3;
+ else if (num_ports <= 4)
+ pko_mode.s.mode1 = 2;
+ else if (num_ports <= 8)
+ pko_mode.s.mode1 = 1;
+ else
+ pko_mode.s.mode1 = 0;
+ }
+ cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);
+ }
+
+ /*
+ * Set GMX to buffer as much data as possible before starting
+ * transmit. This reduces the chances that we have a TX
+ * underrun due to memory contention. Any packet that fits entirely
+ * in the GMX FIFO can never have an underrun regardless of
+ * memory load.
+ */
+ gmx_tx_thresh.u64 = cvmx_read_csr(CVMX_GMXX_TXX_THRESH(0, interface));
+ if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ /* These chips have a fixed max threshold of 0x40 */
+ gmx_tx_thresh.s.cnt = 0x40;
+ } else {
+ /* Choose the max value for the number of ports */
+ if (num_ports <= 1)
+ gmx_tx_thresh.s.cnt = 0x100 / 1;
+ else if (num_ports == 2)
+ gmx_tx_thresh.s.cnt = 0x100 / 2;
+ else
+ gmx_tx_thresh.s.cnt = 0x100 / 4;
+ }
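+ /* I.e. the smaller chips cap the threshold at 0x40 entries, while the
+ larger chips presumably split a 0x100-entry TX FIFO budget across up
+ to four active ports. */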
+ /*
+ * SPI and XAUI can have lots of ports but the GMX hardware
+ * only ever has a max of 4.
+ */
+ if (num_ports > 4)
+ num_ports = 4;
+ for (index = 0; index < num_ports; index++)
+ cvmx_write_csr(CVMX_GMXX_TXX_THRESH(index, interface),
+ gmx_tx_thresh.u64);
+
+ return 0;
+}
+
+/**
+ * Returns the IPD/PKO port number for a port on the given
+ * interface.
+ *
+ * @interface: Interface to use
+ * @port: Port on the interface
+ *
+ * Returns IPD/PKO port number
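+ * (For example, interface 1 port 3 maps to IPD port 19 and
+ * interface 3 port 2 maps to IPD port 38.)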
+ */
+int cvmx_helper_get_ipd_port(int interface, int port)
+{
+ switch (interface) {
+ case 0:
+ return port;
+ case 1:
+ return port + 16;
+ case 2:
+ return port + 32;
+ case 3:
+ return port + 36;
+ case 4:
+ return port + 40;
+ case 5:
+ return port + 44;
+ }
+ return -1;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_get_ipd_port);
+
+/**
+ * Returns the interface number for an IPD/PKO port number.
+ *
+ * @ipd_port: IPD/PKO port number
+ *
+ * Returns Interface number
+ */
+int cvmx_helper_get_interface_num(int ipd_port)
+{
+ if (ipd_port < 16)
+ return 0;
+ else if (ipd_port < 32)
+ return 1;
+ else if (ipd_port < 36)
+ return 2;
+ else if (ipd_port < 40)
+ return 3;
+ else if (ipd_port < 44)
+ return 4;
+ else if (ipd_port < 48)
+ return 5;
+ else
+ cvmx_dprintf("cvmx_helper_get_interface_num: Illegal IPD "
+ "port number\n");
+
+ return -1;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_get_interface_num);
+
+/**
+ * Returns the interface index number for an IPD/PKO port
+ * number.
+ *
+ * @ipd_port: IPD/PKO port number
+ *
+ * Returns Interface index number
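+ * (For example, IPD port 19 is index 3 on interface 1 and IPD port 38
+ * is index 2 on interface 3.)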
+ */
+int cvmx_helper_get_interface_index_num(int ipd_port)
+{
+ if (ipd_port < 32)
+ return ipd_port & 15;
+ else if (ipd_port < 36)
+ return ipd_port & 3;
+ else if (ipd_port < 40)
+ return ipd_port & 3;
+ else if (ipd_port < 44)
+ return ipd_port & 3;
+ else if (ipd_port < 48)
+ return ipd_port & 3;
+ else
+ cvmx_dprintf("cvmx_helper_get_interface_index_num: "
+ "Illegal IPD port number\n");
+
+ return -1;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_get_interface_index_num);
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
new file mode 100644
index 000000000..842990e84
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
@@ -0,0 +1,321 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (C) 2003-2018 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for XAUI initialization, configuration,
+ * and monitoring.
+ *
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-helper.h>
+
+#include <asm/octeon/cvmx-pko-defs.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-pcsx-defs.h>
+#include <asm/octeon/cvmx-pcsxx-defs.h>
+
+int __cvmx_helper_xaui_enumerate(int interface)
+{
+ union cvmx_gmxx_hg2_control gmx_hg2_control;
+
+ /* If HiGig2 is enabled return 16 ports, otherwise return 1 port */
+ gmx_hg2_control.u64 = cvmx_read_csr(CVMX_GMXX_HG2_CONTROL(interface));
+ if (gmx_hg2_control.s.hg2tx_en)
+ return 16;
+ else
+ return 1;
+}
+
+/**
+ * Probe a XAUI interface and determine the number of ports
+ * connected to it. The XAUI interface should still be down
+ * after this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_xaui_probe(int interface)
+{
+ int i;
+ union cvmx_gmxx_inf_mode mode;
+
+ /*
+ * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
+ * interface needs to be enabled before IPD otherwise per port
+ * backpressure may not work properly.
+ */
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+ mode.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
+
+ __cvmx_helper_setup_gmx(interface, 1);
+
+ /*
+ * Set up PKO to support 16 ports for HiGig2 virtual
+ * ports. We're pointing all of the PKO packet ports for this
+ * interface to the XAUI. This allows us to use HiGig2
+ * backpressure per port.
+ */
+ for (i = 0; i < 16; i++) {
+ union cvmx_pko_mem_port_ptrs pko_mem_port_ptrs;
+ pko_mem_port_ptrs.u64 = 0;
+ /*
+ * We set each PKO port to have equal priority in a
+ * round robin fashion.
+ */
+ pko_mem_port_ptrs.s.static_p = 0;
+ pko_mem_port_ptrs.s.qos_mask = 0xff;
+ /* All PKO ports map to the same XAUI hardware port */
+ pko_mem_port_ptrs.s.eid = interface * 4;
+ pko_mem_port_ptrs.s.pid = interface * 16 + i;
+ cvmx_write_csr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
+ }
+ return __cvmx_helper_xaui_enumerate(interface);
+}
+
+/**
+ * Bring up and enable a XAUI interface. After this call, packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_xaui_enable(int interface)
+{
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ union cvmx_pcsxx_control1_reg xauiCtl;
+ union cvmx_pcsxx_misc_ctl_reg xauiMiscCtl;
+ union cvmx_gmxx_tx_xaui_ctl gmxXauiTxCtl;
+ union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
+ union cvmx_gmxx_tx_int_en gmx_tx_int_en;
+ union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
+
+ /* Setup PKND */
+ if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+ gmx_cfg.s.pknd = cvmx_helper_get_ipd_port(interface, 0);
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+ }
+
+ /* (1) Interface has already been enabled. */
+
+ /* (2) Disable GMX. */
+ xauiMiscCtl.u64 = cvmx_read_csr(CVMX_PCSXX_MISC_CTL_REG(interface));
+ xauiMiscCtl.s.gmxeno = 1;
+ cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
+
+ /* (3) Disable GMX and PCSX interrupts. */
+ gmx_rx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(0, interface));
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
+ gmx_tx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_TX_INT_EN(interface));
+ cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
+ pcsx_int_en_reg.u64 = cvmx_read_csr(CVMX_PCSXX_INT_EN_REG(interface));
+ cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
+
+ /* (4) Bring up the PCSX and GMX reconciliation layer. */
+ /* (4)a Set polarity and lane swapping. */
+ /* (4)b */
+ gmxXauiTxCtl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
+ /* Enable better IFG packing, which improves performance */
+ gmxXauiTxCtl.s.dic_en = 1;
+ gmxXauiTxCtl.s.uni_en = 0;
+ cvmx_write_csr(CVMX_GMXX_TX_XAUI_CTL(interface), gmxXauiTxCtl.u64);
+
+ /* (4)c Apply reset sequence */
+ xauiCtl.u64 = cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
+ xauiCtl.s.lo_pwr = 0;
+
+ /* Issuing a reset here seems to hang some CN68XX chips. */
+ if (!OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X) &&
+ !OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X))
+ xauiCtl.s.reset = 1;
+
+ cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface), xauiCtl.u64);
+
+ /* Wait for PCS to come out of reset */
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_PCSXX_CONTROL1_REG(interface), union cvmx_pcsxx_control1_reg,
+ reset, ==, 0, 10000))
+ return -1;
+ /* Wait for PCS to be aligned */
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_PCSXX_10GBX_STATUS_REG(interface),
+ union cvmx_pcsxx_10gbx_status_reg, alignd, ==, 1, 10000))
+ return -1;
+ /* Wait for RX to be ready */
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_GMXX_RX_XAUI_CTL(interface), union cvmx_gmxx_rx_xaui_ctl,
+ status, ==, 0, 10000))
+ return -1;
+
+ /* (6) Configure GMX */
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+ gmx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+
+ /* Wait for GMX RX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_GMXX_PRTX_CFG(0, interface), union cvmx_gmxx_prtx_cfg,
+ rx_idle, ==, 1, 10000))
+ return -1;
+ /* Wait for GMX TX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_GMXX_PRTX_CFG(0, interface), union cvmx_gmxx_prtx_cfg,
+ tx_idle, ==, 1, 10000))
+ return -1;
+
+ /* GMX configure */
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+ gmx_cfg.s.speed = 1;
+ gmx_cfg.s.speed_msb = 0;
+ gmx_cfg.s.slottime = 1;
+ cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), 1);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(0, interface), 512);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(0, interface), 8192);
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+
+ /* (7) Clear out any error state */
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(0, interface),
+ cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(0, interface)));
+ cvmx_write_csr(CVMX_GMXX_TX_INT_REG(interface),
+ cvmx_read_csr(CVMX_GMXX_TX_INT_REG(interface)));
+ cvmx_write_csr(CVMX_PCSXX_INT_REG(interface),
+ cvmx_read_csr(CVMX_PCSXX_INT_REG(interface)));
+
+ /* Wait for receive link */
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_PCSXX_STATUS1_REG(interface), union cvmx_pcsxx_status1_reg,
+ rcv_lnk, ==, 1, 10000))
+ return -1;
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_PCSXX_STATUS2_REG(interface), union cvmx_pcsxx_status2_reg,
+ xmtflt, ==, 0, 10000))
+ return -1;
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_PCSXX_STATUS2_REG(interface), union cvmx_pcsxx_status2_reg,
+ rcvflt, ==, 0, 10000))
+ return -1;
+
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), gmx_rx_int_en.u64);
+ cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
+ cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), pcsx_int_en_reg.u64);
+
+ /* (8) Enable packet reception */
+ xauiMiscCtl.s.gmxeno = 0;
+ cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
+
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+ gmx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+
+ __cvmx_interrupt_pcsx_intx_en_reg_enable(0, interface);
+ __cvmx_interrupt_pcsx_intx_en_reg_enable(1, interface);
+ __cvmx_interrupt_pcsx_intx_en_reg_enable(2, interface);
+ __cvmx_interrupt_pcsx_intx_en_reg_enable(3, interface);
+ __cvmx_interrupt_pcsxx_int_en_reg_enable(interface);
+ __cvmx_interrupt_gmxx_enable(interface);
+
+ return 0;
+}
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+union cvmx_helper_link_info __cvmx_helper_xaui_link_get(int ipd_port)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
+ union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
+ union cvmx_pcsxx_status1_reg pcsxx_status1_reg;
+ union cvmx_helper_link_info result;
+
+ gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
+ gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
+ pcsxx_status1_reg.u64 =
+ cvmx_read_csr(CVMX_PCSXX_STATUS1_REG(interface));
+ result.u64 = 0;
+
+ /* Only return a link if both RX and TX are happy */
+ if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0) &&
+ (pcsxx_status1_reg.s.rcv_lnk == 1)) {
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 10000;
+ } else {
+ /* Disable GMX and PCSX interrupts. */
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
+ cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
+ cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
+ }
+ return result;
+}
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get().
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_xaui_link_set(int ipd_port, union cvmx_helper_link_info link_info)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
+ union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
+
+ gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
+ gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
+
+ /* If the link shouldn't be up, then just return */
+ if (!link_info.s.link_up)
+ return 0;
+
+ /* Do nothing if both RX and TX are happy */
+ if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0))
+ return 0;
+
+ /* Bring the link up */
+ return __cvmx_helper_xaui_enable(interface);
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
new file mode 100644
index 000000000..a18ad2daf
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -0,0 +1,1176 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Helper functions for common but complicated tasks.
+ *
+ */
+#include <linux/bug.h>
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-fpa.h>
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-ipd.h>
+#include <asm/octeon/cvmx-spi.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-board.h>
+
+#include <asm/octeon/cvmx-pip-defs.h>
+#include <asm/octeon/cvmx-asxx-defs.h>
+
+/* Port count per interface */
+static int interface_port_count[9];
+
+/**
+ * Return the number of interfaces the chip has. Each interface
+ * may have multiple ports. Most chips support two interfaces,
+ * but the CNX0XX and CNX1XX are exceptions. These only support
+ * one interface.
+ *
+ * Returns Number of interfaces on chip
+ */
+int cvmx_helper_get_number_of_interfaces(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ return 9;
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
+ return 4;
+ if (OCTEON_IS_MODEL(OCTEON_CN7XXX))
+ return 5;
+ else
+ return 3;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_get_number_of_interfaces);
+
+/**
+ * Return the number of ports on an interface. Depending on the
+ * chip and configuration, this can be 1-16. A value of 0
+ * specifies that the interface doesn't exist or isn't usable.
+ *
+ * @interface: Interface to get the port count for
+ *
+ * Returns Number of ports on interface. Can be Zero.
+ */
+int cvmx_helper_ports_on_interface(int interface)
+{
+ return interface_port_count[interface];
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_ports_on_interface);
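+
+/*
+ * Usage sketch (illustrative only): walk the probed interfaces and
+ * translate each port to its IPD port number. use_port() is a
+ * placeholder for caller code; cvmx_helper_get_ipd_port() is the
+ * helper used elsewhere in this file.
+ *
+ * for (iface = 0; iface < cvmx_helper_get_number_of_interfaces(); iface++)
+ *     for (port = 0; port < cvmx_helper_ports_on_interface(iface); port++)
+ *         use_port(cvmx_helper_get_ipd_port(iface, port));
+ */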
+
+/**
+ * @INTERNAL
+ * Return interface mode for CN68xx.
+ */
+static cvmx_helper_interface_mode_t __cvmx_get_mode_cn68xx(int interface)
+{
+ union cvmx_mio_qlmx_cfg qlm_cfg;
+ switch (interface) {
+ case 0:
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
+ /* QLM is disabled when QLM SPD is 15. */
+ if (qlm_cfg.s.qlm_spd == 15)
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ if (qlm_cfg.s.qlm_cfg == 2)
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ else if (qlm_cfg.s.qlm_cfg == 3)
+ return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ case 2:
+ case 3:
+ case 4:
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(interface));
+ /* QLM is disabled when QLM SPD is 15. */
+ if (qlm_cfg.s.qlm_spd == 15)
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ if (qlm_cfg.s.qlm_cfg == 2)
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ else if (qlm_cfg.s.qlm_cfg == 3)
+ return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ case 7:
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(3));
+ /* QLM is disabled when QLM SPD is 15. */
+ if (qlm_cfg.s.qlm_spd == 15) {
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ } else if (qlm_cfg.s.qlm_cfg != 0) {
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
+ if (qlm_cfg.s.qlm_cfg != 0)
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+ return CVMX_HELPER_INTERFACE_MODE_NPI;
+ case 8:
+ return CVMX_HELPER_INTERFACE_MODE_LOOP;
+ default:
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+}
+
+/**
+ * @INTERNAL
+ * Return interface mode for an Octeon II
+ */
+static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface)
+{
+ union cvmx_gmxx_inf_mode mode;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ return __cvmx_get_mode_cn68xx(interface);
+
+ if (interface == 2)
+ return CVMX_HELPER_INTERFACE_MODE_NPI;
+
+ if (interface == 3)
+ return CVMX_HELPER_INTERFACE_MODE_LOOP;
+
+ /* Only present in CN63XX & CN66XX Octeon model */
+ if ((OCTEON_IS_MODEL(OCTEON_CN63XX) &&
+ (interface == 4 || interface == 5)) ||
+ (OCTEON_IS_MODEL(OCTEON_CN66XX) &&
+ interface >= 4 && interface <= 7)) {
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
+ union cvmx_mio_qlmx_cfg mio_qlm_cfg;
+
+ /* QLM2 is SGMII0 and QLM1 is SGMII1 */
+ if (interface == 0)
+ mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2));
+ else if (interface == 1)
+ mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ if (mio_qlm_cfg.s.qlm_spd == 15)
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ if (mio_qlm_cfg.s.qlm_cfg == 9)
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ else if (mio_qlm_cfg.s.qlm_cfg == 11)
+ return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN61XX)) {
+ union cvmx_mio_qlmx_cfg qlm_cfg;
+
+ if (interface == 0) {
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2));
+ if (qlm_cfg.s.qlm_cfg == 2)
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ else if (qlm_cfg.s.qlm_cfg == 3)
+ return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ } else if (interface == 1) {
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
+ if (qlm_cfg.s.qlm_cfg == 2)
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ else if (qlm_cfg.s.qlm_cfg == 3)
+ return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+ } else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
+ if (interface == 0) {
+ union cvmx_mio_qlmx_cfg qlm_cfg;
+ qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
+ if (qlm_cfg.s.qlm_cfg == 2)
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ }
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+
+ if (interface == 1 && OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ switch (mode.cn61xx.mode) {
+ case 0:
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ case 1:
+ return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ default:
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+ } else {
+ if (!mode.s.en)
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ if (mode.s.type)
+ return CVMX_HELPER_INTERFACE_MODE_GMII;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_RGMII;
+ }
+}
+
+/**
+ * @INTERNAL
+ * Return interface mode for CN7XXX.
+ */
+static cvmx_helper_interface_mode_t __cvmx_get_mode_cn7xxx(int interface)
+{
+ union cvmx_gmxx_inf_mode mode;
+
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ switch (interface) {
+ case 0:
+ case 1:
+ switch (mode.cn68xx.mode) {
+ case 0:
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ case 1:
+ case 2:
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ case 3:
+ return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ default:
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ }
+ case 2:
+ return CVMX_HELPER_INTERFACE_MODE_NPI;
+ case 3:
+ return CVMX_HELPER_INTERFACE_MODE_LOOP;
+ case 4:
+ /* TODO: Implement support for AGL (RGMII). */
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ default:
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+}
+
+/**
+ * Get the operating mode of an interface. Depending on the Octeon
+ * chip and configuration, this function returns an enumeration
+ * of the type of packet I/O supported by an interface.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Mode of the interface. Unknown or unsupported interfaces return
+ * DISABLED.
+ */
+cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
+{
+ union cvmx_gmxx_inf_mode mode;
+
+ if (interface < 0 ||
+ interface >= cvmx_helper_get_number_of_interfaces())
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ /*
+ * OCTEON III models
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN7XXX))
+ return __cvmx_get_mode_cn7xxx(interface);
+
+ /*
+ * Octeon II models
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+ return __cvmx_get_mode_octeon2(interface);
+
+ /*
+ * Octeon and Octeon Plus models
+ */
+ if (interface == 2)
+ return CVMX_HELPER_INTERFACE_MODE_NPI;
+
+ if (interface == 3) {
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX))
+ return CVMX_HELPER_INTERFACE_MODE_LOOP;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+
+ /* Interface 1 is always disabled on CN31XX and CN30XX */
+ if ((interface == 1)
+ && (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN30XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ switch (mode.cn52xx.mode) {
+ case 0:
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ case 1:
+ return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ case 2:
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ case 3:
+ return CVMX_HELPER_INTERFACE_MODE_PICMG;
+ default:
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+ } else {
+ if (!mode.s.en)
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ if (mode.s.type) {
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX))
+ return CVMX_HELPER_INTERFACE_MODE_SPI;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_GMII;
+ } else
+ return CVMX_HELPER_INTERFACE_MODE_RGMII;
+ }
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_interface_get_mode);
+
+/**
+ * Configure the IPD/PIP tagging and QoS options for a specific
+ * port. This function determines the POW work queue entry
+ * contents for a port. The setup performed here is controlled by
+ * the defines in executive-config.h.
+ *
+ * @ipd_port: Port to configure. This follows the IPD numbering, not the
+ * per interface numbering
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_port_setup_ipd(int ipd_port)
+{
+ union cvmx_pip_prt_cfgx port_config;
+ union cvmx_pip_prt_tagx tag_config;
+
+ port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
+ tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(ipd_port));
+
+ /* Have each port go to a different POW queue */
+ port_config.s.qos = ipd_port & 0x7;
+
+ /* Process the headers and place the IP header in the work queue */
+ port_config.s.mode = CVMX_HELPER_INPUT_PORT_SKIP_MODE;
+
+ tag_config.s.ip6_src_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP;
+ tag_config.s.ip6_dst_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_IP;
+ tag_config.s.ip6_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT;
+ tag_config.s.ip6_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT;
+ tag_config.s.ip6_nxth_flag = CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER;
+ tag_config.s.ip4_src_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP;
+ tag_config.s.ip4_dst_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_IP;
+ tag_config.s.ip4_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT;
+ tag_config.s.ip4_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT;
+ tag_config.s.ip4_pctl_flag = CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL;
+ tag_config.s.inc_prt_flag = CVMX_HELPER_INPUT_TAG_INPUT_PORT;
+ tag_config.s.tcp6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ tag_config.s.tcp4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ tag_config.s.ip6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ tag_config.s.ip4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ tag_config.s.non_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ /* Put all packets in group 0. Other groups can be used by the app */
+ tag_config.s.grp = 0;
+
+ cvmx_pip_config_port(ipd_port, port_config, tag_config);
+
+ return 0;
+}
+
+/**
+ * This function sets the interface_port_count[interface] correctly,
+ * without modifying any hardware configuration. Hardware setup of
+ * the ports will be performed later.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_helper_interface_enumerate(int interface)
+{
+ switch (cvmx_helper_interface_get_mode(interface)) {
+ /* These types don't support ports to IPD/PKO */
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ interface_port_count[interface] = 0;
+ break;
+ /* XAUI is a single high speed port */
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ interface_port_count[interface] =
+ __cvmx_helper_xaui_enumerate(interface);
+ break;
+ /*
+ * RGMII/GMII/MII are all treated about the same. Most
+ * functions refer to these ports as RGMII.
+ */
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ interface_port_count[interface] =
+ __cvmx_helper_rgmii_enumerate(interface);
+ break;
+ /*
+ * SPI4 can have 1-16 ports depending on the device at
+ * the other end.
+ */
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ interface_port_count[interface] =
+ __cvmx_helper_spi_enumerate(interface);
+ break;
+ /*
+ * SGMII can have 1-4 ports depending on how many are
+ * hooked up.
+ */
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ interface_port_count[interface] =
+ __cvmx_helper_sgmii_enumerate(interface);
+ break;
+ /* PCI target Network Packet Interface */
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ interface_port_count[interface] =
+ __cvmx_helper_npi_enumerate(interface);
+ break;
+ /*
+ * Special loopback only ports. These are not the same
+ * as other ports in loopback mode.
+ */
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ interface_port_count[interface] =
+ __cvmx_helper_loop_enumerate(interface);
+ break;
+ }
+
+ interface_port_count[interface] =
+ __cvmx_helper_board_interface_probe(interface,
+ interface_port_count
+ [interface]);
+
+ /* Make sure all global variables propagate to other cores */
+ CVMX_SYNCWS;
+
+ return 0;
+}
+
+/**
+ * This function probes an interface to determine the actual
+ * number of hardware ports connected to it. It doesn't set up the
+ * ports or enable them. The main goal here is to set the global
+ * interface_port_count[interface] correctly. Hardware setup of the
+ * ports will be performed later.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_helper_interface_probe(int interface)
+{
+ cvmx_helper_interface_enumerate(interface);
+ /*
+ * At this stage in the game we don't want packets to be moving yet.
+ * The following probe calls should perform hardware setup
+ * needed to determine port counts. Receive must still be disabled.
+ */
+ switch (cvmx_helper_interface_get_mode(interface)) {
+ /* These types don't support ports to IPD/PKO */
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ break;
+ /* XAUI is a single high speed port */
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ __cvmx_helper_xaui_probe(interface);
+ break;
+ /*
+ * RGMII/GMII/MII are all treated about the same. Most
+ * functions refer to these ports as RGMII.
+ */
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ __cvmx_helper_rgmii_probe(interface);
+ break;
+ /*
+ * SPI4 can have 1-16 ports depending on the device at
+ * the other end.
+ */
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ __cvmx_helper_spi_probe(interface);
+ break;
+ /*
+ * SGMII can have 1-4 ports depending on how many are
+ * hooked up.
+ */
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ __cvmx_helper_sgmii_probe(interface);
+ break;
+ /* PCI target Network Packet Interface */
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ __cvmx_helper_npi_probe(interface);
+ break;
+ /*
+ * Special loopback only ports. These are not the same
+ * as other ports in loopback mode.
+ */
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ __cvmx_helper_loop_probe(interface);
+ break;
+ }
+
+ /* Make sure all global variables propagate to other cores */
+ CVMX_SYNCWS;
+
+ return 0;
+}
+
+/**
+ * Set up the IPD/PIP for the ports on an interface. Packet
+ * classification and tagging are set for every port on the
+ * interface. The number of ports on the interface must already
+ * have been probed.
+ *
+ * @interface: Interface to setup IPD/PIP for
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_interface_setup_ipd(int interface)
+{
+ int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
+ int num_ports = interface_port_count[interface];
+
+ while (num_ports--) {
+ __cvmx_helper_port_setup_ipd(ipd_port);
+ ipd_port++;
+ }
+ return 0;
+}
+
+/**
+ * Set up global settings for IPD/PIP that are not related to a specific
+ * interface or port. This must be called before IPD is enabled.
+ *
+ * Returns Zero on success, negative on failure.
+ */
+static int __cvmx_helper_global_setup_ipd(void)
+{
+ /* Setup the global packet input options */
+ cvmx_ipd_config(CVMX_FPA_PACKET_POOL_SIZE / 8,
+ CVMX_HELPER_FIRST_MBUFF_SKIP / 8,
+ CVMX_HELPER_NOT_FIRST_MBUFF_SKIP / 8,
+ /* The +8 is to account for the next ptr */
+ (CVMX_HELPER_FIRST_MBUFF_SKIP + 8) / 128,
+ /* The +8 is to account for the next ptr */
+ (CVMX_HELPER_NOT_FIRST_MBUFF_SKIP + 8) / 128,
+ CVMX_FPA_WQE_POOL,
+ CVMX_IPD_OPC_MODE_STT,
+ CVMX_HELPER_ENABLE_BACK_PRESSURE);
+ return 0;
+}
+
+/**
+ * Set up the PKO for the ports on an interface. The number of
+ * queues per port and the priority of each PKO output queue
+ * are set here. PKO must be disabled when this function is called.
+ *
+ * @interface: Interface to setup PKO for
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_interface_setup_pko(int interface)
+{
+ /*
+ * Each packet output queue has an associated priority. The
+ * higher the priority, the more often it can send a packet. A
+ * priority of 8 means it can send in all 8 rounds of
+ * contention. We're going to make each queue's priority one less than
+ * the last. The vector of priorities has been extended to
+ * support CN5xxx CPUs, where up to 16 queues can be
+ * associated with a port. To keep backward compatibility we
+ * don't change the initial 8 priorities and replicate them in
+ * the second half. With per-core PKO queues (PKO lockless
+ * operation) all queues have the same priority.
+ */
+ uint64_t priorities[16] =
+ { 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1 };
+
+ /*
+ * Setup the IPD/PIP and PKO for the ports discovered
+ * above. Here packet classification, tagging and output
+ * priorities are set.
+ */
+ int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
+ int num_ports = interface_port_count[interface];
+ while (num_ports--) {
+ cvmx_pko_config_port(ipd_port,
+ cvmx_pko_get_base_queue_per_core(ipd_port,
+ 0),
+ cvmx_pko_get_num_queues(ipd_port),
+ priorities);
+ ipd_port++;
+ }
+ return 0;
+}
+
+/**
+ * Set up global settings for PKO that are not related to a specific
+ * interface or port. This must be called before PKO is enabled.
+ *
+ * Returns Zero on success, negative on failure.
+ */
+static int __cvmx_helper_global_setup_pko(void)
+{
+ /*
+ * Disable tagwait FAU timeout. This needs to be done before
+ * anyone might start packet output using tags.
+ */
+ union cvmx_iob_fau_timeout fau_to;
+ fau_to.u64 = 0;
+ fau_to.s.tout_val = 0xfff;
+ fau_to.s.tout_enb = 0;
+ cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ union cvmx_pko_reg_min_pkt min_pkt;
+
+ min_pkt.u64 = 0;
+ min_pkt.s.size1 = 59;
+ min_pkt.s.size2 = 59;
+ min_pkt.s.size3 = 59;
+ min_pkt.s.size4 = 59;
+ min_pkt.s.size5 = 59;
+ min_pkt.s.size6 = 59;
+ min_pkt.s.size7 = 59;
+ cvmx_write_csr(CVMX_PKO_REG_MIN_PKT, min_pkt.u64);
+ }
+
+ return 0;
+}
+
+/**
+ * Set up the global backpressure settings.
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_global_setup_backpressure(void)
+{
+#if CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE
+ /* Disable backpressure if configured to do so */
+ /* Disable backpressure (pause frame) generation */
+ int num_interfaces = cvmx_helper_get_number_of_interfaces();
+ int interface;
+ for (interface = 0; interface < num_interfaces; interface++) {
+ switch (cvmx_helper_interface_get_mode(interface)) {
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ cvmx_gmx_set_backpressure_override(interface, 0xf);
+ break;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+/**
+ * Enable packet input/output from the hardware. This function is
+ * called after all internal setup is complete and IPD is enabled.
+ * After this function completes, packets will be accepted from the
+ * hardware ports. PKO should still be disabled to make sure packets
+ * aren't sent out of partially set-up hardware.
+ *
+ * @interface: Interface to enable
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_packet_hardware_enable(int interface)
+{
+ int result = 0;
+ switch (cvmx_helper_interface_get_mode(interface)) {
+ /* These types don't support ports to IPD/PKO */
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ /* Nothing to do */
+ break;
+ /* XAUI is a single high speed port */
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ result = __cvmx_helper_xaui_enable(interface);
+ break;
+ /*
+ * RGMII/GMII/MII are all treated about the same. Most
+ * functions refer to these ports as RGMII
+ */
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ result = __cvmx_helper_rgmii_enable(interface);
+ break;
+ /*
+ * SPI4 can have 1-16 ports depending on the device at
+ * the other end
+ */
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ result = __cvmx_helper_spi_enable(interface);
+ break;
+ /*
+ * SGMII can have 1-4 ports depending on how many are
+ * hooked up
+ */
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ result = __cvmx_helper_sgmii_enable(interface);
+ break;
+ /* PCI target Network Packet Interface */
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ result = __cvmx_helper_npi_enable(interface);
+ break;
+ /*
+ * Special loopback only ports. These are not the same
+ * as other ports in loopback mode
+ */
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ result = __cvmx_helper_loop_enable(interface);
+ break;
+ }
+ return result;
+}
+
+/**
+ * Function to adjust internal IPD pointer alignments
+ *
+ * Returns 0 on success
+ * !0 on failure
+ */
+static int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
+{
+#define FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES \
+ (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_FIRST_MBUFF_SKIP)
+#define FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES \
+ (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_NOT_FIRST_MBUFF_SKIP)
+#define FIX_IPD_OUTPORT 0
+ /* Ports 0-15 are interface 0, 16-31 are interface 1 */
+#define INTERFACE(port) (port >> 4)
+#define INDEX(port) (port & 0xf)
+ uint64_t *p64;
+ union cvmx_pko_command_word0 pko_command;
+ union cvmx_buf_ptr g_buffer, pkt_buffer;
+ struct cvmx_wqe *work;
+ int size, num_segs = 0, wqe_pcnt, pkt_pcnt;
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ int retry_cnt;
+ int retry_loop_cnt;
+ int i;
+
+ /* Save values for restore at end */
+ uint64_t prtx_cfg =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG
+ (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
+ uint64_t tx_ptr_en =
+ cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
+ uint64_t rx_ptr_en =
+ cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
+ uint64_t rxx_jabber =
+ cvmx_read_csr(CVMX_GMXX_RXX_JABBER
+ (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
+ uint64_t frame_max =
+ cvmx_read_csr(CVMX_GMXX_RXX_FRM_MAX
+ (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
+
+ /* Configure port to gig FDX as required for loopback mode */
+ cvmx_helper_rgmii_internal_loopback(FIX_IPD_OUTPORT);
+
+ /*
+ * Disable reception on all ports so if traffic is present it
+ * will not interfere.
+ */
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 0);
+
+ __delay(100000000ull);
+
+ for (retry_loop_cnt = 0; retry_loop_cnt < 10; retry_loop_cnt++) {
+ retry_cnt = 100000;
+ wqe_pcnt = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
+ pkt_pcnt = (wqe_pcnt >> 7) & 0x7f;
+ wqe_pcnt &= 0x7f;
+
+ num_segs = (2 + pkt_pcnt - wqe_pcnt) & 3;
+
+ if (num_segs == 0)
+ goto fix_ipd_exit;
+
+ num_segs += 1;
+
+ size =
+ FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES +
+ ((num_segs - 1) * FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES) -
+ (FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES / 2);
+
+ cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)),
+ 1 << INDEX(FIX_IPD_OUTPORT));
+ CVMX_SYNC;
+
+ g_buffer.u64 = 0;
+ g_buffer.s.addr =
+ cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_WQE_POOL));
+ if (g_buffer.s.addr == 0) {
+ cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
+ "buffer allocation failure.\n");
+ goto fix_ipd_exit;
+ }
+
+ g_buffer.s.pool = CVMX_FPA_WQE_POOL;
+ g_buffer.s.size = num_segs;
+
+ pkt_buffer.u64 = 0;
+ pkt_buffer.s.addr =
+ cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL));
+ if (pkt_buffer.s.addr == 0) {
+ cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
+ "buffer allocation failure.\n");
+ goto fix_ipd_exit;
+ }
+ pkt_buffer.s.i = 1;
+ pkt_buffer.s.pool = CVMX_FPA_PACKET_POOL;
+ pkt_buffer.s.size = FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES;
+
+ p64 = (uint64_t *) cvmx_phys_to_ptr(pkt_buffer.s.addr);
+ p64[0] = 0xffffffffffff0000ull;
+ p64[1] = 0x08004510ull;
+ p64[2] = ((uint64_t) (size - 14) << 48) | 0x5ae740004000ull;
+ p64[3] = 0x3a5fc0a81073c0a8ull;
+
+ for (i = 0; i < num_segs; i++) {
+ if (i > 0)
+ pkt_buffer.s.size =
+ FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES;
+
+ if (i == (num_segs - 1))
+ pkt_buffer.s.i = 0;
+
+ *(uint64_t *) cvmx_phys_to_ptr(g_buffer.s.addr +
+ 8 * i) = pkt_buffer.u64;
+ }
+
+ /* Build the PKO command */
+ pko_command.u64 = 0;
+ pko_command.s.segs = num_segs;
+ pko_command.s.total_bytes = size;
+ pko_command.s.dontfree = 0;
+ pko_command.s.gather = 1;
+
+ gmx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG
+ (INDEX(FIX_IPD_OUTPORT),
+ INTERFACE(FIX_IPD_OUTPORT)));
+ gmx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG
+ (INDEX(FIX_IPD_OUTPORT),
+ INTERFACE(FIX_IPD_OUTPORT)), gmx_cfg.u64);
+ cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
+ 1 << INDEX(FIX_IPD_OUTPORT));
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
+ 1 << INDEX(FIX_IPD_OUTPORT));
+
+ cvmx_write_csr(CVMX_GMXX_RXX_JABBER
+ (INDEX(FIX_IPD_OUTPORT),
+ INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
+ (INDEX(FIX_IPD_OUTPORT),
+ INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
+
+ cvmx_pko_send_packet_prepare(FIX_IPD_OUTPORT,
+ cvmx_pko_get_base_queue
+ (FIX_IPD_OUTPORT),
+ CVMX_PKO_LOCK_CMD_QUEUE);
+ cvmx_pko_send_packet_finish(FIX_IPD_OUTPORT,
+ cvmx_pko_get_base_queue
+ (FIX_IPD_OUTPORT), pko_command,
+ g_buffer, CVMX_PKO_LOCK_CMD_QUEUE);
+
+ CVMX_SYNC;
+
+ do {
+ work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
+ retry_cnt--;
+ } while ((work == NULL) && (retry_cnt > 0));
+
+ if (!retry_cnt)
+ cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
+ "get_work() timeout occurred.\n");
+
+ /* Free packet */
+ if (work)
+ cvmx_helper_free_packet_data(work);
+ }
+
+fix_ipd_exit:
+
+ /* Return CSR configs to saved values */
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG
+ (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
+ prtx_cfg);
+ cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
+ tx_ptr_en);
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
+ rx_ptr_en);
+ cvmx_write_csr(CVMX_GMXX_RXX_JABBER
+ (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
+ rxx_jabber);
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
+ (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
+ frame_max);
+ cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)), 0);
+
+ CVMX_SYNC;
+ if (num_segs)
+ cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT failed.\n");
+
+ return !!num_segs;
+
+}
+
+/**
+ * Called after all internal packet IO paths are setup. This
+ * function enables IPD/PIP and begins packet input and output.
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_helper_ipd_and_packet_input_enable(void)
+{
+ int num_interfaces;
+ int interface;
+
+ /* Enable IPD */
+ cvmx_ipd_enable();
+
+ /*
+ * Time to enable packet input and output on the hardware ports. Note
+ * that at this point IPD/PIP must be fully functional and PKO
+ * must be disabled
+ */
+ num_interfaces = cvmx_helper_get_number_of_interfaces();
+ for (interface = 0; interface < num_interfaces; interface++) {
+ if (cvmx_helper_ports_on_interface(interface) > 0)
+ __cvmx_helper_packet_hardware_enable(interface);
+ }
+
+ /* Finally enable PKO now that the entire path is up and running */
+ cvmx_pko_enable();
+
+ if ((OCTEON_IS_MODEL(OCTEON_CN31XX_PASS1)
+ || OCTEON_IS_MODEL(OCTEON_CN30XX_PASS1))
+ && (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM))
+ __cvmx_helper_errata_fix_ipd_ptr_alignment();
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_ipd_and_packet_input_enable);
+
+/**
+ * Initialize the PIP, IPD, and PKO hardware to support
+ * simple priority based queues for the ethernet ports. Each
+ * port is configured with a number of priority queues based
+ * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
+ * priority than the previous.
+ *
+ * Returns Zero on success, non-zero on failure
+ */
+int cvmx_helper_initialize_packet_io_global(void)
+{
+ int result = 0;
+ int interface;
+ union cvmx_l2c_cfg l2c_cfg;
+ const int num_interfaces = cvmx_helper_get_number_of_interfaces();
+
+ /*
+ * CN52XX pass 1: Due to a bug in 2nd order CDR, it needs to
+ * be disabled.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
+ __cvmx_helper_errata_qlm_disable_2nd_order_cdr(1);
+
+ /*
+ * Tell L2 to give the IOB statically higher priority compared
+ * to the cores. This avoids conditions where IO blocks might
+ * be starved under very high L2 loads.
+ */
+ l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
+ l2c_cfg.s.lrf_arb_mode = 0;
+ l2c_cfg.s.rfb_arb_mode = 0;
+ cvmx_write_csr(CVMX_L2C_CFG, l2c_cfg.u64);
+
+ cvmx_pko_initialize_global();
+ for (interface = 0; interface < num_interfaces; interface++) {
+ result |= cvmx_helper_interface_probe(interface);
+ if (cvmx_helper_ports_on_interface(interface) > 0)
+ cvmx_dprintf("Interface %d has %d ports (%s)\n",
+ interface,
+ cvmx_helper_ports_on_interface(interface),
+ cvmx_helper_interface_mode_to_string
+ (cvmx_helper_interface_get_mode
+ (interface)));
+ result |= __cvmx_helper_interface_setup_ipd(interface);
+ result |= __cvmx_helper_interface_setup_pko(interface);
+ }
+
+ result |= __cvmx_helper_global_setup_ipd();
+ result |= __cvmx_helper_global_setup_pko();
+
+ /* Enable any flow control and backpressure */
+ result |= __cvmx_helper_global_setup_backpressure();
+
+#if CVMX_HELPER_ENABLE_IPD
+ result |= cvmx_helper_ipd_and_packet_input_enable();
+#endif
+ return result;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_initialize_packet_io_global);
+
+/**
+ * Does core local initialization for packet io
+ *
+ * Returns Zero on success, non-zero on failure
+ */
+int cvmx_helper_initialize_packet_io_local(void)
+{
+ return cvmx_pko_initialize_local();
+}
+
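+/*
+ * Typical bring-up order (sketch, error handling trimmed): the global
+ * initialization runs once, then every core that will do packet I/O
+ * runs the local initialization.
+ *
+ * if (cvmx_helper_initialize_packet_io_global())
+ *     return -1;
+ * // on each packet I/O core:
+ * cvmx_helper_initialize_packet_io_local();
+ */
+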
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port)
+{
+ union cvmx_helper_link_info result;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ /*
+ * The default result will be a down link unless the code below
+ * changes it.
+ */
+ result.u64 = 0;
+
+ if (index >= cvmx_helper_ports_on_interface(interface))
+ return result;
+
+ switch (cvmx_helper_interface_get_mode(interface)) {
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ /* Network links are not supported */
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ result = __cvmx_helper_xaui_link_get(ipd_port);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ if (index == 0)
+ result = __cvmx_helper_rgmii_link_get(ipd_port);
+ else {
+ WARN_ONCE(1, "Using deprecated link status - please update your DT");
+ result.s.full_duplex = 1;
+ result.s.link_up = 1;
+ result.s.speed = 1000;
+ }
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ result = __cvmx_helper_rgmii_link_get(ipd_port);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ result = __cvmx_helper_spi_link_get(ipd_port);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ result = __cvmx_helper_sgmii_link_get(ipd_port);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ /* Network links are not supported */
+ break;
+ }
+ return result;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_link_get);
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get().
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_helper_link_set(int ipd_port, union cvmx_helper_link_info link_info)
+{
+ int result = -1;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ if (index >= cvmx_helper_ports_on_interface(interface))
+ return -1;
+
+ switch (cvmx_helper_interface_get_mode(interface)) {
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ result = __cvmx_helper_xaui_link_set(ipd_port, link_info);
+ break;
+ /*
+ * RGMII/GMII/MII are all treated about the same. Most
+ * functions refer to these ports as RGMII.
+ */
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ result = __cvmx_helper_rgmii_link_set(ipd_port, link_info);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ result = __cvmx_helper_spi_link_set(ipd_port, link_info);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ result = __cvmx_helper_sgmii_link_set(ipd_port, link_info);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ break;
+ }
+ return result;
+}
+EXPORT_SYMBOL_GPL(cvmx_helper_link_set);
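+
+/*
+ * Example (sketch): poll autonegotiation and, when the link is up,
+ * apply exactly the state returned by cvmx_helper_link_get(), as the
+ * comment above requires. ipd_port stands for any valid IPD/PKO port.
+ *
+ * union cvmx_helper_link_info li = cvmx_helper_link_get(ipd_port);
+ * if (li.s.link_up)
+ *     cvmx_helper_link_set(ipd_port, li);
+ */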
diff --git a/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c b/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
new file mode 100644
index 000000000..2f415d9d0
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
@@ -0,0 +1,371 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2009 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Automatically generated functions useful for enabling
+ * and decoding RSL_INT_BLOCKS interrupts.
+ *
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-pcsx-defs.h>
+#include <asm/octeon/cvmx-pcsxx-defs.h>
+#include <asm/octeon/cvmx-spxx-defs.h>
+#include <asm/octeon/cvmx-stxx-defs.h>
+
+#ifndef PRINT_ERROR
+#define PRINT_ERROR(format, ...)
+#endif
+
+
+/**
+ * __cvmx_interrupt_gmxx_rxx_int_en_enable enables all interrupt bits in cvmx_gmxx_rxx_int_en_t
+ */
+void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
+{
+ union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, block),
+ cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, block)));
+ gmx_rx_int_en.u64 = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_29_63 */
+ gmx_rx_int_en.s.hg2cc = 1;
+ gmx_rx_int_en.s.hg2fld = 1;
+ gmx_rx_int_en.s.undat = 1;
+ gmx_rx_int_en.s.uneop = 1;
+ gmx_rx_int_en.s.unsop = 1;
+ gmx_rx_int_en.s.bad_term = 1;
+ gmx_rx_int_en.s.bad_seq = 1;
+ gmx_rx_int_en.s.rem_fault = 1;
+ gmx_rx_int_en.s.loc_fault = 1;
+ gmx_rx_int_en.s.pause_drp = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_16_18 */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_9_9 */
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_5_6 */
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_2_2 */
+ gmx_rx_int_en.s.carext = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_0_0 */
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_19_63 */
+ /*gmx_rx_int_en.s.phy_dupx = 1; */
+ /*gmx_rx_int_en.s.phy_spd = 1; */
+ /*gmx_rx_int_en.s.phy_link = 1; */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ gmx_rx_int_en.s.niberr = 1;
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
+ gmx_rx_int_en.s.alnerr = 1;
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ gmx_rx_int_en.s.maxerr = 1;
+ gmx_rx_int_en.s.carext = 1;
+ gmx_rx_int_en.s.minerr = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_20_63 */
+ gmx_rx_int_en.s.pause_drp = 1;
+ /*gmx_rx_int_en.s.phy_dupx = 1; */
+ /*gmx_rx_int_en.s.phy_spd = 1; */
+ /*gmx_rx_int_en.s.phy_link = 1; */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ gmx_rx_int_en.s.niberr = 1;
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_6_6 */
+ gmx_rx_int_en.s.alnerr = 1;
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_2_2 */
+ gmx_rx_int_en.s.carext = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_0_0 */
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_19_63 */
+ /*gmx_rx_int_en.s.phy_dupx = 1; */
+ /*gmx_rx_int_en.s.phy_spd = 1; */
+ /*gmx_rx_int_en.s.phy_link = 1; */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ gmx_rx_int_en.s.niberr = 1;
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
+ gmx_rx_int_en.s.alnerr = 1;
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ gmx_rx_int_en.s.maxerr = 1;
+ gmx_rx_int_en.s.carext = 1;
+ gmx_rx_int_en.s.minerr = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_19_63 */
+ /*gmx_rx_int_en.s.phy_dupx = 1; */
+ /*gmx_rx_int_en.s.phy_spd = 1; */
+ /*gmx_rx_int_en.s.phy_link = 1; */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ gmx_rx_int_en.s.niberr = 1;
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
+ gmx_rx_int_en.s.alnerr = 1;
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ gmx_rx_int_en.s.maxerr = 1;
+ gmx_rx_int_en.s.carext = 1;
+ gmx_rx_int_en.s.minerr = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_20_63 */
+ gmx_rx_int_en.s.pause_drp = 1;
+ /*gmx_rx_int_en.s.phy_dupx = 1; */
+ /*gmx_rx_int_en.s.phy_spd = 1; */
+ /*gmx_rx_int_en.s.phy_link = 1; */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ gmx_rx_int_en.s.niberr = 1;
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
+ gmx_rx_int_en.s.alnerr = 1;
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ gmx_rx_int_en.s.maxerr = 1;
+ gmx_rx_int_en.s.carext = 1;
+ gmx_rx_int_en.s.minerr = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_29_63 */
+ gmx_rx_int_en.s.hg2cc = 1;
+ gmx_rx_int_en.s.hg2fld = 1;
+ gmx_rx_int_en.s.undat = 1;
+ gmx_rx_int_en.s.uneop = 1;
+ gmx_rx_int_en.s.unsop = 1;
+ gmx_rx_int_en.s.bad_term = 1;
+ gmx_rx_int_en.s.bad_seq = 0;
+ gmx_rx_int_en.s.rem_fault = 1;
+ gmx_rx_int_en.s.loc_fault = 0;
+ gmx_rx_int_en.s.pause_drp = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_16_18 */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_9_9 */
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_5_6 */
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_2_2 */
+ gmx_rx_int_en.s.carext = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_0_0 */
+ }
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, block), gmx_rx_int_en.u64);
+}
+/**
+ * __cvmx_interrupt_pcsx_intx_en_reg_enable enables all interrupt bits in cvmx_pcsx_intx_en_reg_t
+ */
+void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block)
+{
+ union cvmx_pcsx_intx_en_reg pcs_int_en_reg;
+ cvmx_write_csr(CVMX_PCSX_INTX_REG(index, block),
+ cvmx_read_csr(CVMX_PCSX_INTX_REG(index, block)));
+ pcs_int_en_reg.u64 = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ /* Skipping pcs_int_en_reg.s.reserved_12_63 */
+ /*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
+ pcs_int_en_reg.s.sync_bad_en = 1;
+ pcs_int_en_reg.s.an_bad_en = 1;
+ pcs_int_en_reg.s.rxlock_en = 1;
+ pcs_int_en_reg.s.rxbad_en = 1;
+ /*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
+ pcs_int_en_reg.s.txbad_en = 1;
+ pcs_int_en_reg.s.txfifo_en = 1;
+ pcs_int_en_reg.s.txfifu_en = 1;
+ pcs_int_en_reg.s.an_err_en = 1;
+ /*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
+ /*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ /* Skipping pcs_int_en_reg.s.reserved_12_63 */
+ /*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
+ pcs_int_en_reg.s.sync_bad_en = 1;
+ pcs_int_en_reg.s.an_bad_en = 1;
+ pcs_int_en_reg.s.rxlock_en = 1;
+ pcs_int_en_reg.s.rxbad_en = 1;
+ /*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
+ pcs_int_en_reg.s.txbad_en = 1;
+ pcs_int_en_reg.s.txfifo_en = 1;
+ pcs_int_en_reg.s.txfifu_en = 1;
+ pcs_int_en_reg.s.an_err_en = 1;
+ /*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
+ /*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
+ }
+ cvmx_write_csr(CVMX_PCSX_INTX_EN_REG(index, block), pcs_int_en_reg.u64);
+}
+/**
+ * __cvmx_interrupt_pcsxx_int_en_reg_enable enables all interrupt bits in cvmx_pcsxx_int_en_reg_t
+ */
+void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index)
+{
+ union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
+ cvmx_write_csr(CVMX_PCSXX_INT_REG(index),
+ cvmx_read_csr(CVMX_PCSXX_INT_REG(index)));
+ pcsx_int_en_reg.u64 = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ /* Skipping pcsx_int_en_reg.s.reserved_6_63 */
+ pcsx_int_en_reg.s.algnlos_en = 1;
+ pcsx_int_en_reg.s.synlos_en = 1;
+ pcsx_int_en_reg.s.bitlckls_en = 1;
+ pcsx_int_en_reg.s.rxsynbad_en = 1;
+ pcsx_int_en_reg.s.rxbad_en = 1;
+ pcsx_int_en_reg.s.txflt_en = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ /* Skipping pcsx_int_en_reg.s.reserved_6_63 */
+ pcsx_int_en_reg.s.algnlos_en = 1;
+ pcsx_int_en_reg.s.synlos_en = 1;
+ pcsx_int_en_reg.s.bitlckls_en = 0; /* Happens if XAUI module is not installed */
+ pcsx_int_en_reg.s.rxsynbad_en = 1;
+ pcsx_int_en_reg.s.rxbad_en = 1;
+ pcsx_int_en_reg.s.txflt_en = 1;
+ }
+ cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(index), pcsx_int_en_reg.u64);
+}
+
+/**
+ * __cvmx_interrupt_spxx_int_msk_enable enables all interrupt bits in cvmx_spxx_int_msk_t
+ */
+void __cvmx_interrupt_spxx_int_msk_enable(int index)
+{
+ union cvmx_spxx_int_msk spx_int_msk;
+ cvmx_write_csr(CVMX_SPXX_INT_REG(index),
+ cvmx_read_csr(CVMX_SPXX_INT_REG(index)));
+ spx_int_msk.u64 = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ /* Skipping spx_int_msk.s.reserved_12_63 */
+ spx_int_msk.s.calerr = 1;
+ spx_int_msk.s.syncerr = 1;
+ spx_int_msk.s.diperr = 1;
+ spx_int_msk.s.tpaovr = 1;
+ spx_int_msk.s.rsverr = 1;
+ spx_int_msk.s.drwnng = 1;
+ spx_int_msk.s.clserr = 1;
+ spx_int_msk.s.spiovr = 1;
+ /* Skipping spx_int_msk.s.reserved_2_3 */
+ spx_int_msk.s.abnorm = 1;
+ spx_int_msk.s.prtnxa = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ /* Skipping spx_int_msk.s.reserved_12_63 */
+ spx_int_msk.s.calerr = 1;
+ spx_int_msk.s.syncerr = 1;
+ spx_int_msk.s.diperr = 1;
+ spx_int_msk.s.tpaovr = 1;
+ spx_int_msk.s.rsverr = 1;
+ spx_int_msk.s.drwnng = 1;
+ spx_int_msk.s.clserr = 1;
+ spx_int_msk.s.spiovr = 1;
+ /* Skipping spx_int_msk.s.reserved_2_3 */
+ spx_int_msk.s.abnorm = 1;
+ spx_int_msk.s.prtnxa = 1;
+ }
+ cvmx_write_csr(CVMX_SPXX_INT_MSK(index), spx_int_msk.u64);
+}
+/**
+ * __cvmx_interrupt_stxx_int_msk_enable enables all interrupt bits in cvmx_stxx_int_msk_t
+ */
+void __cvmx_interrupt_stxx_int_msk_enable(int index)
+{
+ union cvmx_stxx_int_msk stx_int_msk;
+ cvmx_write_csr(CVMX_STXX_INT_REG(index),
+ cvmx_read_csr(CVMX_STXX_INT_REG(index)));
+ stx_int_msk.u64 = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ /* Skipping stx_int_msk.s.reserved_8_63 */
+ stx_int_msk.s.frmerr = 1;
+ stx_int_msk.s.unxfrm = 1;
+ stx_int_msk.s.nosync = 1;
+ stx_int_msk.s.diperr = 1;
+ stx_int_msk.s.datovr = 1;
+ stx_int_msk.s.ovrbst = 1;
+ stx_int_msk.s.calpar1 = 1;
+ stx_int_msk.s.calpar0 = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ /* Skipping stx_int_msk.s.reserved_8_63 */
+ stx_int_msk.s.frmerr = 1;
+ stx_int_msk.s.unxfrm = 1;
+ stx_int_msk.s.nosync = 1;
+ stx_int_msk.s.diperr = 1;
+ stx_int_msk.s.datovr = 1;
+ stx_int_msk.s.ovrbst = 1;
+ stx_int_msk.s.calpar1 = 1;
+ stx_int_msk.s.calpar0 = 1;
+ }
+ cvmx_write_csr(CVMX_STXX_INT_MSK(index), stx_int_msk.u64);
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c b/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c
new file mode 100644
index 000000000..d23f46736
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c
@@ -0,0 +1,140 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Utility functions to decode Octeon's RSL_INT_BLOCKS
+ * interrupts into error messages.
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-asxx-defs.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+
+#ifndef PRINT_ERROR
+#define PRINT_ERROR(format, ...)
+#endif
+
+void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block);
+
+/**
+ * Enable ASX error interrupts that exist on CN3XXX, CN50XX, and
+ * CN58XX.
+ *
+ * @block: Interface to enable 0-1
+ */
+void __cvmx_interrupt_asxx_enable(int block)
+{
+ int mask;
+ union cvmx_asxx_int_en csr;
+ /*
+ * CN38XX and CN58XX have two interfaces with 4 ports per
+ * interface. All other chips have a max of 3 ports on
+ * interface 0
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
+ mask = 0xf; /* Set enables for 4 ports */
+ else
+ mask = 0x7; /* Set enables for 3 ports */
+
+ /* Enable interface interrupts */
+ csr.u64 = cvmx_read_csr(CVMX_ASXX_INT_EN(block));
+ csr.s.txpsh = mask;
+ csr.s.txpop = mask;
+ csr.s.ovrflw = mask;
+ cvmx_write_csr(CVMX_ASXX_INT_EN(block), csr.u64);
+}
+/**
+ * Enable GMX error reporting for the supplied interface
+ *
+ * @interface: Interface to enable
+ */
+void __cvmx_interrupt_gmxx_enable(int interface)
+{
+ union cvmx_gmxx_inf_mode mode;
+ union cvmx_gmxx_tx_int_en gmx_tx_int_en;
+ int num_ports;
+ int index;
+
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ if (mode.s.en) {
+ switch (mode.cn52xx.mode) {
+ case 1: /* XAUI */
+ num_ports = 1;
+ break;
+ case 2: /* SGMII */
+ case 3: /* PICMG */
+ num_ports = 4;
+ break;
+ default: /* Disabled */
+ num_ports = 0;
+ break;
+ }
+ } else
+ num_ports = 0;
+ } else {
+ if (mode.s.en) {
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ /*
+ * SPI on CN38XX and CN58XX reports all
+ * errors through port 0. RGMII needs
+ * to check all 4 ports
+ */
+ if (mode.s.type)
+ num_ports = 1;
+ else
+ num_ports = 4;
+ } else {
+ /*
+ * CN30XX, CN31XX, and CN50XX have two
+ * or three ports. GMII and MII have 2,
+ * RGMII has 3
+ */
+ if (mode.s.type)
+ num_ports = 2;
+ else
+ num_ports = 3;
+ }
+ } else
+ num_ports = 0;
+ }
+
+ gmx_tx_int_en.u64 = 0;
+ if (num_ports) {
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX))
+ gmx_tx_int_en.cn38xx.ncb_nxa = 1;
+ gmx_tx_int_en.s.pko_nxa = 1;
+ }
+ gmx_tx_int_en.s.undflw = (1 << num_ports) - 1;
+ cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
+ for (index = 0; index < num_ports; index++)
+ __cvmx_interrupt_gmxx_rxx_int_en_enable(index, interface);
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
new file mode 100644
index 000000000..83df0a963
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -0,0 +1,920 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Implementation of the Level 2 Cache (L2C) control,
+ * measurement, and debugging facilities.
+ */
+
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-l2c.h>
+#include <asm/octeon/cvmx-spinlock.h>
+
+/*
+ * This spinlock is used internally to ensure that only one core is
+ * performing certain L2 operations at a time.
+ *
+ * NOTE: This only protects calls from within a single application -
+ * if multiple applications or operating systems are running, then it
+ * is up to the user program to coordinate between them.
+ */
+static cvmx_spinlock_t cvmx_l2c_spinlock;
+
+int cvmx_l2c_get_core_way_partition(uint32_t core)
+{
+ uint32_t field;
+
+ /* Validate the core number */
+ if (core >= cvmx_octeon_num_cores())
+ return -1;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return cvmx_read_csr(CVMX_L2C_WPAR_PPX(core)) & 0xffff;
+
+ /*
+ * Use the lower two bits of the coreNumber to determine the
+ * bit offset of the UMSK[] field in the L2C_SPAR register.
+ */
+ field = (core & 0x3) * 8;
+
+ /*
+ * Return the UMSK[] field from the appropriate L2C_SPAR
+ * register based on the coreNumber.
+ */
+
+ switch (core & 0xC) {
+ case 0x0:
+ return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> field;
+ case 0x4:
+ return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> field;
+ case 0x8:
+ return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> field;
+ case 0xC:
+ return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> field;
+ }
+ return 0;
+}
+
+int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
+{
+ uint32_t field;
+ uint32_t valid_mask;
+
+ valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
+
+ mask &= valid_mask;
+
+ /* A UMSK setting which blocks all L2C Ways is an error on some chips */
+ if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return -1;
+
+ /* Validate the core number */
+ if (core >= cvmx_octeon_num_cores())
+ return -1;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ cvmx_write_csr(CVMX_L2C_WPAR_PPX(core), mask);
+ return 0;
+ }
+
+ /*
+ * Use the lower two bits of core to determine the bit offset of the
+ * UMSK[] field in the L2C_SPAR register.
+ */
+ field = (core & 0x3) * 8;
+
+ /*
+ * Assign the new mask setting to the UMSK[] field in the appropriate
+ * L2C_SPAR register based on the core_num.
+ *
+ */
+ switch (core & 0xC) {
+ case 0x0:
+ cvmx_write_csr(CVMX_L2C_SPAR0,
+ (cvmx_read_csr(CVMX_L2C_SPAR0) & ~(0xFF << field)) |
+ mask << field);
+ break;
+ case 0x4:
+ cvmx_write_csr(CVMX_L2C_SPAR1,
+ (cvmx_read_csr(CVMX_L2C_SPAR1) & ~(0xFF << field)) |
+ mask << field);
+ break;
+ case 0x8:
+ cvmx_write_csr(CVMX_L2C_SPAR2,
+ (cvmx_read_csr(CVMX_L2C_SPAR2) & ~(0xFF << field)) |
+ mask << field);
+ break;
+ case 0xC:
+ cvmx_write_csr(CVMX_L2C_SPAR3,
+ (cvmx_read_csr(CVMX_L2C_SPAR3) & ~(0xFF << field)) |
+ mask << field);
+ break;
+ }
+ return 0;
+}
+
+int cvmx_l2c_set_hw_way_partition(uint32_t mask)
+{
+ uint32_t valid_mask;
+
+ valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
+ mask &= valid_mask;
+
+ /* A UMSK setting which blocks all L2C Ways is an error on some chips */
+ if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return -1;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask);
+ else
+ cvmx_write_csr(CVMX_L2C_SPAR4,
+ (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
+ return 0;
+}
+
+int cvmx_l2c_get_hw_way_partition(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return cvmx_read_csr(CVMX_L2C_WPAR_IOBX(0)) & 0xffff;
+ else
+ return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
+}
+
+void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
+ uint32_t clear_on_read)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ union cvmx_l2c_pfctl pfctl;
+
+ pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
+
+ switch (counter) {
+ case 0:
+ pfctl.s.cnt0sel = event;
+ pfctl.s.cnt0ena = 1;
+ pfctl.s.cnt0rdclr = clear_on_read;
+ break;
+ case 1:
+ pfctl.s.cnt1sel = event;
+ pfctl.s.cnt1ena = 1;
+ pfctl.s.cnt1rdclr = clear_on_read;
+ break;
+ case 2:
+ pfctl.s.cnt2sel = event;
+ pfctl.s.cnt2ena = 1;
+ pfctl.s.cnt2rdclr = clear_on_read;
+ break;
+ case 3:
+ default:
+ pfctl.s.cnt3sel = event;
+ pfctl.s.cnt3ena = 1;
+ pfctl.s.cnt3rdclr = clear_on_read;
+ break;
+ }
+
+ cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
+ } else {
+ union cvmx_l2c_tadx_prf l2c_tadx_prf;
+ int tad;
+
+ cvmx_dprintf("L2C performance counter events are different for this chip, mapping 'event' to cvmx_l2c_tad_event_t\n");
+ if (clear_on_read)
+ cvmx_dprintf("L2C counters don't support clear on read for this chip\n");
+
+ l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0));
+
+ switch (counter) {
+ case 0:
+ l2c_tadx_prf.s.cnt0sel = event;
+ break;
+ case 1:
+ l2c_tadx_prf.s.cnt1sel = event;
+ break;
+ case 2:
+ l2c_tadx_prf.s.cnt2sel = event;
+ break;
+ default:
+ case 3:
+ l2c_tadx_prf.s.cnt3sel = event;
+ break;
+ }
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ cvmx_write_csr(CVMX_L2C_TADX_PRF(tad),
+ l2c_tadx_prf.u64);
+ }
+}
+
+uint64_t cvmx_l2c_read_perf(uint32_t counter)
+{
+ switch (counter) {
+ case 0:
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return cvmx_read_csr(CVMX_L2C_PFC0);
+ else {
+ uint64_t counter = 0;
+ int tad;
+
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad));
+ return counter;
+ }
+ case 1:
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return cvmx_read_csr(CVMX_L2C_PFC1);
+ else {
+ uint64_t counter = 0;
+ int tad;
+
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad));
+ return counter;
+ }
+ case 2:
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return cvmx_read_csr(CVMX_L2C_PFC2);
+ else {
+ uint64_t counter = 0;
+ int tad;
+
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad));
+ return counter;
+ }
+ case 3:
+ default:
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return cvmx_read_csr(CVMX_L2C_PFC3);
+ else {
+ uint64_t counter = 0;
+ int tad;
+
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad));
+ return counter;
+ }
+ }
+}
+
+/**
+ * @INTERNAL
+ * Helper function used to fault in cache lines for L2 cache locking
+ *
+ * @addr: Address of base of memory region to read into L2 cache
+ * @len: Length (in bytes) of region to fault in
+ */
+static void fault_in(uint64_t addr, int len)
+{
+ char *ptr;
+
+ /*
+ * Adjust addr and length so we get all cache lines even for
+ * small ranges spanning two cache lines.
+ */
+ len += addr & CVMX_CACHE_LINE_MASK;
+ addr &= ~CVMX_CACHE_LINE_MASK;
+ ptr = cvmx_phys_to_ptr(addr);
+ /*
+ * Invalidate L1 cache to make sure all loads result in data
+ * being in L2.
+ */
+ CVMX_DCACHE_INVALIDATE;
+ while (len > 0) {
+ READ_ONCE(*ptr);
+ len -= CVMX_CACHE_LINE_SIZE;
+ ptr += CVMX_CACHE_LINE_SIZE;
+ }
+}
+
+int cvmx_l2c_lock_line(uint64_t addr)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ int shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
+ uint64_t assoc = cvmx_l2c_get_num_assoc();
+ uint64_t tag = addr >> shift;
+ uint64_t index = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, cvmx_l2c_address_to_index(addr) << CVMX_L2C_IDX_ADDR_SHIFT);
+ uint64_t way;
+ union cvmx_l2c_tadx_tag l2c_tadx_tag;
+
+ CVMX_CACHE_LCKL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, addr), 0);
+
+ /* Make sure we were able to lock the line */
+ for (way = 0; way < assoc; way++) {
+ CVMX_CACHE_LTGL2I(index | (way << shift), 0);
+ /* make sure CVMX_L2C_TADX_TAG is updated */
+ CVMX_SYNC;
+ l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
+ if (l2c_tadx_tag.s.valid && l2c_tadx_tag.s.tag == tag)
+ break;
+ }
+
+ /* Check if a valid line is found */
+ if (way >= assoc) {
+ /* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: line not found for locking at 0x%llx address\n", (unsigned long long)addr); */
+ return -1;
+ }
+
+ /* Check if lock bit is not set */
+ if (!l2c_tadx_tag.s.lock) {
+ /* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: Not able to lock at 0x%llx address\n", (unsigned long long)addr); */
+ return -1;
+ }
+ return way;
+ } else {
+ int retval = 0;
+ union cvmx_l2c_dbg l2cdbg;
+ union cvmx_l2c_lckbase lckbase;
+ union cvmx_l2c_lckoff lckoff;
+ union cvmx_l2t_err l2t_err;
+
+ cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+
+ l2cdbg.u64 = 0;
+ lckbase.u64 = 0;
+ lckoff.u64 = 0;
+
+ /* Clear l2t error bits if set */
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ l2t_err.s.lckerr = 1;
+ l2t_err.s.lckerr2 = 1;
+ cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
+
+ addr &= ~CVMX_CACHE_LINE_MASK;
+
+ /* Set this core as debug core */
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
+ cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
+ cvmx_read_csr(CVMX_L2C_LCKOFF);
+
+ if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
+ int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
+ uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS;
+
+ lckbase.s.lck_base = addr_tmp >> 7;
+
+ } else {
+ lckbase.s.lck_base = addr >> 7;
+ }
+
+ lckbase.s.lck_ena = 1;
+ cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+ /* Make sure it gets there */
+ cvmx_read_csr(CVMX_L2C_LCKBASE);
+
+ fault_in(addr, CVMX_CACHE_LINE_SIZE);
+
+ lckbase.s.lck_ena = 0;
+ cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+ /* Make sure it gets there */
+ cvmx_read_csr(CVMX_L2C_LCKBASE);
+
+ /* Stop being debug core */
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
+ retval = 1; /* We were unable to lock the line */
+
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ return retval;
+ }
+}
+
+int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
+{
+ int retval = 0;
+
+ /* Round start/end to cache line boundaries */
+ len += start & CVMX_CACHE_LINE_MASK;
+ start &= ~CVMX_CACHE_LINE_MASK;
+ len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
+
+ while (len) {
+ retval += cvmx_l2c_lock_line(start);
+ start += CVMX_CACHE_LINE_SIZE;
+ len -= CVMX_CACHE_LINE_SIZE;
+ }
+ return retval;
+}
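+
+/*
+ * Worked example of the rounding above (illustrative addresses only,
+ * assuming the usual 128-byte OCTEON cache line):
+ *   start = 0x1010, len = 0x30
+ *   len  += start & 0x7f   ->  0x40
+ *   start &= ~0x7f         ->  0x1000
+ *   len rounds up to 0x80, so cvmx_l2c_lock_line() is called once,
+ *   for the single line that holds the whole 0x1010-0x103f range.
+ */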
+
+void cvmx_l2c_flush(void)
+{
+ uint64_t assoc, set;
+ uint64_t n_assoc, n_set;
+
+ n_set = cvmx_l2c_get_num_sets();
+ n_assoc = cvmx_l2c_get_num_assoc();
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ uint64_t address;
+ /* These may look like constants, but they aren't... */
+ int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
+ int set_shift = CVMX_L2C_IDX_ADDR_SHIFT;
+
+ for (set = 0; set < n_set; set++) {
+ for (assoc = 0; assoc < n_assoc; assoc++) {
+ address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ (assoc << assoc_shift) | (set << set_shift));
+ CVMX_CACHE_WBIL2I(address, 0);
+ }
+ }
+ } else {
+ for (set = 0; set < n_set; set++)
+ for (assoc = 0; assoc < n_assoc; assoc++)
+ cvmx_l2c_flush_line(assoc, set);
+ }
+}
+
+
+int cvmx_l2c_unlock_line(uint64_t address)
+{
+
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ int assoc;
+ union cvmx_l2c_tag tag;
+ uint32_t tag_addr;
+ uint32_t index = cvmx_l2c_address_to_index(address);
+
+ tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
+
+ /*
+ * For 63XX, we can flush a line by using the physical
+ * address directly, so finding the cache line used by
+ * the address is only required to provide the proper
+ * return value for the function.
+ */
+ for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
+ tag = cvmx_l2c_get_tag(assoc, index);
+
+ if (tag.s.V && (tag.s.addr == tag_addr)) {
+ CVMX_CACHE_WBIL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0);
+ return tag.s.L;
+ }
+ }
+ } else {
+ int assoc;
+ union cvmx_l2c_tag tag;
+ uint32_t tag_addr;
+
+ uint32_t index = cvmx_l2c_address_to_index(address);
+
+ /* Compute portion of address that is stored in tag */
+ tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
+ for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
+ tag = cvmx_l2c_get_tag(assoc, index);
+
+ if (tag.s.V && (tag.s.addr == tag_addr)) {
+ cvmx_l2c_flush_line(assoc, index);
+ return tag.s.L;
+ }
+ }
+ }
+ return 0;
+}
+
+int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
+{
+ int num_unlocked = 0;
+ /* Round start/end to cache line boundaries */
+ len += start & CVMX_CACHE_LINE_MASK;
+ start &= ~CVMX_CACHE_LINE_MASK;
+ len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
+ while (len > 0) {
+ num_unlocked += cvmx_l2c_unlock_line(start);
+ start += CVMX_CACHE_LINE_SIZE;
+ len -= CVMX_CACHE_LINE_SIZE;
+ }
+
+ return num_unlocked;
+}
+
+/*
+ * Internal l2c tag types. These are converted to a generic structure
+ * that can be used on all chips.
+ */
+union __cvmx_l2c_tag {
+ uint64_t u64;
+ struct cvmx_l2c_tag_cn50xx {
+ __BITFIELD_FIELD(uint64_t reserved:40,
+ __BITFIELD_FIELD(uint64_t V:1, /* Line valid */
+ __BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
+ __BITFIELD_FIELD(uint64_t L:1, /* Line locked */
+ __BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
+ __BITFIELD_FIELD(uint64_t addr:20, /* Phys addr (33..14) */
+ ;))))))
+ } cn50xx;
+ struct cvmx_l2c_tag_cn30xx {
+ __BITFIELD_FIELD(uint64_t reserved:41,
+ __BITFIELD_FIELD(uint64_t V:1, /* Line valid */
+ __BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
+ __BITFIELD_FIELD(uint64_t L:1, /* Line locked */
+ __BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
+ __BITFIELD_FIELD(uint64_t addr:19, /* Phys addr (33..15) */
+ ;))))))
+ } cn30xx;
+ struct cvmx_l2c_tag_cn31xx {
+ __BITFIELD_FIELD(uint64_t reserved:42,
+ __BITFIELD_FIELD(uint64_t V:1, /* Line valid */
+ __BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
+ __BITFIELD_FIELD(uint64_t L:1, /* Line locked */
+ __BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
+ __BITFIELD_FIELD(uint64_t addr:18, /* Phys addr (33..16) */
+ ;))))))
+ } cn31xx;
+ struct cvmx_l2c_tag_cn38xx {
+ __BITFIELD_FIELD(uint64_t reserved:43,
+ __BITFIELD_FIELD(uint64_t V:1, /* Line valid */
+ __BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
+ __BITFIELD_FIELD(uint64_t L:1, /* Line locked */
+ __BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
+ __BITFIELD_FIELD(uint64_t addr:17, /* Phys addr (33..17) */
+ ;))))))
+ } cn38xx;
+ struct cvmx_l2c_tag_cn58xx {
+ __BITFIELD_FIELD(uint64_t reserved:44,
+ __BITFIELD_FIELD(uint64_t V:1, /* Line valid */
+ __BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
+ __BITFIELD_FIELD(uint64_t L:1, /* Line locked */
+ __BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
+ __BITFIELD_FIELD(uint64_t addr:16, /* Phys addr (33..18) */
+ ;))))))
+ } cn58xx;
+ struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */
+ struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */
+};
+
+
+/**
+ * @INTERNAL
+ * Function to read an L2C tag. This code makes the current core
+ * the 'debug core' for the L2. This code must only be executed by
+ * one core at a time.
+ *
+ * @assoc: Association (way) of the tag to dump
+ * @index: Index of the cacheline
+ *
+ * Returns The Octeon model specific tag structure. This is
+ * translated by a wrapper function to a generic form that is
+ * easier for applications to use.
+ */
+static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
+{
+
+ uint64_t debug_tag_addr = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (index << 7) + 96);
+ uint64_t core = cvmx_get_core_num();
+ union __cvmx_l2c_tag tag_val;
+ uint64_t dbg_addr = CVMX_L2C_DBG;
+ unsigned long flags;
+ union cvmx_l2c_dbg debug_val;
+
+ debug_val.u64 = 0;
+ /*
+ * For low core count parts, the core number is always small
+ * enough to stay in the correct field and not set any
+ * reserved bits.
+ */
+ debug_val.s.ppnum = core;
+ debug_val.s.l2t = 1;
+ debug_val.s.set = assoc;
+
+ local_irq_save(flags);
+ /*
+ * Make sure core is quiet (no prefetches, etc.) before
+ * entering debug mode.
+ */
+ CVMX_SYNC;
+ /* Flush L1 to make sure debug load misses L1 */
+ CVMX_DCACHE_INVALIDATE;
+
+ /*
+ * The following must be done in assembly as when in debug
+ * mode all data loads from L2 return special debug data, not
+ * normal memory contents. Also, interrupts must be disabled,
+ * since if an interrupt occurs while in debug mode the ISR
+ * will get debug data from all its memory reads instead of
+ * the contents of memory.
+ */
+
+ asm volatile (
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noreorder\n\t"
+ "sd %[dbg_val], 0(%[dbg_addr])\n\t" /* Enter debug mode, wait for store */
+ "ld $0, 0(%[dbg_addr])\n\t"
+ "ld %[tag_val], 0(%[tag_addr])\n\t" /* Read L2C tag data */
+ "sd $0, 0(%[dbg_addr])\n\t" /* Exit debug mode, wait for store */
+ "ld $0, 0(%[dbg_addr])\n\t"
+ "cache 9, 0($0)\n\t" /* Invalidate dcache to discard debug data */
+ ".set pop"
+ : [tag_val] "=r" (tag_val)
+ : [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr)
+ : "memory");
+
+ local_irq_restore(flags);
+
+ return tag_val;
+}
+
+
+union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
+{
+ union cvmx_l2c_tag tag;
+
+ tag.u64 = 0;
+ if ((int)association >= cvmx_l2c_get_num_assoc()) {
+ cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
+ return tag;
+ }
+ if ((int)index >= cvmx_l2c_get_num_sets()) {
+ cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n",
+ (int)index, cvmx_l2c_get_num_sets());
+ return tag;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ union cvmx_l2c_tadx_tag l2c_tadx_tag;
+ uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ (association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
+ (index << CVMX_L2C_IDX_ADDR_SHIFT));
+ /*
+ * Use L2 cache Index load tag cache instruction, as
+ * hardware loads the virtual tag for the L2 cache
+ * block with the contents of L2C_TAD0_TAG
+ * register.
+ */
+ CVMX_CACHE_LTGL2I(address, 0);
+ CVMX_SYNC; /* make sure CVMX_L2C_TADX_TAG is updated */
+ l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
+
+ tag.s.V = l2c_tadx_tag.s.valid;
+ tag.s.D = l2c_tadx_tag.s.dirty;
+ tag.s.L = l2c_tadx_tag.s.lock;
+ tag.s.U = l2c_tadx_tag.s.use;
+ tag.s.addr = l2c_tadx_tag.s.tag;
+ } else {
+ union __cvmx_l2c_tag tmp_tag;
+ /* __read_l2_tag is intended for internal use only */
+ tmp_tag = __read_l2_tag(association, index);
+
+ /*
+ * Convert all tag structure types to generic version,
+ * as it can represent all models.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ tag.s.V = tmp_tag.cn58xx.V;
+ tag.s.D = tmp_tag.cn58xx.D;
+ tag.s.L = tmp_tag.cn58xx.L;
+ tag.s.U = tmp_tag.cn58xx.U;
+ tag.s.addr = tmp_tag.cn58xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ tag.s.V = tmp_tag.cn38xx.V;
+ tag.s.D = tmp_tag.cn38xx.D;
+ tag.s.L = tmp_tag.cn38xx.L;
+ tag.s.U = tmp_tag.cn38xx.U;
+ tag.s.addr = tmp_tag.cn38xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ tag.s.V = tmp_tag.cn31xx.V;
+ tag.s.D = tmp_tag.cn31xx.D;
+ tag.s.L = tmp_tag.cn31xx.L;
+ tag.s.U = tmp_tag.cn31xx.U;
+ tag.s.addr = tmp_tag.cn31xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
+ tag.s.V = tmp_tag.cn30xx.V;
+ tag.s.D = tmp_tag.cn30xx.D;
+ tag.s.L = tmp_tag.cn30xx.L;
+ tag.s.U = tmp_tag.cn30xx.U;
+ tag.s.addr = tmp_tag.cn30xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ tag.s.V = tmp_tag.cn50xx.V;
+ tag.s.D = tmp_tag.cn50xx.D;
+ tag.s.L = tmp_tag.cn50xx.L;
+ tag.s.U = tmp_tag.cn50xx.U;
+ tag.s.addr = tmp_tag.cn50xx.addr;
+ } else {
+ cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ }
+ }
+ return tag;
+}
+
+uint32_t cvmx_l2c_address_to_index(uint64_t addr)
+{
+ uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
+ int indxalias = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ union cvmx_l2c_ctl l2c_ctl;
+
+ l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
+ indxalias = !l2c_ctl.s.disidxalias;
+ } else {
+ union cvmx_l2c_cfg l2c_cfg;
+
+ l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
+ indxalias = l2c_cfg.s.idxalias;
+ }
+
+ if (indxalias) {
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
+
+ idx ^= idx / cvmx_l2c_get_num_sets();
+ idx ^= a_14_12;
+ } else {
+ idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
+ }
+ }
+ idx &= CVMX_L2C_IDX_MASK;
+ return idx;
+}
+
+int cvmx_l2c_get_cache_size_bytes(void)
+{
+ return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() *
+ CVMX_CACHE_LINE_SIZE;
+}
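+
+/*
+ * Sanity check of the computation above, using the per-model tables
+ * below and a 128-byte cache line (the usual OCTEON line size): on
+ * CN63XX this gives 1024 sets * 16 ways * 128 bytes = 2 MB, matching
+ * the "16-way 2MB cache" entry in the fuse table comment in
+ * cvmx_l2c_get_num_assoc().
+ */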
+
+/**
+ * Return log base 2 of the number of sets in the L2 cache.
+ */
+int cvmx_l2c_get_set_bits(void)
+{
+ int l2_set_bits;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
+ l2_set_bits = 11; /* 2048 sets */
+ else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
+ l2_set_bits = 10; /* 1024 sets */
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
+ l2_set_bits = 9; /* 512 sets */
+ else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
+ l2_set_bits = 8; /* 256 sets */
+ else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
+ l2_set_bits = 7; /* 128 sets */
+ else {
+ cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ l2_set_bits = 11; /* 2048 sets */
+ }
+ return l2_set_bits;
+}
+
+/* Return the number of sets in the L2 Cache */
+int cvmx_l2c_get_num_sets(void)
+{
+ return 1 << cvmx_l2c_get_set_bits();
+}
+
+/* Return the number of associations in the L2 Cache */
+int cvmx_l2c_get_num_assoc(void)
+{
+ int l2_assoc;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN52XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN58XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN50XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN38XX))
+ l2_assoc = 8;
+ else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ l2_assoc = 16;
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN30XX))
+ l2_assoc = 4;
+ else {
+ cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ l2_assoc = 8;
+ }
+
+ /* Check to see if part of the cache is disabled */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ union cvmx_mio_fus_dat3 mio_fus_dat3;
+
+ mio_fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
+ /*
+ * cvmx_mio_fus_dat3.s.l2c_crip fuses map as follows
+ * <2> is not used for 63xx
+ * <1> disables 1/2 ways
+ * <0> disables 1/4 ways
+ * They are cumulative, so for 63xx:
+ * <1> <0>
+ * 0 0 16-way 2MB cache
+ * 0 1 12-way 1.5MB cache
+ * 1 0 8-way 1MB cache
+ * 1 1 4-way 512KB cache
+ */
+
+ if (mio_fus_dat3.s.l2c_crip == 3)
+ l2_assoc = 4;
+ else if (mio_fus_dat3.s.l2c_crip == 2)
+ l2_assoc = 8;
+ else if (mio_fus_dat3.s.l2c_crip == 1)
+ l2_assoc = 12;
+ } else {
+ uint64_t l2d_fus3;
+
+ l2d_fus3 = cvmx_read_csr(CVMX_L2D_FUS3);
+ /*
+ * Using shifts here, as bit position names are
+ * different for each model but they all mean the
+ * same.
+ */
+ if ((l2d_fus3 >> 35) & 0x1)
+ l2_assoc = l2_assoc >> 2;
+ else if ((l2d_fus3 >> 34) & 0x1)
+ l2_assoc = l2_assoc >> 1;
+ }
+ return l2_assoc;
+}
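+
+/*
+ * Illustration of the fuse handling above (hypothetical fuse values):
+ * a chip that nominally reports 8 ways is reduced to 4 ways when bit
+ * 34 of L2D_FUS3 is set, and to 2 ways when bit 35 is set; on CN63XX
+ * the l2c_crip field is decoded instead, per the table in the comment
+ * above.
+ */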
+
+/**
+ * Flush a line from the L2 cache
+ * This should only be called from one core at a time, as this routine
+ * sets the core to the 'debug' core in order to flush the line.
+ *
+ * @assoc: Association (or way) to flush
+ * @index: Index to flush
+ */
+void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
+{
+ /* Check the range of the index. */
+ if (index > (uint32_t)cvmx_l2c_get_num_sets()) {
+ cvmx_dprintf("ERROR: cvmx_l2c_flush_line index out of range.\n");
+ return;
+ }
+
+ /* Check the range of association. */
+ if (assoc > (uint32_t)cvmx_l2c_get_num_assoc()) {
+ cvmx_dprintf("ERROR: cvmx_l2c_flush_line association out of range.\n");
+ return;
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ uint64_t address;
+ /* Create the address based on index and association.
+ * Bits<20:17> select the way of the cache block involved in
+ * the operation.
+ * Bits<16:7> of the effective address select the index
+ */
+ address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ (assoc << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
+ (index << CVMX_L2C_IDX_ADDR_SHIFT));
+ CVMX_CACHE_WBIL2I(address, 0);
+ } else {
+ union cvmx_l2c_dbg l2cdbg;
+
+ l2cdbg.u64 = 0;
+ if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ l2cdbg.s.finv = 1;
+
+ l2cdbg.s.set = assoc;
+ cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+ /*
+ * Enter debug mode, and make sure all other writes
+ * complete before we enter debug mode
+ */
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ index * CVMX_CACHE_LINE_SIZE),
+ 0);
+ /* Exit debug mode */
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ }
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-pko.c b/arch/mips/cavium-octeon/executive/cvmx-pko.c
new file mode 100644
index 000000000..b0efc35e9
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-pko.c
@@ -0,0 +1,646 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Support library for the hardware Packet Output unit.
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-helper.h>
+
+/**
+ * Internal state of packet output
+ */
+
+static int __cvmx_pko_int(int interface, int index)
+{
+ switch (interface) {
+ case 0:
+ return index;
+ case 1:
+ return 4;
+ case 2:
+ return index + 0x08;
+ case 3:
+ return index + 0x0c;
+ case 4:
+ return index + 0x10;
+ case 5:
+ return 0x1c;
+ case 6:
+ return 0x1d;
+ case 7:
+ return 0x1e;
+ case 8:
+ return 0x1f;
+ default:
+ return -1;
+ }
+}
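+
+/*
+ * Example of the mapping above (illustrative only): interface 2,
+ * index 3 yields 0x08 + 3 = 0x0b, and the single-port interfaces 5-8
+ * yield the fixed values 0x1c-0x1f. The result is written to both the
+ * intr and eid fields in __cvmx_pko_port_map_o68().
+ */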
+
+static void __cvmx_pko_iport_config(int pko_port)
+{
+ int queue;
+ const int num_queues = 1;
+ const int base_queue = pko_port;
+ const int static_priority_end = 1;
+ const int static_priority_base = 1;
+
+ for (queue = 0; queue < num_queues; queue++) {
+ union cvmx_pko_mem_iqueue_ptrs config;
+ cvmx_cmd_queue_result_t cmd_res;
+ uint64_t *buf_ptr;
+
+ config.u64 = 0;
+ config.s.index = queue;
+ config.s.qid = base_queue + queue;
+ config.s.ipid = pko_port;
+ config.s.tail = (queue == (num_queues - 1));
+ config.s.s_tail = (queue == static_priority_end);
+ config.s.static_p = (static_priority_base >= 0);
+ config.s.static_q = (queue <= static_priority_end);
+ config.s.qos_mask = 0xff;
+
+ cmd_res = cvmx_cmd_queue_initialize(
+ CVMX_CMD_QUEUE_PKO(base_queue + queue),
+ CVMX_PKO_MAX_QUEUE_DEPTH,
+ CVMX_FPA_OUTPUT_BUFFER_POOL,
+ (CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE -
+ CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST * 8));
+
+ WARN(cmd_res,
+ "%s: cmd_res=%d pko_port=%d base_queue=%d num_queues=%d queue=%d\n",
+ __func__, (int)cmd_res, pko_port, base_queue,
+ num_queues, queue);
+
+ buf_ptr = (uint64_t *)cvmx_cmd_queue_buffer(
+ CVMX_CMD_QUEUE_PKO(base_queue + queue));
+ config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr) >> 7;
+ CVMX_SYNCWS;
+ cvmx_write_csr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
+ }
+}
+
+static void __cvmx_pko_queue_alloc_o68(void)
+{
+ int port;
+
+ for (port = 0; port < 48; port++)
+ __cvmx_pko_iport_config(port);
+}
+
+static void __cvmx_pko_port_map_o68(void)
+{
+ int port;
+ int interface, index;
+ cvmx_helper_interface_mode_t mode;
+ union cvmx_pko_mem_iport_ptrs config;
+
+ /*
+ * Initialize every iport with the invalid eid.
+ */
+ config.u64 = 0;
+ config.s.eid = 31; /* Invalid */
+ for (port = 0; port < 128; port++) {
+ config.s.ipid = port;
+ cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
+ }
+
+ /*
+ * Set up PKO_MEM_IPORT_PTRS
+ */
+ for (port = 0; port < 48; port++) {
+ interface = cvmx_helper_get_interface_num(port);
+ index = cvmx_helper_get_interface_index_num(port);
+ mode = cvmx_helper_interface_get_mode(interface);
+ if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
+ continue;
+
+ config.s.ipid = port;
+ config.s.qos_mask = 0xff;
+ config.s.crc = 1;
+ config.s.min_pkt = 1;
+ config.s.intr = __cvmx_pko_int(interface, index);
+ config.s.eid = config.s.intr;
+ config.s.pipe = (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) ?
+ index : port;
+ cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
+ }
+}
+
+static void __cvmx_pko_chip_init(void)
+{
+ int i;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ __cvmx_pko_port_map_o68();
+ __cvmx_pko_queue_alloc_o68();
+ return;
+ }
+
+ /*
+ * Initialize queues
+ */
+ for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++) {
+ const uint64_t priority = 8;
+
+ cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
+ &priority);
+ }
+}
+
+/**
+ * Call before any other calls to initialize the packet
+ * output system. This does chip global config, and should only be
+ * done by one core.
+ */
+
+void cvmx_pko_initialize_global(void)
+{
+ union cvmx_pko_reg_cmd_buf config;
+
+ /*
+ * Set the size of the PKO command buffers to an odd number of
+ * 64-bit words. This allows the normal two-word send to stay
+ * aligned and never span a command word buffer.
+ */
+ config.u64 = 0;
+ config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
+ config.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8 - 1;
+
+ cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);
+
+ /*
+ * Chip-specific setup.
+ */
+ __cvmx_pko_chip_init();
+
+ /*
+ * If we aren't using all of the queues, optimize PKO's
+ * internal memory.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)
+ || OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ int num_interfaces = cvmx_helper_get_number_of_interfaces();
+ int last_port =
+ cvmx_helper_get_last_ipd_port(num_interfaces - 1);
+ int max_queues =
+ cvmx_pko_get_base_queue(last_port) +
+ cvmx_pko_get_num_queues(last_port);
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ if (max_queues <= 32)
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
+ else if (max_queues <= 64)
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
+ } else {
+ if (max_queues <= 64)
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
+ else if (max_queues <= 128)
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
+ }
+ }
+}
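+
+/*
+ * Worked example for the command-buffer sizing in
+ * cvmx_pko_initialize_global() (the pool size is configuration
+ * dependent; 1024 bytes assumed here for illustration):
+ *   size = 1024 / 8 - 1 = 127 words, an odd count, so a two-word
+ *   PKO command never straddles a command buffer boundary.
+ */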
+
+/**
+ * This function does per-core initialization required by the PKO routines.
+ * This must be called on all cores that will do packet output, and must
+ * be called after the FPA has been initialized and filled with pages.
+ *
+ * Returns 0 on success
+ * !0 on failure
+ */
+int cvmx_pko_initialize_local(void)
+{
+ /* Nothing to do */
+ return 0;
+}
+
+/**
+ * Enables the packet output hardware. It must already be
+ * configured.
+ */
+void cvmx_pko_enable(void)
+{
+ union cvmx_pko_reg_flags flags;
+
+ flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
+ if (flags.s.ena_pko)
+ cvmx_dprintf
+ ("Warning: Enabling PKO when PKO already enabled.\n");
+
+ flags.s.ena_dwb = 1;
+ flags.s.ena_pko = 1;
+ /*
+ * Always enable big endian for 3-word commands. Does nothing
+ * for 2-word commands.
+ */
+ flags.s.store_be = 1;
+ cvmx_write_csr(CVMX_PKO_REG_FLAGS, flags.u64);
+}
+
+/**
+ * Disables the packet output. Does not affect any configuration.
+ */
+void cvmx_pko_disable(void)
+{
+ union cvmx_pko_reg_flags pko_reg_flags;
+ pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
+ pko_reg_flags.s.ena_pko = 0;
+ cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
+}
+EXPORT_SYMBOL_GPL(cvmx_pko_disable);
+
+/**
+ * Reset the packet output.
+ */
+static void __cvmx_pko_reset(void)
+{
+ union cvmx_pko_reg_flags pko_reg_flags;
+ pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
+ pko_reg_flags.s.reset = 1;
+ cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
+}
+
+/**
+ * Shutdown and free resources required by packet output.
+ */
+void cvmx_pko_shutdown(void)
+{
+ union cvmx_pko_mem_queue_ptrs config;
+ int queue;
+
+ cvmx_pko_disable();
+
+ for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
+ config.u64 = 0;
+ config.s.tail = 1;
+ config.s.index = 0;
+ config.s.port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;
+ config.s.queue = queue & 0x7f;
+ config.s.qos_mask = 0;
+ config.s.buf_ptr = 0;
+ if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ union cvmx_pko_reg_queue_ptrs1 config1;
+ config1.u64 = 0;
+ config1.s.qid7 = queue >> 7;
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
+ }
+ cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
+ cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
+ }
+ __cvmx_pko_reset();
+}
+EXPORT_SYMBOL_GPL(cvmx_pko_shutdown);
+
+/**
+ * Configure an output port and the associated queues for use.
+ *
+ * @port: Port to configure.
+ * @base_queue: First queue number to associate with this port.
+ * @num_queues: Number of queues to associate with this port
+ * @priority: Array of priority levels for each queue. Values are
+ * allowed to be 0-8. A value of 8 gets 8 times the traffic
+ * of a value of 1. A value of 0 indicates that the queue
+ * does not participate in any rounds. These priorities can be
+ * changed on the fly while the pko is enabled. A priority of 9
+ * indicates that static priority should be used. If static
+ * priority is used all queues with static priority must be
+ * contiguous starting at the base_queue, and lower numbered
+ * queues have higher priority than higher numbered queues.
+ * There must be num_queues elements in the array.
+ */
+cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
+ uint64_t num_queues,
+ const uint64_t priority[])
+{
+ cvmx_pko_status_t result_code;
+ uint64_t queue;
+ union cvmx_pko_mem_queue_ptrs config;
+ union cvmx_pko_reg_queue_ptrs1 config1;
+ int static_priority_base = -1;
+ int static_priority_end = -1;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ return CVMX_PKO_SUCCESS;
+
+ if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
+ && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
+ cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
+ (unsigned long long)port);
+ return CVMX_PKO_INVALID_PORT;
+ }
+
+ if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
+ cvmx_dprintf
+ ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
+ (unsigned long long)(base_queue + num_queues));
+ return CVMX_PKO_INVALID_QUEUE;
+ }
+
+ if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
+ /*
+ * Validate the static queue priority setup and set
+ * static_priority_base and static_priority_end
+ * accordingly.
+ */
+ for (queue = 0; queue < num_queues; queue++) {
+ /* Find first queue of static priority */
+ if (static_priority_base == -1
+ && priority[queue] ==
+ CVMX_PKO_QUEUE_STATIC_PRIORITY)
+ static_priority_base = queue;
+ /* Find last queue of static priority */
+ if (static_priority_base != -1
+ && static_priority_end == -1
+ && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY
+ && queue)
+ static_priority_end = queue - 1;
+ else if (static_priority_base != -1
+ && static_priority_end == -1
+ && queue == num_queues - 1)
+ /* all queues are static priority */
+ static_priority_end = queue;
+ /*
+ * Check to make sure all static priority
+ * queues are contiguous. Also catches some
+ * cases of static priorities not starting at
+ * queue 0.
+ */
+ if (static_priority_end != -1
+ && (int)queue > static_priority_end
+ && priority[queue] ==
+ CVMX_PKO_QUEUE_STATIC_PRIORITY) {
+ cvmx_dprintf("ERROR: cvmx_pko_config_port: "
+ "Static priority queues aren't "
+ "contiguous or don't start at "
+ "base queue. q: %d, eq: %d\n",
+ (int)queue, static_priority_end);
+ return CVMX_PKO_INVALID_PRIORITY;
+ }
+ }
+ if (static_priority_base > 0) {
+ cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
+ "priority queues don't start at base "
+ "queue. sq: %d\n",
+ static_priority_base);
+ return CVMX_PKO_INVALID_PRIORITY;
+ }
+#if 0
+ cvmx_dprintf("Port %d: Static priority queue base: %d, "
+ "end: %d\n", port,
+ static_priority_base, static_priority_end);
+#endif
+ }
+ /*
+ * At this point, static_priority_base and static_priority_end
+ * are either both -1, or are valid start/end queue
+ * numbers.
+ */
+
+ result_code = CVMX_PKO_SUCCESS;
+
+#ifdef PKO_DEBUG
+ cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues,
+ CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
+ CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
+#endif
+
+ for (queue = 0; queue < num_queues; queue++) {
+ uint64_t *buf_ptr = NULL;
+
+ config1.u64 = 0;
+ config1.s.idx3 = queue >> 3;
+ config1.s.qid7 = (base_queue + queue) >> 7;
+
+ config.u64 = 0;
+ config.s.tail = queue == (num_queues - 1);
+ config.s.index = queue;
+ config.s.port = port;
+ config.s.queue = base_queue + queue;
+
+ if (!cvmx_octeon_is_pass1()) {
+ config.s.static_p = static_priority_base >= 0;
+ config.s.static_q = (int)queue <= static_priority_end;
+ config.s.s_tail = (int)queue == static_priority_end;
+ }
+ /*
+ * Convert the priority into an enable bit field. Try
+ * to space the bits out evenly so the packets don't
+ * get grouped up.
+ */
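+ /*
+ * For example, priority 4 maps to qos_mask 0x55 (binary 01010101):
+ * the queue participates in 4 of every 8 rounds, spread evenly,
+ * while priority 8 (0xff) participates in every round.
+ */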
+ switch ((int)priority[queue]) {
+ case 0:
+ config.s.qos_mask = 0x00;
+ break;
+ case 1:
+ config.s.qos_mask = 0x01;
+ break;
+ case 2:
+ config.s.qos_mask = 0x11;
+ break;
+ case 3:
+ config.s.qos_mask = 0x49;
+ break;
+ case 4:
+ config.s.qos_mask = 0x55;
+ break;
+ case 5:
+ config.s.qos_mask = 0x57;
+ break;
+ case 6:
+ config.s.qos_mask = 0x77;
+ break;
+ case 7:
+ config.s.qos_mask = 0x7f;
+ break;
+ case 8:
+ config.s.qos_mask = 0xff;
+ break;
+ case CVMX_PKO_QUEUE_STATIC_PRIORITY:
+ if (!cvmx_octeon_is_pass1()) {
+ config.s.qos_mask = 0xff;
+ break;
+ }
+ fallthrough; /* to the error case, when Pass 1 */
+ default:
+ cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
+ "priority %llu\n",
+ (unsigned long long)priority[queue]);
+ config.s.qos_mask = 0xff;
+ result_code = CVMX_PKO_INVALID_PRIORITY;
+ break;
+ }
+
+ if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
+ cvmx_cmd_queue_result_t cmd_res =
+ cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
+ (base_queue + queue),
+ CVMX_PKO_MAX_QUEUE_DEPTH,
+ CVMX_FPA_OUTPUT_BUFFER_POOL,
+ CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
+ -
+ CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
+ * 8);
+ if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
+ switch (cmd_res) {
+ case CVMX_CMD_QUEUE_NO_MEMORY:
+ cvmx_dprintf("ERROR: "
+ "cvmx_pko_config_port: "
+ "Unable to allocate "
+ "output buffer.\n");
+ return CVMX_PKO_NO_MEMORY;
+ case CVMX_CMD_QUEUE_ALREADY_SETUP:
+ cvmx_dprintf
+ ("ERROR: cvmx_pko_config_port: Port already setup.\n");
+ return CVMX_PKO_PORT_ALREADY_SETUP;
+ case CVMX_CMD_QUEUE_INVALID_PARAM:
+ default:
+ cvmx_dprintf
+ ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n");
+ return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
+ }
+ }
+
+ buf_ptr =
+ (uint64_t *)
+ cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO
+ (base_queue + queue));
+ config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
+ } else
+ config.s.buf_ptr = 0;
+
+ CVMX_SYNCWS;
+
+ if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
+ cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
+ }
+
+ return result_code;
+}
+
+#ifdef PKO_DEBUG
+/**
+ * Show map of ports -> queues for different cores.
+ */
+void cvmx_pko_show_queue_map()
+{
+ int core, port;
+ int pko_output_ports = 36;
+
+ cvmx_dprintf("port");
+ for (port = 0; port < pko_output_ports; port++)
+ cvmx_dprintf("%3d ", port);
+ cvmx_dprintf("\n");
+
+ for (core = 0; core < CVMX_MAX_CORES; core++) {
+ cvmx_dprintf("\n%2d: ", core);
+ for (port = 0; port < pko_output_ports; port++) {
+ cvmx_dprintf("%3d ",
+ cvmx_pko_get_base_queue_per_core(port,
+ core));
+ }
+ }
+ cvmx_dprintf("\n");
+}
+#endif
+
+/**
+ * Rate limit a PKO port to a max packets/sec. This function is only
+ * supported on CN51XX and higher, excluding CN58XX.
+ *
+ * @port: Port to rate limit
+ * @packets_s: Maximum packets/sec
+ * @burst: Maximum number of packets to burst in a row before rate
+ * limiting cuts in.
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst)
+{
+ union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
+ union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
+
+ pko_mem_port_rate0.u64 = 0;
+ pko_mem_port_rate0.s.pid = port;
+ pko_mem_port_rate0.s.rate_pkt =
+ cvmx_sysinfo_get()->cpu_clock_hz / packets_s / 16;
+ /* No cost per word since we are limited by packets/sec, not bits/sec */
+ pko_mem_port_rate0.s.rate_word = 0;
+
+ pko_mem_port_rate1.u64 = 0;
+ pko_mem_port_rate1.s.pid = port;
+ pko_mem_port_rate1.s.rate_lim =
+ ((uint64_t) pko_mem_port_rate0.s.rate_pkt * burst) >> 8;
+
+ cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
+ cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
+ return 0;
+}
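+
+/*
+ * Worked example of the packet-rate math above (the clock rate is
+ * board dependent; 500 MHz, a 10000 packets/sec limit and burst = 16
+ * are assumed for illustration):
+ *   rate_pkt = 500000000 / 10000 / 16 = 3125
+ *   rate_lim = (3125 * 16) >> 8       = 195
+ */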
+
+/**
+ * Rate limit a PKO port to a max bits/sec. This function is only
+ * supported on CN51XX and higher, excluding CN58XX.
+ *
+ * @port: Port to rate limit
+ * @bits_s: PKO rate limit in bits/sec
+ * @burst: Maximum number of bits to burst before rate
+ * limiting cuts in.
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst)
+{
+ union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
+ union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
+ uint64_t clock_rate = cvmx_sysinfo_get()->cpu_clock_hz;
+ uint64_t tokens_per_bit = clock_rate * 16 / bits_s;
+
+ pko_mem_port_rate0.u64 = 0;
+ pko_mem_port_rate0.s.pid = port;
+ /*
+ * Each packet has 12 bytes of interframe gap, an 8-byte
+ * preamble, and a 4-byte CRC. These are not included in the
+ * per-word count. Multiply by 8 to convert to bits and divide
+ * by 256 for limit granularity.
+ */
+ pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256;
+ /* Each 8-byte word has 64 bits */
+ pko_mem_port_rate0.s.rate_word = 64 * tokens_per_bit;
+
+ pko_mem_port_rate1.u64 = 0;
+ pko_mem_port_rate1.s.pid = port;
+ pko_mem_port_rate1.s.rate_lim = tokens_per_bit * burst / 256;
+
+ cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
+ cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
+ return 0;
+}
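+
+/*
+ * Worked example of the bit-rate math above (the clock rate is board
+ * dependent; 500 MHz, a 1 Gbit/sec limit and burst = 256 are assumed
+ * for illustration):
+ *   tokens_per_bit = 500000000 * 16 / 1000000000 = 8
+ *   rate_pkt  = (12 + 8 + 4) * 8 * 8 / 256       = 6
+ *   rate_word = 64 * 8                           = 512
+ *   rate_lim  = 8 * 256 / 256                    = 8
+ */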
diff --git a/arch/mips/cavium-octeon/executive/cvmx-spi.c b/arch/mips/cavium-octeon/executive/cvmx-spi.c
new file mode 100644
index 000000000..f51957a3e
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-spi.c
@@ -0,0 +1,668 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Support library for the SPI
+ */
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-config.h>
+
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-spi.h>
+
+#include <asm/octeon/cvmx-spxx-defs.h>
+#include <asm/octeon/cvmx-stxx-defs.h>
+#include <asm/octeon/cvmx-srxx-defs.h>
+
+#define INVOKE_CB(function_p, args...) \
+ do { \
+ if (function_p) { \
+ res = function_p(args); \
+ if (res) \
+ return res; \
+ } \
+ } while (0)
+
+#if CVMX_ENABLE_DEBUG_PRINTS
+static const char *modes[] =
+ { "UNKNOWN", "TX Halfplex", "Rx Halfplex", "Duplex" };
+#endif
+
+/* Default callbacks, can be overridden
+ * using cvmx_spi_get_callbacks/cvmx_spi_set_callbacks
+ */
+static cvmx_spi_callbacks_t cvmx_spi_callbacks = {
+ .reset_cb = cvmx_spi_reset_cb,
+ .calendar_setup_cb = cvmx_spi_calendar_setup_cb,
+ .clock_detect_cb = cvmx_spi_clock_detect_cb,
+ .training_cb = cvmx_spi_training_cb,
+ .calendar_sync_cb = cvmx_spi_calendar_sync_cb,
+ .interface_up_cb = cvmx_spi_interface_up_cb
+};
+
+/**
+ * Get current SPI4 initialization callbacks
+ *
+ * @callbacks: Pointer to the callbacks structure to fill.
+ *
+ * Returns Pointer to cvmx_spi_callbacks_t structure.
+ */
+void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks)
+{
+ memcpy(callbacks, &cvmx_spi_callbacks, sizeof(cvmx_spi_callbacks));
+}
+
+/**
+ * Set new SPI4 initialization callbacks
+ *
+ * @new_callbacks: Pointer to an updated callbacks structure.
+ */
+void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks)
+{
+ memcpy(&cvmx_spi_callbacks, new_callbacks, sizeof(cvmx_spi_callbacks));
+}
+
+/**
+ * Initialize and start the SPI interface.
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for clock synchronization in seconds
+ * @num_ports: Number of SPI ports to configure
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout,
+ int num_ports)
+{
+ int res = -1;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ return res;
+
+ /* Callback to perform SPI4 reset */
+ INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);
+
+ /* Callback to perform calendar setup */
+ INVOKE_CB(cvmx_spi_callbacks.calendar_setup_cb, interface, mode,
+ num_ports);
+
+ /* Callback to perform clock detection */
+ INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
+
+ /* Callback to perform SPI4 link training */
+ INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout);
+
+ /* Callback to perform calendar sync */
+ INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode,
+ timeout);
+
+ /* Callback to handle interface coming up */
+ INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode);
+
+ return res;
+}
+
+/**
+ * This routine restarts the SPI interface after it has lost synchronization
+ * with its correspondent system.
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for clock synchronization in seconds
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
+{
+ int res = -1;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ return res;
+
+ cvmx_dprintf("SPI%d: Restart %s\n", interface, modes[mode]);
+
+ /* Callback to perform SPI4 reset */
+ INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);
+
+ /* NOTE: Calendar setup is not performed during restart */
+ /* Refer to cvmx_spi_start_interface() for the full sequence */
+
+ /* Callback to perform clock detection */
+ INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
+
+ /* Callback to perform SPI4 link training */
+ INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout);
+
+ /* Callback to perform calendar sync */
+ INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode,
+ timeout);
+
+ /* Callback to handle interface coming up */
+ INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(cvmx_spi_restart_interface);
+
+/**
+ * Callback to perform SPI4 reset
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
+{
+ union cvmx_spxx_dbg_deskew_ctl spxx_dbg_deskew_ctl;
+ union cvmx_spxx_clk_ctl spxx_clk_ctl;
+ union cvmx_spxx_bist_stat spxx_bist_stat;
+ union cvmx_spxx_int_msk spxx_int_msk;
+ union cvmx_stxx_int_msk stxx_int_msk;
+ union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
+ int index;
+ uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
+
+ /* Disable SPI error events while we run BIST */
+ spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
+ cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
+ stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
+ cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
+
+ /* Run BIST in the SPI interface */
+ cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), 0);
+ cvmx_write_csr(CVMX_STXX_COM_CTL(interface), 0);
+ spxx_clk_ctl.u64 = 0;
+ spxx_clk_ctl.s.runbist = 1;
+ cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
+ __delay(10 * MS);
+ spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
+ if (spxx_bist_stat.s.stat0)
+ cvmx_dprintf
+ ("ERROR SPI%d: BIST failed on receive datapath FIFO\n",
+ interface);
+ if (spxx_bist_stat.s.stat1)
+ cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n",
+ interface);
+ if (spxx_bist_stat.s.stat2)
+ cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n",
+ interface);
+
+ /* Clear the calendar table after BIST to fix parity errors */
+ for (index = 0; index < 32; index++) {
+ union cvmx_srxx_spi4_calx srxx_spi4_calx;
+ union cvmx_stxx_spi4_calx stxx_spi4_calx;
+
+ srxx_spi4_calx.u64 = 0;
+ srxx_spi4_calx.s.oddpar = 1;
+ cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
+ srxx_spi4_calx.u64);
+
+ stxx_spi4_calx.u64 = 0;
+ stxx_spi4_calx.s.oddpar = 1;
+ cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
+ stxx_spi4_calx.u64);
+ }
+
+ /* Re-enable reporting of error interrupts */
+ cvmx_write_csr(CVMX_SPXX_INT_REG(interface),
+ cvmx_read_csr(CVMX_SPXX_INT_REG(interface)));
+ cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
+ cvmx_write_csr(CVMX_STXX_INT_REG(interface),
+ cvmx_read_csr(CVMX_STXX_INT_REG(interface)));
+ cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
+
+ /* Setup the CLKDLY right in the middle */
+ spxx_clk_ctl.u64 = 0;
+ spxx_clk_ctl.s.seetrn = 0;
+ spxx_clk_ctl.s.clkdly = 0x10;
+ spxx_clk_ctl.s.runbist = 0;
+ spxx_clk_ctl.s.statdrv = 0;
+ /* This should always be on the opposite edge as statdrv */
+ spxx_clk_ctl.s.statrcv = 1;
+ spxx_clk_ctl.s.sndtrn = 0;
+ spxx_clk_ctl.s.drptrn = 0;
+ spxx_clk_ctl.s.rcvtrn = 0;
+ spxx_clk_ctl.s.srxdlck = 0;
+ cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
+ __delay(100 * MS);
+
+ /* Reset SRX0 DLL */
+ spxx_clk_ctl.s.srxdlck = 1;
+ cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
+
+ /* Waiting for Inf0 Spi4 RX DLL to lock */
+ __delay(100 * MS);
+
+ /* Enable dynamic alignment */
+ spxx_trn4_ctl.s.trntest = 0;
+ spxx_trn4_ctl.s.jitter = 1;
+ spxx_trn4_ctl.s.clr_boot = 1;
+ spxx_trn4_ctl.s.set_boot = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX))
+ spxx_trn4_ctl.s.maxdist = 3;
+ else
+ spxx_trn4_ctl.s.maxdist = 8;
+ spxx_trn4_ctl.s.macro_en = 1;
+ spxx_trn4_ctl.s.mux_en = 1;
+ cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
+
+ spxx_dbg_deskew_ctl.u64 = 0;
+ cvmx_write_csr(CVMX_SPXX_DBG_DESKEW_CTL(interface),
+ spxx_dbg_deskew_ctl.u64);
+
+ return 0;
+}
+
+/**
+ * Callback to setup calendar and miscellaneous settings before clock detection
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @num_ports: Number of ports to configure on SPI
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
+ int num_ports)
+{
+ int port;
+ int index;
+ if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
+ union cvmx_srxx_com_ctl srxx_com_ctl;
+ union cvmx_srxx_spi4_stat srxx_spi4_stat;
+
+ /* SRX0 number of Ports */
+ srxx_com_ctl.u64 = 0;
+ srxx_com_ctl.s.prts = num_ports - 1;
+ srxx_com_ctl.s.st_en = 0;
+ srxx_com_ctl.s.inf_en = 0;
+ cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
+
+ /* SRX0 Calendar Table. This round-robins through all ports */
+ port = 0;
+ index = 0;
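+ /*
+ * Each calendar CSR entry holds four port slots; oddpar is set so
+ * that the 64-bit entry has an odd population count (odd parity).
+ */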
+ while (port < num_ports) {
+ union cvmx_srxx_spi4_calx srxx_spi4_calx;
+ srxx_spi4_calx.u64 = 0;
+ srxx_spi4_calx.s.prt0 = port++;
+ srxx_spi4_calx.s.prt1 = port++;
+ srxx_spi4_calx.s.prt2 = port++;
+ srxx_spi4_calx.s.prt3 = port++;
+ srxx_spi4_calx.s.oddpar =
+ ~(cvmx_dpop(srxx_spi4_calx.u64) & 1);
+ cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
+ srxx_spi4_calx.u64);
+ index++;
+ }
+ srxx_spi4_stat.u64 = 0;
+ srxx_spi4_stat.s.len = num_ports;
+ srxx_spi4_stat.s.m = 1;
+ cvmx_write_csr(CVMX_SRXX_SPI4_STAT(interface),
+ srxx_spi4_stat.u64);
+ }
+
+ if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
+ union cvmx_stxx_arb_ctl stxx_arb_ctl;
+ union cvmx_gmxx_tx_spi_max gmxx_tx_spi_max;
+ union cvmx_gmxx_tx_spi_thresh gmxx_tx_spi_thresh;
+ union cvmx_gmxx_tx_spi_ctl gmxx_tx_spi_ctl;
+ union cvmx_stxx_spi4_stat stxx_spi4_stat;
+ union cvmx_stxx_spi4_dat stxx_spi4_dat;
+
+ /* STX0 Config */
+ stxx_arb_ctl.u64 = 0;
+ stxx_arb_ctl.s.igntpa = 0;
+ stxx_arb_ctl.s.mintrn = 0;
+ cvmx_write_csr(CVMX_STXX_ARB_CTL(interface), stxx_arb_ctl.u64);
+
+ gmxx_tx_spi_max.u64 = 0;
+ gmxx_tx_spi_max.s.max1 = 8;
+ gmxx_tx_spi_max.s.max2 = 4;
+ gmxx_tx_spi_max.s.slice = 0;
+ cvmx_write_csr(CVMX_GMXX_TX_SPI_MAX(interface),
+ gmxx_tx_spi_max.u64);
+
+ gmxx_tx_spi_thresh.u64 = 0;
+ gmxx_tx_spi_thresh.s.thresh = 4;
+ cvmx_write_csr(CVMX_GMXX_TX_SPI_THRESH(interface),
+ gmxx_tx_spi_thresh.u64);
+
+ gmxx_tx_spi_ctl.u64 = 0;
+ gmxx_tx_spi_ctl.s.tpa_clr = 0;
+ gmxx_tx_spi_ctl.s.cont_pkt = 0;
+ cvmx_write_csr(CVMX_GMXX_TX_SPI_CTL(interface),
+ gmxx_tx_spi_ctl.u64);
+
+ /* STX0 Training Control */
+ stxx_spi4_dat.u64 = 0;
+ /* Minimum needed by dynamic alignment */
+ stxx_spi4_dat.s.alpha = 32;
+ stxx_spi4_dat.s.max_t = 0xFFFF; /* Minimum interval is 0x20 */
+ cvmx_write_csr(CVMX_STXX_SPI4_DAT(interface),
+ stxx_spi4_dat.u64);
+
+ /* STX0 Calendar Table. This round-robins through all ports */
+ port = 0;
+ index = 0;
+ while (port < num_ports) {
+ union cvmx_stxx_spi4_calx stxx_spi4_calx;
+ stxx_spi4_calx.u64 = 0;
+ stxx_spi4_calx.s.prt0 = port++;
+ stxx_spi4_calx.s.prt1 = port++;
+ stxx_spi4_calx.s.prt2 = port++;
+ stxx_spi4_calx.s.prt3 = port++;
+ stxx_spi4_calx.s.oddpar =
+ ~(cvmx_dpop(stxx_spi4_calx.u64) & 1);
+ cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
+ stxx_spi4_calx.u64);
+ index++;
+ }
+ stxx_spi4_stat.u64 = 0;
+ stxx_spi4_stat.s.len = num_ports;
+ stxx_spi4_stat.s.m = 1;
+ cvmx_write_csr(CVMX_STXX_SPI4_STAT(interface),
+ stxx_spi4_stat.u64);
+ }
+
+ return 0;
+}
+
+/**
+ * Callback to perform clock detection
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for clock synchronization in seconds
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, int timeout)
+{
+ int clock_transitions;
+ union cvmx_spxx_clk_stat stat;
+ uint64_t timeout_time;
+ uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
+
+ /*
+ * Regardless of operating mode, both Tx and Rx clocks must be
+ * present for the SPI interface to operate.
+ */
+ cvmx_dprintf("SPI%d: Waiting to see TsClk...\n", interface);
+ timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
+ /*
+ * Require 100 clock transitions in order to avoid any noise
+ * in the beginning.
+ */
+ clock_transitions = 100;
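+ /*
+ * Each pass write-1-clears the transition status bits, so the loop
+ * only exits after 100 distinct transitions have been observed.
+ */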
+ do {
+ stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
+ if (stat.s.s4clk0 && stat.s.s4clk1 && clock_transitions) {
+ /*
+ * We've seen a clock transition, so decrement
+ * the number we still need.
+ */
+ clock_transitions--;
+ cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
+ stat.s.s4clk0 = 0;
+ stat.s.s4clk1 = 0;
+ }
+ if (cvmx_get_cycle() > timeout_time) {
+ cvmx_dprintf("SPI%d: Timeout\n", interface);
+ return -1;
+ }
+ } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);
+
+ cvmx_dprintf("SPI%d: Waiting to see RsClk...\n", interface);
+ timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
+ /*
+ * Require 100 clock transitions in order to avoid any noise in the
+ * beginning.
+ */
+ clock_transitions = 100;
+ do {
+ stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
+ if (stat.s.d4clk0 && stat.s.d4clk1 && clock_transitions) {
+ /*
+ * We've seen a clock transition, so decrement
+ * the number we still need
+ */
+ clock_transitions--;
+ cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
+ stat.s.d4clk0 = 0;
+ stat.s.d4clk1 = 0;
+ }
+ if (cvmx_get_cycle() > timeout_time) {
+ cvmx_dprintf("SPI%d: Timeout\n", interface);
+ return -1;
+ }
+ } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);
+
+ return 0;
+}
+
+/**
+ * Callback to perform link training
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for link to be trained (in seconds)
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
+{
+ union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
+ union cvmx_spxx_clk_stat stat;
+ uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
+ uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
+ int rx_training_needed;
+
+ /* SRX0 & STX0 Inf0 Links are configured - begin training */
+ union cvmx_spxx_clk_ctl spxx_clk_ctl;
+ spxx_clk_ctl.u64 = 0;
+ spxx_clk_ctl.s.seetrn = 0;
+ spxx_clk_ctl.s.clkdly = 0x10;
+ spxx_clk_ctl.s.runbist = 0;
+ spxx_clk_ctl.s.statdrv = 0;
+ /* This should always be on the opposite edge as statdrv */
+ spxx_clk_ctl.s.statrcv = 1;
+ spxx_clk_ctl.s.sndtrn = 1;
+ spxx_clk_ctl.s.drptrn = 1;
+ spxx_clk_ctl.s.rcvtrn = 1;
+ spxx_clk_ctl.s.srxdlck = 1;
+ cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
+ __delay(1000 * MS);
+
+ /* SRX0 clear the boot bit */
+ spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface));
+ spxx_trn4_ctl.s.clr_boot = 1;
+ cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
+
+ /* Wait for the training sequence to complete */
+ cvmx_dprintf("SPI%d: Waiting for training\n", interface);
+ __delay(1000 * MS);
+ /* Wait a really long time here */
+ timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;
+ /*
+ * The HRM says we must wait for 34 + 16 * MAXDIST training sequences.
+ * We'll be pessimistic and wait for a lot more.
+ */
+ rx_training_needed = 500;
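+ /*
+ * Each observed SRXTRN event is write-1-cleared and counted; the
+ * loop exits only once 500 of them have been seen.
+ */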
+ do {
+ stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
+ if (stat.s.srxtrn && rx_training_needed) {
+ rx_training_needed--;
+ cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
+ stat.s.srxtrn = 0;
+ }
+ if (cvmx_get_cycle() > timeout_time) {
+ cvmx_dprintf("SPI%d: Timeout\n", interface);
+ return -1;
+ }
+ } while (stat.s.srxtrn == 0);
+
+ return 0;
+}
+
+/**
+ * Callback to perform calendar data synchronization
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for calendar data in seconds
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode, int timeout)
+{
+ uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
+ if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
+ /* SRX0 interface should be good, send calendar data */
+ union cvmx_srxx_com_ctl srxx_com_ctl;
+ cvmx_dprintf
+ ("SPI%d: Rx is synchronized, start sending calendar data\n",
+ interface);
+ srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
+ srxx_com_ctl.s.inf_en = 1;
+ srxx_com_ctl.s.st_en = 1;
+ cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
+ }
+
+ if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
+ /* STX0 has achieved sync */
+ /* The corresponding board should be sending calendar data */
+ /* Enable the STX0 STAT receiver. */
+ union cvmx_spxx_clk_stat stat;
+ uint64_t timeout_time;
+ union cvmx_stxx_com_ctl stxx_com_ctl;
+ stxx_com_ctl.u64 = 0;
+ stxx_com_ctl.s.st_en = 1;
+ cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
+
+ /* Waiting for calendar sync on STX0 STAT */
+ cvmx_dprintf("SPI%d: Waiting to sync on STX[%d] STAT\n",
+ interface, interface);
+ timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
+ /* SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10) */
+ do {
+ stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
+ if (cvmx_get_cycle() > timeout_time) {
+ cvmx_dprintf("SPI%d: Timeout\n", interface);
+ return -1;
+ }
+ } while (stat.s.stxcal == 0);
+ }
+
+ return 0;
+}
+
+/**
+ * Callback to handle interface up
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode)
+{
+ union cvmx_gmxx_rxx_frm_min gmxx_rxx_frm_min;
+ union cvmx_gmxx_rxx_frm_max gmxx_rxx_frm_max;
+ union cvmx_gmxx_rxx_jabber gmxx_rxx_jabber;
+
+ if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
+ union cvmx_srxx_com_ctl srxx_com_ctl;
+ srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
+ srxx_com_ctl.s.inf_en = 1;
+ cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
+ cvmx_dprintf("SPI%d: Rx is now up\n", interface);
+ }
+
+ if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
+ union cvmx_stxx_com_ctl stxx_com_ctl;
+ stxx_com_ctl.u64 = cvmx_read_csr(CVMX_STXX_COM_CTL(interface));
+ stxx_com_ctl.s.inf_en = 1;
+ cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
+ cvmx_dprintf("SPI%d: Tx is now up\n", interface);
+ }
+
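+ /* Accept frames from 64 bytes up to just under 64KB; jabber at the same limit */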
+ gmxx_rxx_frm_min.u64 = 0;
+ gmxx_rxx_frm_min.s.len = 64;
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MIN(0, interface),
+ gmxx_rxx_frm_min.u64);
+ gmxx_rxx_frm_max.u64 = 0;
+ gmxx_rxx_frm_max.s.len = 64 * 1024 - 4;
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(0, interface),
+ gmxx_rxx_frm_max.u64);
+ gmxx_rxx_jabber.u64 = 0;
+ gmxx_rxx_jabber.s.cnt = 64 * 1024 - 4;
+ cvmx_write_csr(CVMX_GMXX_RXX_JABBER(0, interface), gmxx_rxx_jabber.u64);
+
+ return 0;
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
new file mode 100644
index 000000000..30ecba134
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
@@ -0,0 +1,53 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * This module provides system/board/application information obtained
+ * by the bootloader.
+ */
+#include <linux/export.h>
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-sysinfo.h>
+
+/*
+ * This structure defines the private state maintained by the sysinfo module.
+ */
+static struct cvmx_sysinfo sysinfo; /* system information */
+
+/*
+ * Returns the application information as obtained
+ * by the bootloader. This provides the core mask of the cores
+ * running the same application image, as well as the physical
+ * memory regions available to the core.
+ */
+struct cvmx_sysinfo *cvmx_sysinfo_get(void)
+{
+ return &sysinfo;
+}
+EXPORT_SYMBOL(cvmx_sysinfo_get);
+
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c
new file mode 100644
index 000000000..657dbad96
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -0,0 +1,511 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#include <asm/octeon/octeon.h>
+
+enum octeon_feature_bits __octeon_feature_bits __read_mostly;
+EXPORT_SYMBOL_GPL(__octeon_feature_bits);
+
+/**
+ * Read a byte of fuse data
+ * @byte_addr: address to read
+ *
+ * Returns fuse value: 0 or 1
+ */
+static uint8_t __init cvmx_fuse_read_byte(int byte_addr)
+{
+ union cvmx_mio_fus_rcmd read_cmd;
+
+ read_cmd.u64 = 0;
+ read_cmd.s.addr = byte_addr;
+ read_cmd.s.pend = 1;
+ cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
+ while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD))
+ && read_cmd.s.pend)
+ ;
+ return read_cmd.s.dat;
+}
+
+/*
+ * Version of octeon_model_get_string() that takes a buffer as an argument;
+ * when running early in U-Boot from flash, static/global variables
+ * don't work.
+ */
+static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
+ char *buffer)
+{
+ const char *family;
+ const char *core_model;
+ char pass[4];
+ int clock_mhz;
+ const char *suffix;
+ int num_cores;
+ union cvmx_mio_fus_dat2 fus_dat2;
+ union cvmx_mio_fus_dat3 fus_dat3;
+ char fuse_model[10];
+ uint32_t fuse_data = 0;
+ uint64_t l2d_fus3 = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ l2d_fus3 = (cvmx_read_csr(CVMX_L2D_FUS3) >> 34) & 0x3;
+ fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
+ fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
+ num_cores = cvmx_octeon_num_cores();
+
+ /* Make sure the non-existent devices look disabled */
+ switch ((chip_id >> 8) & 0xff) {
+ case 6: /* CN50XX */
+ case 2: /* CN30XX */
+ fus_dat3.s.nodfa_dte = 1;
+ fus_dat3.s.nozip = 1;
+ break;
+ case 4: /* CN57XX or CN56XX */
+ fus_dat3.s.nodfa_dte = 1;
+ break;
+ default:
+ break;
+ }
+
+ /* Make a guess at the suffix */
+ /* NSP = everything */
+ /* EXP = No crypto */
+ /* SCP = No DFA, No zip */
+ /* CP = No DFA, No crypto, No zip */
+ if (fus_dat3.s.nodfa_dte) {
+ if (fus_dat2.s.nocrypto)
+ suffix = "CP";
+ else
+ suffix = "SCP";
+ } else if (fus_dat2.s.nocrypto)
+ suffix = "EXP";
+ else
+ suffix = "NSP";
+
+ if (!fus_dat2.s.nocrypto)
+ __octeon_feature_bits |= OCTEON_HAS_CRYPTO;
+
+ /*
+ * Assume pass number is encoded using <5:3><2:0>. Exceptions
+ * will be fixed later.
+ */
+ sprintf(pass, "%d.%d", (int)((chip_id >> 3) & 7) + 1, (int)chip_id & 7);
+
+ /*
+ * Use the number of cores to determine the last 2 digits of
+ * the model number. There are some exceptions that are fixed
+ * later.
+ */
+ switch (num_cores) {
+ case 48:
+ core_model = "90";
+ break;
+ case 44:
+ core_model = "88";
+ break;
+ case 40:
+ core_model = "85";
+ break;
+ case 32:
+ core_model = "80";
+ break;
+ case 24:
+ core_model = "70";
+ break;
+ case 16:
+ core_model = "60";
+ break;
+ case 15:
+ core_model = "58";
+ break;
+ case 14:
+ core_model = "55";
+ break;
+ case 13:
+ core_model = "52";
+ break;
+ case 12:
+ core_model = "50";
+ break;
+ case 11:
+ core_model = "48";
+ break;
+ case 10:
+ core_model = "45";
+ break;
+ case 9:
+ core_model = "42";
+ break;
+ case 8:
+ core_model = "40";
+ break;
+ case 7:
+ core_model = "38";
+ break;
+ case 6:
+ core_model = "34";
+ break;
+ case 5:
+ core_model = "32";
+ break;
+ case 4:
+ core_model = "30";
+ break;
+ case 3:
+ core_model = "25";
+ break;
+ case 2:
+ core_model = "20";
+ break;
+ case 1:
+ core_model = "10";
+ break;
+ default:
+ core_model = "XX";
+ break;
+ }
+
+ /* Now figure out the family, the first two digits */
+ switch ((chip_id >> 8) & 0xff) {
+ case 0: /* CN38XX, CN37XX or CN36XX */
+ if (l2d_fus3) {
+ /*
+ * For some unknown reason, the 16 core one is
+ * called 37 instead of 36.
+ */
+ if (num_cores >= 16)
+ family = "37";
+ else
+ family = "36";
+ } else
+ family = "38";
+ /*
+ * This series of chips didn't follow the standard
+ * pass numbering.
+ */
+ switch (chip_id & 0xf) {
+ case 0:
+ strcpy(pass, "1.X");
+ break;
+ case 1:
+ strcpy(pass, "2.X");
+ break;
+ case 3:
+ strcpy(pass, "3.X");
+ break;
+ default:
+ strcpy(pass, "X.X");
+ break;
+ }
+ break;
+ case 1: /* CN31XX or CN3020 */
+ if ((chip_id & 0x10) || l2d_fus3)
+ family = "30";
+ else
+ family = "31";
+ /*
+ * This series of chips didn't follow the standard
+ * pass numbering.
+ */
+ switch (chip_id & 0xf) {
+ case 0:
+ strcpy(pass, "1.0");
+ break;
+ case 2:
+ strcpy(pass, "1.1");
+ break;
+ default:
+ strcpy(pass, "X.X");
+ break;
+ }
+ break;
+ case 2: /* CN3010 or CN3005 */
+ family = "30";
+ /* A chip with half cache is an 05 */
+ if (l2d_fus3)
+ core_model = "05";
+ /*
+ * This series of chips didn't follow the standard
+ * pass numbering.
+ */
+ switch (chip_id & 0xf) {
+ case 0:
+ strcpy(pass, "1.0");
+ break;
+ case 2:
+ strcpy(pass, "1.1");
+ break;
+ default:
+ strcpy(pass, "X.X");
+ break;
+ }
+ break;
+ case 3: /* CN58XX */
+ family = "58";
+ /* Special case. 4 core, half cache (CP with half cache) */
+ if ((num_cores == 4) && l2d_fus3 && !strncmp(suffix, "CP", 2))
+ core_model = "29";
+
+ /* Pass 1 uses different encodings for pass numbers */
+ if ((chip_id & 0xFF) < 0x8) {
+ switch (chip_id & 0x3) {
+ case 0:
+ strcpy(pass, "1.0");
+ break;
+ case 1:
+ strcpy(pass, "1.1");
+ break;
+ case 3:
+ strcpy(pass, "1.2");
+ break;
+ default:
+ strcpy(pass, "1.X");
+ break;
+ }
+ }
+ break;
+ case 4: /* CN57XX, CN56XX, CN55XX, CN54XX */
+ if (fus_dat2.cn56xx.raid_en) {
+ if (l2d_fus3)
+ family = "55";
+ else
+ family = "57";
+ if (fus_dat2.cn56xx.nocrypto)
+ suffix = "SP";
+ else
+ suffix = "SSP";
+ } else {
+ if (fus_dat2.cn56xx.nocrypto)
+ suffix = "CP";
+ else {
+ suffix = "NSP";
+ if (fus_dat3.s.nozip)
+ suffix = "SCP";
+
+ if (fus_dat3.cn38xx.bar2_en)
+ suffix = "NSPB2";
+ }
+ if (l2d_fus3)
+ family = "54";
+ else
+ family = "56";
+ }
+ break;
+ case 6: /* CN50XX */
+ family = "50";
+ break;
+ case 7: /* CN52XX */
+ if (l2d_fus3)
+ family = "51";
+ else
+ family = "52";
+ break;
+ case 0x93: /* CN61XX */
+ family = "61";
+ if (fus_dat2.cn61xx.nocrypto && fus_dat2.cn61xx.dorm_crypto)
+ suffix = "AP";
+ if (fus_dat2.cn61xx.nocrypto)
+ suffix = "CP";
+ else if (fus_dat2.cn61xx.dorm_crypto)
+ suffix = "DAP";
+ else if (fus_dat3.cn61xx.nozip)
+ suffix = "SCP";
+ break;
+ case 0x90: /* CN63XX */
+ family = "63";
+ if (fus_dat3.s.l2c_crip == 2)
+ family = "62";
+ if (num_cores == 6) /* Other core counts match generic */
+ core_model = "35";
+ if (fus_dat2.cn63xx.nocrypto)
+ suffix = "CP";
+ else if (fus_dat2.cn63xx.dorm_crypto)
+ suffix = "DAP";
+ else if (fus_dat3.cn61xx.nozip)
+ suffix = "SCP";
+ else
+ suffix = "AAP";
+ break;
+ case 0x92: /* CN66XX */
+ family = "66";
+ if (num_cores == 6) /* Other core counts match generic */
+ core_model = "35";
+ if (fus_dat2.cn66xx.nocrypto && fus_dat2.cn66xx.dorm_crypto)
+ suffix = "AP";
+ if (fus_dat2.cn66xx.nocrypto)
+ suffix = "CP";
+ else if (fus_dat2.cn66xx.dorm_crypto)
+ suffix = "DAP";
+ else if (fus_dat3.cn61xx.nozip)
+ suffix = "SCP";
+ else
+ suffix = "AAP";
+ break;
+ case 0x91: /* CN68XX */
+ family = "68";
+ if (fus_dat2.cn68xx.nocrypto && fus_dat3.cn61xx.nozip)
+ suffix = "CP";
+ else if (fus_dat2.cn68xx.dorm_crypto)
+ suffix = "DAP";
+ else if (fus_dat3.cn61xx.nozip)
+ suffix = "SCP";
+ else if (fus_dat2.cn68xx.nocrypto)
+ suffix = "SP";
+ else
+ suffix = "AAP";
+ break;
+ case 0x94: /* CNF71XX */
+ family = "F71";
+ if (fus_dat3.cn61xx.nozip)
+ suffix = "SCP";
+ else
+ suffix = "AAP";
+ break;
+ case 0x95: /* CN78XX */
+ if (num_cores == 6) /* Other core counts match generic */
+ core_model = "35";
+ if (OCTEON_IS_MODEL(OCTEON_CN76XX))
+ family = "76";
+ else
+ family = "78";
+ if (fus_dat3.cn78xx.l2c_crip == 2)
+ family = "77";
+ if (fus_dat3.cn78xx.nozip
+ && fus_dat3.cn78xx.nodfa_dte
+ && fus_dat3.cn78xx.nohna_dte) {
+ if (fus_dat3.cn78xx.nozip &&
+ !fus_dat2.cn78xx.raid_en &&
+ fus_dat3.cn78xx.nohna_dte) {
+ suffix = "CP";
+ } else {
+ suffix = "SCP";
+ }
+ } else if (fus_dat2.cn78xx.raid_en == 0)
+ suffix = "HCP";
+ else
+ suffix = "AAP";
+ break;
+ case 0x96: /* CN70XX */
+ family = "70";
+ if (cvmx_read_csr(CVMX_MIO_FUS_PDF) & (0x1ULL << 32))
+ family = "71";
+ if (fus_dat2.cn70xx.nocrypto)
+ suffix = "CP";
+ else if (fus_dat3.cn70xx.nodfa_dte)
+ suffix = "SCP";
+ else
+ suffix = "AAP";
+ break;
+ case 0x97: /* CN73XX */
+ if (num_cores == 6) /* Other core counts match generic */
+ core_model = "35";
+ family = "73";
+ if (fus_dat3.cn73xx.l2c_crip == 2)
+ family = "72";
+ if (fus_dat3.cn73xx.nozip
+ && fus_dat3.cn73xx.nodfa_dte
+ && fus_dat3.cn73xx.nohna_dte) {
+ if (!fus_dat2.cn73xx.raid_en)
+ suffix = "CP";
+ else
+ suffix = "SCP";
+ } else
+ suffix = "AAP";
+ break;
+ case 0x98: /* CN75XX */
+ family = "F75";
+ if (fus_dat3.cn78xx.nozip
+ && fus_dat3.cn78xx.nodfa_dte
+ && fus_dat3.cn78xx.nohna_dte)
+ suffix = "SCP";
+ else
+ suffix = "AAP";
+ break;
+ default:
+ family = "XX";
+ core_model = "XX";
+ strcpy(pass, "X.X");
+ suffix = "XXX";
+ break;
+ }
+
+ clock_mhz = octeon_get_clock_rate() / 1000000;
+ if (family[0] != '3') {
+ int fuse_base = 384 / 8;
+ if (family[0] == '6')
+ fuse_base = 832 / 8;
+
+ /* Check for model in fuses, overrides normal decode */
+ /* This is _not_ valid for Octeon CN3XXX models */
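+ /*
+ * The four fuse bytes form a 32-bit word: bits <13:0> hold the
+ * model number and bits <18:14> the suffix letter (1 => 'A').
+ */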
+ fuse_data |= cvmx_fuse_read_byte(fuse_base + 3);
+ fuse_data = fuse_data << 8;
+ fuse_data |= cvmx_fuse_read_byte(fuse_base + 2);
+ fuse_data = fuse_data << 8;
+ fuse_data |= cvmx_fuse_read_byte(fuse_base + 1);
+ fuse_data = fuse_data << 8;
+ fuse_data |= cvmx_fuse_read_byte(fuse_base);
+ if (fuse_data & 0x7ffff) {
+ int model = fuse_data & 0x3fff;
+ int suffix = (fuse_data >> 14) & 0x1f;
+ if (suffix && model) {
+ /* Have both number and suffix in fuses, so use both */
+ sprintf(fuse_model, "%d%c", model, 'A' + suffix - 1);
+ core_model = "";
+ family = fuse_model;
+ } else if (suffix && !model) {
+ /* Only have suffix, so add suffix to 'normal' model number */
+ sprintf(fuse_model, "%s%c", core_model, 'A' + suffix - 1);
+ core_model = fuse_model;
+ } else {
+ /* Don't have suffix, so just use model from fuses */
+ sprintf(fuse_model, "%d", model);
+ core_model = "";
+ family = fuse_model;
+ }
+ }
+ }
+ sprintf(buffer, "CN%s%sp%s-%d-%s", family, core_model, pass, clock_mhz, suffix);
+ return buffer;
+}
+
+/**
+ * Given the chip processor ID from COP0, this function returns a
+ * string representing the chip model number. The string is of the
+ * form CNXXXXpX.X-FREQ-SUFFIX.
+ * - XXXX = The chip model number
+ * - X.X = Chip pass number
+ * - FREQ = Current frequency in MHz
+ * - SUFFIX = NSP, EXP, SCP, SSP, or CP
+ *
+ * @chip_id: Chip ID
+ *
+ * Returns Model string
+ */
+const char *__init octeon_model_get_string(uint32_t chip_id)
+{
+ static char buffer[32];
+ return octeon_model_get_string_buffer(chip_id, buffer);
+}
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
new file mode 100644
index 000000000..a5e8f4a78
--- /dev/null
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -0,0 +1,143 @@
+/*
+ * Octeon Bootbus flash setup
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007, 2008 Cavium Networks
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/semaphore.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/of_platform.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/octeon/octeon.h>
+
+static struct map_info flash_map;
+static struct mtd_info *mymtd;
+static const char *part_probe_types[] = {
+ "cmdlinepart",
+#ifdef CONFIG_MTD_REDBOOT_PARTS
+ "RedBoot",
+#endif
+ NULL
+};
+
+static map_word octeon_flash_map_read(struct map_info *map, unsigned long ofs)
+{
+ map_word r;
+
+ down(&octeon_bootbus_sem);
+ r = inline_map_read(map, ofs);
+ up(&octeon_bootbus_sem);
+
+ return r;
+}
+
+static void octeon_flash_map_write(struct map_info *map, const map_word datum,
+ unsigned long ofs)
+{
+ down(&octeon_bootbus_sem);
+ inline_map_write(map, datum, ofs);
+ up(&octeon_bootbus_sem);
+}
+
+static void octeon_flash_map_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
+{
+ down(&octeon_bootbus_sem);
+ inline_map_copy_from(map, to, from, len);
+ up(&octeon_bootbus_sem);
+}
+
+static void octeon_flash_map_copy_to(struct map_info *map, unsigned long to,
+ const void *from, ssize_t len)
+{
+ down(&octeon_bootbus_sem);
+ inline_map_copy_to(map, to, from, len);
+ up(&octeon_bootbus_sem);
+}
+
+/**
+ * Module/driver initialization.
+ *
+ * Returns Zero on success
+ */
+static int octeon_flash_probe(struct platform_device *pdev)
+{
+ union cvmx_mio_boot_reg_cfgx region_cfg;
+ u32 cs;
+ int r;
+ struct device_node *np = pdev->dev.of_node;
+
+ r = of_property_read_u32(np, "reg", &cs);
+ if (r)
+ return r;
+
+ /*
+ * Read the bootbus region 0 setup to determine the base
+ * address of the flash.
+ */
+ region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
+ if (region_cfg.s.en) {
+ /*
+ * The bootloader always takes the flash and sets its
+ * address so the entire flash fits below
+ * 0x1fc00000. This way the flash aliases to
+ * 0x1fc00000 for booting. Software can access the
+ * full flash at the true address, while core boot can
+ * access 4MB.
+ */
+ /* Use this name so old part lines work */
+ flash_map.name = "phys_mapped_flash";
+ flash_map.phys = region_cfg.s.base << 16;
+ flash_map.size = 0x1fc00000 - flash_map.phys;
+ /* 8-bit bus (0 + 1) or 16-bit bus (1 + 1) */
+ flash_map.bankwidth = region_cfg.s.width + 1;
+ flash_map.virt = ioremap(flash_map.phys, flash_map.size);
+ pr_notice("Bootbus flash: Setting flash for %luMB flash at "
+ "0x%08llx\n", flash_map.size >> 20, flash_map.phys);
+ WARN_ON(!map_bankwidth_supported(flash_map.bankwidth));
+ flash_map.read = octeon_flash_map_read;
+ flash_map.write = octeon_flash_map_write;
+ flash_map.copy_from = octeon_flash_map_copy_from;
+ flash_map.copy_to = octeon_flash_map_copy_to;
+ mymtd = do_map_probe("cfi_probe", &flash_map);
+ if (mymtd) {
+ mymtd->owner = THIS_MODULE;
+ mtd_device_parse_register(mymtd, part_probe_types,
+ NULL, NULL, 0);
+ } else {
+ pr_err("Failed to register MTD device for flash\n");
+ }
+ }
+ return 0;
+}
+
+static const struct of_device_id of_flash_match[] = {
+ {
+ .compatible = "cfi-flash",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_flash_match);
+
+static struct platform_driver of_flash_driver = {
+ .driver = {
+ .name = "octeon-of-flash",
+ .of_match_table = of_flash_match,
+ },
+ .probe = octeon_flash_probe,
+};
+
+static int octeon_flash_init(void)
+{
+ return platform_driver_register(&of_flash_driver);
+}
+late_initcall(octeon_flash_init);
+
+MODULE_LICENSE("GPL");
diff --git a/arch/mips/cavium-octeon/oct_ilm.c b/arch/mips/cavium-octeon/oct_ilm.c
new file mode 100644
index 000000000..99e27155b
--- /dev/null
+++ b/arch/mips/cavium-octeon/oct_ilm.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-ciu-defs.h>
+#include <asm/octeon/cvmx.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+
+#define TIMER_NUM 3
+
+static bool reset_stats;
+
+struct latency_info {
+ u64 io_interval;
+ u64 cpu_interval;
+ u64 timer_start1;
+ u64 timer_start2;
+ u64 max_latency;
+ u64 min_latency;
+ u64 latency_sum;
+ u64 average_latency;
+ u64 interrupt_cnt;
+};
+
+static struct latency_info li;
+static struct dentry *dir;
+
+static int show_latency(struct seq_file *m, void *v)
+{
+ u64 cpuclk, avg, max, min;
+ struct latency_info curr_li = li;
+
+ cpuclk = octeon_get_clock_rate();
+
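+ /* Convert the CPU-cycle counts to nanoseconds using the core clock rate */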
+ max = (curr_li.max_latency * 1000000000) / cpuclk;
+ min = (curr_li.min_latency * 1000000000) / cpuclk;
+ avg = (curr_li.latency_sum * 1000000000) / (cpuclk * curr_li.interrupt_cnt);
+
+ seq_printf(m, "cnt: %10lld, avg: %7lld ns, max: %7lld ns, min: %7lld ns\n",
+ curr_li.interrupt_cnt, avg, max, min);
+ return 0;
+}
+
+static int oct_ilm_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_latency, NULL);
+}
+
+static const struct file_operations oct_ilm_ops = {
+ .open = oct_ilm_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int reset_statistics(void *data, u64 value)
+{
+ reset_stats = true;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n");
+
+static void init_debugfs(void)
+{
+ dir = debugfs_create_dir("oct_ilm", 0);
+ debugfs_create_file("statistics", 0222, dir, NULL, &oct_ilm_ops);
+ debugfs_create_file("reset", 0222, dir, NULL, &reset_statistics_ops);
+}
+
+static void init_latency_info(struct latency_info *li, int startup)
+{
+ /* Interval in milliseconds after which the interrupt will
+ * be triggered.
+ */
+ int interval = 1;
+
+ if (startup) {
+ /* Calculate how much the IO clock and CPU clock will
+ * increment in 'interval' ms.
+ */
+ li->io_interval = (octeon_get_io_clock_rate() * interval) / 1000;
+ li->cpu_interval = (octeon_get_clock_rate() * interval) / 1000;
+ }
+ li->timer_start1 = 0;
+ li->timer_start2 = 0;
+ li->max_latency = 0;
+ li->min_latency = (u64)-1;
+ li->latency_sum = 0;
+ li->interrupt_cnt = 0;
+}
+
+
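+/*
+ * Program CIU timer 'timer' as a one-shot that expires after 'interval'
+ * ticks (init_latency_info() derives the value from the IO clock rate).
+ * timer_start1/timer_start2 record the CPU cycle count immediately before
+ * and after the timer CSR is written.
+ */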
+static void start_timer(int timer, u64 interval)
+{
+ union cvmx_ciu_timx timx;
+ unsigned long flags;
+
+ timx.u64 = 0;
+ timx.s.one_shot = 1;
+ timx.s.len = interval;
+ raw_local_irq_save(flags);
+ li.timer_start1 = read_c0_cvmcount();
+ cvmx_write_csr(CVMX_CIU_TIMX(timer), timx.u64);
+ /* Read it back to force wait until register is written. */
+ timx.u64 = cvmx_read_csr(CVMX_CIU_TIMX(timer));
+ li.timer_start2 = read_c0_cvmcount();
+ raw_local_irq_restore(flags);
+}
+
+
+static irqreturn_t cvm_oct_ciu_timer_interrupt(int cpl, void *dev_id)
+{
+ u64 last_latency;
+ u64 last_int_cnt;
+
+ if (reset_stats) {
+ init_latency_info(&li, 0);
+ reset_stats = false;
+ } else {
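+ /*
+ * Latency is the number of CPU cycles elapsed past the
+ * expected expiry time (timer_start1 + cpu_interval).
+ */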
+ last_int_cnt = read_c0_cvmcount();
+ last_latency = last_int_cnt - (li.timer_start1 + li.cpu_interval);
+ li.interrupt_cnt++;
+ li.latency_sum += last_latency;
+ if (last_latency > li.max_latency)
+ li.max_latency = last_latency;
+ if (last_latency < li.min_latency)
+ li.min_latency = last_latency;
+ }
+ start_timer(TIMER_NUM, li.io_interval);
+ return IRQ_HANDLED;
+}
+
+static void disable_timer(int timer)
+{
+ union cvmx_ciu_timx timx;
+
+ timx.s.one_shot = 0;
+ timx.s.len = 0;
+ cvmx_write_csr(CVMX_CIU_TIMX(timer), timx.u64);
+ /* Read it back to force immediate write of timer register */
+ timx.u64 = cvmx_read_csr(CVMX_CIU_TIMX(timer));
+}
+
+static __init int oct_ilm_module_init(void)
+{
+ int rc;
+ int irq = OCTEON_IRQ_TIMER0 + TIMER_NUM;
+
+ init_debugfs();
+
+ rc = request_irq(irq, cvm_oct_ciu_timer_interrupt, IRQF_NO_THREAD,
+ "oct_ilm", 0);
+ if (rc) {
+ WARN(1, "Could not acquire IRQ %d", irq);
+ goto err_irq;
+ }
+
+ init_latency_info(&li, 1);
+ start_timer(TIMER_NUM, li.io_interval);
+
+ return 0;
+err_irq:
+ debugfs_remove_recursive(dir);
+ return rc;
+}
+
+static __exit void oct_ilm_module_exit(void)
+{
+ disable_timer(TIMER_NUM);
+ debugfs_remove_recursive(dir);
+ free_irq(OCTEON_IRQ_TIMER0 + TIMER_NUM, 0);
+}
+
+module_exit(oct_ilm_module_exit);
+module_init(oct_ilm_module_init);
+MODULE_AUTHOR("Venkat Subbiah, Cavium");
+MODULE_DESCRIPTION("Measures interrupt latency on Octeon chips.");
+MODULE_LICENSE("GPL");
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
new file mode 100644
index 000000000..191bcaf56
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -0,0 +1,2989 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2016 Cavium, Inc.
+ */
+
+#include <linux/of_address.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/bitops.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-ciu2-defs.h>
+#include <asm/octeon/cvmx-ciu3-defs.h>
+
+static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
+static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
+static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
+static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip2);
+
+static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip3);
+static DEFINE_PER_CPU(struct octeon_ciu3_info *, octeon_ciu3_info);
+#define CIU3_MBOX_PER_CORE 10
+
+/*
+ * The 8 most significant bits of the intsn identify the interrupt major block.
+ * Each major block might use its own interrupt domain. Thus 256 domains are
+ * needed.
+ */
+#define MAX_CIU3_DOMAINS 256
+
+typedef irq_hw_number_t (*octeon_ciu3_intsn2hw_t)(struct irq_domain *, unsigned int);
+
+/* Information for each ciu3 in the system */
+struct octeon_ciu3_info {
+ u64 ciu3_addr;
+ int node;
+ struct irq_domain *domain[MAX_CIU3_DOMAINS];
+ octeon_ciu3_intsn2hw_t intsn2hw[MAX_CIU3_DOMAINS];
+};
+
+/* Each ciu3 in the system uses its own data (one ciu3 per node) */
+static struct octeon_ciu3_info *octeon_ciu3_info_per_node[4];
+
+struct octeon_irq_ciu_domain_data {
+ int num_sum; /* number of sum registers (2 or 3). */
+};
+
+/* Register offsets from ciu3_addr */
+#define CIU3_CONST 0x220
+#define CIU3_IDT_CTL(_idt) ((_idt) * 8 + 0x110000)
+#define CIU3_IDT_PP(_idt, _idx) ((_idt) * 32 + (_idx) * 8 + 0x120000)
+#define CIU3_IDT_IO(_idt) ((_idt) * 8 + 0x130000)
+#define CIU3_DEST_PP_INT(_pp_ip) ((_pp_ip) * 8 + 0x200000)
+#define CIU3_DEST_IO_INT(_io) ((_io) * 8 + 0x210000)
+#define CIU3_ISC_CTL(_intsn) ((_intsn) * 8 + 0x80000000)
+#define CIU3_ISC_W1C(_intsn) ((_intsn) * 8 + 0x90000000)
+#define CIU3_ISC_W1S(_intsn) ((_intsn) * 8 + 0xa0000000)
+
+static __read_mostly int octeon_irq_ciu_to_irq[8][64];
+
+struct octeon_ciu_chip_data {
+ union {
+ struct { /* only used for ciu3 */
+ u64 ciu3_addr;
+ unsigned int intsn;
+ };
+ struct { /* only used for ciu/ciu2 */
+ u8 line;
+ u8 bit;
+ };
+ };
+ int gpio_line;
+ int current_cpu; /* Next CPU expected to take this irq */
+ int ciu_node; /* NUMA node number of the CIU */
+};
+
+struct octeon_core_chip_data {
+ struct mutex core_irq_mutex;
+ bool current_en;
+ bool desired_en;
+ u8 bit;
+};
+
+#define MIPS_CORE_IRQ_LINES 8
+
+static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
+
+static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
+ struct irq_chip *chip,
+ irq_flow_handler_t handler)
+{
+ struct octeon_ciu_chip_data *cd;
+
+ cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
+ irq_set_chip_and_handler(irq, chip, handler);
+
+ cd->line = line;
+ cd->bit = bit;
+ cd->gpio_line = gpio_line;
+
+ irq_set_chip_data(irq, cd);
+ octeon_irq_ciu_to_irq[line][bit] = irq;
+ return 0;
+}
+
+static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
+{
+ struct irq_data *data = irq_get_irq_data(irq);
+ struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+ irq_set_chip_data(irq, NULL);
+ kfree(cd);
+}
+
+static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
+ int irq, int line, int bit)
+{
+ struct device_node *of_node;
+ int ret;
+
+ of_node = irq_domain_get_of_node(domain);
+ if (!of_node)
+ return -EINVAL;
+ ret = irq_alloc_desc_at(irq, of_node_to_nid(of_node));
+ if (ret < 0)
+ return ret;
+
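+ /* hwirq numbers pack the CIU line above bit 6 and the bit position in the low 6 bits */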
+ return irq_domain_associate(domain, irq, line << 6 | bit);
+}
+
+static int octeon_coreid_for_cpu(int cpu)
+{
+#ifdef CONFIG_SMP
+ return cpu_logical_map(cpu);
+#else
+ return cvmx_get_core_num();
+#endif
+}
+
+static int octeon_cpu_for_coreid(int coreid)
+{
+#ifdef CONFIG_SMP
+ return cpu_number_map(coreid);
+#else
+ return smp_processor_id();
+#endif
+}
+
+static void octeon_irq_core_ack(struct irq_data *data)
+{
+ struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+ unsigned int bit = cd->bit;
+
+ /*
+ * We don't need to disable IRQs to make these atomic since
+ * they are already disabled earlier in the low level
+ * interrupt code.
+ */
+ clear_c0_status(0x100 << bit);
+ /* The two user interrupts must be cleared manually. */
+ if (bit < 2)
+ clear_c0_cause(0x100 << bit);
+}
+
+static void octeon_irq_core_eoi(struct irq_data *data)
+{
+ struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+ /*
+ * We don't need to disable IRQs to make these atomic since
+ * they are already disabled earlier in the low level
+ * interrupt code.
+ */
+ set_c0_status(0x100 << cd->bit);
+}
+
+static void octeon_irq_core_set_enable_local(void *arg)
+{
+ struct irq_data *data = arg;
+ struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+ unsigned int mask = 0x100 << cd->bit;
+
+ /*
+ * Interrupts are already disabled, so these are atomic.
+ */
+ if (cd->desired_en)
+ set_c0_status(mask);
+ else
+ clear_c0_status(mask);
+
+}
+
+static void octeon_irq_core_disable(struct irq_data *data)
+{
+ struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+ cd->desired_en = false;
+}
+
+static void octeon_irq_core_enable(struct irq_data *data)
+{
+ struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+ cd->desired_en = true;
+}
+
+static void octeon_irq_core_bus_lock(struct irq_data *data)
+{
+ struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&cd->core_irq_mutex);
+}
+
+static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
+{
+ struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+ if (cd->desired_en != cd->current_en) {
+ on_each_cpu(octeon_irq_core_set_enable_local, data, 1);
+
+ cd->current_en = cd->desired_en;
+ }
+
+ mutex_unlock(&cd->core_irq_mutex);
+}
+
+static struct irq_chip octeon_irq_chip_core = {
+ .name = "Core",
+ .irq_enable = octeon_irq_core_enable,
+ .irq_disable = octeon_irq_core_disable,
+ .irq_ack = octeon_irq_core_ack,
+ .irq_eoi = octeon_irq_core_eoi,
+ .irq_bus_lock = octeon_irq_core_bus_lock,
+ .irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,
+
+ .irq_cpu_online = octeon_irq_core_eoi,
+ .irq_cpu_offline = octeon_irq_core_ack,
+ .flags = IRQCHIP_ONOFFLINE_ENABLED,
+};
+
+static void __init octeon_irq_init_core(void)
+{
+ int i;
+ int irq;
+ struct octeon_core_chip_data *cd;
+
+ for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
+ cd = &octeon_irq_core_chip_data[i];
+ cd->current_en = false;
+ cd->desired_en = false;
+ cd->bit = i;
+ mutex_init(&cd->core_irq_mutex);
+
+ irq = OCTEON_IRQ_SW0 + i;
+ irq_set_chip_data(irq, cd);
+ irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
+ handle_percpu_irq);
+ }
+}
+
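+/*
+ * Choose the CPU that should take the next occurrence of this interrupt:
+ * round-robin through the online CPUs in the irq's affinity mask, or use
+ * the current CPU when the mask is empty.
+ */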
+static int next_cpu_for_irq(struct irq_data *data)
+{
+
+#ifdef CONFIG_SMP
+ int cpu;
+ struct cpumask *mask = irq_data_get_affinity_mask(data);
+ int weight = cpumask_weight(mask);
+ struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+ if (weight > 1) {
+ cpu = cd->current_cpu;
+ for (;;) {
+ cpu = cpumask_next(cpu, mask);
+ if (cpu >= nr_cpu_ids) {
+ cpu = -1;
+ continue;
+ } else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
+ break;
+ }
+ }
+ } else if (weight == 1) {
+ cpu = cpumask_first(mask);
+ } else {
+ cpu = smp_processor_id();
+ }
+ cd->current_cpu = cpu;
+ return cpu;
+#else
+ return smp_processor_id();
+#endif
+}
+
+static void octeon_irq_ciu_enable(struct irq_data *data)
+{
+ int cpu = next_cpu_for_irq(data);
+ int coreid = octeon_coreid_for_cpu(cpu);
+ unsigned long *pen;
+ unsigned long flags;
+ struct octeon_ciu_chip_data *cd;
+ raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ raw_spin_lock_irqsave(lock, flags);
+ if (cd->line == 0) {
+ pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
+ __set_bit(cd->bit, pen);
+ /*
+ * Must be visible to octeon_irq_ip{2,3}_ciu() before
+ * enabling the irq.
+ */
+ wmb();
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
+ } else {
+ pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+ __set_bit(cd->bit, pen);
+ /*
+ * Must be visible to octeon_irq_ip{2,3}_ciu() before
+ * enabling the irq.
+ */
+ wmb();
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
+ }
+ raw_spin_unlock_irqrestore(lock, flags);
+}
+
+static void octeon_irq_ciu_enable_local(struct irq_data *data)
+{
+ unsigned long *pen;
+ unsigned long flags;
+ struct octeon_ciu_chip_data *cd;
+ raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ raw_spin_lock_irqsave(lock, flags);
+ if (cd->line == 0) {
+ pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
+ __set_bit(cd->bit, pen);
+ /*
+ * Must be visible to octeon_irq_ip{2,3}_ciu() before
+ * enabling the irq.
+ */
+ wmb();
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
+ } else {
+ pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
+ __set_bit(cd->bit, pen);
+ /*
+ * Must be visible to octeon_irq_ip{2,3}_ciu() before
+ * enabling the irq.
+ */
+ wmb();
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
+ }
+ raw_spin_unlock_irqrestore(lock, flags);
+}
+
+static void octeon_irq_ciu_disable_local(struct irq_data *data)
+{
+ unsigned long *pen;
+ unsigned long flags;
+ struct octeon_ciu_chip_data *cd;
+ raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ raw_spin_lock_irqsave(lock, flags);
+ if (cd->line == 0) {
+ pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
+ __clear_bit(cd->bit, pen);
+ /*
+ * Must be visible to octeon_irq_ip{2,3}_ciu() before
+ * enabling the irq.
+ */
+ wmb();
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
+ } else {
+ pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
+ __clear_bit(cd->bit, pen);
+ /*
+ * Must be visible to octeon_irq_ip{2,3}_ciu() before
+ * enabling the irq.
+ */
+ wmb();
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
+ }
+ raw_spin_unlock_irqrestore(lock, flags);
+}
+
+static void octeon_irq_ciu_disable_all(struct irq_data *data)
+{
+ unsigned long flags;
+ unsigned long *pen;
+ int cpu;
+ struct octeon_ciu_chip_data *cd;
+ raw_spinlock_t *lock;
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ for_each_online_cpu(cpu) {
+ int coreid = octeon_coreid_for_cpu(cpu);
+ lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
+ if (cd->line == 0)
+ pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
+ else
+ pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+
+ raw_spin_lock_irqsave(lock, flags);
+ __clear_bit(cd->bit, pen);
+ /*
+ * Must be visible to octeon_irq_ip{2,3}_ciu() before
+ * enabling the irq.
+ */
+ wmb();
+ if (cd->line == 0)
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
+ else
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
+ raw_spin_unlock_irqrestore(lock, flags);
+ }
+}
+
+static void octeon_irq_ciu_enable_all(struct irq_data *data)
+{
+ unsigned long flags;
+ unsigned long *pen;
+ int cpu;
+ struct octeon_ciu_chip_data *cd;
+ raw_spinlock_t *lock;
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ for_each_online_cpu(cpu) {
+ int coreid = octeon_coreid_for_cpu(cpu);
+ lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
+ if (cd->line == 0)
+ pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
+ else
+ pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+
+ raw_spin_lock_irqsave(lock, flags);
+ __set_bit(cd->bit, pen);
+ /*
+ * Must be visible to octeon_irq_ip{2,3}_ciu() before
+ * enabling the irq.
+ */
+ wmb();
+ if (cd->line == 0)
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
+ else
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
+ raw_spin_unlock_irqrestore(lock, flags);
+ }
+}
+
+/*
+ * Enable the irq on the next core in the affinity set for chips that
+ * have the EN*_W1{S,C} registers.
+ */
+static void octeon_irq_ciu_enable_v2(struct irq_data *data)
+{
+ u64 mask;
+ int cpu = next_cpu_for_irq(data);
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->bit);
+
+ /*
+ * Called under the desc lock, so these should never get out
+ * of sync.
+ */
+ if (cd->line == 0) {
+ int index = octeon_coreid_for_cpu(cpu) * 2;
+ set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+ cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+ } else {
+ int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+ set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+ cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+ }
+}
+
+/*
+ * Enable the irq in the sum2 registers.
+ */
+static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
+{
+ u64 mask;
+ int cpu = next_cpu_for_irq(data);
+ int index = octeon_coreid_for_cpu(cpu);
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->bit);
+
+ cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
+}
+
+/*
+ * Disable the irq in the sum2 registers.
+ */
+static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
+{
+ u64 mask;
+ int cpu = next_cpu_for_irq(data);
+ int index = octeon_coreid_for_cpu(cpu);
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->bit);
+
+ cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
+}
+
+static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
+{
+ u64 mask;
+ int cpu = next_cpu_for_irq(data);
+ int index = octeon_coreid_for_cpu(cpu);
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->bit);
+
+ cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
+}
+
+static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
+{
+ int cpu;
+ struct octeon_ciu_chip_data *cd;
+ u64 mask;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->bit);
+
+ for_each_online_cpu(cpu) {
+ int coreid = octeon_coreid_for_cpu(cpu);
+
+ cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
+ }
+}
+
+/*
+ * Enable the irq on the current CPU for chips that
+ * have the EN*_W1{S,C} registers.
+ */
+static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
+{
+ u64 mask;
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->bit);
+
+ if (cd->line == 0) {
+ int index = cvmx_get_core_num() * 2;
+ set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
+ cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+ } else {
+ int index = cvmx_get_core_num() * 2 + 1;
+ set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
+ cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+ }
+}
+
+static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
+{
+ u64 mask;
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->bit);
+
+ if (cd->line == 0) {
+ int index = cvmx_get_core_num() * 2;
+ clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
+ cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+ } else {
+ int index = cvmx_get_core_num() * 2 + 1;
+ clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
+ cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+ }
+}
+
+/*
+ * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
+ */
+static void octeon_irq_ciu_ack(struct irq_data *data)
+{
+ u64 mask;
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->bit);
+
+ if (cd->line == 0) {
+ int index = cvmx_get_core_num() * 2;
+ cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
+ } else {
+ cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
+ }
+}
+
+/*
+ * Disable the irq on all cores for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
+{
+ int cpu;
+ u64 mask;
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->bit);
+
+ if (cd->line == 0) {
+ for_each_online_cpu(cpu) {
+ int index = octeon_coreid_for_cpu(cpu) * 2;
+ clear_bit(cd->bit,
+ &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+ cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+ }
+ } else {
+ for_each_online_cpu(cpu) {
+ int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+ clear_bit(cd->bit,
+ &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+ cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+ }
+ }
+}
+
+/*
+ * Enable the irq on all cores for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
+{
+ int cpu;
+ u64 mask;
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->bit);
+
+ if (cd->line == 0) {
+ for_each_online_cpu(cpu) {
+ int index = octeon_coreid_for_cpu(cpu) * 2;
+ set_bit(cd->bit,
+ &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+ cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+ }
+ } else {
+ for_each_online_cpu(cpu) {
+ int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+ set_bit(cd->bit,
+ &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+ cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+ }
+ }
+}
+
+static int octeon_irq_ciu_set_type(struct irq_data *data, unsigned int t)
+{
+ irqd_set_trigger_type(data, t);
+
+ if (t & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(data, handle_edge_irq);
+ else
+ irq_set_handler_locked(data, handle_level_irq);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static void octeon_irq_gpio_setup(struct irq_data *data)
+{
+ union cvmx_gpio_bit_cfgx cfg;
+ struct octeon_ciu_chip_data *cd;
+ u32 t = irqd_get_trigger_type(data);
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ cfg.u64 = 0;
+ cfg.s.int_en = 1;
+ cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
+ cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;
+
+ /* 140 ns glitch filter */
+ cfg.s.fil_cnt = 7;
+ cfg.s.fil_sel = 3;
+
+ cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
+}
+
+static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
+{
+ octeon_irq_gpio_setup(data);
+ octeon_irq_ciu_enable_v2(data);
+}
+
+static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
+{
+ octeon_irq_gpio_setup(data);
+ octeon_irq_ciu_enable(data);
+}
+
+static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
+{
+ irqd_set_trigger_type(data, t);
+ octeon_irq_gpio_setup(data);
+
+ if (t & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(data, handle_edge_irq);
+ else
+ irq_set_handler_locked(data, handle_level_irq);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
+{
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
+
+ octeon_irq_ciu_disable_all_v2(data);
+}
+
+static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
+{
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
+
+ octeon_irq_ciu_disable_all(data);
+}
+
+static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
+{
+ struct octeon_ciu_chip_data *cd;
+ u64 mask;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->gpio_line);
+
+ cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
+}
+
+#ifdef CONFIG_SMP
+
+static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
+{
+ int cpu = smp_processor_id();
+ cpumask_t new_affinity;
+ struct cpumask *mask = irq_data_get_affinity_mask(data);
+
+ if (!cpumask_test_cpu(cpu, mask))
+ return;
+
+ if (cpumask_weight(mask) > 1) {
+ /*
+ * It has multi-CPU affinity; just remove this CPU
+ * from the affinity set.
+ */
+ cpumask_copy(&new_affinity, mask);
+ cpumask_clear_cpu(cpu, &new_affinity);
+ } else {
+ /* Otherwise, put it on lowest numbered online CPU. */
+ cpumask_clear(&new_affinity);
+ cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
+ }
+ irq_set_affinity_locked(data, &new_affinity, false);
+}
+
+static int octeon_irq_ciu_set_affinity(struct irq_data *data,
+ const struct cpumask *dest, bool force)
+{
+ int cpu;
+ bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+ unsigned long flags;
+ struct octeon_ciu_chip_data *cd;
+ unsigned long *pen;
+ raw_spinlock_t *lock;
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ /*
+ * For non-v2 CIU, we will allow only single CPU affinity.
+ * This removes the need to do locking in the .ack/.eoi
+ * functions.
+ */
+ if (cpumask_weight(dest) != 1)
+ return -EINVAL;
+
+ if (!enable_one)
+ return 0;
+
+
+ for_each_online_cpu(cpu) {
+ int coreid = octeon_coreid_for_cpu(cpu);
+
+ lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
+ raw_spin_lock_irqsave(lock, flags);
+
+ if (cd->line == 0)
+ pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
+ else
+ pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+
+ if (cpumask_test_cpu(cpu, dest) && enable_one) {
+ enable_one = false;
+ __set_bit(cd->bit, pen);
+ } else {
+ __clear_bit(cd->bit, pen);
+ }
+ /*
+ * Must be visible to octeon_irq_ip{2,3}_ciu() before
+ * enabling the irq.
+ */
+ wmb();
+
+ if (cd->line == 0)
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
+ else
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
+
+ raw_spin_unlock_irqrestore(lock, flags);
+ }
+ return 0;
+}
+
+/*
+ * Set affinity for the irq for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
+{
+ int cpu;
+ bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+ u64 mask;
+ struct octeon_ciu_chip_data *cd;
+
+ if (!enable_one)
+ return 0;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << cd->bit;
+
+ if (cd->line == 0) {
+ for_each_online_cpu(cpu) {
+ unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
+ int index = octeon_coreid_for_cpu(cpu) * 2;
+ if (cpumask_test_cpu(cpu, dest) && enable_one) {
+ enable_one = false;
+ set_bit(cd->bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+ } else {
+ clear_bit(cd->bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+ }
+ }
+ } else {
+ for_each_online_cpu(cpu) {
+ unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+ int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+ if (cpumask_test_cpu(cpu, dest) && enable_one) {
+ enable_one = false;
+ set_bit(cd->bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+ } else {
+ clear_bit(cd->bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+ }
+ }
+ }
+ return 0;
+}
+
+static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
+{
+ int cpu;
+ bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+ u64 mask;
+ struct octeon_ciu_chip_data *cd;
+
+ if (!enable_one)
+ return 0;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << cd->bit;
+
+ for_each_online_cpu(cpu) {
+ int index = octeon_coreid_for_cpu(cpu);
+
+ if (cpumask_test_cpu(cpu, dest) && enable_one) {
+ enable_one = false;
+ cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
+ } else {
+ cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
+ }
+ }
+ return 0;
+}
+#endif
+
+static unsigned int edge_startup(struct irq_data *data)
+{
+ /*
+ * Ack any pending edge-irq at startup, so there is
+ * an _edge_ to fire on when the event reappears.
+ */
+ data->chip->irq_ack(data);
+ data->chip->irq_enable(data);
+ return 0;
+}
+
+/*
+ * Newer octeon chips have support for lockless CIU operation.
+ */
+static struct irq_chip octeon_irq_chip_ciu_v2 = {
+ .name = "CIU",
+ .irq_enable = octeon_irq_ciu_enable_v2,
+ .irq_disable = octeon_irq_ciu_disable_all_v2,
+ .irq_mask = octeon_irq_ciu_disable_local_v2,
+ .irq_unmask = octeon_irq_ciu_enable_v2,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
+ .name = "CIU",
+ .irq_enable = octeon_irq_ciu_enable_v2,
+ .irq_disable = octeon_irq_ciu_disable_all_v2,
+ .irq_ack = octeon_irq_ciu_ack,
+ .irq_mask = octeon_irq_ciu_disable_local_v2,
+ .irq_unmask = octeon_irq_ciu_enable_v2,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+/*
+ * The SUM2 (IP4) sources also have W1S/W1C enable registers, so they
+ * can be driven without locking.
+ */
+static struct irq_chip octeon_irq_chip_ciu_sum2 = {
+ .name = "CIU",
+ .irq_enable = octeon_irq_ciu_enable_sum2,
+ .irq_disable = octeon_irq_ciu_disable_all_sum2,
+ .irq_mask = octeon_irq_ciu_disable_local_sum2,
+ .irq_unmask = octeon_irq_ciu_enable_sum2,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
+ .name = "CIU",
+ .irq_enable = octeon_irq_ciu_enable_sum2,
+ .irq_disable = octeon_irq_ciu_disable_all_sum2,
+ .irq_ack = octeon_irq_ciu_ack_sum2,
+ .irq_mask = octeon_irq_ciu_disable_local_sum2,
+ .irq_unmask = octeon_irq_ciu_enable_sum2,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu = {
+ .name = "CIU",
+ .irq_enable = octeon_irq_ciu_enable,
+ .irq_disable = octeon_irq_ciu_disable_all,
+ .irq_mask = octeon_irq_ciu_disable_local,
+ .irq_unmask = octeon_irq_ciu_enable,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu_set_affinity,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_edge = {
+ .name = "CIU",
+ .irq_enable = octeon_irq_ciu_enable,
+ .irq_disable = octeon_irq_ciu_disable_all,
+ .irq_ack = octeon_irq_ciu_ack,
+ .irq_mask = octeon_irq_ciu_disable_local,
+ .irq_unmask = octeon_irq_ciu_enable,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu_set_affinity,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+/* The mbox versions don't do any affinity or round-robin. */
+static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
+ .name = "CIU-M",
+ .irq_enable = octeon_irq_ciu_enable_all_v2,
+ .irq_disable = octeon_irq_ciu_disable_all_v2,
+ .irq_ack = octeon_irq_ciu_disable_local_v2,
+ .irq_eoi = octeon_irq_ciu_enable_local_v2,
+
+ .irq_cpu_online = octeon_irq_ciu_enable_local_v2,
+ .irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
+ .flags = IRQCHIP_ONOFFLINE_ENABLED,
+};
+
+static struct irq_chip octeon_irq_chip_ciu_mbox = {
+ .name = "CIU-M",
+ .irq_enable = octeon_irq_ciu_enable_all,
+ .irq_disable = octeon_irq_ciu_disable_all,
+ .irq_ack = octeon_irq_ciu_disable_local,
+ .irq_eoi = octeon_irq_ciu_enable_local,
+
+ .irq_cpu_online = octeon_irq_ciu_enable_local,
+ .irq_cpu_offline = octeon_irq_ciu_disable_local,
+ .flags = IRQCHIP_ONOFFLINE_ENABLED,
+};
+
+static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
+ .name = "CIU-GPIO",
+ .irq_enable = octeon_irq_ciu_enable_gpio_v2,
+ .irq_disable = octeon_irq_ciu_disable_gpio_v2,
+ .irq_ack = octeon_irq_ciu_gpio_ack,
+ .irq_mask = octeon_irq_ciu_disable_local_v2,
+ .irq_unmask = octeon_irq_ciu_enable_v2,
+ .irq_set_type = octeon_irq_ciu_gpio_set_type,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static struct irq_chip octeon_irq_chip_ciu_gpio = {
+ .name = "CIU-GPIO",
+ .irq_enable = octeon_irq_ciu_enable_gpio,
+ .irq_disable = octeon_irq_ciu_disable_gpio,
+ .irq_mask = octeon_irq_ciu_disable_local,
+ .irq_unmask = octeon_irq_ciu_enable,
+ .irq_ack = octeon_irq_ciu_gpio_ack,
+ .irq_set_type = octeon_irq_ciu_gpio_set_type,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu_set_affinity,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+/*
+ * Watchdog interrupts are special. They are associated with a single
+ * core, so we hardwire the affinity to that core.
+ */
+static void octeon_irq_ciu_wd_enable(struct irq_data *data)
+{
+ unsigned long flags;
+ unsigned long *pen;
+ int coreid = data->irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
+ int cpu = octeon_cpu_for_coreid(coreid);
+ raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
+
+ raw_spin_lock_irqsave(lock, flags);
+ pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+ __set_bit(coreid, pen);
+ /*
+ * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
+ * the irq.
+ */
+ wmb();
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
+ raw_spin_unlock_irqrestore(lock, flags);
+}
+
+/*
+ * Watchdog interrupts are special. They are associated with a single
+ * core, so we hardwire the affinity to that core.
+ */
+static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
+{
+ int coreid = data->irq - OCTEON_IRQ_WDOG0;
+ int cpu = octeon_cpu_for_coreid(coreid);
+
+ set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+ cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
+}
+
+static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
+ .name = "CIU-W",
+ .irq_enable = octeon_irq_ciu1_wd_enable_v2,
+ .irq_disable = octeon_irq_ciu_disable_all_v2,
+ .irq_mask = octeon_irq_ciu_disable_local_v2,
+ .irq_unmask = octeon_irq_ciu_enable_local_v2,
+};
+
+static struct irq_chip octeon_irq_chip_ciu_wd = {
+ .name = "CIU-W",
+ .irq_enable = octeon_irq_ciu_wd_enable,
+ .irq_disable = octeon_irq_ciu_disable_all,
+ .irq_mask = octeon_irq_ciu_disable_local,
+ .irq_unmask = octeon_irq_ciu_enable_local,
+};
+
+static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
+{
+ bool edge = false;
+
+ if (line == 0)
+ switch (bit) {
+ case 48 ... 49: /* GMX DRP */
+ case 50: /* IPD_DRP */
+ case 52 ... 55: /* Timers */
+ case 58: /* MPI */
+ edge = true;
+ break;
+ default:
+ break;
+ }
+ else /* line == 1 */
+ switch (bit) {
+ case 47: /* PTP */
+ edge = true;
+ break;
+ default:
+ break;
+ }
+ return edge;
+}
+
+struct octeon_irq_gpio_domain_data {
+ unsigned int base_hwirq;
+};
+
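+/*
+ * Illustrative device-tree fragment (the node label is hypothetical,
+ * not taken from a real board file):
+ *
+ * interrupt-parent = <&gpio>;
+ * interrupts = <4 8>;
+ *
+ * The xlate callback below decodes this as GPIO pin 4 (intspec[0]) with
+ * trigger value 8 (intspec[1]), i.e. IRQ_TYPE_LEVEL_LOW.
+ */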
+static int octeon_irq_gpio_xlat(struct irq_domain *d,
+ struct device_node *node,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ unsigned int type;
+ unsigned int pin;
+ unsigned int trigger;
+
+ if (irq_domain_get_of_node(d) != node)
+ return -EINVAL;
+
+ if (intsize < 2)
+ return -EINVAL;
+
+ pin = intspec[0];
+ if (pin >= 16)
+ return -EINVAL;
+
+ trigger = intspec[1];
+
+ switch (trigger) {
+ case 1:
+ type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case 2:
+ type = IRQ_TYPE_EDGE_FALLING;
+ break;
+ case 4:
+ type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case 8:
+ type = IRQ_TYPE_LEVEL_LOW;
+ break;
+ default:
+ pr_err("Error: (%pOFn) Invalid irq trigger specification: %x\n",
+ node,
+ trigger);
+ type = IRQ_TYPE_LEVEL_LOW;
+ break;
+ }
+ *out_type = type;
+ *out_hwirq = pin;
+
+ return 0;
+}
+
+static int octeon_irq_ciu_xlat(struct irq_domain *d,
+ struct device_node *node,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ unsigned int ciu, bit;
+ struct octeon_irq_ciu_domain_data *dd = d->host_data;
+
+ ciu = intspec[0];
+ bit = intspec[1];
+
+ if (ciu >= dd->num_sum || bit > 63)
+ return -EINVAL;
+
+ *out_hwirq = (ciu << 6) | bit;
+ *out_type = 0;
+
+ return 0;
+}
+
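+/*
+ * CIU hwirqs encode the sum line in the upper bits and the bit position
+ * in the low 6 bits: hwirq = (line << 6) | bit.  For example, bit 47 of
+ * sum line 1 (the PTP interrupt) becomes hwirq (1 << 6) | 47 = 111;
+ * octeon_irq_ciu_map() below recovers line and bit with hw >> 6 and
+ * hw & 63.
+ */
+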
+static struct irq_chip *octeon_irq_ciu_chip;
+static struct irq_chip *octeon_irq_ciu_chip_edge;
+static struct irq_chip *octeon_irq_gpio_chip;
+
+static int octeon_irq_ciu_map(struct irq_domain *d,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ int rv;
+ unsigned int line = hw >> 6;
+ unsigned int bit = hw & 63;
+ struct octeon_irq_ciu_domain_data *dd = d->host_data;
+
+ if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
+ return -EINVAL;
+
+ if (line == 2) {
+ if (octeon_irq_ciu_is_edge(line, bit))
+ rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+ &octeon_irq_chip_ciu_sum2_edge,
+ handle_edge_irq);
+ else
+ rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+ &octeon_irq_chip_ciu_sum2,
+ handle_level_irq);
+ } else {
+ if (octeon_irq_ciu_is_edge(line, bit))
+ rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+ octeon_irq_ciu_chip_edge,
+ handle_edge_irq);
+ else
+ rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+ octeon_irq_ciu_chip,
+ handle_level_irq);
+ }
+ return rv;
+}
+
+static int octeon_irq_gpio_map(struct irq_domain *d,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
+ unsigned int line, bit;
+ int r;
+
+ line = (hw + gpiod->base_hwirq) >> 6;
+ bit = (hw + gpiod->base_hwirq) & 63;
+ if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
+ octeon_irq_ciu_to_irq[line][bit] != 0)
+ return -EINVAL;
+
+ /*
+ * Default to handle_level_irq. If the DT contains a different
+ * trigger type, it will call the irq_set_type callback and
+ * the handler gets updated.
+ */
+ r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
+ octeon_irq_gpio_chip, handle_level_irq);
+ return r;
+}
+
+static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
+ .map = octeon_irq_ciu_map,
+ .unmap = octeon_irq_free_cd,
+ .xlate = octeon_irq_ciu_xlat,
+};
+
+static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
+ .map = octeon_irq_gpio_map,
+ .unmap = octeon_irq_free_cd,
+ .xlate = octeon_irq_gpio_xlat,
+};
+
+static void octeon_irq_ip2_ciu(void)
+{
+ const unsigned long core_id = cvmx_get_core_num();
+ u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));
+
+ ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
+ if (likely(ciu_sum)) {
+ int bit = fls64(ciu_sum) - 1;
+ int irq = octeon_irq_ciu_to_irq[0][bit];
+ if (likely(irq))
+ do_IRQ(irq);
+ else
+ spurious_interrupt();
+ } else {
+ spurious_interrupt();
+ }
+}
+
+static void octeon_irq_ip3_ciu(void)
+{
+ u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);
+
+ ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
+ if (likely(ciu_sum)) {
+ int bit = fls64(ciu_sum) - 1;
+ int irq = octeon_irq_ciu_to_irq[1][bit];
+ if (likely(irq))
+ do_IRQ(irq);
+ else
+ spurious_interrupt();
+ } else {
+ spurious_interrupt();
+ }
+}
+
+static void octeon_irq_ip4_ciu(void)
+{
+ int coreid = cvmx_get_core_num();
+ u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
+ u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));
+
+ ciu_sum &= ciu_en;
+ if (likely(ciu_sum)) {
+ int bit = fls64(ciu_sum) - 1;
+ int irq = octeon_irq_ciu_to_irq[2][bit];
+
+ if (likely(irq))
+ do_IRQ(irq);
+ else
+ spurious_interrupt();
+ } else {
+ spurious_interrupt();
+ }
+}
+
+static bool octeon_irq_use_ip4;
+
+static void octeon_irq_local_enable_ip4(void *arg)
+{
+ set_c0_status(STATUSF_IP4);
+}
+
+static void octeon_irq_ip4_mask(void)
+{
+ clear_c0_status(STATUSF_IP4);
+ spurious_interrupt();
+}
+
+static void (*octeon_irq_ip2)(void);
+static void (*octeon_irq_ip3)(void);
+static void (*octeon_irq_ip4)(void);
+
+void (*octeon_irq_setup_secondary)(void);
+
+void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
+{
+ octeon_irq_ip4 = h;
+ octeon_irq_use_ip4 = true;
+ on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
+}
+
+static void octeon_irq_percpu_enable(void)
+{
+ irq_cpu_online();
+}
+
+static void octeon_irq_init_ciu_percpu(void)
+{
+ int coreid = cvmx_get_core_num();
+
+ __this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
+ __this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
+ wmb();
+ raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
+ /*
+ * Disable All CIU Interrupts. The ones we need will be
+ * enabled later. Read the SUM register so we know the write
+ * completed.
+ */
+ cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
+ cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
+}
+
+static void octeon_irq_init_ciu2_percpu(void)
+{
+ u64 regx, ipx;
+ int coreid = cvmx_get_core_num();
+ u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);
+
+ /*
+ * Disable All CIU2 Interrupts. The ones we need will be
+ * enabled later. Read the SUM register so we know the write
+ * completed.
+ *
+ * There are 9 registers and 3 IPX levels with strides 0x1000
+ * and 0x200 respectively. Use loops to clear them.
+ */
+ for (regx = 0; regx <= 0x8000; regx += 0x1000) {
+ for (ipx = 0; ipx <= 0x400; ipx += 0x200)
+ cvmx_write_csr(base + regx + ipx, 0);
+ }
+
+ cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
+}
+
+static void octeon_irq_setup_secondary_ciu(void)
+{
+ octeon_irq_init_ciu_percpu();
+ octeon_irq_percpu_enable();
+
+ /* Enable the CIU lines */
+ set_c0_status(STATUSF_IP3 | STATUSF_IP2);
+ if (octeon_irq_use_ip4)
+ set_c0_status(STATUSF_IP4);
+ else
+ clear_c0_status(STATUSF_IP4);
+}
+
+static void octeon_irq_setup_secondary_ciu2(void)
+{
+ octeon_irq_init_ciu2_percpu();
+ octeon_irq_percpu_enable();
+
+ /* Enable the CIU lines */
+ set_c0_status(STATUSF_IP3 | STATUSF_IP2);
+ if (octeon_irq_use_ip4)
+ set_c0_status(STATUSF_IP4);
+ else
+ clear_c0_status(STATUSF_IP4);
+}
+
+static int __init octeon_irq_init_ciu(
+ struct device_node *ciu_node, struct device_node *parent)
+{
+ unsigned int i, r;
+ struct irq_chip *chip;
+ struct irq_chip *chip_edge;
+ struct irq_chip *chip_mbox;
+ struct irq_chip *chip_wd;
+ struct irq_domain *ciu_domain = NULL;
+ struct octeon_irq_ciu_domain_data *dd;
+
+ dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+ if (!dd)
+ return -ENOMEM;
+
+ octeon_irq_init_ciu_percpu();
+ octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
+
+ octeon_irq_ip2 = octeon_irq_ip2_ciu;
+ octeon_irq_ip3 = octeon_irq_ip3_ciu;
+ if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
+ && !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ octeon_irq_ip4 = octeon_irq_ip4_ciu;
+ dd->num_sum = 3;
+ octeon_irq_use_ip4 = true;
+ } else {
+ octeon_irq_ip4 = octeon_irq_ip4_mask;
+ dd->num_sum = 2;
+ octeon_irq_use_ip4 = false;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
+ OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
+ chip = &octeon_irq_chip_ciu_v2;
+ chip_edge = &octeon_irq_chip_ciu_v2_edge;
+ chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
+ chip_wd = &octeon_irq_chip_ciu_wd_v2;
+ octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
+ } else {
+ chip = &octeon_irq_chip_ciu;
+ chip_edge = &octeon_irq_chip_ciu_edge;
+ chip_mbox = &octeon_irq_chip_ciu_mbox;
+ chip_wd = &octeon_irq_chip_ciu_wd;
+ octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
+ }
+ octeon_irq_ciu_chip = chip;
+ octeon_irq_ciu_chip_edge = chip_edge;
+
+ /* Mips internal */
+ octeon_irq_init_core();
+
+ ciu_domain = irq_domain_add_tree(
+ ciu_node, &octeon_irq_domain_ciu_ops, dd);
+ irq_set_default_host(ciu_domain);
+
+ /* CIU_0 */
+ for (i = 0; i < 16; i++) {
+ r = octeon_irq_force_ciu_mapping(
+ ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
+ if (r)
+ goto err;
+ }
+
+ r = octeon_irq_set_ciu_mapping(
+ OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
+ if (r)
+ goto err;
+ r = octeon_irq_set_ciu_mapping(
+ OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
+ if (r)
+ goto err;
+
+ for (i = 0; i < 4; i++) {
+ r = octeon_irq_force_ciu_mapping(
+ ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
+ if (r)
+ goto err;
+ }
+ for (i = 0; i < 4; i++) {
+ r = octeon_irq_force_ciu_mapping(
+ ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
+ if (r)
+ goto err;
+ }
+
+ r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
+ if (r)
+ goto err;
+
+ r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
+ if (r)
+ goto err;
+
+ for (i = 0; i < 4; i++) {
+ r = octeon_irq_force_ciu_mapping(
+ ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
+ if (r)
+ goto err;
+ }
+
+ r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
+ if (r)
+ goto err;
+
+ /* CIU_1 */
+ for (i = 0; i < 16; i++) {
+ r = octeon_irq_set_ciu_mapping(
+ i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
+ handle_level_irq);
+ if (r)
+ goto err;
+ }
+
+ /* Enable the CIU lines */
+ set_c0_status(STATUSF_IP3 | STATUSF_IP2);
+ if (octeon_irq_use_ip4)
+ set_c0_status(STATUSF_IP4);
+ else
+ clear_c0_status(STATUSF_IP4);
+
+ return 0;
+err:
+ return r;
+}
+
+static int __init octeon_irq_init_gpio(
+ struct device_node *gpio_node, struct device_node *parent)
+{
+ struct octeon_irq_gpio_domain_data *gpiod;
+ u32 interrupt_cells;
+ unsigned int base_hwirq;
+ int r;
+
+ r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
+ if (r)
+ return r;
+
+ if (interrupt_cells == 1) {
+ u32 v;
+
+ r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
+ if (r) {
+ pr_warn("No \"interrupts\" property.\n");
+ return r;
+ }
+ base_hwirq = v;
+ } else if (interrupt_cells == 2) {
+ u32 v0, v1;
+
+ r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
+ if (r) {
+ pr_warn("No \"interrupts\" property.\n");
+ return r;
+ }
+ r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
+ if (r) {
+ pr_warn("No \"interrupts\" property.\n");
+ return r;
+ }
+ base_hwirq = (v0 << 6) | v1;
+ } else {
+ pr_warn("Bad \"#interrupt-cells\" property: %u\n",
+ interrupt_cells);
+ return -EINVAL;
+ }
+
+ gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
+ if (gpiod) {
+ /* gpio domain host_data is the base hwirq number. */
+ gpiod->base_hwirq = base_hwirq;
+ irq_domain_add_linear(
+ gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
+ } else {
+ pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Clear the OF_POPULATED flag that was set by of_irq_init()
+ * so that all GPIO devices will be probed.
+ */
+ of_node_clear_flag(gpio_node, OF_POPULATED);
+
+ return 0;
+}
+
+/*
+ * Watchdog interrupts are special. They are associated with a single
+ * core, so we hardwire the affinity to that core.
+ */
+static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
+{
+ u64 mask;
+ u64 en_addr;
+ int coreid = data->irq - OCTEON_IRQ_WDOG0;
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->bit);
+
+ en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+ (0x1000ull * cd->line);
+ cvmx_write_csr(en_addr, mask);
+}
+
+static void octeon_irq_ciu2_enable(struct irq_data *data)
+{
+ u64 mask;
+ u64 en_addr;
+ int cpu = next_cpu_for_irq(data);
+ int coreid = octeon_coreid_for_cpu(cpu);
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->bit);
+
+ en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+ (0x1000ull * cd->line);
+ cvmx_write_csr(en_addr, mask);
+}
+
+static void octeon_irq_ciu2_enable_local(struct irq_data *data)
+{
+ u64 mask;
+ u64 en_addr;
+ int coreid = cvmx_get_core_num();
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->bit);
+
+ en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+ (0x1000ull * cd->line);
+ cvmx_write_csr(en_addr, mask);
+}
+
+static void octeon_irq_ciu2_disable_local(struct irq_data *data)
+{
+ u64 mask;
+ u64 en_addr;
+ int coreid = cvmx_get_core_num();
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->bit);
+
+ en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
+ (0x1000ull * cd->line);
+ cvmx_write_csr(en_addr, mask);
+}
+
+static void octeon_irq_ciu2_ack(struct irq_data *data)
+{
+ u64 mask;
+ u64 en_addr;
+ int coreid = cvmx_get_core_num();
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->bit);
+
+ en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
+ cvmx_write_csr(en_addr, mask);
+}
+
+static void octeon_irq_ciu2_disable_all(struct irq_data *data)
+{
+ int cpu;
+ u64 mask;
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd->bit);
+
+ for_each_online_cpu(cpu) {
+ u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
+ octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
+ cvmx_write_csr(en_addr, mask);
+ }
+}
+
+static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
+{
+ int cpu;
+ u64 mask;
+
+ mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
+
+ for_each_online_cpu(cpu) {
+ u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
+ octeon_coreid_for_cpu(cpu));
+ cvmx_write_csr(en_addr, mask);
+ }
+}
+
+static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
+{
+ int cpu;
+ u64 mask;
+
+ mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
+
+ for_each_online_cpu(cpu) {
+ u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
+ octeon_coreid_for_cpu(cpu));
+ cvmx_write_csr(en_addr, mask);
+ }
+}
+
+static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
+{
+ u64 mask;
+ u64 en_addr;
+ int coreid = cvmx_get_core_num();
+
+ mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
+ en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
+ cvmx_write_csr(en_addr, mask);
+}
+
+static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
+{
+ u64 mask;
+ u64 en_addr;
+ int coreid = cvmx_get_core_num();
+
+ mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
+ en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
+ cvmx_write_csr(en_addr, mask);
+}
+
+#ifdef CONFIG_SMP
+static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
+ const struct cpumask *dest, bool force)
+{
+ int cpu;
+ bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+ u64 mask;
+ struct octeon_ciu_chip_data *cd;
+
+ if (!enable_one)
+ return 0;
+
+ cd = irq_data_get_irq_chip_data(data);
+ mask = 1ull << cd->bit;
+
+ for_each_online_cpu(cpu) {
+ u64 en_addr;
+ if (cpumask_test_cpu(cpu, dest) && enable_one) {
+ enable_one = false;
+ en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
+ octeon_coreid_for_cpu(cpu)) +
+ (0x1000ull * cd->line);
+ } else {
+ en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
+ octeon_coreid_for_cpu(cpu)) +
+ (0x1000ull * cd->line);
+ }
+ cvmx_write_csr(en_addr, mask);
+ }
+
+ return 0;
+}
+#endif
+
+static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
+{
+ octeon_irq_gpio_setup(data);
+ octeon_irq_ciu2_enable(data);
+}
+
+static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
+{
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
+
+ octeon_irq_ciu2_disable_all(data);
+}
+
+static struct irq_chip octeon_irq_chip_ciu2 = {
+ .name = "CIU2-E",
+ .irq_enable = octeon_irq_ciu2_enable,
+ .irq_disable = octeon_irq_ciu2_disable_all,
+ .irq_mask = octeon_irq_ciu2_disable_local,
+ .irq_unmask = octeon_irq_ciu2_enable,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu2_set_affinity,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu2_edge = {
+ .name = "CIU2-E",
+ .irq_enable = octeon_irq_ciu2_enable,
+ .irq_disable = octeon_irq_ciu2_disable_all,
+ .irq_ack = octeon_irq_ciu2_ack,
+ .irq_mask = octeon_irq_ciu2_disable_local,
+ .irq_unmask = octeon_irq_ciu2_enable,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu2_set_affinity,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu2_mbox = {
+ .name = "CIU2-M",
+ .irq_enable = octeon_irq_ciu2_mbox_enable_all,
+ .irq_disable = octeon_irq_ciu2_mbox_disable_all,
+ .irq_ack = octeon_irq_ciu2_mbox_disable_local,
+ .irq_eoi = octeon_irq_ciu2_mbox_enable_local,
+
+ .irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
+ .irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
+ .flags = IRQCHIP_ONOFFLINE_ENABLED,
+};
+
+static struct irq_chip octeon_irq_chip_ciu2_wd = {
+ .name = "CIU2-W",
+ .irq_enable = octeon_irq_ciu2_wd_enable,
+ .irq_disable = octeon_irq_ciu2_disable_all,
+ .irq_mask = octeon_irq_ciu2_disable_local,
+ .irq_unmask = octeon_irq_ciu2_enable_local,
+};
+
+static struct irq_chip octeon_irq_chip_ciu2_gpio = {
+ .name = "CIU-GPIO",
+ .irq_enable = octeon_irq_ciu2_enable_gpio,
+ .irq_disable = octeon_irq_ciu2_disable_gpio,
+ .irq_ack = octeon_irq_ciu_gpio_ack,
+ .irq_mask = octeon_irq_ciu2_disable_local,
+ .irq_unmask = octeon_irq_ciu2_enable,
+ .irq_set_type = octeon_irq_ciu_gpio_set_type,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu2_set_affinity,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static int octeon_irq_ciu2_xlat(struct irq_domain *d,
+ struct device_node *node,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ unsigned int ciu, bit;
+
+ ciu = intspec[0];
+ bit = intspec[1];
+
+ *out_hwirq = (ciu << 6) | bit;
+ *out_type = 0;
+
+ return 0;
+}
+
+static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
+{
+ bool edge = false;
+
+ if (line == 3) /* MIO */
+ switch (bit) {
+ case 2: /* IPD_DRP */
+ case 8 ... 11: /* Timers */
+ case 48: /* PTP */
+ edge = true;
+ break;
+ default:
+ break;
+ }
+ else if (line == 6) /* PKT */
+ switch (bit) {
+ case 52 ... 53: /* ILK_DRP */
+ case 8 ... 12: /* GMX_DRP */
+ edge = true;
+ break;
+ default:
+ break;
+ }
+ return edge;
+}
+
+static int octeon_irq_ciu2_map(struct irq_domain *d,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ unsigned int line = hw >> 6;
+ unsigned int bit = hw & 63;
+
+ /*
+ * Don't map irq if it is reserved for GPIO.
+ * (Line 7 holds the GPIO lines.)
+ */
+ if (line == 7)
+ return 0;
+
+ if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
+ return -EINVAL;
+
+ if (octeon_irq_ciu2_is_edge(line, bit))
+ octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+ &octeon_irq_chip_ciu2_edge,
+ handle_edge_irq);
+ else
+ octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+ &octeon_irq_chip_ciu2,
+ handle_level_irq);
+
+ return 0;
+}
+
+static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
+ .map = octeon_irq_ciu2_map,
+ .unmap = octeon_irq_free_cd,
+ .xlate = octeon_irq_ciu2_xlat,
+};
+
+static void octeon_irq_ciu2(void)
+{
+ int line;
+ int bit;
+ int irq;
+ u64 src_reg, src, sum;
+ const unsigned long core_id = cvmx_get_core_num();
+
+ sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;
+
+ if (unlikely(!sum))
+ goto spurious;
+
+ line = fls64(sum) - 1;
+ src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
+ src = cvmx_read_csr(src_reg);
+
+ if (unlikely(!src))
+ goto spurious;
+
+ bit = fls64(src) - 1;
+ irq = octeon_irq_ciu_to_irq[line][bit];
+ if (unlikely(!irq))
+ goto spurious;
+
+ do_IRQ(irq);
+ goto out;
+
+spurious:
+ spurious_interrupt();
+out:
+ /*
+ * CN68XX pass 1.x has an erratum: accessing the ACK registers
+ * can stop interrupts from propagating.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
+ else
+ cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
+ return;
+}
+
+static void octeon_irq_ciu2_mbox(void)
+{
+ int line;
+
+ const unsigned long core_id = cvmx_get_core_num();
+ u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;
+
+ if (unlikely(!sum))
+ goto spurious;
+
+ line = fls64(sum) - 1;
+
+ do_IRQ(OCTEON_IRQ_MBOX0 + line);
+ goto out;
+
+spurious:
+ spurious_interrupt();
+out:
+ /*
+ * CN68XX pass 1.x has an erratum: accessing the ACK registers
+ * can stop interrupts from propagating.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
+ else
+ cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
+ return;
+}
+
+static int __init octeon_irq_init_ciu2(
+ struct device_node *ciu_node, struct device_node *parent)
+{
+ unsigned int i, r;
+ struct irq_domain *ciu_domain = NULL;
+
+ octeon_irq_init_ciu2_percpu();
+ octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;
+
+ octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
+ octeon_irq_ip2 = octeon_irq_ciu2;
+ octeon_irq_ip3 = octeon_irq_ciu2_mbox;
+ octeon_irq_ip4 = octeon_irq_ip4_mask;
+
+ /* Mips internal */
+ octeon_irq_init_core();
+
+ ciu_domain = irq_domain_add_tree(
+ ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
+ irq_set_default_host(ciu_domain);
+
+ /* CIU2 */
+ for (i = 0; i < 64; i++) {
+ r = octeon_irq_force_ciu_mapping(
+ ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
+ if (r)
+ goto err;
+ }
+
+ for (i = 0; i < 32; i++) {
+ r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
+ &octeon_irq_chip_ciu2_wd, handle_level_irq);
+ if (r)
+ goto err;
+ }
+
+ for (i = 0; i < 4; i++) {
+ r = octeon_irq_force_ciu_mapping(
+ ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
+ if (r)
+ goto err;
+ }
+
+ for (i = 0; i < 4; i++) {
+ r = octeon_irq_force_ciu_mapping(
+ ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
+ if (r)
+ goto err;
+ }
+
+ for (i = 0; i < 4; i++) {
+ r = octeon_irq_force_ciu_mapping(
+ ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
+ if (r)
+ goto err;
+ }
+
+ irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
+ irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
+ irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
+ irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
+
+ /* Enable the CIU lines */
+ set_c0_status(STATUSF_IP3 | STATUSF_IP2);
+ clear_c0_status(STATUSF_IP4);
+ return 0;
+err:
+ return r;
+}
+
+struct octeon_irq_cib_host_data {
+ raw_spinlock_t lock;
+ u64 raw_reg;
+ u64 en_reg;
+ int max_bits;
+};
+
+struct octeon_irq_cib_chip_data {
+ struct octeon_irq_cib_host_data *host_data;
+ int bit;
+};
+
+static void octeon_irq_cib_enable(struct irq_data *data)
+{
+ unsigned long flags;
+ u64 en;
+ struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
+ struct octeon_irq_cib_host_data *host_data = cd->host_data;
+
+ raw_spin_lock_irqsave(&host_data->lock, flags);
+ en = cvmx_read_csr(host_data->en_reg);
+ en |= 1ull << cd->bit;
+ cvmx_write_csr(host_data->en_reg, en);
+ raw_spin_unlock_irqrestore(&host_data->lock, flags);
+}
+
+static void octeon_irq_cib_disable(struct irq_data *data)
+{
+ unsigned long flags;
+ u64 en;
+ struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
+ struct octeon_irq_cib_host_data *host_data = cd->host_data;
+
+ raw_spin_lock_irqsave(&host_data->lock, flags);
+ en = cvmx_read_csr(host_data->en_reg);
+ en &= ~(1ull << cd->bit);
+ cvmx_write_csr(host_data->en_reg, en);
+ raw_spin_unlock_irqrestore(&host_data->lock, flags);
+}
+
+static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
+{
+ irqd_set_trigger_type(data, t);
+ return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip octeon_irq_chip_cib = {
+ .name = "CIB",
+ .irq_enable = octeon_irq_cib_enable,
+ .irq_disable = octeon_irq_cib_disable,
+ .irq_mask = octeon_irq_cib_disable,
+ .irq_unmask = octeon_irq_cib_enable,
+ .irq_set_type = octeon_irq_cib_set_type,
+};
+
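+/*
+ * Illustrative example (hypothetical, not from a real device tree): a
+ * child of a CIB node specifying
+ *
+ * interrupts = <5 4>;
+ *
+ * is translated below to hwirq 5 (bit 5 of the raw/en registers) with
+ * IRQ_TYPE_LEVEL_HIGH; a second cell of 1 would select
+ * IRQ_TYPE_EDGE_RISING.
+ */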
+static int octeon_irq_cib_xlat(struct irq_domain *d,
+ struct device_node *node,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ unsigned int type = 0;
+
+ if (intsize == 2)
+ type = intspec[1];
+
+ switch (type) {
+ case 0: /* unofficial value, but we might as well let it work. */
+ case 4: /* official value for level triggering. */
+ *out_type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case 1: /* official value for edge triggering. */
+ *out_type = IRQ_TYPE_EDGE_RISING;
+ break;
+ default: /* Nothing else is acceptable. */
+ return -EINVAL;
+ }
+
+ *out_hwirq = intspec[0];
+
+ return 0;
+}
+
+static int octeon_irq_cib_map(struct irq_domain *d,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ struct octeon_irq_cib_host_data *host_data = d->host_data;
+ struct octeon_irq_cib_chip_data *cd;
+
+ if (hw >= host_data->max_bits) {
+ pr_err("ERROR: %s mapping %u is too big!\n",
+ irq_domain_get_of_node(d)->name, (unsigned)hw);
+ return -EINVAL;
+ }
+
+ cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
+ cd->host_data = host_data;
+ cd->bit = hw;
+
+ irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
+ handle_simple_irq);
+ irq_set_chip_data(virq, cd);
+ return 0;
+}
+
+static struct irq_domain_ops octeon_irq_domain_cib_ops = {
+ .map = octeon_irq_cib_map,
+ .unmap = octeon_irq_free_cd,
+ .xlate = octeon_irq_cib_xlat,
+};
+
+/* Chain to real handler. */
+static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
+{
+ u64 en;
+ u64 raw;
+ u64 bits;
+ int i;
+ int irq;
+ struct irq_domain *cib_domain = data;
+ struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;
+
+ en = cvmx_read_csr(host_data->en_reg);
+ raw = cvmx_read_csr(host_data->raw_reg);
+
+ bits = en & raw;
+
+ for (i = 0; i < host_data->max_bits; i++) {
+ if ((bits & 1ull << i) == 0)
+ continue;
+ irq = irq_find_mapping(cib_domain, i);
+ if (!irq) {
+ unsigned long flags;
+
+ pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
+ i, host_data->raw_reg);
+ raw_spin_lock_irqsave(&host_data->lock, flags);
+ en = cvmx_read_csr(host_data->en_reg);
+ en &= ~(1ull << i);
+ cvmx_write_csr(host_data->en_reg, en);
+ cvmx_write_csr(host_data->raw_reg, 1ull << i);
+ raw_spin_unlock_irqrestore(&host_data->lock, flags);
+ } else {
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_data *irq_data = irq_desc_get_irq_data(desc);
+ /* If edge, acknowledge the bit we will be sending. */
+ if (irqd_get_trigger_type(irq_data) &
+ IRQ_TYPE_EDGE_BOTH)
+ cvmx_write_csr(host_data->raw_reg, 1ull << i);
+ generic_handle_irq_desc(desc);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __init octeon_irq_init_cib(struct device_node *ciu_node,
+ struct device_node *parent)
+{
+ const __be32 *addr;
+ u32 val;
+ struct octeon_irq_cib_host_data *host_data;
+ int parent_irq;
+ int r;
+ struct irq_domain *cib_domain;
+
+ parent_irq = irq_of_parse_and_map(ciu_node, 0);
+ if (!parent_irq) {
+ pr_err("ERROR: Couldn't acquire parent_irq for %pOFn\n",
+ ciu_node);
+ return -EINVAL;
+ }
+
+ host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
+ if (!host_data)
+ return -ENOMEM;
+ raw_spin_lock_init(&host_data->lock);
+
+ addr = of_get_address(ciu_node, 0, NULL, NULL);
+ if (!addr) {
+ pr_err("ERROR: Couldn't acquire reg(0) %pOFn\n", ciu_node);
+ return -EINVAL;
+ }
+ host_data->raw_reg = (u64)phys_to_virt(
+ of_translate_address(ciu_node, addr));
+
+ addr = of_get_address(ciu_node, 1, NULL, NULL);
+ if (!addr) {
+ pr_err("ERROR: Couldn't acquire reg(1) %pOFn\n", ciu_node);
+ return -EINVAL;
+ }
+ host_data->en_reg = (u64)phys_to_virt(
+ of_translate_address(ciu_node, addr));
+
+ r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
+ if (r) {
+ pr_err("ERROR: Couldn't read cavium,max-bits from %pOFn\n",
+ ciu_node);
+ return r;
+ }
+ host_data->max_bits = val;
+
+ cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
+ &octeon_irq_domain_cib_ops,
+ host_data);
+ if (!cib_domain) {
+ pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
+ return -ENOMEM;
+ }
+
+ cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
+ cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */
+
+ r = request_irq(parent_irq, octeon_irq_cib_handler,
+ IRQF_NO_THREAD, "cib", cib_domain);
+ if (r) {
+ pr_err("request_irq cib failed %d\n", r);
+ return r;
+ }
+ pr_info("CIB interrupt controller probed: %llx %d\n",
+ host_data->raw_reg, host_data->max_bits);
+ return 0;
+}
+
+int octeon_irq_ciu3_xlat(struct irq_domain *d,
+ struct device_node *node,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ struct octeon_ciu3_info *ciu3_info = d->host_data;
+ unsigned int hwirq, type, intsn_major;
+ union cvmx_ciu3_iscx_ctl isc;
+
+ if (intsize < 2)
+ return -EINVAL;
+ hwirq = intspec[0];
+ type = intspec[1];
+
+ if (hwirq >= (1 << 20))
+ return -EINVAL;
+
+ intsn_major = hwirq >> 12;
+ switch (intsn_major) {
+ case 0x04: /* Software handled separately. */
+ return -EINVAL;
+ default:
+ break;
+ }
+
+ isc.u64 = cvmx_read_csr(ciu3_info->ciu3_addr + CIU3_ISC_CTL(hwirq));
+ if (!isc.s.imp)
+ return -EINVAL;
+
+ switch (type) {
+ case 4: /* official value for level triggering. */
+ *out_type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case 0: /* unofficial value, but we might as well let it work. */
+ case 1: /* official value for edge triggering. */
+ *out_type = IRQ_TYPE_EDGE_RISING;
+ break;
+ default: /* Nothing else is acceptable. */
+ return -EINVAL;
+ }
+
+ *out_hwirq = hwirq;
+
+ return 0;
+}
+
+void octeon_irq_ciu3_enable(struct irq_data *data)
+{
+ int cpu;
+ union cvmx_ciu3_iscx_ctl isc_ctl;
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+ u64 isc_ctl_addr;
+
+ struct octeon_ciu_chip_data *cd;
+
+ cpu = next_cpu_for_irq(data);
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.en = 1;
+ cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
+
+ isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
+ isc_ctl.u64 = 0;
+ isc_ctl.s.en = 1;
+ isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
+ cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
+ cvmx_read_csr(isc_ctl_addr);
+}
+
+void octeon_irq_ciu3_disable(struct irq_data *data)
+{
+ u64 isc_ctl_addr;
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.en = 1;
+
+ isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
+ cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
+ cvmx_write_csr(isc_ctl_addr, 0);
+ cvmx_read_csr(isc_ctl_addr);
+}
+
+void octeon_irq_ciu3_ack(struct irq_data *data)
+{
+ u64 isc_w1c_addr;
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+ struct octeon_ciu_chip_data *cd;
+ u32 trigger_type = irqd_get_trigger_type(data);
+
+ /*
+ * We use a single irq_chip, so we have to do nothing to ack a
+ * level interrupt.
+ */
+ if (!(trigger_type & IRQ_TYPE_EDGE_BOTH))
+ return;
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.raw = 1;
+
+ isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
+ cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+ cvmx_read_csr(isc_w1c_addr);
+}
+
+void octeon_irq_ciu3_mask(struct irq_data *data)
+{
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+ u64 isc_w1c_addr;
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.en = 1;
+
+ isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
+ cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+ cvmx_read_csr(isc_w1c_addr);
+}
+
+void octeon_irq_ciu3_mask_ack(struct irq_data *data)
+{
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+ u64 isc_w1c_addr;
+ struct octeon_ciu_chip_data *cd;
+ u32 trigger_type = irqd_get_trigger_type(data);
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.en = 1;
+
+ /*
+ * We use a single irq_chip, so only ack an edge (!level)
+ * interrupt.
+ */
+ if (trigger_type & IRQ_TYPE_EDGE_BOTH)
+ isc_w1c.s.raw = 1;
+
+ isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
+ cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+ cvmx_read_csr(isc_w1c_addr);
+}
+
+#ifdef CONFIG_SMP
+static int octeon_irq_ciu3_set_affinity(struct irq_data *data,
+ const struct cpumask *dest, bool force)
+{
+ union cvmx_ciu3_iscx_ctl isc_ctl;
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+ u64 isc_ctl_addr;
+ int cpu;
+ bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+ struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+ if (!cpumask_subset(dest, cpumask_of_node(cd->ciu_node)))
+ return -EINVAL;
+
+ if (!enable_one)
+ return IRQ_SET_MASK_OK;
+
+ cd = irq_data_get_irq_chip_data(data);
+ cpu = cpumask_first(dest);
+ if (cpu >= nr_cpu_ids)
+ cpu = smp_processor_id();
+ cd->current_cpu = cpu;
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.en = 1;
+ cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
+
+ isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
+ isc_ctl.u64 = 0;
+ isc_ctl.s.en = 1;
+ isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
+ cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
+ cvmx_read_csr(isc_ctl_addr);
+
+ return IRQ_SET_MASK_OK;
+}
+#endif
+
+static struct irq_chip octeon_irq_chip_ciu3 = {
+ .name = "CIU3",
+ .irq_startup = edge_startup,
+ .irq_enable = octeon_irq_ciu3_enable,
+ .irq_disable = octeon_irq_ciu3_disable,
+ .irq_ack = octeon_irq_ciu3_ack,
+ .irq_mask = octeon_irq_ciu3_mask,
+ .irq_mask_ack = octeon_irq_ciu3_mask_ack,
+ .irq_unmask = octeon_irq_ciu3_enable,
+ .irq_set_type = octeon_irq_ciu_set_type,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu3_set_affinity,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+int octeon_irq_ciu3_mapx(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw, struct irq_chip *chip)
+{
+ struct octeon_ciu3_info *ciu3_info = d->host_data;
+ struct octeon_ciu_chip_data *cd = kzalloc_node(sizeof(*cd), GFP_KERNEL,
+ ciu3_info->node);
+ if (!cd)
+ return -ENOMEM;
+ cd->intsn = hw;
+ cd->current_cpu = -1;
+ cd->ciu3_addr = ciu3_info->ciu3_addr;
+ cd->ciu_node = ciu3_info->node;
+ irq_set_chip_and_handler(virq, chip, handle_edge_irq);
+ irq_set_chip_data(virq, cd);
+
+ return 0;
+}
+
+static int octeon_irq_ciu3_map(struct irq_domain *d,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ return octeon_irq_ciu3_mapx(d, virq, hw, &octeon_irq_chip_ciu3);
+}
+
+static struct irq_domain_ops octeon_dflt_domain_ciu3_ops = {
+ .map = octeon_irq_ciu3_map,
+ .unmap = octeon_irq_free_cd,
+ .xlate = octeon_irq_ciu3_xlat,
+};
+
+static void octeon_irq_ciu3_ip2(void)
+{
+ union cvmx_ciu3_destx_pp_int dest_pp_int;
+ struct octeon_ciu3_info *ciu3_info;
+ u64 ciu3_addr;
+
+ ciu3_info = __this_cpu_read(octeon_ciu3_info);
+ ciu3_addr = ciu3_info->ciu3_addr;
+
+ dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(3 * cvmx_get_local_core_num()));
+
+ if (likely(dest_pp_int.s.intr)) {
+ irq_hw_number_t intsn = dest_pp_int.s.intsn;
+ irq_hw_number_t hw;
+ struct irq_domain *domain;
+ /* Get the domain to use from the major block */
+ int block = intsn >> 12;
+ int ret;
+
+ domain = ciu3_info->domain[block];
+ if (ciu3_info->intsn2hw[block])
+ hw = ciu3_info->intsn2hw[block](domain, intsn);
+ else
+ hw = intsn;
+
+ ret = handle_domain_irq(domain, hw, NULL);
+ if (ret < 0) {
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+ u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.en = 1;
+ cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+ cvmx_read_csr(isc_w1c_addr);
+ spurious_interrupt();
+ }
+ } else {
+ spurious_interrupt();
+ }
+}
+
+/*
+ * 10 mbox per core starting from zero.
+ * Base mbox is core * 10
+ */
+static unsigned int octeon_irq_ciu3_base_mbox_intsn(int core)
+{
+ /* SW (mbox) interrupts are major block 0x04 (bits 12..19 of the intsn) */
+ return 0x04000 + CIU3_MBOX_PER_CORE * core;
+}
+
+static unsigned int octeon_irq_ciu3_mbox_intsn_for_core(int core, unsigned int mbox)
+{
+ return octeon_irq_ciu3_base_mbox_intsn(core) + mbox;
+}
+
+static unsigned int octeon_irq_ciu3_mbox_intsn_for_cpu(int cpu, unsigned int mbox)
+{
+ int local_core = octeon_coreid_for_cpu(cpu) & 0x3f;
+
+ return octeon_irq_ciu3_mbox_intsn_for_core(local_core, mbox);
+}
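+
+/*
+ * Worked example (assuming CIU3_MBOX_PER_CORE is 10, per the comment
+ * above): core 1, mailbox 2 maps to intsn 0x04000 + 10 * 1 + 2 = 0x400c.
+ */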
+
+static void octeon_irq_ciu3_mbox(void)
+{
+ union cvmx_ciu3_destx_pp_int dest_pp_int;
+ struct octeon_ciu3_info *ciu3_info;
+ u64 ciu3_addr;
+ int core = cvmx_get_local_core_num();
+
+ ciu3_info = __this_cpu_read(octeon_ciu3_info);
+ ciu3_addr = ciu3_info->ciu3_addr;
+
+ dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(1 + 3 * core));
+
+ if (likely(dest_pp_int.s.intr)) {
+ irq_hw_number_t intsn = dest_pp_int.s.intsn;
+ int mbox = intsn - octeon_irq_ciu3_base_mbox_intsn(core);
+
+ if (likely(mbox >= 0 && mbox < CIU3_MBOX_PER_CORE)) {
+ do_IRQ(mbox + OCTEON_IRQ_MBOX0);
+ } else {
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+ u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.en = 1;
+ cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+ cvmx_read_csr(isc_w1c_addr);
+ spurious_interrupt();
+ }
+ } else {
+ spurious_interrupt();
+ }
+}
+
+void octeon_ciu3_mbox_send(int cpu, unsigned int mbox)
+{
+ struct octeon_ciu3_info *ciu3_info;
+ unsigned int intsn;
+ union cvmx_ciu3_iscx_w1s isc_w1s;
+ u64 isc_w1s_addr;
+
+ if (WARN_ON_ONCE(mbox >= CIU3_MBOX_PER_CORE))
+ return;
+
+ intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
+ ciu3_info = per_cpu(octeon_ciu3_info, cpu);
+ isc_w1s_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1S(intsn);
+
+ isc_w1s.u64 = 0;
+ isc_w1s.s.raw = 1;
+
+ cvmx_write_csr(isc_w1s_addr, isc_w1s.u64);
+ cvmx_read_csr(isc_w1s_addr);
+}
+
+static void octeon_irq_ciu3_mbox_set_enable(struct irq_data *data, int cpu, bool en)
+{
+ struct octeon_ciu3_info *ciu3_info;
+ unsigned int intsn;
+ u64 isc_ctl_addr, isc_w1c_addr;
+ union cvmx_ciu3_iscx_ctl isc_ctl;
+ unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
+
+ intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
+ ciu3_info = per_cpu(octeon_ciu3_info, cpu);
+ isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
+ isc_ctl_addr = ciu3_info->ciu3_addr + CIU3_ISC_CTL(intsn);
+
+ isc_ctl.u64 = 0;
+ isc_ctl.s.en = 1;
+
+ cvmx_write_csr(isc_w1c_addr, isc_ctl.u64);
+ cvmx_write_csr(isc_ctl_addr, 0);
+ if (en) {
+ unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu);
+
+ isc_ctl.u64 = 0;
+ isc_ctl.s.en = 1;
+ isc_ctl.s.idt = idt;
+ cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
+ }
+ cvmx_read_csr(isc_ctl_addr);
+}
+
+static void octeon_irq_ciu3_mbox_enable(struct irq_data *data)
+{
+ int cpu;
+ unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
+
+ WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
+
+ for_each_online_cpu(cpu)
+ octeon_irq_ciu3_mbox_set_enable(data, cpu, true);
+}
+
+static void octeon_irq_ciu3_mbox_disable(struct irq_data *data)
+{
+ int cpu;
+ unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
+
+ WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
+
+ for_each_online_cpu(cpu)
+ octeon_irq_ciu3_mbox_set_enable(data, cpu, false);
+}
+
+static void octeon_irq_ciu3_mbox_ack(struct irq_data *data)
+{
+ struct octeon_ciu3_info *ciu3_info;
+ unsigned int intsn;
+ u64 isc_w1c_addr;
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+ unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
+
+ intsn = octeon_irq_ciu3_mbox_intsn_for_core(cvmx_get_local_core_num(), mbox);
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.raw = 1;
+
+ ciu3_info = __this_cpu_read(octeon_ciu3_info);
+ isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
+ cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+ cvmx_read_csr(isc_w1c_addr);
+}
+
+static void octeon_irq_ciu3_mbox_cpu_online(struct irq_data *data)
+{
+ octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), true);
+}
+
+static void octeon_irq_ciu3_mbox_cpu_offline(struct irq_data *data)
+{
+ octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), false);
+}
+
+static int octeon_irq_ciu3_alloc_resources(struct octeon_ciu3_info *ciu3_info)
+{
+ u64 b = ciu3_info->ciu3_addr;
+ int idt_ip2, idt_ip3, idt_ip4;
+ int unused_idt2;
+ int core = cvmx_get_local_core_num();
+ int i;
+
+ __this_cpu_write(octeon_ciu3_info, ciu3_info);
+
+ /*
+ * 4 idt per core starting from 1 because zero is reserved.
+ * Base idt per core is 4 * core + 1
+ */
+ idt_ip2 = core * 4 + 1;
+ idt_ip3 = core * 4 + 2;
+ idt_ip4 = core * 4 + 3;
+ unused_idt2 = core * 4 + 4;
+ __this_cpu_write(octeon_irq_ciu3_idt_ip2, idt_ip2);
+ __this_cpu_write(octeon_irq_ciu3_idt_ip3, idt_ip3);
+
+ /* ip2 interrupts for this CPU */
+ cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip2), 0);
+ cvmx_write_csr(b + CIU3_IDT_PP(idt_ip2, 0), 1ull << core);
+ cvmx_write_csr(b + CIU3_IDT_IO(idt_ip2), 0);
+
+ /* ip3 interrupts for this CPU */
+ cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip3), 1);
+ cvmx_write_csr(b + CIU3_IDT_PP(idt_ip3, 0), 1ull << core);
+ cvmx_write_csr(b + CIU3_IDT_IO(idt_ip3), 0);
+
+ /* ip4 interrupts for this CPU */
+ cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip4), 2);
+ cvmx_write_csr(b + CIU3_IDT_PP(idt_ip4, 0), 0);
+ cvmx_write_csr(b + CIU3_IDT_IO(idt_ip4), 0);
+
+ cvmx_write_csr(b + CIU3_IDT_CTL(unused_idt2), 0);
+ cvmx_write_csr(b + CIU3_IDT_PP(unused_idt2, 0), 0);
+ cvmx_write_csr(b + CIU3_IDT_IO(unused_idt2), 0);
+
+ for (i = 0; i < CIU3_MBOX_PER_CORE; i++) {
+ unsigned int intsn = octeon_irq_ciu3_mbox_intsn_for_core(core, i);
+
+ cvmx_write_csr(b + CIU3_ISC_W1C(intsn), 2);
+ cvmx_write_csr(b + CIU3_ISC_CTL(intsn), 0);
+ }
+
+ return 0;
+}
+
+static void octeon_irq_setup_secondary_ciu3(void)
+{
+ struct octeon_ciu3_info *ciu3_info;
+
+ ciu3_info = octeon_ciu3_info_per_node[cvmx_get_node_num()];
+ octeon_irq_ciu3_alloc_resources(ciu3_info);
+ irq_cpu_online();
+
+ /* Enable the CIU lines */
+ set_c0_status(STATUSF_IP3 | STATUSF_IP2);
+ if (octeon_irq_use_ip4)
+ set_c0_status(STATUSF_IP4);
+ else
+ clear_c0_status(STATUSF_IP4);
+}
+
+static struct irq_chip octeon_irq_chip_ciu3_mbox = {
+ .name = "CIU3-M",
+ .irq_enable = octeon_irq_ciu3_mbox_enable,
+ .irq_disable = octeon_irq_ciu3_mbox_disable,
+ .irq_ack = octeon_irq_ciu3_mbox_ack,
+
+ .irq_cpu_online = octeon_irq_ciu3_mbox_cpu_online,
+ .irq_cpu_offline = octeon_irq_ciu3_mbox_cpu_offline,
+ .flags = IRQCHIP_ONOFFLINE_ENABLED,
+};
+
+static int __init octeon_irq_init_ciu3(struct device_node *ciu_node,
+ struct device_node *parent)
+{
+ int i;
+ int node;
+ struct irq_domain *domain;
+ struct octeon_ciu3_info *ciu3_info;
+ const __be32 *zero_addr;
+ u64 base_addr;
+ union cvmx_ciu3_const consts;
+
+ node = 0; /* of_node_to_nid(ciu_node); */
+ ciu3_info = kzalloc_node(sizeof(*ciu3_info), GFP_KERNEL, node);
+
+ if (!ciu3_info)
+ return -ENOMEM;
+
+ zero_addr = of_get_address(ciu_node, 0, NULL, NULL);
+ if (WARN_ON(!zero_addr))
+ return -EINVAL;
+
+ base_addr = of_translate_address(ciu_node, zero_addr);
+ base_addr = (u64)phys_to_virt(base_addr);
+
+ ciu3_info->ciu3_addr = base_addr;
+ ciu3_info->node = node;
+
+ consts.u64 = cvmx_read_csr(base_addr + CIU3_CONST);
+
+ octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu3;
+
+ octeon_irq_ip2 = octeon_irq_ciu3_ip2;
+ octeon_irq_ip3 = octeon_irq_ciu3_mbox;
+ octeon_irq_ip4 = octeon_irq_ip4_mask;
+
+ if (node == cvmx_get_node_num()) {
+ /* Mips internal */
+ octeon_irq_init_core();
+
+ /* Only do per CPU things if it is the CIU of the boot node. */
+ i = irq_alloc_descs_from(OCTEON_IRQ_MBOX0, 8, node);
+ WARN_ON(i < 0);
+
+ for (i = 0; i < 8; i++)
+ irq_set_chip_and_handler(i + OCTEON_IRQ_MBOX0,
+ &octeon_irq_chip_ciu3_mbox, handle_percpu_irq);
+ }
+
+ /*
+ * Initialize all domains to use the default domain. Specific major
+ * blocks will overwrite the default domain as needed.
+ */
+ domain = irq_domain_add_tree(ciu_node, &octeon_dflt_domain_ciu3_ops,
+ ciu3_info);
+ for (i = 0; i < MAX_CIU3_DOMAINS; i++)
+ ciu3_info->domain[i] = domain;
+
+ octeon_ciu3_info_per_node[node] = ciu3_info;
+
+ if (node == cvmx_get_node_num()) {
+ /* Only do per CPU things if it is the CIU of the boot node. */
+ octeon_irq_ciu3_alloc_resources(ciu3_info);
+ if (node == 0)
+ irq_set_default_host(domain);
+
+ octeon_irq_use_ip4 = false;
+ /* Enable the CIU lines */
+ set_c0_status(STATUSF_IP2 | STATUSF_IP3);
+ clear_c0_status(STATUSF_IP4);
+ }
+
+ return 0;
+}
+
+static struct of_device_id ciu_types[] __initdata = {
+ {.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
+ {.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
+ {.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
+ {.compatible = "cavium,octeon-7890-ciu3", .data = octeon_irq_init_ciu3},
+ {.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
+ {}
+};
+
+void __init arch_init_irq(void)
+{
+#ifdef CONFIG_SMP
+ /* Set the default affinity to the boot cpu. */
+ cpumask_clear(irq_default_affinity);
+ cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+#endif
+ of_irq_init(ciu_types);
+}
+
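+/*
+ * Top-level dispatch for Octeon interrupt exceptions: the pending cause
+ * bits are masked with the status IM bits and serviced in the order
+ * IP2, IP3, IP4, then any remaining CP0 interrupt lines, until nothing
+ * is left pending.
+ */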
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned long cop0_cause;
+ unsigned long cop0_status;
+
+ while (1) {
+ cop0_cause = read_c0_cause();
+ cop0_status = read_c0_status();
+ cop0_cause &= cop0_status;
+ cop0_cause &= ST0_IM;
+
+ if (cop0_cause & STATUSF_IP2)
+ octeon_irq_ip2();
+ else if (cop0_cause & STATUSF_IP3)
+ octeon_irq_ip3();
+ else if (cop0_cause & STATUSF_IP4)
+ octeon_irq_ip4();
+ else if (cop0_cause)
+ do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
+ else
+ break;
+ }
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+void octeon_fixup_irqs(void)
+{
+ irq_cpu_offline();
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block)
+{
+ struct octeon_ciu3_info *ciu3_info;
+
+ ciu3_info = octeon_ciu3_info_per_node[node & CVMX_NODE_MASK];
+ return ciu3_info->domain[block];
+}
+EXPORT_SYMBOL(octeon_irq_get_block_domain);
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
new file mode 100644
index 000000000..0a7c9834b
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -0,0 +1,482 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Unified implementation of memcpy, memmove and the __copy_user backend.
+ *
+ * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
+ * Copyright (C) 2002 Broadcom, Inc.
+ * memcpy/copy_user author: Mark Vandevoorde
+ *
+ * Mnemonic names for arguments to memcpy/__copy_user
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/export.h>
+#include <asm/regdef.h>
+
+#define dst a0
+#define src a1
+#define len a2
+
+/*
+ * Spec
+ *
+ * memcpy copies len bytes from src to dst and sets v0 to dst.
+ * It assumes that
+ * - src and dst don't overlap
+ * - src is readable
+ * - dst is writable
+ * memcpy uses the standard calling convention
+ *
+ * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
+ * the number of uncopied bytes due to an exception caused by a read or write.
+ * __copy_user assumes that src and dst don't overlap, and that the call is
+ * implementing one of the following:
+ * copy_to_user
+ * - src is readable (no exceptions when reading src)
+ * copy_from_user
+ * - dst is writable (no exceptions when writing dst)
+ * __copy_user uses a non-standard calling convention; see
+ * arch/mips/include/asm/uaccess.h
+ *
+ * When an exception happens on a load, the handler must
+ * ensure that all of the destination buffer is overwritten to prevent
+ * leaking information to user mode programs.
+ */
+
+/*
+ * Implementation
+ */
+
+/*
+ * The exception handler for loads requires that:
+ * 1- AT contain the address of the byte just past the end of the source
+ * of the copy,
+ * 2- src_entry <= src < AT, and
+ * 3- (dst - src) == (dst_entry - src_entry),
+ * The _entry suffix denotes values when __copy_user was called.
+ *
+ * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
+ * (2) is met by incrementing src by the number of bytes copied
+ * (3) is met by not doing loads between a pair of increments of dst and src
+ *
+ * The exception handlers for stores adjust len (if necessary) and return.
+ * These handlers do not need to overwrite any data.
+ *
+ * For __rmemcpy and memmove an exception is always a kernel bug, therefore
+ * they're not protected.
+ */
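
The residue contract spelled out above (on a fault, len ends up holding an upper bound on the bytes that were not copied) is what lets copy_to_user/copy_from_user callers detect partial copies. A rough caller-side sketch follows; the helper names are illustrative, only the return-the-uncopied-count convention comes from the code above.

/*
 * Caller-side sketch of the __copy_user residue convention: the primitive
 * returns the number of bytes NOT copied, and a nonzero residue is turned
 * into -EFAULT.  raw_copy_stub() and fetch_user_record() are illustrative
 * names, not kernel APIs.
 */
#include <errno.h>
#include <stddef.h>
#include <string.h>

static size_t raw_copy_stub(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;			/* 0 == everything copied */
}

static int fetch_user_record(void *dst, const void *user_src, size_t len)
{
	size_t not_copied = raw_copy_stub(dst, user_src, len);

	return not_copied ? -EFAULT : 0;
}
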
+
+#define EXC(inst_reg,addr,handler) \
+9: inst_reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
+/*
+ * Only on the 64-bit kernel can we make use of 64-bit registers.
+ */
+
+#define LOAD ld
+#define LOADL ldl
+#define LOADR ldr
+#define STOREL sdl
+#define STORER sdr
+#define STORE sd
+#define ADD daddu
+#define SUB dsubu
+#define SRL dsrl
+#define SRA dsra
+#define SLL dsll
+#define SLLV dsllv
+#define SRLV dsrlv
+#define NBYTES 8
+#define LOG_NBYTES 3
+
+/*
+ * As we are sharing the code base with the mips32 tree (which uses the o32
+ * ABI register definitions), we need to redefine the register definitions
+ * from the n64 ABI register naming to the o32 ABI register naming.
+ */
+#undef t0
+#undef t1
+#undef t2
+#undef t3
+#define t0 $8
+#define t1 $9
+#define t2 $10
+#define t3 $11
+#define t4 $12
+#define t5 $13
+#define t6 $14
+#define t7 $15
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST LOADR
+#define LDREST LOADL
+#define STFIRST STORER
+#define STREST STOREL
+#define SHIFT_DISCARD SLLV
+#else
+#define LDFIRST LOADL
+#define LDREST LOADR
+#define STFIRST STOREL
+#define STREST STORER
+#define SHIFT_DISCARD SRLV
+#endif
+
+#define FIRST(unit) ((unit)*NBYTES)
+#define REST(unit) (FIRST(unit)+NBYTES-1)
+#define UNIT(unit) FIRST(unit)
+
+#define ADDRMASK (NBYTES-1)
+
+ .text
+ .set noreorder
+ .set noat
+
+/*
+ * A combined memcpy/__copy_user
+ * __copy_user sets len to 0 for success; else to an upper bound of
+ * the number of uncopied bytes.
+ * memcpy sets v0 to dst.
+ */
+ .align 5
+LEAF(memcpy) /* a0=dst a1=src a2=len */
+EXPORT_SYMBOL(memcpy)
+ move v0, dst /* return value */
+__memcpy:
+FEXPORT(__copy_user)
+EXPORT_SYMBOL(__copy_user)
+ /*
+ * Note: dst & src may be unaligned, len may be 0
+ * Temps
+ */
+ #
+ # Octeon doesn't care if the destination is unaligned. The hardware
+ # can fix it faster than we can special case the assembly.
+ #
+ pref 0, 0(src)
+ sltu t0, len, NBYTES # Check if < 1 word
+ bnez t0, copy_bytes_checklen
+ and t0, src, ADDRMASK # Check if src unaligned
+ bnez t0, src_unaligned
+ sltu t0, len, 4*NBYTES # Check if < 4 words
+ bnez t0, less_than_4units
+ sltu t0, len, 8*NBYTES # Check if < 8 words
+ bnez t0, less_than_8units
+ sltu t0, len, 16*NBYTES # Check if < 16 words
+ bnez t0, cleanup_both_aligned
+ sltu t0, len, 128+1 # Check if len < 129
+ bnez t0, 1f # Skip prefetch if len is too short
+ sltu t0, len, 256+1 # Check if len < 257
+ bnez t0, 1f # Skip prefetch if len is too short
+ pref 0, 128(src) # We must not prefetch invalid addresses
+ #
+ # This is where we loop if there is more than 128 bytes left
+2: pref 0, 256(src) # We must not prefetch invalid addresses
+ #
+ # This is where we loop if we can't prefetch anymore
+1:
+EXC( LOAD t0, UNIT(0)(src), l_exc)
+EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
+ SUB len, len, 16*NBYTES
+EXC( STORE t0, UNIT(0)(dst), s_exc_p16u)
+EXC( STORE t1, UNIT(1)(dst), s_exc_p15u)
+EXC( STORE t2, UNIT(2)(dst), s_exc_p14u)
+EXC( STORE t3, UNIT(3)(dst), s_exc_p13u)
+EXC( LOAD t0, UNIT(4)(src), l_exc_copy)
+EXC( LOAD t1, UNIT(5)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(6)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(7)(src), l_exc_copy)
+EXC( STORE t0, UNIT(4)(dst), s_exc_p12u)
+EXC( STORE t1, UNIT(5)(dst), s_exc_p11u)
+EXC( STORE t2, UNIT(6)(dst), s_exc_p10u)
+ ADD src, src, 16*NBYTES
+EXC( STORE t3, UNIT(7)(dst), s_exc_p9u)
+ ADD dst, dst, 16*NBYTES
+EXC( LOAD t0, UNIT(-8)(src), l_exc_copy_rewind16)
+EXC( LOAD t1, UNIT(-7)(src), l_exc_copy_rewind16)
+EXC( LOAD t2, UNIT(-6)(src), l_exc_copy_rewind16)
+EXC( LOAD t3, UNIT(-5)(src), l_exc_copy_rewind16)
+EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u)
+EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u)
+EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u)
+EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u)
+EXC( LOAD t0, UNIT(-4)(src), l_exc_copy_rewind16)
+EXC( LOAD t1, UNIT(-3)(src), l_exc_copy_rewind16)
+EXC( LOAD t2, UNIT(-2)(src), l_exc_copy_rewind16)
+EXC( LOAD t3, UNIT(-1)(src), l_exc_copy_rewind16)
+EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u)
+EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u)
+EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u)
+EXC( STORE t3, UNIT(-1)(dst), s_exc_p1u)
+ sltu t0, len, 256+1 # See if we can prefetch more
+ beqz t0, 2b
+ sltu t0, len, 128 # See if we can loop one more time
+ beqz t0, 1b
+ nop
+ #
+ # Jump here if there are less than 16*NBYTES left.
+ #
+cleanup_both_aligned:
+ beqz len, done
+ sltu t0, len, 8*NBYTES
+ bnez t0, less_than_8units
+ nop
+EXC( LOAD t0, UNIT(0)(src), l_exc)
+EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
+ SUB len, len, 8*NBYTES
+EXC( STORE t0, UNIT(0)(dst), s_exc_p8u)
+EXC( STORE t1, UNIT(1)(dst), s_exc_p7u)
+EXC( STORE t2, UNIT(2)(dst), s_exc_p6u)
+EXC( STORE t3, UNIT(3)(dst), s_exc_p5u)
+EXC( LOAD t0, UNIT(4)(src), l_exc_copy)
+EXC( LOAD t1, UNIT(5)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(6)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(7)(src), l_exc_copy)
+EXC( STORE t0, UNIT(4)(dst), s_exc_p4u)
+EXC( STORE t1, UNIT(5)(dst), s_exc_p3u)
+EXC( STORE t2, UNIT(6)(dst), s_exc_p2u)
+EXC( STORE t3, UNIT(7)(dst), s_exc_p1u)
+ ADD src, src, 8*NBYTES
+ beqz len, done
+ ADD dst, dst, 8*NBYTES
+ #
+ # Jump here if there are less than 8*NBYTES left.
+ #
+less_than_8units:
+ sltu t0, len, 4*NBYTES
+ bnez t0, less_than_4units
+ nop
+EXC( LOAD t0, UNIT(0)(src), l_exc)
+EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
+ SUB len, len, 4*NBYTES
+EXC( STORE t0, UNIT(0)(dst), s_exc_p4u)
+EXC( STORE t1, UNIT(1)(dst), s_exc_p3u)
+EXC( STORE t2, UNIT(2)(dst), s_exc_p2u)
+EXC( STORE t3, UNIT(3)(dst), s_exc_p1u)
+ ADD src, src, 4*NBYTES
+ beqz len, done
+ ADD dst, dst, 4*NBYTES
+ #
+ # Jump here if there are less than 4*NBYTES left. This means
+ # we may need to copy up to 3 NBYTES words.
+ #
+less_than_4units:
+ sltu t0, len, 1*NBYTES
+ bnez t0, copy_bytes_checklen
+ nop
+ #
+ # 1) Copy NBYTES, then check length again
+ #
+EXC( LOAD t0, 0(src), l_exc)
+ SUB len, len, NBYTES
+ sltu t1, len, 8
+EXC( STORE t0, 0(dst), s_exc_p1u)
+ ADD src, src, NBYTES
+ bnez t1, copy_bytes_checklen
+ ADD dst, dst, NBYTES
+ #
+ # 2) Copy NBYTES, then check length again
+ #
+EXC( LOAD t0, 0(src), l_exc)
+ SUB len, len, NBYTES
+ sltu t1, len, 8
+EXC( STORE t0, 0(dst), s_exc_p1u)
+ ADD src, src, NBYTES
+ bnez t1, copy_bytes_checklen
+ ADD dst, dst, NBYTES
+ #
+ # 3) Copy NBYTES, then check length again
+ #
+EXC( LOAD t0, 0(src), l_exc)
+ SUB len, len, NBYTES
+ ADD src, src, NBYTES
+ ADD dst, dst, NBYTES
+ b copy_bytes_checklen
+EXC( STORE t0, -8(dst), s_exc_p1u)
+
+src_unaligned:
+#define rem t8
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ beqz t0, cleanup_src_unaligned
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+EXC( LDFIRST t0, FIRST(0)(src), l_exc)
+EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy)
+ SUB len, len, 4*NBYTES
+EXC( LDREST t0, REST(0)(src), l_exc_copy)
+EXC( LDREST t1, REST(1)(src), l_exc_copy)
+EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy)
+EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy)
+EXC( LDREST t2, REST(2)(src), l_exc_copy)
+EXC( LDREST t3, REST(3)(src), l_exc_copy)
+ ADD src, src, 4*NBYTES
+EXC( STORE t0, UNIT(0)(dst), s_exc_p4u)
+EXC( STORE t1, UNIT(1)(dst), s_exc_p3u)
+EXC( STORE t2, UNIT(2)(dst), s_exc_p2u)
+EXC( STORE t3, UNIT(3)(dst), s_exc_p1u)
+ bne len, rem, 1b
+ ADD dst, dst, 4*NBYTES
+
+cleanup_src_unaligned:
+ beqz len, done
+ and rem, len, NBYTES-1 # rem = len % NBYTES
+ beq rem, len, copy_bytes
+ nop
+1:
+EXC( LDFIRST t0, FIRST(0)(src), l_exc)
+EXC( LDREST t0, REST(0)(src), l_exc_copy)
+ SUB len, len, NBYTES
+EXC( STORE t0, 0(dst), s_exc_p1u)
+ ADD src, src, NBYTES
+ bne len, rem, 1b
+ ADD dst, dst, NBYTES
+
+copy_bytes_checklen:
+ beqz len, done
+ nop
+copy_bytes:
+ /* 0 < len < NBYTES */
+#define COPY_BYTE(N) \
+EXC( lb t0, N(src), l_exc); \
+ SUB len, len, 1; \
+ beqz len, done; \
+EXC( sb t0, N(dst), s_exc_p1)
+
+ COPY_BYTE(0)
+ COPY_BYTE(1)
+ COPY_BYTE(2)
+ COPY_BYTE(3)
+ COPY_BYTE(4)
+ COPY_BYTE(5)
+EXC( lb t0, NBYTES-2(src), l_exc)
+ SUB len, len, 1
+ jr ra
+EXC( sb t0, NBYTES-2(dst), s_exc_p1)
+done:
+ jr ra
+ nop
+ END(memcpy)
+
+l_exc_copy_rewind16:
+ /* Rewind src and dst by 16*NBYTES for l_exc_copy */
+ SUB src, src, 16*NBYTES
+ SUB dst, dst, 16*NBYTES
+l_exc_copy:
+ /*
+ * Copy bytes from src until faulting load address (or until a
+ * lb faults)
+ *
+ * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
+ * may be more than a byte beyond the last address.
+ * Hence, the lb below may get an exception.
+ *
+ * Assumes src < THREAD_BUADDR($28)
+ */
+ LOAD t0, TI_TASK($28)
+ LOAD t0, THREAD_BUADDR(t0)
+1:
+EXC( lb t1, 0(src), l_exc)
+ ADD src, src, 1
+ sb t1, 0(dst) # can't fault -- we're copy_from_user
+ bne src, t0, 1b
+ ADD dst, dst, 1
+l_exc:
+ LOAD t0, TI_TASK($28)
+ LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
+ SUB len, AT, t0 # len number of uncopied bytes
+ jr ra
+ nop
+
+
+#define SEXC(n) \
+s_exc_p ## n ## u: \
+ jr ra; \
+ ADD len, len, n*NBYTES
+
+SEXC(16)
+SEXC(15)
+SEXC(14)
+SEXC(13)
+SEXC(12)
+SEXC(11)
+SEXC(10)
+SEXC(9)
+SEXC(8)
+SEXC(7)
+SEXC(6)
+SEXC(5)
+SEXC(4)
+SEXC(3)
+SEXC(2)
+SEXC(1)
+
+s_exc_p1:
+ jr ra
+ ADD len, len, 1
+s_exc:
+ jr ra
+ nop
+
+ .align 5
+LEAF(memmove)
+EXPORT_SYMBOL(memmove)
+ ADD t0, a0, a2
+ ADD t1, a1, a2
+ sltu t0, a1, t0 # dst + len <= src -> memcpy
+ sltu t1, a0, t1 # dst >= src + len -> memcpy
+ and t0, t1
+ beqz t0, __memcpy
+ move v0, a0 /* return value */
+ beqz a2, r_out
+ END(memmove)
+
+ /* fall through to __rmemcpy */
+LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
+ sltu t0, a1, a0
+ beqz t0, r_end_bytes_up # src >= dst
+ nop
+ ADD a0, a2 # dst = dst + len
+ ADD a1, a2 # src = src + len
+
+r_end_bytes:
+ lb t0, -1(a1)
+ SUB a2, a2, 0x1
+ sb t0, -1(a0)
+ SUB a1, a1, 0x1
+ bnez a2, r_end_bytes
+ SUB a0, a0, 0x1
+
+r_out:
+ jr ra
+ move a2, zero
+
+r_end_bytes_up:
+ lb t0, (a1)
+ SUB a2, a2, 0x1
+ sb t0, (a0)
+ ADD a1, a1, 0x1
+ bnez a2, r_end_bytes_up
+ ADD a0, a0, 0x1
+
+ jr ra
+ move a2, zero
+ END(__rmemcpy)
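
For reference, the overlap test in memmove and the byte-at-a-time fallback in __rmemcpy above correspond roughly to the following C sketch (a simplification for illustration, not a drop-in equivalent of the tuned assembly):

/*
 * C-level sketch of memmove/__rmemcpy above: non-overlapping regions fall
 * through to the forward memcpy, otherwise bytes are copied one at a
 * time, backwards when the destination sits above the source.
 */
#include <stddef.h>
#include <string.h>

void *memmove_sketch(void *dst, const void *src, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	if (d + len <= s || s + len <= d)
		return memcpy(dst, src, len);	/* no overlap: forward copy */

	if (d > s)				/* dst above src: copy backwards */
		while (len--)
			d[len] = s[len];
	else					/* dst below src: copy forwards */
		while (len--)
			*d++ = *s++;
	return dst;
}
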
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
new file mode 100644
index 000000000..ce05c0dd3
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -0,0 +1,1141 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2017 Cavium, Inc.
+ * Copyright (C) 2008 Wind River Systems
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/of_platform.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-helper-board.h>
+
+#ifdef CONFIG_USB
+#include <linux/usb/ehci_def.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/ohci_pdriver.h>
+#include <asm/octeon/cvmx-uctlx-defs.h>
+
+#define CVMX_UAHCX_EHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000010ull))
+#define CVMX_UAHCX_OHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000408ull))
+
+static DEFINE_MUTEX(octeon2_usb_clocks_mutex);
+
+static int octeon2_usb_clock_start_cnt;
+
+static int __init octeon2_usb_reset(void)
+{
+ union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
+ u32 ucmd;
+
+ if (!OCTEON_IS_OCTEON2())
+ return 0;
+
+ clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
+ if (clk_rst_ctl.s.hrst) {
+ ucmd = cvmx_read64_uint32(CVMX_UAHCX_EHCI_USBCMD);
+ ucmd &= ~CMD_RUN;
+ cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);
+ mdelay(2);
+ ucmd |= CMD_RESET;
+ cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);
+ ucmd = cvmx_read64_uint32(CVMX_UAHCX_OHCI_USBCMD);
+ ucmd |= CMD_RUN;
+ cvmx_write64_uint32(CVMX_UAHCX_OHCI_USBCMD, ucmd);
+ }
+
+ return 0;
+}
+arch_initcall(octeon2_usb_reset);
+
+static void octeon2_usb_clocks_start(struct device *dev)
+{
+ u64 div;
+ union cvmx_uctlx_if_ena if_ena;
+ union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
+ union cvmx_uctlx_uphy_portx_ctl_status port_ctl_status;
+ int i;
+ unsigned long io_clk_64_to_ns;
+ u32 clock_rate = 12000000;
+ bool is_crystal_clock = false;
+
+
+ mutex_lock(&octeon2_usb_clocks_mutex);
+
+ octeon2_usb_clock_start_cnt++;
+ if (octeon2_usb_clock_start_cnt != 1)
+ goto exit;
+
+ io_clk_64_to_ns = 64000000000ull / octeon_get_io_clock_rate();
+
+ if (dev->of_node) {
+ struct device_node *uctl_node;
+ const char *clock_type;
+
+ uctl_node = of_get_parent(dev->of_node);
+ if (!uctl_node) {
+ dev_err(dev, "No UCTL device node\n");
+ goto exit;
+ }
+ i = of_property_read_u32(uctl_node,
+ "refclk-frequency", &clock_rate);
+ if (i) {
+ dev_err(dev, "No UCTL \"refclk-frequency\"\n");
+ of_node_put(uctl_node);
+ goto exit;
+ }
+ i = of_property_read_string(uctl_node,
+ "refclk-type", &clock_type);
+ of_node_put(uctl_node);
+ if (!i && strcmp("crystal", clock_type) == 0)
+ is_crystal_clock = true;
+ }
+
+ /*
+ * Step 1: Wait for voltages stable. That surely happened
+ * before starting the kernel.
+ *
+ * Step 2: Enable SCLK of UCTL by writing UCTL0_IF_ENA[EN] = 1
+ */
+ if_ena.u64 = 0;
+ if_ena.s.en = 1;
+ cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64);
+
+ for (i = 0; i <= 1; i++) {
+ port_ctl_status.u64 =
+ cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0));
+ /* Set txvreftune to 15 to obtain compliant 'eye' diagram. */
+ port_ctl_status.s.txvreftune = 15;
+ port_ctl_status.s.txrisetune = 1;
+ port_ctl_status.s.txpreemphasistune = 1;
+ cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0),
+ port_ctl_status.u64);
+ }
+
+ /* Step 3: Configure the reference clock, PHY, and HCLK */
+ clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
+
+ /*
+ * If the UCTL looks like it has already been started, skip
+ * the initialization; otherwise bus errors result.
+ */
+ if (clk_rst_ctl.s.hrst)
+ goto end_clock;
+ /* 3a */
+ clk_rst_ctl.s.p_por = 1;
+ clk_rst_ctl.s.hrst = 0;
+ clk_rst_ctl.s.p_prst = 0;
+ clk_rst_ctl.s.h_clkdiv_rst = 0;
+ clk_rst_ctl.s.o_clkdiv_rst = 0;
+ clk_rst_ctl.s.h_clkdiv_en = 0;
+ clk_rst_ctl.s.o_clkdiv_en = 0;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 3b */
+ clk_rst_ctl.s.p_refclk_sel = is_crystal_clock ? 0 : 1;
+ switch (clock_rate) {
+ default:
+ pr_err("Invalid UCTL clock rate of %u, using 12000000 instead\n",
+ clock_rate);
+ fallthrough;
+ case 12000000:
+ clk_rst_ctl.s.p_refclk_div = 0;
+ break;
+ case 24000000:
+ clk_rst_ctl.s.p_refclk_div = 1;
+ break;
+ case 48000000:
+ clk_rst_ctl.s.p_refclk_div = 2;
+ break;
+ }
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 3c */
+ div = octeon_get_io_clock_rate() / 130000000ull;
+
+ switch (div) {
+ case 0:
+ div = 1;
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ break;
+ case 5:
+ div = 4;
+ break;
+ case 6:
+ case 7:
+ div = 6;
+ break;
+ case 8:
+ case 9:
+ case 10:
+ case 11:
+ div = 8;
+ break;
+ default:
+ div = 12;
+ break;
+ }
+ clk_rst_ctl.s.h_div = div;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+ /* Read it back. */
+ clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
+ clk_rst_ctl.s.h_clkdiv_en = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+ /* 3d */
+ clk_rst_ctl.s.h_clkdiv_rst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 3e: delay 64 io clocks */
+ ndelay(io_clk_64_to_ns);
+
+ /*
+ * Step 4: Program the power-on reset field in the UCTL
+ * clock-reset-control register.
+ */
+ clk_rst_ctl.s.p_por = 0;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* Step 5: Wait 3 ms for the PHY clock to start. */
+ mdelay(3);
+
+ /* Steps 6..9 for ATE only, are skipped. */
+
+ /* Step 10: Configure the OHCI_CLK48 and OHCI_CLK12 clocks. */
+ /* 10a */
+ clk_rst_ctl.s.o_clkdiv_rst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 10b */
+ clk_rst_ctl.s.o_clkdiv_en = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* 10c */
+ ndelay(io_clk_64_to_ns);
+
+ /*
+ * Step 11: Program the PHY reset field:
+ * UCTL0_CLK_RST_CTL[P_PRST] = 1
+ */
+ clk_rst_ctl.s.p_prst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* Step 11b */
+ udelay(1);
+
+ /* Step 11c */
+ clk_rst_ctl.s.p_prst = 0;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* Step 11d */
+ mdelay(1);
+
+ /* Step 11e */
+ clk_rst_ctl.s.p_prst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* Step 12: Wait 1 uS. */
+ udelay(1);
+
+ /* Step 13: Program the HRESET_N field: UCTL0_CLK_RST_CTL[HRST] = 1 */
+ clk_rst_ctl.s.hrst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+end_clock:
+ /* Set uSOF cycle period to 60,000 bits. */
+ cvmx_write_csr(CVMX_UCTLX_EHCI_FLA(0), 0x20ull);
+
+exit:
+ mutex_unlock(&octeon2_usb_clocks_mutex);
+}
+
+static void octeon2_usb_clocks_stop(void)
+{
+ mutex_lock(&octeon2_usb_clocks_mutex);
+ octeon2_usb_clock_start_cnt--;
+ mutex_unlock(&octeon2_usb_clocks_mutex);
+}
+
+static int octeon_ehci_power_on(struct platform_device *pdev)
+{
+ octeon2_usb_clocks_start(&pdev->dev);
+ return 0;
+}
+
+static void octeon_ehci_power_off(struct platform_device *pdev)
+{
+ octeon2_usb_clocks_stop();
+}
+
+static struct usb_ehci_pdata octeon_ehci_pdata = {
+ /* Octeon EHCI matches CPU endianness. */
+#ifdef __BIG_ENDIAN
+ .big_endian_mmio = 1,
+#endif
+ /*
+ * We can DMA from anywhere. But the descriptors must be in
+ * the lower 4GB.
+ */
+ .dma_mask_64 = 0,
+ .power_on = octeon_ehci_power_on,
+ .power_off = octeon_ehci_power_off,
+};
+
+static void __init octeon_ehci_hw_start(struct device *dev)
+{
+ union cvmx_uctlx_ehci_ctl ehci_ctl;
+
+ octeon2_usb_clocks_start(dev);
+
+ ehci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_EHCI_CTL(0));
+ /* Use 64-bit addressing. */
+ ehci_ctl.s.ehci_64b_addr_en = 1;
+ ehci_ctl.s.l2c_addr_msb = 0;
+#ifdef __BIG_ENDIAN
+ ehci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
+ ehci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
+#else
+ ehci_ctl.s.l2c_buff_emod = 0; /* not swapped. */
+ ehci_ctl.s.l2c_desc_emod = 0; /* not swapped. */
+ ehci_ctl.s.inv_reg_a2 = 1;
+#endif
+ cvmx_write_csr(CVMX_UCTLX_EHCI_CTL(0), ehci_ctl.u64);
+
+ octeon2_usb_clocks_stop();
+}
+
+static int __init octeon_ehci_device_init(void)
+{
+ struct platform_device *pd;
+ struct device_node *ehci_node;
+ int ret = 0;
+
+ ehci_node = of_find_node_by_name(NULL, "ehci");
+ if (!ehci_node)
+ return 0;
+
+ pd = of_find_device_by_node(ehci_node);
+ of_node_put(ehci_node);
+ if (!pd)
+ return 0;
+
+ pd->dev.platform_data = &octeon_ehci_pdata;
+ octeon_ehci_hw_start(&pd->dev);
+ put_device(&pd->dev);
+
+ return ret;
+}
+device_initcall(octeon_ehci_device_init);
+
+static int octeon_ohci_power_on(struct platform_device *pdev)
+{
+ octeon2_usb_clocks_start(&pdev->dev);
+ return 0;
+}
+
+static void octeon_ohci_power_off(struct platform_device *pdev)
+{
+ octeon2_usb_clocks_stop();
+}
+
+static struct usb_ohci_pdata octeon_ohci_pdata = {
+ /* Octeon OHCI matches CPU endianness. */
+#ifdef __BIG_ENDIAN
+ .big_endian_mmio = 1,
+#endif
+ .power_on = octeon_ohci_power_on,
+ .power_off = octeon_ohci_power_off,
+};
+
+static void __init octeon_ohci_hw_start(struct device *dev)
+{
+ union cvmx_uctlx_ohci_ctl ohci_ctl;
+
+ octeon2_usb_clocks_start(dev);
+
+ ohci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_OHCI_CTL(0));
+ ohci_ctl.s.l2c_addr_msb = 0;
+#ifdef __BIG_ENDIAN
+ ohci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
+ ohci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
+#else
+ ohci_ctl.s.l2c_buff_emod = 0; /* not swapped. */
+ ohci_ctl.s.l2c_desc_emod = 0; /* not swapped. */
+ ohci_ctl.s.inv_reg_a2 = 1;
+#endif
+ cvmx_write_csr(CVMX_UCTLX_OHCI_CTL(0), ohci_ctl.u64);
+
+ octeon2_usb_clocks_stop();
+}
+
+static int __init octeon_ohci_device_init(void)
+{
+ struct platform_device *pd;
+ struct device_node *ohci_node;
+ int ret = 0;
+
+ ohci_node = of_find_node_by_name(NULL, "ohci");
+ if (!ohci_node)
+ return 0;
+
+ pd = of_find_device_by_node(ohci_node);
+ of_node_put(ohci_node);
+ if (!pd)
+ return 0;
+
+ pd->dev.platform_data = &octeon_ohci_pdata;
+ octeon_ohci_hw_start(&pd->dev);
+ put_device(&pd->dev);
+
+ return ret;
+}
+device_initcall(octeon_ohci_device_init);
+
+#endif /* CONFIG_USB */
+
+/* Octeon Random Number Generator. */
+static int __init octeon_rng_device_init(void)
+{
+ struct platform_device *pd;
+ int ret = 0;
+
+ struct resource rng_resources[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ .start = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS),
+ .end = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS) + 0xf
+ }, {
+ .flags = IORESOURCE_MEM,
+ .start = cvmx_build_io_address(8, 0),
+ .end = cvmx_build_io_address(8, 0) + 0x7
+ }
+ };
+
+ pd = platform_device_alloc("octeon_rng", -1);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = platform_device_add_resources(pd, rng_resources,
+ ARRAY_SIZE(rng_resources));
+ if (ret)
+ goto fail;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ goto fail;
+
+ return ret;
+fail:
+ platform_device_put(pd);
+
+out:
+ return ret;
+}
+device_initcall(octeon_rng_device_init);
+
+static const struct of_device_id octeon_ids[] __initconst = {
+ { .compatible = "simple-bus", },
+ { .compatible = "cavium,octeon-6335-uctl", },
+ { .compatible = "cavium,octeon-5750-usbn", },
+ { .compatible = "cavium,octeon-3860-bootbus", },
+ { .compatible = "cavium,mdio-mux", },
+ { .compatible = "gpio-leds", },
+ { .compatible = "cavium,octeon-7130-usb-uctl", },
+ {},
+};
+
+static bool __init octeon_has_88e1145(void)
+{
+ return !OCTEON_IS_MODEL(OCTEON_CN52XX) &&
+ !OCTEON_IS_MODEL(OCTEON_CN6XXX) &&
+ !OCTEON_IS_MODEL(OCTEON_CN56XX);
+}
+
+static bool __init octeon_has_fixed_link(int ipd_port)
+{
+ switch (cvmx_sysinfo_get()->board_type) {
+ case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+ case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
+ case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
+ case CVMX_BOARD_TYPE_CUST_NB5:
+ case CVMX_BOARD_TYPE_EBH3100:
+ /* Port 1 on these boards is always gigabit. */
+ return ipd_port == 1;
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ /* Ports 0 and 1 connect to the switch. */
+ return ipd_port == 0 || ipd_port == 1;
+ }
+ return false;
+}
+
+static void __init octeon_fdt_set_phy(int eth, int phy_addr)
+{
+ const __be32 *phy_handle;
+ const __be32 *alt_phy_handle;
+ const __be32 *reg;
+ u32 phandle;
+ int phy;
+ int alt_phy;
+ const char *p;
+ int current_len;
+ char new_name[20];
+
+ phy_handle = fdt_getprop(initial_boot_params, eth, "phy-handle", NULL);
+ if (!phy_handle)
+ return;
+
+ phandle = be32_to_cpup(phy_handle);
+ phy = fdt_node_offset_by_phandle(initial_boot_params, phandle);
+
+ alt_phy_handle = fdt_getprop(initial_boot_params, eth, "cavium,alt-phy-handle", NULL);
+ if (alt_phy_handle) {
+ u32 alt_phandle = be32_to_cpup(alt_phy_handle);
+
+ alt_phy = fdt_node_offset_by_phandle(initial_boot_params, alt_phandle);
+ } else {
+ alt_phy = -1;
+ }
+
+ if (phy_addr < 0 || phy < 0) {
+ /* Delete the PHY things */
+ fdt_nop_property(initial_boot_params, eth, "phy-handle");
+ /* This one may fail */
+ fdt_nop_property(initial_boot_params, eth, "cavium,alt-phy-handle");
+ if (phy >= 0)
+ fdt_nop_node(initial_boot_params, phy);
+ if (alt_phy >= 0)
+ fdt_nop_node(initial_boot_params, alt_phy);
+ return;
+ }
+
+ if (phy_addr >= 256 && alt_phy > 0) {
+ const struct fdt_property *phy_prop;
+ struct fdt_property *alt_prop;
+ fdt32_t phy_handle_name;
+
+ /* Use the alt phy node instead.*/
+ phy_prop = fdt_get_property(initial_boot_params, eth, "phy-handle", NULL);
+ phy_handle_name = phy_prop->nameoff;
+ fdt_nop_node(initial_boot_params, phy);
+ fdt_nop_property(initial_boot_params, eth, "phy-handle");
+ alt_prop = fdt_get_property_w(initial_boot_params, eth, "cavium,alt-phy-handle", NULL);
+ alt_prop->nameoff = phy_handle_name;
+ phy = alt_phy;
+ }
+
+ phy_addr &= 0xff;
+
+ if (octeon_has_88e1145()) {
+ fdt_nop_property(initial_boot_params, phy, "marvell,reg-init");
+ memset(new_name, 0, sizeof(new_name));
+ strcpy(new_name, "marvell,88e1145");
+ p = fdt_getprop(initial_boot_params, phy, "compatible",
+ &current_len);
+ if (p && current_len >= strlen(new_name))
+ fdt_setprop_inplace(initial_boot_params, phy,
+ "compatible", new_name, current_len);
+ }
+
+ reg = fdt_getprop(initial_boot_params, phy, "reg", NULL);
+ if (phy_addr == be32_to_cpup(reg))
+ return;
+
+ fdt_setprop_inplace_cell(initial_boot_params, phy, "reg", phy_addr);
+
+ snprintf(new_name, sizeof(new_name), "ethernet-phy@%x", phy_addr);
+
+ p = fdt_get_name(initial_boot_params, phy, &current_len);
+ if (p && current_len == strlen(new_name))
+ fdt_set_name(initial_boot_params, phy, new_name);
+ else
+ pr_err("Error: could not rename ethernet phy: <%s>", p);
+}
+
+static void __init octeon_fdt_set_mac_addr(int n, u64 *pmac)
+{
+ const u8 *old_mac;
+ int old_len;
+ u8 new_mac[6];
+ u64 mac = *pmac;
+ int r;
+
+ old_mac = fdt_getprop(initial_boot_params, n, "local-mac-address",
+ &old_len);
+ if (!old_mac || old_len != 6 || is_valid_ether_addr(old_mac))
+ return;
+
+ new_mac[0] = (mac >> 40) & 0xff;
+ new_mac[1] = (mac >> 32) & 0xff;
+ new_mac[2] = (mac >> 24) & 0xff;
+ new_mac[3] = (mac >> 16) & 0xff;
+ new_mac[4] = (mac >> 8) & 0xff;
+ new_mac[5] = mac & 0xff;
+
+ r = fdt_setprop_inplace(initial_boot_params, n, "local-mac-address",
+ new_mac, sizeof(new_mac));
+
+ if (r) {
+ pr_err("Setting \"local-mac-address\" failed %d", r);
+ return;
+ }
+ *pmac = mac + 1;
+}
+
+static void __init octeon_fdt_rm_ethernet(int node)
+{
+ const __be32 *phy_handle;
+
+ phy_handle = fdt_getprop(initial_boot_params, node, "phy-handle", NULL);
+ if (phy_handle) {
+ u32 ph = be32_to_cpup(phy_handle);
+ int p = fdt_node_offset_by_phandle(initial_boot_params, ph);
+
+ if (p >= 0)
+ fdt_nop_node(initial_boot_params, p);
+ }
+ fdt_nop_node(initial_boot_params, node);
+}
+
+static void __init _octeon_rx_tx_delay(int eth, int rx_delay, int tx_delay)
+{
+ fdt_setprop_inplace_cell(initial_boot_params, eth, "rx-delay",
+ rx_delay);
+ fdt_setprop_inplace_cell(initial_boot_params, eth, "tx-delay",
+ tx_delay);
+}
+
+static void __init octeon_rx_tx_delay(int eth, int iface, int port)
+{
+ switch (cvmx_sysinfo_get()->board_type) {
+ case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+ if (iface == 0) {
+ if (port == 0) {
+ /*
+ * Boards with gigabit WAN ports need a
+ * different setting that is compatible with
+ * 100 Mbit settings
+ */
+ _octeon_rx_tx_delay(eth, 0xc, 0x0c);
+ return;
+ } else if (port == 1) {
+ /* Different config for switch port. */
+ _octeon_rx_tx_delay(eth, 0x0, 0x0);
+ return;
+ }
+ }
+ break;
+ case CVMX_BOARD_TYPE_UBNT_E100:
+ if (iface == 0 && port <= 2) {
+ _octeon_rx_tx_delay(eth, 0x0, 0x10);
+ return;
+ }
+ break;
+ }
+ fdt_nop_property(initial_boot_params, eth, "rx-delay");
+ fdt_nop_property(initial_boot_params, eth, "tx-delay");
+}
+
+static void __init octeon_fdt_pip_port(int iface, int i, int p, int max)
+{
+ char name_buffer[20];
+ int eth;
+ int phy_addr;
+ int ipd_port;
+ int fixed_link;
+
+ snprintf(name_buffer, sizeof(name_buffer), "ethernet@%x", p);
+ eth = fdt_subnode_offset(initial_boot_params, iface, name_buffer);
+ if (eth < 0)
+ return;
+ if (p > max) {
+ pr_debug("Deleting port %x:%x\n", i, p);
+ octeon_fdt_rm_ethernet(eth);
+ return;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ ipd_port = (0x100 * i) + (0x10 * p) + 0x800;
+ else
+ ipd_port = 16 * i + p;
+
+ phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
+ octeon_fdt_set_phy(eth, phy_addr);
+
+ fixed_link = fdt_subnode_offset(initial_boot_params, eth, "fixed-link");
+ if (fixed_link < 0)
+ WARN_ON(octeon_has_fixed_link(ipd_port));
+ else if (!octeon_has_fixed_link(ipd_port))
+ fdt_nop_node(initial_boot_params, fixed_link);
+ octeon_rx_tx_delay(eth, i, p);
+}
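
As a quick worked example of the ipd_port numbering above (the interface and port values are arbitrary), interface 1 / port 2 becomes 0x920 on CN68XX and 18 on the older chips:

/* Worked example of the ipd_port arithmetic above; values are illustrative. */
static int ipd_port_example(int is_cn68xx)
{
	int iface = 1, port = 2;

	if (is_cn68xx)
		return (0x100 * iface) + (0x10 * port) + 0x800;	/* 0x920 */
	return 16 * iface + port;				/* 18 */
}
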
+
+static void __init octeon_fdt_pip_iface(int pip, int idx)
+{
+ char name_buffer[20];
+ int iface;
+ int p;
+ int count = 0;
+
+ snprintf(name_buffer, sizeof(name_buffer), "interface@%d", idx);
+ iface = fdt_subnode_offset(initial_boot_params, pip, name_buffer);
+ if (iface < 0)
+ return;
+
+ if (cvmx_helper_interface_enumerate(idx) == 0)
+ count = cvmx_helper_ports_on_interface(idx);
+
+ for (p = 0; p < 16; p++)
+ octeon_fdt_pip_port(iface, idx, p, count - 1);
+}
+
+void __init octeon_fill_mac_addresses(void)
+{
+ const char *alias_prop;
+ char name_buffer[20];
+ u64 mac_addr_base;
+ int aliases;
+ int pip;
+ int i;
+
+ aliases = fdt_path_offset(initial_boot_params, "/aliases");
+ if (aliases < 0)
+ return;
+
+ mac_addr_base =
+ ((octeon_bootinfo->mac_addr_base[0] & 0xffull)) << 40 |
+ ((octeon_bootinfo->mac_addr_base[1] & 0xffull)) << 32 |
+ ((octeon_bootinfo->mac_addr_base[2] & 0xffull)) << 24 |
+ ((octeon_bootinfo->mac_addr_base[3] & 0xffull)) << 16 |
+ ((octeon_bootinfo->mac_addr_base[4] & 0xffull)) << 8 |
+ (octeon_bootinfo->mac_addr_base[5] & 0xffull);
+
+ for (i = 0; i < 2; i++) {
+ int mgmt;
+
+ snprintf(name_buffer, sizeof(name_buffer), "mix%d", i);
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ name_buffer, NULL);
+ if (!alias_prop)
+ continue;
+ mgmt = fdt_path_offset(initial_boot_params, alias_prop);
+ if (mgmt < 0)
+ continue;
+ octeon_fdt_set_mac_addr(mgmt, &mac_addr_base);
+ }
+
+ alias_prop = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
+ if (!alias_prop)
+ return;
+
+ pip = fdt_path_offset(initial_boot_params, alias_prop);
+ if (pip < 0)
+ return;
+
+ for (i = 0; i <= 4; i++) {
+ int iface;
+ int p;
+
+ snprintf(name_buffer, sizeof(name_buffer), "interface@%d", i);
+ iface = fdt_subnode_offset(initial_boot_params, pip,
+ name_buffer);
+ if (iface < 0)
+ continue;
+ for (p = 0; p < 16; p++) {
+ int eth;
+
+ snprintf(name_buffer, sizeof(name_buffer),
+ "ethernet@%x", p);
+ eth = fdt_subnode_offset(initial_boot_params, iface,
+ name_buffer);
+ if (eth < 0)
+ continue;
+ octeon_fdt_set_mac_addr(eth, &mac_addr_base);
+ }
+ }
+}
+
+int __init octeon_prune_device_tree(void)
+{
+ int i, max_port, uart_mask;
+ const char *pip_path;
+ const char *alias_prop;
+ char name_buffer[20];
+ int aliases;
+
+ if (fdt_check_header(initial_boot_params))
+ panic("Corrupt Device Tree.");
+
+ WARN(octeon_bootinfo->board_type == CVMX_BOARD_TYPE_CUST_DSR1000N,
+ "Built-in DTB booting is deprecated on %s. Please switch to use appended DTB.",
+ cvmx_board_type_to_string(octeon_bootinfo->board_type));
+
+ aliases = fdt_path_offset(initial_boot_params, "/aliases");
+ if (aliases < 0) {
+ pr_err("Error: No /aliases node in device tree.");
+ return -EINVAL;
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
+ max_port = 2;
+ else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))
+ max_port = 1;
+ else
+ max_port = 0;
+
+ if (octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC10E)
+ max_port = 0;
+
+ for (i = 0; i < 2; i++) {
+ int mgmt;
+
+ snprintf(name_buffer, sizeof(name_buffer),
+ "mix%d", i);
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ name_buffer, NULL);
+ if (alias_prop) {
+ mgmt = fdt_path_offset(initial_boot_params, alias_prop);
+ if (mgmt < 0)
+ continue;
+ if (i >= max_port) {
+ pr_debug("Deleting mix%d\n", i);
+ octeon_fdt_rm_ethernet(mgmt);
+ fdt_nop_property(initial_boot_params, aliases,
+ name_buffer);
+ } else {
+ int phy_addr = cvmx_helper_board_get_mii_address(CVMX_HELPER_BOARD_MGMT_IPD_PORT + i);
+
+ octeon_fdt_set_phy(mgmt, phy_addr);
+ }
+ }
+ }
+
+ pip_path = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
+ if (pip_path) {
+ int pip = fdt_path_offset(initial_boot_params, pip_path);
+
+ if (pip >= 0)
+ for (i = 0; i <= 4; i++)
+ octeon_fdt_pip_iface(pip, i);
+ }
+
+ /* I2C */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN63XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN68XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN56XX))
+ max_port = 2;
+ else
+ max_port = 1;
+
+ for (i = 0; i < 2; i++) {
+ int i2c;
+
+ snprintf(name_buffer, sizeof(name_buffer),
+ "twsi%d", i);
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ name_buffer, NULL);
+
+ if (alias_prop) {
+ i2c = fdt_path_offset(initial_boot_params, alias_prop);
+ if (i2c < 0)
+ continue;
+ if (i >= max_port) {
+ pr_debug("Deleting twsi%d\n", i);
+ fdt_nop_node(initial_boot_params, i2c);
+ fdt_nop_property(initial_boot_params, aliases,
+ name_buffer);
+ }
+ }
+ }
+
+ /* SMI/MDIO */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ max_port = 4;
+ else if (OCTEON_IS_MODEL(OCTEON_CN52XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN63XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN56XX))
+ max_port = 2;
+ else
+ max_port = 1;
+
+ for (i = 0; i < 2; i++) {
+ int i2c;
+
+ snprintf(name_buffer, sizeof(name_buffer),
+ "smi%d", i);
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ name_buffer, NULL);
+ if (alias_prop) {
+ i2c = fdt_path_offset(initial_boot_params, alias_prop);
+ if (i2c < 0)
+ continue;
+ if (i >= max_port) {
+ pr_debug("Deleting smi%d\n", i);
+ fdt_nop_node(initial_boot_params, i2c);
+ fdt_nop_property(initial_boot_params, aliases,
+ name_buffer);
+ }
+ }
+ }
+
+ /* Serial */
+ uart_mask = 3;
+
+ /* Right now CN52XX is the only chip with a third uart */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX))
+ uart_mask |= 4; /* uart2 */
+
+ for (i = 0; i < 3; i++) {
+ int uart;
+
+ snprintf(name_buffer, sizeof(name_buffer),
+ "uart%d", i);
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ name_buffer, NULL);
+
+ if (alias_prop) {
+ uart = fdt_path_offset(initial_boot_params, alias_prop);
+ if (uart_mask & (1 << i)) {
+ __be32 f;
+
+ f = cpu_to_be32(octeon_get_io_clock_rate());
+ fdt_setprop_inplace(initial_boot_params,
+ uart, "clock-frequency",
+ &f, sizeof(f));
+ continue;
+ }
+ pr_debug("Deleting uart%d\n", i);
+ fdt_nop_node(initial_boot_params, uart);
+ fdt_nop_property(initial_boot_params, aliases,
+ name_buffer);
+ }
+ }
+
+ /* Compact Flash */
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ "cf0", NULL);
+ if (alias_prop) {
+ union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
+ unsigned long base_ptr, region_base, region_size;
+ unsigned long region1_base = 0;
+ unsigned long region1_size = 0;
+ int cs, bootbus;
+ bool is_16bit = false;
+ bool is_true_ide = false;
+ __be32 new_reg[6];
+ __be32 *ranges;
+ int len;
+
+ int cf = fdt_path_offset(initial_boot_params, alias_prop);
+
+ base_ptr = 0;
+ if (octeon_bootinfo->major_version == 1
+ && octeon_bootinfo->minor_version >= 1) {
+ if (octeon_bootinfo->compact_flash_common_base_addr)
+ base_ptr = octeon_bootinfo->compact_flash_common_base_addr;
+ } else {
+ base_ptr = 0x1d000800;
+ }
+
+ if (!base_ptr)
+ goto no_cf;
+
+ /* Find CS0 region. */
+ for (cs = 0; cs < 8; cs++) {
+ mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
+ region_base = mio_boot_reg_cfg.s.base << 16;
+ region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
+ if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
+ && base_ptr < region_base + region_size) {
+ is_16bit = mio_boot_reg_cfg.s.width;
+ break;
+ }
+ }
+ if (cs >= 7) {
+ /* cs and cs + 1 are CS0 and CS1, both must be less than 8. */
+ goto no_cf;
+ }
+
+ if (!(base_ptr & 0xfffful)) {
+ /*
+ * Boot loader signals availability of DMA (true_ide
+ * mode) by setting low order bits of base_ptr to
+ * zero.
+ */
+
+ /* Assume that CS1 immediately follows. */
+ mio_boot_reg_cfg.u64 =
+ cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs + 1));
+ region1_base = mio_boot_reg_cfg.s.base << 16;
+ region1_size = (mio_boot_reg_cfg.s.size + 1) << 16;
+ if (!mio_boot_reg_cfg.s.en)
+ goto no_cf;
+ is_true_ide = true;
+
+ } else {
+ fdt_nop_property(initial_boot_params, cf, "cavium,true-ide");
+ fdt_nop_property(initial_boot_params, cf, "cavium,dma-engine-handle");
+ if (!is_16bit) {
+ __be32 width = cpu_to_be32(8);
+
+ fdt_setprop_inplace(initial_boot_params, cf,
+ "cavium,bus-width", &width, sizeof(width));
+ }
+ }
+ new_reg[0] = cpu_to_be32(cs);
+ new_reg[1] = cpu_to_be32(0);
+ new_reg[2] = cpu_to_be32(0x10000);
+ new_reg[3] = cpu_to_be32(cs + 1);
+ new_reg[4] = cpu_to_be32(0);
+ new_reg[5] = cpu_to_be32(0x10000);
+ fdt_setprop_inplace(initial_boot_params, cf,
+ "reg", new_reg, sizeof(new_reg));
+
+ bootbus = fdt_parent_offset(initial_boot_params, cf);
+ if (bootbus < 0)
+ goto no_cf;
+ ranges = fdt_getprop_w(initial_boot_params, bootbus, "ranges", &len);
+ if (!ranges || len < (5 * 8 * sizeof(__be32)))
+ goto no_cf;
+
+ ranges[(cs * 5) + 2] = cpu_to_be32(region_base >> 32);
+ ranges[(cs * 5) + 3] = cpu_to_be32(region_base & 0xffffffff);
+ ranges[(cs * 5) + 4] = cpu_to_be32(region_size);
+ if (is_true_ide) {
+ cs++;
+ ranges[(cs * 5) + 2] = cpu_to_be32(region1_base >> 32);
+ ranges[(cs * 5) + 3] = cpu_to_be32(region1_base & 0xffffffff);
+ ranges[(cs * 5) + 4] = cpu_to_be32(region1_size);
+ }
+ goto end_cf;
+no_cf:
+ fdt_nop_node(initial_boot_params, cf);
+
+end_cf:
+ ;
+ }
+
+ /* 8 char LED */
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ "led0", NULL);
+ if (alias_prop) {
+ union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
+ unsigned long base_ptr, region_base, region_size;
+ int cs, bootbus;
+ __be32 new_reg[6];
+ __be32 *ranges;
+ int len;
+ int led = fdt_path_offset(initial_boot_params, alias_prop);
+
+ base_ptr = octeon_bootinfo->led_display_base_addr;
+ if (base_ptr == 0)
+ goto no_led;
+ /* Find CS0 region. */
+ for (cs = 0; cs < 8; cs++) {
+ mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
+ region_base = mio_boot_reg_cfg.s.base << 16;
+ region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
+ if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
+ && base_ptr < region_base + region_size)
+ break;
+ }
+
+ if (cs > 7)
+ goto no_led;
+
+ new_reg[0] = cpu_to_be32(cs);
+ new_reg[1] = cpu_to_be32(0x20);
+ new_reg[2] = cpu_to_be32(0x20);
+ new_reg[3] = cpu_to_be32(cs);
+ new_reg[4] = cpu_to_be32(0);
+ new_reg[5] = cpu_to_be32(0x20);
+ fdt_setprop_inplace(initial_boot_params, led,
+ "reg", new_reg, sizeof(new_reg));
+
+ bootbus = fdt_parent_offset(initial_boot_params, led);
+ if (bootbus < 0)
+ goto no_led;
+ ranges = fdt_getprop_w(initial_boot_params, bootbus, "ranges", &len);
+ if (!ranges || len < (5 * 8 * sizeof(__be32)))
+ goto no_led;
+
+ ranges[(cs * 5) + 2] = cpu_to_be32(region_base >> 32);
+ ranges[(cs * 5) + 3] = cpu_to_be32(region_base & 0xffffffff);
+ ranges[(cs * 5) + 4] = cpu_to_be32(region_size);
+ goto end_led;
+
+no_led:
+ fdt_nop_node(initial_boot_params, led);
+end_led:
+ ;
+ }
+
+#ifdef CONFIG_USB
+ /* OHCI/UHCI USB */
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ "uctl", NULL);
+ if (alias_prop) {
+ int uctl = fdt_path_offset(initial_boot_params, alias_prop);
+
+ if (uctl >= 0 && (!OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
+ octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC2E)) {
+ pr_debug("Deleting uctl\n");
+ fdt_nop_node(initial_boot_params, uctl);
+ fdt_nop_property(initial_boot_params, aliases, "uctl");
+ } else if (octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC10E ||
+ octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC4E) {
+ /* Missing "refclk-type" defaults to crystal. */
+ fdt_nop_property(initial_boot_params, uctl, "refclk-type");
+ }
+ }
+
+ /* DWC2 USB */
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ "usbn", NULL);
+ if (alias_prop) {
+ int usbn = fdt_path_offset(initial_boot_params, alias_prop);
+
+ if (usbn >= 0 && (current_cpu_type() == CPU_CAVIUM_OCTEON2 ||
+ !octeon_has_feature(OCTEON_FEATURE_USB))) {
+ pr_debug("Deleting usbn\n");
+ fdt_nop_node(initial_boot_params, usbn);
+ fdt_nop_property(initial_boot_params, aliases, "usbn");
+ } else {
+ __be32 new_f[1];
+ enum cvmx_helper_board_usb_clock_types c;
+
+ c = __cvmx_helper_board_usb_get_clock_type();
+ switch (c) {
+ case USB_CLOCK_TYPE_REF_48:
+ new_f[0] = cpu_to_be32(48000000);
+ fdt_setprop_inplace(initial_boot_params, usbn,
+ "refclk-frequency", new_f, sizeof(new_f));
+ fallthrough;
+ case USB_CLOCK_TYPE_REF_12:
+ /* Missing "refclk-type" defaults to external. */
+ fdt_nop_property(initial_boot_params, usbn, "refclk-type");
+ break;
+ default:
+ break;
+ }
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static int __init octeon_publish_devices(void)
+{
+ return of_platform_populate(NULL, octeon_ids, NULL, NULL);
+}
+arch_initcall(octeon_publish_devices);
diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c
new file mode 100644
index 000000000..fa87e5aa1
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon-usb.c
@@ -0,0 +1,557 @@
+/*
+ * XHCI HCD glue for Cavium Octeon III SOCs.
+ *
+ * Copyright (C) 2010-2017 Cavium Networks
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#include <asm/octeon/octeon.h>
+
+/* USB Control Register */
+union cvm_usbdrd_uctl_ctl {
+ uint64_t u64;
+ struct cvm_usbdrd_uctl_ctl_s {
+ /* 1 = BIST and set all USB RAMs to 0x0, 0 = BIST */
+ __BITFIELD_FIELD(uint64_t clear_bist:1,
+ /* 1 = Start BIST and cleared by hardware */
+ __BITFIELD_FIELD(uint64_t start_bist:1,
+ /* Reference clock select for SuperSpeed and HighSpeed PLLs:
+ * 0x0 = Both PLLs use DLMC_REF_CLK0 for reference clock
+ * 0x1 = Both PLLs use DLMC_REF_CLK1 for reference clock
+ * 0x2 = SuperSpeed PLL uses DLMC_REF_CLK0 for reference clock &
+ * HighSpeed PLL uses PLL_REF_CLK for reference clock
+ * 0x3 = SuperSpeed PLL uses DLMC_REF_CLK1 for reference clock &
+ * HighSpeed PLL uses PLL_REF_CLK for reference clock
+ */
+ __BITFIELD_FIELD(uint64_t ref_clk_sel:2,
+ /* 1 = Spread-spectrum clock enable, 0 = SS clock disable */
+ __BITFIELD_FIELD(uint64_t ssc_en:1,
+ /* Spread-spectrum clock modulation range:
+ * 0x0 = -4980 ppm downspread
+ * 0x1 = -4492 ppm downspread
+ * 0x2 = -4003 ppm downspread
+ * 0x3 - 0x7 = Reserved
+ */
+ __BITFIELD_FIELD(uint64_t ssc_range:3,
+ /* Enable non-standard oscillator frequencies:
+ * [55:53] = modules -1
+ * [52:47] = 2's complement push amount, 0 = Feature disabled
+ */
+ __BITFIELD_FIELD(uint64_t ssc_ref_clk_sel:9,
+ /* Reference clock multiplier for non-standard frequencies:
+ * 0x19 = 100MHz on DLMC_REF_CLK* if REF_CLK_SEL = 0x0 or 0x1
+ * 0x28 = 125MHz on DLMC_REF_CLK* if REF_CLK_SEL = 0x0 or 0x1
+ * 0x32 = 50MHz on DLMC_REF_CLK* if REF_CLK_SEL = 0x0 or 0x1
+ * Other Values = Reserved
+ */
+ __BITFIELD_FIELD(uint64_t mpll_multiplier:7,
+ /* Enable reference clock to prescaler for SuperSpeed functionality.
+ * Should always be set to "1"
+ */
+ __BITFIELD_FIELD(uint64_t ref_ssp_en:1,
+ /* Divide the reference clock by 2 before entering the
+ * REF_CLK_FSEL divider:
+ * If REF_CLK_SEL = 0x0 or 0x1, then only 0x0 is legal
+ * If REF_CLK_SEL = 0x2 or 0x3, then:
+ * 0x1 = DLMC_REF_CLK* is 125MHz
+ * 0x0 = DLMC_REF_CLK* is another supported frequency
+ */
+ __BITFIELD_FIELD(uint64_t ref_clk_div2:1,
+ /* Select reference clock frequency for both PLL blocks:
+ * 0x27 = REF_CLK_SEL is 0x0 or 0x1
+ * 0x07 = REF_CLK_SEL is 0x2 or 0x3
+ */
+ __BITFIELD_FIELD(uint64_t ref_clk_fsel:6,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_31_31:1,
+ /* Controller clock enable. */
+ __BITFIELD_FIELD(uint64_t h_clk_en:1,
+ /* Select bypass input to controller clock divider:
+ * 0x0 = Use divided coprocessor clock from H_CLKDIV
+ * 0x1 = Use clock from GPIO pins
+ */
+ __BITFIELD_FIELD(uint64_t h_clk_byp_sel:1,
+ /* Reset controller clock divider. */
+ __BITFIELD_FIELD(uint64_t h_clkdiv_rst:1,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_27_27:1,
+ /* Clock divider select:
+ * 0x0 = divide by 1
+ * 0x1 = divide by 2
+ * 0x2 = divide by 4
+ * 0x3 = divide by 6
+ * 0x4 = divide by 8
+ * 0x5 = divide by 16
+ * 0x6 = divide by 24
+ * 0x7 = divide by 32
+ */
+ __BITFIELD_FIELD(uint64_t h_clkdiv_sel:3,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_22_23:2,
+ /* USB3 port permanently attached: 0x0 = No, 0x1 = Yes */
+ __BITFIELD_FIELD(uint64_t usb3_port_perm_attach:1,
+ /* USB2 port permanently attached: 0x0 = No, 0x1 = Yes */
+ __BITFIELD_FIELD(uint64_t usb2_port_perm_attach:1,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_19_19:1,
+ /* Disable SuperSpeed PHY: 0x0 = No, 0x1 = Yes */
+ __BITFIELD_FIELD(uint64_t usb3_port_disable:1,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_17_17:1,
+ /* Disable HighSpeed PHY: 0x0 = No, 0x1 = Yes */
+ __BITFIELD_FIELD(uint64_t usb2_port_disable:1,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_15_15:1,
+ /* Enable PHY SuperSpeed block power: 0x0 = No, 0x1 = Yes */
+ __BITFIELD_FIELD(uint64_t ss_power_en:1,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_13_13:1,
+ /* Enable PHY HighSpeed block power: 0x0 = No, 0x1 = Yes */
+ __BITFIELD_FIELD(uint64_t hs_power_en:1,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_5_11:7,
+ /* Enable USB UCTL interface clock: 0x0 = No, 0x1 = Yes */
+ __BITFIELD_FIELD(uint64_t csclk_en:1,
+ /* Controller mode: 0x0 = Host, 0x1 = Device */
+ __BITFIELD_FIELD(uint64_t drd_mode:1,
+ /* PHY reset */
+ __BITFIELD_FIELD(uint64_t uphy_rst:1,
+ /* Software reset UAHC */
+ __BITFIELD_FIELD(uint64_t uahc_rst:1,
+ /* Software resets UCTL */
+ __BITFIELD_FIELD(uint64_t uctl_rst:1,
+ ;)))))))))))))))))))))))))))))))))
+ } s;
+};
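
The deeply nested __BITFIELD_FIELD() calls above let one field list serve both bitfield endiannesses. Conceptually (paraphrased here as an assumption about asm/bitfield.h, not quoted from it), the macro emits the field before or after the remaining fields depending on the configured bit order:

/*
 * Conceptual sketch of the __BITFIELD_FIELD() idea used above
 * (paraphrased, not copied from asm/bitfield.h): on big-endian-bitfield
 * builds fields appear in declaration order, on little-endian builds the
 * whole nested list comes out reversed.
 */
#if defined(__BIG_ENDIAN_BITFIELD)
#define BITFIELD_FIELD_SKETCH(field, more)	field; more
#else
#define BITFIELD_FIELD_SKETCH(field, more)	more field;
#endif

union uctl_ctl_sketch {
	unsigned long long u64;
	struct {
		BITFIELD_FIELD_SKETCH(unsigned long long clear_bist:1,
		BITFIELD_FIELD_SKETCH(unsigned long long rest_of_reg:63,
		;))
	} s;
};
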
+
+/* UAHC Configuration Register */
+union cvm_usbdrd_uctl_host_cfg {
+ uint64_t u64;
+ struct cvm_usbdrd_uctl_host_cfg_s {
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_60_63:4,
+ /* Indicates minimum value of all received BELT values */
+ __BITFIELD_FIELD(uint64_t host_current_belt:12,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_38_47:10,
+ /* HS jitter adjustment */
+ __BITFIELD_FIELD(uint64_t fla:6,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_29_31:3,
+ /* Bus-master enable: 0x0 = Disabled (stall DMAs), 0x1 = enabled */
+ __BITFIELD_FIELD(uint64_t bme:1,
+ /* Overcurrent protection enable: 0x0 = unavailable, 0x1 = available */
+ __BITFIELD_FIELD(uint64_t oci_en:1,
+ /* Overcurrent sense selection:
+ * 0x0 = Overcurrent indication from off-chip is active-low
+ * 0x1 = Overcurrent indication from off-chip is active-high
+ */
+ __BITFIELD_FIELD(uint64_t oci_active_high_en:1,
+ /* Port power control enable: 0x0 = unavailable, 0x1 = available */
+ __BITFIELD_FIELD(uint64_t ppc_en:1,
+ /* Port power control sense selection:
+ * 0x0 = Port power to off-chip is active-low
+ * 0x1 = Port power to off-chip is active-high
+ */
+ __BITFIELD_FIELD(uint64_t ppc_active_high_en:1,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_0_23:24,
+ ;)))))))))))
+ } s;
+};
+
+/* UCTL Shim Features Register */
+union cvm_usbdrd_uctl_shim_cfg {
+ uint64_t u64;
+ struct cvm_usbdrd_uctl_shim_cfg_s {
+ /* Out-of-bound UAHC register access: 0 = read, 1 = write */
+ __BITFIELD_FIELD(uint64_t xs_ncb_oob_wrn:1,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_60_62:3,
+ /* SRCID error log for out-of-bound UAHC register access:
+ * [59:58] = chipID
+ * [57] = Request source: 0 = core, 1 = NCB-device
+ * [56:51] = Core/NCB-device number, [56] always 0 for NCB devices
+ * [50:48] = SubID
+ */
+ __BITFIELD_FIELD(uint64_t xs_ncb_oob_osrc:12,
+ /* Error log for bad UAHC DMA access: 0 = Read log, 1 = Write log */
+ __BITFIELD_FIELD(uint64_t xm_bad_dma_wrn:1,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_44_46:3,
+ /* Encoded error type for bad UAHC DMA */
+ __BITFIELD_FIELD(uint64_t xm_bad_dma_type:4,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_13_39:27,
+ /* Select the IOI read command used by DMA accesses */
+ __BITFIELD_FIELD(uint64_t dma_read_cmd:1,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_10_11:2,
+ /* Select endian format for DMA accesses to the L2c:
+ * 0x0 = Little endian
+ * 0x1 = Big endian
+ * 0x2 = Reserved
+ * 0x3 = Reserved
+ */
+ __BITFIELD_FIELD(uint64_t dma_endian_mode:2,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved_2_7:6,
+ /* Select endian format for IOI CSR access to UAHC:
+ * 0x0 = Little endian
+ * 0x1 = Big endian
+ * 0x2 = Reserved
+ * 0x3 = Reserved
+ */
+ __BITFIELD_FIELD(uint64_t csr_endian_mode:2,
+ ;))))))))))))
+ } s;
+};
+
+#define OCTEON_H_CLKDIV_SEL 8
+#define OCTEON_MIN_H_CLK_RATE 150000000
+#define OCTEON_MAX_H_CLK_RATE 300000000
+
+static DEFINE_MUTEX(dwc3_octeon_clocks_mutex);
+static uint8_t clk_div[OCTEON_H_CLKDIV_SEL] = {1, 2, 4, 6, 8, 16, 24, 32};
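
This table feeds the divider search in dwc3_octeon_clocks_start() below, which picks the first divider that brings the coprocessor clock into the 150-300 MHz window. A worked example with an assumed (not measured) 800 MHz IO clock:

/*
 * Illustrative divider selection matching the loop in
 * dwc3_octeon_clocks_start() below; the 800 MHz IO clock is an assumed
 * example value.  clk_div[2] == 4 gives 200 MHz, inside 150-300 MHz.
 */
static int h_clkdiv_sel_example(void)
{
	static const unsigned int divs[8] = {1, 2, 4, 6, 8, 16, 24, 32};
	unsigned long io_clk = 800000000UL;
	int div;

	for (div = 0; div < 8; div++) {
		unsigned long rate = io_clk / divs[div];

		if (rate >= 150000000UL && rate <= 300000000UL)
			break;
	}
	return div;	/* 2 for the assumed 800 MHz clock */
}
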
+
+
+static int dwc3_octeon_config_power(struct device *dev, u64 base)
+{
+#define UCTL_HOST_CFG 0xe0
+ union cvm_usbdrd_uctl_host_cfg uctl_host_cfg;
+ union cvmx_gpio_bit_cfgx gpio_bit;
+ uint32_t gpio_pwr[3];
+ int gpio, len, power_active_low;
+ struct device_node *node = dev->of_node;
+ int index = (base >> 24) & 1;
+
+ if (of_find_property(node, "power", &len) != NULL) {
+ if (len == 12) {
+ of_property_read_u32_array(node, "power", gpio_pwr, 3);
+ power_active_low = gpio_pwr[2] & 0x01;
+ gpio = gpio_pwr[1];
+ } else if (len == 8) {
+ of_property_read_u32_array(node, "power", gpio_pwr, 2);
+ power_active_low = 0;
+ gpio = gpio_pwr[1];
+ } else {
+ dev_err(dev, "dwc3 controller clock init failure.\n");
+ return -EINVAL;
+ }
+ if ((OCTEON_IS_MODEL(OCTEON_CN73XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF75XX))
+ && gpio <= 31) {
+ gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(gpio));
+ gpio_bit.s.tx_oe = 1;
+ gpio_bit.s.output_sel = (index == 0 ? 0x14 : 0x15);
+ cvmx_write_csr(CVMX_GPIO_BIT_CFGX(gpio), gpio_bit.u64);
+ } else if (gpio <= 15) {
+ gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(gpio));
+ gpio_bit.s.tx_oe = 1;
+ gpio_bit.s.output_sel = (index == 0 ? 0x14 : 0x19);
+ cvmx_write_csr(CVMX_GPIO_BIT_CFGX(gpio), gpio_bit.u64);
+ } else {
+ gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_XBIT_CFGX(gpio));
+ gpio_bit.s.tx_oe = 1;
+ gpio_bit.s.output_sel = (index == 0 ? 0x14 : 0x19);
+ cvmx_write_csr(CVMX_GPIO_XBIT_CFGX(gpio), gpio_bit.u64);
+ }
+
+ /* Enable XHCI power control and set if active high or low. */
+ uctl_host_cfg.u64 = cvmx_read_csr(base + UCTL_HOST_CFG);
+ uctl_host_cfg.s.ppc_en = 1;
+ uctl_host_cfg.s.ppc_active_high_en = !power_active_low;
+ cvmx_write_csr(base + UCTL_HOST_CFG, uctl_host_cfg.u64);
+ } else {
+ /* Disable XHCI power control and set if active high. */
+ uctl_host_cfg.u64 = cvmx_read_csr(base + UCTL_HOST_CFG);
+ uctl_host_cfg.s.ppc_en = 0;
+ uctl_host_cfg.s.ppc_active_high_en = 0;
+ cvmx_write_csr(base + UCTL_HOST_CFG, uctl_host_cfg.u64);
+ dev_warn(dev, "dwc3 controller clock init failure.\n");
+ }
+ return 0;
+}
+
+static int dwc3_octeon_clocks_start(struct device *dev, u64 base)
+{
+ union cvm_usbdrd_uctl_ctl uctl_ctl;
+ int ref_clk_sel = 2;
+ u64 div;
+ u32 clock_rate;
+ int mpll_mul;
+ int i;
+ u64 h_clk_rate;
+ u64 uctl_ctl_reg = base;
+
+ if (dev->of_node) {
+ const char *ss_clock_type;
+ const char *hs_clock_type;
+
+ i = of_property_read_u32(dev->of_node,
+ "refclk-frequency", &clock_rate);
+ if (i) {
+ pr_err("No UCTL \"refclk-frequency\"\n");
+ return -EINVAL;
+ }
+ i = of_property_read_string(dev->of_node,
+ "refclk-type-ss", &ss_clock_type);
+ if (i) {
+ pr_err("No UCTL \"refclk-type-ss\"\n");
+ return -EINVAL;
+ }
+ i = of_property_read_string(dev->of_node,
+ "refclk-type-hs", &hs_clock_type);
+ if (i) {
+ pr_err("No UCTL \"refclk-type-hs\"\n");
+ return -EINVAL;
+ }
+ if (strcmp("dlmc_ref_clk0", ss_clock_type) == 0) {
+ if (strcmp(hs_clock_type, "dlmc_ref_clk0") == 0)
+ ref_clk_sel = 0;
+ else if (strcmp(hs_clock_type, "pll_ref_clk") == 0)
+ ref_clk_sel = 2;
+ else
+ pr_err("Invalid HS clock type %s, using pll_ref_clk instead\n",
+ hs_clock_type);
+ } else if (strcmp(ss_clock_type, "dlmc_ref_clk1") == 0) {
+ if (strcmp(hs_clock_type, "dlmc_ref_clk1") == 0)
+ ref_clk_sel = 1;
+ else if (strcmp(hs_clock_type, "pll_ref_clk") == 0)
+ ref_clk_sel = 3;
+ else {
+ pr_err("Invalid HS clock type %s, using pll_ref_clk instead\n",
+ hs_clock_type);
+ ref_clk_sel = 3;
+ }
+ } else
+ pr_err("Invalid SS clock type %s, using dlmc_ref_clk0 instead\n",
+ ss_clock_type);
+
+ if ((ref_clk_sel == 0 || ref_clk_sel == 1) &&
+ (clock_rate != 100000000))
+ pr_err("Invalid UCTL clock rate of %u, using 100000000 instead\n",
+ clock_rate);
+
+ } else {
+ pr_err("No USB UCTL device node\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Step 1: Wait for all voltages to be stable...that surely
+ * happened before starting the kernel. SKIP
+ */
+
+ /* Step 2: Select GPIO for overcurrent indication, if desired. SKIP */
+
+ /* Step 3: Assert all resets. */
+ uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+ uctl_ctl.s.uphy_rst = 1;
+ uctl_ctl.s.uahc_rst = 1;
+ uctl_ctl.s.uctl_rst = 1;
+ cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+
+ /* Step 4a: Reset the clock dividers. */
+ uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+ uctl_ctl.s.h_clkdiv_rst = 1;
+ cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+
+ /* Step 4b: Select controller clock frequency. */
+ for (div = 0; div < OCTEON_H_CLKDIV_SEL; div++) {
+ h_clk_rate = octeon_get_io_clock_rate() / clk_div[div];
+ if (h_clk_rate <= OCTEON_MAX_H_CLK_RATE &&
+ h_clk_rate >= OCTEON_MIN_H_CLK_RATE)
+ break;
+ }
+ uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+ uctl_ctl.s.h_clkdiv_sel = div;
+ uctl_ctl.s.h_clk_en = 1;
+ cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+ uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+ if ((div != uctl_ctl.s.h_clkdiv_sel) || (!uctl_ctl.s.h_clk_en)) {
+ dev_err(dev, "dwc3 controller clock init failure.\n");
+ return -EINVAL;
+ }
+
+ /* Step 4c: Deassert the controller clock divider reset. */
+ uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+ uctl_ctl.s.h_clkdiv_rst = 0;
+ cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+
+ /* Step 5a: Reference clock configuration. */
+ uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+ uctl_ctl.s.ref_clk_sel = ref_clk_sel;
+ uctl_ctl.s.ref_clk_fsel = 0x07;
+ uctl_ctl.s.ref_clk_div2 = 0;
+ switch (clock_rate) {
+ default:
+ dev_err(dev, "Invalid ref_clk %u, using 100000000 instead\n",
+ clock_rate);
+ fallthrough;
+ case 100000000:
+ mpll_mul = 0x19;
+ if (ref_clk_sel < 2)
+ uctl_ctl.s.ref_clk_fsel = 0x27;
+ break;
+ case 50000000:
+ mpll_mul = 0x32;
+ break;
+ case 125000000:
+ mpll_mul = 0x28;
+ break;
+ }
+ uctl_ctl.s.mpll_multiplier = mpll_mul;
+
+ /* Step 5b: Configure and enable spread-spectrum for SuperSpeed. */
+ uctl_ctl.s.ssc_en = 1;
+
+ /* Step 5c: Enable SuperSpeed. */
+ uctl_ctl.s.ref_ssp_en = 1;
+
+ /* Step 5d: Configure PHYs. SKIP */
+
+ /* Step 6a & 6b: Power up PHYs. */
+ uctl_ctl.s.hs_power_en = 1;
+ uctl_ctl.s.ss_power_en = 1;
+ cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+
+ /* Step 7: Wait 10 controller-clock cycles to take effect. */
+ udelay(10);
+
+ /* Step 8a: Deassert UCTL reset signal. */
+ uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+ uctl_ctl.s.uctl_rst = 0;
+ cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+
+ /* Step 8b: Wait 10 controller-clock cycles. */
+ udelay(10);
+
+ /* Step 8c: Set up port-power control. */
+ if (dwc3_octeon_config_power(dev, base)) {
+ dev_err(dev, "Error configuring power.\n");
+ return -EINVAL;
+ }
+
+ /* Step 8d: Deassert UAHC reset signal. */
+ uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+ uctl_ctl.s.uahc_rst = 0;
+ cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+
+ /* Step 8e: Wait 10 controller-clock cycles. */
+ udelay(10);
+
+ /* Step 9: Enable conditional coprocessor clock of UCTL. */
+ uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+ uctl_ctl.s.csclk_en = 1;
+ cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+
+ /* Step 10: Set for host mode only. */
+ uctl_ctl.u64 = cvmx_read_csr(uctl_ctl_reg);
+ uctl_ctl.s.drd_mode = 0;
+ cvmx_write_csr(uctl_ctl_reg, uctl_ctl.u64);
+
+ return 0;
+}
+
+static void __init dwc3_octeon_set_endian_mode(u64 base)
+{
+#define UCTL_SHIM_CFG 0xe8
+ union cvm_usbdrd_uctl_shim_cfg shim_cfg;
+
+ shim_cfg.u64 = cvmx_read_csr(base + UCTL_SHIM_CFG);
+#ifdef __BIG_ENDIAN
+ shim_cfg.s.dma_endian_mode = 1;
+ shim_cfg.s.csr_endian_mode = 1;
+#else
+ shim_cfg.s.dma_endian_mode = 0;
+ shim_cfg.s.csr_endian_mode = 0;
+#endif
+ cvmx_write_csr(base + UCTL_SHIM_CFG, shim_cfg.u64);
+}
+
+#define CVMX_USBDRDX_UCTL_CTL(index) \
+ (CVMX_ADD_IO_SEG(0x0001180068000000ull) + \
+ ((index & 1) * 0x1000000ull))
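+/*
+ * Bit 24 of the UCTL base address distinguishes the two USB blocks
+ * (physical addresses 0x...68000000 and 0x...69000000), which is why
+ * (base >> 24) & 1 below recovers the controller index.
+ */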
+static void __init dwc3_octeon_phy_reset(u64 base)
+{
+ union cvm_usbdrd_uctl_ctl uctl_ctl;
+ int index = (base >> 24) & 1;
+
+ uctl_ctl.u64 = cvmx_read_csr(CVMX_USBDRDX_UCTL_CTL(index));
+ uctl_ctl.s.uphy_rst = 0;
+ cvmx_write_csr(CVMX_USBDRDX_UCTL_CTL(index), uctl_ctl.u64);
+}
+
+static int __init dwc3_octeon_device_init(void)
+{
+ const char compat_node_name[] = "cavium,octeon-7130-usb-uctl";
+ struct platform_device *pdev;
+ struct device_node *node;
+ struct resource *res;
+ void __iomem *base;
+
+ /*
+ * There should be only three universal controllers ("uctl" nodes)
+ * in the device tree: two for USB and one for SATA, which we ignore.
+ */
+ node = NULL;
+ do {
+ node = of_find_node_by_name(node, "uctl");
+ if (!node)
+ return -ENODEV;
+
+ if (of_device_is_compatible(node, compat_node_name)) {
+ pdev = of_find_device_by_node(node);
+ if (!pdev)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ put_device(&pdev->dev);
+ dev_err(&pdev->dev, "No memory resources\n");
+ return -ENXIO;
+ }
+
+ /*
+ * The code below maps in the registers necessary for
+ * setting up the clocks and resetting the PHYs. We must
+ * release the resources afterwards so the dwc3 driver can
+ * later claim them itself.
+ */
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ put_device(&pdev->dev);
+ return PTR_ERR(base);
+ }
+
+ mutex_lock(&dwc3_octeon_clocks_mutex);
+ dwc3_octeon_clocks_start(&pdev->dev, (u64)base);
+ dwc3_octeon_set_endian_mode((u64)base);
+ dwc3_octeon_phy_reset((u64)base);
+ dev_info(&pdev->dev, "clocks initialized.\n");
+ mutex_unlock(&dwc3_octeon_clocks_mutex);
+ devm_iounmap(&pdev->dev, base);
+ devm_release_mem_region(&pdev->dev, res->start,
+ resource_size(res));
+ put_device(&pdev->dev);
+ }
+ } while (node != NULL);
+
+ return 0;
+}
+device_initcall(dwc3_octeon_device_init);
+
+MODULE_AUTHOR("David Daney <david.daney@cavium.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("USB driver for OCTEON III SoC");
diff --git a/arch/mips/cavium-octeon/octeon_boot.h b/arch/mips/cavium-octeon/octeon_boot.h
new file mode 100644
index 000000000..9eab66f3b
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon_boot.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * (C) Copyright 2004, 2005 Cavium Networks
+ */
+
+#ifndef __OCTEON_BOOT_H__
+#define __OCTEON_BOOT_H__
+
+#include <linux/types.h>
+
+struct boot_init_vector {
+ /* First stage address - in ram instead of flash */
+ uint64_t code_addr;
+ /* Setup code for application, NOT application entry point */
+ uint32_t app_start_func_addr;
+ /* k0 is used for global data - needs to be passed to other cores */
+ uint32_t k0_val;
+ /* Address of boot info block structure */
+ uint64_t boot_info_addr;
+ uint32_t flags; /* flags */
+ uint32_t pad;
+};
+
+/* similar to bootloader's linux_app_boot_info but without global data */
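+/*
+ * Note: on little-endian kernels each pair of 32-bit fields within a
+ * 64-bit word is swapped, presumably so the field offsets still match
+ * the layout the (big-endian) bootloader wrote into memory.
+ */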
+struct linux_app_boot_info {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t labi_signature;
+ uint32_t start_core0_addr;
+ uint32_t avail_coremask;
+ uint32_t pci_console_active;
+ uint32_t icache_prefetch_disable;
+ uint32_t padding;
+ uint64_t InitTLBStart_addr;
+ uint32_t start_app_addr;
+ uint32_t cur_exception_base;
+ uint32_t no_mark_private_data;
+ uint32_t compact_flash_common_base_addr;
+ uint32_t compact_flash_attribute_base_addr;
+ uint32_t led_display_base_addr;
+#else
+ uint32_t start_core0_addr;
+ uint32_t labi_signature;
+
+ uint32_t pci_console_active;
+ uint32_t avail_coremask;
+
+ uint32_t padding;
+ uint32_t icache_prefetch_disable;
+
+ uint64_t InitTLBStart_addr;
+
+ uint32_t cur_exception_base;
+ uint32_t start_app_addr;
+
+ uint32_t compact_flash_common_base_addr;
+ uint32_t no_mark_private_data;
+
+ uint32_t led_display_base_addr;
+ uint32_t compact_flash_attribute_base_addr;
+#endif
+};
+
+/* To avoid copying a lot of the bootloader's structures,
+ only the offset of the requested member is given here */
+#define AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK 0x765c
+
+/* hardcoded in bootloader */
+#define LABI_ADDR_IN_BOOTLOADER 0x700
+
+#define LINUX_APP_BOOT_BLOCK_NAME "linux-app-boot"
+
+#define LABI_SIGNATURE 0xAABBCC01
+
+/* from uboot-headers/octeon_mem_map.h */
+#define EXCEPTION_BASE_INCR (4 * 1024)
+ /* Increment size for exception base addresses (4k minimum) */
+#define EXCEPTION_BASE_BASE 0
+#define BOOTLOADER_PRIV_DATA_BASE (EXCEPTION_BASE_BASE + 0x800)
+#define BOOTLOADER_BOOT_VECTOR (BOOTLOADER_PRIV_DATA_BASE)
+
+#endif /* __OCTEON_BOOT_H__ */
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
new file mode 100644
index 000000000..b329cdb61
--- /dev/null
+++ b/arch/mips/cavium-octeon/setup.c
@@ -0,0 +1,1271 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2007 Cavium Networks
+ * Copyright (C) 2008, 2009 Wind River Systems
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/compiler.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/serial.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/string.h> /* for memset */
+#include <linux/tty.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/serial_8250.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
+#include <linux/kexec.h>
+
+#include <asm/processor.h>
+#include <asm/reboot.h>
+#include <asm/smp-ops.h>
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/bootinfo.h>
+#include <asm/sections.h>
+#include <asm/fw/fw.h>
+#include <asm/setup.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/pci-octeon.h>
+#include <asm/octeon/cvmx-rst-defs.h>
+
+/*
+ * TRUE for devices having registers with little-endian byte
+ * order, FALSE for registers with native-endian byte order.
+ * PCI mandates little-endian; USB and SATA are configurable,
+ * but we choose little-endian for these.
+ */
+const bool octeon_should_swizzle_table[256] = {
+ [0x00] = true, /* bootbus/CF */
+ [0x1b] = true, /* PCI mmio window */
+ [0x1c] = true, /* PCI mmio window */
+ [0x1d] = true, /* PCI mmio window */
+ [0x1e] = true, /* PCI mmio window */
+ [0x68] = true, /* OCTEON III USB */
+ [0x69] = true, /* OCTEON III USB */
+ [0x6c] = true, /* OCTEON III SATA */
+ [0x6f] = true, /* OCTEON II USB */
+};
+EXPORT_SYMBOL(octeon_should_swizzle_table);
+
+#ifdef CONFIG_PCI
+extern void pci_console_init(const char *arg);
+#endif
+
+static unsigned long long max_memory = ULLONG_MAX;
+static unsigned long long reserve_low_mem;
+
+DEFINE_SEMAPHORE(octeon_bootbus_sem);
+EXPORT_SYMBOL(octeon_bootbus_sem);
+
+static struct octeon_boot_descriptor *octeon_boot_desc_ptr;
+
+struct cvmx_bootinfo *octeon_bootinfo;
+EXPORT_SYMBOL(octeon_bootinfo);
+
+#ifdef CONFIG_KEXEC
+#ifdef CONFIG_SMP
+/*
+ * Wait until the relocation code is prepared and send
+ * secondary CPUs to spin until the kernel is relocated.
+ */
+static void octeon_kexec_smp_down(void *ignored)
+{
+ int cpu = smp_processor_id();
+
+ local_irq_disable();
+ set_cpu_online(cpu, false);
+ while (!atomic_read(&kexec_ready_to_reboot))
+ cpu_relax();
+
+ asm volatile (
+ " sync \n"
+ " synci ($0) \n");
+
+ kexec_reboot();
+}
+#endif
+
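+/*
+ * Fixed physical DDR windows used when rebuilding the bootmem map for
+ * kexec: 256 MB at address 0, 256 MB at 0x410000000 and up to 15.5 GB
+ * at 0x20000000, together covering the 16 GB maximum. The 256 MB gap
+ * starting at 0x10000000 is left out as the I/O hole.
+ */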
+#define OCTEON_DDR0_BASE (0x0ULL)
+#define OCTEON_DDR0_SIZE (0x010000000ULL)
+#define OCTEON_DDR1_BASE (0x410000000ULL)
+#define OCTEON_DDR1_SIZE (0x010000000ULL)
+#define OCTEON_DDR2_BASE (0x020000000ULL)
+#define OCTEON_DDR2_SIZE (0x3e0000000ULL)
+#define OCTEON_MAX_PHY_MEM_SIZE (16*1024*1024*1024ULL)
+
+static struct kimage *kimage_ptr;
+
+static void kexec_bootmem_init(uint64_t mem_size, uint32_t low_reserved_bytes)
+{
+ int64_t addr;
+ struct cvmx_bootmem_desc *bootmem_desc;
+
+ bootmem_desc = cvmx_bootmem_get_desc();
+
+ if (mem_size > OCTEON_MAX_PHY_MEM_SIZE) {
+ mem_size = OCTEON_MAX_PHY_MEM_SIZE;
+ pr_err("Error: requested memory too large,"
+ "truncating to maximum size\n");
+ }
+
+ bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
+ bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;
+
+ addr = (OCTEON_DDR0_BASE + reserve_low_mem + low_reserved_bytes);
+ bootmem_desc->head_addr = 0;
+
+ if (mem_size <= OCTEON_DDR0_SIZE) {
+ __cvmx_bootmem_phy_free(addr,
+ mem_size - reserve_low_mem -
+ low_reserved_bytes, 0);
+ return;
+ }
+
+ __cvmx_bootmem_phy_free(addr,
+ OCTEON_DDR0_SIZE - reserve_low_mem -
+ low_reserved_bytes, 0);
+
+ mem_size -= OCTEON_DDR0_SIZE;
+
+ if (mem_size > OCTEON_DDR1_SIZE) {
+ __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
+ __cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
+ mem_size - OCTEON_DDR1_SIZE, 0);
+ } else
+ __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
+}
+
+static int octeon_kexec_prepare(struct kimage *image)
+{
+ int i;
+ char *bootloader = "kexec";
+
+ octeon_boot_desc_ptr->argc = 0;
+ for (i = 0; i < image->nr_segments; i++) {
+ if (!strncmp(bootloader, (char *)image->segment[i].buf,
+ strlen(bootloader))) {
+ /*
+ * convert command line string to array
+ * of parameters (as bootloader does).
+ */
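+ /*
+ * For example, a segment holding "kexec console=ttyS0 mem=256M"
+ * ends up as argv[0] = "console=ttyS0", argv[1] = "mem=256M"
+ * (argc = 2); the leading "kexec" token itself is skipped.
+ */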
+ int argc = 0, offt;
+ char *str = (char *)image->segment[i].buf;
+ char *ptr = strchr(str, ' ');
+ while (ptr && (OCTEON_ARGV_MAX_ARGS > argc)) {
+ *ptr = '\0';
+ if (ptr[1] != ' ') {
+ offt = (int)(ptr - str + 1);
+ octeon_boot_desc_ptr->argv[argc] =
+ image->segment[i].mem + offt;
+ argc++;
+ }
+ ptr = strchr(ptr + 1, ' ');
+ }
+ octeon_boot_desc_ptr->argc = argc;
+ break;
+ }
+ }
+
+ /*
+ * Information about segments will be needed during pre-boot memory
+ * initialization.
+ */
+ kimage_ptr = image;
+ return 0;
+}
+
+static void octeon_generic_shutdown(void)
+{
+ int i;
+#ifdef CONFIG_SMP
+ int cpu;
+#endif
+ struct cvmx_bootmem_desc *bootmem_desc;
+ void *named_block_array_ptr;
+
+ bootmem_desc = cvmx_bootmem_get_desc();
+ named_block_array_ptr =
+ cvmx_phys_to_ptr(bootmem_desc->named_block_array_addr);
+
+#ifdef CONFIG_SMP
+ /* disable watchdogs */
+ for_each_online_cpu(cpu)
+ cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
+#else
+ cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+#endif
+ if (kimage_ptr != kexec_crash_image) {
+ memset(named_block_array_ptr,
+ 0x0,
+ CVMX_BOOTMEM_NUM_NAMED_BLOCKS *
+ sizeof(struct cvmx_bootmem_named_block_desc));
+ /*
+ * Mark all memory (except low 0x100000 bytes) as free.
+ * This is the same thing the bootloader does.
+ */
+ kexec_bootmem_init(octeon_bootinfo->dram_size*1024ULL*1024ULL,
+ 0x100000);
+ /*
+ * Allocate all segments to avoid their corruption during boot.
+ */
+ for (i = 0; i < kimage_ptr->nr_segments; i++)
+ cvmx_bootmem_alloc_address(
+ kimage_ptr->segment[i].memsz + 2*PAGE_SIZE,
+ kimage_ptr->segment[i].mem - PAGE_SIZE,
+ PAGE_SIZE);
+ } else {
+ /*
+ * Do not mark all memory as free. Free only the named sections,
+ * leaving the rest of memory unchanged.
+ */
+ struct cvmx_bootmem_named_block_desc *ptr =
+ (struct cvmx_bootmem_named_block_desc *)
+ named_block_array_ptr;
+
+ for (i = 0; i < bootmem_desc->named_block_num_blocks; i++)
+ if (ptr[i].size)
+ cvmx_bootmem_free_named(ptr[i].name);
+ }
+ kexec_args[2] = 1UL; /* running on octeon_main_processor */
+ kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
+#ifdef CONFIG_SMP
+ secondary_kexec_args[2] = 0UL; /* running on secondary cpu */
+ secondary_kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
+#endif
+}
+
+static void octeon_shutdown(void)
+{
+ octeon_generic_shutdown();
+#ifdef CONFIG_SMP
+ smp_call_function(octeon_kexec_smp_down, NULL, 0);
+ smp_wmb();
+ while (num_online_cpus() > 1) {
+ cpu_relax();
+ mdelay(1);
+ }
+#endif
+}
+
+static void octeon_crash_shutdown(struct pt_regs *regs)
+{
+ octeon_generic_shutdown();
+ default_machine_crash_shutdown(regs);
+}
+
+#ifdef CONFIG_SMP
+void octeon_crash_smp_send_stop(void)
+{
+ int cpu;
+
+ /* disable watchdogs */
+ for_each_online_cpu(cpu)
+ cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
+}
+#endif
+
+#endif /* CONFIG_KEXEC */
+
+#ifdef CONFIG_CAVIUM_RESERVE32
+uint64_t octeon_reserve32_memory;
+EXPORT_SYMBOL(octeon_reserve32_memory);
+#endif
+
+#ifdef CONFIG_KEXEC
+/* The crashkernel cmdline parameter is parsed _after_ memory setup,
+ * so we also parse it here (workaround for EHB5200) */
+static uint64_t crashk_size, crashk_base;
+#endif
+
+static int octeon_uart;
+
+extern asmlinkage void handle_int(void);
+
+/**
+ * Return non-zero if we are currently running in the Octeon simulator
+ *
+ * Returns 1 if running in the simulator, 0 otherwise
+ */
+int octeon_is_simulation(void)
+{
+ return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
+}
+EXPORT_SYMBOL(octeon_is_simulation);
+
+/**
+ * Return true if Octeon is in PCI Host mode. This means
+ * Linux can control the PCI bus.
+ *
+ * Returns non-zero if Octeon is in host mode.
+ */
+int octeon_is_pci_host(void)
+{
+#ifdef CONFIG_PCI
+ return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
+#else
+ return 0;
+#endif
+}
+
+/**
+ * Get the clock rate of Octeon
+ *
+ * Returns the clock rate in Hz
+ */
+uint64_t octeon_get_clock_rate(void)
+{
+ struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();
+
+ return sysinfo->cpu_clock_hz;
+}
+EXPORT_SYMBOL(octeon_get_clock_rate);
+
+static u64 octeon_io_clock_rate;
+
+u64 octeon_get_io_clock_rate(void)
+{
+ return octeon_io_clock_rate;
+}
+EXPORT_SYMBOL(octeon_get_io_clock_rate);
+
+
+/**
+ * Write to the LCD display connected to the bootbus. This display
+ * exists on most Cavium evaluation boards. If it doesn't exist, then
+ * this function doesn't do anything.
+ *
+ * @s: String to write
+ */
+static void octeon_write_lcd(const char *s)
+{
+ if (octeon_bootinfo->led_display_base_addr) {
+ void __iomem *lcd_address =
+ ioremap(octeon_bootinfo->led_display_base_addr,
+ 8);
+ int i;
+ for (i = 0; i < 8; i++, s++) {
+ if (*s)
+ iowrite8(*s, lcd_address + i);
+ else
+ iowrite8(' ', lcd_address + i);
+ }
+ iounmap(lcd_address);
+ }
+}
+
+/**
+ * Return the console uart passed by the bootloader
+ *
+ * Returns uart (0 or 1)
+ */
+static int octeon_get_boot_uart(void)
+{
+ return (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
+ 1 : 0;
+}
+
+/**
+ * Get the coremask Linux was booted on.
+ *
+ * Returns Core mask
+ */
+int octeon_get_boot_coremask(void)
+{
+ return octeon_boot_desc_ptr->core_mask;
+}
+
+/**
+ * Check the hardware BIST results for a CPU
+ */
+void octeon_check_cpu_bist(void)
+{
+ const int coreid = cvmx_get_core_num();
+ unsigned long long mask;
+ unsigned long long bist_val;
+
+ /* Check BIST results for COP0 registers */
+ mask = 0x1f00000000ull;
+ bist_val = read_octeon_c0_icacheerr();
+ if (bist_val & mask)
+ pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
+ coreid, bist_val);
+
+ bist_val = read_octeon_c0_dcacheerr();
+ if (bist_val & 1)
+ pr_err("Core%d L1 Dcache parity error: "
+ "CacheErr(dcache) = 0x%llx\n",
+ coreid, bist_val);
+
+ mask = 0xfc00000000000000ull;
+ bist_val = read_c0_cvmmemctl();
+ if (bist_val & mask)
+ pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
+ coreid, bist_val);
+
+ write_octeon_c0_dcacheerr(0);
+}
+
+/**
+ * Reboot Octeon
+ *
+ * @command: Command to pass to the bootloader. Currently ignored.
+ */
+static void octeon_restart(char *command)
+{
+ /* Disable all watchdogs before soft reset. They don't get cleared */
+#ifdef CONFIG_SMP
+ int cpu;
+ for_each_online_cpu(cpu)
+ cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
+#else
+ cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+#endif
+
+ mb();
+ while (1)
+ if (OCTEON_IS_OCTEON3())
+ cvmx_write_csr(CVMX_RST_SOFT_RST, 1);
+ else
+ cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
+}
+
+
+/**
+ * Permanently stop a core.
+ *
+ * @arg: Ignored.
+ */
+static void octeon_kill_core(void *arg)
+{
+ if (octeon_is_simulation())
+ /* A break instruction causes the simulator to stop a core */
+ asm volatile ("break" ::: "memory");
+
+ local_irq_disable();
+ /* Disable watchdog on this core. */
+ cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+ /* Spin in a low power mode. */
+ while (true)
+ asm volatile ("wait" ::: "memory");
+}
+
+
+/**
+ * Halt the system
+ */
+static void octeon_halt(void)
+{
+ smp_call_function(octeon_kill_core, NULL, 0);
+
+ switch (octeon_bootinfo->board_type) {
+ case CVMX_BOARD_TYPE_NAO38:
+ /* Driving a 1 to GPIO 12 shuts off this board */
+ cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
+ cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
+ break;
+ default:
+ octeon_write_lcd("PowerOff");
+ break;
+ }
+
+ octeon_kill_core(NULL);
+}
+
+static char __read_mostly octeon_system_type[80];
+
+static void __init init_octeon_system_type(void)
+{
+ char const *board_type;
+
+ board_type = cvmx_board_type_to_string(octeon_bootinfo->board_type);
+ if (board_type == NULL) {
+ struct device_node *root;
+ int ret;
+
+ root = of_find_node_by_path("/");
+ ret = of_property_read_string(root, "model", &board_type);
+ of_node_put(root);
+ if (ret)
+ board_type = "Unsupported Board";
+ }
+
+ snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
+ board_type, octeon_model_get_string(read_c0_prid()));
+}
+
+/**
+ * Return a string representing the system type
+ *
+ * Returns the system type string
+ */
+const char *octeon_board_type_string(void)
+{
+ return octeon_system_type;
+}
+
+const char *get_system_type(void)
+ __attribute__ ((alias("octeon_board_type_string")));
+
+void octeon_user_io_init(void)
+{
+ union octeon_cvmemctl cvmmemctl;
+
+ /* Get the current settings for CP0_CVMMEMCTL_REG */
+ cvmmemctl.u64 = read_c0_cvmmemctl();
+ /* R/W If set, marked write-buffer entries time out the same
+ * as other entries; if clear, marked write-buffer entries
+ * use the maximum timeout. */
+ cvmmemctl.s.dismarkwblongto = 1;
+ /* R/W If set, a merged store does not clear the write-buffer
+ * entry timeout state. */
+ cvmmemctl.s.dismrgclrwbto = 0;
+ /* R/W Two bits that are the MSBs of the resultant CVMSEG LM
+ * word location for an IOBDMA. The other 8 bits come from the
+ * SCRADDR field of the IOBDMA. */
+ cvmmemctl.s.iobdmascrmsb = 0;
+ /* R/W If set, SYNCWS and SYNCS only order marked stores; if
+ * clear, SYNCWS and SYNCS only order unmarked
+ * stores. SYNCWSMARKED has no effect when DISSYNCWS is
+ * set. */
+ cvmmemctl.s.syncwsmarked = 0;
+ /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
+ cvmmemctl.s.dissyncws = 0;
+ /* R/W If set, no stall happens on write buffer full. */
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
+ cvmmemctl.s.diswbfst = 1;
+ else
+ cvmmemctl.s.diswbfst = 0;
+ /* R/W If set (and SX set), supervisor-level loads/stores can
+ * use XKPHYS addresses with VA<48>==0 */
+ cvmmemctl.s.xkmemenas = 0;
+
+ /* R/W If set (and UX set), user-level loads/stores can use
+ * XKPHYS addresses with VA<48>==0 */
+ cvmmemctl.s.xkmemenau = 0;
+
+ /* R/W If set (and SX set), supervisor-level loads/stores can
+ * use XKPHYS addresses with VA<48>==1 */
+ cvmmemctl.s.xkioenas = 0;
+
+ /* R/W If set (and UX set), user-level loads/stores can use
+ * XKPHYS addresses with VA<48>==1 */
+ cvmmemctl.s.xkioenau = 0;
+
+ /* R/W If set, all stores act as SYNCW (NOMERGE must be set
+ * when this is set) RW, reset to 0. */
+ cvmmemctl.s.allsyncw = 0;
+
+ /* R/W If set, no stores merge, and all stores reach the
+ * coherent bus in order. */
+ cvmmemctl.s.nomerge = 0;
+ /* R/W Selects the bit in the counter used for DID time-outs:
+ * 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. Actual time-out is
+ * between 1x and 2x this interval. For example, with
+ * DIDTTO=3, expiration interval is between 16K and 32K. */
+ cvmmemctl.s.didtto = 0;
+ /* R/W If set, the (mem) CSR clock never turns off. */
+ cvmmemctl.s.csrckalwys = 0;
+ /* R/W If set, mclk never turns off. */
+ cvmmemctl.s.mclkalwys = 0;
+ /* R/W Selects the bit in the counter used for write buffer
+ * flush time-outs: (WBFLT+11) is the bit position in an
+ * internal counter used to determine expiration. The write
+ * buffer expires between 1x and 2x this interval. For
+ * example, with WBFLT = 0, a write buffer expires between 2K
+ * and 4K cycles after the write buffer entry is allocated. */
+ cvmmemctl.s.wbfltime = 0;
+ /* R/W If set, do not put Istream in the L2 cache. */
+ cvmmemctl.s.istrnol2 = 0;
+
+ /*
+ * R/W The write buffer threshold. As per erratum Core-14752
+ * for CN63XX, a sc/scd might fail if the write buffer is
+ * full. Lowering WBTHRESH greatly lowers the chances of the
+ * write buffer ever being full and triggering the erratum.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+ cvmmemctl.s.wbthresh = 4;
+ else
+ cvmmemctl.s.wbthresh = 10;
+
+ /* R/W If set, CVMSEG is available for loads/stores in
+ * kernel/debug mode. */
+#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
+ cvmmemctl.s.cvmsegenak = 1;
+#else
+ cvmmemctl.s.cvmsegenak = 0;
+#endif
+ /* R/W If set, CVMSEG is available for loads/stores in
+ * supervisor mode. */
+ cvmmemctl.s.cvmsegenas = 0;
+ /* R/W If set, CVMSEG is available for loads/stores in user
+ * mode. */
+ cvmmemctl.s.cvmsegenau = 0;
+
+ write_c0_cvmmemctl(cvmmemctl.u64);
+
+ /* Setup of CVMSEG is done in kernel-entry-init.h */
+ if (smp_processor_id() == 0)
+ pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
+ CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
+ CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
+
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ union cvmx_iob_fau_timeout fau_timeout;
+
+ /* Set a default for the hardware timeouts */
+ fau_timeout.u64 = 0;
+ fau_timeout.s.tout_val = 0xfff;
+ /* Disable tagwait FAU timeout */
+ fau_timeout.s.tout_enb = 0;
+ cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
+ }
+
+ if ((!OCTEON_IS_MODEL(OCTEON_CN68XX) &&
+ !OCTEON_IS_MODEL(OCTEON_CN7XXX)) ||
+ OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+ union cvmx_pow_nw_tim nm_tim;
+
+ nm_tim.u64 = 0;
+ /* 4096 cycles */
+ nm_tim.s.nw_tim = 3;
+ cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
+ }
+
+ write_octeon_c0_icacheerr(0);
+ write_c0_derraddr1(0);
+}
+
+/**
+ * Early entry point for arch setup
+ */
+void __init prom_init(void)
+{
+ struct cvmx_sysinfo *sysinfo;
+ const char *arg;
+ char *p;
+ int i;
+ u64 t;
+ int argc;
+#ifdef CONFIG_CAVIUM_RESERVE32
+ int64_t addr = -1;
+#endif
+ /*
+ * The bootloader passes a pointer to the boot descriptor in
+ * $a3, this is available as fw_arg3.
+ */
+ octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
+ octeon_bootinfo =
+ cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
+ cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
+
+ sysinfo = cvmx_sysinfo_get();
+ memset(sysinfo, 0, sizeof(*sysinfo));
+ sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
+ sysinfo->phy_mem_desc_addr = (u64)phys_to_virt(octeon_bootinfo->phy_mem_desc_addr);
+
+ if ((octeon_bootinfo->major_version > 1) ||
+ (octeon_bootinfo->major_version == 1 &&
+ octeon_bootinfo->minor_version >= 4))
+ cvmx_coremask_copy(&sysinfo->core_mask,
+ &octeon_bootinfo->ext_core_mask);
+ else
+ cvmx_coremask_set64(&sysinfo->core_mask,
+ octeon_bootinfo->core_mask);
+
+ /* Some broken U-Boot versions pass garbage in the upper bits; clear them out */
+ if (!OCTEON_IS_MODEL(OCTEON_CN78XX))
+ for (i = 512; i < 1024; i++)
+ cvmx_coremask_clear_core(&sysinfo->core_mask, i);
+
+ sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
+ sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
+ sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
+ sysinfo->board_type = octeon_bootinfo->board_type;
+ sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
+ sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
+ memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
+ sizeof(sysinfo->mac_addr_base));
+ sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
+ memcpy(sysinfo->board_serial_number,
+ octeon_bootinfo->board_serial_number,
+ sizeof(sysinfo->board_serial_number));
+ sysinfo->compact_flash_common_base_addr =
+ octeon_bootinfo->compact_flash_common_base_addr;
+ sysinfo->compact_flash_attribute_base_addr =
+ octeon_bootinfo->compact_flash_attribute_base_addr;
+ sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
+ sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
+ sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
+
+ if (OCTEON_IS_OCTEON2()) {
+ /* I/O clock runs at a different rate than the CPU. */
+ union cvmx_mio_rst_boot rst_boot;
+ rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
+ octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
+ } else if (OCTEON_IS_OCTEON3()) {
+ /* I/O clock runs at a different rate than the CPU. */
+ union cvmx_rst_boot rst_boot;
+ rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
+ octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
+ } else {
+ octeon_io_clock_rate = sysinfo->cpu_clock_hz;
+ }
+
+ t = read_c0_cvmctl();
+ if ((t & (1ull << 27)) == 0) {
+ /*
+ * Set up the multiplier save/restore code if
+ * CvmCtl[NOMUL] is clear.
+ */
+ void *save;
+ void *save_end;
+ void *restore;
+ void *restore_end;
+ int save_len;
+ int restore_len;
+ int save_max = (char *)octeon_mult_save_end -
+ (char *)octeon_mult_save;
+ int restore_max = (char *)octeon_mult_restore_end -
+ (char *)octeon_mult_restore;
+ if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) {
+ save = octeon_mult_save3;
+ save_end = octeon_mult_save3_end;
+ restore = octeon_mult_restore3;
+ restore_end = octeon_mult_restore3_end;
+ } else {
+ save = octeon_mult_save2;
+ save_end = octeon_mult_save2_end;
+ restore = octeon_mult_restore2;
+ restore_end = octeon_mult_restore2_end;
+ }
+ save_len = (char *)save_end - (char *)save;
+ restore_len = (char *)restore_end - (char *)restore;
+ if (!WARN_ON(save_len > save_max ||
+ restore_len > restore_max)) {
+ memcpy(octeon_mult_save, save, save_len);
+ memcpy(octeon_mult_restore, restore, restore_len);
+ }
+ }
+
+ /*
+ * Only enable the LED controller if we're running on a CN38XX, CN58XX,
+ * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
+ */
+ if (!octeon_is_simulation() &&
+ octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
+ cvmx_write_csr(CVMX_LED_EN, 0);
+ cvmx_write_csr(CVMX_LED_PRT, 0);
+ cvmx_write_csr(CVMX_LED_DBG, 0);
+ cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
+ cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
+ cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
+ cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
+ cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
+ cvmx_write_csr(CVMX_LED_EN, 1);
+ }
+#ifdef CONFIG_CAVIUM_RESERVE32
+ /*
+ * We need to temporarily allocate all memory in the reserve32
+ * region. This makes sure the kernel doesn't allocate this
+ * memory when it is getting memory from the
+ * bootloader. Later, after the memory allocations are
+ * complete, the reserve32 will be freed.
+ *
+ * Allocate memory for RESERVED32 aligned on 2MB boundary. This
+ * is in case we later use hugetlb entries with it.
+ */
+ addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
+ 0, 0, 2 << 20,
+ "CAVIUM_RESERVE32", 0);
+ if (addr < 0)
+ pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
+ else
+ octeon_reserve32_memory = addr;
+#endif
+
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
+ if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
+ pr_info("Skipping L2 locking due to reduced L2 cache size\n");
+ } else {
+ uint32_t __maybe_unused ebase = read_c0_ebase() & 0x3ffff000;
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
+ /* TLB refill */
+ cvmx_l2c_lock_mem_region(ebase, 0x100);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
+ /* General exception */
+ cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
+ /* Interrupt handler */
+ cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
+ cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
+ cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
+ cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
+#endif
+ }
+#endif
+
+ octeon_check_cpu_bist();
+
+ octeon_uart = octeon_get_boot_uart();
+
+#ifdef CONFIG_SMP
+ octeon_write_lcd("LinuxSMP");
+#else
+ octeon_write_lcd("Linux");
+#endif
+
+ octeon_setup_delays();
+
+ /*
+ * BIST should always be enabled when doing a soft reset. L2
+ * Cache locking for instance is not cleared unless BIST is
+ * enabled. Unfortunately, due to chip erratum G-200 for the
+ * CN38XX and CN31XX, BIST must be disabled on these parts.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
+ OCTEON_IS_MODEL(OCTEON_CN31XX))
+ cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
+ else
+ cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);
+
+ /* Default to 64MB in the simulator to speed things up */
+ if (octeon_is_simulation())
+ max_memory = 64ull << 20;
+
+ arg = strstr(arcs_cmdline, "mem=");
+ if (arg) {
+ max_memory = memparse(arg + 4, &p);
+ if (max_memory == 0)
+ max_memory = 32ull << 30;
+ if (*p == '@')
+ reserve_low_mem = memparse(p + 1, &p);
+ }
+
+ arcs_cmdline[0] = 0;
+ argc = octeon_boot_desc_ptr->argc;
+ for (i = 0; i < argc; i++) {
+ const char *arg =
+ cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
+ if ((strncmp(arg, "MEM=", 4) == 0) ||
+ (strncmp(arg, "mem=", 4) == 0)) {
+ max_memory = memparse(arg + 4, &p);
+ if (max_memory == 0)
+ max_memory = 32ull << 30;
+ if (*p == '@')
+ reserve_low_mem = memparse(p + 1, &p);
+#ifdef CONFIG_KEXEC
+ } else if (strncmp(arg, "crashkernel=", 12) == 0) {
+ crashk_size = memparse(arg+12, &p);
+ if (*p == '@')
+ crashk_base = memparse(p+1, &p);
+ strcat(arcs_cmdline, " ");
+ strcat(arcs_cmdline, arg);
+ /*
+ * To do: switch parsing to new style, something like:
+ * parse_crashkernel(arg, sysinfo->system_dram_size,
+ * &crashk_size, &crashk_base);
+ */
+#endif
+ } else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
+ sizeof(arcs_cmdline) - 1) {
+ strcat(arcs_cmdline, " ");
+ strcat(arcs_cmdline, arg);
+ }
+ }
+
+ if (strstr(arcs_cmdline, "console=") == NULL) {
+ if (octeon_uart == 1)
+ strcat(arcs_cmdline, " console=ttyS1,115200");
+ else
+ strcat(arcs_cmdline, " console=ttyS0,115200");
+ }
+
+ mips_hpt_frequency = octeon_get_clock_rate();
+
+ octeon_init_cvmcount();
+
+ _machine_restart = octeon_restart;
+ _machine_halt = octeon_halt;
+
+#ifdef CONFIG_KEXEC
+ _machine_kexec_shutdown = octeon_shutdown;
+ _machine_crash_shutdown = octeon_crash_shutdown;
+ _machine_kexec_prepare = octeon_kexec_prepare;
+#ifdef CONFIG_SMP
+ _crash_smp_send_stop = octeon_crash_smp_send_stop;
+#endif
+#endif
+
+ octeon_user_io_init();
+ octeon_setup_smp();
+}
+
+/* Exclude a single page from the regions obtained in plat_mem_setup. */
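+/*
+ * If addr falls inside the region, the part below it is handed to
+ * memblock and *mem is advanced to addr; if addr is exactly *mem, one
+ * page is skipped. The caller then adds whatever remains.
+ */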
+#ifndef CONFIG_CRASH_DUMP
+static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size)
+{
+ if (addr > *mem && addr < *mem + *size) {
+ u64 inc = addr - *mem;
+ memblock_add(*mem, inc);
+ *mem += inc;
+ *size -= inc;
+ }
+
+ if (addr == *mem && *size > PAGE_SIZE) {
+ *mem += PAGE_SIZE;
+ *size -= PAGE_SIZE;
+ }
+}
+#endif /* CONFIG_CRASH_DUMP */
+
+void __init fw_init_cmdline(void)
+{
+ int i;
+
+ octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
+ for (i = 0; i < octeon_boot_desc_ptr->argc; i++) {
+ const char *arg =
+ cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
+ if (strlen(arcs_cmdline) + strlen(arg) + 1 <
+ sizeof(arcs_cmdline) - 1) {
+ strcat(arcs_cmdline, " ");
+ strcat(arcs_cmdline, arg);
+ }
+ }
+}
+
+void __init *plat_get_fdt(void)
+{
+ octeon_bootinfo =
+ cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
+ return phys_to_virt(octeon_bootinfo->fdt_addr);
+}
+
+void __init plat_mem_setup(void)
+{
+ uint64_t mem_alloc_size;
+ uint64_t total;
+ uint64_t crashk_end;
+#ifndef CONFIG_CRASH_DUMP
+ int64_t memory;
+ uint64_t kernel_start;
+ uint64_t kernel_size;
+#endif
+
+ total = 0;
+ crashk_end = 0;
+
+ /*
+ * The MIPS memory init uses the first memory location for
+ * some memory vectors. When SPARSEMEM is in use, it doesn't
+ * verify that the size is big enough for the final
+ * vectors. Making the smallest chunk 4MB seems to be enough
+ * to consistently work.
+ */
+ mem_alloc_size = 4 << 20;
+ if (mem_alloc_size > max_memory)
+ mem_alloc_size = max_memory;
+
+/* The crash kernel ignores the bootmem list; it relies on the mem=X@Y option */
+#ifdef CONFIG_CRASH_DUMP
+ memblock_add(reserve_low_mem, max_memory);
+ total += max_memory;
+#else
+#ifdef CONFIG_KEXEC
+ if (crashk_size > 0) {
+ memblock_add(crashk_base, crashk_size);
+ crashk_end = crashk_base + crashk_size;
+ }
+#endif
+ /*
+ * When allocating memory, we want incrementing addresses,
+ * which is handled by memblock
+ */
+ cvmx_bootmem_lock();
+ while (total < max_memory) {
+ memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
+ __pa_symbol(&_end), -1,
+ 0x100000,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (memory >= 0) {
+ u64 size = mem_alloc_size;
+#ifdef CONFIG_KEXEC
+ uint64_t end;
+#endif
+
+ /*
+ * exclude a page at the beginning and end of
+ * the 256MB PCIe 'hole' so the kernel will not
+ * try to allocate multi-page buffers that
+ * span the discontinuity.
+ */
+ memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE,
+ &memory, &size);
+ memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE +
+ CVMX_PCIE_BAR1_PHYS_SIZE,
+ &memory, &size);
+#ifdef CONFIG_KEXEC
+ end = memory + mem_alloc_size;
+
+ /*
+ * This function automatically merges address regions
+ * next to each other if they are received in
+ * incrementing order
+ */
+ if (memory < crashk_base && end > crashk_end) {
+ /* crash region is fully inside this block */
+ memblock_add(memory, crashk_base - memory);
+ total += crashk_base - memory;
+ memblock_add(crashk_end, end - crashk_end);
+ total += end - crashk_end;
+ continue;
+ }
+
+ if (memory >= crashk_base && end <= crashk_end)
+ /*
+ * Entire memory region is within the new
+ * kernel's memory, ignore it.
+ */
+ continue;
+
+ if (memory > crashk_base && memory < crashk_end &&
+ end > crashk_end) {
+ /*
+ * The block starts inside the crash region;
+ * trim it to begin at the end of that region.
+ */
+ mem_alloc_size -= crashk_end - memory;
+ memory = crashk_end;
+ } else if (memory < crashk_base && end > crashk_base &&
+ end < crashk_end)
+ /*
+ * Overlap with the beginning of the crash region;
+ * chop off the end of the block.
+ */
+ mem_alloc_size -= end - crashk_base;
+#endif
+ memblock_add(memory, mem_alloc_size);
+ total += mem_alloc_size;
+ /* Restore mem_alloc_size for the next iteration */
+ mem_alloc_size = 4 << 20;
+ } else {
+ break;
+ }
+ }
+ cvmx_bootmem_unlock();
+ /* Add the memory region for the kernel. */
+ kernel_start = (unsigned long) _text;
+ kernel_size = _end - _text;
+
+ /* Adjust for physical offset. */
+ kernel_start &= ~0xffffffff80000000ULL;
+ memblock_add(kernel_start, kernel_size);
+#endif /* CONFIG_CRASH_DUMP */
+
+#ifdef CONFIG_CAVIUM_RESERVE32
+ /*
+ * Now that we've allocated the kernel memory it is safe to
+ * free the reserved region. We free it here so that builtin
+ * drivers can use the memory.
+ */
+ if (octeon_reserve32_memory)
+ cvmx_bootmem_free_named("CAVIUM_RESERVE32");
+#endif /* CONFIG_CAVIUM_RESERVE32 */
+
+ if (total == 0)
+ panic("Unable to allocate memory from "
+ "cvmx_bootmem_phy_alloc");
+}
+
+/*
+ * Emit one character to the boot UART. Exported for use by the
+ * watchdog timer.
+ */
+void prom_putchar(char c)
+{
+ uint64_t lsrval;
+
+ /* Spin until there is room */
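+ /* Bit 5 (0x20) of the UART LSR is Transmit Holding Register Empty. */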
+ do {
+ lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
+ } while ((lsrval & 0x20) == 0);
+
+ /* Write the byte */
+ cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
+}
+EXPORT_SYMBOL(prom_putchar);
+
+void __init prom_free_prom_memory(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ /* Check for presence of Core-14449 fix. */
+ u32 insn;
+ u32 *foo;
+
+ foo = &insn;
+
+ asm volatile("# before" : : : "memory");
+ prefetch(foo);
+ asm volatile(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ "bal 1f\n\t"
+ "nop\n"
+ "1:\tlw %0,-12($31)\n\t"
+ ".set pop\n\t"
+ : "=r" (insn) : : "$31", "memory");
+
+ if ((insn >> 26) != 0x33)
+ panic("No PREF instruction at Core-14449 probe point.");
+
+ if (((insn >> 16) & 0x1f) != 28)
+ panic("OCTEON II DCache prefetch workaround not in place (%04x).\n"
+ "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).",
+ insn);
+ }
+}
+
+void __init octeon_fill_mac_addresses(void);
+
+void __init device_tree_init(void)
+{
+ const void *fdt;
+ bool do_prune;
+ bool fill_mac;
+
+#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
+ if (!fdt_check_header(&__appended_dtb)) {
+ fdt = &__appended_dtb;
+ do_prune = false;
+ fill_mac = true;
+ pr_info("Using appended Device Tree.\n");
+ } else
+#endif
+ if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
+ fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
+ if (fdt_check_header(fdt))
+ panic("Corrupt Device Tree passed to kernel.");
+ do_prune = false;
+ fill_mac = false;
+ pr_info("Using passed Device Tree.\n");
+ } else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ fdt = &__dtb_octeon_68xx_begin;
+ do_prune = true;
+ fill_mac = true;
+ } else {
+ fdt = &__dtb_octeon_3xxx_begin;
+ do_prune = true;
+ fill_mac = true;
+ }
+
+ initial_boot_params = (void *)fdt;
+
+ if (do_prune) {
+ octeon_prune_device_tree();
+ pr_info("Using internal Device Tree.\n");
+ }
+ if (fill_mac)
+ octeon_fill_mac_addresses();
+ unflatten_and_copy_device_tree();
+ init_octeon_system_type();
+}
+
+static int __initdata disable_octeon_edac_p;
+
+static int __init disable_octeon_edac(char *str)
+{
+ disable_octeon_edac_p = 1;
+ return 0;
+}
+early_param("disable_octeon_edac", disable_octeon_edac);
+
+static char *edac_device_names[] = {
+ "octeon_l2c_edac",
+ "octeon_pc_edac",
+};
+
+static int __init edac_devinit(void)
+{
+ struct platform_device *dev;
+ int i, err = 0;
+ int num_lmc;
+ char *name;
+
+ if (disable_octeon_edac_p)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(edac_device_names); i++) {
+ name = edac_device_names[i];
+ dev = platform_device_register_simple(name, -1, NULL, 0);
+ if (IS_ERR(dev)) {
+ pr_err("Registration of %s failed!\n", name);
+ err = PTR_ERR(dev);
+ }
+ }
+
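+ /* The number of LMC (DRAM controller) EDAC instances depends on the model. */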
+ num_lmc = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 :
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) ? 2 : 1);
+ for (i = 0; i < num_lmc; i++) {
+ dev = platform_device_register_simple("octeon_lmc_edac",
+ i, NULL, 0);
+ if (IS_ERR(dev)) {
+ pr_err("Registration of octeon_lmc_edac %d failed!\n", i);
+ err = PTR_ERR(dev);
+ }
+ }
+
+ return err;
+}
+device_initcall(edac_devinit);
+
+static void __initdata *octeon_dummy_iospace;
+
+static int __init octeon_no_pci_init(void)
+{
+ /*
+ * Initially assume there is no PCI. The PCI/PCIe platform code will
+ * later re-initialize these to correct values if they are present.
+ */
+ octeon_dummy_iospace = vzalloc(IO_SPACE_LIMIT);
+ set_io_port_base((unsigned long)octeon_dummy_iospace);
+ ioport_resource.start = MAX_RESOURCE;
+ ioport_resource.end = 0;
+ return 0;
+}
+core_initcall(octeon_no_pci_init);
+
+static int __init octeon_no_pci_release(void)
+{
+ /*
+ * Release the allocated memory if a real IO space is there.
+ */
+ if ((unsigned long)octeon_dummy_iospace != mips_io_port_base)
+ vfree(octeon_dummy_iospace);
+ return 0;
+}
+late_initcall(octeon_no_pci_release);
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
new file mode 100644
index 000000000..076db9a06
--- /dev/null
+++ b/arch/mips/cavium-octeon/smp.c
@@ -0,0 +1,523 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
+ */
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/sched.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/kexec.h>
+
+#include <asm/mmu_context.h>
+#include <asm/time.h>
+#include <asm/setup.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "octeon_boot.h"
+
+volatile unsigned long octeon_processor_boot = 0xff;
+volatile unsigned long octeon_processor_sp;
+volatile unsigned long octeon_processor_gp;
+#ifdef CONFIG_RELOCATABLE
+volatile unsigned long octeon_processor_relocated_kernel_entry;
+#endif /* CONFIG_RELOCATABLE */
+
+#ifdef CONFIG_HOTPLUG_CPU
+uint64_t octeon_bootloader_entry_addr;
+EXPORT_SYMBOL(octeon_bootloader_entry_addr);
+#endif
+
+extern void kernel_entry(unsigned long arg1, ...);
+
+static void octeon_icache_flush(void)
+{
+ asm volatile ("synci 0($0)\n");
+}
+
+static void (*octeon_message_functions[8])(void) = {
+ scheduler_ipi,
+ generic_smp_call_function_interrupt,
+ octeon_icache_flush,
+};
+
+static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
+{
+ u64 mbox_clrx = CVMX_CIU_MBOX_CLRX(cvmx_get_core_num());
+ u64 action;
+ int i;
+
+ /*
+ * Make sure the function array initialization remains
+ * correct.
+ */
+ BUILD_BUG_ON(SMP_RESCHEDULE_YOURSELF != (1 << 0));
+ BUILD_BUG_ON(SMP_CALL_FUNCTION != (1 << 1));
+ BUILD_BUG_ON(SMP_ICACHE_FLUSH != (1 << 2));
+
+ /*
+ * Load the mailbox register to figure out what we're supposed
+ * to do.
+ */
+ action = cvmx_read_csr(mbox_clrx);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ action &= 0xff;
+ else
+ action &= 0xffff;
+
+ /* Clear the mailbox to clear the interrupt */
+ cvmx_write_csr(mbox_clrx, action);
+
+ for (i = 0; i < ARRAY_SIZE(octeon_message_functions) && action;) {
+ if (action & 1) {
+ void (*fn)(void) = octeon_message_functions[i];
+
+ if (fn)
+ fn();
+ }
+ action >>= 1;
+ i++;
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * Send an inter-processor interrupt to the given CPU by setting the
+ * requested action bits in its mailbox register; the mailbox interrupt
+ * handler on that CPU then runs the matching message functions.
+ */
+void octeon_send_ipi_single(int cpu, unsigned int action)
+{
+ int coreid = cpu_logical_map(cpu);
+ /*
+ pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
+ coreid, action);
+ */
+ cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
+}
+
+static inline void octeon_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ octeon_send_ipi_single(i, action);
+}
+
+/**
+ * Set up for CPU hotplug by recording the bootloader's reentry address
+ */
+static void octeon_smp_hotplug_setup(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ struct linux_app_boot_info *labi;
+
+ if (!setup_max_cpus)
+ return;
+
+ labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
+ if (labi->labi_signature != LABI_SIGNATURE) {
+ pr_info("The bootloader on this board does not support HOTPLUG_CPU.");
+ return;
+ }
+
+ octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
+#endif
+}
+
+static void __init octeon_smp_setup(void)
+{
+ const int coreid = cvmx_get_core_num();
+ int cpus;
+ int id;
+ struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();
+
+#ifdef CONFIG_HOTPLUG_CPU
+ int core_mask = octeon_get_boot_coremask();
+ unsigned int num_cores = cvmx_octeon_num_cores();
+#endif
+
+ /* The present CPUs are initially just the boot cpu (CPU 0). */
+ for (id = 0; id < NR_CPUS; id++) {
+ set_cpu_possible(id, id == 0);
+ set_cpu_present(id, id == 0);
+ }
+
+ __cpu_number_map[coreid] = 0;
+ __cpu_logical_map[0] = coreid;
+
+ /* The present CPUs get the lowest CPU numbers. */
+ cpus = 1;
+ for (id = 0; id < NR_CPUS; id++) {
+ if ((id != coreid) && cvmx_coremask_is_core_set(&sysinfo->core_mask, id)) {
+ set_cpu_possible(cpus, true);
+ set_cpu_present(cpus, true);
+ __cpu_number_map[id] = cpus;
+ __cpu_logical_map[cpus] = id;
+ cpus++;
+ }
+ }
+
+#ifdef CONFIG_HOTPLUG_CPU
+ /*
+ * The possible CPUs are all those present on the chip. We
+ * will assign CPU numbers for possible cores as well. Cores
+ * are always consecutively numbered from 0.
+ */
+ for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
+ id < num_cores && id < NR_CPUS; id++) {
+ if (!(core_mask & (1 << id))) {
+ set_cpu_possible(cpus, true);
+ __cpu_number_map[id] = cpus;
+ __cpu_logical_map[cpus] = id;
+ cpus++;
+ }
+ }
+#endif
+
+ octeon_smp_hotplug_setup();
+}
+
+
+#ifdef CONFIG_RELOCATABLE
+int plat_post_relocation(long offset)
+{
+ unsigned long entry = (unsigned long)kernel_entry;
+
+ /* Send secondaries into relocated kernel */
+ octeon_processor_relocated_kernel_entry = entry + offset;
+
+ return 0;
+}
+#endif /* CONFIG_RELOCATABLE */
+
+/**
+ * Firmware CPU startup hook
+ *
+ */
+static int octeon_boot_secondary(int cpu, struct task_struct *idle)
+{
+ int count;
+
+ pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
+ cpu_logical_map(cpu));
+
+ octeon_processor_sp = __KSTK_TOS(idle);
+ octeon_processor_gp = (unsigned long)(task_thread_info(idle));
+ octeon_processor_boot = cpu_logical_map(cpu);
+ mb();
+
+ count = 10000;
+ while (octeon_processor_sp && count) {
+ /* Waiting for processor to get the SP and GP */
+ udelay(1);
+ count--;
+ }
+ if (count == 0) {
+ pr_err("Secondary boot timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * After we've done initial boot, this function is called to allow the
+ * board code to clean up state, if needed
+ */
+static void octeon_init_secondary(void)
+{
+ unsigned int sr;
+
+ sr = set_c0_status(ST0_BEV);
+ write_c0_ebase((u32)ebase);
+ write_c0_status(sr);
+
+ octeon_check_cpu_bist();
+ octeon_init_cvmcount();
+
+ octeon_irq_setup_secondary();
+}
+
+/**
+ * Callout to firmware before smp_init
+ *
+ */
+static void __init octeon_prepare_cpus(unsigned int max_cpus)
+{
+ /*
+ * Only the low order mailbox bits are used for IPIs, leave
+ * the other bits alone.
+ */
+ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
+ if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
+ IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
+ mailbox_interrupt)) {
+ panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
+ }
+}
+
+/**
+ * Last chance for the board code to finish SMP initialization before
+ * the CPU is "online".
+ */
+static void octeon_smp_finish(void)
+{
+ octeon_user_io_init();
+
+ /* to generate the first CPU timer interrupt */
+ write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+ local_irq_enable();
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* State of each CPU. */
+static DEFINE_PER_CPU(int, cpu_state);
+
+static int octeon_cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (cpu == 0)
+ return -EBUSY;
+
+ if (!octeon_bootloader_entry_addr)
+ return -ENOTSUPP;
+
+ set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
+ octeon_fixup_irqs();
+
+ __flush_cache_all();
+ local_flush_tlb_all();
+
+ return 0;
+}
+
+static void octeon_cpu_die(unsigned int cpu)
+{
+ int coreid = cpu_logical_map(cpu);
+ uint32_t mask, new_mask;
+ const struct cvmx_bootmem_named_block_desc *block_desc;
+
+ while (per_cpu(cpu_state, cpu) != CPU_DEAD)
+ cpu_relax();
+
+ /*
+ * This is a somewhat complicated strategy for getting/setting the
+ * available core mask, copied from the bootloader.
+ */
+
+ mask = 1 << coreid;
+ /* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
+ block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);
+
+ if (!block_desc) {
+ struct linux_app_boot_info *labi;
+
+ labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
+
+ labi->avail_coremask |= mask;
+ new_mask = labi->avail_coremask;
+ } else { /* alternative, already initialized */
+ uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
+ AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
+ *p |= mask;
+ new_mask = *p;
+ }
+
+ pr_info("Reset core %d. Available Coremask = 0x%x \n", coreid, new_mask);
+ mb();
+ cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
+ cvmx_write_csr(CVMX_CIU_PP_RST, 0);
+}
+
+void play_dead(void)
+{
+ int cpu = cpu_number_map(cvmx_get_core_num());
+
+ idle_task_exit();
+ octeon_processor_boot = 0xff;
+ per_cpu(cpu_state, cpu) = CPU_DEAD;
+
+ mb();
+
+ while (1) /* core will be reset here */
+ ;
+}
+
+static void start_after_reset(void)
+{
+ kernel_entry(0, 0, 0); /* set a2 = 0 for secondary core */
+}
+
+static int octeon_update_boot_vector(unsigned int cpu)
+{
+
+ int coreid = cpu_logical_map(cpu);
+ uint32_t avail_coremask;
+ const struct cvmx_bootmem_named_block_desc *block_desc;
+ struct boot_init_vector *boot_vect =
+ (struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);
+
+ block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);
+
+ if (!block_desc) {
+ struct linux_app_boot_info *labi;
+
+ labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
+
+ avail_coremask = labi->avail_coremask;
+ labi->avail_coremask &= ~(1 << coreid);
+ } else { /* alternative, already initialized */
+ avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
+ block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
+ }
+
+ if (!(avail_coremask & (1 << coreid))) {
+ /* core not available; assume it was caught by the simple executive */
+ cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
+ cvmx_write_csr(CVMX_CIU_PP_RST, 0);
+ }
+
+ boot_vect[coreid].app_start_func_addr =
+ (uint32_t) (unsigned long) start_after_reset;
+ boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;
+
+ mb();
+
+ cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);
+
+ return 0;
+}
+
+static int register_cavium_notifier(void)
+{
+ return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
+ "mips/cavium:prepare",
+ octeon_update_boot_vector, NULL);
+}
+late_initcall(register_cavium_notifier);
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static const struct plat_smp_ops octeon_smp_ops = {
+ .send_ipi_single = octeon_send_ipi_single,
+ .send_ipi_mask = octeon_send_ipi_mask,
+ .init_secondary = octeon_init_secondary,
+ .smp_finish = octeon_smp_finish,
+ .boot_secondary = octeon_boot_secondary,
+ .smp_setup = octeon_smp_setup,
+ .prepare_cpus = octeon_prepare_cpus,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = octeon_cpu_disable,
+ .cpu_die = octeon_cpu_die,
+#endif
+#ifdef CONFIG_KEXEC
+ .kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
+#endif
+};
+
+static irqreturn_t octeon_78xx_reched_interrupt(int irq, void *dev_id)
+{
+ scheduler_ipi();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t octeon_78xx_call_function_interrupt(int irq, void *dev_id)
+{
+ generic_smp_call_function_interrupt();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t octeon_78xx_icache_flush_interrupt(int irq, void *dev_id)
+{
+ octeon_icache_flush();
+ return IRQ_HANDLED;
+}
+
+/*
+ * Callout to firmware before smp_init
+ */
+static void octeon_78xx_prepare_cpus(unsigned int max_cpus)
+{
+ if (request_irq(OCTEON_IRQ_MBOX0 + 0,
+ octeon_78xx_reched_interrupt,
+ IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler",
+ octeon_78xx_reched_interrupt)) {
+ panic("Cannot request_irq for SchedulerIPI");
+ }
+ if (request_irq(OCTEON_IRQ_MBOX0 + 1,
+ octeon_78xx_call_function_interrupt,
+ IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call",
+ octeon_78xx_call_function_interrupt)) {
+ panic("Cannot request_irq for SMP-Call");
+ }
+ if (request_irq(OCTEON_IRQ_MBOX0 + 2,
+ octeon_78xx_icache_flush_interrupt,
+ IRQF_PERCPU | IRQF_NO_THREAD, "ICache-Flush",
+ octeon_78xx_icache_flush_interrupt)) {
+ panic("Cannot request_irq for ICache-Flush");
+ }
+}
+
+static void octeon_78xx_send_ipi_single(int cpu, unsigned int action)
+{
+ int i;
+
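+ /*
+ * With CIU3 each action bit is delivered as its own mailbox
+ * interrupt; bits 0-2 map to the handlers registered in
+ * octeon_78xx_prepare_cpus() above.
+ */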
+ for (i = 0; i < 8; i++) {
+ if (action & 1)
+ octeon_ciu3_mbox_send(cpu, i);
+ action >>= 1;
+ }
+}
+
+static void octeon_78xx_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, mask)
+ octeon_78xx_send_ipi_single(cpu, action);
+}
+
+static const struct plat_smp_ops octeon_78xx_smp_ops = {
+ .send_ipi_single = octeon_78xx_send_ipi_single,
+ .send_ipi_mask = octeon_78xx_send_ipi_mask,
+ .init_secondary = octeon_init_secondary,
+ .smp_finish = octeon_smp_finish,
+ .boot_secondary = octeon_boot_secondary,
+ .smp_setup = octeon_smp_setup,
+ .prepare_cpus = octeon_78xx_prepare_cpus,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = octeon_cpu_disable,
+ .cpu_die = octeon_cpu_die,
+#endif
+#ifdef CONFIG_KEXEC
+ .kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
+#endif
+};
+
+void __init octeon_setup_smp(void)
+{
+ const struct plat_smp_ops *ops;
+
+ if (octeon_has_feature(OCTEON_FEATURE_CIU3))
+ ops = &octeon_78xx_smp_ops;
+ else
+ ops = &octeon_smp_ops;
+
+ register_smp_ops(ops);
+}
diff --git a/arch/mips/cobalt/Makefile b/arch/mips/cobalt/Makefile
new file mode 100644
index 000000000..f0e2c26c8
--- /dev/null
+++ b/arch/mips/cobalt/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Cobalt micro systems family specific parts of the kernel
+#
+
+obj-y := buttons.o irq.o lcd.o led.o mtd.o reset.o rtc.o serial.o setup.o time.o
+
+obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/mips/cobalt/Platform b/arch/mips/cobalt/Platform
new file mode 100644
index 000000000..4254895ad
--- /dev/null
+++ b/arch/mips/cobalt/Platform
@@ -0,0 +1,5 @@
+#
+# Cobalt Server
+#
+cflags-$(CONFIG_MIPS_COBALT) += -I$(srctree)/arch/mips/include/asm/mach-cobalt
+load-$(CONFIG_MIPS_COBALT) += 0xffffffff80080000
diff --git a/arch/mips/cobalt/buttons.c b/arch/mips/cobalt/buttons.c
new file mode 100644
index 000000000..0f9299fe5
--- /dev/null
+++ b/arch/mips/cobalt/buttons.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cobalt buttons platform device.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+static struct resource cobalt_buttons_resource __initdata = {
+ .start = 0x1d000000,
+ .end = 0x1d000003,
+ .flags = IORESOURCE_MEM,
+};
+
+static __init int cobalt_add_buttons(void)
+{
+ struct platform_device *pd;
+ int error;
+
+ pd = platform_device_alloc("Cobalt buttons", -1);
+ if (!pd)
+ return -ENOMEM;
+
+ error = platform_device_add_resources(pd, &cobalt_buttons_resource, 1);
+ if (error)
+ goto err_free_device;
+
+ error = platform_device_add(pd);
+ if (error)
+ goto err_free_device;
+
+ return 0;
+
+ err_free_device:
+ platform_device_put(pd);
+ return error;
+}
+device_initcall(cobalt_add_buttons);
diff --git a/arch/mips/cobalt/irq.c b/arch/mips/cobalt/irq.c
new file mode 100644
index 000000000..ead5ae413
--- /dev/null
+++ b/arch/mips/cobalt/irq.c
@@ -0,0 +1,64 @@
+/*
+ * IRQ vector handles
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1996, 1997, 2003 by Ralf Baechle
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
+#include <asm/i8259.h>
+#include <asm/irq_cpu.h>
+#include <asm/irq_gt641xx.h>
+#include <asm/gt64120.h>
+
+#include <irq.h>
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned pending = read_c0_status() & read_c0_cause() & ST0_IM;
+ int irq;
+
+ if (pending & CAUSEF_IP2)
+ gt641xx_irq_dispatch();
+ else if (pending & CAUSEF_IP6) {
+ irq = i8259_irq();
+ if (irq < 0)
+ spurious_interrupt();
+ else
+ do_IRQ(irq);
+ } else if (pending & CAUSEF_IP3)
+ do_IRQ(MIPS_CPU_IRQ_BASE + 3);
+ else if (pending & CAUSEF_IP4)
+ do_IRQ(MIPS_CPU_IRQ_BASE + 4);
+ else if (pending & CAUSEF_IP5)
+ do_IRQ(MIPS_CPU_IRQ_BASE + 5);
+ else if (pending & CAUSEF_IP7)
+ do_IRQ(MIPS_CPU_IRQ_BASE + 7);
+ else
+ spurious_interrupt();
+}
+
+void __init arch_init_irq(void)
+{
+ mips_cpu_irq_init();
+ gt641xx_irq_init();
+ init_i8259_irqs();
+
+ if (request_irq(GT641XX_CASCADE_IRQ, no_action, IRQF_NO_THREAD,
+ "cascade", NULL)) {
+ pr_err("Failed to request irq %d (cascade)\n",
+ GT641XX_CASCADE_IRQ);
+ }
+ if (request_irq(I8259_CASCADE_IRQ, no_action, IRQF_NO_THREAD,
+ "cascade", NULL)) {
+ pr_err("Failed to request irq %d (cascade)\n",
+ I8259_CASCADE_IRQ);
+ }
+}
diff --git a/arch/mips/cobalt/lcd.c b/arch/mips/cobalt/lcd.c
new file mode 100644
index 000000000..7d43b5ec3
--- /dev/null
+++ b/arch/mips/cobalt/lcd.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Registration of Cobalt LCD platform device.
+ *
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+
+static struct resource cobalt_lcd_resource __initdata = {
+ .start = 0x1f000000,
+ .end = 0x1f00001f,
+ .flags = IORESOURCE_MEM,
+};
+
+static __init int cobalt_lcd_add(void)
+{
+ struct platform_device *pdev;
+ int retval;
+
+ pdev = platform_device_alloc("cobalt-lcd", -1);
+ if (!pdev)
+ return -ENOMEM;
+
+ retval = platform_device_add_resources(pdev, &cobalt_lcd_resource, 1);
+ if (retval)
+ goto err_free_device;
+
+ retval = platform_device_add(pdev);
+ if (retval)
+ goto err_free_device;
+
+ return 0;
+
+err_free_device:
+ platform_device_put(pdev);
+
+ return retval;
+}
+device_initcall(cobalt_lcd_add);
diff --git a/arch/mips/cobalt/led.c b/arch/mips/cobalt/led.c
new file mode 100644
index 000000000..196660cac
--- /dev/null
+++ b/arch/mips/cobalt/led.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Registration of Cobalt LED platform device.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+
+#include <cobalt.h>
+
+static struct resource cobalt_led_resource __initdata = {
+ .start = 0x1c000000,
+ .end = 0x1c000000,
+ .flags = IORESOURCE_MEM,
+};
+
+static __init int cobalt_led_add(void)
+{
+ struct platform_device *pdev;
+ int retval;
+
+ if (cobalt_board_id == COBALT_BRD_ID_QUBE1 ||
+ cobalt_board_id == COBALT_BRD_ID_QUBE2)
+ pdev = platform_device_alloc("cobalt-qube-leds", -1);
+ else
+ pdev = platform_device_alloc("cobalt-raq-leds", -1);
+
+ if (!pdev)
+ return -ENOMEM;
+
+ retval = platform_device_add_resources(pdev, &cobalt_led_resource, 1);
+ if (retval)
+ goto err_free_device;
+
+ retval = platform_device_add(pdev);
+ if (retval)
+ goto err_free_device;
+
+ return 0;
+
+err_free_device:
+ platform_device_put(pdev);
+
+ return retval;
+}
+device_initcall(cobalt_led_add);
diff --git a/arch/mips/cobalt/mtd.c b/arch/mips/cobalt/mtd.c
new file mode 100644
index 000000000..95f579d8c
--- /dev/null
+++ b/arch/mips/cobalt/mtd.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Registration of Cobalt MTD device.
+ *
+ * Copyright (C) 2006 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+
+static struct mtd_partition cobalt_mtd_partitions[] = {
+ {
+ .name = "firmware",
+ .offset = 0x0,
+ .size = 0x80000,
+ },
+};
+
+static struct physmap_flash_data cobalt_flash_data = {
+ .width = 1,
+ .nr_parts = 1,
+ .parts = cobalt_mtd_partitions,
+};
+
+static struct resource cobalt_mtd_resource = {
+ .start = 0x1fc00000,
+ .end = 0x1fc7ffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device cobalt_mtd = {
+ .name = "physmap-flash",
+ .dev = {
+ .platform_data = &cobalt_flash_data,
+ },
+ .num_resources = 1,
+ .resource = &cobalt_mtd_resource,
+};
+
+static int __init cobalt_mtd_init(void)
+{
+ platform_device_register(&cobalt_mtd);
+
+ return 0;
+}
+device_initcall(cobalt_mtd_init);
diff --git a/arch/mips/cobalt/pci.c b/arch/mips/cobalt/pci.c
new file mode 100644
index 000000000..85ec9cc31
--- /dev/null
+++ b/arch/mips/cobalt/pci.c
@@ -0,0 +1,48 @@
+/*
+ * Register PCI controller.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1997, 2004, 05 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2001, 2002, 2003 by Liam Davies (ldavies@agile.tv)
+ *
+ */
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/gt64120.h>
+
+extern struct pci_ops gt64xxx_pci0_ops;
+
+static struct resource cobalt_mem_resource = {
+ .start = GT_DEF_PCI0_MEM0_BASE,
+ .end = GT_DEF_PCI0_MEM0_BASE + GT_DEF_PCI0_MEM0_SIZE - 1,
+ .name = "PCI memory",
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource cobalt_io_resource = {
+ .start = 0x1000,
+ .end = 0xffffffUL,
+ .name = "PCI I/O",
+ .flags = IORESOURCE_IO,
+};
+
+static struct pci_controller cobalt_pci_controller = {
+ .pci_ops = &gt64xxx_pci0_ops,
+ .mem_resource = &cobalt_mem_resource,
+ .io_resource = &cobalt_io_resource,
+ .io_offset = 0 - GT_DEF_PCI0_IO_BASE,
+ .io_map_base = CKSEG1ADDR(GT_DEF_PCI0_IO_BASE),
+};
+
+static int __init cobalt_pci_init(void)
+{
+ register_pci_controller(&cobalt_pci_controller);
+
+ return 0;
+}
+
+arch_initcall(cobalt_pci_init);
diff --git a/arch/mips/cobalt/reset.c b/arch/mips/cobalt/reset.c
new file mode 100644
index 000000000..4eedd481d
--- /dev/null
+++ b/arch/mips/cobalt/reset.c
@@ -0,0 +1,52 @@
+/*
+ * Cobalt Reset operations
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1996, 1997 by Ralf Baechle
+ * Copyright (C) 2001 by Liam Davies (ldavies@agile.tv)
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/leds.h>
+
+#include <asm/idle.h>
+#include <asm/processor.h>
+
+#include <cobalt.h>
+
+#define RESET_PORT ((void __iomem *)CKSEG1ADDR(0x1c000000))
+#define RESET 0x0f
+
+DEFINE_LED_TRIGGER(power_off_led_trigger);
+
+static int __init ledtrig_power_off_init(void)
+{
+ led_trigger_register_simple("power-off", &power_off_led_trigger);
+ return 0;
+}
+device_initcall(ledtrig_power_off_init);
+
+void cobalt_machine_halt(void)
+{
+ /*
+ * turn on power off LED on RaQ
+	 * Turn on the power-off LED on the RaQ
+ led_trigger_event(power_off_led_trigger, LED_FULL);
+
+ local_irq_disable();
+ while (1) {
+ if (cpu_wait)
+ cpu_wait();
+ }
+}
+
+void cobalt_machine_restart(char *command)
+{
+ writeb(RESET, RESET_PORT);
+
+ /* we should never get here */
+ cobalt_machine_halt();
+}
diff --git a/arch/mips/cobalt/rtc.c b/arch/mips/cobalt/rtc.c
new file mode 100644
index 000000000..0f9ca45da
--- /dev/null
+++ b/arch/mips/cobalt/rtc.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Registration of Cobalt RTC platform device.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/mc146818rtc.h>
+#include <linux/platform_device.h>
+
+static struct resource cobalt_rtc_resource[] __initdata = {
+ {
+ .start = 0x70,
+ .end = 0x77,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .start = RTC_IRQ,
+ .end = RTC_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static __init int cobalt_rtc_add(void)
+{
+ struct platform_device *pdev;
+ int retval;
+
+ pdev = platform_device_alloc("rtc_cmos", -1);
+ if (!pdev)
+ return -ENOMEM;
+
+ retval = platform_device_add_resources(pdev, cobalt_rtc_resource,
+ ARRAY_SIZE(cobalt_rtc_resource));
+ if (retval)
+ goto err_free_device;
+
+ retval = platform_device_add(pdev);
+ if (retval)
+ goto err_free_device;
+
+ return 0;
+
+err_free_device:
+ platform_device_put(pdev);
+
+ return retval;
+}
+device_initcall(cobalt_rtc_add);
diff --git a/arch/mips/cobalt/serial.c b/arch/mips/cobalt/serial.c
new file mode 100644
index 000000000..5fb676719
--- /dev/null
+++ b/arch/mips/cobalt/serial.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Registration of Cobalt UART platform device.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+
+#include <cobalt.h>
+#include <irq.h>
+
+static struct resource cobalt_uart_resource[] __initdata = {
+ {
+ .start = 0x1c800000,
+ .end = 0x1c800007,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = SERIAL_IRQ,
+ .end = SERIAL_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct plat_serial8250_port cobalt_serial8250_port[] = {
+ {
+ .irq = SERIAL_IRQ,
+ .uartclk = 18432000,
+ .iotype = UPIO_MEM,
+ .flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+ .mapbase = 0x1c800000,
+ },
+ {},
+};
+
+static __init int cobalt_uart_add(void)
+{
+ struct platform_device *pdev;
+ int retval;
+
+ /*
+ * Cobalt Qube1 has no UART.
+ */
+ if (cobalt_board_id == COBALT_BRD_ID_QUBE1)
+ return 0;
+
+ pdev = platform_device_alloc("serial8250", -1);
+ if (!pdev)
+ return -ENOMEM;
+
+ pdev->id = PLAT8250_DEV_PLATFORM;
+ pdev->dev.platform_data = cobalt_serial8250_port;
+
+ retval = platform_device_add_resources(pdev, cobalt_uart_resource, ARRAY_SIZE(cobalt_uart_resource));
+ if (retval)
+ goto err_free_device;
+
+ retval = platform_device_add(pdev);
+ if (retval)
+ goto err_free_device;
+
+ return 0;
+
+err_free_device:
+ platform_device_put(pdev);
+
+ return retval;
+}
+device_initcall(cobalt_uart_add);
diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c
new file mode 100644
index 000000000..46581e686
--- /dev/null
+++ b/arch/mips/cobalt/setup.c
@@ -0,0 +1,124 @@
+/*
+ * Setup pointers to hardware dependent routines.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1997, 2004, 05 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2001, 2002, 2003 by Liam Davies (ldavies@agile.tv)
+ *
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/memblock.h>
+#include <linux/pm.h>
+
+#include <asm/bootinfo.h>
+#include <asm/reboot.h>
+#include <asm/setup.h>
+#include <asm/gt64120.h>
+
+#include <cobalt.h>
+
+extern void cobalt_machine_restart(char *command);
+extern void cobalt_machine_halt(void);
+
+const char *get_system_type(void)
+{
+ switch (cobalt_board_id) {
+ case COBALT_BRD_ID_QUBE1:
+ return "Cobalt Qube";
+ case COBALT_BRD_ID_RAQ1:
+ return "Cobalt RaQ";
+ case COBALT_BRD_ID_QUBE2:
+ return "Cobalt Qube2";
+ case COBALT_BRD_ID_RAQ2:
+ return "Cobalt RaQ2";
+ }
+ return "MIPS Cobalt";
+}
+
+/*
+ * Cobalt doesn't have PS/2 keyboard/mouse interfaces, so the
+ * keyboard controller is never used. The PCI-ISA bridge DMA
+ * controller is never used either.
+ */
+static struct resource cobalt_reserved_resources[] = {
+ { /* dma1 */
+ .start = 0x00,
+ .end = 0x1f,
+ .name = "reserved",
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO,
+ },
+ { /* keyboard */
+ .start = 0x60,
+ .end = 0x6f,
+ .name = "reserved",
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO,
+ },
+ { /* dma page reg */
+ .start = 0x80,
+ .end = 0x8f,
+ .name = "reserved",
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO,
+ },
+ { /* dma2 */
+ .start = 0xc0,
+ .end = 0xdf,
+ .name = "reserved",
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO,
+ },
+};
+
+void __init plat_mem_setup(void)
+{
+ int i;
+
+ _machine_restart = cobalt_machine_restart;
+ _machine_halt = cobalt_machine_halt;
+ pm_power_off = cobalt_machine_halt;
+
+ set_io_port_base(CKSEG1ADDR(GT_DEF_PCI0_IO_BASE));
+
+ /* I/O port resource */
+ ioport_resource.end = 0x01ffffff;
+
+	/* These resources have been reserved by the VIA SuperI/O chip. */
+ for (i = 0; i < ARRAY_SIZE(cobalt_reserved_resources); i++)
+ request_resource(&ioport_resource, cobalt_reserved_resources + i);
+}
+
+/*
+ * Prom init. We read our one and only communication with the firmware.
+ * Grab the amount of installed memory.
+ * Better boot loaders (CoLo) pass a command line too :-)
+ */
+
+void __init prom_init(void)
+{
+ unsigned long memsz;
+ int argc, i;
+ char **argv;
+
+ memsz = fw_arg0 & 0x7fff0000;
+ argc = fw_arg0 & 0x0000ffff;
+ argv = (char **)fw_arg1;
+
+ for (i = 1; i < argc; i++) {
+ strlcat(arcs_cmdline, argv[i], COMMAND_LINE_SIZE);
+ if (i < (argc - 1))
+ strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
+ }
+
+ memblock_add(0, memsz);
+
+ setup_8250_early_printk_port(CKSEG1ADDR(0x1c800000), 0, 0);
+}
+
+void __init prom_free_prom_memory(void)
+{
+ /* Nothing to do! */
+}
diff --git a/arch/mips/cobalt/time.c b/arch/mips/cobalt/time.c
new file mode 100644
index 000000000..1b6fa6649
--- /dev/null
+++ b/arch/mips/cobalt/time.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cobalt time initialization.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/i8253.h>
+#include <linux/init.h>
+
+#include <asm/gt64120.h>
+#include <asm/time.h>
+
+#define GT641XX_BASE_CLOCK 50000000 /* 50MHz */
+
+void __init plat_time_init(void)
+{
+ u32 start, end;
+ int i = HZ / 10;
+
+ setup_pit_timer();
+
+ gt641xx_set_base_clock(GT641XX_BASE_CLOCK);
+
+ /*
+	 * The MIPS counter frequency is measured during a 100 msec interval
+ * using GT64111 timer0.
+ */
+ while (!gt641xx_timer0_state())
+ ;
+
+ start = read_c0_count();
+
+ while (i--)
+ while (!gt641xx_timer0_state())
+ ;
+
+ end = read_c0_count();
+
+ mips_hpt_frequency = (end - start) * 10;
+ printk(KERN_INFO "MIPS counter frequency %dHz\n", mips_hpt_frequency);
+}
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
new file mode 100644
index 000000000..cf9c6329b
--- /dev/null
+++ b/arch/mips/configs/ar7_defconfig
@@ -0,0 +1,124 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_LZMA=y
+CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+# CONFIG_ELF_CORE is not set
+# CONFIG_KALLSYMS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_AR7=y
+CONFIG_HZ_100=y
+CONFIG_KEXEC=y
+# CONFIG_SECCOMP is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_MROUTE=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_CUBIC is not set
+CONFIG_TCP_CONG_WESTWOOD=y
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_BRIDGE_NETFILTER is not set
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_RAW=m
+CONFIG_ATM=m
+CONFIG_ATM_BR2684=m
+CONFIG_ATM_BR2684_IPFILTER=y
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_HAMRADIO=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_NETDEVICES=y
+CONFIG_CPMAC=y
+CONFIG_FIXED_PHY=y
+CONFIG_PPP=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_HW_RANDOM=y
+CONFIG_GPIO_SYSFS=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_AR7_WDT=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+# CONFIG_DNOTIFY is not set
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_SQUASHFS=y
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="rootfstype=squashfs,jffs2"
diff --git a/arch/mips/configs/ath25_defconfig b/arch/mips/configs/ath25_defconfig
new file mode 100644
index 000000000..7143441f5
--- /dev/null
+++ b/arch/mips/configs/ath25_defconfig
@@ -0,0 +1,115 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_FHANDLE is not set
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_ATH25=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+# CONFIG_SUSPEND is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_DEBUGFS=y
+CONFIG_MTD=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_GEOMETRY=y
+# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
+# CONFIG_MTD_CFI_I2 is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_NETDEVICES=y
+# CONFIG_ETHERNET is not set
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+CONFIG_ATH5K=m
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+# CONFIG_WLAN_VENDOR_BROADCOM is not set
+# CONFIG_WLAN_VENDOR_CISCO is not set
+# CONFIG_WLAN_VENDOR_INTEL is not set
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+# CONFIG_WLAN_VENDOR_MARVELL is not set
+# CONFIG_WLAN_VENDOR_MEDIATEK is not set
+# CONFIG_WLAN_VENDOR_RALINK is not set
+# CONFIG_WLAN_VENDOR_REALTEK is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+CONFIG_INPUT=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_ARB is not set
+CONFIG_USB=m
+CONFIG_USB_EHCI_HCD=m
+CONFIG_LEDS_CLASS=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+# CONFIG_JFFS2_FS_POSIX_ACL is not set
+# CONFIG_JFFS2_FS_SECURITY is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+# CONFIG_JFFS2_ZLIB is not set
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
+# CONFIG_SQUASHFS_ZLIB is not set
+CONFIG_SQUASHFS_XZ=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
new file mode 100644
index 000000000..96622a2ad
--- /dev/null
+++ b/arch/mips/configs/ath79_defconfig
@@ -0,0 +1,99 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_GZIP is not set
+# CONFIG_AIO is not set
+# CONFIG_KALLSYMS is not set
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_ATH79=y
+CONFIG_ATH79_MACH_AP121=y
+CONFIG_ATH79_MACH_AP136=y
+CONFIG_ATH79_MACH_AP81=y
+CONFIG_ATH79_MACH_DB120=y
+CONFIG_ATH79_MACH_PB44=y
+CONFIG_ATH79_MACH_UBNT_XM=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
+# CONFIG_SUSPEND is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_DEBUGFS=y
+CONFIG_MTD=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_NETDEVICES=y
+CONFIG_ATH9K=m
+CONFIG_ATH9K_AHB=y
+CONFIG_INPUT=m
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO_POLLED=m
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AR933X=y
+CONFIG_SERIAL_AR933X_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+# CONFIG_I2C_COMPAT is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_GPIO=y
+CONFIG_SPI=y
+CONFIG_SPI_ATH79=y
+CONFIG_SPI_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_PCF857X=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_ATH79_WDT=y
+# CONFIG_VGA_ARB is not set
+# CONFIG_HID is not set
+# CONFIG_USB_HID is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_CRC_ITU_T=m
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
new file mode 100644
index 000000000..91ce75edb
--- /dev/null
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -0,0 +1,81 @@
+CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_BCM47XX=y
+CONFIG_PCI=y
+# CONFIG_SUSPEND is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_SYN_COOKIES=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_NETFILTER=y
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_FQ_CODEL=y
+CONFIG_HAMRADIO=y
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
+CONFIG_MTD=y
+CONFIG_MTD_BCM47XX_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_BCM47XXSFLASH=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_NAND_BCM47XXNFLASH=y
+CONFIG_NETDEVICES=y
+CONFIG_B44=y
+CONFIG_TIGON3=y
+CONFIG_ATH5K=y
+CONFIG_B43=y
+CONFIG_B43LEGACY=y
+CONFIG_BRCMSMAC=y
+CONFIG_ISDN=y
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_HW_RANDOM=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_WATCHDOG=y
+CONFIG_BCM47XX_WDT=y
+CONFIG_SSB_DRIVER_GIGE=y
+CONFIG_BCMA_DRIVER_GMAC_CMN=y
+CONFIG_USB=y
+CONFIG_USB_HCD_BCMA=y
+CONFIG_USB_HCD_SSB=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_CRC32_SARWATE=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_REDUCED=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,115200"
diff --git a/arch/mips/configs/bcm63xx_defconfig b/arch/mips/configs/bcm63xx_defconfig
new file mode 100644
index 000000000..861f68018
--- /dev/null
+++ b/arch/mips/configs/bcm63xx_defconfig
@@ -0,0 +1,69 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_NO_HZ=y
+CONFIG_EXPERT=y
+# CONFIG_FUTEX is not set
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_SHMEM is not set
+# CONFIG_AIO is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_BCM63XX=y
+CONFIG_BCM63XX_CPU_6338=y
+CONFIG_BCM63XX_CPU_6345=y
+CONFIG_BCM63XX_CPU_6348=y
+CONFIG_BCM63XX_CPU_6358=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
+CONFIG_PCCARD=y
+CONFIG_PCMCIA_BCM63XX=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_NET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_MAC80211=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_MTD=y
+CONFIG_MTD_BCM63XX_PARTS=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+CONFIG_BCM63XX_ENET=y
+CONFIG_BCM63XX_PHY=y
+CONFIG_B43=y
+# CONFIG_B43_PHY_LP is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
+CONFIG_SERIAL_BCM63XX=y
+CONFIG_SERIAL_BCM63XX_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_ARB is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_FILE_LOCKING is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_PROC_KCORE=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+# CONFIG_CRYPTO_HW is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,115200"
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
new file mode 100644
index 000000000..eea9b613b
--- /dev/null
+++ b/arch/mips/configs/bigsur_defconfig
@@ -0,0 +1,261 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_SIBYTE_BIGSUR=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+CONFIG_HZ_1000=y
+CONFIG_PCI=y
+CONFIG_PCI_DEBUG=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+CONFIG_PM=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETLABEL=y
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_ADVANCED is not set
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_PROTO_SCTP=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_DCCP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_HAMRADIO=y
+CONFIG_AX25=m
+CONFIG_NETROM=m
+CONFIG_ROSE=m
+CONFIG_MKISS=m
+CONFIG_6PACK=m
+CONFIG_BPQETHER=m
+CONFIG_BAYCOM_SER_FDX=m
+CONFIG_BAYCOM_SER_HDX=m
+CONFIG_YAM=m
+CONFIG_FW_LOADER=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_EEPROM_LEGACY=y
+CONFIG_EEPROM_MAX6875=y
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDETAPE=y
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_CHR_DEV_SCH=m
+CONFIG_ATA=y
+CONFIG_SATA_SIL24=y
+CONFIG_PATA_CMD64X=y
+CONFIG_PATA_IT8213=m
+CONFIG_PATA_SIL680=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_LEGACY=y
+CONFIG_NETDEVICES=y
+CONFIG_SB1250_MAC=y
+CONFIG_CHELSIO_T3=m
+CONFIG_NETXEN_NIC=m
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+# CONFIG_INPUT is not set
+CONFIG_SERIO_RAW=m
+# CONFIG_VT is not set
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_SIBYTE=y
+CONFIG_I2C_DEBUG_CORE=y
+CONFIG_I2C_DEBUG_ALGO=y
+CONFIG_I2C_DEBUG_BUS=y
+# CONFIG_HWMON is not set
+CONFIG_EXT2_FS=m
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=m
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+CONFIG_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_NETWORK_XFRM=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRC_T10DIF=m
+CONFIG_CRC7=m
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_LIST=y
diff --git a/arch/mips/configs/bmips_be_defconfig b/arch/mips/configs/bmips_be_defconfig
new file mode 100644
index 000000000..032bb51de
--- /dev/null
+++ b/arch/mips/configs/bmips_be_defconfig
@@ -0,0 +1,79 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_NO_HZ=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_BMIPS_GENERIC=y
+CONFIG_HIGHMEM=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+# CONFIG_SECCOMP is not set
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_MAC80211=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_MTD=y
+CONFIG_MTD_BCM63XX_PARTS=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_BLK_DEV is not set
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_BCMGENET=y
+CONFIG_USB_USBNET=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_BCM63XX=y
+CONFIG_SERIAL_BCM63XX_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_SUPPLY=y
+# CONFIG_HWMON is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_CIFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon"
diff --git a/arch/mips/configs/bmips_stb_defconfig b/arch/mips/configs/bmips_stb_defconfig
new file mode 100644
index 000000000..625bd2d7e
--- /dev/null
+++ b/arch/mips/configs/bmips_stb_defconfig
@@ -0,0 +1,90 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_NO_HZ=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_BMIPS_GENERIC=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_HIGHMEM=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+# CONFIG_SECCOMP is not set
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_BMIPS_CPUFREQ=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_MAC80211=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_BLK_DEV is not set
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_BCMGENET=y
+CONFIG_USB_USBNET=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_SUPPLY=y
+# CONFIG_HWMON is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_SOC_BRCMSTB=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_CIFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon"
diff --git a/arch/mips/configs/capcella_defconfig b/arch/mips/configs/capcella_defconfig
new file mode 100644
index 000000000..7bf8971af
--- /dev/null
+++ b/arch/mips/configs/capcella_defconfig
@@ -0,0 +1,91 @@
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_MACH_VR41XX=y
+CONFIG_ZAO_CAPCELLA=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_TCP_MD5SIG=y
+# CONFIG_IPV6 is not set
+CONFIG_NETWORK_SECMARK=y
+CONFIG_IP_SCTP=m
+CONFIG_FW_LOADER=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_PATA_LEGACY=y
+CONFIG_NETDEVICES=y
+CONFIG_8139TOO=y
+CONFIG_PHYLIB=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_VR41XX=y
+CONFIG_SERIAL_VR41XX_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_GPIO_VR41XX=y
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_VR41XX=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CRYPTO_CBC=m
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="mem=32M console=ttyVR0,38400"
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
new file mode 100644
index 000000000..b6695367a
--- /dev/null
+++ b/arch/mips/configs/cavium_octeon_defconfig
@@ -0,0 +1,168 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_PREEMPT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_CAVIUM_OCTEON_SOC=y
+CONFIG_CAVIUM_CN63XXP1=y
+CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE=2
+CONFIG_OCTEON_ILM=m
+CONFIG_SMP=y
+CONFIG_NR_CPUS=32
+CONFIG_HZ_100=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_DEVTMPFS=y
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+# CONFIG_MTD_OF_PARTS is not set
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_SLRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_PATA_OCTEON_CF=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_BCM87XX_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_DW=y
+CONFIG_I2C=y
+CONFIG_I2C_OCTEON=y
+CONFIG_SPI=y
+CONFIG_SPI_OCTEON=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_USB=y
+# CONFIG_USB_PCI is not set
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_MMC=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
+CONFIG_MMC_CAVIUM_OCTEON=y
+CONFIG_EDAC=y
+CONFIG_EDAC_OCTEON_PC=y
+CONFIG_EDAC_OCTEON_L2C=y
+CONFIG_EDAC_OCTEON_LMC=y
+CONFIG_EDAC_OCTEON_PCI=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_STAGING=y
+CONFIG_OCTEON_ETHERNET=y
+CONFIG_OCTEON_USB=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_RAS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD5_OCTEON=y
+CONFIG_CRYPTO_SHA1_OCTEON=m
+CONFIG_CRYPTO_SHA256_OCTEON=m
+CONFIG_CRYPTO_SHA512_OCTEON=m
+CONFIG_CRYPTO_DES=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig
new file mode 100644
index 000000000..052c5ad0f
--- /dev/null
+++ b/arch/mips/configs/ci20_defconfig
@@ -0,0 +1,196 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_XZ=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MACH_INGENIC_SOC=y
+CONFIG_JZ4780_CI20=y
+CONFIG_HIGHMEM=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+# CONFIG_SUSPEND is not set
+CONFIG_MODULES=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+CONFIG_CMA=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_FW_LOADER=m
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_MTD=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_NAND_JZ4780=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_FASTMAP=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+CONFIG_DM9000=y
+CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL=y
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+CONFIG_KEYBOARD_GPIO=m
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=2
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=5
+CONFIG_SERIAL_8250_RUNTIME_UARTS=5
+CONFIG_SERIAL_8250_INGENIC=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_JZ4780=y
+CONFIG_SPI=y
+CONFIG_SPI_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_SUPPLY=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_JZ4740_WDT=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_DEBUG=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_ACT8865=y
+CONFIG_RC_CORE=m
+CONFIG_LIRC=y
+CONFIG_RC_DEVICES=y
+CONFIG_IR_GPIO_CIR=m
+CONFIG_IR_GPIO_TX=m
+CONFIG_MEDIA_SUPPORT=m
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+CONFIG_MMC_JZ4740=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_MTD=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_ACTIVITY=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_LEDS_TRIGGER_CAMERA=m
+CONFIG_LEDS_TRIGGER_PANIC=y
+CONFIG_LEDS_TRIGGER_NETDEV=y
+CONFIG_LEDS_TRIGGER_PATTERN=y
+CONFIG_LEDS_TRIGGER_AUDIO=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_JZ4740=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_JZ4780=y
+CONFIG_INGENIC_OST=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_MEMORY=y
+CONFIG_JZ4780_NEMC=y
+CONFIG_PWM=y
+CONFIG_PWM_JZ4740=m
+CONFIG_EXT4_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_UBIFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_UTF8=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=32
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_STACKTRACE=y
+# CONFIG_FTRACE is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon console=ttyS4,115200 clk_ignore_unused"
diff --git a/arch/mips/configs/cobalt_defconfig b/arch/mips/configs/cobalt_defconfig
new file mode 100644
index 000000000..c6a652ad3
--- /dev/null
+++ b/arch/mips/configs/cobalt_defconfig
@@ -0,0 +1,74 @@
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+CONFIG_MIPS_COBALT=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+CONFIG_MTD=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_RAID_ATTRS=y
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_PATA_VIA=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_TULIP=y
+CONFIG_DE2104X=y
+CONFIG_TULIP=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_COBALT_BTNS=y
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_PCI is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_COBALT=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_HID=m
+CONFIG_USB=m
+CONFIG_USB_EHCI_HCD=m
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_STORAGE=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_COBALT_QUBE=y
+CONFIG_LEDS_COBALT_RAQ=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_LIBCRC32C=y
diff --git a/arch/mips/configs/cu1000-neo_defconfig b/arch/mips/configs/cu1000-neo_defconfig
new file mode 100644
index 000000000..55d0690a3
--- /dev/null
+++ b/arch/mips/configs/cu1000-neo_defconfig
@@ -0,0 +1,109 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MACH_INGENIC_SOC=y
+CONFIG_X1000_CU1000_NEO=y
+CONFIG_HIGHMEM=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+# CONFIG_SUSPEND is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+CONFIG_CMA=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_CFG80211=y
+CONFIG_UEVENT_HELPER=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_NETDEVICES=y
+CONFIG_STMMAC_ETH=y
+CONFIG_SMSC_PHY=y
+CONFIG_BRCMFMAC=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=2
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=3
+CONFIG_SERIAL_8250_RUNTIME_UARTS=3
+CONFIG_SERIAL_8250_INGENIC=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_JZ4780=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_SENSORS_ADS7828=y
+CONFIG_WATCHDOG=y
+CONFIG_JZ4740_WDT=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+CONFIG_MMC_JZ4740=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_JZ4740=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_JZ4780=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT4_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_AUTOFS_FS=y
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NLS=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_PRINTK_TIME=y
+CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15
+CONFIG_CONSOLE_LOGLEVEL_QUIET=15
+CONFIG_MESSAGE_LOGLEVEL_DEFAULT=7
+CONFIG_DEBUG_INFO=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_STACKTRACE=y
+# CONFIG_FTRACE is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon clk_ignore_unused"
diff --git a/arch/mips/configs/cu1830-neo_defconfig b/arch/mips/configs/cu1830-neo_defconfig
new file mode 100644
index 000000000..e7064851a
--- /dev/null
+++ b/arch/mips/configs/cu1830-neo_defconfig
@@ -0,0 +1,112 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MACH_INGENIC_SOC=y
+CONFIG_X1830_CU1830_NEO=y
+CONFIG_HIGHMEM=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+# CONFIG_SUSPEND is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+CONFIG_CMA=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_CFG80211=y
+CONFIG_UEVENT_HELPER=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_NETDEVICES=y
+CONFIG_STMMAC_ETH=y
+CONFIG_ICPLUS_PHY=y
+CONFIG_BRCMFMAC=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=2
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_INGENIC=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_JZ4780=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_SENSORS_ADS7828=y
+CONFIG_WATCHDOG=y
+CONFIG_JZ4740_WDT=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+CONFIG_MMC_JZ4740=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_JZ4740=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_JZ4780=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT4_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_AUTOFS_FS=y
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NLS=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_PRINTK_TIME=y
+CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15
+CONFIG_CONSOLE_LOGLEVEL_QUIET=15
+CONFIG_MESSAGE_LOGLEVEL_DEFAULT=7
+CONFIG_DEBUG_INFO=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_STACKTRACE=y
+# CONFIG_FTRACE is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon clk_ignore_unused"
diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig
new file mode 100644
index 000000000..b8bd66300
--- /dev/null
+++ b/arch/mips/configs/db1xxx_defconfig
@@ -0,0 +1,226 @@
+CONFIG_LOCALVERSION="-db1xxx"
+CONFIG_KERNEL_XZ=y
+CONFIG_DEFAULT_HOSTNAME="db1xxx"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MIPS_ALCHEMY=y
+CONFIG_HZ_100=y
+CONFIG_PCI=y
+CONFIG_PCCARD=y
+CONFIG_PCMCIA_ALCHEMY_DEVBOARD=y
+CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUG=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_FIB_TRIE_STATS=y
+CONFIG_NET_IPIP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_VENO=y
+CONFIG_DEFAULT_VENO=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_GRE=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_BRIDGE=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIBTUSB=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_SST25L=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_NAND_ECC_SW_BCH=y
+CONFIG_MTD_NAND_AU1550=y
+CONFIG_MTD_NAND_PLATFORM=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_ATA=y
+CONFIG_PATA_HPT37X=y
+CONFIG_PATA_HPT3X2N=y
+CONFIG_PATA_PCMCIA=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_NLMON=y
+CONFIG_PCMCIA_3C589=y
+CONFIG_MIPS_AU1X00_ENET=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_AMD_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=y
+CONFIG_TOUCHSCREEN_WM97XX=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_TTY_PRINTK=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_AU1550=y
+CONFIG_SPI=y
+CONFIG_SPI_AU1550=y
+CONFIG_SPI_GPIO=y
+CONFIG_SENSORS_ADM1025=y
+CONFIG_SENSORS_LM70=y
+CONFIG_FB=y
+CONFIG_FB_AU1100=y
+CONFIG_FB_AU1200=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_AC97_POWER_SAVE=y
+CONFIG_SND_AC97_POWER_SAVE_DEFAULT=1
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_AU1XPSC=y
+CONFIG_SND_SOC_AU1XAUDIO=y
+CONFIG_SND_SOC_DB1000=y
+CONFIG_SND_SOC_DB1200=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_DYNAMIC_MINORS=y
+CONFIG_USB_OTG=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_SDIO_UART=y
+CONFIG_MMC_AU1X=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AU1XXX=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_FANOTIFY=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="local"
+CONFIG_NFS_V4_1_MIGRATION=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_UTF8=y
+CONFIG_SECURITYFS=y
+CONFIG_CRYPTO_USER=y
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRC32_SLICEBY4=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/mips/configs/decstation_64_defconfig b/arch/mips/configs/decstation_64_defconfig
new file mode 100644
index 000000000..4a81297e2
--- /dev/null
+++ b/arch/mips/configs/decstation_64_defconfig
@@ -0,0 +1,225 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_EXPERT=y
+# CONFIG_SGETMASK_SYSCALL is not set
+# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_BPF_SYSCALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MACH_DECSTATION=y
+CONFIG_64BIT=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_TC=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+# CONFIG_SUSPEND is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_IP_SCTP=m
+CONFIG_VLAN_8021Q=m
+# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_MTD=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_BLOCK_RO=m
+CONFIG_MTD_MS02NV=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+CONFIG_DECLANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_FDDI=y
+CONFIG_DEFZA=y
+CONFIG_DEFXX=y
+# CONFIG_WLAN is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_LKKBD=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_MOUSE_VSXXXAA=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_TGA=y
+CONFIG_FB_PMAG_AA=y
+CONFIG_FB_PMAG_BA=y
+CONFIG_FB_PMAGB_B=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE_COLUMNS=160
+CONFIG_DUMMY_CONSOLE_ROWS=64
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_MIPS_PLATFORM_DEVICES is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_MANDATORY_FILE_LOCKING is not set
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_UFS_FS=y
+CONFIG_UFS_FS_WRITE=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_CRYPTO_RSA=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
new file mode 100644
index 000000000..fd35454ba
--- /dev/null
+++ b/arch/mips/configs/decstation_defconfig
@@ -0,0 +1,221 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_EXPERT=y
+# CONFIG_SGETMASK_SYSCALL is not set
+# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_BPF_SYSCALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MACH_DECSTATION=y
+CONFIG_CPU_R3000=y
+CONFIG_TC=y
+# CONFIG_SUSPEND is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_IP_SCTP=m
+CONFIG_VLAN_8021Q=m
+# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_MTD=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_BLOCK_RO=m
+CONFIG_MTD_MS02NV=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+CONFIG_DECLANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_FDDI=y
+CONFIG_DEFZA=y
+CONFIG_DEFXX=y
+# CONFIG_WLAN is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_LKKBD=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_MOUSE_VSXXXAA=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_TGA=y
+CONFIG_FB_PMAG_AA=y
+CONFIG_FB_PMAG_BA=y
+CONFIG_FB_PMAGB_B=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE_COLUMNS=160
+CONFIG_DUMMY_CONSOLE_ROWS=64
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_MIPS_PLATFORM_DEVICES is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_MANDATORY_FILE_LOCKING is not set
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_UFS_FS=y
+CONFIG_UFS_FS_WRITE=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_CRYPTO_RSA=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig
new file mode 100644
index 000000000..7ed8f4c7c
--- /dev/null
+++ b/arch/mips/configs/decstation_r4k_defconfig
@@ -0,0 +1,221 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_EXPERT=y
+# CONFIG_SGETMASK_SYSCALL is not set
+# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_BPF_SYSCALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MACH_DECSTATION=y
+CONFIG_TC=y
+# CONFIG_SUSPEND is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_IP_SCTP=m
+CONFIG_VLAN_8021Q=m
+# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_MTD=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_BLOCK_RO=m
+CONFIG_MTD_MS02NV=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+CONFIG_DECLANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_FDDI=y
+CONFIG_DEFZA=y
+CONFIG_DEFXX=y
+# CONFIG_WLAN is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_LKKBD=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_MOUSE_VSXXXAA=y
+# CONFIG_SERIAL_DZ is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_TGA=y
+CONFIG_FB_PMAG_AA=y
+CONFIG_FB_PMAG_BA=y
+CONFIG_FB_PMAGB_B=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE_COLUMNS=160
+CONFIG_DUMMY_CONSOLE_ROWS=64
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_MIPS_PLATFORM_DEVICES is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_MANDATORY_FILE_LOCKING is not set
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_UFS_FS=y
+CONFIG_UFS_FS_WRITE=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_CRYPTO_RSA=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/e55_defconfig b/arch/mips/configs/e55_defconfig
new file mode 100644
index 000000000..fd82b858a
--- /dev/null
+++ b/arch/mips/configs/e55_defconfig
@@ -0,0 +1,37 @@
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_MACH_VR41XX=y
+CONFIG_CASIO_E55=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_PATA_LEGACY=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_VR41XX=y
+CONFIG_SERIAL_VR41XX_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_GPIO_VR41XX=y
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_VR41XX=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyVR0,19200 ide0=0x1f0,0x3f6,40 mem=8M"
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
new file mode 100644
index 000000000..023b4e644
--- /dev/null
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -0,0 +1,235 @@
+CONFIG_LOCALVERSION="-fuloong2e"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_EXPERT=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MACH_LOONGSON2EF=y
+CONFIG_PCI=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+# CONFIG_SUSPEND is not set
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION="/dev/sda3"
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_PHONET=m
+CONFIG_NET_9P=m
+CONFIG_FW_LOADER=m
+CONFIG_MTD=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_CFI=m
+CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_PHYSMAP=m
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_PATA_VIA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_LEGACY=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_FC=y
+CONFIG_MACVLAN=m
+CONFIG_VETH=m
+CONFIG_8139TOO=y
+# CONFIG_8139TOO_PIO is not set
+CONFIG_PHYLIB=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_INPUT_FF_MEMLESS=y
+CONFIG_MOUSE_SERIAL=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=m
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_VIAPRO=m
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_RADEON=y
+# CONFIG_FB_RADEON_I2C is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_SOUND=y
+CONFIG_SND=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_VIA82XX=m
+CONFIG_HIDRAW=y
+# CONFIG_USB_HID is not set
+CONFIG_HID_PID=y
+CONFIG_USB_KBD=y
+CONFIG_USB_MOUSE=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_OTG_PRODUCTLIST=y
+CONFIG_USB_WUSB_CBAF=m
+CONFIG_USB_C67X00_HCD=m
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=m
+CONFIG_USB_R8A66597_HCD=m
+CONFIG_USB_ACM=y
+CONFIG_USB_PRINTER=y
+CONFIG_USB_WDM=m
+CONFIG_USB_TMC=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
+CONFIG_USB_ISP1760=m
+CONFIG_USB_SEVSEG=m
+CONFIG_USB_ISIGHTFW=m
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_REISERFS_FS=m
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=936
+CONFIG_FAT_DEFAULT_IOCHARSET="utf8"
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_OMFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_CIFS=m
+CONFIG_CIFS_STATS2=y
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_CIFS_DEBUG2=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_LZO=m
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC_CCITT=y
+CONFIG_CRC7=m
+# CONFIG_ENABLE_MUST_CHECK is not set
diff --git a/arch/mips/configs/gcw0_defconfig b/arch/mips/configs/gcw0_defconfig
new file mode 100644
index 000000000..7e28a4fe9
--- /dev/null
+++ b/arch/mips/configs/gcw0_defconfig
@@ -0,0 +1,152 @@
+CONFIG_DEFAULT_HOSTNAME="gcw0"
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_MACH_INGENIC_SOC=y
+CONFIG_JZ4770_GCW0=y
+CONFIG_HIGHMEM=y
+# CONFIG_SECCOMP is not set
+CONFIG_MIPS_RAW_APPENDED_DTB=y
+CONFIG_MIPS_CMDLINE_DTB_EXTEND=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BOUNCE is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_CFG80211=y
+CONFIG_MAC80211=m
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
+CONFIG_NETDEVICES=y
+# CONFIG_ETHERNET is not set
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+# CONFIG_WLAN_VENDOR_ATH is not set
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+# CONFIG_WLAN_VENDOR_BROADCOM is not set
+# CONFIG_WLAN_VENDOR_CISCO is not set
+# CONFIG_WLAN_VENDOR_INTEL is not set
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+# CONFIG_WLAN_VENDOR_MARVELL is not set
+# CONFIG_WLAN_VENDOR_MEDIATEK is not set
+# CONFIG_WLAN_VENDOR_RALINK is not set
+CONFIG_RTL8192CU=m
+# CONFIG_RTLWIFI_DEBUG is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+# CONFIG_WLAN_VENDOR_QUANTENNA is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_PWM_VIBRA=y
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_INGENIC=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C_GPIO=y
+CONFIG_SPI=y
+CONFIG_SPI_GPIO=y
+CONFIG_POWER_SUPPLY=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_JZ4740_WDT=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_ACT8865=y
+CONFIG_DRM=y
+CONFIG_DRM_FBDEV_OVERALLOC=300
+CONFIG_DRM_PANEL_NOVATEK_NT39016=y
+CONFIG_DRM_INGENIC=y
+CONFIG_DRM_ETNAVIV=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_PROC_FS is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_MIPS is not set
+# CONFIG_SND_USB is not set
+CONFIG_SND_SOC=y
+CONFIG_SND_JZ4740_SOC_I2S=y
+CONFIG_SND_SOC_JZ4770_CODEC=y
+CONFIG_SND_SOC_SIMPLE_AMPLIFIER=y
+CONFIG_SND_SIMPLE_CARD=y
+CONFIG_USB_CONN_GPIO=y
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_GADGET=y
+CONFIG_USB_MUSB_JZ4740=y
+CONFIG_USB_INVENTRA_DMA=y
+CONFIG_JZ4770_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_ETH=y
+CONFIG_MMC=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
+CONFIG_MMC_JZ4740=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_PANIC=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
+CONFIG_RTC_DRV_JZ4740=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_JZ4780=y
+# CONFIG_VIRTIO_MENU is not set
+CONFIG_STAGING=y
+CONFIG_R8188EU=m
+CONFIG_INGENIC_OST=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_IIO=y
+CONFIG_IIO_BUFFER=y
+CONFIG_IIO_BUFFER_CB=y
+CONFIG_IIO_KFIFO_BUF=y
+CONFIG_MXC6255=m
+CONFIG_INGENIC_ADC=y
+CONFIG_PWM=y
+CONFIG_PWM_JZ4740=y
+CONFIG_EXT4_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_DECOMP_MULTI=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity"
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_FONTS=y
+CONFIG_FONT_6x10=y
+CONFIG_DEBUG_FS=y
diff --git a/arch/mips/configs/generic/32r1.config b/arch/mips/configs/generic/32r1.config
new file mode 100644
index 000000000..a11cd8715
--- /dev/null
+++ b/arch/mips/configs/generic/32r1.config
@@ -0,0 +1,2 @@
+CONFIG_CPU_MIPS32_R1=y
+CONFIG_HIGHMEM=y
diff --git a/arch/mips/configs/generic/32r2.config b/arch/mips/configs/generic/32r2.config
new file mode 100644
index 000000000..9570672d4
--- /dev/null
+++ b/arch/mips/configs/generic/32r2.config
@@ -0,0 +1,3 @@
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_HIGHMEM=y
diff --git a/arch/mips/configs/generic/32r6.config b/arch/mips/configs/generic/32r6.config
new file mode 100644
index 000000000..1a5d5ea4a
--- /dev/null
+++ b/arch/mips/configs/generic/32r6.config
@@ -0,0 +1,4 @@
+CONFIG_CPU_MIPS32_R6=y
+CONFIG_HIGHMEM=y
+
+CONFIG_CRYPTO_CRC32_MIPS=y
diff --git a/arch/mips/configs/generic/64r1.config b/arch/mips/configs/generic/64r1.config
new file mode 100644
index 000000000..7c1ea7e7b
--- /dev/null
+++ b/arch/mips/configs/generic/64r1.config
@@ -0,0 +1,4 @@
+CONFIG_CPU_MIPS64_R1=y
+CONFIG_64BIT=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
diff --git a/arch/mips/configs/generic/64r2.config b/arch/mips/configs/generic/64r2.config
new file mode 100644
index 000000000..b4d31ae8b
--- /dev/null
+++ b/arch/mips/configs/generic/64r2.config
@@ -0,0 +1,5 @@
+CONFIG_CPU_MIPS64_R2=y
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_64BIT=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
diff --git a/arch/mips/configs/generic/64r6.config b/arch/mips/configs/generic/64r6.config
new file mode 100644
index 000000000..5dd8e8503
--- /dev/null
+++ b/arch/mips/configs/generic/64r6.config
@@ -0,0 +1,6 @@
+CONFIG_CPU_MIPS64_R6=y
+CONFIG_64BIT=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+
+CONFIG_CRYPTO_CRC32_MIPS=y
diff --git a/arch/mips/configs/generic/board-boston.config b/arch/mips/configs/generic/board-boston.config
new file mode 100644
index 000000000..19560a45b
--- /dev/null
+++ b/arch/mips/configs/generic/board-boston.config
@@ -0,0 +1,48 @@
+CONFIG_FIT_IMAGE_FDT_BOSTON=y
+
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+
+CONFIG_AUXDISPLAY=y
+CONFIG_IMG_ASCII_LCD=y
+
+CONFIG_COMMON_CLK_BOSTON=y
+
+CONFIG_DMADEVICES=y
+CONFIG_PCH_DMA=y
+
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_PCH=y
+
+CONFIG_I2C=y
+CONFIG_I2C_EG20T=y
+
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PCI=y
+
+CONFIG_NETDEVICES=y
+CONFIG_PCH_GBE=y
+
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCIE_XILINX=y
+
+CONFIG_PCH_PHUB=y
+
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_M41T80=y
+
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+
+CONFIG_SPI=y
+CONFIG_SPI_TOPCLIFF_PCH=y
+
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
diff --git a/arch/mips/configs/generic/board-ni169445.config b/arch/mips/configs/generic/board-ni169445.config
new file mode 100644
index 000000000..fc3580e4e
--- /dev/null
+++ b/arch/mips/configs/generic/board-ni169445.config
@@ -0,0 +1,29 @@
+# require CONFIG_CPU_MIPS32_R2=y
+# require CONFIG_CPU_LITTLE_ENDIAN=y
+
+CONFIG_FIT_IMAGE_FDT_NI169445=y
+
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
+
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CMDLINE_PARTS=y
+
+CONFIG_MTD_NAND_ECC_SW_HAMMING=y
+CONFIG_MTD_NAND_ECC_SW_BCH=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_NAND_GPIO=y
+
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_BLOCK=y
+
+CONFIG_NETDEVICES=y
+CONFIG_STMMAC_ETH=y
+CONFIG_STMMAC_PLATFORM=y
+CONFIG_DWMAC_GENERIC=y
diff --git a/arch/mips/configs/generic/board-ocelot.config b/arch/mips/configs/generic/board-ocelot.config
new file mode 100644
index 000000000..510709565
--- /dev/null
+++ b/arch/mips/configs/generic/board-ocelot.config
@@ -0,0 +1,51 @@
+# require CONFIG_CPU_MIPS32_R2=y
+
+CONFIG_LEGACY_BOARD_OCELOT=y
+CONFIG_FIT_IMAGE_FDT_OCELOT=y
+
+CONFIG_BRIDGE=y
+CONFIG_GENERIC_PHY=y
+
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_NAND_PLATFORM=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_UBI=y
+
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+
+CONFIG_NETDEVICES=y
+CONFIG_NET_SWITCHDEV=y
+CONFIG_NET_DSA=y
+CONFIG_MSCC_OCELOT_SWITCH=y
+CONFIG_MSCC_OCELOT_SWITCH_OCELOT=y
+CONFIG_MDIO_MSCC_MIIM=y
+CONFIG_MICROSEMI_PHY=y
+
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+
+CONFIG_SPI=y
+CONFIG_SPI_BITBANG=y
+CONFIG_SPI_DESIGNWARE=y
+CONFIG_SPI_DW_MMIO=y
+CONFIG_SPI_SPIDEV=y
+
+CONFIG_PINCTRL=y
+CONFIG_PINCTRL_OCELOT=y
+
+CONFIG_GPIO_SYSFS=y
+
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_OCELOT_RESET=y
+
+CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/mips/configs/generic/board-ranchu.config b/arch/mips/configs/generic/board-ranchu.config
new file mode 100644
index 000000000..fee9ad4c5
--- /dev/null
+++ b/arch/mips/configs/generic/board-ranchu.config
@@ -0,0 +1,30 @@
+CONFIG_VIRT_BOARD_RANCHU=y
+
+CONFIG_BATTERY_GOLDFISH=y
+CONFIG_FB=y
+CONFIG_FB_GOLDFISH=y
+CONFIG_GOLDFISH=y
+CONFIG_STAGING=y
+CONFIG_GOLDFISH_AUDIO=y
+CONFIG_GOLDFISH_PIC=y
+CONFIG_GOLDFISH_PIPE=y
+CONFIG_GOLDFISH_TTY=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GOLDFISH=y
+
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y
+
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
+
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
diff --git a/arch/mips/configs/generic/board-sead-3.config b/arch/mips/configs/generic/board-sead-3.config
new file mode 100644
index 000000000..df49a592d
--- /dev/null
+++ b/arch/mips/configs/generic/board-sead-3.config
@@ -0,0 +1,34 @@
+# require CONFIG_32BIT=y
+
+CONFIG_LEGACY_BOARD_SEAD3=y
+
+CONFIG_AUXDISPLAY=y
+CONFIG_IMG_ASCII_LCD=y
+
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_SYSCON=y
+
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
+
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_GLUEBI=y
+
+CONFIG_NETDEVICES=y
+CONFIG_SMSC911X=y
+CONFIG_SMSC_PHY=y
+
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
diff --git a/arch/mips/configs/generic/board-xilfpga.config b/arch/mips/configs/generic/board-xilfpga.config
new file mode 100644
index 000000000..9cce57385
--- /dev/null
+++ b/arch/mips/configs/generic/board-xilfpga.config
@@ -0,0 +1,22 @@
+# require CONFIG_CPU_MIPS32_R2=y
+# require CONFIG_CPU_LITTLE_ENDIAN=y
+
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_XILINX=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_FIT_IMAGE_FDT_XILFPGA=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_XILINX=y
+CONFIG_SENSORS_ADT7410=y
+CONFIG_TMPFS=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_NETDEVICES=y
+CONFIG_XILINX_EMACLITE=y
+CONFIG_SMSC_PHY=y
diff --git a/arch/mips/configs/generic/eb.config b/arch/mips/configs/generic/eb.config
new file mode 100644
index 000000000..c5cdc99a6
--- /dev/null
+++ b/arch/mips/configs/generic/eb.config
@@ -0,0 +1 @@
+CONFIG_CPU_BIG_ENDIAN=y
diff --git a/arch/mips/configs/generic/el.config b/arch/mips/configs/generic/el.config
new file mode 100644
index 000000000..ee43fdb3b
--- /dev/null
+++ b/arch/mips/configs/generic/el.config
@@ -0,0 +1 @@
+CONFIG_CPU_LITTLE_ENDIAN=y
diff --git a/arch/mips/configs/generic/micro32r2.config b/arch/mips/configs/generic/micro32r2.config
new file mode 100644
index 000000000..b701fe7aa
--- /dev/null
+++ b/arch/mips/configs/generic/micro32r2.config
@@ -0,0 +1,4 @@
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MICROMIPS=y
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_HIGHMEM=y
diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig
new file mode 100644
index 000000000..714169e41
--- /dev/null
+++ b/arch/mips/configs/generic_defconfig
@@ -0,0 +1,92 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_USERFAULTFD=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_MIPS_CPS=y
+CONFIG_HIGHMEM=y
+CONFIG_NR_CPUS=16
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_TRIM_UNUSED_KSYMS=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_SCSI=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_MFD_SYSCON=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+# CONFIG_MIPS_PLATFORM_DEVICES is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_FANOTIFY=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_REDUCED=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon"
diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
new file mode 100644
index 000000000..5b9b59a8f
--- /dev/null
+++ b/arch/mips/configs/gpr_defconfig
@@ -0,0 +1,311 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MIPS_ALCHEMY=y
+CONFIG_MIPS_GPR=y
+CONFIG_PCI=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_IP_DCCP=m
+CONFIG_IP_SCTP=m
+CONFIG_TIPC=m
+CONFIG_ATM=y
+CONFIG_ATM_CLIP=y
+CONFIG_ATM_LANE=m
+CONFIG_ATM_MPOA=m
+CONFIG_ATM_BR2684=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_LLC2=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_X25=m
+CONFIG_LAPB=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_ATM=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_PKTGEN=m
+CONFIG_HAMRADIO=y
+CONFIG_AX25=m
+# CONFIG_AX25_DAMA_SLAVE is not set
+CONFIG_NETROM=m
+CONFIG_ROSE=m
+CONFIG_MKISS=m
+CONFIG_6PACK=m
+CONFIG_BPQETHER=m
+CONFIG_BAYCOM_SER_FDX=m
+CONFIG_BAYCOM_SER_HDX=m
+CONFIG_YAM=m
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_RAM=m
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_TIFM_CORE=m
+CONFIG_SCSI=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_FC=y
+CONFIG_NETCONSOLE=m
+CONFIG_ATM_TCP=m
+CONFIG_ATM_LANAI=m
+CONFIG_ATM_ENI=m
+CONFIG_ATM_FIRESTREAM=m
+CONFIG_ATM_ZATM=m
+CONFIG_ATM_NICSTAR=m
+CONFIG_ATM_IDT77252=m
+CONFIG_ATM_AMBASSADOR=m
+CONFIG_ATM_HORIZON=m
+CONFIG_ATM_IA=m
+CONFIG_ATM_FORE200E=m
+CONFIG_ATM_HE=m
+CONFIG_ATM_HE_USE_SUNI=y
+CONFIG_MIPS_AU1X00_ENET=y
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_ATH_DEBUG=y
+CONFIG_ATH5K=y
+CONFIG_ATH5K_DEBUG=y
+CONFIG_WAN=y
+CONFIG_LANMEDIA=m
+CONFIG_HDLC=m
+CONFIG_HDLC_RAW=m
+CONFIG_HDLC_RAW_ETH=m
+CONFIG_HDLC_CISCO=m
+CONFIG_HDLC_FR=m
+CONFIG_HDLC_PPP=m
+CONFIG_HDLC_X25=m
+CONFIG_PCI200SYN=m
+CONFIG_WANXL=m
+CONFIG_FARSYNC=m
+CONFIG_DSCC4=m
+CONFIG_DSCC4_PCISYNC=y
+CONFIG_DSCC4_PCI_RST=y
+CONFIG_DLCI=m
+CONFIG_LAPBETHER=m
+CONFIG_X25_ASY=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_SENSORS_LM83=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SSB=m
+CONFIG_SSB_DRIVER_PCICORE=y
+# CONFIG_VGA_ARB is not set
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_USB_HID=m
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_KBD=m
+CONFIG_USB_MOUSE=m
+CONFIG_USB=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=m
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_SIERRAWIRELESS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs rw ip=auto"
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
new file mode 100644
index 000000000..21a1168ae
--- /dev/null
+++ b/arch/mips/configs/ip22_defconfig
@@ -0,0 +1,345 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_SGI_IP22=y
+CONFIG_ARC_CONSOLE=y
+CONFIG_CPU_R5000=y
+CONFIG_HZ_1000=y
+# CONFIG_SUSPEND is not set
+CONFIG_PM=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_RFKILL=m
+CONFIG_CONNECTOR=m
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_SGIWD93_SCSI=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_SGISEEQ=y
+CONFIG_SMC91X=m
+CONFIG_MDIO_BITBANG=m
+CONFIG_PHYLIB=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_HOSTAP=m
+CONFIG_INPUT_MOUSEDEV=m
+CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2_ALPS is not set
+# CONFIG_MOUSE_PS2_SYNAPTICS is not set
+CONFIG_MOUSE_SERIAL=m
+CONFIG_SERIO_RAW=m
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_IP22_ZILOG=m
+# CONFIG_HW_RANDOM is not set
+CONFIG_RAW_DRIVER=m
+# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_INDYDOG=m
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_SGI_NEWPORT_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_HIDRAW=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+CONFIG_RTC_DRV_DS1286=y
+CONFIG_EXT2_FS=m
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_CIFS=m
+CONFIG_CIFS_UPCALL=y
+CONFIG_CODA_FS=m
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+CONFIG_KEYS=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_LZO=m
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC_T10DIF=m
+CONFIG_DEBUG_MEMORY_INIT=y
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
new file mode 100644
index 000000000..638d7cf5e
--- /dev/null
+++ b/arch/mips/configs/ip27_defconfig
@@ -0,0 +1,345 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_CGROUPS=y
+CONFIG_CPUSETS=y
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_SGI_IP27=y
+CONFIG_NUMA=y
+CONFIG_SMP=y
+CONFIG_HZ_1000=y
+CONFIG_PCI=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+CONFIG_PM=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_RFKILL=m
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=m
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_CXGB3_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_BE2ISCSI=m
+CONFIG_SCSI_HPSA=m
+CONFIG_SCSI_3W_SAS=m
+CONFIG_SCSI_AIC94XX=m
+# CONFIG_AIC94XX_DEBUG is not set
+CONFIG_SCSI_MVSAS=m
+# CONFIG_SCSI_MVSAS_DEBUG is not set
+CONFIG_SCSI_DPT_I2O=m
+CONFIG_SCSI_MPT2SAS=m
+CONFIG_LIBFC=m
+CONFIG_SCSI_QLOGIC_1280=y
+CONFIG_SCSI_PMCRAID=m
+CONFIG_SCSI_BFA_FC=m
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=y
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_UEVENT=y
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_VETH=m
+CONFIG_ATL2=m
+CONFIG_ATL1E=m
+CONFIG_ATL1C=m
+CONFIG_B44=m
+CONFIG_BNX2X=m
+CONFIG_ENIC=m
+CONFIG_DNET=m
+CONFIG_BE2NET=m
+CONFIG_E1000E=m
+CONFIG_IGB=m
+CONFIG_IGBVF=m
+CONFIG_IXGBE=m
+CONFIG_JME=m
+CONFIG_MLX4_EN=m
+# CONFIG_MLX4_DEBUG is not set
+CONFIG_KS8851_MLL=m
+CONFIG_VXGE=m
+CONFIG_AX88796=m
+CONFIG_AX88796_93CX6=y
+CONFIG_ETHOC=m
+CONFIG_QLA3XXX=m
+CONFIG_NETXEN_NIC=m
+CONFIG_SFC=m
+CONFIG_SGI_IOC3_ETH=y
+CONFIG_SMC91X=m
+CONFIG_SMSC911X=m
+CONFIG_NIU=m
+CONFIG_TEHUTI=m
+CONFIG_VIA_VELOCITY=m
+CONFIG_PHYLIB=y
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_LSI_ET1011C_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_NATIONAL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_STE10XP=m
+CONFIG_VITESSE_PHY=m
+CONFIG_ADM8211=m
+CONFIG_ATH5K=m
+CONFIG_ATH9K=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_B43=m
+CONFIG_B43LEGACY=m
+# CONFIG_B43LEGACY_DEBUG is not set
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_IPW2100_DEBUG=y
+CONFIG_IPW2200=m
+CONFIG_IPW2200_MONITOR=y
+CONFIG_IPW2200_PROMISCUOUS=y
+CONFIG_IPW2200_QOS=y
+CONFIG_IPW2200_DEBUG=y
+CONFIG_IWL4965=m
+CONFIG_IWL3945=m
+CONFIG_IWLWIFI=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_HERMES=m
+# CONFIG_HERMES_CACHE_FW_ON_INIT is not set
+CONFIG_PLX_HERMES=m
+CONFIG_TMD_HERMES=m
+CONFIG_NORTEL_HERMES=m
+CONFIG_P54_COMMON=m
+CONFIG_P54_PCI=m
+CONFIG_PRISM54=m
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_THINFIRM=m
+CONFIG_MWL8K=m
+CONFIG_RT2X00=m
+CONFIG_RT2400PCI=m
+CONFIG_RT2500PCI=m
+CONFIG_RT61PCI=m
+CONFIG_RT2800PCI=m
+CONFIG_RTL8180=m
+CONFIG_WL1251=m
+CONFIG_WL12XX=m
+# CONFIG_INPUT is not set
+CONFIG_SERIO_LIBPS2=m
+CONFIG_SERIO_RAW=m
+CONFIG_SERIO_ALTERA_PS2=m
+# CONFIG_VT is not set
+CONFIG_NOZOMI=m
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_ALI1535=m
+CONFIG_I2C_ALI1563=m
+CONFIG_I2C_ALI15X3=m
+CONFIG_I2C_AMD756=m
+CONFIG_I2C_AMD8111=m
+CONFIG_I2C_I801=m
+CONFIG_I2C_ISCH=m
+CONFIG_I2C_PIIX4=m
+CONFIG_I2C_NFORCE2=m
+CONFIG_I2C_SIS5595=m
+CONFIG_I2C_SIS630=m
+CONFIG_I2C_SIS96X=m
+CONFIG_I2C_VIA=m
+CONFIG_I2C_VIAPRO=m
+CONFIG_I2C_OCORES=m
+CONFIG_I2C_PCA_PLATFORM=m
+CONFIG_I2C_SIMTEC=m
+CONFIG_I2C_PARPORT_LIGHT=m
+CONFIG_I2C_TAOS_EVM=m
+CONFIG_I2C_STUB=m
+# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+CONFIG_MFD_PCF50633=m
+CONFIG_PCF50633_ADC=m
+CONFIG_PCF50633_GPIO=m
+# CONFIG_VGA_ARB is not set
+CONFIG_LEDS_LP3944=m
+CONFIG_LEDS_PCA955X=m
+CONFIG_LEDS_BD2802=m
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_M48T35=y
+CONFIG_UIO=y
+CONFIG_UIO_AEC=m
+CONFIG_UIO_SERCOS3=m
+CONFIG_UIO_PCI_GENERIC=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_FSCACHE=m
+CONFIG_FSCACHE_STATS=y
+CONFIG_CACHEFILES=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_SQUASHFS=m
+CONFIG_OMFS_FS=m
+CONFIG_EXOFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_SECURITYFS=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRC_T10DIF=m
diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig
new file mode 100644
index 000000000..0921ef38e
--- /dev/null
+++ b/arch/mips/configs/ip28_defconfig
@@ -0,0 +1,69 @@
+CONFIG_SYSVIPC=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_SGI_IP28=y
+CONFIG_ARC_CONSOLE=y
+CONFIG_EISA=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+# CONFIG_SUSPEND is not set
+CONFIG_PM=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_TCP_MD5SIG=y
+# CONFIG_IPV6 is not set
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SGIWD93_SCSI=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_SGISEEQ=y
+# CONFIG_MOUSE_PS2_ALPS is not set
+# CONFIG_MOUSE_PS2_SYNAPTICS is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_IP22_ZILOG=y
+CONFIG_SERIAL_IP22_ZILOG_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_INDYDOG=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_ROOT_NFS=y
+CONFIG_CRYPTO_MANAGER=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
new file mode 100644
index 000000000..7b1fab518
--- /dev/null
+++ b/arch/mips/configs/ip32_defconfig
@@ -0,0 +1,190 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_SGI_IP32=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
+CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_MD5SIG=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETWORK_SECMARK=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_RAID_ATTRS=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_SAS_LIBSAS=y
+CONFIG_SCSI_AIC7XXX=y
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_NET_TULIP=y
+CONFIG_DE2104X=m
+CONFIG_TULIP=m
+CONFIG_TULIP_MMIO=y
+CONFIG_SGI_O2MACE_ETH=y
+CONFIG_INPUT_EVDEV=m
+CONFIG_SERIO_MACEPS2=y
+CONFIG_SERIO_RAW=y
+# CONFIG_CONSOLE_TRANSLATIONS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_WATCHDOG=y
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_GBE=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
+# CONFIG_RTC_INTF_SYSFS is not set
+# CONFIG_RTC_INTF_PROC is not set
+CONFIG_RTC_DRV_DS1685_FAMILY=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_CIFS=m
+CONFIG_NLS=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+CONFIG_KEYS=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=y
+CONFIG_CRYPTO_PCBC=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_TGR192=y
+CONFIG_CRYPTO_WP512=y
+CONFIG_CRYPTO_ANUBIS=y
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_BLOWFISH=y
+CONFIG_CRYPTO_CAMELLIA=y
+CONFIG_CRYPTO_CAST5=y
+CONFIG_CRYPTO_CAST6=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_FCRYPT=y
+CONFIG_CRYPTO_KHAZAD=y
+CONFIG_CRYPTO_SERPENT=y
+CONFIG_CRYPTO_TEA=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRC_T10DIF=y
+CONFIG_LIBCRC32C=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
new file mode 100644
index 000000000..8c2230359
--- /dev/null
+++ b/arch/mips/configs/jazz_defconfig
@@ -0,0 +1,96 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_MACH_JAZZ=y
+CONFIG_OLIVETTI_M700=y
+CONFIG_MIPS_MAGNUM_4000=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=m
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_NET_IPIP=m
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_PARPORT=m
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_1284=y
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_ISCSI_TCP=m
+CONFIG_SCSI_PPA=m
+CONFIG_SCSI_IMM=m
+CONFIG_JAZZ_ESP=y
+CONFIG_ATA=y
+CONFIG_PATA_LEGACY=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_TUN=m
+CONFIG_MIPS_JAZZ_SONIC=y
+CONFIG_NE2000=m
+CONFIG_SERIO_PARKBD=m
+CONFIG_SERIO_RAW=m
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_FB=y
+CONFIG_FB_G364=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_EXT2_FS=m
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_CIFS=m
diff --git a/arch/mips/configs/jmr3927_defconfig b/arch/mips/configs/jmr3927_defconfig
new file mode 100644
index 000000000..24b96faf9
--- /dev/null
+++ b/arch/mips/configs/jmr3927_defconfig
@@ -0,0 +1,50 @@
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_MACH_TX39XX=y
+CONFIG_TOSHIBA_JMR3927=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_NETDEVICES=y
+CONFIG_TC35815=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_SERIAL_TXX9_CONSOLE=y
+CONFIG_SERIAL_TXX9_STDSERIAL=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_TXX9_WDT=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1742=y
+CONFIG_PROC_KCORE=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
new file mode 100644
index 000000000..3a9a453b1
--- /dev/null
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -0,0 +1,351 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_PROFILING=y
+CONFIG_MACH_LOONGSON2EF=y
+CONFIG_LEMOTE_MACH2F=y
+CONFIG_KEXEC=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION="/dev/hda3"
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=y
+CONFIG_DEFAULT_BIC=y
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIVHCI=m
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=m
+CONFIG_RFKILL_INPUT=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=m
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_PATA_AMD=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_DEBUG=y
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_8139TOO=y
+# CONFIG_8139TOO_PIO is not set
+CONFIG_R8169=y
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_CDC_EEM=m
+CONFIG_INPUT_POLLDEV=m
+CONFIG_INPUT_EVDEV=y
+# CONFIG_MOUSE_PS2_ALPS is not set
+# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
+# CONFIG_MOUSE_PS2_TRACKPOINT is not set
+CONFIG_MOUSE_APPLETOUCH=m
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_SERIAL_8250=m
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_NR_UARTS=16
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_FOURPORT=y
+CONFIG_HW_RANDOM=y
+CONFIG_GPIO_LOONGSON=y
+CONFIG_THERMAL=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_SIS=y
+CONFIG_FB_SIS_300=y
+CONFIG_FB_SIS_315=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=m
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=m
+CONFIG_SND=m
+CONFIG_SND_HRTIMER=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_DUMMY=m
+CONFIG_SND_VIRMIDI=m
+CONFIG_SND_SERIAL_U16550=m
+CONFIG_SND_MPU401=m
+CONFIG_SND_AC97_POWER_SAVE=y
+CONFIG_SND_AC97_POWER_SAVE_DEFAULT=10
+CONFIG_SND_CS5535AUDIO=m
+# CONFIG_SND_MIPS is not set
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+CONFIG_HIDRAW=y
+CONFIG_HID_A4TECH=m
+CONFIG_HID_APPLE=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DRAGONRISE=m
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EZKEY=m
+CONFIG_HID_KYE=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_TWINHAN=m
+CONFIG_HID_KENSINGTON=m
+CONFIG_HID_LOGITECH=m
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_NTRIG=m
+CONFIG_HID_PANTHERLORD=m
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SONY=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_GREENASIA=m
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=m
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TOPSEED=m
+CONFIG_HID_THRUSTMASTER=m
+CONFIG_THRUSTMASTER_FF=y
+CONFIG_HID_WACOM=m
+CONFIG_HID_ZEROPLUS=m
+CONFIG_ZEROPLUS_FF=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_DYNAMIC_MINORS=y
+CONFIG_USB_OTG_PRODUCTLIST=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=m
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+CONFIG_USB_STORAGE=m
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_GADGET=m
+CONFIG_MMC=m
+CONFIG_LEDS_CLASS=y
+CONFIG_STAGING=y
+CONFIG_EXT2_FS=m
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FSCACHE=m
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_EMBEDDED=y
+CONFIG_ROMFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V4=y
+CONFIG_CIFS=m
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_6x11=y
+CONFIG_FONT_7x14=y
+CONFIG_FONT_PEARL_8x8=y
+CONFIG_FONT_ACORN_8x8=y
+CONFIG_FONT_MINI_4x6=y
+CONFIG_FONT_10x18=y
+CONFIG_FONT_SUN8x16=y
+CONFIG_FONT_SUN12x22=y
+CONFIG_PRINTK_TIME=y
+CONFIG_FRAME_WARN=1024
+CONFIG_STRIP_ASM_SYMS=y
diff --git a/arch/mips/configs/loongson1b_defconfig b/arch/mips/configs/loongson1b_defconfig
new file mode 100644
index 000000000..25e70423e
--- /dev/null
+++ b/arch/mips/configs/loongson1b_defconfig
@@ -0,0 +1,124 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_XZ=y
+CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_NAMESPACES=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MACH_LOONGSON32=y
+# CONFIG_SECCOMP is not set
+# CONFIG_SUSPEND is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_SCSI=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=m
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=8
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_LOONGSON1=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_WATCHDOG_SYSFS=y
+CONFIG_LOONGSON1_WDT=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_HID_GENERIC=m
+CONFIG_USB_HID=m
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_LOONGSON1=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_UBIFS_ATIME_SUPPORT=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_CRYPTO_ECHAINIV is not set
+# CONFIG_CRYPTO_HW is not set
+CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+# CONFIG_EARLY_PRINTK is not set
diff --git a/arch/mips/configs/loongson1c_defconfig b/arch/mips/configs/loongson1c_defconfig
new file mode 100644
index 000000000..3a158d4d2
--- /dev/null
+++ b/arch/mips/configs/loongson1c_defconfig
@@ -0,0 +1,125 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_XZ=y
+CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_NAMESPACES=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MACH_LOONGSON32=y
+CONFIG_LOONGSON1_LS1C=y
+# CONFIG_SECCOMP is not set
+# CONFIG_SUSPEND is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_SCSI=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=m
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=8
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_LOONGSON1=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_WATCHDOG_SYSFS=y
+CONFIG_LOONGSON1_WDT=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_HID_GENERIC=m
+CONFIG_USB_HID=m
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_LOONGSON1=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_UBIFS_ATIME_SUPPORT=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_CRYPTO_ECHAINIV is not set
+# CONFIG_CRYPTO_HW is not set
+CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+# CONFIG_EARLY_PRINTK is not set
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
new file mode 100644
index 000000000..38a817ead
--- /dev/null
+++ b/arch/mips/configs/loongson3_defconfig
@@ -0,0 +1,411 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_LZMA=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+CONFIG_MACH_LOONGSON64=y
+CONFIG_CPU_HAS_MSA=y
+CONFIG_NR_CPUS=16
+CONFIG_HZ_256=y
+CONFIG_KEXEC=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_MIPS_VZ=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+CONFIG_BINFMT_MISC=m
+CONFIG_KSM=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_NAT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_IP_SET=m
+CONFIG_IP_VS=m
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NF_TABLES_ARP=y
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_TABLES_IPV6=y
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_IP_SCTP=m
+CONFIG_L2TP=m
+CONFIG_BRIDGE=m
+CONFIG_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
+CONFIG_CFG80211=m
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=m
+CONFIG_RFKILL=m
+CONFIG_RFKILL_INPUT=y
+CONFIG_NET_9P=m
+CONFIG_NET_9P_VIRTIO=m
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEASPM is not set
+CONFIG_HOTPLUG_PCI=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=m
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_RAID_ATTRS=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_MEGARAID_NEWGEN=y
+CONFIG_MEGARAID_MM=y
+CONFIG_MEGARAID_MAILBOX=y
+CONFIG_MEGARAID_LEGACY=y
+CONFIG_MEGARAID_SAS=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_PATA_ATIIXP=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_ISCSI_TARGET=m
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_I825XX is not set
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IXGB=y
+CONFIG_IXGBE=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_RDC is not set
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+CONFIG_R8169=y
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_ATH9K=m
+CONFIG_HOSTAP=m
+CONFIG_INPUT_POLLDEV=m
+CONFIG_INPUT_SPARSEKMAP=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_XTKBD=m
+CONFIG_MOUSE_PS2_SENTELIC=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_RAW=m
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=16
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_RAW_DRIVER=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_PIIX4=y
+CONFIG_GPIO_LOONGSON=y
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM93=m
+CONFIG_SENSORS_W83627HF=m
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_QXL=y
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_FB_RADEON=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_PLATFORM=m
+CONFIG_BACKLIGHT_GENERIC=m
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+CONFIG_SOUND=y
+CONFIG_SND=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+# CONFIG_SND_ISA is not set
+CONFIG_SND_HDA_INTEL=m
+CONFIG_SND_HDA_PATCH_LOADER=y
+CONFIG_SND_HDA_CODEC_REALTEK=m
+CONFIG_SND_HDA_CODEC_SIGMATEL=m
+CONFIG_SND_HDA_CODEC_HDMI=m
+CONFIG_SND_HDA_CODEC_CONEXANT=m
+# CONFIG_SND_USB is not set
+CONFIG_HIDRAW=y
+CONFIG_HID_A4TECH=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=m
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=m
+CONFIG_USB_STORAGE=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_RTC_DRV_GOLDFISH=y
+CONFIG_DMADEVICES=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_SCSI=m
+CONFIG_VHOST_VSOCK=m
+CONFIG_GOLDFISH=y
+CONFIG_PM_DEVFREQ=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_VIRTIO_FS=m
+CONFIG_FSCACHE=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=936
+CONFIG_FAT_DEFAULT_IOCHARSET="gb2312"
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_CIFS=m
+CONFIG_9P_FS=m
+CONFIG_9P_FSCACHE=y
+CONFIG_9P_FS_POSIX_ACL=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_UTF8=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_PATH=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_PRINTK_TIME=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="ieee754=relaxed"
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
new file mode 100644
index 000000000..211bd3d6e
--- /dev/null
+++ b/arch/mips/configs/malta_defconfig
@@ -0,0 +1,424 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_PCI=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_DEVTMPFS=y
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_RAID_ATTRS=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_ATA=y
+CONFIG_ATA_PIIX=y
+CONFIG_PATA_LEGACY=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_PRISM54=m
+CONFIG_LIBERTAS=m
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
+CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
new file mode 100644
index 000000000..62b1969b4
--- /dev/null
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -0,0 +1,437 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_PCI=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y
+CONFIG_VHOST_NET=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_DEVTMPFS=y
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_ATA=y
+CONFIG_ATA_PIIX=y
+CONFIG_PATA_IT8213=m
+CONFIG_PATA_OLDPIIX=y
+CONFIG_PATA_MPIIX=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_LEGACY=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_PRISM54=m
+CONFIG_LIBERTAS=m
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
+CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_ENABLE_DEFAULT_TRACERS=y
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
new file mode 100644
index 000000000..9185e0a0a
--- /dev/null
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -0,0 +1,436 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_KVM_GUEST=y
+CONFIG_PAGE_SIZE_16KB=y
+# CONFIG_MIPS_MT_SMP is not set
+CONFIG_HZ_100=y
+CONFIG_PCI=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_DEVTMPFS=y
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_ATA=y
+CONFIG_ATA_PIIX=y
+CONFIG_PATA_IT8213=m
+CONFIG_PATA_OLDPIIX=y
+CONFIG_PATA_MPIIX=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_LEGACY=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_PRISM54=m
+CONFIG_LIBERTAS=m
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
+CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
new file mode 100644
index 000000000..614af02d8
--- /dev/null
+++ b/arch/mips/configs/malta_qemu_32r6_defconfig
@@ -0,0 +1,187 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R6=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+CONFIG_PCI=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_ATA_PIIX=y
+CONFIG_PATA_OLDPIIX=y
+CONFIG_PATA_MPIIX=y
+CONFIG_ATA_GENERIC=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=4
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
+CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
new file mode 100644
index 000000000..9c051f8fd
--- /dev/null
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -0,0 +1,188 @@
+CONFIG_LOCALVERSION="aprp"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_VPE_LOADER=y
+CONFIG_MIPS_VPE_APSP_API=y
+CONFIG_NR_CPUS=2
+CONFIG_HZ_100=y
+CONFIG_PCI=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_ATA_PIIX=y
+CONFIG_PATA_OLDPIIX=y
+CONFIG_PATA_MPIIX=y
+CONFIG_ATA_GENERIC=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
+CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
new file mode 100644
index 000000000..2e90d9755
--- /dev/null
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -0,0 +1,189 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_SCHED_SMT=y
+CONFIG_MIPS_CPS=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_PCI=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+# CONFIG_SATA_PMP is not set
+CONFIG_ATA_PIIX=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=4
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
+CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
new file mode 100644
index 000000000..d1f7fdb27
--- /dev/null
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -0,0 +1,191 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MIPS32_3_5_FEATURES=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_SCHED_SMT=y
+CONFIG_MIPS_CPS=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_PCI=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_ATA_PIIX=y
+CONFIG_PATA_OLDPIIX=y
+CONFIG_PATA_MPIIX=y
+CONFIG_ATA_GENERIC=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=4
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
+CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
new file mode 100644
index 000000000..48e5bd492
--- /dev/null
+++ b/arch/mips/configs/maltaup_defconfig
@@ -0,0 +1,187 @@
+CONFIG_LOCALVERSION="up"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_NR_CPUS=2
+CONFIG_HZ_100=y
+CONFIG_PCI=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_ATA_PIIX=y
+CONFIG_PATA_OLDPIIX=y
+CONFIG_PATA_MPIIX=y
+CONFIG_ATA_GENERIC=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
+CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
new file mode 100644
index 000000000..636311d67
--- /dev/null
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -0,0 +1,434 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MIPS32_R5_FEATURES=y
+CONFIG_CPU_MIPS32_R5_XPA=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_NR_CPUS=2
+CONFIG_HZ_100=y
+CONFIG_PCI=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_ATA=y
+CONFIG_ATA_PIIX=y
+CONFIG_PATA_IT8213=m
+CONFIG_PATA_OLDPIIX=y
+CONFIG_PATA_MPIIX=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_LEGACY=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_PRISM54=m
+CONFIG_LIBERTAS=m
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_PIIX4_POWEROFF=y
+CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
diff --git a/arch/mips/configs/mpc30x_defconfig b/arch/mips/configs/mpc30x_defconfig
new file mode 100644
index 000000000..d4e038802
--- /dev/null
+++ b/arch/mips/configs/mpc30x_defconfig
@@ -0,0 +1,53 @@
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_MACH_VR41XX=y
+CONFIG_VICTOR_MPC30X=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_IPV6 is not set
+CONFIG_NETWORK_SECMARK=y
+CONFIG_CONNECTOR=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_PATA_LEGACY=y
+CONFIG_NETDEVICES=y
+CONFIG_USB_PEGASUS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_VR41XX=y
+CONFIG_SERIAL_VR41XX_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_GPIO_VR41XX=y
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_USB=m
+CONFIG_USB_OHCI_HCD=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_VR41XX=y
+CONFIG_EXT2_FS=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="mem=32M console=ttyVR0,19200 ide0=0x170,0x376,73"
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
new file mode 100644
index 000000000..d6578536d
--- /dev/null
+++ b/arch/mips/configs/mtx1_defconfig
@@ -0,0 +1,705 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MIPS_ALCHEMY=y
+CONFIG_MIPS_MTX1=y
+CONFIG_PCI=y
+CONFIG_PCCARD=m
+CONFIG_YENTA=m
+CONFIG_PD6729=m
+CONFIG_I82092=m
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ACORN_PARTITION=y
+CONFIG_ACORN_PARTITION_ICS=y
+CONFIG_ACORN_PARTITION_RISCIX=y
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+CONFIG_SGI_PARTITION=y
+CONFIG_ULTRIX_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=m
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_IP_DCCP=m
+CONFIG_IP_SCTP=m
+CONFIG_TIPC=m
+CONFIG_ATM=y
+CONFIG_ATM_CLIP=y
+CONFIG_ATM_LANE=m
+CONFIG_ATM_MPOA=m
+CONFIG_ATM_BR2684=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_LLC2=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_X25=m
+CONFIG_LAPB=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_ATM=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_PKTGEN=m
+CONFIG_HAMRADIO=y
+CONFIG_AX25=m
+# CONFIG_AX25_DAMA_SLAVE is not set
+CONFIG_NETROM=m
+CONFIG_ROSE=m
+CONFIG_MKISS=m
+CONFIG_6PACK=m
+CONFIG_BPQETHER=m
+CONFIG_BAYCOM_SER_FDX=m
+CONFIG_BAYCOM_SER_HDX=m
+CONFIG_YAM=m
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBPA10X=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIDTL1=m
+CONFIG_BT_HCIBT3C=m
+CONFIG_BT_HCIBLUECARD=m
+CONFIG_BT_HCIVHCI=m
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_RAM=m
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_SCSI=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_NET_FC=y
+CONFIG_NETCONSOLE=m
+CONFIG_TUN=m
+CONFIG_ARCNET=m
+CONFIG_ARCNET_1201=m
+CONFIG_ARCNET_1051=m
+CONFIG_ARCNET_RAW=m
+CONFIG_ARCNET_CAP=m
+CONFIG_ARCNET_COM90xx=m
+CONFIG_ARCNET_COM90xxIO=m
+CONFIG_ARCNET_RIM_I=m
+CONFIG_ARCNET_COM20020=m
+CONFIG_ARCNET_COM20020_PCI=m
+CONFIG_ARCNET_COM20020_CS=m
+CONFIG_ATM_TCP=m
+CONFIG_ATM_LANAI=m
+CONFIG_ATM_ENI=m
+CONFIG_ATM_FIRESTREAM=m
+CONFIG_ATM_ZATM=m
+CONFIG_ATM_NICSTAR=m
+CONFIG_ATM_IDT77252=m
+CONFIG_ATM_AMBASSADOR=m
+CONFIG_ATM_HORIZON=m
+CONFIG_ATM_IA=m
+CONFIG_ATM_FORE200E=m
+CONFIG_ATM_HE=m
+CONFIG_ATM_HE_USE_SUNI=y
+CONFIG_PCMCIA_3C574=m
+CONFIG_PCMCIA_3C589=m
+CONFIG_VORTEX=m
+CONFIG_TYPHOON=m
+CONFIG_ADAPTEC_STARFIRE=m
+CONFIG_ACENIC=m
+CONFIG_AMD8111_ETH=m
+CONFIG_PCNET32=m
+CONFIG_PCMCIA_NMCLAN=m
+CONFIG_B44=m
+CONFIG_BNX2=m
+CONFIG_TIGON3=m
+CONFIG_CHELSIO_T1=m
+CONFIG_NET_TULIP=y
+CONFIG_DE2104X=m
+CONFIG_TULIP=m
+CONFIG_DE4X5=m
+CONFIG_WINBOND_840=m
+CONFIG_DM9102=m
+CONFIG_ULI526X=m
+CONFIG_PCMCIA_XIRCOM=m
+CONFIG_DL2K=m
+CONFIG_SUNDANCE=m
+CONFIG_PCMCIA_FMVJ18X=m
+CONFIG_HP100=m
+CONFIG_E100=m
+CONFIG_E1000=m
+CONFIG_IXGB=m
+CONFIG_SKGE=m
+CONFIG_SKY2=m
+CONFIG_MYRI10GE=m
+CONFIG_FEALNX=m
+CONFIG_NATSEMI=m
+CONFIG_NS83820=m
+CONFIG_S2IO=m
+CONFIG_PCMCIA_AXNET=m
+CONFIG_NE2K_PCI=m
+CONFIG_PCMCIA_PCNET=m
+CONFIG_FORCEDETH=m
+CONFIG_HAMACHI=m
+CONFIG_YELLOWFIN=m
+CONFIG_QLA3XXX=m
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+# CONFIG_8139TOO_PIO is not set
+CONFIG_8139TOO_8129=y
+CONFIG_R8169=m
+CONFIG_SIS900=m
+CONFIG_SIS190=m
+CONFIG_PCMCIA_SMC91C92=m
+CONFIG_EPIC100=m
+CONFIG_HAPPYMEAL=m
+CONFIG_SUNGEM=m
+CONFIG_CASSINI=m
+CONFIG_TLAN=m
+CONFIG_VIA_RHINE=m
+CONFIG_VIA_VELOCITY=m
+CONFIG_PCMCIA_XIRC2PS=m
+CONFIG_FDDI=y
+CONFIG_DEFXX=m
+CONFIG_SKFP=m
+CONFIG_HIPPI=y
+CONFIG_ROADRUNNER=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_SIERRA_NET=m
+CONFIG_WAN=y
+CONFIG_LANMEDIA=m
+CONFIG_HDLC=m
+CONFIG_HDLC_RAW=m
+CONFIG_HDLC_RAW_ETH=m
+CONFIG_HDLC_CISCO=m
+CONFIG_HDLC_FR=m
+CONFIG_HDLC_PPP=m
+CONFIG_HDLC_X25=m
+CONFIG_PCI200SYN=m
+CONFIG_WANXL=m
+CONFIG_FARSYNC=m
+CONFIG_DSCC4=m
+CONFIG_DSCC4_PCISYNC=y
+CONFIG_DSCC4_PCI_RST=y
+CONFIG_DLCI=m
+CONFIG_LAPBETHER=m
+CONFIG_X25_ASY=m
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_PCIPS2=m
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_GAMEPORT=m
+CONFIG_GAMEPORT_NS558=m
+CONFIG_GAMEPORT_L4=m
+CONFIG_GAMEPORT_EMU10K1=m
+CONFIG_GAMEPORT_FM801=m
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=m
+CONFIG_SERIAL_8250_CS=m
+CONFIG_SERIAL_8250_NR_UARTS=48
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_JSM=m
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=m
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_GPIO=m
+CONFIG_GPIO_SYSFS=y
+CONFIG_SENSORS_ADM1021=m
+CONFIG_SENSORS_ADM1025=m
+CONFIG_SENSORS_ADM1026=m
+CONFIG_SENSORS_ADM1031=m
+CONFIG_SENSORS_ADM9240=m
+CONFIG_SENSORS_ATXP1=m
+CONFIG_SENSORS_DS1621=m
+CONFIG_SENSORS_F71805F=m
+CONFIG_SENSORS_GL518SM=m
+CONFIG_SENSORS_GL520SM=m
+CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_MAX1619=m
+CONFIG_SENSORS_LM63=m
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM77=m
+CONFIG_SENSORS_LM78=m
+CONFIG_SENSORS_LM80=m
+CONFIG_SENSORS_LM83=m
+CONFIG_SENSORS_LM85=m
+CONFIG_SENSORS_LM87=m
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_LM92=m
+CONFIG_SENSORS_PC87360=m
+CONFIG_SENSORS_PCF8591=m
+CONFIG_SENSORS_SIS5595=m
+CONFIG_SENSORS_SMSC47M1=m
+CONFIG_SENSORS_SMSC47M192=m
+CONFIG_SENSORS_SMSC47B397=m
+CONFIG_SENSORS_VIA686A=m
+CONFIG_SENSORS_VT1211=m
+CONFIG_SENSORS_VT8231=m
+CONFIG_SENSORS_W83781D=m
+CONFIG_SENSORS_W83791D=m
+CONFIG_SENSORS_W83792D=m
+CONFIG_SENSORS_W83L785TS=m
+CONFIG_SENSORS_W83627HF=m
+CONFIG_SENSORS_W83627EHF=m
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_WDT_MTX1=y
+# CONFIG_VGA_ARB is not set
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_SOUND=m
+CONFIG_SND=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_DUMMY=m
+CONFIG_SND_VIRMIDI=m
+CONFIG_SND_MTPAV=m
+CONFIG_SND_SERIAL_U16550=m
+CONFIG_SND_MPU401=m
+CONFIG_SND_AD1889=m
+CONFIG_SND_ATIIXP=m
+CONFIG_SND_ATIIXP_MODEM=m
+CONFIG_SND_AU8810=m
+CONFIG_SND_AU8820=m
+CONFIG_SND_AU8830=m
+CONFIG_SND_BT87X=m
+CONFIG_SND_CA0106=m
+CONFIG_SND_CMIPCI=m
+CONFIG_SND_CS4281=m
+CONFIG_SND_CS46XX=m
+CONFIG_SND_DARLA20=m
+CONFIG_SND_GINA20=m
+CONFIG_SND_LAYLA20=m
+CONFIG_SND_DARLA24=m
+CONFIG_SND_GINA24=m
+CONFIG_SND_LAYLA24=m
+CONFIG_SND_MONA=m
+CONFIG_SND_MIA=m
+CONFIG_SND_ECHO3G=m
+CONFIG_SND_INDIGO=m
+CONFIG_SND_INDIGOIO=m
+CONFIG_SND_INDIGODJ=m
+CONFIG_SND_ENS1370=m
+CONFIG_SND_ENS1371=m
+CONFIG_SND_FM801=m
+CONFIG_SND_HDSP=m
+CONFIG_SND_HDSPM=m
+CONFIG_SND_ICE1724=m
+CONFIG_SND_INTEL8X0=m
+CONFIG_SND_INTEL8X0M=m
+CONFIG_SND_KORG1212=m
+CONFIG_SND_MIXART=m
+CONFIG_SND_NM256=m
+CONFIG_SND_PCXHR=m
+CONFIG_SND_RIPTIDE=m
+CONFIG_SND_RME32=m
+CONFIG_SND_RME96=m
+CONFIG_SND_RME9652=m
+CONFIG_SND_VIA82XX=m
+CONFIG_SND_VIA82XX_MODEM=m
+CONFIG_SND_VX222=m
+CONFIG_SND_YMFPCI=m
+CONFIG_SND_HDA_INTEL=m
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_VXPOCKET=m
+CONFIG_SND_PDAUDIOCF=m
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_KBD=m
+CONFIG_USB_MOUSE=m
+CONFIG_USB=m
+CONFIG_USB_MON=m
+CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_OHCI_HCD_PLATFORM=m
+CONFIG_USB_UHCI_HCD=m
+CONFIG_USB_U132_HCD=m
+CONFIG_USB_SL811_HCD=m
+CONFIG_USB_SL811_CS=m
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_STORAGE=m
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+CONFIG_USB_STORAGE_KARMA=m
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+CONFIG_USB_ADUTUX=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+CONFIG_USB_CYPRESS_CY7C63=m
+CONFIG_USB_CYTHERM=m
+CONFIG_USB_IDMOUSE=m
+CONFIG_USB_FTDI_ELAN=m
+CONFIG_USB_APPLEDISPLAY=m
+CONFIG_USB_SISUSBVGA=m
+CONFIG_USB_LD=m
+CONFIG_USB_TRANCEVIBRATOR=m
+CONFIG_USB_TEST=m
+CONFIG_USB_ATM=m
+CONFIG_USB_SPEEDTOUCH=m
+CONFIG_USB_CXACRU=m
+CONFIG_USB_UEAGLEATM=m
+CONFIG_USB_XUSBATM=m
+CONFIG_USB_GADGET=m
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_USB_G_SERIAL=m
+CONFIG_USB_MIDI_GADGET=m
+CONFIG_MMC=m
+CONFIG_MMC_SDHCI=m
+CONFIG_MMC_TIFM_SD=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+CONFIG_RTC_DRV_TEST=m
+CONFIG_RTC_DRV_DS1307=m
+CONFIG_RTC_DRV_DS1672=m
+CONFIG_RTC_DRV_RS5C372=m
+CONFIG_RTC_DRV_ISL1208=m
+CONFIG_RTC_DRV_X1205=m
+CONFIG_RTC_DRV_PCF8563=m
+CONFIG_RTC_DRV_PCF8583=m
+CONFIG_RTC_DRV_DS1553=m
+CONFIG_RTC_DRV_DS1742=m
+CONFIG_RTC_DRV_M48T86=m
+CONFIG_RTC_DRV_V3020=m
+CONFIG_EXT2_FS=m
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=m
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_CRAMFS=y
+CONFIG_SQUASHFS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V4=m
+CONFIG_NFSD=m
+CONFIG_NFSD_V4=y
+CONFIG_CIFS=m
+CONFIG_CODA_FS=m
+CONFIG_AFS_FS=m
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="cp437"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig
new file mode 100644
index 000000000..45421a05b
--- /dev/null
+++ b/arch/mips/configs/nlm_xlp_defconfig
@@ -0,0 +1,556 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_CGROUPS=y
+CONFIG_NAMESPACES=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_NLM_XLP_BOARD=y
+CONFIG_64BIT=y
+CONFIG_PAGE_SIZE_16KB=y
+# CONFIG_HW_PERF_EVENTS is not set
+CONFIG_SMP=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
+CONFIG_PCI_DEBUG=y
+CONFIG_PCI_STUB=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ACORN_PARTITION=y
+CONFIG_ACORN_PARTITION_ICS=y
+CONFIG_ACORN_PARTITION_RISCIX=y
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+CONFIG_SGI_PARTITION=y
+CONFIG_ULTRIX_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_SYSV68_PARTITION=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_DCCP=m
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_TIPC=m
+CONFIG_ATM=m
+CONFIG_ATM_CLIP=m
+CONFIG_ATM_LANE=m
+CONFIG_ATM_MPOA=m
+CONFIG_ATM_BR2684=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_LLC2=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_X25=m
+CONFIG_LAPB=m
+CONFIG_PHONET=m
+CONFIG_IEEE802154=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_ATM=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_DCB=y
+CONFIG_NET_PKTGEN=m
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_LE_BYTE_SWAP=y
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_CDROM_PKTCDVD=y
+CONFIG_RAID_ATTRS=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_SIL24=y
+# CONFIG_ATA_SFF is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_I825XX is not set
+CONFIG_E1000E=y
+CONFIG_SKY2=y
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_N_HDLC=m
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=48
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_RAW_DRIVER=m
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_OCORES=y
+CONFIG_SENSORS_LM90=y
+CONFIG_THERMAL=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1374=y
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_GFS2_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_NILFS2_FS=m
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=m
+CONFIG_FSCACHE=m
+CONFIG_FSCACHE_STATS=y
+CONFIG_FSCACHE_HISTOGRAM=y
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ADFS_FS=m
+CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
+CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_EXOFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_FSCACHE=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_CODA_FS=m
+CONFIG_AFS_FS=m
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="cp437"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_SECURITY=y
+CONFIG_LSM_MMAP_MIN_ADDR=0
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_SECURITY_TOMOYO=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRC7=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_KGDB=y
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
new file mode 100644
index 000000000..1081972c8
--- /dev/null
+++ b/arch/mips/configs/nlm_xlr_defconfig
@@ -0,0 +1,507 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+# CONFIG_ELF_CORE is not set
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_NLM_XLR_BOARD=y
+CONFIG_HIGHMEM=y
+CONFIG_SMP=y
+CONFIG_KEXEC=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_DEBUG=y
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ACORN_PARTITION=y
+CONFIG_ACORN_PARTITION_ICS=y
+CONFIG_ACORN_PARTITION_RISCIX=y
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+CONFIG_SGI_PARTITION=y
+CONFIG_ULTRIX_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_SYSV68_PARTITION=y
+CONFIG_BINFMT_MISC=m
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_DCCP=m
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_TIPC=m
+CONFIG_ATM=m
+CONFIG_ATM_CLIP=m
+CONFIG_ATM_LANE=m
+CONFIG_ATM_MPOA=m
+CONFIG_ATM_BR2684=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_LLC2=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_X25=m
+CONFIG_LAPB=m
+CONFIG_PHONET=m
+CONFIG_IEEE802154=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_ATM=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_DCB=y
+CONFIG_NET_PKTGEN=m
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_CDROM_PKTCDVD=y
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_NETDEVICES=y
+CONFIG_E1000E=y
+CONFIG_SKY2=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_N_HDLC=m
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=48
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_RAW_DRIVER=m
+CONFIG_I2C=y
+CONFIG_I2C_XLR=y
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1374=y
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_GFS2_FS=m
+CONFIG_OCFS2_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_NILFS2_FS=m
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=m
+CONFIG_FSCACHE=m
+CONFIG_FSCACHE_STATS=y
+CONFIG_FSCACHE_HISTOGRAM=y
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_ADFS_FS=m
+CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
+CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_EXOFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_FSCACHE=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_CODA_FS=m
+CONFIG_AFS_FS=m
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="cp437"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_SECURITY=y
+CONFIG_LSM_MMAP_MIN_ADDR=0
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_SECURITY_TOMOYO=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRC7=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_KGDB=y
diff --git a/arch/mips/configs/omega2p_defconfig b/arch/mips/configs/omega2p_defconfig
new file mode 100644
index 000000000..fc39ddf61
--- /dev/null
+++ b/arch/mips/configs/omega2p_defconfig
@@ -0,0 +1,127 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_RALINK=y
+CONFIG_SOC_MT7620=y
+CONFIG_DTB_OMEGA2P=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER=y
+# CONFIG_SUSPEND is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_NETDEVICES=y
+# CONFIG_ETHERNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=2
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=3
+CONFIG_SERIAL_8250_RUNTIME_UARTS=3
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_MMC=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_MEMORY=y
+CONFIG_PHY_RALINK_USB=y
+# CONFIG_DNOTIFY is not set
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_UTF8=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRC16=y
+CONFIG_XZ_DEC=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_STACKTRACE=y
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/pic32mzda_defconfig b/arch/mips/configs/pic32mzda_defconfig
new file mode 100644
index 000000000..63fe2da1b
--- /dev/null
+++ b/arch/mips/configs/pic32mzda_defconfig
@@ -0,0 +1,88 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_RELAY=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MACH_PIC32=y
+CONFIG_DTB_PIC32_MZDA_SK=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+# CONFIG_SUSPEND is not set
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
+CONFIG_BINFMT_MISC=m
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SCAN_ASYNC=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_INPUT_LEDS=m
+CONFIG_INPUT_POLLDEV=y
+CONFIG_INPUT_MOUSEDEV=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=m
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=m
+CONFIG_KEYBOARD_GPIO_POLLED=m
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_PIC32=y
+CONFIG_SERIAL_PIC32_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_RAW_DRIVER=m
+CONFIG_GPIO_SYSFS=y
+# CONFIG_HWMON is not set
+CONFIG_HIDRAW=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MICROCHIP_PIC32=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_ONESHOT=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_GPIO=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+# CONFIG_MIPS_PLATFORM_DEVICES is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_FSCACHE=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig
new file mode 100644
index 000000000..b9adf15eb
--- /dev/null
+++ b/arch/mips/configs/pistachio_defconfig
@@ -0,0 +1,316 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="localhost"
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_IKCONFIG=m
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_MACH_PISTACHIO=y
+CONFIG_MIPS_CPS=y
+CONFIG_NR_CPUS=4
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
+CONFIG_CPU_IDLE=y
+# CONFIG_MIPS_CPS_CPUIDLE is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_COMPACTION is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_ZSMALLOC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_IPV6_SIT=m
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+# CONFIG_BRIDGE_NETFILTER is not set
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_MARK=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_DSCP=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_MARK=y
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIVHCI=m
+CONFIG_CFG80211=m
+CONFIG_NL80211_TESTMODE=y
+CONFIG_CFG80211_DEBUGFS=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=m
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211_DEBUGFS=y
+CONFIG_MAC80211_DEBUG_MENU=y
+CONFIG_MAC80211_VERBOSE_DEBUG=y
+CONFIG_RFKILL=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DEBUG_DEVRES=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_ZRAM=m
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=m
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=m
+CONFIG_VETH=m
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_PPP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_MCS7830=m
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_LIBERTAS_THINFIRM=m
+CONFIG_RT2X00=m
+CONFIG_RT2800USB=m
+CONFIG_MAC80211_HWSIM=m
+CONFIG_USB_NET_RNDIS_WLAN=m
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+CONFIG_TCG_TPM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_IMG=y
+CONFIG_I2C_STUB=m
+CONFIG_SPI=y
+CONFIG_SPI_BITBANG=m
+CONFIG_SPI_IMG_SPFI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_IMGPDC_WDT=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_RC_CORE=y
+CONFIG_RC_DEVICES=y
+CONFIG_IR_IMG=y
+CONFIG_IR_IMG_NEC=y
+CONFIG_IR_IMG_JVC=y
+CONFIG_IR_IMG_SONY=y
+CONFIG_IR_IMG_SHARP=y
+CONFIG_IR_IMG_SANYO=y
+CONFIG_IR_IMG_RC5=y
+CONFIG_IR_IMG_RC6=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_HRTIMER=m
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB_AUDIO=m
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+# CONFIG_USB_DEFAULT_PERSIST is not set
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_QUALCOMM=m
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=16
+CONFIG_MMC_TEST=m
+CONFIG_MMC_DW=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_IMG_MDC_DMA=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_MEMORY=y
+CONFIG_IIO=y
+CONFIG_CC10001_ADC=y
+CONFIG_PWM=y
+CONFIG_PWM_IMG=y
+CONFIG_PHY_PISTACHIO_USB=y
+CONFIG_ANDROID=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_HFSPLUS_FS=m
+CONFIG_UBIFS_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_YAMA=y
+CONFIG_CRYPTO_AUTHENC=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC_T10DIF=m
+CONFIG_CRC7=m
+# CONFIG_XZ_DEC_X86 is not set
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_LKDTM=y
+CONFIG_TEST_UDELAY=m
diff --git a/arch/mips/configs/qi_lb60_defconfig b/arch/mips/configs/qi_lb60_defconfig
new file mode 100644
index 000000000..b4448d087
--- /dev/null
+++ b/arch/mips/configs/qi_lb60_defconfig
@@ -0,0 +1,177 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MACH_INGENIC_SOC=y
+CONFIG_JZ4740_QI_LB60=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CMA=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_CUBIC is not set
+CONFIG_TCP_CONG_WESTWOOD=y
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_IPV6 is not set
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_NAND_JZ4780=y
+CONFIG_MTD_NAND_JZ4740_ECC=y
+CONFIG_MTD_UBI=y
+CONFIG_NETDEVICES=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_MATRIX=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO is not set
+CONFIG_LEGACY_PTY_COUNT=2
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_DMA is not set
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_INGENIC=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_SPI=y
+CONFIG_SPI_GPIO=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_BATTERY_INGENIC=y
+CONFIG_CHARGER_GPIO=y
+CONFIG_SENSORS_IIO_HWMON=y
+CONFIG_WATCHDOG=y
+CONFIG_JZ4740_WDT=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_DRM=y
+CONFIG_DRM_FBDEV_OVERALLOC=200
+CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_INGENIC=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_MIPS is not set
+CONFIG_SND_SOC=y
+CONFIG_SND_JZ4740_SOC_I2S=y
+CONFIG_SND_SOC_JZ4740_CODEC=y
+CONFIG_SND_SOC_SIMPLE_AMPLIFIER=y
+CONFIG_SND_SIMPLE_CARD=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_JZ4740=y
+CONFIG_USB_INVENTRA_DMA=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG=y
+CONFIG_USB_ETH=y
+# CONFIG_USB_ETH_RNDIS is not set
+CONFIG_MMC=y
+CONFIG_MMC_JZ4740=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_JZ4740=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_JZ4780=y
+CONFIG_MEMORY=y
+CONFIG_JZ4780_NEMC=y
+CONFIG_IIO=y
+CONFIG_INGENIC_ADC=y
+CONFIG_PWM=y
+CONFIG_PWM_JZ4740=y
+CONFIG_EXT4_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+# CONFIG_JFFS2_ZLIB is not set
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_UTF8=y
+CONFIG_FONTS=y
+CONFIG_FONT_SUN8x16=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_READABLE_ASM=y
+CONFIG_KGDB=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_PANIC_ON_OOPS=y
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
new file mode 100644
index 000000000..252d47238
--- /dev/null
+++ b/arch/mips/configs/rb532_defconfig
@@ -0,0 +1,166 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+# CONFIG_ELF_CORE is not set
+# CONFIG_KALLSYMS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_SLAB=y
+CONFIG_MIKROTIK_RB532=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
+# CONFIG_PCI_QUIRKS is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_CUBIC=m
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_VEGAS=y
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_BRIDGE_NETFILTER is not set
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=m
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=y
+CONFIG_LLC2=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_HAMRADIO=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_BLOCK2MTD=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_NAND_PLATFORM=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_SATA_PMP is not set
+CONFIG_PATA_RB532=y
+CONFIG_NETDEVICES=y
+CONFIG_IFB=m
+CONFIG_KORINA=y
+CONFIG_VIA_RHINE=y
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_RB532_BUTTON=y
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_HW_RANDOM=y
+CONFIG_GPIO_SYSFS=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_RC32434_WDT=y
+# CONFIG_VGA_ARB is not set
+# CONFIG_HID is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_MIKROTIK_RB532=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_EXT2_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_SQUASHFS=y
+CONFIG_CRYPTO_TEST=m
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC16=m
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
new file mode 100644
index 000000000..5e389db35
--- /dev/null
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -0,0 +1,98 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+# CONFIG_EPOLL is not set
+CONFIG_SLAB=y
+CONFIG_MACH_TX49XX=y
+CONFIG_TOSHIBA_RBTX4927=y
+CONFIG_TOSHIBA_RBTX4938=y
+CONFIG_TOSHIBA_RBTX4939=y
+CONFIG_TOSHIBA_RBTX4938_MPLEX_KEEP=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_BLOCK_RO=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_RBTX4939=y
+CONFIG_MTD_RAW_NAND=m
+CONFIG_MTD_NAND_TXX9NDFMC=m
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE_TX4938=y
+CONFIG_BLK_DEV_IDE_TX4939=y
+CONFIG_NETDEVICES=y
+CONFIG_NE2000=y
+CONFIG_SMC91X=y
+CONFIG_TC35815=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_TXX9_CONSOLE=y
+CONFIG_SERIAL_TXX9_STDSERIAL=y
+CONFIG_SPI=y
+CONFIG_SPI_TXX9=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_TXX9_WDT=m
+# CONFIG_VGA_ARB is not set
+CONFIG_SOUND=m
+CONFIG_SND=m
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_PCI is not set
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_MIPS is not set
+CONFIG_SND_SOC=m
+CONFIG_SND_SOC_TXX9ACLC=m
+CONFIG_SND_SOC_TXX9ACLC_GENERIC=m
+# CONFIG_USB_SUPPORT is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+CONFIG_RTC_DRV_RS5C348=y
+CONFIG_RTC_DRV_DS1742=y
+CONFIG_RTC_DRV_TX4939=y
+CONFIG_DMADEVICES=y
+CONFIG_TXX9_DMAC=m
+# CONFIG_DNOTIFY is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_JFFS2_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
new file mode 100644
index 000000000..2d0506159
--- /dev/null
+++ b/arch/mips/configs/rm200_defconfig
@@ -0,0 +1,413 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_SNI_RM=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_ARC_CONSOLE=y
+CONFIG_HZ_1000=y
+CONFIG_PCI=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=m
+CONFIG_UNIX=y
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_HAMRADIO=y
+CONFIG_AX25=m
+CONFIG_NETROM=m
+CONFIG_ROSE=m
+CONFIG_MKISS=m
+CONFIG_6PACK=m
+CONFIG_BPQETHER=m
+CONFIG_CONNECTOR=m
+CONFIG_PARPORT=m
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_SERIAL=m
+CONFIG_PARPORT_1284=y
+CONFIG_BLK_DEV_FD=m
+CONFIG_PARIDE=m
+CONFIG_PARIDE_PD=m
+CONFIG_PARIDE_PCD=m
+CONFIG_PARIDE_PF=m
+CONFIG_PARIDE_PT=m
+CONFIG_PARIDE_PG=m
+CONFIG_PARIDE_ATEN=m
+CONFIG_PARIDE_BPCK=m
+CONFIG_PARIDE_BPCK6=m
+CONFIG_PARIDE_COMM=m
+CONFIG_PARIDE_DSTR=m
+CONFIG_PARIDE_FIT2=m
+CONFIG_PARIDE_FIT3=m
+CONFIG_PARIDE_EPAT=m
+CONFIG_PARIDE_EPIA=m
+CONFIG_PARIDE_FRIQ=m
+CONFIG_PARIDE_FRPW=m
+CONFIG_PARIDE_KBIC=m
+CONFIG_PARIDE_KTTI=m
+CONFIG_PARIDE_ON20=m
+CONFIG_PARIDE_ON26=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_SX8=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=y
+CONFIG_ISCSI_TCP=m
+CONFIG_SCSI_AIC94XX=m
+# CONFIG_AIC94XX_DEBUG is not set
+CONFIG_MEGARAID_NEWGEN=y
+CONFIG_MEGARAID_MM=m
+CONFIG_MEGARAID_MAILBOX=m
+CONFIG_SCSI_PPA=m
+CONFIG_SCSI_IMM=m
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_TUN=m
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_NE2000=m
+CONFIG_QLA3XXX=m
+CONFIG_NETXEN_NIC=m
+CONFIG_VIA_VELOCITY=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_PLIP=m
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_USBNET=m
+# CONFIG_USB_NET_CDC_SUBSET is not set
+CONFIG_INPUT_FF_MEMLESS=m
+CONFIG_SERIO_PARKBD=m
+CONFIG_SERIO_RAW=m
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=m
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_PRINTER=m
+CONFIG_PPDEV=m
+# CONFIG_HW_RANDOM is not set
+CONFIG_W1=m
+# CONFIG_HWMON is not set
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_KBD=m
+CONFIG_USB_MOUSE=m
+CONFIG_USB=m
+CONFIG_USB_MON=m
+CONFIG_USB_EHCI_HCD=m
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_UHCI_HCD=m
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_STORAGE=m
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+CONFIG_USB_USS720=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SAFE_PADDED=y
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+CONFIG_USB_CYTHERM=m
+CONFIG_USB_SISUSBVGA=m
+CONFIG_USB_LD=m
+CONFIG_USB_TEST=m
+CONFIG_EXT2_FS=m
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ADFS_FS=m
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_CIFS=m
+CONFIG_CODA_FS=m
+CONFIG_AFS_FS=m
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
diff --git a/arch/mips/configs/rs90_defconfig b/arch/mips/configs/rs90_defconfig
new file mode 100644
index 000000000..dfbb9fed9
--- /dev/null
+++ b/arch/mips/configs/rs90_defconfig
@@ -0,0 +1,183 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="rs90"
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_LD_DEAD_CODE_DATA_ELIMINATION=y
+# CONFIG_SGETMASK_SYSCALL is not set
+# CONFIG_SYSFS_SYSCALL is not set
+# CONFIG_ELF_CORE is not set
+# CONFIG_BASE_FULL is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_AIO is not set
+# CONFIG_IO_URING is not set
+# CONFIG_ADVISE_SYSCALLS is not set
+# CONFIG_KALLSYMS is not set
+CONFIG_EMBEDDED=y
+# CONFIG_PERF_EVENTS is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MACH_INGENIC_SOC=y
+CONFIG_JZ4740_RS90=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_MIPS_CMDLINE_DTB_EXTEND=y
+# CONFIG_SUSPEND is not set
+CONFIG_PM=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_OPROFILE=y
+CONFIG_JUMP_LABEL=y
+# CONFIG_STACKPROTECTOR is not set
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_MQ_IOSCHED_DEADLINE is not set
+# CONFIG_MQ_IOSCHED_KYBER is not set
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSMALLOC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_CUBIC is not set
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_NAND_JZ4780=y
+CONFIG_MTD_NAND_JZ4725B_BCH=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_FASTMAP=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
+CONFIG_NETDEVICES=y
+# CONFIG_ETHERNET is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_LEGACY_PTY_COUNT=2
+# CONFIG_HW_RANDOM is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_I2C_COMPAT is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_POWER_SUPPLY=y
+CONFIG_BATTERY_INGENIC=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED is not set
+CONFIG_JZ4740_WDT=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_DRM=y
+CONFIG_DRM_FBDEV_OVERALLOC=300
+CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_INGENIC=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_SND_PCM_TIMER is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_PROC_FS is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_MIPS is not set
+CONFIG_SND_SOC=y
+CONFIG_SND_JZ4740_SOC_I2S=y
+CONFIG_SND_SOC_JZ4725B_CODEC=y
+CONFIG_SND_SOC_SIMPLE_AMPLIFIER=y
+CONFIG_SND_SIMPLE_CARD=y
+# CONFIG_HID is not set
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_JZ4740=y
+CONFIG_USB_INVENTRA_DMA=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_ECM_SUBSET=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_MMC=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
+CONFIG_MMC_JZ4740=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
+# CONFIG_RTC_SYSTOHC is not set
+# CONFIG_RTC_NVMEM is not set
+# CONFIG_RTC_INTF_PROC is not set
+CONFIG_RTC_DRV_JZ4740=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_JZ4780=y
+# CONFIG_VIRTIO_MENU is not set
+# CONFIG_MIPS_PLATFORM_DEVICES is not set
+CONFIG_INGENIC_OST=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_MEMORY=y
+CONFIG_JZ4780_NEMC=y
+CONFIG_IIO=y
+CONFIG_INGENIC_ADC=y
+CONFIG_IIO_RESCALE=y
+CONFIG_PWM=y
+CONFIG_PWM_JZ4740=y
+# CONFIG_NVMEM is not set
+CONFIG_EXT4_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+# CONFIG_UBIFS_FS_ZSTD is not set
+# CONFIG_UBIFS_FS_XATTR is not set
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_DECOMP_MULTI=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_4K_DEVBLK_SIZE=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_FONTS=y
+CONFIG_FONT_6x10=y
+# CONFIG_SYMBOLIC_ERRNAME is not set
+CONFIG_STRIP_ASM_SYMS=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+# CONFIG_DEBUG_MISC is not set
+CONFIG_PANIC_ON_OOPS=y
+# CONFIG_FTRACE is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/mips/configs/rt305x_defconfig b/arch/mips/configs/rt305x_defconfig
new file mode 100644
index 000000000..fec5851c1
--- /dev/null
+++ b/arch/mips/configs/rt305x_defconfig
@@ -0,0 +1,150 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_GZIP is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_AIO is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_RALINK=y
+CONFIG_DTB_RT305X_EVAL=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+# CONFIG_SUSPEND is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_COREDUMP is not set
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_BRIDGE_NETFILTER is not set
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_RAW=m
+CONFIG_BRIDGE=y
+# CONFIG_BRIDGE_IGMP_SNOOPING is not set
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_HAMRADIO=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_EEPROM_93CX6=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_PHYLIB=y
+CONFIG_PPP=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_ISDN=y
+CONFIG_INPUT=m
+CONFIG_INPUT_POLLDEV=m
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SPI=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_HID is not set
+# CONFIG_USB_HID is not set
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DEBUG=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_STAGING=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+# CONFIG_JFFS2_FS_POSIX_ACL is not set
+# CONFIG_JFFS2_FS_SECURITY is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+# CONFIG_JFFS2_ZLIB is not set
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_ZLIB is not set
+CONFIG_SQUASHFS_XZ=y
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32_SARWATE=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+CONFIG_CMDLINE_BOOL=y
diff --git a/arch/mips/configs/sb1250_swarm_defconfig b/arch/mips/configs/sb1250_swarm_defconfig
new file mode 100644
index 000000000..bb0b1b22e
--- /dev/null
+++ b/arch/mips/configs/sb1250_swarm_defconfig
@@ -0,0 +1,107 @@
+CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_CGROUPS=y
+CONFIG_CPUSETS=y
+# CONFIG_PROC_PID_CPUSET is not set
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_SIBYTE_SWARM=y
+CONFIG_CPU_SB1_PASS_2_2=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_HZ_1000=y
+CONFIG_PCI=y
+CONFIG_MIPS32_O32=y
+CONFIG_PM=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_TCP_MD5SIG=y
+# CONFIG_IPV6 is not set
+CONFIG_NETWORK_SECMARK=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_RFKILL=m
+CONFIG_FW_LOADER=m
+CONFIG_CONNECTOR=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=9220
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDETAPE=y
+CONFIG_RAID_ATTRS=m
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_SB1250_MAC=y
+CONFIG_BROADCOM_PHY=y
+# CONFIG_INPUT is not set
+CONFIG_SERIO_RAW=m
+# CONFIG_VT is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_USB=y
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_FUSE_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_CBC=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_LZO=m
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC16=m
diff --git a/arch/mips/configs/tb0219_defconfig b/arch/mips/configs/tb0219_defconfig
new file mode 100644
index 000000000..6547f8475
--- /dev/null
+++ b/arch/mips/configs/tb0219_defconfig
@@ -0,0 +1,77 @@
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_MACH_VR41XX=y
+CONFIG_TANBAC_TB0219=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETWORK_SECMARK=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_NETDEVICES=y
+CONFIG_8139TOO=y
+CONFIG_R8169=y
+CONFIG_VIA_RHINE=y
+CONFIG_VIA_RHINE_MMIO=y
+CONFIG_VIA_VELOCITY=y
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_VR41XX=y
+CONFIG_SERIAL_VR41XX_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_GPIO_TB0219=y
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_USB=m
+CONFIG_USB_MON=m
+CONFIG_USB_EHCI_HCD=m
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_OHCI_HCD=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_VR41XX=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CRAMFS=m
+CONFIG_ROMFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="cca=3 mem=64M console=ttyVR0,115200 ip=any root=/dev/nfs"
diff --git a/arch/mips/configs/tb0226_defconfig b/arch/mips/configs/tb0226_defconfig
new file mode 100644
index 000000000..7e099f7c2
--- /dev/null
+++ b/arch/mips/configs/tb0226_defconfig
@@ -0,0 +1,72 @@
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_MACH_VR41XX=y
+CONFIG_TANBAC_TB0226=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETWORK_SECMARK=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_SAS_LIBSAS=m
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_E100=y
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_VR41XX=y
+CONFIG_SERIAL_VR41XX_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_VR41XX=y
+CONFIG_EXT2_FS=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CRAMFS=m
+CONFIG_ROMFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="cca=3 mem=32M console=ttyVR0,115200"
diff --git a/arch/mips/configs/tb0287_defconfig b/arch/mips/configs/tb0287_defconfig
new file mode 100644
index 000000000..0d881dd86
--- /dev/null
+++ b/arch/mips/configs/tb0287_defconfig
@@ -0,0 +1,85 @@
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_MACH_VR41XX=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=y
+CONFIG_TCP_CONG_CUBIC=m
+# CONFIG_IPV6 is not set
+CONFIG_NETWORK_SECMARK=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_SCAN_ASYNC=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_PATA_SIL680=y
+CONFIG_NETDEVICES=y
+CONFIG_8139TOO=y
+CONFIG_R8169=y
+CONFIG_VIA_RHINE=y
+CONFIG_VIA_RHINE_MMIO=y
+CONFIG_VIA_VELOCITY=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_VR41XX=y
+CONFIG_SERIAL_VR41XX_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_GPIO_VR41XX=y
+# CONFIG_HWMON is not set
+CONFIG_MFD_SM501=y
+CONFIG_FB=y
+CONFIG_FB_SM501=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_USB=m
+CONFIG_USB_MON=m
+CONFIG_USB_EHCI_HCD=m
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_STORAGE=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CRAMFS=m
+CONFIG_ROMFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="cca=3 mem=64M console=ttyVR0,115200 ip=any root=/dev/nfs"
diff --git a/arch/mips/configs/vocore2_defconfig b/arch/mips/configs/vocore2_defconfig
new file mode 100644
index 000000000..a14f8ea5c
--- /dev/null
+++ b/arch/mips/configs/vocore2_defconfig
@@ -0,0 +1,127 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_RALINK=y
+CONFIG_SOC_MT7620=y
+CONFIG_DTB_VOCORE2=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER=y
+# CONFIG_SUSPEND is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_NETDEVICES=y
+# CONFIG_ETHERNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=2
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=3
+CONFIG_SERIAL_8250_RUNTIME_UARTS=3
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_MMC=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_MEMORY=y
+CONFIG_PHY_RALINK_USB=y
+# CONFIG_DNOTIFY is not set
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_UTF8=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRC16=y
+CONFIG_XZ_DEC=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_STACKTRACE=y
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/workpad_defconfig b/arch/mips/configs/workpad_defconfig
new file mode 100644
index 000000000..891a5f773
--- /dev/null
+++ b/arch/mips/configs/workpad_defconfig
@@ -0,0 +1,65 @@
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_MACH_VR41XX=y
+CONFIG_IBM_WORKPAD=y
+CONFIG_PCCARD=y
+CONFIG_PCMCIA_VRC4171=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETWORK_SECMARK=y
+CONFIG_BLK_DEV_RAM=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECS=m
+CONFIG_IDE_GENERIC=y
+CONFIG_NETDEVICES=y
+CONFIG_PCMCIA_3C574=m
+CONFIG_PCMCIA_3C589=m
+CONFIG_PCMCIA_NMCLAN=m
+CONFIG_PCMCIA_FMVJ18X=m
+CONFIG_PCMCIA_AXNET=m
+CONFIG_PCMCIA_PCNET=m
+CONFIG_PCMCIA_SMC91C92=m
+CONFIG_PCMCIA_XIRC2PS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_VR41XX=y
+CONFIG_SERIAL_VR41XX_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_GPIO_VR41XX=y
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_VR41XX=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=m
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyVR0,19200 ide0=0x170,0x376,49 mem=16M"
diff --git a/arch/mips/configs/xway_defconfig b/arch/mips/configs/xway_defconfig
new file mode 100644
index 000000000..9abbc0deb
--- /dev/null
+++ b/arch/mips/configs/xway_defconfig
@@ -0,0 +1,156 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_GZIP is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_AIO is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_LANTIQ=y
+CONFIG_PCI_LANTIQ=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_VPE_LOADER=y
+CONFIG_NR_CPUS=2
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_COREDUMP is not set
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_BRIDGE_NETFILTER is not set
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_RAW=m
+CONFIG_BRIDGE=y
+# CONFIG_BRIDGE_IGMP_SNOOPING is not set
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_HAMRADIO=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_LANTIQ=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_NAND_XWAY=y
+CONFIG_EEPROM_93CX6=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+CONFIG_LANTIQ_ETOP=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_PHYLIB=y
+CONFIG_INTEL_XWAY_PHY=y
+CONFIG_PPP=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_ISDN=y
+CONFIG_INPUT=m
+CONFIG_INPUT_POLLDEV=m
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_LANTIQ=y
+CONFIG_SPI=y
+CONFIG_GPIO_MM_LANTIQ=y
+CONFIG_GPIO_STP_XWAY=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_LANTIQ_WDT=y
+# CONFIG_HID is not set
+# CONFIG_USB_HID is not set
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DEBUG=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_DWC2_PCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_STAGING=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+# CONFIG_JFFS2_FS_POSIX_ACL is not set
+# CONFIG_JFFS2_FS_SECURITY is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+# CONFIG_JFFS2_ZLIB is not set
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_ZLIB is not set
+CONFIG_SQUASHFS_XZ=y
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32_SARWATE=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+CONFIG_CMDLINE_BOOL=y
diff --git a/arch/mips/crypto/Makefile b/arch/mips/crypto/Makefile
new file mode 100644
index 000000000..5e4105ccc
--- /dev/null
+++ b/arch/mips/crypto/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for MIPS crypto files.
+#
+
+obj-$(CONFIG_CRYPTO_CRC32_MIPS) += crc32-mips.o
+
+obj-$(CONFIG_CRYPTO_CHACHA_MIPS) += chacha-mips.o
+chacha-mips-y := chacha-core.o chacha-glue.o
+AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots
+
+obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o
+poly1305-mips-y := poly1305-core.o poly1305-glue.o
+
+perlasm-flavour-$(CONFIG_32BIT) := o32
+perlasm-flavour-$(CONFIG_64BIT) := 64
+
+quiet_cmd_perlasm = PERLASM $@
+ cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@)
+
+$(obj)/poly1305-core.S: $(src)/poly1305-mips.pl FORCE
+ $(call if_changed,perlasm)
+
+targets += poly1305-core.S
diff --git a/arch/mips/crypto/chacha-core.S b/arch/mips/crypto/chacha-core.S
new file mode 100644
index 000000000..5755f69cf
--- /dev/null
+++ b/arch/mips/crypto/chacha-core.S
@@ -0,0 +1,497 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2016-2018 René van Dorst <opensource@vdorst.com>. All Rights Reserved.
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#define MASK_U32 0x3c
+#define CHACHA20_BLOCK_SIZE 64
+#define STACK_SIZE 32
+
+#define X0 $t0
+#define X1 $t1
+#define X2 $t2
+#define X3 $t3
+#define X4 $t4
+#define X5 $t5
+#define X6 $t6
+#define X7 $t7
+#define X8 $t8
+#define X9 $t9
+#define X10 $v1
+#define X11 $s6
+#define X12 $s5
+#define X13 $s4
+#define X14 $s3
+#define X15 $s2
+/* Use regs which are overwritten on exit for Tx so we don't leak clear data. */
+#define T0 $s1
+#define T1 $s0
+#define T(n) T ## n
+#define X(n) X ## n
+
+/* Input arguments */
+#define STATE $a0
+#define OUT $a1
+#define IN $a2
+#define BYTES $a3
+
+/* Output argument */
+/* NONCE[0] is kept in a register and not in memory.
+ * We don't want to touch the original value in memory.
+ * Must be incremented every loop iteration.
+ */
+#define NONCE_0 $v0
+
+/* SAVED_X and SAVED_CA are set in the jump table.
+ * Use regs which are overwritten on exit so we don't leak clear data.
+ * They are used for handling the last bytes, which are not a multiple of 4.
+ */
+#define SAVED_X X15
+#define SAVED_CA $s7
+
+#define IS_UNALIGNED $s7
+
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define MSB 0
+#define LSB 3
+#define ROTx rotl
+#define ROTR(n) rotr n, 24
+#define CPU_TO_LE32(n) \
+ wsbh n; \
+ rotr n, 16;
+#else
+#define MSB 3
+#define LSB 0
+#define ROTx rotr
+#define CPU_TO_LE32(n)
+#define ROTR(n)
+#endif
+
+#define FOR_EACH_WORD(x) \
+ x( 0); \
+ x( 1); \
+ x( 2); \
+ x( 3); \
+ x( 4); \
+ x( 5); \
+ x( 6); \
+ x( 7); \
+ x( 8); \
+ x( 9); \
+ x(10); \
+ x(11); \
+ x(12); \
+ x(13); \
+ x(14); \
+ x(15);
+
+#define FOR_EACH_WORD_REV(x) \
+ x(15); \
+ x(14); \
+ x(13); \
+ x(12); \
+ x(11); \
+ x(10); \
+ x( 9); \
+ x( 8); \
+ x( 7); \
+ x( 6); \
+ x( 5); \
+ x( 4); \
+ x( 3); \
+ x( 2); \
+ x( 1); \
+ x( 0);
+
+#define PLUS_ONE_0 1
+#define PLUS_ONE_1 2
+#define PLUS_ONE_2 3
+#define PLUS_ONE_3 4
+#define PLUS_ONE_4 5
+#define PLUS_ONE_5 6
+#define PLUS_ONE_6 7
+#define PLUS_ONE_7 8
+#define PLUS_ONE_8 9
+#define PLUS_ONE_9 10
+#define PLUS_ONE_10 11
+#define PLUS_ONE_11 12
+#define PLUS_ONE_12 13
+#define PLUS_ONE_13 14
+#define PLUS_ONE_14 15
+#define PLUS_ONE_15 16
+#define PLUS_ONE(x) PLUS_ONE_ ## x
+#define _CONCAT3(a,b,c) a ## b ## c
+#define CONCAT3(a,b,c) _CONCAT3(a,b,c)
+
+#define STORE_UNALIGNED(x) \
+CONCAT3(.Lchacha_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \
+ .if (x != 12); \
+ lw T0, (x*4)(STATE); \
+ .endif; \
+ lwl T1, (x*4)+MSB ## (IN); \
+ lwr T1, (x*4)+LSB ## (IN); \
+ .if (x == 12); \
+ addu X ## x, NONCE_0; \
+ .else; \
+ addu X ## x, T0; \
+ .endif; \
+ CPU_TO_LE32(X ## x); \
+ xor X ## x, T1; \
+ swl X ## x, (x*4)+MSB ## (OUT); \
+ swr X ## x, (x*4)+LSB ## (OUT);
+
+#define STORE_ALIGNED(x) \
+CONCAT3(.Lchacha_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \
+ .if (x != 12); \
+ lw T0, (x*4)(STATE); \
+ .endif; \
+ lw T1, (x*4) ## (IN); \
+ .if (x == 12); \
+ addu X ## x, NONCE_0; \
+ .else; \
+ addu X ## x, T0; \
+ .endif; \
+ CPU_TO_LE32(X ## x); \
+ xor X ## x, T1; \
+ sw X ## x, (x*4) ## (OUT);
+
+/* Jump table macro.
+ * Used for setup and for handling the last bytes, which are not a multiple of 4.
+ * X15 is free to store Xn
+ * Every jumptable entry must be equal in size.
+ */
+#define JMPTBL_ALIGNED(x) \
+.Lchacha_mips_jmptbl_aligned_ ## x: ; \
+ .set noreorder; \
+ b .Lchacha_mips_xor_aligned_ ## x ## _b; \
+ .if (x == 12); \
+ addu SAVED_X, X ## x, NONCE_0; \
+ .else; \
+ addu SAVED_X, X ## x, SAVED_CA; \
+ .endif; \
+ .set reorder
+
+#define JMPTBL_UNALIGNED(x) \
+.Lchacha_mips_jmptbl_unaligned_ ## x: ; \
+ .set noreorder; \
+ b .Lchacha_mips_xor_unaligned_ ## x ## _b; \
+ .if (x == 12); \
+ addu SAVED_X, X ## x, NONCE_0; \
+ .else; \
+ addu SAVED_X, X ## x, SAVED_CA; \
+ .endif; \
+ .set reorder
+
+#define AXR(A, B, C, D, K, L, M, N, V, W, Y, Z, S) \
+ addu X(A), X(K); \
+ addu X(B), X(L); \
+ addu X(C), X(M); \
+ addu X(D), X(N); \
+ xor X(V), X(A); \
+ xor X(W), X(B); \
+ xor X(Y), X(C); \
+ xor X(Z), X(D); \
+ rotl X(V), S; \
+ rotl X(W), S; \
+ rotl X(Y), S; \
+ rotl X(Z), S;
+
+.text
+.set reorder
+.set noat
+.globl chacha_crypt_arch
+.ent chacha_crypt_arch
+chacha_crypt_arch:
+ .frame $sp, STACK_SIZE, $ra
+
+ /* Load number of rounds */
+ lw $at, 16($sp)
+
+ addiu $sp, -STACK_SIZE
+
+	/* Return if BYTES == 0. */
+ beqz BYTES, .Lchacha_mips_end
+
+ lw NONCE_0, 48(STATE)
+
+ /* Save s0-s7 */
+ sw $s0, 0($sp)
+ sw $s1, 4($sp)
+ sw $s2, 8($sp)
+ sw $s3, 12($sp)
+ sw $s4, 16($sp)
+ sw $s5, 20($sp)
+ sw $s6, 24($sp)
+ sw $s7, 28($sp)
+
+	/* Test whether IN or OUT is unaligned.
+ * IS_UNALIGNED = ( IN | OUT ) & 0x00000003
+ */
+ or IS_UNALIGNED, IN, OUT
+ andi IS_UNALIGNED, 0x3
+
+ b .Lchacha_rounds_start
+
+.align 4
+.Loop_chacha_rounds:
+ addiu IN, CHACHA20_BLOCK_SIZE
+ addiu OUT, CHACHA20_BLOCK_SIZE
+ addiu NONCE_0, 1
+
+.Lchacha_rounds_start:
+ lw X0, 0(STATE)
+ lw X1, 4(STATE)
+ lw X2, 8(STATE)
+ lw X3, 12(STATE)
+
+ lw X4, 16(STATE)
+ lw X5, 20(STATE)
+ lw X6, 24(STATE)
+ lw X7, 28(STATE)
+ lw X8, 32(STATE)
+ lw X9, 36(STATE)
+ lw X10, 40(STATE)
+ lw X11, 44(STATE)
+
+ move X12, NONCE_0
+ lw X13, 52(STATE)
+ lw X14, 56(STATE)
+ lw X15, 60(STATE)
+
+.Loop_chacha_xor_rounds:
+ addiu $at, -2
+ AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16);
+ AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12);
+ AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 8);
+ AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 7);
+ AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 16);
+ AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12);
+ AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8);
+ AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7);
+ bnez $at, .Loop_chacha_xor_rounds
+
+ addiu BYTES, -(CHACHA20_BLOCK_SIZE)
+
+ /* Is data src/dst unaligned? Jump */
+ bnez IS_UNALIGNED, .Loop_chacha_unaligned
+
+	/* Set the number of rounds here to fill the delay slot. */
+ lw $at, (STACK_SIZE+16)($sp)
+
+	/* BYTES < 0, so there is no full block. */
+ bltz BYTES, .Lchacha_mips_no_full_block_aligned
+
+ FOR_EACH_WORD_REV(STORE_ALIGNED)
+
+ /* BYTES > 0? Loop again. */
+ bgtz BYTES, .Loop_chacha_rounds
+
+ /* Place this here to fill delay slot */
+ addiu NONCE_0, 1
+
+ /* BYTES < 0? Handle last bytes */
+ bltz BYTES, .Lchacha_mips_xor_bytes
+
+.Lchacha_mips_xor_done:
+ /* Restore used registers */
+ lw $s0, 0($sp)
+ lw $s1, 4($sp)
+ lw $s2, 8($sp)
+ lw $s3, 12($sp)
+ lw $s4, 16($sp)
+ lw $s5, 20($sp)
+ lw $s6, 24($sp)
+ lw $s7, 28($sp)
+
+ /* Write NONCE_0 back to right location in state */
+ sw NONCE_0, 48(STATE)
+
+.Lchacha_mips_end:
+ addiu $sp, STACK_SIZE
+ jr $ra
+
+.Lchacha_mips_no_full_block_aligned:
+ /* Restore the offset on BYTES */
+ addiu BYTES, CHACHA20_BLOCK_SIZE
+
+ /* Get number of full WORDS */
+ andi $at, BYTES, MASK_U32
+
+ /* Load upper half of jump table addr */
+ lui T0, %hi(.Lchacha_mips_jmptbl_aligned_0)
+
+ /* Calculate lower half jump table offset */
+ ins T0, $at, 1, 6
+
+ /* Add offset to STATE */
+ addu T1, STATE, $at
+
+ /* Add lower half jump table addr */
+ addiu T0, %lo(.Lchacha_mips_jmptbl_aligned_0)
+
+ /* Read value from STATE */
+ lw SAVED_CA, 0(T1)
+
+	/* Store the remaining byte counter as a negative value */
+ subu BYTES, $at, BYTES
+
+ jr T0
+
+ /* Jump table */
+ FOR_EACH_WORD(JMPTBL_ALIGNED)
+
+
+.Loop_chacha_unaligned:
+	/* Set the number of rounds here to fill the delay slot. */
+ lw $at, (STACK_SIZE+16)($sp)
+
+	/* BYTES < 0, so there is no full block. */
+ bltz BYTES, .Lchacha_mips_no_full_block_unaligned
+
+ FOR_EACH_WORD_REV(STORE_UNALIGNED)
+
+ /* BYTES > 0? Loop again. */
+ bgtz BYTES, .Loop_chacha_rounds
+
+ /* Write NONCE_0 back to right location in state */
+ sw NONCE_0, 48(STATE)
+
+ .set noreorder
+ /* Fall through to byte handling */
+ bgez BYTES, .Lchacha_mips_xor_done
+.Lchacha_mips_xor_unaligned_0_b:
+.Lchacha_mips_xor_aligned_0_b:
+ /* Place this here to fill delay slot */
+ addiu NONCE_0, 1
+ .set reorder
+
+.Lchacha_mips_xor_bytes:
+ addu IN, $at
+ addu OUT, $at
+ /* First byte */
+ lbu T1, 0(IN)
+ addiu $at, BYTES, 1
+ CPU_TO_LE32(SAVED_X)
+ ROTR(SAVED_X)
+ xor T1, SAVED_X
+ sb T1, 0(OUT)
+ beqz $at, .Lchacha_mips_xor_done
+ /* Second byte */
+ lbu T1, 1(IN)
+ addiu $at, BYTES, 2
+ ROTx SAVED_X, 8
+ xor T1, SAVED_X
+ sb T1, 1(OUT)
+ beqz $at, .Lchacha_mips_xor_done
+ /* Third byte */
+ lbu T1, 2(IN)
+ ROTx SAVED_X, 8
+ xor T1, SAVED_X
+ sb T1, 2(OUT)
+ b .Lchacha_mips_xor_done
+
+.Lchacha_mips_no_full_block_unaligned:
+ /* Restore the offset on BYTES */
+ addiu BYTES, CHACHA20_BLOCK_SIZE
+
+ /* Get number of full WORDS */
+ andi $at, BYTES, MASK_U32
+
+ /* Load upper half of jump table addr */
+ lui T0, %hi(.Lchacha_mips_jmptbl_unaligned_0)
+
+ /* Calculate lower half jump table offset */
+ ins T0, $at, 1, 6
+
+ /* Add offset to STATE */
+ addu T1, STATE, $at
+
+ /* Add lower half jump table addr */
+ addiu T0, %lo(.Lchacha_mips_jmptbl_unaligned_0)
+
+ /* Read value from STATE */
+ lw SAVED_CA, 0(T1)
+
+	/* Store the remaining byte counter as a negative value */
+ subu BYTES, $at, BYTES
+
+ jr T0
+
+ /* Jump table */
+ FOR_EACH_WORD(JMPTBL_UNALIGNED)
+.end chacha_crypt_arch
+.set at
+
+/* Input arguments
+ * STATE $a0
+ * OUT $a1
+ * NROUND $a2
+ */
+
+#undef X12
+#undef X13
+#undef X14
+#undef X15
+
+#define X12 $a3
+#define X13 $at
+#define X14 $v0
+#define X15 STATE
+
+.set noat
+.globl hchacha_block_arch
+.ent hchacha_block_arch
+hchacha_block_arch:
+ .frame $sp, STACK_SIZE, $ra
+
+ addiu $sp, -STACK_SIZE
+
+ /* Save X11(s6) */
+ sw X11, 0($sp)
+
+ lw X0, 0(STATE)
+ lw X1, 4(STATE)
+ lw X2, 8(STATE)
+ lw X3, 12(STATE)
+ lw X4, 16(STATE)
+ lw X5, 20(STATE)
+ lw X6, 24(STATE)
+ lw X7, 28(STATE)
+ lw X8, 32(STATE)
+ lw X9, 36(STATE)
+ lw X10, 40(STATE)
+ lw X11, 44(STATE)
+ lw X12, 48(STATE)
+ lw X13, 52(STATE)
+ lw X14, 56(STATE)
+ lw X15, 60(STATE)
+
+.Loop_hchacha_xor_rounds:
+ addiu $a2, -2
+ AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16);
+ AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12);
+ AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 8);
+ AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 7);
+ AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 16);
+ AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12);
+ AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8);
+ AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7);
+ bnez $a2, .Loop_hchacha_xor_rounds
+
+ /* Restore used register */
+ lw X11, 0($sp)
+
+ sw X0, 0(OUT)
+ sw X1, 4(OUT)
+ sw X2, 8(OUT)
+ sw X3, 12(OUT)
+ sw X12, 16(OUT)
+ sw X13, 20(OUT)
+ sw X14, 24(OUT)
+ sw X15, 28(OUT)
+
+ addiu $sp, STACK_SIZE
+ jr $ra
+.end hchacha_block_arch
+.set at
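
The AXR macro above performs one add/xor/rotate step of the ChaCha quarter round on four state words at once; a group of four AXR calls with rotation amounts 16, 12, 8 and 7 therefore completes a full column round (or, with the shifted indices, a diagonal round). As a minimal reference for mapping the assembly back to the algorithm, the plain C quarter round below shows the same sequence on a single column. It is an illustrative sketch, not code from this patch, and the helper names are invented for the example.

#include <stdint.h>

/* Rotate a 32-bit word left by n bits. */
static inline uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* One ChaCha quarter round on state words a, b, c, d. */
static void chacha_quarter_round(uint32_t x[16], int a, int b, int c, int d)
{
	x[a] += x[b]; x[d] = rotl32(x[d] ^ x[a], 16);
	x[c] += x[d]; x[b] = rotl32(x[b] ^ x[c], 12);
	x[a] += x[b]; x[d] = rotl32(x[d] ^ x[a], 8);
	x[c] += x[d]; x[b] = rotl32(x[b] ^ x[c], 7);
}
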
diff --git a/arch/mips/crypto/chacha-glue.c b/arch/mips/crypto/chacha-glue.c
new file mode 100644
index 000000000..d1fd23e6e
--- /dev/null
+++ b/arch/mips/crypto/chacha-glue.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MIPS accelerated ChaCha and XChaCha stream ciphers,
+ * including ChaCha20 (RFC7539)
+ *
+ * Copyright (C) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <asm/byteorder.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+asmlinkage void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds);
+EXPORT_SYMBOL(chacha_crypt_arch);
+
+asmlinkage void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds);
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
+{
+ chacha_init_generic(state, key, iv);
+}
+EXPORT_SYMBOL(chacha_init_arch);
+
+static int chacha_mips_stream_xor(struct skcipher_request *req,
+ const struct chacha_ctx *ctx, const u8 *iv)
+{
+ struct skcipher_walk walk;
+ u32 state[16];
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ chacha_init_generic(state, ctx->key, iv);
+
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
+
+ chacha_crypt(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes, ctx->nrounds);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+
+ return err;
+}
+
+static int chacha_mips(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return chacha_mips_stream_xor(req, ctx, req->iv);
+}
+
+static int xchacha_mips(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct chacha_ctx subctx;
+ u32 state[16];
+ u8 real_iv[16];
+
+ chacha_init_generic(state, ctx->key, req->iv);
+
+ hchacha_block(state, subctx.key, ctx->nrounds);
+ subctx.nrounds = ctx->nrounds;
+
+ memcpy(&real_iv[0], req->iv + 24, 8);
+ memcpy(&real_iv[8], req->iv + 16, 8);
+ return chacha_mips_stream_xor(req, &subctx, real_iv);
+}
+
+static struct skcipher_alg algs[] = {
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-mips",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = chacha_mips,
+ .decrypt = chacha_mips,
+ }, {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-mips",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = xchacha_mips,
+ .decrypt = xchacha_mips,
+ }, {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-mips",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha12_setkey,
+ .encrypt = xchacha_mips,
+ .decrypt = xchacha_mips,
+ }
+};
+
+static int __init chacha_simd_mod_init(void)
+{
+ return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ?
+ crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0;
+}
+
+static void __exit chacha_simd_mod_fini(void)
+{
+ if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER))
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+module_init(chacha_simd_mod_init);
+module_exit(chacha_simd_mod_fini);
+
+MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (MIPS accelerated)");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-mips");
+MODULE_ALIAS_CRYPTO("xchacha20");
+MODULE_ALIAS_CRYPTO("xchacha20-mips");
+MODULE_ALIAS_CRYPTO("xchacha12");
+MODULE_ALIAS_CRYPTO("xchacha12-mips");
diff --git a/arch/mips/crypto/crc32-mips.c b/arch/mips/crypto/crc32-mips.c
new file mode 100644
index 000000000..faa88a6a7
--- /dev/null
+++ b/arch/mips/crypto/crc32-mips.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * crc32-mips.c - CRC32 and CRC32C using optional MIPSr6 instructions
+ *
+ * Module based on arm64/crypto/crc32-arm.c
+ *
+ * Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org>
+ * Copyright (C) 2018 MIPS Tech, LLC
+ */
+
+#include <linux/unaligned/access_ok.h>
+#include <linux/cpufeature.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <asm/mipsregs.h>
+
+#include <crypto/internal/hash.h>
+
+enum crc_op_size {
+ b, h, w, d,
+};
+
+enum crc_type {
+ crc32,
+ crc32c,
+};
+
+#ifndef TOOLCHAIN_SUPPORTS_CRC
+#define _ASM_MACRO_CRC32(OP, SZ, TYPE) \
+_ASM_MACRO_3R(OP, rt, rs, rt2, \
+ ".ifnc \\rt, \\rt2\n\t" \
+ ".error \"invalid operands \\\"" #OP " \\rt,\\rs,\\rt2\\\"\"\n\t" \
+ ".endif\n\t" \
+ _ASM_INSN_IF_MIPS(0x7c00000f | (__rt << 16) | (__rs << 21) | \
+ ((SZ) << 6) | ((TYPE) << 8)) \
+ _ASM_INSN32_IF_MM(0x00000030 | (__rs << 16) | (__rt << 21) | \
+ ((SZ) << 14) | ((TYPE) << 3)))
+_ASM_MACRO_CRC32(crc32b, 0, 0);
+_ASM_MACRO_CRC32(crc32h, 1, 0);
+_ASM_MACRO_CRC32(crc32w, 2, 0);
+_ASM_MACRO_CRC32(crc32d, 3, 0);
+_ASM_MACRO_CRC32(crc32cb, 0, 1);
+_ASM_MACRO_CRC32(crc32ch, 1, 1);
+_ASM_MACRO_CRC32(crc32cw, 2, 1);
+_ASM_MACRO_CRC32(crc32cd, 3, 1);
+#define _ASM_SET_CRC ""
+#else /* !TOOLCHAIN_SUPPORTS_CRC */
+#define _ASM_SET_CRC ".set\tcrc\n\t"
+#endif
+
+#define _CRC32(crc, value, size, type) \
+do { \
+ __asm__ __volatile__( \
+ ".set push\n\t" \
+ _ASM_SET_CRC \
+ #type #size " %0, %1, %0\n\t" \
+ ".set pop" \
+ : "+r" (crc) \
+ : "r" (value)); \
+} while (0)
+
+#define CRC32(crc, value, size) \
+ _CRC32(crc, value, size, crc32)
+
+#define CRC32C(crc, value, size) \
+ _CRC32(crc, value, size, crc32c)
+
+static u32 crc32_mips_le_hw(u32 crc_, const u8 *p, unsigned int len)
+{
+ u32 crc = crc_;
+
+#ifdef CONFIG_64BIT
+ while (len >= sizeof(u64)) {
+ u64 value = get_unaligned_le64(p);
+
+ CRC32(crc, value, d);
+ p += sizeof(u64);
+ len -= sizeof(u64);
+ }
+
+ if (len & sizeof(u32)) {
+#else /* !CONFIG_64BIT */
+ while (len >= sizeof(u32)) {
+#endif
+ u32 value = get_unaligned_le32(p);
+
+ CRC32(crc, value, w);
+ p += sizeof(u32);
+ len -= sizeof(u32);
+ }
+
+ if (len & sizeof(u16)) {
+ u16 value = get_unaligned_le16(p);
+
+ CRC32(crc, value, h);
+ p += sizeof(u16);
+ }
+
+ if (len & sizeof(u8)) {
+ u8 value = *p++;
+
+ CRC32(crc, value, b);
+ }
+
+ return crc;
+}
+
+static u32 crc32c_mips_le_hw(u32 crc_, const u8 *p, unsigned int len)
+{
+ u32 crc = crc_;
+
+#ifdef CONFIG_64BIT
+ while (len >= sizeof(u64)) {
+ u64 value = get_unaligned_le64(p);
+
+ CRC32C(crc, value, d);
+ p += sizeof(u64);
+ len -= sizeof(u64);
+ }
+
+ if (len & sizeof(u32)) {
+#else /* !CONFIG_64BIT */
+ while (len >= sizeof(u32)) {
+#endif
+ u32 value = get_unaligned_le32(p);
+
+ CRC32C(crc, value, w);
+ p += sizeof(u32);
+ len -= sizeof(u32);
+ }
+
+ if (len & sizeof(u16)) {
+ u16 value = get_unaligned_le16(p);
+
+ CRC32C(crc, value, h);
+ p += sizeof(u16);
+ }
+
+ if (len & sizeof(u8)) {
+ u8 value = *p++;
+
+ CRC32C(crc, value, b);
+ }
+ return crc;
+}
+
+#define CHKSUM_BLOCK_SIZE 1
+#define CHKSUM_DIGEST_SIZE 4
+
+struct chksum_ctx {
+ u32 key;
+};
+
+struct chksum_desc_ctx {
+ u32 crc;
+};
+
+static int chksum_init(struct shash_desc *desc)
+{
+ struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ ctx->crc = mctx->key;
+
+ return 0;
+}
+
+/*
+ * Setting the seed allows arbitrary accumulators and a flexible XOR policy.
+ * If your algorithm starts with ~0, then XOR with ~0 before you set
+ * the seed.
+ */
+static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
+
+ if (keylen != sizeof(mctx->key))
+ return -EINVAL;
+ mctx->key = get_unaligned_le32(key);
+ return 0;
+}
+
+static int chksum_update(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ ctx->crc = crc32_mips_le_hw(ctx->crc, data, length);
+ return 0;
+}
+
+static int chksumc_update(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ ctx->crc = crc32c_mips_le_hw(ctx->crc, data, length);
+ return 0;
+}
+
+static int chksum_final(struct shash_desc *desc, u8 *out)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ put_unaligned_le32(ctx->crc, out);
+ return 0;
+}
+
+static int chksumc_final(struct shash_desc *desc, u8 *out)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ put_unaligned_le32(~ctx->crc, out);
+ return 0;
+}
+
+static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
+{
+ put_unaligned_le32(crc32_mips_le_hw(crc, data, len), out);
+ return 0;
+}
+
+static int __chksumc_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
+{
+ put_unaligned_le32(~crc32c_mips_le_hw(crc, data, len), out);
+ return 0;
+}
+
+static int chksum_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ return __chksum_finup(ctx->crc, data, len, out);
+}
+
+static int chksumc_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ return __chksumc_finup(ctx->crc, data, len, out);
+}
+
+static int chksum_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int length, u8 *out)
+{
+ struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+
+ return __chksum_finup(mctx->key, data, length, out);
+}
+
+static int chksumc_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int length, u8 *out)
+{
+ struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+
+ return __chksumc_finup(mctx->key, data, length, out);
+}
+
+static int chksum_cra_init(struct crypto_tfm *tfm)
+{
+ struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
+
+ mctx->key = ~0;
+ return 0;
+}
+
+static struct shash_alg crc32_alg = {
+ .digestsize = CHKSUM_DIGEST_SIZE,
+ .setkey = chksum_setkey,
+ .init = chksum_init,
+ .update = chksum_update,
+ .final = chksum_final,
+ .finup = chksum_finup,
+ .digest = chksum_digest,
+ .descsize = sizeof(struct chksum_desc_ctx),
+ .base = {
+ .cra_name = "crc32",
+ .cra_driver_name = "crc32-mips-hw",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .cra_alignmask = 0,
+ .cra_ctxsize = sizeof(struct chksum_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = chksum_cra_init,
+ }
+};
+
+static struct shash_alg crc32c_alg = {
+ .digestsize = CHKSUM_DIGEST_SIZE,
+ .setkey = chksum_setkey,
+ .init = chksum_init,
+ .update = chksumc_update,
+ .final = chksumc_final,
+ .finup = chksumc_finup,
+ .digest = chksumc_digest,
+ .descsize = sizeof(struct chksum_desc_ctx),
+ .base = {
+ .cra_name = "crc32c",
+ .cra_driver_name = "crc32c-mips-hw",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+ .cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .cra_alignmask = 0,
+ .cra_ctxsize = sizeof(struct chksum_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = chksum_cra_init,
+ }
+};
+
+static int __init crc32_mod_init(void)
+{
+ int err;
+
+ err = crypto_register_shash(&crc32_alg);
+
+ if (err)
+ return err;
+
+ err = crypto_register_shash(&crc32c_alg);
+
+ if (err) {
+ crypto_unregister_shash(&crc32_alg);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit crc32_mod_exit(void)
+{
+ crypto_unregister_shash(&crc32_alg);
+ crypto_unregister_shash(&crc32c_alg);
+}
+
+MODULE_AUTHOR("Marcin Nowakowski <marcin.nowakowski@mips.com");
+MODULE_DESCRIPTION("CRC32 and CRC32C using optional MIPS instructions");
+MODULE_LICENSE("GPL v2");
+
+module_cpu_feature_match(MIPS_CRC32, crc32_mod_init);
+module_exit(crc32_mod_exit);
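
crc32_mips_le_hw() and crc32c_mips_le_hw() feed 64-, 32-, 16- and 8-bit chunks to the crc32*/crc32c* instructions, each of which advances a reflected CRC over its operand; the only differences between the two shash algorithms are the polynomial selected by the instruction and the final store, where chksum_final() writes the accumulator as-is while chksumc_final() writes its bitwise complement. The bitwise software reference below shows what a single byte step computes; it is a sketch for orientation, quoting the standard reflected polynomials for CRC-32 and CRC-32C, and is not code from this patch.

#include <stdint.h>

/*
 * One reflected CRC byte step.  Use poly 0xEDB88320 for CRC-32
 * (what crc32b computes) or 0x82F63B78 for CRC-32C (crc32cb).
 */
static uint32_t crc32_update_byte(uint32_t crc, uint8_t byte, uint32_t poly)
{
	int i;

	crc ^= byte;
	for (i = 0; i < 8; i++)
		crc = (crc >> 1) ^ ((crc & 1) ? poly : 0);
	return crc;
}

A caller would seed crc with ~0 (as chksum_cra_init() does) and, for the CRC-32C convention, complement the result at the end, mirroring chksumc_final().
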
diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c
new file mode 100644
index 000000000..bc6110fb9
--- /dev/null
+++ b/arch/mips/crypto/poly1305-glue.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OpenSSL/Cryptogams accelerated Poly1305 transform for MIPS
+ *
+ * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/poly1305.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+asmlinkage void poly1305_init_mips(void *state, const u8 *key);
+asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
+asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
+
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
+{
+ poly1305_init_mips(&dctx->h, key);
+ dctx->s[0] = get_unaligned_le32(key + 16);
+ dctx->s[1] = get_unaligned_le32(key + 20);
+ dctx->s[2] = get_unaligned_le32(key + 24);
+ dctx->s[3] = get_unaligned_le32(key + 28);
+ dctx->buflen = 0;
+}
+EXPORT_SYMBOL(poly1305_init_arch);
+
+static int mips_poly1305_init(struct shash_desc *desc)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ dctx->buflen = 0;
+ dctx->rset = 0;
+ dctx->sset = false;
+
+ return 0;
+}
+
+static void mips_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
+ u32 len, u32 hibit)
+{
+ if (unlikely(!dctx->sset)) {
+ if (!dctx->rset) {
+ poly1305_init_mips(&dctx->h, src);
+ src += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ dctx->rset = 1;
+ }
+ if (len >= POLY1305_BLOCK_SIZE) {
+ dctx->s[0] = get_unaligned_le32(src + 0);
+ dctx->s[1] = get_unaligned_le32(src + 4);
+ dctx->s[2] = get_unaligned_le32(src + 8);
+ dctx->s[3] = get_unaligned_le32(src + 12);
+ src += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ dctx->sset = true;
+ }
+ if (len < POLY1305_BLOCK_SIZE)
+ return;
+ }
+
+ len &= ~(POLY1305_BLOCK_SIZE - 1);
+
+ poly1305_blocks_mips(&dctx->h, src, len, hibit);
+}
+
+static int mips_poly1305_update(struct shash_desc *desc, const u8 *src,
+ unsigned int len)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (unlikely(dctx->buflen)) {
+ u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen);
+
+ memcpy(dctx->buf + dctx->buflen, src, bytes);
+ src += bytes;
+ len -= bytes;
+ dctx->buflen += bytes;
+
+ if (dctx->buflen == POLY1305_BLOCK_SIZE) {
+ mips_poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 1);
+ dctx->buflen = 0;
+ }
+ }
+
+ if (likely(len >= POLY1305_BLOCK_SIZE)) {
+ mips_poly1305_blocks(dctx, src, len, 1);
+ src += round_down(len, POLY1305_BLOCK_SIZE);
+ len %= POLY1305_BLOCK_SIZE;
+ }
+
+ if (unlikely(len)) {
+ dctx->buflen = len;
+ memcpy(dctx->buf, src, len);
+ }
+ return 0;
+}
+
+void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
+ unsigned int nbytes)
+{
+ if (unlikely(dctx->buflen)) {
+ u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen);
+
+ memcpy(dctx->buf + dctx->buflen, src, bytes);
+ src += bytes;
+ nbytes -= bytes;
+ dctx->buflen += bytes;
+
+ if (dctx->buflen == POLY1305_BLOCK_SIZE) {
+ poly1305_blocks_mips(&dctx->h, dctx->buf,
+ POLY1305_BLOCK_SIZE, 1);
+ dctx->buflen = 0;
+ }
+ }
+
+ if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
+ unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
+
+ poly1305_blocks_mips(&dctx->h, src, len, 1);
+ src += len;
+ nbytes %= POLY1305_BLOCK_SIZE;
+ }
+
+ if (unlikely(nbytes)) {
+ dctx->buflen = nbytes;
+ memcpy(dctx->buf, src, nbytes);
+ }
+}
+EXPORT_SYMBOL(poly1305_update_arch);
+
+void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
+{
+ if (unlikely(dctx->buflen)) {
+ dctx->buf[dctx->buflen++] = 1;
+ memset(dctx->buf + dctx->buflen, 0,
+ POLY1305_BLOCK_SIZE - dctx->buflen);
+ poly1305_blocks_mips(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
+ }
+
+ poly1305_emit_mips(&dctx->h, dst, dctx->s);
+ *dctx = (struct poly1305_desc_ctx){};
+}
+EXPORT_SYMBOL(poly1305_final_arch);
+
+static int mips_poly1305_final(struct shash_desc *desc, u8 *dst)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (unlikely(!dctx->sset))
+ return -ENOKEY;
+
+ poly1305_final_arch(dctx, dst);
+ return 0;
+}
+
+static struct shash_alg mips_poly1305_alg = {
+ .init = mips_poly1305_init,
+ .update = mips_poly1305_update,
+ .final = mips_poly1305_final,
+ .digestsize = POLY1305_DIGEST_SIZE,
+ .descsize = sizeof(struct poly1305_desc_ctx),
+
+ .base.cra_name = "poly1305",
+ .base.cra_driver_name = "poly1305-mips",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = POLY1305_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+};
+
+static int __init mips_poly1305_mod_init(void)
+{
+ return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
+ crypto_register_shash(&mips_poly1305_alg) : 0;
+}
+
+static void __exit mips_poly1305_mod_exit(void)
+{
+ if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
+ crypto_unregister_shash(&mips_poly1305_alg);
+}
+
+module_init(mips_poly1305_mod_init);
+module_exit(mips_poly1305_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("poly1305");
+MODULE_ALIAS_CRYPTO("poly1305-mips");
diff --git a/arch/mips/crypto/poly1305-mips.pl b/arch/mips/crypto/poly1305-mips.pl
new file mode 100644
index 000000000..b05bab884
--- /dev/null
+++ b/arch/mips/crypto/poly1305-mips.pl
@@ -0,0 +1,1273 @@
+#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-1.0+ OR BSD-3-Clause
+#
+# ====================================================================
+# Written by Andy Polyakov, @dot-asm, originally for the OpenSSL
+# project.
+# ====================================================================
+
+# Poly1305 hash for MIPS.
+#
+# May 2016
+#
+# Numbers are cycles per processed byte with poly1305_blocks alone.
+#
+# IALU/gcc
+# R1x000 ~5.5/+130% (big-endian)
+# Octeon II 2.50/+70% (little-endian)
+#
+# March 2019
+#
+# Add 32-bit code path.
+#
+# October 2019
+#
+# Modulo-scheduled reduction makes it possible to omit the dependency
+# chain at the end of the inner loop and improve performance. Also
+# optimize the MIPS32R2 code path for the MIPS 1004K core. Per René
+# van Dorst's suggestions.
+#
+# IALU/gcc
+# R1x000 ~9.8/? (big-endian)
+# Octeon II 3.65/+140% (little-endian)
+# MT7621/1004K 4.75/? (little-endian)
+#
+######################################################################
+# There are a number of MIPS ABIs in use; O32 and N32/64 are the most
+# widely used. Then there is a new contender: NUBI. It appears that if
+# one picks the latter, it's possible to arrange code in an ABI-neutral
+# manner. Therefore let's stick to the NUBI register layout:
+#
+($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25));
+($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23));
+($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31));
+#
+# The return value is placed in $a0. Following coding rules facilitate
+# interoperability:
+#
+# - never ever touch $tp, "thread pointer", former $gp [o32 can be
+# excluded from the rule, because it's specified volatile];
+# - copy return value to $t0, former $v0 [or to $a0 if you're adapting
+# old code];
+# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary;
+#
+# For reference here is register layout for N32/64 MIPS ABIs:
+#
+# ($zero,$at,$v0,$v1)=map("\$$_",(0..3));
+# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
+# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
+# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
+#
+# <appro@openssl.org>
+#
+######################################################################
+
+$flavour = shift || "64"; # supported flavours are o32,n32,64,nubi32,nubi64
+
+$v0 = ($flavour =~ /nubi/i) ? $a0 : $t0;
+
+if ($flavour =~ /64|n32/i) {{{
+######################################################################
+# 64-bit code path
+#
+
+my ($ctx,$inp,$len,$padbit) = ($a0,$a1,$a2,$a3);
+my ($in0,$in1,$tmp0,$tmp1,$tmp2,$tmp3,$tmp4) = ($a4,$a5,$a6,$a7,$at,$t0,$t1);
+
+$code.=<<___;
+#if (defined(_MIPS_ARCH_MIPS64R3) || defined(_MIPS_ARCH_MIPS64R5) || \\
+ defined(_MIPS_ARCH_MIPS64R6)) \\
+ && !defined(_MIPS_ARCH_MIPS64R2)
+# define _MIPS_ARCH_MIPS64R2
+#endif
+
+#if defined(_MIPS_ARCH_MIPS64R6)
+# define dmultu(rs,rt)
+# define mflo(rd,rs,rt) dmulu rd,rs,rt
+# define mfhi(rd,rs,rt) dmuhu rd,rs,rt
+#else
+# define dmultu(rs,rt) dmultu rs,rt
+# define mflo(rd,rs,rt) mflo rd
+# define mfhi(rd,rs,rt) mfhi rd
+#endif
+
+#ifdef __KERNEL__
+# define poly1305_init poly1305_init_mips
+# define poly1305_blocks poly1305_blocks_mips
+# define poly1305_emit poly1305_emit_mips
+#endif
+
+#if defined(__MIPSEB__) && !defined(MIPSEB)
+# define MIPSEB
+#endif
+
+#ifdef MIPSEB
+# define MSB 0
+# define LSB 7
+#else
+# define MSB 7
+# define LSB 0
+#endif
+
+.text
+.set noat
+.set noreorder
+
+.align 5
+.globl poly1305_init
+.ent poly1305_init
+poly1305_init:
+ .frame $sp,0,$ra
+ .set reorder
+
+ sd $zero,0($ctx)
+ sd $zero,8($ctx)
+ sd $zero,16($ctx)
+
+ beqz $inp,.Lno_key
+
+#if defined(_MIPS_ARCH_MIPS64R6)
+ andi $tmp0,$inp,7 # $inp % 8
+ dsubu $inp,$inp,$tmp0 # align $inp
+ sll $tmp0,$tmp0,3 # byte to bit offset
+ ld $in0,0($inp)
+ ld $in1,8($inp)
+ beqz $tmp0,.Laligned_key
+ ld $tmp2,16($inp)
+
+ subu $tmp1,$zero,$tmp0
+# ifdef MIPSEB
+ dsllv $in0,$in0,$tmp0
+ dsrlv $tmp3,$in1,$tmp1
+ dsllv $in1,$in1,$tmp0
+ dsrlv $tmp2,$tmp2,$tmp1
+# else
+ dsrlv $in0,$in0,$tmp0
+ dsllv $tmp3,$in1,$tmp1
+ dsrlv $in1,$in1,$tmp0
+ dsllv $tmp2,$tmp2,$tmp1
+# endif
+ or $in0,$in0,$tmp3
+ or $in1,$in1,$tmp2
+.Laligned_key:
+#else
+ ldl $in0,0+MSB($inp)
+ ldl $in1,8+MSB($inp)
+ ldr $in0,0+LSB($inp)
+ ldr $in1,8+LSB($inp)
+#endif
+#ifdef MIPSEB
+# if defined(_MIPS_ARCH_MIPS64R2)
+ dsbh $in0,$in0 # byte swap
+ dsbh $in1,$in1
+ dshd $in0,$in0
+ dshd $in1,$in1
+# else
+ ori $tmp0,$zero,0xFF
+ dsll $tmp2,$tmp0,32
+ or $tmp0,$tmp2 # 0x000000FF000000FF
+
+ and $tmp1,$in0,$tmp0 # byte swap
+ and $tmp3,$in1,$tmp0
+ dsrl $tmp2,$in0,24
+ dsrl $tmp4,$in1,24
+ dsll $tmp1,24
+ dsll $tmp3,24
+ and $tmp2,$tmp0
+ and $tmp4,$tmp0
+ dsll $tmp0,8 # 0x0000FF000000FF00
+ or $tmp1,$tmp2
+ or $tmp3,$tmp4
+ and $tmp2,$in0,$tmp0
+ and $tmp4,$in1,$tmp0
+ dsrl $in0,8
+ dsrl $in1,8
+ dsll $tmp2,8
+ dsll $tmp4,8
+ and $in0,$tmp0
+ and $in1,$tmp0
+ or $tmp1,$tmp2
+ or $tmp3,$tmp4
+ or $in0,$tmp1
+ or $in1,$tmp3
+ dsrl $tmp1,$in0,32
+ dsrl $tmp3,$in1,32
+ dsll $in0,32
+ dsll $in1,32
+ or $in0,$tmp1
+ or $in1,$tmp3
+# endif
+#endif
+ li $tmp0,1
+ dsll $tmp0,32 # 0x0000000100000000
+ daddiu $tmp0,-63 # 0x00000000ffffffc1
+ dsll $tmp0,28 # 0x0ffffffc10000000
+ daddiu $tmp0,-1 # 0x0ffffffc0fffffff
+
+ and $in0,$tmp0
+ daddiu $tmp0,-3 # 0x0ffffffc0ffffffc
+ and $in1,$tmp0
+
+ sd $in0,24($ctx)
+ dsrl $tmp0,$in1,2
+ sd $in1,32($ctx)
+ daddu $tmp0,$in1 # s1 = r1 + (r1 >> 2)
+ sd $tmp0,40($ctx)
+
+.Lno_key:
+ li $v0,0 # return 0
+ jr $ra
+.end poly1305_init
+___
+{
+my $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? "0x0003f000" : "0x00030000";
+
+my ($h0,$h1,$h2,$r0,$r1,$rs1,$d0,$d1,$d2) =
+ ($s0,$s1,$s2,$s3,$s4,$s5,$in0,$in1,$t2);
+my ($shr,$shl) = ($s6,$s7); # used on R6
+
+$code.=<<___;
+.align 5
+.globl poly1305_blocks
+.ent poly1305_blocks
+poly1305_blocks:
+ .set noreorder
+ dsrl $len,4 # number of complete blocks
+ bnez $len,poly1305_blocks_internal
+ nop
+ jr $ra
+ nop
+.end poly1305_blocks
+
+.align 5
+.ent poly1305_blocks_internal
+poly1305_blocks_internal:
+ .set noreorder
+#if defined(_MIPS_ARCH_MIPS64R6)
+ .frame $sp,8*8,$ra
+ .mask $SAVED_REGS_MASK|0x000c0000,-8
+ dsubu $sp,8*8
+ sd $s7,56($sp)
+ sd $s6,48($sp)
+#else
+ .frame $sp,6*8,$ra
+ .mask $SAVED_REGS_MASK,-8
+ dsubu $sp,6*8
+#endif
+ sd $s5,40($sp)
+ sd $s4,32($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue
+ sd $s3,24($sp)
+ sd $s2,16($sp)
+ sd $s1,8($sp)
+ sd $s0,0($sp)
+___
+$code.=<<___;
+ .set reorder
+
+#if defined(_MIPS_ARCH_MIPS64R6)
+ andi $shr,$inp,7
+ dsubu $inp,$inp,$shr # align $inp
+ sll $shr,$shr,3 # byte to bit offset
+ subu $shl,$zero,$shr
+#endif
+
+ ld $h0,0($ctx) # load hash value
+ ld $h1,8($ctx)
+ ld $h2,16($ctx)
+
+ ld $r0,24($ctx) # load key
+ ld $r1,32($ctx)
+ ld $rs1,40($ctx)
+
+ dsll $len,4
+ daddu $len,$inp # end of buffer
+ b .Loop
+
+.align 4
+.Loop:
+#if defined(_MIPS_ARCH_MIPS64R6)
+ ld $in0,0($inp) # load input
+ ld $in1,8($inp)
+ beqz $shr,.Laligned_inp
+
+ ld $tmp2,16($inp)
+# ifdef MIPSEB
+ dsllv $in0,$in0,$shr
+ dsrlv $tmp3,$in1,$shl
+ dsllv $in1,$in1,$shr
+ dsrlv $tmp2,$tmp2,$shl
+# else
+ dsrlv $in0,$in0,$shr
+ dsllv $tmp3,$in1,$shl
+ dsrlv $in1,$in1,$shr
+ dsllv $tmp2,$tmp2,$shl
+# endif
+ or $in0,$in0,$tmp3
+ or $in1,$in1,$tmp2
+.Laligned_inp:
+#else
+ ldl $in0,0+MSB($inp) # load input
+ ldl $in1,8+MSB($inp)
+ ldr $in0,0+LSB($inp)
+ ldr $in1,8+LSB($inp)
+#endif
+ daddiu $inp,16
+#ifdef MIPSEB
+# if defined(_MIPS_ARCH_MIPS64R2)
+ dsbh $in0,$in0 # byte swap
+ dsbh $in1,$in1
+ dshd $in0,$in0
+ dshd $in1,$in1
+# else
+ ori $tmp0,$zero,0xFF
+ dsll $tmp2,$tmp0,32
+ or $tmp0,$tmp2 # 0x000000FF000000FF
+
+ and $tmp1,$in0,$tmp0 # byte swap
+ and $tmp3,$in1,$tmp0
+ dsrl $tmp2,$in0,24
+ dsrl $tmp4,$in1,24
+ dsll $tmp1,24
+ dsll $tmp3,24
+ and $tmp2,$tmp0
+ and $tmp4,$tmp0
+ dsll $tmp0,8 # 0x0000FF000000FF00
+ or $tmp1,$tmp2
+ or $tmp3,$tmp4
+ and $tmp2,$in0,$tmp0
+ and $tmp4,$in1,$tmp0
+ dsrl $in0,8
+ dsrl $in1,8
+ dsll $tmp2,8
+ dsll $tmp4,8
+ and $in0,$tmp0
+ and $in1,$tmp0
+ or $tmp1,$tmp2
+ or $tmp3,$tmp4
+ or $in0,$tmp1
+ or $in1,$tmp3
+ dsrl $tmp1,$in0,32
+ dsrl $tmp3,$in1,32
+ dsll $in0,32
+ dsll $in1,32
+ or $in0,$tmp1
+ or $in1,$tmp3
+# endif
+#endif
+ dsrl $tmp1,$h2,2 # modulo-scheduled reduction
+ andi $h2,$h2,3
+ dsll $tmp0,$tmp1,2
+
+ daddu $d0,$h0,$in0 # accumulate input
+ daddu $tmp1,$tmp0
+ sltu $tmp0,$d0,$h0
+ daddu $d0,$d0,$tmp1 # ... and residue
+ sltu $tmp1,$d0,$tmp1
+ daddu $d1,$h1,$in1
+ daddu $tmp0,$tmp1
+ sltu $tmp1,$d1,$h1
+ daddu $d1,$tmp0
+
+ dmultu ($r0,$d0) # h0*r0
+ daddu $d2,$h2,$padbit
+ sltu $tmp0,$d1,$tmp0
+ mflo ($h0,$r0,$d0)
+ mfhi ($h1,$r0,$d0)
+
+ dmultu ($rs1,$d1) # h1*5*r1
+ daddu $d2,$tmp1
+ daddu $d2,$tmp0
+ mflo ($tmp0,$rs1,$d1)
+ mfhi ($tmp1,$rs1,$d1)
+
+ dmultu ($r1,$d0) # h0*r1
+ mflo ($tmp2,$r1,$d0)
+ mfhi ($h2,$r1,$d0)
+ daddu $h0,$tmp0
+ daddu $h1,$tmp1
+ sltu $tmp0,$h0,$tmp0
+
+ dmultu ($r0,$d1) # h1*r0
+ daddu $h1,$tmp0
+ daddu $h1,$tmp2
+ mflo ($tmp0,$r0,$d1)
+ mfhi ($tmp1,$r0,$d1)
+
+ dmultu ($rs1,$d2) # h2*5*r1
+ sltu $tmp2,$h1,$tmp2
+ daddu $h2,$tmp2
+ mflo ($tmp2,$rs1,$d2)
+
+ dmultu ($r0,$d2) # h2*r0
+ daddu $h1,$tmp0
+ daddu $h2,$tmp1
+ mflo ($tmp3,$r0,$d2)
+ sltu $tmp0,$h1,$tmp0
+ daddu $h2,$tmp0
+
+ daddu $h1,$tmp2
+ sltu $tmp2,$h1,$tmp2
+ daddu $h2,$tmp2
+ daddu $h2,$tmp3
+
+ bne $inp,$len,.Loop
+
+ sd $h0,0($ctx) # store hash value
+ sd $h1,8($ctx)
+ sd $h2,16($ctx)
+
+ .set noreorder
+#if defined(_MIPS_ARCH_MIPS64R6)
+ ld $s7,56($sp)
+ ld $s6,48($sp)
+#endif
+ ld $s5,40($sp) # epilogue
+ ld $s4,32($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi epilogue
+ ld $s3,24($sp)
+ ld $s2,16($sp)
+ ld $s1,8($sp)
+ ld $s0,0($sp)
+___
+$code.=<<___;
+ jr $ra
+#if defined(_MIPS_ARCH_MIPS64R6)
+ daddu $sp,8*8
+#else
+ daddu $sp,6*8
+#endif
+.end poly1305_blocks_internal
+___
+}
+{
+my ($ctx,$mac,$nonce) = ($a0,$a1,$a2);
+
+$code.=<<___;
+.align 5
+.globl poly1305_emit
+.ent poly1305_emit
+poly1305_emit:
+ .frame $sp,0,$ra
+ .set reorder
+
+ ld $tmp2,16($ctx)
+ ld $tmp0,0($ctx)
+ ld $tmp1,8($ctx)
+
+ li $in0,-4 # final reduction
+ dsrl $in1,$tmp2,2
+ and $in0,$tmp2
+ andi $tmp2,$tmp2,3
+ daddu $in0,$in1
+
+ daddu $tmp0,$tmp0,$in0
+ sltu $in1,$tmp0,$in0
+ daddiu $in0,$tmp0,5 # compare to modulus
+ daddu $tmp1,$tmp1,$in1
+ sltiu $tmp3,$in0,5
+ sltu $tmp4,$tmp1,$in1
+ daddu $in1,$tmp1,$tmp3
+ daddu $tmp2,$tmp2,$tmp4
+ sltu $tmp3,$in1,$tmp3
+ daddu $tmp2,$tmp2,$tmp3
+
+ dsrl $tmp2,2 # see if it carried/borrowed
+ dsubu $tmp2,$zero,$tmp2
+
+ xor $in0,$tmp0
+ xor $in1,$tmp1
+ and $in0,$tmp2
+ and $in1,$tmp2
+ xor $in0,$tmp0
+ xor $in1,$tmp1
+
+ lwu $tmp0,0($nonce) # load nonce
+ lwu $tmp1,4($nonce)
+ lwu $tmp2,8($nonce)
+ lwu $tmp3,12($nonce)
+ dsll $tmp1,32
+ dsll $tmp3,32
+ or $tmp0,$tmp1
+ or $tmp2,$tmp3
+
+ daddu $in0,$tmp0 # accumulate nonce
+ daddu $in1,$tmp2
+ sltu $tmp0,$in0,$tmp0
+ daddu $in1,$tmp0
+
+ dsrl $tmp0,$in0,8 # write mac value
+ dsrl $tmp1,$in0,16
+ dsrl $tmp2,$in0,24
+ sb $in0,0($mac)
+ dsrl $tmp3,$in0,32
+ sb $tmp0,1($mac)
+ dsrl $tmp0,$in0,40
+ sb $tmp1,2($mac)
+ dsrl $tmp1,$in0,48
+ sb $tmp2,3($mac)
+ dsrl $tmp2,$in0,56
+ sb $tmp3,4($mac)
+ dsrl $tmp3,$in1,8
+ sb $tmp0,5($mac)
+ dsrl $tmp0,$in1,16
+ sb $tmp1,6($mac)
+ dsrl $tmp1,$in1,24
+ sb $tmp2,7($mac)
+
+ sb $in1,8($mac)
+ dsrl $tmp2,$in1,32
+ sb $tmp3,9($mac)
+ dsrl $tmp3,$in1,40
+ sb $tmp0,10($mac)
+ dsrl $tmp0,$in1,48
+ sb $tmp1,11($mac)
+ dsrl $tmp1,$in1,56
+ sb $tmp2,12($mac)
+ sb $tmp3,13($mac)
+ sb $tmp0,14($mac)
+ sb $tmp1,15($mac)
+
+ jr $ra
+.end poly1305_emit
+.rdata
+.asciiz "Poly1305 for MIPS64, CRYPTOGAMS by \@dot-asm"
+.align 2
+___
+}
+}}} else {{{
+######################################################################
+# 32-bit code path
+#
+
+my ($ctx,$inp,$len,$padbit) = ($a0,$a1,$a2,$a3);
+my ($in0,$in1,$in2,$in3,$tmp0,$tmp1,$tmp2,$tmp3) =
+ ($a4,$a5,$a6,$a7,$at,$t0,$t1,$t2);
+
+$code.=<<___;
+#if (defined(_MIPS_ARCH_MIPS32R3) || defined(_MIPS_ARCH_MIPS32R5) || \\
+ defined(_MIPS_ARCH_MIPS32R6)) \\
+ && !defined(_MIPS_ARCH_MIPS32R2)
+# define _MIPS_ARCH_MIPS32R2
+#endif
+
+#if defined(_MIPS_ARCH_MIPS32R6)
+# define multu(rs,rt)
+# define mflo(rd,rs,rt) mulu rd,rs,rt
+# define mfhi(rd,rs,rt) muhu rd,rs,rt
+#else
+# define multu(rs,rt) multu rs,rt
+# define mflo(rd,rs,rt) mflo rd
+# define mfhi(rd,rs,rt) mfhi rd
+#endif
+
+#ifdef __KERNEL__
+# define poly1305_init poly1305_init_mips
+# define poly1305_blocks poly1305_blocks_mips
+# define poly1305_emit poly1305_emit_mips
+#endif
+
+#if defined(__MIPSEB__) && !defined(MIPSEB)
+# define MIPSEB
+#endif
+
+#ifdef MIPSEB
+# define MSB 0
+# define LSB 3
+#else
+# define MSB 3
+# define LSB 0
+#endif
+
+.text
+.set noat
+.set noreorder
+
+.align 5
+.globl poly1305_init
+.ent poly1305_init
+poly1305_init:
+ .frame $sp,0,$ra
+ .set reorder
+
+ sw $zero,0($ctx)
+ sw $zero,4($ctx)
+ sw $zero,8($ctx)
+ sw $zero,12($ctx)
+ sw $zero,16($ctx)
+
+ beqz $inp,.Lno_key
+
+#if defined(_MIPS_ARCH_MIPS32R6)
+ andi $tmp0,$inp,3 # $inp % 4
+ subu $inp,$inp,$tmp0 # align $inp
+ sll $tmp0,$tmp0,3 # byte to bit offset
+ lw $in0,0($inp)
+ lw $in1,4($inp)
+ lw $in2,8($inp)
+ lw $in3,12($inp)
+ beqz $tmp0,.Laligned_key
+
+ lw $tmp2,16($inp)
+ subu $tmp1,$zero,$tmp0
+# ifdef MIPSEB
+ sllv $in0,$in0,$tmp0
+ srlv $tmp3,$in1,$tmp1
+ sllv $in1,$in1,$tmp0
+ or $in0,$in0,$tmp3
+ srlv $tmp3,$in2,$tmp1
+ sllv $in2,$in2,$tmp0
+ or $in1,$in1,$tmp3
+ srlv $tmp3,$in3,$tmp1
+ sllv $in3,$in3,$tmp0
+ or $in2,$in2,$tmp3
+ srlv $tmp2,$tmp2,$tmp1
+ or $in3,$in3,$tmp2
+# else
+ srlv $in0,$in0,$tmp0
+ sllv $tmp3,$in1,$tmp1
+ srlv $in1,$in1,$tmp0
+ or $in0,$in0,$tmp3
+ sllv $tmp3,$in2,$tmp1
+ srlv $in2,$in2,$tmp0
+ or $in1,$in1,$tmp3
+ sllv $tmp3,$in3,$tmp1
+ srlv $in3,$in3,$tmp0
+ or $in2,$in2,$tmp3
+ sllv $tmp2,$tmp2,$tmp1
+ or $in3,$in3,$tmp2
+# endif
+.Laligned_key:
+#else
+ lwl $in0,0+MSB($inp)
+ lwl $in1,4+MSB($inp)
+ lwl $in2,8+MSB($inp)
+ lwl $in3,12+MSB($inp)
+ lwr $in0,0+LSB($inp)
+ lwr $in1,4+LSB($inp)
+ lwr $in2,8+LSB($inp)
+ lwr $in3,12+LSB($inp)
+#endif
+#ifdef MIPSEB
+# if defined(_MIPS_ARCH_MIPS32R2)
+ wsbh $in0,$in0 # byte swap
+ wsbh $in1,$in1
+ wsbh $in2,$in2
+ wsbh $in3,$in3
+ rotr $in0,$in0,16
+ rotr $in1,$in1,16
+ rotr $in2,$in2,16
+ rotr $in3,$in3,16
+# else
+ srl $tmp0,$in0,24 # byte swap
+ srl $tmp1,$in0,8
+ andi $tmp2,$in0,0xFF00
+ sll $in0,$in0,24
+ andi $tmp1,0xFF00
+ sll $tmp2,$tmp2,8
+ or $in0,$tmp0
+ srl $tmp0,$in1,24
+ or $tmp1,$tmp2
+ srl $tmp2,$in1,8
+ or $in0,$tmp1
+ andi $tmp1,$in1,0xFF00
+ sll $in1,$in1,24
+ andi $tmp2,0xFF00
+ sll $tmp1,$tmp1,8
+ or $in1,$tmp0
+ srl $tmp0,$in2,24
+ or $tmp2,$tmp1
+ srl $tmp1,$in2,8
+ or $in1,$tmp2
+ andi $tmp2,$in2,0xFF00
+ sll $in2,$in2,24
+ andi $tmp1,0xFF00
+ sll $tmp2,$tmp2,8
+ or $in2,$tmp0
+ srl $tmp0,$in3,24
+ or $tmp1,$tmp2
+ srl $tmp2,$in3,8
+ or $in2,$tmp1
+ andi $tmp1,$in3,0xFF00
+ sll $in3,$in3,24
+ andi $tmp2,0xFF00
+ sll $tmp1,$tmp1,8
+ or $in3,$tmp0
+ or $tmp2,$tmp1
+ or $in3,$tmp2
+# endif
+#endif
+ lui $tmp0,0x0fff
+ ori $tmp0,0xffff # 0x0fffffff
+ and $in0,$in0,$tmp0
+ subu $tmp0,3 # 0x0ffffffc
+ and $in1,$in1,$tmp0
+ and $in2,$in2,$tmp0
+ and $in3,$in3,$tmp0
+
+ sw $in0,20($ctx)
+ sw $in1,24($ctx)
+ sw $in2,28($ctx)
+ sw $in3,32($ctx)
+
+ srl $tmp1,$in1,2
+ srl $tmp2,$in2,2
+ srl $tmp3,$in3,2
+ addu $in1,$in1,$tmp1 # s1 = r1 + (r1 >> 2)
+ addu $in2,$in2,$tmp2
+ addu $in3,$in3,$tmp3
+ sw $in1,36($ctx)
+ sw $in2,40($ctx)
+ sw $in3,44($ctx)
+.Lno_key:
+ li $v0,0
+ jr $ra
+.end poly1305_init
+___
+{
+my $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? "0x00fff000" : "0x00ff0000";
+
+my ($h0,$h1,$h2,$h3,$h4, $r0,$r1,$r2,$r3, $rs1,$rs2,$rs3) =
+ ($s0,$s1,$s2,$s3,$s4, $s5,$s6,$s7,$s8, $s9,$s10,$s11);
+my ($d0,$d1,$d2,$d3) =
+ ($a4,$a5,$a6,$a7);
+my $shr = $t2; # used on R6
+my $one = $t2; # used on R2
+
+$code.=<<___;
+.globl poly1305_blocks
+.align 5
+.ent poly1305_blocks
+poly1305_blocks:
+ .frame $sp,16*4,$ra
+ .mask $SAVED_REGS_MASK,-4
+ .set noreorder
+ subu $sp, $sp,4*12
+ sw $s11,4*11($sp)
+ sw $s10,4*10($sp)
+ sw $s9, 4*9($sp)
+ sw $s8, 4*8($sp)
+ sw $s7, 4*7($sp)
+ sw $s6, 4*6($sp)
+ sw $s5, 4*5($sp)
+ sw $s4, 4*4($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue
+ sw $s3, 4*3($sp)
+ sw $s2, 4*2($sp)
+ sw $s1, 4*1($sp)
+ sw $s0, 4*0($sp)
+___
+$code.=<<___;
+ .set reorder
+
+ srl $len,4 # number of complete blocks
+ li $one,1
+ beqz $len,.Labort
+
+#if defined(_MIPS_ARCH_MIPS32R6)
+ andi $shr,$inp,3
+ subu $inp,$inp,$shr # align $inp
+ sll $shr,$shr,3 # byte to bit offset
+#endif
+
+ lw $h0,0($ctx) # load hash value
+ lw $h1,4($ctx)
+ lw $h2,8($ctx)
+ lw $h3,12($ctx)
+ lw $h4,16($ctx)
+
+ lw $r0,20($ctx) # load key
+ lw $r1,24($ctx)
+ lw $r2,28($ctx)
+ lw $r3,32($ctx)
+ lw $rs1,36($ctx)
+ lw $rs2,40($ctx)
+ lw $rs3,44($ctx)
+
+ sll $len,4
+ addu $len,$len,$inp # end of buffer
+ b .Loop
+
+.align 4
+.Loop:
+#if defined(_MIPS_ARCH_MIPS32R6)
+ lw $d0,0($inp) # load input
+ lw $d1,4($inp)
+ lw $d2,8($inp)
+ lw $d3,12($inp)
+ beqz $shr,.Laligned_inp
+
+ lw $t0,16($inp)
+ subu $t1,$zero,$shr
+# ifdef MIPSEB
+ sllv $d0,$d0,$shr
+ srlv $at,$d1,$t1
+ sllv $d1,$d1,$shr
+ or $d0,$d0,$at
+ srlv $at,$d2,$t1
+ sllv $d2,$d2,$shr
+ or $d1,$d1,$at
+ srlv $at,$d3,$t1
+ sllv $d3,$d3,$shr
+ or $d2,$d2,$at
+ srlv $t0,$t0,$t1
+ or $d3,$d3,$t0
+# else
+ srlv $d0,$d0,$shr
+ sllv $at,$d1,$t1
+ srlv $d1,$d1,$shr
+ or $d0,$d0,$at
+ sllv $at,$d2,$t1
+ srlv $d2,$d2,$shr
+ or $d1,$d1,$at
+ sllv $at,$d3,$t1
+ srlv $d3,$d3,$shr
+ or $d2,$d2,$at
+ sllv $t0,$t0,$t1
+ or $d3,$d3,$t0
+# endif
+.Laligned_inp:
+#else
+ lwl $d0,0+MSB($inp) # load input
+ lwl $d1,4+MSB($inp)
+ lwl $d2,8+MSB($inp)
+ lwl $d3,12+MSB($inp)
+ lwr $d0,0+LSB($inp)
+ lwr $d1,4+LSB($inp)
+ lwr $d2,8+LSB($inp)
+ lwr $d3,12+LSB($inp)
+#endif
+#ifdef MIPSEB
+# if defined(_MIPS_ARCH_MIPS32R2)
+ wsbh $d0,$d0 # byte swap
+ wsbh $d1,$d1
+ wsbh $d2,$d2
+ wsbh $d3,$d3
+ rotr $d0,$d0,16
+ rotr $d1,$d1,16
+ rotr $d2,$d2,16
+ rotr $d3,$d3,16
+# else
+ srl $at,$d0,24 # byte swap
+ srl $t0,$d0,8
+ andi $t1,$d0,0xFF00
+ sll $d0,$d0,24
+ andi $t0,0xFF00
+ sll $t1,$t1,8
+ or $d0,$at
+ srl $at,$d1,24
+ or $t0,$t1
+ srl $t1,$d1,8
+ or $d0,$t0
+ andi $t0,$d1,0xFF00
+ sll $d1,$d1,24
+ andi $t1,0xFF00
+ sll $t0,$t0,8
+ or $d1,$at
+ srl $at,$d2,24
+ or $t1,$t0
+ srl $t0,$d2,8
+ or $d1,$t1
+ andi $t1,$d2,0xFF00
+ sll $d2,$d2,24
+ andi $t0,0xFF00
+ sll $t1,$t1,8
+ or $d2,$at
+ srl $at,$d3,24
+ or $t0,$t1
+ srl $t1,$d3,8
+ or $d2,$t0
+ andi $t0,$d3,0xFF00
+ sll $d3,$d3,24
+ andi $t1,0xFF00
+ sll $t0,$t0,8
+ or $d3,$at
+ or $t1,$t0
+ or $d3,$t1
+# endif
+#endif
+ srl $t0,$h4,2 # modulo-scheduled reduction
+ andi $h4,$h4,3
+ sll $at,$t0,2
+
+ addu $d0,$d0,$h0 # accumulate input
+ addu $t0,$t0,$at
+ sltu $h0,$d0,$h0
+ addu $d0,$d0,$t0 # ... and residue
+ sltu $at,$d0,$t0
+
+ addu $d1,$d1,$h1
+ addu $h0,$h0,$at # carry
+ sltu $h1,$d1,$h1
+ addu $d1,$d1,$h0
+ sltu $h0,$d1,$h0
+
+ addu $d2,$d2,$h2
+ addu $h1,$h1,$h0 # carry
+ sltu $h2,$d2,$h2
+ addu $d2,$d2,$h1
+ sltu $h1,$d2,$h1
+
+ addu $d3,$d3,$h3
+ addu $h2,$h2,$h1 # carry
+ sltu $h3,$d3,$h3
+ addu $d3,$d3,$h2
+
+#if defined(_MIPS_ARCH_MIPS32R2) && !defined(_MIPS_ARCH_MIPS32R6)
+ multu $r0,$d0 # d0*r0
+ sltu $h2,$d3,$h2
+ maddu $rs3,$d1 # d1*s3
+ addu $h3,$h3,$h2 # carry
+ maddu $rs2,$d2 # d2*s2
+ addu $h4,$h4,$padbit
+ maddu $rs1,$d3 # d3*s1
+ addu $h4,$h4,$h3
+ mfhi $at
+ mflo $h0
+
+ multu $r1,$d0 # d0*r1
+ maddu $r0,$d1 # d1*r0
+ maddu $rs3,$d2 # d2*s3
+ maddu $rs2,$d3 # d3*s2
+ maddu $rs1,$h4 # h4*s1
+ maddu $at,$one # hi*1
+ mfhi $at
+ mflo $h1
+
+ multu $r2,$d0 # d0*r2
+ maddu $r1,$d1 # d1*r1
+ maddu $r0,$d2 # d2*r0
+ maddu $rs3,$d3 # d3*s3
+ maddu $rs2,$h4 # h4*s2
+ maddu $at,$one # hi*1
+ mfhi $at
+ mflo $h2
+
+ mul $t0,$r0,$h4 # h4*r0
+
+ multu $r3,$d0 # d0*r3
+ maddu $r2,$d1 # d1*r2
+ maddu $r1,$d2 # d2*r1
+ maddu $r0,$d3 # d3*r0
+ maddu $rs3,$h4 # h4*s3
+ maddu $at,$one # hi*1
+ mfhi $at
+ mflo $h3
+
+ addiu $inp,$inp,16
+
+ addu $h4,$t0,$at
+#else
+ multu ($r0,$d0) # d0*r0
+ mflo ($h0,$r0,$d0)
+ mfhi ($h1,$r0,$d0)
+
+ sltu $h2,$d3,$h2
+ addu $h3,$h3,$h2 # carry
+
+ multu ($rs3,$d1) # d1*s3
+ mflo ($at,$rs3,$d1)
+ mfhi ($t0,$rs3,$d1)
+
+ addu $h4,$h4,$padbit
+ addiu $inp,$inp,16
+ addu $h4,$h4,$h3
+
+ multu ($rs2,$d2) # d2*s2
+ mflo ($a3,$rs2,$d2)
+ mfhi ($t1,$rs2,$d2)
+ addu $h0,$h0,$at
+ addu $h1,$h1,$t0
+ multu ($rs1,$d3) # d3*s1
+ sltu $at,$h0,$at
+ addu $h1,$h1,$at
+
+ mflo ($at,$rs1,$d3)
+ mfhi ($t0,$rs1,$d3)
+ addu $h0,$h0,$a3
+ addu $h1,$h1,$t1
+ multu ($r1,$d0) # d0*r1
+ sltu $a3,$h0,$a3
+ addu $h1,$h1,$a3
+
+
+ mflo ($a3,$r1,$d0)
+ mfhi ($h2,$r1,$d0)
+ addu $h0,$h0,$at
+ addu $h1,$h1,$t0
+ multu ($r0,$d1) # d1*r0
+ sltu $at,$h0,$at
+ addu $h1,$h1,$at
+
+ mflo ($at,$r0,$d1)
+ mfhi ($t0,$r0,$d1)
+ addu $h1,$h1,$a3
+ sltu $a3,$h1,$a3
+ multu ($rs3,$d2) # d2*s3
+ addu $h2,$h2,$a3
+
+ mflo ($a3,$rs3,$d2)
+ mfhi ($t1,$rs3,$d2)
+ addu $h1,$h1,$at
+ addu $h2,$h2,$t0
+ multu ($rs2,$d3) # d3*s2
+ sltu $at,$h1,$at
+ addu $h2,$h2,$at
+
+ mflo ($at,$rs2,$d3)
+ mfhi ($t0,$rs2,$d3)
+ addu $h1,$h1,$a3
+ addu $h2,$h2,$t1
+ multu ($rs1,$h4) # h4*s1
+ sltu $a3,$h1,$a3
+ addu $h2,$h2,$a3
+
+ mflo ($a3,$rs1,$h4)
+ addu $h1,$h1,$at
+ addu $h2,$h2,$t0
+ multu ($r2,$d0) # d0*r2
+ sltu $at,$h1,$at
+ addu $h2,$h2,$at
+
+
+ mflo ($at,$r2,$d0)
+ mfhi ($h3,$r2,$d0)
+ addu $h1,$h1,$a3
+ sltu $a3,$h1,$a3
+ multu ($r1,$d1) # d1*r1
+ addu $h2,$h2,$a3
+
+ mflo ($a3,$r1,$d1)
+ mfhi ($t1,$r1,$d1)
+ addu $h2,$h2,$at
+ sltu $at,$h2,$at
+ multu ($r0,$d2) # d2*r0
+ addu $h3,$h3,$at
+
+ mflo ($at,$r0,$d2)
+ mfhi ($t0,$r0,$d2)
+ addu $h2,$h2,$a3
+ addu $h3,$h3,$t1
+ multu ($rs3,$d3) # d3*s3
+ sltu $a3,$h2,$a3
+ addu $h3,$h3,$a3
+
+ mflo ($a3,$rs3,$d3)
+ mfhi ($t1,$rs3,$d3)
+ addu $h2,$h2,$at
+ addu $h3,$h3,$t0
+ multu ($rs2,$h4) # h4*s2
+ sltu $at,$h2,$at
+ addu $h3,$h3,$at
+
+ mflo ($at,$rs2,$h4)
+ addu $h2,$h2,$a3
+ addu $h3,$h3,$t1
+ multu ($r3,$d0) # d0*r3
+ sltu $a3,$h2,$a3
+ addu $h3,$h3,$a3
+
+
+ mflo ($a3,$r3,$d0)
+ mfhi ($t1,$r3,$d0)
+ addu $h2,$h2,$at
+ sltu $at,$h2,$at
+ multu ($r2,$d1) # d1*r2
+ addu $h3,$h3,$at
+
+ mflo ($at,$r2,$d1)
+ mfhi ($t0,$r2,$d1)
+ addu $h3,$h3,$a3
+ sltu $a3,$h3,$a3
+ multu ($r0,$d3) # d3*r0
+ addu $t1,$t1,$a3
+
+ mflo ($a3,$r0,$d3)
+ mfhi ($d3,$r0,$d3)
+ addu $h3,$h3,$at
+ addu $t1,$t1,$t0
+ multu ($r1,$d2) # d2*r1
+ sltu $at,$h3,$at
+ addu $t1,$t1,$at
+
+ mflo ($at,$r1,$d2)
+ mfhi ($t0,$r1,$d2)
+ addu $h3,$h3,$a3
+ addu $t1,$t1,$d3
+ multu ($rs3,$h4) # h4*s3
+ sltu $a3,$h3,$a3
+ addu $t1,$t1,$a3
+
+ mflo ($a3,$rs3,$h4)
+ addu $h3,$h3,$at
+ addu $t1,$t1,$t0
+ multu ($r0,$h4) # h4*r0
+ sltu $at,$h3,$at
+ addu $t1,$t1,$at
+
+
+ mflo ($h4,$r0,$h4)
+ addu $h3,$h3,$a3
+ sltu $a3,$h3,$a3
+ addu $t1,$t1,$a3
+ addu $h4,$h4,$t1
+
+ li $padbit,1 # if we loop, padbit is 1
+#endif
+ bne $inp,$len,.Loop
+
+ sw $h0,0($ctx) # store hash value
+ sw $h1,4($ctx)
+ sw $h2,8($ctx)
+ sw $h3,12($ctx)
+ sw $h4,16($ctx)
+
+ .set noreorder
+.Labort:
+ lw $s11,4*11($sp)
+ lw $s10,4*10($sp)
+ lw $s9, 4*9($sp)
+ lw $s8, 4*8($sp)
+ lw $s7, 4*7($sp)
+ lw $s6, 4*6($sp)
+ lw $s5, 4*5($sp)
+ lw $s4, 4*4($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue
+ lw $s3, 4*3($sp)
+ lw $s2, 4*2($sp)
+ lw $s1, 4*1($sp)
+ lw $s0, 4*0($sp)
+___
+$code.=<<___;
+ jr $ra
+ addu $sp,$sp,4*12
+.end poly1305_blocks
+___
+}
+{
+my ($ctx,$mac,$nonce,$tmp4) = ($a0,$a1,$a2,$a3);
+
+$code.=<<___;
+.align 5
+.globl poly1305_emit
+.ent poly1305_emit
+poly1305_emit:
+ .frame $sp,0,$ra
+ .set reorder
+
+ lw $tmp4,16($ctx)
+ lw $tmp0,0($ctx)
+ lw $tmp1,4($ctx)
+ lw $tmp2,8($ctx)
+ lw $tmp3,12($ctx)
+
+ li $in0,-4 # final reduction
+ srl $ctx,$tmp4,2
+ and $in0,$in0,$tmp4
+ andi $tmp4,$tmp4,3
+ addu $ctx,$ctx,$in0
+
+ addu $tmp0,$tmp0,$ctx
+ sltu $ctx,$tmp0,$ctx
+ addiu $in0,$tmp0,5 # compare to modulus
+ addu $tmp1,$tmp1,$ctx
+ sltiu $in1,$in0,5
+ sltu $ctx,$tmp1,$ctx
+ addu $in1,$in1,$tmp1
+ addu $tmp2,$tmp2,$ctx
+ sltu $in2,$in1,$tmp1
+ sltu $ctx,$tmp2,$ctx
+ addu $in2,$in2,$tmp2
+ addu $tmp3,$tmp3,$ctx
+ sltu $in3,$in2,$tmp2
+ sltu $ctx,$tmp3,$ctx
+ addu $in3,$in3,$tmp3
+ addu $tmp4,$tmp4,$ctx
+ sltu $ctx,$in3,$tmp3
+ addu $ctx,$tmp4
+
+ srl $ctx,2 # see if it carried/borrowed
+ subu $ctx,$zero,$ctx
+
+ xor $in0,$tmp0
+ xor $in1,$tmp1
+ xor $in2,$tmp2
+ xor $in3,$tmp3
+ and $in0,$ctx
+ and $in1,$ctx
+ and $in2,$ctx
+ and $in3,$ctx
+ xor $in0,$tmp0
+ xor $in1,$tmp1
+ xor $in2,$tmp2
+ xor $in3,$tmp3
+
+ lw $tmp0,0($nonce) # load nonce
+ lw $tmp1,4($nonce)
+ lw $tmp2,8($nonce)
+ lw $tmp3,12($nonce)
+
+ addu $in0,$tmp0 # accumulate nonce
+ sltu $ctx,$in0,$tmp0
+
+ addu $in1,$tmp1
+ sltu $tmp1,$in1,$tmp1
+ addu $in1,$ctx
+ sltu $ctx,$in1,$ctx
+ addu $ctx,$tmp1
+
+ addu $in2,$tmp2
+ sltu $tmp2,$in2,$tmp2
+ addu $in2,$ctx
+ sltu $ctx,$in2,$ctx
+ addu $ctx,$tmp2
+
+ addu $in3,$tmp3
+ addu $in3,$ctx
+
+ srl $tmp0,$in0,8 # write mac value
+ srl $tmp1,$in0,16
+ srl $tmp2,$in0,24
+ sb $in0, 0($mac)
+ sb $tmp0,1($mac)
+ srl $tmp0,$in1,8
+ sb $tmp1,2($mac)
+ srl $tmp1,$in1,16
+ sb $tmp2,3($mac)
+ srl $tmp2,$in1,24
+ sb $in1, 4($mac)
+ sb $tmp0,5($mac)
+ srl $tmp0,$in2,8
+ sb $tmp1,6($mac)
+ srl $tmp1,$in2,16
+ sb $tmp2,7($mac)
+ srl $tmp2,$in2,24
+ sb $in2, 8($mac)
+ sb $tmp0,9($mac)
+ srl $tmp0,$in3,8
+ sb $tmp1,10($mac)
+ srl $tmp1,$in3,16
+ sb $tmp2,11($mac)
+ srl $tmp2,$in3,24
+ sb $in3, 12($mac)
+ sb $tmp0,13($mac)
+ sb $tmp1,14($mac)
+ sb $tmp2,15($mac)
+
+ jr $ra
+.end poly1305_emit
+.rdata
+.asciiz "Poly1305 for MIPS32, CRYPTOGAMS by \@dot-asm"
+.align 2
+___
+}
+}}}
+
+$output=pop and open STDOUT,">$output";
+print $code;
+close STDOUT;
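
The "final reduction" block in poly1305_emit computes g = h + 5 (equivalently h - (2^130 - 5) once truncated), derives an all-zeros or all-ones mask from the resulting carry, and then uses the xor/and/xor sequence to pick h or g without a branch. The C fragment below spells out that constant-time select for a single 32-bit limb; it is a sketch of the idea with invented names, not code from this patch.

#include <stdint.h>

/*
 * ge_p is 1 if h >= 2^130 - 5, else 0.  The mask becomes all-ones or
 * all-zeros, and h ^ ((g ^ h) & mask) is exactly the xor/and/xor trick
 * used in the assembly: it yields g when the mask is set, h otherwise.
 */
static uint32_t poly1305_ct_select(uint32_t h, uint32_t g, uint32_t ge_p)
{
	uint32_t mask = 0u - (ge_p & 1);

	return h ^ ((g ^ h) & mask);
}
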
diff --git a/arch/mips/dec/Makefile b/arch/mips/dec/Makefile
new file mode 100644
index 000000000..c9f62f1da
--- /dev/null
+++ b/arch/mips/dec/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the DECstation family specific parts of the kernel
+#
+
+obj-y := ecc-berr.o int-handler.o ioasic-irq.o kn01-berr.o \
+ kn02-irq.o kn02xa-berr.o platform.o reset.o setup.o time.o
+
+obj-$(CONFIG_TC) += tc.o
+obj-$(CONFIG_CPU_HAS_WB) += wbflush.o
diff --git a/arch/mips/dec/Platform b/arch/mips/dec/Platform
new file mode 100644
index 000000000..c82391e83
--- /dev/null
+++ b/arch/mips/dec/Platform
@@ -0,0 +1,7 @@
+#
+# DECstation family
+#
+cflags-$(CONFIG_MACH_DECSTATION) += \
+ -I$(srctree)/arch/mips/include/asm/mach-dec
+libs-$(CONFIG_MACH_DECSTATION) += arch/mips/dec/prom/
+load-$(CONFIG_MACH_DECSTATION) += 0xffffffff80040000
diff --git a/arch/mips/dec/ecc-berr.c b/arch/mips/dec/ecc-berr.c
new file mode 100644
index 000000000..1eb356fdd
--- /dev/null
+++ b/arch/mips/dec/ecc-berr.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Bus error event handling code for systems equipped with ECC
+ * handling logic, i.e. DECstation/DECsystem 5000/200 (KN02),
+ * 5000/240 (KN03), 5000/260 (KN05) and DECsystem 5900 (KN03),
+ * 5900/260 (KN05) systems.
+ *
+ * Copyright (c) 2003, 2005 Maciej W. Rozycki
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/cpu-type.h>
+#include <asm/irq_regs.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/traps.h>
+
+#include <asm/dec/ecc.h>
+#include <asm/dec/kn02.h>
+#include <asm/dec/kn03.h>
+#include <asm/dec/kn05.h>
+
+static volatile u32 *kn0x_erraddr;
+static volatile u32 *kn0x_chksyn;
+
+static inline void dec_ecc_be_ack(void)
+{
+ *kn0x_erraddr = 0; /* any write clears the IRQ */
+ iob();
+}
+
+static int dec_ecc_be_backend(struct pt_regs *regs, int is_fixup, int invoker)
+{
+ static const char excstr[] = "exception";
+ static const char intstr[] = "interrupt";
+ static const char cpustr[] = "CPU";
+ static const char dmastr[] = "DMA";
+ static const char readstr[] = "read";
+ static const char mreadstr[] = "memory read";
+ static const char writestr[] = "write";
+ static const char mwritstr[] = "partial memory write";
+ static const char timestr[] = "timeout";
+ static const char overstr[] = "overrun";
+ static const char eccstr[] = "ECC error";
+
+ const char *kind, *agent, *cycle, *event;
+ const char *status = "", *xbit = "", *fmt = "";
+ unsigned long address;
+ u16 syn = 0, sngl;
+
+ int i = 0;
+
+ u32 erraddr = *kn0x_erraddr;
+ u32 chksyn = *kn0x_chksyn;
+ int action = MIPS_BE_FATAL;
+
+	/* For non-ECC errors, ack ASAP so that any subsequent errors get caught. */
+ if ((erraddr & (KN0X_EAR_VALID | KN0X_EAR_ECCERR)) == KN0X_EAR_VALID)
+ dec_ecc_be_ack();
+
+ kind = invoker ? intstr : excstr;
+
+ if (!(erraddr & KN0X_EAR_VALID)) {
+ /* No idea what happened. */
+ printk(KERN_ALERT "Unidentified bus error %s\n", kind);
+ return action;
+ }
+
+ agent = (erraddr & KN0X_EAR_CPU) ? cpustr : dmastr;
+
+ if (erraddr & KN0X_EAR_ECCERR) {
+ /* An ECC error on a CPU or DMA transaction. */
+ cycle = (erraddr & KN0X_EAR_WRITE) ? mwritstr : mreadstr;
+ event = eccstr;
+ } else {
+ /* A CPU timeout or a DMA overrun. */
+ cycle = (erraddr & KN0X_EAR_WRITE) ? writestr : readstr;
+ event = (erraddr & KN0X_EAR_CPU) ? timestr : overstr;
+ }
+
+ address = erraddr & KN0X_EAR_ADDRESS;
+ /* For ECC errors on reads adjust for MT pipelining. */
+ if ((erraddr & (KN0X_EAR_WRITE | KN0X_EAR_ECCERR)) == KN0X_EAR_ECCERR)
+ address = (address & ~0xfffLL) | ((address - 5) & 0xfffLL);
+ address <<= 2;
+
+ /* Only CPU errors are fixable. */
+ if (erraddr & KN0X_EAR_CPU && is_fixup)
+ action = MIPS_BE_FIXUP;
+
+ if (erraddr & KN0X_EAR_ECCERR) {
+ static const u8 data_sbit[32] = {
+ 0x4f, 0x4a, 0x52, 0x54, 0x57, 0x58, 0x5b, 0x5d,
+ 0x23, 0x25, 0x26, 0x29, 0x2a, 0x2c, 0x31, 0x34,
+ 0x0e, 0x0b, 0x13, 0x15, 0x16, 0x19, 0x1a, 0x1c,
+ 0x62, 0x64, 0x67, 0x68, 0x6b, 0x6d, 0x70, 0x75,
+ };
+ static const u8 data_mbit[25] = {
+ 0x07, 0x0d, 0x1f,
+ 0x2f, 0x32, 0x37, 0x38, 0x3b, 0x3d, 0x3e,
+ 0x43, 0x45, 0x46, 0x49, 0x4c, 0x51, 0x5e,
+ 0x61, 0x6e, 0x73, 0x76, 0x79, 0x7a, 0x7c, 0x7f,
+ };
+ static const char sbestr[] = "corrected single";
+ static const char dbestr[] = "uncorrectable double";
+ static const char mbestr[] = "uncorrectable multiple";
+
+ if (!(address & 0x4))
+ syn = chksyn; /* Low bank. */
+ else
+ syn = chksyn >> 16; /* High bank. */
+
+ if (!(syn & KN0X_ESR_VLDLO)) {
+ /* Ack now, no rewrite will happen. */
+ dec_ecc_be_ack();
+
+ fmt = KERN_ALERT "%s" "invalid\n";
+ } else {
+ sngl = syn & KN0X_ESR_SNGLO;
+ syn &= KN0X_ESR_SYNLO;
+
+ /*
+ * Multibit errors may be tagged incorrectly;
+ * check the syndrome explicitly.
+ */
+ for (i = 0; i < 25; i++)
+ if (syn == data_mbit[i])
+ break;
+
+ if (i < 25) {
+ status = mbestr;
+ } else if (!sngl) {
+ status = dbestr;
+ } else {
+ volatile u32 *ptr =
+ (void *)CKSEG1ADDR(address);
+
+ *ptr = *ptr; /* Rewrite. */
+ iob();
+
+ status = sbestr;
+ action = MIPS_BE_DISCARD;
+ }
+
+ /* Ack now, now we've rewritten (or not). */
+ dec_ecc_be_ack();
+
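+ /*
+ * A syndrome with exactly one bit set points at a failing
+ * check bit; any other value is looked up in data_sbit[]
+ * to identify the failing data bit.
+ */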
+ if (syn && syn == (syn & -syn)) {
+ if (syn == 0x01) {
+ fmt = KERN_ALERT "%s"
+ "%#04x -- %s bit error "
+ "at check bit C%s\n";
+ xbit = "X";
+ } else {
+ fmt = KERN_ALERT "%s"
+ "%#04x -- %s bit error "
+ "at check bit C%s%u\n";
+ }
+ i = syn >> 2;
+ } else {
+ for (i = 0; i < 32; i++)
+ if (syn == data_sbit[i])
+ break;
+ if (i < 32)
+ fmt = KERN_ALERT "%s"
+ "%#04x -- %s bit error "
+ "at data bit D%s%u\n";
+ else
+ fmt = KERN_ALERT "%s"
+ "%#04x -- %s bit error\n";
+ }
+ }
+ }
+
+ if (action != MIPS_BE_FIXUP)
+ printk(KERN_ALERT "Bus error %s: %s %s %s at %#010lx\n",
+ kind, agent, cycle, event, address);
+
+ if (action != MIPS_BE_FIXUP && erraddr & KN0X_EAR_ECCERR)
+ printk(fmt, " ECC syndrome ", syn, status, xbit, i);
+
+ return action;
+}
+
+int dec_ecc_be_handler(struct pt_regs *regs, int is_fixup)
+{
+ return dec_ecc_be_backend(regs, is_fixup, 0);
+}
+
+irqreturn_t dec_ecc_be_interrupt(int irq, void *dev_id)
+{
+ struct pt_regs *regs = get_irq_regs();
+
+ int action = dec_ecc_be_backend(regs, 0, 1);
+
+ if (action == MIPS_BE_DISCARD)
+ return IRQ_HANDLED;
+
+ /*
+ * FIXME: Find the affected processes and kill them, otherwise
+ * we must die.
+ *
+ * The interrupt is delivered asynchronously, thus EPC and RA
+ * may be irrelevant, but they are printed for reference.
+ */
+ printk(KERN_ALERT "Fatal bus interrupt, epc == %08lx, ra == %08lx\n",
+ regs->cp0_epc, regs->regs[31]);
+ die("Unrecoverable bus error", regs);
+}
+
+
+/*
+ * Initialization differs a bit between KN02 and KN03/KN05, so we
+ * need two variants. Once set up, all systems can be handled the
+ * same way.
+ */
+static inline void dec_kn02_be_init(void)
+{
+ volatile u32 *csr = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR);
+
+ kn0x_erraddr = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_ERRADDR);
+ kn0x_chksyn = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CHKSYN);
+
+ /* Preset write-only bits of the Control Register cache. */
+ cached_kn02_csr = *csr | KN02_CSR_LEDS;
+
+ /* Set normal ECC detection and generation. */
+ cached_kn02_csr &= ~(KN02_CSR_DIAGCHK | KN02_CSR_DIAGGEN);
+ /* Enable ECC correction. */
+ cached_kn02_csr |= KN02_CSR_CORRECT;
+ *csr = cached_kn02_csr;
+ iob();
+}
+
+static inline void dec_kn03_be_init(void)
+{
+ volatile u32 *mcr = (void *)CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_MCR);
+ volatile u32 *mbcs = (void *)CKSEG1ADDR(KN4K_SLOT_BASE + KN4K_MB_CSR);
+
+ kn0x_erraddr = (void *)CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_ERRADDR);
+ kn0x_chksyn = (void *)CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_CHKSYN);
+
+ /*
+ * Set normal ECC detection and generation, enable ECC correction.
+ * For KN05 we also need to make sure EE (?) is enabled in the MB.
+ * Otherwise DBE/IBE exceptions would be masked but bus error
+ * interrupts would still arrive, resulting in an inevitable crash
+ * if get_dbe() triggers one.
+ */
+ *mcr = (*mcr & ~(KN03_MCR_DIAGCHK | KN03_MCR_DIAGGEN)) |
+ KN03_MCR_CORRECT;
+ if (current_cpu_type() == CPU_R4400SC)
+ *mbcs |= KN4K_MB_CSR_EE;
+ fast_iob();
+}
+
+void __init dec_ecc_be_init(void)
+{
+ if (mips_machtype == MACH_DS5000_200)
+ dec_kn02_be_init();
+ else
+ dec_kn03_be_init();
+
+ /* Clear any leftover errors from the firmware. */
+ dec_ecc_be_ack();
+}
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
new file mode 100644
index 000000000..011d1d678
--- /dev/null
+++ b/arch/mips/dec/int-handler.S
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1995, 1996, 1997 Paul M. Antoine and Harald Koerfgen
+ * Copyright (C) 2000, 2001, 2002, 2003, 2005 Maciej W. Rozycki
+ *
+ * Written by Ralf Baechle and Andreas Busse, modified for DECstation
+ * support by Paul Antoine and Harald Koerfgen.
+ *
+ * completely rewritten:
+ * Copyright (C) 1998 Harald Koerfgen
+ *
+ * Rewritten extensively for controller-driven IRQ support
+ * by Maciej W. Rozycki.
+ */
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+#include <asm/dec/interrupts.h>
+#include <asm/dec/ioasic_addrs.h>
+#include <asm/dec/ioasic_ints.h>
+#include <asm/dec/kn01.h>
+#include <asm/dec/kn02.h>
+#include <asm/dec/kn02xa.h>
+#include <asm/dec/kn03.h>
+
+#define KN02_CSR_BASE CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR)
+#define KN02XA_IOASIC_BASE CKSEG1ADDR(KN02XA_SLOT_BASE + IOASIC_IOCTL)
+#define KN03_IOASIC_BASE CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_IOCTL)
+
+ .text
+ .set noreorder
+/*
+ * plat_irq_dispatch: Interrupt handler for DECstations
+ *
+ * We follow the model in the Indy interrupt code by David Miller, where he
+ * says: a lot of complication here is taken away because:
+ *
+ * 1) We handle one interrupt and return, sitting in a loop
+ * and moving across all the pending IRQ bits in the cause
+ * register is _NOT_ the answer, the common case is one
+ * pending IRQ so optimize in that direction.
+ *
+ * 2) We need not check against bits in the status register
+ * IRQ mask, that would make this routine slow as hell.
+ *
+ * 3) Linux only thinks in terms of all IRQs on or all IRQs
+ * off, nothing in between like BSD spl() brain-damage.
+ *
+ * Furthermore, the IRQs on the DECstations look basically (barring
+ * software IRQs which we don't use at all) like...
+ *
+ * DS2100/3100's, aka kn01, aka Pmax:
+ *
+ * MIPS IRQ Source
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 SCSI
+ * 3 Lance Ethernet
+ * 4 DZ11 serial
+ * 5 RTC
+ * 6 Memory Controller & Video
+ * 7 FPU
+ *
+ * DS5000/200, aka kn02, aka 3max:
+ *
+ * MIPS IRQ Source
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 TurboChannel
+ * 3 RTC
+ * 4 Reserved
+ * 5 Memory Controller
+ * 6 Reserved
+ * 7 FPU
+ *
+ * DS5000/1xx's, aka kn02ba, aka 3min:
+ *
+ * MIPS IRQ Source
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 TurboChannel Slot 0
+ * 3 TurboChannel Slot 1
+ * 4 TurboChannel Slot 2
+ * 5 TurboChannel Slot 3 (ASIC)
+ * 6 Halt button
+ * 7 FPU/R4k timer
+ *
+ * DS5000/2x's, aka kn02ca, aka maxine:
+ *
+ * MIPS IRQ Source
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 Periodic Interrupt (100usec)
+ * 3 RTC
+ * 4 I/O write timeout
+ * 5 TurboChannel (ASIC)
+ * 6 Halt Keycode from Access.Bus keyboard (CTRL-ALT-ENTER)
+ * 7 FPU/R4k timer
+ *
+ * DS5000/2xx's, aka kn03, aka 3maxplus:
+ *
+ * MIPS IRQ Source
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 System Board (ASIC)
+ * 3 RTC
+ * 4 Reserved
+ * 5 Memory
+ * 6 Halt Button
+ * 7 FPU/R4k timer
+ *
+ * We handle the IRQ according to _our_ priority (see setup.c),
+ * then we just return. If multiple IRQs are pending then we will
+ * just take another exception, big deal.
+ */
+ .align 5
+ NESTED(plat_irq_dispatch, PT_SIZE, ra)
+ .set noreorder
+
+ /*
+ * Get pending Interrupts
+ */
+ mfc0 t0,CP0_CAUSE # get pending interrupts
+ mfc0 t1,CP0_STATUS
+#if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT)
+ lw t2,cpu_fpu_mask
+#endif
+ andi t0,ST0_IM # CAUSE.CE may be non-zero!
+ and t0,t1 # isolate allowed ones
+
+ beqz t0,spurious
+
+#if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT)
+ and t2,t0
+ bnez t2,fpu # handle FPU immediately
+#endif
+
+ /*
+ * Find irq with highest priority
+ */
+ # open coded PTR_LA t1, cpu_mask_nr_tbl
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+ # open coded la t1, cpu_mask_nr_tbl
+ lui t1, %hi(cpu_mask_nr_tbl)
+ addiu t1, %lo(cpu_mask_nr_tbl)
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
+#endif
+1: lw t2,(t1)
+ nop
+ and t2,t0
+ beqz t2,1b
+ addu t1,2*PTRSIZE # delay slot
+
+ /*
+ * Do the low-level stuff
+ */
+ lw a0,(-PTRSIZE)(t1)
+ nop
+ bgez a0,handle_it # irq_nr >= 0?
+ # irq_nr < 0: it is an address
+ nop
+ jr a0
+ # a trick to save a branch:
+ lui t2,(KN03_IOASIC_BASE>>16)&0xffff
+ # upper part of IOASIC Address
+
+/*
+ * Handle "IRQ Controller" Interrupts
+ * Masked interrupts are still visible and have to be masked "by hand".
+ */
+ FEXPORT(kn02_io_int) # 3max
+ lui t0,(KN02_CSR_BASE>>16)&0xffff
+ # get interrupt status and mask
+ lw t0,(t0)
+ nop
+ andi t1,t0,KN02_IRQ_ALL
+ b 1f
+ srl t0,16 # shift interrupt mask
+
+ FEXPORT(kn02xa_io_int) # 3min/maxine
+ lui t2,(KN02XA_IOASIC_BASE>>16)&0xffff
+ # upper part of IOASIC Address
+
+ FEXPORT(kn03_io_int) # 3max+ (t2 loaded earlier)
+ lw t0,IO_REG_SIR(t2) # get status: IOASIC sir
+ lw t1,IO_REG_SIMR(t2) # get mask: IOASIC simr
+ nop
+
+1: and t0,t1 # mask out allowed ones
+
+ beqz t0,spurious
+
+ /*
+ * Find irq with highest priority
+ */
+ # open coded PTR_LA t1,asic_mask_nr_tbl
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+ # open coded la t1, asic_mask_nr_tbl
+ lui t1, %hi(asic_mask_nr_tbl)
+ addiu t1, %lo(asic_mask_nr_tbl)
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
+#endif
+2: lw t2,(t1)
+ nop
+ and t2,t0
+ beq zero,t2,2b
+ addu t1,2*PTRSIZE # delay slot
+
+ /*
+ * Do the low-level stuff
+ */
+ lw a0,%lo(-PTRSIZE)(t1)
+ nop
+ bgez a0,handle_it # irq_nr >= 0?
+ # irq_nr < 0: it is an address
+ nop
+ jr a0
+ nop # delay slot
+
+/*
+ * Dispatch low-priority interrupts. We reconsider all status
+ * bits again, which looks like a loss, but it keeps the code
+ * simple and O(log n), so the extra work is compensated for.
+ */
+ FEXPORT(cpu_all_int) # HALT, timers, software junk
+ li a0,DEC_CPU_IRQ_BASE
+ srl t0,CAUSEB_IP
+ li t1,CAUSEF_IP>>CAUSEB_IP # mask
+ b 1f
+ li t2,4 # nr of bits / 2
+
+ FEXPORT(kn02_all_int) # impossible ?
+ li a0,KN02_IRQ_BASE
+ li t1,KN02_IRQ_ALL # mask
+ b 1f
+ li t2,4 # nr of bits / 2
+
+ FEXPORT(asic_all_int) # various I/O ASIC junk
+ li a0,IO_IRQ_BASE
+ li t1,IO_IRQ_ALL # mask
+ b 1f
+ li t2,8 # nr of bits / 2
+
+/*
+ * Dispatch DMA interrupts -- O(log n).
+ */
+ FEXPORT(asic_dma_int) # I/O ASIC DMA events
+ li a0,IO_IRQ_BASE+IO_INR_DMA
+ srl t0,IO_INR_DMA
+ li t1,IO_IRQ_DMA>>IO_INR_DMA # mask
+ li t2,8 # nr of bits / 2
+
+ /*
+ * Find irq with highest priority.
+ * Highest irq number takes precedence.
+ */
+1: srlv t3,t1,t2
+2: xor t1,t3
+ and t3,t0,t1
+ beqz t3,3f
+ nop
+ move t0,t3
+ addu a0,t2
+3: srl t2,1
+ bnez t2,2b
+ srlv t3,t1,t2
+
+handle_it:
+ j dec_irq_dispatch
+ nop
+
+#if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT)
+fpu:
+ lw t0,fpu_kstat_irq
+ nop
+ lw t1,(t0)
+ nop
+ addu t1,1
+ j handle_fpe_int
+ sw t1,(t0)
+#endif
+
+spurious:
+ j spurious_interrupt
+ nop
+ END(plat_irq_dispatch)
+
+/*
+ * Generic unimplemented interrupt routines -- cpu_mask_nr_tbl
+ * and asic_mask_nr_tbl are initialized to point all interrupts here.
+ * The tables are then filled in by machine-specific initialisation
+ * in dec_setup().
+ */
+ FEXPORT(dec_intr_unimplemented)
+ move a1,t0 # cheat's way of printing an arg!
+ ASM_PANIC("Unimplemented cpu interrupt! CP0_CAUSE: 0x%08x");
+
+ FEXPORT(asic_intr_unimplemented)
+ move a1,t0 # cheat's way of printing an arg!
+ ASM_PANIC("Unimplemented asic interrupt! ASIC ISR: 0x%08x");
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c
new file mode 100644
index 000000000..130eb67bd
--- /dev/null
+++ b/arch/mips/dec/ioasic-irq.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DEC I/O ASIC interrupts.
+ *
+ * Copyright (c) 2002, 2003, 2013 Maciej W. Rozycki
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/types.h>
+
+#include <asm/dec/ioasic.h>
+#include <asm/dec/ioasic_addrs.h>
+#include <asm/dec/ioasic_ints.h>
+
+static int ioasic_irq_base;
+
+static void unmask_ioasic_irq(struct irq_data *d)
+{
+ u32 simr;
+
+ simr = ioasic_read(IO_REG_SIMR);
+ simr |= (1 << (d->irq - ioasic_irq_base));
+ ioasic_write(IO_REG_SIMR, simr);
+}
+
+static void mask_ioasic_irq(struct irq_data *d)
+{
+ u32 simr;
+
+ simr = ioasic_read(IO_REG_SIMR);
+ simr &= ~(1 << (d->irq - ioasic_irq_base));
+ ioasic_write(IO_REG_SIMR, simr);
+}
+
+static void ack_ioasic_irq(struct irq_data *d)
+{
+ mask_ioasic_irq(d);
+ fast_iob();
+}
+
+static struct irq_chip ioasic_irq_type = {
+ .name = "IO-ASIC",
+ .irq_ack = ack_ioasic_irq,
+ .irq_mask = mask_ioasic_irq,
+ .irq_mask_ack = ack_ioasic_irq,
+ .irq_unmask = unmask_ioasic_irq,
+};
+
+static void clear_ioasic_dma_irq(struct irq_data *d)
+{
+ u32 sir;
+
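+ /*
+ * Write back a mask with only the requesting bit cleared;
+ * this acknowledges just this DMA request in the SIR.
+ */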
+ sir = ~(1 << (d->irq - ioasic_irq_base));
+ ioasic_write(IO_REG_SIR, sir);
+ fast_iob();
+}
+
+static struct irq_chip ioasic_dma_irq_type = {
+ .name = "IO-ASIC-DMA",
+ .irq_ack = clear_ioasic_dma_irq,
+ .irq_mask = mask_ioasic_irq,
+ .irq_unmask = unmask_ioasic_irq,
+ .irq_eoi = clear_ioasic_dma_irq,
+};
+
+/*
+ * The I/O ASIC implements two kinds of DMA interrupts: informational
+ * and error interrupts.
+ *
+ * The former do not stop DMA and should be cleared as soon as possible
+ * so that if they retrigger before the handler has completed, usually as
+ * a side effect of actions taken by the handler, then they are reissued.
+ * These use the `handle_edge_irq' handler that clears the request right
+ * away.
+ *
+ * The latter stop DMA and do not resume it until the interrupt has been
+ * cleared. This cannot be done until after a corrective action has been
+ * taken and this also means they will not retrigger. Therefore they use
+ * the `handle_fasteoi_irq' handler that only clears the request on the
+ * way out. Because MIPS processor interrupt inputs, one of which the I/O
+ * ASIC is cascaded to, are level-triggered, it is recommended that error
+ * DMA interrupt action handlers be registered with the IRQF_ONESHOT flag
+ * set so that they run with the interrupt line masked.
+ *
+ * This mask has `1' bits in the positions of informational interrupts.
+ */
+#define IO_IRQ_DMA_INFO \
+ (IO_IRQ_MASK(IO_INR_SCC0A_RXDMA) | \
+ IO_IRQ_MASK(IO_INR_SCC1A_RXDMA) | \
+ IO_IRQ_MASK(IO_INR_ISDN_TXDMA) | \
+ IO_IRQ_MASK(IO_INR_ISDN_RXDMA) | \
+ IO_IRQ_MASK(IO_INR_ASC_DMA))
+
+void __init init_ioasic_irqs(int base)
+{
+ int i;
+
+ /* Mask interrupts. */
+ ioasic_write(IO_REG_SIMR, 0);
+ fast_iob();
+
+ for (i = base; i < base + IO_INR_DMA; i++)
+ irq_set_chip_and_handler(i, &ioasic_irq_type,
+ handle_level_irq);
+ for (; i < base + IO_IRQ_LINES; i++)
+ irq_set_chip_and_handler(i, &ioasic_dma_irq_type,
+ 1 << (i - base) & IO_IRQ_DMA_INFO ?
+ handle_edge_irq : handle_fasteoi_irq);
+
+ ioasic_irq_base = base;
+}
diff --git a/arch/mips/dec/kn01-berr.c b/arch/mips/dec/kn01-berr.c
new file mode 100644
index 000000000..76efed7bc
--- /dev/null
+++ b/arch/mips/dec/kn01-berr.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Bus error event handling code for DECstation/DECsystem 3100
+ * and 2100 (KN01) systems equipped with parity error detection
+ * logic.
+ *
+ * Copyright (c) 2005 Maciej W. Rozycki
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/inst.h>
+#include <asm/irq_regs.h>
+#include <asm/mipsregs.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+
+#include <asm/dec/kn01.h>
+
+
+/* CP0 hazard avoidance. */
+#define BARRIER \
+ __asm__ __volatile__( \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ "nop\n\t" \
+ ".set pop\n\t")
+
+/*
+ * Bits 7:0 of the Control Register are write-only -- the
+ * corresponding bits of the Status Register have a different
+ * meaning. Hence we use a cache. It speeds up things a bit
+ * as well.
+ *
+ * There is no default value -- it has to be initialized.
+ */
+u16 cached_kn01_csr;
+static DEFINE_RAW_SPINLOCK(kn01_lock);
+
+
+static inline void dec_kn01_be_ack(void)
+{
+ volatile u16 *csr = (void *)CKSEG1ADDR(KN01_SLOT_BASE + KN01_CSR);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&kn01_lock, flags);
+
+ *csr = cached_kn01_csr | KN01_CSR_MEMERR; /* Clear bus IRQ. */
+ iob();
+
+ raw_spin_unlock_irqrestore(&kn01_lock, flags);
+}
+
+static int dec_kn01_be_backend(struct pt_regs *regs, int is_fixup, int invoker)
+{
+ volatile u32 *kn01_erraddr = (void *)CKSEG1ADDR(KN01_SLOT_BASE +
+ KN01_ERRADDR);
+
+ static const char excstr[] = "exception";
+ static const char intstr[] = "interrupt";
+ static const char cpustr[] = "CPU";
+ static const char mreadstr[] = "memory read";
+ static const char readstr[] = "read";
+ static const char writestr[] = "write";
+ static const char timestr[] = "timeout";
+ static const char paritystr[] = "parity error";
+
+ int data = regs->cp0_cause & 4;
+ unsigned int __user *pc = (unsigned int __user *)regs->cp0_epc +
+ ((regs->cp0_cause & CAUSEF_BD) != 0);
+ union mips_instruction insn;
+ unsigned long entrylo, offset;
+ long asid, entryhi, vaddr;
+
+ const char *kind, *agent, *cycle, *event;
+ unsigned long address;
+
+ u32 erraddr = *kn01_erraddr;
+ int action = MIPS_BE_FATAL;
+
+ /* Ack ASAP, so that any subsequent errors get caught. */
+ dec_kn01_be_ack();
+
+ kind = invoker ? intstr : excstr;
+
+ agent = cpustr;
+
+ if (invoker)
+ address = erraddr;
+ else {
+ /* Bloody hardware doesn't record the address for reads... */
+ if (data) {
+ /* This never faults. */
+ __get_user(insn.word, pc);
+ vaddr = regs->regs[insn.i_format.rs] +
+ insn.i_format.simmediate;
+ } else
+ vaddr = (long)pc;
+ if (KSEGX(vaddr) == CKSEG0 || KSEGX(vaddr) == CKSEG1)
+ address = CPHYSADDR(vaddr);
+ else {
+ /* Peek at what physical address the CPU used. */
+ asid = read_c0_entryhi();
+ entryhi = asid & (PAGE_SIZE - 1);
+ entryhi |= vaddr & ~(PAGE_SIZE - 1);
+ write_c0_entryhi(entryhi);
+ BARRIER;
+ tlb_probe();
+ /* No need to check for presence. */
+ tlb_read();
+ entrylo = read_c0_entrylo0();
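+ /* Restore the previously saved EntryHi (ASID). */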
+ write_c0_entryhi(asid);
+ offset = vaddr & (PAGE_SIZE - 1);
+ address = (entrylo & ~(PAGE_SIZE - 1)) | offset;
+ }
+ }
+
+ /* Treat low 256MB as memory, high -- as I/O. */
+ if (address < 0x10000000) {
+ cycle = mreadstr;
+ event = paritystr;
+ } else {
+ cycle = invoker ? writestr : readstr;
+ event = timestr;
+ }
+
+ if (is_fixup)
+ action = MIPS_BE_FIXUP;
+
+ if (action != MIPS_BE_FIXUP)
+ printk(KERN_ALERT "Bus error %s: %s %s %s at %#010lx\n",
+ kind, agent, cycle, event, address);
+
+ return action;
+}
+
+int dec_kn01_be_handler(struct pt_regs *regs, int is_fixup)
+{
+ return dec_kn01_be_backend(regs, is_fixup, 0);
+}
+
+irqreturn_t dec_kn01_be_interrupt(int irq, void *dev_id)
+{
+ volatile u16 *csr = (void *)CKSEG1ADDR(KN01_SLOT_BASE + KN01_CSR);
+ struct pt_regs *regs = get_irq_regs();
+ int action;
+
+ if (!(*csr & KN01_CSR_MEMERR))
+ return IRQ_NONE; /* Must have been video. */
+
+ action = dec_kn01_be_backend(regs, 0, 1);
+
+ if (action == MIPS_BE_DISCARD)
+ return IRQ_HANDLED;
+
+ /*
+ * FIXME: Find the affected processes and kill them, otherwise
+ * we must die.
+ *
+ * The interrupt is delivered asynchronously, thus EPC and RA
+ * may be irrelevant, but they are printed for reference.
+ */
+ printk(KERN_ALERT "Fatal bus interrupt, epc == %08lx, ra == %08lx\n",
+ regs->cp0_epc, regs->regs[31]);
+ die("Unrecoverable bus error", regs);
+}
+
+
+void __init dec_kn01_be_init(void)
+{
+ volatile u16 *csr = (void *)CKSEG1ADDR(KN01_SLOT_BASE + KN01_CSR);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&kn01_lock, flags);
+
+ /* Preset write-only bits of the Control Register cache. */
+ cached_kn01_csr = *csr;
+ cached_kn01_csr &= KN01_CSR_STATUS | KN01_CSR_PARDIS | KN01_CSR_TXDIS;
+ cached_kn01_csr |= KN01_CSR_LEDS;
+
+ /* Enable parity error detection. */
+ cached_kn01_csr &= ~KN01_CSR_PARDIS;
+ *csr = cached_kn01_csr;
+ iob();
+
+ raw_spin_unlock_irqrestore(&kn01_lock, flags);
+
+ /* Clear any leftover errors from the firmware. */
+ dec_kn01_be_ack();
+}
diff --git a/arch/mips/dec/kn02-irq.c b/arch/mips/dec/kn02-irq.c
new file mode 100644
index 000000000..7e18de574
--- /dev/null
+++ b/arch/mips/dec/kn02-irq.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DECstation 5000/200 (KN02) Control and Status Register
+ * interrupts.
+ *
+ * Copyright (c) 2002, 2003, 2005 Maciej W. Rozycki
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/types.h>
+
+#include <asm/dec/kn02.h>
+
+
+/*
+ * Bits 7:0 of the Control Register are write-only -- the
+ * corresponding bits of the Status Register have a different
+ * meaning. Hence we use a cache. It speeds up things a bit
+ * as well.
+ *
+ * There is no default value -- it has to be initialized.
+ */
+u32 cached_kn02_csr;
+
+static int kn02_irq_base;
+
+static void unmask_kn02_irq(struct irq_data *d)
+{
+ volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE +
+ KN02_CSR);
+
+ cached_kn02_csr |= (1 << (d->irq - kn02_irq_base + 16));
+ *csr = cached_kn02_csr;
+}
+
+static void mask_kn02_irq(struct irq_data *d)
+{
+ volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE +
+ KN02_CSR);
+
+ cached_kn02_csr &= ~(1 << (d->irq - kn02_irq_base + 16));
+ *csr = cached_kn02_csr;
+}
+
+static void ack_kn02_irq(struct irq_data *d)
+{
+ mask_kn02_irq(d);
+ iob();
+}
+
+static struct irq_chip kn02_irq_type = {
+ .name = "KN02-CSR",
+ .irq_ack = ack_kn02_irq,
+ .irq_mask = mask_kn02_irq,
+ .irq_mask_ack = ack_kn02_irq,
+ .irq_unmask = unmask_kn02_irq,
+};
+
+void __init init_kn02_irqs(int base)
+{
+ volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE +
+ KN02_CSR);
+ int i;
+
+ /* Mask interrupts. */
+ cached_kn02_csr &= ~KN02_CSR_IOINTEN;
+ *csr = cached_kn02_csr;
+ iob();
+
+ for (i = base; i < base + KN02_IRQ_LINES; i++)
+ irq_set_chip_and_handler(i, &kn02_irq_type, handle_level_irq);
+
+ kn02_irq_base = base;
+}
diff --git a/arch/mips/dec/kn02xa-berr.c b/arch/mips/dec/kn02xa-berr.c
new file mode 100644
index 000000000..9699fc4e6
--- /dev/null
+++ b/arch/mips/dec/kn02xa-berr.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Bus error event handling code for 5000-series systems equipped
+ * with parity error detection logic, i.e. DECstation/DECsystem
+ * 5000/120, /125, /133 (KN02-BA), 5000/150 (KN04-BA) and Personal
+ * DECstation/DECsystem 5000/20, /25, /33 (KN02-CA), 5000/50
+ * (KN04-CA) systems.
+ *
+ * Copyright (c) 2005 Maciej W. Rozycki
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <asm/addrspace.h>
+#include <asm/cpu-type.h>
+#include <asm/irq_regs.h>
+#include <asm/ptrace.h>
+#include <asm/traps.h>
+
+#include <asm/dec/kn02ca.h>
+#include <asm/dec/kn02xa.h>
+#include <asm/dec/kn05.h>
+
+static inline void dec_kn02xa_be_ack(void)
+{
+ volatile u32 *mer = (void *)CKSEG1ADDR(KN02XA_MER);
+ volatile u32 *mem_intr = (void *)CKSEG1ADDR(KN02XA_MEM_INTR);
+
+ *mer = KN02CA_MER_INTR; /* Clear errors; keep the ARC IRQ. */
+ *mem_intr = 0; /* Any write clears the bus IRQ. */
+ iob();
+}
+
+static int dec_kn02xa_be_backend(struct pt_regs *regs, int is_fixup,
+ int invoker)
+{
+ volatile u32 *kn02xa_mer = (void *)CKSEG1ADDR(KN02XA_MER);
+ volatile u32 *kn02xa_ear = (void *)CKSEG1ADDR(KN02XA_EAR);
+
+ static const char excstr[] = "exception";
+ static const char intstr[] = "interrupt";
+ static const char cpustr[] = "CPU";
+ static const char mreadstr[] = "memory read";
+ static const char readstr[] = "read";
+ static const char writestr[] = "write";
+ static const char timestr[] = "timeout";
+ static const char paritystr[] = "parity error";
+ static const char lanestat[][4] = { " OK", "BAD" };
+
+ const char *kind, *agent, *cycle, *event;
+ unsigned long address;
+
+ u32 mer = *kn02xa_mer;
+ u32 ear = *kn02xa_ear;
+ int action = MIPS_BE_FATAL;
+
+ /* Ack ASAP, so that any subsequent errors get caught. */
+ dec_kn02xa_be_ack();
+
+ kind = invoker ? intstr : excstr;
+
+ /* No DMA errors? */
+ agent = cpustr;
+
+ address = ear & KN02XA_EAR_ADDRESS;
+
+ /* Low 256MB is decoded as memory, high -- as TC. */
+ if (address < 0x10000000) {
+ cycle = mreadstr;
+ event = paritystr;
+ } else {
+ cycle = invoker ? writestr : readstr;
+ event = timestr;
+ }
+
+ if (is_fixup)
+ action = MIPS_BE_FIXUP;
+
+ if (action != MIPS_BE_FIXUP)
+ printk(KERN_ALERT "Bus error %s: %s %s %s at %#010lx\n",
+ kind, agent, cycle, event, address);
+
+ if (action != MIPS_BE_FIXUP && address < 0x10000000)
+ printk(KERN_ALERT " Byte lane status %#3x -- "
+ "#3: %s, #2: %s, #1: %s, #0: %s\n",
+ (mer & KN02XA_MER_BYTERR) >> 8,
+ lanestat[(mer & KN02XA_MER_BYTERR_3) != 0],
+ lanestat[(mer & KN02XA_MER_BYTERR_2) != 0],
+ lanestat[(mer & KN02XA_MER_BYTERR_1) != 0],
+ lanestat[(mer & KN02XA_MER_BYTERR_0) != 0]);
+
+ return action;
+}
+
+int dec_kn02xa_be_handler(struct pt_regs *regs, int is_fixup)
+{
+ return dec_kn02xa_be_backend(regs, is_fixup, 0);
+}
+
+irqreturn_t dec_kn02xa_be_interrupt(int irq, void *dev_id)
+{
+ struct pt_regs *regs = get_irq_regs();
+ int action = dec_kn02xa_be_backend(regs, 0, 1);
+
+ if (action == MIPS_BE_DISCARD)
+ return IRQ_HANDLED;
+
+ /*
+ * FIXME: Find the affected processes and kill them, otherwise
+ * we must die.
+ *
+ * The interrupt is delivered asynchronously, thus EPC and RA
+ * may be irrelevant, but they are printed for reference.
+ */
+ printk(KERN_ALERT "Fatal bus interrupt, epc == %08lx, ra == %08lx\n",
+ regs->cp0_epc, regs->regs[31]);
+ die("Unrecoverable bus error", regs);
+}
+
+
+void __init dec_kn02xa_be_init(void)
+{
+ volatile u32 *mbcs = (void *)CKSEG1ADDR(KN4K_SLOT_BASE + KN4K_MB_CSR);
+
+ /* For KN04 we need to make sure EE (?) is enabled in the MB. */
+ if (current_cpu_type() == CPU_R4000SC)
+ *mbcs |= KN4K_MB_CSR_EE;
+ fast_iob();
+
+ /* Clear any leftover errors from the firmware. */
+ dec_kn02xa_be_ack();
+}
diff --git a/arch/mips/dec/platform.c b/arch/mips/dec/platform.c
new file mode 100644
index 000000000..c4fcb8c58
--- /dev/null
+++ b/arch/mips/dec/platform.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DEC platform devices.
+ *
+ * Copyright (c) 2014 Maciej W. Rozycki
+ */
+
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/mc146818rtc.h>
+#include <linux/platform_device.h>
+
+static struct resource dec_rtc_resources[] = {
+ {
+ .name = "rtc",
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct cmos_rtc_board_info dec_rtc_info = {
+ .flags = CMOS_RTC_FLAGS_NOFREQ,
+ .address_space = 64,
+};
+
+static struct platform_device dec_rtc_device = {
+ .name = "rtc_cmos",
+ .id = PLATFORM_DEVID_NONE,
+ .dev.platform_data = &dec_rtc_info,
+ .resource = dec_rtc_resources,
+ .num_resources = ARRAY_SIZE(dec_rtc_resources),
+};
+
+static int __init dec_add_devices(void)
+{
+ dec_rtc_resources[0].start = RTC_PORT(0);
+ dec_rtc_resources[0].end = RTC_PORT(0) + dec_kn_slot_size - 1;
+ return platform_device_register(&dec_rtc_device);
+}
+
+device_initcall(dec_add_devices);
diff --git a/arch/mips/dec/prom/Makefile b/arch/mips/dec/prom/Makefile
new file mode 100644
index 000000000..2bad87551
--- /dev/null
+++ b/arch/mips/dec/prom/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the DECstation prom monitor library routines
+# under Linux.
+#
+
+lib-y += init.o memory.o cmdline.o identify.o console.o
+
+lib-$(CONFIG_CPU_R3000) += locore.o
diff --git a/arch/mips/dec/prom/cmdline.c b/arch/mips/dec/prom/cmdline.c
new file mode 100644
index 000000000..3ed63280a
--- /dev/null
+++ b/arch/mips/dec/prom/cmdline.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * cmdline.c: read the command line passed to us by the PROM.
+ *
+ * Copyright (C) 1998 Harald Koerfgen
+ * Copyright (C) 2002, 2004 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/bootinfo.h>
+#include <asm/dec/prom.h>
+
+#undef PROM_DEBUG
+
+void __init prom_init_cmdline(s32 argc, s32 *argv, u32 magic)
+{
+ char *arg;
+ int start_arg, i;
+
+ /*
+ * collect args and prepare cmd_line
+ */
+ if (!prom_is_rex(magic))
+ start_arg = 1;
+ else
+ start_arg = 2;
+ for (i = start_arg; i < argc; i++) {
+ arg = (void *)(long)(argv[i]);
+ strcat(arcs_cmdline, arg);
+ if (i < (argc - 1))
+ strcat(arcs_cmdline, " ");
+ }
+
+#ifdef PROM_DEBUG
+ printk("arcs_cmdline: %s\n", &(arcs_cmdline[0]));
+#endif
+}
diff --git a/arch/mips/dec/prom/console.c b/arch/mips/dec/prom/console.c
new file mode 100644
index 000000000..31a8441d8
--- /dev/null
+++ b/arch/mips/dec/prom/console.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DECstation PROM-based early console support.
+ *
+ * Copyright (C) 2004, 2007 Maciej W. Rozycki
+ */
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/dec/prom.h>
+
+static void __init prom_console_write(struct console *con, const char *s,
+ unsigned int c)
+{
+ char buf[81];
+ unsigned int chunk = sizeof(buf) - 1;
+
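+ /*
+ * The console data need not be NUL-terminated, so feed
+ * prom_printf() bounded, NUL-terminated chunks.
+ */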
+ while (c > 0) {
+ if (chunk > c)
+ chunk = c;
+ memcpy(buf, s, chunk);
+ buf[chunk] = '\0';
+ prom_printf("%s", buf);
+ s += chunk;
+ c -= chunk;
+ }
+}
+
+static struct console promcons __initdata = {
+ .name = "prom",
+ .write = prom_console_write,
+ .flags = CON_BOOT | CON_PRINTBUFFER,
+ .index = -1,
+};
+
+void __init register_prom_console(void)
+{
+ register_console(&promcons);
+}
diff --git a/arch/mips/dec/prom/dectypes.h b/arch/mips/dec/prom/dectypes.h
new file mode 100644
index 000000000..9fcbcc7cd
--- /dev/null
+++ b/arch/mips/dec/prom/dectypes.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DECTYPES
+#define DECTYPES
+
+#define DS2100_3100 1 /* DS2100/3100 Pmax */
+#define DS5000_200 2 /* DS5000/200 3max */
+#define DS5000_1XX 3 /* DS5000/1xx kmin */
+#define DS5000_2X0 4 /* DS5000/2x0 3max+ */
+#define DS5800 5 /* DS5800 Isis */
+#define DS5400 6 /* DS5400 MIPSfair */
+#define DS5000_XX 7 /* DS5000/xx maxine */
+#define DS5500 11 /* DS5500 MIPSfair-2 */
+#define DS5100 12 /* DS5100 MIPSmate */
+
+#endif
diff --git a/arch/mips/dec/prom/identify.c b/arch/mips/dec/prom/identify.c
new file mode 100644
index 000000000..80cd14cd1
--- /dev/null
+++ b/arch/mips/dec/prom/identify.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * identify.c: machine identification code.
+ *
+ * Copyright (C) 1998 Harald Koerfgen and Paul M. Antoine
+ * Copyright (C) 2002, 2003, 2004, 2005 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mc146818rtc.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/bootinfo.h>
+
+#include <asm/dec/ioasic.h>
+#include <asm/dec/ioasic_addrs.h>
+#include <asm/dec/kn01.h>
+#include <asm/dec/kn02.h>
+#include <asm/dec/kn02ba.h>
+#include <asm/dec/kn02ca.h>
+#include <asm/dec/kn03.h>
+#include <asm/dec/kn230.h>
+#include <asm/dec/prom.h>
+#include <asm/dec/system.h>
+
+#include "dectypes.h"
+
+static const char *dec_system_strings[] = {
+ [MACH_DSUNKNOWN] "unknown DECstation",
+ [MACH_DS23100] "DECstation 2100/3100",
+ [MACH_DS5100] "DECsystem 5100",
+ [MACH_DS5000_200] "DECstation 5000/200",
+ [MACH_DS5000_1XX] "DECstation 5000/1xx",
+ [MACH_DS5000_XX] "Personal DECstation 5000/xx",
+ [MACH_DS5000_2X0] "DECstation 5000/2x0",
+ [MACH_DS5400] "DECsystem 5400",
+ [MACH_DS5500] "DECsystem 5500",
+ [MACH_DS5800] "DECsystem 5800",
+ [MACH_DS5900] "DECsystem 5900",
+};
+
+const char *get_system_type(void)
+{
+#define STR_BUF_LEN 64
+ static char system[STR_BUF_LEN];
+ static int called = 0;
+
+ if (called == 0) {
+ called = 1;
+ snprintf(system, STR_BUF_LEN, "Digital %s",
+ dec_system_strings[mips_machtype]);
+ }
+
+ return system;
+}
+
+
+/*
+ * Set up essential system-specific memory addresses. We need them
+ * early. Semantically the functions belong to prom/init.c, but they
+ * are compact enough that we want them inlined. --macro
+ */
+volatile u8 *dec_rtc_base;
+
+EXPORT_SYMBOL(dec_rtc_base);
+
+static inline void prom_init_kn01(void)
+{
+ dec_kn_slot_base = KN01_SLOT_BASE;
+ dec_kn_slot_size = KN01_SLOT_SIZE;
+
+ dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + KN01_RTC);
+}
+
+static inline void prom_init_kn230(void)
+{
+ dec_kn_slot_base = KN01_SLOT_BASE;
+ dec_kn_slot_size = KN01_SLOT_SIZE;
+
+ dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + KN01_RTC);
+}
+
+static inline void prom_init_kn02(void)
+{
+ dec_kn_slot_base = KN02_SLOT_BASE;
+ dec_kn_slot_size = KN02_SLOT_SIZE;
+ dec_tc_bus = 1;
+
+ dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + KN02_RTC);
+}
+
+static inline void prom_init_kn02xa(void)
+{
+ dec_kn_slot_base = KN02XA_SLOT_BASE;
+ dec_kn_slot_size = IOASIC_SLOT_SIZE;
+ dec_tc_bus = 1;
+
+ ioasic_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_IOCTL);
+ dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_TOY);
+}
+
+static inline void prom_init_kn03(void)
+{
+ dec_kn_slot_base = KN03_SLOT_BASE;
+ dec_kn_slot_size = IOASIC_SLOT_SIZE;
+ dec_tc_bus = 1;
+
+ ioasic_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_IOCTL);
+ dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_TOY);
+}
+
+
+void __init prom_identify_arch(u32 magic)
+{
+ unsigned char dec_cpunum, dec_firmrev, dec_etc, dec_systype;
+ u32 dec_sysid;
+
+ if (!prom_is_rex(magic)) {
+ dec_sysid = simple_strtoul(prom_getenv("systype"),
+ (char **)0, 0);
+ } else {
+ dec_sysid = rex_getsysid();
+ if (dec_sysid == 0) {
+ printk("Zero sysid returned from PROM! "
+ "Assuming a PMAX-like machine.\n");
+ dec_sysid = 1;
+ }
+ }
+
+ dec_cpunum = (dec_sysid & 0xff000000) >> 24;
+ dec_systype = (dec_sysid & 0xff0000) >> 16;
+ dec_firmrev = (dec_sysid & 0xff00) >> 8;
+ dec_etc = dec_sysid & 0xff;
+
+ /*
+ * FIXME: This may not be an exhaustive list of DECstations/Servers!
+ * Put all model-specific initialisation calls here.
+ */
+ switch (dec_systype) {
+ case DS2100_3100:
+ mips_machtype = MACH_DS23100;
+ prom_init_kn01();
+ break;
+ case DS5100: /* DS5100 MIPSMATE */
+ mips_machtype = MACH_DS5100;
+ prom_init_kn230();
+ break;
+ case DS5000_200: /* DS5000 3max */
+ mips_machtype = MACH_DS5000_200;
+ prom_init_kn02();
+ break;
+ case DS5000_1XX: /* DS5000/100 3min */
+ mips_machtype = MACH_DS5000_1XX;
+ prom_init_kn02xa();
+ break;
+ case DS5000_2X0: /* DS5000/240 3max+ or DS5900 bigmax */
+ mips_machtype = MACH_DS5000_2X0;
+ prom_init_kn03();
+ if (!(ioasic_read(IO_REG_SIR) & KN03_IO_INR_3MAXP))
+ mips_machtype = MACH_DS5900;
+ break;
+ case DS5000_XX: /* Personal DS5000/xx maxine */
+ mips_machtype = MACH_DS5000_XX;
+ prom_init_kn02xa();
+ break;
+ case DS5800: /* DS5800 Isis */
+ mips_machtype = MACH_DS5800;
+ break;
+ case DS5400: /* DS5400 MIPSfair */
+ mips_machtype = MACH_DS5400;
+ break;
+ case DS5500: /* DS5500 MIPSfair-2 */
+ mips_machtype = MACH_DS5500;
+ break;
+ default:
+ mips_machtype = MACH_DSUNKNOWN;
+ break;
+ }
+
+ if (mips_machtype == MACH_DSUNKNOWN)
+ printk("This is an %s, id is %x\n",
+ dec_system_strings[mips_machtype], dec_systype);
+ else
+ printk("This is a %s\n", dec_system_strings[mips_machtype]);
+}
diff --git a/arch/mips/dec/prom/init.c b/arch/mips/dec/prom/init.c
new file mode 100644
index 000000000..cc988bbd2
--- /dev/null
+++ b/arch/mips/dec/prom/init.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * init.c: PROM library initialisation code.
+ *
+ * Copyright (C) 1998 Harald Koerfgen
+ * Copyright (C) 2002, 2004 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/cpu-type.h>
+#include <asm/processor.h>
+
+#include <asm/dec/prom.h>
+
+
+int (*__rex_bootinit)(void);
+int (*__rex_bootread)(void);
+int (*__rex_getbitmap)(memmap *);
+unsigned long *(*__rex_slot_address)(int);
+void *(*__rex_gettcinfo)(void);
+int (*__rex_getsysid)(void);
+void (*__rex_clear_cache)(void);
+
+int (*__prom_getchar)(void);
+char *(*__prom_getenv)(char *);
+int (*__prom_printf)(char *, ...);
+
+int (*__pmax_open)(char*, int);
+int (*__pmax_lseek)(int, long, int);
+int (*__pmax_read)(int, void *, int);
+int (*__pmax_close)(int);
+
+
+/*
+ * Detect which PROM the DECSTATION has, and set the callback vectors
+ * appropriately.
+ */
+void __init which_prom(s32 magic, s32 *prom_vec)
+{
+ /*
+ * No sign of the REX PROM's magic number means we assume a non-REX
+ * machine (i.e. we're on a DS2100/3100, DS5100 or DS5000/2xx)
+ */
+ if (prom_is_rex(magic)) {
+ /*
+ * Set up prom abstraction structure with REX entry points.
+ */
+ __rex_bootinit =
+ (void *)(long)*(prom_vec + REX_PROM_BOOTINIT);
+ __rex_bootread =
+ (void *)(long)*(prom_vec + REX_PROM_BOOTREAD);
+ __rex_getbitmap =
+ (void *)(long)*(prom_vec + REX_PROM_GETBITMAP);
+ __prom_getchar =
+ (void *)(long)*(prom_vec + REX_PROM_GETCHAR);
+ __prom_getenv =
+ (void *)(long)*(prom_vec + REX_PROM_GETENV);
+ __rex_getsysid =
+ (void *)(long)*(prom_vec + REX_PROM_GETSYSID);
+ __rex_gettcinfo =
+ (void *)(long)*(prom_vec + REX_PROM_GETTCINFO);
+ __prom_printf =
+ (void *)(long)*(prom_vec + REX_PROM_PRINTF);
+ __rex_slot_address =
+ (void *)(long)*(prom_vec + REX_PROM_SLOTADDR);
+ __rex_clear_cache =
+ (void *)(long)*(prom_vec + REX_PROM_CLEARCACHE);
+ } else {
+ /*
+ * Set up prom abstraction structure with non-REX entry points.
+ */
+ __prom_getchar = (void *)PMAX_PROM_GETCHAR;
+ __prom_getenv = (void *)PMAX_PROM_GETENV;
+ __prom_printf = (void *)PMAX_PROM_PRINTF;
+ __pmax_open = (void *)PMAX_PROM_OPEN;
+ __pmax_lseek = (void *)PMAX_PROM_LSEEK;
+ __pmax_read = (void *)PMAX_PROM_READ;
+ __pmax_close = (void *)PMAX_PROM_CLOSE;
+ }
+}
+
+void __init prom_init(void)
+{
+ extern void dec_machine_halt(void);
+ static const char cpu_msg[] __initconst =
+ "Sorry, this kernel is compiled for a wrong CPU type!\n";
+ s32 argc = fw_arg0;
+ s32 *argv = (void *)fw_arg1;
+ u32 magic = fw_arg2;
+ s32 *prom_vec = (void *)fw_arg3;
+
+ /*
+ * Determine which PROM we have
+ * (and therefore which machine we're on!)
+ */
+ which_prom(magic, prom_vec);
+
+ if (prom_is_rex(magic))
+ rex_clear_cache();
+
+ /* Register the early console. */
+ register_prom_console();
+
+ /* Were we compiled with the right CPU option? */
+#if defined(CONFIG_CPU_R3000)
+ if ((current_cpu_type() == CPU_R4000SC) ||
+ (current_cpu_type() == CPU_R4400SC)) {
+ static const char r4k_msg[] __initconst =
+ "Please recompile with \"CONFIG_CPU_R4x00 = y\".\n";
+ printk(cpu_msg);
+ printk(r4k_msg);
+ dec_machine_halt();
+ }
+#endif
+
+#if defined(CONFIG_CPU_R4X00)
+ if ((current_cpu_type() == CPU_R3000) ||
+ (current_cpu_type() == CPU_R3000A)) {
+ static const char r3k_msg[] __initconst =
+ "Please recompile with \"CONFIG_CPU_R3000 = y\".\n";
+ printk(cpu_msg);
+ printk(r3k_msg);
+ dec_machine_halt();
+ }
+#endif
+
+ prom_meminit(magic);
+ prom_identify_arch(magic);
+ prom_init_cmdline(argc, argv, magic);
+}
diff --git a/arch/mips/dec/prom/locore.S b/arch/mips/dec/prom/locore.S
new file mode 100644
index 000000000..0eb8fab62
--- /dev/null
+++ b/arch/mips/dec/prom/locore.S
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * locore.S
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+
+ .text
+
+/*
+ * Simple general exception handling routine. This one is used for the
+ * Memory sizing routine for pmax machines. HK
+ */
+
+NESTED(genexcept_early, 0, sp)
+ .set noat
+ .set noreorder
+
+ mfc0 k0, CP0_STATUS
+ la k1, mem_err
+
+ sw k0, 0(k1)
+
+ mfc0 k0, CP0_EPC
+ nop
+ addiu k0, 4 # skip the causing instruction
+ jr k0
+ rfe
+END(genexcept_early)
diff --git a/arch/mips/dec/prom/memory.c b/arch/mips/dec/prom/memory.c
new file mode 100644
index 000000000..44490c30d
--- /dev/null
+++ b/arch/mips/dec/prom/memory.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * memory.c: memory initialisation code.
+ *
+ * Copyright (C) 1998 Harald Koerfgen, Frieder Streffer and Paul M. Antoine
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/types.h>
+
+#include <asm/addrspace.h>
+#include <asm/dec/machtype.h>
+#include <asm/dec/prom.h>
+#include <asm/page.h>
+#include <asm/sections.h>
+
+
+volatile unsigned long mem_err; /* So we know an error occurred */
+
+/*
+ * Probe memory in 4MB chunks, waiting for an error to tell us we've fallen
+ * off the end of real memory. Only suitable for the 2100/3100's (PMAX).
+ */
+
+#define CHUNK_SIZE 0x400000
+
+static __init void pmax_setup_memory_region(void)
+{
+ volatile unsigned char *memory_page, dummy;
+ char old_handler[0x80];
+ extern char genexcept_early;
+
+ /* Install exception handler */
+ memcpy(&old_handler, (void *)(CKSEG0 + 0x80), 0x80);
+ memcpy((void *)(CKSEG0 + 0x80), &genexcept_early, 0x80);
+
+ /* read unmapped and uncached (KSEG1)
+ * DECstations have at least 4MB RAM
+ * Assume less than 480MB of RAM, as this is max for 5000/2xx
+ * FIXME this should be replaced by the first free page!
+ */
+ for (memory_page = (unsigned char *)CKSEG1 + CHUNK_SIZE;
+ mem_err == 0 && memory_page < (unsigned char *)CKSEG1 + 0x1e00000;
+ memory_page += CHUNK_SIZE) {
+ dummy = *memory_page;
+ }
+ memcpy((void *)(CKSEG0 + 0x80), &old_handler, 0x80);
+
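+ /*
+ * memory_page stopped one chunk past the first access that
+ * faulted; back off one chunk to get the amount of RAM found.
+ */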
+ memblock_add(0, (unsigned long)memory_page - CKSEG1 - CHUNK_SIZE);
+}
+
+/*
+ * Use the REX prom calls to get hold of the memory bitmap, and thence
+ * determine memory size.
+ */
+static __init void rex_setup_memory_region(void)
+{
+ int i, bitmap_size;
+ unsigned long mem_start = 0, mem_size = 0;
+ memmap *bm;
+
+ /* some free 64k */
+ bm = (memmap *)CKSEG0ADDR(0x28000);
+
+ bitmap_size = rex_getbitmap(bm);
+
+ for (i = 0; i < bitmap_size; i++) {
+ /* FIXME: very simplistically only add full sets of pages */
+ if (bm->bitmap[i] == 0xff)
+ mem_size += (8 * bm->pagesize);
+ else if (!mem_size)
+ mem_start += (8 * bm->pagesize);
+ else {
+ memblock_add(mem_start, mem_size);
+ mem_start += mem_size + (8 * bm->pagesize);
+ mem_size = 0;
+ }
+ }
+ if (mem_size)
+ memblock_add(mem_start, mem_size);
+}
+
+void __init prom_meminit(u32 magic)
+{
+ if (!prom_is_rex(magic))
+ pmax_setup_memory_region();
+ else
+ rex_setup_memory_region();
+}
+
+void __init prom_free_prom_memory(void)
+{
+ unsigned long end;
+
+ /*
+ * Free everything below the kernel itself but leave
+ * the first page reserved for the exception handlers.
+ */
+
+#if IS_ENABLED(CONFIG_DECLANCE)
+ /*
+ * Leave 128 KB reserved for Lance memory for
+ * IOASIC DECstations.
+ *
+ * XXX: save this address for use in dec_lance.c?
+ */
+ if (IOASIC)
+ end = __pa(&_text) - 0x00020000;
+ else
+#endif
+ end = __pa(&_text);
+
+ free_init_pages("unused PROM memory", PAGE_SIZE, end);
+}
diff --git a/arch/mips/dec/reset.c b/arch/mips/dec/reset.c
new file mode 100644
index 000000000..3df01f1da
--- /dev/null
+++ b/arch/mips/dec/reset.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Reset a DECstation machine.
+ *
+ * Copyright (C) 199x the Anonymous
+ * Copyright (C) 2001, 2002, 2003 Maciej W. Rozycki
+ */
+#include <linux/interrupt.h>
+#include <linux/linkage.h>
+
+#include <asm/addrspace.h>
+
+typedef void __noreturn (* noret_func_t)(void);
+
+static inline void __noreturn back_to_prom(void)
+{
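+ /* 0x1fc00000 is the MIPS reset vector, where the boot PROM lives. */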
+ noret_func_t func = (void *)CKSEG1ADDR(0x1fc00000);
+
+ func();
+}
+
+void __noreturn dec_machine_restart(char *command)
+{
+ back_to_prom();
+}
+
+void __noreturn dec_machine_halt(void)
+{
+ back_to_prom();
+}
+
+void __noreturn dec_machine_power_off(void)
+{
+ /* DECstations don't have a software power switch */
+ back_to_prom();
+}
+
+irqreturn_t dec_intr_halt(int irq, void *dev_id)
+{
+ dec_machine_halt();
+}
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
new file mode 100644
index 000000000..99b9b2975
--- /dev/null
+++ b/arch/mips/dec/setup.c
@@ -0,0 +1,784 @@
+/*
+ * System-specific setup, especially interrupts.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Harald Koerfgen
+ * Copyright (C) 2000, 2001, 2002, 2003, 2005, 2020 Maciej W. Rozycki
+ */
+#include <linux/console.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqnr.h>
+#include <linux/memblock.h>
+#include <linux/param.h>
+#include <linux/percpu-defs.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/pm.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
+#include <asm/irq.h>
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/page.h>
+#include <asm/reboot.h>
+#include <asm/sections.h>
+#include <asm/time.h>
+#include <asm/traps.h>
+#include <asm/wbflush.h>
+
+#include <asm/dec/interrupts.h>
+#include <asm/dec/ioasic.h>
+#include <asm/dec/ioasic_addrs.h>
+#include <asm/dec/ioasic_ints.h>
+#include <asm/dec/kn01.h>
+#include <asm/dec/kn02.h>
+#include <asm/dec/kn02ba.h>
+#include <asm/dec/kn02ca.h>
+#include <asm/dec/kn03.h>
+#include <asm/dec/kn230.h>
+#include <asm/dec/system.h>
+
+
+extern void dec_machine_restart(char *command);
+extern void dec_machine_halt(void);
+extern void dec_machine_power_off(void);
+extern irqreturn_t dec_intr_halt(int irq, void *dev_id);
+
+unsigned long dec_kn_slot_base, dec_kn_slot_size;
+
+EXPORT_SYMBOL(dec_kn_slot_base);
+EXPORT_SYMBOL(dec_kn_slot_size);
+
+int dec_tc_bus;
+
+DEFINE_SPINLOCK(ioasic_ssr_lock);
+EXPORT_SYMBOL(ioasic_ssr_lock);
+
+volatile u32 *ioasic_base;
+
+EXPORT_SYMBOL(ioasic_base);
+
+/*
+ * IRQ routing and priority tables. Priorities are set as follows:
+ *
+ * KN01 KN230 KN02 KN02-BA KN02-CA KN03
+ *
+ * MEMORY CPU CPU CPU ASIC CPU CPU
+ * RTC CPU CPU CPU ASIC CPU CPU
+ * DMA - - - ASIC ASIC ASIC
+ * SERIAL0 CPU CPU CSR ASIC ASIC ASIC
+ * SERIAL1 - - - ASIC - ASIC
+ * SCSI CPU CPU CSR ASIC ASIC ASIC
+ * ETHERNET CPU * CSR ASIC ASIC ASIC
+ * other - - - ASIC - -
+ * TC2 - - CSR CPU ASIC ASIC
+ * TC1 - - CSR CPU ASIC ASIC
+ * TC0 - - CSR CPU ASIC ASIC
+ * other - CPU - CPU ASIC ASIC
+ * other - - - - CPU CPU
+ *
+ * * -- shared with SCSI
+ */
+
+int dec_interrupt[DEC_NR_INTS] = {
+ [0 ... DEC_NR_INTS - 1] = -1
+};
+
+EXPORT_SYMBOL(dec_interrupt);
+
+int_ptr cpu_mask_nr_tbl[DEC_MAX_CPU_INTS][2] = {
+ { { .i = ~0 }, { .p = dec_intr_unimplemented } },
+};
+int_ptr asic_mask_nr_tbl[DEC_MAX_ASIC_INTS][2] = {
+ { { .i = ~0 }, { .p = asic_intr_unimplemented } },
+};
+int cpu_fpu_mask = DEC_CPU_IRQ_MASK(DEC_CPU_INR_FPU);
+int *fpu_kstat_irq;
+
+static irq_handler_t busirq_handler;
+static unsigned int busirq_flags = IRQF_NO_THREAD;
+
+/*
+ * Bus error (DBE/IBE exceptions and bus interrupts) handling setup.
+ */
+static void __init dec_be_init(void)
+{
+ switch (mips_machtype) {
+ case MACH_DS23100: /* DS2100/DS3100 Pmin/Pmax */
+ board_be_handler = dec_kn01_be_handler;
+ busirq_handler = dec_kn01_be_interrupt;
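+ /* The KN01 bus error IRQ line is shared with the video interrupt. */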
+ busirq_flags |= IRQF_SHARED;
+ dec_kn01_be_init();
+ break;
+ case MACH_DS5000_1XX: /* DS5000/1xx 3min */
+ case MACH_DS5000_XX: /* DS5000/xx Maxine */
+ board_be_handler = dec_kn02xa_be_handler;
+ busirq_handler = dec_kn02xa_be_interrupt;
+ dec_kn02xa_be_init();
+ break;
+ case MACH_DS5000_200: /* DS5000/200 3max */
+ case MACH_DS5000_2X0: /* DS5000/240 3max+ */
+ case MACH_DS5900: /* DS5900 bigmax */
+ board_be_handler = dec_ecc_be_handler;
+ busirq_handler = dec_ecc_be_interrupt;
+ dec_ecc_be_init();
+ break;
+ }
+}
+
+void __init plat_mem_setup(void)
+{
+ board_be_init = dec_be_init;
+
+ wbflush_setup();
+
+ _machine_restart = dec_machine_restart;
+ _machine_halt = dec_machine_halt;
+ pm_power_off = dec_machine_power_off;
+
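+ /* DECstations have no separate I/O port space; leave the resource empty. */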
+ ioport_resource.start = ~0UL;
+ ioport_resource.end = 0UL;
+
+ /* Stay away from the firmware working memory area for now. */
+ memblock_reserve(PHYS_OFFSET, __pa_symbol(&_text) - PHYS_OFFSET);
+}
+
+/*
+ * Machine-specific initialisation for KN01, aka DS2100 (aka Pmin)
+ * or DS3100 (aka Pmax).
+ */
+static int kn01_interrupt[DEC_NR_INTS] __initdata = {
+ [DEC_IRQ_CASCADE] = -1,
+ [DEC_IRQ_AB_RECV] = -1,
+ [DEC_IRQ_AB_XMIT] = -1,
+ [DEC_IRQ_DZ11] = DEC_CPU_IRQ_NR(KN01_CPU_INR_DZ11),
+ [DEC_IRQ_ASC] = -1,
+ [DEC_IRQ_FLOPPY] = -1,
+ [DEC_IRQ_FPU] = DEC_CPU_IRQ_NR(DEC_CPU_INR_FPU),
+ [DEC_IRQ_HALT] = -1,
+ [DEC_IRQ_ISDN] = -1,
+ [DEC_IRQ_LANCE] = DEC_CPU_IRQ_NR(KN01_CPU_INR_LANCE),
+ [DEC_IRQ_BUS] = DEC_CPU_IRQ_NR(KN01_CPU_INR_BUS),
+ [DEC_IRQ_PSU] = -1,
+ [DEC_IRQ_RTC] = DEC_CPU_IRQ_NR(KN01_CPU_INR_RTC),
+ [DEC_IRQ_SCC0] = -1,
+ [DEC_IRQ_SCC1] = -1,
+ [DEC_IRQ_SII] = DEC_CPU_IRQ_NR(KN01_CPU_INR_SII),
+ [DEC_IRQ_TC0] = -1,
+ [DEC_IRQ_TC1] = -1,
+ [DEC_IRQ_TC2] = -1,
+ [DEC_IRQ_TIMER] = -1,
+ [DEC_IRQ_VIDEO] = DEC_CPU_IRQ_NR(KN01_CPU_INR_VIDEO),
+ [DEC_IRQ_ASC_MERR] = -1,
+ [DEC_IRQ_ASC_ERR] = -1,
+ [DEC_IRQ_ASC_DMA] = -1,
+ [DEC_IRQ_FLOPPY_ERR] = -1,
+ [DEC_IRQ_ISDN_ERR] = -1,
+ [DEC_IRQ_ISDN_RXDMA] = -1,
+ [DEC_IRQ_ISDN_TXDMA] = -1,
+ [DEC_IRQ_LANCE_MERR] = -1,
+ [DEC_IRQ_SCC0A_RXERR] = -1,
+ [DEC_IRQ_SCC0A_RXDMA] = -1,
+ [DEC_IRQ_SCC0A_TXERR] = -1,
+ [DEC_IRQ_SCC0A_TXDMA] = -1,
+ [DEC_IRQ_AB_RXERR] = -1,
+ [DEC_IRQ_AB_RXDMA] = -1,
+ [DEC_IRQ_AB_TXERR] = -1,
+ [DEC_IRQ_AB_TXDMA] = -1,
+ [DEC_IRQ_SCC1A_RXERR] = -1,
+ [DEC_IRQ_SCC1A_RXDMA] = -1,
+ [DEC_IRQ_SCC1A_TXERR] = -1,
+ [DEC_IRQ_SCC1A_TXDMA] = -1,
+};
+
+static int_ptr kn01_cpu_mask_nr_tbl[][2] __initdata = {
+ { { .i = DEC_CPU_IRQ_MASK(KN01_CPU_INR_BUS) },
+ { .i = DEC_CPU_IRQ_NR(KN01_CPU_INR_BUS) } },
+ { { .i = DEC_CPU_IRQ_MASK(KN01_CPU_INR_RTC) },
+ { .i = DEC_CPU_IRQ_NR(KN01_CPU_INR_RTC) } },
+ { { .i = DEC_CPU_IRQ_MASK(KN01_CPU_INR_DZ11) },
+ { .i = DEC_CPU_IRQ_NR(KN01_CPU_INR_DZ11) } },
+ { { .i = DEC_CPU_IRQ_MASK(KN01_CPU_INR_SII) },
+ { .i = DEC_CPU_IRQ_NR(KN01_CPU_INR_SII) } },
+ { { .i = DEC_CPU_IRQ_MASK(KN01_CPU_INR_LANCE) },
+ { .i = DEC_CPU_IRQ_NR(KN01_CPU_INR_LANCE) } },
+ { { .i = DEC_CPU_IRQ_ALL },
+ { .p = cpu_all_int } },
+};
+
+static void __init dec_init_kn01(void)
+{
+ /* IRQ routing. */
+ memcpy(&dec_interrupt, &kn01_interrupt,
+ sizeof(kn01_interrupt));
+
+ /* CPU IRQ priorities. */
+ memcpy(&cpu_mask_nr_tbl, &kn01_cpu_mask_nr_tbl,
+ sizeof(kn01_cpu_mask_nr_tbl));
+
+ mips_cpu_irq_init();
+
+} /* dec_init_kn01 */
+
+
+/*
+ * Machine-specific initialisation for KN230, aka DS5100, aka MIPSmate.
+ */
+static int kn230_interrupt[DEC_NR_INTS] __initdata = {
+ [DEC_IRQ_CASCADE] = -1,
+ [DEC_IRQ_AB_RECV] = -1,
+ [DEC_IRQ_AB_XMIT] = -1,
+ [DEC_IRQ_DZ11] = DEC_CPU_IRQ_NR(KN230_CPU_INR_DZ11),
+ [DEC_IRQ_ASC] = -1,
+ [DEC_IRQ_FLOPPY] = -1,
+ [DEC_IRQ_FPU] = DEC_CPU_IRQ_NR(DEC_CPU_INR_FPU),
+ [DEC_IRQ_HALT] = DEC_CPU_IRQ_NR(KN230_CPU_INR_HALT),
+ [DEC_IRQ_ISDN] = -1,
+ [DEC_IRQ_LANCE] = DEC_CPU_IRQ_NR(KN230_CPU_INR_LANCE),
+ [DEC_IRQ_BUS] = DEC_CPU_IRQ_NR(KN230_CPU_INR_BUS),
+ [DEC_IRQ_PSU] = -1,
+ [DEC_IRQ_RTC] = DEC_CPU_IRQ_NR(KN230_CPU_INR_RTC),
+ [DEC_IRQ_SCC0] = -1,
+ [DEC_IRQ_SCC1] = -1,
+ [DEC_IRQ_SII] = DEC_CPU_IRQ_NR(KN230_CPU_INR_SII),
+ [DEC_IRQ_TC0] = -1,
+ [DEC_IRQ_TC1] = -1,
+ [DEC_IRQ_TC2] = -1,
+ [DEC_IRQ_TIMER] = -1,
+ [DEC_IRQ_VIDEO] = -1,
+ [DEC_IRQ_ASC_MERR] = -1,
+ [DEC_IRQ_ASC_ERR] = -1,
+ [DEC_IRQ_ASC_DMA] = -1,
+ [DEC_IRQ_FLOPPY_ERR] = -1,
+ [DEC_IRQ_ISDN_ERR] = -1,
+ [DEC_IRQ_ISDN_RXDMA] = -1,
+ [DEC_IRQ_ISDN_TXDMA] = -1,
+ [DEC_IRQ_LANCE_MERR] = -1,
+ [DEC_IRQ_SCC0A_RXERR] = -1,
+ [DEC_IRQ_SCC0A_RXDMA] = -1,
+ [DEC_IRQ_SCC0A_TXERR] = -1,
+ [DEC_IRQ_SCC0A_TXDMA] = -1,
+ [DEC_IRQ_AB_RXERR] = -1,
+ [DEC_IRQ_AB_RXDMA] = -1,
+ [DEC_IRQ_AB_TXERR] = -1,
+ [DEC_IRQ_AB_TXDMA] = -1,
+ [DEC_IRQ_SCC1A_RXERR] = -1,
+ [DEC_IRQ_SCC1A_RXDMA] = -1,
+ [DEC_IRQ_SCC1A_TXERR] = -1,
+ [DEC_IRQ_SCC1A_TXDMA] = -1,
+};
+
+static int_ptr kn230_cpu_mask_nr_tbl[][2] __initdata = {
+ { { .i = DEC_CPU_IRQ_MASK(KN230_CPU_INR_BUS) },
+ { .i = DEC_CPU_IRQ_NR(KN230_CPU_INR_BUS) } },
+ { { .i = DEC_CPU_IRQ_MASK(KN230_CPU_INR_RTC) },
+ { .i = DEC_CPU_IRQ_NR(KN230_CPU_INR_RTC) } },
+ { { .i = DEC_CPU_IRQ_MASK(KN230_CPU_INR_DZ11) },
+ { .i = DEC_CPU_IRQ_NR(KN230_CPU_INR_DZ11) } },
+ { { .i = DEC_CPU_IRQ_MASK(KN230_CPU_INR_SII) },
+ { .i = DEC_CPU_IRQ_NR(KN230_CPU_INR_SII) } },
+ { { .i = DEC_CPU_IRQ_ALL },
+ { .p = cpu_all_int } },
+};
+
+static void __init dec_init_kn230(void)
+{
+ /* IRQ routing. */
+ memcpy(&dec_interrupt, &kn230_interrupt,
+ sizeof(kn230_interrupt));
+
+ /* CPU IRQ priorities. */
+ memcpy(&cpu_mask_nr_tbl, &kn230_cpu_mask_nr_tbl,
+ sizeof(kn230_cpu_mask_nr_tbl));
+
+ mips_cpu_irq_init();
+
+} /* dec_init_kn230 */
+
+
+/*
+ * Machine-specific initialisation for KN02, aka DS5000/200, aka 3max.
+ */
+static int kn02_interrupt[DEC_NR_INTS] __initdata = {
+ [DEC_IRQ_CASCADE] = DEC_CPU_IRQ_NR(KN02_CPU_INR_CASCADE),
+ [DEC_IRQ_AB_RECV] = -1,
+ [DEC_IRQ_AB_XMIT] = -1,
+ [DEC_IRQ_DZ11] = KN02_IRQ_NR(KN02_CSR_INR_DZ11),
+ [DEC_IRQ_ASC] = KN02_IRQ_NR(KN02_CSR_INR_ASC),
+ [DEC_IRQ_FLOPPY] = -1,
+ [DEC_IRQ_FPU] = DEC_CPU_IRQ_NR(DEC_CPU_INR_FPU),
+ [DEC_IRQ_HALT] = -1,
+ [DEC_IRQ_ISDN] = -1,
+ [DEC_IRQ_LANCE] = KN02_IRQ_NR(KN02_CSR_INR_LANCE),
+ [DEC_IRQ_BUS] = DEC_CPU_IRQ_NR(KN02_CPU_INR_BUS),
+ [DEC_IRQ_PSU] = -1,
+ [DEC_IRQ_RTC] = DEC_CPU_IRQ_NR(KN02_CPU_INR_RTC),
+ [DEC_IRQ_SCC0] = -1,
+ [DEC_IRQ_SCC1] = -1,
+ [DEC_IRQ_SII] = -1,
+ [DEC_IRQ_TC0] = KN02_IRQ_NR(KN02_CSR_INR_TC0),
+ [DEC_IRQ_TC1] = KN02_IRQ_NR(KN02_CSR_INR_TC1),
+ [DEC_IRQ_TC2] = KN02_IRQ_NR(KN02_CSR_INR_TC2),
+ [DEC_IRQ_TIMER] = -1,
+ [DEC_IRQ_VIDEO] = -1,
+ [DEC_IRQ_ASC_MERR] = -1,
+ [DEC_IRQ_ASC_ERR] = -1,
+ [DEC_IRQ_ASC_DMA] = -1,
+ [DEC_IRQ_FLOPPY_ERR] = -1,
+ [DEC_IRQ_ISDN_ERR] = -1,
+ [DEC_IRQ_ISDN_RXDMA] = -1,
+ [DEC_IRQ_ISDN_TXDMA] = -1,
+ [DEC_IRQ_LANCE_MERR] = -1,
+ [DEC_IRQ_SCC0A_RXERR] = -1,
+ [DEC_IRQ_SCC0A_RXDMA] = -1,
+ [DEC_IRQ_SCC0A_TXERR] = -1,
+ [DEC_IRQ_SCC0A_TXDMA] = -1,
+ [DEC_IRQ_AB_RXERR] = -1,
+ [DEC_IRQ_AB_RXDMA] = -1,
+ [DEC_IRQ_AB_TXERR] = -1,
+ [DEC_IRQ_AB_TXDMA] = -1,
+ [DEC_IRQ_SCC1A_RXERR] = -1,
+ [DEC_IRQ_SCC1A_RXDMA] = -1,
+ [DEC_IRQ_SCC1A_TXERR] = -1,
+ [DEC_IRQ_SCC1A_TXDMA] = -1,
+};
+
+static int_ptr kn02_cpu_mask_nr_tbl[][2] __initdata = {
+ { { .i = DEC_CPU_IRQ_MASK(KN02_CPU_INR_BUS) },
+ { .i = DEC_CPU_IRQ_NR(KN02_CPU_INR_BUS) } },
+ { { .i = DEC_CPU_IRQ_MASK(KN02_CPU_INR_RTC) },
+ { .i = DEC_CPU_IRQ_NR(KN02_CPU_INR_RTC) } },
+ { { .i = DEC_CPU_IRQ_MASK(KN02_CPU_INR_CASCADE) },
+ { .p = kn02_io_int } },
+ { { .i = DEC_CPU_IRQ_ALL },
+ { .p = cpu_all_int } },
+};
+
+static int_ptr kn02_asic_mask_nr_tbl[][2] __initdata = {
+ { { .i = KN02_IRQ_MASK(KN02_CSR_INR_DZ11) },
+ { .i = KN02_IRQ_NR(KN02_CSR_INR_DZ11) } },
+ { { .i = KN02_IRQ_MASK(KN02_CSR_INR_ASC) },
+ { .i = KN02_IRQ_NR(KN02_CSR_INR_ASC) } },
+ { { .i = KN02_IRQ_MASK(KN02_CSR_INR_LANCE) },
+ { .i = KN02_IRQ_NR(KN02_CSR_INR_LANCE) } },
+ { { .i = KN02_IRQ_MASK(KN02_CSR_INR_TC2) },
+ { .i = KN02_IRQ_NR(KN02_CSR_INR_TC2) } },
+ { { .i = KN02_IRQ_MASK(KN02_CSR_INR_TC1) },
+ { .i = KN02_IRQ_NR(KN02_CSR_INR_TC1) } },
+ { { .i = KN02_IRQ_MASK(KN02_CSR_INR_TC0) },
+ { .i = KN02_IRQ_NR(KN02_CSR_INR_TC0) } },
+ { { .i = KN02_IRQ_ALL },
+ { .p = kn02_all_int } },
+};
+
+static void __init dec_init_kn02(void)
+{
+ /* IRQ routing. */
+ memcpy(&dec_interrupt, &kn02_interrupt,
+ sizeof(kn02_interrupt));
+
+ /* CPU IRQ priorities. */
+ memcpy(&cpu_mask_nr_tbl, &kn02_cpu_mask_nr_tbl,
+ sizeof(kn02_cpu_mask_nr_tbl));
+
+ /* KN02 CSR IRQ priorities. */
+ memcpy(&asic_mask_nr_tbl, &kn02_asic_mask_nr_tbl,
+ sizeof(kn02_asic_mask_nr_tbl));
+
+ mips_cpu_irq_init();
+ init_kn02_irqs(KN02_IRQ_BASE);
+
+} /* dec_init_kn02 */
+
+
+/*
+ * Machine-specific initialisation for KN02-BA, aka DS5000/1xx
+ * (xx = 20, 25, 33), aka 3min. Also applies to KN04(-BA), aka
+ * DS5000/150, aka 4min.
+ */
+static int kn02ba_interrupt[DEC_NR_INTS] __initdata = {
+ [DEC_IRQ_CASCADE] = DEC_CPU_IRQ_NR(KN02BA_CPU_INR_CASCADE),
+ [DEC_IRQ_AB_RECV] = -1,
+ [DEC_IRQ_AB_XMIT] = -1,
+ [DEC_IRQ_DZ11] = -1,
+ [DEC_IRQ_ASC] = IO_IRQ_NR(KN02BA_IO_INR_ASC),
+ [DEC_IRQ_FLOPPY] = -1,
+ [DEC_IRQ_FPU] = DEC_CPU_IRQ_NR(DEC_CPU_INR_FPU),
+ [DEC_IRQ_HALT] = DEC_CPU_IRQ_NR(KN02BA_CPU_INR_HALT),
+ [DEC_IRQ_ISDN] = -1,
+ [DEC_IRQ_LANCE] = IO_IRQ_NR(KN02BA_IO_INR_LANCE),
+ [DEC_IRQ_BUS] = IO_IRQ_NR(KN02BA_IO_INR_BUS),
+ [DEC_IRQ_PSU] = IO_IRQ_NR(KN02BA_IO_INR_PSU),
+ [DEC_IRQ_RTC] = IO_IRQ_NR(KN02BA_IO_INR_RTC),
+ [DEC_IRQ_SCC0] = IO_IRQ_NR(KN02BA_IO_INR_SCC0),
+ [DEC_IRQ_SCC1] = IO_IRQ_NR(KN02BA_IO_INR_SCC1),
+ [DEC_IRQ_SII] = -1,
+ [DEC_IRQ_TC0] = DEC_CPU_IRQ_NR(KN02BA_CPU_INR_TC0),
+ [DEC_IRQ_TC1] = DEC_CPU_IRQ_NR(KN02BA_CPU_INR_TC1),
+ [DEC_IRQ_TC2] = DEC_CPU_IRQ_NR(KN02BA_CPU_INR_TC2),
+ [DEC_IRQ_TIMER] = -1,
+ [DEC_IRQ_VIDEO] = -1,
+ [DEC_IRQ_ASC_MERR] = IO_IRQ_NR(IO_INR_ASC_MERR),
+ [DEC_IRQ_ASC_ERR] = IO_IRQ_NR(IO_INR_ASC_ERR),
+ [DEC_IRQ_ASC_DMA] = IO_IRQ_NR(IO_INR_ASC_DMA),
+ [DEC_IRQ_FLOPPY_ERR] = -1,
+ [DEC_IRQ_ISDN_ERR] = -1,
+ [DEC_IRQ_ISDN_RXDMA] = -1,
+ [DEC_IRQ_ISDN_TXDMA] = -1,
+ [DEC_IRQ_LANCE_MERR] = IO_IRQ_NR(IO_INR_LANCE_MERR),
+ [DEC_IRQ_SCC0A_RXERR] = IO_IRQ_NR(IO_INR_SCC0A_RXERR),
+ [DEC_IRQ_SCC0A_RXDMA] = IO_IRQ_NR(IO_INR_SCC0A_RXDMA),
+ [DEC_IRQ_SCC0A_TXERR] = IO_IRQ_NR(IO_INR_SCC0A_TXERR),
+ [DEC_IRQ_SCC0A_TXDMA] = IO_IRQ_NR(IO_INR_SCC0A_TXDMA),
+ [DEC_IRQ_AB_RXERR] = -1,
+ [DEC_IRQ_AB_RXDMA] = -1,
+ [DEC_IRQ_AB_TXERR] = -1,
+ [DEC_IRQ_AB_TXDMA] = -1,
+ [DEC_IRQ_SCC1A_RXERR] = IO_IRQ_NR(IO_INR_SCC1A_RXERR),
+ [DEC_IRQ_SCC1A_RXDMA] = IO_IRQ_NR(IO_INR_SCC1A_RXDMA),
+ [DEC_IRQ_SCC1A_TXERR] = IO_IRQ_NR(IO_INR_SCC1A_TXERR),
+ [DEC_IRQ_SCC1A_TXDMA] = IO_IRQ_NR(IO_INR_SCC1A_TXDMA),
+};
+
+static int_ptr kn02ba_cpu_mask_nr_tbl[][2] __initdata = {
+ { { .i = DEC_CPU_IRQ_MASK(KN02BA_CPU_INR_CASCADE) },
+ { .p = kn02xa_io_int } },
+ { { .i = DEC_CPU_IRQ_MASK(KN02BA_CPU_INR_TC2) },
+ { .i = DEC_CPU_IRQ_NR(KN02BA_CPU_INR_TC2) } },
+ { { .i = DEC_CPU_IRQ_MASK(KN02BA_CPU_INR_TC1) },
+ { .i = DEC_CPU_IRQ_NR(KN02BA_CPU_INR_TC1) } },
+ { { .i = DEC_CPU_IRQ_MASK(KN02BA_CPU_INR_TC0) },
+ { .i = DEC_CPU_IRQ_NR(KN02BA_CPU_INR_TC0) } },
+ { { .i = DEC_CPU_IRQ_ALL },
+ { .p = cpu_all_int } },
+};
+
+static int_ptr kn02ba_asic_mask_nr_tbl[][2] __initdata = {
+ { { .i = IO_IRQ_MASK(KN02BA_IO_INR_BUS) },
+ { .i = IO_IRQ_NR(KN02BA_IO_INR_BUS) } },
+ { { .i = IO_IRQ_MASK(KN02BA_IO_INR_RTC) },
+ { .i = IO_IRQ_NR(KN02BA_IO_INR_RTC) } },
+ { { .i = IO_IRQ_DMA },
+ { .p = asic_dma_int } },
+ { { .i = IO_IRQ_MASK(KN02BA_IO_INR_SCC0) },
+ { .i = IO_IRQ_NR(KN02BA_IO_INR_SCC0) } },
+ { { .i = IO_IRQ_MASK(KN02BA_IO_INR_SCC1) },
+ { .i = IO_IRQ_NR(KN02BA_IO_INR_SCC1) } },
+ { { .i = IO_IRQ_MASK(KN02BA_IO_INR_ASC) },
+ { .i = IO_IRQ_NR(KN02BA_IO_INR_ASC) } },
+ { { .i = IO_IRQ_MASK(KN02BA_IO_INR_LANCE) },
+ { .i = IO_IRQ_NR(KN02BA_IO_INR_LANCE) } },
+ { { .i = IO_IRQ_ALL },
+ { .p = asic_all_int } },
+};
+
+static void __init dec_init_kn02ba(void)
+{
+ /* IRQ routing. */
+ memcpy(&dec_interrupt, &kn02ba_interrupt,
+ sizeof(kn02ba_interrupt));
+
+ /* CPU IRQ priorities. */
+ memcpy(&cpu_mask_nr_tbl, &kn02ba_cpu_mask_nr_tbl,
+ sizeof(kn02ba_cpu_mask_nr_tbl));
+
+ /* I/O ASIC IRQ priorities. */
+ memcpy(&asic_mask_nr_tbl, &kn02ba_asic_mask_nr_tbl,
+ sizeof(kn02ba_asic_mask_nr_tbl));
+
+ mips_cpu_irq_init();
+ init_ioasic_irqs(IO_IRQ_BASE);
+
+} /* dec_init_kn02ba */
+
+
+/*
+ * Machine-specific initialisation for KN02-CA, aka DS5000/xx,
+ * (xx = 20, 25, 33), aka MAXine. Also applies to KN04(-CA), aka
+ * DS5000/50, aka 4MAXine.
+ */
+static int kn02ca_interrupt[DEC_NR_INTS] __initdata = {
+ [DEC_IRQ_CASCADE] = DEC_CPU_IRQ_NR(KN02CA_CPU_INR_CASCADE),
+ [DEC_IRQ_AB_RECV] = IO_IRQ_NR(KN02CA_IO_INR_AB_RECV),
+ [DEC_IRQ_AB_XMIT] = IO_IRQ_NR(KN02CA_IO_INR_AB_XMIT),
+ [DEC_IRQ_DZ11] = -1,
+ [DEC_IRQ_ASC] = IO_IRQ_NR(KN02CA_IO_INR_ASC),
+ [DEC_IRQ_FLOPPY] = IO_IRQ_NR(KN02CA_IO_INR_FLOPPY),
+ [DEC_IRQ_FPU] = DEC_CPU_IRQ_NR(DEC_CPU_INR_FPU),
+ [DEC_IRQ_HALT] = DEC_CPU_IRQ_NR(KN02CA_CPU_INR_HALT),
+ [DEC_IRQ_ISDN] = IO_IRQ_NR(KN02CA_IO_INR_ISDN),
+ [DEC_IRQ_LANCE] = IO_IRQ_NR(KN02CA_IO_INR_LANCE),
+ [DEC_IRQ_BUS] = DEC_CPU_IRQ_NR(KN02CA_CPU_INR_BUS),
+ [DEC_IRQ_PSU] = -1,
+ [DEC_IRQ_RTC] = DEC_CPU_IRQ_NR(KN02CA_CPU_INR_RTC),
+ [DEC_IRQ_SCC0] = IO_IRQ_NR(KN02CA_IO_INR_SCC0),
+ [DEC_IRQ_SCC1] = -1,
+ [DEC_IRQ_SII] = -1,
+ [DEC_IRQ_TC0] = IO_IRQ_NR(KN02CA_IO_INR_TC0),
+ [DEC_IRQ_TC1] = IO_IRQ_NR(KN02CA_IO_INR_TC1),
+ [DEC_IRQ_TC2] = -1,
+ [DEC_IRQ_TIMER] = DEC_CPU_IRQ_NR(KN02CA_CPU_INR_TIMER),
+ [DEC_IRQ_VIDEO] = IO_IRQ_NR(KN02CA_IO_INR_VIDEO),
+ [DEC_IRQ_ASC_MERR] = IO_IRQ_NR(IO_INR_ASC_MERR),
+ [DEC_IRQ_ASC_ERR] = IO_IRQ_NR(IO_INR_ASC_ERR),
+ [DEC_IRQ_ASC_DMA] = IO_IRQ_NR(IO_INR_ASC_DMA),
+ [DEC_IRQ_FLOPPY_ERR] = IO_IRQ_NR(IO_INR_FLOPPY_ERR),
+ [DEC_IRQ_ISDN_ERR] = IO_IRQ_NR(IO_INR_ISDN_ERR),
+ [DEC_IRQ_ISDN_RXDMA] = IO_IRQ_NR(IO_INR_ISDN_RXDMA),
+ [DEC_IRQ_ISDN_TXDMA] = IO_IRQ_NR(IO_INR_ISDN_TXDMA),
+ [DEC_IRQ_LANCE_MERR] = IO_IRQ_NR(IO_INR_LANCE_MERR),
+ [DEC_IRQ_SCC0A_RXERR] = IO_IRQ_NR(IO_INR_SCC0A_RXERR),
+ [DEC_IRQ_SCC0A_RXDMA] = IO_IRQ_NR(IO_INR_SCC0A_RXDMA),
+ [DEC_IRQ_SCC0A_TXERR] = IO_IRQ_NR(IO_INR_SCC0A_TXERR),
+ [DEC_IRQ_SCC0A_TXDMA] = IO_IRQ_NR(IO_INR_SCC0A_TXDMA),
+ [DEC_IRQ_AB_RXERR] = IO_IRQ_NR(IO_INR_AB_RXERR),
+ [DEC_IRQ_AB_RXDMA] = IO_IRQ_NR(IO_INR_AB_RXDMA),
+ [DEC_IRQ_AB_TXERR] = IO_IRQ_NR(IO_INR_AB_TXERR),
+ [DEC_IRQ_AB_TXDMA] = IO_IRQ_NR(IO_INR_AB_TXDMA),
+ [DEC_IRQ_SCC1A_RXERR] = -1,
+ [DEC_IRQ_SCC1A_RXDMA] = -1,
+ [DEC_IRQ_SCC1A_TXERR] = -1,
+ [DEC_IRQ_SCC1A_TXDMA] = -1,
+};
+
+static int_ptr kn02ca_cpu_mask_nr_tbl[][2] __initdata = {
+ { { .i = DEC_CPU_IRQ_MASK(KN02CA_CPU_INR_BUS) },
+ { .i = DEC_CPU_IRQ_NR(KN02CA_CPU_INR_BUS) } },
+ { { .i = DEC_CPU_IRQ_MASK(KN02CA_CPU_INR_RTC) },
+ { .i = DEC_CPU_IRQ_NR(KN02CA_CPU_INR_RTC) } },
+ { { .i = DEC_CPU_IRQ_MASK(KN02CA_CPU_INR_CASCADE) },
+ { .p = kn02xa_io_int } },
+ { { .i = DEC_CPU_IRQ_ALL },
+ { .p = cpu_all_int } },
+};
+
+static int_ptr kn02ca_asic_mask_nr_tbl[][2] __initdata = {
+ { { .i = IO_IRQ_DMA },
+ { .p = asic_dma_int } },
+ { { .i = IO_IRQ_MASK(KN02CA_IO_INR_SCC0) },
+ { .i = IO_IRQ_NR(KN02CA_IO_INR_SCC0) } },
+ { { .i = IO_IRQ_MASK(KN02CA_IO_INR_ASC) },
+ { .i = IO_IRQ_NR(KN02CA_IO_INR_ASC) } },
+ { { .i = IO_IRQ_MASK(KN02CA_IO_INR_LANCE) },
+ { .i = IO_IRQ_NR(KN02CA_IO_INR_LANCE) } },
+ { { .i = IO_IRQ_MASK(KN02CA_IO_INR_TC1) },
+ { .i = IO_IRQ_NR(KN02CA_IO_INR_TC1) } },
+ { { .i = IO_IRQ_MASK(KN02CA_IO_INR_TC0) },
+ { .i = IO_IRQ_NR(KN02CA_IO_INR_TC0) } },
+ { { .i = IO_IRQ_ALL },
+ { .p = asic_all_int } },
+};
+
+static void __init dec_init_kn02ca(void)
+{
+ /* IRQ routing. */
+ memcpy(&dec_interrupt, &kn02ca_interrupt,
+ sizeof(kn02ca_interrupt));
+
+ /* CPU IRQ priorities. */
+ memcpy(&cpu_mask_nr_tbl, &kn02ca_cpu_mask_nr_tbl,
+ sizeof(kn02ca_cpu_mask_nr_tbl));
+
+ /* I/O ASIC IRQ priorities. */
+ memcpy(&asic_mask_nr_tbl, &kn02ca_asic_mask_nr_tbl,
+ sizeof(kn02ca_asic_mask_nr_tbl));
+
+ mips_cpu_irq_init();
+ init_ioasic_irqs(IO_IRQ_BASE);
+
+} /* dec_init_kn02ca */
+
+
+/*
+ * Machine-specific initialisation for KN03, aka DS5000/240,
+ * aka 3max+ and DS5900, aka BIGmax. Also applies to KN05, aka
+ * DS5000/260, aka 4max+ and DS5900/260.
+ */
+static int kn03_interrupt[DEC_NR_INTS] __initdata = {
+ [DEC_IRQ_CASCADE] = DEC_CPU_IRQ_NR(KN03_CPU_INR_CASCADE),
+ [DEC_IRQ_AB_RECV] = -1,
+ [DEC_IRQ_AB_XMIT] = -1,
+ [DEC_IRQ_DZ11] = -1,
+ [DEC_IRQ_ASC] = IO_IRQ_NR(KN03_IO_INR_ASC),
+ [DEC_IRQ_FLOPPY] = -1,
+ [DEC_IRQ_FPU] = DEC_CPU_IRQ_NR(DEC_CPU_INR_FPU),
+ [DEC_IRQ_HALT] = DEC_CPU_IRQ_NR(KN03_CPU_INR_HALT),
+ [DEC_IRQ_ISDN] = -1,
+ [DEC_IRQ_LANCE] = IO_IRQ_NR(KN03_IO_INR_LANCE),
+ [DEC_IRQ_BUS] = DEC_CPU_IRQ_NR(KN03_CPU_INR_BUS),
+ [DEC_IRQ_PSU] = IO_IRQ_NR(KN03_IO_INR_PSU),
+ [DEC_IRQ_RTC] = DEC_CPU_IRQ_NR(KN03_CPU_INR_RTC),
+ [DEC_IRQ_SCC0] = IO_IRQ_NR(KN03_IO_INR_SCC0),
+ [DEC_IRQ_SCC1] = IO_IRQ_NR(KN03_IO_INR_SCC1),
+ [DEC_IRQ_SII] = -1,
+ [DEC_IRQ_TC0] = IO_IRQ_NR(KN03_IO_INR_TC0),
+ [DEC_IRQ_TC1] = IO_IRQ_NR(KN03_IO_INR_TC1),
+ [DEC_IRQ_TC2] = IO_IRQ_NR(KN03_IO_INR_TC2),
+ [DEC_IRQ_TIMER] = -1,
+ [DEC_IRQ_VIDEO] = -1,
+ [DEC_IRQ_ASC_MERR] = IO_IRQ_NR(IO_INR_ASC_MERR),
+ [DEC_IRQ_ASC_ERR] = IO_IRQ_NR(IO_INR_ASC_ERR),
+ [DEC_IRQ_ASC_DMA] = IO_IRQ_NR(IO_INR_ASC_DMA),
+ [DEC_IRQ_FLOPPY_ERR] = -1,
+ [DEC_IRQ_ISDN_ERR] = -1,
+ [DEC_IRQ_ISDN_RXDMA] = -1,
+ [DEC_IRQ_ISDN_TXDMA] = -1,
+ [DEC_IRQ_LANCE_MERR] = IO_IRQ_NR(IO_INR_LANCE_MERR),
+ [DEC_IRQ_SCC0A_RXERR] = IO_IRQ_NR(IO_INR_SCC0A_RXERR),
+ [DEC_IRQ_SCC0A_RXDMA] = IO_IRQ_NR(IO_INR_SCC0A_RXDMA),
+ [DEC_IRQ_SCC0A_TXERR] = IO_IRQ_NR(IO_INR_SCC0A_TXERR),
+ [DEC_IRQ_SCC0A_TXDMA] = IO_IRQ_NR(IO_INR_SCC0A_TXDMA),
+ [DEC_IRQ_AB_RXERR] = -1,
+ [DEC_IRQ_AB_RXDMA] = -1,
+ [DEC_IRQ_AB_TXERR] = -1,
+ [DEC_IRQ_AB_TXDMA] = -1,
+ [DEC_IRQ_SCC1A_RXERR] = IO_IRQ_NR(IO_INR_SCC1A_RXERR),
+ [DEC_IRQ_SCC1A_RXDMA] = IO_IRQ_NR(IO_INR_SCC1A_RXDMA),
+ [DEC_IRQ_SCC1A_TXERR] = IO_IRQ_NR(IO_INR_SCC1A_TXERR),
+ [DEC_IRQ_SCC1A_TXDMA] = IO_IRQ_NR(IO_INR_SCC1A_TXDMA),
+};
+
+static int_ptr kn03_cpu_mask_nr_tbl[][2] __initdata = {
+ { { .i = DEC_CPU_IRQ_MASK(KN03_CPU_INR_BUS) },
+ { .i = DEC_CPU_IRQ_NR(KN03_CPU_INR_BUS) } },
+ { { .i = DEC_CPU_IRQ_MASK(KN03_CPU_INR_RTC) },
+ { .i = DEC_CPU_IRQ_NR(KN03_CPU_INR_RTC) } },
+ { { .i = DEC_CPU_IRQ_MASK(KN03_CPU_INR_CASCADE) },
+ { .p = kn03_io_int } },
+ { { .i = DEC_CPU_IRQ_ALL },
+ { .p = cpu_all_int } },
+};
+
+static int_ptr kn03_asic_mask_nr_tbl[][2] __initdata = {
+ { { .i = IO_IRQ_DMA },
+ { .p = asic_dma_int } },
+ { { .i = IO_IRQ_MASK(KN03_IO_INR_SCC0) },
+ { .i = IO_IRQ_NR(KN03_IO_INR_SCC0) } },
+ { { .i = IO_IRQ_MASK(KN03_IO_INR_SCC1) },
+ { .i = IO_IRQ_NR(KN03_IO_INR_SCC1) } },
+ { { .i = IO_IRQ_MASK(KN03_IO_INR_ASC) },
+ { .i = IO_IRQ_NR(KN03_IO_INR_ASC) } },
+ { { .i = IO_IRQ_MASK(KN03_IO_INR_LANCE) },
+ { .i = IO_IRQ_NR(KN03_IO_INR_LANCE) } },
+ { { .i = IO_IRQ_MASK(KN03_IO_INR_TC2) },
+ { .i = IO_IRQ_NR(KN03_IO_INR_TC2) } },
+ { { .i = IO_IRQ_MASK(KN03_IO_INR_TC1) },
+ { .i = IO_IRQ_NR(KN03_IO_INR_TC1) } },
+ { { .i = IO_IRQ_MASK(KN03_IO_INR_TC0) },
+ { .i = IO_IRQ_NR(KN03_IO_INR_TC0) } },
+ { { .i = IO_IRQ_ALL },
+ { .p = asic_all_int } },
+};
+
+static void __init dec_init_kn03(void)
+{
+ /* IRQ routing. */
+ memcpy(&dec_interrupt, &kn03_interrupt,
+ sizeof(kn03_interrupt));
+
+ /* CPU IRQ priorities. */
+ memcpy(&cpu_mask_nr_tbl, &kn03_cpu_mask_nr_tbl,
+ sizeof(kn03_cpu_mask_nr_tbl));
+
+ /* I/O ASIC IRQ priorities. */
+ memcpy(&asic_mask_nr_tbl, &kn03_asic_mask_nr_tbl,
+ sizeof(kn03_asic_mask_nr_tbl));
+
+ mips_cpu_irq_init();
+ init_ioasic_irqs(IO_IRQ_BASE);
+
+} /* dec_init_kn03 */
+
+
+void __init arch_init_irq(void)
+{
+ switch (mips_machtype) {
+ case MACH_DS23100: /* DS2100/DS3100 Pmin/Pmax */
+ dec_init_kn01();
+ break;
+ case MACH_DS5100: /* DS5100 MIPSmate */
+ dec_init_kn230();
+ break;
+ case MACH_DS5000_200: /* DS5000/200 3max */
+ dec_init_kn02();
+ break;
+ case MACH_DS5000_1XX: /* DS5000/1xx 3min */
+ dec_init_kn02ba();
+ break;
+ case MACH_DS5000_2X0: /* DS5000/240 3max+ */
+ case MACH_DS5900: /* DS5900 bigmax */
+ dec_init_kn03();
+ break;
+ case MACH_DS5000_XX: /* Personal DS5000/xx */
+ dec_init_kn02ca();
+ break;
+ case MACH_DS5800: /* DS5800 Isis */
+ panic("Don't know how to set this up!");
+ break;
+ case MACH_DS5400: /* DS5400 MIPSfair */
+ panic("Don't know how to set this up!");
+ break;
+ case MACH_DS5500: /* DS5500 MIPSfair-2 */
+ panic("Don't know how to set this up!");
+ break;
+ }
+
+ /* Free the FPU interrupt if the exception is present. */
+ if (!cpu_has_nofpuex) {
+ cpu_fpu_mask = 0;
+ dec_interrupt[DEC_IRQ_FPU] = -1;
+ }
+ /* Free the halt interrupt, which is unused on R4k systems. */
+ if (current_cpu_type() == CPU_R4000SC ||
+ current_cpu_type() == CPU_R4400SC)
+ dec_interrupt[DEC_IRQ_HALT] = -1;
+
+ /* Register board interrupts: FPU and cascade. */
+ if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT) &&
+ dec_interrupt[DEC_IRQ_FPU] >= 0 && cpu_has_fpu) {
+ struct irq_desc *desc_fpu;
+ int irq_fpu;
+
+ irq_fpu = dec_interrupt[DEC_IRQ_FPU];
+ if (request_irq(irq_fpu, no_action, IRQF_NO_THREAD, "fpu",
+ NULL))
+ pr_err("Failed to register fpu interrupt\n");
+ desc_fpu = irq_to_desc(irq_fpu);
+ fpu_kstat_irq = this_cpu_ptr(desc_fpu->kstat_irqs);
+ }
+ if (dec_interrupt[DEC_IRQ_CASCADE] >= 0) {
+ if (request_irq(dec_interrupt[DEC_IRQ_CASCADE], no_action,
+ IRQF_NO_THREAD, "cascade", NULL))
+ pr_err("Failed to register cascade interrupt\n");
+ }
+ /* Register the bus error interrupt. */
+ if (dec_interrupt[DEC_IRQ_BUS] >= 0 && busirq_handler) {
+ if (request_irq(dec_interrupt[DEC_IRQ_BUS], busirq_handler,
+ busirq_flags, "bus error", busirq_handler))
+ pr_err("Failed to register bus error interrupt\n");
+ }
+ /* Register the HALT interrupt. */
+ if (dec_interrupt[DEC_IRQ_HALT] >= 0) {
+ if (request_irq(dec_interrupt[DEC_IRQ_HALT], dec_intr_halt,
+ IRQF_NO_THREAD, "halt", NULL))
+ pr_err("Failed to register halt interrupt\n");
+ }
+}
+
+asmlinkage unsigned int dec_irq_dispatch(unsigned int irq)
+{
+ do_IRQ(irq);
+ return 0;
+}
diff --git a/arch/mips/dec/tc.c b/arch/mips/dec/tc.c
new file mode 100644
index 000000000..dba583976
--- /dev/null
+++ b/arch/mips/dec/tc.c
@@ -0,0 +1,95 @@
+/*
+ * TURBOchannel architecture calls.
+ *
+ * Copyright (c) Harald Koerfgen, 1998
+ * Copyright (c) 2001, 2003, 2005, 2006 Maciej W. Rozycki
+ * Copyright (c) 2005 James Simmons
+ *
+ * This file is subject to the terms and conditions of the GNU
+ * General Public License. See the file "COPYING" in the main
+ * directory of this archive for more details.
+ */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/tc.h>
+#include <linux/types.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/paccess.h>
+
+#include <asm/dec/interrupts.h>
+#include <asm/dec/prom.h>
+#include <asm/dec/system.h>
+
+/*
+ * Protected read byte from TURBOchannel slot space.
+ */
+int tc_preadb(u8 *valp, void __iomem *addr)
+{
+ return get_dbe(*valp, (u8 *)addr);
+}
+
+/*
+ * Get TURBOchannel bus information as specified by the spec, plus
+ * the slot space base address and the number of slots.
+ */
+int __init tc_bus_get_info(struct tc_bus *tbus)
+{
+ if (!dec_tc_bus)
+ return -ENXIO;
+
+ memcpy(&tbus->info, rex_gettcinfo(), sizeof(tbus->info));
+ tbus->slot_base = CPHYSADDR((long)rex_slot_address(0));
+
+ switch (mips_machtype) {
+ case MACH_DS5000_200:
+ tbus->num_tcslots = 7;
+ break;
+ case MACH_DS5000_2X0:
+ case MACH_DS5900:
+ tbus->ext_slot_base = 0x20000000;
+ tbus->ext_slot_size = 0x20000000;
+ fallthrough;
+ case MACH_DS5000_1XX:
+ tbus->num_tcslots = 3;
+ break;
+ case MACH_DS5000_XX:
+ tbus->num_tcslots = 2;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Get the IRQ for the specified slot.
+ */
+void __init tc_device_get_irq(struct tc_dev *tdev)
+{
+ switch (tdev->slot) {
+ case 0:
+ tdev->interrupt = dec_interrupt[DEC_IRQ_TC0];
+ break;
+ case 1:
+ tdev->interrupt = dec_interrupt[DEC_IRQ_TC1];
+ break;
+ case 2:
+ tdev->interrupt = dec_interrupt[DEC_IRQ_TC2];
+ break;
+ /*
+ * Yuck! DS5000/200 onboard devices
+ */
+ case 5:
+ tdev->interrupt = dec_interrupt[DEC_IRQ_TC5];
+ break;
+ case 6:
+ tdev->interrupt = dec_interrupt[DEC_IRQ_TC6];
+ break;
+ default:
+ tdev->interrupt = -1;
+ break;
+ }
+}
diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c
new file mode 100644
index 000000000..c38686f89
--- /dev/null
+++ b/arch/mips/dec/time.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ * Copyright (C) 2000, 2003 Maciej W. Rozycki
+ *
+ * This file contains the time handling details for PC-style clocks as
+ * found in some MIPS systems.
+ *
+ */
+#include <linux/bcd.h>
+#include <linux/init.h>
+#include <linux/mc146818rtc.h>
+#include <linux/param.h>
+
+#include <asm/cpu-features.h>
+#include <asm/ds1287.h>
+#include <asm/time.h>
+#include <asm/dec/interrupts.h>
+#include <asm/dec/ioasic.h>
+#include <asm/dec/machtype.h>
+
+void read_persistent_clock64(struct timespec64 *ts)
+{
+ unsigned int year, mon, day, hour, min, sec, real_year;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+
+ do {
+ sec = CMOS_READ(RTC_SECONDS);
+ min = CMOS_READ(RTC_MINUTES);
+ hour = CMOS_READ(RTC_HOURS);
+ day = CMOS_READ(RTC_DAY_OF_MONTH);
+ mon = CMOS_READ(RTC_MONTH);
+ year = CMOS_READ(RTC_YEAR);
+ /*
+ * The PROM will reset the year to either '72 or '73.
+ * Therefore we store the real year separately, in one
+ * of the unused BBU RAM locations.
+ */
+ real_year = CMOS_READ(RTC_DEC_YEAR);
+ } while (sec != CMOS_READ(RTC_SECONDS));
+
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ sec = bcd2bin(sec);
+ min = bcd2bin(min);
+ hour = bcd2bin(hour);
+ day = bcd2bin(day);
+ mon = bcd2bin(mon);
+ year = bcd2bin(year);
+ }
+
+ year += real_year - 72 + 2000;
+
+ ts->tv_sec = mktime64(year, mon, day, hour, min, sec);
+ ts->tv_nsec = 0;
+}
+
+/*
+ * In order to set the CMOS clock precisely, update_persistent_clock64 has to
+ * be called 500 ms after the second nowtime has started, because when
+ * nowtime is written into the registers of the CMOS clock, it will
+ * jump to the next second precisely 500 ms later. Check the Dallas
+ * DS1287 data sheet for details.
+ */
+int update_persistent_clock64(struct timespec64 now)
+{
+ time64_t nowtime = now.tv_sec;
+ int retval = 0;
+ int real_seconds, real_minutes, cmos_minutes;
+ unsigned char save_control, save_freq_select;
+
+ /* irq are locally disabled here */
+ spin_lock(&rtc_lock);
+ /* tell the clock it's being set */
+ save_control = CMOS_READ(RTC_CONTROL);
+ CMOS_WRITE((save_control | RTC_SET), RTC_CONTROL);
+
+ /* stop and reset prescaler */
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
+ CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+ cmos_minutes = CMOS_READ(RTC_MINUTES);
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ cmos_minutes = bcd2bin(cmos_minutes);
+
+ /*
+ * Since we're only adjusting minutes and seconds,
+ * don't interfere with hour overflow. This avoids
+ * messing with unknown time zones but requires your
+ * RTC not to be off by more than 15 minutes.
+ */
+ real_minutes = div_s64_rem(nowtime, 60, &real_seconds);
+ if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
+ real_minutes += 30; /* correct for half hour time zone */
+ real_minutes %= 60;
+
+ if (abs(real_minutes - cmos_minutes) < 30) {
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ real_seconds = bin2bcd(real_seconds);
+ real_minutes = bin2bcd(real_minutes);
+ }
+ CMOS_WRITE(real_seconds, RTC_SECONDS);
+ CMOS_WRITE(real_minutes, RTC_MINUTES);
+ } else {
+ printk_once(KERN_NOTICE
+ "set_rtc_mmss: can't update from %d to %d\n",
+ cmos_minutes, real_minutes);
+ retval = -1;
+ }
+
+ /* The following flags have to be released exactly in this order,
+ * otherwise the DS1287 will not reset the oscillator and will not
+ * update precisely 500 ms later. You won't find this mentioned
+ * in the Dallas Semiconductor data sheets, but who believes data
+ * sheets anyway ... -- Markus Kuhn
+ */
+ CMOS_WRITE(save_control, RTC_CONTROL);
+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+ spin_unlock(&rtc_lock);
+
+ return retval;
+}
+
+void __init plat_time_init(void)
+{
+ int ioasic_clock = 0;
+ u32 start, end;
+ int i = HZ / 8;
+
+ /* Set up the rate of periodic DS1287 interrupts. */
+ ds1287_set_base_clock(HZ);
+
+ /* On some I/O ASIC systems we have the I/O ASIC's counter. */
+ if (IOASIC)
+ ioasic_clock = dec_ioasic_clocksource_init() == 0;
+ if (cpu_has_counter) {
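+ /*
+ * Calibrate the CP0 Count rate against the DS1287: count CP0
+ * cycles across HZ/8 periodic-timer expirations (i.e. 1/8 s)
+ * and scale the delta by 8 to get the frequency in Hz.
+ */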
+ ds1287_timer_state();
+ while (!ds1287_timer_state())
+ ;
+
+ start = read_c0_count();
+
+ while (i--)
+ while (!ds1287_timer_state())
+ ;
+
+ end = read_c0_count();
+
+ mips_hpt_frequency = (end - start) * 8;
+ printk(KERN_INFO "MIPS counter frequency %dHz\n",
+ mips_hpt_frequency);
+
+ /*
+ * All R4k DECstations suffer from the CP0 Count erratum,
+ * so the timer cannot be used both as a clock source and as
+ * a clock event at the same time. An accurate wall clock is
+ * more important than a high-precision interval timer, so the
+ * timer is only used as a clock source, and not as a clock
+ * event, when there's no I/O ASIC counter available to serve
+ * as the clock source.
+ */
+ if (!ioasic_clock) {
+ init_r4k_clocksource();
+ mips_hpt_frequency = 0;
+ }
+ }
+
+ ds1287_clockevent_init(dec_interrupt[DEC_IRQ_RTC]);
+}
diff --git a/arch/mips/dec/wbflush.c b/arch/mips/dec/wbflush.c
new file mode 100644
index 000000000..dad64d178
--- /dev/null
+++ b/arch/mips/dec/wbflush.c
@@ -0,0 +1,92 @@
+/*
+ * Setup the right wbflush routine for the different DECstations.
+ *
+ * Created with information from:
+ * DECstation 3100 Desktop Workstation Functional Specification
+ * DECstation 5000/200 KN02 System Module Functional Specification
+ * mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-)
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Harald Koerfgen
+ * Copyright (C) 2002 Maciej W. Rozycki
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+
+#include <asm/bootinfo.h>
+#include <asm/wbflush.h>
+#include <asm/barrier.h>
+
+static void wbflush_kn01(void);
+static void wbflush_kn210(void);
+static void wbflush_mips(void);
+
+void (*__wbflush) (void);
+
+void __init wbflush_setup(void)
+{
+ switch (mips_machtype) {
+ case MACH_DS23100:
+ case MACH_DS5000_200: /* DS5000 3max */
+ __wbflush = wbflush_kn01;
+ break;
+ case MACH_DS5100: /* DS5100 MIPSMATE */
+ __wbflush = wbflush_kn210;
+ break;
+ case MACH_DS5000_1XX: /* DS5000/100 3min */
+ case MACH_DS5000_XX: /* Personal DS5000/2x */
+ case MACH_DS5000_2X0: /* DS5000/240 3max+ */
+ case MACH_DS5900: /* DS5900 bigmax */
+ default:
+ __wbflush = wbflush_mips;
+ break;
+ }
+}
+
+/*
+ * For the DS3100 and DS5000/200 the R2020/R3220 writeback buffer functions
+ * as part of Coprocessor 0.
+ */
+static void wbflush_kn01(void)
+{
+ asm(".set\tpush\n\t"
+ ".set\tnoreorder\n\t"
+ "1:\tbc0f\t1b\n\t"
+ "nop\n\t"
+ ".set\tpop");
+}
+
+/*
+ * For the DS5100 the writeback buffer seems to be a part of Coprocessor 3.
+ * But CP3 has to be enabled first.
+ */
+static void wbflush_kn210(void)
+{
+ asm(".set\tpush\n\t"
+ ".set\tnoreorder\n\t"
+ "mfc0\t$2,$12\n\t"
+ "lui\t$3,0x8000\n\t"
+ "or\t$3,$2,$3\n\t"
+ "mtc0\t$3,$12\n\t"
+ "nop\n"
+ "1:\tbc3f\t1b\n\t"
+ "nop\n\t"
+ "mtc0\t$2,$12\n\t"
+ "nop\n\t"
+ ".set\tpop"
+ : : : "$2", "$3");
+}
+
+/*
+ * I/O ASIC systems use a standard writeback buffer that gets flushed
+ * upon an uncached read.
+ */
+static void wbflush_mips(void)
+{
+ __fast_iob();
+}
+EXPORT_SYMBOL(__wbflush);
diff --git a/arch/mips/fw/arc/Makefile b/arch/mips/fw/arc/Makefile
new file mode 100644
index 000000000..64d685efc
--- /dev/null
+++ b/arch/mips/fw/arc/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the ARC prom monitor library routines under Linux.
+#
+
+ifdef CONFIG_ARC_CMDLINE_ONLY
+lib-y += cmdline.o
+else
+lib-y += cmdline.o env.o file.o identify.o init.o \
+ misc.o
+endif
+
+lib-$(CONFIG_ARC_MEMORY) += memory.o
+lib-$(CONFIG_ARC_CONSOLE) += arc_con.o
+lib-$(CONFIG_ARC_PROMLIB) += promlib.o
diff --git a/arch/mips/fw/arc/arc_con.c b/arch/mips/fw/arc/arc_con.c
new file mode 100644
index 000000000..7fdce236b
--- /dev/null
+++ b/arch/mips/fw/arc/arc_con.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wrap-around code for a console using the
+ * ARC io-routines.
+ *
+ * Copyright (c) 1998 Harald Koerfgen
+ * Copyright (c) 2001 Ralf Baechle
+ * Copyright (c) 2002 Thiemo Seufer
+ */
+#include <linux/tty.h>
+#include <linux/major.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/fs.h>
+#include <asm/setup.h>
+#include <asm/sgialib.h>
+
+static void prom_console_write(struct console *co, const char *s,
+ unsigned count)
+{
+ /* Do each character */
+ while (count--) {
+ if (*s == '\n')
+ prom_putchar('\r');
+ prom_putchar(*s++);
+ }
+}
+
+static int prom_console_setup(struct console *co, char *options)
+{
+ if (prom_flags & PROM_FLAG_USE_AS_CONSOLE)
+ return 0;
+ return -ENODEV;
+}
+
+static struct console arc_cons = {
+ .name = "arc",
+ .write = prom_console_write,
+ .setup = prom_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+/*
+ * Register console.
+ */
+
+static int __init arc_console_init(void)
+{
+ register_console(&arc_cons);
+
+ return 0;
+}
+console_initcall(arc_console_init);
diff --git a/arch/mips/fw/arc/cmdline.c b/arch/mips/fw/arc/cmdline.c
new file mode 100644
index 000000000..155c5e911
--- /dev/null
+++ b/arch/mips/fw/arc/cmdline.c
@@ -0,0 +1,110 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * cmdline.c: Kernel command line creation using ARCS argc/argv.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ */
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/sgialib.h>
+#include <asm/bootinfo.h>
+
+#undef DEBUG_CMDLINE
+
+/*
+ * A 32-bit ARC PROM passes arguments and environment as 32-bit pointers.
+ * This macro takes care of the sign extension.
+ */
+#define prom_argv(index) ((char *) (long)argv[(index)])
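+/*
+ * For example (illustrative): a PROM argv[] entry of 0x80001234, stored as
+ * a 32-bit LONG, is sign-extended by the cast through (long) to
+ * 0xffffffff80001234 on 64-bit kernels, i.e. the equivalent CKSEG0 address.
+ */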
+
+static char *ignored[] = {
+ "ConsoleIn=",
+ "ConsoleOut=",
+ "SystemPartition=",
+ "OSLoader=",
+ "OSLoadPartition=",
+ "OSLoadFilename=",
+ "OSLoadOptions="
+};
+
+static char *used_arc[][2] = {
+ { "OSLoadPartition=", "root=" },
+ { "OSLoadOptions=", "" }
+};
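+
+/*
+ * E.g. a firmware argument like "OSLoadPartition=scsi(0)disk(1)rdisk(0)partition(1)"
+ * (value purely illustrative) becomes "root=scsi(0)disk(1)rdisk(0)partition(1)" on
+ * the kernel command line, while for "OSLoadOptions=..." only the value is kept.
+ */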
+
+static char __init *move_firmware_args(int argc, LONG *argv, char *cp)
+{
+ char *s;
+ int actr, i;
+
+ actr = 1; /* Always ignore argv[0] */
+
+ while (actr < argc) {
+ for(i = 0; i < ARRAY_SIZE(used_arc); i++) {
+ int len = strlen(used_arc[i][0]);
+
+ if (!strncmp(prom_argv(actr), used_arc[i][0], len)) {
+ /* Ok, we want it. First append the replacement... */
+ strcat(cp, used_arc[i][1]);
+ cp += strlen(used_arc[i][1]);
+ /* ... and now the argument */
+ s = strchr(prom_argv(actr), '=');
+ if (s) {
+ s++;
+ strcpy(cp, s);
+ cp += strlen(s);
+ }
+ *cp++ = ' ';
+ break;
+ }
+ }
+ actr++;
+ }
+
+ return cp;
+}
+
+void __init prom_init_cmdline(int argc, LONG *argv)
+{
+ char *cp;
+ int actr, i;
+
+ actr = 1; /* Always ignore argv[0] */
+
+ cp = arcs_cmdline;
+ /*
+ * Move ARC variables to the beginning to make sure they can be
+ * overridden by later arguments.
+ */
+ cp = move_firmware_args(argc, argv, cp);
+
+ while (actr < argc) {
+ for (i = 0; i < ARRAY_SIZE(ignored); i++) {
+ int len = strlen(ignored[i]);
+
+ if (!strncmp(prom_argv(actr), ignored[i], len))
+ goto pic_cont;
+ }
+ /* Ok, we want it. */
+ strcpy(cp, prom_argv(actr));
+ cp += strlen(prom_argv(actr));
+ *cp++ = ' ';
+
+ pic_cont:
+ actr++;
+ }
+
+ if (cp != arcs_cmdline) /* get rid of trailing space */
+ --cp;
+ *cp = '\0';
+
+#ifdef DEBUG_CMDLINE
+ printk(KERN_DEBUG "prom cmdline: %s\n", arcs_cmdline);
+#endif
+}
diff --git a/arch/mips/fw/arc/env.c b/arch/mips/fw/arc/env.c
new file mode 100644
index 000000000..02407a7bb
--- /dev/null
+++ b/arch/mips/fw/arc/env.c
@@ -0,0 +1,21 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * env.c: ARCS environment variable routines.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/fw/arc/types.h>
+#include <asm/sgialib.h>
+
+PCHAR __init
+ArcGetEnvironmentVariable(CHAR *name)
+{
+ return (CHAR *) ARC_CALL1(get_evar, name);
+}
diff --git a/arch/mips/fw/arc/file.c b/arch/mips/fw/arc/file.c
new file mode 100644
index 000000000..b0d8535c8
--- /dev/null
+++ b/arch/mips/fw/arc/file.c
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * ARC firmware interface.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+
+#include <asm/fw/arc/types.h>
+#include <asm/sgialib.h>
+
+LONG
+ArcRead(ULONG FileID, VOID *Buffer, ULONG N, ULONG *Count)
+{
+ return ARC_CALL4(read, FileID, Buffer, N, Count);
+}
+
+LONG
+ArcWrite(ULONG FileID, PVOID Buffer, ULONG N, PULONG Count)
+{
+ return ARC_CALL4(write, FileID, Buffer, N, Count);
+}
diff --git a/arch/mips/fw/arc/identify.c b/arch/mips/fw/arc/identify.c
new file mode 100644
index 000000000..5527e0f54
--- /dev/null
+++ b/arch/mips/fw/arc/identify.c
@@ -0,0 +1,111 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * identify.c: identify machine by looking up system identifier
+ *
+ * Copyright (C) 1998 Thomas Bogendoerfer
+ *
+ * This code is based on arch/mips/sgi/kernel/system.c, which is
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ */
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+#include <asm/sgialib.h>
+#include <asm/bootinfo.h>
+
+struct smatch {
+ char *arcname;
+ char *liname;
+ int flags;
+};
+
+static struct smatch mach_table[] = {
+ {
+ .arcname = "SGI-IP22",
+ .liname = "SGI Indy",
+ .flags = PROM_FLAG_ARCS,
+ }, {
+ .arcname = "SGI-IP28",
+ .liname = "SGI IP28",
+ .flags = PROM_FLAG_ARCS,
+ }, {
+ .arcname = "SGI-IP30",
+ .liname = "SGI Octane",
+ .flags = PROM_FLAG_ARCS,
+ }, {
+ .arcname = "SGI-IP32",
+ .liname = "SGI O2",
+ .flags = PROM_FLAG_ARCS,
+ }, {
+ .arcname = "Microsoft-Jazz",
+ .liname = "Jazz MIPS_Magnum_4000",
+ .flags = 0,
+ }, {
+ .arcname = "PICA-61",
+ .liname = "Jazz Acer_PICA_61",
+ .flags = 0,
+ }, {
+ .arcname = "RM200PCI",
+ .liname = "SNI RM200_PCI",
+ .flags = PROM_FLAG_DONT_FREE_TEMP,
+ }, {
+ .arcname = "RM200PCI-R5K",
+ .liname = "SNI RM200_PCI-R5K",
+ .flags = PROM_FLAG_DONT_FREE_TEMP,
+ }
+};
+
+int prom_flags;
+
+static struct smatch * __init string_to_mach(const char *s)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mach_table); i++) {
+ if (!strcmp(s, mach_table[i].arcname))
+ return &mach_table[i];
+ }
+
+ panic("Yeee, could not determine architecture type <%s>", s);
+}
+
+char *system_type;
+
+const char *get_system_type(void)
+{
+ return system_type;
+}
+
+static pcomponent * __init ArcGetChild(pcomponent *Current)
+{
+ return (pcomponent *) ARC_CALL1(child_component, Current);
+}
+
+void __init prom_identify_arch(void)
+{
+ pcomponent *p;
+ struct smatch *mach;
+ const char *iname;
+
+ /*
+ * The root component tells us what machine architecture we have here.
+ */
+ p = ArcGetChild(PROM_NULL_COMPONENT);
+ if (p == NULL)
+ iname = "Unknown";
+ else
+ iname = (char *) (long) p->iname;
+
+ printk("ARCH: %s\n", iname);
+ mach = string_to_mach(iname);
+ system_type = mach->liname;
+
+ prom_flags = mach->flags;
+}
diff --git a/arch/mips/fw/arc/init.c b/arch/mips/fw/arc/init.c
new file mode 100644
index 000000000..f9d1dea9b
--- /dev/null
+++ b/arch/mips/fw/arc/init.c
@@ -0,0 +1,51 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * PROM library initialisation code.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <asm/bootinfo.h>
+#include <asm/sgialib.h>
+#include <asm/smp-ops.h>
+
+#undef DEBUG_PROM_INIT
+
+/* Master romvec interface. */
+struct linux_romvec *romvec;
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32)
+/* stack for calling 32bit ARC prom */
+u64 o32_stk[4096];
+#endif
+
+void __init prom_init(void)
+{
+ PSYSTEM_PARAMETER_BLOCK pb = PROMBLOCK;
+
+ romvec = ROMVECTOR;
+
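+ /*
+ * The SPB signature 0x53435241 is the ASCII string "ARCS"
+ * packed into a 32-bit word, least significant byte first.
+ */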
+ if (pb->magic != 0x53435241) {
+ printk(KERN_CRIT "Aieee, bad prom vector magic %08lx\n",
+ (unsigned long) pb->magic);
+ while(1)
+ ;
+ }
+
+ prom_init_cmdline(fw_arg0, (LONG *)fw_arg1);
+ prom_identify_arch();
+ printk(KERN_INFO "PROMLIB: ARC firmware Version %d Revision %d\n",
+ pb->ver, pb->rev);
+ prom_meminit();
+
+#ifdef DEBUG_PROM_INIT
+ {
+ ULONG cnt;
+ CHAR c;
+
+ pr_info("Press a key to reboot\n");
+ ArcRead(0, &c, 1, &cnt);
+ ArcEnterInteractiveMode();
+ }
+#endif
+}
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
new file mode 100644
index 000000000..37625ae5e
--- /dev/null
+++ b/arch/mips/fw/arc/memory.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * memory.c: PROM library functions for acquiring/using memory descriptors
+ * given to us from the ARCS firmware.
+ *
+ * Copyright (C) 1996 by David S. Miller
+ * Copyright (C) 1999, 2000, 2001 by Ralf Baechle
+ * Copyright (C) 1999, 2000 by Silicon Graphics, Inc.
+ *
+ * PROM library functions for acquiring/using memory descriptors given to us
+ * from the ARCS firmware. This is only used when CONFIG_ARC_MEMORY is set
+ * because on some machines, like the SGI IP27, the ARC memory configuration
+ * data is completely bogus and alternative, easier-to-use mechanisms are
+ * available.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/swap.h>
+
+#include <asm/sgialib.h>
+#include <asm/page.h>
+#include <asm/bootinfo.h>
+
+#undef DEBUG
+
+#define MAX_PROM_MEM 5
+static phys_addr_t prom_mem_base[MAX_PROM_MEM] __initdata;
+static phys_addr_t prom_mem_size[MAX_PROM_MEM] __initdata;
+static unsigned int nr_prom_mem __initdata;
+
+/*
+ * For ARC firmware memory functions the unit of measurement is always
+ * a 4k page of memory.
+ */
+#define ARC_PAGE_SHIFT 12
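+/*
+ * For example, a descriptor with base 0x100 and pages 0x800 describes
+ * memory starting at physical address 0x100000 and spanning 8 MiB
+ * (0x800 << ARC_PAGE_SHIFT bytes).
+ */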
+
+struct linux_mdesc * __init ArcGetMemoryDescriptor(struct linux_mdesc *Current)
+{
+ return (struct linux_mdesc *) ARC_CALL1(get_mdesc, Current);
+}
+
+#ifdef DEBUG /* convenient for debugging */
+static char *arcs_mtypes[8] = {
+ "Exception Block",
+ "ARCS Romvec Page",
+ "Free/Contig RAM",
+ "Generic Free RAM",
+ "Bad Memory",
+ "Standalone Program Pages",
+ "ARCS Temp Storage Area",
+ "ARCS Permanent Storage Area"
+};
+
+static char *arc_mtypes[8] = {
+ "Exception Block",
+ "SystemParameterBlock",
+ "FreeMemory",
+ "Bad Memory",
+ "LoadedProgram",
+ "FirmwareTemporary",
+ "FirmwarePermanent",
+ "FreeContiguous"
+};
+#define mtypes(a) (prom_flags & PROM_FLAG_ARCS) ? arcs_mtypes[a.arcs] \
+ : arc_mtypes[a.arc]
+#endif
+
+enum {
+ mem_free, mem_prom_used, mem_reserved
+};
+
+static inline int memtype_classify_arcs(union linux_memtypes type)
+{
+ switch (type.arcs) {
+ case arcs_fcontig:
+ case arcs_free:
+ return mem_free;
+ case arcs_atmp:
+ return mem_prom_used;
+ case arcs_eblock:
+ case arcs_rvpage:
+ case arcs_bmem:
+ case arcs_prog:
+ case arcs_aperm:
+ return mem_reserved;
+ default:
+ BUG();
+ }
+ while(1); /* Nuke warning. */
+}
+
+static inline int memtype_classify_arc(union linux_memtypes type)
+{
+ switch (type.arc) {
+ case arc_free:
+ case arc_fcontig:
+ return mem_free;
+ case arc_atmp:
+ return mem_prom_used;
+ case arc_eblock:
+ case arc_rvpage:
+ case arc_bmem:
+ case arc_prog:
+ case arc_aperm:
+ return mem_reserved;
+ default:
+ BUG();
+ }
+ while(1); /* Nuke warning. */
+}
+
+static int __init prom_memtype_classify(union linux_memtypes type)
+{
+ if (prom_flags & PROM_FLAG_ARCS) /* SGI is ``different'' ... */
+ return memtype_classify_arcs(type);
+
+ return memtype_classify_arc(type);
+}
+
+void __weak __init prom_meminit(void)
+{
+ struct linux_mdesc *p;
+
+#ifdef DEBUG
+ int i = 0;
+
+ printk("ARCS MEMORY DESCRIPTOR dump:\n");
+ p = ArcGetMemoryDescriptor(PROM_NULL_MDESC);
+ while(p) {
+ printk("[%d,%p]: base<%08lx> pages<%08lx> type<%s>\n",
+ i, p, p->base, p->pages, mtypes(p->type));
+ p = ArcGetMemoryDescriptor(p);
+ i++;
+ }
+#endif
+
+ nr_prom_mem = 0;
+ p = PROM_NULL_MDESC;
+ while ((p = ArcGetMemoryDescriptor(p))) {
+ unsigned long base, size;
+ long type;
+
+ base = p->base << ARC_PAGE_SHIFT;
+ size = p->pages << ARC_PAGE_SHIFT;
+ type = prom_memtype_classify(p->type);
+
+ /* ignore mirrored RAM on IP28/IP30 */
+ if (base < PHYS_OFFSET)
+ continue;
+
+ memblock_add(base, size);
+
+ if (type == mem_reserved)
+ memblock_reserve(base, size);
+
+ if (type == mem_prom_used) {
+ memblock_reserve(base, size);
+ if (nr_prom_mem >= MAX_PROM_MEM) {
+ pr_err("Too many ROM DATA regions\n");
+ continue;
+ }
+ prom_mem_base[nr_prom_mem] = base;
+ prom_mem_size[nr_prom_mem] = size;
+ nr_prom_mem++;
+ }
+ }
+}
+
+void __weak __init prom_cleanup(void)
+{
+}
+
+void __weak __init prom_free_prom_memory(void)
+{
+ int i;
+
+ if (prom_flags & PROM_FLAG_DONT_FREE_TEMP)
+ return;
+
+ for (i = 0; i < nr_prom_mem; i++) {
+ free_init_pages("prom memory",
+ prom_mem_base[i], prom_mem_base[i] + prom_mem_size[i]);
+ }
+ /*
+ * At this point it isn't safe to call PROM functions anymore,
+ * so give platforms a way to do their own PROM cleanups.
+ */
+ prom_cleanup();
+}
diff --git a/arch/mips/fw/arc/misc.c b/arch/mips/fw/arc/misc.c
new file mode 100644
index 000000000..d5b2d5901
--- /dev/null
+++ b/arch/mips/fw/arc/misc.c
@@ -0,0 +1,36 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Miscellaneous ARCS PROM routines.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#include <linux/compiler.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+
+#include <asm/bcache.h>
+
+#include <asm/fw/arc/types.h>
+#include <asm/sgialib.h>
+#include <asm/bootinfo.h>
+
+VOID __noreturn
+ArcEnterInteractiveMode(VOID)
+{
+ bc_disable();
+ local_irq_disable();
+ ARC_CALL0(imode);
+
+ unreachable();
+}
+
+DISPLAY_STATUS * __init ArcGetDisplayStatus(ULONG FileID)
+{
+ return (DISPLAY_STATUS *) ARC_CALL1(GetDisplayStatus, FileID);
+}
diff --git a/arch/mips/fw/arc/promlib.c b/arch/mips/fw/arc/promlib.c
new file mode 100644
index 000000000..5e9e840a9
--- /dev/null
+++ b/arch/mips/fw/arc/promlib.c
@@ -0,0 +1,61 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996 David S. Miller (dm@sgi.com)
+ * Compatibility with board caches, Ulf Carlsson
+ */
+#include <linux/kernel.h>
+#include <asm/sgialib.h>
+#include <asm/bcache.h>
+#include <asm/setup.h>
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32)
+/*
+ * For 64-bit kernels working with a 32-bit ARC PROM, pointer arguments
+ * for ARC calls need to reside in CKSEG0/1. But as soon as the kernel
+ * switches to its first kernel thread, the stack is set to an address in
+ * XKPHYS, so anything on the stack can't be used anymore. This is solved
+ * by using static declarations: the variables are then put into BSS,
+ * which is linked to a CKSEG0 address. Since this is only used on UP
+ * platforms, no spinlock is needed.
+ */
+#define O32_STATIC static
+#else
+#define O32_STATIC
+#endif
+
+/*
+ * The IP22 PROM is not compatible with board caches, so we disable the
+ * board cache while romvec calls are in progress. Since r4xx0.c is always
+ * compiled and linked with your kernel, this shouldn't cause any harm
+ * regardless of what MIPS processor you have.
+ *
+ * The ARC write and read functions seem to interfere with the serial lines
+ * in some way. You should be careful with them.
+ */
+
+void prom_putchar(char c)
+{
+ O32_STATIC ULONG cnt;
+ O32_STATIC CHAR it;
+
+ it = c;
+
+ bc_disable();
+ ArcWrite(1, &it, 1, &cnt);
+ bc_enable();
+}
+
+char prom_getchar(void)
+{
+ O32_STATIC ULONG cnt;
+ O32_STATIC CHAR c;
+
+ bc_disable();
+ ArcRead(0, &c, 1, &cnt);
+ bc_enable();
+
+ return c;
+}
diff --git a/arch/mips/fw/cfe/Makefile b/arch/mips/fw/cfe/Makefile
new file mode 100644
index 000000000..55b77633e
--- /dev/null
+++ b/arch/mips/fw/cfe/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Broadcom Common Firmware Environment support
+#
+
+lib-y += cfe_api.o
diff --git a/arch/mips/fw/cfe/cfe_api.c b/arch/mips/fw/cfe/cfe_api.c
new file mode 100644
index 000000000..0c9c97ab2
--- /dev/null
+++ b/arch/mips/fw/cfe/cfe_api.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000, 2001, 2002 Broadcom Corporation
+ */
+
+/*
+ *
+ * Broadcom Common Firmware Environment (CFE)
+ *
+ * This module contains device function stubs (small routines to
+ * call the standard "iocb" interface entry point to CFE).
+ * There should be one routine here per iocb function call.
+ *
+ * Authors: Mitch Lichtenberg, Chris Demetriou
+ */
+
+#include <linux/string.h>
+
+#include <asm/fw/cfe/cfe_api.h>
+#include "cfe_api_int.h"
+
+/* Cast from a native pointer to a cfe_xptr_t and back. */
+#define XPTR_FROM_NATIVE(n) ((cfe_xptr_t) (intptr_t) (n))
+#define NATIVE_FROM_XPTR(x) ((void *) (intptr_t) (x))
+
+int cfe_iocb_dispatch(struct cfe_xiocb *xiocb);
+
+/*
+ * Declare the dispatch function with args of "intptr_t".
+ * This makes sure whatever model we're compiling in
+ * puts the pointers in a single register. For example,
+ * combining -mlong64 and -mips1 or -mips2 would lead to
+ * trouble, since the handle and IOCB pointer will be
+ * passed in two registers each, and CFE expects one.
+ */
+
+static int (*cfe_dispfunc) (intptr_t handle, intptr_t xiocb);
+static u64 cfe_handle;
+
+int cfe_init(u64 handle, u64 ept)
+{
+ cfe_dispfunc = NATIVE_FROM_XPTR(ept);
+ cfe_handle = handle;
+ return 0;
+}
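+
+/*
+ * Typical boot-time usage (illustrative only -- the actual handle and
+ * entry point come from the firmware handoff registers, see the
+ * platform code):
+ *
+ *   cfe_init(fw_arg0, fw_arg2);
+ *   cfe_getenv("LINUX_CMDLINE", arcs_cmdline, COMMAND_LINE_SIZE);
+ */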
+
+int cfe_iocb_dispatch(struct cfe_xiocb * xiocb)
+{
+ if (!cfe_dispfunc)
+ return -1;
+ return (*cfe_dispfunc) ((intptr_t) cfe_handle, (intptr_t) xiocb);
+}
+
+int cfe_close(int handle)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_DEV_CLOSE;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = handle;
+ xiocb.xiocb_flags = 0;
+ xiocb.xiocb_psize = 0;
+
+ cfe_iocb_dispatch(&xiocb);
+
+ return xiocb.xiocb_status;
+
+}
+
+int cfe_cpu_start(int cpu, void (*fn) (void), long sp, long gp, long a1)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_FW_CPUCTL;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = 0;
+ xiocb.xiocb_flags = 0;
+ xiocb.xiocb_psize = sizeof(struct xiocb_cpuctl);
+ xiocb.plist.xiocb_cpuctl.cpu_number = cpu;
+ xiocb.plist.xiocb_cpuctl.cpu_command = CFE_CPU_CMD_START;
+ xiocb.plist.xiocb_cpuctl.gp_val = gp;
+ xiocb.plist.xiocb_cpuctl.sp_val = sp;
+ xiocb.plist.xiocb_cpuctl.a1_val = a1;
+ xiocb.plist.xiocb_cpuctl.start_addr = (long) fn;
+
+ cfe_iocb_dispatch(&xiocb);
+
+ return xiocb.xiocb_status;
+}
+
+int cfe_cpu_stop(int cpu)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_FW_CPUCTL;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = 0;
+ xiocb.xiocb_flags = 0;
+ xiocb.xiocb_psize = sizeof(struct xiocb_cpuctl);
+ xiocb.plist.xiocb_cpuctl.cpu_number = cpu;
+ xiocb.plist.xiocb_cpuctl.cpu_command = CFE_CPU_CMD_STOP;
+
+ cfe_iocb_dispatch(&xiocb);
+
+ return xiocb.xiocb_status;
+}
+
+int cfe_enumenv(int idx, char *name, int namelen, char *val, int vallen)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_ENV_ENUM;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = 0;
+ xiocb.xiocb_flags = 0;
+ xiocb.xiocb_psize = sizeof(struct xiocb_envbuf);
+ xiocb.plist.xiocb_envbuf.enum_idx = idx;
+ xiocb.plist.xiocb_envbuf.name_ptr = XPTR_FROM_NATIVE(name);
+ xiocb.plist.xiocb_envbuf.name_length = namelen;
+ xiocb.plist.xiocb_envbuf.val_ptr = XPTR_FROM_NATIVE(val);
+ xiocb.plist.xiocb_envbuf.val_length = vallen;
+
+ cfe_iocb_dispatch(&xiocb);
+
+ return xiocb.xiocb_status;
+}
+
+int
+cfe_enummem(int idx, int flags, u64 *start, u64 *length, u64 *type)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_FW_MEMENUM;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = 0;
+ xiocb.xiocb_flags = flags;
+ xiocb.xiocb_psize = sizeof(struct xiocb_meminfo);
+ xiocb.plist.xiocb_meminfo.mi_idx = idx;
+
+ cfe_iocb_dispatch(&xiocb);
+
+ if (xiocb.xiocb_status < 0)
+ return xiocb.xiocb_status;
+
+ *start = xiocb.plist.xiocb_meminfo.mi_addr;
+ *length = xiocb.plist.xiocb_meminfo.mi_size;
+ *type = xiocb.plist.xiocb_meminfo.mi_type;
+
+ return 0;
+}
+
+int cfe_exit(int warm, int status)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_FW_RESTART;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = 0;
+ xiocb.xiocb_flags = warm ? CFE_FLG_WARMSTART : 0;
+ xiocb.xiocb_psize = sizeof(struct xiocb_exitstat);
+ xiocb.plist.xiocb_exitstat.status = status;
+
+ cfe_iocb_dispatch(&xiocb);
+
+ return xiocb.xiocb_status;
+}
+
+int cfe_flushcache(int flg)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_FW_FLUSHCACHE;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = 0;
+ xiocb.xiocb_flags = flg;
+ xiocb.xiocb_psize = 0;
+
+ cfe_iocb_dispatch(&xiocb);
+
+ return xiocb.xiocb_status;
+}
+
+int cfe_getdevinfo(char *name)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_DEV_GETINFO;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = 0;
+ xiocb.xiocb_flags = 0;
+ xiocb.xiocb_psize = sizeof(struct xiocb_buffer);
+ xiocb.plist.xiocb_buffer.buf_offset = 0;
+ xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(name);
+ xiocb.plist.xiocb_buffer.buf_length = strlen(name);
+
+ cfe_iocb_dispatch(&xiocb);
+
+ if (xiocb.xiocb_status < 0)
+ return xiocb.xiocb_status;
+ return xiocb.plist.xiocb_buffer.buf_ioctlcmd;
+}
+
+int cfe_getenv(char *name, char *dest, int destlen)
+{
+ struct cfe_xiocb xiocb;
+
+ *dest = 0;
+
+ xiocb.xiocb_fcode = CFE_CMD_ENV_GET;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = 0;
+ xiocb.xiocb_flags = 0;
+ xiocb.xiocb_psize = sizeof(struct xiocb_envbuf);
+ xiocb.plist.xiocb_envbuf.enum_idx = 0;
+ xiocb.plist.xiocb_envbuf.name_ptr = XPTR_FROM_NATIVE(name);
+ xiocb.plist.xiocb_envbuf.name_length = strlen(name);
+ xiocb.plist.xiocb_envbuf.val_ptr = XPTR_FROM_NATIVE(dest);
+ xiocb.plist.xiocb_envbuf.val_length = destlen;
+
+ cfe_iocb_dispatch(&xiocb);
+
+ return xiocb.xiocb_status;
+}
+
+int cfe_getfwinfo(cfe_fwinfo_t * info)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_FW_GETINFO;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = 0;
+ xiocb.xiocb_flags = 0;
+ xiocb.xiocb_psize = sizeof(struct xiocb_fwinfo);
+
+ cfe_iocb_dispatch(&xiocb);
+
+ if (xiocb.xiocb_status < 0)
+ return xiocb.xiocb_status;
+
+ info->fwi_version = xiocb.plist.xiocb_fwinfo.fwi_version;
+ info->fwi_totalmem = xiocb.plist.xiocb_fwinfo.fwi_totalmem;
+ info->fwi_flags = xiocb.plist.xiocb_fwinfo.fwi_flags;
+ info->fwi_boardid = xiocb.plist.xiocb_fwinfo.fwi_boardid;
+ info->fwi_bootarea_va = xiocb.plist.xiocb_fwinfo.fwi_bootarea_va;
+ info->fwi_bootarea_pa = xiocb.plist.xiocb_fwinfo.fwi_bootarea_pa;
+ info->fwi_bootarea_size =
+ xiocb.plist.xiocb_fwinfo.fwi_bootarea_size;
+
+ return 0;
+}
+
+int cfe_getstdhandle(int flg)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_DEV_GETHANDLE;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = 0;
+ xiocb.xiocb_flags = flg;
+ xiocb.xiocb_psize = 0;
+
+ cfe_iocb_dispatch(&xiocb);
+
+ if (xiocb.xiocb_status < 0)
+ return xiocb.xiocb_status;
+ return xiocb.xiocb_handle;
+}
+
+int64_t
+cfe_getticks(void)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_FW_GETTIME;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = 0;
+ xiocb.xiocb_flags = 0;
+ xiocb.xiocb_psize = sizeof(struct xiocb_time);
+ xiocb.plist.xiocb_time.ticks = 0;
+
+ cfe_iocb_dispatch(&xiocb);
+
+ return xiocb.plist.xiocb_time.ticks;
+
+}
+
+int cfe_inpstat(int handle)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_DEV_INPSTAT;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = handle;
+ xiocb.xiocb_flags = 0;
+ xiocb.xiocb_psize = sizeof(struct xiocb_inpstat);
+ xiocb.plist.xiocb_inpstat.inp_status = 0;
+
+ cfe_iocb_dispatch(&xiocb);
+
+ if (xiocb.xiocb_status < 0)
+ return xiocb.xiocb_status;
+ return xiocb.plist.xiocb_inpstat.inp_status;
+}
+
+int
+cfe_ioctl(int handle, unsigned int ioctlnum, unsigned char *buffer,
+ int length, int *retlen, u64 offset)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_DEV_IOCTL;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = handle;
+ xiocb.xiocb_flags = 0;
+ xiocb.xiocb_psize = sizeof(struct xiocb_buffer);
+ xiocb.plist.xiocb_buffer.buf_offset = offset;
+ xiocb.plist.xiocb_buffer.buf_ioctlcmd = ioctlnum;
+ xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(buffer);
+ xiocb.plist.xiocb_buffer.buf_length = length;
+
+ cfe_iocb_dispatch(&xiocb);
+
+ if (retlen)
+ *retlen = xiocb.plist.xiocb_buffer.buf_retlen;
+ return xiocb.xiocb_status;
+}
+
+int cfe_open(char *name)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_DEV_OPEN;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = 0;
+ xiocb.xiocb_flags = 0;
+ xiocb.xiocb_psize = sizeof(struct xiocb_buffer);
+ xiocb.plist.xiocb_buffer.buf_offset = 0;
+ xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(name);
+ xiocb.plist.xiocb_buffer.buf_length = strlen(name);
+
+ cfe_iocb_dispatch(&xiocb);
+
+ if (xiocb.xiocb_status < 0)
+ return xiocb.xiocb_status;
+ return xiocb.xiocb_handle;
+}
+
+int cfe_read(int handle, unsigned char *buffer, int length)
+{
+ return cfe_readblk(handle, 0, buffer, length);
+}
+
+int cfe_readblk(int handle, s64 offset, unsigned char *buffer, int length)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_DEV_READ;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = handle;
+ xiocb.xiocb_flags = 0;
+ xiocb.xiocb_psize = sizeof(struct xiocb_buffer);
+ xiocb.plist.xiocb_buffer.buf_offset = offset;
+ xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(buffer);
+ xiocb.plist.xiocb_buffer.buf_length = length;
+
+ cfe_iocb_dispatch(&xiocb);
+
+ if (xiocb.xiocb_status < 0)
+ return xiocb.xiocb_status;
+ return xiocb.plist.xiocb_buffer.buf_retlen;
+}
+
+int cfe_setenv(char *name, char *val)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_ENV_SET;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = 0;
+ xiocb.xiocb_flags = 0;
+ xiocb.xiocb_psize = sizeof(struct xiocb_envbuf);
+ xiocb.plist.xiocb_envbuf.enum_idx = 0;
+ xiocb.plist.xiocb_envbuf.name_ptr = XPTR_FROM_NATIVE(name);
+ xiocb.plist.xiocb_envbuf.name_length = strlen(name);
+ xiocb.plist.xiocb_envbuf.val_ptr = XPTR_FROM_NATIVE(val);
+ xiocb.plist.xiocb_envbuf.val_length = strlen(val);
+
+ cfe_iocb_dispatch(&xiocb);
+
+ return xiocb.xiocb_status;
+}
+
+int cfe_write(int handle, const char *buffer, int length)
+{
+ return cfe_writeblk(handle, 0, buffer, length);
+}
+
+int cfe_writeblk(int handle, s64 offset, const char *buffer, int length)
+{
+ struct cfe_xiocb xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_DEV_WRITE;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = handle;
+ xiocb.xiocb_flags = 0;
+ xiocb.xiocb_psize = sizeof(struct xiocb_buffer);
+ xiocb.plist.xiocb_buffer.buf_offset = offset;
+ xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(buffer);
+ xiocb.plist.xiocb_buffer.buf_length = length;
+
+ cfe_iocb_dispatch(&xiocb);
+
+ if (xiocb.xiocb_status < 0)
+ return xiocb.xiocb_status;
+ return xiocb.plist.xiocb_buffer.buf_retlen;
+}
diff --git a/arch/mips/fw/cfe/cfe_api_int.h b/arch/mips/fw/cfe/cfe_api_int.h
new file mode 100644
index 000000000..61a665d44
--- /dev/null
+++ b/arch/mips/fw/cfe/cfe_api_int.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2000, 2001, 2002 Broadcom Corporation
+ */
+#ifndef CFE_API_INT_H
+#define CFE_API_INT_H
+
+/*
+ * Constants.
+ */
+#define CFE_CMD_FW_GETINFO 0
+#define CFE_CMD_FW_RESTART 1
+#define CFE_CMD_FW_BOOT 2
+#define CFE_CMD_FW_CPUCTL 3
+#define CFE_CMD_FW_GETTIME 4
+#define CFE_CMD_FW_MEMENUM 5
+#define CFE_CMD_FW_FLUSHCACHE 6
+
+#define CFE_CMD_DEV_GETHANDLE 9
+#define CFE_CMD_DEV_ENUM 10
+#define CFE_CMD_DEV_OPEN 11
+#define CFE_CMD_DEV_INPSTAT 12
+#define CFE_CMD_DEV_READ 13
+#define CFE_CMD_DEV_WRITE 14
+#define CFE_CMD_DEV_IOCTL 15
+#define CFE_CMD_DEV_CLOSE 16
+#define CFE_CMD_DEV_GETINFO 17
+
+#define CFE_CMD_ENV_ENUM 20
+#define CFE_CMD_ENV_GET 22
+#define CFE_CMD_ENV_SET 23
+#define CFE_CMD_ENV_DEL 24
+
+#define CFE_CMD_MAX 32
+
+#define CFE_CMD_VENDOR_USE 0x8000 /* codes above this are for customer use */
+
+/*
+ * Structures.
+ */
+
+/* eeek, signed "pointers" */
+typedef s64 cfe_xptr_t;
+
+struct xiocb_buffer {
+ u64 buf_offset; /* offset on device (bytes) */
+ cfe_xptr_t buf_ptr; /* pointer to a buffer */
+ u64 buf_length; /* length of this buffer */
+ u64 buf_retlen; /* returned length (for read ops) */
+ u64 buf_ioctlcmd; /* IOCTL command (used only for IOCTLs) */
+};
+
+struct xiocb_inpstat {
+ u64 inp_status; /* 1 means input available */
+};
+
+struct xiocb_envbuf {
+ s64 enum_idx; /* 0-based enumeration index */
+ cfe_xptr_t name_ptr; /* name string buffer */
+ s64 name_length; /* size of name buffer */
+ cfe_xptr_t val_ptr; /* value string buffer */
+ s64 val_length; /* size of value string buffer */
+};
+
+struct xiocb_cpuctl {
+ u64 cpu_number; /* cpu number to control */
+ u64 cpu_command; /* command to issue to CPU */
+ u64 start_addr; /* CPU start address */
+ u64 gp_val; /* starting GP value */
+ u64 sp_val; /* starting SP value */
+ u64 a1_val; /* starting A1 value */
+};
+
+struct xiocb_time {
+ s64 ticks; /* current time in ticks */
+};
+
+struct xiocb_exitstat{
+ s64 status;
+};
+
+struct xiocb_meminfo {
+ s64 mi_idx; /* 0-based enumeration index */
+ s64 mi_type; /* type of memory block */
+ u64 mi_addr; /* physical start address */
+ u64 mi_size; /* block size */
+};
+
+struct xiocb_fwinfo {
+ s64 fwi_version; /* major, minor, eco version */
+ s64 fwi_totalmem; /* total installed mem */
+ s64 fwi_flags; /* various flags */
+ s64 fwi_boardid; /* board ID */
+ s64 fwi_bootarea_va; /* VA of boot area */
+ s64 fwi_bootarea_pa; /* PA of boot area */
+ s64 fwi_bootarea_size; /* size of boot area */
+ s64 fwi_reserved1;
+ s64 fwi_reserved2;
+ s64 fwi_reserved3;
+};
+
+struct cfe_xiocb {
+ u64 xiocb_fcode; /* IOCB function code */
+ s64 xiocb_status; /* return status */
+ s64 xiocb_handle; /* file/device handle */
+ u64 xiocb_flags; /* flags for this IOCB */
+ u64 xiocb_psize; /* size of parameter list */
+ union {
+ /* buffer parameters */
+ struct xiocb_buffer xiocb_buffer;
+
+ /* input status parameters */
+ struct xiocb_inpstat xiocb_inpstat;
+
+ /* environment function parameters */
+ struct xiocb_envbuf xiocb_envbuf;
+
+ /* CPU control parameters */
+ struct xiocb_cpuctl xiocb_cpuctl;
+
+ /* timer parameters */
+ struct xiocb_time xiocb_time;
+
+ /* memory arena info parameters */
+ struct xiocb_meminfo xiocb_meminfo;
+
+ /* firmware information */
+ struct xiocb_fwinfo xiocb_fwinfo;
+
+ /* Exit Status */
+ struct xiocb_exitstat xiocb_exitstat;
+ } plist;
+};
+
+#endif /* CFE_API_INT_H */
diff --git a/arch/mips/fw/lib/Makefile b/arch/mips/fw/lib/Makefile
new file mode 100644
index 000000000..cf9634548
--- /dev/null
+++ b/arch/mips/fw/lib/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for generic prom monitor library routines under Linux.
+#
+
+lib-y += cmdline.o
+
+lib-$(CONFIG_64BIT) += call_o32.o
diff --git a/arch/mips/fw/lib/call_o32.S b/arch/mips/fw/lib/call_o32.S
new file mode 100644
index 000000000..ee856709e
--- /dev/null
+++ b/arch/mips/fw/lib/call_o32.S
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * O32 interface for the 64 (or N32) ABI.
+ *
+ * Copyright (C) 2002, 2014 Maciej W. Rozycki
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+/* O32 register size. */
+#define O32_SZREG 4
+/* Maximum number of arguments supported. Must be even! */
+#define O32_ARGC 32
+/* Number of static registers we save. */
+#define O32_STATC 11
+/* Argument area frame size. */
+#define O32_ARGSZ (O32_SZREG * O32_ARGC)
+/* Static register save area frame size. */
+#define O32_STATSZ (SZREG * O32_STATC)
+/* Stack pointer register save area frame size. */
+#define O32_SPSZ SZREG
+/* Combined area frame size. */
+#define O32_FRAMESZ (O32_ARGSZ + O32_SPSZ + O32_STATSZ)
+/* Switched stack frame size. */
+#define O32_NFRAMESZ (O32_ARGSZ + O32_SPSZ)
+
+ .text
+
+/*
+ * O32 function call dispatcher, for interfacing 32-bit ROM routines.
+ *
+ * The standard 64 (N32) calling sequence is supported, with a0 holding
+ * a function pointer, a1 a pointer to the new stack to call the
+ * function with or 0 if no stack switching is requested, a2-a7 -- the
+ * function call's first six arguments, and the stack -- the remaining
+ * arguments (up to O32_ARGC, including a2-a7). Static registers, gp
+ * and fp are preserved, v0 holds the result. This code relies on the
+ * called o32 function for sp and ra restoration and this dispatcher has
+ * to be placed in a KSEGx (or KUSEG) address space. Any pointers
+ * passed have to point to addresses within one of these spaces as well.
+ */
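+/*
+ * From C this is typically declared roughly as (illustrative; the
+ * authoritative declarations live with the callers, e.g. the SNI and
+ * ARC PROM wrappers):
+ *
+ *   long call_o32(long fn, void *new_stack, ...);
+ */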
+NESTED(call_o32, O32_FRAMESZ, ra)
+ REG_SUBU sp,O32_FRAMESZ
+
+ REG_S ra,O32_FRAMESZ-1*SZREG(sp)
+ REG_S fp,O32_FRAMESZ-2*SZREG(sp)
+ REG_S gp,O32_FRAMESZ-3*SZREG(sp)
+ REG_S s7,O32_FRAMESZ-4*SZREG(sp)
+ REG_S s6,O32_FRAMESZ-5*SZREG(sp)
+ REG_S s5,O32_FRAMESZ-6*SZREG(sp)
+ REG_S s4,O32_FRAMESZ-7*SZREG(sp)
+ REG_S s3,O32_FRAMESZ-8*SZREG(sp)
+ REG_S s2,O32_FRAMESZ-9*SZREG(sp)
+ REG_S s1,O32_FRAMESZ-10*SZREG(sp)
+ REG_S s0,O32_FRAMESZ-11*SZREG(sp)
+
+ move jp,a0
+
+ move fp,sp
+ beqz a1,0f
+ REG_SUBU fp,a1,O32_NFRAMESZ
+0:
+ REG_S sp,O32_NFRAMESZ-1*SZREG(fp)
+
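+/*
+ * Shuffle the first six o32 arguments into place: the sll-by-$zero moves
+ * reduce a2-a5 to canonical 32-bit (sign-extended) values in a0-a3, while
+ * a6 and a7 are spilled to their o32 argument save slots on the new stack.
+ */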
+ sll a0,a2,zero
+ sll a1,a3,zero
+ sll a2,a4,zero
+ sll a3,a5,zero
+ sw a6,4*O32_SZREG(fp)
+ sw a7,5*O32_SZREG(fp)
+
+ PTR_LA t0,O32_FRAMESZ(sp)
+ PTR_LA t1,6*O32_SZREG(fp)
+ li t2,O32_ARGC-6
+1:
+ lw t3,(t0)
+ REG_ADDU t0,SZREG
+ sw t3,(t1)
+ REG_SUBU t2,1
+ REG_ADDU t1,O32_SZREG
+ bnez t2,1b
+
+ move sp,fp
+
+ jalr jp
+
+ REG_L sp,O32_NFRAMESZ-1*SZREG(sp)
+
+ REG_L s0,O32_FRAMESZ-11*SZREG(sp)
+ REG_L s1,O32_FRAMESZ-10*SZREG(sp)
+ REG_L s2,O32_FRAMESZ-9*SZREG(sp)
+ REG_L s3,O32_FRAMESZ-8*SZREG(sp)
+ REG_L s4,O32_FRAMESZ-7*SZREG(sp)
+ REG_L s5,O32_FRAMESZ-6*SZREG(sp)
+ REG_L s6,O32_FRAMESZ-5*SZREG(sp)
+ REG_L s7,O32_FRAMESZ-4*SZREG(sp)
+ REG_L gp,O32_FRAMESZ-3*SZREG(sp)
+ REG_L fp,O32_FRAMESZ-2*SZREG(sp)
+ REG_L ra,O32_FRAMESZ-1*SZREG(sp)
+
+ REG_ADDU sp,O32_FRAMESZ
+ jr ra
+END(call_o32)
diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c
new file mode 100644
index 000000000..892765b74
--- /dev/null
+++ b/arch/mips/fw/lib/cmdline.c
@@ -0,0 +1,103 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/addrspace.h>
+#include <asm/fw/fw.h>
+
+int fw_argc;
+int *_fw_argv;
+int *_fw_envp;
+
+#ifndef CONFIG_HAVE_PLAT_FW_INIT_CMDLINE
+void __init fw_init_cmdline(void)
+{
+ int i;
+
+ /* Validate command line parameters. */
+ if ((fw_arg0 >= CKSEG0) || (fw_arg1 < CKSEG0)) {
+ fw_argc = 0;
+ _fw_argv = NULL;
+ } else {
+ fw_argc = (fw_arg0 & 0x0000ffff);
+ _fw_argv = (int *)fw_arg1;
+ }
+
+ /* Validate environment pointer. */
+ if (fw_arg2 < CKSEG0)
+ _fw_envp = NULL;
+ else
+ _fw_envp = (int *)fw_arg2;
+
+ for (i = 1; i < fw_argc; i++) {
+ strlcat(arcs_cmdline, fw_argv(i), COMMAND_LINE_SIZE);
+ if (i < (fw_argc - 1))
+ strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
+ }
+}
+#endif
+
+char * __init fw_getcmdline(void)
+{
+ return &(arcs_cmdline[0]);
+}
+
+char *fw_getenv(char *envname)
+{
+ char *result = NULL;
+
+ if (_fw_envp != NULL && fw_envp(0) != NULL) {
+ /*
+ * Return a pointer to the given environment variable.
+ * YAMON uses "name", "value" pairs, while U-Boot uses
+ * "name=value".
+ */
+ int i, yamon, index = 0;
+
+ yamon = (strchr(fw_envp(index), '=') == NULL);
+ i = strlen(envname);
+
+ while (fw_envp(index)) {
+ if (strncmp(envname, fw_envp(index), i) == 0) {
+ if (yamon) {
+ result = fw_envp(index + 1);
+ break;
+ } else if (fw_envp(index)[i] == '=') {
+ result = fw_envp(index) + i + 1;
+ break;
+ }
+ }
+
+ /* Increment array index. */
+ if (yamon)
+ index += 2;
+ else
+ index += 1;
+ }
+ }
+
+ return result;
+}
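+
+/*
+ * For illustration (hypothetical values): a YAMON environment is laid out
+ * as name/value pairs, e.g. { "memsize", "0x08000000", "modetty0",
+ * "38400,n,8,0,none", NULL }, so fw_getenv("memsize") returns the entry
+ * that follows the name, while a U-Boot style environment holds "name=value"
+ * strings, e.g. { "memsize=0x08000000", NULL }, and the text after the '='
+ * is returned instead.
+ */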
+
+unsigned long fw_getenvl(char *envname)
+{
+ unsigned long envl = 0UL;
+ char *str;
+ int tmp;
+
+ str = fw_getenv(envname);
+ if (str) {
+ tmp = kstrtoul(str, 0, &envl);
+ if (tmp)
+ envl = 0;
+ }
+
+ return envl;
+}
diff --git a/arch/mips/fw/sni/Makefile b/arch/mips/fw/sni/Makefile
new file mode 100644
index 000000000..e5ba8e86b
--- /dev/null
+++ b/arch/mips/fw/sni/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the SNI prom monitor routines under Linux.
+#
+
+lib-$(CONFIG_FW_SNIPROM) += sniprom.o
diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c
new file mode 100644
index 000000000..8f6730376
--- /dev/null
+++ b/arch/mips/fw/sni/sniprom.c
@@ -0,0 +1,153 @@
+/*
+ * Big Endian PROM code for SNI RM machines
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2006 Florian Lohoff (flo@rfc822.org)
+ * Copyright (C) 2005-2006 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/string.h>
+#include <linux/console.h>
+
+#include <asm/addrspace.h>
+#include <asm/sni.h>
+#include <asm/mipsprom.h>
+#include <asm/mipsregs.h>
+#include <asm/bootinfo.h>
+#include <asm/setup.h>
+
+/* special SNI prom calls */
+/*
+ * These calls do not exist in all PROMs - SINIX compares
+ * the PROM env variable "version" against "2.0008"
+ * or greater. If it is lower, it falls back to probing
+ * interesting registers.
+ */
+#define PROM_GET_MEMCONF 58
+#define PROM_GET_HWCONF 61
+
+#define PROM_VEC (u64 *)CKSEG1ADDR(0x1fc00000)
+#define PROM_ENTRY(x) (PROM_VEC + (x))
+
+#define ___prom_putchar ((int *(*)(int))PROM_ENTRY(PROM_PUTCHAR))
+#define ___prom_getenv ((char *(*)(char *))PROM_ENTRY(PROM_GETENV))
+#define ___prom_get_memconf ((void (*)(void *))PROM_ENTRY(PROM_GET_MEMCONF))
+#define ___prom_get_hwconf ((u32 (*)(void))PROM_ENTRY(PROM_GET_HWCONF))
+
+#ifdef CONFIG_64BIT
+
+/* O32 stack has to be 8-byte aligned. */
+static u64 o32_stk[4096];
+#define O32_STK (&o32_stk[ARRAY_SIZE(o32_stk)])
+
+#define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
+ __asm__(#fun " = call_o32")
+
+int __PROM_O32(__prom_putchar, (int *(*)(int), void *, int));
+char *__PROM_O32(__prom_getenv, (char *(*)(char *), void *, char *));
+void __PROM_O32(__prom_get_memconf, (void (*)(void *), void *, void *));
+u32 __PROM_O32(__prom_get_hwconf, (u32 (*)(void), void *));
+
+#define _prom_putchar(x) __prom_putchar(___prom_putchar, O32_STK, x)
+#define _prom_getenv(x) __prom_getenv(___prom_getenv, O32_STK, x)
+#define _prom_get_memconf(x) __prom_get_memconf(___prom_get_memconf, O32_STK, x)
+#define _prom_get_hwconf() __prom_get_hwconf(___prom_get_hwconf, O32_STK)
+
+#else
+#define _prom_putchar(x) ___prom_putchar(x)
+#define _prom_getenv(x) ___prom_getenv(x)
+#define _prom_get_memconf(x) ___prom_get_memconf(x)
+#define _prom_get_hwconf(x) ___prom_get_hwconf(x)
+#endif
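+
+/*
+ * For illustration, on 64-bit kernels
+ *     int __PROM_O32(__prom_putchar, (int *(*)(int), void *, int));
+ * expands (roughly) to
+ *     int __prom_putchar(int *(*)(int), void *, int) __asm__("__prom_putchar");
+ *     __asm__("__prom_putchar = call_o32");
+ * so each wrapper is an alias for the o32 dispatcher, which receives the
+ * real PROM entry point and the private o32 stack as its first two
+ * arguments.
+ */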
+
+void prom_putchar(char c)
+{
+ _prom_putchar(c);
+}
+
+
+char *prom_getenv(char *s)
+{
+ return _prom_getenv(s);
+}
+
+void *prom_get_hwconf(void)
+{
+ u32 hwconf = _prom_get_hwconf();
+
+ if (hwconf == 0xffffffff)
+ return NULL;
+
+ return (void *)CKSEG1ADDR(hwconf);
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
+
+/*
+ * /proc/cpuinfo system type
+ */
+char *system_type = "Unknown";
+const char *get_system_type(void)
+{
+ return system_type;
+}
+
+static void __init sni_mem_init(void)
+{
+ int i, memsize;
+ struct membank {
+ u32 size;
+ u32 base;
+ u32 size2;
+ u32 pad1;
+ u32 pad2;
+ } memconf[8];
+ int brd_type = *(unsigned char *)SNI_IDPROM_BRDTYPE;
+
+
+ /* MemSIZE from prom in 16MByte chunks */
+ memsize = *((unsigned char *) SNI_IDPROM_MEMSIZE) * 16;
+
+ pr_debug("IDProm memsize: %u MByte\n", memsize);
+
+ /* get memory bank layout from prom */
+ _prom_get_memconf(&memconf);
+
+ pr_debug("prom_get_mem_conf memory configuration:\n");
+ for (i = 0; i < 8 && memconf[i].size; i++) {
+ if (brd_type == SNI_BRD_PCI_TOWER ||
+ brd_type == SNI_BRD_PCI_TOWER_CPLUS) {
+ if (memconf[i].base >= 0x20000000 &&
+ memconf[i].base < 0x30000000)
+ memconf[i].base -= 0x20000000;
+ }
+ pr_debug("Bank%d: %08x @ %08x\n", i,
+ memconf[i].size, memconf[i].base);
+ memblock_add(memconf[i].base, memconf[i].size);
+ }
+}
+
+void __init prom_init(void)
+{
+ int argc = fw_arg0;
+ u32 *argv = (u32 *)CKSEG0ADDR(fw_arg1);
+ int i;
+
+ sni_mem_init();
+
+ /* copy prom cmdline parameters to kernel cmdline */
+ for (i = 1; i < argc; i++) {
+ strcat(arcs_cmdline, (char *)CKSEG0ADDR(argv[i]));
+ if (i < (argc - 1))
+ strcat(arcs_cmdline, " ");
+ }
+}
diff --git a/arch/mips/generic/Kconfig b/arch/mips/generic/Kconfig
new file mode 100644
index 000000000..55d9aed7c
--- /dev/null
+++ b/arch/mips/generic/Kconfig
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: GPL-2.0
+if MIPS_GENERIC_KERNEL
+
+config LEGACY_BOARDS
+ bool
+ help
+ Select this from your board if the board must use a legacy, non-UHI,
+ boot protocol. This will cause the kernel to scan through the list of
+ supported machines calling their detect functions in turn if the
+ kernel is booted without being provided with an FDT via the UHI
+ boot protocol.
+
+config YAMON_DT_SHIM
+ bool
+ help
+ Select this from your board if the board uses the YAMON bootloader
+ and you wish to include code which helps translate various
+ YAMON-provided environment variables into device tree properties.
+
+comment "Legacy (non-UHI/non-FIT) Boards"
+
+config LEGACY_BOARD_SEAD3
+ bool "Support MIPS SEAD-3 boards"
+ select LEGACY_BOARDS
+ select YAMON_DT_SHIM
+ help
+ Enable this to include support for booting on MIPS SEAD-3 FPGA-based
+ development boards, which boot using a legacy boot protocol.
+
+comment "MSCC Ocelot doesn't work with SEAD3 enabled"
+ depends on LEGACY_BOARD_SEAD3
+
+config LEGACY_BOARD_OCELOT
+ bool "Support MSCC Ocelot boards"
+ depends on LEGACY_BOARD_SEAD3=n
+ select LEGACY_BOARDS
+ select MSCC_OCELOT
+ select SYS_HAS_EARLY_PRINTK
+ select USE_GENERIC_EARLY_PRINTK_8250
+
+config MSCC_OCELOT
+ bool
+ select GPIOLIB
+ select MSCC_OCELOT_IRQ
+
+comment "FIT/UHI Boards"
+
+config FIT_IMAGE_FDT_BOSTON
+ bool "Include FDT for MIPS Boston boards"
+ help
+ Enable this to include the FDT for the MIPS Boston development board
+ from Imagination Technologies in the FIT kernel image. You should
+ enable this if you wish to boot on a MIPS Boston board, as it is
+ expected by the bootloader.
+
+config FIT_IMAGE_FDT_NI169445
+ bool "Include FDT for NI 169445"
+ help
+ Enable this to include the FDT for the 169445 platform from
+ National Instruments in the FIT kernel image.
+
+config FIT_IMAGE_FDT_XILFPGA
+ bool "Include FDT for Xilfpga"
+ help
+ Enable this to include the FDT for the MIPSfpga platform
+ from Imagination Technologies in the FIT kernel image.
+
+config FIT_IMAGE_FDT_OCELOT
+ bool "Include FDT for Microsemi Ocelot development platforms"
+ select MSCC_OCELOT
+ help
+ Enable this to include the FDT for the Ocelot development platforms
+ from Microsemi in the FIT kernel image.
+ This requires U-Boot on the platform.
+
+config BOARD_INGENIC
+ bool "Support boards based on Ingenic SoCs"
+ select MACH_INGENIC_GENERIC
+ help
+ Enable support for boards based on Ingenic SoCs.
+
+config VIRT_BOARD_RANCHU
+ bool "Support Ranchu platform for Android emulator"
+ help
+ This enables support for the platform used by the Android emulator.
+
+ The Ranchu platform consists of a set of virtual devices. It enables
+ emulation of a variety of virtual configurations while using the
+ Android emulator, which is based on QEMU and supports the same set
+ of virtual devices.
+
+endif
diff --git a/arch/mips/generic/Makefile b/arch/mips/generic/Makefile
new file mode 100644
index 000000000..e37a59bae
--- /dev/null
+++ b/arch/mips/generic/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2016 Imagination Technologies
+# Author: Paul Burton <paul.burton@mips.com>
+#
+
+obj-y += init.o
+obj-y += irq.o
+obj-y += proc.o
+
+obj-$(CONFIG_YAMON_DT_SHIM) += yamon-dt.o
+obj-$(CONFIG_LEGACY_BOARD_SEAD3) += board-sead3.o
+obj-$(CONFIG_LEGACY_BOARD_OCELOT) += board-ocelot.o
+obj-$(CONFIG_MACH_INGENIC) += board-ingenic.o
+obj-$(CONFIG_VIRT_BOARD_RANCHU) += board-ranchu.o
diff --git a/arch/mips/generic/Platform b/arch/mips/generic/Platform
new file mode 100644
index 000000000..f8ef2f9d1
--- /dev/null
+++ b/arch/mips/generic/Platform
@@ -0,0 +1,23 @@
+#
+# Copyright (C) 2016 Imagination Technologies
+# Author: Paul Burton <paul.burton@mips.com>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+
+# Note: order matters, keep the asm/mach-generic include last.
+cflags-$(CONFIG_MACH_INGENIC_SOC) += -I$(srctree)/arch/mips/include/asm/mach-ingenic
+cflags-$(CONFIG_MIPS_GENERIC) += -I$(srctree)/arch/mips/include/asm/mach-generic
+
+load-$(CONFIG_MIPS_GENERIC) += 0xffffffff80100000
+zload-$(CONFIG_MIPS_GENERIC) += 0xffffffff81000000
+all-$(CONFIG_MIPS_GENERIC) := vmlinux.gz.itb
+
+its-y := vmlinux.its.S
+its-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += board-boston.its.S
+its-$(CONFIG_FIT_IMAGE_FDT_NI169445) += board-ni169445.its.S
+its-$(CONFIG_FIT_IMAGE_FDT_OCELOT) += board-ocelot.its.S
+its-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += board-xilfpga.its.S
diff --git a/arch/mips/generic/board-boston.its.S b/arch/mips/generic/board-boston.its.S
new file mode 100644
index 000000000..c45ad2759
--- /dev/null
+++ b/arch/mips/generic/board-boston.its.S
@@ -0,0 +1,22 @@
+/ {
+ images {
+ fdt-boston {
+ description = "img,boston Device Tree";
+ data = /incbin/("boot/dts/img/boston.dtb");
+ type = "flat_dt";
+ arch = "mips";
+ compression = "none";
+ hash {
+ algo = "sha1";
+ };
+ };
+ };
+
+ configurations {
+ conf-boston {
+ description = "Boston Linux kernel";
+ kernel = "kernel";
+ fdt = "fdt-boston";
+ };
+ };
+};
diff --git a/arch/mips/generic/board-ingenic.c b/arch/mips/generic/board-ingenic.c
new file mode 100644
index 000000000..0cec0bea1
--- /dev/null
+++ b/arch/mips/generic/board-ingenic.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Support for Ingenic SoCs
+ *
+ * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * Copyright (C) 2011, Maarten ter Huurne <maarten@treewalker.org>
+ * Copyright (C) 2020 Paul Cercueil <paul@crapouillou.net>
+ */
+
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/pm.h>
+#include <linux/sizes.h>
+#include <linux/suspend.h>
+#include <linux/types.h>
+
+#include <asm/bootinfo.h>
+#include <asm/machine.h>
+#include <asm/reboot.h>
+
+static __init char *ingenic_get_system_type(unsigned long machtype)
+{
+ switch (machtype) {
+ case MACH_INGENIC_X2000E:
+ return "X2000E";
+ case MACH_INGENIC_X2000:
+ return "X2000";
+ case MACH_INGENIC_X1830:
+ return "X1830";
+ case MACH_INGENIC_X1000E:
+ return "X1000E";
+ case MACH_INGENIC_X1000:
+ return "X1000";
+ case MACH_INGENIC_JZ4780:
+ return "JZ4780";
+ case MACH_INGENIC_JZ4775:
+ return "JZ4775";
+ case MACH_INGENIC_JZ4770:
+ return "JZ4770";
+ case MACH_INGENIC_JZ4725B:
+ return "JZ4725B";
+ default:
+ return "JZ4740";
+ }
+}
+
+static __init const void *ingenic_fixup_fdt(const void *fdt, const void *match_data)
+{
+ /*
+ * Old devicetree files for the qi,lb60 board did not have a /memory
+ * node. Hardcode the memory info here.
+ */
+ if (!fdt_node_check_compatible(fdt, 0, "qi,lb60") &&
+ fdt_path_offset(fdt, "/memory") < 0)
+ early_init_dt_add_memory_arch(0, SZ_32M);
+
+ mips_machtype = (unsigned long)match_data;
+ system_type = ingenic_get_system_type(mips_machtype);
+
+ return fdt;
+}
+
+static const struct of_device_id ingenic_of_match[] __initconst = {
+ { .compatible = "ingenic,jz4740", .data = (void *)MACH_INGENIC_JZ4740 },
+ { .compatible = "ingenic,jz4725b", .data = (void *)MACH_INGENIC_JZ4725B },
+ { .compatible = "ingenic,jz4770", .data = (void *)MACH_INGENIC_JZ4770 },
+ { .compatible = "ingenic,jz4775", .data = (void *)MACH_INGENIC_JZ4775 },
+ { .compatible = "ingenic,jz4780", .data = (void *)MACH_INGENIC_JZ4780 },
+ { .compatible = "ingenic,x1000", .data = (void *)MACH_INGENIC_X1000 },
+ { .compatible = "ingenic,x1000e", .data = (void *)MACH_INGENIC_X1000E },
+ { .compatible = "ingenic,x1830", .data = (void *)MACH_INGENIC_X1830 },
+ { .compatible = "ingenic,x2000", .data = (void *)MACH_INGENIC_X2000 },
+ { .compatible = "ingenic,x2000e", .data = (void *)MACH_INGENIC_X2000E },
+ {}
+};
+
+MIPS_MACHINE(ingenic) = {
+ .matches = ingenic_of_match,
+ .fixup_fdt = ingenic_fixup_fdt,
+};
+
+static void ingenic_wait_instr(void)
+{
+ __asm__(".set push;\n"
+ ".set mips3;\n"
+ "wait;\n"
+ ".set pop;\n"
+ );
+}
+
+static void ingenic_halt(void)
+{
+ for (;;)
+ ingenic_wait_instr();
+}
+
+static int __maybe_unused ingenic_pm_enter(suspend_state_t state)
+{
+ ingenic_wait_instr();
+
+ return 0;
+}
+
+static const struct platform_suspend_ops ingenic_pm_ops __maybe_unused = {
+ .valid = suspend_valid_only_mem,
+ .enter = ingenic_pm_enter,
+};
+
+static int __init ingenic_pm_init(void)
+{
+ if (boot_cpu_type() == CPU_XBURST) {
+ if (IS_ENABLED(CONFIG_PM_SLEEP))
+ suspend_set_ops(&ingenic_pm_ops);
+ _machine_halt = ingenic_halt;
+ }
+
+ return 0;
+}
+late_initcall(ingenic_pm_init);
diff --git a/arch/mips/generic/board-ni169445.its.S b/arch/mips/generic/board-ni169445.its.S
new file mode 100644
index 000000000..0a2e8f7a8
--- /dev/null
+++ b/arch/mips/generic/board-ni169445.its.S
@@ -0,0 +1,22 @@
+/ {
+ images {
+ fdt-ni169445 {
+ description = "NI 169445 device tree";
+ data = /incbin/("boot/dts/ni/169445.dtb");
+ type = "flat_dt";
+ arch = "mips";
+ compression = "none";
+ hash {
+ algo = "sha1";
+ };
+ };
+ };
+
+ configurations {
+ conf-ni169445 {
+ description = "NI 169445 Linux Kernel";
+ kernel = "kernel";
+ fdt = "fdt-ni169445";
+ };
+ };
+};
diff --git a/arch/mips/generic/board-ocelot.c b/arch/mips/generic/board-ocelot.c
new file mode 100644
index 000000000..c238e9519
--- /dev/null
+++ b/arch/mips/generic/board-ocelot.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi MIPS SoC support
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+#include <asm/machine.h>
+#include <asm/prom.h>
+
+#define DEVCPU_GCB_CHIP_REGS_CHIP_ID 0x71070000
+#define CHIP_ID_PART_ID GENMASK(27, 12)
+
+#define OCELOT_PART_ID (0x7514 << 12)
+
+#define UART_UART 0x70100000
+
+static __init bool ocelot_detect(void)
+{
+ u32 rev;
+ int idx;
+
+ /* Look for the TLB entry set up by redboot before trying to use it */
+ write_c0_entryhi(DEVCPU_GCB_CHIP_REGS_CHIP_ID);
+ mtc0_tlbw_hazard();
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+ if (idx < 0)
+ return 0;
+
+ /* A TLB entry exists, let's assume it's usable and check the CHIP ID */
+ rev = __raw_readl((void __iomem *)DEVCPU_GCB_CHIP_REGS_CHIP_ID);
+
+ if ((rev & CHIP_ID_PART_ID) != OCELOT_PART_ID)
+ return 0;
+
+ /* Copy command line from bootloader early for Initrd detection */
+ if (fw_arg0 < 10 && (fw_arg1 & 0xFFF00000) == 0x80000000) {
+ unsigned int prom_argc = fw_arg0;
+ const char **prom_argv = (const char **)fw_arg1;
+
+ if (prom_argc > 1 && strlen(prom_argv[1]) > 0)
+ /* ignore all built-in args if any f/w args given */
+ strcpy(arcs_cmdline, prom_argv[1]);
+ }
+
+ return 1;
+}
+
+static void __init ocelot_earlyprintk_init(void)
+{
+ void __iomem *uart_base;
+
+ uart_base = ioremap(UART_UART, 0x20);
+ setup_8250_early_printk_port((unsigned long)uart_base, 2, 50000);
+}
+
+static void __init ocelot_late_init(void)
+{
+ ocelot_earlyprintk_init();
+}
+
+static __init const void *ocelot_fixup_fdt(const void *fdt,
+ const void *match_data)
+{
+ /* This has to be done so late because ioremap needs to work */
+ late_time_init = ocelot_late_init;
+
+ return fdt;
+}
+
+extern char __dtb_ocelot_pcb123_begin[];
+
+MIPS_MACHINE(ocelot) = {
+ .fdt = __dtb_ocelot_pcb123_begin,
+ .fixup_fdt = ocelot_fixup_fdt,
+ .detect = ocelot_detect,
+};
diff --git a/arch/mips/generic/board-ocelot.its.S b/arch/mips/generic/board-ocelot.its.S
new file mode 100644
index 000000000..8c7e3a1b6
--- /dev/null
+++ b/arch/mips/generic/board-ocelot.its.S
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/ {
+ images {
+ fdt-ocelot_pcb123 {
+ description = "MSCC Ocelot PCB123 Device Tree";
+ data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb");
+ type = "flat_dt";
+ arch = "mips";
+ compression = "none";
+ hash {
+ algo = "sha1";
+ };
+ };
+
+ fdt-ocelot_pcb120 {
+ description = "MSCC Ocelot PCB120 Device Tree";
+ data = /incbin/("boot/dts/mscc/ocelot_pcb120.dtb");
+ type = "flat_dt";
+ arch = "mips";
+ compression = "none";
+ hash {
+ algo = "sha1";
+ };
+ };
+ };
+
+ configurations {
+ conf-ocelot_pcb123 {
+ description = "Ocelot Linux kernel";
+ kernel = "kernel";
+ fdt = "fdt-ocelot_pcb123";
+ };
+
+ conf-ocelot_pcb120 {
+ description = "Ocelot Linux kernel";
+ kernel = "kernel";
+ fdt = "fdt-ocelot_pcb120";
+ };
+ };
+};
diff --git a/arch/mips/generic/board-ranchu.c b/arch/mips/generic/board-ranchu.c
new file mode 100644
index 000000000..a89aaad59
--- /dev/null
+++ b/arch/mips/generic/board-ranchu.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Support code for virtual Ranchu board for MIPS.
+ *
+ * Author: Miodrag Dinic <miodrag.dinic@mips.com>
+ */
+
+#include <linux/of_address.h>
+#include <linux/types.h>
+
+#include <asm/machine.h>
+#include <asm/mipsregs.h>
+#include <asm/time.h>
+
+#define GOLDFISH_TIMER_LOW 0x00
+#define GOLDFISH_TIMER_HIGH 0x04
+
+static __init u64 read_rtc_time(void __iomem *base)
+{
+ u32 time_low;
+ u32 time_high;
+
+ /*
+ * Reading the low address latches the high value
+ * as well, so there is no fear that we may read
+ * an inaccurate high value.
+ */
+ time_low = readl(base + GOLDFISH_TIMER_LOW);
+ time_high = readl(base + GOLDFISH_TIMER_HIGH);
+
+ return ((u64)time_high << 32) | time_low;
+}
+
+static __init unsigned int ranchu_measure_hpt_freq(void)
+{
+ u64 rtc_start, rtc_current, rtc_delta;
+ unsigned int start, count;
+ struct device_node *np;
+ void __iomem *rtc_base;
+
+ np = of_find_compatible_node(NULL, NULL, "google,goldfish-rtc");
+ if (!np)
+ panic("%s(): Failed to find 'google,goldfish-rtc' dt node!",
+ __func__);
+
+ rtc_base = of_iomap(np, 0);
+ if (!rtc_base)
+ panic("%s(): Failed to ioremap Goldfish RTC base!", __func__);
+
+ /*
+ * Poll the nanosecond resolution RTC for one
+ * second to calibrate the CPU frequency.
+ */
+ rtc_start = read_rtc_time(rtc_base);
+ start = read_c0_count();
+
+ do {
+ rtc_current = read_rtc_time(rtc_base);
+ rtc_delta = rtc_current - rtc_start;
+ } while (rtc_delta < NSEC_PER_SEC);
+
+ count = read_c0_count() - start;
+
+ /*
+ * Make sure the frequency will be a round number.
+ * Without this correction, the returned value may vary
+ * between subsequent emulation executions.
+ *
+ * TODO: Set this value using device tree.
+ */
+ count += 5000;
+ count -= count % 10000;
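+ /*
+ * e.g. (hypothetical) a raw count of 99,996,500 becomes 100,001,500
+ * and then 100,000,000, i.e. it is rounded to the nearest multiple
+ * of 10 kHz.
+ */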
+
+ iounmap(rtc_base);
+
+ return count;
+}
+
+static const struct of_device_id ranchu_of_match[] __initconst = {
+ {
+ .compatible = "mti,ranchu",
+ },
+ {}
+};
+
+MIPS_MACHINE(ranchu) = {
+ .matches = ranchu_of_match,
+ .measure_hpt_freq = ranchu_measure_hpt_freq,
+};
diff --git a/arch/mips/generic/board-sead3.c b/arch/mips/generic/board-sead3.c
new file mode 100644
index 000000000..748ef4228
--- /dev/null
+++ b/arch/mips/generic/board-sead3.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#define pr_fmt(fmt) "sead3: " fmt
+
+#include <linux/errno.h>
+#include <linux/libfdt.h>
+#include <linux/printk.h>
+#include <linux/sizes.h>
+
+#include <asm/fw/fw.h>
+#include <asm/io.h>
+#include <asm/machine.h>
+#include <asm/yamon-dt.h>
+
+#define SEAD_CONFIG CKSEG1ADDR(0x1b100110)
+#define SEAD_CONFIG_GIC_PRESENT BIT(1)
+
+#define MIPS_REVISION CKSEG1ADDR(0x1fc00010)
+#define MIPS_REVISION_MACHINE (0xf << 4)
+#define MIPS_REVISION_MACHINE_SEAD3 (0x4 << 4)
+
+/*
+ * Maximum 384MB RAM at physical address 0, preceding any I/O.
+ */
+static struct yamon_mem_region mem_regions[] __initdata = {
+ /* start size */
+ { 0, SZ_256M + SZ_128M },
+ {}
+};
+
+static __init bool sead3_detect(void)
+{
+ uint32_t rev;
+
+ rev = __raw_readl((void *)MIPS_REVISION);
+ return (rev & MIPS_REVISION_MACHINE) == MIPS_REVISION_MACHINE_SEAD3;
+}
+
+static __init int append_memory(void *fdt)
+{
+ return yamon_dt_append_memory(fdt, mem_regions);
+}
+
+static __init int remove_gic(void *fdt)
+{
+ const unsigned int cpu_ehci_int = 2;
+ const unsigned int cpu_uart_int = 4;
+ const unsigned int cpu_eth_int = 6;
+ int gic_off, cpu_off, uart_off, eth_off, ehci_off, err;
+ uint32_t cfg, cpu_phandle;
+
+ /* leave the GIC node intact if a GIC is present */
+ cfg = __raw_readl((uint32_t *)SEAD_CONFIG);
+ if (cfg & SEAD_CONFIG_GIC_PRESENT)
+ return 0;
+
+ gic_off = fdt_node_offset_by_compatible(fdt, -1, "mti,gic");
+ if (gic_off < 0) {
+ pr_err("unable to find DT GIC node: %d\n", gic_off);
+ return gic_off;
+ }
+
+ err = fdt_nop_node(fdt, gic_off);
+ if (err) {
+ pr_err("unable to nop GIC node\n");
+ return err;
+ }
+
+ cpu_off = fdt_node_offset_by_compatible(fdt, -1,
+ "mti,cpu-interrupt-controller");
+ if (cpu_off < 0) {
+ pr_err("unable to find CPU intc node: %d\n", cpu_off);
+ return cpu_off;
+ }
+
+ cpu_phandle = fdt_get_phandle(fdt, cpu_off);
+ if (!cpu_phandle) {
+ pr_err("unable to get CPU intc phandle\n");
+ return -EINVAL;
+ }
+
+ uart_off = fdt_node_offset_by_compatible(fdt, -1, "ns16550a");
+ while (uart_off >= 0) {
+ err = fdt_setprop_u32(fdt, uart_off, "interrupt-parent",
+ cpu_phandle);
+ if (err) {
+ pr_warn("unable to set UART interrupt-parent: %d\n",
+ err);
+ return err;
+ }
+
+ err = fdt_setprop_u32(fdt, uart_off, "interrupts",
+ cpu_uart_int);
+ if (err) {
+ pr_err("unable to set UART interrupts property: %d\n",
+ err);
+ return err;
+ }
+
+ uart_off = fdt_node_offset_by_compatible(fdt, uart_off,
+ "ns16550a");
+ }
+ if (uart_off != -FDT_ERR_NOTFOUND) {
+ pr_err("error searching for UART DT node: %d\n", uart_off);
+ return uart_off;
+ }
+
+ eth_off = fdt_node_offset_by_compatible(fdt, -1, "smsc,lan9115");
+ if (eth_off < 0) {
+ pr_err("unable to find ethernet DT node: %d\n", eth_off);
+ return eth_off;
+ }
+
+ err = fdt_setprop_u32(fdt, eth_off, "interrupt-parent", cpu_phandle);
+ if (err) {
+ pr_err("unable to set ethernet interrupt-parent: %d\n", err);
+ return err;
+ }
+
+ err = fdt_setprop_u32(fdt, eth_off, "interrupts", cpu_eth_int);
+ if (err) {
+ pr_err("unable to set ethernet interrupts property: %d\n", err);
+ return err;
+ }
+
+ ehci_off = fdt_node_offset_by_compatible(fdt, -1, "generic-ehci");
+ if (ehci_off < 0) {
+ pr_err("unable to find EHCI DT node: %d\n", ehci_off);
+ return ehci_off;
+ }
+
+ err = fdt_setprop_u32(fdt, ehci_off, "interrupt-parent", cpu_phandle);
+ if (err) {
+ pr_err("unable to set EHCI interrupt-parent: %d\n", err);
+ return err;
+ }
+
+ err = fdt_setprop_u32(fdt, ehci_off, "interrupts", cpu_ehci_int);
+ if (err) {
+ pr_err("unable to set EHCI interrupts property: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct mips_fdt_fixup sead3_fdt_fixups[] __initconst = {
+ { yamon_dt_append_cmdline, "append command line" },
+ { append_memory, "append memory" },
+ { remove_gic, "remove GIC when not present" },
+ { yamon_dt_serial_config, "append serial configuration" },
+ { },
+};
+
+static __init const void *sead3_fixup_fdt(const void *fdt,
+ const void *match_data)
+{
+ static unsigned char fdt_buf[16 << 10] __initdata;
+ int err;
+
+ if (fdt_check_header(fdt))
+ panic("Corrupt DT");
+
+ /* if this isn't SEAD3, something went wrong */
+ BUG_ON(fdt_node_check_compatible(fdt, 0, "mti,sead-3"));
+
+ fw_init_cmdline();
+
+ err = apply_mips_fdt_fixups(fdt_buf, sizeof(fdt_buf),
+ fdt, sead3_fdt_fixups);
+ if (err)
+ panic("Unable to fixup FDT: %d", err);
+
+ return fdt_buf;
+}
+
+static __init unsigned int sead3_measure_hpt_freq(void)
+{
+ void __iomem *status_reg = (void __iomem *)0xbf000410;
+ unsigned int freq, orig, tick = 0;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ orig = readl(status_reg) & 0x2; /* get original sample */
+ /* wait for transition */
+ while ((readl(status_reg) & 0x2) == orig)
+ ;
+ orig = orig ^ 0x2; /* flip the bit */
+
+ write_c0_count(0);
+
+ /* wait 1 second (the sampling clock transitions every 10ms) */
+ while (tick < 100) {
+ /* wait for transition */
+ while ((readl(status_reg) & 0x2) == orig)
+ ;
+ orig = orig ^ 0x2; /* flip the bit */
+ tick++;
+ }
+
+ freq = read_c0_count();
+
+ local_irq_restore(flags);
+
+ return freq;
+}
+
+extern char __dtb_sead3_begin[];
+
+MIPS_MACHINE(sead3) = {
+ .fdt = __dtb_sead3_begin,
+ .detect = sead3_detect,
+ .fixup_fdt = sead3_fixup_fdt,
+ .measure_hpt_freq = sead3_measure_hpt_freq,
+};
diff --git a/arch/mips/generic/board-xilfpga.its.S b/arch/mips/generic/board-xilfpga.its.S
new file mode 100644
index 000000000..08c1e900e
--- /dev/null
+++ b/arch/mips/generic/board-xilfpga.its.S
@@ -0,0 +1,22 @@
+/ {
+ images {
+ fdt-xilfpga {
+ description = "MIPSfpga (xilfpga) Device Tree";
+ data = /incbin/("boot/dts/xilfpga/nexys4ddr.dtb");
+ type = "flat_dt";
+ arch = "mips";
+ compression = "none";
+ hash {
+ algo = "sha1";
+ };
+ };
+ };
+
+ configurations {
+ conf-xilfpga {
+ description = "MIPSfpga Linux kernel";
+ kernel = "kernel";
+ fdt = "fdt-xilfpga";
+ };
+ };
+};
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
new file mode 100644
index 000000000..66a19337d
--- /dev/null
+++ b/arch/mips/generic/init.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/irqchip.h>
+#include <linux/of_clk.h>
+#include <linux/of_fdt.h>
+
+#include <asm/bootinfo.h>
+#include <asm/fw/fw.h>
+#include <asm/irq_cpu.h>
+#include <asm/machine.h>
+#include <asm/mips-cps.h>
+#include <asm/prom.h>
+#include <asm/smp-ops.h>
+#include <asm/time.h>
+
+static __initconst const void *fdt;
+static __initconst const struct mips_machine *mach;
+static __initconst const void *mach_match_data;
+
+void __init prom_init(void)
+{
+ plat_get_fdt();
+ BUG_ON(!fdt);
+}
+
+void __init *plat_get_fdt(void)
+{
+ const struct mips_machine *check_mach;
+ const struct of_device_id *match;
+
+ if (fdt)
+ /* Already set up */
+ return (void *)fdt;
+
+ if (fw_passed_dtb && !fdt_check_header((void *)fw_passed_dtb)) {
+ /*
+ * We have been provided with the appropriate device tree for
+ * the board. Make use of it & search for any machine struct
+ * based upon the root compatible string.
+ */
+ fdt = (void *)fw_passed_dtb;
+
+ for_each_mips_machine(check_mach) {
+ match = mips_machine_is_compatible(check_mach, fdt);
+ if (match) {
+ mach = check_mach;
+ mach_match_data = match->data;
+ break;
+ }
+ }
+ } else if (IS_ENABLED(CONFIG_LEGACY_BOARDS)) {
+ /*
+ * We weren't booted using the UHI boot protocol, but do
+ * support some number of boards with legacy boot protocols.
+ * Attempt to find the right one.
+ */
+ for_each_mips_machine(check_mach) {
+ if (!check_mach->detect)
+ continue;
+
+ if (!check_mach->detect())
+ continue;
+
+ mach = check_mach;
+ }
+
+ /*
+ * If we don't recognise the machine then we can't continue, so
+ * die here.
+ */
+ BUG_ON(!mach);
+
+ /* Retrieve the machine's FDT */
+ fdt = mach->fdt;
+ }
+ return (void *)fdt;
+}
+
+#ifdef CONFIG_RELOCATABLE
+
+void __init plat_fdt_relocated(void *new_location)
+{
+ /*
+ * Reset fdt, as the cached value would point to the location
+ * before relocation happened, and update the location argument
+ * if it was passed using UHI.
+ */
+ fdt = NULL;
+
+ if (fw_arg0 == -2)
+ fw_arg1 = (unsigned long)new_location;
+}
+
+#endif /* CONFIG_RELOCATABLE */
+
+void __init plat_mem_setup(void)
+{
+ if (mach && mach->fixup_fdt)
+ fdt = mach->fixup_fdt(fdt, mach_match_data);
+
+ fw_init_cmdline();
+ __dt_setup_arch((void *)fdt);
+}
+
+void __init device_tree_init(void)
+{
+ int err;
+
+ unflatten_and_copy_device_tree();
+ mips_cpc_probe();
+
+ err = register_cps_smp_ops();
+ if (err)
+ err = register_up_smp_ops();
+}
+
+int __init apply_mips_fdt_fixups(void *fdt_out, size_t fdt_out_size,
+ const void *fdt_in,
+ const struct mips_fdt_fixup *fixups)
+{
+ int err;
+
+ err = fdt_open_into(fdt_in, fdt_out, fdt_out_size);
+ if (err) {
+ pr_err("Failed to open FDT\n");
+ return err;
+ }
+
+ for (; fixups->apply; fixups++) {
+ err = fixups->apply(fdt_out);
+ if (err) {
+ pr_err("Failed to apply FDT fixup \"%s\"\n",
+ fixups->description);
+ return err;
+ }
+ }
+
+ err = fdt_pack(fdt_out);
+ if (err)
+ pr_err("Failed to pack FDT\n");
+ return err;
+}
+
+void __init plat_time_init(void)
+{
+ struct device_node *np;
+ struct clk *clk;
+
+ of_clk_init(NULL);
+
+ if (!cpu_has_counter) {
+ mips_hpt_frequency = 0;
+ } else if (mach && mach->measure_hpt_freq) {
+ mips_hpt_frequency = mach->measure_hpt_freq();
+ } else {
+ np = of_get_cpu_node(0, NULL);
+ if (!np) {
+ pr_err("Failed to get CPU node\n");
+ return;
+ }
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk));
+ return;
+ }
+
+ mips_hpt_frequency = clk_get_rate(clk);
+ clk_put(clk);
+
+ switch (boot_cpu_type()) {
+ case CPU_20KC:
+ case CPU_25KF:
+ /* The counter runs at the CPU clock rate */
+ break;
+ default:
+ /* The counter runs at half the CPU clock rate */
+ mips_hpt_frequency /= 2;
+ break;
+ }
+ }
+
+ timer_probe();
+}
+
+void __init arch_init_irq(void)
+{
+ struct device_node *intc_node;
+
+ intc_node = of_find_compatible_node(NULL, NULL,
+ "mti,cpu-interrupt-controller");
+ if (!cpu_has_veic && !intc_node)
+ mips_cpu_irq_init();
+ of_node_put(intc_node);
+
+ irqchip_init();
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/generic/irq.c b/arch/mips/generic/irq.c
new file mode 100644
index 000000000..933119262
--- /dev/null
+++ b/arch/mips/generic/irq.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <asm/irq.h>
+#include <asm/mips-cps.h>
+#include <asm/time.h>
+
+int get_c0_fdc_int(void)
+{
+ int mips_cpu_fdc_irq;
+
+ if (mips_gic_present())
+ mips_cpu_fdc_irq = gic_get_c0_fdc_int();
+ else if (cpu_has_veic)
+ panic("Unimplemented!");
+ else if (cp0_fdc_irq >= 0)
+ mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
+ else
+ mips_cpu_fdc_irq = -1;
+
+ return mips_cpu_fdc_irq;
+}
+
+int get_c0_perfcount_int(void)
+{
+ int mips_cpu_perf_irq;
+
+ if (mips_gic_present())
+ mips_cpu_perf_irq = gic_get_c0_perfcount_int();
+ else if (cpu_has_veic)
+ panic("Unimplemented!");
+ else if (cp0_perfcount_irq >= 0)
+ mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ else
+ mips_cpu_perf_irq = -1;
+
+ return mips_cpu_perf_irq;
+}
+
+unsigned int get_c0_compare_int(void)
+{
+ int mips_cpu_timer_irq;
+
+ if (mips_gic_present())
+ mips_cpu_timer_irq = gic_get_c0_compare_int();
+ else if (cpu_has_veic)
+ panic("Unimplemented!");
+ else
+ mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+
+ return mips_cpu_timer_irq;
+}
diff --git a/arch/mips/generic/proc.c b/arch/mips/generic/proc.c
new file mode 100644
index 000000000..cce2fde21
--- /dev/null
+++ b/arch/mips/generic/proc.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/of.h>
+
+#include <asm/bootinfo.h>
+
+char *system_type;
+
+const char *get_system_type(void)
+{
+ const char *str;
+ int err;
+
+ if (system_type)
+ return system_type;
+
+ err = of_property_read_string(of_root, "model", &str);
+ if (!err)
+ return str;
+
+ err = of_property_read_string_index(of_root, "compatible", 0, &str);
+ if (!err)
+ return str;
+
+ return "Unknown";
+}
diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S
new file mode 100644
index 000000000..3e2546765
--- /dev/null
+++ b/arch/mips/generic/vmlinux.its.S
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/dts-v1/;
+
+/ {
+ description = KERNEL_NAME;
+ #address-cells = <ADDR_CELLS>;
+
+ images {
+ kernel {
+ description = KERNEL_NAME;
+ data = /incbin/(VMLINUX_BINARY);
+ type = "kernel";
+ arch = "mips";
+ os = "linux";
+ compression = VMLINUX_COMPRESSION;
+ load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>;
+ entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>;
+ hash {
+ algo = "sha1";
+ };
+ };
+ };
+
+ configurations {
+ default = "conf-default";
+
+ conf-default {
+ description = "Generic Linux kernel";
+ kernel = "kernel";
+ };
+ };
+};
diff --git a/arch/mips/generic/yamon-dt.c b/arch/mips/generic/yamon-dt.c
new file mode 100644
index 000000000..a07a5edbc
--- /dev/null
+++ b/arch/mips/generic/yamon-dt.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#define pr_fmt(fmt) "yamon-dt: " fmt
+
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+#include <linux/printk.h>
+
+#include <asm/fw/fw.h>
+#include <asm/yamon-dt.h>
+
+#define MAX_MEM_ARRAY_ENTRIES 2
+
+__init int yamon_dt_append_cmdline(void *fdt)
+{
+ int err, chosen_off;
+
+ /* find or add chosen node */
+ chosen_off = fdt_path_offset(fdt, "/chosen");
+ if (chosen_off == -FDT_ERR_NOTFOUND)
+ chosen_off = fdt_add_subnode(fdt, 0, "chosen");
+ if (chosen_off < 0) {
+ pr_err("Unable to find or add DT chosen node: %d\n",
+ chosen_off);
+ return chosen_off;
+ }
+
+ err = fdt_setprop_string(fdt, chosen_off, "bootargs", fw_getcmdline());
+ if (err) {
+ pr_err("Unable to set bootargs property: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static unsigned int __init gen_fdt_mem_array(
+ const struct yamon_mem_region *regions,
+ __be32 *mem_array,
+ unsigned int max_entries,
+ unsigned long memsize)
+{
+ const struct yamon_mem_region *mr;
+ unsigned long size;
+ unsigned int entries = 0;
+
+ for (mr = regions; mr->size && memsize; ++mr) {
+ if (entries >= max_entries) {
+ pr_warn("Number of regions exceeds max %u\n",
+ max_entries);
+ break;
+ }
+
+ /* How much of the remaining RAM fits in the next region? */
+ size = min_t(unsigned long, memsize, mr->size);
+ memsize -= size;
+
+ /* Emit a memory region */
+ *(mem_array++) = cpu_to_be32(mr->start);
+ *(mem_array++) = cpu_to_be32(size);
+ ++entries;
+
+ /* Discard the next mr->discard bytes */
+ memsize -= min_t(unsigned long, memsize, mr->discard);
+ }
+ return entries;
+}
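+/*
+ * e.g. with a single region { .start = 0, .size = SZ_256M + SZ_128M } (as
+ * used for SEAD-3) and memsize = SZ_256M, this emits one <0x0 0x10000000>
+ * address/size pair.
+ */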
+
+__init int yamon_dt_append_memory(void *fdt,
+ const struct yamon_mem_region *regions)
+{
+ unsigned long phys_memsize = 0, memsize;
+ __be32 mem_array[2 * MAX_MEM_ARRAY_ENTRIES];
+ unsigned int mem_entries;
+ int i, err, mem_off;
+ char *var, param_name[10], *var_names[] = {
+ "ememsize", "memsize",
+ };
+
+ /* find memory size from the bootloader environment */
+ for (i = 0; i < ARRAY_SIZE(var_names); i++) {
+ var = fw_getenv(var_names[i]);
+ if (!var)
+ continue;
+
+ err = kstrtoul(var, 0, &phys_memsize);
+ if (!err)
+ break;
+
+ pr_warn("Failed to read the '%s' env variable '%s'\n",
+ var_names[i], var);
+ }
+
+ if (!phys_memsize) {
+ pr_warn("The bootloader didn't provide memsize: defaulting to 32MB\n");
+ phys_memsize = 32 << 20;
+ }
+
+ /* default to using all available RAM */
+ memsize = phys_memsize;
+
+ /* allow the user to override the usable memory */
+ for (i = 0; i < ARRAY_SIZE(var_names); i++) {
+ snprintf(param_name, sizeof(param_name), "%s=", var_names[i]);
+ var = strstr(arcs_cmdline, param_name);
+ if (!var)
+ continue;
+
+ memsize = memparse(var + strlen(param_name), NULL);
+ }
+
+ /* if the user says there's more RAM than we thought, believe them */
+ phys_memsize = max_t(unsigned long, phys_memsize, memsize);
+
+ /* find or add a memory node */
+ mem_off = fdt_path_offset(fdt, "/memory");
+ if (mem_off == -FDT_ERR_NOTFOUND)
+ mem_off = fdt_add_subnode(fdt, 0, "memory");
+ if (mem_off < 0) {
+ pr_err("Unable to find or add memory DT node: %d\n", mem_off);
+ return mem_off;
+ }
+
+ err = fdt_setprop_string(fdt, mem_off, "device_type", "memory");
+ if (err) {
+ pr_err("Unable to set memory node device_type: %d\n", err);
+ return err;
+ }
+
+ mem_entries = gen_fdt_mem_array(regions, mem_array,
+ MAX_MEM_ARRAY_ENTRIES, phys_memsize);
+ err = fdt_setprop(fdt, mem_off, "reg",
+ mem_array, mem_entries * 2 * sizeof(mem_array[0]));
+ if (err) {
+ pr_err("Unable to set memory regs property: %d\n", err);
+ return err;
+ }
+
+ mem_entries = gen_fdt_mem_array(regions, mem_array,
+ MAX_MEM_ARRAY_ENTRIES, memsize);
+ err = fdt_setprop(fdt, mem_off, "linux,usable-memory",
+ mem_array, mem_entries * 2 * sizeof(mem_array[0]));
+ if (err) {
+ pr_err("Unable to set linux,usable-memory property: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+__init int yamon_dt_serial_config(void *fdt)
+{
+ const char *yamontty, *mode_var;
+ char mode_var_name[9], path[20], parity;
+ unsigned int uart, baud, stop_bits;
+ bool hw_flow;
+ int chosen_off, err;
+
+ yamontty = fw_getenv("yamontty");
+ if (!yamontty || !strcmp(yamontty, "tty0")) {
+ uart = 0;
+ } else if (!strcmp(yamontty, "tty1")) {
+ uart = 1;
+ } else {
+ pr_warn("yamontty environment variable '%s' invalid\n",
+ yamontty);
+ uart = 0;
+ }
+
+ baud = stop_bits = 0;
+ parity = 0;
+ hw_flow = false;
+
+ snprintf(mode_var_name, sizeof(mode_var_name), "modetty%u", uart);
+ mode_var = fw_getenv(mode_var_name);
+ if (mode_var) {
+ while (mode_var[0] >= '0' && mode_var[0] <= '9') {
+ baud *= 10;
+ baud += mode_var[0] - '0';
+ mode_var++;
+ }
+ if (mode_var[0] == ',')
+ mode_var++;
+ if (mode_var[0])
+ parity = mode_var[0];
+ if (mode_var[0] == ',')
+ mode_var++;
+ if (mode_var[0])
+ stop_bits = mode_var[0] - '0';
+ if (mode_var[0] == ',')
+ mode_var++;
+ if (!strcmp(mode_var, "hw"))
+ hw_flow = true;
+ }
+
+ if (!baud)
+ baud = 38400;
+
+ if (parity != 'e' && parity != 'n' && parity != 'o')
+ parity = 'n';
+
+ if (stop_bits != 7 && stop_bits != 8)
+ stop_bits = 8;
+
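+ /*
+ * e.g. (hypothetical) with yamontty unset and modetty0="115200",
+ * the path built below is "serial0:115200n8".
+ */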
+ WARN_ON(snprintf(path, sizeof(path), "serial%u:%u%c%u%s",
+ uart, baud, parity, stop_bits,
+ hw_flow ? "r" : "") >= sizeof(path));
+
+ /* find or add chosen node */
+ chosen_off = fdt_path_offset(fdt, "/chosen");
+ if (chosen_off == -FDT_ERR_NOTFOUND)
+ chosen_off = fdt_add_subnode(fdt, 0, "chosen");
+ if (chosen_off < 0) {
+ pr_err("Unable to find or add DT chosen node: %d\n",
+ chosen_off);
+ return chosen_off;
+ }
+
+ err = fdt_setprop_string(fdt, chosen_off, "stdout-path", path);
+ if (err) {
+ pr_err("Unable to set stdout-path property: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
new file mode 100644
index 000000000..95b4fa7bd
--- /dev/null
+++ b/arch/mips/include/asm/Kbuild
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+# MIPS headers
+generated-y += syscall_table_32_o32.h
+generated-y += syscall_table_64_n32.h
+generated-y += syscall_table_64_n64.h
+generated-y += syscall_table_64_o32.h
+generic-y += export.h
+generic-y += kvm_para.h
+generic-y += mcs_spinlock.h
+generic-y += parport.h
+generic-y += qrwlock.h
+generic-y += qspinlock.h
+generic-y += user.h
diff --git a/arch/mips/include/asm/abi.h b/arch/mips/include/asm/abi.h
new file mode 100644
index 000000000..dba7f4b6b
--- /dev/null
+++ b/arch/mips/include/asm/abi.h
@@ -0,0 +1,32 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005, 06 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2005 MIPS Technologies, Inc.
+ */
+#ifndef _ASM_ABI_H
+#define _ASM_ABI_H
+
+#include <linux/signal_types.h>
+
+#include <asm/signal.h>
+#include <asm/siginfo.h>
+#include <asm/vdso.h>
+
+struct mips_abi {
+ int (* const setup_frame)(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set);
+ int (* const setup_rt_frame)(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set);
+ const unsigned long restart;
+
+ unsigned off_sc_fpregs;
+ unsigned off_sc_fpc_csr;
+ unsigned off_sc_used_math;
+
+ struct mips_vdso_image *vdso;
+};
+
+#endif /* _ASM_ABI_H */
diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h
new file mode 100644
index 000000000..59a48c60a
--- /dev/null
+++ b/arch/mips/include/asm/addrspace.h
@@ -0,0 +1,144 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 99 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999 by Silicon Graphics, Inc.
+ */
+#ifndef _ASM_ADDRSPACE_H
+#define _ASM_ADDRSPACE_H
+
+#include <spaces.h>
+
+/*
+ * Configure language
+ */
+#ifdef __ASSEMBLY__
+#define _ATYPE_
+#define _ATYPE32_
+#define _ATYPE64_
+#define _CONST64_(x) x
+#else
+#define _ATYPE_ __PTRDIFF_TYPE__
+#define _ATYPE32_ int
+#define _ATYPE64_ __s64
+#ifdef CONFIG_64BIT
+#define _CONST64_(x) x ## L
+#else
+#define _CONST64_(x) x ## LL
+#endif
+#endif
+
+/*
+ * 32-bit MIPS address spaces
+ */
+#ifdef __ASSEMBLY__
+#define _ACAST32_
+#define _ACAST64_
+#else
+#define _ACAST32_ (_ATYPE_)(_ATYPE32_) /* widen if necessary */
+#define _ACAST64_ (_ATYPE64_) /* do _not_ narrow */
+#endif
+
+/*
+ * Returns the kernel segment base of a given address
+ */
+#define KSEGX(a) ((_ACAST32_(a)) & _ACAST32_(0xe0000000))
+
+/*
+ * Returns the physical address of a CKSEGx / XKPHYS address
+ */
+#define CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
+#define XPHYSADDR(a) ((_ACAST64_(a)) & \
+ _CONST64_(0x0000ffffffffffff))
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Memory segments (64bit kernel mode addresses)
+ * The compatibility segments use the full 64-bit sign extended value. Note
+ * the R8000 doesn't have them so don't reference these in generic MIPS code.
+ */
+#define XKUSEG _CONST64_(0x0000000000000000)
+#define XKSSEG _CONST64_(0x4000000000000000)
+#define XKPHYS _CONST64_(0x8000000000000000)
+#define XKSEG _CONST64_(0xc000000000000000)
+#define CKSEG0 _CONST64_(0xffffffff80000000)
+#define CKSEG1 _CONST64_(0xffffffffa0000000)
+#define CKSSEG _CONST64_(0xffffffffc0000000)
+#define CKSEG3 _CONST64_(0xffffffffe0000000)
+
+#define CKSEG0ADDR(a) (CPHYSADDR(a) | CKSEG0)
+#define CKSEG1ADDR(a) (CPHYSADDR(a) | CKSEG1)
+#define CKSEG2ADDR(a) (CPHYSADDR(a) | CKSEG2)
+#define CKSEG3ADDR(a) (CPHYSADDR(a) | CKSEG3)
+
+#else
+
+#define CKSEG0ADDR(a) (CPHYSADDR(a) | KSEG0)
+#define CKSEG1ADDR(a) (CPHYSADDR(a) | KSEG1)
+#define CKSEG2ADDR(a) (CPHYSADDR(a) | KSEG2)
+#define CKSEG3ADDR(a) (CPHYSADDR(a) | KSEG3)
+
+/*
+ * Map an address to a certain kernel segment
+ */
+#define KSEG0ADDR(a) (CPHYSADDR(a) | KSEG0)
+#define KSEG1ADDR(a) (CPHYSADDR(a) | KSEG1)
+#define KSEG2ADDR(a) (CPHYSADDR(a) | KSEG2)
+#define KSEG3ADDR(a) (CPHYSADDR(a) | KSEG3)
+
+/*
+ * Memory segments (32bit kernel mode addresses)
+ * These are the traditional names used in the 32-bit universe.
+ */
+#define KUSEG 0x00000000
+#define KSEG0 0x80000000
+#define KSEG1 0xa0000000
+#define KSEG2 0xc0000000
+#define KSEG3 0xe0000000
+
+#define CKUSEG 0x00000000
+#define CKSEG0 0x80000000
+#define CKSEG1 0xa0000000
+#define CKSEG2 0xc0000000
+#define CKSEG3 0xe0000000
+
+#endif
+
+/*
+ * Cache modes for XKPHYS address conversion macros
+ */
+#define K_CALG_COH_EXCL1_NOL2 0
+#define K_CALG_COH_SHRL1_NOL2 1
+#define K_CALG_UNCACHED 2
+#define K_CALG_NONCOHERENT 3
+#define K_CALG_COH_EXCL 4
+#define K_CALG_COH_SHAREABLE 5
+#define K_CALG_NOTUSED 6
+#define K_CALG_UNCACHED_ACCEL 7
+
+/*
+ * 64-bit address conversions
+ */
+#define PHYS_TO_XKSEG_UNCACHED(p) PHYS_TO_XKPHYS(K_CALG_UNCACHED, (p))
+#define PHYS_TO_XKSEG_CACHED(p) PHYS_TO_XKPHYS(K_CALG_COH_SHAREABLE, (p))
+#define XKPHYS_TO_PHYS(p) ((p) & TO_PHYS_MASK)
+#define PHYS_TO_XKPHYS(cm, a) (XKPHYS | (_ACAST64_(cm) << 59) | (a))
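+/*
+ * For example, PHYS_TO_XKSEG_UNCACHED(0x1fc00000) puts CCA 2 (uncached) in
+ * bits 61:59 of the XKPHYS base:
+ *     0x8000000000000000 | (2UL << 59) | 0x1fc00000 = 0x900000001fc00000
+ */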
+
+/*
+ * The ultimate limit of the 64-bit MIPS architecture: 2 bits for selecting
+ * the region, 3 bits for the CCA mode. This leaves 59 bits of which the
+ * R8000 implements most with its 48-bit physical address space.
+ */
+#define TO_PHYS_MASK _CONST64_(0x07ffffffffffffff) /* 2^^59 - 1 */
+
+#define COMPAT_K1BASE32 _CONST64_(0xffffffffa0000000)
+#define PHYS_TO_COMPATK1(x) ((x) | COMPAT_K1BASE32) /* 32-bit compat k1 */
+
+#define KDM_TO_PHYS(x) (_ACAST64_ (x) & TO_PHYS_MASK)
+#define PHYS_TO_K0(x) (_ACAST64_ (x) | CAC_BASE)
+
+#endif /* _ASM_ADDRSPACE_H */
diff --git a/arch/mips/include/asm/amon.h b/arch/mips/include/asm/amon.h
new file mode 100644
index 000000000..3cc03c64a
--- /dev/null
+++ b/arch/mips/include/asm/amon.h
@@ -0,0 +1,12 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ *
+ * Arbitrary Monitor Support (AMON)
+ */
+int amon_cpu_avail(int cpu);
+int amon_cpu_start(int cpu, unsigned long pc, unsigned long sp,
+ unsigned long gp, unsigned long a0);
diff --git a/arch/mips/include/asm/arch_hweight.h b/arch/mips/include/asm/arch_hweight.h
new file mode 100644
index 000000000..712a7445e
--- /dev/null
+++ b/arch/mips/include/asm/arch_hweight.h
@@ -0,0 +1,38 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+#ifndef _ASM_ARCH_HWEIGHT_H
+#define _ASM_ARCH_HWEIGHT_H
+
+#ifdef ARCH_HAS_USABLE_BUILTIN_POPCOUNT
+
+#include <asm/types.h>
+
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+ return __builtin_popcount(w);
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+ return __builtin_popcount(w & 0xffff);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+ return __builtin_popcount(w & 0xff);
+}
+
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+ return __builtin_popcountll(w);
+}
+
+#else
+#include <asm-generic/bitops/arch_hweight.h>
+#endif
+
+#endif /* _ASM_ARCH_HWEIGHT_H */
diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h
new file mode 100644
index 000000000..e327ebc76
--- /dev/null
+++ b/arch/mips/include/asm/asm-eva.h
@@ -0,0 +1,190 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ *
+ */
+
+#ifndef __ASM_ASM_EVA_H
+#define __ASM_ASM_EVA_H
+
+#ifndef __ASSEMBLY__
+
+/* Kernel variants */
+
+#define kernel_cache(op, base) "cache " op ", " base "\n"
+#define kernel_pref(hint, base) "pref " hint ", " base "\n"
+#define kernel_ll(reg, addr) "ll " reg ", " addr "\n"
+#define kernel_sc(reg, addr) "sc " reg ", " addr "\n"
+#define kernel_lw(reg, addr) "lw " reg ", " addr "\n"
+#define kernel_lwl(reg, addr) "lwl " reg ", " addr "\n"
+#define kernel_lwr(reg, addr) "lwr " reg ", " addr "\n"
+#define kernel_lh(reg, addr) "lh " reg ", " addr "\n"
+#define kernel_lb(reg, addr) "lb " reg ", " addr "\n"
+#define kernel_lbu(reg, addr) "lbu " reg ", " addr "\n"
+#define kernel_sw(reg, addr) "sw " reg ", " addr "\n"
+#define kernel_swl(reg, addr) "swl " reg ", " addr "\n"
+#define kernel_swr(reg, addr) "swr " reg ", " addr "\n"
+#define kernel_sh(reg, addr) "sh " reg ", " addr "\n"
+#define kernel_sb(reg, addr) "sb " reg ", " addr "\n"
+
+#ifdef CONFIG_32BIT
+/*
+ * No 'sd' or 'ld' instructions in 32-bit but the code will
+ * do the correct thing
+ */
+#define kernel_sd(reg, addr) user_sw(reg, addr)
+#define kernel_ld(reg, addr) user_lw(reg, addr)
+#else
+#define kernel_sd(reg, addr) "sd " reg", " addr "\n"
+#define kernel_ld(reg, addr) "ld " reg", " addr "\n"
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_EVA
+
+#define __BUILD_EVA_INSN(insn, reg, addr) \
+ " .set push\n" \
+ " .set mips0\n" \
+ " .set eva\n" \
+ " "insn" "reg", "addr "\n" \
+ " .set pop\n"
+
+#define user_cache(op, base) __BUILD_EVA_INSN("cachee", op, base)
+#define user_pref(hint, base) __BUILD_EVA_INSN("prefe", hint, base)
+#define user_ll(reg, addr) __BUILD_EVA_INSN("lle", reg, addr)
+#define user_sc(reg, addr) __BUILD_EVA_INSN("sce", reg, addr)
+#define user_lw(reg, addr) __BUILD_EVA_INSN("lwe", reg, addr)
+#define user_lwl(reg, addr) __BUILD_EVA_INSN("lwle", reg, addr)
+#define user_lwr(reg, addr) __BUILD_EVA_INSN("lwre", reg, addr)
+#define user_lh(reg, addr) __BUILD_EVA_INSN("lhe", reg, addr)
+#define user_lb(reg, addr) __BUILD_EVA_INSN("lbe", reg, addr)
+#define user_lbu(reg, addr) __BUILD_EVA_INSN("lbue", reg, addr)
+/* No 64-bit EVA instruction for loading double words */
+#define user_ld(reg, addr) user_lw(reg, addr)
+#define user_sw(reg, addr) __BUILD_EVA_INSN("swe", reg, addr)
+#define user_swl(reg, addr) __BUILD_EVA_INSN("swle", reg, addr)
+#define user_swr(reg, addr) __BUILD_EVA_INSN("swre", reg, addr)
+#define user_sh(reg, addr) __BUILD_EVA_INSN("she", reg, addr)
+#define user_sb(reg, addr) __BUILD_EVA_INSN("sbe", reg, addr)
+/* No 64-bit EVA instruction for storing double words */
+#define user_sd(reg, addr) user_sw(reg, addr)
+
+#else
+
+#define user_cache(op, base) kernel_cache(op, base)
+#define user_pref(hint, base) kernel_pref(hint, base)
+#define user_ll(reg, addr) kernel_ll(reg, addr)
+#define user_sc(reg, addr) kernel_sc(reg, addr)
+#define user_lw(reg, addr) kernel_lw(reg, addr)
+#define user_lwl(reg, addr) kernel_lwl(reg, addr)
+#define user_lwr(reg, addr) kernel_lwr(reg, addr)
+#define user_lh(reg, addr) kernel_lh(reg, addr)
+#define user_lb(reg, addr) kernel_lb(reg, addr)
+#define user_lbu(reg, addr) kernel_lbu(reg, addr)
+#define user_sw(reg, addr) kernel_sw(reg, addr)
+#define user_swl(reg, addr) kernel_swl(reg, addr)
+#define user_swr(reg, addr) kernel_swr(reg, addr)
+#define user_sh(reg, addr) kernel_sh(reg, addr)
+#define user_sb(reg, addr) kernel_sb(reg, addr)
+
+#ifdef CONFIG_32BIT
+#define user_sd(reg, addr) kernel_sw(reg, addr)
+#define user_ld(reg, addr) kernel_lw(reg, addr)
+#else
+#define user_sd(reg, addr) kernel_sd(reg, addr)
+#define user_ld(reg, addr) kernel_ld(reg, addr)
+#endif /* CONFIG_32BIT */
+
+#endif /* CONFIG_EVA */
+
+#else /* __ASSEMBLY__ */
+
+#define kernel_cache(op, base) cache op, base
+#define kernel_pref(hint, base) pref hint, base
+#define kernel_ll(reg, addr) ll reg, addr
+#define kernel_sc(reg, addr) sc reg, addr
+#define kernel_lw(reg, addr) lw reg, addr
+#define kernel_lwl(reg, addr) lwl reg, addr
+#define kernel_lwr(reg, addr) lwr reg, addr
+#define kernel_lh(reg, addr) lh reg, addr
+#define kernel_lb(reg, addr) lb reg, addr
+#define kernel_lbu(reg, addr) lbu reg, addr
+#define kernel_sw(reg, addr) sw reg, addr
+#define kernel_swl(reg, addr) swl reg, addr
+#define kernel_swr(reg, addr) swr reg, addr
+#define kernel_sh(reg, addr) sh reg, addr
+#define kernel_sb(reg, addr) sb reg, addr
+
+#ifdef CONFIG_32BIT
+/*
+ * No 'sd' or 'ld' instructions in 32-bit but the code will
+ * do the correct thing
+ */
+#define kernel_sd(reg, addr) user_sw(reg, addr)
+#define kernel_ld(reg, addr) user_lw(reg, addr)
+#else
+#define kernel_sd(reg, addr) sd reg, addr
+#define kernel_ld(reg, addr) ld reg, addr
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_EVA
+
+#define __BUILD_EVA_INSN(insn, reg, addr) \
+ .set push; \
+ .set mips0; \
+ .set eva; \
+ insn reg, addr; \
+ .set pop;
+
+#define user_cache(op, base) __BUILD_EVA_INSN(cachee, op, base)
+#define user_pref(hint, base) __BUILD_EVA_INSN(prefe, hint, base)
+#define user_ll(reg, addr) __BUILD_EVA_INSN(lle, reg, addr)
+#define user_sc(reg, addr) __BUILD_EVA_INSN(sce, reg, addr)
+#define user_lw(reg, addr) __BUILD_EVA_INSN(lwe, reg, addr)
+#define user_lwl(reg, addr) __BUILD_EVA_INSN(lwle, reg, addr)
+#define user_lwr(reg, addr) __BUILD_EVA_INSN(lwre, reg, addr)
+#define user_lh(reg, addr) __BUILD_EVA_INSN(lhe, reg, addr)
+#define user_lb(reg, addr) __BUILD_EVA_INSN(lbe, reg, addr)
+#define user_lbu(reg, addr) __BUILD_EVA_INSN(lbue, reg, addr)
+/* No 64-bit EVA instruction for loading double words */
+#define user_ld(reg, addr) user_lw(reg, addr)
+#define user_sw(reg, addr) __BUILD_EVA_INSN(swe, reg, addr)
+#define user_swl(reg, addr) __BUILD_EVA_INSN(swle, reg, addr)
+#define user_swr(reg, addr) __BUILD_EVA_INSN(swre, reg, addr)
+#define user_sh(reg, addr) __BUILD_EVA_INSN(she, reg, addr)
+#define user_sb(reg, addr) __BUILD_EVA_INSN(sbe, reg, addr)
+/* No 64-bit EVA instruction for storing double words */
+#define user_sd(reg, addr) user_sw(reg, addr)
+#else
+
+#define user_cache(op, base) kernel_cache(op, base)
+#define user_pref(hint, base) kernel_pref(hint, base)
+#define user_ll(reg, addr) kernel_ll(reg, addr)
+#define user_sc(reg, addr) kernel_sc(reg, addr)
+#define user_lw(reg, addr) kernel_lw(reg, addr)
+#define user_lwl(reg, addr) kernel_lwl(reg, addr)
+#define user_lwr(reg, addr) kernel_lwr(reg, addr)
+#define user_lh(reg, addr) kernel_lh(reg, addr)
+#define user_lb(reg, addr) kernel_lb(reg, addr)
+#define user_lbu(reg, addr) kernel_lbu(reg, addr)
+#define user_sw(reg, addr) kernel_sw(reg, addr)
+#define user_swl(reg, addr) kernel_swl(reg, addr)
+#define user_swr(reg, addr) kernel_swr(reg, addr)
+#define user_sh(reg, addr) kernel_sh(reg, addr)
+#define user_sb(reg, addr) kernel_sb(reg, addr)
+
+#ifdef CONFIG_32BIT
+#define user_sd(reg, addr) kernel_sw(reg, addr)
+#define user_ld(reg, addr) kernel_lw(reg, addr)
+#else
+#define user_sd(reg, addr) kernel_sd(reg, addr)
+#define user_ld(reg, addr) kernel_ld(reg, addr)
+#endif /* CONFIG_32BIT */
+
+#endif /* CONFIG_EVA */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ASM_EVA_H */
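The user_*/kernel_* pairs above exist so that one body of memory-access code can be assembled against either address space: with CONFIG_EVA the user_* names emit the EVA encodings (lwe, swe, lle, ...) while the kernel_* names keep the ordinary instructions, and without EVA both collapse to the same thing. Below is a minimal sketch of how the string-pasting C form of user_lw() (assumed to be defined in the non-__ASSEMBLY__ half of this header) is typically spliced into inline assembly; the helper name is hypothetical and the __ex_table fixup that real uaccess code would add is deliberately left out.

#include <linux/compiler.h>
#include <asm/asm-eva.h>

/* Hypothetical helper, not part of the kernel's uaccess API. */
static inline unsigned int demo_load_user_word(const unsigned int __user *p)
{
	unsigned int val;

	/*
	 * user_lw("%0", "0(%1)") pastes to "lw %0, 0(%1)" on a non-EVA
	 * build, and to a ".set eva" / "lwe %0, 0(%1)" sequence on an EVA
	 * build.  A faulting access is not fixed up here, so this sketch
	 * would oops on a bad user pointer.
	 */
	__asm__ __volatile__(
	"	" user_lw("%0", "0(%1)") "	\n"
	: "=r" (val)
	: "r" (p));

	return val;
}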
diff --git a/arch/mips/include/asm/asm-offsets.h b/arch/mips/include/asm/asm-offsets.h
new file mode 100644
index 000000000..d370ee36a
--- /dev/null
+++ b/arch/mips/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/mips/include/asm/asm-prototypes.h b/arch/mips/include/asm/asm-prototypes.h
new file mode 100644
index 000000000..f901ed043
--- /dev/null
+++ b/arch/mips/include/asm/asm-prototypes.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/checksum.h>
+#include <asm/page.h>
+#include <asm/fpu.h>
+#include <asm-generic/asm-prototypes.h>
+#include <linux/uaccess.h>
+#include <asm/ftrace.h>
+#include <asm/mmu_context.h>
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
new file mode 100644
index 000000000..ea4b62ece
--- /dev/null
+++ b/arch/mips/include/asm/asm.h
@@ -0,0 +1,331 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1996, 1997, 1999, 2001 by Ralf Baechle
+ * Copyright (C) 1999 by Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ * Copyright (C) 2002 Maciej W. Rozycki
+ *
+ * Some useful macros for MIPS assembler code
+ *
+ * Some of the routines below contain useless nops that will be optimized
+ * away by gas in -O mode. These nops are however required to fill delay
+ * slots in noreorder mode.
+ */
+#ifndef __ASM_ASM_H
+#define __ASM_ASM_H
+
+#include <asm/sgidefs.h>
+#include <asm/asm-eva.h>
+
+#ifndef __VDSO__
+/*
+ * Emit CFI data in .debug_frame sections, not .eh_frame sections.
+ * We don't do DWARF unwinding at runtime, so only the offline DWARF
+ * information is useful to anyone. Note we should change this if we
+ * ever decide to enable DWARF unwinding at runtime.
+ */
+#define CFI_SECTIONS .cfi_sections .debug_frame
+#else
+ /*
+ * For the vDSO, emit both runtime unwind information and debug
+ * symbols for the .dbg file.
+ */
+#define CFI_SECTIONS
+#endif
+
+/*
+ * LEAF - declare leaf routine
+ */
+#define LEAF(symbol) \
+ CFI_SECTIONS; \
+ .globl symbol; \
+ .align 2; \
+ .type symbol, @function; \
+ .ent symbol, 0; \
+symbol: .frame sp, 0, ra; \
+ .cfi_startproc; \
+ .insn
+
+/*
+ * NESTED - declare nested routine entry point
+ */
+#define NESTED(symbol, framesize, rpc) \
+ CFI_SECTIONS; \
+ .globl symbol; \
+ .align 2; \
+ .type symbol, @function; \
+ .ent symbol, 0; \
+symbol: .frame sp, framesize, rpc; \
+ .cfi_startproc; \
+ .insn
+
+/*
+ * END - mark end of function
+ */
+#define END(function) \
+ .cfi_endproc; \
+ .end function; \
+ .size function, .-function
+
+/*
+ * EXPORT - export definition of symbol
+ */
+#define EXPORT(symbol) \
+ .globl symbol; \
+symbol:
+
+/*
+ * FEXPORT - export definition of a function symbol
+ */
+#define FEXPORT(symbol) \
+ .globl symbol; \
+ .type symbol, @function; \
+symbol: .insn
+
+/*
+ * ABS - export absolute symbol
+ */
+#define ABS(symbol,value) \
+ .globl symbol; \
+symbol = value
+
+#define TEXT(msg) \
+ .pushsection .data; \
+8: .asciiz msg; \
+ .popsection;
+
+#define ASM_PANIC(msg) \
+ .set push; \
+ .set reorder; \
+ PTR_LA a0, 8f; \
+ jal panic; \
+9: b 9b; \
+ .set pop; \
+ TEXT(msg)
+
+/*
+ * Print formatted string
+ */
+#ifdef CONFIG_PRINTK
+#define ASM_PRINT(string) \
+ .set push; \
+ .set reorder; \
+ PTR_LA a0, 8f; \
+ jal printk; \
+ .set pop; \
+ TEXT(string)
+#else
+#define ASM_PRINT(string)
+#endif
+
+/*
+ * Stack alignment
+ */
+#if (_MIPS_SIM == _MIPS_SIM_ABI32)
+#define ALSZ 7
+#define ALMASK ~7
+#endif
+#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64)
+#define ALSZ 15
+#define ALMASK ~15
+#endif
+
+/*
+ * Macros to handle different pointer/register sizes for 32/64-bit code
+ */
+
+/*
+ * Size of a register
+ */
+#ifdef __mips64
+#define SZREG 8
+#else
+#define SZREG 4
+#endif
+
+/*
+ * Use the following macros in assembler code to load/store registers,
+ * pointers etc.
+ */
+#if (_MIPS_SIM == _MIPS_SIM_ABI32)
+#define REG_S sw
+#define REG_L lw
+#define REG_SUBU subu
+#define REG_ADDU addu
+#endif
+#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64)
+#define REG_S sd
+#define REG_L ld
+#define REG_SUBU dsubu
+#define REG_ADDU daddu
+#endif
+
+/*
+ * How to add/sub/load/store/shift C int variables.
+ */
+#if (_MIPS_SZINT == 32)
+#define INT_ADD add
+#define INT_ADDU addu
+#define INT_ADDI addi
+#define INT_ADDIU addiu
+#define INT_SUB sub
+#define INT_SUBU subu
+#define INT_L lw
+#define INT_S sw
+#define INT_SLL sll
+#define INT_SLLV sllv
+#define INT_SRL srl
+#define INT_SRLV srlv
+#define INT_SRA sra
+#define INT_SRAV srav
+#endif
+
+#if (_MIPS_SZINT == 64)
+#define INT_ADD dadd
+#define INT_ADDU daddu
+#define INT_ADDI daddi
+#define INT_ADDIU daddiu
+#define INT_SUB dsub
+#define INT_SUBU dsubu
+#define INT_L ld
+#define INT_S sd
+#define INT_SLL dsll
+#define INT_SLLV dsllv
+#define INT_SRL dsrl
+#define INT_SRLV dsrlv
+#define INT_SRA dsra
+#define INT_SRAV dsrav
+#endif
+
+/*
+ * How to add/sub/load/store/shift C long variables.
+ */
+#if (_MIPS_SZLONG == 32)
+#define LONG_ADD add
+#define LONG_ADDU addu
+#define LONG_ADDI addi
+#define LONG_ADDIU addiu
+#define LONG_SUB sub
+#define LONG_SUBU subu
+#define LONG_L lw
+#define LONG_S sw
+#define LONG_SP swp
+#define LONG_SLL sll
+#define LONG_SLLV sllv
+#define LONG_SRL srl
+#define LONG_SRLV srlv
+#define LONG_SRA sra
+#define LONG_SRAV srav
+
+#ifdef __ASSEMBLY__
+#define LONG .word
+#endif
+#define LONGSIZE 4
+#define LONGMASK 3
+#define LONGLOG 2
+#endif
+
+#if (_MIPS_SZLONG == 64)
+#define LONG_ADD dadd
+#define LONG_ADDU daddu
+#define LONG_ADDI daddi
+#define LONG_ADDIU daddiu
+#define LONG_SUB dsub
+#define LONG_SUBU dsubu
+#define LONG_L ld
+#define LONG_S sd
+#define LONG_SP sdp
+#define LONG_SLL dsll
+#define LONG_SLLV dsllv
+#define LONG_SRL dsrl
+#define LONG_SRLV dsrlv
+#define LONG_SRA dsra
+#define LONG_SRAV dsrav
+
+#ifdef __ASSEMBLY__
+#define LONG .dword
+#endif
+#define LONGSIZE 8
+#define LONGMASK 7
+#define LONGLOG 3
+#endif
+
+/*
+ * How to add/sub/load/store/shift pointers.
+ */
+#if (_MIPS_SZPTR == 32)
+#define PTR_ADD add
+#define PTR_ADDU addu
+#define PTR_ADDI addi
+#define PTR_ADDIU addiu
+#define PTR_SUB sub
+#define PTR_SUBU subu
+#define PTR_L lw
+#define PTR_S sw
+#define PTR_LA la
+#define PTR_LI li
+#define PTR_SLL sll
+#define PTR_SLLV sllv
+#define PTR_SRL srl
+#define PTR_SRLV srlv
+#define PTR_SRA sra
+#define PTR_SRAV srav
+
+#define PTR_SCALESHIFT 2
+
+#define PTR .word
+#define PTRSIZE 4
+#define PTRLOG 2
+#endif
+
+#if (_MIPS_SZPTR == 64)
+#define PTR_ADD dadd
+#define PTR_ADDU daddu
+#define PTR_ADDI daddi
+#define PTR_ADDIU daddiu
+#define PTR_SUB dsub
+#define PTR_SUBU dsubu
+#define PTR_L ld
+#define PTR_S sd
+#define PTR_LA dla
+#define PTR_LI dli
+#define PTR_SLL dsll
+#define PTR_SLLV dsllv
+#define PTR_SRL dsrl
+#define PTR_SRLV dsrlv
+#define PTR_SRA dsra
+#define PTR_SRAV dsrav
+
+#define PTR_SCALESHIFT 3
+
+#define PTR .dword
+#define PTRSIZE 8
+#define PTRLOG 3
+#endif
+
+/*
+ * Some cp0 registers were extended to 64 bits for MIPS III.
+ */
+#if (_MIPS_SIM == _MIPS_SIM_ABI32)
+#define MFC0 mfc0
+#define MTC0 mtc0
+#endif
+#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64)
+#define MFC0 dmfc0
+#define MTC0 dmtc0
+#endif
+
+#define SSNOP sll zero, zero, 1
+
+#ifdef CONFIG_SGI_IP28
+/* Inhibit speculative stores to volatile (e.g. DMA) or invalid addresses. */
+#include <asm/cacheops.h>
+#define R10KCBARRIER(addr) cache Cache_Barrier, addr;
+#else
+#define R10KCBARRIER(addr)
+#endif
+
+#endif /* __ASM_ASM_H */
diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h
new file mode 100644
index 000000000..1c08c1f79
--- /dev/null
+++ b/arch/mips/include/asm/asmmacro-32.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * asmmacro.h: Assembler macros to make things easier to read.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1998, 1999, 2003 Ralf Baechle
+ */
+#ifndef _ASM_ASMMACRO_32_H
+#define _ASM_ASMMACRO_32_H
+
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+
+ .macro fpu_save_single thread tmp=t0
+ .set push
+ SET_HARDFLOAT
+ cfc1 \tmp, fcr31
+ s.d $f0, THREAD_FPR0(\thread)
+ s.d $f2, THREAD_FPR2(\thread)
+ s.d $f4, THREAD_FPR4(\thread)
+ s.d $f6, THREAD_FPR6(\thread)
+ s.d $f8, THREAD_FPR8(\thread)
+ s.d $f10, THREAD_FPR10(\thread)
+ s.d $f12, THREAD_FPR12(\thread)
+ s.d $f14, THREAD_FPR14(\thread)
+ s.d $f16, THREAD_FPR16(\thread)
+ s.d $f18, THREAD_FPR18(\thread)
+ s.d $f20, THREAD_FPR20(\thread)
+ s.d $f22, THREAD_FPR22(\thread)
+ s.d $f24, THREAD_FPR24(\thread)
+ s.d $f26, THREAD_FPR26(\thread)
+ s.d $f28, THREAD_FPR28(\thread)
+ s.d $f30, THREAD_FPR30(\thread)
+ sw \tmp, THREAD_FCR31(\thread)
+ .set pop
+ .endm
+
+ .macro fpu_restore_single thread tmp=t0
+ .set push
+ SET_HARDFLOAT
+ lw \tmp, THREAD_FCR31(\thread)
+ l.d $f0, THREAD_FPR0(\thread)
+ l.d $f2, THREAD_FPR2(\thread)
+ l.d $f4, THREAD_FPR4(\thread)
+ l.d $f6, THREAD_FPR6(\thread)
+ l.d $f8, THREAD_FPR8(\thread)
+ l.d $f10, THREAD_FPR10(\thread)
+ l.d $f12, THREAD_FPR12(\thread)
+ l.d $f14, THREAD_FPR14(\thread)
+ l.d $f16, THREAD_FPR16(\thread)
+ l.d $f18, THREAD_FPR18(\thread)
+ l.d $f20, THREAD_FPR20(\thread)
+ l.d $f22, THREAD_FPR22(\thread)
+ l.d $f24, THREAD_FPR24(\thread)
+ l.d $f26, THREAD_FPR26(\thread)
+ l.d $f28, THREAD_FPR28(\thread)
+ l.d $f30, THREAD_FPR30(\thread)
+ ctc1 \tmp, fcr31
+ .set pop
+ .endm
+
+ .macro cpu_save_nonscratch thread
+ LONG_S s0, THREAD_REG16(\thread)
+ LONG_S s1, THREAD_REG17(\thread)
+ LONG_S s2, THREAD_REG18(\thread)
+ LONG_S s3, THREAD_REG19(\thread)
+ LONG_S s4, THREAD_REG20(\thread)
+ LONG_S s5, THREAD_REG21(\thread)
+ LONG_S s6, THREAD_REG22(\thread)
+ LONG_S s7, THREAD_REG23(\thread)
+ LONG_S sp, THREAD_REG29(\thread)
+ LONG_S fp, THREAD_REG30(\thread)
+ .endm
+
+ .macro cpu_restore_nonscratch thread
+ LONG_L s0, THREAD_REG16(\thread)
+ LONG_L s1, THREAD_REG17(\thread)
+ LONG_L s2, THREAD_REG18(\thread)
+ LONG_L s3, THREAD_REG19(\thread)
+ LONG_L s4, THREAD_REG20(\thread)
+ LONG_L s5, THREAD_REG21(\thread)
+ LONG_L s6, THREAD_REG22(\thread)
+ LONG_L s7, THREAD_REG23(\thread)
+ LONG_L sp, THREAD_REG29(\thread)
+ LONG_L fp, THREAD_REG30(\thread)
+ LONG_L ra, THREAD_REG31(\thread)
+ .endm
+
+#endif /* _ASM_ASMMACRO_32_H */
diff --git a/arch/mips/include/asm/asmmacro-64.h b/arch/mips/include/asm/asmmacro-64.h
new file mode 100644
index 000000000..68039dee5
--- /dev/null
+++ b/arch/mips/include/asm/asmmacro-64.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * asmmacro.h: Assembler macros to make things easier to read.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1998, 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_ASMMACRO_64_H
+#define _ASM_ASMMACRO_64_H
+
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+
+ .macro cpu_save_nonscratch thread
+ LONG_S s0, THREAD_REG16(\thread)
+ LONG_S s1, THREAD_REG17(\thread)
+ LONG_S s2, THREAD_REG18(\thread)
+ LONG_S s3, THREAD_REG19(\thread)
+ LONG_S s4, THREAD_REG20(\thread)
+ LONG_S s5, THREAD_REG21(\thread)
+ LONG_S s6, THREAD_REG22(\thread)
+ LONG_S s7, THREAD_REG23(\thread)
+ LONG_S sp, THREAD_REG29(\thread)
+ LONG_S fp, THREAD_REG30(\thread)
+ .endm
+
+ .macro cpu_restore_nonscratch thread
+ LONG_L s0, THREAD_REG16(\thread)
+ LONG_L s1, THREAD_REG17(\thread)
+ LONG_L s2, THREAD_REG18(\thread)
+ LONG_L s3, THREAD_REG19(\thread)
+ LONG_L s4, THREAD_REG20(\thread)
+ LONG_L s5, THREAD_REG21(\thread)
+ LONG_L s6, THREAD_REG22(\thread)
+ LONG_L s7, THREAD_REG23(\thread)
+ LONG_L sp, THREAD_REG29(\thread)
+ LONG_L fp, THREAD_REG30(\thread)
+ LONG_L ra, THREAD_REG31(\thread)
+ .endm
+
+#endif /* _ASM_ASMMACRO_64_H */
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
new file mode 100644
index 000000000..ca83ada70
--- /dev/null
+++ b/arch/mips/include/asm/asmmacro.h
@@ -0,0 +1,658 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Ralf Baechle
+ */
+#ifndef _ASM_ASMMACRO_H
+#define _ASM_ASMMACRO_H
+
+#include <asm/hazards.h>
+#include <asm/asm-offsets.h>
+#include <asm/msa.h>
+
+#ifdef CONFIG_32BIT
+#include <asm/asmmacro-32.h>
+#endif
+#ifdef CONFIG_64BIT
+#include <asm/asmmacro-64.h>
+#endif
+
+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
+
+/*
+ * Helper macros for generating raw instruction encodings.
+ */
+#ifdef CONFIG_CPU_MICROMIPS
+ .macro insn32_if_mm enc
+ .insn
+ .hword ((\enc) >> 16)
+ .hword ((\enc) & 0xffff)
+ .endm
+
+ .macro insn_if_mips enc
+ .endm
+#else
+ .macro insn32_if_mm enc
+ .endm
+
+ .macro insn_if_mips enc
+ .insn
+ .word (\enc)
+ .endm
+#endif
+
+#ifdef CONFIG_CPU_HAS_DIEI
+ .macro local_irq_enable reg=t0
+ ei
+ irq_enable_hazard
+ .endm
+
+ .macro local_irq_disable reg=t0
+ di
+ irq_disable_hazard
+ .endm
+#else /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR5 && !CONFIG_CPU_MIPSR6 */
+ .macro local_irq_enable reg=t0
+ mfc0 \reg, CP0_STATUS
+ ori \reg, \reg, 1
+ mtc0 \reg, CP0_STATUS
+ irq_enable_hazard
+ .endm
+
+ .macro local_irq_disable reg=t0
+#ifdef CONFIG_PREEMPTION
+ lw \reg, TI_PRE_COUNT($28)
+ addi \reg, \reg, 1
+ sw \reg, TI_PRE_COUNT($28)
+#endif
+ mfc0 \reg, CP0_STATUS
+ ori \reg, \reg, 1
+ xori \reg, \reg, 1
+ mtc0 \reg, CP0_STATUS
+ irq_disable_hazard
+#ifdef CONFIG_PREEMPTION
+ lw \reg, TI_PRE_COUNT($28)
+ addi \reg, \reg, -1
+ sw \reg, TI_PRE_COUNT($28)
+#endif
+ .endm
+#endif /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR5 && !CONFIG_CPU_MIPSR6 */
+
+ .macro fpu_save_16even thread tmp=t0
+ .set push
+ SET_HARDFLOAT
+ cfc1 \tmp, fcr31
+ sdc1 $f0, THREAD_FPR0(\thread)
+ sdc1 $f2, THREAD_FPR2(\thread)
+ sdc1 $f4, THREAD_FPR4(\thread)
+ sdc1 $f6, THREAD_FPR6(\thread)
+ sdc1 $f8, THREAD_FPR8(\thread)
+ sdc1 $f10, THREAD_FPR10(\thread)
+ sdc1 $f12, THREAD_FPR12(\thread)
+ sdc1 $f14, THREAD_FPR14(\thread)
+ sdc1 $f16, THREAD_FPR16(\thread)
+ sdc1 $f18, THREAD_FPR18(\thread)
+ sdc1 $f20, THREAD_FPR20(\thread)
+ sdc1 $f22, THREAD_FPR22(\thread)
+ sdc1 $f24, THREAD_FPR24(\thread)
+ sdc1 $f26, THREAD_FPR26(\thread)
+ sdc1 $f28, THREAD_FPR28(\thread)
+ sdc1 $f30, THREAD_FPR30(\thread)
+ sw \tmp, THREAD_FCR31(\thread)
+ .set pop
+ .endm
+
+ .macro fpu_save_16odd thread
+ .set push
+ .set mips64r2
+ .set fp=64
+ SET_HARDFLOAT
+ sdc1 $f1, THREAD_FPR1(\thread)
+ sdc1 $f3, THREAD_FPR3(\thread)
+ sdc1 $f5, THREAD_FPR5(\thread)
+ sdc1 $f7, THREAD_FPR7(\thread)
+ sdc1 $f9, THREAD_FPR9(\thread)
+ sdc1 $f11, THREAD_FPR11(\thread)
+ sdc1 $f13, THREAD_FPR13(\thread)
+ sdc1 $f15, THREAD_FPR15(\thread)
+ sdc1 $f17, THREAD_FPR17(\thread)
+ sdc1 $f19, THREAD_FPR19(\thread)
+ sdc1 $f21, THREAD_FPR21(\thread)
+ sdc1 $f23, THREAD_FPR23(\thread)
+ sdc1 $f25, THREAD_FPR25(\thread)
+ sdc1 $f27, THREAD_FPR27(\thread)
+ sdc1 $f29, THREAD_FPR29(\thread)
+ sdc1 $f31, THREAD_FPR31(\thread)
+ .set pop
+ .endm
+
+ .macro fpu_save_double thread status tmp
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6)
+ sll \tmp, \status, 5
+ bgez \tmp, 10f
+ fpu_save_16odd \thread
+10:
+#endif
+ fpu_save_16even \thread \tmp
+ .endm
+
+ .macro fpu_restore_16even thread tmp=t0
+ .set push
+ SET_HARDFLOAT
+ lw \tmp, THREAD_FCR31(\thread)
+ ldc1 $f0, THREAD_FPR0(\thread)
+ ldc1 $f2, THREAD_FPR2(\thread)
+ ldc1 $f4, THREAD_FPR4(\thread)
+ ldc1 $f6, THREAD_FPR6(\thread)
+ ldc1 $f8, THREAD_FPR8(\thread)
+ ldc1 $f10, THREAD_FPR10(\thread)
+ ldc1 $f12, THREAD_FPR12(\thread)
+ ldc1 $f14, THREAD_FPR14(\thread)
+ ldc1 $f16, THREAD_FPR16(\thread)
+ ldc1 $f18, THREAD_FPR18(\thread)
+ ldc1 $f20, THREAD_FPR20(\thread)
+ ldc1 $f22, THREAD_FPR22(\thread)
+ ldc1 $f24, THREAD_FPR24(\thread)
+ ldc1 $f26, THREAD_FPR26(\thread)
+ ldc1 $f28, THREAD_FPR28(\thread)
+ ldc1 $f30, THREAD_FPR30(\thread)
+ ctc1 \tmp, fcr31
+ .set pop
+ .endm
+
+ .macro fpu_restore_16odd thread
+ .set push
+ .set mips64r2
+ .set fp=64
+ SET_HARDFLOAT
+ ldc1 $f1, THREAD_FPR1(\thread)
+ ldc1 $f3, THREAD_FPR3(\thread)
+ ldc1 $f5, THREAD_FPR5(\thread)
+ ldc1 $f7, THREAD_FPR7(\thread)
+ ldc1 $f9, THREAD_FPR9(\thread)
+ ldc1 $f11, THREAD_FPR11(\thread)
+ ldc1 $f13, THREAD_FPR13(\thread)
+ ldc1 $f15, THREAD_FPR15(\thread)
+ ldc1 $f17, THREAD_FPR17(\thread)
+ ldc1 $f19, THREAD_FPR19(\thread)
+ ldc1 $f21, THREAD_FPR21(\thread)
+ ldc1 $f23, THREAD_FPR23(\thread)
+ ldc1 $f25, THREAD_FPR25(\thread)
+ ldc1 $f27, THREAD_FPR27(\thread)
+ ldc1 $f29, THREAD_FPR29(\thread)
+ ldc1 $f31, THREAD_FPR31(\thread)
+ .set pop
+ .endm
+
+ .macro fpu_restore_double thread status tmp
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6)
+ sll \tmp, \status, 5
+ bgez \tmp, 10f # 16 register mode?
+
+ fpu_restore_16odd \thread
+10:
+#endif
+ fpu_restore_16even \thread \tmp
+ .endm
+
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
+ defined(CONFIG_CPU_MIPSR6)
+ .macro _EXT rd, rs, p, s
+ ext \rd, \rs, \p, \s
+ .endm
+#else /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR5 && !CONFIG_CPU_MIPSR6 */
+ .macro _EXT rd, rs, p, s
+ srl \rd, \rs, \p
+ andi \rd, \rd, (1 << \s) - 1
+ .endm
+#endif /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR5 && !CONFIG_CPU_MIPSR6 */
+
+/*
+ * Temporary until all versions of gas have MT ASE support
+ */
+ .macro DMT reg=0
+ .word 0x41600bc1 | (\reg << 16)
+ .endm
+
+ .macro EMT reg=0
+ .word 0x41600be1 | (\reg << 16)
+ .endm
+
+ .macro DVPE reg=0
+ .word 0x41600001 | (\reg << 16)
+ .endm
+
+ .macro EVPE reg=0
+ .word 0x41600021 | (\reg << 16)
+ .endm
+
+ .macro MFTR rt=0, rd=0, u=0, sel=0
+ .word 0x41000000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)
+ .endm
+
+ .macro MTTR rt=0, rd=0, u=0, sel=0
+ .word 0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)
+ .endm
+
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+ .macro _cfcmsa rd, cs
+ .set push
+ .set mips32r2
+ .set fp=64
+ .set msa
+ cfcmsa \rd, $\cs
+ .set pop
+ .endm
+
+ .macro _ctcmsa cd, rs
+ .set push
+ .set mips32r2
+ .set fp=64
+ .set msa
+ ctcmsa $\cd, \rs
+ .set pop
+ .endm
+
+ .macro ld_b wd, off, base
+ .set push
+ .set mips32r2
+ .set fp=64
+ .set msa
+ ld.b $w\wd, \off(\base)
+ .set pop
+ .endm
+
+ .macro ld_h wd, off, base
+ .set push
+ .set mips32r2
+ .set fp=64
+ .set msa
+ ld.h $w\wd, \off(\base)
+ .set pop
+ .endm
+
+ .macro ld_w wd, off, base
+ .set push
+ .set mips32r2
+ .set fp=64
+ .set msa
+ ld.w $w\wd, \off(\base)
+ .set pop
+ .endm
+
+ .macro ld_d wd, off, base
+ .set push
+ .set mips32r2
+ .set fp=64
+ .set msa
+ ld.d $w\wd, \off(\base)
+ .set pop
+ .endm
+
+ .macro st_b wd, off, base
+ .set push
+ .set mips32r2
+ .set fp=64
+ .set msa
+ st.b $w\wd, \off(\base)
+ .set pop
+ .endm
+
+ .macro st_h wd, off, base
+ .set push
+ .set mips32r2
+ .set fp=64
+ .set msa
+ st.h $w\wd, \off(\base)
+ .set pop
+ .endm
+
+ .macro st_w wd, off, base
+ .set push
+ .set mips32r2
+ .set fp=64
+ .set msa
+ st.w $w\wd, \off(\base)
+ .set pop
+ .endm
+
+ .macro st_d wd, off, base
+ .set push
+ .set mips32r2
+ .set fp=64
+ .set msa
+ st.d $w\wd, \off(\base)
+ .set pop
+ .endm
+
+ .macro copy_s_w ws, n
+ .set push
+ .set mips32r2
+ .set fp=64
+ .set msa
+ copy_s.w $1, $w\ws[\n]
+ .set pop
+ .endm
+
+ .macro copy_s_d ws, n
+ .set push
+ .set mips64r2
+ .set fp=64
+ .set msa
+ copy_s.d $1, $w\ws[\n]
+ .set pop
+ .endm
+
+ .macro insert_w wd, n
+ .set push
+ .set mips32r2
+ .set fp=64
+ .set msa
+ insert.w $w\wd[\n], $1
+ .set pop
+ .endm
+
+ .macro insert_d wd, n
+ .set push
+ .set mips64r2
+ .set fp=64
+ .set msa
+ insert.d $w\wd[\n], $1
+ .set pop
+ .endm
+#else
+
+ /*
+ * Temporary until all toolchains in use include MSA support.
+ */
+ .macro _cfcmsa rd, cs
+ .set push
+ .set noat
+ SET_HARDFLOAT
+ insn_if_mips 0x787e0059 | (\cs << 11)
+ insn32_if_mm 0x587e0056 | (\cs << 11)
+ move \rd, $1
+ .set pop
+ .endm
+
+ .macro _ctcmsa cd, rs
+ .set push
+ .set noat
+ SET_HARDFLOAT
+ move $1, \rs
+ insn_if_mips 0x783e0819 | (\cd << 6)
+ insn32_if_mm 0x583e0816 | (\cd << 6)
+ .set pop
+ .endm
+
+ .macro ld_b wd, off, base
+ .set push
+ .set noat
+ SET_HARDFLOAT
+ PTR_ADDU $1, \base, \off
+ insn_if_mips 0x78000820 | (\wd << 6)
+ insn32_if_mm 0x58000807 | (\wd << 6)
+ .set pop
+ .endm
+
+ .macro ld_h wd, off, base
+ .set push
+ .set noat
+ SET_HARDFLOAT
+ PTR_ADDU $1, \base, \off
+ insn_if_mips 0x78000821 | (\wd << 6)
+ insn32_if_mm 0x58000817 | (\wd << 6)
+ .set pop
+ .endm
+
+ .macro ld_w wd, off, base
+ .set push
+ .set noat
+ SET_HARDFLOAT
+ PTR_ADDU $1, \base, \off
+ insn_if_mips 0x78000822 | (\wd << 6)
+ insn32_if_mm 0x58000827 | (\wd << 6)
+ .set pop
+ .endm
+
+ .macro ld_d wd, off, base
+ .set push
+ .set noat
+ SET_HARDFLOAT
+ PTR_ADDU $1, \base, \off
+ insn_if_mips 0x78000823 | (\wd << 6)
+ insn32_if_mm 0x58000837 | (\wd << 6)
+ .set pop
+ .endm
+
+ .macro st_b wd, off, base
+ .set push
+ .set noat
+ SET_HARDFLOAT
+ PTR_ADDU $1, \base, \off
+ insn_if_mips 0x78000824 | (\wd << 6)
+ insn32_if_mm 0x5800080f | (\wd << 6)
+ .set pop
+ .endm
+
+ .macro st_h wd, off, base
+ .set push
+ .set noat
+ SET_HARDFLOAT
+ PTR_ADDU $1, \base, \off
+ insn_if_mips 0x78000825 | (\wd << 6)
+ insn32_if_mm 0x5800081f | (\wd << 6)
+ .set pop
+ .endm
+
+ .macro st_w wd, off, base
+ .set push
+ .set noat
+ SET_HARDFLOAT
+ PTR_ADDU $1, \base, \off
+ insn_if_mips 0x78000826 | (\wd << 6)
+ insn32_if_mm 0x5800082f | (\wd << 6)
+ .set pop
+ .endm
+
+ .macro st_d wd, off, base
+ .set push
+ .set noat
+ SET_HARDFLOAT
+ PTR_ADDU $1, \base, \off
+ insn_if_mips 0x78000827 | (\wd << 6)
+ insn32_if_mm 0x5800083f | (\wd << 6)
+ .set pop
+ .endm
+
+ .macro copy_s_w ws, n
+ .set push
+ .set noat
+ SET_HARDFLOAT
+ insn_if_mips 0x78b00059 | (\n << 16) | (\ws << 11)
+ insn32_if_mm 0x58b00056 | (\n << 16) | (\ws << 11)
+ .set pop
+ .endm
+
+ .macro copy_s_d ws, n
+ .set push
+ .set noat
+ SET_HARDFLOAT
+ insn_if_mips 0x78b80059 | (\n << 16) | (\ws << 11)
+ insn32_if_mm 0x58b80056 | (\n << 16) | (\ws << 11)
+ .set pop
+ .endm
+
+ .macro insert_w wd, n
+ .set push
+ .set noat
+ SET_HARDFLOAT
+ insn_if_mips 0x79300819 | (\n << 16) | (\wd << 6)
+ insn32_if_mm 0x59300816 | (\n << 16) | (\wd << 6)
+ .set pop
+ .endm
+
+ .macro insert_d wd, n
+ .set push
+ .set noat
+ SET_HARDFLOAT
+ insn_if_mips 0x79380819 | (\n << 16) | (\wd << 6)
+ insn32_if_mm 0x59380816 | (\n << 16) | (\wd << 6)
+ .set pop
+ .endm
+#endif
+
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+#define FPR_BASE_OFFS THREAD_FPR0
+#define FPR_BASE $1
+#else
+#define FPR_BASE_OFFS 0
+#define FPR_BASE \thread
+#endif
+
+ .macro msa_save_all thread
+ .set push
+ .set noat
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+ PTR_ADDU FPR_BASE, \thread, FPR_BASE_OFFS
+#endif
+ st_d 0, THREAD_FPR0 - FPR_BASE_OFFS, FPR_BASE
+ st_d 1, THREAD_FPR1 - FPR_BASE_OFFS, FPR_BASE
+ st_d 2, THREAD_FPR2 - FPR_BASE_OFFS, FPR_BASE
+ st_d 3, THREAD_FPR3 - FPR_BASE_OFFS, FPR_BASE
+ st_d 4, THREAD_FPR4 - FPR_BASE_OFFS, FPR_BASE
+ st_d 5, THREAD_FPR5 - FPR_BASE_OFFS, FPR_BASE
+ st_d 6, THREAD_FPR6 - FPR_BASE_OFFS, FPR_BASE
+ st_d 7, THREAD_FPR7 - FPR_BASE_OFFS, FPR_BASE
+ st_d 8, THREAD_FPR8 - FPR_BASE_OFFS, FPR_BASE
+ st_d 9, THREAD_FPR9 - FPR_BASE_OFFS, FPR_BASE
+ st_d 10, THREAD_FPR10 - FPR_BASE_OFFS, FPR_BASE
+ st_d 11, THREAD_FPR11 - FPR_BASE_OFFS, FPR_BASE
+ st_d 12, THREAD_FPR12 - FPR_BASE_OFFS, FPR_BASE
+ st_d 13, THREAD_FPR13 - FPR_BASE_OFFS, FPR_BASE
+ st_d 14, THREAD_FPR14 - FPR_BASE_OFFS, FPR_BASE
+ st_d 15, THREAD_FPR15 - FPR_BASE_OFFS, FPR_BASE
+ st_d 16, THREAD_FPR16 - FPR_BASE_OFFS, FPR_BASE
+ st_d 17, THREAD_FPR17 - FPR_BASE_OFFS, FPR_BASE
+ st_d 18, THREAD_FPR18 - FPR_BASE_OFFS, FPR_BASE
+ st_d 19, THREAD_FPR19 - FPR_BASE_OFFS, FPR_BASE
+ st_d 20, THREAD_FPR20 - FPR_BASE_OFFS, FPR_BASE
+ st_d 21, THREAD_FPR21 - FPR_BASE_OFFS, FPR_BASE
+ st_d 22, THREAD_FPR22 - FPR_BASE_OFFS, FPR_BASE
+ st_d 23, THREAD_FPR23 - FPR_BASE_OFFS, FPR_BASE
+ st_d 24, THREAD_FPR24 - FPR_BASE_OFFS, FPR_BASE
+ st_d 25, THREAD_FPR25 - FPR_BASE_OFFS, FPR_BASE
+ st_d 26, THREAD_FPR26 - FPR_BASE_OFFS, FPR_BASE
+ st_d 27, THREAD_FPR27 - FPR_BASE_OFFS, FPR_BASE
+ st_d 28, THREAD_FPR28 - FPR_BASE_OFFS, FPR_BASE
+ st_d 29, THREAD_FPR29 - FPR_BASE_OFFS, FPR_BASE
+ st_d 30, THREAD_FPR30 - FPR_BASE_OFFS, FPR_BASE
+ st_d 31, THREAD_FPR31 - FPR_BASE_OFFS, FPR_BASE
+ SET_HARDFLOAT
+ _cfcmsa $1, MSA_CSR
+ sw $1, THREAD_MSA_CSR(\thread)
+ .set pop
+ .endm
+
+ .macro msa_restore_all thread
+ .set push
+ .set noat
+ SET_HARDFLOAT
+ lw $1, THREAD_MSA_CSR(\thread)
+ _ctcmsa MSA_CSR, $1
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+ PTR_ADDU FPR_BASE, \thread, FPR_BASE_OFFS
+#endif
+ ld_d 0, THREAD_FPR0 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 1, THREAD_FPR1 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 2, THREAD_FPR2 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 3, THREAD_FPR3 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 4, THREAD_FPR4 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 5, THREAD_FPR5 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 6, THREAD_FPR6 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 7, THREAD_FPR7 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 8, THREAD_FPR8 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 9, THREAD_FPR9 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 10, THREAD_FPR10 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 11, THREAD_FPR11 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 12, THREAD_FPR12 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 13, THREAD_FPR13 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 14, THREAD_FPR14 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 15, THREAD_FPR15 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 16, THREAD_FPR16 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 17, THREAD_FPR17 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 18, THREAD_FPR18 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 19, THREAD_FPR19 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 20, THREAD_FPR20 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 21, THREAD_FPR21 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 22, THREAD_FPR22 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 23, THREAD_FPR23 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 24, THREAD_FPR24 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 25, THREAD_FPR25 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 26, THREAD_FPR26 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 27, THREAD_FPR27 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 28, THREAD_FPR28 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 29, THREAD_FPR29 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 30, THREAD_FPR30 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 31, THREAD_FPR31 - FPR_BASE_OFFS, FPR_BASE
+ .set pop
+ .endm
+
+#undef FPR_BASE_OFFS
+#undef FPR_BASE
+
+ .macro msa_init_upper wd
+#ifdef CONFIG_64BIT
+ insert_d \wd, 1
+#else
+ insert_w \wd, 2
+ insert_w \wd, 3
+#endif
+ .endm
+
+ .macro msa_init_all_upper
+ .set push
+ .set noat
+ SET_HARDFLOAT
+ not $1, zero
+ msa_init_upper 0
+ msa_init_upper 1
+ msa_init_upper 2
+ msa_init_upper 3
+ msa_init_upper 4
+ msa_init_upper 5
+ msa_init_upper 6
+ msa_init_upper 7
+ msa_init_upper 8
+ msa_init_upper 9
+ msa_init_upper 10
+ msa_init_upper 11
+ msa_init_upper 12
+ msa_init_upper 13
+ msa_init_upper 14
+ msa_init_upper 15
+ msa_init_upper 16
+ msa_init_upper 17
+ msa_init_upper 18
+ msa_init_upper 19
+ msa_init_upper 20
+ msa_init_upper 21
+ msa_init_upper 22
+ msa_init_upper 23
+ msa_init_upper 24
+ msa_init_upper 25
+ msa_init_upper 26
+ msa_init_upper 27
+ msa_init_upper 28
+ msa_init_upper 29
+ msa_init_upper 30
+ msa_init_upper 31
+ .set pop
+ .endm
+
+#endif /* _ASM_ASMMACRO_H */
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
new file mode 100644
index 000000000..fd0e09033
--- /dev/null
+++ b/arch/mips/include/asm/atomic.h
@@ -0,0 +1,267 @@
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc.
+ *
+ * But use these as seldom as possible since they are much slower
+ * than regular operations.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
+ */
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <linux/irqflags.h>
+#include <linux/types.h>
+#include <asm/barrier.h>
+#include <asm/compiler.h>
+#include <asm/cpu-features.h>
+#include <asm/cmpxchg.h>
+#include <asm/llsc.h>
+#include <asm/sync.h>
+#include <asm/war.h>
+
+#define ATOMIC_OPS(pfx, type) \
+static __always_inline type pfx##_read(const pfx##_t *v) \
+{ \
+ return READ_ONCE(v->counter); \
+} \
+ \
+static __always_inline void pfx##_set(pfx##_t *v, type i) \
+{ \
+ WRITE_ONCE(v->counter, i); \
+} \
+ \
+static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n) \
+{ \
+ return cmpxchg(&v->counter, o, n); \
+} \
+ \
+static __always_inline type pfx##_xchg(pfx##_t *v, type n) \
+{ \
+ return xchg(&v->counter, n); \
+}
+
+ATOMIC_OPS(atomic, int)
+
+#ifdef CONFIG_64BIT
+# define ATOMIC64_INIT(i) { (i) }
+ATOMIC_OPS(atomic64, s64)
+#endif
+
+#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ void pfx##_##op(type i, pfx##_t * v) \
+{ \
+ type temp; \
+ \
+ if (!kernel_uses_llsc) { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ return; \
+ } \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " #ll " %0, %1 # " #pfx "_" #op " \n" \
+ " " #asm_op " %0, %2 \n" \
+ " " #sc " %0, %1 \n" \
+ "\t" __SC_BEQZ "%0, 1b \n" \
+ " .set pop \n" \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i) : __LLSC_CLOBBER); \
+}
+
+#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
+{ \
+ type temp, result; \
+ \
+ if (!kernel_uses_llsc) { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ result c_op i; \
+ v->counter = result; \
+ raw_local_irq_restore(flags); \
+ return result; \
+ } \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " #ll " %1, %2 # " #pfx "_" #op "_return\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " " #sc " %0, %2 \n" \
+ "\t" __SC_BEQZ "%0, 1b \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " .set pop \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i) : __LLSC_CLOBBER); \
+ \
+ return result; \
+}
+
+#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
+{ \
+	type temp, result;						\
+ \
+ if (!kernel_uses_llsc) { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ return result; \
+ } \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " #ll " %1, %2 # " #pfx "_fetch_" #op "\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " " #sc " %0, %2 \n" \
+ "\t" __SC_BEQZ "%0, 1b \n" \
+ " .set pop \n" \
+ " move %0, %1 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i) : __LLSC_CLOBBER); \
+ \
+ return result; \
+}
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
+
+ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
+ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
+
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
+#ifdef CONFIG_64BIT
+ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
+ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
+# define atomic64_add_return_relaxed atomic64_add_return_relaxed
+# define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+# define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+# define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#endif /* CONFIG_64BIT */
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
+
+ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
+ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
+ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)
+
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+
+#ifdef CONFIG_64BIT
+ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
+ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
+ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
+# define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+# define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+# define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#endif
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+/*
+ * atomic_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically test @v and subtract @i if @v is greater than or equal to @i.
+ * The function returns the old value of @v minus @i.
+ */
+#define ATOMIC_SIP_OP(pfx, type, op, ll, sc) \
+static __inline__ type pfx##_sub_if_positive(type i, pfx##_t * v) \
+{ \
+ type temp, result; \
+ \
+ smp_mb__before_atomic(); \
+ \
+ if (!kernel_uses_llsc) { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ result -= i; \
+ if (result >= 0) \
+ v->counter = result; \
+ raw_local_irq_restore(flags); \
+ smp_mb__after_atomic(); \
+ return result; \
+ } \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " #ll " %1, %2 # atomic_sub_if_positive\n" \
+ " .set pop \n" \
+ " " #op " %0, %1, %3 \n" \
+ " move %1, %0 \n" \
+ " bltz %0, 2f \n" \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " #sc " %1, %2 \n" \
+ " " __SC_BEQZ "%1, 1b \n" \
+ "2: " __SYNC(full, loongson3_war) " \n" \
+ " .set pop \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i) \
+ : __LLSC_CLOBBER); \
+ \
+ /* \
+ * In the Loongson3 workaround case we already have a \
+ * completion barrier at 2: above, which is needed due to the \
+ * bltz that can branch to code outside of the LL/SC loop. As \
+ * such, we don't need to emit another barrier here. \
+ */ \
+ if (__SYNC_loongson3_war == 0) \
+ smp_mb__after_atomic(); \
+ \
+ return result; \
+}
+
+ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
+#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
+
+#ifdef CONFIG_64BIT
+ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
+#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v)
+#endif
+
+#undef ATOMIC_SIP_OP
+
+#endif /* _ASM_ATOMIC_H */
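Beyond the generated add/sub/and/or/xor families, the extra operation this header provides is atomic_sub_if_positive(): the subtraction is committed inside the LL/SC loop only when the result stays non-negative, and the would-be result is returned either way, which is what the atomic_dec_if_positive() wrapper builds on. A short usage sketch; the counter and function names are hypothetical.

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t demo_budget = ATOMIC_INIT(4);

/* Consume one unit of budget unless it is already exhausted. */
static bool demo_try_consume(void)
{
	/*
	 * atomic_dec_if_positive() returns the decremented value; a
	 * negative return means the counter was 0 and was left untouched.
	 */
	return atomic_dec_if_positive(&demo_budget) >= 0;
}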
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
new file mode 100644
index 000000000..49ff172a7
--- /dev/null
+++ b/arch/mips/include/asm/barrier.h
@@ -0,0 +1,142 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#include <asm/addrspace.h>
+#include <asm/sync.h>
+
+static inline void __sync(void)
+{
+ asm volatile(__SYNC(full, always) ::: "memory");
+}
+
+static inline void rmb(void)
+{
+ asm volatile(__SYNC(rmb, always) ::: "memory");
+}
+#define rmb rmb
+
+static inline void wmb(void)
+{
+ asm volatile(__SYNC(wmb, always) ::: "memory");
+}
+#define wmb wmb
+
+#define fast_mb() __sync()
+
+#define __fast_iob() \
+ __asm__ __volatile__( \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ "lw $0,%0\n\t" \
+ "nop\n\t" \
+ ".set pop" \
+ : /* no output */ \
+ : "m" (*(int *)CKSEG1) \
+ : "memory")
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+# define fast_iob() do { } while (0)
+#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
+# ifdef CONFIG_SGI_IP28
+# define fast_iob() \
+ __asm__ __volatile__( \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ "lw $0,%0\n\t" \
+ "sync\n\t" \
+ "lw $0,%0\n\t" \
+ ".set pop" \
+ : /* no output */ \
+ : "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \
+ : "memory")
+# else
+# define fast_iob() \
+ do { \
+ __sync(); \
+ __fast_iob(); \
+ } while (0)
+# endif
+#endif /* CONFIG_CPU_CAVIUM_OCTEON */
+
+#ifdef CONFIG_CPU_HAS_WB
+
+#include <asm/wbflush.h>
+
+#define mb() wbflush()
+#define iob() wbflush()
+
+#else /* !CONFIG_CPU_HAS_WB */
+
+#define mb() fast_mb()
+#define iob() fast_iob()
+
+#endif /* !CONFIG_CPU_HAS_WB */
+
+#if defined(CONFIG_WEAK_ORDERING)
+# define __smp_mb() __sync()
+# define __smp_rmb() rmb()
+# define __smp_wmb() wmb()
+#else
+# define __smp_mb() barrier()
+# define __smp_rmb() barrier()
+# define __smp_wmb() barrier()
+#endif
+
+/*
+ * When LL/SC does imply order, it must also be a compiler barrier to keep the
+ * compiler from reordering where the CPU will not. When it does not imply
+ * order, the compiler is also free to reorder across the LL/SC loop and
+ * ordering will be done by smp_llsc_mb() and friends.
+ */
+#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
+# define __WEAK_LLSC_MB sync
+# define smp_llsc_mb() \
+ __asm__ __volatile__(__stringify(__WEAK_LLSC_MB) : : :"memory")
+# define __LLSC_CLOBBER
+#else
+# define __WEAK_LLSC_MB
+# define smp_llsc_mb() do { } while (0)
+# define __LLSC_CLOBBER "memory"
+#endif
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#define smp_mb__before_llsc() smp_wmb()
+#define __smp_mb__before_llsc() __smp_wmb()
+/* Cause previous writes to become visible on all CPUs as soon as possible */
+#define nudge_writes() __asm__ __volatile__(".set push\n\t" \
+ ".set arch=octeon\n\t" \
+ "syncw\n\t" \
+ ".set pop" : : : "memory")
+#else
+#define smp_mb__before_llsc() smp_llsc_mb()
+#define __smp_mb__before_llsc() smp_llsc_mb()
+#define nudge_writes() mb()
+#endif
+
+/*
+ * In the Loongson3 LL/SC workaround case, all of our LL/SC loops already have
+ * a completion barrier immediately preceding the LL instruction. Therefore we
+ * can skip emitting a barrier from __smp_mb__before_atomic().
+ */
+#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
+# define __smp_mb__before_atomic()
+#else
+# define __smp_mb__before_atomic() __smp_mb__before_llsc()
+#endif
+
+#define __smp_mb__after_atomic() smp_llsc_mb()
+
+static inline void sync_ginv(void)
+{
+ asm volatile(__SYNC(ginv, always));
+}
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASM_BARRIER_H */
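The __smp_wmb()/__smp_rmb() definitions above only cost a real SYNC on weakly ordered configurations; elsewhere they degrade to plain compiler barriers. A minimal producer/consumer sketch of the pairing they are meant for (all names hypothetical):

#include <asm/barrier.h>
#include <linux/compiler.h>

static int demo_payload;
static int demo_ready;

static void demo_producer(void)
{
	demo_payload = 42;
	smp_wmb();			/* order the data store before the flag store */
	WRITE_ONCE(demo_ready, 1);
}

static int demo_consumer(void)
{
	if (!READ_ONCE(demo_ready))
		return -1;
	smp_rmb();			/* order the flag load before the data load */
	return demo_payload;
}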
diff --git a/arch/mips/include/asm/bcache.h b/arch/mips/include/asm/bcache.h
new file mode 100644
index 000000000..a00857b13
--- /dev/null
+++ b/arch/mips/include/asm/bcache.h
@@ -0,0 +1,87 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1997, 1999 by Ralf Baechle
+ * Copyright (c) 1999 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_BCACHE_H
+#define _ASM_BCACHE_H
+
+#include <linux/types.h>
+
+/* Some R4000 / R4400 / R4600 / R5000 machines may have non-DMA-coherent,
+   chipset-implemented caches. On machines with other CPUs the CPU does the
+   cache maintenance itself. */
+struct bcache_ops {
+ void (*bc_enable)(void);
+ void (*bc_disable)(void);
+ void (*bc_wback_inv)(unsigned long page, unsigned long size);
+ void (*bc_inv)(unsigned long page, unsigned long size);
+ void (*bc_prefetch_enable)(void);
+ void (*bc_prefetch_disable)(void);
+ bool (*bc_prefetch_is_enabled)(void);
+};
+
+extern void indy_sc_init(void);
+
+#ifdef CONFIG_BOARD_SCACHE
+
+extern struct bcache_ops *bcops;
+
+static inline void bc_enable(void)
+{
+ bcops->bc_enable();
+}
+
+static inline void bc_disable(void)
+{
+ bcops->bc_disable();
+}
+
+static inline void bc_wback_inv(unsigned long page, unsigned long size)
+{
+ bcops->bc_wback_inv(page, size);
+}
+
+static inline void bc_inv(unsigned long page, unsigned long size)
+{
+ bcops->bc_inv(page, size);
+}
+
+static inline void bc_prefetch_enable(void)
+{
+ if (bcops->bc_prefetch_enable)
+ bcops->bc_prefetch_enable();
+}
+
+static inline void bc_prefetch_disable(void)
+{
+ if (bcops->bc_prefetch_disable)
+ bcops->bc_prefetch_disable();
+}
+
+static inline bool bc_prefetch_is_enabled(void)
+{
+ if (bcops->bc_prefetch_is_enabled)
+ return bcops->bc_prefetch_is_enabled();
+
+ return false;
+}
+
+#else /* !defined(CONFIG_BOARD_SCACHE) */
+
+/* Not R4000 / R4400 / R4600 / R5000. */
+
+#define bc_enable() do { } while (0)
+#define bc_disable() do { } while (0)
+#define bc_wback_inv(page, size) do { } while (0)
+#define bc_inv(page, size) do { } while (0)
+#define bc_prefetch_enable() do { } while (0)
+#define bc_prefetch_disable() do { } while (0)
+#define bc_prefetch_is_enabled() 0
+
+#endif /* !defined(CONFIG_BOARD_SCACHE) */
+
+#endif /* _ASM_BCACHE_H */
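On the boards that actually have such a chipset cache, the bc_*() hooks are what make a CPU-written buffer visible to a non-coherent DMA master; on everything else they compile away to nothing. A hypothetical driver-side sketch:

#include <asm/bcache.h>

/* Push a CPU-written buffer out of the board cache before starting DMA. */
static void demo_dma_to_device(void *buf, unsigned long len)
{
	bc_wback_inv((unsigned long)buf, len);

	/* ...program the DMA engine with the physical address of buf... */
}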
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
new file mode 100644
index 000000000..a74769940
--- /dev/null
+++ b/arch/mips/include/asm/bitops.h
@@ -0,0 +1,463 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_BITOPS_H
+#define _ASM_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/bits.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/barrier.h>
+#include <asm/byteorder.h> /* sigh ... */
+#include <asm/compiler.h>
+#include <asm/cpu-features.h>
+#include <asm/isa-rev.h>
+#include <asm/llsc.h>
+#include <asm/sgidefs.h>
+#include <asm/war.h>
+
+#define __bit_op(mem, insn, inputs...) do { \
+ unsigned long temp; \
+ \
+ asm volatile( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " __LL "%0, %1 \n" \
+ " " insn " \n" \
+ " " __SC "%0, %1 \n" \
+ " " __SC_BEQZ "%0, 1b \n" \
+ " .set pop \n" \
+ : "=&r"(temp), "+" GCC_OFF_SMALL_ASM()(mem) \
+ : inputs \
+ : __LLSC_CLOBBER); \
+} while (0)
+
+#define __test_bit_op(mem, ll_dst, insn, inputs...) ({ \
+ unsigned long orig, temp; \
+ \
+ asm volatile( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " __LL ll_dst ", %2 \n" \
+ " " insn " \n" \
+ " " __SC "%1, %2 \n" \
+ " " __SC_BEQZ "%1, 1b \n" \
+ " .set pop \n" \
+ : "=&r"(orig), "=&r"(temp), \
+ "+" GCC_OFF_SMALL_ASM()(mem) \
+ : inputs \
+ : __LLSC_CLOBBER); \
+ \
+ orig; \
+})
+
+/*
+ * These are the "slower" versions of the functions and are in bitops.c.
+ * These functions call raw_local_irq_{save,restore}().
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
+int __mips_test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *addr);
+int __mips_test_and_clear_bit(unsigned long nr,
+ volatile unsigned long *addr);
+int __mips_test_and_change_bit(unsigned long nr,
+ volatile unsigned long *addr);
+
+
+/*
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
+
+ if (!kernel_uses_llsc) {
+ __mips_set_bit(nr, addr);
+ return;
+ }
+
+ if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit) && (bit >= 16)) {
+ __bit_op(*m, __INS "%0, %3, %2, 1", "i"(bit), "r"(~0));
+ return;
+ }
+
+ __bit_op(*m, "or\t%0, %2", "ir"(BIT(bit)));
+}
+
+/*
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
+
+ if (!kernel_uses_llsc) {
+ __mips_clear_bit(nr, addr);
+ return;
+ }
+
+ if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit)) {
+ __bit_op(*m, __INS "%0, $0, %2, 1", "i"(bit));
+ return;
+ }
+
+ __bit_op(*m, "and\t%0, %2", "ir"(~BIT(bit)));
+}
+
+/*
+ * clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit_unlock() is atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock.
+ */
+static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+ smp_mb__before_atomic();
+ clear_bit(nr, addr);
+}
+
+/*
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
+
+ if (!kernel_uses_llsc) {
+ __mips_change_bit(nr, addr);
+ return;
+ }
+
+ __bit_op(*m, "xor\t%0, %2", "ir"(BIT(bit)));
+}
+
+/*
+ * test_and_set_bit_lock - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and implies acquire ordering semantics
+ * after the memory operation.
+ */
+static inline int test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
+ unsigned long res, orig;
+
+ if (!kernel_uses_llsc) {
+ res = __mips_test_and_set_bit_lock(nr, addr);
+ } else {
+ orig = __test_bit_op(*m, "%0",
+ "or\t%1, %0, %3",
+ "ir"(BIT(bit)));
+ res = (orig & BIT(bit)) != 0;
+ }
+
+ smp_llsc_mb();
+
+ return res;
+}
+
+/*
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_set_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ smp_mb__before_atomic();
+ return test_and_set_bit_lock(nr, addr);
+}
+
+/*
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_clear_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
+ unsigned long res, orig;
+
+ smp_mb__before_atomic();
+
+ if (!kernel_uses_llsc) {
+ res = __mips_test_and_clear_bit(nr, addr);
+ } else if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(nr)) {
+ res = __test_bit_op(*m, "%1",
+ __EXT "%0, %1, %3, 1;"
+ __INS "%1, $0, %3, 1",
+ "i"(bit));
+ } else {
+ orig = __test_bit_op(*m, "%0",
+ "or\t%1, %0, %3;"
+ "xor\t%1, %1, %3",
+ "ir"(BIT(bit)));
+ res = (orig & BIT(bit)) != 0;
+ }
+
+ smp_llsc_mb();
+
+ return res;
+}
+
+/*
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_change_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
+ unsigned long res, orig;
+
+ smp_mb__before_atomic();
+
+ if (!kernel_uses_llsc) {
+ res = __mips_test_and_change_bit(nr, addr);
+ } else {
+ orig = __test_bit_op(*m, "%0",
+ "xor\t%1, %0, %3",
+ "ir"(BIT(bit)));
+ res = (orig & BIT(bit)) != 0;
+ }
+
+ smp_llsc_mb();
+
+ return res;
+}
+
+#undef __bit_op
+#undef __test_bit_op
+
+#include <asm-generic/bitops/non-atomic.h>
+
+/*
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * __clear_bit_unlock() is non-atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock if no other CPUs can concurrently
+ * modify other bits in the word.
+ */
+static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+ smp_mb__before_llsc();
+ __clear_bit(nr, addr);
+ nudge_writes();
+}
+
+/*
+ * Return the bit position (0..BITS_PER_LONG-1) of the most significant 1 bit
+ * in a word. Undefined if no 1 bit exists.
+ */
+static __always_inline unsigned long __fls(unsigned long word)
+{
+ int num;
+
+ if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
+ __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
+ __asm__(
+ " .set push \n"
+ " .set "MIPS_ISA_LEVEL" \n"
+ " clz %0, %1 \n"
+ " .set pop \n"
+ : "=r" (num)
+ : "r" (word));
+
+ return 31 - num;
+ }
+
+ if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
+ __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
+ __asm__(
+ " .set push \n"
+ " .set "MIPS_ISA_LEVEL" \n"
+ " dclz %0, %1 \n"
+ " .set pop \n"
+ : "=r" (num)
+ : "r" (word));
+
+ return 63 - num;
+ }
+
+ num = BITS_PER_LONG - 1;
+
+#if BITS_PER_LONG == 64
+ if (!(word & (~0ul << 32))) {
+ num -= 32;
+ word <<= 32;
+ }
+#endif
+ if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
+ num -= 16;
+ word <<= 16;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
+ num -= 8;
+ word <<= 8;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
+ num -= 4;
+ word <<= 4;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
+ num -= 2;
+ word <<= 2;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-1))))
+ num -= 1;
+ return num;
+}
+
+/*
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Returns 0..SZLONG-1
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+ return __fls(word & -word);
+}
+
+/*
+ * fls - find last bit set.
+ * @word: The word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+static inline int fls(unsigned int x)
+{
+ int r;
+
+ if (!__builtin_constant_p(x) &&
+ __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
+ __asm__(
+ " .set push \n"
+ " .set "MIPS_ISA_LEVEL" \n"
+ " clz %0, %1 \n"
+ " .set pop \n"
+ : "=r" (x)
+ : "r" (x));
+
+ return 32 - x;
+ }
+
+ r = 32;
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
+#include <asm-generic/bitops/fls64.h>
+
+/*
+ * ffs - find first bit set.
+ * @word: The word to search
+ *
+ * This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from ffz() below (man ffs).
+ */
+static inline int ffs(int word)
+{
+ if (!word)
+ return 0;
+
+ return fls(word & -word);
+}
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/find.h>
+
+#ifdef __KERNEL__
+
+#include <asm-generic/bitops/sched.h>
+
+#include <asm/arch_hweight.h>
+#include <asm-generic/bitops/const_hweight.h>
+
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_BITOPS_H */
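test_and_set_bit_lock() and clear_bit_unlock() pair acquire and release ordering, so a single bit in a word can serve as a tiny trylock or "work pending" latch. An illustrative sketch with hypothetical names:

#include <linux/bitops.h>
#include <linux/types.h>

static unsigned long demo_state;
#define DEMO_BUSY	0	/* bit number within demo_state */

static bool demo_trylock(void)
{
	/* An old value of 0 means we now own the bit; acquire ordering is implied. */
	return !test_and_set_bit_lock(DEMO_BUSY, &demo_state);
}

static void demo_unlock(void)
{
	/* Release ordering: earlier stores are visible before the bit clears. */
	clear_bit_unlock(DEMO_BUSY, &demo_state);
}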
diff --git a/arch/mips/include/asm/bitrev.h b/arch/mips/include/asm/bitrev.h
new file mode 100644
index 000000000..8a2538e08
--- /dev/null
+++ b/arch/mips/include/asm/bitrev.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __MIPS_ASM_BITREV_H__
+#define __MIPS_ASM_BITREV_H__
+
+#include <linux/swab.h>
+
+static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
+{
+ u32 ret;
+
+ asm("bitswap %0, %1" : "=r"(ret) : "r"(__swab32(x)));
+ return ret;
+}
+
+static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x)
+{
+ u16 ret;
+
+ asm("bitswap %0, %1" : "=r"(ret) : "r"(__swab16(x)));
+ return ret;
+}
+
+static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x)
+{
+ u8 ret;
+
+ asm("bitswap %0, %1" : "=r"(ret) : "r"(x));
+ return ret;
+}
+
+#endif /* __MIPS_ASM_BITREV_H__ */
diff --git a/arch/mips/include/asm/bmips-spaces.h b/arch/mips/include/asm/bmips-spaces.h
new file mode 100644
index 000000000..febc4c30a
--- /dev/null
+++ b/arch/mips/include/asm/bmips-spaces.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BMIPS_SPACES_H
+#define __ASM_BMIPS_SPACES_H
+
+/* Avoid collisions with system base register (SBR) region on BMIPS3300 */
+#define FIXADDR_TOP ((unsigned long)(long)(int)0xff000000)
+
+#endif /* __ASM_BMIPS_SPACES_H */
diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h
new file mode 100644
index 000000000..581a6a3c6
--- /dev/null
+++ b/arch/mips/include/asm/bmips.h
@@ -0,0 +1,128 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
+ *
+ * Definitions for BMIPS processors
+ */
+#ifndef _ASM_BMIPS_H
+#define _ASM_BMIPS_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <asm/addrspace.h>
+#include <asm/mipsregs.h>
+#include <asm/hazards.h>
+
+/* NOTE: the CBR register returns a PA, and it can be above 0xff00_0000 */
+#define BMIPS_GET_CBR() ((void __iomem *)(CKSEG1 | \
+ (unsigned long) \
+ ((read_c0_brcm_cbr() >> 18) << 18)))
+
+#define BMIPS_RAC_CONFIG 0x00000000
+#define BMIPS_RAC_ADDRESS_RANGE 0x00000004
+#define BMIPS_RAC_CONFIG_1 0x00000008
+#define BMIPS_L2_CONFIG 0x0000000c
+#define BMIPS_LMB_CONTROL 0x0000001c
+#define BMIPS_SYSTEM_BASE 0x00000020
+#define BMIPS_PERF_GLOBAL_CONTROL 0x00020000
+#define BMIPS_PERF_CONTROL_0 0x00020004
+#define BMIPS_PERF_CONTROL_1 0x00020008
+#define BMIPS_PERF_COUNTER_0 0x00020010
+#define BMIPS_PERF_COUNTER_1 0x00020014
+#define BMIPS_PERF_COUNTER_2 0x00020018
+#define BMIPS_PERF_COUNTER_3 0x0002001c
+#define BMIPS_RELO_VECTOR_CONTROL_0 0x00030000
+#define BMIPS_RELO_VECTOR_CONTROL_1 0x00038000
+
+#define BMIPS_NMI_RESET_VEC 0x80000000
+#define BMIPS_WARM_RESTART_VEC 0x80000380
+
+#define ZSCM_REG_BASE 0x97000000
+
+#if !defined(__ASSEMBLY__)
+
+#include <linux/cpumask.h>
+#include <asm/r4kcache.h>
+#include <asm/smp-ops.h>
+
+extern const struct plat_smp_ops bmips43xx_smp_ops;
+extern const struct plat_smp_ops bmips5000_smp_ops;
+
+static inline int register_bmips_smp_ops(void)
+{
+#if IS_ENABLED(CONFIG_CPU_BMIPS) && IS_ENABLED(CONFIG_SMP)
+ switch (current_cpu_type()) {
+ case CPU_BMIPS32:
+ case CPU_BMIPS3300:
+ return register_up_smp_ops();
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ register_smp_ops(&bmips43xx_smp_ops);
+ break;
+ case CPU_BMIPS5000:
+ register_smp_ops(&bmips5000_smp_ops);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return 0;
+#else
+ return -ENODEV;
+#endif
+}
+
+extern char bmips_reset_nmi_vec[];
+extern char bmips_reset_nmi_vec_end[];
+extern char bmips_smp_movevec[];
+extern char bmips_smp_int_vec[];
+extern char bmips_smp_int_vec_end[];
+
+extern int bmips_smp_enabled;
+extern int bmips_cpu_offset;
+extern cpumask_t bmips_booted_mask;
+extern unsigned long bmips_tp1_irqs;
+
+extern void bmips_ebase_setup(void);
+extern asmlinkage void plat_wired_tlb_setup(void);
+extern void bmips_cpu_setup(void);
+
+static inline unsigned long bmips_read_zscm_reg(unsigned int offset)
+{
+ unsigned long ret;
+
+ barrier();
+ cache_op(Index_Load_Tag_S, ZSCM_REG_BASE + offset);
+ __sync();
+ _ssnop();
+ _ssnop();
+ _ssnop();
+ _ssnop();
+ _ssnop();
+ _ssnop();
+ _ssnop();
+ ret = read_c0_ddatalo();
+ _ssnop();
+
+ return ret;
+}
+
+static inline void bmips_write_zscm_reg(unsigned int offset, unsigned long data)
+{
+ write_c0_ddatalo(data);
+ _ssnop();
+ _ssnop();
+ _ssnop();
+ cache_op(Index_Store_Tag_S, ZSCM_REG_BASE + offset);
+ _ssnop();
+ _ssnop();
+ _ssnop();
+ barrier();
+}
+
+#endif /* !defined(__ASSEMBLY__) */
+
+#endif /* _ASM_BMIPS_H */
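register_bmips_smp_ops() is meant to be called from a platform's early boot path; a minimal sketch, assuming the call happens in prom_init() (the fallback message is an illustration, not taken from this diff):

void __init prom_init(void)
{
	/* ... firmware/DTB parsing ... */
	if (register_bmips_smp_ops())
		pr_warn("BMIPS SMP ops not registered, booting uniprocessor\n");
}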
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
new file mode 100644
index 000000000..aa03b1237
--- /dev/null
+++ b/arch/mips/include/asm/bootinfo.h
@@ -0,0 +1,166 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1996, 2003 by Ralf Baechle
+ * Copyright (C) 1995, 1996 Andreas Busse
+ * Copyright (C) 1995, 1996 Stoned Elipot
+ * Copyright (C) 1995, 1996 Paul M. Antoine.
+ * Copyright (C) 2009 Zhang Le
+ */
+#ifndef _ASM_BOOTINFO_H
+#define _ASM_BOOTINFO_H
+
+#include <linux/types.h>
+#include <asm/setup.h>
+
+/*
+ * The MACH_ IDs are sort of equivalent to PCI product IDs. As such the
+ * numbers do not necessarily reflect technical relations or similarities
+ * between systems.
+ */
+
+/*
+ * Valid machtype values for group unknown
+ */
+#define MACH_UNKNOWN 0 /* whatever... */
+
+/*
+ * Valid machtype for group DEC
+ */
+#define MACH_DSUNKNOWN 0
+#define MACH_DS23100 1 /* DECstation 2100 or 3100 */
+#define MACH_DS5100 2 /* DECsystem 5100 */
+#define MACH_DS5000_200 3 /* DECstation 5000/200 */
+#define MACH_DS5000_1XX 4 /* DECstation 5000/120, 125, 133, 150 */
+#define MACH_DS5000_XX 5 /* DECstation 5000/20, 25, 33, 50 */
+#define MACH_DS5000_2X0 6 /* DECstation 5000/240, 260 */
+#define MACH_DS5400 7 /* DECsystem 5400 */
+#define MACH_DS5500 8 /* DECsystem 5500 */
+#define MACH_DS5800 9 /* DECsystem 5800 */
+#define MACH_DS5900 10 /* DECsystem 5900 */
+
+/*
+ * Valid machtype for group Mikrotik
+ */
+#define MACH_MIKROTIK_RB532 0 /* Mikrotik RouterBoard 532 */
+#define MACH_MIKROTIK_RB532A 1 /* Mikrotik RouterBoard 532A */
+
+/*
+ * Valid machtype for Loongson family
+ */
+enum loongson2ef_machine_type {
+ MACH_LOONGSON_UNKNOWN,
+ MACH_LEMOTE_FL2E,
+ MACH_LEMOTE_FL2F,
+ MACH_LEMOTE_ML2F7,
+ MACH_LEMOTE_YL2F89,
+ MACH_DEXXON_GDIUM2F10,
+ MACH_LEMOTE_NAS,
+ MACH_LEMOTE_LL2F,
+ MACH_LOONGSON_END
+};
+
+/*
+ * Valid machtype for group INGENIC
+ */
+enum ingenic_machine_type {
+ MACH_INGENIC_UNKNOWN,
+ MACH_INGENIC_JZ4720,
+ MACH_INGENIC_JZ4725,
+ MACH_INGENIC_JZ4725B,
+ MACH_INGENIC_JZ4730,
+ MACH_INGENIC_JZ4740,
+ MACH_INGENIC_JZ4750,
+ MACH_INGENIC_JZ4755,
+ MACH_INGENIC_JZ4760,
+ MACH_INGENIC_JZ4770,
+ MACH_INGENIC_JZ4775,
+ MACH_INGENIC_JZ4780,
+ MACH_INGENIC_X1000,
+ MACH_INGENIC_X1000E,
+ MACH_INGENIC_X1830,
+ MACH_INGENIC_X2000,
+ MACH_INGENIC_X2000E,
+};
+
+extern char *system_type;
+const char *get_system_type(void);
+
+extern unsigned long mips_machtype;
+
+extern void detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max);
+
+extern void prom_init(void);
+extern void prom_free_prom_memory(void);
+extern void prom_cleanup(void);
+
+extern void free_init_pages(const char *what,
+ unsigned long begin, unsigned long end);
+
+extern void (*free_init_pages_eva)(void *begin, void *end);
+
+/*
+ * Initial kernel command line, usually setup by prom_init()
+ */
+extern char arcs_cmdline[COMMAND_LINE_SIZE];
+
+/*
+ * Registers a0, a1, a2 and a3 as passed to the kernel entry by firmware
+ */
+extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
+
+#ifdef CONFIG_USE_OF
+extern unsigned long fw_passed_dtb;
+#endif
+
+/*
+ * Platform memory detection hook called by arch_mem_init()
+ */
+extern void plat_mem_setup(void);
+
+#ifdef CONFIG_SWIOTLB
+/*
+ * Optional platform hook to call swiotlb_setup().
+ */
+extern void plat_swiotlb_setup(void);
+
+#else
+
+static inline void plat_swiotlb_setup(void) {}
+
+#endif /* CONFIG_SWIOTLB */
+
+#ifdef CONFIG_USE_OF
+/**
+ * plat_get_fdt() - Return a pointer to the platform's device tree blob
+ *
+ * This function provides a platform independent API to get a pointer to the
+ * flattened device tree blob. The interface between bootloader and kernel
+ * is not consistent across platforms so it is necessary to provide this
+ * API such that common startup code can locate the FDT.
+ *
+ * This is used by the KASLR code to get command line arguments and random
+ * seed from the device tree. Any platform wishing to use KASLR should
+ * provide this API and select SYS_SUPPORTS_RELOCATABLE.
+ *
+ * Return: Pointer to the flattened device tree blob.
+ */
+extern void *plat_get_fdt(void);
+
+#ifdef CONFIG_RELOCATABLE
+
+/**
+ * plat_fdt_relocated() - Update platform's information about relocated dtb
+ *
+ * This function provides a platform-independent API to set platform's
+ * information about relocated DTB if it needs to be moved due to kernel
+ * relocation occurring at boot.
+ */
+void plat_fdt_relocated(void *new_location);
+
+#endif /* CONFIG_RELOCATABLE */
+#endif /* CONFIG_USE_OF */
+
+#endif /* _ASM_BOOTINFO_H */
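A hedged sketch of how a board might use detect_memory_region() from its plat_mem_setup() hook; the size bounds below are placeholders for the example, not values from this diff:

#include <linux/sizes.h>

void __init plat_mem_setup(void)
{
	/* Probe RAM starting at physical 0, between 4 MiB and 256 MiB. */
	detect_memory_region(0, SZ_4M, SZ_256M);
}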
diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
new file mode 100644
index 000000000..fa3dcbf56
--- /dev/null
+++ b/arch/mips/include/asm/branch.h
@@ -0,0 +1,103 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1997, 1998, 2001 by Ralf Baechle
+ */
+#ifndef _ASM_BRANCH_H
+#define _ASM_BRANCH_H
+
+#include <asm/cpu-features.h>
+#include <asm/mipsregs.h>
+#include <asm/ptrace.h>
+#include <asm/inst.h>
+
+extern int __isa_exception_epc(struct pt_regs *regs);
+extern int __compute_return_epc(struct pt_regs *regs);
+extern int __compute_return_epc_for_insn(struct pt_regs *regs,
+ union mips_instruction insn);
+extern int __microMIPS_compute_return_epc(struct pt_regs *regs);
+extern int __MIPS16e_compute_return_epc(struct pt_regs *regs);
+
+/*
+ * microMIPS bitfields
+ */
+#define MM_POOL32A_MINOR_MASK 0x3f
+#define MM_POOL32A_MINOR_SHIFT 0x6
+#define MM_MIPS32_COND_FC 0x30
+
+int isBranchInstr(struct pt_regs *regs,
+ struct mm_decoded_insn dec_insn, unsigned long *contpc);
+
+extern int __mm_isBranchInstr(struct pt_regs *regs,
+ struct mm_decoded_insn dec_insn, unsigned long *contpc);
+
+static inline int mm_isBranchInstr(struct pt_regs *regs,
+ struct mm_decoded_insn dec_insn, unsigned long *contpc)
+{
+ if (!cpu_has_mmips)
+ return 0;
+
+ return __mm_isBranchInstr(regs, dec_insn, contpc);
+}
+
+static inline int delay_slot(struct pt_regs *regs)
+{
+ return regs->cp0_cause & CAUSEF_BD;
+}
+
+static inline void clear_delay_slot(struct pt_regs *regs)
+{
+ regs->cp0_cause &= ~CAUSEF_BD;
+}
+
+static inline void set_delay_slot(struct pt_regs *regs)
+{
+ regs->cp0_cause |= CAUSEF_BD;
+}
+
+static inline unsigned long exception_epc(struct pt_regs *regs)
+{
+ if (likely(!delay_slot(regs)))
+ return regs->cp0_epc;
+
+ if (get_isa16_mode(regs->cp0_epc))
+ return __isa_exception_epc(regs);
+
+ return regs->cp0_epc + 4;
+}
+
+#define BRANCH_LIKELY_TAKEN 0x0001
+
+static inline int compute_return_epc(struct pt_regs *regs)
+{
+ if (get_isa16_mode(regs->cp0_epc)) {
+ if (cpu_has_mmips)
+ return __microMIPS_compute_return_epc(regs);
+ if (cpu_has_mips16)
+ return __MIPS16e_compute_return_epc(regs);
+ } else if (!delay_slot(regs)) {
+ regs->cp0_epc += 4;
+ return 0;
+ }
+
+ return __compute_return_epc(regs);
+}
+
+static inline int MIPS16e_compute_return_epc(struct pt_regs *regs,
+ union mips16e_instruction *inst)
+{
+ if (likely(!delay_slot(regs))) {
+ if (inst->ri.opcode == MIPS16e_extend_op) {
+ regs->cp0_epc += 4;
+ return 0;
+ }
+ regs->cp0_epc += 2;
+ return 0;
+ }
+
+ return __MIPS16e_compute_return_epc(regs);
+}
+
+#endif /* _ASM_BRANCH_H */
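compute_return_epc() is what exception/emulation paths use to advance the EPC past the instruction just handled, including the branch-plus-delay-slot case. A hedged fragment of such a handler; the error handling shown is illustrative only:

	/* After emulating the faulting instruction, move EPC forward.
	 * A negative return means the branch could not be analysed. */
	if (compute_return_epc(regs) < 0)
		force_sig(SIGILL);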
diff --git a/arch/mips/include/asm/break.h b/arch/mips/include/asm/break.h
new file mode 100644
index 000000000..0ef11429a
--- /dev/null
+++ b/arch/mips/include/asm/break.h
@@ -0,0 +1,26 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 2003 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#ifndef __ASM_BREAK_H
+#define __ASM_BREAK_H
+
+#ifdef __UAPI_ASM_BREAK_H
+#error "Error: Do not directly include <uapi/asm/break.h>"
+#endif
+#include <uapi/asm/break.h>
+
+/*
+ * Break codes used internally to the kernel.
+ */
+#define BRK_KDB 513 /* Used in KDB_ENTER() */
+#define BRK_MEMU 514 /* Used by FPU emulator */
+#define BRK_KPROBE_BP 515 /* Kprobe break */
+#define BRK_KPROBE_SSTEPBP 516 /* Kprobe single step software implementation */
+#define BRK_MULOVF 1023 /* Multiply overflow */
+
+#endif /* __ASM_BREAK_H */
diff --git a/arch/mips/include/asm/bug.h b/arch/mips/include/asm/bug.h
new file mode 100644
index 000000000..745dc160a
--- /dev/null
+++ b/arch/mips/include/asm/bug.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BUG_H
+#define __ASM_BUG_H
+
+#include <linux/compiler.h>
+#include <asm/sgidefs.h>
+
+#ifdef CONFIG_BUG
+
+#include <asm/break.h>
+
+static inline void __noreturn BUG(void)
+{
+ __asm__ __volatile__("break %0" : : "i" (BRK_BUG));
+ unreachable();
+}
+
+#define HAVE_ARCH_BUG
+
+#if (_MIPS_ISA > _MIPS_ISA_MIPS1)
+
+static inline void __BUG_ON(unsigned long condition)
+{
+ if (__builtin_constant_p(condition)) {
+ if (condition)
+ BUG();
+ else
+ return;
+ }
+ __asm__ __volatile__("tne $0, %0, %1"
+ : : "r" (condition), "i" (BRK_BUG));
+}
+
+#define BUG_ON(C) __BUG_ON((unsigned long)(C))
+
+#define HAVE_ARCH_BUG_ON
+
+#endif /* _MIPS_ISA > _MIPS_ISA_MIPS1 */
+
+#endif
+
+#include <asm-generic/bug.h>
+
+#endif /* __ASM_BUG_H */
diff --git a/arch/mips/include/asm/bugs.h b/arch/mips/include/asm/bugs.h
new file mode 100644
index 000000000..8d4cf2986
--- /dev/null
+++ b/arch/mips/include/asm/bugs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2007 Maciej W. Rozycki
+ */
+#ifndef _ASM_BUGS_H
+#define _ASM_BUGS_H
+
+#include <linux/bug.h>
+#include <linux/smp.h>
+
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+
+extern int daddiu_bug;
+
+extern void check_bugs64_early(void);
+
+extern void check_bugs32(void);
+extern void check_bugs64(void);
+
+static inline void check_bugs_early(void)
+{
+ if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
+ check_bugs64_early();
+}
+
+static inline int r4k_daddiu_bug(void)
+{
+ if (!IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
+ return 0;
+
+ WARN_ON(daddiu_bug < 0);
+ return daddiu_bug != 0;
+}
+
+#endif /* _ASM_BUGS_H */
diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
new file mode 100644
index 000000000..29187e12b
--- /dev/null
+++ b/arch/mips/include/asm/cache.h
@@ -0,0 +1,19 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1997, 98, 99, 2000, 2003 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_CACHE_H
+#define _ASM_CACHE_H
+
+#include <kmalloc.h>
+
+#define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define __read_mostly __section(".data..read_mostly")
+
+#endif /* _ASM_CACHE_H */
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
new file mode 100644
index 000000000..d687b40b9
--- /dev/null
+++ b/arch/mips/include/asm/cacheflush.h
@@ -0,0 +1,153 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
+ * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_CACHEFLUSH_H
+#define _ASM_CACHEFLUSH_H
+
+/* Keep includes the same across arches. */
+#include <linux/mm.h>
+#include <asm/cpu-features.h>
+
+/* Cache flushing:
+ *
+ * - flush_cache_all() flushes entire cache
+ * - flush_cache_mm(mm) flushes the specified mm context's cache lines
+ * - flush_cache_dup_mm(mm) handles cache flushing when forking
+ * - flush_cache_page(mm, vmaddr, pfn) flushes a single page
+ * - flush_cache_range(vma, start, end) flushes a range of pages
+ * - flush_icache_range(start, end) flush a range of instructions
+ * - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
+ *
+ * MIPS specific flush operations:
+ *
+ * - flush_icache_all() flush the entire instruction cache
+ * - flush_data_cache_page() flushes a page from the data cache
+ * - __flush_icache_user_range(start, end) flushes range of user instructions
+ */
+
+ /*
+ * This flag is used to indicate that the page pointed to by a pte
+ * is dirty and requires cleaning before returning it to the user.
+ */
+#define PG_dcache_dirty PG_arch_1
+
+#define Page_dcache_dirty(page) \
+ test_bit(PG_dcache_dirty, &(page)->flags)
+#define SetPageDcacheDirty(page) \
+ set_bit(PG_dcache_dirty, &(page)->flags)
+#define ClearPageDcacheDirty(page) \
+ clear_bit(PG_dcache_dirty, &(page)->flags)
+
+extern void (*flush_cache_all)(void);
+extern void (*__flush_cache_all)(void);
+extern void (*flush_cache_mm)(struct mm_struct *mm);
+#define flush_cache_dup_mm(mm) do { (void) (mm); } while (0)
+extern void (*flush_cache_range)(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
+extern void __flush_dcache_page(struct page *page);
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_page(struct page *page)
+{
+ if (cpu_has_dc_aliases)
+ __flush_dcache_page(page);
+ else if (!cpu_has_ic_fills_f_dc)
+ SetPageDcacheDirty(page);
+}
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+#define ARCH_HAS_FLUSH_ANON_PAGE
+extern void __flush_anon_page(struct page *, unsigned long);
+static inline void flush_anon_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vmaddr)
+{
+ if (cpu_has_dc_aliases && PageAnon(page))
+ __flush_anon_page(page, vmaddr);
+}
+
+static inline void flush_icache_page(struct vm_area_struct *vma,
+ struct page *page)
+{
+}
+
+extern void (*flush_icache_range)(unsigned long start, unsigned long end);
+extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
+extern void (*__flush_icache_user_range)(unsigned long start,
+ unsigned long end);
+extern void (*__local_flush_icache_user_range)(unsigned long start,
+ unsigned long end);
+
+extern void (*__flush_cache_vmap)(void);
+
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ if (cpu_has_dc_aliases)
+ __flush_cache_vmap();
+}
+
+extern void (*__flush_cache_vunmap)(void);
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+ if (cpu_has_dc_aliases)
+ __flush_cache_vunmap();
+}
+
+extern void copy_to_user_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr, void *dst, const void *src,
+ unsigned long len);
+
+extern void copy_from_user_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr, void *dst, const void *src,
+ unsigned long len);
+
+extern void (*flush_icache_all)(void);
+extern void (*local_flush_data_cache_page)(void * addr);
+extern void (*flush_data_cache_page)(unsigned long addr);
+
+/* Run kernel code uncached, useful for cache probing functions. */
+unsigned long run_uncached(void *func);
+
+extern void *kmap_coherent(struct page *page, unsigned long addr);
+extern void kunmap_coherent(void);
+extern void *kmap_noncoherent(struct page *page, unsigned long addr);
+
+static inline void kunmap_noncoherent(void)
+{
+ kunmap_coherent();
+}
+
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+ BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
+ flush_dcache_page(page);
+}
+
+/*
+ * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
+ * cache writeback and invalidate operation.
+ */
+extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
+
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
+{
+ if (cpu_has_dc_aliases)
+ __flush_kernel_vmap_range((unsigned long) vaddr, size);
+}
+
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+ if (cpu_has_dc_aliases)
+ __flush_kernel_vmap_range((unsigned long) vaddr, size);
+}
+
+#endif /* _ASM_CACHEFLUSH_H */
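Most of these operations are function pointers selected at boot for the detected cache type, so callers use them like ordinary functions. A hedged example of making newly written instructions visible to the I-cache; buffer names are placeholders:

	/* Copy a code stub into place, then synchronise the I-cache with the
	 * D-cache before anything may jump to it. */
	memcpy(dst, stub, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);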
diff --git a/arch/mips/include/asm/cacheops.h b/arch/mips/include/asm/cacheops.h
new file mode 100644
index 000000000..50253efec
--- /dev/null
+++ b/arch/mips/include/asm/cacheops.h
@@ -0,0 +1,116 @@
+/*
+ * Cache operations for the cache instruction.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) Copyright 1996, 97, 99, 2002, 03 Ralf Baechle
+ * (C) Copyright 1999 Silicon Graphics, Inc.
+ */
+#ifndef __ASM_CACHEOPS_H
+#define __ASM_CACHEOPS_H
+
+/*
+ * Most cache ops are split into a 2 bit field identifying the cache, and a 3
+ * bit field identifying the cache operation.
+ */
+#define CacheOp_Cache 0x03
+#define CacheOp_Op 0x1c
+
+#define Cache_I 0x00
+#define Cache_D 0x01
+#define Cache_T 0x02
+#define Cache_V 0x02 /* Loongson-3 */
+#define Cache_S 0x03
+
+#define Index_Writeback_Inv 0x00
+#define Index_Load_Tag 0x04
+#define Index_Store_Tag 0x08
+#define Hit_Invalidate 0x10
+#define Hit_Writeback_Inv 0x14 /* not with Cache_I though */
+#define Hit_Writeback 0x18
+
+/*
+ * Cache Operations available on all MIPS processors with R4000-style caches
+ */
+#define Index_Invalidate_I (Cache_I | Index_Writeback_Inv)
+#define Index_Writeback_Inv_D (Cache_D | Index_Writeback_Inv)
+#define Index_Load_Tag_I (Cache_I | Index_Load_Tag)
+#define Index_Load_Tag_D (Cache_D | Index_Load_Tag)
+#define Index_Store_Tag_I (Cache_I | Index_Store_Tag)
+#define Index_Store_Tag_D (Cache_D | Index_Store_Tag)
+#define Hit_Invalidate_I (Cache_I | Hit_Invalidate)
+#define Hit_Invalidate_D (Cache_D | Hit_Invalidate)
+#define Hit_Writeback_Inv_D (Cache_D | Hit_Writeback_Inv)
+
+/*
+ * R4000-specific cacheops
+ */
+#define Create_Dirty_Excl_D (Cache_D | 0x0c)
+#define Fill_I (Cache_I | 0x14)
+#define Hit_Writeback_I (Cache_I | Hit_Writeback)
+#define Hit_Writeback_D (Cache_D | Hit_Writeback)
+
+/*
+ * R4000SC and R4400SC-specific cacheops
+ */
+#define Cache_SI 0x02
+#define Cache_SD 0x03
+
+#define Index_Invalidate_SI (Cache_SI | Index_Writeback_Inv)
+#define Index_Writeback_Inv_SD (Cache_SD | Index_Writeback_Inv)
+#define Index_Load_Tag_SI (Cache_SI | Index_Load_Tag)
+#define Index_Load_Tag_SD (Cache_SD | Index_Load_Tag)
+#define Index_Store_Tag_SI (Cache_SI | Index_Store_Tag)
+#define Index_Store_Tag_SD (Cache_SD | Index_Store_Tag)
+#define Create_Dirty_Excl_SD (Cache_SD | 0x0c)
+#define Hit_Invalidate_SI (Cache_SI | Hit_Invalidate)
+#define Hit_Invalidate_SD (Cache_SD | Hit_Invalidate)
+#define Hit_Writeback_Inv_SD (Cache_SD | Hit_Writeback_Inv)
+#define Hit_Writeback_SD (Cache_SD | Hit_Writeback)
+#define Hit_Set_Virtual_SI (Cache_SI | 0x1c)
+#define Hit_Set_Virtual_SD (Cache_SD | 0x1c)
+
+/*
+ * R5000-specific cacheops
+ */
+#define R5K_Page_Invalidate_S (Cache_S | 0x14)
+
+/*
+ * RM7000-specific cacheops
+ */
+#define Page_Invalidate_T (Cache_T | 0x14)
+#define Index_Store_Tag_T (Cache_T | Index_Store_Tag)
+#define Index_Load_Tag_T (Cache_T | Index_Load_Tag)
+
+/*
+ * R10000-specific cacheops
+ *
+ * Cacheops 0x02, 0x06, 0x0a, 0x0c-0x0e, 0x16, 0x1a and 0x1e are unused.
+ * Most of the _S cacheops are identical to the R4000SC _SD cacheops.
+ */
+#define Index_Writeback_Inv_S (Cache_S | Index_Writeback_Inv)
+#define Index_Load_Tag_S (Cache_S | Index_Load_Tag)
+#define Index_Store_Tag_S (Cache_S | Index_Store_Tag)
+#define Hit_Invalidate_S (Cache_S | Hit_Invalidate)
+#define Cache_Barrier 0x14
+#define Hit_Writeback_Inv_S (Cache_S | Hit_Writeback_Inv)
+#define Index_Load_Data_I (Cache_I | 0x18)
+#define Index_Load_Data_D (Cache_D | 0x18)
+#define Index_Load_Data_S (Cache_S | 0x18)
+#define Index_Store_Data_I (Cache_I | 0x1c)
+#define Index_Store_Data_D (Cache_D | 0x1c)
+#define Index_Store_Data_S (Cache_S | 0x1c)
+
+/*
+ * Loongson2-specific cacheops
+ */
+#define Hit_Invalidate_I_Loongson2 (Cache_I | 0x00)
+
+/*
+ * Loongson3-specific cacheops
+ */
+#define Index_Writeback_Inv_V (Cache_V | Index_Writeback_Inv)
+
+#endif /* __ASM_CACHEOPS_H */
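These constants are the opcodes fed to the cache instruction, typically via the cache_op() helper from <asm/r4kcache.h> (as already used in bmips.h above). A one-line illustration:

	/* Write back and invalidate the D-cache line containing 'addr'. */
	cache_op(Hit_Writeback_Inv_D, addr);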
diff --git a/arch/mips/include/asm/cdmm.h b/arch/mips/include/asm/cdmm.h
new file mode 100644
index 000000000..c06dbf8ba
--- /dev/null
+++ b/arch/mips/include/asm/cdmm.h
@@ -0,0 +1,109 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ */
+#ifndef __ASM_CDMM_H
+#define __ASM_CDMM_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/**
+ * struct mips_cdmm_device - Represents a single device on a CDMM bus.
+ * @dev: Driver model device object.
+ * @cpu: CPU which can access this device.
+ * @res: MMIO resource.
+ * @type: Device type identifier.
+ * @rev: Device revision number.
+ */
+struct mips_cdmm_device {
+ struct device dev;
+ unsigned int cpu;
+ struct resource res;
+ unsigned int type;
+ unsigned int rev;
+};
+
+/**
+ * struct mips_cdmm_driver - Represents a driver for a CDMM device.
+ * @drv: Driver model driver object.
+ * @probe: Callback for probing newly discovered devices.
+ * @remove: Callback to remove the device.
+ * @shutdown: Callback on system shutdown.
+ * @cpu_down: Callback when the parent CPU is going down.
+ * Any CPU pinned threads/timers should be disabled.
+ * @cpu_up: Callback when the parent CPU is coming back up again.
+ * CPU pinned threads/timers can be restarted.
+ * @id_table: Table for CDMM IDs to match against.
+ */
+struct mips_cdmm_driver {
+ struct device_driver drv;
+ int (*probe)(struct mips_cdmm_device *);
+ int (*remove)(struct mips_cdmm_device *);
+ void (*shutdown)(struct mips_cdmm_device *);
+ int (*cpu_down)(struct mips_cdmm_device *);
+ int (*cpu_up)(struct mips_cdmm_device *);
+ const struct mips_cdmm_device_id *id_table;
+};
+
+/**
+ * mips_cdmm_phys_base() - Choose a physical base address for CDMM region.
+ *
+ * Picking a suitable physical address at which to map the CDMM region is
+ * platform specific, so this function can be defined by platform code to
+ * pick a suitable value if none is configured by the bootloader.
+ *
+ * This address must be 32kB aligned, and the region occupies a maximum of 32kB
+ * of physical address space which must not be used for anything else.
+ *
+ * Returns: Physical base address for CDMM region, or 0 on failure.
+ */
+phys_addr_t mips_cdmm_phys_base(void);
+
+extern struct bus_type mips_cdmm_bustype;
+void __iomem *mips_cdmm_early_probe(unsigned int dev_type);
+
+#define to_mips_cdmm_device(d) container_of(d, struct mips_cdmm_device, dev)
+
+#define mips_cdmm_get_drvdata(d) dev_get_drvdata(&d->dev)
+#define mips_cdmm_set_drvdata(d, p) dev_set_drvdata(&d->dev, p)
+
+int mips_cdmm_driver_register(struct mips_cdmm_driver *);
+void mips_cdmm_driver_unregister(struct mips_cdmm_driver *);
+
+/*
+ * module_mips_cdmm_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_mips_cdmm_driver(__mips_cdmm_driver) \
+ module_driver(__mips_cdmm_driver, mips_cdmm_driver_register, \
+ mips_cdmm_driver_unregister)
+
+/*
+ * builtin_mips_cdmm_driver() - Helper macro for drivers that don't do anything
+ * special in init and have no exit. This eliminates some boilerplate. Each
+ * driver may only use this macro once, and calling it replaces device_initcall
+ * (or in some cases, the legacy __initcall). This is meant to be a direct
+ * parallel of module_mips_cdmm_driver() above but without the __exit stuff that
+ * is not used for builtin cases.
+ */
+#define builtin_mips_cdmm_driver(__mips_cdmm_driver) \
+ builtin_driver(__mips_cdmm_driver, mips_cdmm_driver_register)
+
+/* drivers/tty/mips_ejtag_fdc.c */
+
+#ifdef CONFIG_MIPS_EJTAG_FDC_EARLYCON
+int setup_early_fdc_console(void);
+#else
+static inline int setup_early_fdc_console(void)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __ASM_CDMM_H */
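A hedged skeleton of a CDMM device driver built on the structures and the module_mips_cdmm_driver() helper declared above; all names and the device type value are hypothetical:

static const struct mips_cdmm_device_id foo_cdmm_ids[] = {
	{ .type = 0xfd },	/* hypothetical CDMM device type */
	{ }
};

static int foo_cdmm_probe(struct mips_cdmm_device *cdev)
{
	void __iomem *regs;

	regs = devm_ioremap(&cdev->dev, cdev->res.start,
			    resource_size(&cdev->res));
	if (!regs)
		return -ENOMEM;

	mips_cdmm_set_drvdata(cdev, regs);
	return 0;
}

static struct mips_cdmm_driver foo_cdmm_driver = {
	.drv		= { .name = "foo-cdmm" },
	.probe		= foo_cdmm_probe,
	.id_table	= foo_cdmm_ids,
};
module_mips_cdmm_driver(foo_cdmm_driver);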
diff --git a/arch/mips/include/asm/cevt-r4k.h b/arch/mips/include/asm/cevt-r4k.h
new file mode 100644
index 000000000..2e13a038d
--- /dev/null
+++ b/arch/mips/include/asm/cevt-r4k.h
@@ -0,0 +1,29 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Kevin D. Kissell
+ */
+
+/*
+ * Definitions used for common event timer implementation
+ * for MIPS 4K-type processors and their MIPS MT variants.
+ * Avoids unsightly extern declarations in C files.
+ */
+#ifndef __ASM_CEVT_R4K_H
+#define __ASM_CEVT_R4K_H
+
+#include <linux/clockchips.h>
+#include <asm/time.h>
+
+DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+
+void mips_event_handler(struct clock_event_device *dev);
+int c0_compare_int_usable(void);
+irqreturn_t c0_compare_interrupt(int, void *);
+
+extern struct irqaction c0_compare_irqaction;
+extern int cp0_timer_irq_installed;
+
+#endif /* __ASM_CEVT_R4K_H */
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
new file mode 100644
index 000000000..5f80c28f5
--- /dev/null
+++ b/arch/mips/include/asm/checksum.h
@@ -0,0 +1,253 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 96, 97, 98, 99, 2001 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2001 Thiemo Seufer.
+ * Copyright (C) 2002 Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ */
+#ifndef _ASM_CHECKSUM_H
+#define _ASM_CHECKSUM_H
+
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
+
+#include <linux/in6.h>
+
+#include <linux/uaccess.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum);
+
+__wsum __csum_partial_copy_from_user(const void __user *src, void *dst, int len);
+__wsum __csum_partial_copy_to_user(const void *src, void __user *dst, int len);
+
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+static inline
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
+{
+ might_fault();
+ if (!access_ok(src, len))
+ return 0;
+ return __csum_partial_copy_from_user(src, dst, len);
+}
+
+/*
+ * Copy and checksum to user
+ */
+#define HAVE_CSUM_COPY_USER
+static inline
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
+{
+ might_fault();
+ if (!access_ok(dst, len))
+ return 0;
+ return __csum_partial_copy_to_user(src, dst, len);
+}
+
+/*
+ * the same as csum_partial, but copies from user space (but on MIPS
+ * we have just one address space, so this is identical to the above)
+ */
+#define _HAVE_ARCH_CSUM_AND_COPY
+__wsum __csum_partial_copy_nocheck(const void *src, void *dst, int len);
+static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
+{
+ return __csum_partial_copy_nocheck(src, dst, len);
+}
+
+/*
+ * Fold a partial checksum without adding pseudo headers
+ */
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 sum = (__force u32)csum;
+
+ sum += (sum << 16);
+ csum = (__force __wsum)(sum < (__force u32)csum);
+ sum >>= 16;
+ sum += (__force u32)csum;
+
+ return (__force __sum16)~sum;
+}
+#define csum_fold csum_fold
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ *
+ * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
+ * Arnt Gulbrandsen.
+ */
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ const unsigned int *word = iph;
+ const unsigned int *stop = word + ihl;
+ unsigned int csum;
+ int carry;
+
+ csum = word[0];
+ csum += word[1];
+ carry = (csum < word[1]);
+ csum += carry;
+
+ csum += word[2];
+ carry = (csum < word[2]);
+ csum += carry;
+
+ csum += word[3];
+ carry = (csum < word[3]);
+ csum += carry;
+
+ word += 4;
+ do {
+ csum += *word;
+ carry = (csum < *word);
+ csum += carry;
+ word++;
+ } while (word != stop);
+
+ return csum_fold(csum);
+}
+#define ip_fast_csum ip_fast_csum
+
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto,
+ __wsum sum)
+{
+ __asm__(
+ " .set push # csum_tcpudp_nofold\n"
+ " .set noat \n"
+#ifdef CONFIG_32BIT
+ " addu %0, %2 \n"
+ " sltu $1, %0, %2 \n"
+ " addu %0, $1 \n"
+
+ " addu %0, %3 \n"
+ " sltu $1, %0, %3 \n"
+ " addu %0, $1 \n"
+
+ " addu %0, %4 \n"
+ " sltu $1, %0, %4 \n"
+ " addu %0, $1 \n"
+#endif
+#ifdef CONFIG_64BIT
+ " daddu %0, %2 \n"
+ " daddu %0, %3 \n"
+ " daddu %0, %4 \n"
+ " dsll32 $1, %0, 0 \n"
+ " daddu %0, $1 \n"
+ " sltu $1, %0, $1 \n"
+ " dsra32 %0, %0, 0 \n"
+ " addu %0, $1 \n"
+#endif
+ " .set pop"
+ : "=r" (sum)
+ : "0" ((__force unsigned long)daddr),
+ "r" ((__force unsigned long)saddr),
+#ifdef __MIPSEL__
+ "r" ((proto + len) << 8),
+#else
+ "r" (proto + len),
+#endif
+ "r" ((__force unsigned long)sum));
+
+ return sum;
+}
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+ return csum_fold(csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto,
+ __wsum sum)
+{
+ __wsum tmp;
+
+ __asm__(
+ " .set push # csum_ipv6_magic\n"
+ " .set noreorder \n"
+ " .set noat \n"
+ " addu %0, %5 # proto (long in network byte order)\n"
+ " sltu $1, %0, %5 \n"
+ " addu %0, $1 \n"
+
+ " addu %0, %6 # csum\n"
+ " sltu $1, %0, %6 \n"
+ " lw %1, 0(%2) # four words source address\n"
+ " addu %0, $1 \n"
+ " addu %0, %1 \n"
+ " sltu $1, %0, %1 \n"
+
+ " lw %1, 4(%2) \n"
+ " addu %0, $1 \n"
+ " addu %0, %1 \n"
+ " sltu $1, %0, %1 \n"
+
+ " lw %1, 8(%2) \n"
+ " addu %0, $1 \n"
+ " addu %0, %1 \n"
+ " sltu $1, %0, %1 \n"
+
+ " lw %1, 12(%2) \n"
+ " addu %0, $1 \n"
+ " addu %0, %1 \n"
+ " sltu $1, %0, %1 \n"
+
+ " lw %1, 0(%3) \n"
+ " addu %0, $1 \n"
+ " addu %0, %1 \n"
+ " sltu $1, %0, %1 \n"
+
+ " lw %1, 4(%3) \n"
+ " addu %0, $1 \n"
+ " addu %0, %1 \n"
+ " sltu $1, %0, %1 \n"
+
+ " lw %1, 8(%3) \n"
+ " addu %0, $1 \n"
+ " addu %0, %1 \n"
+ " sltu $1, %0, %1 \n"
+
+ " lw %1, 12(%3) \n"
+ " addu %0, $1 \n"
+ " addu %0, %1 \n"
+ " sltu $1, %0, %1 \n"
+
+ " addu %0, $1 # Add final carry\n"
+ " .set pop"
+ : "=&r" (sum), "=&r" (tmp)
+ : "r" (saddr), "r" (daddr),
+ "0" (htonl(len)), "r" (htonl(proto)), "r" (sum));
+
+ return csum_fold(sum);
+}
+
+#include <asm-generic/checksum.h>
+#endif /* CONFIG_GENERIC_CSUM */
+
+#endif /* _ASM_CHECKSUM_H */
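A worked example of the fold above (illustrative, not from the source): folding the 32-bit partial sum 0x00010002 adds its two 16-bit halves, 0x0001 + 0x0002 = 0x0003, and the one's complement of that is the final checksum, 0xfffc. In code:

	/* csum_fold() reduces a 32-bit partial sum to the final 16-bit
	 * one's-complement checksum. */
	__sum16 check = csum_fold((__force __wsum)0x00010002);	/* == 0xfffc */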
diff --git a/arch/mips/include/asm/clocksource.h b/arch/mips/include/asm/clocksource.h
new file mode 100644
index 000000000..2f1ebbea3
--- /dev/null
+++ b/arch/mips/include/asm/clocksource.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+#ifndef __ASM_CLOCKSOURCE_H
+#define __ASM_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif /* __ASM_CLOCKSOURCE_H */
diff --git a/arch/mips/include/asm/cmp.h b/arch/mips/include/asm/cmp.h
new file mode 100644
index 000000000..e9e87504b
--- /dev/null
+++ b/arch/mips/include/asm/cmp.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_CMP_H
+#define _ASM_CMP_H
+
+/*
+ * Definitions for CMP multitasking on MIPS cores
+ */
+struct task_struct;
+
+extern void cmp_smp_setup(void);
+extern void cmp_smp_finish(void);
+extern void cmp_boot_secondary(int cpu, struct task_struct *t);
+extern void cmp_init_secondary(void);
+extern void cmp_prepare_cpus(unsigned int max_cpus);
+
+/* This is platform specific */
+extern void cmp_send_ipi(int cpu, unsigned int action);
+#endif /* _ASM_CMP_H */
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
new file mode 100644
index 000000000..3e9c41f69
--- /dev/null
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -0,0 +1,327 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_CMPXCHG_H
+#define __ASM_CMPXCHG_H
+
+#include <linux/bug.h>
+#include <linux/irqflags.h>
+#include <asm/compiler.h>
+#include <asm/llsc.h>
+#include <asm/sync.h>
+#include <asm/war.h>
+
+/*
+ * These functions don't exist, so if they are called you'll either:
+ *
+ * - Get an error at compile-time due to __compiletime_error, if supported by
+ * your compiler.
+ *
+ * or:
+ *
+ * - Get an error at link-time due to the call to the missing function.
+ */
+extern unsigned long __cmpxchg_called_with_bad_pointer(void)
+ __compiletime_error("Bad argument size for cmpxchg");
+extern unsigned long __cmpxchg64_unsupported(void)
+ __compiletime_error("cmpxchg64 not available; cpu_has_64bits may be false");
+extern unsigned long __xchg_called_with_bad_pointer(void)
+ __compiletime_error("Bad argument size for xchg");
+
+#define __xchg_asm(ld, st, m, val) \
+({ \
+ __typeof(*(m)) __ret; \
+ \
+ if (kernel_uses_llsc) { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set push \n" \
+ " .set " MIPS_ISA_ARCH_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " ld " %0, %2 # __xchg_asm \n" \
+ " .set pop \n" \
+ " move $1, %z3 \n" \
+ " .set " MIPS_ISA_ARCH_LEVEL " \n" \
+ " " st " $1, %1 \n" \
+ "\t" __SC_BEQZ "$1, 1b \n" \
+ " .set pop \n" \
+ : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
+ : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) \
+ : __LLSC_CLOBBER); \
+ } else { \
+ unsigned long __flags; \
+ \
+ raw_local_irq_save(__flags); \
+ __ret = *m; \
+ *m = val; \
+ raw_local_irq_restore(__flags); \
+ } \
+ \
+ __ret; \
+})
+
+extern unsigned long __xchg_small(volatile void *ptr, unsigned long val,
+ unsigned int size);
+
+static __always_inline
+unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
+{
+ switch (size) {
+ case 1:
+ case 2:
+ return __xchg_small(ptr, x, size);
+
+ case 4:
+ return __xchg_asm("ll", "sc", (volatile u32 *)ptr, x);
+
+ case 8:
+ if (!IS_ENABLED(CONFIG_64BIT))
+ return __xchg_called_with_bad_pointer();
+
+ return __xchg_asm("lld", "scd", (volatile u64 *)ptr, x);
+
+ default:
+ return __xchg_called_with_bad_pointer();
+ }
+}
+
+#define xchg(ptr, x) \
+({ \
+ __typeof__(*(ptr)) __res; \
+ \
+ /* \
+ * In the Loongson3 workaround case __xchg_asm() already \
+ * contains a completion barrier prior to the LL, so we don't \
+ * need to emit an extra one here. \
+ */ \
+ if (__SYNC_loongson3_war == 0) \
+ smp_mb__before_llsc(); \
+ \
+ __res = (__typeof__(*(ptr))) \
+ __xchg((ptr), (unsigned long)(x), sizeof(*(ptr))); \
+ \
+ smp_llsc_mb(); \
+ \
+ __res; \
+})
+
+#define __cmpxchg_asm(ld, st, m, old, new) \
+({ \
+ __typeof(*(m)) __ret; \
+ \
+ if (kernel_uses_llsc) { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set push \n" \
+ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " ld " %0, %2 # __cmpxchg_asm \n" \
+ " bne %0, %z3, 2f \n" \
+ " .set pop \n" \
+ " move $1, %z4 \n" \
+ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
+ " " st " $1, %1 \n" \
+ "\t" __SC_BEQZ "$1, 1b \n" \
+ " .set pop \n" \
+ "2: " __SYNC(full, loongson3_war) " \n" \
+ : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
+ : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
+ : __LLSC_CLOBBER); \
+ } else { \
+ unsigned long __flags; \
+ \
+ raw_local_irq_save(__flags); \
+ __ret = *m; \
+ if (__ret == old) \
+ *m = new; \
+ raw_local_irq_restore(__flags); \
+ } \
+ \
+ __ret; \
+})
+
+extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size);
+
+static __always_inline
+unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size)
+{
+ switch (size) {
+ case 1:
+ case 2:
+ return __cmpxchg_small(ptr, old, new, size);
+
+ case 4:
+ return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr,
+ (u32)old, new);
+
+ case 8:
+ /* lld/scd are only available for MIPS64 */
+ if (!IS_ENABLED(CONFIG_64BIT))
+ return __cmpxchg_called_with_bad_pointer();
+
+ return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr,
+ (u64)old, new);
+
+ default:
+ return __cmpxchg_called_with_bad_pointer();
+ }
+}
+
+#define cmpxchg_local(ptr, old, new) \
+ ((__typeof__(*(ptr))) \
+ __cmpxchg((ptr), \
+ (unsigned long)(__typeof__(*(ptr)))(old), \
+ (unsigned long)(__typeof__(*(ptr)))(new), \
+ sizeof(*(ptr))))
+
+#define cmpxchg(ptr, old, new) \
+({ \
+ __typeof__(*(ptr)) __res; \
+ \
+ /* \
+ * In the Loongson3 workaround case __cmpxchg_asm() already \
+ * contains a completion barrier prior to the LL, so we don't \
+ * need to emit an extra one here. \
+ */ \
+ if (__SYNC_loongson3_war == 0) \
+ smp_mb__before_llsc(); \
+ \
+ __res = cmpxchg_local((ptr), (old), (new)); \
+ \
+ /* \
+ * In the Loongson3 workaround case __cmpxchg_asm() already \
+ * contains a completion barrier after the SC, so we don't \
+ * need to emit an extra one here. \
+ */ \
+ if (__SYNC_loongson3_war == 0) \
+ smp_llsc_mb(); \
+ \
+ __res; \
+})
+
+#ifdef CONFIG_64BIT
+#define cmpxchg64_local(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+ })
+
+#define cmpxchg64(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg((ptr), (o), (n)); \
+ })
+#else
+
+# include <asm-generic/cmpxchg-local.h>
+# define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+# ifdef CONFIG_SMP
+
+static inline unsigned long __cmpxchg64(volatile void *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long tmp, ret;
+ unsigned long flags;
+
+ /*
+ * The assembly below has to combine 32 bit values into a 64 bit
+ * register, and split 64 bit values from one register into two. If we
+ * were to take an interrupt in the middle of this we'd only save the
+ * least significant 32 bits of each register & probably clobber the
+ * most significant 32 bits of the 64 bit values we're using. In order
+ * to avoid this we must disable interrupts.
+ */
+ local_irq_save(flags);
+
+ asm volatile(
+ " .set push \n"
+ " .set " MIPS_ISA_ARCH_LEVEL " \n"
+ /* Load 64 bits from ptr */
+ " " __SYNC(full, loongson3_war) " \n"
+ "1: lld %L0, %3 # __cmpxchg64 \n"
+ " .set pop \n"
+ /*
+ * Split the 64 bit value we loaded into the 2 registers that hold the
+ * ret variable.
+ */
+ " dsra %M0, %L0, 32 \n"
+ " sll %L0, %L0, 0 \n"
+ /*
+ * Compare ret against old, breaking out of the loop if they don't
+ * match.
+ */
+ " bne %M0, %M4, 2f \n"
+ " bne %L0, %L4, 2f \n"
+ /*
+ * Combine the 32 bit halves from the 2 registers that hold the new
+ * variable into a single 64 bit register.
+ */
+# if MIPS_ISA_REV >= 2
+ " move %L1, %L5 \n"
+ " dins %L1, %M5, 32, 32 \n"
+# else
+ " dsll %L1, %L5, 32 \n"
+ " dsrl %L1, %L1, 32 \n"
+ " .set noat \n"
+ " dsll $at, %M5, 32 \n"
+ " or %L1, %L1, $at \n"
+ " .set at \n"
+# endif
+ " .set push \n"
+ " .set " MIPS_ISA_ARCH_LEVEL " \n"
+ /* Attempt to store new at ptr */
+ " scd %L1, %2 \n"
+ /* If we failed, loop! */
+ "\t" __SC_BEQZ "%L1, 1b \n"
+ "2: " __SYNC(full, loongson3_war) " \n"
+ " .set pop \n"
+ : "=&r"(ret),
+ "=&r"(tmp),
+ "=" GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr)
+ : GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr),
+ "r" (old),
+ "r" (new)
+ : "memory");
+
+ local_irq_restore(flags);
+ return ret;
+}
+
+# define cmpxchg64(ptr, o, n) ({ \
+ unsigned long long __old = (__typeof__(*(ptr)))(o); \
+ unsigned long long __new = (__typeof__(*(ptr)))(n); \
+ __typeof__(*(ptr)) __res; \
+ \
+ /* \
+ * We can only use cmpxchg64 if we know that the CPU supports \
+ * 64-bits, ie. lld & scd. Our call to __cmpxchg64_unsupported \
+ * will cause a build error unless cpu_has_64bits is a \
+ * compile-time constant 1. \
+ */ \
+ if (cpu_has_64bits && kernel_uses_llsc) { \
+ smp_mb__before_llsc(); \
+ __res = __cmpxchg64((ptr), __old, __new); \
+ smp_llsc_mb(); \
+ } else { \
+ __res = __cmpxchg64_unsupported(); \
+ } \
+ \
+ __res; \
+})
+
+# else /* !CONFIG_SMP */
+# define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+# endif /* !CONFIG_SMP */
+#endif /* !CONFIG_64BIT */
+
+#endif /* __ASM_CMPXCHG_H */
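A hedged illustration of the cmpxchg() macro defined above, used for a one-shot claim of a shared resource; variable and function names are placeholders:

static int resource_owner;	/* 0 = free */

static bool claim_resource(int cpu)
{
	/* Atomically take ownership only if nobody holds it yet. */
	return cmpxchg(&resource_owner, 0, cpu + 1) == 0;
}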
diff --git a/arch/mips/include/asm/compat-signal.h b/arch/mips/include/asm/compat-signal.h
new file mode 100644
index 000000000..c3b7a2550
--- /dev/null
+++ b/arch/mips/include/asm/compat-signal.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_COMPAT_SIGNAL_H
+#define __ASM_COMPAT_SIGNAL_H
+
+#include <linux/bug.h>
+#include <linux/compat.h>
+#include <linux/compiler.h>
+
+#include <asm/signal.h>
+#include <asm/siginfo.h>
+
+#include <linux/uaccess.h>
+
+static inline int __copy_conv_sigset_to_user(compat_sigset_t __user *d,
+ const sigset_t *s)
+{
+ BUILD_BUG_ON(sizeof(*d) != sizeof(*s));
+ BUILD_BUG_ON(_NSIG_WORDS != 2);
+
+ return put_compat_sigset(d, s, sizeof(*d));
+}
+
+static inline int __copy_conv_sigset_from_user(sigset_t *d,
+ const compat_sigset_t __user *s)
+{
+ return get_compat_sigset(d, s);
+}
+
+#endif /* __ASM_COMPAT_SIGNAL_H */
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
new file mode 100644
index 000000000..65975712a
--- /dev/null
+++ b/arch/mips/include/asm/compat.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_COMPAT_H
+#define _ASM_COMPAT_H
+/*
+ * Architecture specific compatibility types
+ */
+#include <linux/thread_info.h>
+#include <linux/types.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+
+#include <asm-generic/compat.h>
+
+#define COMPAT_USER_HZ 100
+#define COMPAT_UTS_MACHINE "mips\0\0\0"
+
+typedef s32 __compat_uid_t;
+typedef s32 __compat_gid_t;
+typedef __compat_uid_t __compat_uid32_t;
+typedef __compat_gid_t __compat_gid32_t;
+typedef u32 compat_mode_t;
+typedef u32 compat_dev_t;
+typedef u32 compat_nlink_t;
+typedef s32 compat_ipc_pid_t;
+typedef s32 compat_caddr_t;
+typedef struct {
+ s32 val[2];
+} compat_fsid_t;
+
+struct compat_stat {
+ compat_dev_t st_dev;
+ s32 st_pad1[3];
+ compat_ino_t st_ino;
+ compat_mode_t st_mode;
+ compat_nlink_t st_nlink;
+ __compat_uid_t st_uid;
+ __compat_gid_t st_gid;
+ compat_dev_t st_rdev;
+ s32 st_pad2[2];
+ compat_off_t st_size;
+ s32 st_pad3;
+ old_time32_t st_atime;
+ s32 st_atime_nsec;
+ old_time32_t st_mtime;
+ s32 st_mtime_nsec;
+ old_time32_t st_ctime;
+ s32 st_ctime_nsec;
+ s32 st_blksize;
+ s32 st_blocks;
+ s32 st_pad4[14];
+};
+
+struct compat_flock {
+ short l_type;
+ short l_whence;
+ compat_off_t l_start;
+ compat_off_t l_len;
+ s32 l_sysid;
+ compat_pid_t l_pid;
+ s32 pad[4];
+};
+
+#define F_GETLK64 33
+#define F_SETLK64 34
+#define F_SETLKW64 35
+
+struct compat_flock64 {
+ short l_type;
+ short l_whence;
+ compat_loff_t l_start;
+ compat_loff_t l_len;
+ compat_pid_t l_pid;
+};
+
+struct compat_statfs {
+ int f_type;
+ int f_bsize;
+ int f_frsize;
+ int f_blocks;
+ int f_bfree;
+ int f_files;
+ int f_ffree;
+ int f_bavail;
+ compat_fsid_t f_fsid;
+ int f_namelen;
+ int f_flags;
+ int f_spare[5];
+};
+
+#define COMPAT_RLIM_INFINITY 0x7fffffffUL
+
+typedef u32 compat_old_sigset_t; /* at least 32 bits */
+
+#define _COMPAT_NSIG 128 /* Don't ask !$@#% ... */
+#define _COMPAT_NSIG_BPW 32
+
+typedef u32 compat_sigset_word;
+
+#define COMPAT_OFF_T_MAX 0x7fffffff
+
+static inline void __user *arch_compat_alloc_user_space(long len)
+{
+ struct pt_regs *regs = (struct pt_regs *)
+ ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
+
+ return (void __user *) (regs->regs[29] - len);
+}
+
+struct compat_ipc64_perm {
+ compat_key_t key;
+ __compat_uid32_t uid;
+ __compat_gid32_t gid;
+ __compat_uid32_t cuid;
+ __compat_gid32_t cgid;
+ compat_mode_t mode;
+ unsigned short seq;
+ unsigned short __pad2;
+ compat_ulong_t __unused1;
+ compat_ulong_t __unused2;
+};
+
+struct compat_semid64_ds {
+ struct compat_ipc64_perm sem_perm;
+ compat_ulong_t sem_otime;
+ compat_ulong_t sem_ctime;
+ compat_ulong_t sem_nsems;
+ compat_ulong_t sem_otime_high;
+ compat_ulong_t sem_ctime_high;
+};
+
+struct compat_msqid64_ds {
+ struct compat_ipc64_perm msg_perm;
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+ compat_ulong_t msg_stime_high;
+#endif
+ compat_ulong_t msg_stime;
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ compat_ulong_t msg_stime_high;
+#endif
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+ compat_ulong_t msg_rtime_high;
+#endif
+ compat_ulong_t msg_rtime;
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ compat_ulong_t msg_rtime_high;
+#endif
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+ compat_ulong_t msg_ctime_high;
+#endif
+ compat_ulong_t msg_ctime;
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ compat_ulong_t msg_ctime_high;
+#endif
+ compat_ulong_t msg_cbytes;
+ compat_ulong_t msg_qnum;
+ compat_ulong_t msg_qbytes;
+ compat_pid_t msg_lspid;
+ compat_pid_t msg_lrpid;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+ struct compat_ipc64_perm shm_perm;
+ compat_size_t shm_segsz;
+ compat_ulong_t shm_atime;
+ compat_ulong_t shm_dtime;
+ compat_ulong_t shm_ctime;
+ compat_pid_t shm_cpid;
+ compat_pid_t shm_lpid;
+ compat_ulong_t shm_nattch;
+ compat_ushort_t shm_atime_high;
+ compat_ushort_t shm_dtime_high;
+ compat_ushort_t shm_ctime_high;
+ compat_ushort_t __unused2;
+};
+
+/* MIPS has unusual order of fields in stack_t */
+typedef struct compat_sigaltstack {
+ compat_uptr_t ss_sp;
+ compat_size_t ss_size;
+ int ss_flags;
+} compat_stack_t;
+#define compat_sigaltstack compat_sigaltstack
+
+static inline int is_compat_task(void)
+{
+ return test_thread_flag(TIF_32BIT_ADDR);
+}
+
+#endif /* _ASM_COMPAT_H */
diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h
new file mode 100644
index 000000000..a2cb2d2b1
--- /dev/null
+++ b/arch/mips/include/asm/compiler.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2004, 2007 Maciej W. Rozycki
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef _ASM_COMPILER_H
+#define _ASM_COMPILER_H
+
+/*
+ * With GCC 4.5 onwards we can use __builtin_unreachable to indicate to the
+ * compiler that a particular code path will never be hit. This allows it to be
+ * optimised out of the generated binary.
+ *
+ * Unfortunately at least GCC 4.6.3 through 7.3.0 inclusive suffer from a bug
+ * that can lead to instructions from beyond an unreachable statement being
+ * incorrectly reordered into earlier delay slots if the unreachable statement
+ * is the only content of a case in a switch statement. This can lead to
+ * seemingly random behaviour, such as invalid memory accesses from incorrectly
+ * reordered loads or stores. See this potential GCC fix for details:
+ *
+ * https://gcc.gnu.org/ml/gcc-patches/2015-09/msg00360.html
+ *
+ * It is unclear whether GCC 8 onwards suffer from the same issue - nothing
+ * relevant is mentioned in GCC 8 release notes and nothing obviously relevant
+ * stands out in GCC commit logs, but these newer GCC versions generate very
+ * different code for the testcase which doesn't exhibit the bug.
+ *
+ * GCC also handles stack allocation suboptimally when calling noreturn
+ * functions or calling __builtin_unreachable():
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
+ *
+ * We work around both of these issues by placing a volatile asm statement,
+ * which GCC is prevented from reordering past, prior to __builtin_unreachable
+ * calls.
+ *
+ * The .insn statement is required to ensure that any branches to the
+ * statement, which sadly must be kept due to the asm statement, are known to
+ * be branches to code and satisfy linker requirements for microMIPS kernels.
+ */
+#undef barrier_before_unreachable
+#define barrier_before_unreachable() asm volatile(".insn")
+
+#if !defined(CONFIG_CC_IS_GCC) || \
+ (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
+# define GCC_OFF_SMALL_ASM() "ZC"
+#elif defined(CONFIG_CPU_MICROMIPS)
+# error "microMIPS compilation unsupported with GCC older than 4.9"
+#else
+# define GCC_OFF_SMALL_ASM() "R"
+#endif
+
+#ifdef CONFIG_CPU_MIPSR6
+#define MIPS_ISA_LEVEL "mips64r6"
+#define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL
+#define MIPS_ISA_LEVEL_RAW mips64r6
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#elif defined(CONFIG_CPU_MIPSR5)
+#define MIPS_ISA_LEVEL "mips64r5"
+#define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL
+#define MIPS_ISA_LEVEL_RAW mips64r5
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#else
+/* MIPS64 is a superset of MIPS32 */
+#define MIPS_ISA_LEVEL "mips64r2"
+#define MIPS_ISA_ARCH_LEVEL "arch=r4000"
+#define MIPS_ISA_LEVEL_RAW mips64r2
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#endif /* CONFIG_CPU_MIPSR6 */
+
+#endif /* _ASM_COMPILER_H */
diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h
new file mode 100644
index 000000000..6b7396a6a
--- /dev/null
+++ b/arch/mips/include/asm/cop2.h
@@ -0,0 +1,72 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_COP2_H
+#define __ASM_COP2_H
+
+#include <linux/notifier.h>
+
+#if defined(CONFIG_CPU_CAVIUM_OCTEON)
+
+extern void octeon_cop2_save(struct octeon_cop2_state *);
+extern void octeon_cop2_restore(struct octeon_cop2_state *);
+
+#define cop2_save(r) octeon_cop2_save(&(r)->thread.cp2)
+#define cop2_restore(r) octeon_cop2_restore(&(r)->thread.cp2)
+
+#define cop2_present 1
+#define cop2_lazy_restore 1
+
+#elif defined(CONFIG_CPU_XLP)
+
+extern void nlm_cop2_save(struct nlm_cop2_state *);
+extern void nlm_cop2_restore(struct nlm_cop2_state *);
+
+#define cop2_save(r) nlm_cop2_save(&(r)->thread.cp2)
+#define cop2_restore(r) nlm_cop2_restore(&(r)->thread.cp2)
+
+#define cop2_present 1
+#define cop2_lazy_restore 0
+
+#elif defined(CONFIG_CPU_LOONGSON64)
+
+#define cop2_present 1
+#define cop2_lazy_restore 1
+#define cop2_save(r) do { (void)(r); } while (0)
+#define cop2_restore(r) do { (void)(r); } while (0)
+
+#else
+
+#define cop2_present 0
+#define cop2_lazy_restore 0
+#define cop2_save(r) do { (void)(r); } while (0)
+#define cop2_restore(r) do { (void)(r); } while (0)
+#endif
+
+enum cu2_ops {
+ CU2_EXCEPTION,
+ CU2_LWC2_OP,
+ CU2_LDC2_OP,
+ CU2_SWC2_OP,
+ CU2_SDC2_OP,
+};
+
+extern int register_cu2_notifier(struct notifier_block *nb);
+extern int cu2_notifier_call_chain(unsigned long val, void *v);
+
+#define cu2_notifier(fn, pri) \
+({ \
+ static struct notifier_block fn##_nb = { \
+ .notifier_call = fn, \
+ .priority = pri \
+ }; \
+ \
+ register_cu2_notifier(&fn##_nb); \
+})
+
+#endif /* __ASM_COP2_H */
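A hedged sketch of the cu2_notifier() pattern, modelled on existing in-tree users; the names are hypothetical. Returning NOTIFY_BAD stops the chain so the default handler (which sends SIGILL) is not called:

static int foo_cu2_call(struct notifier_block *nb, unsigned long action,
			void *data)
{
	if (action != CU2_EXCEPTION)
		return NOTIFY_OK;

	/* Enable/restore coprocessor 2 state for the current task here. */
	return NOTIFY_BAD;	/* handled; skip the default SIGILL path */
}

static int __init foo_cu2_setup(void)
{
	return cu2_notifier(foo_cu2_call, 0);
}
early_initcall(foo_cu2_setup);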
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
new file mode 100644
index 000000000..dd03bc905
--- /dev/null
+++ b/arch/mips/include/asm/cpu-features.h
@@ -0,0 +1,756 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#ifndef __ASM_CPU_FEATURES_H
+#define __ASM_CPU_FEATURES_H
+
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/isa-rev.h>
+#include <cpu-feature-overrides.h>
+
+#define __ase(ase) (cpu_data[0].ases & (ase))
+#define __isa(isa) (cpu_data[0].isa_level & (isa))
+#define __opt(opt) (cpu_data[0].options & (opt))
+
+/*
+ * Check if MIPS_ISA_REV is >= isa *and* an option or ASE is detected during
+ * boot (typically by cpu_probe()).
+ *
+ * Note that these should only be used in cases where a kernel built for an
+ * older ISA *cannot* run on a CPU which supports the feature in question. For
+ * example this may be used for features introduced with MIPSr6, since a kernel
+ * built for an older ISA cannot run on a MIPSr6 CPU. This should not be used
+ * for MIPSr2 features however, since a MIPSr1 or earlier kernel might run on a
+ * MIPSr2 CPU.
+ */
+#define __isa_ge_and_ase(isa, ase) ((MIPS_ISA_REV >= (isa)) && __ase(ase))
+#define __isa_ge_and_opt(isa, opt) ((MIPS_ISA_REV >= (isa)) && __opt(opt))
+
+/*
+ * Check if MIPS_ISA_REV is >= isa *or* an option or ASE is detected during
+ * boot (typically by cpu_probe()).
+ *
+ * These are for use with features that are optional up until a particular ISA
+ * revision & then become required.
+ */
+#define __isa_ge_or_ase(isa, ase) ((MIPS_ISA_REV >= (isa)) || __ase(ase))
+#define __isa_ge_or_opt(isa, opt) ((MIPS_ISA_REV >= (isa)) || __opt(opt))
+
+/*
+ * Check if MIPS_ISA_REV is < isa *and* an option or ASE is detected during
+ * boot (typically by cpu_probe()).
+ *
+ * These are for use with features that are optional up until a particular ISA
+ * revision & are then removed - ie. no longer present in any CPU implementing
+ * the given ISA revision.
+ */
+#define __isa_lt_and_ase(isa, ase) ((MIPS_ISA_REV < (isa)) && __ase(ase))
+#define __isa_lt_and_opt(isa, opt) ((MIPS_ISA_REV < (isa)) && __opt(opt))
+
+/*
+ * Similarly allow for ISA level checks that take into account knowledge of the
+ * ISA targeted by the kernel build, provided by MIPS_ISA_REV.
+ */
+#define __isa_ge_and_flag(isa, flag) ((MIPS_ISA_REV >= (isa)) && __isa(flag))
+#define __isa_ge_or_flag(isa, flag) ((MIPS_ISA_REV >= (isa)) || __isa(flag))
+#define __isa_lt_and_flag(isa, flag) ((MIPS_ISA_REV < (isa)) && __isa(flag))
+#define __isa_range(ge, lt) \
+ ((MIPS_ISA_REV >= (ge)) && (MIPS_ISA_REV < (lt)))
+#define __isa_range_or_flag(ge, lt, flag) \
+ (__isa_range(ge, lt) || ((MIPS_ISA_REV < (lt)) && __isa(flag)))
+#define __isa_range_and_ase(ge, lt, ase) \
+ (__isa_range(ge, lt) && __ase(ase))
+
+/*
+ * SMP assumption: Options of CPU 0 are a superset of all processors.
+ * This is true for all known MIPS systems.
+ */
+#ifndef cpu_has_tlb
+#define cpu_has_tlb __opt(MIPS_CPU_TLB)
+#endif
+#ifndef cpu_has_ftlb
+#define cpu_has_ftlb __opt(MIPS_CPU_FTLB)
+#endif
+#ifndef cpu_has_tlbinv
+#define cpu_has_tlbinv __opt(MIPS_CPU_TLBINV)
+#endif
+#ifndef cpu_has_segments
+#define cpu_has_segments __opt(MIPS_CPU_SEGMENTS)
+#endif
+#ifndef cpu_has_eva
+#define cpu_has_eva __opt(MIPS_CPU_EVA)
+#endif
+#ifndef cpu_has_htw
+#define cpu_has_htw __opt(MIPS_CPU_HTW)
+#endif
+#ifndef cpu_has_ldpte
+#define cpu_has_ldpte __opt(MIPS_CPU_LDPTE)
+#endif
+#ifndef cpu_has_rixiex
+#define cpu_has_rixiex __isa_ge_or_opt(6, MIPS_CPU_RIXIEX)
+#endif
+#ifndef cpu_has_maar
+#define cpu_has_maar __opt(MIPS_CPU_MAAR)
+#endif
+#ifndef cpu_has_rw_llb
+#define cpu_has_rw_llb __isa_ge_or_opt(6, MIPS_CPU_RW_LLB)
+#endif
+
+/*
+ * For the moment we don't consider R6000 and R8000 so we can assume that
+ * anything that doesn't support R4000-style exceptions and interrupts is
+ * R3000-like. Users should still treat these two macro definitions as
+ * opaque.
+ */
+#ifndef cpu_has_3kex
+#define cpu_has_3kex (!cpu_has_4kex)
+#endif
+#ifndef cpu_has_4kex
+#define cpu_has_4kex __isa_ge_or_opt(1, MIPS_CPU_4KEX)
+#endif
+#ifndef cpu_has_3k_cache
+#define cpu_has_3k_cache __isa_lt_and_opt(1, MIPS_CPU_3K_CACHE)
+#endif
+#define cpu_has_6k_cache 0
+#define cpu_has_8k_cache 0
+#ifndef cpu_has_4k_cache
+#define cpu_has_4k_cache __isa_ge_or_opt(1, MIPS_CPU_4K_CACHE)
+#endif
+#ifndef cpu_has_tx39_cache
+#define cpu_has_tx39_cache __opt(MIPS_CPU_TX39_CACHE)
+#endif
+#ifndef cpu_has_octeon_cache
+#define cpu_has_octeon_cache \
+({ \
+ int __res; \
+ \
+ switch (boot_cpu_type()) { \
+ case CPU_CAVIUM_OCTEON: \
+ case CPU_CAVIUM_OCTEON_PLUS: \
+ case CPU_CAVIUM_OCTEON2: \
+ case CPU_CAVIUM_OCTEON3: \
+ __res = 1; \
+ break; \
+ \
+ default: \
+ __res = 0; \
+ } \
+ \
+ __res; \
+})
+#endif
+/* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */
+#ifndef cpu_has_fpu
+# ifdef CONFIG_MIPS_FP_SUPPORT
+# define cpu_has_fpu (current_cpu_data.options & MIPS_CPU_FPU)
+# define raw_cpu_has_fpu (raw_current_cpu_data.options & MIPS_CPU_FPU)
+# else
+# define cpu_has_fpu 0
+# define raw_cpu_has_fpu 0
+# endif
+#else
+# define raw_cpu_has_fpu cpu_has_fpu
+#endif
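+
+/*
+ * Illustrative sketch (editor's note): a platform that knows its CPU at
+ * build time can short-circuit these runtime tests from its
+ * <cpu-feature-overrides.h>, for example (values purely hypothetical):
+ *
+ *     #define cpu_has_tlb   1
+ *     #define cpu_has_watch 0
+ *     #define cpu_has_divec 1
+ *
+ * Per the note above, cpu_has_fpu must not be overridden to 1 or the
+ * "nofpu" command line option stops working.
+ */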
+#ifndef cpu_has_32fpr
+#define cpu_has_32fpr __isa_ge_or_opt(1, MIPS_CPU_32FPR)
+#endif
+#ifndef cpu_has_counter
+#define cpu_has_counter __opt(MIPS_CPU_COUNTER)
+#endif
+#ifndef cpu_has_watch
+#define cpu_has_watch __opt(MIPS_CPU_WATCH)
+#endif
+#ifndef cpu_has_divec
+#define cpu_has_divec __isa_ge_or_opt(1, MIPS_CPU_DIVEC)
+#endif
+#ifndef cpu_has_vce
+#define cpu_has_vce __opt(MIPS_CPU_VCE)
+#endif
+#ifndef cpu_has_cache_cdex_p
+#define cpu_has_cache_cdex_p __opt(MIPS_CPU_CACHE_CDEX_P)
+#endif
+#ifndef cpu_has_cache_cdex_s
+#define cpu_has_cache_cdex_s __opt(MIPS_CPU_CACHE_CDEX_S)
+#endif
+#ifndef cpu_has_prefetch
+#define cpu_has_prefetch __isa_ge_or_opt(1, MIPS_CPU_PREFETCH)
+#endif
+#ifndef cpu_has_mcheck
+#define cpu_has_mcheck __isa_ge_or_opt(1, MIPS_CPU_MCHECK)
+#endif
+#ifndef cpu_has_ejtag
+#define cpu_has_ejtag __opt(MIPS_CPU_EJTAG)
+#endif
+#ifndef cpu_has_llsc
+#define cpu_has_llsc __isa_ge_or_opt(1, MIPS_CPU_LLSC)
+#endif
+#ifndef kernel_uses_llsc
+#define kernel_uses_llsc cpu_has_llsc
+#endif
+#ifndef cpu_has_guestctl0ext
+#define cpu_has_guestctl0ext __opt(MIPS_CPU_GUESTCTL0EXT)
+#endif
+#ifndef cpu_has_guestctl1
+#define cpu_has_guestctl1 __opt(MIPS_CPU_GUESTCTL1)
+#endif
+#ifndef cpu_has_guestctl2
+#define cpu_has_guestctl2 __opt(MIPS_CPU_GUESTCTL2)
+#endif
+#ifndef cpu_has_guestid
+#define cpu_has_guestid __opt(MIPS_CPU_GUESTID)
+#endif
+#ifndef cpu_has_drg
+#define cpu_has_drg __opt(MIPS_CPU_DRG)
+#endif
+#ifndef cpu_has_mips16
+#define cpu_has_mips16 __isa_lt_and_ase(6, MIPS_ASE_MIPS16)
+#endif
+#ifndef cpu_has_mips16e2
+#define cpu_has_mips16e2 __isa_lt_and_ase(6, MIPS_ASE_MIPS16E2)
+#endif
+#ifndef cpu_has_mdmx
+#define cpu_has_mdmx __isa_lt_and_ase(6, MIPS_ASE_MDMX)
+#endif
+#ifndef cpu_has_mips3d
+#define cpu_has_mips3d __isa_lt_and_ase(6, MIPS_ASE_MIPS3D)
+#endif
+#ifndef cpu_has_smartmips
+#define cpu_has_smartmips __isa_lt_and_ase(6, MIPS_ASE_SMARTMIPS)
+#endif
+
+#ifndef cpu_has_rixi
+#define cpu_has_rixi __isa_ge_or_opt(6, MIPS_CPU_RIXI)
+#endif
+
+#ifndef cpu_has_mmips
+# if defined(__mips_micromips)
+# define cpu_has_mmips 1
+# elif defined(CONFIG_SYS_SUPPORTS_MICROMIPS)
+# define cpu_has_mmips __opt(MIPS_CPU_MICROMIPS)
+# else
+# define cpu_has_mmips 0
+# endif
+#endif
+
+#ifndef cpu_has_lpa
+#define cpu_has_lpa __opt(MIPS_CPU_LPA)
+#endif
+#ifndef cpu_has_mvh
+#define cpu_has_mvh __opt(MIPS_CPU_MVH)
+#endif
+#ifndef cpu_has_xpa
+#define cpu_has_xpa (cpu_has_lpa && cpu_has_mvh)
+#endif
+#ifndef cpu_has_vtag_icache
+#define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
+#endif
+#ifndef cpu_has_dc_aliases
+#define cpu_has_dc_aliases (cpu_data[0].dcache.flags & MIPS_CACHE_ALIASES)
+#endif
+#ifndef cpu_has_ic_fills_f_dc
+#define cpu_has_ic_fills_f_dc (cpu_data[0].icache.flags & MIPS_CACHE_IC_F_DC)
+#endif
+#ifndef cpu_has_pindexed_dcache
+#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
+#endif
+
+/*
+ * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
+ * such as the R10000 have I-Caches that snoop local stores; the embedded ones
+ * don't. For maintaining I-cache coherency this means we need to flush the
+ * D-cache all the way back to wherever the I-cache does refills from, so the
+ * I-cache has a chance to see the new data at all. Then we have to flush the
+ * I-cache also.
+ * Note we may have been rescheduled and may no longer be running on the CPU
+ * that did the store so we can't optimize this into only doing the flush on
+ * the local CPU.
+ */
+#ifndef cpu_icache_snoops_remote_store
+#ifdef CONFIG_SMP
+#define cpu_icache_snoops_remote_store (cpu_data[0].icache.flags & MIPS_IC_SNOOPS_REMOTE)
+#else
+#define cpu_icache_snoops_remote_store 1
+#endif
+#endif
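+
+/*
+ * Illustrative sketch (editor's note; writeback_dcache_range() and
+ * invalidate_icache_range() are hypothetical helpers, not a real API):
+ * the comment above translates into cache maintenance roughly like
+ *
+ *     if (!cpu_icache_snoops_remote_store)
+ *             writeback_dcache_range(addr, len);
+ *     invalidate_icache_range(addr, len);
+ *
+ * and, as noted, it cannot be restricted to the CPU that did the store.
+ */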
+
+#ifndef cpu_has_mips_1
+# define cpu_has_mips_1 (MIPS_ISA_REV < 6)
+#endif
+#ifndef cpu_has_mips_2
+# define cpu_has_mips_2 __isa_lt_and_flag(6, MIPS_CPU_ISA_II)
+#endif
+#ifndef cpu_has_mips_3
+# define cpu_has_mips_3 __isa_lt_and_flag(6, MIPS_CPU_ISA_III)
+#endif
+#ifndef cpu_has_mips_4
+# define cpu_has_mips_4 __isa_lt_and_flag(6, MIPS_CPU_ISA_IV)
+#endif
+#ifndef cpu_has_mips_5
+# define cpu_has_mips_5 __isa_lt_and_flag(6, MIPS_CPU_ISA_V)
+#endif
+#ifndef cpu_has_mips32r1
+# define cpu_has_mips32r1 __isa_range_or_flag(1, 6, MIPS_CPU_ISA_M32R1)
+#endif
+#ifndef cpu_has_mips32r2
+# define cpu_has_mips32r2 __isa_range_or_flag(2, 6, MIPS_CPU_ISA_M32R2)
+#endif
+#ifndef cpu_has_mips32r5
+# define cpu_has_mips32r5 __isa_range_or_flag(5, 6, MIPS_CPU_ISA_M32R5)
+#endif
+#ifndef cpu_has_mips32r6
+# define cpu_has_mips32r6 __isa_ge_or_flag(6, MIPS_CPU_ISA_M32R6)
+#endif
+#ifndef cpu_has_mips64r1
+# define cpu_has_mips64r1 (cpu_has_64bits && \
+ __isa_range_or_flag(1, 6, MIPS_CPU_ISA_M64R1))
+#endif
+#ifndef cpu_has_mips64r2
+# define cpu_has_mips64r2 (cpu_has_64bits && \
+ __isa_range_or_flag(2, 6, MIPS_CPU_ISA_M64R2))
+#endif
+#ifndef cpu_has_mips64r5
+# define cpu_has_mips64r5 (cpu_has_64bits && \
+ __isa_range_or_flag(5, 6, MIPS_CPU_ISA_M64R5))
+#endif
+#ifndef cpu_has_mips64r6
+# define cpu_has_mips64r6 __isa_ge_and_flag(6, MIPS_CPU_ISA_M64R6)
+#endif
+
+/*
+ * Shortcuts ...
+ */
+#define cpu_has_mips_2_3_4_5 (cpu_has_mips_2 | cpu_has_mips_3_4_5)
+#define cpu_has_mips_3_4_5 (cpu_has_mips_3 | cpu_has_mips_4_5)
+#define cpu_has_mips_4_5 (cpu_has_mips_4 | cpu_has_mips_5)
+
+#define cpu_has_mips_2_3_4_5_r (cpu_has_mips_2 | cpu_has_mips_3_4_5_r)
+#define cpu_has_mips_3_4_5_r (cpu_has_mips_3 | cpu_has_mips_4_5_r)
+#define cpu_has_mips_4_5_r (cpu_has_mips_4 | cpu_has_mips_5_r)
+#define cpu_has_mips_5_r (cpu_has_mips_5 | cpu_has_mips_r)
+
+#define cpu_has_mips_3_4_5_64_r2_r6 \
+ (cpu_has_mips_3 | cpu_has_mips_4_5_64_r2_r6)
+#define cpu_has_mips_4_5_64_r2_r6 \
+ (cpu_has_mips_4_5 | cpu_has_mips64r1 | \
+ cpu_has_mips_r2 | cpu_has_mips_r5 | \
+ cpu_has_mips_r6)
+
+#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | \
+ cpu_has_mips32r5 | cpu_has_mips32r6)
+#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2 | \
+ cpu_has_mips64r5 | cpu_has_mips64r6)
+#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
+#define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
+#define cpu_has_mips_r5 (cpu_has_mips32r5 | cpu_has_mips64r5)
+#define cpu_has_mips_r6 (cpu_has_mips32r6 | cpu_has_mips64r6)
+#define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \
+ cpu_has_mips32r5 | cpu_has_mips32r6 | \
+ cpu_has_mips64r1 | cpu_has_mips64r2 | \
+ cpu_has_mips64r5 | cpu_has_mips64r6)
+
+/* MIPSR2 - MIPSR6 have a lot of similarities */
+#define cpu_has_mips_r2_r6 (cpu_has_mips_r2 | cpu_has_mips_r5 | \
+ cpu_has_mips_r6)
+
+/*
+ * cpu_has_mips_r2_exec_hazard - return whether an IHB is required on the
+ * current processor
+ *
+ * Returns a non-zero value if the current processor implementation requires
+ * an IHB instruction to deal with an instruction hazard as per the MIPS R2
+ * architecture specification, zero otherwise.
+ */
+#ifndef cpu_has_mips_r2_exec_hazard
+#define cpu_has_mips_r2_exec_hazard \
+({ \
+ int __res; \
+ \
+ switch (boot_cpu_type()) { \
+ case CPU_M14KC: \
+ case CPU_74K: \
+ case CPU_1074K: \
+ case CPU_PROAPTIV: \
+ case CPU_P5600: \
+ case CPU_M5150: \
+ case CPU_QEMU_GENERIC: \
+ case CPU_CAVIUM_OCTEON: \
+ case CPU_CAVIUM_OCTEON_PLUS: \
+ case CPU_CAVIUM_OCTEON2: \
+ case CPU_CAVIUM_OCTEON3: \
+ __res = 0; \
+ break; \
+ \
+ default: \
+ __res = 1; \
+ } \
+ \
+ __res; \
+})
+#endif
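+
+/*
+ * Usage sketch (editor's illustration): code sequencing CP0 writes can skip
+ * the barrier on cores that do not need it, roughly
+ *
+ *     if (cpu_has_mips_r2_exec_hazard)
+ *             emit_ihb();
+ *
+ * where emit_ihb() is a hypothetical stand-in for the real hazard barrier
+ * machinery in <asm/hazards.h>.
+ */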
+
+/*
+ * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
+ * pre-MIPS32/MIPS64 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
+ * has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels
+ * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
+ */
+#ifndef cpu_has_clo_clz
+#define cpu_has_clo_clz cpu_has_mips_r
+#endif
+
+/*
+ * MIPS32 R2, MIPS64 R2, Loongson 3A and Octeon have WSBH.
+ * MIPS64 R2, Loongson 3A and Octeon have WSBH, DSBH and DSHD.
+ * This indicates the availability of WSBH and, on 64-bit CPUs, also of
+ * DSBH and DSHD.
+ */
+#ifndef cpu_has_wsbh
+#define cpu_has_wsbh cpu_has_mips_r2
+#endif
+
+#ifndef cpu_has_dsp
+#define cpu_has_dsp __ase(MIPS_ASE_DSP)
+#endif
+
+#ifndef cpu_has_dsp2
+#define cpu_has_dsp2 __ase(MIPS_ASE_DSP2P)
+#endif
+
+#ifndef cpu_has_dsp3
+#define cpu_has_dsp3 __ase(MIPS_ASE_DSP3)
+#endif
+
+#ifndef cpu_has_loongson_mmi
+#define cpu_has_loongson_mmi __ase(MIPS_ASE_LOONGSON_MMI)
+#endif
+
+#ifndef cpu_has_loongson_cam
+#define cpu_has_loongson_cam __ase(MIPS_ASE_LOONGSON_CAM)
+#endif
+
+#ifndef cpu_has_loongson_ext
+#define cpu_has_loongson_ext __ase(MIPS_ASE_LOONGSON_EXT)
+#endif
+
+#ifndef cpu_has_loongson_ext2
+#define cpu_has_loongson_ext2 __ase(MIPS_ASE_LOONGSON_EXT2)
+#endif
+
+#ifndef cpu_has_mipsmt
+#define cpu_has_mipsmt __isa_range_and_ase(2, 6, MIPS_ASE_MIPSMT)
+#endif
+
+#ifndef cpu_has_vp
+#define cpu_has_vp __isa_ge_and_opt(6, MIPS_CPU_VP)
+#endif
+
+#ifndef cpu_has_userlocal
+#define cpu_has_userlocal __isa_ge_or_opt(6, MIPS_CPU_ULRI)
+#endif
+
+#ifdef CONFIG_32BIT
+# ifndef cpu_has_nofpuex
+# define cpu_has_nofpuex __isa_lt_and_opt(1, MIPS_CPU_NOFPUEX)
+# endif
+# ifndef cpu_has_64bits
+# define cpu_has_64bits (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
+# endif
+# ifndef cpu_has_64bit_zero_reg
+# define cpu_has_64bit_zero_reg (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
+# endif
+# ifndef cpu_has_64bit_gp_regs
+# define cpu_has_64bit_gp_regs 0
+# endif
+# ifndef cpu_vmbits
+# define cpu_vmbits 31
+# endif
+#endif
+
+#ifdef CONFIG_64BIT
+# ifndef cpu_has_nofpuex
+# define cpu_has_nofpuex 0
+# endif
+# ifndef cpu_has_64bits
+# define cpu_has_64bits 1
+# endif
+# ifndef cpu_has_64bit_zero_reg
+# define cpu_has_64bit_zero_reg 1
+# endif
+# ifndef cpu_has_64bit_gp_regs
+# define cpu_has_64bit_gp_regs 1
+# endif
+# ifndef cpu_vmbits
+# define cpu_vmbits cpu_data[0].vmbits
+# define __NEED_VMBITS_PROBE
+# endif
+#endif
+
+#if defined(CONFIG_CPU_MIPSR2_IRQ_VI) && !defined(cpu_has_vint)
+# define cpu_has_vint __opt(MIPS_CPU_VINT)
+#elif !defined(cpu_has_vint)
+# define cpu_has_vint 0
+#endif
+
+#if defined(CONFIG_CPU_MIPSR2_IRQ_EI) && !defined(cpu_has_veic)
+# define cpu_has_veic __opt(MIPS_CPU_VEIC)
+#elif !defined(cpu_has_veic)
+# define cpu_has_veic 0
+#endif
+
+#ifndef cpu_has_inclusive_pcaches
+#define cpu_has_inclusive_pcaches __opt(MIPS_CPU_INCLUSIVE_CACHES)
+#endif
+
+#ifndef cpu_dcache_line_size
+#define cpu_dcache_line_size() cpu_data[0].dcache.linesz
+#endif
+#ifndef cpu_icache_line_size
+#define cpu_icache_line_size() cpu_data[0].icache.linesz
+#endif
+#ifndef cpu_scache_line_size
+#define cpu_scache_line_size() cpu_data[0].scache.linesz
+#endif
+#ifndef cpu_tcache_line_size
+#define cpu_tcache_line_size() cpu_data[0].tcache.linesz
+#endif
+
+#ifndef cpu_hwrena_impl_bits
+#define cpu_hwrena_impl_bits 0
+#endif
+
+#ifndef cpu_has_perf_cntr_intr_bit
+#define cpu_has_perf_cntr_intr_bit __opt(MIPS_CPU_PCI)
+#endif
+
+#ifndef cpu_has_vz
+#define cpu_has_vz __ase(MIPS_ASE_VZ)
+#endif
+
+#if defined(CONFIG_CPU_HAS_MSA) && !defined(cpu_has_msa)
+# define cpu_has_msa __ase(MIPS_ASE_MSA)
+#elif !defined(cpu_has_msa)
+# define cpu_has_msa 0
+#endif
+
+#ifndef cpu_has_ufr
+# define cpu_has_ufr __opt(MIPS_CPU_UFR)
+#endif
+
+#ifndef cpu_has_fre
+# define cpu_has_fre __opt(MIPS_CPU_FRE)
+#endif
+
+#ifndef cpu_has_cdmm
+# define cpu_has_cdmm __opt(MIPS_CPU_CDMM)
+#endif
+
+#ifndef cpu_has_small_pages
+# define cpu_has_small_pages __opt(MIPS_CPU_SP)
+#endif
+
+#ifndef cpu_has_nan_legacy
+#define cpu_has_nan_legacy __isa_lt_and_opt(6, MIPS_CPU_NAN_LEGACY)
+#endif
+#ifndef cpu_has_nan_2008
+#define cpu_has_nan_2008 __isa_ge_or_opt(6, MIPS_CPU_NAN_2008)
+#endif
+
+#ifndef cpu_has_ebase_wg
+# define cpu_has_ebase_wg __opt(MIPS_CPU_EBASE_WG)
+#endif
+
+#ifndef cpu_has_badinstr
+# define cpu_has_badinstr __isa_ge_or_opt(6, MIPS_CPU_BADINSTR)
+#endif
+
+#ifndef cpu_has_badinstrp
+# define cpu_has_badinstrp __isa_ge_or_opt(6, MIPS_CPU_BADINSTRP)
+#endif
+
+#ifndef cpu_has_contextconfig
+# define cpu_has_contextconfig __opt(MIPS_CPU_CTXTC)
+#endif
+
+#ifndef cpu_has_perf
+# define cpu_has_perf __opt(MIPS_CPU_PERF)
+#endif
+
+#ifndef cpu_has_mac2008_only
+# define cpu_has_mac2008_only __opt(MIPS_CPU_MAC_2008_ONLY)
+#endif
+
+#ifndef cpu_has_ftlbparex
+# define cpu_has_ftlbparex __opt(MIPS_CPU_FTLBPAREX)
+#endif
+
+#ifndef cpu_has_gsexcex
+# define cpu_has_gsexcex __opt(MIPS_CPU_GSEXCEX)
+#endif
+
+#ifdef CONFIG_SMP
+/*
+ * Some systems share FTLB RAMs between threads within a core (siblings in
+ * kernel parlance). This means that FTLB entries may become invalid at almost
+ * any point when an entry is evicted due to a sibling thread writing an entry
+ * to the shared FTLB RAM.
+ *
+ * This is only relevant to SMP systems, and the only systems that exhibit this
+ * property implement MIPSr6 or higher so we constrain support for this to
+ * kernels that will run on such systems.
+ */
+# ifndef cpu_has_shared_ftlb_ram
+# define cpu_has_shared_ftlb_ram \
+ __isa_ge_and_opt(6, MIPS_CPU_SHARED_FTLB_RAM)
+# endif
+
+/*
+ * Some systems take this a step further & share FTLB entries between siblings.
+ * This is implemented as TLB writes happening as usual, but if an entry
+ * written by a sibling exists in the shared FTLB for a translation which would
+ * otherwise cause a TLB refill exception then the CPU will use the entry
+ * written by its sibling rather than triggering a refill & writing a matching
+ * TLB entry for itself.
+ *
+ * This is naturally only valid if a TLB entry is known to be suitable for use
+ * on all siblings in a CPU, and so it only takes effect when MMIDs are in use
+ * rather than ASIDs or when a TLB entry is marked global.
+ */
+# ifndef cpu_has_shared_ftlb_entries
+# define cpu_has_shared_ftlb_entries \
+ __isa_ge_and_opt(6, MIPS_CPU_SHARED_FTLB_ENTRIES)
+# endif
+#endif /* SMP */
+
+#ifndef cpu_has_shared_ftlb_ram
+# define cpu_has_shared_ftlb_ram 0
+#endif
+#ifndef cpu_has_shared_ftlb_entries
+# define cpu_has_shared_ftlb_entries 0
+#endif
+
+#ifdef CONFIG_MIPS_MT_SMP
+# define cpu_has_mipsmt_pertccounters \
+ __isa_lt_and_opt(6, MIPS_CPU_MT_PER_TC_PERF_COUNTERS)
+#else
+# define cpu_has_mipsmt_pertccounters 0
+#endif /* CONFIG_MIPS_MT_SMP */
+
+/*
+ * We only enable MMID support for configurations which natively support 64 bit
+ * atomics because getting good performance from the allocator relies upon
+ * efficient atomic64_*() functions.
+ */
+#ifndef cpu_has_mmid
+# ifdef CONFIG_GENERIC_ATOMIC64
+# define cpu_has_mmid 0
+# else
+# define cpu_has_mmid __isa_ge_and_opt(6, MIPS_CPU_MMID)
+# endif
+#endif
+
+#ifndef cpu_has_mm_sysad
+# define cpu_has_mm_sysad __opt(MIPS_CPU_MM_SYSAD)
+#endif
+
+#ifndef cpu_has_mm_full
+# define cpu_has_mm_full __opt(MIPS_CPU_MM_FULL)
+#endif
+
+/*
+ * Guest capabilities
+ */
+#ifndef cpu_guest_has_conf1
+#define cpu_guest_has_conf1 (cpu_data[0].guest.conf & (1 << 1))
+#endif
+#ifndef cpu_guest_has_conf2
+#define cpu_guest_has_conf2 (cpu_data[0].guest.conf & (1 << 2))
+#endif
+#ifndef cpu_guest_has_conf3
+#define cpu_guest_has_conf3 (cpu_data[0].guest.conf & (1 << 3))
+#endif
+#ifndef cpu_guest_has_conf4
+#define cpu_guest_has_conf4 (cpu_data[0].guest.conf & (1 << 4))
+#endif
+#ifndef cpu_guest_has_conf5
+#define cpu_guest_has_conf5 (cpu_data[0].guest.conf & (1 << 5))
+#endif
+#ifndef cpu_guest_has_conf6
+#define cpu_guest_has_conf6 (cpu_data[0].guest.conf & (1 << 6))
+#endif
+#ifndef cpu_guest_has_conf7
+#define cpu_guest_has_conf7 (cpu_data[0].guest.conf & (1 << 7))
+#endif
+#ifndef cpu_guest_has_fpu
+#define cpu_guest_has_fpu (cpu_data[0].guest.options & MIPS_CPU_FPU)
+#endif
+#ifndef cpu_guest_has_watch
+#define cpu_guest_has_watch (cpu_data[0].guest.options & MIPS_CPU_WATCH)
+#endif
+#ifndef cpu_guest_has_contextconfig
+#define cpu_guest_has_contextconfig (cpu_data[0].guest.options & MIPS_CPU_CTXTC)
+#endif
+#ifndef cpu_guest_has_segments
+#define cpu_guest_has_segments (cpu_data[0].guest.options & MIPS_CPU_SEGMENTS)
+#endif
+#ifndef cpu_guest_has_badinstr
+#define cpu_guest_has_badinstr (cpu_data[0].guest.options & MIPS_CPU_BADINSTR)
+#endif
+#ifndef cpu_guest_has_badinstrp
+#define cpu_guest_has_badinstrp (cpu_data[0].guest.options & MIPS_CPU_BADINSTRP)
+#endif
+#ifndef cpu_guest_has_htw
+#define cpu_guest_has_htw (cpu_data[0].guest.options & MIPS_CPU_HTW)
+#endif
+#ifndef cpu_guest_has_ldpte
+#define cpu_guest_has_ldpte (cpu_data[0].guest.options & MIPS_CPU_LDPTE)
+#endif
+#ifndef cpu_guest_has_mvh
+#define cpu_guest_has_mvh (cpu_data[0].guest.options & MIPS_CPU_MVH)
+#endif
+#ifndef cpu_guest_has_msa
+#define cpu_guest_has_msa (cpu_data[0].guest.ases & MIPS_ASE_MSA)
+#endif
+#ifndef cpu_guest_has_kscr
+#define cpu_guest_has_kscr(n) (cpu_data[0].guest.kscratch_mask & (1u << (n)))
+#endif
+#ifndef cpu_guest_has_rw_llb
+#define cpu_guest_has_rw_llb (cpu_has_mips_r6 || (cpu_data[0].guest.options & MIPS_CPU_RW_LLB))
+#endif
+#ifndef cpu_guest_has_perf
+#define cpu_guest_has_perf (cpu_data[0].guest.options & MIPS_CPU_PERF)
+#endif
+#ifndef cpu_guest_has_maar
+#define cpu_guest_has_maar (cpu_data[0].guest.options & MIPS_CPU_MAAR)
+#endif
+#ifndef cpu_guest_has_userlocal
+#define cpu_guest_has_userlocal (cpu_data[0].guest.options & MIPS_CPU_ULRI)
+#endif
+
+/*
+ * Guest dynamic capabilities
+ */
+#ifndef cpu_guest_has_dyn_fpu
+#define cpu_guest_has_dyn_fpu (cpu_data[0].guest.options_dyn & MIPS_CPU_FPU)
+#endif
+#ifndef cpu_guest_has_dyn_watch
+#define cpu_guest_has_dyn_watch (cpu_data[0].guest.options_dyn & MIPS_CPU_WATCH)
+#endif
+#ifndef cpu_guest_has_dyn_contextconfig
+#define cpu_guest_has_dyn_contextconfig (cpu_data[0].guest.options_dyn & MIPS_CPU_CTXTC)
+#endif
+#ifndef cpu_guest_has_dyn_perf
+#define cpu_guest_has_dyn_perf (cpu_data[0].guest.options_dyn & MIPS_CPU_PERF)
+#endif
+#ifndef cpu_guest_has_dyn_msa
+#define cpu_guest_has_dyn_msa (cpu_data[0].guest.ases_dyn & MIPS_ASE_MSA)
+#endif
+#ifndef cpu_guest_has_dyn_maar
+#define cpu_guest_has_dyn_maar (cpu_data[0].guest.options_dyn & MIPS_CPU_MAAR)
+#endif
+
+#endif /* __ASM_CPU_FEATURES_H */
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
new file mode 100644
index 000000000..a600670d0
--- /dev/null
+++ b/arch/mips/include/asm/cpu-info.h
@@ -0,0 +1,219 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 Waldorf GMBH
+ * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001, 2002, 2003 Ralf Baechle
+ * Copyright (C) 1996 Paul M. Antoine
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#ifndef __ASM_CPU_INFO_H
+#define __ASM_CPU_INFO_H
+
+#include <linux/cache.h>
+#include <linux/types.h>
+
+#include <asm/mipsregs.h>
+
+/*
+ * Descriptor for a cache
+ */
+struct cache_desc {
+ unsigned int waysize; /* Bytes per way */
+ unsigned short sets; /* Number of lines per set */
+ unsigned char ways; /* Number of ways */
+ unsigned char linesz; /* Size of line in bytes */
+ unsigned char waybit; /* Bits to select in a cache set */
+ unsigned char flags; /* Flags describing cache properties */
+};
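+
+/*
+ * Editor's note: in the kernel's cache probing code the fields relate as
+ * waysize == sets * linesz, and the total cache size is ways * waysize.
+ * For example (values purely illustrative) a 4-way, 32 KiB D-cache with
+ * 32-byte lines would be described as ways = 4, linesz = 32, sets = 256,
+ * waysize = 8192.
+ */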
+
+struct guest_info {
+ unsigned long ases;
+ unsigned long ases_dyn;
+ unsigned long long options;
+ unsigned long long options_dyn;
+ int tlbsize;
+ u8 conf;
+ u8 kscratch_mask;
+};
+
+/*
+ * Flag definitions
+ */
+#define MIPS_CACHE_NOT_PRESENT 0x00000001
+#define MIPS_CACHE_VTAG 0x00000002 /* Virtually tagged cache */
+#define MIPS_CACHE_ALIASES 0x00000004 /* Cache could have aliases */
+#define MIPS_CACHE_IC_F_DC 0x00000008 /* Ic can refill from D-cache */
+#define MIPS_IC_SNOOPS_REMOTE 0x00000010 /* Ic snoops remote stores */
+#define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */
+
+struct cpuinfo_mips {
+ u64 asid_cache;
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+ unsigned long asid_mask;
+#endif
+
+ /*
+ * Capability and feature descriptor structure for MIPS CPU
+ */
+ unsigned long ases;
+ unsigned long long options;
+ unsigned int udelay_val;
+ unsigned int processor_id;
+ unsigned int fpu_id;
+ unsigned int fpu_csr31;
+ unsigned int fpu_msk31;
+ unsigned int msa_id;
+ unsigned int cputype;
+ int isa_level;
+ int tlbsize;
+ int tlbsizevtlb;
+ int tlbsizeftlbsets;
+ int tlbsizeftlbways;
+ struct cache_desc icache; /* Primary I-cache */
+ struct cache_desc dcache; /* Primary D or combined I/D cache */
+ struct cache_desc vcache; /* Victim cache, between pcache and scache */
+ struct cache_desc scache; /* Secondary cache */
+ struct cache_desc tcache; /* Tertiary/split secondary cache */
+ int srsets; /* Shadow register sets */
+ int package; /* physical package number */
+ unsigned int globalnumber;
+#ifdef CONFIG_64BIT
+ int vmbits; /* Virtual memory size in bits */
+#endif
+ void *data; /* Additional data */
+ unsigned int watch_reg_count; /* Number that exist */
+ unsigned int watch_reg_use_cnt; /* Usable by ptrace */
+#define NUM_WATCH_REGS 4
+ u16 watch_reg_masks[NUM_WATCH_REGS];
+ unsigned int kscratch_mask; /* Usable KScratch mask. */
+ /*
+ * Cache Coherency attribute for write-combine memory writes.
+ * (shifted by _CACHE_SHIFT)
+ */
+ unsigned int writecombine;
+ /*
+ * Simple counter to prevent enabling HTW in nested
+ * htw_start/htw_stop calls
+ */
+ unsigned int htw_seq;
+
+ /* VZ & Guest features */
+ struct guest_info guest;
+ unsigned int gtoffset_mask;
+ unsigned int guestid_mask;
+ unsigned int guestid_cache;
+
+#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
+ /* CPUCFG data for this CPU, synthesized at probe time.
+ *
+ * CPUCFG select 0 is PRId, 4 and above are unimplemented for now.
+ * So the only stored values are for CPUCFG selects 1-3 inclusive.
+ */
+ u32 loongson3_cpucfg_data[3];
+#endif
+} __attribute__((aligned(SMP_CACHE_BYTES)));
+
+extern struct cpuinfo_mips cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
+#define boot_cpu_data cpu_data[0]
+
+extern void cpu_probe(void);
+extern void cpu_report(void);
+
+extern const char *__cpu_name[];
+#define cpu_name_string() __cpu_name[raw_smp_processor_id()]
+
+struct seq_file;
+struct notifier_block;
+
+extern int register_proc_cpuinfo_notifier(struct notifier_block *nb);
+extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v);
+
+#define proc_cpuinfo_notifier(fn, pri) \
+({ \
+ static struct notifier_block fn##_nb = { \
+ .notifier_call = fn, \
+ .priority = pri \
+ }; \
+ \
+ register_proc_cpuinfo_notifier(&fn##_nb); \
+})
+
+struct proc_cpuinfo_notifier_args {
+ struct seq_file *m;
+ unsigned long n;
+};
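+
+/*
+ * Usage sketch (editor's illustration, foo_cpuinfo_show() is hypothetical):
+ * platform code can append lines to /proc/cpuinfo via the notifier above:
+ *
+ *     static int foo_cpuinfo_show(struct notifier_block *nb,
+ *                                 unsigned long action, void *data)
+ *     {
+ *             struct proc_cpuinfo_notifier_args *args = data;
+ *
+ *             seq_printf(args->m, "board\t\t\t: foo-%lu\n", args->n);
+ *             return NOTIFY_OK;
+ *     }
+ *
+ *     proc_cpuinfo_notifier(foo_cpuinfo_show, 0);
+ */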
+
+static inline unsigned int cpu_cluster(struct cpuinfo_mips *cpuinfo)
+{
+ /* Optimisation for systems where multiple clusters aren't used */
+ if (!IS_ENABLED(CONFIG_CPU_MIPSR5) && !IS_ENABLED(CONFIG_CPU_MIPSR6))
+ return 0;
+
+ return (cpuinfo->globalnumber & MIPS_GLOBALNUMBER_CLUSTER) >>
+ MIPS_GLOBALNUMBER_CLUSTER_SHF;
+}
+
+static inline unsigned int cpu_core(struct cpuinfo_mips *cpuinfo)
+{
+ return (cpuinfo->globalnumber & MIPS_GLOBALNUMBER_CORE) >>
+ MIPS_GLOBALNUMBER_CORE_SHF;
+}
+
+static inline unsigned int cpu_vpe_id(struct cpuinfo_mips *cpuinfo)
+{
+ /* Optimisation for systems where VP(E)s aren't used */
+ if (!IS_ENABLED(CONFIG_MIPS_MT_SMP) && !IS_ENABLED(CONFIG_CPU_MIPSR6))
+ return 0;
+
+ return (cpuinfo->globalnumber & MIPS_GLOBALNUMBER_VP) >>
+ MIPS_GLOBALNUMBER_VP_SHF;
+}
+
+extern void cpu_set_cluster(struct cpuinfo_mips *cpuinfo, unsigned int cluster);
+extern void cpu_set_core(struct cpuinfo_mips *cpuinfo, unsigned int core);
+extern void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo, unsigned int vpe);
+
+static inline bool cpus_are_siblings(int cpua, int cpub)
+{
+ struct cpuinfo_mips *infoa = &cpu_data[cpua];
+ struct cpuinfo_mips *infob = &cpu_data[cpub];
+ unsigned int gnuma, gnumb;
+
+ if (infoa->package != infob->package)
+ return false;
+
+ gnuma = infoa->globalnumber & ~MIPS_GLOBALNUMBER_VP;
+ gnumb = infob->globalnumber & ~MIPS_GLOBALNUMBER_VP;
+ if (gnuma != gnumb)
+ return false;
+
+ return true;
+}
+
+static inline unsigned long cpu_asid_inc(void)
+{
+ return 1 << CONFIG_MIPS_ASID_SHIFT;
+}
+
+static inline unsigned long cpu_asid_mask(struct cpuinfo_mips *cpuinfo)
+{
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+ return cpuinfo->asid_mask;
+#endif
+ return ((1 << CONFIG_MIPS_ASID_BITS) - 1) << CONFIG_MIPS_ASID_SHIFT;
+}
+
+static inline void set_cpu_asid_mask(struct cpuinfo_mips *cpuinfo,
+ unsigned long asid_mask)
+{
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+ cpuinfo->asid_mask = asid_mask;
+#endif
+}
+
+#endif /* __ASM_CPU_INFO_H */
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
new file mode 100644
index 000000000..3288cef4b
--- /dev/null
+++ b/arch/mips/include/asm/cpu-type.h
@@ -0,0 +1,223 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#ifndef __ASM_CPU_TYPE_H
+#define __ASM_CPU_TYPE_H
+
+#include <linux/smp.h>
+#include <linux/compiler.h>
+
+static inline int __pure __get_cpu_type(const int cpu_type)
+{
+ switch (cpu_type) {
+#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \
+ defined(CONFIG_SYS_HAS_CPU_LOONGSON2F)
+ case CPU_LOONGSON2EF:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_LOONGSON64
+ case CPU_LOONGSON64:
+#endif
+
+#if defined(CONFIG_SYS_HAS_CPU_LOONGSON1B) || \
+ defined(CONFIG_SYS_HAS_CPU_LOONGSON1C)
+ case CPU_LOONGSON32:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R1
+ case CPU_4KC:
+ case CPU_ALCHEMY:
+ case CPU_PR4450:
+#endif
+
+#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R1) || \
+ defined(CONFIG_SYS_HAS_CPU_MIPS32_R2)
+ case CPU_4KEC:
+ case CPU_XBURST:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R2
+ case CPU_4KSC:
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_1004K:
+ case CPU_74K:
+ case CPU_1074K:
+ case CPU_M14KC:
+ case CPU_M14KEC:
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R5
+ case CPU_M5150:
+ case CPU_P5600:
+#endif
+
+#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) || \
+ defined(CONFIG_SYS_HAS_CPU_MIPS32_R5) || \
+ defined(CONFIG_SYS_HAS_CPU_MIPS32_R6) || \
+ defined(CONFIG_SYS_HAS_CPU_MIPS64_R2) || \
+ defined(CONFIG_SYS_HAS_CPU_MIPS64_R5) || \
+ defined(CONFIG_SYS_HAS_CPU_MIPS64_R6)
+ case CPU_QEMU_GENERIC:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
+ case CPU_5KC:
+ case CPU_5KE:
+ case CPU_20KC:
+ case CPU_25KF:
+ case CPU_SB1:
+ case CPU_SB1A:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R2
+ /*
+ * All MIPS64 R2 processors have their own special symbols. That is,
+ * there currently is no pure R2 core
+ */
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R6
+ case CPU_M6250:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R6
+ case CPU_I6400:
+ case CPU_I6500:
+ case CPU_P6600:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_R3000
+ case CPU_R2000:
+ case CPU_R3000:
+ case CPU_R3000A:
+ case CPU_R3041:
+ case CPU_R3051:
+ case CPU_R3052:
+ case CPU_R3081:
+ case CPU_R3081E:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_TX39XX
+ case CPU_TX3912:
+ case CPU_TX3922:
+ case CPU_TX3927:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_VR41XX
+ case CPU_VR41XX:
+ case CPU_VR4111:
+ case CPU_VR4121:
+ case CPU_VR4122:
+ case CPU_VR4131:
+ case CPU_VR4133:
+ case CPU_VR4181:
+ case CPU_VR4181A:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_R4X00
+ case CPU_R4000PC:
+ case CPU_R4000SC:
+ case CPU_R4000MC:
+ case CPU_R4200:
+ case CPU_R4400PC:
+ case CPU_R4400SC:
+ case CPU_R4400MC:
+ case CPU_R4600:
+ case CPU_R4700:
+ case CPU_R4640:
+ case CPU_R4650:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_TX49XX
+ case CPU_TX49XX:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_R5000
+ case CPU_R5000:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_R5500
+ case CPU_R5500:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_NEVADA
+ case CPU_NEVADA:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_R10000
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
+ case CPU_R16000:
+#endif
+#ifdef CONFIG_SYS_HAS_CPU_RM7000
+ case CPU_RM7000:
+ case CPU_SR71000:
+#endif
+#ifdef CONFIG_SYS_HAS_CPU_SB1
+ case CPU_SB1:
+ case CPU_SB1A:
+#endif
+#ifdef CONFIG_SYS_HAS_CPU_CAVIUM_OCTEON
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ case CPU_CAVIUM_OCTEON3:
+#endif
+
+#if defined(CONFIG_SYS_HAS_CPU_BMIPS32_3300) || \
+ defined (CONFIG_SYS_HAS_CPU_MIPS32_R1)
+ case CPU_BMIPS32:
+ case CPU_BMIPS3300:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_BMIPS4350
+ case CPU_BMIPS4350:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_BMIPS4380
+ case CPU_BMIPS4380:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_BMIPS5000
+ case CPU_BMIPS5000:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_XLP
+ case CPU_XLP:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_XLR
+ case CPU_XLR:
+#endif
+ break;
+ default:
+ unreachable();
+ }
+
+ return cpu_type;
+}
+
+static inline int __pure current_cpu_type(void)
+{
+ const int cpu_type = current_cpu_data.cputype;
+
+ return __get_cpu_type(cpu_type);
+}
+
+static inline int __pure boot_cpu_type(void)
+{
+ const int cpu_type = cpu_data[0].cputype;
+
+ return __get_cpu_type(cpu_type);
+}
+
+#endif /* __ASM_CPU_TYPE_H */
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
new file mode 100644
index 000000000..c9222cc22
--- /dev/null
+++ b/arch/mips/include/asm/cpu.h
@@ -0,0 +1,451 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cpu.h: Values of the PRId register used to match up
+ * various MIPS cpu types.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2004, 2013 Maciej W. Rozycki
+ */
+#ifndef _ASM_CPU_H
+#define _ASM_CPU_H
+
+#include <linux/bits.h>
+
+/*
+ As of the MIPS32 and MIPS64 specs from MTI, the PRId register (CP0
+ register 15, select 0) is defined in this (backwards compatible) way:
+
+ +----------------+----------------+----------------+----------------+
+ | Company Options| Company ID     | Processor ID   | Revision       |
+ +----------------+----------------+----------------+----------------+
+  31            24 23            16 15             8 7              0
+
+ I don't have docs for all the previous processors, but my impression is
+ that bits 16-23 have been 0 for all MIPS processors before the MIPS32/64
+ spec.
+*/
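+
+/*
+ * Decoding example (editor's illustration): given a PRId value prid read
+ * from CP0, the fields above are extracted with the masks defined below,
+ * e.g.
+ *
+ *     (prid & PRID_COMP_MASK) == PRID_COMP_MIPS    the company is MTI
+ *     (prid & PRID_IMP_MASK)  == PRID_IMP_74K      a 74K core
+ *     (prid & PRID_REV_MASK)                       revision/patch level
+ *
+ * which is how cpu_probe() narrows down the cputype at boot.
+ */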
+
+#define PRID_OPT_MASK 0xff000000
+
+/*
+ * Assigned Company values for bits 23:16 of the PRId register.
+ */
+
+#define PRID_COMP_MASK 0xff0000
+
+#define PRID_COMP_LEGACY 0x000000
+#define PRID_COMP_MIPS 0x010000
+#define PRID_COMP_BROADCOM 0x020000
+#define PRID_COMP_ALCHEMY 0x030000
+#define PRID_COMP_SIBYTE 0x040000
+#define PRID_COMP_SANDCRAFT 0x050000
+#define PRID_COMP_NXP 0x060000
+#define PRID_COMP_TOSHIBA 0x070000
+#define PRID_COMP_LSI 0x080000
+#define PRID_COMP_LEXRA 0x0b0000
+#define PRID_COMP_NETLOGIC 0x0c0000
+#define PRID_COMP_CAVIUM 0x0d0000
+#define PRID_COMP_LOONGSON 0x140000
+#define PRID_COMP_INGENIC_13 0x130000 /* X2000 */
+#define PRID_COMP_INGENIC_D0 0xd00000 /* JZ4740, JZ4750, X1830 */
+#define PRID_COMP_INGENIC_D1 0xd10000 /* JZ4770, JZ4775, X1000 */
+#define PRID_COMP_INGENIC_E1 0xe10000 /* JZ4780 */
+
+/*
+ * Assigned Processor ID (implementation) values for bits 15:8 of the PRId
+ * register. In order to detect a certain CPU type exactly, additional
+ * registers may eventually need to be examined.
+ */
+
+#define PRID_IMP_MASK 0xff00
+
+/*
+ * These are valid when 23:16 == PRID_COMP_LEGACY
+ */
+
+#define PRID_IMP_R2000 0x0100
+#define PRID_IMP_AU1_REV1 0x0100
+#define PRID_IMP_AU1_REV2 0x0200
+#define PRID_IMP_R3000 0x0200 /* Same as R2000A */
+#define PRID_IMP_R6000 0x0300 /* Same as R3000A */
+#define PRID_IMP_R4000 0x0400
+#define PRID_IMP_R6000A 0x0600
+#define PRID_IMP_R10000 0x0900
+#define PRID_IMP_R4300 0x0b00
+#define PRID_IMP_VR41XX 0x0c00
+#define PRID_IMP_R12000 0x0e00
+#define PRID_IMP_R14000 0x0f00 /* R14K && R16K */
+#define PRID_IMP_R8000 0x1000
+#define PRID_IMP_PR4450 0x1200
+#define PRID_IMP_R4600 0x2000
+#define PRID_IMP_R4700 0x2100
+#define PRID_IMP_TX39 0x2200
+#define PRID_IMP_R4640 0x2200
+#define PRID_IMP_R4650 0x2200 /* Same as R4640 */
+#define PRID_IMP_R5000 0x2300
+#define PRID_IMP_TX49 0x2d00
+#define PRID_IMP_SONIC 0x2400
+#define PRID_IMP_MAGIC 0x2500
+#define PRID_IMP_RM7000 0x2700
+#define PRID_IMP_NEVADA 0x2800 /* RM5260 ??? */
+#define PRID_IMP_RM9000 0x3400
+#define PRID_IMP_LOONGSON_32 0x4200 /* Loongson-1 */
+#define PRID_IMP_R5432 0x5400
+#define PRID_IMP_R5500 0x5500
+#define PRID_IMP_LOONGSON_64R 0x6100 /* Reduced Loongson-2 */
+#define PRID_IMP_LOONGSON_64C 0x6300 /* Classic Loongson-2 and Loongson-3 */
+#define PRID_IMP_LOONGSON_64G 0xc000 /* Generic Loongson-2 and Loongson-3 */
+
+#define PRID_IMP_UNKNOWN 0xff00
+
+/*
+ * These are the PRID's for when 23:16 == PRID_COMP_MIPS
+ */
+
+#define PRID_IMP_QEMU_GENERIC 0x0000
+#define PRID_IMP_4KC 0x8000
+#define PRID_IMP_5KC 0x8100
+#define PRID_IMP_20KC 0x8200
+#define PRID_IMP_4KEC 0x8400
+#define PRID_IMP_4KSC 0x8600
+#define PRID_IMP_25KF 0x8800
+#define PRID_IMP_5KE 0x8900
+#define PRID_IMP_4KECR2 0x9000
+#define PRID_IMP_4KEMPR2 0x9100
+#define PRID_IMP_4KSD 0x9200
+#define PRID_IMP_24K 0x9300
+#define PRID_IMP_34K 0x9500
+#define PRID_IMP_24KE 0x9600
+#define PRID_IMP_74K 0x9700
+#define PRID_IMP_1004K 0x9900
+#define PRID_IMP_1074K 0x9a00
+#define PRID_IMP_M14KC 0x9c00
+#define PRID_IMP_M14KEC 0x9e00
+#define PRID_IMP_INTERAPTIV_UP 0xa000
+#define PRID_IMP_INTERAPTIV_MP 0xa100
+#define PRID_IMP_PROAPTIV_UP 0xa200
+#define PRID_IMP_PROAPTIV_MP 0xa300
+#define PRID_IMP_P6600 0xa400
+#define PRID_IMP_M5150 0xa700
+#define PRID_IMP_P5600 0xa800
+#define PRID_IMP_I6400 0xa900
+#define PRID_IMP_M6250 0xab00
+#define PRID_IMP_I6500 0xb000
+
+/*
+ * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
+ */
+
+#define PRID_IMP_SB1 0x0100
+#define PRID_IMP_SB1A 0x1100
+
+/*
+ * These are the PRID's for when 23:16 == PRID_COMP_SANDCRAFT
+ */
+
+#define PRID_IMP_SR71000 0x0400
+
+/*
+ * These are the PRID's for when 23:16 == PRID_COMP_BROADCOM
+ */
+
+#define PRID_IMP_BMIPS32_REV4 0x4000
+#define PRID_IMP_BMIPS32_REV8 0x8000
+#define PRID_IMP_BMIPS3300 0x9000
+#define PRID_IMP_BMIPS3300_ALT 0x9100
+#define PRID_IMP_BMIPS3300_BUG 0x0000
+#define PRID_IMP_BMIPS43XX 0xa000
+#define PRID_IMP_BMIPS5000 0x5a00
+#define PRID_IMP_BMIPS5200 0x5b00
+
+#define PRID_REV_BMIPS4380_LO 0x0040
+#define PRID_REV_BMIPS4380_HI 0x006f
+
+/*
+ * These are the PRID's for when 23:16 == PRID_COMP_CAVIUM
+ */
+
+#define PRID_IMP_CAVIUM_CN38XX 0x0000
+#define PRID_IMP_CAVIUM_CN31XX 0x0100
+#define PRID_IMP_CAVIUM_CN30XX 0x0200
+#define PRID_IMP_CAVIUM_CN58XX 0x0300
+#define PRID_IMP_CAVIUM_CN56XX 0x0400
+#define PRID_IMP_CAVIUM_CN50XX 0x0600
+#define PRID_IMP_CAVIUM_CN52XX 0x0700
+#define PRID_IMP_CAVIUM_CN63XX 0x9000
+#define PRID_IMP_CAVIUM_CN68XX 0x9100
+#define PRID_IMP_CAVIUM_CN66XX 0x9200
+#define PRID_IMP_CAVIUM_CN61XX 0x9300
+#define PRID_IMP_CAVIUM_CNF71XX 0x9400
+#define PRID_IMP_CAVIUM_CN78XX 0x9500
+#define PRID_IMP_CAVIUM_CN70XX 0x9600
+#define PRID_IMP_CAVIUM_CN73XX 0x9700
+#define PRID_IMP_CAVIUM_CNF75XX 0x9800
+
+/*
+ * These are the PRID's for when 23:16 == PRID_COMP_INGENIC_*
+ */
+
+#define PRID_IMP_XBURST_REV1 0x0200 /* XBurst®1 with MXU1.0/MXU1.1 SIMD ISA */
+#define PRID_IMP_XBURST_REV2 0x0100 /* XBurst®1 with MXU2.0 SIMD ISA */
+#define PRID_IMP_XBURST2 0x2000 /* XBurst®2 with MXU2.1 SIMD ISA */
+
+/*
+ * These are the PRID's for when 23:16 == PRID_COMP_NETLOGIC
+ */
+#define PRID_IMP_NETLOGIC_XLR732 0x0000
+#define PRID_IMP_NETLOGIC_XLR716 0x0200
+#define PRID_IMP_NETLOGIC_XLR532 0x0900
+#define PRID_IMP_NETLOGIC_XLR308 0x0600
+#define PRID_IMP_NETLOGIC_XLR532C 0x0800
+#define PRID_IMP_NETLOGIC_XLR516C 0x0a00
+#define PRID_IMP_NETLOGIC_XLR508C 0x0b00
+#define PRID_IMP_NETLOGIC_XLR308C 0x0f00
+#define PRID_IMP_NETLOGIC_XLS608 0x8000
+#define PRID_IMP_NETLOGIC_XLS408 0x8800
+#define PRID_IMP_NETLOGIC_XLS404 0x8c00
+#define PRID_IMP_NETLOGIC_XLS208 0x8e00
+#define PRID_IMP_NETLOGIC_XLS204 0x8f00
+#define PRID_IMP_NETLOGIC_XLS108 0xce00
+#define PRID_IMP_NETLOGIC_XLS104 0xcf00
+#define PRID_IMP_NETLOGIC_XLS616B 0x4000
+#define PRID_IMP_NETLOGIC_XLS608B 0x4a00
+#define PRID_IMP_NETLOGIC_XLS416B 0x4400
+#define PRID_IMP_NETLOGIC_XLS412B 0x4c00
+#define PRID_IMP_NETLOGIC_XLS408B 0x4e00
+#define PRID_IMP_NETLOGIC_XLS404B 0x4f00
+#define PRID_IMP_NETLOGIC_AU13XX 0x8000
+
+#define PRID_IMP_NETLOGIC_XLP8XX 0x1000
+#define PRID_IMP_NETLOGIC_XLP3XX 0x1100
+#define PRID_IMP_NETLOGIC_XLP2XX 0x1200
+#define PRID_IMP_NETLOGIC_XLP9XX 0x1500
+#define PRID_IMP_NETLOGIC_XLP5XX 0x1300
+
+/*
+ * Particular Revision values for bits 7:0 of the PRId register.
+ */
+
+#define PRID_REV_MASK 0x00ff
+
+/*
+ * Definitions for 7:0 on legacy processors
+ */
+
+#define PRID_REV_TX4927 0x0022
+#define PRID_REV_TX4937 0x0030
+#define PRID_REV_R4400 0x0040
+#define PRID_REV_R3000A 0x0030
+#define PRID_REV_R3000 0x0020
+#define PRID_REV_R2000A 0x0010
+#define PRID_REV_TX3912 0x0010
+#define PRID_REV_TX3922 0x0030
+#define PRID_REV_TX3927 0x0040
+#define PRID_REV_VR4111 0x0050
+#define PRID_REV_VR4181 0x0050 /* Same as VR4111 */
+#define PRID_REV_VR4121 0x0060
+#define PRID_REV_VR4122 0x0070
+#define PRID_REV_VR4181A 0x0070 /* Same as VR4122 */
+#define PRID_REV_VR4130 0x0080
+#define PRID_REV_34K_V1_0_2 0x0022
+#define PRID_REV_LOONGSON1B 0x0020
+#define PRID_REV_LOONGSON1C 0x0020 /* Same as Loongson-1B */
+#define PRID_REV_LOONGSON2E 0x0002
+#define PRID_REV_LOONGSON2F 0x0003
+#define PRID_REV_LOONGSON2K_R1_0 0x0000
+#define PRID_REV_LOONGSON2K_R1_1 0x0001
+#define PRID_REV_LOONGSON2K_R1_2 0x0002
+#define PRID_REV_LOONGSON2K_R1_3 0x0003
+#define PRID_REV_LOONGSON3A_R1 0x0005
+#define PRID_REV_LOONGSON3B_R1 0x0006
+#define PRID_REV_LOONGSON3B_R2 0x0007
+#define PRID_REV_LOONGSON3A_R2_0 0x0008
+#define PRID_REV_LOONGSON3A_R3_0 0x0009
+#define PRID_REV_LOONGSON3A_R2_1 0x000c
+#define PRID_REV_LOONGSON3A_R3_1 0x000d
+
+/*
+ * Older processors used to encode processor version and revision in two
+ * 4-bit bitfields; the 4K seems to simply count up, and even newer MTI cores
+ * have switched to using the 8 bits as a 3:3:2 bitfield with the last field
+ * as the patch number. *ARGH*
+ */
+#define PRID_REV_ENCODE_44(ver, rev) \
+ ((ver) << 4 | (rev))
+#define PRID_REV_ENCODE_332(ver, rev, patch) \
+ ((ver) << 5 | (rev) << 2 | (patch))
+
+/*
+ * FPU implementation/revision register (CP1 control register 0).
+ *
+ * +---------------------------------+----------------+----------------+
+ * | 0                               | Implementation | Revision       |
+ * +---------------------------------+----------------+----------------+
+ *  31                             16 15             8 7              0
+ */
+
+#define FPIR_IMP_MASK 0xff00
+
+#define FPIR_IMP_NONE 0x0000
+
+#if !defined(__ASSEMBLY__)
+
+enum cpu_type_enum {
+ CPU_UNKNOWN,
+
+ /*
+ * R2000 class processors
+ */
+ CPU_R2000, CPU_R3000, CPU_R3000A, CPU_R3041, CPU_R3051, CPU_R3052,
+ CPU_R3081, CPU_R3081E,
+
+ /*
+ * R4000 class processors
+ */
+ CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200,
+ CPU_R4400PC, CPU_R4400SC, CPU_R4400MC, CPU_R4600, CPU_R4640, CPU_R4650,
+ CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R10000,
+ CPU_R12000, CPU_R14000, CPU_R16000, CPU_VR41XX, CPU_VR4111, CPU_VR4121,
+ CPU_VR4122, CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000,
+ CPU_SR71000, CPU_TX49XX,
+
+ /*
+ * TX3900 class processors
+ */
+ CPU_TX3912, CPU_TX3922, CPU_TX3927,
+
+ /*
+ * MIPS32 class processors
+ */
+ CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
+ CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
+ CPU_BMIPS4380, CPU_BMIPS5000, CPU_XBURST, CPU_LOONGSON32, CPU_M14KC,
+ CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K,
+ CPU_M5150, CPU_I6400, CPU_P6600, CPU_M6250,
+
+ /*
+ * MIPS64 class processors
+ */
+ CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2EF,
+ CPU_LOONGSON64, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
+ CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP, CPU_I6500,
+
+ CPU_QEMU_GENERIC,
+
+ CPU_LAST
+};
+
+#endif /* !__ASSEMBLY */
+
+/*
+ * ISA Level encodings
+ *
+ */
+#define MIPS_CPU_ISA_II 0x00000001
+#define MIPS_CPU_ISA_III 0x00000002
+#define MIPS_CPU_ISA_IV 0x00000004
+#define MIPS_CPU_ISA_V 0x00000008
+#define MIPS_CPU_ISA_M32R1 0x00000010
+#define MIPS_CPU_ISA_M32R2 0x00000020
+#define MIPS_CPU_ISA_M64R1 0x00000040
+#define MIPS_CPU_ISA_M64R2 0x00000080
+#define MIPS_CPU_ISA_M32R5 0x00000100
+#define MIPS_CPU_ISA_M64R5 0x00000200
+#define MIPS_CPU_ISA_M32R6 0x00000400
+#define MIPS_CPU_ISA_M64R6 0x00000800
+
+#define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M32R6)
+#define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
+ MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \
+ MIPS_CPU_ISA_M64R5 | MIPS_CPU_ISA_M64R6)
+
+/*
+ * CPU Option encodings
+ */
+#define MIPS_CPU_TLB BIT_ULL( 0) /* CPU has TLB */
+#define MIPS_CPU_4KEX BIT_ULL( 1) /* "R4K" exception model */
+#define MIPS_CPU_3K_CACHE BIT_ULL( 2) /* R3000-style caches */
+#define MIPS_CPU_4K_CACHE BIT_ULL( 3) /* R4000-style caches */
+#define MIPS_CPU_TX39_CACHE BIT_ULL( 4) /* TX3900-style caches */
+#define MIPS_CPU_FPU BIT_ULL( 5) /* CPU has FPU */
+#define MIPS_CPU_32FPR BIT_ULL( 6) /* 32 dbl. prec. FP registers */
+#define MIPS_CPU_COUNTER BIT_ULL( 7) /* Cycle count/compare */
+#define MIPS_CPU_WATCH BIT_ULL( 8) /* watchpoint registers */
+#define MIPS_CPU_DIVEC BIT_ULL( 9) /* dedicated interrupt vector */
+#define MIPS_CPU_VCE BIT_ULL(10) /* virt. coherence conflict possible */
+#define MIPS_CPU_CACHE_CDEX_P BIT_ULL(11) /* Create_Dirty_Exclusive CACHE op */
+#define MIPS_CPU_CACHE_CDEX_S BIT_ULL(12) /* ... same for secondary cache ... */
+#define MIPS_CPU_MCHECK BIT_ULL(13) /* Machine check exception */
+#define MIPS_CPU_EJTAG BIT_ULL(14) /* EJTAG exception */
+#define MIPS_CPU_NOFPUEX BIT_ULL(15) /* no FPU exception */
+#define MIPS_CPU_LLSC BIT_ULL(16) /* CPU has ll/sc instructions */
+#define MIPS_CPU_INCLUSIVE_CACHES BIT_ULL(17) /* P-cache subset enforced */
+#define MIPS_CPU_PREFETCH BIT_ULL(18) /* CPU has usable prefetch */
+#define MIPS_CPU_VINT BIT_ULL(19) /* CPU supports MIPSR2 vectored interrupts */
+#define MIPS_CPU_VEIC BIT_ULL(20) /* CPU supports MIPSR2 external interrupt controller mode */
+#define MIPS_CPU_ULRI BIT_ULL(21) /* CPU has ULRI feature */
+#define MIPS_CPU_PCI BIT_ULL(22) /* CPU has Perf Ctr Int indicator */
+#define MIPS_CPU_RIXI BIT_ULL(23) /* CPU has TLB Read/eXec Inhibit */
+#define MIPS_CPU_MICROMIPS BIT_ULL(24) /* CPU has microMIPS capability */
+#define MIPS_CPU_TLBINV BIT_ULL(25) /* CPU supports TLBINV/F */
+#define MIPS_CPU_SEGMENTS BIT_ULL(26) /* CPU supports Segmentation Control registers */
+#define MIPS_CPU_EVA BIT_ULL(27) /* CPU supports Enhanced Virtual Addressing */
+#define MIPS_CPU_HTW BIT_ULL(28) /* CPU supports Hardware Page Table Walker */
+#define MIPS_CPU_RIXIEX BIT_ULL(29) /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */
+#define MIPS_CPU_MAAR BIT_ULL(30) /* MAAR(I) registers are present */
+#define MIPS_CPU_FRE BIT_ULL(31) /* FRE & UFE bits implemented */
+#define MIPS_CPU_RW_LLB BIT_ULL(32) /* LLADDR/LLB writes are allowed */
+#define MIPS_CPU_LPA BIT_ULL(33) /* CPU supports Large Physical Addressing */
+#define MIPS_CPU_CDMM BIT_ULL(34) /* CPU has Common Device Memory Map */
+#define MIPS_CPU_SP BIT_ULL(36) /* Small (1KB) page support */
+#define MIPS_CPU_FTLB BIT_ULL(37) /* CPU has Fixed-page-size TLB */
+#define MIPS_CPU_NAN_LEGACY BIT_ULL(38) /* Legacy NaN implemented */
+#define MIPS_CPU_NAN_2008 BIT_ULL(39) /* 2008 NaN implemented */
+#define MIPS_CPU_VP BIT_ULL(40) /* MIPSr6 Virtual Processors (multi-threading) */
+#define MIPS_CPU_LDPTE BIT_ULL(41) /* CPU has ldpte/lddir instructions */
+#define MIPS_CPU_MVH BIT_ULL(42) /* CPU supports MFHC0/MTHC0 */
+#define MIPS_CPU_EBASE_WG BIT_ULL(43) /* CPU has EBase.WG */
+#define MIPS_CPU_BADINSTR BIT_ULL(44) /* CPU has BadInstr register */
+#define MIPS_CPU_BADINSTRP BIT_ULL(45) /* CPU has BadInstrP register */
+#define MIPS_CPU_CTXTC BIT_ULL(46) /* CPU has [X]ConfigContext registers */
+#define MIPS_CPU_PERF BIT_ULL(47) /* CPU has MIPS performance counters */
+#define MIPS_CPU_GUESTCTL0EXT BIT_ULL(48) /* CPU has VZ GuestCtl0Ext register */
+#define MIPS_CPU_GUESTCTL1 BIT_ULL(49) /* CPU has VZ GuestCtl1 register */
+#define MIPS_CPU_GUESTCTL2 BIT_ULL(50) /* CPU has VZ GuestCtl2 register */
+#define MIPS_CPU_GUESTID BIT_ULL(51) /* CPU uses VZ ASE GuestID feature */
+#define MIPS_CPU_DRG BIT_ULL(52) /* CPU has VZ Direct Root to Guest (DRG) */
+#define MIPS_CPU_UFR BIT_ULL(53) /* CPU supports User mode FR switching */
+#define MIPS_CPU_SHARED_FTLB_RAM \
+ BIT_ULL(54) /* CPU shares FTLB RAM with another */
+#define MIPS_CPU_SHARED_FTLB_ENTRIES \
+ BIT_ULL(55) /* CPU shares FTLB entries with another */
+#define MIPS_CPU_MT_PER_TC_PERF_COUNTERS \
+ BIT_ULL(56) /* CPU has perf counters implemented per TC (MIPSMT ASE) */
+#define MIPS_CPU_MMID BIT_ULL(57) /* CPU supports MemoryMapIDs */
+#define MIPS_CPU_MM_SYSAD BIT_ULL(58) /* CPU supports write-through SysAD Valid merge */
+#define MIPS_CPU_MM_FULL BIT_ULL(59) /* CPU supports write-through full merge */
+#define MIPS_CPU_MAC_2008_ONLY BIT_ULL(60) /* CPU only supports the MAC2008 fused multiply-add instruction */
+#define MIPS_CPU_FTLBPAREX BIT_ULL(61) /* CPU has FTLB parity exception */
+#define MIPS_CPU_GSEXCEX BIT_ULL(62) /* CPU has GSExc exception */
+
+/*
+ * CPU ASE encodings
+ */
+#define MIPS_ASE_MIPS16 0x00000001 /* code compression */
+#define MIPS_ASE_MDMX 0x00000002 /* MIPS digital media extension */
+#define MIPS_ASE_MIPS3D 0x00000004 /* MIPS-3D */
+#define MIPS_ASE_SMARTMIPS 0x00000008 /* SmartMIPS */
+#define MIPS_ASE_DSP 0x00000010 /* Signal Processing ASE */
+#define MIPS_ASE_MIPSMT 0x00000020 /* CPU supports MIPS MT */
+#define MIPS_ASE_DSP2P 0x00000040 /* Signal Processing ASE Rev 2 */
+#define MIPS_ASE_VZ 0x00000080 /* Virtualization ASE */
+#define MIPS_ASE_MSA 0x00000100 /* MIPS SIMD Architecture */
+#define MIPS_ASE_DSP3 0x00000200 /* Signal Processing ASE Rev 3 */
+#define MIPS_ASE_MIPS16E2 0x00000400 /* MIPS16e2 */
+#define MIPS_ASE_LOONGSON_MMI 0x00000800 /* Loongson MultiMedia extension Instructions (MMI) */
+#define MIPS_ASE_LOONGSON_CAM 0x00001000 /* Loongson CAM */
+#define MIPS_ASE_LOONGSON_EXT 0x00002000 /* Loongson EXTensions */
+#define MIPS_ASE_LOONGSON_EXT2 0x00004000 /* Loongson EXTensions R2 */
+
+#endif /* _ASM_CPU_H */
diff --git a/arch/mips/include/asm/cpufeature.h b/arch/mips/include/asm/cpufeature.h
new file mode 100644
index 000000000..ba9e62faf
--- /dev/null
+++ b/arch/mips/include/asm/cpufeature.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * CPU feature definitions for module loading, used by
+ * module_cpu_feature_match(), see uapi/asm/hwcap.h for MIPS CPU features.
+ */
+
+#ifndef __ASM_CPUFEATURE_H
+#define __ASM_CPUFEATURE_H
+
+#include <uapi/asm/hwcap.h>
+#include <asm/elf.h>
+
+#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
+
+#define cpu_feature(x) ilog2(HWCAP_ ## x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+ return elf_hwcap & (1UL << num);
+}
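+
+/*
+ * Usage sketch (editor's illustration, assuming HWCAP_MIPS_MSA from
+ * uapi/asm/hwcap.h): a module that needs an ASE can test for it with
+ *
+ *     if (cpu_have_feature(cpu_feature(MIPS_MSA)))
+ *             ...
+ *
+ * or, equivalently, load only on matching CPUs via
+ * module_cpu_feature_match(MIPS_MSA, init_fn) as mentioned above.
+ */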
+
+#endif /* __ASM_CPUFEATURE_H */
diff --git a/arch/mips/include/asm/debug.h b/arch/mips/include/asm/debug.h
new file mode 100644
index 000000000..c7013e1cb
--- /dev/null
+++ b/arch/mips/include/asm/debug.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ */
+
+#ifndef __MIPS_ASM_DEBUG_H__
+#define __MIPS_ASM_DEBUG_H__
+
+#include <linux/dcache.h>
+
+/*
+ * mips_debugfs_dir corresponds to the "mips" directory at the top level
+ * of the DebugFS hierarchy. MIPS-specific DebugFS entries should be
+ * placed beneath this directory.
+ */
+extern struct dentry *mips_debugfs_dir;
+
+#endif /* __MIPS_ASM_DEBUG_H__ */
diff --git a/arch/mips/include/asm/dec/ecc.h b/arch/mips/include/asm/dec/ecc.h
new file mode 100644
index 000000000..c3a3f71f1
--- /dev/null
+++ b/arch/mips/include/asm/dec/ecc.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-mips/dec/ecc.h
+ *
+ * ECC handling logic definitions common to DECstation/DECsystem
+ * 5000/200 (KN02), 5000/240 (KN03), 5000/260 (KN05) and
+ * DECsystem 5900 (KN03), 5900/260 (KN05) systems.
+ *
+ * Copyright (C) 2003 Maciej W. Rozycki
+ */
+#ifndef __ASM_MIPS_DEC_ECC_H
+#define __ASM_MIPS_DEC_ECC_H
+
+/*
+ * Error Address Register bits.
+ * The register is r/wc -- any write clears it.
+ */
+#define KN0X_EAR_VALID (1<<31) /* error data valid, bus IRQ */
+#define KN0X_EAR_CPU (1<<30) /* CPU/DMA transaction */
+#define KN0X_EAR_WRITE (1<<29) /* write/read transaction */
+#define KN0X_EAR_ECCERR (1<<28) /* ECC/timeout or overrun */
+#define KN0X_EAR_RES_27 (1<<27) /* unused */
+#define KN0X_EAR_ADDRESS (0x7ffffff<<0) /* address involved */
+
+/*
+ * Error Syndrome Register bits.
+ * The register is frozen when EAR.VALID is set, otherwise it records bits
+ * from the last memory read. The register is r/wc -- any write clears it.
+ */
+#define KN0X_ESR_VLDHI (1<<31) /* error data valid hi word */
+#define KN0X_ESR_CHKHI (0x7f<<24) /* check bits read from mem */
+#define KN0X_ESR_SNGHI (1<<23) /* single/double bit error */
+#define KN0X_ESR_SYNHI (0x7f<<16) /* syndrome from ECC logic */
+#define KN0X_ESR_VLDLO (1<<15) /* error data valid lo word */
+#define KN0X_ESR_CHKLO (0x7f<<8) /* check bits read from mem */
+#define KN0X_ESR_SNGLO (1<<7) /* single/double bit error */
+#define KN0X_ESR_SYNLO (0x7f<<0) /* syndrome from ECC logic */
+
+
+#ifndef __ASSEMBLY__
+
+#include <linux/interrupt.h>
+
+struct pt_regs;
+
+extern void dec_ecc_be_init(void);
+extern int dec_ecc_be_handler(struct pt_regs *regs, int is_fixup);
+extern irqreturn_t dec_ecc_be_interrupt(int irq, void *dev_id);
+#endif
+
+#endif /* __ASM_MIPS_DEC_ECC_H */
diff --git a/arch/mips/include/asm/dec/interrupts.h b/arch/mips/include/asm/dec/interrupts.h
new file mode 100644
index 000000000..e10d34106
--- /dev/null
+++ b/arch/mips/include/asm/dec/interrupts.h
@@ -0,0 +1,126 @@
+/*
+ * Miscellaneous definitions used to initialise the interrupt vector table
+ * with the machine-specific interrupt routines.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1997 by Paul M. Antoine.
+ * reworked 1998 by Harald Koerfgen.
+ * Copyright (C) 2001, 2002, 2003 Maciej W. Rozycki
+ */
+
+#ifndef __ASM_DEC_INTERRUPTS_H
+#define __ASM_DEC_INTERRUPTS_H
+
+#include <irq.h>
+#include <asm/mipsregs.h>
+
+
+/*
+ * The list of possible system devices which provide an
+ * interrupt. Not all devices exist on a given system.
+ */
+#define DEC_IRQ_CASCADE 0 /* cascade from CSR or I/O ASIC */
+
+/* Ordinary interrupts */
+#define DEC_IRQ_AB_RECV 1 /* ACCESS.bus receive */
+#define DEC_IRQ_AB_XMIT 2 /* ACCESS.bus transmit */
+#define DEC_IRQ_DZ11 3 /* DZ11 (DC7085) serial */
+#define DEC_IRQ_ASC 4 /* ASC (NCR53C94) SCSI */
+#define DEC_IRQ_FLOPPY 5 /* 82077 FDC */
+#define DEC_IRQ_FPU 6 /* R3k FPU */
+#define DEC_IRQ_HALT 7 /* HALT button or from ACCESS.Bus */
+#define DEC_IRQ_ISDN 8 /* Am79C30A ISDN */
+#define DEC_IRQ_LANCE 9 /* LANCE (Am7990) Ethernet */
+#define DEC_IRQ_BUS 10 /* memory, I/O bus read/write errors */
+#define DEC_IRQ_PSU 11 /* power supply unit warning */
+#define DEC_IRQ_RTC 12 /* DS1287 RTC */
+#define DEC_IRQ_SCC0 13 /* SCC (Z85C30) serial #0 */
+#define DEC_IRQ_SCC1 14 /* SCC (Z85C30) serial #1 */
+#define DEC_IRQ_SII 15 /* SII (DC7061) SCSI */
+#define DEC_IRQ_TC0 16 /* TURBOchannel slot #0 */
+#define DEC_IRQ_TC1 17 /* TURBOchannel slot #1 */
+#define DEC_IRQ_TC2 18 /* TURBOchannel slot #2 */
+#define DEC_IRQ_TIMER 19 /* ARC periodic timer */
+#define DEC_IRQ_VIDEO 20 /* framebuffer */
+
+/* I/O ASIC DMA interrupts */
+#define DEC_IRQ_ASC_MERR 21 /* ASC memory read error */
+#define DEC_IRQ_ASC_ERR 22 /* ASC page overrun */
+#define DEC_IRQ_ASC_DMA 23 /* ASC buffer pointer loaded */
+#define DEC_IRQ_FLOPPY_ERR 24 /* FDC error */
+#define DEC_IRQ_ISDN_ERR 25 /* ISDN memory read/overrun error */
+#define DEC_IRQ_ISDN_RXDMA 26 /* ISDN recv buffer pointer loaded */
+#define DEC_IRQ_ISDN_TXDMA 27 /* ISDN xmit buffer pointer loaded */
+#define DEC_IRQ_LANCE_MERR 28 /* LANCE memory read error */
+#define DEC_IRQ_SCC0A_RXERR 29 /* SCC0A (printer) receive overrun */
+#define DEC_IRQ_SCC0A_RXDMA 30 /* SCC0A receive half page */
+#define DEC_IRQ_SCC0A_TXERR 31 /* SCC0A xmit memory read/overrun */
+#define DEC_IRQ_SCC0A_TXDMA 32 /* SCC0A transmit page end */
+#define DEC_IRQ_AB_RXERR 33 /* ACCESS.bus receive overrun */
+#define DEC_IRQ_AB_RXDMA 34 /* ACCESS.bus receive half page */
+#define DEC_IRQ_AB_TXERR 35 /* ACCESS.bus xmit memory read/ovrn */
+#define DEC_IRQ_AB_TXDMA 36 /* ACCESS.bus transmit page end */
+#define DEC_IRQ_SCC1A_RXERR 37 /* SCC1A (modem) receive overrun */
+#define DEC_IRQ_SCC1A_RXDMA 38 /* SCC1A receive half page */
+#define DEC_IRQ_SCC1A_TXERR 39 /* SCC1A xmit memory read/overrun */
+#define DEC_IRQ_SCC1A_TXDMA 40 /* SCC1A transmit page end */
+
+/* TC5 & TC6 are virtual slots for KN02's onboard devices */
+#define DEC_IRQ_TC5 DEC_IRQ_ASC /* virtual PMAZ-AA */
+#define DEC_IRQ_TC6 DEC_IRQ_LANCE /* virtual PMAD-AA */
+
+#define DEC_NR_INTS 41
+
+
+/* Largest of cpu mask_nr tables. */
+#define DEC_MAX_CPU_INTS 6
+/* Largest of asic mask_nr tables. */
+#define DEC_MAX_ASIC_INTS 9
+
+
+/*
+ * CPU interrupt bits common to all systems.
+ */
+#define DEC_CPU_INR_FPU 7 /* R3k FPU */
+#define DEC_CPU_INR_SW1 1 /* software #1 */
+#define DEC_CPU_INR_SW0 0 /* software #0 */
+
+#define DEC_CPU_IRQ_BASE MIPS_CPU_IRQ_BASE /* first IRQ assigned to CPU */
+
+#define DEC_CPU_IRQ_NR(n) ((n) + DEC_CPU_IRQ_BASE)
+#define DEC_CPU_IRQ_MASK(n) (1 << ((n) + CAUSEB_IP))
+#define DEC_CPU_IRQ_ALL (0xff << CAUSEB_IP)
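+
+/*
+ * Worked example (editor's illustration): the R3k FPU interrupt maps to
+ *
+ *     DEC_CPU_IRQ_NR(DEC_CPU_INR_FPU)   == DEC_CPU_IRQ_BASE + 7
+ *     DEC_CPU_IRQ_MASK(DEC_CPU_INR_FPU) == 1 << (7 + CAUSEB_IP)
+ *
+ * i.e. the IP7 bit in the CP0 Cause register.
+ */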
+
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Interrupt table structures to hide differences between systems.
+ */
+typedef union { int i; void *p; } int_ptr;
+extern int dec_interrupt[DEC_NR_INTS];
+extern int_ptr cpu_mask_nr_tbl[DEC_MAX_CPU_INTS][2];
+extern int_ptr asic_mask_nr_tbl[DEC_MAX_ASIC_INTS][2];
+extern int cpu_fpu_mask;
+
+
+/*
+ * Common interrupt routine prototypes for all DECStations
+ */
+extern void kn02_io_int(void);
+extern void kn02xa_io_int(void);
+extern void kn03_io_int(void);
+extern void asic_dma_int(void);
+extern void asic_all_int(void);
+extern void kn02_all_int(void);
+extern void cpu_all_int(void);
+
+extern void dec_intr_unimplemented(void);
+extern void asic_intr_unimplemented(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/mips/include/asm/dec/ioasic.h b/arch/mips/include/asm/dec/ioasic.h
new file mode 100644
index 000000000..6d912f095
--- /dev/null
+++ b/arch/mips/include/asm/dec/ioasic.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-mips/dec/ioasic.h
+ *
+ * DEC I/O ASIC access operations.
+ *
+ * Copyright (C) 2000, 2002, 2003 Maciej W. Rozycki
+ */
+
+#ifndef __ASM_DEC_IOASIC_H
+#define __ASM_DEC_IOASIC_H
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+extern spinlock_t ioasic_ssr_lock;
+
+extern volatile u32 *ioasic_base;
+
+static inline void ioasic_write(unsigned int reg, u32 v)
+{
+ ioasic_base[reg / 4] = v;
+}
+
+static inline u32 ioasic_read(unsigned int reg)
+{
+ return ioasic_base[reg / 4];
+}
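+
+/*
+ * Usage sketch (editor's illustration): these accessors index the I/O ASIC
+ * register block in 32-bit words, so reading the System Support Register
+ * and masking all ASIC interrupts looks roughly like
+ *
+ *     u32 ssr = ioasic_read(IO_REG_SSR);
+ *     ioasic_write(IO_REG_SIMR, 0);
+ *
+ * with IO_REG_SSR/IO_REG_SIMR coming from <asm/dec/ioasic_addrs.h>; SSR
+ * updates are normally serialised with ioasic_ssr_lock.
+ */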
+
+extern void init_ioasic_irqs(int base);
+
+extern int dec_ioasic_clocksource_init(void);
+
+#endif /* __ASM_DEC_IOASIC_H */
diff --git a/arch/mips/include/asm/dec/ioasic_addrs.h b/arch/mips/include/asm/dec/ioasic_addrs.h
new file mode 100644
index 000000000..8bd95971f
--- /dev/null
+++ b/arch/mips/include/asm/dec/ioasic_addrs.h
@@ -0,0 +1,152 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Definitions for the address map in the JUNKIO Asic
+ *
+ * Created with Information from:
+ *
+ * "DEC 3000 300/400/500/600/700/800/900 AXP Models System Programmer's Manual"
+ *
+ * and the Mach Sources
+ *
+ * Copyright (C) 199x the Anonymous
+ * Copyright (C) 2002, 2003 Maciej W. Rozycki
+ */
+
+#ifndef __ASM_MIPS_DEC_IOASIC_ADDRS_H
+#define __ASM_MIPS_DEC_IOASIC_ADDRS_H
+
+#define IOASIC_SLOT_SIZE 0x00040000
+
+/*
+ * Address ranges decoded by the I/O ASIC for onboard devices.
+ */
+#define IOASIC_SYS_ROM (0*IOASIC_SLOT_SIZE) /* system board ROM */
+#define IOASIC_IOCTL (1*IOASIC_SLOT_SIZE) /* I/O ASIC */
+#define IOASIC_ESAR (2*IOASIC_SLOT_SIZE) /* LANCE MAC address chip */
+#define IOASIC_LANCE (3*IOASIC_SLOT_SIZE) /* LANCE Ethernet */
+#define IOASIC_SCC0 (4*IOASIC_SLOT_SIZE) /* SCC #0 */
+#define IOASIC_VDAC_HI (5*IOASIC_SLOT_SIZE) /* VDAC (maxine) */
+#define IOASIC_SCC1 (6*IOASIC_SLOT_SIZE) /* SCC #1 (3min, 3max+) */
+#define IOASIC_VDAC_LO (7*IOASIC_SLOT_SIZE) /* VDAC (maxine) */
+#define IOASIC_TOY (8*IOASIC_SLOT_SIZE) /* RTC */
+#define IOASIC_ISDN (9*IOASIC_SLOT_SIZE) /* ISDN (maxine) */
+#define IOASIC_ERRADDR (9*IOASIC_SLOT_SIZE) /* bus error address (3max+) */
+#define IOASIC_CHKSYN (10*IOASIC_SLOT_SIZE) /* ECC syndrome (3max+) */
+#define IOASIC_ACC_BUS (10*IOASIC_SLOT_SIZE) /* ACCESS.bus (maxine) */
+#define IOASIC_MCR (11*IOASIC_SLOT_SIZE) /* memory control (3max+) */
+#define IOASIC_FLOPPY (11*IOASIC_SLOT_SIZE) /* FDC (maxine) */
+#define IOASIC_SCSI (12*IOASIC_SLOT_SIZE) /* ASC SCSI */
+#define IOASIC_FDC_DMA (13*IOASIC_SLOT_SIZE) /* FDC DMA (maxine) */
+#define IOASIC_SCSI_DMA (14*IOASIC_SLOT_SIZE) /* ??? */
+#define IOASIC_RES_15 (15*IOASIC_SLOT_SIZE) /* unused? */
+
+
+/*
+ * Offsets for I/O ASIC registers
+ * (relative to (dec_kn_slot_base + IOASIC_IOCTL)).
+ */
+ /* all systems */
+#define IO_REG_SCSI_DMA_P 0x00 /* SCSI DMA Pointer */
+#define IO_REG_SCSI_DMA_BP 0x10 /* SCSI DMA Buffer Pointer */
+#define IO_REG_LANCE_DMA_P 0x20 /* LANCE DMA Pointer */
+#define IO_REG_SCC0A_T_DMA_P 0x30 /* SCC0A Transmit DMA Pointer */
+#define IO_REG_SCC0A_R_DMA_P 0x40 /* SCC0A Receive DMA Pointer */
+
+ /* except Maxine */
+#define IO_REG_SCC1A_T_DMA_P 0x50 /* SCC1A Transmit DMA Pointer */
+#define IO_REG_SCC1A_R_DMA_P 0x60 /* SCC1A Receive DMA Pointer */
+
+ /* Maxine */
+#define IO_REG_AB_T_DMA_P 0x50 /* ACCESS.bus Transmit DMA Pointer */
+#define IO_REG_AB_R_DMA_P 0x60 /* ACCESS.bus Receive DMA Pointer */
+#define IO_REG_FLOPPY_DMA_P 0x70 /* Floppy DMA Pointer */
+#define IO_REG_ISDN_T_DMA_P 0x80 /* ISDN Transmit DMA Pointer */
+#define IO_REG_ISDN_T_DMA_BP 0x90 /* ISDN Transmit DMA Buffer Pointer */
+#define IO_REG_ISDN_R_DMA_P 0xa0 /* ISDN Receive DMA Pointer */
+#define IO_REG_ISDN_R_DMA_BP 0xb0 /* ISDN Receive DMA Buffer Pointer */
+
+ /* all systems */
+#define IO_REG_DATA_0 0xc0 /* System Data Buffer 0 */
+#define IO_REG_DATA_1 0xd0 /* System Data Buffer 1 */
+#define IO_REG_DATA_2 0xe0 /* System Data Buffer 2 */
+#define IO_REG_DATA_3 0xf0 /* System Data Buffer 3 */
+
+ /* all systems */
+#define IO_REG_SSR 0x100 /* System Support Register */
+#define IO_REG_SIR 0x110 /* System Interrupt Register */
+#define IO_REG_SIMR 0x120 /* System Interrupt Mask Reg. */
+#define IO_REG_SAR 0x130 /* System Address Register */
+
+ /* Maxine */
+#define IO_REG_ISDN_T_DATA 0x140 /* ISDN Xmit Data Register */
+#define IO_REG_ISDN_R_DATA 0x150 /* ISDN Receive Data Register */
+
+ /* all systems */
+#define IO_REG_LANCE_SLOT 0x160 /* LANCE I/O Slot Register */
+#define IO_REG_SCSI_SLOT 0x170 /* SCSI Slot Register */
+#define IO_REG_SCC0A_SLOT 0x180 /* SCC0A DMA Slot Register */
+
+ /* except Maxine */
+#define IO_REG_SCC1A_SLOT 0x190 /* SCC1A DMA Slot Register */
+
+ /* Maxine */
+#define IO_REG_AB_SLOT 0x190 /* ACCESS.bus DMA Slot Register */
+#define IO_REG_FLOPPY_SLOT 0x1a0 /* Floppy Slot Register */
+
+ /* all systems */
+#define IO_REG_SCSI_SCR 0x1b0 /* SCSI Partial-Word DMA Control */
+#define IO_REG_SCSI_SDR0 0x1c0 /* SCSI DMA Partial Word 0 */
+#define IO_REG_SCSI_SDR1 0x1d0 /* SCSI DMA Partial Word 1 */
+#define IO_REG_FCTR 0x1e0 /* Free-Running Counter */
+#define IO_REG_RES_31 0x1f0 /* unused */
+
+
+/*
+ * The upper 16 bits of the System Support Register are a part of the
+ * I/O ASIC's internal DMA engine and thus are common to all I/O ASIC
+ * machines. The exception is the Maxine, which makes use of the
+ * FLOPPY and ISDN bits (otherwise unused) and has a different SCC
+ * wiring.
+ */
+ /* all systems */
+#define IO_SSR_SCC0A_TX_DMA_EN (1<<31) /* SCC0A transmit DMA enable */
+#define IO_SSR_SCC0A_RX_DMA_EN (1<<30) /* SCC0A receive DMA enable */
+#define IO_SSR_RES_27 (1<<27) /* unused */
+#define IO_SSR_RES_26 (1<<26) /* unused */
+#define IO_SSR_RES_25 (1<<25) /* unused */
+#define IO_SSR_RES_24 (1<<24) /* unused */
+#define IO_SSR_RES_23 (1<<23) /* unused */
+#define IO_SSR_SCSI_DMA_DIR (1<<18) /* SCSI DMA direction */
+#define IO_SSR_SCSI_DMA_EN (1<<17) /* SCSI DMA enable */
+#define IO_SSR_LANCE_DMA_EN (1<<16) /* LANCE DMA enable */
+
+ /* except Maxine */
+#define IO_SSR_SCC1A_TX_DMA_EN (1<<29) /* SCC1A transmit DMA enable */
+#define IO_SSR_SCC1A_RX_DMA_EN (1<<28) /* SCC1A receive DMA enable */
+#define IO_SSR_RES_22 (1<<22) /* unused */
+#define IO_SSR_RES_21 (1<<21) /* unused */
+#define IO_SSR_RES_20 (1<<20) /* unused */
+#define IO_SSR_RES_19 (1<<19) /* unused */
+
+ /* Maxine */
+#define IO_SSR_AB_TX_DMA_EN (1<<29) /* ACCESS.bus xmit DMA enable */
+#define IO_SSR_AB_RX_DMA_EN (1<<28) /* ACCESS.bus recv DMA enable */
+#define IO_SSR_FLOPPY_DMA_DIR (1<<22) /* Floppy DMA direction */
+#define IO_SSR_FLOPPY_DMA_EN (1<<21) /* Floppy DMA enable */
+#define IO_SSR_ISDN_TX_DMA_EN (1<<20) /* ISDN transmit DMA enable */
+#define IO_SSR_ISDN_RX_DMA_EN (1<<19) /* ISDN receive DMA enable */
+
+/*
+ * The lower 16 bits are system-specific. Bits 15,11:8 are common and
+ * defined here. The rest is defined in system-specific headers.
+ */
+#define KN0X_IO_SSR_DIAGDN (1<<15) /* diagnostic jumper */
+#define KN0X_IO_SSR_SCC_RST (1<<11) /* ~SCC0,1 (Z85C30) reset */
+#define KN0X_IO_SSR_RTC_RST (1<<10) /* ~RTC (DS1287) reset */
+#define KN0X_IO_SSR_ASC_RST (1<<9) /* ~ASC (NCR53C94) reset */
+#define KN0X_IO_SSR_LANCE_RST (1<<8) /* ~LANCE (Am7990) reset */
+
+#endif /* __ASM_MIPS_DEC_IOASIC_ADDRS_H */
diff --git a/arch/mips/include/asm/dec/ioasic_ints.h b/arch/mips/include/asm/dec/ioasic_ints.h
new file mode 100644
index 000000000..9aaa98696
--- /dev/null
+++ b/arch/mips/include/asm/dec/ioasic_ints.h
@@ -0,0 +1,74 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Definitions for the interrupt related bits in the I/O ASIC
+ * interrupt status register (and the interrupt mask register, of course)
+ *
+ * Created with Information from:
+ *
+ * "DEC 3000 300/400/500/600/700/800/900 AXP Models System Programmer's Manual"
+ *
+ * and the Mach Sources
+ *
+ * Copyright (C) 199x the Anonymous
+ * Copyright (C) 2002 Maciej W. Rozycki
+ */
+
+#ifndef __ASM_DEC_IOASIC_INTS_H
+#define __ASM_DEC_IOASIC_INTS_H
+
+/*
+ * The upper 16 bits are a part of the I/O ASIC's internal DMA engine
+ * and thus are common to all I/O ASIC machines. The exception is
+ * the Maxine, which makes use of the FLOPPY and ISDN bits (otherwise
+ * unused) and has a different SCC wiring.
+ */
+ /* all systems */
+#define IO_INR_SCC0A_TXDMA 31 /* SCC0A transmit page end */
+#define IO_INR_SCC0A_TXERR 30 /* SCC0A transmit memory read error */
+#define IO_INR_SCC0A_RXDMA 29 /* SCC0A receive half page */
+#define IO_INR_SCC0A_RXERR 28 /* SCC0A receive overrun */
+#define IO_INR_ASC_DMA 19 /* ASC buffer pointer loaded */
+#define IO_INR_ASC_ERR 18 /* ASC page overrun */
+#define IO_INR_ASC_MERR 17 /* ASC memory read error */
+#define IO_INR_LANCE_MERR 16 /* LANCE memory read error */
+
+ /* except Maxine */
+#define IO_INR_SCC1A_TXDMA 27 /* SCC1A transmit page end */
+#define IO_INR_SCC1A_TXERR 26 /* SCC1A transmit memory read error */
+#define IO_INR_SCC1A_RXDMA 25 /* SCC1A receive half page */
+#define IO_INR_SCC1A_RXERR 24 /* SCC1A receive overrun */
+#define IO_INR_RES_23 23 /* unused */
+#define IO_INR_RES_22 22 /* unused */
+#define IO_INR_RES_21 21 /* unused */
+#define IO_INR_RES_20 20 /* unused */
+
+ /* Maxine */
+#define IO_INR_AB_TXDMA 27 /* ACCESS.bus transmit page end */
+#define IO_INR_AB_TXERR 26 /* ACCESS.bus xmit memory read error */
+#define IO_INR_AB_RXDMA 25 /* ACCESS.bus receive half page */
+#define IO_INR_AB_RXERR 24 /* ACCESS.bus receive overrun */
+#define IO_INR_FLOPPY_ERR 23 /* FDC error */
+#define IO_INR_ISDN_TXDMA 22 /* ISDN xmit buffer pointer loaded */
+#define IO_INR_ISDN_RXDMA 21 /* ISDN recv buffer pointer loaded */
+#define IO_INR_ISDN_ERR 20 /* ISDN memory read/overrun error */
+
+#define IO_INR_DMA 16 /* first DMA IRQ */
+
+/*
+ * The lower 16 bits are system-specific and thus defined in
+ * system-specific headers.
+ */
+
+
+#define IO_IRQ_BASE 8 /* first IRQ assigned to I/O ASIC */
+#define IO_IRQ_LINES 32 /* number of I/O ASIC interrupts */
+
+#define IO_IRQ_NR(n) ((n) + IO_IRQ_BASE)
+#define IO_IRQ_MASK(n) (1 << (n))
+#define IO_IRQ_ALL 0x0000ffff
+#define IO_IRQ_DMA 0xffff0000
+
+#endif /* __ASM_DEC_IOASIC_INTS_H */
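[Aside, not part of the patch: IO_IRQ_NR() and IO_IRQ_MASK() tie the status-register bit numbers above to Linux IRQ numbers and to bits in the SIR/SIMR registers. A hedged driver-side sketch follows; the handler and the direct SIMR write are illustrative only, since the in-tree DECstation code routes unmasking through its irq_chip.]

#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/ioasic_ints.h>
#include <asm/dec/kn03.h>

static irqreturn_t example_lance_isr(int irq, void *dev_id)
{
	/* Real device handling elided. */
	return IRQ_HANDLED;
}

static int __init example_lance_irq_setup(void)
{
	int irq = IO_IRQ_NR(KN03_IO_INR_LANCE);	/* status bit 8 -> Linux IRQ 16 */
	int ret;

	ret = request_irq(irq, example_lance_isr, 0, "lance", NULL);
	if (ret)
		return ret;

	/* Unmask the line in the System Interrupt Mask Register. */
	ioasic_write(IO_REG_SIMR,
		     ioasic_read(IO_REG_SIMR) | IO_IRQ_MASK(KN03_IO_INR_LANCE));
	return 0;
}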
diff --git a/arch/mips/include/asm/dec/kn01.h b/arch/mips/include/asm/dec/kn01.h
new file mode 100644
index 000000000..88d9ffd74
--- /dev/null
+++ b/arch/mips/include/asm/dec/kn01.h
@@ -0,0 +1,89 @@
+/*
+ * Hardware info about DECstation DS2100/3100 systems (otherwise known as
+ * pmin/pmax or KN01).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995,1996 by Paul M. Antoine, some code and definitions
+ * are by courtesy of Chris Fraser.
+ * Copyright (C) 2002, 2003, 2005 Maciej W. Rozycki
+ */
+#ifndef __ASM_MIPS_DEC_KN01_H
+#define __ASM_MIPS_DEC_KN01_H
+
+#define KN01_SLOT_BASE 0x10000000
+#define KN01_SLOT_SIZE 0x01000000
+
+/*
+ * Address ranges for devices.
+ */
+#define KN01_PMASK (0*KN01_SLOT_SIZE) /* color plane mask */
+#define KN01_PCC (1*KN01_SLOT_SIZE) /* PCC (DC503) cursor */
+#define KN01_VDAC (2*KN01_SLOT_SIZE) /* color map */
+#define KN01_RES_3 (3*KN01_SLOT_SIZE) /* unused */
+#define KN01_RES_4 (4*KN01_SLOT_SIZE) /* unused */
+#define KN01_RES_5 (5*KN01_SLOT_SIZE) /* unused */
+#define KN01_RES_6 (6*KN01_SLOT_SIZE) /* unused */
+#define KN01_ERRADDR (7*KN01_SLOT_SIZE) /* write error address */
+#define KN01_LANCE (8*KN01_SLOT_SIZE) /* LANCE (Am7990) Ethernet */
+#define KN01_LANCE_MEM (9*KN01_SLOT_SIZE) /* LANCE buffer memory */
+#define KN01_SII (10*KN01_SLOT_SIZE) /* SII (DC7061) SCSI */
+#define KN01_SII_MEM (11*KN01_SLOT_SIZE) /* SII buffer memory */
+#define KN01_DZ11 (12*KN01_SLOT_SIZE) /* DZ11 (DC7085) serial */
+#define KN01_RTC (13*KN01_SLOT_SIZE) /* DS1287 RTC (bytes #0) */
+#define KN01_ESAR (13*KN01_SLOT_SIZE) /* MAC address (bytes #1) */
+#define KN01_CSR (14*KN01_SLOT_SIZE) /* system ctrl & status reg */
+#define KN01_SYS_ROM (15*KN01_SLOT_SIZE) /* system board ROM */
+
+
+/*
+ * Frame buffer memory address.
+ */
+#define KN01_VFB_MEM 0x0fc00000
+
+/*
+ * CPU interrupt bits.
+ */
+#define KN01_CPU_INR_BUS 6 /* memory, I/O bus read/write errors */
+#define KN01_CPU_INR_VIDEO 6 /* PCC area detect #2 */
+#define KN01_CPU_INR_RTC 5 /* DS1287 RTC */
+#define KN01_CPU_INR_DZ11 4 /* DZ11 (DC7085) serial */
+#define KN01_CPU_INR_LANCE 3 /* LANCE (Am7990) Ethernet */
+#define KN01_CPU_INR_SII 2 /* SII (DC7061) SCSI */
+
+
+/*
+ * System Control & Status Register bits.
+ */
+#define KN01_CSR_MNFMOD (1<<15) /* MNFMOD manufacturing jumper */
+#define KN01_CSR_STATUS (1<<14) /* self-test result status output */
+#define KN01_CSR_PARDIS (1<<13) /* parity error disable */
+#define KN01_CSR_CRSRTST (1<<12) /* PCC test output */
+#define KN01_CSR_MONO (1<<11) /* mono/color fb SIMM installed */
+#define KN01_CSR_MEMERR (1<<10) /* write timeout error status & ack*/
+#define KN01_CSR_VINT (1<<9) /* PCC area detect #2 status & ack */
+#define KN01_CSR_TXDIS (1<<8) /* DZ11 transmit disable */
+#define KN01_CSR_VBGTRG (1<<2) /* blue DAC voltage over green (r/o) */
+#define KN01_CSR_VRGTRG (1<<1) /* red DAC voltage over green (r/o) */
+#define KN01_CSR_VRGTRB (1<<0) /* red DAC voltage over blue (r/o) */
+#define KN01_CSR_LEDS (0xff<<0) /* ~diagnostic LEDs (w/o) */
+
+
+#ifndef __ASSEMBLY__
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+struct pt_regs;
+
+extern u16 cached_kn01_csr;
+
+extern void dec_kn01_be_init(void);
+extern int dec_kn01_be_handler(struct pt_regs *regs, int is_fixup);
+extern irqreturn_t dec_kn01_be_interrupt(int irq, void *dev_id);
+#endif
+
+#endif /* __ASM_MIPS_DEC_KN01_H */
diff --git a/arch/mips/include/asm/dec/kn02.h b/arch/mips/include/asm/dec/kn02.h
new file mode 100644
index 000000000..93430b5f4
--- /dev/null
+++ b/arch/mips/include/asm/dec/kn02.h
@@ -0,0 +1,91 @@
+/*
+ * Hardware info about DECstation 5000/200 systems (otherwise known as
+ * 3max or KN02).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995,1996 by Paul M. Antoine, some code and definitions
+ * are by courtesy of Chris Fraser.
+ * Copyright (C) 2002, 2003, 2005 Maciej W. Rozycki
+ */
+#ifndef __ASM_MIPS_DEC_KN02_H
+#define __ASM_MIPS_DEC_KN02_H
+
+#define KN02_SLOT_BASE 0x1fc00000
+#define KN02_SLOT_SIZE 0x00080000
+
+/*
+ * Address ranges decoded by the "system slot" logic for onboard devices.
+ */
+#define KN02_SYS_ROM (0*KN02_SLOT_SIZE) /* system board ROM */
+#define KN02_RES_1 (1*KN02_SLOT_SIZE) /* unused */
+#define KN02_CHKSYN (2*KN02_SLOT_SIZE) /* ECC syndrome */
+#define KN02_ERRADDR (3*KN02_SLOT_SIZE) /* bus error address */
+#define KN02_DZ11 (4*KN02_SLOT_SIZE) /* DZ11 (DC7085) serial */
+#define KN02_RTC (5*KN02_SLOT_SIZE) /* DS1287 RTC */
+#define KN02_CSR (6*KN02_SLOT_SIZE) /* system ctrl & status reg */
+#define KN02_SYS_ROM_7 (7*KN02_SLOT_SIZE) /* system board ROM (alias) */
+
+
+/*
+ * System Control & Status Register bits.
+ */
+#define KN02_CSR_RES_28 (0xf<<28) /* unused */
+#define KN02_CSR_PSU (1<<27) /* power supply unit warning */
+#define KN02_CSR_NVRAM (1<<26) /* ~NVRAM clear jumper */
+#define KN02_CSR_REFEVEN (1<<25) /* mem refresh bank toggle */
+#define KN02_CSR_NRMOD (1<<24) /* ~NRMOD manufact. jumper */
+#define KN02_CSR_IOINTEN (0xff<<16) /* IRQ mask bits */
+#define KN02_CSR_DIAGCHK (1<<15) /* diagn/norml ECC reads */
+#define KN02_CSR_DIAGGEN (1<<14) /* diagn/norml ECC writes */
+#define KN02_CSR_CORRECT (1<<13) /* ECC correct/check */
+#define KN02_CSR_LEDIAG (1<<12) /* ECC diagn. latch strobe */
+#define KN02_CSR_TXDIS (1<<11) /* DZ11 transmit disable */
+#define KN02_CSR_BNK32M (1<<10) /* 32M/8M stride */
+#define KN02_CSR_DIAGDN (1<<9) /* DIAGDN manufact. jumper */
+#define KN02_CSR_BAUD38 (1<<8) /* DZ11 38/19kbps ext. rate */
+#define KN02_CSR_IOINT (0xff<<0) /* IRQ status bits (r/o) */
+#define KN02_CSR_LEDS (0xff<<0) /* ~diagnostic LEDs (w/o) */
+
+
+/*
+ * CPU interrupt bits.
+ */
+#define KN02_CPU_INR_RES_6 6 /* unused */
+#define KN02_CPU_INR_BUS 5 /* memory, I/O bus read/write errors */
+#define KN02_CPU_INR_RES_4 4 /* unused */
+#define KN02_CPU_INR_RTC 3 /* DS1287 RTC */
+#define KN02_CPU_INR_CASCADE 2 /* CSR cascade */
+
+/*
+ * CSR interrupt bits.
+ */
+#define KN02_CSR_INR_DZ11 7 /* DZ11 (DC7085) serial */
+#define KN02_CSR_INR_LANCE 6 /* LANCE (Am7990) Ethernet */
+#define KN02_CSR_INR_ASC 5 /* ASC (NCR53C94) SCSI */
+#define KN02_CSR_INR_RES_4 4 /* unused */
+#define KN02_CSR_INR_RES_3 3 /* unused */
+#define KN02_CSR_INR_TC2 2 /* TURBOchannel slot #2 */
+#define KN02_CSR_INR_TC1 1 /* TURBOchannel slot #1 */
+#define KN02_CSR_INR_TC0 0 /* TURBOchannel slot #0 */
+
+
+#define KN02_IRQ_BASE 8 /* first IRQ assigned to CSR */
+#define KN02_IRQ_LINES 8 /* number of CSR interrupts */
+
+#define KN02_IRQ_NR(n) ((n) + KN02_IRQ_BASE)
+#define KN02_IRQ_MASK(n) (1 << (n))
+#define KN02_IRQ_ALL 0xff
+
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+extern u32 cached_kn02_csr;
+extern void init_kn02_irqs(int base);
+#endif
+
+#endif /* __ASM_MIPS_DEC_KN02_H */
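[Aside, not part of the patch: on the KN02 the CSR both masks (IOINTEN, bits 23:16) and reports (IOINT, bits 7:0) the cascaded device interrupts, and KN02_IRQ_NR() maps a CSR bit to a Linux IRQ. A rough sketch of the cascade dispatch step implied by these definitions; illustrative only, as the in-tree handler keeps the enable bits in cached_kn02_csr rather than re-reading the hardware register.]

#include <linux/bitops.h>
#include <linux/irq.h>
#include <asm/addrspace.h>
#include <asm/dec/kn02.h>

static void example_kn02_cascade_dispatch(void)
{
	volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR);
	u32 pending = (*csr & KN02_CSR_IOINT) & ((*csr & KN02_CSR_IOINTEN) >> 16);

	while (pending) {
		unsigned int bit = __ffs(pending);

		do_IRQ(KN02_IRQ_NR(bit));	/* hand off to the Linux IRQ layer */
		pending &= ~KN02_IRQ_MASK(bit);
	}
}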
diff --git a/arch/mips/include/asm/dec/kn02ba.h b/arch/mips/include/asm/dec/kn02ba.h
new file mode 100644
index 000000000..81a6cc1c5
--- /dev/null
+++ b/arch/mips/include/asm/dec/kn02ba.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-mips/dec/kn02ba.h
+ *
+ * DECstation 5000/1xx (3min or KN02-BA) definitions.
+ *
+ * Copyright (C) 2002, 2003 Maciej W. Rozycki
+ */
+#ifndef __ASM_MIPS_DEC_KN02BA_H
+#define __ASM_MIPS_DEC_KN02BA_H
+
+#include <asm/dec/kn02xa.h> /* For common definitions. */
+
+/*
+ * CPU interrupt bits.
+ */
+#define KN02BA_CPU_INR_HALT 6 /* HALT button */
+#define KN02BA_CPU_INR_CASCADE 5 /* I/O ASIC cascade */
+#define KN02BA_CPU_INR_TC2 4 /* TURBOchannel slot #2 */
+#define KN02BA_CPU_INR_TC1 3 /* TURBOchannel slot #1 */
+#define KN02BA_CPU_INR_TC0 2 /* TURBOchannel slot #0 */
+
+/*
+ * I/O ASIC interrupt bits. Star marks denote non-IRQ status bits.
+ */
+#define KN02BA_IO_INR_RES_15 15 /* unused */
+#define KN02BA_IO_INR_NVRAM 14 /* (*) NVRAM clear jumper */
+#define KN02BA_IO_INR_RES_13 13 /* unused */
+#define KN02BA_IO_INR_BUS 12 /* memory, I/O bus read/write errors */
+#define KN02BA_IO_INR_RES_11 11 /* unused */
+#define KN02BA_IO_INR_NRMOD 10 /* (*) NRMOD manufacturing jumper */
+#define KN02BA_IO_INR_ASC 9 /* ASC (NCR53C94) SCSI */
+#define KN02BA_IO_INR_LANCE 8 /* LANCE (Am7990) Ethernet */
+#define KN02BA_IO_INR_SCC1 7 /* SCC (Z85C30) serial #1 */
+#define KN02BA_IO_INR_SCC0 6 /* SCC (Z85C30) serial #0 */
+#define KN02BA_IO_INR_RTC 5 /* DS1287 RTC */
+#define KN02BA_IO_INR_PSU 4 /* power supply unit warning */
+#define KN02BA_IO_INR_RES_3 3 /* unused */
+#define KN02BA_IO_INR_ASC_DATA 2 /* SCSI data ready (for PIO) */
+#define KN02BA_IO_INR_PBNC 1 /* ~HALT button debouncer */
+#define KN02BA_IO_INR_PBNO 0 /* HALT button debouncer */
+
+
+/*
+ * Memory Error Register bits.
+ */
+#define KN02BA_MER_RES_27 (1<<27) /* unused */
+
+/*
+ * Memory Size Register bits.
+ */
+#define KN02BA_MSR_RES_17 (0x3ff<<17) /* unused */
+
+/*
+ * I/O ASIC System Support Register bits.
+ */
+#define KN02BA_IO_SSR_TXDIS1 (1<<14) /* SCC1 transmit disable */
+#define KN02BA_IO_SSR_TXDIS0 (1<<13) /* SCC0 transmit disable */
+#define KN02BA_IO_SSR_RES_12 (1<<12) /* unused */
+
+#define KN02BA_IO_SSR_LEDS (0xff<<0) /* ~diagnostic LEDs */
+
+#endif /* __ASM_MIPS_DEC_KN02BA_H */
diff --git a/arch/mips/include/asm/dec/kn02ca.h b/arch/mips/include/asm/dec/kn02ca.h
new file mode 100644
index 000000000..a466101eb
--- /dev/null
+++ b/arch/mips/include/asm/dec/kn02ca.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-mips/dec/kn02ca.h
+ *
+ * Personal DECstation 5000/xx (Maxine or KN02-CA) definitions.
+ *
+ * Copyright (C) 2002, 2003 Maciej W. Rozycki
+ */
+#ifndef __ASM_MIPS_DEC_KN02CA_H
+#define __ASM_MIPS_DEC_KN02CA_H
+
+#include <asm/dec/kn02xa.h> /* For common definitions. */
+
+/*
+ * CPU interrupt bits.
+ */
+#define KN02CA_CPU_INR_HALT 6 /* HALT from ACCESS.Bus */
+#define KN02CA_CPU_INR_CASCADE 5 /* I/O ASIC cascade */
+#define KN02CA_CPU_INR_BUS 4 /* memory, I/O bus read/write errors */
+#define KN02CA_CPU_INR_RTC 3 /* DS1287 RTC */
+#define KN02CA_CPU_INR_TIMER 2 /* ARC periodic timer */
+
+/*
+ * I/O ASIC interrupt bits. Star marks denote non-IRQ status bits.
+ */
+#define KN02CA_IO_INR_FLOPPY 15 /* 82077 FDC */
+#define KN02CA_IO_INR_NVRAM 14 /* (*) NVRAM clear jumper */
+#define KN02CA_IO_INR_POWERON 13 /* (*) ACCESS.Bus/power-on reset */
+#define KN02CA_IO_INR_TC0 12 /* TURBOchannel slot #0 */
+#define KN02CA_IO_INR_TIMER 12 /* ARC periodic timer (?) */
+#define KN02CA_IO_INR_ISDN 11 /* Am79C30A ISDN */
+#define KN02CA_IO_INR_NRMOD 10 /* (*) NRMOD manufacturing jumper */
+#define KN02CA_IO_INR_ASC 9 /* ASC (NCR53C94) SCSI */
+#define KN02CA_IO_INR_LANCE 8 /* LANCE (Am7990) Ethernet */
+#define KN02CA_IO_INR_HDFLOPPY 7 /* (*) HD (1.44MB) floppy status */
+#define KN02CA_IO_INR_SCC0 6 /* SCC (Z85C30) serial #0 */
+#define KN02CA_IO_INR_TC1 5 /* TURBOchannel slot #1 */
+#define KN02CA_IO_INR_XDFLOPPY 4 /* (*) XD (2.88MB) floppy status */
+#define KN02CA_IO_INR_VIDEO 3 /* framebuffer */
+#define KN02CA_IO_INR_XVIDEO 2 /* ~framebuffer */
+#define KN02CA_IO_INR_AB_XMIT 1 /* ACCESS.bus transmit */
+#define KN02CA_IO_INR_AB_RECV 0 /* ACCESS.bus receive */
+
+
+/*
+ * Memory Error Register bits.
+ */
+#define KN02CA_MER_INTR (1<<27) /* ARC IRQ status & ack */
+
+/*
+ * Memory Size Register bits.
+ */
+#define KN02CA_MSR_INTREN (1<<26) /* ARC periodic IRQ enable */
+#define KN02CA_MSR_MS10EN (1<<25) /* 10/1ms IRQ period select */
+#define KN02CA_MSR_PFORCE (0xf<<21) /* byte lane error force */
+#define KN02CA_MSR_MABEN (1<<20) /* A side VFB address enable */
+#define KN02CA_MSR_LASTBANK (0x7<<17) /* onboard RAM bank # */
+
+/*
+ * I/O ASIC System Support Register bits.
+ */
+#define KN03CA_IO_SSR_RES_14 (1<<14) /* unused */
+#define KN03CA_IO_SSR_RES_13 (1<<13) /* unused */
+#define KN03CA_IO_SSR_ISDN_RST (1<<12) /* ~ISDN (Am79C30A) reset */
+
+#define KN03CA_IO_SSR_FLOPPY_RST (1<<7) /* ~FDC (82077) reset */
+#define KN03CA_IO_SSR_VIDEO_RST (1<<6) /* ~framebuffer reset */
+#define KN03CA_IO_SSR_AB_RST (1<<5) /* ACCESS.bus reset */
+#define KN03CA_IO_SSR_RES_4 (1<<4) /* unused */
+#define KN03CA_IO_SSR_RES_3 (1<<3) /* unused */
+#define KN03CA_IO_SSR_RES_2 (1<<2) /* unused */
+#define KN03CA_IO_SSR_RES_1 (1<<1) /* unused */
+#define KN03CA_IO_SSR_LED (1<<0) /* power LED */
+
+#endif /* __ASM_MIPS_DEC_KN02CA_H */
diff --git a/arch/mips/include/asm/dec/kn02xa.h b/arch/mips/include/asm/dec/kn02xa.h
new file mode 100644
index 000000000..b56b4577f
--- /dev/null
+++ b/arch/mips/include/asm/dec/kn02xa.h
@@ -0,0 +1,84 @@
+/*
+ * Hardware info common to DECstation 5000/1xx systems (otherwise
+ * known as 3min or kn02ba) and Personal DECstations 5000/xx ones
+ * (otherwise known as maxine or kn02ca).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995,1996 by Paul M. Antoine, some code and definitions
+ * are by courtesy of Chris Fraser.
+ * Copyright (C) 2000, 2002, 2003, 2005 Maciej W. Rozycki
+ *
+ * These are addresses which have to be known early in the boot process.
+ * For other addresses refer to tc.h, ioasic_addrs.h and friends.
+ */
+#ifndef __ASM_MIPS_DEC_KN02XA_H
+#define __ASM_MIPS_DEC_KN02XA_H
+
+#include <asm/dec/ioasic_addrs.h>
+
+#define KN02XA_SLOT_BASE 0x1c000000
+
+/*
+ * Memory control ASIC registers.
+ */
+#define KN02XA_MER 0x0c400000 /* memory error register */
+#define KN02XA_MSR 0x0c800000 /* memory size register */
+
+/*
+ * CPU control ASIC registers.
+ */
+#define KN02XA_MEM_CONF 0x0e000000 /* write timeout config */
+#define KN02XA_EAR 0x0e000004 /* error address register */
+#define KN02XA_BOOT0 0x0e000008 /* boot 0 register */
+#define KN02XA_MEM_INTR 0x0e00000c /* write err IRQ stat & ack */
+
+/*
+ * Memory Error Register bits, common definitions.
+ * The rest is defined in system-specific headers.
+ */
+#define KN02XA_MER_RES_28 (0xf<<28) /* unused */
+#define KN02XA_MER_RES_17 (0x3ff<<17) /* unused */
+#define KN02XA_MER_PAGERR (1<<16) /* 2k page boundary error */
+#define KN02XA_MER_TRANSERR (1<<15) /* transfer length error */
+#define KN02XA_MER_PARDIS (1<<14) /* parity error disable */
+#define KN02XA_MER_SIZE (1<<13) /* r/o mirror of MSR_SIZE */
+#define KN02XA_MER_RES_12 (1<<12) /* unused */
+#define KN02XA_MER_BYTERR (0xf<<8) /* byte lane error bitmask: */
+#define KN02XA_MER_BYTERR_3 (0x8<<8) /* byte lane #3 */
+#define KN02XA_MER_BYTERR_2 (0x4<<8) /* byte lane #2 */
+#define KN02XA_MER_BYTERR_1 (0x2<<8) /* byte lane #1 */
+#define KN02XA_MER_BYTERR_0 (0x1<<8) /* byte lane #0 */
+#define KN02XA_MER_RES_0 (0xff<<0) /* unused */
+
+/*
+ * Memory Size Register bits, common definitions.
+ * The rest is defined in system-specific headers.
+ */
+#define KN02XA_MSR_RES_27 (0x1f<<27) /* unused */
+#define KN02XA_MSR_RES_14 (0x7<<14) /* unused */
+#define KN02XA_MSR_SIZE (1<<13) /* 16M/4M stride */
+#define KN02XA_MSR_RES_0 (0x1fff<<0) /* unused */
+
+/*
+ * Error Address Register bits.
+ */
+#define KN02XA_EAR_RES_29 (0x7<<29) /* unused */
+#define KN02XA_EAR_ADDRESS (0x7ffffff<<2) /* address involved */
+#define KN02XA_EAR_RES_0 (0x3<<0) /* unused */
+
+
+#ifndef __ASSEMBLY__
+
+#include <linux/interrupt.h>
+
+struct pt_regs;
+
+extern void dec_kn02xa_be_init(void);
+extern int dec_kn02xa_be_handler(struct pt_regs *regs, int is_fixup);
+extern irqreturn_t dec_kn02xa_be_interrupt(int irq, void *dev_id);
+#endif
+
+#endif /* __ASM_MIPS_DEC_KN02XA_H */
diff --git a/arch/mips/include/asm/dec/kn03.h b/arch/mips/include/asm/dec/kn03.h
new file mode 100644
index 000000000..edede923f
--- /dev/null
+++ b/arch/mips/include/asm/dec/kn03.h
@@ -0,0 +1,74 @@
+/*
+ * Hardware info about DECstation 5000/2x0 systems (otherwise known as
+ * 3max+) and DECsystem 5900 systems (otherwise known as bigmax) which
+ * differ mechanically but are otherwise identical (both are known as
+ * KN03).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995,1996 by Paul M. Antoine, some code and definitions
+ * are by courtesy of Chris Fraser.
+ * Copyright (C) 2000, 2002, 2003, 2005 Maciej W. Rozycki
+ */
+#ifndef __ASM_MIPS_DEC_KN03_H
+#define __ASM_MIPS_DEC_KN03_H
+
+#include <asm/dec/ecc.h>
+#include <asm/dec/ioasic_addrs.h>
+
+#define KN03_SLOT_BASE 0x1f800000
+
+/*
+ * CPU interrupt bits.
+ */
+#define KN03_CPU_INR_HALT 6 /* HALT button */
+#define KN03_CPU_INR_BUS 5 /* memory, I/O bus read/write errors */
+#define KN03_CPU_INR_RES_4 4 /* unused */
+#define KN03_CPU_INR_RTC 3 /* DS1287 RTC */
+#define KN03_CPU_INR_CASCADE 2 /* I/O ASIC cascade */
+
+/*
+ * I/O ASIC interrupt bits. Star marks denote non-IRQ status bits.
+ */
+#define KN03_IO_INR_3MAXP 15 /* (*) 3max+/bigmax ID */
+#define KN03_IO_INR_NVRAM 14 /* (*) NVRAM clear jumper */
+#define KN03_IO_INR_TC2 13 /* TURBOchannel slot #2 */
+#define KN03_IO_INR_TC1 12 /* TURBOchannel slot #1 */
+#define KN03_IO_INR_TC0 11 /* TURBOchannel slot #0 */
+#define KN03_IO_INR_NRMOD 10 /* (*) NRMOD manufacturing jumper */
+#define KN03_IO_INR_ASC 9 /* ASC (NCR53C94) SCSI */
+#define KN03_IO_INR_LANCE 8 /* LANCE (Am7990) Ethernet */
+#define KN03_IO_INR_SCC1 7 /* SCC (Z85C30) serial #1 */
+#define KN03_IO_INR_SCC0 6 /* SCC (Z85C30) serial #0 */
+#define KN03_IO_INR_RTC 5 /* DS1287 RTC */
+#define KN03_IO_INR_PSU 4 /* power supply unit warning */
+#define KN03_IO_INR_RES_3 3 /* unused */
+#define KN03_IO_INR_ASC_DATA 2 /* SCSI data ready (for PIO) */
+#define KN03_IO_INR_PBNC 1 /* ~HALT button debouncer */
+#define KN03_IO_INR_PBNO 0 /* HALT button debouncer */
+
+
+/*
+ * Memory Control Register bits.
+ */
+#define KN03_MCR_RES_16 (0xffff<<16) /* unused */
+#define KN03_MCR_DIAGCHK (1<<15) /* diagn/norml ECC reads */
+#define KN03_MCR_DIAGGEN (1<<14) /* diagn/norml ECC writes */
+#define KN03_MCR_CORRECT (1<<13) /* ECC correct/check */
+#define KN03_MCR_RES_11 (0x3<<11) /* unused */
+#define KN03_MCR_BNK32M (1<<10) /* 32M/8M stride */
+#define KN03_MCR_RES_7 (0x7<<7) /* unused */
+#define KN03_MCR_CHECK (0x7f<<0) /* diagnostic check bits */
+
+/*
+ * I/O ASIC System Support Register bits.
+ */
+#define KN03_IO_SSR_TXDIS1 (1<<14) /* SCC1 transmit disable */
+#define KN03_IO_SSR_TXDIS0 (1<<13) /* SCC0 transmit disable */
+#define KN03_IO_SSR_RES_12 (1<<12) /* unused */
+
+#define KN03_IO_SSR_LEDS (0xff<<0) /* ~diagnostic LEDs */
+
+#endif /* __ASM_MIPS_DEC_KN03_H */
diff --git a/arch/mips/include/asm/dec/kn05.h b/arch/mips/include/asm/dec/kn05.h
new file mode 100644
index 000000000..3b1524e9f
--- /dev/null
+++ b/arch/mips/include/asm/dec/kn05.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-mips/dec/kn05.h
+ *
+ * DECstation/DECsystem 5000/260 (4max+ or KN05), 5000/150 (4min
+ * or KN04-BA), Personal DECstation/DECsystem 5000/50 (4maxine or
+ * KN04-CA) and DECsystem 5900/260 (KN05) R4k CPU card MB ASIC
+ * definitions.
+ *
+ * Copyright (C) 2002, 2003, 2005, 2008 Maciej W. Rozycki
+ *
+ * WARNING! All this information is pure guesswork based on the
+ * ROM. It is provided here in the hope that it will give someone
+ * some food for thought. No documentation for either the KN05 or
+ * the KN04 module has been located so far.
+ */
+#ifndef __ASM_MIPS_DEC_KN05_H
+#define __ASM_MIPS_DEC_KN05_H
+
+#include <asm/dec/ioasic_addrs.h>
+
+/*
+ * The oncard MB (Memory Buffer) ASIC provides an additional address
+ * decoder. Certain address ranges within the "high" 16 slots are
+ * passed to the I/O ASIC's decoder like with the KN03 or KN02-BA/CA.
+ * Others are handled locally. "Low" slots are always passed.
+ */
+#define KN4K_SLOT_BASE 0x1fc00000
+
+#define KN4K_MB_ROM (0*IOASIC_SLOT_SIZE) /* KN05/KN04 card ROM */
+#define KN4K_IOCTL (1*IOASIC_SLOT_SIZE) /* I/O ASIC */
+#define KN4K_ESAR (2*IOASIC_SLOT_SIZE) /* LANCE MAC address chip */
+#define KN4K_LANCE (3*IOASIC_SLOT_SIZE) /* LANCE Ethernet */
+#define KN4K_MB_INT (4*IOASIC_SLOT_SIZE) /* MB interrupt register */
+#define KN4K_MB_EA (5*IOASIC_SLOT_SIZE) /* MB error address? */
+#define KN4K_MB_EC (6*IOASIC_SLOT_SIZE) /* MB error ??? */
+#define KN4K_MB_CSR (7*IOASIC_SLOT_SIZE) /* MB control & status */
+#define KN4K_RES_08 (8*IOASIC_SLOT_SIZE) /* unused? */
+#define KN4K_RES_09 (9*IOASIC_SLOT_SIZE) /* unused? */
+#define KN4K_RES_10 (10*IOASIC_SLOT_SIZE) /* unused? */
+#define KN4K_RES_11 (11*IOASIC_SLOT_SIZE) /* unused? */
+#define KN4K_SCSI (12*IOASIC_SLOT_SIZE) /* ASC SCSI */
+#define KN4K_RES_13 (13*IOASIC_SLOT_SIZE) /* unused? */
+#define KN4K_RES_14 (14*IOASIC_SLOT_SIZE) /* unused? */
+#define KN4K_RES_15 (15*IOASIC_SLOT_SIZE) /* unused? */
+
+/*
+ * MB ASIC interrupt bits.
+ */
+#define KN4K_MB_INR_MB 4 /* ??? */
+#define KN4K_MB_INR_MT 3 /* memory, I/O bus read/write errors */
+#define KN4K_MB_INR_RES_2 2 /* unused */
+#define KN4K_MB_INR_RTC 1 /* RTC */
+#define KN4K_MB_INR_TC 0 /* I/O ASIC cascade */
+
+/*
+ * Bits for the MB interrupt register.
+ * The register appears read-only.
+ */
+#define KN4K_MB_INT_IRQ (0x1f<<0) /* CPU Int[4:0] status. */
+#define KN4K_MB_INT_IRQ_N(n) (1<<(n)) /* Individual status bits. */
+
+/*
+ * Bits for the MB control & status register.
+ * Set to 0x00bf8001 for KN05 and to 0x003f8000 for KN04 by the firmware.
+ */
+#define KN4K_MB_CSR_PF (1<<0) /* PreFetching enable? */
+#define KN4K_MB_CSR_F (1<<1) /* ??? */
+#define KN4K_MB_CSR_ECC (0xff<<2) /* ??? */
+#define KN4K_MB_CSR_OD (1<<10) /* ??? */
+#define KN4K_MB_CSR_CP (1<<11) /* ??? */
+#define KN4K_MB_CSR_UNC (1<<12) /* ??? */
+#define KN4K_MB_CSR_IM (1<<13) /* ??? */
+#define KN4K_MB_CSR_NC (1<<14) /* ??? */
+#define KN4K_MB_CSR_EE (1<<15) /* (bus) Exception Enable? */
+#define KN4K_MB_CSR_MSK (0x1f<<16) /* CPU Int[4:0] mask */
+#define KN4K_MB_CSR_MSK_N(n) (1<<((n)+16)) /* Individual mask bits. */
+#define KN4K_MB_CSR_FW (1<<21) /* ??? */
+#define KN4K_MB_CSR_W (1<<31) /* ??? */
+
+#endif /* __ASM_MIPS_DEC_KN05_H */
diff --git a/arch/mips/include/asm/dec/kn230.h b/arch/mips/include/asm/dec/kn230.h
new file mode 100644
index 000000000..cb13a7799
--- /dev/null
+++ b/arch/mips/include/asm/dec/kn230.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-mips/dec/kn230.h
+ *
+ * DECsystem 5100 (MIPSmate or KN230) definitions.
+ *
+ * Copyright (C) 2002, 2003 Maciej W. Rozycki
+ */
+#ifndef __ASM_MIPS_DEC_KN230_H
+#define __ASM_MIPS_DEC_KN230_H
+
+/*
+ * CPU interrupt bits.
+ */
+#define KN230_CPU_INR_HALT 6 /* HALT button */
+#define KN230_CPU_INR_BUS 5 /* memory, I/O bus read/write errors */
+#define KN230_CPU_INR_RTC 4 /* DS1287 RTC */
+#define KN230_CPU_INR_SII 3 /* SII (DC7061) SCSI */
+#define KN230_CPU_INR_LANCE 3 /* LANCE (Am7990) Ethernet */
+#define KN230_CPU_INR_DZ11 2 /* DZ11 (DC7085) serial */
+
+#endif /* __ASM_MIPS_DEC_KN230_H */
diff --git a/arch/mips/include/asm/dec/machtype.h b/arch/mips/include/asm/dec/machtype.h
new file mode 100644
index 000000000..a6ecdebc4
--- /dev/null
+++ b/arch/mips/include/asm/dec/machtype.h
@@ -0,0 +1,27 @@
+/*
+ * Various machine type macros
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1998, 2000 Harald Koerfgen
+ */
+
+#ifndef __ASM_DEC_MACHTYPE_H
+#define __ASM_DEC_MACHTYPE_H
+
+#include <asm/bootinfo.h>
+
+#define TURBOCHANNEL (mips_machtype == MACH_DS5000_200 || \
+ mips_machtype == MACH_DS5000_1XX || \
+ mips_machtype == MACH_DS5000_XX || \
+ mips_machtype == MACH_DS5000_2X0 || \
+ mips_machtype == MACH_DS5900)
+
+#define IOASIC (mips_machtype == MACH_DS5000_1XX || \
+ mips_machtype == MACH_DS5000_XX || \
+ mips_machtype == MACH_DS5000_2X0 || \
+ mips_machtype == MACH_DS5900)
+
+#endif
diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h
new file mode 100644
index 000000000..908e96e3a
--- /dev/null
+++ b/arch/mips/include/asm/dec/prom.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-mips/dec/prom.h
+ *
+ * DECstation PROM interface.
+ *
+ * Copyright (C) 2002 Maciej W. Rozycki
+ *
+ * Based on arch/mips/dec/prom/prom.h by the Anonymous.
+ */
+#ifndef _ASM_DEC_PROM_H
+#define _ASM_DEC_PROM_H
+
+#include <linux/types.h>
+
+#include <asm/addrspace.h>
+
+/*
+ * PMAX/3MAX PROM entry points for DS2100/3100's and DS5000/2xx's.
+ * Many of these will work for MIPSen as well!
+ */
+#define VEC_RESET (u64 *)CKSEG1ADDR(0x1fc00000)
+ /* Prom base address */
+
+#define PMAX_PROM_ENTRY(x) (VEC_RESET + (x)) /* Prom jump table */
+
+#define PMAX_PROM_HALT PMAX_PROM_ENTRY(2) /* valid on MIPSen */
+#define PMAX_PROM_AUTOBOOT PMAX_PROM_ENTRY(5) /* valid on MIPSen */
+#define PMAX_PROM_OPEN PMAX_PROM_ENTRY(6)
+#define PMAX_PROM_READ PMAX_PROM_ENTRY(7)
+#define PMAX_PROM_CLOSE PMAX_PROM_ENTRY(10)
+#define PMAX_PROM_LSEEK PMAX_PROM_ENTRY(11)
+#define PMAX_PROM_GETCHAR PMAX_PROM_ENTRY(12)
+#define PMAX_PROM_PUTCHAR PMAX_PROM_ENTRY(13) /* 12 on MIPSen */
+#define PMAX_PROM_GETS PMAX_PROM_ENTRY(15)
+#define PMAX_PROM_PRINTF PMAX_PROM_ENTRY(17)
+#define PMAX_PROM_GETENV PMAX_PROM_ENTRY(33) /* valid on MIPSen */
+
+
+/*
+ * Magic number indicating REX PROM available on DECstation. Found in
+ * register a2 on transfer of control to program from PROM.
+ */
+#define REX_PROM_MAGIC 0x30464354
+
+/* KN04 and KN05 are REX PROMs, so only do the check for R3k systems. */
+static inline bool prom_is_rex(u32 magic)
+{
+ return !IS_ENABLED(CONFIG_CPU_R3000) || magic == REX_PROM_MAGIC;
+}
+
+/*
+ * 3MIN/MAXINE PROM entry points for DS5000/1xx's, DS5000/xx's and
+ * DS5000/2x0.
+ */
+#define REX_PROM_GETBITMAP 0x84/4 /* get mem bitmap */
+#define REX_PROM_GETCHAR 0x24/4 /* getch() */
+#define REX_PROM_GETENV 0x64/4 /* get env. variable */
+#define REX_PROM_GETSYSID 0x80/4 /* get system id */
+#define REX_PROM_GETTCINFO 0xa4/4
+#define REX_PROM_PRINTF 0x30/4 /* printf() */
+#define REX_PROM_SLOTADDR 0x6c/4 /* slotaddr */
+#define REX_PROM_BOOTINIT 0x54/4 /* open() */
+#define REX_PROM_BOOTREAD 0x58/4 /* read() */
+#define REX_PROM_CLEARCACHE 0x7c/4
+
+
+/*
+ * Used by rex_getbitmap().
+ */
+typedef struct {
+ int pagesize;
+ unsigned char bitmap[];
+} memmap;
+
+
+/*
+ * Function pointers as read from a PROM's callback vector.
+ */
+extern int (*__rex_bootinit)(void);
+extern int (*__rex_bootread)(void);
+extern int (*__rex_getbitmap)(memmap *);
+extern unsigned long *(*__rex_slot_address)(int);
+extern void *(*__rex_gettcinfo)(void);
+extern int (*__rex_getsysid)(void);
+extern void (*__rex_clear_cache)(void);
+
+extern int (*__prom_getchar)(void);
+extern char *(*__prom_getenv)(char *);
+extern int (*__prom_printf)(char *, ...);
+
+extern int (*__pmax_open)(char*, int);
+extern int (*__pmax_lseek)(int, long, int);
+extern int (*__pmax_read)(int, void *, int);
+extern int (*__pmax_close)(int);
+
+
+#ifdef CONFIG_64BIT
+
+/*
+ * On MIPS64 we have to call PROM functions via a helper
+ * dispatcher to accommodate ABI incompatibilities.
+ */
+#define __DEC_PROM_O32(fun, arg) fun arg __asm__(#fun); \
+ __asm__(#fun " = call_o32")
+
+int __DEC_PROM_O32(_rex_bootinit, (int (*)(void), void *));
+int __DEC_PROM_O32(_rex_bootread, (int (*)(void), void *));
+int __DEC_PROM_O32(_rex_getbitmap, (int (*)(memmap *), void *, memmap *));
+unsigned long *__DEC_PROM_O32(_rex_slot_address,
+ (unsigned long *(*)(int), void *, int));
+void *__DEC_PROM_O32(_rex_gettcinfo, (void *(*)(void), void *));
+int __DEC_PROM_O32(_rex_getsysid, (int (*)(void), void *));
+void __DEC_PROM_O32(_rex_clear_cache, (void (*)(void), void *));
+
+int __DEC_PROM_O32(_prom_getchar, (int (*)(void), void *));
+char *__DEC_PROM_O32(_prom_getenv, (char *(*)(char *), void *, char *));
+int __DEC_PROM_O32(_prom_printf, (int (*)(char *, ...), void *, char *, ...));
+
+
+#define rex_bootinit() _rex_bootinit(__rex_bootinit, NULL)
+#define rex_bootread() _rex_bootread(__rex_bootread, NULL)
+#define rex_getbitmap(x) _rex_getbitmap(__rex_getbitmap, NULL, x)
+#define rex_slot_address(x) _rex_slot_address(__rex_slot_address, NULL, x)
+#define rex_gettcinfo() _rex_gettcinfo(__rex_gettcinfo, NULL)
+#define rex_getsysid() _rex_getsysid(__rex_getsysid, NULL)
+#define rex_clear_cache() _rex_clear_cache(__rex_clear_cache, NULL)
+
+#define prom_getchar() _prom_getchar(__prom_getchar, NULL)
+#define prom_getenv(x) _prom_getenv(__prom_getenv, NULL, x)
+#define prom_printf(x...) _prom_printf(__prom_printf, NULL, x)
+
+#else /* !CONFIG_64BIT */
+
+/*
+ * On plain MIPS we just call PROM functions directly.
+ */
+#define rex_bootinit __rex_bootinit
+#define rex_bootread __rex_bootread
+#define rex_getbitmap __rex_getbitmap
+#define rex_slot_address __rex_slot_address
+#define rex_gettcinfo __rex_gettcinfo
+#define rex_getsysid __rex_getsysid
+#define rex_clear_cache __rex_clear_cache
+
+#define prom_getchar __prom_getchar
+#define prom_getenv __prom_getenv
+#define prom_printf __prom_printf
+
+#define pmax_open __pmax_open
+#define pmax_lseek __pmax_lseek
+#define pmax_read __pmax_read
+#define pmax_close __pmax_close
+
+#endif /* !CONFIG_64BIT */
+
+
+extern void prom_meminit(u32);
+extern void prom_identify_arch(u32);
+extern void prom_init_cmdline(s32, s32 *, u32);
+
+extern void register_prom_console(void);
+extern void unregister_prom_console(void);
+
+#endif /* _ASM_DEC_PROM_H */
diff --git a/arch/mips/include/asm/dec/system.h b/arch/mips/include/asm/dec/system.h
new file mode 100644
index 000000000..d0873fd4e
--- /dev/null
+++ b/arch/mips/include/asm/dec/system.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-mips/dec/system.h
+ *
+ * Generic DECstation/DECsystem bits.
+ *
+ * Copyright (C) 2005, 2006 Maciej W. Rozycki
+ */
+#ifndef __ASM_DEC_SYSTEM_H
+#define __ASM_DEC_SYSTEM_H
+
+extern unsigned long dec_kn_slot_base, dec_kn_slot_size;
+extern int dec_tc_bus;
+
+#endif /* __ASM_DEC_SYSTEM_H */
diff --git a/arch/mips/include/asm/delay.h b/arch/mips/include/asm/delay.h
new file mode 100644
index 000000000..dc0a5f77a
--- /dev/null
+++ b/arch/mips/include/asm/delay.h
@@ -0,0 +1,32 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 by Waldorf Electronics
+ * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2007 Maciej W. Rozycki
+ */
+#ifndef _ASM_DELAY_H
+#define _ASM_DELAY_H
+
+#include <linux/param.h>
+
+extern void __delay(unsigned long loops);
+extern void __ndelay(unsigned long ns);
+extern void __udelay(unsigned long us);
+
+#define ndelay(ns) __ndelay(ns)
+#define udelay(us) __udelay(us)
+
+/* make sure "usecs *= ..." in udelay do not overflow. */
+#if HZ >= 1000
+#define MAX_UDELAY_MS 1
+#elif HZ <= 200
+#define MAX_UDELAY_MS 5
+#else
+#define MAX_UDELAY_MS (1000 / HZ)
+#endif
+
+#endif /* _ASM_DELAY_H */
diff --git a/arch/mips/include/asm/div64.h b/arch/mips/include/asm/div64.h
new file mode 100644
index 000000000..ceece76fc
--- /dev/null
+++ b/arch/mips/include/asm/div64.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2000, 2004, 2021 Maciej W. Rozycki
+ * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_DIV64_H
+#define __ASM_DIV64_H
+
+#include <asm/bitsperlong.h>
+
+#if BITS_PER_LONG == 32
+
+/*
+ * No traps on overflows for any of these...
+ */
+
+#define do_div64_32(res, high, low, base) ({ \
+ unsigned long __cf, __tmp, __tmp2, __i; \
+ unsigned long __quot32, __mod32; \
+ \
+ __asm__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set noreorder \n" \
+ " move %2, $0 \n" \
+ " move %3, $0 \n" \
+ " b 1f \n" \
+ " li %4, 0x21 \n" \
+ "0: \n" \
+ " sll $1, %0, 0x1 \n" \
+ " srl %3, %0, 0x1f \n" \
+ " or %0, $1, %5 \n" \
+ " sll %1, %1, 0x1 \n" \
+ " sll %2, %2, 0x1 \n" \
+ "1: \n" \
+ " bnez %3, 2f \n" \
+ " sltu %5, %0, %z6 \n" \
+ " bnez %5, 3f \n" \
+ "2: \n" \
+ " addiu %4, %4, -1 \n" \
+ " subu %0, %0, %z6 \n" \
+ " addiu %2, %2, 1 \n" \
+ "3: \n" \
+ " bnez %4, 0b \n" \
+ " srl %5, %1, 0x1f \n" \
+ " .set pop" \
+ : "=&r" (__mod32), "=&r" (__tmp), \
+ "=&r" (__quot32), "=&r" (__cf), \
+ "=&r" (__i), "=&r" (__tmp2) \
+ : "Jr" (base), "0" (high), "1" (low)); \
+ \
+ (res) = __quot32; \
+ __mod32; \
+})
+
+#define __div64_32(n, base) ({ \
+ unsigned long __upper, __low, __high, __radix; \
+ unsigned long long __quot; \
+ unsigned long long __div; \
+ unsigned long __mod; \
+ \
+ __div = (*n); \
+ __radix = (base); \
+ \
+ __high = __div >> 32; \
+ __low = __div; \
+ \
+ if (__high < __radix) { \
+ __upper = __high; \
+ __high = 0; \
+ } else { \
+ __upper = __high % __radix; \
+ __high /= __radix; \
+ } \
+ \
+ __mod = do_div64_32(__low, __upper, __low, __radix); \
+ \
+ __quot = __high; \
+ __quot = __quot << 32 | __low; \
+ (*n) = __quot; \
+ __mod; \
+})
+
+#endif /* BITS_PER_LONG == 32 */
+
+#include <asm-generic/div64.h>
+
+#endif /* __ASM_DIV64_H */
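[Aside, not part of the patch: on 32-bit kernels __div64_32() splits the 64-bit dividend into halves and hands the low part to the shift-and-subtract assembly loop above; do_div() from asm-generic/div64.h then picks this implementation up. A minimal usage sketch:]

#include <linux/types.h>
#include <asm/div64.h>

/* Convert nanoseconds to microseconds; returns the leftover nanoseconds. */
static inline u32 example_ns_to_us(u64 *ns)
{
	return do_div(*ns, 1000);	/* *ns is replaced by the quotient */
}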
diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h
new file mode 100644
index 000000000..5eaa1fcc8
--- /dev/null
+++ b/arch/mips/include/asm/dma-coherence.h
@@ -0,0 +1,38 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
+ *
+ */
+#ifndef __ASM_DMA_COHERENCE_H
+#define __ASM_DMA_COHERENCE_H
+
+enum coherent_io_user_state {
+ IO_COHERENCE_DEFAULT,
+ IO_COHERENCE_ENABLED,
+ IO_COHERENCE_DISABLED,
+};
+
+#if defined(CONFIG_DMA_PERDEV_COHERENT)
+/* Don't provide (hw_)coherentio to avoid misuse */
+#elif defined(CONFIG_DMA_MAYBE_COHERENT)
+extern enum coherent_io_user_state coherentio;
+extern int hw_coherentio;
+
+static inline bool dev_is_dma_coherent(struct device *dev)
+{
+ return coherentio == IO_COHERENCE_ENABLED ||
+ (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio);
+}
+#else
+#ifdef CONFIG_DMA_NONCOHERENT
+#define coherentio IO_COHERENCE_DISABLED
+#else
+#define coherentio IO_COHERENCE_ENABLED
+#endif
+#define hw_coherentio 0
+#endif /* CONFIG_DMA_MAYBE_COHERENT */
+
+#endif
diff --git a/arch/mips/include/asm/dma-direct.h b/arch/mips/include/asm/dma-direct.h
new file mode 100644
index 000000000..9a6401183
--- /dev/null
+++ b/arch/mips/include/asm/dma-direct.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MIPS_DMA_DIRECT_H
+#define _MIPS_DMA_DIRECT_H 1
+
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+
+#endif /* _MIPS_DMA_DIRECT_H */
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
new file mode 100644
index 000000000..34de7b17b
--- /dev/null
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_DMA_MAPPING_H
+#define _ASM_DMA_MAPPING_H
+
+#include <linux/swiotlb.h>
+
+extern const struct dma_map_ops jazz_dma_ops;
+
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+#if defined(CONFIG_MACH_JAZZ)
+ return &jazz_dma_ops;
+#else
+ return NULL;
+#endif
+}
+
+#endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/mips/include/asm/dma.h b/arch/mips/include/asm/dma.h
new file mode 100644
index 000000000..be726b943
--- /dev/null
+++ b/arch/mips/include/asm/dma.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * linux/include/asm/dma.h: Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen
+ * and John Boyd, Nov. 1992.
+ *
+ * NOTE: all this is true *only* for ISA/EISA expansions on Mips boards
+ * and can only be used for expansion cards. Onboard DMA controllers, such
+ * as the R4030 on Jazz boards behave totally different!
+ */
+
+#ifndef _ASM_DMA_H
+#define _ASM_DMA_H
+
+#include <asm/io.h> /* need byte IO */
+#include <linux/spinlock.h> /* And spinlocks */
+#include <linux/delay.h>
+
+
+#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
+#define dma_outb outb_p
+#else
+#define dma_outb outb
+#endif
+
+#define dma_inb inb
+
+/*
+ * NOTES about DMA transfers:
+ *
+ * controller 1: channels 0-3, byte operations, ports 00-1F
+ * controller 2: channels 4-7, word operations, ports C0-DF
+ *
+ * - ALL registers are 8 bits only, regardless of transfer size
+ * - channel 4 is not used - cascades 1 into 2.
+ * - channels 0-3 are byte - addresses/counts are for physical bytes
+ * - channels 5-7 are word - addresses/counts are for physical words
+ * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
+ * - transfer count loaded to registers is 1 less than actual count
+ * - controller 2 offsets are all even (2x offsets for controller 1)
+ * - page registers for 5-7 don't use data bit 0, represent 128K pages
+ * - page registers for 0-3 use bit 0, represent 64K pages
+ *
+ * DMA transfers are limited to the lower 16MB of _physical_ memory.
+ * Note that addresses loaded into registers must be _physical_ addresses,
+ * not logical addresses (which may differ if paging is active).
+ *
+ * Address mapping for channels 0-3:
+ *
+ * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * P7 ... P0 A7 ... A0 A7 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Address mapping for channels 5-7:
+ *
+ * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
+ * | ... | \ \ ... \ \ \ ... \ \
+ * | ... | \ \ ... \ \ \ ... \ (not used)
+ * | ... | \ \ ... \ \ \ ... \
+ * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
+ * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
+ * the hardware level, so odd-byte transfers aren't possible).
+ *
+ * Transfer count (_not # bytes_) is limited to 64K, represented as actual
+ * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
+ * and up to 128K bytes may be transferred on channels 5-7 in one operation.
+ *
+ */
+
+#ifndef CONFIG_GENERIC_ISA_DMA_SUPPORT_BROKEN
+#define MAX_DMA_CHANNELS 8
+#endif
+
+/*
+ * The maximum address in KSEG0 that we can perform a DMA transfer to on this
+ * platform. This describes only the PC style part of the DMA logic like on
+ * Deskstations or Acer PICA but not the much more versatile DMA logic used
+ * for the local devices on Acer PICA or Magnums.
+ */
+#if defined(CONFIG_SGI_IP22) || defined(CONFIG_SGI_IP28)
+/* don't care; ISA bus master won't work, ISA slave DMA supports 32bit addr */
+#define MAX_DMA_ADDRESS PAGE_OFFSET
+#else
+#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x01000000)
+#endif
+#define MAX_DMA_PFN PFN_DOWN(virt_to_phys((void *)MAX_DMA_ADDRESS))
+
+#ifndef MAX_DMA32_PFN
+#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
+#endif
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG 0x08 /* command register (w) */
+#define DMA1_STAT_REG 0x08 /* status register (r) */
+#define DMA1_REQ_REG 0x09 /* request register (w) */
+#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
+#define DMA1_MODE_REG 0x0B /* mode register (w) */
+#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
+#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
+#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
+#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
+
+#define DMA2_CMD_REG 0xD0 /* command register (w) */
+#define DMA2_STAT_REG 0xD0 /* status register (r) */
+#define DMA2_REQ_REG 0xD2 /* request register (w) */
+#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
+#define DMA2_MODE_REG 0xD6 /* mode register (w) */
+#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
+#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
+#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
+#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
+
+#define DMA_ADDR_0 0x00 /* DMA address registers */
+#define DMA_ADDR_1 0x02
+#define DMA_ADDR_2 0x04
+#define DMA_ADDR_3 0x06
+#define DMA_ADDR_4 0xC0
+#define DMA_ADDR_5 0xC4
+#define DMA_ADDR_6 0xC8
+#define DMA_ADDR_7 0xCC
+
+#define DMA_CNT_0 0x01 /* DMA count registers */
+#define DMA_CNT_1 0x03
+#define DMA_CNT_2 0x05
+#define DMA_CNT_3 0x07
+#define DMA_CNT_4 0xC2
+#define DMA_CNT_5 0xC6
+#define DMA_CNT_6 0xCA
+#define DMA_CNT_7 0xCE
+
+#define DMA_PAGE_0 0x87 /* DMA page registers */
+#define DMA_PAGE_1 0x83
+#define DMA_PAGE_2 0x81
+#define DMA_PAGE_3 0x82
+#define DMA_PAGE_5 0x8B
+#define DMA_PAGE_6 0x89
+#define DMA_PAGE_7 0x8A
+
+#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
+#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
+#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
+
+#define DMA_AUTOINIT 0x10
+
+extern spinlock_t dma_spin_lock;
+
+static __inline__ unsigned long claim_dma_lock(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dma_spin_lock, flags);
+ return flags;
+}
+
+static __inline__ void release_dma_lock(unsigned long flags)
+{
+ spin_unlock_irqrestore(&dma_spin_lock, flags);
+}
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(dmanr, DMA1_MASK_REG);
+ else
+ dma_outb(dmanr & 3, DMA2_MASK_REG);
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(dmanr | 4, DMA1_MASK_REG);
+ else
+ dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while holding the DMA lock ! ---
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(0, DMA1_CLEAR_FF_REG);
+ else
+ dma_outb(0, DMA2_CLEAR_FF_REG);
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+ if (dmanr<=3)
+ dma_outb(mode | dmanr, DMA1_MODE_REG);
+ else
+ dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
+}
+
+/* Set only the page register bits of the transfer address.
+ * This is used for successive transfers when we know the contents of
+ * the lower 16 bits of the DMA current address register, but a 64k boundary
+ * may have been crossed.
+ */
+static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
+{
+ switch(dmanr) {
+ case 0:
+ dma_outb(pagenr, DMA_PAGE_0);
+ break;
+ case 1:
+ dma_outb(pagenr, DMA_PAGE_1);
+ break;
+ case 2:
+ dma_outb(pagenr, DMA_PAGE_2);
+ break;
+ case 3:
+ dma_outb(pagenr, DMA_PAGE_3);
+ break;
+ case 5:
+ dma_outb(pagenr & 0xfe, DMA_PAGE_5);
+ break;
+ case 6:
+ dma_outb(pagenr & 0xfe, DMA_PAGE_6);
+ break;
+ case 7:
+ dma_outb(pagenr & 0xfe, DMA_PAGE_7);
+ break;
+ }
+}
+
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+ set_dma_page(dmanr, a>>16);
+ if (dmanr <= 3) {
+ dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+ dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+ } else {
+ dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+ dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+ }
+}
+
+
+/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for
+ * a specific DMA channel.
+ * You must ensure the parameters are valid.
+ * NOTE: from a manual: "the number of transfers is one more
+ * than the initial word count"! This is taken into account.
+ * Assumes dma flip-flop is clear.
+ * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+ count--;
+ if (dmanr <= 3) {
+ dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+ dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+ } else {
+ dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+ dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+ }
+}
+
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ *
+ * Assumes DMA flip-flop is clear.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+ unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
+ : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
+
+ /* using short to get 16-bit wrap around */
+ unsigned short count;
+
+ count = 1 + dma_inb(io_port);
+ count += dma_inb(io_port) << 8;
+
+ return (dmanr<=3)? count : (count<<1);
+}
+
+
+/* These are in kernel/dma.c: */
+extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
+extern void free_dma(unsigned int dmanr); /* release it again */
+
+/* From PCI */
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+#endif /* _ASM_DMA_H */
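[Aside, not part of the patch: the helpers above follow the canonical 8237 programming sequence, namely claim the lock, mask the channel, clear the flip-flop, program mode/address/count, then unmask. A hedged sketch of that sequence; the channel, device name and buffer handling are placeholders.]

#include <linux/errno.h>
#include <asm/dma.h>

static int example_start_isa_dma(unsigned int chan, unsigned int phys_addr,
				 unsigned int len)
{
	unsigned long flags;

	if (request_dma(chan, "example-dev"))	/* reserve the channel */
		return -EBUSY;

	flags = claim_dma_lock();
	disable_dma(chan);
	clear_dma_ff(chan);
	set_dma_mode(chan, DMA_MODE_READ);	/* device -> memory */
	set_dma_addr(chan, phys_addr);		/* physical address, below 16MB */
	set_dma_count(chan, len);		/* bytes; must be even on ch 5-7 */
	enable_dma(chan);
	release_dma_lock(flags);

	return 0;
}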
diff --git a/arch/mips/include/asm/dmi.h b/arch/mips/include/asm/dmi.h
new file mode 100644
index 000000000..dc397f630
--- /dev/null
+++ b/arch/mips/include/asm/dmi.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_DMI_H
+#define _ASM_DMI_H
+
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#define dmi_early_remap(x, l) ioremap(x, l)
+#define dmi_early_unmap(x, l) iounmap(x)
+#define dmi_remap(x, l) ioremap_cache(x, l)
+#define dmi_unmap(x) iounmap(x)
+
+/* MIPS initializes the DMI scan before SLAB is ready, so we use memblock here */
+#define dmi_alloc(l) memblock_alloc_low(l, PAGE_SIZE)
+
+#if defined(CONFIG_MACH_LOONGSON64)
+#define SMBIOS_ENTRY_POINT_SCAN_START 0xFFFE000
+#endif
+
+#endif /* _ASM_DMI_H */
diff --git a/arch/mips/include/asm/ds1287.h b/arch/mips/include/asm/ds1287.h
new file mode 100644
index 000000000..46cfb01f9
--- /dev/null
+++ b/arch/mips/include/asm/ds1287.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * DS1287 timer functions.
+ *
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#ifndef __ASM_DS1287_H
+#define __ASM_DS1287_H
+
+extern int ds1287_timer_state(void);
+extern void ds1287_set_base_clock(unsigned int clock);
+extern int ds1287_clockevent_init(int irq);
+
+#endif
diff --git a/arch/mips/include/asm/dsemul.h b/arch/mips/include/asm/dsemul.h
new file mode 100644
index 000000000..08bfe8fa3
--- /dev/null
+++ b/arch/mips/include/asm/dsemul.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#ifndef __MIPS_ASM_DSEMUL_H__
+#define __MIPS_ASM_DSEMUL_H__
+
+#include <asm/break.h>
+#include <asm/inst.h>
+
+/* Break instruction with special math emu break code set */
+#define BREAK_MATH(micromips) (((micromips) ? 0x7 : 0xd) | (BRK_MEMU << 16))
+
+/* When used as a frame index, indicates the lack of a frame */
+#define BD_EMUFRAME_NONE ((int)BIT(31))
+
+struct mm_struct;
+struct pt_regs;
+struct task_struct;
+
+/**
+ * mips_dsemul() - 'Emulate' an instruction from a branch delay slot
+ * @regs: User thread register context.
+ * @ir: The instruction to be 'emulated'.
+ * @branch_pc: The PC of the branch instruction.
+ * @cont_pc: The PC to continue at following 'emulation'.
+ *
+ * Emulate or execute an arbitrary MIPS instruction within the context of
+ * the current user thread. This is used primarily to handle instructions
+ * in the delay slots of emulated branch instructions, for example FP
+ * branch instructions on systems without an FPU.
+ *
+ * Return: Zero on success, negative if ir is a NOP, signal number on failure.
+ */
+extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
+ unsigned long branch_pc, unsigned long cont_pc);
+
+/**
+ * do_dsemulret() - Return from a delay slot 'emulation' frame
+ * @xcp: User thread register context.
+ *
+ * Call in response to the BRK_MEMU break instruction used to return to
+ * the kernel from branch delay slot 'emulation' frames following a call
+ * to mips_dsemul(). Restores the user thread PC to the value that was
+ * passed as the cont_pc parameter to mips_dsemul().
+ *
+ * Return: True if an emulation frame was returned from, else false.
+ */
+#ifdef CONFIG_MIPS_FP_SUPPORT
+extern bool do_dsemulret(struct pt_regs *xcp);
+#else
+static inline bool do_dsemulret(struct pt_regs *xcp)
+{
+ return false;
+}
+#endif
+
+/**
+ * dsemul_thread_cleanup() - Cleanup thread 'emulation' frame
+ * @tsk: The task structure associated with the thread
+ *
+ * If the thread @tsk has a branch delay slot 'emulation' frame
+ * allocated to it then free that frame.
+ *
+ * Return: True if a frame was freed, else false.
+ */
+#ifdef CONFIG_MIPS_FP_SUPPORT
+extern bool dsemul_thread_cleanup(struct task_struct *tsk);
+#else
+static inline bool dsemul_thread_cleanup(struct task_struct *tsk)
+{
+ return false;
+}
+#endif
+/**
+ * dsemul_thread_rollback() - Rollback from an 'emulation' frame
+ * @regs: User thread register context.
+ *
+ * If the current thread, whose register context is represented by @regs,
+ * is executing within a delay slot 'emulation' frame then exit that
+ * frame. The PC will be rolled back to the branch if the instruction
+ * that was being 'emulated' has not yet executed, or advanced to the
+ * continuation PC if it has.
+ *
+ * Return: True if a frame was exited, else false.
+ */
+#ifdef CONFIG_MIPS_FP_SUPPORT
+extern bool dsemul_thread_rollback(struct pt_regs *regs);
+#else
+static inline bool dsemul_thread_rollback(struct pt_regs *regs)
+{
+ return false;
+}
+#endif
+
+/**
+ * dsemul_mm_cleanup() - Cleanup per-mm delay slot 'emulation' state
+ * @mm: The struct mm_struct to cleanup state for.
+ *
+ * Cleanup state for the given @mm, ensuring that any memory allocated
+ * for delay slot 'emulation' book-keeping is freed. This is to be called
+ * before @mm is freed in order to avoid memory leaks.
+ */
+#ifdef CONFIG_MIPS_FP_SUPPORT
+extern void dsemul_mm_cleanup(struct mm_struct *mm);
+#else
+static inline void dsemul_mm_cleanup(struct mm_struct *mm)
+{
+ /* no-op */
+}
+#endif
+
+#endif /* __MIPS_ASM_DSEMUL_H__ */
diff --git a/arch/mips/include/asm/dsp.h b/arch/mips/include/asm/dsp.h
new file mode 100644
index 000000000..77fe0d675
--- /dev/null
+++ b/arch/mips/include/asm/dsp.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2005 Mips Technologies
+ * Author: Chris Dearman, chris@mips.com derived from fpu.h
+ */
+#ifndef _ASM_DSP_H
+#define _ASM_DSP_H
+
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/hazards.h>
+#include <asm/mipsregs.h>
+
+#define DSP_DEFAULT 0x00000000
+#define DSP_MASK 0x3f
+
+#define __enable_dsp_hazard() \
+do { \
+ asm("_ehb"); \
+} while (0)
+
+static inline void __init_dsp(void)
+{
+ mthi1(0);
+ mtlo1(0);
+ mthi2(0);
+ mtlo2(0);
+ mthi3(0);
+ mtlo3(0);
+ wrdsp(DSP_DEFAULT, DSP_MASK);
+}
+
+static inline void init_dsp(void)
+{
+ if (cpu_has_dsp)
+ __init_dsp();
+}
+
+#define __save_dsp(tsk) \
+do { \
+ tsk->thread.dsp.dspr[0] = mfhi1(); \
+ tsk->thread.dsp.dspr[1] = mflo1(); \
+ tsk->thread.dsp.dspr[2] = mfhi2(); \
+ tsk->thread.dsp.dspr[3] = mflo2(); \
+ tsk->thread.dsp.dspr[4] = mfhi3(); \
+ tsk->thread.dsp.dspr[5] = mflo3(); \
+ tsk->thread.dsp.dspcontrol = rddsp(DSP_MASK); \
+} while (0)
+
+#define save_dsp(tsk) \
+do { \
+ if (cpu_has_dsp) \
+ __save_dsp(tsk); \
+} while (0)
+
+#define __restore_dsp(tsk) \
+do { \
+ mthi1(tsk->thread.dsp.dspr[0]); \
+ mtlo1(tsk->thread.dsp.dspr[1]); \
+ mthi2(tsk->thread.dsp.dspr[2]); \
+ mtlo2(tsk->thread.dsp.dspr[3]); \
+ mthi3(tsk->thread.dsp.dspr[4]); \
+ mtlo3(tsk->thread.dsp.dspr[5]); \
+ wrdsp(tsk->thread.dsp.dspcontrol, DSP_MASK); \
+} while (0)
+
+#define restore_dsp(tsk) \
+do { \
+ if (cpu_has_dsp) \
+ __restore_dsp(tsk); \
+} while (0)
+
+#define __get_dsp_regs(tsk) \
+({ \
+ if (tsk == current) \
+ __save_dsp(current); \
+ \
+ tsk->thread.dsp.dspr; \
+})
+
+#endif /* _ASM_DSP_H */
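The save/restore pairs above are intended to bracket a task switch. A minimal sketch, assuming prev and next are the outgoing and incoming task_struct pointers as in the MIPS switch_to() path (the macros already test cpu_has_dsp internally):

	/* Illustrative only: move the DSP accumulator state across a task switch. */
	static void switch_dsp_state(struct task_struct *prev,
				     struct task_struct *next)
	{
		save_dsp(prev);		/* no-op unless the CPU has the DSP ASE */
		restore_dsp(next);
	}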
diff --git a/arch/mips/include/asm/edac.h b/arch/mips/include/asm/edac.h
new file mode 100644
index 000000000..c5d147744
--- /dev/null
+++ b/arch/mips/include/asm/edac.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+
+#include <asm/compiler.h>
+
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static inline void edac_atomic_scrub(void *va, u32 size)
+{
+ unsigned long *virt_addr = va;
+ unsigned long temp;
+ u32 i;
+
+ for (i = 0; i < size / sizeof(unsigned long); i++) {
+ /*
+ * Very carefully read and write to memory atomically
+ * so we are interrupt, DMA and SMP safe.
+ *
+ * Intel: asm("lock; addl $0, %0"::"m"(*virt_addr));
+ */
+
+ __asm__ __volatile__ (
+ " .set push \n"
+ " .set mips2 \n"
+ "1: ll %0, %1 # edac_atomic_scrub \n"
+ " addu %0, $0 \n"
+ " sc %0, %1 \n"
+ " beqz %0, 1b \n"
+ " .set pop \n"
+ : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr)
+ : GCC_OFF_SMALL_ASM() (*virt_addr));
+
+ virt_addr++;
+ }
+}
+
+#endif
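A hedged sketch of the calling convention: the generic EDAC core maps the page that reported a corrected error and scrubs the affected bytes, roughly as below ('page', 'offset' and 'size' are assumed to come from the decoded error report, and kmap_atomic() is the usual highmem mapping helper):

	/* Illustrative only: rewrite the corrected data so ECC is regenerated. */
	static void scrub_corrected_error(struct page *page, unsigned long offset,
					  u32 size)
	{
		void *va = kmap_atomic(page);

		edac_atomic_scrub(va + offset, size);
		kunmap_atomic(va);
	}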
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
new file mode 100644
index 000000000..71c762202
--- /dev/null
+++ b/arch/mips/include/asm/elf.h
@@ -0,0 +1,538 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Much of this is taken from binutils and GNU libc ...
+ */
+#ifndef _ASM_ELF_H
+#define _ASM_ELF_H
+
+#include <linux/auxvec.h>
+#include <linux/fs.h>
+#include <linux/mm_types.h>
+
+#include <uapi/linux/elf.h>
+
+#include <asm/current.h>
+
+/* ELF header e_flags defines. */
+/* MIPS architecture level. */
+#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
+#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
+#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
+#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
+#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
+#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */
+#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */
+#define EF_MIPS_ARCH_32R2 0x70000000 /* MIPS32 R2 code. */
+#define EF_MIPS_ARCH_64R2 0x80000000 /* MIPS64 R2 code. */
+
+/* The ABI of a file. */
+#define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */
+#define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. */
+
+#define PT_MIPS_REGINFO 0x70000000
+#define PT_MIPS_RTPROC 0x70000001
+#define PT_MIPS_OPTIONS 0x70000002
+#define PT_MIPS_ABIFLAGS 0x70000003
+
+/* Flags in the e_flags field of the header */
+#define EF_MIPS_NOREORDER 0x00000001
+#define EF_MIPS_PIC 0x00000002
+#define EF_MIPS_CPIC 0x00000004
+#define EF_MIPS_ABI2 0x00000020
+#define EF_MIPS_OPTIONS_FIRST 0x00000080
+#define EF_MIPS_32BITMODE 0x00000100
+#define EF_MIPS_FP64 0x00000200
+#define EF_MIPS_NAN2008 0x00000400
+#define EF_MIPS_ABI 0x0000f000
+#define EF_MIPS_ARCH 0xf0000000
+
+#define DT_MIPS_RLD_VERSION 0x70000001
+#define DT_MIPS_TIME_STAMP 0x70000002
+#define DT_MIPS_ICHECKSUM 0x70000003
+#define DT_MIPS_IVERSION 0x70000004
+#define DT_MIPS_FLAGS 0x70000005
+ #define RHF_NONE 0x00000000
+ #define RHF_HARDWAY 0x00000001
+ #define RHF_NOTPOT 0x00000002
+ #define RHF_SGI_ONLY 0x00000010
+#define DT_MIPS_BASE_ADDRESS 0x70000006
+#define DT_MIPS_CONFLICT 0x70000008
+#define DT_MIPS_LIBLIST 0x70000009
+#define DT_MIPS_LOCAL_GOTNO 0x7000000a
+#define DT_MIPS_CONFLICTNO 0x7000000b
+#define DT_MIPS_LIBLISTNO 0x70000010
+#define DT_MIPS_SYMTABNO 0x70000011
+#define DT_MIPS_UNREFEXTNO 0x70000012
+#define DT_MIPS_GOTSYM 0x70000013
+#define DT_MIPS_HIPAGENO 0x70000014
+#define DT_MIPS_RLD_MAP 0x70000016
+
+#define R_MIPS_NONE 0
+#define R_MIPS_16 1
+#define R_MIPS_32 2
+#define R_MIPS_REL32 3
+#define R_MIPS_26 4
+#define R_MIPS_HI16 5
+#define R_MIPS_LO16 6
+#define R_MIPS_GPREL16 7
+#define R_MIPS_LITERAL 8
+#define R_MIPS_GOT16 9
+#define R_MIPS_PC16 10
+#define R_MIPS_CALL16 11
+#define R_MIPS_GPREL32 12
+/* The remaining relocs are defined on Irix, although they are not
+ in the MIPS ELF ABI. */
+#define R_MIPS_UNUSED1 13
+#define R_MIPS_UNUSED2 14
+#define R_MIPS_UNUSED3 15
+#define R_MIPS_SHIFT5 16
+#define R_MIPS_SHIFT6 17
+#define R_MIPS_64 18
+#define R_MIPS_GOT_DISP 19
+#define R_MIPS_GOT_PAGE 20
+#define R_MIPS_GOT_OFST 21
+/*
+ * The following two relocation types are specified in the MIPS ABI
+ * conformance guide version 1.2 but not yet in the psABI.
+ */
+#define R_MIPS_GOTHI16 22
+#define R_MIPS_GOTLO16 23
+#define R_MIPS_SUB 24
+#define R_MIPS_INSERT_A 25
+#define R_MIPS_INSERT_B 26
+#define R_MIPS_DELETE 27
+#define R_MIPS_HIGHER 28
+#define R_MIPS_HIGHEST 29
+/*
+ * The following two relocation types are specified in the MIPS ABI
+ * conformance guide version 1.2 but not yet in the psABI.
+ */
+#define R_MIPS_CALLHI16 30
+#define R_MIPS_CALLLO16 31
+/*
+ * Introduced for MIPSr6.
+ */
+#define R_MIPS_PC21_S2 60
+#define R_MIPS_PC26_S2 61
+/*
+ * This range is reserved for vendor specific relocations.
+ */
+#define R_MIPS_LOVENDOR 100
+#define R_MIPS_HIVENDOR 127
+
+#define SHN_MIPS_ACCOMON 0xff00 /* Allocated common symbols */
+#define SHN_MIPS_TEXT 0xff01 /* Allocated text symbols. */
+#define SHN_MIPS_DATA 0xff02 /* Allocated data symbols. */
+#define SHN_MIPS_SCOMMON 0xff03 /* Small common symbols */
+#define SHN_MIPS_SUNDEFINED 0xff04 /* Small undefined symbols */
+
+#define SHT_MIPS_LIST 0x70000000
+#define SHT_MIPS_CONFLICT 0x70000002
+#define SHT_MIPS_GPTAB 0x70000003
+#define SHT_MIPS_UCODE 0x70000004
+#define SHT_MIPS_DEBUG 0x70000005
+#define SHT_MIPS_REGINFO 0x70000006
+#define SHT_MIPS_PACKAGE 0x70000007
+#define SHT_MIPS_PACKSYM 0x70000008
+#define SHT_MIPS_RELD 0x70000009
+#define SHT_MIPS_IFACE 0x7000000b
+#define SHT_MIPS_CONTENT 0x7000000c
+#define SHT_MIPS_OPTIONS 0x7000000d
+#define SHT_MIPS_SHDR 0x70000010
+#define SHT_MIPS_FDESC 0x70000011
+#define SHT_MIPS_EXTSYM 0x70000012
+#define SHT_MIPS_DENSE 0x70000013
+#define SHT_MIPS_PDESC 0x70000014
+#define SHT_MIPS_LOCSYM 0x70000015
+#define SHT_MIPS_AUXSYM 0x70000016
+#define SHT_MIPS_OPTSYM 0x70000017
+#define SHT_MIPS_LOCSTR 0x70000018
+#define SHT_MIPS_LINE 0x70000019
+#define SHT_MIPS_RFDESC 0x7000001a
+#define SHT_MIPS_DELTASYM 0x7000001b
+#define SHT_MIPS_DELTAINST 0x7000001c
+#define SHT_MIPS_DELTACLASS 0x7000001d
+#define SHT_MIPS_DWARF 0x7000001e
+#define SHT_MIPS_DELTADECL 0x7000001f
+#define SHT_MIPS_SYMBOL_LIB 0x70000020
+#define SHT_MIPS_EVENTS 0x70000021
+#define SHT_MIPS_TRANSLATE 0x70000022
+#define SHT_MIPS_PIXIE 0x70000023
+#define SHT_MIPS_XLATE 0x70000024
+#define SHT_MIPS_XLATE_DEBUG 0x70000025
+#define SHT_MIPS_WHIRL 0x70000026
+#define SHT_MIPS_EH_REGION 0x70000027
+#define SHT_MIPS_XLATE_OLD 0x70000028
+#define SHT_MIPS_PDR_EXCEPTION 0x70000029
+
+#define SHF_MIPS_GPREL 0x10000000
+#define SHF_MIPS_MERGE 0x20000000
+#define SHF_MIPS_ADDR 0x40000000
+#define SHF_MIPS_STRING 0x80000000
+#define SHF_MIPS_NOSTRIP 0x08000000
+#define SHF_MIPS_LOCAL 0x04000000
+#define SHF_MIPS_NAMES 0x02000000
+#define SHF_MIPS_NODUPES 0x01000000
+
+#define MIPS_ABI_FP_ANY 0 /* FP ABI doesn't matter */
+#define MIPS_ABI_FP_DOUBLE 1 /* -mdouble-float */
+#define MIPS_ABI_FP_SINGLE 2 /* -msingle-float */
+#define MIPS_ABI_FP_SOFT 3 /* -msoft-float */
+#define MIPS_ABI_FP_OLD_64 4 /* -mips32r2 -mfp64 */
+#define MIPS_ABI_FP_XX 5 /* -mfpxx */
+#define MIPS_ABI_FP_64 6 /* -mips32r2 -mfp64 */
+#define MIPS_ABI_FP_64A 7 /* -mips32r2 -mfp64 -mno-odd-spreg */
+
+struct mips_elf_abiflags_v0 {
+ uint16_t version; /* Version of flags structure */
+ uint8_t isa_level; /* The level of the ISA: 1-5, 32, 64 */
+ uint8_t isa_rev; /* The revision of ISA: 0 for MIPS V and below,
+ 1-n otherwise */
+ uint8_t gpr_size; /* The size of general purpose registers */
+ uint8_t cpr1_size; /* The size of co-processor 1 registers */
+ uint8_t cpr2_size; /* The size of co-processor 2 registers */
+ uint8_t fp_abi; /* The floating-point ABI */
+ uint32_t isa_ext; /* Mask of processor-specific extensions */
+ uint32_t ases; /* Mask of ASEs used */
+ uint32_t flags1; /* Mask of general flags */
+ uint32_t flags2;
+};
+
+#ifndef ELF_ARCH
+/* ELF register definitions */
+#define ELF_NGREG 45
+#define ELF_NFPREG 33
+
+typedef unsigned long elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef double elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs);
+void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs);
+
+#ifdef CONFIG_32BIT
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch elfo32_check_arch
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+
+#define ELF_CORE_COPY_REGS(dest, regs) \
+ mips_dump_regs32((u32 *)&(dest), (regs));
+
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_64BIT
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch elfn64_check_arch
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS64
+
+#define ELF_CORE_COPY_REGS(dest, regs) \
+ mips_dump_regs64((u64 *)&(dest), (regs));
+
+#endif /* CONFIG_64BIT */
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#ifdef __MIPSEB__
+#define ELF_DATA ELFDATA2MSB
+#elif defined(__MIPSEL__)
+#define ELF_DATA ELFDATA2LSB
+#endif
+#define ELF_ARCH EM_MIPS
+
+#endif /* !defined(ELF_ARCH) */
+
+/*
+ * In order to be sure that we don't attempt to execute an O32 binary which
+ * requires 64 bit FP (FR=1) on a system which does not support it we refuse
+ * to execute any binary which has bits specified by the following macro set
+ * in its ELF header flags.
+ */
+#ifdef CONFIG_MIPS_O32_FP64_SUPPORT
+# define __MIPS_O32_FP64_MUST_BE_ZERO 0
+#else
+# define __MIPS_O32_FP64_MUST_BE_ZERO EF_MIPS_FP64
+#endif
+
+#define mips_elf_check_machine(x) ((x)->e_machine == EM_MIPS)
+
+#define vmcore_elf32_check_arch mips_elf_check_machine
+#define vmcore_elf64_check_arch mips_elf_check_machine
+
+/*
+ * Return non-zero if HDR identifies an o32 ELF binary.
+ */
+#define elfo32_check_arch(hdr) \
+({ \
+ int __res = 1; \
+ struct elfhdr *__h = (hdr); \
+ \
+ if (!mips_elf_check_machine(__h)) \
+ __res = 0; \
+ if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
+ __res = 0; \
+ if ((__h->e_flags & EF_MIPS_ABI2) != 0) \
+ __res = 0; \
+ if (((__h->e_flags & EF_MIPS_ABI) != 0) && \
+ ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \
+ __res = 0; \
+ if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO) \
+ __res = 0; \
+ \
+ __res; \
+})
+
+/*
+ * Return non-zero if HDR identifies an n64 ELF binary.
+ */
+#define elfn64_check_arch(hdr) \
+({ \
+ int __res = 1; \
+ struct elfhdr *__h = (hdr); \
+ \
+ if (!mips_elf_check_machine(__h)) \
+ __res = 0; \
+ if (__h->e_ident[EI_CLASS] != ELFCLASS64) \
+ __res = 0; \
+ \
+ __res; \
+})
+
+/*
+ * Return non-zero if HDR identifies an n32 ELF binary.
+ */
+#define elfn32_check_arch(hdr) \
+({ \
+ int __res = 1; \
+ struct elfhdr *__h = (hdr); \
+ \
+ if (!mips_elf_check_machine(__h)) \
+ __res = 0; \
+ if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
+ __res = 0; \
+ if (((__h->e_flags & EF_MIPS_ABI2) == 0) || \
+ ((__h->e_flags & EF_MIPS_ABI) != 0)) \
+ __res = 0; \
+ \
+ __res; \
+})
+
+struct mips_abi;
+
+extern struct mips_abi mips_abi;
+extern struct mips_abi mips_abi_32;
+extern struct mips_abi mips_abi_n32;
+
+#ifdef CONFIG_32BIT
+
+#define SET_PERSONALITY2(ex, state) \
+do { \
+ clear_thread_flag(TIF_HYBRID_FPREGS); \
+ set_thread_flag(TIF_32BIT_FPREGS); \
+ \
+ current->thread.abi = &mips_abi; \
+ \
+ mips_set_personality_fp(state); \
+ mips_set_personality_nan(state); \
+ \
+ if (personality(current->personality) != PER_LINUX) \
+ set_personality(PER_LINUX); \
+} while (0)
+
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_64BIT
+
+#ifdef CONFIG_MIPS32_N32
+#define __SET_PERSONALITY32_N32() \
+ do { \
+ set_thread_flag(TIF_32BIT_ADDR); \
+ \
+ current->thread.abi = &mips_abi_n32; \
+ } while (0)
+#else
+#define __SET_PERSONALITY32_N32() \
+ do { } while (0)
+#endif
+
+#ifdef CONFIG_MIPS32_O32
+#define __SET_PERSONALITY32_O32(ex, state) \
+ do { \
+ set_thread_flag(TIF_32BIT_REGS); \
+ set_thread_flag(TIF_32BIT_ADDR); \
+ clear_thread_flag(TIF_HYBRID_FPREGS); \
+ set_thread_flag(TIF_32BIT_FPREGS); \
+ \
+ current->thread.abi = &mips_abi_32; \
+ \
+ mips_set_personality_fp(state); \
+ } while (0)
+#else
+#define __SET_PERSONALITY32_O32(ex, state) \
+ do { } while (0)
+#endif
+
+#ifdef CONFIG_MIPS32_COMPAT
+#define __SET_PERSONALITY32(ex, state) \
+do { \
+ if ((((ex).e_flags & EF_MIPS_ABI2) != 0) && \
+ ((ex).e_flags & EF_MIPS_ABI) == 0) \
+ __SET_PERSONALITY32_N32(); \
+ else \
+ __SET_PERSONALITY32_O32(ex, state); \
+} while (0)
+#else
+#define __SET_PERSONALITY32(ex, state) do { } while (0)
+#endif
+
+#define SET_PERSONALITY2(ex, state) \
+do { \
+ unsigned int p; \
+ \
+ clear_thread_flag(TIF_32BIT_REGS); \
+ clear_thread_flag(TIF_32BIT_FPREGS); \
+ clear_thread_flag(TIF_HYBRID_FPREGS); \
+ clear_thread_flag(TIF_32BIT_ADDR); \
+ current->personality &= ~READ_IMPLIES_EXEC; \
+ \
+ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+ __SET_PERSONALITY32(ex, state); \
+ else \
+ current->thread.abi = &mips_abi; \
+ \
+ mips_set_personality_nan(state); \
+ \
+ p = personality(current->personality); \
+ if (p != PER_LINUX32 && p != PER_LINUX) \
+ set_personality(PER_LINUX); \
+} while (0)
+
+#endif /* CONFIG_64BIT */
+
+#define CORE_DUMP_USE_REGSET
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This could be done in userspace,
+ but it's not easy, and we've already done it here. */
+
+#define ELF_HWCAP (elf_hwcap)
+extern unsigned int elf_hwcap;
+#include <asm/hwcap.h>
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+
+#define ELF_PLATFORM __elf_platform
+extern const char *__elf_platform;
+
+#define ELF_BASE_PLATFORM __elf_base_platform
+extern const char *__elf_base_platform;
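These exports are surfaced to user space through the auxiliary vector by the generic ELF loader. A hedged userspace-side sketch of how a program would read them back (getauxval() is the glibc helper; the individual capability bits live in <asm/hwcap.h>):

	/* Illustrative only: query the MIPS hwcap and platform auxv entries. */
	#include <stdio.h>
	#include <sys/auxv.h>

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);
		const char *plat = (const char *)getauxval(AT_PLATFORM);
		const char *base = (const char *)getauxval(AT_BASE_PLATFORM);

		printf("hwcap:         %#lx\n", hwcap);
		printf("platform:      %s\n", plat ? plat : "(none)");
		printf("base platform: %s\n", base ? base : "(none)");
		return 0;
	}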
+
+/*
+ * See comments in asm-alpha/elf.h, this is the same thing
+ * on the MIPS.
+ */
+#define ELF_PLAT_INIT(_r, load_addr) do { \
+ _r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \
+ _r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \
+ _r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0; \
+ _r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \
+ _r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \
+ _r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \
+ _r->regs[25] = _r->regs[26] = _r->regs[27] = _r->regs[28] = 0; \
+ _r->regs[30] = _r->regs[31] = 0; \
+} while (0)
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#ifndef ELF_ET_DYN_BASE
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+#endif
+
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+#define ARCH_DLINFO \
+do { \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (unsigned long)current->mm->context.vdso); \
+} while (0)
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+struct arch_elf_state {
+ int nan_2008;
+ int fp_abi;
+ int interp_fp_abi;
+ int overall_fp_mode;
+};
+
+#define MIPS_ABI_FP_UNKNOWN (-1) /* Unknown FP ABI (kernel internal) */
+
+#define INIT_ARCH_ELF_STATE { \
+ .nan_2008 = -1, \
+ .fp_abi = MIPS_ABI_FP_UNKNOWN, \
+ .interp_fp_abi = MIPS_ABI_FP_UNKNOWN, \
+ .overall_fp_mode = -1, \
+}
+
+extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
+ bool is_interp, struct arch_elf_state *state);
+
+extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
+ struct arch_elf_state *state);
+
+/* Whether to accept legacy-NaN and 2008-NaN user binaries. */
+extern bool mips_use_nan_legacy;
+extern bool mips_use_nan_2008;
+
+extern void mips_set_personality_nan(struct arch_elf_state *state);
+extern void mips_set_personality_fp(struct arch_elf_state *state);
+
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+struct arch_elf_state;
+
+static inline void mips_set_personality_nan(struct arch_elf_state *state)
+{
+ /* no-op */
+}
+
+static inline void mips_set_personality_fp(struct arch_elf_state *state)
+{
+ /* no-op */
+}
+
+#endif /* !CONFIG_MIPS_FP_SUPPORT */
+
+#define elf_read_implies_exec(ex, stk) mips_elf_read_implies_exec(&(ex), stk)
+extern int mips_elf_read_implies_exec(void *elf_ex, int exstack);
+
+#endif /* _ASM_ELF_H */
diff --git a/arch/mips/include/asm/errno.h b/arch/mips/include/asm/errno.h
new file mode 100644
index 000000000..21d91cdfe
--- /dev/null
+++ b/arch/mips/include/asm/errno.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1999, 2001, 2002 by Ralf Baechle
+ */
+#ifndef _ASM_ERRNO_H
+#define _ASM_ERRNO_H
+
+#include <uapi/asm/errno.h>
+
+
+/* The biggest error number defined here or in <linux/errno.h>. */
+#define EMAXERRNO 1133
+
+#endif /* _ASM_ERRNO_H */
diff --git a/arch/mips/include/asm/eva.h b/arch/mips/include/asm/eva.h
new file mode 100644
index 000000000..a3d1807f2
--- /dev/null
+++ b/arch/mips/include/asm/eva.h
@@ -0,0 +1,43 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ *
+ * EVA functions for generic code
+ */
+
+#ifndef _ASM_EVA_H
+#define _ASM_EVA_H
+
+#include <kernel-entry-init.h>
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_EVA
+
+/*
+ * EVA early init code
+ *
+ * Platforms must define their own 'platform_eva_init' macro in
+ * their kernel-entry-init.h header. This macro usually does the
+ * platform specific configuration of the segmentation registers,
+ * and it is normally called from assembly code.
+ *
+ */
+
+.macro eva_init
+platform_eva_init
+.endm
+
+#else
+
+.macro eva_init
+.endm
+
+#endif /* CONFIG_EVA */
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
new file mode 100644
index 000000000..c1f6afa4b
--- /dev/null
+++ b/arch/mips/include/asm/exec.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#ifndef _ASM_EXEC_H
+#define _ASM_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* _ASM_EXEC_H */
diff --git a/arch/mips/include/asm/extable.h b/arch/mips/include/asm/extable.h
new file mode 100644
index 000000000..78d0ae156
--- /dev/null
+++ b/arch/mips/include/asm/extable.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_EXTABLE_H
+#define _ASM_EXTABLE_H
+
+struct exception_table_entry
+{
+ unsigned long insn;
+ unsigned long nextinsn;
+};
+
+struct pt_regs;
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif
diff --git a/arch/mips/include/asm/fb.h b/arch/mips/include/asm/fb.h
new file mode 100644
index 000000000..bd3f68c9d
--- /dev/null
+++ b/arch/mips/include/asm/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h
new file mode 100644
index 000000000..743535be7
--- /dev/null
+++ b/arch/mips/include/asm/fixmap.h
@@ -0,0 +1,79 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <asm/page.h>
+#include <spaces.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(): we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * higher than 1). Use fixmap_set(idx, phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+
+/*
+ * On UP we currently have no trace of the fixmap mechanism,
+ * no page table allocations, etc. This might change in the
+ * future, say framebuffers for the console driver(s) could be
+ * fix-mapped?
+ */
+enum fixed_addresses {
+#define FIX_N_COLOURS 8
+ FIX_CMAP_BEGIN,
+ FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * 2),
+#ifdef CONFIG_HIGHMEM
+ /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_BEGIN = FIX_CMAP_END + 1,
+ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
+ __end_of_fixed_addresses
+};
+
+/*
+ * used by vmalloc.c.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap, and leave one page empty
+ * at the top of mem..
+ */
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#include <asm-generic/fixmap.h>
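A hedged illustration of what the generic header pulled in above provides: each enum index corresponds to a fixed virtual address computed at compile time via fix_to_virt(), with virt_to_fix() as its inverse (FIX_CMAP_BEGIN is only used here as an example index):

	/* Illustrative only: fixmap indices map to constant virtual addresses. */
	static unsigned long example_fixmap_address(void)
	{
		/* Constant at compile time: FIXADDR_TOP - (index << PAGE_SHIFT). */
		return fix_to_virt(FIX_CMAP_BEGIN);
	}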
+
+/*
+ * Called from pgtable_init()
+ */
+extern void fixrange_init(unsigned long start, unsigned long end,
+ pgd_t *pgd_base);
+
+
+#endif
diff --git a/arch/mips/include/asm/floppy.h b/arch/mips/include/asm/floppy.h
new file mode 100644
index 000000000..021d09ae5
--- /dev/null
+++ b/arch/mips/include/asm/floppy.h
@@ -0,0 +1,56 @@
+/*
+ * Architecture specific parts of the Floppy driver
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 - 2000 Ralf Baechle
+ */
+#ifndef _ASM_FLOPPY_H
+#define _ASM_FLOPPY_H
+
+#include <asm/io.h>
+
+static inline void fd_cacheflush(char * addr, long size)
+{
+ dma_cache_wback_inv((unsigned long)addr, size);
+}
+
+#define MAX_BUFFER_SECTORS 24
+
+
+/*
+ * And on MIPS the CMOS info fails as well ...
+ *
+ * FIXME: This information should come from the ARC configuration tree
+ * or wherever a particular machine has stored this ...
+ */
+#define FLOPPY0_TYPE fd_drive_type(0)
+#define FLOPPY1_TYPE fd_drive_type(1)
+
+#define FDC1 fd_getfdaddr1()
+
+#define N_FDC 1 /* do you *really* want a second controller? */
+#define N_DRIVE 8
+
+/*
+ * The DMA channel used by the floppy controller cannot access data at
+ * addresses >= 16MB
+ *
+ * Went back to the 1MB limit, as some people had problems with the floppy
+ * driver otherwise. It doesn't matter much for performance anyway, as most
+ * floppy accesses go through the track buffer.
+ *
+ * On MIPSes using vdma, this actually means that *all* transfers go through
+ * the track buffer since 0x1000000 is always smaller than KSEG0/1.
+ * Actually this needs to be a bit more complicated because of the wide range
+ * of different hardware available with MIPS CPUs ...
+ */
+#define CROSS_64KB(a, s) ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)
+
+#define EXTRA_FLOPPY_PARAMS
+
+#include <floppy.h>
+
+#endif /* _ASM_FLOPPY_H */
diff --git a/arch/mips/include/asm/fpregdef.h b/arch/mips/include/asm/fpregdef.h
new file mode 100644
index 000000000..f184ba088
--- /dev/null
+++ b/arch/mips/include/asm/fpregdef.h
@@ -0,0 +1,113 @@
+/*
+ * Definitions for the FPU register names
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1999 Ralf Baechle
+ * Copyright (C) 1985 MIPS Computer Systems, Inc.
+ * Copyright (C) 1990 - 1992, 1999 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_FPREGDEF_H
+#define _ASM_FPREGDEF_H
+
+#include <asm/sgidefs.h>
+
+/*
+ * Starting with binutils 2.24.51.20140729, MIPS binutils warn about mixing
+ * hardfloat and softfloat object files. The kernel build uses soft-float by
+ * default, so we also need to pass -msoft-float along to GAS if it supports it.
+ * But this in turn causes assembler errors in files which access hardfloat
+ * registers. We detect if GAS supports "-msoft-float" in the Makefile and
+ * explicitly put ".set hardfloat" where floating point registers are touched.
+ */
+#ifdef GAS_HAS_SET_HARDFLOAT
+#define SET_HARDFLOAT .set hardfloat
+#else
+#define SET_HARDFLOAT
+#endif
+
+#if _MIPS_SIM == _MIPS_SIM_ABI32
+
+/*
+ * These definitions only cover the R3000-ish 16/32 register model.
+ * But we're trying to be R3000 friendly anyway ...
+ */
+#define fv0 $f0 /* return value */
+#define fv0f $f1
+#define fv1 $f2
+#define fv1f $f3
+#define fa0 $f12 /* argument registers */
+#define fa0f $f13
+#define fa1 $f14
+#define fa1f $f15
+#define ft0 $f4 /* caller saved */
+#define ft0f $f5
+#define ft1 $f6
+#define ft1f $f7
+#define ft2 $f8
+#define ft2f $f9
+#define ft3 $f10
+#define ft3f $f11
+#define ft4 $f16
+#define ft4f $f17
+#define ft5 $f18
+#define ft5f $f19
+#define fs0 $f20 /* callee saved */
+#define fs0f $f21
+#define fs1 $f22
+#define fs1f $f23
+#define fs2 $f24
+#define fs2f $f25
+#define fs3 $f26
+#define fs3f $f27
+#define fs4 $f28
+#define fs4f $f29
+#define fs5 $f30
+#define fs5f $f31
+
+#define fcr31 $31 /* FPU status register */
+
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
+
+#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
+
+#define fv0 $f0 /* return value */
+#define fv1 $f2
+#define fa0 $f12 /* argument registers */
+#define fa1 $f13
+#define fa2 $f14
+#define fa3 $f15
+#define fa4 $f16
+#define fa5 $f17
+#define fa6 $f18
+#define fa7 $f19
+#define ft0 $f4 /* caller saved */
+#define ft1 $f5
+#define ft2 $f6
+#define ft3 $f7
+#define ft4 $f8
+#define ft5 $f9
+#define ft6 $f10
+#define ft7 $f11
+#define ft8 $f20
+#define ft9 $f21
+#define ft10 $f22
+#define ft11 $f23
+#define ft12 $f1
+#define ft13 $f3
+#define fs0 $f24 /* callee saved */
+#define fs1 $f25
+#define fs2 $f26
+#define fs3 $f27
+#define fs4 $f28
+#define fs5 $f29
+#define fs6 $f30
+#define fs7 $f31
+
+#define fcr31 $31
+
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
+
+#endif /* _ASM_FPREGDEF_H */
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
new file mode 100644
index 000000000..08f9dd690
--- /dev/null
+++ b/arch/mips/include/asm/fpu.h
@@ -0,0 +1,328 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2002 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ */
+#ifndef _ASM_FPU_H
+#define _ASM_FPU_H
+
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/ptrace.h>
+#include <linux/thread_info.h>
+#include <linux/bitops.h>
+
+#include <asm/mipsregs.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/fpu_emulator.h>
+#include <asm/hazards.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/current.h>
+#include <asm/msa.h>
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+#include <asm/mips_mt.h>
+#endif
+
+/*
+ * This enum specifies a mode in which we want the FPU to operate, for cores
+ * which implement the Status.FR bit. Note that the bottom bit of the value
+ * purposefully matches the desired value of the Status.FR bit.
+ */
+enum fpu_mode {
+ FPU_32BIT = 0, /* FR = 0 */
+ FPU_64BIT, /* FR = 1, FRE = 0 */
+ FPU_AS_IS,
+ FPU_HYBRID, /* FR = 1, FRE = 1 */
+
+#define FPU_FR_MASK 0x1
+};
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+extern void _save_fp(struct task_struct *);
+extern void _restore_fp(struct task_struct *);
+
+#define __disable_fpu() \
+do { \
+ clear_c0_status(ST0_CU1); \
+ disable_fpu_hazard(); \
+} while (0)
+
+static inline int __enable_fpu(enum fpu_mode mode)
+{
+ int fr;
+
+ switch (mode) {
+ case FPU_AS_IS:
+ /* just enable the FPU in its current mode */
+ set_c0_status(ST0_CU1);
+ enable_fpu_hazard();
+ return 0;
+
+ case FPU_HYBRID:
+ if (!cpu_has_fre)
+ return SIGFPE;
+
+ /* set FRE */
+ set_c0_config5(MIPS_CONF5_FRE);
+ goto fr_common;
+
+ case FPU_64BIT:
+#if !(defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
+ defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_64BIT))
+ /* we only have a 32-bit FPU */
+ return SIGFPE;
+#endif
+ fallthrough;
+ case FPU_32BIT:
+ if (cpu_has_fre) {
+ /* clear FRE */
+ clear_c0_config5(MIPS_CONF5_FRE);
+ }
+fr_common:
+ /* set CU1 & change FR appropriately */
+ fr = (int)mode & FPU_FR_MASK;
+ change_c0_status(ST0_CU1 | ST0_FR, ST0_CU1 | (fr ? ST0_FR : 0));
+ enable_fpu_hazard();
+
+ /* check FR has the desired value */
+ if (!!(read_c0_status() & ST0_FR) == !!fr)
+ return 0;
+
+ /* unsupported FR value */
+ __disable_fpu();
+ return SIGFPE;
+
+ default:
+ BUG();
+ }
+
+ return SIGFPE;
+}
+
+#define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU)
+
+static inline int __is_fpu_owner(void)
+{
+ return test_thread_flag(TIF_USEDFPU);
+}
+
+static inline int is_fpu_owner(void)
+{
+ return cpu_has_fpu && __is_fpu_owner();
+}
+
+static inline int __own_fpu(void)
+{
+ enum fpu_mode mode;
+ int ret;
+
+ if (test_thread_flag(TIF_HYBRID_FPREGS))
+ mode = FPU_HYBRID;
+ else
+ mode = !test_thread_flag(TIF_32BIT_FPREGS);
+
+ ret = __enable_fpu(mode);
+ if (ret)
+ return ret;
+
+ KSTK_STATUS(current) |= ST0_CU1;
+ if (mode == FPU_64BIT || mode == FPU_HYBRID)
+ KSTK_STATUS(current) |= ST0_FR;
+ else /* mode == FPU_32BIT */
+ KSTK_STATUS(current) &= ~ST0_FR;
+
+ set_thread_flag(TIF_USEDFPU);
+ return 0;
+}
+
+static inline int own_fpu_inatomic(int restore)
+{
+ int ret = 0;
+
+ if (cpu_has_fpu && !__is_fpu_owner()) {
+ ret = __own_fpu();
+ if (restore && !ret)
+ _restore_fp(current);
+ }
+ return ret;
+}
+
+static inline int own_fpu(int restore)
+{
+ int ret;
+
+ preempt_disable();
+ ret = own_fpu_inatomic(restore);
+ preempt_enable();
+ return ret;
+}
+
+static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
+{
+ if (is_msa_enabled()) {
+ if (save) {
+ save_msa(tsk);
+ tsk->thread.fpu.fcr31 =
+ read_32bit_cp1_register(CP1_STATUS);
+ }
+ disable_msa();
+ clear_tsk_thread_flag(tsk, TIF_USEDMSA);
+ __disable_fpu();
+ } else if (is_fpu_owner()) {
+ if (save)
+ _save_fp(tsk);
+ __disable_fpu();
+ } else {
+ /* FPU should not have been left enabled with no owner */
+ WARN(read_c0_status() & ST0_CU1,
+ "Orphaned FPU left enabled");
+ }
+ KSTK_STATUS(tsk) &= ~ST0_CU1;
+ clear_tsk_thread_flag(tsk, TIF_USEDFPU);
+}
+
+static inline void lose_fpu(int save)
+{
+ preempt_disable();
+ lose_fpu_inatomic(save, current);
+ preempt_enable();
+}
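A hedged sketch of the usual calling pattern: kernel code that needs the live FPU context of current takes ownership with own_fpu(), does its work, then releases the unit with lose_fpu(); a non-zero return is a signal number (e.g. SIGFPE) rather than an errno value:

	/* Illustrative only: bracket direct FPU register access. */
	static int poke_fpu_state(void)
	{
		int err;

		err = own_fpu(1);	/* enable the FPU, restore current's context */
		if (err)
			return err;	/* signal number; no usable FPU in this mode */

		/* ... read or modify the hardware FP registers here ... */

		lose_fpu(1);		/* save the context back and disable the FPU */
		return 0;
	}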
+
+/**
+ * init_fp_ctx() - Initialize task FP context
+ * @target: The task whose FP context should be initialized.
+ *
+ * Initializes the FP context of the target task to sane default values if that
+ * target task does not already have valid FP context. Once the context has
+ * been initialized, the task will be marked as having used FP & thus having
+ * valid FP context.
+ *
+ * Returns: true if context is initialized, else false.
+ */
+static inline bool init_fp_ctx(struct task_struct *target)
+{
+ /* If FP has been used then the target already has context */
+ if (tsk_used_math(target))
+ return false;
+
+ /* Begin with data registers set to all 1s... */
+ memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
+
+ /* FCSR has been preset by `mips_set_personality_nan'. */
+
+ /*
+ * Record that the target has "used" math, such that the context
+ * just initialised, and any modifications made by the caller,
+ * aren't discarded.
+ */
+ set_stopped_child_used_math(target);
+
+ return true;
+}
+
+static inline void save_fp(struct task_struct *tsk)
+{
+ if (cpu_has_fpu)
+ _save_fp(tsk);
+}
+
+static inline void restore_fp(struct task_struct *tsk)
+{
+ if (cpu_has_fpu)
+ _restore_fp(tsk);
+}
+
+static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
+{
+ if (tsk == current) {
+ preempt_disable();
+ if (is_fpu_owner())
+ _save_fp(current);
+ preempt_enable();
+ }
+
+ return tsk->thread.fpu.fpr;
+}
+
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+/*
+ * When FP support is disabled we provide only a minimal set of stub functions
+ * to avoid callers needing to care too much about CONFIG_MIPS_FP_SUPPORT.
+ */
+
+static inline int __enable_fpu(enum fpu_mode mode)
+{
+ return SIGILL;
+}
+
+static inline void __disable_fpu(void)
+{
+ /* no-op */
+}
+
+
+static inline int is_fpu_owner(void)
+{
+ return 0;
+}
+
+static inline void clear_fpu_owner(void)
+{
+ /* no-op */
+}
+
+static inline int own_fpu_inatomic(int restore)
+{
+ return SIGILL;
+}
+
+static inline int own_fpu(int restore)
+{
+ return SIGILL;
+}
+
+static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
+{
+ /* no-op */
+}
+
+static inline void lose_fpu(int save)
+{
+ /* no-op */
+}
+
+static inline bool init_fp_ctx(struct task_struct *target)
+{
+ return false;
+}
+
+/*
+ * The following functions should only be called in paths where we know that FP
+ * support is enabled, typically a path where own_fpu() or __enable_fpu() have
+ * returned successfully. When CONFIG_MIPS_FP_SUPPORT=n it is known at compile
+ * time that this should never happen, so calls to these functions should be
+ * optimized away & never actually be emitted.
+ */
+
+extern void save_fp(struct task_struct *tsk)
+ __compiletime_error("save_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
+
+extern void _save_fp(struct task_struct *)
+ __compiletime_error("_save_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
+
+extern void restore_fp(struct task_struct *tsk)
+ __compiletime_error("restore_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
+
+extern void _restore_fp(struct task_struct *)
+ __compiletime_error("_restore_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
+
+extern union fpureg *get_fpu_regs(struct task_struct *tsk)
+ __compiletime_error("get_fpu_regs() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
+
+#endif /* !CONFIG_MIPS_FP_SUPPORT */
+#endif /* _ASM_FPU_H */
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
new file mode 100644
index 000000000..f67759e81
--- /dev/null
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Further private data for which no space exists in mips_fpu_struct.
+ * This should be subsumed into the mips_fpu_struct structure as
+ * defined in processor.h as soon as the absurd wired absolute assembler
+ * offsets become dynamic at compile time.
+ *
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ */
+#ifndef _ASM_FPU_EMULATOR_H
+#define _ASM_FPU_EMULATOR_H
+
+#include <linux/sched.h>
+#include <asm/dsemul.h>
+#include <asm/thread_info.h>
+#include <asm/inst.h>
+#include <asm/local.h>
+#include <asm/processor.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+struct mips_fpu_emulator_stats {
+ unsigned long emulated;
+ unsigned long loads;
+ unsigned long stores;
+ unsigned long branches;
+ unsigned long cp1ops;
+ unsigned long cp1xops;
+ unsigned long errors;
+ unsigned long ieee754_inexact;
+ unsigned long ieee754_underflow;
+ unsigned long ieee754_overflow;
+ unsigned long ieee754_zerodiv;
+ unsigned long ieee754_invalidop;
+ unsigned long ds_emul;
+
+ unsigned long abs_s;
+ unsigned long abs_d;
+ unsigned long add_s;
+ unsigned long add_d;
+ unsigned long bc1eqz;
+ unsigned long bc1nez;
+ unsigned long ceil_w_s;
+ unsigned long ceil_w_d;
+ unsigned long ceil_l_s;
+ unsigned long ceil_l_d;
+ unsigned long class_s;
+ unsigned long class_d;
+ unsigned long cmp_af_s;
+ unsigned long cmp_af_d;
+ unsigned long cmp_eq_s;
+ unsigned long cmp_eq_d;
+ unsigned long cmp_le_s;
+ unsigned long cmp_le_d;
+ unsigned long cmp_lt_s;
+ unsigned long cmp_lt_d;
+ unsigned long cmp_ne_s;
+ unsigned long cmp_ne_d;
+ unsigned long cmp_or_s;
+ unsigned long cmp_or_d;
+ unsigned long cmp_ueq_s;
+ unsigned long cmp_ueq_d;
+ unsigned long cmp_ule_s;
+ unsigned long cmp_ule_d;
+ unsigned long cmp_ult_s;
+ unsigned long cmp_ult_d;
+ unsigned long cmp_un_s;
+ unsigned long cmp_un_d;
+ unsigned long cmp_une_s;
+ unsigned long cmp_une_d;
+ unsigned long cmp_saf_s;
+ unsigned long cmp_saf_d;
+ unsigned long cmp_seq_s;
+ unsigned long cmp_seq_d;
+ unsigned long cmp_sle_s;
+ unsigned long cmp_sle_d;
+ unsigned long cmp_slt_s;
+ unsigned long cmp_slt_d;
+ unsigned long cmp_sne_s;
+ unsigned long cmp_sne_d;
+ unsigned long cmp_sor_s;
+ unsigned long cmp_sor_d;
+ unsigned long cmp_sueq_s;
+ unsigned long cmp_sueq_d;
+ unsigned long cmp_sule_s;
+ unsigned long cmp_sule_d;
+ unsigned long cmp_sult_s;
+ unsigned long cmp_sult_d;
+ unsigned long cmp_sun_s;
+ unsigned long cmp_sun_d;
+ unsigned long cmp_sune_s;
+ unsigned long cmp_sune_d;
+ unsigned long cvt_d_l;
+ unsigned long cvt_d_s;
+ unsigned long cvt_d_w;
+ unsigned long cvt_l_s;
+ unsigned long cvt_l_d;
+ unsigned long cvt_s_d;
+ unsigned long cvt_s_l;
+ unsigned long cvt_s_w;
+ unsigned long cvt_w_s;
+ unsigned long cvt_w_d;
+ unsigned long div_s;
+ unsigned long div_d;
+ unsigned long floor_w_s;
+ unsigned long floor_w_d;
+ unsigned long floor_l_s;
+ unsigned long floor_l_d;
+ unsigned long maddf_s;
+ unsigned long maddf_d;
+ unsigned long max_s;
+ unsigned long max_d;
+ unsigned long maxa_s;
+ unsigned long maxa_d;
+ unsigned long min_s;
+ unsigned long min_d;
+ unsigned long mina_s;
+ unsigned long mina_d;
+ unsigned long mov_s;
+ unsigned long mov_d;
+ unsigned long msubf_s;
+ unsigned long msubf_d;
+ unsigned long mul_s;
+ unsigned long mul_d;
+ unsigned long neg_s;
+ unsigned long neg_d;
+ unsigned long recip_s;
+ unsigned long recip_d;
+ unsigned long rint_s;
+ unsigned long rint_d;
+ unsigned long round_w_s;
+ unsigned long round_w_d;
+ unsigned long round_l_s;
+ unsigned long round_l_d;
+ unsigned long rsqrt_s;
+ unsigned long rsqrt_d;
+ unsigned long sel_s;
+ unsigned long sel_d;
+ unsigned long seleqz_s;
+ unsigned long seleqz_d;
+ unsigned long selnez_s;
+ unsigned long selnez_d;
+ unsigned long sqrt_s;
+ unsigned long sqrt_d;
+ unsigned long sub_s;
+ unsigned long sub_d;
+ unsigned long trunc_w_s;
+ unsigned long trunc_w_d;
+ unsigned long trunc_l_s;
+ unsigned long trunc_l_d;
+};
+
+DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
+
+#define MIPS_FPU_EMU_INC_STATS(M) \
+do { \
+ preempt_disable(); \
+ __this_cpu_inc(fpuemustats.M); \
+ preempt_enable(); \
+} while (0)
+
+#else
+#define MIPS_FPU_EMU_INC_STATS(M) do { } while (0)
+#endif /* CONFIG_DEBUG_FS */
+
+extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
+ struct mips_fpu_struct *ctx, int has_fpu,
+ void __user **fault_addr);
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk);
+int process_fpemu_return(int sig, void __user *fault_addr,
+ unsigned long fcr31);
+
+/*
+ * Mask the FCSR Cause bits according to the Enable bits, observing
+ * that Unimplemented is always enabled.
+ */
+static inline unsigned long mask_fcr31_x(unsigned long fcr31)
+{
+ return fcr31 & (FPU_CSR_UNI_X |
+ ((fcr31 & FPU_CSR_ALL_E) <<
+ (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))));
+}
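As a hedged worked example of the masking above: with only the divide-by-zero exception enabled but both the divide-by-zero and inexact cause bits raised, only the enabled cause (plus any Unimplemented-Operation cause, which is always enabled) survives:

	/* Illustrative only: FPU_CSR_* bit names come from <asm/mipsregs.h>. */
	unsigned long fcr31 = FPU_CSR_DIV0_E | FPU_CSR_DIV0_X | FPU_CSR_INE_X;
	unsigned long pending = mask_fcr31_x(fcr31);	/* == FPU_CSR_DIV0_X */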
+
+#endif /* _ASM_FPU_EMULATOR_H */
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
new file mode 100644
index 000000000..b463f2aa5
--- /dev/null
+++ b/arch/mips/include/asm/ftrace.h
@@ -0,0 +1,90 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive for
+ * more details.
+ *
+ * Copyright (C) 2009 DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#ifndef _ASM_MIPS_FTRACE_H
+#define _ASM_MIPS_FTRACE_H
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define MCOUNT_ADDR ((unsigned long)(_mcount))
+#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
+
+#ifndef __ASSEMBLY__
+extern void _mcount(void);
+#define mcount _mcount
+
+#define safe_load(load, src, dst, error) \
+do { \
+ asm volatile ( \
+ "1: " load " %[tmp_dst], 0(%[tmp_src])\n" \
+ " li %[tmp_err], 0\n" \
+ "2: .insn\n" \
+ \
+ ".section .fixup, \"ax\"\n" \
+ "3: li %[tmp_err], 1\n" \
+ " j 2b\n" \
+ ".previous\n" \
+ \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR) "\t1b, 3b\n\t" \
+ ".previous\n" \
+ \
+ : [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)\
+ : [tmp_src] "r" (src) \
+ : "memory" \
+ ); \
+} while (0)
+
+#define safe_store(store, src, dst, error) \
+do { \
+ asm volatile ( \
+ "1: " store " %[tmp_src], 0(%[tmp_dst])\n"\
+ " li %[tmp_err], 0\n" \
+ "2: .insn\n" \
+ \
+ ".section .fixup, \"ax\"\n" \
+ "3: li %[tmp_err], 1\n" \
+ " j 2b\n" \
+ ".previous\n" \
+ \
+ ".section\t__ex_table,\"a\"\n\t"\
+ STR(PTR) "\t1b, 3b\n\t" \
+ ".previous\n" \
+ \
+ : [tmp_err] "=r" (error) \
+ : [tmp_dst] "r" (dst), [tmp_src] "r" (src)\
+ : "memory" \
+ ); \
+} while (0)
+
+#define safe_load_code(dst, src, error) \
+ safe_load(STR(lw), src, dst, error)
+#define safe_store_code(src, dst, error) \
+ safe_store(STR(sw), src, dst, error)
+
+#define safe_load_stack(dst, src, error) \
+ safe_load(STR(PTR_L), src, dst, error)
+
+#define safe_store_stack(src, dst, error) \
+ safe_store(STR(PTR_S), src, dst, error)
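A hedged sketch of how the dynamic ftrace code consumes these fault-tolerant accessors when inspecting or patching kernel text ('ip' and the error handling are illustrative; the real users live in arch/mips/kernel/ftrace.c):

	/* Illustrative only: read one instruction word, tolerating a fault. */
	static int read_insn(unsigned long ip, unsigned int *out)
	{
		unsigned int insn;
		int faulted;

		safe_load_code(insn, ip, faulted);
		if (faulted)
			return -EFAULT;

		*out = insn;	/* rewrite via safe_store_code() if needed */
		return 0;
	}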
+
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_FUNCTION_TRACER */
+#endif /* _ASM_MIPS_FTRACE_H */
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
new file mode 100644
index 000000000..d85248404
--- /dev/null
+++ b/arch/mips/include/asm/futex.h
@@ -0,0 +1,204 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2006 Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/asm-eva.h>
+#include <asm/barrier.h>
+#include <asm/compiler.h>
+#include <asm/errno.h>
+#include <asm/sync.h>
+#include <asm/war.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+{ \
+ if (cpu_has_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set push \n" \
+ " .set arch=r4000 \n" \
+ "1: ll %1, %4 # __futex_atomic_op \n" \
+ " .set pop \n" \
+ " " insn " \n" \
+ " .set arch=r4000 \n" \
+ "2: sc $1, %2 \n" \
+ " beqzl $1, 1b \n" \
+ __stringify(__WEAK_LLSC_MB) " \n" \
+ "3: \n" \
+ " .insn \n" \
+ " .set pop \n" \
+ " .section .fixup,\"ax\" \n" \
+ "4: li %0, %6 \n" \
+ " j 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "__UA_ADDR "\t1b, 4b \n" \
+ " "__UA_ADDR "\t2b, 4b \n" \
+ " .previous \n" \
+ : "=r" (ret), "=&r" (oldval), \
+ "=" GCC_OFF_SMALL_ASM() (*uaddr) \
+ : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \
+ "i" (-EFAULT) \
+ : "memory"); \
+ } else if (cpu_has_llsc) { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set push \n" \
+ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
+ " .set pop \n" \
+ " " insn " \n" \
+ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
+ "2: "user_sc("$1", "%2")" \n" \
+ " beqz $1, 1b \n" \
+ __stringify(__WEAK_LLSC_MB) " \n" \
+ "3: \n" \
+ " .insn \n" \
+ " .set pop \n" \
+ " .section .fixup,\"ax\" \n" \
+ "4: li %0, %6 \n" \
+ " j 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "__UA_ADDR "\t1b, 4b \n" \
+ " "__UA_ADDR "\t2b, 4b \n" \
+ " .previous \n" \
+ : "=r" (ret), "=&r" (oldval), \
+ "=" GCC_OFF_SMALL_ASM() (*uaddr) \
+ : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \
+ "i" (-EFAULT) \
+ : "memory"); \
+ } else \
+ ret = -ENOSYS; \
+}
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval = 0, ret;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg);
+ break;
+
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("addu $1, %1, %z5",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or $1, %1, %z5",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and $1, %1, %z5",
+ ret, oldval, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor $1, %1, %z5",
+ ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+ u32 val;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ if (cpu_has_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) {
+ __asm__ __volatile__(
+ "# futex_atomic_cmpxchg_inatomic \n"
+ " .set push \n"
+ " .set noat \n"
+ " .set push \n"
+ " .set arch=r4000 \n"
+ "1: ll %1, %3 \n"
+ " bne %1, %z4, 3f \n"
+ " .set pop \n"
+ " move $1, %z5 \n"
+ " .set arch=r4000 \n"
+ "2: sc $1, %2 \n"
+ " beqzl $1, 1b \n"
+ __stringify(__WEAK_LLSC_MB) " \n"
+ "3: \n"
+ " .insn \n"
+ " .set pop \n"
+ " .section .fixup,\"ax\" \n"
+ "4: li %0, %6 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " "__UA_ADDR "\t1b, 4b \n"
+ " "__UA_ADDR "\t2b, 4b \n"
+ " .previous \n"
+ : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+ : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+ "i" (-EFAULT)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ __asm__ __volatile__(
+ "# futex_atomic_cmpxchg_inatomic \n"
+ " .set push \n"
+ " .set noat \n"
+ " .set push \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
+ " " __SYNC(full, loongson3_war) " \n"
+ "1: "user_ll("%1", "%3")" \n"
+ " bne %1, %z4, 3f \n"
+ " .set pop \n"
+ " move $1, %z5 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
+ "2: "user_sc("$1", "%2")" \n"
+ " beqz $1, 1b \n"
+ "3: " __SYNC_ELSE(full, loongson3_war, __WEAK_LLSC_MB) "\n"
+ " .insn \n"
+ " .set pop \n"
+ " .section .fixup,\"ax\" \n"
+ "4: li %0, %6 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " "__UA_ADDR "\t1b, 4b \n"
+ " "__UA_ADDR "\t2b, 4b \n"
+ " .previous \n"
+ : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+ : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+ "i" (-EFAULT)
+ : "memory");
+ } else
+ return -ENOSYS;
+
+ *uval = val;
+ return ret;
+}
+
+#endif
+#endif /* _ASM_FUTEX_H */
diff --git a/arch/mips/include/asm/fw/arc/hinv.h b/arch/mips/include/asm/fw/arc/hinv.h
new file mode 100644
index 000000000..d67b6a90f
--- /dev/null
+++ b/arch/mips/include/asm/fw/arc/hinv.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ARCS hardware/memory inventory/configuration and system ID definitions.
+ */
+#ifndef _ASM_ARC_HINV_H
+#define _ASM_ARC_HINV_H
+
+#include <asm/sgidefs.h>
+#include <asm/fw/arc/types.h>
+
+/* configuration query defines */
+typedef enum configclass {
+ SystemClass,
+ ProcessorClass,
+ CacheClass,
+#ifndef _NT_PROM
+ MemoryClass,
+ AdapterClass,
+ ControllerClass,
+ PeripheralClass
+#else /* _NT_PROM */
+ AdapterClass,
+ ControllerClass,
+ PeripheralClass,
+ MemoryClass
+#endif /* _NT_PROM */
+} CONFIGCLASS;
+
+typedef enum configtype {
+ ARC,
+ CPU,
+ FPU,
+ PrimaryICache,
+ PrimaryDCache,
+ SecondaryICache,
+ SecondaryDCache,
+ SecondaryCache,
+#ifndef _NT_PROM
+ Memory,
+#endif
+ EISAAdapter,
+ TCAdapter,
+ SCSIAdapter,
+ DTIAdapter,
+ MultiFunctionAdapter,
+ DiskController,
+ TapeController,
+ CDROMController,
+ WORMController,
+ SerialController,
+ NetworkController,
+ DisplayController,
+ ParallelController,
+ PointerController,
+ KeyboardController,
+ AudioController,
+ OtherController,
+ DiskPeripheral,
+ FloppyDiskPeripheral,
+ TapePeripheral,
+ ModemPeripheral,
+ MonitorPeripheral,
+ PrinterPeripheral,
+ PointerPeripheral,
+ KeyboardPeripheral,
+ TerminalPeripheral,
+ LinePeripheral,
+ NetworkPeripheral,
+#ifdef _NT_PROM
+ Memory,
+#endif
+ OtherPeripheral,
+
+ /* new stuff for IP30 */
+ /* added without moving anything */
+ /* except ANONYMOUS. */
+
+ XTalkAdapter,
+ PCIAdapter,
+ GIOAdapter,
+ TPUAdapter,
+
+ Anonymous
+} CONFIGTYPE;
+
+typedef enum {
+ Failed = 1,
+ ReadOnly = 2,
+ Removable = 4,
+ ConsoleIn = 8,
+ ConsoleOut = 16,
+ Input = 32,
+ Output = 64
+} IDENTIFIERFLAG;
+
+#ifndef NULL /* for GetChild(NULL); */
+#define NULL 0
+#endif
+
+union key_u {
+ struct {
+#ifdef _MIPSEB
+ unsigned char c_bsize; /* block size in lines */
+ unsigned char c_lsize; /* line size in bytes/tag */
+ unsigned short c_size; /* cache size in 4K pages */
+#else /* _MIPSEL */
+ unsigned short c_size; /* cache size in 4K pages */
+ unsigned char c_lsize; /* line size in bytes/tag */
+ unsigned char c_bsize; /* block size in lines */
+#endif /* _MIPSEL */
+ } cache;
+ ULONG FullKey;
+};
+
+#if _MIPS_SIM == _MIPS_SIM_ABI64
+#define SGI_ARCS_VERS 64 /* sgi 64-bit version */
+#define SGI_ARCS_REV 0 /* rev .00 */
+#else
+#define SGI_ARCS_VERS 1 /* first version */
+#define SGI_ARCS_REV 10 /* rev .10, 3/04/92 */
+#endif
+
+typedef struct {
+ CONFIGCLASS Class;
+ CONFIGTYPE Type;
+ IDENTIFIERFLAG Flags;
+ USHORT Version;
+ USHORT Revision;
+ ULONG Key;
+ ULONG AffinityMask;
+ ULONG ConfigurationDataSize;
+ ULONG IdentifierLength;
+ char *Identifier;
+} COMPONENT;
+
+/* internal structure that holds pathname parsing data */
+struct cfgdata {
+ char *name; /* full name */
+ int minlen; /* minimum length to match */
+ CONFIGTYPE type; /* type of token */
+};
+
+/* System ID */
+typedef struct {
+ CHAR VendorId[8];
+ CHAR ProductId[8];
+} SYSTEMID;
+
+/* memory query functions */
+typedef enum memorytype {
+ ExceptionBlock,
+ SPBPage, /* ARCS == SystemParameterBlock */
+#ifndef _NT_PROM
+ FreeContiguous,
+ FreeMemory,
+ BadMemory,
+ LoadedProgram,
+ FirmwareTemporary,
+ FirmwarePermanent
+#else /* _NT_PROM */
+ FreeMemory,
+ BadMemory,
+ LoadedProgram,
+ FirmwareTemporary,
+ FirmwarePermanent,
+ FreeContiguous
+#endif /* _NT_PROM */
+} MEMORYTYPE;
+
+typedef struct {
+ MEMORYTYPE Type;
+ LONG BasePage;
+ LONG PageCount;
+} MEMORYDESCRIPTOR;
+
+#endif /* _ASM_ARC_HINV_H */
diff --git a/arch/mips/include/asm/fw/arc/types.h b/arch/mips/include/asm/fw/arc/types.h
new file mode 100644
index 000000000..ad1638061
--- /dev/null
+++ b/arch/mips/include/asm/fw/arc/types.h
@@ -0,0 +1,86 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright 1999 Ralf Baechle (ralf@gnu.org)
+ * Copyright 1999 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_ARC_TYPES_H
+#define _ASM_ARC_TYPES_H
+
+
+#ifdef CONFIG_FW_ARC32
+
+typedef char CHAR;
+typedef short SHORT;
+typedef long LARGE_INTEGER __attribute__ ((__mode__ (__DI__)));
+typedef long LONG __attribute__ ((__mode__ (__SI__)));
+typedef unsigned char UCHAR;
+typedef unsigned short USHORT;
+typedef unsigned long ULONG __attribute__ ((__mode__ (__SI__)));
+typedef void VOID;
+
+/* The pointer types.  Note that we're using a 64-bit compiler but all
+   pointers in the ARC structures are only 32 bits wide, so we need some
+   disgusting workarounds.  Keep your vomit bag handy. */
+typedef LONG _PCHAR;
+typedef LONG _PSHORT;
+typedef LONG _PLARGE_INTEGER;
+typedef LONG _PLONG;
+typedef LONG _PUCHAR;
+typedef LONG _PUSHORT;
+typedef LONG _PULONG;
+typedef LONG _PVOID;
+
+#endif /* CONFIG_FW_ARC32 */
+
+#ifdef CONFIG_FW_ARC64
+
+typedef char CHAR;
+typedef short SHORT;
+typedef long LARGE_INTEGER __attribute__ ((__mode__ (__DI__)));
+typedef long LONG __attribute__ ((__mode__ (__DI__)));
+typedef unsigned char UCHAR;
+typedef unsigned short USHORT;
+typedef unsigned long ULONG __attribute__ ((__mode__ (__DI__)));
+typedef void VOID;
+
+/* The pointer types.  We're 64-bit and the firmware is also 64-bit, so
+   life is sane ... */
+typedef CHAR *_PCHAR;
+typedef SHORT *_PSHORT;
+typedef LARGE_INTEGER *_PLARGE_INTEGER;
+typedef LONG *_PLONG;
+typedef UCHAR *_PUCHAR;
+typedef USHORT *_PUSHORT;
+typedef ULONG *_PULONG;
+typedef VOID *_PVOID;
+
+#endif /* CONFIG_FW_ARC64 */
+
+typedef CHAR *PCHAR;
+typedef SHORT *PSHORT;
+typedef LARGE_INTEGER *PLARGE_INTEGER;
+typedef LONG *PLONG;
+typedef UCHAR *PUCHAR;
+typedef USHORT *PUSHORT;
+typedef ULONG *PULONG;
+typedef VOID *PVOID;
+
+/*
+ * Return type of ArcGetDisplayStatus()
+ */
+typedef struct {
+ USHORT CursorXPosition;
+ USHORT CursorYPosition;
+ USHORT CursorMaxXPosition;
+ USHORT CursorMaxYPosition;
+ USHORT ForegroundColor;
+ USHORT BackgroundColor;
+ UCHAR HighIntensity;
+ UCHAR Underscored;
+ UCHAR ReverseVideo;
+} DISPLAY_STATUS;
+
+#endif /* _ASM_ARC_TYPES_H */
diff --git a/arch/mips/include/asm/fw/cfe/cfe_api.h b/arch/mips/include/asm/fw/cfe/cfe_api.h
new file mode 100644
index 000000000..6457f3689
--- /dev/null
+++ b/arch/mips/include/asm/fw/cfe/cfe_api.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2000, 2001, 2002 Broadcom Corporation
+ */
+/*
+ * Broadcom Common Firmware Environment (CFE)
+ *
+ * This file contains declarations for doing callbacks to
+ * CFE from an application.  It should be the only header
+ * needed by the application to use this library.
+ *
+ * Authors: Mitch Lichtenberg, Chris Demetriou
+ */
+#ifndef CFE_API_H
+#define CFE_API_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+typedef long intptr_t;
+
+
+/*
+ * Constants
+ */
+
+/* Seal indicating CFE's presence, passed to user program. */
+#define CFE_EPTSEAL 0x43464531
+
+#define CFE_MI_RESERVED 0 /* memory is reserved, do not use */
+#define CFE_MI_AVAILABLE 1 /* memory is available */
+
+#define CFE_FLG_WARMSTART 0x00000001
+#define CFE_FLG_FULL_ARENA 0x00000001
+#define CFE_FLG_ENV_PERMANENT 0x00000001
+
+#define CFE_CPU_CMD_START 1
+#define CFE_CPU_CMD_STOP 0
+
+#define CFE_STDHANDLE_CONSOLE 0
+
+#define CFE_DEV_NETWORK 1
+#define CFE_DEV_DISK 2
+#define CFE_DEV_FLASH 3
+#define CFE_DEV_SERIAL 4
+#define CFE_DEV_CPU 5
+#define CFE_DEV_NVRAM 6
+#define CFE_DEV_CLOCK 7
+#define CFE_DEV_OTHER 8
+#define CFE_DEV_MASK 0x0F
+
+#define CFE_CACHE_FLUSH_D 1
+#define CFE_CACHE_INVAL_I 2
+#define CFE_CACHE_INVAL_D 4
+#define CFE_CACHE_INVAL_L2 8
+
+#define CFE_FWI_64BIT 0x00000001
+#define CFE_FWI_32BIT 0x00000002
+#define CFE_FWI_RELOC 0x00000004
+#define CFE_FWI_UNCACHED 0x00000008
+#define CFE_FWI_MULTICPU 0x00000010
+#define CFE_FWI_FUNCSIM 0x00000020
+#define CFE_FWI_RTLSIM 0x00000040
+
+typedef struct {
+ int64_t fwi_version; /* major, minor, eco version */
+ int64_t fwi_totalmem; /* total installed mem */
+ int64_t fwi_flags; /* various flags */
+ int64_t fwi_boardid; /* board ID */
+ int64_t fwi_bootarea_va; /* VA of boot area */
+ int64_t fwi_bootarea_pa; /* PA of boot area */
+ int64_t fwi_bootarea_size; /* size of boot area */
+} cfe_fwinfo_t;
+
+
+/*
+ * Defines and prototypes for functions which take no arguments.
+ */
+int64_t cfe_getticks(void);
+
+/*
+ * Defines and prototypes for the rest of the functions.
+ */
+int cfe_close(int handle);
+int cfe_cpu_start(int cpu, void (*fn) (void), long sp, long gp, long a1);
+int cfe_cpu_stop(int cpu);
+int cfe_enumenv(int idx, char *name, int namelen, char *val, int vallen);
+int cfe_enummem(int idx, int flags, uint64_t * start, uint64_t * length,
+ uint64_t * type);
+int cfe_exit(int warm, int status);
+int cfe_flushcache(int flg);
+int cfe_getdevinfo(char *name);
+int cfe_getenv(char *name, char *dest, int destlen);
+int cfe_getfwinfo(cfe_fwinfo_t * info);
+int cfe_getstdhandle(int flg);
+int cfe_init(uint64_t handle, uint64_t ept);
+int cfe_inpstat(int handle);
+int cfe_ioctl(int handle, unsigned int ioctlnum, unsigned char *buffer,
+ int length, int *retlen, uint64_t offset);
+int cfe_open(char *name);
+int cfe_read(int handle, unsigned char *buffer, int length);
+int cfe_readblk(int handle, int64_t offset, unsigned char *buffer,
+ int length);
+int cfe_setenv(char *name, char *val);
+int cfe_write(int handle, const char *buffer, int length);
+int cfe_writeblk(int handle, int64_t offset, const char *buffer,
+ int length);
+
+#endif /* CFE_API_H */
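
Illustrative sketch (not part of the patch above; the helper name and the environment variable are hypothetical): early platform code could use the CFE calls declared in this header roughly as follows, assuming cfe_init() has already been called with the handle and entry point passed in by the firmware.

	#include <asm/fw/cfe/cfe_api.h>
	#include <asm/fw/cfe/cfe_error.h>

	static int example_get_board_name(char *buf, int buflen)
	{
		/* cfe_getenv() copies the value into buf; non-zero means a CFE_ERR_* code */
		if (cfe_getenv("BOARD_NAME", buf, buflen) != CFE_OK)
			return -1;	/* e.g. CFE_ERR_ENVNOTFOUND */
		return 0;
	}
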
diff --git a/arch/mips/include/asm/fw/cfe/cfe_error.h b/arch/mips/include/asm/fw/cfe/cfe_error.h
new file mode 100644
index 000000000..2f04a39fd
--- /dev/null
+++ b/arch/mips/include/asm/fw/cfe/cfe_error.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2000, 2001, 2002 Broadcom Corporation
+ */
+
+/*
+ * Broadcom Common Firmware Environment (CFE)
+ *
+ * CFE's global error code list is here.
+ *
+ * Author: Mitch Lichtenberg
+ */
+
+#define CFE_OK 0
+#define CFE_ERR -1 /* generic error */
+#define CFE_ERR_INV_COMMAND -2
+#define CFE_ERR_EOF -3
+#define CFE_ERR_IOERR -4
+#define CFE_ERR_NOMEM -5
+#define CFE_ERR_DEVNOTFOUND -6
+#define CFE_ERR_DEVOPEN -7
+#define CFE_ERR_INV_PARAM -8
+#define CFE_ERR_ENVNOTFOUND -9
+#define CFE_ERR_ENVREADONLY -10
+
+#define CFE_ERR_NOTELF -11
+#define CFE_ERR_NOT32BIT -12
+#define CFE_ERR_WRONGENDIAN -13
+#define CFE_ERR_BADELFVERS -14
+#define CFE_ERR_NOTMIPS -15
+#define CFE_ERR_BADELFFMT -16
+#define CFE_ERR_BADADDR -17
+
+#define CFE_ERR_FILENOTFOUND -18
+#define CFE_ERR_UNSUPPORTED -19
+
+#define CFE_ERR_HOSTUNKNOWN -20
+
+#define CFE_ERR_TIMEOUT -21
+
+#define CFE_ERR_PROTOCOLERR -22
+
+#define CFE_ERR_NETDOWN -23
+#define CFE_ERR_NONAMESERVER -24
+
+#define CFE_ERR_NOHANDLES -25
+#define CFE_ERR_ALREADYBOUND -26
+
+#define CFE_ERR_CANNOTSET -27
+#define CFE_ERR_NOMORE -28
+#define CFE_ERR_BADFILESYS -29
+#define CFE_ERR_FSNOTAVAIL -30
+
+#define CFE_ERR_INVBOOTBLOCK -31
+#define CFE_ERR_WRONGDEVTYPE -32
+#define CFE_ERR_BBCHECKSUM -33
+#define CFE_ERR_BOOTPROGCHKSUM -34
+
+#define CFE_ERR_LDRNOTAVAIL -35
+
+#define CFE_ERR_NOTREADY -36
+
+#define CFE_ERR_GETMEM -37
+#define CFE_ERR_SETMEM -38
+
+#define CFE_ERR_NOTCONN -39
+#define CFE_ERR_ADDRINUSE -40
diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h
new file mode 100644
index 000000000..d0494ce4b
--- /dev/null
+++ b/arch/mips/include/asm/fw/fw.h
@@ -0,0 +1,31 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ */
+#ifndef __ASM_FW_H_
+#define __ASM_FW_H_
+
+#include <asm/bootinfo.h> /* For cleaner code... */
+
+extern int fw_argc;
+extern int *_fw_argv;
+extern int *_fw_envp;
+
+/*
+ * Most firmware, such as YAMON and PMON, passes arguments and environment
+ * variables as 32-bit pointers.  These macros take care of the sign extension.
+ */
+#define fw_argv(index) ((char *)(long)_fw_argv[(index)])
+#define fw_envp(index) ((char *)(long)_fw_envp[(index)])
+
+extern void fw_init_cmdline(void);
+extern char *fw_getcmdline(void);
+extern void fw_meminit(void);
+extern char *fw_getenv(char *name);
+extern unsigned long fw_getenvl(char *name);
+extern void fw_init_early_console(void);
+
+#endif /* __ASM_FW_H_ */
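
As a rough sketch (not part of the patch above; the function below is made up for illustration), a platform's early init code could walk the firmware-provided argument block through the sign-extending accessors declared in this header:

	#include <asm/fw/fw.h>
	#include <linux/init.h>
	#include <linux/printk.h>

	static void __init example_dump_fw_args(void)
	{
		int i;

		/* fw_argv() sign-extends each 32-bit firmware pointer to a kernel pointer */
		for (i = 0; i < fw_argc; i++)
			pr_info("firmware argument %d: %s\n", i, fw_argv(i));
	}
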
diff --git a/arch/mips/include/asm/ginvt.h b/arch/mips/include/asm/ginvt.h
new file mode 100644
index 000000000..6eb7c2b94
--- /dev/null
+++ b/arch/mips/include/asm/ginvt.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __MIPS_ASM_GINVT_H__
+#define __MIPS_ASM_GINVT_H__
+
+#include <asm/mipsregs.h>
+
+enum ginvt_type {
+ GINVT_FULL,
+ GINVT_VA,
+ GINVT_MMID,
+};
+
+#ifdef TOOLCHAIN_SUPPORTS_GINV
+# define _ASM_SET_GINV ".set ginv\n"
+#else
+_ASM_MACRO_1R1I(ginvt, rs, type,
+ _ASM_INSN_IF_MIPS(0x7c0000bd | (__rs << 21) | (\\type << 8))
+ _ASM_INSN32_IF_MM(0x0000717c | (__rs << 16) | (\\type << 9)));
+# define _ASM_SET_GINV
+#endif
+
+static __always_inline void ginvt(unsigned long addr, enum ginvt_type type)
+{
+ asm volatile(
+ ".set push\n"
+ _ASM_SET_GINV
+ " ginvt %0, %1\n"
+ ".set pop"
+ : /* no outputs */
+ : "r"(addr), "i"(type)
+ : "memory");
+}
+
+static inline void ginvt_full(void)
+{
+ ginvt(0, GINVT_FULL);
+}
+
+static inline void ginvt_va(unsigned long addr)
+{
+ addr &= PAGE_MASK << 1;
+ ginvt(addr, GINVT_VA);
+}
+
+static inline void ginvt_mmid(void)
+{
+ ginvt(0, GINVT_MMID);
+}
+
+static inline void ginvt_va_mmid(unsigned long addr)
+{
+ addr &= PAGE_MASK << 1;
+ ginvt(addr, GINVT_VA | GINVT_MMID);
+}
+
+#endif /* __MIPS_ASM_GINVT_H__ */
diff --git a/arch/mips/include/asm/gio_device.h b/arch/mips/include/asm/gio_device.h
new file mode 100644
index 000000000..159087f53
--- /dev/null
+++ b/arch/mips/include/asm/gio_device.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct gio_device_id {
+ __u8 id;
+};
+
+struct gio_device {
+ struct device dev;
+ struct resource resource;
+ unsigned int irq;
+ unsigned int slotno;
+
+ const char *name;
+ struct gio_device_id id;
+ unsigned id32:1;
+ unsigned gio64:1;
+};
+#define to_gio_device(d) container_of(d, struct gio_device, dev)
+
+struct gio_driver {
+ const char *name;
+ struct module *owner;
+ const struct gio_device_id *id_table;
+
+ int (*probe)(struct gio_device *, const struct gio_device_id *);
+ void (*remove)(struct gio_device *);
+ void (*shutdown)(struct gio_device *);
+
+ struct device_driver driver;
+};
+#define to_gio_driver(drv) container_of(drv, struct gio_driver, driver)
+
+extern struct gio_device *gio_dev_get(struct gio_device *);
+extern void gio_dev_put(struct gio_device *);
+
+extern int gio_device_register(struct gio_device *);
+extern void gio_device_unregister(struct gio_device *);
+extern void gio_release_dev(struct device *);
+
+static inline void gio_device_free(struct gio_device *dev)
+{
+ gio_release_dev(&dev->dev);
+}
+
+extern int gio_register_driver(struct gio_driver *);
+extern void gio_unregister_driver(struct gio_driver *);
+
+#define gio_get_drvdata(_dev) dev_get_drvdata(&(_dev)->dev)
+#define gio_set_drvdata(_dev, data) dev_set_drvdata(&(_dev)->dev, (data))
+
+extern void gio_set_master(struct gio_device *);
diff --git a/arch/mips/include/asm/gt64120.h b/arch/mips/include/asm/gt64120.h
new file mode 100644
index 000000000..5d68d7265
--- /dev/null
+++ b/arch/mips/include/asm/gt64120.h
@@ -0,0 +1,566 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2000, 2004, 2005 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Maciej W. Rozycki <macro@mips.com>
+ * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef _ASM_GT64120_H
+#define _ASM_GT64120_H
+
+#include <asm/addrspace.h>
+#include <asm/byteorder.h>
+
+#define MSK(n) ((1 << (n)) - 1)
+
+/*
+ * Register offset addresses
+ */
+/* CPU Configuration. */
+#define GT_CPU_OFS 0x000
+
+#define GT_MULTI_OFS 0x120
+
+/* CPU Address Decode. */
+#define GT_SCS10LD_OFS 0x008
+#define GT_SCS10HD_OFS 0x010
+#define GT_SCS32LD_OFS 0x018
+#define GT_SCS32HD_OFS 0x020
+#define GT_CS20LD_OFS 0x028
+#define GT_CS20HD_OFS 0x030
+#define GT_CS3BOOTLD_OFS 0x038
+#define GT_CS3BOOTHD_OFS 0x040
+#define GT_PCI0IOLD_OFS 0x048
+#define GT_PCI0IOHD_OFS 0x050
+#define GT_PCI0M0LD_OFS 0x058
+#define GT_PCI0M0HD_OFS 0x060
+#define GT_ISD_OFS 0x068
+
+#define GT_PCI0M1LD_OFS 0x080
+#define GT_PCI0M1HD_OFS 0x088
+#define GT_PCI1IOLD_OFS 0x090
+#define GT_PCI1IOHD_OFS 0x098
+#define GT_PCI1M0LD_OFS 0x0a0
+#define GT_PCI1M0HD_OFS 0x0a8
+#define GT_PCI1M1LD_OFS 0x0b0
+#define GT_PCI1M1HD_OFS 0x0b8
+
+#define GT_SCS10AR_OFS 0x0d0
+#define GT_SCS32AR_OFS 0x0d8
+#define GT_CS20R_OFS 0x0e0
+#define GT_CS3BOOTR_OFS 0x0e8
+
+#define GT_PCI0IOREMAP_OFS 0x0f0
+#define GT_PCI0M0REMAP_OFS 0x0f8
+#define GT_PCI0M1REMAP_OFS 0x100
+#define GT_PCI1IOREMAP_OFS 0x108
+#define GT_PCI1M0REMAP_OFS 0x110
+#define GT_PCI1M1REMAP_OFS 0x118
+
+/* CPU Error Report. */
+#define GT_CPUERR_ADDRLO_OFS 0x070
+#define GT_CPUERR_ADDRHI_OFS 0x078
+
+#define GT_CPUERR_DATALO_OFS 0x128 /* GT-64120A only */
+#define GT_CPUERR_DATAHI_OFS 0x130 /* GT-64120A only */
+#define GT_CPUERR_PARITY_OFS 0x138 /* GT-64120A only */
+
+/* CPU Sync Barrier. */
+#define GT_PCI0SYNC_OFS 0x0c0
+#define GT_PCI1SYNC_OFS 0x0c8
+
+/* SDRAM and Device Address Decode. */
+#define GT_SCS0LD_OFS 0x400
+#define GT_SCS0HD_OFS 0x404
+#define GT_SCS1LD_OFS 0x408
+#define GT_SCS1HD_OFS 0x40c
+#define GT_SCS2LD_OFS 0x410
+#define GT_SCS2HD_OFS 0x414
+#define GT_SCS3LD_OFS 0x418
+#define GT_SCS3HD_OFS 0x41c
+#define GT_CS0LD_OFS 0x420
+#define GT_CS0HD_OFS 0x424
+#define GT_CS1LD_OFS 0x428
+#define GT_CS1HD_OFS 0x42c
+#define GT_CS2LD_OFS 0x430
+#define GT_CS2HD_OFS 0x434
+#define GT_CS3LD_OFS 0x438
+#define GT_CS3HD_OFS 0x43c
+#define GT_BOOTLD_OFS 0x440
+#define GT_BOOTHD_OFS 0x444
+
+#define GT_ADERR_OFS 0x470
+
+/* SDRAM Configuration. */
+#define GT_SDRAM_CFG_OFS 0x448
+
+#define GT_SDRAM_OPMODE_OFS 0x474
+#define GT_SDRAM_BM_OFS 0x478
+#define GT_SDRAM_ADDRDECODE_OFS 0x47c
+
+/* SDRAM Parameters. */
+#define GT_SDRAM_B0_OFS 0x44c
+#define GT_SDRAM_B1_OFS 0x450
+#define GT_SDRAM_B2_OFS 0x454
+#define GT_SDRAM_B3_OFS 0x458
+
+/* Device Parameters. */
+#define GT_DEV_B0_OFS 0x45c
+#define GT_DEV_B1_OFS 0x460
+#define GT_DEV_B2_OFS 0x464
+#define GT_DEV_B3_OFS 0x468
+#define GT_DEV_BOOT_OFS 0x46c
+
+/* ECC. */
+#define GT_ECC_ERRDATALO 0x480 /* GT-64120A only */
+#define GT_ECC_ERRDATAHI 0x484 /* GT-64120A only */
+#define GT_ECC_MEM 0x488 /* GT-64120A only */
+#define GT_ECC_CALC 0x48c /* GT-64120A only */
+#define GT_ECC_ERRADDR 0x490 /* GT-64120A only */
+
+/* DMA Record. */
+#define GT_DMA0_CNT_OFS 0x800
+#define GT_DMA1_CNT_OFS 0x804
+#define GT_DMA2_CNT_OFS 0x808
+#define GT_DMA3_CNT_OFS 0x80c
+#define GT_DMA0_SA_OFS 0x810
+#define GT_DMA1_SA_OFS 0x814
+#define GT_DMA2_SA_OFS 0x818
+#define GT_DMA3_SA_OFS 0x81c
+#define GT_DMA0_DA_OFS 0x820
+#define GT_DMA1_DA_OFS 0x824
+#define GT_DMA2_DA_OFS 0x828
+#define GT_DMA3_DA_OFS 0x82c
+#define GT_DMA0_NEXT_OFS 0x830
+#define GT_DMA1_NEXT_OFS 0x834
+#define GT_DMA2_NEXT_OFS 0x838
+#define GT_DMA3_NEXT_OFS 0x83c
+
+#define GT_DMA0_CUR_OFS 0x870
+#define GT_DMA1_CUR_OFS 0x874
+#define GT_DMA2_CUR_OFS 0x878
+#define GT_DMA3_CUR_OFS 0x87c
+
+/* DMA Channel Control. */
+#define GT_DMA0_CTRL_OFS 0x840
+#define GT_DMA1_CTRL_OFS 0x844
+#define GT_DMA2_CTRL_OFS 0x848
+#define GT_DMA3_CTRL_OFS 0x84c
+
+/* DMA Arbiter. */
+#define GT_DMA_ARB_OFS 0x860
+
+/* Timer/Counter. */
+#define GT_TC0_OFS 0x850
+#define GT_TC1_OFS 0x854
+#define GT_TC2_OFS 0x858
+#define GT_TC3_OFS 0x85c
+
+#define GT_TC_CONTROL_OFS 0x864
+
+/* PCI Internal. */
+#define GT_PCI0_CMD_OFS 0xc00
+#define GT_PCI0_TOR_OFS 0xc04
+#define GT_PCI0_BS_SCS10_OFS 0xc08
+#define GT_PCI0_BS_SCS32_OFS 0xc0c
+#define GT_PCI0_BS_CS20_OFS 0xc10
+#define GT_PCI0_BS_CS3BT_OFS 0xc14
+
+#define GT_PCI1_IACK_OFS 0xc30
+#define GT_PCI0_IACK_OFS 0xc34
+
+#define GT_PCI0_BARE_OFS 0xc3c
+#define GT_PCI0_PREFMBR_OFS 0xc40
+
+#define GT_PCI0_SCS10_BAR_OFS 0xc48
+#define GT_PCI0_SCS32_BAR_OFS 0xc4c
+#define GT_PCI0_CS20_BAR_OFS 0xc50
+#define GT_PCI0_CS3BT_BAR_OFS 0xc54
+#define GT_PCI0_SSCS10_BAR_OFS 0xc58
+#define GT_PCI0_SSCS32_BAR_OFS 0xc5c
+
+#define GT_PCI0_SCS3BT_BAR_OFS 0xc64
+
+#define GT_PCI1_CMD_OFS 0xc80
+#define GT_PCI1_TOR_OFS 0xc84
+#define GT_PCI1_BS_SCS10_OFS 0xc88
+#define GT_PCI1_BS_SCS32_OFS 0xc8c
+#define GT_PCI1_BS_CS20_OFS 0xc90
+#define GT_PCI1_BS_CS3BT_OFS 0xc94
+
+#define GT_PCI1_BARE_OFS 0xcbc
+#define GT_PCI1_PREFMBR_OFS 0xcc0
+
+#define GT_PCI1_SCS10_BAR_OFS 0xcc8
+#define GT_PCI1_SCS32_BAR_OFS 0xccc
+#define GT_PCI1_CS20_BAR_OFS 0xcd0
+#define GT_PCI1_CS3BT_BAR_OFS 0xcd4
+#define GT_PCI1_SSCS10_BAR_OFS 0xcd8
+#define GT_PCI1_SSCS32_BAR_OFS 0xcdc
+
+#define GT_PCI1_SCS3BT_BAR_OFS 0xce4
+
+#define GT_PCI1_CFGADDR_OFS 0xcf0
+#define GT_PCI1_CFGDATA_OFS 0xcf4
+#define GT_PCI0_CFGADDR_OFS 0xcf8
+#define GT_PCI0_CFGDATA_OFS 0xcfc
+
+/* Interrupts. */
+#define GT_INTRCAUSE_OFS 0xc18
+#define GT_INTRMASK_OFS 0xc1c
+
+#define GT_PCI0_ICMASK_OFS 0xc24
+#define GT_PCI0_SERR0MASK_OFS 0xc28
+
+#define GT_CPU_INTSEL_OFS 0xc70
+#define GT_PCI0_INTSEL_OFS 0xc74
+
+#define GT_HINTRCAUSE_OFS 0xc98
+#define GT_HINTRMASK_OFS 0xc9c
+
+#define GT_PCI0_HICMASK_OFS 0xca4
+#define GT_PCI1_SERR1MASK_OFS 0xca8
+
+
+/*
+ * I2O Support Registers
+ */
+#define INBOUND_MESSAGE_REGISTER0_PCI_SIDE 0x010
+#define INBOUND_MESSAGE_REGISTER1_PCI_SIDE 0x014
+#define OUTBOUND_MESSAGE_REGISTER0_PCI_SIDE 0x018
+#define OUTBOUND_MESSAGE_REGISTER1_PCI_SIDE 0x01c
+#define INBOUND_DOORBELL_REGISTER_PCI_SIDE 0x020
+#define INBOUND_INTERRUPT_CAUSE_REGISTER_PCI_SIDE 0x024
+#define INBOUND_INTERRUPT_MASK_REGISTER_PCI_SIDE 0x028
+#define OUTBOUND_DOORBELL_REGISTER_PCI_SIDE 0x02c
+#define OUTBOUND_INTERRUPT_CAUSE_REGISTER_PCI_SIDE 0x030
+#define OUTBOUND_INTERRUPT_MASK_REGISTER_PCI_SIDE 0x034
+#define INBOUND_QUEUE_PORT_VIRTUAL_REGISTER_PCI_SIDE 0x040
+#define OUTBOUND_QUEUE_PORT_VIRTUAL_REGISTER_PCI_SIDE 0x044
+#define QUEUE_CONTROL_REGISTER_PCI_SIDE 0x050
+#define QUEUE_BASE_ADDRESS_REGISTER_PCI_SIDE 0x054
+#define INBOUND_FREE_HEAD_POINTER_REGISTER_PCI_SIDE 0x060
+#define INBOUND_FREE_TAIL_POINTER_REGISTER_PCI_SIDE 0x064
+#define INBOUND_POST_HEAD_POINTER_REGISTER_PCI_SIDE 0x068
+#define INBOUND_POST_TAIL_POINTER_REGISTER_PCI_SIDE 0x06c
+#define OUTBOUND_FREE_HEAD_POINTER_REGISTER_PCI_SIDE 0x070
+#define OUTBOUND_FREE_TAIL_POINTER_REGISTER_PCI_SIDE 0x074
+#define OUTBOUND_POST_HEAD_POINTER_REGISTER_PCI_SIDE 0x078
+#define OUTBOUND_POST_TAIL_POINTER_REGISTER_PCI_SIDE 0x07c
+
+#define INBOUND_MESSAGE_REGISTER0_CPU_SIDE 0x1c10
+#define INBOUND_MESSAGE_REGISTER1_CPU_SIDE 0x1c14
+#define OUTBOUND_MESSAGE_REGISTER0_CPU_SIDE 0x1c18
+#define OUTBOUND_MESSAGE_REGISTER1_CPU_SIDE 0x1c1c
+#define INBOUND_DOORBELL_REGISTER_CPU_SIDE 0x1c20
+#define INBOUND_INTERRUPT_CAUSE_REGISTER_CPU_SIDE 0x1c24
+#define INBOUND_INTERRUPT_MASK_REGISTER_CPU_SIDE 0x1c28
+#define OUTBOUND_DOORBELL_REGISTER_CPU_SIDE 0x1c2c
+#define OUTBOUND_INTERRUPT_CAUSE_REGISTER_CPU_SIDE 0x1c30
+#define OUTBOUND_INTERRUPT_MASK_REGISTER_CPU_SIDE 0x1c34
+#define INBOUND_QUEUE_PORT_VIRTUAL_REGISTER_CPU_SIDE 0x1c40
+#define OUTBOUND_QUEUE_PORT_VIRTUAL_REGISTER_CPU_SIDE 0x1c44
+#define QUEUE_CONTROL_REGISTER_CPU_SIDE 0x1c50
+#define QUEUE_BASE_ADDRESS_REGISTER_CPU_SIDE 0x1c54
+#define INBOUND_FREE_HEAD_POINTER_REGISTER_CPU_SIDE 0x1c60
+#define INBOUND_FREE_TAIL_POINTER_REGISTER_CPU_SIDE 0x1c64
+#define INBOUND_POST_HEAD_POINTER_REGISTER_CPU_SIDE 0x1c68
+#define INBOUND_POST_TAIL_POINTER_REGISTER_CPU_SIDE 0x1c6c
+#define OUTBOUND_FREE_HEAD_POINTER_REGISTER_CPU_SIDE 0x1c70
+#define OUTBOUND_FREE_TAIL_POINTER_REGISTER_CPU_SIDE 0x1c74
+#define OUTBOUND_POST_HEAD_POINTER_REGISTER_CPU_SIDE 0x1c78
+#define OUTBOUND_POST_TAIL_POINTER_REGISTER_CPU_SIDE 0x1c7c
+
+/*
+ * Register encodings
+ */
+#define GT_CPU_ENDIAN_SHF 12
+#define GT_CPU_ENDIAN_MSK (MSK(1) << GT_CPU_ENDIAN_SHF)
+#define GT_CPU_ENDIAN_BIT GT_CPU_ENDIAN_MSK
+#define GT_CPU_WR_SHF 16
+#define GT_CPU_WR_MSK (MSK(1) << GT_CPU_WR_SHF)
+#define GT_CPU_WR_BIT GT_CPU_WR_MSK
+#define GT_CPU_WR_DXDXDXDX 0
+#define GT_CPU_WR_DDDD 1
+
+
+#define GT_PCI_DCRM_SHF 21
+#define GT_PCI_LD_SHF 0
+#define GT_PCI_LD_MSK (MSK(15) << GT_PCI_LD_SHF)
+#define GT_PCI_HD_SHF 0
+#define GT_PCI_HD_MSK (MSK(7) << GT_PCI_HD_SHF)
+#define GT_PCI_REMAP_SHF 0
+#define GT_PCI_REMAP_MSK (MSK(11) << GT_PCI_REMAP_SHF)
+
+
+#define GT_CFGADDR_CFGEN_SHF 31
+#define GT_CFGADDR_CFGEN_MSK (MSK(1) << GT_CFGADDR_CFGEN_SHF)
+#define GT_CFGADDR_CFGEN_BIT GT_CFGADDR_CFGEN_MSK
+
+#define GT_CFGADDR_BUSNUM_SHF 16
+#define GT_CFGADDR_BUSNUM_MSK (MSK(8) << GT_CFGADDR_BUSNUM_SHF)
+
+#define GT_CFGADDR_DEVNUM_SHF 11
+#define GT_CFGADDR_DEVNUM_MSK (MSK(5) << GT_CFGADDR_DEVNUM_SHF)
+
+#define GT_CFGADDR_FUNCNUM_SHF 8
+#define GT_CFGADDR_FUNCNUM_MSK (MSK(3) << GT_CFGADDR_FUNCNUM_SHF)
+
+#define GT_CFGADDR_REGNUM_SHF 2
+#define GT_CFGADDR_REGNUM_MSK (MSK(6) << GT_CFGADDR_REGNUM_SHF)
+
+
+#define GT_SDRAM_BM_ORDER_SHF 2
+#define GT_SDRAM_BM_ORDER_MSK (MSK(1) << GT_SDRAM_BM_ORDER_SHF)
+#define GT_SDRAM_BM_ORDER_BIT GT_SDRAM_BM_ORDER_MSK
+#define GT_SDRAM_BM_ORDER_SUB 1
+#define GT_SDRAM_BM_ORDER_LIN 0
+
+#define GT_SDRAM_BM_RSVD_ALL1 0xffb
+
+
+#define GT_SDRAM_ADDRDECODE_ADDR_SHF 0
+#define GT_SDRAM_ADDRDECODE_ADDR_MSK (MSK(3) << GT_SDRAM_ADDRDECODE_ADDR_SHF)
+#define GT_SDRAM_ADDRDECODE_ADDR_0 0
+#define GT_SDRAM_ADDRDECODE_ADDR_1 1
+#define GT_SDRAM_ADDRDECODE_ADDR_2 2
+#define GT_SDRAM_ADDRDECODE_ADDR_3 3
+#define GT_SDRAM_ADDRDECODE_ADDR_4 4
+#define GT_SDRAM_ADDRDECODE_ADDR_5 5
+#define GT_SDRAM_ADDRDECODE_ADDR_6 6
+#define GT_SDRAM_ADDRDECODE_ADDR_7 7
+
+
+#define GT_SDRAM_B0_CASLAT_SHF 0
+#define GT_SDRAM_B0_CASLAT_MSK	(MSK(2) << GT_SDRAM_B0_CASLAT_SHF)
+#define GT_SDRAM_B0_CASLAT_2 1
+#define GT_SDRAM_B0_CASLAT_3 2
+
+#define GT_SDRAM_B0_FTDIS_SHF 2
+#define GT_SDRAM_B0_FTDIS_MSK (MSK(1) << GT_SDRAM_B0_FTDIS_SHF)
+#define GT_SDRAM_B0_FTDIS_BIT GT_SDRAM_B0_FTDIS_MSK
+
+#define GT_SDRAM_B0_SRASPRCHG_SHF 3
+#define GT_SDRAM_B0_SRASPRCHG_MSK (MSK(1) << GT_SDRAM_B0_SRASPRCHG_SHF)
+#define GT_SDRAM_B0_SRASPRCHG_BIT GT_SDRAM_B0_SRASPRCHG_MSK
+#define GT_SDRAM_B0_SRASPRCHG_2 0
+#define GT_SDRAM_B0_SRASPRCHG_3 1
+
+#define GT_SDRAM_B0_B0COMPAB_SHF 4
+#define GT_SDRAM_B0_B0COMPAB_MSK (MSK(1) << GT_SDRAM_B0_B0COMPAB_SHF)
+#define GT_SDRAM_B0_B0COMPAB_BIT GT_SDRAM_B0_B0COMPAB_MSK
+
+#define GT_SDRAM_B0_64BITINT_SHF 5
+#define GT_SDRAM_B0_64BITINT_MSK (MSK(1) << GT_SDRAM_B0_64BITINT_SHF)
+#define GT_SDRAM_B0_64BITINT_BIT GT_SDRAM_B0_64BITINT_MSK
+#define GT_SDRAM_B0_64BITINT_2 0
+#define GT_SDRAM_B0_64BITINT_4 1
+
+#define GT_SDRAM_B0_BW_SHF 6
+#define GT_SDRAM_B0_BW_MSK (MSK(1) << GT_SDRAM_B0_BW_SHF)
+#define GT_SDRAM_B0_BW_BIT GT_SDRAM_B0_BW_MSK
+#define GT_SDRAM_B0_BW_32 0
+#define GT_SDRAM_B0_BW_64 1
+
+#define GT_SDRAM_B0_BLODD_SHF 7
+#define GT_SDRAM_B0_BLODD_MSK (MSK(1) << GT_SDRAM_B0_BLODD_SHF)
+#define GT_SDRAM_B0_BLODD_BIT GT_SDRAM_B0_BLODD_MSK
+
+#define GT_SDRAM_B0_PAR_SHF 8
+#define GT_SDRAM_B0_PAR_MSK (MSK(1) << GT_SDRAM_B0_PAR_SHF)
+#define GT_SDRAM_B0_PAR_BIT GT_SDRAM_B0_PAR_MSK
+
+#define GT_SDRAM_B0_BYPASS_SHF 9
+#define GT_SDRAM_B0_BYPASS_MSK (MSK(1) << GT_SDRAM_B0_BYPASS_SHF)
+#define GT_SDRAM_B0_BYPASS_BIT GT_SDRAM_B0_BYPASS_MSK
+
+#define GT_SDRAM_B0_SRAS2SCAS_SHF 10
+#define GT_SDRAM_B0_SRAS2SCAS_MSK (MSK(1) << GT_SDRAM_B0_SRAS2SCAS_SHF)
+#define GT_SDRAM_B0_SRAS2SCAS_BIT GT_SDRAM_B0_SRAS2SCAS_MSK
+#define GT_SDRAM_B0_SRAS2SCAS_2 0
+#define GT_SDRAM_B0_SRAS2SCAS_3 1
+
+#define GT_SDRAM_B0_SIZE_SHF 11
+#define GT_SDRAM_B0_SIZE_MSK (MSK(1) << GT_SDRAM_B0_SIZE_SHF)
+#define GT_SDRAM_B0_SIZE_BIT GT_SDRAM_B0_SIZE_MSK
+#define GT_SDRAM_B0_SIZE_16M 0
+#define GT_SDRAM_B0_SIZE_64M 1
+
+#define GT_SDRAM_B0_EXTPAR_SHF 12
+#define GT_SDRAM_B0_EXTPAR_MSK (MSK(1) << GT_SDRAM_B0_EXTPAR_SHF)
+#define GT_SDRAM_B0_EXTPAR_BIT GT_SDRAM_B0_EXTPAR_MSK
+
+#define GT_SDRAM_B0_BLEN_SHF 13
+#define GT_SDRAM_B0_BLEN_MSK (MSK(1) << GT_SDRAM_B0_BLEN_SHF)
+#define GT_SDRAM_B0_BLEN_BIT GT_SDRAM_B0_BLEN_MSK
+#define GT_SDRAM_B0_BLEN_8 0
+#define GT_SDRAM_B0_BLEN_4 1
+
+
+#define GT_SDRAM_CFG_REFINT_SHF 0
+#define GT_SDRAM_CFG_REFINT_MSK (MSK(14) << GT_SDRAM_CFG_REFINT_SHF)
+
+#define GT_SDRAM_CFG_NINTERLEAVE_SHF 14
+#define GT_SDRAM_CFG_NINTERLEAVE_MSK (MSK(1) << GT_SDRAM_CFG_NINTERLEAVE_SHF)
+#define GT_SDRAM_CFG_NINTERLEAVE_BIT GT_SDRAM_CFG_NINTERLEAVE_MSK
+
+#define GT_SDRAM_CFG_RMW_SHF 15
+#define GT_SDRAM_CFG_RMW_MSK (MSK(1) << GT_SDRAM_CFG_RMW_SHF)
+#define GT_SDRAM_CFG_RMW_BIT GT_SDRAM_CFG_RMW_MSK
+
+#define GT_SDRAM_CFG_NONSTAGREF_SHF 16
+#define GT_SDRAM_CFG_NONSTAGREF_MSK (MSK(1) << GT_SDRAM_CFG_NONSTAGREF_SHF)
+#define GT_SDRAM_CFG_NONSTAGREF_BIT GT_SDRAM_CFG_NONSTAGREF_MSK
+
+#define GT_SDRAM_CFG_DUPCNTL_SHF 19
+#define GT_SDRAM_CFG_DUPCNTL_MSK (MSK(1) << GT_SDRAM_CFG_DUPCNTL_SHF)
+#define GT_SDRAM_CFG_DUPCNTL_BIT GT_SDRAM_CFG_DUPCNTL_MSK
+
+#define GT_SDRAM_CFG_DUPBA_SHF 20
+#define GT_SDRAM_CFG_DUPBA_MSK (MSK(1) << GT_SDRAM_CFG_DUPBA_SHF)
+#define GT_SDRAM_CFG_DUPBA_BIT GT_SDRAM_CFG_DUPBA_MSK
+
+#define GT_SDRAM_CFG_DUPEOT0_SHF 21
+#define GT_SDRAM_CFG_DUPEOT0_MSK (MSK(1) << GT_SDRAM_CFG_DUPEOT0_SHF)
+#define GT_SDRAM_CFG_DUPEOT0_BIT GT_SDRAM_CFG_DUPEOT0_MSK
+
+#define GT_SDRAM_CFG_DUPEOT1_SHF 22
+#define GT_SDRAM_CFG_DUPEOT1_MSK (MSK(1) << GT_SDRAM_CFG_DUPEOT1_SHF)
+#define GT_SDRAM_CFG_DUPEOT1_BIT GT_SDRAM_CFG_DUPEOT1_MSK
+
+#define GT_SDRAM_OPMODE_OP_SHF 0
+#define GT_SDRAM_OPMODE_OP_MSK (MSK(3) << GT_SDRAM_OPMODE_OP_SHF)
+#define GT_SDRAM_OPMODE_OP_NORMAL 0
+#define GT_SDRAM_OPMODE_OP_NOP 1
+#define GT_SDRAM_OPMODE_OP_PRCHG 2
+#define GT_SDRAM_OPMODE_OP_MODE 3
+#define GT_SDRAM_OPMODE_OP_CBR 4
+
+#define GT_TC_CONTROL_ENTC0_SHF 0
+#define GT_TC_CONTROL_ENTC0_MSK (MSK(1) << GT_TC_CONTROL_ENTC0_SHF)
+#define GT_TC_CONTROL_ENTC0_BIT GT_TC_CONTROL_ENTC0_MSK
+#define GT_TC_CONTROL_SELTC0_SHF 1
+#define GT_TC_CONTROL_SELTC0_MSK (MSK(1) << GT_TC_CONTROL_SELTC0_SHF)
+#define GT_TC_CONTROL_SELTC0_BIT GT_TC_CONTROL_SELTC0_MSK
+
+
+#define GT_PCI0_BARE_SWSCS3BOOTDIS_SHF 0
+#define GT_PCI0_BARE_SWSCS3BOOTDIS_MSK (MSK(1) << GT_PCI0_BARE_SWSCS3BOOTDIS_SHF)
+#define GT_PCI0_BARE_SWSCS3BOOTDIS_BIT GT_PCI0_BARE_SWSCS3BOOTDIS_MSK
+
+#define GT_PCI0_BARE_SWSCS32DIS_SHF 1
+#define GT_PCI0_BARE_SWSCS32DIS_MSK (MSK(1) << GT_PCI0_BARE_SWSCS32DIS_SHF)
+#define GT_PCI0_BARE_SWSCS32DIS_BIT GT_PCI0_BARE_SWSCS32DIS_MSK
+
+#define GT_PCI0_BARE_SWSCS10DIS_SHF 2
+#define GT_PCI0_BARE_SWSCS10DIS_MSK (MSK(1) << GT_PCI0_BARE_SWSCS10DIS_SHF)
+#define GT_PCI0_BARE_SWSCS10DIS_BIT GT_PCI0_BARE_SWSCS10DIS_MSK
+
+#define GT_PCI0_BARE_INTIODIS_SHF 3
+#define GT_PCI0_BARE_INTIODIS_MSK (MSK(1) << GT_PCI0_BARE_INTIODIS_SHF)
+#define GT_PCI0_BARE_INTIODIS_BIT GT_PCI0_BARE_INTIODIS_MSK
+
+#define GT_PCI0_BARE_INTMEMDIS_SHF 4
+#define GT_PCI0_BARE_INTMEMDIS_MSK (MSK(1) << GT_PCI0_BARE_INTMEMDIS_SHF)
+#define GT_PCI0_BARE_INTMEMDIS_BIT GT_PCI0_BARE_INTMEMDIS_MSK
+
+#define GT_PCI0_BARE_CS3BOOTDIS_SHF 5
+#define GT_PCI0_BARE_CS3BOOTDIS_MSK (MSK(1) << GT_PCI0_BARE_CS3BOOTDIS_SHF)
+#define GT_PCI0_BARE_CS3BOOTDIS_BIT GT_PCI0_BARE_CS3BOOTDIS_MSK
+
+#define GT_PCI0_BARE_CS20DIS_SHF 6
+#define GT_PCI0_BARE_CS20DIS_MSK (MSK(1) << GT_PCI0_BARE_CS20DIS_SHF)
+#define GT_PCI0_BARE_CS20DIS_BIT GT_PCI0_BARE_CS20DIS_MSK
+
+#define GT_PCI0_BARE_SCS32DIS_SHF 7
+#define GT_PCI0_BARE_SCS32DIS_MSK (MSK(1) << GT_PCI0_BARE_SCS32DIS_SHF)
+#define GT_PCI0_BARE_SCS32DIS_BIT GT_PCI0_BARE_SCS32DIS_MSK
+
+#define GT_PCI0_BARE_SCS10DIS_SHF 8
+#define GT_PCI0_BARE_SCS10DIS_MSK (MSK(1) << GT_PCI0_BARE_SCS10DIS_SHF)
+#define GT_PCI0_BARE_SCS10DIS_BIT GT_PCI0_BARE_SCS10DIS_MSK
+
+
+#define GT_INTRCAUSE_MASABORT0_SHF 18
+#define GT_INTRCAUSE_MASABORT0_MSK (MSK(1) << GT_INTRCAUSE_MASABORT0_SHF)
+#define GT_INTRCAUSE_MASABORT0_BIT GT_INTRCAUSE_MASABORT0_MSK
+
+#define GT_INTRCAUSE_TARABORT0_SHF 19
+#define GT_INTRCAUSE_TARABORT0_MSK (MSK(1) << GT_INTRCAUSE_TARABORT0_SHF)
+#define GT_INTRCAUSE_TARABORT0_BIT GT_INTRCAUSE_TARABORT0_MSK
+
+
+#define GT_PCI0_CFGADDR_REGNUM_SHF 2
+#define GT_PCI0_CFGADDR_REGNUM_MSK (MSK(6) << GT_PCI0_CFGADDR_REGNUM_SHF)
+#define GT_PCI0_CFGADDR_FUNCTNUM_SHF 8
+#define GT_PCI0_CFGADDR_FUNCTNUM_MSK (MSK(3) << GT_PCI0_CFGADDR_FUNCTNUM_SHF)
+#define GT_PCI0_CFGADDR_DEVNUM_SHF 11
+#define GT_PCI0_CFGADDR_DEVNUM_MSK (MSK(5) << GT_PCI0_CFGADDR_DEVNUM_SHF)
+#define GT_PCI0_CFGADDR_BUSNUM_SHF 16
+#define GT_PCI0_CFGADDR_BUSNUM_MSK (MSK(8) << GT_PCI0_CFGADDR_BUSNUM_SHF)
+#define GT_PCI0_CFGADDR_CONFIGEN_SHF 31
+#define GT_PCI0_CFGADDR_CONFIGEN_MSK (MSK(1) << GT_PCI0_CFGADDR_CONFIGEN_SHF)
+#define GT_PCI0_CFGADDR_CONFIGEN_BIT GT_PCI0_CFGADDR_CONFIGEN_MSK
+
+#define GT_PCI0_CMD_MBYTESWAP_SHF 0
+#define GT_PCI0_CMD_MBYTESWAP_MSK (MSK(1) << GT_PCI0_CMD_MBYTESWAP_SHF)
+#define GT_PCI0_CMD_MBYTESWAP_BIT GT_PCI0_CMD_MBYTESWAP_MSK
+#define GT_PCI0_CMD_MWORDSWAP_SHF 10
+#define GT_PCI0_CMD_MWORDSWAP_MSK (MSK(1) << GT_PCI0_CMD_MWORDSWAP_SHF)
+#define GT_PCI0_CMD_MWORDSWAP_BIT GT_PCI0_CMD_MWORDSWAP_MSK
+#define GT_PCI0_CMD_SBYTESWAP_SHF 16
+#define GT_PCI0_CMD_SBYTESWAP_MSK (MSK(1) << GT_PCI0_CMD_SBYTESWAP_SHF)
+#define GT_PCI0_CMD_SBYTESWAP_BIT GT_PCI0_CMD_SBYTESWAP_MSK
+#define GT_PCI0_CMD_SWORDSWAP_SHF 11
+#define GT_PCI0_CMD_SWORDSWAP_MSK (MSK(1) << GT_PCI0_CMD_SWORDSWAP_SHF)
+#define GT_PCI0_CMD_SWORDSWAP_BIT GT_PCI0_CMD_SWORDSWAP_MSK
+
+#define GT_INTR_T0EXP_SHF 8
+#define GT_INTR_T0EXP_MSK (MSK(1) << GT_INTR_T0EXP_SHF)
+#define GT_INTR_T0EXP_BIT GT_INTR_T0EXP_MSK
+#define GT_INTR_RETRYCTR0_SHF 20
+#define GT_INTR_RETRYCTR0_MSK (MSK(1) << GT_INTR_RETRYCTR0_SHF)
+#define GT_INTR_RETRYCTR0_BIT GT_INTR_RETRYCTR0_MSK
+
+/*
+ * Misc
+ */
+#define GT_DEF_PCI0_IO_BASE 0x10000000UL
+#define GT_DEF_PCI0_IO_SIZE 0x02000000UL
+#define GT_DEF_PCI0_MEM0_BASE 0x12000000UL
+#define GT_DEF_PCI0_MEM0_SIZE 0x02000000UL
+#define GT_DEF_BASE 0x14000000UL
+
+#define GT_MAX_BANKSIZE (256 * 1024 * 1024) /* Max 256MB bank */
+#define GT_LATTIM_MIN 6 /* Minimum lat */
+
+/*
+ * The platform's <mach-gt64120.h> must define GT64120_BASE; the accessors
+ * below then provide 32-bit register access:
+ *
+ *	GT_READ(ofs)		- read a GT64120 register
+ *	GT_WRITE(ofs, data)	- write a GT64120 register
+ *
+ * TIMER	- gt64120 timer irq, temporary solution until
+ *		  full gt64120 cascade interrupt support is in place
+ */
+
+#include <mach-gt64120.h>
+
+/*
+ * Because of an error/peculiarity in the Galileo chip, we need to swap the
+ * bytes when running big-endian.  We also provide non-swapping versions.
+ */
+#define __GT_READ(ofs) \
+ (*(volatile u32 *)(GT64120_BASE+(ofs)))
+#define __GT_WRITE(ofs, data) \
+ do { *(volatile u32 *)(GT64120_BASE+(ofs)) = (data); } while (0)
+#define GT_READ(ofs) le32_to_cpu(__GT_READ(ofs))
+#define GT_WRITE(ofs, data) __GT_WRITE(ofs, cpu_to_le32(data))
+
+extern void gt641xx_set_base_clock(unsigned int clock);
+extern int gt641xx_timer0_state(void);
+
+#endif /* _ASM_GT64120_H */
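
For illustration only (not part of the patch above; the function name is hypothetical), the swapping accessors defined in this header are typically used like this to acknowledge a GT-64120 timer 0 interrupt:

	#include <linux/types.h>
	#include <asm/gt64120.h>

	static void example_ack_gt64120_timer0(void)
	{
		u32 cause = GT_READ(GT_INTRCAUSE_OFS);

		if (cause & GT_INTR_T0EXP_MSK)
			/* write 0 to the bit being acknowledged, 1s elsewhere */
			GT_WRITE(GT_INTRCAUSE_OFS, ~(u32)GT_INTR_T0EXP_MSK);
	}
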
diff --git a/arch/mips/include/asm/hardirq.h b/arch/mips/include/asm/hardirq.h
new file mode 100644
index 000000000..c977a86c2
--- /dev/null
+++ b/arch/mips/include/asm/hardirq.h
@@ -0,0 +1,18 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1997, 98, 99, 2000, 01, 05 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+#ifndef _ASM_HARDIRQ_H
+#define _ASM_HARDIRQ_H
+
+extern void ack_bad_irq(unsigned int irq);
+#define ack_bad_irq ack_bad_irq
+
+#include <asm-generic/hardirq.h>
+
+#endif /* _ASM_HARDIRQ_H */
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
new file mode 100644
index 000000000..f855478d1
--- /dev/null
+++ b/arch/mips/include/asm/hazards.h
@@ -0,0 +1,422 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 04, 07 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) MIPS Technologies, Inc.
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef _ASM_HAZARDS_H
+#define _ASM_HAZARDS_H
+
+#include <linux/stringify.h>
+#include <asm/compiler.h>
+
+#define ___ssnop \
+ sll $0, $0, 1
+
+#define ___ehb \
+ sll $0, $0, 3
+
+/*
+ * TLB hazards
+ */
+#if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
+ defined(CONFIG_CPU_MIPSR6)) && \
+ !defined(CONFIG_CPU_CAVIUM_OCTEON) && !defined(CONFIG_CPU_LOONGSON64)
+
+/*
+ * MIPSR2 defines ehb for hazard avoidance
+ */
+
+#define __mtc0_tlbw_hazard \
+ ___ehb
+
+#define __mtc0_tlbr_hazard \
+ ___ehb
+
+#define __tlbw_use_hazard \
+ ___ehb
+
+#define __tlb_read_hazard \
+ ___ehb
+
+#define __tlb_probe_hazard \
+ ___ehb
+
+#define __irq_enable_hazard \
+ ___ehb
+
+#define __irq_disable_hazard \
+ ___ehb
+
+#define __back_to_back_c0_hazard \
+ ___ehb
+
+/*
+ * gcc has a tradition of miscompiling the previous construct using the
+ * address of a label as an argument to inline assembler.  Gas, on the other
+ * hand, has the annoying difference between la and dla, which are only
+ * usable for 32-bit and 64-bit code respectively, so they can't be used
+ * without conditional compilation.  The alternative is switching the
+ * assembler to 64-bit code, which happens to work right even for 32-bit code...
+ */
+#define instruction_hazard() \
+do { \
+ unsigned long tmp; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
+ " dla %0, 1f \n" \
+ " jr.hb %0 \n" \
+ " .set pop \n" \
+ "1: \n" \
+ : "=r" (tmp)); \
+} while (0)
+
+#elif (defined(CONFIG_CPU_MIPSR1) && !defined(CONFIG_MIPS_ALCHEMY)) || \
+ defined(CONFIG_CPU_BMIPS)
+
+/*
+ * These are slightly complicated by the fact that we guarantee R1 kernels to
+ * run fine on R2 processors.
+ */
+
+#define __mtc0_tlbw_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ehb
+
+#define __mtc0_tlbr_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ehb
+
+#define __tlbw_use_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop; \
+ ___ehb
+
+#define __tlb_read_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop; \
+ ___ehb
+
+#define __tlb_probe_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop; \
+ ___ehb
+
+#define __irq_enable_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop; \
+ ___ehb
+
+#define __irq_disable_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop; \
+ ___ehb
+
+#define __back_to_back_c0_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop; \
+ ___ehb
+
+/*
+ * gcc has a tradition of miscompiling the previous construct using the
+ * address of a label as an argument to inline assembler.  Gas, on the other
+ * hand, has the annoying difference between la and dla, which are only
+ * usable for 32-bit and 64-bit code respectively, so they can't be used
+ * without conditional compilation.  The alternative is switching the
+ * assembler to 64-bit code, which happens to work right even for 32-bit code...
+ */
+#define __instruction_hazard() \
+do { \
+ unsigned long tmp; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set mips64r2 \n" \
+ " dla %0, 1f \n" \
+ " jr.hb %0 \n" \
+ " .set pop \n" \
+ "1: \n" \
+ : "=r" (tmp)); \
+} while (0)
+
+#define instruction_hazard() \
+do { \
+ if (cpu_has_mips_r2_r6) \
+ __instruction_hazard(); \
+} while (0)
+
+#elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
+ defined(CONFIG_CPU_LOONGSON2EF) || defined(CONFIG_CPU_LOONGSON64) || \
+ defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)
+
+/*
+ * R10000 rocks - all hazards are handled in hardware, so this becomes a no-brainer.
+ */
+
+#define __mtc0_tlbw_hazard
+
+#define __mtc0_tlbr_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_read_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard
+
+#define __back_to_back_c0_hazard
+
+#define instruction_hazard() do { } while (0)
+
+#elif defined(CONFIG_CPU_SB1)
+
+/*
+ * Mostly like R4000 for historic reasons
+ */
+#define __mtc0_tlbw_hazard
+
+#define __mtc0_tlbr_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_read_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop
+
+#define __back_to_back_c0_hazard
+
+#define instruction_hazard() do { } while (0)
+
+#else
+
+/*
+ * Finally the catchall case for all other processors including R4000, R4400,
+ * R4600, R4700, R5000, RM7000, NEC VR41xx etc.
+ *
+ * The taken branch will result in a two cycle penalty for the two killed
+ * instructions on R4000 / R4400. Other processors only have a single cycle
+ * hazard, so this is a nice trick to get optimal code for a range of
+ * processors.
+ */
+#define __mtc0_tlbw_hazard \
+ nop; \
+ nop
+
+#define __mtc0_tlbr_hazard \
+ nop; \
+ nop
+
+#define __tlbw_use_hazard \
+ nop; \
+ nop; \
+ nop
+
+#define __tlb_read_hazard \
+ nop; \
+ nop; \
+ nop
+
+#define __tlb_probe_hazard \
+ nop; \
+ nop; \
+ nop
+
+#define __irq_enable_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop
+
+#define __irq_disable_hazard \
+ nop; \
+ nop; \
+ nop
+
+#define __back_to_back_c0_hazard \
+ ___ssnop; \
+ ___ssnop; \
+ ___ssnop
+
+#define instruction_hazard() do { } while (0)
+
+#endif
+
+
+/* FPU hazards */
+
+#if defined(CONFIG_CPU_SB1)
+
+#define __enable_fpu_hazard \
+ .set push; \
+ .set mips64; \
+ .set noreorder; \
+ ___ssnop; \
+ bnezl $0, .+4; \
+ ___ssnop; \
+ .set pop
+
+#define __disable_fpu_hazard
+
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
+ defined(CONFIG_CPU_MIPSR6)
+
+#define __enable_fpu_hazard \
+ ___ehb
+
+#define __disable_fpu_hazard \
+ ___ehb
+
+#else
+
+#define __enable_fpu_hazard \
+ nop; \
+ nop; \
+ nop; \
+ nop
+
+#define __disable_fpu_hazard \
+ ___ehb
+
+#endif
+
+#ifdef __ASSEMBLY__
+
+#define _ssnop ___ssnop
+#define _ehb ___ehb
+#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
+#define mtc0_tlbr_hazard __mtc0_tlbr_hazard
+#define tlbw_use_hazard __tlbw_use_hazard
+#define tlb_read_hazard __tlb_read_hazard
+#define tlb_probe_hazard __tlb_probe_hazard
+#define irq_enable_hazard __irq_enable_hazard
+#define irq_disable_hazard __irq_disable_hazard
+#define back_to_back_c0_hazard __back_to_back_c0_hazard
+#define enable_fpu_hazard __enable_fpu_hazard
+#define disable_fpu_hazard __disable_fpu_hazard
+
+#else
+
+#define _ssnop() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(___ssnop) \
+ ); \
+} while (0)
+
+#define _ehb() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(___ehb) \
+ ); \
+} while (0)
+
+
+#define mtc0_tlbw_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__mtc0_tlbw_hazard) \
+ ); \
+} while (0)
+
+
+#define mtc0_tlbr_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__mtc0_tlbr_hazard) \
+ ); \
+} while (0)
+
+
+#define tlbw_use_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__tlbw_use_hazard) \
+ ); \
+} while (0)
+
+
+#define tlb_read_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__tlb_read_hazard) \
+ ); \
+} while (0)
+
+
+#define tlb_probe_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__tlb_probe_hazard) \
+ ); \
+} while (0)
+
+
+#define irq_enable_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__irq_enable_hazard) \
+ ); \
+} while (0)
+
+
+#define irq_disable_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__irq_disable_hazard) \
+ ); \
+} while (0)
+
+
+#define back_to_back_c0_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__back_to_back_c0_hazard) \
+ ); \
+} while (0)
+
+
+#define enable_fpu_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__enable_fpu_hazard) \
+ ); \
+} while (0)
+
+
+#define disable_fpu_hazard() \
+do { \
+ __asm__ __volatile__( \
+ __stringify(__disable_fpu_hazard) \
+ ); \
+} while (0)
+
+/*
+ * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
+ */
+extern void mips_ihb(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_HAZARDS_H */
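
As a minimal usage sketch (not part of the patch above; the function is hypothetical), the C-side barrier macros defined in this header are placed between dependent CP0 accesses, e.g.:

	#include <asm/hazards.h>
	#include <asm/mipsregs.h>

	static void example_rearm_timer(unsigned int delta)
	{
		/* mtc0 to Compare, then let it settle before further CP0 use */
		write_c0_compare(read_c0_count() + delta);
		back_to_back_c0_hazard();
	}
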
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
new file mode 100644
index 000000000..9f021cf51
--- /dev/null
+++ b/arch/mips/include/asm/highmem.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * highmem.h: virtual kernel memory mappings for high memory
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ * Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terabyte physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <asm/cpu-features.h>
+#include <asm/kmap_types.h>
+
+/* declarations for highmem.c */
+extern unsigned long highstart_pfn, highend_pfn;
+
+extern pte_t *pkmap_page_table;
+
+/*
+ * Right now we initialize only a single pte table.  It can be extended
+ * easily; subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) || defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
+#define LAST_PKMAP 512
+#else
+#define LAST_PKMAP 1024
+#endif
+
+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
+#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define ARCH_HAS_KMAP_FLUSH_TLB
+extern void kmap_flush_tlb(unsigned long addr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
+
+#define flush_cache_kmaps() BUG_ON(cpu_has_dc_aliases)
+
+extern void kmap_init(void);
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_HIGHMEM_H */
diff --git a/arch/mips/include/asm/hpet.h b/arch/mips/include/asm/hpet.h
new file mode 100644
index 000000000..d47268ece
--- /dev/null
+++ b/arch/mips/include/asm/hpet.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_HPET_H
+#define _ASM_HPET_H
+
+#ifdef CONFIG_RS780_HPET
+
+#define HPET_MMAP_SIZE 1024
+
+#define HPET_ID 0x000
+#define HPET_PERIOD 0x004
+#define HPET_CFG 0x010
+#define HPET_STATUS 0x020
+#define HPET_COUNTER 0x0f0
+
+#define HPET_Tn_CFG(n)		(0x100 + 0x20 * (n))
+#define HPET_Tn_CMP(n)		(0x108 + 0x20 * (n))
+#define HPET_Tn_ROUTE(n)	(0x110 + 0x20 * (n))
+
+#define HPET_T0_IRS 0x001
+#define HPET_T1_IRS 0x002
+#define HPET_T3_IRS 0x004
+
+#define HPET_T0_CFG 0x100
+#define HPET_T0_CMP 0x108
+#define HPET_T0_ROUTE 0x110
+#define HPET_T1_CFG 0x120
+#define HPET_T1_CMP 0x128
+#define HPET_T1_ROUTE 0x130
+#define HPET_T2_CFG 0x140
+#define HPET_T2_CMP 0x148
+#define HPET_T2_ROUTE 0x150
+
+#define HPET_ID_REV 0x000000ff
+#define HPET_ID_NUMBER 0x00001f00
+#define HPET_ID_64BIT 0x00002000
+#define HPET_ID_LEGSUP 0x00008000
+#define HPET_ID_VENDOR 0xffff0000
+#define HPET_ID_NUMBER_SHIFT 8
+#define HPET_ID_VENDOR_SHIFT 16
+
+#define HPET_CFG_ENABLE 0x001
+#define HPET_CFG_LEGACY 0x002
+#define HPET_LEGACY_8254 2
+#define HPET_LEGACY_RTC 8
+
+#define HPET_TN_LEVEL 0x0002
+#define HPET_TN_ENABLE 0x0004
+#define HPET_TN_PERIODIC 0x0008
+#define HPET_TN_PERIODIC_CAP 0x0010
+#define HPET_TN_64BIT_CAP 0x0020
+#define HPET_TN_SETVAL 0x0040
+#define HPET_TN_32BIT 0x0100
+#define HPET_TN_ROUTE 0x3e00
+#define HPET_TN_FSB 0x4000
+#define HPET_TN_FSB_CAP 0x8000
+#define HPET_TN_ROUTE_SHIFT 9
+
+/* Max HPET period is 10^8 femtoseconds, as per the HPET spec */
+#define HPET_MAX_PERIOD 100000000UL
+/*
+ * Min HPET period is 10^5 femtoseconds, just for safety.  If it were less than
+ * this, the 32-bit HPET counter would wrap around in less than 0.5 sec.
+ */
+#define HPET_MIN_PERIOD 100000UL
+
+#define HPET_ADDR 0x20000
+#define HPET_MMIO_ADDR 0x90000e0000020000
+#define HPET_FREQ 14318780
+#define HPET_COMPARE_VAL ((HPET_FREQ + HZ / 2) / HZ)
+#define HPET_T0_IRQ 0
+
+extern void __init setup_hpet_timer(void);
+#endif /* CONFIG_RS780_HPET */
+#endif /* _ASM_HPET_H */
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
new file mode 100644
index 000000000..c2144409c
--- /dev/null
+++ b/arch/mips/include/asm/hugetlb.h
@@ -0,0 +1,86 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008, 2009 Cavium Networks, Inc.
+ */
+
+#ifndef __ASM_HUGETLB_H
+#define __ASM_HUGETLB_H
+
+#include <asm/page.h>
+
+#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr,
+ unsigned long len)
+{
+ unsigned long task_size = STACK_TOP;
+ struct hstate *h = hstate_file(file);
+
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+ if (addr & ~huge_page_mask(h))
+ return -EINVAL;
+ if (len > task_size)
+ return -ENOMEM;
+ if (task_size - len < addr)
+ return -EINVAL;
+ return 0;
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t clear;
+ pte_t pte = *ptep;
+
+ pte_val(clear) = (unsigned long)invalid_pte_table;
+ set_pte_at(mm, addr, ptep, clear);
+ return pte;
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ /*
+ * clear the huge pte entry firstly, so that the other smp threads will
+ * not get old pte entry after finishing flush_tlb_page and before
+ * setting new huge pte entry
+ */
+ huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ flush_tlb_page(vma, addr);
+}
+
+#define __HAVE_ARCH_HUGE_PTE_NONE
+static inline int huge_pte_none(pte_t pte)
+{
+ unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL;
+ return !val || (val == (unsigned long)invalid_pte_table);
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep, pte_t pte,
+ int dirty)
+{
+ int changed = !pte_same(*ptep, pte);
+
+ if (changed) {
+ set_pte_at(vma->vm_mm, addr, ptep, pte);
+ /*
+ * There could be some standard sized pages in there,
+ * get them all.
+ */
+ flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
+ }
+ return changed;
+}
+
+#include <asm-generic/hugetlb.h>
+
+#endif /* __ASM_HUGETLB_H */
diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
new file mode 100644
index 000000000..9e8ef5994
--- /dev/null
+++ b/arch/mips/include/asm/hw_irq.h
@@ -0,0 +1,20 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000, 2001, 2002 by Ralf Baechle
+ */
+#ifndef __ASM_HW_IRQ_H
+#define __ASM_HW_IRQ_H
+
+#include <linux/atomic.h>
+
+extern atomic_t irq_err_count;
+
+/*
+ * interrupt-retrigger: NOP for now. This may not be appropriate for all
+ * machines; we'll see ...
+ */
+
+#endif /* __ASM_HW_IRQ_H */
diff --git a/arch/mips/include/asm/i8259.h b/arch/mips/include/asm/i8259.h
new file mode 100644
index 000000000..a54b9649d
--- /dev/null
+++ b/arch/mips/include/asm/i8259.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-mips/i8259.h
+ *
+ * i8259A interrupt definitions.
+ *
+ * Copyright (C) 2003 Maciej W. Rozycki
+ * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef _ASM_I8259_H
+#define _ASM_I8259_H
+
+#include <linux/compiler.h>
+#include <linux/spinlock.h>
+
+#include <asm/io.h>
+#include <irq.h>
+
+/* i8259A PIC registers */
+#define PIC_MASTER_CMD 0x20
+#define PIC_MASTER_IMR 0x21
+#define PIC_MASTER_ISR PIC_MASTER_CMD
+#define PIC_MASTER_POLL PIC_MASTER_ISR
+#define PIC_MASTER_OCW3 PIC_MASTER_ISR
+#define PIC_SLAVE_CMD 0xa0
+#define PIC_SLAVE_IMR 0xa1
+
+/* i8259A PIC related values */
+#define PIC_CASCADE_IR 2
+#define MASTER_ICW4_DEFAULT 0x01
+#define SLAVE_ICW4_DEFAULT 0x01
+#define PIC_ICW4_AEOI 2
+
+extern raw_spinlock_t i8259A_lock;
+
+extern void make_8259A_irq(unsigned int irq);
+
+extern void init_i8259_irqs(void);
+extern struct irq_domain *__init_i8259_irqs(struct device_node *node);
+
+/**
+ * i8259_set_poll() - Override the i8259 polling function
+ * @poll: pointer to platform-specific polling function
+ *
+ * Call this to override the generic i8259 polling function, which directly
+ * accesses i8259 registers, with a platform specific one which may be faster
+ * in cases where hardware provides a more optimal means of polling for an
+ * interrupt.
+ */
+extern void i8259_set_poll(int (*poll)(void));
+
+/*
+ * Do the traditional i8259 interrupt polling thing. This is for the few
+ * cases where no better interrupt acknowledge method is available and we
+ * absolutely must touch the i8259.
+ */
+static inline int i8259_irq(void)
+{
+ int irq;
+
+ raw_spin_lock(&i8259A_lock);
+
+ /* Perform an interrupt acknowledge cycle on controller 1. */
+ outb(0x0C, PIC_MASTER_CMD); /* prepare for poll */
+ irq = inb(PIC_MASTER_CMD) & 7;
+ if (irq == PIC_CASCADE_IR) {
+ /*
+ * Interrupt is cascaded so perform interrupt
+ * acknowledge on controller 2.
+ */
+ outb(0x0C, PIC_SLAVE_CMD); /* prepare for poll */
+ irq = (inb(PIC_SLAVE_CMD) & 7) + 8;
+ }
+
+ if (unlikely(irq == 7)) {
+ /*
+ * This may be a spurious interrupt.
+ *
+ * Read the interrupt status register (ISR). If the most
+ * significant bit is not set then there is no valid
+ * interrupt.
+ */
+ outb(0x0B, PIC_MASTER_ISR); /* ISR register */
+		if (~inb(PIC_MASTER_ISR) & 0x80)
+ irq = -1;
+ }
+
+ raw_spin_unlock(&i8259A_lock);
+
+ return likely(irq >= 0) ? irq + I8259A_IRQ_BASE : irq;
+}
+
+#endif /* _ASM_I8259_H */
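
For illustration (not part of the patch above; the cascade handler and its registration are hypothetical), a platform typically dispatches the polled vector like this:

	#include <asm/i8259.h>
	#include <asm/irq.h>

	static void example_i8259_cascade_dispatch(void)
	{
		int irq = i8259_irq();	/* negative on a spurious interrupt */

		if (likely(irq >= 0))
			do_IRQ(irq);
	}
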
diff --git a/arch/mips/include/asm/ide.h b/arch/mips/include/asm/ide.h
new file mode 100644
index 000000000..bb674c3b0
--- /dev/null
+++ b/arch/mips/include/asm/ide.h
@@ -0,0 +1,13 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This file contains the MIPS architecture specific IDE code.
+ */
+#ifndef __ASM_IDE_H
+#define __ASM_IDE_H
+
+#include <ide.h>
+
+#endif /* __ASM_IDE_H */
diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h
new file mode 100644
index 000000000..0992cad9c
--- /dev/null
+++ b/arch/mips/include/asm/idle.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_IDLE_H
+#define __ASM_IDLE_H
+
+#include <linux/cpuidle.h>
+#include <linux/linkage.h>
+
+extern void (*cpu_wait)(void);
+extern void r4k_wait(void);
+extern asmlinkage void __r4k_wait(void);
+extern void r4k_wait_irqoff(void);
+
+static inline int using_rollback_handler(void)
+{
+ return cpu_wait == r4k_wait;
+}
+
+extern void __init check_wait(void);
+
+extern int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index);
+
+#define MIPS_CPUIDLE_WAIT_STATE {\
+ .enter = mips_cpuidle_wait_enter,\
+ .exit_latency = 1,\
+ .target_residency = 1,\
+ .power_usage = UINT_MAX,\
+ .name = "wait",\
+ .desc = "MIPS wait",\
+}
+
+#endif /* __ASM_IDLE_H */
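
As a sketch (not part of the patch above; the driver below is hypothetical), a platform cpuidle driver can reuse the generic wait state defined in this header as its first state:

	#include <linux/cpuidle.h>
	#include <linux/module.h>
	#include <asm/idle.h>

	static struct cpuidle_driver example_idle_driver = {
		.name		= "example-idle",
		.owner		= THIS_MODULE,
		.states		= {
			MIPS_CPUIDLE_WAIT_STATE,	/* state 0: plain MIPS "wait" */
		},
		.state_count	= 1,
	};
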
diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h
new file mode 100644
index 000000000..22912f784
--- /dev/null
+++ b/arch/mips/include/asm/inst.h
@@ -0,0 +1,88 @@
+/*
+ * Format of an instruction in memory.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 2000 by Ralf Baechle
+ * Copyright (C) 2006 by Thiemo Seufer
+ */
+#ifndef _ASM_INST_H
+#define _ASM_INST_H
+
+#include <uapi/asm/inst.h>
+
+/* HACHACHAHCAHC ... */
+
+/* In case some other massaging is needed, keep MIPSInst as wrapper */
+
+#define MIPSInst(x) x
+
+#define I_OPCODE_SFT 26
+#define MIPSInst_OPCODE(x) (MIPSInst(x) >> I_OPCODE_SFT)
+
+#define I_JTARGET_SFT 0
+#define MIPSInst_JTARGET(x) (MIPSInst(x) & 0x03ffffff)
+
+#define I_RS_SFT 21
+#define MIPSInst_RS(x) ((MIPSInst(x) & 0x03e00000) >> I_RS_SFT)
+
+#define I_RT_SFT 16
+#define MIPSInst_RT(x) ((MIPSInst(x) & 0x001f0000) >> I_RT_SFT)
+
+#define I_IMM_SFT 0
+#define MIPSInst_SIMM(x) ((int)((short)(MIPSInst(x) & 0xffff)))
+#define MIPSInst_UIMM(x) (MIPSInst(x) & 0xffff)
+
+#define I_CACHEOP_SFT 18
+#define MIPSInst_CACHEOP(x) ((MIPSInst(x) & 0x001c0000) >> I_CACHEOP_SFT)
+
+#define I_CACHESEL_SFT 16
+#define MIPSInst_CACHESEL(x) ((MIPSInst(x) & 0x00030000) >> I_CACHESEL_SFT)
+
+#define I_RD_SFT 11
+#define MIPSInst_RD(x) ((MIPSInst(x) & 0x0000f800) >> I_RD_SFT)
+
+#define I_RE_SFT 6
+#define MIPSInst_RE(x) ((MIPSInst(x) & 0x000007c0) >> I_RE_SFT)
+
+#define I_FUNC_SFT 0
+#define MIPSInst_FUNC(x) (MIPSInst(x) & 0x0000003f)
+
+#define I_FFMT_SFT 21
+#define MIPSInst_FFMT(x) ((MIPSInst(x) & 0x01e00000) >> I_FFMT_SFT)
+
+#define I_FT_SFT 16
+#define MIPSInst_FT(x) ((MIPSInst(x) & 0x001f0000) >> I_FT_SFT)
+
+#define I_FS_SFT 11
+#define MIPSInst_FS(x) ((MIPSInst(x) & 0x0000f800) >> I_FS_SFT)
+
+#define I_FD_SFT 6
+#define MIPSInst_FD(x) ((MIPSInst(x) & 0x000007c0) >> I_FD_SFT)
+
+#define I_FR_SFT 21
+#define MIPSInst_FR(x) ((MIPSInst(x) & 0x03e00000) >> I_FR_SFT)
+
+#define I_FMA_FUNC_SFT 2
+#define MIPSInst_FMA_FUNC(x) ((MIPSInst(x) & 0x0000003c) >> I_FMA_FUNC_SFT)
+
+#define I_FMA_FFMT_SFT 0
+#define MIPSInst_FMA_FFMT(x) (MIPSInst(x) & 0x00000003)
+
+typedef unsigned int mips_instruction;
+
+/* microMIPS instruction decode structure. Do NOT export!!! */
+struct mm_decoded_insn {
+ mips_instruction insn;
+ mips_instruction next_insn;
+ int pc_inc;
+ int next_pc_inc;
+ int micro_mips_mode;
+};
+
+/* Recode table from 16-bit register notation to 32-bit GPR. Do NOT export!!! */
+extern const int reg16to32[];
+
+#endif /* _ASM_INST_H */
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
new file mode 100644
index 000000000..78537aa23
--- /dev/null
+++ b/arch/mips/include/asm/io.h
@@ -0,0 +1,562 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995 Waldorf GmbH
+ * Copyright (C) 1994 - 2000, 06 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
+ * Author: Maciej W. Rozycki <macro@mips.com>
+ */
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#define ARCH_HAS_IOREMAP_WC
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/irqflags.h>
+
+#include <asm/addrspace.h>
+#include <asm/barrier.h>
+#include <asm/bug.h>
+#include <asm/byteorder.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm-generic/iomap.h>
+#include <asm/page.h>
+#include <asm/pgtable-bits.h>
+#include <asm/processor.h>
+#include <asm/string.h>
+#include <mangle-port.h>
+
+/*
+ * Raw operations are never swapped in software. OTOH values that raw
+ * operations are working on may or may not have been swapped by the bus
+ * hardware. An example use would be for flash memory that's used for
+ * execute in place.
+ */
+# define __raw_ioswabb(a, x) (x)
+# define __raw_ioswabw(a, x) (x)
+# define __raw_ioswabl(a, x) (x)
+# define __raw_ioswabq(a, x) (x)
+# define ____raw_ioswabq(a, x) (x)
+
+# define __relaxed_ioswabb ioswabb
+# define __relaxed_ioswabw ioswabw
+# define __relaxed_ioswabl ioswabl
+# define __relaxed_ioswabq ioswabq
+
+/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */
+
+/*
+ * On MIPS I/O ports are memory mapped, so we access them using normal
+ * load/store instructions. mips_io_port_base is the virtual address to
+ * which all ports are being mapped. For the sake of efficiency some code
+ * assumes that this is an address that can be loaded with a single lui
+ * instruction, so the lower 16 bits must be zero. Should be true on
+ * any sane architecture; generic code does not use this assumption.
+ */
+extern unsigned long mips_io_port_base;
+
+static inline void set_io_port_base(unsigned long base)
+{
+ mips_io_port_base = base;
+}
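+
+#if 0
+/*
+ * Illustrative sketch, not part of this header: platform setup code would
+ * typically point the port base at an uncached window, e.g. through KSEG1.
+ * MY_BOARD_IO_PHYS is a made-up physical base whose low 16 bits are zero.
+ */
+static void __init demo_board_io_init(void)
+{
+ set_io_port_base(CKSEG1ADDR(MY_BOARD_IO_PHYS));
+}
+#endif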
+
+/*
+ * Provide the necessary definitions for generic iomap. We make use of
+ * mips_io_port_base for iomap(), but we don't reserve any low addresses for
+ * use with I/O ports.
+ */
+
+#define HAVE_ARCH_PIO_SIZE
+#define PIO_OFFSET mips_io_port_base
+#define PIO_MASK IO_SPACE_LIMIT
+#define PIO_RESERVED 0x0UL
+
+/*
+ * Enforce in-order execution of data I/O. In the MIPS architecture
+ * these are equivalent to corresponding platform-specific memory
+ * barriers defined in <asm/barrier.h>. API pinched from PowerPC,
+ * with sync additionally defined.
+ */
+#define iobarrier_rw() mb()
+#define iobarrier_r() rmb()
+#define iobarrier_w() wmb()
+#define iobarrier_sync() iob()
+
+/*
+ * virt_to_phys - map virtual addresses to physical
+ * @address: address to remap
+ *
+ * The returned physical address is the physical (CPU) mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses directly mapped or allocated via kmalloc.
+ *
+ * This function does not give bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+static inline unsigned long virt_to_phys(volatile const void *address)
+{
+ return __pa(address);
+}
+
+/*
+ * phys_to_virt - map physical address to virtual
+ * @address: address to remap
+ *
+ * The returned virtual address is a current CPU mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses that have a kernel mapping.
+ *
+ * This function does not handle bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function.
+ */
+static inline void * phys_to_virt(unsigned long address)
+{
+ return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
+}
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ */
+static inline unsigned long isa_virt_to_bus(volatile void *address)
+{
+ return virt_to_phys(address);
+}
+
+static inline void *isa_bus_to_virt(unsigned long address)
+{
+ return phys_to_virt(address);
+}
+
+/*
+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
+ * are forbidden in portable PCI drivers.
+ *
+ * Allow them for x86 for legacy drivers, though.
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+ unsigned long prot_val);
+void iounmap(const volatile void __iomem *addr);
+
+/*
+ * ioremap - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ */
+#define ioremap(offset, size) \
+ ioremap_prot((offset), (size), _CACHE_UNCACHED)
+#define ioremap_uc ioremap
+
+/*
+ * ioremap_cache - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_cache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked cachable by
+ * the CPU. Also enables full write-combining. Useful for some
+ * memory-like regions on I/O busses.
+ */
+#define ioremap_cache(offset, size) \
+ ioremap_prot((offset), (size), _page_cachable_default)
+
+/*
+ * ioremap_wc - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_wc performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncachable
+ * but accelerated by means of the write-combining feature. It is
+ * specifically useful for PCIe prefetchable windows, which may vastly
+ * improve communication performance. If it was determined at boot time
+ * that the CPU CCA doesn't support UCA, the method falls back to the
+ * _CACHE_UNCACHED option (see the cpu_probe() method).
+ */
+#define ioremap_wc(offset, size) \
+ ioremap_prot((offset), (size), boot_cpu_data.writecombine)
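+
+#if 0
+/*
+ * Illustrative driver-side sketch, not part of this header: mapping a
+ * hypothetical device's register window with ioremap() and touching a
+ * register.  The physical address, window size and register offsets are
+ * made up for the example.
+ */
+static void __iomem *demo_map_device(void)
+{
+ void __iomem *regs = ioremap(0x1f010000, 0x1000);
+
+ if (!regs)
+  return NULL;
+
+ writel(0x1, regs + 0x04); /* hypothetical enable bit */
+ (void)readl(regs + 0x00); /* read back to post the write */
+ return regs;
+}
+#endif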
+
+#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON64)
+#define war_io_reorder_wmb() wmb()
+#else
+#define war_io_reorder_wmb() barrier()
+#endif
+
+#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq) \
+ \
+static inline void pfx##write##bwlq(type val, \
+ volatile void __iomem *mem) \
+{ \
+ volatile type *__mem; \
+ type __val; \
+ \
+ if (barrier) \
+ iobarrier_rw(); \
+ else \
+ war_io_reorder_wmb(); \
+ \
+ __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
+ \
+ __val = pfx##ioswab##bwlq(__mem, val); \
+ \
+ if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
+ *__mem = __val; \
+ else if (cpu_has_64bits) { \
+ unsigned long __flags; \
+ type __tmp; \
+ \
+ if (irq) \
+ local_irq_save(__flags); \
+ __asm__ __volatile__( \
+ ".set push" "\t\t# __writeq""\n\t" \
+ ".set arch=r4000" "\n\t" \
+ "dsll32 %L0, %L0, 0" "\n\t" \
+ "dsrl32 %L0, %L0, 0" "\n\t" \
+ "dsll32 %M0, %M0, 0" "\n\t" \
+ "or %L0, %L0, %M0" "\n\t" \
+ "sd %L0, %2" "\n\t" \
+ ".set pop" "\n" \
+ : "=r" (__tmp) \
+ : "0" (__val), "m" (*__mem)); \
+ if (irq) \
+ local_irq_restore(__flags); \
+ } else \
+ BUG(); \
+} \
+ \
+static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
+{ \
+ volatile type *__mem; \
+ type __val; \
+ \
+ __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
+ \
+ if (barrier) \
+ iobarrier_rw(); \
+ \
+ if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
+ __val = *__mem; \
+ else if (cpu_has_64bits) { \
+ unsigned long __flags; \
+ \
+ if (irq) \
+ local_irq_save(__flags); \
+ __asm__ __volatile__( \
+ ".set push" "\t\t# __readq" "\n\t" \
+ ".set arch=r4000" "\n\t" \
+ "ld %L0, %1" "\n\t" \
+ "dsra32 %M0, %L0, 0" "\n\t" \
+ "sll %L0, %L0, 0" "\n\t" \
+ ".set pop" "\n" \
+ : "=r" (__val) \
+ : "m" (*__mem)); \
+ if (irq) \
+ local_irq_restore(__flags); \
+ } else { \
+ __val = 0; \
+ BUG(); \
+ } \
+ \
+ /* prevent prefetching of coherent DMA data prematurely */ \
+ if (!relax) \
+ rmb(); \
+ return pfx##ioswab##bwlq(__mem, __val); \
+}
+
+#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p) \
+ \
+static inline void pfx##out##bwlq##p(type val, unsigned long port) \
+{ \
+ volatile type *__addr; \
+ type __val; \
+ \
+ if (barrier) \
+ iobarrier_rw(); \
+ else \
+ war_io_reorder_wmb(); \
+ \
+ __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
+ \
+ __val = pfx##ioswab##bwlq(__addr, val); \
+ \
+ /* Really, we want this to be atomic */ \
+ BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
+ \
+ *__addr = __val; \
+} \
+ \
+static inline type pfx##in##bwlq##p(unsigned long port) \
+{ \
+ volatile type *__addr; \
+ type __val; \
+ \
+ __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
+ \
+ BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
+ \
+ if (barrier) \
+ iobarrier_rw(); \
+ \
+ __val = *__addr; \
+ \
+ /* prevent prefetching of coherent DMA data prematurely */ \
+ if (!relax) \
+ rmb(); \
+ return pfx##ioswab##bwlq(__addr, __val); \
+}
+
+#define __BUILD_MEMORY_PFX(bus, bwlq, type, relax) \
+ \
+__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1)
+
+#define BUILDIO_MEM(bwlq, type) \
+ \
+__BUILD_MEMORY_PFX(__raw_, bwlq, type, 0) \
+__BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1) \
+__BUILD_MEMORY_PFX(__mem_, bwlq, type, 0) \
+__BUILD_MEMORY_PFX(, bwlq, type, 0)
+
+BUILDIO_MEM(b, u8)
+BUILDIO_MEM(w, u16)
+BUILDIO_MEM(l, u32)
+#ifdef CONFIG_64BIT
+BUILDIO_MEM(q, u64)
+#else
+__BUILD_MEMORY_PFX(__raw_, q, u64, 0)
+__BUILD_MEMORY_PFX(__mem_, q, u64, 0)
+#endif
+
+#define __BUILD_IOPORT_PFX(bus, bwlq, type) \
+ __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,) \
+ __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p)
+
+#define BUILDIO_IOPORT(bwlq, type) \
+ __BUILD_IOPORT_PFX(, bwlq, type) \
+ __BUILD_IOPORT_PFX(__mem_, bwlq, type)
+
+BUILDIO_IOPORT(b, u8)
+BUILDIO_IOPORT(w, u16)
+BUILDIO_IOPORT(l, u32)
+#ifdef CONFIG_64BIT
+BUILDIO_IOPORT(q, u64)
+#endif
+
+#define __BUILDIO(bwlq, type) \
+ \
+__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0)
+
+__BUILDIO(q, u64)
+
+#define readb_relaxed __relaxed_readb
+#define readw_relaxed __relaxed_readw
+#define readl_relaxed __relaxed_readl
+#ifdef CONFIG_64BIT
+#define readq_relaxed __relaxed_readq
+#endif
+
+#define writeb_relaxed __relaxed_writeb
+#define writew_relaxed __relaxed_writew
+#define writel_relaxed __relaxed_writel
+#ifdef CONFIG_64BIT
+#define writeq_relaxed __relaxed_writeq
+#endif
+
+#define readb_be(addr) \
+ __raw_readb((__force unsigned *)(addr))
+#define readw_be(addr) \
+ be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
+#define readl_be(addr) \
+ be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
+#define readq_be(addr) \
+ be64_to_cpu(__raw_readq((__force unsigned *)(addr)))
+
+#define writeb_be(val, addr) \
+ __raw_writeb((val), (__force unsigned *)(addr))
+#define writew_be(val, addr) \
+ __raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
+#define writel_be(val, addr) \
+ __raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
+#define writeq_be(val, addr) \
+ __raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
+
+/*
+ * Some code tests for these symbols
+ */
+#ifdef CONFIG_64BIT
+#define readq readq
+#define writeq writeq
+#endif
+
+#define __BUILD_MEMORY_STRING(bwlq, type) \
+ \
+static inline void writes##bwlq(volatile void __iomem *mem, \
+ const void *addr, unsigned int count) \
+{ \
+ const volatile type *__addr = addr; \
+ \
+ while (count--) { \
+ __mem_write##bwlq(*__addr, mem); \
+ __addr++; \
+ } \
+} \
+ \
+static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \
+ unsigned int count) \
+{ \
+ volatile type *__addr = addr; \
+ \
+ while (count--) { \
+ *__addr = __mem_read##bwlq(mem); \
+ __addr++; \
+ } \
+}
+
+#define __BUILD_IOPORT_STRING(bwlq, type) \
+ \
+static inline void outs##bwlq(unsigned long port, const void *addr, \
+ unsigned int count) \
+{ \
+ const volatile type *__addr = addr; \
+ \
+ while (count--) { \
+ __mem_out##bwlq(*__addr, port); \
+ __addr++; \
+ } \
+} \
+ \
+static inline void ins##bwlq(unsigned long port, void *addr, \
+ unsigned int count) \
+{ \
+ volatile type *__addr = addr; \
+ \
+ while (count--) { \
+ *__addr = __mem_in##bwlq(port); \
+ __addr++; \
+ } \
+}
+
+#define BUILDSTRING(bwlq, type) \
+ \
+__BUILD_MEMORY_STRING(bwlq, type) \
+__BUILD_IOPORT_STRING(bwlq, type)
+
+BUILDSTRING(b, u8)
+BUILDSTRING(w, u16)
+BUILDSTRING(l, u32)
+#ifdef CONFIG_64BIT
+BUILDSTRING(q, u64)
+#endif
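+
+#if 0
+/*
+ * Illustrative sketch, not part of this header: draining a 16-bit MMIO
+ * data FIFO with the string accessors generated above.  'regs', the FIFO
+ * offset and the word count are hypothetical.
+ */
+static void demo_drain_fifo(void __iomem *regs, u16 *buf, unsigned int words)
+{
+ readsw(regs + 0x10, buf, words);
+}
+#endif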
+
+static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
+{
+ memset((void __force *) addr, val, count);
+}
+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
+{
+ memcpy(dst, (void __force *) src, count);
+}
+static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
+{
+ memcpy((void __force *) dst, src, count);
+}
+
+/*
+ * The caches on some architectures aren't dma-coherent, so software needs
+ * to handle this. There are three types of operations that can be
+ * applied to dma buffers.
+ *
+ * - dma_cache_wback_inv(start, size) makes caches and memory coherent by
+ * writing the content of the caches back to memory, if necessary.
+ * The function also invalidates the affected part of the caches as
+ * necessary before DMA transfers from outside to memory.
+ * - dma_cache_wback(start, size) makes caches and memory coherent by
+ * writing the content of the caches back to memory, if necessary,
+ * before DMA transfers from memory to a device.
+ * - dma_cache_inv(start, size) invalidates the affected parts of the
+ * caches. Dirty lines of the caches may be written back or simply
+ * be discarded. This operation is necessary before dma operations
+ * to the memory.
+ *
+ * This API used to be exported; it now is for arch code internal use only.
+ */
+#ifdef CONFIG_DMA_NONCOHERENT
+
+extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
+extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
+extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
+
+#define dma_cache_wback_inv(start, size) _dma_cache_wback_inv(start, size)
+#define dma_cache_wback(start, size) _dma_cache_wback(start, size)
+#define dma_cache_inv(start, size) _dma_cache_inv(start, size)
+
+#else /* Sane hardware */
+
+#define dma_cache_wback_inv(start,size) \
+ do { (void) (start); (void) (size); } while (0)
+#define dma_cache_wback(start,size) \
+ do { (void) (start); (void) (size); } while (0)
+#define dma_cache_inv(start,size) \
+ do { (void) (start); (void) (size); } while (0)
+
+#endif /* CONFIG_DMA_NONCOHERENT */
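+
+#if 0
+/*
+ * Illustrative sketch, not part of this header: arch-internal DMA code
+ * typically picks one of the three operations above based on the transfer
+ * direction, roughly like this (a simplified sketch, not the exact
+ * in-tree implementation).
+ */
+static void demo_sync_for_device(unsigned long addr, size_t size,
+     enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
+  dma_cache_wback(addr, size);
+  break;
+ case DMA_FROM_DEVICE:
+  dma_cache_inv(addr, size);
+  break;
+ default:
+  dma_cache_wback_inv(addr, size);
+ }
+}
+#endif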
+
+/*
+ * Read a 32-bit register that requires a 64-bit read cycle on the bus.
+ * Avoid interrupt mucking, just adjust the address for 4-byte access.
+ * Assume the addresses are 8-byte aligned.
+ */
+#ifdef __MIPSEB__
+#define __CSR_32_ADJUST 4
+#else
+#define __CSR_32_ADJUST 0
+#endif
+
+#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
+#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p) __va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p) p
+
+void __ioread64_copy(void *to, const void __iomem *from, size_t count);
+
+#endif /* _ASM_IO_H */
diff --git a/arch/mips/include/asm/ip32/crime.h b/arch/mips/include/asm/ip32/crime.h
new file mode 100644
index 000000000..16c94a27b
--- /dev/null
+++ b/arch/mips/include/asm/ip32/crime.h
@@ -0,0 +1,158 @@
+/*
+ * Definitions for the SGI CRIME (CPU, Rendering, Interconnect and Memory
+ * Engine)
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Harald Koerfgen
+ */
+
+#ifndef __ASM_CRIME_H__
+#define __ASM_CRIME_H__
+
+/*
+ * Address map
+ */
+#define CRIME_BASE 0x14000000 /* physical */
+
+struct sgi_crime {
+ volatile unsigned long id;
+#define CRIME_ID_MASK 0xff
+#define CRIME_ID_IDBITS 0xf0
+#define CRIME_ID_IDVALUE 0xa0
+#define CRIME_ID_REV 0x0f
+#define CRIME_REV_PETTY 0x00
+#define CRIME_REV_11 0x11
+#define CRIME_REV_13 0x13
+#define CRIME_REV_14 0x14
+
+ volatile unsigned long control;
+#define CRIME_CONTROL_MASK 0x3fff
+#define CRIME_CONTROL_TRITON_SYSADC 0x2000
+#define CRIME_CONTROL_CRIME_SYSADC 0x1000
+#define CRIME_CONTROL_HARD_RESET 0x0800
+#define CRIME_CONTROL_SOFT_RESET 0x0400
+#define CRIME_CONTROL_DOG_ENA 0x0200
+#define CRIME_CONTROL_ENDIANESS 0x0100
+#define CRIME_CONTROL_ENDIAN_BIG 0x0100
+#define CRIME_CONTROL_ENDIAN_LITTLE 0x0000
+#define CRIME_CONTROL_CQUEUE_HWM 0x000f
+#define CRIME_CONTROL_CQUEUE_SHFT 0
+#define CRIME_CONTROL_WBUF_HWM 0x00f0
+#define CRIME_CONTROL_WBUF_SHFT 8
+
+ volatile unsigned long istat;
+ volatile unsigned long imask;
+ volatile unsigned long soft_int;
+ volatile unsigned long hard_int;
+#define MACE_VID_IN1_INT BIT(0)
+#define MACE_VID_IN2_INT BIT(1)
+#define MACE_VID_OUT_INT BIT(2)
+#define MACE_ETHERNET_INT BIT(3)
+#define MACE_SUPERIO_INT BIT(4)
+#define MACE_MISC_INT BIT(5)
+#define MACE_AUDIO_INT BIT(6)
+#define MACE_PCI_BRIDGE_INT BIT(7)
+#define MACEPCI_SCSI0_INT BIT(8)
+#define MACEPCI_SCSI1_INT BIT(9)
+#define MACEPCI_SLOT0_INT BIT(10)
+#define MACEPCI_SLOT1_INT BIT(11)
+#define MACEPCI_SLOT2_INT BIT(12)
+#define MACEPCI_SHARED0_INT BIT(13)
+#define MACEPCI_SHARED1_INT BIT(14)
+#define MACEPCI_SHARED2_INT BIT(15)
+#define CRIME_GBE0_INT BIT(16)
+#define CRIME_GBE1_INT BIT(17)
+#define CRIME_GBE2_INT BIT(18)
+#define CRIME_GBE3_INT BIT(19)
+#define CRIME_CPUERR_INT BIT(20)
+#define CRIME_MEMERR_INT BIT(21)
+#define CRIME_RE_EMPTY_E_INT BIT(22)
+#define CRIME_RE_FULL_E_INT BIT(23)
+#define CRIME_RE_IDLE_E_INT BIT(24)
+#define CRIME_RE_EMPTY_L_INT BIT(25)
+#define CRIME_RE_FULL_L_INT BIT(26)
+#define CRIME_RE_IDLE_L_INT BIT(27)
+#define CRIME_SOFT0_INT BIT(28)
+#define CRIME_SOFT1_INT BIT(29)
+#define CRIME_SOFT2_INT BIT(30)
+#define CRIME_SYSCORERR_INT CRIME_SOFT2_INT
+#define CRIME_VICE_INT BIT(31)
+/* Masks for deciding who handles the interrupt */
+#define CRIME_MACE_INT_MASK 0x8f
+#define CRIME_MACEISA_INT_MASK 0x70
+#define CRIME_MACEPCI_INT_MASK 0xff00
+#define CRIME_CRIME_INT_MASK 0xffff0000
+
+ volatile unsigned long watchdog;
+#define CRIME_DOG_POWER_ON_RESET 0x00010000
+#define CRIME_DOG_WARM_RESET 0x00080000
+#define CRIME_DOG_TIMEOUT (CRIME_DOG_POWER_ON_RESET|CRIME_DOG_WARM_RESET)
+#define CRIME_DOG_VALUE 0x00007fff
+
+ volatile unsigned long timer;
+#define CRIME_MASTER_FREQ 66666500 /* Crime upcounter frequency */
+#define CRIME_NS_PER_TICK 15 /* for delay_calibrate */
+
+ volatile unsigned long cpu_error_addr;
+#define CRIME_CPU_ERROR_ADDR_MASK 0x3ffffffff
+
+ volatile unsigned long cpu_error_stat;
+#define CRIME_CPU_ERROR_MASK 0x7 /* cpu error stat is 3 bits */
+#define CRIME_CPU_ERROR_CPU_ILL_ADDR 0x4
+#define CRIME_CPU_ERROR_VICE_WRT_PRTY 0x2
+#define CRIME_CPU_ERROR_CPU_WRT_PRTY 0x1
+
+ unsigned long _pad0[54];
+
+ volatile unsigned long mc_ctrl;
+ volatile unsigned long bank_ctrl[8];
+#define CRIME_MEM_BANK_CONTROL_MASK 0x11f /* 9 bits 7:5 reserved */
+#define CRIME_MEM_BANK_CONTROL_ADDR 0x01f
+#define CRIME_MEM_BANK_CONTROL_SDRAM_SIZE 0x100
+#define CRIME_MAXBANKS 8
+
+ volatile unsigned long mem_ref_counter;
+#define CRIME_MEM_REF_COUNTER_MASK 0x3ff /* 10bit */
+
+ volatile unsigned long mem_error_stat;
+#define CRIME_MEM_ERROR_STAT_MASK 0x0ff7ffff /* 28-bit register */
+#define CRIME_MEM_ERROR_MACE_ID 0x0000007f
+#define CRIME_MEM_ERROR_MACE_ACCESS 0x00000080
+#define CRIME_MEM_ERROR_RE_ID 0x00007f00
+#define CRIME_MEM_ERROR_RE_ACCESS 0x00008000
+#define CRIME_MEM_ERROR_GBE_ACCESS 0x00010000
+#define CRIME_MEM_ERROR_VICE_ACCESS 0x00020000
+#define CRIME_MEM_ERROR_CPU_ACCESS 0x00040000
+#define CRIME_MEM_ERROR_RESERVED 0x00080000
+#define CRIME_MEM_ERROR_SOFT_ERR 0x00100000
+#define CRIME_MEM_ERROR_HARD_ERR 0x00200000
+#define CRIME_MEM_ERROR_MULTIPLE 0x00400000
+#define CRIME_MEM_ERROR_ECC 0x01800000
+#define CRIME_MEM_ERROR_MEM_ECC_RD 0x00800000
+#define CRIME_MEM_ERROR_MEM_ECC_RMW 0x01000000
+#define CRIME_MEM_ERROR_INV 0x0e000000
+#define CRIME_MEM_ERROR_INV_MEM_ADDR_RD 0x02000000
+#define CRIME_MEM_ERROR_INV_MEM_ADDR_WR 0x04000000
+#define CRIME_MEM_ERROR_INV_MEM_ADDR_RMW 0x08000000
+
+ volatile unsigned long mem_error_addr;
+#define CRIME_MEM_ERROR_ADDR_MASK 0x3fffffff
+
+ volatile unsigned long mem_ecc_syn;
+#define CRIME_MEM_ERROR_ECC_SYN_MASK 0xffffffff
+
+ volatile unsigned long mem_ecc_chk;
+#define CRIME_MEM_ERROR_ECC_CHK_MASK 0xffffffff
+
+ volatile unsigned long mem_ecc_repl;
+#define CRIME_MEM_ERROR_ECC_REPL_MASK 0xffffffff
+};
+
+extern struct sgi_crime __iomem *crime;
+
+#define CRIME_HI_MEM_BASE 0x40000000 /* this is where whole 1G of RAM is mapped */
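+
+#if 0
+/*
+ * Illustrative sketch, not part of this header: an IP32 interrupt
+ * dispatcher could use the masks above to decide which block owns a
+ * pending interrupt.  The handler calls are placeholders.
+ */
+static void demo_crime_dispatch(void)
+{
+ unsigned long pending = crime->istat & crime->imask;
+
+ if (pending & CRIME_MACE_INT_MASK)
+  ; /* MACE peripheral interrupt */
+ else if (pending & CRIME_MACEPCI_INT_MASK)
+  ; /* MACE PCI interrupt */
+ else if (pending & CRIME_CRIME_INT_MASK)
+  ; /* handled by CRIME itself */
+}
+#endif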
+
+#endif /* __ASM_CRIME_H__ */
diff --git a/arch/mips/include/asm/ip32/ip32_ints.h b/arch/mips/include/asm/ip32/ip32_ints.h
new file mode 100644
index 000000000..72e3368de
--- /dev/null
+++ b/arch/mips/include/asm/ip32/ip32_ints.h
@@ -0,0 +1,114 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Harald Koerfgen
+ */
+
+#ifndef __ASM_IP32_INTS_H
+#define __ASM_IP32_INTS_H
+
+#include <asm/irq.h>
+
+/*
+ * This list reflects the assignment of interrupt numbers to
+ * interrupting events. Order is fairly irrelevant to handling
+ * priority. This differs from irix.
+ */
+
+enum ip32_irq_no {
+ /*
+ * CPU interrupts are 0 ... 7
+ */
+
+ CRIME_IRQ_BASE = MIPS_CPU_IRQ_BASE + 8,
+
+ /*
+ * MACE
+ */
+ MACE_VID_IN1_IRQ = CRIME_IRQ_BASE,
+ MACE_VID_IN2_IRQ,
+ MACE_VID_OUT_IRQ,
+ MACE_ETHERNET_IRQ,
+ /* SUPERIO, MISC, and AUDIO are MACEISA */
+ __MACE_SUPERIO,
+ __MACE_MISC,
+ __MACE_AUDIO,
+ MACE_PCI_BRIDGE_IRQ,
+
+ /*
+ * MACEPCI
+ */
+ MACEPCI_SCSI0_IRQ,
+ MACEPCI_SCSI1_IRQ,
+ MACEPCI_SLOT0_IRQ,
+ MACEPCI_SLOT1_IRQ,
+ MACEPCI_SLOT2_IRQ,
+ MACEPCI_SHARED0_IRQ,
+ MACEPCI_SHARED1_IRQ,
+ MACEPCI_SHARED2_IRQ,
+
+ /*
+ * CRIME
+ */
+ CRIME_GBE0_IRQ,
+ CRIME_GBE1_IRQ,
+ CRIME_GBE2_IRQ,
+ CRIME_GBE3_IRQ,
+ CRIME_CPUERR_IRQ,
+ CRIME_MEMERR_IRQ,
+ CRIME_RE_EMPTY_E_IRQ,
+ CRIME_RE_FULL_E_IRQ,
+ CRIME_RE_IDLE_E_IRQ,
+ CRIME_RE_EMPTY_L_IRQ,
+ CRIME_RE_FULL_L_IRQ,
+ CRIME_RE_IDLE_L_IRQ,
+ CRIME_SOFT0_IRQ,
+ CRIME_SOFT1_IRQ,
+ CRIME_SOFT2_IRQ,
+ CRIME_SYSCORERR_IRQ = CRIME_SOFT2_IRQ,
+ CRIME_VICE_IRQ,
+
+ /*
+ * MACEISA
+ */
+ MACEISA_AUDIO_SW_IRQ,
+ MACEISA_AUDIO_SC_IRQ,
+ MACEISA_AUDIO1_DMAT_IRQ,
+ MACEISA_AUDIO1_OF_IRQ,
+ MACEISA_AUDIO2_DMAT_IRQ,
+ MACEISA_AUDIO2_MERR_IRQ,
+ MACEISA_AUDIO3_DMAT_IRQ,
+ MACEISA_AUDIO3_MERR_IRQ,
+ MACEISA_RTC_IRQ,
+ MACEISA_KEYB_IRQ,
+ /* MACEISA_KEYB_POLL is not an IRQ */
+ __MACEISA_KEYB_POLL,
+ MACEISA_MOUSE_IRQ,
+ /* MACEISA_MOUSE_POLL is not an IRQ */
+ __MACEISA_MOUSE_POLL,
+ MACEISA_TIMER0_IRQ,
+ MACEISA_TIMER1_IRQ,
+ MACEISA_TIMER2_IRQ,
+ MACEISA_PARALLEL_IRQ,
+ MACEISA_PAR_CTXA_IRQ,
+ MACEISA_PAR_CTXB_IRQ,
+ MACEISA_PAR_MERR_IRQ,
+ MACEISA_SERIAL1_IRQ,
+ MACEISA_SERIAL1_TDMAT_IRQ,
+ MACEISA_SERIAL1_TDMAPR_IRQ,
+ MACEISA_SERIAL1_TDMAME_IRQ,
+ MACEISA_SERIAL1_RDMAT_IRQ,
+ MACEISA_SERIAL1_RDMAOR_IRQ,
+ MACEISA_SERIAL2_IRQ,
+ MACEISA_SERIAL2_TDMAT_IRQ,
+ MACEISA_SERIAL2_TDMAPR_IRQ,
+ MACEISA_SERIAL2_TDMAME_IRQ,
+ MACEISA_SERIAL2_RDMAT_IRQ,
+ MACEISA_SERIAL2_RDMAOR_IRQ,
+
+ IP32_IRQ_MAX = MACEISA_SERIAL2_RDMAOR_IRQ
+};
+
+#endif /* __ASM_IP32_INTS_H */
diff --git a/arch/mips/include/asm/ip32/mace.h b/arch/mips/include/asm/ip32/mace.h
new file mode 100644
index 000000000..253ed7ea8
--- /dev/null
+++ b/arch/mips/include/asm/ip32/mace.h
@@ -0,0 +1,365 @@
+/*
+ * Definitions for the SGI MACE (Multimedia, Audio and Communications Engine)
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Harald Koerfgen
+ * Copyright (C) 2004 Ladislav Michl
+ */
+
+#ifndef __ASM_MACE_H__
+#define __ASM_MACE_H__
+
+/*
+ * Address map
+ */
+#define MACE_BASE 0x1f000000 /* physical */
+
+/*
+ * PCI interface
+ */
+struct mace_pci {
+ volatile unsigned int error_addr;
+ volatile unsigned int error;
+#define MACEPCI_ERROR_MASTER_ABORT BIT(31)
+#define MACEPCI_ERROR_TARGET_ABORT BIT(30)
+#define MACEPCI_ERROR_DATA_PARITY_ERR BIT(29)
+#define MACEPCI_ERROR_RETRY_ERR BIT(28)
+#define MACEPCI_ERROR_ILLEGAL_CMD BIT(27)
+#define MACEPCI_ERROR_SYSTEM_ERR BIT(26)
+#define MACEPCI_ERROR_INTERRUPT_TEST BIT(25)
+#define MACEPCI_ERROR_PARITY_ERR BIT(24)
+#define MACEPCI_ERROR_OVERRUN BIT(23)
+#define MACEPCI_ERROR_RSVD BIT(22)
+#define MACEPCI_ERROR_MEMORY_ADDR BIT(21)
+#define MACEPCI_ERROR_CONFIG_ADDR BIT(20)
+#define MACEPCI_ERROR_MASTER_ABORT_ADDR_VALID BIT(19)
+#define MACEPCI_ERROR_TARGET_ABORT_ADDR_VALID BIT(18)
+#define MACEPCI_ERROR_DATA_PARITY_ADDR_VALID BIT(17)
+#define MACEPCI_ERROR_RETRY_ADDR_VALID BIT(16)
+#define MACEPCI_ERROR_SIG_TABORT BIT(4)
+#define MACEPCI_ERROR_DEVSEL_MASK 0xc0
+#define MACEPCI_ERROR_DEVSEL_FAST 0
+#define MACEPCI_ERROR_DEVSEL_MED 0x40
+#define MACEPCI_ERROR_DEVSEL_SLOW 0x80
+#define MACEPCI_ERROR_FBB BIT(1)
+#define MACEPCI_ERROR_66MHZ BIT(0)
+ volatile unsigned int control;
+#define MACEPCI_CONTROL_INT(x) BIT(x)
+#define MACEPCI_CONTROL_INT_MASK 0xff
+#define MACEPCI_CONTROL_SERR_ENA BIT(8)
+#define MACEPCI_CONTROL_ARB_N6 BIT(9)
+#define MACEPCI_CONTROL_PARITY_ERR BIT(10)
+#define MACEPCI_CONTROL_MRMRA_ENA BIT(11)
+#define MACEPCI_CONTROL_ARB_N3 BIT(12)
+#define MACEPCI_CONTROL_ARB_N4 BIT(13)
+#define MACEPCI_CONTROL_ARB_N5 BIT(14)
+#define MACEPCI_CONTROL_PARK_LIU BIT(15)
+#define MACEPCI_CONTROL_INV_INT(x) BIT(16+x)
+#define MACEPCI_CONTROL_INV_INT_MASK 0x00ff0000
+#define MACEPCI_CONTROL_OVERRUN_INT BIT(24)
+#define MACEPCI_CONTROL_PARITY_INT BIT(25)
+#define MACEPCI_CONTROL_SERR_INT BIT(26)
+#define MACEPCI_CONTROL_IT_INT BIT(27)
+#define MACEPCI_CONTROL_RE_INT BIT(28)
+#define MACEPCI_CONTROL_DPED_INT BIT(29)
+#define MACEPCI_CONTROL_TAR_INT BIT(30)
+#define MACEPCI_CONTROL_MAR_INT BIT(31)
+ volatile unsigned int rev;
+ unsigned int _pad[0xcf8/4 - 4];
+ volatile unsigned int config_addr;
+ union {
+ volatile unsigned char b[4];
+ volatile unsigned short w[2];
+ volatile unsigned int l;
+ } config_data;
+};
+#define MACEPCI_LOW_MEMORY 0x1a000000
+#define MACEPCI_LOW_IO 0x18000000
+#define MACEPCI_SWAPPED_VIEW 0
+#define MACEPCI_NATIVE_VIEW 0x40000000
+#define MACEPCI_IO 0x80000000
+#define MACEPCI_HI_MEMORY 0x280000000
+#define MACEPCI_HI_IO 0x100000000
+
+/*
+ * Video interface
+ */
+struct mace_video {
+ unsigned long xxx; /* later... */
+};
+
+/*
+ * Ethernet interface
+ */
+struct mace_ethernet {
+ volatile u64 mac_ctrl;
+ volatile unsigned long int_stat;
+ volatile unsigned long dma_ctrl;
+ volatile unsigned long timer;
+ volatile unsigned long tx_int_al;
+ volatile unsigned long rx_int_al;
+ volatile unsigned long tx_info;
+ volatile unsigned long tx_info_al;
+ volatile unsigned long rx_buff;
+ volatile unsigned long rx_buff_al1;
+ volatile unsigned long rx_buff_al2;
+ volatile unsigned long diag;
+ volatile unsigned long phy_data;
+ volatile unsigned long phy_regs;
+ volatile unsigned long phy_trans_go;
+ volatile unsigned long backoff_seed;
+ /*===================================*/
+ volatile unsigned long imq_reserved[4];
+ volatile unsigned long mac_addr;
+ volatile unsigned long mac_addr2;
+ volatile unsigned long mcast_filter;
+ volatile unsigned long tx_ring_base;
+ /* Following are read-only registers for debugging */
+ volatile unsigned long tx_pkt1_hdr;
+ volatile unsigned long tx_pkt1_ptr[3];
+ volatile unsigned long tx_pkt2_hdr;
+ volatile unsigned long tx_pkt2_ptr[3];
+ /*===================================*/
+ volatile unsigned long rx_fifo;
+};
+
+/*
+ * Peripherals
+ */
+
+/* Audio registers */
+struct mace_audio {
+ volatile unsigned long control;
+ volatile unsigned long codec_control; /* codec status control */
+ volatile unsigned long codec_mask; /* codec status input mask */
+ volatile unsigned long codec_read; /* codec status read data */
+ struct {
+ volatile unsigned long control; /* channel control */
+ volatile unsigned long read_ptr; /* channel read pointer */
+ volatile unsigned long write_ptr; /* channel write pointer */
+ volatile unsigned long depth; /* channel depth */
+ } chan[3];
+};
+
+
+/* register definitions for parallel port DMA */
+struct mace_parport {
+ /* 0 - do nothing,
+ * 1 - pulse terminal count to the device after buffer is drained */
+#define MACEPAR_CONTEXT_LASTFLAG BIT(63)
+ /* Should not cross 4K page boundary */
+#define MACEPAR_CONTEXT_DATA_BOUND 0x0000000000001000UL
+#define MACEPAR_CONTEXT_DATALEN_MASK 0x00000fff00000000UL
+#define MACEPAR_CONTEXT_DATALEN_SHIFT 32
+ /* Can be arbitrarily aligned on any byte boundary on output,
+ * 64 byte aligned on input */
+#define MACEPAR_CONTEXT_BASEADDR_MASK 0x00000000ffffffffUL
+ volatile u64 context_a;
+ volatile u64 context_b;
+ /* 0 - mem->device, 1 - device->mem */
+#define MACEPAR_CTLSTAT_DIRECTION BIT(0)
+ /* 0 - channel frozen, 1 - channel enabled */
+#define MACEPAR_CTLSTAT_ENABLE BIT(1)
+ /* 0 - channel active, 1 - complete channel reset */
+#define MACEPAR_CTLSTAT_RESET BIT(2)
+#define MACEPAR_CTLSTAT_CTXB_VALID BIT(3)
+#define MACEPAR_CTLSTAT_CTXA_VALID BIT(4)
+ volatile u64 cntlstat; /* Control/Status register */
+#define MACEPAR_DIAG_CTXINUSE BIT(0)
+ /* 1 - Dma engine is enabled and processing something */
+#define MACEPAR_DIAG_DMACTIVE BIT(1)
+ /* Counter of bytes left */
+#define MACEPAR_DIAG_CTRMASK 0x0000000000003ffcUL
+#define MACEPAR_DIAG_CTRSHIFT 2
+ volatile u64 diagnostic; /* RO: diagnostic register */
+};
+
+/* ISA Control and DMA registers */
+struct mace_isactrl {
+ volatile unsigned long ringbase;
+#define MACEISA_RINGBUFFERS_SIZE (8 * 4096)
+
+ volatile unsigned long misc;
+#define MACEISA_FLASH_WE BIT(0) /* 1=> Enable FLASH writes */
+#define MACEISA_PWD_CLEAR BIT(1) /* 1=> PWD CLEAR jumper detected */
+#define MACEISA_NIC_DEASSERT BIT(2)
+#define MACEISA_NIC_DATA BIT(3)
+#define MACEISA_LED_RED BIT(4) /* 0=> Illuminate red LED */
+#define MACEISA_LED_GREEN BIT(5) /* 0=> Illuminate green LED */
+#define MACEISA_DP_RAM_ENABLE BIT(6)
+
+ volatile unsigned long istat;
+ volatile unsigned long imask;
+#define MACEISA_AUDIO_SW_INT BIT(0)
+#define MACEISA_AUDIO_SC_INT BIT(1)
+#define MACEISA_AUDIO1_DMAT_INT BIT(2)
+#define MACEISA_AUDIO1_OF_INT BIT(3)
+#define MACEISA_AUDIO2_DMAT_INT BIT(4)
+#define MACEISA_AUDIO2_MERR_INT BIT(5)
+#define MACEISA_AUDIO3_DMAT_INT BIT(6)
+#define MACEISA_AUDIO3_MERR_INT BIT(7)
+#define MACEISA_RTC_INT BIT(8)
+#define MACEISA_KEYB_INT BIT(9)
+#define MACEISA_KEYB_POLL_INT BIT(10)
+#define MACEISA_MOUSE_INT BIT(11)
+#define MACEISA_MOUSE_POLL_INT BIT(12)
+#define MACEISA_TIMER0_INT BIT(13)
+#define MACEISA_TIMER1_INT BIT(14)
+#define MACEISA_TIMER2_INT BIT(15)
+#define MACEISA_PARALLEL_INT BIT(16)
+#define MACEISA_PAR_CTXA_INT BIT(17)
+#define MACEISA_PAR_CTXB_INT BIT(18)
+#define MACEISA_PAR_MERR_INT BIT(19)
+#define MACEISA_SERIAL1_INT BIT(20)
+#define MACEISA_SERIAL1_TDMAT_INT BIT(21)
+#define MACEISA_SERIAL1_TDMAPR_INT BIT(22)
+#define MACEISA_SERIAL1_TDMAME_INT BIT(23)
+#define MACEISA_SERIAL1_RDMAT_INT BIT(24)
+#define MACEISA_SERIAL1_RDMAOR_INT BIT(25)
+#define MACEISA_SERIAL2_INT BIT(26)
+#define MACEISA_SERIAL2_TDMAT_INT BIT(27)
+#define MACEISA_SERIAL2_TDMAPR_INT BIT(28)
+#define MACEISA_SERIAL2_TDMAME_INT BIT(29)
+#define MACEISA_SERIAL2_RDMAT_INT BIT(30)
+#define MACEISA_SERIAL2_RDMAOR_INT BIT(31)
+
+ volatile unsigned long _pad[0x2000/8 - 4];
+
+ volatile unsigned long dp_ram[0x400];
+ struct mace_parport parport;
+};
+
+/* Keyboard & Mouse registers
+ * -> drivers/input/serio/maceps2.c */
+struct mace_ps2port {
+ volatile unsigned long tx;
+ volatile unsigned long rx;
+ volatile unsigned long control;
+ volatile unsigned long status;
+};
+
+struct mace_ps2 {
+ struct mace_ps2port keyb;
+ struct mace_ps2port mouse;
+};
+
+/* I2C registers
+ * -> drivers/i2c/algos/i2c-algo-sgi.c */
+struct mace_i2c {
+ volatile unsigned long config;
+#define MACEI2C_RESET BIT(0)
+#define MACEI2C_FAST BIT(1)
+#define MACEI2C_DATA_OVERRIDE BIT(2)
+#define MACEI2C_CLOCK_OVERRIDE BIT(3)
+#define MACEI2C_DATA_STATUS BIT(4)
+#define MACEI2C_CLOCK_STATUS BIT(5)
+ volatile unsigned long control;
+ volatile unsigned long data;
+};
+
+/* Timer registers */
+typedef union {
+ volatile unsigned long ust_msc;
+ struct reg {
+ volatile unsigned int ust;
+ volatile unsigned int msc;
+ } reg;
+} timer_reg;
+
+struct mace_timers {
+ volatile unsigned long ust;
+#define MACE_UST_PERIOD_NS 960
+
+ volatile unsigned long compare1;
+ volatile unsigned long compare2;
+ volatile unsigned long compare3;
+
+ timer_reg audio_in;
+ timer_reg audio_out1;
+ timer_reg audio_out2;
+ timer_reg video_in1;
+ timer_reg video_in2;
+ timer_reg video_out;
+};
+
+struct mace_perif {
+ struct mace_audio audio;
+ char _pad0[0x10000 - sizeof(struct mace_audio)];
+
+ struct mace_isactrl ctrl;
+ char _pad1[0x10000 - sizeof(struct mace_isactrl)];
+
+ struct mace_ps2 ps2;
+ char _pad2[0x10000 - sizeof(struct mace_ps2)];
+
+ struct mace_i2c i2c;
+ char _pad3[0x10000 - sizeof(struct mace_i2c)];
+
+ struct mace_timers timers;
+ char _pad4[0x10000 - sizeof(struct mace_timers)];
+};
+
+
+/*
+ * ISA peripherals
+ */
+
+/* Parallel port */
+struct mace_parallel {
+};
+
+struct mace_ecp1284 { /* later... */
+};
+
+/* Serial port */
+struct mace_serial {
+ volatile unsigned long xxx; /* later... */
+};
+
+struct mace_isa {
+ struct mace_parallel parallel;
+ char _pad1[0x8000 - sizeof(struct mace_parallel)];
+
+ struct mace_ecp1284 ecp1284;
+ char _pad2[0x8000 - sizeof(struct mace_ecp1284)];
+
+ struct mace_serial serial1;
+ char _pad3[0x8000 - sizeof(struct mace_serial)];
+
+ struct mace_serial serial2;
+ char _pad4[0x8000 - sizeof(struct mace_serial)];
+
+ volatile unsigned char rtc[0x10000];
+};
+
+struct sgi_mace {
+ char _reserved[0x80000];
+
+ struct mace_pci pci;
+ char _pad0[0x80000 - sizeof(struct mace_pci)];
+
+ struct mace_video video_in1;
+ char _pad1[0x80000 - sizeof(struct mace_video)];
+
+ struct mace_video video_in2;
+ char _pad2[0x80000 - sizeof(struct mace_video)];
+
+ struct mace_video video_out;
+ char _pad3[0x80000 - sizeof(struct mace_video)];
+
+ struct mace_ethernet eth;
+ char _pad4[0x80000 - sizeof(struct mace_ethernet)];
+
+ struct mace_perif perif;
+ char _pad5[0x80000 - sizeof(struct mace_perif)];
+
+ struct mace_isa isa;
+ char _pad6[0x80000 - sizeof(struct mace_isa)];
+};
+
+extern struct sgi_mace __iomem *mace;
+
+#endif /* __ASM_MACE_H__ */
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
new file mode 100644
index 000000000..c5d351786
--- /dev/null
+++ b/arch/mips/include/asm/irq.h
@@ -0,0 +1,85 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 by Waldorf GMBH, written by Ralf Baechle
+ * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
+ */
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
+
+#include <linux/linkage.h>
+#include <linux/smp.h>
+#include <linux/irqdomain.h>
+
+#include <asm/mipsmtregs.h>
+
+#include <irq.h>
+
+#define IRQ_STACK_SIZE THREAD_SIZE
+#define IRQ_STACK_START (IRQ_STACK_SIZE - 16)
+
+extern void *irq_stack[NR_CPUS];
+
+/*
+ * The highest address on the IRQ stack contains a dummy frame put down in
+ * genex.S (handle_int & except_vec_vi_handler) which is structured as follows:
+ *
+ * top ------------
+ * | task sp | <- irq_stack[cpu] + IRQ_STACK_START
+ * ------------
+ * | | <- First frame of IRQ context
+ * ------------
+ *
+ * task sp holds a copy of the task stack pointer where the struct pt_regs
+ * from exception entry can be found.
+ */
+
+static inline bool on_irq_stack(int cpu, unsigned long sp)
+{
+ unsigned long low = (unsigned long)irq_stack[cpu];
+ unsigned long high = low + IRQ_STACK_SIZE;
+
+ return (low <= sp && sp <= high);
+}
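+
+#if 0
+/*
+ * Illustrative sketch, not part of this header: following the layout
+ * described above, an unwinder can hop from the IRQ stack back to the
+ * interrupted task's stack by reading the saved "task sp" slot.
+ */
+static unsigned long demo_leave_irq_stack(int cpu, unsigned long sp)
+{
+ if (on_irq_stack(cpu, sp)) {
+  unsigned long top = (unsigned long)irq_stack[cpu] + IRQ_STACK_START;
+
+  return *(unsigned long *)top; /* saved task stack pointer */
+ }
+ return sp;
+}
+#endif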
+
+#ifdef CONFIG_I8259
+static inline int irq_canonicalize(int irq)
+{
+ return ((irq == I8259A_IRQ_BASE + 2) ? I8259A_IRQ_BASE + 9 : irq);
+}
+#else
+#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */
+#endif
+
+asmlinkage void plat_irq_dispatch(void);
+
+extern void do_IRQ(unsigned int irq);
+
+extern void arch_init_irq(void);
+extern void spurious_interrupt(void);
+
+extern int allocate_irqno(void);
+extern void alloc_legacy_irqno(void);
+extern void free_irqno(unsigned int irq);
+
+/*
+ * Before R2 the timer and performance counter interrupts were both fixed to
+ * IE7. Since R2 their number has to be read from the c0_intctl register.
+ */
+#define CP0_LEGACY_COMPARE_IRQ 7
+#define CP0_LEGACY_PERFCNT_IRQ 7
+
+extern int cp0_compare_irq;
+extern int cp0_compare_irq_shift;
+extern int cp0_perfcount_irq;
+extern int cp0_fdc_irq;
+
+extern int get_c0_fdc_int(void);
+
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+ bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+
+#endif /* _ASM_IRQ_H */
diff --git a/arch/mips/include/asm/irq_cpu.h b/arch/mips/include/asm/irq_cpu.h
new file mode 100644
index 000000000..8d321180b
--- /dev/null
+++ b/arch/mips/include/asm/irq_cpu.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-mips/irq_cpu.h
+ *
+ * MIPS CPU interrupt definitions.
+ *
+ * Copyright (C) 2002 Maciej W. Rozycki
+ */
+#ifndef _ASM_IRQ_CPU_H
+#define _ASM_IRQ_CPU_H
+
+extern void mips_cpu_irq_init(void);
+extern void rm7k_cpu_irq_init(void);
+extern void rm9k_cpu_irq_init(void);
+
+#ifdef CONFIG_IRQ_DOMAIN
+struct device_node;
+extern int mips_cpu_irq_of_init(struct device_node *of_node,
+ struct device_node *parent);
+#endif
+
+#endif /* _ASM_IRQ_CPU_H */
diff --git a/arch/mips/include/asm/irq_gt641xx.h b/arch/mips/include/asm/irq_gt641xx.h
new file mode 100644
index 000000000..d689c1c6c
--- /dev/null
+++ b/arch/mips/include/asm/irq_gt641xx.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Galileo/Marvell GT641xx IRQ definitions.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#ifndef _ASM_IRQ_GT641XX_H
+#define _ASM_IRQ_GT641XX_H
+
+#ifndef GT641XX_IRQ_BASE
+#define GT641XX_IRQ_BASE 8
+#endif
+
+#define GT641XX_MEMORY_OUT_OF_RANGE_IRQ (GT641XX_IRQ_BASE + 1)
+#define GT641XX_DMA_OUT_OF_RANGE_IRQ (GT641XX_IRQ_BASE + 2)
+#define GT641XX_CPU_ACCESS_OUT_OF_RANGE_IRQ (GT641XX_IRQ_BASE + 3)
+#define GT641XX_DMA0_IRQ (GT641XX_IRQ_BASE + 4)
+#define GT641XX_DMA1_IRQ (GT641XX_IRQ_BASE + 5)
+#define GT641XX_DMA2_IRQ (GT641XX_IRQ_BASE + 6)
+#define GT641XX_DMA3_IRQ (GT641XX_IRQ_BASE + 7)
+#define GT641XX_TIMER0_IRQ (GT641XX_IRQ_BASE + 8)
+#define GT641XX_TIMER1_IRQ (GT641XX_IRQ_BASE + 9)
+#define GT641XX_TIMER2_IRQ (GT641XX_IRQ_BASE + 10)
+#define GT641XX_TIMER3_IRQ (GT641XX_IRQ_BASE + 11)
+#define GT641XX_PCI_0_MASTER_READ_ERROR_IRQ (GT641XX_IRQ_BASE + 12)
+#define GT641XX_PCI_0_SLAVE_WRITE_ERROR_IRQ (GT641XX_IRQ_BASE + 13)
+#define GT641XX_PCI_0_MASTER_WRITE_ERROR_IRQ (GT641XX_IRQ_BASE + 14)
+#define GT641XX_PCI_0_SLAVE_READ_ERROR_IRQ (GT641XX_IRQ_BASE + 15)
+#define GT641XX_PCI_0_ADDRESS_ERROR_IRQ (GT641XX_IRQ_BASE + 16)
+#define GT641XX_MEMORY_ERROR_IRQ (GT641XX_IRQ_BASE + 17)
+#define GT641XX_PCI_0_MASTER_ABORT_IRQ (GT641XX_IRQ_BASE + 18)
+#define GT641XX_PCI_0_TARGET_ABORT_IRQ (GT641XX_IRQ_BASE + 19)
+#define GT641XX_PCI_0_RETRY_TIMEOUT_IRQ (GT641XX_IRQ_BASE + 20)
+#define GT641XX_CPU_INT0_IRQ (GT641XX_IRQ_BASE + 21)
+#define GT641XX_CPU_INT1_IRQ (GT641XX_IRQ_BASE + 22)
+#define GT641XX_CPU_INT2_IRQ (GT641XX_IRQ_BASE + 23)
+#define GT641XX_CPU_INT3_IRQ (GT641XX_IRQ_BASE + 24)
+#define GT641XX_CPU_INT4_IRQ (GT641XX_IRQ_BASE + 25)
+#define GT641XX_PCI_INT0_IRQ (GT641XX_IRQ_BASE + 26)
+#define GT641XX_PCI_INT1_IRQ (GT641XX_IRQ_BASE + 27)
+#define GT641XX_PCI_INT2_IRQ (GT641XX_IRQ_BASE + 28)
+#define GT641XX_PCI_INT3_IRQ (GT641XX_IRQ_BASE + 29)
+
+extern void gt641xx_irq_dispatch(void);
+extern void gt641xx_irq_init(void);
+
+#endif /* _ASM_IRQ_GT641XX_H */
diff --git a/arch/mips/include/asm/irq_regs.h b/arch/mips/include/asm/irq_regs.h
new file mode 100644
index 000000000..7795dc02c
--- /dev/null
+++ b/arch/mips/include/asm/irq_regs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_IRQ_REGS_H
+#define __ASM_IRQ_REGS_H
+
+#define ARCH_HAS_OWN_IRQ_REGS
+
+#include <linux/thread_info.h>
+
+static inline struct pt_regs *get_irq_regs(void)
+{
+ return current_thread_info()->regs;
+}
+
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+ struct pt_regs *old_regs;
+
+ old_regs = get_irq_regs();
+ current_thread_info()->regs = new_regs;
+
+ return old_regs;
+}
+
+#endif /* __ASM_IRQ_REGS_H */
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
new file mode 100644
index 000000000..f5b8300f4
--- /dev/null
+++ b/arch/mips/include/asm/irqflags.h
@@ -0,0 +1,185 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/stringify.h>
+#include <asm/compiler.h>
+#include <asm/hazards.h>
+
+#if defined(CONFIG_CPU_HAS_DIEI)
+
+static inline void arch_local_irq_disable(void)
+{
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noat \n"
+ " di \n"
+ " " __stringify(__irq_disable_hazard) " \n"
+ " .set pop \n"
+ : /* no outputs */
+ : /* no inputs */
+ : "memory");
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+
+ asm __volatile__(
+ " .set push \n"
+ " .set reorder \n"
+ " .set noat \n"
+#if defined(CONFIG_CPU_LOONGSON64) || defined(CONFIG_CPU_LOONGSON32)
+ " mfc0 %[flags], $12 \n"
+ " di \n"
+#else
+ " di %[flags] \n"
+#endif
+ " andi %[flags], 1 \n"
+ " " __stringify(__irq_disable_hazard) " \n"
+ " .set pop \n"
+ : [flags] "=r" (flags)
+ : /* no inputs */
+ : "memory");
+
+ return flags;
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long __tmp1;
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder \n"
+ " .set noat \n"
+#if defined(CONFIG_IRQ_MIPS_CPU)
+ /*
+ * Slow, but doesn't suffer from a relatively unlikely race
+ * condition we've had since day 1.
+ */
+ " beqz %[flags], 1f \n"
+ " di \n"
+ " ei \n"
+ "1: \n"
+#else
+ /*
+ * Fast, dangerous. Life is fun, life is good.
+ */
+ " mfc0 $1, $12 \n"
+ " ins $1, %[flags], 0, 1 \n"
+ " mtc0 $1, $12 \n"
+#endif
+ " " __stringify(__irq_disable_hazard) " \n"
+ " .set pop \n"
+ : [flags] "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
+}
+
+#else
+/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
+void arch_local_irq_disable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+#endif /* CONFIG_CPU_HAS_DIEI */
+
+static inline void arch_local_irq_enable(void)
+{
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set reorder \n"
+ " .set noat \n"
+#if defined(CONFIG_CPU_HAS_DIEI)
+ " ei \n"
+#else
+ " mfc0 $1,$12 \n"
+ " ori $1,0x1f \n"
+ " xori $1,0x1e \n"
+ " mtc0 $1,$12 \n"
+#endif
+ " " __stringify(__irq_enable_hazard) " \n"
+ " .set pop \n"
+ : /* no outputs */
+ : /* no inputs */
+ : "memory");
+}
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
+
+ asm __volatile__(
+ " .set push \n"
+ " .set reorder \n"
+ " mfc0 %[flags], $12 \n"
+ " .set pop \n"
+ : [flags] "=r" (flags));
+
+ return flags;
+}
+
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & 1);
+}
+
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
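+
+#if 0
+/*
+ * Illustrative sketch, not part of this header: these primitives sit
+ * behind the generic local_irq_save()/local_irq_restore() helpers, which
+ * is how most kernel code reaches them.
+ */
+static void demo_critical_section(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);  /* ends up in arch_local_irq_save() */
+ /* ... touch state shared with an interrupt handler ... */
+ local_irq_restore(flags); /* arch_local_irq_restore(flags) */
+}
+#endif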
+
+#endif /* #ifndef __ASSEMBLY__ */
+
+/*
+ * Do the CPU's IRQ-state tracing from assembly code.
+ */
+#ifdef CONFIG_TRACE_IRQFLAGS
+/* Reload some registers clobbered by trace_hardirqs_on */
+#ifdef CONFIG_64BIT
+# define TRACE_IRQS_RELOAD_REGS \
+ LONG_L $11, PT_R11(sp); \
+ LONG_L $10, PT_R10(sp); \
+ LONG_L $9, PT_R9(sp); \
+ LONG_L $8, PT_R8(sp); \
+ LONG_L $7, PT_R7(sp); \
+ LONG_L $6, PT_R6(sp); \
+ LONG_L $5, PT_R5(sp); \
+ LONG_L $4, PT_R4(sp); \
+ LONG_L $2, PT_R2(sp)
+#else
+# define TRACE_IRQS_RELOAD_REGS \
+ LONG_L $7, PT_R7(sp); \
+ LONG_L $6, PT_R6(sp); \
+ LONG_L $5, PT_R5(sp); \
+ LONG_L $4, PT_R4(sp); \
+ LONG_L $2, PT_R2(sp)
+#endif
+# define TRACE_IRQS_ON \
+ CLI; /* make sure trace_hardirqs_on() is called in kernel level */ \
+ jal trace_hardirqs_on
+# define TRACE_IRQS_ON_RELOAD \
+ TRACE_IRQS_ON; \
+ TRACE_IRQS_RELOAD_REGS
+# define TRACE_IRQS_OFF \
+ jal trace_hardirqs_off
+#else
+# define TRACE_IRQS_ON
+# define TRACE_IRQS_ON_RELOAD
+# define TRACE_IRQS_OFF
+#endif
+
+#endif /* _ASM_IRQFLAGS_H */
diff --git a/arch/mips/include/asm/isa-rev.h b/arch/mips/include/asm/isa-rev.h
new file mode 100644
index 000000000..683ea3454
--- /dev/null
+++ b/arch/mips/include/asm/isa-rev.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MIPS Tech, LLC
+ * Author: Matt Redfearn <matt.redfearn@mips.com>
+ */
+
+#ifndef __MIPS_ASM_ISA_REV_H__
+#define __MIPS_ASM_ISA_REV_H__
+
+/*
+ * The ISA revision level. This is 0 for MIPS I to V and N for
+ * MIPS{32,64}rN.
+ */
+
+/* If the compiler has defined __mips_isa_rev, believe it. */
+#ifdef __mips_isa_rev
+#define MIPS_ISA_REV __mips_isa_rev
+#else
+/* The compiler hasn't defined the isa rev so assume it's MIPS I - V (0) */
+#define MIPS_ISA_REV 0
+#endif
+
+
+#endif /* __MIPS_ASM_ISA_REV_H__ */
diff --git a/arch/mips/include/asm/isadep.h b/arch/mips/include/asm/isadep.h
new file mode 100644
index 000000000..d16832023
--- /dev/null
+++ b/arch/mips/include/asm/isadep.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Various ISA level dependent constants.
+ * Most of the following constants reflect the different layout
+ * of Coprocessor 0 registers.
+ *
+ * Copyright (c) 1998 Harald Koerfgen
+ */
+
+#ifndef __ASM_ISADEP_H
+#define __ASM_ISADEP_H
+
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+/*
+ * R2000 or R3000
+ */
+
+/*
+ * kernel or user mode? (CP0_STATUS)
+ */
+#define KU_MASK 0x08
+#define KU_USER 0x08
+#define KU_KERN 0x00
+
+#else
+/*
+ * kernel or user mode?
+ */
+#define KU_MASK 0x18
+#define KU_USER 0x10
+#define KU_KERN 0x00
+
+#endif
+
+#endif /* __ASM_ISADEP_H */
diff --git a/arch/mips/include/asm/jazz.h b/arch/mips/include/asm/jazz.h
new file mode 100644
index 000000000..a61970d01
--- /dev/null
+++ b/arch/mips/include/asm/jazz.h
@@ -0,0 +1,310 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 - 1998 by Andreas Busse and Ralf Baechle
+ */
+#ifndef __ASM_JAZZ_H
+#define __ASM_JAZZ_H
+
+/*
+ * The addresses below are virtual addresses. The mappings are
+ * created on startup via wired entries in the TLB. The Mips
+ * Magnum R3000 and R4000 machines are similar in many aspects,
+ * but many hardware registers are accessible at 0xb9000000
+ * instead of 0xe0000000.
+ */
+
+#define JAZZ_LOCAL_IO_SPACE 0xe0000000
+
+/*
+ * Revision numbers in PICA_ASIC_REVISION
+ *
+ * 0xf0000000 - Rev1
+ * 0xf0000001 - Rev2
+ * 0xf0000002 - Rev3
+ */
+#define PICA_ASIC_REVISION 0xe0000008
+
+/*
+ * The segments of the seven segment LED are mapped
+ * to the control bits as follows:
+ *
+ * (7)
+ * ---------
+ * | |
+ * (2) | | (6)
+ * | (1) |
+ * ---------
+ * | |
+ * (3) | | (5)
+ * | (4) |
+ * --------- . (0)
+ */
+#define PICA_LED 0xe000f000
+
+/*
+ * Some characters for the LED control registers
+ * The original Mips machines seem to have an LED display
+ * with an integrated decoder, while the Acer machines can
+ * control each of the seven segments and the dot independently.
+ * It's only a toy, anyway...
+ */
+#define LED_DOT 0x01
+#define LED_SPACE 0x00
+#define LED_0 0xfc
+#define LED_1 0x60
+#define LED_2 0xda
+#define LED_3 0xf2
+#define LED_4 0x66
+#define LED_5 0xb6
+#define LED_6 0xbe
+#define LED_7 0xe0
+#define LED_8 0xfe
+#define LED_9 0xf6
+#define LED_A 0xee
+#define LED_b 0x3e
+#define LED_C 0x9c
+#define LED_d 0x7a
+#define LED_E 0x9e
+#define LED_F 0x8e
+
+#ifndef __ASSEMBLY__
+
+static __inline__ void pica_set_led(unsigned int bits)
+{
+ volatile unsigned int *led_register = (unsigned int *) PICA_LED;
+
+ *led_register = bits;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Base address of the Sonic Ethernet adapter in Jazz machines.
+ */
+#define JAZZ_ETHERNET_BASE 0xe0001000
+
+/*
+ * Base address of the 53C94 SCSI hostadapter in Jazz machines.
+ */
+#define JAZZ_SCSI_BASE 0xe0002000
+
+/*
+ * i8042 keyboard controller for JAZZ and PICA chipsets.
+ * This address is just a guess and seems to differ from
+ * other mips machines such as RC3xxx...
+ */
+#define JAZZ_KEYBOARD_ADDRESS 0xe0005000
+#define JAZZ_KEYBOARD_DATA 0xe0005000
+#define JAZZ_KEYBOARD_COMMAND 0xe0005001
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+ unsigned char data;
+ unsigned char command;
+} jazz_keyboard_hardware;
+
+#define jazz_kh ((keyboard_hardware *) JAZZ_KEYBOARD_ADDRESS)
+
+typedef struct {
+ unsigned char pad0[3];
+ unsigned char data;
+ unsigned char pad1[3];
+ unsigned char command;
+} mips_keyboard_hardware;
+
+/*
+ * For now. Needs to be changed for RC3xxx support. See below.
+ */
+#define keyboard_hardware jazz_keyboard_hardware
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * i8042 keyboard controller for most other Mips machines.
+ */
+#define MIPS_KEYBOARD_ADDRESS 0xb9005000
+#define MIPS_KEYBOARD_DATA 0xb9005003
+#define MIPS_KEYBOARD_COMMAND 0xb9005007
+
+/*
+ * Serial and parallel ports (WD 16C552) on the Mips JAZZ
+ */
+#define JAZZ_SERIAL1_BASE (unsigned int)0xe0006000
+#define JAZZ_SERIAL2_BASE (unsigned int)0xe0007000
+#define JAZZ_PARALLEL_BASE (unsigned int)0xe0008000
+
+/*
+ * Dummy Device Address. Used in jazzdma.c
+ */
+#define JAZZ_DUMMY_DEVICE 0xe000d000
+
+/*
+ * JAZZ timer registers and interrupt no.
+ * Note that the hardware timer interrupt is actually on
+ * cpu level 6, but to keep compatibility with PC stuff
+ * it is remapped to vector 0. See arch/mips/kernel/entry.S.
+ */
+#define JAZZ_TIMER_INTERVAL 0xe0000228
+#define JAZZ_TIMER_REGISTER 0xe0000230
+
+/*
+ * DRAM configuration register
+ */
+#ifndef __ASSEMBLY__
+#ifdef __MIPSEL__
+typedef struct {
+ unsigned int bank2 : 3;
+ unsigned int bank1 : 3;
+ unsigned int mem_bus_width : 1;
+ unsigned int reserved2 : 1;
+ unsigned int page_mode : 1;
+ unsigned int reserved1 : 23;
+} dram_configuration;
+#else /* defined (__MIPSEB__) */
+typedef struct {
+ unsigned int reserved1 : 23;
+ unsigned int page_mode : 1;
+ unsigned int reserved2 : 1;
+ unsigned int mem_bus_width : 1;
+ unsigned int bank1 : 3;
+ unsigned int bank2 : 3;
+} dram_configuration;
+#endif
+#endif /* !__ASSEMBLY__ */
+
+#define PICA_DRAM_CONFIG 0xe00fffe0
+
+/*
+ * JAZZ interrupt control registers
+ */
+#define JAZZ_IO_IRQ_SOURCE 0xe0010000
+#define JAZZ_IO_IRQ_ENABLE 0xe0010002
+
+/*
+ * JAZZ Interrupt Level definitions
+ *
+ * This is somewhat broken. For reasons which nobody can remember anymore
+ * we remap the Jazz interrupts to the usual ISA style interrupt numbers.
+ */
+#define JAZZ_IRQ_START 24
+#define JAZZ_IRQ_END (24 + 9)
+#define JAZZ_PARALLEL_IRQ (JAZZ_IRQ_START + 0)
+#define JAZZ_FLOPPY_IRQ (JAZZ_IRQ_START + 1)
+#define JAZZ_SOUND_IRQ (JAZZ_IRQ_START + 2)
+#define JAZZ_VIDEO_IRQ (JAZZ_IRQ_START + 3)
+#define JAZZ_ETHERNET_IRQ (JAZZ_IRQ_START + 4)
+#define JAZZ_SCSI_IRQ (JAZZ_IRQ_START + 5)
+#define JAZZ_KEYBOARD_IRQ (JAZZ_IRQ_START + 6)
+#define JAZZ_MOUSE_IRQ (JAZZ_IRQ_START + 7)
+#define JAZZ_SERIAL1_IRQ (JAZZ_IRQ_START + 8)
+#define JAZZ_SERIAL2_IRQ (JAZZ_IRQ_START + 9)
+
+#define JAZZ_TIMER_IRQ (MIPS_CPU_IRQ_BASE+6)
+
+
+/*
+ * JAZZ DMA Channels
+ * Note: Channels 4...7 are not used on the Acer PICA-61
+ * chipset, which does not provide these DMA channels.
+ */
+#define JAZZ_SCSI_DMA 0 /* SCSI */
+#define JAZZ_FLOPPY_DMA 1 /* FLOPPY */
+#define JAZZ_AUDIOL_DMA 2 /* AUDIO L */
+#define JAZZ_AUDIOR_DMA 3 /* AUDIO R */
+
+/*
+ * JAZZ R4030 MCT_ADR chip (DMA controller)
+ * Note: Virtual Addresses !
+ */
+#define JAZZ_R4030_CONFIG 0xE0000000 /* R4030 config register */
+#define JAZZ_R4030_REVISION 0xE0000008 /* same as PICA_ASIC_REVISION */
+#define JAZZ_R4030_INV_ADDR 0xE0000010 /* Invalid Address register */
+
+#define JAZZ_R4030_TRSTBL_BASE 0xE0000018 /* Translation Table Base */
+#define JAZZ_R4030_TRSTBL_LIM 0xE0000020 /* Translation Table Limit */
+#define JAZZ_R4030_TRSTBL_INV 0xE0000028 /* Translation Table Invalidate */
+
+#define JAZZ_R4030_CACHE_MTNC 0xE0000030 /* Cache Maintenance */
+#define JAZZ_R4030_R_FAIL_ADDR 0xE0000038 /* Remote Failed Address */
+#define JAZZ_R4030_M_FAIL_ADDR 0xE0000040 /* Memory Failed Address */
+
+#define JAZZ_R4030_CACHE_PTAG 0xE0000048 /* I/O Cache Physical Tag */
+#define JAZZ_R4030_CACHE_LTAG 0xE0000050 /* I/O Cache Logical Tag */
+#define JAZZ_R4030_CACHE_BMASK 0xE0000058 /* I/O Cache Byte Mask */
+#define JAZZ_R4030_CACHE_BWIN 0xE0000060 /* I/O Cache Buffer Window */
+
+/*
+ * Remote Speed Registers.
+ *
+ * 0: free, 1: Ethernet, 2: SCSI, 3: Floppy,
+ * 4: RTC, 5: Kb./Mouse 6: serial 1, 7: serial 2,
+ * 8: parallel, 9: NVRAM, 10: CPU, 11: PROM,
+ * 12: reserved, 13: free, 14: 7seg LED, 15: ???
+ */
+#define JAZZ_R4030_REM_SPEED 0xE0000070 /* 16 Remote Speed Registers */
+ /* 0xE0000070,78,80... 0xE00000E8 */
+#define JAZZ_R4030_IRQ_ENABLE 0xE00000E8 /* Internal Interrupt Enable */
+#define JAZZ_R4030_INVAL_ADDR 0xE0000010 /* Invalid address Register */
+#define JAZZ_R4030_IRQ_SOURCE 0xE0000200 /* Interrupt Source Register */
+#define JAZZ_R4030_I386_ERROR 0xE0000208 /* i386/EISA Bus Error */
+
+/*
+ * Virtual (E)ISA controller address
+ */
+#define JAZZ_EISA_IRQ_ACK 0xE0000238 /* EISA interrupt acknowledge */
+
+/*
+ * Access the R4030 DMA and I/O Controller
+ */
+#ifndef __ASSEMBLY__
+
+static inline void r4030_delay(void)
+{
+__asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ ".set\treorder");
+}
+
+static inline unsigned short r4030_read_reg16(unsigned long addr)
+{
+ unsigned short ret = *((volatile unsigned short *)addr);
+ r4030_delay();
+ return ret;
+}
+
+static inline unsigned int r4030_read_reg32(unsigned long addr)
+{
+ unsigned int ret = *((volatile unsigned int *)addr);
+ r4030_delay();
+ return ret;
+}
+
+static inline void r4030_write_reg16(unsigned long addr, unsigned val)
+{
+ *((volatile unsigned short *)addr) = val;
+ r4030_delay();
+}
+
+static inline void r4030_write_reg32(unsigned long addr, unsigned val)
+{
+ *((volatile unsigned int *)addr) = val;
+ r4030_delay();
+}
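+
+#if 0
+/*
+ * Illustrative sketch, not part of this header: reading the R4030
+ * interrupt source register through the accessor helpers above.
+ */
+static unsigned int demo_pending_local_irqs(void)
+{
+ return r4030_read_reg32(JAZZ_R4030_IRQ_SOURCE);
+}
+#endif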
+
+#endif /* !__ASSEMBLY__ */
+
+#define JAZZ_FDC_BASE 0xe0003000
+#define JAZZ_RTC_BASE 0xe0004000
+#define JAZZ_PORT_BASE 0xe2000000
+
+#define JAZZ_EISA_BASE 0xe3000000
+
+#endif /* __ASM_JAZZ_H */
diff --git a/arch/mips/include/asm/jazzdma.h b/arch/mips/include/asm/jazzdma.h
new file mode 100644
index 000000000..c831da7fa
--- /dev/null
+++ b/arch/mips/include/asm/jazzdma.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Helpfile for jazzdma.c -- Mips Jazz R4030 DMA controller support
+ */
+#ifndef _ASM_JAZZDMA_H
+#define _ASM_JAZZDMA_H
+
+/*
+ * Prototypes and macros
+ */
+extern unsigned long vdma_alloc(unsigned long paddr, unsigned long size);
+extern int vdma_free(unsigned long laddr);
+extern unsigned long vdma_phys2log(unsigned long paddr);
+extern unsigned long vdma_log2phys(unsigned long laddr);
+extern void vdma_stats(void); /* for debugging only */
+
+extern void vdma_enable(int channel);
+extern void vdma_disable(int channel);
+extern void vdma_set_mode(int channel, int mode);
+extern void vdma_set_addr(int channel, long addr);
+extern void vdma_set_count(int channel, int count);
+extern int vdma_get_residue(int channel);
+extern int vdma_get_enable(int channel);
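+
+#if 0
+/*
+ * Illustrative sketch, not part of this header: rough shape of how a Jazz
+ * driver might use the VDMA helpers above.  The channel, mode bits and
+ * buffer come from elsewhere and are assumptions here, as is the lack of
+ * error handling.
+ */
+static void demo_vdma_setup(void *buf, unsigned long size, int count)
+{
+ unsigned long laddr = vdma_alloc(CPHYSADDR(buf), size);
+
+ vdma_set_mode(JAZZ_SCSI_DMA, R4030_MODE_WIDTH_16 | R4030_MODE_INTR_EN);
+ vdma_set_addr(JAZZ_SCSI_DMA, laddr);
+ vdma_set_count(JAZZ_SCSI_DMA, count);
+ vdma_enable(JAZZ_SCSI_DMA);
+}
+#endif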
+
+/*
+ * some definitions used by the driver functions
+ */
+#define VDMA_PAGESIZE 4096
+#define VDMA_PGTBL_ENTRIES 4096
+#define VDMA_PGTBL_SIZE (sizeof(VDMA_PGTBL_ENTRY) * VDMA_PGTBL_ENTRIES)
+#define VDMA_PAGE_EMPTY 0xff000000
+
+/*
+ * Macros to get page no. and offset of a given address
+ * Note that VDMA_PAGE() works for physical addresses only
+ */
+#define VDMA_PAGE(a) ((unsigned int)(a) >> 12)
+#define VDMA_OFFSET(a) ((unsigned int)(a) & (VDMA_PAGESIZE-1))
+
+/*
+ * VDMA pagetable entry description
+ */
+typedef volatile struct VDMA_PGTBL_ENTRY {
+ unsigned int frame; /* physical frame no. */
+ unsigned int owner; /* owner of this entry (0=free) */
+} VDMA_PGTBL_ENTRY;
+
+
+/*
+ * DMA channel control registers
+ * in the R4030 MCT_ADR chip
+ */
+#define JAZZ_R4030_CHNL_MODE 0xE0000100 /* 8 DMA Channel Mode Registers, */
+ /* 0xE0000100,120,140... */
+#define JAZZ_R4030_CHNL_ENABLE 0xE0000108 /* 8 DMA Channel Enable Regs, */
+ /* 0xE0000108,128,148... */
+#define JAZZ_R4030_CHNL_COUNT 0xE0000110 /* 8 DMA Channel Byte Cnt Regs, */
+ /* 0xE0000110,130,150... */
+#define JAZZ_R4030_CHNL_ADDR 0xE0000118 /* 8 DMA Channel Address Regs, */
+ /* 0xE0000118,138,158... */
+
+/* channel enable register bits */
+
+#define R4030_CHNL_ENABLE (1<<0)
+#define R4030_CHNL_WRITE (1<<1)
+#define R4030_TC_INTR (1<<8)
+#define R4030_MEM_INTR (1<<9)
+#define R4030_ADDR_INTR (1<<10)
+
+/*
+ * Channel mode register bits
+ */
+#define R4030_MODE_ATIME_40 (0) /* device access time on remote bus */
+#define R4030_MODE_ATIME_80 (1)
+#define R4030_MODE_ATIME_120 (2)
+#define R4030_MODE_ATIME_160 (3)
+#define R4030_MODE_ATIME_200 (4)
+#define R4030_MODE_ATIME_240 (5)
+#define R4030_MODE_ATIME_280 (6)
+#define R4030_MODE_ATIME_320 (7)
+#define R4030_MODE_WIDTH_8 (1<<3) /* device data bus width */
+#define R4030_MODE_WIDTH_16 (2<<3)
+#define R4030_MODE_WIDTH_32 (3<<3)
+#define R4030_MODE_INTR_EN (1<<5)
+#define R4030_MODE_BURST (1<<6) /* Rev. 2 only */
+#define R4030_MODE_FAST_ACK (1<<7) /* Rev. 2 only */
+
+#endif /* _ASM_JAZZDMA_H */
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
new file mode 100644
index 000000000..3185fd322
--- /dev/null
+++ b/arch/mips/include/asm/jump_label.h
@@ -0,0 +1,75 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2010 Cavium Networks, Inc.
+ */
+#ifndef _ASM_MIPS_JUMP_LABEL_H
+#define _ASM_MIPS_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/isa-rev.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+#ifdef CONFIG_64BIT
+#define WORD_INSN ".dword"
+#else
+#define WORD_INSN ".word"
+#endif
+
+#ifdef CONFIG_CPU_MICROMIPS
+# define B_INSN "b32"
+# define J_INSN "j32"
+#elif MIPS_ISA_REV >= 6
+# define B_INSN "bc"
+# define J_INSN "bc"
+#else
+# define B_INSN "b"
+# define J_INSN "j"
+#endif
+
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
+ "2:\t.insn\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ WORD_INSN " 1b, %l[l_yes], %0\n\t"
+ ".popsection\n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:\t" J_INSN " %l[l_yes]\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ WORD_INSN " 1b, %l[l_yes], %0\n\t"
+ ".popsection\n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
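+
+/*
+ * Illustrative sketch (not part of the original header): these arch helpers
+ * are normally reached through the generic static-key API from
+ * <linux/jump_label.h> rather than called directly. "my_key" and
+ * do_feature() are hypothetical.
+ *
+ *	DEFINE_STATIC_KEY_FALSE(my_key);
+ *
+ *	if (static_branch_unlikely(&my_key))
+ *		do_feature();
+ *
+ *	static_branch_enable(&my_key);
+ *
+ * static_branch_enable() patches the branch site at run time, so the test
+ * above costs a single branch or nop rather than a load and compare.
+ */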
+
+#ifdef CONFIG_64BIT
+typedef u64 jump_label_t;
+#else
+typedef u32 jump_label_t;
+#endif
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_MIPS_JUMP_LABEL_H */
diff --git a/arch/mips/include/asm/kdebug.h b/arch/mips/include/asm/kdebug.h
new file mode 100644
index 000000000..a55a207cf
--- /dev/null
+++ b/arch/mips/include/asm/kdebug.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MIPS_KDEBUG_H
+#define _ASM_MIPS_KDEBUG_H
+
+#include <linux/notifier.h>
+
+enum die_val {
+ DIE_OOPS = 1,
+ DIE_FP,
+ DIE_TRAP,
+ DIE_RI,
+ DIE_PAGE_FAULT,
+ DIE_BREAK,
+ DIE_SSTEPBP,
+ DIE_MSAFP,
+ DIE_UPROBE,
+ DIE_UPROBE_XOL,
+};
+
+#endif /* _ASM_MIPS_KDEBUG_H */
diff --git a/arch/mips/include/asm/kexec.h b/arch/mips/include/asm/kexec.h
new file mode 100644
index 000000000..d6d5fa5cc
--- /dev/null
+++ b/arch/mips/include/asm/kexec.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * kexec.h for kexec
+ * Created by <nschichan@corp.free.fr> on Thu Oct 12 14:59:34 2006
+ */
+
+#ifndef _MIPS_KEXEC
+# define _MIPS_KEXEC
+
+#include <asm/stacktrace.h>
+
+/* Maximum physical address we can use pages from */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+/* Maximum address we can reach in physical address mode */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+/* Maximum address we can use for the control code buffer */
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+/* Reserve 3*4096 bytes for board-specific info */
+#define KEXEC_CONTROL_PAGE_SIZE (4096 + 3*4096)
+
+/* The native architecture */
+#define KEXEC_ARCH KEXEC_ARCH_MIPS
+#define MAX_NOTE_BYTES 1024
+
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
+{
+ if (oldregs)
+ memcpy(newregs, oldregs, sizeof(*newregs));
+ else
+ prepare_frametrace(newregs);
+}
+
+#ifdef CONFIG_KEXEC
+struct kimage;
+extern unsigned long kexec_args[4];
+extern int (*_machine_kexec_prepare)(struct kimage *);
+extern void (*_machine_kexec_shutdown)(void);
+extern void (*_machine_crash_shutdown)(struct pt_regs *regs);
+void default_machine_crash_shutdown(struct pt_regs *regs);
+void kexec_nonboot_cpu_jump(void);
+void kexec_reboot(void);
+#ifdef CONFIG_SMP
+extern const unsigned char kexec_smp_wait[];
+extern unsigned long secondary_kexec_args[4];
+extern atomic_t kexec_ready_to_reboot;
+extern void (*_crash_smp_send_stop)(void);
+#endif
+#endif
+
+#endif /* !_MIPS_KEXEC */
diff --git a/arch/mips/include/asm/kgdb.h b/arch/mips/include/asm/kgdb.h
new file mode 100644
index 000000000..4f2302267
--- /dev/null
+++ b/arch/mips/include/asm/kgdb.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KGDB_H_
+#define __ASM_KGDB_H_
+
+#ifdef __KERNEL__
+
+#include <asm/sgidefs.h>
+
+#if (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) || \
+ (_MIPS_ISA == _MIPS_ISA_MIPS32)
+
+#define KGDB_GDB_REG_SIZE 32
+#define GDB_SIZEOF_REG sizeof(u32)
+
+#elif (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4) || \
+ (_MIPS_ISA == _MIPS_ISA_MIPS64)
+
+#ifdef CONFIG_32BIT
+#define KGDB_GDB_REG_SIZE 32
+#define GDB_SIZEOF_REG sizeof(u32)
+#else /* !CONFIG_32BIT */
+#define KGDB_GDB_REG_SIZE 64
+#define GDB_SIZEOF_REG sizeof(u64)
+#endif
+#else
+#error "Need to set KGDB_GDB_REG_SIZE for MIPS ISA"
+#endif /* _MIPS_ISA */
+
+#define BUFMAX 2048
+#define DBG_MAX_REG_NUM 72
+#define NUMREGBYTES (DBG_MAX_REG_NUM * sizeof(GDB_SIZEOF_REG))
+#define NUMCRITREGBYTES (12 * sizeof(GDB_SIZEOF_REG))
+#define BREAK_INSTR_SIZE 4
+#define CACHE_FLUSH_IS_SAFE 0
+
+extern void arch_kgdb_breakpoint(void);
+extern void *saved_vectors[32];
+extern void handle_exception(struct pt_regs *regs);
+extern void breakinst(void);
+extern int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig);
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_KGDB_H_ */
diff --git a/arch/mips/include/asm/kmap_types.h b/arch/mips/include/asm/kmap_types.h
new file mode 100644
index 000000000..16665dc24
--- /dev/null
+++ b/arch/mips/include/asm/kmap_types.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_KMAP_TYPES_H
+#define _ASM_KMAP_TYPES_H
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+#define __WITH_KM_FENCE
+#endif
+
+#include <asm-generic/kmap_types.h>
+
+#undef __WITH_KM_FENCE
+
+#endif
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
new file mode 100644
index 000000000..68b1e5d45
--- /dev/null
+++ b/arch/mips/include/asm/kprobes.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Kernel Probes (KProbes)
+ * include/asm-mips/kprobes.h
+ *
+ * Copyright 2006 Sony Corp.
+ * Copyright 2010 Cavium Networks
+ */
+
+#ifndef _ASM_KPROBES_H
+#define _ASM_KPROBES_H
+
+#include <asm-generic/kprobes.h>
+
+#ifdef CONFIG_KPROBES
+#include <linux/ptrace.h>
+#include <linux/types.h>
+
+#include <asm/cacheflush.h>
+#include <asm/kdebug.h>
+#include <asm/inst.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+
+struct kprobe;
+struct pt_regs;
+
+typedef union mips_instruction kprobe_opcode_t;
+
+#define MAX_INSN_SIZE 2
+
+#define flush_insn_slot(p) \
+do { \
+ if (p->addr) \
+ flush_icache_range((unsigned long)p->addr, \
+ (unsigned long)p->addr + \
+ (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \
+} while (0)
+
+
+#define kretprobe_blacklist_size 0
+
+void arch_remove_kprobe(struct kprobe *p);
+int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+
+/* Architecture-specific copy of the original instruction */
+struct arch_specific_insn {
+ /* copy of the original instruction */
+ kprobe_opcode_t *insn;
+};
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned long status;
+ unsigned long old_SR;
+ unsigned long saved_SR;
+ unsigned long saved_epc;
+};
+
+#define SKIP_DELAYSLOT 0x0001
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned long kprobe_status;
+ unsigned long kprobe_old_SR;
+ unsigned long kprobe_saved_SR;
+ unsigned long kprobe_saved_epc;
+ /* Per-thread fields, used while emulating branches */
+ unsigned long flags;
+ unsigned long target_epc;
+ struct prev_kprobe prev_kprobe;
+};
+
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
+#endif /* CONFIG_KPROBES */
+#endif /* _ASM_KPROBES_H */
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
new file mode 100644
index 000000000..24f3d0f99
--- /dev/null
+++ b/arch/mips/include/asm/kvm_host.h
@@ -0,0 +1,1158 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __MIPS_KVM_HOST_H__
+#define __MIPS_KVM_HOST_H__
+
+#include <linux/cpumask.h>
+#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/kvm.h>
+#include <linux/kvm_types.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+
+#include <asm/inst.h>
+#include <asm/mipsregs.h>
+
+#include <kvm/iodev.h>
+
+/* MIPS KVM register ids */
+#define MIPS_CP0_32(_R, _S) \
+ (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
+
+#define MIPS_CP0_64(_R, _S) \
+ (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
+
+#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0)
+#define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
+#define KVM_REG_MIPS_CP0_CONTEXTCONFIG MIPS_CP0_32(4, 1)
+#define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
+#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG MIPS_CP0_64(4, 3)
+#define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
+#define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
+#define KVM_REG_MIPS_CP0_SEGCTL0 MIPS_CP0_64(5, 2)
+#define KVM_REG_MIPS_CP0_SEGCTL1 MIPS_CP0_64(5, 3)
+#define KVM_REG_MIPS_CP0_SEGCTL2 MIPS_CP0_64(5, 4)
+#define KVM_REG_MIPS_CP0_PWBASE MIPS_CP0_64(5, 5)
+#define KVM_REG_MIPS_CP0_PWFIELD MIPS_CP0_64(5, 6)
+#define KVM_REG_MIPS_CP0_PWSIZE MIPS_CP0_64(5, 7)
+#define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
+#define KVM_REG_MIPS_CP0_PWCTL MIPS_CP0_32(6, 6)
+#define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
+#define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
+#define KVM_REG_MIPS_CP0_BADINSTR MIPS_CP0_32(8, 1)
+#define KVM_REG_MIPS_CP0_BADINSTRP MIPS_CP0_32(8, 2)
+#define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
+#define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
+#define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
+#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
+#define KVM_REG_MIPS_CP0_INTCTL MIPS_CP0_32(12, 1)
+#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
+#define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
+#define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0)
+#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
+#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
+#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
+#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
+#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
+#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
+#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
+#define KVM_REG_MIPS_CP0_CONFIG6 MIPS_CP0_32(16, 6)
+#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
+#define KVM_REG_MIPS_CP0_MAARI MIPS_CP0_64(17, 2)
+#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
+#define KVM_REG_MIPS_CP0_DIAG MIPS_CP0_32(22, 0)
+#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
+#define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2)
+#define KVM_REG_MIPS_CP0_KSCRATCH2 MIPS_CP0_64(31, 3)
+#define KVM_REG_MIPS_CP0_KSCRATCH3 MIPS_CP0_64(31, 4)
+#define KVM_REG_MIPS_CP0_KSCRATCH4 MIPS_CP0_64(31, 5)
+#define KVM_REG_MIPS_CP0_KSCRATCH5 MIPS_CP0_64(31, 6)
+#define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7)
+
+
+#define KVM_MAX_VCPUS 16
+#define KVM_USER_MEM_SLOTS 16
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 0
+
+#define KVM_HALT_POLL_NS_DEFAULT 500000
+
+#ifdef CONFIG_KVM_MIPS_VZ
+extern unsigned long GUESTID_MASK;
+extern unsigned long GUESTID_FIRST_VERSION;
+extern unsigned long GUESTID_VERSION_MASK;
+#endif
+
+
+/*
+ * Special address that contains the comm page, used for reducing # of traps
+ * This needs to be within 32KB of 0x0 (so the zero register can be used), but
+ * preferably not at 0x0 so that most kernel NULL pointer dereferences can be
+ * caught.
+ */
+#define KVM_GUEST_COMMPAGE_ADDR ((PAGE_SIZE > 0x8000) ? 0 : \
+ (0x8000 - PAGE_SIZE))
+
+#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
+ ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
+
+#define KVM_GUEST_KUSEG 0x00000000UL
+#define KVM_GUEST_KSEG0 0x40000000UL
+#define KVM_GUEST_KSEG1 0x40000000UL
+#define KVM_GUEST_KSEG23 0x60000000UL
+#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0xe0000000)
+#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
+
+#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
+#define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
+#define KVM_GUEST_CKSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
+
+/*
+ * Map an address to a certain kernel segment
+ */
+#define KVM_GUEST_KSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
+#define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
+#define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
+
+#define KVM_INVALID_PAGE 0xdeadbeef
+#define KVM_INVALID_ADDR 0xdeadbeef
+
+/*
+ * EVA has overlapping user & kernel address spaces, so user VAs may be >
+ * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
+ * PAGE_OFFSET.
+ */
+
+#define KVM_HVA_ERR_BAD (-1UL)
+#define KVM_HVA_ERR_RO_BAD (-2UL)
+
+static inline bool kvm_is_error_hva(unsigned long addr)
+{
+ return IS_ERR_VALUE(addr);
+}
+
+struct kvm_vm_stat {
+ ulong remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+ u64 wait_exits;
+ u64 cache_exits;
+ u64 signal_exits;
+ u64 int_exits;
+ u64 cop_unusable_exits;
+ u64 tlbmod_exits;
+ u64 tlbmiss_ld_exits;
+ u64 tlbmiss_st_exits;
+ u64 addrerr_st_exits;
+ u64 addrerr_ld_exits;
+ u64 syscall_exits;
+ u64 resvd_inst_exits;
+ u64 break_inst_exits;
+ u64 trap_inst_exits;
+ u64 msa_fpe_exits;
+ u64 fpe_exits;
+ u64 msa_disabled_exits;
+ u64 flush_dcache_exits;
+#ifdef CONFIG_KVM_MIPS_VZ
+ u64 vz_gpsi_exits;
+ u64 vz_gsfc_exits;
+ u64 vz_hc_exits;
+ u64 vz_grr_exits;
+ u64 vz_gva_exits;
+ u64 vz_ghfc_exits;
+ u64 vz_gpa_exits;
+ u64 vz_resvd_exits;
+#ifdef CONFIG_CPU_LOONGSON64
+ u64 vz_cpucfg_exits;
+#endif
+#endif
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+ u64 halt_poll_success_ns;
+ u64 halt_poll_fail_ns;
+ u64 halt_poll_invalid;
+ u64 halt_wakeup;
+};
+
+struct kvm_arch_memory_slot {
+};
+
+#ifdef CONFIG_CPU_LOONGSON64
+struct ipi_state {
+ uint32_t status;
+ uint32_t en;
+ uint32_t set;
+ uint32_t clear;
+ uint64_t buf[4];
+};
+
+struct loongson_kvm_ipi;
+
+struct ipi_io_device {
+ int node_id;
+ struct loongson_kvm_ipi *ipi;
+ struct kvm_io_device device;
+};
+
+struct loongson_kvm_ipi {
+ spinlock_t lock;
+ struct kvm *kvm;
+ struct ipi_state ipistate[16];
+ struct ipi_io_device dev_ipi[4];
+};
+#endif
+
+struct kvm_arch {
+ /* Guest physical mm */
+ struct mm_struct gpa_mm;
+ /* Mask of CPUs needing GPA ASID flush */
+ cpumask_t asid_flush_mask;
+#ifdef CONFIG_CPU_LOONGSON64
+ struct loongson_kvm_ipi ipi;
+#endif
+};
+
+#define N_MIPS_COPROC_REGS 32
+#define N_MIPS_COPROC_SEL 8
+
+struct mips_coproc {
+ unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+#endif
+};
+
+/*
+ * Coprocessor 0 register names
+ */
+#define MIPS_CP0_TLB_INDEX 0
+#define MIPS_CP0_TLB_RANDOM 1
+#define MIPS_CP0_TLB_LOW 2
+#define MIPS_CP0_TLB_LO0 2
+#define MIPS_CP0_TLB_LO1 3
+#define MIPS_CP0_TLB_CONTEXT 4
+#define MIPS_CP0_TLB_PG_MASK 5
+#define MIPS_CP0_TLB_WIRED 6
+#define MIPS_CP0_HWRENA 7
+#define MIPS_CP0_BAD_VADDR 8
+#define MIPS_CP0_COUNT 9
+#define MIPS_CP0_TLB_HI 10
+#define MIPS_CP0_COMPARE 11
+#define MIPS_CP0_STATUS 12
+#define MIPS_CP0_CAUSE 13
+#define MIPS_CP0_EXC_PC 14
+#define MIPS_CP0_PRID 15
+#define MIPS_CP0_CONFIG 16
+#define MIPS_CP0_LLADDR 17
+#define MIPS_CP0_WATCH_LO 18
+#define MIPS_CP0_WATCH_HI 19
+#define MIPS_CP0_TLB_XCONTEXT 20
+#define MIPS_CP0_DIAG 22
+#define MIPS_CP0_ECC 26
+#define MIPS_CP0_CACHE_ERR 27
+#define MIPS_CP0_TAG_LO 28
+#define MIPS_CP0_TAG_HI 29
+#define MIPS_CP0_ERROR_PC 30
+#define MIPS_CP0_DEBUG 23
+#define MIPS_CP0_DEPC 24
+#define MIPS_CP0_PERFCNT 25
+#define MIPS_CP0_ERRCTL 26
+#define MIPS_CP0_DATA_LO 28
+#define MIPS_CP0_DATA_HI 29
+#define MIPS_CP0_DESAVE 31
+
+#define MIPS_CP0_CONFIG_SEL 0
+#define MIPS_CP0_CONFIG1_SEL 1
+#define MIPS_CP0_CONFIG2_SEL 2
+#define MIPS_CP0_CONFIG3_SEL 3
+#define MIPS_CP0_CONFIG4_SEL 4
+#define MIPS_CP0_CONFIG5_SEL 5
+
+#define MIPS_CP0_GUESTCTL2 10
+#define MIPS_CP0_GUESTCTL2_SEL 5
+#define MIPS_CP0_GTOFFSET 12
+#define MIPS_CP0_GTOFFSET_SEL 7
+
+/* Resume Flags */
+#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
+#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
+
+#define RESUME_GUEST 0
+#define RESUME_GUEST_DR RESUME_FLAG_DR
+#define RESUME_HOST RESUME_FLAG_HOST
+
+enum emulation_result {
+ EMULATE_DONE, /* no further processing */
+ EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
+ EMULATE_FAIL, /* can't emulate this instruction */
+ EMULATE_WAIT, /* WAIT instruction */
+ EMULATE_PRIV_FAIL,
+ EMULATE_EXCEPT, /* A guest exception has been generated */
+ EMULATE_HYPERCALL, /* HYPCALL instruction */
+};
+
+#define mips3_paddr_to_tlbpfn(x) \
+ (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
+#define mips3_tlbpfn_to_paddr(x) \
+ ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
+
+#define MIPS3_PG_SHIFT 6
+#define MIPS3_PG_FRAME 0x3fffffc0
+
+#if defined(CONFIG_64BIT)
+#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13)
+#else
+#define VPN2_MASK 0xffffe000
+#endif
+#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data)
+#define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
+#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
+#define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID)
+#define TLB_LO_IDX(x, va) (((va) >> PAGE_SHIFT) & 1)
+#define TLB_IS_VALID(x, va) ((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
+#define TLB_IS_DIRTY(x, va) ((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
+#define TLB_HI_VPN2_HIT(x, y) ((TLB_VPN2(x) & ~(x).tlb_mask) == \
+ ((y) & VPN2_MASK & ~(x).tlb_mask))
+#define TLB_HI_ASID_HIT(x, y) (TLB_IS_GLOBAL(x) || \
+ TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))
+
+struct kvm_mips_tlb {
+ long tlb_mask;
+ long tlb_hi;
+ long tlb_lo[2];
+};
+
+#define KVM_MIPS_AUX_FPU 0x1
+#define KVM_MIPS_AUX_MSA 0x2
+
+#define KVM_MIPS_GUEST_TLB_SIZE 64
+struct kvm_vcpu_arch {
+ void *guest_ebase;
+ int (*vcpu_run)(struct kvm_vcpu *vcpu);
+
+ /* Host registers preserved across guest mode execution */
+ unsigned long host_stack;
+ unsigned long host_gp;
+ unsigned long host_pgd;
+ unsigned long host_entryhi;
+
+ /* Host CP0 registers used when handling exits from guest */
+ unsigned long host_cp0_badvaddr;
+ unsigned long host_cp0_epc;
+ u32 host_cp0_cause;
+ u32 host_cp0_guestctl0;
+ u32 host_cp0_badinstr;
+ u32 host_cp0_badinstrp;
+
+ /* GPRS */
+ unsigned long gprs[32];
+ unsigned long hi;
+ unsigned long lo;
+ unsigned long pc;
+
+ /* FPU State */
+ struct mips_fpu_struct fpu;
+ /* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
+ unsigned int aux_inuse;
+
+ /* COP0 State */
+ struct mips_coproc *cop0;
+
+ /* Host KSEG0 address of the EI/DI offset */
+ void *kseg0_commpage;
+
+ /* Resume PC after MMIO completion */
+ unsigned long io_pc;
+ /* GPR used as IO source/target */
+ u32 io_gpr;
+
+ struct hrtimer comparecount_timer;
+ /* Count timer control KVM register */
+ u32 count_ctl;
+ /* Count bias from the raw time */
+ u32 count_bias;
+ /* Frequency of timer in Hz */
+ u32 count_hz;
+ /* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
+ s64 count_dyn_bias;
+ /* Resume time */
+ ktime_t count_resume;
+ /* Period of timer tick in ns */
+ u64 count_period;
+
+ /* Bitmask of exceptions that are pending */
+ unsigned long pending_exceptions;
+
+ /* Bitmask of pending exceptions to be cleared */
+ unsigned long pending_exceptions_clr;
+
+ /* S/W Based TLB for guest */
+ struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
+
+ /* Guest kernel/user [partial] mm */
+ struct mm_struct guest_kernel_mm, guest_user_mm;
+
+ /* Guest ASID of last user mode execution */
+ unsigned int last_user_gasid;
+
+ /* Cache some mmu pages needed inside spinlock regions */
+ struct kvm_mmu_memory_cache mmu_page_cache;
+
+#ifdef CONFIG_KVM_MIPS_VZ
+ /* vcpu's vzguestid is different on each host cpu in an smp system */
+ u32 vzguestid[NR_CPUS];
+
+ /* wired guest TLB entries */
+ struct kvm_mips_tlb *wired_tlb;
+ unsigned int wired_tlb_limit;
+ unsigned int wired_tlb_used;
+
+ /* emulated guest MAAR registers */
+ unsigned long maar[6];
+#endif
+
+ /* Last CPU the VCPU state was loaded on */
+ int last_sched_cpu;
+ /* Last CPU the VCPU actually executed guest code on */
+ int last_exec_cpu;
+
+ /* WAIT executed */
+ int wait;
+
+ u8 fpu_enabled;
+ u8 msa_enabled;
+};
+
+static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
+ unsigned long val)
+{
+ unsigned long temp;
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
+ " " __LL "%0, %1 \n"
+ " or %0, %2 \n"
+ " " __SC "%0, %1 \n"
+ " .set pop \n"
+ : "=&r" (temp), "+m" (*reg)
+ : "r" (val));
+ } while (unlikely(!temp));
+}
+
+static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
+ unsigned long val)
+{
+ unsigned long temp;
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
+ " " __LL "%0, %1 \n"
+ " and %0, %2 \n"
+ " " __SC "%0, %1 \n"
+ " .set pop \n"
+ : "=&r" (temp), "+m" (*reg)
+ : "r" (~val));
+ } while (unlikely(!temp));
+}
+
+static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
+ unsigned long change,
+ unsigned long val)
+{
+ unsigned long temp;
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
+ " " __LL "%0, %1 \n"
+ " and %0, %2 \n"
+ " or %0, %3 \n"
+ " " __SC "%0, %1 \n"
+ " .set pop \n"
+ : "=&r" (temp), "+m" (*reg)
+ : "r" (~change), "r" (val & change));
+ } while (unlikely(!temp));
+}
+
+/* Guest register types, used in accessor build below */
+#define __KVMT32 u32
+#define __KVMTl unsigned long
+
+/*
+ * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
+ * These operate on the saved guest C0 state in RAM.
+ */
+
+/* Generate saved context simple accessors */
+#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
+static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
+{ \
+ return cop0->reg[(_reg)][(sel)]; \
+} \
+static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ cop0->reg[(_reg)][(sel)] = val; \
+}
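+
+/*
+ * For illustration (not in the original header): instantiating, say,
+ * __BUILD_KVM_RW_SAVED(status, 32, MIPS_CP0_STATUS, 0) expands to roughly:
+ *
+ *	static inline u32 kvm_read_sw_gc0_status(struct mips_coproc *cop0)
+ *	{
+ *		return cop0->reg[MIPS_CP0_STATUS][0];
+ *	}
+ *	static inline void kvm_write_sw_gc0_status(struct mips_coproc *cop0,
+ *						   u32 val)
+ *	{
+ *		cop0->reg[MIPS_CP0_STATUS][0] = val;
+ *	}
+ */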
+
+/* Generate saved context bitwise modifiers */
+#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
+static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ cop0->reg[(_reg)][(sel)] |= val; \
+} \
+static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ cop0->reg[(_reg)][(sel)] &= ~val; \
+} \
+static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type mask, \
+ __KVMT##type val) \
+{ \
+ unsigned long _mask = mask; \
+ cop0->reg[(_reg)][(sel)] &= ~_mask; \
+ cop0->reg[(_reg)][(sel)] |= val & _mask; \
+}
+
+/* Generate saved context atomic bitwise modifiers */
+#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
+static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ _kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
+} \
+static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ _kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
+} \
+static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type mask, \
+ __KVMT##type val) \
+{ \
+ _kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
+ val); \
+}
+
+/*
+ * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
+ * These operate on the VZ guest C0 context in hardware.
+ */
+
+/* Generate VZ guest context simple accessors */
+#define __BUILD_KVM_RW_VZ(name, type, _reg, sel) \
+static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
+{ \
+ return read_gc0_##name(); \
+} \
+static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ write_gc0_##name(val); \
+}
+
+/* Generate VZ guest context bitwise modifiers */
+#define __BUILD_KVM_SET_VZ(name, type, _reg, sel) \
+static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ set_gc0_##name(val); \
+} \
+static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ clear_gc0_##name(val); \
+} \
+static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type mask, \
+ __KVMT##type val) \
+{ \
+ change_gc0_##name(mask, val); \
+}
+
+/* Generate VZ guest context save/restore to/from saved context */
+#define __BUILD_KVM_SAVE_VZ(name, _reg, sel) \
+static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0) \
+{ \
+ write_gc0_##name(cop0->reg[(_reg)][(sel)]); \
+} \
+static inline void kvm_save_gc0_##name(struct mips_coproc *cop0) \
+{ \
+ cop0->reg[(_reg)][(sel)] = read_gc0_##name(); \
+}
+
+/*
+ * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
+ * These wrap a set of operations to provide them with a different name.
+ */
+
+/* Generate simple accessor wrapper */
+#define __BUILD_KVM_RW_WRAP(name1, name2, type) \
+static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0) \
+{ \
+ return kvm_read_##name2(cop0); \
+} \
+static inline void kvm_write_##name1(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ kvm_write_##name2(cop0, val); \
+}
+
+/* Generate bitwise modifier wrapper */
+#define __BUILD_KVM_SET_WRAP(name1, name2, type) \
+static inline void kvm_set_##name1(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ kvm_set_##name2(cop0, val); \
+} \
+static inline void kvm_clear_##name1(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ kvm_clear_##name2(cop0, val); \
+} \
+static inline void kvm_change_##name1(struct mips_coproc *cop0, \
+ __KVMT##type mask, \
+ __KVMT##type val) \
+{ \
+ kvm_change_##name2(cop0, mask, val); \
+}
+
+/*
+ * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
+ * These generate accessors operating on the saved context in RAM, and wrap them
+ * with the common guest C0 accessors (for use by common emulation code).
+ */
+
+#define __BUILD_KVM_RW_SW(name, type, _reg, sel) \
+ __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
+ __BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)
+
+#define __BUILD_KVM_SET_SW(name, type, _reg, sel) \
+ __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
+ __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
+
+#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel) \
+ __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
+ __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
+
+#ifndef CONFIG_KVM_MIPS_VZ
+
+/*
+ * T&E (trap & emulate software based virtualisation)
+ * We generate the common accessors operating exclusively on the saved context
+ * in RAM.
+ */
+
+#define __BUILD_KVM_RW_HW __BUILD_KVM_RW_SW
+#define __BUILD_KVM_SET_HW __BUILD_KVM_SET_SW
+#define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_ATOMIC_SW
+
+#else
+
+/*
+ * VZ (hardware assisted virtualisation)
+ * These macros use the active guest state in VZ mode (hardware registers),
+ */
+
+/*
+ * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
+ * These generate accessors operating on the VZ guest context in hardware, and
+ * wrap them with the common guest C0 accessors (for use by common emulation
+ * code).
+ *
+ * Accessors operating on the saved context in RAM are also generated to allow
+ * convenient explicit saving and restoring of the state.
+ */
+
+#define __BUILD_KVM_RW_HW(name, type, _reg, sel) \
+ __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
+ __BUILD_KVM_RW_VZ(name, type, _reg, sel) \
+ __BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type) \
+ __BUILD_KVM_SAVE_VZ(name, _reg, sel)
+
+#define __BUILD_KVM_SET_HW(name, type, _reg, sel) \
+ __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
+ __BUILD_KVM_SET_VZ(name, type, _reg, sel) \
+ __BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)
+
+/*
+ * We can't do atomic modifications of COP0 state if hardware can modify it.
+ * Races must be handled explicitly.
+ */
+#define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_SET_HW
+
+#endif
+
+/*
+ * Define accessors for CP0 registers that are accessible to the guest. These
+ * are primarily used by common emulation code, which may need to access the
+ * registers differently depending on the implementation.
+ *
+ * fns_hw/sw name type reg num select
+ */
+__BUILD_KVM_RW_HW(index, 32, MIPS_CP0_TLB_INDEX, 0)
+__BUILD_KVM_RW_HW(entrylo0, l, MIPS_CP0_TLB_LO0, 0)
+__BUILD_KVM_RW_HW(entrylo1, l, MIPS_CP0_TLB_LO1, 0)
+__BUILD_KVM_RW_HW(context, l, MIPS_CP0_TLB_CONTEXT, 0)
+__BUILD_KVM_RW_HW(contextconfig, 32, MIPS_CP0_TLB_CONTEXT, 1)
+__BUILD_KVM_RW_HW(userlocal, l, MIPS_CP0_TLB_CONTEXT, 2)
+__BUILD_KVM_RW_HW(xcontextconfig, l, MIPS_CP0_TLB_CONTEXT, 3)
+__BUILD_KVM_RW_HW(pagemask, l, MIPS_CP0_TLB_PG_MASK, 0)
+__BUILD_KVM_RW_HW(pagegrain, 32, MIPS_CP0_TLB_PG_MASK, 1)
+__BUILD_KVM_RW_HW(segctl0, l, MIPS_CP0_TLB_PG_MASK, 2)
+__BUILD_KVM_RW_HW(segctl1, l, MIPS_CP0_TLB_PG_MASK, 3)
+__BUILD_KVM_RW_HW(segctl2, l, MIPS_CP0_TLB_PG_MASK, 4)
+__BUILD_KVM_RW_HW(pwbase, l, MIPS_CP0_TLB_PG_MASK, 5)
+__BUILD_KVM_RW_HW(pwfield, l, MIPS_CP0_TLB_PG_MASK, 6)
+__BUILD_KVM_RW_HW(pwsize, l, MIPS_CP0_TLB_PG_MASK, 7)
+__BUILD_KVM_RW_HW(wired, 32, MIPS_CP0_TLB_WIRED, 0)
+__BUILD_KVM_RW_HW(pwctl, 32, MIPS_CP0_TLB_WIRED, 6)
+__BUILD_KVM_RW_HW(hwrena, 32, MIPS_CP0_HWRENA, 0)
+__BUILD_KVM_RW_HW(badvaddr, l, MIPS_CP0_BAD_VADDR, 0)
+__BUILD_KVM_RW_HW(badinstr, 32, MIPS_CP0_BAD_VADDR, 1)
+__BUILD_KVM_RW_HW(badinstrp, 32, MIPS_CP0_BAD_VADDR, 2)
+__BUILD_KVM_RW_SW(count, 32, MIPS_CP0_COUNT, 0)
+__BUILD_KVM_RW_HW(entryhi, l, MIPS_CP0_TLB_HI, 0)
+__BUILD_KVM_RW_HW(compare, 32, MIPS_CP0_COMPARE, 0)
+__BUILD_KVM_RW_HW(status, 32, MIPS_CP0_STATUS, 0)
+__BUILD_KVM_RW_HW(intctl, 32, MIPS_CP0_STATUS, 1)
+__BUILD_KVM_RW_HW(cause, 32, MIPS_CP0_CAUSE, 0)
+__BUILD_KVM_RW_HW(epc, l, MIPS_CP0_EXC_PC, 0)
+__BUILD_KVM_RW_SW(prid, 32, MIPS_CP0_PRID, 0)
+__BUILD_KVM_RW_HW(ebase, l, MIPS_CP0_PRID, 1)
+__BUILD_KVM_RW_HW(config, 32, MIPS_CP0_CONFIG, 0)
+__BUILD_KVM_RW_HW(config1, 32, MIPS_CP0_CONFIG, 1)
+__BUILD_KVM_RW_HW(config2, 32, MIPS_CP0_CONFIG, 2)
+__BUILD_KVM_RW_HW(config3, 32, MIPS_CP0_CONFIG, 3)
+__BUILD_KVM_RW_HW(config4, 32, MIPS_CP0_CONFIG, 4)
+__BUILD_KVM_RW_HW(config5, 32, MIPS_CP0_CONFIG, 5)
+__BUILD_KVM_RW_HW(config6, 32, MIPS_CP0_CONFIG, 6)
+__BUILD_KVM_RW_HW(config7, 32, MIPS_CP0_CONFIG, 7)
+__BUILD_KVM_RW_SW(maari, l, MIPS_CP0_LLADDR, 2)
+__BUILD_KVM_RW_HW(xcontext, l, MIPS_CP0_TLB_XCONTEXT, 0)
+__BUILD_KVM_RW_HW(errorepc, l, MIPS_CP0_ERROR_PC, 0)
+__BUILD_KVM_RW_HW(kscratch1, l, MIPS_CP0_DESAVE, 2)
+__BUILD_KVM_RW_HW(kscratch2, l, MIPS_CP0_DESAVE, 3)
+__BUILD_KVM_RW_HW(kscratch3, l, MIPS_CP0_DESAVE, 4)
+__BUILD_KVM_RW_HW(kscratch4, l, MIPS_CP0_DESAVE, 5)
+__BUILD_KVM_RW_HW(kscratch5, l, MIPS_CP0_DESAVE, 6)
+__BUILD_KVM_RW_HW(kscratch6, l, MIPS_CP0_DESAVE, 7)
+
+/* Bitwise operations (on HW state) */
+__BUILD_KVM_SET_HW(status, 32, MIPS_CP0_STATUS, 0)
+/* Cause can be modified asynchronously from hardirq hrtimer callback */
+__BUILD_KVM_ATOMIC_HW(cause, 32, MIPS_CP0_CAUSE, 0)
+__BUILD_KVM_SET_HW(ebase, l, MIPS_CP0_PRID, 1)
+
+/* Bitwise operations (on saved state) */
+__BUILD_KVM_SET_SAVED(config, 32, MIPS_CP0_CONFIG, 0)
+__BUILD_KVM_SET_SAVED(config1, 32, MIPS_CP0_CONFIG, 1)
+__BUILD_KVM_SET_SAVED(config2, 32, MIPS_CP0_CONFIG, 2)
+__BUILD_KVM_SET_SAVED(config3, 32, MIPS_CP0_CONFIG, 3)
+__BUILD_KVM_SET_SAVED(config4, 32, MIPS_CP0_CONFIG, 4)
+__BUILD_KVM_SET_SAVED(config5, 32, MIPS_CP0_CONFIG, 5)
+
+/* Helpers */
+
+static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
+{
+ return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
+ vcpu->fpu_enabled;
+}
+
+static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
+{
+ return kvm_mips_guest_can_have_fpu(vcpu) &&
+ kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
+}
+
+static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
+{
+ return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
+ vcpu->msa_enabled;
+}
+
+static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
+{
+ return kvm_mips_guest_can_have_msa(vcpu) &&
+ kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
+}
+
+struct kvm_mips_callbacks {
+ int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
+ int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
+ int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
+ int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
+ int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
+ int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
+ int (*handle_syscall)(struct kvm_vcpu *vcpu);
+ int (*handle_res_inst)(struct kvm_vcpu *vcpu);
+ int (*handle_break)(struct kvm_vcpu *vcpu);
+ int (*handle_trap)(struct kvm_vcpu *vcpu);
+ int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
+ int (*handle_fpe)(struct kvm_vcpu *vcpu);
+ int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
+ int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
+ int (*hardware_enable)(void);
+ void (*hardware_disable)(void);
+ int (*check_extension)(struct kvm *kvm, long ext);
+ int (*vcpu_init)(struct kvm_vcpu *vcpu);
+ void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
+ int (*vcpu_setup)(struct kvm_vcpu *vcpu);
+ void (*flush_shadow_all)(struct kvm *kvm);
+ /*
+ * Must take care of flushing any cached GPA PTEs (e.g. guest entries in
+ * VZ root TLB, or T&E GVA page tables and corresponding root TLB
+ * mappings).
+ */
+ void (*flush_shadow_memslot)(struct kvm *kvm,
+ const struct kvm_memory_slot *slot);
+ gpa_t (*gva_to_gpa)(gva_t gva);
+ void (*queue_timer_int)(struct kvm_vcpu *vcpu);
+ void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
+ void (*queue_io_int)(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq);
+ void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq);
+ int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
+ u32 cause);
+ int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
+ u32 cause);
+ unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
+ int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
+ int (*get_one_reg)(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg, s64 *v);
+ int (*set_one_reg)(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg, s64 v);
+ int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+ int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
+ int (*vcpu_run)(struct kvm_vcpu *vcpu);
+ void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
+};
+extern struct kvm_mips_callbacks *kvm_mips_callbacks;
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
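+
+/*
+ * Illustrative sketch (not part of the original header): arch-independent
+ * KVM code dispatches exit handling through this table, e.g.
+ *
+ *	ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
+ *
+ * kvm_mips_emulation_init() installs the concrete (T&E or VZ)
+ * implementation behind the kvm_mips_callbacks pointer.
+ */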
+
+/* Debug: dump vcpu state */
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);
+
+/* Building of entry/exception code */
+int kvm_mips_entry_setup(void);
+void *kvm_mips_build_vcpu_run(void *addr);
+void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
+void *kvm_mips_build_exception(void *addr, void *handler);
+void *kvm_mips_build_exit(void *addr);
+
+/* FPU/MSA context management */
+void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
+void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
+void kvm_own_fpu(struct kvm_vcpu *vcpu);
+void kvm_own_msa(struct kvm_vcpu *vcpu);
+void kvm_drop_fpu(struct kvm_vcpu *vcpu);
+void kvm_lose_fpu(struct kvm_vcpu *vcpu);
+
+/* TLB handling */
+u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
+
+u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);
+
+u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
+
+#ifdef CONFIG_KVM_MIPS_VZ
+int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu, bool write_fault);
+#endif
+extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu,
+ bool write_fault);
+
+extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+ struct kvm_mips_tlb *tlb,
+ unsigned long gva,
+ bool write_fault);
+
+extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu,
+ bool write_fault);
+
+extern void kvm_mips_dump_host_tlbs(void);
+extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
+extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
+ bool user, bool kernel);
+
+extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
+ unsigned long entryhi);
+
+#ifdef CONFIG_KVM_MIPS_VZ
+int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
+int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
+ unsigned long *gpa);
+void kvm_vz_local_flush_roottlb_all_guests(void);
+void kvm_vz_local_flush_guesttlb_all(void);
+void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
+ unsigned int count);
+void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
+ unsigned int count);
+#ifdef CONFIG_CPU_LOONGSON64
+void kvm_loongson_clear_guest_vtlb(void);
+void kvm_loongson_clear_guest_ftlb(void);
+#endif
+#endif
+
+void kvm_mips_suspend_mm(int cpu);
+void kvm_mips_resume_mm(int cpu);
+
+/* MMU handling */
+
+/**
+ * enum kvm_mips_flush - Types of MMU flushes.
+ * @KMF_USER: Flush guest user virtual memory mappings.
+ * Guest USeg only.
+ * @KMF_KERN: Flush guest kernel virtual memory mappings.
+ * Guest USeg and KSeg2/3.
+ * @KMF_GPA: Flush guest physical memory mappings.
+ * Also includes KSeg0 if KMF_KERN is set.
+ */
+enum kvm_mips_flush {
+ KMF_USER = 0x0,
+ KMF_KERN = 0x1,
+ KMF_GPA = 0x2,
+};
+void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
+bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
+int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
+pgd_t *kvm_pgd_alloc(void);
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
+void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
+ bool user);
+void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu);
+void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu);
+
+enum kvm_mips_fault_result {
+ KVM_MIPS_MAPPED = 0,
+ KVM_MIPS_GVA,
+ KVM_MIPS_GPA,
+ KVM_MIPS_TLB,
+ KVM_MIPS_TLBINV,
+ KVM_MIPS_TLBMOD,
+};
+enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
+ unsigned long gva,
+ bool write);
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+int kvm_unmap_hva_range(struct kvm *kvm,
+ unsigned long start, unsigned long end, unsigned flags);
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+
+/* Emulation */
+int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
+int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
+int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
+
+/**
+ * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
+ * @vcpu: Virtual CPU.
+ *
+ * Returns: Whether the TLBL exception was likely due to an instruction
+ * fetch fault rather than a data load fault.
+ */
+static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
+{
+ unsigned long badvaddr = vcpu->host_cp0_badvaddr;
+ unsigned long epc = msk_isa16_mode(vcpu->pc);
+ u32 cause = vcpu->host_cp0_cause;
+
+ if (epc == badvaddr)
+ return true;
+
+ /*
+ * Branches may be 32-bit or 16-bit instructions.
+ * This isn't exact, but we don't really support MIPS16 or microMIPS yet
+ * in KVM anyway.
+ */
+ if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
+ return true;
+
+ return false;
+}
+
+extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_handle_ri(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu);
+
+u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
+void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
+int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
+int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
+int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
+void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
+void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
+enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
+
+/* fairly internal functions requiring some care to use */
+int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
+ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
+int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
+ u32 count, int min_drift);
+
+#ifdef CONFIG_KVM_MIPS_VZ
+void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
+void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);
+#else
+static inline void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {}
+#endif
+
+enum emulation_result kvm_mips_check_privilege(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
+ u32 *opc,
+ u32 cause,
+ struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
+ u32 *opc,
+ u32 cause,
+ struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
+ u32 cause,
+ struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
+ u32 cause,
+ struct kvm_vcpu *vcpu);
+
+/* COP0 */
+enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);
+
+unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
+
+/* Hypercalls (hypcall.c) */
+
+enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
+ union mips_instruction inst);
+int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);
+
+/* Dynamic binary translation */
+extern int kvm_mips_trans_cache_index(union mips_instruction inst,
+ u32 *opc, struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
+ struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
+ struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
+ struct kvm_vcpu *vcpu);
+
+/* Misc */
+extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
+extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
+extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq);
+
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_free_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
+
+#endif /* __MIPS_KVM_HOST_H__ */
diff --git a/arch/mips/include/asm/kvm_types.h b/arch/mips/include/asm/kvm_types.h
new file mode 100644
index 000000000..213754d9e
--- /dev/null
+++ b/arch/mips/include/asm/kvm_types.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MIPS_KVM_TYPES_H
+#define _ASM_MIPS_KVM_TYPES_H
+
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 4
+
+#endif /* _ASM_MIPS_KVM_TYPES_H */
diff --git a/arch/mips/include/asm/linkage.h b/arch/mips/include/asm/linkage.h
new file mode 100644
index 000000000..1829c2b6d
--- /dev/null
+++ b/arch/mips/include/asm/linkage.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#ifdef __ASSEMBLY__
+#include <asm/asm.h>
+#endif
+
+#define cond_syscall(x) asm(".weak\t" #x "\n" #x "\t=\tsys_ni_syscall")
+#define SYSCALL_ALIAS(alias, name) \
+ asm ( #alias " = " #name "\n\t.globl " #alias)
+
+#endif
diff --git a/arch/mips/include/asm/llsc.h b/arch/mips/include/asm/llsc.h
new file mode 100644
index 000000000..ec09fe5d6
--- /dev/null
+++ b/arch/mips/include/asm/llsc.h
@@ -0,0 +1,39 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Macros for 32/64-bit neutral inline assembler
+ */
+
+#ifndef __ASM_LLSC_H
+#define __ASM_LLSC_H
+
+#include <asm/isa-rev.h>
+
+#if _MIPS_SZLONG == 32
+#define __LL "ll "
+#define __SC "sc "
+#define __INS "ins "
+#define __EXT "ext "
+#elif _MIPS_SZLONG == 64
+#define __LL "lld "
+#define __SC "scd "
+#define __INS "dins "
+#define __EXT "dext "
+#endif
+
+/*
+ * Using a branch-likely instruction to check the result of an sc instruction
+ * works around a bug present in R10000 CPUs prior to revision 3.0 that could
+ * cause ll-sc sequences to execute non-atomically.
+ */
+#ifdef CONFIG_WAR_R10000_LLSC
+# define __SC_BEQZ "beqzl "
+#elif MIPS_ISA_REV >= 6
+# define __SC_BEQZ "beqzc "
+#else
+# define __SC_BEQZ "beqz "
+#endif
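+
+/*
+ * Illustrative sketch (not part of the original header): a typical LL/SC
+ * retry loop built from these macros, here OR-ing a mask into *ptr
+ * atomically. "tmp", "ptr" and "mask" are hypothetical.
+ *
+ *	__asm__ __volatile__(
+ *	"1:	" __LL		"%0, %1		\n"
+ *	"	or		%0, %0, %2	\n"
+ *	"	" __SC		"%0, %1		\n"
+ *	"	" __SC_BEQZ	"%0, 1b		\n"
+ *	: "=&r" (tmp), "+m" (*ptr)
+ *	: "r" (mask));
+ *
+ * __SC_BEQZ resolves to beqzl, beqzc or plain beqz as appropriate, so the
+ * same loop covers the R10000 workaround and MIPS R6 compact branches.
+ */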
+
+#endif /* __ASM_LLSC_H */
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
new file mode 100644
index 000000000..ecda7295d
--- /dev/null
+++ b/arch/mips/include/asm/local.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARCH_MIPS_LOCAL_H
+#define _ARCH_MIPS_LOCAL_H
+
+#include <linux/percpu.h>
+#include <linux/bitops.h>
+#include <linux/atomic.h>
+#include <asm/cmpxchg.h>
+#include <asm/compiler.h>
+#include <asm/war.h>
+
+typedef struct
+{
+ atomic_long_t a;
+} local_t;
+
+#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+#define local_read(l) atomic_long_read(&(l)->a)
+#define local_set(l, i) atomic_long_set(&(l)->a, (i))
+
+#define local_add(i, l) atomic_long_add((i), (&(l)->a))
+#define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
+#define local_inc(l) atomic_long_inc(&(l)->a)
+#define local_dec(l) atomic_long_dec(&(l)->a)
+
+/*
+ * Same as above, but return the result value
+ */
+static __inline__ long local_add_return(long i, local_t * l)
+{
+ unsigned long result;
+
+ if (kernel_uses_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set arch=r4000 \n"
+ __SYNC(full, loongson3_war) " \n"
+ "1:" __LL "%1, %2 # local_add_return \n"
+ " addu %0, %1, %3 \n"
+ __SC "%0, %2 \n"
+ " beqzl %0, 1b \n"
+ " addu %0, %1, %3 \n"
+ " .set pop \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "Ir" (i), "m" (l->a.counter)
+ : "memory");
+ } else if (kernel_uses_llsc) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
+ __SYNC(full, loongson3_war) " \n"
+ "1:" __LL "%1, %2 # local_add_return \n"
+ " addu %0, %1, %3 \n"
+ __SC "%0, %2 \n"
+ " beqz %0, 1b \n"
+ " addu %0, %1, %3 \n"
+ " .set pop \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "Ir" (i), "m" (l->a.counter)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ result = l->a.counter;
+ result += i;
+ l->a.counter = result;
+ local_irq_restore(flags);
+ }
+
+ return result;
+}
+
+static __inline__ long local_sub_return(long i, local_t * l)
+{
+ unsigned long result;
+
+ if (kernel_uses_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set arch=r4000 \n"
+ __SYNC(full, loongson3_war) " \n"
+ "1:" __LL "%1, %2 # local_sub_return \n"
+ " subu %0, %1, %3 \n"
+ __SC "%0, %2 \n"
+ " beqzl %0, 1b \n"
+ " subu %0, %1, %3 \n"
+ " .set pop \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "Ir" (i), "m" (l->a.counter)
+ : "memory");
+ } else if (kernel_uses_llsc) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
+ __SYNC(full, loongson3_war) " \n"
+ "1:" __LL "%1, %2 # local_sub_return \n"
+ " subu %0, %1, %3 \n"
+ __SC "%0, %2 \n"
+ " beqz %0, 1b \n"
+ " subu %0, %1, %3 \n"
+ " .set pop \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "Ir" (i), "m" (l->a.counter)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ result = l->a.counter;
+ result -= i;
+ l->a.counter = result;
+ local_irq_restore(flags);
+ }
+
+ return result;
+}
+
+#define local_cmpxchg(l, o, n) \
+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to l...
+ * @u: ...unless l is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+#define local_add_unless(l, a, u) \
+({ \
+ long c, old; \
+ c = local_read(l); \
+ while (c != (u) && (old = local_cmpxchg((l), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
+#define local_dec_return(l) local_sub_return(1, (l))
+#define local_inc_return(l) local_add_return(1, (l))
+
+/*
+ * local_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @l: pointer of type local_t
+ *
+ * Atomically subtracts @i from @l and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_sub_and_test(i, l) (local_sub_return((i), (l)) == 0)
+
+/*
+ * local_inc_and_test - increment and test
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_inc_and_test(l) (local_inc_return(l) == 0)
+
+/*
+ * local_dec_and_test - decrement by 1 and test
+ * @l: pointer of type local_t
+ *
+ * Atomically decrements @l by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0)
+
+/*
+ * local_add_negative - add and test if negative
+ * @l: pointer of type local_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @l and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+#define local_add_negative(i, l) (local_add_return(i, (l)) < 0)
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations. Note they take
+ * a variable, not an address.
+ */
+
+#define __local_inc(l) ((l)->a.counter++)
+#define __local_dec(l) ((l)->a.counter--)
+#define __local_add(i, l) ((l)->a.counter+=(i))
+#define __local_sub(i, l) ((l)->a.counter-=(i))
+
+#endif /* _ARCH_MIPS_LOCAL_H */
diff --git a/arch/mips/include/asm/maar.h b/arch/mips/include/asm/maar.h
new file mode 100644
index 000000000..99f1c3e4b
--- /dev/null
+++ b/arch/mips/include/asm/maar.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#ifndef __MIPS_ASM_MIPS_MAAR_H__
+#define __MIPS_ASM_MIPS_MAAR_H__
+
+#include <asm/hazards.h>
+#include <asm/mipsregs.h>
+
+/**
+ * platform_maar_init() - perform platform-level MAAR configuration
+ * @num_pairs: The number of MAAR pairs present in the system.
+ *
+ * Platforms should implement this function such that it configures as many
+ * MAAR pairs as required, from 0 up to the maximum of num_pairs-1, and returns
+ * the number that were used. Any further MAARs will be configured to be
+ * invalid. The default implementation of this function will simply indicate
+ * that it has configured 0 MAAR pairs.
+ *
+ * Return: The number of MAAR pairs configured.
+ */
+unsigned platform_maar_init(unsigned num_pairs);
+
+/**
+ * write_maar_pair() - write to a pair of MAARs
+ * @idx: The index of the pair (ie. use MAARs idx*2 & (idx*2)+1).
+ * @lower: The lowest address that the MAAR pair will affect. Must be
+ * aligned to a 2^16 byte boundary.
+ * @upper: The highest address that the MAAR pair will affect. Must be
+ * aligned to one byte before a 2^16 byte boundary.
+ * @attrs: The accessibility attributes to program, eg. MIPS_MAAR_S. The
+ * MIPS_MAAR_VL/MIPS_MAAR_VH attributes will automatically be set.
+ *
+ * Program the pair of MAAR registers specified by idx to apply the attributes
+ * specified by attrs to the range of addresses from lower to upper.
+ */
+static inline void write_maar_pair(unsigned idx, phys_addr_t lower,
+ phys_addr_t upper, unsigned attrs)
+{
+ /* Addresses begin at bit 16, but are shifted right 4 bits */
+ BUG_ON(lower & (0xffff | ~(MIPS_MAAR_ADDR << 4)));
+ BUG_ON(((upper & 0xffff) != 0xffff)
+ || ((upper & ~0xffffull) & ~(MIPS_MAAR_ADDR << 4)));
+
+ /* Automatically set MIPS_MAAR_VL */
+ attrs |= MIPS_MAAR_VL;
+
+ /*
+ * Write the upper address & attributes (both MIPS_MAAR_VL and
+ * MIPS_MAAR_VH matter)
+ */
+ write_c0_maari(idx << 1);
+ back_to_back_c0_hazard();
+ write_c0_maar(((upper >> 4) & MIPS_MAAR_ADDR) | attrs);
+ back_to_back_c0_hazard();
+#ifdef CONFIG_XPA
+ upper >>= MIPS_MAARX_ADDR_SHIFT;
+ writex_c0_maar(((upper >> 4) & MIPS_MAARX_ADDR) | MIPS_MAARX_VH);
+ back_to_back_c0_hazard();
+#endif
+
+ /* Write the lower address & attributes */
+ write_c0_maari((idx << 1) | 0x1);
+ back_to_back_c0_hazard();
+ write_c0_maar((lower >> 4) | attrs);
+ back_to_back_c0_hazard();
+#ifdef CONFIG_XPA
+ lower >>= MIPS_MAARX_ADDR_SHIFT;
+ writex_c0_maar(((lower >> 4) & MIPS_MAARX_ADDR) | MIPS_MAARX_VH);
+ back_to_back_c0_hazard();
+#endif
+}
+
+/**
+ * maar_init() - initialise MAARs
+ *
+ * Performs initialisation of MAARs for the current CPU, making use of the
+ * platform's implementation of platform_maar_init where necessary and
+ * duplicating the setup it provides on secondary CPUs.
+ */
+extern void maar_init(void);
+
+/**
+ * struct maar_config - MAAR configuration data
+ * @lower: The lowest address that the MAAR pair will affect. Must be
+ * aligned to a 2^16 byte boundary.
+ * @upper: The highest address that the MAAR pair will affect. Must be
+ * aligned to one byte before a 2^16 byte boundary.
+ * @attrs: The accessibility attributes to program, eg. MIPS_MAAR_S. The
+ * MIPS_MAAR_VL attribute will automatically be set.
+ *
+ * Describes the configuration of a pair of Memory Accessibility Attribute
+ * Registers - applying attributes from attrs to the range of physical
+ * addresses from lower to upper inclusive.
+ */
+struct maar_config {
+ phys_addr_t lower;
+ phys_addr_t upper;
+ unsigned attrs;
+};
+
+/**
+ * maar_config() - configure MAARs according to provided data
+ * @cfg: Pointer to an array of struct maar_config.
+ * @num_cfg: The number of structs in the cfg array.
+ * @num_pairs: The number of MAAR pairs present in the system.
+ *
+ * Configures up to num_pairs MAAR pairs using the values provided in the
+ * cfg array, stopping once either the cfg entries or the available MAAR
+ * pairs are exhausted.
+ *
+ * Return: The number of MAAR pairs configured.
+ */
+static inline unsigned maar_config(const struct maar_config *cfg,
+ unsigned num_cfg, unsigned num_pairs)
+{
+ unsigned i;
+
+ for (i = 0; i < min(num_cfg, num_pairs); i++)
+ write_maar_pair(i, cfg[i].lower, cfg[i].upper, cfg[i].attrs);
+
+ return i;
+}
+
+#endif /* __MIPS_ASM_MIPS_MAAR_H__ */
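[Editor's note] To illustrate the API documented above, here is a minimal sketch of a platform_maar_init() implementation built on maar_config(); it is an editorial example, not taken from any platform in this patch, and the single 256 MiB speculation-safe region is an arbitrary assumption.

    #include <linux/kernel.h>
    #include <asm/maar.h>

    unsigned platform_maar_init(unsigned num_pairs)
    {
            /* Hypothetical example: allow speculative accesses to the first
             * 256 MiB of physical memory and leave any remaining MAAR pairs
             * to be marked invalid by the caller. */
            static const struct maar_config cfg[] = {
                    {
                            .lower = 0x00000000,    /* aligned to a 2^16 byte boundary */
                            .upper = 0x0fffffff,    /* one byte below a 2^16 boundary */
                            .attrs = MIPS_MAAR_S,   /* speculation permitted */
                    },
            };

            return maar_config(cfg, ARRAY_SIZE(cfg), num_pairs);
    }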
diff --git a/arch/mips/include/asm/mach-ar7/ar7.h b/arch/mips/include/asm/mach-ar7/ar7.h
new file mode 100644
index 000000000..dd09c3bf0
--- /dev/null
+++ b/arch/mips/include/asm/mach-ar7/ar7.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2006,2007 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2006,2007 Eugene Konev <ejka@openwrt.org>
+ */
+
+#ifndef __AR7_H__
+#define __AR7_H__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+
+#include <asm/addrspace.h>
+
+#define AR7_SDRAM_BASE 0x14000000
+
+#define AR7_REGS_BASE 0x08610000
+
+#define AR7_REGS_MAC0 (AR7_REGS_BASE + 0x0000)
+#define AR7_REGS_GPIO (AR7_REGS_BASE + 0x0900)
+/* 0x08610A00 - 0x08610BFF (512 bytes, 128 bytes / clock) */
+#define AR7_REGS_POWER (AR7_REGS_BASE + 0x0a00)
+#define AR7_REGS_CLOCKS (AR7_REGS_POWER + 0x80)
+#define UR8_REGS_CLOCKS (AR7_REGS_POWER + 0x20)
+#define AR7_REGS_UART0 (AR7_REGS_BASE + 0x0e00)
+#define AR7_REGS_USB (AR7_REGS_BASE + 0x1200)
+#define AR7_REGS_RESET (AR7_REGS_BASE + 0x1600)
+#define AR7_REGS_PINSEL (AR7_REGS_BASE + 0x160C)
+#define AR7_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1800)
+#define AR7_REGS_DCL (AR7_REGS_BASE + 0x1a00)
+#define AR7_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1c00)
+#define AR7_REGS_MDIO (AR7_REGS_BASE + 0x1e00)
+#define AR7_REGS_IRQ (AR7_REGS_BASE + 0x2400)
+#define AR7_REGS_MAC1 (AR7_REGS_BASE + 0x2800)
+
+#define AR7_REGS_WDT (AR7_REGS_BASE + 0x1f00)
+#define UR8_REGS_WDT (AR7_REGS_BASE + 0x0b00)
+#define UR8_REGS_UART1 (AR7_REGS_BASE + 0x0f00)
+
+/* Titan registers */
+#define TITAN_REGS_ESWITCH_BASE (0x08640000)
+#define TITAN_REGS_MAC0 (TITAN_REGS_ESWITCH_BASE)
+#define TITAN_REGS_MAC1 (TITAN_REGS_ESWITCH_BASE + 0x0800)
+#define TITAN_REGS_MDIO (TITAN_REGS_ESWITCH_BASE + 0x02000)
+#define TITAN_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1c00)
+#define TITAN_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1300)
+
+#define AR7_RESET_PERIPHERAL 0x0
+#define AR7_RESET_SOFTWARE 0x4
+#define AR7_RESET_STATUS 0x8
+
+#define AR7_RESET_BIT_CPMAC_LO 17
+#define AR7_RESET_BIT_CPMAC_HI 21
+#define AR7_RESET_BIT_MDIO 22
+#define AR7_RESET_BIT_EPHY 26
+
+#define TITAN_RESET_BIT_EPHY1 28
+
+/* GPIO control registers */
+#define AR7_GPIO_INPUT 0x0
+#define AR7_GPIO_OUTPUT 0x4
+#define AR7_GPIO_DIR 0x8
+#define AR7_GPIO_ENABLE 0xc
+#define TITAN_GPIO_INPUT_0 0x0
+#define TITAN_GPIO_INPUT_1 0x4
+#define TITAN_GPIO_OUTPUT_0 0x8
+#define TITAN_GPIO_OUTPUT_1 0xc
+#define TITAN_GPIO_DIR_0 0x10
+#define TITAN_GPIO_DIR_1 0x14
+#define TITAN_GPIO_ENBL_0 0x18
+#define TITAN_GPIO_ENBL_1 0x1c
+
+#define AR7_CHIP_7100 0x18
+#define AR7_CHIP_7200 0x2b
+#define AR7_CHIP_7300 0x05
+#define AR7_CHIP_TITAN 0x07
+#define TITAN_CHIP_1050 0x0f
+#define TITAN_CHIP_1055 0x0e
+#define TITAN_CHIP_1056 0x0d
+#define TITAN_CHIP_1060 0x07
+
+/* Interrupts */
+#define AR7_IRQ_UART0 15
+#define AR7_IRQ_UART1 16
+
+/* Clocks */
+#define AR7_AFE_CLOCK 35328000
+#define AR7_REF_CLOCK 25000000
+#define AR7_XTAL_CLOCK 24000000
+
+/* DCL */
+#define AR7_WDT_HW_ENA 0x10
+
+struct plat_cpmac_data {
+ int reset_bit;
+ int power_bit;
+ u32 phy_mask;
+ char dev_addr[6];
+};
+
+struct plat_dsl_data {
+ int reset_bit_dsl;
+ int reset_bit_sar;
+};
+
+extern int ar7_cpu_clock, ar7_bus_clock, ar7_dsp_clock;
+
+static inline int ar7_is_titan(void)
+{
+ return (readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x24)) & 0xffff) ==
+ AR7_CHIP_TITAN;
+}
+
+static inline u16 ar7_chip_id(void)
+{
+ return ar7_is_titan() ? AR7_CHIP_TITAN : (readl((void *)
+ KSEG1ADDR(AR7_REGS_GPIO + 0x14)) & 0xffff);
+}
+
+static inline u16 titan_chip_id(void)
+{
+ unsigned int val = readl((void *)KSEG1ADDR(AR7_REGS_GPIO +
+ TITAN_GPIO_INPUT_1));
+ return ((val >> 12) & 0x0f);
+}
+
+static inline u8 ar7_chip_rev(void)
+{
+ return (readl((void *)KSEG1ADDR(AR7_REGS_GPIO + (ar7_is_titan() ? 0x24 :
+ 0x14))) >> 16) & 0xff;
+}
+
+struct clk {
+ unsigned int rate;
+};
+
+static inline int ar7_has_high_cpmac(void)
+{
+ u16 chip_id = ar7_chip_id();
+ switch (chip_id) {
+ case AR7_CHIP_7100:
+ case AR7_CHIP_7200:
+ return 0;
+ case AR7_CHIP_7300:
+ return 1;
+ default:
+ return -ENXIO;
+ }
+}
+#define ar7_has_high_vlynq ar7_has_high_cpmac
+#define ar7_has_second_uart ar7_has_high_cpmac
+
+static inline void ar7_device_enable(u32 bit)
+{
+ void *reset_reg =
+ (void *)KSEG1ADDR(AR7_REGS_RESET + AR7_RESET_PERIPHERAL);
+ writel(readl(reset_reg) | (1 << bit), reset_reg);
+ msleep(20);
+}
+
+static inline void ar7_device_disable(u32 bit)
+{
+ void *reset_reg =
+ (void *)KSEG1ADDR(AR7_REGS_RESET + AR7_RESET_PERIPHERAL);
+ writel(readl(reset_reg) & ~(1 << bit), reset_reg);
+ msleep(20);
+}
+
+static inline void ar7_device_reset(u32 bit)
+{
+ ar7_device_disable(bit);
+ ar7_device_enable(bit);
+}
+
+static inline void ar7_device_on(u32 bit)
+{
+ void *power_reg = (void *)KSEG1ADDR(AR7_REGS_POWER);
+ writel(readl(power_reg) | (1 << bit), power_reg);
+ msleep(20);
+}
+
+static inline void ar7_device_off(u32 bit)
+{
+ void *power_reg = (void *)KSEG1ADDR(AR7_REGS_POWER);
+ writel(readl(power_reg) & ~(1 << bit), power_reg);
+ msleep(20);
+}
+
+int __init ar7_gpio_init(void);
+void __init ar7_init_clocks(void);
+
+/* Board specific GPIO functions */
+int ar7_gpio_enable(unsigned gpio);
+int ar7_gpio_disable(unsigned gpio);
+
+#endif /* __AR7_H__ */
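[Editor's note] As a usage note for the reset helpers above, the sketch below shows how board code might cycle the CPMAC blocks through reset; the function name and return convention are hypothetical. Since ar7_device_enable()/ar7_device_disable() call msleep(), this must run from sleepable context.

    #include <asm/mach-ar7/ar7.h>

    static int example_setup_cpmac(void)
    {
            /* Pulse the low CPMAC block through reset. */
            ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);

            switch (ar7_has_high_cpmac()) {
            case 1:
                    /* 7300-family chips also provide the high CPMAC. */
                    ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
                    return 2;
            case 0:
                    return 1;       /* only the low MAC is usable */
            default:
                    return -ENXIO;  /* unrecognised chip */
            }
    }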
diff --git a/arch/mips/include/asm/mach-ar7/irq.h b/arch/mips/include/asm/mach-ar7/irq.h
new file mode 100644
index 000000000..46bb730ea
--- /dev/null
+++ b/arch/mips/include/asm/mach-ar7/irq.h
@@ -0,0 +1,16 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Shamelessly copied from asm-mips/mach-emma2rh/
+ * Copyright (C) 2003 by Ralf Baechle
+ */
+#ifndef __ASM_AR7_IRQ_H
+#define __ASM_AR7_IRQ_H
+
+#define NR_IRQS 256
+
+#include <asm/mach-generic/irq.h>
+
+#endif /* __ASM_AR7_IRQ_H */
diff --git a/arch/mips/include/asm/mach-ar7/prom.h b/arch/mips/include/asm/mach-ar7/prom.h
new file mode 100644
index 000000000..9e1d20b06
--- /dev/null
+++ b/arch/mips/include/asm/mach-ar7/prom.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2006, 2007 Florian Fainelli <florian@openwrt.org>
+ */
+
+#ifndef __PROM_H__
+#define __PROM_H__
+
+extern char *prom_getenv(const char *name);
+extern void prom_meminit(void);
+
+#endif /* __PROM_H__ */
diff --git a/arch/mips/include/asm/mach-ar7/spaces.h b/arch/mips/include/asm/mach-ar7/spaces.h
new file mode 100644
index 000000000..a004d94df
--- /dev/null
+++ b/arch/mips/include/asm/mach-ar7/spaces.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_AR7_SPACES_H
+#define _ASM_AR7_SPACES_H
+
+/*
+ * This handles the memory map.
+ * We handle pages at KSEG0 for kernels with 32 bit address space.
+ */
+#define PAGE_OFFSET _AC(0x94000000, UL)
+#define PHYS_OFFSET _AC(0x14000000, UL)
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* _ASM_AR7_SPACES_H */
diff --git a/arch/mips/include/asm/mach-ath25/ath25_platform.h b/arch/mips/include/asm/mach-ath25/ath25_platform.h
new file mode 100644
index 000000000..0aacc55aa
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath25/ath25_platform.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_ATH25_PLATFORM_H
+#define __ASM_MACH_ATH25_PLATFORM_H
+
+#include <linux/etherdevice.h>
+
+/*
+ * This is board-specific data that is stored in a "fixed" location in flash.
+ * It is shared across operating systems, so it should not be changed lightly.
+ * The main reason we need it is in order to extract the ethernet MAC
+ * address(es).
+ */
+struct ath25_boarddata {
+ u32 magic; /* board data is valid */
+#define ATH25_BD_MAGIC 0x35333131 /* "5311", for all 531x/231x platforms */
+ u16 cksum; /* checksum (starting with BD_REV 2) */
+ u16 rev; /* revision of this struct */
+#define BD_REV 4
+ char board_name[64]; /* Name of board */
+ u16 major; /* Board major number */
+ u16 minor; /* Board minor number */
+ u32 flags; /* Board configuration */
+#define BD_ENET0 0x00000001 /* ENET0 is stuffed */
+#define BD_ENET1 0x00000002 /* ENET1 is stuffed */
+#define BD_UART1 0x00000004 /* UART1 is stuffed */
+#define BD_UART0 0x00000008 /* UART0 is stuffed (dma) */
+#define BD_RSTFACTORY 0x00000010 /* Reset factory defaults stuffed */
+#define BD_SYSLED 0x00000020 /* System LED stuffed */
+#define BD_EXTUARTCLK 0x00000040 /* External UART clock */
+#define BD_CPUFREQ 0x00000080 /* cpu freq is valid in nvram */
+#define BD_SYSFREQ 0x00000100 /* sys freq is set in nvram */
+#define BD_WLAN0 0x00000200 /* Enable WLAN0 */
+#define BD_MEMCAP 0x00000400 /* CAP SDRAM @ mem_cap for testing */
+#define BD_DISWATCHDOG 0x00000800 /* disable system watchdog */
+#define BD_WLAN1 0x00001000 /* Enable WLAN1 (ar5212) */
+#define BD_ISCASPER 0x00002000 /* FLAG for AR2312 */
+#define BD_WLAN0_2G_EN 0x00004000 /* FLAG for radio0_2G */
+#define BD_WLAN0_5G_EN 0x00008000 /* FLAG for radio0_5G */
+#define BD_WLAN1_2G_EN 0x00020000 /* FLAG for radio1_2G */
+#define BD_WLAN1_5G_EN 0x00040000 /* FLAG for radio1_5G */
+ u16 reset_config_gpio; /* Reset factory GPIO pin */
+ u16 sys_led_gpio; /* System LED GPIO pin */
+
+ u32 cpu_freq; /* CPU core frequency in Hz */
+ u32 sys_freq; /* System frequency in Hz */
+ u32 cnt_freq; /* Calculated C0_COUNT frequency */
+
+ u8 wlan0_mac[ETH_ALEN];
+ u8 enet0_mac[ETH_ALEN];
+ u8 enet1_mac[ETH_ALEN];
+
+ u16 pci_id; /* Pseudo PCIID for common code */
+ u16 mem_cap; /* cap bank1 in MB */
+
+ /* version 3 */
+ u8 wlan1_mac[ETH_ALEN]; /* (ar5212) */
+};
+
+#define BOARD_CONFIG_BUFSZ 0x1000
+
+/*
+ * Platform device information for the Wireless MAC
+ */
+struct ar231x_board_config {
+ u16 devid;
+
+ /* board config data */
+ struct ath25_boarddata *config;
+
+ /* radio calibration data */
+ const char *radio;
+};
+
+#endif /* __ASM_MACH_ATH25_PLATFORM_H */
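[Editor's note] As an illustration of how the flash board data above is typically consumed: validate the magic, check the relevant flag, then copy the stored MAC address. This is an editorial sketch; the helper name and error conventions are assumptions.

    #include <linux/errno.h>
    #include <linux/etherdevice.h>
    #include <asm/mach-ath25/ath25_platform.h>

    static int example_get_enet0_mac(const struct ath25_boarddata *bd,
                                     u8 mac[ETH_ALEN])
    {
            if (bd->magic != ATH25_BD_MAGIC)
                    return -EINVAL;         /* board data not valid */

            if (!(bd->flags & BD_ENET0) || !is_valid_ether_addr(bd->enet0_mac))
                    return -ENODEV;         /* ENET0 not stuffed or MAC unset */

            ether_addr_copy(mac, bd->enet0_mac);
            return 0;
    }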
diff --git a/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h
new file mode 100644
index 000000000..a54f20d95
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Atheros AR231x/AR531x SoC specific CPU feature overrides
+ *
+ * Copyright (C) 2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#ifndef __ASM_MACH_ATH25_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_ATH25_CPU_FEATURE_OVERRIDES_H
+
+/*
+ * The Atheros AR531x/AR231x SoCs have a MIPS 4Kc/4KEc core.
+ */
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_sb1_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 1
+#define cpu_has_ejtag 1
+
+#if !defined(CONFIG_SOC_AR5312)
+# define cpu_has_llsc 1
+#else
+/*
+ * The MIPS 4Kc V0.9 core in the AR5312/AR2312 has problems with the
+ * ll/sc instructions.
+ */
+# define cpu_has_llsc 0
+#endif
+
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+
+#define cpu_has_mips32r1 1
+
+#if !defined(CONFIG_SOC_AR5312)
+# define cpu_has_mips32r2 1
+#endif
+
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#define cpu_has_dsp 0
+#define cpu_has_mipsmt 0
+
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs 0
+
+#endif /* __ASM_MACH_ATH25_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
new file mode 100644
index 000000000..1f9e571af
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -0,0 +1,1321 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Atheros AR71XX/AR724X/AR913X SoC register definitions
+ *
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ */
+
+#ifndef __ASM_MACH_AR71XX_REGS_H
+#define __ASM_MACH_AR71XX_REGS_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+
+#define AR71XX_APB_BASE 0x18000000
+#define AR71XX_GE0_BASE 0x19000000
+#define AR71XX_GE0_SIZE 0x10000
+#define AR71XX_GE1_BASE 0x1a000000
+#define AR71XX_GE1_SIZE 0x10000
+#define AR71XX_EHCI_BASE 0x1b000000
+#define AR71XX_EHCI_SIZE 0x1000
+#define AR71XX_OHCI_BASE 0x1c000000
+#define AR71XX_OHCI_SIZE 0x1000
+#define AR71XX_SPI_BASE 0x1f000000
+#define AR71XX_SPI_SIZE 0x01000000
+
+#define AR71XX_DDR_CTRL_BASE (AR71XX_APB_BASE + 0x00000000)
+#define AR71XX_DDR_CTRL_SIZE 0x100
+#define AR71XX_UART_BASE (AR71XX_APB_BASE + 0x00020000)
+#define AR71XX_UART_SIZE 0x100
+#define AR71XX_USB_CTRL_BASE (AR71XX_APB_BASE + 0x00030000)
+#define AR71XX_USB_CTRL_SIZE 0x100
+#define AR71XX_GPIO_BASE (AR71XX_APB_BASE + 0x00040000)
+#define AR71XX_GPIO_SIZE 0x100
+#define AR71XX_PLL_BASE (AR71XX_APB_BASE + 0x00050000)
+#define AR71XX_PLL_SIZE 0x100
+#define AR71XX_RESET_BASE (AR71XX_APB_BASE + 0x00060000)
+#define AR71XX_RESET_SIZE 0x100
+#define AR71XX_MII_BASE (AR71XX_APB_BASE + 0x00070000)
+#define AR71XX_MII_SIZE 0x100
+
+#define AR71XX_PCI_MEM_BASE 0x10000000
+#define AR71XX_PCI_MEM_SIZE 0x07000000
+
+#define AR71XX_PCI_WIN0_OFFS 0x10000000
+#define AR71XX_PCI_WIN1_OFFS 0x11000000
+#define AR71XX_PCI_WIN2_OFFS 0x12000000
+#define AR71XX_PCI_WIN3_OFFS 0x13000000
+#define AR71XX_PCI_WIN4_OFFS 0x14000000
+#define AR71XX_PCI_WIN5_OFFS 0x15000000
+#define AR71XX_PCI_WIN6_OFFS 0x16000000
+#define AR71XX_PCI_WIN7_OFFS 0x07000000
+
+#define AR71XX_PCI_CFG_BASE \
+ (AR71XX_PCI_MEM_BASE + AR71XX_PCI_WIN7_OFFS + 0x10000)
+#define AR71XX_PCI_CFG_SIZE 0x100
+
+#define AR7240_USB_CTRL_BASE (AR71XX_APB_BASE + 0x00030000)
+#define AR7240_USB_CTRL_SIZE 0x100
+#define AR7240_OHCI_BASE 0x1b000000
+#define AR7240_OHCI_SIZE 0x1000
+
+#define AR724X_PCI_MEM_BASE 0x10000000
+#define AR724X_PCI_MEM_SIZE 0x04000000
+
+#define AR724X_PCI_CFG_BASE 0x14000000
+#define AR724X_PCI_CFG_SIZE 0x1000
+#define AR724X_PCI_CRP_BASE (AR71XX_APB_BASE + 0x000c0000)
+#define AR724X_PCI_CRP_SIZE 0x1000
+#define AR724X_PCI_CTRL_BASE (AR71XX_APB_BASE + 0x000f0000)
+#define AR724X_PCI_CTRL_SIZE 0x100
+
+#define AR724X_EHCI_BASE 0x1b000000
+#define AR724X_EHCI_SIZE 0x1000
+
+#define AR913X_EHCI_BASE 0x1b000000
+#define AR913X_EHCI_SIZE 0x1000
+#define AR913X_WMAC_BASE (AR71XX_APB_BASE + 0x000C0000)
+#define AR913X_WMAC_SIZE 0x30000
+
+#define AR933X_UART_BASE (AR71XX_APB_BASE + 0x00020000)
+#define AR933X_UART_SIZE 0x14
+#define AR933X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define AR933X_GMAC_SIZE 0x04
+#define AR933X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+#define AR933X_WMAC_SIZE 0x20000
+#define AR933X_EHCI_BASE 0x1b000000
+#define AR933X_EHCI_SIZE 0x1000
+
+#define AR934X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define AR934X_GMAC_SIZE 0x14
+#define AR934X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+#define AR934X_WMAC_SIZE 0x20000
+#define AR934X_EHCI_BASE 0x1b000000
+#define AR934X_EHCI_SIZE 0x200
+#define AR934X_NFC_BASE 0x1b000200
+#define AR934X_NFC_SIZE 0xb8
+#define AR934X_SRIF_BASE (AR71XX_APB_BASE + 0x00116000)
+#define AR934X_SRIF_SIZE 0x1000
+
+#define QCA953X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define QCA953X_GMAC_SIZE 0x14
+#define QCA953X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+#define QCA953X_WMAC_SIZE 0x20000
+#define QCA953X_EHCI_BASE 0x1b000000
+#define QCA953X_EHCI_SIZE 0x200
+#define QCA953X_SRIF_BASE (AR71XX_APB_BASE + 0x00116000)
+#define QCA953X_SRIF_SIZE 0x1000
+
+#define QCA953X_PCI_CFG_BASE0 0x14000000
+#define QCA953X_PCI_CTRL_BASE0 (AR71XX_APB_BASE + 0x000f0000)
+#define QCA953X_PCI_CRP_BASE0 (AR71XX_APB_BASE + 0x000c0000)
+#define QCA953X_PCI_MEM_BASE0 0x10000000
+#define QCA953X_PCI_MEM_SIZE 0x02000000
+
+#define QCA955X_PCI_MEM_BASE0 0x10000000
+#define QCA955X_PCI_MEM_BASE1 0x12000000
+#define QCA955X_PCI_MEM_SIZE 0x02000000
+#define QCA955X_PCI_CFG_BASE0 0x14000000
+#define QCA955X_PCI_CFG_BASE1 0x16000000
+#define QCA955X_PCI_CFG_SIZE 0x1000
+#define QCA955X_PCI_CRP_BASE0 (AR71XX_APB_BASE + 0x000c0000)
+#define QCA955X_PCI_CRP_BASE1 (AR71XX_APB_BASE + 0x00250000)
+#define QCA955X_PCI_CRP_SIZE 0x1000
+#define QCA955X_PCI_CTRL_BASE0 (AR71XX_APB_BASE + 0x000f0000)
+#define QCA955X_PCI_CTRL_BASE1 (AR71XX_APB_BASE + 0x00280000)
+#define QCA955X_PCI_CTRL_SIZE 0x100
+
+#define QCA955X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define QCA955X_GMAC_SIZE 0x40
+#define QCA955X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+#define QCA955X_WMAC_SIZE 0x20000
+#define QCA955X_EHCI0_BASE 0x1b000000
+#define QCA955X_EHCI1_BASE 0x1b400000
+#define QCA955X_EHCI_SIZE 0x1000
+#define QCA955X_NFC_BASE 0x1b800200
+#define QCA955X_NFC_SIZE 0xb8
+
+#define QCA956X_PCI_MEM_BASE1 0x12000000
+#define QCA956X_PCI_MEM_SIZE 0x02000000
+#define QCA956X_PCI_CFG_BASE1 0x16000000
+#define QCA956X_PCI_CFG_SIZE 0x1000
+#define QCA956X_PCI_CRP_BASE1 (AR71XX_APB_BASE + 0x00250000)
+#define QCA956X_PCI_CRP_SIZE 0x1000
+#define QCA956X_PCI_CTRL_BASE1 (AR71XX_APB_BASE + 0x00280000)
+#define QCA956X_PCI_CTRL_SIZE 0x100
+
+#define QCA956X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+#define QCA956X_WMAC_SIZE 0x20000
+#define QCA956X_EHCI0_BASE 0x1b000000
+#define QCA956X_EHCI1_BASE 0x1b400000
+#define QCA956X_EHCI_SIZE 0x200
+#define QCA956X_GMAC_SGMII_BASE (AR71XX_APB_BASE + 0x00070000)
+#define QCA956X_GMAC_SGMII_SIZE 0x64
+#define QCA956X_PLL_BASE (AR71XX_APB_BASE + 0x00050000)
+#define QCA956X_PLL_SIZE 0x50
+#define QCA956X_GMAC_BASE (AR71XX_APB_BASE + 0x00070000)
+#define QCA956X_GMAC_SIZE 0x64
+
+/*
+ * Hidden Registers
+ */
+#define QCA956X_MAC_CFG_BASE 0xb9000000
+#define QCA956X_MAC_CFG_SIZE 0x64
+
+#define QCA956X_MAC_CFG1_REG 0x00
+#define QCA956X_MAC_CFG1_SOFT_RST BIT(31)
+#define QCA956X_MAC_CFG1_RX_RST BIT(19)
+#define QCA956X_MAC_CFG1_TX_RST BIT(18)
+#define QCA956X_MAC_CFG1_LOOPBACK BIT(8)
+#define QCA956X_MAC_CFG1_RX_EN BIT(2)
+#define QCA956X_MAC_CFG1_TX_EN BIT(0)
+
+#define QCA956X_MAC_CFG2_REG 0x04
+#define QCA956X_MAC_CFG2_IF_1000 BIT(9)
+#define QCA956X_MAC_CFG2_IF_10_100 BIT(8)
+#define QCA956X_MAC_CFG2_HUGE_FRAME_EN BIT(5)
+#define QCA956X_MAC_CFG2_LEN_CHECK BIT(4)
+#define QCA956X_MAC_CFG2_PAD_CRC_EN BIT(2)
+#define QCA956X_MAC_CFG2_FDX BIT(0)
+
+#define QCA956X_MAC_MII_MGMT_CFG_REG 0x20
+#define QCA956X_MGMT_CFG_CLK_DIV_20 0x07
+
+#define QCA956X_MAC_FIFO_CFG0_REG 0x48
+#define QCA956X_MAC_FIFO_CFG1_REG 0x4c
+#define QCA956X_MAC_FIFO_CFG2_REG 0x50
+#define QCA956X_MAC_FIFO_CFG3_REG 0x54
+#define QCA956X_MAC_FIFO_CFG4_REG 0x58
+#define QCA956X_MAC_FIFO_CFG5_REG 0x5c
+
+#define QCA956X_DAM_RESET_OFFSET 0xb90001bc
+#define QCA956X_DAM_RESET_SIZE 0x4
+#define QCA956X_INLINE_CHKSUM_ENG BIT(27)
+
+/*
+ * DDR_CTRL block
+ */
+#define AR71XX_DDR_REG_PCI_WIN0 0x7c
+#define AR71XX_DDR_REG_PCI_WIN1 0x80
+#define AR71XX_DDR_REG_PCI_WIN2 0x84
+#define AR71XX_DDR_REG_PCI_WIN3 0x88
+#define AR71XX_DDR_REG_PCI_WIN4 0x8c
+#define AR71XX_DDR_REG_PCI_WIN5 0x90
+#define AR71XX_DDR_REG_PCI_WIN6 0x94
+#define AR71XX_DDR_REG_PCI_WIN7 0x98
+#define AR71XX_DDR_REG_FLUSH_GE0 0x9c
+#define AR71XX_DDR_REG_FLUSH_GE1 0xa0
+#define AR71XX_DDR_REG_FLUSH_USB 0xa4
+#define AR71XX_DDR_REG_FLUSH_PCI 0xa8
+
+#define AR724X_DDR_REG_FLUSH_GE0 0x7c
+#define AR724X_DDR_REG_FLUSH_GE1 0x80
+#define AR724X_DDR_REG_FLUSH_USB 0x84
+#define AR724X_DDR_REG_FLUSH_PCIE 0x88
+
+#define AR913X_DDR_REG_FLUSH_GE0 0x7c
+#define AR913X_DDR_REG_FLUSH_GE1 0x80
+#define AR913X_DDR_REG_FLUSH_USB 0x84
+#define AR913X_DDR_REG_FLUSH_WMAC 0x88
+
+#define AR933X_DDR_REG_FLUSH_GE0 0x7c
+#define AR933X_DDR_REG_FLUSH_GE1 0x80
+#define AR933X_DDR_REG_FLUSH_USB 0x84
+#define AR933X_DDR_REG_FLUSH_WMAC 0x88
+
+#define AR934X_DDR_REG_FLUSH_GE0 0x9c
+#define AR934X_DDR_REG_FLUSH_GE1 0xa0
+#define AR934X_DDR_REG_FLUSH_USB 0xa4
+#define AR934X_DDR_REG_FLUSH_PCIE 0xa8
+#define AR934X_DDR_REG_FLUSH_WMAC 0xac
+
+#define QCA953X_DDR_REG_FLUSH_GE0 0x9c
+#define QCA953X_DDR_REG_FLUSH_GE1 0xa0
+#define QCA953X_DDR_REG_FLUSH_USB 0xa4
+#define QCA953X_DDR_REG_FLUSH_PCIE 0xa8
+#define QCA953X_DDR_REG_FLUSH_WMAC 0xac
+
+/*
+ * PLL block
+ */
+#define AR71XX_PLL_REG_CPU_CONFIG 0x00
+#define AR71XX_PLL_REG_SEC_CONFIG 0x04
+#define AR71XX_PLL_REG_ETH0_INT_CLOCK 0x10
+#define AR71XX_PLL_REG_ETH1_INT_CLOCK 0x14
+
+#define AR71XX_PLL_FB_SHIFT 3
+#define AR71XX_PLL_FB_MASK 0x1f
+#define AR71XX_CPU_DIV_SHIFT 16
+#define AR71XX_CPU_DIV_MASK 0x3
+#define AR71XX_DDR_DIV_SHIFT 18
+#define AR71XX_DDR_DIV_MASK 0x3
+#define AR71XX_AHB_DIV_SHIFT 20
+#define AR71XX_AHB_DIV_MASK 0x7
+
+#define AR71XX_ETH0_PLL_SHIFT 17
+#define AR71XX_ETH1_PLL_SHIFT 19
+
+#define AR724X_PLL_REG_CPU_CONFIG 0x00
+#define AR724X_PLL_REG_PCIE_CONFIG 0x10
+
+#define AR724X_PLL_REG_PCIE_CONFIG_PPL_BYPASS BIT(16)
+#define AR724X_PLL_REG_PCIE_CONFIG_PPL_RESET BIT(25)
+
+#define AR724X_PLL_FB_SHIFT 0
+#define AR724X_PLL_FB_MASK 0x3ff
+#define AR724X_PLL_REF_DIV_SHIFT 10
+#define AR724X_PLL_REF_DIV_MASK 0xf
+#define AR724X_AHB_DIV_SHIFT 19
+#define AR724X_AHB_DIV_MASK 0x1
+#define AR724X_DDR_DIV_SHIFT 22
+#define AR724X_DDR_DIV_MASK 0x3
+
+#define AR7242_PLL_REG_ETH0_INT_CLOCK 0x2c
+
+#define AR913X_PLL_REG_CPU_CONFIG 0x00
+#define AR913X_PLL_REG_ETH_CONFIG 0x04
+#define AR913X_PLL_REG_ETH0_INT_CLOCK 0x14
+#define AR913X_PLL_REG_ETH1_INT_CLOCK 0x18
+
+#define AR913X_PLL_FB_SHIFT 0
+#define AR913X_PLL_FB_MASK 0x3ff
+#define AR913X_DDR_DIV_SHIFT 22
+#define AR913X_DDR_DIV_MASK 0x3
+#define AR913X_AHB_DIV_SHIFT 19
+#define AR913X_AHB_DIV_MASK 0x1
+
+#define AR913X_ETH0_PLL_SHIFT 20
+#define AR913X_ETH1_PLL_SHIFT 22
+
+#define AR933X_PLL_CPU_CONFIG_REG 0x00
+#define AR933X_PLL_CLOCK_CTRL_REG 0x08
+
+#define AR933X_PLL_CPU_CONFIG_NINT_SHIFT 10
+#define AR933X_PLL_CPU_CONFIG_NINT_MASK 0x3f
+#define AR933X_PLL_CPU_CONFIG_REFDIV_SHIFT 16
+#define AR933X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f
+#define AR933X_PLL_CPU_CONFIG_OUTDIV_SHIFT 23
+#define AR933X_PLL_CPU_CONFIG_OUTDIV_MASK 0x7
+
+#define AR933X_PLL_CLOCK_CTRL_BYPASS BIT(2)
+#define AR933X_PLL_CLOCK_CTRL_CPU_DIV_SHIFT 5
+#define AR933X_PLL_CLOCK_CTRL_CPU_DIV_MASK 0x3
+#define AR933X_PLL_CLOCK_CTRL_DDR_DIV_SHIFT 10
+#define AR933X_PLL_CLOCK_CTRL_DDR_DIV_MASK 0x3
+#define AR933X_PLL_CLOCK_CTRL_AHB_DIV_SHIFT 15
+#define AR933X_PLL_CLOCK_CTRL_AHB_DIV_MASK 0x7
+
+#define AR934X_PLL_CPU_CONFIG_REG 0x00
+#define AR934X_PLL_DDR_CONFIG_REG 0x04
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_REG 0x08
+#define AR934X_PLL_SWITCH_CLOCK_CONTROL_REG 0x24
+#define AR934X_PLL_ETH_XMII_CONTROL_REG 0x2c
+
+#define AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
+#define AR934X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
+#define AR934X_PLL_CPU_CONFIG_NINT_SHIFT 6
+#define AR934X_PLL_CPU_CONFIG_NINT_MASK 0x3f
+#define AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT 12
+#define AR934X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f
+#define AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19
+#define AR934X_PLL_CPU_CONFIG_OUTDIV_MASK 0x3
+
+#define AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT 0
+#define AR934X_PLL_DDR_CONFIG_NFRAC_MASK 0x3ff
+#define AR934X_PLL_DDR_CONFIG_NINT_SHIFT 10
+#define AR934X_PLL_DDR_CONFIG_NINT_MASK 0x3f
+#define AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT 16
+#define AR934X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f
+#define AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23
+#define AR934X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7
+
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_PLL_BYPASS BIT(2)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_PLL_BYPASS BIT(3)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_PLL_BYPASS BIT(4)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT 5
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK 0x1f
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_SHIFT 10
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_MASK 0x1f
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_SHIFT 15
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_MASK 0x1f
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPUCLK_FROM_CPUPLL BIT(20)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+
+#define AR934X_PLL_SWITCH_CLOCK_CONTROL_MDIO_CLK_SEL BIT(6)
+
+#define QCA953X_PLL_CPU_CONFIG_REG 0x00
+#define QCA953X_PLL_DDR_CONFIG_REG 0x04
+#define QCA953X_PLL_CLK_CTRL_REG 0x08
+#define QCA953X_PLL_SWITCH_CLOCK_CONTROL_REG 0x24
+#define QCA953X_PLL_ETH_XMII_CONTROL_REG 0x2c
+#define QCA953X_PLL_ETH_SGMII_CONTROL_REG 0x48
+
+#define QCA953X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
+#define QCA953X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
+#define QCA953X_PLL_CPU_CONFIG_NINT_SHIFT 6
+#define QCA953X_PLL_CPU_CONFIG_NINT_MASK 0x3f
+#define QCA953X_PLL_CPU_CONFIG_REFDIV_SHIFT 12
+#define QCA953X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f
+#define QCA953X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19
+#define QCA953X_PLL_CPU_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA953X_PLL_DDR_CONFIG_NFRAC_SHIFT 0
+#define QCA953X_PLL_DDR_CONFIG_NFRAC_MASK 0x3ff
+#define QCA953X_PLL_DDR_CONFIG_NINT_SHIFT 10
+#define QCA953X_PLL_DDR_CONFIG_NINT_MASK 0x3f
+#define QCA953X_PLL_DDR_CONFIG_REFDIV_SHIFT 16
+#define QCA953X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f
+#define QCA953X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23
+#define QCA953X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA953X_PLL_CLK_CTRL_CPU_PLL_BYPASS BIT(2)
+#define QCA953X_PLL_CLK_CTRL_DDR_PLL_BYPASS BIT(3)
+#define QCA953X_PLL_CLK_CTRL_AHB_PLL_BYPASS BIT(4)
+#define QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT 5
+#define QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_MASK 0x1f
+#define QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT 10
+#define QCA953X_PLL_CLK_CTRL_DDR_POST_DIV_MASK 0x1f
+#define QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT 15
+#define QCA953X_PLL_CLK_CTRL_AHB_POST_DIV_MASK 0x1f
+#define QCA953X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL BIT(20)
+#define QCA953X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
+#define QCA953X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+
+#define QCA955X_PLL_CPU_CONFIG_REG 0x00
+#define QCA955X_PLL_DDR_CONFIG_REG 0x04
+#define QCA955X_PLL_CLK_CTRL_REG 0x08
+#define QCA955X_PLL_ETH_XMII_CONTROL_REG 0x28
+#define QCA955X_PLL_ETH_SGMII_CONTROL_REG 0x48
+#define QCA955X_PLL_ETH_SGMII_SERDES_REG 0x4c
+
+#define QCA955X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
+#define QCA955X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
+#define QCA955X_PLL_CPU_CONFIG_NINT_SHIFT 6
+#define QCA955X_PLL_CPU_CONFIG_NINT_MASK 0x3f
+#define QCA955X_PLL_CPU_CONFIG_REFDIV_SHIFT 12
+#define QCA955X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f
+#define QCA955X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19
+#define QCA955X_PLL_CPU_CONFIG_OUTDIV_MASK 0x3
+
+#define QCA955X_PLL_DDR_CONFIG_NFRAC_SHIFT 0
+#define QCA955X_PLL_DDR_CONFIG_NFRAC_MASK 0x3ff
+#define QCA955X_PLL_DDR_CONFIG_NINT_SHIFT 10
+#define QCA955X_PLL_DDR_CONFIG_NINT_MASK 0x3f
+#define QCA955X_PLL_DDR_CONFIG_REFDIV_SHIFT 16
+#define QCA955X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f
+#define QCA955X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23
+#define QCA955X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA955X_PLL_CLK_CTRL_CPU_PLL_BYPASS BIT(2)
+#define QCA955X_PLL_CLK_CTRL_DDR_PLL_BYPASS BIT(3)
+#define QCA955X_PLL_CLK_CTRL_AHB_PLL_BYPASS BIT(4)
+#define QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT 5
+#define QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_MASK 0x1f
+#define QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT 10
+#define QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_MASK 0x1f
+#define QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT 15
+#define QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_MASK 0x1f
+#define QCA955X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL BIT(20)
+#define QCA955X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
+#define QCA955X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+
+#define QCA955X_PLL_ETH_SGMII_SERDES_LOCK_DETECT BIT(2)
+#define QCA955X_PLL_ETH_SGMII_SERDES_PLL_REFCLK BIT(1)
+#define QCA955X_PLL_ETH_SGMII_SERDES_EN_PLL BIT(0)
+
+#define QCA956X_PLL_CPU_CONFIG_REG 0x00
+#define QCA956X_PLL_CPU_CONFIG1_REG 0x04
+#define QCA956X_PLL_DDR_CONFIG_REG 0x08
+#define QCA956X_PLL_DDR_CONFIG1_REG 0x0c
+#define QCA956X_PLL_CLK_CTRL_REG 0x10
+#define QCA956X_PLL_SWITCH_CLOCK_CONTROL_REG 0x28
+#define QCA956X_PLL_ETH_XMII_CONTROL_REG 0x30
+#define QCA956X_PLL_ETH_SGMII_SERDES_REG 0x4c
+
+#define QCA956X_PLL_CPU_CONFIG_REFDIV_SHIFT 12
+#define QCA956X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f
+#define QCA956X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19
+#define QCA956X_PLL_CPU_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_L_SHIFT 0
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_L_MASK 0x1f
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_H_SHIFT 5
+#define QCA956X_PLL_CPU_CONFIG1_NFRAC_H_MASK 0x1fff
+#define QCA956X_PLL_CPU_CONFIG1_NINT_SHIFT 18
+#define QCA956X_PLL_CPU_CONFIG1_NINT_MASK 0x1ff
+
+#define QCA956X_PLL_DDR_CONFIG_REFDIV_SHIFT 16
+#define QCA956X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f
+#define QCA956X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23
+#define QCA956X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_L_SHIFT 0
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_L_MASK 0x1f
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_H_SHIFT 5
+#define QCA956X_PLL_DDR_CONFIG1_NFRAC_H_MASK 0x1fff
+#define QCA956X_PLL_DDR_CONFIG1_NINT_SHIFT 18
+#define QCA956X_PLL_DDR_CONFIG1_NINT_MASK 0x1ff
+
+#define QCA956X_PLL_CLK_CTRL_CPU_PLL_BYPASS BIT(2)
+#define QCA956X_PLL_CLK_CTRL_DDR_PLL_BYPASS BIT(3)
+#define QCA956X_PLL_CLK_CTRL_AHB_PLL_BYPASS BIT(4)
+#define QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT 5
+#define QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_MASK 0x1f
+#define QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT 10
+#define QCA956X_PLL_CLK_CTRL_DDR_POST_DIV_MASK 0x1f
+#define QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT 15
+#define QCA956X_PLL_CLK_CTRL_AHB_POST_DIV_MASK 0x1f
+#define QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_DDRPLL BIT(20)
+#define QCA956X_PLL_CLK_CTRL_CPU_DDRCLK_FROM_CPUPLL BIT(21)
+#define QCA956X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_I2C_CLK_SELB BIT(5)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL0_1 BIT(6)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_UART1_CLK_SEL BIT(7)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_USB_REFCLK_FREQ_SEL_SHIFT 8
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_USB_REFCLK_FREQ_SEL_MASK 0xf
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_EN_PLL_TOP BIT(12)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL0_2 BIT(13)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL1_1 BIT(14)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_MDIO_CLK_SEL1_2 BIT(15)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_SWITCH_FUNC_TST_MODE BIT(16)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_EEE_ENABLE BIT(17)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_OEN_CLK125M_PLL BIT(18)
+#define QCA956X_PLL_SWITCH_CLOCK_SPARE_SWITCHCLK_SEL BIT(19)
+
+#define QCA956X_PLL_ETH_XMII_TX_INVERT BIT(1)
+#define QCA956X_PLL_ETH_XMII_GIGE BIT(25)
+#define QCA956X_PLL_ETH_XMII_RX_DELAY_SHIFT 28
+#define QCA956X_PLL_ETH_XMII_RX_DELAY_MASK 0x3
+#define QCA956X_PLL_ETH_XMII_TX_DELAY_SHIFT 26
+#define QCA956X_PLL_ETH_XMII_TX_DELAY_MASK 0x3
+
+#define QCA956X_PLL_ETH_SGMII_SERDES_LOCK_DETECT BIT(2)
+#define QCA956X_PLL_ETH_SGMII_SERDES_PLL_REFCLK BIT(1)
+#define QCA956X_PLL_ETH_SGMII_SERDES_EN_PLL BIT(0)
+
+/*
+ * USB_CONFIG block
+ */
+#define AR71XX_USB_CTRL_REG_FLADJ 0x00
+#define AR71XX_USB_CTRL_REG_CONFIG 0x04
+
+/*
+ * RESET block
+ */
+#define AR71XX_RESET_REG_TIMER 0x00
+#define AR71XX_RESET_REG_TIMER_RELOAD 0x04
+#define AR71XX_RESET_REG_WDOG_CTRL 0x08
+#define AR71XX_RESET_REG_WDOG 0x0c
+#define AR71XX_RESET_REG_MISC_INT_STATUS 0x10
+#define AR71XX_RESET_REG_MISC_INT_ENABLE 0x14
+#define AR71XX_RESET_REG_PCI_INT_STATUS 0x18
+#define AR71XX_RESET_REG_PCI_INT_ENABLE 0x1c
+#define AR71XX_RESET_REG_GLOBAL_INT_STATUS 0x20
+#define AR71XX_RESET_REG_RESET_MODULE 0x24
+#define AR71XX_RESET_REG_PERFC_CTRL 0x2c
+#define AR71XX_RESET_REG_PERFC0 0x30
+#define AR71XX_RESET_REG_PERFC1 0x34
+#define AR71XX_RESET_REG_REV_ID 0x90
+
+#define AR913X_RESET_REG_GLOBAL_INT_STATUS 0x18
+#define AR913X_RESET_REG_RESET_MODULE 0x1c
+#define AR913X_RESET_REG_PERF_CTRL 0x20
+#define AR913X_RESET_REG_PERFC0 0x24
+#define AR913X_RESET_REG_PERFC1 0x28
+
+#define AR724X_RESET_REG_RESET_MODULE 0x1c
+
+#define AR933X_RESET_REG_RESET_MODULE 0x1c
+#define AR933X_RESET_REG_BOOTSTRAP 0xac
+
+#define AR934X_RESET_REG_RESET_MODULE 0x1c
+#define AR934X_RESET_REG_BOOTSTRAP 0xb0
+#define AR934X_RESET_REG_PCIE_WMAC_INT_STATUS 0xac
+
+#define QCA953X_RESET_REG_RESET_MODULE 0x1c
+#define QCA953X_RESET_REG_BOOTSTRAP 0xb0
+#define QCA953X_RESET_REG_PCIE_WMAC_INT_STATUS 0xac
+
+#define QCA955X_RESET_REG_RESET_MODULE 0x1c
+#define QCA955X_RESET_REG_BOOTSTRAP 0xb0
+#define QCA955X_RESET_REG_EXT_INT_STATUS 0xac
+
+#define QCA956X_RESET_REG_RESET_MODULE 0x1c
+#define QCA956X_RESET_REG_BOOTSTRAP 0xb0
+#define QCA956X_RESET_REG_EXT_INT_STATUS 0xac
+
+#define MISC_INT_MIPS_SI_TIMERINT_MASK BIT(28)
+#define MISC_INT_ETHSW BIT(12)
+#define MISC_INT_TIMER4 BIT(10)
+#define MISC_INT_TIMER3 BIT(9)
+#define MISC_INT_TIMER2 BIT(8)
+#define MISC_INT_DMA BIT(7)
+#define MISC_INT_OHCI BIT(6)
+#define MISC_INT_PERFC BIT(5)
+#define MISC_INT_WDOG BIT(4)
+#define MISC_INT_UART BIT(3)
+#define MISC_INT_GPIO BIT(2)
+#define MISC_INT_ERROR BIT(1)
+#define MISC_INT_TIMER BIT(0)
+
+#define AR71XX_RESET_EXTERNAL BIT(28)
+#define AR71XX_RESET_FULL_CHIP BIT(24)
+#define AR71XX_RESET_CPU_NMI BIT(21)
+#define AR71XX_RESET_CPU_COLD BIT(20)
+#define AR71XX_RESET_DMA BIT(19)
+#define AR71XX_RESET_SLIC BIT(18)
+#define AR71XX_RESET_STEREO BIT(17)
+#define AR71XX_RESET_DDR BIT(16)
+#define AR71XX_RESET_GE1_MAC BIT(13)
+#define AR71XX_RESET_GE1_PHY BIT(12)
+#define AR71XX_RESET_USBSUS_OVERRIDE BIT(10)
+#define AR71XX_RESET_GE0_MAC BIT(9)
+#define AR71XX_RESET_GE0_PHY BIT(8)
+#define AR71XX_RESET_USB_OHCI_DLL BIT(6)
+#define AR71XX_RESET_USB_HOST BIT(5)
+#define AR71XX_RESET_USB_PHY BIT(4)
+#define AR71XX_RESET_PCI_BUS BIT(1)
+#define AR71XX_RESET_PCI_CORE BIT(0)
+
+#define AR7240_RESET_USB_HOST BIT(5)
+#define AR7240_RESET_OHCI_DLL BIT(3)
+
+#define AR724X_RESET_GE1_MDIO BIT(23)
+#define AR724X_RESET_GE0_MDIO BIT(22)
+#define AR724X_RESET_PCIE_PHY_SERIAL BIT(10)
+#define AR724X_RESET_PCIE_PHY BIT(7)
+#define AR724X_RESET_PCIE BIT(6)
+#define AR724X_RESET_USB_HOST BIT(5)
+#define AR724X_RESET_USB_PHY BIT(4)
+#define AR724X_RESET_USBSUS_OVERRIDE BIT(3)
+
+#define AR913X_RESET_AMBA2WMAC BIT(22)
+#define AR913X_RESET_USBSUS_OVERRIDE BIT(10)
+#define AR913X_RESET_USB_HOST BIT(5)
+#define AR913X_RESET_USB_PHY BIT(4)
+
+#define AR933X_RESET_GE1_MDIO BIT(23)
+#define AR933X_RESET_GE0_MDIO BIT(22)
+#define AR933X_RESET_GE1_MAC BIT(13)
+#define AR933X_RESET_WMAC BIT(11)
+#define AR933X_RESET_GE0_MAC BIT(9)
+#define AR933X_RESET_USB_HOST BIT(5)
+#define AR933X_RESET_USB_PHY BIT(4)
+#define AR933X_RESET_USBSUS_OVERRIDE BIT(3)
+
+#define AR934X_RESET_HOST BIT(31)
+#define AR934X_RESET_SLIC BIT(30)
+#define AR934X_RESET_HDMA BIT(29)
+#define AR934X_RESET_EXTERNAL BIT(28)
+#define AR934X_RESET_RTC BIT(27)
+#define AR934X_RESET_PCIE_EP_INT BIT(26)
+#define AR934X_RESET_CHKSUM_ACC BIT(25)
+#define AR934X_RESET_FULL_CHIP BIT(24)
+#define AR934X_RESET_GE1_MDIO BIT(23)
+#define AR934X_RESET_GE0_MDIO BIT(22)
+#define AR934X_RESET_CPU_NMI BIT(21)
+#define AR934X_RESET_CPU_COLD BIT(20)
+#define AR934X_RESET_HOST_RESET_INT BIT(19)
+#define AR934X_RESET_PCIE_EP BIT(18)
+#define AR934X_RESET_UART1 BIT(17)
+#define AR934X_RESET_DDR BIT(16)
+#define AR934X_RESET_USB_PHY_PLL_PWD_EXT BIT(15)
+#define AR934X_RESET_NANDF BIT(14)
+#define AR934X_RESET_GE1_MAC BIT(13)
+#define AR934X_RESET_ETH_SWITCH_ANALOG BIT(12)
+#define AR934X_RESET_USB_PHY_ANALOG BIT(11)
+#define AR934X_RESET_HOST_DMA_INT BIT(10)
+#define AR934X_RESET_GE0_MAC BIT(9)
+#define AR934X_RESET_ETH_SWITCH BIT(8)
+#define AR934X_RESET_PCIE_PHY BIT(7)
+#define AR934X_RESET_PCIE BIT(6)
+#define AR934X_RESET_USB_HOST BIT(5)
+#define AR934X_RESET_USB_PHY BIT(4)
+#define AR934X_RESET_USBSUS_OVERRIDE BIT(3)
+#define AR934X_RESET_LUT BIT(2)
+#define AR934X_RESET_MBOX BIT(1)
+#define AR934X_RESET_I2S BIT(0)
+
+#define QCA953X_RESET_USB_EXT_PWR BIT(29)
+#define QCA953X_RESET_EXTERNAL BIT(28)
+#define QCA953X_RESET_RTC BIT(27)
+#define QCA953X_RESET_FULL_CHIP BIT(24)
+#define QCA953X_RESET_GE1_MDIO BIT(23)
+#define QCA953X_RESET_GE0_MDIO BIT(22)
+#define QCA953X_RESET_CPU_NMI BIT(21)
+#define QCA953X_RESET_CPU_COLD BIT(20)
+#define QCA953X_RESET_DDR BIT(16)
+#define QCA953X_RESET_USB_PHY_PLL_PWD_EXT BIT(15)
+#define QCA953X_RESET_GE1_MAC BIT(13)
+#define QCA953X_RESET_ETH_SWITCH_ANALOG BIT(12)
+#define QCA953X_RESET_USB_PHY_ANALOG BIT(11)
+#define QCA953X_RESET_GE0_MAC BIT(9)
+#define QCA953X_RESET_ETH_SWITCH BIT(8)
+#define QCA953X_RESET_PCIE_PHY BIT(7)
+#define QCA953X_RESET_PCIE BIT(6)
+#define QCA953X_RESET_USB_HOST BIT(5)
+#define QCA953X_RESET_USB_PHY BIT(4)
+#define QCA953X_RESET_USBSUS_OVERRIDE BIT(3)
+
+#define QCA955X_RESET_HOST BIT(31)
+#define QCA955X_RESET_SLIC BIT(30)
+#define QCA955X_RESET_HDMA BIT(29)
+#define QCA955X_RESET_EXTERNAL BIT(28)
+#define QCA955X_RESET_RTC BIT(27)
+#define QCA955X_RESET_PCIE_EP_INT BIT(26)
+#define QCA955X_RESET_CHKSUM_ACC BIT(25)
+#define QCA955X_RESET_FULL_CHIP BIT(24)
+#define QCA955X_RESET_GE1_MDIO BIT(23)
+#define QCA955X_RESET_GE0_MDIO BIT(22)
+#define QCA955X_RESET_CPU_NMI BIT(21)
+#define QCA955X_RESET_CPU_COLD BIT(20)
+#define QCA955X_RESET_HOST_RESET_INT BIT(19)
+#define QCA955X_RESET_PCIE_EP BIT(18)
+#define QCA955X_RESET_UART1 BIT(17)
+#define QCA955X_RESET_DDR BIT(16)
+#define QCA955X_RESET_USB_PHY_PLL_PWD_EXT BIT(15)
+#define QCA955X_RESET_NANDF BIT(14)
+#define QCA955X_RESET_GE1_MAC BIT(13)
+#define QCA955X_RESET_SGMII_ANALOG BIT(12)
+#define QCA955X_RESET_USB_PHY_ANALOG BIT(11)
+#define QCA955X_RESET_HOST_DMA_INT BIT(10)
+#define QCA955X_RESET_GE0_MAC BIT(9)
+#define QCA955X_RESET_SGMII BIT(8)
+#define QCA955X_RESET_PCIE_PHY BIT(7)
+#define QCA955X_RESET_PCIE BIT(6)
+#define QCA955X_RESET_USB_HOST BIT(5)
+#define QCA955X_RESET_USB_PHY BIT(4)
+#define QCA955X_RESET_USBSUS_OVERRIDE BIT(3)
+#define QCA955X_RESET_LUT BIT(2)
+#define QCA955X_RESET_MBOX BIT(1)
+#define QCA955X_RESET_I2S BIT(0)
+
+#define QCA956X_RESET_EXTERNAL BIT(28)
+#define QCA956X_RESET_FULL_CHIP BIT(24)
+#define QCA956X_RESET_GE1_MDIO BIT(23)
+#define QCA956X_RESET_GE0_MDIO BIT(22)
+#define QCA956X_RESET_CPU_NMI BIT(21)
+#define QCA956X_RESET_CPU_COLD BIT(20)
+#define QCA956X_RESET_DMA BIT(19)
+#define QCA956X_RESET_DDR BIT(16)
+#define QCA956X_RESET_GE1_MAC BIT(13)
+#define QCA956X_RESET_SGMII_ANALOG BIT(12)
+#define QCA956X_RESET_USB_PHY_ANALOG BIT(11)
+#define QCA956X_RESET_GE0_MAC BIT(9)
+#define QCA956X_RESET_SGMII BIT(8)
+#define QCA956X_RESET_USB_HOST BIT(5)
+#define QCA956X_RESET_USB_PHY BIT(4)
+#define QCA956X_RESET_USBSUS_OVERRIDE BIT(3)
+#define QCA956X_RESET_SWITCH_ANALOG BIT(2)
+#define QCA956X_RESET_SWITCH BIT(0)
+
+#define AR933X_BOOTSTRAP_MDIO_GPIO_EN BIT(18)
+#define AR933X_BOOTSTRAP_EEPBUSY BIT(4)
+#define AR933X_BOOTSTRAP_REF_CLK_40 BIT(0)
+
+#define AR934X_BOOTSTRAP_SW_OPTION8 BIT(23)
+#define AR934X_BOOTSTRAP_SW_OPTION7 BIT(22)
+#define AR934X_BOOTSTRAP_SW_OPTION6 BIT(21)
+#define AR934X_BOOTSTRAP_SW_OPTION5 BIT(20)
+#define AR934X_BOOTSTRAP_SW_OPTION4 BIT(19)
+#define AR934X_BOOTSTRAP_SW_OPTION3 BIT(18)
+#define AR934X_BOOTSTRAP_SW_OPTION2 BIT(17)
+#define AR934X_BOOTSTRAP_SW_OPTION1 BIT(16)
+#define AR934X_BOOTSTRAP_USB_MODE_DEVICE BIT(7)
+#define AR934X_BOOTSTRAP_PCIE_RC BIT(6)
+#define AR934X_BOOTSTRAP_EJTAG_MODE BIT(5)
+#define AR934X_BOOTSTRAP_REF_CLK_40 BIT(4)
+#define AR934X_BOOTSTRAP_BOOT_FROM_SPI BIT(2)
+#define AR934X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
+#define AR934X_BOOTSTRAP_DDR1 BIT(0)
+
+#define QCA953X_BOOTSTRAP_SW_OPTION2 BIT(12)
+#define QCA953X_BOOTSTRAP_SW_OPTION1 BIT(11)
+#define QCA953X_BOOTSTRAP_EJTAG_MODE BIT(5)
+#define QCA953X_BOOTSTRAP_REF_CLK_40 BIT(4)
+#define QCA953X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
+#define QCA953X_BOOTSTRAP_DDR1 BIT(0)
+
+#define QCA955X_BOOTSTRAP_REF_CLK_40 BIT(4)
+
+#define QCA956X_BOOTSTRAP_REF_CLK_40 BIT(2)
+
+#define AR934X_PCIE_WMAC_INT_WMAC_MISC BIT(0)
+#define AR934X_PCIE_WMAC_INT_WMAC_TX BIT(1)
+#define AR934X_PCIE_WMAC_INT_WMAC_RXLP BIT(2)
+#define AR934X_PCIE_WMAC_INT_WMAC_RXHP BIT(3)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC BIT(4)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC0 BIT(5)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC1 BIT(6)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC2 BIT(7)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC3 BIT(8)
+#define AR934X_PCIE_WMAC_INT_WMAC_ALL \
+ (AR934X_PCIE_WMAC_INT_WMAC_MISC | AR934X_PCIE_WMAC_INT_WMAC_TX | \
+ AR934X_PCIE_WMAC_INT_WMAC_RXLP | AR934X_PCIE_WMAC_INT_WMAC_RXHP)
+
+#define AR934X_PCIE_WMAC_INT_PCIE_ALL \
+ (AR934X_PCIE_WMAC_INT_PCIE_RC | AR934X_PCIE_WMAC_INT_PCIE_RC0 | \
+ AR934X_PCIE_WMAC_INT_PCIE_RC1 | AR934X_PCIE_WMAC_INT_PCIE_RC2 | \
+ AR934X_PCIE_WMAC_INT_PCIE_RC3)
+
+#define QCA953X_PCIE_WMAC_INT_WMAC_MISC BIT(0)
+#define QCA953X_PCIE_WMAC_INT_WMAC_TX BIT(1)
+#define QCA953X_PCIE_WMAC_INT_WMAC_RXLP BIT(2)
+#define QCA953X_PCIE_WMAC_INT_WMAC_RXHP BIT(3)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC BIT(4)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC0 BIT(5)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC1 BIT(6)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC2 BIT(7)
+#define QCA953X_PCIE_WMAC_INT_PCIE_RC3 BIT(8)
+#define QCA953X_PCIE_WMAC_INT_WMAC_ALL \
+ (QCA953X_PCIE_WMAC_INT_WMAC_MISC | QCA953X_PCIE_WMAC_INT_WMAC_TX | \
+ QCA953X_PCIE_WMAC_INT_WMAC_RXLP | QCA953X_PCIE_WMAC_INT_WMAC_RXHP)
+
+#define QCA953X_PCIE_WMAC_INT_PCIE_ALL \
+ (QCA953X_PCIE_WMAC_INT_PCIE_RC | QCA953X_PCIE_WMAC_INT_PCIE_RC0 | \
+ QCA953X_PCIE_WMAC_INT_PCIE_RC1 | QCA953X_PCIE_WMAC_INT_PCIE_RC2 | \
+ QCA953X_PCIE_WMAC_INT_PCIE_RC3)
+
+#define QCA955X_EXT_INT_WMAC_MISC BIT(0)
+#define QCA955X_EXT_INT_WMAC_TX BIT(1)
+#define QCA955X_EXT_INT_WMAC_RXLP BIT(2)
+#define QCA955X_EXT_INT_WMAC_RXHP BIT(3)
+#define QCA955X_EXT_INT_PCIE_RC1 BIT(4)
+#define QCA955X_EXT_INT_PCIE_RC1_INT0 BIT(5)
+#define QCA955X_EXT_INT_PCIE_RC1_INT1 BIT(6)
+#define QCA955X_EXT_INT_PCIE_RC1_INT2 BIT(7)
+#define QCA955X_EXT_INT_PCIE_RC1_INT3 BIT(8)
+#define QCA955X_EXT_INT_PCIE_RC2 BIT(12)
+#define QCA955X_EXT_INT_PCIE_RC2_INT0 BIT(13)
+#define QCA955X_EXT_INT_PCIE_RC2_INT1 BIT(14)
+#define QCA955X_EXT_INT_PCIE_RC2_INT2 BIT(15)
+#define QCA955X_EXT_INT_PCIE_RC2_INT3 BIT(16)
+#define QCA955X_EXT_INT_USB1 BIT(24)
+#define QCA955X_EXT_INT_USB2 BIT(28)
+
+#define QCA955X_EXT_INT_WMAC_ALL \
+ (QCA955X_EXT_INT_WMAC_MISC | QCA955X_EXT_INT_WMAC_TX | \
+ QCA955X_EXT_INT_WMAC_RXLP | QCA955X_EXT_INT_WMAC_RXHP)
+
+#define QCA955X_EXT_INT_PCIE_RC1_ALL \
+ (QCA955X_EXT_INT_PCIE_RC1 | QCA955X_EXT_INT_PCIE_RC1_INT0 | \
+ QCA955X_EXT_INT_PCIE_RC1_INT1 | QCA955X_EXT_INT_PCIE_RC1_INT2 | \
+ QCA955X_EXT_INT_PCIE_RC1_INT3)
+
+#define QCA955X_EXT_INT_PCIE_RC2_ALL \
+ (QCA955X_EXT_INT_PCIE_RC2 | QCA955X_EXT_INT_PCIE_RC2_INT0 | \
+ QCA955X_EXT_INT_PCIE_RC2_INT1 | QCA955X_EXT_INT_PCIE_RC2_INT2 | \
+ QCA955X_EXT_INT_PCIE_RC2_INT3)
+
+#define QCA956X_EXT_INT_WMAC_MISC BIT(0)
+#define QCA956X_EXT_INT_WMAC_TX BIT(1)
+#define QCA956X_EXT_INT_WMAC_RXLP BIT(2)
+#define QCA956X_EXT_INT_WMAC_RXHP BIT(3)
+#define QCA956X_EXT_INT_PCIE_RC1 BIT(4)
+#define QCA956X_EXT_INT_PCIE_RC1_INT0 BIT(5)
+#define QCA956X_EXT_INT_PCIE_RC1_INT1 BIT(6)
+#define QCA956X_EXT_INT_PCIE_RC1_INT2 BIT(7)
+#define QCA956X_EXT_INT_PCIE_RC1_INT3 BIT(8)
+#define QCA956X_EXT_INT_PCIE_RC2 BIT(12)
+#define QCA956X_EXT_INT_PCIE_RC2_INT0 BIT(13)
+#define QCA956X_EXT_INT_PCIE_RC2_INT1 BIT(14)
+#define QCA956X_EXT_INT_PCIE_RC2_INT2 BIT(15)
+#define QCA956X_EXT_INT_PCIE_RC2_INT3 BIT(16)
+#define QCA956X_EXT_INT_USB1 BIT(24)
+#define QCA956X_EXT_INT_USB2 BIT(28)
+
+#define QCA956X_EXT_INT_WMAC_ALL \
+ (QCA956X_EXT_INT_WMAC_MISC | QCA956X_EXT_INT_WMAC_TX | \
+ QCA956X_EXT_INT_WMAC_RXLP | QCA956X_EXT_INT_WMAC_RXHP)
+
+#define QCA956X_EXT_INT_PCIE_RC1_ALL \
+ (QCA956X_EXT_INT_PCIE_RC1 | QCA956X_EXT_INT_PCIE_RC1_INT0 | \
+ QCA956X_EXT_INT_PCIE_RC1_INT1 | QCA956X_EXT_INT_PCIE_RC1_INT2 | \
+ QCA956X_EXT_INT_PCIE_RC1_INT3)
+
+#define QCA956X_EXT_INT_PCIE_RC2_ALL \
+ (QCA956X_EXT_INT_PCIE_RC2 | QCA956X_EXT_INT_PCIE_RC2_INT0 | \
+ QCA956X_EXT_INT_PCIE_RC2_INT1 | QCA956X_EXT_INT_PCIE_RC2_INT2 | \
+ QCA956X_EXT_INT_PCIE_RC2_INT3)
+
+#define REV_ID_MAJOR_MASK 0xfff0
+#define REV_ID_MAJOR_AR71XX 0x00a0
+#define REV_ID_MAJOR_AR913X 0x00b0
+#define REV_ID_MAJOR_AR7240 0x00c0
+#define REV_ID_MAJOR_AR7241 0x0100
+#define REV_ID_MAJOR_AR7242 0x1100
+#define REV_ID_MAJOR_AR9330 0x0110
+#define REV_ID_MAJOR_AR9331 0x1110
+#define REV_ID_MAJOR_AR9341 0x0120
+#define REV_ID_MAJOR_AR9342 0x1120
+#define REV_ID_MAJOR_AR9344 0x2120
+#define REV_ID_MAJOR_QCA9533 0x0140
+#define REV_ID_MAJOR_QCA9533_V2 0x0160
+#define REV_ID_MAJOR_QCA9556 0x0130
+#define REV_ID_MAJOR_QCA9558 0x1130
+#define REV_ID_MAJOR_TP9343 0x0150
+#define REV_ID_MAJOR_QCA956X 0x1150
+
+#define AR71XX_REV_ID_MINOR_MASK 0x3
+#define AR71XX_REV_ID_MINOR_AR7130 0x0
+#define AR71XX_REV_ID_MINOR_AR7141 0x1
+#define AR71XX_REV_ID_MINOR_AR7161 0x2
+#define AR71XX_REV_ID_REVISION_MASK 0x3
+#define AR71XX_REV_ID_REVISION_SHIFT 2
+
+#define AR913X_REV_ID_MINOR_MASK 0x3
+#define AR913X_REV_ID_MINOR_AR9130 0x0
+#define AR913X_REV_ID_MINOR_AR9132 0x1
+#define AR913X_REV_ID_REVISION_MASK 0x3
+#define AR913X_REV_ID_REVISION_SHIFT 2
+
+#define AR933X_REV_ID_REVISION_MASK 0x3
+
+#define AR724X_REV_ID_REVISION_MASK 0x3
+
+#define AR934X_REV_ID_REVISION_MASK 0xf
+
+#define QCA953X_REV_ID_REVISION_MASK 0xf
+
+#define QCA955X_REV_ID_REVISION_MASK 0xf
+
+#define QCA956X_REV_ID_REVISION_MASK 0xf
+
+/*
+ * SPI block
+ */
+#define AR71XX_SPI_REG_FS 0x00 /* Function Select */
+#define AR71XX_SPI_REG_CTRL 0x04 /* SPI Control */
+#define AR71XX_SPI_REG_IOC 0x08 /* SPI I/O Control */
+#define AR71XX_SPI_REG_RDS 0x0c /* Read Data Shift */
+
+#define AR71XX_SPI_FS_GPIO BIT(0) /* Enable GPIO mode */
+
+#define AR71XX_SPI_CTRL_RD BIT(6) /* Remap Disable */
+#define AR71XX_SPI_CTRL_DIV_MASK 0x3f
+
+#define AR71XX_SPI_IOC_DO BIT(0) /* Data Out pin */
+#define AR71XX_SPI_IOC_CLK BIT(8) /* CLK pin */
+#define AR71XX_SPI_IOC_CS(n) BIT(16 + (n))
+#define AR71XX_SPI_IOC_CS0 AR71XX_SPI_IOC_CS(0)
+#define AR71XX_SPI_IOC_CS1 AR71XX_SPI_IOC_CS(1)
+#define AR71XX_SPI_IOC_CS2 AR71XX_SPI_IOC_CS(2)
+#define AR71XX_SPI_IOC_CS_ALL (AR71XX_SPI_IOC_CS0 | AR71XX_SPI_IOC_CS1 | \
+ AR71XX_SPI_IOC_CS2)
+
+/*
+ * GPIO block
+ */
+#define AR71XX_GPIO_REG_OE 0x00
+#define AR71XX_GPIO_REG_IN 0x04
+#define AR71XX_GPIO_REG_OUT 0x08
+#define AR71XX_GPIO_REG_SET 0x0c
+#define AR71XX_GPIO_REG_CLEAR 0x10
+#define AR71XX_GPIO_REG_INT_MODE 0x14
+#define AR71XX_GPIO_REG_INT_TYPE 0x18
+#define AR71XX_GPIO_REG_INT_POLARITY 0x1c
+#define AR71XX_GPIO_REG_INT_PENDING 0x20
+#define AR71XX_GPIO_REG_INT_ENABLE 0x24
+#define AR71XX_GPIO_REG_FUNC 0x28
+
+#define AR934X_GPIO_REG_OUT_FUNC0 0x2c
+#define AR934X_GPIO_REG_OUT_FUNC1 0x30
+#define AR934X_GPIO_REG_OUT_FUNC2 0x34
+#define AR934X_GPIO_REG_OUT_FUNC3 0x38
+#define AR934X_GPIO_REG_OUT_FUNC4 0x3c
+#define AR934X_GPIO_REG_OUT_FUNC5 0x40
+#define AR934X_GPIO_REG_FUNC 0x6c
+
+#define QCA953X_GPIO_REG_OUT_FUNC0 0x2c
+#define QCA953X_GPIO_REG_OUT_FUNC1 0x30
+#define QCA953X_GPIO_REG_OUT_FUNC2 0x34
+#define QCA953X_GPIO_REG_OUT_FUNC3 0x38
+#define QCA953X_GPIO_REG_OUT_FUNC4 0x3c
+#define QCA953X_GPIO_REG_IN_ENABLE0 0x44
+#define QCA953X_GPIO_REG_FUNC 0x6c
+
+#define QCA953X_GPIO_OUT_MUX_SPI_CS1 10
+#define QCA953X_GPIO_OUT_MUX_SPI_CS2 11
+#define QCA953X_GPIO_OUT_MUX_SPI_CS0 9
+#define QCA953X_GPIO_OUT_MUX_SPI_CLK 8
+#define QCA953X_GPIO_OUT_MUX_SPI_MOSI 12
+#define QCA953X_GPIO_OUT_MUX_LED_LINK1 41
+#define QCA953X_GPIO_OUT_MUX_LED_LINK2 42
+#define QCA953X_GPIO_OUT_MUX_LED_LINK3 43
+#define QCA953X_GPIO_OUT_MUX_LED_LINK4 44
+#define QCA953X_GPIO_OUT_MUX_LED_LINK5 45
+
+#define QCA955X_GPIO_REG_OUT_FUNC0 0x2c
+#define QCA955X_GPIO_REG_OUT_FUNC1 0x30
+#define QCA955X_GPIO_REG_OUT_FUNC2 0x34
+#define QCA955X_GPIO_REG_OUT_FUNC3 0x38
+#define QCA955X_GPIO_REG_OUT_FUNC4 0x3c
+#define QCA955X_GPIO_REG_OUT_FUNC5 0x40
+#define QCA955X_GPIO_REG_FUNC 0x6c
+
+#define QCA956X_GPIO_REG_OUT_FUNC0 0x2c
+#define QCA956X_GPIO_REG_OUT_FUNC1 0x30
+#define QCA956X_GPIO_REG_OUT_FUNC2 0x34
+#define QCA956X_GPIO_REG_OUT_FUNC3 0x38
+#define QCA956X_GPIO_REG_OUT_FUNC4 0x3c
+#define QCA956X_GPIO_REG_OUT_FUNC5 0x40
+#define QCA956X_GPIO_REG_IN_ENABLE0 0x44
+#define QCA956X_GPIO_REG_IN_ENABLE3 0x50
+#define QCA956X_GPIO_REG_FUNC 0x6c
+
+#define QCA956X_GPIO_OUT_MUX_GE0_MDO 32
+#define QCA956X_GPIO_OUT_MUX_GE0_MDC 33
+
+#define AR71XX_GPIO_COUNT 16
+#define AR7240_GPIO_COUNT 18
+#define AR7241_GPIO_COUNT 20
+#define AR913X_GPIO_COUNT 22
+#define AR933X_GPIO_COUNT 30
+#define AR934X_GPIO_COUNT 23
+#define QCA953X_GPIO_COUNT 18
+#define QCA955X_GPIO_COUNT 24
+#define QCA956X_GPIO_COUNT 23
+
+/*
+ * SRIF block
+ */
+#define AR934X_SRIF_CPU_DPLL1_REG 0x1c0
+#define AR934X_SRIF_CPU_DPLL2_REG 0x1c4
+#define AR934X_SRIF_CPU_DPLL3_REG 0x1c8
+
+#define AR934X_SRIF_DDR_DPLL1_REG 0x240
+#define AR934X_SRIF_DDR_DPLL2_REG 0x244
+#define AR934X_SRIF_DDR_DPLL3_REG 0x248
+
+#define AR934X_SRIF_DPLL1_REFDIV_SHIFT 27
+#define AR934X_SRIF_DPLL1_REFDIV_MASK 0x1f
+#define AR934X_SRIF_DPLL1_NINT_SHIFT 18
+#define AR934X_SRIF_DPLL1_NINT_MASK 0x1ff
+#define AR934X_SRIF_DPLL1_NFRAC_MASK 0x0003ffff
+
+#define AR934X_SRIF_DPLL2_LOCAL_PLL BIT(30)
+#define AR934X_SRIF_DPLL2_OUTDIV_SHIFT 13
+#define AR934X_SRIF_DPLL2_OUTDIV_MASK 0x7
+
+#define QCA953X_SRIF_CPU_DPLL1_REG 0x1c0
+#define QCA953X_SRIF_CPU_DPLL2_REG 0x1c4
+#define QCA953X_SRIF_CPU_DPLL3_REG 0x1c8
+
+#define QCA953X_SRIF_DDR_DPLL1_REG 0x240
+#define QCA953X_SRIF_DDR_DPLL2_REG 0x244
+#define QCA953X_SRIF_DDR_DPLL3_REG 0x248
+
+#define QCA953X_SRIF_DPLL1_REFDIV_SHIFT 27
+#define QCA953X_SRIF_DPLL1_REFDIV_MASK 0x1f
+#define QCA953X_SRIF_DPLL1_NINT_SHIFT 18
+#define QCA953X_SRIF_DPLL1_NINT_MASK 0x1ff
+#define QCA953X_SRIF_DPLL1_NFRAC_MASK 0x0003ffff
+
+#define QCA953X_SRIF_DPLL2_LOCAL_PLL BIT(30)
+#define QCA953X_SRIF_DPLL2_OUTDIV_SHIFT 13
+#define QCA953X_SRIF_DPLL2_OUTDIV_MASK 0x7
+
+#define AR71XX_GPIO_FUNC_STEREO_EN BIT(17)
+#define AR71XX_GPIO_FUNC_SLIC_EN BIT(16)
+#define AR71XX_GPIO_FUNC_SPI_CS2_EN BIT(13)
+#define AR71XX_GPIO_FUNC_SPI_CS1_EN BIT(12)
+#define AR71XX_GPIO_FUNC_UART_EN BIT(8)
+#define AR71XX_GPIO_FUNC_USB_OC_EN BIT(4)
+#define AR71XX_GPIO_FUNC_USB_CLK_EN BIT(0)
+
+#define AR724X_GPIO_FUNC_GE0_MII_CLK_EN BIT(19)
+#define AR724X_GPIO_FUNC_SPI_EN BIT(18)
+#define AR724X_GPIO_FUNC_SPI_CS_EN2 BIT(14)
+#define AR724X_GPIO_FUNC_SPI_CS_EN1 BIT(13)
+#define AR724X_GPIO_FUNC_CLK_OBS5_EN BIT(12)
+#define AR724X_GPIO_FUNC_CLK_OBS4_EN BIT(11)
+#define AR724X_GPIO_FUNC_CLK_OBS3_EN BIT(10)
+#define AR724X_GPIO_FUNC_CLK_OBS2_EN BIT(9)
+#define AR724X_GPIO_FUNC_CLK_OBS1_EN BIT(8)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED4_EN BIT(7)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED3_EN BIT(6)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED2_EN BIT(5)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED1_EN BIT(4)
+#define AR724X_GPIO_FUNC_ETH_SWITCH_LED0_EN BIT(3)
+#define AR724X_GPIO_FUNC_UART_RTS_CTS_EN BIT(2)
+#define AR724X_GPIO_FUNC_UART_EN BIT(1)
+#define AR724X_GPIO_FUNC_JTAG_DISABLE BIT(0)
+
+#define AR913X_GPIO_FUNC_WMAC_LED_EN BIT(22)
+#define AR913X_GPIO_FUNC_EXP_PORT_CS_EN BIT(21)
+#define AR913X_GPIO_FUNC_I2S_REFCLKEN BIT(20)
+#define AR913X_GPIO_FUNC_I2S_MCKEN BIT(19)
+#define AR913X_GPIO_FUNC_I2S1_EN BIT(18)
+#define AR913X_GPIO_FUNC_I2S0_EN BIT(17)
+#define AR913X_GPIO_FUNC_SLIC_EN BIT(16)
+#define AR913X_GPIO_FUNC_UART_RTSCTS_EN BIT(9)
+#define AR913X_GPIO_FUNC_UART_EN BIT(8)
+#define AR913X_GPIO_FUNC_USB_CLK_EN BIT(4)
+
+#define AR933X_GPIO_FUNC_SPDIF2TCK BIT(31)
+#define AR933X_GPIO_FUNC_SPDIF_EN BIT(30)
+#define AR933X_GPIO_FUNC_I2SO_22_18_EN BIT(29)
+#define AR933X_GPIO_FUNC_I2S_MCK_EN BIT(27)
+#define AR933X_GPIO_FUNC_I2SO_EN BIT(26)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED_DUPL BIT(25)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED_COLL BIT(24)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED_ACT BIT(23)
+#define AR933X_GPIO_FUNC_SPI_EN BIT(18)
+#define AR933X_GPIO_FUNC_SPI_CS_EN2 BIT(14)
+#define AR933X_GPIO_FUNC_SPI_CS_EN1 BIT(13)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED4_EN BIT(7)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED3_EN BIT(6)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED2_EN BIT(5)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED1_EN BIT(4)
+#define AR933X_GPIO_FUNC_ETH_SWITCH_LED0_EN BIT(3)
+#define AR933X_GPIO_FUNC_UART_RTS_CTS_EN BIT(2)
+#define AR933X_GPIO_FUNC_UART_EN BIT(1)
+#define AR933X_GPIO_FUNC_JTAG_DISABLE BIT(0)
+
+#define AR934X_GPIO_FUNC_CLK_OBS7_EN BIT(9)
+#define AR934X_GPIO_FUNC_CLK_OBS6_EN BIT(8)
+#define AR934X_GPIO_FUNC_CLK_OBS5_EN BIT(7)
+#define AR934X_GPIO_FUNC_CLK_OBS4_EN BIT(6)
+#define AR934X_GPIO_FUNC_CLK_OBS3_EN BIT(5)
+#define AR934X_GPIO_FUNC_CLK_OBS2_EN BIT(4)
+#define AR934X_GPIO_FUNC_CLK_OBS1_EN BIT(3)
+#define AR934X_GPIO_FUNC_CLK_OBS0_EN BIT(2)
+#define AR934X_GPIO_FUNC_JTAG_DISABLE BIT(1)
+
+#define AR934X_GPIO_OUT_GPIO 0
+#define AR934X_GPIO_OUT_SPI_CS1 7
+#define AR934X_GPIO_OUT_LED_LINK0 41
+#define AR934X_GPIO_OUT_LED_LINK1 42
+#define AR934X_GPIO_OUT_LED_LINK2 43
+#define AR934X_GPIO_OUT_LED_LINK3 44
+#define AR934X_GPIO_OUT_LED_LINK4 45
+#define AR934X_GPIO_OUT_EXT_LNA0 46
+#define AR934X_GPIO_OUT_EXT_LNA1 47
+
+#define QCA955X_GPIO_FUNC_CLK_OBS7_EN BIT(9)
+#define QCA955X_GPIO_FUNC_CLK_OBS6_EN BIT(8)
+#define QCA955X_GPIO_FUNC_CLK_OBS5_EN BIT(7)
+#define QCA955X_GPIO_FUNC_CLK_OBS4_EN BIT(6)
+#define QCA955X_GPIO_FUNC_CLK_OBS3_EN BIT(5)
+#define QCA955X_GPIO_FUNC_CLK_OBS2_EN BIT(4)
+#define QCA955X_GPIO_FUNC_CLK_OBS1_EN BIT(3)
+#define QCA955X_GPIO_FUNC_JTAG_DISABLE BIT(1)
+
+#define QCA955X_GPIO_OUT_GPIO 0
+#define QCA955X_MII_EXT_MDI 1
+#define QCA955X_SLIC_DATA_OUT 3
+#define QCA955X_SLIC_PCM_FS 4
+#define QCA955X_SLIC_PCM_CLK 5
+#define QCA955X_SPI_CLK 8
+#define QCA955X_SPI_CS_0 9
+#define QCA955X_SPI_CS_1 10
+#define QCA955X_SPI_CS_2 11
+#define QCA955X_SPI_MISO 12
+#define QCA955X_I2S_CLK 13
+#define QCA955X_I2S_WS 14
+#define QCA955X_I2S_SD 15
+#define QCA955X_I2S_MCK 16
+#define QCA955X_SPDIF_OUT 17
+#define QCA955X_UART1_TD 18
+#define QCA955X_UART1_RTS 19
+#define QCA955X_UART1_RD 20
+#define QCA955X_UART1_CTS 21
+#define QCA955X_UART0_SOUT 22
+#define QCA955X_SPDIF2_OUT 23
+#define QCA955X_LED_SGMII_SPEED0 24
+#define QCA955X_LED_SGMII_SPEED1 25
+#define QCA955X_LED_SGMII_DUPLEX 26
+#define QCA955X_LED_SGMII_LINK_UP 27
+#define QCA955X_SGMII_SPEED0_INVERT 28
+#define QCA955X_SGMII_SPEED1_INVERT 29
+#define QCA955X_SGMII_DUPLEX_INVERT 30
+#define QCA955X_SGMII_LINK_UP_INVERT 31
+#define QCA955X_GE1_MII_MDO 32
+#define QCA955X_GE1_MII_MDC 33
+#define QCA955X_SWCOM2 38
+#define QCA955X_SWCOM3 39
+#define QCA955X_MAC2_GPIO 40
+#define QCA955X_MAC3_GPIO 41
+#define QCA955X_ATT_LED 42
+#define QCA955X_PWR_LED 43
+#define QCA955X_TX_FRAME 44
+#define QCA955X_RX_CLEAR_EXTERNAL 45
+#define QCA955X_LED_NETWORK_EN 46
+#define QCA955X_LED_POWER_EN 47
+#define QCA955X_WMAC_GLUE_WOW 68
+#define QCA955X_RX_CLEAR_EXTENSION 70
+#define QCA955X_CP_NAND_CS1 73
+#define QCA955X_USB_SUSPEND 74
+#define QCA955X_ETH_TX_ERR 75
+#define QCA955X_DDR_DQ_OE 76
+#define QCA955X_CLKREQ_N_EP 77
+#define QCA955X_CLKREQ_N_RC 78
+#define QCA955X_CLK_OBS0 79
+#define QCA955X_CLK_OBS1 80
+#define QCA955X_CLK_OBS2 81
+#define QCA955X_CLK_OBS3 82
+#define QCA955X_CLK_OBS4 83
+#define QCA955X_CLK_OBS5 84
+
+/*
+ * MII_CTRL block
+ */
+#define AR71XX_MII_REG_MII0_CTRL 0x00
+#define AR71XX_MII_REG_MII1_CTRL 0x04
+
+#define AR71XX_MII_CTRL_IF_MASK 3
+#define AR71XX_MII_CTRL_SPEED_SHIFT 4
+#define AR71XX_MII_CTRL_SPEED_MASK 3
+#define AR71XX_MII_CTRL_SPEED_10 0
+#define AR71XX_MII_CTRL_SPEED_100 1
+#define AR71XX_MII_CTRL_SPEED_1000 2
+
+#define AR71XX_MII0_CTRL_IF_GMII 0
+#define AR71XX_MII0_CTRL_IF_MII 1
+#define AR71XX_MII0_CTRL_IF_RGMII 2
+#define AR71XX_MII0_CTRL_IF_RMII 3
+
+#define AR71XX_MII1_CTRL_IF_RGMII 0
+#define AR71XX_MII1_CTRL_IF_RMII 1
+
+/*
+ * AR933X GMAC interface
+ */
+#define AR933X_GMAC_REG_ETH_CFG 0x00
+
+#define AR933X_ETH_CFG_RGMII_GE0 BIT(0)
+#define AR933X_ETH_CFG_MII_GE0 BIT(1)
+#define AR933X_ETH_CFG_GMII_GE0 BIT(2)
+#define AR933X_ETH_CFG_MII_GE0_MASTER BIT(3)
+#define AR933X_ETH_CFG_MII_GE0_SLAVE BIT(4)
+#define AR933X_ETH_CFG_MII_GE0_ERR_EN BIT(5)
+#define AR933X_ETH_CFG_SW_PHY_SWAP BIT(7)
+#define AR933X_ETH_CFG_SW_PHY_ADDR_SWAP BIT(8)
+#define AR933X_ETH_CFG_RMII_GE0 BIT(9)
+#define AR933X_ETH_CFG_RMII_GE0_SPD_10 0
+#define AR933X_ETH_CFG_RMII_GE0_SPD_100 BIT(10)
+
+/*
+ * AR934X GMAC Interface
+ */
+#define AR934X_GMAC_REG_ETH_CFG 0x00
+
+#define AR934X_ETH_CFG_RGMII_GMAC0 BIT(0)
+#define AR934X_ETH_CFG_MII_GMAC0 BIT(1)
+#define AR934X_ETH_CFG_GMII_GMAC0 BIT(2)
+#define AR934X_ETH_CFG_MII_GMAC0_MASTER BIT(3)
+#define AR934X_ETH_CFG_MII_GMAC0_SLAVE BIT(4)
+#define AR934X_ETH_CFG_MII_GMAC0_ERR_EN BIT(5)
+#define AR934X_ETH_CFG_SW_ONLY_MODE BIT(6)
+#define AR934X_ETH_CFG_SW_PHY_SWAP BIT(7)
+#define AR934X_ETH_CFG_SW_APB_ACCESS BIT(9)
+#define AR934X_ETH_CFG_RMII_GMAC0 BIT(10)
+#define AR933X_ETH_CFG_MII_CNTL_SPEED BIT(11)
+#define AR934X_ETH_CFG_RMII_GMAC0_MASTER BIT(12)
+#define AR933X_ETH_CFG_SW_ACC_MSB_FIRST BIT(13)
+#define AR934X_ETH_CFG_RXD_DELAY BIT(14)
+#define AR934X_ETH_CFG_RXD_DELAY_MASK 0x3
+#define AR934X_ETH_CFG_RXD_DELAY_SHIFT 14
+#define AR934X_ETH_CFG_RDV_DELAY BIT(16)
+#define AR934X_ETH_CFG_RDV_DELAY_MASK 0x3
+#define AR934X_ETH_CFG_RDV_DELAY_SHIFT 16
+
+/*
+ * QCA953X GMAC Interface
+ */
+#define QCA953X_GMAC_REG_ETH_CFG 0x00
+
+#define QCA953X_ETH_CFG_SW_ONLY_MODE BIT(6)
+#define QCA953X_ETH_CFG_SW_PHY_SWAP BIT(7)
+#define QCA953X_ETH_CFG_SW_APB_ACCESS BIT(9)
+#define QCA953X_ETH_CFG_SW_ACC_MSB_FIRST BIT(13)
+
+/*
+ * QCA955X GMAC Interface
+ */
+
+#define QCA955X_GMAC_REG_ETH_CFG 0x00
+#define QCA955X_GMAC_REG_SGMII_SERDES 0x18
+
+#define QCA955X_ETH_CFG_RGMII_EN BIT(0)
+#define QCA955X_ETH_CFG_MII_GE0 BIT(1)
+#define QCA955X_ETH_CFG_GMII_GE0 BIT(2)
+#define QCA955X_ETH_CFG_MII_GE0_MASTER BIT(3)
+#define QCA955X_ETH_CFG_MII_GE0_SLAVE BIT(4)
+#define QCA955X_ETH_CFG_GE0_ERR_EN BIT(5)
+#define QCA955X_ETH_CFG_GE0_SGMII BIT(6)
+#define QCA955X_ETH_CFG_RMII_GE0 BIT(10)
+#define QCA955X_ETH_CFG_MII_CNTL_SPEED BIT(11)
+#define QCA955X_ETH_CFG_RMII_GE0_MASTER BIT(12)
+#define QCA955X_ETH_CFG_RXD_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_RXD_DELAY_SHIFT 14
+#define QCA955X_ETH_CFG_RDV_DELAY BIT(16)
+#define QCA955X_ETH_CFG_RDV_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_RDV_DELAY_SHIFT 16
+#define QCA955X_ETH_CFG_TXD_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_TXD_DELAY_SHIFT 18
+#define QCA955X_ETH_CFG_TXE_DELAY_MASK 0x3
+#define QCA955X_ETH_CFG_TXE_DELAY_SHIFT 20
+
+#define QCA955X_SGMII_SERDES_LOCK_DETECT_STATUS BIT(15)
+#define QCA955X_SGMII_SERDES_RES_CALIBRATION_SHIFT 23
+#define QCA955X_SGMII_SERDES_RES_CALIBRATION_MASK 0xf
+/*
+ * QCA956X GMAC Interface
+ */
+
+#define QCA956X_GMAC_REG_ETH_CFG 0x00
+#define QCA956X_GMAC_REG_SGMII_RESET 0x14
+#define QCA956X_GMAC_REG_SGMII_SERDES 0x18
+#define QCA956X_GMAC_REG_MR_AN_CONTROL 0x1c
+#define QCA956X_GMAC_REG_SGMII_CONFIG 0x34
+#define QCA956X_GMAC_REG_SGMII_DEBUG 0x58
+
+#define QCA956X_ETH_CFG_RGMII_EN BIT(0)
+#define QCA956X_ETH_CFG_GE0_SGMII BIT(6)
+#define QCA956X_ETH_CFG_SW_ONLY_MODE BIT(7)
+#define QCA956X_ETH_CFG_SW_PHY_SWAP BIT(8)
+#define QCA956X_ETH_CFG_SW_PHY_ADDR_SWAP BIT(9)
+#define QCA956X_ETH_CFG_SW_APB_ACCESS BIT(10)
+#define QCA956X_ETH_CFG_SW_ACC_MSB_FIRST BIT(13)
+#define QCA956X_ETH_CFG_RXD_DELAY_MASK 0x3
+#define QCA956X_ETH_CFG_RXD_DELAY_SHIFT 14
+#define QCA956X_ETH_CFG_RDV_DELAY_MASK 0x3
+#define QCA956X_ETH_CFG_RDV_DELAY_SHIFT 16
+
+#define QCA956X_SGMII_RESET_RX_CLK_N_RESET 0x0
+#define QCA956X_SGMII_RESET_RX_CLK_N BIT(0)
+#define QCA956X_SGMII_RESET_TX_CLK_N BIT(1)
+#define QCA956X_SGMII_RESET_RX_125M_N BIT(2)
+#define QCA956X_SGMII_RESET_TX_125M_N BIT(3)
+#define QCA956X_SGMII_RESET_HW_RX_125M_N BIT(4)
+
+#define QCA956X_SGMII_SERDES_CDR_BW_MASK 0x3
+#define QCA956X_SGMII_SERDES_CDR_BW_SHIFT 1
+#define QCA956X_SGMII_SERDES_TX_DR_CTRL_MASK 0x7
+#define QCA956X_SGMII_SERDES_TX_DR_CTRL_SHIFT 4
+#define QCA956X_SGMII_SERDES_PLL_BW BIT(8)
+#define QCA956X_SGMII_SERDES_VCO_FAST BIT(9)
+#define QCA956X_SGMII_SERDES_VCO_SLOW BIT(10)
+#define QCA956X_SGMII_SERDES_LOCK_DETECT_STATUS BIT(15)
+#define QCA956X_SGMII_SERDES_EN_SIGNAL_DETECT BIT(16)
+#define QCA956X_SGMII_SERDES_FIBER_SDO BIT(17)
+#define QCA956X_SGMII_SERDES_RES_CALIBRATION_SHIFT 23
+#define QCA956X_SGMII_SERDES_RES_CALIBRATION_MASK 0xf
+#define QCA956X_SGMII_SERDES_VCO_REG_SHIFT 27
+#define QCA956X_SGMII_SERDES_VCO_REG_MASK 0xf
+
+#define QCA956X_MR_AN_CONTROL_AN_ENABLE BIT(12)
+#define QCA956X_MR_AN_CONTROL_PHY_RESET BIT(15)
+
+#define QCA956X_SGMII_CONFIG_MODE_CTRL_SHIFT 0
+#define QCA956X_SGMII_CONFIG_MODE_CTRL_MASK 0x7
+
+#endif /* __ASM_MACH_AR71XX_REGS_H */
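[Editor's note] The two-bit delay fields in the ETH_CFG registers above (RXD/RDV, and TXD/TXE on the QCA955X) are packed at fixed shifts behind a 0x3 mask. A minimal sketch of the usual read-modify-write sequence; the function name ar934x_set_rxd_delay() and the gmac_base mapping are illustrative assumptions, not kernel code:

static void ar934x_set_rxd_delay(void __iomem *gmac_base, u32 delay)
{
	u32 t = __raw_readl(gmac_base + AR934X_GMAC_REG_ETH_CFG);

	/* clear the old two-bit field, then insert the new value */
	t &= ~(AR934X_ETH_CFG_RXD_DELAY_MASK << AR934X_ETH_CFG_RXD_DELAY_SHIFT);
	t |= (delay & AR934X_ETH_CFG_RXD_DELAY_MASK) << AR934X_ETH_CFG_RXD_DELAY_SHIFT;
	__raw_writel(t, gmac_base + AR934X_GMAC_REG_ETH_CFG);
}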
diff --git a/arch/mips/include/asm/mach-ath79/ar933x_uart.h b/arch/mips/include/asm/mach-ath79/ar933x_uart.h
new file mode 100644
index 000000000..cacf3545e
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/ar933x_uart.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Atheros AR933X UART defines
+ *
+ * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#ifndef __AR933X_UART_H
+#define __AR933X_UART_H
+
+#define AR933X_UART_REGS_SIZE 20
+#define AR933X_UART_FIFO_SIZE 16
+
+#define AR933X_UART_DATA_REG 0x00
+#define AR933X_UART_CS_REG 0x04
+#define AR933X_UART_CLOCK_REG 0x08
+#define AR933X_UART_INT_REG 0x0c
+#define AR933X_UART_INT_EN_REG 0x10
+
+#define AR933X_UART_DATA_TX_RX_MASK 0xff
+#define AR933X_UART_DATA_RX_CSR BIT(8)
+#define AR933X_UART_DATA_TX_CSR BIT(9)
+
+#define AR933X_UART_CS_PARITY_S 0
+#define AR933X_UART_CS_PARITY_M 0x3
+#define AR933X_UART_CS_PARITY_NONE 0
+#define AR933X_UART_CS_PARITY_ODD 2
+#define AR933X_UART_CS_PARITY_EVEN 3
+#define AR933X_UART_CS_IF_MODE_S 2
+#define AR933X_UART_CS_IF_MODE_M 0x3
+#define AR933X_UART_CS_IF_MODE_NONE 0
+#define AR933X_UART_CS_IF_MODE_DTE 1
+#define AR933X_UART_CS_IF_MODE_DCE 2
+#define AR933X_UART_CS_FLOW_CTRL_S 4
+#define AR933X_UART_CS_FLOW_CTRL_M 0x3
+#define AR933X_UART_CS_DMA_EN BIT(6)
+#define AR933X_UART_CS_TX_READY_ORIDE BIT(7)
+#define AR933X_UART_CS_RX_READY_ORIDE BIT(8)
+#define AR933X_UART_CS_TX_READY BIT(9)
+#define AR933X_UART_CS_RX_BREAK BIT(10)
+#define AR933X_UART_CS_TX_BREAK BIT(11)
+#define AR933X_UART_CS_HOST_INT BIT(12)
+#define AR933X_UART_CS_HOST_INT_EN BIT(13)
+#define AR933X_UART_CS_TX_BUSY BIT(14)
+#define AR933X_UART_CS_RX_BUSY BIT(15)
+
+#define AR933X_UART_CLOCK_STEP_M 0xffff
+#define AR933X_UART_CLOCK_SCALE_M 0xfff
+#define AR933X_UART_CLOCK_SCALE_S 16
+#define AR933X_UART_CLOCK_STEP_M 0xffff
+
+#define AR933X_UART_INT_RX_VALID BIT(0)
+#define AR933X_UART_INT_TX_READY BIT(1)
+#define AR933X_UART_INT_RX_FRAMING_ERR BIT(2)
+#define AR933X_UART_INT_RX_OFLOW_ERR BIT(3)
+#define AR933X_UART_INT_TX_OFLOW_ERR BIT(4)
+#define AR933X_UART_INT_RX_PARITY_ERR BIT(5)
+#define AR933X_UART_INT_RX_BREAK_ON BIT(6)
+#define AR933X_UART_INT_RX_BREAK_OFF BIT(7)
+#define AR933X_UART_INT_RX_FULL BIT(8)
+#define AR933X_UART_INT_TX_EMPTY BIT(9)
+#define AR933X_UART_INT_ALLINTS 0x3ff
+
+#endif /* __AR933X_UART_H */
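[Editor's note] The CLOCK register holds a 12-bit scale and a 16-bit step at the positions given by the _S/_M defines above. A sketch of packing the two fields into one register value; ar933x_uart_clock_val() is an illustrative helper, not part of this header:

static inline u32 ar933x_uart_clock_val(u32 scale, u32 step)
{
	return ((scale & AR933X_UART_CLOCK_SCALE_M) << AR933X_UART_CLOCK_SCALE_S) |
	       (step & AR933X_UART_CLOCK_STEP_M);
}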
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
new file mode 100644
index 000000000..70cda7449
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Atheros AR71XX/AR724X/AR913X common definitions
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15 BSP
+ */
+
+#ifndef __ASM_MACH_ATH79_H
+#define __ASM_MACH_ATH79_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+enum ath79_soc_type {
+ ATH79_SOC_UNKNOWN,
+ ATH79_SOC_AR7130,
+ ATH79_SOC_AR7141,
+ ATH79_SOC_AR7161,
+ ATH79_SOC_AR7240,
+ ATH79_SOC_AR7241,
+ ATH79_SOC_AR7242,
+ ATH79_SOC_AR9130,
+ ATH79_SOC_AR9132,
+ ATH79_SOC_AR9330,
+ ATH79_SOC_AR9331,
+ ATH79_SOC_AR9341,
+ ATH79_SOC_AR9342,
+ ATH79_SOC_AR9344,
+ ATH79_SOC_QCA9533,
+ ATH79_SOC_QCA9556,
+ ATH79_SOC_QCA9558,
+ ATH79_SOC_TP9343,
+ ATH79_SOC_QCA956X,
+};
+
+extern enum ath79_soc_type ath79_soc;
+extern unsigned int ath79_soc_rev;
+
+static inline int soc_is_ar71xx(void)
+{
+ return (ath79_soc == ATH79_SOC_AR7130 ||
+ ath79_soc == ATH79_SOC_AR7141 ||
+ ath79_soc == ATH79_SOC_AR7161);
+}
+
+static inline int soc_is_ar724x(void)
+{
+ return (ath79_soc == ATH79_SOC_AR7240 ||
+ ath79_soc == ATH79_SOC_AR7241 ||
+ ath79_soc == ATH79_SOC_AR7242);
+}
+
+static inline int soc_is_ar7240(void)
+{
+ return (ath79_soc == ATH79_SOC_AR7240);
+}
+
+static inline int soc_is_ar7241(void)
+{
+ return (ath79_soc == ATH79_SOC_AR7241);
+}
+
+static inline int soc_is_ar7242(void)
+{
+ return (ath79_soc == ATH79_SOC_AR7242);
+}
+
+static inline int soc_is_ar913x(void)
+{
+ return (ath79_soc == ATH79_SOC_AR9130 ||
+ ath79_soc == ATH79_SOC_AR9132);
+}
+
+static inline int soc_is_ar933x(void)
+{
+ return (ath79_soc == ATH79_SOC_AR9330 ||
+ ath79_soc == ATH79_SOC_AR9331);
+}
+
+static inline int soc_is_ar9341(void)
+{
+ return (ath79_soc == ATH79_SOC_AR9341);
+}
+
+static inline int soc_is_ar9342(void)
+{
+ return (ath79_soc == ATH79_SOC_AR9342);
+}
+
+static inline int soc_is_ar9344(void)
+{
+ return (ath79_soc == ATH79_SOC_AR9344);
+}
+
+static inline int soc_is_ar934x(void)
+{
+ return soc_is_ar9341() || soc_is_ar9342() || soc_is_ar9344();
+}
+
+static inline int soc_is_qca9533(void)
+{
+ return ath79_soc == ATH79_SOC_QCA9533;
+}
+
+static inline int soc_is_qca953x(void)
+{
+ return soc_is_qca9533();
+}
+
+static inline int soc_is_qca9556(void)
+{
+ return ath79_soc == ATH79_SOC_QCA9556;
+}
+
+static inline int soc_is_qca9558(void)
+{
+ return ath79_soc == ATH79_SOC_QCA9558;
+}
+
+static inline int soc_is_qca955x(void)
+{
+ return soc_is_qca9556() || soc_is_qca9558();
+}
+
+static inline int soc_is_tp9343(void)
+{
+ return ath79_soc == ATH79_SOC_TP9343;
+}
+
+static inline int soc_is_qca9561(void)
+{
+ return ath79_soc == ATH79_SOC_QCA956X;
+}
+
+static inline int soc_is_qca9563(void)
+{
+ return ath79_soc == ATH79_SOC_QCA956X;
+}
+
+static inline int soc_is_qca956x(void)
+{
+ return soc_is_qca9561() || soc_is_qca9563();
+}
+
+void ath79_ddr_wb_flush(unsigned int reg);
+void ath79_ddr_set_pci_windows(void);
+
+extern void __iomem *ath79_pll_base;
+extern void __iomem *ath79_reset_base;
+
+static inline void ath79_pll_wr(unsigned reg, u32 val)
+{
+ __raw_writel(val, ath79_pll_base + reg);
+}
+
+static inline u32 ath79_pll_rr(unsigned reg)
+{
+ return __raw_readl(ath79_pll_base + reg);
+}
+
+static inline void ath79_reset_wr(unsigned reg, u32 val)
+{
+ __raw_writel(val, ath79_reset_base + reg);
+ (void) __raw_readl(ath79_reset_base + reg); /* flush */
+}
+
+static inline u32 ath79_reset_rr(unsigned reg)
+{
+ return __raw_readl(ath79_reset_base + reg);
+}
+
+void ath79_device_reset_set(u32 mask);
+void ath79_device_reset_clear(u32 mask);
+
+#endif /* __ASM_MACH_ATH79_H */
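[Editor's note] ath79_reset_wr() above deliberately reads the register back so the write is flushed before the caller continues. A sketch of the reset pulse these helpers are typically used for; the function name, register offset and delay length are assumptions for illustration, not kernel code:

static void example_device_reset(unsigned int reg, u32 mask)
{
	u32 t = ath79_reset_rr(reg);

	ath79_reset_wr(reg, t | mask);		/* assert the reset bit */
	udelay(10);				/* assumes <linux/delay.h> is available */
	ath79_reset_wr(reg, t & ~mask);		/* release it again */
}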
diff --git a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
new file mode 100644
index 000000000..79ab3ad9f
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Atheros AR71XX/AR724X/AR913X specific CPU feature overrides
+ *
+ * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#ifndef __ASM_MACH_ATH79_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_ATH79_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_sb1_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+
+#define cpu_has_prefetch 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+
+#define cpu_has_mips16 1
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+#define cpu_has_rixi 0
+
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 1
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+#define cpu_has_vtag_icache 0
+#define cpu_has_dc_aliases 1
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_pindexed_dcache 0
+
+#endif /* __ASM_MACH_ATH79_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ath79/irq.h b/arch/mips/include/asm/mach-ath79/irq.h
new file mode 100644
index 000000000..882534be0
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/irq.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ */
+#ifndef __ASM_MACH_ATH79_IRQ_H
+#define __ASM_MACH_ATH79_IRQ_H
+
+#define MIPS_CPU_IRQ_BASE 0
+#define NR_IRQS 51
+
+#define ATH79_CPU_IRQ(_x) (MIPS_CPU_IRQ_BASE + (_x))
+
+#define ATH79_MISC_IRQ_BASE 8
+#define ATH79_MISC_IRQ_COUNT 32
+#define ATH79_MISC_IRQ(_x) (ATH79_MISC_IRQ_BASE + (_x))
+
+#define ATH79_PCI_IRQ_BASE (ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT)
+#define ATH79_PCI_IRQ_COUNT 6
+#define ATH79_PCI_IRQ(_x) (ATH79_PCI_IRQ_BASE + (_x))
+
+#define ATH79_IP2_IRQ_BASE (ATH79_PCI_IRQ_BASE + ATH79_PCI_IRQ_COUNT)
+#define ATH79_IP2_IRQ_COUNT 2
+#define ATH79_IP2_IRQ(_x) (ATH79_IP2_IRQ_BASE + (_x))
+
+#define ATH79_IP3_IRQ_BASE (ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT)
+#define ATH79_IP3_IRQ_COUNT 3
+#define ATH79_IP3_IRQ(_x) (ATH79_IP3_IRQ_BASE + (_x))
+
+#include <asm/mach-generic/irq.h>
+
+#endif /* __ASM_MACH_ATH79_IRQ_H */
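[Editor's note] The bases above stack linearly: the 8 CPU IRQs come first, so ATH79_MISC_IRQ(3) evaluates to 11, ATH79_PCI_IRQ(0) to 40 and ATH79_IP2_IRQ(0) to 46; together with the 3 IP3 slots this fills exactly the NR_IRQS = 51 entries.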
diff --git a/arch/mips/include/asm/mach-ath79/kernel-entry-init.h b/arch/mips/include/asm/mach-ath79/kernel-entry-init.h
new file mode 100644
index 000000000..88db67bf4
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/kernel-entry-init.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Atheros AR71XX/AR724X/AR913X specific kernel entry setup
+ *
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ */
+#ifndef __ASM_MACH_ATH79_KERNEL_ENTRY_H
+#define __ASM_MACH_ATH79_KERNEL_ENTRY_H
+
+ /*
+ * Some bootloaders set the 'Kseg0 coherency algorithm' to
+ * 'Cacheable, noncoherent, write-through, no write allocate'
+ * and this causes performance issues. Let's go and change it to
+ * 'Cacheable, noncoherent, write-back, write allocate'
+ */
+ .macro kernel_entry_setup
+ mfc0 t0, CP0_CONFIG
+ li t1, ~CONF_CM_CMASK
+ and t0, t1
+ ori t0, CONF_CM_CACHABLE_NONCOHERENT
+ mtc0 t0, CP0_CONFIG
+ nop
+ .endm
+
+ .macro smp_slave_setup
+ .endm
+
+#endif /* __ASM_MACH_ATH79_KERNEL_ENTRY_H */
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
new file mode 100644
index 000000000..a7eec3364
--- /dev/null
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -0,0 +1,1211 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ * Include file for Alchemy Semiconductor's Au1k CPU.
+ *
+ * Copyright 2000-2001, 2006-2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+ /*
+ * some definitions added by takuzo@sm.sony.co.jp and sato@sm.sony.co.jp
+ */
+
+#ifndef _AU1000_H_
+#define _AU1000_H_
+
+/* SOC Interrupt numbers */
+/* Au1000-style (IC0/1): 2 controllers with 32 sources each */
+#define AU1000_INTC0_INT_BASE (MIPS_CPU_IRQ_BASE + 8)
+#define AU1000_INTC0_INT_LAST (AU1000_INTC0_INT_BASE + 31)
+#define AU1000_INTC1_INT_BASE (AU1000_INTC0_INT_LAST + 1)
+#define AU1000_INTC1_INT_LAST (AU1000_INTC1_INT_BASE + 31)
+#define AU1000_MAX_INTR AU1000_INTC1_INT_LAST
+
+/* Au1300-style (GPIC): 1 controller with up to 128 sources */
+#define ALCHEMY_GPIC_INT_BASE (MIPS_CPU_IRQ_BASE + 8)
+#define ALCHEMY_GPIC_INT_NUM 128
+#define ALCHEMY_GPIC_INT_LAST (ALCHEMY_GPIC_INT_BASE + ALCHEMY_GPIC_INT_NUM - 1)
+
+/* common clock names, shared among all variants. AUXPLL2 is Au1300 */
+#define ALCHEMY_ROOT_CLK "root_clk"
+#define ALCHEMY_CPU_CLK "cpu_clk"
+#define ALCHEMY_AUXPLL_CLK "auxpll_clk"
+#define ALCHEMY_AUXPLL2_CLK "auxpll2_clk"
+#define ALCHEMY_SYSBUS_CLK "sysbus_clk"
+#define ALCHEMY_PERIPH_CLK "periph_clk"
+#define ALCHEMY_MEM_CLK "mem_clk"
+#define ALCHEMY_LR_CLK "lr_clk"
+#define ALCHEMY_FG0_CLK "fg0_clk"
+#define ALCHEMY_FG1_CLK "fg1_clk"
+#define ALCHEMY_FG2_CLK "fg2_clk"
+#define ALCHEMY_FG3_CLK "fg3_clk"
+#define ALCHEMY_FG4_CLK "fg4_clk"
+#define ALCHEMY_FG5_CLK "fg5_clk"
+
+/* Au1300 peripheral interrupt numbers */
+#define AU1300_FIRST_INT (ALCHEMY_GPIC_INT_BASE)
+#define AU1300_UART1_INT (AU1300_FIRST_INT + 17)
+#define AU1300_UART2_INT (AU1300_FIRST_INT + 25)
+#define AU1300_UART3_INT (AU1300_FIRST_INT + 27)
+#define AU1300_SD1_INT (AU1300_FIRST_INT + 32)
+#define AU1300_SD2_INT (AU1300_FIRST_INT + 38)
+#define AU1300_PSC0_INT (AU1300_FIRST_INT + 48)
+#define AU1300_PSC1_INT (AU1300_FIRST_INT + 52)
+#define AU1300_PSC2_INT (AU1300_FIRST_INT + 56)
+#define AU1300_PSC3_INT (AU1300_FIRST_INT + 60)
+#define AU1300_NAND_INT (AU1300_FIRST_INT + 62)
+#define AU1300_DDMA_INT (AU1300_FIRST_INT + 75)
+#define AU1300_MMU_INT (AU1300_FIRST_INT + 76)
+#define AU1300_MPU_INT (AU1300_FIRST_INT + 77)
+#define AU1300_GPU_INT (AU1300_FIRST_INT + 78)
+#define AU1300_UDMA_INT (AU1300_FIRST_INT + 79)
+#define AU1300_TOY_INT (AU1300_FIRST_INT + 80)
+#define AU1300_TOY_MATCH0_INT (AU1300_FIRST_INT + 81)
+#define AU1300_TOY_MATCH1_INT (AU1300_FIRST_INT + 82)
+#define AU1300_TOY_MATCH2_INT (AU1300_FIRST_INT + 83)
+#define AU1300_RTC_INT (AU1300_FIRST_INT + 84)
+#define AU1300_RTC_MATCH0_INT (AU1300_FIRST_INT + 85)
+#define AU1300_RTC_MATCH1_INT (AU1300_FIRST_INT + 86)
+#define AU1300_RTC_MATCH2_INT (AU1300_FIRST_INT + 87)
+#define AU1300_UART0_INT (AU1300_FIRST_INT + 88)
+#define AU1300_SD0_INT (AU1300_FIRST_INT + 89)
+#define AU1300_USB_INT (AU1300_FIRST_INT + 90)
+#define AU1300_LCD_INT (AU1300_FIRST_INT + 91)
+#define AU1300_BSA_INT (AU1300_FIRST_INT + 92)
+#define AU1300_MPE_INT (AU1300_FIRST_INT + 93)
+#define AU1300_ITE_INT (AU1300_FIRST_INT + 94)
+#define AU1300_AES_INT (AU1300_FIRST_INT + 95)
+#define AU1300_CIM_INT (AU1300_FIRST_INT + 96)
+
+/**********************************************************************/
+
+/*
+ * Physical base addresses for integrated peripherals
+ * 0..au1000 1..au1500 2..au1100 3..au1550 4..au1200 5..au1300
+ */
+
+#define AU1000_AC97_PHYS_ADDR 0x10000000 /* 012 */
+#define AU1300_ROM_PHYS_ADDR 0x10000000 /* 5 */
+#define AU1300_OTP_PHYS_ADDR 0x10002000 /* 5 */
+#define AU1300_VSS_PHYS_ADDR 0x10003000 /* 5 */
+#define AU1300_UART0_PHYS_ADDR 0x10100000 /* 5 */
+#define AU1300_UART1_PHYS_ADDR 0x10101000 /* 5 */
+#define AU1300_UART2_PHYS_ADDR 0x10102000 /* 5 */
+#define AU1300_UART3_PHYS_ADDR 0x10103000 /* 5 */
+#define AU1000_USB_OHCI_PHYS_ADDR 0x10100000 /* 012 */
+#define AU1000_USB_UDC_PHYS_ADDR 0x10200000 /* 0123 */
+#define AU1300_GPIC_PHYS_ADDR 0x10200000 /* 5 */
+#define AU1000_IRDA_PHYS_ADDR 0x10300000 /* 02 */
+#define AU1200_AES_PHYS_ADDR 0x10300000 /* 45 */
+#define AU1000_IC0_PHYS_ADDR 0x10400000 /* 01234 */
+#define AU1300_GPU_PHYS_ADDR 0x10500000 /* 5 */
+#define AU1000_MAC0_PHYS_ADDR 0x10500000 /* 023 */
+#define AU1000_MAC1_PHYS_ADDR 0x10510000 /* 023 */
+#define AU1000_MACEN_PHYS_ADDR 0x10520000 /* 023 */
+#define AU1100_SD0_PHYS_ADDR 0x10600000 /* 245 */
+#define AU1300_SD1_PHYS_ADDR 0x10601000 /* 5 */
+#define AU1300_SD2_PHYS_ADDR 0x10602000 /* 5 */
+#define AU1100_SD1_PHYS_ADDR 0x10680000 /* 24 */
+#define AU1300_SYS_PHYS_ADDR 0x10900000 /* 5 */
+#define AU1550_PSC2_PHYS_ADDR 0x10A00000 /* 3 */
+#define AU1550_PSC3_PHYS_ADDR 0x10B00000 /* 3 */
+#define AU1300_PSC0_PHYS_ADDR 0x10A00000 /* 5 */
+#define AU1300_PSC1_PHYS_ADDR 0x10A01000 /* 5 */
+#define AU1300_PSC2_PHYS_ADDR 0x10A02000 /* 5 */
+#define AU1300_PSC3_PHYS_ADDR 0x10A03000 /* 5 */
+#define AU1000_I2S_PHYS_ADDR 0x11000000 /* 02 */
+#define AU1500_MAC0_PHYS_ADDR 0x11500000 /* 1 */
+#define AU1500_MAC1_PHYS_ADDR 0x11510000 /* 1 */
+#define AU1500_MACEN_PHYS_ADDR 0x11520000 /* 1 */
+#define AU1000_UART0_PHYS_ADDR 0x11100000 /* 01234 */
+#define AU1200_SWCNT_PHYS_ADDR 0x1110010C /* 4 */
+#define AU1000_UART1_PHYS_ADDR 0x11200000 /* 0234 */
+#define AU1000_UART2_PHYS_ADDR 0x11300000 /* 0 */
+#define AU1000_UART3_PHYS_ADDR 0x11400000 /* 0123 */
+#define AU1000_SSI0_PHYS_ADDR 0x11600000 /* 02 */
+#define AU1000_SSI1_PHYS_ADDR 0x11680000 /* 02 */
+#define AU1500_GPIO2_PHYS_ADDR 0x11700000 /* 1234 */
+#define AU1000_IC1_PHYS_ADDR 0x11800000 /* 01234 */
+#define AU1000_SYS_PHYS_ADDR 0x11900000 /* 012345 */
+#define AU1550_PSC0_PHYS_ADDR 0x11A00000 /* 34 */
+#define AU1550_PSC1_PHYS_ADDR 0x11B00000 /* 34 */
+#define AU1000_MEM_PHYS_ADDR 0x14000000 /* 01234 */
+#define AU1000_STATIC_MEM_PHYS_ADDR 0x14001000 /* 01234 */
+#define AU1300_UDMA_PHYS_ADDR 0x14001800 /* 5 */
+#define AU1000_DMA_PHYS_ADDR 0x14002000 /* 012 */
+#define AU1550_DBDMA_PHYS_ADDR 0x14002000 /* 345 */
+#define AU1550_DBDMA_CONF_PHYS_ADDR 0x14003000 /* 345 */
+#define AU1000_MACDMA0_PHYS_ADDR 0x14004000 /* 0123 */
+#define AU1000_MACDMA1_PHYS_ADDR 0x14004200 /* 0123 */
+#define AU1200_CIM_PHYS_ADDR 0x14004000 /* 45 */
+#define AU1500_PCI_PHYS_ADDR 0x14005000 /* 13 */
+#define AU1550_PE_PHYS_ADDR 0x14008000 /* 3 */
+#define AU1200_MAEBE_PHYS_ADDR 0x14010000 /* 4 */
+#define AU1200_MAEFE_PHYS_ADDR 0x14012000 /* 4 */
+#define AU1300_MAEITE_PHYS_ADDR 0x14010000 /* 5 */
+#define AU1300_MAEMPE_PHYS_ADDR 0x14014000 /* 5 */
+#define AU1550_USB_OHCI_PHYS_ADDR 0x14020000 /* 3 */
+#define AU1200_USB_CTL_PHYS_ADDR 0x14020000 /* 4 */
+#define AU1200_USB_OTG_PHYS_ADDR 0x14020020 /* 4 */
+#define AU1200_USB_OHCI_PHYS_ADDR 0x14020100 /* 4 */
+#define AU1200_USB_EHCI_PHYS_ADDR 0x14020200 /* 4 */
+#define AU1200_USB_UDC_PHYS_ADDR 0x14022000 /* 4 */
+#define AU1300_USB_EHCI_PHYS_ADDR 0x14020000 /* 5 */
+#define AU1300_USB_OHCI0_PHYS_ADDR 0x14020400 /* 5 */
+#define AU1300_USB_OHCI1_PHYS_ADDR 0x14020800 /* 5 */
+#define AU1300_USB_CTL_PHYS_ADDR 0x14021000 /* 5 */
+#define AU1300_USB_OTG_PHYS_ADDR 0x14022000 /* 5 */
+#define AU1300_MAEBSA_PHYS_ADDR 0x14030000 /* 5 */
+#define AU1100_LCD_PHYS_ADDR 0x15000000 /* 2 */
+#define AU1200_LCD_PHYS_ADDR 0x15000000 /* 45 */
+#define AU1500_PCI_MEM_PHYS_ADDR 0x400000000ULL /* 13 */
+#define AU1500_PCI_IO_PHYS_ADDR 0x500000000ULL /* 13 */
+#define AU1500_PCI_CONFIG0_PHYS_ADDR 0x600000000ULL /* 13 */
+#define AU1500_PCI_CONFIG1_PHYS_ADDR 0x680000000ULL /* 13 */
+#define AU1000_PCMCIA_IO_PHYS_ADDR 0xF00000000ULL /* 012345 */
+#define AU1000_PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL /* 012345 */
+#define AU1000_PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL /* 012345 */
+
+/**********************************************************************/
+
+
+/*
+ * Au1300 GPIO+INT controller (GPIC) register offsets and bits
+ * Registers are 128bits (0x10 bytes), divided into 4 "banks".
+ */
+#define AU1300_GPIC_PINVAL 0x0000
+#define AU1300_GPIC_PINVALCLR 0x0010
+#define AU1300_GPIC_IPEND 0x0020
+#define AU1300_GPIC_PRIENC 0x0030
+#define AU1300_GPIC_IEN 0x0040 /* int_mask in manual */
+#define AU1300_GPIC_IDIS 0x0050 /* int_maskclr in manual */
+#define AU1300_GPIC_DMASEL 0x0060
+#define AU1300_GPIC_DEVSEL 0x0080
+#define AU1300_GPIC_DEVCLR 0x0090
+#define AU1300_GPIC_RSTVAL 0x00a0
+/* pin configuration space. one 32bit register for up to 128 IRQs */
+#define AU1300_GPIC_PINCFG 0x1000
+
+#define GPIC_GPIO_TO_BIT(gpio) \
+ (1 << ((gpio) & 0x1f))
+
+#define GPIC_GPIO_BANKOFF(gpio) \
+ (((gpio) >> 5) * 4)
+
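[Editor's note] Worked example: pin 70 lands in the third bank word, so GPIC_GPIO_BANKOFF(70) yields a byte offset of 8 within the 0x10-byte register, and GPIC_GPIO_TO_BIT(70) yields 1 << 6.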
+/* Pin Control bits: who owns the pin, what does it do */
+#define GPIC_CFG_PC_GPIN 0
+#define GPIC_CFG_PC_DEV 1
+#define GPIC_CFG_PC_GPOLOW 2
+#define GPIC_CFG_PC_GPOHIGH 3
+#define GPIC_CFG_PC_MASK 3
+
+/* assign pin to MIPS IRQ line */
+#define GPIC_CFG_IL_SET(x) (((x) & 3) << 2)
+#define GPIC_CFG_IL_MASK (3 << 2)
+
+/* pin interrupt type setup */
+#define GPIC_CFG_IC_OFF (0 << 4)
+#define GPIC_CFG_IC_LEVEL_LOW (1 << 4)
+#define GPIC_CFG_IC_LEVEL_HIGH (2 << 4)
+#define GPIC_CFG_IC_EDGE_FALL (5 << 4)
+#define GPIC_CFG_IC_EDGE_RISE (6 << 4)
+#define GPIC_CFG_IC_EDGE_BOTH (7 << 4)
+#define GPIC_CFG_IC_MASK (7 << 4)
+
+/* allow interrupt to wake cpu from 'wait' */
+#define GPIC_CFG_IDLEWAKE (1 << 7)
+
+/***********************************************************************/
+
+/* Au1000 SDRAM memory controller register offsets */
+#define AU1000_MEM_SDMODE0 0x0000
+#define AU1000_MEM_SDMODE1 0x0004
+#define AU1000_MEM_SDMODE2 0x0008
+#define AU1000_MEM_SDADDR0 0x000C
+#define AU1000_MEM_SDADDR1 0x0010
+#define AU1000_MEM_SDADDR2 0x0014
+#define AU1000_MEM_SDREFCFG 0x0018
+#define AU1000_MEM_SDPRECMD 0x001C
+#define AU1000_MEM_SDAUTOREF 0x0020
+#define AU1000_MEM_SDWRMD0 0x0024
+#define AU1000_MEM_SDWRMD1 0x0028
+#define AU1000_MEM_SDWRMD2 0x002C
+#define AU1000_MEM_SDSLEEP 0x0030
+#define AU1000_MEM_SDSMCKE 0x0034
+
+/* MEM_SDMODE register content definitions */
+#define MEM_SDMODE_F (1 << 22)
+#define MEM_SDMODE_SR (1 << 21)
+#define MEM_SDMODE_BS (1 << 20)
+#define MEM_SDMODE_RS (3 << 18)
+#define MEM_SDMODE_CS (7 << 15)
+#define MEM_SDMODE_TRAS (15 << 11)
+#define MEM_SDMODE_TMRD (3 << 9)
+#define MEM_SDMODE_TWR (3 << 7)
+#define MEM_SDMODE_TRP (3 << 5)
+#define MEM_SDMODE_TRCD (3 << 3)
+#define MEM_SDMODE_TCL (7 << 0)
+
+#define MEM_SDMODE_BS_2Bank (0 << 20)
+#define MEM_SDMODE_BS_4Bank (1 << 20)
+#define MEM_SDMODE_RS_11Row (0 << 18)
+#define MEM_SDMODE_RS_12Row (1 << 18)
+#define MEM_SDMODE_RS_13Row (2 << 18)
+#define MEM_SDMODE_RS_N(N) ((N) << 18)
+#define MEM_SDMODE_CS_7Col (0 << 15)
+#define MEM_SDMODE_CS_8Col (1 << 15)
+#define MEM_SDMODE_CS_9Col (2 << 15)
+#define MEM_SDMODE_CS_10Col (3 << 15)
+#define MEM_SDMODE_CS_11Col (4 << 15)
+#define MEM_SDMODE_CS_N(N) ((N) << 15)
+#define MEM_SDMODE_TRAS_N(N) ((N) << 11)
+#define MEM_SDMODE_TMRD_N(N) ((N) << 9)
+#define MEM_SDMODE_TWR_N(N) ((N) << 7)
+#define MEM_SDMODE_TRP_N(N) ((N) << 5)
+#define MEM_SDMODE_TRCD_N(N) ((N) << 3)
+#define MEM_SDMODE_TCL_N(N) ((N) << 0)
+
+/* MEM_SDADDR register contents definitions */
+#define MEM_SDADDR_E (1 << 20)
+#define MEM_SDADDR_CSBA (0x03FF << 10)
+#define MEM_SDADDR_CSMASK (0x03FF << 0)
+#define MEM_SDADDR_CSBA_N(N) ((N) & (0x03FF << 22) >> 12)
+#define MEM_SDADDR_CSMASK_N(N) ((N)&(0x03FF << 22) >> 22)
+
+/* MEM_SDREFCFG register content definitions */
+#define MEM_SDREFCFG_TRC (15 << 28)
+#define MEM_SDREFCFG_TRPM (3 << 26)
+#define MEM_SDREFCFG_E (1 << 25)
+#define MEM_SDREFCFG_RE (0x1ffffff << 0)
+#define MEM_SDREFCFG_TRC_N(N) ((N) << MEM_SDREFCFG_TRC)
+#define MEM_SDREFCFG_TRPM_N(N) ((N) << MEM_SDREFCFG_TRPM)
+#define MEM_SDREFCFG_REF_N(N) (N)
+
+/* Au1550 SDRAM Register Offsets */
+#define AU1550_MEM_SDMODE0 0x0800
+#define AU1550_MEM_SDMODE1 0x0808
+#define AU1550_MEM_SDMODE2 0x0810
+#define AU1550_MEM_SDADDR0 0x0820
+#define AU1550_MEM_SDADDR1 0x0828
+#define AU1550_MEM_SDADDR2 0x0830
+#define AU1550_MEM_SDCONFIGA 0x0840
+#define AU1550_MEM_SDCONFIGB 0x0848
+#define AU1550_MEM_SDSTAT 0x0850
+#define AU1550_MEM_SDERRADDR 0x0858
+#define AU1550_MEM_SDSTRIDE0 0x0860
+#define AU1550_MEM_SDSTRIDE1 0x0868
+#define AU1550_MEM_SDSTRIDE2 0x0870
+#define AU1550_MEM_SDWRMD0 0x0880
+#define AU1550_MEM_SDWRMD1 0x0888
+#define AU1550_MEM_SDWRMD2 0x0890
+#define AU1550_MEM_SDPRECMD 0x08C0
+#define AU1550_MEM_SDAUTOREF 0x08C8
+#define AU1550_MEM_SDSREF 0x08D0
+#define AU1550_MEM_SDSLEEP MEM_SDSREF
+
+/* Static Bus Controller register offsets */
+#define AU1000_MEM_STCFG0 0x000
+#define AU1000_MEM_STTIME0 0x004
+#define AU1000_MEM_STADDR0 0x008
+#define AU1000_MEM_STCFG1 0x010
+#define AU1000_MEM_STTIME1 0x014
+#define AU1000_MEM_STADDR1 0x018
+#define AU1000_MEM_STCFG2 0x020
+#define AU1000_MEM_STTIME2 0x024
+#define AU1000_MEM_STADDR2 0x028
+#define AU1000_MEM_STCFG3 0x030
+#define AU1000_MEM_STTIME3 0x034
+#define AU1000_MEM_STADDR3 0x038
+#define AU1000_MEM_STNDCTL 0x100
+#define AU1000_MEM_STSTAT 0x104
+
+#define MEM_STNAND_CMD 0x0
+#define MEM_STNAND_ADDR 0x4
+#define MEM_STNAND_DATA 0x20
+
+
+/* Programmable Counters 0 and 1 */
+#define AU1000_SYS_CNTRCTRL 0x14
+# define SYS_CNTRL_E1S (1 << 23)
+# define SYS_CNTRL_T1S (1 << 20)
+# define SYS_CNTRL_M21 (1 << 19)
+# define SYS_CNTRL_M11 (1 << 18)
+# define SYS_CNTRL_M01 (1 << 17)
+# define SYS_CNTRL_C1S (1 << 16)
+# define SYS_CNTRL_BP (1 << 14)
+# define SYS_CNTRL_EN1 (1 << 13)
+# define SYS_CNTRL_BT1 (1 << 12)
+# define SYS_CNTRL_EN0 (1 << 11)
+# define SYS_CNTRL_BT0 (1 << 10)
+# define SYS_CNTRL_E0 (1 << 8)
+# define SYS_CNTRL_E0S (1 << 7)
+# define SYS_CNTRL_32S (1 << 5)
+# define SYS_CNTRL_T0S (1 << 4)
+# define SYS_CNTRL_M20 (1 << 3)
+# define SYS_CNTRL_M10 (1 << 2)
+# define SYS_CNTRL_M00 (1 << 1)
+# define SYS_CNTRL_C0S (1 << 0)
+
+/* Programmable Counter 0 Registers */
+#define AU1000_SYS_TOYTRIM 0x00
+#define AU1000_SYS_TOYWRITE 0x04
+#define AU1000_SYS_TOYMATCH0 0x08
+#define AU1000_SYS_TOYMATCH1 0x0c
+#define AU1000_SYS_TOYMATCH2 0x10
+#define AU1000_SYS_TOYREAD 0x40
+
+/* Programmable Counter 1 Registers */
+#define AU1000_SYS_RTCTRIM 0x44
+#define AU1000_SYS_RTCWRITE 0x48
+#define AU1000_SYS_RTCMATCH0 0x4c
+#define AU1000_SYS_RTCMATCH1 0x50
+#define AU1000_SYS_RTCMATCH2 0x54
+#define AU1000_SYS_RTCREAD 0x58
+
+
+/* GPIO */
+#define AU1000_SYS_PINFUNC 0x2C
+# define SYS_PF_USB (1 << 15) /* 2nd USB device/host */
+# define SYS_PF_U3 (1 << 14) /* GPIO23/U3TXD */
+# define SYS_PF_U2 (1 << 13) /* GPIO22/U2TXD */
+# define SYS_PF_U1 (1 << 12) /* GPIO21/U1TXD */
+# define SYS_PF_SRC (1 << 11) /* GPIO6/SROMCKE */
+# define SYS_PF_CK5 (1 << 10) /* GPIO3/CLK5 */
+# define SYS_PF_CK4 (1 << 9) /* GPIO2/CLK4 */
+# define SYS_PF_IRF (1 << 8) /* GPIO15/IRFIRSEL */
+# define SYS_PF_UR3 (1 << 7) /* GPIO[14:9]/UART3 */
+# define SYS_PF_I2D (1 << 6) /* GPIO8/I2SDI */
+# define SYS_PF_I2S (1 << 5) /* I2S/GPIO[29:31] */
+# define SYS_PF_NI2 (1 << 4) /* NI2/GPIO[24:28] */
+# define SYS_PF_U0 (1 << 3) /* U0TXD/GPIO20 */
+# define SYS_PF_RD (1 << 2) /* IRTXD/GPIO19 */
+# define SYS_PF_A97 (1 << 1) /* AC97/SSL1 */
+# define SYS_PF_S0 (1 << 0) /* SSI_0/GPIO[16:18] */
+
+/* Au1100 only */
+# define SYS_PF_PC (1 << 18) /* PCMCIA/GPIO[207:204] */
+# define SYS_PF_LCD (1 << 17) /* extern lcd/GPIO[203:200] */
+# define SYS_PF_CS (1 << 16) /* EXTCLK0/32KHz to gpio2 */
+# define SYS_PF_EX0 (1 << 9) /* GPIO2/clock */
+
+/* Au1550 only. Redefines lots of pins */
+# define SYS_PF_PSC2_MASK (7 << 17)
+# define SYS_PF_PSC2_AC97 0
+# define SYS_PF_PSC2_SPI 0
+# define SYS_PF_PSC2_I2S (1 << 17)
+# define SYS_PF_PSC2_SMBUS (3 << 17)
+# define SYS_PF_PSC2_GPIO (7 << 17)
+# define SYS_PF_PSC3_MASK (7 << 20)
+# define SYS_PF_PSC3_AC97 0
+# define SYS_PF_PSC3_SPI 0
+# define SYS_PF_PSC3_I2S (1 << 20)
+# define SYS_PF_PSC3_SMBUS (3 << 20)
+# define SYS_PF_PSC3_GPIO (7 << 20)
+# define SYS_PF_PSC1_S1 (1 << 1)
+# define SYS_PF_MUST_BE_SET ((1 << 5) | (1 << 2))
+
+/* Au1200 only */
+#define SYS_PINFUNC_DMA (1 << 31)
+#define SYS_PINFUNC_S0A (1 << 30)
+#define SYS_PINFUNC_S1A (1 << 29)
+#define SYS_PINFUNC_LP0 (1 << 28)
+#define SYS_PINFUNC_LP1 (1 << 27)
+#define SYS_PINFUNC_LD16 (1 << 26)
+#define SYS_PINFUNC_LD8 (1 << 25)
+#define SYS_PINFUNC_LD1 (1 << 24)
+#define SYS_PINFUNC_LD0 (1 << 23)
+#define SYS_PINFUNC_P1A (3 << 21)
+#define SYS_PINFUNC_P1B (1 << 20)
+#define SYS_PINFUNC_FS3 (1 << 19)
+#define SYS_PINFUNC_P0A (3 << 17)
+#define SYS_PINFUNC_CS (1 << 16)
+#define SYS_PINFUNC_CIM (1 << 15)
+#define SYS_PINFUNC_P1C (1 << 14)
+#define SYS_PINFUNC_U1T (1 << 12)
+#define SYS_PINFUNC_U1R (1 << 11)
+#define SYS_PINFUNC_EX1 (1 << 10)
+#define SYS_PINFUNC_EX0 (1 << 9)
+#define SYS_PINFUNC_U0R (1 << 8)
+#define SYS_PINFUNC_MC (1 << 7)
+#define SYS_PINFUNC_S0B (1 << 6)
+#define SYS_PINFUNC_S0C (1 << 5)
+#define SYS_PINFUNC_P0B (1 << 4)
+#define SYS_PINFUNC_U0T (1 << 3)
+#define SYS_PINFUNC_S1B (1 << 2)
+
+/* Power Management */
+#define AU1000_SYS_SCRATCH0 0x18
+#define AU1000_SYS_SCRATCH1 0x1c
+#define AU1000_SYS_WAKEMSK 0x34
+#define AU1000_SYS_ENDIAN 0x38
+#define AU1000_SYS_POWERCTRL 0x3c
+#define AU1000_SYS_WAKESRC 0x5c
+#define AU1000_SYS_SLPPWR 0x78
+#define AU1000_SYS_SLEEP 0x7c
+
+#define SYS_WAKEMSK_D2 (1 << 9)
+#define SYS_WAKEMSK_M2 (1 << 8)
+#define SYS_WAKEMSK_GPIO(x) (1 << (x))
+
+/* Clock Controller */
+#define AU1000_SYS_FREQCTRL0 0x20
+#define AU1000_SYS_FREQCTRL1 0x24
+#define AU1000_SYS_CLKSRC 0x28
+#define AU1000_SYS_CPUPLL 0x60
+#define AU1000_SYS_AUXPLL 0x64
+#define AU1300_SYS_AUXPLL2 0x68
+
+
+/**********************************************************************/
+
+
+/* The PCI chip selects are outside the 32bit space, and since we can't
+ * just program the 36bit addresses into BARs, we have to take a chunk
+ * out of the 32bit space and reserve it for PCI. When these addresses
+ * are ioremap()ed, they'll be fixed up to the real 36bit address before
+ * being passed to the real ioremap function.
+ */
+#define ALCHEMY_PCI_MEMWIN_START (AU1500_PCI_MEM_PHYS_ADDR >> 4)
+#define ALCHEMY_PCI_MEMWIN_END (ALCHEMY_PCI_MEMWIN_START + 0x0FFFFFFF)
+
+/* for PCI IO it's simpler because we get to do the ioremap ourselves and then
+ * adjust the device's resources.
+ */
+#define ALCHEMY_PCI_IOWIN_START 0x00001000
+#define ALCHEMY_PCI_IOWIN_END 0x0000FFFF
+
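[Editor's note] With AU1500_PCI_MEM_PHYS_ADDR at 0x4_0000_0000, the reserved 32-bit window starts at 0x4000_0000. A sketch of the fixup described in the comment above; the name and exact bounds handling are assumptions, not the kernel's actual fixup routine:

static phys_addr_t example_fixup_pci_addr(phys_addr_t addr)
{
	/* translate the reserved 32-bit window into the real 36-bit region */
	if (addr >= ALCHEMY_PCI_MEMWIN_START && addr <= ALCHEMY_PCI_MEMWIN_END)
		return AU1500_PCI_MEM_PHYS_ADDR + (addr - ALCHEMY_PCI_MEMWIN_START);
	return addr;	/* everything else passes through unchanged */
}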
+#ifdef CONFIG_PCI
+
+#define IOPORT_RESOURCE_START 0x00001000 /* skip legacy probing */
+#define IOPORT_RESOURCE_END 0xffffffff
+#define IOMEM_RESOURCE_START 0x10000000
+#define IOMEM_RESOURCE_END 0xfffffffffULL
+
+#else
+
+/* Don't allow any legacy ports probing */
+#define IOPORT_RESOURCE_START 0x10000000
+#define IOPORT_RESOURCE_END 0xffffffff
+#define IOMEM_RESOURCE_START 0x10000000
+#define IOMEM_RESOURCE_END 0xfffffffffULL
+
+#endif
+
+/* PCI controller block register offsets */
+#define PCI_REG_CMEM 0x0000
+#define PCI_REG_CONFIG 0x0004
+#define PCI_REG_B2BMASK_CCH 0x0008
+#define PCI_REG_B2BBASE0_VID 0x000C
+#define PCI_REG_B2BBASE1_SID 0x0010
+#define PCI_REG_MWMASK_DEV 0x0014
+#define PCI_REG_MWBASE_REV_CCL 0x0018
+#define PCI_REG_ERR_ADDR 0x001C
+#define PCI_REG_SPEC_INTACK 0x0020
+#define PCI_REG_ID 0x0100
+#define PCI_REG_STATCMD 0x0104
+#define PCI_REG_CLASSREV 0x0108
+#define PCI_REG_PARAM 0x010C
+#define PCI_REG_MBAR 0x0110
+#define PCI_REG_TIMEOUT 0x0140
+
+/* PCI controller block register bits */
+#define PCI_CMEM_E (1 << 28) /* enable cacheable memory */
+#define PCI_CMEM_CMBASE(x) (((x) & 0x3fff) << 14)
+#define PCI_CMEM_CMMASK(x) ((x) & 0x3fff)
+#define PCI_CONFIG_ERD (1 << 27) /* pci error during R/W */
+#define PCI_CONFIG_ET (1 << 26) /* error in target mode */
+#define PCI_CONFIG_EF (1 << 25) /* fatal error */
+#define PCI_CONFIG_EP (1 << 24) /* parity error */
+#define PCI_CONFIG_EM (1 << 23) /* multiple errors */
+#define PCI_CONFIG_BM (1 << 22) /* bad master error */
+#define PCI_CONFIG_PD (1 << 20) /* PCI Disable */
+#define PCI_CONFIG_BME (1 << 19) /* Byte Mask Enable for reads */
+#define PCI_CONFIG_NC (1 << 16) /* mark mem access non-coherent */
+#define PCI_CONFIG_IA (1 << 15) /* INTA# enabled (target mode) */
+#define PCI_CONFIG_IP (1 << 13) /* int on PCI_PERR# */
+#define PCI_CONFIG_IS (1 << 12) /* int on PCI_SERR# */
+#define PCI_CONFIG_IMM (1 << 11) /* int on master abort */
+#define PCI_CONFIG_ITM (1 << 10) /* int on target abort (as master) */
+#define PCI_CONFIG_ITT (1 << 9) /* int on target abort (as target) */
+#define PCI_CONFIG_IPB (1 << 8) /* int on PERR# in bus master acc */
+#define PCI_CONFIG_SIC_NO (0 << 6) /* no byte mask changes */
+#define PCI_CONFIG_SIC_BA_ADR (1 << 6) /* on byte/hw acc, invert adr bits */
+#define PCI_CONFIG_SIC_HWA_DAT (2 << 6) /* on halfword acc, swap data */
+#define PCI_CONFIG_SIC_ALL (3 << 6) /* swap data bytes on all accesses */
+#define PCI_CONFIG_ST (1 << 5) /* swap data by target transactions */
+#define PCI_CONFIG_SM (1 << 4) /* swap data from PCI ctl */
+#define PCI_CONFIG_AEN (1 << 3) /* enable internal arbiter */
+#define PCI_CONFIG_R2H (1 << 2) /* REQ2# to hi-prio arbiter */
+#define PCI_CONFIG_R1H (1 << 1) /* REQ1# to hi-prio arbiter */
+#define PCI_CONFIG_CH (1 << 0) /* PCI ctl to hi-prio arbiter */
+#define PCI_B2BMASK_B2BMASK(x) (((x) & 0xffff) << 16)
+#define PCI_B2BMASK_CCH(x) ((x) & 0xffff) /* 16 upper bits of class code */
+#define PCI_B2BBASE0_VID_B0(x) (((x) & 0xffff) << 16)
+#define PCI_B2BBASE0_VID_SV(x) ((x) & 0xffff)
+#define PCI_B2BBASE1_SID_B1(x) (((x) & 0xffff) << 16)
+#define PCI_B2BBASE1_SID_SI(x) ((x) & 0xffff)
+#define PCI_MWMASKDEV_MWMASK(x) (((x) & 0xffff) << 16)
+#define PCI_MWMASKDEV_DEVID(x) ((x) & 0xffff)
+#define PCI_MWBASEREVCCL_BASE(x) (((x) & 0xffff) << 16)
+#define PCI_MWBASEREVCCL_REV(x) (((x) & 0xff) << 8)
+#define PCI_MWBASEREVCCL_CCL(x) ((x) & 0xff)
+#define PCI_ID_DID(x) (((x) & 0xffff) << 16)
+#define PCI_ID_VID(x) ((x) & 0xffff)
+#define PCI_STATCMD_STATUS(x) (((x) & 0xffff) << 16)
+#define PCI_STATCMD_CMD(x) ((x) & 0xffff)
+#define PCI_CLASSREV_CLASS(x) (((x) & 0x00ffffff) << 8)
+#define PCI_CLASSREV_REV(x) ((x) & 0xff)
+#define PCI_PARAM_BIST(x) (((x) & 0xff) << 24)
+#define PCI_PARAM_HT(x) (((x) & 0xff) << 16)
+#define PCI_PARAM_LT(x) (((x) & 0xff) << 8)
+#define PCI_PARAM_CLS(x) ((x) & 0xff)
+#define PCI_TIMEOUT_RETRIES(x) (((x) & 0xff) << 8) /* max retries */
+#define PCI_TIMEOUT_TO(x) ((x) & 0xff) /* target ready timeout */
+
+
+/**********************************************************************/
+
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+#include <linux/delay.h>
+#include <linux/types.h>
+
+#include <linux/io.h>
+#include <linux/irq.h>
+
+#include <asm/cpu.h>
+
+/* helpers to access the SYS_* registers */
+static inline unsigned long alchemy_rdsys(int regofs)
+{
+ void __iomem *b = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR);
+
+ return __raw_readl(b + regofs);
+}
+
+static inline void alchemy_wrsys(unsigned long v, int regofs)
+{
+ void __iomem *b = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR);
+
+ __raw_writel(v, b + regofs);
+ wmb(); /* drain writebuffer */
+}
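[Editor's note] For instance, a driver would read the aux PLL setting with a plain alchemy_rdsys(AU1000_SYS_AUXPLL) and write it back through alchemy_wrsys(); the wmb() in the write helper keeps the store from lingering in the write buffer.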
+
+/* helpers to access static memctrl registers */
+static inline unsigned long alchemy_rdsmem(int regofs)
+{
+ void __iomem *b = (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
+
+ return __raw_readl(b + regofs);
+}
+
+static inline void alchemy_wrsmem(unsigned long v, int regofs)
+{
+ void __iomem *b = (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
+
+ __raw_writel(v, b + regofs);
+ wmb(); /* drain writebuffer */
+}
+
+/* Early Au1000 revisions have a write-only SYS_CPUPLL register. */
+static inline int au1xxx_cpu_has_pll_wo(void)
+{
+ switch (read_c0_prid()) {
+ case 0x00030100: /* Au1000 DA */
+ case 0x00030201: /* Au1000 HA */
+ case 0x00030202: /* Au1000 HB */
+ return 1;
+ }
+ return 0;
+}
+
+/* does CPU need CONFIG[OD] set to fix tons of errata? */
+static inline int au1xxx_cpu_needs_config_od(void)
+{
+ /*
+ * c0_config.od (bit 19) was write only (and read as 0) on the
+ * early revisions of Alchemy SOCs. It disables the bus trans-
+ * action overlapping and needs to be set to fix various errata.
+ */
+ switch (read_c0_prid()) {
+ case 0x00030100: /* Au1000 DA */
+ case 0x00030201: /* Au1000 HA */
+ case 0x00030202: /* Au1000 HB */
+ case 0x01030200: /* Au1500 AB */
+ /*
+ * The Au1100/Au1200 errata are actually silent about this bit,
+ * so we set it just in case for those revisions that require
+ * it to be set according to the (now gone) cpu_table.
+ */
+ case 0x02030200: /* Au1100 AB */
+ case 0x02030201: /* Au1100 BA */
+ case 0x02030202: /* Au1100 BC */
+ case 0x04030201: /* Au1200 AC */
+ return 1;
+ }
+ return 0;
+}
+
+#define ALCHEMY_CPU_UNKNOWN -1
+#define ALCHEMY_CPU_AU1000 0
+#define ALCHEMY_CPU_AU1500 1
+#define ALCHEMY_CPU_AU1100 2
+#define ALCHEMY_CPU_AU1550 3
+#define ALCHEMY_CPU_AU1200 4
+#define ALCHEMY_CPU_AU1300 5
+
+static inline int alchemy_get_cputype(void)
+{
+ switch (read_c0_prid() & (PRID_OPT_MASK | PRID_COMP_MASK)) {
+ case 0x00030000:
+ return ALCHEMY_CPU_AU1000;
+ case 0x01030000:
+ return ALCHEMY_CPU_AU1500;
+ case 0x02030000:
+ return ALCHEMY_CPU_AU1100;
+ case 0x03030000:
+ return ALCHEMY_CPU_AU1550;
+ case 0x04030000:
+ case 0x05030000:
+ return ALCHEMY_CPU_AU1200;
+ case 0x800c0000:
+ return ALCHEMY_CPU_AU1300;
+ }
+
+ return ALCHEMY_CPU_UNKNOWN;
+}
+
+/* return number of uarts on a given cputype */
+static inline int alchemy_get_uarts(int type)
+{
+ switch (type) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1300:
+ return 4;
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1200:
+ return 2;
+ case ALCHEMY_CPU_AU1100:
+ case ALCHEMY_CPU_AU1550:
+ return 3;
+ }
+ return 0;
+}
+
+/* enable a UART block if it isn't already enabled */
+static inline void alchemy_uart_enable(u32 uart_phys)
+{
+ void __iomem *addr = (void __iomem *)KSEG1ADDR(uart_phys);
+
+ /* reset, enable clock, deassert reset */
+ if ((__raw_readl(addr + 0x100) & 3) != 3) {
+ __raw_writel(0, addr + 0x100);
+ wmb(); /* drain writebuffer */
+ __raw_writel(1, addr + 0x100);
+ wmb(); /* drain writebuffer */
+ }
+ __raw_writel(3, addr + 0x100);
+ wmb(); /* drain writebuffer */
+}
+
+static inline void alchemy_uart_disable(u32 uart_phys)
+{
+ void __iomem *addr = (void __iomem *)KSEG1ADDR(uart_phys);
+
+ __raw_writel(0, addr + 0x100); /* UART_MOD_CNTRL */
+ wmb(); /* drain writebuffer */
+}
+
+static inline void alchemy_uart_putchar(u32 uart_phys, u8 c)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(uart_phys);
+ int timeout, i;
+
+ /* check LSR TX_EMPTY bit */
+ timeout = 0xffffff;
+ do {
+ if (__raw_readl(base + 0x1c) & 0x20)
+ break;
+ /* slow down */
+ for (i = 10000; i; i--)
+ asm volatile ("nop");
+ } while (--timeout);
+
+ __raw_writel(c, base + 0x04); /* tx */
+ wmb(); /* drain writebuffer */
+}
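[Editor's note] A board's early console hook can sit directly on this helper. A sketch only: the prom_putchar name, signature and choice of UART0 are illustrative assumptions, not mandated by this header:

void prom_putchar(char c)
{
	alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, (u8)c);
}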
+
+/* return number of ethernet MACs on a given cputype */
+static inline int alchemy_get_macs(int type)
+{
+ switch (type) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1550:
+ return 2;
+ case ALCHEMY_CPU_AU1100:
+ return 1;
+ }
+ return 0;
+}
+
+/* PM: arch/mips/alchemy/common/sleeper.S, power.c, irq.c */
+void alchemy_sleep_au1000(void);
+void alchemy_sleep_au1550(void);
+void alchemy_sleep_au1300(void);
+void au_sleep(void);
+
+/* USB: arch/mips/alchemy/common/usb.c */
+enum alchemy_usb_block {
+ ALCHEMY_USB_OHCI0,
+ ALCHEMY_USB_UDC0,
+ ALCHEMY_USB_EHCI0,
+ ALCHEMY_USB_OTG0,
+ ALCHEMY_USB_OHCI1,
+};
+int alchemy_usb_control(int block, int enable);
+
+/* PCI controller platform data */
+struct alchemy_pci_platdata {
+ int (*board_map_irq)(const struct pci_dev *d, u8 slot, u8 pin);
+ int (*board_pci_idsel)(unsigned int devsel, int assert);
+ /* bits to set/clear in PCI_CONFIG register */
+ unsigned long pci_cfg_set;
+ unsigned long pci_cfg_clr;
+};
+
+/* The IrDA peripheral has an IRFIRSEL pin, but on the DB/PB boards it's
+ * not used to select FIR/SIR mode on the transceiver but as a GPIO.
+ * Instead a CPLD has to be told about the mode. The driver calls the
+ * set_phy_mode() function in addition to driving the IRFIRSEL pin.
+ */
+#define AU1000_IRDA_PHY_MODE_OFF 0
+#define AU1000_IRDA_PHY_MODE_SIR 1
+#define AU1000_IRDA_PHY_MODE_FIR 2
+
+struct au1k_irda_platform_data {
+ void (*set_phy_mode)(int mode);
+};
+
+
+/* Multifunction pins: Each of these pins can either be assigned to the
+ * GPIO controller or an on-chip peripheral.
+ * Call "au1300_pinfunc_to_dev()" or "au1300_pinfunc_to_gpio()" to
+ * assign one of these to either the GPIO controller or the device.
+ */
+enum au1300_multifunc_pins {
+ /* wake-from-str pins 0-3 */
+ AU1300_PIN_WAKE0 = 0, AU1300_PIN_WAKE1, AU1300_PIN_WAKE2,
+ AU1300_PIN_WAKE3,
+ /* external clock sources for PSCs: 4-5 */
+ AU1300_PIN_EXTCLK0, AU1300_PIN_EXTCLK1,
+ /* 8bit MMC interface on SD0: 6-9 */
+ AU1300_PIN_SD0DAT4, AU1300_PIN_SD0DAT5, AU1300_PIN_SD0DAT6,
+ AU1300_PIN_SD0DAT7,
+ /* aux clk input for freqgen 3: 10 */
+ AU1300_PIN_FG3AUX,
+ /* UART1 pins: 11-18 */
+ AU1300_PIN_U1RI, AU1300_PIN_U1DCD, AU1300_PIN_U1DSR,
+ AU1300_PIN_U1CTS, AU1300_PIN_U1RTS, AU1300_PIN_U1DTR,
+ AU1300_PIN_U1RX, AU1300_PIN_U1TX,
+ /* UART0 pins: 19-24 */
+ AU1300_PIN_U0RI, AU1300_PIN_U0DCD, AU1300_PIN_U0DSR,
+ AU1300_PIN_U0CTS, AU1300_PIN_U0RTS, AU1300_PIN_U0DTR,
+ /* UART2: 25-26 */
+ AU1300_PIN_U2RX, AU1300_PIN_U2TX,
+ /* UART3: 27-28 */
+ AU1300_PIN_U3RX, AU1300_PIN_U3TX,
+ /* LCD controller PWMs, ext pixclock: 29-31 */
+ AU1300_PIN_LCDPWM0, AU1300_PIN_LCDPWM1, AU1300_PIN_LCDCLKIN,
+ /* SD1 interface: 32-37 */
+ AU1300_PIN_SD1DAT0, AU1300_PIN_SD1DAT1, AU1300_PIN_SD1DAT2,
+ AU1300_PIN_SD1DAT3, AU1300_PIN_SD1CMD, AU1300_PIN_SD1CLK,
+ /* SD2 interface: 38-43 */
+ AU1300_PIN_SD2DAT0, AU1300_PIN_SD2DAT1, AU1300_PIN_SD2DAT2,
+ AU1300_PIN_SD2DAT3, AU1300_PIN_SD2CMD, AU1300_PIN_SD2CLK,
+ /* PSC0/1 clocks: 44-45 */
+ AU1300_PIN_PSC0CLK, AU1300_PIN_PSC1CLK,
+ /* PSCs: 46-49/50-53/54-57/58-61 */
+ AU1300_PIN_PSC0SYNC0, AU1300_PIN_PSC0SYNC1, AU1300_PIN_PSC0D0,
+ AU1300_PIN_PSC0D1,
+ AU1300_PIN_PSC1SYNC0, AU1300_PIN_PSC1SYNC1, AU1300_PIN_PSC1D0,
+ AU1300_PIN_PSC1D1,
+ AU1300_PIN_PSC2SYNC0, AU1300_PIN_PSC2SYNC1, AU1300_PIN_PSC2D0,
+ AU1300_PIN_PSC2D1,
+ AU1300_PIN_PSC3SYNC0, AU1300_PIN_PSC3SYNC1, AU1300_PIN_PSC3D0,
+ AU1300_PIN_PSC3D1,
+ /* PCMCIA interface: 62-70 */
+ AU1300_PIN_PCE2, AU1300_PIN_PCE1, AU1300_PIN_PIOS16,
+ AU1300_PIN_PIOR, AU1300_PIN_PWE, AU1300_PIN_PWAIT,
+ AU1300_PIN_PREG, AU1300_PIN_POE, AU1300_PIN_PIOW,
+ /* camera interface H/V sync inputs: 71-72 */
+ AU1300_PIN_CIMLS, AU1300_PIN_CIMFS,
+ /* PSC2/3 clocks: 73-74 */
+ AU1300_PIN_PSC2CLK, AU1300_PIN_PSC3CLK,
+};
+
+/* GPIC (Au1300) pin management: arch/mips/alchemy/common/gpioint.c */
+extern void au1300_pinfunc_to_gpio(enum au1300_multifunc_pins gpio);
+extern void au1300_pinfunc_to_dev(enum au1300_multifunc_pins gpio);
+extern void au1300_set_irq_priority(unsigned int irq, int p);
+extern void au1300_set_dbdma_gpio(int dchan, unsigned int gpio);
+
+/* Au1300 allows certain blocks to be disconnected from the internal power supply */
+enum au1300_vss_block {
+ AU1300_VSS_MPE = 0,
+ AU1300_VSS_BSA,
+ AU1300_VSS_GPE,
+ AU1300_VSS_MGP,
+};
+
+extern void au1300_vss_block_control(int block, int enable);
+
+enum soc_au1000_ints {
+ AU1000_FIRST_INT = AU1000_INTC0_INT_BASE,
+ AU1000_UART0_INT = AU1000_FIRST_INT,
+ AU1000_UART1_INT,
+ AU1000_UART2_INT,
+ AU1000_UART3_INT,
+ AU1000_SSI0_INT,
+ AU1000_SSI1_INT,
+ AU1000_DMA_INT_BASE,
+
+ AU1000_TOY_INT = AU1000_FIRST_INT + 14,
+ AU1000_TOY_MATCH0_INT,
+ AU1000_TOY_MATCH1_INT,
+ AU1000_TOY_MATCH2_INT,
+ AU1000_RTC_INT,
+ AU1000_RTC_MATCH0_INT,
+ AU1000_RTC_MATCH1_INT,
+ AU1000_RTC_MATCH2_INT,
+ AU1000_IRDA_TX_INT,
+ AU1000_IRDA_RX_INT,
+ AU1000_USB_DEV_REQ_INT,
+ AU1000_USB_DEV_SUS_INT,
+ AU1000_USB_HOST_INT,
+ AU1000_ACSYNC_INT,
+ AU1000_MAC0_DMA_INT,
+ AU1000_MAC1_DMA_INT,
+ AU1000_I2S_UO_INT,
+ AU1000_AC97C_INT,
+ AU1000_GPIO0_INT,
+ AU1000_GPIO1_INT,
+ AU1000_GPIO2_INT,
+ AU1000_GPIO3_INT,
+ AU1000_GPIO4_INT,
+ AU1000_GPIO5_INT,
+ AU1000_GPIO6_INT,
+ AU1000_GPIO7_INT,
+ AU1000_GPIO8_INT,
+ AU1000_GPIO9_INT,
+ AU1000_GPIO10_INT,
+ AU1000_GPIO11_INT,
+ AU1000_GPIO12_INT,
+ AU1000_GPIO13_INT,
+ AU1000_GPIO14_INT,
+ AU1000_GPIO15_INT,
+ AU1000_GPIO16_INT,
+ AU1000_GPIO17_INT,
+ AU1000_GPIO18_INT,
+ AU1000_GPIO19_INT,
+ AU1000_GPIO20_INT,
+ AU1000_GPIO21_INT,
+ AU1000_GPIO22_INT,
+ AU1000_GPIO23_INT,
+ AU1000_GPIO24_INT,
+ AU1000_GPIO25_INT,
+ AU1000_GPIO26_INT,
+ AU1000_GPIO27_INT,
+ AU1000_GPIO28_INT,
+ AU1000_GPIO29_INT,
+ AU1000_GPIO30_INT,
+ AU1000_GPIO31_INT,
+};
+
+enum soc_au1100_ints {
+ AU1100_FIRST_INT = AU1000_INTC0_INT_BASE,
+ AU1100_UART0_INT = AU1100_FIRST_INT,
+ AU1100_UART1_INT,
+ AU1100_SD_INT,
+ AU1100_UART3_INT,
+ AU1100_SSI0_INT,
+ AU1100_SSI1_INT,
+ AU1100_DMA_INT_BASE,
+
+ AU1100_TOY_INT = AU1100_FIRST_INT + 14,
+ AU1100_TOY_MATCH0_INT,
+ AU1100_TOY_MATCH1_INT,
+ AU1100_TOY_MATCH2_INT,
+ AU1100_RTC_INT,
+ AU1100_RTC_MATCH0_INT,
+ AU1100_RTC_MATCH1_INT,
+ AU1100_RTC_MATCH2_INT,
+ AU1100_IRDA_TX_INT,
+ AU1100_IRDA_RX_INT,
+ AU1100_USB_DEV_REQ_INT,
+ AU1100_USB_DEV_SUS_INT,
+ AU1100_USB_HOST_INT,
+ AU1100_ACSYNC_INT,
+ AU1100_MAC0_DMA_INT,
+ AU1100_GPIO208_215_INT,
+ AU1100_LCD_INT,
+ AU1100_AC97C_INT,
+ AU1100_GPIO0_INT,
+ AU1100_GPIO1_INT,
+ AU1100_GPIO2_INT,
+ AU1100_GPIO3_INT,
+ AU1100_GPIO4_INT,
+ AU1100_GPIO5_INT,
+ AU1100_GPIO6_INT,
+ AU1100_GPIO7_INT,
+ AU1100_GPIO8_INT,
+ AU1100_GPIO9_INT,
+ AU1100_GPIO10_INT,
+ AU1100_GPIO11_INT,
+ AU1100_GPIO12_INT,
+ AU1100_GPIO13_INT,
+ AU1100_GPIO14_INT,
+ AU1100_GPIO15_INT,
+ AU1100_GPIO16_INT,
+ AU1100_GPIO17_INT,
+ AU1100_GPIO18_INT,
+ AU1100_GPIO19_INT,
+ AU1100_GPIO20_INT,
+ AU1100_GPIO21_INT,
+ AU1100_GPIO22_INT,
+ AU1100_GPIO23_INT,
+ AU1100_GPIO24_INT,
+ AU1100_GPIO25_INT,
+ AU1100_GPIO26_INT,
+ AU1100_GPIO27_INT,
+ AU1100_GPIO28_INT,
+ AU1100_GPIO29_INT,
+ AU1100_GPIO30_INT,
+ AU1100_GPIO31_INT,
+};
+
+enum soc_au1500_ints {
+ AU1500_FIRST_INT = AU1000_INTC0_INT_BASE,
+ AU1500_UART0_INT = AU1500_FIRST_INT,
+ AU1500_PCI_INTA,
+ AU1500_PCI_INTB,
+ AU1500_UART3_INT,
+ AU1500_PCI_INTC,
+ AU1500_PCI_INTD,
+ AU1500_DMA_INT_BASE,
+
+ AU1500_TOY_INT = AU1500_FIRST_INT + 14,
+ AU1500_TOY_MATCH0_INT,
+ AU1500_TOY_MATCH1_INT,
+ AU1500_TOY_MATCH2_INT,
+ AU1500_RTC_INT,
+ AU1500_RTC_MATCH0_INT,
+ AU1500_RTC_MATCH1_INT,
+ AU1500_RTC_MATCH2_INT,
+ AU1500_PCI_ERR_INT,
+ AU1500_RESERVED_INT,
+ AU1500_USB_DEV_REQ_INT,
+ AU1500_USB_DEV_SUS_INT,
+ AU1500_USB_HOST_INT,
+ AU1500_ACSYNC_INT,
+ AU1500_MAC0_DMA_INT,
+ AU1500_MAC1_DMA_INT,
+ AU1500_AC97C_INT = AU1500_FIRST_INT + 31,
+ AU1500_GPIO0_INT,
+ AU1500_GPIO1_INT,
+ AU1500_GPIO2_INT,
+ AU1500_GPIO3_INT,
+ AU1500_GPIO4_INT,
+ AU1500_GPIO5_INT,
+ AU1500_GPIO6_INT,
+ AU1500_GPIO7_INT,
+ AU1500_GPIO8_INT,
+ AU1500_GPIO9_INT,
+ AU1500_GPIO10_INT,
+ AU1500_GPIO11_INT,
+ AU1500_GPIO12_INT,
+ AU1500_GPIO13_INT,
+ AU1500_GPIO14_INT,
+ AU1500_GPIO15_INT,
+ AU1500_GPIO200_INT,
+ AU1500_GPIO201_INT,
+ AU1500_GPIO202_INT,
+ AU1500_GPIO203_INT,
+ AU1500_GPIO20_INT,
+ AU1500_GPIO204_INT,
+ AU1500_GPIO205_INT,
+ AU1500_GPIO23_INT,
+ AU1500_GPIO24_INT,
+ AU1500_GPIO25_INT,
+ AU1500_GPIO26_INT,
+ AU1500_GPIO27_INT,
+ AU1500_GPIO28_INT,
+ AU1500_GPIO206_INT,
+ AU1500_GPIO207_INT,
+ AU1500_GPIO208_215_INT,
+};
+
+enum soc_au1550_ints {
+ AU1550_FIRST_INT = AU1000_INTC0_INT_BASE,
+ AU1550_UART0_INT = AU1550_FIRST_INT,
+ AU1550_PCI_INTA,
+ AU1550_PCI_INTB,
+ AU1550_DDMA_INT,
+ AU1550_CRYPTO_INT,
+ AU1550_PCI_INTC,
+ AU1550_PCI_INTD,
+ AU1550_PCI_RST_INT,
+ AU1550_UART1_INT,
+ AU1550_UART3_INT,
+ AU1550_PSC0_INT,
+ AU1550_PSC1_INT,
+ AU1550_PSC2_INT,
+ AU1550_PSC3_INT,
+ AU1550_TOY_INT,
+ AU1550_TOY_MATCH0_INT,
+ AU1550_TOY_MATCH1_INT,
+ AU1550_TOY_MATCH2_INT,
+ AU1550_RTC_INT,
+ AU1550_RTC_MATCH0_INT,
+ AU1550_RTC_MATCH1_INT,
+ AU1550_RTC_MATCH2_INT,
+
+ AU1550_NAND_INT = AU1550_FIRST_INT + 23,
+ AU1550_USB_DEV_REQ_INT,
+ AU1550_USB_DEV_SUS_INT,
+ AU1550_USB_HOST_INT,
+ AU1550_MAC0_DMA_INT,
+ AU1550_MAC1_DMA_INT,
+ AU1550_GPIO0_INT = AU1550_FIRST_INT + 32,
+ AU1550_GPIO1_INT,
+ AU1550_GPIO2_INT,
+ AU1550_GPIO3_INT,
+ AU1550_GPIO4_INT,
+ AU1550_GPIO5_INT,
+ AU1550_GPIO6_INT,
+ AU1550_GPIO7_INT,
+ AU1550_GPIO8_INT,
+ AU1550_GPIO9_INT,
+ AU1550_GPIO10_INT,
+ AU1550_GPIO11_INT,
+ AU1550_GPIO12_INT,
+ AU1550_GPIO13_INT,
+ AU1550_GPIO14_INT,
+ AU1550_GPIO15_INT,
+ AU1550_GPIO200_INT,
+ AU1550_GPIO201_205_INT, /* Logical or of GPIO201:205 */
+ AU1550_GPIO16_INT,
+ AU1550_GPIO17_INT,
+ AU1550_GPIO20_INT,
+ AU1550_GPIO21_INT,
+ AU1550_GPIO22_INT,
+ AU1550_GPIO23_INT,
+ AU1550_GPIO24_INT,
+ AU1550_GPIO25_INT,
+ AU1550_GPIO26_INT,
+ AU1550_GPIO27_INT,
+ AU1550_GPIO28_INT,
+ AU1550_GPIO206_INT,
+ AU1550_GPIO207_INT,
+ AU1550_GPIO208_215_INT, /* Logical or of GPIO208:215 */
+};
+
+enum soc_au1200_ints {
+ AU1200_FIRST_INT = AU1000_INTC0_INT_BASE,
+ AU1200_UART0_INT = AU1200_FIRST_INT,
+ AU1200_SWT_INT,
+ AU1200_SD_INT,
+ AU1200_DDMA_INT,
+ AU1200_MAE_BE_INT,
+ AU1200_GPIO200_INT,
+ AU1200_GPIO201_INT,
+ AU1200_GPIO202_INT,
+ AU1200_UART1_INT,
+ AU1200_MAE_FE_INT,
+ AU1200_PSC0_INT,
+ AU1200_PSC1_INT,
+ AU1200_AES_INT,
+ AU1200_CAMERA_INT,
+ AU1200_TOY_INT,
+ AU1200_TOY_MATCH0_INT,
+ AU1200_TOY_MATCH1_INT,
+ AU1200_TOY_MATCH2_INT,
+ AU1200_RTC_INT,
+ AU1200_RTC_MATCH0_INT,
+ AU1200_RTC_MATCH1_INT,
+ AU1200_RTC_MATCH2_INT,
+ AU1200_GPIO203_INT,
+ AU1200_NAND_INT,
+ AU1200_GPIO204_INT,
+ AU1200_GPIO205_INT,
+ AU1200_GPIO206_INT,
+ AU1200_GPIO207_INT,
+ AU1200_GPIO208_215_INT, /* Logical OR of 208:215 */
+ AU1200_USB_INT,
+ AU1200_LCD_INT,
+ AU1200_MAE_BOTH_INT,
+ AU1200_GPIO0_INT,
+ AU1200_GPIO1_INT,
+ AU1200_GPIO2_INT,
+ AU1200_GPIO3_INT,
+ AU1200_GPIO4_INT,
+ AU1200_GPIO5_INT,
+ AU1200_GPIO6_INT,
+ AU1200_GPIO7_INT,
+ AU1200_GPIO8_INT,
+ AU1200_GPIO9_INT,
+ AU1200_GPIO10_INT,
+ AU1200_GPIO11_INT,
+ AU1200_GPIO12_INT,
+ AU1200_GPIO13_INT,
+ AU1200_GPIO14_INT,
+ AU1200_GPIO15_INT,
+ AU1200_GPIO16_INT,
+ AU1200_GPIO17_INT,
+ AU1200_GPIO18_INT,
+ AU1200_GPIO19_INT,
+ AU1200_GPIO20_INT,
+ AU1200_GPIO21_INT,
+ AU1200_GPIO22_INT,
+ AU1200_GPIO23_INT,
+ AU1200_GPIO24_INT,
+ AU1200_GPIO25_INT,
+ AU1200_GPIO26_INT,
+ AU1200_GPIO27_INT,
+ AU1200_GPIO28_INT,
+ AU1200_GPIO29_INT,
+ AU1200_GPIO30_INT,
+ AU1200_GPIO31_INT,
+};
+
+#endif /* !defined (_LANGUAGE_ASSEMBLY) */
+
+#endif
diff --git a/arch/mips/include/asm/mach-au1x00/au1000_dma.h b/arch/mips/include/asm/mach-au1x00/au1000_dma.h
new file mode 100644
index 000000000..0a0cd4270
--- /dev/null
+++ b/arch/mips/include/asm/mach-au1x00/au1000_dma.h
@@ -0,0 +1,453 @@
+/*
+ * BRIEF MODULE DESCRIPTION
+ * Defines for using and allocating DMA channels on the Alchemy
+ * Au1x00 MIPS processors.
+ *
+ * Copyright 2000, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#ifndef __ASM_AU1000_DMA_H
+#define __ASM_AU1000_DMA_H
+
+#include <linux/io.h> /* need byte IO */
+#include <linux/spinlock.h> /* And spinlocks */
+#include <linux/delay.h>
+
+#define NUM_AU1000_DMA_CHANNELS 8
+
+/* DMA Channel Register Offsets */
+#define DMA_MODE_SET 0x00000000
+#define DMA_MODE_READ DMA_MODE_SET
+#define DMA_MODE_CLEAR 0x00000004
+/* DMA Mode register bits follow */
+#define DMA_DAH_MASK (0x0f << 20)
+#define DMA_DID_BIT 16
+#define DMA_DID_MASK (0x0f << DMA_DID_BIT)
+#define DMA_DS (1 << 15)
+#define DMA_BE (1 << 13)
+#define DMA_DR (1 << 12)
+#define DMA_TS8 (1 << 11)
+#define DMA_DW_BIT 9
+#define DMA_DW_MASK (0x03 << DMA_DW_BIT)
+#define DMA_DW8 (0 << DMA_DW_BIT)
+#define DMA_DW16 (1 << DMA_DW_BIT)
+#define DMA_DW32 (2 << DMA_DW_BIT)
+#define DMA_NC (1 << 8)
+#define DMA_IE (1 << 7)
+#define DMA_HALT (1 << 6)
+#define DMA_GO (1 << 5)
+#define DMA_AB (1 << 4)
+#define DMA_D1 (1 << 3)
+#define DMA_BE1 (1 << 2)
+#define DMA_D0 (1 << 1)
+#define DMA_BE0 (1 << 0)
+
+#define DMA_PERIPHERAL_ADDR 0x00000008
+#define DMA_BUFFER0_START 0x0000000C
+#define DMA_BUFFER1_START 0x00000014
+#define DMA_BUFFER0_COUNT 0x00000010
+#define DMA_BUFFER1_COUNT 0x00000018
+#define DMA_BAH_BIT 16
+#define DMA_BAH_MASK (0x0f << DMA_BAH_BIT)
+#define DMA_COUNT_BIT 0
+#define DMA_COUNT_MASK (0xffff << DMA_COUNT_BIT)
+
+/* DMA Device IDs follow */
+enum {
+ DMA_ID_UART0_TX = 0,
+ DMA_ID_UART0_RX,
+ DMA_ID_GP04,
+ DMA_ID_GP05,
+ DMA_ID_AC97C_TX,
+ DMA_ID_AC97C_RX,
+ DMA_ID_UART3_TX,
+ DMA_ID_UART3_RX,
+ DMA_ID_USBDEV_EP0_RX,
+ DMA_ID_USBDEV_EP0_TX,
+ DMA_ID_USBDEV_EP2_TX,
+ DMA_ID_USBDEV_EP3_TX,
+ DMA_ID_USBDEV_EP4_RX,
+ DMA_ID_USBDEV_EP5_RX,
+ DMA_ID_I2S_TX,
+ DMA_ID_I2S_RX,
+ DMA_NUM_DEV
+};
+
+/* DMA Device ID's for 2nd bank (AU1100) follow */
+enum {
+ DMA_ID_SD0_TX = 0,
+ DMA_ID_SD0_RX,
+ DMA_ID_SD1_TX,
+ DMA_ID_SD1_RX,
+ DMA_NUM_DEV_BANK2
+};
+
+struct dma_chan {
+ int dev_id; /* this channel is allocated if >= 0, */
+ /* free otherwise */
+ void __iomem *io;
+ const char *dev_str;
+ int irq;
+ void *irq_dev;
+ unsigned int fifo_addr;
+ unsigned int mode;
+};
+
+/* These are in arch/mips/au1000/common/dma.c */
+extern struct dma_chan au1000_dma_table[];
+extern int request_au1000_dma(int dev_id,
+ const char *dev_str,
+ irq_handler_t irqhandler,
+ unsigned long irqflags,
+ void *irq_dev_id);
+extern void free_au1000_dma(unsigned int dmanr);
+extern int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
+ int length, int *eof, void *data);
+extern void dump_au1000_dma_channel(unsigned int dmanr);
+extern spinlock_t au1000_dma_spin_lock;
+
+static inline struct dma_chan *get_dma_chan(unsigned int dmanr)
+{
+ if (dmanr >= NUM_AU1000_DMA_CHANNELS ||
+ au1000_dma_table[dmanr].dev_id < 0)
+ return NULL;
+ return &au1000_dma_table[dmanr];
+}
+
+static inline unsigned long claim_dma_lock(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&au1000_dma_spin_lock, flags);
+ return flags;
+}
+
+static inline void release_dma_lock(unsigned long flags)
+{
+ spin_unlock_irqrestore(&au1000_dma_spin_lock, flags);
+}
+
+/*
+ * Set the DMA buffer enable bits in the mode register.
+ */
+static inline void enable_dma_buffer0(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return;
+ __raw_writel(DMA_BE0, chan->io + DMA_MODE_SET);
+}
+
+static inline void enable_dma_buffer1(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return;
+ __raw_writel(DMA_BE1, chan->io + DMA_MODE_SET);
+}
+static inline void enable_dma_buffers(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return;
+ __raw_writel(DMA_BE0 | DMA_BE1, chan->io + DMA_MODE_SET);
+}
+
+static inline void start_dma(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return;
+ __raw_writel(DMA_GO, chan->io + DMA_MODE_SET);
+}
+
+#define DMA_HALT_POLL 0x5000
+
+static inline void halt_dma(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+ int i;
+
+ if (!chan)
+ return;
+ __raw_writel(DMA_GO, chan->io + DMA_MODE_CLEAR);
+
+ /* Poll the halt bit */
+ for (i = 0; i < DMA_HALT_POLL; i++)
+ if (__raw_readl(chan->io + DMA_MODE_READ) & DMA_HALT)
+ break;
+ if (i == DMA_HALT_POLL)
+ printk(KERN_INFO "halt_dma: HALT poll expired!\n");
+}
+
+static inline void disable_dma(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return;
+
+ halt_dma(dmanr);
+
+ /* Now we can disable the buffers */
+ __raw_writel(~DMA_GO, chan->io + DMA_MODE_CLEAR);
+}
+
+static inline int dma_halted(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return 1;
+ return (__raw_readl(chan->io + DMA_MODE_READ) & DMA_HALT) ? 1 : 0;
+}
+
+/* Initialize a DMA channel. */
+static inline void init_dma(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+ u32 mode;
+
+ if (!chan)
+ return;
+
+ disable_dma(dmanr);
+
+ /* Set device FIFO address */
+ __raw_writel(CPHYSADDR(chan->fifo_addr), chan->io + DMA_PERIPHERAL_ADDR);
+
+ mode = chan->mode | (chan->dev_id << DMA_DID_BIT);
+ if (chan->irq)
+ mode |= DMA_IE;
+
+ __raw_writel(~mode, chan->io + DMA_MODE_CLEAR);
+ __raw_writel(mode, chan->io + DMA_MODE_SET);
+}
+
+/*
+ * Set mode for a specific DMA channel
+ */
+static inline void set_dma_mode(unsigned int dmanr, unsigned int mode)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return;
+ /*
+	 * set_dma_mode is only allowed to change endianness, direction,
+ * transfer size, device FIFO width, and coherency settings.
+ * Make sure anything else is masked off.
+ */
+ mode &= (DMA_BE | DMA_DR | DMA_TS8 | DMA_DW_MASK | DMA_NC);
+ chan->mode &= ~(DMA_BE | DMA_DR | DMA_TS8 | DMA_DW_MASK | DMA_NC);
+ chan->mode |= mode;
+}
+
+static inline unsigned int get_dma_mode(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return 0;
+ return chan->mode;
+}
+
+static inline int get_dma_active_buffer(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return -1;
+ return (__raw_readl(chan->io + DMA_MODE_READ) & DMA_AB) ? 1 : 0;
+}
+
+/*
+ * Set the device FIFO address for a specific DMA channel - only
+ * applicable to GP04 and GP05. All the other devices have fixed
+ * FIFO addresses.
+ */
+static inline void set_dma_fifo_addr(unsigned int dmanr, unsigned int a)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return;
+
+ if (chan->mode & DMA_DS) /* second bank of device IDs */
+ return;
+
+ if (chan->dev_id != DMA_ID_GP04 && chan->dev_id != DMA_ID_GP05)
+ return;
+
+ __raw_writel(CPHYSADDR(a), chan->io + DMA_PERIPHERAL_ADDR);
+}
+
+/*
+ * Clear the DMA buffer done bits in the mode register.
+ */
+static inline void clear_dma_done0(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return;
+ __raw_writel(DMA_D0, chan->io + DMA_MODE_CLEAR);
+}
+
+static inline void clear_dma_done1(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return;
+ __raw_writel(DMA_D1, chan->io + DMA_MODE_CLEAR);
+}
+
+/*
+ * This does nothing - not applicable to Au1000 DMA.
+ */
+static inline void set_dma_page(unsigned int dmanr, char pagenr)
+{
+}
+
+/*
+ * Set Buffer 0 transfer address for specific DMA channel.
+ */
+static inline void set_dma_addr0(unsigned int dmanr, unsigned int a)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return;
+ __raw_writel(a, chan->io + DMA_BUFFER0_START);
+}
+
+/*
+ * Set Buffer 1 transfer address for specific DMA channel.
+ */
+static inline void set_dma_addr1(unsigned int dmanr, unsigned int a)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return;
+ __raw_writel(a, chan->io + DMA_BUFFER1_START);
+}
+
+
+/*
+ * Set Buffer 0 transfer size (max 64k) for a specific DMA channel.
+ */
+static inline void set_dma_count0(unsigned int dmanr, unsigned int count)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return;
+ count &= DMA_COUNT_MASK;
+ __raw_writel(count, chan->io + DMA_BUFFER0_COUNT);
+}
+
+/*
+ * Set Buffer 1 transfer size (max 64k) for a specific DMA channel.
+ */
+static inline void set_dma_count1(unsigned int dmanr, unsigned int count)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return;
+ count &= DMA_COUNT_MASK;
+ __raw_writel(count, chan->io + DMA_BUFFER1_COUNT);
+}
+
+/*
+ * Set both buffer transfer sizes (max 64k) for a specific DMA channel.
+ */
+static inline void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return;
+ count &= DMA_COUNT_MASK;
+ __raw_writel(count, chan->io + DMA_BUFFER0_COUNT);
+ __raw_writel(count, chan->io + DMA_BUFFER1_COUNT);
+}
+
+/*
+ * Returns the buffer-done bits (DMA_D0/DMA_D1) from the mode register;
+ * the result is zero when neither buffer has completed.
+ */
+static inline unsigned int get_dma_buffer_done(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return 0;
+ return __raw_readl(chan->io + DMA_MODE_READ) & (DMA_D0 | DMA_D1);
+}
+
+
+/*
+ * Returns the DMA channel's Buffer Done IRQ number.
+ */
+static inline int get_dma_done_irq(unsigned int dmanr)
+{
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return -1;
+ return chan->irq;
+}
+
+/*
+ * Get DMA residue count. Returns the number of _bytes_ left to transfer.
+ */
+static inline int get_dma_residue(unsigned int dmanr)
+{
+ int curBufCntReg, count;
+ struct dma_chan *chan = get_dma_chan(dmanr);
+
+ if (!chan)
+ return 0;
+
+ curBufCntReg = (__raw_readl(chan->io + DMA_MODE_READ) & DMA_AB) ?
+ DMA_BUFFER1_COUNT : DMA_BUFFER0_COUNT;
+
+ count = __raw_readl(chan->io + curBufCntReg) & DMA_COUNT_MASK;
+
+ if ((chan->mode & DMA_DW_MASK) == DMA_DW16)
+ count <<= 1;
+ else if ((chan->mode & DMA_DW_MASK) == DMA_DW32)
+ count <<= 2;
+
+ return count;
+}
+
+#endif /* __ASM_AU1000_DMA_H */
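A minimal usage sketch of the channel API declared above (not part of the patch): a hypothetical driver claims the UART0 TX channel, programs buffer 0 and starts the transfer. The handler name, buffer address and length are illustrative assumptions.

#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000_dma.h>

/* hypothetical completion handler; a real one would ack its channel */
static irqreturn_t my_dma_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_start_tx(dma_addr_t buf_phys, unsigned int len)
{
	unsigned long flags;
	int ch = request_au1000_dma(DMA_ID_UART0_TX, "my-uart-tx",
				    my_dma_isr, 0, NULL);
	if (ch < 0)
		return ch;

	set_dma_mode(ch, DMA_DW8 | DMA_NC);	/* 8-bit FIFO, non-coherent */
	init_dma(ch);				/* program FIFO address and mode */

	flags = claim_dma_lock();		/* serialize buffer programming */
	set_dma_addr0(ch, buf_phys);
	set_dma_count0(ch, len);
	enable_dma_buffer0(ch);
	start_dma(ch);
	release_dma_lock(flags);
	return ch;
}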
diff --git a/arch/mips/include/asm/mach-au1x00/au1100_mmc.h b/arch/mips/include/asm/mach-au1x00/au1100_mmc.h
new file mode 100644
index 000000000..cadab91ce
--- /dev/null
+++ b/arch/mips/include/asm/mach-au1x00/au1100_mmc.h
@@ -0,0 +1,210 @@
+/*
+ * BRIEF MODULE DESCRIPTION
+ * Defines for using the MMC/SD controllers on the
+ *	Alchemy Au1100 MIPS processor.
+ *
+ * Copyright (c) 2003 Embedded Edge, LLC.
+ * Author: Embedded Edge, LLC.
+ * dan@embeddededge.com or tim@embeddededge.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+/*
+ * AU1100 MMC/SD definitions.
+ *
+ * From "AMD Alchemy Solutions Au1100 Processor Data Book - Preliminary"
+ * June, 2003
+ */
+
+#ifndef __ASM_AU1100_MMC_H
+#define __ASM_AU1100_MMC_H
+
+#include <linux/leds.h>
+
+struct au1xmmc_platform_data {
+	int (*cd_setup)(void *mmc_host, int on);
+	int (*card_inserted)(void *mmc_host);
+	int (*card_readonly)(void *mmc_host);
+	void (*set_power)(void *mmc_host, int state);
+ struct led_classdev *led;
+ unsigned long mask_host_caps;
+};
+
+#define SD0_BASE 0xB0600000
+#define SD1_BASE 0xB0680000
+
+
+/*
+ * Register offsets.
+ */
+#define SD_TXPORT (0x0000)
+#define SD_RXPORT (0x0004)
+#define SD_CONFIG (0x0008)
+#define SD_ENABLE (0x000C)
+#define SD_CONFIG2 (0x0010)
+#define SD_BLKSIZE (0x0014)
+#define SD_STATUS (0x0018)
+#define SD_DEBUG (0x001C)
+#define SD_CMD (0x0020)
+#define SD_CMDARG (0x0024)
+#define SD_RESP3 (0x0028)
+#define SD_RESP2 (0x002C)
+#define SD_RESP1 (0x0030)
+#define SD_RESP0 (0x0034)
+#define SD_TIMEOUT (0x0038)
+
+
+/*
+ * SD_TXPORT bit definitions.
+ */
+#define SD_TXPORT_TXD (0x000000ff)
+
+
+/*
+ * SD_RXPORT bit definitions.
+ */
+#define SD_RXPORT_RXD (0x000000ff)
+
+
+/*
+ * SD_CONFIG bit definitions.
+ */
+#define SD_CONFIG_DIV (0x000001ff)
+#define SD_CONFIG_DE (0x00000200)
+#define SD_CONFIG_NE (0x00000400)
+#define SD_CONFIG_TU (0x00000800)
+#define SD_CONFIG_TO (0x00001000)
+#define SD_CONFIG_RU (0x00002000)
+#define SD_CONFIG_RO (0x00004000)
+#define SD_CONFIG_I (0x00008000)
+#define SD_CONFIG_CR (0x00010000)
+#define SD_CONFIG_RAT (0x00020000)
+#define SD_CONFIG_DD (0x00040000)
+#define SD_CONFIG_DT (0x00080000)
+#define SD_CONFIG_SC (0x00100000)
+#define SD_CONFIG_RC (0x00200000)
+#define SD_CONFIG_WC (0x00400000)
+#define SD_CONFIG_xxx (0x00800000)
+#define SD_CONFIG_TH (0x01000000)
+#define SD_CONFIG_TE (0x02000000)
+#define SD_CONFIG_TA (0x04000000)
+#define SD_CONFIG_RH (0x08000000)
+#define SD_CONFIG_RA (0x10000000)
+#define SD_CONFIG_RF (0x20000000)
+#define SD_CONFIG_CD (0x40000000)
+#define SD_CONFIG_SI (0x80000000)
+
+
+/*
+ * SD_ENABLE bit definitions.
+ */
+#define SD_ENABLE_CE (0x00000001)
+#define SD_ENABLE_R (0x00000002)
+
+
+/*
+ * SD_CONFIG2 bit definitions.
+ */
+#define SD_CONFIG2_EN (0x00000001)
+#define SD_CONFIG2_FF (0x00000002)
+#define SD_CONFIG2_xx1 (0x00000004)
+#define SD_CONFIG2_DF (0x00000008)
+#define SD_CONFIG2_DC (0x00000010)
+#define SD_CONFIG2_xx2 (0x000000e0)
+#define SD_CONFIG2_BB (0x00000080)
+#define SD_CONFIG2_WB (0x00000100)
+#define SD_CONFIG2_RW (0x00000200)
+#define SD_CONFIG2_DP (0x00000400)
+
+
+/*
+ * SD_BLKSIZE bit definitions.
+ */
+#define SD_BLKSIZE_BS (0x000007ff)
+#define SD_BLKSIZE_BS_SHIFT (0)
+#define SD_BLKSIZE_BC (0x01ff0000)
+#define SD_BLKSIZE_BC_SHIFT (16)
+
+
+/*
+ * SD_STATUS bit definitions.
+ */
+#define SD_STATUS_DCRCW (0x00000007)
+#define SD_STATUS_xx1 (0x00000008)
+#define SD_STATUS_CB (0x00000010)
+#define SD_STATUS_DB (0x00000020)
+#define SD_STATUS_CF (0x00000040)
+#define SD_STATUS_D3 (0x00000080)
+#define SD_STATUS_xx2 (0x00000300)
+#define SD_STATUS_NE (0x00000400)
+#define SD_STATUS_TU (0x00000800)
+#define SD_STATUS_TO (0x00001000)
+#define SD_STATUS_RU (0x00002000)
+#define SD_STATUS_RO (0x00004000)
+#define SD_STATUS_I (0x00008000)
+#define SD_STATUS_CR (0x00010000)
+#define SD_STATUS_RAT (0x00020000)
+#define SD_STATUS_DD (0x00040000)
+#define SD_STATUS_DT (0x00080000)
+#define SD_STATUS_SC (0x00100000)
+#define SD_STATUS_RC (0x00200000)
+#define SD_STATUS_WC (0x00400000)
+#define SD_STATUS_xx3 (0x00800000)
+#define SD_STATUS_TH (0x01000000)
+#define SD_STATUS_TE (0x02000000)
+#define SD_STATUS_TA (0x04000000)
+#define SD_STATUS_RH (0x08000000)
+#define SD_STATUS_RA (0x10000000)
+#define SD_STATUS_RF (0x20000000)
+#define SD_STATUS_CD (0x40000000)
+#define SD_STATUS_SI (0x80000000)
+
+
+/*
+ * SD_CMD bit definitions.
+ */
+#define SD_CMD_GO (0x00000001)
+#define SD_CMD_RY (0x00000002)
+#define SD_CMD_xx1 (0x0000000c)
+#define SD_CMD_CT_MASK (0x000000f0)
+#define SD_CMD_CT_0 (0x00000000)
+#define SD_CMD_CT_1 (0x00000010)
+#define SD_CMD_CT_2 (0x00000020)
+#define SD_CMD_CT_3 (0x00000030)
+#define SD_CMD_CT_4 (0x00000040)
+#define SD_CMD_CT_5 (0x00000050)
+#define SD_CMD_CT_6 (0x00000060)
+#define SD_CMD_CT_7 (0x00000070)
+#define SD_CMD_CI (0x0000ff00)
+#define SD_CMD_CI_SHIFT (8)
+#define SD_CMD_RT_MASK (0x00ff0000)
+#define SD_CMD_RT_0 (0x00000000)
+#define SD_CMD_RT_1 (0x00010000)
+#define SD_CMD_RT_2 (0x00020000)
+#define SD_CMD_RT_3 (0x00030000)
+#define SD_CMD_RT_4 (0x00040000)
+#define SD_CMD_RT_5 (0x00050000)
+#define SD_CMD_RT_6 (0x00060000)
+#define SD_CMD_RT_1B (0x00810000)
+
+
+#endif /* __ASM_AU1100_MMC_H */
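A board-side sketch of how au1xmmc_platform_data might be filled in; the card-detect and write-protect GPIO numbers and the helper names are assumptions, not taken from the patch.

#include <linux/gpio.h>
#include <asm/mach-au1x00/au1100_mmc.h>

#define MYBOARD_SD0_CD_GPIO	12	/* hypothetical card-detect GPIO */
#define MYBOARD_SD0_WP_GPIO	13	/* hypothetical write-protect GPIO */

static int myboard_sd0_inserted(void *mmc_host)
{
	return !gpio_get_value(MYBOARD_SD0_CD_GPIO);	/* active low */
}

static int myboard_sd0_readonly(void *mmc_host)
{
	return gpio_get_value(MYBOARD_SD0_WP_GPIO);
}

static struct au1xmmc_platform_data myboard_sd0_pdata = {
	.card_inserted	= myboard_sd0_inserted,
	.card_readonly	= myboard_sd0_readonly,
	/* .cd_setup, .set_power and .led left NULL: not wired on this board */
};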
diff --git a/arch/mips/include/asm/mach-au1x00/au1200fb.h b/arch/mips/include/asm/mach-au1x00/au1200fb.h
new file mode 100644
index 000000000..e0e98f06c
--- /dev/null
+++ b/arch/mips/include/asm/mach-au1x00/au1200fb.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * platform data for au1200fb driver.
+ */
+
+#ifndef _AU1200FB_PLAT_H_
+#define _AU1200FB_PLAT_H_
+
+struct au1200fb_platdata {
+ int (*panel_index)(void);
+ int (*panel_init)(void);
+ int (*panel_shutdown)(void);
+};
+
+#endif
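An illustrative sketch of the panel hooks a board could hand to the au1200fb driver; the panel index value and the structure name are assumptions.

#include <asm/mach-au1x00/au1200fb.h>

static int myboard_panel_index(void)
{
	return 7;	/* assumed index into the driver's panel table */
}

static struct au1200fb_platdata myboard_fb_pdata = {
	.panel_index	= myboard_panel_index,
	/* .panel_init and .panel_shutdown are optional and may stay NULL */
};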
diff --git a/arch/mips/include/asm/mach-au1x00/au1550_spi.h b/arch/mips/include/asm/mach-au1x00/au1550_spi.h
new file mode 100644
index 000000000..fe6ca4606
--- /dev/null
+++ b/arch/mips/include/asm/mach-au1x00/au1550_spi.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * au1550_spi.h - Au1550 PSC SPI controller driver - platform data structure
+ */
+
+#ifndef _AU1550_SPI_H_
+#define _AU1550_SPI_H_
+
+struct au1550_spi_info {
+ u32 mainclk_hz; /* main input clock frequency of PSC */
+ u16 num_chipselect; /* number of chipselects supported */
+ void (*activate_cs)(struct au1550_spi_info *spi, int cs, int polarity);
+ void (*deactivate_cs)(struct au1550_spi_info *spi, int cs, int polarity);
+};
+
+#endif
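A sketch, under stated assumptions, of the board data consumed by the PSC SPI driver; the clock rate, chip-select count and the empty chip-select hook are placeholders.

#include <asm/mach-au1x00/au1550_spi.h>

static void myboard_spi_cs(struct au1550_spi_info *spi, int cs, int polarity)
{
	/* a real board would toggle a chip-select GPIO here */
}

static struct au1550_spi_info myboard_psc1_spi = {
	.mainclk_hz	= 48000000,	/* assumed PSC input clock */
	.num_chipselect	= 2,
	.activate_cs	= myboard_spi_cs,
	.deactivate_cs	= myboard_spi_cs,
};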
diff --git a/arch/mips/include/asm/mach-au1x00/au1550nd.h b/arch/mips/include/asm/mach-au1x00/au1550nd.h
new file mode 100644
index 000000000..d26dc1dad
--- /dev/null
+++ b/arch/mips/include/asm/mach-au1x00/au1550nd.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * platform data for the Au1550 NAND driver
+ */
+
+#ifndef _AU1550ND_H_
+#define _AU1550ND_H_
+
+#include <linux/mtd/partitions.h>
+
+struct au1550nd_platdata {
+ struct mtd_partition *parts;
+ int num_parts;
+ int devwidth; /* 0 = 8bit device, 1 = 16bit device */
+};
+
+#endif
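An illustrative partition layout passed through au1550nd_platdata; the names, offsets and sizes are assumptions.

#include <linux/kernel.h>
#include <linux/mtd/partitions.h>
#include <asm/mach-au1x00/au1550nd.h>

static struct mtd_partition myboard_nand_parts[] = {
	{ .name = "kernel", .offset = 0, .size = 8 * 1024 * 1024 },
	{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
			    .size = MTDPART_SIZ_FULL },
};

static struct au1550nd_platdata myboard_nand_pdata = {
	.parts		= myboard_nand_parts,
	.num_parts	= ARRAY_SIZE(myboard_nand_parts),
	.devwidth	= 0,	/* 8-bit NAND device */
};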
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
new file mode 100644
index 000000000..456ddba15
--- /dev/null
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
@@ -0,0 +1,388 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ *	Include file for Alchemy Semiconductor's Au1550
+ *	Descriptor-Based DMA Controller.
+ *
+ * Copyright 2004 Embedded Edge, LLC
+ * dan@embeddededge.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Specifics for the Au1xxx Descriptor-Based DMA Controller,
+ * first seen in the AU1550 part.
+ */
+#ifndef _AU1000_DBDMA_H_
+#define _AU1000_DBDMA_H_
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+typedef volatile struct dbdma_global {
+ u32 ddma_config;
+ u32 ddma_intstat;
+ u32 ddma_throttle;
+ u32 ddma_inten;
+} dbdma_global_t;
+
+/* General Configuration. */
+#define DDMA_CONFIG_AF (1 << 2)
+#define DDMA_CONFIG_AH (1 << 1)
+#define DDMA_CONFIG_AL (1 << 0)
+
+#define DDMA_THROTTLE_EN (1 << 31)
+
+/* The structure of a DMA Channel. */
+typedef volatile struct au1xxx_dma_channel {
+ u32 ddma_cfg; /* See below */
+ u32 ddma_desptr; /* 32-byte aligned pointer to descriptor */
+ u32 ddma_statptr; /* word aligned pointer to status word */
+ u32 ddma_dbell; /* A write activates channel operation */
+ u32 ddma_irq; /* If bit 0 set, interrupt pending */
+ u32 ddma_stat; /* See below */
+ u32 ddma_bytecnt; /* Byte count, valid only when chan idle */
+ /* Remainder, up to the 256 byte boundary, is reserved. */
+} au1x_dma_chan_t;
+
+#define DDMA_CFG_SED (1 << 9) /* source DMA level/edge detect */
+#define DDMA_CFG_SP (1 << 8) /* source DMA polarity */
+#define DDMA_CFG_DED (1 << 7) /* destination DMA level/edge detect */
+#define DDMA_CFG_DP (1 << 6) /* destination DMA polarity */
+#define DDMA_CFG_SYNC (1 << 5) /* Sync static bus controller */
+#define DDMA_CFG_PPR (1 << 4) /* PCI posted read/write control */
+#define DDMA_CFG_DFN (1 << 3) /* Descriptor fetch non-coherent */
+#define DDMA_CFG_SBE (1 << 2) /* Source big endian */
+#define DDMA_CFG_DBE (1 << 1) /* Destination big endian */
+#define DDMA_CFG_EN (1 << 0) /* Channel enable */
+
+/*
+ * Always set when descriptor processing done, regardless of
+ * interrupt enable state. Reflected in global intstat, don't
+ * clear this until global intstat is read/used.
+ */
+#define DDMA_IRQ_IN (1 << 0)
+
+#define DDMA_STAT_DB (1 << 2) /* Doorbell pushed */
+#define DDMA_STAT_V (1 << 1) /* Descriptor valid */
+#define DDMA_STAT_H (1 << 0) /* Channel Halted */
+
+/*
+ * "Standard" DDMA Descriptor.
+ * Must be 32-byte aligned.
+ */
+typedef volatile struct au1xxx_ddma_desc {
+ u32 dscr_cmd0; /* See below */
+ u32 dscr_cmd1; /* See below */
+ u32 dscr_source0; /* source phys address */
+ u32 dscr_source1; /* See below */
+ u32 dscr_dest0; /* Destination address */
+ u32 dscr_dest1; /* See below */
+ u32 dscr_stat; /* completion status */
+ u32 dscr_nxtptr; /* Next descriptor pointer (mostly) */
+ /*
+ * First 32 bytes are HW specific!!!
+ * Let's have some SW data following -- make sure it's 32 bytes.
+ */
+ u32 sw_status;
+ u32 sw_context;
+ u32 sw_reserved[6];
+} au1x_ddma_desc_t;
+
+#define DSCR_CMD0_V (1 << 31) /* Descriptor valid */
+#define DSCR_CMD0_MEM (1 << 30) /* mem-mem transfer */
+#define DSCR_CMD0_SID_MASK (0x1f << 25) /* Source ID */
+#define DSCR_CMD0_DID_MASK (0x1f << 20) /* Destination ID */
+#define DSCR_CMD0_SW_MASK (0x3 << 18) /* Source Width */
+#define DSCR_CMD0_DW_MASK (0x3 << 16) /* Destination Width */
+#define DSCR_CMD0_ARB (0x1 << 15) /* Set for Hi Pri */
+#define DSCR_CMD0_DT_MASK (0x3 << 13) /* Descriptor Type */
+#define DSCR_CMD0_SN (0x1 << 12) /* Source non-coherent */
+#define DSCR_CMD0_DN (0x1 << 11) /* Destination non-coherent */
+#define DSCR_CMD0_SM (0x1 << 10) /* Stride mode */
+#define DSCR_CMD0_IE (0x1 << 8) /* Interrupt Enable */
+#define DSCR_CMD0_SP (0x1 << 4) /* Status pointer select */
+#define DSCR_CMD0_CV (0x1 << 2) /* Clear Valid when done */
+#define DSCR_CMD0_ST_MASK (0x3 << 0) /* Status instruction */
+
+#define SW_STATUS_INUSE (1 << 0)
+
+/* Command 0 device IDs. */
+#define AU1550_DSCR_CMD0_UART0_TX 0
+#define AU1550_DSCR_CMD0_UART0_RX 1
+#define AU1550_DSCR_CMD0_UART3_TX 2
+#define AU1550_DSCR_CMD0_UART3_RX 3
+#define AU1550_DSCR_CMD0_DMA_REQ0 4
+#define AU1550_DSCR_CMD0_DMA_REQ1 5
+#define AU1550_DSCR_CMD0_DMA_REQ2 6
+#define AU1550_DSCR_CMD0_DMA_REQ3 7
+#define AU1550_DSCR_CMD0_USBDEV_RX0 8
+#define AU1550_DSCR_CMD0_USBDEV_TX0 9
+#define AU1550_DSCR_CMD0_USBDEV_TX1 10
+#define AU1550_DSCR_CMD0_USBDEV_TX2 11
+#define AU1550_DSCR_CMD0_USBDEV_RX3 12
+#define AU1550_DSCR_CMD0_USBDEV_RX4 13
+#define AU1550_DSCR_CMD0_PSC0_TX 14
+#define AU1550_DSCR_CMD0_PSC0_RX 15
+#define AU1550_DSCR_CMD0_PSC1_TX 16
+#define AU1550_DSCR_CMD0_PSC1_RX 17
+#define AU1550_DSCR_CMD0_PSC2_TX 18
+#define AU1550_DSCR_CMD0_PSC2_RX 19
+#define AU1550_DSCR_CMD0_PSC3_TX 20
+#define AU1550_DSCR_CMD0_PSC3_RX 21
+#define AU1550_DSCR_CMD0_PCI_WRITE 22
+#define AU1550_DSCR_CMD0_NAND_FLASH 23
+#define AU1550_DSCR_CMD0_MAC0_RX 24
+#define AU1550_DSCR_CMD0_MAC0_TX 25
+#define AU1550_DSCR_CMD0_MAC1_RX 26
+#define AU1550_DSCR_CMD0_MAC1_TX 27
+
+#define AU1200_DSCR_CMD0_UART0_TX 0
+#define AU1200_DSCR_CMD0_UART0_RX 1
+#define AU1200_DSCR_CMD0_UART1_TX 2
+#define AU1200_DSCR_CMD0_UART1_RX 3
+#define AU1200_DSCR_CMD0_DMA_REQ0 4
+#define AU1200_DSCR_CMD0_DMA_REQ1 5
+#define AU1200_DSCR_CMD0_MAE_BE 6
+#define AU1200_DSCR_CMD0_MAE_FE 7
+#define AU1200_DSCR_CMD0_SDMS_TX0 8
+#define AU1200_DSCR_CMD0_SDMS_RX0 9
+#define AU1200_DSCR_CMD0_SDMS_TX1 10
+#define AU1200_DSCR_CMD0_SDMS_RX1 11
+#define AU1200_DSCR_CMD0_AES_TX 13
+#define AU1200_DSCR_CMD0_AES_RX 12
+#define AU1200_DSCR_CMD0_PSC0_TX 14
+#define AU1200_DSCR_CMD0_PSC0_RX 15
+#define AU1200_DSCR_CMD0_PSC1_TX 16
+#define AU1200_DSCR_CMD0_PSC1_RX 17
+#define AU1200_DSCR_CMD0_CIM_RXA 18
+#define AU1200_DSCR_CMD0_CIM_RXB 19
+#define AU1200_DSCR_CMD0_CIM_RXC 20
+#define AU1200_DSCR_CMD0_MAE_BOTH 21
+#define AU1200_DSCR_CMD0_LCD 22
+#define AU1200_DSCR_CMD0_NAND_FLASH 23
+#define AU1200_DSCR_CMD0_PSC0_SYNC 24
+#define AU1200_DSCR_CMD0_PSC1_SYNC 25
+#define AU1200_DSCR_CMD0_CIM_SYNC 26
+
+#define AU1300_DSCR_CMD0_UART0_TX 0
+#define AU1300_DSCR_CMD0_UART0_RX 1
+#define AU1300_DSCR_CMD0_UART1_TX 2
+#define AU1300_DSCR_CMD0_UART1_RX 3
+#define AU1300_DSCR_CMD0_UART2_TX 4
+#define AU1300_DSCR_CMD0_UART2_RX 5
+#define AU1300_DSCR_CMD0_UART3_TX 6
+#define AU1300_DSCR_CMD0_UART3_RX 7
+#define AU1300_DSCR_CMD0_SDMS_TX0 8
+#define AU1300_DSCR_CMD0_SDMS_RX0 9
+#define AU1300_DSCR_CMD0_SDMS_TX1 10
+#define AU1300_DSCR_CMD0_SDMS_RX1 11
+#define AU1300_DSCR_CMD0_AES_TX 12
+#define AU1300_DSCR_CMD0_AES_RX 13
+#define AU1300_DSCR_CMD0_PSC0_TX 14
+#define AU1300_DSCR_CMD0_PSC0_RX 15
+#define AU1300_DSCR_CMD0_PSC1_TX 16
+#define AU1300_DSCR_CMD0_PSC1_RX 17
+#define AU1300_DSCR_CMD0_PSC2_TX 18
+#define AU1300_DSCR_CMD0_PSC2_RX 19
+#define AU1300_DSCR_CMD0_PSC3_TX 20
+#define AU1300_DSCR_CMD0_PSC3_RX 21
+#define AU1300_DSCR_CMD0_LCD 22
+#define AU1300_DSCR_CMD0_NAND_FLASH 23
+#define AU1300_DSCR_CMD0_SDMS_TX2 24
+#define AU1300_DSCR_CMD0_SDMS_RX2 25
+#define AU1300_DSCR_CMD0_CIM_SYNC 26
+#define AU1300_DSCR_CMD0_UDMA 27
+#define AU1300_DSCR_CMD0_DMA_REQ0 28
+#define AU1300_DSCR_CMD0_DMA_REQ1 29
+
+#define DSCR_CMD0_THROTTLE 30
+#define DSCR_CMD0_ALWAYS 31
+#define DSCR_NDEV_IDS 32
+/* This macro is used to find/create custom device types */
+#define DSCR_DEV2CUSTOM_ID(x, d) (((((x) & 0xFFFF) << 8) | 0x32000000) | \
+ ((d) & 0xFF))
+#define DSCR_CUSTOM2DEV_ID(x) ((x) & 0xFF)
+
+#define DSCR_CMD0_SID(x) (((x) & 0x1f) << 25)
+#define DSCR_CMD0_DID(x) (((x) & 0x1f) << 20)
+
+/* Source/Destination transfer width. */
+#define DSCR_CMD0_BYTE 0
+#define DSCR_CMD0_HALFWORD 1
+#define DSCR_CMD0_WORD 2
+
+#define DSCR_CMD0_SW(x) (((x) & 0x3) << 18)
+#define DSCR_CMD0_DW(x) (((x) & 0x3) << 16)
+
+/* DDMA Descriptor Type. */
+#define DSCR_CMD0_STANDARD 0
+#define DSCR_CMD0_LITERAL 1
+#define DSCR_CMD0_CMP_BRANCH 2
+
+#define DSCR_CMD0_DT(x) (((x) & 0x3) << 13)
+
+/* Status Instruction. */
+#define DSCR_CMD0_ST_NOCHANGE 0 /* Don't change */
+#define DSCR_CMD0_ST_CURRENT 1 /* Write current status */
+#define DSCR_CMD0_ST_CMD0 2 /* Write cmd0 with V cleared */
+#define DSCR_CMD0_ST_BYTECNT 3 /* Write remaining byte count */
+
+#define DSCR_CMD0_ST(x) (((x) & 0x3) << 0)
+
+/* Descriptor Command 1. */
+#define DSCR_CMD1_SUPTR_MASK (0xf << 28) /* upper 4 bits of src addr */
+#define DSCR_CMD1_DUPTR_MASK (0xf << 24) /* upper 4 bits of dest addr */
+#define DSCR_CMD1_FL_MASK (0x3 << 22) /* Flag bits */
+#define DSCR_CMD1_BC_MASK (0x3fffff) /* Byte count */
+
+/* Flag description. */
+#define DSCR_CMD1_FL_MEM_STRIDE0 0
+#define DSCR_CMD1_FL_MEM_STRIDE1 1
+#define DSCR_CMD1_FL_MEM_STRIDE2 2
+
+#define DSCR_CMD1_FL(x) (((x) & 0x3) << 22)
+
+/* Source1, 1-dimensional stride. */
+#define DSCR_SRC1_STS_MASK (3 << 30) /* Src xfer size */
+#define DSCR_SRC1_SAM_MASK (3 << 28) /* Src xfer movement */
+#define DSCR_SRC1_SB_MASK (0x3fff << 14) /* Block size */
+#define DSCR_SRC1_SB(x) (((x) & 0x3fff) << 14)
+#define DSCR_SRC1_SS_MASK (0x3fff << 0) /* Stride */
+#define DSCR_SRC1_SS(x) (((x) & 0x3fff) << 0)
+
+/* Dest1, 1-dimensional stride. */
+#define DSCR_DEST1_DTS_MASK (3 << 30) /* Dest xfer size */
+#define DSCR_DEST1_DAM_MASK (3 << 28) /* Dest xfer movement */
+#define DSCR_DEST1_DB_MASK (0x3fff << 14) /* Block size */
+#define DSCR_DEST1_DB(x) (((x) & 0x3fff) << 14)
+#define DSCR_DEST1_DS_MASK (0x3fff << 0) /* Stride */
+#define DSCR_DEST1_DS(x) (((x) & 0x3fff) << 0)
+
+#define DSCR_xTS_SIZE1 0
+#define DSCR_xTS_SIZE2 1
+#define DSCR_xTS_SIZE4 2
+#define DSCR_xTS_SIZE8 3
+#define DSCR_SRC1_STS(x) (((x) & 3) << 30)
+#define DSCR_DEST1_DTS(x) (((x) & 3) << 30)
+
+#define DSCR_xAM_INCREMENT 0
+#define DSCR_xAM_DECREMENT 1
+#define DSCR_xAM_STATIC 2
+#define DSCR_xAM_BURST 3
+#define DSCR_SRC1_SAM(x) (((x) & 3) << 28)
+#define DSCR_DEST1_DAM(x) (((x) & 3) << 28)
+
+/* The next descriptor pointer. */
+#define DSCR_NXTPTR_MASK (0x07ffffff)
+#define DSCR_NXTPTR(x) ((x) >> 5)
+#define DSCR_GET_NXTPTR(x) ((x) << 5)
+#define DSCR_NXTPTR_MS (1 << 27)
+
+/* The number of DBDMA channels. */
+#define NUM_DBDMA_CHANS 16
+
+/*
+ * DDMA API definitions
+ * FIXME: may not belong in this header file
+ */
+typedef struct dbdma_device_table {
+ u32 dev_id;
+ u32 dev_flags;
+ u32 dev_tsize;
+ u32 dev_devwidth;
+ u32 dev_physaddr; /* If FIFO */
+ u32 dev_intlevel;
+ u32 dev_intpolarity;
+} dbdev_tab_t;
+
+
+typedef struct dbdma_chan_config {
+ spinlock_t lock;
+
+ u32 chan_flags;
+ u32 chan_index;
+ dbdev_tab_t *chan_src;
+ dbdev_tab_t *chan_dest;
+ au1x_dma_chan_t *chan_ptr;
+ au1x_ddma_desc_t *chan_desc_base;
+ u32 cdb_membase; /* kmalloc base of above */
+ au1x_ddma_desc_t *get_ptr, *put_ptr, *cur_ptr;
+ void *chan_callparam;
+ void (*chan_callback)(int, void *);
+} chan_tab_t;
+
+#define DEV_FLAGS_INUSE (1 << 0)
+#define DEV_FLAGS_ANYUSE (1 << 1)
+#define DEV_FLAGS_OUT (1 << 2)
+#define DEV_FLAGS_IN (1 << 3)
+#define DEV_FLAGS_BURSTABLE (1 << 4)
+#define DEV_FLAGS_SYNC (1 << 5)
+/* end DDMA API definitions */
+
+/*
+ * External functions for drivers to use.
+ * Use this to allocate a DBDMA channel. The device IDs are the
+ * DSCR_CMD0 device IDs, usually redefined to more meaningful
+ * names. The 'callback' is invoked from the DMA completion
+ * interrupt.
+ */
+extern u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
+ void (*callback)(int, void *),
+ void *callparam);
+
+#define DBDMA_MEM_CHAN DSCR_CMD0_ALWAYS
+
+/* Set the device width of an in/out FIFO. */
+u32 au1xxx_dbdma_set_devwidth(u32 chanid, int bits);
+
+/* Allocate a ring of descriptors for DBDMA. */
+u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries);
+
+/* Put buffers on source/destination descriptors. */
+u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags);
+u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags);
+
+/* Get a buffer from the destination descriptor. */
+u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes);
+
+void au1xxx_dbdma_stop(u32 chanid);
+void au1xxx_dbdma_start(u32 chanid);
+void au1xxx_dbdma_reset(u32 chanid);
+u32 au1xxx_get_dma_residue(u32 chanid);
+
+void au1xxx_dbdma_chan_free(u32 chanid);
+void au1xxx_dbdma_dump(u32 chanid);
+
+u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr);
+
+u32 au1xxx_ddma_add_device(dbdev_tab_t *dev);
+extern void au1xxx_ddma_del_device(u32 devid);
+void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp);
+
+/*
+ * Flags for the put_source/put_dest functions.
+ */
+#define DDMA_FLAGS_IE (1 << 0)
+#define DDMA_FLAGS_NOIE (1 << 1)
+
+#endif /* _LANGUAGE_ASSEMBLY */
+#endif /* _AU1000_DBDMA_H_ */
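A minimal sketch of the external DBDMA API listed above: allocate a memory-to-PSC0-TX channel, size its descriptor ring and queue one buffer. Error handling is omitted, and the "my_" function names and parameter choices are assumptions.

#include <asm/mach-au1x00/au1xxx_dbdma.h>

static void my_dbdma_done(int irq, void *param)
{
	/* runs from the DDMA completion interrupt */
}

static u32 my_open_tx_chan(void)
{
	u32 chan = au1xxx_dbdma_chan_alloc(DBDMA_MEM_CHAN,
					   AU1550_DSCR_CMD0_PSC0_TX,
					   my_dbdma_done, NULL);
	if (!chan)
		return 0;

	au1xxx_dbdma_set_devwidth(chan, 16);	/* 16-bit FIFO accesses */
	au1xxx_dbdma_ring_alloc(chan, 8);	/* 8 descriptors */
	return chan;
}

static void my_submit(u32 chan, dma_addr_t buf, int nbytes)
{
	au1xxx_dbdma_put_source(chan, buf, nbytes, DDMA_FLAGS_IE);
	au1xxx_dbdma_start(chan);
}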
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_eth.h b/arch/mips/include/asm/mach-au1x00/au1xxx_eth.h
new file mode 100644
index 000000000..9d1c8d5ed
--- /dev/null
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_eth.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __AU1X00_ETH_DATA_H
+#define __AU1X00_ETH_DATA_H
+
+/* Platform specific PHY configuration passed to the MAC driver */
+struct au1000_eth_platform_data {
+ int phy_static_config;
+ int phy_search_highest_addr;
+ int phy1_search_mac0;
+ int phy_addr;
+ int phy_busid;
+ int phy_irq;
+ char mac[6];
+};
+
+void __init au1xxx_override_eth_cfg(unsigned port,
+ struct au1000_eth_platform_data *eth_data);
+
+#endif /* __AU1X00_ETH_DATA_H */
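A board-side sketch showing how au1xxx_override_eth_cfg() might pin MAC0 to a fixed PHY; the address, bus id and IRQ values are assumptions.

#include <linux/init.h>
#include <asm/mach-au1x00/au1xxx_eth.h>

static struct au1000_eth_platform_data myboard_eth0_pdata = {
	.phy_static_config	= 1,
	.phy_addr		= 1,	/* assumed PHY at MDIO address 1 */
	.phy_busid		= 0,
	.phy_irq		= -1,	/* no PHY interrupt line, poll */
};

static void __init myboard_eth_setup(void)
{
	au1xxx_override_eth_cfg(0, &myboard_eth0_pdata);
}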
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_psc.h b/arch/mips/include/asm/mach-au1x00/au1xxx_psc.h
new file mode 100644
index 000000000..8a9cd754b
--- /dev/null
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_psc.h
@@ -0,0 +1,466 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ *	Include file for the Alchemy Au1xxx Programmable Serial Controllers.
+ *
+ * Copyright 2004 Embedded Edge, LLC
+ * dan@embeddededge.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Specifics for the Au1xxx Programmable Serial Controllers, first
+ * seen in the AU1550 part.
+ */
+#ifndef _AU1000_PSC_H_
+#define _AU1000_PSC_H_
+
+/*
+ * The PSC select and control registers are common to all protocols.
+ */
+#define PSC_SEL_OFFSET 0x00000000
+#define PSC_CTRL_OFFSET 0x00000004
+
+#define PSC_SEL_CLK_MASK (3 << 4)
+#define PSC_SEL_CLK_INTCLK (0 << 4)
+#define PSC_SEL_CLK_EXTCLK (1 << 4)
+#define PSC_SEL_CLK_SERCLK (2 << 4)
+
+#define PSC_SEL_PS_MASK 0x00000007
+#define PSC_SEL_PS_DISABLED 0
+#define PSC_SEL_PS_SPIMODE 2
+#define PSC_SEL_PS_I2SMODE 3
+#define PSC_SEL_PS_AC97MODE 4
+#define PSC_SEL_PS_SMBUSMODE 5
+
+#define PSC_CTRL_DISABLE 0
+#define PSC_CTRL_SUSPEND 2
+#define PSC_CTRL_ENABLE 3
+
+/* AC97 Registers. */
+#define PSC_AC97CFG_OFFSET 0x00000008
+#define PSC_AC97MSK_OFFSET 0x0000000c
+#define PSC_AC97PCR_OFFSET 0x00000010
+#define PSC_AC97STAT_OFFSET 0x00000014
+#define PSC_AC97EVNT_OFFSET 0x00000018
+#define PSC_AC97TXRX_OFFSET 0x0000001c
+#define PSC_AC97CDC_OFFSET 0x00000020
+#define PSC_AC97RST_OFFSET 0x00000024
+#define PSC_AC97GPO_OFFSET 0x00000028
+#define PSC_AC97GPI_OFFSET 0x0000002c
+
+/* AC97 Config Register. */
+#define PSC_AC97CFG_RT_MASK (3 << 30)
+#define PSC_AC97CFG_RT_FIFO1 (0 << 30)
+#define PSC_AC97CFG_RT_FIFO2 (1 << 30)
+#define PSC_AC97CFG_RT_FIFO4 (2 << 30)
+#define PSC_AC97CFG_RT_FIFO8 (3 << 30)
+
+#define PSC_AC97CFG_TT_MASK (3 << 28)
+#define PSC_AC97CFG_TT_FIFO1 (0 << 28)
+#define PSC_AC97CFG_TT_FIFO2 (1 << 28)
+#define PSC_AC97CFG_TT_FIFO4 (2 << 28)
+#define PSC_AC97CFG_TT_FIFO8 (3 << 28)
+
+#define PSC_AC97CFG_DD_DISABLE (1 << 27)
+#define PSC_AC97CFG_DE_ENABLE (1 << 26)
+#define PSC_AC97CFG_SE_ENABLE (1 << 25)
+
+#define PSC_AC97CFG_LEN_MASK (0xf << 21)
+#define PSC_AC97CFG_TXSLOT_MASK (0x3ff << 11)
+#define PSC_AC97CFG_RXSLOT_MASK (0x3ff << 1)
+#define PSC_AC97CFG_GE_ENABLE (1)
+
+/* Enable slots 3-12. */
+#define PSC_AC97CFG_TXSLOT_ENA(x) (1 << (((x) - 3) + 11))
+#define PSC_AC97CFG_RXSLOT_ENA(x) (1 << (((x) - 3) + 1))
+
+/*
+ * The hardware field encodes the slot length as (length - 2) / 2, i.e.
+ * length = (field * 2) + 2; the only sensible field values are 7, 9, or
+ * possibly 11. The macros below do the arithmetic.
+ */
+#define PSC_AC97CFG_SET_LEN(x) (((((x) - 2) / 2) & 0xf) << 21)
+#define PSC_AC97CFG_GET_LEN(x) (((((x) >> 21) & 0xf) * 2) + 2)
+
+/* AC97 Mask Register. */
+#define PSC_AC97MSK_GR (1 << 25)
+#define PSC_AC97MSK_CD (1 << 24)
+#define PSC_AC97MSK_RR (1 << 13)
+#define PSC_AC97MSK_RO (1 << 12)
+#define PSC_AC97MSK_RU (1 << 11)
+#define PSC_AC97MSK_TR (1 << 10)
+#define PSC_AC97MSK_TO (1 << 9)
+#define PSC_AC97MSK_TU (1 << 8)
+#define PSC_AC97MSK_RD (1 << 5)
+#define PSC_AC97MSK_TD (1 << 4)
+#define PSC_AC97MSK_ALLMASK (PSC_AC97MSK_GR | PSC_AC97MSK_CD | \
+ PSC_AC97MSK_RR | PSC_AC97MSK_RO | \
+ PSC_AC97MSK_RU | PSC_AC97MSK_TR | \
+ PSC_AC97MSK_TO | PSC_AC97MSK_TU | \
+ PSC_AC97MSK_RD | PSC_AC97MSK_TD)
+
+/* AC97 Protocol Control Register. */
+#define PSC_AC97PCR_RC (1 << 6)
+#define PSC_AC97PCR_RP (1 << 5)
+#define PSC_AC97PCR_RS (1 << 4)
+#define PSC_AC97PCR_TC (1 << 2)
+#define PSC_AC97PCR_TP (1 << 1)
+#define PSC_AC97PCR_TS (1 << 0)
+
+/* AC97 Status register (read only). */
+#define PSC_AC97STAT_CB (1 << 26)
+#define PSC_AC97STAT_CP (1 << 25)
+#define PSC_AC97STAT_CR (1 << 24)
+#define PSC_AC97STAT_RF (1 << 13)
+#define PSC_AC97STAT_RE (1 << 12)
+#define PSC_AC97STAT_RR (1 << 11)
+#define PSC_AC97STAT_TF (1 << 10)
+#define PSC_AC97STAT_TE (1 << 9)
+#define PSC_AC97STAT_TR (1 << 8)
+#define PSC_AC97STAT_RB (1 << 5)
+#define PSC_AC97STAT_TB (1 << 4)
+#define PSC_AC97STAT_DI (1 << 2)
+#define PSC_AC97STAT_DR (1 << 1)
+#define PSC_AC97STAT_SR (1 << 0)
+
+/* AC97 Event Register. */
+#define PSC_AC97EVNT_GR (1 << 25)
+#define PSC_AC97EVNT_CD (1 << 24)
+#define PSC_AC97EVNT_RR (1 << 13)
+#define PSC_AC97EVNT_RO (1 << 12)
+#define PSC_AC97EVNT_RU (1 << 11)
+#define PSC_AC97EVNT_TR (1 << 10)
+#define PSC_AC97EVNT_TO (1 << 9)
+#define PSC_AC97EVNT_TU (1 << 8)
+#define PSC_AC97EVNT_RD (1 << 5)
+#define PSC_AC97EVNT_TD (1 << 4)
+
+/* CODEC Command Register. */
+#define PSC_AC97CDC_RD (1 << 25)
+#define PSC_AC97CDC_ID_MASK (3 << 23)
+#define PSC_AC97CDC_INDX_MASK (0x7f << 16)
+#define PSC_AC97CDC_ID(x) (((x) & 0x03) << 23)
+#define PSC_AC97CDC_INDX(x) (((x) & 0x7f) << 16)
+
+/* AC97 Reset Control Register. */
+#define PSC_AC97RST_RST (1 << 1)
+#define PSC_AC97RST_SNC (1 << 0)
+
+/* PSC in I2S Mode. */
+typedef struct psc_i2s {
+ u32 psc_sel;
+ u32 psc_ctrl;
+ u32 psc_i2scfg;
+ u32 psc_i2smsk;
+ u32 psc_i2spcr;
+ u32 psc_i2sstat;
+ u32 psc_i2sevent;
+ u32 psc_i2stxrx;
+ u32 psc_i2sudf;
+} psc_i2s_t;
+
+#define PSC_I2SCFG_OFFSET 0x08
+#define PSC_I2SMASK_OFFSET 0x0C
+#define PSC_I2SPCR_OFFSET 0x10
+#define PSC_I2SSTAT_OFFSET 0x14
+#define PSC_I2SEVENT_OFFSET 0x18
+#define PSC_I2SRXTX_OFFSET 0x1C
+#define PSC_I2SUDF_OFFSET 0x20
+
+/* I2S Config Register. */
+#define PSC_I2SCFG_RT_MASK (3 << 30)
+#define PSC_I2SCFG_RT_FIFO1 (0 << 30)
+#define PSC_I2SCFG_RT_FIFO2 (1 << 30)
+#define PSC_I2SCFG_RT_FIFO4 (2 << 30)
+#define PSC_I2SCFG_RT_FIFO8 (3 << 30)
+
+#define PSC_I2SCFG_TT_MASK (3 << 28)
+#define PSC_I2SCFG_TT_FIFO1 (0 << 28)
+#define PSC_I2SCFG_TT_FIFO2 (1 << 28)
+#define PSC_I2SCFG_TT_FIFO4 (2 << 28)
+#define PSC_I2SCFG_TT_FIFO8 (3 << 28)
+
+#define PSC_I2SCFG_DD_DISABLE (1 << 27)
+#define PSC_I2SCFG_DE_ENABLE (1 << 26)
+#define PSC_I2SCFG_SET_WS(x) (((((x) / 2) - 1) & 0x7f) << 16)
+#define PSC_I2SCFG_WS(n)	(((n) & 0xFF) << 16)
+#define PSC_I2SCFG_WS_MASK (PSC_I2SCFG_WS(0x3F))
+#define PSC_I2SCFG_WI (1 << 15)
+
+#define PSC_I2SCFG_DIV_MASK (3 << 13)
+#define PSC_I2SCFG_DIV2 (0 << 13)
+#define PSC_I2SCFG_DIV4 (1 << 13)
+#define PSC_I2SCFG_DIV8 (2 << 13)
+#define PSC_I2SCFG_DIV16 (3 << 13)
+
+#define PSC_I2SCFG_BI (1 << 12)
+#define PSC_I2SCFG_BUF (1 << 11)
+#define PSC_I2SCFG_MLJ (1 << 10)
+#define PSC_I2SCFG_XM (1 << 9)
+
+/* The word length equation is simply LEN+1. */
+#define PSC_I2SCFG_SET_LEN(x) ((((x) - 1) & 0x1f) << 4)
+#define PSC_I2SCFG_GET_LEN(x) ((((x) >> 4) & 0x1f) + 1)
+
+#define PSC_I2SCFG_LB (1 << 2)
+#define PSC_I2SCFG_MLF (1 << 1)
+#define PSC_I2SCFG_MS (1 << 0)
+
+/* I2S Mask Register. */
+#define PSC_I2SMSK_RR (1 << 13)
+#define PSC_I2SMSK_RO (1 << 12)
+#define PSC_I2SMSK_RU (1 << 11)
+#define PSC_I2SMSK_TR (1 << 10)
+#define PSC_I2SMSK_TO (1 << 9)
+#define PSC_I2SMSK_TU (1 << 8)
+#define PSC_I2SMSK_RD (1 << 5)
+#define PSC_I2SMSK_TD (1 << 4)
+#define PSC_I2SMSK_ALLMASK (PSC_I2SMSK_RR | PSC_I2SMSK_RO | \
+ PSC_I2SMSK_RU | PSC_I2SMSK_TR | \
+ PSC_I2SMSK_TO | PSC_I2SMSK_TU | \
+ PSC_I2SMSK_RD | PSC_I2SMSK_TD)
+
+/* I2S Protocol Control Register. */
+#define PSC_I2SPCR_RC (1 << 6)
+#define PSC_I2SPCR_RP (1 << 5)
+#define PSC_I2SPCR_RS (1 << 4)
+#define PSC_I2SPCR_TC (1 << 2)
+#define PSC_I2SPCR_TP (1 << 1)
+#define PSC_I2SPCR_TS (1 << 0)
+
+/* I2S Status register (read only). */
+#define PSC_I2SSTAT_RF (1 << 13)
+#define PSC_I2SSTAT_RE (1 << 12)
+#define PSC_I2SSTAT_RR (1 << 11)
+#define PSC_I2SSTAT_TF (1 << 10)
+#define PSC_I2SSTAT_TE (1 << 9)
+#define PSC_I2SSTAT_TR (1 << 8)
+#define PSC_I2SSTAT_RB (1 << 5)
+#define PSC_I2SSTAT_TB (1 << 4)
+#define PSC_I2SSTAT_DI (1 << 2)
+#define PSC_I2SSTAT_DR (1 << 1)
+#define PSC_I2SSTAT_SR (1 << 0)
+
+/* I2S Event Register. */
+#define PSC_I2SEVNT_RR (1 << 13)
+#define PSC_I2SEVNT_RO (1 << 12)
+#define PSC_I2SEVNT_RU (1 << 11)
+#define PSC_I2SEVNT_TR (1 << 10)
+#define PSC_I2SEVNT_TO (1 << 9)
+#define PSC_I2SEVNT_TU (1 << 8)
+#define PSC_I2SEVNT_RD (1 << 5)
+#define PSC_I2SEVNT_TD (1 << 4)
+
+/* PSC in SPI Mode. */
+typedef struct psc_spi {
+ u32 psc_sel;
+ u32 psc_ctrl;
+ u32 psc_spicfg;
+ u32 psc_spimsk;
+ u32 psc_spipcr;
+ u32 psc_spistat;
+ u32 psc_spievent;
+ u32 psc_spitxrx;
+} psc_spi_t;
+
+/* SPI Config Register. */
+#define PSC_SPICFG_RT_MASK (3 << 30)
+#define PSC_SPICFG_RT_FIFO1 (0 << 30)
+#define PSC_SPICFG_RT_FIFO2 (1 << 30)
+#define PSC_SPICFG_RT_FIFO4 (2 << 30)
+#define PSC_SPICFG_RT_FIFO8 (3 << 30)
+
+#define PSC_SPICFG_TT_MASK (3 << 28)
+#define PSC_SPICFG_TT_FIFO1 (0 << 28)
+#define PSC_SPICFG_TT_FIFO2 (1 << 28)
+#define PSC_SPICFG_TT_FIFO4 (2 << 28)
+#define PSC_SPICFG_TT_FIFO8 (3 << 28)
+
+#define PSC_SPICFG_DD_DISABLE (1 << 27)
+#define PSC_SPICFG_DE_ENABLE (1 << 26)
+#define PSC_SPICFG_CLR_BAUD(x) ((x) & ~((0x3f) << 15))
+#define PSC_SPICFG_SET_BAUD(x) (((x) & 0x3f) << 15)
+
+#define PSC_SPICFG_SET_DIV(x) (((x) & 0x03) << 13)
+#define PSC_SPICFG_DIV2 0
+#define PSC_SPICFG_DIV4 1
+#define PSC_SPICFG_DIV8 2
+#define PSC_SPICFG_DIV16 3
+
+#define PSC_SPICFG_BI (1 << 12)
+#define PSC_SPICFG_PSE (1 << 11)
+#define PSC_SPICFG_CGE (1 << 10)
+#define PSC_SPICFG_CDE (1 << 9)
+
+#define PSC_SPICFG_CLR_LEN(x) ((x) & ~((0x1f) << 4))
+#define PSC_SPICFG_SET_LEN(x)	((((x) - 1) & 0x1f) << 4)
+
+#define PSC_SPICFG_LB (1 << 3)
+#define PSC_SPICFG_MLF (1 << 1)
+#define PSC_SPICFG_MO (1 << 0)
+
+/* SPI Mask Register. */
+#define PSC_SPIMSK_MM (1 << 16)
+#define PSC_SPIMSK_RR (1 << 13)
+#define PSC_SPIMSK_RO (1 << 12)
+#define PSC_SPIMSK_RU (1 << 11)
+#define PSC_SPIMSK_TR (1 << 10)
+#define PSC_SPIMSK_TO (1 << 9)
+#define PSC_SPIMSK_TU (1 << 8)
+#define PSC_SPIMSK_SD (1 << 5)
+#define PSC_SPIMSK_MD (1 << 4)
+#define PSC_SPIMSK_ALLMASK (PSC_SPIMSK_MM | PSC_SPIMSK_RR | \
+ PSC_SPIMSK_RO | PSC_SPIMSK_TO | \
+ PSC_SPIMSK_TU | PSC_SPIMSK_SD | \
+ PSC_SPIMSK_MD)
+
+/* SPI Protocol Control Register. */
+#define PSC_SPIPCR_RC (1 << 6)
+#define PSC_SPIPCR_SP (1 << 5)
+#define PSC_SPIPCR_SS (1 << 4)
+#define PSC_SPIPCR_TC (1 << 2)
+#define PSC_SPIPCR_MS (1 << 0)
+
+/* SPI Status register (read only). */
+#define PSC_SPISTAT_RF (1 << 13)
+#define PSC_SPISTAT_RE (1 << 12)
+#define PSC_SPISTAT_RR (1 << 11)
+#define PSC_SPISTAT_TF (1 << 10)
+#define PSC_SPISTAT_TE (1 << 9)
+#define PSC_SPISTAT_TR (1 << 8)
+#define PSC_SPISTAT_SB (1 << 5)
+#define PSC_SPISTAT_MB (1 << 4)
+#define PSC_SPISTAT_DI (1 << 2)
+#define PSC_SPISTAT_DR (1 << 1)
+#define PSC_SPISTAT_SR (1 << 0)
+
+/* SPI Event Register. */
+#define PSC_SPIEVNT_MM (1 << 16)
+#define PSC_SPIEVNT_RR (1 << 13)
+#define PSC_SPIEVNT_RO (1 << 12)
+#define PSC_SPIEVNT_RU (1 << 11)
+#define PSC_SPIEVNT_TR (1 << 10)
+#define PSC_SPIEVNT_TO (1 << 9)
+#define PSC_SPIEVNT_TU (1 << 8)
+#define PSC_SPIEVNT_SD (1 << 5)
+#define PSC_SPIEVNT_MD (1 << 4)
+
+/* Transmit register control. */
+#define PSC_SPITXRX_LC (1 << 29)
+#define PSC_SPITXRX_SR (1 << 28)
+
+/* SMBus Config Register. */
+#define PSC_SMBCFG_RT_MASK (3 << 30)
+#define PSC_SMBCFG_RT_FIFO1 (0 << 30)
+#define PSC_SMBCFG_RT_FIFO2 (1 << 30)
+#define PSC_SMBCFG_RT_FIFO4 (2 << 30)
+#define PSC_SMBCFG_RT_FIFO8 (3 << 30)
+
+#define PSC_SMBCFG_TT_MASK (3 << 28)
+#define PSC_SMBCFG_TT_FIFO1 (0 << 28)
+#define PSC_SMBCFG_TT_FIFO2 (1 << 28)
+#define PSC_SMBCFG_TT_FIFO4 (2 << 28)
+#define PSC_SMBCFG_TT_FIFO8 (3 << 28)
+
+#define PSC_SMBCFG_DD_DISABLE (1 << 27)
+#define PSC_SMBCFG_DE_ENABLE (1 << 26)
+
+#define PSC_SMBCFG_SET_DIV(x) (((x) & 0x03) << 13)
+#define PSC_SMBCFG_DIV2 0
+#define PSC_SMBCFG_DIV4 1
+#define PSC_SMBCFG_DIV8 2
+#define PSC_SMBCFG_DIV16 3
+
+#define PSC_SMBCFG_GCE (1 << 9)
+#define PSC_SMBCFG_SFM (1 << 8)
+
+#define PSC_SMBCFG_SET_SLV(x) (((x) & 0x7f) << 1)
+
+/* SMBus Mask Register. */
+#define PSC_SMBMSK_DN (1 << 30)
+#define PSC_SMBMSK_AN (1 << 29)
+#define PSC_SMBMSK_AL (1 << 28)
+#define PSC_SMBMSK_RR (1 << 13)
+#define PSC_SMBMSK_RO (1 << 12)
+#define PSC_SMBMSK_RU (1 << 11)
+#define PSC_SMBMSK_TR (1 << 10)
+#define PSC_SMBMSK_TO (1 << 9)
+#define PSC_SMBMSK_TU (1 << 8)
+#define PSC_SMBMSK_SD (1 << 5)
+#define PSC_SMBMSK_MD (1 << 4)
+#define PSC_SMBMSK_ALLMASK (PSC_SMBMSK_DN | PSC_SMBMSK_AN | \
+ PSC_SMBMSK_AL | PSC_SMBMSK_RR | \
+ PSC_SMBMSK_RO | PSC_SMBMSK_TO | \
+ PSC_SMBMSK_TU | PSC_SMBMSK_SD | \
+ PSC_SMBMSK_MD)
+
+/* SMBus Protocol Control Register. */
+#define PSC_SMBPCR_DC (1 << 2)
+#define PSC_SMBPCR_MS (1 << 0)
+
+/* SMBus Status register (read only). */
+#define PSC_SMBSTAT_BB (1 << 28)
+#define PSC_SMBSTAT_RF (1 << 13)
+#define PSC_SMBSTAT_RE (1 << 12)
+#define PSC_SMBSTAT_RR (1 << 11)
+#define PSC_SMBSTAT_TF (1 << 10)
+#define PSC_SMBSTAT_TE (1 << 9)
+#define PSC_SMBSTAT_TR (1 << 8)
+#define PSC_SMBSTAT_SB (1 << 5)
+#define PSC_SMBSTAT_MB (1 << 4)
+#define PSC_SMBSTAT_DI (1 << 2)
+#define PSC_SMBSTAT_DR (1 << 1)
+#define PSC_SMBSTAT_SR (1 << 0)
+
+/* SMBus Event Register. */
+#define PSC_SMBEVNT_DN (1 << 30)
+#define PSC_SMBEVNT_AN (1 << 29)
+#define PSC_SMBEVNT_AL (1 << 28)
+#define PSC_SMBEVNT_RR (1 << 13)
+#define PSC_SMBEVNT_RO (1 << 12)
+#define PSC_SMBEVNT_RU (1 << 11)
+#define PSC_SMBEVNT_TR (1 << 10)
+#define PSC_SMBEVNT_TO (1 << 9)
+#define PSC_SMBEVNT_TU (1 << 8)
+#define PSC_SMBEVNT_SD (1 << 5)
+#define PSC_SMBEVNT_MD (1 << 4)
+#define PSC_SMBEVNT_ALLCLR (PSC_SMBEVNT_DN | PSC_SMBEVNT_AN | \
+ PSC_SMBEVNT_AL | PSC_SMBEVNT_RR | \
+ PSC_SMBEVNT_RO | PSC_SMBEVNT_TO | \
+ PSC_SMBEVNT_TU | PSC_SMBEVNT_SD | \
+ PSC_SMBEVNT_MD)
+
+/* Transmit register control. */
+#define PSC_SMBTXRX_RSR (1 << 28)
+#define PSC_SMBTXRX_STP (1 << 29)
+#define PSC_SMBTXRX_DATAMASK 0xff
+
+/* SMBus protocol timers register. */
+#define PSC_SMBTMR_SET_TH(x) (((x) & 0x03) << 30)
+#define PSC_SMBTMR_SET_PS(x) (((x) & 0x1f) << 25)
+#define PSC_SMBTMR_SET_PU(x) (((x) & 0x1f) << 20)
+#define PSC_SMBTMR_SET_SH(x) (((x) & 0x1f) << 15)
+#define PSC_SMBTMR_SET_SU(x) (((x) & 0x1f) << 10)
+#define PSC_SMBTMR_SET_CL(x) (((x) & 0x1f) << 5)
+#define PSC_SMBTMR_SET_CH(x) (((x) & 0x1f) << 0)
+
+#endif /* _AU1000_PSC_H_ */
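A sketch, not taken from any driver, of how the common select/control registers and the AC97 config macros above combine; the register base pointer and the exact programming order are assumptions.

#include <linux/io.h>
#include <asm/mach-au1x00/au1xxx_psc.h>

static void my_psc_ac97_setup(void __iomem *psc_base)
{
	u32 cfg;

	/* pick the internal clock and the AC97 protocol, keep the PSC off */
	__raw_writel(PSC_SEL_CLK_INTCLK | PSC_SEL_PS_AC97MODE,
		     psc_base + PSC_SEL_OFFSET);
	__raw_writel(PSC_CTRL_DISABLE, psc_base + PSC_CTRL_OFFSET);
	wmb();

	/* 16-bit slot data on slots 3/4, deepest FIFO thresholds */
	cfg = PSC_AC97CFG_SET_LEN(16) |
	      PSC_AC97CFG_TXSLOT_ENA(3) | PSC_AC97CFG_TXSLOT_ENA(4) |
	      PSC_AC97CFG_RXSLOT_ENA(3) | PSC_AC97CFG_RXSLOT_ENA(4) |
	      PSC_AC97CFG_RT_FIFO8 | PSC_AC97CFG_TT_FIFO8;
	__raw_writel(cfg, psc_base + PSC_AC97CFG_OFFSET);

	__raw_writel(PSC_CTRL_ENABLE, psc_base + PSC_CTRL_OFFSET);
	wmb();
}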
diff --git a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h
new file mode 100644
index 000000000..e6e527224
--- /dev/null
+++ b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h
@@ -0,0 +1,94 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef __ASM_MACH_AU1X00_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_AU1X00_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb 1
+#define cpu_has_ftlb 0
+#define cpu_has_tlbinv 0
+#define cpu_has_segments 0
+#define cpu_has_eva 0
+#define cpu_has_htw 0
+#define cpu_has_ldpte 0
+#define cpu_has_rixiex 0
+#define cpu_has_maar 0
+#define cpu_has_rw_llb 0
+#define cpu_has_3kex 0
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 1
+#define cpu_has_mcheck 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+#define cpu_has_guestctl0ext 0
+#define cpu_has_guestctl1 0
+#define cpu_has_guestctl2 0
+#define cpu_has_guestid 0
+#define cpu_has_drg 0
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+#define cpu_has_rixi 0
+#define cpu_has_mmips 0
+#define cpu_has_lpa 0
+#define cpu_has_mhv 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_dc_aliases 0
+#define cpu_has_ic_fills_f_dc 1
+#define cpu_has_pindexed_dcache 0
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 0
+#define cpu_has_mips32r6 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+#define cpu_has_mips64r6 0
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_dsp3 0
+#define cpu_has_mipsmt 0
+#define cpu_has_vp 0
+#define cpu_has_userlocal 0
+#define cpu_has_nofpuex 0
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_vint 0
+#define cpu_has_veic 0
+#define cpu_has_inclusive_pcaches 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+#define cpu_scache_line_size() 0
+#define cpu_tcache_line_size() 0
+
+#define cpu_has_perf_cntr_intr_bit 0
+#define cpu_has_vz 0
+#define cpu_has_msa 0
+#define cpu_has_ufr 0
+#define cpu_has_fre 0
+#define cpu_has_cdmm 0
+#define cpu_has_small_pages 0
+#define cpu_has_nan_legacy 1
+#define cpu_has_nan_2008 1
+#define cpu_has_ebase_wg 0
+#define cpu_has_badinstr 0
+#define cpu_has_badinstrp 0
+#define cpu_has_contextconfig 0
+#define cpu_has_perf 0
+
+#endif /* __ASM_MACH_AU1X00_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
new file mode 100644
index 000000000..adde1fa50
--- /dev/null
+++ b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
@@ -0,0 +1,532 @@
+/*
+ * GPIO functions for Au1000, Au1500, Au1100, Au1550, Au1200
+ *
+ * Copyright (c) 2009 Manuel Lauss.
+ *
+ * Licensed under the terms outlined in the file COPYING.
+ */
+
+#ifndef _ALCHEMY_GPIO_AU1000_H_
+#define _ALCHEMY_GPIO_AU1000_H_
+
+#include <asm/mach-au1x00/au1000.h>
+
+/* The default GPIO numberspace as documented in the Alchemy manuals.
+ * GPIO0-31 from GPIO1 block, GPIO200-215 from GPIO2 block.
+ */
+#define ALCHEMY_GPIO1_BASE 0
+#define ALCHEMY_GPIO2_BASE 200
+
+#define ALCHEMY_GPIO1_NUM 32
+#define ALCHEMY_GPIO2_NUM 16
+#define ALCHEMY_GPIO1_MAX (ALCHEMY_GPIO1_BASE + ALCHEMY_GPIO1_NUM - 1)
+#define ALCHEMY_GPIO2_MAX (ALCHEMY_GPIO2_BASE + ALCHEMY_GPIO2_NUM - 1)
+
+#define MAKE_IRQ(intc, off) (AU1000_INTC##intc##_INT_BASE + (off))
+
+/* GPIO1 registers within SYS_ area */
+#define AU1000_SYS_TRIOUTRD 0x100
+#define AU1000_SYS_TRIOUTCLR 0x100
+#define AU1000_SYS_OUTPUTRD 0x108
+#define AU1000_SYS_OUTPUTSET 0x108
+#define AU1000_SYS_OUTPUTCLR 0x10C
+#define AU1000_SYS_PINSTATERD 0x110
+#define AU1000_SYS_PININPUTEN 0x110
+
+/* register offsets within GPIO2 block */
+#define AU1000_GPIO2_DIR 0x00
+#define AU1000_GPIO2_OUTPUT 0x08
+#define AU1000_GPIO2_PINSTATE 0x0C
+#define AU1000_GPIO2_INTENABLE 0x10
+#define AU1000_GPIO2_ENABLE 0x14
+
+struct gpio;
+
+static inline int au1000_gpio1_to_irq(int gpio)
+{
+ return MAKE_IRQ(1, gpio - ALCHEMY_GPIO1_BASE);
+}
+
+static inline int au1000_gpio2_to_irq(int gpio)
+{
+ return -ENXIO;
+}
+
+static inline int au1000_irq_to_gpio(int irq)
+{
+ if ((irq >= AU1000_GPIO0_INT) && (irq <= AU1000_GPIO31_INT))
+ return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO0_INT) + 0;
+
+ return -ENXIO;
+}
+
+static inline int au1500_gpio1_to_irq(int gpio)
+{
+ gpio -= ALCHEMY_GPIO1_BASE;
+
+ switch (gpio) {
+ case 0 ... 15:
+ case 20:
+ case 23 ... 28: return MAKE_IRQ(1, gpio);
+ }
+
+ return -ENXIO;
+}
+
+static inline int au1500_gpio2_to_irq(int gpio)
+{
+ gpio -= ALCHEMY_GPIO2_BASE;
+
+ switch (gpio) {
+ case 0 ... 3: return MAKE_IRQ(1, 16 + gpio - 0);
+ case 4 ... 5: return MAKE_IRQ(1, 21 + gpio - 4);
+ case 6 ... 7: return MAKE_IRQ(1, 29 + gpio - 6);
+ }
+
+ return -ENXIO;
+}
+
+static inline int au1500_irq_to_gpio(int irq)
+{
+ switch (irq) {
+ case AU1500_GPIO0_INT ... AU1500_GPIO15_INT:
+ case AU1500_GPIO20_INT:
+ case AU1500_GPIO23_INT ... AU1500_GPIO28_INT:
+ return ALCHEMY_GPIO1_BASE + (irq - AU1500_GPIO0_INT) + 0;
+ case AU1500_GPIO200_INT ... AU1500_GPIO203_INT:
+ return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO200_INT) + 0;
+ case AU1500_GPIO204_INT ... AU1500_GPIO205_INT:
+ return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO204_INT) + 4;
+ case AU1500_GPIO206_INT ... AU1500_GPIO207_INT:
+ return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO206_INT) + 6;
+ case AU1500_GPIO208_215_INT:
+ return ALCHEMY_GPIO2_BASE + 8;
+ }
+
+ return -ENXIO;
+}
+
+static inline int au1100_gpio1_to_irq(int gpio)
+{
+ return MAKE_IRQ(1, gpio - ALCHEMY_GPIO1_BASE);
+}
+
+static inline int au1100_gpio2_to_irq(int gpio)
+{
+ gpio -= ALCHEMY_GPIO2_BASE;
+
+ if ((gpio >= 8) && (gpio <= 15))
+ return MAKE_IRQ(0, 29); /* shared GPIO208_215 */
+
+ return -ENXIO;
+}
+
+static inline int au1100_irq_to_gpio(int irq)
+{
+ switch (irq) {
+ case AU1100_GPIO0_INT ... AU1100_GPIO31_INT:
+ return ALCHEMY_GPIO1_BASE + (irq - AU1100_GPIO0_INT) + 0;
+ case AU1100_GPIO208_215_INT:
+ return ALCHEMY_GPIO2_BASE + 8;
+ }
+
+ return -ENXIO;
+}
+
+static inline int au1550_gpio1_to_irq(int gpio)
+{
+ gpio -= ALCHEMY_GPIO1_BASE;
+
+ switch (gpio) {
+ case 0 ... 15:
+ case 20 ... 28: return MAKE_IRQ(1, gpio);
+ case 16 ... 17: return MAKE_IRQ(1, 18 + gpio - 16);
+ }
+
+ return -ENXIO;
+}
+
+static inline int au1550_gpio2_to_irq(int gpio)
+{
+ gpio -= ALCHEMY_GPIO2_BASE;
+
+ switch (gpio) {
+ case 0: return MAKE_IRQ(1, 16);
+ case 1 ... 5: return MAKE_IRQ(1, 17); /* shared GPIO201_205 */
+ case 6 ... 7: return MAKE_IRQ(1, 29 + gpio - 6);
+ case 8 ... 15: return MAKE_IRQ(1, 31); /* shared GPIO208_215 */
+ }
+
+ return -ENXIO;
+}
+
+static inline int au1550_irq_to_gpio(int irq)
+{
+ switch (irq) {
+ case AU1550_GPIO0_INT ... AU1550_GPIO15_INT:
+ return ALCHEMY_GPIO1_BASE + (irq - AU1550_GPIO0_INT) + 0;
+ case AU1550_GPIO200_INT:
+ case AU1550_GPIO201_205_INT:
+ return ALCHEMY_GPIO2_BASE + (irq - AU1550_GPIO200_INT) + 0;
+ case AU1550_GPIO16_INT ... AU1550_GPIO28_INT:
+ return ALCHEMY_GPIO1_BASE + (irq - AU1550_GPIO16_INT) + 16;
+ case AU1550_GPIO206_INT ... AU1550_GPIO208_215_INT:
+ return ALCHEMY_GPIO2_BASE + (irq - AU1550_GPIO206_INT) + 6;
+ }
+
+ return -ENXIO;
+}
+
+static inline int au1200_gpio1_to_irq(int gpio)
+{
+ return MAKE_IRQ(1, gpio - ALCHEMY_GPIO1_BASE);
+}
+
+static inline int au1200_gpio2_to_irq(int gpio)
+{
+ gpio -= ALCHEMY_GPIO2_BASE;
+
+ switch (gpio) {
+ case 0 ... 2: return MAKE_IRQ(0, 5 + gpio - 0);
+ case 3: return MAKE_IRQ(0, 22);
+ case 4 ... 7: return MAKE_IRQ(0, 24 + gpio - 4);
+ case 8 ... 15: return MAKE_IRQ(0, 28); /* shared GPIO208_215 */
+ }
+
+ return -ENXIO;
+}
+
+static inline int au1200_irq_to_gpio(int irq)
+{
+ switch (irq) {
+ case AU1200_GPIO0_INT ... AU1200_GPIO31_INT:
+ return ALCHEMY_GPIO1_BASE + (irq - AU1200_GPIO0_INT) + 0;
+ case AU1200_GPIO200_INT ... AU1200_GPIO202_INT:
+ return ALCHEMY_GPIO2_BASE + (irq - AU1200_GPIO200_INT) + 0;
+ case AU1200_GPIO203_INT:
+ return ALCHEMY_GPIO2_BASE + 3;
+ case AU1200_GPIO204_INT ... AU1200_GPIO208_215_INT:
+ return ALCHEMY_GPIO2_BASE + (irq - AU1200_GPIO204_INT) + 4;
+ }
+
+ return -ENXIO;
+}
+
+/*
+ * GPIO1 block macros for common Linux GPIO functions.
+ */
+static inline void alchemy_gpio1_set_value(int gpio, int v)
+{
+ unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE);
+ unsigned long r = v ? AU1000_SYS_OUTPUTSET : AU1000_SYS_OUTPUTCLR;
+ alchemy_wrsys(mask, r);
+}
+
+static inline int alchemy_gpio1_get_value(int gpio)
+{
+ unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE);
+ return alchemy_rdsys(AU1000_SYS_PINSTATERD) & mask;
+}
+
+static inline int alchemy_gpio1_direction_input(int gpio)
+{
+ unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE);
+ alchemy_wrsys(mask, AU1000_SYS_TRIOUTCLR);
+ return 0;
+}
+
+static inline int alchemy_gpio1_direction_output(int gpio, int v)
+{
+ /* hardware switches to "output" mode when one of the two
+ * "set_value" registers is accessed.
+ */
+ alchemy_gpio1_set_value(gpio, v);
+ return 0;
+}
+
+static inline int alchemy_gpio1_is_valid(int gpio)
+{
+ return ((gpio >= ALCHEMY_GPIO1_BASE) && (gpio <= ALCHEMY_GPIO1_MAX));
+}
+
+static inline int alchemy_gpio1_to_irq(int gpio)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ return au1000_gpio1_to_irq(gpio);
+ case ALCHEMY_CPU_AU1100:
+ return au1100_gpio1_to_irq(gpio);
+ case ALCHEMY_CPU_AU1500:
+ return au1500_gpio1_to_irq(gpio);
+ case ALCHEMY_CPU_AU1550:
+ return au1550_gpio1_to_irq(gpio);
+ case ALCHEMY_CPU_AU1200:
+ return au1200_gpio1_to_irq(gpio);
+ }
+ return -ENXIO;
+}
+
+/* On Au1000, Au1500 and Au1100 GPIOs won't work as inputs before
+ * SYS_PININPUTEN is written to at least once. On Au1550/Au1200/Au1300 this
+ * register enables use of GPIOs as wake source.
+ */
+static inline void alchemy_gpio1_input_enable(void)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR);
+ __raw_writel(0, base + 0x110); /* the write op is key */
+ wmb();
+}
+
+/*
+ * GPIO2 block macros for common Linux GPIO functions. The 'gpio'
+ * parameter must be in range of ALCHEMY_GPIO2_BASE..ALCHEMY_GPIO2_MAX.
+ */
+static inline void __alchemy_gpio2_mod_dir(int gpio, int to_out)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR);
+ unsigned long mask = 1 << (gpio - ALCHEMY_GPIO2_BASE);
+ unsigned long d = __raw_readl(base + AU1000_GPIO2_DIR);
+
+ if (to_out)
+ d |= mask;
+ else
+ d &= ~mask;
+ __raw_writel(d, base + AU1000_GPIO2_DIR);
+ wmb();
+}
+
+static inline void alchemy_gpio2_set_value(int gpio, int v)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR);
+ unsigned long mask;
+ mask = ((v) ? 0x00010001 : 0x00010000) << (gpio - ALCHEMY_GPIO2_BASE);
+ __raw_writel(mask, base + AU1000_GPIO2_OUTPUT);
+ wmb();
+}
+
+static inline int alchemy_gpio2_get_value(int gpio)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR);
+ return __raw_readl(base + AU1000_GPIO2_PINSTATE) &
+ (1 << (gpio - ALCHEMY_GPIO2_BASE));
+}
+
+static inline int alchemy_gpio2_direction_input(int gpio)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ __alchemy_gpio2_mod_dir(gpio, 0);
+ local_irq_restore(flags);
+ return 0;
+}
+
+static inline int alchemy_gpio2_direction_output(int gpio, int v)
+{
+ unsigned long flags;
+ alchemy_gpio2_set_value(gpio, v);
+ local_irq_save(flags);
+ __alchemy_gpio2_mod_dir(gpio, 1);
+ local_irq_restore(flags);
+ return 0;
+}
+
+static inline int alchemy_gpio2_is_valid(int gpio)
+{
+ return ((gpio >= ALCHEMY_GPIO2_BASE) && (gpio <= ALCHEMY_GPIO2_MAX));
+}
+
+static inline int alchemy_gpio2_to_irq(int gpio)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ return au1000_gpio2_to_irq(gpio);
+ case ALCHEMY_CPU_AU1100:
+ return au1100_gpio2_to_irq(gpio);
+ case ALCHEMY_CPU_AU1500:
+ return au1500_gpio2_to_irq(gpio);
+ case ALCHEMY_CPU_AU1550:
+ return au1550_gpio2_to_irq(gpio);
+ case ALCHEMY_CPU_AU1200:
+ return au1200_gpio2_to_irq(gpio);
+ }
+ return -ENXIO;
+}
+
+/**********************************************************************/
+
+/* GPIO2 shared interrupts and control */
+
+static inline void __alchemy_gpio2_mod_int(int gpio2, int en)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR);
+ unsigned long r = __raw_readl(base + AU1000_GPIO2_INTENABLE);
+ if (en)
+ r |= 1 << gpio2;
+ else
+ r &= ~(1 << gpio2);
+ __raw_writel(r, base + AU1000_GPIO2_INTENABLE);
+ wmb();
+}
+
+/**
+ * alchemy_gpio2_enable_int - Enable a GPIO2 pin's shared irq contribution.
+ * @gpio2: The GPIO2 pin to activate (200...215).
+ *
+ * GPIO208-215 have one shared interrupt line to the INTC. They are
+ * and'ed with a per-pin enable bit and finally or'ed together to form
+ * a single irq request (useful for active-high sources).
+ * With this function, a pin's individual contribution to the int request
+ * can be enabled. As with all other GPIO-based interrupts, the INTC
+ * must be programmed to accept the GPIO208_215 interrupt as well.
+ *
+ * NOTE: Calling this function is only necessary for GPIO208-215; all other
+ * GPIO2-based interrupts have their own request to the INTC. Please
+ * consult your Alchemy databook for more information!
+ *
+ * NOTE: On the Au1550, GPIOs 201-205 also have a shared interrupt request
+ * line to the INTC, GPIO201_205. This function can be used for those
+ * as well.
+ *
+ * NOTE: The 'gpio2' parameter must be within the GPIO2 numberspace
+ * (200-215 by default); no sanity checks are made.
+ */
+static inline void alchemy_gpio2_enable_int(int gpio2)
+{
+ unsigned long flags;
+
+ gpio2 -= ALCHEMY_GPIO2_BASE;
+
+ /* Au1100/Au1500 have GPIO208-215 enable bits at 0..7 */
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1100:
+ case ALCHEMY_CPU_AU1500:
+ gpio2 -= 8;
+ }
+
+ local_irq_save(flags);
+ __alchemy_gpio2_mod_int(gpio2, 1);
+ local_irq_restore(flags);
+}
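+
+/* A minimal sketch, assuming Au1500 board code with a (hypothetical)
+ * device interrupting on GPIO 212: the pin's contribution to the shared
+ * GPIO208_215 request is switched on here, while the INTC line itself is
+ * claimed separately (e.g. request_irq(AU1500_GPIO208_215_INT, ...)).
+ *
+ *   alchemy_gpio2_direction_input(212);
+ *   alchemy_gpio2_enable_int(212);
+ */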
+
+/**
+ * alchemy_gpio2_disable_int - Disable a GPIO2 pin's shared irq contribution.
+ * @gpio2: The GPIO2 pin to activate (200...215).
+ *
+ * See alchemy_gpio2_enable_int() for more information.
+ */
+static inline void alchemy_gpio2_disable_int(int gpio2)
+{
+ unsigned long flags;
+
+ gpio2 -= ALCHEMY_GPIO2_BASE;
+
+ /* Au1100/Au1500 have GPIO208-215 enable bits at 0..7 */
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1100:
+ case ALCHEMY_CPU_AU1500:
+ gpio2 -= 8;
+ }
+
+ local_irq_save(flags);
+ __alchemy_gpio2_mod_int(gpio2, 0);
+ local_irq_restore(flags);
+}
+
+/**
+ * alchemy_gpio2_enable - Activate GPIO2 block.
+ *
+ * The GPIO2 block must be enabled explicitly to work. On systems
+ * where this isn't done by the bootloader, this function can be used.
+ */
+static inline void alchemy_gpio2_enable(void)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR);
+ __raw_writel(3, base + AU1000_GPIO2_ENABLE); /* reset, clock enabled */
+ wmb();
+ __raw_writel(1, base + AU1000_GPIO2_ENABLE); /* clock enabled */
+ wmb();
+}
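+
+/* A minimal sketch, assuming board init code on a system whose bootloader
+ * leaves the GPIO2 block disabled (pin number 203 is hypothetical):
+ *
+ *   alchemy_gpio2_enable();
+ *   alchemy_gpio2_direction_output(203, 0);
+ */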
+
+/**
+ * alchemy_gpio2_disable - disable GPIO2 block.
+ *
+ * Disable and put GPIO2 block in low-power mode.
+ */
+static inline void alchemy_gpio2_disable(void)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR);
+ __raw_writel(2, base + AU1000_GPIO2_ENABLE); /* reset, clock disabled */
+ wmb();
+}
+
+/**********************************************************************/
+
+/* wrappers for on-chip gpios; can be used before gpio chips have been
+ * registered with gpiolib.
+ */
+static inline int alchemy_gpio_direction_input(int gpio)
+{
+ return (gpio >= ALCHEMY_GPIO2_BASE) ?
+ alchemy_gpio2_direction_input(gpio) :
+ alchemy_gpio1_direction_input(gpio);
+}
+
+static inline int alchemy_gpio_direction_output(int gpio, int v)
+{
+ return (gpio >= ALCHEMY_GPIO2_BASE) ?
+ alchemy_gpio2_direction_output(gpio, v) :
+ alchemy_gpio1_direction_output(gpio, v);
+}
+
+static inline int alchemy_gpio_get_value(int gpio)
+{
+ return (gpio >= ALCHEMY_GPIO2_BASE) ?
+ alchemy_gpio2_get_value(gpio) :
+ alchemy_gpio1_get_value(gpio);
+}
+
+static inline void alchemy_gpio_set_value(int gpio, int v)
+{
+ if (gpio >= ALCHEMY_GPIO2_BASE)
+ alchemy_gpio2_set_value(gpio, v);
+ else
+ alchemy_gpio1_set_value(gpio, v);
+}
+
+static inline int alchemy_gpio_is_valid(int gpio)
+{
+ return (gpio >= ALCHEMY_GPIO2_BASE) ?
+ alchemy_gpio2_is_valid(gpio) :
+ alchemy_gpio1_is_valid(gpio);
+}
+
+static inline int alchemy_gpio_cansleep(int gpio)
+{
+ return 0; /* Alchemy never gets tired */
+}
+
+static inline int alchemy_gpio_to_irq(int gpio)
+{
+ return (gpio >= ALCHEMY_GPIO2_BASE) ?
+ alchemy_gpio2_to_irq(gpio) :
+ alchemy_gpio1_to_irq(gpio);
+}
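+
+/* A minimal sketch, assuming early board code that must deassert a
+ * (hypothetical) peripheral reset on GPIO 205 before gpiolib is up:
+ *
+ *   if (alchemy_gpio_is_valid(205))
+ *           alchemy_gpio_direction_output(205, 1);
+ */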
+
+static inline int alchemy_irq_to_gpio(int irq)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ return au1000_irq_to_gpio(irq);
+ case ALCHEMY_CPU_AU1100:
+ return au1100_irq_to_gpio(irq);
+ case ALCHEMY_CPU_AU1500:
+ return au1500_irq_to_gpio(irq);
+ case ALCHEMY_CPU_AU1550:
+ return au1550_irq_to_gpio(irq);
+ case ALCHEMY_CPU_AU1200:
+ return au1200_irq_to_gpio(irq);
+ }
+ return -ENXIO;
+}
+
+#endif /* _ALCHEMY_GPIO_AU1000_H_ */
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
new file mode 100644
index 000000000..d16add7ba
--- /dev/null
+++ b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * gpio-au1300.h -- GPIO control for Au1300 GPIC and compatibles.
+ *
+ * Copyright (c) 2009-2011 Manuel Lauss <manuel.lauss@googlemail.com>
+ */
+
+#ifndef _GPIO_AU1300_H_
+#define _GPIO_AU1300_H_
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/mach-au1x00/au1000.h>
+
+struct gpio;
+struct gpio_chip;
+
+/* with the current GPIC design, up to 128 GPIOs are possible.
+ * The only implementation so far is in the Au1300, which has 75 externally
+ * available GPIOs.
+ */
+#define AU1300_GPIO_BASE 0
+#define AU1300_GPIO_NUM 75
+#define AU1300_GPIO_MAX (AU1300_GPIO_BASE + AU1300_GPIO_NUM - 1)
+
+#define AU1300_GPIC_ADDR \
+ (void __iomem *)KSEG1ADDR(AU1300_GPIC_PHYS_ADDR)
+
+static inline int au1300_gpio_get_value(unsigned int gpio)
+{
+ void __iomem *roff = AU1300_GPIC_ADDR;
+ int bit;
+
+ gpio -= AU1300_GPIO_BASE;
+ roff += GPIC_GPIO_BANKOFF(gpio);
+ bit = GPIC_GPIO_TO_BIT(gpio);
+ return __raw_readl(roff + AU1300_GPIC_PINVAL) & bit;
+}
+
+static inline int au1300_gpio_direction_input(unsigned int gpio)
+{
+ void __iomem *roff = AU1300_GPIC_ADDR;
+ unsigned long bit;
+
+ gpio -= AU1300_GPIO_BASE;
+
+ roff += GPIC_GPIO_BANKOFF(gpio);
+ bit = GPIC_GPIO_TO_BIT(gpio);
+ __raw_writel(bit, roff + AU1300_GPIC_DEVCLR);
+ wmb();
+
+ return 0;
+}
+
+static inline int au1300_gpio_set_value(unsigned int gpio, int v)
+{
+ void __iomem *roff = AU1300_GPIC_ADDR;
+ unsigned long bit;
+
+ gpio -= AU1300_GPIO_BASE;
+
+ roff += GPIC_GPIO_BANKOFF(gpio);
+ bit = GPIC_GPIO_TO_BIT(gpio);
+ __raw_writel(bit, roff + (v ? AU1300_GPIC_PINVAL
+ : AU1300_GPIC_PINVALCLR));
+ wmb();
+
+ return 0;
+}
+
+static inline int au1300_gpio_direction_output(unsigned int gpio, int v)
+{
+ /* hw switches to output automatically */
+ return au1300_gpio_set_value(gpio, v);
+}
+
+static inline int au1300_gpio_to_irq(unsigned int gpio)
+{
+ return AU1300_FIRST_INT + (gpio - AU1300_GPIO_BASE);
+}
+
+static inline int au1300_irq_to_gpio(unsigned int irq)
+{
+ return (irq - AU1300_FIRST_INT) + AU1300_GPIO_BASE;
+}
+
+static inline int au1300_gpio_is_valid(unsigned int gpio)
+{
+ int ret;
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1300:
+ ret = ((gpio >= AU1300_GPIO_BASE) && (gpio <= AU1300_GPIO_MAX));
+ break;
+ default:
+ ret = 0;
+ }
+ return ret;
+}
+
+static inline int au1300_gpio_cansleep(unsigned int gpio)
+{
+ return 0;
+}
+
+/* hardware remembers gpio 0-63 levels on powerup */
+static inline int au1300_gpio_getinitlvl(unsigned int gpio)
+{
+ void __iomem *roff = AU1300_GPIC_ADDR;
+ unsigned long v;
+
+ if (unlikely(gpio > 63))
+ return 0;
+ else if (gpio > 31) {
+ gpio -= 32;
+ roff += 4;
+ }
+
+ v = __raw_readl(roff + AU1300_GPIC_RSTVAL);
+ return (v >> gpio) & 1;
+}
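+
+/* A minimal sketch, assuming a (hypothetical) strap pin on GPIO 12 whose
+ * latched power-up level selects a board variant:
+ *
+ *   if (au1300_gpio_getinitlvl(12))
+ *           setup_variant_b();   // hypothetical helpers
+ *   else
+ *           setup_variant_a();
+ */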
+
+#endif /* _GPIO_AU1300_H_ */
diff --git a/arch/mips/include/asm/mach-au1x00/prom.h b/arch/mips/include/asm/mach-au1x00/prom.h
new file mode 100644
index 000000000..c62ee0246
--- /dev/null
+++ b/arch/mips/include/asm/mach-au1x00/prom.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __AU1X00_PROM_H
+#define __AU1X00_PROM_H
+
+extern int prom_argc;
+extern char **prom_argv;
+extern char **prom_envp;
+
+extern void prom_init_cmdline(void);
+extern char *prom_getenv(char *envname);
+extern int prom_get_ethernet_addr(char *ethernet_addr);
+
+#endif
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
new file mode 100644
index 000000000..93817bfb7
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net>
+ */
+
+#ifndef __ASM_BCM47XX_H
+#define __ASM_BCM47XX_H
+
+#include <linux/ssb/ssb.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_soc.h>
+#include <linux/bcm47xx_nvram.h>
+#include <linux/bcm47xx_sprom.h>
+
+enum bcm47xx_bus_type {
+#ifdef CONFIG_BCM47XX_SSB
+ BCM47XX_BUS_TYPE_SSB,
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ BCM47XX_BUS_TYPE_BCMA,
+#endif
+};
+
+union bcm47xx_bus {
+#ifdef CONFIG_BCM47XX_SSB
+ struct ssb_bus ssb;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ struct bcma_soc bcma;
+#endif
+};
+
+extern union bcm47xx_bus bcm47xx_bus;
+extern enum bcm47xx_bus_type bcm47xx_bus_type;
+
+void bcm47xx_set_system_type(u16 chip_id);
+
+#endif /* __ASM_BCM47XX_H */
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
new file mode 100644
index 000000000..f879be3e8
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BCM47XX_BOARD_H
+#define __BCM47XX_BOARD_H
+
+enum bcm47xx_board {
+ BCM47XX_BOARD_ASUS_RTAC66U,
+ BCM47XX_BOARD_ASUS_RTN10,
+ BCM47XX_BOARD_ASUS_RTN10D,
+ BCM47XX_BOARD_ASUS_RTN10U,
+ BCM47XX_BOARD_ASUS_RTN12,
+ BCM47XX_BOARD_ASUS_RTN12B1,
+ BCM47XX_BOARD_ASUS_RTN12C1,
+ BCM47XX_BOARD_ASUS_RTN12D1,
+ BCM47XX_BOARD_ASUS_RTN12HP,
+ BCM47XX_BOARD_ASUS_RTN15U,
+ BCM47XX_BOARD_ASUS_RTN16,
+ BCM47XX_BOARD_ASUS_RTN53,
+ BCM47XX_BOARD_ASUS_RTN66U,
+ BCM47XX_BOARD_ASUS_WL300G,
+ BCM47XX_BOARD_ASUS_WL320GE,
+ BCM47XX_BOARD_ASUS_WL330GE,
+ BCM47XX_BOARD_ASUS_WL500G,
+ BCM47XX_BOARD_ASUS_WL500GD,
+ BCM47XX_BOARD_ASUS_WL500GPV1,
+ BCM47XX_BOARD_ASUS_WL500GPV2,
+ BCM47XX_BOARD_ASUS_WL500W,
+ BCM47XX_BOARD_ASUS_WL520GC,
+ BCM47XX_BOARD_ASUS_WL520GU,
+ BCM47XX_BOARD_ASUS_WL700GE,
+ BCM47XX_BOARD_ASUS_WLHDD,
+
+ BCM47XX_BOARD_BELKIN_F7D3301,
+ BCM47XX_BOARD_BELKIN_F7D3302,
+ BCM47XX_BOARD_BELKIN_F7D4301,
+ BCM47XX_BOARD_BELKIN_F7D4302,
+ BCM47XX_BOARD_BELKIN_F7D4401,
+
+ BCM47XX_BOARD_BUFFALO_WBR2_G54,
+ BCM47XX_BOARD_BUFFALO_WHR2_A54G54,
+ BCM47XX_BOARD_BUFFALO_WHR_G125,
+ BCM47XX_BOARD_BUFFALO_WHR_G54S,
+ BCM47XX_BOARD_BUFFALO_WHR_HP_G54,
+ BCM47XX_BOARD_BUFFALO_WLA2_G54L,
+ BCM47XX_BOARD_BUFFALO_WZR_G300N,
+ BCM47XX_BOARD_BUFFALO_WZR_RS_G54,
+ BCM47XX_BOARD_BUFFALO_WZR_RS_G54HP,
+
+ BCM47XX_BOARD_CISCO_M10V1,
+ BCM47XX_BOARD_CISCO_M20V1,
+
+ BCM47XX_BOARD_DELL_TM2300,
+
+ BCM47XX_BOARD_DLINK_DIR130,
+ BCM47XX_BOARD_DLINK_DIR330,
+
+ BCM47XX_BOARD_HUAWEI_E970,
+
+ BCM47XX_BOARD_LINKSYS_E900V1,
+ BCM47XX_BOARD_LINKSYS_E1000V1,
+ BCM47XX_BOARD_LINKSYS_E1000V2,
+ BCM47XX_BOARD_LINKSYS_E1000V21,
+ BCM47XX_BOARD_LINKSYS_E1200V2,
+ BCM47XX_BOARD_LINKSYS_E2000V1,
+ BCM47XX_BOARD_LINKSYS_E3000V1,
+ BCM47XX_BOARD_LINKSYS_E3200V1,
+ BCM47XX_BOARD_LINKSYS_E4200V1,
+ BCM47XX_BOARD_LINKSYS_WRT150NV1,
+ BCM47XX_BOARD_LINKSYS_WRT150NV11,
+ BCM47XX_BOARD_LINKSYS_WRT160NV1,
+ BCM47XX_BOARD_LINKSYS_WRT160NV3,
+ BCM47XX_BOARD_LINKSYS_WRT300N_V1,
+ BCM47XX_BOARD_LINKSYS_WRT300NV11,
+ BCM47XX_BOARD_LINKSYS_WRT310NV1,
+ BCM47XX_BOARD_LINKSYS_WRT310NV2,
+ BCM47XX_BOARD_LINKSYS_WRT54G3GV2,
+ BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0101,
+ BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0467,
+ BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0708,
+ BCM47XX_BOARD_LINKSYS_WRT600N_V11,
+ BCM47XX_BOARD_LINKSYS_WRT610NV1,
+ BCM47XX_BOARD_LINKSYS_WRT610NV2,
+ BCM47XX_BOARD_LINKSYS_WRTSL54GS,
+
+ BCM47XX_BOARD_LUXUL_ABR_4400_V1,
+ BCM47XX_BOARD_LUXUL_XAP_310_V1,
+ BCM47XX_BOARD_LUXUL_XAP_1210_V1,
+ BCM47XX_BOARD_LUXUL_XAP_1230_V1,
+ BCM47XX_BOARD_LUXUL_XAP_1240_V1,
+ BCM47XX_BOARD_LUXUL_XAP_1500_V1,
+ BCM47XX_BOARD_LUXUL_XBR_4400_V1,
+ BCM47XX_BOARD_LUXUL_XVW_P30_V1,
+ BCM47XX_BOARD_LUXUL_XWR_600_V1,
+ BCM47XX_BOARD_LUXUL_XWR_1750_V1,
+
+ BCM47XX_BOARD_MICROSOFT_MN700,
+
+ BCM47XX_BOARD_MOTOROLA_WE800G,
+ BCM47XX_BOARD_MOTOROLA_WR850GP,
+ BCM47XX_BOARD_MOTOROLA_WR850GV2V3,
+
+ BCM47XX_BOARD_NETGEAR_R6200_V1,
+ BCM47XX_BOARD_NETGEAR_WGR614V8,
+ BCM47XX_BOARD_NETGEAR_WGR614V9,
+ BCM47XX_BOARD_NETGEAR_WGR614_V10,
+ BCM47XX_BOARD_NETGEAR_WNDR3300,
+ BCM47XX_BOARD_NETGEAR_WNDR3400V1,
+ BCM47XX_BOARD_NETGEAR_WNDR3400V2,
+ BCM47XX_BOARD_NETGEAR_WNDR3400_V3,
+ BCM47XX_BOARD_NETGEAR_WNDR3400VCNA,
+ BCM47XX_BOARD_NETGEAR_WNDR3700V3,
+ BCM47XX_BOARD_NETGEAR_WNDR4000,
+ BCM47XX_BOARD_NETGEAR_WNDR4500V1,
+ BCM47XX_BOARD_NETGEAR_WNDR4500V2,
+ BCM47XX_BOARD_NETGEAR_WNR1000_V3,
+ BCM47XX_BOARD_NETGEAR_WNR2000,
+ BCM47XX_BOARD_NETGEAR_WNR3500L,
+ BCM47XX_BOARD_NETGEAR_WNR3500U,
+ BCM47XX_BOARD_NETGEAR_WNR3500V2,
+ BCM47XX_BOARD_NETGEAR_WNR3500V2VC,
+ BCM47XX_BOARD_NETGEAR_WNR834BV2,
+
+ BCM47XX_BOARD_PHICOMM_M1,
+
+ BCM47XX_BOARD_SIEMENS_SE505V2,
+
+ BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE,
+
+ BCM47XX_BOARD_ZTE_H218N,
+
+ BCM47XX_BOARD_UNKNOWN,
+ BCM47XX_BOARD_NO,
+};
+
+#define BCM47XX_BOARD_MAX_NAME 30
+
+void bcm47xx_board_detect(void);
+enum bcm47xx_board bcm47xx_board_get(void);
+const char *bcm47xx_board_get_name(void);
+
+#endif /* __BCM47XX_BOARD_H */
diff --git a/arch/mips/include/asm/mach-bcm47xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-bcm47xx/cpu-feature-overrides.h
new file mode 100644
index 000000000..b23ff47ea
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm47xx/cpu-feature-overrides.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_BCM47XX_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_BCM47XX_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 1
+#if defined(CONFIG_BCM47XX_BCMA) && !defined(CONFIG_BCM47XX_SSB)
+#define cpu_has_watch 1
+#elif defined(CONFIG_BCM47XX_SSB) && !defined(CONFIG_BCM47XX_BCMA)
+#define cpu_has_watch 0
+#endif
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 1
+#define cpu_has_mcheck 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+
+/* cpu_has_mips16 */
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_rixi 0
+#define cpu_has_mmips 0
+#define cpu_has_smartmips 0
+#define cpu_has_vtag_icache 0
+/* cpu_has_dc_aliases */
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_pindexed_dcache 0
+#define cpu_icache_snoops_remote_store 0
+
+#define cpu_has_mips_2 1
+#define cpu_has_mips_3 0
+#define cpu_has_mips32r1 1
+#if defined(CONFIG_BCM47XX_BCMA) && !defined(CONFIG_BCM47XX_SSB)
+#define cpu_has_mips32r2 1
+#elif defined(CONFIG_BCM47XX_SSB) && !defined(CONFIG_BCM47XX_BCMA)
+#define cpu_has_mips32r2 0
+#endif
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#if defined(CONFIG_BCM47XX_BCMA) && !defined(CONFIG_BCM47XX_SSB)
+#define cpu_has_dsp 1
+#define cpu_has_dsp2 1
+#elif defined(CONFIG_BCM47XX_SSB) && !defined(CONFIG_BCM47XX_BCMA)
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#endif
+#define cpu_has_mipsmt 0
+/* cpu_has_userlocal */
+
+#define cpu_has_nofpuex 0
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#if defined(CONFIG_BCM47XX_BCMA) && !defined(CONFIG_BCM47XX_SSB)
+#define cpu_has_vint 1
+#elif defined(CONFIG_BCM47XX_SSB) && !defined(CONFIG_BCM47XX_BCMA)
+#define cpu_has_vint 0
+#endif
+#define cpu_has_veic 0
+#define cpu_has_inclusive_pcaches 0
+
+#if defined(CONFIG_BCM47XX_BCMA) && !defined(CONFIG_BCM47XX_SSB)
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+#define cpu_has_perf_cntr_intr_bit 1
+#elif defined(CONFIG_BCM47XX_SSB) && !defined(CONFIG_BCM47XX_BCMA)
+#define cpu_dcache_line_size() 16
+#define cpu_icache_line_size() 16
+#define cpu_has_perf_cntr_intr_bit 0
+#endif
+#define cpu_scache_line_size() 0
+#define cpu_has_vz 0
+
+#endif /* __ASM_MACH_BCM47XX_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_board.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_board.h
new file mode 100644
index 000000000..1d19a726f
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_board.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_BOARD_H_
+#define BCM63XX_BOARD_H_
+
+const char *board_get_name(void);
+
+void board_prom_init(void);
+
+void board_setup(void);
+
+int board_register_devices(void);
+
+#endif /* ! BCM63XX_BOARD_H_ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
new file mode 100644
index 000000000..1cad18e66
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
@@ -0,0 +1,1068 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_CPU_H_
+#define BCM63XX_CPU_H_
+
+#include <linux/types.h>
+#include <linux/init.h>
+
+/*
+ * Macros to fetch the bcm63xx CPU id and revision; these should be
+ * optimized away at compile time if support for only one CPU is enabled
+ * (idea stolen from arm mach-types).
+ */
+#define BCM3368_CPU_ID 0x3368
+#define BCM6328_CPU_ID 0x6328
+#define BCM6338_CPU_ID 0x6338
+#define BCM6345_CPU_ID 0x6345
+#define BCM6348_CPU_ID 0x6348
+#define BCM6358_CPU_ID 0x6358
+#define BCM6362_CPU_ID 0x6362
+#define BCM6368_CPU_ID 0x6368
+
+void __init bcm63xx_cpu_init(void);
+u8 bcm63xx_get_cpu_rev(void);
+unsigned int bcm63xx_get_cpu_freq(void);
+
+static inline u16 __pure __bcm63xx_get_cpu_id(const u16 cpu_id)
+{
+ switch (cpu_id) {
+#ifdef CONFIG_BCM63XX_CPU_3368
+ case BCM3368_CPU_ID:
+#endif
+
+#ifdef CONFIG_BCM63XX_CPU_6328
+ case BCM6328_CPU_ID:
+#endif
+
+#ifdef CONFIG_BCM63XX_CPU_6338
+ case BCM6338_CPU_ID:
+#endif
+
+#ifdef CONFIG_BCM63XX_CPU_6345
+ case BCM6345_CPU_ID:
+#endif
+
+#ifdef CONFIG_BCM63XX_CPU_6348
+ case BCM6348_CPU_ID:
+#endif
+
+#ifdef CONFIG_BCM63XX_CPU_6358
+ case BCM6358_CPU_ID:
+#endif
+
+#ifdef CONFIG_BCM63XX_CPU_6362
+ case BCM6362_CPU_ID:
+#endif
+
+#ifdef CONFIG_BCM63XX_CPU_6368
+ case BCM6368_CPU_ID:
+#endif
+ break;
+ default:
+ unreachable();
+ }
+
+ return cpu_id;
+}
+
+extern u16 bcm63xx_cpu_id;
+
+static inline u16 __pure bcm63xx_get_cpu_id(void)
+{
+ const u16 cpu_id = bcm63xx_cpu_id;
+
+ return __bcm63xx_get_cpu_id(cpu_id);
+}
+
+#define BCMCPU_IS_3368() (bcm63xx_get_cpu_id() == BCM3368_CPU_ID)
+#define BCMCPU_IS_6328() (bcm63xx_get_cpu_id() == BCM6328_CPU_ID)
+#define BCMCPU_IS_6338() (bcm63xx_get_cpu_id() == BCM6338_CPU_ID)
+#define BCMCPU_IS_6345() (bcm63xx_get_cpu_id() == BCM6345_CPU_ID)
+#define BCMCPU_IS_6348() (bcm63xx_get_cpu_id() == BCM6348_CPU_ID)
+#define BCMCPU_IS_6358() (bcm63xx_get_cpu_id() == BCM6358_CPU_ID)
+#define BCMCPU_IS_6362() (bcm63xx_get_cpu_id() == BCM6362_CPU_ID)
+#define BCMCPU_IS_6368() (bcm63xx_get_cpu_id() == BCM6368_CPU_ID)
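+
+/* A minimal sketch: as intended by the comment above, a chip-specific
+ * branch such as the following can be folded away at compile time when
+ * support for only one CPU is enabled:
+ *
+ *   if (BCMCPU_IS_6348())
+ *           bcm6348_specific_setup();   // hypothetical helper
+ */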
+
+/*
+ * While register sets are (mostly) the same across 63xx CPUs, the base
+ * addresses of these sets do change.
+ */
+enum bcm63xx_regs_set {
+ RSET_DSL_LMEM = 0,
+ RSET_PERF,
+ RSET_TIMER,
+ RSET_WDT,
+ RSET_UART0,
+ RSET_UART1,
+ RSET_GPIO,
+ RSET_SPI,
+ RSET_HSSPI,
+ RSET_UDC0,
+ RSET_OHCI0,
+ RSET_OHCI_PRIV,
+ RSET_USBH_PRIV,
+ RSET_USBD,
+ RSET_USBDMA,
+ RSET_MPI,
+ RSET_PCMCIA,
+ RSET_PCIE,
+ RSET_DSL,
+ RSET_ENET0,
+ RSET_ENET1,
+ RSET_ENETDMA,
+ RSET_ENETDMAC,
+ RSET_ENETDMAS,
+ RSET_ENETSW,
+ RSET_EHCI0,
+ RSET_SDRAM,
+ RSET_MEMC,
+ RSET_DDR,
+ RSET_M2M,
+ RSET_ATM,
+ RSET_XTM,
+ RSET_XTMDMA,
+ RSET_XTMDMAC,
+ RSET_XTMDMAS,
+ RSET_PCM,
+ RSET_PCMDMA,
+ RSET_PCMDMAC,
+ RSET_PCMDMAS,
+ RSET_RNG,
+ RSET_MISC
+};
+
+#define RSET_DSL_LMEM_SIZE (64 * 1024 * 4)
+#define RSET_DSL_SIZE 4096
+#define RSET_WDT_SIZE 12
+#define BCM_6338_RSET_SPI_SIZE 64
+#define BCM_6348_RSET_SPI_SIZE 64
+#define BCM_6358_RSET_SPI_SIZE 1804
+#define BCM_6368_RSET_SPI_SIZE 1804
+#define RSET_ENET_SIZE 2048
+#define RSET_ENETDMA_SIZE 256
+#define RSET_6345_ENETDMA_SIZE 64
+#define RSET_ENETDMAC_SIZE(chans) (16 * (chans))
+#define RSET_ENETDMAS_SIZE(chans) (16 * (chans))
+#define RSET_ENETSW_SIZE 65536
+#define RSET_UART_SIZE 24
+#define RSET_HSSPI_SIZE 1536
+#define RSET_UDC_SIZE 256
+#define RSET_OHCI_SIZE 256
+#define RSET_EHCI_SIZE 256
+#define RSET_USBD_SIZE 256
+#define RSET_USBDMA_SIZE 1280
+#define RSET_PCMCIA_SIZE 12
+#define RSET_M2M_SIZE 256
+#define RSET_ATM_SIZE 4096
+#define RSET_XTM_SIZE 10240
+#define RSET_XTMDMA_SIZE 256
+#define RSET_XTMDMAC_SIZE(chans) (16 * (chans))
+#define RSET_XTMDMAS_SIZE(chans) (16 * (chans))
+#define RSET_RNG_SIZE 20
+
+/*
+ * 3368 register sets base address
+ */
+#define BCM_3368_DSL_LMEM_BASE (0xdeadbeef)
+#define BCM_3368_PERF_BASE (0xfff8c000)
+#define BCM_3368_TIMER_BASE (0xfff8c040)
+#define BCM_3368_WDT_BASE (0xfff8c080)
+#define BCM_3368_UART0_BASE (0xfff8c100)
+#define BCM_3368_UART1_BASE (0xfff8c120)
+#define BCM_3368_GPIO_BASE (0xfff8c080)
+#define BCM_3368_SPI_BASE (0xfff8c800)
+#define BCM_3368_HSSPI_BASE (0xdeadbeef)
+#define BCM_3368_UDC0_BASE (0xdeadbeef)
+#define BCM_3368_USBDMA_BASE (0xdeadbeef)
+#define BCM_3368_OHCI0_BASE (0xdeadbeef)
+#define BCM_3368_OHCI_PRIV_BASE (0xdeadbeef)
+#define BCM_3368_USBH_PRIV_BASE (0xdeadbeef)
+#define BCM_3368_USBD_BASE (0xdeadbeef)
+#define BCM_3368_MPI_BASE (0xfff80000)
+#define BCM_3368_PCMCIA_BASE (0xfff80054)
+#define BCM_3368_PCIE_BASE (0xdeadbeef)
+#define BCM_3368_SDRAM_REGS_BASE (0xdeadbeef)
+#define BCM_3368_DSL_BASE (0xdeadbeef)
+#define BCM_3368_UBUS_BASE (0xdeadbeef)
+#define BCM_3368_ENET0_BASE (0xfff98000)
+#define BCM_3368_ENET1_BASE (0xfff98800)
+#define BCM_3368_ENETDMA_BASE (0xfff99800)
+#define BCM_3368_ENETDMAC_BASE (0xfff99900)
+#define BCM_3368_ENETDMAS_BASE (0xfff99a00)
+#define BCM_3368_ENETSW_BASE (0xdeadbeef)
+#define BCM_3368_EHCI0_BASE (0xdeadbeef)
+#define BCM_3368_SDRAM_BASE (0xdeadbeef)
+#define BCM_3368_MEMC_BASE (0xfff84000)
+#define BCM_3368_DDR_BASE (0xdeadbeef)
+#define BCM_3368_M2M_BASE (0xdeadbeef)
+#define BCM_3368_ATM_BASE (0xdeadbeef)
+#define BCM_3368_XTM_BASE (0xdeadbeef)
+#define BCM_3368_XTMDMA_BASE (0xdeadbeef)
+#define BCM_3368_XTMDMAC_BASE (0xdeadbeef)
+#define BCM_3368_XTMDMAS_BASE (0xdeadbeef)
+#define BCM_3368_PCM_BASE (0xfff9c200)
+#define BCM_3368_PCMDMA_BASE (0xdeadbeef)
+#define BCM_3368_PCMDMAC_BASE (0xdeadbeef)
+#define BCM_3368_PCMDMAS_BASE (0xdeadbeef)
+#define BCM_3368_RNG_BASE (0xdeadbeef)
+#define BCM_3368_MISC_BASE (0xdeadbeef)
+
+/*
+ * 6328 register sets base address
+ */
+#define BCM_6328_DSL_LMEM_BASE (0xdeadbeef)
+#define BCM_6328_PERF_BASE (0xb0000000)
+#define BCM_6328_TIMER_BASE (0xb0000040)
+#define BCM_6328_WDT_BASE (0xb000005c)
+#define BCM_6328_UART0_BASE (0xb0000100)
+#define BCM_6328_UART1_BASE (0xb0000120)
+#define BCM_6328_GPIO_BASE (0xb0000080)
+#define BCM_6328_SPI_BASE (0xdeadbeef)
+#define BCM_6328_HSSPI_BASE (0xb0001000)
+#define BCM_6328_UDC0_BASE (0xdeadbeef)
+#define BCM_6328_USBDMA_BASE (0xb000c000)
+#define BCM_6328_OHCI0_BASE (0xb0002600)
+#define BCM_6328_OHCI_PRIV_BASE (0xdeadbeef)
+#define BCM_6328_USBH_PRIV_BASE (0xb0002700)
+#define BCM_6328_USBD_BASE (0xb0002400)
+#define BCM_6328_MPI_BASE (0xdeadbeef)
+#define BCM_6328_PCMCIA_BASE (0xdeadbeef)
+#define BCM_6328_PCIE_BASE (0xb0e40000)
+#define BCM_6328_SDRAM_REGS_BASE (0xdeadbeef)
+#define BCM_6328_DSL_BASE (0xb0001900)
+#define BCM_6328_UBUS_BASE (0xdeadbeef)
+#define BCM_6328_ENET0_BASE (0xdeadbeef)
+#define BCM_6328_ENET1_BASE (0xdeadbeef)
+#define BCM_6328_ENETDMA_BASE (0xb000d800)
+#define BCM_6328_ENETDMAC_BASE (0xb000da00)
+#define BCM_6328_ENETDMAS_BASE (0xb000dc00)
+#define BCM_6328_ENETSW_BASE (0xb0e00000)
+#define BCM_6328_EHCI0_BASE (0xb0002500)
+#define BCM_6328_SDRAM_BASE (0xdeadbeef)
+#define BCM_6328_MEMC_BASE (0xdeadbeef)
+#define BCM_6328_DDR_BASE (0xb0003000)
+#define BCM_6328_M2M_BASE (0xdeadbeef)
+#define BCM_6328_ATM_BASE (0xdeadbeef)
+#define BCM_6328_XTM_BASE (0xdeadbeef)
+#define BCM_6328_XTMDMA_BASE (0xb000b800)
+#define BCM_6328_XTMDMAC_BASE (0xdeadbeef)
+#define BCM_6328_XTMDMAS_BASE (0xdeadbeef)
+#define BCM_6328_PCM_BASE (0xb000a800)
+#define BCM_6328_PCMDMA_BASE (0xdeadbeef)
+#define BCM_6328_PCMDMAC_BASE (0xdeadbeef)
+#define BCM_6328_PCMDMAS_BASE (0xdeadbeef)
+#define BCM_6328_RNG_BASE (0xdeadbeef)
+#define BCM_6328_MISC_BASE (0xb0001800)
+#define BCM_6328_OTP_BASE (0xb0000600)
+
+/*
+ * 6338 register sets base address
+ */
+#define BCM_6338_DSL_LMEM_BASE (0xfff00000)
+#define BCM_6338_PERF_BASE (0xfffe0000)
+#define BCM_6338_BB_BASE (0xfffe0100)
+#define BCM_6338_TIMER_BASE (0xfffe0200)
+#define BCM_6338_WDT_BASE (0xfffe021c)
+#define BCM_6338_UART0_BASE (0xfffe0300)
+#define BCM_6338_UART1_BASE (0xdeadbeef)
+#define BCM_6338_GPIO_BASE (0xfffe0400)
+#define BCM_6338_SPI_BASE (0xfffe0c00)
+#define BCM_6338_HSSPI_BASE (0xdeadbeef)
+#define BCM_6338_UDC0_BASE (0xdeadbeef)
+#define BCM_6338_USBDMA_BASE (0xfffe2400)
+#define BCM_6338_OHCI0_BASE (0xdeadbeef)
+#define BCM_6338_OHCI_PRIV_BASE (0xfffe3000)
+#define BCM_6338_USBH_PRIV_BASE (0xdeadbeef)
+#define BCM_6338_USBD_BASE (0xdeadbeef)
+#define BCM_6338_MPI_BASE (0xfffe3160)
+#define BCM_6338_PCMCIA_BASE (0xdeadbeef)
+#define BCM_6338_PCIE_BASE (0xdeadbeef)
+#define BCM_6338_SDRAM_REGS_BASE (0xfffe3100)
+#define BCM_6338_DSL_BASE (0xfffe1000)
+#define BCM_6338_UBUS_BASE (0xdeadbeef)
+#define BCM_6338_ENET0_BASE (0xfffe2800)
+#define BCM_6338_ENET1_BASE (0xdeadbeef)
+#define BCM_6338_ENETDMA_BASE (0xfffe2400)
+#define BCM_6338_ENETDMAC_BASE (0xfffe2500)
+#define BCM_6338_ENETDMAS_BASE (0xfffe2600)
+#define BCM_6338_ENETSW_BASE (0xdeadbeef)
+#define BCM_6338_EHCI0_BASE (0xdeadbeef)
+#define BCM_6338_SDRAM_BASE (0xfffe3100)
+#define BCM_6338_MEMC_BASE (0xdeadbeef)
+#define BCM_6338_DDR_BASE (0xdeadbeef)
+#define BCM_6338_M2M_BASE (0xdeadbeef)
+#define BCM_6338_ATM_BASE (0xfffe2000)
+#define BCM_6338_XTM_BASE (0xdeadbeef)
+#define BCM_6338_XTMDMA_BASE (0xdeadbeef)
+#define BCM_6338_XTMDMAC_BASE (0xdeadbeef)
+#define BCM_6338_XTMDMAS_BASE (0xdeadbeef)
+#define BCM_6338_PCM_BASE (0xdeadbeef)
+#define BCM_6338_PCMDMA_BASE (0xdeadbeef)
+#define BCM_6338_PCMDMAC_BASE (0xdeadbeef)
+#define BCM_6338_PCMDMAS_BASE (0xdeadbeef)
+#define BCM_6338_RNG_BASE (0xdeadbeef)
+#define BCM_6338_MISC_BASE (0xdeadbeef)
+
+/*
+ * 6345 register sets base address
+ */
+#define BCM_6345_DSL_LMEM_BASE (0xfff00000)
+#define BCM_6345_PERF_BASE (0xfffe0000)
+#define BCM_6345_BB_BASE (0xfffe0100)
+#define BCM_6345_TIMER_BASE (0xfffe0200)
+#define BCM_6345_WDT_BASE (0xfffe021c)
+#define BCM_6345_UART0_BASE (0xfffe0300)
+#define BCM_6345_UART1_BASE (0xdeadbeef)
+#define BCM_6345_GPIO_BASE (0xfffe0400)
+#define BCM_6345_SPI_BASE (0xdeadbeef)
+#define BCM_6345_HSSPI_BASE (0xdeadbeef)
+#define BCM_6345_UDC0_BASE (0xdeadbeef)
+#define BCM_6345_USBDMA_BASE (0xfffe2800)
+#define BCM_6345_ENET0_BASE (0xfffe1800)
+#define BCM_6345_ENETDMA_BASE (0xfffe2800)
+#define BCM_6345_ENETDMAC_BASE (0xfffe2840)
+#define BCM_6345_ENETDMAS_BASE (0xfffe2a00)
+#define BCM_6345_ENETSW_BASE (0xdeadbeef)
+#define BCM_6345_PCMCIA_BASE (0xfffe2028)
+#define BCM_6345_MPI_BASE (0xfffe2000)
+#define BCM_6345_PCIE_BASE (0xdeadbeef)
+#define BCM_6345_OHCI0_BASE (0xfffe2100)
+#define BCM_6345_OHCI_PRIV_BASE (0xfffe2200)
+#define BCM_6345_USBH_PRIV_BASE (0xdeadbeef)
+#define BCM_6345_USBD_BASE (0xdeadbeef)
+#define BCM_6345_SDRAM_REGS_BASE (0xfffe2300)
+#define BCM_6345_DSL_BASE (0xdeadbeef)
+#define BCM_6345_UBUS_BASE (0xdeadbeef)
+#define BCM_6345_ENET1_BASE (0xdeadbeef)
+#define BCM_6345_EHCI0_BASE (0xdeadbeef)
+#define BCM_6345_SDRAM_BASE (0xfffe2300)
+#define BCM_6345_MEMC_BASE (0xdeadbeef)
+#define BCM_6345_DDR_BASE (0xdeadbeef)
+#define BCM_6345_M2M_BASE (0xdeadbeef)
+#define BCM_6345_ATM_BASE (0xfffe4000)
+#define BCM_6345_XTM_BASE (0xdeadbeef)
+#define BCM_6345_XTMDMA_BASE (0xdeadbeef)
+#define BCM_6345_XTMDMAC_BASE (0xdeadbeef)
+#define BCM_6345_XTMDMAS_BASE (0xdeadbeef)
+#define BCM_6345_PCM_BASE (0xdeadbeef)
+#define BCM_6345_PCMDMA_BASE (0xdeadbeef)
+#define BCM_6345_PCMDMAC_BASE (0xdeadbeef)
+#define BCM_6345_PCMDMAS_BASE (0xdeadbeef)
+#define BCM_6345_RNG_BASE (0xdeadbeef)
+#define BCM_6345_MISC_BASE (0xdeadbeef)
+
+/*
+ * 6348 register sets base address
+ */
+#define BCM_6348_DSL_LMEM_BASE (0xfff00000)
+#define BCM_6348_PERF_BASE (0xfffe0000)
+#define BCM_6348_TIMER_BASE (0xfffe0200)
+#define BCM_6348_WDT_BASE (0xfffe021c)
+#define BCM_6348_UART0_BASE (0xfffe0300)
+#define BCM_6348_UART1_BASE (0xdeadbeef)
+#define BCM_6348_GPIO_BASE (0xfffe0400)
+#define BCM_6348_SPI_BASE (0xfffe0c00)
+#define BCM_6348_HSSPI_BASE (0xdeadbeef)
+#define BCM_6348_UDC0_BASE (0xfffe1000)
+#define BCM_6348_USBDMA_BASE (0xdeadbeef)
+#define BCM_6348_OHCI0_BASE (0xfffe1b00)
+#define BCM_6348_OHCI_PRIV_BASE (0xfffe1c00)
+#define BCM_6348_USBH_PRIV_BASE (0xdeadbeef)
+#define BCM_6348_USBD_BASE (0xdeadbeef)
+#define BCM_6348_MPI_BASE (0xfffe2000)
+#define BCM_6348_PCMCIA_BASE (0xfffe2054)
+#define BCM_6348_PCIE_BASE (0xdeadbeef)
+#define BCM_6348_SDRAM_REGS_BASE (0xfffe2300)
+#define BCM_6348_M2M_BASE (0xfffe2800)
+#define BCM_6348_DSL_BASE (0xfffe3000)
+#define BCM_6348_ENET0_BASE (0xfffe6000)
+#define BCM_6348_ENET1_BASE (0xfffe6800)
+#define BCM_6348_ENETDMA_BASE (0xfffe7000)
+#define BCM_6348_ENETDMAC_BASE (0xfffe7100)
+#define BCM_6348_ENETDMAS_BASE (0xfffe7200)
+#define BCM_6348_ENETSW_BASE (0xdeadbeef)
+#define BCM_6348_EHCI0_BASE (0xdeadbeef)
+#define BCM_6348_SDRAM_BASE (0xfffe2300)
+#define BCM_6348_MEMC_BASE (0xdeadbeef)
+#define BCM_6348_DDR_BASE (0xdeadbeef)
+#define BCM_6348_ATM_BASE (0xfffe4000)
+#define BCM_6348_XTM_BASE (0xdeadbeef)
+#define BCM_6348_XTMDMA_BASE (0xdeadbeef)
+#define BCM_6348_XTMDMAC_BASE (0xdeadbeef)
+#define BCM_6348_XTMDMAS_BASE (0xdeadbeef)
+#define BCM_6348_PCM_BASE (0xdeadbeef)
+#define BCM_6348_PCMDMA_BASE (0xdeadbeef)
+#define BCM_6348_PCMDMAC_BASE (0xdeadbeef)
+#define BCM_6348_PCMDMAS_BASE (0xdeadbeef)
+#define BCM_6348_RNG_BASE (0xdeadbeef)
+#define BCM_6348_MISC_BASE (0xdeadbeef)
+
+/*
+ * 6358 register sets base address
+ */
+#define BCM_6358_DSL_LMEM_BASE (0xfff00000)
+#define BCM_6358_PERF_BASE (0xfffe0000)
+#define BCM_6358_TIMER_BASE (0xfffe0040)
+#define BCM_6358_WDT_BASE (0xfffe005c)
+#define BCM_6358_UART0_BASE (0xfffe0100)
+#define BCM_6358_UART1_BASE (0xfffe0120)
+#define BCM_6358_GPIO_BASE (0xfffe0080)
+#define BCM_6358_SPI_BASE (0xfffe0800)
+#define BCM_6358_HSSPI_BASE (0xdeadbeef)
+#define BCM_6358_UDC0_BASE (0xfffe0800)
+#define BCM_6358_USBDMA_BASE (0xdeadbeef)
+#define BCM_6358_OHCI0_BASE (0xfffe1400)
+#define BCM_6358_OHCI_PRIV_BASE (0xdeadbeef)
+#define BCM_6358_USBH_PRIV_BASE (0xfffe1500)
+#define BCM_6358_USBD_BASE (0xdeadbeef)
+#define BCM_6358_MPI_BASE (0xfffe1000)
+#define BCM_6358_PCMCIA_BASE (0xfffe1054)
+#define BCM_6358_PCIE_BASE (0xdeadbeef)
+#define BCM_6358_SDRAM_REGS_BASE (0xfffe2300)
+#define BCM_6358_M2M_BASE (0xdeadbeef)
+#define BCM_6358_DSL_BASE (0xfffe3000)
+#define BCM_6358_ENET0_BASE (0xfffe4000)
+#define BCM_6358_ENET1_BASE (0xfffe4800)
+#define BCM_6358_ENETDMA_BASE (0xfffe5000)
+#define BCM_6358_ENETDMAC_BASE (0xfffe5100)
+#define BCM_6358_ENETDMAS_BASE (0xfffe5200)
+#define BCM_6358_ENETSW_BASE (0xdeadbeef)
+#define BCM_6358_EHCI0_BASE (0xfffe1300)
+#define BCM_6358_SDRAM_BASE (0xdeadbeef)
+#define BCM_6358_MEMC_BASE (0xfffe1200)
+#define BCM_6358_DDR_BASE (0xfffe12a0)
+#define BCM_6358_ATM_BASE (0xfffe2000)
+#define BCM_6358_XTM_BASE (0xdeadbeef)
+#define BCM_6358_XTMDMA_BASE (0xdeadbeef)
+#define BCM_6358_XTMDMAC_BASE (0xdeadbeef)
+#define BCM_6358_XTMDMAS_BASE (0xdeadbeef)
+#define BCM_6358_PCM_BASE (0xfffe1600)
+#define BCM_6358_PCMDMA_BASE (0xfffe1800)
+#define BCM_6358_PCMDMAC_BASE (0xfffe1900)
+#define BCM_6358_PCMDMAS_BASE (0xfffe1a00)
+#define BCM_6358_RNG_BASE (0xdeadbeef)
+#define BCM_6358_MISC_BASE (0xdeadbeef)
+
+
+/*
+ * 6362 register sets base address
+ */
+#define BCM_6362_DSL_LMEM_BASE (0xdeadbeef)
+#define BCM_6362_PERF_BASE (0xb0000000)
+#define BCM_6362_TIMER_BASE (0xb0000040)
+#define BCM_6362_WDT_BASE (0xb000005c)
+#define BCM_6362_UART0_BASE (0xb0000100)
+#define BCM_6362_UART1_BASE (0xb0000120)
+#define BCM_6362_GPIO_BASE (0xb0000080)
+#define BCM_6362_SPI_BASE (0xb0000800)
+#define BCM_6362_HSSPI_BASE (0xb0001000)
+#define BCM_6362_UDC0_BASE (0xdeadbeef)
+#define BCM_6362_USBDMA_BASE (0xb000c000)
+#define BCM_6362_OHCI0_BASE (0xb0002600)
+#define BCM_6362_OHCI_PRIV_BASE (0xdeadbeef)
+#define BCM_6362_USBH_PRIV_BASE (0xb0002700)
+#define BCM_6362_USBD_BASE (0xb0002400)
+#define BCM_6362_MPI_BASE (0xdeadbeef)
+#define BCM_6362_PCMCIA_BASE (0xdeadbeef)
+#define BCM_6362_PCIE_BASE (0xb0e40000)
+#define BCM_6362_SDRAM_REGS_BASE (0xdeadbeef)
+#define BCM_6362_DSL_BASE (0xdeadbeef)
+#define BCM_6362_UBUS_BASE (0xdeadbeef)
+#define BCM_6362_ENET0_BASE (0xdeadbeef)
+#define BCM_6362_ENET1_BASE (0xdeadbeef)
+#define BCM_6362_ENETDMA_BASE (0xb000d800)
+#define BCM_6362_ENETDMAC_BASE (0xb000da00)
+#define BCM_6362_ENETDMAS_BASE (0xb000dc00)
+#define BCM_6362_ENETSW_BASE (0xb0e00000)
+#define BCM_6362_EHCI0_BASE (0xb0002500)
+#define BCM_6362_SDRAM_BASE (0xdeadbeef)
+#define BCM_6362_MEMC_BASE (0xdeadbeef)
+#define BCM_6362_DDR_BASE (0xb0003000)
+#define BCM_6362_M2M_BASE (0xdeadbeef)
+#define BCM_6362_ATM_BASE (0xdeadbeef)
+#define BCM_6362_XTM_BASE (0xb0007800)
+#define BCM_6362_XTMDMA_BASE (0xb000b800)
+#define BCM_6362_XTMDMAC_BASE (0xdeadbeef)
+#define BCM_6362_XTMDMAS_BASE (0xdeadbeef)
+#define BCM_6362_PCM_BASE (0xb000a800)
+#define BCM_6362_PCMDMA_BASE (0xdeadbeef)
+#define BCM_6362_PCMDMAC_BASE (0xdeadbeef)
+#define BCM_6362_PCMDMAS_BASE (0xdeadbeef)
+#define BCM_6362_RNG_BASE (0xdeadbeef)
+#define BCM_6362_MISC_BASE (0xb0001800)
+
+#define BCM_6362_NAND_REG_BASE (0xb0000200)
+#define BCM_6362_NAND_CACHE_BASE (0xb0000600)
+#define BCM_6362_LED_BASE (0xb0001900)
+#define BCM_6362_IPSEC_BASE (0xb0002800)
+#define BCM_6362_IPSEC_DMA_BASE (0xb000d000)
+#define BCM_6362_WLAN_CHIPCOMMON_BASE (0xb0004000)
+#define BCM_6362_WLAN_D11_BASE (0xb0005000)
+#define BCM_6362_WLAN_SHIM_BASE (0xb0007000)
+
+/*
+ * 6368 register sets base address
+ */
+#define BCM_6368_DSL_LMEM_BASE (0xdeadbeef)
+#define BCM_6368_PERF_BASE (0xb0000000)
+#define BCM_6368_TIMER_BASE (0xb0000040)
+#define BCM_6368_WDT_BASE (0xb000005c)
+#define BCM_6368_UART0_BASE (0xb0000100)
+#define BCM_6368_UART1_BASE (0xb0000120)
+#define BCM_6368_GPIO_BASE (0xb0000080)
+#define BCM_6368_SPI_BASE (0xb0000800)
+#define BCM_6368_HSSPI_BASE (0xdeadbeef)
+#define BCM_6368_UDC0_BASE (0xdeadbeef)
+#define BCM_6368_USBDMA_BASE (0xb0004800)
+#define BCM_6368_OHCI0_BASE (0xb0001600)
+#define BCM_6368_OHCI_PRIV_BASE (0xdeadbeef)
+#define BCM_6368_USBH_PRIV_BASE (0xb0001700)
+#define BCM_6368_USBD_BASE (0xb0001400)
+#define BCM_6368_MPI_BASE (0xb0001000)
+#define BCM_6368_PCMCIA_BASE (0xb0001054)
+#define BCM_6368_PCIE_BASE (0xdeadbeef)
+#define BCM_6368_SDRAM_REGS_BASE (0xdeadbeef)
+#define BCM_6368_M2M_BASE (0xdeadbeef)
+#define BCM_6368_DSL_BASE (0xdeadbeef)
+#define BCM_6368_ENET0_BASE (0xdeadbeef)
+#define BCM_6368_ENET1_BASE (0xdeadbeef)
+#define BCM_6368_ENETDMA_BASE (0xb0006800)
+#define BCM_6368_ENETDMAC_BASE (0xb0006a00)
+#define BCM_6368_ENETDMAS_BASE (0xb0006c00)
+#define BCM_6368_ENETSW_BASE (0xb0f00000)
+#define BCM_6368_EHCI0_BASE (0xb0001500)
+#define BCM_6368_SDRAM_BASE (0xdeadbeef)
+#define BCM_6368_MEMC_BASE (0xb0001200)
+#define BCM_6368_DDR_BASE (0xb0001280)
+#define BCM_6368_ATM_BASE (0xdeadbeef)
+#define BCM_6368_XTM_BASE (0xb0001800)
+#define BCM_6368_XTMDMA_BASE (0xb0005000)
+#define BCM_6368_XTMDMAC_BASE (0xb0005200)
+#define BCM_6368_XTMDMAS_BASE (0xb0005400)
+#define BCM_6368_PCM_BASE (0xb0004000)
+#define BCM_6368_PCMDMA_BASE (0xb0005800)
+#define BCM_6368_PCMDMAC_BASE (0xb0005a00)
+#define BCM_6368_PCMDMAS_BASE (0xb0005c00)
+#define BCM_6368_RNG_BASE (0xb0004180)
+#define BCM_6368_MISC_BASE (0xdeadbeef)
+
+
+extern const unsigned long *bcm63xx_regs_base;
+
+#define __GEN_CPU_REGS_TABLE(__cpu) \
+ [RSET_DSL_LMEM] = BCM_## __cpu ##_DSL_LMEM_BASE, \
+ [RSET_PERF] = BCM_## __cpu ##_PERF_BASE, \
+ [RSET_TIMER] = BCM_## __cpu ##_TIMER_BASE, \
+ [RSET_WDT] = BCM_## __cpu ##_WDT_BASE, \
+ [RSET_UART0] = BCM_## __cpu ##_UART0_BASE, \
+ [RSET_UART1] = BCM_## __cpu ##_UART1_BASE, \
+ [RSET_GPIO] = BCM_## __cpu ##_GPIO_BASE, \
+ [RSET_SPI] = BCM_## __cpu ##_SPI_BASE, \
+ [RSET_HSSPI] = BCM_## __cpu ##_HSSPI_BASE, \
+ [RSET_UDC0] = BCM_## __cpu ##_UDC0_BASE, \
+ [RSET_OHCI0] = BCM_## __cpu ##_OHCI0_BASE, \
+ [RSET_OHCI_PRIV] = BCM_## __cpu ##_OHCI_PRIV_BASE, \
+ [RSET_USBH_PRIV] = BCM_## __cpu ##_USBH_PRIV_BASE, \
+ [RSET_USBD] = BCM_## __cpu ##_USBD_BASE, \
+ [RSET_USBDMA] = BCM_## __cpu ##_USBDMA_BASE, \
+ [RSET_MPI] = BCM_## __cpu ##_MPI_BASE, \
+ [RSET_PCMCIA] = BCM_## __cpu ##_PCMCIA_BASE, \
+ [RSET_PCIE] = BCM_## __cpu ##_PCIE_BASE, \
+ [RSET_DSL] = BCM_## __cpu ##_DSL_BASE, \
+ [RSET_ENET0] = BCM_## __cpu ##_ENET0_BASE, \
+ [RSET_ENET1] = BCM_## __cpu ##_ENET1_BASE, \
+ [RSET_ENETDMA] = BCM_## __cpu ##_ENETDMA_BASE, \
+ [RSET_ENETDMAC] = BCM_## __cpu ##_ENETDMAC_BASE, \
+ [RSET_ENETDMAS] = BCM_## __cpu ##_ENETDMAS_BASE, \
+ [RSET_ENETSW] = BCM_## __cpu ##_ENETSW_BASE, \
+ [RSET_EHCI0] = BCM_## __cpu ##_EHCI0_BASE, \
+ [RSET_SDRAM] = BCM_## __cpu ##_SDRAM_BASE, \
+ [RSET_MEMC] = BCM_## __cpu ##_MEMC_BASE, \
+ [RSET_DDR] = BCM_## __cpu ##_DDR_BASE, \
+ [RSET_M2M] = BCM_## __cpu ##_M2M_BASE, \
+ [RSET_ATM] = BCM_## __cpu ##_ATM_BASE, \
+ [RSET_XTM] = BCM_## __cpu ##_XTM_BASE, \
+ [RSET_XTMDMA] = BCM_## __cpu ##_XTMDMA_BASE, \
+ [RSET_XTMDMAC] = BCM_## __cpu ##_XTMDMAC_BASE, \
+ [RSET_XTMDMAS] = BCM_## __cpu ##_XTMDMAS_BASE, \
+ [RSET_PCM] = BCM_## __cpu ##_PCM_BASE, \
+ [RSET_PCMDMA] = BCM_## __cpu ##_PCMDMA_BASE, \
+ [RSET_PCMDMAC] = BCM_## __cpu ##_PCMDMAC_BASE, \
+ [RSET_PCMDMAS] = BCM_## __cpu ##_PCMDMAS_BASE, \
+ [RSET_RNG] = BCM_## __cpu ##_RNG_BASE, \
+ [RSET_MISC] = BCM_## __cpu ##_MISC_BASE, \
+
+
+static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
+{
+ return bcm63xx_regs_base[set];
+}
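+
+/* A minimal sketch: with the per-CPU table built via __GEN_CPU_REGS_TABLE()
+ * and hung off bcm63xx_regs_base, a driver can look up a block's base
+ * address without knowing which 63xx chip it runs on, e.g.
+ *
+ *   unsigned long uart0_base = bcm63xx_regset_address(RSET_UART0);
+ */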
+
+/*
+ * IRQ numbers change across CPUs too
+ */
+enum bcm63xx_irq {
+ IRQ_TIMER = 0,
+ IRQ_SPI,
+ IRQ_UART0,
+ IRQ_UART1,
+ IRQ_DSL,
+ IRQ_ENET0,
+ IRQ_ENET1,
+ IRQ_ENET_PHY,
+ IRQ_HSSPI,
+ IRQ_OHCI0,
+ IRQ_EHCI0,
+ IRQ_USBD,
+ IRQ_USBD_RXDMA0,
+ IRQ_USBD_TXDMA0,
+ IRQ_USBD_RXDMA1,
+ IRQ_USBD_TXDMA1,
+ IRQ_USBD_RXDMA2,
+ IRQ_USBD_TXDMA2,
+ IRQ_ENET0_RXDMA,
+ IRQ_ENET0_TXDMA,
+ IRQ_ENET1_RXDMA,
+ IRQ_ENET1_TXDMA,
+ IRQ_PCI,
+ IRQ_PCMCIA,
+ IRQ_ATM,
+ IRQ_ENETSW_RXDMA0,
+ IRQ_ENETSW_RXDMA1,
+ IRQ_ENETSW_RXDMA2,
+ IRQ_ENETSW_RXDMA3,
+ IRQ_ENETSW_TXDMA0,
+ IRQ_ENETSW_TXDMA1,
+ IRQ_ENETSW_TXDMA2,
+ IRQ_ENETSW_TXDMA3,
+ IRQ_XTM,
+ IRQ_XTM_DMA0,
+};
+
+/*
+ * 3368 irqs
+ */
+#define BCM_3368_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
+#define BCM_3368_SPI_IRQ (IRQ_INTERNAL_BASE + 1)
+#define BCM_3368_UART0_IRQ (IRQ_INTERNAL_BASE + 2)
+#define BCM_3368_UART1_IRQ (IRQ_INTERNAL_BASE + 3)
+#define BCM_3368_DSL_IRQ 0
+#define BCM_3368_UDC0_IRQ 0
+#define BCM_3368_OHCI0_IRQ 0
+#define BCM_3368_ENET0_IRQ (IRQ_INTERNAL_BASE + 8)
+#define BCM_3368_ENET1_IRQ (IRQ_INTERNAL_BASE + 6)
+#define BCM_3368_ENET_PHY_IRQ (IRQ_INTERNAL_BASE + 9)
+#define BCM_3368_ENET0_RXDMA_IRQ (IRQ_INTERNAL_BASE + 15)
+#define BCM_3368_ENET0_TXDMA_IRQ (IRQ_INTERNAL_BASE + 16)
+#define BCM_3368_HSSPI_IRQ 0
+#define BCM_3368_EHCI0_IRQ 0
+#define BCM_3368_USBD_IRQ 0
+#define BCM_3368_USBD_RXDMA0_IRQ 0
+#define BCM_3368_USBD_TXDMA0_IRQ 0
+#define BCM_3368_USBD_RXDMA1_IRQ 0
+#define BCM_3368_USBD_TXDMA1_IRQ 0
+#define BCM_3368_USBD_RXDMA2_IRQ 0
+#define BCM_3368_USBD_TXDMA2_IRQ 0
+#define BCM_3368_ENET1_RXDMA_IRQ (IRQ_INTERNAL_BASE + 17)
+#define BCM_3368_ENET1_TXDMA_IRQ (IRQ_INTERNAL_BASE + 18)
+#define BCM_3368_PCI_IRQ (IRQ_INTERNAL_BASE + 31)
+#define BCM_3368_PCMCIA_IRQ 0
+#define BCM_3368_ATM_IRQ 0
+#define BCM_3368_ENETSW_RXDMA0_IRQ 0
+#define BCM_3368_ENETSW_RXDMA1_IRQ 0
+#define BCM_3368_ENETSW_RXDMA2_IRQ 0
+#define BCM_3368_ENETSW_RXDMA3_IRQ 0
+#define BCM_3368_ENETSW_TXDMA0_IRQ 0
+#define BCM_3368_ENETSW_TXDMA1_IRQ 0
+#define BCM_3368_ENETSW_TXDMA2_IRQ 0
+#define BCM_3368_ENETSW_TXDMA3_IRQ 0
+#define BCM_3368_XTM_IRQ 0
+#define BCM_3368_XTM_DMA0_IRQ 0
+
+#define BCM_3368_EXT_IRQ0 (IRQ_INTERNAL_BASE + 25)
+#define BCM_3368_EXT_IRQ1 (IRQ_INTERNAL_BASE + 26)
+#define BCM_3368_EXT_IRQ2 (IRQ_INTERNAL_BASE + 27)
+#define BCM_3368_EXT_IRQ3 (IRQ_INTERNAL_BASE + 28)
+
+
+/*
+ * 6328 irqs
+ */
+#define BCM_6328_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32)
+
+#define BCM_6328_TIMER_IRQ (IRQ_INTERNAL_BASE + 31)
+#define BCM_6328_SPI_IRQ 0
+#define BCM_6328_UART0_IRQ (IRQ_INTERNAL_BASE + 28)
+#define BCM_6328_UART1_IRQ (BCM_6328_HIGH_IRQ_BASE + 7)
+#define BCM_6328_DSL_IRQ (IRQ_INTERNAL_BASE + 4)
+#define BCM_6328_UDC0_IRQ 0
+#define BCM_6328_ENET0_IRQ 0
+#define BCM_6328_ENET1_IRQ 0
+#define BCM_6328_ENET_PHY_IRQ (IRQ_INTERNAL_BASE + 12)
+#define BCM_6328_HSSPI_IRQ (IRQ_INTERNAL_BASE + 29)
+#define BCM_6328_OHCI0_IRQ (BCM_6328_HIGH_IRQ_BASE + 9)
+#define BCM_6328_EHCI0_IRQ (BCM_6328_HIGH_IRQ_BASE + 10)
+#define BCM_6328_USBD_IRQ (IRQ_INTERNAL_BASE + 4)
+#define BCM_6328_USBD_RXDMA0_IRQ (IRQ_INTERNAL_BASE + 5)
+#define BCM_6328_USBD_TXDMA0_IRQ (IRQ_INTERNAL_BASE + 6)
+#define BCM_6328_USBD_RXDMA1_IRQ (IRQ_INTERNAL_BASE + 7)
+#define BCM_6328_USBD_TXDMA1_IRQ (IRQ_INTERNAL_BASE + 8)
+#define BCM_6328_USBD_RXDMA2_IRQ (IRQ_INTERNAL_BASE + 9)
+#define BCM_6328_USBD_TXDMA2_IRQ (IRQ_INTERNAL_BASE + 10)
+#define BCM_6328_PCMCIA_IRQ 0
+#define BCM_6328_ENET0_RXDMA_IRQ 0
+#define BCM_6328_ENET0_TXDMA_IRQ 0
+#define BCM_6328_ENET1_RXDMA_IRQ 0
+#define BCM_6328_ENET1_TXDMA_IRQ 0
+#define BCM_6328_PCI_IRQ (IRQ_INTERNAL_BASE + 23)
+#define BCM_6328_ATM_IRQ 0
+#define BCM_6328_ENETSW_RXDMA0_IRQ (BCM_6328_HIGH_IRQ_BASE + 0)
+#define BCM_6328_ENETSW_RXDMA1_IRQ (BCM_6328_HIGH_IRQ_BASE + 1)
+#define BCM_6328_ENETSW_RXDMA2_IRQ (BCM_6328_HIGH_IRQ_BASE + 2)
+#define BCM_6328_ENETSW_RXDMA3_IRQ (BCM_6328_HIGH_IRQ_BASE + 3)
+#define BCM_6328_ENETSW_TXDMA0_IRQ 0
+#define BCM_6328_ENETSW_TXDMA1_IRQ 0
+#define BCM_6328_ENETSW_TXDMA2_IRQ 0
+#define BCM_6328_ENETSW_TXDMA3_IRQ 0
+#define BCM_6328_XTM_IRQ (BCM_6328_HIGH_IRQ_BASE + 31)
+#define BCM_6328_XTM_DMA0_IRQ (BCM_6328_HIGH_IRQ_BASE + 11)
+
+#define BCM_6328_PCM_DMA0_IRQ (IRQ_INTERNAL_BASE + 2)
+#define BCM_6328_PCM_DMA1_IRQ (IRQ_INTERNAL_BASE + 3)
+#define BCM_6328_EXT_IRQ0 (IRQ_INTERNAL_BASE + 24)
+#define BCM_6328_EXT_IRQ1 (IRQ_INTERNAL_BASE + 25)
+#define BCM_6328_EXT_IRQ2 (IRQ_INTERNAL_BASE + 26)
+#define BCM_6328_EXT_IRQ3 (IRQ_INTERNAL_BASE + 27)
+
+/*
+ * 6338 irqs
+ */
+#define BCM_6338_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
+#define BCM_6338_SPI_IRQ (IRQ_INTERNAL_BASE + 1)
+#define BCM_6338_UART0_IRQ (IRQ_INTERNAL_BASE + 2)
+#define BCM_6338_UART1_IRQ 0
+#define BCM_6338_DSL_IRQ (IRQ_INTERNAL_BASE + 5)
+#define BCM_6338_ENET0_IRQ (IRQ_INTERNAL_BASE + 8)
+#define BCM_6338_ENET1_IRQ 0
+#define BCM_6338_ENET_PHY_IRQ (IRQ_INTERNAL_BASE + 9)
+#define BCM_6338_HSSPI_IRQ 0
+#define BCM_6338_OHCI0_IRQ 0
+#define BCM_6338_EHCI0_IRQ 0
+#define BCM_6338_USBD_IRQ 0
+#define BCM_6338_USBD_RXDMA0_IRQ 0
+#define BCM_6338_USBD_TXDMA0_IRQ 0
+#define BCM_6338_USBD_RXDMA1_IRQ 0
+#define BCM_6338_USBD_TXDMA1_IRQ 0
+#define BCM_6338_USBD_RXDMA2_IRQ 0
+#define BCM_6338_USBD_TXDMA2_IRQ 0
+#define BCM_6338_ENET0_RXDMA_IRQ (IRQ_INTERNAL_BASE + 15)
+#define BCM_6338_ENET0_TXDMA_IRQ (IRQ_INTERNAL_BASE + 16)
+#define BCM_6338_ENET1_RXDMA_IRQ 0
+#define BCM_6338_ENET1_TXDMA_IRQ 0
+#define BCM_6338_PCI_IRQ 0
+#define BCM_6338_PCMCIA_IRQ 0
+#define BCM_6338_ATM_IRQ 0
+#define BCM_6338_ENETSW_RXDMA0_IRQ 0
+#define BCM_6338_ENETSW_RXDMA1_IRQ 0
+#define BCM_6338_ENETSW_RXDMA2_IRQ 0
+#define BCM_6338_ENETSW_RXDMA3_IRQ 0
+#define BCM_6338_ENETSW_TXDMA0_IRQ 0
+#define BCM_6338_ENETSW_TXDMA1_IRQ 0
+#define BCM_6338_ENETSW_TXDMA2_IRQ 0
+#define BCM_6338_ENETSW_TXDMA3_IRQ 0
+#define BCM_6338_XTM_IRQ 0
+#define BCM_6338_XTM_DMA0_IRQ 0
+
+/*
+ * 6345 irqs
+ */
+#define BCM_6345_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
+#define BCM_6345_SPI_IRQ 0
+#define BCM_6345_UART0_IRQ (IRQ_INTERNAL_BASE + 2)
+#define BCM_6345_UART1_IRQ 0
+#define BCM_6345_DSL_IRQ (IRQ_INTERNAL_BASE + 3)
+#define BCM_6345_ENET0_IRQ (IRQ_INTERNAL_BASE + 8)
+#define BCM_6345_ENET1_IRQ 0
+#define BCM_6345_ENET_PHY_IRQ (IRQ_INTERNAL_BASE + 12)
+#define BCM_6345_HSSPI_IRQ 0
+#define BCM_6345_OHCI0_IRQ 0
+#define BCM_6345_EHCI0_IRQ 0
+#define BCM_6345_USBD_IRQ 0
+#define BCM_6345_USBD_RXDMA0_IRQ 0
+#define BCM_6345_USBD_TXDMA0_IRQ 0
+#define BCM_6345_USBD_RXDMA1_IRQ 0
+#define BCM_6345_USBD_TXDMA1_IRQ 0
+#define BCM_6345_USBD_RXDMA2_IRQ 0
+#define BCM_6345_USBD_TXDMA2_IRQ 0
+#define BCM_6345_ENET0_RXDMA_IRQ (IRQ_INTERNAL_BASE + 13 + 1)
+#define BCM_6345_ENET0_TXDMA_IRQ (IRQ_INTERNAL_BASE + 13 + 2)
+#define BCM_6345_ENET1_RXDMA_IRQ 0
+#define BCM_6345_ENET1_TXDMA_IRQ 0
+#define BCM_6345_PCI_IRQ 0
+#define BCM_6345_PCMCIA_IRQ 0
+#define BCM_6345_ATM_IRQ 0
+#define BCM_6345_ENETSW_RXDMA0_IRQ 0
+#define BCM_6345_ENETSW_RXDMA1_IRQ 0
+#define BCM_6345_ENETSW_RXDMA2_IRQ 0
+#define BCM_6345_ENETSW_RXDMA3_IRQ 0
+#define BCM_6345_ENETSW_TXDMA0_IRQ 0
+#define BCM_6345_ENETSW_TXDMA1_IRQ 0
+#define BCM_6345_ENETSW_TXDMA2_IRQ 0
+#define BCM_6345_ENETSW_TXDMA3_IRQ 0
+#define BCM_6345_XTM_IRQ 0
+#define BCM_6345_XTM_DMA0_IRQ 0
+
+/*
+ * 6348 irqs
+ */
+#define BCM_6348_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
+#define BCM_6348_SPI_IRQ (IRQ_INTERNAL_BASE + 1)
+#define BCM_6348_UART0_IRQ (IRQ_INTERNAL_BASE + 2)
+#define BCM_6348_UART1_IRQ 0
+#define BCM_6348_DSL_IRQ (IRQ_INTERNAL_BASE + 4)
+#define BCM_6348_ENET0_IRQ (IRQ_INTERNAL_BASE + 8)
+#define BCM_6348_ENET1_IRQ (IRQ_INTERNAL_BASE + 7)
+#define BCM_6348_ENET_PHY_IRQ (IRQ_INTERNAL_BASE + 9)
+#define BCM_6348_HSSPI_IRQ 0
+#define BCM_6348_OHCI0_IRQ (IRQ_INTERNAL_BASE + 12)
+#define BCM_6348_EHCI0_IRQ 0
+#define BCM_6348_USBD_IRQ 0
+#define BCM_6348_USBD_RXDMA0_IRQ 0
+#define BCM_6348_USBD_TXDMA0_IRQ 0
+#define BCM_6348_USBD_RXDMA1_IRQ 0
+#define BCM_6348_USBD_TXDMA1_IRQ 0
+#define BCM_6348_USBD_RXDMA2_IRQ 0
+#define BCM_6348_USBD_TXDMA2_IRQ 0
+#define BCM_6348_ENET0_RXDMA_IRQ (IRQ_INTERNAL_BASE + 20)
+#define BCM_6348_ENET0_TXDMA_IRQ (IRQ_INTERNAL_BASE + 21)
+#define BCM_6348_ENET1_RXDMA_IRQ (IRQ_INTERNAL_BASE + 22)
+#define BCM_6348_ENET1_TXDMA_IRQ (IRQ_INTERNAL_BASE + 23)
+#define BCM_6348_PCI_IRQ (IRQ_INTERNAL_BASE + 24)
+#define BCM_6348_PCMCIA_IRQ (IRQ_INTERNAL_BASE + 24)
+#define BCM_6348_ATM_IRQ (IRQ_INTERNAL_BASE + 5)
+#define BCM_6348_ENETSW_RXDMA0_IRQ 0
+#define BCM_6348_ENETSW_RXDMA1_IRQ 0
+#define BCM_6348_ENETSW_RXDMA2_IRQ 0
+#define BCM_6348_ENETSW_RXDMA3_IRQ 0
+#define BCM_6348_ENETSW_TXDMA0_IRQ 0
+#define BCM_6348_ENETSW_TXDMA1_IRQ 0
+#define BCM_6348_ENETSW_TXDMA2_IRQ 0
+#define BCM_6348_ENETSW_TXDMA3_IRQ 0
+#define BCM_6348_XTM_IRQ 0
+#define BCM_6348_XTM_DMA0_IRQ 0
+
+/*
+ * 6358 irqs
+ */
+#define BCM_6358_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
+#define BCM_6358_SPI_IRQ (IRQ_INTERNAL_BASE + 1)
+#define BCM_6358_UART0_IRQ (IRQ_INTERNAL_BASE + 2)
+#define BCM_6358_UART1_IRQ (IRQ_INTERNAL_BASE + 3)
+#define BCM_6358_DSL_IRQ (IRQ_INTERNAL_BASE + 29)
+#define BCM_6358_ENET0_IRQ (IRQ_INTERNAL_BASE + 8)
+#define BCM_6358_ENET1_IRQ (IRQ_INTERNAL_BASE + 6)
+#define BCM_6358_ENET_PHY_IRQ (IRQ_INTERNAL_BASE + 9)
+#define BCM_6358_HSSPI_IRQ 0
+#define BCM_6358_OHCI0_IRQ (IRQ_INTERNAL_BASE + 5)
+#define BCM_6358_EHCI0_IRQ (IRQ_INTERNAL_BASE + 10)
+#define BCM_6358_USBD_IRQ 0
+#define BCM_6358_USBD_RXDMA0_IRQ 0
+#define BCM_6358_USBD_TXDMA0_IRQ 0
+#define BCM_6358_USBD_RXDMA1_IRQ 0
+#define BCM_6358_USBD_TXDMA1_IRQ 0
+#define BCM_6358_USBD_RXDMA2_IRQ 0
+#define BCM_6358_USBD_TXDMA2_IRQ 0
+#define BCM_6358_ENET0_RXDMA_IRQ (IRQ_INTERNAL_BASE + 15)
+#define BCM_6358_ENET0_TXDMA_IRQ (IRQ_INTERNAL_BASE + 16)
+#define BCM_6358_ENET1_RXDMA_IRQ (IRQ_INTERNAL_BASE + 17)
+#define BCM_6358_ENET1_TXDMA_IRQ (IRQ_INTERNAL_BASE + 18)
+#define BCM_6358_PCI_IRQ (IRQ_INTERNAL_BASE + 31)
+#define BCM_6358_PCMCIA_IRQ (IRQ_INTERNAL_BASE + 24)
+#define BCM_6358_ATM_IRQ (IRQ_INTERNAL_BASE + 19)
+#define BCM_6358_ENETSW_RXDMA0_IRQ 0
+#define BCM_6358_ENETSW_RXDMA1_IRQ 0
+#define BCM_6358_ENETSW_RXDMA2_IRQ 0
+#define BCM_6358_ENETSW_RXDMA3_IRQ 0
+#define BCM_6358_ENETSW_TXDMA0_IRQ 0
+#define BCM_6358_ENETSW_TXDMA1_IRQ 0
+#define BCM_6358_ENETSW_TXDMA2_IRQ 0
+#define BCM_6358_ENETSW_TXDMA3_IRQ 0
+#define BCM_6358_XTM_IRQ 0
+#define BCM_6358_XTM_DMA0_IRQ 0
+
+#define BCM_6358_PCM_DMA0_IRQ (IRQ_INTERNAL_BASE + 23)
+#define BCM_6358_PCM_DMA1_IRQ (IRQ_INTERNAL_BASE + 24)
+#define BCM_6358_EXT_IRQ0 (IRQ_INTERNAL_BASE + 25)
+#define BCM_6358_EXT_IRQ1 (IRQ_INTERNAL_BASE + 26)
+#define BCM_6358_EXT_IRQ2 (IRQ_INTERNAL_BASE + 27)
+#define BCM_6358_EXT_IRQ3 (IRQ_INTERNAL_BASE + 28)
+
+/*
+ * 6362 irqs
+ */
+#define BCM_6362_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32)
+
+#define BCM_6362_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
+#define BCM_6362_SPI_IRQ (IRQ_INTERNAL_BASE + 2)
+#define BCM_6362_UART0_IRQ (IRQ_INTERNAL_BASE + 3)
+#define BCM_6362_UART1_IRQ (IRQ_INTERNAL_BASE + 4)
+#define BCM_6362_DSL_IRQ (IRQ_INTERNAL_BASE + 28)
+#define BCM_6362_UDC0_IRQ 0
+#define BCM_6362_ENET0_IRQ 0
+#define BCM_6362_ENET1_IRQ 0
+#define BCM_6362_ENET_PHY_IRQ (IRQ_INTERNAL_BASE + 14)
+#define BCM_6362_HSSPI_IRQ (IRQ_INTERNAL_BASE + 5)
+#define BCM_6362_OHCI0_IRQ (IRQ_INTERNAL_BASE + 9)
+#define BCM_6362_EHCI0_IRQ (IRQ_INTERNAL_BASE + 10)
+#define BCM_6362_USBD_IRQ (IRQ_INTERNAL_BASE + 11)
+#define BCM_6362_USBD_RXDMA0_IRQ (IRQ_INTERNAL_BASE + 20)
+#define BCM_6362_USBD_TXDMA0_IRQ (IRQ_INTERNAL_BASE + 21)
+#define BCM_6362_USBD_RXDMA1_IRQ (IRQ_INTERNAL_BASE + 22)
+#define BCM_6362_USBD_TXDMA1_IRQ (IRQ_INTERNAL_BASE + 23)
+#define BCM_6362_USBD_RXDMA2_IRQ (IRQ_INTERNAL_BASE + 24)
+#define BCM_6362_USBD_TXDMA2_IRQ (IRQ_INTERNAL_BASE + 25)
+#define BCM_6362_PCMCIA_IRQ 0
+#define BCM_6362_ENET0_RXDMA_IRQ 0
+#define BCM_6362_ENET0_TXDMA_IRQ 0
+#define BCM_6362_ENET1_RXDMA_IRQ 0
+#define BCM_6362_ENET1_TXDMA_IRQ 0
+#define BCM_6362_PCI_IRQ (IRQ_INTERNAL_BASE + 30)
+#define BCM_6362_ATM_IRQ 0
+#define BCM_6362_ENETSW_RXDMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 0)
+#define BCM_6362_ENETSW_RXDMA1_IRQ (BCM_6362_HIGH_IRQ_BASE + 1)
+#define BCM_6362_ENETSW_RXDMA2_IRQ (BCM_6362_HIGH_IRQ_BASE + 2)
+#define BCM_6362_ENETSW_RXDMA3_IRQ (BCM_6362_HIGH_IRQ_BASE + 3)
+#define BCM_6362_ENETSW_TXDMA0_IRQ 0
+#define BCM_6362_ENETSW_TXDMA1_IRQ 0
+#define BCM_6362_ENETSW_TXDMA2_IRQ 0
+#define BCM_6362_ENETSW_TXDMA3_IRQ 0
+#define BCM_6362_XTM_IRQ 0
+#define BCM_6362_XTM_DMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 12)
+
+#define BCM_6362_RING_OSC_IRQ (IRQ_INTERNAL_BASE + 1)
+#define BCM_6362_WLAN_GPIO_IRQ (IRQ_INTERNAL_BASE + 6)
+#define BCM_6362_WLAN_IRQ (IRQ_INTERNAL_BASE + 7)
+#define BCM_6362_IPSEC_IRQ (IRQ_INTERNAL_BASE + 8)
+#define BCM_6362_NAND_IRQ (IRQ_INTERNAL_BASE + 12)
+#define BCM_6362_PCM_IRQ (IRQ_INTERNAL_BASE + 13)
+#define BCM_6362_DG_IRQ (IRQ_INTERNAL_BASE + 15)
+#define BCM_6362_EPHY_ENERGY0_IRQ (IRQ_INTERNAL_BASE + 16)
+#define BCM_6362_EPHY_ENERGY1_IRQ (IRQ_INTERNAL_BASE + 17)
+#define BCM_6362_EPHY_ENERGY2_IRQ (IRQ_INTERNAL_BASE + 18)
+#define BCM_6362_EPHY_ENERGY3_IRQ (IRQ_INTERNAL_BASE + 19)
+#define BCM_6362_IPSEC_DMA0_IRQ (IRQ_INTERNAL_BASE + 26)
+#define BCM_6362_IPSEC_DMA1_IRQ (IRQ_INTERNAL_BASE + 27)
+#define BCM_6362_FAP0_IRQ (IRQ_INTERNAL_BASE + 29)
+#define BCM_6362_PCM_DMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 4)
+#define BCM_6362_PCM_DMA1_IRQ (BCM_6362_HIGH_IRQ_BASE + 5)
+#define BCM_6362_DECT0_IRQ (BCM_6362_HIGH_IRQ_BASE + 6)
+#define BCM_6362_DECT1_IRQ (BCM_6362_HIGH_IRQ_BASE + 7)
+#define BCM_6362_EXT_IRQ0 (BCM_6362_HIGH_IRQ_BASE + 8)
+#define BCM_6362_EXT_IRQ1 (BCM_6362_HIGH_IRQ_BASE + 9)
+#define BCM_6362_EXT_IRQ2 (BCM_6362_HIGH_IRQ_BASE + 10)
+#define BCM_6362_EXT_IRQ3 (BCM_6362_HIGH_IRQ_BASE + 11)
+
+/*
+ * 6368 irqs
+ */
+#define BCM_6368_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32)
+
+#define BCM_6368_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
+#define BCM_6368_SPI_IRQ (IRQ_INTERNAL_BASE + 1)
+#define BCM_6368_UART0_IRQ (IRQ_INTERNAL_BASE + 2)
+#define BCM_6368_UART1_IRQ (IRQ_INTERNAL_BASE + 3)
+#define BCM_6368_DSL_IRQ (IRQ_INTERNAL_BASE + 4)
+#define BCM_6368_ENET0_IRQ 0
+#define BCM_6368_ENET1_IRQ 0
+#define BCM_6368_ENET_PHY_IRQ (IRQ_INTERNAL_BASE + 15)
+#define BCM_6368_HSSPI_IRQ 0
+#define BCM_6368_OHCI0_IRQ (IRQ_INTERNAL_BASE + 5)
+#define BCM_6368_EHCI0_IRQ (IRQ_INTERNAL_BASE + 7)
+#define BCM_6368_USBD_IRQ (IRQ_INTERNAL_BASE + 8)
+#define BCM_6368_USBD_RXDMA0_IRQ (IRQ_INTERNAL_BASE + 26)
+#define BCM_6368_USBD_TXDMA0_IRQ (IRQ_INTERNAL_BASE + 27)
+#define BCM_6368_USBD_RXDMA1_IRQ (IRQ_INTERNAL_BASE + 28)
+#define BCM_6368_USBD_TXDMA1_IRQ (IRQ_INTERNAL_BASE + 29)
+#define BCM_6368_USBD_RXDMA2_IRQ (IRQ_INTERNAL_BASE + 30)
+#define BCM_6368_USBD_TXDMA2_IRQ (IRQ_INTERNAL_BASE + 31)
+#define BCM_6368_PCMCIA_IRQ 0
+#define BCM_6368_ENET0_RXDMA_IRQ 0
+#define BCM_6368_ENET0_TXDMA_IRQ 0
+#define BCM_6368_ENET1_RXDMA_IRQ 0
+#define BCM_6368_ENET1_TXDMA_IRQ 0
+#define BCM_6368_PCI_IRQ (IRQ_INTERNAL_BASE + 13)
+#define BCM_6368_ATM_IRQ 0
+#define BCM_6368_ENETSW_RXDMA0_IRQ (BCM_6368_HIGH_IRQ_BASE + 0)
+#define BCM_6368_ENETSW_RXDMA1_IRQ (BCM_6368_HIGH_IRQ_BASE + 1)
+#define BCM_6368_ENETSW_RXDMA2_IRQ (BCM_6368_HIGH_IRQ_BASE + 2)
+#define BCM_6368_ENETSW_RXDMA3_IRQ (BCM_6368_HIGH_IRQ_BASE + 3)
+#define BCM_6368_ENETSW_TXDMA0_IRQ (BCM_6368_HIGH_IRQ_BASE + 4)
+#define BCM_6368_ENETSW_TXDMA1_IRQ (BCM_6368_HIGH_IRQ_BASE + 5)
+#define BCM_6368_ENETSW_TXDMA2_IRQ (BCM_6368_HIGH_IRQ_BASE + 6)
+#define BCM_6368_ENETSW_TXDMA3_IRQ (BCM_6368_HIGH_IRQ_BASE + 7)
+#define BCM_6368_XTM_IRQ (IRQ_INTERNAL_BASE + 11)
+#define BCM_6368_XTM_DMA0_IRQ (BCM_6368_HIGH_IRQ_BASE + 8)
+
+#define BCM_6368_PCM_DMA0_IRQ (BCM_6368_HIGH_IRQ_BASE + 30)
+#define BCM_6368_PCM_DMA1_IRQ (BCM_6368_HIGH_IRQ_BASE + 31)
+#define BCM_6368_EXT_IRQ0 (IRQ_INTERNAL_BASE + 20)
+#define BCM_6368_EXT_IRQ1 (IRQ_INTERNAL_BASE + 21)
+#define BCM_6368_EXT_IRQ2 (IRQ_INTERNAL_BASE + 22)
+#define BCM_6368_EXT_IRQ3 (IRQ_INTERNAL_BASE + 23)
+#define BCM_6368_EXT_IRQ4 (IRQ_INTERNAL_BASE + 24)
+#define BCM_6368_EXT_IRQ5 (IRQ_INTERNAL_BASE + 25)
+
+extern const int *bcm63xx_irqs;
+
+#define __GEN_CPU_IRQ_TABLE(__cpu) \
+ [IRQ_TIMER] = BCM_## __cpu ##_TIMER_IRQ, \
+ [IRQ_SPI] = BCM_## __cpu ##_SPI_IRQ, \
+ [IRQ_UART0] = BCM_## __cpu ##_UART0_IRQ, \
+ [IRQ_UART1] = BCM_## __cpu ##_UART1_IRQ, \
+ [IRQ_DSL] = BCM_## __cpu ##_DSL_IRQ, \
+ [IRQ_ENET0] = BCM_## __cpu ##_ENET0_IRQ, \
+ [IRQ_ENET1] = BCM_## __cpu ##_ENET1_IRQ, \
+ [IRQ_ENET_PHY] = BCM_## __cpu ##_ENET_PHY_IRQ, \
+ [IRQ_HSSPI] = BCM_## __cpu ##_HSSPI_IRQ, \
+ [IRQ_OHCI0] = BCM_## __cpu ##_OHCI0_IRQ, \
+ [IRQ_EHCI0] = BCM_## __cpu ##_EHCI0_IRQ, \
+ [IRQ_USBD] = BCM_## __cpu ##_USBD_IRQ, \
+ [IRQ_USBD_RXDMA0] = BCM_## __cpu ##_USBD_RXDMA0_IRQ, \
+ [IRQ_USBD_TXDMA0] = BCM_## __cpu ##_USBD_TXDMA0_IRQ, \
+ [IRQ_USBD_RXDMA1] = BCM_## __cpu ##_USBD_RXDMA1_IRQ, \
+ [IRQ_USBD_TXDMA1] = BCM_## __cpu ##_USBD_TXDMA1_IRQ, \
+ [IRQ_USBD_RXDMA2] = BCM_## __cpu ##_USBD_RXDMA2_IRQ, \
+ [IRQ_USBD_TXDMA2] = BCM_## __cpu ##_USBD_TXDMA2_IRQ, \
+ [IRQ_ENET0_RXDMA] = BCM_## __cpu ##_ENET0_RXDMA_IRQ, \
+ [IRQ_ENET0_TXDMA] = BCM_## __cpu ##_ENET0_TXDMA_IRQ, \
+ [IRQ_ENET1_RXDMA] = BCM_## __cpu ##_ENET1_RXDMA_IRQ, \
+ [IRQ_ENET1_TXDMA] = BCM_## __cpu ##_ENET1_TXDMA_IRQ, \
+ [IRQ_PCI] = BCM_## __cpu ##_PCI_IRQ, \
+ [IRQ_PCMCIA] = BCM_## __cpu ##_PCMCIA_IRQ, \
+ [IRQ_ATM] = BCM_## __cpu ##_ATM_IRQ, \
+ [IRQ_ENETSW_RXDMA0] = BCM_## __cpu ##_ENETSW_RXDMA0_IRQ, \
+ [IRQ_ENETSW_RXDMA1] = BCM_## __cpu ##_ENETSW_RXDMA1_IRQ, \
+ [IRQ_ENETSW_RXDMA2] = BCM_## __cpu ##_ENETSW_RXDMA2_IRQ, \
+ [IRQ_ENETSW_RXDMA3] = BCM_## __cpu ##_ENETSW_RXDMA3_IRQ, \
+ [IRQ_ENETSW_TXDMA0] = BCM_## __cpu ##_ENETSW_TXDMA0_IRQ, \
+ [IRQ_ENETSW_TXDMA1] = BCM_## __cpu ##_ENETSW_TXDMA1_IRQ, \
+ [IRQ_ENETSW_TXDMA2] = BCM_## __cpu ##_ENETSW_TXDMA2_IRQ, \
+ [IRQ_ENETSW_TXDMA3] = BCM_## __cpu ##_ENETSW_TXDMA3_IRQ, \
+ [IRQ_XTM] = BCM_## __cpu ##_XTM_IRQ, \
+ [IRQ_XTM_DMA0] = BCM_## __cpu ##_XTM_DMA0_IRQ, \
+
+static inline int bcm63xx_get_irq_number(enum bcm63xx_irq irq)
+{
+ return bcm63xx_irqs[irq];
+}
+
+/*
+ * return installed memory size
+ */
+unsigned int bcm63xx_get_memory_size(void);
+
+void bcm63xx_machine_halt(void);
+
+void bcm63xx_machine_reboot(void);
+
+#endif /* !BCM63XX_CPU_H_ */
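As a usage sketch (illustrative, not part of this patch): __GEN_CPU_IRQ_TABLE() above expands into a designated-initializer array indexed by enum bcm63xx_irq, so a per-SoC table can be built once and published through bcm63xx_irqs; the 6362 table and init helper below are assumptions:

	/* sketch: per-SoC IRQ table built with the helper macro */
	static const int bcm6362_irqs[] = {
		__GEN_CPU_IRQ_TABLE(6362)
	};

	static void __init example_pick_irq_table(void)
	{
		/* after this, bcm63xx_get_irq_number(IRQ_UART0) etc. resolve */
		bcm63xx_irqs = bcm6362_irqs;
	}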
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cs.h
new file mode 100644
index 000000000..1c634d7c1
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cs.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_CS_H
+#define BCM63XX_CS_H
+
+int bcm63xx_set_cs_base(unsigned int cs, u32 base, unsigned int size);
+int bcm63xx_set_cs_timing(unsigned int cs, unsigned int wait,
+ unsigned int setup, unsigned int hold);
+int bcm63xx_set_cs_param(unsigned int cs, u32 flags);
+int bcm63xx_set_cs_status(unsigned int cs, int enable);
+
+#endif /* !BCM63XX_CS_H */
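As a usage sketch (illustrative, not part of this patch): the chip-select API is meant to be called from board/device setup code; the chip-select number, window base and size below are assumptions:

	/* sketch: map an 8MB window on CS 0, then enable the chip select */
	static int example_setup_cs0(void)
	{
		int ret;

		ret = bcm63xx_set_cs_base(0, 0x1e000000, 0x800000);
		if (ret)
			return ret;
		return bcm63xx_set_cs_status(0, 1);
	}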
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
new file mode 100644
index 000000000..da39e4d32
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_DEV_ENET_H_
+#define BCM63XX_DEV_ENET_H_
+
+#include <linux/if_ether.h>
+#include <linux/init.h>
+
+#include <bcm63xx_regs.h>
+
+/*
+ * on board ethernet platform data
+ */
+struct bcm63xx_enet_platform_data {
+ char mac_addr[ETH_ALEN];
+
+ int has_phy;
+
+ /* if has_phy, then set use_internal_phy */
+ int use_internal_phy;
+
+ /* or fill phy info to use an external one */
+ int phy_id;
+ int has_phy_interrupt;
+ int phy_interrupt;
+
+ /* if has_phy, use autonegotiated pause parameters or force
+ * them */
+ int pause_auto;
+ int pause_rx;
+ int pause_tx;
+
+ /* if !has_phy, set desired forced speed/duplex */
+ int force_speed_100;
+ int force_duplex_full;
+
+ /* if !has_phy, set callback to perform mii device
+ * init/remove */
+ int (*mii_config)(struct net_device *dev, int probe,
+ int (*mii_read)(struct net_device *dev,
+ int phy_id, int reg),
+ void (*mii_write)(struct net_device *dev,
+ int phy_id, int reg, int val));
+
+ /* DMA channel enable mask */
+ u32 dma_chan_en_mask;
+
+ /* DMA channel interrupt mask */
+ u32 dma_chan_int_mask;
+
+ /* DMA engine has internal SRAM */
+ bool dma_has_sram;
+
+ /* DMA channel register width */
+ unsigned int dma_chan_width;
+
+ /* DMA descriptor shift */
+ unsigned int dma_desc_shift;
+
+ /* dma channel ids */
+ int rx_chan;
+ int tx_chan;
+};
+
+/*
+ * on board ethernet switch platform data
+ */
+#define ENETSW_MAX_PORT 8
+#define ENETSW_PORTS_6328 5 /* 4 FE PHY + 1 RGMII */
+#define ENETSW_PORTS_6368 6 /* 4 FE PHY + 2 RGMII */
+
+#define ENETSW_RGMII_PORT0 4
+
+struct bcm63xx_enetsw_port {
+ int used;
+ int phy_id;
+
+ int bypass_link;
+ int force_speed;
+ int force_duplex_full;
+
+ const char *name;
+};
+
+struct bcm63xx_enetsw_platform_data {
+ char mac_addr[ETH_ALEN];
+ int num_ports;
+ struct bcm63xx_enetsw_port used_ports[ENETSW_MAX_PORT];
+
+ /* DMA channel enable mask */
+ u32 dma_chan_en_mask;
+
+ /* DMA channel interrupt mask */
+ u32 dma_chan_int_mask;
+
+ /* DMA channel register width */
+ unsigned int dma_chan_width;
+
+ /* DMA engine has internal SRAM */
+ bool dma_has_sram;
+};
+
+int __init bcm63xx_enet_register(int unit,
+ const struct bcm63xx_enet_platform_data *pd);
+
+int bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd);
+
+enum bcm63xx_regs_enetdmac {
+ ENETDMAC_CHANCFG,
+ ENETDMAC_IR,
+ ENETDMAC_IRMASK,
+ ENETDMAC_MAXBURST,
+ ENETDMAC_BUFALLOC,
+ ENETDMAC_RSTART,
+ ENETDMAC_FC,
+ ENETDMAC_LEN,
+};
+
+static inline unsigned long bcm63xx_enetdmacreg(enum bcm63xx_regs_enetdmac reg)
+{
+ extern const unsigned long *bcm63xx_regs_enetdmac;
+
+ return bcm63xx_regs_enetdmac[reg];
+}
+
+
+#endif /* ! BCM63XX_DEV_ENET_H_ */
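As a usage sketch (illustrative, not part of this patch): a board setup file fills a bcm63xx_enet_platform_data and hands it to bcm63xx_enet_register(); the field values below are assumptions, and the MAC address would normally come from nvram:

	/* sketch: register MAC 0 with its internal PHY and autoneg pause */
	static struct bcm63xx_enet_platform_data example_enet0_pd = {
		.has_phy		= 1,
		.use_internal_phy	= 1,
		.pause_auto		= 1,
	};

	static int __init example_register_enet0(void)
	{
		return bcm63xx_enet_register(0, &example_enet0_pd);
	}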
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_flash.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_flash.h
new file mode 100644
index 000000000..4d5005f2b
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_flash.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BCM63XX_FLASH_H
+#define __BCM63XX_FLASH_H
+
+enum {
+ BCM63XX_FLASH_TYPE_PARALLEL,
+ BCM63XX_FLASH_TYPE_SERIAL,
+ BCM63XX_FLASH_TYPE_NAND,
+};
+
+int __init bcm63xx_flash_register(void);
+
+#endif /* __BCM63XX_FLASH_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_hsspi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_hsspi.h
new file mode 100644
index 000000000..f93f176c1
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_hsspi.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_DEV_HSSPI_H
+#define BCM63XX_DEV_HSSPI_H
+
+#include <linux/types.h>
+
+int bcm63xx_hsspi_register(void);
+
+#endif /* BCM63XX_DEV_HSSPI_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_pci.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_pci.h
new file mode 100644
index 000000000..1951c125c
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_pci.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_DEV_PCI_H_
+#define BCM63XX_DEV_PCI_H_
+
+extern int bcm63xx_pci_enabled;
+
+#endif /* BCM63XX_DEV_PCI_H_ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_pcmcia.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_pcmcia.h
new file mode 100644
index 000000000..01674ac58
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_pcmcia.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_DEV_PCMCIA_H_
+#define BCM63XX_DEV_PCMCIA_H_
+
+/*
+ * PCMCIA driver platform data
+ */
+struct bcm63xx_pcmcia_platform_data {
+ unsigned int ready_gpio;
+};
+
+int bcm63xx_pcmcia_register(void);
+
+#endif /* BCM63XX_DEV_PCMCIA_H_ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
new file mode 100644
index 000000000..0ab750522
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_DEV_SPI_H
+#define BCM63XX_DEV_SPI_H
+
+#include <linux/types.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_regs.h>
+
+int __init bcm63xx_spi_register(void);
+
+#endif /* BCM63XX_DEV_SPI_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h
new file mode 100644
index 000000000..88f8cf1c7
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_DEV_UART_H_
+#define BCM63XX_DEV_UART_H_
+
+int bcm63xx_uart_register(unsigned int id);
+
+#endif /* BCM63XX_DEV_UART_H_ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_usb_usbd.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_usb_usbd.h
new file mode 100644
index 000000000..3f920baff
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_usb_usbd.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_DEV_USB_USBD_H_
+#define BCM63XX_DEV_USB_USBD_H_
+
+/*
+ * usb device platform data
+ */
+struct bcm63xx_usbd_platform_data {
+ /* board can only support full speed (USB 1.1) */
+ int use_fullspeed;
+
+ /* 0-based port index, for chips with >1 USB PHY */
+ int port_no;
+};
+
+int bcm63xx_usbd_register(const struct bcm63xx_usbd_platform_data *pd);
+
+#endif /* BCM63XX_DEV_USB_USBD_H_ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
new file mode 100644
index 000000000..9212429d5
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_GPIO_H
+#define BCM63XX_GPIO_H
+
+#include <linux/init.h>
+#include <bcm63xx_cpu.h>
+
+int __init bcm63xx_gpio_init(void);
+
+static inline unsigned long bcm63xx_gpio_count(void)
+{
+ switch (bcm63xx_get_cpu_id()) {
+ case BCM6328_CPU_ID:
+ return 32;
+ case BCM3368_CPU_ID:
+ return 40;
+ case BCM6338_CPU_ID:
+ return 8;
+ case BCM6345_CPU_ID:
+ return 16;
+ case BCM6358_CPU_ID:
+ case BCM6368_CPU_ID:
+ return 38;
+ case BCM6362_CPU_ID:
+ return 48;
+ case BCM6348_CPU_ID:
+ default:
+ return 37;
+ }
+}
+
+#define BCM63XX_GPIO_DIR_OUT 0x0
+#define BCM63XX_GPIO_DIR_IN 0x1
+
+#endif /* !BCM63XX_GPIO_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h
new file mode 100644
index 000000000..31c692433
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_IO_H_
+#define BCM63XX_IO_H_
+
+#include <asm/mach-bcm63xx/bcm63xx_cpu.h>
+
+/*
+ * Physical memory map, RAM is mapped at 0x0.
+ *
+ * Note that size MUST be a power of two.
+ */
+#define BCM_PCMCIA_COMMON_BASE_PA (0x20000000)
+#define BCM_PCMCIA_COMMON_SIZE (16 * 1024 * 1024)
+#define BCM_PCMCIA_COMMON_END_PA (BCM_PCMCIA_COMMON_BASE_PA + \
+ BCM_PCMCIA_COMMON_SIZE - 1)
+
+#define BCM_PCMCIA_ATTR_BASE_PA (0x21000000)
+#define BCM_PCMCIA_ATTR_SIZE (16 * 1024 * 1024)
+#define BCM_PCMCIA_ATTR_END_PA (BCM_PCMCIA_ATTR_BASE_PA + \
+ BCM_PCMCIA_ATTR_SIZE - 1)
+
+#define BCM_PCMCIA_IO_BASE_PA (0x22000000)
+#define BCM_PCMCIA_IO_SIZE (64 * 1024)
+#define BCM_PCMCIA_IO_END_PA (BCM_PCMCIA_IO_BASE_PA + \
+ BCM_PCMCIA_IO_SIZE - 1)
+
+#define BCM_PCI_MEM_BASE_PA (0x30000000)
+#define BCM_PCI_MEM_SIZE (128 * 1024 * 1024)
+#define BCM_PCI_MEM_END_PA (BCM_PCI_MEM_BASE_PA + \
+ BCM_PCI_MEM_SIZE - 1)
+
+#define BCM_PCI_IO_BASE_PA (0x08000000)
+#define BCM_PCI_IO_SIZE (64 * 1024)
+#define BCM_PCI_IO_END_PA (BCM_PCI_IO_BASE_PA + \
+ BCM_PCI_IO_SIZE - 1)
+#define BCM_PCI_IO_HALF_PA (BCM_PCI_IO_BASE_PA + \
+ (BCM_PCI_IO_SIZE / 2) - 1)
+
+#define BCM_CB_MEM_BASE_PA (0x38000000)
+#define BCM_CB_MEM_SIZE (128 * 1024 * 1024)
+#define BCM_CB_MEM_END_PA (BCM_CB_MEM_BASE_PA + \
+ BCM_CB_MEM_SIZE - 1)
+
+#define BCM_PCIE_MEM_BASE_PA 0x10f00000
+#define BCM_PCIE_MEM_SIZE (16 * 1024 * 1024)
+#define BCM_PCIE_MEM_END_PA (BCM_PCIE_MEM_BASE_PA + \
+ BCM_PCIE_MEM_SIZE - 1)
+
+/*
+ * Internal registers are accessed through KSEG3
+ */
+#define BCM_REGS_VA(x) ((void __iomem *)(x))
+
+#define bcm_readb(a) (*(volatile unsigned char *) BCM_REGS_VA(a))
+#define bcm_readw(a) (*(volatile unsigned short *) BCM_REGS_VA(a))
+#define bcm_readl(a) (*(volatile unsigned int *) BCM_REGS_VA(a))
+#define bcm_readq(a) (*(volatile u64 *) BCM_REGS_VA(a))
+#define bcm_writeb(v, a) (*(volatile unsigned char *) BCM_REGS_VA((a)) = (v))
+#define bcm_writew(v, a) (*(volatile unsigned short *) BCM_REGS_VA((a)) = (v))
+#define bcm_writel(v, a) (*(volatile unsigned int *) BCM_REGS_VA((a)) = (v))
+#define bcm_writeq(v, a) (*(volatile u64 *) BCM_REGS_VA((a)) = (v))
+
+/*
+ * IO helpers to access register set for current CPU
+ */
+#define bcm_rset_readb(s, o) bcm_readb(bcm63xx_regset_address(s) + (o))
+#define bcm_rset_readw(s, o) bcm_readw(bcm63xx_regset_address(s) + (o))
+#define bcm_rset_readl(s, o) bcm_readl(bcm63xx_regset_address(s) + (o))
+#define bcm_rset_writeb(s, v, o) bcm_writeb((v), \
+ bcm63xx_regset_address(s) + (o))
+#define bcm_rset_writew(s, v, o) bcm_writew((v), \
+ bcm63xx_regset_address(s) + (o))
+#define bcm_rset_writel(s, v, o) bcm_writel((v), \
+ bcm63xx_regset_address(s) + (o))
+
+/*
+ * helpers for frequently used register sets
+ */
+#define bcm_perf_readl(o) bcm_rset_readl(RSET_PERF, (o))
+#define bcm_perf_writel(v, o) bcm_rset_writel(RSET_PERF, (v), (o))
+#define bcm_timer_readl(o) bcm_rset_readl(RSET_TIMER, (o))
+#define bcm_timer_writel(v, o) bcm_rset_writel(RSET_TIMER, (v), (o))
+#define bcm_wdt_readl(o) bcm_rset_readl(RSET_WDT, (o))
+#define bcm_wdt_writel(v, o) bcm_rset_writel(RSET_WDT, (v), (o))
+#define bcm_gpio_readl(o) bcm_rset_readl(RSET_GPIO, (o))
+#define bcm_gpio_writel(v, o) bcm_rset_writel(RSET_GPIO, (v), (o))
+#define bcm_uart0_readl(o) bcm_rset_readl(RSET_UART0, (o))
+#define bcm_uart0_writel(v, o) bcm_rset_writel(RSET_UART0, (v), (o))
+#define bcm_mpi_readl(o) bcm_rset_readl(RSET_MPI, (o))
+#define bcm_mpi_writel(v, o) bcm_rset_writel(RSET_MPI, (v), (o))
+#define bcm_pcmcia_readl(o) bcm_rset_readl(RSET_PCMCIA, (o))
+#define bcm_pcmcia_writel(v, o) bcm_rset_writel(RSET_PCMCIA, (v), (o))
+#define bcm_pcie_readl(o) bcm_rset_readl(RSET_PCIE, (o))
+#define bcm_pcie_writel(v, o) bcm_rset_writel(RSET_PCIE, (v), (o))
+#define bcm_sdram_readl(o) bcm_rset_readl(RSET_SDRAM, (o))
+#define bcm_sdram_writel(v, o) bcm_rset_writel(RSET_SDRAM, (v), (o))
+#define bcm_memc_readl(o) bcm_rset_readl(RSET_MEMC, (o))
+#define bcm_memc_writel(v, o) bcm_rset_writel(RSET_MEMC, (v), (o))
+#define bcm_ddr_readl(o) bcm_rset_readl(RSET_DDR, (o))
+#define bcm_ddr_writel(v, o) bcm_rset_writel(RSET_DDR, (v), (o))
+#define bcm_misc_readl(o) bcm_rset_readl(RSET_MISC, (o))
+#define bcm_misc_writel(v, o) bcm_rset_writel(RSET_MISC, (v), (o))
+
+#endif /* ! BCM63XX_IO_H_ */
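As a usage sketch (illustrative, not part of this patch): the bcm_*_readl/writel helpers are plain read-modify-write accessors for the per-CPU register sets; PERF_CKCTL_REG and CKCTL_6368_SPI_EN referenced below come from bcm63xx_regs.h added later in this patch:

	/* sketch: enable one peripheral clock through the RSET_PERF helpers */
	static void example_enable_spi_clock(void)
	{
		u32 val;

		val = bcm_perf_readl(PERF_CKCTL_REG);
		val |= CKCTL_6368_SPI_EN;
		bcm_perf_writel(val, PERF_CKCTL_REG);
	}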
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_irq.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_irq.h
new file mode 100644
index 000000000..7887bc690
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_irq.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_IRQ_H_
+#define BCM63XX_IRQ_H_
+
+#include <bcm63xx_cpu.h>
+
+#define IRQ_INTERNAL_BASE 8
+#define IRQ_EXTERNAL_BASE 100
+#define IRQ_EXT_0 (IRQ_EXTERNAL_BASE + 0)
+#define IRQ_EXT_1 (IRQ_EXTERNAL_BASE + 1)
+#define IRQ_EXT_2 (IRQ_EXTERNAL_BASE + 2)
+#define IRQ_EXT_3 (IRQ_EXTERNAL_BASE + 3)
+
+#endif /* ! BCM63XX_IRQ_H_ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_iudma.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_iudma.h
new file mode 100644
index 000000000..73df916e4
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_iudma.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_IUDMA_H_
+#define BCM63XX_IUDMA_H_
+
+#include <linux/types.h>
+
+/*
+ * rx/tx dma descriptor
+ */
+struct bcm_enet_desc {
+ u32 len_stat;
+ u32 address;
+};
+
+/* control */
+#define DMADESC_LENGTH_SHIFT 16
+#define DMADESC_LENGTH_MASK (0xfff << DMADESC_LENGTH_SHIFT)
+#define DMADESC_OWNER_MASK (1 << 15)
+#define DMADESC_EOP_MASK (1 << 14)
+#define DMADESC_SOP_MASK (1 << 13)
+#define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK)
+#define DMADESC_WRAP_MASK (1 << 12)
+#define DMADESC_USB_NOZERO_MASK (1 << 1)
+#define DMADESC_USB_ZERO_MASK (1 << 0)
+
+/* status */
+#define DMADESC_UNDER_MASK (1 << 9)
+#define DMADESC_APPEND_CRC (1 << 8)
+#define DMADESC_OVSIZE_MASK (1 << 4)
+#define DMADESC_RXER_MASK (1 << 2)
+#define DMADESC_CRC_MASK (1 << 1)
+#define DMADESC_OV_MASK (1 << 0)
+#define DMADESC_ERR_MASK (DMADESC_UNDER_MASK | \
+ DMADESC_OVSIZE_MASK | \
+ DMADESC_RXER_MASK | \
+ DMADESC_CRC_MASK | \
+ DMADESC_OV_MASK)
+
+#endif /* ! BCM63XX_IUDMA_H_ */
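As a usage sketch (illustrative, not part of this patch): a driver fills bcm_enet_desc entries with a length/status word built from the DMADESC_* masks; the buffer address, length and wrap handling below are assumptions:

	/* sketch: prepare one transmit descriptor for the DMA engine */
	static void example_fill_tx_desc(struct bcm_enet_desc *desc,
					 u32 buf_dma, unsigned int len, int last)
	{
		u32 len_stat;

		len_stat = (len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
		len_stat |= DMADESC_ESOP_MASK | DMADESC_APPEND_CRC |
			    DMADESC_OWNER_MASK;
		if (last)
			len_stat |= DMADESC_WRAP_MASK;

		desc->address = buf_dma;
		desc->len_stat = len_stat;
	}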
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
new file mode 100644
index 000000000..f78d725f2
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_NVRAM_H
+#define BCM63XX_NVRAM_H
+
+#include <linux/types.h>
+
+/**
+ * bcm63xx_nvram_init() - initializes nvram
+ * @nvram: address of the nvram data
+ *
+ * Initializes the local nvram copy from the target address and checks
+ * its checksum.
+ */
+void bcm63xx_nvram_init(void *nvram);
+
+/**
+ * bcm63xx_nvram_get_name() - returns the board name according to nvram
+ *
+ * Returns the board name field from nvram. Note that it might not be
+ * null terminated if it is exactly 16 bytes long.
+ */
+u8 *bcm63xx_nvram_get_name(void);
+
+/**
+ * bcm63xx_nvram_get_mac_address() - register & return a new mac address
+ * @mac: pointer to array for allocated mac
+ *
+ * Registers and returns a mac address from the MACs allocated in nvram.
+ *
+ * Returns 0 on success.
+ */
+int bcm63xx_nvram_get_mac_address(u8 *mac);
+
+int bcm63xx_nvram_get_psi_size(void);
+
+#endif /* BCM63XX_NVRAM_H */
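As a usage sketch (illustrative, not part of this patch): callers consume the nvram MAC pool one address at a time; the error code below is an assumption:

	/* sketch: fetch the next nvram-allocated MAC address (ETH_ALEN bytes) */
	static int example_get_mac(u8 *mac)
	{
		if (bcm63xx_nvram_get_mac_address(mac))
			return -ENODEV;
		return 0;
	}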
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
new file mode 100644
index 000000000..9ceb5e728
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -0,0 +1,1431 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_REGS_H_
+#define BCM63XX_REGS_H_
+
+/*************************************************************************
+ * _REG relative to RSET_PERF
+ *************************************************************************/
+
+/* Chip Identifier / Revision register */
+#define PERF_REV_REG 0x0
+#define REV_CHIPID_SHIFT 16
+#define REV_CHIPID_MASK (0xffff << REV_CHIPID_SHIFT)
+#define REV_REVID_SHIFT 0
+#define REV_REVID_MASK (0xff << REV_REVID_SHIFT)
+
+/* Clock Control register */
+#define PERF_CKCTL_REG 0x4
+
+#define CKCTL_3368_MAC_EN (1 << 3)
+#define CKCTL_3368_TC_EN (1 << 5)
+#define CKCTL_3368_US_TOP_EN (1 << 6)
+#define CKCTL_3368_DS_TOP_EN (1 << 7)
+#define CKCTL_3368_APM_EN (1 << 8)
+#define CKCTL_3368_SPI_EN (1 << 9)
+#define CKCTL_3368_USBS_EN (1 << 10)
+#define CKCTL_3368_BMU_EN (1 << 11)
+#define CKCTL_3368_PCM_EN (1 << 12)
+#define CKCTL_3368_NTP_EN (1 << 13)
+#define CKCTL_3368_ACP_B_EN (1 << 14)
+#define CKCTL_3368_ACP_A_EN (1 << 15)
+#define CKCTL_3368_EMUSB_EN (1 << 17)
+#define CKCTL_3368_ENET0_EN (1 << 18)
+#define CKCTL_3368_ENET1_EN (1 << 19)
+#define CKCTL_3368_USBU_EN (1 << 20)
+#define CKCTL_3368_EPHY_EN (1 << 21)
+
+#define CKCTL_3368_ALL_SAFE_EN (CKCTL_3368_MAC_EN | \
+ CKCTL_3368_TC_EN | \
+ CKCTL_3368_US_TOP_EN | \
+ CKCTL_3368_DS_TOP_EN | \
+ CKCTL_3368_APM_EN | \
+ CKCTL_3368_SPI_EN | \
+ CKCTL_3368_USBS_EN | \
+ CKCTL_3368_BMU_EN | \
+ CKCTL_3368_PCM_EN | \
+ CKCTL_3368_NTP_EN | \
+ CKCTL_3368_ACP_B_EN | \
+ CKCTL_3368_ACP_A_EN | \
+ CKCTL_3368_EMUSB_EN | \
+ CKCTL_3368_USBU_EN)
+
+#define CKCTL_6328_PHYMIPS_EN (1 << 0)
+#define CKCTL_6328_ADSL_QPROC_EN (1 << 1)
+#define CKCTL_6328_ADSL_AFE_EN (1 << 2)
+#define CKCTL_6328_ADSL_EN (1 << 3)
+#define CKCTL_6328_MIPS_EN (1 << 4)
+#define CKCTL_6328_SAR_EN (1 << 5)
+#define CKCTL_6328_PCM_EN (1 << 6)
+#define CKCTL_6328_USBD_EN (1 << 7)
+#define CKCTL_6328_USBH_EN (1 << 8)
+#define CKCTL_6328_HSSPI_EN (1 << 9)
+#define CKCTL_6328_PCIE_EN (1 << 10)
+#define CKCTL_6328_ROBOSW_EN (1 << 11)
+
+#define CKCTL_6328_ALL_SAFE_EN (CKCTL_6328_PHYMIPS_EN | \
+ CKCTL_6328_ADSL_QPROC_EN | \
+ CKCTL_6328_ADSL_AFE_EN | \
+ CKCTL_6328_ADSL_EN | \
+ CKCTL_6328_SAR_EN | \
+ CKCTL_6328_PCM_EN | \
+ CKCTL_6328_USBD_EN | \
+ CKCTL_6328_USBH_EN | \
+ CKCTL_6328_ROBOSW_EN | \
+ CKCTL_6328_PCIE_EN)
+
+#define CKCTL_6338_ADSLPHY_EN (1 << 0)
+#define CKCTL_6338_MPI_EN (1 << 1)
+#define CKCTL_6338_DRAM_EN (1 << 2)
+#define CKCTL_6338_ENET_EN (1 << 4)
+#define CKCTL_6338_USBS_EN (1 << 4)
+#define CKCTL_6338_SAR_EN (1 << 5)
+#define CKCTL_6338_SPI_EN (1 << 9)
+
+#define CKCTL_6338_ALL_SAFE_EN (CKCTL_6338_ADSLPHY_EN | \
+ CKCTL_6338_MPI_EN | \
+ CKCTL_6338_ENET_EN | \
+ CKCTL_6338_SAR_EN | \
+ CKCTL_6338_SPI_EN)
+
+/* BCM6345 clock bits are shifted left by 16 because of the test control
+ * register, which is 16 bits wide. That way we do not need any
+ * BCM6345-specific code for handling clocks, and writing 0 to the test
+ * control register is fine.
+ */
+#define CKCTL_6345_CPU_EN (1 << 16)
+#define CKCTL_6345_BUS_EN (1 << 17)
+#define CKCTL_6345_EBI_EN (1 << 18)
+#define CKCTL_6345_UART_EN (1 << 19)
+#define CKCTL_6345_ADSLPHY_EN (1 << 20)
+#define CKCTL_6345_ENET_EN (1 << 23)
+#define CKCTL_6345_USBH_EN (1 << 24)
+
+#define CKCTL_6345_ALL_SAFE_EN (CKCTL_6345_ENET_EN | \
+ CKCTL_6345_USBH_EN | \
+ CKCTL_6345_ADSLPHY_EN)
+
+#define CKCTL_6348_ADSLPHY_EN (1 << 0)
+#define CKCTL_6348_MPI_EN (1 << 1)
+#define CKCTL_6348_SDRAM_EN (1 << 2)
+#define CKCTL_6348_M2M_EN (1 << 3)
+#define CKCTL_6348_ENET_EN (1 << 4)
+#define CKCTL_6348_SAR_EN (1 << 5)
+#define CKCTL_6348_USBS_EN (1 << 6)
+#define CKCTL_6348_USBH_EN (1 << 8)
+#define CKCTL_6348_SPI_EN (1 << 9)
+
+#define CKCTL_6348_ALL_SAFE_EN (CKCTL_6348_ADSLPHY_EN | \
+ CKCTL_6348_M2M_EN | \
+ CKCTL_6348_ENET_EN | \
+ CKCTL_6348_SAR_EN | \
+ CKCTL_6348_USBS_EN | \
+ CKCTL_6348_USBH_EN | \
+ CKCTL_6348_SPI_EN)
+
+#define CKCTL_6358_ENET_EN (1 << 4)
+#define CKCTL_6358_ADSLPHY_EN (1 << 5)
+#define CKCTL_6358_PCM_EN (1 << 8)
+#define CKCTL_6358_SPI_EN (1 << 9)
+#define CKCTL_6358_USBS_EN (1 << 10)
+#define CKCTL_6358_SAR_EN (1 << 11)
+#define CKCTL_6358_EMUSB_EN (1 << 17)
+#define CKCTL_6358_ENET0_EN (1 << 18)
+#define CKCTL_6358_ENET1_EN (1 << 19)
+#define CKCTL_6358_USBSU_EN (1 << 20)
+#define CKCTL_6358_EPHY_EN (1 << 21)
+
+#define CKCTL_6358_ALL_SAFE_EN (CKCTL_6358_ENET_EN | \
+ CKCTL_6358_ADSLPHY_EN | \
+ CKCTL_6358_PCM_EN | \
+ CKCTL_6358_SPI_EN | \
+ CKCTL_6358_USBS_EN | \
+ CKCTL_6358_SAR_EN | \
+ CKCTL_6358_EMUSB_EN | \
+ CKCTL_6358_ENET0_EN | \
+ CKCTL_6358_ENET1_EN | \
+ CKCTL_6358_USBSU_EN | \
+ CKCTL_6358_EPHY_EN)
+
+#define CKCTL_6362_ADSL_QPROC_EN (1 << 1)
+#define CKCTL_6362_ADSL_AFE_EN (1 << 2)
+#define CKCTL_6362_ADSL_EN (1 << 3)
+#define CKCTL_6362_MIPS_EN (1 << 4)
+#define CKCTL_6362_WLAN_OCP_EN (1 << 5)
+#define CKCTL_6362_SWPKT_USB_EN (1 << 7)
+#define CKCTL_6362_SWPKT_SAR_EN (1 << 8)
+#define CKCTL_6362_SAR_EN (1 << 9)
+#define CKCTL_6362_ROBOSW_EN (1 << 10)
+#define CKCTL_6362_PCM_EN (1 << 11)
+#define CKCTL_6362_USBD_EN (1 << 12)
+#define CKCTL_6362_USBH_EN (1 << 13)
+#define CKCTL_6362_IPSEC_EN (1 << 14)
+#define CKCTL_6362_SPI_EN (1 << 15)
+#define CKCTL_6362_HSSPI_EN (1 << 16)
+#define CKCTL_6362_PCIE_EN (1 << 17)
+#define CKCTL_6362_FAP_EN (1 << 18)
+#define CKCTL_6362_PHYMIPS_EN (1 << 19)
+#define CKCTL_6362_NAND_EN (1 << 20)
+
+#define CKCTL_6362_ALL_SAFE_EN (CKCTL_6362_PHYMIPS_EN | \
+ CKCTL_6362_ADSL_QPROC_EN | \
+ CKCTL_6362_ADSL_AFE_EN | \
+ CKCTL_6362_ADSL_EN | \
+ CKCTL_6362_SAR_EN | \
+ CKCTL_6362_PCM_EN | \
+ CKCTL_6362_IPSEC_EN | \
+ CKCTL_6362_USBD_EN | \
+ CKCTL_6362_USBH_EN | \
+ CKCTL_6362_ROBOSW_EN | \
+ CKCTL_6362_PCIE_EN)
+
+
+#define CKCTL_6368_VDSL_QPROC_EN (1 << 2)
+#define CKCTL_6368_VDSL_AFE_EN (1 << 3)
+#define CKCTL_6368_VDSL_BONDING_EN (1 << 4)
+#define CKCTL_6368_VDSL_EN (1 << 5)
+#define CKCTL_6368_PHYMIPS_EN (1 << 6)
+#define CKCTL_6368_SWPKT_USB_EN (1 << 7)
+#define CKCTL_6368_SWPKT_SAR_EN (1 << 8)
+#define CKCTL_6368_SPI_EN (1 << 9)
+#define CKCTL_6368_USBD_EN (1 << 10)
+#define CKCTL_6368_SAR_EN (1 << 11)
+#define CKCTL_6368_ROBOSW_EN (1 << 12)
+#define CKCTL_6368_UTOPIA_EN (1 << 13)
+#define CKCTL_6368_PCM_EN (1 << 14)
+#define CKCTL_6368_USBH_EN (1 << 15)
+#define CKCTL_6368_DISABLE_GLESS_EN (1 << 16)
+#define CKCTL_6368_NAND_EN (1 << 17)
+#define CKCTL_6368_IPSEC_EN (1 << 18)
+
+#define CKCTL_6368_ALL_SAFE_EN (CKCTL_6368_SWPKT_USB_EN | \
+ CKCTL_6368_SWPKT_SAR_EN | \
+ CKCTL_6368_SPI_EN | \
+ CKCTL_6368_USBD_EN | \
+ CKCTL_6368_SAR_EN | \
+ CKCTL_6368_ROBOSW_EN | \
+ CKCTL_6368_UTOPIA_EN | \
+ CKCTL_6368_PCM_EN | \
+ CKCTL_6368_USBH_EN | \
+ CKCTL_6368_DISABLE_GLESS_EN | \
+ CKCTL_6368_NAND_EN | \
+ CKCTL_6368_IPSEC_EN)
+
+/* System PLL Control register */
+#define PERF_SYS_PLL_CTL_REG 0x8
+#define SYS_PLL_SOFT_RESET 0x1
+
+/* Interrupt Mask register */
+#define PERF_IRQMASK_3368_REG 0xc
+#define PERF_IRQMASK_6328_REG(x) (0x20 + (x) * 0x10)
+#define PERF_IRQMASK_6338_REG 0xc
+#define PERF_IRQMASK_6345_REG 0xc
+#define PERF_IRQMASK_6348_REG 0xc
+#define PERF_IRQMASK_6358_REG(x) (0xc + (x) * 0x2c)
+#define PERF_IRQMASK_6362_REG(x) (0x20 + (x) * 0x10)
+#define PERF_IRQMASK_6368_REG(x) (0x20 + (x) * 0x10)
+
+/* Interrupt Status register */
+#define PERF_IRQSTAT_3368_REG 0x10
+#define PERF_IRQSTAT_6328_REG(x) (0x28 + (x) * 0x10)
+#define PERF_IRQSTAT_6338_REG 0x10
+#define PERF_IRQSTAT_6345_REG 0x10
+#define PERF_IRQSTAT_6348_REG 0x10
+#define PERF_IRQSTAT_6358_REG(x) (0x10 + (x) * 0x2c)
+#define PERF_IRQSTAT_6362_REG(x) (0x28 + (x) * 0x10)
+#define PERF_IRQSTAT_6368_REG(x) (0x28 + (x) * 0x10)
+
+/* External Interrupt Configuration register */
+#define PERF_EXTIRQ_CFG_REG_3368 0x14
+#define PERF_EXTIRQ_CFG_REG_6328 0x18
+#define PERF_EXTIRQ_CFG_REG_6338 0x14
+#define PERF_EXTIRQ_CFG_REG_6345 0x14
+#define PERF_EXTIRQ_CFG_REG_6348 0x14
+#define PERF_EXTIRQ_CFG_REG_6358 0x14
+#define PERF_EXTIRQ_CFG_REG_6362 0x18
+#define PERF_EXTIRQ_CFG_REG_6368 0x18
+
+#define PERF_EXTIRQ_CFG_REG2_6368 0x1c
+
+/* for 6348 only */
+#define EXTIRQ_CFG_SENSE_6348(x) (1 << (x))
+#define EXTIRQ_CFG_STAT_6348(x) (1 << (x + 5))
+#define EXTIRQ_CFG_CLEAR_6348(x) (1 << (x + 10))
+#define EXTIRQ_CFG_MASK_6348(x) (1 << (x + 15))
+#define EXTIRQ_CFG_BOTHEDGE_6348(x) (1 << (x + 20))
+#define EXTIRQ_CFG_LEVELSENSE_6348(x) (1 << (x + 25))
+#define EXTIRQ_CFG_CLEAR_ALL_6348 (0xf << 10)
+#define EXTIRQ_CFG_MASK_ALL_6348 (0xf << 15)
+
+/* for all others */
+#define EXTIRQ_CFG_SENSE(x) (1 << (x))
+#define EXTIRQ_CFG_STAT(x) (1 << (x + 4))
+#define EXTIRQ_CFG_CLEAR(x) (1 << (x + 8))
+#define EXTIRQ_CFG_MASK(x) (1 << (x + 12))
+#define EXTIRQ_CFG_BOTHEDGE(x) (1 << (x + 16))
+#define EXTIRQ_CFG_LEVELSENSE(x) (1 << (x + 20))
+#define EXTIRQ_CFG_CLEAR_ALL (0xf << 8)
+#define EXTIRQ_CFG_MASK_ALL (0xf << 12)
+
+/* Soft Reset register */
+#define PERF_SOFTRESET_REG 0x28
+#define PERF_SOFTRESET_6328_REG 0x10
+#define PERF_SOFTRESET_6358_REG 0x34
+#define PERF_SOFTRESET_6362_REG 0x10
+#define PERF_SOFTRESET_6368_REG 0x10
+
+#define SOFTRESET_3368_SPI_MASK (1 << 0)
+#define SOFTRESET_3368_ENET_MASK (1 << 2)
+#define SOFTRESET_3368_MPI_MASK (1 << 3)
+#define SOFTRESET_3368_EPHY_MASK (1 << 6)
+#define SOFTRESET_3368_USBS_MASK (1 << 11)
+#define SOFTRESET_3368_PCM_MASK (1 << 13)
+
+#define SOFTRESET_6328_SPI_MASK (1 << 0)
+#define SOFTRESET_6328_EPHY_MASK (1 << 1)
+#define SOFTRESET_6328_SAR_MASK (1 << 2)
+#define SOFTRESET_6328_ENETSW_MASK (1 << 3)
+#define SOFTRESET_6328_USBS_MASK (1 << 4)
+#define SOFTRESET_6328_USBH_MASK (1 << 5)
+#define SOFTRESET_6328_PCM_MASK (1 << 6)
+#define SOFTRESET_6328_PCIE_CORE_MASK (1 << 7)
+#define SOFTRESET_6328_PCIE_MASK (1 << 8)
+#define SOFTRESET_6328_PCIE_EXT_MASK (1 << 9)
+#define SOFTRESET_6328_PCIE_HARD_MASK (1 << 10)
+
+#define SOFTRESET_6338_SPI_MASK (1 << 0)
+#define SOFTRESET_6338_ENET_MASK (1 << 2)
+#define SOFTRESET_6338_USBH_MASK (1 << 3)
+#define SOFTRESET_6338_USBS_MASK (1 << 4)
+#define SOFTRESET_6338_ADSL_MASK (1 << 5)
+#define SOFTRESET_6338_DMAMEM_MASK (1 << 6)
+#define SOFTRESET_6338_SAR_MASK (1 << 7)
+#define SOFTRESET_6338_ACLC_MASK (1 << 8)
+#define SOFTRESET_6338_ADSLMIPSPLL_MASK (1 << 10)
+#define SOFTRESET_6338_ALL (SOFTRESET_6338_SPI_MASK | \
+ SOFTRESET_6338_ENET_MASK | \
+ SOFTRESET_6338_USBH_MASK | \
+ SOFTRESET_6338_USBS_MASK | \
+ SOFTRESET_6338_ADSL_MASK | \
+ SOFTRESET_6338_DMAMEM_MASK | \
+ SOFTRESET_6338_SAR_MASK | \
+ SOFTRESET_6338_ACLC_MASK | \
+ SOFTRESET_6338_ADSLMIPSPLL_MASK)
+
+#define SOFTRESET_6348_SPI_MASK (1 << 0)
+#define SOFTRESET_6348_ENET_MASK (1 << 2)
+#define SOFTRESET_6348_USBH_MASK (1 << 3)
+#define SOFTRESET_6348_USBS_MASK (1 << 4)
+#define SOFTRESET_6348_ADSL_MASK (1 << 5)
+#define SOFTRESET_6348_DMAMEM_MASK (1 << 6)
+#define SOFTRESET_6348_SAR_MASK (1 << 7)
+#define SOFTRESET_6348_ACLC_MASK (1 << 8)
+#define SOFTRESET_6348_ADSLMIPSPLL_MASK (1 << 10)
+
+#define SOFTRESET_6348_ALL (SOFTRESET_6348_SPI_MASK | \
+ SOFTRESET_6348_ENET_MASK | \
+ SOFTRESET_6348_USBH_MASK | \
+ SOFTRESET_6348_USBS_MASK | \
+ SOFTRESET_6348_ADSL_MASK | \
+ SOFTRESET_6348_DMAMEM_MASK | \
+ SOFTRESET_6348_SAR_MASK | \
+ SOFTRESET_6348_ACLC_MASK | \
+ SOFTRESET_6348_ADSLMIPSPLL_MASK)
+
+#define SOFTRESET_6358_SPI_MASK (1 << 0)
+#define SOFTRESET_6358_ENET_MASK (1 << 2)
+#define SOFTRESET_6358_MPI_MASK (1 << 3)
+#define SOFTRESET_6358_EPHY_MASK (1 << 6)
+#define SOFTRESET_6358_SAR_MASK (1 << 7)
+#define SOFTRESET_6358_USBH_MASK (1 << 12)
+#define SOFTRESET_6358_PCM_MASK (1 << 13)
+#define SOFTRESET_6358_ADSL_MASK (1 << 14)
+
+#define SOFTRESET_6362_SPI_MASK (1 << 0)
+#define SOFTRESET_6362_IPSEC_MASK (1 << 1)
+#define SOFTRESET_6362_EPHY_MASK (1 << 2)
+#define SOFTRESET_6362_SAR_MASK (1 << 3)
+#define SOFTRESET_6362_ENETSW_MASK (1 << 4)
+#define SOFTRESET_6362_USBS_MASK (1 << 5)
+#define SOFTRESET_6362_USBH_MASK (1 << 6)
+#define SOFTRESET_6362_PCM_MASK (1 << 7)
+#define SOFTRESET_6362_PCIE_CORE_MASK (1 << 8)
+#define SOFTRESET_6362_PCIE_MASK (1 << 9)
+#define SOFTRESET_6362_PCIE_EXT_MASK (1 << 10)
+#define SOFTRESET_6362_WLAN_SHIM_MASK (1 << 11)
+#define SOFTRESET_6362_DDR_PHY_MASK (1 << 12)
+#define SOFTRESET_6362_FAP_MASK (1 << 13)
+#define SOFTRESET_6362_WLAN_UBUS_MASK (1 << 14)
+
+#define SOFTRESET_6368_SPI_MASK (1 << 0)
+#define SOFTRESET_6368_MPI_MASK (1 << 3)
+#define SOFTRESET_6368_EPHY_MASK (1 << 6)
+#define SOFTRESET_6368_SAR_MASK (1 << 7)
+#define SOFTRESET_6368_ENETSW_MASK (1 << 10)
+#define SOFTRESET_6368_USBS_MASK (1 << 11)
+#define SOFTRESET_6368_USBH_MASK (1 << 12)
+#define SOFTRESET_6368_PCM_MASK (1 << 13)
+
+/* MIPS PLL control register */
+#define PERF_MIPSPLLCTL_REG 0x34
+#define MIPSPLLCTL_N1_SHIFT 20
+#define MIPSPLLCTL_N1_MASK (0x7 << MIPSPLLCTL_N1_SHIFT)
+#define MIPSPLLCTL_N2_SHIFT 15
+#define MIPSPLLCTL_N2_MASK (0x1f << MIPSPLLCTL_N2_SHIFT)
+#define MIPSPLLCTL_M1REF_SHIFT 12
+#define MIPSPLLCTL_M1REF_MASK (0x7 << MIPSPLLCTL_M1REF_SHIFT)
+#define MIPSPLLCTL_M2REF_SHIFT 9
+#define MIPSPLLCTL_M2REF_MASK (0x7 << MIPSPLLCTL_M2REF_SHIFT)
+#define MIPSPLLCTL_M1CPU_SHIFT 6
+#define MIPSPLLCTL_M1CPU_MASK (0x7 << MIPSPLLCTL_M1CPU_SHIFT)
+#define MIPSPLLCTL_M1BUS_SHIFT 3
+#define MIPSPLLCTL_M1BUS_MASK (0x7 << MIPSPLLCTL_M1BUS_SHIFT)
+#define MIPSPLLCTL_M2BUS_SHIFT 0
+#define MIPSPLLCTL_M2BUS_MASK (0x7 << MIPSPLLCTL_M2BUS_SHIFT)
+
+/* ADSL PHY PLL Control register */
+#define PERF_ADSLPLLCTL_REG 0x38
+#define ADSLPLLCTL_N1_SHIFT 20
+#define ADSLPLLCTL_N1_MASK (0x7 << ADSLPLLCTL_N1_SHIFT)
+#define ADSLPLLCTL_N2_SHIFT 15
+#define ADSLPLLCTL_N2_MASK (0x1f << ADSLPLLCTL_N2_SHIFT)
+#define ADSLPLLCTL_M1REF_SHIFT 12
+#define ADSLPLLCTL_M1REF_MASK (0x7 << ADSLPLLCTL_M1REF_SHIFT)
+#define ADSLPLLCTL_M2REF_SHIFT 9
+#define ADSLPLLCTL_M2REF_MASK (0x7 << ADSLPLLCTL_M2REF_SHIFT)
+#define ADSLPLLCTL_M1CPU_SHIFT 6
+#define ADSLPLLCTL_M1CPU_MASK (0x7 << ADSLPLLCTL_M1CPU_SHIFT)
+#define ADSLPLLCTL_M1BUS_SHIFT 3
+#define ADSLPLLCTL_M1BUS_MASK (0x7 << ADSLPLLCTL_M1BUS_SHIFT)
+#define ADSLPLLCTL_M2BUS_SHIFT 0
+#define ADSLPLLCTL_M2BUS_MASK (0x7 << ADSLPLLCTL_M2BUS_SHIFT)
+
+#define ADSLPLLCTL_VAL(n1, n2, m1ref, m2ref, m1cpu, m1bus, m2bus) \
+ (((n1) << ADSLPLLCTL_N1_SHIFT) | \
+ ((n2) << ADSLPLLCTL_N2_SHIFT) | \
+ ((m1ref) << ADSLPLLCTL_M1REF_SHIFT) | \
+ ((m2ref) << ADSLPLLCTL_M2REF_SHIFT) | \
+ ((m1cpu) << ADSLPLLCTL_M1CPU_SHIFT) | \
+ ((m1bus) << ADSLPLLCTL_M1BUS_SHIFT) | \
+ ((m2bus) << ADSLPLLCTL_M2BUS_SHIFT))
+
+
+/*************************************************************************
+ * _REG relative to RSET_TIMER
+ *************************************************************************/
+
+#define BCM63XX_TIMER_COUNT 4
+#define TIMER_T0_ID 0
+#define TIMER_T1_ID 1
+#define TIMER_T2_ID 2
+#define TIMER_WDT_ID 3
+
+/* Timer irqstat register */
+#define TIMER_IRQSTAT_REG 0
+#define TIMER_IRQSTAT_TIMER_CAUSE(x) (1 << (x))
+#define TIMER_IRQSTAT_TIMER0_CAUSE (1 << 0)
+#define TIMER_IRQSTAT_TIMER1_CAUSE (1 << 1)
+#define TIMER_IRQSTAT_TIMER2_CAUSE (1 << 2)
+#define TIMER_IRQSTAT_WDT_CAUSE (1 << 3)
+#define TIMER_IRQSTAT_TIMER_IR_EN(x) (1 << ((x) + 8))
+#define TIMER_IRQSTAT_TIMER0_IR_EN (1 << 8)
+#define TIMER_IRQSTAT_TIMER1_IR_EN (1 << 9)
+#define TIMER_IRQSTAT_TIMER2_IR_EN (1 << 10)
+
+/* Timer control register */
+#define TIMER_CTLx_REG(x) (0x4 + (x * 4))
+#define TIMER_CTL0_REG 0x4
+#define TIMER_CTL1_REG 0x8
+#define TIMER_CTL2_REG 0xC
+#define TIMER_CTL_COUNTDOWN_MASK (0x3fffffff)
+#define TIMER_CTL_MONOTONIC_MASK (1 << 30)
+#define TIMER_CTL_ENABLE_MASK (1 << 31)
+
+
+/*************************************************************************
+ * _REG relative to RSET_WDT
+ *************************************************************************/
+
+/* Watchdog default count register */
+#define WDT_DEFVAL_REG 0x0
+
+/* Watchdog control register */
+#define WDT_CTL_REG 0x4
+
+/* Watchdog control register constants */
+#define WDT_START_1 (0xff00)
+#define WDT_START_2 (0x00ff)
+#define WDT_STOP_1 (0xee00)
+#define WDT_STOP_2 (0x00ee)
+
+/* Watchdog reset length register */
+#define WDT_RSTLEN_REG 0x8
+
+/* Watchdog soft reset register (BCM6328 only) */
+#define WDT_SOFTRESET_REG 0xc
+
+/*************************************************************************
+ * _REG relative to RSET_GPIO
+ *************************************************************************/
+
+/* GPIO registers */
+#define GPIO_CTL_HI_REG 0x0
+#define GPIO_CTL_LO_REG 0x4
+#define GPIO_DATA_HI_REG 0x8
+#define GPIO_DATA_LO_REG 0xC
+#define GPIO_DATA_LO_REG_6345 0x8
+
+/* GPIO mux registers and constants */
+#define GPIO_MODE_REG 0x18
+
+#define GPIO_MODE_6348_G4_DIAG 0x00090000
+#define GPIO_MODE_6348_G4_UTOPIA 0x00080000
+#define GPIO_MODE_6348_G4_LEGACY_LED 0x00030000
+#define GPIO_MODE_6348_G4_MII_SNOOP 0x00020000
+#define GPIO_MODE_6348_G4_EXT_EPHY 0x00010000
+#define GPIO_MODE_6348_G3_DIAG 0x00009000
+#define GPIO_MODE_6348_G3_UTOPIA 0x00008000
+#define GPIO_MODE_6348_G3_EXT_MII 0x00007000
+#define GPIO_MODE_6348_G2_DIAG 0x00000900
+#define GPIO_MODE_6348_G2_PCI 0x00000500
+#define GPIO_MODE_6348_G1_DIAG 0x00000090
+#define GPIO_MODE_6348_G1_UTOPIA 0x00000080
+#define GPIO_MODE_6348_G1_SPI_UART 0x00000060
+#define GPIO_MODE_6348_G1_SPI_MASTER 0x00000060
+#define GPIO_MODE_6348_G1_MII_PCCARD 0x00000040
+#define GPIO_MODE_6348_G1_MII_SNOOP 0x00000020
+#define GPIO_MODE_6348_G1_EXT_EPHY 0x00000010
+#define GPIO_MODE_6348_G0_DIAG 0x00000009
+#define GPIO_MODE_6348_G0_EXT_MII 0x00000007
+
+#define GPIO_MODE_6358_EXTRACS (1 << 5)
+#define GPIO_MODE_6358_UART1 (1 << 6)
+#define GPIO_MODE_6358_EXTRA_SPI_SS (1 << 7)
+#define GPIO_MODE_6358_SERIAL_LED (1 << 10)
+#define GPIO_MODE_6358_UTOPIA (1 << 12)
+
+#define GPIO_MODE_6368_ANALOG_AFE_0 (1 << 0)
+#define GPIO_MODE_6368_ANALOG_AFE_1 (1 << 1)
+#define GPIO_MODE_6368_SYS_IRQ (1 << 2)
+#define GPIO_MODE_6368_SERIAL_LED_DATA (1 << 3)
+#define GPIO_MODE_6368_SERIAL_LED_CLK (1 << 4)
+#define GPIO_MODE_6368_INET_LED (1 << 5)
+#define GPIO_MODE_6368_EPHY0_LED (1 << 6)
+#define GPIO_MODE_6368_EPHY1_LED (1 << 7)
+#define GPIO_MODE_6368_EPHY2_LED (1 << 8)
+#define GPIO_MODE_6368_EPHY3_LED (1 << 9)
+#define GPIO_MODE_6368_ROBOSW_LED_DAT (1 << 10)
+#define GPIO_MODE_6368_ROBOSW_LED_CLK (1 << 11)
+#define GPIO_MODE_6368_ROBOSW_LED0 (1 << 12)
+#define GPIO_MODE_6368_ROBOSW_LED1 (1 << 13)
+#define GPIO_MODE_6368_USBD_LED (1 << 14)
+#define GPIO_MODE_6368_NTR_PULSE (1 << 15)
+#define GPIO_MODE_6368_PCI_REQ1 (1 << 16)
+#define GPIO_MODE_6368_PCI_GNT1 (1 << 17)
+#define GPIO_MODE_6368_PCI_INTB (1 << 18)
+#define GPIO_MODE_6368_PCI_REQ0 (1 << 19)
+#define GPIO_MODE_6368_PCI_GNT0 (1 << 20)
+#define GPIO_MODE_6368_PCMCIA_CD1 (1 << 22)
+#define GPIO_MODE_6368_PCMCIA_CD2 (1 << 23)
+#define GPIO_MODE_6368_PCMCIA_VS1 (1 << 24)
+#define GPIO_MODE_6368_PCMCIA_VS2 (1 << 25)
+#define GPIO_MODE_6368_EBI_CS2 (1 << 26)
+#define GPIO_MODE_6368_EBI_CS3 (1 << 27)
+#define GPIO_MODE_6368_SPI_SSN2 (1 << 28)
+#define GPIO_MODE_6368_SPI_SSN3 (1 << 29)
+#define GPIO_MODE_6368_SPI_SSN4 (1 << 30)
+#define GPIO_MODE_6368_SPI_SSN5 (1 << 31)
+
+
+#define GPIO_PINMUX_OTHR_REG 0x24
+#define GPIO_PINMUX_OTHR_6328_USB_SHIFT 12
+#define GPIO_PINMUX_OTHR_6328_USB_MASK (3 << GPIO_PINMUX_OTHR_6328_USB_SHIFT)
+#define GPIO_PINMUX_OTHR_6328_USB_HOST (1 << GPIO_PINMUX_OTHR_6328_USB_SHIFT)
+#define GPIO_PINMUX_OTHR_6328_USB_DEV (2 << GPIO_PINMUX_OTHR_6328_USB_SHIFT)
+
+#define GPIO_BASEMODE_6368_REG 0x38
+#define GPIO_BASEMODE_6368_UART2 0x1
+#define GPIO_BASEMODE_6368_GPIO 0x0
+#define GPIO_BASEMODE_6368_MASK 0x7
+/* those bits must be kept as read in the gpio basemode register */
+
+#define GPIO_STRAPBUS_REG 0x40
+#define STRAPBUS_6358_BOOT_SEL_PARALLEL (1 << 1)
+#define STRAPBUS_6358_BOOT_SEL_SERIAL (0 << 1)
+#define STRAPBUS_6368_BOOT_SEL_MASK 0x3
+#define STRAPBUS_6368_BOOT_SEL_NAND 0
+#define STRAPBUS_6368_BOOT_SEL_SERIAL 1
+#define STRAPBUS_6368_BOOT_SEL_PARALLEL 3
+
+
+/*************************************************************************
+ * _REG relative to RSET_ENET
+ *************************************************************************/
+
+/* Receiver Configuration register */
+#define ENET_RXCFG_REG 0x0
+#define ENET_RXCFG_ALLMCAST_SHIFT 1
+#define ENET_RXCFG_ALLMCAST_MASK (1 << ENET_RXCFG_ALLMCAST_SHIFT)
+#define ENET_RXCFG_PROMISC_SHIFT 3
+#define ENET_RXCFG_PROMISC_MASK (1 << ENET_RXCFG_PROMISC_SHIFT)
+#define ENET_RXCFG_LOOPBACK_SHIFT 4
+#define ENET_RXCFG_LOOPBACK_MASK (1 << ENET_RXCFG_LOOPBACK_SHIFT)
+#define ENET_RXCFG_ENFLOW_SHIFT 5
+#define ENET_RXCFG_ENFLOW_MASK (1 << ENET_RXCFG_ENFLOW_SHIFT)
+
+/* Receive Maximum Length register */
+#define ENET_RXMAXLEN_REG 0x4
+#define ENET_RXMAXLEN_SHIFT 0
+#define ENET_RXMAXLEN_MASK (0x7ff << ENET_RXMAXLEN_SHIFT)
+
+/* Transmit Maximum Length register */
+#define ENET_TXMAXLEN_REG 0x8
+#define ENET_TXMAXLEN_SHIFT 0
+#define ENET_TXMAXLEN_MASK (0x7ff << ENET_TXMAXLEN_SHIFT)
+
+/* MII Status/Control register */
+#define ENET_MIISC_REG 0x10
+#define ENET_MIISC_MDCFREQDIV_SHIFT 0
+#define ENET_MIISC_MDCFREQDIV_MASK (0x7f << ENET_MIISC_MDCFREQDIV_SHIFT)
+#define ENET_MIISC_PREAMBLEEN_SHIFT 7
+#define ENET_MIISC_PREAMBLEEN_MASK (1 << ENET_MIISC_PREAMBLEEN_SHIFT)
+
+/* MII Data register */
+#define ENET_MIIDATA_REG 0x14
+#define ENET_MIIDATA_DATA_SHIFT 0
+#define ENET_MIIDATA_DATA_MASK (0xffff << ENET_MIIDATA_DATA_SHIFT)
+#define ENET_MIIDATA_TA_SHIFT 16
+#define ENET_MIIDATA_TA_MASK (0x3 << ENET_MIIDATA_TA_SHIFT)
+#define ENET_MIIDATA_REG_SHIFT 18
+#define ENET_MIIDATA_REG_MASK (0x1f << ENET_MIIDATA_REG_SHIFT)
+#define ENET_MIIDATA_PHYID_SHIFT 23
+#define ENET_MIIDATA_PHYID_MASK (0x1f << ENET_MIIDATA_PHYID_SHIFT)
+#define ENET_MIIDATA_OP_READ_MASK (0x6 << 28)
+#define ENET_MIIDATA_OP_WRITE_MASK (0x5 << 28)
+
+/* Ethernet Interrupt Mask register */
+#define ENET_IRMASK_REG 0x18
+
+/* Ethernet Interrupt register */
+#define ENET_IR_REG 0x1c
+#define ENET_IR_MII (1 << 0)
+#define ENET_IR_MIB (1 << 1)
+#define ENET_IR_FLOWC (1 << 2)
+
+/* Ethernet Control register */
+#define ENET_CTL_REG 0x2c
+#define ENET_CTL_ENABLE_SHIFT 0
+#define ENET_CTL_ENABLE_MASK (1 << ENET_CTL_ENABLE_SHIFT)
+#define ENET_CTL_DISABLE_SHIFT 1
+#define ENET_CTL_DISABLE_MASK (1 << ENET_CTL_DISABLE_SHIFT)
+#define ENET_CTL_SRESET_SHIFT 2
+#define ENET_CTL_SRESET_MASK (1 << ENET_CTL_SRESET_SHIFT)
+#define ENET_CTL_EPHYSEL_SHIFT 3
+#define ENET_CTL_EPHYSEL_MASK (1 << ENET_CTL_EPHYSEL_SHIFT)
+
+/* Transmit Control register */
+#define ENET_TXCTL_REG 0x30
+#define ENET_TXCTL_FD_SHIFT 0
+#define ENET_TXCTL_FD_MASK (1 << ENET_TXCTL_FD_SHIFT)
+
+/* Transmit Watermark register */
+#define ENET_TXWMARK_REG 0x34
+#define ENET_TXWMARK_WM_SHIFT 0
+#define ENET_TXWMARK_WM_MASK (0x3f << ENET_TXWMARK_WM_SHIFT)
+
+/* MIB Control register */
+#define ENET_MIBCTL_REG 0x38
+#define ENET_MIBCTL_RDCLEAR_SHIFT 0
+#define ENET_MIBCTL_RDCLEAR_MASK (1 << ENET_MIBCTL_RDCLEAR_SHIFT)
+
+/* Perfect Match Data Low register */
+#define ENET_PML_REG(x) (0x58 + (x) * 8)
+#define ENET_PMH_REG(x) (0x5c + (x) * 8)
+#define ENET_PMH_DATAVALID_SHIFT 16
+#define ENET_PMH_DATAVALID_MASK (1 << ENET_PMH_DATAVALID_SHIFT)
+
+/* MIB register */
+#define ENET_MIB_REG(x) (0x200 + (x) * 4)
+#define ENET_MIB_REG_COUNT 55
+
+
+/*************************************************************************
+ * _REG relative to RSET_ENETDMA
+ *************************************************************************/
+#define ENETDMA_CHAN_WIDTH 0x10
+#define ENETDMA_6345_CHAN_WIDTH 0x40
+
+/* Controller Configuration Register */
+#define ENETDMA_CFG_REG (0x0)
+#define ENETDMA_CFG_EN_SHIFT 0
+#define ENETDMA_CFG_EN_MASK (1 << ENETDMA_CFG_EN_SHIFT)
+#define ENETDMA_CFG_FLOWCH_MASK(x) (1 << ((x >> 1) + 1))
+
+/* Flow Control Descriptor Low Threshold register */
+#define ENETDMA_FLOWCL_REG(x) (0x4 + (x) * 6)
+
+/* Flow Control Descriptor High Threshold register */
+#define ENETDMA_FLOWCH_REG(x) (0x8 + (x) * 6)
+
+/* Flow Control Descriptor Buffer Alloc Threshold register */
+#define ENETDMA_BUFALLOC_REG(x) (0xc + (x) * 6)
+#define ENETDMA_BUFALLOC_FORCE_SHIFT 31
+#define ENETDMA_BUFALLOC_FORCE_MASK (1 << ENETDMA_BUFALLOC_FORCE_SHIFT)
+
+/* Global interrupt status */
+#define ENETDMA_GLB_IRQSTAT_REG (0x40)
+
+/* Global interrupt mask */
+#define ENETDMA_GLB_IRQMASK_REG (0x44)
+
+/* Channel Configuration register */
+#define ENETDMA_CHANCFG_REG(x) (0x100 + (x) * 0x10)
+#define ENETDMA_CHANCFG_EN_SHIFT 0
+#define ENETDMA_CHANCFG_EN_MASK (1 << ENETDMA_CHANCFG_EN_SHIFT)
+#define ENETDMA_CHANCFG_PKTHALT_SHIFT 1
+#define ENETDMA_CHANCFG_PKTHALT_MASK (1 << ENETDMA_CHANCFG_PKTHALT_SHIFT)
+
+/* Interrupt Control/Status register */
+#define ENETDMA_IR_REG(x) (0x104 + (x) * 0x10)
+#define ENETDMA_IR_BUFDONE_MASK (1 << 0)
+#define ENETDMA_IR_PKTDONE_MASK (1 << 1)
+#define ENETDMA_IR_NOTOWNER_MASK (1 << 2)
+
+/* Interrupt Mask register */
+#define ENETDMA_IRMASK_REG(x) (0x108 + (x) * 0x10)
+
+/* Maximum Burst Length */
+#define ENETDMA_MAXBURST_REG(x) (0x10C + (x) * 0x10)
+
+/* Ring Start Address register */
+#define ENETDMA_RSTART_REG(x) (0x200 + (x) * 0x10)
+
+/* State Ram Word 2 */
+#define ENETDMA_SRAM2_REG(x) (0x204 + (x) * 0x10)
+
+/* State Ram Word 3 */
+#define ENETDMA_SRAM3_REG(x) (0x208 + (x) * 0x10)
+
+/* State Ram Word 4 */
+#define ENETDMA_SRAM4_REG(x) (0x20c + (x) * 0x10)
+
+/* Broadcom 6345 ENET DMA definitions */
+#define ENETDMA_6345_CHANCFG_REG (0x00)
+
+#define ENETDMA_6345_MAXBURST_REG (0x04)
+
+#define ENETDMA_6345_RSTART_REG (0x08)
+
+#define ENETDMA_6345_LEN_REG (0x0C)
+
+#define ENETDMA_6345_IR_REG (0x14)
+
+#define ENETDMA_6345_IRMASK_REG (0x18)
+
+#define ENETDMA_6345_FC_REG (0x1C)
+
+#define ENETDMA_6345_BUFALLOC_REG (0x20)
+
+/* Shift down for EOP, SOP and WRAP bits */
+#define ENETDMA_6345_DESC_SHIFT (3)
+
+/*************************************************************************
+ * _REG relative to RSET_ENETDMAC
+ *************************************************************************/
+
+/* Channel Configuration register */
+#define ENETDMAC_CHANCFG_REG (0x0)
+#define ENETDMAC_CHANCFG_EN_SHIFT 0
+#define ENETDMAC_CHANCFG_EN_MASK (1 << ENETDMAC_CHANCFG_EN_SHIFT)
+#define ENETDMAC_CHANCFG_PKTHALT_SHIFT 1
+#define ENETDMAC_CHANCFG_PKTHALT_MASK (1 << ENETDMAC_CHANCFG_PKTHALT_SHIFT)
+#define ENETDMAC_CHANCFG_BUFHALT_SHIFT 2
+#define ENETDMAC_CHANCFG_BUFHALT_MASK (1 << ENETDMAC_CHANCFG_BUFHALT_SHIFT)
+#define ENETDMAC_CHANCFG_CHAINING_SHIFT 2
+#define ENETDMAC_CHANCFG_CHAINING_MASK (1 << ENETDMAC_CHANCFG_CHAINING_SHIFT)
+#define ENETDMAC_CHANCFG_WRAP_EN_SHIFT 3
+#define ENETDMAC_CHANCFG_WRAP_EN_MASK (1 << ENETDMAC_CHANCFG_WRAP_EN_SHIFT)
+#define ENETDMAC_CHANCFG_FLOWC_EN_SHIFT 4
+#define ENETDMAC_CHANCFG_FLOWC_EN_MASK (1 << ENETDMAC_CHANCFG_FLOWC_EN_SHIFT)
+
+/* Interrupt Control/Status register */
+#define ENETDMAC_IR_REG (0x4)
+#define ENETDMAC_IR_BUFDONE_MASK (1 << 0)
+#define ENETDMAC_IR_PKTDONE_MASK (1 << 1)
+#define ENETDMAC_IR_NOTOWNER_MASK (1 << 2)
+
+/* Interrupt Mask register */
+#define ENETDMAC_IRMASK_REG (0x8)
+
+/* Maximum Burst Length */
+#define ENETDMAC_MAXBURST_REG (0xc)
+
+
+/*************************************************************************
+ * _REG relative to RSET_ENETDMAS
+ *************************************************************************/
+
+/* Ring Start Address register */
+#define ENETDMAS_RSTART_REG (0x0)
+
+/* State Ram Word 2 */
+#define ENETDMAS_SRAM2_REG (0x4)
+
+/* State Ram Word 3 */
+#define ENETDMAS_SRAM3_REG (0x8)
+
+/* State Ram Word 4 */
+#define ENETDMAS_SRAM4_REG (0xc)
+
+
+/*************************************************************************
+ * _REG relative to RSET_ENETSW
+ *************************************************************************/
+
+/* Port traffic control */
+#define ENETSW_PTCTRL_REG(x) (0x0 + (x))
+#define ENETSW_PTCTRL_RXDIS_MASK (1 << 0)
+#define ENETSW_PTCTRL_TXDIS_MASK (1 << 1)
+
+/* Switch mode register */
+#define ENETSW_SWMODE_REG (0xb)
+#define ENETSW_SWMODE_FWD_EN_MASK (1 << 1)
+
+/* IMP override Register */
+#define ENETSW_IMPOV_REG (0xe)
+#define ENETSW_IMPOV_FORCE_MASK (1 << 7)
+#define ENETSW_IMPOV_TXFLOW_MASK (1 << 5)
+#define ENETSW_IMPOV_RXFLOW_MASK (1 << 4)
+#define ENETSW_IMPOV_1000_MASK (1 << 3)
+#define ENETSW_IMPOV_100_MASK (1 << 2)
+#define ENETSW_IMPOV_FDX_MASK (1 << 1)
+#define ENETSW_IMPOV_LINKUP_MASK (1 << 0)
+
+/* Port override Register */
+#define ENETSW_PORTOV_REG(x) (0x58 + (x))
+#define ENETSW_PORTOV_ENABLE_MASK (1 << 6)
+#define ENETSW_PORTOV_TXFLOW_MASK (1 << 5)
+#define ENETSW_PORTOV_RXFLOW_MASK (1 << 4)
+#define ENETSW_PORTOV_1000_MASK (1 << 3)
+#define ENETSW_PORTOV_100_MASK (1 << 2)
+#define ENETSW_PORTOV_FDX_MASK (1 << 1)
+#define ENETSW_PORTOV_LINKUP_MASK (1 << 0)
+
+/* MDIO control register */
+#define ENETSW_MDIOC_REG (0xb0)
+#define ENETSW_MDIOC_EXT_MASK (1 << 16)
+#define ENETSW_MDIOC_REG_SHIFT 20
+#define ENETSW_MDIOC_PHYID_SHIFT 25
+#define ENETSW_MDIOC_RD_MASK (1 << 30)
+#define ENETSW_MDIOC_WR_MASK (1 << 31)
+
+/* MDIO data register */
+#define ENETSW_MDIOD_REG (0xb4)
+
+/* Global Management Configuration Register */
+#define ENETSW_GMCR_REG (0x200)
+#define ENETSW_GMCR_RST_MIB_MASK (1 << 0)
+
+/* MIB register */
+#define ENETSW_MIB_REG(x) (0x2800 + (x) * 4)
+#define ENETSW_MIB_REG_COUNT 47
+
+/* Jumbo control register port mask register */
+#define ENETSW_JMBCTL_PORT_REG (0x4004)
+
+/* Jumbo control mib good frame register */
+#define ENETSW_JMBCTL_MAXSIZE_REG (0x4008)
+
+
+/*************************************************************************
+ * _REG relative to RSET_OHCI_PRIV
+ *************************************************************************/
+
+#define OHCI_PRIV_REG 0x0
+#define OHCI_PRIV_PORT1_HOST_SHIFT 0
+#define OHCI_PRIV_PORT1_HOST_MASK (1 << OHCI_PRIV_PORT1_HOST_SHIFT)
+#define OHCI_PRIV_REG_SWAP_SHIFT 3
+#define OHCI_PRIV_REG_SWAP_MASK (1 << OHCI_PRIV_REG_SWAP_SHIFT)
+
+
+/*************************************************************************
+ * _REG relative to RSET_USBH_PRIV
+ *************************************************************************/
+
+#define USBH_PRIV_SWAP_6358_REG 0x0
+#define USBH_PRIV_SWAP_6368_REG 0x1c
+
+#define USBH_PRIV_SWAP_USBD_SHIFT 6
+#define USBH_PRIV_SWAP_USBD_MASK (1 << USBH_PRIV_SWAP_USBD_SHIFT)
+#define USBH_PRIV_SWAP_EHCI_ENDN_SHIFT 4
+#define USBH_PRIV_SWAP_EHCI_ENDN_MASK (1 << USBH_PRIV_SWAP_EHCI_ENDN_SHIFT)
+#define USBH_PRIV_SWAP_EHCI_DATA_SHIFT 3
+#define USBH_PRIV_SWAP_EHCI_DATA_MASK (1 << USBH_PRIV_SWAP_EHCI_DATA_SHIFT)
+#define USBH_PRIV_SWAP_OHCI_ENDN_SHIFT 1
+#define USBH_PRIV_SWAP_OHCI_ENDN_MASK (1 << USBH_PRIV_SWAP_OHCI_ENDN_SHIFT)
+#define USBH_PRIV_SWAP_OHCI_DATA_SHIFT 0
+#define USBH_PRIV_SWAP_OHCI_DATA_MASK (1 << USBH_PRIV_SWAP_OHCI_DATA_SHIFT)
+
+#define USBH_PRIV_UTMI_CTL_6368_REG 0x10
+#define USBH_PRIV_UTMI_CTL_NODRIV_SHIFT 12
+#define USBH_PRIV_UTMI_CTL_NODRIV_MASK (0xf << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT)
+#define USBH_PRIV_UTMI_CTL_HOSTB_SHIFT 0
+#define USBH_PRIV_UTMI_CTL_HOSTB_MASK (0xf << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT)
+
+#define USBH_PRIV_TEST_6358_REG 0x24
+#define USBH_PRIV_TEST_6368_REG 0x14
+
+#define USBH_PRIV_SETUP_6368_REG 0x28
+#define USBH_PRIV_SETUP_IOC_SHIFT 4
+#define USBH_PRIV_SETUP_IOC_MASK (1 << USBH_PRIV_SETUP_IOC_SHIFT)
+
+
+/*************************************************************************
+ * _REG relative to RSET_USBD
+ *************************************************************************/
+
+/* General control */
+#define USBD_CONTROL_REG 0x00
+#define USBD_CONTROL_TXZLENINS_SHIFT 14
+#define USBD_CONTROL_TXZLENINS_MASK (1 << USBD_CONTROL_TXZLENINS_SHIFT)
+#define USBD_CONTROL_AUTO_CSRS_SHIFT 13
+#define USBD_CONTROL_AUTO_CSRS_MASK (1 << USBD_CONTROL_AUTO_CSRS_SHIFT)
+#define USBD_CONTROL_RXZSCFG_SHIFT 12
+#define USBD_CONTROL_RXZSCFG_MASK (1 << USBD_CONTROL_RXZSCFG_SHIFT)
+#define USBD_CONTROL_INIT_SEL_SHIFT 8
+#define USBD_CONTROL_INIT_SEL_MASK (0xf << USBD_CONTROL_INIT_SEL_SHIFT)
+#define USBD_CONTROL_FIFO_RESET_SHIFT 6
+#define USBD_CONTROL_FIFO_RESET_MASK (3 << USBD_CONTROL_FIFO_RESET_SHIFT)
+#define USBD_CONTROL_SETUPERRLOCK_SHIFT 5
+#define USBD_CONTROL_SETUPERRLOCK_MASK (1 << USBD_CONTROL_SETUPERRLOCK_SHIFT)
+#define USBD_CONTROL_DONE_CSRS_SHIFT 0
+#define USBD_CONTROL_DONE_CSRS_MASK (1 << USBD_CONTROL_DONE_CSRS_SHIFT)
+
+/* Strap options */
+#define USBD_STRAPS_REG 0x04
+#define USBD_STRAPS_APP_SELF_PWR_SHIFT 10
+#define USBD_STRAPS_APP_SELF_PWR_MASK (1 << USBD_STRAPS_APP_SELF_PWR_SHIFT)
+#define USBD_STRAPS_APP_DISCON_SHIFT 9
+#define USBD_STRAPS_APP_DISCON_MASK (1 << USBD_STRAPS_APP_DISCON_SHIFT)
+#define USBD_STRAPS_APP_CSRPRGSUP_SHIFT 8
+#define USBD_STRAPS_APP_CSRPRGSUP_MASK (1 << USBD_STRAPS_APP_CSRPRGSUP_SHIFT)
+#define USBD_STRAPS_APP_RMTWKUP_SHIFT 6
+#define USBD_STRAPS_APP_RMTWKUP_MASK (1 << USBD_STRAPS_APP_RMTWKUP_SHIFT)
+#define USBD_STRAPS_APP_RAM_IF_SHIFT 7
+#define USBD_STRAPS_APP_RAM_IF_MASK (1 << USBD_STRAPS_APP_RAM_IF_SHIFT)
+#define USBD_STRAPS_APP_8BITPHY_SHIFT 2
+#define USBD_STRAPS_APP_8BITPHY_MASK (1 << USBD_STRAPS_APP_8BITPHY_SHIFT)
+#define USBD_STRAPS_SPEED_SHIFT 0
+#define USBD_STRAPS_SPEED_MASK (3 << USBD_STRAPS_SPEED_SHIFT)
+
+/* Stall control */
+#define USBD_STALL_REG 0x08
+#define USBD_STALL_UPDATE_SHIFT 7
+#define USBD_STALL_UPDATE_MASK (1 << USBD_STALL_UPDATE_SHIFT)
+#define USBD_STALL_ENABLE_SHIFT 6
+#define USBD_STALL_ENABLE_MASK (1 << USBD_STALL_ENABLE_SHIFT)
+#define USBD_STALL_EPNUM_SHIFT 0
+#define USBD_STALL_EPNUM_MASK (0xf << USBD_STALL_EPNUM_SHIFT)
+
+/* General status */
+#define USBD_STATUS_REG 0x0c
+#define USBD_STATUS_SOF_SHIFT 16
+#define USBD_STATUS_SOF_MASK (0x7ff << USBD_STATUS_SOF_SHIFT)
+#define USBD_STATUS_SPD_SHIFT 12
+#define USBD_STATUS_SPD_MASK (3 << USBD_STATUS_SPD_SHIFT)
+#define USBD_STATUS_ALTINTF_SHIFT 8
+#define USBD_STATUS_ALTINTF_MASK (0xf << USBD_STATUS_ALTINTF_SHIFT)
+#define USBD_STATUS_INTF_SHIFT 4
+#define USBD_STATUS_INTF_MASK (0xf << USBD_STATUS_INTF_SHIFT)
+#define USBD_STATUS_CFG_SHIFT 0
+#define USBD_STATUS_CFG_MASK (0xf << USBD_STATUS_CFG_SHIFT)
+
+/* Other events */
+#define USBD_EVENTS_REG 0x10
+#define USBD_EVENTS_USB_LINK_SHIFT 10
+#define USBD_EVENTS_USB_LINK_MASK (1 << USBD_EVENTS_USB_LINK_SHIFT)
+
+/* IRQ status */
+#define USBD_EVENT_IRQ_STATUS_REG 0x14
+
+/* IRQ level (2 bits per IRQ event) */
+#define USBD_EVENT_IRQ_CFG_HI_REG 0x18
+
+#define USBD_EVENT_IRQ_CFG_LO_REG 0x1c
+
+#define USBD_EVENT_IRQ_CFG_SHIFT(x) ((x & 0xf) << 1)
+#define USBD_EVENT_IRQ_CFG_MASK(x) (3 << USBD_EVENT_IRQ_CFG_SHIFT(x))
+#define USBD_EVENT_IRQ_CFG_RISING(x) (0 << USBD_EVENT_IRQ_CFG_SHIFT(x))
+#define USBD_EVENT_IRQ_CFG_FALLING(x) (1 << USBD_EVENT_IRQ_CFG_SHIFT(x))
+
+/* IRQ mask (1=unmasked) */
+#define USBD_EVENT_IRQ_MASK_REG 0x20
+
+/* IRQ bits */
+#define USBD_EVENT_IRQ_USB_LINK 10
+#define USBD_EVENT_IRQ_SETCFG 9
+#define USBD_EVENT_IRQ_SETINTF 8
+#define USBD_EVENT_IRQ_ERRATIC_ERR 7
+#define USBD_EVENT_IRQ_SET_CSRS 6
+#define USBD_EVENT_IRQ_SUSPEND 5
+#define USBD_EVENT_IRQ_EARLY_SUSPEND 4
+#define USBD_EVENT_IRQ_SOF 3
+#define USBD_EVENT_IRQ_ENUM_ON 2
+#define USBD_EVENT_IRQ_SETUP 1
+#define USBD_EVENT_IRQ_USB_RESET 0
+
+/* TX FIFO partitioning */
+#define USBD_TXFIFO_CONFIG_REG 0x40
+#define USBD_TXFIFO_CONFIG_END_SHIFT 16
+#define USBD_TXFIFO_CONFIG_END_MASK (0xff << USBD_TXFIFO_CONFIG_END_SHIFT)
+#define USBD_TXFIFO_CONFIG_START_SHIFT 0
+#define USBD_TXFIFO_CONFIG_START_MASK (0xff << USBD_TXFIFO_CONFIG_START_SHIFT)
+
+/* RX FIFO partitioning */
+#define USBD_RXFIFO_CONFIG_REG 0x44
+#define USBD_RXFIFO_CONFIG_END_SHIFT 16
+#define USBD_RXFIFO_CONFIG_END_MASK (0xff << USBD_RXFIFO_CONFIG_END_SHIFT)
+#define USBD_RXFIFO_CONFIG_START_SHIFT 0
+#define USBD_RXFIFO_CONFIG_START_MASK (0xff << USBD_RXFIFO_CONFIG_START_SHIFT)
+
+/* TX FIFO/endpoint configuration */
+#define USBD_TXFIFO_EPSIZE_REG 0x48
+
+/* RX FIFO/endpoint configuration */
+#define USBD_RXFIFO_EPSIZE_REG 0x4c
+
+/* Endpoint<->DMA mappings */
+#define USBD_EPNUM_TYPEMAP_REG 0x50
+#define USBD_EPNUM_TYPEMAP_TYPE_SHIFT 8
+#define USBD_EPNUM_TYPEMAP_TYPE_MASK (0x3 << USBD_EPNUM_TYPEMAP_TYPE_SHIFT)
+#define USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT 0
+#define USBD_EPNUM_TYPEMAP_DMA_CH_MASK (0xf << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT)
+
+/* Misc per-endpoint settings */
+#define USBD_CSR_SETUPADDR_REG 0x80
+#define USBD_CSR_SETUPADDR_DEF 0xb550
+
+#define USBD_CSR_EP_REG(x) (0x84 + (x) * 4)
+#define USBD_CSR_EP_MAXPKT_SHIFT 19
+#define USBD_CSR_EP_MAXPKT_MASK (0x7ff << USBD_CSR_EP_MAXPKT_SHIFT)
+#define USBD_CSR_EP_ALTIFACE_SHIFT 15
+#define USBD_CSR_EP_ALTIFACE_MASK (0xf << USBD_CSR_EP_ALTIFACE_SHIFT)
+#define USBD_CSR_EP_IFACE_SHIFT 11
+#define USBD_CSR_EP_IFACE_MASK (0xf << USBD_CSR_EP_IFACE_SHIFT)
+#define USBD_CSR_EP_CFG_SHIFT 7
+#define USBD_CSR_EP_CFG_MASK (0xf << USBD_CSR_EP_CFG_SHIFT)
+#define USBD_CSR_EP_TYPE_SHIFT 5
+#define USBD_CSR_EP_TYPE_MASK (3 << USBD_CSR_EP_TYPE_SHIFT)
+#define USBD_CSR_EP_DIR_SHIFT 4
+#define USBD_CSR_EP_DIR_MASK (1 << USBD_CSR_EP_DIR_SHIFT)
+#define USBD_CSR_EP_LOG_SHIFT 0
+#define USBD_CSR_EP_LOG_MASK (0xf << USBD_CSR_EP_LOG_SHIFT)
+
+
+/*************************************************************************
+ * _REG relative to RSET_MPI
+ *************************************************************************/
+
+/* well-known (hard-wired) chip selects */
+#define MPI_CS_PCMCIA_COMMON 4
+#define MPI_CS_PCMCIA_ATTR 5
+#define MPI_CS_PCMCIA_IO 6
+
+/* Chip select base register */
+#define MPI_CSBASE_REG(x) (0x0 + (x) * 8)
+#define MPI_CSBASE_BASE_SHIFT 13
+#define MPI_CSBASE_BASE_MASK (0x1ffff << MPI_CSBASE_BASE_SHIFT)
+#define MPI_CSBASE_SIZE_SHIFT 0
+#define MPI_CSBASE_SIZE_MASK (0xf << MPI_CSBASE_SIZE_SHIFT)
+
+#define MPI_CSBASE_SIZE_8K 0
+#define MPI_CSBASE_SIZE_16K 1
+#define MPI_CSBASE_SIZE_32K 2
+#define MPI_CSBASE_SIZE_64K 3
+#define MPI_CSBASE_SIZE_128K 4
+#define MPI_CSBASE_SIZE_256K 5
+#define MPI_CSBASE_SIZE_512K 6
+#define MPI_CSBASE_SIZE_1M 7
+#define MPI_CSBASE_SIZE_2M 8
+#define MPI_CSBASE_SIZE_4M 9
+#define MPI_CSBASE_SIZE_8M 10
+#define MPI_CSBASE_SIZE_16M 11
+#define MPI_CSBASE_SIZE_32M 12
+#define MPI_CSBASE_SIZE_64M 13
+#define MPI_CSBASE_SIZE_128M 14
+#define MPI_CSBASE_SIZE_256M 15
+
+/* Chip select control register */
+#define MPI_CSCTL_REG(x) (0x4 + (x) * 8)
+#define MPI_CSCTL_ENABLE_MASK (1 << 0)
+#define MPI_CSCTL_WAIT_SHIFT 1
+#define MPI_CSCTL_WAIT_MASK (0x7 << MPI_CSCTL_WAIT_SHIFT)
+#define MPI_CSCTL_DATA16_MASK (1 << 4)
+#define MPI_CSCTL_SYNCMODE_MASK (1 << 7)
+#define MPI_CSCTL_TSIZE_MASK (1 << 8)
+#define MPI_CSCTL_ENDIANSWAP_MASK (1 << 10)
+#define MPI_CSCTL_SETUP_SHIFT 16
+#define MPI_CSCTL_SETUP_MASK (0xf << MPI_CSCTL_SETUP_SHIFT)
+#define MPI_CSCTL_HOLD_SHIFT 20
+#define MPI_CSCTL_HOLD_MASK (0xf << MPI_CSCTL_HOLD_SHIFT)
+
+/* PCI registers */
+#define MPI_SP0_RANGE_REG 0x100
+#define MPI_SP0_REMAP_REG 0x104
+#define MPI_SP0_REMAP_ENABLE_MASK (1 << 0)
+#define MPI_SP1_RANGE_REG 0x10C
+#define MPI_SP1_REMAP_REG 0x110
+#define MPI_SP1_REMAP_ENABLE_MASK (1 << 0)
+
+#define MPI_L2PCFG_REG 0x11C
+#define MPI_L2PCFG_CFG_TYPE_SHIFT 0
+#define MPI_L2PCFG_CFG_TYPE_MASK (0x3 << MPI_L2PCFG_CFG_TYPE_SHIFT)
+#define MPI_L2PCFG_REG_SHIFT 2
+#define MPI_L2PCFG_REG_MASK (0x3f << MPI_L2PCFG_REG_SHIFT)
+#define MPI_L2PCFG_FUNC_SHIFT 8
+#define MPI_L2PCFG_FUNC_MASK (0x7 << MPI_L2PCFG_FUNC_SHIFT)
+#define MPI_L2PCFG_DEVNUM_SHIFT 11
+#define MPI_L2PCFG_DEVNUM_MASK (0x1f << MPI_L2PCFG_DEVNUM_SHIFT)
+#define MPI_L2PCFG_CFG_USEREG_MASK (1 << 30)
+#define MPI_L2PCFG_CFG_SEL_MASK (1 << 31)
+
+#define MPI_L2PMEMRANGE1_REG 0x120
+#define MPI_L2PMEMBASE1_REG 0x124
+#define MPI_L2PMEMREMAP1_REG 0x128
+#define MPI_L2PMEMRANGE2_REG 0x12C
+#define MPI_L2PMEMBASE2_REG 0x130
+#define MPI_L2PMEMREMAP2_REG 0x134
+#define MPI_L2PIORANGE_REG 0x138
+#define MPI_L2PIOBASE_REG 0x13C
+#define MPI_L2PIOREMAP_REG 0x140
+#define MPI_L2P_BASE_MASK (0xffff8000)
+#define MPI_L2PREMAP_ENABLED_MASK (1 << 0)
+#define MPI_L2PREMAP_IS_CARDBUS_MASK (1 << 2)
+
+#define MPI_PCIMODESEL_REG 0x144
+#define MPI_PCIMODESEL_BAR1_NOSWAP_MASK (1 << 0)
+#define MPI_PCIMODESEL_BAR2_NOSWAP_MASK (1 << 1)
+#define MPI_PCIMODESEL_EXT_ARB_MASK (1 << 2)
+#define MPI_PCIMODESEL_PREFETCH_SHIFT 4
+#define MPI_PCIMODESEL_PREFETCH_MASK (0xf << MPI_PCIMODESEL_PREFETCH_SHIFT)
+
+#define MPI_LOCBUSCTL_REG 0x14C
+#define MPI_LOCBUSCTL_EN_PCI_GPIO_MASK (1 << 0)
+#define MPI_LOCBUSCTL_U2P_NOSWAP_MASK (1 << 1)
+
+#define MPI_LOCINT_REG 0x150
+#define MPI_LOCINT_MASK(x) (1 << (x + 16))
+#define MPI_LOCINT_STAT(x) (1 << (x))
+#define MPI_LOCINT_DIR_FAILED 6
+#define MPI_LOCINT_EXT_PCI_INT 7
+#define MPI_LOCINT_SERR 8
+#define MPI_LOCINT_CSERR 9
+
+#define MPI_PCICFGCTL_REG 0x178
+#define MPI_PCICFGCTL_CFGADDR_SHIFT 2
+#define MPI_PCICFGCTL_CFGADDR_MASK (0x1f << MPI_PCICFGCTL_CFGADDR_SHIFT)
+#define MPI_PCICFGCTL_WRITEEN_MASK (1 << 7)
+
+#define MPI_PCICFGDATA_REG 0x17C
+
+/* PCI host bridge custom register */
+#define BCMPCI_REG_TIMERS 0x40
+#define REG_TIMER_TRDY_SHIFT 0
+#define REG_TIMER_TRDY_MASK (0xff << REG_TIMER_TRDY_SHIFT)
+#define REG_TIMER_RETRY_SHIFT 8
+#define REG_TIMER_RETRY_MASK (0xff << REG_TIMER_RETRY_SHIFT)
+
+
+/*************************************************************************
+ * _REG relative to RSET_PCMCIA
+ *************************************************************************/
+
+#define PCMCIA_C1_REG 0x0
+#define PCMCIA_C1_CD1_MASK (1 << 0)
+#define PCMCIA_C1_CD2_MASK (1 << 1)
+#define PCMCIA_C1_VS1_MASK (1 << 2)
+#define PCMCIA_C1_VS2_MASK (1 << 3)
+#define PCMCIA_C1_VS1OE_MASK (1 << 6)
+#define PCMCIA_C1_VS2OE_MASK (1 << 7)
+#define PCMCIA_C1_CBIDSEL_SHIFT (8)
+#define PCMCIA_C1_CBIDSEL_MASK (0x1f << PCMCIA_C1_CBIDSEL_SHIFT)
+#define PCMCIA_C1_EN_PCMCIA_GPIO_MASK (1 << 13)
+#define PCMCIA_C1_EN_PCMCIA_MASK (1 << 14)
+#define PCMCIA_C1_EN_CARDBUS_MASK (1 << 15)
+#define PCMCIA_C1_RESET_MASK (1 << 18)
+
+#define PCMCIA_C2_REG 0x8
+#define PCMCIA_C2_DATA16_MASK (1 << 0)
+#define PCMCIA_C2_BYTESWAP_MASK (1 << 1)
+#define PCMCIA_C2_RWCOUNT_SHIFT 2
+#define PCMCIA_C2_RWCOUNT_MASK (0x3f << PCMCIA_C2_RWCOUNT_SHIFT)
+#define PCMCIA_C2_INACTIVE_SHIFT 8
+#define PCMCIA_C2_INACTIVE_MASK (0x3f << PCMCIA_C2_INACTIVE_SHIFT)
+#define PCMCIA_C2_SETUP_SHIFT 16
+#define PCMCIA_C2_SETUP_MASK (0x3f << PCMCIA_C2_SETUP_SHIFT)
+#define PCMCIA_C2_HOLD_SHIFT 24
+#define PCMCIA_C2_HOLD_MASK (0x3f << PCMCIA_C2_HOLD_SHIFT)
+
+
+/*************************************************************************
+ * _REG relative to RSET_SDRAM
+ *************************************************************************/
+
+#define SDRAM_CFG_REG 0x0
+#define SDRAM_CFG_ROW_SHIFT 4
+#define SDRAM_CFG_ROW_MASK (0x3 << SDRAM_CFG_ROW_SHIFT)
+#define SDRAM_CFG_COL_SHIFT 6
+#define SDRAM_CFG_COL_MASK (0x3 << SDRAM_CFG_COL_SHIFT)
+#define SDRAM_CFG_32B_SHIFT 10
+#define SDRAM_CFG_32B_MASK (1 << SDRAM_CFG_32B_SHIFT)
+#define SDRAM_CFG_BANK_SHIFT 13
+#define SDRAM_CFG_BANK_MASK (1 << SDRAM_CFG_BANK_SHIFT)
+
+#define SDRAM_MBASE_REG 0xc
+
+#define SDRAM_PRIO_REG 0x2C
+#define SDRAM_PRIO_MIPS_SHIFT 29
+#define SDRAM_PRIO_MIPS_MASK (1 << SDRAM_PRIO_MIPS_SHIFT)
+#define SDRAM_PRIO_ADSL_SHIFT 30
+#define SDRAM_PRIO_ADSL_MASK (1 << SDRAM_PRIO_ADSL_SHIFT)
+#define SDRAM_PRIO_EN_SHIFT 31
+#define SDRAM_PRIO_EN_MASK (1 << SDRAM_PRIO_EN_SHIFT)
+
+
+/*************************************************************************
+ * _REG relative to RSET_MEMC
+ *************************************************************************/
+
+#define MEMC_CFG_REG 0x4
+#define MEMC_CFG_32B_SHIFT 1
+#define MEMC_CFG_32B_MASK (1 << MEMC_CFG_32B_SHIFT)
+#define MEMC_CFG_COL_SHIFT 3
+#define MEMC_CFG_COL_MASK (0x3 << MEMC_CFG_COL_SHIFT)
+#define MEMC_CFG_ROW_SHIFT 6
+#define MEMC_CFG_ROW_MASK (0x3 << MEMC_CFG_ROW_SHIFT)
+
+
+/*************************************************************************
+ * _REG relative to RSET_DDR
+ *************************************************************************/
+
+#define DDR_CSEND_REG 0x8
+
+#define DDR_DMIPSPLLCFG_REG 0x18
+#define DMIPSPLLCFG_M1_SHIFT 0
+#define DMIPSPLLCFG_M1_MASK (0xff << DMIPSPLLCFG_M1_SHIFT)
+#define DMIPSPLLCFG_N1_SHIFT 23
+#define DMIPSPLLCFG_N1_MASK (0x3f << DMIPSPLLCFG_N1_SHIFT)
+#define DMIPSPLLCFG_N2_SHIFT 29
+#define DMIPSPLLCFG_N2_MASK (0x7 << DMIPSPLLCFG_N2_SHIFT)
+
+#define DDR_DMIPSPLLCFG_6368_REG 0x20
+#define DMIPSPLLCFG_6368_P1_SHIFT 0
+#define DMIPSPLLCFG_6368_P1_MASK (0xf << DMIPSPLLCFG_6368_P1_SHIFT)
+#define DMIPSPLLCFG_6368_P2_SHIFT 4
+#define DMIPSPLLCFG_6368_P2_MASK (0xf << DMIPSPLLCFG_6368_P2_SHIFT)
+#define DMIPSPLLCFG_6368_NDIV_SHIFT 16
+#define DMIPSPLLCFG_6368_NDIV_MASK (0x1ff << DMIPSPLLCFG_6368_NDIV_SHIFT)
+
+#define DDR_DMIPSPLLDIV_6368_REG 0x24
+#define DMIPSPLLDIV_6368_MDIV_SHIFT 0
+#define DMIPSPLLDIV_6368_MDIV_MASK (0xff << DMIPSPLLDIV_6368_MDIV_SHIFT)
+
+
+/*************************************************************************
+ * _REG relative to RSET_M2M
+ *************************************************************************/
+
+#define M2M_RX 0
+#define M2M_TX 1
+
+#define M2M_SRC_REG(x) ((x) * 0x40 + 0x00)
+#define M2M_DST_REG(x) ((x) * 0x40 + 0x04)
+#define M2M_SIZE_REG(x) ((x) * 0x40 + 0x08)
+
+#define M2M_CTRL_REG(x) ((x) * 0x40 + 0x0c)
+#define M2M_CTRL_ENABLE_MASK (1 << 0)
+#define M2M_CTRL_IRQEN_MASK (1 << 1)
+#define M2M_CTRL_ERROR_CLR_MASK (1 << 6)
+#define M2M_CTRL_DONE_CLR_MASK (1 << 7)
+#define M2M_CTRL_NOINC_MASK (1 << 8)
+#define M2M_CTRL_PCMCIASWAP_MASK (1 << 9)
+#define M2M_CTRL_SWAPBYTE_MASK (1 << 10)
+#define M2M_CTRL_ENDIAN_MASK (1 << 11)
+
+#define M2M_STAT_REG(x) ((x) * 0x40 + 0x10)
+#define M2M_STAT_DONE (1 << 0)
+#define M2M_STAT_ERROR (1 << 1)
+
+#define M2M_SRCID_REG(x) ((x) * 0x40 + 0x14)
+#define M2M_DSTID_REG(x) ((x) * 0x40 + 0x18)
+
+/*************************************************************************
+ * _REG relative to RSET_SPI
+ *************************************************************************/
+
+/* BCM 6338/6348 SPI core */
+#define SPI_6348_CMD 0x00 /* 16-bit register */
+#define SPI_6348_INT_STATUS 0x02
+#define SPI_6348_INT_MASK_ST 0x03
+#define SPI_6348_INT_MASK 0x04
+#define SPI_6348_ST 0x05
+#define SPI_6348_CLK_CFG 0x06
+#define SPI_6348_FILL_BYTE 0x07
+#define SPI_6348_MSG_TAIL 0x09
+#define SPI_6348_RX_TAIL 0x0b
+#define SPI_6348_MSG_CTL 0x40 /* 8-bit register */
+#define SPI_6348_MSG_CTL_WIDTH 8
+#define SPI_6348_MSG_DATA 0x41
+#define SPI_6348_MSG_DATA_SIZE 0x3f
+#define SPI_6348_RX_DATA 0x80
+#define SPI_6348_RX_DATA_SIZE 0x3f
+
+/* BCM 3368/6358/6362/6368 SPI core */
+#define SPI_6358_MSG_CTL 0x00 /* 16-bit register */
+#define SPI_6358_MSG_CTL_WIDTH 16
+#define SPI_6358_MSG_DATA 0x02
+#define SPI_6358_MSG_DATA_SIZE 0x21e
+#define SPI_6358_RX_DATA 0x400
+#define SPI_6358_RX_DATA_SIZE 0x220
+#define SPI_6358_CMD 0x700 /* 16-bit register */
+#define SPI_6358_INT_STATUS 0x702
+#define SPI_6358_INT_MASK_ST 0x703
+#define SPI_6358_INT_MASK 0x704
+#define SPI_6358_ST 0x705
+#define SPI_6358_CLK_CFG 0x706
+#define SPI_6358_FILL_BYTE 0x707
+#define SPI_6358_MSG_TAIL 0x709
+#define SPI_6358_RX_TAIL 0x70B
+
+/* Shared SPI definitions */
+
+/* Message configuration */
+#define SPI_FD_RW 0x00
+#define SPI_HD_W 0x01
+#define SPI_HD_R 0x02
+#define SPI_BYTE_CNT_SHIFT 0
+#define SPI_6348_MSG_TYPE_SHIFT 6
+#define SPI_6358_MSG_TYPE_SHIFT 14
+
+/* Command */
+#define SPI_CMD_NOOP 0x00
+#define SPI_CMD_SOFT_RESET 0x01
+#define SPI_CMD_HARD_RESET 0x02
+#define SPI_CMD_START_IMMEDIATE 0x03
+#define SPI_CMD_COMMAND_SHIFT 0
+#define SPI_CMD_COMMAND_MASK 0x000f
+#define SPI_CMD_DEVICE_ID_SHIFT 4
+#define SPI_CMD_PREPEND_BYTE_CNT_SHIFT 8
+#define SPI_CMD_ONE_BYTE_SHIFT 11
+#define SPI_CMD_ONE_WIRE_SHIFT 12
+#define SPI_DEV_ID_0 0
+#define SPI_DEV_ID_1 1
+#define SPI_DEV_ID_2 2
+#define SPI_DEV_ID_3 3
+
+/* Interrupt mask */
+#define SPI_INTR_CMD_DONE 0x01
+#define SPI_INTR_RX_OVERFLOW 0x02
+#define SPI_INTR_TX_UNDERFLOW 0x04
+#define SPI_INTR_TX_OVERFLOW 0x08
+#define SPI_INTR_RX_UNDERFLOW 0x10
+#define SPI_INTR_CLEAR_ALL 0x1f
+
+/* Status */
+#define SPI_RX_EMPTY 0x02
+#define SPI_CMD_BUSY 0x04
+#define SPI_SERIAL_BUSY 0x08
+
+/* Clock configuration */
+#define SPI_CLK_20MHZ 0x00
+#define SPI_CLK_0_391MHZ 0x01
+#define SPI_CLK_0_781MHZ 0x02 /* default */
+#define SPI_CLK_1_563MHZ 0x03
+#define SPI_CLK_3_125MHZ 0x04
+#define SPI_CLK_6_250MHZ 0x05
+#define SPI_CLK_12_50MHZ 0x06
+#define SPI_CLK_MASK 0x07
+#define SPI_SSOFFTIME_MASK 0x38
+#define SPI_SSOFFTIME_SHIFT 3
+#define SPI_BYTE_SWAP 0x80
+
+/*************************************************************************
+ * _REG relative to RSET_MISC
+ *************************************************************************/
+#define MISC_SERDES_CTRL_6328_REG 0x0
+#define MISC_SERDES_CTRL_6362_REG 0x4
+#define SERDES_PCIE_EN (1 << 0)
+#define SERDES_PCIE_EXD_EN (1 << 15)
+
+#define MISC_STRAPBUS_6362_REG 0x14
+#define STRAPBUS_6362_FCVO_SHIFT 1
+#define STRAPBUS_6362_HSSPI_CLK_FAST (1 << 13)
+#define STRAPBUS_6362_FCVO_MASK (0x1f << STRAPBUS_6362_FCVO_SHIFT)
+#define STRAPBUS_6362_BOOT_SEL_SERIAL (1 << 15)
+#define STRAPBUS_6362_BOOT_SEL_NAND (0 << 15)
+
+#define MISC_STRAPBUS_6328_REG 0x240
+#define STRAPBUS_6328_FCVO_SHIFT 7
+#define STRAPBUS_6328_FCVO_MASK (0x1f << STRAPBUS_6328_FCVO_SHIFT)
+#define STRAPBUS_6328_BOOT_SEL_SERIAL (1 << 18)
+#define STRAPBUS_6328_BOOT_SEL_NAND (0 << 18)
+
+/*************************************************************************
+ * _REG relative to RSET_PCIE
+ *************************************************************************/
+
+#define PCIE_CONFIG2_REG 0x408
+#define CONFIG2_BAR1_SIZE_EN 1
+#define CONFIG2_BAR1_SIZE_MASK 0xf
+
+#define PCIE_IDVAL3_REG 0x43c
+#define IDVAL3_CLASS_CODE_MASK 0xffffff
+#define IDVAL3_SUBCLASS_SHIFT 8
+#define IDVAL3_CLASS_SHIFT 16
+
+#define PCIE_DLSTATUS_REG 0x1048
+#define DLSTATUS_PHYLINKUP (1 << 13)
+
+#define PCIE_BRIDGE_OPT1_REG 0x2820
+#define OPT1_RD_BE_OPT_EN (1 << 7)
+#define OPT1_RD_REPLY_BE_FIX_EN (1 << 9)
+#define OPT1_PCIE_BRIDGE_HOLE_DET_EN (1 << 11)
+#define OPT1_L1_INT_STATUS_MASK_POL (1 << 12)
+
+#define PCIE_BRIDGE_OPT2_REG 0x2824
+#define OPT2_UBUS_UR_DECODE_DIS (1 << 2)
+#define OPT2_TX_CREDIT_CHK_EN (1 << 4)
+#define OPT2_CFG_TYPE1_BD_SEL (1 << 7)
+#define OPT2_CFG_TYPE1_BUS_NO_SHIFT 16
+#define OPT2_CFG_TYPE1_BUS_NO_MASK (0xff << OPT2_CFG_TYPE1_BUS_NO_SHIFT)
+
+#define PCIE_BRIDGE_BAR0_BASEMASK_REG 0x2828
+#define PCIE_BRIDGE_BAR1_BASEMASK_REG 0x2830
+#define BASEMASK_REMAP_EN (1 << 0)
+#define BASEMASK_SWAP_EN (1 << 1)
+#define BASEMASK_MASK_SHIFT 4
+#define BASEMASK_MASK_MASK (0xfff << BASEMASK_MASK_SHIFT)
+#define BASEMASK_BASE_SHIFT 20
+#define BASEMASK_BASE_MASK (0xfff << BASEMASK_BASE_SHIFT)
+
+#define PCIE_BRIDGE_BAR0_REBASE_ADDR_REG 0x282c
+#define PCIE_BRIDGE_BAR1_REBASE_ADDR_REG 0x2834
+#define REBASE_ADDR_BASE_SHIFT 20
+#define REBASE_ADDR_BASE_MASK (0xfff << REBASE_ADDR_BASE_SHIFT)
+
+#define PCIE_BRIDGE_RC_INT_MASK_REG 0x2854
+#define PCIE_RC_INT_A (1 << 0)
+#define PCIE_RC_INT_B (1 << 1)
+#define PCIE_RC_INT_C (1 << 2)
+#define PCIE_RC_INT_D (1 << 3)
+
+#define PCIE_DEVICE_OFFSET 0x8000
+
+/*************************************************************************
+ * _REG relative to RSET_OTP
+ *************************************************************************/
+
+#define OTP_USER_BITS_6328_REG(i) (0x20 + (i) * 4)
+#define OTP_6328_REG3_TP1_DISABLED BIT(9)
+
+#endif /* BCM63XX_REGS_H_ */
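
The *_SHIFT/*_MASK pairs above are meant to be combined in the usual read-mask-shift pattern when a field is decoded. A minimal sketch, assuming a 32-bit accessor for the RSET_MISC window (the bcm_misc_readl() helper name is an assumption of this example, not something the header provides):

static unsigned int bcm6362_read_fcvo(void)
{
	u32 val;

	/* assumed accessor for the MISC register block */
	val = bcm_misc_readl(MISC_STRAPBUS_6362_REG);

	/* mask the field first, then shift it down to bit 0 */
	return (val & STRAPBUS_6362_FCVO_MASK) >> STRAPBUS_6362_FCVO_SHIFT;
}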
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_reset.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_reset.h
new file mode 100644
index 000000000..2c0645b7d
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_reset.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BCM63XX_RESET_H
+#define __BCM63XX_RESET_H
+
+enum bcm63xx_core_reset {
+ BCM63XX_RESET_SPI,
+ BCM63XX_RESET_ENET,
+ BCM63XX_RESET_USBH,
+ BCM63XX_RESET_USBD,
+ BCM63XX_RESET_SAR,
+ BCM63XX_RESET_DSL,
+ BCM63XX_RESET_EPHY,
+ BCM63XX_RESET_ENETSW,
+ BCM63XX_RESET_PCM,
+ BCM63XX_RESET_MPI,
+ BCM63XX_RESET_PCIE,
+ BCM63XX_RESET_PCIE_EXT,
+};
+
+void bcm63xx_core_set_reset(enum bcm63xx_core_reset, int reset);
+
+#endif
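
A sketch of how this helper is typically pulsed from platform code: assert the core's reset, give it a moment, then release it. The 10 microsecond settle time is an illustrative value, not a documented requirement.

#include <linux/delay.h>

static void usbd_reset_pulse(void)
{
	bcm63xx_core_set_reset(BCM63XX_RESET_USBD, 1);	/* assert reset */
	udelay(10);					/* illustrative delay */
	bcm63xx_core_set_reset(BCM63XX_RESET_USBD, 0);	/* release reset */
}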
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_timer.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_timer.h
new file mode 100644
index 000000000..bcbece793
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_timer.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_TIMER_H_
+#define BCM63XX_TIMER_H_
+
+int bcm63xx_timer_register(int id, void (*callback)(void *data), void *data);
+void bcm63xx_timer_unregister(int id);
+int bcm63xx_timer_set(int id, int monotonic, unsigned int countdown_us);
+int bcm63xx_timer_enable(int id);
+int bcm63xx_timer_disable(int id);
+unsigned int bcm63xx_timer_countdown(unsigned int countdown_us);
+
+#endif /* !BCM63XX_TIMER_H_ */
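
From the prototypes alone, a plausible usage sequence is: register a callback for a timer id, program the countdown, then enable it. The return-value handling and the exact meaning of the monotonic flag (assumed here to select periodic operation) are assumptions of this sketch, not guarantees of the header.

static void example_timer_cb(void *data)
{
	/* invoked when hardware timer 0 expires */
}

static int example_timer_setup(void)
{
	int ret;

	ret = bcm63xx_timer_register(0, example_timer_cb, NULL);
	if (ret)
		return ret;

	ret = bcm63xx_timer_set(0, 1, 1000);	/* 1000 us, assumed periodic */
	if (!ret)
		ret = bcm63xx_timer_enable(0);
	if (ret)
		bcm63xx_timer_unregister(0);
	return ret;
}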
diff --git a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h
new file mode 100644
index 000000000..830f53f28
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BOARD_BCM963XX_H_
+#define BOARD_BCM963XX_H_
+
+#include <linux/types.h>
+#include <linux/gpio.h>
+#include <linux/leds.h>
+#include <bcm63xx_dev_enet.h>
+#include <bcm63xx_dev_usb_usbd.h>
+
+/*
+ * flash mapping
+ */
+#define BCM963XX_CFE_VERSION_OFFSET 0x570
+#define BCM963XX_NVRAM_OFFSET 0x580
+
+/*
+ * board definition
+ */
+struct board_info {
+ u8 name[16];
+ unsigned int expected_cpu_id;
+
+ /* enabled feature/device */
+ unsigned int has_enet0:1;
+ unsigned int has_enet1:1;
+ unsigned int has_enetsw:1;
+ unsigned int has_pci:1;
+ unsigned int has_pccard:1;
+ unsigned int has_ohci0:1;
+ unsigned int has_ehci0:1;
+ unsigned int has_usbd:1;
+ unsigned int has_uart0:1;
+ unsigned int has_uart1:1;
+
+ /* ethernet config */
+ struct bcm63xx_enet_platform_data enet0;
+ struct bcm63xx_enet_platform_data enet1;
+ struct bcm63xx_enetsw_platform_data enetsw;
+
+ /* USB config */
+ struct bcm63xx_usbd_platform_data usbd;
+
+ /* GPIO LEDs */
+ struct gpio_led leds[5];
+
+ /* External PHY reset GPIO */
+ unsigned int ephy_reset_gpio;
+
+ /* External PHY reset GPIO flags from gpio.h */
+ unsigned long ephy_reset_gpio_flags;
+};
+
+#endif /* ! BOARD_BCM963XX_H_ */
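
Board support code fills struct board_info in as a static initializer; a minimal hypothetical entry (the board name, expected CPU id and LED GPIO below are made-up illustration values):

static struct board_info board_example = {
	.name			= "EXAMPLE963XX",	/* hypothetical */
	.expected_cpu_id	= 0x6348,

	.has_uart0		= 1,
	.has_enet1		= 1,
	.has_ohci0		= 1,
	.has_pci		= 1,

	.leds = {
		{
			.name		= "example:green:power",
			.gpio		= 0,		/* made-up GPIO */
			.active_low	= 1,
		},
	},
};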
diff --git a/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h
new file mode 100644
index 000000000..0ebecbdb9
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_BCM963XX_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_BCM963XX_CPU_FEATURE_OVERRIDES_H
+
+#include <bcm63xx_cpu.h>
+
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_4k_cache 1
+#define cpu_has_fpu 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 1
+#define cpu_has_watch 0
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 1
+#define cpu_has_mcheck 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+#define cpu_has_vtag_icache 0
+
+#if !defined(CONFIG_SYS_HAS_CPU_BMIPS4350)
+#define cpu_has_dc_aliases 0
+#endif
+
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_pindexed_dcache 0
+
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+
+#define cpu_has_nofpuex 0
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+
+#define cpu_dcache_line_size() 16
+#define cpu_icache_line_size() 16
+#define cpu_scache_line_size() 0
+
+#endif /* __ASM_MACH_BCM963XX_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/ioremap.h b/arch/mips/include/asm/mach-bcm63xx/ioremap.h
new file mode 100644
index 000000000..73f31825b
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/ioremap.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCM63XX_IOREMAP_H_
+#define BCM63XX_IOREMAP_H_
+
+#include <bcm63xx_cpu.h>
+
+static inline int is_bcm63xx_internal_registers(phys_addr_t offset)
+{
+ switch (bcm63xx_get_cpu_id()) {
+ case BCM3368_CPU_ID:
+ if (offset >= 0xfff80000)
+ return 1;
+ break;
+ case BCM6338_CPU_ID:
+ case BCM6345_CPU_ID:
+ case BCM6348_CPU_ID:
+ case BCM6358_CPU_ID:
+ if (offset >= 0xfff00000)
+ return 1;
+ break;
+ case BCM6328_CPU_ID:
+ case BCM6362_CPU_ID:
+ case BCM6368_CPU_ID:
+ if (offset >= 0xb0000000 && offset < 0xb1000000)
+ return 1;
+ break;
+ }
+ return 0;
+}
+
+static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
+ unsigned long flags)
+{
+ if (is_bcm63xx_internal_registers(offset))
+ return (void __iomem *)offset;
+ return NULL;
+}
+
+static inline int plat_iounmap(const volatile void __iomem *addr)
+{
+ return is_bcm63xx_internal_registers((unsigned long)addr);
+}
+
+#endif /* BCM63XX_IOREMAP_H_ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/irq.h b/arch/mips/include/asm/mach-bcm63xx/irq.h
new file mode 100644
index 000000000..b016f0615
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/irq.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_BCM63XX_IRQ_H
+#define __ASM_MACH_BCM63XX_IRQ_H
+
+#define NR_IRQS 128
+#define MIPS_CPU_IRQ_BASE 0
+
+#endif
diff --git a/arch/mips/include/asm/mach-bcm63xx/spaces.h b/arch/mips/include/asm/mach-bcm63xx/spaces.h
new file mode 100644
index 000000000..1410ed0da
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/spaces.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_BCM63XX_SPACES_H
+#define _ASM_BCM63XX_SPACES_H
+
+#include <asm/bmips-spaces.h>
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* _ASM_BCM63XX_SPACES_H */
diff --git a/arch/mips/include/asm/mach-bmips/cpu-feature-overrides.h b/arch/mips/include/asm/mach-bmips/cpu-feature-overrides.h
new file mode 100644
index 000000000..68a219d80
--- /dev/null
+++ b/arch/mips/include/asm/mach-bmips/cpu-feature-overrides.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_BMIPS_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_BMIPS_CPU_FEATURE_OVERRIDES_H
+
+/* Invariants across all BMIPS processors */
+#define cpu_has_vtag_icache 0
+#define cpu_icache_snoops_remote_store 1
+
+/* Processor ISA compatibility is MIPS32R1 */
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#endif /* __ASM_MACH_BMIPS_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-bmips/ioremap.h b/arch/mips/include/asm/mach-bmips/ioremap.h
new file mode 100644
index 000000000..63b4af991
--- /dev/null
+++ b/arch/mips/include/asm/mach-bmips/ioremap.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_BMIPS_IOREMAP_H
+#define __ASM_MACH_BMIPS_IOREMAP_H
+
+#include <linux/types.h>
+
+static inline int is_bmips_internal_registers(phys_addr_t offset)
+{
+ if (offset >= 0xfff80000)
+ return 1;
+
+ return 0;
+}
+
+static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
+ unsigned long flags)
+{
+ if (is_bmips_internal_registers(offset))
+ return (void __iomem *)offset;
+
+ return NULL;
+}
+
+static inline int plat_iounmap(const volatile void __iomem *addr)
+{
+ return is_bmips_internal_registers((unsigned long)addr);
+}
+
+#endif /* __ASM_MACH_BMIPS_IOREMAP_H */
diff --git a/arch/mips/include/asm/mach-bmips/spaces.h b/arch/mips/include/asm/mach-bmips/spaces.h
new file mode 100644
index 000000000..c59b28fd9
--- /dev/null
+++ b/arch/mips/include/asm/mach-bmips/spaces.h
@@ -0,0 +1,18 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_BMIPS_SPACES_H
+#define _ASM_BMIPS_SPACES_H
+
+/* Avoid collisions with system base register (SBR) region on BMIPS3300 */
+#include <asm/bmips-spaces.h>
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* _ASM_BMIPS_SPACES_H */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
new file mode 100644
index 000000000..513270c8a
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -0,0 +1,80 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 Cavium Networks
+ */
+#ifndef __ASM_MACH_CAVIUM_OCTEON_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_CAVIUM_OCTEON_CPU_FEATURE_OVERRIDES_H
+
+#include <linux/types.h>
+#include <asm/mipsregs.h>
+
+/*
+ * Cavium Octeons are MIPS64v2 processors
+ */
+#define cpu_dcache_line_size() 128
+#define cpu_icache_line_size() 128
+
+
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 0
+#define cpu_has_tx39_cache 0
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 1
+
+#define cpu_has_llsc 1
+/*
+ * We disable LL/SC on non-SMP systems as it is faster to disable
+ * interrupts for atomic access than to use LL/SC.
+ */
+#ifdef CONFIG_SMP
+# define kernel_uses_llsc 1
+#else
+# define kernel_uses_llsc 0
+#endif
+#define cpu_has_vtag_icache 1
+#define cpu_has_dc_aliases 0
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_64bits 1
+#define cpu_has_octeon_cache 1
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 1
+#define cpu_has_mips64r1 1
+#define cpu_has_mips64r2 1
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_mipsmt 0
+#define cpu_has_vint 0
+#define cpu_has_veic 0
+#define cpu_hwrena_impl_bits (MIPS_HWRENA_IMPL1 | MIPS_HWRENA_IMPL2)
+#define cpu_has_wsbh 1
+
+#define cpu_has_rixi (cpu_data[0].cputype != CPU_CAVIUM_OCTEON)
+
+#define ARCH_HAS_SPINLOCK_PREFETCH 1
+#define spin_lock_prefetch(x) prefetch(x)
+#define PREFETCH_STRIDE 128
+
+#ifdef __OCTEON__
+/*
+ * All gcc versions that have OCTEON support define __OCTEON__ and have the
+ * __builtin_popcount support.
+ */
+#define ARCH_HAS_USABLE_BUILTIN_POPCOUNT 1
+#endif
+
+/*
+ * The last 256MB are reserved for device to device mappings and the
+ * BAR1 hole.
+ */
+#define MAX_DMA32_PFN (((1ULL << 32) - (1ULL << 28)) >> PAGE_SHIFT)
+
+#endif
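
For reference, with the common 4 KiB page size (PAGE_SHIFT == 12, an assumption of this note) the limit above works out as:

/*
 *   ((1ULL << 32) - (1ULL << 28)) >> 12 == 0x000f0000
 *
 * i.e. 32-bit DMA allocations stop 256 MiB short of the 4 GiB boundary,
 * leaving that window for the device-to-device mappings and the BAR1 hole.
 */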
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
new file mode 100644
index 000000000..64b86b9d3
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -0,0 +1,58 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2008 Cavium Networks
+ */
+#ifndef __OCTEON_IRQ_H__
+#define __OCTEON_IRQ_H__
+
+#define NR_IRQS OCTEON_IRQ_LAST
+#define MIPS_CPU_IRQ_BASE OCTEON_IRQ_SW0
+
+enum octeon_irq {
+/* 1 - 8 represent the 8 MIPS standard interrupt sources */
+ OCTEON_IRQ_SW0 = 1,
+ OCTEON_IRQ_SW1,
+/* CIU0, CIU2, CIU4 are 3, 4, 5 */
+ OCTEON_IRQ_5 = 6,
+ OCTEON_IRQ_PERF,
+ OCTEON_IRQ_TIMER,
+/* sources in CIU_INTX_EN0 */
+ OCTEON_IRQ_WORKQ0,
+ OCTEON_IRQ_WDOG0 = OCTEON_IRQ_WORKQ0 + 64,
+ OCTEON_IRQ_MBOX0 = OCTEON_IRQ_WDOG0 + 32,
+ OCTEON_IRQ_MBOX1,
+ OCTEON_IRQ_MBOX2,
+ OCTEON_IRQ_MBOX3,
+ OCTEON_IRQ_PCI_INT0,
+ OCTEON_IRQ_PCI_INT1,
+ OCTEON_IRQ_PCI_INT2,
+ OCTEON_IRQ_PCI_INT3,
+ OCTEON_IRQ_PCI_MSI0,
+ OCTEON_IRQ_PCI_MSI1,
+ OCTEON_IRQ_PCI_MSI2,
+ OCTEON_IRQ_PCI_MSI3,
+
+ OCTEON_IRQ_TWSI,
+ OCTEON_IRQ_TWSI2,
+ OCTEON_IRQ_RML,
+ OCTEON_IRQ_TIMER0,
+ OCTEON_IRQ_TIMER1,
+ OCTEON_IRQ_TIMER2,
+ OCTEON_IRQ_TIMER3,
+#ifndef CONFIG_PCI_MSI
+ OCTEON_IRQ_LAST = 127
+#endif
+};
+
+#ifdef CONFIG_PCI_MSI
+/* 256 - 511 represent the MSI interrupts 0-255 */
+#define OCTEON_IRQ_MSI_BIT0 (256)
+
+#define OCTEON_IRQ_MSI_LAST (OCTEON_IRQ_MSI_BIT0 + 255)
+#define OCTEON_IRQ_LAST (OCTEON_IRQ_MSI_LAST + 1)
+#endif
+
+#endif
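
Because the enum lays the source blocks out with explicit strides, the resulting interrupt numbers can be spot-checked directly; a small sketch (the values follow from the eight MIPS sources starting at 1, then a 64-entry work-queue block and a 32-entry watchdog block):

#include <linux/build_bug.h>

static_assert(OCTEON_IRQ_WORKQ0 == 9);		/* after the 8 MIPS sources */
static_assert(OCTEON_IRQ_WDOG0 == 73);		/* 9 + 64 work queues */
static_assert(OCTEON_IRQ_MBOX0 == 105);		/* 73 + 32 watchdogs */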
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
new file mode 100644
index 000000000..c38b38ce5
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
@@ -0,0 +1,160 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2008 Cavium Networks, Inc
+ */
+#ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
+#define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
+
+#define CP0_CVMCTL_REG $9, 7
+#define CP0_CVMMEMCTL_REG $11,7
+#define CP0_PRID_REG $15, 0
+#define CP0_DCACHE_ERR_REG $27, 1
+#define CP0_PRID_OCTEON_PASS1 0x000d0000
+#define CP0_PRID_OCTEON_CN30XX 0x000d0200
+
+.macro kernel_entry_setup
+ # Registers set by bootloader:
+ # (only 32 bits set by bootloader, all addresses are physical
+ # addresses, and need to have the appropriate memory region set
+	# by the kernel)
+ # a0 = argc
+ # a1 = argv (kseg0 compat addr)
+ # a2 = 1 if init core, zero otherwise
+ # a3 = address of boot descriptor block
+ .set push
+ .set arch=octeon
+ # Read the cavium mem control register
+ dmfc0 v0, CP0_CVMMEMCTL_REG
+ # Clear the lower 6 bits, the CVMSEG size
+ dins v0, $0, 0, 6
+ ori v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
+ dmtc0 v0, CP0_CVMMEMCTL_REG # Write the cavium mem control register
+ dmfc0 v0, CP0_CVMCTL_REG # Read the cavium control register
+ # Disable unaligned load/store support but leave HW fixup enabled
+ # Needed for octeon specific memcpy
+ or v0, v0, 0x5001
+ xor v0, v0, 0x1001
+ # First clear off CvmCtl[IPPCI] bit and move the performance
+ # counters interrupt to IRQ 6
+ dli v1, ~(7 << 7)
+ and v0, v0, v1
+ ori v0, v0, (6 << 7)
+
+ mfc0 v1, CP0_PRID_REG
+ and t1, v1, 0xfff8
+ xor t1, t1, 0x9000 # 63-P1
+ beqz t1, 4f
+ and t1, v1, 0xfff8
+ xor t1, t1, 0x9008 # 63-P2
+ beqz t1, 4f
+ and t1, v1, 0xfff8
+ xor t1, t1, 0x9100 # 68-P1
+ beqz t1, 4f
+ and t1, v1, 0xff00
+ xor t1, t1, 0x9200 # 66-PX
+ bnez t1, 5f # Skip WAR for others.
+ and t1, v1, 0x00ff
+ slti t1, t1, 2 # 66-P1.2 and later good.
+ beqz t1, 5f
+
+4: # core-16057 work around
+ or v0, v0, 0x2000 # Set IPREF bit.
+
+5: # No core-16057 work around
+ # Write the cavium control register
+ dmtc0 v0, CP0_CVMCTL_REG
+ sync
+ # Flush dcache after config change
+ cache 9, 0($0)
+ # Zero all of CVMSEG to make sure parity is correct
+ dli v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
+ dsll v0, 7
+ beqz v0, 2f
+1: dsubu v0, 8
+ sd $0, -32768(v0)
+ bnez v0, 1b
+2:
+ mfc0 v0, CP0_PRID_REG
+ bbit0 v0, 15, 1f
+ # OCTEON II or better have bit 15 set. Clear the error bits.
+ and t1, v0, 0xff00
+ dli v0, 0x9500
+ bge t1, v0, 1f # OCTEON III has no DCACHE_ERR_REG COP0
+ dli v0, 0x27
+ dmtc0 v0, CP0_DCACHE_ERR_REG
+1:
+ # Get my core id
+ rdhwr v0, $0
+ # Jump the master to kernel_entry
+ bne a2, zero, octeon_main_processor
+ nop
+
+#ifdef CONFIG_SMP
+
+ #
+ # All cores other than the master need to wait here for SMP bootstrap
+ # to begin
+ #
+
+octeon_spin_wait_boot:
+#ifdef CONFIG_RELOCATABLE
+ PTR_LA t0, octeon_processor_relocated_kernel_entry
+ LONG_L t0, (t0)
+ beq zero, t0, 1f
+ nop
+
+ jr t0
+ nop
+1:
+#endif /* CONFIG_RELOCATABLE */
+
+ # This is the variable where the next core to boot is stored
+ PTR_LA t0, octeon_processor_boot
+ # Get the core id of the next to be booted
+ LONG_L t1, (t0)
+ # Keep looping if it isn't me
+ bne t1, v0, octeon_spin_wait_boot
+ nop
+ # Get my GP from the global variable
+ PTR_LA t0, octeon_processor_gp
+ LONG_L gp, (t0)
+ # Get my SP from the global variable
+ PTR_LA t0, octeon_processor_sp
+ LONG_L sp, (t0)
+ # Set the SP global variable to zero so the master knows we've started
+ LONG_S zero, (t0)
+#ifdef __OCTEON__
+ syncw
+ syncw
+#else
+ sync
+#endif
+ # Jump to the normal Linux SMP entry point
+ j smp_bootstrap
+ nop
+#else /* CONFIG_SMP */
+
+ #
+ # Someone tried to boot SMP with a non SMP kernel. All extra cores
+ # will halt here.
+ #
+octeon_wait_forever:
+ wait
+ b octeon_wait_forever
+ nop
+
+#endif /* CONFIG_SMP */
+octeon_main_processor:
+ .set pop
+.endm
+
+/*
+ * Do SMP slave processor setup necessary before we can safely execute C code.
+ */
+ .macro smp_slave_setup
+ .endm
+
+#endif /* __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
new file mode 100644
index 000000000..239fcc874
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
@@ -0,0 +1,64 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ */
+#ifndef __ASM_MACH_GENERIC_MANGLE_PORT_H
+#define __ASM_MACH_GENERIC_MANGLE_PORT_H
+
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
+
+static inline bool __should_swizzle_bits(volatile void *a)
+{
+ extern const bool octeon_should_swizzle_table[];
+ u64 did = ((u64)(uintptr_t)a >> 40) & 0xff;
+
+ return octeon_should_swizzle_table[did];
+}
+
+# define __swizzle_addr_b(port) (port)
+# define __swizzle_addr_w(port) (port)
+# define __swizzle_addr_l(port) (port)
+# define __swizzle_addr_q(port) (port)
+
+#else /* __LITTLE_ENDIAN */
+
+#define __should_swizzle_bits(a) false
+
+static inline bool __should_swizzle_addr(u64 p)
+{
+ /* boot bus? */
+ return ((p >> 40) & 0xff) == 0;
+}
+
+# define __swizzle_addr_b(port) \
+ (__should_swizzle_addr(port) ? (port) ^ 7 : (port))
+# define __swizzle_addr_w(port) \
+ (__should_swizzle_addr(port) ? (port) ^ 6 : (port))
+# define __swizzle_addr_l(port) \
+ (__should_swizzle_addr(port) ? (port) ^ 4 : (port))
+# define __swizzle_addr_q(port) (port)
+
+#endif /* __BIG_ENDIAN */
+
+
+# define ioswabb(a, x) (x)
+# define __mem_ioswabb(a, x) (x)
+# define ioswabw(a, x) (__should_swizzle_bits(a) ? \
+ le16_to_cpu((__force __le16)(x)) : \
+ (x))
+# define __mem_ioswabw(a, x) (x)
+# define ioswabl(a, x) (__should_swizzle_bits(a) ? \
+ le32_to_cpu((__force __le32)(x)) : \
+ (x))
+# define __mem_ioswabl(a, x) (x)
+# define ioswabq(a, x) (__should_swizzle_bits(a) ? \
+ le64_to_cpu((__force __le64)(x)) : \
+ (x))
+# define __mem_ioswabq(a, x) (x)
+
+#endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */
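
On little-endian kernels, accesses that target the boot bus (address bits 47:40 all zero) get their low address bits flipped so that narrow accesses land on the intended byte lanes. A worked example of the little-endian macros above, using a made-up boot-bus address:

static void swizzle_example(void)
{
	/* hypothetical boot-bus register at ...0003 (bits 47:40 are zero) */
	u64 port = 0x0000000010000003ULL;

	u64 b = __swizzle_addr_b(port);	/* 0x3 ^ 7 -> ends in 0x4 */
	u64 w = __swizzle_addr_w(port);	/* 0x3 ^ 6 -> ends in 0x5 */
	u64 l = __swizzle_addr_l(port);	/* 0x3 ^ 4 -> ends in 0x7 */
	u64 q = __swizzle_addr_q(port);	/* 64-bit accesses are left alone */
}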
diff --git a/arch/mips/include/asm/mach-cavium-octeon/spaces.h b/arch/mips/include/asm/mach-cavium-octeon/spaces.h
new file mode 100644
index 000000000..daa91accf
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/spaces.h
@@ -0,0 +1,24 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Cavium, Inc.
+ */
+#ifndef _ASM_MACH_CAVIUM_OCTEON_SPACES_H
+#define _ASM_MACH_CAVIUM_OCTEON_SPACES_H
+
+#include <linux/const.h>
+
+#ifdef CONFIG_64BIT
+/* They are all the same and some OCTEON II cores cannot handle 0xa8.. */
+#define CAC_BASE _AC(0x8000000000000000, UL)
+#define UNCAC_BASE _AC(0x8000000000000000, UL)
+#define IO_BASE _AC(0x8000000000000000, UL)
+
+
+#endif /* CONFIG_64BIT */
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* _ASM_MACH_CAVIUM_OCTEON_SPACES_H */
diff --git a/arch/mips/include/asm/mach-cobalt/cobalt.h b/arch/mips/include/asm/mach-cobalt/cobalt.h
new file mode 100644
index 000000000..5b9fce73f
--- /dev/null
+++ b/arch/mips/include/asm/mach-cobalt/cobalt.h
@@ -0,0 +1,22 @@
+/*
+ * The Cobalt board ID information.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1997 Cobalt Microserver
+ * Copyright (C) 1997, 2003 Ralf Baechle
+ * Copyright (C) 2001, 2002, 2003 Liam Davies (ldavies@agile.tv)
+ */
+#ifndef __ASM_COBALT_H
+#define __ASM_COBALT_H
+
+extern int cobalt_board_id;
+
+#define COBALT_BRD_ID_QUBE1 0x3
+#define COBALT_BRD_ID_RAQ1 0x4
+#define COBALT_BRD_ID_QUBE2 0x5
+#define COBALT_BRD_ID_RAQ2 0x6
+
+#endif /* __ASM_COBALT_H */
diff --git a/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
new file mode 100644
index 000000000..291fe90aa
--- /dev/null
+++ b/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
@@ -0,0 +1,57 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006, 07 Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_COBALT_CPU_FEATURE_OVERRIDES_H
+#define __ASM_COBALT_CPU_FEATURE_OVERRIDES_H
+
+
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_32fpr 1
+#define cpu_has_counter 1
+#define cpu_has_watch 0
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 0
+#define cpu_has_mcheck 0
+#define cpu_has_ejtag 0
+
+#define cpu_has_inclusive_pcaches 0
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+#define cpu_scache_line_size() 0
+
+#ifdef CONFIG_64BIT
+#define cpu_has_llsc 0
+#else
+#define cpu_has_llsc 1
+#endif
+
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_icache_snoops_remote_store 0
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#endif /* __ASM_COBALT_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-cobalt/irq.h b/arch/mips/include/asm/mach-cobalt/irq.h
new file mode 100644
index 000000000..9da9acf5d
--- /dev/null
+++ b/arch/mips/include/asm/mach-cobalt/irq.h
@@ -0,0 +1,57 @@
+/*
+ * Cobalt IRQ definitions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1997 Cobalt Microserver
+ * Copyright (C) 1997, 2003 Ralf Baechle
+ * Copyright (C) 2001-2003 Liam Davies (ldavies@agile.tv)
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#ifndef _ASM_COBALT_IRQ_H
+#define _ASM_COBALT_IRQ_H
+
+/*
+ * i8259 interrupts used on Cobalt:
+ *
+ * 8 - RTC
+ * 9 - PCI slot
+ * 14 - IDE0
+ * 15 - IDE1(no connector on board)
+ */
+#define I8259A_IRQ_BASE 0
+
+#define PCISLOT_IRQ (I8259A_IRQ_BASE + 9)
+
+/*
+ * CPU interrupts used on Cobalt:
+ *
+ * 0 - Software interrupt 0 (unused)
+ * 1 - Software interrupt 1 (unused)
+ * 2 - cascade GT64111
+ * 3 - ethernet or SCSI host controller
+ * 4 - ethernet
+ * 5 - 16550 UART
+ * 6 - cascade i8259
+ * 7 - CP0 counter
+ */
+#define MIPS_CPU_IRQ_BASE 16
+
+#define GT641XX_CASCADE_IRQ (MIPS_CPU_IRQ_BASE + 2)
+#define RAQ2_SCSI_IRQ (MIPS_CPU_IRQ_BASE + 3)
+#define ETH0_IRQ (MIPS_CPU_IRQ_BASE + 3)
+#define QUBE1_ETH0_IRQ (MIPS_CPU_IRQ_BASE + 4)
+#define ETH1_IRQ (MIPS_CPU_IRQ_BASE + 4)
+#define SERIAL_IRQ (MIPS_CPU_IRQ_BASE + 5)
+#define SCSI_IRQ (MIPS_CPU_IRQ_BASE + 5)
+#define I8259_CASCADE_IRQ (MIPS_CPU_IRQ_BASE + 6)
+
+#define GT641XX_IRQ_BASE 24
+
+#include <asm/irq_gt641xx.h>
+
+#define NR_IRQS (GT641XX_PCI_INT3_IRQ + 1)
+
+#endif /* _ASM_COBALT_IRQ_H */
diff --git a/arch/mips/include/asm/mach-cobalt/mach-gt64120.h b/arch/mips/include/asm/mach-cobalt/mach-gt64120.h
new file mode 100644
index 000000000..ddb968a55
--- /dev/null
+++ b/arch/mips/include/asm/mach-cobalt/mach-gt64120.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2006 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#ifndef _COBALT_MACH_GT64120_H
+#define _COBALT_MACH_GT64120_H
+
+/*
+ * Cobalt uses GT64111. GT64111 is almost the same as GT64120.
+ */
+
+#define GT64120_BASE CKSEG1ADDR(GT_DEF_BASE)
+
+#endif /* _COBALT_MACH_GT64120_H */
diff --git a/arch/mips/include/asm/mach-db1x00/bcsr.h b/arch/mips/include/asm/mach-db1x00/bcsr.h
new file mode 100644
index 000000000..4a27738e8
--- /dev/null
+++ b/arch/mips/include/asm/mach-db1x00/bcsr.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * bcsr.h -- Db1xxx/Pb1xxx Devboard CPLD registers ("BCSR") abstraction.
+ *
+ * All Alchemy development boards (except, of course, the weird PB1000)
+ * have a few registers in a CPLD with standardised layout; they mostly
+ * only differ in base address and bit meanings in the RESETS and BOARD
+ * registers.
+ *
+ * All data taken from the official AMD board documentation sheets.
+ */
+
+#ifndef _DB1XXX_BCSR_H_
+#define _DB1XXX_BCSR_H_
+
+
+/* BCSR base addresses on various boards. BCSR base 2 refers to the
+ * physical address of the first HEXLEDS register, which is usually
+ * a variable offset from the WHOAMI register.
+ */
+
+/* DB1000, DB1100, DB1500, PB1100, PB1500 */
+#define DB1000_BCSR_PHYS_ADDR 0x0E000000
+#define DB1000_BCSR_HEXLED_OFS 0x01000000
+
+#define DB1550_BCSR_PHYS_ADDR 0x0F000000
+#define DB1550_BCSR_HEXLED_OFS 0x00400000
+
+#define PB1550_BCSR_PHYS_ADDR 0x0F000000
+#define PB1550_BCSR_HEXLED_OFS 0x00800000
+
+#define DB1200_BCSR_PHYS_ADDR 0x19800000
+#define DB1200_BCSR_HEXLED_OFS 0x00400000
+
+#define PB1200_BCSR_PHYS_ADDR 0x0D800000
+#define PB1200_BCSR_HEXLED_OFS 0x00400000
+
+#define DB1300_BCSR_PHYS_ADDR 0x19800000
+#define DB1300_BCSR_HEXLED_OFS 0x00400000
+
+enum bcsr_id {
+ /* BCSR base 1 */
+ BCSR_WHOAMI = 0,
+ BCSR_STATUS,
+ BCSR_SWITCHES,
+ BCSR_RESETS,
+ BCSR_PCMCIA,
+ BCSR_BOARD,
+ BCSR_LEDS,
+ BCSR_SYSTEM,
+ /* Au1200/1300 based boards */
+ BCSR_INTCLR,
+ BCSR_INTSET,
+ BCSR_MASKCLR,
+ BCSR_MASKSET,
+ BCSR_SIGSTAT,
+ BCSR_INTSTAT,
+
+ /* BCSR base 2 */
+ BCSR_HEXLEDS,
+ BCSR_RSVD1,
+ BCSR_HEXCLEAR,
+
+ BCSR_CNT,
+};
+
+/* register offsets, valid for all Db1xxx/Pb1xxx boards */
+#define BCSR_REG_WHOAMI 0x00
+#define BCSR_REG_STATUS 0x04
+#define BCSR_REG_SWITCHES 0x08
+#define BCSR_REG_RESETS 0x0c
+#define BCSR_REG_PCMCIA 0x10
+#define BCSR_REG_BOARD 0x14
+#define BCSR_REG_LEDS 0x18
+#define BCSR_REG_SYSTEM 0x1c
+/* Au1200/Au1300 based boards: CPLD IRQ muxer */
+#define BCSR_REG_INTCLR 0x20
+#define BCSR_REG_INTSET 0x24
+#define BCSR_REG_MASKCLR 0x28
+#define BCSR_REG_MASKSET 0x2c
+#define BCSR_REG_SIGSTAT 0x30
+#define BCSR_REG_INTSTAT 0x34
+
+/* hexled control, offset from BCSR base 2 */
+#define BCSR_REG_HEXLEDS 0x00
+#define BCSR_REG_HEXCLEAR 0x08
+
+/*
+ * Register Bits and Pieces.
+ */
+#define BCSR_WHOAMI_DCID(x) ((x) & 0xf)
+#define BCSR_WHOAMI_CPLD(x) (((x) >> 4) & 0xf)
+#define BCSR_WHOAMI_BOARD(x) (((x) >> 8) & 0xf)
+
+/* register "WHOAMI" bits 11:8 identify the board */
+enum bcsr_whoami_boards {
+ BCSR_WHOAMI_PB1500 = 1,
+ BCSR_WHOAMI_PB1500R2,
+ BCSR_WHOAMI_PB1100,
+ BCSR_WHOAMI_DB1000,
+ BCSR_WHOAMI_DB1100,
+ BCSR_WHOAMI_DB1500,
+ BCSR_WHOAMI_DB1550,
+ BCSR_WHOAMI_PB1550_DDR,
+ BCSR_WHOAMI_PB1550 = BCSR_WHOAMI_PB1550_DDR,
+ BCSR_WHOAMI_PB1550_SDR,
+ BCSR_WHOAMI_PB1200_DDR1,
+ BCSR_WHOAMI_PB1200 = BCSR_WHOAMI_PB1200_DDR1,
+ BCSR_WHOAMI_PB1200_DDR2,
+ BCSR_WHOAMI_DB1200,
+ BCSR_WHOAMI_DB1300,
+};
+
+/* STATUS reg. Unless otherwise noted, they're valid on all boards.
+ * PB1200 = DB1200.
+ */
+#define BCSR_STATUS_PC0VS 0x0003
+#define BCSR_STATUS_PC1VS 0x000C
+#define BCSR_STATUS_PC0FI 0x0010
+#define BCSR_STATUS_PC1FI 0x0020
+#define BCSR_STATUS_PB1550_SWAPBOOT 0x0040
+#define BCSR_STATUS_SRAMWIDTH 0x0080
+#define BCSR_STATUS_FLASHBUSY 0x0100
+#define BCSR_STATUS_ROMBUSY 0x0400
+#define BCSR_STATUS_SD0WP 0x0400 /* DB1200/DB1300:SD1 */
+#define BCSR_STATUS_SD1WP 0x0800
+#define BCSR_STATUS_USBOTGID 0x0800 /* PB/DB1550 */
+#define BCSR_STATUS_DB1000_SWAPBOOT 0x2000
+#define BCSR_STATUS_DB1200_SWAPBOOT 0x0040 /* DB1200/1300 */
+#define BCSR_STATUS_IDECBLID 0x0200 /* DB1200/1300 */
+#define BCSR_STATUS_DB1200_U0RXD 0x1000 /* DB1200 */
+#define BCSR_STATUS_DB1200_U1RXD 0x2000 /* DB1200 */
+#define BCSR_STATUS_FLASHDEN 0xC000
+#define BCSR_STATUS_DB1550_U0RXD 0x1000 /* DB1550 */
+#define BCSR_STATUS_DB1550_U3RXD 0x2000 /* DB1550 */
+#define BCSR_STATUS_PB1550_U0RXD 0x1000 /* PB1550 */
+#define BCSR_STATUS_PB1550_U1RXD 0x2000 /* PB1550 */
+#define BCSR_STATUS_PB1550_U3RXD 0x8000 /* PB1550 */
+
+#define BCSR_STATUS_CFWP 0x4000 /* DB1300 */
+#define BCSR_STATUS_USBOCn 0x2000 /* DB1300 */
+#define BCSR_STATUS_OTGOCn 0x1000 /* DB1300 */
+#define BCSR_STATUS_DCDMARQ 0x0010 /* DB1300 */
+#define BCSR_STATUS_IDEDMARQ 0x0020 /* DB1300 */
+
+/* DB/PB1000,1100,1500,1550 */
+#define BCSR_RESETS_PHY0 0x0001
+#define BCSR_RESETS_PHY1 0x0002
+#define BCSR_RESETS_DC 0x0004
+#define BCSR_RESETS_FIR_SEL 0x2000
+#define BCSR_RESETS_IRDA_MODE_MASK 0xC000
+#define BCSR_RESETS_IRDA_MODE_FULL 0x0000
+#define BCSR_RESETS_PB1550_WSCFSM 0x2000
+#define BCSR_RESETS_IRDA_MODE_OFF 0x4000
+#define BCSR_RESETS_IRDA_MODE_2_3 0x8000
+#define BCSR_RESETS_IRDA_MODE_1_3 0xC000
+#define BCSR_RESETS_DMAREQ 0x8000 /* PB1550 */
+
+#define BCSR_BOARD_PCIM66EN 0x0001
+#define BCSR_BOARD_SD0PWR 0x0040
+#define BCSR_BOARD_SD1PWR 0x0080
+#define BCSR_BOARD_PCIM33 0x0100
+#define BCSR_BOARD_PCIEXTARB 0x0200
+#define BCSR_BOARD_GPIO200RST 0x0400
+#define BCSR_BOARD_PCICLKOUT 0x0800
+#define BCSR_BOARD_PB1100_SD0PWR 0x0400
+#define BCSR_BOARD_PB1100_SD1PWR 0x0800
+#define BCSR_BOARD_PCICFG 0x1000
+#define BCSR_BOARD_SPISEL 0x2000 /* PB/DB1550 */
+#define BCSR_BOARD_SD0WP 0x4000 /* DB1100 */
+#define BCSR_BOARD_SD1WP 0x8000 /* DB1100 */
+
+
+/* DB/PB1200/1300 */
+#define BCSR_RESETS_ETH 0x0001
+#define BCSR_RESETS_CAMERA 0x0002
+#define BCSR_RESETS_DC 0x0004
+#define BCSR_RESETS_IDE 0x0008
+#define BCSR_RESETS_TV 0x0010 /* DB1200/1300 */
+/* Not resets but in the same register */
+#define BCSR_RESETS_PWMR1MUX 0x0800 /* DB1200 */
+#define BCSR_RESETS_PB1200_WSCFSM 0x0800 /* PB1200 */
+#define BCSR_RESETS_PSC0MUX 0x1000
+#define BCSR_RESETS_PSC1MUX 0x2000
+#define BCSR_RESETS_SPISEL 0x4000
+#define BCSR_RESETS_SD1MUX 0x8000 /* PB1200 */
+
+#define BCSR_RESETS_VDDQSHDN 0x0200 /* DB1300 */
+#define BCSR_RESETS_OTPPGM 0x0400 /* DB1300 */
+#define BCSR_RESETS_OTPSCLK 0x0800 /* DB1300 */
+#define BCSR_RESETS_OTPWRPROT 0x1000 /* DB1300 */
+#define BCSR_RESETS_OTPCSB 0x2000 /* DB1300 */
+#define BCSR_RESETS_OTGPWR 0x4000 /* DB1300 */
+#define BCSR_RESETS_USBHPWR 0x8000 /* DB1300 */
+
+#define BCSR_BOARD_LCDVEE 0x0001
+#define BCSR_BOARD_LCDVDD 0x0002
+#define BCSR_BOARD_LCDBL 0x0004
+#define BCSR_BOARD_CAMSNAP 0x0010
+#define BCSR_BOARD_CAMPWR 0x0020
+#define BCSR_BOARD_SD0PWR 0x0040
+#define BCSR_BOARD_CAMCS 0x0010 /* DB1300 */
+#define BCSR_BOARD_HDMI_DE 0x0040 /* DB1300 */
+
+#define BCSR_SWITCHES_DIP 0x00FF
+#define BCSR_SWITCHES_DIP_1 0x0080
+#define BCSR_SWITCHES_DIP_2 0x0040
+#define BCSR_SWITCHES_DIP_3 0x0020
+#define BCSR_SWITCHES_DIP_4 0x0010
+#define BCSR_SWITCHES_DIP_5 0x0008
+#define BCSR_SWITCHES_DIP_6 0x0004
+#define BCSR_SWITCHES_DIP_7 0x0002
+#define BCSR_SWITCHES_DIP_8 0x0001
+#define BCSR_SWITCHES_ROTARY 0x0F00
+
+
+#define BCSR_PCMCIA_PC0VPP 0x0003
+#define BCSR_PCMCIA_PC0VCC 0x000C
+#define BCSR_PCMCIA_PC0DRVEN 0x0010
+#define BCSR_PCMCIA_PC0RST 0x0080
+#define BCSR_PCMCIA_PC1VPP 0x0300
+#define BCSR_PCMCIA_PC1VCC 0x0C00
+#define BCSR_PCMCIA_PC1DRVEN 0x1000
+#define BCSR_PCMCIA_PC1RST 0x8000
+
+
+#define BCSR_LEDS_DECIMALS 0x0003
+#define BCSR_LEDS_LED0 0x0100
+#define BCSR_LEDS_LED1 0x0200
+#define BCSR_LEDS_LED2 0x0400
+#define BCSR_LEDS_LED3 0x0800
+
+
+#define BCSR_SYSTEM_RESET 0x8000 /* clear to reset */
+#define BCSR_SYSTEM_PWROFF 0x4000 /* set to power off */
+#define BCSR_SYSTEM_VDDI 0x001F /* PB1xxx boards */
+#define BCSR_SYSTEM_DEBUGCSMASK 0x003F /* DB1300 */
+#define BCSR_SYSTEM_UDMAMODE 0x0100 /* DB1300 */
+#define BCSR_SYSTEM_WAKEONIRQ 0x0200 /* DB1300 */
+#define BCSR_SYSTEM_VDDI1300 0x3C00 /* DB1300 */
+
+
+
+/* initialize BCSR for a board. Provide the PHYSICAL addresses of both
+ * BCSR spaces.
+ */
+void __init bcsr_init(unsigned long bcsr1_phys, unsigned long bcsr2_phys);
+
+/* read a board register */
+unsigned short bcsr_read(enum bcsr_id reg);
+
+/* write to a board register */
+void bcsr_write(enum bcsr_id reg, unsigned short val);
+
+/* modify a register. clear bits set in 'clr', set bits set in 'set' */
+void bcsr_mod(enum bcsr_id reg, unsigned short clr, unsigned short set);
+
+/* install CPLD IRQ demuxer (DB1200/PB1200) */
+void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq);
+
+#endif
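
A plausible call sequence for board code using the API above: map both CPLD windows early, identify the board from WHOAMI, then use bcsr_mod() for read-modify-write updates. The DB1200 addresses reuse the constants defined in this header; the error handling is illustrative.

#include <linux/errno.h>
#include <linux/init.h>

static int __init example_bcsr_probe(void)
{
	unsigned short whoami;

	/* DB1200: BCSR base 2 sits HEXLED_OFS above base 1 */
	bcsr_init(DB1200_BCSR_PHYS_ADDR,
		  DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS);

	whoami = bcsr_read(BCSR_WHOAMI);
	if (BCSR_WHOAMI_BOARD(whoami) != BCSR_WHOAMI_DB1200)
		return -ENODEV;

	/* switch on LED0 without touching the other LED bits */
	bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED0);
	return 0;
}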
diff --git a/arch/mips/include/asm/mach-db1x00/irq.h b/arch/mips/include/asm/mach-db1x00/irq.h
new file mode 100644
index 000000000..15b266932
--- /dev/null
+++ b/arch/mips/include/asm/mach-db1x00/irq.h
@@ -0,0 +1,23 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 by Ralf Baechle
+ */
+#ifndef __ASM_MACH_GENERIC_IRQ_H
+#define __ASM_MACH_GENERIC_IRQ_H
+
+
+#ifdef NR_IRQS
+#undef NR_IRQS
+#endif
+
+#ifndef MIPS_CPU_IRQ_BASE
+#define MIPS_CPU_IRQ_BASE 0
+#endif
+
+/* 8 (MIPS) + 128 (au1300) + 16 (cpld) */
+#define NR_IRQS 152
+
+#endif /* __ASM_MACH_GENERIC_IRQ_H */
diff --git a/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h b/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
new file mode 100644
index 000000000..1896e88f6
--- /dev/null
+++ b/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * CPU feature overrides for DECstation systems. Two variations
+ * are generally applicable.
+ *
+ * Copyright (C) 2013 Maciej W. Rozycki
+ */
+#ifndef __ASM_MACH_DEC_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_DEC_CPU_FEATURE_OVERRIDES_H
+
+/* Generic ones first. */
+#define cpu_has_tlb 1
+#define cpu_has_tlbinv 0
+#define cpu_has_segments 0
+#define cpu_has_eva 0
+#define cpu_has_htw 0
+#define cpu_has_rixiex 0
+#define cpu_has_maar 0
+#define cpu_has_rw_llb 0
+#define cpu_has_tx39_cache 0
+#define cpu_has_divec 0
+#define cpu_has_prefetch 0
+#define cpu_has_mcheck 0
+#define cpu_has_ejtag 0
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+#define cpu_has_rixi 0
+#define cpu_has_xpa 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_pindexed_dcache 0
+#define cpu_icache_snoops_remote_store 1
+#define cpu_has_mips_4 0
+#define cpu_has_mips_5 0
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+#define cpu_has_perf_cntr_intr_bit 0
+#define cpu_has_vz 0
+#define cpu_has_fre 0
+#define cpu_has_cdmm 0
+
+/* R3k-specific ones. */
+#ifdef CONFIG_CPU_R3000
+#define cpu_has_3kex 1
+#define cpu_has_4kex 0
+#define cpu_has_3k_cache 1
+#define cpu_has_4k_cache 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 0
+#define cpu_has_watch 0
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_llsc 0
+#define cpu_has_dc_aliases 0
+#define cpu_has_mips_2 0
+#define cpu_has_mips_3 0
+#define cpu_has_nofpuex 1
+#define cpu_has_inclusive_pcaches 0
+#define cpu_dcache_line_size() 4
+#define cpu_icache_line_size() 4
+#define cpu_scache_line_size() 0
+#endif /* CONFIG_CPU_R3000 */
+
+/* R4k-specific ones. */
+#ifdef CONFIG_CPU_R4X00
+#define cpu_has_3kex 0
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_32fpr 1
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_vce 1
+#define cpu_has_cache_cdex_p 1
+#define cpu_has_cache_cdex_s 1
+#define cpu_has_llsc 1
+#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
+#define cpu_has_mips_2 1
+#define cpu_has_mips_3 1
+#define cpu_has_nofpuex 0
+#define cpu_has_inclusive_pcaches 1
+#define cpu_dcache_line_size() 16
+#define cpu_icache_line_size() 16
+#define cpu_scache_line_size() 32
+#endif /* CONFIG_CPU_R4X00 */
+
+#endif /* __ASM_MACH_DEC_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-dec/mc146818rtc.h b/arch/mips/include/asm/mach-dec/mc146818rtc.h
new file mode 100644
index 000000000..d4614e2a8
--- /dev/null
+++ b/arch/mips/include/asm/mach-dec/mc146818rtc.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RTC definitions for DECstation style attached Dallas DS1287 chip.
+ *
+ * Copyright (C) 1998, 2001 by Ralf Baechle
+ * Copyright (C) 1998 by Harald Koerfgen
+ * Copyright (C) 2002, 2005 Maciej W. Rozycki
+ */
+#ifndef __ASM_MIPS_DEC_RTC_DEC_H
+#define __ASM_MIPS_DEC_RTC_DEC_H
+
+#include <linux/types.h>
+#include <asm/addrspace.h>
+#include <asm/dec/system.h>
+
+extern volatile u8 *dec_rtc_base;
+
+#define ARCH_RTC_LOCATION
+
+#define RTC_PORT(x) CPHYSADDR((long)dec_rtc_base)
+#define RTC_IO_EXTENT dec_kn_slot_size
+#define RTC_IOMAPPED 0
+#undef RTC_IRQ
+
+#define RTC_DEC_YEAR 0x3f /* Where we store the real year on DECs. */
+
+static inline unsigned char CMOS_READ(unsigned long addr)
+{
+ return dec_rtc_base[addr * 4];
+}
+
+static inline void CMOS_WRITE(unsigned char data, unsigned long addr)
+{
+ dec_rtc_base[addr * 4] = data;
+}
+
+#define RTC_ALWAYS_BCD 0
+
+#endif /* __ASM_MIPS_DEC_RTC_DEC_H */
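
The "addr * 4" stride presumably reflects the RTC being wired on 32-bit boundaries, so CMOS register n lives at byte offset 4 * n from dec_rtc_base. For example, the spare location holding the full year can be read directly:

static unsigned int dec_read_stored_year(void)
{
	/* DECstations keep the real year in CMOS location 0x3f */
	return CMOS_READ(RTC_DEC_YEAR);
}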
diff --git a/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h b/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h
new file mode 100644
index 000000000..42be9e9ce
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h
@@ -0,0 +1,13 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Ralf Baechle
+ */
+#ifndef __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H
+
+/* Intentionally empty file ... */
+
+#endif /* __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-generic/floppy.h b/arch/mips/include/asm/mach-generic/floppy.h
new file mode 100644
index 000000000..e0c9cd41f
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/floppy.h
@@ -0,0 +1,133 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1997, 1998, 2003 by Ralf Baechle
+ */
+#ifndef __ASM_MACH_GENERIC_FLOPPY_H
+#define __ASM_MACH_GENERIC_FLOPPY_H
+
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <asm/bootinfo.h>
+#include <asm/cachectl.h>
+#include <asm/dma.h>
+#include <asm/floppy.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+/*
+ * How to access the FDC's registers.
+ */
+static inline unsigned char fd_inb(unsigned int base, unsigned int reg)
+{
+ return inb_p(base + reg);
+}
+
+static inline void fd_outb(unsigned char value, unsigned int base, unsigned int reg)
+{
+ outb_p(value, base + reg);
+}
+
+/*
+ * How to access the floppy DMA functions.
+ */
+static inline void fd_enable_dma(void)
+{
+ enable_dma(FLOPPY_DMA);
+}
+
+static inline void fd_disable_dma(void)
+{
+ disable_dma(FLOPPY_DMA);
+}
+
+static inline int fd_request_dma(void)
+{
+ return request_dma(FLOPPY_DMA, "floppy");
+}
+
+static inline void fd_free_dma(void)
+{
+ free_dma(FLOPPY_DMA);
+}
+
+static inline void fd_clear_dma_ff(void)
+{
+ clear_dma_ff(FLOPPY_DMA);
+}
+
+static inline void fd_set_dma_mode(char mode)
+{
+ set_dma_mode(FLOPPY_DMA, mode);
+}
+
+static inline void fd_set_dma_addr(char *addr)
+{
+ set_dma_addr(FLOPPY_DMA, (unsigned long) addr);
+}
+
+static inline void fd_set_dma_count(unsigned int count)
+{
+ set_dma_count(FLOPPY_DMA, count);
+}
+
+static inline int fd_get_dma_residue(void)
+{
+ return get_dma_residue(FLOPPY_DMA);
+}
+
+static inline void fd_enable_irq(void)
+{
+ enable_irq(FLOPPY_IRQ);
+}
+
+static inline void fd_disable_irq(void)
+{
+ disable_irq(FLOPPY_IRQ);
+}
+
+static inline int fd_request_irq(void)
+{
+ return request_irq(FLOPPY_IRQ, floppy_interrupt,
+ 0, "floppy", NULL);
+}
+
+static inline void fd_free_irq(void)
+{
+ free_irq(FLOPPY_IRQ, NULL);
+}
+
+#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL);
+
+
+static inline unsigned long fd_getfdaddr1(void)
+{
+ return 0x3f0;
+}
+
+static inline unsigned long fd_dma_mem_alloc(unsigned long size)
+{
+ return __get_dma_pages(GFP_KERNEL, get_order(size));
+}
+
+static inline void fd_dma_mem_free(unsigned long addr, unsigned long size)
+{
+ free_pages(addr, get_order(size));
+}
+
+static inline unsigned long fd_drive_type(unsigned long n)
+{
+ if (n == 0)
+ return 4; /* 3,5", 1.44mb */
+
+ return 0;
+}
+
+#endif /* __ASM_MACH_GENERIC_FLOPPY_H */
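The wrappers in this header map straight onto the ISA-style DMA calls, and the generic floppy driver issues them in a fixed order. A minimal sketch of that sequence follows (illustrative only, not part of the patch; DMA_MODE_READ comes from <asm/dma.h>, the buffer and length are caller-supplied assumptions):

/* Illustrative sketch -- typical DMA setup for a floppy read. */
static void example_setup_floppy_read(char *buf, unsigned int len)
{
        fd_disable_dma();
        fd_clear_dma_ff();                      /* reset the controller's address flip-flop */
        fd_set_dma_mode(DMA_MODE_READ);         /* from <asm/dma.h> */
        fd_set_dma_addr(buf);
        fd_set_dma_count(len);
        fd_enable_dma();
}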
diff --git a/arch/mips/include/asm/mach-generic/ide.h b/arch/mips/include/asm/mach-generic/ide.h
new file mode 100644
index 000000000..4ae5fbcb1
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/ide.h
@@ -0,0 +1,138 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994-1996 Linus Torvalds & authors
+ *
+ * Copied from i386; many of the especially older MIPS or ISA-based platforms
+ * are basically identical. Using this file probably implies i8259 PIC
+ * support in a system, but at the very least interrupt numbers 0 - 15 need
+ * to be set aside for legacy devices.
+ */
+#ifndef __ASM_MACH_GENERIC_IDE_H
+#define __ASM_MACH_GENERIC_IDE_H
+
+#ifdef __KERNEL__
+
+#include <linux/pci.h>
+#include <linux/stddef.h>
+#include <asm/processor.h>
+
+/* MIPS port and memory-mapped I/O string operations. */
+static inline void __ide_flush_prologue(void)
+{
+#ifdef CONFIG_SMP
+ if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
+ preempt_disable();
+#endif
+}
+
+static inline void __ide_flush_epilogue(void)
+{
+#ifdef CONFIG_SMP
+ if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
+ preempt_enable();
+#endif
+}
+
+static inline void __ide_flush_dcache_range(unsigned long addr, unsigned long size)
+{
+ if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {
+ unsigned long end = addr + size;
+
+ while (addr < end) {
+ local_flush_data_cache_page((void *)addr);
+ addr += PAGE_SIZE;
+ }
+ }
+}
+
+/*
+ * insw() and gang might be called with interrupts disabled, so we can't
+ * send IPIs for flushing due to the potential for deadlocks, see the comment
+ * above smp_call_function() in arch/mips/kernel/smp.c. We work around the
+ * problem by disabling preemption, so we know we actually perform the flush
+ * on the processor that actually has the lines to be flushed, which hopefully
+ * is even better for performance anyway.
+ */
+static inline void __ide_insw(unsigned long port, void *addr,
+ unsigned int count)
+{
+ __ide_flush_prologue();
+ insw(port, addr, count);
+ __ide_flush_dcache_range((unsigned long)addr, count * 2);
+ __ide_flush_epilogue();
+}
+
+static inline void __ide_insl(unsigned long port, void *addr, unsigned int count)
+{
+ __ide_flush_prologue();
+ insl(port, addr, count);
+ __ide_flush_dcache_range((unsigned long)addr, count * 4);
+ __ide_flush_epilogue();
+}
+
+static inline void __ide_outsw(unsigned long port, const void *addr,
+ unsigned long count)
+{
+ __ide_flush_prologue();
+ outsw(port, addr, count);
+ __ide_flush_dcache_range((unsigned long)addr, count * 2);
+ __ide_flush_epilogue();
+}
+
+static inline void __ide_outsl(unsigned long port, const void *addr,
+ unsigned long count)
+{
+ __ide_flush_prologue();
+ outsl(port, addr, count);
+ __ide_flush_dcache_range((unsigned long)addr, count * 4);
+ __ide_flush_epilogue();
+}
+
+static inline void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
+{
+ __ide_flush_prologue();
+ readsw(port, addr, count);
+ __ide_flush_dcache_range((unsigned long)addr, count * 2);
+ __ide_flush_epilogue();
+}
+
+static inline void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
+{
+ __ide_flush_prologue();
+ readsl(port, addr, count);
+ __ide_flush_dcache_range((unsigned long)addr, count * 4);
+ __ide_flush_epilogue();
+}
+
+static inline void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
+{
+ __ide_flush_prologue();
+ writesw(port, addr, count);
+ __ide_flush_dcache_range((unsigned long)addr, count * 2);
+ __ide_flush_epilogue();
+}
+
+static inline void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
+{
+ __ide_flush_prologue();
+ writesl(port, addr, count);
+ __ide_flush_dcache_range((unsigned long)addr, count * 4);
+ __ide_flush_epilogue();
+}
+
+/* ide_insw calls insw, not __ide_insw. Why? */
+#undef insw
+#undef insl
+#undef outsw
+#undef outsl
+#define insw(port, addr, count) __ide_insw(port, addr, count)
+#define insl(port, addr, count) __ide_insl(port, addr, count)
+#define outsw(port, addr, count) __ide_outsw(port, addr, count)
+#define outsl(port, addr, count) __ide_outsl(port, addr, count)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_MACH_GENERIC_IDE_H */
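With the #undef/#define block above in effect, an ordinary insw() in an IDE driver expands to __ide_insw(), so the dcache flush and the SMP preemption bracket happen transparently. A minimal sketch under that assumption (illustrative only, not part of the patch; the data port and the 256-word sector size are assumed):

/* Illustrative sketch -- reading one 512-byte sector through the wrapper. */
static void example_read_sector(unsigned long data_port, u16 *buf)
{
        /* Expands to __ide_insw(): insw() plus a dcache flush of buf,
         * with preemption disabled around the transfer on SMP. */
        insw(data_port, buf, 256);
}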
diff --git a/arch/mips/include/asm/mach-generic/ioremap.h b/arch/mips/include/asm/mach-generic/ioremap.h
new file mode 100644
index 000000000..f2442b845
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/ioremap.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-mips/mach-generic/ioremap.h
+ */
+#ifndef __ASM_MACH_GENERIC_IOREMAP_H
+#define __ASM_MACH_GENERIC_IOREMAP_H
+
+#include <linux/types.h>
+
+static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
+ unsigned long flags)
+{
+ return NULL;
+}
+
+static inline int plat_iounmap(const volatile void __iomem *addr)
+{
+ return 0;
+}
+
+#endif /* __ASM_MACH_GENERIC_IOREMAP_H */
diff --git a/arch/mips/include/asm/mach-generic/irq.h b/arch/mips/include/asm/mach-generic/irq.h
new file mode 100644
index 000000000..079889ced
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/irq.h
@@ -0,0 +1,39 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 by Ralf Baechle
+ */
+#ifndef __ASM_MACH_GENERIC_IRQ_H
+#define __ASM_MACH_GENERIC_IRQ_H
+
+#ifndef NR_IRQS
+#define NR_IRQS 256
+#endif
+
+#ifdef CONFIG_I8259
+#ifndef I8259A_IRQ_BASE
+#define I8259A_IRQ_BASE 0
+#endif
+#endif
+
+#ifdef CONFIG_IRQ_MIPS_CPU
+
+#ifndef MIPS_CPU_IRQ_BASE
+#ifdef CONFIG_I8259
+#define MIPS_CPU_IRQ_BASE 16
+#else
+#define MIPS_CPU_IRQ_BASE 0
+#endif /* CONFIG_I8259 */
+#endif
+
+#ifdef CONFIG_IRQ_CPU_RM7K
+#ifndef RM7K_CPU_IRQ_BASE
+#define RM7K_CPU_IRQ_BASE (MIPS_CPU_IRQ_BASE+8)
+#endif
+#endif
+
+#endif /* CONFIG_IRQ_MIPS_CPU */
+
+#endif /* __ASM_MACH_GENERIC_IRQ_H */
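The bases above stack on top of each other: with CONFIG_I8259 the legacy PIC owns Linux IRQs 0 - 15 and the MIPS CPU interrupts start at 16. A one-line sketch of how a platform could place the usual CP0 timer interrupt (hardware interrupt 7) on top of that (illustrative only; EXAMPLE_TIMER_IRQ is a made-up name and CONFIG_IRQ_MIPS_CPU is assumed):

/* Illustrative sketch -- resolves to 23 with CONFIG_I8259, 7 without. */
#define EXAMPLE_TIMER_IRQ       (MIPS_CPU_IRQ_BASE + 7)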
diff --git a/arch/mips/include/asm/mach-generic/kernel-entry-init.h b/arch/mips/include/asm/mach-generic/kernel-entry-init.h
new file mode 100644
index 000000000..a229297c8
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/kernel-entry-init.h
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Embedded Alley Solutions, Inc
+ * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_MACH_GENERIC_KERNEL_ENTRY_H
+#define __ASM_MACH_GENERIC_KERNEL_ENTRY_H
+
+/* Intentionally empty macro, used in head.S. Override in
+ * arch/mips/mach-xxx/kernel-entry-init.h when necessary.
+ */
+ .macro kernel_entry_setup
+ .endm
+
+/*
+ * Do SMP slave processor setup necessary before we can safely execute C code.
+ */
+ .macro smp_slave_setup
+ .endm
+
+
+#endif /* __ASM_MACH_GENERIC_KERNEL_ENTRY_H */
diff --git a/arch/mips/include/asm/mach-generic/kmalloc.h b/arch/mips/include/asm/mach-generic/kmalloc.h
new file mode 100644
index 000000000..649a98338
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/kmalloc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_GENERIC_KMALLOC_H
+#define __ASM_MACH_GENERIC_KMALLOC_H
+
+#ifdef CONFIG_DMA_NONCOHERENT
+/*
+ * Total overkill for most systems, but needed as a safe default.
+ * Set this one if any device in the system might do non-coherent DMA.
+ */
+#define ARCH_DMA_MINALIGN 128
+#endif
+
+#endif /* __ASM_MACH_GENERIC_KMALLOC_H */
diff --git a/arch/mips/include/asm/mach-generic/mangle-port.h b/arch/mips/include/asm/mach-generic/mangle-port.h
new file mode 100644
index 000000000..77c65c294
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/mangle-port.h
@@ -0,0 +1,52 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ */
+#ifndef __ASM_MACH_GENERIC_MANGLE_PORT_H
+#define __ASM_MACH_GENERIC_MANGLE_PORT_H
+
+#define __swizzle_addr_b(port) (port)
+#define __swizzle_addr_w(port) (port)
+#define __swizzle_addr_l(port) (port)
+#define __swizzle_addr_q(port) (port)
+
+/*
+ * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
+ * less sane hardware forces software to fiddle with this...
+ *
+ * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
+ * you can't have the numerical value of data and byte addresses within
+ * multibyte quantities both preserved at the same time. Hence two
+ * variations of functions: non-prefixed ones that preserve the value
+ * and prefixed ones that preserve byte addresses. The latter are
+ * typically used for moving raw data between a peripheral and memory (cf.
+ * string I/O functions), hence the "__mem_" prefix.
+ */
+#if defined(CONFIG_SWAP_IO_SPACE)
+
+# define ioswabb(a, x) (x)
+# define __mem_ioswabb(a, x) (x)
+# define ioswabw(a, x) le16_to_cpu((__force __le16)(x))
+# define __mem_ioswabw(a, x) (x)
+# define ioswabl(a, x) le32_to_cpu((__force __le32)(x))
+# define __mem_ioswabl(a, x) (x)
+# define ioswabq(a, x) le64_to_cpu((__force __le64)(x))
+# define __mem_ioswabq(a, x) (x)
+
+#else
+
+# define ioswabb(a, x) (x)
+# define __mem_ioswabb(a, x) (x)
+# define ioswabw(a, x) (x)
+# define __mem_ioswabw(a, x) ((__force u16)cpu_to_le16(x))
+# define ioswabl(a, x) (x)
+# define __mem_ioswabl(a, x) ((__force u32)cpu_to_le32(x))
+# define ioswabq(a, x) (x)
+# define __mem_ioswabq(a, x) ((__force u64)cpu_to_le64(x))
+
+#endif
+
+#endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */
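The value-preserving versus byte-address-preserving split described in the comment is easiest to see with a concrete 32-bit access. A sketch assuming CONFIG_SWAP_IO_SPACE=y on a big-endian host (illustrative only, not part of the patch):

static void example_swab(void)
{
        /* A little-endian device register holding 0x12345678 appears on the
         * bus as the bytes 78 56 34 12; a raw big-endian load of those bytes
         * reads back 0x78563412. */
        u32 raw = 0x78563412;
        u32 val = ioswabl(0, raw);              /* 0x12345678: numeric value preserved */
        u32 mem = __mem_ioswabl(0, raw);        /* 0x78563412: byte order preserved    */
}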
diff --git a/arch/mips/include/asm/mach-generic/mc146818rtc.h b/arch/mips/include/asm/mach-generic/mc146818rtc.h
new file mode 100644
index 000000000..9c72e540f
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/mc146818rtc.h
@@ -0,0 +1,36 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998, 2001, 03 by Ralf Baechle
+ *
+ * RTC routines for PC style attached Dallas chip.
+ */
+#ifndef __ASM_MACH_GENERIC_MC146818RTC_H
+#define __ASM_MACH_GENERIC_MC146818RTC_H
+
+#include <asm/io.h>
+
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_IRQ 8
+
+static inline unsigned char CMOS_READ(unsigned long addr)
+{
+ outb_p(addr, RTC_PORT(0));
+ return inb_p(RTC_PORT(1));
+}
+
+static inline void CMOS_WRITE(unsigned char data, unsigned long addr)
+{
+ outb_p(addr, RTC_PORT(0));
+ outb_p(data, RTC_PORT(1));
+}
+
+#define RTC_ALWAYS_BCD 0
+
+#ifndef mc146818_decode_year
+#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1900)
+#endif
+
+#endif /* __ASM_MACH_GENERIC_MC146818RTC_H */
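These accessors implement the classic index/data pattern: write the register number to port 0x70, then read or write the data at port 0x71. A sketch of the usual read sequence built on top of them (illustrative only; RTC_SECONDS, RTC_CONTROL and RTC_DM_BINARY are assumed to come from <linux/mc146818rtc.h>, bcd2bin() from <linux/bcd.h>):

unsigned int sec = CMOS_READ(RTC_SECONDS);

/* The chip stores BCD unless binary mode is enabled in the control register. */
if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
        sec = bcd2bin(sec);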
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
new file mode 100644
index 000000000..c3ac06a6a
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -0,0 +1,110 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_MACH_GENERIC_SPACES_H
+#define _ASM_MACH_GENERIC_SPACES_H
+
+#include <linux/const.h>
+
+#include <asm/mipsregs.h>
+
+#ifndef IO_SPACE_LIMIT
+#define IO_SPACE_LIMIT 0xffff
+#endif
+
+/*
+ * This gives the physical RAM offset.
+ */
+#ifndef __ASSEMBLY__
+# if defined(CONFIG_MIPS_AUTO_PFN_OFFSET)
+# define PHYS_OFFSET ((unsigned long)PFN_PHYS(ARCH_PFN_OFFSET))
+# elif !defined(PHYS_OFFSET)
+# define PHYS_OFFSET _AC(0, UL)
+# endif
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_32BIT
+#ifdef CONFIG_KVM_GUEST
+#define CAC_BASE _AC(0x40000000, UL)
+#else
+#define CAC_BASE _AC(0x80000000, UL)
+#endif
+#ifndef IO_BASE
+#define IO_BASE _AC(0xa0000000, UL)
+#endif
+#ifndef UNCAC_BASE
+#define UNCAC_BASE _AC(0xa0000000, UL)
+#endif
+
+#ifndef MAP_BASE
+#ifdef CONFIG_KVM_GUEST
+#define MAP_BASE _AC(0x60000000, UL)
+#else
+#define MAP_BASE _AC(0xc0000000, UL)
+#endif
+#endif
+
+/*
+ * Memory above this physical address will be considered highmem.
+ */
+#ifndef HIGHMEM_START
+#define HIGHMEM_START _AC(0x20000000, UL)
+#endif
+
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_64BIT
+
+#ifndef CAC_BASE
+#define CAC_BASE PHYS_TO_XKPHYS(read_c0_config() & CONF_CM_CMASK, 0)
+#endif
+
+#ifndef IO_BASE
+#define IO_BASE _AC(0x9000000000000000, UL)
+#endif
+
+#ifndef UNCAC_BASE
+#define UNCAC_BASE _AC(0x9000000000000000, UL)
+#endif
+
+#ifndef MAP_BASE
+#define MAP_BASE _AC(0xc000000000000000, UL)
+#endif
+
+/*
+ * Memory above this physical address will be considered highmem.
+ * Fixme: 59 bits is a fictive number and makes assumptions about processors
+ * in the distant future. Nobody will care for a few years :-)
+ */
+#ifndef HIGHMEM_START
+#define HIGHMEM_START (_AC(1, UL) << _AC(59, UL))
+#endif
+
+#define TO_PHYS(x) ((x) & TO_PHYS_MASK)
+#define TO_CAC(x) (CAC_BASE | ((x) & TO_PHYS_MASK))
+#define TO_UNCAC(x) (UNCAC_BASE | ((x) & TO_PHYS_MASK))
+
+#endif /* CONFIG_64BIT */
+
+/*
+ * This handles the memory map.
+ */
+#ifndef PAGE_OFFSET
+#define PAGE_OFFSET (CAC_BASE + PHYS_OFFSET)
+#endif
+
+#ifndef FIXADDR_TOP
+#ifdef CONFIG_KVM_GUEST
+#define FIXADDR_TOP ((unsigned long)(long)(int)0x7ffe0000)
+#else
+#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
+#endif
+#endif
+
+#endif /* _ASM_MACH_GENERIC_SPACES_H */
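A worked example of how the bases combine, using the 32-bit defaults above with PHYS_OFFSET = 0 (illustrative only, not part of the patch):

/*
 *   PAGE_OFFSET         = CAC_BASE + PHYS_OFFSET   = 0x80000000
 *   physical 0x01fc0000 -> cached   0x81fc0000     (CAC_BASE   | phys)
 *                       -> uncached 0xa1fc0000     (UNCAC_BASE | phys)
 * On 64-bit kernels the same conversions are spelled TO_CAC()/TO_UNCAC().
 */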
diff --git a/arch/mips/include/asm/mach-generic/topology.h b/arch/mips/include/asm/mach-generic/topology.h
new file mode 100644
index 000000000..5428f333a
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/topology.h
@@ -0,0 +1 @@
+#include <asm-generic/topology.h>
diff --git a/arch/mips/include/asm/mach-ingenic/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ingenic/cpu-feature-overrides.h
new file mode 100644
index 000000000..7c5e576f9
--- /dev/null
+++ b/arch/mips/include/asm/mach-ingenic/cpu-feature-overrides.h
@@ -0,0 +1,50 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+#ifndef __ASM_MACH_JZ4740_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_JZ4740_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_counter 0
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 1
+#define cpu_has_mcheck 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+#define kernel_uses_llsc 1
+#define cpu_has_vtag_icache 1
+#define cpu_has_dc_aliases 0
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_pindexed_dcache 0
+#define cpu_has_mips32r1 1
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+#define cpu_has_nofpuex 0
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_inclusive_pcaches 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+
+#endif
diff --git a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
new file mode 100644
index 000000000..b80d5eafc
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
@@ -0,0 +1,51 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 07 Ralf Baechle
+ */
+#ifndef __ASM_MACH_IP22_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_IP22_CPU_FEATURE_OVERRIDES_H
+
+#include <asm/cpu.h>
+
+/*
+ * IP22 ships with a variety of processors, so we can't use defaults for everything.
+ */
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_4k_cache 1
+#define cpu_has_32fpr 1
+#define cpu_has_counter 1
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_divec 0
+#define cpu_has_cache_cdex_p 1
+#define cpu_has_prefetch 0
+#define cpu_has_mcheck 0
+#define cpu_has_ejtag 0
+
+#define cpu_has_llsc 1
+#define cpu_has_vtag_icache 0 /* Needs to change for R8000 */
+#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
+#define cpu_has_ic_fills_f_dc 0
+
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+
+#define cpu_has_nofpuex 0
+#define cpu_has_64bits 1
+
+#define cpu_has_mips_2 1
+#define cpu_has_mips_3 1
+#define cpu_has_mips_5 0
+
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#endif /* __ASM_MACH_IP22_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ip22/spaces.h b/arch/mips/include/asm/mach-ip22/spaces.h
new file mode 100644
index 000000000..24fe92cb5
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip22/spaces.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_MACH_IP22_SPACES_H
+#define _ASM_MACH_IP22_SPACES_H
+
+#define PHYS_OFFSET _AC(0x08000000, UL)
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* __ASM_MACH_IP22_SPACES_H */
diff --git a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
new file mode 100644
index 000000000..79d6fd249
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
@@ -0,0 +1,76 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 07 Ralf Baechle
+ */
+#ifndef __ASM_MACH_IP27_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_IP27_CPU_FEATURE_OVERRIDES_H
+
+#include <asm/cpu.h>
+
+/*
+ * IP27 only comes with R1x000 family processors, all using the same config
+ */
+#define cpu_has_tlb 1
+#define cpu_has_tlbinv 0
+#define cpu_has_segments 0
+#define cpu_has_eva 0
+#define cpu_has_htw 0
+#define cpu_has_rixiex 0
+#define cpu_has_maar 0
+#define cpu_has_rw_llb 0
+#define cpu_has_3kex 0
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_nofpuex 0
+#define cpu_has_32fpr 1
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_64bits 1
+#define cpu_has_divec 0
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 1
+#define cpu_has_mcheck 0
+#define cpu_has_ejtag 0
+#define cpu_has_llsc 1
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+#define cpu_has_rixi 0
+#define cpu_has_xpa 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_dc_aliases 0
+#define cpu_has_ic_fills_f_dc 0
+
+#define cpu_icache_snoops_remote_store 1
+
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+#define cpu_has_mips32r6 0
+#define cpu_has_mips64r6 0
+
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+#define cpu_has_inclusive_pcaches 1
+#define cpu_has_perf_cntr_intr_bit 0
+#define cpu_has_vz 0
+#define cpu_has_fre 0
+#define cpu_has_cdmm 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 64
+#define cpu_scache_line_size() 128
+
+#endif /* __ASM_MACH_IP27_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ip27/irq.h b/arch/mips/include/asm/mach-ip27/irq.h
new file mode 100644
index 000000000..f45d7999f
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip27/irq.h
@@ -0,0 +1,24 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999, 2000, 01, 02, 03 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 Kanoj Sarcar
+ */
+#ifndef __ASM_MACH_IP27_IRQ_H
+#define __ASM_MACH_IP27_IRQ_H
+
+#define NR_IRQS 256
+
+#include <asm/mach-generic/irq.h>
+
+#define IP27_HUB_PEND0_IRQ (MIPS_CPU_IRQ_BASE + 2)
+#define IP27_HUB_PEND1_IRQ (MIPS_CPU_IRQ_BASE + 3)
+#define IP27_RT_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 4)
+
+#define IP27_HUB_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8)
+#define IP27_HUB_IRQ_COUNT 128
+
+#endif /* __ASM_MACH_IP27_IRQ_H */
diff --git a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
new file mode 100644
index 000000000..3e54f605a
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
@@ -0,0 +1,96 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2005 Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_MACH_IP27_KERNEL_ENTRY_H
+#define __ASM_MACH_IP27_KERNEL_ENTRY_H
+
+#include <asm/sn/addrs.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/klkernvars.h>
+
+/*
+ * TLB bits
+ */
+#define PAGE_GLOBAL (1 << 6)
+#define PAGE_VALID (1 << 7)
+#define PAGE_DIRTY (1 << 8)
+#define CACHE_CACHABLE_COW (5 << 9)
+
+ /*
+ * inputs are the text nasid in t1, data nasid in t2.
+ */
+ .macro MAPPED_KERNEL_SETUP_TLB
+#ifdef CONFIG_MAPPED_KERNEL
+ /*
+ * This needs to read the nasid - assume 0 for now.
+ * Drop in 0xffffffffc0000000 in tlbhi, 0+VG in tlblo_0,
+ * 0+DVG in tlblo_1.
+ */
+ dli t0, 0xffffffffc0000000
+ dmtc0 t0, CP0_ENTRYHI
+ li t0, 0x1c000 # Offset of text into node memory
+ dsll t1, NASID_SHFT # Shift text nasid into place
+ dsll t2, NASID_SHFT # Same for data nasid
+ or t1, t1, t0 # Physical load address of kernel text
+ or t2, t2, t0 # Physical load address of kernel data
+ dsrl t1, 12 # 4K pfn
+ dsrl t2, 12 # 4K pfn
+ dsll t1, 6 # Get pfn into place
+ dsll t2, 6 # Get pfn into place
+ li t0, ((PAGE_GLOBAL | PAGE_VALID | CACHE_CACHABLE_COW) >> 6)
+ or t0, t0, t1
+ mtc0 t0, CP0_ENTRYLO0 # physaddr, VG, cach exlwr
+ li t0, ((PAGE_GLOBAL | PAGE_VALID | PAGE_DIRTY | CACHE_CACHABLE_COW) >> 6)
+ or t0, t0, t2
+ mtc0 t0, CP0_ENTRYLO1 # physaddr, DVG, cach exlwr
+ li t0, 0x1ffe000 # MAPPED_KERN_TLBMASK, TLBPGMASK_16M
+ mtc0 t0, CP0_PAGEMASK
+ li t0, 0 # KMAP_INX
+ mtc0 t0, CP0_INDEX
+ li t0, 1
+ mtc0 t0, CP0_WIRED
+ tlbwi
+#else
+ mtc0 zero, CP0_WIRED
+#endif
+ .endm
+
+/*
+ * Intentionally empty macro, used in head.S. Override in
+ * arch/mips/mach-xxx/kernel-entry-init.h when necessary.
+ */
+ .macro kernel_entry_setup
+ GET_NASID_ASM t1
+ move t2, t1 # text and data are here
+ MAPPED_KERNEL_SETUP_TLB
+ .endm
+
+/*
+ * Do SMP slave processor setup necessary before we can safely execute C code.
+ */
+ .macro smp_slave_setup
+ GET_NASID_ASM t1
+ dli t0, KLDIR_OFFSET + (KLI_KERN_VARS * KLDIR_ENT_SIZE) + \
+ KLDIR_OFF_POINTER + CAC_BASE
+ dsll t1, NASID_SHFT
+ or t0, t0, t1
+ ld t0, 0(t0) # t0 points to kern_vars struct
+ lh t1, KV_RO_NASID_OFFSET(t0)
+ lh t2, KV_RW_NASID_OFFSET(t0)
+ MAPPED_KERNEL_SETUP_TLB
+
+ /*
+ * We might not get launched at the address the kernel is linked to,
+ * so we jump there.
+ */
+ PTR_LA t0, 0f
+ jr t0
+0:
+ .endm
+
+#endif /* __ASM_MACH_IP27_KERNEL_ENTRY_H */
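A worked example of what MAPPED_KERNEL_SETUP_TLB computes for EntryLo0, assuming nasid 0 so the node shift contributes nothing (illustrative arithmetic only, not part of the patch):

/*
 *   physaddr = 0x1c000                                           (text offset above)
 *   pfn      = physaddr >> 12                                    = 0x1c
 *   EntryLo0 = (pfn << 6)
 *            | ((PAGE_GLOBAL | PAGE_VALID | CACHE_CACHABLE_COW) >> 6)
 *            = 0x700 | 0x2b                                      = 0x72b
 */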
diff --git a/arch/mips/include/asm/mach-ip27/mangle-port.h b/arch/mips/include/asm/mach-ip27/mangle-port.h
new file mode 100644
index 000000000..f71c38bbf
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip27/mangle-port.h
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ */
+#ifndef __ASM_MACH_IP27_MANGLE_PORT_H
+#define __ASM_MACH_IP27_MANGLE_PORT_H
+
+#define __swizzle_addr_b(port) ((port) ^ 3)
+#define __swizzle_addr_w(port) ((port) ^ 2)
+#define __swizzle_addr_l(port) (port)
+#define __swizzle_addr_q(port) (port)
+
+# define ioswabb(a, x) (x)
+# define __mem_ioswabb(a, x) (x)
+# define ioswabw(a, x) (x)
+# define __mem_ioswabw(a, x) ((__force u16)cpu_to_le16(x))
+# define ioswabl(a, x) (x)
+# define __mem_ioswabl(a, x) ((__force u32)cpu_to_le32(x))
+# define ioswabq(a, x) (x)
+# define __mem_ioswabq(a, x) ((__force u64)cpu_to_le64(x))
+
+#endif /* __ASM_MACH_IP27_MANGLE_PORT_H */
diff --git a/arch/mips/include/asm/mach-ip27/mmzone.h b/arch/mips/include/asm/mach-ip27/mmzone.h
new file mode 100644
index 000000000..08c36e50a
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip27/mmzone.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MACH_MMZONE_H
+#define _ASM_MACH_MMZONE_H
+
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/klkernvars.h>
+
+#define pa_to_nid(addr) NASID_GET(addr)
+
+struct hub_data {
+ kern_vars_t kern_vars;
+ DECLARE_BITMAP(h_bigwin_used, HUB_NUM_BIG_WINDOW);
+ cpumask_t h_cpus;
+};
+
+struct node_data {
+ struct pglist_data pglist;
+ struct hub_data hub;
+};
+
+extern struct node_data *__node_data[];
+
+#define NODE_DATA(n) (&__node_data[(n)]->pglist)
+#define hub_data(n) (&__node_data[(n)]->hub)
+
+#endif /* _ASM_MACH_MMZONE_H */
diff --git a/arch/mips/include/asm/mach-ip27/spaces.h b/arch/mips/include/asm/mach-ip27/spaces.h
new file mode 100644
index 000000000..66421e9a6
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip27/spaces.h
@@ -0,0 +1,35 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 99 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999 by Silicon Graphics, Inc.
+ */
+#ifndef _ASM_MACH_IP27_SPACES_H
+#define _ASM_MACH_IP27_SPACES_H
+
+#include <linux/const.h>
+
+/*
+ * IP27 uses the R10000's uncached attribute feature. Attribute 3 selects
+ * uncached memory addressing. Hide the definitions on 32-bit compilation
+ * of the compat-vdso code.
+ */
+#ifdef CONFIG_64BIT
+#define HSPEC_BASE _AC(0x9000000000000000, UL)
+#define IO_BASE _AC(0x9200000000000000, UL)
+#define MSPEC_BASE _AC(0x9400000000000000, UL)
+#define UNCAC_BASE _AC(0x9600000000000000, UL)
+#define CAC_BASE _AC(0xa800000000000000, UL)
+#endif
+
+#define TO_MSPEC(x) (MSPEC_BASE | ((x) & TO_PHYS_MASK))
+#define TO_HSPEC(x) (HSPEC_BASE | ((x) & TO_PHYS_MASK))
+
+#define HIGHMEM_START (~0UL)
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* _ASM_MACH_IP27_SPACES_H */
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
new file mode 100644
index 000000000..d66cc53fe
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MACH_TOPOLOGY_H
+#define _ASM_MACH_TOPOLOGY_H 1
+
+#include <asm/sn/types.h>
+#include <asm/mmzone.h>
+
+struct cpuinfo_ip27 {
+ nasid_t p_nasid; /* my node ID in numa-as-id-space */
+ unsigned short p_speed; /* cpu speed in MHz */
+ unsigned char p_slice; /* Physical position on node board */
+};
+
+extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
+
+#define cpu_to_node(cpu) (cputonasid(cpu))
+#define cpumask_of_node(node) ((node) == -1 ? \
+ cpu_all_mask : \
+ &hub_data(node)->h_cpus)
+struct pci_bus;
+extern int pcibus_to_node(struct pci_bus *);
+
+#define cpumask_of_pcibus(bus) (cpumask_of_node(pcibus_to_node(bus)))
+
+extern unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
+
+#define node_distance(from, to) (__node_distances[(from)][(to)])
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_MACH_TOPOLOGY_H */
diff --git a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
new file mode 100644
index 000000000..613bbc10c
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
@@ -0,0 +1,54 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Ralf Baechle
+ * 6/2004 pf
+ */
+#ifndef __ASM_MACH_IP28_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_IP28_CPU_FEATURE_OVERRIDES_H
+
+#include <asm/cpu.h>
+
+/*
+ * IP28 only comes with R10000 family processors, all using the same config
+ */
+#define cpu_has_watch 1
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_divec 0
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 1
+#define cpu_has_mcheck 0
+#define cpu_has_ejtag 0
+
+#define cpu_has_llsc 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_dc_aliases 0 /* see probe_pcache() */
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_icache_snoops_remote_store 1
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+
+#define cpu_has_nofpuex 0
+#define cpu_has_64bits 1
+
+#define cpu_has_4kex 1
+#define cpu_has_4k_cache 1
+
+#define cpu_has_inclusive_pcaches 1
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 64
+
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#endif /* __ASM_MACH_IP28_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ip28/spaces.h b/arch/mips/include/asm/mach-ip28/spaces.h
new file mode 100644
index 000000000..c4a912733
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip28/spaces.h
@@ -0,0 +1,18 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
+ * 2004 pf
+ */
+#ifndef _ASM_MACH_IP28_SPACES_H
+#define _ASM_MACH_IP28_SPACES_H
+
+#define PHYS_OFFSET _AC(0x20000000, UL)
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* _ASM_MACH_IP28_SPACES_H */
diff --git a/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h
new file mode 100644
index 000000000..2635b6ba1
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IP30/Octane cpu-features overrides.
+ *
+ * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
+ * 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
+ * 2009 Johannes Dickgreber <tanzy@gmx.de>
+ * 2015 Joshua Kinard <kumba@gentoo.org>
+ *
+ */
+#ifndef __ASM_MACH_IP30_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_IP30_CPU_FEATURE_OVERRIDES_H
+
+#include <asm/cpu.h>
+
+/*
+ * IP30 only supports R1[024]000 processors, all using the same config
+ */
+#define cpu_has_tlb 1
+#define cpu_has_tlbinv 0
+#define cpu_has_segments 0
+#define cpu_has_eva 0
+#define cpu_has_htw 0
+#define cpu_has_rixiex 0
+#define cpu_has_maar 0
+#define cpu_has_rw_llb 0
+#define cpu_has_3kex 0
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_nofpuex 0
+#define cpu_has_32fpr 1
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_64bits 1
+#define cpu_has_divec 0
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 1
+#define cpu_has_mcheck 0
+#define cpu_has_ejtag 0
+#define cpu_has_llsc 1
+#define cpu_has_mips16 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+#define cpu_has_rixi 0
+#define cpu_has_xpa 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_dc_aliases 0
+#define cpu_has_ic_fills_f_dc 0
+
+#define cpu_icache_snoops_remote_store 1
+
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+#define cpu_has_mips32r6 0
+#define cpu_has_mips64r6 0
+
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+#define cpu_has_inclusive_pcaches 1
+#define cpu_has_perf_cntr_intr_bit 0
+#define cpu_has_vz 0
+#define cpu_has_fre 0
+#define cpu_has_cdmm 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 64
+#define cpu_scache_line_size() 128
+
+#endif /* __ASM_MACH_IP30_CPU_FEATURE_OVERRIDES_H */
+
diff --git a/arch/mips/include/asm/mach-ip30/kernel-entry-init.h b/arch/mips/include/asm/mach-ip30/kernel-entry-init.h
new file mode 100644
index 000000000..be0472c97
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip30/kernel-entry-init.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_MACH_IP30_KERNEL_ENTRY_H
+#define __ASM_MACH_IP30_KERNEL_ENTRY_H
+
+ .macro kernel_entry_setup
+ .endm
+
+ .macro smp_slave_setup
+ move gp, a0
+ .endm
+
+#endif /* __ASM_MACH_IP30_KERNEL_ENTRY_H */
diff --git a/arch/mips/include/asm/mach-ip30/mangle-port.h b/arch/mips/include/asm/mach-ip30/mangle-port.h
new file mode 100644
index 000000000..439c6a601
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip30/mangle-port.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ */
+#ifndef __ASM_MACH_IP30_MANGLE_PORT_H
+#define __ASM_MACH_IP30_MANGLE_PORT_H
+
+#define __swizzle_addr_b(port) ((port)^3)
+#define __swizzle_addr_w(port) ((port)^2)
+#define __swizzle_addr_l(port) (port)
+#define __swizzle_addr_q(port) (port)
+
+#define ioswabb(a, x) (x)
+#define __mem_ioswabb(a, x) (x)
+#define ioswabw(a, x) (x)
+#define __mem_ioswabw(a, x) ((__force u16)cpu_to_le16(x))
+#define ioswabl(a, x) (x)
+#define __mem_ioswabl(a, x) ((__force u32)cpu_to_le32(x))
+#define ioswabq(a, x) (x)
+#define __mem_ioswabq(a, x) ((__force u64)cpu_to_le64(x))
+
+#endif /* __ASM_MACH_IP30_MANGLE_PORT_H */
diff --git a/arch/mips/include/asm/mach-ip30/spaces.h b/arch/mips/include/asm/mach-ip30/spaces.h
new file mode 100644
index 000000000..c8a302dfb
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip30/spaces.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2016 Joshua Kinard <kumba@gentoo.org>
+ *
+ */
+#ifndef _ASM_MACH_IP30_SPACES_H
+#define _ASM_MACH_IP30_SPACES_H
+
+/*
+ * Memory in IP30/Octane is offset 512MB in the physical address space.
+ */
+#define PHYS_OFFSET _AC(0x20000000, UL)
+
+#ifdef CONFIG_64BIT
+#define CAC_BASE _AC(0xA800000000000000, UL)
+#endif
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* _ASM_MACH_IP30_SPACES_H */
diff --git a/arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h
new file mode 100644
index 000000000..63b4c8890
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip32/cpu-feature-overrides.h
@@ -0,0 +1,51 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Ilya A. Volynets-Evenbakh
+ * Copyright (C) 2005, 07 Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_MACH_IP32_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_IP32_CPU_FEATURE_OVERRIDES_H
+
+
+/*
+ * R5000 has an interesting "restriction": ll(d)/sc(d)
+ * instructions to the XKPHYS region simply do uncached bus
+ * requests, which breaks all the atomic bitops functions,
+ * so for the 64-bit IP32 kernel we just don't use ll/sc.
+ * This does not affect luserland.
+ */
+#if (defined(CONFIG_CPU_R5000) || defined(CONFIG_CPU_NEVADA)) && defined(CONFIG_64BIT)
+#define cpu_has_llsc 0
+#else
+#define cpu_has_llsc 1
+#endif
+
+/* Settings which are common for all ip32 CPUs */
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_32fpr 1
+#define cpu_has_counter 1
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_mcheck 0
+#define cpu_has_ejtag 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_4k_cache 1
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+
+
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#endif /* __ASM_MACH_IP32_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ip32/kmalloc.h b/arch/mips/include/asm/mach-ip32/kmalloc.h
new file mode 100644
index 000000000..07a0146ea
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip32/kmalloc.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_IP32_KMALLOC_H
+#define __ASM_MACH_IP32_KMALLOC_H
+
+
+#if defined(CONFIG_CPU_R5000) || defined(CONFIG_CPU_RM7000)
+#define ARCH_DMA_MINALIGN 32
+#else
+#define ARCH_DMA_MINALIGN 128
+#endif
+
+#endif /* __ASM_MACH_IP32_KMALLOC_H */
diff --git a/arch/mips/include/asm/mach-ip32/mangle-port.h b/arch/mips/include/asm/mach-ip32/mangle-port.h
new file mode 100644
index 000000000..4bc3d20e8
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip32/mangle-port.h
@@ -0,0 +1,26 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Ladislav Michl
+ * Copyright (C) 2004 Ralf Baechle
+ */
+#ifndef __ASM_MACH_IP32_MANGLE_PORT_H
+#define __ASM_MACH_IP32_MANGLE_PORT_H
+
+#define __swizzle_addr_b(port) ((port) ^ 3)
+#define __swizzle_addr_w(port) ((port) ^ 2)
+#define __swizzle_addr_l(port) (port)
+#define __swizzle_addr_q(port) (port)
+
+# define ioswabb(a, x) (x)
+# define __mem_ioswabb(a, x) (x)
+# define ioswabw(a, x) (x)
+# define __mem_ioswabw(a, x) ((__force u16)cpu_to_le16(x))
+# define ioswabl(a, x) (x)
+# define __mem_ioswabl(a, x) ((__force u32)cpu_to_le32(x))
+# define ioswabq(a, x) (x)
+# define __mem_ioswabq(a, x) ((__force u64)cpu_to_le64(x))
+
+#endif /* __ASM_MACH_IP32_MANGLE_PORT_H */
diff --git a/arch/mips/include/asm/mach-jazz/floppy.h b/arch/mips/include/asm/mach-jazz/floppy.h
new file mode 100644
index 000000000..294ebb834
--- /dev/null
+++ b/arch/mips/include/asm/mach-jazz/floppy.h
@@ -0,0 +1,133 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998, 2003 by Ralf Baechle
+ */
+#ifndef __ASM_MACH_JAZZ_FLOPPY_H
+#define __ASM_MACH_JAZZ_FLOPPY_H
+
+#include <linux/delay.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <asm/addrspace.h>
+#include <asm/jazz.h>
+#include <asm/jazzdma.h>
+
+static inline unsigned char fd_inb(unsigned int base, unsigned int reg)
+{
+ unsigned char c;
+
+ c = *(volatile unsigned char *) (base + reg);
+ udelay(1);
+
+ return c;
+}
+
+static inline void fd_outb(unsigned char value, unsigned int base, unsigned int reg)
+{
+ *(volatile unsigned char *) (base + reg) = value;
+}
+
+/*
+ * How to access the floppy DMA functions.
+ */
+static inline void fd_enable_dma(void)
+{
+ vdma_enable(JAZZ_FLOPPY_DMA);
+}
+
+static inline void fd_disable_dma(void)
+{
+ vdma_disable(JAZZ_FLOPPY_DMA);
+}
+
+static inline int fd_request_dma(void)
+{
+ return 0;
+}
+
+static inline void fd_free_dma(void)
+{
+}
+
+static inline void fd_clear_dma_ff(void)
+{
+}
+
+static inline void fd_set_dma_mode(char mode)
+{
+ vdma_set_mode(JAZZ_FLOPPY_DMA, mode);
+}
+
+static inline void fd_set_dma_addr(char *a)
+{
+ vdma_set_addr(JAZZ_FLOPPY_DMA, vdma_phys2log(CPHYSADDR((unsigned long)a)));
+}
+
+static inline void fd_set_dma_count(unsigned int count)
+{
+ vdma_set_count(JAZZ_FLOPPY_DMA, count);
+}
+
+static inline int fd_get_dma_residue(void)
+{
+ return vdma_get_residue(JAZZ_FLOPPY_DMA);
+}
+
+static inline void fd_enable_irq(void)
+{
+}
+
+static inline void fd_disable_irq(void)
+{
+}
+
+static inline int fd_request_irq(void)
+{
+ return request_irq(FLOPPY_IRQ, floppy_interrupt,
+ 0, "floppy", NULL);
+}
+
+static inline void fd_free_irq(void)
+{
+ free_irq(FLOPPY_IRQ, NULL);
+}
+
+static inline unsigned long fd_getfdaddr1(void)
+{
+ return JAZZ_FDC_BASE;
+}
+
+static inline unsigned long fd_dma_mem_alloc(unsigned long size)
+{
+ unsigned long mem;
+
+ mem = __get_dma_pages(GFP_KERNEL, get_order(size));
+ if (!mem)
+ return 0;
+ vdma_alloc(CPHYSADDR(mem), size); /* XXX error checking */
+
+ return mem;
+}
+
+static inline void fd_dma_mem_free(unsigned long addr, unsigned long size)
+{
+ vdma_free(vdma_phys2log(CPHYSADDR(addr)));
+ free_pages(addr, get_order(size));
+}
+
+static inline unsigned long fd_drive_type(unsigned long n)
+{
+ /* XXX This is wrong for machines with ED 2.88mb disk drives like the
+ Olivetti M700. Anyway, we should suck this from the ARC
+ firmware. */
+ if (n == 0)
+ return 4; /* 3,5", 1.44mb */
+
+ return 0;
+}
+
+#endif /* __ASM_MACH_JAZZ_FLOPPY_H */
diff --git a/arch/mips/include/asm/mach-jazz/mc146818rtc.h b/arch/mips/include/asm/mach-jazz/mc146818rtc.h
new file mode 100644
index 000000000..987f727af
--- /dev/null
+++ b/arch/mips/include/asm/mach-jazz/mc146818rtc.h
@@ -0,0 +1,38 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998, 2001, 03 by Ralf Baechle
+ * Copyright (C) 2007 Thomas Bogendoerfer
+ *
+ * RTC routines for Jazz style attached Dallas chip.
+ */
+#ifndef __ASM_MACH_JAZZ_MC146818RTC_H
+#define __ASM_MACH_JAZZ_MC146818RTC_H
+
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/jazz.h>
+
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_IRQ 8
+
+static inline unsigned char CMOS_READ(unsigned long addr)
+{
+ outb_p(addr, RTC_PORT(0));
+ return *(volatile char *)JAZZ_RTC_BASE;
+}
+
+static inline void CMOS_WRITE(unsigned char data, unsigned long addr)
+{
+ outb_p(addr, RTC_PORT(0));
+ *(volatile char *)JAZZ_RTC_BASE = data;
+}
+
+#define RTC_ALWAYS_BCD 0
+
+#define mc146818_decode_year(year) ((year) + 1980)
+
+#endif /* __ASM_MACH_JAZZ_MC146818RTC_H */
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-lantiq/falcon/cpu-feature-overrides.h
new file mode 100644
index 000000000..10226976f
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/falcon/cpu-feature-overrides.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Lantiq FALCON specific CPU feature overrides
+ *
+ * Copyright (C) 2013 Thomas Langer, Lantiq Deutschland
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#ifndef __ASM_MACH_FALCON_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_FALCON_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_sb1_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+
+#define cpu_has_prefetch 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+
+#define cpu_has_mips16 1
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 1
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#define cpu_has_dsp 1
+#define cpu_has_mipsmt 1
+
+#define cpu_has_vint 1
+#define cpu_has_veic 1
+
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+
+#endif /* __ASM_MACH_FALCON_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
new file mode 100644
index 000000000..6eeda90f7
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#ifndef _FALCON_IRQ__
+#define _FALCON_IRQ__
+
+#define INT_NUM_IRQ0 8
+#define INT_NUM_IM0_IRL0 (INT_NUM_IRQ0 + 0)
+#define INT_NUM_IM1_IRL0 (INT_NUM_IM0_IRL0 + 32)
+#define INT_NUM_IM2_IRL0 (INT_NUM_IM1_IRL0 + 32)
+#define INT_NUM_IM3_IRL0 (INT_NUM_IM2_IRL0 + 32)
+#define INT_NUM_IM4_IRL0 (INT_NUM_IM3_IRL0 + 32)
+#define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32)
+#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
+
+#define MAX_IM 5
+
+#endif /* _FALCON_IRQ__ */
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/irq.h b/arch/mips/include/asm/mach-lantiq/falcon/irq.h
new file mode 100644
index 000000000..c14312fb0
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/falcon/irq.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#ifndef __FALCON_IRQ_H
+#define __FALCON_IRQ_H
+
+#include <falcon_irq.h>
+
+#define NR_IRQS 328
+
+#include <asm/mach-generic/irq.h>
+
+#endif
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
new file mode 100644
index 000000000..5855ba1bd
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#ifndef _LTQ_FALCON_H__
+#define _LTQ_FALCON_H__
+
+#ifdef CONFIG_SOC_FALCON
+
+#include <linux/pinctrl/pinctrl.h>
+#include <lantiq.h>
+
+/* Chip IDs */
+#define SOC_ID_FALCON 0x01B8
+
+/* SoC Types */
+#define SOC_TYPE_FALCON 0x01
+
+/*
+ * During early_printk no ioremap is possible at this early stage,
+ * so use KSEG1 instead.
+ */
+#define LTQ_ASC0_BASE_ADDR 0x1E100C00
+#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC0_BASE_ADDR)
+
+/* WDT */
+#define LTQ_RST_CAUSE_WDTRST 0x0002
+
+/* CHIP ID */
+#define LTQ_STATUS_BASE_ADDR 0x1E802000
+
+#define FALCON_CHIPID ((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x0c))
+#define FALCON_CHIPTYPE ((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x38))
+#define FALCON_CHIPCONF ((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x40))
+
+/* SYSCTL - start/stop/restart/configure/... different parts of the SoC */
+#define SYSCTL_SYS1 0
+#define SYSCTL_SYSETH 1
+#define SYSCTL_SYSGPE 2
+
+/* BOOT_SEL - find what boot media we have */
+#define BS_FLASH 0x1
+#define BS_SPI 0x4
+
+/* global register ranges */
+extern __iomem void *ltq_ebu_membase;
+extern __iomem void *ltq_sys1_membase;
+#define ltq_ebu_w32(x, y) ltq_w32((x), ltq_ebu_membase + (y))
+#define ltq_ebu_r32(x) ltq_r32(ltq_ebu_membase + (x))
+
+#define ltq_sys1_w32(x, y) ltq_w32((x), ltq_sys1_membase + (y))
+#define ltq_sys1_r32(x) ltq_r32(ltq_sys1_membase + (x))
+#define ltq_sys1_w32_mask(clear, set, reg) \
+ ltq_sys1_w32((ltq_sys1_r32(reg) & ~(clear)) | (set), reg)
+
+/* allow the gpio and pinctrl drivers to talk to each other */
+extern int pinctrl_falcon_get_range_size(int id);
+extern void pinctrl_falcon_add_gpio_range(struct pinctrl_gpio_range *range);
+
+/*
+ * to keep the irq code generic we need to define this to 0 as falcon
+ * has no EIU/EBU
+ */
+#define LTQ_EBU_PCC_ISTAT 0
+
+#endif /* CONFIG_SOC_FALCON */
+#endif /* _LTQ_FALCON_H__ */
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h
new file mode 100644
index 000000000..6ceb0287d
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/lantiq.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+#ifndef _LANTIQ_H__
+#define _LANTIQ_H__
+
+#include <linux/irq.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+
+/* generic reg access functions */
+#define ltq_r32(reg) __raw_readl(reg)
+#define ltq_w32(val, reg) __raw_writel(val, reg)
+#define ltq_w32_mask(clear, set, reg) \
+ ltq_w32((ltq_r32(reg) & ~(clear)) | (set), reg)
+#define ltq_r8(reg) __raw_readb(reg)
+#define ltq_w8(val, reg) __raw_writeb(val, reg)
+
+/* register access macros for EBU and CGU */
+#define ltq_ebu_w32(x, y) ltq_w32((x), ltq_ebu_membase + (y))
+#define ltq_ebu_r32(x) ltq_r32(ltq_ebu_membase + (x))
+#define ltq_ebu_w32_mask(x, y, z) \
+ ltq_w32_mask(x, y, ltq_ebu_membase + (z))
+extern __iomem void *ltq_ebu_membase;
+
+/* spinlock all ebu i/o */
+extern spinlock_t ebu_lock;
+
+/* some irq helpers */
+extern void ltq_disable_irq(struct irq_data *data);
+extern void ltq_mask_and_ack_irq(struct irq_data *data);
+extern void ltq_enable_irq(struct irq_data *data);
+extern int ltq_eiu_get_irq(int exin);
+
+/* clock handling */
+extern int clk_activate(struct clk *clk);
+extern void clk_deactivate(struct clk *clk);
+extern struct clk *clk_get_cpu(void);
+extern struct clk *clk_get_fpi(void);
+extern struct clk *clk_get_io(void);
+extern struct clk *clk_get_ppe(void);
+
+/* find out what bootsource we have */
+extern unsigned char ltq_boot_select(void);
+/* find out the soc type */
+extern int ltq_soc_type(void);
+
+#define IOPORT_RESOURCE_START 0x10000000
+#define IOPORT_RESOURCE_END 0xffffffff
+#define IOMEM_RESOURCE_START 0x10000000
+#define IOMEM_RESOURCE_END 0xffffffff
+
+#endif
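A sketch of how the masked-write helper is typically used for a read-modify-write of an EBU register, holding ebu_lock as the header requests (illustrative only, not part of the patch; EXAMPLE_REG_OFF is a made-up offset):

#define EXAMPLE_REG_OFF 0x10    /* hypothetical byte offset into the EBU block */

unsigned long flags;

spin_lock_irqsave(&ebu_lock, flags);
ltq_ebu_w32_mask(0xf /* bits to clear */, 0x3 /* bits to set */, EXAMPLE_REG_OFF);
spin_unlock_irqrestore(&ebu_lock, flags);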
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
new file mode 100644
index 000000000..70ebb4d6f
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#ifndef _LANTIQ_PLATFORM_H__
+#define _LANTIQ_PLATFORM_H__
+
+#include <linux/socket.h>
+
+/* struct used to pass info to network drivers */
+struct ltq_eth_data {
+ struct sockaddr mac;
+ int mii_mode;
+};
+
+#endif
diff --git a/arch/mips/include/asm/mach-lantiq/xway/irq.h b/arch/mips/include/asm/mach-lantiq/xway/irq.h
new file mode 100644
index 000000000..2980e7771
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/xway/irq.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#ifndef __LANTIQ_IRQ_H
+#define __LANTIQ_IRQ_H
+
+#include <lantiq_irq.h>
+
+#define NR_IRQS 256
+
+#include <asm/mach-generic/irq.h>
+
+#endif
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
new file mode 100644
index 000000000..5f0d0ba99
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#ifndef _LANTIQ_XWAY_IRQ_H__
+#define _LANTIQ_XWAY_IRQ_H__
+
+#define INT_NUM_IRQ0 8
+#define INT_NUM_IM0_IRL0 (INT_NUM_IRQ0 + 0)
+#define INT_NUM_IM1_IRL0 (INT_NUM_IRQ0 + 32)
+#define INT_NUM_IM2_IRL0 (INT_NUM_IRQ0 + 64)
+#define INT_NUM_IM3_IRL0 (INT_NUM_IRQ0 + 96)
+#define INT_NUM_IM4_IRL0 (INT_NUM_IRQ0 + 128)
+#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
+
+#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0)
+
+#define MAX_IM 5
+
+#endif
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
new file mode 100644
index 000000000..4790cfa19
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#ifndef _LTQ_XWAY_H__
+#define _LTQ_XWAY_H__
+
+#ifdef CONFIG_SOC_TYPE_XWAY
+
+#include <lantiq.h>
+
+/* Chip IDs */
+#define SOC_ID_DANUBE1 0x129
+#define SOC_ID_DANUBE2 0x12B
+#define SOC_ID_TWINPASS 0x12D
+#define SOC_ID_AMAZON_SE_1 0x152 /* 50601 */
+#define SOC_ID_AMAZON_SE_2 0x153 /* 50600 */
+#define SOC_ID_ARX188 0x16C
+#define SOC_ID_ARX168_1 0x16D
+#define SOC_ID_ARX168_2 0x16E
+#define SOC_ID_ARX182 0x16F
+#define SOC_ID_GRX188 0x170
+#define SOC_ID_GRX168 0x171
+
+#define SOC_ID_VRX288 0x1C0 /* v1.1 */
+#define SOC_ID_VRX282 0x1C1 /* v1.1 */
+#define SOC_ID_VRX268 0x1C2 /* v1.1 */
+#define SOC_ID_GRX268 0x1C8 /* v1.1 */
+#define SOC_ID_GRX288 0x1C9 /* v1.1 */
+#define SOC_ID_VRX288_2 0x00B /* v1.2 */
+#define SOC_ID_VRX268_2 0x00C /* v1.2 */
+#define SOC_ID_GRX288_2 0x00D /* v1.2 */
+#define SOC_ID_GRX282_2 0x00E /* v1.2 */
+#define SOC_ID_VRX220 0x000
+
+#define SOC_ID_ARX362 0x004
+#define SOC_ID_ARX368 0x005
+#define SOC_ID_ARX382 0x007
+#define SOC_ID_ARX388 0x008
+#define SOC_ID_URX388 0x009
+#define SOC_ID_GRX383 0x010
+#define SOC_ID_GRX369 0x011
+#define SOC_ID_GRX387 0x00F
+#define SOC_ID_GRX389 0x012
+
+/* SoC Types */
+#define SOC_TYPE_DANUBE 0x01
+#define SOC_TYPE_TWINPASS 0x02
+#define SOC_TYPE_AR9 0x03
+#define SOC_TYPE_VR9 0x04 /* v1.1 */
+#define SOC_TYPE_VR9_2 0x05 /* v1.2 */
+#define SOC_TYPE_AMAZON_SE 0x06
+#define SOC_TYPE_AR10 0x07
+#define SOC_TYPE_GRX390 0x08
+#define SOC_TYPE_VRX220 0x09
+
+/* BOOT_SEL - find what boot media we have */
+#define BS_EXT_ROM 0x0
+#define BS_FLASH 0x1
+#define BS_MII0 0x2
+#define BS_PCI 0x3
+#define BS_UART1 0x4
+#define BS_SPI 0x5
+#define BS_NAND 0x6
+#define BS_RMII0 0x7
+
+/* helpers used to access the cgu */
+#define ltq_cgu_w32(x, y) ltq_w32((x), ltq_cgu_membase + (y))
+#define ltq_cgu_r32(x) ltq_r32(ltq_cgu_membase + (x))
+extern __iomem void *ltq_cgu_membase;
+
+/*
+ * During early_printk no ioremap is possible,
+ * so use KSEG1 instead.
+ */
+#define LTQ_ASC1_BASE_ADDR 0x1E100C00
+#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC1_BASE_ADDR)
+
+/* EBU - external bus unit */
+#define LTQ_EBU_BUSCON0 0x0060
+#define LTQ_EBU_PCC_CON 0x0090
+#define LTQ_EBU_PCC_IEN 0x00A4
+#define LTQ_EBU_PCC_ISTAT 0x00A0
+#define LTQ_EBU_BUSCON1 0x0064
+#define LTQ_EBU_ADDRSEL1 0x0024
+#define EBU_WRDIS 0x80000000
+
+/* WDT */
+#define LTQ_RST_CAUSE_WDTRST 0x20
+
+/* MPS - multi processor unit (voice) */
+#define LTQ_MPS_BASE_ADDR (KSEG1 + 0x1F107000)
+#define LTQ_MPS_CHIPID ((u32 *)(LTQ_MPS_BASE_ADDR + 0x0344))
+
+/* allow booting xrx200 phys */
+int xrx200_gphy_boot(struct device *dev, unsigned int id, dma_addr_t dev_addr);
+
+/* PMU - power up/down individual on-chip modules */
+#define PMU_PPE BIT(13)
+extern void ltq_pmu_enable(unsigned int module);
+extern void ltq_pmu_disable(unsigned int module);
+
+#endif /* CONFIG_SOC_TYPE_XWAY */
+#endif /* _LTQ_XWAY_H__ */
diff --git a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
new file mode 100644
index 000000000..8218a1356
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2011 John Crispin <john@phrozen.org>
+ */
+
+#ifndef LTQ_DMA_H__
+#define LTQ_DMA_H__
+
+#define LTQ_DESC_SIZE 0x08 /* each descriptor is 64bit */
+#define LTQ_DESC_NUM 0x40 /* 64 descriptors / channel */
+
+#define LTQ_DMA_OWN BIT(31) /* owner bit */
+#define LTQ_DMA_C BIT(30) /* complete bit */
+#define LTQ_DMA_SOP BIT(29) /* start of packet */
+#define LTQ_DMA_EOP BIT(28) /* end of packet */
+#define LTQ_DMA_TX_OFFSET(x) ((x & 0x1f) << 23) /* data bytes offset */
+#define LTQ_DMA_RX_OFFSET(x) ((x & 0x7) << 23) /* data bytes offset */
+#define LTQ_DMA_SIZE_MASK (0xffff) /* the size field is 16 bit */
+
+struct ltq_dma_desc {
+ u32 ctl;
+ u32 addr;
+};
+
+struct ltq_dma_channel {
+ int nr; /* the channel number */
+ int irq; /* the mapped irq */
+ int desc; /* the current descriptor */
+ struct ltq_dma_desc *desc_base; /* the descriptor base */
+ int phys; /* physical addr */
+ struct device *dev;
+};
+
+enum {
+ DMA_PORT_ETOP = 0,
+ DMA_PORT_DEU,
+};
+
+extern void ltq_dma_enable_irq(struct ltq_dma_channel *ch);
+extern void ltq_dma_disable_irq(struct ltq_dma_channel *ch);
+extern void ltq_dma_ack_irq(struct ltq_dma_channel *ch);
+extern void ltq_dma_open(struct ltq_dma_channel *ch);
+extern void ltq_dma_close(struct ltq_dma_channel *ch);
+extern void ltq_dma_alloc_tx(struct ltq_dma_channel *ch);
+extern void ltq_dma_alloc_rx(struct ltq_dma_channel *ch);
+extern void ltq_dma_free(struct ltq_dma_channel *ch);
+extern void ltq_dma_init_port(int p);
+
+#endif
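A hedged sketch of how a driver might bring up one RX channel with the helpers declared above, following the usual alloc/open/enable ordering; the channel number and function name are illustrative assumptions, not part of this header:

	#include <linux/device.h>
	#include <xway_dma.h>

	static struct ltq_dma_channel example_rx_ch;

	static void example_rx_channel_setup(struct device *dev, int nr, int irq)
	{
		example_rx_ch.nr = nr;			/* DMA channel number (assumed) */
		example_rx_ch.dev = dev;
		example_rx_ch.irq = irq;

		ltq_dma_alloc_rx(&example_rx_ch);	/* sets up LTQ_DESC_NUM descriptors */
		ltq_dma_open(&example_rx_ch);
		ltq_dma_enable_irq(&example_rx_ch);
	}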
diff --git a/arch/mips/include/asm/mach-loongson2ef/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson2ef/cpu-feature-overrides.h
new file mode 100644
index 000000000..b2ee859ca
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/cpu-feature-overrides.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2009 Wu Zhangjin <wuzhangjin@gmail.com>
+ * Copyright (C) 2009 Philippe Vachon <philippe@cowpig.ca>
+ * Copyright (C) 2009 Zhang Le <r0bertz@gentoo.org>
+ *
+ * reference: /proc/cpuinfo,
+ * arch/mips/kernel/cpu-probe.c(cpu_probe_legacy),
+ * arch/mips/kernel/proc.c(show_cpuinfo),
+ * loongson2f user manual.
+ */
+
+#ifndef __ASM_MACH_LOONGSON2EF_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_LOONGSON2EF_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_32fpr 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_4kex 1
+#define cpu_has_64bits 1
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_counter 1
+#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
+#define cpu_has_divec 0
+#define cpu_has_ejtag 0
+#define cpu_has_inclusive_pcaches 1
+#define cpu_has_llsc 1
+#define cpu_has_mcheck 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_mips3d 0
+#define cpu_has_mipsmt 0
+#define cpu_has_smartmips 0
+#define cpu_has_tlb 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_vce 0
+#define cpu_has_veic 0
+#define cpu_has_vint 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_watch 1
+
+#endif /* __ASM_MACH_LOONGSON2EF_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536.h b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536.h
new file mode 100644
index 000000000..9795b3361
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The header file of the cs5536 south bridge.
+ *
+ * Copyright (C) 2007 Lemote, Inc.
+ * Author : jlliu <liujl@lemote.com>
+ */
+
+#ifndef _CS5536_H
+#define _CS5536_H
+
+#include <linux/types.h>
+
+extern void _rdmsr(u32 msr, u32 *hi, u32 *lo);
+extern void _wrmsr(u32 msr, u32 hi, u32 lo);
+
+/*
+ * MSR module base
+ */
+#define CS5536_SB_MSR_BASE (0x00000000)
+#define CS5536_GLIU_MSR_BASE (0x10000000)
+#define CS5536_ILLEGAL_MSR_BASE (0x20000000)
+#define CS5536_USB_MSR_BASE (0x40000000)
+#define CS5536_IDE_MSR_BASE (0x60000000)
+#define CS5536_DIVIL_MSR_BASE (0x80000000)
+#define CS5536_ACC_MSR_BASE (0xa0000000)
+#define CS5536_UNUSED_MSR_BASE (0xc0000000)
+#define CS5536_GLCP_MSR_BASE (0xe0000000)
+
+#define SB_MSR_REG(offset) (CS5536_SB_MSR_BASE | (offset))
+#define GLIU_MSR_REG(offset) (CS5536_GLIU_MSR_BASE | (offset))
+#define ILLEGAL_MSR_REG(offset) (CS5536_ILLEGAL_MSR_BASE | (offset))
+#define USB_MSR_REG(offset) (CS5536_USB_MSR_BASE | (offset))
+#define IDE_MSR_REG(offset) (CS5536_IDE_MSR_BASE | (offset))
+#define DIVIL_MSR_REG(offset) (CS5536_DIVIL_MSR_BASE | (offset))
+#define ACC_MSR_REG(offset) (CS5536_ACC_MSR_BASE | (offset))
+#define UNUSED_MSR_REG(offset) (CS5536_UNUSED_MSR_BASE | (offset))
+#define GLCP_MSR_REG(offset) (CS5536_GLCP_MSR_BASE | (offset))
+
+/*
+ * BAR SPACE OF VIRTUAL PCI :
+ * range for pci probe use, length is the actual size.
+ */
+/* IO space for all DIVIL modules */
+#define CS5536_IRQ_RANGE 0xffffffe0 /* USED FOR PCI PROBE */
+#define CS5536_IRQ_LENGTH 0x20 /* THE REGS ACTUAL LENGTH */
+#define CS5536_SMB_RANGE 0xfffffff8
+#define CS5536_SMB_LENGTH 0x08
+#define CS5536_GPIO_RANGE 0xffffff00
+#define CS5536_GPIO_LENGTH 0x100
+#define CS5536_MFGPT_RANGE 0xffffffc0
+#define CS5536_MFGPT_LENGTH 0x40
+#define CS5536_ACPI_RANGE 0xffffffe0
+#define CS5536_ACPI_LENGTH 0x20
+#define CS5536_PMS_RANGE 0xffffff80
+#define CS5536_PMS_LENGTH 0x80
+/* IO space for IDE */
+#define CS5536_IDE_RANGE 0xfffffff0
+#define CS5536_IDE_LENGTH 0x10
+/* IO space for ACC */
+#define CS5536_ACC_RANGE 0xffffff80
+#define CS5536_ACC_LENGTH 0x80
+/* MEM space for ALL USB modules */
+#define CS5536_OHCI_RANGE 0xfffff000
+#define CS5536_OHCI_LENGTH 0x1000
+#define CS5536_EHCI_RANGE 0xfffff000
+#define CS5536_EHCI_LENGTH 0x1000
+
+/*
+ * PCI MSR ACCESS
+ */
+#define PCI_MSR_CTRL 0xF0
+#define PCI_MSR_ADDR 0xF4
+#define PCI_MSR_DATA_LO 0xF8
+#define PCI_MSR_DATA_HI 0xFC
+
+/**************** MSR *****************************/
+
+/*
+ * GLIU STANDARD MSR
+ */
+#define GLIU_CAP 0x00
+#define GLIU_CONFIG 0x01
+#define GLIU_SMI 0x02
+#define GLIU_ERROR 0x03
+#define GLIU_PM 0x04
+#define GLIU_DIAG 0x05
+
+/*
+ * GLIU SPEC. MSR
+ */
+#define GLIU_P2D_BM0 0x20
+#define GLIU_P2D_BM1 0x21
+#define GLIU_P2D_BM2 0x22
+#define GLIU_P2D_BMK0 0x23
+#define GLIU_P2D_BMK1 0x24
+#define GLIU_P2D_BM3 0x25
+#define GLIU_P2D_BM4 0x26
+#define GLIU_COH 0x80
+#define GLIU_PAE 0x81
+#define GLIU_ARB 0x82
+#define GLIU_ASMI 0x83
+#define GLIU_AERR 0x84
+#define GLIU_DEBUG 0x85
+#define GLIU_PHY_CAP 0x86
+#define GLIU_NOUT_RESP 0x87
+#define GLIU_NOUT_WDATA 0x88
+#define GLIU_WHOAMI 0x8B
+#define GLIU_SLV_DIS 0x8C
+#define GLIU_IOD_BM0 0xE0
+#define GLIU_IOD_BM1 0xE1
+#define GLIU_IOD_BM2 0xE2
+#define GLIU_IOD_BM3 0xE3
+#define GLIU_IOD_BM4 0xE4
+#define GLIU_IOD_BM5 0xE5
+#define GLIU_IOD_BM6 0xE6
+#define GLIU_IOD_BM7 0xE7
+#define GLIU_IOD_BM8 0xE8
+#define GLIU_IOD_BM9 0xE9
+#define GLIU_IOD_SC0 0xEA
+#define GLIU_IOD_SC1 0xEB
+#define GLIU_IOD_SC2 0xEC
+#define GLIU_IOD_SC3 0xED
+#define GLIU_IOD_SC4 0xEE
+#define GLIU_IOD_SC5 0xEF
+#define GLIU_IOD_SC6 0xF0
+#define GLIU_IOD_SC7 0xF1
+
+/*
+ * SB STANDARD
+ */
+#define SB_CAP 0x00
+#define SB_CONFIG 0x01
+#define SB_SMI 0x02
+#define SB_ERROR 0x03
+#define SB_MAR_ERR_EN 0x00000001
+#define SB_TAR_ERR_EN 0x00000002
+#define SB_RSVD_BIT1 0x00000004
+#define SB_EXCEP_ERR_EN 0x00000008
+#define SB_SYSE_ERR_EN 0x00000010
+#define SB_PARE_ERR_EN 0x00000020
+#define SB_TAS_ERR_EN 0x00000040
+#define SB_MAR_ERR_FLAG 0x00010000
+#define SB_TAR_ERR_FLAG 0x00020000
+#define SB_RSVD_BIT2 0x00040000
+#define SB_EXCEP_ERR_FLAG 0x00080000
+#define SB_SYSE_ERR_FLAG 0x00100000
+#define SB_PARE_ERR_FLAG 0x00200000
+#define SB_TAS_ERR_FLAG 0x00400000
+#define SB_PM 0x04
+#define SB_DIAG 0x05
+
+/*
+ * SB SPEC.
+ */
+#define SB_CTRL 0x10
+#define SB_R0 0x20
+#define SB_R1 0x21
+#define SB_R2 0x22
+#define SB_R3 0x23
+#define SB_R4 0x24
+#define SB_R5 0x25
+#define SB_R6 0x26
+#define SB_R7 0x27
+#define SB_R8 0x28
+#define SB_R9 0x29
+#define SB_R10 0x2A
+#define SB_R11 0x2B
+#define SB_R12 0x2C
+#define SB_R13 0x2D
+#define SB_R14 0x2E
+#define SB_R15 0x2F
+
+/*
+ * GLCP STANDARD
+ */
+#define GLCP_CAP 0x00
+#define GLCP_CONFIG 0x01
+#define GLCP_SMI 0x02
+#define GLCP_ERROR 0x03
+#define GLCP_PM 0x04
+#define GLCP_DIAG 0x05
+
+/*
+ * GLCP SPEC.
+ */
+#define GLCP_CLK_DIS_DELAY 0x08
+#define GLCP_PM_CLK_DISABLE 0x09
+#define GLCP_GLB_PM 0x0B
+#define GLCP_DBG_OUT 0x0C
+#define GLCP_RSVD1 0x0D
+#define GLCP_SOFT_COM 0x0E
+#define SOFT_BAR_SMB_FLAG 0x00000001
+#define SOFT_BAR_GPIO_FLAG 0x00000002
+#define SOFT_BAR_MFGPT_FLAG 0x00000004
+#define SOFT_BAR_IRQ_FLAG 0x00000008
+#define SOFT_BAR_PMS_FLAG 0x00000010
+#define SOFT_BAR_ACPI_FLAG 0x00000020
+#define SOFT_BAR_IDE_FLAG 0x00000400
+#define SOFT_BAR_ACC_FLAG 0x00000800
+#define SOFT_BAR_OHCI_FLAG 0x00001000
+#define SOFT_BAR_EHCI_FLAG 0x00002000
+#define GLCP_RSVD2 0x0F
+#define GLCP_CLK_OFF 0x10
+#define GLCP_CLK_ACTIVE 0x11
+#define GLCP_CLK_DISABLE 0x12
+#define GLCP_CLK4ACK 0x13
+#define GLCP_SYS_RST 0x14
+#define GLCP_RSVD3 0x15
+#define GLCP_DBG_CLK_CTRL 0x16
+#define GLCP_CHIP_REV_ID 0x17
+
+/* PIC */
+#define PIC_YSEL_LOW 0x20
+#define PIC_YSEL_LOW_USB_SHIFT 8
+#define PIC_YSEL_LOW_ACC_SHIFT 16
+#define PIC_YSEL_LOW_FLASH_SHIFT 24
+#define PIC_YSEL_HIGH 0x21
+#define PIC_ZSEL_LOW 0x22
+#define PIC_ZSEL_HIGH 0x23
+#define PIC_IRQM_PRIM 0x24
+#define PIC_IRQM_LPC 0x25
+#define PIC_XIRR_STS_LOW 0x26
+#define PIC_XIRR_STS_HIGH 0x27
+#define PCI_SHDW 0x34
+
+/*
+ * DIVIL STANDARD
+ */
+#define DIVIL_CAP 0x00
+#define DIVIL_CONFIG 0x01
+#define DIVIL_SMI 0x02
+#define DIVIL_ERROR 0x03
+#define DIVIL_PM 0x04
+#define DIVIL_DIAG 0x05
+
+/*
+ * DIVIL SPEC.
+ */
+#define DIVIL_LBAR_IRQ 0x08
+#define DIVIL_LBAR_KEL 0x09
+#define DIVIL_LBAR_SMB 0x0B
+#define DIVIL_LBAR_GPIO 0x0C
+#define DIVIL_LBAR_MFGPT 0x0D
+#define DIVIL_LBAR_ACPI 0x0E
+#define DIVIL_LBAR_PMS 0x0F
+#define DIVIL_LEG_IO 0x14
+#define DIVIL_BALL_OPTS 0x15
+#define DIVIL_SOFT_IRQ 0x16
+#define DIVIL_SOFT_RESET 0x17
+
+/* MFGPT */
+#define MFGPT_IRQ 0x28
+
+/*
+ * IDE STANDARD
+ */
+#define IDE_CAP 0x00
+#define IDE_CONFIG 0x01
+#define IDE_SMI 0x02
+#define IDE_ERROR 0x03
+#define IDE_PM 0x04
+#define IDE_DIAG 0x05
+
+/*
+ * IDE SPEC.
+ */
+#define IDE_IO_BAR 0x08
+#define IDE_CFG 0x10
+#define IDE_DTC 0x12
+#define IDE_CAST 0x13
+#define IDE_ETC 0x14
+#define IDE_INTERNAL_PM 0x15
+
+/*
+ * ACC STANDARD
+ */
+#define ACC_CAP 0x00
+#define ACC_CONFIG 0x01
+#define ACC_SMI 0x02
+#define ACC_ERROR 0x03
+#define ACC_PM 0x04
+#define ACC_DIAG 0x05
+
+/*
+ * USB STANDARD
+ */
+#define USB_CAP 0x00
+#define USB_CONFIG 0x01
+#define USB_SMI 0x02
+#define USB_ERROR 0x03
+#define USB_PM 0x04
+#define USB_DIAG 0x05
+
+/*
+ * USB SPEC.
+ */
+#define USB_OHCI 0x08
+#define USB_EHCI 0x09
+
+/****************** NATIVE ***************************/
+/* GPIO : I/O SPACE; REG : 32BITS */
+#define GPIOL_OUT_VAL 0x00
+#define GPIOL_OUT_EN 0x04
+
+#endif /* _CS5536_H */
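As a usage sketch: the *_MSR_REG() macros above just OR a module base into a register offset, and the result is what _rdmsr()/_wrmsr() expect as their msr argument. A hedged read-modify-write example (illustrative only, not code from this header):

	#include <cs5536/cs5536.h>

	static void example_divil_leg_io_set(u32 set_bits)
	{
		u32 hi, lo;

		/* DIVIL_MSR_REG(DIVIL_LEG_IO) == CS5536_DIVIL_MSR_BASE | 0x14 */
		_rdmsr(DIVIL_MSR_REG(DIVIL_LEG_IO), &hi, &lo);
		lo |= set_bits;		/* modify the low 32 bits */
		_wrmsr(DIVIL_MSR_REG(DIVIL_LEG_IO), hi, lo);
	}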
diff --git a/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_mfgpt.h b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_mfgpt.h
new file mode 100644
index 000000000..52e8bb0fc
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_mfgpt.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cs5536 mfgpt header file
+ */
+
+#ifndef _CS5536_MFGPT_H
+#define _CS5536_MFGPT_H
+
+#include <cs5536/cs5536.h>
+#include <cs5536/cs5536_pci.h>
+
+#ifdef CONFIG_CS5536_MFGPT
+extern void setup_mfgpt0_timer(void);
+extern void disable_mfgpt0_counter(void);
+extern void enable_mfgpt0_counter(void);
+#else
+static inline void __maybe_unused setup_mfgpt0_timer(void)
+{
+}
+static inline void __maybe_unused disable_mfgpt0_counter(void)
+{
+}
+static inline void __maybe_unused enable_mfgpt0_counter(void)
+{
+}
+#endif
+
+#define MFGPT_TICK_RATE 14318000
+#define COMPARE ((MFGPT_TICK_RATE + HZ/2) / HZ)
+
+#define MFGPT_BASE mfgpt_base
+#define MFGPT0_CMP2 (MFGPT_BASE + 2)
+#define MFGPT0_CNT (MFGPT_BASE + 4)
+#define MFGPT0_SETUP (MFGPT_BASE + 6)
+
+#endif /*!_CS5536_MFGPT_H */
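To make COMPARE above concrete: MFGPT_TICK_RATE is the ~14.318 MHz reference clock expressed in Hz, and adding HZ/2 before the division rounds the ticks-per-jiffy value to the nearest integer. Assuming HZ = 250, COMPARE = (14318000 + 125) / 250 = 57272; assuming HZ = 1000, COMPARE = (14318000 + 500) / 1000 = 14318.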
diff --git a/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_pci.h b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_pci.h
new file mode 100644
index 000000000..a0d4b7528
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_pci.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The definition file of the cs5536 Virtual Support Module (VSM).
+ * The PCI configuration space can be accessed through the VSM, so
+ * the MSR read/write interface is no longer needed, except for the
+ * special MSR registers which are not implemented yet.
+ *
+ * Copyright (C) 2007 Lemote Inc.
+ * Author : jlliu, liujl@lemote.com
+ */
+
+#ifndef _CS5536_PCI_H
+#define _CS5536_PCI_H
+
+#include <linux/types.h>
+#include <linux/pci_regs.h>
+
+extern void cs5536_pci_conf_write4(int function, int reg, u32 value);
+extern u32 cs5536_pci_conf_read4(int function, int reg);
+
+#define CS5536_ACC_INTR 9
+#define CS5536_IDE_INTR 14
+#define CS5536_USB_INTR 11
+#define CS5536_MFGPT_INTR 5
+#define CS5536_UART1_INTR 4
+#define CS5536_UART2_INTR 3
+
+/************** PCI BUS DEVICE FUNCTION ***************/
+
+/*
+ * PCI bus device function
+ */
+#define PCI_BUS_CS5536 0
+#define PCI_IDSEL_CS5536 14
+
+/********** STANDARD PCI-2.2 EXPANSION ****************/
+
+/*
+ * PCI configuration space
+ * we have to virtualize the PCI configuration space header, so we
+ * define the necessary IDs and some other fields here.
+ */
+
+/* CONFIG of PCI VENDOR ID */
+#define CFG_PCI_VENDOR_ID(mod_dev_id, sys_vendor_id) \
+ (((mod_dev_id) << 16) | (sys_vendor_id))
+
+/* VENDOR ID */
+#define CS5536_VENDOR_ID 0x1022
+
+/* DEVICE ID */
+#define CS5536_ISA_DEVICE_ID 0x2090
+#define CS5536_IDE_DEVICE_ID 0x209a
+#define CS5536_ACC_DEVICE_ID 0x2093
+#define CS5536_OHCI_DEVICE_ID 0x2094
+#define CS5536_EHCI_DEVICE_ID 0x2095
+
+/* CLASS CODE : CLASS SUB-CLASS INTERFACE */
+#define CS5536_ISA_CLASS_CODE 0x060100
+#define CS5536_IDE_CLASS_CODE 0x010180
+#define CS5536_ACC_CLASS_CODE 0x040100
+#define CS5536_OHCI_CLASS_CODE 0x0C0310
+#define CS5536_EHCI_CLASS_CODE 0x0C0320
+
+/* BHLC : BIST HEADER-TYPE LATENCY-TIMER CACHE-LINE-SIZE */
+
+#define CFG_PCI_CACHE_LINE_SIZE(header_type, latency_timer) \
+ ((PCI_NONE_BIST << 24) | ((header_type) << 16) \
+ | ((latency_timer) << 8) | PCI_NORMAL_CACHE_LINE_SIZE);
+
+#define PCI_NONE_BIST 0x00 /* RO not implemented yet. */
+#define PCI_BRIDGE_HEADER_TYPE 0x80 /* RO */
+#define PCI_NORMAL_HEADER_TYPE 0x00
+#define PCI_NORMAL_LATENCY_TIMER 0x00
+#define PCI_NORMAL_CACHE_LINE_SIZE 0x08 /* RW */
+
+/* BAR */
+#define PCI_BAR0_REG 0x10
+#define PCI_BAR1_REG 0x14
+#define PCI_BAR2_REG 0x18
+#define PCI_BAR3_REG 0x1c
+#define PCI_BAR4_REG 0x20
+#define PCI_BAR5_REG 0x24
+#define PCI_BAR_RANGE_MASK 0xFFFFFFFF
+
+/* CARDBUS CIS POINTER */
+#define PCI_CARDBUS_CIS_POINTER 0x00000000
+
+/* SUBSYSTEM VENDOR ID */
+#define CS5536_SUB_VENDOR_ID CS5536_VENDOR_ID
+
+/* SUBSYSTEM ID */
+#define CS5536_ISA_SUB_ID CS5536_ISA_DEVICE_ID
+#define CS5536_IDE_SUB_ID CS5536_IDE_DEVICE_ID
+#define CS5536_ACC_SUB_ID CS5536_ACC_DEVICE_ID
+#define CS5536_OHCI_SUB_ID CS5536_OHCI_DEVICE_ID
+#define CS5536_EHCI_SUB_ID CS5536_EHCI_DEVICE_ID
+
+/* EXPANSION ROM BAR */
+#define PCI_EXPANSION_ROM_BAR 0x00000000
+
+/* CAPABILITIES POINTER */
+#define PCI_CAPLIST_POINTER 0x00000000
+#define PCI_CAPLIST_USB_POINTER 0x40
+/* INTERRUPT */
+
+#define CFG_PCI_INTERRUPT_LINE(pin, mod_intr) \
+ ((PCI_MAX_LATENCY << 24) | (PCI_MIN_GRANT << 16) | \
+ ((pin) << 8) | (mod_intr))
+
+#define PCI_MAX_LATENCY 0x40
+#define PCI_MIN_GRANT 0x00
+#define PCI_DEFAULT_PIN 0x01
+
+/*********** EXPANSION PCI REG ************************/
+
+/*
+ * ISA EXPANSION
+ */
+#define PCI_UART1_INT_REG 0x50
+#define PCI_UART2_INT_REG 0x54
+#define PCI_ISA_FIXUP_REG 0x58
+
+/*
+ * IDE EXPANSION
+ */
+#define PCI_IDE_CFG_REG 0x40
+#define CS5536_IDE_FLASH_SIGNATURE 0xDEADBEEF
+#define PCI_IDE_DTC_REG 0x48
+#define PCI_IDE_CAST_REG 0x4C
+#define PCI_IDE_ETC_REG 0x50
+#define PCI_IDE_PM_REG 0x54
+#define PCI_IDE_INT_REG 0x60
+
+/*
+ * ACC EXPANSION
+ */
+#define PCI_ACC_INT_REG 0x50
+
+/*
+ * OHCI EXPANSION : INTERRUPT IS IMPLEMENTED BY THE OHCI
+ */
+#define PCI_OHCI_PM_REG 0x40
+#define PCI_OHCI_INT_REG 0x50
+
+/*
+ * EHCI EXPANSION
+ */
+#define PCI_EHCI_LEGSMIEN_REG 0x50
+#define PCI_EHCI_LEGSMISTS_REG 0x54
+#define PCI_EHCI_FLADJ_REG 0x60
+
+#endif /* _CS5536_PCI_H */
diff --git a/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_vsm.h b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_vsm.h
new file mode 100644
index 000000000..70d0153cc
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_vsm.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * the read/write interfaces for Virtual Support Module (VSM)
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#ifndef _CS5536_VSM_H
+#define _CS5536_VSM_H
+
+#include <linux/types.h>
+
+typedef void (*cs5536_pci_vsm_write)(int reg, u32 value);
+typedef u32 (*cs5536_pci_vsm_read)(int reg);
+
+#define DECLARE_CS5536_MODULE(name) \
+extern void pci_##name##_write_reg(int reg, u32 value); \
+extern u32 pci_##name##_read_reg(int reg);
+
+/* ide module */
+DECLARE_CS5536_MODULE(ide)
+/* acc module */
+DECLARE_CS5536_MODULE(acc)
+/* ohci module */
+DECLARE_CS5536_MODULE(ohci)
+/* isa module */
+DECLARE_CS5536_MODULE(isa)
+/* ehci module */
+DECLARE_CS5536_MODULE(ehci)
+
+#endif /* _CS5536_VSM_H */
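For clarity, DECLARE_CS5536_MODULE(ide) above expands to exactly the two prototypes below, and the cs5536_pci_vsm_read/write typedefs match their signatures, so callers can dispatch to a module's read/write pair through function pointers (the variable names here are illustrative):

	extern void pci_ide_write_reg(int reg, u32 value);
	extern u32 pci_ide_read_reg(int reg);

	/* hedged sketch of run-time dispatch through the typedefs above */
	static cs5536_pci_vsm_read example_read_fn = pci_ide_read_reg;
	static cs5536_pci_vsm_write example_write_fn = pci_ide_write_reg;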
diff --git a/arch/mips/include/asm/mach-loongson2ef/loongson.h b/arch/mips/include/asm/mach-loongson2ef/loongson.h
new file mode 100644
index 000000000..57e571128
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/loongson.h
@@ -0,0 +1,327 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#ifndef __ASM_MACH_LOONGSON2EF_LOONGSON_H
+#define __ASM_MACH_LOONGSON2EF_LOONGSON_H
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+
+/* loongson internal northbridge initialization */
+extern void bonito_irq_init(void);
+
+/* machine-specific reboot/halt operation */
+extern void mach_prepare_reboot(void);
+extern void mach_prepare_shutdown(void);
+
+/* environment arguments from bootloader */
+extern u32 cpu_clock_freq;
+extern u32 memsize, highmemsize;
+
+/* loongson-specific command line, env and memory initialization */
+extern void __init prom_init_memory(void);
+extern void __init prom_init_machtype(void);
+extern void __init prom_init_env(void);
+#ifdef CONFIG_LOONGSON_UART_BASE
+extern unsigned long _loongson_uart_base, loongson_uart_base;
+extern void prom_init_loongson_uart_base(void);
+#endif
+
+static inline void prom_init_uart_base(void)
+{
+#ifdef CONFIG_LOONGSON_UART_BASE
+ prom_init_loongson_uart_base();
+#endif
+}
+
+/* irq operation functions */
+extern void bonito_irqdispatch(void);
+extern void __init bonito_irq_init(void);
+extern void __init mach_init_irq(void);
+extern void mach_irq_dispatch(unsigned int pending);
+extern int mach_i8259_irq(void);
+
+/* We need this in some places... */
+#define delay() ({ \
+ int x; \
+ for (x = 0; x < 100000; x++) \
+ __asm__ __volatile__(""); \
+})
+
+#define LOONGSON_REG(x) \
+ (*(volatile u32 *)((char *)CKSEG1ADDR(LOONGSON_REG_BASE) + (x)))
+
+#define LOONGSON_IRQ_BASE 32
+#define LOONGSON2_PERFCNT_IRQ (MIPS_CPU_IRQ_BASE + 6) /* cpu perf counter */
+
+#include <linux/interrupt.h>
+static inline void do_perfcnt_IRQ(void)
+{
+#if IS_ENABLED(CONFIG_OPROFILE)
+ do_IRQ(LOONGSON2_PERFCNT_IRQ);
+#endif
+}
+
+#define LOONGSON_FLASH_BASE 0x1c000000
+#define LOONGSON_FLASH_SIZE 0x02000000 /* 32M */
+#define LOONGSON_FLASH_TOP (LOONGSON_FLASH_BASE+LOONGSON_FLASH_SIZE-1)
+
+#define LOONGSON_LIO0_BASE 0x1e000000
+#define LOONGSON_LIO0_SIZE 0x01C00000 /* 28M */
+#define LOONGSON_LIO0_TOP (LOONGSON_LIO0_BASE+LOONGSON_LIO0_SIZE-1)
+
+#define LOONGSON_BOOT_BASE 0x1fc00000
+#define LOONGSON_BOOT_SIZE 0x00100000 /* 1M */
+#define LOONGSON_BOOT_TOP (LOONGSON_BOOT_BASE+LOONGSON_BOOT_SIZE-1)
+#define LOONGSON_REG_BASE 0x1fe00000
+#define LOONGSON_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */
+#define LOONGSON_REG_TOP (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1)
+
+#define LOONGSON_LIO1_BASE 0x1ff00000
+#define LOONGSON_LIO1_SIZE 0x00100000 /* 1M */
+#define LOONGSON_LIO1_TOP (LOONGSON_LIO1_BASE+LOONGSON_LIO1_SIZE-1)
+
+#define LOONGSON_PCILO0_BASE 0x10000000
+#define LOONGSON_PCILO1_BASE 0x14000000
+#define LOONGSON_PCILO2_BASE 0x18000000
+#define LOONGSON_PCILO_BASE LOONGSON_PCILO0_BASE
+#define LOONGSON_PCILO_SIZE 0x0c000000 /* 64M * 3 */
+#define LOONGSON_PCILO_TOP (LOONGSON_PCILO0_BASE+LOONGSON_PCILO_SIZE-1)
+
+#define LOONGSON_PCICFG_BASE 0x1fe80000
+#define LOONGSON_PCICFG_SIZE 0x00000800 /* 2K */
+#define LOONGSON_PCICFG_TOP (LOONGSON_PCICFG_BASE+LOONGSON_PCICFG_SIZE-1)
+#define LOONGSON_PCIIO_BASE 0x1fd00000
+
+#define LOONGSON_PCIIO_SIZE 0x00100000 /* 1M */
+#define LOONGSON_PCIIO_TOP (LOONGSON_PCIIO_BASE+LOONGSON_PCIIO_SIZE-1)
+
+/* Loongson Register Bases */
+
+#define LOONGSON_PCICONFIGBASE 0x00
+#define LOONGSON_REGBASE 0x100
+
+/* PCI Configuration Registers */
+
+#define LOONGSON_PCI_REG(x) LOONGSON_REG(LOONGSON_PCICONFIGBASE + (x))
+#define LOONGSON_PCIDID LOONGSON_PCI_REG(0x00)
+#define LOONGSON_PCICMD LOONGSON_PCI_REG(0x04)
+#define LOONGSON_PCICLASS LOONGSON_PCI_REG(0x08)
+#define LOONGSON_PCILTIMER LOONGSON_PCI_REG(0x0c)
+#define LOONGSON_PCIBASE0 LOONGSON_PCI_REG(0x10)
+#define LOONGSON_PCIBASE1 LOONGSON_PCI_REG(0x14)
+#define LOONGSON_PCIBASE2 LOONGSON_PCI_REG(0x18)
+#define LOONGSON_PCIBASE3 LOONGSON_PCI_REG(0x1c)
+#define LOONGSON_PCIBASE4 LOONGSON_PCI_REG(0x20)
+#define LOONGSON_PCIEXPRBASE LOONGSON_PCI_REG(0x30)
+#define LOONGSON_PCIINT LOONGSON_PCI_REG(0x3c)
+
+#define LOONGSON_PCI_ISR4C LOONGSON_PCI_REG(0x4c)
+
+#define LOONGSON_PCICMD_PERR_CLR 0x80000000
+#define LOONGSON_PCICMD_SERR_CLR 0x40000000
+#define LOONGSON_PCICMD_MABORT_CLR 0x20000000
+#define LOONGSON_PCICMD_MTABORT_CLR 0x10000000
+#define LOONGSON_PCICMD_TABORT_CLR 0x08000000
+#define LOONGSON_PCICMD_MPERR_CLR 0x01000000
+#define LOONGSON_PCICMD_PERRRESPEN 0x00000040
+#define LOONGSON_PCICMD_ASTEPEN 0x00000080
+#define LOONGSON_PCICMD_SERREN 0x00000100
+#define LOONGSON_PCILTIMER_BUSLATENCY 0x0000ff00
+#define LOONGSON_PCILTIMER_BUSLATENCY_SHIFT 8
+
+/* Loongson h/w Configuration */
+
+#define LOONGSON_GENCFG_OFFSET 0x4
+#define LOONGSON_GENCFG LOONGSON_REG(LOONGSON_REGBASE + LOONGSON_GENCFG_OFFSET)
+
+#define LOONGSON_GENCFG_DEBUGMODE 0x00000001
+#define LOONGSON_GENCFG_SNOOPEN 0x00000002
+#define LOONGSON_GENCFG_CPUSELFRESET 0x00000004
+
+#define LOONGSON_GENCFG_FORCE_IRQA 0x00000008
+#define LOONGSON_GENCFG_IRQA_ISOUT 0x00000010
+#define LOONGSON_GENCFG_IRQA_FROM_INT1 0x00000020
+#define LOONGSON_GENCFG_BYTESWAP 0x00000040
+
+#define LOONGSON_GENCFG_UNCACHED 0x00000080
+#define LOONGSON_GENCFG_PREFETCHEN 0x00000100
+#define LOONGSON_GENCFG_WBEHINDEN 0x00000200
+#define LOONGSON_GENCFG_CACHEALG 0x00000c00
+#define LOONGSON_GENCFG_CACHEALG_SHIFT 10
+#define LOONGSON_GENCFG_PCIQUEUE 0x00001000
+#define LOONGSON_GENCFG_CACHESTOP 0x00002000
+#define LOONGSON_GENCFG_MSTRBYTESWAP 0x00004000
+#define LOONGSON_GENCFG_BUSERREN 0x00008000
+#define LOONGSON_GENCFG_NORETRYTIMEOUT 0x00010000
+#define LOONGSON_GENCFG_SHORTCOPYTIMEOUT 0x00020000
+
+/* PCI address map control */
+
+#define LOONGSON_PCIMAP LOONGSON_REG(LOONGSON_REGBASE + 0x10)
+#define LOONGSON_PCIMEMBASECFG LOONGSON_REG(LOONGSON_REGBASE + 0x14)
+#define LOONGSON_PCIMAP_CFG LOONGSON_REG(LOONGSON_REGBASE + 0x18)
+
+/* GPIO Regs - r/w */
+
+#define LOONGSON_GPIODATA LOONGSON_REG(LOONGSON_REGBASE + 0x1c)
+#define LOONGSON_GPIOIE LOONGSON_REG(LOONGSON_REGBASE + 0x20)
+
+/* ICU Configuration Regs - r/w */
+
+#define LOONGSON_INTEDGE LOONGSON_REG(LOONGSON_REGBASE + 0x24)
+#define LOONGSON_INTSTEER LOONGSON_REG(LOONGSON_REGBASE + 0x28)
+#define LOONGSON_INTPOL LOONGSON_REG(LOONGSON_REGBASE + 0x2c)
+
+/* ICU Enable Regs - IntEn & IntISR are r/o. */
+
+#define LOONGSON_INTENSET LOONGSON_REG(LOONGSON_REGBASE + 0x30)
+#define LOONGSON_INTENCLR LOONGSON_REG(LOONGSON_REGBASE + 0x34)
+#define LOONGSON_INTEN LOONGSON_REG(LOONGSON_REGBASE + 0x38)
+#define LOONGSON_INTISR LOONGSON_REG(LOONGSON_REGBASE + 0x3c)
+
+/* ICU */
+#define LOONGSON_ICU_MBOXES 0x0000000f
+#define LOONGSON_ICU_MBOXES_SHIFT 0
+#define LOONGSON_ICU_DMARDY 0x00000010
+#define LOONGSON_ICU_DMAEMPTY 0x00000020
+#define LOONGSON_ICU_COPYRDY 0x00000040
+#define LOONGSON_ICU_COPYEMPTY 0x00000080
+#define LOONGSON_ICU_COPYERR 0x00000100
+#define LOONGSON_ICU_PCIIRQ 0x00000200
+#define LOONGSON_ICU_MASTERERR 0x00000400
+#define LOONGSON_ICU_SYSTEMERR 0x00000800
+#define LOONGSON_ICU_DRAMPERR 0x00001000
+#define LOONGSON_ICU_RETRYERR 0x00002000
+#define LOONGSON_ICU_GPIOS 0x01ff0000
+#define LOONGSON_ICU_GPIOS_SHIFT 16
+#define LOONGSON_ICU_GPINS 0x7e000000
+#define LOONGSON_ICU_GPINS_SHIFT 25
+#define LOONGSON_ICU_MBOX(N) (1<<(LOONGSON_ICU_MBOXES_SHIFT+(N)))
+#define LOONGSON_ICU_GPIO(N) (1<<(LOONGSON_ICU_GPIOS_SHIFT+(N)))
+#define LOONGSON_ICU_GPIN(N) (1<<(LOONGSON_ICU_GPINS_SHIFT+(N)))
+
+/* PCI prefetch window base & mask */
+
+#define LOONGSON_MEM_WIN_BASE_L LOONGSON_REG(LOONGSON_REGBASE + 0x40)
+#define LOONGSON_MEM_WIN_BASE_H LOONGSON_REG(LOONGSON_REGBASE + 0x44)
+#define LOONGSON_MEM_WIN_MASK_L LOONGSON_REG(LOONGSON_REGBASE + 0x48)
+#define LOONGSON_MEM_WIN_MASK_H LOONGSON_REG(LOONGSON_REGBASE + 0x4c)
+
+/* PCI_Hit*_Sel_* */
+
+#define LOONGSON_PCI_HIT0_SEL_L LOONGSON_REG(LOONGSON_REGBASE + 0x50)
+#define LOONGSON_PCI_HIT0_SEL_H LOONGSON_REG(LOONGSON_REGBASE + 0x54)
+#define LOONGSON_PCI_HIT1_SEL_L LOONGSON_REG(LOONGSON_REGBASE + 0x58)
+#define LOONGSON_PCI_HIT1_SEL_H LOONGSON_REG(LOONGSON_REGBASE + 0x5c)
+#define LOONGSON_PCI_HIT2_SEL_L LOONGSON_REG(LOONGSON_REGBASE + 0x60)
+#define LOONGSON_PCI_HIT2_SEL_H LOONGSON_REG(LOONGSON_REGBASE + 0x64)
+
+/* PXArb Config & Status */
+
+#define LOONGSON_PXARB_CFG LOONGSON_REG(LOONGSON_REGBASE + 0x68)
+#define LOONGSON_PXARB_STATUS LOONGSON_REG(LOONGSON_REGBASE + 0x6c)
+
+/* Chip Config register of each physical cpu package, PRid >= Loongson-2F */
+#define LOONGSON_CHIPCFG (void __iomem *)TO_UNCAC(0x1fc00180)
+
+/* pcimap */
+
+#define LOONGSON_PCIMAP_PCIMAP_LO0 0x0000003f
+#define LOONGSON_PCIMAP_PCIMAP_LO0_SHIFT 0
+#define LOONGSON_PCIMAP_PCIMAP_LO1 0x00000fc0
+#define LOONGSON_PCIMAP_PCIMAP_LO1_SHIFT 6
+#define LOONGSON_PCIMAP_PCIMAP_LO2 0x0003f000
+#define LOONGSON_PCIMAP_PCIMAP_LO2_SHIFT 12
+#define LOONGSON_PCIMAP_PCIMAP_2 0x00040000
+#define LOONGSON_PCIMAP_WIN(WIN, ADDR) \
+ ((((ADDR)>>26) & LOONGSON_PCIMAP_PCIMAP_LO0) << ((WIN)*6))
+
+#ifdef CONFIG_CPU_SUPPORTS_CPUFREQ
+#include <linux/cpufreq.h>
+extern struct cpufreq_frequency_table loongson2_clockmod_table[];
+extern int loongson2_cpu_set_rate(unsigned long rate_khz);
+#endif
+
+/*
+ * address windows configuration module
+ *
+ * loongson2e does not have this module
+ */
+#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
+
+/* address window config module base address */
+#define LOONGSON_ADDRWINCFG_BASE 0x3ff00000ul
+#define LOONGSON_ADDRWINCFG_SIZE 0x180
+
+extern unsigned long _loongson_addrwincfg_base;
+#define LOONGSON_ADDRWINCFG(offset) \
+ (*(volatile u64 *)(_loongson_addrwincfg_base + (offset)))
+
+#define CPU_WIN0_BASE LOONGSON_ADDRWINCFG(0x00)
+#define CPU_WIN1_BASE LOONGSON_ADDRWINCFG(0x08)
+#define CPU_WIN2_BASE LOONGSON_ADDRWINCFG(0x10)
+#define CPU_WIN3_BASE LOONGSON_ADDRWINCFG(0x18)
+
+#define CPU_WIN0_MASK LOONGSON_ADDRWINCFG(0x20)
+#define CPU_WIN1_MASK LOONGSON_ADDRWINCFG(0x28)
+#define CPU_WIN2_MASK LOONGSON_ADDRWINCFG(0x30)
+#define CPU_WIN3_MASK LOONGSON_ADDRWINCFG(0x38)
+
+#define CPU_WIN0_MMAP LOONGSON_ADDRWINCFG(0x40)
+#define CPU_WIN1_MMAP LOONGSON_ADDRWINCFG(0x48)
+#define CPU_WIN2_MMAP LOONGSON_ADDRWINCFG(0x50)
+#define CPU_WIN3_MMAP LOONGSON_ADDRWINCFG(0x58)
+
+#define PCIDMA_WIN0_BASE LOONGSON_ADDRWINCFG(0x60)
+#define PCIDMA_WIN1_BASE LOONGSON_ADDRWINCFG(0x68)
+#define PCIDMA_WIN2_BASE LOONGSON_ADDRWINCFG(0x70)
+#define PCIDMA_WIN3_BASE LOONGSON_ADDRWINCFG(0x78)
+
+#define PCIDMA_WIN0_MASK LOONGSON_ADDRWINCFG(0x80)
+#define PCIDMA_WIN1_MASK LOONGSON_ADDRWINCFG(0x88)
+#define PCIDMA_WIN2_MASK LOONGSON_ADDRWINCFG(0x90)
+#define PCIDMA_WIN3_MASK LOONGSON_ADDRWINCFG(0x98)
+
+#define PCIDMA_WIN0_MMAP LOONGSON_ADDRWINCFG(0xa0)
+#define PCIDMA_WIN1_MMAP LOONGSON_ADDRWINCFG(0xa8)
+#define PCIDMA_WIN2_MMAP LOONGSON_ADDRWINCFG(0xb0)
+#define PCIDMA_WIN3_MMAP LOONGSON_ADDRWINCFG(0xb8)
+
+#define ADDRWIN_WIN0 0
+#define ADDRWIN_WIN1 1
+#define ADDRWIN_WIN2 2
+#define ADDRWIN_WIN3 3
+
+#define ADDRWIN_MAP_DST_DDR 0
+#define ADDRWIN_MAP_DST_PCI 1
+#define ADDRWIN_MAP_DST_LIO 1
+
+/*
+ * s: CPU, PCIDMA
+ * d: DDR, PCI, LIO
+ * win: 0, 1, 2, 3
+ * src: map source
+ * dst: map destination
+ * size: ~mask + 1
+ */
+#define LOONGSON_ADDRWIN_CFG(s, d, w, src, dst, size) do {\
+ s##_WIN##w##_BASE = (src); \
+ s##_WIN##w##_MMAP = (dst) | ADDRWIN_MAP_DST_##d; \
+ s##_WIN##w##_MASK = ~(size-1); \
+} while (0)
+
+#define LOONGSON_ADDRWIN_CPUTOPCI(win, src, dst, size) \
+ LOONGSON_ADDRWIN_CFG(CPU, PCI, win, src, dst, size)
+#define LOONGSON_ADDRWIN_CPUTODDR(win, src, dst, size) \
+ LOONGSON_ADDRWIN_CFG(CPU, DDR, win, src, dst, size)
+#define LOONGSON_ADDRWIN_PCITODDR(win, src, dst, size) \
+ LOONGSON_ADDRWIN_CFG(PCIDMA, DDR, win, src, dst, size)
+
+#endif /* CONFIG_CPU_SUPPORTS_ADDRWINCFG */
+
+#endif /* __ASM_MACH_LOONGSON2EF_LOONGSON_H */
diff --git a/arch/mips/include/asm/mach-loongson2ef/machine.h b/arch/mips/include/asm/mach-loongson2ef/machine.h
new file mode 100644
index 000000000..4097267ef
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/machine.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#ifndef __ASM_MACH_LOONGSON2EF_MACHINE_H
+#define __ASM_MACH_LOONGSON2EF_MACHINE_H
+
+#ifdef CONFIG_LEMOTE_FULOONG2E
+
+#define LOONGSON_MACHTYPE MACH_LEMOTE_FL2E
+
+#endif
+
+/* use fuloong2f as the default machine of LEMOTE_MACH2F */
+#ifdef CONFIG_LEMOTE_MACH2F
+
+#define LOONGSON_MACHTYPE MACH_LEMOTE_FL2F
+
+#endif
+
+#endif /* __ASM_MACH_LOONGSON2EF_MACHINE_H */
diff --git a/arch/mips/include/asm/mach-loongson2ef/mem.h b/arch/mips/include/asm/mach-loongson2ef/mem.h
new file mode 100644
index 000000000..d1d759b89
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/mem.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#ifndef __ASM_MACH_LOONGSON2EF_MEM_H
+#define __ASM_MACH_LOONGSON2EF_MEM_H
+
+/*
+ * high memory space
+ *
+ * on loongson2e, it starts at 512M
+ * on loongson2f, it starts at 2G + 256M
+ */
+#ifdef CONFIG_CPU_LOONGSON2E
+#define LOONGSON_HIGHMEM_START 0x20000000
+#else
+#define LOONGSON_HIGHMEM_START 0x90000000
+#endif
+
+/*
+ * the peripheral registers (MMIO):
+ *
+ * On the Lemote Loongson 2e system, they reside between 0x1000:0000 and 0x2000:0000.
+ * On the Lemote Loongson 2f system, they reside between 0x1000:0000 and 0x8000:0000.
+ */
+
+#define LOONGSON_MMIO_MEM_START 0x10000000
+
+#ifdef CONFIG_CPU_LOONGSON2E
+#define LOONGSON_MMIO_MEM_END 0x20000000
+#else
+#define LOONGSON_MMIO_MEM_END 0x80000000
+#endif
+
+#endif /* __ASM_MACH_LOONGSON2EF_MEM_H */
diff --git a/arch/mips/include/asm/mach-loongson2ef/pci.h b/arch/mips/include/asm/mach-loongson2ef/pci.h
new file mode 100644
index 000000000..5588c5bc5
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/pci.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2008 Zhang Le <r0bertz@gentoo.org>
+ * Copyright (c) 2009 Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#ifndef __ASM_MACH_LOONGSON2EF_PCI_H_
+#define __ASM_MACH_LOONGSON2EF_PCI_H_
+
+extern struct pci_ops loongson_pci_ops;
+
+/* this is an offset from mips_io_port_base */
+#define LOONGSON_PCI_IO_START 0x00004000UL
+
+#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
+
+/*
+ * we use address window 2 to map the cpu address space to pci space
+ * window2: cpu [1G, 2G] -> pci [1G, 2G]
+ * windows 0 & 1 are not used because the cpu relies on them while booting:
+ * window0: cpu [0, 256M] -> ddr [0, 256M]
+ * window1: cpu [256M, 512M] -> pci [256M, 512M]
+ */
+
+/* the smallest LOONGSON_CPU_MEM_SRC can be 512M */
+#define LOONGSON_CPU_MEM_SRC 0x40000000ul /* 1G */
+#define LOONGSON_PCI_MEM_DST LOONGSON_CPU_MEM_SRC
+
+#define LOONGSON_PCI_MEM_START LOONGSON_PCI_MEM_DST
+#define LOONGSON_PCI_MEM_END (0x80000000ul-1) /* 2G */
+
+#define MMAP_CPUTOPCI_SIZE (LOONGSON_PCI_MEM_END - \
+ LOONGSON_PCI_MEM_START + 1)
+
+#else /* loongson2f/32bit & loongson2e */
+
+/* this pci memory space is mapped by pcimap in pci.c */
+#define LOONGSON_PCI_MEM_START LOONGSON_PCILO1_BASE
+#define LOONGSON_PCI_MEM_END (LOONGSON_PCILO1_BASE + 0x04000000 * 2)
+
+/* this is an offset from mips_io_port_base */
+#define LOONGSON_PCI_IO_START 0x00004000UL
+
+#endif /* !CONFIG_CPU_SUPPORTS_ADDRWINCFG */
+
+#endif /* !__ASM_MACH_LOONGSON2EF_PCI_H_ */
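Tying this to the LOONGSON_ADDRWIN_* helpers from loongson.h above, configuring window 2 to map the CPU [1G, 2G] range onto PCI might look like the hedged sketch below (only meaningful when CONFIG_CPU_SUPPORTS_ADDRWINCFG is set); the token-pasting macro then reduces it to three register writes:

	LOONGSON_ADDRWIN_CPUTOPCI(ADDRWIN_WIN2, LOONGSON_CPU_MEM_SRC,
				  LOONGSON_PCI_MEM_DST, MMAP_CPUTOPCI_SIZE);

	/* which expands, via LOONGSON_ADDRWIN_CFG, to roughly:
	 *	CPU_WIN2_BASE = LOONGSON_CPU_MEM_SRC;
	 *	CPU_WIN2_MMAP = LOONGSON_PCI_MEM_DST | ADDRWIN_MAP_DST_PCI;
	 *	CPU_WIN2_MASK = ~(MMAP_CPUTOPCI_SIZE - 1);
	 */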
diff --git a/arch/mips/include/asm/mach-loongson2ef/spaces.h b/arch/mips/include/asm/mach-loongson2ef/spaces.h
new file mode 100644
index 000000000..ba4e8e9b6
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/spaces.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_LOONGSON2EF_SPACES_H_
+#define __ASM_MACH_LOONGSON2EF_SPACES_H_
+
+#if defined(CONFIG_64BIT)
+#define CAC_BASE _AC(0x9800000000000000, UL)
+#endif /* CONFIG_64BIT */
+
+#include <asm/mach-generic/spaces.h>
+#endif
diff --git a/arch/mips/include/asm/mach-loongson32/cpufreq.h b/arch/mips/include/asm/mach-loongson32/cpufreq.h
new file mode 100644
index 000000000..e422a3288
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson32/cpufreq.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Loongson 1 CPUFreq platform support.
+ */
+
+#ifndef __ASM_MACH_LOONGSON32_CPUFREQ_H
+#define __ASM_MACH_LOONGSON32_CPUFREQ_H
+
+struct plat_ls1x_cpufreq {
+ const char *clk_name; /* CPU clk */
+ const char *osc_clk_name; /* OSC clk */
+ unsigned int max_freq; /* in kHz */
+ unsigned int min_freq; /* in kHz */
+};
+
+#endif /* __ASM_MACH_LOONGSON32_CPUFREQ_H */
diff --git a/arch/mips/include/asm/mach-loongson32/dma.h b/arch/mips/include/asm/mach-loongson32/dma.h
new file mode 100644
index 000000000..e917b3ccb
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson32/dma.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2015 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Loongson 1 DMA platform support.
+ */
+
+#ifndef __ASM_MACH_LOONGSON32_DMA_H
+#define __ASM_MACH_LOONGSON32_DMA_H
+
+#define LS1X_DMA_CHANNEL0 0
+#define LS1X_DMA_CHANNEL1 1
+#define LS1X_DMA_CHANNEL2 2
+
+struct plat_ls1x_dma {
+ int nr_channels;
+};
+
+extern struct plat_ls1x_dma ls1b_dma_pdata;
+
+#endif /* __ASM_MACH_LOONGSON32_DMA_H */
diff --git a/arch/mips/include/asm/mach-loongson32/irq.h b/arch/mips/include/asm/mach-loongson32/irq.h
new file mode 100644
index 000000000..6115f025b
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson32/irq.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * IRQ mappings for Loongson 1
+ */
+
+#ifndef __ASM_MACH_LOONGSON32_IRQ_H
+#define __ASM_MACH_LOONGSON32_IRQ_H
+
+/*
+ * CPU core Interrupt Numbers
+ */
+#define MIPS_CPU_IRQ_BASE 0
+#define MIPS_CPU_IRQ(x) (MIPS_CPU_IRQ_BASE + (x))
+
+#define SOFTINT0_IRQ MIPS_CPU_IRQ(0)
+#define SOFTINT1_IRQ MIPS_CPU_IRQ(1)
+#define INT0_IRQ MIPS_CPU_IRQ(2)
+#define INT1_IRQ MIPS_CPU_IRQ(3)
+#define INT2_IRQ MIPS_CPU_IRQ(4)
+#define INT3_IRQ MIPS_CPU_IRQ(5)
+#define INT4_IRQ MIPS_CPU_IRQ(6)
+#define TIMER_IRQ MIPS_CPU_IRQ(7) /* cpu timer */
+
+#define MIPS_CPU_IRQS (MIPS_CPU_IRQ(7) + 1 - MIPS_CPU_IRQ_BASE)
+
+/*
+ * INT0~3 Interrupt Numbers
+ */
+#define LS1X_IRQ_BASE MIPS_CPU_IRQS
+#define LS1X_IRQ(n, x) (LS1X_IRQ_BASE + (n << 5) + (x))
+
+#define LS1X_UART0_IRQ LS1X_IRQ(0, 2)
+#if defined(CONFIG_LOONGSON1_LS1B)
+#define LS1X_UART1_IRQ LS1X_IRQ(0, 3)
+#define LS1X_UART2_IRQ LS1X_IRQ(0, 4)
+#define LS1X_UART3_IRQ LS1X_IRQ(0, 5)
+#elif defined(CONFIG_LOONGSON1_LS1C)
+#define LS1X_UART1_IRQ LS1X_IRQ(0, 4)
+#define LS1X_UART2_IRQ LS1X_IRQ(0, 5)
+#endif
+#define LS1X_CAN0_IRQ LS1X_IRQ(0, 6)
+#define LS1X_CAN1_IRQ LS1X_IRQ(0, 7)
+#define LS1X_SPI0_IRQ LS1X_IRQ(0, 8)
+#define LS1X_SPI1_IRQ LS1X_IRQ(0, 9)
+#define LS1X_AC97_IRQ LS1X_IRQ(0, 10)
+#define LS1X_DMA0_IRQ LS1X_IRQ(0, 13)
+#define LS1X_DMA1_IRQ LS1X_IRQ(0, 14)
+#define LS1X_DMA2_IRQ LS1X_IRQ(0, 15)
+#if defined(CONFIG_LOONGSON1_LS1C)
+#define LS1X_NAND_IRQ LS1X_IRQ(0, 16)
+#endif
+#define LS1X_PWM0_IRQ LS1X_IRQ(0, 17)
+#define LS1X_PWM1_IRQ LS1X_IRQ(0, 18)
+#define LS1X_PWM2_IRQ LS1X_IRQ(0, 19)
+#define LS1X_PWM3_IRQ LS1X_IRQ(0, 20)
+#define LS1X_RTC_INT0_IRQ LS1X_IRQ(0, 21)
+#define LS1X_RTC_INT1_IRQ LS1X_IRQ(0, 22)
+#define LS1X_RTC_INT2_IRQ LS1X_IRQ(0, 23)
+#if defined(CONFIG_LOONGSON1_LS1B)
+#define LS1X_TOY_INT0_IRQ LS1X_IRQ(0, 24)
+#define LS1X_TOY_INT1_IRQ LS1X_IRQ(0, 25)
+#define LS1X_TOY_INT2_IRQ LS1X_IRQ(0, 26)
+#define LS1X_RTC_TICK_IRQ LS1X_IRQ(0, 27)
+#define LS1X_TOY_TICK_IRQ LS1X_IRQ(0, 28)
+#define LS1X_UART4_IRQ LS1X_IRQ(0, 29)
+#define LS1X_UART5_IRQ LS1X_IRQ(0, 30)
+#elif defined(CONFIG_LOONGSON1_LS1C)
+#define LS1X_UART3_IRQ LS1X_IRQ(0, 29)
+#define LS1X_ADC_IRQ LS1X_IRQ(0, 30)
+#define LS1X_SDIO_IRQ LS1X_IRQ(0, 31)
+#endif
+
+#define LS1X_EHCI_IRQ LS1X_IRQ(1, 0)
+#define LS1X_OHCI_IRQ LS1X_IRQ(1, 1)
+#if defined(CONFIG_LOONGSON1_LS1B)
+#define LS1X_GMAC0_IRQ LS1X_IRQ(1, 2)
+#define LS1X_GMAC1_IRQ LS1X_IRQ(1, 3)
+#elif defined(CONFIG_LOONGSON1_LS1C)
+#define LS1X_OTG_IRQ LS1X_IRQ(1, 2)
+#define LS1X_GMAC0_IRQ LS1X_IRQ(1, 3)
+#define LS1X_CAM_IRQ LS1X_IRQ(1, 4)
+#define LS1X_UART4_IRQ LS1X_IRQ(1, 5)
+#define LS1X_UART5_IRQ LS1X_IRQ(1, 6)
+#define LS1X_UART6_IRQ LS1X_IRQ(1, 7)
+#define LS1X_UART7_IRQ LS1X_IRQ(1, 8)
+#define LS1X_UART8_IRQ LS1X_IRQ(1, 9)
+#define LS1X_UART9_IRQ LS1X_IRQ(1, 13)
+#define LS1X_UART10_IRQ LS1X_IRQ(1, 14)
+#define LS1X_UART11_IRQ LS1X_IRQ(1, 15)
+#define LS1X_I2C0_IRQ LS1X_IRQ(1, 17)
+#define LS1X_I2C1_IRQ LS1X_IRQ(1, 18)
+#define LS1X_I2C2_IRQ LS1X_IRQ(1, 19)
+#endif
+
+#if defined(CONFIG_LOONGSON1_LS1B)
+#define INTN 4
+#elif defined(CONFIG_LOONGSON1_LS1C)
+#define INTN 5
+#endif
+
+#define LS1X_IRQS (LS1X_IRQ(INTN, 31) + 1 - LS1X_IRQ_BASE)
+
+#define NR_IRQS (MIPS_CPU_IRQS + LS1X_IRQS)
+
+#endif /* __ASM_MACH_LOONGSON32_IRQ_H */
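As a concrete check of the numbering above: MIPS_CPU_IRQS evaluates to 8, so LS1X_IRQ_BASE is 8 and LS1X_IRQ(n, x) is simply 8 + 32*n + x. For example, LS1X_UART0_IRQ = LS1X_IRQ(0, 2) = 10 and LS1X_EHCI_IRQ = LS1X_IRQ(1, 0) = 40; on an LS1B build (INTN = 4) this yields LS1X_IRQS = 160 and NR_IRQS = 168.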
diff --git a/arch/mips/include/asm/mach-loongson32/loongson1.h b/arch/mips/include/asm/mach-loongson32/loongson1.h
new file mode 100644
index 000000000..eb3ddbec1
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson32/loongson1.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Register mappings for Loongson 1
+ */
+
+#ifndef __ASM_MACH_LOONGSON32_LOONGSON1_H
+#define __ASM_MACH_LOONGSON32_LOONGSON1_H
+
+#if defined(CONFIG_LOONGSON1_LS1B)
+#define DEFAULT_MEMSIZE 64 /* If no memsize provided */
+#elif defined(CONFIG_LOONGSON1_LS1C)
+#define DEFAULT_MEMSIZE 32
+#endif
+
+/* Loongson 1 Register Bases */
+#define LS1X_MUX_BASE 0x1fd00420
+#define LS1X_INTC_BASE 0x1fd01040
+#define LS1X_GPIO0_BASE 0x1fd010c0
+#define LS1X_GPIO1_BASE 0x1fd010c4
+#define LS1X_DMAC_BASE 0x1fd01160
+#define LS1X_CBUS_BASE 0x1fd011c0
+#define LS1X_EHCI_BASE 0x1fe00000
+#define LS1X_OHCI_BASE 0x1fe08000
+#define LS1X_GMAC0_BASE 0x1fe10000
+#define LS1X_GMAC1_BASE 0x1fe20000
+
+#define LS1X_UART0_BASE 0x1fe40000
+#define LS1X_UART1_BASE 0x1fe44000
+#define LS1X_UART2_BASE 0x1fe48000
+#define LS1X_UART3_BASE 0x1fe4c000
+#define LS1X_CAN0_BASE 0x1fe50000
+#define LS1X_CAN1_BASE 0x1fe54000
+#define LS1X_I2C0_BASE 0x1fe58000
+#define LS1X_I2C1_BASE 0x1fe68000
+#define LS1X_I2C2_BASE 0x1fe70000
+#define LS1X_PWM0_BASE 0x1fe5c000
+#define LS1X_PWM1_BASE 0x1fe5c010
+#define LS1X_PWM2_BASE 0x1fe5c020
+#define LS1X_PWM3_BASE 0x1fe5c030
+#define LS1X_WDT_BASE 0x1fe5c060
+#define LS1X_RTC_BASE 0x1fe64000
+#define LS1X_AC97_BASE 0x1fe74000
+#define LS1X_NAND_BASE 0x1fe78000
+#define LS1X_CLK_BASE 0x1fe78030
+
+#include <regs-clk.h>
+#include <regs-mux.h>
+#include <regs-pwm.h>
+#include <regs-rtc.h>
+#include <regs-wdt.h>
+
+#endif /* __ASM_MACH_LOONGSON32_LOONGSON1_H */
diff --git a/arch/mips/include/asm/mach-loongson32/nand.h b/arch/mips/include/asm/mach-loongson32/nand.h
new file mode 100644
index 000000000..aaf5ed19d
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson32/nand.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2015 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Loongson 1 NAND platform support.
+ */
+
+#ifndef __ASM_MACH_LOONGSON32_NAND_H
+#define __ASM_MACH_LOONGSON32_NAND_H
+
+#include <linux/dmaengine.h>
+#include <linux/mtd/partitions.h>
+
+struct plat_ls1x_nand {
+ struct mtd_partition *parts;
+ unsigned int nr_parts;
+
+ int hold_cycle;
+ int wait_cycle;
+};
+
+extern struct plat_ls1x_nand ls1b_nand_pdata;
+
+bool ls1x_dma_filter_fn(struct dma_chan *chan, void *param);
+
+#endif /* __ASM_MACH_LOONGSON32_NAND_H */
diff --git a/arch/mips/include/asm/mach-loongson32/platform.h b/arch/mips/include/asm/mach-loongson32/platform.h
new file mode 100644
index 000000000..eb83e2741
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson32/platform.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ */
+
+#ifndef __ASM_MACH_LOONGSON32_PLATFORM_H
+#define __ASM_MACH_LOONGSON32_PLATFORM_H
+
+#include <linux/platform_device.h>
+
+#include <dma.h>
+#include <nand.h>
+
+extern struct platform_device ls1x_uart_pdev;
+extern struct platform_device ls1x_cpufreq_pdev;
+extern struct platform_device ls1x_eth0_pdev;
+extern struct platform_device ls1x_eth1_pdev;
+extern struct platform_device ls1x_ehci_pdev;
+extern struct platform_device ls1x_gpio0_pdev;
+extern struct platform_device ls1x_gpio1_pdev;
+extern struct platform_device ls1x_rtc_pdev;
+extern struct platform_device ls1x_wdt_pdev;
+
+void __init ls1x_clk_init(void);
+void __init ls1x_rtc_set_extclk(struct platform_device *pdev);
+void __init ls1x_serial_set_uartclk(struct platform_device *pdev);
+
+#endif /* __ASM_MACH_LOONGSON32_PLATFORM_H */
diff --git a/arch/mips/include/asm/mach-loongson32/regs-clk.h b/arch/mips/include/asm/mach-loongson32/regs-clk.h
new file mode 100644
index 000000000..98136fa8b
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson32/regs-clk.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Loongson 1 Clock Register Definitions.
+ */
+
+#ifndef __ASM_MACH_LOONGSON32_REGS_CLK_H
+#define __ASM_MACH_LOONGSON32_REGS_CLK_H
+
+#define LS1X_CLK_REG(x) \
+ ((void __iomem *)KSEG1ADDR(LS1X_CLK_BASE + (x)))
+
+#define LS1X_CLK_PLL_FREQ LS1X_CLK_REG(0x0)
+#define LS1X_CLK_PLL_DIV LS1X_CLK_REG(0x4)
+
+#if defined(CONFIG_LOONGSON1_LS1B)
+/* Clock PLL Divisor Register Bits */
+#define DIV_DC_EN BIT(31)
+#define DIV_DC_RST BIT(30)
+#define DIV_CPU_EN BIT(25)
+#define DIV_CPU_RST BIT(24)
+#define DIV_DDR_EN BIT(19)
+#define DIV_DDR_RST BIT(18)
+#define RST_DC_EN BIT(5)
+#define RST_DC BIT(4)
+#define RST_DDR_EN BIT(3)
+#define RST_DDR BIT(2)
+#define RST_CPU_EN BIT(1)
+#define RST_CPU BIT(0)
+
+#define DIV_DC_SHIFT 26
+#define DIV_CPU_SHIFT 20
+#define DIV_DDR_SHIFT 14
+
+#define DIV_DC_WIDTH 4
+#define DIV_CPU_WIDTH 4
+#define DIV_DDR_WIDTH 4
+
+#define BYPASS_DC_SHIFT 12
+#define BYPASS_DDR_SHIFT 10
+#define BYPASS_CPU_SHIFT 8
+
+#define BYPASS_DC_WIDTH 1
+#define BYPASS_DDR_WIDTH 1
+#define BYPASS_CPU_WIDTH 1
+
+#elif defined(CONFIG_LOONGSON1_LS1C)
+/* PLL/SDRAM Frequency configuration register Bits */
+#define PLL_VALID BIT(31)
+#define FRAC_N GENMASK(23, 16)
+#define RST_TIME GENMASK(3, 2)
+#define SDRAM_DIV GENMASK(1, 0)
+
+/* CPU/CAMERA/DC Frequency configuration register Bits */
+#define DIV_DC_EN BIT(31)
+#define DIV_DC GENMASK(30, 24)
+#define DIV_CAM_EN BIT(23)
+#define DIV_CAM GENMASK(22, 16)
+#define DIV_CPU_EN BIT(15)
+#define DIV_CPU GENMASK(14, 8)
+#define DIV_DC_SEL_EN BIT(5)
+#define DIV_DC_SEL BIT(4)
+#define DIV_CAM_SEL_EN BIT(3)
+#define DIV_CAM_SEL BIT(2)
+#define DIV_CPU_SEL_EN BIT(1)
+#define DIV_CPU_SEL BIT(0)
+
+#define DIV_DC_SHIFT 24
+#define DIV_CAM_SHIFT 16
+#define DIV_CPU_SHIFT 8
+#define DIV_DDR_SHIFT 0
+
+#define DIV_DC_WIDTH 7
+#define DIV_CAM_WIDTH 7
+#define DIV_CPU_WIDTH 7
+#define DIV_DDR_WIDTH 2
+
+#endif
+
+#endif /* __ASM_MACH_LOONGSON32_REGS_CLK_H */
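A hedged sketch of how the shift/width pairs above are typically consumed on the LS1B variant; the __raw_readl() access and the helper name are illustrative, not code from this header:

	#include <linux/io.h>
	#include <loongson1.h>

	/* LS1B: extract the CPU clock divider field from the PLL divisor register */
	static unsigned int example_ls1b_cpu_div(void)
	{
		u32 val = __raw_readl(LS1X_CLK_PLL_DIV);

		return (val >> DIV_CPU_SHIFT) & (BIT(DIV_CPU_WIDTH) - 1);
	}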
diff --git a/arch/mips/include/asm/mach-loongson32/regs-mux.h b/arch/mips/include/asm/mach-loongson32/regs-mux.h
new file mode 100644
index 000000000..95788a4f0
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson32/regs-mux.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Loongson 1 MUX Register Definitions.
+ */
+
+#ifndef __ASM_MACH_LOONGSON32_REGS_MUX_H
+#define __ASM_MACH_LOONGSON32_REGS_MUX_H
+
+#define LS1X_MUX_REG(x) \
+ ((void __iomem *)KSEG1ADDR(LS1X_MUX_BASE + (x)))
+
+#define LS1X_MUX_CTRL0 LS1X_MUX_REG(0x0)
+#define LS1X_MUX_CTRL1 LS1X_MUX_REG(0x4)
+
+#if defined(CONFIG_LOONGSON1_LS1B)
+/* MUX CTRL0 Register Bits */
+#define UART0_USE_PWM23 BIT(28)
+#define UART0_USE_PWM01 BIT(27)
+#define UART1_USE_LCD0_5_6_11 BIT(26)
+#define I2C2_USE_CAN1 BIT(25)
+#define I2C1_USE_CAN0 BIT(24)
+#define NAND3_USE_UART5 BIT(23)
+#define NAND3_USE_UART4 BIT(22)
+#define NAND3_USE_UART1_DAT BIT(21)
+#define NAND3_USE_UART1_CTS BIT(20)
+#define NAND3_USE_PWM23 BIT(19)
+#define NAND3_USE_PWM01 BIT(18)
+#define NAND2_USE_UART5 BIT(17)
+#define NAND2_USE_UART4 BIT(16)
+#define NAND2_USE_UART1_DAT BIT(15)
+#define NAND2_USE_UART1_CTS BIT(14)
+#define NAND2_USE_PWM23 BIT(13)
+#define NAND2_USE_PWM01 BIT(12)
+#define NAND1_USE_UART5 BIT(11)
+#define NAND1_USE_UART4 BIT(10)
+#define NAND1_USE_UART1_DAT BIT(9)
+#define NAND1_USE_UART1_CTS BIT(8)
+#define NAND1_USE_PWM23 BIT(7)
+#define NAND1_USE_PWM01 BIT(6)
+#define GMAC1_USE_UART1 BIT(4)
+#define GMAC1_USE_UART0 BIT(3)
+#define LCD_USE_UART0_DAT BIT(2)
+#define LCD_USE_UART15 BIT(1)
+#define LCD_USE_UART0 BIT(0)
+
+/* MUX CTRL1 Register Bits */
+#define USB_RESET BIT(31)
+#define SPI1_CS_USE_PWM01 BIT(24)
+#define SPI1_USE_CAN BIT(23)
+#define DISABLE_DDR_CONFSPACE BIT(20)
+#define DDR32TO16EN BIT(16)
+#define GMAC1_SHUT BIT(13)
+#define GMAC0_SHUT BIT(12)
+#define USB_SHUT BIT(11)
+#define UART1_3_USE_CAN1 BIT(5)
+#define UART1_2_USE_CAN0 BIT(4)
+#define GMAC1_USE_TXCLK BIT(3)
+#define GMAC0_USE_TXCLK BIT(2)
+#define GMAC1_USE_PWM23 BIT(1)
+#define GMAC0_USE_PWM01 BIT(0)
+
+#elif defined(CONFIG_LOONGSON1_LS1C)
+
+/* SHUT_CTRL Register Bits */
+#define UART_SPLIT GENMASK(31, 30)
+#define OUTPUT_CLK GENMASK(29, 26)
+#define ADC_SHUT BIT(25)
+#define SDIO_SHUT BIT(24)
+#define DMA2_SHUT BIT(23)
+#define DMA1_SHUT BIT(22)
+#define DMA0_SHUT BIT(21)
+#define SPI1_SHUT BIT(20)
+#define SPI0_SHUT BIT(19)
+#define I2C2_SHUT BIT(18)
+#define I2C1_SHUT BIT(17)
+#define I2C0_SHUT BIT(16)
+#define AC97_SHUT BIT(15)
+#define I2S_SHUT BIT(14)
+#define UART3_SHUT BIT(13)
+#define UART2_SHUT BIT(12)
+#define UART1_SHUT BIT(11)
+#define UART0_SHUT BIT(10)
+#define CAN1_SHUT BIT(9)
+#define CAN0_SHUT BIT(8)
+#define ECC_SHUT BIT(7)
+#define GMAC_SHUT BIT(6)
+#define USBHOST_SHUT BIT(5)
+#define USBOTG_SHUT BIT(4)
+#define SDRAM_SHUT BIT(3)
+#define SRAM_SHUT BIT(2)
+#define CAM_SHUT BIT(1)
+#define LCD_SHUT BIT(0)
+
+#define UART_SPLIT_SHIFT 30
+#define OUTPUT_CLK_SHIFT 26
+
+/* MISC_CTRL Register Bits */
+#define USBHOST_RSTN BIT(31)
+#define PHY_INTF_SELI GENMASK(30, 28)
+#define AC97_EN BIT(25)
+#define SDIO_DMA_EN GENMASK(24, 23)
+#define ADC_DMA_EN BIT(22)
+#define SDIO_USE_SPI1 BIT(17)
+#define SDIO_USE_SPI0 BIT(16)
+#define SRAM_CTRL GENMASK(15, 0)
+
+#define PHY_INTF_SELI_SHIFT 28
+#define SDIO_DMA_EN_SHIFT 23
+#define SRAM_CTRL_SHIFT 0
+
+#define LS1X_CBUS_REG(n, x) \
+ ((void __iomem *)KSEG1ADDR(LS1X_CBUS_BASE + (n * 0x04) + (x)))
+
+#define LS1X_CBUS_FIRST(n) LS1X_CBUS_REG(n, 0x00)
+#define LS1X_CBUS_SECOND(n) LS1X_CBUS_REG(n, 0x10)
+#define LS1X_CBUS_THIRD(n) LS1X_CBUS_REG(n, 0x20)
+#define LS1X_CBUS_FOURTHT(n) LS1X_CBUS_REG(n, 0x30)
+#define LS1X_CBUS_FIFTHT(n) LS1X_CBUS_REG(n, 0x40)
+
+#endif
+
+#endif /* __ASM_MACH_LOONGSON32_REGS_MUX_H */
diff --git a/arch/mips/include/asm/mach-loongson32/regs-pwm.h b/arch/mips/include/asm/mach-loongson32/regs-pwm.h
new file mode 100644
index 000000000..ec870c82d
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson32/regs-pwm.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Loongson 1 PWM Register Definitions.
+ */
+
+#ifndef __ASM_MACH_LOONGSON32_REGS_PWM_H
+#define __ASM_MACH_LOONGSON32_REGS_PWM_H
+
+/* Loongson 1 PWM Timer Register Definitions */
+#define PWM_CNT 0x0
+#define PWM_HRC 0x4
+#define PWM_LRC 0x8
+#define PWM_CTRL 0xc
+
+/* PWM Control Register Bits */
+#define CNT_RST BIT(7)
+#define INT_SR BIT(6)
+#define INT_EN BIT(5)
+#define PWM_SINGLE BIT(4)
+#define PWM_OE BIT(3)
+#define CNT_EN BIT(0)
+
+#endif /* __ASM_MACH_LOONGSON32_REGS_PWM_H */
diff --git a/arch/mips/include/asm/mach-loongson32/regs-rtc.h b/arch/mips/include/asm/mach-loongson32/regs-rtc.h
new file mode 100644
index 000000000..a3d096be1
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson32/regs-rtc.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2016 Yang Ling <gnaygnil@gmail.com>
+ *
+ * Loongson 1 RTC timer Register Definitions.
+ */
+
+#ifndef __ASM_MACH_LOONGSON32_REGS_RTC_H
+#define __ASM_MACH_LOONGSON32_REGS_RTC_H
+
+#define LS1X_RTC_REG(x) \
+ ((void __iomem *)KSEG1ADDR(LS1X_RTC_BASE + (x)))
+
+#define LS1X_RTC_CTRL LS1X_RTC_REG(0x40)
+
+#define RTC_EXTCLK_OK (BIT(5) | BIT(8))
+#define RTC_EXTCLK_EN BIT(8)
+
+#endif /* __ASM_MACH_LOONGSON32_REGS_RTC_H */
diff --git a/arch/mips/include/asm/mach-loongson32/regs-wdt.h b/arch/mips/include/asm/mach-loongson32/regs-wdt.h
new file mode 100644
index 000000000..c6d345fe1
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson32/regs-wdt.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Loongson 1 Watchdog Register Definitions.
+ */
+
+#ifndef __ASM_MACH_LOONGSON32_REGS_WDT_H
+#define __ASM_MACH_LOONGSON32_REGS_WDT_H
+
+#define WDT_EN 0x0
+#define WDT_TIMER 0x4
+#define WDT_SET 0x8
+
+#endif /* __ASM_MACH_LOONGSON32_REGS_WDT_H */
diff --git a/arch/mips/include/asm/mach-loongson64/boot_param.h b/arch/mips/include/asm/mach-loongson64/boot_param.h
new file mode 100644
index 000000000..afc92b7a6
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson64/boot_param.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_LOONGSON64_BOOT_PARAM_H_
+#define __ASM_MACH_LOONGSON64_BOOT_PARAM_H_
+
+#include <linux/types.h>
+
+#define SYSTEM_RAM_LOW 1
+#define SYSTEM_RAM_HIGH 2
+#define SYSTEM_RAM_RESERVED 3
+#define PCI_IO 4
+#define PCI_MEM 5
+#define LOONGSON_CFG_REG 6
+#define VIDEO_ROM 7
+#define ADAPTER_ROM 8
+#define ACPI_TABLE 9
+#define SMBIOS_TABLE 10
+#define MAX_MEMORY_TYPE 11
+
+#define LOONGSON3_BOOT_MEM_MAP_MAX 128
+struct efi_memory_map_loongson {
+ u16 vers; /* version of efi_memory_map */
+ u32 nr_map; /* number of memory_maps */
+ u32 mem_freq; /* memory frequency */
+ struct mem_map {
+ u32 node_id; /* node_id which memory attached to */
+ u32 mem_type; /* system memory, pci memory, pci io, etc. */
+ u64 mem_start; /* memory map start address */
+ u32 mem_size; /* each memory_map size, not the total size */
+ } map[LOONGSON3_BOOT_MEM_MAP_MAX];
+} __packed;
+
+enum loongson_cpu_type {
+ Legacy_2E = 0x0,
+ Legacy_2F = 0x1,
+ Legacy_3A = 0x2,
+ Legacy_3B = 0x3,
+ Legacy_1A = 0x4,
+ Legacy_1B = 0x5,
+ Legacy_2G = 0x6,
+ Legacy_2H = 0x7,
+ Loongson_1A = 0x100,
+ Loongson_1B = 0x101,
+ Loongson_2E = 0x200,
+ Loongson_2F = 0x201,
+ Loongson_2G = 0x202,
+ Loongson_2H = 0x203,
+ Loongson_3A = 0x300,
+ Loongson_3B = 0x301
+};
+
+/*
+ * Capability and feature descriptor structure for MIPS CPU
+ */
+struct efi_cpuinfo_loongson {
+ u16 vers; /* version of efi_cpuinfo_loongson */
+ u32 processor_id; /* PRID, e.g. 6305, 6306 */
+ u32 cputype; /* Loongson_3A/3B, etc. */
+ u32 total_node; /* num of total numa nodes */
+ u16 cpu_startup_core_id; /* Boot core id */
+ u16 reserved_cores_mask;
+ u32 cpu_clock_freq; /* cpu_clock */
+ u32 nr_cpus;
+} __packed;
+
+#define MAX_UARTS 64
+struct uart_device {
+ u32 iotype; /* see include/linux/serial_core.h */
+ u32 uartclk;
+ u32 int_offset;
+ u64 uart_base;
+} __packed;
+
+#define MAX_SENSORS 64
+#define SENSOR_TEMPER 0x00000001
+#define SENSOR_VOLTAGE 0x00000002
+#define SENSOR_FAN 0x00000004
+struct sensor_device {
+ char name[32]; /* a formal name */
+ char label[64]; /* a flexible description */
+ u32 type; /* SENSOR_* */
+ u32 id; /* instance id of a sensor-class */
+ u32 fan_policy; /* see loongson_hwmon.h */
+ u32 fan_percent;/* only for constant speed policy */
+ u64 base_addr; /* base address of device registers */
+} __packed;
+
+struct system_loongson {
+ u16 vers; /* version of system_loongson */
+ u32 ccnuma_smp; /* 0: no numa; 1: has numa */
+ u32 sing_double_channel; /* 1:single; 2:double */
+ u32 nr_uarts;
+ struct uart_device uarts[MAX_UARTS];
+ u32 nr_sensors;
+ struct sensor_device sensors[MAX_SENSORS];
+ char has_ec;
+ char ec_name[32];
+ u64 ec_base_addr;
+ char has_tcm;
+ char tcm_name[32];
+ u64 tcm_base_addr;
+ u64 workarounds; /* see workarounds.h */
+} __packed;
+
+struct irq_source_routing_table {
+ u16 vers;
+ u16 size;
+ u16 rtr_bus;
+ u16 rtr_devfn;
+ u32 vendor;
+ u32 device;
+ u32 PIC_type; /* whether HT or PCI is used to route to CPU-PIC */
+ u64 ht_int_bit; /* 3A: 1<<24; 3B: 1<<16 */
+ u64 ht_enable; /* irqs used in this PIC */
+ u32 node_id; /* node id: 0x0-0; 0x1-1; 0x10-2; 0x11-3 */
+ u64 pci_mem_start_addr;
+ u64 pci_mem_end_addr;
+ u64 pci_io_start_addr;
+ u64 pci_io_end_addr;
+ u64 pci_config_addr;
+ u32 dma_mask_bits;
+} __packed;
+
+struct interface_info {
+ u16 vers; /* version of the specification */
+ u16 size;
+ u8 flag;
+ char description[64];
+} __packed;
+
+#define MAX_RESOURCE_NUMBER 128
+struct resource_loongson {
+ u64 start; /* resource start address */
+ u64 end; /* resource end address */
+ char name[64];
+ u32 flags;
+};
+
+struct archdev_data {}; /* arch specific additions */
+
+struct board_devices {
+ char name[64]; /* hold the device name */
+ u32 num_resources; /* number of device_resource */
+ /* for each device's resource */
+ struct resource_loongson resource[MAX_RESOURCE_NUMBER];
+ /* arch specific additions */
+ struct archdev_data archdata;
+};
+
+struct loongson_special_attribute {
+ u16 vers; /* version of this special */
+ char special_name[64]; /* special_attribute_name */
+ u32 loongson_special_type; /* type of special device */
+ /* for each device's resource */
+ struct resource_loongson resource[MAX_RESOURCE_NUMBER];
+};
+
+struct loongson_params {
+ u64 memory_offset; /* efi_memory_map_loongson struct offset */
+ u64 cpu_offset; /* efi_cpuinfo_loongson struct offset */
+ u64 system_offset; /* system_loongson struct offset */
+ u64 irq_offset; /* irq_source_routing_table struct offset */
+ u64 interface_offset; /* interface_info struct offset */
+ u64 special_offset; /* loongson_special_attribute struct offset */
+ u64 boarddev_table_offset; /* board_devices offset */
+};
+
+struct smbios_tables {
+ u16 vers; /* version of smbios */
+ u64 vga_bios; /* vga_bios address */
+ struct loongson_params lp;
+};
+
+struct efi_reset_system_t {
+ u64 ResetCold;
+ u64 ResetWarm;
+ u64 ResetType;
+ u64 Shutdown;
+ u64 DoSuspend; /* NULL if not supported */
+};
+
+struct efi_loongson {
+ u64 mps; /* MPS table */
+ u64 acpi; /* ACPI table (IA64 ext 0.71) */
+ u64 acpi20; /* ACPI table (ACPI 2.0) */
+ struct smbios_tables smbios; /* SM BIOS table */
+ u64 sal_systab; /* SAL system table */
+ u64 boot_info; /* boot info table */
+};
+
+struct boot_params {
+ struct efi_loongson efi;
+ struct efi_reset_system_t reset_system;
+};
+
+enum loongson_bridge_type {
+ LS7A = 1,
+ RS780E = 2,
+ VIRTUAL = 3
+};
+
+struct loongson_system_configuration {
+ u32 nr_cpus;
+ u32 nr_nodes;
+ int cores_per_node;
+ int cores_per_package;
+ u16 boot_cpu_id;
+ u16 reserved_cpus_mask;
+ enum loongson_cpu_type cputype;
+ enum loongson_bridge_type bridgetype;
+ u64 ht_control_base;
+ u64 pci_mem_start_addr;
+ u64 pci_mem_end_addr;
+ u64 pci_io_base;
+ u64 restart_addr;
+ u64 poweroff_addr;
+ u64 suspend_addr;
+ u64 vgabios_addr;
+ u32 dma_mask_bits;
+ char ecname[32];
+ u32 nr_uarts;
+ struct uart_device uarts[MAX_UARTS];
+ u32 nr_sensors;
+ struct sensor_device sensors[MAX_SENSORS];
+ u64 workarounds;
+ void (*early_config)(void);
+};
+
+extern struct efi_memory_map_loongson *loongson_memmap;
+extern struct loongson_system_configuration loongson_sysconf;
+
+extern u32 node_id_offset;
+extern void ls7a_early_config(void);
+extern void rs780e_early_config(void);
+extern void virtual_early_config(void);
+
+#endif
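A hedged sketch of how the offset fields in struct loongson_params above are resolved; the sketch assumes each *_offset is a byte offset relative to the loongson_params structure itself, and that the boot_params pointer is handed over by the firmware:

	static struct efi_memory_map_loongson *
	example_resolve_memmap(struct boot_params *bp)
	{
		struct loongson_params *lp = &bp->efi.smbios.lp;

		return (struct efi_memory_map_loongson *)((u64)lp + lp->memory_offset);
	}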
diff --git a/arch/mips/include/asm/mach-loongson64/builtin_dtbs.h b/arch/mips/include/asm/mach-loongson64/builtin_dtbs.h
new file mode 100644
index 000000000..839410cda
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson64/builtin_dtbs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019 Jiaxun Yang <jiaxun.yang@flygoat.com>
+ *
+ * Built-in Generic dtbs for MACH_LOONGSON64
+ */
+
+#ifndef __ASM_MACH_LOONGSON64_BUILTIN_DTBS_H_
+#define __ASM_MACH_LOONGSON64_BUILTIN_DTBS_H_
+
+extern u32 __dtb_loongson64c_4core_ls7a_begin[];
+extern u32 __dtb_loongson64c_4core_rs780e_begin[];
+extern u32 __dtb_loongson64c_8core_rs780e_begin[];
+extern u32 __dtb_loongson64g_4core_ls7a_begin[];
+extern u32 __dtb_loongson64v_4core_virtio_begin[];
+#endif
diff --git a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
new file mode 100644
index 000000000..eb181224e
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
@@ -0,0 +1,51 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Wu Zhangjin <wuzhangjin@gmail.com>
+ * Copyright (C) 2009 Philippe Vachon <philippe@cowpig.ca>
+ * Copyright (C) 2009 Zhang Le <r0bertz@gentoo.org>
+ *
+ * reference: /proc/cpuinfo,
+ * arch/mips/kernel/cpu-probe.c(cpu_probe_legacy),
+ * arch/mips/kernel/proc.c(show_cpuinfo),
+ * loongson2f user manual.
+ */
+
+#ifndef __ASM_MACH_LOONGSON64_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_LOONGSON64_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_32fpr 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_4kex 1
+#define cpu_has_64bits 1
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_counter 1
+#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
+#define cpu_has_divec 0
+#define cpu_has_inclusive_pcaches 1
+#define cpu_has_llsc 1
+#define cpu_has_mcheck 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_mips3d 0
+#define cpu_has_mipsmt 0
+#define cpu_has_smartmips 0
+#define cpu_has_tlb 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_vce 0
+#define cpu_has_veic 0
+#define cpu_has_vint 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_wsbh 1
+#define cpu_has_ic_fills_f_dc 1
+#define cpu_hwrena_impl_bits 0xc0000000
+#define cpu_has_mac2008_only 1
+#define cpu_has_mips_r2_exec_hazard 0
+#define cpu_has_perf_cntr_intr_bit 0
+
+#endif /* __ASM_MACH_LOONGSON64_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-loongson64/cpucfg-emul.h b/arch/mips/include/asm/mach-loongson64/cpucfg-emul.h
new file mode 100644
index 000000000..d64af19c2
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson64/cpucfg-emul.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MACH_LOONGSON64_CPUCFG_EMUL_H_
+#define _ASM_MACH_LOONGSON64_CPUCFG_EMUL_H_
+
+#include <asm/cpu-info.h>
+
+#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
+
+#include <loongson_regs.h>
+
+#define LOONGSON_FPREV_MASK 0x7
+
+void loongson3_cpucfg_synthesize_data(struct cpuinfo_mips *c);
+
+static inline bool loongson3_cpucfg_emulation_enabled(struct cpuinfo_mips *c)
+{
+ /* All supported cores have non-zero LOONGSON_CFG1 data. */
+ return c->loongson3_cpucfg_data[0] != 0;
+}
+
+static inline u32 loongson3_cpucfg_read_synthesized(struct cpuinfo_mips *c,
+ __u64 sel)
+{
+ switch (sel) {
+ case LOONGSON_CFG0:
+ return c->processor_id;
+ case LOONGSON_CFG1:
+ case LOONGSON_CFG2:
+ case LOONGSON_CFG3:
+ return c->loongson3_cpucfg_data[sel - 1];
+ case LOONGSON_CFG4:
+ case LOONGSON_CFG5:
+ /* CPUCFG selects 4 and 5 are related to the input clock
+ * signal.
+ *
+ * Unimplemented for now.
+ */
+ return 0;
+ case LOONGSON_CFG6:
+ /* CPUCFG select 6 is for the undocumented Safe Extension. */
+ return 0;
+ case LOONGSON_CFG7:
+ /* CPUCFG select 7 is for the virtualization extension.
+ * We don't know if the two currently known features are
+ * supported on older cores according to the public
+ * documentation, so leave this at zero.
+ */
+ return 0;
+ }
+
+ /*
+ * Return 0 for unrecognized CPUCFG selects, which is real hardware
+ * behavior observed on Loongson 3A R4.
+ */
+ return 0;
+}
+#else
+static inline void loongson3_cpucfg_synthesize_data(struct cpuinfo_mips *c)
+{
+}
+
+static inline bool loongson3_cpucfg_emulation_enabled(struct cpuinfo_mips *c)
+{
+ return false;
+}
+
+static inline u32 loongson3_cpucfg_read_synthesized(struct cpuinfo_mips *c,
+ __u64 sel)
+{
+ return 0;
+}
+#endif
+
+#endif /* _ASM_MACH_LOONGSON64_CPUCFG_EMUL_H_ */
diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h
new file mode 100644
index 000000000..98ea977cf
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson64/irq.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_LOONGSON64_IRQ_H_
+#define __ASM_MACH_LOONGSON64_IRQ_H_
+
+/* cpu core interrupt numbers */
+#define NR_IRQS_LEGACY 16
+#define NR_MIPS_CPU_IRQS 8
+#define NR_MAX_CHAINED_IRQS 40 /* Chained IRQs are those not directly used by devices */
+#define NR_IRQS (NR_IRQS_LEGACY + NR_MIPS_CPU_IRQS + NR_MAX_CHAINED_IRQS + 256)
+
+#define MIPS_CPU_IRQ_BASE NR_IRQS_LEGACY
+
+#include <asm/mach-generic/irq.h>
+
+#endif /* __ASM_MACH_LOONGSON64_IRQ_H_ */
diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
new file mode 100644
index 000000000..28572ddfb
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
@@ -0,0 +1,86 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Embedded Alley Solutions, Inc
+ * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2009 Jiajie Chen (chenjiajie@cse.buaa.edu.cn)
+ * Copyright (C) 2012 Huacai Chen (chenhc@lemote.com)
+ */
+#ifndef __ASM_MACH_LOONGSON64_KERNEL_ENTRY_H
+#define __ASM_MACH_LOONGSON64_KERNEL_ENTRY_H
+
+#include <asm/cpu.h>
+
+/*
+ * Override macros used in arch/mips/kernel/head.S.
+ */
+ .macro kernel_entry_setup
+ .set push
+ .set mips64
+ /* Set LPA on LOONGSON3 config3 */
+ mfc0 t0, CP0_CONFIG3
+ or t0, (0x1 << 7)
+ mtc0 t0, CP0_CONFIG3
+ /* Set ELPA on LOONGSON3 pagegrain */
+ mfc0 t0, CP0_PAGEGRAIN
+ or t0, (0x1 << 29)
+ mtc0 t0, CP0_PAGEGRAIN
+ /* Enable STFill Buffer */
+ mfc0 t0, CP0_PRID
+ /* Loongson-3A R4+ */
+ andi t1, t0, PRID_IMP_MASK
+ li t2, PRID_IMP_LOONGSON_64G
+ beq t1, t2, 1f
+ nop
+ /* Loongson-3A R2/R3 */
+ andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
+ slti t0, t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0)
+ bnez t0, 2f
+ nop
+1:
+ mfc0 t0, CP0_CONFIG6
+ or t0, 0x100
+ mtc0 t0, CP0_CONFIG6
+2:
+ _ehb
+ .set pop
+ .endm
+
+/*
+ * Do SMP slave processor setup.
+ */
+ .macro smp_slave_setup
+ .set push
+ .set mips64
+ /* Set LPA on LOONGSON3 config3 */
+ mfc0 t0, CP0_CONFIG3
+ or t0, (0x1 << 7)
+ mtc0 t0, CP0_CONFIG3
+ /* Set ELPA on LOONGSON3 pagegrain */
+ mfc0 t0, CP0_PAGEGRAIN
+ or t0, (0x1 << 29)
+ mtc0 t0, CP0_PAGEGRAIN
+ /* Enable STFill Buffer */
+ mfc0 t0, CP0_PRID
+ /* Loongson-3A R4+ */
+ andi t1, t0, PRID_IMP_MASK
+ li t2, PRID_IMP_LOONGSON_64G
+ beq t1, t2, 1f
+ nop
+ /* Loongson-3A R2/R3 */
+ andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
+ slti t0, t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0)
+ bnez t0, 2f
+ nop
+1:
+ mfc0 t0, CP0_CONFIG6
+ or t0, 0x100
+ mtc0 t0, CP0_CONFIG6
+2:
+ _ehb
+ .set pop
+ .endm
+
+#endif /* __ASM_MACH_LOONGSON64_KERNEL_ENTRY_H */
diff --git a/arch/mips/include/asm/mach-loongson64/loongson.h b/arch/mips/include/asm/mach-loongson64/loongson.h
new file mode 100644
index 000000000..fde1b75c4
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson64/loongson.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#ifndef __ASM_MACH_LOONGSON64_LOONGSON_H
+#define __ASM_MACH_LOONGSON64_LOONGSON_H
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <boot_param.h>
+
+
+/* machine-specific reboot/halt operation */
+extern void mach_prepare_reboot(void);
+extern void mach_prepare_shutdown(void);
+
+/* environment arguments from bootloader */
+extern u32 cpu_clock_freq;
+extern u32 memsize, highmemsize;
+extern const struct plat_smp_ops loongson3_smp_ops;
+
+/* loongson-specific command line, env and memory initialization */
+extern void __init prom_init_memory(void);
+extern void __init prom_init_env(void);
+extern void *loongson_fdt_blob;
+
+/* irq operation functions */
+extern void mach_irq_dispatch(unsigned int pending);
+extern int mach_i8259_irq(void);
+
+/* We need this in some places... */
+#define delay() ({ \
+ int x; \
+ for (x = 0; x < 100000; x++) \
+ __asm__ __volatile__(""); \
+})
+
+#define LOONGSON_REG(x) \
+ (*(volatile u32 *)((char *)CKSEG1ADDR(LOONGSON_REG_BASE) + (x)))
+
+#define LOONGSON3_REG8(base, x) \
+ (*(volatile u8 *)((char *)TO_UNCAC(base) + (x)))
+
+#define LOONGSON3_REG32(base, x) \
+ (*(volatile u32 *)((char *)TO_UNCAC(base) + (x)))
+
+#define LOONGSON_FLASH_BASE 0x1c000000
+#define LOONGSON_FLASH_SIZE 0x02000000 /* 32M */
+#define LOONGSON_FLASH_TOP (LOONGSON_FLASH_BASE+LOONGSON_FLASH_SIZE-1)
+
+#define LOONGSON_LIO0_BASE 0x1e000000
+#define LOONGSON_LIO0_SIZE 0x01C00000 /* 28M */
+#define LOONGSON_LIO0_TOP (LOONGSON_LIO0_BASE+LOONGSON_LIO0_SIZE-1)
+
+#define LOONGSON_BOOT_BASE 0x1fc00000
+#define LOONGSON_BOOT_SIZE 0x00100000 /* 1M */
+#define LOONGSON_BOOT_TOP (LOONGSON_BOOT_BASE+LOONGSON_BOOT_SIZE-1)
+#define LOONGSON_REG_BASE 0x1fe00000
+#define LOONGSON_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */
+#define LOONGSON_REG_TOP (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1)
+/* Loongson-3 specific registers */
+#define LOONGSON3_REG_BASE 0x3ff00000
+#define LOONGSON3_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */
+#define LOONGSON3_REG_TOP (LOONGSON3_REG_BASE+LOONGSON3_REG_SIZE-1)
+
+#define LOONGSON_LIO1_BASE 0x1ff00000
+#define LOONGSON_LIO1_SIZE 0x00100000 /* 1M */
+#define LOONGSON_LIO1_TOP (LOONGSON_LIO1_BASE+LOONGSON_LIO1_SIZE-1)
+
+#define LOONGSON_PCILO0_BASE 0x10000000
+#define LOONGSON_PCILO1_BASE 0x14000000
+#define LOONGSON_PCILO2_BASE 0x18000000
+#define LOONGSON_PCILO_BASE LOONGSON_PCILO0_BASE
+#define LOONGSON_PCILO_SIZE 0x0c000000 /* 64M * 3 */
+#define LOONGSON_PCILO_TOP (LOONGSON_PCILO0_BASE+LOONGSON_PCILO_SIZE-1)
+
+#define LOONGSON_PCICFG_BASE 0x1fe80000
+#define LOONGSON_PCICFG_SIZE 0x00000800 /* 2K */
+#define LOONGSON_PCICFG_TOP (LOONGSON_PCICFG_BASE+LOONGSON_PCICFG_SIZE-1)
+
+#define LOONGSON_PCIIO_BASE loongson_sysconf.pci_io_base
+
+#define LOONGSON_PCIIO_SIZE 0x00100000 /* 1M */
+#define LOONGSON_PCIIO_TOP (LOONGSON_PCIIO_BASE+LOONGSON_PCIIO_SIZE-1)
+
+/* Loongson Register Bases */
+
+#define LOONGSON_PCICONFIGBASE 0x00
+#define LOONGSON_REGBASE 0x100
+
+/* PCI Configuration Registers */
+
+#define LOONGSON_PCI_REG(x) LOONGSON_REG(LOONGSON_PCICONFIGBASE + (x))
+#define LOONGSON_PCIDID LOONGSON_PCI_REG(0x00)
+#define LOONGSON_PCICMD LOONGSON_PCI_REG(0x04)
+#define LOONGSON_PCICLASS LOONGSON_PCI_REG(0x08)
+#define LOONGSON_PCILTIMER LOONGSON_PCI_REG(0x0c)
+#define LOONGSON_PCIBASE0 LOONGSON_PCI_REG(0x10)
+#define LOONGSON_PCIBASE1 LOONGSON_PCI_REG(0x14)
+#define LOONGSON_PCIBASE2 LOONGSON_PCI_REG(0x18)
+#define LOONGSON_PCIBASE3 LOONGSON_PCI_REG(0x1c)
+#define LOONGSON_PCIBASE4 LOONGSON_PCI_REG(0x20)
+#define LOONGSON_PCIEXPRBASE LOONGSON_PCI_REG(0x30)
+#define LOONGSON_PCIINT LOONGSON_PCI_REG(0x3c)
+
+#define LOONGSON_PCI_ISR4C LOONGSON_PCI_REG(0x4c)
+
+#define LOONGSON_PCICMD_PERR_CLR 0x80000000
+#define LOONGSON_PCICMD_SERR_CLR 0x40000000
+#define LOONGSON_PCICMD_MABORT_CLR 0x20000000
+#define LOONGSON_PCICMD_MTABORT_CLR 0x10000000
+#define LOONGSON_PCICMD_TABORT_CLR 0x08000000
+#define LOONGSON_PCICMD_MPERR_CLR 0x01000000
+#define LOONGSON_PCICMD_PERRRESPEN 0x00000040
+#define LOONGSON_PCICMD_ASTEPEN 0x00000080
+#define LOONGSON_PCICMD_SERREN 0x00000100
+#define LOONGSON_PCILTIMER_BUSLATENCY 0x0000ff00
+#define LOONGSON_PCILTIMER_BUSLATENCY_SHIFT 8
+
+/* Loongson h/w Configuration */
+
+#define LOONGSON_GENCFG_OFFSET 0x4
+#define LOONGSON_GENCFG LOONGSON_REG(LOONGSON_REGBASE + LOONGSON_GENCFG_OFFSET)
+
+#define LOONGSON_GENCFG_DEBUGMODE 0x00000001
+#define LOONGSON_GENCFG_SNOOPEN 0x00000002
+#define LOONGSON_GENCFG_CPUSELFRESET 0x00000004
+
+#define LOONGSON_GENCFG_FORCE_IRQA 0x00000008
+#define LOONGSON_GENCFG_IRQA_ISOUT 0x00000010
+#define LOONGSON_GENCFG_IRQA_FROM_INT1 0x00000020
+#define LOONGSON_GENCFG_BYTESWAP 0x00000040
+
+#define LOONGSON_GENCFG_UNCACHED 0x00000080
+#define LOONGSON_GENCFG_PREFETCHEN 0x00000100
+#define LOONGSON_GENCFG_WBEHINDEN 0x00000200
+#define LOONGSON_GENCFG_CACHEALG 0x00000c00
+#define LOONGSON_GENCFG_CACHEALG_SHIFT 10
+#define LOONGSON_GENCFG_PCIQUEUE 0x00001000
+#define LOONGSON_GENCFG_CACHESTOP 0x00002000
+#define LOONGSON_GENCFG_MSTRBYTESWAP 0x00004000
+#define LOONGSON_GENCFG_BUSERREN 0x00008000
+#define LOONGSON_GENCFG_NORETRYTIMEOUT 0x00010000
+#define LOONGSON_GENCFG_SHORTCOPYTIMEOUT 0x00020000
+
+/* PCI address map control */
+
+#define LOONGSON_PCIMAP LOONGSON_REG(LOONGSON_REGBASE + 0x10)
+#define LOONGSON_PCIMEMBASECFG LOONGSON_REG(LOONGSON_REGBASE + 0x14)
+#define LOONGSON_PCIMAP_CFG LOONGSON_REG(LOONGSON_REGBASE + 0x18)
+
+/* GPIO Regs - r/w */
+
+#define LOONGSON_GPIODATA LOONGSON_REG(LOONGSON_REGBASE + 0x1c)
+#define LOONGSON_GPIOIE LOONGSON_REG(LOONGSON_REGBASE + 0x20)
+
+/* ICU Configuration Regs - r/w */
+
+#define LOONGSON_INTEDGE LOONGSON_REG(LOONGSON_REGBASE + 0x24)
+#define LOONGSON_INTSTEER LOONGSON_REG(LOONGSON_REGBASE + 0x28)
+#define LOONGSON_INTPOL LOONGSON_REG(LOONGSON_REGBASE + 0x2c)
+
+/* ICU Enable Regs - IntEn & IntISR are r/o. */
+
+#define LOONGSON_INTENSET LOONGSON_REG(LOONGSON_REGBASE + 0x30)
+#define LOONGSON_INTENCLR LOONGSON_REG(LOONGSON_REGBASE + 0x34)
+#define LOONGSON_INTEN LOONGSON_REG(LOONGSON_REGBASE + 0x38)
+#define LOONGSON_INTISR LOONGSON_REG(LOONGSON_REGBASE + 0x3c)
+
+/* ICU */
+#define LOONGSON_ICU_MBOXES 0x0000000f
+#define LOONGSON_ICU_MBOXES_SHIFT 0
+#define LOONGSON_ICU_DMARDY 0x00000010
+#define LOONGSON_ICU_DMAEMPTY 0x00000020
+#define LOONGSON_ICU_COPYRDY 0x00000040
+#define LOONGSON_ICU_COPYEMPTY 0x00000080
+#define LOONGSON_ICU_COPYERR 0x00000100
+#define LOONGSON_ICU_PCIIRQ 0x00000200
+#define LOONGSON_ICU_MASTERERR 0x00000400
+#define LOONGSON_ICU_SYSTEMERR 0x00000800
+#define LOONGSON_ICU_DRAMPERR 0x00001000
+#define LOONGSON_ICU_RETRYERR 0x00002000
+#define LOONGSON_ICU_GPIOS 0x01ff0000
+#define LOONGSON_ICU_GPIOS_SHIFT 16
+#define LOONGSON_ICU_GPINS 0x7e000000
+#define LOONGSON_ICU_GPINS_SHIFT 25
+#define LOONGSON_ICU_MBOX(N) (1<<(LOONGSON_ICU_MBOXES_SHIFT+(N)))
+#define LOONGSON_ICU_GPIO(N) (1<<(LOONGSON_ICU_GPIOS_SHIFT+(N)))
+#define LOONGSON_ICU_GPIN(N) (1<<(LOONGSON_ICU_GPINS_SHIFT+(N)))
+
+/* PCI prefetch window base & mask */
+
+#define LOONGSON_MEM_WIN_BASE_L LOONGSON_REG(LOONGSON_REGBASE + 0x40)
+#define LOONGSON_MEM_WIN_BASE_H LOONGSON_REG(LOONGSON_REGBASE + 0x44)
+#define LOONGSON_MEM_WIN_MASK_L LOONGSON_REG(LOONGSON_REGBASE + 0x48)
+#define LOONGSON_MEM_WIN_MASK_H LOONGSON_REG(LOONGSON_REGBASE + 0x4c)
+
+/* PCI_Hit*_Sel_* */
+
+#define LOONGSON_PCI_HIT0_SEL_L LOONGSON_REG(LOONGSON_REGBASE + 0x50)
+#define LOONGSON_PCI_HIT0_SEL_H LOONGSON_REG(LOONGSON_REGBASE + 0x54)
+#define LOONGSON_PCI_HIT1_SEL_L LOONGSON_REG(LOONGSON_REGBASE + 0x58)
+#define LOONGSON_PCI_HIT1_SEL_H LOONGSON_REG(LOONGSON_REGBASE + 0x5c)
+#define LOONGSON_PCI_HIT2_SEL_L LOONGSON_REG(LOONGSON_REGBASE + 0x60)
+#define LOONGSON_PCI_HIT2_SEL_H LOONGSON_REG(LOONGSON_REGBASE + 0x64)
+
+/* PXArb Config & Status */
+
+#define LOONGSON_PXARB_CFG LOONGSON_REG(LOONGSON_REGBASE + 0x68)
+#define LOONGSON_PXARB_STATUS LOONGSON_REG(LOONGSON_REGBASE + 0x6c)
+
+#define MAX_PACKAGES 4
+
+/* Chip Config register of each physical cpu package, PRid >= Loongson-2F */
+extern u64 loongson_chipcfg[MAX_PACKAGES];
+#define LOONGSON_CHIPCFG(id) (*(volatile u32 *)(loongson_chipcfg[id]))
+
+/* Chip Temperature register of each physical cpu package, PRid >= Loongson-3A */
+extern u64 loongson_chiptemp[MAX_PACKAGES];
+#define LOONGSON_CHIPTEMP(id) (*(volatile u32 *)(loongson_chiptemp[id]))
+
+/* Freq Control register of each physical cpu package, PRid >= Loongson-3B */
+extern u64 loongson_freqctrl[MAX_PACKAGES];
+#define LOONGSON_FREQCTRL(id) (*(volatile u32 *)(loongson_freqctrl[id]))
+
+/* pcimap */
+
+#define LOONGSON_PCIMAP_PCIMAP_LO0 0x0000003f
+#define LOONGSON_PCIMAP_PCIMAP_LO0_SHIFT 0
+#define LOONGSON_PCIMAP_PCIMAP_LO1 0x00000fc0
+#define LOONGSON_PCIMAP_PCIMAP_LO1_SHIFT 6
+#define LOONGSON_PCIMAP_PCIMAP_LO2 0x0003f000
+#define LOONGSON_PCIMAP_PCIMAP_LO2_SHIFT 12
+#define LOONGSON_PCIMAP_PCIMAP_2 0x00040000
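+/*
+ * Each PCIMAP window field is 6 bits wide: take the 64 MB-aligned base of
+ * ADDR (bits 31:26) and place it in the field for window WIN.
+ */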
+#define LOONGSON_PCIMAP_WIN(WIN, ADDR) \
+ ((((ADDR)>>26) & LOONGSON_PCIMAP_PCIMAP_LO0) << ((WIN)*6))
+
+#endif /* __ASM_MACH_LOONGSON64_LOONGSON_H */
diff --git a/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h b/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
new file mode 100644
index 000000000..545f91f2a
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LOONGSON_HWMON_H_
+#define __LOONGSON_HWMON_H_
+
+#include <linux/types.h>
+
+#define MIN_TEMP 0
+#define MAX_TEMP 255
+#define NOT_VALID_TEMP 999
+
+typedef int (*get_temp_fun)(int);
+extern int loongson3_cpu_temp(int);
+
+/* 0:Max speed, 1:Manual, 2:Auto */
+enum fan_control_mode {
+ FAN_FULL_MODE = 0,
+ FAN_MANUAL_MODE = 1,
+ FAN_AUTO_MODE = 2,
+ FAN_MODE_END
+};
+
+struct temp_range {
+ u8 low;
+ u8 high;
+ u8 level;
+};
+
+#define CONSTANT_SPEED_POLICY 0 /* at constant speed */
+#define STEP_SPEED_POLICY 1 /* use up/down arrays to describe policy */
+#define KERNEL_HELPER_POLICY 2 /* kernel as a helper to fan control */
+
+#define MAX_STEP_NUM 16
+#define MAX_FAN_LEVEL 255
+
+/* loongson_fan_policy is used when the fan works in FAN_AUTO_MODE */
+struct loongson_fan_policy {
+ u8 type;
+
+ /* percent only used when type is CONSTANT_SPEED_POLICY */
+ u8 percent;
+
+ /* period between two checks (unit: seconds) */
+ u8 adjust_period;
+
+ /* fan adjustment usually depends on a temperature input */
+ get_temp_fun depend_temp;
+
+ /* up_step/down_step used when type is STEP_SPEED_POLICY */
+ u8 up_step_num;
+ u8 down_step_num;
+ struct temp_range up_step[MAX_STEP_NUM];
+ struct temp_range down_step[MAX_STEP_NUM];
+ struct delayed_work work;
+};
+
+#endif /* __LOONGSON_HWMON_H_ */
diff --git a/arch/mips/include/asm/mach-loongson64/loongson_regs.h b/arch/mips/include/asm/mach-loongson64/loongson_regs.h
new file mode 100644
index 000000000..83dbb9fdf
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson64/loongson_regs.h
@@ -0,0 +1,246 @@
+/*
+ * Read/Write Loongson Extension Registers
+ */
+
+#ifndef _LOONGSON_REGS_H_
+#define _LOONGSON_REGS_H_
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+#include <asm/mipsregs.h>
+#include <asm/cpu.h>
+
+static inline bool cpu_has_cfg(void)
+{
+ return ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G);
+}
+
+static inline u32 read_cpucfg(u32 reg)
+{
+ u32 __res;
+
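+ /* CPUCFG reg, val -- Loongson-specific instruction, emitted as a raw .word */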
+ __asm__ __volatile__(
+ "parse_r __res,%0\n\t"
+ "parse_r reg,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8080118 | (reg << 21) | (__res << 11))\n\t"
+ :"=r"(__res)
+ :"r"(reg)
+ :
+ );
+ return __res;
+}
+
+/* Bit Domains for CFG registers */
+#define LOONGSON_CFG0 0x0
+#define LOONGSON_CFG0_PRID GENMASK(31, 0)
+
+#define LOONGSON_CFG1 0x1
+#define LOONGSON_CFG1_FP BIT(0)
+#define LOONGSON_CFG1_FPREV GENMASK(3, 1)
+#define LOONGSON_CFG1_MMI BIT(4)
+#define LOONGSON_CFG1_MSA1 BIT(5)
+#define LOONGSON_CFG1_MSA2 BIT(6)
+#define LOONGSON_CFG1_CGP BIT(7)
+#define LOONGSON_CFG1_WRP BIT(8)
+#define LOONGSON_CFG1_LSX1 BIT(9)
+#define LOONGSON_CFG1_LSX2 BIT(10)
+#define LOONGSON_CFG1_LASX BIT(11)
+#define LOONGSON_CFG1_R6FXP BIT(12)
+#define LOONGSON_CFG1_R6CRCP BIT(13)
+#define LOONGSON_CFG1_R6FPP BIT(14)
+#define LOONGSON_CFG1_CNT64 BIT(15)
+#define LOONGSON_CFG1_LSLDR0 BIT(16)
+#define LOONGSON_CFG1_LSPREF BIT(17)
+#define LOONGSON_CFG1_LSPREFX BIT(18)
+#define LOONGSON_CFG1_LSSYNCI BIT(19)
+#define LOONGSON_CFG1_LSUCA BIT(20)
+#define LOONGSON_CFG1_LLSYNC BIT(21)
+#define LOONGSON_CFG1_TGTSYNC BIT(22)
+#define LOONGSON_CFG1_LLEXC BIT(23)
+#define LOONGSON_CFG1_SCRAND BIT(24)
+#define LOONGSON_CFG1_MUALP BIT(25)
+#define LOONGSON_CFG1_KMUALEN BIT(26)
+#define LOONGSON_CFG1_ITLBT BIT(27)
+#define LOONGSON_CFG1_LSUPERF BIT(28)
+#define LOONGSON_CFG1_SFBP BIT(29)
+#define LOONGSON_CFG1_CDMAP BIT(30)
+
+#define LOONGSON_CFG1_FPREV_OFFSET 1
+
+#define LOONGSON_CFG2 0x2
+#define LOONGSON_CFG2_LEXT1 BIT(0)
+#define LOONGSON_CFG2_LEXT2 BIT(1)
+#define LOONGSON_CFG2_LEXT3 BIT(2)
+#define LOONGSON_CFG2_LSPW BIT(3)
+#define LOONGSON_CFG2_LBT1 BIT(4)
+#define LOONGSON_CFG2_LBT2 BIT(5)
+#define LOONGSON_CFG2_LBT3 BIT(6)
+#define LOONGSON_CFG2_LBTMMU BIT(7)
+#define LOONGSON_CFG2_LPMP BIT(8)
+#define LOONGSON_CFG2_LPMREV GENMASK(11, 9)
+#define LOONGSON_CFG2_LAMO BIT(12)
+#define LOONGSON_CFG2_LPIXU BIT(13)
+#define LOONGSON_CFG2_LPIXNU BIT(14)
+#define LOONGSON_CFG2_LVZP BIT(15)
+#define LOONGSON_CFG2_LVZREV GENMASK(18, 16)
+#define LOONGSON_CFG2_LGFTP BIT(19)
+#define LOONGSON_CFG2_LGFTPREV GENMASK(22, 20)
+#define LOONGSON_CFG2_LLFTP BIT(23)
+#define LOONGSON_CFG2_LLFTPREV GENMASK(26, 24)
+#define LOONGSON_CFG2_LCSRP BIT(27)
+#define LOONGSON_CFG2_LDISBLIKELY BIT(28)
+
+#define LOONGSON_CFG2_LPMREV_OFFSET 9
+#define LOONGSON_CFG2_LPM_REV1 (1 << LOONGSON_CFG2_LPMREV_OFFSET)
+#define LOONGSON_CFG2_LPM_REV2 (2 << LOONGSON_CFG2_LPMREV_OFFSET)
+#define LOONGSON_CFG2_LVZREV_OFFSET 16
+#define LOONGSON_CFG2_LVZ_REV1 (1 << LOONGSON_CFG2_LVZREV_OFFSET)
+#define LOONGSON_CFG2_LVZ_REV2 (2 << LOONGSON_CFG2_LVZREV_OFFSET)
+
+#define LOONGSON_CFG3 0x3
+#define LOONGSON_CFG3_LCAMP BIT(0)
+#define LOONGSON_CFG3_LCAMREV GENMASK(3, 1)
+#define LOONGSON_CFG3_LCAMNUM GENMASK(11, 4)
+#define LOONGSON_CFG3_LCAMKW GENMASK(19, 12)
+#define LOONGSON_CFG3_LCAMVW GENMASK(27, 20)
+
+#define LOONGSON_CFG3_LCAMREV_OFFSET 1
+#define LOONGSON_CFG3_LCAM_REV1 (1 << LOONGSON_CFG3_LCAMREV_OFFSET)
+#define LOONGSON_CFG3_LCAM_REV2 (2 << LOONGSON_CFG3_LCAMREV_OFFSET)
+#define LOONGSON_CFG3_LCAMNUM_OFFSET 4
+#define LOONGSON_CFG3_LCAMNUM_REV1 (0x3f << LOONGSON_CFG3_LCAMNUM_OFFSET)
+#define LOONGSON_CFG3_LCAMKW_OFFSET 12
+#define LOONGSON_CFG3_LCAMKW_REV1 (0x27 << LOONGSON_CFG3_LCAMKW_OFFSET)
+#define LOONGSON_CFG3_LCAMVW_OFFSET 20
+#define LOONGSON_CFG3_LCAMVW_REV1 (0x3f << LOONGSON_CFG3_LCAMVW_OFFSET)
+
+#define LOONGSON_CFG4 0x4
+#define LOONGSON_CFG4_CCFREQ GENMASK(31, 0)
+
+#define LOONGSON_CFG5 0x5
+#define LOONGSON_CFG5_CFM GENMASK(15, 0)
+#define LOONGSON_CFG5_CFD GENMASK(31, 16)
+
+#define LOONGSON_CFG6 0x6
+
+#define LOONGSON_CFG7 0x7
+#define LOONGSON_CFG7_GCCAEQRP BIT(0)
+#define LOONGSON_CFG7_UCAWINP BIT(1)
+
+static inline bool cpu_has_csr(void)
+{
+ if (cpu_has_cfg())
+ return (read_cpucfg(LOONGSON_CFG2) & LOONGSON_CFG2_LCSRP);
+
+ return false;
+}
+
+static inline u32 csr_readl(u32 reg)
+{
+ u32 __res;
+
+ /* RDCSR reg, val */
+ __asm__ __volatile__(
+ "parse_r __res,%0\n\t"
+ "parse_r reg,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8000118 | (reg << 21) | (__res << 11))\n\t"
+ :"=r"(__res)
+ :"r"(reg)
+ :
+ );
+ return __res;
+}
+
+static inline u64 csr_readq(u32 reg)
+{
+ u64 __res;
+
+ /* DRDCSR reg, val */
+ __asm__ __volatile__(
+ "parse_r __res,%0\n\t"
+ "parse_r reg,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8020118 | (reg << 21) | (__res << 11))\n\t"
+ :"=r"(__res)
+ :"r"(reg)
+ :
+ );
+ return __res;
+}
+
+static inline void csr_writel(u32 val, u32 reg)
+{
+ /* WRCSR reg, val */
+ __asm__ __volatile__(
+ "parse_r reg,%0\n\t"
+ "parse_r val,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8010118 | (reg << 21) | (val << 11))\n\t"
+ :
+ :"r"(reg),"r"(val)
+ :
+ );
+}
+
+static inline void csr_writeq(u64 val, u32 reg)
+{
+ /* DWRCSR reg, val */
+ __asm__ __volatile__(
+ "parse_r reg,%0\n\t"
+ "parse_r val,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8030118 | (reg << 21) | (val << 11))\n\t"
+ :
+ :"r"(reg),"r"(val)
+ :
+ );
+}
+
+/* Public CSR registers can also be accessed with regular addresses */
+#define CSR_PUBLIC_MMIO_BASE 0x1fe00000
+
+#define MMIO_CSR(x) (void *)TO_UNCAC(CSR_PUBLIC_MMIO_BASE + x)
+
+#define LOONGSON_CSR_FEATURES 0x8
+#define LOONGSON_CSRF_TEMP BIT(0)
+#define LOONGSON_CSRF_NODECNT BIT(1)
+#define LOONGSON_CSRF_MSI BIT(2)
+#define LOONGSON_CSRF_EXTIOI BIT(3)
+#define LOONGSON_CSRF_IPI BIT(4)
+#define LOONGSON_CSRF_FREQ BIT(5)
+
+#define LOONGSON_CSR_VENDOR 0x10 /* Vendor name string, should be "Loongson" */
+#define LOONGSON_CSR_CPUNAME 0x20 /* Processor name string */
+#define LOONGSON_CSR_NODECNT 0x408
+#define LOONGSON_CSR_CPUTEMP 0x428
+
+/* PerCore CSR, only accessible by local cores */
+#define LOONGSON_CSR_IPI_STATUS 0x1000
+#define LOONGSON_CSR_IPI_EN 0x1004
+#define LOONGSON_CSR_IPI_SET 0x1008
+#define LOONGSON_CSR_IPI_CLEAR 0x100c
+#define LOONGSON_CSR_IPI_SEND 0x1040
+#define CSR_IPI_SEND_IP_SHIFT 0
+#define CSR_IPI_SEND_CPU_SHIFT 16
+#define CSR_IPI_SEND_BLOCK BIT(31)
+
+static inline u64 drdtime(void)
+{
+ int rID = 0;
+ u64 val = 0;
+
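+ /* Read the CPU's 64-bit time counter (drdtime), encoded as a raw .word like the CSR helpers above */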
+ __asm__ __volatile__(
+ "parse_r rID,%0\n\t"
+ "parse_r val,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8090118 | (rID << 21) | (val << 11))\n\t"
+ :"=r"(rID),"=r"(val)
+ :
+ );
+ return val;
+}
+
+#endif
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
new file mode 100644
index 000000000..ebb1deaa7
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
+ * Institute of Computing Technology
+ * Author: Xiang Gao, gaoxiang@ict.ac.cn
+ * Huacai Chen, chenhc@lemote.com
+ * Xiaofu Meng, Shuangshuang Zhang
+ */
+#ifndef _ASM_MACH_LOONGSON64_MMZONE_H
+#define _ASM_MACH_LOONGSON64_MMZONE_H
+
+#define NODE_ADDRSPACE_SHIFT 44
+
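+/* The NUMA node id lives in bits 47:44 of a physical address. */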
+#define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
+#define nid_to_addrbase(nid) ((unsigned long)(nid) << NODE_ADDRSPACE_SHIFT)
+
+extern struct pglist_data *__node_data[];
+
+#define NODE_DATA(n) (__node_data[n])
+
+extern void setup_zero_pages(void);
+extern void __init prom_init_numa_memory(void);
+
+#endif /* _ASM_MACH_LOONGSON64_MMZONE_H */
diff --git a/arch/mips/include/asm/mach-loongson64/pci.h b/arch/mips/include/asm/mach-loongson64/pci.h
new file mode 100644
index 000000000..8b59d64a2
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson64/pci.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2008 Zhang Le <r0bertz@gentoo.org>
+ * Copyright (c) 2009 Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#ifndef __ASM_MACH_LOONGSON64_PCI_H_
+#define __ASM_MACH_LOONGSON64_PCI_H_
+
+extern struct pci_ops loongson_pci_ops;
+
+/* this is an offset from mips_io_port_base */
+#define LOONGSON_PCI_IO_START 0x00004000UL
+
+#define LOONGSON_PCI_MEM_START 0x40000000UL
+#define LOONGSON_PCI_MEM_END 0x7effffffUL
+
+
+#endif /* !__ASM_MACH_LOONGSON64_PCI_H_ */
diff --git a/arch/mips/include/asm/mach-loongson64/spaces.h b/arch/mips/include/asm/mach-loongson64/spaces.h
new file mode 100644
index 000000000..ce04e998a
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson64/spaces.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_LOONGSON64_SPACES_H_
+#define __ASM_MACH_LOONGSON64_SPACES_H_
+
+#if defined(CONFIG_64BIT)
+#define CAC_BASE _AC(0x9800000000000000, UL)
+#endif /* CONFIG_64BIT */
+
+/* Skip 128k to trap NULL pointer dereferences */
+#define PCI_IOBASE _AC(0xc000000000000000 + SZ_128K, UL)
+#define PCI_IOSIZE SZ_16M
+#define MAP_BASE (PCI_IOBASE + PCI_IOSIZE)
+
+#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
+
+#include <asm/mach-generic/spaces.h>
+#endif
diff --git a/arch/mips/include/asm/mach-loongson64/topology.h b/arch/mips/include/asm/mach-loongson64/topology.h
new file mode 100644
index 000000000..3414a1fd1
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson64/topology.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MACH_TOPOLOGY_H
+#define _ASM_MACH_TOPOLOGY_H
+
+#ifdef CONFIG_NUMA
+
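+/* Four cores per node: the node id is the logical CPU number divided by 4. */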
+#define cpu_to_node(cpu) (cpu_logical_map(cpu) >> 2)
+
+extern cpumask_t __node_cpumask[];
+#define cpumask_of_node(node) (&__node_cpumask[node])
+
+struct pci_bus;
+extern int pcibus_to_node(struct pci_bus *);
+
+#define cpumask_of_pcibus(bus) (cpu_online_mask)
+
+extern unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
+
+#define node_distance(from, to) (__node_distances[(from)][(to)])
+
+#endif
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_MACH_TOPOLOGY_H */
diff --git a/arch/mips/include/asm/mach-loongson64/workarounds.h b/arch/mips/include/asm/mach-loongson64/workarounds.h
new file mode 100644
index 000000000..17b71172a
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson64/workarounds.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_LOONGSON64_WORKAROUNDS_H_
+#define __ASM_MACH_LOONGSON64_WORKAROUNDS_H_
+
+#define WORKAROUND_CPUFREQ 0x00000001
+#define WORKAROUND_CPUHOTPLUG 0x00000002
+
+#endif
diff --git a/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h b/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h
new file mode 100644
index 000000000..de3b66a37
--- /dev/null
+++ b/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h
@@ -0,0 +1,70 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 2004 Chris Dearman
+ * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_MACH_MIPS_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_MIPS_CPU_FEATURE_OVERRIDES_H
+
+
+/*
+ * CPU feature overrides for MIPS boards
+ */
+#ifdef CONFIG_CPU_MIPS32
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_4k_cache 1
+/* #define cpu_has_fpu ? */
+/* #define cpu_has_32fpr ? */
+#define cpu_has_counter 1
+/* #define cpu_has_watch ? */
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+/* #define cpu_has_cache_cdex_p ? */
+/* #define cpu_has_cache_cdex_s ? */
+/* #define cpu_has_prefetch ? */
+#define cpu_has_mcheck 1
+/* #define cpu_has_ejtag ? */
+#define cpu_has_llsc 1
+/* #define cpu_has_vtag_icache ? */
+/* #define cpu_has_dc_aliases ? */
+/* #define cpu_has_ic_fills_f_dc ? */
+#define cpu_has_clo_clz 1
+#define cpu_has_nofpuex 0
+/* #define cpu_has_64bits ? */
+/* #define cpu_has_64bit_zero_reg ? */
+/* #define cpu_has_inclusive_pcaches ? */
+#define cpu_icache_snoops_remote_store 1
+#endif
+
+#ifdef CONFIG_CPU_MIPS64
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_4k_cache 1
+/* #define cpu_has_fpu ? */
+/* #define cpu_has_32fpr ? */
+#define cpu_has_counter 1
+/* #define cpu_has_watch ? */
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+/* #define cpu_has_cache_cdex_p ? */
+/* #define cpu_has_cache_cdex_s ? */
+/* #define cpu_has_prefetch ? */
+#define cpu_has_mcheck 1
+/* #define cpu_has_ejtag ? */
+#define cpu_has_llsc 1
+/* #define cpu_has_vtag_icache ? */
+/* #define cpu_has_dc_aliases ? */
+/* #define cpu_has_ic_fills_f_dc ? */
+#define cpu_has_clo_clz 1
+#define cpu_has_nofpuex 0
+/* #define cpu_has_64bits ? */
+/* #define cpu_has_64bit_zero_reg ? */
+/* #define cpu_has_inclusive_pcaches ? */
+#define cpu_icache_snoops_remote_store 1
+#endif
+
+#endif /* __ASM_MACH_MIPS_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-malta/irq.h b/arch/mips/include/asm/mach-malta/irq.h
new file mode 100644
index 000000000..e1bd4298b
--- /dev/null
+++ b/arch/mips/include/asm/mach-malta/irq.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_MIPS_IRQ_H
+#define __ASM_MACH_MIPS_IRQ_H
+
+
+#define NR_IRQS 256
+
+#include <asm/mach-generic/irq.h>
+
+#endif /* __ASM_MACH_MIPS_IRQ_H */
diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
new file mode 100644
index 000000000..ab03eb3fa
--- /dev/null
+++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
@@ -0,0 +1,145 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Chris Dearman (chris@mips.com)
+ * Copyright (C) 2007 Mips Technologies, Inc.
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ */
+#ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
+#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
+
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+
+ /*
+ * Prepare segments for EVA boot:
+ *
+ * This is in case the processor boots in legacy configuration
+ * (SI_EVAReset is de-asserted and CONFIG5.K == 0)
+ *
+ * ========================= Mappings =============================
+ * Virtual memory Physical memory Mapping
+ * 0x00000000 - 0x7fffffff 0x80000000 - 0xffffffff MUSUK (kuseg)
+ * Flat 2GB physical memory
+ *
+ * 0x80000000 - 0x9fffffff 0x00000000 - 0x1fffffff MUSUK (kseg0)
+ * 0xa0000000 - 0xbfffffff 0x00000000 - 0x1fffffff MUSUK (kseg1)
+ * 0xc0000000 - 0xdfffffff - MK (kseg2)
+ * 0xe0000000 - 0xffffffff - MK (kseg3)
+ *
+ *
+ * Lowmem is expanded to 2GB
+ *
+ * The following code uses the t0, t1, t2 and ra registers without
+ * previously preserving them.
+ *
+ */
+ .macro platform_eva_init
+
+ .set push
+ .set reorder
+ /*
+ * Get Config.K0 value and use it to program
+ * the segmentation registers
+ */
+ mfc0 t1, CP0_CONFIG
+ andi t1, 0x7 /* CCA */
+ move t2, t1
+ ins t2, t1, 16, 3
+ /* SegCtl0 */
+ li t0, ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+ or t0, t2
+ mtc0 t0, CP0_SEGCTL0
+
+ /* SegCtl1 */
+ li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (2 << MIPS_SEGCFG_C_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+ ins t0, t1, 16, 3
+ mtc0 t0, CP0_SEGCTL1
+
+ /* SegCtl2 */
+ li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (6 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (4 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+ or t0, t2
+ mtc0 t0, CP0_SEGCTL2
+
+ jal mips_ihb
+ mfc0 t0, $16, 5
+ li t2, 0x40000000 /* K bit */
+ or t0, t0, t2
+ mtc0 t0, $16, 5
+ sync
+ jal mips_ihb
+
+ .set pop
+ .endm
+
+ .macro kernel_entry_setup
+
+#ifdef CONFIG_EVA
+ sync
+ ehb
+
+ mfc0 t1, CP0_CONFIG
+ bgez t1, 9f
+ mfc0 t0, CP0_CONFIG, 1
+ bgez t0, 9f
+ mfc0 t0, CP0_CONFIG, 2
+ bgez t0, 9f
+ mfc0 t0, CP0_CONFIG, 3
+ sll t0, t0, 6 /* SC bit */
+ bgez t0, 9f
+
+ platform_eva_init
+ b 0f
+9:
+ /* Assume we came from YAMON... */
+ PTR_LA v0, 0x9fc00534 /* YAMON print */
+ lw v0, (v0)
+ move a0, zero
+ PTR_LA a1, nonsc_processor
+ jal v0
+
+ PTR_LA v0, 0x9fc00520 /* YAMON exit */
+ lw v0, (v0)
+ li a0, 1
+ jal v0
+
+1: b 1b
+ nop
+ __INITDATA
+nonsc_processor:
+ .asciz "EVA kernel requires a MIPS core with Segment Control implemented\n"
+ __FINIT
+#endif /* CONFIG_EVA */
+0:
+ .endm
+
+/*
+ * Do SMP slave processor setup necessary before we can safely execute C code.
+ */
+ .macro smp_slave_setup
+#ifdef CONFIG_EVA
+ sync
+ ehb
+ platform_eva_init
+#endif
+ .endm
+
+#endif /* __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H */
diff --git a/arch/mips/include/asm/mach-malta/mach-gt64120.h b/arch/mips/include/asm/mach-malta/mach-gt64120.h
new file mode 100644
index 000000000..b9dee7c3e
--- /dev/null
+++ b/arch/mips/include/asm/mach-malta/mach-gt64120.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This is a direct copy of the ev96100.h file, with a global
+ * search and replace. The numbers are the same.
+ *
+ * The reason I'm duplicating this is so that the 64120/96100
+ * defines won't be confusing in the source code.
+ */
+#ifndef _ASM_MACH_MIPS_MACH_GT64120_DEP_H
+#define _ASM_MACH_MIPS_MACH_GT64120_DEP_H
+
+#define MIPS_GT_BASE 0x1be00000
+
+extern unsigned long _pcictrl_gt64120;
+/*
+ * GT64120 config space base address
+ */
+#define GT64120_BASE _pcictrl_gt64120
+
+#endif /* _ASM_MACH_MIPS_MACH_GT64120_DEP_H */
diff --git a/arch/mips/include/asm/mach-malta/mc146818rtc.h b/arch/mips/include/asm/mach-malta/mc146818rtc.h
new file mode 100644
index 000000000..e8cc7fdf7
--- /dev/null
+++ b/arch/mips/include/asm/mach-malta/mc146818rtc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2003 by Ralf Baechle
+ *
+ * RTC routines for Malta style attached PIIX4 device, which contains a
+ * Motorola MC146818A-compatible Real Time Clock.
+ */
+#ifndef __ASM_MACH_MALTA_MC146818RTC_H
+#define __ASM_MACH_MALTA_MC146818RTC_H
+
+#include <asm/io.h>
+#include <asm/mips-boards/generic.h>
+#include <asm/mips-boards/malta.h>
+
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_IRQ 8
+
+static inline unsigned char CMOS_READ(unsigned long addr)
+{
+ outb(addr, MALTA_RTC_ADR_REG);
+ return inb(MALTA_RTC_DAT_REG);
+}
+
+static inline void CMOS_WRITE(unsigned char data, unsigned long addr)
+{
+ outb(addr, MALTA_RTC_ADR_REG);
+ outb(data, MALTA_RTC_DAT_REG);
+}
+
+#define RTC_ALWAYS_BCD 0
+
+#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1900)
+
+#endif /* __ASM_MACH_MALTA_MC146818RTC_H */
diff --git a/arch/mips/include/asm/mach-malta/spaces.h b/arch/mips/include/asm/mach-malta/spaces.h
new file mode 100644
index 000000000..d7e54971e
--- /dev/null
+++ b/arch/mips/include/asm/mach-malta/spaces.h
@@ -0,0 +1,46 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ */
+
+#ifndef _ASM_MALTA_SPACES_H
+#define _ASM_MALTA_SPACES_H
+
+#ifdef CONFIG_EVA
+
+/*
+ * Traditional Malta Board Memory Map for EVA
+ *
+ * 0x00000000 - 0x0fffffff: 1st RAM region, 256MB
+ * 0x10000000 - 0x1bffffff: GIC and CPC Control Registers
+ * 0x1c000000 - 0x1fffffff: I/O And Flash
+ * 0x20000000 - 0x7fffffff: 2nd RAM region, 1.5GB
+ * 0x80000000 - 0xffffffff: Physical memory aliases to 0x0 (2GB)
+ *
+ * The kernel is still located at 0x80000000 (kseg0). However,
+ * the physical mask has been shifted to 0x80000000, which exploits the alias
+ * on the Malta board. As a result, we override __pa_symbol
+ * to perform direct mapping from virtual to physical addresses. In other
+ * words, the 0x80000000 virtual address maps to 0x80000000 physical address
+ * which in turn aliases to 0x0. We do this in order to be able to use a flat
+ * 2GB of memory (0x80000000 - 0xffffffff) so we can avoid the I/O hole in
+ * 0x10000000 - 0x1fffffff.
+ * The last 64KB of physical memory is reserved for correct HIGHMEM
+ * macro arithmetic.
+ *
+ */
+
+#define PAGE_OFFSET _AC(0x0, UL)
+#define PHYS_OFFSET _AC(0x80000000, UL)
+#define HIGHMEM_START _AC(0xffff0000, UL)
+
+#define __pa_symbol(x) (RELOC_HIDE((unsigned long)(x), 0))
+
+#endif /* CONFIG_EVA */
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* _ASM_MALTA_SPACES_H */
diff --git a/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h b/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h
new file mode 100644
index 000000000..0c29ff820
--- /dev/null
+++ b/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h
@@ -0,0 +1,57 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 Netlogic Microsystems
+ * Copyright (C) 2003 Ralf Baechle
+ */
+#ifndef __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_4kex 1
+#define cpu_has_4k_cache 1
+#define cpu_has_watch 1
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_counter 1
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 1
+#define cpu_has_mcheck 1
+#define cpu_has_ejtag 1
+
+#define cpu_has_llsc 1
+#define cpu_has_vtag_icache 0
+#define cpu_has_ic_fills_f_dc 1
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_mipsmt 0
+#define cpu_icache_snoops_remote_store 1
+
+#define cpu_has_64bits 1
+
+#define cpu_has_mips32r1 1
+#define cpu_has_mips64r1 1
+
+#define cpu_has_inclusive_pcaches 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+
+#if defined(CONFIG_CPU_XLR)
+#define cpu_has_userlocal 0
+#define cpu_has_dc_aliases 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r2 0
+#elif defined(CONFIG_CPU_XLP)
+#define cpu_has_userlocal 1
+#define cpu_has_mips32r2 1
+#define cpu_has_mips64r2 1
+#else
+#error "Unknown Netlogic CPU"
+#endif
+
+#endif /* __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-netlogic/irq.h b/arch/mips/include/asm/mach-netlogic/irq.h
new file mode 100644
index 000000000..c0dbd530c
--- /dev/null
+++ b/arch/mips/include/asm/mach-netlogic/irq.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 Netlogic Microsystems.
+ */
+#ifndef __ASM_NETLOGIC_IRQ_H
+#define __ASM_NETLOGIC_IRQ_H
+
+#include <asm/mach-netlogic/multi-node.h>
+#define NLM_IRQS_PER_NODE 1024
+#define NR_IRQS (NLM_IRQS_PER_NODE * NLM_NR_NODES)
+
+#define MIPS_CPU_IRQ_BASE 0
+
+#endif /* __ASM_NETLOGIC_IRQ_H */
diff --git a/arch/mips/include/asm/mach-netlogic/multi-node.h b/arch/mips/include/asm/mach-netlogic/multi-node.h
new file mode 100644
index 000000000..8bdf47e29
--- /dev/null
+++ b/arch/mips/include/asm/mach-netlogic/multi-node.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NETLOGIC_MULTI_NODE_H_
+#define _NETLOGIC_MULTI_NODE_H_
+
+#ifndef CONFIG_NLM_MULTINODE
+#define NLM_NR_NODES 1
+#else
+#if defined(CONFIG_NLM_MULTINODE_2)
+#define NLM_NR_NODES 2
+#elif defined(CONFIG_NLM_MULTINODE_4)
+#define NLM_NR_NODES 4
+#else
+#define NLM_NR_NODES 1
+#endif
+#endif
+
+#define NLM_THREADS_PER_CORE 4
+
+struct nlm_soc_info {
+ unsigned long coremask; /* cores enabled on the soc */
+ unsigned long ebase; /* not used now */
+ uint64_t irqmask; /* EIMR for the node */
+ uint64_t sysbase; /* only for XLP - sys block base */
+ uint64_t picbase; /* PIC block base */
+ spinlock_t piclock; /* lock for PIC access */
+ cpumask_t cpumask; /* logical cpu mask for node */
+ unsigned int socbus;
+};
+
+extern struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
+#define nlm_get_node(i) (&nlm_nodes[i])
+#define nlm_node_present(n) ((n) >= 0 && (n) < NLM_NR_NODES && \
+ nlm_get_node(n)->coremask != 0)
+#ifdef CONFIG_CPU_XLR
+#define nlm_current_node() (&nlm_nodes[0])
+#else
+#define nlm_current_node() (&nlm_nodes[nlm_nodeid()])
+#endif
+void nlm_node_init(int node);
+
+#endif
diff --git a/arch/mips/include/asm/mach-pic32/cpu-feature-overrides.h b/arch/mips/include/asm/mach-pic32/cpu-feature-overrides.h
new file mode 100644
index 000000000..468230834
--- /dev/null
+++ b/arch/mips/include/asm/mach-pic32/cpu-feature-overrides.h
@@ -0,0 +1,32 @@
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_MACH_PIC32_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_PIC32_CPU_FEATURE_OVERRIDES_H
+
+/*
+ * CPU feature overrides for PIC32 boards
+ */
+#ifdef CONFIG_CPU_MIPS32
+#define cpu_has_vint 1
+#define cpu_has_veic 0
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_4k_cache 1
+#define cpu_has_fpu 0
+#define cpu_has_counter 1
+#define cpu_has_llsc 1
+#define cpu_has_nofpuex 0
+#define cpu_icache_snoops_remote_store 1
+#endif
+
+#ifdef CONFIG_CPU_MIPS64
+#error This platform does not support 64bit.
+#endif
+
+#endif /* __ASM_MACH_PIC32_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-pic32/irq.h b/arch/mips/include/asm/mach-pic32/irq.h
new file mode 100644
index 000000000..ddaf999bc
--- /dev/null
+++ b/arch/mips/include/asm/mach-pic32/irq.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ */
+#ifndef __ASM_MACH_PIC32_IRQ_H
+#define __ASM_MACH_PIC32_IRQ_H
+
+#define NR_IRQS 256
+#define MIPS_CPU_IRQ_BASE 0
+
+#include <asm/mach-generic/irq.h>
+
+#endif /* __ASM_MACH_PIC32_IRQ_H */
diff --git a/arch/mips/include/asm/mach-pic32/pic32.h b/arch/mips/include/asm/mach-pic32/pic32.h
new file mode 100644
index 000000000..53918a671
--- /dev/null
+++ b/arch/mips/include/asm/mach-pic32/pic32.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ */
+#ifndef _ASM_MACH_PIC32_H
+#define _ASM_MACH_PIC32_H
+
+#include <linux/io.h>
+
+/*
+ * PIC32 register offsets for SET/CLR/INV where supported.
+ */
+#define PIC32_CLR(_reg) ((_reg) + 0x04)
+#define PIC32_SET(_reg) ((_reg) + 0x08)
+#define PIC32_INV(_reg) ((_reg) + 0x0C)
+
+/*
+ * PIC32 Base Register Offsets
+ */
+#define PIC32_BASE_CONFIG 0x1f800000
+#define PIC32_BASE_OSC 0x1f801200
+#define PIC32_BASE_RESET 0x1f801240
+#define PIC32_BASE_PPS 0x1f801400
+#define PIC32_BASE_UART 0x1f822000
+#define PIC32_BASE_PORT 0x1f860000
+#define PIC32_BASE_DEVCFG2 0x1fc4ff44
+
+/*
+ * Register unlock sequence required for some register access.
+ */
+void pic32_syskey_unlock_debug(const char *fn, const ulong ln);
+#define pic32_syskey_unlock() \
+ pic32_syskey_unlock_debug(__func__, __LINE__)
+
+#endif /* _ASM_MACH_PIC32_H */
diff --git a/arch/mips/include/asm/mach-pic32/spaces.h b/arch/mips/include/asm/mach-pic32/spaces.h
new file mode 100644
index 000000000..eb557b52c
--- /dev/null
+++ b/arch/mips/include/asm/mach-pic32/spaces.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ */
+#ifndef _ASM_MACH_PIC32_SPACES_H
+#define _ASM_MACH_PIC32_SPACES_H
+
+#ifdef CONFIG_PIC32MZDA
+#define PHYS_OFFSET _AC(0x08000000, UL)
+#endif
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* _ASM_MACH_PIC32_SPACES_H */
diff --git a/arch/mips/include/asm/mach-pistachio/irq.h b/arch/mips/include/asm/mach-pistachio/irq.h
new file mode 100644
index 000000000..74ac01650
--- /dev/null
+++ b/arch/mips/include/asm/mach-pistachio/irq.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Pistachio IRQ setup
+ *
+ * Copyright (C) 2014 Google, Inc.
+ */
+
+#ifndef __ASM_MACH_PISTACHIO_IRQ_H
+#define __ASM_MACH_PISTACHIO_IRQ_H
+
+#define NR_IRQS 256
+
+#include <asm/mach-generic/irq.h>
+
+#endif /* __ASM_MACH_PISTACHIO_IRQ_H */
diff --git a/arch/mips/include/asm/mach-ralink/irq.h b/arch/mips/include/asm/mach-ralink/irq.h
new file mode 100644
index 000000000..2262243d1
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/irq.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_RALINK_IRQ_H
+#define __ASM_MACH_RALINK_IRQ_H
+
+#define GIC_NUM_INTRS 64
+#define NR_IRQS 256
+
+#include <asm/mach-generic/irq.h>
+
+#endif
diff --git a/arch/mips/include/asm/mach-ralink/mt7620.h b/arch/mips/include/asm/mach-ralink/mt7620.h
new file mode 100644
index 000000000..757ce53d0
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/mt7620.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+
+#ifndef _MT7620_REGS_H_
+#define _MT7620_REGS_H_
+
+#define MT7620_SYSC_BASE 0x10000000
+
+#define SYSC_REG_CHIP_NAME0 0x00
+#define SYSC_REG_CHIP_NAME1 0x04
+#define SYSC_REG_EFUSE_CFG 0x08
+#define SYSC_REG_CHIP_REV 0x0c
+#define SYSC_REG_SYSTEM_CONFIG0 0x10
+#define SYSC_REG_SYSTEM_CONFIG1 0x14
+#define SYSC_REG_CLKCFG0 0x2c
+#define SYSC_REG_CPU_SYS_CLKCFG 0x3c
+#define SYSC_REG_CPLL_CONFIG0 0x54
+#define SYSC_REG_CPLL_CONFIG1 0x58
+
+#define MT7620_CHIP_NAME0 0x3637544d
+#define MT7620_CHIP_NAME1 0x20203032
+#define MT7628_CHIP_NAME1 0x20203832
+
+#define SYSCFG0_XTAL_FREQ_SEL BIT(6)
+
+#define CHIP_REV_PKG_MASK 0x1
+#define CHIP_REV_PKG_SHIFT 16
+#define CHIP_REV_VER_MASK 0xf
+#define CHIP_REV_VER_SHIFT 8
+#define CHIP_REV_ECO_MASK 0xf
+
+#define CLKCFG0_PERI_CLK_SEL BIT(4)
+
+#define CPU_SYS_CLKCFG_OCP_RATIO_SHIFT 16
+#define CPU_SYS_CLKCFG_OCP_RATIO_MASK 0xf
+#define CPU_SYS_CLKCFG_OCP_RATIO_1 0 /* 1:1 (Reserved) */
+#define CPU_SYS_CLKCFG_OCP_RATIO_1_5 1 /* 1:1.5 (Reserved) */
+#define CPU_SYS_CLKCFG_OCP_RATIO_2 2 /* 1:2 */
+#define CPU_SYS_CLKCFG_OCP_RATIO_2_5 3 /* 1:2.5 (Reserved) */
+#define CPU_SYS_CLKCFG_OCP_RATIO_3 4 /* 1:3 */
+#define CPU_SYS_CLKCFG_OCP_RATIO_3_5 5 /* 1:3.5 (Reserved) */
+#define CPU_SYS_CLKCFG_OCP_RATIO_4 6 /* 1:4 */
+#define CPU_SYS_CLKCFG_OCP_RATIO_5 7 /* 1:5 */
+#define CPU_SYS_CLKCFG_OCP_RATIO_10 8 /* 1:10 */
+#define CPU_SYS_CLKCFG_CPU_FDIV_SHIFT 8
+#define CPU_SYS_CLKCFG_CPU_FDIV_MASK 0x1f
+#define CPU_SYS_CLKCFG_CPU_FFRAC_SHIFT 0
+#define CPU_SYS_CLKCFG_CPU_FFRAC_MASK 0x1f
+
+#define CPLL_CFG0_SW_CFG BIT(31)
+#define CPLL_CFG0_PLL_MULT_RATIO_SHIFT 16
+#define CPLL_CFG0_PLL_MULT_RATIO_MASK 0x7
+#define CPLL_CFG0_LC_CURFCK BIT(15)
+#define CPLL_CFG0_BYPASS_REF_CLK BIT(14)
+#define CPLL_CFG0_PLL_DIV_RATIO_SHIFT 10
+#define CPLL_CFG0_PLL_DIV_RATIO_MASK 0x3
+
+#define CPLL_CFG1_CPU_AUX1 BIT(25)
+#define CPLL_CFG1_CPU_AUX0 BIT(24)
+
+#define SYSCFG0_DRAM_TYPE_MASK 0x3
+#define SYSCFG0_DRAM_TYPE_SHIFT 4
+#define SYSCFG0_DRAM_TYPE_SDRAM 0
+#define SYSCFG0_DRAM_TYPE_DDR1 1
+#define SYSCFG0_DRAM_TYPE_DDR2 2
+#define SYSCFG0_DRAM_TYPE_UNKNOWN 3
+
+#define SYSCFG0_DRAM_TYPE_DDR2_MT7628 0
+#define SYSCFG0_DRAM_TYPE_DDR1_MT7628 1
+
+#define MT7620_DRAM_BASE 0x0
+#define MT7620_SDRAM_SIZE_MIN 2
+#define MT7620_SDRAM_SIZE_MAX 64
+#define MT7620_DDR1_SIZE_MIN 32
+#define MT7620_DDR1_SIZE_MAX 128
+#define MT7620_DDR2_SIZE_MIN 32
+#define MT7620_DDR2_SIZE_MAX 256
+
+#define MT7620_GPIO_MODE_UART0_SHIFT 2
+#define MT7620_GPIO_MODE_UART0_MASK 0x7
+#define MT7620_GPIO_MODE_UART0(x) ((x) << MT7620_GPIO_MODE_UART0_SHIFT)
+#define MT7620_GPIO_MODE_UARTF 0x0
+#define MT7620_GPIO_MODE_PCM_UARTF 0x1
+#define MT7620_GPIO_MODE_PCM_I2S 0x2
+#define MT7620_GPIO_MODE_I2S_UARTF 0x3
+#define MT7620_GPIO_MODE_PCM_GPIO 0x4
+#define MT7620_GPIO_MODE_GPIO_UARTF 0x5
+#define MT7620_GPIO_MODE_GPIO_I2S 0x6
+#define MT7620_GPIO_MODE_GPIO 0x7
+
+#define MT7620_GPIO_MODE_NAND 0
+#define MT7620_GPIO_MODE_SD 1
+#define MT7620_GPIO_MODE_ND_SD_GPIO 2
+#define MT7620_GPIO_MODE_ND_SD_MASK 0x3
+#define MT7620_GPIO_MODE_ND_SD_SHIFT 18
+
+#define MT7620_GPIO_MODE_PCIE_RST 0
+#define MT7620_GPIO_MODE_PCIE_REF 1
+#define MT7620_GPIO_MODE_PCIE_GPIO 2
+#define MT7620_GPIO_MODE_PCIE_MASK 0x3
+#define MT7620_GPIO_MODE_PCIE_SHIFT 16
+
+#define MT7620_GPIO_MODE_WDT_RST 0
+#define MT7620_GPIO_MODE_WDT_REF 1
+#define MT7620_GPIO_MODE_WDT_GPIO 2
+#define MT7620_GPIO_MODE_WDT_MASK 0x3
+#define MT7620_GPIO_MODE_WDT_SHIFT 21
+
+#define MT7620_GPIO_MODE_MDIO 0
+#define MT7620_GPIO_MODE_MDIO_REFCLK 1
+#define MT7620_GPIO_MODE_MDIO_GPIO 2
+#define MT7620_GPIO_MODE_MDIO_MASK 0x3
+#define MT7620_GPIO_MODE_MDIO_SHIFT 7
+
+#define MT7620_GPIO_MODE_I2C 0
+#define MT7620_GPIO_MODE_UART1 5
+#define MT7620_GPIO_MODE_RGMII1 9
+#define MT7620_GPIO_MODE_RGMII2 10
+#define MT7620_GPIO_MODE_SPI 11
+#define MT7620_GPIO_MODE_SPI_REF_CLK 12
+#define MT7620_GPIO_MODE_WLED 13
+#define MT7620_GPIO_MODE_JTAG 15
+#define MT7620_GPIO_MODE_EPHY 15
+#define MT7620_GPIO_MODE_PA 20
+
+static inline int mt7620_get_eco(void)
+{
+ return rt_sysc_r32(SYSC_REG_CHIP_REV) & CHIP_REV_ECO_MASK;
+}
+
+#endif
diff --git a/arch/mips/include/asm/mach-ralink/mt7620/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/mt7620/cpu-feature-overrides.h
new file mode 100644
index 000000000..c4579f170
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/mt7620/cpu-feature-overrides.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Ralink MT7620 specific CPU feature overrides
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#ifndef _MT7620_CPU_FEATURE_OVERRIDES_H
+#define _MT7620_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_sb1_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+
+#define cpu_has_prefetch 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+
+#define cpu_has_mips16 1
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 1
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#define cpu_has_dsp 1
+#define cpu_has_dsp2 0
+#define cpu_has_mipsmt 0
+
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+
+#endif /* _MT7620_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/mt7621.h b/arch/mips/include/asm/mach-ralink/mt7621.h
new file mode 100644
index 000000000..e1af1ba50
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/mt7621.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2015 John Crispin <john@phrozen.org>
+ */
+
+#ifndef _MT7621_REGS_H_
+#define _MT7621_REGS_H_
+
+#define MT7621_PALMBUS_BASE 0x1C000000
+#define MT7621_PALMBUS_SIZE 0x03FFFFFF
+
+#define MT7621_SYSC_BASE 0x1E000000
+
+#define SYSC_REG_CHIP_NAME0 0x00
+#define SYSC_REG_CHIP_NAME1 0x04
+#define SYSC_REG_CHIP_REV 0x0c
+#define SYSC_REG_SYSTEM_CONFIG0 0x10
+#define SYSC_REG_SYSTEM_CONFIG1 0x14
+
+#define CHIP_REV_PKG_MASK 0x1
+#define CHIP_REV_PKG_SHIFT 16
+#define CHIP_REV_VER_MASK 0xf
+#define CHIP_REV_VER_SHIFT 8
+#define CHIP_REV_ECO_MASK 0xf
+
+#define MT7621_DRAM_BASE 0x0
+#define MT7621_DDR2_SIZE_MIN 32
+#define MT7621_DDR2_SIZE_MAX 256
+
+#define MT7621_CHIP_NAME0 0x3637544D
+#define MT7621_CHIP_NAME1 0x20203132
+
+#endif
diff --git a/arch/mips/include/asm/mach-ralink/mt7621/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/mt7621/cpu-feature-overrides.h
new file mode 100644
index 000000000..168359a0a
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/mt7621/cpu-feature-overrides.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Ralink MT7621 specific CPU feature overrides
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2015 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#ifndef _MT7621_CPU_FEATURE_OVERRIDES_H
+#define _MT7621_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_sb1_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+
+#define cpu_has_prefetch 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+
+#define cpu_has_mips16 1
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 1
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#define cpu_has_dsp 1
+#define cpu_has_dsp2 0
+#define cpu_has_mipsmt 1
+
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+
+#define cpu_has_dc_aliases 0
+#define cpu_has_vtag_icache 0
+
+#define cpu_has_rixi 0
+#define cpu_has_tlbinv 0
+#define cpu_has_userlocal 1
+
+#endif /* _MT7621_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/pinmux.h b/arch/mips/include/asm/mach-ralink/pinmux.h
new file mode 100644
index 000000000..048309348
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/pinmux.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ */
+
+#ifndef _RT288X_PINMUX_H__
+#define _RT288X_PINMUX_H__
+
+#define FUNC(name, value, pin_first, pin_count) \
+ { name, value, pin_first, pin_count }
+
+#define GRP(_name, _func, _mask, _shift) \
+ { .name = _name, .mask = _mask, .shift = _shift, \
+ .func = _func, .gpio = _mask, \
+ .func_count = ARRAY_SIZE(_func) }
+
+#define GRP_G(_name, _func, _mask, _gpio, _shift) \
+ { .name = _name, .mask = _mask, .shift = _shift, \
+ .func = _func, .gpio = _gpio, \
+ .func_count = ARRAY_SIZE(_func) }
+
+struct rt2880_pmx_group;
+
+struct rt2880_pmx_func {
+ const char *name;
+ const char value;
+
+ int pin_first;
+ int pin_count;
+ int *pins;
+
+ int *groups;
+ int group_count;
+
+ int enabled;
+};
+
+struct rt2880_pmx_group {
+ const char *name;
+ int enabled;
+
+ const u32 shift;
+ const char mask;
+ const char gpio;
+
+ struct rt2880_pmx_func *func;
+ int func_count;
+};
+
+extern struct rt2880_pmx_group *rt2880_pinmux_data;
+
+#endif
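
Editorial sketch (not part of the patch): the FUNC()/GRP() initializer macros above build the rt2880_pinmux_data table consumed by the Ralink pinctrl code. A hedged example of how an SoC file might populate it; the group name, mask and function numbers below are illustrative values only.

#include <linux/kernel.h>			/* ARRAY_SIZE() */
#include <asm/mach-ralink/pinmux.h>

/* One pinmux group ("i2c") with a single alternate function. */
static struct rt2880_pmx_func i2c_grp_func[] = { FUNC("i2c", 0, 1, 2) };

static struct rt2880_pmx_group example_pinmux_data[] = {
	GRP("i2c", i2c_grp_func, 1, RT305X_GPIO_MODE_I2C),
	{ 0 }					/* sentinel entry */
};

/* SoC setup code would then point rt2880_pinmux_data at this table. */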
diff --git a/arch/mips/include/asm/mach-ralink/ralink_regs.h b/arch/mips/include/asm/mach-ralink/ralink_regs.h
new file mode 100644
index 000000000..9dbd9f087
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/ralink_regs.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Ralink SoC register definitions
+ *
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ */
+
+#ifndef _RALINK_REGS_H_
+#define _RALINK_REGS_H_
+
+#include <linux/io.h>
+
+enum ralink_soc_type {
+ RALINK_UNKNOWN = 0,
+ RT2880_SOC,
+ RT3883_SOC,
+ RT305X_SOC_RT3050,
+ RT305X_SOC_RT3052,
+ RT305X_SOC_RT3350,
+ RT305X_SOC_RT3352,
+ RT305X_SOC_RT5350,
+ MT762X_SOC_MT7620A,
+ MT762X_SOC_MT7620N,
+ MT762X_SOC_MT7621AT,
+ MT762X_SOC_MT7628AN,
+ MT762X_SOC_MT7688,
+};
+extern enum ralink_soc_type ralink_soc;
+
+extern __iomem void *rt_sysc_membase;
+extern __iomem void *rt_memc_membase;
+
+static inline void rt_sysc_w32(u32 val, unsigned reg)
+{
+ __raw_writel(val, rt_sysc_membase + reg);
+}
+
+static inline u32 rt_sysc_r32(unsigned reg)
+{
+ return __raw_readl(rt_sysc_membase + reg);
+}
+
+static inline void rt_sysc_m32(u32 clr, u32 set, unsigned reg)
+{
+ u32 val = rt_sysc_r32(reg) & ~clr;
+
+ __raw_writel(val | set, rt_sysc_membase + reg);
+}
+
+static inline void rt_memc_w32(u32 val, unsigned reg)
+{
+ __raw_writel(val, rt_memc_membase + reg);
+}
+
+static inline u32 rt_memc_r32(unsigned reg)
+{
+ return __raw_readl(rt_memc_membase + reg);
+}
+
+#endif /* _RALINK_REGS_H_ */
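
Editorial sketch (not part of the patch): the rt_sysc_*() helpers are thin __raw_readl()/__raw_writel() wrappers over the ioremapped system-controller window, with rt_sysc_m32() as the read-modify-write primitive. The register offset and bit below are placeholders, not definitions from this patch.

#include <linux/bits.h>
#include <linux/types.h>
#include <asm/mach-ralink/ralink_regs.h>

/* Placeholder names for illustration only. */
#define EXAMPLE_CLKCFG_REG	0x30
#define EXAMPLE_CLK_EN		BIT(18)

static void example_clk_gate(bool enable)
{
	if (enable)
		rt_sysc_m32(0, EXAMPLE_CLK_EN, EXAMPLE_CLKCFG_REG);	/* set the bit */
	else
		rt_sysc_m32(EXAMPLE_CLK_EN, 0, EXAMPLE_CLKCFG_REG);	/* clear the bit */
}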
diff --git a/arch/mips/include/asm/mach-ralink/rt288x.h b/arch/mips/include/asm/mach-ralink/rt288x.h
new file mode 100644
index 000000000..5d10178f2
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt288x.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+
+#ifndef _RT288X_REGS_H_
+#define _RT288X_REGS_H_
+
+#define RT2880_SYSC_BASE 0x00300000
+
+#define SYSC_REG_CHIP_NAME0 0x00
+#define SYSC_REG_CHIP_NAME1 0x04
+#define SYSC_REG_CHIP_ID 0x0c
+#define SYSC_REG_SYSTEM_CONFIG 0x10
+#define SYSC_REG_CLKCFG 0x30
+
+#define RT2880_CHIP_NAME0 0x38325452
+#define RT2880_CHIP_NAME1 0x20203038
+
+#define CHIP_ID_ID_MASK 0xff
+#define CHIP_ID_ID_SHIFT 8
+#define CHIP_ID_REV_MASK 0xff
+
+#define SYSTEM_CONFIG_CPUCLK_SHIFT 20
+#define SYSTEM_CONFIG_CPUCLK_MASK 0x3
+#define SYSTEM_CONFIG_CPUCLK_250 0x0
+#define SYSTEM_CONFIG_CPUCLK_266 0x1
+#define SYSTEM_CONFIG_CPUCLK_280 0x2
+#define SYSTEM_CONFIG_CPUCLK_300 0x3
+
+#define RT2880_GPIO_MODE_I2C BIT(0)
+#define RT2880_GPIO_MODE_UART0 BIT(1)
+#define RT2880_GPIO_MODE_SPI BIT(2)
+#define RT2880_GPIO_MODE_UART1 BIT(3)
+#define RT2880_GPIO_MODE_JTAG BIT(4)
+#define RT2880_GPIO_MODE_MDIO BIT(5)
+#define RT2880_GPIO_MODE_SDRAM BIT(6)
+#define RT2880_GPIO_MODE_PCI BIT(7)
+
+#define CLKCFG_SRAM_CS_N_WDT BIT(9)
+
+#define RT2880_SDRAM_BASE 0x08000000
+#define RT2880_MEM_SIZE_MIN 2
+#define RT2880_MEM_SIZE_MAX 128
+
+#endif
diff --git a/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h
new file mode 100644
index 000000000..fdaf8c918
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Ralink RT288x specific CPU feature overrides
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#ifndef _RT288X_CPU_FEATURE_OVERRIDES_H
+#define _RT288X_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_sb1_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+
+#define cpu_has_prefetch 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+
+#define cpu_has_mips16 1
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 1
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#define cpu_has_dsp 0
+#define cpu_has_mipsmt 0
+
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs 0
+
+#define cpu_dcache_line_size() 16
+#define cpu_icache_line_size() 16
+
+#endif /* _RT288X_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/rt305x.h b/arch/mips/include/asm/mach-ralink/rt305x.h
new file mode 100644
index 000000000..b54619dc4
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt305x.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+
+#ifndef _RT305X_REGS_H_
+#define _RT305X_REGS_H_
+
+extern enum ralink_soc_type ralink_soc;
+
+static inline int soc_is_rt3050(void)
+{
+ return ralink_soc == RT305X_SOC_RT3050;
+}
+
+static inline int soc_is_rt3052(void)
+{
+ return ralink_soc == RT305X_SOC_RT3052;
+}
+
+static inline int soc_is_rt305x(void)
+{
+ return soc_is_rt3050() || soc_is_rt3052();
+}
+
+static inline int soc_is_rt3350(void)
+{
+ return ralink_soc == RT305X_SOC_RT3350;
+}
+
+static inline int soc_is_rt3352(void)
+{
+ return ralink_soc == RT305X_SOC_RT3352;
+}
+
+static inline int soc_is_rt5350(void)
+{
+ return ralink_soc == RT305X_SOC_RT5350;
+}
+
+#define RT305X_SYSC_BASE 0x10000000
+
+#define SYSC_REG_CHIP_NAME0 0x00
+#define SYSC_REG_CHIP_NAME1 0x04
+#define SYSC_REG_CHIP_ID 0x0c
+#define SYSC_REG_SYSTEM_CONFIG 0x10
+
+#define RT3052_CHIP_NAME0 0x30335452
+#define RT3052_CHIP_NAME1 0x20203235
+
+#define RT3350_CHIP_NAME0 0x33335452
+#define RT3350_CHIP_NAME1 0x20203035
+
+#define RT3352_CHIP_NAME0 0x33335452
+#define RT3352_CHIP_NAME1 0x20203235
+
+#define RT5350_CHIP_NAME0 0x33355452
+#define RT5350_CHIP_NAME1 0x20203035
+
+#define CHIP_ID_ID_MASK 0xff
+#define CHIP_ID_ID_SHIFT 8
+#define CHIP_ID_REV_MASK 0xff
+
+#define RT305X_SYSCFG_CPUCLK_SHIFT 18
+#define RT305X_SYSCFG_CPUCLK_MASK 0x1
+#define RT305X_SYSCFG_CPUCLK_LOW 0x0
+#define RT305X_SYSCFG_CPUCLK_HIGH 0x1
+
+#define RT305X_SYSCFG_SRAM_CS0_MODE_SHIFT 2
+#define RT305X_SYSCFG_SRAM_CS0_MODE_MASK 0x1
+#define RT305X_SYSCFG_SRAM_CS0_MODE_WDT 0x1
+
+#define RT3352_SYSCFG0_CPUCLK_SHIFT 8
+#define RT3352_SYSCFG0_CPUCLK_MASK 0x1
+#define RT3352_SYSCFG0_CPUCLK_LOW 0x0
+#define RT3352_SYSCFG0_CPUCLK_HIGH 0x1
+
+#define RT5350_SYSCFG0_CPUCLK_SHIFT 8
+#define RT5350_SYSCFG0_CPUCLK_MASK 0x3
+#define RT5350_SYSCFG0_CPUCLK_360 0x0
+#define RT5350_SYSCFG0_CPUCLK_320 0x2
+#define RT5350_SYSCFG0_CPUCLK_300 0x3
+
+#define RT5350_SYSCFG0_DRAM_SIZE_SHIFT 12
+#define RT5350_SYSCFG0_DRAM_SIZE_MASK 7
+#define RT5350_SYSCFG0_DRAM_SIZE_2M 0
+#define RT5350_SYSCFG0_DRAM_SIZE_8M 1
+#define RT5350_SYSCFG0_DRAM_SIZE_16M 2
+#define RT5350_SYSCFG0_DRAM_SIZE_32M 3
+#define RT5350_SYSCFG0_DRAM_SIZE_64M 4
+
+/* multi function gpio pins */
+#define RT305X_GPIO_I2C_SD 1
+#define RT305X_GPIO_I2C_SCLK 2
+#define RT305X_GPIO_SPI_EN 3
+#define RT305X_GPIO_SPI_CLK 4
+/* GPIOs 7-14 are shared between the UART0, PCM and I2S interfaces */
+#define RT305X_GPIO_7 7
+#define RT305X_GPIO_10 10
+#define RT305X_GPIO_14 14
+#define RT305X_GPIO_UART1_TXD 15
+#define RT305X_GPIO_UART1_RXD 16
+#define RT305X_GPIO_JTAG_TDO 17
+#define RT305X_GPIO_JTAG_TDI 18
+#define RT305X_GPIO_MDIO_MDC 22
+#define RT305X_GPIO_MDIO_MDIO 23
+#define RT305X_GPIO_SDRAM_MD16 24
+#define RT305X_GPIO_SDRAM_MD31 39
+#define RT305X_GPIO_GE0_TXD0 40
+#define RT305X_GPIO_GE0_RXCLK 51
+
+#define RT305X_GPIO_MODE_UART0_SHIFT 2
+#define RT305X_GPIO_MODE_UART0_MASK 0x7
+#define RT305X_GPIO_MODE_UART0(x) ((x) << RT305X_GPIO_MODE_UART0_SHIFT)
+#define RT305X_GPIO_MODE_UARTF 0
+#define RT305X_GPIO_MODE_PCM_UARTF 1
+#define RT305X_GPIO_MODE_PCM_I2S 2
+#define RT305X_GPIO_MODE_I2S_UARTF 3
+#define RT305X_GPIO_MODE_PCM_GPIO 4
+#define RT305X_GPIO_MODE_GPIO_UARTF 5
+#define RT305X_GPIO_MODE_GPIO_I2S 6
+#define RT305X_GPIO_MODE_GPIO 7
+
+#define RT305X_GPIO_MODE_I2C 0
+#define RT305X_GPIO_MODE_SPI 1
+#define RT305X_GPIO_MODE_UART1 5
+#define RT305X_GPIO_MODE_JTAG 6
+#define RT305X_GPIO_MODE_MDIO 7
+#define RT305X_GPIO_MODE_SDRAM 8
+#define RT305X_GPIO_MODE_RGMII 9
+#define RT5350_GPIO_MODE_PHY_LED 14
+#define RT5350_GPIO_MODE_SPI_CS1 21
+#define RT3352_GPIO_MODE_LNA 18
+#define RT3352_GPIO_MODE_PA 20
+
+#define RT3352_SYSC_REG_SYSCFG0 0x010
+#define RT3352_SYSC_REG_SYSCFG1 0x014
+#define RT3352_SYSC_REG_CLKCFG1 0x030
+#define RT3352_SYSC_REG_RSTCTRL 0x034
+#define RT3352_SYSC_REG_USB_PS 0x05c
+
+#define RT3352_CLKCFG0_XTAL_SEL BIT(20)
+#define RT3352_CLKCFG1_UPHY0_CLK_EN BIT(18)
+#define RT3352_CLKCFG1_UPHY1_CLK_EN BIT(20)
+#define RT3352_RSTCTRL_UHST BIT(22)
+#define RT3352_RSTCTRL_UDEV BIT(25)
+#define RT3352_SYSCFG1_USB0_HOST_MODE BIT(10)
+
+#define RT305X_SDRAM_BASE 0x00000000
+#define RT305X_MEM_SIZE_MIN 2
+#define RT305X_MEM_SIZE_MAX 64
+#define RT3352_MEM_SIZE_MIN 2
+#define RT3352_MEM_SIZE_MAX 256
+
+#endif
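
Editorial sketch (not part of the patch): SYSCFG0 fields such as the RT5350 CPU-clock select are extracted with the shift/mask pairs above. This mirrors what the Ralink clock code does, shown here only as a hedged example; the 360/320/300 MHz rates follow directly from the macro names.

#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/rt305x.h>

/* Sketch: decode the RT5350 CPU clock selection from SYSCFG0. */
static unsigned long rt5350_cpu_rate(void)
{
	u32 sel = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);

	sel = (sel >> RT5350_SYSCFG0_CPUCLK_SHIFT) & RT5350_SYSCFG0_CPUCLK_MASK;

	switch (sel) {
	case RT5350_SYSCFG0_CPUCLK_360:
		return 360000000;
	case RT5350_SYSCFG0_CPUCLK_320:
		return 320000000;
	case RT5350_SYSCFG0_CPUCLK_300:
		return 300000000;
	default:
		return 0;	/* reserved encoding */
	}
}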
diff --git a/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h
new file mode 100644
index 000000000..7a385fe78
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Ralink RT305x specific CPU feature overrides
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#ifndef _RT305X_CPU_FEATURE_OVERRIDES_H
+#define _RT305X_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_sb1_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+
+#define cpu_has_prefetch 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+
+#define cpu_has_mips16 1
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 1
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#define cpu_has_dsp 1
+#define cpu_has_mipsmt 0
+
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+
+#endif /* _RT305X_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/rt3883.h b/arch/mips/include/asm/mach-ralink/rt3883.h
new file mode 100644
index 000000000..565f25484
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt3883.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Ralink RT3662/RT3883 SoC register definitions
+ *
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#ifndef _RT3883_REGS_H_
+#define _RT3883_REGS_H_
+
+#include <linux/bitops.h>
+
+#define RT3883_SDRAM_BASE 0x00000000
+#define RT3883_SYSC_BASE 0x10000000
+#define RT3883_TIMER_BASE 0x10000100
+#define RT3883_INTC_BASE 0x10000200
+#define RT3883_MEMC_BASE 0x10000300
+#define RT3883_UART0_BASE 0x10000500
+#define RT3883_PIO_BASE 0x10000600
+#define RT3883_FSCC_BASE 0x10000700
+#define RT3883_NANDC_BASE 0x10000810
+#define RT3883_I2C_BASE 0x10000900
+#define RT3883_I2S_BASE 0x10000a00
+#define RT3883_SPI_BASE 0x10000b00
+#define RT3883_UART1_BASE 0x10000c00
+#define RT3883_PCM_BASE 0x10002000
+#define RT3883_GDMA_BASE 0x10002800
+#define RT3883_CODEC1_BASE 0x10003000
+#define RT3883_CODEC2_BASE 0x10003800
+#define RT3883_FE_BASE 0x10100000
+#define RT3883_ROM_BASE 0x10118000
+#define RT3883_USBDEV_BASE 0x10112000
+#define RT3883_PCI_BASE 0x10140000
+#define RT3883_WLAN_BASE 0x10180000
+#define RT3883_USBHOST_BASE 0x101c0000
+#define RT3883_BOOT_BASE 0x1c000000
+#define RT3883_SRAM_BASE 0x1e000000
+#define RT3883_PCIMEM_BASE 0x20000000
+
+#define RT3883_EHCI_BASE (RT3883_USBHOST_BASE)
+#define RT3883_OHCI_BASE (RT3883_USBHOST_BASE + 0x1000)
+
+#define RT3883_SYSC_SIZE 0x100
+#define RT3883_TIMER_SIZE 0x100
+#define RT3883_INTC_SIZE 0x100
+#define RT3883_MEMC_SIZE 0x100
+#define RT3883_UART0_SIZE 0x100
+#define RT3883_UART1_SIZE 0x100
+#define RT3883_PIO_SIZE 0x100
+#define RT3883_FSCC_SIZE 0x100
+#define RT3883_NANDC_SIZE 0x0f0
+#define RT3883_I2C_SIZE 0x100
+#define RT3883_I2S_SIZE 0x100
+#define RT3883_SPI_SIZE 0x100
+#define RT3883_PCM_SIZE 0x800
+#define RT3883_GDMA_SIZE 0x800
+#define RT3883_CODEC1_SIZE 0x800
+#define RT3883_CODEC2_SIZE 0x800
+#define RT3883_FE_SIZE 0x10000
+#define RT3883_ROM_SIZE 0x4000
+#define RT3883_USBDEV_SIZE 0x4000
+#define RT3883_PCI_SIZE 0x40000
+#define RT3883_WLAN_SIZE 0x40000
+#define RT3883_USBHOST_SIZE 0x40000
+#define RT3883_BOOT_SIZE (32 * 1024 * 1024)
+#define RT3883_SRAM_SIZE (32 * 1024 * 1024)
+
+/* SYSC registers */
+#define RT3883_SYSC_REG_CHIPID0_3 0x00 /* Chip ID 0 */
+#define RT3883_SYSC_REG_CHIPID4_7 0x04 /* Chip ID 1 */
+#define RT3883_SYSC_REG_REVID 0x0c /* Chip Revision Identification */
+#define RT3883_SYSC_REG_SYSCFG0 0x10 /* System Configuration 0 */
+#define RT3883_SYSC_REG_SYSCFG1 0x14 /* System Configuration 1 */
+#define RT3883_SYSC_REG_CLKCFG0 0x2c /* Clock Configuration 0 */
+#define RT3883_SYSC_REG_CLKCFG1 0x30 /* Clock Configuration 1 */
+#define RT3883_SYSC_REG_RSTCTRL 0x34 /* Reset Control */
+#define RT3883_SYSC_REG_RSTSTAT 0x38 /* Reset Status */
+#define RT3883_SYSC_REG_USB_PS 0x5c /* USB Power saving control */
+#define RT3883_SYSC_REG_GPIO_MODE 0x60 /* GPIO Purpose Select */
+#define RT3883_SYSC_REG_PCIE_CLK_GEN0 0x7c
+#define RT3883_SYSC_REG_PCIE_CLK_GEN1 0x80
+#define RT3883_SYSC_REG_PCIE_CLK_GEN2 0x84
+#define RT3883_SYSC_REG_PMU 0x88
+#define RT3883_SYSC_REG_PMU1 0x8c
+
+#define RT3883_CHIP_NAME0 0x38335452
+#define RT3883_CHIP_NAME1 0x20203338
+
+#define RT3883_REVID_VER_ID_MASK 0x0f
+#define RT3883_REVID_VER_ID_SHIFT 8
+#define RT3883_REVID_ECO_ID_MASK 0x0f
+
+#define RT3883_SYSCFG0_DRAM_TYPE_DDR2 BIT(17)
+#define RT3883_SYSCFG0_CPUCLK_SHIFT 8
+#define RT3883_SYSCFG0_CPUCLK_MASK 0x3
+#define RT3883_SYSCFG0_CPUCLK_250 0x0
+#define RT3883_SYSCFG0_CPUCLK_384 0x1
+#define RT3883_SYSCFG0_CPUCLK_480 0x2
+#define RT3883_SYSCFG0_CPUCLK_500 0x3
+
+#define RT3883_SYSCFG1_USB0_HOST_MODE BIT(10)
+#define RT3883_SYSCFG1_PCIE_RC_MODE BIT(8)
+#define RT3883_SYSCFG1_PCI_HOST_MODE BIT(7)
+#define RT3883_SYSCFG1_PCI_66M_MODE BIT(6)
+#define RT3883_SYSCFG1_GPIO2_AS_WDT_OUT BIT(2)
+
+#define RT3883_CLKCFG1_PCIE_CLK_EN BIT(21)
+#define RT3883_CLKCFG1_UPHY1_CLK_EN BIT(20)
+#define RT3883_CLKCFG1_PCI_CLK_EN BIT(19)
+#define RT3883_CLKCFG1_UPHY0_CLK_EN BIT(18)
+
+#define RT3883_GPIO_MODE_UART0_SHIFT 2
+#define RT3883_GPIO_MODE_UART0_MASK 0x7
+#define RT3883_GPIO_MODE_UART0(x) ((x) << RT3883_GPIO_MODE_UART0_SHIFT)
+#define RT3883_GPIO_MODE_UARTF 0x0
+#define RT3883_GPIO_MODE_PCM_UARTF 0x1
+#define RT3883_GPIO_MODE_PCM_I2S 0x2
+#define RT3883_GPIO_MODE_I2S_UARTF 0x3
+#define RT3883_GPIO_MODE_PCM_GPIO 0x4
+#define RT3883_GPIO_MODE_GPIO_UARTF 0x5
+#define RT3883_GPIO_MODE_GPIO_I2S 0x6
+#define RT3883_GPIO_MODE_GPIO 0x7
+
+#define RT3883_GPIO_MODE_I2C 0
+#define RT3883_GPIO_MODE_SPI 1
+#define RT3883_GPIO_MODE_UART1 5
+#define RT3883_GPIO_MODE_JTAG 6
+#define RT3883_GPIO_MODE_MDIO 7
+#define RT3883_GPIO_MODE_GE1 9
+#define RT3883_GPIO_MODE_GE2 10
+
+#define RT3883_GPIO_MODE_PCI_SHIFT 11
+#define RT3883_GPIO_MODE_PCI_MASK 0x7
+#define RT3883_GPIO_MODE_PCI (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT)
+#define RT3883_GPIO_MODE_LNA_A_SHIFT 16
+#define RT3883_GPIO_MODE_LNA_A_MASK 0x3
+#define _RT3883_GPIO_MODE_LNA_A(_x) ((_x) << RT3883_GPIO_MODE_LNA_A_SHIFT)
+#define RT3883_GPIO_MODE_LNA_A_GPIO 0x3
+#define RT3883_GPIO_MODE_LNA_A _RT3883_GPIO_MODE_LNA_A(RT3883_GPIO_MODE_LNA_A_MASK)
+#define RT3883_GPIO_MODE_LNA_G_SHIFT 18
+#define RT3883_GPIO_MODE_LNA_G_MASK 0x3
+#define _RT3883_GPIO_MODE_LNA_G(_x) ((_x) << RT3883_GPIO_MODE_LNA_G_SHIFT)
+#define RT3883_GPIO_MODE_LNA_G_GPIO 0x3
+#define RT3883_GPIO_MODE_LNA_G _RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK)
+
+#define RT3883_GPIO_I2C_SD 1
+#define RT3883_GPIO_I2C_SCLK 2
+#define RT3883_GPIO_SPI_CS0 3
+#define RT3883_GPIO_SPI_CLK 4
+#define RT3883_GPIO_SPI_MOSI 5
+#define RT3883_GPIO_SPI_MISO 6
+#define RT3883_GPIO_7 7
+#define RT3883_GPIO_10 10
+#define RT3883_GPIO_11 11
+#define RT3883_GPIO_14 14
+#define RT3883_GPIO_UART1_TXD 15
+#define RT3883_GPIO_UART1_RXD 16
+#define RT3883_GPIO_JTAG_TDO 17
+#define RT3883_GPIO_JTAG_TDI 18
+#define RT3883_GPIO_JTAG_TMS 19
+#define RT3883_GPIO_JTAG_TCLK 20
+#define RT3883_GPIO_JTAG_TRST_N 21
+#define RT3883_GPIO_MDIO_MDC 22
+#define RT3883_GPIO_MDIO_MDIO 23
+#define RT3883_GPIO_LNA_PE_A0 32
+#define RT3883_GPIO_LNA_PE_A1 33
+#define RT3883_GPIO_LNA_PE_A2 34
+#define RT3883_GPIO_LNA_PE_G0 35
+#define RT3883_GPIO_LNA_PE_G1 36
+#define RT3883_GPIO_LNA_PE_G2 37
+#define RT3883_GPIO_PCI_AD0 40
+#define RT3883_GPIO_PCI_AD31 71
+#define RT3883_GPIO_GE2_TXD0 72
+#define RT3883_GPIO_GE2_TXD1 73
+#define RT3883_GPIO_GE2_TXD2 74
+#define RT3883_GPIO_GE2_TXD3 75
+#define RT3883_GPIO_GE2_TXEN 76
+#define RT3883_GPIO_GE2_TXCLK 77
+#define RT3883_GPIO_GE2_RXD0 78
+#define RT3883_GPIO_GE2_RXD1 79
+#define RT3883_GPIO_GE2_RXD2 80
+#define RT3883_GPIO_GE2_RXD3 81
+#define RT3883_GPIO_GE2_RXDV 82
+#define RT3883_GPIO_GE2_RXCLK 83
+#define RT3883_GPIO_GE1_TXD0 84
+#define RT3883_GPIO_GE1_TXD1 85
+#define RT3883_GPIO_GE1_TXD2 86
+#define RT3883_GPIO_GE1_TXD3 87
+#define RT3883_GPIO_GE1_TXEN 88
+#define RT3883_GPIO_GE1_TXCLK 89
+#define RT3883_GPIO_GE1_RXD0 90
+#define RT3883_GPIO_GE1_RXD1 91
+#define RT3883_GPIO_GE1_RXD2 92
+#define RT3883_GPIO_GE1_RXD3 93
+#define RT3883_GPIO_GE1_RXDV 94
+#define RT3883_GPIO_GE1_RXCLK 95
+
+#define RT3883_RSTCTRL_PCIE_PCI_PDM BIT(27)
+#define RT3883_RSTCTRL_FLASH BIT(26)
+#define RT3883_RSTCTRL_UDEV BIT(25)
+#define RT3883_RSTCTRL_PCI BIT(24)
+#define RT3883_RSTCTRL_PCIE BIT(23)
+#define RT3883_RSTCTRL_UHST BIT(22)
+#define RT3883_RSTCTRL_FE BIT(21)
+#define RT3883_RSTCTRL_WLAN BIT(20)
+#define RT3883_RSTCTRL_UART1 BIT(29)
+#define RT3883_RSTCTRL_SPI BIT(18)
+#define RT3883_RSTCTRL_I2S BIT(17)
+#define RT3883_RSTCTRL_I2C BIT(16)
+#define RT3883_RSTCTRL_NAND BIT(15)
+#define RT3883_RSTCTRL_DMA BIT(14)
+#define RT3883_RSTCTRL_PIO BIT(13)
+#define RT3883_RSTCTRL_UART BIT(12)
+#define RT3883_RSTCTRL_PCM BIT(11)
+#define RT3883_RSTCTRL_MC BIT(10)
+#define RT3883_RSTCTRL_INTC BIT(9)
+#define RT3883_RSTCTRL_TIMER BIT(8)
+#define RT3883_RSTCTRL_SYS BIT(0)
+
+#define RT3883_INTC_INT_SYSCTL BIT(0)
+#define RT3883_INTC_INT_TIMER0 BIT(1)
+#define RT3883_INTC_INT_TIMER1 BIT(2)
+#define RT3883_INTC_INT_IA BIT(3)
+#define RT3883_INTC_INT_PCM BIT(4)
+#define RT3883_INTC_INT_UART0 BIT(5)
+#define RT3883_INTC_INT_PIO BIT(6)
+#define RT3883_INTC_INT_DMA BIT(7)
+#define RT3883_INTC_INT_NAND BIT(8)
+#define RT3883_INTC_INT_PERFC BIT(9)
+#define RT3883_INTC_INT_I2S BIT(10)
+#define RT3883_INTC_INT_UART1 BIT(12)
+#define RT3883_INTC_INT_UHST BIT(18)
+#define RT3883_INTC_INT_UDEV BIT(19)
+
+/* FLASH/SRAM/Codec Controller registers */
+#define RT3883_FSCC_REG_FLASH_CFG0 0x00
+#define RT3883_FSCC_REG_FLASH_CFG1 0x04
+#define RT3883_FSCC_REG_CODEC_CFG0 0x40
+#define RT3883_FSCC_REG_CODEC_CFG1 0x44
+
+#define RT3883_FLASH_CFG_WIDTH_SHIFT 26
+#define RT3883_FLASH_CFG_WIDTH_MASK 0x3
+#define RT3883_FLASH_CFG_WIDTH_8BIT 0x0
+#define RT3883_FLASH_CFG_WIDTH_16BIT 0x1
+#define RT3883_FLASH_CFG_WIDTH_32BIT 0x2
+
+#define RT3883_SDRAM_BASE 0x00000000
+#define RT3883_MEM_SIZE_MIN 2
+#define RT3883_MEM_SIZE_MAX 256
+
+#endif /* _RT3883_REGS_H_ */
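
Editorial sketch (not part of the patch): the RSTCTRL bits gate per-block resets; setting a bit holds the block in reset and clearing it releases it. A hedged example of pulsing the USB host block through rt_sysc_m32(); the hold delay is illustrative, not a documented value.

#include <linux/delay.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/rt3883.h>

/* Sketch: put the USB host controller into reset and release it again. */
static void example_rt3883_usb_host_reset(void)
{
	rt_sysc_m32(0, RT3883_RSTCTRL_UHST, RT3883_SYSC_REG_RSTCTRL);	/* assert */
	udelay(100);							/* illustrative hold time */
	rt_sysc_m32(RT3883_RSTCTRL_UHST, 0, RT3883_SYSC_REG_RSTCTRL);	/* de-assert */
}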
diff --git a/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h
new file mode 100644
index 000000000..0a61910f6
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Ralink RT3662/RT3883 specific CPU feature overrides
+ *
+ * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#ifndef _RT3883_CPU_FEATURE_OVERRIDES_H
+#define _RT3883_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_sb1_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+
+#define cpu_has_prefetch 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+
+#define cpu_has_mips16 1
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 1
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#define cpu_has_dsp 1
+#define cpu_has_mipsmt 0
+
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+
+#endif /* _RT3883_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h b/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h
new file mode 100644
index 000000000..8539ccfb6
--- /dev/null
+++ b/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * IDT RC32434 specific CPU feature overrides
+ *
+ * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#ifndef __ASM_MACH_RC32434_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_RC32434_CPU_FEATURE_OVERRIDES_H
+
+/*
+ * The IDT RC32434 SOC has a built-in MIPS 4Kc core.
+ */
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_sb1_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_32fpr 0
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 1
+#define cpu_has_mcheck 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+
+#define cpu_has_vtag_icache 0
+
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_mipsmt 0
+
+/* #define cpu_has_nofpuex ? */
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs 0
+
+#define cpu_has_inclusive_pcaches 0
+
+#define cpu_dcache_line_size() 16
+#define cpu_icache_line_size() 16
+
+#endif /* __ASM_MACH_RC32434_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-rc32434/ddr.h b/arch/mips/include/asm/mach-rc32434/ddr.h
new file mode 100644
index 000000000..e1cad0c7f
--- /dev/null
+++ b/arch/mips/include/asm/mach-rc32434/ddr.h
@@ -0,0 +1,141 @@
+/*
+ * Definitions for the DDR registers
+ *
+ * Copyright 2002 Ryan Holm <ryan.holmQVist@idt.com>
+ * Copyright 2008 Florian Fainelli <florian@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef _ASM_RC32434_DDR_H_
+#define _ASM_RC32434_DDR_H_
+
+#include <asm/mach-rc32434/rb.h>
+
+/* DDR register structure */
+struct ddr_ram {
+ u32 ddrbase;
+ u32 ddrmask;
+ u32 res1;
+ u32 res2;
+ u32 ddrc;
+ u32 ddrabase;
+ u32 ddramask;
+ u32 ddramap;
+ u32 ddrcust;
+ u32 ddrrdc;
+ u32 ddrspare;
+};
+
+#define DDR0_PHYS_ADDR 0x18018000
+
+/* DDR banks masks */
+#define DDR_MASK 0xffff0000
+#define DDR0_BASE_MSK DDR_MASK
+#define DDR1_BASE_MSK DDR_MASK
+
+/* DDR bank0 registers */
+#define RC32434_DDR0_ATA_BIT 5
+#define RC32434_DDR0_ATA_MSK 0x000000E0
+#define RC32434_DDR0_DBW_BIT 8
+#define RC32434_DDR0_DBW_MSK 0x00000100
+#define RC32434_DDR0_WR_BIT 9
+#define RC32434_DDR0_WR_MSK 0x00000600
+#define RC32434_DDR0_PS_BIT 11
+#define RC32434_DDR0_PS_MSK 0x00001800
+#define RC32434_DDR0_DTYPE_BIT 13
+#define RC32434_DDR0_DTYPE_MSK 0x0000e000
+#define RC32434_DDR0_RFC_BIT 16
+#define RC32434_DDR0_RFC_MSK 0x000f0000
+#define RC32434_DDR0_RP_BIT 20
+#define RC32434_DDR0_RP_MSK 0x00300000
+#define RC32434_DDR0_AP_BIT 22
+#define RC32434_DDR0_AP_MSK 0x00400000
+#define RC32434_DDR0_RCD_BIT 23
+#define RC32434_DDR0_RCD_MSK 0x01800000
+#define RC32434_DDR0_CL_BIT 25
+#define RC32434_DDR0_CL_MSK 0x06000000
+#define RC32434_DDR0_DBM_BIT 27
+#define RC32434_DDR0_DBM_MSK 0x08000000
+#define RC32434_DDR0_SDS_BIT 28
+#define RC32434_DDR0_SDS_MSK 0x10000000
+#define RC32434_DDR0_ATP_BIT 29
+#define RC32434_DDR0_ATP_MSK 0x60000000
+#define RC32434_DDR0_RE_BIT 31
+#define RC32434_DDR0_RE_MSK 0x80000000
+
+/* DDR bank C registers */
+#define RC32434_DDRC_MSK(x) BIT_TO_MASK(x)
+#define RC32434_DDRC_CES_BIT 0
+#define RC32434_DDRC_ACE_BIT 1
+
+/* Custom DDR bank registers */
+#define RC32434_DCST_MSK(x) BIT_TO_MASK(x)
+#define RC32434_DCST_CS_BIT 0
+#define RC32434_DCST_CS_MSK 0x00000003
+#define RC32434_DCST_WE_BIT 2
+#define RC32434_DCST_RAS_BIT 3
+#define RC32434_DCST_CAS_BIT 4
+#define RC32434_DSCT_CKE_BIT 5
+#define RC32434_DSCT_BA_BIT 6
+#define RC32434_DSCT_BA_MSK 0x000000c0
+
+/* DDR QSC registers */
+#define RC32434_QSC_DM_BIT 0
+#define RC32434_QSC_DM_MSK 0x00000003
+#define RC32434_QSC_DQSBS_BIT 2
+#define RC32434_QSC_DQSBS_MSK 0x000000fc
+#define RC32434_QSC_DB_BIT 8
+#define RC32434_QSC_DB_MSK 0x00000100
+#define RC32434_QSC_DBSP_BIT 9
+#define RC32434_QSC_DBSP_MSK 0x01fffe00
+#define RC32434_QSC_BDP_BIT 25
+#define RC32434_QSC_BDP_MSK 0x7e000000
+
+/* DDR LLC registers */
+#define RC32434_LLC_EAO_BIT 0
+#define RC32434_LLC_EAO_MSK 0x00000001
+#define RC32434_LLC_EO_BIT 1
+#define RC32434_LLC_EO_MSK 0x0000003e
+#define RC32434_LLC_FS_BIT 6
+#define RC32434_LLC_FS_MSK 0x000000c0
+#define RC32434_LLC_AS_BIT 8
+#define RC32434_LLC_AS_MSK 0x00000700
+#define RC32434_LLC_SP_BIT 11
+#define RC32434_LLC_SP_MSK 0x001ff800
+
+/* DDR LLFC registers */
+#define RC32434_LLFC_MSK(x) BIT_TO_MASK(x)
+#define RC32434_LLFC_MEN_BIT 0
+#define RC32434_LLFC_EAN_BIT 1
+#define RC32434_LLFC_FF_BIT 2
+
+/* DDR DLLTA registers */
+#define RC32434_DLLTA_ADDR_BIT 2
+#define RC32434_DLLTA_ADDR_MSK 0xfffffffc
+
+/* DDR DLLED registers */
+#define RC32434_DLLED_MSK(x) BIT_TO_MASK(x)
+#define RC32434_DLLED_DBE_BIT 0
+#define RC32434_DLLED_DTE_BIT 1
+
+#endif /* _ASM_RC32434_DDR_H_ */
diff --git a/arch/mips/include/asm/mach-rc32434/dma.h b/arch/mips/include/asm/mach-rc32434/dma.h
new file mode 100644
index 000000000..44dc87bb8
--- /dev/null
+++ b/arch/mips/include/asm/mach-rc32434/dma.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2002 Integrated Device Technology, Inc.
+ * All rights reserved.
+ *
+ * DMA register definition.
+ *
+ * Author : ryan.holmQVist@idt.com
+ * Date : 20011005
+ */
+
+#ifndef __ASM_RC32434_DMA_H
+#define __ASM_RC32434_DMA_H
+
+#include <asm/mach-rc32434/rb.h>
+
+#define DMA0_BASE_ADDR 0x18040000
+
+/*
+ * DMA descriptor (in physical memory).
+ */
+
+struct dma_desc {
+ u32 control; /* Control. use DMA_DESC_* */
+ u32 ca; /* Current Address. */
+ u32 devcs; /* Device control and status. */
+ u32 link; /* Next descriptor in chain. */
+};
+
+#define DMA_DESC_SIZ sizeof(struct dma_desc)
+#define DMA_DESC_COUNT_BIT 0
+#define DMA_DESC_COUNT_MSK 0x0003ffff
+#define DMA_DESC_DS_BIT 20
+#define DMA_DESC_DS_MSK 0x00300000
+
+#define DMA_DESC_DEV_CMD_BIT 22
+#define DMA_DESC_DEV_CMD_MSK 0x01c00000
+
+/* DMA command sizes */
+#define DMA_DESC_DEV_CMD_BYTE 0
+#define DMA_DESC_DEV_CMD_HLF_WD 1
+#define DMA_DESC_DEV_CMD_WORD 2
+#define DMA_DESC_DEV_CMD_2WORDS 3
+#define DMA_DESC_DEV_CMD_4WORDS 4
+#define DMA_DESC_DEV_CMD_6WORDS 5
+#define DMA_DESC_DEV_CMD_8WORDS 6
+#define DMA_DESC_DEV_CMD_16WORDS 7
+
+/* DMA descriptors interrupts */
+#define DMA_DESC_COF (1 << 25) /* Chain on finished */
+#define DMA_DESC_COD (1 << 26) /* Chain on done */
+#define DMA_DESC_IOF (1 << 27) /* Interrupt on finished */
+#define DMA_DESC_IOD (1 << 28) /* Interrupt on done */
+#define DMA_DESC_TERM (1 << 29) /* Terminated */
+#define DMA_DESC_DONE (1 << 30) /* Done */
+#define DMA_DESC_FINI (1 << 31) /* Finished */
+
+/*
+ * DMA register (within Internal Register Map).
+ */
+
+struct dma_reg {
+ u32 dmac; /* Control. */
+ u32 dmas; /* Status. */
+ u32 dmasm; /* Mask. */
+ u32 dmadptr; /* Descriptor pointer. */
+ u32 dmandptr; /* Next descriptor pointer. */
+};
+
+/* DMA channels specific registers */
+#define DMA_CHAN_RUN_BIT (1 << 0)
+#define DMA_CHAN_DONE_BIT (1 << 1)
+#define DMA_CHAN_MODE_BIT (1 << 2)
+#define DMA_CHAN_MODE_MSK 0x0000000c
+#define DMA_CHAN_MODE_AUTO 0
+#define DMA_CHAN_MODE_BURST 1
+#define DMA_CHAN_MODE_XFRT 2
+#define DMA_CHAN_MODE_RSVD 3
+#define DMA_CHAN_ACT_BIT (1 << 4)
+
+/* DMA status registers */
+#define DMA_STAT_FINI (1 << 0)
+#define DMA_STAT_DONE (1 << 1)
+#define DMA_STAT_CHAIN (1 << 2)
+#define DMA_STAT_ERR (1 << 3)
+#define DMA_STAT_HALT (1 << 4)
+
+/*
+ * DMA channel definitions
+ */
+
+#define DMA_CHAN_ETH_RCV 0
+#define DMA_CHAN_ETH_XMT 1
+#define DMA_CHAN_MEM_TO_FIFO 2
+#define DMA_CHAN_FIFO_TO_MEM 3
+#define DMA_CHAN_PCI_TO_MEM 4
+#define DMA_CHAN_MEM_TO_PCI 5
+#define DMA_CHAN_COUNT 6
+
+struct dma_channel {
+ struct dma_reg ch[DMA_CHAN_COUNT];
+};
+
+#endif /* __ASM_RC32434_DMA_H */
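
Editorial sketch (not part of the patch): each hardware descriptor packs the byte count into the low 18 bits of the control word alongside the interrupt/termination flags above. A hedged example of filling one descriptor; cache maintenance and physical-address translation are deliberately omitted.

#include <linux/types.h>
#include <asm/mach-rc32434/dma.h>

/* Sketch: describe one buffer and request an interrupt on completion. */
static void example_fill_desc(struct dma_desc *d, u32 buf_phys, u32 len)
{
	d->control = (len & DMA_DESC_COUNT_MSK) | DMA_DESC_IOD | DMA_DESC_IOF;
	d->ca = buf_phys;	/* buffer address (physical) */
	d->devcs = 0;		/* device command/status, updated by hardware */
	d->link = 0;		/* no chaining in this sketch */
}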
diff --git a/arch/mips/include/asm/mach-rc32434/dma_v.h b/arch/mips/include/asm/mach-rc32434/dma_v.h
new file mode 100644
index 000000000..37d73b987
--- /dev/null
+++ b/arch/mips/include/asm/mach-rc32434/dma_v.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2002 Integrated Device Technology, Inc.
+ * All rights reserved.
+ *
+ * DMA register definition.
+ *
+ * Author : ryan.holmQVist@idt.com
+ * Date : 20011005
+ */
+
+#ifndef _ASM_RC32434_DMA_V_H_
+#define _ASM_RC32434_DMA_V_H_
+
+#include <asm/mach-rc32434/dma.h>
+#include <asm/mach-rc32434/rc32434.h>
+
+#define DMA_CHAN_OFFSET 0x14
+#define IS_DMA_USED(X) (((X) & \
+ (DMA_DESC_FINI | DMA_DESC_DONE | DMA_DESC_TERM)) \
+ != 0)
+#define DMA_COUNT(count) ((count) & DMA_DESC_COUNT_MSK)
+
+#define DMA_HALT_TIMEOUT 500
+
+static inline int rc32434_halt_dma(struct dma_reg *ch)
+{
+ int timeout = 1;
+ if (__raw_readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
+ __raw_writel(0, &ch->dmac);
+ for (timeout = DMA_HALT_TIMEOUT; timeout > 0; timeout--) {
+ if (__raw_readl(&ch->dmas) & DMA_STAT_HALT) {
+ __raw_writel(0, &ch->dmas);
+ break;
+ }
+ }
+ }
+
+ return timeout ? 0 : 1;
+}
+
+static inline void rc32434_start_dma(struct dma_reg *ch, u32 dma_addr)
+{
+ __raw_writel(0, &ch->dmandptr);
+ __raw_writel(dma_addr, &ch->dmadptr);
+}
+
+static inline void rc32434_chain_dma(struct dma_reg *ch, u32 dma_addr)
+{
+ __raw_writel(dma_addr, &ch->dmandptr);
+}
+
+#endif /* _ASM_RC32434_DMA_V_H_ */
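
Editorial sketch (not part of the patch): rc32434_halt_dma() returns 0 once the channel reports DMA_STAT_HALT and 1 on timeout, so a typical restart sequence looks like the example below. pr_warn() and the caller's descriptor handling are assumptions, not code from this patch.

#include <linux/printk.h>
#include <asm/mach-rc32434/dma_v.h>

/* Sketch: stop a channel, then restart it on a new descriptor chain. */
static void example_dma_restart(struct dma_reg *ch, u32 first_desc_phys)
{
	if (rc32434_halt_dma(ch))
		pr_warn("rc32434 DMA channel failed to halt\n");

	rc32434_start_dma(ch, first_desc_phys);
}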
diff --git a/arch/mips/include/asm/mach-rc32434/eth.h b/arch/mips/include/asm/mach-rc32434/eth.h
new file mode 100644
index 000000000..c2645faad
--- /dev/null
+++ b/arch/mips/include/asm/mach-rc32434/eth.h
@@ -0,0 +1,220 @@
+/*
+ * Definitions for the Ethernet registers
+ *
+ * Copyright 2002 Allend Stichter <allen.stichter@idt.com>
+ * Copyright 2008 Florian Fainelli <florian@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __ASM_RC32434_ETH_H
+#define __ASM_RC32434_ETH_H
+
+
+#define ETH0_BASE_ADDR 0x18060000
+
+struct eth_regs {
+ u32 ethintfc;
+ u32 ethfifott;
+ u32 etharc;
+ u32 ethhash0;
+ u32 ethhash1;
+ u32 ethu0[4]; /* Reserved. */
+ u32 ethpfs;
+ u32 ethmcp;
+ u32 eth_u1[10]; /* Reserved. */
+ u32 ethspare;
+ u32 eth_u2[42]; /* Reserved. */
+ u32 ethsal0;
+ u32 ethsah0;
+ u32 ethsal1;
+ u32 ethsah1;
+ u32 ethsal2;
+ u32 ethsah2;
+ u32 ethsal3;
+ u32 ethsah3;
+ u32 ethrbc;
+ u32 ethrpc;
+ u32 ethrupc;
+ u32 ethrfc;
+ u32 ethtbc;
+ u32 ethgpf;
+ u32 eth_u9[50]; /* Reserved. */
+ u32 ethmac1;
+ u32 ethmac2;
+ u32 ethipgt;
+ u32 ethipgr;
+ u32 ethclrt;
+ u32 ethmaxf;
+ u32 eth_u10; /* Reserved. */
+ u32 ethmtest;
+ u32 miimcfg;
+ u32 miimcmd;
+ u32 miimaddr;
+ u32 miimwtd;
+ u32 miimrdd;
+ u32 miimind;
+ u32 eth_u11; /* Reserved. */
+ u32 eth_u12; /* Reserved. */
+ u32 ethcfsa0;
+ u32 ethcfsa1;
+ u32 ethcfsa2;
+};
+
+/* Ethernet interrupt registers */
+#define ETH_INT_FC_EN (1 << 0)
+#define ETH_INT_FC_ITS (1 << 1)
+#define ETH_INT_FC_RIP (1 << 2)
+#define ETH_INT_FC_JAM (1 << 3)
+#define ETH_INT_FC_OVR (1 << 4)
+#define ETH_INT_FC_UND (1 << 5)
+#define ETH_INT_FC_IOC 0x000000c0
+
+/* Ethernet FIFO registers */
+#define ETH_FIFO_TT_TTH_BIT 0
+#define ETH_FIFO_TT_TTH 0x0000007f
+
+/* Ethernet ARC/multicast registers */
+#define ETH_ARC_PRO (1 << 0)
+#define ETH_ARC_AM (1 << 1)
+#define ETH_ARC_AFM (1 << 2)
+#define ETH_ARC_AB (1 << 3)
+
+/* Ethernet SAL registers */
+#define ETH_SAL_BYTE_5 0x000000ff
+#define ETH_SAL_BYTE_4 0x0000ff00
+#define ETH_SAL_BYTE_3 0x00ff0000
+#define ETH_SAL_BYTE_2 0xff000000
+
+/* Ethernet SAH registers */
+#define ETH_SAH_BYTE1 0x000000ff
+#define ETH_SAH_BYTE0 0x0000ff00
+
+/* Ethernet GPF register */
+#define ETH_GPF_PTV 0x0000ffff
+
+/* Ethernet PFG register */
+#define ETH_PFS_PFD (1 << 0)
+
+/* Ethernet CFSA[0-3] registers */
+#define ETH_CFSA0_CFSA4 0x000000ff
+#define ETH_CFSA0_CFSA5 0x0000ff00
+#define ETH_CFSA1_CFSA2 0x000000ff
+#define ETH_CFSA1_CFSA3 0x0000ff00
+#define ETH_CFSA1_CFSA0 0x000000ff
+#define ETH_CFSA1_CFSA1 0x0000ff00
+
+/* Ethernet MAC1 registers */
+#define ETH_MAC1_RE (1 << 0)
+#define ETH_MAC1_PAF (1 << 1)
+#define ETH_MAC1_RFC (1 << 2)
+#define ETH_MAC1_TFC (1 << 3)
+#define ETH_MAC1_LB (1 << 4)
+#define ETH_MAC1_MR (1 << 31)
+
+/* Ethernet MAC2 registers */
+#define ETH_MAC2_FD (1 << 0)
+#define ETH_MAC2_FLC (1 << 1)
+#define ETH_MAC2_HFE (1 << 2)
+#define ETH_MAC2_DC (1 << 3)
+#define ETH_MAC2_CEN (1 << 4)
+#define ETH_MAC2_PE (1 << 5)
+#define ETH_MAC2_VPE (1 << 6)
+#define ETH_MAC2_APE (1 << 7)
+#define ETH_MAC2_PPE (1 << 8)
+#define ETH_MAC2_LPE (1 << 9)
+#define ETH_MAC2_NB (1 << 12)
+#define ETH_MAC2_BP (1 << 13)
+#define ETH_MAC2_ED (1 << 14)
+
+/* Ethernet IPGT register */
+#define ETH_IPGT 0x0000007f
+
+/* Ethernet IPGR registers */
+#define ETH_IPGR_IPGR2 0x0000007f
+#define ETH_IPGR_IPGR1 0x00007f00
+
+/* Ethernet CLRT registers */
+#define ETH_CLRT_MAX_RET 0x0000000f
+#define ETH_CLRT_COL_WIN 0x00003f00
+
+/* Ethernet MAXF register */
+#define ETH_MAXF 0x0000ffff
+
+/* Ethernet test registers */
+#define ETH_TEST_REG (1 << 2)
+#define ETH_MCP_DIV 0x000000ff
+
+/* MII registers */
+#define ETH_MII_CFG_RSVD 0x0000000c
+#define ETH_MII_CMD_RD (1 << 0)
+#define ETH_MII_CMD_SCN (1 << 1)
+#define ETH_MII_REG_ADDR 0x0000001f
+#define ETH_MII_PHY_ADDR 0x00001f00
+#define ETH_MII_WTD_DATA 0x0000ffff
+#define ETH_MII_RDD_DATA 0x0000ffff
+#define ETH_MII_IND_BSY (1 << 0)
+#define ETH_MII_IND_SCN (1 << 1)
+#define ETH_MII_IND_NV (1 << 2)
+
+/*
+ * Values for the DEVCS field of the Ethernet DMA Rx and Tx descriptors.
+ */
+
+#define ETH_RX_FD (1 << 0)
+#define ETH_RX_LD (1 << 1)
+#define ETH_RX_ROK (1 << 2)
+#define ETH_RX_FM (1 << 3)
+#define ETH_RX_MP (1 << 4)
+#define ETH_RX_BP (1 << 5)
+#define ETH_RX_VLT (1 << 6)
+#define ETH_RX_CF (1 << 7)
+#define ETH_RX_OVR (1 << 8)
+#define ETH_RX_CRC (1 << 9)
+#define ETH_RX_CV (1 << 10)
+#define ETH_RX_DB (1 << 11)
+#define ETH_RX_LE (1 << 12)
+#define ETH_RX_LOR (1 << 13)
+#define ETH_RX_CES (1 << 14)
+#define ETH_RX_LEN_BIT 16
+#define ETH_RX_LEN 0xffff0000
+
+#define ETH_TX_FD (1 << 0)
+#define ETH_TX_LD (1 << 1)
+#define ETH_TX_OEN (1 << 2)
+#define ETH_TX_PEN (1 << 3)
+#define ETH_TX_CEN (1 << 4)
+#define ETH_TX_HEN (1 << 5)
+#define ETH_TX_TOK (1 << 6)
+#define ETH_TX_MP (1 << 7)
+#define ETH_TX_BP (1 << 8)
+#define ETH_TX_UND (1 << 9)
+#define ETH_TX_OF (1 << 10)
+#define ETH_TX_ED (1 << 11)
+#define ETH_TX_EC (1 << 12)
+#define ETH_TX_LC (1 << 13)
+#define ETH_TX_TD (1 << 14)
+#define ETH_TX_CRC (1 << 15)
+#define ETH_TX_LE (1 << 16)
+#define ETH_TX_CC 0x001E0000
+
+#endif /* __ASM_RC32434_ETH_H */
diff --git a/arch/mips/include/asm/mach-rc32434/gpio.h b/arch/mips/include/asm/mach-rc32434/gpio.h
new file mode 100644
index 000000000..a3192da9f
--- /dev/null
+++ b/arch/mips/include/asm/mach-rc32434/gpio.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2002 Integrated Device Technology, Inc.
+ * All rights reserved.
+ *
+ * GPIO register definition.
+ *
+ * Author : ryan.holmQVist@idt.com
+ * Date : 20011005
+ * Copyright (C) 2001, 2002 Ryan Holm <ryan.holmQVist@idt.com>
+ * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
+ */
+
+#ifndef _RC32434_GPIO_H_
+#define _RC32434_GPIO_H_
+
+struct rb532_gpio_reg {
+ u32 gpiofunc; /* GPIO Function Register
+ * gpiofunc[x]==0 bit = gpio
+ * func[x]==1 bit = altfunc
+ */
+ u32 gpiocfg; /* GPIO Configuration Register
+ * gpiocfg[x]==0 bit = input
+ * gpiocfg[x]==1 bit = output
+ */
+ u32 gpiod; /* GPIO Data Register
+ * gpiod[x] read/write gpio pinX status
+ */
+ u32 gpioilevel; /* GPIO Interrupt Level Register
+ * interrupt level (see gpioistat)
+ */
+ u32 gpioistat; /* GPIO Interrupt Status Register
+ * istat[x] = (gpiod[x] == level[x])
+ * cleared in ISR (STICKY bits)
+ */
+ u32 gpionmien; /* GPIO Non-maskable Interrupt Enable Register */
+};
+
+/* UART GPIO signals */
+#define RC32434_UART0_SOUT (1 << 0)
+#define RC32434_UART0_SIN (1 << 1)
+#define RC32434_UART0_RTS (1 << 2)
+#define RC32434_UART0_CTS (1 << 3)
+
+/* M & P bus GPIO signals */
+#define RC32434_MP_BIT_22 (1 << 4)
+#define RC32434_MP_BIT_23 (1 << 5)
+#define RC32434_MP_BIT_24 (1 << 6)
+#define RC32434_MP_BIT_25 (1 << 7)
+
+/* CPU GPIO signals */
+#define RC32434_CPU_GPIO (1 << 8)
+
+/* Reserved GPIO signals */
+#define RC32434_AF_SPARE_6 (1 << 9)
+#define RC32434_AF_SPARE_4 (1 << 10)
+#define RC32434_AF_SPARE_3 (1 << 11)
+#define RC32434_AF_SPARE_2 (1 << 12)
+
+/* PCI messaging unit */
+#define RC32434_PCI_MSU_GPIO (1 << 13)
+
+/* NAND GPIO signals */
+#define GPIO_RDY 8
+#define GPIO_WPX 9
+#define GPIO_ALE 10
+#define GPIO_CLE 11
+
+/* Compact Flash GPIO pin */
+#define CF_GPIO_NUM 13
+
+/* S1 button GPIO (shared with UART0_SIN) */
+#define GPIO_BTN_S1 1
+
+extern void rb532_gpio_set_ilevel(int bit, unsigned gpio);
+extern void rb532_gpio_set_istat(int bit, unsigned gpio);
+extern void rb532_gpio_set_func(unsigned gpio);
+
+#endif /* _RC32434_GPIO_H_ */
diff --git a/arch/mips/include/asm/mach-rc32434/integ.h b/arch/mips/include/asm/mach-rc32434/integ.h
new file mode 100644
index 000000000..fa65bc3d8
--- /dev/null
+++ b/arch/mips/include/asm/mach-rc32434/integ.h
@@ -0,0 +1,59 @@
+/*
+ * Definitions for the Watchdog registers
+ *
+ * Copyright 2002 Ryan Holm <ryan.holmQVist@idt.com>
+ * Copyright 2008 Florian Fainelli <florian@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RC32434_INTEG_H__
+#define __RC32434_INTEG_H__
+
+#include <asm/mach-rc32434/rb.h>
+
+#define INTEG0_BASE_ADDR 0x18030030
+
+struct integ {
+ u32 errcs; /* sticky use ERRCS_ */
+ u32 wtcount; /* Watchdog timer count reg. */
+ u32 wtcompare; /* Watchdog timer timeout value. */
+ u32 wtc; /* Watchdog timer control. use WTC_ */
+};
+
+/* Error counters */
+#define RC32434_ERR_WTO 0
+#define RC32434_ERR_WNE 1
+#define RC32434_ERR_UCW 2
+#define RC32434_ERR_UCR 3
+#define RC32434_ERR_UPW 4
+#define RC32434_ERR_UPR 5
+#define RC32434_ERR_UDW 6
+#define RC32434_ERR_UDR 7
+#define RC32434_ERR_SAE 8
+#define RC32434_ERR_WRE 9
+
+/* Watchdog control bits */
+#define RC32434_WTC_EN 0
+#define RC32434_WTC_TO 1
+
+#endif /* __RC32434_INTEG_H__ */
diff --git a/arch/mips/include/asm/mach-rc32434/irq.h b/arch/mips/include/asm/mach-rc32434/irq.h
new file mode 100644
index 000000000..ebe32bd5a
--- /dev/null
+++ b/arch/mips/include/asm/mach-rc32434/irq.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_RC32434_IRQ_H
+#define __ASM_RC32434_IRQ_H
+
+#define NR_IRQS 256
+
+#include <asm/mach-generic/irq.h>
+#include <asm/mach-rc32434/rb.h>
+
+/* Interrupt Controller */
+#define IC_GROUP0_PEND (REGBASE + 0x38000)
+#define IC_GROUP0_MASK (REGBASE + 0x38008)
+#define IC_GROUP_OFFSET 0x0C
+
+#define NUM_INTR_GROUPS 5
+
+/* 16550 UARTs */
+#define GROUP0_IRQ_BASE 8 /* GRP2 IRQ numbers start here */
+ /* GRP3 IRQ numbers start here */
+#define GROUP1_IRQ_BASE (GROUP0_IRQ_BASE + 32)
+ /* GRP4 IRQ numbers start here */
+#define GROUP2_IRQ_BASE (GROUP1_IRQ_BASE + 32)
+ /* GRP5 IRQ numbers start here */
+#define GROUP3_IRQ_BASE (GROUP2_IRQ_BASE + 32)
+#define GROUP4_IRQ_BASE (GROUP3_IRQ_BASE + 32)
+
+#define UART0_IRQ (GROUP3_IRQ_BASE + 0)
+
+#define ETH0_DMA_RX_IRQ (GROUP1_IRQ_BASE + 0)
+#define ETH0_DMA_TX_IRQ (GROUP1_IRQ_BASE + 1)
+#define ETH0_RX_OVR_IRQ (GROUP3_IRQ_BASE + 9)
+#define ETH0_TX_UND_IRQ (GROUP3_IRQ_BASE + 10)
+
+#define GPIO_MAPPED_IRQ_BASE GROUP4_IRQ_BASE
+#define GPIO_MAPPED_IRQ_GROUP 4
+
+#endif /* __ASM_RC32434_IRQ_H */
diff --git a/arch/mips/include/asm/mach-rc32434/pci.h b/arch/mips/include/asm/mach-rc32434/pci.h
new file mode 100644
index 000000000..3eb767c8a
--- /dev/null
+++ b/arch/mips/include/asm/mach-rc32434/pci.h
@@ -0,0 +1,478 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Copyright 2004 IDT Inc. (rischelp@idt.com)
+ *
+ * Initial Release
+ */
+
+#ifndef _ASM_RC32434_PCI_H_
+#define _ASM_RC32434_PCI_H_
+
+#define epld_mask ((volatile unsigned char *)0xB900000d)
+
+#define PCI0_BASE_ADDR 0x18080000
+#define PCI_LBA_COUNT 4
+
+struct pci_map {
+ u32 address; /* Address. */
+ u32 control; /* Control. */
+ u32 mapping; /* mapping. */
+};
+
+struct pci_reg {
+ u32 pcic;
+ u32 pcis;
+ u32 pcism;
+ u32 pcicfga;
+ u32 pcicfgd;
+ volatile struct pci_map pcilba[PCI_LBA_COUNT];
+ u32 pcidac;
+ u32 pcidas;
+ u32 pcidasm;
+ u32 pcidad;
+ u32 pcidma8c;
+ u32 pcidma9c;
+ u32 pcitc;
+};
+
+#define PCI_MSU_COUNT 2
+
+struct pci_msu {
+ u32 pciim[PCI_MSU_COUNT];
+ u32 pciom[PCI_MSU_COUNT];
+ u32 pciid;
+ u32 pciiic;
+ u32 pciiim;
+ u32 pciiod;
+ u32 pciioic;
+ u32 pciioim;
+};
+
+/*
+ * PCI Control Register
+ */
+
+#define PCI_CTL_EN (1 << 0)
+#define PCI_CTL_TNR (1 << 1)
+#define PCI_CTL_SCE (1 << 2)
+#define PCI_CTL_IEN (1 << 3)
+#define PCI_CTL_AAA (1 << 4)
+#define PCI_CTL_EAP (1 << 5)
+#define PCI_CTL_PCIM_BIT 6
+#define PCI_CTL_PCIM 0x000001c0
+
+#define PCI_CTL_PCIM_DIS 0
+#define PCI_CTL_PCIM_TNR 1 /* Satellite - target not ready */
+#define PCI_CTL_PCIM_SUS 2 /* Satellite - suspended CPU. */
+#define PCI_CTL_PCIM_EXT 3 /* Host - external arbiter. */
+#define PCI_CTL_PCIM_PRIO 4 /* Host - fixed priority arb. */
+#define PCI_CTL_PCIM_RR 5 /* Host - round robin priority. */
+#define PCI_CTL_PCIM_RSVD6 6
+#define PCI_CTL_PCIM_RSVD7 7
+
+#define PCI_CTL_IGM (1 << 9)
+
+/*
+ * PCI Status Register
+ */
+
+#define PCI_STAT_EED (1 << 0)
+#define PCI_STAT_WR (1 << 1)
+#define PCI_STAT_NMI (1 << 2)
+#define PCI_STAT_II (1 << 3)
+#define PCI_STAT_CWE (1 << 4)
+#define PCI_STAT_CRE (1 << 5)
+#define PCI_STAT_MDPE (1 << 6)
+#define PCI_STAT_STA (1 << 7)
+#define PCI_STAT_RTA (1 << 8)
+#define PCI_STAT_RMA (1 << 9)
+#define PCI_STAT_SSE (1 << 10)
+#define PCI_STAT_OSE (1 << 11)
+#define PCI_STAT_PE (1 << 12)
+#define PCI_STAT_TAE (1 << 13)
+#define PCI_STAT_RLE (1 << 14)
+#define PCI_STAT_BME (1 << 15)
+#define PCI_STAT_PRD (1 << 16)
+#define PCI_STAT_RIP (1 << 17)
+
+/*
+ * PCI Status Mask Register
+ */
+
+#define PCI_STATM_EED PCI_STAT_EED
+#define PCI_STATM_WR PCI_STAT_WR
+#define PCI_STATM_NMI PCI_STAT_NMI
+#define PCI_STATM_II PCI_STAT_II
+#define PCI_STATM_CWE PCI_STAT_CWE
+#define PCI_STATM_CRE PCI_STAT_CRE
+#define PCI_STATM_MDPE PCI_STAT_MDPE
+#define PCI_STATM_STA PCI_STAT_STA
+#define PCI_STATM_RTA PCI_STAT_RTA
+#define PCI_STATM_RMA PCI_STAT_RMA
+#define PCI_STATM_SSE PCI_STAT_SSE
+#define PCI_STATM_OSE PCI_STAT_OSE
+#define PCI_STATM_PE PCI_STAT_PE
+#define PCI_STATM_TAE PCI_STAT_TAE
+#define PCI_STATM_RLE PCI_STAT_RLE
+#define PCI_STATM_BME PCI_STAT_BME
+#define PCI_STATM_PRD PCI_STAT_PRD
+#define PCI_STATM_RIP PCI_STAT_RIP
+
+/*
+ * PCI Configuration Address Register
+ */
+#define PCI_CFGA_REG_BIT 2
+#define PCI_CFGA_REG 0x000000fc
+#define PCI_CFGA_REG_ID (0x00 >> 2) /* use PCFGID */
+#define PCI_CFGA_REG_04 (0x04 >> 2) /* use PCFG04_ */
+#define PCI_CFGA_REG_08 (0x08 >> 2) /* use PCFG08_ */
+#define PCI_CFGA_REG_0C (0x0C >> 2) /* use PCFG0C_ */
+#define PCI_CFGA_REG_PBA0 (0x10 >> 2) /* use PCIPBA_ */
+#define PCI_CFGA_REG_PBA1 (0x14 >> 2) /* use PCIPBA_ */
+#define PCI_CFGA_REG_PBA2 (0x18 >> 2) /* use PCIPBA_ */
+#define PCI_CFGA_REG_PBA3 (0x1c >> 2) /* use PCIPBA_ */
+#define PCI_CFGA_REG_SUBSYS (0x2c >> 2) /* use PCFGSS_ */
+#define PCI_CFGA_REG_3C (0x3C >> 2) /* use PCFG3C_ */
+#define PCI_CFGA_REG_PBBA0C (0x44 >> 2) /* use PCIPBAC_ */
+#define PCI_CFGA_REG_PBA0M (0x48 >> 2)
+#define PCI_CFGA_REG_PBA1C (0x4c >> 2) /* use PCIPBAC_ */
+#define PCI_CFGA_REG_PBA1M (0x50 >> 2)
+#define PCI_CFGA_REG_PBA2C (0x54 >> 2) /* use PCIPBAC_ */
+#define PCI_CFGA_REG_PBA2M (0x58 >> 2)
+#define PCI_CFGA_REG_PBA3C (0x5c >> 2) /* use PCIPBAC_ */
+#define PCI_CFGA_REG_PBA3M (0x60 >> 2)
+#define PCI_CFGA_REG_PMGT (0x64 >> 2)
+#define PCI_CFGA_FUNC_BIT 8
+#define PCI_CFGA_FUNC 0x00000700
+#define PCI_CFGA_DEV_BIT 11
+#define PCI_CFGA_DEV 0x0000f800
+#define PCI_CFGA_DEV_INTERN 0
+#define PCI_CFGA_BUS_BIT 16
+#define PCI_CFGA_BUS 0x00ff0000
+#define PCI_CFGA_BUS_TYPE0 0
+#define PCI_CFGA_EN (1 << 31)
+
+/* PCI CFG04 commands */
+#define PCI_CFG04_CMD_IO_ENA (1 << 0)
+#define PCI_CFG04_CMD_MEM_ENA (1 << 1)
+#define PCI_CFG04_CMD_BM_ENA (1 << 2)
+#define PCI_CFG04_CMD_MW_INV (1 << 4)
+#define PCI_CFG04_CMD_PAR_ENA (1 << 6)
+#define PCI_CFG04_CMD_SER_ENA (1 << 8)
+#define PCI_CFG04_CMD_FAST_ENA (1 << 9)
+
+/* PCI CFG04 status fields */
+#define PCI_CFG04_STAT_BIT 16
+#define PCI_CFG04_STAT 0xffff0000
+#define PCI_CFG04_STAT_66_MHZ (1 << 21)
+#define PCI_CFG04_STAT_FBB (1 << 23)
+#define PCI_CFG04_STAT_MDPE (1 << 24)
+#define PCI_CFG04_STAT_DST (1 << 25)
+#define PCI_CFG04_STAT_STA (1 << 27)
+#define PCI_CFG04_STAT_RTA (1 << 28)
+#define PCI_CFG04_STAT_RMA (1 << 29)
+#define PCI_CFG04_STAT_SSE (1 << 30)
+#define PCI_CFG04_STAT_PE (1 << 31)
+
+#define PCI_PBA_MSI (1 << 0)
+#define PCI_PBA_P (1 << 2)
+
+/* PCI PBAC registers */
+#define PCI_PBAC_MSI (1 << 0)
+#define PCI_PBAC_P (1 << 1)
+#define PCI_PBAC_SIZE_BIT 2
+#define PCI_PBAC_SIZE 0x0000007c
+#define PCI_PBAC_SB (1 << 7)
+#define PCI_PBAC_PP (1 << 8)
+#define PCI_PBAC_MR_BIT 9
+#define PCI_PBAC_MR 0x00000600
+#define PCI_PBAC_MR_RD 0
+#define PCI_PBAC_MR_RD_LINE 1
+#define PCI_PBAC_MR_RD_MULT 2
+#define PCI_PBAC_MRL (1 << 11)
+#define PCI_PBAC_MRM (1 << 12)
+#define PCI_PBAC_TRP (1 << 13)
+
+#define PCI_CFG40_TRDY_TIM 0x000000ff
+#define PCI_CFG40_RET_LIM 0x0000ff00
+
+/*
+ * PCI Local Base Address [0|1|2|3] Register
+ */
+
+#define PCI_LBA_BADDR_BIT 0
+#define PCI_LBA_BADDR 0xffffff00
+
+/*
+ * PCI Local Base Address Control Register
+ */
+
+#define PCI_LBAC_MSI (1 << 0)
+#define PCI_LBAC_MSI_MEM 0
+#define PCI_LBAC_MSI_IO 1
+#define PCI_LBAC_SIZE_BIT 2
+#define PCI_LBAC_SIZE 0x0000007c
+#define PCI_LBAC_SB (1 << 7)
+#define PCI_LBAC_RT (1 << 8)
+#define PCI_LBAC_RT_NO_PREF 0
+#define PCI_LBAC_RT_PREF 1
+
+/*
+ * PCI Local Base Address [0|1|2|3] Mapping Register
+ */
+#define PCI_LBAM_MADDR_BIT 8
+#define PCI_LBAM_MADDR 0xffffff00
+
+/*
+ * PCI Decoupled Access Control Register
+ */
+#define PCI_DAC_DEN (1 << 0)
+
+/*
+ * PCI Decoupled Access Status Register
+ */
+#define PCI_DAS_D (1 << 0)
+#define PCI_DAS_B (1 << 1)
+#define PCI_DAS_E (1 << 2)
+#define PCI_DAS_OFE (1 << 3)
+#define PCI_DAS_OFF (1 << 4)
+#define PCI_DAS_IFE (1 << 5)
+#define PCI_DAS_IFF (1 << 6)
+
+/*
+ * PCI DMA Channel 8 Configuration Register
+ */
+#define PCI_DMA8C_MBS_BIT 0
+#define PCI_DMA8C_MBS 0x00000fff /* Maximum Burst Size. */
+#define PCI_DMA8C_OUR (1 << 12)
+
+/*
+ * PCI DMA Channel 9 Configuration Register
+ */
+#define PCI_DMA9C_MBS_BIT 0 /* Maximum Burst Size. */
+#define PCI_DMA9C_MBS 0x00000fff
+
+/*
+ * PCI to Memory (DMA channel 8) and Memory to PCI (DMA channel 9) DMA descriptors
+ */
+
+#define PCI_DMAD_PT_BIT 22 /* in DEVCMD field (descriptor) */
+#define PCI_DMAD_PT 0x00c00000 /* preferred transaction field */
+/* These are for reads (DMA channel 8) */
+#define PCI_DMAD_DEVCMD_MR 0 /* memory read */
+#define PCI_DMAD_DEVCMD_MRL 1 /* memory read line */
+#define PCI_DMAD_DEVCMD_MRM 2 /* memory read multiple */
+#define PCI_DMAD_DEVCMD_IOR 3 /* I/O read */
+/* These are for writes (DMA channel 9) */
+#define PCI_DMAD_DEVCMD_MW 0 /* memory write */
+#define PCI_DMAD_DEVCMD_MWI 1 /* memory write invalidate */
+#define PCI_DMAD_DEVCMD_IOW 3 /* I/O write */
+
+/* Swap byte field applies to both DMA channel 8 and 9 */
+#define PCI_DMAD_SB (1 << 24) /* swap byte field */
+
+
+/*
+ * PCI Target Control Register
+ */
+
+#define PCI_TC_RTIMER_BIT 0
+#define PCI_TC_RTIMER 0x000000ff
+#define PCI_TC_DTIMER_BIT 8
+#define PCI_TC_DTIMER 0x0000ff00
+#define PCI_TC_RDR (1 << 18)
+#define PCI_TC_DDT (1 << 19)
+
+/*
+ * PCI messaging unit [applies to both inbound and outbound registers]
+ */
+#define PCI_MSU_M0 (1 << 0)
+#define PCI_MSU_M1 (1 << 1)
+#define PCI_MSU_DB (1 << 2)
+
+#define PCI_MSG_ADDR 0xB8088010
+#define PCI0_ADDR 0xB8080000
+#define rc32434_pci ((struct pci_reg *) PCI0_ADDR)
+#define rc32434_pci_msg ((struct pci_msu *) PCI_MSG_ADDR)
+
+#define PCIM_SHFT 0x6
+#define PCIM_BIT_LEN 0x7
+#define PCIM_H_EA 0x3
+#define PCIM_H_IA_FIX 0x4
+#define PCIM_H_IA_RR 0x5
+
+#define PCI_ADDR_START 0x50000000
+
+#define CPUTOPCI_MEM_WIN 0x02000000
+#define CPUTOPCI_IO_WIN 0x00100000
+#define PCILBA_SIZE_SHFT 2
+#define PCILBA_SIZE_MASK 0x1F
+#define SIZE_256MB 0x1C
+#define SIZE_128MB 0x1B
+#define SIZE_64MB 0x1A
+#define SIZE_32MB 0x19
+#define SIZE_16MB 0x18
+#define SIZE_4MB 0x16
+#define SIZE_2MB 0x15
+#define SIZE_1MB 0x14
+#define KORINA_CONFIG0_ADDR 0x80000000
+#define KORINA_CONFIG1_ADDR 0x80000004
+#define KORINA_CONFIG2_ADDR 0x80000008
+#define KORINA_CONFIG3_ADDR 0x8000000C
+#define KORINA_CONFIG4_ADDR 0x80000010
+#define KORINA_CONFIG5_ADDR 0x80000014
+#define KORINA_CONFIG6_ADDR 0x80000018
+#define KORINA_CONFIG7_ADDR 0x8000001C
+#define KORINA_CONFIG8_ADDR 0x80000020
+#define KORINA_CONFIG9_ADDR 0x80000024
+#define KORINA_CONFIG10_ADDR 0x80000028
+#define KORINA_CONFIG11_ADDR 0x8000002C
+#define KORINA_CONFIG12_ADDR 0x80000030
+#define KORINA_CONFIG13_ADDR 0x80000034
+#define KORINA_CONFIG14_ADDR 0x80000038
+#define KORINA_CONFIG15_ADDR 0x8000003C
+#define KORINA_CONFIG16_ADDR 0x80000040
+#define KORINA_CONFIG17_ADDR 0x80000044
+#define KORINA_CONFIG18_ADDR 0x80000048
+#define KORINA_CONFIG19_ADDR 0x8000004C
+#define KORINA_CONFIG20_ADDR 0x80000050
+#define KORINA_CONFIG21_ADDR 0x80000054
+#define KORINA_CONFIG22_ADDR 0x80000058
+#define KORINA_CONFIG23_ADDR 0x8000005C
+#define KORINA_CONFIG24_ADDR 0x80000060
+#define KORINA_CONFIG25_ADDR 0x80000064
+#define KORINA_CMD (PCI_CFG04_CMD_IO_ENA | \
+ PCI_CFG04_CMD_MEM_ENA | \
+ PCI_CFG04_CMD_BM_ENA | \
+ PCI_CFG04_CMD_MW_INV | \
+ PCI_CFG04_CMD_PAR_ENA | \
+ PCI_CFG04_CMD_SER_ENA)
+
+#define KORINA_STAT (PCI_CFG04_STAT_MDPE | \
+ PCI_CFG04_STAT_STA | \
+ PCI_CFG04_STAT_RTA | \
+ PCI_CFG04_STAT_RMA | \
+ PCI_CFG04_STAT_SSE | \
+ PCI_CFG04_STAT_PE)
+
+#define KORINA_CNFG1 (KORINA_STAT | KORINA_CMD)
+
+#define KORINA_REVID 0
+#define KORINA_CLASS_CODE 0
+#define KORINA_CNFG2 ((KORINA_CLASS_CODE<<8) | \
+ KORINA_REVID)
+
+#define KORINA_CACHE_LINE_SIZE 4
+#define KORINA_MASTER_LAT 0x3c
+#define KORINA_HEADER_TYPE 0
+#define KORINA_BIST 0
+
+#define KORINA_CNFG3 ((KORINA_BIST << 24) | \
+ (KORINA_HEADER_TYPE<<16) | \
+ (KORINA_MASTER_LAT<<8) | \
+ KORINA_CACHE_LINE_SIZE)
+
+#define KORINA_BAR0 0x00000008 /* 128 MB Memory */
+#define KORINA_BAR1 0x18800001 /* 1 MB IO */
+#define KORINA_BAR2 0x18000001 /* 2 MB IO window for Korina
+ internal Registers */
+#define KORINA_BAR3 0x48000008 /* Spare 128 MB Memory */
+
+#define KORINA_CNFG4 KORINA_BAR0
+#define KORINA_CNFG5 KORINA_BAR1
+#define KORINA_CNFG6 KORINA_BAR2
+#define KORINA_CNFG7 KORINA_BAR3
+
+#define KORINA_SUBSYS_VENDOR_ID 0x011d
+#define KORINA_SUBSYSTEM_ID 0x0214
+#define KORINA_CNFG8 0
+#define KORINA_CNFG9 0
+#define KORINA_CNFG10 0
+#define KORINA_CNFG11 ((KORINA_SUBSYS_VENDOR_ID<<16) | \
+ KORINA_SUBSYSTEM_ID)
+#define KORINA_INT_LINE 1
+#define KORINA_INT_PIN 1
+#define KORINA_MIN_GNT 8
+#define KORINA_MAX_LAT 0x38
+#define KORINA_CNFG12 0
+#define KORINA_CNFG13 0
+#define KORINA_CNFG14 0
+#define KORINA_CNFG15 ((KORINA_MAX_LAT<<24) | \
+ (KORINA_MIN_GNT<<16) | \
+ (KORINA_INT_PIN<<8) | \
+ KORINA_INT_LINE)
+#define KORINA_RETRY_LIMIT 0x80
+#define KORINA_TRDY_LIMIT 0x80
+#define KORINA_CNFG16 ((KORINA_RETRY_LIMIT<<8) | \
+ KORINA_TRDY_LIMIT)
+#define PCI_PBAxC_R 0x0
+#define PCI_PBAxC_RL 0x1
+#define PCI_PBAxC_RM 0x2
+#define SIZE_SHFT 2
+
+#if defined(__MIPSEB__)
+#define KORINA_PBA0C (PCI_PBAC_MRL | PCI_PBAC_SB | \
+ ((PCI_PBAxC_RM & 0x3) << PCI_PBAC_MR_BIT) | \
+ PCI_PBAC_PP | \
+ (SIZE_128MB<<SIZE_SHFT) | \
+ PCI_PBAC_P)
+#else
+#define KORINA_PBA0C (PCI_PBAC_MRL | \
+ ((PCI_PBAxC_RM & 0x3) << PCI_PBAC_MR_BIT) | \
+ PCI_PBAC_PP | \
+ (SIZE_128MB<<SIZE_SHFT) | \
+ PCI_PBAC_P)
+#endif
+#define KORINA_CNFG17 KORINA_PBA0C
+#define KORINA_PBA0M 0x0
+#define KORINA_CNFG18 KORINA_PBA0M
+
+#if defined(__MIPSEB__)
+#define KORINA_PBA1C ((SIZE_1MB<<SIZE_SHFT) | PCI_PBAC_SB | \
+ PCI_PBAC_MSI)
+#else
+#define KORINA_PBA1C ((SIZE_1MB<<SIZE_SHFT) | \
+ PCI_PBAC_MSI)
+#endif
+#define KORINA_CNFG19 KORINA_PBA1C
+#define KORINA_PBA1M 0x0
+#define KORINA_CNFG20 KORINA_PBA1M
+
+#if defined(__MIPSEB__)
+#define KORINA_PBA2C ((SIZE_2MB<<SIZE_SHFT) | PCI_PBAC_SB | \
+ PCI_PBAC_MSI)
+#else
+#define KORINA_PBA2C ((SIZE_2MB<<SIZE_SHFT) | \
+ PCI_PBAC_MSI)
+#endif
+#define KORINA_CNFG21 KORINA_PBA2C
+#define KORINA_PBA2M 0x18000000
+#define KORINA_CNFG22 KORINA_PBA2M
+#define KORINA_PBA3C 0
+#define KORINA_CNFG23 KORINA_PBA3C
+#define KORINA_PBA3M 0
+#define KORINA_CNFG24 KORINA_PBA3M
+
+#define PCITC_DTIMER_VAL 8
+#define PCITC_RTIMER_VAL 0x10
+
+#endif /* __ASM_RC32434_PCI_H */
diff --git a/arch/mips/include/asm/mach-rc32434/prom.h b/arch/mips/include/asm/mach-rc32434/prom.h
new file mode 100644
index 000000000..660707f1b
--- /dev/null
+++ b/arch/mips/include/asm/mach-rc32434/prom.h
@@ -0,0 +1,40 @@
+/*
+ * Definitions for the PROM
+ *
+ * Copyright 2002 Ryan Holm <ryan.holmQVist@idt.com>
+ * Copyright 2008 Florian Fainelli <florian@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#define PROM_ENTRY(x) (0xbfc00000 + ((x) * 8))
+
+#define SR_NMI 0x00180000
+#define SERIAL_SPEED_ENTRY 0x00000001
+
+#define FREQ_TAG "HZ="
+#define KMAC_TAG "kmac="
+#define MEM_TAG "mem="
+#define BOARD_TAG "board="
+
+#define BOARD_RB532 "500"
+#define BOARD_RB532A "500r5"
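The *_TAG strings above are the key prefixes of the "key=value" arguments handed over by the boot loader. A minimal sketch of scanning an argv-style argument list for the FREQ_TAG value; the helper name and the argument-walk are assumptions for illustration, not code from this tree:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>

static unsigned long __init prom_parse_freq(int argc, char **argv)
{
	unsigned long hz = 0;
	int i;

	for (i = 0; i < argc; i++) {
		/* Look for an argument of the form "HZ=<decimal>". */
		if (!strncmp(argv[i], FREQ_TAG, strlen(FREQ_TAG))) {
			if (kstrtoul(argv[i] + strlen(FREQ_TAG), 10, &hz))
				hz = 0;
			break;
		}
	}
	return hz;
}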
diff --git a/arch/mips/include/asm/mach-rc32434/rb.h b/arch/mips/include/asm/mach-rc32434/rb.h
new file mode 100644
index 000000000..d502673a4
--- /dev/null
+++ b/arch/mips/include/asm/mach-rc32434/rb.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *
+ * Copyright (C) 2004 IDT Inc.
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ */
+#ifndef __ASM_RC32434_RB_H
+#define __ASM_RC32434_RB_H
+
+#include <linux/genhd.h>
+
+#define REGBASE 0x18000000
+#define IDT434_REG_BASE ((volatile void *) KSEG1ADDR(REGBASE))
+#define UART0BASE 0x58000
+#define RST (1 << 15)
+#define DEV0BASE 0x010000
+#define DEV0MASK 0x010004
+#define DEV0C 0x010008
+#define DEV0T 0x01000C
+#define DEV1BASE 0x010010
+#define DEV1MASK 0x010014
+#define DEV1C 0x010018
+#define DEV1TC 0x01001C
+#define DEV2BASE 0x010020
+#define DEV2MASK 0x010024
+#define DEV2C 0x010028
+#define DEV2TC 0x01002C
+#define DEV3BASE 0x010030
+#define DEV3MASK 0x010034
+#define DEV3C 0x010038
+#define DEV3TC 0x01003C
+#define BTCS 0x010040
+#define BTCOMPARE 0x010044
+#define GPIOBASE 0x050000
+/* Offsets relative to GPIOBASE */
+#define GPIOFUNC 0x00
+#define GPIOCFG 0x04
+#define GPIOD 0x08
+#define GPIOILEVEL 0x0C
+#define GPIOISTAT 0x10
+#define GPIONMIEN 0x14
+#define IMASK6 0x38
+#define LO_WPX (1 << 0)
+#define LO_ALE (1 << 1)
+#define LO_CLE (1 << 2)
+#define LO_CEX (1 << 3)
+#define LO_FOFF (1 << 5)
+#define LO_SPICS (1 << 6)
+#define LO_ULED (1 << 7)
+
+#define BIT_TO_MASK(x) (1 << x)
+
+struct dev_reg {
+ u32 base;
+ u32 mask;
+ u32 ctl;
+ u32 timing;
+};
+
+struct korina_device {
+ char *name;
+ unsigned char mac[6];
+ struct net_device *dev;
+};
+
+struct mpmc_device {
+ unsigned char state;
+ spinlock_t lock;
+ void __iomem *base;
+};
+
+extern void set_latch_u5(unsigned char or_mask, unsigned char nand_mask);
+extern unsigned char get_latch_u5(void);
+
+#endif /* __ASM_RC32434_RB_H */
diff --git a/arch/mips/include/asm/mach-rc32434/rc32434.h b/arch/mips/include/asm/mach-rc32434/rc32434.h
new file mode 100644
index 000000000..1bec6cc8a
--- /dev/null
+++ b/arch/mips/include/asm/mach-rc32434/rc32434.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for the IDT RC32434 CPU.
+ */
+
+#ifndef _ASM_RC32434_RC32434_H_
+#define _ASM_RC32434_RC32434_H_
+
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#define IDT_CLOCK_MULT 2
+
+/* cpu pipeline flush */
+static inline void rc32434_sync(void)
+{
+ __asm__ volatile ("sync");
+}
+
+#endif /* _ASM_RC32434_RC32434_H_ */
diff --git a/arch/mips/include/asm/mach-rc32434/timer.h b/arch/mips/include/asm/mach-rc32434/timer.h
new file mode 100644
index 000000000..cda26bb9e
--- /dev/null
+++ b/arch/mips/include/asm/mach-rc32434/timer.h
@@ -0,0 +1,65 @@
+/*
+ * Definitions for timer registers
+ *
+ * Copyright 2004 Philip Rischel <rischelp@idt.com>
+ * Copyright 2008 Florian Fainelli <florian@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __ASM_RC32434_TIMER_H
+#define __ASM_RC32434_TIMER_H
+
+#include <asm/mach-rc32434/rb.h>
+
+#define TIMER0_BASE_ADDR 0x18028000
+#define TIMER_COUNT 3
+
+struct timer_counter {
+ u32 count;
+ u32 compare;
+	u32 ctc;	/* use CTC_ */
+};
+
+struct timer {
+ struct timer_counter tim[TIMER_COUNT];
+ u32 rcount; /* use RCOUNT_ */
+ u32 rcompare; /* use RCOMPARE_ */
+ u32 rtc; /* use RTC_ */
+};
+
+#define RC32434_CTC_EN_BIT 0
+#define RC32434_CTC_TO_BIT 1
+
+/* Real time clock registers */
+#define RC32434_RTC_MSK(x) BIT_TO_MASK(x)
+#define RC32434_RTC_CE_BIT 0
+#define RC32434_RTC_TO_BIT 1
+#define RC32434_RTC_RQE_BIT 2
+
+/* Counter registers */
+#define RC32434_RCOUNT_BIT 0
+#define RC32434_RCOUNT_MSK 0x0000ffff
+#define RC32434_RCOMP_BIT 0
+#define RC32434_RCOMP_MSK 0x0000ffff
+
+#endif /* __ASM_RC32434_TIMER_H */
diff --git a/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h b/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
new file mode 100644
index 000000000..e1e182300
--- /dev/null
+++ b/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
@@ -0,0 +1,42 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 04, 07 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * SNI RM200 C apparently was only shipped with R4600 V2.0 and R5000 processors.
+ */
+#ifndef __ASM_MACH_RM200_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_RM200_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_4k_cache 1
+#define cpu_has_32fpr 1
+#define cpu_has_counter 1
+#define cpu_has_watch 0
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_divec 0
+#define cpu_has_cache_cdex_p 1
+#define cpu_has_prefetch 0
+#define cpu_has_mcheck 0
+#define cpu_has_ejtag 0
+#define cpu_has_llsc 1
+#define cpu_has_vtag_icache 0
+#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_nofpuex 0
+#define cpu_has_64bits 1
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#endif /* __ASM_MACH_RM200_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-rm/mc146818rtc.h b/arch/mips/include/asm/mach-rm/mc146818rtc.h
new file mode 100644
index 000000000..a074f4f84
--- /dev/null
+++ b/arch/mips/include/asm/mach-rm/mc146818rtc.h
@@ -0,0 +1,21 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 by Ralf Baechle
+ *
+ * RTC routines for PC style attached Dallas chip with ARC epoch.
+ */
+#ifndef __ASM_MACH_RM_MC146818RTC_H
+#define __ASM_MACH_RM_MC146818RTC_H
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1900)
+#else
+#define mc146818_decode_year(year) ((year) + 1980)
+#endif
+
+#include <asm/mach-generic/mc146818rtc.h>
+
+#endif /* __ASM_MACH_RM_MC146818RTC_H */
diff --git a/arch/mips/include/asm/mach-sibyte/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sibyte/cpu-feature-overrides.h
new file mode 100644
index 000000000..702235805
--- /dev/null
+++ b/arch/mips/include/asm/mach-sibyte/cpu-feature-overrides.h
@@ -0,0 +1,49 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 04, 07 Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_MACH_SIBYTE_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_SIBYTE_CPU_FEATURE_OVERRIDES_H
+
+/*
+ * SiByte SOCs are MIPS64 processors wired to a specific configuration.
+ */
+#define cpu_has_watch 1
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 1
+#define cpu_has_mcheck 1
+#define cpu_has_ejtag 1
+
+#define cpu_has_llsc 1
+#define cpu_has_vtag_icache 1
+#define cpu_has_dc_aliases 0
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+#define cpu_icache_snoops_remote_store 0
+
+#define cpu_has_nofpuex 0
+#define cpu_has_64bits 1
+
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 1
+#define cpu_has_mips64r2 0
+
+#define cpu_has_inclusive_pcaches 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+#define cpu_scache_line_size() 32
+
+#endif /* __ASM_MACH_SIBYTE_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-tx39xx/ioremap.h b/arch/mips/include/asm/mach-tx39xx/ioremap.h
new file mode 100644
index 000000000..157a72923
--- /dev/null
+++ b/arch/mips/include/asm/mach-tx39xx/ioremap.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-mips/mach-tx39xx/ioremap.h
+ */
+#ifndef __ASM_MACH_TX39XX_IOREMAP_H
+#define __ASM_MACH_TX39XX_IOREMAP_H
+
+#include <linux/types.h>
+
+static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
+ unsigned long flags)
+{
+#define TXX9_DIRECTMAP_BASE 0xff000000ul
+ if (offset >= TXX9_DIRECTMAP_BASE &&
+ offset < TXX9_DIRECTMAP_BASE + 0xff0000)
+ return (void __iomem *)offset;
+ return NULL;
+}
+
+static inline int plat_iounmap(const volatile void __iomem *addr)
+{
+ return (unsigned long)addr >= TXX9_DIRECTMAP_BASE;
+}
+
+#endif /* __ASM_MACH_TX39XX_IOREMAP_H */
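A brief paraphrase of how these hooks are intended to be used: the generic MIPS ioremap path is assumed to give plat_ioremap() first refusal and to fall back to normal page-table mapping when it returns NULL, while plat_iounmap() tells iounmap whether a mapping needs tearing down. A sketch under that assumption (not the real arch/mips dispatch code; <linux/io.h> assumed):

static void __iomem *ioremap_direct_or_mapped(phys_addr_t offset,
					      unsigned long size)
{
	void __iomem *addr = plat_ioremap(offset, size, 0);

	/* Direct-mapped window hit, or generic fallback otherwise. */
	return addr ? addr : ioremap(offset, size);
}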
diff --git a/arch/mips/include/asm/mach-tx39xx/mangle-port.h b/arch/mips/include/asm/mach-tx39xx/mangle-port.h
new file mode 100644
index 000000000..95be45995
--- /dev/null
+++ b/arch/mips/include/asm/mach-tx39xx/mangle-port.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_TX39XX_MANGLE_PORT_H
+#define __ASM_MACH_TX39XX_MANGLE_PORT_H
+
+#if defined(CONFIG_TOSHIBA_JMR3927)
+extern unsigned long (*__swizzle_addr_b)(unsigned long port);
+#define NEEDS_TXX9_SWIZZLE_ADDR_B
+#else
+#define __swizzle_addr_b(port) (port)
+#endif
+#define __swizzle_addr_w(port) (port)
+#define __swizzle_addr_l(port) (port)
+#define __swizzle_addr_q(port) (port)
+
+#define ioswabb(a, x) (x)
+#define __mem_ioswabb(a, x) (x)
+#define ioswabw(a, x) le16_to_cpu((__force __le16)(x))
+#define __mem_ioswabw(a, x) (x)
+#define ioswabl(a, x) le32_to_cpu((__force __le32)(x))
+#define __mem_ioswabl(a, x) (x)
+#define ioswabq(a, x) le64_to_cpu((__force __le64)(x))
+#define __mem_ioswabq(a, x) (x)
+
+#endif /* __ASM_MACH_TX39XX_MANGLE_PORT_H */
diff --git a/arch/mips/include/asm/mach-tx39xx/spaces.h b/arch/mips/include/asm/mach-tx39xx/spaces.h
new file mode 100644
index 000000000..151fe7a1c
--- /dev/null
+++ b/arch/mips/include/asm/mach-tx39xx/spaces.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_TX39XX_SPACES_H
+#define _ASM_TX39XX_SPACES_H
+
+#define FIXADDR_TOP ((unsigned long)(long)(int)0xfefe0000)
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* __ASM_TX39XX_SPACES_H */
diff --git a/arch/mips/include/asm/mach-tx49xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-tx49xx/cpu-feature-overrides.h
new file mode 100644
index 000000000..04e424725
--- /dev/null
+++ b/arch/mips/include/asm/mach-tx49xx/cpu-feature-overrides.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_TX49XX_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_TX49XX_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_llsc 1
+#define cpu_has_64bits 1
+#define cpu_has_inclusive_pcaches 0
+
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#endif /* __ASM_MACH_TX49XX_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-tx49xx/ioremap.h b/arch/mips/include/asm/mach-tx49xx/ioremap.h
new file mode 100644
index 000000000..b1f3710ac
--- /dev/null
+++ b/arch/mips/include/asm/mach-tx49xx/ioremap.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-mips/mach-tx49xx/ioremap.h
+ */
+#ifndef __ASM_MACH_TX49XX_IOREMAP_H
+#define __ASM_MACH_TX49XX_IOREMAP_H
+
+#include <linux/types.h>
+
+static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
+ unsigned long flags)
+{
+#ifdef CONFIG_64BIT
+#define TXX9_DIRECTMAP_BASE 0xfff000000ul
+#else
+#define TXX9_DIRECTMAP_BASE 0xff000000ul
+#endif
+ if (offset >= TXX9_DIRECTMAP_BASE &&
+ offset < TXX9_DIRECTMAP_BASE + 0x400000)
+ return (void __iomem *)(unsigned long)(int)offset;
+ return NULL;
+}
+
+static inline int plat_iounmap(const volatile void __iomem *addr)
+{
+ return (unsigned long)addr >=
+ (unsigned long)(int)(TXX9_DIRECTMAP_BASE & 0xffffffff);
+}
+
+#endif /* __ASM_MACH_TX49XX_IOREMAP_H */
diff --git a/arch/mips/include/asm/mach-tx49xx/kmalloc.h b/arch/mips/include/asm/mach-tx49xx/kmalloc.h
new file mode 100644
index 000000000..c2a0a6fa4
--- /dev/null
+++ b/arch/mips/include/asm/mach-tx49xx/kmalloc.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_TX49XX_KMALLOC_H
+#define __ASM_MACH_TX49XX_KMALLOC_H
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+#endif /* __ASM_MACH_TX49XX_KMALLOC_H */
diff --git a/arch/mips/include/asm/mach-tx49xx/mangle-port.h b/arch/mips/include/asm/mach-tx49xx/mangle-port.h
new file mode 100644
index 000000000..98c7abf44
--- /dev/null
+++ b/arch/mips/include/asm/mach-tx49xx/mangle-port.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_TX49XX_MANGLE_PORT_H
+#define __ASM_MACH_TX49XX_MANGLE_PORT_H
+
+#define __swizzle_addr_b(port) (port)
+#define __swizzle_addr_w(port) (port)
+#define __swizzle_addr_l(port) (port)
+#define __swizzle_addr_q(port) (port)
+
+#define ioswabb(a, x) (x)
+#define __mem_ioswabb(a, x) (x)
+#if defined(CONFIG_TOSHIBA_RBTX4939) && \
+ IS_ENABLED(CONFIG_SMC91X) && \
+ defined(__BIG_ENDIAN)
+#define NEEDS_TXX9_IOSWABW
+extern u16 (*ioswabw)(volatile u16 *a, u16 x);
+extern u16 (*__mem_ioswabw)(volatile u16 *a, u16 x);
+#else
+#define ioswabw(a, x) le16_to_cpu((__force __le16)(x))
+#define __mem_ioswabw(a, x) (x)
+#endif
+#define ioswabl(a, x) le32_to_cpu((__force __le32)(x))
+#define __mem_ioswabl(a, x) (x)
+#define ioswabq(a, x) le64_to_cpu((__force __le64)(x))
+#define __mem_ioswabq(a, x) (x)
+
+#endif /* __ASM_MACH_TX49XX_MANGLE_PORT_H */
diff --git a/arch/mips/include/asm/mach-tx49xx/spaces.h b/arch/mips/include/asm/mach-tx49xx/spaces.h
new file mode 100644
index 000000000..0cb10a6f4
--- /dev/null
+++ b/arch/mips/include/asm/mach-tx49xx/spaces.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_TX49XX_SPACES_H
+#define _ASM_TX49XX_SPACES_H
+
+#define FIXADDR_TOP ((unsigned long)(long)(int)0xfefe0000)
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* __ASM_TX49XX_SPACES_H */
diff --git a/arch/mips/include/asm/mach-vr41xx/irq.h b/arch/mips/include/asm/mach-vr41xx/irq.h
new file mode 100644
index 000000000..4281b2b93
--- /dev/null
+++ b/arch/mips/include/asm/mach-vr41xx/irq.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_VR41XX_IRQ_H
+#define __ASM_MACH_VR41XX_IRQ_H
+
+#include <asm/vr41xx/irq.h> /* for MIPS_CPU_IRQ_BASE */
+
+#include <asm/mach-generic/irq.h>
+
+#endif /* __ASM_MACH_VR41XX_IRQ_H */
diff --git a/arch/mips/include/asm/machine.h b/arch/mips/include/asm/machine.h
new file mode 100644
index 000000000..fc64cce27
--- /dev/null
+++ b/arch/mips/include/asm/machine.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#ifndef __MIPS_ASM_MACHINE_H__
+#define __MIPS_ASM_MACHINE_H__
+
+#include <linux/libfdt.h>
+#include <linux/of.h>
+
+struct mips_machine {
+ const struct of_device_id *matches;
+ const void *fdt;
+ bool (*detect)(void);
+ const void *(*fixup_fdt)(const void *fdt, const void *match_data);
+ unsigned int (*measure_hpt_freq)(void);
+};
+
+extern long __mips_machines_start;
+extern long __mips_machines_end;
+
+#define MIPS_MACHINE(name) \
+ static const struct mips_machine __mips_mach_##name \
+ __used __section(".mips.machines.init")
+
+#define for_each_mips_machine(mach) \
+ for ((mach) = (struct mips_machine *)&__mips_machines_start; \
+ (mach) < (struct mips_machine *)&__mips_machines_end; \
+ (mach)++)
+
+/**
+ * mips_machine_is_compatible() - check if a machine is compatible with an FDT
+ * @mach: the machine struct to check
+ * @fdt: the FDT to check for compatibility with
+ *
+ * Check whether the given machine @mach is compatible with the given flattened
+ * device tree @fdt, based upon the compatibility property of the root node.
+ *
+ * Return: the device id matched if any, else NULL
+ */
+static inline const struct of_device_id *
+mips_machine_is_compatible(const struct mips_machine *mach, const void *fdt)
+{
+ const struct of_device_id *match;
+
+ if (!mach->matches)
+ return NULL;
+
+ for (match = mach->matches; match->compatible[0]; match++) {
+ if (fdt_node_check_compatible(fdt, 0, match->compatible) == 0)
+ return match;
+ }
+
+ return NULL;
+}
+
+/**
+ * struct mips_fdt_fixup - Describe a fixup to apply to an FDT
+ * @apply: applies the fixup to @fdt, returns zero on success else -errno
+ * @description: a short description of the fixup
+ *
+ * Describes a fixup applied to an FDT blob by the @apply function. The
+ * @description field provides a short description of the fixup intended for
+ * use in error messages if the @apply function returns non-zero.
+ */
+struct mips_fdt_fixup {
+ int (*apply)(void *fdt);
+ const char *description;
+};
+
+/**
+ * apply_mips_fdt_fixups() - apply fixups to an FDT blob
+ * @fdt_out: buffer in which to place the fixed-up FDT
+ * @fdt_out_size: the size of the @fdt_out buffer
+ * @fdt_in: the FDT blob
+ * @fixups: pointer to an array of fixups to be applied
+ *
+ * Loop through the array of fixups pointed to by @fixups, calling the apply
+ * function on each until either one returns an error or we reach the end of
+ * the list as indicated by an entry with a NULL apply field.
+ *
+ * Return: zero on success, else -errno
+ */
+extern int __init apply_mips_fdt_fixups(void *fdt_out, size_t fdt_out_size,
+ const void *fdt_in,
+ const struct mips_fdt_fixup *fixups);
+
+#endif /* __MIPS_ASM_MACHINE_H__ */
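A minimal usage sketch with made-up names ("acme,board", acme_fixup_memory): a platform registers itself with MIPS_MACHINE() and may supply a fixup table terminated by an entry whose apply callback is NULL, as described for apply_mips_fdt_fixups() above.

static const struct of_device_id acme_matches[] __initconst = {
	{ .compatible = "acme,board" },
	{ /* sentinel */ }
};

static int __init acme_fixup_memory(void *fdt)
{
	/* e.g. rewrite the /memory node from a probed SDRAM size */
	return 0;
}

static const struct mips_fdt_fixup acme_fixups[] __initconst = {
	{ .apply = acme_fixup_memory, .description = "fix up /memory" },
	{ /* terminator: NULL .apply ends the list */ }
};

MIPS_MACHINE(acme) = {
	.matches = acme_matches,
};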
diff --git a/arch/mips/include/asm/mc146818-time.h b/arch/mips/include/asm/mc146818-time.h
new file mode 100644
index 000000000..cbf5cec34
--- /dev/null
+++ b/arch/mips/include/asm/mc146818-time.h
@@ -0,0 +1,119 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Machine dependent access functions for RTC registers.
+ */
+#ifndef __ASM_MC146818_TIME_H
+#define __ASM_MC146818_TIME_H
+
+#include <linux/bcd.h>
+#include <linux/mc146818rtc.h>
+#include <linux/time.h>
+
+/*
+ * Tolerance window, in microseconds, around the 500 ms point at which
+ * set_rtc_mmss() should be called; used in the timer interrupt.
+ */
+#define USEC_AFTER 500000
+#define USEC_BEFORE 500000
+
+/*
+ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
+ * called 500 ms after the second nowtime has started, because when
+ * nowtime is written into the registers of the CMOS clock, it will
+ * jump to the next second precisely 500 ms later. Check the Motorola
+ * MC146818A or Dallas DS12887 data sheet for details.
+ *
+ * BUG: This routine does not handle hour overflow properly; it just
+ * sets the minutes. Usually you'll only notice that after reboot!
+ */
+static inline int mc146818_set_rtc_mmss(unsigned long nowtime)
+{
+ int real_seconds, real_minutes, cmos_minutes;
+ unsigned char save_control, save_freq_select;
+ int retval = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
+ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+ cmos_minutes = CMOS_READ(RTC_MINUTES);
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ cmos_minutes = bcd2bin(cmos_minutes);
+
+ /*
+ * since we're only adjusting minutes and seconds,
+ * don't interfere with hour overflow. This avoids
+ * messing with unknown time zones but requires your
+ * RTC not to be off by more than 15 minutes
+ */
+ real_seconds = nowtime % 60;
+ real_minutes = nowtime / 60;
+ if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
+ real_minutes += 30; /* correct for half hour time zone */
+ real_minutes %= 60;
+
+ if (abs(real_minutes - cmos_minutes) < 30) {
+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ real_seconds = bin2bcd(real_seconds);
+ real_minutes = bin2bcd(real_minutes);
+ }
+ CMOS_WRITE(real_seconds, RTC_SECONDS);
+ CMOS_WRITE(real_minutes, RTC_MINUTES);
+ } else {
+ printk_once(KERN_NOTICE
+ "set_rtc_mmss: can't update from %d to %d\n",
+ cmos_minutes, real_minutes);
+ retval = -1;
+ }
+
+ /* The following flags have to be released exactly in this order,
+ * otherwise the DS12887 (popular MC146818A clone with integrated
+ * battery and quartz) will not reset the oscillator and will not
+ * update precisely 500 ms later. You won't find this mentioned in
+ * the Dallas Semiconductor data sheets, but who believes data
+ * sheets anyway ... -- Markus Kuhn
+ */
+ CMOS_WRITE(save_control, RTC_CONTROL);
+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ return retval;
+}
+
+static inline time64_t mc146818_get_cmos_time(void)
+{
+ unsigned int year, mon, day, hour, min, sec;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+
+ do {
+ sec = CMOS_READ(RTC_SECONDS);
+ min = CMOS_READ(RTC_MINUTES);
+ hour = CMOS_READ(RTC_HOURS);
+ day = CMOS_READ(RTC_DAY_OF_MONTH);
+ mon = CMOS_READ(RTC_MONTH);
+ year = CMOS_READ(RTC_YEAR);
+ } while (sec != CMOS_READ(RTC_SECONDS));
+
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ sec = bcd2bin(sec);
+ min = bcd2bin(min);
+ hour = bcd2bin(hour);
+ day = bcd2bin(day);
+ mon = bcd2bin(mon);
+ year = bcd2bin(year);
+ }
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ year = mc146818_decode_year(year);
+
+ return mktime64(year, mon, day, hour, min, sec);
+}
+
+#endif /* __ASM_MC146818_TIME_H */
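A sketch of how a board might feed mc146818_get_cmos_time() into the kernel's persistent-clock hook; wiring it up through read_persistent_clock64() is an assumption for illustration, not something this header mandates:

#include <linux/timekeeping.h>

void read_persistent_clock64(struct timespec64 *ts)
{
	/* Seed system time from the battery-backed CMOS clock at boot. */
	ts->tv_sec  = mc146818_get_cmos_time();
	ts->tv_nsec = 0;
}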
diff --git a/arch/mips/include/asm/mc146818rtc.h b/arch/mips/include/asm/mc146818rtc.h
new file mode 100644
index 000000000..68b4da6d5
--- /dev/null
+++ b/arch/mips/include/asm/mc146818rtc.h
@@ -0,0 +1,16 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Machine dependent access functions for RTC registers.
+ *
+ * Copyright (C) 1996, 1997, 1998, 2000 Ralf Baechle
+ * Copyright (C) 2002 Maciej W. Rozycki
+ */
+#ifndef _ASM_MC146818RTC_H
+#define _ASM_MC146818RTC_H
+
+#include <mc146818rtc.h>
+
+#endif /* _ASM_MC146818RTC_H */
diff --git a/arch/mips/include/asm/mips-boards/bonito64.h b/arch/mips/include/asm/mips-boards/bonito64.h
new file mode 100644
index 000000000..5368891d4
--- /dev/null
+++ b/arch/mips/include/asm/mips-boards/bonito64.h
@@ -0,0 +1,430 @@
+/*
+ * Bonito Register Map
+ *
+ * This file is the original bonito.h from Algorithmics with minor changes
+ * to fit into linux.
+ *
+ * Copyright (c) 1999 Algorithmics Ltd
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2001 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Algorithmics gives permission for anyone to use and modify this file
+ * without any obligation or license condition except that you retain
+ * this copyright message in any source redistribution in whole or part.
+ *
+ */
+
+/* Revision 1.48 autogenerated on 08/17/99 15:20:01 */
+/* This bonito64 version edited from bonito.h Revision 1.48 on 11/09/00 */
+
+#ifndef _ASM_MIPS_BOARDS_BONITO64_H
+#define _ASM_MIPS_BOARDS_BONITO64_H
+
+#ifdef __ASSEMBLY__
+
+/* offsets from base register */
+#define BONITO(x) (x)
+
+#else
+
+/*
+ * Algorithmics Bonito64 system controller register base.
+ */
+extern unsigned long _pcictrl_bonito;
+extern unsigned long _pcictrl_bonito_pcicfg;
+
+#define BONITO(x) *(volatile u32 *)(_pcictrl_bonito + (x))
+
+#endif /* __ASSEMBLY__ */
+
+
+#define BONITO_BOOT_BASE 0x1fc00000
+#define BONITO_BOOT_SIZE 0x00100000
+#define BONITO_BOOT_TOP (BONITO_BOOT_BASE+BONITO_BOOT_SIZE-1)
+#define BONITO_FLASH_BASE 0x1c000000
+#define BONITO_FLASH_SIZE 0x03000000
+#define BONITO_FLASH_TOP (BONITO_FLASH_BASE+BONITO_FLASH_SIZE-1)
+#define BONITO_SOCKET_BASE 0x1f800000
+#define BONITO_SOCKET_SIZE 0x00400000
+#define BONITO_SOCKET_TOP (BONITO_SOCKET_BASE+BONITO_SOCKET_SIZE-1)
+#define BONITO_REG_BASE 0x1fe00000
+#define BONITO_REG_SIZE 0x00040000
+#define BONITO_REG_TOP (BONITO_REG_BASE+BONITO_REG_SIZE-1)
+#define BONITO_DEV_BASE 0x1ff00000
+#define BONITO_DEV_SIZE 0x00100000
+#define BONITO_DEV_TOP (BONITO_DEV_BASE+BONITO_DEV_SIZE-1)
+#define BONITO_PCILO_BASE 0x10000000
+#define BONITO_PCILO_SIZE 0x0c000000
+#define BONITO_PCILO_TOP (BONITO_PCILO_BASE+BONITO_PCILO_SIZE-1)
+#define BONITO_PCILO0_BASE 0x10000000
+#define BONITO_PCILO1_BASE 0x14000000
+#define BONITO_PCILO2_BASE 0x18000000
+#define BONITO_PCIHI_BASE 0x20000000
+#define BONITO_PCIHI_SIZE 0x20000000
+#define BONITO_PCIHI_TOP (BONITO_PCIHI_BASE+BONITO_PCIHI_SIZE-1)
+#define BONITO_PCIIO_BASE 0x1fd00000
+#define BONITO_PCIIO_SIZE 0x00100000
+#define BONITO_PCIIO_TOP (BONITO_PCIIO_BASE+BONITO_PCIIO_SIZE-1)
+#define BONITO_PCICFG_BASE 0x1fe80000
+#define BONITO_PCICFG_SIZE 0x00080000
+#define BONITO_PCICFG_TOP (BONITO_PCICFG_BASE+BONITO_PCICFG_SIZE-1)
+
+
+/* Bonito Register Bases */
+
+#define BONITO_PCICONFIGBASE 0x00
+#define BONITO_REGBASE 0x100
+
+
+/* PCI Configuration Registers */
+
+#define BONITO_PCI_REG(x) BONITO(BONITO_PCICONFIGBASE + (x))
+#define BONITO_PCIDID BONITO_PCI_REG(0x00)
+#define BONITO_PCICMD BONITO_PCI_REG(0x04)
+#define BONITO_PCICLASS BONITO_PCI_REG(0x08)
+#define BONITO_PCILTIMER BONITO_PCI_REG(0x0c)
+#define BONITO_PCIBASE0 BONITO_PCI_REG(0x10)
+#define BONITO_PCIBASE1 BONITO_PCI_REG(0x14)
+#define BONITO_PCIBASE2 BONITO_PCI_REG(0x18)
+#define BONITO_PCIEXPRBASE BONITO_PCI_REG(0x30)
+#define BONITO_PCIINT BONITO_PCI_REG(0x3c)
+
+#define BONITO_PCICMD_PERR_CLR 0x80000000
+#define BONITO_PCICMD_SERR_CLR 0x40000000
+#define BONITO_PCICMD_MABORT_CLR 0x20000000
+#define BONITO_PCICMD_MTABORT_CLR 0x10000000
+#define BONITO_PCICMD_TABORT_CLR 0x08000000
+#define BONITO_PCICMD_MPERR_CLR 0x01000000
+#define BONITO_PCICMD_PERRRESPEN 0x00000040
+#define BONITO_PCICMD_ASTEPEN 0x00000080
+#define BONITO_PCICMD_SERREN 0x00000100
+#define BONITO_PCILTIMER_BUSLATENCY 0x0000ff00
+#define BONITO_PCILTIMER_BUSLATENCY_SHIFT 8
+
+
+
+
+/* 1. Bonito h/w Configuration */
+/* Power on register */
+
+#define BONITO_BONPONCFG BONITO(BONITO_REGBASE + 0x00)
+
+#define BONITO_BONPONCFG_SYSCONTROLLERRD 0x00040000
+#define BONITO_BONPONCFG_ROMCS1SAMP 0x00020000
+#define BONITO_BONPONCFG_ROMCS0SAMP 0x00010000
+#define BONITO_BONPONCFG_CPUBIGEND 0x00004000
+/* Added by RPF 11-9-00 */
+#define BONITO_BONPONCFG_BURSTORDER 0x00001000
+/* --- */
+#define BONITO_BONPONCFG_CPUPARITY 0x00002000
+#define BONITO_BONPONCFG_CPUTYPE 0x00000007
+#define BONITO_BONPONCFG_CPUTYPE_SHIFT 0
+#define BONITO_BONPONCFG_PCIRESET_OUT 0x00000008
+#define BONITO_BONPONCFG_IS_ARBITER 0x00000010
+#define BONITO_BONPONCFG_ROMBOOT 0x000000c0
+#define BONITO_BONPONCFG_ROMBOOT_SHIFT 6
+
+#define BONITO_BONPONCFG_ROMBOOT_FLASH (0x0<<BONITO_BONPONCFG_ROMBOOT_SHIFT)
+#define BONITO_BONPONCFG_ROMBOOT_SOCKET (0x1<<BONITO_BONPONCFG_ROMBOOT_SHIFT)
+#define BONITO_BONPONCFG_ROMBOOT_SDRAM (0x2<<BONITO_BONPONCFG_ROMBOOT_SHIFT)
+#define BONITO_BONPONCFG_ROMBOOT_CPURESET (0x3<<BONITO_BONPONCFG_ROMBOOT_SHIFT)
+
+#define BONITO_BONPONCFG_ROMCS0WIDTH 0x00000100
+#define BONITO_BONPONCFG_ROMCS1WIDTH 0x00000200
+#define BONITO_BONPONCFG_ROMCS0FAST 0x00000400
+#define BONITO_BONPONCFG_ROMCS1FAST 0x00000800
+#define BONITO_BONPONCFG_CONFIG_DIS 0x00000020
+
+
+/* Other Bonito configuration */
+
+#define BONITO_BONGENCFG_OFFSET 0x4
+#define BONITO_BONGENCFG BONITO(BONITO_REGBASE + BONITO_BONGENCFG_OFFSET)
+
+#define BONITO_BONGENCFG_DEBUGMODE 0x00000001
+#define BONITO_BONGENCFG_SNOOPEN 0x00000002
+#define BONITO_BONGENCFG_CPUSELFRESET 0x00000004
+
+#define BONITO_BONGENCFG_FORCE_IRQA 0x00000008
+#define BONITO_BONGENCFG_IRQA_ISOUT 0x00000010
+#define BONITO_BONGENCFG_IRQA_FROM_INT1 0x00000020
+#define BONITO_BONGENCFG_BYTESWAP 0x00000040
+
+#define BONITO_BONGENCFG_UNCACHED 0x00000080
+#define BONITO_BONGENCFG_PREFETCHEN 0x00000100
+#define BONITO_BONGENCFG_WBEHINDEN 0x00000200
+#define BONITO_BONGENCFG_CACHEALG 0x00000c00
+#define BONITO_BONGENCFG_CACHEALG_SHIFT 10
+#define BONITO_BONGENCFG_PCIQUEUE 0x00001000
+#define BONITO_BONGENCFG_CACHESTOP 0x00002000
+#define BONITO_BONGENCFG_MSTRBYTESWAP 0x00004000
+#define BONITO_BONGENCFG_BUSERREN 0x00008000
+#define BONITO_BONGENCFG_NORETRYTIMEOUT 0x00010000
+#define BONITO_BONGENCFG_SHORTCOPYTIMEOUT 0x00020000
+
+/* 2. IO & IDE configuration */
+
+#define BONITO_IODEVCFG BONITO(BONITO_REGBASE + 0x08)
+
+/* 3. SDRAM configuration */
+
+#define BONITO_SDCFG BONITO(BONITO_REGBASE + 0x0c)
+
+/* 4. PCI address map control */
+
+#define BONITO_PCIMAP BONITO(BONITO_REGBASE + 0x10)
+#define BONITO_PCIMEMBASECFG BONITO(BONITO_REGBASE + 0x14)
+#define BONITO_PCIMAP_CFG BONITO(BONITO_REGBASE + 0x18)
+
+/* 5. ICU & GPIO regs */
+
+/* GPIO Regs - r/w */
+
+#define BONITO_GPIODATA_OFFSET 0x1c
+#define BONITO_GPIODATA BONITO(BONITO_REGBASE + BONITO_GPIODATA_OFFSET)
+#define BONITO_GPIOIE BONITO(BONITO_REGBASE + 0x20)
+
+/* ICU Configuration Regs - r/w */
+
+#define BONITO_INTEDGE BONITO(BONITO_REGBASE + 0x24)
+#define BONITO_INTSTEER BONITO(BONITO_REGBASE + 0x28)
+#define BONITO_INTPOL BONITO(BONITO_REGBASE + 0x2c)
+
+/* ICU Enable Regs - IntEn & IntISR are r/o. */
+
+#define BONITO_INTENSET BONITO(BONITO_REGBASE + 0x30)
+#define BONITO_INTENCLR BONITO(BONITO_REGBASE + 0x34)
+#define BONITO_INTEN BONITO(BONITO_REGBASE + 0x38)
+#define BONITO_INTISR BONITO(BONITO_REGBASE + 0x3c)
+
+/* PCI mail boxes */
+
+#define BONITO_PCIMAIL0_OFFSET 0x40
+#define BONITO_PCIMAIL1_OFFSET 0x44
+#define BONITO_PCIMAIL2_OFFSET 0x48
+#define BONITO_PCIMAIL3_OFFSET 0x4c
+#define BONITO_PCIMAIL0 BONITO(BONITO_REGBASE + 0x40)
+#define BONITO_PCIMAIL1 BONITO(BONITO_REGBASE + 0x44)
+#define BONITO_PCIMAIL2 BONITO(BONITO_REGBASE + 0x48)
+#define BONITO_PCIMAIL3 BONITO(BONITO_REGBASE + 0x4c)
+
+
+/* 6. PCI cache */
+
+#define BONITO_PCICACHECTRL BONITO(BONITO_REGBASE + 0x50)
+#define BONITO_PCICACHETAG BONITO(BONITO_REGBASE + 0x54)
+
+#define BONITO_PCIBADADDR BONITO(BONITO_REGBASE + 0x58)
+#define BONITO_PCIMSTAT BONITO(BONITO_REGBASE + 0x5c)
+
+
+/*
+#define BONITO_PCIRDPOST BONITO(BONITO_REGBASE + 0x60)
+#define BONITO_PCIDATA BONITO(BONITO_REGBASE + 0x64)
+*/
+
+/* 7. IDE DMA & Copier */
+
+#define BONITO_CONFIGBASE 0x000
+#define BONITO_BONITOBASE 0x100
+#define BONITO_LDMABASE 0x200
+#define BONITO_COPBASE 0x300
+#define BONITO_REG_BLOCKMASK 0x300
+
+#define BONITO_LDMACTRL BONITO(BONITO_LDMABASE + 0x0)
+#define BONITO_LDMASTAT BONITO(BONITO_LDMABASE + 0x0)
+#define BONITO_LDMAADDR BONITO(BONITO_LDMABASE + 0x4)
+#define BONITO_LDMAGO BONITO(BONITO_LDMABASE + 0x8)
+#define BONITO_LDMADATA BONITO(BONITO_LDMABASE + 0xc)
+
+#define BONITO_COPCTRL BONITO(BONITO_COPBASE + 0x0)
+#define BONITO_COPSTAT BONITO(BONITO_COPBASE + 0x0)
+#define BONITO_COPPADDR BONITO(BONITO_COPBASE + 0x4)
+#define BONITO_COPDADDR BONITO(BONITO_COPBASE + 0x8)
+#define BONITO_COPGO BONITO(BONITO_COPBASE + 0xc)
+
+
+/* ###### Bit Definitions for individual Registers #### */
+
+/* Gen DMA. */
+
+#define BONITO_IDECOPDADDR_DMA_DADDR 0x0ffffffc
+#define BONITO_IDECOPDADDR_DMA_DADDR_SHIFT 2
+#define BONITO_IDECOPPADDR_DMA_PADDR 0xfffffffc
+#define BONITO_IDECOPPADDR_DMA_PADDR_SHIFT 2
+#define BONITO_IDECOPGO_DMA_SIZE 0x0000fffe
+#define BONITO_IDECOPGO_DMA_SIZE_SHIFT 0
+#define BONITO_IDECOPGO_DMA_WRITE 0x00010000
+#define BONITO_IDECOPGO_DMAWCOUNT 0x000f0000
+#define BONITO_IDECOPGO_DMAWCOUNT_SHIFT 16
+
+#define BONITO_IDECOPCTRL_DMA_STARTBIT 0x80000000
+#define BONITO_IDECOPCTRL_DMA_RSTBIT 0x40000000
+
+/* DRAM - sdCfg */
+
+#define BONITO_SDCFG_AROWBITS 0x00000003
+#define BONITO_SDCFG_AROWBITS_SHIFT 0
+#define BONITO_SDCFG_ACOLBITS 0x0000000c
+#define BONITO_SDCFG_ACOLBITS_SHIFT 2
+#define BONITO_SDCFG_ABANKBIT 0x00000010
+#define BONITO_SDCFG_ASIDES 0x00000020
+#define BONITO_SDCFG_AABSENT 0x00000040
+#define BONITO_SDCFG_AWIDTH64 0x00000080
+
+#define BONITO_SDCFG_BROWBITS 0x00000300
+#define BONITO_SDCFG_BROWBITS_SHIFT 8
+#define BONITO_SDCFG_BCOLBITS 0x00000c00
+#define BONITO_SDCFG_BCOLBITS_SHIFT 10
+#define BONITO_SDCFG_BBANKBIT 0x00001000
+#define BONITO_SDCFG_BSIDES 0x00002000
+#define BONITO_SDCFG_BABSENT 0x00004000
+#define BONITO_SDCFG_BWIDTH64 0x00008000
+
+#define BONITO_SDCFG_EXTRDDATA 0x00010000
+#define BONITO_SDCFG_EXTRASCAS 0x00020000
+#define BONITO_SDCFG_EXTPRECH 0x00040000
+#define BONITO_SDCFG_EXTRASWIDTH 0x00180000
+#define BONITO_SDCFG_EXTRASWIDTH_SHIFT 19
+/* Changed by RPF 11-9-00 */
+#define BONITO_SDCFG_DRAMMODESET 0x00200000
+/* --- */
+#define BONITO_SDCFG_DRAMEXTREGS 0x00400000
+#define BONITO_SDCFG_DRAMPARITY 0x00800000
+/* Added by RPF 11-9-00 */
+#define BONITO_SDCFG_DRAMBURSTLEN 0x03000000
+#define BONITO_SDCFG_DRAMBURSTLEN_SHIFT 24
+#define BONITO_SDCFG_DRAMMODESET_DONE 0x80000000
+/* --- */
+
+/* PCI Cache - pciCacheCtrl */
+
+#define BONITO_PCICACHECTRL_CACHECMD 0x00000007
+#define BONITO_PCICACHECTRL_CACHECMD_SHIFT 0
+#define BONITO_PCICACHECTRL_CACHECMDLINE 0x00000018
+#define BONITO_PCICACHECTRL_CACHECMDLINE_SHIFT 3
+#define BONITO_PCICACHECTRL_CMDEXEC 0x00000020
+
+#define BONITO_PCICACHECTRL_IOBCCOH_PRES 0x00000100
+#define BONITO_PCICACHECTRL_IOBCCOH_EN 0x00000200
+#define BONITO_PCICACHECTRL_CPUCOH_PRES 0x00000400
+#define BONITO_PCICACHECTRL_CPUCOH_EN 0x00000800
+
+#define BONITO_IODEVCFG_BUFFBIT_CS0 0x00000001
+#define BONITO_IODEVCFG_SPEEDBIT_CS0 0x00000002
+#define BONITO_IODEVCFG_MOREABITS_CS0 0x00000004
+
+#define BONITO_IODEVCFG_BUFFBIT_CS1 0x00000008
+#define BONITO_IODEVCFG_SPEEDBIT_CS1 0x00000010
+#define BONITO_IODEVCFG_MOREABITS_CS1 0x00000020
+
+#define BONITO_IODEVCFG_BUFFBIT_CS2 0x00000040
+#define BONITO_IODEVCFG_SPEEDBIT_CS2 0x00000080
+#define BONITO_IODEVCFG_MOREABITS_CS2 0x00000100
+
+#define BONITO_IODEVCFG_BUFFBIT_CS3 0x00000200
+#define BONITO_IODEVCFG_SPEEDBIT_CS3 0x00000400
+#define BONITO_IODEVCFG_MOREABITS_CS3 0x00000800
+
+#define BONITO_IODEVCFG_BUFFBIT_IDE 0x00001000
+#define BONITO_IODEVCFG_SPEEDBIT_IDE 0x00002000
+#define BONITO_IODEVCFG_WORDSWAPBIT_IDE 0x00004000
+#define BONITO_IODEVCFG_MODEBIT_IDE 0x00008000
+#define BONITO_IODEVCFG_DMAON_IDE 0x001f0000
+#define BONITO_IODEVCFG_DMAON_IDE_SHIFT 16
+#define BONITO_IODEVCFG_DMAOFF_IDE 0x01e00000
+#define BONITO_IODEVCFG_DMAOFF_IDE_SHIFT 21
+#define BONITO_IODEVCFG_EPROMSPLIT 0x02000000
+/* Added by RPF 11-9-00 */
+#define BONITO_IODEVCFG_CPUCLOCKPERIOD 0xfc000000
+#define BONITO_IODEVCFG_CPUCLOCKPERIOD_SHIFT 26
+/* --- */
+
+/* gpio */
+#define BONITO_GPIO_GPIOW 0x000003ff
+#define BONITO_GPIO_GPIOW_SHIFT 0
+#define BONITO_GPIO_GPIOR 0x01ff0000
+#define BONITO_GPIO_GPIOR_SHIFT 16
+#define BONITO_GPIO_GPINR 0xfe000000
+#define BONITO_GPIO_GPINR_SHIFT 25
+#define BONITO_GPIO_IOW(N) (1<<(BONITO_GPIO_GPIOW_SHIFT+(N)))
+#define BONITO_GPIO_IOR(N) (1<<(BONITO_GPIO_GPIOR_SHIFT+(N)))
+#define BONITO_GPIO_INR(N) (1<<(BONITO_GPIO_GPINR_SHIFT+(N)))
+
+/* ICU */
+#define BONITO_ICU_MBOXES 0x0000000f
+#define BONITO_ICU_MBOXES_SHIFT 0
+#define BONITO_ICU_DMARDY 0x00000010
+#define BONITO_ICU_DMAEMPTY 0x00000020
+#define BONITO_ICU_COPYRDY 0x00000040
+#define BONITO_ICU_COPYEMPTY 0x00000080
+#define BONITO_ICU_COPYERR 0x00000100
+#define BONITO_ICU_PCIIRQ 0x00000200
+#define BONITO_ICU_MASTERERR 0x00000400
+#define BONITO_ICU_SYSTEMERR 0x00000800
+#define BONITO_ICU_DRAMPERR 0x00001000
+#define BONITO_ICU_RETRYERR 0x00002000
+#define BONITO_ICU_GPIOS 0x01ff0000
+#define BONITO_ICU_GPIOS_SHIFT 16
+#define BONITO_ICU_GPINS 0x7e000000
+#define BONITO_ICU_GPINS_SHIFT 25
+#define BONITO_ICU_MBOX(N) (1<<(BONITO_ICU_MBOXES_SHIFT+(N)))
+#define BONITO_ICU_GPIO(N) (1<<(BONITO_ICU_GPIOS_SHIFT+(N)))
+#define BONITO_ICU_GPIN(N) (1<<(BONITO_ICU_GPINS_SHIFT+(N)))
+
+/* pcimap */
+
+#define BONITO_PCIMAP_PCIMAP_LO0 0x0000003f
+#define BONITO_PCIMAP_PCIMAP_LO0_SHIFT 0
+#define BONITO_PCIMAP_PCIMAP_LO1 0x00000fc0
+#define BONITO_PCIMAP_PCIMAP_LO1_SHIFT 6
+#define BONITO_PCIMAP_PCIMAP_LO2 0x0003f000
+#define BONITO_PCIMAP_PCIMAP_LO2_SHIFT 12
+#define BONITO_PCIMAP_PCIMAP_2 0x00040000
+#define BONITO_PCIMAP_WIN(WIN, ADDR) ((((ADDR)>>26) & BONITO_PCIMAP_PCIMAP_LO0) << ((WIN)*6))
+
+#define BONITO_PCIMAP_WINSIZE (1<<26)
+#define BONITO_PCIMAP_WINOFFSET(ADDR) ((ADDR) & (BONITO_PCIMAP_WINSIZE - 1))
+#define BONITO_PCIMAP_WINBASE(ADDR) ((ADDR) << 26)
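The pcimap defines above carve the register into three 6-bit window-select fields, one per 64 MB (BONITO_PCIMAP_WINSIZE) PCI_Lo window. A purely illustrative sketch of an identity-style mapping of the three CPU windows onto the BONITO_PCILOn_BASE addresses; the function name is hypothetical:

static void __init bonito_pcimap_sketch(void)
{
	/*
	 * Each field selects which 64 MB of PCI space the corresponding
	 * CPU window at BONITO_PCILOn_BASE sees.
	 */
	BONITO_PCIMAP = BONITO_PCIMAP_WIN(0, BONITO_PCILO0_BASE) |
			BONITO_PCIMAP_WIN(1, BONITO_PCILO1_BASE) |
			BONITO_PCIMAP_WIN(2, BONITO_PCILO2_BASE);
}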
+
+/* pcimembaseCfg */
+
+#define BONITO_PCIMEMBASECFG_MASK 0xf0000000
+#define BONITO_PCIMEMBASECFG_MEMBASE0_MASK 0x0000001f
+#define BONITO_PCIMEMBASECFG_MEMBASE0_MASK_SHIFT 0
+#define BONITO_PCIMEMBASECFG_MEMBASE0_TRANS 0x000003e0
+#define BONITO_PCIMEMBASECFG_MEMBASE0_TRANS_SHIFT 5
+#define BONITO_PCIMEMBASECFG_MEMBASE0_CACHED 0x00000400
+#define BONITO_PCIMEMBASECFG_MEMBASE0_IO 0x00000800
+
+#define BONITO_PCIMEMBASECFG_MEMBASE1_MASK 0x0001f000
+#define BONITO_PCIMEMBASECFG_MEMBASE1_MASK_SHIFT 12
+#define BONITO_PCIMEMBASECFG_MEMBASE1_TRANS 0x003e0000
+#define BONITO_PCIMEMBASECFG_MEMBASE1_TRANS_SHIFT 17
+#define BONITO_PCIMEMBASECFG_MEMBASE1_CACHED 0x00400000
+#define BONITO_PCIMEMBASECFG_MEMBASE1_IO 0x00800000
+
+#define BONITO_PCIMEMBASECFG_ASHIFT 23
+#define BONITO_PCIMEMBASECFG_AMASK 0x007fffff
+#define BONITO_PCIMEMBASECFGSIZE(WIN, SIZE) (((~((SIZE)-1))>>(BONITO_PCIMEMBASECFG_ASHIFT-BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT)) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK)
+#define BONITO_PCIMEMBASECFGBASE(WIN, BASE) (((BASE)>>(BONITO_PCIMEMBASECFG_ASHIFT-BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS_SHIFT)) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS)
+
+#define BONITO_PCIMEMBASECFG_SIZE(WIN, CFG) (((((~(CFG)) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK)) << (BONITO_PCIMEMBASECFG_ASHIFT - BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT)) | BONITO_PCIMEMBASECFG_AMASK)
+
+
+#define BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG) ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
+#define BONITO_PCIMEMBASECFG_ADDRTRANS(WIN, CFG) ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
+
+#define BONITO_PCITOPHYS(WIN, ADDR, CFG) ( \
+ (((ADDR) & (~(BONITO_PCIMEMBASECFG_MASK))) & (~(BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG)))) | \
+ (BONITO_PCIMEMBASECFG_ADDRTRANS(WIN, CFG)) \
+ )
+
+/* PCICmd */
+
+#define BONITO_PCICMD_MEMEN 0x00000002
+#define BONITO_PCICMD_MSTREN 0x00000004
+
+
+#endif /* _ASM_MIPS_BOARDS_BONITO64_H */
diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h
new file mode 100644
index 000000000..c904c2455
--- /dev/null
+++ b/arch/mips/include/asm/mips-boards/generic.h
@@ -0,0 +1,79 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Defines of the MIPS boards specific address-MAP, registers, etc.
+ *
+ * Copyright (C) 2000,2012 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Steven J. Hill <sjhill@mips.com>
+ */
+#ifndef __ASM_MIPS_BOARDS_GENERIC_H
+#define __ASM_MIPS_BOARDS_GENERIC_H
+
+#include <asm/addrspace.h>
+#include <asm/byteorder.h>
+#include <asm/mips-boards/bonito64.h>
+
+/*
+ * Display register base.
+ */
+#define ASCII_DISPLAY_WORD_BASE 0x1f000410
+#define ASCII_DISPLAY_POS_BASE 0x1f000418
+
+/*
+ * Revision register.
+ */
+#define MIPS_REVISION_REG 0x1fc00010
+#define MIPS_REVISION_CORID_QED_RM5261 0
+#define MIPS_REVISION_CORID_CORE_LV 1
+#define MIPS_REVISION_CORID_BONITO64 2
+#define MIPS_REVISION_CORID_CORE_20K 3
+#define MIPS_REVISION_CORID_CORE_FPGA 4
+#define MIPS_REVISION_CORID_CORE_MSC 5
+#define MIPS_REVISION_CORID_CORE_EMUL 6
+#define MIPS_REVISION_CORID_CORE_FPGA2 7
+#define MIPS_REVISION_CORID_CORE_FPGAR2 8
+#define MIPS_REVISION_CORID_CORE_FPGA3 9
+#define MIPS_REVISION_CORID_CORE_24K 10
+#define MIPS_REVISION_CORID_CORE_FPGA4 11
+#define MIPS_REVISION_CORID_CORE_FPGA5 12
+
+/**** Artificial corid defines ****/
+/*
+ * CoreEMUL with Bonito System Controller is treated like a Core20K
+ * CoreEMUL with SOC-it 101 System Controller is treated like a CoreMSC
+ */
+#define MIPS_REVISION_CORID_CORE_EMUL_BON -1
+#define MIPS_REVISION_CORID_CORE_EMUL_MSC -2
+
+#define MIPS_REVISION_CORID (((*(volatile u32 *)ioremap(MIPS_REVISION_REG, 4)) >> 10) & 0x3f)
+
+#define MIPS_REVISION_SCON_OTHER 0
+#define MIPS_REVISION_SCON_SOCITSC 1
+#define MIPS_REVISION_SCON_SOCITSCP 2
+
+/* Artificial SCON defines for MIPS_REVISION_SCON_OTHER */
+#define MIPS_REVISION_SCON_UNKNOWN -1
+#define MIPS_REVISION_SCON_GT64120 -2
+#define MIPS_REVISION_SCON_BONITO -3
+#define MIPS_REVISION_SCON_BRTL -4
+#define MIPS_REVISION_SCON_SOCIT -5
+#define MIPS_REVISION_SCON_ROCIT -6
+
+#define MIPS_REVISION_SCONID (((*(volatile u32 *)ioremap(MIPS_REVISION_REG, 4)) >> 24) & 0xff)
+
+extern int mips_revision_sconid;
+
+#ifdef CONFIG_PCI
+extern void mips_pcibios_init(void);
+#else
+#define mips_pcibios_init() do { } while (0)
+#endif
+
+extern void mips_scroll_message(void);
+extern void mips_display_message(const char *str);
+
+#endif /* __ASM_MIPS_BOARDS_GENERIC_H */
diff --git a/arch/mips/include/asm/mips-boards/launch.h b/arch/mips/include/asm/mips-boards/launch.h
new file mode 100644
index 000000000..f93aa5ee2
--- /dev/null
+++ b/arch/mips/include/asm/mips-boards/launch.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Secondary CPU launch definitions for MIPS boards.
+ */
+
+#ifndef _ASSEMBLER_
+
+struct cpulaunch {
+ unsigned long pc;
+ unsigned long gp;
+ unsigned long sp;
+ unsigned long a0;
+ unsigned long _pad[3]; /* pad to cache line size to avoid thrashing */
+ unsigned long flags;
+};
+
+#else
+
+#define LOG2CPULAUNCH 5
+#define LAUNCH_PC 0
+#define LAUNCH_GP 4
+#define LAUNCH_SP 8
+#define LAUNCH_A0 12
+#define LAUNCH_FLAGS 28
+
+#endif
+
+#define LAUNCH_FREADY 1
+#define LAUNCH_FGO 2
+#define LAUNCH_FGONE 4
+
+#define CPULAUNCH 0x00000f00
+#define NCPULAUNCH 8
+
+/* Polling period in count cycles for secondary CPU's */
+#define LAUNCHPERIOD 10000
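The LAUNCH_* byte offsets in the assembler branch mirror the C struct cpulaunch layout; on a 32-bit build (4-byte unsigned long) the numbers line up as below. A hypothetical compile-time check, not part of the header, just to make the correspondence explicit:

#include <linux/build_bug.h>
#include <linux/stddef.h>

static inline void cpulaunch_layout_check(void)
{
	/*
	 * Literal values mirror LAUNCH_PC/GP/SP/A0/FLAGS, which are only
	 * visible to assembler code in this header; 32-bit layout assumed.
	 */
	BUILD_BUG_ON(offsetof(struct cpulaunch, pc)    != 0);
	BUILD_BUG_ON(offsetof(struct cpulaunch, gp)    != 4);
	BUILD_BUG_ON(offsetof(struct cpulaunch, sp)    != 8);
	BUILD_BUG_ON(offsetof(struct cpulaunch, a0)    != 12);
	BUILD_BUG_ON(offsetof(struct cpulaunch, flags) != 28);
	BUILD_BUG_ON(sizeof(struct cpulaunch) != 32);	/* 1 << LOG2CPULAUNCH */
}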
diff --git a/arch/mips/include/asm/mips-boards/malta.h b/arch/mips/include/asm/mips-boards/malta.h
new file mode 100644
index 000000000..254be3d62
--- /dev/null
+++ b/arch/mips/include/asm/mips-boards/malta.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Defines of the Malta board specific address-MAP, registers, etc.
+ */
+#ifndef __ASM_MIPS_BOARDS_MALTA_H
+#define __ASM_MIPS_BOARDS_MALTA_H
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/mips-boards/msc01_pci.h>
+#include <asm/gt64120.h>
+
+/* Mips interrupt controller found in SOCit variations */
+#define MIPS_MSC01_IC_REG_BASE 0x1bc40000
+#define MIPS_SOCITSC_IC_REG_BASE 0x1ffa0000
+
+/*
+ * Malta I/O ports base address for the Galileo GT64120 and Algorithmics
+ * Bonito system controllers.
+ */
+#define MALTA_GT_PORT_BASE get_gt_port_base(GT_PCI0IOLD_OFS)
+#define MALTA_BONITO_PORT_BASE ((unsigned long)ioremap (0x1fd00000, 0x10000))
+#define MALTA_MSC_PORT_BASE get_msc_port_base(MSC01_PCI_SC2PIOBASL)
+
+static inline unsigned long get_gt_port_base(unsigned long reg)
+{
+ unsigned long addr;
+ addr = GT_READ(reg);
+ return (unsigned long) ioremap (((addr & 0xffff) << 21), 0x10000);
+}
+
+static inline unsigned long get_msc_port_base(unsigned long reg)
+{
+ unsigned long addr;
+ MSC_READ(reg, addr);
+ return (unsigned long) ioremap(addr, 0x10000);
+}
+
+/*
+ * GCMP Specific definitions
+ */
+#define GCMP_BASE_ADDR 0x1fbf8000
+#define GCMP_ADDRSPACE_SZ (256 * 1024)
+
+/*
+ * GIC Specific definitions
+ */
+#define GIC_BASE_ADDR 0x1bdc0000
+#define GIC_ADDRSPACE_SZ (128 * 1024)
+
+/*
+ * CPC Specific definitions
+ */
+#define CPC_BASE_ADDR 0x1bde0000
+
+/*
+ * MSC01 BIU Specific definitions
+ * FIXME : These should be elsewhere ?
+ */
+#define MSC01_BIU_REG_BASE 0x1bc80000
+#define MSC01_BIU_ADDRSPACE_SZ (256 * 1024)
+#define MSC01_SC_CFG_OFS 0x0110
+#define MSC01_SC_CFG_GICPRES_MSK 0x00000004
+#define MSC01_SC_CFG_GICPRES_SHF 2
+#define MSC01_SC_CFG_GICENA_SHF 3
+
+/*
+ * Malta RTC-device indirect register access.
+ */
+#define MALTA_RTC_ADR_REG 0x70
+#define MALTA_RTC_DAT_REG 0x71
+
+/*
+ * Malta SMSC FDC37M817 Super I/O Controller register.
+ */
+#define SMSC_CONFIG_REG 0x3f0
+#define SMSC_DATA_REG 0x3f1
+
+#define SMSC_CONFIG_DEVNUM 0x7
+#define SMSC_CONFIG_ACTIVATE 0x30
+#define SMSC_CONFIG_ENTER 0x55
+#define SMSC_CONFIG_EXIT 0xaa
+
+#define SMSC_CONFIG_DEVNUM_FLOPPY 0
+
+#define SMSC_CONFIG_ACTIVATE_ENABLE 1
+
+#define SMSC_WRITE(x, a) outb(x, a)
+
+#define MALTA_JMPRS_REG 0x1f000210
+
+extern void __init *malta_dt_shim(void *fdt);
+
+#endif /* __ASM_MIPS_BOARDS_MALTA_H */
diff --git a/arch/mips/include/asm/mips-boards/maltaint.h b/arch/mips/include/asm/mips-boards/maltaint.h
new file mode 100644
index 000000000..817698abf
--- /dev/null
+++ b/arch/mips/include/asm/mips-boards/maltaint.h
@@ -0,0 +1,63 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000,2012 MIPS Technologies, Inc. All rights reserved.
+ * Carsten Langgaard <carstenl@mips.com>
+ * Steven J. Hill <sjhill@mips.com>
+ */
+#ifndef _MIPS_MALTAINT_H
+#define _MIPS_MALTAINT_H
+
+/*
+ * Interrupts 0..15 are used for Malta ISA compatible interrupts
+ */
+#define MALTA_INT_BASE 0
+
+/* CPU interrupt offsets */
+#define MIPSCPU_INT_SW0 0
+#define MIPSCPU_INT_SW1 1
+#define MIPSCPU_INT_MB0 2
+#define MIPSCPU_INT_I8259A MIPSCPU_INT_MB0
+#define MIPSCPU_INT_GIC MIPSCPU_INT_MB0 /* GIC chained interrupt */
+#define MIPSCPU_INT_MB1 3
+#define MIPSCPU_INT_SMI MIPSCPU_INT_MB1
+#define MIPSCPU_INT_MB2 4
+#define MIPSCPU_INT_MB3 5
+#define MIPSCPU_INT_COREHI MIPSCPU_INT_MB3
+#define MIPSCPU_INT_MB4 6
+#define MIPSCPU_INT_CORELO MIPSCPU_INT_MB4
+
+/*
+ * Interrupts 96..127 are used for Soc-it Classic interrupts
+ */
+#define MSC01C_INT_BASE 96
+
+/* SOC-it Classic interrupt offsets */
+#define MSC01C_INT_TMR 0
+#define MSC01C_INT_PCI 1
+
+/*
+ * Interrupts 96..127 are used for Soc-it EIC interrupts
+ */
+#define MSC01E_INT_BASE 96
+
+/* SOC-it EIC interrupt offsets */
+#define MSC01E_INT_SW0 1
+#define MSC01E_INT_SW1 2
+#define MSC01E_INT_MB0 3
+#define MSC01E_INT_I8259A MSC01E_INT_MB0
+#define MSC01E_INT_MB1 4
+#define MSC01E_INT_SMI MSC01E_INT_MB1
+#define MSC01E_INT_MB2 5
+#define MSC01E_INT_MB3 6
+#define MSC01E_INT_COREHI MSC01E_INT_MB3
+#define MSC01E_INT_MB4 7
+#define MSC01E_INT_CORELO MSC01E_INT_MB4
+#define MSC01E_INT_TMR 8
+#define MSC01E_INT_PCI 9
+#define MSC01E_INT_PERFCTR 10
+#define MSC01E_INT_CPUCTR 11
+
+#endif /* !(_MIPS_MALTAINT_H) */
diff --git a/arch/mips/include/asm/mips-boards/msc01_pci.h b/arch/mips/include/asm/mips-boards/msc01_pci.h
new file mode 100644
index 000000000..e036b7dd6
--- /dev/null
+++ b/arch/mips/include/asm/mips-boards/msc01_pci.h
@@ -0,0 +1,258 @@
+/*
+ * PCI Register definitions for the MIPS System Controller.
+ *
+ * Copyright (C) 2002, 2005 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Maciej W. Rozycki <macro@mips.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_MIPS_BOARDS_MSC01_PCI_H
+#define __ASM_MIPS_BOARDS_MSC01_PCI_H
+
+/*
+ * Register offset addresses
+ */
+
+#define MSC01_PCI_ID_OFS 0x0000
+#define MSC01_PCI_SC2PMBASL_OFS 0x0208
+#define MSC01_PCI_SC2PMMSKL_OFS 0x0218
+#define MSC01_PCI_SC2PMMAPL_OFS 0x0228
+#define MSC01_PCI_SC2PIOBASL_OFS 0x0248
+#define MSC01_PCI_SC2PIOMSKL_OFS 0x0258
+#define MSC01_PCI_SC2PIOMAPL_OFS 0x0268
+#define MSC01_PCI_P2SCMSKL_OFS 0x0308
+#define MSC01_PCI_P2SCMAPL_OFS 0x0318
+#define MSC01_PCI_INTCFG_OFS 0x0600
+#define MSC01_PCI_INTSTAT_OFS 0x0608
+#define MSC01_PCI_CFGADDR_OFS 0x0610
+#define MSC01_PCI_CFGDATA_OFS 0x0618
+#define MSC01_PCI_IACK_OFS 0x0620
+#define MSC01_PCI_HEAD0_OFS 0x2000 /* DevID, VendorID */
+#define MSC01_PCI_HEAD1_OFS 0x2008 /* Status, Command */
+#define MSC01_PCI_HEAD2_OFS 0x2010 /* Class code, RevID */
+#define MSC01_PCI_HEAD3_OFS 0x2018 /* bist, header, latency */
+#define MSC01_PCI_HEAD4_OFS 0x2020 /* BAR 0 */
+#define MSC01_PCI_HEAD5_OFS 0x2028 /* BAR 1 */
+#define MSC01_PCI_HEAD6_OFS 0x2030 /* BAR 2 */
+#define MSC01_PCI_HEAD7_OFS 0x2038 /* BAR 3 */
+#define MSC01_PCI_HEAD8_OFS 0x2040 /* BAR 4 */
+#define MSC01_PCI_HEAD9_OFS 0x2048 /* BAR 5 */
+#define MSC01_PCI_HEAD10_OFS 0x2050 /* CardBus CIS Ptr */
+#define MSC01_PCI_HEAD11_OFS 0x2058 /* SubSystem ID, -VendorID */
+#define MSC01_PCI_HEAD12_OFS 0x2060 /* ROM BAR */
+#define MSC01_PCI_HEAD13_OFS 0x2068 /* Capabilities ptr */
+#define MSC01_PCI_HEAD14_OFS 0x2070 /* reserved */
+#define MSC01_PCI_HEAD15_OFS 0x2078 /* MaxLat, MinGnt, IntPin, IntLine */
+#define MSC01_PCI_BAR0_OFS 0x2220
+#define MSC01_PCI_CFG_OFS 0x2380
+#define MSC01_PCI_SWAP_OFS 0x2388
+
+
+/*****************************************************************************
+ * Register encodings
+ ****************************************************************************/
+
+#define MSC01_PCI_ID_ID_SHF 16
+#define MSC01_PCI_ID_ID_MSK 0x00ff0000
+#define MSC01_PCI_ID_ID_HOSTBRIDGE 82
+#define MSC01_PCI_ID_MAR_SHF 8
+#define MSC01_PCI_ID_MAR_MSK 0x0000ff00
+#define MSC01_PCI_ID_MIR_SHF 0
+#define MSC01_PCI_ID_MIR_MSK 0x000000ff
+
+#define MSC01_PCI_SC2PMBASL_BAS_SHF 24
+#define MSC01_PCI_SC2PMBASL_BAS_MSK 0xff000000
+
+#define MSC01_PCI_SC2PMMSKL_MSK_SHF 24
+#define MSC01_PCI_SC2PMMSKL_MSK_MSK 0xff000000
+
+#define MSC01_PCI_SC2PMMAPL_MAP_SHF 24
+#define MSC01_PCI_SC2PMMAPL_MAP_MSK 0xff000000
+
+#define MSC01_PCI_SC2PIOBASL_BAS_SHF 24
+#define MSC01_PCI_SC2PIOBASL_BAS_MSK 0xff000000
+
+#define MSC01_PCI_SC2PIOMSKL_MSK_SHF 24
+#define MSC01_PCI_SC2PIOMSKL_MSK_MSK 0xff000000
+
+#define MSC01_PCI_SC2PIOMAPL_MAP_SHF 24
+#define MSC01_PCI_SC2PIOMAPL_MAP_MSK 0xff000000
+
+#define MSC01_PCI_P2SCMSKL_MSK_SHF 24
+#define MSC01_PCI_P2SCMSKL_MSK_MSK 0xff000000
+
+#define MSC01_PCI_P2SCMAPL_MAP_SHF 24
+#define MSC01_PCI_P2SCMAPL_MAP_MSK 0xff000000
+
+#define MSC01_PCI_INTCFG_RST_SHF 10
+#define MSC01_PCI_INTCFG_RST_MSK 0x00000400
+#define MSC01_PCI_INTCFG_RST_BIT 0x00000400
+#define MSC01_PCI_INTCFG_MWE_SHF 9
+#define MSC01_PCI_INTCFG_MWE_MSK 0x00000200
+#define MSC01_PCI_INTCFG_MWE_BIT 0x00000200
+#define MSC01_PCI_INTCFG_DTO_SHF 8
+#define MSC01_PCI_INTCFG_DTO_MSK 0x00000100
+#define MSC01_PCI_INTCFG_DTO_BIT 0x00000100
+#define MSC01_PCI_INTCFG_MA_SHF 7
+#define MSC01_PCI_INTCFG_MA_MSK 0x00000080
+#define MSC01_PCI_INTCFG_MA_BIT 0x00000080
+#define MSC01_PCI_INTCFG_TA_SHF 6
+#define MSC01_PCI_INTCFG_TA_MSK 0x00000040
+#define MSC01_PCI_INTCFG_TA_BIT 0x00000040
+#define MSC01_PCI_INTCFG_RTY_SHF 5
+#define MSC01_PCI_INTCFG_RTY_MSK 0x00000020
+#define MSC01_PCI_INTCFG_RTY_BIT 0x00000020
+#define MSC01_PCI_INTCFG_MWP_SHF 4
+#define MSC01_PCI_INTCFG_MWP_MSK 0x00000010
+#define MSC01_PCI_INTCFG_MWP_BIT 0x00000010
+#define MSC01_PCI_INTCFG_MRP_SHF 3
+#define MSC01_PCI_INTCFG_MRP_MSK 0x00000008
+#define MSC01_PCI_INTCFG_MRP_BIT 0x00000008
+#define MSC01_PCI_INTCFG_SWP_SHF 2
+#define MSC01_PCI_INTCFG_SWP_MSK 0x00000004
+#define MSC01_PCI_INTCFG_SWP_BIT 0x00000004
+#define MSC01_PCI_INTCFG_SRP_SHF 1
+#define MSC01_PCI_INTCFG_SRP_MSK 0x00000002
+#define MSC01_PCI_INTCFG_SRP_BIT 0x00000002
+#define MSC01_PCI_INTCFG_SE_SHF 0
+#define MSC01_PCI_INTCFG_SE_MSK 0x00000001
+#define MSC01_PCI_INTCFG_SE_BIT 0x00000001
+
+#define MSC01_PCI_INTSTAT_RST_SHF 10
+#define MSC01_PCI_INTSTAT_RST_MSK 0x00000400
+#define MSC01_PCI_INTSTAT_RST_BIT 0x00000400
+#define MSC01_PCI_INTSTAT_MWE_SHF 9
+#define MSC01_PCI_INTSTAT_MWE_MSK 0x00000200
+#define MSC01_PCI_INTSTAT_MWE_BIT 0x00000200
+#define MSC01_PCI_INTSTAT_DTO_SHF 8
+#define MSC01_PCI_INTSTAT_DTO_MSK 0x00000100
+#define MSC01_PCI_INTSTAT_DTO_BIT 0x00000100
+#define MSC01_PCI_INTSTAT_MA_SHF 7
+#define MSC01_PCI_INTSTAT_MA_MSK 0x00000080
+#define MSC01_PCI_INTSTAT_MA_BIT 0x00000080
+#define MSC01_PCI_INTSTAT_TA_SHF 6
+#define MSC01_PCI_INTSTAT_TA_MSK 0x00000040
+#define MSC01_PCI_INTSTAT_TA_BIT 0x00000040
+#define MSC01_PCI_INTSTAT_RTY_SHF 5
+#define MSC01_PCI_INTSTAT_RTY_MSK 0x00000020
+#define MSC01_PCI_INTSTAT_RTY_BIT 0x00000020
+#define MSC01_PCI_INTSTAT_MWP_SHF 4
+#define MSC01_PCI_INTSTAT_MWP_MSK 0x00000010
+#define MSC01_PCI_INTSTAT_MWP_BIT 0x00000010
+#define MSC01_PCI_INTSTAT_MRP_SHF 3
+#define MSC01_PCI_INTSTAT_MRP_MSK 0x00000008
+#define MSC01_PCI_INTSTAT_MRP_BIT 0x00000008
+#define MSC01_PCI_INTSTAT_SWP_SHF 2
+#define MSC01_PCI_INTSTAT_SWP_MSK 0x00000004
+#define MSC01_PCI_INTSTAT_SWP_BIT 0x00000004
+#define MSC01_PCI_INTSTAT_SRP_SHF 1
+#define MSC01_PCI_INTSTAT_SRP_MSK 0x00000002
+#define MSC01_PCI_INTSTAT_SRP_BIT 0x00000002
+#define MSC01_PCI_INTSTAT_SE_SHF 0
+#define MSC01_PCI_INTSTAT_SE_MSK 0x00000001
+#define MSC01_PCI_INTSTAT_SE_BIT 0x00000001
+
+#define MSC01_PCI_CFGADDR_BNUM_SHF 16
+#define MSC01_PCI_CFGADDR_BNUM_MSK 0x00ff0000
+#define MSC01_PCI_CFGADDR_DNUM_SHF 11
+#define MSC01_PCI_CFGADDR_DNUM_MSK 0x0000f800
+#define MSC01_PCI_CFGADDR_FNUM_SHF 8
+#define MSC01_PCI_CFGADDR_FNUM_MSK 0x00000700
+#define MSC01_PCI_CFGADDR_RNUM_SHF 2
+#define MSC01_PCI_CFGADDR_RNUM_MSK 0x000000fc
+
+#define MSC01_PCI_CFGDATA_DATA_SHF 0
+#define MSC01_PCI_CFGDATA_DATA_MSK 0xffffffff
+
+/* The defines below are ONLY valid for a MEM bar! */
+#define MSC01_PCI_BAR0_SIZE_SHF 4
+#define MSC01_PCI_BAR0_SIZE_MSK 0xfffffff0
+#define MSC01_PCI_BAR0_P_SHF 3
+#define MSC01_PCI_BAR0_P_MSK 0x00000008
+#define MSC01_PCI_BAR0_P_BIT MSC01_PCI_BAR0_P_MSK
+#define MSC01_PCI_BAR0_D_SHF 1
+#define MSC01_PCI_BAR0_D_MSK 0x00000006
+#define MSC01_PCI_BAR0_T_SHF 0
+#define MSC01_PCI_BAR0_T_MSK 0x00000001
+#define MSC01_PCI_BAR0_T_BIT MSC01_PCI_BAR0_T_MSK
+
+
+#define MSC01_PCI_CFG_RA_SHF 17
+#define MSC01_PCI_CFG_RA_MSK 0x00020000
+#define MSC01_PCI_CFG_RA_BIT MSC01_PCI_CFG_RA_MSK
+#define MSC01_PCI_CFG_G_SHF 16
+#define MSC01_PCI_CFG_G_MSK 0x00010000
+#define MSC01_PCI_CFG_G_BIT MSC01_PCI_CFG_G_MSK
+#define MSC01_PCI_CFG_EN_SHF 15
+#define MSC01_PCI_CFG_EN_MSK 0x00008000
+#define MSC01_PCI_CFG_EN_BIT MSC01_PCI_CFG_EN_MSK
+#define MSC01_PCI_CFG_MAXRTRY_SHF 0
+#define MSC01_PCI_CFG_MAXRTRY_MSK 0x00000fff
+
+#define MSC01_PCI_SWAP_IO_SHF 18
+#define MSC01_PCI_SWAP_IO_MSK 0x000c0000
+#define MSC01_PCI_SWAP_MEM_SHF 16
+#define MSC01_PCI_SWAP_MEM_MSK 0x00030000
+#define MSC01_PCI_SWAP_BAR0_SHF 0
+#define MSC01_PCI_SWAP_BAR0_MSK 0x00000003
+#define MSC01_PCI_SWAP_NOSWAP 0
+#define MSC01_PCI_SWAP_BYTESWAP 1
+
+/*
+ * MIPS System controller PCI register base.
+ *
+ * FIXME - are these macros specific to Malta and co or to the MSC? If the
+ * latter, they should be moved elsewhere.
+ */
+#define MIPS_MSC01_PCI_REG_BASE 0x1bd00000
+#define MIPS_SOCITSC_PCI_REG_BASE 0x1ff10000
+
+extern unsigned long _pcictrl_msc;
+
+#define MSC01_PCI_REG_BASE _pcictrl_msc
+
+#define MSC_WRITE(reg, data) do { *(volatile u32 *)(reg) = data; } while (0)
+#define MSC_READ(reg, data) do { data = *(volatile u32 *)(reg); } while (0)
+
+/*
+ * Registers absolute addresses
+ */
+
+#define MSC01_PCI_ID (MSC01_PCI_REG_BASE + MSC01_PCI_ID_OFS)
+#define MSC01_PCI_SC2PMBASL (MSC01_PCI_REG_BASE + MSC01_PCI_SC2PMBASL_OFS)
+#define MSC01_PCI_SC2PMMSKL (MSC01_PCI_REG_BASE + MSC01_PCI_SC2PMMSKL_OFS)
+#define MSC01_PCI_SC2PMMAPL (MSC01_PCI_REG_BASE + MSC01_PCI_SC2PMMAPL_OFS)
+#define MSC01_PCI_SC2PIOBASL (MSC01_PCI_REG_BASE + MSC01_PCI_SC2PIOBASL_OFS)
+#define MSC01_PCI_SC2PIOMSKL (MSC01_PCI_REG_BASE + MSC01_PCI_SC2PIOMSKL_OFS)
+#define MSC01_PCI_SC2PIOMAPL (MSC01_PCI_REG_BASE + MSC01_PCI_SC2PIOMAPL_OFS)
+#define MSC01_PCI_P2SCMSKL (MSC01_PCI_REG_BASE + MSC01_PCI_P2SCMSKL_OFS)
+#define MSC01_PCI_P2SCMAPL (MSC01_PCI_REG_BASE + MSC01_PCI_P2SCMAPL_OFS)
+#define MSC01_PCI_INTCFG (MSC01_PCI_REG_BASE + MSC01_PCI_INTCFG_OFS)
+#define MSC01_PCI_INTSTAT (MSC01_PCI_REG_BASE + MSC01_PCI_INTSTAT_OFS)
+#define MSC01_PCI_CFGADDR (MSC01_PCI_REG_BASE + MSC01_PCI_CFGADDR_OFS)
+#define MSC01_PCI_CFGDATA (MSC01_PCI_REG_BASE + MSC01_PCI_CFGDATA_OFS)
+#define MSC01_PCI_IACK (MSC01_PCI_REG_BASE + MSC01_PCI_IACK_OFS)
+#define MSC01_PCI_HEAD0 (MSC01_PCI_REG_BASE + MSC01_PCI_HEAD0_OFS)
+#define MSC01_PCI_HEAD1 (MSC01_PCI_REG_BASE + MSC01_PCI_HEAD1_OFS)
+#define MSC01_PCI_HEAD2 (MSC01_PCI_REG_BASE + MSC01_PCI_HEAD2_OFS)
+#define MSC01_PCI_HEAD3 (MSC01_PCI_REG_BASE + MSC01_PCI_HEAD3_OFS)
+#define MSC01_PCI_HEAD4 (MSC01_PCI_REG_BASE + MSC01_PCI_HEAD4_OFS)
+#define MSC01_PCI_HEAD5 (MSC01_PCI_REG_BASE + MSC01_PCI_HEAD5_OFS)
+#define MSC01_PCI_HEAD6 (MSC01_PCI_REG_BASE + MSC01_PCI_HEAD6_OFS)
+#define MSC01_PCI_HEAD7 (MSC01_PCI_REG_BASE + MSC01_PCI_HEAD7_OFS)
+#define MSC01_PCI_HEAD8 (MSC01_PCI_REG_BASE + MSC01_PCI_HEAD8_OFS)
+#define MSC01_PCI_HEAD9 (MSC01_PCI_REG_BASE + MSC01_PCI_HEAD9_OFS)
+#define MSC01_PCI_HEAD10 (MSC01_PCI_REG_BASE + MSC01_PCI_HEAD10_OFS)
+#define MSC01_PCI_HEAD11 (MSC01_PCI_REG_BASE + MSC01_PCI_HEAD11_OFS)
+#define MSC01_PCI_HEAD12 (MSC01_PCI_REG_BASE + MSC01_PCI_HEAD12_OFS)
+#define MSC01_PCI_HEAD13 (MSC01_PCI_REG_BASE + MSC01_PCI_HEAD13_OFS)
+#define MSC01_PCI_HEAD14 (MSC01_PCI_REG_BASE + MSC01_PCI_HEAD14_OFS)
+#define MSC01_PCI_HEAD15 (MSC01_PCI_REG_BASE + MSC01_PCI_HEAD15_OFS)
+#define MSC01_PCI_BAR0 (MSC01_PCI_REG_BASE + MSC01_PCI_BAR0_OFS)
+#define MSC01_PCI_CFG (MSC01_PCI_REG_BASE + MSC01_PCI_CFG_OFS)
+#define MSC01_PCI_SWAP (MSC01_PCI_REG_BASE + MSC01_PCI_SWAP_OFS)
+
+#endif /* __ASM_MIPS_BOARDS_MSC01_PCI_H */
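
CFGADDR/CFGDATA implement the usual indirect PCI configuration mechanism: compose bus/device/function/register into CFGADDR, then read or write CFGDATA. A minimal sketch of a 32-bit config read using the MSC_WRITE/MSC_READ helpers above; the function name is illustrative and master/target-abort handling is omitted:

#include <linux/types.h>
#include <asm/mips-boards/msc01_pci.h>

static u32 msc01_pci_config_read32(unsigned int bus, unsigned int dev,
				   unsigned int func, unsigned int reg)
{
	u32 data;

	/* Select the target dword in PCI configuration space */
	MSC_WRITE(MSC01_PCI_CFGADDR,
		  (bus  << MSC01_PCI_CFGADDR_BNUM_SHF) |
		  (dev  << MSC01_PCI_CFGADDR_DNUM_SHF) |
		  (func << MSC01_PCI_CFGADDR_FNUM_SHF) |
		  (reg & MSC01_PCI_CFGADDR_RNUM_MSK));

	/* The config cycle is issued when CFGDATA is read */
	MSC_READ(MSC01_PCI_CFGDATA, data);
	return data;
}
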
diff --git a/arch/mips/include/asm/mips-boards/piix4.h b/arch/mips/include/asm/mips-boards/piix4.h
new file mode 100644
index 000000000..e174bc7c8
--- /dev/null
+++ b/arch/mips/include/asm/mips-boards/piix4.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ *
+ * Register definitions for Intel PIIX4 South Bridge Device.
+ */
+#ifndef __ASM_MIPS_BOARDS_PIIX4_H
+#define __ASM_MIPS_BOARDS_PIIX4_H
+
+/* PIRQX Route Control */
+#define PIIX4_FUNC0_PIRQRC 0x60
+#define PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE (1 << 7)
+#define PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK 0xf
+#define PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX 16
+/* SERIRQ Control */
+#define PIIX4_FUNC0_SERIRQC 0x64
+#define PIIX4_FUNC0_SERIRQC_EN (1 << 7)
+#define PIIX4_FUNC0_SERIRQC_CONT (1 << 6)
+/* Top Of Memory */
+#define PIIX4_FUNC0_TOM 0x69
+#define PIIX4_FUNC0_TOM_TOP_OF_MEMORY_MASK 0xf0
+/* Deterministic Latency Control */
+#define PIIX4_FUNC0_DLC 0x82
+#define PIIX4_FUNC0_DLC_USBPR_EN (1 << 2)
+#define PIIX4_FUNC0_DLC_PASSIVE_RELEASE_EN (1 << 1)
+#define PIIX4_FUNC0_DLC_DELAYED_TRANSACTION_EN (1 << 0)
+/* General Configuration */
+#define PIIX4_FUNC0_GENCFG 0xb0
+#define PIIX4_FUNC0_GENCFG_SERIRQ (1 << 16)
+
+/* IDE Timing */
+#define PIIX4_FUNC1_IDETIM_PRIMARY_LO 0x40
+#define PIIX4_FUNC1_IDETIM_PRIMARY_HI 0x41
+#define PIIX4_FUNC1_IDETIM_PRIMARY_HI_IDE_DECODE_EN (1 << 7)
+#define PIIX4_FUNC1_IDETIM_SECONDARY_LO 0x42
+#define PIIX4_FUNC1_IDETIM_SECONDARY_HI 0x43
+#define PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN (1 << 7)
+
+/* Power Management Configuration Space */
+#define PIIX4_FUNC3_PMBA 0x40
+#define PIIX4_FUNC3_PMREGMISC 0x80
+#define PIIX4_FUNC3_PMREGMISC_EN (1 << 0)
+
+/* Power Management IO Space */
+#define PIIX4_FUNC3IO_PMSTS 0x00
+#define PIIX4_FUNC3IO_PMSTS_PWRBTN_STS (1 << 8)
+#define PIIX4_FUNC3IO_PMCNTRL 0x04
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_EN (1 << 13)
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP (0x7 << 10)
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF (0x0 << 10)
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_STR (0x1 << 10)
+
+/* Data for magic special PCI cycle */
+#define PIIX4_SUSPEND_MAGIC 0x00120002
+
+#endif /* __ASM_MIPS_BOARDS_PIIX4_H */
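
The PIRQRC registers steer the PIIX4's four PCI interrupt request lines onto ISA IRQs. A hedged sketch of programming them with the generic PCI config accessors; the chosen IRQ numbers and the helper name are illustrative, not Malta's actual routing:

#include <linux/pci.h>
#include <asm/mips-boards/piix4.h>

static void piix4_route_pirq(struct pci_dev *pdev)
{
	static const u8 pirq_irq[4] = { 10, 10, 11, 11 };	/* PIRQA..PIRQD */
	int i;

	for (i = 0; i < 4; i++)
		pci_write_config_byte(pdev, PIIX4_FUNC0_PIRQRC + i,
				      pirq_irq[i] & PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK);
}
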
diff --git a/arch/mips/include/asm/mips-boards/sead3-addr.h b/arch/mips/include/asm/mips-boards/sead3-addr.h
new file mode 100644
index 000000000..c0db57802
--- /dev/null
+++ b/arch/mips/include/asm/mips-boards/sead3-addr.h
@@ -0,0 +1,83 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2015 Imagination Technologies, Inc.
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_MIPS_BOARDS_SEAD3_ADDR_H
+#define __ASM_MIPS_BOARDS_SEAD3_ADDR_H
+
+/*
+ * Target #0 Register Decode
+ */
+#define SEAD3_SD_SPDCNF 0xbb000040
+#define SEAD3_SD_SPADDR 0xbb000048
+#define SEAD3_SD_DATA 0xbb000050
+
+/*
+ * Target #1 Register Decode
+ */
+#define SEAD3_CFG 0xbb100110
+#define SEAD3_GIC_BASE_ADDRESS 0xbb1c0000
+#define SEAD3_SHARED_SECTION 0xbb1c0000
+#define SEAD3_VPE_LOCAL_SECTION 0xbb1c8000
+#define SEAD3_VPE_OTHER_SECTION 0xbb1cc000
+#define SEAD3_USER_MODE_VISIBLE_SECTION 0xbb1d0000
+
+/*
+ * Target #3 Register Decode
+ */
+#define SEAD3_USB_HS_BASE 0xbb200000
+#define SEAD3_USB_HS_IDENTIFICATION_REGS 0xbb200000
+#define SEAD3_USB_HS_CAPABILITY_REGS 0xbb200100
+#define SEAD3_USB_HS_OPERATIONAL_REGS 0xbb200140
+#define SEAD3_RESERVED 0xbe800000
+
+/*
+ * Target #3 Register Decode
+ */
+#define SEAD3_SRAM 0xbe000000
+#define SEAD3_OPTIONAL_SRAM 0xbe400000
+#define SEAD3_FPGA 0xbf000000
+
+#define SEAD3_PI_PIC32_USB_STATUS 0xbf000060
+#define SEAD3_PI_PIC32_USB_STATUS_IO_RDY (1 << 0)
+#define SEAD3_PI_PIC32_USB_STATUS_SPL_INT (1 << 1)
+#define SEAD3_PI_PIC32_USB_STATUS_GPIOA_INT (1 << 2)
+#define SEAD3_PI_PIC32_USB_STATUS_GPIOB_INT (1 << 3)
+
+#define SEAD3_PI_SOFT_ENDIAN 0xbf000070
+
+#define SEAD3_CPLD_P_SWITCH 0xbf000200
+#define SEAD3_CPLD_F_SWITCH 0xbf000208
+#define SEAD3_CPLD_P_LED 0xbf000210
+#define SEAD3_CPLD_F_LED 0xbf000218
+#define SEAD3_NEWSC_LIVE 0xbf000220
+#define SEAD3_NEWSC_REG 0xbf000228
+#define SEAD3_NEWSC_CTRL 0xbf000230
+
+#define SEAD3_LCD_CONTROL 0xbf000400
+#define SEAD3_LCD_DATA 0xbf000408
+#define SEAD3_CPLD_LCD_STATUS 0xbf000410
+#define SEAD3_CPLD_LCD_DATA 0xbf000418
+
+#define SEAD3_CPLD_PI_DEVRST 0xbf000480
+#define SEAD3_CPLD_PI_DEVRST_IC32_RST (1 << 0)
+#define SEAD3_RESERVED_0 0xbf000500
+
+#define SEAD3_PIC32_REGISTERS 0xbf000600
+#define SEAD3_RESERVED_1 0xbf000700
+#define SEAD3_UART_CH_0 0xbf000800
+#define SEAD3_UART_CH_1 0xbf000900
+#define SEAD3_RESERVED_2 0xbf000a00
+#define SEAD3_ETHERNET 0xbf010000
+#define SEAD3_RESERVED_3 0xbf020000
+#define SEAD3_USER_EXPANSION 0xbf400000
+#define SEAD3_RESERVED_4 0xbf800000
+#define SEAD3_BOOT_FLASH_EXTENSION 0xbfa00000
+#define SEAD3_BOOT_FLASH 0xbfc00000
+#define SEAD3_REVISION_REGISTER 0xbfc00010
+
+#endif /* __ASM_MIPS_BOARDS_SEAD3_ADDR_H */
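
The SEAD3_* constants are KSEG1 (uncached) virtual addresses, so FPGA/CPLD registers can be poked directly once cast to an __iomem pointer. A small sketch, assuming the platform LED register accepts 32-bit stores:

#include <linux/io.h>
#include <asm/mips-boards/sead3-addr.h>

static void sead3_set_pled(u32 pattern)
{
	__raw_writel(pattern, (void __iomem *)SEAD3_CPLD_P_LED);
}
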
diff --git a/arch/mips/include/asm/mips-boards/sim.h b/arch/mips/include/asm/mips-boards/sim.h
new file mode 100644
index 000000000..ca37a4f32
--- /dev/null
+++ b/arch/mips/include/asm/mips-boards/sim.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_MIPS_BOARDS_SIM_H
+#define _ASM_MIPS_BOARDS_SIM_H
+
+#define STATS_ON 1
+#define STATS_OFF 2
+#define STATS_CLEAR 3
+#define STATS_DUMP 4
+#define TRACE_ON 5
+#define TRACE_OFF 6
+
+
+#define simcfg(code) \
+({ \
+ __asm__ __volatile__( \
+ "sltiu $0,$0, %0" \
+ ::"i"(code) \
+ ); \
+})
+
+
+
+#endif
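
simcfg() emits a magic no-op (sltiu $0, $0, imm) that an instruction-set simulator can trap on, while real hardware ignores it. The intended pattern is to bracket a region of interest:

#include <asm/mips-boards/sim.h>

static void traced_section(void)
{
	simcfg(TRACE_ON);	/* simulator starts tracing here */
	/* ... code of interest ... */
	simcfg(TRACE_OFF);	/* and stops here */
}
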
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
new file mode 100644
index 000000000..23c67c087
--- /dev/null
+++ b/arch/mips/include/asm/mips-cm.h
@@ -0,0 +1,459 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#ifndef __MIPS_ASM_MIPS_CPS_H__
+# error Please include asm/mips-cps.h rather than asm/mips-cm.h
+#endif
+
+#ifndef __MIPS_ASM_MIPS_CM_H__
+#define __MIPS_ASM_MIPS_CM_H__
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+
+/* The base address of the CM GCR block */
+extern void __iomem *mips_gcr_base;
+
+/* The base address of the CM L2-only sync region */
+extern void __iomem *mips_cm_l2sync_base;
+
+/**
+ * __mips_cm_phys_base - retrieve the physical base address of the CM
+ *
+ * This function returns the physical base address of the Coherence Manager
+ * global control block, or 0 if no Coherence Manager is present. It provides
+ * a default implementation which reads the CMGCRBase register where available,
+ * and may be overridden by platforms which determine this address in a
+ * different way by defining a function with the same prototype except for the
+ * name mips_cm_phys_base (without underscores).
+ */
+extern phys_addr_t __mips_cm_phys_base(void);
+
+/*
+ * mips_cm_is64 - determine CM register width
+ *
+ * The CM register width is determined by the version of the CM, with CM3
+ * introducing 64 bit GCRs and all prior CM versions having 32 bit GCRs.
+ * However we may run a kernel built for MIPS32 on a system with 64 bit GCRs,
+ * or vice-versa. This variable indicates the width of the memory accesses
+ * that the kernel will perform to GCRs, which may differ from the actual
+ * width of the GCRs.
+ *
+ * It's set to 0 for 32-bit accesses and 1 for 64-bit accesses.
+ */
+extern int mips_cm_is64;
+
+/**
+ * mips_cm_error_report - Report CM cache errors
+ */
+#ifdef CONFIG_MIPS_CM
+extern void mips_cm_error_report(void);
+#else
+static inline void mips_cm_error_report(void) {}
+#endif
+
+/**
+ * mips_cm_probe - probe for a Coherence Manager
+ *
+ * Attempt to detect the presence of a Coherence Manager. Returns 0 if a CM
+ * is successfully detected, else -errno.
+ */
+#ifdef CONFIG_MIPS_CM
+extern int mips_cm_probe(void);
+#else
+static inline int mips_cm_probe(void)
+{
+ return -ENODEV;
+}
+#endif
+
+/**
+ * mips_cm_present - determine whether a Coherence Manager is present
+ *
+ * Returns true if a CM is present in the system, else false.
+ */
+static inline bool mips_cm_present(void)
+{
+#ifdef CONFIG_MIPS_CM
+ return mips_gcr_base != NULL;
+#else
+ return false;
+#endif
+}
+
+/**
+ * mips_cm_has_l2sync - determine whether an L2-only sync region is present
+ *
+ * Returns true if the system implements an L2-only sync region, else false.
+ */
+static inline bool mips_cm_has_l2sync(void)
+{
+#ifdef CONFIG_MIPS_CM
+ return mips_cm_l2sync_base != NULL;
+#else
+ return false;
+#endif
+}
+
+/* Offsets to register blocks from the CM base address */
+#define MIPS_CM_GCB_OFS 0x0000 /* Global Control Block */
+#define MIPS_CM_CLCB_OFS 0x2000 /* Core Local Control Block */
+#define MIPS_CM_COCB_OFS 0x4000 /* Core Other Control Block */
+#define MIPS_CM_GDB_OFS 0x6000 /* Global Debug Block */
+
+/* Total size of the CM memory mapped registers */
+#define MIPS_CM_GCR_SIZE 0x8000
+
+/* Size of the L2-only sync region */
+#define MIPS_CM_L2SYNC_SIZE 0x1000
+
+#define GCR_ACCESSOR_RO(sz, off, name) \
+ CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_GCB_OFS + off, name) \
+ CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_COCB_OFS + off, redir_##name)
+
+#define GCR_ACCESSOR_RW(sz, off, name) \
+ CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_GCB_OFS + off, name) \
+ CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_COCB_OFS + off, redir_##name)
+
+#define GCR_CX_ACCESSOR_RO(sz, off, name) \
+ CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_CLCB_OFS + off, cl_##name) \
+ CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_COCB_OFS + off, co_##name)
+
+#define GCR_CX_ACCESSOR_RW(sz, off, name) \
+ CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_CLCB_OFS + off, cl_##name) \
+ CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_COCB_OFS + off, co_##name)
+
+/* GCR_CONFIG - Information about the system */
+GCR_ACCESSOR_RO(64, 0x000, config)
+#define CM_GCR_CONFIG_CLUSTER_COH_CAPABLE BIT_ULL(43)
+#define CM_GCR_CONFIG_CLUSTER_ID GENMASK_ULL(39, 32)
+#define CM_GCR_CONFIG_NUM_CLUSTERS GENMASK(29, 23)
+#define CM_GCR_CONFIG_NUMIOCU GENMASK(15, 8)
+#define CM_GCR_CONFIG_PCORES GENMASK(7, 0)
+
+/* GCR_BASE - Base address of the Global Configuration Registers (GCRs) */
+GCR_ACCESSOR_RW(64, 0x008, base)
+#define CM_GCR_BASE_GCRBASE GENMASK_ULL(47, 15)
+#define CM_GCR_BASE_CMDEFTGT GENMASK(1, 0)
+#define CM_GCR_BASE_CMDEFTGT_MEM 0
+#define CM_GCR_BASE_CMDEFTGT_RESERVED 1
+#define CM_GCR_BASE_CMDEFTGT_IOCU0 2
+#define CM_GCR_BASE_CMDEFTGT_IOCU1 3
+
+/* GCR_ACCESS - Controls core/IOCU access to GCRs */
+GCR_ACCESSOR_RW(32, 0x020, access)
+#define CM_GCR_ACCESS_ACCESSEN GENMASK(7, 0)
+
+/* GCR_REV - Indicates the Coherence Manager revision */
+GCR_ACCESSOR_RO(32, 0x030, rev)
+#define CM_GCR_REV_MAJOR GENMASK(15, 8)
+#define CM_GCR_REV_MINOR GENMASK(7, 0)
+
+#define CM_ENCODE_REV(major, minor) \
+ (FIELD_PREP(CM_GCR_REV_MAJOR, major) | \
+ FIELD_PREP(CM_GCR_REV_MINOR, minor))
+
+#define CM_REV_CM2 CM_ENCODE_REV(6, 0)
+#define CM_REV_CM2_5 CM_ENCODE_REV(7, 0)
+#define CM_REV_CM3 CM_ENCODE_REV(8, 0)
+#define CM_REV_CM3_5 CM_ENCODE_REV(9, 0)
+
+/* GCR_ERR_CONTROL - Control error checking logic */
+GCR_ACCESSOR_RW(32, 0x038, err_control)
+#define CM_GCR_ERR_CONTROL_L2_ECC_EN BIT(1)
+#define CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT BIT(0)
+
+/* GCR_ERR_MASK - Control which errors are reported as interrupts */
+GCR_ACCESSOR_RW(64, 0x040, error_mask)
+
+/* GCR_ERR_CAUSE - Indicates the type of error that occurred */
+GCR_ACCESSOR_RW(64, 0x048, error_cause)
+#define CM_GCR_ERROR_CAUSE_ERRTYPE GENMASK(31, 27)
+#define CM3_GCR_ERROR_CAUSE_ERRTYPE GENMASK_ULL(63, 58)
+#define CM_GCR_ERROR_CAUSE_ERRINFO GENMASK(26, 0)
+
+/* GCR_ERR_ADDR - Indicates the address associated with an error */
+GCR_ACCESSOR_RW(64, 0x050, error_addr)
+
+/* GCR_ERR_MULT - Indicates when multiple errors have occurred */
+GCR_ACCESSOR_RW(64, 0x058, error_mult)
+#define CM_GCR_ERROR_MULT_ERR2ND GENMASK(4, 0)
+
+/* GCR_L2_ONLY_SYNC_BASE - Base address of the L2 cache-only sync region */
+GCR_ACCESSOR_RW(64, 0x070, l2_only_sync_base)
+#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE GENMASK(31, 12)
+#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN BIT(0)
+
+/* GCR_GIC_BASE - Base address of the Global Interrupt Controller (GIC) */
+GCR_ACCESSOR_RW(64, 0x080, gic_base)
+#define CM_GCR_GIC_BASE_GICBASE GENMASK(31, 17)
+#define CM_GCR_GIC_BASE_GICEN BIT(0)
+
+/* GCR_CPC_BASE - Base address of the Cluster Power Controller (CPC) */
+GCR_ACCESSOR_RW(64, 0x088, cpc_base)
+#define CM_GCR_CPC_BASE_CPCBASE GENMASK(31, 15)
+#define CM_GCR_CPC_BASE_CPCEN BIT(0)
+
+/* GCR_REGn_BASE - Base addresses of CM address regions */
+GCR_ACCESSOR_RW(64, 0x090, reg0_base)
+GCR_ACCESSOR_RW(64, 0x0a0, reg1_base)
+GCR_ACCESSOR_RW(64, 0x0b0, reg2_base)
+GCR_ACCESSOR_RW(64, 0x0c0, reg3_base)
+#define CM_GCR_REGn_BASE_BASEADDR GENMASK(31, 16)
+
+/* GCR_REGn_MASK - Size & destination of CM address regions */
+GCR_ACCESSOR_RW(64, 0x098, reg0_mask)
+GCR_ACCESSOR_RW(64, 0x0a8, reg1_mask)
+GCR_ACCESSOR_RW(64, 0x0b8, reg2_mask)
+GCR_ACCESSOR_RW(64, 0x0c8, reg3_mask)
+#define CM_GCR_REGn_MASK_ADDRMASK GENMASK(31, 16)
+#define CM_GCR_REGn_MASK_CCAOVR GENMASK(7, 5)
+#define CM_GCR_REGn_MASK_CCAOVREN BIT(4)
+#define CM_GCR_REGn_MASK_DROPL2 BIT(2)
+#define CM_GCR_REGn_MASK_CMTGT GENMASK(1, 0)
+#define CM_GCR_REGn_MASK_CMTGT_DISABLED 0x0
+#define CM_GCR_REGn_MASK_CMTGT_MEM 0x1
+#define CM_GCR_REGn_MASK_CMTGT_IOCU0 0x2
+#define CM_GCR_REGn_MASK_CMTGT_IOCU1 0x3
+
+/* GCR_GIC_STATUS - Indicates presence of a Global Interrupt Controller (GIC) */
+GCR_ACCESSOR_RO(32, 0x0d0, gic_status)
+#define CM_GCR_GIC_STATUS_EX BIT(0)
+
+/* GCR_CPC_STATUS - Indicates presence of a Cluster Power Controller (CPC) */
+GCR_ACCESSOR_RO(32, 0x0f0, cpc_status)
+#define CM_GCR_CPC_STATUS_EX BIT(0)
+
+/* GCR_L2_CONFIG - Indicates L2 cache configuration when Config5.L2C=1 */
+GCR_ACCESSOR_RW(32, 0x130, l2_config)
+#define CM_GCR_L2_CONFIG_BYPASS BIT(20)
+#define CM_GCR_L2_CONFIG_SET_SIZE GENMASK(15, 12)
+#define CM_GCR_L2_CONFIG_LINE_SIZE GENMASK(11, 8)
+#define CM_GCR_L2_CONFIG_ASSOC GENMASK(7, 0)
+
+/* GCR_SYS_CONFIG2 - Further information about the system */
+GCR_ACCESSOR_RO(32, 0x150, sys_config2)
+#define CM_GCR_SYS_CONFIG2_MAXVPW GENMASK(3, 0)
+
+/* GCR_L2_PFT_CONTROL - Controls hardware L2 prefetching */
+GCR_ACCESSOR_RW(32, 0x300, l2_pft_control)
+#define CM_GCR_L2_PFT_CONTROL_PAGEMASK GENMASK(31, 12)
+#define CM_GCR_L2_PFT_CONTROL_PFTEN BIT(8)
+#define CM_GCR_L2_PFT_CONTROL_NPFT GENMASK(7, 0)
+
+/* GCR_L2_PFT_CONTROL_B - Controls hardware L2 prefetching */
+GCR_ACCESSOR_RW(32, 0x308, l2_pft_control_b)
+#define CM_GCR_L2_PFT_CONTROL_B_CEN BIT(8)
+#define CM_GCR_L2_PFT_CONTROL_B_PORTID GENMASK(7, 0)
+
+/* GCR_L2SM_COP - L2 cache op state machine control */
+GCR_ACCESSOR_RW(32, 0x620, l2sm_cop)
+#define CM_GCR_L2SM_COP_PRESENT BIT(31)
+#define CM_GCR_L2SM_COP_RESULT GENMASK(8, 6)
+#define CM_GCR_L2SM_COP_RESULT_DONTCARE 0
+#define CM_GCR_L2SM_COP_RESULT_DONE_OK 1
+#define CM_GCR_L2SM_COP_RESULT_DONE_ERROR 2
+#define CM_GCR_L2SM_COP_RESULT_ABORT_OK 3
+#define CM_GCR_L2SM_COP_RESULT_ABORT_ERROR 4
+#define CM_GCR_L2SM_COP_RUNNING BIT(5)
+#define CM_GCR_L2SM_COP_TYPE GENMASK(4, 2)
+#define CM_GCR_L2SM_COP_TYPE_IDX_WBINV 0
+#define CM_GCR_L2SM_COP_TYPE_IDX_STORETAG 1
+#define CM_GCR_L2SM_COP_TYPE_IDX_STORETAGDATA 2
+#define CM_GCR_L2SM_COP_TYPE_HIT_INV 4
+#define CM_GCR_L2SM_COP_TYPE_HIT_WBINV 5
+#define CM_GCR_L2SM_COP_TYPE_HIT_WB 6
+#define CM_GCR_L2SM_COP_TYPE_FETCHLOCK 7
+#define CM_GCR_L2SM_COP_CMD GENMASK(1, 0)
+#define CM_GCR_L2SM_COP_CMD_START 1 /* only when idle */
+#define CM_GCR_L2SM_COP_CMD_ABORT 3 /* only when running */
+
+/* GCR_L2SM_TAG_ADDR_COP - L2 cache op state machine address control */
+GCR_ACCESSOR_RW(64, 0x628, l2sm_tag_addr_cop)
+#define CM_GCR_L2SM_TAG_ADDR_COP_NUM_LINES GENMASK_ULL(63, 48)
+#define CM_GCR_L2SM_TAG_ADDR_COP_START_TAG GENMASK_ULL(47, 6)
+
+/* GCR_BEV_BASE - Controls the location of the BEV for powered up cores */
+GCR_ACCESSOR_RW(64, 0x680, bev_base)
+
+/* GCR_Cx_RESET_RELEASE - Controls core reset for CM 1.x */
+GCR_CX_ACCESSOR_RW(32, 0x000, reset_release)
+
+/* GCR_Cx_COHERENCE - Controls core coherence */
+GCR_CX_ACCESSOR_RW(32, 0x008, coherence)
+#define CM_GCR_Cx_COHERENCE_COHDOMAINEN GENMASK(7, 0)
+#define CM3_GCR_Cx_COHERENCE_COHEN BIT(0)
+
+/* GCR_Cx_CONFIG - Information about a core's configuration */
+GCR_CX_ACCESSOR_RO(32, 0x010, config)
+#define CM_GCR_Cx_CONFIG_IOCUTYPE GENMASK(11, 10)
+#define CM_GCR_Cx_CONFIG_PVPE GENMASK(9, 0)
+
+/* GCR_Cx_OTHER - Configure the core-other/redirect GCR block */
+GCR_CX_ACCESSOR_RW(32, 0x018, other)
+#define CM_GCR_Cx_OTHER_CORENUM GENMASK(31, 16) /* CM < 3 */
+#define CM_GCR_Cx_OTHER_CLUSTER_EN BIT(31) /* CM >= 3.5 */
+#define CM_GCR_Cx_OTHER_GIC_EN BIT(30) /* CM >= 3.5 */
+#define CM_GCR_Cx_OTHER_BLOCK GENMASK(25, 24) /* CM >= 3.5 */
+#define CM_GCR_Cx_OTHER_BLOCK_LOCAL 0
+#define CM_GCR_Cx_OTHER_BLOCK_GLOBAL 1
+#define CM_GCR_Cx_OTHER_BLOCK_USER 2
+#define CM_GCR_Cx_OTHER_BLOCK_GLOBAL_HIGH 3
+#define CM_GCR_Cx_OTHER_CLUSTER GENMASK(21, 16) /* CM >= 3.5 */
+#define CM3_GCR_Cx_OTHER_CORE GENMASK(13, 8) /* CM >= 3 */
+#define CM_GCR_Cx_OTHER_CORE_CM 32
+#define CM3_GCR_Cx_OTHER_VP GENMASK(2, 0) /* CM >= 3 */
+
+/* GCR_Cx_RESET_BASE - Configure where powered up cores will fetch from */
+GCR_CX_ACCESSOR_RW(32, 0x020, reset_base)
+#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE GENMASK(31, 12)
+
+/* GCR_Cx_ID - Identify the current core */
+GCR_CX_ACCESSOR_RO(32, 0x028, id)
+#define CM_GCR_Cx_ID_CLUSTER GENMASK(15, 8)
+#define CM_GCR_Cx_ID_CORE GENMASK(7, 0)
+
+/* GCR_Cx_RESET_EXT_BASE - Configure behaviour when cores reset or power up */
+GCR_CX_ACCESSOR_RW(32, 0x030, reset_ext_base)
+#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET BIT(31)
+#define CM_GCR_Cx_RESET_EXT_BASE_UEB BIT(30)
+#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK GENMASK(27, 20)
+#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA GENMASK(7, 1)
+#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT BIT(0)
+
+/**
+ * mips_cm_l2sync - perform an L2-only sync operation
+ *
+ * If an L2-only sync region is present in the system then this function
+ * performs an L2-only sync and returns zero. Otherwise it returns -ENODEV.
+ */
+static inline int mips_cm_l2sync(void)
+{
+ if (!mips_cm_has_l2sync())
+ return -ENODEV;
+
+ writel(0, mips_cm_l2sync_base);
+ return 0;
+}
+
+/**
+ * mips_cm_revision() - return CM revision
+ *
+ * Return: The revision of the CM, from GCR_REV, or 0 if no CM is present. The
+ * return value should be checked against the CM_REV_* macros.
+ */
+static inline int mips_cm_revision(void)
+{
+ if (!mips_cm_present())
+ return 0;
+
+ return read_gcr_rev();
+}
+
+/**
+ * mips_cm_max_vp_width() - return the width in bits of VP indices
+ *
+ * Return: the width, in bits, of VP indices in fields that combine core & VP
+ * indices.
+ */
+static inline unsigned int mips_cm_max_vp_width(void)
+{
+ extern int smp_num_siblings;
+
+ if (mips_cm_revision() >= CM_REV_CM3)
+ return FIELD_GET(CM_GCR_SYS_CONFIG2_MAXVPW,
+ read_gcr_sys_config2());
+
+ if (mips_cm_present()) {
+ /*
+ * We presume that all cores in the system will have the same
+ * number of VP(E)s, and if that ever changes then this will
+ * need revisiting.
+ */
+ return FIELD_GET(CM_GCR_Cx_CONFIG_PVPE, read_gcr_cl_config()) + 1;
+ }
+
+ if (IS_ENABLED(CONFIG_SMP))
+ return smp_num_siblings;
+
+ return 1;
+}
+
+/**
+ * mips_cm_vp_id() - calculate the hardware VP ID for a CPU
+ * @cpu: the CPU whose VP ID to calculate
+ *
+ * Hardware such as the GIC uses identifiers for VPs which may not match the
+ * CPU numbers used by Linux. This function calculates the hardware VP
+ * identifier corresponding to a given CPU.
+ *
+ * Return: the VP ID for the CPU.
+ */
+static inline unsigned int mips_cm_vp_id(unsigned int cpu)
+{
+ unsigned int core = cpu_core(&cpu_data[cpu]);
+ unsigned int vp = cpu_vpe_id(&cpu_data[cpu]);
+
+ return (core * mips_cm_max_vp_width()) + vp;
+}
+
+#ifdef CONFIG_MIPS_CM
+
+/**
+ * mips_cm_lock_other - lock access to redirect/other region
+ * @cluster: the other cluster to be accessed
+ * @core: the other core to be accessed
+ * @vp: the VP within the other core to be accessed
+ * @block: the register block to be accessed
+ *
+ * Configure the redirect/other region for the local core/VP (depending upon
+ * the CM revision) to target the specified @cluster, @core, @vp & register
+ * @block. Must be called before using the redirect/other region, and followed
+ * by a call to mips_cm_unlock_other() when access to the redirect/other region
+ * is complete.
+ *
+ * This function acquires a spinlock such that code between it &
+ * mips_cm_unlock_other() calls cannot be pre-empted by anything which may
+ * reconfigure the redirect/other region, and cannot be interfered with by
+ * another VP in the core. As such, calls to this function should not be nested.
+ */
+extern void mips_cm_lock_other(unsigned int cluster, unsigned int core,
+ unsigned int vp, unsigned int block);
+
+/**
+ * mips_cm_unlock_other - unlock access to redirect/other region
+ *
+ * Must be called after mips_cm_lock_other() once all required access to the
+ * redirect/other region has been completed.
+ */
+extern void mips_cm_unlock_other(void);
+
+#else /* !CONFIG_MIPS_CM */
+
+static inline void mips_cm_lock_other(unsigned int cluster, unsigned int core,
+ unsigned int vp, unsigned int block) { }
+static inline void mips_cm_unlock_other(void) { }
+
+#endif /* !CONFIG_MIPS_CM */
+
+/**
+ * mips_cm_lock_other_cpu - lock access to redirect/other region
+ * @cpu: the other CPU whose register we want to access
+ *
+ * Configure the redirect/other region for the local core/VP (depending upon
+ * the CM revision) to target the specified @cpu & register @block. This is
+ * equivalent to calling mips_cm_lock_other() but accepts a Linux CPU number
+ * for convenience.
+ */
+static inline void mips_cm_lock_other_cpu(unsigned int cpu, unsigned int block)
+{
+ struct cpuinfo_mips *d = &cpu_data[cpu];
+
+ mips_cm_lock_other(cpu_cluster(d), cpu_core(d), cpu_vpe_id(d), block);
+}
+
+#endif /* __MIPS_ASM_MIPS_CM_H__ */
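
Putting the accessors and the other-locking together: a hedged sketch that reads another core's GCR_Cx_CONFIG through the redirect block to find how many VP(E)s it implements. Cluster 0 and the helper name are assumptions for illustration.

#include <linux/bitfield.h>
#include <asm/mips-cps.h>

static unsigned int other_core_num_vpes(unsigned int core)
{
	unsigned int cfg;

	/* Point the redirect block at the target core, VP 0 */
	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
	cfg = read_gcr_co_config();
	mips_cm_unlock_other();

	return FIELD_GET(CM_GCR_Cx_CONFIG_PVPE, cfg) + 1;
}
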
diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h
new file mode 100644
index 000000000..b54453f16
--- /dev/null
+++ b/arch/mips/include/asm/mips-cpc.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#ifndef __MIPS_ASM_MIPS_CPS_H__
+# error Please include asm/mips-cps.h rather than asm/mips-cpc.h
+#endif
+
+#ifndef __MIPS_ASM_MIPS_CPC_H__
+#define __MIPS_ASM_MIPS_CPC_H__
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+
+/* The base address of the CPC registers */
+extern void __iomem *mips_cpc_base;
+
+/**
+ * mips_cpc_default_phys_base - retrieve the default physical base address of
+ * the CPC
+ *
+ * Returns the default physical base address of the Cluster Power Controller
+ * memory mapped registers. This is platform dependent & must therefore be
+ * implemented per-platform.
+ */
+extern phys_addr_t mips_cpc_default_phys_base(void);
+
+/**
+ * mips_cpc_probe - probe for a Cluster Power Controller
+ *
+ * Attempt to detect the presence of a Cluster Power Controller. Returns 0 if
+ * a CPC is successfully detected, else -errno.
+ */
+#ifdef CONFIG_MIPS_CPC
+extern int mips_cpc_probe(void);
+#else
+static inline int mips_cpc_probe(void)
+{
+ return -ENODEV;
+}
+#endif
+
+/**
+ * mips_cpc_present - determine whether a Cluster Power Controller is present
+ *
+ * Returns true if a CPC is present in the system, else false.
+ */
+static inline bool mips_cpc_present(void)
+{
+#ifdef CONFIG_MIPS_CPC
+ return mips_cpc_base != NULL;
+#else
+ return false;
+#endif
+}
+
+/* Offsets from the CPC base address to various control blocks */
+#define MIPS_CPC_GCB_OFS 0x0000
+#define MIPS_CPC_CLCB_OFS 0x2000
+#define MIPS_CPC_COCB_OFS 0x4000
+
+#define CPC_ACCESSOR_RO(sz, off, name) \
+ CPS_ACCESSOR_RO(cpc, sz, MIPS_CPC_GCB_OFS + off, name) \
+ CPS_ACCESSOR_RO(cpc, sz, MIPS_CPC_COCB_OFS + off, redir_##name)
+
+#define CPC_ACCESSOR_RW(sz, off, name) \
+ CPS_ACCESSOR_RW(cpc, sz, MIPS_CPC_GCB_OFS + off, name) \
+ CPS_ACCESSOR_RW(cpc, sz, MIPS_CPC_COCB_OFS + off, redir_##name)
+
+#define CPC_CX_ACCESSOR_RO(sz, off, name) \
+ CPS_ACCESSOR_RO(cpc, sz, MIPS_CPC_CLCB_OFS + off, cl_##name) \
+ CPS_ACCESSOR_RO(cpc, sz, MIPS_CPC_COCB_OFS + off, co_##name)
+
+#define CPC_CX_ACCESSOR_RW(sz, off, name) \
+ CPS_ACCESSOR_RW(cpc, sz, MIPS_CPC_CLCB_OFS + off, cl_##name) \
+ CPS_ACCESSOR_RW(cpc, sz, MIPS_CPC_COCB_OFS + off, co_##name)
+
+/* CPC_ACCESS - Control core/IOCU access to CPC registers prior to CM 3 */
+CPC_ACCESSOR_RW(32, 0x000, access)
+
+/* CPC_SEQDEL - Configure delays between command sequencer steps */
+CPC_ACCESSOR_RW(32, 0x008, seqdel)
+
+/* CPC_RAIL - Configure the delay from rail power-up to stability */
+CPC_ACCESSOR_RW(32, 0x010, rail)
+
+/* CPC_RESETLEN - Configure the length of reset sequences */
+CPC_ACCESSOR_RW(32, 0x018, resetlen)
+
+/* CPC_REVISION - Indicates the revision of the CPC */
+CPC_ACCESSOR_RO(32, 0x020, revision)
+
+/* CPC_PWRUP_CTL - Control power to the Coherence Manager (CM) */
+CPC_ACCESSOR_RW(32, 0x030, pwrup_ctl)
+#define CPC_PWRUP_CTL_CM_PWRUP BIT(0)
+
+/* CPC_CONFIG - Mirrors GCR_CONFIG */
+CPC_ACCESSOR_RW(64, 0x138, config)
+
+/* CPC_SYS_CONFIG - Control cluster endianness */
+CPC_ACCESSOR_RW(32, 0x140, sys_config)
+#define CPC_SYS_CONFIG_BE_IMMEDIATE BIT(2)
+#define CPC_SYS_CONFIG_BE_STATUS BIT(1)
+#define CPC_SYS_CONFIG_BE BIT(0)
+
+/* CPC_Cx_CMD - Instruct the CPC to take action on a core */
+CPC_CX_ACCESSOR_RW(32, 0x000, cmd)
+#define CPC_Cx_CMD GENMASK(3, 0)
+#define CPC_Cx_CMD_CLOCKOFF 0x1
+#define CPC_Cx_CMD_PWRDOWN 0x2
+#define CPC_Cx_CMD_PWRUP 0x3
+#define CPC_Cx_CMD_RESET 0x4
+
+/* CPC_Cx_STAT_CONF - Indicates core configuration & state */
+CPC_CX_ACCESSOR_RW(32, 0x008, stat_conf)
+#define CPC_Cx_STAT_CONF_PWRUPE BIT(23)
+#define CPC_Cx_STAT_CONF_SEQSTATE GENMASK(22, 19)
+#define CPC_Cx_STAT_CONF_SEQSTATE_D0 0x0
+#define CPC_Cx_STAT_CONF_SEQSTATE_U0 0x1
+#define CPC_Cx_STAT_CONF_SEQSTATE_U1 0x2
+#define CPC_Cx_STAT_CONF_SEQSTATE_U2 0x3
+#define CPC_Cx_STAT_CONF_SEQSTATE_U3 0x4
+#define CPC_Cx_STAT_CONF_SEQSTATE_U4 0x5
+#define CPC_Cx_STAT_CONF_SEQSTATE_U5 0x6
+#define CPC_Cx_STAT_CONF_SEQSTATE_U6 0x7
+#define CPC_Cx_STAT_CONF_SEQSTATE_D1 0x8
+#define CPC_Cx_STAT_CONF_SEQSTATE_D3 0x9
+#define CPC_Cx_STAT_CONF_SEQSTATE_D2 0xa
+#define CPC_Cx_STAT_CONF_CLKGAT_IMPL BIT(17)
+#define CPC_Cx_STAT_CONF_PWRDN_IMPL BIT(16)
+#define CPC_Cx_STAT_CONF_EJTAG_PROBE BIT(15)
+
+/* CPC_Cx_OTHER - Configure the core-other register block prior to CM 3 */
+CPC_CX_ACCESSOR_RW(32, 0x010, other)
+#define CPC_Cx_OTHER_CORENUM GENMASK(23, 16)
+
+/* CPC_Cx_VP_STOP - Stop Virtual Processors (VPs) within a core from running */
+CPC_CX_ACCESSOR_RW(32, 0x020, vp_stop)
+
+/* CPC_Cx_VP_RUN - Start Virtual Processors (VPs) within a core running */
+CPC_CX_ACCESSOR_RW(32, 0x028, vp_run)
+
+/* CPC_Cx_VP_RUNNING - Indicate which Virtual Processors (VPs) are running */
+CPC_CX_ACCESSOR_RW(32, 0x030, vp_running)
+
+/* CPC_Cx_CONFIG - Mirrors GCR_Cx_CONFIG */
+CPC_CX_ACCESSOR_RW(32, 0x090, config)
+
+#ifdef CONFIG_MIPS_CPC
+
+/**
+ * mips_cpc_lock_other - lock access to another core
+ * core: the other core to be accessed
+ *
+ * Call before operating upon a core via the 'other' register region in
+ * order to prevent the region being moved during access. Must be called
+ * within the bounds of a mips_cm_{lock,unlock}_other pair, and followed
+ * by a call to mips_cpc_unlock_other.
+ */
+extern void mips_cpc_lock_other(unsigned int core);
+
+/**
+ * mips_cpc_unlock_other - unlock access to another core
+ *
+ * Call after operating upon another core via the 'other' register region.
+ * Must be called after mips_cpc_lock_other.
+ */
+extern void mips_cpc_unlock_other(void);
+
+#else /* !CONFIG_MIPS_CPC */
+
+static inline void mips_cpc_lock_other(unsigned int core) { }
+static inline void mips_cpc_unlock_other(void) { }
+
+#endif /* !CONFIG_MIPS_CPC */
+
+#endif /* __MIPS_ASM_MIPS_CPC_H__ */
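
A hedged sketch of driving the CPC command register for another core: the CPC other-window must be configured inside a CM other-lock, as the kernel-doc above requires. Waiting for the power sequencer (CPC_Cx_STAT_CONF_SEQSTATE) and error handling are omitted; the helper name is illustrative.

#include <asm/mips-cps.h>

static void cpc_power_up_core(unsigned int core)
{
	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
	mips_cpc_lock_other(core);

	write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);	/* ask the CPC to power the core up */

	mips_cpc_unlock_other();
	mips_cm_unlock_other();
}
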
diff --git a/arch/mips/include/asm/mips-cps.h b/arch/mips/include/asm/mips-cps.h
new file mode 100644
index 000000000..fd43d8768
--- /dev/null
+++ b/arch/mips/include/asm/mips-cps.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2017 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#ifndef __MIPS_ASM_MIPS_CPS_H__
+#define __MIPS_ASM_MIPS_CPS_H__
+
+#include <linux/io.h>
+#include <linux/types.h>
+
+extern unsigned long __cps_access_bad_size(void)
+ __compiletime_error("Bad size for CPS accessor");
+
+#define CPS_ACCESSOR_A(unit, off, name) \
+static inline void *addr_##unit##_##name(void) \
+{ \
+ return mips_##unit##_base + (off); \
+}
+
+#define CPS_ACCESSOR_R(unit, sz, name) \
+static inline uint##sz##_t read_##unit##_##name(void) \
+{ \
+ uint64_t val64; \
+ \
+ switch (sz) { \
+ case 32: \
+ return __raw_readl(addr_##unit##_##name()); \
+ \
+ case 64: \
+ if (mips_cm_is64) \
+ return __raw_readq(addr_##unit##_##name()); \
+ \
+ val64 = __raw_readl(addr_##unit##_##name() + 4); \
+ val64 <<= 32; \
+ val64 |= __raw_readl(addr_##unit##_##name()); \
+ return val64; \
+ \
+ default: \
+ return __cps_access_bad_size(); \
+ } \
+}
+
+#define CPS_ACCESSOR_W(unit, sz, name) \
+static inline void write_##unit##_##name(uint##sz##_t val) \
+{ \
+ switch (sz) { \
+ case 32: \
+ __raw_writel(val, addr_##unit##_##name()); \
+ break; \
+ \
+ case 64: \
+ if (mips_cm_is64) { \
+ __raw_writeq(val, addr_##unit##_##name()); \
+ break; \
+ } \
+ \
+ __raw_writel((uint64_t)val >> 32, \
+ addr_##unit##_##name() + 4); \
+ __raw_writel(val, addr_##unit##_##name()); \
+ break; \
+ \
+ default: \
+ __cps_access_bad_size(); \
+ break; \
+ } \
+}
+
+#define CPS_ACCESSOR_M(unit, sz, name) \
+static inline void change_##unit##_##name(uint##sz##_t mask, \
+ uint##sz##_t val) \
+{ \
+ uint##sz##_t reg_val = read_##unit##_##name(); \
+ reg_val &= ~mask; \
+ reg_val |= val; \
+ write_##unit##_##name(reg_val); \
+} \
+ \
+static inline void set_##unit##_##name(uint##sz##_t val) \
+{ \
+ change_##unit##_##name(val, val); \
+} \
+ \
+static inline void clear_##unit##_##name(uint##sz##_t val) \
+{ \
+ change_##unit##_##name(val, 0); \
+}
+
+#define CPS_ACCESSOR_RO(unit, sz, off, name) \
+ CPS_ACCESSOR_A(unit, off, name) \
+ CPS_ACCESSOR_R(unit, sz, name)
+
+#define CPS_ACCESSOR_WO(unit, sz, off, name) \
+ CPS_ACCESSOR_A(unit, off, name) \
+ CPS_ACCESSOR_W(unit, sz, name)
+
+#define CPS_ACCESSOR_RW(unit, sz, off, name) \
+ CPS_ACCESSOR_A(unit, off, name) \
+ CPS_ACCESSOR_R(unit, sz, name) \
+ CPS_ACCESSOR_W(unit, sz, name) \
+ CPS_ACCESSOR_M(unit, sz, name)
+
+#include <asm/mips-cm.h>
+#include <asm/mips-cpc.h>
+#include <asm/mips-gic.h>
+
+/**
+ * mips_cps_numclusters - return the number of clusters present in the system
+ *
+ * Returns the number of clusters in the system.
+ */
+static inline unsigned int mips_cps_numclusters(void)
+{
+ unsigned int num_clusters;
+
+ if (mips_cm_revision() < CM_REV_CM3_5)
+ return 1;
+
+ num_clusters = read_gcr_config() & CM_GCR_CONFIG_NUM_CLUSTERS;
+ num_clusters >>= __ffs(CM_GCR_CONFIG_NUM_CLUSTERS);
+ return num_clusters;
+}
+
+/**
+ * mips_cps_cluster_config - return (GCR|CPC)_CONFIG from a cluster
+ * @cluster: the ID of the cluster whose config we want
+ *
+ * Read the value of GCR_CONFIG (or its CPC_CONFIG mirror) from a @cluster.
+ *
+ * Returns the value of GCR_CONFIG.
+ */
+static inline uint64_t mips_cps_cluster_config(unsigned int cluster)
+{
+ uint64_t config;
+
+ if (mips_cm_revision() < CM_REV_CM3_5) {
+ /*
+ * Prior to CM 3.5 we don't have the notion of multiple
+ * clusters so we can trivially read the GCR_CONFIG register
+ * within this cluster.
+ */
+ WARN_ON(cluster != 0);
+ config = read_gcr_config();
+ } else {
+ /*
+ * From CM 3.5 onwards we read the CPC_CONFIG mirror of
+ * GCR_CONFIG via the redirect region, since the CPC is always
+ * powered up allowing us not to need to power up the CM.
+ */
+ mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+ config = read_cpc_redir_config();
+ mips_cm_unlock_other();
+ }
+
+ return config;
+}
+
+/**
+ * mips_cps_numcores - return the number of cores present in a cluster
+ * @cluster: the ID of the cluster whose core count we want
+ *
+ * Returns the value of the PCORES field of the GCR_CONFIG register plus 1, or
+ * zero if no Coherence Manager is present.
+ */
+static inline unsigned int mips_cps_numcores(unsigned int cluster)
+{
+ if (!mips_cm_present())
+ return 0;
+
+ /* Add one before masking to handle 0xff indicating no cores */
+ return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES;
+}
+
+/**
+ * mips_cps_numiocu - return the number of IOCUs present in a cluster
+ * @cluster: the ID of the cluster whose IOCU count we want
+ *
+ * Returns the value of the NUMIOCU field of the GCR_CONFIG register, or zero
+ * if no Coherence Manager is present.
+ */
+static inline unsigned int mips_cps_numiocu(unsigned int cluster)
+{
+ unsigned int num_iocu;
+
+ if (!mips_cm_present())
+ return 0;
+
+ num_iocu = mips_cps_cluster_config(cluster) & CM_GCR_CONFIG_NUMIOCU;
+ num_iocu >>= __ffs(CM_GCR_CONFIG_NUMIOCU);
+ return num_iocu;
+}
+
+/**
+ * mips_cps_numvps - return the number of VPs (threads) supported by a core
+ * @cluster: the ID of the cluster containing the core we want to examine
+ * @core: the ID of the core whose VP count we want
+ *
+ * Returns the number of Virtual Processors (VPs, i.e. hardware threads) that
+ * are supported by the given @core in the given @cluster. If the core or the
+ * kernel do not support hardware multi-threading, this returns 1.
+ */
+static inline unsigned int mips_cps_numvps(unsigned int cluster, unsigned int core)
+{
+ unsigned int cfg;
+
+ if (!mips_cm_present())
+ return 1;
+
+ if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
+ && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
+ return 1;
+
+ mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+
+ if (mips_cm_revision() < CM_REV_CM3_5) {
+ /*
+ * Prior to CM 3.5 we can only have one cluster & don't have
+ * CPC_Cx_CONFIG, so we read GCR_Cx_CONFIG.
+ */
+ cfg = read_gcr_co_config();
+ } else {
+ /*
+ * From CM 3.5 onwards we read CPC_Cx_CONFIG because the CPC is
+ * always powered, which allows us to not worry about powering
+ * up the cluster's CM here.
+ */
+ cfg = read_cpc_co_config();
+ }
+
+ mips_cm_unlock_other();
+
+ return (cfg + 1) & CM_GCR_Cx_CONFIG_PVPE;
+}
+
+#endif /* __MIPS_ASM_MIPS_CPS_H__ */
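
The three helpers compose naturally into a topology walk. A small sketch counting every VP in the system; the function is illustrative and does no caching of the result:

#include <asm/mips-cps.h>

static unsigned int cps_total_vps(void)
{
	unsigned int cluster, core, total = 0;

	for (cluster = 0; cluster < mips_cps_numclusters(); cluster++)
		for (core = 0; core < mips_cps_numcores(cluster); core++)
			total += mips_cps_numvps(cluster, core);

	return total;
}
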
diff --git a/arch/mips/include/asm/mips-gic.h b/arch/mips/include/asm/mips-gic.h
new file mode 100644
index 000000000..084cac1c5
--- /dev/null
+++ b/arch/mips/include/asm/mips-gic.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2017 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#ifndef __MIPS_ASM_MIPS_CPS_H__
+# error Please include asm/mips-cps.h rather than asm/mips-gic.h
+#endif
+
+#ifndef __MIPS_ASM_MIPS_GIC_H__
+#define __MIPS_ASM_MIPS_GIC_H__
+
+#include <linux/bitops.h>
+
+/* The base address of the GIC registers */
+extern void __iomem *mips_gic_base;
+
+/* Offsets from the GIC base address to various control blocks */
+#define MIPS_GIC_SHARED_OFS 0x00000
+#define MIPS_GIC_SHARED_SZ 0x08000
+#define MIPS_GIC_LOCAL_OFS 0x08000
+#define MIPS_GIC_LOCAL_SZ 0x04000
+#define MIPS_GIC_REDIR_OFS 0x0c000
+#define MIPS_GIC_REDIR_SZ 0x04000
+#define MIPS_GIC_USER_OFS 0x10000
+#define MIPS_GIC_USER_SZ 0x10000
+
+/* For read-only shared registers */
+#define GIC_ACCESSOR_RO(sz, off, name) \
+ CPS_ACCESSOR_RO(gic, sz, MIPS_GIC_SHARED_OFS + off, name)
+
+/* For read-write shared registers */
+#define GIC_ACCESSOR_RW(sz, off, name) \
+ CPS_ACCESSOR_RW(gic, sz, MIPS_GIC_SHARED_OFS + off, name)
+
+/* For read-only local registers */
+#define GIC_VX_ACCESSOR_RO(sz, off, name) \
+ CPS_ACCESSOR_RO(gic, sz, MIPS_GIC_LOCAL_OFS + off, vl_##name) \
+ CPS_ACCESSOR_RO(gic, sz, MIPS_GIC_REDIR_OFS + off, vo_##name)
+
+/* For read-write local registers */
+#define GIC_VX_ACCESSOR_RW(sz, off, name) \
+ CPS_ACCESSOR_RW(gic, sz, MIPS_GIC_LOCAL_OFS + off, vl_##name) \
+ CPS_ACCESSOR_RW(gic, sz, MIPS_GIC_REDIR_OFS + off, vo_##name)
+
+/* For read-only shared per-interrupt registers */
+#define GIC_ACCESSOR_RO_INTR_REG(sz, off, stride, name) \
+static inline void __iomem *addr_gic_##name(unsigned int intr) \
+{ \
+ return mips_gic_base + (off) + (intr * (stride)); \
+} \
+ \
+static inline unsigned int read_gic_##name(unsigned int intr) \
+{ \
+ BUILD_BUG_ON(sz != 32); \
+ return __raw_readl(addr_gic_##name(intr)); \
+}
+
+/* For read-write shared per-interrupt registers */
+#define GIC_ACCESSOR_RW_INTR_REG(sz, off, stride, name) \
+ GIC_ACCESSOR_RO_INTR_REG(sz, off, stride, name) \
+ \
+static inline void write_gic_##name(unsigned int intr, \
+ unsigned int val) \
+{ \
+ BUILD_BUG_ON(sz != 32); \
+ __raw_writel(val, addr_gic_##name(intr)); \
+}
+
+/* For read-only local per-interrupt registers */
+#define GIC_VX_ACCESSOR_RO_INTR_REG(sz, off, stride, name) \
+ GIC_ACCESSOR_RO_INTR_REG(sz, MIPS_GIC_LOCAL_OFS + off, \
+ stride, vl_##name) \
+ GIC_ACCESSOR_RO_INTR_REG(sz, MIPS_GIC_REDIR_OFS + off, \
+ stride, vo_##name)
+
+/* For read-write local per-interrupt registers */
+#define GIC_VX_ACCESSOR_RW_INTR_REG(sz, off, stride, name) \
+ GIC_ACCESSOR_RW_INTR_REG(sz, MIPS_GIC_LOCAL_OFS + off, \
+ stride, vl_##name) \
+ GIC_ACCESSOR_RW_INTR_REG(sz, MIPS_GIC_REDIR_OFS + off, \
+ stride, vo_##name)
+
+/* For read-only shared bit-per-interrupt registers */
+#define GIC_ACCESSOR_RO_INTR_BIT(off, name) \
+static inline void __iomem *addr_gic_##name(void) \
+{ \
+ return mips_gic_base + (off); \
+} \
+ \
+static inline unsigned int read_gic_##name(unsigned int intr) \
+{ \
+ void __iomem *addr = addr_gic_##name(); \
+ unsigned int val; \
+ \
+ if (mips_cm_is64) { \
+ addr += (intr / 64) * sizeof(uint64_t); \
+ val = __raw_readq(addr) >> intr % 64; \
+ } else { \
+ addr += (intr / 32) * sizeof(uint32_t); \
+ val = __raw_readl(addr) >> intr % 32; \
+ } \
+ \
+ return val & 0x1; \
+}
+
+/* For read-write shared bit-per-interrupt registers */
+#define GIC_ACCESSOR_RW_INTR_BIT(off, name) \
+ GIC_ACCESSOR_RO_INTR_BIT(off, name) \
+ \
+static inline void write_gic_##name(unsigned int intr) \
+{ \
+ void __iomem *addr = addr_gic_##name(); \
+ \
+ if (mips_cm_is64) { \
+ addr += (intr / 64) * sizeof(uint64_t); \
+ __raw_writeq(BIT(intr % 64), addr); \
+ } else { \
+ addr += (intr / 32) * sizeof(uint32_t); \
+ __raw_writel(BIT(intr % 32), addr); \
+ } \
+} \
+ \
+static inline void change_gic_##name(unsigned int intr, \
+ unsigned int val) \
+{ \
+ void __iomem *addr = addr_gic_##name(); \
+ \
+ if (mips_cm_is64) { \
+ uint64_t _val; \
+ \
+ addr += (intr / 64) * sizeof(uint64_t); \
+ _val = __raw_readq(addr); \
+ _val &= ~BIT_ULL(intr % 64); \
+ _val |= (uint64_t)val << (intr % 64); \
+ __raw_writeq(_val, addr); \
+ } else { \
+ uint32_t _val; \
+ \
+ addr += (intr / 32) * sizeof(uint32_t); \
+ _val = __raw_readl(addr); \
+ _val &= ~BIT(intr % 32); \
+ _val |= val << (intr % 32); \
+ __raw_writel(_val, addr); \
+ } \
+}
+
+/* For read-only local bit-per-interrupt registers */
+#define GIC_VX_ACCESSOR_RO_INTR_BIT(sz, off, name) \
+ GIC_ACCESSOR_RO_INTR_BIT(sz, MIPS_GIC_LOCAL_OFS + off, \
+ vl_##name) \
+ GIC_ACCESSOR_RO_INTR_BIT(sz, MIPS_GIC_REDIR_OFS + off, \
+ vo_##name)
+
+/* For read-write local bit-per-interrupt registers */
+#define GIC_VX_ACCESSOR_RW_INTR_BIT(sz, off, name) \
+ GIC_ACCESSOR_RW_INTR_BIT(sz, MIPS_GIC_LOCAL_OFS + off, \
+ vl_##name) \
+ GIC_ACCESSOR_RW_INTR_BIT(sz, MIPS_GIC_REDIR_OFS + off, \
+ vo_##name)
+
+/* GIC_SH_CONFIG - Information about the GIC configuration */
+GIC_ACCESSOR_RW(32, 0x000, config)
+#define GIC_CONFIG_COUNTSTOP BIT(28)
+#define GIC_CONFIG_COUNTBITS GENMASK(27, 24)
+#define GIC_CONFIG_NUMINTERRUPTS GENMASK(23, 16)
+#define GIC_CONFIG_PVPS GENMASK(6, 0)
+
+/* GIC_SH_COUNTER - Shared global counter value */
+GIC_ACCESSOR_RW(64, 0x010, counter)
+GIC_ACCESSOR_RW(32, 0x010, counter_32l)
+GIC_ACCESSOR_RW(32, 0x014, counter_32h)
+
+/* GIC_SH_POL_* - Configures interrupt polarity */
+GIC_ACCESSOR_RW_INTR_BIT(0x100, pol)
+#define GIC_POL_ACTIVE_LOW 0 /* when level triggered */
+#define GIC_POL_ACTIVE_HIGH 1 /* when level triggered */
+#define GIC_POL_FALLING_EDGE 0 /* when single-edge triggered */
+#define GIC_POL_RISING_EDGE 1 /* when single-edge triggered */
+
+/* GIC_SH_TRIG_* - Configures interrupts to be edge or level triggered */
+GIC_ACCESSOR_RW_INTR_BIT(0x180, trig)
+#define GIC_TRIG_LEVEL 0
+#define GIC_TRIG_EDGE 1
+
+/* GIC_SH_DUAL_* - Configures whether interrupts trigger on both edges */
+GIC_ACCESSOR_RW_INTR_BIT(0x200, dual)
+#define GIC_DUAL_SINGLE 0 /* when edge-triggered */
+#define GIC_DUAL_DUAL 1 /* when edge-triggered */
+
+/* GIC_SH_WEDGE - Write an 'edge', ie. trigger an interrupt */
+GIC_ACCESSOR_RW(32, 0x280, wedge)
+#define GIC_WEDGE_RW BIT(31)
+#define GIC_WEDGE_INTR GENMASK(7, 0)
+
+/* GIC_SH_RMASK_* - Reset/clear shared interrupt mask bits */
+GIC_ACCESSOR_RW_INTR_BIT(0x300, rmask)
+
+/* GIC_SH_SMASK_* - Set shared interrupt mask bits */
+GIC_ACCESSOR_RW_INTR_BIT(0x380, smask)
+
+/* GIC_SH_MASK_* - Read the current shared interrupt mask */
+GIC_ACCESSOR_RO_INTR_BIT(0x400, mask)
+
+/* GIC_SH_PEND_* - Read currently pending shared interrupts */
+GIC_ACCESSOR_RO_INTR_BIT(0x480, pend)
+
+/* GIC_SH_MAPx_PIN - Map shared interrupts to a particular CPU pin */
+GIC_ACCESSOR_RW_INTR_REG(32, 0x500, 0x4, map_pin)
+#define GIC_MAP_PIN_MAP_TO_PIN BIT(31)
+#define GIC_MAP_PIN_MAP_TO_NMI BIT(30)
+#define GIC_MAP_PIN_MAP GENMASK(5, 0)
+
+/* GIC_SH_MAPx_VP - Map shared interrupts to a particular Virtual Processor */
+GIC_ACCESSOR_RW_INTR_REG(32, 0x2000, 0x20, map_vp)
+
+/* GIC_Vx_CTL - VP-level interrupt control */
+GIC_VX_ACCESSOR_RW(32, 0x000, ctl)
+#define GIC_VX_CTL_FDC_ROUTABLE BIT(4)
+#define GIC_VX_CTL_SWINT_ROUTABLE BIT(3)
+#define GIC_VX_CTL_PERFCNT_ROUTABLE BIT(2)
+#define GIC_VX_CTL_TIMER_ROUTABLE BIT(1)
+#define GIC_VX_CTL_EIC BIT(0)
+
+/* GIC_Vx_PEND - Read currently pending local interrupts */
+GIC_VX_ACCESSOR_RO(32, 0x004, pend)
+
+/* GIC_Vx_MASK - Read the current local interrupt mask */
+GIC_VX_ACCESSOR_RO(32, 0x008, mask)
+
+/* GIC_Vx_RMASK - Reset/clear local interrupt mask bits */
+GIC_VX_ACCESSOR_RW(32, 0x00c, rmask)
+
+/* GIC_Vx_SMASK - Set local interrupt mask bits */
+GIC_VX_ACCESSOR_RW(32, 0x010, smask)
+
+/* GIC_Vx_*_MAP - Route local interrupts to the desired pins */
+GIC_VX_ACCESSOR_RW_INTR_REG(32, 0x040, 0x4, map)
+
+/* GIC_Vx_WD_MAP - Route the local watchdog timer interrupt */
+GIC_VX_ACCESSOR_RW(32, 0x040, wd_map)
+
+/* GIC_Vx_COMPARE_MAP - Route the local count/compare interrupt */
+GIC_VX_ACCESSOR_RW(32, 0x044, compare_map)
+
+/* GIC_Vx_TIMER_MAP - Route the local CPU timer (cp0 count/compare) interrupt */
+GIC_VX_ACCESSOR_RW(32, 0x048, timer_map)
+
+/* GIC_Vx_FDC_MAP - Route the local fast debug channel interrupt */
+GIC_VX_ACCESSOR_RW(32, 0x04c, fdc_map)
+
+/* GIC_Vx_PERFCTR_MAP - Route the local performance counter interrupt */
+GIC_VX_ACCESSOR_RW(32, 0x050, perfctr_map)
+
+/* GIC_Vx_SWINT0_MAP - Route the local software interrupt 0 */
+GIC_VX_ACCESSOR_RW(32, 0x054, swint0_map)
+
+/* GIC_Vx_SWINT1_MAP - Route the local software interrupt 1 */
+GIC_VX_ACCESSOR_RW(32, 0x058, swint1_map)
+
+/* GIC_Vx_OTHER - Configure access to other Virtual Processor registers */
+GIC_VX_ACCESSOR_RW(32, 0x080, other)
+#define GIC_VX_OTHER_VPNUM GENMASK(5, 0)
+
+/* GIC_Vx_IDENT - Retrieve the local Virtual Processor's ID */
+GIC_VX_ACCESSOR_RO(32, 0x088, ident)
+#define GIC_VX_IDENT_VPNUM GENMASK(5, 0)
+
+/* GIC_Vx_COMPARE - Value to compare with GIC_SH_COUNTER */
+GIC_VX_ACCESSOR_RW(64, 0x0a0, compare)
+
+/* GIC_Vx_EIC_SHADOW_SET_BASE - Set shadow register set for each interrupt */
+GIC_VX_ACCESSOR_RW_INTR_REG(32, 0x100, 0x4, eic_shadow_set)
+
+/**
+ * enum mips_gic_local_interrupt - GIC local interrupts
+ * @GIC_LOCAL_INT_WD: GIC watchdog timer interrupt
+ * @GIC_LOCAL_INT_COMPARE: GIC count/compare interrupt
+ * @GIC_LOCAL_INT_TIMER: CP0 count/compare interrupt
+ * @GIC_LOCAL_INT_PERFCTR: Performance counter interrupt
+ * @GIC_LOCAL_INT_SWINT0: Software interrupt 0
+ * @GIC_LOCAL_INT_SWINT1: Software interrupt 1
+ * @GIC_LOCAL_INT_FDC: Fast debug channel interrupt
+ * @GIC_NUM_LOCAL_INTRS: The number of local interrupts
+ *
+ * Enumerates interrupts provided by the GIC that are local to a VP.
+ */
+enum mips_gic_local_interrupt {
+ GIC_LOCAL_INT_WD,
+ GIC_LOCAL_INT_COMPARE,
+ GIC_LOCAL_INT_TIMER,
+ GIC_LOCAL_INT_PERFCTR,
+ GIC_LOCAL_INT_SWINT0,
+ GIC_LOCAL_INT_SWINT1,
+ GIC_LOCAL_INT_FDC,
+ GIC_NUM_LOCAL_INTRS
+};
+
+/**
+ * mips_gic_present() - Determine whether a GIC is present
+ *
+ * Determines whether a MIPS Global Interrupt Controller (GIC) is present in
+ * the system that the kernel is running on.
+ *
+ * Return true if a GIC is present, else false.
+ */
+static inline bool mips_gic_present(void)
+{
+ return IS_ENABLED(CONFIG_MIPS_GIC) && mips_gic_base;
+}
+
+/**
+ * mips_gic_vx_map_reg() - Return GIC_Vx_<intr>_MAP register offset
+ * @intr: A GIC local interrupt
+ *
+ * Determine the index of the GIC_VL_<intr>_MAP or GIC_VO_<intr>_MAP register
+ * within the block of GIC map registers. This is almost the same as the order
+ * of interrupts in the pending & mask registers, as used by enum
+ * mips_gic_local_interrupt, but moves the FDC interrupt & thus offsets the
+ * interrupts after it...
+ *
+ * Return: The map register index corresponding to @intr.
+ *
+ * The return value is suitable for use with the (read|write)_gic_v[lo]_map
+ * accessor functions.
+ */
+static inline unsigned int
+mips_gic_vx_map_reg(enum mips_gic_local_interrupt intr)
+{
+ /* WD, Compare & Timer are 1:1 */
+ if (intr <= GIC_LOCAL_INT_TIMER)
+ return intr;
+
+ /* FDC moves to after Timer... */
+ if (intr == GIC_LOCAL_INT_FDC)
+ return GIC_LOCAL_INT_TIMER + 1;
+
+ /* As a result everything else is offset by 1 */
+ return intr + 1;
+}
+
+/**
+ * gic_get_c0_compare_int() - Return cp0 count/compare interrupt virq
+ *
+ * Determine the virq number to use for the coprocessor 0 count/compare
+ * interrupt, which may be routed via the GIC.
+ *
+ * Returns the virq number or a negative error number.
+ */
+extern int gic_get_c0_compare_int(void);
+
+/**
+ * gic_get_c0_perfcount_int() - Return performance counter interrupt virq
+ *
+ * Determine the virq number to use for CPU performance counter interrupts,
+ * which may be routed via the GIC.
+ *
+ * Returns the virq number or a negative error number.
+ */
+extern int gic_get_c0_perfcount_int(void);
+
+/**
+ * gic_get_c0_fdc_int() - Return fast debug channel interrupt virq
+ *
+ * Determine the virq number to use for fast debug channel (FDC) interrupts,
+ * which may be routed via the GIC.
+ *
+ * Returns the virq number or a negative error number.
+ */
+extern int gic_get_c0_fdc_int(void);
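A hedged sketch of how a timer driver might consume these helpers; the request_irq() wiring and handler are illustrative, not taken from the patch, and assume <linux/interrupt.h>:

/* Illustrative only: obtain the CP0 timer virq and install a handler. */
static int example_setup_timer_irq(irq_handler_t handler)
{
	int irq = gic_get_c0_compare_int();

	if (irq < 0)
		return irq;	/* no usable routing, propagate the error */

	return request_irq(irq, handler, IRQF_PERCPU | IRQF_TIMER,
			   "timer", NULL);
}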
+
+#endif /* __MIPS_ASM_MIPS_CPS_H__ */
diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h
new file mode 100644
index 000000000..20621e1ca
--- /dev/null
+++ b/arch/mips/include/asm/mips-r2-to-r6-emul.h
@@ -0,0 +1,101 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#ifndef __ASM_MIPS_R2_TO_R6_EMUL_H
+#define __ASM_MIPS_R2_TO_R6_EMUL_H
+
+struct mips_r2_emulator_stats {
+ u64 movs;
+ u64 hilo;
+ u64 muls;
+ u64 divs;
+ u64 dsps;
+ u64 bops;
+ u64 traps;
+ u64 fpus;
+ u64 loads;
+ u64 stores;
+ u64 llsc;
+ u64 dsemul;
+};
+
+struct mips_r2br_emulator_stats {
+ u64 jrs;
+ u64 bltzl;
+ u64 bgezl;
+ u64 bltzll;
+ u64 bgezll;
+ u64 bltzall;
+ u64 bgezall;
+ u64 bltzal;
+ u64 bgezal;
+ u64 beql;
+ u64 bnel;
+ u64 blezl;
+ u64 bgtzl;
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+#define MIPS_R2_STATS(M) \
+do { \
+ u32 nir; \
+ int err; \
+ \
+ preempt_disable(); \
+ __this_cpu_inc(mipsr2emustats.M); \
+ err = __get_user(nir, (u32 __user *)regs->cp0_epc); \
+ if (!err) { \
+ if (nir == BREAK_MATH(0)) \
+ __this_cpu_inc(mipsr2bdemustats.M); \
+ } \
+ preempt_enable(); \
+} while (0)
+
+#define MIPS_R2BR_STATS(M) \
+do { \
+ preempt_disable(); \
+ __this_cpu_inc(mipsr2bremustats.M); \
+ preempt_enable(); \
+} while (0)
+
+#else
+
+#define MIPS_R2_STATS(M) do { } while (0)
+#define MIPS_R2BR_STATS(M) do { } while (0)
+
+#endif /* CONFIG_DEBUG_FS */
+
+struct r2_decoder_table {
+ u32 mask;
+ u32 code;
+ int (*func)(struct pt_regs *regs, u32 inst);
+};
+
+
+extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
+ const char *str);
+
+#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR
+static int mipsr2_emulation;
+static inline int mipsr2_decoder(struct pt_regs *regs, u32 inst,
+ unsigned long *fcr31)
+{
+ return 0;
+}
+#else
+/* MIPS R2 Emulator ON/OFF */
+extern int mipsr2_emulation;
+extern int mipsr2_decoder(struct pt_regs *regs, u32 inst,
+ unsigned long *fcr31);
+#endif /* CONFIG_MIPSR2_TO_R6_EMULATOR */
+
+#define NO_R6EMU (cpu_has_mips_r6 && !mipsr2_emulation)
+
+#endif /* __ASM_MIPS_R2_TO_R6_EMUL_H */
diff --git a/arch/mips/include/asm/mips_mt.h b/arch/mips/include/asm/mips_mt.h
new file mode 100644
index 000000000..b444523ec
--- /dev/null
+++ b/arch/mips/include/asm/mips_mt.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions and declarations for MIPS MT support that are common between
+ * the VSMP and AP/SP kernel models.
+ */
+#ifndef __ASM_MIPS_MT_H
+#define __ASM_MIPS_MT_H
+
+#include <linux/cpumask.h>
+
+/*
+ * How many VPEs and TCs is Linux allowed to use? 0 means no limit.
+ */
+extern int tclimit;
+extern int vpelimit;
+
+extern cpumask_t mt_fpu_cpumask;
+extern unsigned long mt_fpemul_threshold;
+
+extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value);
+
+#ifdef CONFIG_MIPS_MT
+extern void mips_mt_set_cpuoptions(void);
+#else
+static inline void mips_mt_set_cpuoptions(void) { }
+#endif
+
+struct class;
+extern struct class *mt_class;
+
+#endif /* __ASM_MIPS_MT_H */
diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
new file mode 100644
index 000000000..be4cf9d47
--- /dev/null
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -0,0 +1,423 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MT register definitions; follows on from mipsregs.h.
+ * Copyright (C) 2004 - 2005 MIPS Technologies, Inc. All rights reserved.
+ * Elizabeth Clarke et al.
+ *
+ */
+#ifndef _ASM_MIPSMTREGS_H
+#define _ASM_MIPSMTREGS_H
+
+#include <asm/mipsregs.h>
+#include <asm/war.h>
+
+#ifndef __ASSEMBLY__
+
+/*
+ * C macros
+ */
+
+#define read_c0_mvpcontrol() __read_32bit_c0_register($0, 1)
+#define write_c0_mvpcontrol(val) __write_32bit_c0_register($0, 1, val)
+
+#define read_c0_mvpconf0() __read_32bit_c0_register($0, 2)
+#define read_c0_mvpconf1() __read_32bit_c0_register($0, 3)
+
+#define read_c0_vpecontrol() __read_32bit_c0_register($1, 1)
+#define write_c0_vpecontrol(val) __write_32bit_c0_register($1, 1, val)
+
+#define read_c0_vpeconf0() __read_32bit_c0_register($1, 2)
+#define write_c0_vpeconf0(val) __write_32bit_c0_register($1, 2, val)
+
+#define read_c0_vpeconf1() __read_32bit_c0_register($1, 3)
+#define write_c0_vpeconf1(val) __write_32bit_c0_register($1, 3, val)
+
+#define read_c0_tcstatus() __read_32bit_c0_register($2, 1)
+#define write_c0_tcstatus(val) __write_32bit_c0_register($2, 1, val)
+
+#define read_c0_tcbind() __read_32bit_c0_register($2, 2)
+
+#define write_c0_tchalt(val) __write_32bit_c0_register($2, 4, val)
+
+#define read_c0_tccontext() __read_32bit_c0_register($2, 5)
+#define write_c0_tccontext(val) __write_32bit_c0_register($2, 5, val)
+
+#else /* Assembly */
+/*
+ * Macros for use in assembly language code
+ */
+
+#define CP0_MVPCONTROL $0, 1
+#define CP0_MVPCONF0 $0, 2
+#define CP0_MVPCONF1 $0, 3
+#define CP0_VPECONTROL $1, 1
+#define CP0_VPECONF0 $1, 2
+#define CP0_VPECONF1 $1, 3
+#define CP0_YQMASK $1, 4
+#define CP0_VPESCHEDULE $1, 5
+#define CP0_VPESCHEFBK $1, 6
+#define CP0_TCSTATUS $2, 1
+#define CP0_TCBIND $2, 2
+#define CP0_TCRESTART $2, 3
+#define CP0_TCHALT $2, 4
+#define CP0_TCCONTEXT $2, 5
+#define CP0_TCSCHEDULE $2, 6
+#define CP0_TCSCHEFBK $2, 7
+#define CP0_SRSCONF0 $6, 1
+#define CP0_SRSCONF1 $6, 2
+#define CP0_SRSCONF2 $6, 3
+#define CP0_SRSCONF3 $6, 4
+#define CP0_SRSCONF4 $6, 5
+
+#endif
+
+/* MVPControl fields */
+#define MVPCONTROL_EVP (_ULCAST_(1))
+
+#define MVPCONTROL_VPC_SHIFT 1
+#define MVPCONTROL_VPC (_ULCAST_(1) << MVPCONTROL_VPC_SHIFT)
+
+#define MVPCONTROL_STLB_SHIFT 2
+#define MVPCONTROL_STLB (_ULCAST_(1) << MVPCONTROL_STLB_SHIFT)
+
+
+/* MVPConf0 fields */
+#define MVPCONF0_PTC_SHIFT 0
+#define MVPCONF0_PTC (_ULCAST_(0xff))
+#define MVPCONF0_PVPE_SHIFT 10
+#define MVPCONF0_PVPE (_ULCAST_(0xf) << MVPCONF0_PVPE_SHIFT)
+#define MVPCONF0_TCA_SHIFT 15
+#define MVPCONF0_TCA (_ULCAST_(1) << MVPCONF0_TCA_SHIFT)
+#define MVPCONF0_PTLBE_SHIFT 16
+#define MVPCONF0_PTLBE (_ULCAST_(0x3ff) << MVPCONF0_PTLBE_SHIFT)
+#define MVPCONF0_TLBS_SHIFT 29
+#define MVPCONF0_TLBS (_ULCAST_(1) << MVPCONF0_TLBS_SHIFT)
+#define MVPCONF0_M_SHIFT 31
+#define MVPCONF0_M (_ULCAST_(0x1) << MVPCONF0_M_SHIFT)
+
+
+/* config3 fields */
+#define CONFIG3_MT_SHIFT 2
+#define CONFIG3_MT (_ULCAST_(1) << CONFIG3_MT_SHIFT)
+
+
+/* VPEControl fields (per VPE) */
+#define VPECONTROL_TARGTC (_ULCAST_(0xff))
+
+#define VPECONTROL_TE_SHIFT 15
+#define VPECONTROL_TE (_ULCAST_(1) << VPECONTROL_TE_SHIFT)
+#define VPECONTROL_EXCPT_SHIFT 16
+#define VPECONTROL_EXCPT (_ULCAST_(0x7) << VPECONTROL_EXCPT_SHIFT)
+
+/* Thread Exception Codes for EXCPT field */
+#define THREX_TU 0
+#define THREX_TO 1
+#define THREX_IYQ 2
+#define THREX_GSX 3
+#define THREX_YSCH 4
+#define THREX_GSSCH 5
+
+#define VPECONTROL_GSI_SHIFT 20
+#define VPECONTROL_GSI (_ULCAST_(1) << VPECONTROL_GSI_SHIFT)
+#define VPECONTROL_YSI_SHIFT 21
+#define VPECONTROL_YSI (_ULCAST_(1) << VPECONTROL_YSI_SHIFT)
+
+/* VPEConf0 fields (per VPE) */
+#define VPECONF0_VPA_SHIFT 0
+#define VPECONF0_VPA (_ULCAST_(1) << VPECONF0_VPA_SHIFT)
+#define VPECONF0_MVP_SHIFT 1
+#define VPECONF0_MVP (_ULCAST_(1) << VPECONF0_MVP_SHIFT)
+#define VPECONF0_XTC_SHIFT 21
+#define VPECONF0_XTC (_ULCAST_(0xff) << VPECONF0_XTC_SHIFT)
+
+/* VPEConf1 fields (per VPE) */
+#define VPECONF1_NCP1_SHIFT 0
+#define VPECONF1_NCP1 (_ULCAST_(0xff) << VPECONF1_NCP1_SHIFT)
+#define VPECONF1_NCP2_SHIFT 10
+#define VPECONF1_NCP2 (_ULCAST_(0xff) << VPECONF1_NCP2_SHIFT)
+#define VPECONF1_NCX_SHIFT 20
+#define VPECONF1_NCX (_ULCAST_(0xff) << VPECONF1_NCX_SHIFT)
+
+/* TCStatus fields (per TC) */
+#define TCSTATUS_TASID (_ULCAST_(0xff))
+#define TCSTATUS_IXMT_SHIFT 10
+#define TCSTATUS_IXMT (_ULCAST_(1) << TCSTATUS_IXMT_SHIFT)
+#define TCSTATUS_TKSU_SHIFT 11
+#define TCSTATUS_TKSU (_ULCAST_(3) << TCSTATUS_TKSU_SHIFT)
+#define TCSTATUS_A_SHIFT 13
+#define TCSTATUS_A (_ULCAST_(1) << TCSTATUS_A_SHIFT)
+#define TCSTATUS_DA_SHIFT 15
+#define TCSTATUS_DA (_ULCAST_(1) << TCSTATUS_DA_SHIFT)
+#define TCSTATUS_DT_SHIFT 20
+#define TCSTATUS_DT (_ULCAST_(1) << TCSTATUS_DT_SHIFT)
+#define TCSTATUS_TDS_SHIFT 21
+#define TCSTATUS_TDS (_ULCAST_(1) << TCSTATUS_TDS_SHIFT)
+#define TCSTATUS_TSST_SHIFT 22
+#define TCSTATUS_TSST (_ULCAST_(1) << TCSTATUS_TSST_SHIFT)
+#define TCSTATUS_RNST_SHIFT 23
+#define TCSTATUS_RNST (_ULCAST_(3) << TCSTATUS_RNST_SHIFT)
+/* Codes for RNST */
+#define TC_RUNNING 0
+#define TC_WAITING 1
+#define TC_YIELDING 2
+#define TC_GATED 3
+
+#define TCSTATUS_TMX_SHIFT 27
+#define TCSTATUS_TMX (_ULCAST_(1) << TCSTATUS_TMX_SHIFT)
+/* TCStatus TCU bits can use same definitions/offsets as CU bits in Status */
+
+/* TCBind */
+#define TCBIND_CURVPE_SHIFT 0
+#define TCBIND_CURVPE (_ULCAST_(0xf))
+
+#define TCBIND_CURTC_SHIFT 21
+
+#define TCBIND_CURTC (_ULCAST_(0xff) << TCBIND_CURTC_SHIFT)
+
+/* TCHalt */
+#define TCHALT_H (_ULCAST_(1))
+
+#ifndef __ASSEMBLY__
+
+static inline unsigned core_nvpes(void)
+{
+ unsigned conf0;
+
+ if (!cpu_has_mipsmt)
+ return 1;
+
+ conf0 = read_c0_mvpconf0();
+ return ((conf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+}
+
+static inline unsigned int dvpe(void)
+{
+ int res = 0;
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder \n"
+ " .set noat \n"
+ " .set mips32r2 \n"
+ " .word 0x41610001 # dvpe $1 \n"
+ " move %0, $1 \n"
+ " ehb \n"
+ " .set pop \n"
+ : "=r" (res));
+
+ instruction_hazard();
+
+ return res;
+}
+
+static inline void __raw_evpe(void)
+{
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder \n"
+ " .set noat \n"
+ " .set mips32r2 \n"
+ " .word 0x41600021 # evpe \n"
+ " ehb \n"
+ " .set pop \n");
+}
+
+/* Enable virtual processor execution if the previous state indicates it was
+   enabled; pass EVPE_ENABLE to force it on. */
+
+#define EVPE_ENABLE MVPCONTROL_EVP
+
+static inline void evpe(int previous)
+{
+ if ((previous & MVPCONTROL_EVP))
+ __raw_evpe();
+}
+
+static inline unsigned int dmt(void)
+{
+ int res;
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set mips32r2 \n"
+ " .set noat \n"
+ " .word 0x41610BC1 # dmt $1 \n"
+ " ehb \n"
+ " move %0, $1 \n"
+ " .set pop \n"
+ : "=r" (res));
+
+ instruction_hazard();
+
+ return res;
+}
+
+static inline void __raw_emt(void)
+{
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder \n"
+ " .set mips32r2 \n"
+ " .word 0x41600be1 # emt \n"
+ " ehb \n"
+ " .set pop");
+}
+
+/* Enable multi-threaded execution if the previous state indicates it was
+   enabled; pass EMT_ENABLE to force it on. */
+
+#define EMT_ENABLE VPECONTROL_TE
+
+static inline void emt(int previous)
+{
+ if ((previous & EMT_ENABLE))
+ __raw_emt();
+}
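Both pairs are normally used bracket-style: save the old state while disabling, do the MT-sensitive work, then restore. A minimal sketch assuming a hypothetical do_percore_setup() helper:

/* Illustrative only: quiesce the other VPEs around a critical section.
 * dvpe() returns the previous MVPControl value; evpe() re-enables VPEs
 * only if they were enabled before. The dmt()/emt() pair follows the
 * same pattern for thread contexts. do_percore_setup() is a placeholder.
 */
static void example_percore_critical_section(void)
{
	unsigned int vpflags = dvpe();

	do_percore_setup();

	evpe(vpflags);
}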
+
+static inline void ehb(void)
+{
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set mips32r2 \n"
+ " ehb \n"
+ " .set pop \n");
+}
+
+#define mftc0(rt,sel) \
+({ \
+ unsigned long __res; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set mips32r2 \n" \
+ " .set noat \n" \
+ " # mftc0 $1, $" #rt ", " #sel " \n" \
+ " .word 0x41000800 | (" #rt " << 16) | " #sel " \n" \
+ " move %0, $1 \n" \
+ " .set pop \n" \
+ : "=r" (__res)); \
+ \
+ __res; \
+})
+
+#define mftgpr(rt) \
+({ \
+ unsigned long __res; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set mips32r2 \n" \
+ " # mftgpr $1," #rt " \n" \
+ " .word 0x41000820 | (" #rt " << 16) \n" \
+ " move %0, $1 \n" \
+ " .set pop \n" \
+ : "=r" (__res)); \
+ \
+ __res; \
+})
+
+#define mftr(rt, u, sel) \
+({ \
+ unsigned long __res; \
+ \
+ __asm__ __volatile__( \
+ " mftr %0, " #rt ", " #u ", " #sel " \n" \
+ : "=r" (__res)); \
+ \
+ __res; \
+})
+
+#define mttgpr(rd,v) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set mips32r2 \n" \
+ " .set noat \n" \
+ " move $1, %0 \n" \
+ " # mttgpr $1, " #rd " \n" \
+ " .word 0x41810020 | (" #rd " << 11) \n" \
+ " .set pop \n" \
+ : : "r" (v)); \
+} while (0)
+
+#define mttc0(rd, sel, v) \
+({ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set mips32r2 \n" \
+ " .set noat \n" \
+ " move $1, %0 \n" \
+ " # mttc0 %0," #rd ", " #sel " \n" \
+ " .word 0x41810000 | (" #rd " << 11) | " #sel " \n" \
+ " .set pop \n" \
+ : \
+ : "r" (v)); \
+})
+
+
+#define mttr(rd, u, sel, v) \
+({ \
+ __asm__ __volatile__( \
+ "mttr %0," #rd ", " #u ", " #sel \
+ : : "r" (v)); \
+})
+
+
+#define settc(tc) \
+do { \
+ write_c0_vpecontrol((read_c0_vpecontrol()&~VPECONTROL_TARGTC) | (tc)); \
+ ehb(); \
+} while (0)
+
+
+/* you *must* set the target tc (settc) before trying to use these */
+#define read_vpe_c0_vpecontrol() mftc0(1, 1)
+#define write_vpe_c0_vpecontrol(val) mttc0(1, 1, val)
+#define read_vpe_c0_vpeconf0() mftc0(1, 2)
+#define write_vpe_c0_vpeconf0(val) mttc0(1, 2, val)
+#define read_vpe_c0_vpeconf1() mftc0(1, 3)
+#define write_vpe_c0_vpeconf1(val) mttc0(1, 3, val)
+#define read_vpe_c0_count() mftc0(9, 0)
+#define write_vpe_c0_count(val) mttc0(9, 0, val)
+#define read_vpe_c0_status() mftc0(12, 0)
+#define write_vpe_c0_status(val) mttc0(12, 0, val)
+#define read_vpe_c0_cause() mftc0(13, 0)
+#define write_vpe_c0_cause(val) mttc0(13, 0, val)
+#define read_vpe_c0_config() mftc0(16, 0)
+#define write_vpe_c0_config(val) mttc0(16, 0, val)
+#define read_vpe_c0_config1() mftc0(16, 1)
+#define write_vpe_c0_config1(val) mttc0(16, 1, val)
+#define read_vpe_c0_config7() mftc0(16, 7)
+#define write_vpe_c0_config7(val) mttc0(16, 7, val)
+#define read_vpe_c0_ebase() mftc0(15, 1)
+#define write_vpe_c0_ebase(val) mttc0(15, 1, val)
+#define write_vpe_c0_compare(val) mttc0(11, 0, val)
+#define read_vpe_c0_badvaddr() mftc0(8, 0)
+#define read_vpe_c0_epc() mftc0(14, 0)
+#define write_vpe_c0_epc(val) mttc0(14, 0, val)
+
+
+/* TC */
+#define read_tc_c0_tcstatus() mftc0(2, 1)
+#define write_tc_c0_tcstatus(val) mttc0(2, 1, val)
+#define read_tc_c0_tcbind() mftc0(2, 2)
+#define write_tc_c0_tcbind(val) mttc0(2, 2, val)
+#define read_tc_c0_tcrestart() mftc0(2, 3)
+#define write_tc_c0_tcrestart(val) mttc0(2, 3, val)
+#define read_tc_c0_tchalt() mftc0(2, 4)
+#define write_tc_c0_tchalt(val) mttc0(2, 4, val)
+#define read_tc_c0_tccontext() mftc0(2, 5)
+#define write_tc_c0_tccontext(val) mttc0(2, 5, val)
+
+/* GPR */
+#define read_tc_gpr_sp() mftgpr(29)
+#define write_tc_gpr_sp(val) mttgpr(29, val)
+#define read_tc_gpr_gp() mftgpr(28)
+#define write_tc_gpr_gp(val) mttgpr(28, val)
+
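A hedged usage sketch of the accessors above: the target TC must be selected with settc() first, and callers are assumed to have quiesced MT (for example via dvpe()) around the sequence; the halt-and-inspect example itself is illustrative, not lifted from the patch:

/* Illustrative only: halt thread context `tc` and read back its restart PC. */
static inline unsigned long example_halt_tc(unsigned int tc)
{
	settc(tc);
	write_tc_c0_tchalt(TCHALT_H);
	ehb();
	return read_tc_c0_tcrestart();
}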
+__BUILD_SET_C0(mvpcontrol)
+
+#endif /* Not __ASSEMBLY__ */
+
+#endif
diff --git a/arch/mips/include/asm/mipsprom.h b/arch/mips/include/asm/mipsprom.h
new file mode 100644
index 000000000..2eda19f8f
--- /dev/null
+++ b/arch/mips/include/asm/mipsprom.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MIPSPROM_H
+#define __ASM_MIPSPROM_H
+
+#define PROM_RESET 0
+#define PROM_EXEC 1
+#define PROM_RESTART 2
+#define PROM_REINIT 3
+#define PROM_REBOOT 4
+#define PROM_AUTOBOOT 5
+#define PROM_OPEN 6
+#define PROM_READ 7
+#define PROM_WRITE 8
+#define PROM_IOCTL 9
+#define PROM_CLOSE 10
+#define PROM_GETCHAR 11
+#define PROM_PUTCHAR 12
+#define PROM_SHOWCHAR 13 /* XXX */
+#define PROM_GETS 14 /* XXX */
+#define PROM_PUTS 15 /* XXX */
+#define PROM_PRINTF 16 /* XXX */
+
+/* Network protocol entry points; exact purpose unclear. */
+#define PROM_INITPROTO 17 /* XXX */
+#define PROM_PROTOENABLE 18 /* XXX */
+#define PROM_PROTODISABLE 19 /* XXX */
+#define PROM_GETPKT 20 /* XXX */
+#define PROM_PUTPKT 21 /* XXX */
+
+/* More PROM entry points, probably related to VME read-modify-write (RMW) cycles. */
+#define PROM_ORW_RMW 22 /* XXX */
+#define PROM_ORH_RMW 23 /* XXX */
+#define PROM_ORB_RMW 24 /* XXX */
+#define PROM_ANDW_RMW 25 /* XXX */
+#define PROM_ANDH_RMW 26 /* XXX */
+#define PROM_ANDB_RMW 27 /* XXX */
+
+/* Cache handling stuff */
+#define PROM_FLUSHCACHE 28 /* XXX */
+#define PROM_CLEARCACHE 29 /* XXX */
+
+/* Libc alike stuff */
+#define PROM_SETJMP 30 /* XXX */
+#define PROM_LONGJMP 31 /* XXX */
+#define PROM_BEVUTLB 32 /* XXX */
+#define PROM_GETENV 33 /* XXX */
+#define PROM_SETENV 34 /* XXX */
+#define PROM_ATOB 35 /* XXX */
+#define PROM_STRCMP 36 /* XXX */
+#define PROM_STRLEN 37 /* XXX */
+#define PROM_STRCPY 38 /* XXX */
+#define PROM_STRCAT 39 /* XXX */
+
+/* Misc stuff */
+#define PROM_PARSER 40 /* XXX */
+#define PROM_RANGE 41 /* XXX */
+#define PROM_ARGVIZE 42 /* XXX */
+#define PROM_HELP 43 /* XXX */
+
+/* Entry points for some PROM commands */
+#define PROM_DUMPCMD 44 /* XXX */
+#define PROM_SETENVCMD 45 /* XXX */
+#define PROM_UNSETENVCMD 46 /* XXX */
+#define PROM_PRINTENVCMD 47 /* XXX */
+#define PROM_BEVEXCEPT 48 /* XXX */
+#define PROM_ENABLECMD 49 /* XXX */
+#define PROM_DISABLECMD 50 /* XXX */
+
+#define PROM_CLEARNOFAULT 51 /* XXX */
+#define PROM_NOTIMPLEMENT 52 /* XXX */
+
+#define PROM_NV_GET 53 /* XXX */
+#define PROM_NV_SET 54 /* XXX */
+
+extern char *prom_getenv(char *);
+
+#endif /* __ASM_MIPSPROM_H */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
new file mode 100644
index 000000000..7a7467d3f
--- /dev/null
+++ b/arch/mips/include/asm/mipsregs.h
@@ -0,0 +1,2927 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1997, 2000, 2001 by Ralf Baechle
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Modified for further R[236]000 support by Paul M. Antoine, 1996.
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000, 07 MIPS Technologies, Inc.
+ * Copyright (C) 2003, 2004 Maciej W. Rozycki
+ */
+#ifndef _ASM_MIPSREGS_H
+#define _ASM_MIPSREGS_H
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <asm/hazards.h>
+#include <asm/isa-rev.h>
+#include <asm/war.h>
+
+/*
+ * The following macros are especially useful for __asm__
+ * inline assembler.
+ */
+#ifndef __STR
+#define __STR(x) #x
+#endif
+#ifndef STR
+#define STR(x) __STR(x)
+#endif
+
+/*
+ * Configure language
+ */
+#ifdef __ASSEMBLY__
+#define _ULCAST_
+#define _U64CAST_
+#else
+#define _ULCAST_ (unsigned long)
+#define _U64CAST_ (u64)
+#endif
+
+/*
+ * Coprocessor 0 register names
+ */
+#define CP0_INDEX $0
+#define CP0_RANDOM $1
+#define CP0_ENTRYLO0 $2
+#define CP0_ENTRYLO1 $3
+#define CP0_CONF $3
+#define CP0_GLOBALNUMBER $3, 1
+#define CP0_CONTEXT $4
+#define CP0_PAGEMASK $5
+#define CP0_PAGEGRAIN $5, 1
+#define CP0_SEGCTL0 $5, 2
+#define CP0_SEGCTL1 $5, 3
+#define CP0_SEGCTL2 $5, 4
+#define CP0_WIRED $6
+#define CP0_INFO $7
+#define CP0_HWRENA $7
+#define CP0_BADVADDR $8
+#define CP0_BADINSTR $8, 1
+#define CP0_COUNT $9
+#define CP0_ENTRYHI $10
+#define CP0_GUESTCTL1 $10, 4
+#define CP0_GUESTCTL2 $10, 5
+#define CP0_GUESTCTL3 $10, 6
+#define CP0_COMPARE $11
+#define CP0_GUESTCTL0EXT $11, 4
+#define CP0_STATUS $12
+#define CP0_GUESTCTL0 $12, 6
+#define CP0_GTOFFSET $12, 7
+#define CP0_CAUSE $13
+#define CP0_EPC $14
+#define CP0_PRID $15
+#define CP0_EBASE $15, 1
+#define CP0_CMGCRBASE $15, 3
+#define CP0_CONFIG $16
+#define CP0_CONFIG3 $16, 3
+#define CP0_CONFIG5 $16, 5
+#define CP0_CONFIG6 $16, 6
+#define CP0_LLADDR $17
+#define CP0_WATCHLO $18
+#define CP0_WATCHHI $19
+#define CP0_XCONTEXT $20
+#define CP0_FRAMEMASK $21
+#define CP0_DIAGNOSTIC $22
+#define CP0_DIAGNOSTIC1 $22, 1
+#define CP0_DEBUG $23
+#define CP0_DEPC $24
+#define CP0_PERFORMANCE $25
+#define CP0_ECC $26
+#define CP0_CACHEERR $27
+#define CP0_TAGLO $28
+#define CP0_TAGHI $29
+#define CP0_ERROREPC $30
+#define CP0_DESAVE $31
+
+/*
+ * R4640/R4650 cp0 register names. These registers are listed
+ * here only for completeness; without an MMU these CPUs are not usable
+ * by Linux. A future ELKS port might make Linux run on them
+ * though ...
+ */
+#define CP0_IBASE $0
+#define CP0_IBOUND $1
+#define CP0_DBASE $2
+#define CP0_DBOUND $3
+#define CP0_CALG $17
+#define CP0_IWATCH $18
+#define CP0_DWATCH $19
+
+/*
+ * Coprocessor 0 Set 1 register names
+ */
+#define CP0_S1_DERRADDR0 $26
+#define CP0_S1_DERRADDR1 $27
+#define CP0_S1_INTCONTROL $20
+
+/*
+ * Coprocessor 0 Set 2 register names
+ */
+#define CP0_S2_SRSCTL $12 /* MIPSR2 */
+
+/*
+ * Coprocessor 0 Set 3 register names
+ */
+#define CP0_S3_SRSMAP $12 /* MIPSR2 */
+
+/*
+ * TX39 Series
+ */
+#define CP0_TX39_CACHE $7
+
+
+/* Generic EntryLo bit definitions */
+#define ENTRYLO_G (_ULCAST_(1) << 0)
+#define ENTRYLO_V (_ULCAST_(1) << 1)
+#define ENTRYLO_D (_ULCAST_(1) << 2)
+#define ENTRYLO_C_SHIFT 3
+#define ENTRYLO_C (_ULCAST_(7) << ENTRYLO_C_SHIFT)
+
+/* R3000 EntryLo bit definitions */
+#define R3K_ENTRYLO_G (_ULCAST_(1) << 8)
+#define R3K_ENTRYLO_V (_ULCAST_(1) << 9)
+#define R3K_ENTRYLO_D (_ULCAST_(1) << 10)
+#define R3K_ENTRYLO_N (_ULCAST_(1) << 11)
+
+/* MIPS32/64 EntryLo bit definitions */
+#define MIPS_ENTRYLO_PFN_SHIFT 6
+#define MIPS_ENTRYLO_XI (_ULCAST_(1) << (BITS_PER_LONG - 2))
+#define MIPS_ENTRYLO_RI (_ULCAST_(1) << (BITS_PER_LONG - 1))
+
+/*
+ * MIPSr6+ GlobalNumber register definitions
+ */
+#define MIPS_GLOBALNUMBER_VP_SHF 0
+#define MIPS_GLOBALNUMBER_VP (_ULCAST_(0xff) << MIPS_GLOBALNUMBER_VP_SHF)
+#define MIPS_GLOBALNUMBER_CORE_SHF 8
+#define MIPS_GLOBALNUMBER_CORE (_ULCAST_(0xff) << MIPS_GLOBALNUMBER_CORE_SHF)
+#define MIPS_GLOBALNUMBER_CLUSTER_SHF 16
+#define MIPS_GLOBALNUMBER_CLUSTER (_ULCAST_(0xf) << MIPS_GLOBALNUMBER_CLUSTER_SHF)
+
+/*
+ * Values for PageMask register
+ */
+#ifdef CONFIG_CPU_VR41XX
+
+/* The VR41xx uses a non-standard PageMask encoding with a 1 KiB base page size. */
+
+#define PM_1K 0x00000000
+#define PM_4K 0x00001800
+#define PM_16K 0x00007800
+#define PM_64K 0x0001f800
+#define PM_256K 0x0007f800
+
+#else
+
+#define PM_4K 0x00000000
+#define PM_8K 0x00002000
+#define PM_16K 0x00006000
+#define PM_32K 0x0000e000
+#define PM_64K 0x0001e000
+#define PM_128K 0x0003e000
+#define PM_256K 0x0007e000
+#define PM_512K 0x000fe000
+#define PM_1M 0x001fe000
+#define PM_2M 0x003fe000
+#define PM_4M 0x007fe000
+#define PM_8M 0x00ffe000
+#define PM_16M 0x01ffe000
+#define PM_32M 0x03ffe000
+#define PM_64M 0x07ffe000
+#define PM_256M 0x1fffe000
+#define PM_1G 0x7fffe000
+
+#endif
+
+/*
+ * Default page size for a given kernel configuration
+ */
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define PM_DEFAULT_MASK PM_4K
+#elif defined(CONFIG_PAGE_SIZE_8KB)
+#define PM_DEFAULT_MASK PM_8K
+#elif defined(CONFIG_PAGE_SIZE_16KB)
+#define PM_DEFAULT_MASK PM_16K
+#elif defined(CONFIG_PAGE_SIZE_32KB)
+#define PM_DEFAULT_MASK PM_32K
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define PM_DEFAULT_MASK PM_64K
+#else
+#error Bad page size configuration!
+#endif
+
+/*
+ * Default huge tlb size for a given kernel configuration
+ */
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define PM_HUGE_MASK PM_1M
+#elif defined(CONFIG_PAGE_SIZE_8KB)
+#define PM_HUGE_MASK PM_4M
+#elif defined(CONFIG_PAGE_SIZE_16KB)
+#define PM_HUGE_MASK PM_16M
+#elif defined(CONFIG_PAGE_SIZE_32KB)
+#define PM_HUGE_MASK PM_64M
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define PM_HUGE_MASK PM_256M
+#elif defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
+#error Bad page size configuration for hugetlbfs!
+#endif
+
+/*
+ * Wired register bits
+ */
+#define MIPSR6_WIRED_LIMIT_SHIFT 16
+#define MIPSR6_WIRED_LIMIT (_ULCAST_(0xffff) << MIPSR6_WIRED_LIMIT_SHIFT)
+#define MIPSR6_WIRED_WIRED_SHIFT 0
+#define MIPSR6_WIRED_WIRED (_ULCAST_(0xffff) << MIPSR6_WIRED_WIRED_SHIFT)
+
+/*
+ * Values used for computation of new tlb entries
+ */
+#define PL_4K 12
+#define PL_16K 14
+#define PL_64K 16
+#define PL_256K 18
+#define PL_1M 20
+#define PL_4M 22
+#define PL_16M 24
+#define PL_64M 26
+#define PL_256M 28
+
+/*
+ * PageGrain bits
+ */
+#define PG_RIE (_ULCAST_(1) << 31)
+#define PG_XIE (_ULCAST_(1) << 30)
+#define PG_ELPA (_ULCAST_(1) << 29)
+#define PG_ESP (_ULCAST_(1) << 28)
+#define PG_IEC (_ULCAST_(1) << 27)
+
+/* MIPS32/64 EntryHI bit definitions */
+#define MIPS_ENTRYHI_EHINV (_ULCAST_(1) << 10)
+#define MIPS_ENTRYHI_ASIDX (_ULCAST_(0x3) << 8)
+#define MIPS_ENTRYHI_ASID (_ULCAST_(0xff) << 0)
+
+/*
+ * R4x00 interrupt enable / cause bits
+ */
+#define IE_SW0 (_ULCAST_(1) << 8)
+#define IE_SW1 (_ULCAST_(1) << 9)
+#define IE_IRQ0 (_ULCAST_(1) << 10)
+#define IE_IRQ1 (_ULCAST_(1) << 11)
+#define IE_IRQ2 (_ULCAST_(1) << 12)
+#define IE_IRQ3 (_ULCAST_(1) << 13)
+#define IE_IRQ4 (_ULCAST_(1) << 14)
+#define IE_IRQ5 (_ULCAST_(1) << 15)
+
+/*
+ * R4x00 interrupt cause bits
+ */
+#define C_SW0 (_ULCAST_(1) << 8)
+#define C_SW1 (_ULCAST_(1) << 9)
+#define C_IRQ0 (_ULCAST_(1) << 10)
+#define C_IRQ1 (_ULCAST_(1) << 11)
+#define C_IRQ2 (_ULCAST_(1) << 12)
+#define C_IRQ3 (_ULCAST_(1) << 13)
+#define C_IRQ4 (_ULCAST_(1) << 14)
+#define C_IRQ5 (_ULCAST_(1) << 15)
+
+/*
+ * Bitfields in the R4xx0 cp0 status register
+ */
+#define ST0_IE 0x00000001
+#define ST0_EXL 0x00000002
+#define ST0_ERL 0x00000004
+#define ST0_KSU 0x00000018
+# define KSU_USER 0x00000010
+# define KSU_SUPERVISOR 0x00000008
+# define KSU_KERNEL 0x00000000
+#define ST0_UX 0x00000020
+#define ST0_SX 0x00000040
+#define ST0_KX 0x00000080
+#define ST0_DE 0x00010000
+#define ST0_CE 0x00020000
+
+/*
+ * Setting c0_status.co enables Hit_Writeback and Hit_Writeback_Invalidate
+ * cacheops in userspace. This bit exists only on RM7000 and RM9000
+ * processors.
+ */
+#define ST0_CO 0x08000000
+
+/*
+ * Bitfields in the R[23]000 cp0 status register.
+ */
+#define ST0_IEC 0x00000001
+#define ST0_KUC 0x00000002
+#define ST0_IEP 0x00000004
+#define ST0_KUP 0x00000008
+#define ST0_IEO 0x00000010
+#define ST0_KUO 0x00000020
+/* bits 6 & 7 are reserved on R[23]000 */
+#define ST0_ISC 0x00010000
+#define ST0_SWC 0x00020000
+#define ST0_CM 0x00080000
+
+/*
+ * Bits specific to the R4640/R4650
+ */
+#define ST0_UM (_ULCAST_(1) << 4)
+#define ST0_IL (_ULCAST_(1) << 23)
+#define ST0_DL (_ULCAST_(1) << 24)
+
+/*
+ * Enable the MIPS MDMX and DSP ASEs
+ */
+#define ST0_MX 0x01000000
+
+/*
+ * Status register bits available in all MIPS CPUs.
+ */
+#define ST0_IM 0x0000ff00
+#define STATUSB_IP0 8
+#define STATUSF_IP0 (_ULCAST_(1) << 8)
+#define STATUSB_IP1 9
+#define STATUSF_IP1 (_ULCAST_(1) << 9)
+#define STATUSB_IP2 10
+#define STATUSF_IP2 (_ULCAST_(1) << 10)
+#define STATUSB_IP3 11
+#define STATUSF_IP3 (_ULCAST_(1) << 11)
+#define STATUSB_IP4 12
+#define STATUSF_IP4 (_ULCAST_(1) << 12)
+#define STATUSB_IP5 13
+#define STATUSF_IP5 (_ULCAST_(1) << 13)
+#define STATUSB_IP6 14
+#define STATUSF_IP6 (_ULCAST_(1) << 14)
+#define STATUSB_IP7 15
+#define STATUSF_IP7 (_ULCAST_(1) << 15)
+#define STATUSB_IP8 0
+#define STATUSF_IP8 (_ULCAST_(1) << 0)
+#define STATUSB_IP9 1
+#define STATUSF_IP9 (_ULCAST_(1) << 1)
+#define STATUSB_IP10 2
+#define STATUSF_IP10 (_ULCAST_(1) << 2)
+#define STATUSB_IP11 3
+#define STATUSF_IP11 (_ULCAST_(1) << 3)
+#define STATUSB_IP12 4
+#define STATUSF_IP12 (_ULCAST_(1) << 4)
+#define STATUSB_IP13 5
+#define STATUSF_IP13 (_ULCAST_(1) << 5)
+#define STATUSB_IP14 6
+#define STATUSF_IP14 (_ULCAST_(1) << 6)
+#define STATUSB_IP15 7
+#define STATUSF_IP15 (_ULCAST_(1) << 7)
+#define ST0_CH 0x00040000
+#define ST0_NMI 0x00080000
+#define ST0_SR 0x00100000
+#define ST0_TS 0x00200000
+#define ST0_BEV 0x00400000
+#define ST0_RE 0x02000000
+#define ST0_FR 0x04000000
+#define ST0_CU 0xf0000000
+#define ST0_CU0 0x10000000
+#define ST0_CU1 0x20000000
+#define ST0_CU2 0x40000000
+#define ST0_CU3 0x80000000
+#define ST0_XX 0x80000000 /* MIPS IV naming */
+
+/* in-kernel enabled CUs */
+#ifdef CONFIG_CPU_LOONGSON64
+#define ST0_KERNEL_CUMASK (ST0_CU0 | ST0_CU2)
+#else
+#define ST0_KERNEL_CUMASK ST0_CU0
+#endif
+
+/*
+ * Bitfields and bit numbers in the coprocessor 0 IntCtl register. (MIPSR2)
+ */
+#define INTCTLB_IPFDC 23
+#define INTCTLF_IPFDC (_ULCAST_(7) << INTCTLB_IPFDC)
+#define INTCTLB_IPPCI 26
+#define INTCTLF_IPPCI (_ULCAST_(7) << INTCTLB_IPPCI)
+#define INTCTLB_IPTI 29
+#define INTCTLF_IPTI (_ULCAST_(7) << INTCTLB_IPTI)
+
+/*
+ * Bitfields and bit numbers in the coprocessor 0 cause register.
+ *
+ * Refer to your MIPS R4xx0 manual, chapter 5 for explanation.
+ */
+#define CAUSEB_EXCCODE 2
+#define CAUSEF_EXCCODE (_ULCAST_(31) << 2)
+#define CAUSEB_IP 8
+#define CAUSEF_IP (_ULCAST_(255) << 8)
+#define CAUSEB_IP0 8
+#define CAUSEF_IP0 (_ULCAST_(1) << 8)
+#define CAUSEB_IP1 9
+#define CAUSEF_IP1 (_ULCAST_(1) << 9)
+#define CAUSEB_IP2 10
+#define CAUSEF_IP2 (_ULCAST_(1) << 10)
+#define CAUSEB_IP3 11
+#define CAUSEF_IP3 (_ULCAST_(1) << 11)
+#define CAUSEB_IP4 12
+#define CAUSEF_IP4 (_ULCAST_(1) << 12)
+#define CAUSEB_IP5 13
+#define CAUSEF_IP5 (_ULCAST_(1) << 13)
+#define CAUSEB_IP6 14
+#define CAUSEF_IP6 (_ULCAST_(1) << 14)
+#define CAUSEB_IP7 15
+#define CAUSEF_IP7 (_ULCAST_(1) << 15)
+#define CAUSEB_FDCI 21
+#define CAUSEF_FDCI (_ULCAST_(1) << 21)
+#define CAUSEB_WP 22
+#define CAUSEF_WP (_ULCAST_(1) << 22)
+#define CAUSEB_IV 23
+#define CAUSEF_IV (_ULCAST_(1) << 23)
+#define CAUSEB_PCI 26
+#define CAUSEF_PCI (_ULCAST_(1) << 26)
+#define CAUSEB_DC 27
+#define CAUSEF_DC (_ULCAST_(1) << 27)
+#define CAUSEB_CE 28
+#define CAUSEF_CE (_ULCAST_(3) << 28)
+#define CAUSEB_TI 30
+#define CAUSEF_TI (_ULCAST_(1) << 30)
+#define CAUSEB_BD 31
+#define CAUSEF_BD (_ULCAST_(1) << 31)
+
+/*
+ * Cause.ExcCode trap codes.
+ */
+#define EXCCODE_INT 0 /* Interrupt pending */
+#define EXCCODE_MOD 1 /* TLB modified fault */
+#define EXCCODE_TLBL 2 /* TLB miss on load or ifetch */
+#define EXCCODE_TLBS 3 /* TLB miss on a store */
+#define EXCCODE_ADEL 4 /* Address error on a load or ifetch */
+#define EXCCODE_ADES 5 /* Address error on a store */
+#define EXCCODE_IBE 6 /* Bus error on an ifetch */
+#define EXCCODE_DBE 7 /* Bus error on a load or store */
+#define EXCCODE_SYS 8 /* System call */
+#define EXCCODE_BP 9 /* Breakpoint */
+#define EXCCODE_RI 10 /* Reserved instruction exception */
+#define EXCCODE_CPU 11 /* Coprocessor unusable */
+#define EXCCODE_OV 12 /* Arithmetic overflow */
+#define EXCCODE_TR 13 /* Trap instruction */
+#define EXCCODE_MSAFPE 14 /* MSA floating point exception */
+#define EXCCODE_FPE 15 /* Floating point exception */
+#define EXCCODE_TLBRI 19 /* TLB Read-Inhibit exception */
+#define EXCCODE_TLBXI 20 /* TLB Execution-Inhibit exception */
+#define EXCCODE_MSADIS 21 /* MSA disabled exception */
+#define EXCCODE_MDMX 22 /* MDMX unusable exception */
+#define EXCCODE_WATCH 23 /* Watch address reference */
+#define EXCCODE_MCHECK 24 /* Machine check */
+#define EXCCODE_THREAD 25 /* Thread exceptions (MT) */
+#define EXCCODE_DSPDIS 26 /* DSP disabled exception */
+#define EXCCODE_GE 27 /* Virtualized guest exception (VZ) */
+#define EXCCODE_CACHEERR 30 /* Parity/ECC occurred on a core */
+
+/* Implementation specific trap codes used by MIPS cores */
+#define MIPS_EXCCODE_TLBPAR 16 /* TLB parity error exception */
+
+/* Implementation specific trap codes used by Loongson cores */
+#define LOONGSON_EXCCODE_GSEXC 16 /* Loongson-specific exception */
+
+/*
+ * Bits in the coprocessor 0 config register.
+ */
+/* Generic bits. */
+#define CONF_CM_CACHABLE_NO_WA 0
+#define CONF_CM_CACHABLE_WA 1
+#define CONF_CM_UNCACHED 2
+#define CONF_CM_CACHABLE_NONCOHERENT 3
+#define CONF_CM_CACHABLE_CE 4
+#define CONF_CM_CACHABLE_COW 5
+#define CONF_CM_CACHABLE_CUW 6
+#define CONF_CM_CACHABLE_ACCELERATED 7
+#define CONF_CM_CMASK 7
+#define CONF_BE (_ULCAST_(1) << 15)
+
+/* Bits common to various processors. */
+#define CONF_CU (_ULCAST_(1) << 3)
+#define CONF_DB (_ULCAST_(1) << 4)
+#define CONF_IB (_ULCAST_(1) << 5)
+#define CONF_DC (_ULCAST_(7) << 6)
+#define CONF_IC (_ULCAST_(7) << 9)
+#define CONF_EB (_ULCAST_(1) << 13)
+#define CONF_EM (_ULCAST_(1) << 14)
+#define CONF_SM (_ULCAST_(1) << 16)
+#define CONF_SC (_ULCAST_(1) << 17)
+#define CONF_EW (_ULCAST_(3) << 18)
+#define CONF_EP (_ULCAST_(15)<< 24)
+#define CONF_EC (_ULCAST_(7) << 28)
+#define CONF_CM (_ULCAST_(1) << 31)
+
+/* Bits specific to the R4xx0. */
+#define R4K_CONF_SW (_ULCAST_(1) << 20)
+#define R4K_CONF_SS (_ULCAST_(1) << 21)
+#define R4K_CONF_SB (_ULCAST_(3) << 22)
+
+/* Bits specific to the R5000. */
+#define R5K_CONF_SE (_ULCAST_(1) << 12)
+#define R5K_CONF_SS (_ULCAST_(3) << 20)
+
+/* Bits specific to the RM7000. */
+#define RM7K_CONF_SE (_ULCAST_(1) << 3)
+#define RM7K_CONF_TE (_ULCAST_(1) << 12)
+#define RM7K_CONF_CLK (_ULCAST_(1) << 16)
+#define RM7K_CONF_TC (_ULCAST_(1) << 17)
+#define RM7K_CONF_SI (_ULCAST_(3) << 20)
+#define RM7K_CONF_SC (_ULCAST_(1) << 31)
+
+/* Bits specific to the R10000. */
+#define R10K_CONF_DN (_ULCAST_(3) << 3)
+#define R10K_CONF_CT (_ULCAST_(1) << 5)
+#define R10K_CONF_PE (_ULCAST_(1) << 6)
+#define R10K_CONF_PM (_ULCAST_(3) << 7)
+#define R10K_CONF_EC (_ULCAST_(15)<< 9)
+#define R10K_CONF_SB (_ULCAST_(1) << 13)
+#define R10K_CONF_SK (_ULCAST_(1) << 14)
+#define R10K_CONF_SS (_ULCAST_(7) << 16)
+#define R10K_CONF_SC (_ULCAST_(7) << 19)
+#define R10K_CONF_DC (_ULCAST_(7) << 26)
+#define R10K_CONF_IC (_ULCAST_(7) << 29)
+
+/* Bits specific to the VR41xx. */
+#define VR41_CONF_CS (_ULCAST_(1) << 12)
+#define VR41_CONF_P4K (_ULCAST_(1) << 13)
+#define VR41_CONF_BP (_ULCAST_(1) << 16)
+#define VR41_CONF_M16 (_ULCAST_(1) << 20)
+#define VR41_CONF_AD (_ULCAST_(1) << 23)
+
+/* Bits specific to the R30xx. */
+#define R30XX_CONF_FDM (_ULCAST_(1) << 19)
+#define R30XX_CONF_REV (_ULCAST_(1) << 22)
+#define R30XX_CONF_AC (_ULCAST_(1) << 23)
+#define R30XX_CONF_RF (_ULCAST_(1) << 24)
+#define R30XX_CONF_HALT (_ULCAST_(1) << 25)
+#define R30XX_CONF_FPINT (_ULCAST_(7) << 26)
+#define R30XX_CONF_DBR (_ULCAST_(1) << 29)
+#define R30XX_CONF_SB (_ULCAST_(1) << 30)
+#define R30XX_CONF_LOCK (_ULCAST_(1) << 31)
+
+/* Bits specific to the TX49. */
+#define TX49_CONF_DC (_ULCAST_(1) << 16)
+#define TX49_CONF_IC (_ULCAST_(1) << 17) /* conflict with CONF_SC */
+#define TX49_CONF_HALT (_ULCAST_(1) << 18)
+#define TX49_CONF_CWFON (_ULCAST_(1) << 27)
+
+/* Bits specific to the MIPS32/64 PRA. */
+#define MIPS_CONF_VI (_ULCAST_(1) << 3)
+#define MIPS_CONF_MT (_ULCAST_(7) << 7)
+#define MIPS_CONF_MT_TLB (_ULCAST_(1) << 7)
+#define MIPS_CONF_MT_FTLB (_ULCAST_(4) << 7)
+#define MIPS_CONF_AR (_ULCAST_(7) << 10)
+#define MIPS_CONF_AT (_ULCAST_(3) << 13)
+#define MIPS_CONF_BE (_ULCAST_(1) << 15)
+#define MIPS_CONF_BM (_ULCAST_(1) << 16)
+#define MIPS_CONF_MM (_ULCAST_(3) << 17)
+#define MIPS_CONF_MM_SYSAD (_ULCAST_(1) << 17)
+#define MIPS_CONF_MM_FULL (_ULCAST_(2) << 17)
+#define MIPS_CONF_SB (_ULCAST_(1) << 21)
+#define MIPS_CONF_UDI (_ULCAST_(1) << 22)
+#define MIPS_CONF_DSP (_ULCAST_(1) << 23)
+#define MIPS_CONF_ISP (_ULCAST_(1) << 24)
+#define MIPS_CONF_KU (_ULCAST_(3) << 25)
+#define MIPS_CONF_K23 (_ULCAST_(3) << 28)
+#define MIPS_CONF_M (_ULCAST_(1) << 31)
+
+/*
+ * Bits in the MIPS32/64 PRA coprocessor 0 config registers 1 and above.
+ */
+#define MIPS_CONF1_FP (_ULCAST_(1) << 0)
+#define MIPS_CONF1_EP (_ULCAST_(1) << 1)
+#define MIPS_CONF1_CA (_ULCAST_(1) << 2)
+#define MIPS_CONF1_WR (_ULCAST_(1) << 3)
+#define MIPS_CONF1_PC (_ULCAST_(1) << 4)
+#define MIPS_CONF1_MD (_ULCAST_(1) << 5)
+#define MIPS_CONF1_C2 (_ULCAST_(1) << 6)
+#define MIPS_CONF1_DA_SHF 7
+#define MIPS_CONF1_DA_SZ 3
+#define MIPS_CONF1_DA (_ULCAST_(7) << 7)
+#define MIPS_CONF1_DL_SHF 10
+#define MIPS_CONF1_DL_SZ 3
+#define MIPS_CONF1_DL (_ULCAST_(7) << 10)
+#define MIPS_CONF1_DS_SHF 13
+#define MIPS_CONF1_DS_SZ 3
+#define MIPS_CONF1_DS (_ULCAST_(7) << 13)
+#define MIPS_CONF1_IA_SHF 16
+#define MIPS_CONF1_IA_SZ 3
+#define MIPS_CONF1_IA (_ULCAST_(7) << 16)
+#define MIPS_CONF1_IL_SHF 19
+#define MIPS_CONF1_IL_SZ 3
+#define MIPS_CONF1_IL (_ULCAST_(7) << 19)
+#define MIPS_CONF1_IS_SHF 22
+#define MIPS_CONF1_IS_SZ 3
+#define MIPS_CONF1_IS (_ULCAST_(7) << 22)
+#define MIPS_CONF1_TLBS_SHIFT (25)
+#define MIPS_CONF1_TLBS_SIZE (6)
+#define MIPS_CONF1_TLBS (_ULCAST_(63) << MIPS_CONF1_TLBS_SHIFT)
+
+#define MIPS_CONF2_SA (_ULCAST_(15)<< 0)
+#define MIPS_CONF2_SL (_ULCAST_(15)<< 4)
+#define MIPS_CONF2_SS (_ULCAST_(15)<< 8)
+#define MIPS_CONF2_SU (_ULCAST_(15)<< 12)
+#define MIPS_CONF2_TA (_ULCAST_(15)<< 16)
+#define MIPS_CONF2_TL (_ULCAST_(15)<< 20)
+#define MIPS_CONF2_TS (_ULCAST_(15)<< 24)
+#define MIPS_CONF2_TU (_ULCAST_(7) << 28)
+
+#define MIPS_CONF3_TL (_ULCAST_(1) << 0)
+#define MIPS_CONF3_SM (_ULCAST_(1) << 1)
+#define MIPS_CONF3_MT (_ULCAST_(1) << 2)
+#define MIPS_CONF3_CDMM (_ULCAST_(1) << 3)
+#define MIPS_CONF3_SP (_ULCAST_(1) << 4)
+#define MIPS_CONF3_VINT (_ULCAST_(1) << 5)
+#define MIPS_CONF3_VEIC (_ULCAST_(1) << 6)
+#define MIPS_CONF3_LPA (_ULCAST_(1) << 7)
+#define MIPS_CONF3_ITL (_ULCAST_(1) << 8)
+#define MIPS_CONF3_CTXTC (_ULCAST_(1) << 9)
+#define MIPS_CONF3_DSP (_ULCAST_(1) << 10)
+#define MIPS_CONF3_DSP2P (_ULCAST_(1) << 11)
+#define MIPS_CONF3_RXI (_ULCAST_(1) << 12)
+#define MIPS_CONF3_ULRI (_ULCAST_(1) << 13)
+#define MIPS_CONF3_ISA (_ULCAST_(3) << 14)
+#define MIPS_CONF3_ISA_OE (_ULCAST_(1) << 16)
+#define MIPS_CONF3_MCU (_ULCAST_(1) << 17)
+#define MIPS_CONF3_MMAR (_ULCAST_(7) << 18)
+#define MIPS_CONF3_IPLW (_ULCAST_(3) << 21)
+#define MIPS_CONF3_VZ (_ULCAST_(1) << 23)
+#define MIPS_CONF3_PW (_ULCAST_(1) << 24)
+#define MIPS_CONF3_SC (_ULCAST_(1) << 25)
+#define MIPS_CONF3_BI (_ULCAST_(1) << 26)
+#define MIPS_CONF3_BP (_ULCAST_(1) << 27)
+#define MIPS_CONF3_MSA (_ULCAST_(1) << 28)
+#define MIPS_CONF3_CMGCR (_ULCAST_(1) << 29)
+#define MIPS_CONF3_BPG (_ULCAST_(1) << 30)
+
+#define MIPS_CONF4_MMUSIZEEXT_SHIFT (0)
+#define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0)
+#define MIPS_CONF4_FTLBSETS_SHIFT (0)
+#define MIPS_CONF4_FTLBSETS (_ULCAST_(15) << MIPS_CONF4_FTLBSETS_SHIFT)
+#define MIPS_CONF4_FTLBWAYS_SHIFT (4)
+#define MIPS_CONF4_FTLBWAYS (_ULCAST_(15) << MIPS_CONF4_FTLBWAYS_SHIFT)
+#define MIPS_CONF4_FTLBPAGESIZE_SHIFT (8)
+/* bits 10:8 in FTLB-only configurations */
+#define MIPS_CONF4_FTLBPAGESIZE (_ULCAST_(7) << MIPS_CONF4_FTLBPAGESIZE_SHIFT)
+/* bits 12:8 in VTLB-FTLB only configurations */
+#define MIPS_CONF4_VFTLBPAGESIZE (_ULCAST_(31) << MIPS_CONF4_FTLBPAGESIZE_SHIFT)
+#define MIPS_CONF4_MMUEXTDEF (_ULCAST_(3) << 14)
+#define MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT (_ULCAST_(1) << 14)
+#define MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT (_ULCAST_(2) << 14)
+#define MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT (_ULCAST_(3) << 14)
+#define MIPS_CONF4_KSCREXIST_SHIFT (16)
+#define MIPS_CONF4_KSCREXIST (_ULCAST_(255) << MIPS_CONF4_KSCREXIST_SHIFT)
+#define MIPS_CONF4_VTLBSIZEEXT_SHIFT (24)
+#define MIPS_CONF4_VTLBSIZEEXT (_ULCAST_(15) << MIPS_CONF4_VTLBSIZEEXT_SHIFT)
+#define MIPS_CONF4_AE (_ULCAST_(1) << 28)
+#define MIPS_CONF4_IE (_ULCAST_(3) << 29)
+#define MIPS_CONF4_TLBINV (_ULCAST_(2) << 29)
+
+#define MIPS_CONF5_NF (_ULCAST_(1) << 0)
+#define MIPS_CONF5_UFR (_ULCAST_(1) << 2)
+#define MIPS_CONF5_MRP (_ULCAST_(1) << 3)
+#define MIPS_CONF5_LLB (_ULCAST_(1) << 4)
+#define MIPS_CONF5_MVH (_ULCAST_(1) << 5)
+#define MIPS_CONF5_VP (_ULCAST_(1) << 7)
+#define MIPS_CONF5_SBRI (_ULCAST_(1) << 6)
+#define MIPS_CONF5_FRE (_ULCAST_(1) << 8)
+#define MIPS_CONF5_UFE (_ULCAST_(1) << 9)
+#define MIPS_CONF5_CA2 (_ULCAST_(1) << 14)
+#define MIPS_CONF5_MI (_ULCAST_(1) << 17)
+#define MIPS_CONF5_CRCP (_ULCAST_(1) << 18)
+#define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27)
+#define MIPS_CONF5_EVA (_ULCAST_(1) << 28)
+#define MIPS_CONF5_CV (_ULCAST_(1) << 29)
+#define MIPS_CONF5_K (_ULCAST_(1) << 30)
+
+/* Config6 feature bits for proAptiv/P5600 */
+
+/* Jump register cache prediction disable */
+#define MTI_CONF6_JRCD (_ULCAST_(1) << 0)
+/* MIPSr6 extensions enable */
+#define MTI_CONF6_R6 (_ULCAST_(1) << 2)
+/* IFU Performance Control */
+#define MTI_CONF6_IFUPERFCTL (_ULCAST_(3) << 10)
+#define MTI_CONF6_SYND (_ULCAST_(1) << 13)
+/* Sleep state performance counter disable */
+#define MTI_CONF6_SPCD (_ULCAST_(1) << 14)
+/* proAptiv FTLB on/off bit */
+#define MTI_CONF6_FTLBEN (_ULCAST_(1) << 15)
+/* Disable load/store bonding */
+#define MTI_CONF6_DLSB (_ULCAST_(1) << 21)
+/* FTLB probability bits */
+#define MTI_CONF6_FTLBP_SHIFT (16)
+
+/* Config6 feature bits for Loongson-3 */
+
+/* Loongson-3 internal timer bit */
+#define LOONGSON_CONF6_INTIMER (_ULCAST_(1) << 6)
+/* Loongson-3 external timer bit */
+#define LOONGSON_CONF6_EXTIMER (_ULCAST_(1) << 7)
+/* Loongson-3 SFB on/off bit, STFill in manual */
+#define LOONGSON_CONF6_SFBEN (_ULCAST_(1) << 8)
+/* Loongson-3's LL on exclusive cacheline */
+#define LOONGSON_CONF6_LLEXC (_ULCAST_(1) << 16)
+/* Loongson-3's SC has a random delay */
+#define LOONGSON_CONF6_SCRAND (_ULCAST_(1) << 17)
+/* Loongson-3 FTLB on/off bit, VTLBOnly in manual */
+#define LOONGSON_CONF6_FTLBDIS (_ULCAST_(1) << 22)
+
+#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
+
+#define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
+
+#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
+#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
+
+/* Ingenic HPTLB off bits */
+#define XBURST_PAGECTRL_HPTLB_DIS 0xa9000000
+
+/* Ingenic Config7 bits */
+#define MIPS_CONF7_BTB_LOOP_EN (_ULCAST_(1) << 4)
+
+/* Config7 Bits specific to MIPS Technologies. */
+
+/* Performance counters implemented Per TC */
+#define MTI_CONF7_PTC (_ULCAST_(1) << 19)
+
+/* WatchLo* register definitions */
+#define MIPS_WATCHLO_IRW (_ULCAST_(0x7) << 0)
+
+/* WatchHi* register definitions */
+#define MIPS_WATCHHI_M (_ULCAST_(1) << 31)
+#define MIPS_WATCHHI_G (_ULCAST_(1) << 30)
+#define MIPS_WATCHHI_WM (_ULCAST_(0x3) << 28)
+#define MIPS_WATCHHI_WM_R_RVA (_ULCAST_(0) << 28)
+#define MIPS_WATCHHI_WM_R_GPA (_ULCAST_(1) << 28)
+#define MIPS_WATCHHI_WM_G_GVA (_ULCAST_(2) << 28)
+#define MIPS_WATCHHI_EAS (_ULCAST_(0x3) << 24)
+#define MIPS_WATCHHI_ASID (_ULCAST_(0xff) << 16)
+#define MIPS_WATCHHI_MASK (_ULCAST_(0x1ff) << 3)
+#define MIPS_WATCHHI_I (_ULCAST_(1) << 2)
+#define MIPS_WATCHHI_R (_ULCAST_(1) << 1)
+#define MIPS_WATCHHI_W (_ULCAST_(1) << 0)
+#define MIPS_WATCHHI_IRW (_ULCAST_(0x7) << 0)
+
+/* PerfCnt control register definitions */
+#define MIPS_PERFCTRL_EXL (_ULCAST_(1) << 0)
+#define MIPS_PERFCTRL_K (_ULCAST_(1) << 1)
+#define MIPS_PERFCTRL_S (_ULCAST_(1) << 2)
+#define MIPS_PERFCTRL_U (_ULCAST_(1) << 3)
+#define MIPS_PERFCTRL_IE (_ULCAST_(1) << 4)
+#define MIPS_PERFCTRL_EVENT_S 5
+#define MIPS_PERFCTRL_EVENT (_ULCAST_(0x3ff) << MIPS_PERFCTRL_EVENT_S)
+#define MIPS_PERFCTRL_PCTD (_ULCAST_(1) << 15)
+#define MIPS_PERFCTRL_EC (_ULCAST_(0x3) << 23)
+#define MIPS_PERFCTRL_EC_R (_ULCAST_(0) << 23)
+#define MIPS_PERFCTRL_EC_RI (_ULCAST_(1) << 23)
+#define MIPS_PERFCTRL_EC_G (_ULCAST_(2) << 23)
+#define MIPS_PERFCTRL_EC_GRI (_ULCAST_(3) << 23)
+#define MIPS_PERFCTRL_W (_ULCAST_(1) << 30)
+#define MIPS_PERFCTRL_M (_ULCAST_(1) << 31)
+
+/* PerfCnt control register MT extensions used by MIPS cores */
+#define MIPS_PERFCTRL_VPEID_S 16
+#define MIPS_PERFCTRL_VPEID (_ULCAST_(0xf) << MIPS_PERFCTRL_VPEID_S)
+#define MIPS_PERFCTRL_TCID_S 22
+#define MIPS_PERFCTRL_TCID (_ULCAST_(0xff) << MIPS_PERFCTRL_TCID_S)
+#define MIPS_PERFCTRL_MT_EN (_ULCAST_(0x3) << 20)
+#define MIPS_PERFCTRL_MT_EN_ALL (_ULCAST_(0) << 20)
+#define MIPS_PERFCTRL_MT_EN_VPE (_ULCAST_(1) << 20)
+#define MIPS_PERFCTRL_MT_EN_TC (_ULCAST_(2) << 20)
+
+/* PerfCnt control register MT extensions used by BMIPS5000 */
+#define BRCM_PERFCTRL_TC (_ULCAST_(1) << 30)
+
+/* PerfCnt control register MT extensions used by Netlogic XLR */
+#define XLR_PERFCTRL_ALLTHREADS (_ULCAST_(1) << 13)
+
+/* MAAR bit definitions */
+#define MIPS_MAAR_VH (_U64CAST_(1) << 63)
+#define MIPS_MAAR_ADDR GENMASK_ULL(55, 12)
+#define MIPS_MAAR_ADDR_SHIFT 12
+#define MIPS_MAAR_S (_ULCAST_(1) << 1)
+#define MIPS_MAAR_VL (_ULCAST_(1) << 0)
+#ifdef CONFIG_XPA
+#define MIPS_MAAR_V (MIPS_MAAR_VH | MIPS_MAAR_VL)
+#else
+#define MIPS_MAAR_V MIPS_MAAR_VL
+#endif
+#define MIPS_MAARX_VH (_ULCAST_(1) << 31)
+#define MIPS_MAARX_ADDR 0xF
+#define MIPS_MAARX_ADDR_SHIFT 32
+
+/* MAARI bit definitions */
+#define MIPS_MAARI_INDEX (_ULCAST_(0x3f) << 0)
+
+/* EBase bit definitions */
+#define MIPS_EBASE_CPUNUM_SHIFT 0
+#define MIPS_EBASE_CPUNUM (_ULCAST_(0x3ff) << 0)
+#define MIPS_EBASE_WG_SHIFT 11
+#define MIPS_EBASE_WG (_ULCAST_(1) << 11)
+#define MIPS_EBASE_BASE_SHIFT 12
+#define MIPS_EBASE_BASE (~_ULCAST_((1 << MIPS_EBASE_BASE_SHIFT) - 1))
+
+/* CMGCRBase bit definitions */
+#define MIPS_CMGCRB_BASE 11
+#define MIPS_CMGCRF_BASE (~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))
+
+/* LLAddr bit definitions */
+#define MIPS_LLADDR_LLB_SHIFT 0
+#define MIPS_LLADDR_LLB (_ULCAST_(1) << MIPS_LLADDR_LLB_SHIFT)
+
+/*
+ * Bits in the MIPS32 Memory Segmentation registers.
+ */
+#define MIPS_SEGCFG_PA_SHIFT 9
+#define MIPS_SEGCFG_PA (_ULCAST_(127) << MIPS_SEGCFG_PA_SHIFT)
+#define MIPS_SEGCFG_AM_SHIFT 4
+#define MIPS_SEGCFG_AM (_ULCAST_(7) << MIPS_SEGCFG_AM_SHIFT)
+#define MIPS_SEGCFG_EU_SHIFT 3
+#define MIPS_SEGCFG_EU (_ULCAST_(1) << MIPS_SEGCFG_EU_SHIFT)
+#define MIPS_SEGCFG_C_SHIFT 0
+#define MIPS_SEGCFG_C (_ULCAST_(7) << MIPS_SEGCFG_C_SHIFT)
+
+#define MIPS_SEGCFG_UUSK _ULCAST_(7)
+#define MIPS_SEGCFG_USK _ULCAST_(5)
+#define MIPS_SEGCFG_MUSUK _ULCAST_(4)
+#define MIPS_SEGCFG_MUSK _ULCAST_(3)
+#define MIPS_SEGCFG_MSK _ULCAST_(2)
+#define MIPS_SEGCFG_MK _ULCAST_(1)
+#define MIPS_SEGCFG_UK _ULCAST_(0)
+
+#define MIPS_PWFIELD_GDI_SHIFT 24
+#define MIPS_PWFIELD_GDI_MASK 0x3f000000
+#define MIPS_PWFIELD_UDI_SHIFT 18
+#define MIPS_PWFIELD_UDI_MASK 0x00fc0000
+#define MIPS_PWFIELD_MDI_SHIFT 12
+#define MIPS_PWFIELD_MDI_MASK 0x0003f000
+#define MIPS_PWFIELD_PTI_SHIFT 6
+#define MIPS_PWFIELD_PTI_MASK 0x00000fc0
+#define MIPS_PWFIELD_PTEI_SHIFT 0
+#define MIPS_PWFIELD_PTEI_MASK 0x0000003f
+
+#define MIPS_PWSIZE_PS_SHIFT 30
+#define MIPS_PWSIZE_PS_MASK 0x40000000
+#define MIPS_PWSIZE_GDW_SHIFT 24
+#define MIPS_PWSIZE_GDW_MASK 0x3f000000
+#define MIPS_PWSIZE_UDW_SHIFT 18
+#define MIPS_PWSIZE_UDW_MASK 0x00fc0000
+#define MIPS_PWSIZE_MDW_SHIFT 12
+#define MIPS_PWSIZE_MDW_MASK 0x0003f000
+#define MIPS_PWSIZE_PTW_SHIFT 6
+#define MIPS_PWSIZE_PTW_MASK 0x00000fc0
+#define MIPS_PWSIZE_PTEW_SHIFT 0
+#define MIPS_PWSIZE_PTEW_MASK 0x0000003f
+
+#define MIPS_PWCTL_PWEN_SHIFT 31
+#define MIPS_PWCTL_PWEN_MASK 0x80000000
+#define MIPS_PWCTL_XK_SHIFT 28
+#define MIPS_PWCTL_XK_MASK 0x10000000
+#define MIPS_PWCTL_XS_SHIFT 27
+#define MIPS_PWCTL_XS_MASK 0x08000000
+#define MIPS_PWCTL_XU_SHIFT 26
+#define MIPS_PWCTL_XU_MASK 0x04000000
+#define MIPS_PWCTL_DPH_SHIFT 7
+#define MIPS_PWCTL_DPH_MASK 0x00000080
+#define MIPS_PWCTL_HUGEPG_SHIFT 6
+#define MIPS_PWCTL_HUGEPG_MASK 0x00000060
+#define MIPS_PWCTL_PSN_SHIFT 0
+#define MIPS_PWCTL_PSN_MASK 0x0000003f
+
+/* GuestCtl0 fields */
+#define MIPS_GCTL0_GM_SHIFT 31
+#define MIPS_GCTL0_GM (_ULCAST_(1) << MIPS_GCTL0_GM_SHIFT)
+#define MIPS_GCTL0_RI_SHIFT 30
+#define MIPS_GCTL0_RI (_ULCAST_(1) << MIPS_GCTL0_RI_SHIFT)
+#define MIPS_GCTL0_MC_SHIFT 29
+#define MIPS_GCTL0_MC (_ULCAST_(1) << MIPS_GCTL0_MC_SHIFT)
+#define MIPS_GCTL0_CP0_SHIFT 28
+#define MIPS_GCTL0_CP0 (_ULCAST_(1) << MIPS_GCTL0_CP0_SHIFT)
+#define MIPS_GCTL0_AT_SHIFT 26
+#define MIPS_GCTL0_AT (_ULCAST_(0x3) << MIPS_GCTL0_AT_SHIFT)
+#define MIPS_GCTL0_GT_SHIFT 25
+#define MIPS_GCTL0_GT (_ULCAST_(1) << MIPS_GCTL0_GT_SHIFT)
+#define MIPS_GCTL0_CG_SHIFT 24
+#define MIPS_GCTL0_CG (_ULCAST_(1) << MIPS_GCTL0_CG_SHIFT)
+#define MIPS_GCTL0_CF_SHIFT 23
+#define MIPS_GCTL0_CF (_ULCAST_(1) << MIPS_GCTL0_CF_SHIFT)
+#define MIPS_GCTL0_G1_SHIFT 22
+#define MIPS_GCTL0_G1 (_ULCAST_(1) << MIPS_GCTL0_G1_SHIFT)
+#define MIPS_GCTL0_G0E_SHIFT 19
+#define MIPS_GCTL0_G0E (_ULCAST_(1) << MIPS_GCTL0_G0E_SHIFT)
+#define MIPS_GCTL0_PT_SHIFT 18
+#define MIPS_GCTL0_PT (_ULCAST_(1) << MIPS_GCTL0_PT_SHIFT)
+#define MIPS_GCTL0_RAD_SHIFT 9
+#define MIPS_GCTL0_RAD (_ULCAST_(1) << MIPS_GCTL0_RAD_SHIFT)
+#define MIPS_GCTL0_DRG_SHIFT 8
+#define MIPS_GCTL0_DRG (_ULCAST_(1) << MIPS_GCTL0_DRG_SHIFT)
+#define MIPS_GCTL0_G2_SHIFT 7
+#define MIPS_GCTL0_G2 (_ULCAST_(1) << MIPS_GCTL0_G2_SHIFT)
+#define MIPS_GCTL0_GEXC_SHIFT 2
+#define MIPS_GCTL0_GEXC (_ULCAST_(0x1f) << MIPS_GCTL0_GEXC_SHIFT)
+#define MIPS_GCTL0_SFC2_SHIFT 1
+#define MIPS_GCTL0_SFC2 (_ULCAST_(1) << MIPS_GCTL0_SFC2_SHIFT)
+#define MIPS_GCTL0_SFC1_SHIFT 0
+#define MIPS_GCTL0_SFC1 (_ULCAST_(1) << MIPS_GCTL0_SFC1_SHIFT)
+
+/* GuestCtl0.AT Guest address translation control */
+#define MIPS_GCTL0_AT_ROOT 1 /* Guest MMU under Root control */
+#define MIPS_GCTL0_AT_GUEST 3 /* Guest MMU under Guest control */
+
+/* GuestCtl0.GExcCode Hypervisor exception cause codes */
+#define MIPS_GCTL0_GEXC_GPSI 0 /* Guest Privileged Sensitive Instruction */
+#define MIPS_GCTL0_GEXC_GSFC 1 /* Guest Software Field Change */
+#define MIPS_GCTL0_GEXC_HC 2 /* Hypercall */
+#define MIPS_GCTL0_GEXC_GRR 3 /* Guest Reserved Instruction Redirect */
+#define MIPS_GCTL0_GEXC_GVA 8 /* Guest Virtual Address available */
+#define MIPS_GCTL0_GEXC_GHFC 9 /* Guest Hardware Field Change */
+#define MIPS_GCTL0_GEXC_GPA 10 /* Guest Physical Address available */
+
+/* GuestCtl0Ext fields */
+#define MIPS_GCTL0EXT_RPW_SHIFT 8
+#define MIPS_GCTL0EXT_RPW (_ULCAST_(0x3) << MIPS_GCTL0EXT_RPW_SHIFT)
+#define MIPS_GCTL0EXT_NCC_SHIFT 6
+#define MIPS_GCTL0EXT_NCC (_ULCAST_(0x3) << MIPS_GCTL0EXT_NCC_SHIFT)
+#define MIPS_GCTL0EXT_CGI_SHIFT 4
+#define MIPS_GCTL0EXT_CGI (_ULCAST_(1) << MIPS_GCTL0EXT_CGI_SHIFT)
+#define MIPS_GCTL0EXT_FCD_SHIFT 3
+#define MIPS_GCTL0EXT_FCD (_ULCAST_(1) << MIPS_GCTL0EXT_FCD_SHIFT)
+#define MIPS_GCTL0EXT_OG_SHIFT 2
+#define MIPS_GCTL0EXT_OG (_ULCAST_(1) << MIPS_GCTL0EXT_OG_SHIFT)
+#define MIPS_GCTL0EXT_BG_SHIFT 1
+#define MIPS_GCTL0EXT_BG (_ULCAST_(1) << MIPS_GCTL0EXT_BG_SHIFT)
+#define MIPS_GCTL0EXT_MG_SHIFT 0
+#define MIPS_GCTL0EXT_MG (_ULCAST_(1) << MIPS_GCTL0EXT_MG_SHIFT)
+
+/* GuestCtl0Ext.RPW Root page walk configuration */
+#define MIPS_GCTL0EXT_RPW_BOTH 0 /* Root PW for GPA->RPA and RVA->RPA */
+#define MIPS_GCTL0EXT_RPW_GPA 2 /* Root PW for GPA->RPA */
+#define MIPS_GCTL0EXT_RPW_RVA 3 /* Root PW for RVA->RPA */
+
+/* GuestCtl0Ext.NCC Nested cache coherency attributes */
+#define MIPS_GCTL0EXT_NCC_IND 0 /* Guest CCA independent of Root CCA */
+#define MIPS_GCTL0EXT_NCC_MOD 1 /* Guest CCA modified by Root CCA */
+
+/* GuestCtl1 fields */
+#define MIPS_GCTL1_ID_SHIFT 0
+#define MIPS_GCTL1_ID_WIDTH 8
+#define MIPS_GCTL1_ID (_ULCAST_(0xff) << MIPS_GCTL1_ID_SHIFT)
+#define MIPS_GCTL1_RID_SHIFT 16
+#define MIPS_GCTL1_RID_WIDTH 8
+#define MIPS_GCTL1_RID (_ULCAST_(0xff) << MIPS_GCTL1_RID_SHIFT)
+#define MIPS_GCTL1_EID_SHIFT 24
+#define MIPS_GCTL1_EID_WIDTH 8
+#define MIPS_GCTL1_EID (_ULCAST_(0xff) << MIPS_GCTL1_EID_SHIFT)
+
+/* GuestID reserved for root context */
+#define MIPS_GCTL1_ROOT_GUESTID 0
+
+/* CDMMBase register bit definitions */
+#define MIPS_CDMMBASE_SIZE_SHIFT 0
+#define MIPS_CDMMBASE_SIZE (_ULCAST_(511) << MIPS_CDMMBASE_SIZE_SHIFT)
+#define MIPS_CDMMBASE_CI (_ULCAST_(1) << 9)
+#define MIPS_CDMMBASE_EN (_ULCAST_(1) << 10)
+#define MIPS_CDMMBASE_ADDR_SHIFT 11
+#define MIPS_CDMMBASE_ADDR_START 15
+
+/* RDHWR register numbers */
+#define MIPS_HWR_CPUNUM 0 /* CPU number */
+#define MIPS_HWR_SYNCISTEP 1 /* SYNCI step size */
+#define MIPS_HWR_CC 2 /* Cycle counter */
+#define MIPS_HWR_CCRES 3 /* Cycle counter resolution */
+#define MIPS_HWR_ULR 29 /* UserLocal */
+#define MIPS_HWR_IMPL1 30 /* Implementation dependent */
+#define MIPS_HWR_IMPL2 31 /* Implementation dependent */
+
+/* Bits in HWREna register */
+#define MIPS_HWRENA_CPUNUM (_ULCAST_(1) << MIPS_HWR_CPUNUM)
+#define MIPS_HWRENA_SYNCISTEP (_ULCAST_(1) << MIPS_HWR_SYNCISTEP)
+#define MIPS_HWRENA_CC (_ULCAST_(1) << MIPS_HWR_CC)
+#define MIPS_HWRENA_CCRES (_ULCAST_(1) << MIPS_HWR_CCRES)
+#define MIPS_HWRENA_ULR (_ULCAST_(1) << MIPS_HWR_ULR)
+#define MIPS_HWRENA_IMPL1 (_ULCAST_(1) << MIPS_HWR_IMPL1)
+#define MIPS_HWRENA_IMPL2 (_ULCAST_(1) << MIPS_HWR_IMPL2)
+
+/*
+ * Bitfields in the TX39 family CP0 Configuration Register 3
+ */
+#define TX39_CONF_ICS_SHIFT 19
+#define TX39_CONF_ICS_MASK 0x00380000
+#define TX39_CONF_ICS_1KB 0x00000000
+#define TX39_CONF_ICS_2KB 0x00080000
+#define TX39_CONF_ICS_4KB 0x00100000
+#define TX39_CONF_ICS_8KB 0x00180000
+#define TX39_CONF_ICS_16KB 0x00200000
+
+#define TX39_CONF_DCS_SHIFT 16
+#define TX39_CONF_DCS_MASK 0x00070000
+#define TX39_CONF_DCS_1KB 0x00000000
+#define TX39_CONF_DCS_2KB 0x00010000
+#define TX39_CONF_DCS_4KB 0x00020000
+#define TX39_CONF_DCS_8KB 0x00030000
+#define TX39_CONF_DCS_16KB 0x00040000
+
+#define TX39_CONF_CWFON 0x00004000
+#define TX39_CONF_WBON 0x00002000
+#define TX39_CONF_RF_SHIFT 10
+#define TX39_CONF_RF_MASK 0x00000c00
+#define TX39_CONF_DOZE 0x00000200
+#define TX39_CONF_HALT 0x00000100
+#define TX39_CONF_LOCK 0x00000080
+#define TX39_CONF_ICE 0x00000020
+#define TX39_CONF_DCE 0x00000010
+#define TX39_CONF_IRSIZE_SHIFT 2
+#define TX39_CONF_IRSIZE_MASK 0x0000000c
+#define TX39_CONF_DRSIZE_SHIFT 0
+#define TX39_CONF_DRSIZE_MASK 0x00000003
+
+/*
+ * Interesting Bits in the R10K CP0 Branch Diagnostic Register
+ */
+/* Disable Branch Target Address Cache */
+#define R10K_DIAG_D_BTAC (_ULCAST_(1) << 27)
+/* Enable Branch Prediction Global History */
+#define R10K_DIAG_E_GHIST (_ULCAST_(1) << 26)
+/* Disable Branch Return Cache */
+#define R10K_DIAG_D_BRC (_ULCAST_(1) << 22)
+
+/* Flush BTB */
+#define LOONGSON_DIAG_BTB (_ULCAST_(1) << 1)
+/* Flush ITLB */
+#define LOONGSON_DIAG_ITLB (_ULCAST_(1) << 2)
+/* Flush DTLB */
+#define LOONGSON_DIAG_DTLB (_ULCAST_(1) << 3)
+/* Allow some CACHE instructions (CACHE0, 1, 3, 21 and 23) in user mode */
+#define LOONGSON_DIAG_UCAC (_ULCAST_(1) << 8)
+/* Flush VTLB */
+#define LOONGSON_DIAG_VTLB (_ULCAST_(1) << 12)
+/* Flush FTLB */
+#define LOONGSON_DIAG_FTLB (_ULCAST_(1) << 13)
+
+/*
+ * Diag1 (GSCause in Loongson-speak) fields
+ */
+/* Loongson-specific exception code (GSExcCode) */
+#define LOONGSON_DIAG1_EXCCODE_SHIFT 2
+#define LOONGSON_DIAG1_EXCCODE GENMASK(6, 2)
+
+/* CvmCtl register field definitions */
+#define CVMCTL_IPPCI_SHIFT 7
+#define CVMCTL_IPPCI (_U64CAST_(0x7) << CVMCTL_IPPCI_SHIFT)
+#define CVMCTL_IPTI_SHIFT 4
+#define CVMCTL_IPTI (_U64CAST_(0x7) << CVMCTL_IPTI_SHIFT)
+
+/* CvmMemCtl2 register field definitions */
+#define CVMMEMCTL2_INHIBITTS (_U64CAST_(1) << 17)
+
+/* CvmVMConfig register field definitions */
+#define CVMVMCONF_DGHT (_U64CAST_(1) << 60)
+#define CVMVMCONF_MMUSIZEM1_S 12
+#define CVMVMCONF_MMUSIZEM1 (_U64CAST_(0xff) << CVMVMCONF_MMUSIZEM1_S)
+#define CVMVMCONF_RMMUSIZEM1_S 0
+#define CVMVMCONF_RMMUSIZEM1 (_U64CAST_(0xff) << CVMVMCONF_RMMUSIZEM1_S)
+
+/*
+ * Coprocessor 1 (FPU) register names
+ */
+#define CP1_REVISION $0
+#define CP1_UFR $1
+#define CP1_UNFR $4
+#define CP1_FCCR $25
+#define CP1_FEXR $26
+#define CP1_FENR $28
+#define CP1_STATUS $31
+
+
+/*
+ * Bits in the MIPS32/64 coprocessor 1 (FPU) revision register.
+ */
+#define MIPS_FPIR_S (_ULCAST_(1) << 16)
+#define MIPS_FPIR_D (_ULCAST_(1) << 17)
+#define MIPS_FPIR_PS (_ULCAST_(1) << 18)
+#define MIPS_FPIR_3D (_ULCAST_(1) << 19)
+#define MIPS_FPIR_W (_ULCAST_(1) << 20)
+#define MIPS_FPIR_L (_ULCAST_(1) << 21)
+#define MIPS_FPIR_F64 (_ULCAST_(1) << 22)
+#define MIPS_FPIR_HAS2008 (_ULCAST_(1) << 23)
+#define MIPS_FPIR_UFRP (_ULCAST_(1) << 28)
+#define MIPS_FPIR_FREP (_ULCAST_(1) << 29)
+
+/*
+ * Bits in the MIPS32/64 coprocessor 1 (FPU) condition codes register.
+ */
+#define MIPS_FCCR_CONDX_S 0
+#define MIPS_FCCR_CONDX (_ULCAST_(255) << MIPS_FCCR_CONDX_S)
+#define MIPS_FCCR_COND0_S 0
+#define MIPS_FCCR_COND0 (_ULCAST_(1) << MIPS_FCCR_COND0_S)
+#define MIPS_FCCR_COND1_S 1
+#define MIPS_FCCR_COND1 (_ULCAST_(1) << MIPS_FCCR_COND1_S)
+#define MIPS_FCCR_COND2_S 2
+#define MIPS_FCCR_COND2 (_ULCAST_(1) << MIPS_FCCR_COND2_S)
+#define MIPS_FCCR_COND3_S 3
+#define MIPS_FCCR_COND3 (_ULCAST_(1) << MIPS_FCCR_COND3_S)
+#define MIPS_FCCR_COND4_S 4
+#define MIPS_FCCR_COND4 (_ULCAST_(1) << MIPS_FCCR_COND4_S)
+#define MIPS_FCCR_COND5_S 5
+#define MIPS_FCCR_COND5 (_ULCAST_(1) << MIPS_FCCR_COND5_S)
+#define MIPS_FCCR_COND6_S 6
+#define MIPS_FCCR_COND6 (_ULCAST_(1) << MIPS_FCCR_COND6_S)
+#define MIPS_FCCR_COND7_S 7
+#define MIPS_FCCR_COND7 (_ULCAST_(1) << MIPS_FCCR_COND7_S)
+
+/*
+ * Bits in the MIPS32/64 coprocessor 1 (FPU) enables register.
+ */
+#define MIPS_FENR_FS_S 2
+#define MIPS_FENR_FS (_ULCAST_(1) << MIPS_FENR_FS_S)
+
+/*
+ * FPU Status Register Values
+ */
+#define FPU_CSR_COND_S 23 /* $fcc0 */
+#define FPU_CSR_COND (_ULCAST_(1) << FPU_CSR_COND_S)
+
+#define FPU_CSR_FS_S 24 /* flush denormalised results to 0 */
+#define FPU_CSR_FS (_ULCAST_(1) << FPU_CSR_FS_S)
+
+#define FPU_CSR_CONDX_S 25 /* $fcc[7:1] */
+#define FPU_CSR_CONDX (_ULCAST_(127) << FPU_CSR_CONDX_S)
+#define FPU_CSR_COND1_S 25 /* $fcc1 */
+#define FPU_CSR_COND1 (_ULCAST_(1) << FPU_CSR_COND1_S)
+#define FPU_CSR_COND2_S 26 /* $fcc2 */
+#define FPU_CSR_COND2 (_ULCAST_(1) << FPU_CSR_COND2_S)
+#define FPU_CSR_COND3_S 27 /* $fcc3 */
+#define FPU_CSR_COND3 (_ULCAST_(1) << FPU_CSR_COND3_S)
+#define FPU_CSR_COND4_S 28 /* $fcc4 */
+#define FPU_CSR_COND4 (_ULCAST_(1) << FPU_CSR_COND4_S)
+#define FPU_CSR_COND5_S 29 /* $fcc5 */
+#define FPU_CSR_COND5 (_ULCAST_(1) << FPU_CSR_COND5_S)
+#define FPU_CSR_COND6_S 30 /* $fcc6 */
+#define FPU_CSR_COND6 (_ULCAST_(1) << FPU_CSR_COND6_S)
+#define FPU_CSR_COND7_S 31 /* $fcc7 */
+#define FPU_CSR_COND7 (_ULCAST_(1) << FPU_CSR_COND7_S)
+
+/*
+ * Bits 22:20 of the FPU Status Register will be read as 0,
+ * and should be written as zero.
+ * MAC2008 was removed in Release 5 so we still treat it as
+ * reserved.
+ */
+#define FPU_CSR_RSVD (_ULCAST_(7) << 20)
+
+#define FPU_CSR_MAC2008 (_ULCAST_(1) << 20)
+#define FPU_CSR_ABS2008 (_ULCAST_(1) << 19)
+#define FPU_CSR_NAN2008 (_ULCAST_(1) << 18)
+
+/*
+ * X the exception cause indicator
+ * E the exception enable
+ * S the sticky/flag bit
+ */
+#define FPU_CSR_ALL_X 0x0003f000
+#define FPU_CSR_UNI_X 0x00020000
+#define FPU_CSR_INV_X 0x00010000
+#define FPU_CSR_DIV_X 0x00008000
+#define FPU_CSR_OVF_X 0x00004000
+#define FPU_CSR_UDF_X 0x00002000
+#define FPU_CSR_INE_X 0x00001000
+
+#define FPU_CSR_ALL_E 0x00000f80
+#define FPU_CSR_INV_E 0x00000800
+#define FPU_CSR_DIV_E 0x00000400
+#define FPU_CSR_OVF_E 0x00000200
+#define FPU_CSR_UDF_E 0x00000100
+#define FPU_CSR_INE_E 0x00000080
+
+#define FPU_CSR_ALL_S 0x0000007c
+#define FPU_CSR_INV_S 0x00000040
+#define FPU_CSR_DIV_S 0x00000020
+#define FPU_CSR_OVF_S 0x00000010
+#define FPU_CSR_UDF_S 0x00000008
+#define FPU_CSR_INE_S 0x00000004
+
+/* Bits 0 and 1 of FPU Status Register specify the rounding mode */
+#define FPU_CSR_RM 0x00000003
+#define FPU_CSR_RN 0x0 /* nearest */
+#define FPU_CSR_RZ 0x1 /* towards zero */
+#define FPU_CSR_RU 0x2 /* towards +Infinity */
+#define FPU_CSR_RD 0x3 /* towards -Infinity */
+
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Macros for handling the ISA mode bit for MIPS16 and microMIPS.
+ */
+#if defined(CONFIG_SYS_SUPPORTS_MIPS16) || \
+ defined(CONFIG_SYS_SUPPORTS_MICROMIPS)
+#define get_isa16_mode(x) ((x) & 0x1)
+#define msk_isa16_mode(x) ((x) & ~0x1)
+#define set_isa16_mode(x) do { (x) |= 0x1; } while(0)
+#else
+#define get_isa16_mode(x) 0
+#define msk_isa16_mode(x) (x)
+#define set_isa16_mode(x) do { } while(0)
+#endif
+
+/*
+ * microMIPS instructions can be 16-bit or 32-bit in length. This
+ * returns a 1 if the instruction is 16-bit and a 0 if 32-bit.
+ */
+static inline int mm_insn_16bit(u16 insn)
+{
+ u16 opcode = (insn >> 10) & 0x7;
+
+ return (opcode >= 1 && opcode <= 3) ? 1 : 0;
+}
+
+/*
+ * Helper macros for generating raw instruction encodings in inline asm.
+ */
+#ifdef CONFIG_CPU_MICROMIPS
+#define _ASM_INSN16_IF_MM(_enc) \
+ ".insn\n\t" \
+ ".hword (" #_enc ")\n\t"
+#define _ASM_INSN32_IF_MM(_enc) \
+ ".insn\n\t" \
+ ".hword ((" #_enc ") >> 16)\n\t" \
+ ".hword ((" #_enc ") & 0xffff)\n\t"
+#else
+#define _ASM_INSN_IF_MIPS(_enc) \
+ ".insn\n\t" \
+ ".word (" #_enc ")\n\t"
+#endif
+
+#ifndef _ASM_INSN16_IF_MM
+#define _ASM_INSN16_IF_MM(_enc)
+#endif
+#ifndef _ASM_INSN32_IF_MM
+#define _ASM_INSN32_IF_MM(_enc)
+#endif
+#ifndef _ASM_INSN_IF_MIPS
+#define _ASM_INSN_IF_MIPS(_enc)
+#endif
+
+/*
+ * parse_r var, r - Helper assembler macro for parsing register names.
+ *
+ * This converts the register name in $n form provided in \r to the
+ * corresponding register number, which is assigned to the variable \var. It is
+ * needed to allow explicit encoding of instructions in inline assembly where
+ * registers are chosen by the compiler in $n form, allowing us to avoid using
+ * fixed register numbers.
+ *
+ * It also allows newer instructions (not implemented by the assembler) to be
+ * transparently implemented using assembler macros, instead of needing separate
+ * cases depending on toolchain support.
+ *
+ * Simple usage example:
+ * __asm__ __volatile__("parse_r __rt, %0\n\t"
+ * ".insn\n\t"
+ * "# di %0\n\t"
+ * ".word (0x41606000 | (__rt << 16))"
+ * : "=r" (status));
+ */
+
+/* Match an individual register number and assign to \var */
+#define _IFC_REG(n) \
+ ".ifc \\r, $" #n "\n\t" \
+ "\\var = " #n "\n\t" \
+ ".endif\n\t"
+
+__asm__(".macro parse_r var r\n\t"
+ "\\var = -1\n\t"
+ _IFC_REG(0) _IFC_REG(1) _IFC_REG(2) _IFC_REG(3)
+ _IFC_REG(4) _IFC_REG(5) _IFC_REG(6) _IFC_REG(7)
+ _IFC_REG(8) _IFC_REG(9) _IFC_REG(10) _IFC_REG(11)
+ _IFC_REG(12) _IFC_REG(13) _IFC_REG(14) _IFC_REG(15)
+ _IFC_REG(16) _IFC_REG(17) _IFC_REG(18) _IFC_REG(19)
+ _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23)
+ _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27)
+ _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31)
+ ".iflt \\var\n\t"
+ ".error \"Unable to parse register name \\r\"\n\t"
+ ".endif\n\t"
+ ".endm");
+
+#undef _IFC_REG
+
+/*
+ * C macros for generating assembler macros for common instruction formats.
+ *
+ * The names of the operands can be chosen by the caller, and the encoding of
+ * register operand \<Rn> is assigned to __<Rn> where it can be accessed from
+ * the ENC encodings.
+ */
+
+/* Instructions with no operands */
+#define _ASM_MACRO_0(OP, ENC) \
+ __asm__(".macro " #OP "\n\t" \
+ ENC \
+ ".endm")
+
+/* Instructions with 1 register operand & 1 immediate operand */
+#define _ASM_MACRO_1R1I(OP, R1, I2, ENC) \
+ __asm__(".macro " #OP " " #R1 ", " #I2 "\n\t" \
+ "parse_r __" #R1 ", \\" #R1 "\n\t" \
+ ENC \
+ ".endm")
+
+/* Instructions with 2 register operands */
+#define _ASM_MACRO_2R(OP, R1, R2, ENC) \
+ __asm__(".macro " #OP " " #R1 ", " #R2 "\n\t" \
+ "parse_r __" #R1 ", \\" #R1 "\n\t" \
+ "parse_r __" #R2 ", \\" #R2 "\n\t" \
+ ENC \
+ ".endm")
+
+/* Instructions with 3 register operands */
+#define _ASM_MACRO_3R(OP, R1, R2, R3, ENC) \
+ __asm__(".macro " #OP " " #R1 ", " #R2 ", " #R3 "\n\t" \
+ "parse_r __" #R1 ", \\" #R1 "\n\t" \
+ "parse_r __" #R2 ", \\" #R2 "\n\t" \
+ "parse_r __" #R3 ", \\" #R3 "\n\t" \
+ ENC \
+ ".endm")
+
+/* Instructions with 2 register operands and 1 optional select operand */
+#define _ASM_MACRO_2R_1S(OP, R1, R2, SEL3, ENC) \
+ __asm__(".macro " #OP " " #R1 ", " #R2 ", " #SEL3 " = 0\n\t" \
+ "parse_r __" #R1 ", \\" #R1 "\n\t" \
+ "parse_r __" #R2 ", \\" #R2 "\n\t" \
+ ENC \
+ ".endm")
+
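+/*
+ * Illustrative example: the XPA and VZ sections further down in this file
+ * use these generators to provide fallback encodings when the toolchain
+ * lacks support for an instruction, e.g.
+ *
+ *	_ASM_MACRO_2R_1S(mfhc0, rt, rs, sel,
+ *		_ASM_INSN_IF_MIPS(0x40400000 | __rt << 16 | __rs << 11 | \\sel)
+ *		_ASM_INSN32_IF_MM(0x000000f4 | __rt << 21 | __rs << 16 | \\sel << 11));
+ *
+ * defines an assembler macro "mfhc0 rt, rs, sel": parse_r converts the
+ * register operands to numbers (__rt, __rs), which are then spliced into
+ * the raw encoding emitted by _ASM_INSN*_IF_*.
+ */
+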
+/*
+ * TLB Invalidate Flush
+ */
+static inline void tlbinvf(void)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ "# tlbinvf\n\t"
+ _ASM_INSN_IF_MIPS(0x42000004)
+ _ASM_INSN32_IF_MM(0x0000537c)
+ ".set pop");
+}
+
+
+/*
+ * Functions to access the R10000 performance counters. These are basically
+ * mfc0 and mtc0 instructions from and to a coprocessor register, with a 5-bit
+ * performance counter number encoded into bits 1 ... 5 of the instruction.
+ * Only performance counters 0 and 1 actually exist, so for a non-R10000 aware
+ * disassembler these will look like an access to sel 0 or 1.
+ */
+#define read_r10k_perf_cntr(counter) \
+({ \
+ unsigned int __res; \
+ __asm__ __volatile__( \
+ "mfpc\t%0, %1" \
+ : "=r" (__res) \
+ : "i" (counter)); \
+ \
+ __res; \
+})
+
+#define write_r10k_perf_cntr(counter,val) \
+do { \
+ __asm__ __volatile__( \
+ "mtpc\t%0, %1" \
+ : \
+ : "r" (val), "i" (counter)); \
+} while (0)
+
+#define read_r10k_perf_event(counter) \
+({ \
+ unsigned int __res; \
+ __asm__ __volatile__( \
+ "mfps\t%0, %1" \
+ : "=r" (__res) \
+ : "i" (counter)); \
+ \
+ __res; \
+})
+
+#define write_r10k_perf_cntl(counter,val) \
+do { \
+ __asm__ __volatile__( \
+ "mtps\t%0, %1" \
+ : \
+ : "r" (val), "i" (counter)); \
+} while (0)
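+
+/*
+ * Usage sketch: a hypothetical R10000 perf setup would program the control
+ * register and then sample the corresponding counter, e.g.
+ *
+ *	write_r10k_perf_cntl(0, control_bits);	(control_bits is illustrative)
+ *	...
+ *	count = read_r10k_perf_cntr(0);
+ *
+ * Note the counter number must be a compile-time constant, since it is
+ * passed via an "i" constraint and encoded into the instruction itself.
+ */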
+
+
+/*
+ * Macros to access the system control coprocessor
+ */
+
+#define ___read_32bit_c0_register(source, sel, vol) \
+({ unsigned int __res; \
+ if (sel == 0) \
+ __asm__ vol( \
+ "mfc0\t%0, " #source "\n\t" \
+ : "=r" (__res)); \
+ else \
+ __asm__ vol( \
+ ".set\tpush\n\t" \
+ ".set\tmips32\n\t" \
+ "mfc0\t%0, " #source ", " #sel "\n\t" \
+ ".set\tpop\n\t" \
+ : "=r" (__res)); \
+ __res; \
+})
+
+#define ___read_64bit_c0_register(source, sel, vol) \
+({ unsigned long long __res; \
+ if (sizeof(unsigned long) == 4) \
+ __res = __read_64bit_c0_split(source, sel, vol); \
+ else if (sel == 0) \
+ __asm__ vol( \
+ ".set\tpush\n\t" \
+ ".set\tmips3\n\t" \
+ "dmfc0\t%0, " #source "\n\t" \
+ ".set\tpop" \
+ : "=r" (__res)); \
+ else \
+ __asm__ vol( \
+ ".set\tpush\n\t" \
+ ".set\tmips64\n\t" \
+ "dmfc0\t%0, " #source ", " #sel "\n\t" \
+ ".set\tpop" \
+ : "=r" (__res)); \
+ __res; \
+})
+
+#define __read_32bit_c0_register(source, sel) \
+ ___read_32bit_c0_register(source, sel, __volatile__)
+
+#define __read_const_32bit_c0_register(source, sel) \
+ ___read_32bit_c0_register(source, sel,)
+
+#define __read_64bit_c0_register(source, sel) \
+ ___read_64bit_c0_register(source, sel, __volatile__)
+
+#define __read_const_64bit_c0_register(source, sel) \
+ ___read_64bit_c0_register(source, sel,)
+
+#define __write_32bit_c0_register(register, sel, value) \
+do { \
+ if (sel == 0) \
+ __asm__ __volatile__( \
+ "mtc0\t%z0, " #register "\n\t" \
+ : : "Jr" ((unsigned int)(value))); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips32\n\t" \
+ "mtc0\t%z0, " #register ", " #sel "\n\t" \
+ ".set\tpop" \
+ : : "Jr" ((unsigned int)(value))); \
+} while (0)
+
+#define __write_64bit_c0_register(register, sel, value) \
+do { \
+ if (sizeof(unsigned long) == 4) \
+ __write_64bit_c0_split(register, sel, value); \
+ else if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips3\n\t" \
+ "dmtc0\t%z0, " #register "\n\t" \
+ ".set\tpop" \
+ : : "Jr" (value)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips64\n\t" \
+ "dmtc0\t%z0, " #register ", " #sel "\n\t" \
+ ".set\tpop" \
+ : : "Jr" (value)); \
+} while (0)
+
+#define __read_ulong_c0_register(reg, sel) \
+ ((sizeof(unsigned long) == 4) ? \
+ (unsigned long) __read_32bit_c0_register(reg, sel) : \
+ (unsigned long) __read_64bit_c0_register(reg, sel))
+
+#define __read_const_ulong_c0_register(reg, sel) \
+ ((sizeof(unsigned long) == 4) ? \
+ (unsigned long) __read_const_32bit_c0_register(reg, sel) : \
+ (unsigned long) __read_const_64bit_c0_register(reg, sel))
+
+#define __write_ulong_c0_register(reg, sel, val) \
+do { \
+ if (sizeof(unsigned long) == 4) \
+ __write_32bit_c0_register(reg, sel, val); \
+ else \
+ __write_64bit_c0_register(reg, sel, val); \
+} while (0)
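+
+/*
+ * Illustrative example: the read_c0_*() / write_c0_*() accessors defined
+ * below are thin wrappers around the macros above. For instance
+ *
+ *	unsigned int status = read_c0_status();
+ *
+ * expands to __read_32bit_c0_register($12, 0) and emits a plain
+ * "mfc0 %0, $12", while accessors with a non-zero select switch to the
+ * mips32 ISA level and emit "mfc0 %0, $reg, sel".
+ */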
+
+/*
+ * On RM7000/RM9000 these are used to access cop0 set 1 registers
+ */
+#define __read_32bit_c0_ctrl_register(source) \
+({ unsigned int __res; \
+ __asm__ __volatile__( \
+ "cfc0\t%0, " #source "\n\t" \
+ : "=r" (__res)); \
+ __res; \
+})
+
+#define __write_32bit_c0_ctrl_register(register, value) \
+do { \
+ __asm__ __volatile__( \
+ "ctc0\t%z0, " #register "\n\t" \
+ : : "Jr" ((unsigned int)(value))); \
+} while (0)
+
+/*
+ * These versions are only needed for systems with more than 38 bits of
+ * physical address space running the 32-bit kernel. That's none atm :-)
+ */
+#define __read_64bit_c0_split(source, sel, vol) \
+({ \
+ unsigned long long __val; \
+ unsigned long __flags; \
+ \
+ local_irq_save(__flags); \
+ if (sel == 0) \
+ __asm__ vol( \
+ ".set\tpush\n\t" \
+ ".set\tmips64\n\t" \
+ "dmfc0\t%L0, " #source "\n\t" \
+ "dsra\t%M0, %L0, 32\n\t" \
+ "sll\t%L0, %L0, 0\n\t" \
+ ".set\tpop" \
+ : "=r" (__val)); \
+ else \
+ __asm__ vol( \
+ ".set\tpush\n\t" \
+ ".set\tmips64\n\t" \
+ "dmfc0\t%L0, " #source ", " #sel "\n\t" \
+ "dsra\t%M0, %L0, 32\n\t" \
+ "sll\t%L0, %L0, 0\n\t" \
+ ".set\tpop" \
+ : "=r" (__val)); \
+ local_irq_restore(__flags); \
+ \
+ __val; \
+})
+
+#define __write_64bit_c0_split(source, sel, val) \
+do { \
+ unsigned long long __tmp = (val); \
+ unsigned long __flags; \
+ \
+ local_irq_save(__flags); \
+ if (MIPS_ISA_REV >= 2) \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\t" MIPS_ISA_LEVEL "\n\t" \
+ "dins\t%L0, %M0, 32, 32\n\t" \
+ "dmtc0\t%L0, " #source ", " #sel "\n\t" \
+ ".set\tpop" \
+ : "+r" (__tmp)); \
+ else if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips64\n\t" \
+ "dsll\t%L0, %L0, 32\n\t" \
+ "dsrl\t%L0, %L0, 32\n\t" \
+ "dsll\t%M0, %M0, 32\n\t" \
+ "or\t%L0, %L0, %M0\n\t" \
+ "dmtc0\t%L0, " #source "\n\t" \
+ ".set\tpop" \
+ : "+r" (__tmp)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips64\n\t" \
+ "dsll\t%L0, %L0, 32\n\t" \
+ "dsrl\t%L0, %L0, 32\n\t" \
+ "dsll\t%M0, %M0, 32\n\t" \
+ "or\t%L0, %L0, %M0\n\t" \
+ "dmtc0\t%L0, " #source ", " #sel "\n\t" \
+ ".set\tpop" \
+ : "+r" (__tmp)); \
+ local_irq_restore(__flags); \
+} while (0)
+
+#ifndef TOOLCHAIN_SUPPORTS_XPA
+_ASM_MACRO_2R_1S(mfhc0, rt, rs, sel,
+ _ASM_INSN_IF_MIPS(0x40400000 | __rt << 16 | __rs << 11 | \\sel)
+ _ASM_INSN32_IF_MM(0x000000f4 | __rt << 21 | __rs << 16 | \\sel << 11));
+_ASM_MACRO_2R_1S(mthc0, rt, rd, sel,
+ _ASM_INSN_IF_MIPS(0x40c00000 | __rt << 16 | __rd << 11 | \\sel)
+ _ASM_INSN32_IF_MM(0x000002f4 | __rt << 21 | __rd << 16 | \\sel << 11));
+#define _ASM_SET_XPA ""
+#else /* !TOOLCHAIN_SUPPORTS_XPA */
+#define _ASM_SET_XPA ".set\txpa\n\t"
+#endif
+
+#define __readx_32bit_c0_register(source, sel) \
+({ \
+ unsigned int __res; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set mips32r2 \n" \
+ _ASM_SET_XPA \
+ " mfhc0 %0, " #source ", %1 \n" \
+ " .set pop \n" \
+ : "=r" (__res) \
+ : "i" (sel)); \
+ __res; \
+})
+
+#define __writex_32bit_c0_register(register, sel, value) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set mips32r2 \n" \
+ _ASM_SET_XPA \
+ " mthc0 %z0, " #register ", %1 \n" \
+ " .set pop \n" \
+ : \
+ : "Jr" (value), "i" (sel)); \
+} while (0)
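+
+/*
+ * Illustrative example: the readx_/writex_ accessors below use these to
+ * reach the upper 32 bits of an extended (XPA) CP0 register from a 32-bit
+ * kernel, e.g. readx_c0_entrylo0() expands to
+ * __readx_32bit_c0_register($2, 0) and emits "mfhc0 %0, $2, 0" (via the
+ * assembler macro above when the toolchain lacks XPA support).
+ */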
+
+#define read_c0_index() __read_32bit_c0_register($0, 0)
+#define write_c0_index(val) __write_32bit_c0_register($0, 0, val)
+
+#define read_c0_random() __read_32bit_c0_register($1, 0)
+#define write_c0_random(val) __write_32bit_c0_register($1, 0, val)
+
+#define read_c0_entrylo0() __read_ulong_c0_register($2, 0)
+#define write_c0_entrylo0(val) __write_ulong_c0_register($2, 0, val)
+
+#define readx_c0_entrylo0() __readx_32bit_c0_register($2, 0)
+#define writex_c0_entrylo0(val) __writex_32bit_c0_register($2, 0, val)
+
+#define read_c0_entrylo1() __read_ulong_c0_register($3, 0)
+#define write_c0_entrylo1(val) __write_ulong_c0_register($3, 0, val)
+
+#define readx_c0_entrylo1() __readx_32bit_c0_register($3, 0)
+#define writex_c0_entrylo1(val) __writex_32bit_c0_register($3, 0, val)
+
+#define read_c0_conf() __read_32bit_c0_register($3, 0)
+#define write_c0_conf(val) __write_32bit_c0_register($3, 0, val)
+
+#define read_c0_globalnumber() __read_32bit_c0_register($3, 1)
+
+#define read_c0_context() __read_ulong_c0_register($4, 0)
+#define write_c0_context(val) __write_ulong_c0_register($4, 0, val)
+
+#define read_c0_contextconfig() __read_32bit_c0_register($4, 1)
+#define write_c0_contextconfig(val) __write_32bit_c0_register($4, 1, val)
+
+#define read_c0_userlocal() __read_ulong_c0_register($4, 2)
+#define write_c0_userlocal(val) __write_ulong_c0_register($4, 2, val)
+
+#define read_c0_xcontextconfig() __read_ulong_c0_register($4, 3)
+#define write_c0_xcontextconfig(val) __write_ulong_c0_register($4, 3, val)
+
+#define read_c0_memorymapid() __read_32bit_c0_register($4, 5)
+#define write_c0_memorymapid(val) __write_32bit_c0_register($4, 5, val)
+
+#define read_c0_pagemask() __read_32bit_c0_register($5, 0)
+#define write_c0_pagemask(val) __write_32bit_c0_register($5, 0, val)
+
+#define read_c0_pagegrain() __read_32bit_c0_register($5, 1)
+#define write_c0_pagegrain(val) __write_32bit_c0_register($5, 1, val)
+
+#define read_c0_wired() __read_32bit_c0_register($6, 0)
+#define write_c0_wired(val) __write_32bit_c0_register($6, 0, val)
+
+#define read_c0_info() __read_32bit_c0_register($7, 0)
+
+#define read_c0_cache() __read_32bit_c0_register($7, 0) /* TX39xx */
+#define write_c0_cache(val) __write_32bit_c0_register($7, 0, val)
+
+#define read_c0_badvaddr() __read_ulong_c0_register($8, 0)
+#define write_c0_badvaddr(val) __write_ulong_c0_register($8, 0, val)
+
+#define read_c0_badinstr() __read_32bit_c0_register($8, 1)
+#define read_c0_badinstrp() __read_32bit_c0_register($8, 2)
+
+#define read_c0_count() __read_32bit_c0_register($9, 0)
+#define write_c0_count(val) __write_32bit_c0_register($9, 0, val)
+
+#define read_c0_entryhi() __read_ulong_c0_register($10, 0)
+#define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val)
+
+#define read_c0_guestctl1() __read_32bit_c0_register($10, 4)
+#define write_c0_guestctl1(val) __write_32bit_c0_register($10, 4, val)
+
+#define read_c0_guestctl2() __read_32bit_c0_register($10, 5)
+#define write_c0_guestctl2(val) __write_32bit_c0_register($10, 5, val)
+
+#define read_c0_guestctl3() __read_32bit_c0_register($10, 6)
+#define write_c0_guestctl3(val) __write_32bit_c0_register($10, 6, val)
+
+#define read_c0_compare() __read_32bit_c0_register($11, 0)
+#define write_c0_compare(val) __write_32bit_c0_register($11, 0, val)
+
+#define read_c0_guestctl0ext() __read_32bit_c0_register($11, 4)
+#define write_c0_guestctl0ext(val) __write_32bit_c0_register($11, 4, val)
+
+#define read_c0_status() __read_32bit_c0_register($12, 0)
+
+#define write_c0_status(val) __write_32bit_c0_register($12, 0, val)
+
+#define read_c0_guestctl0() __read_32bit_c0_register($12, 6)
+#define write_c0_guestctl0(val) __write_32bit_c0_register($12, 6, val)
+
+#define read_c0_gtoffset() __read_32bit_c0_register($12, 7)
+#define write_c0_gtoffset(val) __write_32bit_c0_register($12, 7, val)
+
+#define read_c0_cause() __read_32bit_c0_register($13, 0)
+#define write_c0_cause(val) __write_32bit_c0_register($13, 0, val)
+
+#define read_c0_epc() __read_ulong_c0_register($14, 0)
+#define write_c0_epc(val) __write_ulong_c0_register($14, 0, val)
+
+#define read_c0_prid() __read_const_32bit_c0_register($15, 0)
+
+#define read_c0_cmgcrbase() __read_ulong_c0_register($15, 3)
+
+#define read_c0_config() __read_32bit_c0_register($16, 0)
+#define read_c0_config1() __read_32bit_c0_register($16, 1)
+#define read_c0_config2() __read_32bit_c0_register($16, 2)
+#define read_c0_config3() __read_32bit_c0_register($16, 3)
+#define read_c0_config4() __read_32bit_c0_register($16, 4)
+#define read_c0_config5() __read_32bit_c0_register($16, 5)
+#define read_c0_config6() __read_32bit_c0_register($16, 6)
+#define read_c0_config7() __read_32bit_c0_register($16, 7)
+#define write_c0_config(val) __write_32bit_c0_register($16, 0, val)
+#define write_c0_config1(val) __write_32bit_c0_register($16, 1, val)
+#define write_c0_config2(val) __write_32bit_c0_register($16, 2, val)
+#define write_c0_config3(val) __write_32bit_c0_register($16, 3, val)
+#define write_c0_config4(val) __write_32bit_c0_register($16, 4, val)
+#define write_c0_config5(val) __write_32bit_c0_register($16, 5, val)
+#define write_c0_config6(val) __write_32bit_c0_register($16, 6, val)
+#define write_c0_config7(val) __write_32bit_c0_register($16, 7, val)
+
+#define read_c0_lladdr() __read_ulong_c0_register($17, 0)
+#define write_c0_lladdr(val) __write_ulong_c0_register($17, 0, val)
+#define read_c0_maar() __read_ulong_c0_register($17, 1)
+#define write_c0_maar(val) __write_ulong_c0_register($17, 1, val)
+#define readx_c0_maar() __readx_32bit_c0_register($17, 1)
+#define writex_c0_maar(val) __writex_32bit_c0_register($17, 1, val)
+#define read_c0_maari() __read_32bit_c0_register($17, 2)
+#define write_c0_maari(val) __write_32bit_c0_register($17, 2, val)
+
+/*
+ * The WatchLo register. There may be up to 8 of them.
+ */
+#define read_c0_watchlo0() __read_ulong_c0_register($18, 0)
+#define read_c0_watchlo1() __read_ulong_c0_register($18, 1)
+#define read_c0_watchlo2() __read_ulong_c0_register($18, 2)
+#define read_c0_watchlo3() __read_ulong_c0_register($18, 3)
+#define read_c0_watchlo4() __read_ulong_c0_register($18, 4)
+#define read_c0_watchlo5() __read_ulong_c0_register($18, 5)
+#define read_c0_watchlo6() __read_ulong_c0_register($18, 6)
+#define read_c0_watchlo7() __read_ulong_c0_register($18, 7)
+#define write_c0_watchlo0(val) __write_ulong_c0_register($18, 0, val)
+#define write_c0_watchlo1(val) __write_ulong_c0_register($18, 1, val)
+#define write_c0_watchlo2(val) __write_ulong_c0_register($18, 2, val)
+#define write_c0_watchlo3(val) __write_ulong_c0_register($18, 3, val)
+#define write_c0_watchlo4(val) __write_ulong_c0_register($18, 4, val)
+#define write_c0_watchlo5(val) __write_ulong_c0_register($18, 5, val)
+#define write_c0_watchlo6(val) __write_ulong_c0_register($18, 6, val)
+#define write_c0_watchlo7(val) __write_ulong_c0_register($18, 7, val)
+
+/*
+ * The WatchHi register. There may be up to 8 of them.
+ */
+#define read_c0_watchhi0() __read_32bit_c0_register($19, 0)
+#define read_c0_watchhi1() __read_32bit_c0_register($19, 1)
+#define read_c0_watchhi2() __read_32bit_c0_register($19, 2)
+#define read_c0_watchhi3() __read_32bit_c0_register($19, 3)
+#define read_c0_watchhi4() __read_32bit_c0_register($19, 4)
+#define read_c0_watchhi5() __read_32bit_c0_register($19, 5)
+#define read_c0_watchhi6() __read_32bit_c0_register($19, 6)
+#define read_c0_watchhi7() __read_32bit_c0_register($19, 7)
+
+#define write_c0_watchhi0(val) __write_32bit_c0_register($19, 0, val)
+#define write_c0_watchhi1(val) __write_32bit_c0_register($19, 1, val)
+#define write_c0_watchhi2(val) __write_32bit_c0_register($19, 2, val)
+#define write_c0_watchhi3(val) __write_32bit_c0_register($19, 3, val)
+#define write_c0_watchhi4(val) __write_32bit_c0_register($19, 4, val)
+#define write_c0_watchhi5(val) __write_32bit_c0_register($19, 5, val)
+#define write_c0_watchhi6(val) __write_32bit_c0_register($19, 6, val)
+#define write_c0_watchhi7(val) __write_32bit_c0_register($19, 7, val)
+
+#define read_c0_xcontext() __read_ulong_c0_register($20, 0)
+#define write_c0_xcontext(val) __write_ulong_c0_register($20, 0, val)
+
+#define read_c0_intcontrol() __read_32bit_c0_ctrl_register($20)
+#define write_c0_intcontrol(val) __write_32bit_c0_ctrl_register($20, val)
+
+#define read_c0_framemask() __read_32bit_c0_register($21, 0)
+#define write_c0_framemask(val) __write_32bit_c0_register($21, 0, val)
+
+#define read_c0_diag() __read_32bit_c0_register($22, 0)
+#define write_c0_diag(val) __write_32bit_c0_register($22, 0, val)
+
+/* R10K CP0 Branch Diagnostic register is 64bits wide */
+#define read_c0_r10k_diag() __read_64bit_c0_register($22, 0)
+#define write_c0_r10k_diag(val) __write_64bit_c0_register($22, 0, val)
+
+#define read_c0_diag1() __read_32bit_c0_register($22, 1)
+#define write_c0_diag1(val) __write_32bit_c0_register($22, 1, val)
+
+#define read_c0_diag2() __read_32bit_c0_register($22, 2)
+#define write_c0_diag2(val) __write_32bit_c0_register($22, 2, val)
+
+#define read_c0_diag3() __read_32bit_c0_register($22, 3)
+#define write_c0_diag3(val) __write_32bit_c0_register($22, 3, val)
+
+#define read_c0_diag4() __read_32bit_c0_register($22, 4)
+#define write_c0_diag4(val) __write_32bit_c0_register($22, 4, val)
+
+#define read_c0_diag5() __read_32bit_c0_register($22, 5)
+#define write_c0_diag5(val) __write_32bit_c0_register($22, 5, val)
+
+#define read_c0_debug() __read_32bit_c0_register($23, 0)
+#define write_c0_debug(val) __write_32bit_c0_register($23, 0, val)
+
+#define read_c0_depc() __read_ulong_c0_register($24, 0)
+#define write_c0_depc(val) __write_ulong_c0_register($24, 0, val)
+
+/*
+ * MIPS32 / MIPS64 performance counters
+ */
+#define read_c0_perfctrl0() __read_32bit_c0_register($25, 0)
+#define write_c0_perfctrl0(val) __write_32bit_c0_register($25, 0, val)
+#define read_c0_perfcntr0() __read_32bit_c0_register($25, 1)
+#define write_c0_perfcntr0(val) __write_32bit_c0_register($25, 1, val)
+#define read_c0_perfcntr0_64() __read_64bit_c0_register($25, 1)
+#define write_c0_perfcntr0_64(val) __write_64bit_c0_register($25, 1, val)
+#define read_c0_perfctrl1() __read_32bit_c0_register($25, 2)
+#define write_c0_perfctrl1(val) __write_32bit_c0_register($25, 2, val)
+#define read_c0_perfcntr1() __read_32bit_c0_register($25, 3)
+#define write_c0_perfcntr1(val) __write_32bit_c0_register($25, 3, val)
+#define read_c0_perfcntr1_64() __read_64bit_c0_register($25, 3)
+#define write_c0_perfcntr1_64(val) __write_64bit_c0_register($25, 3, val)
+#define read_c0_perfctrl2() __read_32bit_c0_register($25, 4)
+#define write_c0_perfctrl2(val) __write_32bit_c0_register($25, 4, val)
+#define read_c0_perfcntr2() __read_32bit_c0_register($25, 5)
+#define write_c0_perfcntr2(val) __write_32bit_c0_register($25, 5, val)
+#define read_c0_perfcntr2_64() __read_64bit_c0_register($25, 5)
+#define write_c0_perfcntr2_64(val) __write_64bit_c0_register($25, 5, val)
+#define read_c0_perfctrl3() __read_32bit_c0_register($25, 6)
+#define write_c0_perfctrl3(val) __write_32bit_c0_register($25, 6, val)
+#define read_c0_perfcntr3() __read_32bit_c0_register($25, 7)
+#define write_c0_perfcntr3(val) __write_32bit_c0_register($25, 7, val)
+#define read_c0_perfcntr3_64() __read_64bit_c0_register($25, 7)
+#define write_c0_perfcntr3_64(val) __write_64bit_c0_register($25, 7, val)
+
+#define read_c0_ecc() __read_32bit_c0_register($26, 0)
+#define write_c0_ecc(val) __write_32bit_c0_register($26, 0, val)
+
+#define read_c0_derraddr0() __read_ulong_c0_register($26, 1)
+#define write_c0_derraddr0(val) __write_ulong_c0_register($26, 1, val)
+
+#define read_c0_cacheerr() __read_32bit_c0_register($27, 0)
+
+#define read_c0_derraddr1() __read_ulong_c0_register($27, 1)
+#define write_c0_derraddr1(val) __write_ulong_c0_register($27, 1, val)
+
+#define read_c0_taglo() __read_32bit_c0_register($28, 0)
+#define write_c0_taglo(val) __write_32bit_c0_register($28, 0, val)
+
+#define read_c0_dtaglo() __read_32bit_c0_register($28, 2)
+#define write_c0_dtaglo(val) __write_32bit_c0_register($28, 2, val)
+
+#define read_c0_ddatalo() __read_32bit_c0_register($28, 3)
+#define write_c0_ddatalo(val) __write_32bit_c0_register($28, 3, val)
+
+#define read_c0_staglo() __read_32bit_c0_register($28, 4)
+#define write_c0_staglo(val) __write_32bit_c0_register($28, 4, val)
+
+#define read_c0_taghi() __read_32bit_c0_register($29, 0)
+#define write_c0_taghi(val) __write_32bit_c0_register($29, 0, val)
+
+#define read_c0_errorepc() __read_ulong_c0_register($30, 0)
+#define write_c0_errorepc(val) __write_ulong_c0_register($30, 0, val)
+
+/* MIPSR2 */
+#define read_c0_hwrena() __read_32bit_c0_register($7, 0)
+#define write_c0_hwrena(val) __write_32bit_c0_register($7, 0, val)
+
+#define read_c0_intctl() __read_32bit_c0_register($12, 1)
+#define write_c0_intctl(val) __write_32bit_c0_register($12, 1, val)
+
+#define read_c0_srsctl() __read_32bit_c0_register($12, 2)
+#define write_c0_srsctl(val) __write_32bit_c0_register($12, 2, val)
+
+#define read_c0_srsmap() __read_32bit_c0_register($12, 3)
+#define write_c0_srsmap(val) __write_32bit_c0_register($12, 3, val)
+
+#define read_c0_ebase() __read_32bit_c0_register($15, 1)
+#define write_c0_ebase(val) __write_32bit_c0_register($15, 1, val)
+
+#define read_c0_ebase_64() __read_64bit_c0_register($15, 1)
+#define write_c0_ebase_64(val) __write_64bit_c0_register($15, 1, val)
+
+#define read_c0_cdmmbase() __read_ulong_c0_register($15, 2)
+#define write_c0_cdmmbase(val) __write_ulong_c0_register($15, 2, val)
+
+/* MIPSR3 */
+#define read_c0_segctl0() __read_32bit_c0_register($5, 2)
+#define write_c0_segctl0(val) __write_32bit_c0_register($5, 2, val)
+
+#define read_c0_segctl1() __read_32bit_c0_register($5, 3)
+#define write_c0_segctl1(val) __write_32bit_c0_register($5, 3, val)
+
+#define read_c0_segctl2() __read_32bit_c0_register($5, 4)
+#define write_c0_segctl2(val) __write_32bit_c0_register($5, 4, val)
+
+/* Hardware Page Table Walker */
+#define read_c0_pwbase() __read_ulong_c0_register($5, 5)
+#define write_c0_pwbase(val) __write_ulong_c0_register($5, 5, val)
+
+#define read_c0_pwfield() __read_ulong_c0_register($5, 6)
+#define write_c0_pwfield(val) __write_ulong_c0_register($5, 6, val)
+
+#define read_c0_pwsize() __read_ulong_c0_register($5, 7)
+#define write_c0_pwsize(val) __write_ulong_c0_register($5, 7, val)
+
+#define read_c0_pwctl() __read_32bit_c0_register($6, 6)
+#define write_c0_pwctl(val) __write_32bit_c0_register($6, 6, val)
+
+#define read_c0_pgd() __read_64bit_c0_register($9, 7)
+#define write_c0_pgd(val) __write_64bit_c0_register($9, 7, val)
+
+#define read_c0_kpgd() __read_64bit_c0_register($31, 7)
+#define write_c0_kpgd(val) __write_64bit_c0_register($31, 7, val)
+
+/* Cavium OCTEON (cnMIPS) */
+#define read_c0_cvmcount() __read_ulong_c0_register($9, 6)
+#define write_c0_cvmcount(val) __write_ulong_c0_register($9, 6, val)
+
+#define read_c0_cvmctl() __read_64bit_c0_register($9, 7)
+#define write_c0_cvmctl(val) __write_64bit_c0_register($9, 7, val)
+
+#define read_c0_cvmmemctl() __read_64bit_c0_register($11, 7)
+#define write_c0_cvmmemctl(val) __write_64bit_c0_register($11, 7, val)
+
+#define read_c0_cvmmemctl2() __read_64bit_c0_register($16, 6)
+#define write_c0_cvmmemctl2(val) __write_64bit_c0_register($16, 6, val)
+
+#define read_c0_cvmvmconfig() __read_64bit_c0_register($16, 7)
+#define write_c0_cvmvmconfig(val) __write_64bit_c0_register($16, 7, val)
+
+/*
+ * The cacheerr registers are not standardized. On OCTEON, they are
+ * 64 bits wide.
+ */
+#define read_octeon_c0_icacheerr() __read_64bit_c0_register($27, 0)
+#define write_octeon_c0_icacheerr(val) __write_64bit_c0_register($27, 0, val)
+
+#define read_octeon_c0_dcacheerr() __read_64bit_c0_register($27, 1)
+#define write_octeon_c0_dcacheerr(val) __write_64bit_c0_register($27, 1, val)
+
+/* BMIPS3300 */
+#define read_c0_brcm_config_0() __read_32bit_c0_register($22, 0)
+#define write_c0_brcm_config_0(val) __write_32bit_c0_register($22, 0, val)
+
+#define read_c0_brcm_bus_pll() __read_32bit_c0_register($22, 4)
+#define write_c0_brcm_bus_pll(val) __write_32bit_c0_register($22, 4, val)
+
+#define read_c0_brcm_reset() __read_32bit_c0_register($22, 5)
+#define write_c0_brcm_reset(val) __write_32bit_c0_register($22, 5, val)
+
+/* BMIPS43xx */
+#define read_c0_brcm_cmt_intr() __read_32bit_c0_register($22, 1)
+#define write_c0_brcm_cmt_intr(val) __write_32bit_c0_register($22, 1, val)
+
+#define read_c0_brcm_cmt_ctrl() __read_32bit_c0_register($22, 2)
+#define write_c0_brcm_cmt_ctrl(val) __write_32bit_c0_register($22, 2, val)
+
+#define read_c0_brcm_cmt_local() __read_32bit_c0_register($22, 3)
+#define write_c0_brcm_cmt_local(val) __write_32bit_c0_register($22, 3, val)
+
+#define read_c0_brcm_config_1() __read_32bit_c0_register($22, 5)
+#define write_c0_brcm_config_1(val) __write_32bit_c0_register($22, 5, val)
+
+#define read_c0_brcm_cbr() __read_32bit_c0_register($22, 6)
+#define write_c0_brcm_cbr(val) __write_32bit_c0_register($22, 6, val)
+
+/* BMIPS5000 */
+#define read_c0_brcm_config() __read_32bit_c0_register($22, 0)
+#define write_c0_brcm_config(val) __write_32bit_c0_register($22, 0, val)
+
+#define read_c0_brcm_mode() __read_32bit_c0_register($22, 1)
+#define write_c0_brcm_mode(val) __write_32bit_c0_register($22, 1, val)
+
+#define read_c0_brcm_action() __read_32bit_c0_register($22, 2)
+#define write_c0_brcm_action(val) __write_32bit_c0_register($22, 2, val)
+
+#define read_c0_brcm_edsp() __read_32bit_c0_register($22, 3)
+#define write_c0_brcm_edsp(val) __write_32bit_c0_register($22, 3, val)
+
+#define read_c0_brcm_bootvec() __read_32bit_c0_register($22, 4)
+#define write_c0_brcm_bootvec(val) __write_32bit_c0_register($22, 4, val)
+
+#define read_c0_brcm_sleepcount() __read_32bit_c0_register($22, 7)
+#define write_c0_brcm_sleepcount(val) __write_32bit_c0_register($22, 7, val)
+
+/* Ingenic page ctrl register */
+#define write_c0_page_ctrl(val) __write_32bit_c0_register($5, 4, val)
+
+/*
+ * Macros to access the guest system control coprocessor
+ */
+
+#ifndef TOOLCHAIN_SUPPORTS_VIRT
+_ASM_MACRO_2R_1S(mfgc0, rt, rs, sel,
+ _ASM_INSN_IF_MIPS(0x40600000 | __rt << 16 | __rs << 11 | \\sel)
+ _ASM_INSN32_IF_MM(0x000004fc | __rt << 21 | __rs << 16 | \\sel << 11));
+_ASM_MACRO_2R_1S(dmfgc0, rt, rs, sel,
+ _ASM_INSN_IF_MIPS(0x40600100 | __rt << 16 | __rs << 11 | \\sel)
+ _ASM_INSN32_IF_MM(0x580004fc | __rt << 21 | __rs << 16 | \\sel << 11));
+_ASM_MACRO_2R_1S(mtgc0, rt, rd, sel,
+ _ASM_INSN_IF_MIPS(0x40600200 | __rt << 16 | __rd << 11 | \\sel)
+ _ASM_INSN32_IF_MM(0x000006fc | __rt << 21 | __rd << 16 | \\sel << 11));
+_ASM_MACRO_2R_1S(dmtgc0, rt, rd, sel,
+ _ASM_INSN_IF_MIPS(0x40600300 | __rt << 16 | __rd << 11 | \\sel)
+ _ASM_INSN32_IF_MM(0x580006fc | __rt << 21 | __rd << 16 | \\sel << 11));
+_ASM_MACRO_0(tlbgp, _ASM_INSN_IF_MIPS(0x42000010)
+ _ASM_INSN32_IF_MM(0x0000017c));
+_ASM_MACRO_0(tlbgr, _ASM_INSN_IF_MIPS(0x42000009)
+ _ASM_INSN32_IF_MM(0x0000117c));
+_ASM_MACRO_0(tlbgwi, _ASM_INSN_IF_MIPS(0x4200000a)
+ _ASM_INSN32_IF_MM(0x0000217c));
+_ASM_MACRO_0(tlbgwr, _ASM_INSN_IF_MIPS(0x4200000e)
+ _ASM_INSN32_IF_MM(0x0000317c));
+_ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
+ _ASM_INSN32_IF_MM(0x0000517c));
+#define _ASM_SET_VIRT ""
+#else /* !TOOLCHAIN_SUPPORTS_VIRT */
+#define _ASM_SET_VIRT ".set\tvirt\n\t"
+#endif
+
+#define __read_32bit_gc0_register(source, sel) \
+({ int __res; \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips32r5\n\t" \
+ _ASM_SET_VIRT \
+ "mfgc0\t%0, " #source ", %1\n\t" \
+ ".set\tpop" \
+ : "=r" (__res) \
+ : "i" (sel)); \
+ __res; \
+})
+
+#define __read_64bit_gc0_register(source, sel) \
+({ unsigned long long __res; \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips64r5\n\t" \
+ _ASM_SET_VIRT \
+ "dmfgc0\t%0, " #source ", %1\n\t" \
+ ".set\tpop" \
+ : "=r" (__res) \
+ : "i" (sel)); \
+ __res; \
+})
+
+#define __write_32bit_gc0_register(register, sel, value) \
+do { \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips32r5\n\t" \
+ _ASM_SET_VIRT \
+ "mtgc0\t%z0, " #register ", %1\n\t" \
+ ".set\tpop" \
+ : : "Jr" ((unsigned int)(value)), \
+ "i" (sel)); \
+} while (0)
+
+#define __write_64bit_gc0_register(register, sel, value) \
+do { \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips64r5\n\t" \
+ _ASM_SET_VIRT \
+ "dmtgc0\t%z0, " #register ", %1\n\t" \
+ ".set\tpop" \
+ : : "Jr" (value), \
+ "i" (sel)); \
+} while (0)
+
+#define __read_ulong_gc0_register(reg, sel) \
+ ((sizeof(unsigned long) == 4) ? \
+ (unsigned long) __read_32bit_gc0_register(reg, sel) : \
+ (unsigned long) __read_64bit_gc0_register(reg, sel))
+
+#define __write_ulong_gc0_register(reg, sel, val) \
+do { \
+ if (sizeof(unsigned long) == 4) \
+ __write_32bit_gc0_register(reg, sel, val); \
+ else \
+ __write_64bit_gc0_register(reg, sel, val); \
+} while (0)
+
+#define read_gc0_index() __read_32bit_gc0_register($0, 0)
+#define write_gc0_index(val) __write_32bit_gc0_register($0, 0, val)
+
+#define read_gc0_entrylo0() __read_ulong_gc0_register($2, 0)
+#define write_gc0_entrylo0(val) __write_ulong_gc0_register($2, 0, val)
+
+#define read_gc0_entrylo1() __read_ulong_gc0_register($3, 0)
+#define write_gc0_entrylo1(val) __write_ulong_gc0_register($3, 0, val)
+
+#define read_gc0_context() __read_ulong_gc0_register($4, 0)
+#define write_gc0_context(val) __write_ulong_gc0_register($4, 0, val)
+
+#define read_gc0_contextconfig() __read_32bit_gc0_register($4, 1)
+#define write_gc0_contextconfig(val) __write_32bit_gc0_register($4, 1, val)
+
+#define read_gc0_userlocal() __read_ulong_gc0_register($4, 2)
+#define write_gc0_userlocal(val) __write_ulong_gc0_register($4, 2, val)
+
+#define read_gc0_xcontextconfig() __read_ulong_gc0_register($4, 3)
+#define write_gc0_xcontextconfig(val) __write_ulong_gc0_register($4, 3, val)
+
+#define read_gc0_pagemask() __read_32bit_gc0_register($5, 0)
+#define write_gc0_pagemask(val) __write_32bit_gc0_register($5, 0, val)
+
+#define read_gc0_pagegrain() __read_32bit_gc0_register($5, 1)
+#define write_gc0_pagegrain(val) __write_32bit_gc0_register($5, 1, val)
+
+#define read_gc0_segctl0() __read_ulong_gc0_register($5, 2)
+#define write_gc0_segctl0(val) __write_ulong_gc0_register($5, 2, val)
+
+#define read_gc0_segctl1() __read_ulong_gc0_register($5, 3)
+#define write_gc0_segctl1(val) __write_ulong_gc0_register($5, 3, val)
+
+#define read_gc0_segctl2() __read_ulong_gc0_register($5, 4)
+#define write_gc0_segctl2(val) __write_ulong_gc0_register($5, 4, val)
+
+#define read_gc0_pwbase() __read_ulong_gc0_register($5, 5)
+#define write_gc0_pwbase(val) __write_ulong_gc0_register($5, 5, val)
+
+#define read_gc0_pwfield() __read_ulong_gc0_register($5, 6)
+#define write_gc0_pwfield(val) __write_ulong_gc0_register($5, 6, val)
+
+#define read_gc0_pwsize() __read_ulong_gc0_register($5, 7)
+#define write_gc0_pwsize(val) __write_ulong_gc0_register($5, 7, val)
+
+#define read_gc0_wired() __read_32bit_gc0_register($6, 0)
+#define write_gc0_wired(val) __write_32bit_gc0_register($6, 0, val)
+
+#define read_gc0_pwctl() __read_32bit_gc0_register($6, 6)
+#define write_gc0_pwctl(val) __write_32bit_gc0_register($6, 6, val)
+
+#define read_gc0_hwrena() __read_32bit_gc0_register($7, 0)
+#define write_gc0_hwrena(val) __write_32bit_gc0_register($7, 0, val)
+
+#define read_gc0_badvaddr() __read_ulong_gc0_register($8, 0)
+#define write_gc0_badvaddr(val) __write_ulong_gc0_register($8, 0, val)
+
+#define read_gc0_badinstr() __read_32bit_gc0_register($8, 1)
+#define write_gc0_badinstr(val) __write_32bit_gc0_register($8, 1, val)
+
+#define read_gc0_badinstrp() __read_32bit_gc0_register($8, 2)
+#define write_gc0_badinstrp(val) __write_32bit_gc0_register($8, 2, val)
+
+#define read_gc0_count() __read_32bit_gc0_register($9, 0)
+
+#define read_gc0_entryhi() __read_ulong_gc0_register($10, 0)
+#define write_gc0_entryhi(val) __write_ulong_gc0_register($10, 0, val)
+
+#define read_gc0_compare() __read_32bit_gc0_register($11, 0)
+#define write_gc0_compare(val) __write_32bit_gc0_register($11, 0, val)
+
+#define read_gc0_status() __read_32bit_gc0_register($12, 0)
+#define write_gc0_status(val) __write_32bit_gc0_register($12, 0, val)
+
+#define read_gc0_intctl() __read_32bit_gc0_register($12, 1)
+#define write_gc0_intctl(val) __write_32bit_gc0_register($12, 1, val)
+
+#define read_gc0_cause() __read_32bit_gc0_register($13, 0)
+#define write_gc0_cause(val) __write_32bit_gc0_register($13, 0, val)
+
+#define read_gc0_epc() __read_ulong_gc0_register($14, 0)
+#define write_gc0_epc(val) __write_ulong_gc0_register($14, 0, val)
+
+#define read_gc0_prid() __read_32bit_gc0_register($15, 0)
+
+#define read_gc0_ebase() __read_32bit_gc0_register($15, 1)
+#define write_gc0_ebase(val) __write_32bit_gc0_register($15, 1, val)
+
+#define read_gc0_ebase_64() __read_64bit_gc0_register($15, 1)
+#define write_gc0_ebase_64(val) __write_64bit_gc0_register($15, 1, val)
+
+#define read_gc0_config() __read_32bit_gc0_register($16, 0)
+#define read_gc0_config1() __read_32bit_gc0_register($16, 1)
+#define read_gc0_config2() __read_32bit_gc0_register($16, 2)
+#define read_gc0_config3() __read_32bit_gc0_register($16, 3)
+#define read_gc0_config4() __read_32bit_gc0_register($16, 4)
+#define read_gc0_config5() __read_32bit_gc0_register($16, 5)
+#define read_gc0_config6() __read_32bit_gc0_register($16, 6)
+#define read_gc0_config7() __read_32bit_gc0_register($16, 7)
+#define write_gc0_config(val) __write_32bit_gc0_register($16, 0, val)
+#define write_gc0_config1(val) __write_32bit_gc0_register($16, 1, val)
+#define write_gc0_config2(val) __write_32bit_gc0_register($16, 2, val)
+#define write_gc0_config3(val) __write_32bit_gc0_register($16, 3, val)
+#define write_gc0_config4(val) __write_32bit_gc0_register($16, 4, val)
+#define write_gc0_config5(val) __write_32bit_gc0_register($16, 5, val)
+#define write_gc0_config6(val) __write_32bit_gc0_register($16, 6, val)
+#define write_gc0_config7(val) __write_32bit_gc0_register($16, 7, val)
+
+#define read_gc0_lladdr() __read_ulong_gc0_register($17, 0)
+#define write_gc0_lladdr(val) __write_ulong_gc0_register($17, 0, val)
+
+#define read_gc0_watchlo0() __read_ulong_gc0_register($18, 0)
+#define read_gc0_watchlo1() __read_ulong_gc0_register($18, 1)
+#define read_gc0_watchlo2() __read_ulong_gc0_register($18, 2)
+#define read_gc0_watchlo3() __read_ulong_gc0_register($18, 3)
+#define read_gc0_watchlo4() __read_ulong_gc0_register($18, 4)
+#define read_gc0_watchlo5() __read_ulong_gc0_register($18, 5)
+#define read_gc0_watchlo6() __read_ulong_gc0_register($18, 6)
+#define read_gc0_watchlo7() __read_ulong_gc0_register($18, 7)
+#define write_gc0_watchlo0(val) __write_ulong_gc0_register($18, 0, val)
+#define write_gc0_watchlo1(val) __write_ulong_gc0_register($18, 1, val)
+#define write_gc0_watchlo2(val) __write_ulong_gc0_register($18, 2, val)
+#define write_gc0_watchlo3(val) __write_ulong_gc0_register($18, 3, val)
+#define write_gc0_watchlo4(val) __write_ulong_gc0_register($18, 4, val)
+#define write_gc0_watchlo5(val) __write_ulong_gc0_register($18, 5, val)
+#define write_gc0_watchlo6(val) __write_ulong_gc0_register($18, 6, val)
+#define write_gc0_watchlo7(val) __write_ulong_gc0_register($18, 7, val)
+
+#define read_gc0_watchhi0() __read_32bit_gc0_register($19, 0)
+#define read_gc0_watchhi1() __read_32bit_gc0_register($19, 1)
+#define read_gc0_watchhi2() __read_32bit_gc0_register($19, 2)
+#define read_gc0_watchhi3() __read_32bit_gc0_register($19, 3)
+#define read_gc0_watchhi4() __read_32bit_gc0_register($19, 4)
+#define read_gc0_watchhi5() __read_32bit_gc0_register($19, 5)
+#define read_gc0_watchhi6() __read_32bit_gc0_register($19, 6)
+#define read_gc0_watchhi7() __read_32bit_gc0_register($19, 7)
+#define write_gc0_watchhi0(val) __write_32bit_gc0_register($19, 0, val)
+#define write_gc0_watchhi1(val) __write_32bit_gc0_register($19, 1, val)
+#define write_gc0_watchhi2(val) __write_32bit_gc0_register($19, 2, val)
+#define write_gc0_watchhi3(val) __write_32bit_gc0_register($19, 3, val)
+#define write_gc0_watchhi4(val) __write_32bit_gc0_register($19, 4, val)
+#define write_gc0_watchhi5(val) __write_32bit_gc0_register($19, 5, val)
+#define write_gc0_watchhi6(val) __write_32bit_gc0_register($19, 6, val)
+#define write_gc0_watchhi7(val) __write_32bit_gc0_register($19, 7, val)
+
+#define read_gc0_xcontext() __read_ulong_gc0_register($20, 0)
+#define write_gc0_xcontext(val) __write_ulong_gc0_register($20, 0, val)
+
+#define read_gc0_perfctrl0() __read_32bit_gc0_register($25, 0)
+#define write_gc0_perfctrl0(val) __write_32bit_gc0_register($25, 0, val)
+#define read_gc0_perfcntr0() __read_32bit_gc0_register($25, 1)
+#define write_gc0_perfcntr0(val) __write_32bit_gc0_register($25, 1, val)
+#define read_gc0_perfcntr0_64() __read_64bit_gc0_register($25, 1)
+#define write_gc0_perfcntr0_64(val) __write_64bit_gc0_register($25, 1, val)
+#define read_gc0_perfctrl1() __read_32bit_gc0_register($25, 2)
+#define write_gc0_perfctrl1(val) __write_32bit_gc0_register($25, 2, val)
+#define read_gc0_perfcntr1() __read_32bit_gc0_register($25, 3)
+#define write_gc0_perfcntr1(val) __write_32bit_gc0_register($25, 3, val)
+#define read_gc0_perfcntr1_64() __read_64bit_gc0_register($25, 3)
+#define write_gc0_perfcntr1_64(val) __write_64bit_gc0_register($25, 3, val)
+#define read_gc0_perfctrl2() __read_32bit_gc0_register($25, 4)
+#define write_gc0_perfctrl2(val) __write_32bit_gc0_register($25, 4, val)
+#define read_gc0_perfcntr2() __read_32bit_gc0_register($25, 5)
+#define write_gc0_perfcntr2(val) __write_32bit_gc0_register($25, 5, val)
+#define read_gc0_perfcntr2_64() __read_64bit_gc0_register($25, 5)
+#define write_gc0_perfcntr2_64(val) __write_64bit_gc0_register($25, 5, val)
+#define read_gc0_perfctrl3() __read_32bit_gc0_register($25, 6)
+#define write_gc0_perfctrl3(val) __write_32bit_gc0_register($25, 6, val)
+#define read_gc0_perfcntr3() __read_32bit_gc0_register($25, 7)
+#define write_gc0_perfcntr3(val) __write_32bit_gc0_register($25, 7, val)
+#define read_gc0_perfcntr3_64() __read_64bit_gc0_register($25, 7)
+#define write_gc0_perfcntr3_64(val) __write_64bit_gc0_register($25, 7, val)
+
+#define read_gc0_errorepc() __read_ulong_gc0_register($30, 0)
+#define write_gc0_errorepc(val) __write_ulong_gc0_register($30, 0, val)
+
+#define read_gc0_kscratch1() __read_ulong_gc0_register($31, 2)
+#define read_gc0_kscratch2() __read_ulong_gc0_register($31, 3)
+#define read_gc0_kscratch3() __read_ulong_gc0_register($31, 4)
+#define read_gc0_kscratch4() __read_ulong_gc0_register($31, 5)
+#define read_gc0_kscratch5() __read_ulong_gc0_register($31, 6)
+#define read_gc0_kscratch6() __read_ulong_gc0_register($31, 7)
+#define write_gc0_kscratch1(val) __write_ulong_gc0_register($31, 2, val)
+#define write_gc0_kscratch2(val) __write_ulong_gc0_register($31, 3, val)
+#define write_gc0_kscratch3(val) __write_ulong_gc0_register($31, 4, val)
+#define write_gc0_kscratch4(val) __write_ulong_gc0_register($31, 5, val)
+#define write_gc0_kscratch5(val) __write_ulong_gc0_register($31, 6, val)
+#define write_gc0_kscratch6(val) __write_ulong_gc0_register($31, 7, val)
+
+/* Cavium OCTEON (cnMIPS) */
+#define read_gc0_cvmcount() __read_ulong_gc0_register($9, 6)
+#define write_gc0_cvmcount(val) __write_ulong_gc0_register($9, 6, val)
+
+#define read_gc0_cvmctl() __read_64bit_gc0_register($9, 7)
+#define write_gc0_cvmctl(val) __write_64bit_gc0_register($9, 7, val)
+
+#define read_gc0_cvmmemctl() __read_64bit_gc0_register($11, 7)
+#define write_gc0_cvmmemctl(val) __write_64bit_gc0_register($11, 7, val)
+
+#define read_gc0_cvmmemctl2() __read_64bit_gc0_register($16, 6)
+#define write_gc0_cvmmemctl2(val) __write_64bit_gc0_register($16, 6, val)
+
+/*
+ * Macros to access the floating point coprocessor control registers
+ */
+#define _read_32bit_cp1_register(source, gas_hardfloat) \
+({ \
+ unsigned int __res; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set reorder \n" \
+ " # gas fails to assemble cfc1 for some archs, \n" \
+ " # like Octeon. \n" \
+ " .set mips1 \n" \
+ " "STR(gas_hardfloat)" \n" \
+ " cfc1 %0,"STR(source)" \n" \
+ " .set pop \n" \
+ : "=r" (__res)); \
+ __res; \
+})
+
+#define _write_32bit_cp1_register(dest, val, gas_hardfloat) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set reorder \n" \
+ " "STR(gas_hardfloat)" \n" \
+ " ctc1 %0,"STR(dest)" \n" \
+ " .set pop \n" \
+ : : "r" (val)); \
+} while (0)
+
+#ifdef GAS_HAS_SET_HARDFLOAT
+#define read_32bit_cp1_register(source) \
+ _read_32bit_cp1_register(source, .set hardfloat)
+#define write_32bit_cp1_register(dest, val) \
+ _write_32bit_cp1_register(dest, val, .set hardfloat)
+#else
+#define read_32bit_cp1_register(source) \
+ _read_32bit_cp1_register(source, )
+#define write_32bit_cp1_register(dest, val) \
+ _write_32bit_cp1_register(dest, val, )
+#endif
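+
+/*
+ * Usage sketch: the CP1 control registers named near the top of this file
+ * are accessed through these wrappers, e.g.
+ *
+ *	unsigned int fcsr = read_32bit_cp1_register(CP1_STATUS);
+ *	if (fcsr & FPU_CSR_NAN2008)
+ *		...	(2008-style NaN encoding is in effect)
+ *
+ * The register argument must be one of the CP1_* names (or a literal $n),
+ * since it is stringified and pasted directly into the cfc1/ctc1 operand.
+ */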
+
+#ifdef TOOLCHAIN_SUPPORTS_DSP
+#define rddsp(mask) \
+({ \
+ unsigned int __dspctl; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " rddsp %0, %x1 \n" \
+ " .set pop \n" \
+ : "=r" (__dspctl) \
+ : "i" (mask)); \
+ __dspctl; \
+})
+
+#define wrdsp(val, mask) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " wrdsp %0, %x1 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (val), "i" (mask)); \
+} while (0)
+
+#define mflo0() \
+({ \
+ long mflo0; \
+ __asm__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " mflo %0, $ac0 \n" \
+ " .set pop \n" \
+ : "=r" (mflo0)); \
+ mflo0; \
+})
+
+#define mflo1() \
+({ \
+ long mflo1; \
+ __asm__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " mflo %0, $ac1 \n" \
+ " .set pop \n" \
+ : "=r" (mflo1)); \
+ mflo1; \
+})
+
+#define mflo2() \
+({ \
+ long mflo2; \
+ __asm__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " mflo %0, $ac2 \n" \
+ " .set pop \n" \
+ : "=r" (mflo2)); \
+ mflo2; \
+})
+
+#define mflo3() \
+({ \
+ long mflo3; \
+ __asm__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " mflo %0, $ac3 \n" \
+ " .set pop \n" \
+ : "=r" (mflo3)); \
+ mflo3; \
+})
+
+#define mfhi0() \
+({ \
+ long mfhi0; \
+ __asm__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " mfhi %0, $ac0 \n" \
+ " .set pop \n" \
+ : "=r" (mfhi0)); \
+ mfhi0; \
+})
+
+#define mfhi1() \
+({ \
+ long mfhi1; \
+ __asm__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " mfhi %0, $ac1 \n" \
+ " .set pop \n" \
+ : "=r" (mfhi1)); \
+ mfhi1; \
+})
+
+#define mfhi2() \
+({ \
+ long mfhi2; \
+ __asm__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " mfhi %0, $ac2 \n" \
+ " .set pop \n" \
+ : "=r" (mfhi2)); \
+ mfhi2; \
+})
+
+#define mfhi3() \
+({ \
+ long mfhi3; \
+ __asm__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " mfhi %0, $ac3 \n" \
+ " .set pop \n" \
+ : "=r" (mfhi3)); \
+ mfhi3; \
+})
+
+
+#define mtlo0(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " mtlo %0, $ac0 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mtlo1(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " mtlo %0, $ac1 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mtlo2(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " mtlo %0, $ac2 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mtlo3(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " mtlo %0, $ac3 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mthi0(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " mthi %0, $ac0 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mthi1(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " mthi %0, $ac1 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mthi2(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " mthi %0, $ac2 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mthi3(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " .set dsp \n" \
+ " mthi %0, $ac3 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#else
+
+#define rddsp(mask) \
+({ \
+ unsigned int __res; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " # rddsp $1, %x1 \n" \
+ _ASM_INSN_IF_MIPS(0x7c000cb8 | (%x1 << 16)) \
+ _ASM_INSN32_IF_MM(0x0020067c | (%x1 << 14)) \
+ " move %0, $1 \n" \
+ " .set pop \n" \
+ : "=r" (__res) \
+ : "i" (mask)); \
+ __res; \
+})
+
+#define wrdsp(val, mask) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " move $1, %0 \n" \
+ " # wrdsp $1, %x1 \n" \
+ _ASM_INSN_IF_MIPS(0x7c2004f8 | (%x1 << 11)) \
+ _ASM_INSN32_IF_MM(0x0020167c | (%x1 << 14)) \
+ " .set pop \n" \
+ : \
+ : "r" (val), "i" (mask)); \
+} while (0)
+
+#define _dsp_mfxxx(ins) \
+({ \
+ unsigned long __treg; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ _ASM_INSN_IF_MIPS(0x00000810 | %X1) \
+ _ASM_INSN32_IF_MM(0x0001007c | %x1) \
+ " move %0, $1 \n" \
+ " .set pop \n" \
+ : "=r" (__treg) \
+ : "i" (ins)); \
+ __treg; \
+})
+
+#define _dsp_mtxxx(val, ins) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " move $1, %0 \n" \
+ _ASM_INSN_IF_MIPS(0x00200011 | %X1) \
+ _ASM_INSN32_IF_MM(0x0001207c | %x1) \
+ " .set pop \n" \
+ : \
+ : "r" (val), "i" (ins)); \
+} while (0)
+
+#ifdef CONFIG_CPU_MICROMIPS
+
+#define _dsp_mflo(reg) _dsp_mfxxx((reg << 14) | 0x1000)
+#define _dsp_mfhi(reg) _dsp_mfxxx((reg << 14) | 0x0000)
+
+#define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 14) | 0x1000))
+#define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 14) | 0x0000))
+
+#else /* !CONFIG_CPU_MICROMIPS */
+
+#define _dsp_mflo(reg) _dsp_mfxxx((reg << 21) | 0x0002)
+#define _dsp_mfhi(reg) _dsp_mfxxx((reg << 21) | 0x0000)
+
+#define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0002))
+#define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0000))
+
+#endif /* CONFIG_CPU_MICROMIPS */
+
+#define mflo0() _dsp_mflo(0)
+#define mflo1() _dsp_mflo(1)
+#define mflo2() _dsp_mflo(2)
+#define mflo3() _dsp_mflo(3)
+
+#define mfhi0() _dsp_mfhi(0)
+#define mfhi1() _dsp_mfhi(1)
+#define mfhi2() _dsp_mfhi(2)
+#define mfhi3() _dsp_mfhi(3)
+
+#define mtlo0(x) _dsp_mtlo(x, 0)
+#define mtlo1(x) _dsp_mtlo(x, 1)
+#define mtlo2(x) _dsp_mtlo(x, 2)
+#define mtlo3(x) _dsp_mtlo(x, 3)
+
+#define mthi0(x) _dsp_mthi(x, 0)
+#define mthi1(x) _dsp_mthi(x, 1)
+#define mthi2(x) _dsp_mthi(x, 2)
+#define mthi3(x) _dsp_mthi(x, 3)
+
+#endif
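+
+/*
+ * Usage sketch: both the toolchain-assembled variants above and the
+ * hand-encoded fallbacks provide the same interface, so saving and
+ * restoring DSP accumulator 1 and the DSPControl register might look like
+ *
+ *	long lo = mflo1();
+ *	long hi = mfhi1();
+ *	unsigned int dspc = rddsp(0x3f);	(the mask value is illustrative)
+ *	...
+ *	wrdsp(dspc, 0x3f);
+ *	mtlo1(lo);
+ *	mthi1(hi);
+ */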
+
+/*
+ * TLB operations.
+ *
+ * It is the responsibility of the caller to take care of any TLB hazards.
+ */
+static inline void tlb_probe(void)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ "tlbp\n\t"
+ ".set reorder");
+}
+
+static inline void tlb_read(void)
+{
+#ifdef CONFIG_WAR_MIPS34K_MISSED_ITLB
+ int res = 0;
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder \n"
+ " .set noat \n"
+ " .set mips32r2 \n"
+ " .word 0x41610001 # dvpe $1 \n"
+ " move %0, $1 \n"
+ " ehb \n"
+ " .set pop \n"
+ : "=r" (res));
+
+ instruction_hazard();
+#endif
+
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ "tlbr\n\t"
+ ".set reorder");
+
+#ifdef CONFIG_WAR_MIPS34K_MISSED_ITLB
+ if ((res & _ULCAST_(1)))
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder \n"
+ " .set noat \n"
+ " .set mips32r2 \n"
+ " .word 0x41600021 # evpe \n"
+ " ehb \n"
+ " .set pop \n");
+#endif
+}
+
+static inline void tlb_write_indexed(void)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ "tlbwi\n\t"
+ ".set reorder");
+}
+
+static inline void tlb_write_random(void)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ "tlbwr\n\t"
+ ".set reorder");
+}
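+
+/*
+ * Usage sketch: a wired TLB entry is typically installed by loading the
+ * staging registers and then issuing an indexed write, e.g.
+ *
+ *	write_c0_entryhi(vaddr_and_asid);
+ *	write_c0_entrylo0(pfn0_and_flags);
+ *	write_c0_entrylo1(pfn1_and_flags);
+ *	write_c0_pagemask(mask);
+ *	write_c0_index(slot);
+ *	tlb_write_indexed();
+ *
+ * with the appropriate mtc0/tlbw hazard barriers inserted by the caller,
+ * as noted above. The operand names here are placeholders.
+ */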
+
+/*
+ * Guest TLB operations.
+ *
+ * It is the responsibility of the caller to take care of any TLB hazards.
+ */
+static inline void guest_tlb_probe(void)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ _ASM_SET_VIRT
+ "tlbgp\n\t"
+ ".set pop");
+}
+
+static inline void guest_tlb_read(void)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ _ASM_SET_VIRT
+ "tlbgr\n\t"
+ ".set pop");
+}
+
+static inline void guest_tlb_write_indexed(void)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ _ASM_SET_VIRT
+ "tlbgwi\n\t"
+ ".set pop");
+}
+
+static inline void guest_tlb_write_random(void)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ _ASM_SET_VIRT
+ "tlbgwr\n\t"
+ ".set pop");
+}
+
+/*
+ * Guest TLB Invalidate Flush
+ */
+static inline void guest_tlbinvf(void)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ _ASM_SET_VIRT
+ "tlbginvf\n\t"
+ ".set pop");
+}
+
+/*
+ * Manipulate bits in a register.
+ */
+#define __BUILD_SET_COMMON(name) \
+static inline unsigned int \
+set_##name(unsigned int set) \
+{ \
+ unsigned int res, new; \
+ \
+ res = read_##name(); \
+ new = res | set; \
+ write_##name(new); \
+ \
+ return res; \
+} \
+ \
+static inline unsigned int \
+clear_##name(unsigned int clear) \
+{ \
+ unsigned int res, new; \
+ \
+ res = read_##name(); \
+ new = res & ~clear; \
+ write_##name(new); \
+ \
+ return res; \
+} \
+ \
+static inline unsigned int \
+change_##name(unsigned int change, unsigned int val) \
+{ \
+ unsigned int res, new; \
+ \
+ res = read_##name(); \
+ new = res & ~change; \
+ new |= (val & change); \
+ write_##name(new); \
+ \
+ return res; \
+}
+
+/*
+ * Manipulate bits in a c0 register.
+ */
+#define __BUILD_SET_C0(name) __BUILD_SET_COMMON(c0_##name)
+
+__BUILD_SET_C0(status)
+__BUILD_SET_C0(cause)
+__BUILD_SET_C0(config)
+__BUILD_SET_C0(config5)
+__BUILD_SET_C0(config6)
+__BUILD_SET_C0(config7)
+__BUILD_SET_C0(diag)
+__BUILD_SET_C0(intcontrol)
+__BUILD_SET_C0(intctl)
+__BUILD_SET_C0(srsmap)
+__BUILD_SET_C0(pagegrain)
+__BUILD_SET_C0(guestctl0)
+__BUILD_SET_C0(guestctl0ext)
+__BUILD_SET_C0(guestctl1)
+__BUILD_SET_C0(guestctl2)
+__BUILD_SET_C0(guestctl3)
+__BUILD_SET_C0(brcm_config_0)
+__BUILD_SET_C0(brcm_bus_pll)
+__BUILD_SET_C0(brcm_reset)
+__BUILD_SET_C0(brcm_cmt_intr)
+__BUILD_SET_C0(brcm_cmt_ctrl)
+__BUILD_SET_C0(brcm_config)
+__BUILD_SET_C0(brcm_mode)
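+
+/*
+ * Illustrative example: each __BUILD_SET_C0(name) invocation above
+ * generates set_c0_<name>(), clear_c0_<name>() and change_c0_<name>(),
+ * all returning the previous register value. For instance
+ *
+ *	unsigned int old = set_c0_status(ST0_CU1);
+ *
+ * (with ST0_CU1 assumed to be defined earlier in this header) reads
+ * Status, ORs in the CU1 bit, writes the result back and returns the old
+ * contents.
+ */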
+
+/*
+ * Manipulate bits in a guest c0 register.
+ */
+#define __BUILD_SET_GC0(name) __BUILD_SET_COMMON(gc0_##name)
+
+__BUILD_SET_GC0(wired)
+__BUILD_SET_GC0(status)
+__BUILD_SET_GC0(cause)
+__BUILD_SET_GC0(ebase)
+__BUILD_SET_GC0(config1)
+
+/*
+ * Return the low 10 bits of ebase.
+ * Note that under KVM (MIPSVZ) this returns the vcpu id.
+ */
+static inline unsigned int get_ebase_cpunum(void)
+{
+ return read_c0_ebase() & MIPS_EBASE_CPUNUM;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_MIPSREGS_H */
diff --git a/arch/mips/include/asm/mmiowb.h b/arch/mips/include/asm/mmiowb.h
new file mode 100644
index 000000000..a40824e3e
--- /dev/null
+++ b/arch/mips/include/asm/mmiowb.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MMIOWB_H
+#define _ASM_MMIOWB_H
+
+#include <asm/io.h>
+
+#define mmiowb() iobarrier_w()
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_MMIOWB_H */
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
new file mode 100644
index 000000000..5df0238f6
--- /dev/null
+++ b/arch/mips/include/asm/mmu.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMU_H
+#define __ASM_MMU_H
+
+#include <linux/atomic.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+typedef struct {
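+ /*
+ * Tag used to distinguish this mm's TLB entries: a per-CPU ASID on
+ * traditional cores, or a single global MMID when cpu_has_mmid.
+ */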
+ union {
+ u64 asid[NR_CPUS];
+ atomic64_t mmid;
+ };
+
+ void *vdso;
+
+ /* lock to be held whilst modifying fp_bd_emupage_allocmap */
+ spinlock_t bd_emupage_lock;
+ /* bitmap tracking allocation of fp_bd_emupage */
+ unsigned long *bd_emupage_allocmap;
+ /* wait queue for threads requiring an emuframe */
+ wait_queue_head_t bd_emupage_queue;
+} mm_context_t;
+
+#endif /* __ASM_MMU_H */
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
new file mode 100644
index 000000000..cddead91a
--- /dev/null
+++ b/arch/mips/include/asm/mmu_context.h
@@ -0,0 +1,240 @@
+/*
+ * Switch a MMU context.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_MMU_CONTEXT_H
+#define _ASM_MMU_CONTEXT_H
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm_types.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+
+#include <asm/barrier.h>
+#include <asm/cacheflush.h>
+#include <asm/dsemul.h>
+#include <asm/ginvt.h>
+#include <asm/hazards.h>
+#include <asm/tlbflush.h>
+#include <asm-generic/mm_hooks.h>
+
+#define htw_set_pwbase(pgd) \
+do { \
+ if (cpu_has_htw) { \
+ write_c0_pwbase(pgd); \
+ back_to_back_c0_hazard(); \
+ } \
+} while (0)
+
+extern void tlbmiss_handler_setup_pgd(unsigned long);
+extern char tlbmiss_handler_setup_pgd_end[];
+
+/* Note: This is also implemented with uasm in arch/mips/kvm/entry.c */
+#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
+do { \
+ tlbmiss_handler_setup_pgd((unsigned long)(pgd)); \
+ htw_set_pwbase((unsigned long)pgd); \
+} while (0)
+
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+
+#define TLBMISS_HANDLER_RESTORE() \
+ write_c0_xcontext((unsigned long) smp_processor_id() << \
+ SMP_CPUID_REGSHIFT)
+
+#define TLBMISS_HANDLER_SETUP() \
+ do { \
+ TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); \
+ TLBMISS_HANDLER_RESTORE(); \
+ } while (0)
+
+#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current */
+
+/*
+ * For the fast TLB miss handlers, we keep a per-CPU array of pointers
+ * to the current pgd for each processor. The processor id is also
+ * stuffed into the context register.
+ */
+extern unsigned long pgd_current[];
+
+#define TLBMISS_HANDLER_RESTORE() \
+ write_c0_context((unsigned long) smp_processor_id() << \
+ SMP_CPUID_REGSHIFT)
+
+#define TLBMISS_HANDLER_SETUP() \
+ TLBMISS_HANDLER_RESTORE(); \
+ back_to_back_c0_hazard(); \
+ TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
+#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
+
+/*
+ * The ginvt instruction will invalidate wired entries when its type field
+ * targets anything other than the entire TLB. That means that if we were to
+ * allow the kernel to create wired entries with the MMID of current->active_mm
+ * then those wired entries could be invalidated when we later use ginvt to
+ * invalidate TLB entries with that MMID.
+ *
+ * In order to prevent ginvt from trashing wired entries, we reserve one MMID
+ * for use by the kernel when creating wired entries. This MMID will never be
+ * assigned to a struct mm, and we'll never target it with a ginvt instruction.
+ */
+#define MMID_KERNEL_WIRED 0
+
+/*
+ * All upper bits that are unused by the hardware are treated as a
+ * software ASID extension (the "version").
+ */
+static inline u64 asid_version_mask(unsigned int cpu)
+{
+ unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
+
+ return ~(u64)(asid_mask | (asid_mask - 1));
+}
+
+static inline u64 asid_first_version(unsigned int cpu)
+{
+ return ~asid_version_mask(cpu) + 1;
+}
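+
+/*
+ * Worked example (assuming an 8-bit hardware ASID, cpu_asid_mask() == 0xff):
+ * asid_version_mask() == ~0xffULL, so the software version lives in bits
+ * 63..8, and asid_first_version() == 0x100. The "| (asid_mask - 1)" term
+ * also covers masks that do not start at bit 0, e.g. a mask of 0xfc0 gives
+ * a version mask of ~0xfffULL.
+ */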
+
+static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
+{
+ if (cpu_has_mmid)
+ return atomic64_read(&mm->context.mmid);
+
+ return mm->context.asid[cpu];
+}
+
+static inline void set_cpu_context(unsigned int cpu,
+ struct mm_struct *mm, u64 ctx)
+{
+ if (cpu_has_mmid)
+ atomic64_set(&mm->context.mmid, ctx);
+ else
+ mm->context.asid[cpu] = ctx;
+}
+
+#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
+#define cpu_asid(cpu, mm) \
+ (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+extern void get_new_mmu_context(struct mm_struct *mm);
+extern void check_mmu_context(struct mm_struct *mm);
+extern void check_switch_mmu_context(struct mm_struct *mm);
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ int i;
+
+ if (cpu_has_mmid) {
+ set_cpu_context(0, mm, 0);
+ } else {
+ for_each_possible_cpu(i)
+ set_cpu_context(i, mm, 0);
+ }
+
+ mm->context.bd_emupage_allocmap = NULL;
+ spin_lock_init(&mm->context.bd_emupage_lock);
+ init_waitqueue_head(&mm->context.bd_emupage_queue);
+
+ return 0;
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long flags;
+ local_irq_save(flags);
+
+ htw_stop();
+ check_switch_mmu_context(next);
+
+ /*
+ * Mark current->active_mm as not "active" anymore.
+ * We don't want to mislead possible IPI tlb flush routines.
+ */
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ htw_start();
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Destroy context related info for an mm_struct that is about
+ * to be put to rest.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+ dsemul_mm_cleanup(mm);
+}
+
+#define activate_mm(prev, next) switch_mm(prev, next, current)
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+static inline void
+drop_mmu_context(struct mm_struct *mm)
+{
+ unsigned long flags;
+ unsigned int cpu;
+ u32 old_mmid;
+ u64 ctx;
+
+ local_irq_save(flags);
+
+ cpu = smp_processor_id();
+ ctx = cpu_context(cpu, mm);
+
+ if (!ctx) {
+ /* no-op */
+ } else if (cpu_has_mmid) {
+ /*
+ * Globally invalidating TLB entries associated with the MMID
+ * is pretty cheap using the GINVT instruction, so we'll do
+ * that rather than incur the overhead of allocating a new
+ * MMID. The latter would be especially difficult since MMIDs
+ * are global & other CPUs may be actively using ctx.
+ */
+ htw_stop();
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu]));
+ mtc0_tlbw_hazard();
+ ginvt_mmid();
+ sync_ginv();
+ write_c0_memorymapid(old_mmid);
+ instruction_hazard();
+ htw_start();
+ } else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+ /*
+ * mm is currently active, so we can't really drop it.
+ * Instead we bump the ASID.
+ */
+ htw_stop();
+ get_new_mmu_context(mm);
+ write_c0_entryhi(cpu_asid(cpu, mm));
+ htw_start();
+ } else {
+ /* will get a new context next time */
+ set_cpu_context(cpu, mm, 0);
+ }
+
+ local_irq_restore(flags);
+}
+
+#endif /* _ASM_MMU_CONTEXT_H */
diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h
new file mode 100644
index 000000000..b826b8473
--- /dev/null
+++ b/arch/mips/include/asm/mmzone.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
+ * Rewritten for Linux 2.6 by Christoph Hellwig (hch@lst.de) Jan 2004
+ */
+#ifndef _ASM_MMZONE_H_
+#define _ASM_MMZONE_H_
+
+#include <asm/page.h>
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+# include <mmzone.h>
+#endif
+
+#ifndef pa_to_nid
+#define pa_to_nid(addr) 0
+#endif
+
+#ifndef nid_to_addrbase
+#define nid_to_addrbase(nid) 0
+#endif
+
+#ifdef CONFIG_DISCONTIGMEM
+
+#define pfn_to_nid(pfn) pa_to_nid((pfn) << PAGE_SHIFT)
+
+#endif /* CONFIG_DISCONTIGMEM */
+
+#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
new file mode 100644
index 000000000..724a08825
--- /dev/null
+++ b/arch/mips/include/asm/module.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MODULE_H
+#define _ASM_MODULE_H
+
+#include <linux/list.h>
+#include <linux/elf.h>
+#include <asm/extable.h>
+
+struct mod_arch_specific {
+ /* Data Bus Error exception tables */
+ struct list_head dbe_list;
+ const struct exception_table_entry *dbe_start;
+ const struct exception_table_entry *dbe_end;
+ struct mips_hi16 *r_mips_hi16_list;
+};
+
+typedef uint8_t Elf64_Byte; /* Type for an 8-bit quantity. */
+
+typedef struct {
+ Elf64_Addr r_offset; /* Address of relocation. */
+ Elf64_Word r_sym; /* Symbol index. */
+ Elf64_Byte r_ssym; /* Special symbol. */
+ Elf64_Byte r_type3; /* Third relocation. */
+ Elf64_Byte r_type2; /* Second relocation. */
+ Elf64_Byte r_type; /* First relocation. */
+} Elf64_Mips_Rel;
+
+typedef struct {
+ Elf64_Addr r_offset; /* Address of relocation. */
+ Elf64_Word r_sym; /* Symbol index. */
+ Elf64_Byte r_ssym; /* Special symbol. */
+ Elf64_Byte r_type3; /* Third relocation. */
+ Elf64_Byte r_type2; /* Second relocation. */
+ Elf64_Byte r_type; /* First relocation. */
+ Elf64_Sxword r_addend; /* Addend. */
+} Elf64_Mips_Rela;
+
+#ifdef CONFIG_32BIT
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Ehdr Elf32_Ehdr
+#define Elf_Addr Elf32_Addr
+#define Elf_Rel Elf32_Rel
+#define Elf_Rela Elf32_Rela
+#define ELF_R_TYPE(X) ELF32_R_TYPE(X)
+#define ELF_R_SYM(X) ELF32_R_SYM(X)
+
+#define Elf_Mips_Rel Elf32_Rel
+#define Elf_Mips_Rela Elf32_Rela
+
+#define ELF_MIPS_R_SYM(rel) ELF32_R_SYM((rel).r_info)
+#define ELF_MIPS_R_TYPE(rel) ELF32_R_TYPE((rel).r_info)
+
+#endif
+
+#ifdef CONFIG_64BIT
+#define Elf_Shdr Elf64_Shdr
+#define Elf_Sym Elf64_Sym
+#define Elf_Ehdr Elf64_Ehdr
+#define Elf_Addr Elf64_Addr
+#define Elf_Rel Elf64_Rel
+#define Elf_Rela Elf64_Rela
+#define ELF_R_TYPE(X) ELF64_R_TYPE(X)
+#define ELF_R_SYM(X) ELF64_R_SYM(X)
+
+#define Elf_Mips_Rel Elf64_Mips_Rel
+#define Elf_Mips_Rela Elf64_Mips_Rela
+
+#define ELF_MIPS_R_SYM(rel) ((rel).r_sym)
+#define ELF_MIPS_R_TYPE(rel) ((rel).r_type)
+
+#endif
+
+#ifdef CONFIG_MODULES
+/* Given an address, look for it in the exception tables. */
+const struct exception_table_entry *search_module_dbetables(unsigned long addr);
+#else
+/* Given an address, look for it in the exception tables. */
+static inline const struct exception_table_entry *
+search_module_dbetables(unsigned long addr)
+{
+ return NULL;
+}
+#endif
+
+#endif /* _ASM_MODULE_H */
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
new file mode 100644
index 000000000..e0a3dd523
--- /dev/null
+++ b/arch/mips/include/asm/msa.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+#ifndef _ASM_MSA_H
+#define _ASM_MSA_H
+
+#include <asm/mipsregs.h>
+
+#ifndef __ASSEMBLY__
+
+#include <asm/inst.h>
+
+extern void _save_msa(struct task_struct *);
+extern void _restore_msa(struct task_struct *);
+extern void _init_msa_upper(void);
+
+extern void read_msa_wr_b(unsigned idx, union fpureg *to);
+extern void read_msa_wr_h(unsigned idx, union fpureg *to);
+extern void read_msa_wr_w(unsigned idx, union fpureg *to);
+extern void read_msa_wr_d(unsigned idx, union fpureg *to);
+
+/**
+ * read_msa_wr() - Read a single MSA vector register
+ * @idx: The index of the vector register to read
+ * @to: The FPU register union to store the register's value in
+ * @fmt: The format of the data in the vector register
+ *
+ * Read the value of MSA vector register idx into the FPU register
+ * union to, using the format fmt.
+ */
+static inline void read_msa_wr(unsigned idx, union fpureg *to,
+ enum msa_2b_fmt fmt)
+{
+ switch (fmt) {
+ case msa_fmt_b:
+ read_msa_wr_b(idx, to);
+ break;
+
+ case msa_fmt_h:
+ read_msa_wr_h(idx, to);
+ break;
+
+ case msa_fmt_w:
+ read_msa_wr_w(idx, to);
+ break;
+
+ case msa_fmt_d:
+ read_msa_wr_d(idx, to);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+extern void write_msa_wr_b(unsigned idx, union fpureg *from);
+extern void write_msa_wr_h(unsigned idx, union fpureg *from);
+extern void write_msa_wr_w(unsigned idx, union fpureg *from);
+extern void write_msa_wr_d(unsigned idx, union fpureg *from);
+
+/**
+ * write_msa_wr() - Write a single MSA vector register
+ * @idx: The index of the vector register to write
+ * @from: The FPU register union to take the register's value from
+ * @fmt: The format of the data in the vector register
+ *
+ * Write the value from the FPU register union from into MSA vector
+ * register idx, using the format fmt.
+ */
+static inline void write_msa_wr(unsigned idx, union fpureg *from,
+ enum msa_2b_fmt fmt)
+{
+ switch (fmt) {
+ case msa_fmt_b:
+ write_msa_wr_b(idx, from);
+ break;
+
+ case msa_fmt_h:
+ write_msa_wr_h(idx, from);
+ break;
+
+ case msa_fmt_w:
+ write_msa_wr_w(idx, from);
+ break;
+
+ case msa_fmt_d:
+ write_msa_wr_d(idx, from);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+static inline void enable_msa(void)
+{
+ if (cpu_has_msa) {
+ set_c0_config5(MIPS_CONF5_MSAEN);
+ enable_fpu_hazard();
+ }
+}
+
+static inline void disable_msa(void)
+{
+ if (cpu_has_msa) {
+ clear_c0_config5(MIPS_CONF5_MSAEN);
+ disable_fpu_hazard();
+ }
+}
+
+static inline int is_msa_enabled(void)
+{
+ if (!cpu_has_msa)
+ return 0;
+
+ return read_c0_config5() & MIPS_CONF5_MSAEN;
+}
+
+static inline int thread_msa_context_live(void)
+{
+ /*
+ * Check cpu_has_msa only if it's a constant. This will allow the
+ * compiler to optimise out code for CPUs without MSA without adding
+ * an extra redundant check for CPUs with MSA.
+ */
+ if (__builtin_constant_p(cpu_has_msa) && !cpu_has_msa)
+ return 0;
+
+ return test_thread_flag(TIF_MSA_CTX_LIVE);
+}
+
+static inline void save_msa(struct task_struct *t)
+{
+ if (cpu_has_msa)
+ _save_msa(t);
+}
+
+static inline void restore_msa(struct task_struct *t)
+{
+ if (cpu_has_msa)
+ _restore_msa(t);
+}
+
+static inline void init_msa_upper(void)
+{
+ /*
+ * Check cpu_has_msa only if it's a constant. This will allow the
+ * compiler to optimise out code for CPUs without MSA without adding
+ * an extra redundant check for CPUs with MSA.
+ */
+ if (__builtin_constant_p(cpu_has_msa) && !cpu_has_msa)
+ return;
+
+ _init_msa_upper();
+}
+
+#ifndef TOOLCHAIN_SUPPORTS_MSA
+/*
+ * Define assembler macros using .word for the c[ft]cmsa instructions in order
+ * to allow compilation with toolchains that do not support MSA. Once all
+ * toolchains in use support MSA these can be removed.
+ */
+_ASM_MACRO_2R(cfcmsa, rd, cs,
+ _ASM_INSN_IF_MIPS(0x787e0019 | __cs << 11 | __rd << 6)
+ _ASM_INSN32_IF_MM(0x587e0016 | __cs << 11 | __rd << 6));
+_ASM_MACRO_2R(ctcmsa, cd, rs,
+ _ASM_INSN_IF_MIPS(0x783e0019 | __rs << 11 | __cd << 6)
+ _ASM_INSN32_IF_MM(0x583e0016 | __rs << 11 | __cd << 6));
+#define _ASM_SET_MSA ""
+#else /* TOOLCHAIN_SUPPORTS_MSA */
+#define _ASM_SET_MSA ".set\tfp=64\n\t" \
+ ".set\tmsa\n\t"
+#endif
+
+#define __BUILD_MSA_CTL_REG(name, cs) \
+static inline unsigned int read_msa_##name(void) \
+{ \
+ unsigned int reg; \
+ __asm__ __volatile__( \
+ " .set push\n" \
+ _ASM_SET_MSA \
+ " cfcmsa %0, $" #cs "\n" \
+ " .set pop\n" \
+ : "=r"(reg)); \
+ return reg; \
+} \
+ \
+static inline void write_msa_##name(unsigned int val) \
+{ \
+ __asm__ __volatile__( \
+ " .set push\n" \
+ _ASM_SET_MSA \
+ " ctcmsa $" #cs ", %0\n" \
+ " .set pop\n" \
+ : : "r"(val)); \
+}
+
+__BUILD_MSA_CTL_REG(ir, 0)
+__BUILD_MSA_CTL_REG(csr, 1)
+__BUILD_MSA_CTL_REG(access, 2)
+__BUILD_MSA_CTL_REG(save, 3)
+__BUILD_MSA_CTL_REG(modify, 4)
+__BUILD_MSA_CTL_REG(request, 5)
+__BUILD_MSA_CTL_REG(map, 6)
+__BUILD_MSA_CTL_REG(unmap, 7)
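+
+/*
+ * Illustrative use of the generated accessors (a sketch, not new API): the
+ * expansions above provide e.g. read_msa_csr()/write_msa_csr() for MSA
+ * control register $1, so clearing the cause bits might look like:
+ *
+ *	unsigned int csr = read_msa_csr();
+ *	write_msa_csr(csr & ~MSA_CSR_CAUSEF);
+ */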
+
+#endif /* !__ASSEMBLY__ */
+
+#define MSA_IR 0
+#define MSA_CSR 1
+#define MSA_ACCESS 2
+#define MSA_SAVE 3
+#define MSA_MODIFY 4
+#define MSA_REQUEST 5
+#define MSA_MAP 6
+#define MSA_UNMAP 7
+
+/* MSA Implementation Register (MSAIR) */
+#define MSA_IR_REVB 0
+#define MSA_IR_REVF (_ULCAST_(0xff) << MSA_IR_REVB)
+#define MSA_IR_PROCB 8
+#define MSA_IR_PROCF (_ULCAST_(0xff) << MSA_IR_PROCB)
+#define MSA_IR_WRPB 16
+#define MSA_IR_WRPF (_ULCAST_(0x1) << MSA_IR_WRPB)
+
+/* MSA Control & Status Register (MSACSR) */
+#define MSA_CSR_RMB 0
+#define MSA_CSR_RMF (_ULCAST_(0x3) << MSA_CSR_RMB)
+#define MSA_CSR_RM_NEAREST 0
+#define MSA_CSR_RM_TO_ZERO 1
+#define MSA_CSR_RM_TO_POS 2
+#define MSA_CSR_RM_TO_NEG 3
+#define MSA_CSR_FLAGSB 2
+#define MSA_CSR_FLAGSF (_ULCAST_(0x1f) << MSA_CSR_FLAGSB)
+#define MSA_CSR_FLAGS_IB 2
+#define MSA_CSR_FLAGS_IF (_ULCAST_(0x1) << MSA_CSR_FLAGS_IB)
+#define MSA_CSR_FLAGS_UB 3
+#define MSA_CSR_FLAGS_UF (_ULCAST_(0x1) << MSA_CSR_FLAGS_UB)
+#define MSA_CSR_FLAGS_OB 4
+#define MSA_CSR_FLAGS_OF (_ULCAST_(0x1) << MSA_CSR_FLAGS_OB)
+#define MSA_CSR_FLAGS_ZB 5
+#define MSA_CSR_FLAGS_ZF (_ULCAST_(0x1) << MSA_CSR_FLAGS_ZB)
+#define MSA_CSR_FLAGS_VB 6
+#define MSA_CSR_FLAGS_VF (_ULCAST_(0x1) << MSA_CSR_FLAGS_VB)
+#define MSA_CSR_ENABLESB 7
+#define MSA_CSR_ENABLESF (_ULCAST_(0x1f) << MSA_CSR_ENABLESB)
+#define MSA_CSR_ENABLES_IB 7
+#define MSA_CSR_ENABLES_IF (_ULCAST_(0x1) << MSA_CSR_ENABLES_IB)
+#define MSA_CSR_ENABLES_UB 8
+#define MSA_CSR_ENABLES_UF (_ULCAST_(0x1) << MSA_CSR_ENABLES_UB)
+#define MSA_CSR_ENABLES_OB 9
+#define MSA_CSR_ENABLES_OF (_ULCAST_(0x1) << MSA_CSR_ENABLES_OB)
+#define MSA_CSR_ENABLES_ZB 10
+#define MSA_CSR_ENABLES_ZF (_ULCAST_(0x1) << MSA_CSR_ENABLES_ZB)
+#define MSA_CSR_ENABLES_VB 11
+#define MSA_CSR_ENABLES_VF (_ULCAST_(0x1) << MSA_CSR_ENABLES_VB)
+#define MSA_CSR_CAUSEB 12
+#define MSA_CSR_CAUSEF (_ULCAST_(0x3f) << MSA_CSR_CAUSEB)
+#define MSA_CSR_CAUSE_IB 12
+#define MSA_CSR_CAUSE_IF (_ULCAST_(0x1) << MSA_CSR_CAUSE_IB)
+#define MSA_CSR_CAUSE_UB 13
+#define MSA_CSR_CAUSE_UF (_ULCAST_(0x1) << MSA_CSR_CAUSE_UB)
+#define MSA_CSR_CAUSE_OB 14
+#define MSA_CSR_CAUSE_OF (_ULCAST_(0x1) << MSA_CSR_CAUSE_OB)
+#define MSA_CSR_CAUSE_ZB 15
+#define MSA_CSR_CAUSE_ZF (_ULCAST_(0x1) << MSA_CSR_CAUSE_ZB)
+#define MSA_CSR_CAUSE_VB 16
+#define MSA_CSR_CAUSE_VF (_ULCAST_(0x1) << MSA_CSR_CAUSE_VB)
+#define MSA_CSR_CAUSE_EB 17
+#define MSA_CSR_CAUSE_EF (_ULCAST_(0x1) << MSA_CSR_CAUSE_EB)
+#define MSA_CSR_NXB 18
+#define MSA_CSR_NXF (_ULCAST_(0x1) << MSA_CSR_NXB)
+#define MSA_CSR_FSB 24
+#define MSA_CSR_FSF (_ULCAST_(0x1) << MSA_CSR_FSB)
+
+#endif /* _ASM_MSA_H */
diff --git a/arch/mips/include/asm/msc01_ic.h b/arch/mips/include/asm/msc01_ic.h
new file mode 100644
index 000000000..ff7f074d0
--- /dev/null
+++ b/arch/mips/include/asm/msc01_ic.h
@@ -0,0 +1,147 @@
+/*
+ * Interrupt controller register definitions for the MIPS System Controller.
+ *
+ * Copyright (C) 2004 MIPS Technologies, Inc. All rights reserved.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef __ASM_MIPS_BOARDS_MSC01_IC_H
+#define __ASM_MIPS_BOARDS_MSC01_IC_H
+
+/*****************************************************************************
+ * Register offset addresses
+ *****************************************************************************/
+
+#define MSC01_IC_RST_OFS 0x00008 /* Software reset */
+#define MSC01_IC_ENAL_OFS 0x00100 /* Int_in enable mask 31:0 */
+#define MSC01_IC_ENAH_OFS 0x00108 /* Int_in enable mask 63:32 */
+#define MSC01_IC_DISL_OFS 0x00120 /* Int_in disable mask 31:0 */
+#define MSC01_IC_DISH_OFS 0x00128 /* Int_in disable mask 63:32 */
+#define MSC01_IC_ISBL_OFS 0x00140 /* Raw int_in 31:0 */
+#define MSC01_IC_ISBH_OFS 0x00148 /* Raw int_in 63:32 */
+#define MSC01_IC_ISAL_OFS 0x00160 /* Masked int_in 31:0 */
+#define MSC01_IC_ISAH_OFS 0x00168 /* Masked int_in 63:32 */
+#define MSC01_IC_LVL_OFS 0x00180 /* Disable priority int_out */
+#define MSC01_IC_RAMW_OFS 0x00180 /* Shadow set RAM (EI) */
+#define MSC01_IC_OSB_OFS 0x00188 /* Raw int_out */
+#define MSC01_IC_OSA_OFS 0x00190 /* Masked int_out */
+#define MSC01_IC_GENA_OFS 0x00198 /* Global HW int enable */
+#define MSC01_IC_BASE_OFS 0x001a0 /* Base address of IC_VEC */
+#define MSC01_IC_VEC_OFS 0x001b0 /* Active int's vector address */
+#define MSC01_IC_EOI_OFS 0x001c0 /* Enable lower level ints */
+#define MSC01_IC_CFG_OFS 0x001c8 /* Configuration register */
+#define MSC01_IC_TRLD_OFS 0x001d0 /* Interval timer reload val */
+#define MSC01_IC_TVAL_OFS 0x001e0 /* Interval timer current val */
+#define MSC01_IC_TCFG_OFS 0x001f0 /* Interval timer config */
+#define MSC01_IC_SUP_OFS 0x00200 /* Set up int_in line 0 */
+#define MSC01_IC_ENA_OFS 0x00800 /* Int_in enable mask 63:0 */
+#define MSC01_IC_DIS_OFS 0x00820 /* Int_in disable mask 63:0 */
+#define MSC01_IC_ISB_OFS 0x00840 /* Raw int_in 63:0 */
+#define MSC01_IC_ISA_OFS 0x00860 /* Masked int_in 63:0 */
+
+/*****************************************************************************
+ * Register field encodings
+ *****************************************************************************/
+
+#define MSC01_IC_RST_RST_SHF 0
+#define MSC01_IC_RST_RST_MSK 0x00000001
+#define MSC01_IC_RST_RST_BIT MSC01_IC_RST_RST_MSK
+#define MSC01_IC_LVL_LVL_SHF 0
+#define MSC01_IC_LVL_LVL_MSK 0x000000ff
+#define MSC01_IC_LVL_SPUR_SHF 16
+#define MSC01_IC_LVL_SPUR_MSK 0x00010000
+#define MSC01_IC_LVL_SPUR_BIT MSC01_IC_LVL_SPUR_MSK
+#define MSC01_IC_RAMW_RIPL_SHF 0
+#define MSC01_IC_RAMW_RIPL_MSK 0x0000003f
+#define MSC01_IC_RAMW_DATA_SHF 6
+#define MSC01_IC_RAMW_DATA_MSK 0x00000fc0
+#define MSC01_IC_RAMW_ADDR_SHF 25
+#define MSC01_IC_RAMW_ADDR_MSK 0x7e000000
+#define MSC01_IC_RAMW_READ_SHF 31
+#define MSC01_IC_RAMW_READ_MSK 0x80000000
+#define MSC01_IC_RAMW_READ_BIT MSC01_IC_RAMW_READ_MSK
+#define MSC01_IC_OSB_OSB_SHF 0
+#define MSC01_IC_OSB_OSB_MSK 0x000000ff
+#define MSC01_IC_OSA_OSA_SHF 0
+#define MSC01_IC_OSA_OSA_MSK 0x000000ff
+#define MSC01_IC_GENA_GENA_SHF 0
+#define MSC01_IC_GENA_GENA_MSK 0x00000001
+#define MSC01_IC_GENA_GENA_BIT MSC01_IC_GENA_GENA_MSK
+#define MSC01_IC_CFG_DIS_SHF 0
+#define MSC01_IC_CFG_DIS_MSK 0x00000001
+#define MSC01_IC_CFG_DIS_BIT MSC01_IC_CFG_DIS_MSK
+#define MSC01_IC_CFG_SHFT_SHF 8
+#define MSC01_IC_CFG_SHFT_MSK 0x00000f00
+#define MSC01_IC_TCFG_ENA_SHF 0
+#define MSC01_IC_TCFG_ENA_MSK 0x00000001
+#define MSC01_IC_TCFG_ENA_BIT MSC01_IC_TCFG_ENA_MSK
+#define MSC01_IC_TCFG_INT_SHF 8
+#define MSC01_IC_TCFG_INT_MSK 0x00000100
+#define MSC01_IC_TCFG_INT_BIT MSC01_IC_TCFG_INT_MSK
+#define MSC01_IC_TCFG_EDGE_SHF 16
+#define MSC01_IC_TCFG_EDGE_MSK 0x00010000
+#define MSC01_IC_TCFG_EDGE_BIT MSC01_IC_TCFG_EDGE_MSK
+#define MSC01_IC_SUP_PRI_SHF 0
+#define MSC01_IC_SUP_PRI_MSK 0x00000007
+#define MSC01_IC_SUP_EDGE_SHF 8
+#define MSC01_IC_SUP_EDGE_MSK 0x00000100
+#define MSC01_IC_SUP_EDGE_BIT MSC01_IC_SUP_EDGE_MSK
+#define MSC01_IC_SUP_STEP 8
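+
+/*
+ * The per-input setup registers are spaced MSC01_IC_SUP_STEP bytes apart,
+ * so (illustratively) the setup register for int_in line n lives at
+ * MSC01_IC_SUP_OFS + n * MSC01_IC_SUP_STEP.
+ */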
+
+/*
+ * MIPS System Controller interrupt register base.
+ */
+
+/*****************************************************************************
+ * Absolute register addresses
+ *****************************************************************************/
+
+#define MSC01_IC_RST (MSC01_IC_REG_BASE + MSC01_IC_RST_OFS)
+#define MSC01_IC_ENAL (MSC01_IC_REG_BASE + MSC01_IC_ENAL_OFS)
+#define MSC01_IC_ENAH (MSC01_IC_REG_BASE + MSC01_IC_ENAH_OFS)
+#define MSC01_IC_DISL (MSC01_IC_REG_BASE + MSC01_IC_DISL_OFS)
+#define MSC01_IC_DISH (MSC01_IC_REG_BASE + MSC01_IC_DISH_OFS)
+#define MSC01_IC_ISBL (MSC01_IC_REG_BASE + MSC01_IC_ISBL_OFS)
+#define MSC01_IC_ISBH (MSC01_IC_REG_BASE + MSC01_IC_ISBH_OFS)
+#define MSC01_IC_ISAL (MSC01_IC_REG_BASE + MSC01_IC_ISAL_OFS)
+#define MSC01_IC_ISAH (MSC01_IC_REG_BASE + MSC01_IC_ISAH_OFS)
+#define MSC01_IC_LVL (MSC01_IC_REG_BASE + MSC01_IC_LVL_OFS)
+#define MSC01_IC_RAMW (MSC01_IC_REG_BASE + MSC01_IC_RAMW_OFS)
+#define MSC01_IC_OSB (MSC01_IC_REG_BASE + MSC01_IC_OSB_OFS)
+#define MSC01_IC_OSA (MSC01_IC_REG_BASE + MSC01_IC_OSA_OFS)
+#define MSC01_IC_GENA (MSC01_IC_REG_BASE + MSC01_IC_GENA_OFS)
+#define MSC01_IC_BASE (MSC01_IC_REG_BASE + MSC01_IC_BASE_OFS)
+#define MSC01_IC_VEC (MSC01_IC_REG_BASE + MSC01_IC_VEC_OFS)
+#define MSC01_IC_EOI (MSC01_IC_REG_BASE + MSC01_IC_EOI_OFS)
+#define MSC01_IC_CFG (MSC01_IC_REG_BASE + MSC01_IC_CFG_OFS)
+#define MSC01_IC_TRLD (MSC01_IC_REG_BASE + MSC01_IC_TRLD_OFS)
+#define MSC01_IC_TVAL (MSC01_IC_REG_BASE + MSC01_IC_TVAL_OFS)
+#define MSC01_IC_TCFG (MSC01_IC_REG_BASE + MSC01_IC_TCFG_OFS)
+#define MSC01_IC_SUP (MSC01_IC_REG_BASE + MSC01_IC_SUP_OFS)
+#define MSC01_IC_ENA (MSC01_IC_REG_BASE + MSC01_IC_ENA_OFS)
+#define MSC01_IC_DIS (MSC01_IC_REG_BASE + MSC01_IC_DIS_OFS)
+#define MSC01_IC_ISB (MSC01_IC_REG_BASE + MSC01_IC_ISB_OFS)
+#define MSC01_IC_ISA (MSC01_IC_REG_BASE + MSC01_IC_ISA_OFS)
+
+/*
+ * Soc-it interrupts are configurable.
+ * Every board describes its IRQ mapping with this table.
+ */
+typedef struct msc_irqmap {
+ int im_irq;
+ int im_type;
+ int im_lvl;
+} msc_irqmap_t;
+
+/* im_type */
+#define MSC01_IRQ_LEVEL 0
+#define MSC01_IRQ_EDGE 1
+
+extern void __init init_msc_irqs(unsigned long icubase, unsigned int base, msc_irqmap_t *imp, int nirq);
+extern void ll_msc_irq(void);
+
+#endif /* __ASM_MIPS_BOARDS_MSC01_IC_H */
diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h
new file mode 100644
index 000000000..57616649b
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/common.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NETLOGIC_COMMON_H_
+#define _NETLOGIC_COMMON_H_
+
+/*
+ * Common SMP definitions
+ */
+#define RESET_VEC_PHYS 0x1fc00000
+#define RESET_VEC_SIZE 8192 /* 8KB reset code and data */
+#define RESET_DATA_PHYS (RESET_VEC_PHYS + (1<<10))
+
+/* Offsets of parameters in the RESET_DATA_PHYS area */
+#define BOOT_THREAD_MODE 0
+#define BOOT_NMI_LOCK 4
+#define BOOT_NMI_HANDLER 8
+
+/* CPU ready flags for each CPU */
+#define BOOT_CPU_READY 2048
+
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+#include <linux/spinlock.h>
+#include <asm/irq.h>
+#include <asm/mach-netlogic/multi-node.h>
+
+struct irq_desc;
+void nlm_smp_function_ipi_handler(struct irq_desc *desc);
+void nlm_smp_resched_ipi_handler(struct irq_desc *desc);
+void nlm_smp_irq_init(int hwcpuid);
+void nlm_boot_secondary_cpus(void);
+int nlm_wakeup_secondary_cpus(void);
+void nlm_rmiboot_preboot(void);
+void nlm_percpu_init(int hwcpuid);
+
+static inline void *
+nlm_get_boot_data(int offset)
+{
+ return (void *)(CKSEG1ADDR(RESET_DATA_PHYS) + offset);
+}
+
+static inline void
+nlm_set_nmi_handler(void *handler)
+{
+ void *nmih = nlm_get_boot_data(BOOT_NMI_HANDLER);
+
+ *(int64_t *)nmih = (long)handler;
+}
+
+/*
+ * Misc.
+ */
+void nlm_init_boot_cpu(void);
+unsigned int nlm_get_cpu_frequency(void);
+extern const struct plat_smp_ops nlm_smp_ops;
+extern char nlm_reset_entry[], nlm_reset_entry_end[];
+
+extern unsigned int nlm_threads_per_core;
+extern cpumask_t nlm_cpumask;
+
+struct irq_data;
+uint64_t nlm_pci_irqmask(int node);
+void nlm_setup_pic_irq(int node, int picirq, int irq, int irt);
+void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *));
+
+#ifdef CONFIG_PCI_MSI
+void nlm_dispatch_msi(int node, int lirq);
+void nlm_dispatch_msix(int node, int msixirq);
+#endif
+
+/*
+ * NR_IRQS is divided between the nodes; each node has a separate IRQ space.
+ */
+static inline int nlm_irq_to_xirq(int node, int irq)
+{
+ return node * NR_IRQS / NLM_NR_NODES + irq;
+}
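+
+/*
+ * Worked example (illustrative values): with NR_IRQS == 256 and
+ * NLM_NR_NODES == 4, each node owns 64 IRQs, so nlm_irq_to_xirq(1, 5)
+ * returns 69.
+ */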
+
+#ifdef CONFIG_CPU_XLR
+#define nlm_cores_per_node() 8
+#else
+static inline int nlm_cores_per_node(void)
+{
+ return ((read_c0_prid() & PRID_IMP_MASK)
+ == PRID_IMP_NETLOGIC_XLP9XX) ? 32 : 8;
+}
+#endif
+static inline int nlm_threads_per_node(void)
+{
+ return nlm_cores_per_node() * NLM_THREADS_PER_CORE;
+}
+
+static inline int nlm_hwtid_to_node(int hwtid)
+{
+ return hwtid / nlm_threads_per_node();
+}
+
+extern int nlm_cpu_ready[];
+#endif /* __ASSEMBLY__ */
+#endif /* _NETLOGIC_COMMON_H_ */
diff --git a/arch/mips/include/asm/netlogic/haldefs.h b/arch/mips/include/asm/netlogic/haldefs.h
new file mode 100644
index 000000000..79c7cccdc
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/haldefs.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __NLM_HAL_HALDEFS_H__
+#define __NLM_HAL_HALDEFS_H__
+
+#include <linux/irqflags.h> /* for local_irq_disable */
+
+/*
+ * This file contains the platform-specific memory-mapped I/O implementation
+ * and provides a way to read 32/64 bit memory-mapped registers in all ABIs.
+ */
+static inline uint32_t
+nlm_read_reg(uint64_t base, uint32_t reg)
+{
+ volatile uint32_t *addr = (volatile uint32_t *)(long)base + reg;
+
+ return *addr;
+}
+
+static inline void
+nlm_write_reg(uint64_t base, uint32_t reg, uint32_t val)
+{
+ volatile uint32_t *addr = (volatile uint32_t *)(long)base + reg;
+
+ *addr = val;
+}
+
+/*
+ * For o32 compilation, we have to disable interrupts to access 64 bit
+ * registers
+ *
+ * We need to disable interrupts because we save just the lower 32 bits of
+ * registers in interrupt handling. So if we get hit by an interrupt while
+ * using the upper 32 bits of a register, we lose.
+ */
+
+static inline uint64_t
+nlm_read_reg64(uint64_t base, uint32_t reg)
+{
+ uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
+ volatile uint64_t *ptr = (volatile uint64_t *)(long)addr;
+ uint64_t val;
+
+ if (sizeof(unsigned long) == 4) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __asm__ __volatile__(
+ ".set push" "\n\t"
+ ".set mips64" "\n\t"
+ "ld %L0, %1" "\n\t"
+ "dsra32 %M0, %L0, 0" "\n\t"
+ "sll %L0, %L0, 0" "\n\t"
+ ".set pop" "\n"
+ : "=r" (val)
+ : "m" (*ptr));
+ local_irq_restore(flags);
+ } else
+ val = *ptr;
+
+ return val;
+}
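+
+/*
+ * In the o32 branch above, %L0/%M0 name the low/high halves of the 64-bit
+ * C value: "ld" pulls the full doubleword into the low-half register,
+ * "dsra32" copies its upper 32 bits into the high half, and "sll ..., 0"
+ * sign-extends the low half back to a canonical 32-bit value.
+ */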
+
+static inline void
+nlm_write_reg64(uint64_t base, uint32_t reg, uint64_t val)
+{
+ uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
+ volatile uint64_t *ptr = (volatile uint64_t *)(long)addr;
+
+ if (sizeof(unsigned long) == 4) {
+ unsigned long flags;
+ uint64_t tmp;
+
+ local_irq_save(flags);
+ __asm__ __volatile__(
+ ".set push" "\n\t"
+ ".set mips64" "\n\t"
+ "dsll32 %L0, %L0, 0" "\n\t"
+ "dsrl32 %L0, %L0, 0" "\n\t"
+ "dsll32 %M0, %M0, 0" "\n\t"
+ "or %L0, %L0, %M0" "\n\t"
+ "sd %L0, %2" "\n\t"
+ ".set pop" "\n"
+ : "=r" (tmp)
+ : "0" (val), "m" (*ptr));
+ local_irq_restore(flags);
+ } else
+ *ptr = val;
+}
+
+/*
+ * Routines to read/write 32/64 bit values at 64 bit addresses,
+ * used when going through XKPHYS to access registers.
+ */
+static inline uint32_t
+nlm_read_reg_xkphys(uint64_t base, uint32_t reg)
+{
+ return nlm_read_reg(base, reg);
+}
+
+static inline void
+nlm_write_reg_xkphys(uint64_t base, uint32_t reg, uint32_t val)
+{
+ nlm_write_reg(base, reg, val);
+}
+
+static inline uint64_t
+nlm_read_reg64_xkphys(uint64_t base, uint32_t reg)
+{
+ return nlm_read_reg64(base, reg);
+}
+
+static inline void
+nlm_write_reg64_xkphys(uint64_t base, uint32_t reg, uint64_t val)
+{
+ nlm_write_reg64(base, reg, val);
+}
+
+/* Location where IO base is mapped */
+extern uint64_t nlm_io_base;
+
+#if defined(CONFIG_CPU_XLP)
+static inline uint64_t
+nlm_pcicfg_base(uint32_t devoffset)
+{
+ return nlm_io_base + devoffset;
+}
+
+#elif defined(CONFIG_CPU_XLR)
+
+static inline uint64_t
+nlm_mmio_base(uint32_t devoffset)
+{
+ return nlm_io_base + devoffset;
+}
+#endif
+
+#endif
diff --git a/arch/mips/include/asm/netlogic/interrupt.h b/arch/mips/include/asm/netlogic/interrupt.h
new file mode 100644
index 000000000..ed5993d9b
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/interrupt.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ASM_NLM_INTERRUPT_H
+#define _ASM_NLM_INTERRUPT_H
+
+/* Defines for the IRQ numbers */
+
+#define IRQ_IPI_SMP_FUNCTION 3
+#define IRQ_IPI_SMP_RESCHEDULE 4
+#define IRQ_FMN 5
+#define IRQ_TIMER 7
+
+#endif
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h
new file mode 100644
index 000000000..788baf399
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -0,0 +1,301 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ASM_NLM_MIPS_EXTS_H
+#define _ASM_NLM_MIPS_EXTS_H
+
+/*
+ * XLR and XLP interrupt request and interrupt mask registers
+ */
+/*
+ * NOTE: Do not save/restore flags around write_c0_eimr().
+ * On non-R2 platforms the flags value contains the part of EIMR that is
+ * shadowed in the STATUS register; restoring flags would overwrite the
+ * lower 8 bits of EIMR.
+ *
+ * Call with interrupts disabled.
+ */
+#define write_c0_eimr(val) \
+do { \
+ if (sizeof(unsigned long) == 4) { \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dsll\t%L0, %L0, 32\n\t" \
+ "dsrl\t%L0, %L0, 32\n\t" \
+ "dsll\t%M0, %M0, 32\n\t" \
+ "or\t%L0, %L0, %M0\n\t" \
+ "dmtc0\t%L0, $9, 7\n\t" \
+ ".set\tmips0" \
+ : : "r" (val)); \
+ } else \
+ __write_64bit_c0_register($9, 7, (val)); \
+} while (0)
+
+/*
+ * Handling the 64 bit EIMR and EIRR registers in 32-bit mode with the
+ * standard functions would be very inefficient, so optimized functions
+ * are provided here for the common operations on these registers.
+ *
+ * Call with interrupts disabled.
+ */
+static inline void ack_c0_eirr(int irq)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noat\n\t"
+ "li $1, 1\n\t"
+ "dsllv $1, $1, %0\n\t"
+ "dmtc0 $1, $9, 6\n\t"
+ ".set pop"
+ : : "r" (irq));
+}
+
+static inline void set_c0_eimr(int irq)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noat\n\t"
+ "li $1, 1\n\t"
+ "dsllv %0, $1, %0\n\t"
+ "dmfc0 $1, $9, 7\n\t"
+ "or $1, %0\n\t"
+ "dmtc0 $1, $9, 7\n\t"
+ ".set pop"
+ : "+r" (irq));
+}
+
+static inline void clear_c0_eimr(int irq)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noat\n\t"
+ "li $1, 1\n\t"
+ "dsllv %0, $1, %0\n\t"
+ "dmfc0 $1, $9, 7\n\t"
+ "or $1, %0\n\t"
+ "xor $1, %0\n\t"
+ "dmtc0 $1, $9, 7\n\t"
+ ".set pop"
+ : "+r" (irq));
+}
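+
+/*
+ * Note: set_c0_eimr()/clear_c0_eimr() above build the mask 1 << irq in a
+ * register; clear_c0_eimr() then ORs the mask into EIMR and XORs it back
+ * out, which leaves the bit clear whatever its initial state.
+ */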
+
+/*
+ * Read c0 EIMR and c0 EIRR and AND the two values; the result is the set
+ * of interrupts which are raised and not masked.
+ */
+static inline uint64_t read_c0_eirr_and_eimr(void)
+{
+ uint64_t val;
+
+#ifdef CONFIG_64BIT
+ val = __read_64bit_c0_register($9, 6) & __read_64bit_c0_register($9, 7);
+#else
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noat\n\t"
+ "dmfc0 %M0, $9, 6\n\t"
+ "dmfc0 %L0, $9, 7\n\t"
+ "and %M0, %L0\n\t"
+ "dsll %L0, %M0, 32\n\t"
+ "dsra %M0, %M0, 32\n\t"
+ "dsra %L0, %L0, 32\n\t"
+ ".set pop"
+ : "=r" (val));
+#endif
+ return val;
+}
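+
+/*
+ * Dispatch sketch (illustrative only, assuming a 1:1 mapping between EIRR
+ * bits and the IRQ numbers used by the platform interrupt code); the
+ * highest pending, unmasked interrupt is picked and acknowledged:
+ *
+ *	uint64_t eirr = read_c0_eirr_and_eimr();
+ *	if (eirr) {
+ *		int irq = 63 - __builtin_clzll(eirr);
+ *		ack_c0_eirr(irq);
+ *		... handle irq ...
+ *	}
+ */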
+
+static inline int hard_smp_processor_id(void)
+{
+ return __read_32bit_c0_register($15, 1) & 0x3ff;
+}
+
+static inline int nlm_nodeid(void)
+{
+ uint32_t prid = read_c0_prid() & PRID_IMP_MASK;
+
+ if ((prid == PRID_IMP_NETLOGIC_XLP9XX) ||
+ (prid == PRID_IMP_NETLOGIC_XLP5XX))
+ return (__read_32bit_c0_register($15, 1) >> 7) & 0x7;
+ else
+ return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
+}
+
+static inline unsigned int nlm_core_id(void)
+{
+ uint32_t prid = read_c0_prid() & PRID_IMP_MASK;
+
+ if ((prid == PRID_IMP_NETLOGIC_XLP9XX) ||
+ (prid == PRID_IMP_NETLOGIC_XLP5XX))
+ return (read_c0_ebase() & 0x7c) >> 2;
+ else
+ return (read_c0_ebase() & 0x1c) >> 2;
+}
+
+static inline unsigned int nlm_thread_id(void)
+{
+ return read_c0_ebase() & 0x3;
+}
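+
+/*
+ * Worked example (XLP8xx-style EBASE layout assumed): for an EBASE CPUNUM
+ * of 0x16, nlm_core_id() == (0x16 & 0x1c) >> 2 == 5 and nlm_thread_id() ==
+ * 0x16 & 0x3 == 2, i.e. core 5, thread 2.
+ */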
+
+#define __read_64bit_c2_split(source, sel) \
+({ \
+ unsigned long long __val; \
+ unsigned long __flags; \
+ \
+ local_irq_save(__flags); \
+ if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmfc2\t%M0, " #source "\n\t" \
+ "dsll\t%L0, %M0, 32\n\t" \
+ "dsra\t%M0, %M0, 32\n\t" \
+ "dsra\t%L0, %L0, 32\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__val)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmfc2\t%M0, " #source ", " #sel "\n\t" \
+ "dsll\t%L0, %M0, 32\n\t" \
+ "dsra\t%M0, %M0, 32\n\t" \
+ "dsra\t%L0, %L0, 32\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__val)); \
+ local_irq_restore(__flags); \
+ \
+ __val; \
+})
+
+#define __write_64bit_c2_split(source, sel, val) \
+do { \
+ unsigned long __flags; \
+ \
+ local_irq_save(__flags); \
+ if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dsll\t%L0, %L0, 32\n\t" \
+ "dsrl\t%L0, %L0, 32\n\t" \
+ "dsll\t%M0, %M0, 32\n\t" \
+ "or\t%L0, %L0, %M0\n\t" \
+ "dmtc2\t%L0, " #source "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "r" (val)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dsll\t%L0, %L0, 32\n\t" \
+ "dsrl\t%L0, %L0, 32\n\t" \
+ "dsll\t%M0, %M0, 32\n\t" \
+ "or\t%L0, %L0, %M0\n\t" \
+ "dmtc2\t%L0, " #source ", " #sel "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "r" (val)); \
+ local_irq_restore(__flags); \
+} while (0)
+
+#define __read_32bit_c2_register(source, sel) \
+({ uint32_t __res; \
+ if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips32\n\t" \
+ "mfc2\t%0, " #source "\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__res)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips32\n\t" \
+ "mfc2\t%0, " #source ", " #sel "\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__res)); \
+ __res; \
+})
+
+#define __read_64bit_c2_register(source, sel) \
+({ unsigned long long __res; \
+ if (sizeof(unsigned long) == 4) \
+ __res = __read_64bit_c2_split(source, sel); \
+ else if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmfc2\t%0, " #source "\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__res)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmfc2\t%0, " #source ", " #sel "\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__res)); \
+ __res; \
+})
+
+#define __write_64bit_c2_register(register, sel, value) \
+do { \
+ if (sizeof(unsigned long) == 4) \
+ __write_64bit_c2_split(register, sel, value); \
+ else if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmtc2\t%z0, " #register "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "Jr" (value)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmtc2\t%z0, " #register ", " #sel "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "Jr" (value)); \
+} while (0)
+
+#define __write_32bit_c2_register(reg, sel, value) \
+({ \
+ if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips32\n\t" \
+ "mtc2\t%z0, " #reg "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "Jr" (value)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips32\n\t" \
+ "mtc2\t%z0, " #reg ", " #sel "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "Jr" (value)); \
+})
+
+#endif /*_ASM_NLM_MIPS_EXTS_H */
diff --git a/arch/mips/include/asm/netlogic/psb-bootinfo.h b/arch/mips/include/asm/netlogic/psb-bootinfo.h
new file mode 100644
index 000000000..c716e9397
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/psb-bootinfo.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ASM_NETLOGIC_BOOTINFO_H
+#define _ASM_NETLOGIC_BOOTINFO_H
+
+struct psb_info {
+ uint64_t boot_level;
+ uint64_t io_base;
+ uint64_t output_device;
+ uint64_t uart_print;
+ uint64_t led_output;
+ uint64_t init;
+ uint64_t exit;
+ uint64_t warm_reset;
+ uint64_t wakeup;
+ uint64_t online_cpu_map;
+ uint64_t master_reentry_sp;
+ uint64_t master_reentry_gp;
+ uint64_t master_reentry_fn;
+ uint64_t slave_reentry_fn;
+ uint64_t magic_dword;
+ uint64_t uart_putchar;
+ uint64_t size;
+ uint64_t uart_getchar;
+ uint64_t nmi_handler;
+ uint64_t psb_version;
+ uint64_t mac_addr;
+ uint64_t cpu_frequency;
+ uint64_t board_version;
+ uint64_t malloc;
+ uint64_t free;
+ uint64_t global_shmem_addr;
+ uint64_t global_shmem_size;
+ uint64_t psb_os_cpu_map;
+ uint64_t userapp_cpu_map;
+ uint64_t wakeup_os;
+ uint64_t psb_mem_map;
+ uint64_t board_major_version;
+ uint64_t board_minor_version;
+ uint64_t board_manf_revision;
+ uint64_t board_serial_number;
+ uint64_t psb_physaddr_map;
+ uint64_t xlr_loaderip_config;
+ uint64_t bldr_envp;
+ uint64_t avail_mem_map;
+};
+
+/* This is what netlboot passes; the Linux boot_mem_map is subtly different */
+#define NLM_BOOT_MEM_MAP_MAX 32
+struct nlm_boot_mem_map {
+ int nr_map;
+ struct nlm_boot_mem_map_entry {
+ uint64_t addr; /* start of memory segment */
+ uint64_t size; /* size of memory segment */
+ uint32_t type; /* type of memory segment */
+ } map[NLM_BOOT_MEM_MAP_MAX];
+};
+#define NLM_BOOT_MEM_RAM 1
+
+/* Pointer to saved boot loader info */
+extern struct psb_info nlm_prom_info;
+
+#endif
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/bridge.h b/arch/mips/include/asm/netlogic/xlp-hal/bridge.h
new file mode 100644
index 000000000..3067f9834
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlp-hal/bridge.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __NLM_HAL_BRIDGE_H__
+#define __NLM_HAL_BRIDGE_H__
+
+/**
+ * @file_name bridge.h
+ * @author Netlogic Microsystems
+ * @brief Basic definitions of the XLP memory and I/O subsystem
+ */
+
+/*
+ * BRIDGE specific registers
+ *
+ * These registers start after the PCIe header, which has 0x40
+ * standard entries
+ */
+#define BRIDGE_MODE 0x00
+#define BRIDGE_PCI_CFG_BASE 0x01
+#define BRIDGE_PCI_CFG_LIMIT 0x02
+#define BRIDGE_PCIE_CFG_BASE 0x03
+#define BRIDGE_PCIE_CFG_LIMIT 0x04
+#define BRIDGE_BUSNUM_BAR0 0x05
+#define BRIDGE_BUSNUM_BAR1 0x06
+#define BRIDGE_BUSNUM_BAR2 0x07
+#define BRIDGE_BUSNUM_BAR3 0x08
+#define BRIDGE_BUSNUM_BAR4 0x09
+#define BRIDGE_BUSNUM_BAR5 0x0a
+#define BRIDGE_BUSNUM_BAR6 0x0b
+#define BRIDGE_FLASH_BAR0 0x0c
+#define BRIDGE_FLASH_BAR1 0x0d
+#define BRIDGE_FLASH_BAR2 0x0e
+#define BRIDGE_FLASH_BAR3 0x0f
+#define BRIDGE_FLASH_LIMIT0 0x10
+#define BRIDGE_FLASH_LIMIT1 0x11
+#define BRIDGE_FLASH_LIMIT2 0x12
+#define BRIDGE_FLASH_LIMIT3 0x13
+
+#define BRIDGE_DRAM_BAR(i) (0x14 + (i))
+#define BRIDGE_DRAM_LIMIT(i) (0x1c + (i))
+#define BRIDGE_DRAM_NODE_TRANSLN(i) (0x24 + (i))
+#define BRIDGE_DRAM_CHNL_TRANSLN(i) (0x2c + (i))
+
+#define BRIDGE_PCIEMEM_BASE0 0x34
+#define BRIDGE_PCIEMEM_BASE1 0x35
+#define BRIDGE_PCIEMEM_BASE2 0x36
+#define BRIDGE_PCIEMEM_BASE3 0x37
+#define BRIDGE_PCIEMEM_LIMIT0 0x38
+#define BRIDGE_PCIEMEM_LIMIT1 0x39
+#define BRIDGE_PCIEMEM_LIMIT2 0x3a
+#define BRIDGE_PCIEMEM_LIMIT3 0x3b
+#define BRIDGE_PCIEIO_BASE0 0x3c
+#define BRIDGE_PCIEIO_BASE1 0x3d
+#define BRIDGE_PCIEIO_BASE2 0x3e
+#define BRIDGE_PCIEIO_BASE3 0x3f
+#define BRIDGE_PCIEIO_LIMIT0 0x40
+#define BRIDGE_PCIEIO_LIMIT1 0x41
+#define BRIDGE_PCIEIO_LIMIT2 0x42
+#define BRIDGE_PCIEIO_LIMIT3 0x43
+#define BRIDGE_PCIEMEM_BASE4 0x44
+#define BRIDGE_PCIEMEM_BASE5 0x45
+#define BRIDGE_PCIEMEM_BASE6 0x46
+#define BRIDGE_PCIEMEM_LIMIT4 0x47
+#define BRIDGE_PCIEMEM_LIMIT5 0x48
+#define BRIDGE_PCIEMEM_LIMIT6 0x49
+#define BRIDGE_PCIEIO_BASE4 0x4a
+#define BRIDGE_PCIEIO_BASE5 0x4b
+#define BRIDGE_PCIEIO_BASE6 0x4c
+#define BRIDGE_PCIEIO_LIMIT4 0x4d
+#define BRIDGE_PCIEIO_LIMIT5 0x4e
+#define BRIDGE_PCIEIO_LIMIT6 0x4f
+#define BRIDGE_NBU_EVENT_CNT_CTL 0x50
+#define BRIDGE_EVNTCTR1_LOW 0x51
+#define BRIDGE_EVNTCTR1_HI 0x52
+#define BRIDGE_EVNT_CNT_CTL2 0x53
+#define BRIDGE_EVNTCTR2_LOW 0x54
+#define BRIDGE_EVNTCTR2_HI 0x55
+#define BRIDGE_TRACEBUF_MATCH0 0x56
+#define BRIDGE_TRACEBUF_MATCH1 0x57
+#define BRIDGE_TRACEBUF_MATCH_LOW 0x58
+#define BRIDGE_TRACEBUF_MATCH_HI 0x59
+#define BRIDGE_TRACEBUF_CTRL 0x5a
+#define BRIDGE_TRACEBUF_INIT 0x5b
+#define BRIDGE_TRACEBUF_ACCESS 0x5c
+#define BRIDGE_TRACEBUF_READ_DATA0 0x5d
+#define BRIDGE_TRACEBUF_READ_DATA1 0x5e
+#define BRIDGE_TRACEBUF_READ_DATA2 0x5f
+#define BRIDGE_TRACEBUF_READ_DATA3 0x60
+#define BRIDGE_TRACEBUF_STATUS 0x61
+#define BRIDGE_ADDRESS_ERROR0 0x62
+#define BRIDGE_ADDRESS_ERROR1 0x63
+#define BRIDGE_ADDRESS_ERROR2 0x64
+#define BRIDGE_TAG_ECC_ADDR_ERROR0 0x65
+#define BRIDGE_TAG_ECC_ADDR_ERROR1 0x66
+#define BRIDGE_TAG_ECC_ADDR_ERROR2 0x67
+#define BRIDGE_LINE_FLUSH0 0x68
+#define BRIDGE_LINE_FLUSH1 0x69
+#define BRIDGE_NODE_ID 0x6a
+#define BRIDGE_ERROR_INTERRUPT_EN 0x6b
+#define BRIDGE_PCIE0_WEIGHT 0x2c0
+#define BRIDGE_PCIE1_WEIGHT 0x2c1
+#define BRIDGE_PCIE2_WEIGHT 0x2c2
+#define BRIDGE_PCIE3_WEIGHT 0x2c3
+#define BRIDGE_USB_WEIGHT 0x2c4
+#define BRIDGE_NET_WEIGHT 0x2c5
+#define BRIDGE_POE_WEIGHT 0x2c6
+#define BRIDGE_CMS_WEIGHT 0x2c7
+#define BRIDGE_DMAENG_WEIGHT 0x2c8
+#define BRIDGE_SEC_WEIGHT 0x2c9
+#define BRIDGE_COMP_WEIGHT 0x2ca
+#define BRIDGE_GIO_WEIGHT 0x2cb
+#define BRIDGE_FLASH_WEIGHT 0x2cc
+
+/* FIXME verify */
+#define BRIDGE_9XX_FLASH_BAR(i) (0x11 + (i))
+#define BRIDGE_9XX_FLASH_BAR_LIMIT(i) (0x15 + (i))
+
+#define BRIDGE_9XX_DRAM_BAR(i) (0x19 + (i))
+#define BRIDGE_9XX_DRAM_LIMIT(i) (0x29 + (i))
+#define BRIDGE_9XX_DRAM_NODE_TRANSLN(i) (0x39 + (i))
+#define BRIDGE_9XX_DRAM_CHNL_TRANSLN(i) (0x49 + (i))
+
+#define BRIDGE_9XX_ADDRESS_ERROR0 0x9d
+#define BRIDGE_9XX_ADDRESS_ERROR1 0x9e
+#define BRIDGE_9XX_ADDRESS_ERROR2 0x9f
+
+#define BRIDGE_9XX_PCIEMEM_BASE0 0x59
+#define BRIDGE_9XX_PCIEMEM_BASE1 0x5a
+#define BRIDGE_9XX_PCIEMEM_BASE2 0x5b
+#define BRIDGE_9XX_PCIEMEM_BASE3 0x5c
+#define BRIDGE_9XX_PCIEMEM_LIMIT0 0x5d
+#define BRIDGE_9XX_PCIEMEM_LIMIT1 0x5e
+#define BRIDGE_9XX_PCIEMEM_LIMIT2 0x5f
+#define BRIDGE_9XX_PCIEMEM_LIMIT3 0x60
+#define BRIDGE_9XX_PCIEIO_BASE0 0x61
+#define BRIDGE_9XX_PCIEIO_BASE1 0x62
+#define BRIDGE_9XX_PCIEIO_BASE2 0x63
+#define BRIDGE_9XX_PCIEIO_BASE3 0x64
+#define BRIDGE_9XX_PCIEIO_LIMIT0 0x65
+#define BRIDGE_9XX_PCIEIO_LIMIT1 0x66
+#define BRIDGE_9XX_PCIEIO_LIMIT2 0x67
+#define BRIDGE_9XX_PCIEIO_LIMIT3 0x68
+
+#ifndef __ASSEMBLY__
+
+#define nlm_read_bridge_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_bridge_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_bridge_pcibase(node) nlm_pcicfg_base(cpu_is_xlp9xx() ? \
+ XLP9XX_IO_BRIDGE_OFFSET(node) : XLP_IO_BRIDGE_OFFSET(node))
+#define nlm_get_bridge_regbase(node) \
+ (nlm_get_bridge_pcibase(node) + XLP_IO_PCI_HDRSZ)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __NLM_HAL_BRIDGE_H__ */
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h b/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h
new file mode 100644
index 000000000..a06b59292
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __NLM_HAL_CPUCONTROL_H__
+#define __NLM_HAL_CPUCONTROL_H__
+
+#define CPU_BLOCKID_IFU 0
+#define CPU_BLOCKID_ICU 1
+#define CPU_BLOCKID_IEU 2
+#define CPU_BLOCKID_LSU 3
+#define CPU_BLOCKID_MMU 4
+#define CPU_BLOCKID_PRF 5
+#define CPU_BLOCKID_SCH 7
+#define CPU_BLOCKID_SCU 8
+#define CPU_BLOCKID_FPU 9
+#define CPU_BLOCKID_MAP 10
+
+#define IFU_BRUB_RESERVE 0x007
+
+#define ICU_DEFEATURE 0x100
+
+#define LSU_DEFEATURE 0x304
+#define LSU_DEBUG_ADDR 0x305
+#define LSU_DEBUG_DATA0 0x306
+#define LSU_CERRLOG_REGID 0x309
+#define SCHED_DEFEATURE 0x700
+
+/* Offsets of interest from the 'MAP' Block */
+#define MAP_THREADMODE 0x00
+#define MAP_EXT_EBASE_ENABLE 0x04
+#define MAP_CCDI_CONFIG 0x08
+#define MAP_THRD0_CCDI_STATUS 0x0c
+#define MAP_THRD1_CCDI_STATUS 0x10
+#define MAP_THRD2_CCDI_STATUS 0x14
+#define MAP_THRD3_CCDI_STATUS 0x18
+#define MAP_THRD0_DEBUG_MODE 0x1c
+#define MAP_THRD1_DEBUG_MODE 0x20
+#define MAP_THRD2_DEBUG_MODE 0x24
+#define MAP_THRD3_DEBUG_MODE 0x28
+#define MAP_MISC_STATE 0x60
+#define MAP_DEBUG_READ_CTL 0x64
+#define MAP_DEBUG_READ_REG0 0x68
+#define MAP_DEBUG_READ_REG1 0x6c
+
+#define MMU_SETUP 0x400
+#define MMU_LFSRSEED 0x401
+#define MMU_HPW_NUM_PAGE_LVL 0x410
+#define MMU_PGWKR_PGDBASE 0x411
+#define MMU_PGWKR_PGDSHFT 0x412
+#define MMU_PGWKR_PGDMASK 0x413
+#define MMU_PGWKR_PUDSHFT 0x414
+#define MMU_PGWKR_PUDMASK 0x415
+#define MMU_PGWKR_PMDSHFT 0x416
+#define MMU_PGWKR_PMDMASK 0x417
+#define MMU_PGWKR_PTESHFT 0x418
+#define MMU_PGWKR_PTEMASK 0x419
+
+#endif /* __NLM_HAL_CPUCONTROL_H__ */
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/iomap.h b/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
new file mode 100644
index 000000000..805bfd21f
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __NLM_HAL_IOMAP_H__
+#define __NLM_HAL_IOMAP_H__
+
+#define XLP_DEFAULT_IO_BASE 0x18000000
+#define XLP_DEFAULT_PCI_ECFG_BASE XLP_DEFAULT_IO_BASE
+#define XLP_DEFAULT_PCI_CFG_BASE 0x1c000000
+
+#define NMI_BASE 0xbfc00000
+#define XLP_IO_CLK 133333333
+
+#define XLP_PCIE_CFG_SIZE 0x1000 /* 4K */
+#define XLP_PCIE_DEV_BLK_SIZE (8 * XLP_PCIE_CFG_SIZE)
+#define XLP_PCIE_BUS_BLK_SIZE (256 * XLP_PCIE_DEV_BLK_SIZE)
+#define XLP_IO_SIZE (64 << 20) /* ECFG space size */
+#define XLP_IO_PCI_HDRSZ 0x100
+#define XLP_IO_DEV(node, dev) ((dev) + (node) * 8)
+#define XLP_IO_PCI_OFFSET(b, d, f) (((b) << 20) | ((d) << 15) | ((f) << 12))
+
+#define XLP_HDR_OFFSET(node, bus, dev, fn) \
+ XLP_IO_PCI_OFFSET(bus, XLP_IO_DEV(node, dev), fn)
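+/* e.g. for node 0, UART 0 (bus 0, dev 6, fn 0): (0 << 20) | (6 << 15) | (0 << 12) = 0x30000 */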
+
+#define XLP_IO_BRIDGE_OFFSET(node) XLP_HDR_OFFSET(node, 0, 0, 0)
+/* coherent inter chip */
+#define XLP_IO_CIC0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 0, 1)
+#define XLP_IO_CIC1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 0, 2)
+#define XLP_IO_CIC2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 0, 3)
+#define XLP_IO_PIC_OFFSET(node) XLP_HDR_OFFSET(node, 0, 0, 4)
+
+#define XLP_IO_PCIE_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 1, i)
+#define XLP_IO_PCIE0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 1, 0)
+#define XLP_IO_PCIE1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 1, 1)
+#define XLP_IO_PCIE2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 1, 2)
+#define XLP_IO_PCIE3_OFFSET(node) XLP_HDR_OFFSET(node, 0, 1, 3)
+
+#define XLP_IO_USB_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 2, i)
+#define XLP_IO_USB_EHCI0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 0)
+#define XLP_IO_USB_OHCI0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 1)
+#define XLP_IO_USB_OHCI1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 2)
+#define XLP_IO_USB_EHCI1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 3)
+#define XLP_IO_USB_OHCI2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 4)
+#define XLP_IO_USB_OHCI3_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 5)
+
+#define XLP_IO_SATA_OFFSET(node) XLP_HDR_OFFSET(node, 0, 3, 2)
+
+/* XLP2xx has an updated USB block */
+#define XLP2XX_IO_USB_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 4, i)
+#define XLP2XX_IO_USB_XHCI0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 4, 1)
+#define XLP2XX_IO_USB_XHCI1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 4, 2)
+#define XLP2XX_IO_USB_XHCI2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 4, 3)
+
+#define XLP_IO_NAE_OFFSET(node) XLP_HDR_OFFSET(node, 0, 3, 0)
+#define XLP_IO_POE_OFFSET(node) XLP_HDR_OFFSET(node, 0, 3, 1)
+
+#define XLP_IO_CMS_OFFSET(node) XLP_HDR_OFFSET(node, 0, 4, 0)
+
+#define XLP_IO_DMA_OFFSET(node) XLP_HDR_OFFSET(node, 0, 5, 1)
+#define XLP_IO_SEC_OFFSET(node) XLP_HDR_OFFSET(node, 0, 5, 2)
+#define XLP_IO_CMP_OFFSET(node) XLP_HDR_OFFSET(node, 0, 5, 3)
+
+#define XLP_IO_UART_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 6, i)
+#define XLP_IO_UART0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 0)
+#define XLP_IO_UART1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 1)
+#define XLP_IO_I2C_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 6, 2 + i)
+#define XLP_IO_I2C0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 2)
+#define XLP_IO_I2C1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 3)
+#define XLP_IO_GPIO_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 4)
+/* on 2XX, all I2C busses are on the same block */
+#define XLP2XX_IO_I2C_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 7)
+
+/* system management */
+#define XLP_IO_SYS_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 5)
+#define XLP_IO_JTAG_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 6)
+
+/* Flash */
+#define XLP_IO_NOR_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 0)
+#define XLP_IO_NAND_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 1)
+#define XLP_IO_SPI_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 2)
+#define XLP_IO_MMC_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 3)
+
+/* Things have changed drastically in XLP 9XX */
+#define XLP9XX_HDR_OFFSET(n, d, f) \
+ XLP_IO_PCI_OFFSET(xlp9xx_get_socbus(n), d, f)
+
+#define XLP9XX_IO_BRIDGE_OFFSET(node) XLP_IO_PCI_OFFSET(0, 0, node)
+#define XLP9XX_IO_PIC_OFFSET(node) XLP9XX_HDR_OFFSET(node, 2, 0)
+#define XLP9XX_IO_UART_OFFSET(node) XLP9XX_HDR_OFFSET(node, 2, 2)
+#define XLP9XX_IO_SYS_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 0)
+#define XLP9XX_IO_FUSE_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 1)
+#define XLP9XX_IO_CLOCK_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 2)
+#define XLP9XX_IO_POWER_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 3)
+#define XLP9XX_IO_JTAG_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 4)
+
+#define XLP9XX_IO_PCIE_OFFSET(node, i) XLP9XX_HDR_OFFSET(node, 1, i)
+#define XLP9XX_IO_PCIE0_OFFSET(node) XLP9XX_HDR_OFFSET(node, 1, 0)
+#define XLP9XX_IO_PCIE2_OFFSET(node) XLP9XX_HDR_OFFSET(node, 1, 2)
+#define XLP9XX_IO_PCIE3_OFFSET(node) XLP9XX_HDR_OFFSET(node, 1, 3)
+
+/* XLP9xx USB block */
+#define XLP9XX_IO_USB_OFFSET(node, i) XLP9XX_HDR_OFFSET(node, 4, i)
+#define XLP9XX_IO_USB_XHCI0_OFFSET(node) XLP9XX_HDR_OFFSET(node, 4, 1)
+#define XLP9XX_IO_USB_XHCI1_OFFSET(node) XLP9XX_HDR_OFFSET(node, 4, 2)
+
+/* XLP9XX on-chip SATA controller */
+#define XLP9XX_IO_SATA_OFFSET(node) XLP9XX_HDR_OFFSET(node, 3, 2)
+
+/* Flash */
+#define XLP9XX_IO_NOR_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 0)
+#define XLP9XX_IO_NAND_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 1)
+#define XLP9XX_IO_SPI_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 2)
+#define XLP9XX_IO_MMC_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 3)
+
+/* PCI config header register id's */
+#define XLP_PCI_CFGREG0 0x00
+#define XLP_PCI_CFGREG1 0x01
+#define XLP_PCI_CFGREG2 0x02
+#define XLP_PCI_CFGREG3 0x03
+#define XLP_PCI_CFGREG4 0x04
+#define XLP_PCI_CFGREG5 0x05
+#define XLP_PCI_DEVINFO_REG0 0x30
+#define XLP_PCI_DEVINFO_REG1 0x31
+#define XLP_PCI_DEVINFO_REG2 0x32
+#define XLP_PCI_DEVINFO_REG3 0x33
+#define XLP_PCI_DEVINFO_REG4 0x34
+#define XLP_PCI_DEVINFO_REG5 0x35
+#define XLP_PCI_DEVINFO_REG6 0x36
+#define XLP_PCI_DEVINFO_REG7 0x37
+#define XLP_PCI_DEVSCRATCH_REG0 0x38
+#define XLP_PCI_DEVSCRATCH_REG1 0x39
+#define XLP_PCI_DEVSCRATCH_REG2 0x3a
+#define XLP_PCI_DEVSCRATCH_REG3 0x3b
+#define XLP_PCI_MSGSTN_REG 0x3c
+#define XLP_PCI_IRTINFO_REG 0x3d
+#define XLP_PCI_UCODEINFO_REG 0x3e
+#define XLP_PCI_SBB_WT_REG 0x3f
+
+/* PCI IDs for SoC device */
+#define PCI_VENDOR_NETLOGIC 0x184e
+
+#define PCI_DEVICE_ID_NLM_ROOT 0x1001
+#define PCI_DEVICE_ID_NLM_ICI 0x1002
+#define PCI_DEVICE_ID_NLM_PIC 0x1003
+#define PCI_DEVICE_ID_NLM_PCIE 0x1004
+#define PCI_DEVICE_ID_NLM_EHCI 0x1007
+#define PCI_DEVICE_ID_NLM_OHCI 0x1008
+#define PCI_DEVICE_ID_NLM_NAE 0x1009
+#define PCI_DEVICE_ID_NLM_POE 0x100A
+#define PCI_DEVICE_ID_NLM_FMN 0x100B
+#define PCI_DEVICE_ID_NLM_RAID 0x100D
+#define PCI_DEVICE_ID_NLM_SAE 0x100D
+#define PCI_DEVICE_ID_NLM_RSA 0x100E
+#define PCI_DEVICE_ID_NLM_CMP 0x100F
+#define PCI_DEVICE_ID_NLM_UART 0x1010
+#define PCI_DEVICE_ID_NLM_I2C 0x1011
+#define PCI_DEVICE_ID_NLM_NOR 0x1015
+#define PCI_DEVICE_ID_NLM_NAND 0x1016
+#define PCI_DEVICE_ID_NLM_MMC 0x1018
+#define PCI_DEVICE_ID_NLM_SATA 0x101A
+#define PCI_DEVICE_ID_NLM_XHCI 0x101D
+
+#define PCI_DEVICE_ID_XLP9XX_MMC 0x9018
+#define PCI_DEVICE_ID_XLP9XX_SATA 0x901A
+#define PCI_DEVICE_ID_XLP9XX_XHCI 0x901D
+
+#ifndef __ASSEMBLY__
+
+#define nlm_read_pci_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_pci_reg(b, r, v) nlm_write_reg(b, r, v)
+
+static inline int xlp9xx_get_socbus(int node)
+{
+ uint64_t socbridge;
+
+ if (node == 0)
+ return 1;
+ socbridge = nlm_pcicfg_base(XLP9XX_IO_BRIDGE_OFFSET(node));
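+ /* config dword 6 (offset 0x18) of the bridge header: bits 15:8 hold the secondary bus number */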
+ return (nlm_read_pci_reg(socbridge, 0x6) >> 8) & 0xff;
+}
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __NLM_HAL_IOMAP_H__ */
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h b/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
new file mode 100644
index 000000000..91540f41e
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __NLM_HAL_PCIBUS_H__
+#define __NLM_HAL_PCIBUS_H__
+
+/* PCIE Memory and IO regions */
+#define PCIE_MEM_BASE 0xd0000000ULL
+#define PCIE_MEM_LIMIT 0xdfffffffULL
+#define PCIE_IO_BASE 0x14000000ULL
+#define PCIE_IO_LIMIT 0x15ffffffULL
+
+#define PCIE_BRIDGE_CMD 0x1
+#define PCIE_BRIDGE_MSI_CAP 0x14
+#define PCIE_BRIDGE_MSI_ADDRL 0x15
+#define PCIE_BRIDGE_MSI_ADDRH 0x16
+#define PCIE_BRIDGE_MSI_DATA 0x17
+
+/* XLP Global PCIE configuration space registers */
+#define PCIE_BYTE_SWAP_MEM_BASE 0x247
+#define PCIE_BYTE_SWAP_MEM_LIM 0x248
+#define PCIE_BYTE_SWAP_IO_BASE 0x249
+#define PCIE_BYTE_SWAP_IO_LIM 0x24A
+
+#define PCIE_BRIDGE_MSIX_ADDR_BASE 0x24F
+#define PCIE_BRIDGE_MSIX_ADDR_LIMIT 0x250
+#define PCIE_MSI_STATUS 0x25A
+#define PCIE_MSI_EN 0x25B
+#define PCIE_MSIX_STATUS 0x25D
+#define PCIE_INT_STATUS0 0x25F
+#define PCIE_INT_STATUS1 0x260
+#define PCIE_INT_EN0 0x261
+#define PCIE_INT_EN1 0x262
+
+/* XLP9XX has basic changes */
+#define PCIE_9XX_BYTE_SWAP_MEM_BASE 0x25c
+#define PCIE_9XX_BYTE_SWAP_MEM_LIM 0x25d
+#define PCIE_9XX_BYTE_SWAP_IO_BASE 0x25e
+#define PCIE_9XX_BYTE_SWAP_IO_LIM 0x25f
+
+#define PCIE_9XX_BRIDGE_MSIX_ADDR_BASE 0x264
+#define PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT 0x265
+#define PCIE_9XX_MSI_STATUS 0x283
+#define PCIE_9XX_MSI_EN 0x284
+/* 128 MSIX vectors available in 9xx */
+#define PCIE_9XX_MSIX_STATUS0 0x286
+#define PCIE_9XX_MSIX_STATUSX(n) (n + 0x286)
+#define PCIE_9XX_MSIX_VEC 0x296
+#define PCIE_9XX_MSIX_VECX(n) (n + 0x296)
+#define PCIE_9XX_INT_STATUS0 0x397
+#define PCIE_9XX_INT_STATUS1 0x398
+#define PCIE_9XX_INT_EN0 0x399
+#define PCIE_9XX_INT_EN1 0x39a
+
+/* other */
+#define PCIE_NLINKS 4
+
+/* MSI addresses */
+#define MSI_ADDR_BASE 0xfffee00000ULL
+#define MSI_ADDR_SZ 0x10000
+#define MSI_LINK_ADDR(n, l) (MSI_ADDR_BASE + \
+ (PCIE_NLINKS * (n) + (l)) * MSI_ADDR_SZ)
+#define MSIX_ADDR_BASE 0xfffef00000ULL
+#define MSIX_LINK_ADDR(n, l) (MSIX_ADDR_BASE + \
+ (PCIE_NLINKS * (n) + (l)) * MSI_ADDR_SZ)
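+/* e.g. MSI_LINK_ADDR(0, 2) = 0xfffee00000 + 2 * 0x10000 = 0xfffee20000 */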
+#ifndef __ASSEMBLY__
+
+#define nlm_read_pcie_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_pcie_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_pcie_base(node, inst) nlm_pcicfg_base(cpu_is_xlp9xx() ? \
+ XLP9XX_IO_PCIE_OFFSET(node, inst) : XLP_IO_PCIE_OFFSET(node, inst))
+
+#ifdef CONFIG_PCI_MSI
+void xlp_init_node_msi_irqs(int node, int link);
+#else
+static inline void xlp_init_node_msi_irqs(int node, int link) {}
+#endif
+
+struct pci_dev *xlp_get_pcie_link(const struct pci_dev *dev);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __NLM_HAL_PCIBUS_H__ */
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
new file mode 100644
index 000000000..41cefe94f
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
@@ -0,0 +1,366 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NLM_HAL_PIC_H
+#define _NLM_HAL_PIC_H
+
+/* PIC Specific registers */
+#define PIC_CTRL 0x00
+
+/* PIC control register defines */
+#define PIC_CTRL_ITV 32 /* interrupt timeout value */
+#define PIC_CTRL_ICI 19 /* ICI interrupt timeout enable */
+#define PIC_CTRL_ITE 18 /* interrupt timeout enable */
+#define PIC_CTRL_STE 10 /* system timer interrupt enable */
+#define PIC_CTRL_WWR1 8 /* watchdog 1 wraparound count for reset */
+#define PIC_CTRL_WWR0 6 /* watchdog 0 wraparound count for reset */
+#define PIC_CTRL_WWN1 4 /* watchdog 1 wraparound count for NMI */
+#define PIC_CTRL_WWN0 2 /* watchdog 0 wraparound count for NMI */
+#define PIC_CTRL_WTE 0 /* watchdog timer enable */
+
+/* PIC Status register defines */
+#define PIC_ICI_STATUS 33 /* ICI interrupt timeout status */
+#define PIC_ITE_STATUS 32 /* interrupt timeout status */
+#define PIC_STS_STATUS 4 /* System timer interrupt status */
+#define PIC_WNS_STATUS 2 /* NMI status for watchdog timers */
+#define PIC_WIS_STATUS 0 /* Interrupt status for watchdog timers */
+
+/* PIC IPI control register offsets */
+#define PIC_IPICTRL_NMI 32
+#define PIC_IPICTRL_RIV 20 /* received interrupt vector */
+#define PIC_IPICTRL_IDB 16 /* interrupt destination base */
+#define PIC_IPICTRL_DTE 0 /* interrupt destination thread enables */
+
+/* PIC IRT register offsets */
+#define PIC_IRT_ENABLE 31
+#define PIC_IRT_NMI 29
+#define PIC_IRT_SCH 28 /* Scheduling scheme */
+#define PIC_IRT_RVEC 20 /* Interrupt receive vectors */
+#define PIC_IRT_DT 19 /* Destination type */
+#define PIC_IRT_DB 16 /* Destination base */
+#define PIC_IRT_DTE 0 /* Destination thread enables */
+
+#define PIC_BYTESWAP 0x02
+#define PIC_STATUS 0x04
+#define PIC_INTR_TIMEOUT 0x06
+#define PIC_ICI0_INTR_TIMEOUT 0x08
+#define PIC_ICI1_INTR_TIMEOUT 0x0a
+#define PIC_ICI2_INTR_TIMEOUT 0x0c
+#define PIC_IPI_CTL 0x0e
+#define PIC_INT_ACK 0x10
+#define PIC_INT_PENDING0 0x12
+#define PIC_INT_PENDING1 0x14
+#define PIC_INT_PENDING2 0x16
+
+#define PIC_WDOG0_MAXVAL 0x18
+#define PIC_WDOG0_COUNT 0x1a
+#define PIC_WDOG0_ENABLE0 0x1c
+#define PIC_WDOG0_ENABLE1 0x1e
+#define PIC_WDOG0_BEATCMD 0x20
+#define PIC_WDOG0_BEAT0 0x22
+#define PIC_WDOG0_BEAT1 0x24
+
+#define PIC_WDOG1_MAXVAL 0x26
+#define PIC_WDOG1_COUNT 0x28
+#define PIC_WDOG1_ENABLE0 0x2a
+#define PIC_WDOG1_ENABLE1 0x2c
+#define PIC_WDOG1_BEATCMD 0x2e
+#define PIC_WDOG1_BEAT0 0x30
+#define PIC_WDOG1_BEAT1 0x32
+
+#define PIC_WDOG_MAXVAL(i) (PIC_WDOG0_MAXVAL + ((i) ? 7 : 0))
+#define PIC_WDOG_COUNT(i) (PIC_WDOG0_COUNT + ((i) ? 7 : 0))
+#define PIC_WDOG_ENABLE0(i) (PIC_WDOG0_ENABLE0 + ((i) ? 7 : 0))
+#define PIC_WDOG_ENABLE1(i) (PIC_WDOG0_ENABLE1 + ((i) ? 7 : 0))
+#define PIC_WDOG_BEATCMD(i) (PIC_WDOG0_BEATCMD + ((i) ? 7 : 0))
+#define PIC_WDOG_BEAT0(i) (PIC_WDOG0_BEAT0 + ((i) ? 7 : 0))
+#define PIC_WDOG_BEAT1(i) (PIC_WDOG0_BEAT1 + ((i) ? 7 : 0))
+
+#define PIC_TIMER0_MAXVAL 0x34
+#define PIC_TIMER1_MAXVAL 0x36
+#define PIC_TIMER2_MAXVAL 0x38
+#define PIC_TIMER3_MAXVAL 0x3a
+#define PIC_TIMER4_MAXVAL 0x3c
+#define PIC_TIMER5_MAXVAL 0x3e
+#define PIC_TIMER6_MAXVAL 0x40
+#define PIC_TIMER7_MAXVAL 0x42
+#define PIC_TIMER_MAXVAL(i) (PIC_TIMER0_MAXVAL + ((i) * 2))
+
+#define PIC_TIMER0_COUNT 0x44
+#define PIC_TIMER1_COUNT 0x46
+#define PIC_TIMER2_COUNT 0x48
+#define PIC_TIMER3_COUNT 0x4a
+#define PIC_TIMER4_COUNT 0x4c
+#define PIC_TIMER5_COUNT 0x4e
+#define PIC_TIMER6_COUNT 0x50
+#define PIC_TIMER7_COUNT 0x52
+#define PIC_TIMER_COUNT(i) (PIC_TIMER0_COUNT + ((i) * 2))
+
+#define PIC_ITE0_N0_N1 0x54
+#define PIC_ITE1_N0_N1 0x58
+#define PIC_ITE2_N0_N1 0x5c
+#define PIC_ITE3_N0_N1 0x60
+#define PIC_ITE4_N0_N1 0x64
+#define PIC_ITE5_N0_N1 0x68
+#define PIC_ITE6_N0_N1 0x6c
+#define PIC_ITE7_N0_N1 0x70
+#define PIC_ITE_N0_N1(i) (PIC_ITE0_N0_N1 + ((i) * 4))
+
+#define PIC_ITE0_N2_N3 0x56
+#define PIC_ITE1_N2_N3 0x5a
+#define PIC_ITE2_N2_N3 0x5e
+#define PIC_ITE3_N2_N3 0x62
+#define PIC_ITE4_N2_N3 0x66
+#define PIC_ITE5_N2_N3 0x6a
+#define PIC_ITE6_N2_N3 0x6e
+#define PIC_ITE7_N2_N3 0x72
+#define PIC_ITE_N2_N3(i) (PIC_ITE0_N2_N3 + ((i) * 4))
+
+#define PIC_IRT0 0x74
+#define PIC_IRT(i) (PIC_IRT0 + ((i) * 2))
+
+#define PIC_9XX_PENDING_0 0x6
+#define PIC_9XX_PENDING_1 0x8
+#define PIC_9XX_PENDING_2 0xa
+#define PIC_9XX_PENDING_3 0xc
+
+#define PIC_9XX_IRT0 0x1c0
+#define PIC_9XX_IRT(i) (PIC_9XX_IRT0 + ((i) * 2))
+
+/*
+ * IRT Map
+ */
+#define PIC_NUM_IRTS 160
+#define PIC_9XX_NUM_IRTS 256
+
+#define PIC_IRT_WD_0_INDEX 0
+#define PIC_IRT_WD_1_INDEX 1
+#define PIC_IRT_WD_NMI_0_INDEX 2
+#define PIC_IRT_WD_NMI_1_INDEX 3
+#define PIC_IRT_TIMER_0_INDEX 4
+#define PIC_IRT_TIMER_1_INDEX 5
+#define PIC_IRT_TIMER_2_INDEX 6
+#define PIC_IRT_TIMER_3_INDEX 7
+#define PIC_IRT_TIMER_4_INDEX 8
+#define PIC_IRT_TIMER_5_INDEX 9
+#define PIC_IRT_TIMER_6_INDEX 10
+#define PIC_IRT_TIMER_7_INDEX 11
+#define PIC_IRT_CLOCK_INDEX PIC_IRT_TIMER_7_INDEX
+#define PIC_IRT_TIMER_INDEX(num) ((num) + PIC_IRT_TIMER_0_INDEX)
+
+
+/* 11 and 12 */
+#define PIC_NUM_MSG_Q_IRTS 32
+#define PIC_IRT_MSG_Q0_INDEX 12
+#define PIC_IRT_MSG_Q_INDEX(qid) ((qid) + PIC_IRT_MSG_Q0_INDEX)
+/* 12 to 43 */
+#define PIC_IRT_MSG_0_INDEX 44
+#define PIC_IRT_MSG_1_INDEX 45
+/* 44 and 45 */
+#define PIC_NUM_PCIE_MSIX_IRTS 32
+#define PIC_IRT_PCIE_MSIX_0_INDEX 46
+#define PIC_IRT_PCIE_MSIX_INDEX(num) ((num) + PIC_IRT_PCIE_MSIX_0_INDEX)
+/* 46 to 77 */
+#define PIC_NUM_PCIE_LINK_IRTS 4
+#define PIC_IRT_PCIE_LINK_0_INDEX 78
+#define PIC_IRT_PCIE_LINK_1_INDEX 79
+#define PIC_IRT_PCIE_LINK_2_INDEX 80
+#define PIC_IRT_PCIE_LINK_3_INDEX 81
+#define PIC_IRT_PCIE_LINK_INDEX(num) ((num) + PIC_IRT_PCIE_LINK_0_INDEX)
+
+#define PIC_9XX_IRT_PCIE_LINK_0_INDEX 191
+#define PIC_9XX_IRT_PCIE_LINK_INDEX(num) \
+ ((num) + PIC_9XX_IRT_PCIE_LINK_0_INDEX)
+
+#define PIC_CLOCK_TIMER 7
+
+#if !defined(LOCORE) && !defined(__ASSEMBLY__)
+
+/*
+ * Misc
+ */
+#define PIC_IRT_VALID 1
+#define PIC_LOCAL_SCHEDULING 1
+#define PIC_GLOBAL_SCHEDULING 0
+
+#define nlm_read_pic_reg(b, r) nlm_read_reg64(b, r)
+#define nlm_write_pic_reg(b, r, v) nlm_write_reg64(b, r, v)
+#define nlm_get_pic_pcibase(node) nlm_pcicfg_base(cpu_is_xlp9xx() ? \
+ XLP9XX_IO_PIC_OFFSET(node) : XLP_IO_PIC_OFFSET(node))
+#define nlm_get_pic_regbase(node) (nlm_get_pic_pcibase(node) + XLP_IO_PCI_HDRSZ)
+
+/* We use PIC on node 0 as a timer */
+#define pic_timer_freq() nlm_get_pic_frequency(0)
+
+/* IRT and h/w interrupt routines */
+static inline void
+nlm_9xx_pic_write_irt(uint64_t base, int irt_num, int en, int nmi,
+ int sch, int vec, int dt, int db, int cpu)
+{
+ uint64_t val;
+
+ val = (((uint64_t)en & 0x1) << 22) | ((nmi & 0x1) << 23) |
+ ((0 /*mc*/) << 20) | ((vec & 0x3f) << 24) |
+ ((dt & 0x1) << 21) | (0 /*ptr*/ << 16) |
+ (cpu & 0x3ff);
+
+ nlm_write_pic_reg(base, PIC_9XX_IRT(irt_num), val);
+}
+
+static inline void
+nlm_pic_write_irt(uint64_t base, int irt_num, int en, int nmi,
+ int sch, int vec, int dt, int db, int dte)
+{
+ uint64_t val;
+
+ val = (((uint64_t)en & 0x1) << 31) | ((nmi & 0x1) << 29) |
+ ((sch & 0x1) << 28) | ((vec & 0x3f) << 20) |
+ ((dt & 0x1) << 19) | ((db & 0x7) << 16) |
+ (dte & 0xffff);
+
+ nlm_write_pic_reg(base, PIC_IRT(irt_num), val);
+}
+
+static inline void
+nlm_pic_write_irt_direct(uint64_t base, int irt_num, int en, int nmi,
+ int sch, int vec, int cpu)
+{
+ if (cpu_is_xlp9xx())
+ nlm_9xx_pic_write_irt(base, irt_num, en, nmi, sch, vec,
+ 1, 0, cpu);
+ else
+ nlm_pic_write_irt(base, irt_num, en, nmi, sch, vec, 1,
+ (cpu >> 4), /* thread group */
+ 1 << (cpu & 0xf)); /* thread mask */
+}
+
+static inline uint64_t
+nlm_pic_read_timer(uint64_t base, int timer)
+{
+ return nlm_read_pic_reg(base, PIC_TIMER_COUNT(timer));
+}
+
+static inline uint32_t
+nlm_pic_read_timer32(uint64_t base, int timer)
+{
+ return (uint32_t)nlm_read_pic_reg(base, PIC_TIMER_COUNT(timer));
+}
+
+static inline void
+nlm_pic_write_timer(uint64_t base, int timer, uint64_t value)
+{
+ nlm_write_pic_reg(base, PIC_TIMER_COUNT(timer), value);
+}
+
+static inline void
+nlm_pic_set_timer(uint64_t base, int timer, uint64_t value, int irq, int cpu)
+{
+ uint64_t pic_ctrl = nlm_read_pic_reg(base, PIC_CTRL);
+ int en;
+
+ en = (irq > 0);
+ nlm_write_pic_reg(base, PIC_TIMER_MAXVAL(timer), value);
+ nlm_pic_write_irt_direct(base, PIC_IRT_TIMER_INDEX(timer),
+ en, 0, 0, irq, cpu);
+
+ /* enable the timer */
+ pic_ctrl |= (1 << (PIC_CTRL_STE + timer));
+ nlm_write_pic_reg(base, PIC_CTRL, pic_ctrl);
+}
+
+static inline void
+nlm_pic_enable_irt(uint64_t base, int irt)
+{
+ uint64_t reg;
+
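+ /* the enable bit is bit 22 in an XLP9XX IRT entry, bit 31 (PIC_IRT_ENABLE) on earlier XLP */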
+ if (cpu_is_xlp9xx()) {
+ reg = nlm_read_pic_reg(base, PIC_9XX_IRT(irt));
+ nlm_write_pic_reg(base, PIC_9XX_IRT(irt), reg | (1 << 22));
+ } else {
+ reg = nlm_read_pic_reg(base, PIC_IRT(irt));
+ nlm_write_pic_reg(base, PIC_IRT(irt), reg | (1u << 31));
+ }
+}
+
+static inline void
+nlm_pic_disable_irt(uint64_t base, int irt)
+{
+ uint64_t reg;
+
+ if (cpu_is_xlp9xx()) {
+ reg = nlm_read_pic_reg(base, PIC_9XX_IRT(irt));
+ reg &= ~((uint64_t)1 << 22);
+ nlm_write_pic_reg(base, PIC_9XX_IRT(irt), reg);
+ } else {
+ reg = nlm_read_pic_reg(base, PIC_IRT(irt));
+ reg &= ~((uint64_t)1 << 31);
+ nlm_write_pic_reg(base, PIC_IRT(irt), reg);
+ }
+}
+
+static inline void
+nlm_pic_send_ipi(uint64_t base, int hwt, int irq, int nmi)
+{
+ uint64_t ipi;
+
+ if (cpu_is_xlp9xx())
+ ipi = (nmi << 23) | (irq << 24) |
+ (0/*mcm*/ << 20) | (0/*ptr*/ << 16) | hwt;
+ else
+ ipi = ((uint64_t)nmi << 31) | (irq << 20) |
+ ((hwt >> 4) << 16) | (1 << (hwt & 0xf));
+
+ nlm_write_pic_reg(base, PIC_IPI_CTL, ipi);
+}
+
+static inline void
+nlm_pic_ack(uint64_t base, int irt_num)
+{
+ nlm_write_pic_reg(base, PIC_INT_ACK, irt_num);
+
+ /* Ack the Status register for Watchdog & System timers */
+ if (irt_num < 12)
+ nlm_write_pic_reg(base, PIC_STATUS, (1 << irt_num));
+}
+
+static inline void
+nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt, int en)
+{
+ nlm_pic_write_irt_direct(base, irt, en, 0, 0, irq, hwt);
+}
+
+int nlm_irq_to_irt(int irq);
+
+#endif /* !LOCORE && !__ASSEMBLY__ */
+#endif /* _NLM_HAL_PIC_H */
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/sys.h b/arch/mips/include/asm/netlogic/xlp-hal/sys.h
new file mode 100644
index 000000000..6bcf3952e
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlp-hal/sys.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __NLM_HAL_SYS_H__
+#define __NLM_HAL_SYS_H__
+
+/**
+* @file_name sys.h
+* @author Netlogic Microsystems
+* @brief HAL for System configuration registers
+*/
+#define SYS_CHIP_RESET 0x00
+#define SYS_POWER_ON_RESET_CFG 0x01
+#define SYS_EFUSE_DEVICE_CFG_STATUS0 0x02
+#define SYS_EFUSE_DEVICE_CFG_STATUS1 0x03
+#define SYS_EFUSE_DEVICE_CFG_STATUS2 0x04
+#define SYS_EFUSE_DEVICE_CFG3 0x05
+#define SYS_EFUSE_DEVICE_CFG4 0x06
+#define SYS_EFUSE_DEVICE_CFG5 0x07
+#define SYS_EFUSE_DEVICE_CFG6 0x08
+#define SYS_EFUSE_DEVICE_CFG7 0x09
+#define SYS_PLL_CTRL 0x0a
+#define SYS_CPU_RESET 0x0b
+#define SYS_CPU_NONCOHERENT_MODE 0x0d
+#define SYS_CORE_DFS_DIS_CTRL 0x0e
+#define SYS_CORE_DFS_RST_CTRL 0x0f
+#define SYS_CORE_DFS_BYP_CTRL 0x10
+#define SYS_CORE_DFS_PHA_CTRL 0x11
+#define SYS_CORE_DFS_DIV_INC_CTRL 0x12
+#define SYS_CORE_DFS_DIV_DEC_CTRL 0x13
+#define SYS_CORE_DFS_DIV_VALUE 0x14
+#define SYS_RESET 0x15
+#define SYS_DFS_DIS_CTRL 0x16
+#define SYS_DFS_RST_CTRL 0x17
+#define SYS_DFS_BYP_CTRL 0x18
+#define SYS_DFS_DIV_INC_CTRL 0x19
+#define SYS_DFS_DIV_DEC_CTRL 0x1a
+#define SYS_DFS_DIV_VALUE0 0x1b
+#define SYS_DFS_DIV_VALUE1 0x1c
+#define SYS_SENSE_AMP_DLY 0x1d
+#define SYS_SOC_SENSE_AMP_DLY 0x1e
+#define SYS_CTRL0 0x1f
+#define SYS_CTRL1 0x20
+#define SYS_TIMEOUT_BS1 0x21
+#define SYS_BYTE_SWAP 0x22
+#define SYS_VRM_VID 0x23
+#define SYS_PWR_RAM_CMD 0x24
+#define SYS_PWR_RAM_ADDR 0x25
+#define SYS_PWR_RAM_DATA0 0x26
+#define SYS_PWR_RAM_DATA1 0x27
+#define SYS_PWR_RAM_DATA2 0x28
+#define SYS_PWR_UCODE 0x29
+#define SYS_CPU0_PWR_STATUS 0x2a
+#define SYS_CPU1_PWR_STATUS 0x2b
+#define SYS_CPU2_PWR_STATUS 0x2c
+#define SYS_CPU3_PWR_STATUS 0x2d
+#define SYS_CPU4_PWR_STATUS 0x2e
+#define SYS_CPU5_PWR_STATUS 0x2f
+#define SYS_CPU6_PWR_STATUS 0x30
+#define SYS_CPU7_PWR_STATUS 0x31
+#define SYS_STATUS 0x32
+#define SYS_INT_POL 0x33
+#define SYS_INT_TYPE 0x34
+#define SYS_INT_STATUS 0x35
+#define SYS_INT_MASK0 0x36
+#define SYS_INT_MASK1 0x37
+#define SYS_UCO_S_ECC 0x38
+#define SYS_UCO_M_ECC 0x39
+#define SYS_UCO_ADDR 0x3a
+#define SYS_UCO_INSTR 0x3b
+#define SYS_MEM_BIST0 0x3c
+#define SYS_MEM_BIST1 0x3d
+#define SYS_MEM_BIST2 0x3e
+#define SYS_MEM_BIST3 0x3f
+#define SYS_MEM_BIST4 0x40
+#define SYS_MEM_BIST5 0x41
+#define SYS_MEM_BIST6 0x42
+#define SYS_MEM_BIST7 0x43
+#define SYS_MEM_BIST8 0x44
+#define SYS_MEM_BIST9 0x45
+#define SYS_MEM_BIST10 0x46
+#define SYS_MEM_BIST11 0x47
+#define SYS_MEM_BIST12 0x48
+#define SYS_SCRTCH0 0x49
+#define SYS_SCRTCH1 0x4a
+#define SYS_SCRTCH2 0x4b
+#define SYS_SCRTCH3 0x4c
+
+/* PLL registers XLP2XX */
+#define SYS_CPU_PLL_CTRL0(core) (0x1c0 + (core * 4))
+#define SYS_CPU_PLL_CTRL1(core) (0x1c1 + (core * 4))
+#define SYS_CPU_PLL_CTRL2(core) (0x1c2 + (core * 4))
+#define SYS_CPU_PLL_CTRL3(core) (0x1c3 + (core * 4))
+#define SYS_PLL_CTRL0 0x240
+#define SYS_PLL_CTRL1 0x241
+#define SYS_PLL_CTRL2 0x242
+#define SYS_PLL_CTRL3 0x243
+#define SYS_DMC_PLL_CTRL0 0x244
+#define SYS_DMC_PLL_CTRL1 0x245
+#define SYS_DMC_PLL_CTRL2 0x246
+#define SYS_DMC_PLL_CTRL3 0x247
+
+#define SYS_PLL_CTRL0_DEVX(x) (0x248 + (x) * 4)
+#define SYS_PLL_CTRL1_DEVX(x) (0x249 + (x) * 4)
+#define SYS_PLL_CTRL2_DEVX(x) (0x24a + (x) * 4)
+#define SYS_PLL_CTRL3_DEVX(x) (0x24b + (x) * 4)
+
+#define SYS_CPU_PLL_CHG_CTRL 0x288
+#define SYS_PLL_CHG_CTRL 0x289
+#define SYS_CLK_DEV_DIS 0x28a
+#define SYS_CLK_DEV_SEL 0x28b
+#define SYS_CLK_DEV_DIV 0x28c
+#define SYS_CLK_DEV_CHG 0x28d
+#define SYS_CLK_DEV_SEL_REG 0x28e
+#define SYS_CLK_DEV_DIV_REG 0x28f
+#define SYS_CPU_PLL_LOCK 0x29f
+#define SYS_SYS_PLL_LOCK 0x2a0
+#define SYS_PLL_MEM_CMD 0x2a1
+#define SYS_CPU_PLL_MEM_REQ 0x2a2
+#define SYS_SYS_PLL_MEM_REQ 0x2a3
+#define SYS_PLL_MEM_STAT 0x2a4
+
+/* PLL registers XLP9XX */
+#define SYS_9XX_CPU_PLL_CTRL0(core) (0xc0 + (core * 4))
+#define SYS_9XX_CPU_PLL_CTRL1(core) (0xc1 + (core * 4))
+#define SYS_9XX_CPU_PLL_CTRL2(core) (0xc2 + (core * 4))
+#define SYS_9XX_CPU_PLL_CTRL3(core) (0xc3 + (core * 4))
+#define SYS_9XX_DMC_PLL_CTRL0 0x140
+#define SYS_9XX_DMC_PLL_CTRL1 0x141
+#define SYS_9XX_DMC_PLL_CTRL2 0x142
+#define SYS_9XX_DMC_PLL_CTRL3 0x143
+#define SYS_9XX_PLL_CTRL0 0x144
+#define SYS_9XX_PLL_CTRL1 0x145
+#define SYS_9XX_PLL_CTRL2 0x146
+#define SYS_9XX_PLL_CTRL3 0x147
+
+#define SYS_9XX_PLL_CTRL0_DEVX(x) (0x148 + (x) * 4)
+#define SYS_9XX_PLL_CTRL1_DEVX(x) (0x149 + (x) * 4)
+#define SYS_9XX_PLL_CTRL2_DEVX(x) (0x14a + (x) * 4)
+#define SYS_9XX_PLL_CTRL3_DEVX(x) (0x14b + (x) * 4)
+
+#define SYS_9XX_CPU_PLL_CHG_CTRL 0x188
+#define SYS_9XX_PLL_CHG_CTRL 0x189
+#define SYS_9XX_CLK_DEV_DIS 0x18a
+#define SYS_9XX_CLK_DEV_SEL 0x18b
+#define SYS_9XX_CLK_DEV_DIV 0x18d
+#define SYS_9XX_CLK_DEV_CHG 0x18f
+
+#define SYS_9XX_CLK_DEV_SEL_REG 0x1a4
+#define SYS_9XX_CLK_DEV_DIV_REG 0x1a6
+
+/* Registers changed on 9XX */
+#define SYS_9XX_POWER_ON_RESET_CFG 0x00
+#define SYS_9XX_CHIP_RESET 0x01
+#define SYS_9XX_CPU_RESET 0x02
+#define SYS_9XX_CPU_NONCOHERENT_MODE 0x03
+
+/* XLP 9XX fuse block registers */
+#define FUSE_9XX_DEVCFG6 0xc6
+
+#ifndef __ASSEMBLY__
+
+#define nlm_read_sys_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_sys_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_sys_pcibase(node) nlm_pcicfg_base(cpu_is_xlp9xx() ? \
+ XLP9XX_IO_SYS_OFFSET(node) : XLP_IO_SYS_OFFSET(node))
+#define nlm_get_sys_regbase(node) (nlm_get_sys_pcibase(node) + XLP_IO_PCI_HDRSZ)
+
+/* XLP9XX fuse block */
+#define nlm_get_fuse_pcibase(node) \
+ nlm_pcicfg_base(XLP9XX_IO_FUSE_OFFSET(node))
+#define nlm_get_fuse_regbase(node) \
+ (nlm_get_fuse_pcibase(node) + XLP_IO_PCI_HDRSZ)
+
+#define nlm_get_clock_pcibase(node) \
+ nlm_pcicfg_base(XLP9XX_IO_CLOCK_OFFSET(node))
+#define nlm_get_clock_regbase(node) \
+ (nlm_get_clock_pcibase(node) + XLP_IO_PCI_HDRSZ)
+
+unsigned int nlm_get_pic_frequency(int node);
+#endif /* !__ASSEMBLY__ */
+#endif /* __NLM_HAL_SYS_H__ */
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/uart.h b/arch/mips/include/asm/netlogic/xlp-hal/uart.h
new file mode 100644
index 000000000..a6c54424d
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlp-hal/uart.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XLP_HAL_UART_H__
+#define __XLP_HAL_UART_H__
+
+/* UART Specific registers */
+#define UART_RX_DATA 0x00
+#define UART_TX_DATA 0x00
+
+#define UART_INT_EN 0x01
+#define UART_INT_ID 0x02
+#define UART_FIFO_CTL 0x02
+#define UART_LINE_CTL 0x03
+#define UART_MODEM_CTL 0x04
+#define UART_LINE_STS 0x05
+#define UART_MODEM_STS 0x06
+
+#define UART_DIVISOR0 0x00
+#define UART_DIVISOR1 0x01
+
+#define BASE_BAUD (XLP_IO_CLK/16)
+#define BAUD_DIVISOR(baud) (BASE_BAUD / (baud))
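+/* e.g. with XLP_IO_CLK = 133333333: BASE_BAUD = 8333333, BAUD_DIVISOR(115200) = 72 */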
+
+/* LCR mask values */
+#define LCR_5BITS 0x00
+#define LCR_6BITS 0x01
+#define LCR_7BITS 0x02
+#define LCR_8BITS 0x03
+#define LCR_STOPB 0x04
+#define LCR_PENAB 0x08
+#define LCR_PODD 0x00
+#define LCR_PEVEN 0x10
+#define LCR_PONE 0x20
+#define LCR_PZERO 0x30
+#define LCR_SBREAK 0x40
+#define LCR_EFR_ENABLE 0xbf
+#define LCR_DLAB 0x80
+
+/* MCR mask values */
+#define MCR_DTR 0x01
+#define MCR_RTS 0x02
+#define MCR_DRS 0x04
+#define MCR_IE 0x08
+#define MCR_LOOPBACK 0x10
+
+/* FCR mask values */
+#define FCR_RCV_RST 0x02
+#define FCR_XMT_RST 0x04
+#define FCR_RX_LOW 0x00
+#define FCR_RX_MEDL 0x40
+#define FCR_RX_MEDH 0x80
+#define FCR_RX_HIGH 0xc0
+
+/* IER mask values */
+#define IER_ERXRDY 0x1
+#define IER_ETXRDY 0x2
+#define IER_ERLS 0x4
+#define IER_EMSC 0x8
+
+#if !defined(LOCORE) && !defined(__ASSEMBLY__)
+
+#define nlm_read_uart_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_uart_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_uart_pcibase(node, inst) \
+ nlm_pcicfg_base(cpu_is_xlp9xx() ? XLP9XX_IO_UART_OFFSET(node) : \
+ XLP_IO_UART_OFFSET(node, inst))
+#define nlm_get_uart_regbase(node, inst) \
+ (nlm_get_uart_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
+
+static inline void
+nlm_uart_set_baudrate(uint64_t base, int baud)
+{
+ uint32_t lcr;
+
+ lcr = nlm_read_uart_reg(base, UART_LINE_CTL);
+
+ /* enable divisor register, and write baud values */
+ nlm_write_uart_reg(base, UART_LINE_CTL, lcr | (1 << 7));
+ nlm_write_uart_reg(base, UART_DIVISOR0,
+ (BAUD_DIVISOR(baud) & 0xff));
+ nlm_write_uart_reg(base, UART_DIVISOR1,
+ ((BAUD_DIVISOR(baud) >> 8) & 0xff));
+
+ /* restore default lcr */
+ nlm_write_uart_reg(base, UART_LINE_CTL, lcr);
+}
+
+static inline void
+nlm_uart_outbyte(uint64_t base, char c)
+{
+ uint32_t lsr;
+
+ for (;;) {
+ lsr = nlm_read_uart_reg(base, UART_LINE_STS);
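+ /* LSR bit 5: transmit holding register empty */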
+ if (lsr & 0x20)
+ break;
+ }
+
+ nlm_write_uart_reg(base, UART_TX_DATA, (int)c);
+}
+
+static inline char
+nlm_uart_inbyte(uint64_t base)
+{
+ int data, lsr;
+
+ for (;;) {
+ lsr = nlm_read_uart_reg(base, UART_LINE_STS);
+ if (lsr & 0x80) { /* parity/frame/break-error - push a zero */
+ data = 0;
+ break;
+ }
+ if (lsr & 0x01) { /* Rx data */
+ data = nlm_read_uart_reg(base, UART_RX_DATA);
+ break;
+ }
+ }
+
+ return (char)data;
+}
+
+static inline int
+nlm_uart_init(uint64_t base, int baud, int databits, int stopbits,
+ int parity, int int_en, int loopback)
+{
+ uint32_t lcr;
+
+ lcr = 0;
+ if (databits >= 8)
+ lcr |= LCR_8BITS;
+ else if (databits == 7)
+ lcr |= LCR_7BITS;
+ else if (databits == 6)
+ lcr |= LCR_6BITS;
+ else
+ lcr |= LCR_5BITS;
+
+ if (stopbits > 1)
+ lcr |= LCR_STOPB;
+
+ lcr |= parity << 3;
+
+ /* setup default lcr */
+ nlm_write_uart_reg(base, UART_LINE_CTL, lcr);
+
+ /* Reset the FIFOs */
+ nlm_write_uart_reg(base, UART_FIFO_CTL, FCR_RCV_RST | FCR_XMT_RST);
+
+ nlm_uart_set_baudrate(base, baud);
+
+ if (loopback)
+ nlm_write_uart_reg(base, UART_MODEM_CTL, 0x1f);
+
+ if (int_en)
+ nlm_write_uart_reg(base, UART_INT_EN, IER_ERXRDY | IER_ETXRDY);
+
+ return 0;
+}
+#endif /* !LOCORE && !__ASSEMBLY__ */
+#endif /* __XLP_HAL_UART_H__ */
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
new file mode 100644
index 000000000..feb6ed807
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NLM_HAL_XLP_H
+#define _NLM_HAL_XLP_H
+
+#define PIC_UART_0_IRQ 17
+#define PIC_UART_1_IRQ 18
+
+#define PIC_PCIE_LINK_LEGACY_IRQ_BASE 19
+#define PIC_PCIE_LINK_LEGACY_IRQ(i) (19 + (i))
+
+#define PIC_EHCI_0_IRQ 23
+#define PIC_EHCI_1_IRQ 24
+#define PIC_OHCI_0_IRQ 25
+#define PIC_OHCI_1_IRQ 26
+#define PIC_OHCI_2_IRQ 27
+#define PIC_OHCI_3_IRQ 28
+#define PIC_2XX_XHCI_0_IRQ 23
+#define PIC_2XX_XHCI_1_IRQ 24
+#define PIC_2XX_XHCI_2_IRQ 25
+#define PIC_9XX_XHCI_0_IRQ 23
+#define PIC_9XX_XHCI_1_IRQ 24
+#define PIC_9XX_XHCI_2_IRQ 25
+
+#define PIC_MMC_IRQ 29
+#define PIC_I2C_0_IRQ 30
+#define PIC_I2C_1_IRQ 31
+#define PIC_I2C_2_IRQ 32
+#define PIC_I2C_3_IRQ 33
+#define PIC_SPI_IRQ 34
+#define PIC_NAND_IRQ 37
+#define PIC_SATA_IRQ 38
+#define PIC_GPIO_IRQ 39
+
+#define PIC_PCIE_LINK_MSI_IRQ_BASE 44 /* 44 - 47 MSI IRQ */
+#define PIC_PCIE_LINK_MSI_IRQ(i) (44 + (i))
+
+/* MSI-X with second link-level dispatch */
+#define PIC_PCIE_MSIX_IRQ_BASE 48 /* 48 - 51 MSI-X IRQ */
+#define PIC_PCIE_MSIX_IRQ(i) (48 + (i))
+
+/* XLP9xx and XLP8xx have 128 and 32 MSIX vectors respectively */
+#define NLM_MSIX_VEC_BASE 96 /* 96 - 223 - MSIX mapped */
+#define NLM_MSI_VEC_BASE 224 /* 224 - 351 - MSI mapped */
+
+#define NLM_PIC_INDIRECT_VEC_BASE 512
+#define NLM_GPIO_VEC_BASE 768
+
+#define PIC_IRQ_BASE 8
+#define PIC_IRT_FIRST_IRQ PIC_IRQ_BASE
+#define PIC_IRT_LAST_IRQ 63
+
+#ifndef __ASSEMBLY__
+
+/* SMP support functions */
+void xlp_boot_core0_siblings(void);
+void xlp_wakeup_secondary_cpus(void);
+
+void xlp_mmu_init(void);
+void nlm_hal_init(void);
+int nlm_get_dram_map(int node, uint64_t *dram_map, int nentries);
+
+struct pci_dev;
+int xlp_socdev_to_node(const struct pci_dev *dev);
+
+/* Device tree related */
+void xlp_early_init_devtree(void);
+void *xlp_dt_init(void *fdtp);
+
+static inline int cpu_is_xlpii(void)
+{
+ int chip = read_c0_prid() & PRID_IMP_MASK;
+
+ return chip == PRID_IMP_NETLOGIC_XLP2XX ||
+ chip == PRID_IMP_NETLOGIC_XLP9XX ||
+ chip == PRID_IMP_NETLOGIC_XLP5XX;
+}
+
+static inline int cpu_is_xlp9xx(void)
+{
+ int chip = read_c0_prid() & PRID_IMP_MASK;
+
+ return chip == PRID_IMP_NETLOGIC_XLP9XX ||
+ chip == PRID_IMP_NETLOGIC_XLP5XX;
+}
+#endif /* !__ASSEMBLY__ */
+#endif /* _NLM_HAL_XLP_H */
diff --git a/arch/mips/include/asm/netlogic/xlr/bridge.h b/arch/mips/include/asm/netlogic/xlr/bridge.h
new file mode 100644
index 000000000..2d02428c4
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlr/bridge.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ASM_NLM_BRIDGE_H_
+#define _ASM_NLM_BRIDGE_H_
+
+#define BRIDGE_DRAM_0_BAR 0
+#define BRIDGE_DRAM_1_BAR 1
+#define BRIDGE_DRAM_2_BAR 2
+#define BRIDGE_DRAM_3_BAR 3
+#define BRIDGE_DRAM_4_BAR 4
+#define BRIDGE_DRAM_5_BAR 5
+#define BRIDGE_DRAM_6_BAR 6
+#define BRIDGE_DRAM_7_BAR 7
+#define BRIDGE_DRAM_CHN_0_MTR_0_BAR 8
+#define BRIDGE_DRAM_CHN_0_MTR_1_BAR 9
+#define BRIDGE_DRAM_CHN_0_MTR_2_BAR 10
+#define BRIDGE_DRAM_CHN_0_MTR_3_BAR 11
+#define BRIDGE_DRAM_CHN_0_MTR_4_BAR 12
+#define BRIDGE_DRAM_CHN_0_MTR_5_BAR 13
+#define BRIDGE_DRAM_CHN_0_MTR_6_BAR 14
+#define BRIDGE_DRAM_CHN_0_MTR_7_BAR 15
+#define BRIDGE_DRAM_CHN_1_MTR_0_BAR 16
+#define BRIDGE_DRAM_CHN_1_MTR_1_BAR 17
+#define BRIDGE_DRAM_CHN_1_MTR_2_BAR 18
+#define BRIDGE_DRAM_CHN_1_MTR_3_BAR 19
+#define BRIDGE_DRAM_CHN_1_MTR_4_BAR 20
+#define BRIDGE_DRAM_CHN_1_MTR_5_BAR 21
+#define BRIDGE_DRAM_CHN_1_MTR_6_BAR 22
+#define BRIDGE_DRAM_CHN_1_MTR_7_BAR 23
+#define BRIDGE_CFG_BAR 24
+#define BRIDGE_PHNX_IO_BAR 25
+#define BRIDGE_FLASH_BAR 26
+#define BRIDGE_SRAM_BAR 27
+#define BRIDGE_HTMEM_BAR 28
+#define BRIDGE_HTINT_BAR 29
+#define BRIDGE_HTPIC_BAR 30
+#define BRIDGE_HTSM_BAR 31
+#define BRIDGE_HTIO_BAR 32
+#define BRIDGE_HTCFG_BAR 33
+#define BRIDGE_PCIXCFG_BAR 34
+#define BRIDGE_PCIXMEM_BAR 35
+#define BRIDGE_PCIXIO_BAR 36
+#define BRIDGE_DEVICE_MASK 37
+#define BRIDGE_AERR_INTR_LOG1 38
+#define BRIDGE_AERR_INTR_LOG2 39
+#define BRIDGE_AERR_INTR_LOG3 40
+#define BRIDGE_AERR_DEV_STAT 41
+#define BRIDGE_AERR1_LOG1 42
+#define BRIDGE_AERR1_LOG2 43
+#define BRIDGE_AERR1_LOG3 44
+#define BRIDGE_AERR1_DEV_STAT 45
+#define BRIDGE_AERR_INTR_EN 46
+#define BRIDGE_AERR_UPG 47
+#define BRIDGE_AERR_CLEAR 48
+#define BRIDGE_AERR1_CLEAR 49
+#define BRIDGE_SBE_COUNTS 50
+#define BRIDGE_DBE_COUNTS 51
+#define BRIDGE_BITERR_INT_EN 52
+
+#define BRIDGE_SYS2IO_CREDITS 53
+#define BRIDGE_EVNT_CNT_CTRL1 54
+#define BRIDGE_EVNT_COUNTER1 55
+#define BRIDGE_EVNT_CNT_CTRL2 56
+#define BRIDGE_EVNT_COUNTER2 57
+#define BRIDGE_RESERVED1 58
+
+#define BRIDGE_DEFEATURE 59
+#define BRIDGE_SCRATCH0 60
+#define BRIDGE_SCRATCH1 61
+#define BRIDGE_SCRATCH2 62
+#define BRIDGE_SCRATCH3 63
+
+#endif /* _ASM_NLM_BRIDGE_H_ */
diff --git a/arch/mips/include/asm/netlogic/xlr/flash.h b/arch/mips/include/asm/netlogic/xlr/flash.h
new file mode 100644
index 000000000..f8aca5472
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlr/flash.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ASM_NLM_FLASH_H_
+#define _ASM_NLM_FLASH_H_
+
+#define FLASH_CSBASE_ADDR(cs) (cs)
+#define FLASH_CSADDR_MASK(cs) (0x10 + (cs))
+#define FLASH_CSDEV_PARM(cs) (0x20 + (cs))
+#define FLASH_CSTIME_PARMA(cs) (0x30 + (cs))
+#define FLASH_CSTIME_PARMB(cs) (0x40 + (cs))
+
+#define FLASH_INT_MASK 0x50
+#define FLASH_INT_STATUS 0x60
+#define FLASH_ERROR_STATUS 0x70
+#define FLASH_ERROR_ADDR 0x80
+
+#define FLASH_NAND_CLE(cs) (0x90 + (cs))
+#define FLASH_NAND_ALE(cs) (0xa0 + (cs))
+
+#define FLASH_NAND_CSDEV_PARAM 0x000041e6
+#define FLASH_NAND_CSTIME_PARAMA 0x4f400e22
+#define FLASH_NAND_CSTIME_PARAMB 0x000083cf
+
+#endif /* _ASM_NLM_FLASH_H_ */
diff --git a/arch/mips/include/asm/netlogic/xlr/fmn.h b/arch/mips/include/asm/netlogic/xlr/fmn.h
new file mode 100644
index 000000000..d79c68fa7
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlr/fmn.h
@@ -0,0 +1,365 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NLM_FMN_H_
+#define _NLM_FMN_H_
+
+#include <asm/netlogic/mips-extns.h> /* for COP2 access */
+
+/* Station IDs */
+#define FMN_STNID_CPU0 0x00
+#define FMN_STNID_CPU1 0x08
+#define FMN_STNID_CPU2 0x10
+#define FMN_STNID_CPU3 0x18
+#define FMN_STNID_CPU4 0x20
+#define FMN_STNID_CPU5 0x28
+#define FMN_STNID_CPU6 0x30
+#define FMN_STNID_CPU7 0x38
+
+#define FMN_STNID_XGS0_TX 64
+#define FMN_STNID_XMAC0_00_TX 64
+#define FMN_STNID_XMAC0_01_TX 65
+#define FMN_STNID_XMAC0_02_TX 66
+#define FMN_STNID_XMAC0_03_TX 67
+#define FMN_STNID_XMAC0_04_TX 68
+#define FMN_STNID_XMAC0_05_TX 69
+#define FMN_STNID_XMAC0_06_TX 70
+#define FMN_STNID_XMAC0_07_TX 71
+#define FMN_STNID_XMAC0_08_TX 72
+#define FMN_STNID_XMAC0_09_TX 73
+#define FMN_STNID_XMAC0_10_TX 74
+#define FMN_STNID_XMAC0_11_TX 75
+#define FMN_STNID_XMAC0_12_TX 76
+#define FMN_STNID_XMAC0_13_TX 77
+#define FMN_STNID_XMAC0_14_TX 78
+#define FMN_STNID_XMAC0_15_TX 79
+
+#define FMN_STNID_XGS1_TX 80
+#define FMN_STNID_XMAC1_00_TX 80
+#define FMN_STNID_XMAC1_01_TX 81
+#define FMN_STNID_XMAC1_02_TX 82
+#define FMN_STNID_XMAC1_03_TX 83
+#define FMN_STNID_XMAC1_04_TX 84
+#define FMN_STNID_XMAC1_05_TX 85
+#define FMN_STNID_XMAC1_06_TX 86
+#define FMN_STNID_XMAC1_07_TX 87
+#define FMN_STNID_XMAC1_08_TX 88
+#define FMN_STNID_XMAC1_09_TX 89
+#define FMN_STNID_XMAC1_10_TX 90
+#define FMN_STNID_XMAC1_11_TX 91
+#define FMN_STNID_XMAC1_12_TX 92
+#define FMN_STNID_XMAC1_13_TX 93
+#define FMN_STNID_XMAC1_14_TX 94
+#define FMN_STNID_XMAC1_15_TX 95
+
+#define FMN_STNID_GMAC 96
+#define FMN_STNID_GMACJFR_0 96
+#define FMN_STNID_GMACRFR_0 97
+#define FMN_STNID_GMACTX0 98
+#define FMN_STNID_GMACTX1 99
+#define FMN_STNID_GMACTX2 100
+#define FMN_STNID_GMACTX3 101
+#define FMN_STNID_GMACJFR_1 102
+#define FMN_STNID_GMACRFR_1 103
+
+#define FMN_STNID_DMA 104
+#define FMN_STNID_DMA_0 104
+#define FMN_STNID_DMA_1 105
+#define FMN_STNID_DMA_2 106
+#define FMN_STNID_DMA_3 107
+
+#define FMN_STNID_XGS0FR 112
+#define FMN_STNID_XMAC0JFR 112
+#define FMN_STNID_XMAC0RFR 113
+
+#define FMN_STNID_XGS1FR 114
+#define FMN_STNID_XMAC1JFR 114
+#define FMN_STNID_XMAC1RFR 115
+#define FMN_STNID_SEC 120
+#define FMN_STNID_SEC0 120
+#define FMN_STNID_SEC1 121
+#define FMN_STNID_SEC2 122
+#define FMN_STNID_SEC3 123
+#define FMN_STNID_PK0 124
+#define FMN_STNID_SEC_RSA 124
+#define FMN_STNID_SEC_RSVD0 125
+#define FMN_STNID_SEC_RSVD1 126
+#define FMN_STNID_SEC_RSVD2 127
+
+#define FMN_STNID_GMAC1 80
+#define FMN_STNID_GMAC1_FR_0 81
+#define FMN_STNID_GMAC1_TX0 82
+#define FMN_STNID_GMAC1_TX1 83
+#define FMN_STNID_GMAC1_TX2 84
+#define FMN_STNID_GMAC1_TX3 85
+#define FMN_STNID_GMAC1_FR_1 87
+#define FMN_STNID_GMAC0 96
+#define FMN_STNID_GMAC0_FR_0 97
+#define FMN_STNID_GMAC0_TX0 98
+#define FMN_STNID_GMAC0_TX1 99
+#define FMN_STNID_GMAC0_TX2 100
+#define FMN_STNID_GMAC0_TX3 101
+#define FMN_STNID_GMAC0_FR_1 103
+#define FMN_STNID_CMP_0 108
+#define FMN_STNID_CMP_1 109
+#define FMN_STNID_CMP_2 110
+#define FMN_STNID_CMP_3 111
+#define FMN_STNID_PCIE_0 116
+#define FMN_STNID_PCIE_1 117
+#define FMN_STNID_PCIE_2 118
+#define FMN_STNID_PCIE_3 119
+#define FMN_STNID_XLS_PK0 121
+
+#define nlm_read_c2_cc0(s) __read_32bit_c2_register($16, s)
+#define nlm_read_c2_cc1(s) __read_32bit_c2_register($17, s)
+#define nlm_read_c2_cc2(s) __read_32bit_c2_register($18, s)
+#define nlm_read_c2_cc3(s) __read_32bit_c2_register($19, s)
+#define nlm_read_c2_cc4(s) __read_32bit_c2_register($20, s)
+#define nlm_read_c2_cc5(s) __read_32bit_c2_register($21, s)
+#define nlm_read_c2_cc6(s) __read_32bit_c2_register($22, s)
+#define nlm_read_c2_cc7(s) __read_32bit_c2_register($23, s)
+#define nlm_read_c2_cc8(s) __read_32bit_c2_register($24, s)
+#define nlm_read_c2_cc9(s) __read_32bit_c2_register($25, s)
+#define nlm_read_c2_cc10(s) __read_32bit_c2_register($26, s)
+#define nlm_read_c2_cc11(s) __read_32bit_c2_register($27, s)
+#define nlm_read_c2_cc12(s) __read_32bit_c2_register($28, s)
+#define nlm_read_c2_cc13(s) __read_32bit_c2_register($29, s)
+#define nlm_read_c2_cc14(s) __read_32bit_c2_register($30, s)
+#define nlm_read_c2_cc15(s) __read_32bit_c2_register($31, s)
+
+#define nlm_write_c2_cc0(s, v) __write_32bit_c2_register($16, s, v)
+#define nlm_write_c2_cc1(s, v) __write_32bit_c2_register($17, s, v)
+#define nlm_write_c2_cc2(s, v) __write_32bit_c2_register($18, s, v)
+#define nlm_write_c2_cc3(s, v) __write_32bit_c2_register($19, s, v)
+#define nlm_write_c2_cc4(s, v) __write_32bit_c2_register($20, s, v)
+#define nlm_write_c2_cc5(s, v) __write_32bit_c2_register($21, s, v)
+#define nlm_write_c2_cc6(s, v) __write_32bit_c2_register($22, s, v)
+#define nlm_write_c2_cc7(s, v) __write_32bit_c2_register($23, s, v)
+#define nlm_write_c2_cc8(s, v) __write_32bit_c2_register($24, s, v)
+#define nlm_write_c2_cc9(s, v) __write_32bit_c2_register($25, s, v)
+#define nlm_write_c2_cc10(s, v) __write_32bit_c2_register($26, s, v)
+#define nlm_write_c2_cc11(s, v) __write_32bit_c2_register($27, s, v)
+#define nlm_write_c2_cc12(s, v) __write_32bit_c2_register($28, s, v)
+#define nlm_write_c2_cc13(s, v) __write_32bit_c2_register($29, s, v)
+#define nlm_write_c2_cc14(s, v) __write_32bit_c2_register($30, s, v)
+#define nlm_write_c2_cc15(s, v) __write_32bit_c2_register($31, s, v)
+
+#define nlm_read_c2_status0() __read_32bit_c2_register($2, 0)
+#define nlm_write_c2_status0(v) __write_32bit_c2_register($2, 0, v)
+#define nlm_read_c2_status1() __read_32bit_c2_register($2, 1)
+#define nlm_write_c2_status1(v) __write_32bit_c2_register($2, 1, v)
+#define nlm_read_c2_status(sel) __read_32bit_c2_register($2, 0)
+#define nlm_read_c2_config() __read_32bit_c2_register($3, 0)
+#define nlm_write_c2_config(v) __write_32bit_c2_register($3, 0, v)
+#define nlm_read_c2_bucksize(b) __read_32bit_c2_register($4, b)
+#define nlm_write_c2_bucksize(b, v) __write_32bit_c2_register($4, b, v)
+
+#define nlm_read_c2_rx_msg0() __read_64bit_c2_register($1, 0)
+#define nlm_read_c2_rx_msg1() __read_64bit_c2_register($1, 1)
+#define nlm_read_c2_rx_msg2() __read_64bit_c2_register($1, 2)
+#define nlm_read_c2_rx_msg3() __read_64bit_c2_register($1, 3)
+
+#define nlm_write_c2_tx_msg0(v) __write_64bit_c2_register($0, 0, v)
+#define nlm_write_c2_tx_msg1(v) __write_64bit_c2_register($0, 1, v)
+#define nlm_write_c2_tx_msg2(v) __write_64bit_c2_register($0, 2, v)
+#define nlm_write_c2_tx_msg3(v) __write_64bit_c2_register($0, 3, v)
+
+#define FMN_STN_RX_QSIZE 256
+#define FMN_NSTATIONS 128
+#define FMN_CORE_NBUCKETS 8
+
+static inline void nlm_msgsnd(unsigned int stid)
+{
+ __asm__ volatile (
+ ".set push\n"
+ ".set noreorder\n"
+ ".set noat\n"
+ "move $1, %0\n"
+ "c2 0x10001\n" /* msgsnd $1 */
+ ".set pop\n"
+ : : "r" (stid) : "$1"
+ );
+}
+
+static inline void nlm_msgld(unsigned int pri)
+{
+ __asm__ volatile (
+ ".set push\n"
+ ".set noreorder\n"
+ ".set noat\n"
+ "move $1, %0\n"
+ "c2 0x10002\n" /* msgld $1 */
+ ".set pop\n"
+ : : "r" (pri) : "$1"
+ );
+}
+
+static inline void nlm_msgwait(unsigned int mask)
+{
+ __asm__ volatile (
+ ".set push\n"
+ ".set noreorder\n"
+ ".set noat\n"
+ "move $8, %0\n"
+ "c2 0x10003\n" /* msgwait $1 */
+ ".set pop\n"
+ : : "r" (mask) : "$1"
+ );
+}
+
+/*
+ * Disable interrupts and enable COP2 access
+ */
+static inline uint32_t nlm_cop2_enable_irqsave(void)
+{
+ uint32_t sr = read_c0_status();
+
+ write_c0_status((sr & ~ST0_IE) | ST0_CU2);
+ return sr;
+}
+
+static inline void nlm_cop2_disable_irqrestore(uint32_t sr)
+{
+ write_c0_status(sr);
+}
+
+static inline void nlm_fmn_setup_intr(int irq, unsigned int tmask)
+{
+ uint32_t config;
+
+ config = (1 << 24) /* interrupt water mark - 1 msg */
+ | (irq << 16) /* irq */
+ | (tmask << 8) /* thread mask */
+ | 0x2; /* enable watermark intr, disable empty intr */
+ nlm_write_c2_config(config);
+}
+
+struct nlm_fmn_msg {
+ uint64_t msg0;
+ uint64_t msg1;
+ uint64_t msg2;
+ uint64_t msg3;
+};
+
+static inline int nlm_fmn_send(unsigned int size, unsigned int code,
+ unsigned int stid, struct nlm_fmn_msg *msg)
+{
+ unsigned int dest;
+ uint32_t status;
+ int i;
+
+ /*
+ * Make sure that all the writes pending at the cpu are flushed.
+ * Any writes pending on the CPU will not be seen by devices. L1/L2
+ * caches are coherent with IO, so no cache flush needed.
+ */
+ __asm__ __volatile__("sync");
+
+ /* Load TX message buffers */
+ nlm_write_c2_tx_msg0(msg->msg0);
+ nlm_write_c2_tx_msg1(msg->msg1);
+ nlm_write_c2_tx_msg2(msg->msg2);
+ nlm_write_c2_tx_msg3(msg->msg3);
+ dest = ((size - 1) << 16) | (code << 8) | stid;
+
+ /*
+ * Retry a few times on credit failure; this should be a
+ * transient condition unless there is a configuration
+ * error or the receiver is stuck.
+ */
+ for (i = 0; i < 8; i++) {
+ nlm_msgsnd(dest);
+ status = nlm_read_c2_status0();
+ if ((status & 0x4) == 0)
+ return 0;
+ }
+
+ /* If there is a credit failure, return error */
+ return status & 0x06;
+}
+
+static inline int nlm_fmn_receive(int bucket, int *size, int *code, int *stid,
+ struct nlm_fmn_msg *msg)
+{
+ uint32_t status, tmp;
+
+ nlm_msgld(bucket);
+
+ /* wait for load pending to clear */
+ do {
+ status = nlm_read_c2_status0();
+ } while ((status & 0x08) != 0);
+
+ /* receive error bits */
+ tmp = status & 0x30;
+ if (tmp != 0)
+ return tmp;
+
+ *size = ((status & 0xc0) >> 6) + 1;
+ *code = (status & 0xff00) >> 8;
+ *stid = (status & 0x7f0000) >> 16;
+ msg->msg0 = nlm_read_c2_rx_msg0();
+ msg->msg1 = nlm_read_c2_rx_msg1();
+ msg->msg2 = nlm_read_c2_rx_msg2();
+ msg->msg3 = nlm_read_c2_rx_msg3();
+
+ return 0;
+}
+
+struct xlr_fmn_info {
+ int num_buckets;
+ int start_stn_id;
+ int end_stn_id;
+ int credit_config[128];
+};
+
+struct xlr_board_fmn_config {
+ int bucket_size[128]; /* size of buckets for all stations */
+ struct xlr_fmn_info cpu[8];
+ struct xlr_fmn_info gmac[2];
+ struct xlr_fmn_info dma;
+ struct xlr_fmn_info cmp;
+ struct xlr_fmn_info sae;
+ struct xlr_fmn_info xgmac[2];
+};
+
+extern int nlm_register_fmn_handler(int start, int end,
+ void (*fn)(int, int, int, int, struct nlm_fmn_msg *, void *),
+ void *arg);
+extern void xlr_percpu_fmn_init(void);
+extern void nlm_setup_fmn_irq(void);
+extern void xlr_board_info_setup(void);
+
+extern struct xlr_board_fmn_config xlr_board_fmn_config;
+#endif
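
A minimal usage sketch (not part of the patch) of how the helpers above are meant to compose: COP2 access must be enabled with interrupts off around every message-ring operation. The station ID, bucket number, message code and payload below are illustrative values only.

#include <asm/netlogic/xlr/fmn.h>

static int example_fmn_roundtrip(void)
{
	struct nlm_fmn_msg tx, rx;
	uint32_t flags;
	int size, code, stid, err;

	tx.msg0 = 0x1234;	/* payload; meaning is defined by the receiver */
	tx.msg1 = 0x5678;
	tx.msg2 = 0;
	tx.msg3 = 0;

	flags = nlm_cop2_enable_irqsave();	/* irqs off, CU2 set */

	/* two-word message, software code 0, to GMAC TX queue 0 */
	err = nlm_fmn_send(2, 0, FMN_STNID_GMACTX0, &tx);
	if (err == 0)
		/* poll bucket 0 of this core for the reply */
		err = nlm_fmn_receive(0, &size, &code, &stid, &rx);

	nlm_cop2_disable_irqrestore(flags);	/* restore Status register */
	return err;
}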
diff --git a/arch/mips/include/asm/netlogic/xlr/gpio.h b/arch/mips/include/asm/netlogic/xlr/gpio.h
new file mode 100644
index 000000000..8492e835b
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlr/gpio.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ASM_NLM_GPIO_H
+#define _ASM_NLM_GPIO_H
+
+#define GPIO_INT_EN_REG 0
+#define GPIO_INPUT_INVERSION_REG 1
+#define GPIO_IO_DIR_REG 2
+#define GPIO_IO_DATA_WR_REG 3
+#define GPIO_IO_DATA_RD_REG 4
+
+#define GPIO_SWRESET_REG 8
+#define GPIO_DRAM1_CNTRL_REG 9
+#define GPIO_DRAM1_RATIO_REG 10
+#define GPIO_DRAM1_RESET_REG 11
+#define GPIO_DRAM1_STATUS_REG 12
+#define GPIO_DRAM2_CNTRL_REG 13
+#define GPIO_DRAM2_RATIO_REG 14
+#define GPIO_DRAM2_RESET_REG 15
+#define GPIO_DRAM2_STATUS_REG 16
+
+#define GPIO_PWRON_RESET_CFG_REG 21
+#define GPIO_BIST_ALL_GO_STATUS_REG 24
+#define GPIO_BIST_CPU_GO_STATUS_REG 25
+#define GPIO_BIST_DEV_GO_STATUS_REG 26
+
+#define GPIO_FUSE_BANK_REG 35
+#define GPIO_CPU_RESET_REG 40
+#define GPIO_RNG_REG 43
+
+#define PWRON_RESET_PCMCIA_BOOT 17
+
+#define GPIO_LED_BITMAP 0x1700000
+#define GPIO_LED_0_SHIFT 20
+#define GPIO_LED_1_SHIFT 24
+
+#define GPIO_LED_OUTPUT_CODE_RESET 0x01
+#define GPIO_LED_OUTPUT_CODE_HARD_RESET 0x02
+#define GPIO_LED_OUTPUT_CODE_SOFT_RESET 0x03
+#define GPIO_LED_OUTPUT_CODE_MAIN 0x04
+
+#endif
diff --git a/arch/mips/include/asm/netlogic/xlr/iomap.h b/arch/mips/include/asm/netlogic/xlr/iomap.h
new file mode 100644
index 000000000..ff4533d6e
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlr/iomap.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ASM_NLM_IOMAP_H
+#define _ASM_NLM_IOMAP_H
+
+#define DEFAULT_NETLOGIC_IO_BASE CKSEG1ADDR(0x1ef00000)
+#define NETLOGIC_IO_DDR2_CHN0_OFFSET 0x01000
+#define NETLOGIC_IO_DDR2_CHN1_OFFSET 0x02000
+#define NETLOGIC_IO_DDR2_CHN2_OFFSET 0x03000
+#define NETLOGIC_IO_DDR2_CHN3_OFFSET 0x04000
+#define NETLOGIC_IO_PIC_OFFSET 0x08000
+#define NETLOGIC_IO_UART_0_OFFSET 0x14000
+#define NETLOGIC_IO_UART_1_OFFSET 0x15100
+
+#define NETLOGIC_IO_SIZE 0x1000
+
+#define NETLOGIC_IO_BRIDGE_OFFSET 0x00000
+
+#define NETLOGIC_IO_RLD2_CHN0_OFFSET 0x05000
+#define NETLOGIC_IO_RLD2_CHN1_OFFSET 0x06000
+
+#define NETLOGIC_IO_SRAM_OFFSET 0x07000
+
+#define NETLOGIC_IO_PCIX_OFFSET 0x09000
+#define NETLOGIC_IO_HT_OFFSET 0x0A000
+
+#define NETLOGIC_IO_SECURITY_OFFSET 0x0B000
+
+#define NETLOGIC_IO_GMAC_0_OFFSET 0x0C000
+#define NETLOGIC_IO_GMAC_1_OFFSET 0x0D000
+#define NETLOGIC_IO_GMAC_2_OFFSET 0x0E000
+#define NETLOGIC_IO_GMAC_3_OFFSET 0x0F000
+
+/* XLS devices */
+#define NETLOGIC_IO_GMAC_4_OFFSET 0x20000
+#define NETLOGIC_IO_GMAC_5_OFFSET 0x21000
+#define NETLOGIC_IO_GMAC_6_OFFSET 0x22000
+#define NETLOGIC_IO_GMAC_7_OFFSET 0x23000
+
+#define NETLOGIC_IO_PCIE_0_OFFSET 0x1E000
+#define NETLOGIC_IO_PCIE_1_OFFSET 0x1F000
+#define NETLOGIC_IO_SRIO_0_OFFSET 0x1E000
+#define NETLOGIC_IO_SRIO_1_OFFSET 0x1F000
+
+#define NETLOGIC_IO_USB_0_OFFSET 0x24000
+#define NETLOGIC_IO_USB_1_OFFSET 0x25000
+
+#define NETLOGIC_IO_COMP_OFFSET 0x1D000
+/* end XLS devices */
+
+/* XLR devices */
+#define NETLOGIC_IO_SPI4_0_OFFSET 0x10000
+#define NETLOGIC_IO_XGMAC_0_OFFSET 0x11000
+#define NETLOGIC_IO_SPI4_1_OFFSET 0x12000
+#define NETLOGIC_IO_XGMAC_1_OFFSET 0x13000
+/* end XLR devices */
+
+#define NETLOGIC_IO_I2C_0_OFFSET 0x16000
+#define NETLOGIC_IO_I2C_1_OFFSET 0x17000
+
+#define NETLOGIC_IO_GPIO_OFFSET 0x18000
+#define NETLOGIC_IO_FLASH_OFFSET 0x19000
+#define NETLOGIC_IO_TB_OFFSET 0x1C000
+
+#define NETLOGIC_CPLD_OFFSET KSEG1ADDR(0x1d840000)
+
+/*
+ * Base address (virtual) of the PCI config address space.
+ * For now, choose 256M phys in kseg1 = 0xA0000000 + (1 << 28).
+ * Config space spans 256 buses * 256 functions * 256 bytes,
+ * i.e. 1 << 24 = 16M.
+ */
+#define DEFAULT_PCI_CONFIG_BASE 0x18000000
+#define DEFAULT_HT_TYPE0_CFG_BASE 0x16000000
+#define DEFAULT_HT_TYPE1_CFG_BASE 0x17000000
+
+#endif
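
As a rough illustration of how these offsets combine (a sketch, not code from the patch): each on-chip block occupies NETLOGIC_IO_SIZE bytes at a fixed offset from DEFAULT_NETLOGIC_IO_BASE, so the uncached KSEG1 base of, say, GMAC port n (n < 4 on XLR) is a simple addition. CKSEG1ADDR is assumed to come from the usual asm/addrspace.h.

#include <asm/addrspace.h>
#include <asm/netlogic/xlr/iomap.h>

/* KSEG1 base address of GMAC port 'port' (0..3) */
static inline unsigned long example_gmac_mmio_base(int port)
{
	return DEFAULT_NETLOGIC_IO_BASE + NETLOGIC_IO_GMAC_0_OFFSET +
	       port * NETLOGIC_IO_SIZE;
}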
diff --git a/arch/mips/include/asm/netlogic/xlr/msidef.h b/arch/mips/include/asm/netlogic/xlr/msidef.h
new file mode 100644
index 000000000..c95d18edf
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlr/msidef.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ASM_RMI_MSIDEF_H
+#define ASM_RMI_MSIDEF_H
+
+/*
+ * Constants for Intel APIC based MSI messages.
+ * Adapted for the RMI XLR using identical defines
+ */
+
+/*
+ * Shifts for MSI data
+ */
+
+#define MSI_DATA_VECTOR_SHIFT 0
+#define MSI_DATA_VECTOR_MASK 0x000000ff
+#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \
+ MSI_DATA_VECTOR_MASK)
+
+#define MSI_DATA_DELIVERY_MODE_SHIFT 8
+#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
+
+#define MSI_DATA_LEVEL_SHIFT 14
+#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
+#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
+
+#define MSI_DATA_TRIGGER_SHIFT 15
+#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
+#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
+
+/*
+ * Shift/mask fields for msi address
+ */
+
+#define MSI_ADDR_BASE_HI 0
+#define MSI_ADDR_BASE_LO 0xfee00000
+
+#define MSI_ADDR_DEST_MODE_SHIFT 2
+#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT)
+#define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT)
+
+#define MSI_ADDR_REDIRECTION_SHIFT 3
+#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
+#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
+
+#define MSI_ADDR_DEST_ID_SHIFT 12
+#define MSI_ADDR_DEST_ID_MASK 0x00ffff0
+#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
+ MSI_ADDR_DEST_ID_MASK)
+
+#endif /* ASM_RMI_MSIDEF_H */
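
A short sketch of how these fields compose into an MSI address/data pair, following the same pattern as the x86 code the defines were adapted from; the vector and destination CPU values are placeholders, and the function name is illustrative only.

#include <asm/netlogic/xlr/msidef.h>

static void example_compose_msi(uint32_t *addr_hi, uint32_t *addr_lo,
				uint32_t *data, unsigned int vector,
				unsigned int dest_cpu)
{
	*addr_hi = MSI_ADDR_BASE_HI;
	*addr_lo = MSI_ADDR_BASE_LO |
		   MSI_ADDR_DEST_MODE_PHYSICAL |
		   MSI_ADDR_REDIRECTION_CPU |
		   MSI_ADDR_DEST_ID(dest_cpu);
	*data = MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(vector);
}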
diff --git a/arch/mips/include/asm/netlogic/xlr/pic.h b/arch/mips/include/asm/netlogic/xlr/pic.h
new file mode 100644
index 000000000..3c80a7523
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlr/pic.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ASM_NLM_XLR_PIC_H
+#define _ASM_NLM_XLR_PIC_H
+
+#define PIC_CLK_HZ 66666666
+#define pic_timer_freq() PIC_CLK_HZ
+
+/* PIC hardware interrupt numbers */
+#define PIC_IRT_WD_INDEX 0
+#define PIC_IRT_TIMER_0_INDEX 1
+#define PIC_IRT_TIMER_INDEX(i) ((i) + PIC_IRT_TIMER_0_INDEX)
+#define PIC_IRT_TIMER_1_INDEX 2
+#define PIC_IRT_TIMER_2_INDEX 3
+#define PIC_IRT_TIMER_3_INDEX 4
+#define PIC_IRT_TIMER_4_INDEX 5
+#define PIC_IRT_TIMER_5_INDEX 6
+#define PIC_IRT_TIMER_6_INDEX 7
+#define PIC_IRT_TIMER_7_INDEX 8
+#define PIC_IRT_CLOCK_INDEX PIC_IRT_TIMER_7_INDEX
+#define PIC_IRT_UART_0_INDEX 9
+#define PIC_IRT_UART_1_INDEX 10
+#define PIC_IRT_I2C_0_INDEX 11
+#define PIC_IRT_I2C_1_INDEX 12
+#define PIC_IRT_PCMCIA_INDEX 13
+#define PIC_IRT_GPIO_INDEX 14
+#define PIC_IRT_HYPER_INDEX 15
+#define PIC_IRT_PCIX_INDEX 16
+/* XLS */
+#define PIC_IRT_CDE_INDEX 15
+#define PIC_IRT_BRIDGE_TB_XLS_INDEX 16
+/* XLS */
+#define PIC_IRT_GMAC0_INDEX 17
+#define PIC_IRT_GMAC1_INDEX 18
+#define PIC_IRT_GMAC2_INDEX 19
+#define PIC_IRT_GMAC3_INDEX 20
+#define PIC_IRT_XGS0_INDEX 21
+#define PIC_IRT_XGS1_INDEX 22
+#define PIC_IRT_HYPER_FATAL_INDEX 23
+#define PIC_IRT_PCIX_FATAL_INDEX 24
+#define PIC_IRT_BRIDGE_AERR_INDEX 25
+#define PIC_IRT_BRIDGE_BERR_INDEX 26
+#define PIC_IRT_BRIDGE_TB_XLR_INDEX 27
+#define PIC_IRT_BRIDGE_AERR_NMI_INDEX 28
+/* XLS */
+#define PIC_IRT_GMAC4_INDEX 21
+#define PIC_IRT_GMAC5_INDEX 22
+#define PIC_IRT_GMAC6_INDEX 23
+#define PIC_IRT_GMAC7_INDEX 24
+#define PIC_IRT_BRIDGE_ERR_INDEX 25
+#define PIC_IRT_PCIE_LINK0_INDEX 26
+#define PIC_IRT_PCIE_LINK1_INDEX 27
+#define PIC_IRT_PCIE_LINK2_INDEX 23
+#define PIC_IRT_PCIE_LINK3_INDEX 24
+#define PIC_IRT_PCIE_XLSB0_LINK2_INDEX 28
+#define PIC_IRT_PCIE_XLSB0_LINK3_INDEX 29
+#define PIC_IRT_SRIO_LINK0_INDEX 26
+#define PIC_IRT_SRIO_LINK1_INDEX 27
+#define PIC_IRT_SRIO_LINK2_INDEX 28
+#define PIC_IRT_SRIO_LINK3_INDEX 29
+#define PIC_IRT_PCIE_INT_INDEX 28
+#define PIC_IRT_PCIE_FATAL_INDEX 29
+#define PIC_IRT_GPIO_B_INDEX 30
+#define PIC_IRT_USB_INDEX 31
+/* XLS */
+#define PIC_NUM_IRTS 32
+
+
+#define PIC_CLOCK_TIMER 7
+
+/* PIC Registers */
+#define PIC_CTRL 0x00
+#define PIC_CTRL_STE 8 /* timer enable start bit */
+#define PIC_IPI 0x04
+#define PIC_INT_ACK 0x06
+
+#define WD_MAX_VAL_0 0x08
+#define WD_MAX_VAL_1 0x09
+#define WD_MASK_0 0x0a
+#define WD_MASK_1 0x0b
+#define WD_HEARBEAT_0 0x0c
+#define WD_HEARBEAT_1 0x0d
+
+#define PIC_IRT_0_BASE 0x40
+#define PIC_IRT_1_BASE 0x80
+#define PIC_TIMER_MAXVAL_0_BASE 0x100
+#define PIC_TIMER_MAXVAL_1_BASE 0x110
+#define PIC_TIMER_COUNT_0_BASE 0x120
+#define PIC_TIMER_COUNT_1_BASE 0x130
+
+#define PIC_IRT_0(picintr) (PIC_IRT_0_BASE + (picintr))
+#define PIC_IRT_1(picintr) (PIC_IRT_1_BASE + (picintr))
+
+#define PIC_TIMER_MAXVAL_0(i) (PIC_TIMER_MAXVAL_0_BASE + (i))
+#define PIC_TIMER_MAXVAL_1(i) (PIC_TIMER_MAXVAL_1_BASE + (i))
+#define PIC_TIMER_COUNT_0(i) (PIC_TIMER_COUNT_0_BASE + (i))
+#define PIC_TIMER_COUNT_1(i) (PIC_TIMER_COUNT_1_BASE + (i))
+
+/*
+ * Mapping between hardware interrupt numbers and IRQs on the CPU.
+ * We use a simple scheme to map PIC interrupts 0-31 to IRQs 8-39;
+ * this leaves IRQs 0-7 free for CPU interrupts such as count/compare
+ * and the FMN.
+ */
+#define PIC_IRQ_BASE 8
+#define PIC_INTR_TO_IRQ(i) (PIC_IRQ_BASE + (i))
+#define PIC_IRQ_TO_INTR(i) ((i) - PIC_IRQ_BASE)
+
+#define PIC_IRT_FIRST_IRQ PIC_IRQ_BASE
+#define PIC_WD_IRQ PIC_INTR_TO_IRQ(PIC_IRT_WD_INDEX)
+#define PIC_TIMER_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_0_INDEX)
+#define PIC_TIMER_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_1_INDEX)
+#define PIC_TIMER_2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_2_INDEX)
+#define PIC_TIMER_3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_3_INDEX)
+#define PIC_TIMER_4_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_4_INDEX)
+#define PIC_TIMER_5_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_5_INDEX)
+#define PIC_TIMER_6_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_6_INDEX)
+#define PIC_TIMER_7_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_7_INDEX)
+#define PIC_CLOCK_IRQ (PIC_TIMER_7_IRQ)
+#define PIC_UART_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_UART_0_INDEX)
+#define PIC_UART_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_UART_1_INDEX)
+#define PIC_I2C_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_I2C_0_INDEX)
+#define PIC_I2C_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_I2C_1_INDEX)
+#define PIC_PCMCIA_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCMCIA_INDEX)
+#define PIC_GPIO_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GPIO_INDEX)
+#define PIC_HYPER_IRQ PIC_INTR_TO_IRQ(PIC_IRT_HYPER_INDEX)
+#define PIC_PCIX_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIX_INDEX)
+/* XLS */
+#define PIC_CDE_IRQ PIC_INTR_TO_IRQ(PIC_IRT_CDE_INDEX)
+#define PIC_BRIDGE_TB_XLS_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_TB_XLS_INDEX)
+/* end XLS */
+#define PIC_GMAC_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC0_INDEX)
+#define PIC_GMAC_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC1_INDEX)
+#define PIC_GMAC_2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC2_INDEX)
+#define PIC_GMAC_3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC3_INDEX)
+#define PIC_XGS_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_XGS0_INDEX)
+#define PIC_XGS_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_XGS1_INDEX)
+#define PIC_HYPER_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_HYPER_FATAL_INDEX)
+#define PIC_PCIX_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIX_FATAL_INDEX)
+#define PIC_BRIDGE_AERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_INDEX)
+#define PIC_BRIDGE_BERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_BERR_INDEX)
+#define PIC_BRIDGE_TB_XLR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_TB_XLR_INDEX)
+#define PIC_BRIDGE_AERR_NMI_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_NMI_INDEX)
+/* XLS defines */
+#define PIC_GMAC_4_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC4_INDEX)
+#define PIC_GMAC_5_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC5_INDEX)
+#define PIC_GMAC_6_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC6_INDEX)
+#define PIC_GMAC_7_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC7_INDEX)
+#define PIC_BRIDGE_ERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_ERR_INDEX)
+#define PIC_PCIE_LINK0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK0_INDEX)
+#define PIC_PCIE_LINK1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK1_INDEX)
+#define PIC_PCIE_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK2_INDEX)
+#define PIC_PCIE_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK3_INDEX)
+#define PIC_PCIE_XLSB0_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_XLSB0_LINK2_INDEX)
+#define PIC_PCIE_XLSB0_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_XLSB0_LINK3_INDEX)
+#define PIC_SRIO_LINK0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK0_INDEX)
+#define PIC_SRIO_LINK1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK1_INDEX)
+#define PIC_SRIO_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK2_INDEX)
+#define PIC_SRIO_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK3_INDEX)
+#define PIC_PCIE_INT_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_INT_INDEX)
+#define PIC_PCIE_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_FATAL_INDEX)
+#define PIC_GPIO_B_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GPIO_B_INDEX)
+#define PIC_USB_IRQ PIC_INTR_TO_IRQ(PIC_IRT_USB_INDEX)
+#define PIC_IRT_LAST_IRQ PIC_USB_IRQ
+/* end XLS */
+
+#ifndef __ASSEMBLY__
+
+#define PIC_IRQ_IS_EDGE_TRIGGERED(irq) (((irq) >= PIC_TIMER_0_IRQ) && \
+ ((irq) <= PIC_TIMER_7_IRQ))
+#define PIC_IRQ_IS_IRT(irq) (((irq) >= PIC_IRT_FIRST_IRQ) && \
+ ((irq) <= PIC_IRT_LAST_IRQ))
+
+static inline int
+nlm_irq_to_irt(int irq)
+{
+ if (PIC_IRQ_IS_IRT(irq) == 0)
+ return -1;
+
+ return PIC_IRQ_TO_INTR(irq);
+}
+
+static inline int
+nlm_irt_to_irq(int irt)
+{
+
+ return PIC_INTR_TO_IRQ(irt);
+}
+
+static inline void
+nlm_pic_enable_irt(uint64_t base, int irt)
+{
+ uint32_t reg;
+
+ reg = nlm_read_reg(base, PIC_IRT_1(irt));
+ nlm_write_reg(base, PIC_IRT_1(irt), reg | (1u << 31));
+}
+
+static inline void
+nlm_pic_disable_irt(uint64_t base, int irt)
+{
+ uint32_t reg;
+
+ reg = nlm_read_reg(base, PIC_IRT_1(irt));
+ nlm_write_reg(base, PIC_IRT_1(irt), reg & ~(1u << 31));
+}
+
+static inline void
+nlm_pic_send_ipi(uint64_t base, int hwt, int irq, int nmi)
+{
+ unsigned int tid, pid;
+
+ tid = hwt & 0x3;
+ pid = (hwt >> 2) & 0x07;
+ nlm_write_reg(base, PIC_IPI,
+ (pid << 20) | (tid << 16) | (nmi << 8) | irq);
+}
+
+static inline void
+nlm_pic_ack(uint64_t base, int irt)
+{
+ nlm_write_reg(base, PIC_INT_ACK, 1u << irt);
+}
+
+static inline void
+nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt, int en)
+{
+ nlm_write_reg(base, PIC_IRT_0(irt), (1u << hwt));
+ /* local scheduling, invalid, level by default */
+ nlm_write_reg(base, PIC_IRT_1(irt),
+ (en << 30) | (1 << 6) | irq);
+}
+
+static inline uint64_t
+nlm_pic_read_timer(uint64_t base, int timer)
+{
+ uint32_t up1, up2, low;
+
+ up1 = nlm_read_reg(base, PIC_TIMER_COUNT_1(timer));
+ low = nlm_read_reg(base, PIC_TIMER_COUNT_0(timer));
+ up2 = nlm_read_reg(base, PIC_TIMER_COUNT_1(timer));
+
+ if (up1 != up2) /* wrapped, get the new low */
+ low = nlm_read_reg(base, PIC_TIMER_COUNT_0(timer));
+ return ((uint64_t)up2 << 32) | low;
+
+}
+
+static inline uint32_t
+nlm_pic_read_timer32(uint64_t base, int timer)
+{
+ return nlm_read_reg(base, PIC_TIMER_COUNT_0(timer));
+}
+
+static inline void
+nlm_pic_set_timer(uint64_t base, int timer, uint64_t value, int irq, int cpu)
+{
+ uint32_t up, low;
+ uint64_t pic_ctrl = nlm_read_reg(base, PIC_CTRL);
+ int en;
+
+ en = (irq > 0);
+ up = value >> 32;
+ low = value & 0xFFFFFFFF;
+ nlm_write_reg(base, PIC_TIMER_MAXVAL_0(timer), low);
+ nlm_write_reg(base, PIC_TIMER_MAXVAL_1(timer), up);
+ nlm_pic_init_irt(base, PIC_IRT_TIMER_INDEX(timer), irq, cpu, 0);
+
+ /* enable the timer */
+ pic_ctrl |= (1 << (PIC_CTRL_STE + timer));
+ nlm_write_reg(base, PIC_CTRL, pic_ctrl);
+}
+#endif
+#endif /* _ASM_NLM_XLR_PIC_H */
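
A sketch of how the timer and IRT helpers above fit together (the timer number, hardware thread and PIC base address are example values; nlm_read_reg()/nlm_write_reg() are assumed to be provided elsewhere in this patch):

#include <asm/netlogic/xlr/pic.h>

static void example_start_pic_timer(uint64_t picbase, uint64_t ticks)
{
	/* program timer 4: MAXVAL = ticks, route to hw thread 0,
	 * deliver PIC_TIMER_4_IRQ, then start it via PIC_CTRL */
	nlm_pic_set_timer(picbase, 4, ticks, PIC_TIMER_4_IRQ, 0);

	/* unmask the corresponding IRT entry */
	nlm_pic_enable_irt(picbase, nlm_irq_to_irt(PIC_TIMER_4_IRQ));
}

static uint64_t example_read_pic_timer(uint64_t picbase)
{
	/* wrap-safe 64-bit read of the running count */
	return nlm_pic_read_timer(picbase, 4);
}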
diff --git a/arch/mips/include/asm/netlogic/xlr/xlr.h b/arch/mips/include/asm/netlogic/xlr/xlr.h
new file mode 100644
index 000000000..ceb991ca8
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlr/xlr.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ASM_NLM_XLR_H
+#define _ASM_NLM_XLR_H
+
+/* SMP helpers */
+void xlr_wakeup_secondary_cpus(void);
+
+/* XLS B silicon "Rook" */
+static inline unsigned int nlm_chip_is_xls_b(void)
+{
+ uint32_t prid = read_c0_prid();
+
+ return ((prid & 0xf000) == 0x4000);
+}
+
+/* XLR chip types */
+/* The XLS product line has chip versions 0x[48c]? */
+static inline unsigned int nlm_chip_is_xls(void)
+{
+ uint32_t prid = read_c0_prid();
+
+ return ((prid & 0xf000) == 0x8000 || (prid & 0xf000) == 0x4000 ||
+ (prid & 0xf000) == 0xc000);
+}
+
+#endif /* _ASM_NLM_XLR_H */
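
A small sketch of how the chip-type tests above are typically used to branch between XLR and XLS specifics, here combined with the pic.h indices defined earlier; the choice of the trace-buffer interrupt is purely illustrative.

#include <asm/netlogic/xlr/pic.h>
#include <asm/netlogic/xlr/xlr.h>

static int example_bridge_tb_irt(void)
{
	/* the bridge trace-buffer interrupt sits at a different IRT on XLS */
	if (nlm_chip_is_xls())
		return PIC_IRT_BRIDGE_TB_XLS_INDEX;
	return PIC_IRT_BRIDGE_TB_XLR_INDEX;
}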
diff --git a/arch/mips/include/asm/octeon/cvmx-address.h b/arch/mips/include/asm/octeon/cvmx-address.h
new file mode 100644
index 000000000..e4444f8c4
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-address.h
@@ -0,0 +1,341 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2009 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * Typedefs and defines for working with Octeon physical addresses.
+ *
+ */
+#ifndef __CVMX_ADDRESS_H__
+#define __CVMX_ADDRESS_H__
+
+#if 0
+typedef enum {
+ CVMX_MIPS_SPACE_XKSEG = 3LL,
+ CVMX_MIPS_SPACE_XKPHYS = 2LL,
+ CVMX_MIPS_SPACE_XSSEG = 1LL,
+ CVMX_MIPS_SPACE_XUSEG = 0LL
+} cvmx_mips_space_t;
+#endif
+
+typedef enum {
+ CVMX_MIPS_XKSEG_SPACE_KSEG0 = 0LL,
+ CVMX_MIPS_XKSEG_SPACE_KSEG1 = 1LL,
+ CVMX_MIPS_XKSEG_SPACE_SSEG = 2LL,
+ CVMX_MIPS_XKSEG_SPACE_KSEG3 = 3LL
+} cvmx_mips_xkseg_space_t;
+
+/* decodes <14:13> of a kseg3 window address */
+typedef enum {
+ CVMX_ADD_WIN_SCR = 0L,
+ /* see cvmx_add_win_dma_dec_t for further decode */
+ CVMX_ADD_WIN_DMA = 1L,
+ CVMX_ADD_WIN_UNUSED = 2L,
+ CVMX_ADD_WIN_UNUSED2 = 3L
+} cvmx_add_win_dec_t;
+
+/* decode within DMA space */
+typedef enum {
+ /*
+ * Add store data to the write buffer entry, allocating it if
+ * necessary.
+ */
+ CVMX_ADD_WIN_DMA_ADD = 0L,
+ /* send out the write buffer entry to DRAM */
+ CVMX_ADD_WIN_DMA_SENDMEM = 1L,
+ /* store data must be normal DRAM memory space address in this case */
+ /* send out the write buffer entry as an IOBDMA command */
+ CVMX_ADD_WIN_DMA_SENDDMA = 2L,
+ /* see CVMX_ADD_WIN_DMA_SEND_DEC for data contents */
+ /* send out the write buffer entry as an IO write */
+ CVMX_ADD_WIN_DMA_SENDIO = 3L,
+ /* store data must be normal IO space address in this case */
+ /* send out a single-tick command on the NCB bus */
+ CVMX_ADD_WIN_DMA_SENDSINGLE = 4L,
+ /* no write buffer data needed/used */
+} cvmx_add_win_dma_dec_t;
+
+/*
+ * Physical Address Decode
+ *
+ * Octeon-I hardware never interprets the X bits (<39:36>, reserved
+ * for future expansion); software should set them to 0.
+ *
+ * - 0x0 XXX0 0000 0000 to DRAM Cached
+ * - 0x0 XXX0 0FFF FFFF
+ *
+ * - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000
+ * - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF)
+ *
+ * - 0x0 XXX0 2000 0000 to DRAM Cached
+ * - 0x0 XXXF FFFF FFFF
+ *
+ * - 0x1 00X0 0000 0000 to Boot Bus Uncached
+ * - 0x1 00XF FFFF FFFF
+ *
+ * - 0x1 01X0 0000 0000 to Other NCB Uncached
+ * - 0x1 FFXF FFFF FFFF devices
+ *
+ * Decode of all Octeon addresses
+ */
+typedef union {
+
+ uint64_t u64;
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* mapped or unmapped virtual address */
+ struct {
+ uint64_t R:2;
+ uint64_t offset:62;
+ } sva;
+
+ /* mapped USEG virtual addresses (typically) */
+ struct {
+ uint64_t zeroes:33;
+ uint64_t offset:31;
+ } suseg;
+
+ /* mapped or unmapped virtual address */
+ struct {
+ uint64_t ones:33;
+ uint64_t sp:2;
+ uint64_t offset:29;
+ } sxkseg;
+
+ /*
+ * physical address accessed through xkphys unmapped virtual
+ * address.
+ */
+ struct {
+ uint64_t R:2; /* CVMX_MIPS_SPACE_XKPHYS in this case */
+ uint64_t cca:3; /* ignored by octeon */
+ uint64_t mbz:10;
+ uint64_t pa:49; /* physical address */
+ } sxkphys;
+
+ /* physical address */
+ struct {
+ uint64_t mbz:15;
+ /* if set, the address is uncached and resides on MCB bus */
+ uint64_t is_io:1;
+ /*
+ * the hardware ignores this field when is_io==0, else
+ * device ID.
+ */
+ uint64_t did:8;
+ /* the hardware ignores <39:36> in Octeon I */
+ uint64_t unaddr:4;
+ uint64_t offset:36;
+ } sphys;
+
+ /* physical mem address */
+ struct {
+ /* technically, <47:40> are don't-cares */
+ uint64_t zeroes:24;
+ /* the hardware ignores <39:36> in Octeon I */
+ uint64_t unaddr:4;
+ uint64_t offset:36;
+ } smem;
+
+ /* physical IO address */
+ struct {
+ uint64_t mem_region:2;
+ uint64_t mbz:13;
+ /* 1 in this case */
+ uint64_t is_io:1;
+ /*
+ * The hardware ignores this field when is_io==0, else
+ * device ID.
+ */
+ uint64_t did:8;
+ /* the hardware ignores <39:36> in Octeon I */
+ uint64_t unaddr:4;
+ uint64_t offset:36;
+ } sio;
+
+ /*
+ * Scratchpad virtual address - accessed through a window at
+ * the end of kseg3
+ */
+ struct {
+ uint64_t ones:49;
+ /* CVMX_ADD_WIN_SCR (0) in this case */
+ cvmx_add_win_dec_t csrdec:2;
+ uint64_t addr:13;
+ } sscr;
+
+ /* there should only be stores to IOBDMA space, no loads */
+ /*
+ * IOBDMA virtual address - accessed through a window at the
+ * end of kseg3
+ */
+ struct {
+ uint64_t ones:49;
+ uint64_t csrdec:2; /* CVMX_ADD_WIN_DMA (1) in this case */
+ uint64_t unused2:3;
+ uint64_t type:3;
+ uint64_t addr:7;
+ } sdma;
+
+ struct {
+ uint64_t didspace:24;
+ uint64_t unused:40;
+ } sfilldidspace;
+#else
+ struct {
+ uint64_t offset:62;
+ uint64_t R:2;
+ } sva;
+
+ struct {
+ uint64_t offset:31;
+ uint64_t zeroes:33;
+ } suseg;
+
+ struct {
+ uint64_t offset:29;
+ uint64_t sp:2;
+ uint64_t ones:33;
+ } sxkseg;
+
+ struct {
+ uint64_t pa:49;
+ uint64_t mbz:10;
+ uint64_t cca:3;
+ uint64_t R:2;
+ } sxkphys;
+
+ struct {
+ uint64_t offset:36;
+ uint64_t unaddr:4;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t mbz:15;
+ } sphys;
+
+ struct {
+ uint64_t offset:36;
+ uint64_t unaddr:4;
+ uint64_t zeroes:24;
+ } smem;
+
+ struct {
+ uint64_t offset:36;
+ uint64_t unaddr:4;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t mbz:13;
+ uint64_t mem_region:2;
+ } sio;
+
+ struct {
+ uint64_t addr:13;
+ cvmx_add_win_dec_t csrdec:2;
+ uint64_t ones:49;
+ } sscr;
+
+ struct {
+ uint64_t addr:7;
+ uint64_t type:3;
+ uint64_t unused2:3;
+ uint64_t csrdec:2;
+ uint64_t ones:49;
+ } sdma;
+
+ struct {
+ uint64_t unused:40;
+ uint64_t didspace:24;
+ } sfilldidspace;
+#endif
+
+} cvmx_addr_t;
+
+/* These macros are used by 32-bit applications */
+
+#define CVMX_MIPS32_SPACE_KSEG0 1l
+#define CVMX_ADD_SEG32(segment, add) \
+ (((int32_t)segment << 31) | (int32_t)(add))
+
+/*
+ * Currently all IOs are performed using XKPHYS addressing. Linux uses
+ * the CvmMemCtl register to enable XKPHYS addressing to IO space from
+ * user mode. Future OSes may need to change the upper bits of IO
+ * addresses. The following define controls the upper two bits for all
+ * IO addresses generated by the simple executive library.
+ */
+#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS
+
+/* These macros simplify the process of creating common IO addresses */
+#define CVMX_ADD_SEG(segment, add) ((((uint64_t)segment) << 62) | (add))
+#ifndef CVMX_ADD_IO_SEG
+#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add))
+#endif
+#define CVMX_ADDR_DIDSPACE(did) (((CVMX_IO_SEG) << 22) | ((1ULL) << 8) | (did))
+#define CVMX_ADDR_DID(did) (CVMX_ADDR_DIDSPACE(did) << 40)
+#define CVMX_FULL_DID(did, subdid) (((did) << 3) | (subdid))
+
+ /* from include/ncb_rsl_id.v */
+#define CVMX_OCT_DID_MIS 0ULL /* misc stuff */
+#define CVMX_OCT_DID_GMX0 1ULL
+#define CVMX_OCT_DID_GMX1 2ULL
+#define CVMX_OCT_DID_PCI 3ULL
+#define CVMX_OCT_DID_KEY 4ULL
+#define CVMX_OCT_DID_FPA 5ULL
+#define CVMX_OCT_DID_DFA 6ULL
+#define CVMX_OCT_DID_ZIP 7ULL
+#define CVMX_OCT_DID_RNG 8ULL
+#define CVMX_OCT_DID_IPD 9ULL
+#define CVMX_OCT_DID_PKT 10ULL
+#define CVMX_OCT_DID_TIM 11ULL
+#define CVMX_OCT_DID_TAG 12ULL
+ /* the rest are not on the IO bus */
+#define CVMX_OCT_DID_L2C 16ULL
+#define CVMX_OCT_DID_LMC 17ULL
+#define CVMX_OCT_DID_SPX0 18ULL
+#define CVMX_OCT_DID_SPX1 19ULL
+#define CVMX_OCT_DID_PIP 20ULL
+#define CVMX_OCT_DID_ASX0 22ULL
+#define CVMX_OCT_DID_ASX1 23ULL
+#define CVMX_OCT_DID_IOB 30ULL
+
+#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)
+#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)
+#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)
+#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)
+#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)
+#define CVMX_OCT_DID_TAG_NULL_RD CVMX_FULL_DID(CVMX_OCT_DID_TAG, 4ULL)
+#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)
+#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)
+#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)
+#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)
+#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)
+#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)
+#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)
+#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)
+#define CVMX_OCT_DID_DFA_CSR CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)
+#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)
+#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)
+
+#endif /* __CVMX_ADDRESS_H__ */
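
As a rough example of how the pieces above fit together (a sketch, with an arbitrary register offset): CVMX_ADDR_DID() already folds in the XKPHYS segment and the is_io bit, so the result can be inspected through the cvmx_addr_t union. This assumes CVMX_MIPS_SPACE_XKPHYS (value 2) is supplied by the other cvmx headers, as the #if 0 block above suggests.

#include <asm/octeon/cvmx-address.h>

static void example_decode_io_address(void)
{
	cvmx_addr_t a;

	/* byte 0x100 within the RNG unit's IO window */
	a.u64 = CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_RNG, 0ULL)) + 0x100;

	/* here a.sio.is_io == 1, a.sio.did == CVMX_FULL_DID(CVMX_OCT_DID_RNG, 0),
	 * a.sio.offset == 0x100 and a.sio.mem_region holds the XKPHYS bits */
}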
diff --git a/arch/mips/include/asm/octeon/cvmx-agl-defs.h b/arch/mips/include/asm/octeon/cvmx-agl-defs.h
new file mode 100644
index 000000000..3635ab384
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-agl-defs.h
@@ -0,0 +1,1759 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_AGL_DEFS_H__
+#define __CVMX_AGL_DEFS_H__
+
+#define CVMX_AGL_GMX_BAD_REG (CVMX_ADD_IO_SEG(0x00011800E0000518ull))
+#define CVMX_AGL_GMX_BIST (CVMX_ADD_IO_SEG(0x00011800E0000400ull))
+#define CVMX_AGL_GMX_DRV_CTL (CVMX_ADD_IO_SEG(0x00011800E00007F0ull))
+#define CVMX_AGL_GMX_INF_MODE (CVMX_ADD_IO_SEG(0x00011800E00007F8ull))
+#define CVMX_AGL_GMX_PRTX_CFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000010ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000180ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000188ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000190ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000198ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM4(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM5(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000108ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000100ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_DECISION(offset) (CVMX_ADD_IO_SEG(0x00011800E0000040ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_FRM_CHK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000020ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_FRM_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000018ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_FRM_MAX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000030ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_FRM_MIN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000028ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_IFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000058ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_INT_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000008ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_INT_REG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000000ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_JABBER(offset) (CVMX_ADD_IO_SEG(0x00011800E0000038ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000068ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_RX_INBND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000060ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000050ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_OCTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000088ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000098ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_PKTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000080ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(offset) (CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000090ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_UDD_SKP(offset) (CVMX_ADD_IO_SEG(0x00011800E0000048ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RX_BP_DROPX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000420ull) + ((offset) & 1) * 8)
+#define CVMX_AGL_GMX_RX_BP_OFFX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000460ull) + ((offset) & 1) * 8)
+#define CVMX_AGL_GMX_RX_BP_ONX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000440ull) + ((offset) & 1) * 8)
+#define CVMX_AGL_GMX_RX_PRT_INFO (CVMX_ADD_IO_SEG(0x00011800E00004E8ull))
+#define CVMX_AGL_GMX_RX_TX_STATUS (CVMX_ADD_IO_SEG(0x00011800E00007E8ull))
+#define CVMX_AGL_GMX_SMACX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000230ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_STAT_BP (CVMX_ADD_IO_SEG(0x00011800E0000520ull))
+#define CVMX_AGL_GMX_TXX_APPEND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000218ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_CLK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000208ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000270ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_MIN_PKT(offset) (CVMX_ADD_IO_SEG(0x00011800E0000240ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000248ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000238ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_PAUSE_TOGO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000258ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_PAUSE_ZERO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000260ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_SOFT_PAUSE(offset) (CVMX_ADD_IO_SEG(0x00011800E0000250ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000280ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000288ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000290ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000298ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT4(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT5(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT6(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT7(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT8(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT9(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000268ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_THRESH(offset) (CVMX_ADD_IO_SEG(0x00011800E0000210ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TX_BP (CVMX_ADD_IO_SEG(0x00011800E00004D0ull))
+#define CVMX_AGL_GMX_TX_COL_ATTEMPT (CVMX_ADD_IO_SEG(0x00011800E0000498ull))
+#define CVMX_AGL_GMX_TX_IFG (CVMX_ADD_IO_SEG(0x00011800E0000488ull))
+#define CVMX_AGL_GMX_TX_INT_EN (CVMX_ADD_IO_SEG(0x00011800E0000508ull))
+#define CVMX_AGL_GMX_TX_INT_REG (CVMX_ADD_IO_SEG(0x00011800E0000500ull))
+#define CVMX_AGL_GMX_TX_JAM (CVMX_ADD_IO_SEG(0x00011800E0000490ull))
+#define CVMX_AGL_GMX_TX_LFSR (CVMX_ADD_IO_SEG(0x00011800E00004F8ull))
+#define CVMX_AGL_GMX_TX_OVR_BP (CVMX_ADD_IO_SEG(0x00011800E00004C8ull))
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC (CVMX_ADD_IO_SEG(0x00011800E00004A0ull))
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE (CVMX_ADD_IO_SEG(0x00011800E00004A8ull))
+#define CVMX_AGL_PRTX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0002000ull) + ((offset) & 1) * 8)
+
+union cvmx_agl_gmx_bad_reg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_bad_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63:26;
+ uint64_t txpsh1:1;
+ uint64_t txpop1:1;
+ uint64_t ovrflw1:1;
+ uint64_t txpsh:1;
+ uint64_t txpop:1;
+ uint64_t ovrflw:1;
+ uint64_t reserved_27_31:5;
+ uint64_t statovr:1;
+ uint64_t reserved_24_25:2;
+ uint64_t loststat:2;
+ uint64_t reserved_4_21:18;
+ uint64_t out_ovr:2;
+ uint64_t reserved_0_1:2;
+#else
+ uint64_t reserved_0_1:2;
+ uint64_t out_ovr:2;
+ uint64_t reserved_4_21:18;
+ uint64_t loststat:2;
+ uint64_t reserved_24_25:2;
+ uint64_t statovr:1;
+ uint64_t reserved_27_31:5;
+ uint64_t ovrflw:1;
+ uint64_t txpop:1;
+ uint64_t txpsh:1;
+ uint64_t ovrflw1:1;
+ uint64_t txpop1:1;
+ uint64_t txpsh1:1;
+ uint64_t reserved_38_63:26;
+#endif
+ } s;
+ struct cvmx_agl_gmx_bad_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63:26;
+ uint64_t txpsh1:1;
+ uint64_t txpop1:1;
+ uint64_t ovrflw1:1;
+ uint64_t txpsh:1;
+ uint64_t txpop:1;
+ uint64_t ovrflw:1;
+ uint64_t reserved_27_31:5;
+ uint64_t statovr:1;
+ uint64_t reserved_23_25:3;
+ uint64_t loststat:1;
+ uint64_t reserved_4_21:18;
+ uint64_t out_ovr:2;
+ uint64_t reserved_0_1:2;
+#else
+ uint64_t reserved_0_1:2;
+ uint64_t out_ovr:2;
+ uint64_t reserved_4_21:18;
+ uint64_t loststat:1;
+ uint64_t reserved_23_25:3;
+ uint64_t statovr:1;
+ uint64_t reserved_27_31:5;
+ uint64_t ovrflw:1;
+ uint64_t txpop:1;
+ uint64_t txpsh:1;
+ uint64_t ovrflw1:1;
+ uint64_t txpop1:1;
+ uint64_t txpsh1:1;
+ uint64_t reserved_38_63:26;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_bad_reg_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_35_63:29;
+ uint64_t txpsh:1;
+ uint64_t txpop:1;
+ uint64_t ovrflw:1;
+ uint64_t reserved_27_31:5;
+ uint64_t statovr:1;
+ uint64_t reserved_23_25:3;
+ uint64_t loststat:1;
+ uint64_t reserved_3_21:19;
+ uint64_t out_ovr:1;
+ uint64_t reserved_0_1:2;
+#else
+ uint64_t reserved_0_1:2;
+ uint64_t out_ovr:1;
+ uint64_t reserved_3_21:19;
+ uint64_t loststat:1;
+ uint64_t reserved_23_25:3;
+ uint64_t statovr:1;
+ uint64_t reserved_27_31:5;
+ uint64_t ovrflw:1;
+ uint64_t txpop:1;
+ uint64_t txpsh:1;
+ uint64_t reserved_35_63:29;
+#endif
+ } cn56xx;
+};
+
+union cvmx_agl_gmx_bist {
+ uint64_t u64;
+ struct cvmx_agl_gmx_bist_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t status:25;
+#else
+ uint64_t status:25;
+ uint64_t reserved_25_63:39;
+#endif
+ } s;
+ struct cvmx_agl_gmx_bist_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t status:10;
+#else
+ uint64_t status:10;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn52xx;
+};
+
+union cvmx_agl_gmx_drv_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_drv_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63:15;
+ uint64_t byp_en1:1;
+ uint64_t reserved_45_47:3;
+ uint64_t pctl1:5;
+ uint64_t reserved_37_39:3;
+ uint64_t nctl1:5;
+ uint64_t reserved_17_31:15;
+ uint64_t byp_en:1;
+ uint64_t reserved_13_15:3;
+ uint64_t pctl:5;
+ uint64_t reserved_5_7:3;
+ uint64_t nctl:5;
+#else
+ uint64_t nctl:5;
+ uint64_t reserved_5_7:3;
+ uint64_t pctl:5;
+ uint64_t reserved_13_15:3;
+ uint64_t byp_en:1;
+ uint64_t reserved_17_31:15;
+ uint64_t nctl1:5;
+ uint64_t reserved_37_39:3;
+ uint64_t pctl1:5;
+ uint64_t reserved_45_47:3;
+ uint64_t byp_en1:1;
+ uint64_t reserved_49_63:15;
+#endif
+ } s;
+ struct cvmx_agl_gmx_drv_ctl_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t byp_en:1;
+ uint64_t reserved_13_15:3;
+ uint64_t pctl:5;
+ uint64_t reserved_5_7:3;
+ uint64_t nctl:5;
+#else
+ uint64_t nctl:5;
+ uint64_t reserved_5_7:3;
+ uint64_t pctl:5;
+ uint64_t reserved_13_15:3;
+ uint64_t byp_en:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } cn56xx;
+};
+
+union cvmx_agl_gmx_inf_mode {
+ uint64_t u64;
+ struct cvmx_agl_gmx_inf_mode_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t en:1;
+ uint64_t reserved_0_0:1;
+#else
+ uint64_t reserved_0_0:1;
+ uint64_t en:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_prtx_cfg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_prtx_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t tx_idle:1;
+ uint64_t rx_idle:1;
+ uint64_t reserved_9_11:3;
+ uint64_t speed_msb:1;
+ uint64_t reserved_7_7:1;
+ uint64_t burst:1;
+ uint64_t tx_en:1;
+ uint64_t rx_en:1;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+#else
+ uint64_t en:1;
+ uint64_t speed:1;
+ uint64_t duplex:1;
+ uint64_t slottime:1;
+ uint64_t rx_en:1;
+ uint64_t tx_en:1;
+ uint64_t burst:1;
+ uint64_t reserved_7_7:1;
+ uint64_t speed_msb:1;
+ uint64_t reserved_9_11:3;
+ uint64_t rx_idle:1;
+ uint64_t tx_idle:1;
+ uint64_t reserved_14_63:50;
+#endif
+ } s;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t tx_en:1;
+ uint64_t rx_en:1;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+#else
+ uint64_t en:1;
+ uint64_t speed:1;
+ uint64_t duplex:1;
+ uint64_t slottime:1;
+ uint64_t rx_en:1;
+ uint64_t tx_en:1;
+ uint64_t reserved_6_63:58;
+#endif
+ } cn52xx;
+};
+
+union cvmx_agl_gmx_rxx_adr_cam0 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr:64;
+#else
+ uint64_t adr:64;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_adr_cam1 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr:64;
+#else
+ uint64_t adr:64;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_adr_cam2 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr:64;
+#else
+ uint64_t adr:64;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_adr_cam3 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr:64;
+#else
+ uint64_t adr:64;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_adr_cam4 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr:64;
+#else
+ uint64_t adr:64;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_adr_cam5 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t adr:64;
+#else
+ uint64_t adr:64;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_adr_cam_en {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t en:8;
+#else
+ uint64_t en:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_adr_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t cam_mode:1;
+ uint64_t mcst:2;
+ uint64_t bcst:1;
+#else
+ uint64_t bcst:1;
+ uint64_t mcst:2;
+ uint64_t cam_mode:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_decision {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_decision_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t cnt:5;
+#else
+ uint64_t cnt:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_frm_chk {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_chk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+#else
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t reserved_1_1:1;
+ uint64_t minerr:1;
+#else
+ uint64_t minerr:1;
+ uint64_t reserved_1_1:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn52xx;
+};
+
+union cvmx_agl_gmx_rxx_frm_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t ptp_mode:1;
+ uint64_t reserved_11_11:1;
+ uint64_t null_dis:1;
+ uint64_t pre_align:1;
+ uint64_t pad_len:1;
+ uint64_t vlan_len:1;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+#else
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_11:1;
+ uint64_t ptp_mode:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t pre_align:1;
+ uint64_t pad_len:1;
+ uint64_t vlan_len:1;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+#else
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t pre_align:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn52xx;
+};
+
+union cvmx_agl_gmx_rxx_frm_max {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_max_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t len:16;
+#else
+ uint64_t len:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_frm_min {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_min_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t len:16;
+#else
+ uint64_t len:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_ifg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_ifg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t ifg:4;
+#else
+ uint64_t ifg:4;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_int_en {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+#else
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_20_63:44;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
+ uint64_t reserved_16_18:3;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t reserved_1_1:1;
+ uint64_t minerr:1;
+#else
+ uint64_t minerr:1;
+ uint64_t reserved_1_1:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn52xx;
+};
+
+union cvmx_agl_gmx_rxx_int_reg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+#else
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_20_63:44;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
+ uint64_t reserved_16_18:3;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t reserved_1_1:1;
+ uint64_t minerr:1;
+#else
+ uint64_t minerr:1;
+ uint64_t reserved_1_1:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn52xx;
+};
+
+union cvmx_agl_gmx_rxx_jabber {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_jabber_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t cnt:16;
+#else
+ uint64_t cnt:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_pause_drop_time {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t status:16;
+#else
+ uint64_t status:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_rx_inbnd {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t duplex:1;
+ uint64_t speed:2;
+ uint64_t status:1;
+#else
+ uint64_t status:1;
+ uint64_t speed:2;
+ uint64_t duplex:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_stats_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t rd_clr:1;
+#else
+ uint64_t rd_clr:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_stats_octs {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_octs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t cnt:48;
+#else
+ uint64_t cnt:48;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_stats_octs_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t cnt:48;
+#else
+ uint64_t cnt:48;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_stats_octs_dmac {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t cnt:48;
+#else
+ uint64_t cnt:48;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_stats_octs_drp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t cnt:48;
+#else
+ uint64_t cnt:48;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_stats_pkts {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_stats_pkts_bad {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_stats_pkts_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_stats_pkts_dmac {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_stats_pkts_drp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rxx_udd_skp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_udd_skp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t fcssel:1;
+ uint64_t reserved_7_7:1;
+ uint64_t len:7;
+#else
+ uint64_t len:7;
+ uint64_t reserved_7_7:1;
+ uint64_t fcssel:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rx_bp_dropx {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_bp_dropx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t mark:6;
+#else
+ uint64_t mark:6;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rx_bp_offx {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_bp_offx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t mark:6;
+#else
+ uint64_t mark:6;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rx_bp_onx {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_bp_onx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t mark:9;
+#else
+ uint64_t mark:9;
+ uint64_t reserved_9_63:55;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_rx_prt_info {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_prt_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63:46;
+ uint64_t drop:2;
+ uint64_t reserved_2_15:14;
+ uint64_t commit:2;
+#else
+ uint64_t commit:2;
+ uint64_t reserved_2_15:14;
+ uint64_t drop:2;
+ uint64_t reserved_18_63:46;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_prt_info_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t drop:1;
+ uint64_t reserved_1_15:15;
+ uint64_t commit:1;
+#else
+ uint64_t commit:1;
+ uint64_t reserved_1_15:15;
+ uint64_t drop:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } cn56xx;
+};
+
+union cvmx_agl_gmx_rx_tx_status {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_tx_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t tx:2;
+ uint64_t reserved_2_3:2;
+ uint64_t rx:2;
+#else
+ uint64_t rx:2;
+ uint64_t reserved_2_3:2;
+ uint64_t tx:2;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+ struct cvmx_agl_gmx_rx_tx_status_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t tx:1;
+ uint64_t reserved_1_3:3;
+ uint64_t rx:1;
+#else
+ uint64_t rx:1;
+ uint64_t reserved_1_3:3;
+ uint64_t tx:1;
+ uint64_t reserved_5_63:59;
+#endif
+ } cn56xx;
+};
+
+union cvmx_agl_gmx_smacx {
+ uint64_t u64;
+ struct cvmx_agl_gmx_smacx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t smac:48;
+#else
+ uint64_t smac:48;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_stat_bp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_stat_bp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t bp:1;
+ uint64_t cnt:16;
+#else
+ uint64_t cnt:16;
+ uint64_t bp:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_append {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_append_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t force_fcs:1;
+ uint64_t fcs:1;
+ uint64_t pad:1;
+ uint64_t preamble:1;
+#else
+ uint64_t preamble:1;
+ uint64_t pad:1;
+ uint64_t fcs:1;
+ uint64_t force_fcs:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_clk {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_clk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t clk_cnt:6;
+#else
+ uint64_t clk_cnt:6;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t xsdef_en:1;
+ uint64_t xscol_en:1;
+#else
+ uint64_t xscol_en:1;
+ uint64_t xsdef_en:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_min_pkt {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_min_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t min_size:8;
+#else
+ uint64_t min_size:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_pause_pkt_interval {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t interval:16;
+#else
+ uint64_t interval:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_pause_pkt_time {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t time:16;
+#else
+ uint64_t time:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_pause_togo {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_togo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t time:16;
+#else
+ uint64_t time:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_pause_zero {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_zero_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t send:1;
+#else
+ uint64_t send:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_soft_pause {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_soft_pause_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t time:16;
+#else
+ uint64_t time:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_stat0 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t xsdef:32;
+ uint64_t xscol:32;
+#else
+ uint64_t xscol:32;
+ uint64_t xsdef:32;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_stat1 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t scol:32;
+ uint64_t mcol:32;
+#else
+ uint64_t mcol:32;
+ uint64_t scol:32;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_stat2 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t octs:48;
+#else
+ uint64_t octs:48;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_stat3 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t pkts:32;
+#else
+ uint64_t pkts:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_stat4 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t hist1:32;
+ uint64_t hist0:32;
+#else
+ uint64_t hist0:32;
+ uint64_t hist1:32;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_stat5 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat5_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t hist3:32;
+ uint64_t hist2:32;
+#else
+ uint64_t hist2:32;
+ uint64_t hist3:32;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_stat6 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat6_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t hist5:32;
+ uint64_t hist4:32;
+#else
+ uint64_t hist4:32;
+ uint64_t hist5:32;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_stat7 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat7_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t hist7:32;
+ uint64_t hist6:32;
+#else
+ uint64_t hist6:32;
+ uint64_t hist7:32;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_stat8 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat8_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mcst:32;
+ uint64_t bcst:32;
+#else
+ uint64_t bcst:32;
+ uint64_t mcst:32;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_stat9 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat9_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t undflw:32;
+ uint64_t ctl:32;
+#else
+ uint64_t ctl:32;
+ uint64_t undflw:32;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_stats_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stats_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t rd_clr:1;
+#else
+ uint64_t rd_clr:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_txx_thresh {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_thresh_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t cnt:6;
+#else
+ uint64_t cnt:6;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_tx_bp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_bp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t bp:2;
+#else
+ uint64_t bp:2;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_bp_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t bp:1;
+#else
+ uint64_t bp:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } cn56xx;
+};
+
+union cvmx_agl_gmx_tx_col_attempt {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_col_attempt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t limit:5;
+#else
+ uint64_t limit:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_tx_ifg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_ifg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t ifg2:4;
+ uint64_t ifg1:4;
+#else
+ uint64_t ifg1:4;
+ uint64_t ifg2:4;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_tx_int_en {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t ptp_lost:2;
+ uint64_t reserved_18_19:2;
+ uint64_t late_col:2;
+ uint64_t reserved_14_15:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xscol:2;
+ uint64_t reserved_4_7:4;
+ uint64_t undflw:2;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:2;
+ uint64_t reserved_4_7:4;
+ uint64_t xscol:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_14_15:2;
+ uint64_t late_col:2;
+ uint64_t reserved_18_19:2;
+ uint64_t ptp_lost:2;
+ uint64_t reserved_22_63:42;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_int_en_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63:46;
+ uint64_t late_col:2;
+ uint64_t reserved_14_15:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xscol:2;
+ uint64_t reserved_4_7:4;
+ uint64_t undflw:2;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:2;
+ uint64_t reserved_4_7:4;
+ uint64_t xscol:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_14_15:2;
+ uint64_t late_col:2;
+ uint64_t reserved_18_63:46;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_tx_int_en_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t late_col:1;
+ uint64_t reserved_13_15:3;
+ uint64_t xsdef:1;
+ uint64_t reserved_9_11:3;
+ uint64_t xscol:1;
+ uint64_t reserved_3_7:5;
+ uint64_t undflw:1;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:1;
+ uint64_t reserved_3_7:5;
+ uint64_t xscol:1;
+ uint64_t reserved_9_11:3;
+ uint64_t xsdef:1;
+ uint64_t reserved_13_15:3;
+ uint64_t late_col:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } cn56xx;
+};
+
+union cvmx_agl_gmx_tx_int_reg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t ptp_lost:2;
+ uint64_t reserved_18_19:2;
+ uint64_t late_col:2;
+ uint64_t reserved_14_15:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xscol:2;
+ uint64_t reserved_4_7:4;
+ uint64_t undflw:2;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:2;
+ uint64_t reserved_4_7:4;
+ uint64_t xscol:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_14_15:2;
+ uint64_t late_col:2;
+ uint64_t reserved_18_19:2;
+ uint64_t ptp_lost:2;
+ uint64_t reserved_22_63:42;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_int_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63:46;
+ uint64_t late_col:2;
+ uint64_t reserved_14_15:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xscol:2;
+ uint64_t reserved_4_7:4;
+ uint64_t undflw:2;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:2;
+ uint64_t reserved_4_7:4;
+ uint64_t xscol:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_14_15:2;
+ uint64_t late_col:2;
+ uint64_t reserved_18_63:46;
+#endif
+ } cn52xx;
+ struct cvmx_agl_gmx_tx_int_reg_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t late_col:1;
+ uint64_t reserved_13_15:3;
+ uint64_t xsdef:1;
+ uint64_t reserved_9_11:3;
+ uint64_t xscol:1;
+ uint64_t reserved_3_7:5;
+ uint64_t undflw:1;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:1;
+ uint64_t reserved_3_7:5;
+ uint64_t xscol:1;
+ uint64_t reserved_9_11:3;
+ uint64_t xsdef:1;
+ uint64_t reserved_13_15:3;
+ uint64_t late_col:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } cn56xx;
+};
+
+union cvmx_agl_gmx_tx_jam {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_jam_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t jam:8;
+#else
+ uint64_t jam:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_tx_lfsr {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_lfsr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t lfsr:16;
+#else
+ uint64_t lfsr:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_tx_ovr_bp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_ovr_bp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t en:2;
+ uint64_t reserved_6_7:2;
+ uint64_t bp:2;
+ uint64_t reserved_2_3:2;
+ uint64_t ign_full:2;
+#else
+ uint64_t ign_full:2;
+ uint64_t reserved_2_3:2;
+ uint64_t bp:2;
+ uint64_t reserved_6_7:2;
+ uint64_t en:2;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+ struct cvmx_agl_gmx_tx_ovr_bp_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t en:1;
+ uint64_t reserved_5_7:3;
+ uint64_t bp:1;
+ uint64_t reserved_1_3:3;
+ uint64_t ign_full:1;
+#else
+ uint64_t ign_full:1;
+ uint64_t reserved_1_3:3;
+ uint64_t bp:1;
+ uint64_t reserved_5_7:3;
+ uint64_t en:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn56xx;
+};
+
+union cvmx_agl_gmx_tx_pause_pkt_dmac {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t dmac:48;
+#else
+ uint64_t dmac:48;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_agl_gmx_tx_pause_pkt_type {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t type:16;
+#else
+ uint64_t type:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_agl_prtx_ctl {
+ uint64_t u64;
+ struct cvmx_agl_prtx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t drv_byp:1;
+ uint64_t reserved_62_62:1;
+ uint64_t cmp_pctl:6;
+ uint64_t reserved_54_55:2;
+ uint64_t cmp_nctl:6;
+ uint64_t reserved_46_47:2;
+ uint64_t drv_pctl:6;
+ uint64_t reserved_38_39:2;
+ uint64_t drv_nctl:6;
+ uint64_t reserved_29_31:3;
+ uint64_t clk_set:5;
+ uint64_t clkrx_byp:1;
+ uint64_t reserved_21_22:2;
+ uint64_t clkrx_set:5;
+ uint64_t clktx_byp:1;
+ uint64_t reserved_13_14:2;
+ uint64_t clktx_set:5;
+ uint64_t reserved_5_7:3;
+ uint64_t dllrst:1;
+ uint64_t comp:1;
+ uint64_t enable:1;
+ uint64_t clkrst:1;
+ uint64_t mode:1;
+#else
+ uint64_t mode:1;
+ uint64_t clkrst:1;
+ uint64_t enable:1;
+ uint64_t comp:1;
+ uint64_t dllrst:1;
+ uint64_t reserved_5_7:3;
+ uint64_t clktx_set:5;
+ uint64_t reserved_13_14:2;
+ uint64_t clktx_byp:1;
+ uint64_t clkrx_set:5;
+ uint64_t reserved_21_22:2;
+ uint64_t clkrx_byp:1;
+ uint64_t clk_set:5;
+ uint64_t reserved_29_31:3;
+ uint64_t drv_nctl:6;
+ uint64_t reserved_38_39:2;
+ uint64_t drv_pctl:6;
+ uint64_t reserved_46_47:2;
+ uint64_t cmp_nctl:6;
+ uint64_t reserved_54_55:2;
+ uint64_t cmp_pctl:6;
+ uint64_t reserved_62_62:1;
+ uint64_t drv_byp:1;
+#endif
+ } s;
+};
+
+#endif
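
These register layouts follow the usual CVMX access pattern: read the 64-bit CSR into the union's u64 member, update the named bitfields through the per-model struct, then write the whole word back. A minimal sketch of that pattern, assuming the CVMX_AGL_GMX_PRTX_CFG() address macro defined earlier in this header and the cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h; the 'port' index and the helper name are illustrative only:

    #include <asm/octeon/cvmx.h>
    #include <asm/octeon/cvmx-agl-defs.h>

    /* Sketch: enable one AGL (MGMT) port via read-modify-write of its CSR. */
    static void agl_port_enable(int port)
    {
            union cvmx_agl_gmx_prtx_cfg cfg;

            cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
            cfg.s.en = 1;                   /* set the port enable bit */
            cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), cfg.u64);
    }

The endian-conditional bitfield orderings above exist exactly so this field-level access works unchanged whichever byte order the kernel is built for.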
diff --git a/arch/mips/include/asm/octeon/cvmx-asm.h b/arch/mips/include/asm/octeon/cvmx-asm.h
new file mode 100644
index 000000000..31eacc24b
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-asm.h
@@ -0,0 +1,139 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * This file defines ASM primitives for the executive.

+ */
+#ifndef __CVMX_ASM_H__
+#define __CVMX_ASM_H__
+
+#include <asm/octeon/octeon-model.h>
+
+/* other useful stuff */
+#define CVMX_SYNC asm volatile ("sync" : : : "memory")
+/* String version of SYNCW macro for using in inline asm constructs */
+#define CVMX_SYNCW_STR "syncw\nsyncw\n"
+#ifdef __OCTEON__
+
+/* Deprecated, will be removed in future release */
+#define CVMX_SYNCIO asm volatile ("nop")
+
+#define CVMX_SYNCIOBDMA asm volatile ("synciobdma" : : : "memory")
+
+/* Deprecated, will be removed in future release */
+#define CVMX_SYNCIOALL asm volatile ("nop")
+
+/*
+ * We actually use two syncw instructions in a row when we need a write
+ * memory barrier. This is because the CN3XXX series of Octeons have
+ * errata Core-401. This can cause a single syncw to not enforce
+ * ordering under very rare conditions. Even if it is rare, better safe
+ * than sorry.
+ */
+#define CVMX_SYNCW asm volatile ("syncw\n\tsyncw" : : : "memory")
+
+/*
+ * Define new sync instructions to be normal SYNC instructions for
+ * operating systems that use threads.
+ */
+#define CVMX_SYNCWS CVMX_SYNCW
+#define CVMX_SYNCS CVMX_SYNC
+#define CVMX_SYNCWS_STR CVMX_SYNCW_STR
+#else
+/*
+ * Not using a Cavium compiler, always use the slower sync so the
+ * assembler stays happy.
+ */
+/* Deprecated, will be removed in future release */
+#define CVMX_SYNCIO asm volatile ("nop")
+
+#define CVMX_SYNCIOBDMA asm volatile ("sync" : : : "memory")
+
+/* Deprecated, will be removed in future release */
+#define CVMX_SYNCIOALL asm volatile ("nop")
+
+#define CVMX_SYNCW asm volatile ("sync" : : : "memory")
+#define CVMX_SYNCWS CVMX_SYNCW
+#define CVMX_SYNCS CVMX_SYNC
+#define CVMX_SYNCWS_STR CVMX_SYNCW_STR
+#endif
+
+/*
+ * CVMX_PREPARE_FOR_STORE makes each byte of the block unpredictable
+ * (actually old value or zero) until that byte is stored to (by this or
+ * another processor).  Note that the value of each byte is not only
+ * unpredictable, but may also change again - up until the point when one
+ * of the cores stores to the byte.
+ */
+#define CVMX_PREPARE_FOR_STORE(address, offset) \
+ asm volatile ("pref 30, " CVMX_TMP_STR(offset) "(%[rbase])" : : \
+ [rbase] "d" (address))
+/*
+ * This is a command headed to the L2 controller to tell it to clear
+ * its dirty bit for a block. Basically, SW is telling HW that the
+ * current version of the block will not be used.
+ */
+#define CVMX_DONT_WRITE_BACK(address, offset) \
+ asm volatile ("pref 29, " CVMX_TMP_STR(offset) "(%[rbase])" : : \
+ [rbase] "d" (address))
+
+/* flush stores, invalidate entire icache */
+#define CVMX_ICACHE_INVALIDATE \
+ { CVMX_SYNC; asm volatile ("synci 0($0)" : : ); }
+
+/* flush stores, invalidate entire icache */
+#define CVMX_ICACHE_INVALIDATE2 \
+ { CVMX_SYNC; asm volatile ("cache 0, 0($0)" : : ); }
+
+/* complete prefetches, invalidate entire dcache */
+#define CVMX_DCACHE_INVALIDATE \
+ { CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); }
+
+#define CVMX_CACHE(op, address, offset) \
+ asm volatile ("cache " CVMX_TMP_STR(op) ", " CVMX_TMP_STR(offset) "(%[rbase])" \
+ : : [rbase] "d" (address) )
+/* fetch and lock the state. */
+#define CVMX_CACHE_LCKL2(address, offset) CVMX_CACHE(31, address, offset)
+/* unlock the state. */
+#define CVMX_CACHE_WBIL2(address, offset) CVMX_CACHE(23, address, offset)
+/* invalidate the cache block and clear the USED bits for the block */
+#define CVMX_CACHE_WBIL2I(address, offset) CVMX_CACHE(3, address, offset)
+/* load virtual tag and data for the L2 cache block into L2C_TAD0_TAG register */
+#define CVMX_CACHE_LTGL2I(address, offset) CVMX_CACHE(7, address, offset)
+
+#define CVMX_POP(result, input) \
+ asm ("pop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+#define CVMX_DPOP(result, input) \
+ asm ("dpop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+
+/* some new cop0-like stuff */
+#define CVMX_RDHWR(result, regstr) \
+ asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
+#define CVMX_RDHWRNV(result, regstr) \
+ asm ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
+#endif /* __CVMX_ASM_H__ */
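
A hedged usage sketch for the write barrier above: the doubled syncw (the Core-401 workaround) is what code relies on when it fills a memory block with ordinary stores and then publishes it to hardware or to another core. The descriptor and ring names below are hypothetical and only illustrate the ordering:

    /* Sketch only: 'desc', 'ring', 'buf_phys', 'len', 'next_index' are hypothetical. */
    desc->addr = buf_phys;          /* fill the descriptor with normal stores */
    desc->len  = len;
    CVMX_SYNCW;                     /* order the stores before publication (two syncw) */
    ring->head = next_index;        /* now visible to the consumer in the right order */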
diff --git a/arch/mips/include/asm/octeon/cvmx-asxx-defs.h b/arch/mips/include/asm/octeon/cvmx-asxx-defs.h
new file mode 100644
index 000000000..70f4a5729
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-asxx-defs.h
@@ -0,0 +1,566 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (C) 2003-2018 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_ASXX_DEFS_H__
+#define __CVMX_ASXX_DEFS_H__
+
+#define CVMX_ASXX_GMII_RX_CLK_SET(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000180ull))
+#define CVMX_ASXX_GMII_RX_DAT_SET(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000188ull))
+#define CVMX_ASXX_INT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000018ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000010ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_MII_RX_DAT_SET(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000190ull))
+#define CVMX_ASXX_PRT_LOOP(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000040ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_RLD_BYPASS(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000248ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_RLD_BYPASS_SETTING(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000250ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_RLD_COMP(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000220ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_RLD_DATA_DRV(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000218ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_RLD_FCRAM_MODE(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000210ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_RLD_NCTL_STRONG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000230ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_RLD_NCTL_WEAK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000240ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_RLD_PCTL_STRONG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000228ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_RLD_PCTL_WEAK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000238ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_RLD_SETTING(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000258ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_RX_CLK_SETX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000020ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
+#define CVMX_ASXX_RX_PRT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000000ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_RX_WOL(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000100ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_RX_WOL_MSK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000108ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_RX_WOL_POWOK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000118ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_RX_WOL_SIG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000110ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_TX_CLK_SETX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
+#define CVMX_ASXX_TX_COMP_BYP(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000068ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_ASXX_TX_HI_WATERX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
+#define CVMX_ASXX_TX_PRT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000008ull) + ((block_id) & 1) * 0x8000000ull)
+
+void __cvmx_interrupt_asxx_enable(int block);
+
+union cvmx_asxx_gmii_rx_clk_set {
+ uint64_t u64;
+ struct cvmx_asxx_gmii_rx_clk_set_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t setting:5;
+#else
+ uint64_t setting:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_asxx_gmii_rx_dat_set {
+ uint64_t u64;
+ struct cvmx_asxx_gmii_rx_dat_set_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t setting:5;
+#else
+ uint64_t setting:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_asxx_int_en {
+ uint64_t u64;
+ struct cvmx_asxx_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t txpsh:4;
+ uint64_t txpop:4;
+ uint64_t ovrflw:4;
+#else
+ uint64_t ovrflw:4;
+ uint64_t txpop:4;
+ uint64_t txpsh:4;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+ struct cvmx_asxx_int_en_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t txpsh:3;
+ uint64_t reserved_7_7:1;
+ uint64_t txpop:3;
+ uint64_t reserved_3_3:1;
+ uint64_t ovrflw:3;
+#else
+ uint64_t ovrflw:3;
+ uint64_t reserved_3_3:1;
+ uint64_t txpop:3;
+ uint64_t reserved_7_7:1;
+ uint64_t txpsh:3;
+ uint64_t reserved_11_63:53;
+#endif
+ } cn30xx;
+};
+
+union cvmx_asxx_int_reg {
+ uint64_t u64;
+ struct cvmx_asxx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t txpsh:4;
+ uint64_t txpop:4;
+ uint64_t ovrflw:4;
+#else
+ uint64_t ovrflw:4;
+ uint64_t txpop:4;
+ uint64_t txpsh:4;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+ struct cvmx_asxx_int_reg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t txpsh:3;
+ uint64_t reserved_7_7:1;
+ uint64_t txpop:3;
+ uint64_t reserved_3_3:1;
+ uint64_t ovrflw:3;
+#else
+ uint64_t ovrflw:3;
+ uint64_t reserved_3_3:1;
+ uint64_t txpop:3;
+ uint64_t reserved_7_7:1;
+ uint64_t txpsh:3;
+ uint64_t reserved_11_63:53;
+#endif
+ } cn30xx;
+};
+
+union cvmx_asxx_mii_rx_dat_set {
+ uint64_t u64;
+ struct cvmx_asxx_mii_rx_dat_set_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t setting:5;
+#else
+ uint64_t setting:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_asxx_prt_loop {
+ uint64_t u64;
+ struct cvmx_asxx_prt_loop_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t ext_loop:4;
+ uint64_t int_loop:4;
+#else
+ uint64_t int_loop:4;
+ uint64_t ext_loop:4;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+ struct cvmx_asxx_prt_loop_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t ext_loop:3;
+ uint64_t reserved_3_3:1;
+ uint64_t int_loop:3;
+#else
+ uint64_t int_loop:3;
+ uint64_t reserved_3_3:1;
+ uint64_t ext_loop:3;
+ uint64_t reserved_7_63:57;
+#endif
+ } cn30xx;
+};
+
+union cvmx_asxx_rld_bypass {
+ uint64_t u64;
+ struct cvmx_asxx_rld_bypass_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t bypass:1;
+#else
+ uint64_t bypass:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_asxx_rld_bypass_setting {
+ uint64_t u64;
+ struct cvmx_asxx_rld_bypass_setting_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t setting:5;
+#else
+ uint64_t setting:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_asxx_rld_comp {
+ uint64_t u64;
+ struct cvmx_asxx_rld_comp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t pctl:5;
+ uint64_t nctl:4;
+#else
+ uint64_t nctl:4;
+ uint64_t pctl:5;
+ uint64_t reserved_9_63:55;
+#endif
+ } s;
+ struct cvmx_asxx_rld_comp_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t pctl:4;
+ uint64_t nctl:4;
+#else
+ uint64_t nctl:4;
+ uint64_t pctl:4;
+ uint64_t reserved_8_63:56;
+#endif
+ } cn38xx;
+};
+
+union cvmx_asxx_rld_data_drv {
+ uint64_t u64;
+ struct cvmx_asxx_rld_data_drv_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t pctl:4;
+ uint64_t nctl:4;
+#else
+ uint64_t nctl:4;
+ uint64_t pctl:4;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_asxx_rld_fcram_mode {
+ uint64_t u64;
+ struct cvmx_asxx_rld_fcram_mode_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t mode:1;
+#else
+ uint64_t mode:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_asxx_rld_nctl_strong {
+ uint64_t u64;
+ struct cvmx_asxx_rld_nctl_strong_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t nctl:5;
+#else
+ uint64_t nctl:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_asxx_rld_nctl_weak {
+ uint64_t u64;
+ struct cvmx_asxx_rld_nctl_weak_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t nctl:5;
+#else
+ uint64_t nctl:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_asxx_rld_pctl_strong {
+ uint64_t u64;
+ struct cvmx_asxx_rld_pctl_strong_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t pctl:5;
+#else
+ uint64_t pctl:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_asxx_rld_pctl_weak {
+ uint64_t u64;
+ struct cvmx_asxx_rld_pctl_weak_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t pctl:5;
+#else
+ uint64_t pctl:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_asxx_rld_setting {
+ uint64_t u64;
+ struct cvmx_asxx_rld_setting_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t dfaset:5;
+ uint64_t dfalag:1;
+ uint64_t dfalead:1;
+ uint64_t dfalock:1;
+ uint64_t setting:5;
+#else
+ uint64_t setting:5;
+ uint64_t dfalock:1;
+ uint64_t dfalead:1;
+ uint64_t dfalag:1;
+ uint64_t dfaset:5;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+ struct cvmx_asxx_rld_setting_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t setting:5;
+#else
+ uint64_t setting:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } cn38xx;
+};
+
+union cvmx_asxx_rx_clk_setx {
+ uint64_t u64;
+ struct cvmx_asxx_rx_clk_setx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t setting:5;
+#else
+ uint64_t setting:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_asxx_rx_prt_en {
+ uint64_t u64;
+ struct cvmx_asxx_rx_prt_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t prt_en:4;
+#else
+ uint64_t prt_en:4;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+ struct cvmx_asxx_rx_prt_en_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t prt_en:3;
+#else
+ uint64_t prt_en:3;
+ uint64_t reserved_3_63:61;
+#endif
+ } cn30xx;
+};
+
+union cvmx_asxx_rx_wol {
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t status:1;
+ uint64_t enable:1;
+#else
+ uint64_t enable:1;
+ uint64_t status:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_asxx_rx_wol_msk {
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_msk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t msk:64;
+#else
+ uint64_t msk:64;
+#endif
+ } s;
+};
+
+union cvmx_asxx_rx_wol_powok {
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_powok_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t powerok:1;
+#else
+ uint64_t powerok:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_asxx_rx_wol_sig {
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_sig_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t sig:32;
+#else
+ uint64_t sig:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_asxx_tx_clk_setx {
+ uint64_t u64;
+ struct cvmx_asxx_tx_clk_setx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t setting:5;
+#else
+ uint64_t setting:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_asxx_tx_comp_byp {
+ uint64_t u64;
+ struct cvmx_asxx_tx_comp_byp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_asxx_tx_comp_byp_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t bypass:1;
+ uint64_t pctl:4;
+ uint64_t nctl:4;
+#else
+ uint64_t nctl:4;
+ uint64_t pctl:4;
+ uint64_t bypass:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn30xx;
+ struct cvmx_asxx_tx_comp_byp_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t pctl:4;
+ uint64_t nctl:4;
+#else
+ uint64_t nctl:4;
+ uint64_t pctl:4;
+ uint64_t reserved_8_63:56;
+#endif
+ } cn38xx;
+ struct cvmx_asxx_tx_comp_byp_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t bypass:1;
+ uint64_t reserved_13_15:3;
+ uint64_t pctl:5;
+ uint64_t reserved_5_7:3;
+ uint64_t nctl:5;
+#else
+ uint64_t nctl:5;
+ uint64_t reserved_5_7:3;
+ uint64_t pctl:5;
+ uint64_t reserved_13_15:3;
+ uint64_t bypass:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } cn50xx;
+ struct cvmx_asxx_tx_comp_byp_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t pctl:5;
+ uint64_t reserved_5_7:3;
+ uint64_t nctl:5;
+#else
+ uint64_t nctl:5;
+ uint64_t reserved_5_7:3;
+ uint64_t pctl:5;
+ uint64_t reserved_13_63:51;
+#endif
+ } cn58xx;
+};
+
+union cvmx_asxx_tx_hi_waterx {
+ uint64_t u64;
+ struct cvmx_asxx_tx_hi_waterx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t mark:4;
+#else
+ uint64_t mark:4;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+ struct cvmx_asxx_tx_hi_waterx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t mark:3;
+#else
+ uint64_t mark:3;
+ uint64_t reserved_3_63:61;
+#endif
+ } cn30xx;
+};
+
+union cvmx_asxx_tx_prt_en {
+ uint64_t u64;
+ struct cvmx_asxx_tx_prt_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t prt_en:4;
+#else
+ uint64_t prt_en:4;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+ struct cvmx_asxx_tx_prt_en_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t prt_en:3;
+#else
+ uint64_t prt_en:3;
+ uint64_t reserved_3_63:61;
+#endif
+ } cn30xx;
+};
+
+#endif
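
As with the other CVMX register headers, these unions are normally used through cvmx_read_csr()/cvmx_write_csr() from cvmx.h together with the address macros above. A small sketch, assuming those accessors; 'interface' and 'port' are hypothetical values:

    /* Sketch: enable RX and TX for one RGMII port on an ASX block. */
    union cvmx_asxx_rx_prt_en rx_en;
    union cvmx_asxx_tx_prt_en tx_en;

    rx_en.u64 = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
    rx_en.s.prt_en |= 1 << port;
    cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), rx_en.u64);

    tx_en.u64 = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
    tx_en.s.prt_en |= 1 << port;
    cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), tx_en.u64);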
diff --git a/arch/mips/include/asm/octeon/cvmx-boot-vector.h b/arch/mips/include/asm/octeon/cvmx-boot-vector.h
new file mode 100644
index 000000000..8db08241d
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-boot-vector.h
@@ -0,0 +1,53 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003-2017 Cavium, Inc.
+ */
+
+#ifndef __CVMX_BOOT_VECTOR_H__
+#define __CVMX_BOOT_VECTOR_H__
+
+#include <asm/octeon/octeon.h>
+
+/*
+ * The boot vector table is made up of an array of 1024 elements of
+ * struct cvmx_boot_vector_element. There is one entry for each
+ * possible MIPS CPUNum, indexed by the CPUNum.
+ *
+ * Once cvmx_boot_vector_get() returns a non-NULL value (indicating
+ * success), NMI to a core will cause execution to transfer to the
+ * target_ptr location for that core's entry in the vector table.
+ *
+ * The struct cvmx_boot_vector_element fields app0, app1, and app2 can
+ * be used by the application that has set the target_ptr in any
+ * application-specific manner; they are not touched by the vectoring
+ * code.
+ *
+ * The boot vector code clobbers the CP0_DESAVE register, and on
+ * OCTEON II and later CPUs also clobbers CP0_KScratch2. All GP
+ * registers are preserved, except on pre-OCTEON II CPUs, where k1 is
+ * clobbered.
+ *
+ */
+
+
+/*
+ * Applications install the boot bus code in cvmx-boot-vector.c, which
+ * uses this magic:
+ */
+#define OCTEON_BOOT_MOVEABLE_MAGIC1 0xdb00110ad358eacdull
+
+struct cvmx_boot_vector_element {
+ /* kseg0 or xkphys address of target code. */
+ uint64_t target_ptr;
+ /* Three application specific arguments. */
+ uint64_t app0;
+ uint64_t app1;
+ uint64_t app2;
+};
+
+struct cvmx_boot_vector_element *cvmx_boot_vector_get(void);
+
+#endif /* __CVMX_BOOT_VECTOR_H__ */
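
A hedged sketch of how an application might use the table described above: fetch the vector, fill in the entry for a target core, then raise an NMI on that core so it starts executing at target_ptr. The core number, entry point, argument, and the NMI-raising step are assumptions for illustration:

    struct cvmx_boot_vector_element *vec = cvmx_boot_vector_get();

    if (vec) {
            /* 'cpunum', 'secondary_entry' and 'arg' are hypothetical. */
            vec[cpunum].app0 = (uint64_t)arg;                    /* private argument */
            vec[cpunum].target_ptr = (uint64_t)secondary_entry;  /* kseg0/xkphys code address */
            /* ...then deliver an NMI to 'cpunum' (e.g. through the CIU) to start it. */
    }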
diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
new file mode 100644
index 000000000..e77e8b7c0
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
@@ -0,0 +1,424 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Header file containing the ABI with the bootloader.
+ */
+
+#ifndef __CVMX_BOOTINFO_H__
+#define __CVMX_BOOTINFO_H__
+
+#include "cvmx-coremask.h"
+
+/*
+ * Current major and minor versions of the CVMX bootinfo block that is
+ * passed from the bootloader to the application. This is versioned
+ * so that applications can properly handle multiple bootloader
+ * versions.
+ */
+#define CVMX_BOOTINFO_MAJ_VER 1
+#define CVMX_BOOTINFO_MIN_VER 4
+
+#if (CVMX_BOOTINFO_MAJ_VER == 1)
+#define CVMX_BOOTINFO_OCTEON_SERIAL_LEN 20
+/*
+ * This structure is populated by the bootloader. For binary
+ * compatibility the only changes that should be made are
+ * adding members to the end of the structure, and the minor
+ * version should be incremented at that time.
+ * If an incompatible change is made, the major version
+ * must be incremented, and the minor version should be reset
+ * to 0.
+ */
+struct cvmx_bootinfo {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t major_version;
+ uint32_t minor_version;
+
+ uint64_t stack_top;
+ uint64_t heap_base;
+ uint64_t heap_end;
+ uint64_t desc_vaddr;
+
+ uint32_t exception_base_addr;
+ uint32_t stack_size;
+ uint32_t flags;
+ uint32_t core_mask;
+ /* DRAM size in megabytes */
+ uint32_t dram_size;
+ /* physical address of free memory descriptor block */
+ uint32_t phy_mem_desc_addr;
+ /* used to pass flags from app to debugger */
+ uint32_t debugger_flags_base_addr;
+
+ /* CPU clock speed, in hz */
+ uint32_t eclock_hz;
+
+ /* DRAM clock speed, in hz */
+ uint32_t dclock_hz;
+
+ uint32_t reserved0;
+ uint16_t board_type;
+ uint8_t board_rev_major;
+ uint8_t board_rev_minor;
+ uint16_t reserved1;
+ uint8_t reserved2;
+ uint8_t reserved3;
+ char board_serial_number[CVMX_BOOTINFO_OCTEON_SERIAL_LEN];
+ uint8_t mac_addr_base[6];
+ uint8_t mac_addr_count;
+#if (CVMX_BOOTINFO_MIN_VER >= 1)
+ /*
+ * Several boards support compact flash on the Octeon boot
+ * bus. The CF memory spaces may be mapped to different
+ * addresses on different boards. These are the physical
+ * addresses, so care must be taken to use the correct
+ * XKPHYS/KSEG0 addressing depending on the application's
+ * ABI. These values will be 0 if CF is not present.
+ */
+ uint64_t compact_flash_common_base_addr;
+ uint64_t compact_flash_attribute_base_addr;
+ /*
+ * Base address of the LED display (as on EBT3000 board)
+ * This will be 0 if LED display not present.
+ */
+ uint64_t led_display_base_addr;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 2)
+ /* DFA reference clock in hz (if applicable) */
+ uint32_t dfa_ref_clock_hz;
+
+ /*
+ * flags indicating various configuration options. These
+ * flags supersede the 'flags' variable and should be used
+ * instead if available.
+ */
+ uint32_t config_flags;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 3)
+ /*
+ * Address of the OF Flattened Device Tree structure
+ * describing the board.
+ */
+ uint64_t fdt_addr;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 4)
+ /*
+ * Coremask used for processors with more than 32 cores
+ * or with OCI. This replaces core_mask.
+ */
+ struct cvmx_coremask ext_core_mask;
+#endif
+#else /* __BIG_ENDIAN */
+ /*
+ * Little-Endian: When the CPU mode is switched to
+ * little-endian, the view of the structure has some of the
+ * fields swapped.
+ */
+ uint32_t minor_version;
+ uint32_t major_version;
+
+ uint64_t stack_top;
+ uint64_t heap_base;
+ uint64_t heap_end;
+ uint64_t desc_vaddr;
+
+ uint32_t stack_size;
+ uint32_t exception_base_addr;
+
+ uint32_t core_mask;
+ uint32_t flags;
+
+ uint32_t phy_mem_desc_addr;
+ uint32_t dram_size;
+
+ uint32_t eclock_hz;
+ uint32_t debugger_flags_base_addr;
+
+ uint32_t reserved0;
+ uint32_t dclock_hz;
+
+ uint8_t reserved3;
+ uint8_t reserved2;
+ uint16_t reserved1;
+ uint8_t board_rev_minor;
+ uint8_t board_rev_major;
+ uint16_t board_type;
+
+ char board_serial_number[CVMX_BOOTINFO_OCTEON_SERIAL_LEN];
+ uint8_t mac_addr_base[6];
+ uint8_t mac_addr_count;
+ uint8_t pad[5];
+
+#if (CVMX_BOOTINFO_MIN_VER >= 1)
+ uint64_t compact_flash_common_base_addr;
+ uint64_t compact_flash_attribute_base_addr;
+ uint64_t led_display_base_addr;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 2)
+ uint32_t config_flags;
+ uint32_t dfa_ref_clock_hz;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 3)
+ uint64_t fdt_addr;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 4)
+ struct cvmx_coremask ext_core_mask;
+#endif
+#endif
+};
+
+#define CVMX_BOOTINFO_CFG_FLAG_PCI_HOST (1ull << 0)
+#define CVMX_BOOTINFO_CFG_FLAG_PCI_TARGET (1ull << 1)
+#define CVMX_BOOTINFO_CFG_FLAG_DEBUG (1ull << 2)
+#define CVMX_BOOTINFO_CFG_FLAG_NO_MAGIC (1ull << 3)
+/* This flag is set if the TLB mappings are not contained in the
+ * 0x10000000 - 0x20000000 boot bus region. */
+#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING (1ull << 4)
+#define CVMX_BOOTINFO_CFG_FLAG_BREAK (1ull << 5)
+
+#endif /* (CVMX_BOOTINFO_MAJ_VER == 1) */
+
+/* Type defines for board and chip types */
+enum cvmx_board_types_enum {
+ CVMX_BOARD_TYPE_NULL = 0,
+ CVMX_BOARD_TYPE_SIM = 1,
+ CVMX_BOARD_TYPE_EBT3000 = 2,
+ CVMX_BOARD_TYPE_KODAMA = 3,
+ CVMX_BOARD_TYPE_NIAGARA = 4,
+ CVMX_BOARD_TYPE_NAC38 = 5, /* formerly NAO38 */
+ CVMX_BOARD_TYPE_THUNDER = 6,
+ CVMX_BOARD_TYPE_TRANTOR = 7,
+ CVMX_BOARD_TYPE_EBH3000 = 8,
+ CVMX_BOARD_TYPE_EBH3100 = 9,
+ CVMX_BOARD_TYPE_HIKARI = 10,
+ CVMX_BOARD_TYPE_CN3010_EVB_HS5 = 11,
+ CVMX_BOARD_TYPE_CN3005_EVB_HS5 = 12,
+ CVMX_BOARD_TYPE_KBP = 13,
+ /* Deprecated, CVMX_BOARD_TYPE_CN3010_EVB_HS5 supports the CN3020 */
+ CVMX_BOARD_TYPE_CN3020_EVB_HS5 = 14,
+ CVMX_BOARD_TYPE_EBT5800 = 15,
+ CVMX_BOARD_TYPE_NICPRO2 = 16,
+ CVMX_BOARD_TYPE_EBH5600 = 17,
+ CVMX_BOARD_TYPE_EBH5601 = 18,
+ CVMX_BOARD_TYPE_EBH5200 = 19,
+ CVMX_BOARD_TYPE_BBGW_REF = 20,
+ CVMX_BOARD_TYPE_NIC_XLE_4G = 21,
+ CVMX_BOARD_TYPE_EBT5600 = 22,
+ CVMX_BOARD_TYPE_EBH5201 = 23,
+ CVMX_BOARD_TYPE_EBT5200 = 24,
+ CVMX_BOARD_TYPE_CB5600 = 25,
+ CVMX_BOARD_TYPE_CB5601 = 26,
+ CVMX_BOARD_TYPE_CB5200 = 27,
+ /* Special 'generic' board type, supports many boards */
+ CVMX_BOARD_TYPE_GENERIC = 28,
+ CVMX_BOARD_TYPE_EBH5610 = 29,
+ CVMX_BOARD_TYPE_LANAI2_A = 30,
+ CVMX_BOARD_TYPE_LANAI2_U = 31,
+ CVMX_BOARD_TYPE_EBB5600 = 32,
+ CVMX_BOARD_TYPE_EBB6300 = 33,
+ CVMX_BOARD_TYPE_NIC_XLE_10G = 34,
+ CVMX_BOARD_TYPE_LANAI2_G = 35,
+ CVMX_BOARD_TYPE_EBT5810 = 36,
+ CVMX_BOARD_TYPE_NIC10E = 37,
+ CVMX_BOARD_TYPE_EP6300C = 38,
+ CVMX_BOARD_TYPE_EBB6800 = 39,
+ CVMX_BOARD_TYPE_NIC4E = 40,
+ CVMX_BOARD_TYPE_NIC2E = 41,
+ CVMX_BOARD_TYPE_EBB6600 = 42,
+ CVMX_BOARD_TYPE_REDWING = 43,
+ CVMX_BOARD_TYPE_NIC68_4 = 44,
+ CVMX_BOARD_TYPE_NIC10E_66 = 45,
+ CVMX_BOARD_TYPE_MAX,
+
+ /*
+ * The range from CVMX_BOARD_TYPE_MAX to
+ * CVMX_BOARD_TYPE_CUST_DEFINED_MIN is reserved for future
+ * SDK use.
+ */
+
+ /*
+ * Set aside a range for customer boards. These numbers are managed
+ * by Cavium.
+ */
+ CVMX_BOARD_TYPE_CUST_DEFINED_MIN = 10000,
+ CVMX_BOARD_TYPE_CUST_WSX16 = 10001,
+ CVMX_BOARD_TYPE_CUST_NS0216 = 10002,
+ CVMX_BOARD_TYPE_CUST_NB5 = 10003,
+ CVMX_BOARD_TYPE_CUST_WMR500 = 10004,
+ CVMX_BOARD_TYPE_CUST_ITB101 = 10005,
+ CVMX_BOARD_TYPE_CUST_NTE102 = 10006,
+ CVMX_BOARD_TYPE_CUST_AGS103 = 10007,
+ CVMX_BOARD_TYPE_CUST_GST104 = 10008,
+ CVMX_BOARD_TYPE_CUST_GCT105 = 10009,
+ CVMX_BOARD_TYPE_CUST_AGS106 = 10010,
+ CVMX_BOARD_TYPE_CUST_SGM107 = 10011,
+ CVMX_BOARD_TYPE_CUST_GCT108 = 10012,
+ CVMX_BOARD_TYPE_CUST_AGS109 = 10013,
+ CVMX_BOARD_TYPE_CUST_GCT110 = 10014,
+ CVMX_BOARD_TYPE_CUST_L2_AIR_SENDER = 10015,
+ CVMX_BOARD_TYPE_CUST_L2_AIR_RECEIVER = 10016,
+ CVMX_BOARD_TYPE_CUST_L2_ACCTON2_TX = 10017,
+ CVMX_BOARD_TYPE_CUST_L2_ACCTON2_RX = 10018,
+ CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_TX = 10019,
+ CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_RX = 10020,
+ CVMX_BOARD_TYPE_CUST_L2_ZINWELL = 10021,
+ CVMX_BOARD_TYPE_CUST_DEFINED_MAX = 20000,
+
+ /*
+ * Set aside a range for customer private use. The SDK won't
+ * use any numbers in this range.
+ */
+ CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
+ CVMX_BOARD_TYPE_UBNT_E100 = 20002,
+ CVMX_BOARD_TYPE_UBNT_E200 = 20003,
+ CVMX_BOARD_TYPE_UBNT_E220 = 20005,
+ CVMX_BOARD_TYPE_CUST_DSR1000N = 20006,
+ CVMX_BOARD_TYPE_KONTRON_S1901 = 21901,
+ CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000,
+
+ /* The remaining range is reserved for future use. */
+};
+
+enum cvmx_chip_types_enum {
+ CVMX_CHIP_TYPE_NULL = 0,
+ CVMX_CHIP_SIM_TYPE_DEPRECATED = 1,
+ CVMX_CHIP_TYPE_OCTEON_SAMPLE = 2,
+ CVMX_CHIP_TYPE_MAX,
+};
+
+/* Compatibility alias for NAC38 name change, planned to be removed
+ * from SDK 1.7 */
+#define CVMX_BOARD_TYPE_NAO38 CVMX_BOARD_TYPE_NAC38
+
+/* Functions to return string based on type */
+#define ENUM_BRD_TYPE_CASE(x) \
+ case x: return (&#x[16]); /* Skip CVMX_BOARD_TYPE_ */
+static inline const char *cvmx_board_type_to_string(enum
+ cvmx_board_types_enum type)
+{
+ switch (type) {
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NULL)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_SIM)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT3000)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KODAMA)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIAGARA)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NAC38)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_THUNDER)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_TRANTOR)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3000)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3100)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_HIKARI)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3010_EVB_HS5)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3005_EVB_HS5)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KBP)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3020_EVB_HS5)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5800)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NICPRO2)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5600)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5601)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5200)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_BBGW_REF)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_4G)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5600)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5201)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5200)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5600)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5601)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5200)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_GENERIC)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5610)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_A)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_U)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB5600)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6300)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_10G)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_G)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5810)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC10E)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EP6300C)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6800)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC4E)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC2E)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6600)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_REDWING)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC68_4)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC10E_66)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MAX)
+
+ /* Customer boards listed here */
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MIN)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WSX16)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NS0216)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NB5)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WMR500)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_ITB101)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NTE102)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS103)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GST104)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT105)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS106)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_SGM107)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT108)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS109)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT110)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_AIR_SENDER)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_AIR_RECEIVER)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ACCTON2_TX)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ACCTON2_RX)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_TX)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_RX)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ZINWELL)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MAX)
+
+ /* Customer private range */
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E100)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E200)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E220)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DSR1000N)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KONTRON_S1901)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX)
+ }
+ return NULL;
+}
+
+#define ENUM_CHIP_TYPE_CASE(x) \
+ case x: return (&#x[15]); /* Skip CVMX_CHIP_TYPE */
+static inline const char *cvmx_chip_type_to_string(enum
+ cvmx_chip_types_enum type)
+{
+ switch (type) {
+ ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_NULL)
+ ENUM_CHIP_TYPE_CASE(CVMX_CHIP_SIM_TYPE_DEPRECATED)
+ ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_OCTEON_SAMPLE)
+ ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_MAX)
+ }
+ return "Unsupported Chip";
+}
+
+#endif /* __CVMX_BOOTINFO_H__ */
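Editor's note (not part of the patch): the helpers above map raw bootinfo fields to printable names. A minimal usage sketch follows; it assumes a kernel context where this header and printk are available, and the function name is hypothetical.

	#include <linux/types.h>
	#include <linux/printk.h>
	#include <asm/octeon/cvmx-bootinfo.h>

	/* Print the human-readable board name for a bootinfo board_type value.
	 * cvmx_board_type_to_string() returns the enum name with the
	 * 16-character "CVMX_BOARD_TYPE_" prefix skipped, or NULL for values
	 * it does not know about.
	 */
	static void example_print_board(uint16_t board_type)
	{
		const char *name;

		name = cvmx_board_type_to_string((enum cvmx_board_types_enum)board_type);
		pr_info("Octeon board: %s\n", name ? name : "unknown");
	}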
diff --git a/arch/mips/include/asm/octeon/cvmx-bootmem.h b/arch/mips/include/asm/octeon/cvmx-bootmem.h
new file mode 100644
index 000000000..689a82cac
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-bootmem.h
@@ -0,0 +1,341 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Simple allocate-only memory allocator, used to allocate memory at
+ * application start time.
+ */
+
+#ifndef __CVMX_BOOTMEM_H__
+#define __CVMX_BOOTMEM_H__
+/* Must be multiple of 8, changing breaks ABI */
+#define CVMX_BOOTMEM_NAME_LEN 128
+
+/* Can change without breaking ABI */
+#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64
+
+/* Minimum alignment of bootmem-allocated blocks */
+#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull)
+
+/* Flags for cvmx_bootmem_phy_mem* functions */
+/* Allocate from end of block instead of beginning */
+#define CVMX_BOOTMEM_FLAG_END_ALLOC (1 << 0)
+
+/* Don't do any locking. */
+#define CVMX_BOOTMEM_FLAG_NO_LOCKING (1 << 1)
+
+/* First bytes of each free physical block of memory contain this structure,
+ * which is used to maintain the free memory list. Since the bootloader is
+ * only 32 bits, there is a union providing 64 and 32 bit versions. The
+ * application init code converts addresses to 64 bit addresses before the
+ * application starts.
+ */
+struct cvmx_bootmem_block_header {
+ /*
+ * Note: these are referenced from assembly routines in the
+ * bootloader, so this structure should not be changed
+ * without changing those routines as well.
+ */
+ uint64_t next_block_addr;
+ uint64_t size;
+
+};
+
+/*
+ * Structure for named memory blocks. Number of descriptors available
+ * can be changed without affecting compatibility, but name length
+ * changes require a bump in the bootmem descriptor version Note: This
+ * structure must be naturally 64 bit aligned, as a single memory
+ * image will be used by both 32 and 64 bit programs.
+ */
+struct cvmx_bootmem_named_block_desc {
+ /* Base address of named block */
+ uint64_t base_addr;
+ /*
+ * Size actually allocated for named block (may differ from
+ * requested).
+ */
+ uint64_t size;
+ /* name of named block */
+ char name[CVMX_BOOTMEM_NAME_LEN];
+};
+
+/* Current descriptor versions */
+/* CVMX bootmem descriptor major version */
+#define CVMX_BOOTMEM_DESC_MAJ_VER 3
+
+/* CVMX bootmem descriptor minor version */
+#define CVMX_BOOTMEM_DESC_MIN_VER 0
+
+/* First three members of cvmx_bootmem_desc_t are left in original
+ * positions for backwards compatibility.
+ */
+struct cvmx_bootmem_desc {
+#if defined(__BIG_ENDIAN_BITFIELD) || defined(CVMX_BUILD_FOR_LINUX_HOST)
+ /* spinlock to control access to list */
+ uint32_t lock;
+ /* flags for indicating various conditions */
+ uint32_t flags;
+ uint64_t head_addr;
+
+ /* Incremented when incompatible changes made */
+ uint32_t major_version;
+
+ /*
+	 * Incremented when compatible changes are made; reset to
+	 * zero when the major version is incremented.
+ */
+ uint32_t minor_version;
+
+ uint64_t app_data_addr;
+ uint64_t app_data_size;
+
+ /* number of elements in named blocks array */
+ uint32_t named_block_num_blocks;
+
+ /* length of name array in bootmem blocks */
+ uint32_t named_block_name_len;
+ /* address of named memory block descriptors */
+ uint64_t named_block_array_addr;
+#else /* __LITTLE_ENDIAN */
+ uint32_t flags;
+ uint32_t lock;
+ uint64_t head_addr;
+
+ uint32_t minor_version;
+ uint32_t major_version;
+ uint64_t app_data_addr;
+ uint64_t app_data_size;
+
+ uint32_t named_block_name_len;
+ uint32_t named_block_num_blocks;
+ uint64_t named_block_array_addr;
+#endif
+};
+
+/**
+ * Initialize the boot alloc memory structures. This is
+ * normally called inside of cvmx_user_app_init()
+ *
+ * @mem_desc_ptr: Address of the free memory list
+ */
+extern int cvmx_bootmem_init(void *mem_desc_ptr);
+
+/**
+ * Allocate a block of memory from the free list that was
+ * passed to the application by the bootloader at a specific
+ * address. This is an allocate-only algorithm, so
+ * freeing memory is not possible. Allocation will fail if
+ * memory cannot be allocated at the specified address.
+ *
+ * @size: Size in bytes of block to allocate
+ * @address: Physical address to allocate memory at. If this memory is not
+ * available, the allocation fails.
+ * @alignment: Alignment required - must be power of 2
+ * Returns pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address,
+ uint64_t alignment);
+
+/**
+ * Frees a previously allocated named bootmem block.
+ *
+ * @name: name of block to free
+ *
+ * Returns 0 on failure,
+ * !0 on success
+ */
+
+
+/**
+ * Allocate a block of memory from the free list that was passed
+ * to the application by the bootloader, and assign it a name in the
+ * global named block table. (part of the cvmx_bootmem_descriptor_t structure)
+ * Named blocks can later be freed.
+ *
+ * @size: Size in bytes of block to allocate
+ * @alignment: Alignment required - must be power of 2
+ * @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
+ *
+ * Returns a pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment,
+ char *name);
+
+/**
+ * Allocate a block of memory from a specific range of the free list
+ * that was passed to the application by the bootloader, and assign it
+ * a name in the global named block table. (part of the
+ * cvmx_bootmem_descriptor_t structure) Named blocks can later be
+ * freed. If request cannot be satisfied within the address range
+ * specified, NULL is returned
+ *
+ * @size: Size in bytes of block to allocate
+ * @min_addr: minimum address of range
+ * @max_addr: maximum address of range
+ * @align: Alignment of memory to be allocated. (must be a power of 2)
+ * @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
+ *
+ * Returns a pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr,
+ uint64_t max_addr, uint64_t align,
+ char *name);
+
+/**
+ * Allocate if needed a block of memory from a specific range of the
+ * free list that was passed to the application by the bootloader, and
+ * assign it a name in the global named block table. (part of the
+ * cvmx_bootmem_descriptor_t structure) Named blocks can later be
+ * freed. If the requested name block is already allocated, return
+ * the pointer to block of memory. If request cannot be satisfied
+ * within the address range specified, NULL is returned
+ *
+ * @param size Size in bytes of block to allocate
+ * @param min_addr minimum address of range
+ * @param max_addr maximum address of range
+ * @param align Alignment of memory to be allocated. (must be a power of 2)
+ * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
+ * @param init Initialization function
+ *
+ * The initialization function is optional; if omitted, the named block
+ * is initialized to all zeros when it is created, i.e. once.
+ *
+ * @return pointer to block of memory, NULL on error
+ */
+void *cvmx_bootmem_alloc_named_range_once(uint64_t size,
+ uint64_t min_addr,
+ uint64_t max_addr,
+ uint64_t align,
+ char *name,
+ void (*init) (void *));
+
+extern int cvmx_bootmem_free_named(char *name);
+
+/**
+ * Finds a named bootmem block by name.
+ *
+ * @name: name of block to find
+ *
+ * Returns pointer to named block descriptor on success
+ * 0 on failure
+ */
+struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name);
+
+/**
+ * Allocates a block of physical memory from the free list, at
+ * (optional) requested address and alignment.
+ *
+ * @req_size: size of region to allocate. All requests are rounded up
+ *	      to be a multiple of CVMX_BOOTMEM_ALIGNMENT_SIZE bytes in size
+ *
+ * @address_min: Minimum address that block can occupy.
+ *
+ * @address_max: Specifies the maximum address (inclusive) that
+ * the allocation can use.
+ *
+ * @alignment: Requested alignment of the block. If this alignment
+ * cannot be met, the allocation fails. This must be a
+ * power of 2. (Note: Alignment of
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
+ * internally enforced. Requested alignments of less than
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ *
+ * @flags: Flags to control options for the allocation.
+ *
+ * Returns physical address of block allocated, or -1 on failure
+ */
+int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
+ uint64_t address_max, uint64_t alignment,
+ uint32_t flags);
+
+/**
+ * Allocates a named block of physical memory from the free list, at
+ * (optional) requested address and alignment.
+ *
+ * @param size size of region to allocate. All requests are rounded
+ *                  up to be a multiple of CVMX_BOOTMEM_ALIGNMENT_SIZE
+ *                  bytes in size
+ * @param min_addr Minimum address that block can occupy.
+ * @param max_addr Specifies the maximum address (inclusive) that
+ * the allocation can use.
+ * @param alignment Requested alignment of the block. If this
+ * alignment cannot be met, the allocation fails.
+ * This must be a power of 2. (Note: Alignment of
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
+ * internally enforced. Requested alignments of less
+ * than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ * @param name name to assign to named block
+ * @param flags Flags to control options for the allocation.
+ *
+ * @return physical address of block allocated, or -1 on failure
+ */
+int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
+ uint64_t max_addr,
+ uint64_t alignment,
+ char *name, uint32_t flags);
+
+/**
+ * Frees a block to the bootmem allocator list. This must
+ * be used with care, as the size provided must match the size
+ * of the block that was allocated, or the list will become
+ * corrupted.
+ *
+ * IMPORTANT: This is only intended to be used as part of named block
+ * frees and initial population of the free memory list.
+ *
+ * @phy_addr: physical address of block
+ * @size: size of block in bytes.
+ * @flags: flags for passing options
+ *
+ * Returns 1 on success,
+ * 0 on failure
+ */
+int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags);
+
+/**
+ * Locks the bootmem allocator. This is useful in certain situations
+ * where multiple allocations must be made without being interrupted.
+ * This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
+ *
+ */
+void cvmx_bootmem_lock(void);
+
+/**
+ * Unlocks the bootmem allocator. This is useful in certain situations
+ * where multiple allocations must be made without being interrupted.
+ * This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
+ *
+ */
+void cvmx_bootmem_unlock(void);
+
+extern struct cvmx_bootmem_desc *cvmx_bootmem_get_desc(void);
+
+#endif /* __CVMX_BOOTMEM_H__ */
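Editor's note (not part of the patch): to make the allocator API above concrete, here is a minimal sketch of reserving and releasing a named block. The block name and size are made up, and it assumes bootmem has already been set up via cvmx_bootmem_init().

	#include <asm/octeon/cvmx-bootmem.h>

	/* Reserve a 1 MiB region under a well-known name, look it up again,
	 * then release it.  "example_shared_buf" is a made-up name.
	 */
	static int example_named_block(void)
	{
		struct cvmx_bootmem_named_block_desc *desc;
		void *buf;

		buf = cvmx_bootmem_alloc_named(1ull << 20,
					       CVMX_BOOTMEM_ALIGNMENT_SIZE,
					       "example_shared_buf");
		if (!buf)
			return -1;

		/* Other cores/applications can find the same region by name. */
		desc = cvmx_bootmem_find_named_block("example_shared_buf");
		if (!desc)
			return -1;

		/* cvmx_bootmem_free_named() returns non-zero on success. */
		return cvmx_bootmem_free_named("example_shared_buf") ? 0 : -1;
	}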
diff --git a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
new file mode 100644
index 000000000..1d18be8cd
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Octeon CIU definitions
+ *
+ * Copyright (C) 2003-2018 Cavium, Inc.
+ */
+
+#ifndef __CVMX_CIU_DEFS_H__
+#define __CVMX_CIU_DEFS_H__
+
+#include <asm/bitfield.h>
+
+#define CVMX_CIU_ADDR(addr, coreid, coremask, offset) \
+ (CVMX_ADD_IO_SEG(0x0001070000000000ull + addr##ull) + \
+ (((coreid) & (coremask)) * offset))
+
+#define CVMX_CIU_EN2_PPX_IP4(c) CVMX_CIU_ADDR(0xA400, c, 0x0F, 8)
+#define CVMX_CIU_EN2_PPX_IP4_W1C(c) CVMX_CIU_ADDR(0xCC00, c, 0x0F, 8)
+#define CVMX_CIU_EN2_PPX_IP4_W1S(c) CVMX_CIU_ADDR(0xAC00, c, 0x0F, 8)
+#define CVMX_CIU_FUSE CVMX_CIU_ADDR(0x0728, 0, 0x00, 0)
+#define CVMX_CIU_INT_SUM1 CVMX_CIU_ADDR(0x0108, 0, 0x00, 0)
+#define CVMX_CIU_INTX_EN0(c) CVMX_CIU_ADDR(0x0200, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN0_W1C(c) CVMX_CIU_ADDR(0x2200, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN0_W1S(c) CVMX_CIU_ADDR(0x6200, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN1(c) CVMX_CIU_ADDR(0x0208, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN1_W1C(c) CVMX_CIU_ADDR(0x2208, c, 0x3F, 16)
+#define CVMX_CIU_INTX_EN1_W1S(c) CVMX_CIU_ADDR(0x6208, c, 0x3F, 16)
+#define CVMX_CIU_INTX_SUM0(c) CVMX_CIU_ADDR(0x0000, c, 0x3F, 8)
+#define CVMX_CIU_NMI CVMX_CIU_ADDR(0x0718, 0, 0x00, 0)
+#define CVMX_CIU_PCI_INTA CVMX_CIU_ADDR(0x0750, 0, 0x00, 0)
+#define CVMX_CIU_PP_BIST_STAT CVMX_CIU_ADDR(0x07E0, 0, 0x00, 0)
+#define CVMX_CIU_PP_DBG CVMX_CIU_ADDR(0x0708, 0, 0x00, 0)
+#define CVMX_CIU_PP_RST CVMX_CIU_ADDR(0x0700, 0, 0x00, 0)
+#define CVMX_CIU_QLM0 CVMX_CIU_ADDR(0x0780, 0, 0x00, 0)
+#define CVMX_CIU_QLM1 CVMX_CIU_ADDR(0x0788, 0, 0x00, 0)
+#define CVMX_CIU_QLM_JTGC CVMX_CIU_ADDR(0x0768, 0, 0x00, 0)
+#define CVMX_CIU_QLM_JTGD CVMX_CIU_ADDR(0x0770, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_BIST CVMX_CIU_ADDR(0x0738, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_PRST1 CVMX_CIU_ADDR(0x0758, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_PRST CVMX_CIU_ADDR(0x0748, 0, 0x00, 0)
+#define CVMX_CIU_SOFT_RST CVMX_CIU_ADDR(0x0740, 0, 0x00, 0)
+#define CVMX_CIU_SUM2_PPX_IP4(c) CVMX_CIU_ADDR(0x8C00, c, 0x0F, 8)
+#define CVMX_CIU_TIM_MULTI_CAST CVMX_CIU_ADDR(0xC200, 0, 0x00, 0)
+#define CVMX_CIU_TIMX(c) CVMX_CIU_ADDR(0x0480, c, 0x0F, 8)
+
+static inline uint64_t CVMX_CIU_MBOX_CLRX(unsigned int coreid)
+{
+ if (cvmx_get_octeon_family() == (OCTEON_CN68XX & OCTEON_FAMILY_MASK))
+ return CVMX_CIU_ADDR(0x100100600, coreid, 0x0F, 8);
+ else
+ return CVMX_CIU_ADDR(0x000000680, coreid, 0x0F, 8);
+}
+
+static inline uint64_t CVMX_CIU_MBOX_SETX(unsigned int coreid)
+{
+ if (cvmx_get_octeon_family() == (OCTEON_CN68XX & OCTEON_FAMILY_MASK))
+ return CVMX_CIU_ADDR(0x100100400, coreid, 0x0F, 8);
+ else
+ return CVMX_CIU_ADDR(0x000000600, coreid, 0x0F, 8);
+}
+
+static inline uint64_t CVMX_CIU_PP_POKEX(unsigned int coreid)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_CIU_ADDR(0x100100200, coreid, 0x0F, 8);
+ case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
+ return CVMX_CIU_ADDR(0x000030000, coreid, 0x0F, 8) -
+ 0x60000000000ull;
+ default:
+ return CVMX_CIU_ADDR(0x000000580, coreid, 0x0F, 8);
+ }
+}
+
+static inline uint64_t CVMX_CIU_WDOGX(unsigned int coreid)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_CIU_ADDR(0x100100000, coreid, 0x0F, 8);
+ case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
+ return CVMX_CIU_ADDR(0x000020000, coreid, 0x0F, 8) -
+ 0x60000000000ull;
+ default:
+ return CVMX_CIU_ADDR(0x000000500, coreid, 0x0F, 8);
+ }
+}
+
+
+union cvmx_ciu_qlm {
+ uint64_t u64;
+ struct cvmx_ciu_qlm_s {
+ __BITFIELD_FIELD(uint64_t g2bypass:1,
+ __BITFIELD_FIELD(uint64_t reserved_53_62:10,
+ __BITFIELD_FIELD(uint64_t g2deemph:5,
+ __BITFIELD_FIELD(uint64_t reserved_45_47:3,
+ __BITFIELD_FIELD(uint64_t g2margin:5,
+ __BITFIELD_FIELD(uint64_t reserved_32_39:8,
+ __BITFIELD_FIELD(uint64_t txbypass:1,
+ __BITFIELD_FIELD(uint64_t reserved_21_30:10,
+ __BITFIELD_FIELD(uint64_t txdeemph:5,
+ __BITFIELD_FIELD(uint64_t reserved_13_15:3,
+ __BITFIELD_FIELD(uint64_t txmargin:5,
+ __BITFIELD_FIELD(uint64_t reserved_4_7:4,
+ __BITFIELD_FIELD(uint64_t lane_en:4,
+ ;)))))))))))))
+ } s;
+};
+
+union cvmx_ciu_qlm_jtgc {
+ uint64_t u64;
+ struct cvmx_ciu_qlm_jtgc_s {
+ __BITFIELD_FIELD(uint64_t reserved_17_63:47,
+ __BITFIELD_FIELD(uint64_t bypass_ext:1,
+ __BITFIELD_FIELD(uint64_t reserved_11_15:5,
+ __BITFIELD_FIELD(uint64_t clk_div:3,
+ __BITFIELD_FIELD(uint64_t reserved_7_7:1,
+ __BITFIELD_FIELD(uint64_t mux_sel:3,
+ __BITFIELD_FIELD(uint64_t bypass:4,
+ ;)))))))
+ } s;
+};
+
+union cvmx_ciu_qlm_jtgd {
+ uint64_t u64;
+ struct cvmx_ciu_qlm_jtgd_s {
+ __BITFIELD_FIELD(uint64_t capture:1,
+ __BITFIELD_FIELD(uint64_t shift:1,
+ __BITFIELD_FIELD(uint64_t update:1,
+ __BITFIELD_FIELD(uint64_t reserved_45_60:16,
+ __BITFIELD_FIELD(uint64_t select:5,
+ __BITFIELD_FIELD(uint64_t reserved_37_39:3,
+ __BITFIELD_FIELD(uint64_t shft_cnt:5,
+ __BITFIELD_FIELD(uint64_t shft_reg:32,
+ ;))))))))
+ } s;
+};
+
+union cvmx_ciu_soft_prst {
+ uint64_t u64;
+ struct cvmx_ciu_soft_prst_s {
+ __BITFIELD_FIELD(uint64_t reserved_3_63:61,
+ __BITFIELD_FIELD(uint64_t host64:1,
+ __BITFIELD_FIELD(uint64_t npi:1,
+ __BITFIELD_FIELD(uint64_t soft_prst:1,
+ ;))))
+ } s;
+};
+
+union cvmx_ciu_timx {
+ uint64_t u64;
+ struct cvmx_ciu_timx_s {
+ __BITFIELD_FIELD(uint64_t reserved_37_63:27,
+ __BITFIELD_FIELD(uint64_t one_shot:1,
+ __BITFIELD_FIELD(uint64_t len:36,
+ ;)))
+ } s;
+};
+
+union cvmx_ciu_wdogx {
+ uint64_t u64;
+ struct cvmx_ciu_wdogx_s {
+ __BITFIELD_FIELD(uint64_t reserved_46_63:18,
+ __BITFIELD_FIELD(uint64_t gstopen:1,
+ __BITFIELD_FIELD(uint64_t dstop:1,
+ __BITFIELD_FIELD(uint64_t cnt:24,
+ __BITFIELD_FIELD(uint64_t len:16,
+ __BITFIELD_FIELD(uint64_t state:2,
+ __BITFIELD_FIELD(uint64_t mode:2,
+ ;)))))))
+ } s;
+};
+
+#endif /* __CVMX_CIU_DEFS_H__ */
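Editor's note (not part of the patch): as an illustration of how the per-core register macros and bit-field unions above are used together, here is a small sketch. It assumes the usual cvmx_write_csr() accessor from <asm/octeon/cvmx.h>, and 'ticks' is an arbitrary example value.

	#include <asm/octeon/cvmx.h>
	#include <asm/octeon/cvmx-ciu-defs.h>

	/* Arm CIU timer 0 as a one-shot timer.  CVMX_CIU_TIMX(0) expands via
	 * CVMX_CIU_ADDR() to the timer-0 CSR address; the union packs the fields.
	 */
	static void example_arm_timer(uint64_t ticks)
	{
		union cvmx_ciu_timx tim;

		tim.u64 = 0;
		tim.s.one_shot = 1;
		tim.s.len = ticks & 0xFFFFFFFFFull;	/* len is a 36-bit field */
		cvmx_write_csr(CVMX_CIU_TIMX(0), tim.u64);
	}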
diff --git a/arch/mips/include/asm/octeon/cvmx-ciu2-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu2-defs.h
new file mode 100644
index 000000000..5babd88d4
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-ciu2-defs.h
@@ -0,0 +1,48 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_CIU2_DEFS_H__
+#define __CVMX_CIU2_DEFS_H__
+
+#define CVMX_CIU2_ACK_PPX_IP2(block_id) (CVMX_ADD_IO_SEG(0x00010701000C0000ull) + ((block_id) & 31) * 0x200000ull)
+#define CVMX_CIU2_ACK_PPX_IP3(block_id) (CVMX_ADD_IO_SEG(0x00010701000C0200ull) + ((block_id) & 31) * 0x200000ull)
+#define CVMX_CIU2_EN_PPX_IP2_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100092000ull) + ((block_id) & 31) * 0x200000ull)
+#define CVMX_CIU2_EN_PPX_IP2_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100091000ull) + ((block_id) & 31) * 0x200000ull)
+#define CVMX_CIU2_EN_PPX_IP2_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100090000ull) + ((block_id) & 31) * 0x200000ull)
+#define CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B0000ull) + ((block_id) & 31) * 0x200000ull)
+#define CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A0000ull) + ((block_id) & 31) * 0x200000ull)
+#define CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B8200ull) + ((block_id) & 31) * 0x200000ull)
+#define CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A8200ull) + ((block_id) & 31) * 0x200000ull)
+#define CVMX_CIU2_INTR_CIU_READY (CVMX_ADD_IO_SEG(0x0001070100102008ull))
+#define CVMX_CIU2_RAW_PPX_IP2_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100040000ull) + ((block_id) & 31) * 0x200000ull)
+#define CVMX_CIU2_SRC_PPX_IP2_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100082000ull) + ((block_id) & 31) * 0x200000ull)
+#define CVMX_CIU2_SRC_PPX_IP2_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100081000ull) + ((block_id) & 31) * 0x200000ull)
+#define CVMX_CIU2_SRC_PPX_IP2_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100080000ull) + ((block_id) & 31) * 0x200000ull)
+#define CVMX_CIU2_SUM_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x0001070100000000ull) + ((offset) & 31) * 8)
+#define CVMX_CIU2_SUM_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x0001070100000200ull) + ((offset) & 31) * 8)
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-ciu3-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu3-defs.h
new file mode 100644
index 000000000..547f778f5
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-ciu3-defs.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2003-2016 Cavium Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ */
+
+#ifndef __CVMX_CIU3_DEFS_H__
+#define __CVMX_CIU3_DEFS_H__
+
+#define CVMX_CIU3_FUSE CVMX_ADD_IO_SEG(0x00010100000001A0ull)
+#define CVMX_CIU3_BIST CVMX_ADD_IO_SEG(0x00010100000001C0ull)
+#define CVMX_CIU3_CONST CVMX_ADD_IO_SEG(0x0001010000000220ull)
+#define CVMX_CIU3_CTL CVMX_ADD_IO_SEG(0x00010100000000E0ull)
+#define CVMX_CIU3_DESTX_IO_INT(offset) (CVMX_ADD_IO_SEG(0x0001010000210000ull) + ((offset) & 7) * 8)
+#define CVMX_CIU3_DESTX_PP_INT(offset) (CVMX_ADD_IO_SEG(0x0001010000200000ull) + ((offset) & 255) * 8)
+#define CVMX_CIU3_GSTOP CVMX_ADD_IO_SEG(0x0001010000000140ull)
+#define CVMX_CIU3_IDTX_CTL(offset) (CVMX_ADD_IO_SEG(0x0001010000110000ull) + ((offset) & 255) * 8)
+#define CVMX_CIU3_IDTX_IO(offset) (CVMX_ADD_IO_SEG(0x0001010000130000ull) + ((offset) & 255) * 8)
+#define CVMX_CIU3_IDTX_PPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001010000120000ull) + ((block_id) & 255) * 0x20ull)
+#define CVMX_CIU3_INTR_RAM_ECC_CTL CVMX_ADD_IO_SEG(0x0001010000000260ull)
+#define CVMX_CIU3_INTR_RAM_ECC_ST CVMX_ADD_IO_SEG(0x0001010000000280ull)
+#define CVMX_CIU3_INTR_READY CVMX_ADD_IO_SEG(0x00010100000002A0ull)
+#define CVMX_CIU3_INTR_SLOWDOWN CVMX_ADD_IO_SEG(0x0001010000000240ull)
+#define CVMX_CIU3_ISCX_CTL(offset) (CVMX_ADD_IO_SEG(0x0001010080000000ull) + ((offset) & 1048575) * 8)
+#define CVMX_CIU3_ISCX_W1C(offset) (CVMX_ADD_IO_SEG(0x0001010090000000ull) + ((offset) & 1048575) * 8)
+#define CVMX_CIU3_ISCX_W1S(offset) (CVMX_ADD_IO_SEG(0x00010100A0000000ull) + ((offset) & 1048575) * 8)
+#define CVMX_CIU3_NMI CVMX_ADD_IO_SEG(0x0001010000000160ull)
+#define CVMX_CIU3_SISCX(offset) (CVMX_ADD_IO_SEG(0x0001010000220000ull) + ((offset) & 255) * 8)
+#define CVMX_CIU3_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001010000010000ull) + ((offset) & 15) * 8)
+
+union cvmx_ciu3_bist {
+ uint64_t u64;
+ struct cvmx_ciu3_bist_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t bist : 9;
+#else
+ uint64_t bist : 9;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_const {
+ uint64_t u64;
+ struct cvmx_ciu3_const_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dests_io : 16;
+ uint64_t pintsn : 16;
+ uint64_t dests_pp : 16;
+ uint64_t idt : 16;
+#else
+ uint64_t idt : 16;
+ uint64_t dests_pp : 16;
+ uint64_t pintsn : 16;
+ uint64_t dests_io : 16;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_ctl {
+ uint64_t u64;
+ struct cvmx_ciu3_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t mcd_sel : 2;
+ uint64_t iscmem_le : 1;
+ uint64_t seq_dis : 1;
+ uint64_t cclk_dis : 1;
+#else
+ uint64_t cclk_dis : 1;
+ uint64_t seq_dis : 1;
+ uint64_t iscmem_le : 1;
+ uint64_t mcd_sel : 2;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_destx_io_int {
+ uint64_t u64;
+ struct cvmx_ciu3_destx_io_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63 : 12;
+ uint64_t intsn : 20;
+ uint64_t reserved_10_31 : 22;
+ uint64_t intidt : 8;
+ uint64_t newint : 1;
+ uint64_t intr : 1;
+#else
+ uint64_t intr : 1;
+ uint64_t newint : 1;
+ uint64_t intidt : 8;
+ uint64_t reserved_10_31 : 22;
+ uint64_t intsn : 20;
+ uint64_t reserved_52_63 : 12;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_destx_pp_int {
+ uint64_t u64;
+ struct cvmx_ciu3_destx_pp_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63 : 12;
+ uint64_t intsn : 20;
+ uint64_t reserved_10_31 : 22;
+ uint64_t intidt : 8;
+ uint64_t newint : 1;
+ uint64_t intr : 1;
+#else
+ uint64_t intr : 1;
+ uint64_t newint : 1;
+ uint64_t intidt : 8;
+ uint64_t reserved_10_31 : 22;
+ uint64_t intsn : 20;
+ uint64_t reserved_52_63 : 12;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_gstop {
+ uint64_t u64;
+ struct cvmx_ciu3_gstop_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t gstop : 1;
+#else
+ uint64_t gstop : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_idtx_ctl {
+ uint64_t u64;
+ struct cvmx_ciu3_idtx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63 : 12;
+ uint64_t intsn : 20;
+ uint64_t reserved_4_31 : 28;
+ uint64_t intr : 1;
+ uint64_t newint : 1;
+ uint64_t ip_num : 2;
+#else
+ uint64_t ip_num : 2;
+ uint64_t newint : 1;
+ uint64_t intr : 1;
+ uint64_t reserved_4_31 : 28;
+ uint64_t intsn : 20;
+ uint64_t reserved_52_63 : 12;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_idtx_io {
+ uint64_t u64;
+ struct cvmx_ciu3_idtx_io_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t io : 5;
+#else
+ uint64_t io : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_idtx_ppx {
+ uint64_t u64;
+ struct cvmx_ciu3_idtx_ppx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t pp : 48;
+#else
+ uint64_t pp : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_intr_ram_ecc_ctl {
+ uint64_t u64;
+ struct cvmx_ciu3_intr_ram_ecc_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t flip_synd : 2;
+ uint64_t ecc_ena : 1;
+#else
+ uint64_t ecc_ena : 1;
+ uint64_t flip_synd : 2;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_intr_ram_ecc_st {
+ uint64_t u64;
+ struct cvmx_ciu3_intr_ram_ecc_st_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63 : 12;
+ uint64_t addr : 20;
+ uint64_t reserved_6_31 : 26;
+ uint64_t sisc_dbe : 1;
+ uint64_t sisc_sbe : 1;
+ uint64_t idt_dbe : 1;
+ uint64_t idt_sbe : 1;
+ uint64_t isc_dbe : 1;
+ uint64_t isc_sbe : 1;
+#else
+ uint64_t isc_sbe : 1;
+ uint64_t isc_dbe : 1;
+ uint64_t idt_sbe : 1;
+ uint64_t idt_dbe : 1;
+ uint64_t sisc_sbe : 1;
+ uint64_t sisc_dbe : 1;
+ uint64_t reserved_6_31 : 26;
+ uint64_t addr : 20;
+ uint64_t reserved_52_63 : 12;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_intr_ready {
+ uint64_t u64;
+ struct cvmx_ciu3_intr_ready_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63 : 18;
+ uint64_t index : 14;
+ uint64_t reserved_1_31 : 31;
+ uint64_t ready : 1;
+#else
+ uint64_t ready : 1;
+ uint64_t reserved_1_31 : 31;
+ uint64_t index : 14;
+ uint64_t reserved_46_63 : 18;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_intr_slowdown {
+ uint64_t u64;
+ struct cvmx_ciu3_intr_slowdown_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t ctl : 3;
+#else
+ uint64_t ctl : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_iscx_ctl {
+ uint64_t u64;
+ struct cvmx_ciu3_iscx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t idt : 8;
+ uint64_t imp : 1;
+ uint64_t reserved_2_14 : 13;
+ uint64_t en : 1;
+ uint64_t raw : 1;
+#else
+ uint64_t raw : 1;
+ uint64_t en : 1;
+ uint64_t reserved_2_14 : 13;
+ uint64_t imp : 1;
+ uint64_t idt : 8;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_iscx_w1c {
+ uint64_t u64;
+ struct cvmx_ciu3_iscx_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t en : 1;
+ uint64_t raw : 1;
+#else
+ uint64_t raw : 1;
+ uint64_t en : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_iscx_w1s {
+ uint64_t u64;
+ struct cvmx_ciu3_iscx_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t en : 1;
+ uint64_t raw : 1;
+#else
+ uint64_t raw : 1;
+ uint64_t en : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_nmi {
+ uint64_t u64;
+ struct cvmx_ciu3_nmi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t nmi : 48;
+#else
+ uint64_t nmi : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_siscx {
+ uint64_t u64;
+ struct cvmx_ciu3_siscx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t en : 64;
+#else
+ uint64_t en : 64;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_timx {
+ uint64_t u64;
+ struct cvmx_ciu3_timx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63 : 27;
+ uint64_t one_shot : 1;
+ uint64_t len : 36;
+#else
+ uint64_t len : 36;
+ uint64_t one_shot : 1;
+ uint64_t reserved_37_63 : 27;
+#endif
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
new file mode 100644
index 000000000..a07a36f7d
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
@@ -0,0 +1,619 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Support functions for managing command queues used for
+ * various hardware blocks.
+ *
+ * The common command queue infrastructure abstracts out the
+ * software necessary for adding to Octeon's chained queue
+ * structures. These structures are used for commands to the
+ * PKO, ZIP, DFA, RAID, and DMA engine blocks. Although each
+ * hardware unit takes commands and CSRs of different types,
+ * they all use basic linked command buffers to store the
+ * pending request. In general, users of the CVMX API don't
+ * call cvmx-cmd-queue functions directly. Instead the hardware
+ * unit specific wrapper should be used. The wrappers perform
+ * unit specific validation and CSR writes to submit the
+ * commands.
+ *
+ * Even though most software will never directly interact with
+ * cvmx-cmd-queue, knowledge of its internal working can help
+ * in diagnosing performance problems and help with debugging.
+ *
+ * Command queue pointers are stored in a global named block
+ * called "cvmx_cmd_queues". Except for the PKO queues, each
+ * hardware queue is stored in its own cache line to reduce SMP
+ * contention on spin locks. The PKO queues are stored such that
+ * every 16th queue is next to each other in memory. This scheme
+ * allows queues to sit in separate cache lines when there
+ * is a low number of queues per port. With 16 queues per port,
+ * the first queue for each port is in the same cache area. The
+ * second queues for each port are in another area, etc. This
+ * allows software to implement very efficient lockless PKO with
+ * 16 queues per port using a minimum of cache lines per core.
+ * All queues for a given core will be isolated in the same
+ * cache area.
+ *
+ * In addition to the memory pointer layout, cvmx-cmd-queue
+ * provides an optimized fair ll/sc locking mechanism for the
+ * queues. The lock uses a "ticket / now serving" model to
+ * maintain fair order on contended locks. In addition, it uses
+ * predicted locking time to limit cache contention. When a core
+ * knows it must wait in line for a lock, it spins on the
+ * internal cycle counter to completely eliminate any causes of
+ * bus traffic.
+ *
+ */
+
+#ifndef __CVMX_CMD_QUEUE_H__
+#define __CVMX_CMD_QUEUE_H__
+
+#include <linux/prefetch.h>
+
+#include <asm/compiler.h>
+
+#include <asm/octeon/cvmx-fpa.h>
+/**
+ * By default we disable the max depth support. Most programs
+ * don't use it and it slows down the command queue processing
+ * significantly.
+ */
+#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
+#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
+#endif
+
+/**
+ * Enumeration representing all hardware blocks that use command
+ * queues. Each hardware block has up to 65536 sub identifiers for
+ * multiple command queues. Not all chips support all hardware
+ * units.
+ */
+typedef enum {
+ CVMX_CMD_QUEUE_PKO_BASE = 0x00000,
+
+#define CVMX_CMD_QUEUE_PKO(queue) \
+ ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))
+
+ CVMX_CMD_QUEUE_ZIP = 0x10000,
+ CVMX_CMD_QUEUE_DFA = 0x20000,
+ CVMX_CMD_QUEUE_RAID = 0x30000,
+ CVMX_CMD_QUEUE_DMA_BASE = 0x40000,
+
+#define CVMX_CMD_QUEUE_DMA(queue) \
+ ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))
+
+ CVMX_CMD_QUEUE_END = 0x50000,
+} cvmx_cmd_queue_id_t;
+
+/**
+ * Command write operations can fail if the command queue needs
+ * a new buffer and the associated FPA pool is empty. It can also
+ * fail if the number of queued command words reaches the maximum
+ * set at initialization.
+ */
+typedef enum {
+ CVMX_CMD_QUEUE_SUCCESS = 0,
+ CVMX_CMD_QUEUE_NO_MEMORY = -1,
+ CVMX_CMD_QUEUE_FULL = -2,
+ CVMX_CMD_QUEUE_INVALID_PARAM = -3,
+ CVMX_CMD_QUEUE_ALREADY_SETUP = -4,
+} cvmx_cmd_queue_result_t;
+
+typedef struct {
+ /* You have lock when this is your ticket */
+ uint8_t now_serving;
+ uint64_t unused1:24;
+ /* Maximum outstanding command words */
+ uint32_t max_depth;
+ /* FPA pool buffers come from */
+ uint64_t fpa_pool:3;
+ /* Top of command buffer pointer shifted 7 */
+ uint64_t base_ptr_div128:29;
+ uint64_t unused2:6;
+ /* FPA buffer size in 64bit words minus 1 */
+ uint64_t pool_size_m1:13;
+ /* Number of commands already used in buffer */
+ uint64_t index:13;
+} __cvmx_cmd_queue_state_t;
+
+/**
+ * This structure contains the global state of all command queues.
+ * It is stored in a bootmem named block and shared by all
+ * applications running on Octeon. Tickets are stored in a different
+ * cache line than the queue information to reduce contention on the
+ * ll/sc used to get a ticket. If this is not the case, the update
+ * of queue state causes the ll/sc to fail quite often.
+ */
+typedef struct {
+ uint64_t ticket[(CVMX_CMD_QUEUE_END >> 16) * 256];
+ __cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END >> 16) * 256];
+} __cvmx_cmd_queue_all_state_t;
+
+/**
+ * Initialize a command queue for use. The initial FPA buffer is
+ * allocated and the hardware unit is configured to point to the
+ * new command queue.
+ *
+ * @queue_id: Hardware command queue to initialize.
+ * @max_depth: Maximum outstanding commands that can be queued.
+ * @fpa_pool: FPA pool the command queues should come from.
+ * @pool_size: Size of each buffer in the FPA pool (bytes)
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
+ int max_depth, int fpa_pool,
+ int pool_size);
+
+/**
+ * Shut down a queue and free its command buffers to the FPA. The
+ * hardware connected to the queue must be stopped before this
+ * function is called.
+ *
+ * @queue_id: Queue to shutdown
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);
+
+/**
+ * Return the number of command words pending in the queue. This
+ * function may be relatively slow for some hardware units.
+ *
+ * @queue_id: Hardware command queue to query
+ *
+ * Returns Number of outstanding commands
+ */
+int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);
+
+/**
+ * Return the command buffer to be written to. The purpose of this
+ * function is to allow CVMX routines access to the low-level buffer
+ * for initial hardware setup. User applications should not call this
+ * function directly.
+ *
+ * @queue_id: Command queue to query
+ *
+ * Returns Command buffer or NULL on failure
+ */
+void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);
+
+/**
+ * Get the index into the state arrays for the supplied queue id.
+ *
+ * @queue_id: Queue ID to get an index for
+ *
+ * Returns Index into the state arrays
+ */
+static inline int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
+{
+ /*
+ * Warning: This code currently only works with devices that
+ * have 256 queues or less. Devices with more than 16 queues
+ * are laid out in memory to allow cores quick access to
+ * every 16th queue. This reduces cache thrashing when you are
+ * running 16 queues per port to support lockless operation.
+ */
+ int unit = queue_id >> 16;
+ int q = (queue_id >> 4) & 0xf;
+ int core = queue_id & 0xf;
+ return unit * 256 + core * 16 + q;
+}
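+
+/*
+ * Worked example (illustrative only): PKO queue 0x12 gives unit = 0,
+ * q = 1 and core = 2, so its state index is 2 * 16 + 1 = 33, while PKO
+ * queue 0x02 maps to index 32.  Queue numbers 16 apart therefore sit
+ * next to each other in the state array, and consecutive queue numbers
+ * are 16 entries apart, matching the layout described at the top of
+ * this file.
+ */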
+
+/**
+ * Lock the supplied queue so nobody else is updating it at the same
+ * time as us.
+ *
+ * @queue_id: Queue ID to lock
+ * @qptr: Pointer to the queue's global state
+ */
+static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
+ __cvmx_cmd_queue_state_t *qptr)
+{
+ extern __cvmx_cmd_queue_all_state_t
+ *__cvmx_cmd_queue_state_ptr;
+ int tmp;
+ int my_ticket;
+ prefetch(qptr);
+ asm volatile (
+ ".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ /* Atomic add one to ticket_ptr */
+ "ll %[my_ticket], %[ticket_ptr]\n"
+ /* and store the original value */
+ "li %[ticket], 1\n"
+ /* in my_ticket */
+ "baddu %[ticket], %[my_ticket]\n"
+ "sc %[ticket], %[ticket_ptr]\n"
+ "beqz %[ticket], 1b\n"
+ " nop\n"
+ /* Load the current now_serving ticket */
+ "lbu %[ticket], %[now_serving]\n"
+ "2:\n"
+ /* Jump out if now_serving == my_ticket */
+ "beq %[ticket], %[my_ticket], 4f\n"
+ /* Find out how many tickets are in front of me */
+ " subu %[ticket], %[my_ticket], %[ticket]\n"
+ /* Use tickets in front of me minus one to delay */
+ "subu %[ticket], 1\n"
+ /* Delay will be ((tickets in front)-1)*32 loops */
+ "cins %[ticket], %[ticket], 5, 7\n"
+ "3:\n"
+ /* Loop here until our ticket might be up */
+ "bnez %[ticket], 3b\n"
+ " subu %[ticket], 1\n"
+ /* Jump back up to check out ticket again */
+ "b 2b\n"
+ /* Load the current now_serving ticket */
+ " lbu %[ticket], %[now_serving]\n"
+ "4:\n"
+ ".set pop\n" :
+ [ticket_ptr] "=" GCC_OFF_SMALL_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
+ [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
+ [my_ticket] "=r"(my_ticket)
+ );
+}
+
+/**
+ * Unlock the queue, flushing all writes.
+ *
+ * @qptr: Queue to unlock
+ */
+static inline void __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_state_t *qptr)
+{
+ qptr->now_serving++;
+ CVMX_SYNCWS;
+}
+
+/**
+ * Get the queue state structure for the given queue id
+ *
+ * @queue_id: Queue id to get
+ *
+ * Returns Queue structure or NULL on failure
+ */
+static inline __cvmx_cmd_queue_state_t
+ *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
+{
+ extern __cvmx_cmd_queue_all_state_t
+ *__cvmx_cmd_queue_state_ptr;
+ return &__cvmx_cmd_queue_state_ptr->
+ state[__cvmx_cmd_queue_get_index(queue_id)];
+}
+
+/**
+ * Write an arbitrary number of command words to a command queue.
+ * This is a generic function; the fixed number of command word
+ * functions yield higher performance.
+ *
+ * @queue_id: Hardware command queue to write to
+ * @use_locking:
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
+ * @cmd_count: Number of command words to write
+ * @cmds: Array of commands to write
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t
+ queue_id,
+ int use_locking,
+ int cmd_count,
+ uint64_t *cmds)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+
+ /* Make sure nobody else is updating the same queue */
+ if (likely(use_locking))
+ __cvmx_cmd_queue_lock(queue_id, qptr);
+
+ /*
+ * If a max queue length was specified then make sure we don't
+ * exceed it. If any part of the command would be below the
+ * limit we allow it.
+ */
+ if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
+ if (unlikely
+ (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_FULL;
+ }
+ }
+
+ /*
+ * Normally there is plenty of room in the current buffer for
+ * the command.
+ */
+ if (likely(qptr->index + cmd_count < qptr->pool_size_m1)) {
+ uint64_t *ptr =
+ (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+ base_ptr_div128 << 7);
+ ptr += qptr->index;
+ qptr->index += cmd_count;
+ while (cmd_count--)
+ *ptr++ = *cmds++;
+ } else {
+ uint64_t *ptr;
+ int count;
+ /*
+ * We need a new command buffer. Fail if there isn't
+ * one available.
+ */
+ uint64_t *new_buffer =
+ (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
+ if (unlikely(new_buffer == NULL)) {
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ ptr =
+ (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+ base_ptr_div128 << 7);
+ /*
+ * Figure out how many command words will fit in this
+ * buffer. One location will be needed for the next
+ * buffer pointer.
+ */
+ count = qptr->pool_size_m1 - qptr->index;
+ ptr += qptr->index;
+ cmd_count -= count;
+ while (count--)
+ *ptr++ = *cmds++;
+ *ptr = cvmx_ptr_to_phys(new_buffer);
+ /*
+ * The current buffer is full and has a link to the
+ * next buffer. Time to write the rest of the commands
+ * into the new buffer.
+ */
+ qptr->base_ptr_div128 = *ptr >> 7;
+ qptr->index = cmd_count;
+ ptr = new_buffer;
+ while (cmd_count--)
+ *ptr++ = *cmds++;
+ }
+
+ /* All updates are complete. Release the lock and return */
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Simple function to write two command words to a command
+ * queue.
+ *
+ * @queue_id: Hardware command queue to write to
+ * @use_locking:
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
+ * @cmd1: Command
+ * @cmd2: Command
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t
+ queue_id,
+ int use_locking,
+ uint64_t cmd1,
+ uint64_t cmd2)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+
+ /* Make sure nobody else is updating the same queue */
+ if (likely(use_locking))
+ __cvmx_cmd_queue_lock(queue_id, qptr);
+
+ /*
+ * If a max queue length was specified then make sure we don't
+ * exceed it. If any part of the command would be below the
+ * limit we allow it.
+ */
+ if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
+ if (unlikely
+ (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_FULL;
+ }
+ }
+
+ /*
+ * Normally there is plenty of room in the current buffer for
+ * the command.
+ */
+ if (likely(qptr->index + 2 < qptr->pool_size_m1)) {
+ uint64_t *ptr =
+ (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+ base_ptr_div128 << 7);
+ ptr += qptr->index;
+ qptr->index += 2;
+ ptr[0] = cmd1;
+ ptr[1] = cmd2;
+ } else {
+ uint64_t *ptr;
+ /*
+ * Figure out how many command words will fit in this
+ * buffer. One location will be needed for the next
+ * buffer pointer.
+ */
+ int count = qptr->pool_size_m1 - qptr->index;
+ /*
+ * We need a new command buffer. Fail if there isn't
+ * one available.
+ */
+ uint64_t *new_buffer =
+ (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
+ if (unlikely(new_buffer == NULL)) {
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ count--;
+ ptr =
+ (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+ base_ptr_div128 << 7);
+ ptr += qptr->index;
+ *ptr++ = cmd1;
+ if (likely(count))
+ *ptr++ = cmd2;
+ *ptr = cvmx_ptr_to_phys(new_buffer);
+ /*
+ * The current buffer is full and has a link to the
+ * next buffer. Time to write the rest of the commands
+ * into the new buffer.
+ */
+ qptr->base_ptr_div128 = *ptr >> 7;
+ qptr->index = 0;
+ if (unlikely(count == 0)) {
+ qptr->index = 1;
+ new_buffer[0] = cmd2;
+ }
+ }
+
+ /* All updates are complete. Release the lock and return */
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Simple function to write three command words to a command
+ * queue.
+ *
+ * @queue_id: Hardware command queue to write to
+ * @use_locking:
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
+ * @cmd1: Command
+ * @cmd2: Command
+ * @cmd3: Command
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t
+ queue_id,
+ int use_locking,
+ uint64_t cmd1,
+ uint64_t cmd2,
+ uint64_t cmd3)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+
+ /* Make sure nobody else is updating the same queue */
+ if (likely(use_locking))
+ __cvmx_cmd_queue_lock(queue_id, qptr);
+
+ /*
+ * If a max queue length was specified then make sure we don't
+ * exceed it. If any part of the command would be below the
+ * limit we allow it.
+ */
+ if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
+ if (unlikely
+ (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_FULL;
+ }
+ }
+
+ /*
+ * Normally there is plenty of room in the current buffer for
+ * the command.
+ */
+ if (likely(qptr->index + 3 < qptr->pool_size_m1)) {
+ uint64_t *ptr =
+ (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+ base_ptr_div128 << 7);
+ ptr += qptr->index;
+ qptr->index += 3;
+ ptr[0] = cmd1;
+ ptr[1] = cmd2;
+ ptr[2] = cmd3;
+ } else {
+ uint64_t *ptr;
+ /*
+ * Figure out how many command words will fit in this
+ * buffer. One location will be needed for the next
+ * buffer pointer
+ */
+ int count = qptr->pool_size_m1 - qptr->index;
+ /*
+ * We need a new command buffer. Fail if there isn't
+ * one available
+ */
+ uint64_t *new_buffer =
+ (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
+ if (unlikely(new_buffer == NULL)) {
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ count--;
+ ptr =
+ (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+ base_ptr_div128 << 7);
+ ptr += qptr->index;
+ *ptr++ = cmd1;
+ if (count) {
+ *ptr++ = cmd2;
+ if (count > 1)
+ *ptr++ = cmd3;
+ }
+ *ptr = cvmx_ptr_to_phys(new_buffer);
+ /*
+ * The current buffer is full and has a link to the
+ * next buffer. Time to write the rest of the commands
+ * into the new buffer.
+ */
+ qptr->base_ptr_div128 = *ptr >> 7;
+ qptr->index = 0;
+ ptr = new_buffer;
+ if (count == 0) {
+ *ptr++ = cmd2;
+ qptr->index++;
+ }
+ if (count < 2) {
+ *ptr++ = cmd3;
+ qptr->index++;
+ }
+ }
+
+ /* All updates are complete. Release the lock and return */
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+#endif /* __CVMX_CMD_QUEUE_H__ */
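Editor's note (not part of the patch): for context, a minimal sketch of how a hardware-specific wrapper might drive this API. The pool constants come from cvmx-config.h below, and word0/word1 stand in for a real hardware command.

	#include <asm/octeon/cvmx.h>
	#include <asm/octeon/cvmx-config.h>
	#include <asm/octeon/cvmx-cmd-queue.h>

	/* Initialize PKO command queue 0 and push a two-word command with locking. */
	static int example_queue_write(uint64_t word0, uint64_t word1)
	{
		cvmx_cmd_queue_result_t rc;

		rc = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO(0),
					       0 /* no depth limit */,
					       CVMX_FPA_OUTPUT_BUFFER_POOL,
					       CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
		if (rc != CVMX_CMD_QUEUE_SUCCESS && rc != CVMX_CMD_QUEUE_ALREADY_SETUP)
			return -1;

		rc = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(0), 1 /* use locking */,
					   word0, word1);
		return rc == CVMX_CMD_QUEUE_SUCCESS ? 0 : -1;
	}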
diff --git a/arch/mips/include/asm/octeon/cvmx-config.h b/arch/mips/include/asm/octeon/cvmx-config.h
new file mode 100644
index 000000000..a8c358c02
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-config.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __CVMX_CONFIG_H__
+#define __CVMX_CONFIG_H__
+
+/************************* Config Specific Defines ************************/
+#define CVMX_LLM_NUM_PORTS 1
+#define CVMX_NULL_POINTER_PROTECT 1
+#define CVMX_ENABLE_DEBUG_PRINTS 1
+/* PKO queues per port for interface 0 (ports 0-15) */
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 1
+/* PKO queues per port for interface 1 (ports 16-31) */
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 1
+/* Limit on the number of PKO ports enabled for interface 0 */
+#define CVMX_PKO_MAX_PORTS_INTERFACE0 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
+/* Limit on the number of PKO ports enabled for interface 1 */
+#define CVMX_PKO_MAX_PORTS_INTERFACE1 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
+/* PKO queues per port for PCI (ports 32-35) */
+#define CVMX_PKO_QUEUES_PER_PORT_PCI 1
+/* PKO queues per port for Loop devices (ports 36-39) */
+#define CVMX_PKO_QUEUES_PER_PORT_LOOP 1
+
+/************************* FPA allocation *********************************/
+/* Pool sizes in bytes, must be multiple of a cache line */
+#define CVMX_FPA_POOL_0_SIZE (16 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_1_SIZE (1 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_2_SIZE (8 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_3_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_4_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_5_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_6_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_7_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+
+/* Pools in use */
+/* Packet buffers */
+#define CVMX_FPA_PACKET_POOL (0)
+#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
+/* Work queue entries */
+#define CVMX_FPA_WQE_POOL (1)
+#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
+/* PKO queue command buffers */
+#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
+#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE CVMX_FPA_POOL_2_SIZE
+
+/************************* FAU allocation ********************************/
+/* The fetch and add registers are allocated here. They are arranged
+ * in order of descending size so that all alignment constraints are
+ * automatically met. The enums are chained so that each enum
+ * continues allocating where the previous one left off, which is why
+ * the numbering within each enum always starts at zero. The macros
+ * take care of the address increment size, so the values entered
+ * always increase by 1. FAU registers are accessed with byte
+ * addresses.
+ */
+
+#define CVMX_FAU_REG_64_ADDR(x) ((x << 3) + CVMX_FAU_REG_64_START)
+typedef enum {
+ CVMX_FAU_REG_64_START = 0,
+ CVMX_FAU_REG_64_END = CVMX_FAU_REG_64_ADDR(0),
+} cvmx_fau_reg_64_t;
+
+#define CVMX_FAU_REG_32_ADDR(x) ((x << 2) + CVMX_FAU_REG_32_START)
+typedef enum {
+ CVMX_FAU_REG_32_START = CVMX_FAU_REG_64_END,
+ CVMX_FAU_REG_32_END = CVMX_FAU_REG_32_ADDR(0),
+} cvmx_fau_reg_32_t;
+
+#define CVMX_FAU_REG_16_ADDR(x) ((x << 1) + CVMX_FAU_REG_16_START)
+typedef enum {
+ CVMX_FAU_REG_16_START = CVMX_FAU_REG_32_END,
+ CVMX_FAU_REG_16_END = CVMX_FAU_REG_16_ADDR(0),
+} cvmx_fau_reg_16_t;
+
+#define CVMX_FAU_REG_8_ADDR(x) ((x) + CVMX_FAU_REG_8_START)
+typedef enum {
+ CVMX_FAU_REG_8_START = CVMX_FAU_REG_16_END,
+ CVMX_FAU_REG_8_END = CVMX_FAU_REG_8_ADDR(0),
+} cvmx_fau_reg_8_t;
+
+/*
+ * The name CVMX_FAU_REG_AVAIL_BASE is provided to indicate the first
+ * available FAU address that is not allocated in cvmx-config.h. This
+ * is 64 bit aligned.
+ */
+#define CVMX_FAU_REG_AVAIL_BASE ((CVMX_FAU_REG_8_END + 0x7) & (~0x7ULL))
+#define CVMX_FAU_REG_END (2048)
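As an illustration of the scheme described above, allocating a first
64-bit FAU register would look roughly like this; the
CVMX_FAU_REG_64_PACKETS name is hypothetical, and only the index passed
to CVMX_FAU_REG_64_ADDR() changes as registers are added:

typedef enum {
	CVMX_FAU_REG_64_START = 0,
	/* hypothetical example: one 64-bit packet counter */
	CVMX_FAU_REG_64_PACKETS = CVMX_FAU_REG_64_ADDR(0),
	/* the _END marker moves up by one allocated register */
	CVMX_FAU_REG_64_END = CVMX_FAU_REG_64_ADDR(1),
} cvmx_fau_reg_64_t;

With such a change CVMX_FAU_REG_32_START (and ultimately
CVMX_FAU_REG_AVAIL_BASE) shift up automatically, since each enum starts
where the previous one ended.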
+
+/********************** scratch memory allocation *************************/
+/* Scratchpad memory allocation. Note that these are byte memory
+ * addresses. Some uses of scratchpad (IOBDMA for example) require
+ * the use of 8-byte aligned addresses, so proper alignment needs to
+ * be taken into account.
+ */
+/* Generic scratch iobdma area */
+#define CVMX_SCR_SCRATCH (0)
+/* First location available after cvmx-config.h allocated region. */
+#define CVMX_SCR_REG_AVAIL_BASE (8)
+
+/*
+ * CVMX_HELPER_FIRST_MBUFF_SKIP is the number of bytes to reserve
+ * before the beginning of the packet. If necessary, override the
+ * default here. See the IPD section of the hardware manual for MBUFF
+ * SKIP details.
+ */
+#define CVMX_HELPER_FIRST_MBUFF_SKIP 184
+
+/*
+ * CVMX_HELPER_NOT_FIRST_MBUFF_SKIP is the number of bytes to reserve
+ * in each chained packet element. If necessary, override the default
+ * here.
+ */
+#define CVMX_HELPER_NOT_FIRST_MBUFF_SKIP 0
+
+/*
+ * CVMX_HELPER_ENABLE_BACK_PRESSURE controls whether back pressure is
+ * enabled for all input ports. When set, the IPD sends backpressure
+ * to all ports whenever Octeon's FPA pools run short of packet or
+ * work queue entries. Even when this is off, it is still
+ * possible to get backpressure from individual hardware ports. When
+ * configuring backpressure, also check
+ * CVMX_HELPER_DISABLE_*_BACKPRESSURE below. If necessary, override
+ * the default here.
+ */
+#define CVMX_HELPER_ENABLE_BACK_PRESSURE 1
+
+/*
+ * CVMX_HELPER_ENABLE_IPD controls whether the IPD is enabled in the
+ * helper function. Once it is enabled the hardware starts accepting
+ * packets. You might want to skip the IPD enable if configuration
+ * changes are needed from the default helper setup. If necessary,
+ * override the default here.
+ */
+#define CVMX_HELPER_ENABLE_IPD 0
+
+/*
+ * CVMX_HELPER_INPUT_TAG_TYPE selects the type of tag that the IPD assigns
+ * to incoming packets.
+ */
+#define CVMX_HELPER_INPUT_TAG_TYPE CVMX_POW_TAG_TYPE_ORDERED
+
+#define CVMX_ENABLE_PARAMETER_CHECKING 0
+
+/*
+ * The following select which fields are used by the PIP to generate
+ * the tag on INPUT
+ * 0: don't include
+ * 1: include
+ */
+#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_DST_IP 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_DST_IP 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL 0
+#define CVMX_HELPER_INPUT_TAG_INPUT_PORT 1
+
+/* Select skip mode for input ports */
+#define CVMX_HELPER_INPUT_PORT_SKIP_MODE CVMX_PIP_PORT_CFG_MODE_SKIPL2
+
+/*
+ * Force backpressure to be disabled. This overrides all other
+ * backpressure configuration.
+ */
+#define CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE 0
+
+#endif /* __CVMX_CONFIG_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-coremask.h b/arch/mips/include/asm/octeon/cvmx-coremask.h
new file mode 100644
index 000000000..097dc096d
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-coremask.h
@@ -0,0 +1,89 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2016 Cavium Inc. (support@cavium.com).
+ *
+ */
+
+/*
+ * Module to support operations on a bitmap of cores. A coremask can be used
+ * to select a specific core, a group of cores, or all available cores, for
+ * initialization and differentiation of roles within a single shared binary
+ * executable image.
+ *
+ * The core numbers used in this file are the same value as what is found in
+ * the COP0_EBASE register and the rdhwr 0 instruction.
+ *
+ * For the CN78XX and other multi-node environments the core numbers are not
+ * contiguous. The core numbers for the CN78XX are as follows:
+ *
+ * Node 0: Cores 0 - 47
+ * Node 1: Cores 128 - 175
+ * Node 2: Cores 256 - 303
+ * Node 3: Cores 384 - 431
+ *
+ */
+
+#ifndef __CVMX_COREMASK_H__
+#define __CVMX_COREMASK_H__
+
+#define CVMX_MIPS_MAX_CORES 1024
+/* bits per holder */
+#define CVMX_COREMASK_ELTSZ 64
+
+/* cvmx_coremask_t's size in u64 */
+#define CVMX_COREMASK_BMPSZ (CVMX_MIPS_MAX_CORES / CVMX_COREMASK_ELTSZ)
+
+
+/* cvmx_coremask_t */
+struct cvmx_coremask {
+ u64 coremask_bitmap[CVMX_COREMASK_BMPSZ];
+};
+
+/*
+ * Is ``core'' set in the coremask?
+ */
+static inline bool cvmx_coremask_is_core_set(const struct cvmx_coremask *pcm,
+ int core)
+{
+ int n, i;
+
+ n = core % CVMX_COREMASK_ELTSZ;
+ i = core / CVMX_COREMASK_ELTSZ;
+
+ return (pcm->coremask_bitmap[i] & ((u64)1 << n)) != 0;
+}
+
+/*
+ * Make a copy of a coremask
+ */
+static inline void cvmx_coremask_copy(struct cvmx_coremask *dest,
+ const struct cvmx_coremask *src)
+{
+ memcpy(dest, src, sizeof(*dest));
+}
+
+/*
+ * Set the lower 64 bits of the coremask.
+ */
+static inline void cvmx_coremask_set64(struct cvmx_coremask *pcm,
+ uint64_t coremask_64)
+{
+ pcm->coremask_bitmap[0] = coremask_64;
+}
+
+/*
+ * Clear ``core'' from the coremask.
+ */
+static inline void cvmx_coremask_clear_core(struct cvmx_coremask *pcm, int core)
+{
+ int n, i;
+
+ n = core % CVMX_COREMASK_ELTSZ;
+ i = core / CVMX_COREMASK_ELTSZ;
+ pcm->coremask_bitmap[i] &= ~(1ull << n);
+}
+
+#endif /* __CVMX_COREMASK_H__ */
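A minimal usage sketch of the coremask helpers defined above; the
coremask_example() wrapper is hypothetical and exists only to show the
calling pattern:

static void coremask_example(void)
{
	struct cvmx_coremask cm = { };

	/* select cores 0 and 1 via the low 64-bit word of the bitmap */
	cvmx_coremask_set64(&cm, 0x3ull);

	/* test membership, then drop core 1 from the mask again */
	if (cvmx_coremask_is_core_set(&cm, 1))
		cvmx_coremask_clear_core(&cm, 1);
}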
diff --git a/arch/mips/include/asm/octeon/cvmx-dbg-defs.h b/arch/mips/include/asm/octeon/cvmx-dbg-defs.h
new file mode 100644
index 000000000..828d07d87
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-dbg-defs.h
@@ -0,0 +1,101 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_DBG_DEFS_H__
+#define __CVMX_DBG_DEFS_H__
+
+#define CVMX_DBG_DATA (CVMX_ADD_IO_SEG(0x00011F00000001E8ull))
+
+union cvmx_dbg_data {
+ uint64_t u64;
+ struct cvmx_dbg_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63:41;
+ uint64_t c_mul:5;
+ uint64_t dsel_ext:1;
+ uint64_t data:17;
+#else
+ uint64_t data:17;
+ uint64_t dsel_ext:1;
+ uint64_t c_mul:5;
+ uint64_t reserved_23_63:41;
+#endif
+ } s;
+ struct cvmx_dbg_data_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63:33;
+ uint64_t pll_mul:3;
+ uint64_t reserved_23_27:5;
+ uint64_t c_mul:5;
+ uint64_t dsel_ext:1;
+ uint64_t data:17;
+#else
+ uint64_t data:17;
+ uint64_t dsel_ext:1;
+ uint64_t c_mul:5;
+ uint64_t reserved_23_27:5;
+ uint64_t pll_mul:3;
+ uint64_t reserved_31_63:33;
+#endif
+ } cn30xx;
+ struct cvmx_dbg_data_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t d_mul:4;
+ uint64_t dclk_mul2:1;
+ uint64_t cclk_div2:1;
+ uint64_t c_mul:5;
+ uint64_t dsel_ext:1;
+ uint64_t data:17;
+#else
+ uint64_t data:17;
+ uint64_t dsel_ext:1;
+ uint64_t c_mul:5;
+ uint64_t cclk_div2:1;
+ uint64_t dclk_mul2:1;
+ uint64_t d_mul:4;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn38xx;
+ struct cvmx_dbg_data_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t rem:6;
+ uint64_t c_mul:5;
+ uint64_t dsel_ext:1;
+ uint64_t data:17;
+#else
+ uint64_t data:17;
+ uint64_t dsel_ext:1;
+ uint64_t c_mul:5;
+ uint64_t rem:6;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn58xx;
+};
+
+#endif
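These unions follow the usual Octeon CSR idiom: read the raw 64-bit
register, then decode it through the bitfield view that matches the
running chip. A rough sketch, assuming the generic cvmx_read_csr()
accessor from the Octeon support headers and a hypothetical helper
name:

static unsigned int octeon_dbg_clock_mul(void)
{
	union cvmx_dbg_data dbg;

	/* read the raw CSR and decode the core clock multiplier field */
	dbg.u64 = cvmx_read_csr(CVMX_DBG_DATA);
	return dbg.s.c_mul;
}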
diff --git a/arch/mips/include/asm/octeon/cvmx-dpi-defs.h b/arch/mips/include/asm/octeon/cvmx-dpi-defs.h
new file mode 100644
index 000000000..e8613e1f6
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-dpi-defs.h
@@ -0,0 +1,874 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_DPI_DEFS_H__
+#define __CVMX_DPI_DEFS_H__
+
+#define CVMX_DPI_BIST_STATUS (CVMX_ADD_IO_SEG(0x0001DF0000000000ull))
+#define CVMX_DPI_CTL (CVMX_ADD_IO_SEG(0x0001DF0000000040ull))
+#define CVMX_DPI_DMAX_COUNTS(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000300ull) + ((offset) & 7) * 8)
+#define CVMX_DPI_DMAX_DBELL(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000200ull) + ((offset) & 7) * 8)
+#define CVMX_DPI_DMAX_ERR_RSP_STATUS(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000A80ull) + ((offset) & 7) * 8)
+#define CVMX_DPI_DMAX_IBUFF_SADDR(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000280ull) + ((offset) & 7) * 8)
+#define CVMX_DPI_DMAX_IFLIGHT(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000A00ull) + ((offset) & 7) * 8)
+#define CVMX_DPI_DMAX_NADDR(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000380ull) + ((offset) & 7) * 8)
+#define CVMX_DPI_DMAX_REQBNK0(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000400ull) + ((offset) & 7) * 8)
+#define CVMX_DPI_DMAX_REQBNK1(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000480ull) + ((offset) & 7) * 8)
+#define CVMX_DPI_DMA_CONTROL (CVMX_ADD_IO_SEG(0x0001DF0000000048ull))
+#define CVMX_DPI_DMA_ENGX_EN(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000080ull) + ((offset) & 7) * 8)
+#define CVMX_DPI_DMA_PPX_CNT(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000B00ull) + ((offset) & 31) * 8)
+#define CVMX_DPI_ENGX_BUF(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000880ull) + ((offset) & 7) * 8)
+#define CVMX_DPI_INFO_REG (CVMX_ADD_IO_SEG(0x0001DF0000000980ull))
+#define CVMX_DPI_INT_EN (CVMX_ADD_IO_SEG(0x0001DF0000000010ull))
+#define CVMX_DPI_INT_REG (CVMX_ADD_IO_SEG(0x0001DF0000000008ull))
+#define CVMX_DPI_NCBX_CFG(block_id) (CVMX_ADD_IO_SEG(0x0001DF0000000800ull))
+#define CVMX_DPI_PINT_INFO (CVMX_ADD_IO_SEG(0x0001DF0000000830ull))
+#define CVMX_DPI_PKT_ERR_RSP (CVMX_ADD_IO_SEG(0x0001DF0000000078ull))
+#define CVMX_DPI_REQ_ERR_RSP (CVMX_ADD_IO_SEG(0x0001DF0000000058ull))
+#define CVMX_DPI_REQ_ERR_RSP_EN (CVMX_ADD_IO_SEG(0x0001DF0000000068ull))
+#define CVMX_DPI_REQ_ERR_RST (CVMX_ADD_IO_SEG(0x0001DF0000000060ull))
+#define CVMX_DPI_REQ_ERR_RST_EN (CVMX_ADD_IO_SEG(0x0001DF0000000070ull))
+#define CVMX_DPI_REQ_ERR_SKIP_COMP (CVMX_ADD_IO_SEG(0x0001DF0000000838ull))
+#define CVMX_DPI_REQ_GBL_EN (CVMX_ADD_IO_SEG(0x0001DF0000000050ull))
+#define CVMX_DPI_SLI_PRTX_CFG(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000900ull) + ((offset) & 3) * 8)
+static inline uint64_t CVMX_DPI_SLI_PRTX_ERR(unsigned long offset)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001DF0000000920ull) + (offset) * 8;
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1))
+ return CVMX_ADD_IO_SEG(0x0001DF0000000928ull) + (offset) * 8;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2))
+ return CVMX_ADD_IO_SEG(0x0001DF0000000920ull) + (offset) * 8;
+ return CVMX_ADD_IO_SEG(0x0001DF0000000920ull) + (offset) * 8;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001DF0000000928ull) + (offset) * 8;
+ }
+ return CVMX_ADD_IO_SEG(0x0001DF0000000920ull) + (offset) * 8;
+}
+
+#define CVMX_DPI_SLI_PRTX_ERR_INFO(offset) (CVMX_ADD_IO_SEG(0x0001DF0000000940ull) + ((offset) & 3) * 8)
+
+union cvmx_dpi_bist_status {
+ uint64_t u64;
+ struct cvmx_dpi_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63:17;
+ uint64_t bist:47;
+#else
+ uint64_t bist:47;
+ uint64_t reserved_47_63:17;
+#endif
+ } s;
+ struct cvmx_dpi_bist_status_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_45_63:19;
+ uint64_t bist:45;
+#else
+ uint64_t bist:45;
+ uint64_t reserved_45_63:19;
+#endif
+ } cn63xx;
+ struct cvmx_dpi_bist_status_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63:27;
+ uint64_t bist:37;
+#else
+ uint64_t bist:37;
+ uint64_t reserved_37_63:27;
+#endif
+ } cn63xxp1;
+};
+
+union cvmx_dpi_ctl {
+ uint64_t u64;
+ struct cvmx_dpi_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t clk:1;
+ uint64_t en:1;
+#else
+ uint64_t en:1;
+ uint64_t clk:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+ struct cvmx_dpi_ctl_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t en:1;
+#else
+ uint64_t en:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } cn61xx;
+};
+
+union cvmx_dpi_dmax_counts {
+ uint64_t u64;
+ struct cvmx_dpi_dmax_counts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63:25;
+ uint64_t fcnt:7;
+ uint64_t dbell:32;
+#else
+ uint64_t dbell:32;
+ uint64_t fcnt:7;
+ uint64_t reserved_39_63:25;
+#endif
+ } s;
+};
+
+union cvmx_dpi_dmax_dbell {
+ uint64_t u64;
+ struct cvmx_dpi_dmax_dbell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t dbell:16;
+#else
+ uint64_t dbell:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_dpi_dmax_err_rsp_status {
+ uint64_t u64;
+ struct cvmx_dpi_dmax_err_rsp_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t status:6;
+#else
+ uint64_t status:6;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_dpi_dmax_ibuff_saddr {
+ uint64_t u64;
+ struct cvmx_dpi_dmax_ibuff_saddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ uint64_t csize:14;
+ uint64_t reserved_41_47:7;
+ uint64_t idle:1;
+ uint64_t saddr:33;
+ uint64_t reserved_0_6:7;
+#else
+ uint64_t reserved_0_6:7;
+ uint64_t saddr:33;
+ uint64_t idle:1;
+ uint64_t reserved_41_47:7;
+ uint64_t csize:14;
+ uint64_t reserved_62_63:2;
+#endif
+ } s;
+ struct cvmx_dpi_dmax_ibuff_saddr_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ uint64_t csize:14;
+ uint64_t reserved_41_47:7;
+ uint64_t idle:1;
+ uint64_t reserved_36_39:4;
+ uint64_t saddr:29;
+ uint64_t reserved_0_6:7;
+#else
+ uint64_t reserved_0_6:7;
+ uint64_t saddr:29;
+ uint64_t reserved_36_39:4;
+ uint64_t idle:1;
+ uint64_t reserved_41_47:7;
+ uint64_t csize:14;
+ uint64_t reserved_62_63:2;
+#endif
+ } cn61xx;
+};
+
+union cvmx_dpi_dmax_iflight {
+ uint64_t u64;
+ struct cvmx_dpi_dmax_iflight_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t cnt:3;
+#else
+ uint64_t cnt:3;
+ uint64_t reserved_3_63:61;
+#endif
+ } s;
+};
+
+union cvmx_dpi_dmax_naddr {
+ uint64_t u64;
+ struct cvmx_dpi_dmax_naddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t addr:40;
+#else
+ uint64_t addr:40;
+ uint64_t reserved_40_63:24;
+#endif
+ } s;
+ struct cvmx_dpi_dmax_naddr_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63:28;
+ uint64_t addr:36;
+#else
+ uint64_t addr:36;
+ uint64_t reserved_36_63:28;
+#endif
+ } cn61xx;
+};
+
+union cvmx_dpi_dmax_reqbnk0 {
+ uint64_t u64;
+ struct cvmx_dpi_dmax_reqbnk0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t state:64;
+#else
+ uint64_t state:64;
+#endif
+ } s;
+};
+
+union cvmx_dpi_dmax_reqbnk1 {
+ uint64_t u64;
+ struct cvmx_dpi_dmax_reqbnk1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t state:64;
+#else
+ uint64_t state:64;
+#endif
+ } s;
+};
+
+union cvmx_dpi_dma_control {
+ uint64_t u64;
+ struct cvmx_dpi_dma_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ uint64_t dici_mode:1;
+ uint64_t pkt_en1:1;
+ uint64_t ffp_dis:1;
+ uint64_t commit_mode:1;
+ uint64_t pkt_hp:1;
+ uint64_t pkt_en:1;
+ uint64_t reserved_54_55:2;
+ uint64_t dma_enb:6;
+ uint64_t reserved_34_47:14;
+ uint64_t b0_lend:1;
+ uint64_t dwb_denb:1;
+ uint64_t dwb_ichk:9;
+ uint64_t fpa_que:3;
+ uint64_t o_add1:1;
+ uint64_t o_ro:1;
+ uint64_t o_ns:1;
+ uint64_t o_es:2;
+ uint64_t o_mode:1;
+ uint64_t reserved_0_13:14;
+#else
+ uint64_t reserved_0_13:14;
+ uint64_t o_mode:1;
+ uint64_t o_es:2;
+ uint64_t o_ns:1;
+ uint64_t o_ro:1;
+ uint64_t o_add1:1;
+ uint64_t fpa_que:3;
+ uint64_t dwb_ichk:9;
+ uint64_t dwb_denb:1;
+ uint64_t b0_lend:1;
+ uint64_t reserved_34_47:14;
+ uint64_t dma_enb:6;
+ uint64_t reserved_54_55:2;
+ uint64_t pkt_en:1;
+ uint64_t pkt_hp:1;
+ uint64_t commit_mode:1;
+ uint64_t ffp_dis:1;
+ uint64_t pkt_en1:1;
+ uint64_t dici_mode:1;
+ uint64_t reserved_62_63:2;
+#endif
+ } s;
+ struct cvmx_dpi_dma_control_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63:3;
+ uint64_t pkt_en1:1;
+ uint64_t ffp_dis:1;
+ uint64_t commit_mode:1;
+ uint64_t pkt_hp:1;
+ uint64_t pkt_en:1;
+ uint64_t reserved_54_55:2;
+ uint64_t dma_enb:6;
+ uint64_t reserved_34_47:14;
+ uint64_t b0_lend:1;
+ uint64_t dwb_denb:1;
+ uint64_t dwb_ichk:9;
+ uint64_t fpa_que:3;
+ uint64_t o_add1:1;
+ uint64_t o_ro:1;
+ uint64_t o_ns:1;
+ uint64_t o_es:2;
+ uint64_t o_mode:1;
+ uint64_t reserved_0_13:14;
+#else
+ uint64_t reserved_0_13:14;
+ uint64_t o_mode:1;
+ uint64_t o_es:2;
+ uint64_t o_ns:1;
+ uint64_t o_ro:1;
+ uint64_t o_add1:1;
+ uint64_t fpa_que:3;
+ uint64_t dwb_ichk:9;
+ uint64_t dwb_denb:1;
+ uint64_t b0_lend:1;
+ uint64_t reserved_34_47:14;
+ uint64_t dma_enb:6;
+ uint64_t reserved_54_55:2;
+ uint64_t pkt_en:1;
+ uint64_t pkt_hp:1;
+ uint64_t commit_mode:1;
+ uint64_t ffp_dis:1;
+ uint64_t pkt_en1:1;
+ uint64_t reserved_61_63:3;
+#endif
+ } cn63xx;
+ struct cvmx_dpi_dma_control_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63:5;
+ uint64_t commit_mode:1;
+ uint64_t pkt_hp:1;
+ uint64_t pkt_en:1;
+ uint64_t reserved_54_55:2;
+ uint64_t dma_enb:6;
+ uint64_t reserved_34_47:14;
+ uint64_t b0_lend:1;
+ uint64_t dwb_denb:1;
+ uint64_t dwb_ichk:9;
+ uint64_t fpa_que:3;
+ uint64_t o_add1:1;
+ uint64_t o_ro:1;
+ uint64_t o_ns:1;
+ uint64_t o_es:2;
+ uint64_t o_mode:1;
+ uint64_t reserved_0_13:14;
+#else
+ uint64_t reserved_0_13:14;
+ uint64_t o_mode:1;
+ uint64_t o_es:2;
+ uint64_t o_ns:1;
+ uint64_t o_ro:1;
+ uint64_t o_add1:1;
+ uint64_t fpa_que:3;
+ uint64_t dwb_ichk:9;
+ uint64_t dwb_denb:1;
+ uint64_t b0_lend:1;
+ uint64_t reserved_34_47:14;
+ uint64_t dma_enb:6;
+ uint64_t reserved_54_55:2;
+ uint64_t pkt_en:1;
+ uint64_t pkt_hp:1;
+ uint64_t commit_mode:1;
+ uint64_t reserved_59_63:5;
+#endif
+ } cn63xxp1;
+};
+
+union cvmx_dpi_dma_engx_en {
+ uint64_t u64;
+ struct cvmx_dpi_dma_engx_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t qen:8;
+#else
+ uint64_t qen:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_dpi_dma_ppx_cnt {
+ uint64_t u64;
+ struct cvmx_dpi_dma_ppx_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t cnt:16;
+#else
+ uint64_t cnt:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_dpi_engx_buf {
+ uint64_t u64;
+ struct cvmx_dpi_engx_buf_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63:27;
+ uint64_t compblks:5;
+ uint64_t reserved_9_31:23;
+ uint64_t base:5;
+ uint64_t blks:4;
+#else
+ uint64_t blks:4;
+ uint64_t base:5;
+ uint64_t reserved_9_31:23;
+ uint64_t compblks:5;
+ uint64_t reserved_37_63:27;
+#endif
+ } s;
+ struct cvmx_dpi_engx_buf_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t base:4;
+ uint64_t blks:4;
+#else
+ uint64_t blks:4;
+ uint64_t base:4;
+ uint64_t reserved_8_63:56;
+#endif
+ } cn63xx;
+};
+
+union cvmx_dpi_info_reg {
+ uint64_t u64;
+ struct cvmx_dpi_info_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t ffp:4;
+ uint64_t reserved_2_3:2;
+ uint64_t ncb:1;
+ uint64_t rsl:1;
+#else
+ uint64_t rsl:1;
+ uint64_t ncb:1;
+ uint64_t reserved_2_3:2;
+ uint64_t ffp:4;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+ struct cvmx_dpi_info_reg_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t ncb:1;
+ uint64_t rsl:1;
+#else
+ uint64_t rsl:1;
+ uint64_t ncb:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } cn63xxp1;
+};
+
+union cvmx_dpi_int_en {
+ uint64_t u64;
+ struct cvmx_dpi_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t sprt3_rst:1;
+ uint64_t sprt2_rst:1;
+ uint64_t sprt1_rst:1;
+ uint64_t sprt0_rst:1;
+ uint64_t reserved_23_23:1;
+ uint64_t req_badfil:1;
+ uint64_t req_inull:1;
+ uint64_t req_anull:1;
+ uint64_t req_undflw:1;
+ uint64_t req_ovrflw:1;
+ uint64_t req_badlen:1;
+ uint64_t req_badadr:1;
+ uint64_t dmadbo:8;
+ uint64_t reserved_2_7:6;
+ uint64_t nfovr:1;
+ uint64_t nderr:1;
+#else
+ uint64_t nderr:1;
+ uint64_t nfovr:1;
+ uint64_t reserved_2_7:6;
+ uint64_t dmadbo:8;
+ uint64_t req_badadr:1;
+ uint64_t req_badlen:1;
+ uint64_t req_ovrflw:1;
+ uint64_t req_undflw:1;
+ uint64_t req_anull:1;
+ uint64_t req_inull:1;
+ uint64_t req_badfil:1;
+ uint64_t reserved_23_23:1;
+ uint64_t sprt0_rst:1;
+ uint64_t sprt1_rst:1;
+ uint64_t sprt2_rst:1;
+ uint64_t sprt3_rst:1;
+ uint64_t reserved_28_63:36;
+#endif
+ } s;
+ struct cvmx_dpi_int_en_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63:38;
+ uint64_t sprt1_rst:1;
+ uint64_t sprt0_rst:1;
+ uint64_t reserved_23_23:1;
+ uint64_t req_badfil:1;
+ uint64_t req_inull:1;
+ uint64_t req_anull:1;
+ uint64_t req_undflw:1;
+ uint64_t req_ovrflw:1;
+ uint64_t req_badlen:1;
+ uint64_t req_badadr:1;
+ uint64_t dmadbo:8;
+ uint64_t reserved_2_7:6;
+ uint64_t nfovr:1;
+ uint64_t nderr:1;
+#else
+ uint64_t nderr:1;
+ uint64_t nfovr:1;
+ uint64_t reserved_2_7:6;
+ uint64_t dmadbo:8;
+ uint64_t req_badadr:1;
+ uint64_t req_badlen:1;
+ uint64_t req_ovrflw:1;
+ uint64_t req_undflw:1;
+ uint64_t req_anull:1;
+ uint64_t req_inull:1;
+ uint64_t req_badfil:1;
+ uint64_t reserved_23_23:1;
+ uint64_t sprt0_rst:1;
+ uint64_t sprt1_rst:1;
+ uint64_t reserved_26_63:38;
+#endif
+ } cn63xx;
+};
+
+union cvmx_dpi_int_reg {
+ uint64_t u64;
+ struct cvmx_dpi_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t sprt3_rst:1;
+ uint64_t sprt2_rst:1;
+ uint64_t sprt1_rst:1;
+ uint64_t sprt0_rst:1;
+ uint64_t reserved_23_23:1;
+ uint64_t req_badfil:1;
+ uint64_t req_inull:1;
+ uint64_t req_anull:1;
+ uint64_t req_undflw:1;
+ uint64_t req_ovrflw:1;
+ uint64_t req_badlen:1;
+ uint64_t req_badadr:1;
+ uint64_t dmadbo:8;
+ uint64_t reserved_2_7:6;
+ uint64_t nfovr:1;
+ uint64_t nderr:1;
+#else
+ uint64_t nderr:1;
+ uint64_t nfovr:1;
+ uint64_t reserved_2_7:6;
+ uint64_t dmadbo:8;
+ uint64_t req_badadr:1;
+ uint64_t req_badlen:1;
+ uint64_t req_ovrflw:1;
+ uint64_t req_undflw:1;
+ uint64_t req_anull:1;
+ uint64_t req_inull:1;
+ uint64_t req_badfil:1;
+ uint64_t reserved_23_23:1;
+ uint64_t sprt0_rst:1;
+ uint64_t sprt1_rst:1;
+ uint64_t sprt2_rst:1;
+ uint64_t sprt3_rst:1;
+ uint64_t reserved_28_63:36;
+#endif
+ } s;
+ struct cvmx_dpi_int_reg_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63:38;
+ uint64_t sprt1_rst:1;
+ uint64_t sprt0_rst:1;
+ uint64_t reserved_23_23:1;
+ uint64_t req_badfil:1;
+ uint64_t req_inull:1;
+ uint64_t req_anull:1;
+ uint64_t req_undflw:1;
+ uint64_t req_ovrflw:1;
+ uint64_t req_badlen:1;
+ uint64_t req_badadr:1;
+ uint64_t dmadbo:8;
+ uint64_t reserved_2_7:6;
+ uint64_t nfovr:1;
+ uint64_t nderr:1;
+#else
+ uint64_t nderr:1;
+ uint64_t nfovr:1;
+ uint64_t reserved_2_7:6;
+ uint64_t dmadbo:8;
+ uint64_t req_badadr:1;
+ uint64_t req_badlen:1;
+ uint64_t req_ovrflw:1;
+ uint64_t req_undflw:1;
+ uint64_t req_anull:1;
+ uint64_t req_inull:1;
+ uint64_t req_badfil:1;
+ uint64_t reserved_23_23:1;
+ uint64_t sprt0_rst:1;
+ uint64_t sprt1_rst:1;
+ uint64_t reserved_26_63:38;
+#endif
+ } cn63xx;
+};
+
+union cvmx_dpi_ncbx_cfg {
+ uint64_t u64;
+ struct cvmx_dpi_ncbx_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t molr:6;
+#else
+ uint64_t molr:6;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_dpi_pint_info {
+ uint64_t u64;
+ struct cvmx_dpi_pint_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t iinfo:6;
+ uint64_t reserved_6_7:2;
+ uint64_t sinfo:6;
+#else
+ uint64_t sinfo:6;
+ uint64_t reserved_6_7:2;
+ uint64_t iinfo:6;
+ uint64_t reserved_14_63:50;
+#endif
+ } s;
+};
+
+union cvmx_dpi_pkt_err_rsp {
+ uint64_t u64;
+ struct cvmx_dpi_pkt_err_rsp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t pkterr:1;
+#else
+ uint64_t pkterr:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_dpi_req_err_rsp {
+ uint64_t u64;
+ struct cvmx_dpi_req_err_rsp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t qerr:8;
+#else
+ uint64_t qerr:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_dpi_req_err_rsp_en {
+ uint64_t u64;
+ struct cvmx_dpi_req_err_rsp_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t en:8;
+#else
+ uint64_t en:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_dpi_req_err_rst {
+ uint64_t u64;
+ struct cvmx_dpi_req_err_rst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t qerr:8;
+#else
+ uint64_t qerr:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_dpi_req_err_rst_en {
+ uint64_t u64;
+ struct cvmx_dpi_req_err_rst_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t en:8;
+#else
+ uint64_t en:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_dpi_req_err_skip_comp {
+ uint64_t u64;
+ struct cvmx_dpi_req_err_skip_comp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t en_rst:8;
+ uint64_t reserved_8_15:8;
+ uint64_t en_rsp:8;
+#else
+ uint64_t en_rsp:8;
+ uint64_t reserved_8_15:8;
+ uint64_t en_rst:8;
+ uint64_t reserved_24_63:40;
+#endif
+ } s;
+};
+
+union cvmx_dpi_req_gbl_en {
+ uint64_t u64;
+ struct cvmx_dpi_req_gbl_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t qen:8;
+#else
+ uint64_t qen:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_dpi_sli_prtx_cfg {
+ uint64_t u64;
+ struct cvmx_dpi_sli_prtx_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t halt:1;
+ uint64_t qlm_cfg:4;
+ uint64_t reserved_17_19:3;
+ uint64_t rd_mode:1;
+ uint64_t reserved_14_15:2;
+ uint64_t molr:6;
+ uint64_t mps_lim:1;
+ uint64_t reserved_5_6:2;
+ uint64_t mps:1;
+ uint64_t mrrs_lim:1;
+ uint64_t reserved_2_2:1;
+ uint64_t mrrs:2;
+#else
+ uint64_t mrrs:2;
+ uint64_t reserved_2_2:1;
+ uint64_t mrrs_lim:1;
+ uint64_t mps:1;
+ uint64_t reserved_5_6:2;
+ uint64_t mps_lim:1;
+ uint64_t molr:6;
+ uint64_t reserved_14_15:2;
+ uint64_t rd_mode:1;
+ uint64_t reserved_17_19:3;
+ uint64_t qlm_cfg:4;
+ uint64_t halt:1;
+ uint64_t reserved_25_63:39;
+#endif
+ } s;
+ struct cvmx_dpi_sli_prtx_cfg_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t halt:1;
+ uint64_t reserved_21_23:3;
+ uint64_t qlm_cfg:1;
+ uint64_t reserved_17_19:3;
+ uint64_t rd_mode:1;
+ uint64_t reserved_14_15:2;
+ uint64_t molr:6;
+ uint64_t mps_lim:1;
+ uint64_t reserved_5_6:2;
+ uint64_t mps:1;
+ uint64_t mrrs_lim:1;
+ uint64_t reserved_2_2:1;
+ uint64_t mrrs:2;
+#else
+ uint64_t mrrs:2;
+ uint64_t reserved_2_2:1;
+ uint64_t mrrs_lim:1;
+ uint64_t mps:1;
+ uint64_t reserved_5_6:2;
+ uint64_t mps_lim:1;
+ uint64_t molr:6;
+ uint64_t reserved_14_15:2;
+ uint64_t rd_mode:1;
+ uint64_t reserved_17_19:3;
+ uint64_t qlm_cfg:1;
+ uint64_t reserved_21_23:3;
+ uint64_t halt:1;
+ uint64_t reserved_25_63:39;
+#endif
+ } cn63xx;
+};
+
+union cvmx_dpi_sli_prtx_err {
+ uint64_t u64;
+ struct cvmx_dpi_sli_prtx_err_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr:61;
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t addr:61;
+#endif
+ } s;
+};
+
+union cvmx_dpi_sli_prtx_err_info {
+ uint64_t u64;
+ struct cvmx_dpi_sli_prtx_err_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t lock:1;
+ uint64_t reserved_5_7:3;
+ uint64_t type:1;
+ uint64_t reserved_3_3:1;
+ uint64_t reqq:3;
+#else
+ uint64_t reqq:3;
+ uint64_t reserved_3_3:1;
+ uint64_t type:1;
+ uint64_t reserved_5_7:3;
+ uint64_t lock:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-fau.h b/arch/mips/include/asm/octeon/cvmx-fau.h
new file mode 100644
index 000000000..dafeae300
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-fau.h
@@ -0,0 +1,619 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Interface to the hardware Fetch and Add Unit.
+ */
+
+#ifndef __CVMX_FAU_H__
+#define __CVMX_FAU_H__
+
+/*
+ * Octeon Fetch and Add Unit (FAU)
+ */
+
+#define CVMX_FAU_LOAD_IO_ADDRESS cvmx_build_io_address(0x1e, 0)
+#define CVMX_FAU_BITS_SCRADDR 63, 56
+#define CVMX_FAU_BITS_LEN 55, 48
+#define CVMX_FAU_BITS_INEVAL 35, 14
+#define CVMX_FAU_BITS_TAGWAIT 13, 13
+#define CVMX_FAU_BITS_NOADD 13, 13
+#define CVMX_FAU_BITS_SIZE 12, 11
+#define CVMX_FAU_BITS_REGISTER 10, 0
+
+typedef enum {
+ CVMX_FAU_OP_SIZE_8 = 0,
+ CVMX_FAU_OP_SIZE_16 = 1,
+ CVMX_FAU_OP_SIZE_32 = 2,
+ CVMX_FAU_OP_SIZE_64 = 3
+} cvmx_fau_op_size_t;
+
+/**
+ * Tagwait return definition. If a timeout occurs, the error
+ * bit will be set. Otherwise the value of the register before
+ * the update will be returned.
+ */
+typedef struct {
+ uint64_t error:1;
+ int64_t value:63;
+} cvmx_fau_tagwait64_t;
+
+/**
+ * Tagwait return definition. If a timeout occurs, the error
+ * bit will be set. Otherwise the value of the register before
+ * the update will be returned.
+ */
+typedef struct {
+ uint64_t error:1;
+ int32_t value:31;
+} cvmx_fau_tagwait32_t;
+
+/**
+ * Tagwait return definition. If a timeout occurs, the error
+ * bit will be set. Otherwise the value of the register before
+ * the update will be returned.
+ */
+typedef struct {
+ uint64_t error:1;
+ int16_t value:15;
+} cvmx_fau_tagwait16_t;
+
+/**
+ * Tagwait return definition. If a timeout occurs, the error
+ * bit will be set. Otherwise the value of the register before
+ * the update will be returned.
+ */
+typedef struct {
+ uint64_t error:1;
+ int8_t value:7;
+} cvmx_fau_tagwait8_t;
+
+/**
+ * Asynchronous tagwait return definition. If a timeout occurs,
+ * the error bit will be set. Otherwise the value of the
+ * register before the update will be returned.
+ */
+typedef union {
+ uint64_t u64;
+ struct {
+ uint64_t invalid:1;
+ uint64_t data:63; /* unpredictable if invalid is set */
+ } s;
+} cvmx_fau_async_tagwait_result_t;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+#define SWIZZLE_8 0
+#define SWIZZLE_16 0
+#define SWIZZLE_32 0
+#else
+#define SWIZZLE_8 0x7
+#define SWIZZLE_16 0x6
+#define SWIZZLE_32 0x4
+#endif
+
+/**
+ * Builds a store I/O address for writing to the FAU
+ *
+ * @noadd: 0 = Store value is atomically added to the current value
+ * 1 = Store value is atomically written over the current value
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
+ * Returns Address to store for atomic update
+ */
+static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
+{
+ return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
+ cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) |
+ cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
+}
+
+/**
+ * Builds an I/O address for accessing the FAU
+ *
+ * @tagwait: Should the atomic add wait for the current tag switch
+ * operation to complete.
+ * - 0 = Don't wait
+ * - 1 = Wait for tag switch to complete
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
+ * @value: Signed value to add.
+ * Note: When performing 32 and 64 bit access, only the low
+ * 22 bits are available.
+ * Returns Address to read from for atomic update
+ */
+static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
+ int64_t value)
+{
+ return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
+ cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
+ cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
+ cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
+}
+
+/**
+ * Perform an atomic 64 bit add
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @value: Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * Returns Value of the register before the update
+ */
+static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
+ int64_t value)
+{
+ return cvmx_read64_int64(__cvmx_fau_atomic_address(0, reg, value));
+}
+
+/**
+ * Perform an atomic 32 bit add
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @value: Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * Returns Value of the register before the update
+ */
+static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
+ int32_t value)
+{
+ reg ^= SWIZZLE_32;
+ return cvmx_read64_int32(__cvmx_fau_atomic_address(0, reg, value));
+}
+
+/**
+ * Perform an atomic 16 bit add
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @value: Signed value to add.
+ * Returns Value of the register before the update
+ */
+static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg,
+ int16_t value)
+{
+ reg ^= SWIZZLE_16;
+ return cvmx_read64_int16(__cvmx_fau_atomic_address(0, reg, value));
+}
+
+/**
+ * Perform an atomic 8 bit add
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * @value: Signed value to add.
+ * Returns Value of the register before the update
+ */
+static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
+{
+ reg ^= SWIZZLE_8;
+ return cvmx_read64_int8(__cvmx_fau_atomic_address(0, reg, value));
+}
+
+/**
+ * Perform an atomic 64 bit add after the current tag switch
+ * completes
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @value: Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * Returns If a timeout occurs, the error bit will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ */
+static inline cvmx_fau_tagwait64_t
+cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
+{
+ union {
+ uint64_t i64;
+ cvmx_fau_tagwait64_t t;
+ } result;
+ result.i64 =
+ cvmx_read64_int64(__cvmx_fau_atomic_address(1, reg, value));
+ return result.t;
+}
+
+/**
+ * Perform an atomic 32 bit add after the current tag switch
+ * completes
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @value: Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * Returns If a timeout occurs, the error bit will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ */
+static inline cvmx_fau_tagwait32_t
+cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
+{
+ union {
+ uint64_t i32;
+ cvmx_fau_tagwait32_t t;
+ } result;
+ reg ^= SWIZZLE_32;
+ result.i32 =
+ cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value));
+ return result.t;
+}
+
+/**
+ * Perform an atomic 16 bit add after the current tag switch
+ * completes
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @value: Signed value to add.
+ * Returns If a timeout occurs, the error bit will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ */
+static inline cvmx_fau_tagwait16_t
+cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
+{
+ union {
+ uint64_t i16;
+ cvmx_fau_tagwait16_t t;
+ } result;
+ reg ^= SWIZZLE_16;
+ result.i16 =
+ cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value));
+ return result.t;
+}
+
+/**
+ * Perform an atomic 8 bit add after the current tag switch
+ * completes
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * @value: Signed value to add.
+ * Returns If a timeout occurs, the error bit will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ */
+static inline cvmx_fau_tagwait8_t
+cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
+{
+ union {
+ uint64_t i8;
+ cvmx_fau_tagwait8_t t;
+ } result;
+ reg ^= SWIZZLE_8;
+ result.i8 = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value));
+ return result.t;
+}
+
+/**
+ * Builds I/O data for async operations
+ *
+ * @scraddr: Scratch pad byte address to write to. Must be 8 byte aligned
+ * @value: Signed value to add.
+ * Note: When performing 32 and 64 bit access, only the low
+ * 22 bits are available.
+ * @tagwait: Should the atomic add wait for the current tag switch
+ * operation to complete.
+ * - 0 = Don't wait
+ * - 1 = Wait for tag switch to complete
+ * @size: The size of the operation:
+ * - CVMX_FAU_OP_SIZE_8 (0) = 8 bits
+ * - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
+ * - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
+ * - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
+ * Returns Data to write using cvmx_send_single
+ */
+static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
+ uint64_t tagwait,
+ cvmx_fau_op_size_t size,
+ uint64_t reg)
+{
+ return CVMX_FAU_LOAD_IO_ADDRESS |
+ cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr >> 3) |
+ cvmx_build_bits(CVMX_FAU_BITS_LEN, 1) |
+ cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
+ cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
+ cvmx_build_bits(CVMX_FAU_BITS_SIZE, size) |
+ cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
+}
+
+/**
+ * Perform an async atomic 64 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @scraddr: Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @value: Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * Returns Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
+ cvmx_fau_reg_64_t reg,
+ int64_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data
+ (scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg));
+}
+
+/**
+ * Perform an async atomic 32 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @scraddr: Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @value: Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * Returns Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
+ cvmx_fau_reg_32_t reg,
+ int32_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data
+ (scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg));
+}
+
+/**
+ * Perform an async atomic 16 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @scraddr: Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @value: Signed value to add.
+ * Returns Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr,
+ cvmx_fau_reg_16_t reg,
+ int16_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data
+ (scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg));
+}
+
+/**
+ * Perform an async atomic 8 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @scraddr: Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * @value: Signed value to add.
+ * Returns Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr,
+ cvmx_fau_reg_8_t reg,
+ int8_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data
+ (scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg));
+}
+
+/**
+ * Perform an async atomic 64 bit add after the current tag
+ * switch completes.
+ *
+ * @scraddr: Scratch memory byte address to put response in. Must be
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @value: Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * Returns Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
+ cvmx_fau_reg_64_t reg,
+ int64_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data
+ (scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg));
+}
+
+/**
+ * Perform an async atomic 32 bit add after the current tag
+ * switch completes.
+ *
+ * @scraddr: Scratch memory byte address to put response in. Must be
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @value: Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * Returns Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
+ cvmx_fau_reg_32_t reg,
+ int32_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data
+ (scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg));
+}
+
+/**
+ * Perform an async atomic 16 bit add after the current tag
+ * switch completes.
+ *
+ * @scraddr: Scratch memory byte address to put response in. Must be
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @value: Signed value to add.
+ *
+ * Returns Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr,
+ cvmx_fau_reg_16_t reg,
+ int16_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data
+ (scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg));
+}
+
+/**
+ * Perform an async atomic 8 bit add after the current tag
+ * switch completes.
+ *
+ * @scraddr: Scratch memory byte address to put response in. Must be
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * @value: Signed value to add.
+ *
+ * Returns Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr,
+ cvmx_fau_reg_8_t reg,
+ int8_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data
+ (scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg));
+}
+
+/**
+ * Perform an atomic 64 bit add
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @value: Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
+{
+ cvmx_write64_int64(__cvmx_fau_store_address(0, reg), value);
+}
+
+/**
+ * Perform an atomic 32 bit add
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @value: Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
+{
+ reg ^= SWIZZLE_32;
+ cvmx_write64_int32(__cvmx_fau_store_address(0, reg), value);
+}
+
+/**
+ * Perform an atomic 16 bit add
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @value: Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
+{
+ reg ^= SWIZZLE_16;
+ cvmx_write64_int16(__cvmx_fau_store_address(0, reg), value);
+}
+
+/**
+ * Perform an atomic 8 bit add
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * @value: Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
+{
+ reg ^= SWIZZLE_8;
+ cvmx_write64_int8(__cvmx_fau_store_address(0, reg), value);
+}
+
+/**
+ * Perform an atomic 64 bit write
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @value: Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
+{
+ cvmx_write64_int64(__cvmx_fau_store_address(1, reg), value);
+}
+
+/**
+ * Perform an atomic 32 bit write
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @value: Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
+{
+ reg ^= SWIZZLE_32;
+ cvmx_write64_int32(__cvmx_fau_store_address(1, reg), value);
+}
+
+/**
+ * Perform an atomic 16 bit write
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @value: Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
+{
+ reg ^= SWIZZLE_16;
+ cvmx_write64_int16(__cvmx_fau_store_address(1, reg), value);
+}
+
+/**
+ * Perform an atomic 8 bit write
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * @value: Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value)
+{
+ reg ^= SWIZZLE_8;
+ cvmx_write64_int8(__cvmx_fau_store_address(1, reg), value);
+}
+
+#endif /* __CVMX_FAU_H__ */
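A minimal usage sketch of the synchronous FAU calls defined above;
FAU_EXAMPLE_COUNTER and fau_counter_example() are hypothetical names,
with the register taken from the first byte address left free by
cvmx-config.h:

/* hypothetical 64-bit counter at the first unallocated FAU address */
#define FAU_EXAMPLE_COUNTER ((cvmx_fau_reg_64_t)CVMX_FAU_REG_AVAIL_BASE)

static int64_t fau_counter_example(void)
{
	/* atomically overwrite the register with zero */
	cvmx_fau_atomic_write64(FAU_EXAMPLE_COUNTER, 0);

	/* atomic add; the value before the update (zero here) is returned */
	return cvmx_fau_fetch_and_add64(FAU_EXAMPLE_COUNTER, 1);
}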
diff --git a/arch/mips/include/asm/octeon/cvmx-fpa-defs.h b/arch/mips/include/asm/octeon/cvmx-fpa-defs.h
new file mode 100644
index 000000000..322943f7c
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-fpa-defs.h
@@ -0,0 +1,1252 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_FPA_DEFS_H__
+#define __CVMX_FPA_DEFS_H__
+
+#define CVMX_FPA_ADDR_RANGE_ERROR (CVMX_ADD_IO_SEG(0x0001180028000458ull))
+#define CVMX_FPA_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011800280000E8ull))
+#define CVMX_FPA_CTL_STATUS (CVMX_ADD_IO_SEG(0x0001180028000050ull))
+#define CVMX_FPA_FPF0_MARKS (CVMX_ADD_IO_SEG(0x0001180028000000ull))
+#define CVMX_FPA_FPF0_SIZE (CVMX_ADD_IO_SEG(0x0001180028000058ull))
+#define CVMX_FPA_FPF1_MARKS CVMX_FPA_FPFX_MARKS(1)
+#define CVMX_FPA_FPF2_MARKS CVMX_FPA_FPFX_MARKS(2)
+#define CVMX_FPA_FPF3_MARKS CVMX_FPA_FPFX_MARKS(3)
+#define CVMX_FPA_FPF4_MARKS CVMX_FPA_FPFX_MARKS(4)
+#define CVMX_FPA_FPF5_MARKS CVMX_FPA_FPFX_MARKS(5)
+#define CVMX_FPA_FPF6_MARKS CVMX_FPA_FPFX_MARKS(6)
+#define CVMX_FPA_FPF7_MARKS CVMX_FPA_FPFX_MARKS(7)
+#define CVMX_FPA_FPF8_MARKS (CVMX_ADD_IO_SEG(0x0001180028000240ull))
+#define CVMX_FPA_FPF8_SIZE (CVMX_ADD_IO_SEG(0x0001180028000248ull))
+#define CVMX_FPA_FPFX_MARKS(offset) (CVMX_ADD_IO_SEG(0x0001180028000008ull) + ((offset) & 7) * 8 - 8*1)
+#define CVMX_FPA_FPFX_SIZE(offset) (CVMX_ADD_IO_SEG(0x0001180028000060ull) + ((offset) & 7) * 8 - 8*1)
+#define CVMX_FPA_INT_ENB (CVMX_ADD_IO_SEG(0x0001180028000048ull))
+#define CVMX_FPA_INT_SUM (CVMX_ADD_IO_SEG(0x0001180028000040ull))
+#define CVMX_FPA_PACKET_THRESHOLD (CVMX_ADD_IO_SEG(0x0001180028000460ull))
+#define CVMX_FPA_POOLX_END_ADDR(offset) (CVMX_ADD_IO_SEG(0x0001180028000358ull) + ((offset) & 15) * 8)
+#define CVMX_FPA_POOLX_START_ADDR(offset) (CVMX_ADD_IO_SEG(0x0001180028000258ull) + ((offset) & 15) * 8)
+#define CVMX_FPA_POOLX_THRESHOLD(offset) (CVMX_ADD_IO_SEG(0x0001180028000140ull) + ((offset) & 15) * 8)
+#define CVMX_FPA_QUE0_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(0)
+#define CVMX_FPA_QUE1_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(1)
+#define CVMX_FPA_QUE2_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(2)
+#define CVMX_FPA_QUE3_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(3)
+#define CVMX_FPA_QUE4_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(4)
+#define CVMX_FPA_QUE5_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(5)
+#define CVMX_FPA_QUE6_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(6)
+#define CVMX_FPA_QUE7_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(7)
+#define CVMX_FPA_QUE8_PAGE_INDEX (CVMX_ADD_IO_SEG(0x0001180028000250ull))
+#define CVMX_FPA_QUEX_AVAILABLE(offset) (CVMX_ADD_IO_SEG(0x0001180028000098ull) + ((offset) & 15) * 8)
+#define CVMX_FPA_QUEX_PAGE_INDEX(offset) (CVMX_ADD_IO_SEG(0x00011800280000F0ull) + ((offset) & 7) * 8)
+#define CVMX_FPA_QUE_ACT (CVMX_ADD_IO_SEG(0x0001180028000138ull))
+#define CVMX_FPA_QUE_EXP (CVMX_ADD_IO_SEG(0x0001180028000130ull))
+#define CVMX_FPA_WART_CTL (CVMX_ADD_IO_SEG(0x00011800280000D8ull))
+#define CVMX_FPA_WART_STATUS (CVMX_ADD_IO_SEG(0x00011800280000E0ull))
+#define CVMX_FPA_WQE_THRESHOLD (CVMX_ADD_IO_SEG(0x0001180028000468ull))
+#define CVMX_FPA_CLK_COUNT (CVMX_ADD_IO_SEG(0x00012800000000F0ull))
+
+union cvmx_fpa_addr_range_error {
+ uint64_t u64;
+ struct cvmx_fpa_addr_range_error_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63:26;
+ uint64_t pool:5;
+ uint64_t addr:33;
+#else
+ uint64_t addr:33;
+ uint64_t pool:5;
+ uint64_t reserved_38_63:26;
+#endif
+ } s;
+};
+
+union cvmx_fpa_bist_status {
+ uint64_t u64;
+ struct cvmx_fpa_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t frd:1;
+ uint64_t fpf0:1;
+ uint64_t fpf1:1;
+ uint64_t ffr:1;
+ uint64_t fdr:1;
+#else
+ uint64_t fdr:1;
+ uint64_t ffr:1;
+ uint64_t fpf1:1;
+ uint64_t fpf0:1;
+ uint64_t frd:1;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_fpa_ctl_status {
+ uint64_t u64;
+ struct cvmx_fpa_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63:43;
+ uint64_t free_en:1;
+ uint64_t ret_off:1;
+ uint64_t req_off:1;
+ uint64_t reset:1;
+ uint64_t use_ldt:1;
+ uint64_t use_stt:1;
+ uint64_t enb:1;
+ uint64_t mem1_err:7;
+ uint64_t mem0_err:7;
+#else
+ uint64_t mem0_err:7;
+ uint64_t mem1_err:7;
+ uint64_t enb:1;
+ uint64_t use_stt:1;
+ uint64_t use_ldt:1;
+ uint64_t reset:1;
+ uint64_t req_off:1;
+ uint64_t ret_off:1;
+ uint64_t free_en:1;
+ uint64_t reserved_21_63:43;
+#endif
+ } s;
+ struct cvmx_fpa_ctl_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63:46;
+ uint64_t reset:1;
+ uint64_t use_ldt:1;
+ uint64_t use_stt:1;
+ uint64_t enb:1;
+ uint64_t mem1_err:7;
+ uint64_t mem0_err:7;
+#else
+ uint64_t mem0_err:7;
+ uint64_t mem1_err:7;
+ uint64_t enb:1;
+ uint64_t use_stt:1;
+ uint64_t use_ldt:1;
+ uint64_t reset:1;
+ uint64_t reserved_18_63:46;
+#endif
+ } cn30xx;
+};
+
+union cvmx_fpa_fpfx_marks {
+ uint64_t u64;
+ struct cvmx_fpa_fpfx_marks_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t fpf_wr:11;
+ uint64_t fpf_rd:11;
+#else
+ uint64_t fpf_rd:11;
+ uint64_t fpf_wr:11;
+ uint64_t reserved_22_63:42;
+#endif
+ } s;
+};
+
+union cvmx_fpa_fpfx_size {
+ uint64_t u64;
+ struct cvmx_fpa_fpfx_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t fpf_siz:11;
+#else
+ uint64_t fpf_siz:11;
+ uint64_t reserved_11_63:53;
+#endif
+ } s;
+};
+
+union cvmx_fpa_fpf0_marks {
+ uint64_t u64;
+ struct cvmx_fpa_fpf0_marks_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t fpf_wr:12;
+ uint64_t fpf_rd:12;
+#else
+ uint64_t fpf_rd:12;
+ uint64_t fpf_wr:12;
+ uint64_t reserved_24_63:40;
+#endif
+ } s;
+};
+
+union cvmx_fpa_fpf0_size {
+ uint64_t u64;
+ struct cvmx_fpa_fpf0_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t fpf_siz:12;
+#else
+ uint64_t fpf_siz:12;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+};
+
+union cvmx_fpa_fpf8_marks {
+ uint64_t u64;
+ struct cvmx_fpa_fpf8_marks_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t fpf_wr:11;
+ uint64_t fpf_rd:11;
+#else
+ uint64_t fpf_rd:11;
+ uint64_t fpf_wr:11;
+ uint64_t reserved_22_63:42;
+#endif
+ } s;
+};
+
+union cvmx_fpa_fpf8_size {
+ uint64_t u64;
+ struct cvmx_fpa_fpf8_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t fpf_siz:12;
+#else
+ uint64_t fpf_siz:12;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+};
+
+union cvmx_fpa_int_enb {
+ uint64_t u64;
+ struct cvmx_fpa_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_50_63:14;
+ uint64_t paddr_e:1;
+ uint64_t reserved_44_48:5;
+ uint64_t free7:1;
+ uint64_t free6:1;
+ uint64_t free5:1;
+ uint64_t free4:1;
+ uint64_t free3:1;
+ uint64_t free2:1;
+ uint64_t free1:1;
+ uint64_t free0:1;
+ uint64_t pool7th:1;
+ uint64_t pool6th:1;
+ uint64_t pool5th:1;
+ uint64_t pool4th:1;
+ uint64_t pool3th:1;
+ uint64_t pool2th:1;
+ uint64_t pool1th:1;
+ uint64_t pool0th:1;
+ uint64_t q7_perr:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_und:1;
+ uint64_t q6_perr:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_und:1;
+ uint64_t q5_perr:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_und:1;
+ uint64_t q4_perr:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_und:1;
+ uint64_t q3_perr:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_und:1;
+ uint64_t q2_perr:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_und:1;
+ uint64_t q1_perr:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_und:1;
+ uint64_t q0_perr:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_und:1;
+ uint64_t fed1_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed0_sbe:1;
+#else
+ uint64_t fed0_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed1_dbe:1;
+ uint64_t q0_und:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_perr:1;
+ uint64_t q1_und:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_perr:1;
+ uint64_t q2_und:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_perr:1;
+ uint64_t q3_und:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_perr:1;
+ uint64_t q4_und:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_perr:1;
+ uint64_t q5_und:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_perr:1;
+ uint64_t q6_und:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_perr:1;
+ uint64_t q7_und:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_perr:1;
+ uint64_t pool0th:1;
+ uint64_t pool1th:1;
+ uint64_t pool2th:1;
+ uint64_t pool3th:1;
+ uint64_t pool4th:1;
+ uint64_t pool5th:1;
+ uint64_t pool6th:1;
+ uint64_t pool7th:1;
+ uint64_t free0:1;
+ uint64_t free1:1;
+ uint64_t free2:1;
+ uint64_t free3:1;
+ uint64_t free4:1;
+ uint64_t free5:1;
+ uint64_t free6:1;
+ uint64_t free7:1;
+ uint64_t reserved_44_48:5;
+ uint64_t paddr_e:1;
+ uint64_t reserved_50_63:14;
+#endif
+ } s;
+ struct cvmx_fpa_int_enb_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t q7_perr:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_und:1;
+ uint64_t q6_perr:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_und:1;
+ uint64_t q5_perr:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_und:1;
+ uint64_t q4_perr:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_und:1;
+ uint64_t q3_perr:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_und:1;
+ uint64_t q2_perr:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_und:1;
+ uint64_t q1_perr:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_und:1;
+ uint64_t q0_perr:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_und:1;
+ uint64_t fed1_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed0_sbe:1;
+#else
+ uint64_t fed0_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed1_dbe:1;
+ uint64_t q0_und:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_perr:1;
+ uint64_t q1_und:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_perr:1;
+ uint64_t q2_und:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_perr:1;
+ uint64_t q3_und:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_perr:1;
+ uint64_t q4_und:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_perr:1;
+ uint64_t q5_und:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_perr:1;
+ uint64_t q6_und:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_perr:1;
+ uint64_t q7_und:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_perr:1;
+ uint64_t reserved_28_63:36;
+#endif
+ } cn30xx;
+ struct cvmx_fpa_int_enb_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_50_63:14;
+ uint64_t paddr_e:1;
+ uint64_t res_44:5;
+ uint64_t free7:1;
+ uint64_t free6:1;
+ uint64_t free5:1;
+ uint64_t free4:1;
+ uint64_t free3:1;
+ uint64_t free2:1;
+ uint64_t free1:1;
+ uint64_t free0:1;
+ uint64_t pool7th:1;
+ uint64_t pool6th:1;
+ uint64_t pool5th:1;
+ uint64_t pool4th:1;
+ uint64_t pool3th:1;
+ uint64_t pool2th:1;
+ uint64_t pool1th:1;
+ uint64_t pool0th:1;
+ uint64_t q7_perr:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_und:1;
+ uint64_t q6_perr:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_und:1;
+ uint64_t q5_perr:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_und:1;
+ uint64_t q4_perr:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_und:1;
+ uint64_t q3_perr:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_und:1;
+ uint64_t q2_perr:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_und:1;
+ uint64_t q1_perr:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_und:1;
+ uint64_t q0_perr:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_und:1;
+ uint64_t fed1_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed0_sbe:1;
+#else
+ uint64_t fed0_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed1_dbe:1;
+ uint64_t q0_und:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_perr:1;
+ uint64_t q1_und:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_perr:1;
+ uint64_t q2_und:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_perr:1;
+ uint64_t q3_und:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_perr:1;
+ uint64_t q4_und:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_perr:1;
+ uint64_t q5_und:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_perr:1;
+ uint64_t q6_und:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_perr:1;
+ uint64_t q7_und:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_perr:1;
+ uint64_t pool0th:1;
+ uint64_t pool1th:1;
+ uint64_t pool2th:1;
+ uint64_t pool3th:1;
+ uint64_t pool4th:1;
+ uint64_t pool5th:1;
+ uint64_t pool6th:1;
+ uint64_t pool7th:1;
+ uint64_t free0:1;
+ uint64_t free1:1;
+ uint64_t free2:1;
+ uint64_t free3:1;
+ uint64_t free4:1;
+ uint64_t free5:1;
+ uint64_t free6:1;
+ uint64_t free7:1;
+ uint64_t res_44:5;
+ uint64_t paddr_e:1;
+ uint64_t reserved_50_63:14;
+#endif
+ } cn61xx;
+ struct cvmx_fpa_int_enb_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t free7:1;
+ uint64_t free6:1;
+ uint64_t free5:1;
+ uint64_t free4:1;
+ uint64_t free3:1;
+ uint64_t free2:1;
+ uint64_t free1:1;
+ uint64_t free0:1;
+ uint64_t pool7th:1;
+ uint64_t pool6th:1;
+ uint64_t pool5th:1;
+ uint64_t pool4th:1;
+ uint64_t pool3th:1;
+ uint64_t pool2th:1;
+ uint64_t pool1th:1;
+ uint64_t pool0th:1;
+ uint64_t q7_perr:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_und:1;
+ uint64_t q6_perr:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_und:1;
+ uint64_t q5_perr:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_und:1;
+ uint64_t q4_perr:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_und:1;
+ uint64_t q3_perr:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_und:1;
+ uint64_t q2_perr:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_und:1;
+ uint64_t q1_perr:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_und:1;
+ uint64_t q0_perr:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_und:1;
+ uint64_t fed1_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed0_sbe:1;
+#else
+ uint64_t fed0_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed1_dbe:1;
+ uint64_t q0_und:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_perr:1;
+ uint64_t q1_und:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_perr:1;
+ uint64_t q2_und:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_perr:1;
+ uint64_t q3_und:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_perr:1;
+ uint64_t q4_und:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_perr:1;
+ uint64_t q5_und:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_perr:1;
+ uint64_t q6_und:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_perr:1;
+ uint64_t q7_und:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_perr:1;
+ uint64_t pool0th:1;
+ uint64_t pool1th:1;
+ uint64_t pool2th:1;
+ uint64_t pool3th:1;
+ uint64_t pool4th:1;
+ uint64_t pool5th:1;
+ uint64_t pool6th:1;
+ uint64_t pool7th:1;
+ uint64_t free0:1;
+ uint64_t free1:1;
+ uint64_t free2:1;
+ uint64_t free3:1;
+ uint64_t free4:1;
+ uint64_t free5:1;
+ uint64_t free6:1;
+ uint64_t free7:1;
+ uint64_t reserved_44_63:20;
+#endif
+ } cn63xx;
+ struct cvmx_fpa_int_enb_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_50_63:14;
+ uint64_t paddr_e:1;
+ uint64_t pool8th:1;
+ uint64_t q8_perr:1;
+ uint64_t q8_coff:1;
+ uint64_t q8_und:1;
+ uint64_t free8:1;
+ uint64_t free7:1;
+ uint64_t free6:1;
+ uint64_t free5:1;
+ uint64_t free4:1;
+ uint64_t free3:1;
+ uint64_t free2:1;
+ uint64_t free1:1;
+ uint64_t free0:1;
+ uint64_t pool7th:1;
+ uint64_t pool6th:1;
+ uint64_t pool5th:1;
+ uint64_t pool4th:1;
+ uint64_t pool3th:1;
+ uint64_t pool2th:1;
+ uint64_t pool1th:1;
+ uint64_t pool0th:1;
+ uint64_t q7_perr:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_und:1;
+ uint64_t q6_perr:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_und:1;
+ uint64_t q5_perr:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_und:1;
+ uint64_t q4_perr:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_und:1;
+ uint64_t q3_perr:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_und:1;
+ uint64_t q2_perr:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_und:1;
+ uint64_t q1_perr:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_und:1;
+ uint64_t q0_perr:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_und:1;
+ uint64_t fed1_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed0_sbe:1;
+#else
+ uint64_t fed0_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed1_dbe:1;
+ uint64_t q0_und:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_perr:1;
+ uint64_t q1_und:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_perr:1;
+ uint64_t q2_und:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_perr:1;
+ uint64_t q3_und:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_perr:1;
+ uint64_t q4_und:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_perr:1;
+ uint64_t q5_und:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_perr:1;
+ uint64_t q6_und:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_perr:1;
+ uint64_t q7_und:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_perr:1;
+ uint64_t pool0th:1;
+ uint64_t pool1th:1;
+ uint64_t pool2th:1;
+ uint64_t pool3th:1;
+ uint64_t pool4th:1;
+ uint64_t pool5th:1;
+ uint64_t pool6th:1;
+ uint64_t pool7th:1;
+ uint64_t free0:1;
+ uint64_t free1:1;
+ uint64_t free2:1;
+ uint64_t free3:1;
+ uint64_t free4:1;
+ uint64_t free5:1;
+ uint64_t free6:1;
+ uint64_t free7:1;
+ uint64_t free8:1;
+ uint64_t q8_und:1;
+ uint64_t q8_coff:1;
+ uint64_t q8_perr:1;
+ uint64_t pool8th:1;
+ uint64_t paddr_e:1;
+ uint64_t reserved_50_63:14;
+#endif
+ } cn68xx;
+};
+
+union cvmx_fpa_int_sum {
+ uint64_t u64;
+ struct cvmx_fpa_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_50_63:14;
+ uint64_t paddr_e:1;
+ uint64_t pool8th:1;
+ uint64_t q8_perr:1;
+ uint64_t q8_coff:1;
+ uint64_t q8_und:1;
+ uint64_t free8:1;
+ uint64_t free7:1;
+ uint64_t free6:1;
+ uint64_t free5:1;
+ uint64_t free4:1;
+ uint64_t free3:1;
+ uint64_t free2:1;
+ uint64_t free1:1;
+ uint64_t free0:1;
+ uint64_t pool7th:1;
+ uint64_t pool6th:1;
+ uint64_t pool5th:1;
+ uint64_t pool4th:1;
+ uint64_t pool3th:1;
+ uint64_t pool2th:1;
+ uint64_t pool1th:1;
+ uint64_t pool0th:1;
+ uint64_t q7_perr:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_und:1;
+ uint64_t q6_perr:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_und:1;
+ uint64_t q5_perr:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_und:1;
+ uint64_t q4_perr:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_und:1;
+ uint64_t q3_perr:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_und:1;
+ uint64_t q2_perr:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_und:1;
+ uint64_t q1_perr:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_und:1;
+ uint64_t q0_perr:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_und:1;
+ uint64_t fed1_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed0_sbe:1;
+#else
+ uint64_t fed0_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed1_dbe:1;
+ uint64_t q0_und:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_perr:1;
+ uint64_t q1_und:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_perr:1;
+ uint64_t q2_und:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_perr:1;
+ uint64_t q3_und:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_perr:1;
+ uint64_t q4_und:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_perr:1;
+ uint64_t q5_und:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_perr:1;
+ uint64_t q6_und:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_perr:1;
+ uint64_t q7_und:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_perr:1;
+ uint64_t pool0th:1;
+ uint64_t pool1th:1;
+ uint64_t pool2th:1;
+ uint64_t pool3th:1;
+ uint64_t pool4th:1;
+ uint64_t pool5th:1;
+ uint64_t pool6th:1;
+ uint64_t pool7th:1;
+ uint64_t free0:1;
+ uint64_t free1:1;
+ uint64_t free2:1;
+ uint64_t free3:1;
+ uint64_t free4:1;
+ uint64_t free5:1;
+ uint64_t free6:1;
+ uint64_t free7:1;
+ uint64_t free8:1;
+ uint64_t q8_und:1;
+ uint64_t q8_coff:1;
+ uint64_t q8_perr:1;
+ uint64_t pool8th:1;
+ uint64_t paddr_e:1;
+ uint64_t reserved_50_63:14;
+#endif
+ } s;
+ struct cvmx_fpa_int_sum_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t q7_perr:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_und:1;
+ uint64_t q6_perr:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_und:1;
+ uint64_t q5_perr:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_und:1;
+ uint64_t q4_perr:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_und:1;
+ uint64_t q3_perr:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_und:1;
+ uint64_t q2_perr:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_und:1;
+ uint64_t q1_perr:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_und:1;
+ uint64_t q0_perr:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_und:1;
+ uint64_t fed1_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed0_sbe:1;
+#else
+ uint64_t fed0_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed1_dbe:1;
+ uint64_t q0_und:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_perr:1;
+ uint64_t q1_und:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_perr:1;
+ uint64_t q2_und:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_perr:1;
+ uint64_t q3_und:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_perr:1;
+ uint64_t q4_und:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_perr:1;
+ uint64_t q5_und:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_perr:1;
+ uint64_t q6_und:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_perr:1;
+ uint64_t q7_und:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_perr:1;
+ uint64_t reserved_28_63:36;
+#endif
+ } cn30xx;
+ struct cvmx_fpa_int_sum_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_50_63:14;
+ uint64_t paddr_e:1;
+ uint64_t reserved_44_48:5;
+ uint64_t free7:1;
+ uint64_t free6:1;
+ uint64_t free5:1;
+ uint64_t free4:1;
+ uint64_t free3:1;
+ uint64_t free2:1;
+ uint64_t free1:1;
+ uint64_t free0:1;
+ uint64_t pool7th:1;
+ uint64_t pool6th:1;
+ uint64_t pool5th:1;
+ uint64_t pool4th:1;
+ uint64_t pool3th:1;
+ uint64_t pool2th:1;
+ uint64_t pool1th:1;
+ uint64_t pool0th:1;
+ uint64_t q7_perr:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_und:1;
+ uint64_t q6_perr:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_und:1;
+ uint64_t q5_perr:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_und:1;
+ uint64_t q4_perr:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_und:1;
+ uint64_t q3_perr:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_und:1;
+ uint64_t q2_perr:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_und:1;
+ uint64_t q1_perr:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_und:1;
+ uint64_t q0_perr:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_und:1;
+ uint64_t fed1_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed0_sbe:1;
+#else
+ uint64_t fed0_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed1_dbe:1;
+ uint64_t q0_und:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_perr:1;
+ uint64_t q1_und:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_perr:1;
+ uint64_t q2_und:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_perr:1;
+ uint64_t q3_und:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_perr:1;
+ uint64_t q4_und:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_perr:1;
+ uint64_t q5_und:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_perr:1;
+ uint64_t q6_und:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_perr:1;
+ uint64_t q7_und:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_perr:1;
+ uint64_t pool0th:1;
+ uint64_t pool1th:1;
+ uint64_t pool2th:1;
+ uint64_t pool3th:1;
+ uint64_t pool4th:1;
+ uint64_t pool5th:1;
+ uint64_t pool6th:1;
+ uint64_t pool7th:1;
+ uint64_t free0:1;
+ uint64_t free1:1;
+ uint64_t free2:1;
+ uint64_t free3:1;
+ uint64_t free4:1;
+ uint64_t free5:1;
+ uint64_t free6:1;
+ uint64_t free7:1;
+ uint64_t reserved_44_48:5;
+ uint64_t paddr_e:1;
+ uint64_t reserved_50_63:14;
+#endif
+ } cn61xx;
+ struct cvmx_fpa_int_sum_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t free7:1;
+ uint64_t free6:1;
+ uint64_t free5:1;
+ uint64_t free4:1;
+ uint64_t free3:1;
+ uint64_t free2:1;
+ uint64_t free1:1;
+ uint64_t free0:1;
+ uint64_t pool7th:1;
+ uint64_t pool6th:1;
+ uint64_t pool5th:1;
+ uint64_t pool4th:1;
+ uint64_t pool3th:1;
+ uint64_t pool2th:1;
+ uint64_t pool1th:1;
+ uint64_t pool0th:1;
+ uint64_t q7_perr:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_und:1;
+ uint64_t q6_perr:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_und:1;
+ uint64_t q5_perr:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_und:1;
+ uint64_t q4_perr:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_und:1;
+ uint64_t q3_perr:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_und:1;
+ uint64_t q2_perr:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_und:1;
+ uint64_t q1_perr:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_und:1;
+ uint64_t q0_perr:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_und:1;
+ uint64_t fed1_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed0_sbe:1;
+#else
+ uint64_t fed0_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed1_dbe:1;
+ uint64_t q0_und:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_perr:1;
+ uint64_t q1_und:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_perr:1;
+ uint64_t q2_und:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_perr:1;
+ uint64_t q3_und:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_perr:1;
+ uint64_t q4_und:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_perr:1;
+ uint64_t q5_und:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_perr:1;
+ uint64_t q6_und:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_perr:1;
+ uint64_t q7_und:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_perr:1;
+ uint64_t pool0th:1;
+ uint64_t pool1th:1;
+ uint64_t pool2th:1;
+ uint64_t pool3th:1;
+ uint64_t pool4th:1;
+ uint64_t pool5th:1;
+ uint64_t pool6th:1;
+ uint64_t pool7th:1;
+ uint64_t free0:1;
+ uint64_t free1:1;
+ uint64_t free2:1;
+ uint64_t free3:1;
+ uint64_t free4:1;
+ uint64_t free5:1;
+ uint64_t free6:1;
+ uint64_t free7:1;
+ uint64_t reserved_44_63:20;
+#endif
+ } cn63xx;
+};
+
+union cvmx_fpa_packet_threshold {
+ uint64_t u64;
+ struct cvmx_fpa_packet_threshold_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t thresh:32;
+#else
+ uint64_t thresh:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_fpa_poolx_end_addr {
+ uint64_t u64;
+ struct cvmx_fpa_poolx_end_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63:31;
+ uint64_t addr:33;
+#else
+ uint64_t addr:33;
+ uint64_t reserved_33_63:31;
+#endif
+ } s;
+};
+
+union cvmx_fpa_poolx_start_addr {
+ uint64_t u64;
+ struct cvmx_fpa_poolx_start_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63:31;
+ uint64_t addr:33;
+#else
+ uint64_t addr:33;
+ uint64_t reserved_33_63:31;
+#endif
+ } s;
+};
+
+union cvmx_fpa_poolx_threshold {
+ uint64_t u64;
+ struct cvmx_fpa_poolx_threshold_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t thresh:32;
+#else
+ uint64_t thresh:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_fpa_poolx_threshold_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t thresh:29;
+#else
+ uint64_t thresh:29;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn61xx;
+};
+
+union cvmx_fpa_quex_available {
+ uint64_t u64;
+ struct cvmx_fpa_quex_available_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t que_siz:32;
+#else
+ uint64_t que_siz:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_fpa_quex_available_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t que_siz:29;
+#else
+ uint64_t que_siz:29;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn30xx;
+};
+
+union cvmx_fpa_quex_page_index {
+ uint64_t u64;
+ struct cvmx_fpa_quex_page_index_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t pg_num:25;
+#else
+ uint64_t pg_num:25;
+ uint64_t reserved_25_63:39;
+#endif
+ } s;
+};
+
+union cvmx_fpa_que8_page_index {
+ uint64_t u64;
+ struct cvmx_fpa_que8_page_index_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t pg_num:25;
+#else
+ uint64_t pg_num:25;
+ uint64_t reserved_25_63:39;
+#endif
+ } s;
+};
+
+union cvmx_fpa_que_act {
+ uint64_t u64;
+ struct cvmx_fpa_que_act_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t act_que:3;
+ uint64_t act_indx:26;
+#else
+ uint64_t act_indx:26;
+ uint64_t act_que:3;
+ uint64_t reserved_29_63:35;
+#endif
+ } s;
+};
+
+union cvmx_fpa_que_exp {
+ uint64_t u64;
+ struct cvmx_fpa_que_exp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t exp_que:3;
+ uint64_t exp_indx:26;
+#else
+ uint64_t exp_indx:26;
+ uint64_t exp_que:3;
+ uint64_t reserved_29_63:35;
+#endif
+ } s;
+};
+
+union cvmx_fpa_wart_ctl {
+ uint64_t u64;
+ struct cvmx_fpa_wart_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t ctl:16;
+#else
+ uint64_t ctl:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_fpa_wart_status {
+ uint64_t u64;
+ struct cvmx_fpa_wart_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t status:32;
+#else
+ uint64_t status:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_fpa_wqe_threshold {
+ uint64_t u64;
+ struct cvmx_fpa_wqe_threshold_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t thresh:32;
+#else
+ uint64_t thresh:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-fpa.h b/arch/mips/include/asm/octeon/cvmx-fpa.h
new file mode 100644
index 000000000..29ae63606
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-fpa.h
@@ -0,0 +1,308 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the hardware Free Pool Allocator.
+ *
+ */
+
+#ifndef __CVMX_FPA_H__
+#define __CVMX_FPA_H__
+
+#include <linux/delay.h>
+
+#include <asm/octeon/cvmx-address.h>
+#include <asm/octeon/cvmx-fpa-defs.h>
+
+#define CVMX_FPA_NUM_POOLS 8
+#define CVMX_FPA_MIN_BLOCK_SIZE 128
+#define CVMX_FPA_ALIGNMENT 128
+
+/**
+ * Structure describing the data format used for stores to the FPA.
+ */
+typedef union {
+ uint64_t u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /*
+ * the (64-bit word) location in scratchpad to write
+ * to (if len != 0)
+ */
+ uint64_t scraddr:8;
+ /* the number of words in the response (0 => no response) */
+ uint64_t len:8;
+ /* the ID of the device on the non-coherent bus */
+ uint64_t did:8;
+ /*
+ * the address that will appear in the first tick on
+ * the NCB bus.
+ */
+ uint64_t addr:40;
+#else
+ uint64_t addr:40;
+ uint64_t did:8;
+ uint64_t len:8;
+ uint64_t scraddr:8;
+#endif
+ } s;
+} cvmx_fpa_iobdma_data_t;
+
+/**
+ * Structure describing the current state of a FPA pool.
+ */
+typedef struct {
+ /* Name it was created under */
+ const char *name;
+ /* Size of each block */
+ uint64_t size;
+ /* The base memory address of whole block */
+ void *base;
+ /* The number of elements in the pool at creation */
+ uint64_t starting_element_count;
+} cvmx_fpa_pool_info_t;
+
+/**
+ * Current state of all the pools. Use access functions
+ * instead of using it directly.
+ */
+extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
+
+/* CSR typedefs have been moved to cvmx-csr-*.h */
+
+/**
+ * Return the name of the pool
+ *
+ * @pool: Pool to get the name of
+ * Returns The name
+ */
+static inline const char *cvmx_fpa_get_name(uint64_t pool)
+{
+ return cvmx_fpa_pool_info[pool].name;
+}
+
+/**
+ * Return the base of the pool
+ *
+ * @pool: Pool to get the base of
+ * Returns The base
+ */
+static inline void *cvmx_fpa_get_base(uint64_t pool)
+{
+ return cvmx_fpa_pool_info[pool].base;
+}
+
+/**
+ * Check if a pointer belongs to an FPA pool. Return non-zero
+ * if the supplied pointer is inside the memory controlled by
+ * an FPA pool.
+ *
+ * @pool: Pool to check
+ * @ptr: Pointer to check
+ * Returns Non-zero if pointer is in the pool. Zero if not
+ */
+static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
+{
+ return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
+ ((char *)ptr <
+ ((char *)(cvmx_fpa_pool_info[pool].base)) +
+ cvmx_fpa_pool_info[pool].size *
+ cvmx_fpa_pool_info[pool].starting_element_count));
+}
+
+/**
+ * Enable the FPA for use. Must be performed after any CSR
+ * configuration but before any other FPA functions.
+ */
+static inline void cvmx_fpa_enable(void)
+{
+ union cvmx_fpa_ctl_status status;
+
+ status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
+ if (status.s.enb) {
+ cvmx_dprintf
+ ("Warning: Enabling FPA when FPA already enabled.\n");
+ }
+
+ /*
+ * Do runtime check as we allow pass1 compiled code to run on
+ * pass2 chips.
+ */
+ if (cvmx_octeon_is_pass1()) {
+ union cvmx_fpa_fpfx_marks marks;
+ int i;
+ for (i = 1; i < 8; i++) {
+ marks.u64 =
+ cvmx_read_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull);
+ marks.s.fpf_wr = 0xe0;
+ cvmx_write_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull,
+ marks.u64);
+ }
+
+ /* Enforce a 10 cycle delay between config and enable */
+ __delay(10);
+ }
+
+ /* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
+ status.u64 = 0;
+ status.s.enb = 1;
+ cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
+}
+
+/**
+ * Get a new block from the FPA
+ *
+ * @pool: Pool to get the block from
+ * Returns Pointer to the block or NULL on failure
+ */
+static inline void *cvmx_fpa_alloc(uint64_t pool)
+{
+ uint64_t address =
+ cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
+ if (address)
+ return cvmx_phys_to_ptr(address);
+ else
+ return NULL;
+}
+
+/**
+ * Asynchronously get a new block from the FPA
+ *
+ * @scr_addr: Local scratch address to put response in. This is a byte address,
+ * but must be 8 byte aligned.
+ * @pool: Pool to get the block from
+ */
+static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
+{
+ cvmx_fpa_iobdma_data_t data;
+
+ /*
+ * Hardware only uses 64 bit aligned locations, so convert
+ * from byte address to 64-bit index
+ */
+ data.s.scraddr = scr_addr >> 3;
+ data.s.len = 1;
+ data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
+ data.s.addr = 0;
+ cvmx_send_single(data.u64);
+}
+
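A minimal sketch of how an asynchronous allocation is typically completed: the call above only issues the IOBDMA command, and the result must later be picked up from the local scratchpad. This assumes the CVMX_SYNCIOBDMA barrier and cvmx_scratch_read64() from the companion cvmx-asm.h / cvmx-scratch.h headers; the example_* name is illustrative and not part of this file.

static inline void *example_fpa_async_finish(uint64_t scr_addr)
{
	uint64_t phys;

	CVMX_SYNCIOBDMA;	/* wait for the IOBDMA response to land in the scratchpad */
	phys = cvmx_scratch_read64(scr_addr);	/* same scr_addr passed to cvmx_fpa_async_alloc() */
	return phys ? cvmx_phys_to_ptr(phys) : NULL;	/* 0 means the pool was empty */
}
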
+/**
+ * Free a block allocated from an FPA pool. Does NOT provide memory
+ * ordering in cases where the memory block was modified by the core.
+ *
+ * @ptr: Block to free
+ * @pool: Pool to put it in
+ * @num_cache_lines:
+ * Cache lines to invalidate
+ */
+static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
+ uint64_t num_cache_lines)
+{
+ cvmx_addr_t newptr;
+ newptr.u64 = cvmx_ptr_to_phys(ptr);
+ newptr.sfilldidspace.didspace =
+ CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
+ /* Prevent GCC from reordering around free */
+ barrier();
+ /* value written is number of cache lines not written back */
+ cvmx_write_io(newptr.u64, num_cache_lines);
+}
+
+/**
+ * Free a block allocated from an FPA pool. Provides the required memory
+ * ordering in cases where the memory block was modified by the core.
+ *
+ * @ptr: Block to free
+ * @pool: Pool to put it in
+ * @num_cache_lines:
+ * Cache lines to invalidate
+ */
+static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
+ uint64_t num_cache_lines)
+{
+ cvmx_addr_t newptr;
+ newptr.u64 = cvmx_ptr_to_phys(ptr);
+ newptr.sfilldidspace.didspace =
+ CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
+ /*
+ * Make sure that any previous writes to memory go out before
+ * we free this buffer. This also serves as a barrier to
+ * prevent GCC from reordering operations to after the
+ * free.
+ */
+ CVMX_SYNCWS;
+ /* value written is number of cache lines not written back */
+ cvmx_write_io(newptr.u64, num_cache_lines);
+}
+
+/**
+ * Set up an FPA pool to control a new block of memory.
+ * This can only be called once per pool. Make sure proper
+ * locking enforces this.
+ *
+ * @pool: Pool to initialize
+ * 0 <= pool < 8
+ * @name: Constant character string to name this pool.
+ * String is not copied.
+ * @buffer: Pointer to the block of memory to use. This must be
+ * accessible by all processors and external hardware.
+ * @block_size: Size for each block controlled by the FPA
+ * @num_blocks: Number of blocks
+ *
+ * Returns 0 on Success,
+ * -1 on failure
+ */
+extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
+ uint64_t block_size, uint64_t num_blocks);
+
+/**
+ * Shut down a memory pool and validate that it contains all of
+ * the buffers originally placed in it. This should only be
+ * called by one processor after all hardware has finished
+ * using the pool.
+ *
+ * @pool: Pool to shutdown
+ * Returns Zero on success
+ * - Positive is count of missing buffers
+ * - Negative is too many buffers or corrupted pointers
+ */
+extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
+
+/**
+ * Get the size of blocks controlled by the pool
+ * This is resolved to a constant at compile time.
+ *
+ * @pool: Pool to access
+ * Returns Size of the block in bytes
+ */
+uint64_t cvmx_fpa_get_block_size(uint64_t pool);
+
+#endif /* __CVMX_FPA_H__ */
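Taken together, the pool API above follows a setup, enable, alloc/free, shutdown lifecycle. A minimal sketch, assuming an illustrative pool number and block geometry and a caller-supplied buffer that is CVMX_FPA_ALIGNMENT-aligned and visible to all cores; the EXAMPLE_* and example_* names are not part of this header.

#define EXAMPLE_POOL       3	/* any value 0..CVMX_FPA_NUM_POOLS-1 */
#define EXAMPLE_BLOCK_SIZE 2048	/* at least CVMX_FPA_MIN_BLOCK_SIZE */
#define EXAMPLE_NUM_BLOCKS 64

static int example_fpa_lifecycle(void *buffer)
{
	void *block;

	/* Hand the aligned, globally visible memory over to the hardware pool. */
	if (cvmx_fpa_setup_pool(EXAMPLE_POOL, "example", buffer,
				EXAMPLE_BLOCK_SIZE, EXAMPLE_NUM_BLOCKS))
		return -1;

	/* Must follow all CSR configuration and precede any alloc/free. */
	cvmx_fpa_enable();

	block = cvmx_fpa_alloc(EXAMPLE_POOL);
	if (block) {
		/* ... hardware or software uses the buffer here ... */
		cvmx_fpa_free(block, EXAMPLE_POOL, 0);
	}

	/* Non-zero return means buffers are missing or pointers are corrupted. */
	return cvmx_fpa_shutdown_pool(EXAMPLE_POOL) ? -1 : 0;
}
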
diff --git a/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h b/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h
new file mode 100644
index 000000000..bdba676f1
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h
@@ -0,0 +1,2259 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (C) 2003-2018 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_GMXX_DEFS_H__
+#define __CVMX_GMXX_DEFS_H__
+
+static inline uint64_t CVMX_GMXX_HG2_CONTROL(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000550ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000550ull) + (block_id) * 0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_INF_MODE(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + (block_id) * 0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_PRTX_CFG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000010ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000010ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000010ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM0(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000180ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000180ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000180ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM1(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000188ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000188ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000188ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM2(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000190ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000190ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000190ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM3(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000198ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000198ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000198ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM4(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM5(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_ADR_CAM_EN(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000108ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000108ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000108ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_ADR_CTL(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000100ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000100ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000100ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_FRM_CTL(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000018ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000018ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000018ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+#define CVMX_GMXX_RXX_FRM_MAX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000030ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
+#define CVMX_GMXX_RXX_FRM_MIN(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000028ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
+
+static inline uint64_t CVMX_GMXX_RXX_INT_EN(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000008ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000008ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000008ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_INT_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000000ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000000ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000000ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_RXX_JABBER(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000038ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000038ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000038ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+#define CVMX_GMXX_RXX_RX_INBND(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000060ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
+
+static inline uint64_t CVMX_GMXX_RX_PRTS(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000410ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000410ull) + (block_id) * 0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_RX_XAUI_CTL(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000530ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000530ull) + (block_id) * 0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_SMACX(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000230ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000230ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000230ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_TXX_BURST(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000228ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000228ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000228ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+#define CVMX_GMXX_TXX_CLK(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000208ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
+static inline uint64_t CVMX_GMXX_TXX_CTL(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000270ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000270ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000270ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000248ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000248ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000248ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_TIME(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000238ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000238ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000238ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_TXX_SLOT(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000220ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000220ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000220ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_TXX_THRESH(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000210ull) + ((offset) + (block_id) * 0x0ull) * 2048;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000210ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000210ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
+}
+
+static inline uint64_t CVMX_GMXX_TX_INT_EN(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000508ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000508ull) + (block_id) * 0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_TX_INT_REG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000500ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000500ull) + (block_id) * 0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_TX_OVR_BP(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + (block_id) * 0x8000000ull;
+}
+
+static inline uint64_t CVMX_GMXX_TX_PRTS(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000480ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000480ull) + (block_id) * 0x8000000ull;
+}
+
+#define CVMX_GMXX_TX_SPI_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800080004C0ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_GMXX_TX_SPI_MAX(block_id) (CVMX_ADD_IO_SEG(0x00011800080004B0ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_GMXX_TX_SPI_THRESH(block_id) (CVMX_ADD_IO_SEG(0x00011800080004B8ull) + ((block_id) & 1) * 0x8000000ull)
+static inline uint64_t CVMX_GMXX_TX_XAUI_CTL(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180008000528ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180008000528ull) + (block_id) * 0x8000000ull;
+}
+
+void __cvmx_interrupt_gmxx_enable(int interface);
+
+union cvmx_gmxx_hg2_control {
+ uint64_t u64;
+ struct cvmx_gmxx_hg2_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63:45;
+ uint64_t hg2tx_en:1;
+ uint64_t hg2rx_en:1;
+ uint64_t phys_en:1;
+ uint64_t logl_en:16;
+#else
+ uint64_t logl_en:16;
+ uint64_t phys_en:1;
+ uint64_t hg2rx_en:1;
+ uint64_t hg2tx_en:1;
+ uint64_t reserved_19_63:45;
+#endif
+ } s;
+};
+
+union cvmx_gmxx_inf_mode {
+ uint64_t u64;
+ struct cvmx_gmxx_inf_mode_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t rate:4;
+ uint64_t reserved_12_15:4;
+ uint64_t speed:4;
+ uint64_t reserved_7_7:1;
+ uint64_t mode:3;
+ uint64_t reserved_3_3:1;
+ uint64_t p0mii:1;
+ uint64_t en:1;
+ uint64_t type:1;
+#else
+ uint64_t type:1;
+ uint64_t en:1;
+ uint64_t p0mii:1;
+ uint64_t reserved_3_3:1;
+ uint64_t mode:3;
+ uint64_t reserved_7_7:1;
+ uint64_t speed:4;
+ uint64_t reserved_12_15:4;
+ uint64_t rate:4;
+ uint64_t reserved_20_63:44;
+#endif
+ } s;
+ struct cvmx_gmxx_inf_mode_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t p0mii:1;
+ uint64_t en:1;
+ uint64_t type:1;
+#else
+ uint64_t type:1;
+ uint64_t en:1;
+ uint64_t p0mii:1;
+ uint64_t reserved_3_63:61;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_inf_mode_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t en:1;
+ uint64_t type:1;
+#else
+ uint64_t type:1;
+ uint64_t en:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } cn31xx;
+ struct cvmx_gmxx_inf_mode_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t speed:2;
+ uint64_t reserved_6_7:2;
+ uint64_t mode:2;
+ uint64_t reserved_2_3:2;
+ uint64_t en:1;
+ uint64_t type:1;
+#else
+ uint64_t type:1;
+ uint64_t en:1;
+ uint64_t reserved_2_3:2;
+ uint64_t mode:2;
+ uint64_t reserved_6_7:2;
+ uint64_t speed:2;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_inf_mode_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t speed:4;
+ uint64_t reserved_5_7:3;
+ uint64_t mode:1;
+ uint64_t reserved_2_3:2;
+ uint64_t en:1;
+ uint64_t type:1;
+#else
+ uint64_t type:1;
+ uint64_t en:1;
+ uint64_t reserved_2_3:2;
+ uint64_t mode:1;
+ uint64_t reserved_5_7:3;
+ uint64_t speed:4;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn61xx;
+ struct cvmx_gmxx_inf_mode_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t rate:4;
+ uint64_t reserved_12_15:4;
+ uint64_t speed:4;
+ uint64_t reserved_5_7:3;
+ uint64_t mode:1;
+ uint64_t reserved_2_3:2;
+ uint64_t en:1;
+ uint64_t type:1;
+#else
+ uint64_t type:1;
+ uint64_t en:1;
+ uint64_t reserved_2_3:2;
+ uint64_t mode:1;
+ uint64_t reserved_5_7:3;
+ uint64_t speed:4;
+ uint64_t reserved_12_15:4;
+ uint64_t rate:4;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn66xx;
+ struct cvmx_gmxx_inf_mode_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t speed:4;
+ uint64_t reserved_7_7:1;
+ uint64_t mode:3;
+ uint64_t reserved_2_3:2;
+ uint64_t en:1;
+ uint64_t type:1;
+#else
+ uint64_t type:1;
+ uint64_t en:1;
+ uint64_t reserved_2_3:2;
+ uint64_t mode:3;
+ uint64_t reserved_7_7:1;
+ uint64_t speed:4;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn68xx;
+};
+
+union cvmx_gmxx_prtx_cfg {
+ uint64_t u64;
+ struct cvmx_gmxx_prtx_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t pknd:6;
+ uint64_t reserved_14_15:2;
+ uint64_t tx_idle:1;
+ uint64_t rx_idle:1;
+ uint64_t reserved_9_11:3;
+ uint64_t speed_msb:1;
+ uint64_t reserved_4_7:4;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+#else
+ uint64_t en:1;
+ uint64_t speed:1;
+ uint64_t duplex:1;
+ uint64_t slottime:1;
+ uint64_t reserved_4_7:4;
+ uint64_t speed_msb:1;
+ uint64_t reserved_9_11:3;
+ uint64_t rx_idle:1;
+ uint64_t tx_idle:1;
+ uint64_t reserved_14_15:2;
+ uint64_t pknd:6;
+ uint64_t reserved_22_63:42;
+#endif
+ } s;
+ struct cvmx_gmxx_prtx_cfg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+#else
+ uint64_t en:1;
+ uint64_t speed:1;
+ uint64_t duplex:1;
+ uint64_t slottime:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_prtx_cfg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t tx_idle:1;
+ uint64_t rx_idle:1;
+ uint64_t reserved_9_11:3;
+ uint64_t speed_msb:1;
+ uint64_t reserved_4_7:4;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+#else
+ uint64_t en:1;
+ uint64_t speed:1;
+ uint64_t duplex:1;
+ uint64_t slottime:1;
+ uint64_t reserved_4_7:4;
+ uint64_t speed_msb:1;
+ uint64_t reserved_9_11:3;
+ uint64_t rx_idle:1;
+ uint64_t tx_idle:1;
+ uint64_t reserved_14_63:50;
+#endif
+ } cn52xx;
+};
+
+union cvmx_gmxx_rxx_adr_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t cam_mode:1;
+ uint64_t mcst:2;
+ uint64_t bcst:1;
+#else
+ uint64_t bcst:1;
+ uint64_t mcst:2;
+ uint64_t cam_mode:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_gmxx_rxx_frm_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t ptp_mode:1;
+ uint64_t reserved_11_11:1;
+ uint64_t null_dis:1;
+ uint64_t pre_align:1;
+ uint64_t pad_len:1;
+ uint64_t vlan_len:1;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+#else
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_11:1;
+ uint64_t ptp_mode:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_frm_ctl_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t pad_len:1;
+ uint64_t vlan_len:1;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+#else
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t vlan_len:1;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+#else
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } cn31xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t null_dis:1;
+ uint64_t pre_align:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+#else
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_63:53;
+#endif
+ } cn50xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t pre_align:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+#else
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_align:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_frm_ctl_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t null_dis:1;
+ uint64_t pre_align:1;
+ uint64_t pad_len:1;
+ uint64_t vlan_len:1;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+#else
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_63:53;
+#endif
+ } cn58xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t ptp_mode:1;
+ uint64_t reserved_11_11:1;
+ uint64_t null_dis:1;
+ uint64_t pre_align:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+#else
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_11:1;
+ uint64_t ptp_mode:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } cn61xx;
+};
+
+union cvmx_gmxx_rxx_frm_max {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_max_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t len:16;
+#else
+ uint64_t len:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_gmxx_rxx_frm_min {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_min_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t len:16;
+#else
+ uint64_t len:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_gmxx_rxx_int_en {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t hg2cc:1;
+ uint64_t hg2fld:1;
+ uint64_t undat:1;
+ uint64_t uneop:1;
+ uint64_t unsop:1;
+ uint64_t bad_term:1;
+ uint64_t bad_seq:1;
+ uint64_t rem_fault:1;
+ uint64_t loc_fault:1;
+ uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+#else
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t hg2fld:1;
+ uint64_t hg2cc:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_int_en_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63:45;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+#else
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t reserved_19_63:45;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_rxx_int_en_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t reserved_6_6:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t reserved_2_2:1;
+ uint64_t carext:1;
+ uint64_t reserved_0_0:1;
+#else
+ uint64_t reserved_0_0:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t reserved_6_6:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn50xx;
+ struct cvmx_gmxx_rxx_int_en_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t hg2cc:1;
+ uint64_t hg2fld:1;
+ uint64_t undat:1;
+ uint64_t uneop:1;
+ uint64_t unsop:1;
+ uint64_t bad_term:1;
+ uint64_t bad_seq:1;
+ uint64_t rem_fault:1;
+ uint64_t loc_fault:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_16_18:3;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t reserved_2_2:1;
+ uint64_t carext:1;
+ uint64_t reserved_0_0:1;
+#else
+ uint64_t reserved_0_0:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t hg2fld:1;
+ uint64_t hg2cc:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_rxx_int_en_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63:37;
+ uint64_t undat:1;
+ uint64_t uneop:1;
+ uint64_t unsop:1;
+ uint64_t bad_term:1;
+ uint64_t bad_seq:1;
+ uint64_t rem_fault:1;
+ uint64_t loc_fault:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_16_18:3;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t reserved_2_2:1;
+ uint64_t carext:1;
+ uint64_t reserved_0_0:1;
+#else
+ uint64_t reserved_0_0:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t reserved_27_63:37;
+#endif
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_int_en_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+#else
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn58xx;
+ struct cvmx_gmxx_rxx_int_en_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t hg2cc:1;
+ uint64_t hg2fld:1;
+ uint64_t undat:1;
+ uint64_t uneop:1;
+ uint64_t unsop:1;
+ uint64_t bad_term:1;
+ uint64_t bad_seq:1;
+ uint64_t rem_fault:1;
+ uint64_t loc_fault:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_16_18:3;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t reserved_2_2:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+#else
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t hg2fld:1;
+ uint64_t hg2cc:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn61xx;
+};
+
+union cvmx_gmxx_rxx_int_reg {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t hg2cc:1;
+ uint64_t hg2fld:1;
+ uint64_t undat:1;
+ uint64_t uneop:1;
+ uint64_t unsop:1;
+ uint64_t bad_term:1;
+ uint64_t bad_seq:1;
+ uint64_t rem_fault:1;
+ uint64_t loc_fault:1;
+ uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+#else
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t hg2fld:1;
+ uint64_t hg2cc:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } s;
+ struct cvmx_gmxx_rxx_int_reg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63:45;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+#else
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t reserved_19_63:45;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_rxx_int_reg_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t reserved_6_6:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t reserved_2_2:1;
+ uint64_t carext:1;
+ uint64_t reserved_0_0:1;
+#else
+ uint64_t reserved_0_0:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t reserved_6_6:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn50xx;
+ struct cvmx_gmxx_rxx_int_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t hg2cc:1;
+ uint64_t hg2fld:1;
+ uint64_t undat:1;
+ uint64_t uneop:1;
+ uint64_t unsop:1;
+ uint64_t bad_term:1;
+ uint64_t bad_seq:1;
+ uint64_t rem_fault:1;
+ uint64_t loc_fault:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_16_18:3;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t reserved_2_2:1;
+ uint64_t carext:1;
+ uint64_t reserved_0_0:1;
+#else
+ uint64_t reserved_0_0:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t hg2fld:1;
+ uint64_t hg2cc:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_rxx_int_reg_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63:37;
+ uint64_t undat:1;
+ uint64_t uneop:1;
+ uint64_t unsop:1;
+ uint64_t bad_term:1;
+ uint64_t bad_seq:1;
+ uint64_t rem_fault:1;
+ uint64_t loc_fault:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_16_18:3;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t reserved_2_2:1;
+ uint64_t carext:1;
+ uint64_t reserved_0_0:1;
+#else
+ uint64_t reserved_0_0:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t reserved_27_63:37;
+#endif
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_int_reg_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+#else
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn58xx;
+ struct cvmx_gmxx_rxx_int_reg_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t hg2cc:1;
+ uint64_t hg2fld:1;
+ uint64_t undat:1;
+ uint64_t uneop:1;
+ uint64_t unsop:1;
+ uint64_t bad_term:1;
+ uint64_t bad_seq:1;
+ uint64_t rem_fault:1;
+ uint64_t loc_fault:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_16_18:3;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t reserved_2_2:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+#else
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t hg2fld:1;
+ uint64_t hg2cc:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn61xx;
+};
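/*
 * Illustrative sketch, not part of the patch: enabling a couple of RX
 * interrupt sources through the cvmx_gmxx_rxx_int_en union above.  The
 * "s" view is the superset layout; chips that lack a field keep the bit
 * reserved (compare the cnXXxx views).  CVMX_GMXX_RXX_INT_EN() and
 * cvmx_read_csr()/cvmx_write_csr() are assumed to come from earlier in
 * this header and from <asm/octeon/cvmx.h>.
 */
static inline void example_gmx_rx_int_enable(int index, int interface)
{
	union cvmx_gmxx_rxx_int_en en;

	en.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(index, interface));
	en.s.fcserr = 1;	/* frame check sequence errors */
	en.s.jabber = 1;	/* oversized frames */
	cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface), en.u64);
}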
+
+union cvmx_gmxx_rxx_jabber {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_jabber_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t cnt:16;
+#else
+ uint64_t cnt:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_gmxx_rxx_rx_inbnd {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_rx_inbnd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t duplex:1;
+ uint64_t speed:2;
+ uint64_t status:1;
+#else
+ uint64_t status:1;
+ uint64_t speed:2;
+ uint64_t duplex:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_gmxx_rx_prts {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_prts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t prts:3;
+#else
+ uint64_t prts:3;
+ uint64_t reserved_3_63:61;
+#endif
+ } s;
+};
+
+union cvmx_gmxx_rx_xaui_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_xaui_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t status:2;
+#else
+ uint64_t status:2;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_gmxx_txx_thresh {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_thresh_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t cnt:10;
+#else
+ uint64_t cnt:10;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+ struct cvmx_gmxx_txx_thresh_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t cnt:7;
+#else
+ uint64_t cnt:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_txx_thresh_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t cnt:9;
+#else
+ uint64_t cnt:9;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn38xx;
+};
+
+union cvmx_gmxx_tx_int_en {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t xchange:1;
+ uint64_t ptp_lost:4;
+ uint64_t late_col:4;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:4;
+ uint64_t reserved_6_7:2;
+ uint64_t xscol:4;
+ uint64_t xsdef:4;
+ uint64_t late_col:4;
+ uint64_t ptp_lost:4;
+ uint64_t xchange:1;
+ uint64_t reserved_25_63:39;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_int_en_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63:45;
+ uint64_t late_col:3;
+ uint64_t reserved_15_15:1;
+ uint64_t xsdef:3;
+ uint64_t reserved_11_11:1;
+ uint64_t xscol:3;
+ uint64_t reserved_5_7:3;
+ uint64_t undflw:3;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:3;
+ uint64_t reserved_5_7:3;
+ uint64_t xscol:3;
+ uint64_t reserved_11_11:1;
+ uint64_t xsdef:3;
+ uint64_t reserved_15_15:1;
+ uint64_t late_col:3;
+ uint64_t reserved_19_63:45;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_tx_int_en_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63:49;
+ uint64_t xsdef:3;
+ uint64_t reserved_11_11:1;
+ uint64_t xscol:3;
+ uint64_t reserved_5_7:3;
+ uint64_t undflw:3;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:3;
+ uint64_t reserved_5_7:3;
+ uint64_t xscol:3;
+ uint64_t reserved_11_11:1;
+ uint64_t xsdef:3;
+ uint64_t reserved_15_63:49;
+#endif
+ } cn31xx;
+ struct cvmx_gmxx_tx_int_en_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t late_col:4;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t ncb_nxa:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t ncb_nxa:1;
+ uint64_t undflw:4;
+ uint64_t reserved_6_7:2;
+ uint64_t xscol:4;
+ uint64_t xsdef:4;
+ uint64_t late_col:4;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn38xx;
+ struct cvmx_gmxx_tx_int_en_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t ncb_nxa:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t ncb_nxa:1;
+ uint64_t undflw:4;
+ uint64_t reserved_6_7:2;
+ uint64_t xscol:4;
+ uint64_t xsdef:4;
+ uint64_t reserved_16_63:48;
+#endif
+ } cn38xxp2;
+ struct cvmx_gmxx_tx_int_en_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t late_col:4;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:4;
+ uint64_t reserved_6_7:2;
+ uint64_t xscol:4;
+ uint64_t xsdef:4;
+ uint64_t late_col:4;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_tx_int_en_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t ptp_lost:4;
+ uint64_t late_col:4;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:4;
+ uint64_t reserved_6_7:2;
+ uint64_t xscol:4;
+ uint64_t xsdef:4;
+ uint64_t late_col:4;
+ uint64_t ptp_lost:4;
+ uint64_t reserved_24_63:40;
+#endif
+ } cn63xx;
+ struct cvmx_gmxx_tx_int_en_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t xchange:1;
+ uint64_t ptp_lost:4;
+ uint64_t late_col:4;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t pko_nxp:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t pko_nxp:1;
+ uint64_t undflw:4;
+ uint64_t reserved_6_7:2;
+ uint64_t xscol:4;
+ uint64_t xsdef:4;
+ uint64_t late_col:4;
+ uint64_t ptp_lost:4;
+ uint64_t xchange:1;
+ uint64_t reserved_25_63:39;
+#endif
+ } cn68xx;
+ struct cvmx_gmxx_tx_int_en_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t xchange:1;
+ uint64_t reserved_22_23:2;
+ uint64_t ptp_lost:2;
+ uint64_t reserved_18_19:2;
+ uint64_t late_col:2;
+ uint64_t reserved_14_15:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xscol:2;
+ uint64_t reserved_4_7:4;
+ uint64_t undflw:2;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:2;
+ uint64_t reserved_4_7:4;
+ uint64_t xscol:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_14_15:2;
+ uint64_t late_col:2;
+ uint64_t reserved_18_19:2;
+ uint64_t ptp_lost:2;
+ uint64_t reserved_22_23:2;
+ uint64_t xchange:1;
+ uint64_t reserved_25_63:39;
+#endif
+ } cnf71xx;
+};
+
+union cvmx_gmxx_tx_int_reg {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t xchange:1;
+ uint64_t ptp_lost:4;
+ uint64_t late_col:4;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:4;
+ uint64_t reserved_6_7:2;
+ uint64_t xscol:4;
+ uint64_t xsdef:4;
+ uint64_t late_col:4;
+ uint64_t ptp_lost:4;
+ uint64_t xchange:1;
+ uint64_t reserved_25_63:39;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_int_reg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63:45;
+ uint64_t late_col:3;
+ uint64_t reserved_15_15:1;
+ uint64_t xsdef:3;
+ uint64_t reserved_11_11:1;
+ uint64_t xscol:3;
+ uint64_t reserved_5_7:3;
+ uint64_t undflw:3;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:3;
+ uint64_t reserved_5_7:3;
+ uint64_t xscol:3;
+ uint64_t reserved_11_11:1;
+ uint64_t xsdef:3;
+ uint64_t reserved_15_15:1;
+ uint64_t late_col:3;
+ uint64_t reserved_19_63:45;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_tx_int_reg_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63:49;
+ uint64_t xsdef:3;
+ uint64_t reserved_11_11:1;
+ uint64_t xscol:3;
+ uint64_t reserved_5_7:3;
+ uint64_t undflw:3;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:3;
+ uint64_t reserved_5_7:3;
+ uint64_t xscol:3;
+ uint64_t reserved_11_11:1;
+ uint64_t xsdef:3;
+ uint64_t reserved_15_63:49;
+#endif
+ } cn31xx;
+ struct cvmx_gmxx_tx_int_reg_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t late_col:4;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t ncb_nxa:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t ncb_nxa:1;
+ uint64_t undflw:4;
+ uint64_t reserved_6_7:2;
+ uint64_t xscol:4;
+ uint64_t xsdef:4;
+ uint64_t late_col:4;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn38xx;
+ struct cvmx_gmxx_tx_int_reg_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t ncb_nxa:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t ncb_nxa:1;
+ uint64_t undflw:4;
+ uint64_t reserved_6_7:2;
+ uint64_t xscol:4;
+ uint64_t xsdef:4;
+ uint64_t reserved_16_63:48;
+#endif
+ } cn38xxp2;
+ struct cvmx_gmxx_tx_int_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t late_col:4;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:4;
+ uint64_t reserved_6_7:2;
+ uint64_t xscol:4;
+ uint64_t xsdef:4;
+ uint64_t late_col:4;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn52xx;
+ struct cvmx_gmxx_tx_int_reg_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t ptp_lost:4;
+ uint64_t late_col:4;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:4;
+ uint64_t reserved_6_7:2;
+ uint64_t xscol:4;
+ uint64_t xsdef:4;
+ uint64_t late_col:4;
+ uint64_t ptp_lost:4;
+ uint64_t reserved_24_63:40;
+#endif
+ } cn63xx;
+ struct cvmx_gmxx_tx_int_reg_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t xchange:1;
+ uint64_t ptp_lost:4;
+ uint64_t late_col:4;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t pko_nxp:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t pko_nxp:1;
+ uint64_t undflw:4;
+ uint64_t reserved_6_7:2;
+ uint64_t xscol:4;
+ uint64_t xsdef:4;
+ uint64_t late_col:4;
+ uint64_t ptp_lost:4;
+ uint64_t xchange:1;
+ uint64_t reserved_25_63:39;
+#endif
+ } cn68xx;
+ struct cvmx_gmxx_tx_int_reg_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t xchange:1;
+ uint64_t reserved_22_23:2;
+ uint64_t ptp_lost:2;
+ uint64_t reserved_18_19:2;
+ uint64_t late_col:2;
+ uint64_t reserved_14_15:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xscol:2;
+ uint64_t reserved_4_7:4;
+ uint64_t undflw:2;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+#else
+ uint64_t pko_nxa:1;
+ uint64_t reserved_1_1:1;
+ uint64_t undflw:2;
+ uint64_t reserved_4_7:4;
+ uint64_t xscol:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_14_15:2;
+ uint64_t late_col:2;
+ uint64_t reserved_18_19:2;
+ uint64_t ptp_lost:2;
+ uint64_t reserved_22_23:2;
+ uint64_t xchange:1;
+ uint64_t reserved_25_63:39;
+#endif
+ } cnf71xx;
+};
+
+union cvmx_gmxx_tx_ovr_bp {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_ovr_bp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t tx_prt_bp:16;
+ uint64_t reserved_12_31:20;
+ uint64_t en:4;
+ uint64_t bp:4;
+ uint64_t ign_full:4;
+#else
+ uint64_t ign_full:4;
+ uint64_t bp:4;
+ uint64_t en:4;
+ uint64_t reserved_12_31:20;
+ uint64_t tx_prt_bp:16;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_ovr_bp_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t en:3;
+ uint64_t reserved_7_7:1;
+ uint64_t bp:3;
+ uint64_t reserved_3_3:1;
+ uint64_t ign_full:3;
+#else
+ uint64_t ign_full:3;
+ uint64_t reserved_3_3:1;
+ uint64_t bp:3;
+ uint64_t reserved_7_7:1;
+ uint64_t en:3;
+ uint64_t reserved_11_63:53;
+#endif
+ } cn30xx;
+ struct cvmx_gmxx_tx_ovr_bp_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t en:4;
+ uint64_t bp:4;
+ uint64_t ign_full:4;
+#else
+ uint64_t ign_full:4;
+ uint64_t bp:4;
+ uint64_t en:4;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn38xx;
+ struct cvmx_gmxx_tx_ovr_bp_cnf71xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t tx_prt_bp:16;
+ uint64_t reserved_10_31:22;
+ uint64_t en:2;
+ uint64_t reserved_6_7:2;
+ uint64_t bp:2;
+ uint64_t reserved_2_3:2;
+ uint64_t ign_full:2;
+#else
+ uint64_t ign_full:2;
+ uint64_t reserved_2_3:2;
+ uint64_t bp:2;
+ uint64_t reserved_6_7:2;
+ uint64_t en:2;
+ uint64_t reserved_10_31:22;
+ uint64_t tx_prt_bp:16;
+ uint64_t reserved_48_63:16;
+#endif
+ } cnf71xx;
+};
+
+union cvmx_gmxx_tx_prts {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_prts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t prts:5;
+#else
+ uint64_t prts:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_gmxx_tx_spi_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_spi_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t tpa_clr:1;
+ uint64_t cont_pkt:1;
+#else
+ uint64_t cont_pkt:1;
+ uint64_t tpa_clr:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_gmxx_tx_spi_max {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_spi_max_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63:41;
+ uint64_t slice:7;
+ uint64_t max2:8;
+ uint64_t max1:8;
+#else
+ uint64_t max1:8;
+ uint64_t max2:8;
+ uint64_t slice:7;
+ uint64_t reserved_23_63:41;
+#endif
+ } s;
+ struct cvmx_gmxx_tx_spi_max_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t max2:8;
+ uint64_t max1:8;
+#else
+ uint64_t max1:8;
+ uint64_t max2:8;
+ uint64_t reserved_16_63:48;
+#endif
+ } cn38xx;
+};
+
+union cvmx_gmxx_tx_spi_thresh {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_spi_thresh_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t thresh:6;
+#else
+ uint64_t thresh:6;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_gmxx_tx_xaui_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_xaui_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t hg_pause_hgi:2;
+ uint64_t hg_en:1;
+ uint64_t reserved_7_7:1;
+ uint64_t ls_byp:1;
+ uint64_t ls:2;
+ uint64_t reserved_2_3:2;
+ uint64_t uni_en:1;
+ uint64_t dic_en:1;
+#else
+ uint64_t dic_en:1;
+ uint64_t uni_en:1;
+ uint64_t reserved_2_3:2;
+ uint64_t ls:2;
+ uint64_t ls_byp:1;
+ uint64_t reserved_7_7:1;
+ uint64_t hg_en:1;
+ uint64_t hg_pause_hgi:2;
+ uint64_t reserved_11_63:53;
+#endif
+ } s;
+};
+
+#endif
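/*
 * Illustrative sketch, not part of the patch: the usual read/acknowledge
 * pattern for the interrupt status unions defined in this header.  The
 * CVMX_GMXX_RXX_INT_REG() address macro is assumed to be defined earlier
 * in the file, and the status bits are assumed to be write-1-to-clear.
 */
static inline int example_gmx_rx_saw_fcs_error(int index, int interface)
{
	union cvmx_gmxx_rxx_int_reg rx_int;

	/* Snapshot the RX interrupt state for this port on this interface. */
	rx_int.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, interface));

	/* Writing the snapshot back acknowledges the bits that were set. */
	cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface), rx_int.u64);

	return rx_int.s.fcserr;
}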
diff --git a/arch/mips/include/asm/octeon/cvmx-gpio-defs.h b/arch/mips/include/asm/octeon/cvmx-gpio-defs.h
new file mode 100644
index 000000000..5420fa667
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-gpio-defs.h
@@ -0,0 +1,399 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_GPIO_DEFS_H__
+#define __CVMX_GPIO_DEFS_H__
+
+#define CVMX_GPIO_BIT_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001070000000800ull) + ((offset) & 15) * 8)
+#define CVMX_GPIO_BOOT_ENA (CVMX_ADD_IO_SEG(0x00010700000008A8ull))
+#define CVMX_GPIO_CLK_GENX(offset) (CVMX_ADD_IO_SEG(0x00010700000008C0ull) + ((offset) & 3) * 8)
+#define CVMX_GPIO_CLK_QLMX(offset) (CVMX_ADD_IO_SEG(0x00010700000008E0ull) + ((offset) & 1) * 8)
+#define CVMX_GPIO_DBG_ENA (CVMX_ADD_IO_SEG(0x00010700000008A0ull))
+#define CVMX_GPIO_INT_CLR (CVMX_ADD_IO_SEG(0x0001070000000898ull))
+#define CVMX_GPIO_MULTI_CAST (CVMX_ADD_IO_SEG(0x00010700000008B0ull))
+#define CVMX_GPIO_PIN_ENA (CVMX_ADD_IO_SEG(0x00010700000008B8ull))
+#define CVMX_GPIO_RX_DAT (CVMX_ADD_IO_SEG(0x0001070000000880ull))
+#define CVMX_GPIO_TIM_CTL (CVMX_ADD_IO_SEG(0x00010700000008A0ull))
+#define CVMX_GPIO_TX_CLR (CVMX_ADD_IO_SEG(0x0001070000000890ull))
+#define CVMX_GPIO_TX_SET (CVMX_ADD_IO_SEG(0x0001070000000888ull))
+#define CVMX_GPIO_XBIT_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001070000000900ull) + ((offset) & 31) * 8 - 8*16)
+
+union cvmx_gpio_bit_cfgx {
+ uint64_t u64;
+ struct cvmx_gpio_bit_cfgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63:42;
+ uint64_t output_sel:5;
+ uint64_t synce_sel:2;
+ uint64_t clk_gen:1;
+ uint64_t clk_sel:2;
+ uint64_t fil_sel:4;
+ uint64_t fil_cnt:4;
+ uint64_t int_type:1;
+ uint64_t int_en:1;
+ uint64_t rx_xor:1;
+ uint64_t tx_oe:1;
+#else
+ uint64_t tx_oe:1;
+ uint64_t rx_xor:1;
+ uint64_t int_en:1;
+ uint64_t int_type:1;
+ uint64_t fil_cnt:4;
+ uint64_t fil_sel:4;
+ uint64_t clk_sel:2;
+ uint64_t clk_gen:1;
+ uint64_t synce_sel:2;
+ uint64_t output_sel:5;
+ uint64_t reserved_21_63:42;
+#endif
+ } s;
+ struct cvmx_gpio_bit_cfgx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t fil_sel:4;
+ uint64_t fil_cnt:4;
+ uint64_t int_type:1;
+ uint64_t int_en:1;
+ uint64_t rx_xor:1;
+ uint64_t tx_oe:1;
+#else
+ uint64_t tx_oe:1;
+ uint64_t rx_xor:1;
+ uint64_t int_en:1;
+ uint64_t int_type:1;
+ uint64_t fil_cnt:4;
+ uint64_t fil_sel:4;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn30xx;
+ struct cvmx_gpio_bit_cfgx_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63:49;
+ uint64_t clk_gen:1;
+ uint64_t clk_sel:2;
+ uint64_t fil_sel:4;
+ uint64_t fil_cnt:4;
+ uint64_t int_type:1;
+ uint64_t int_en:1;
+ uint64_t rx_xor:1;
+ uint64_t tx_oe:1;
+#else
+ uint64_t tx_oe:1;
+ uint64_t rx_xor:1;
+ uint64_t int_en:1;
+ uint64_t int_type:1;
+ uint64_t fil_cnt:4;
+ uint64_t fil_sel:4;
+ uint64_t clk_sel:2;
+ uint64_t clk_gen:1;
+ uint64_t reserved_15_63:49;
+#endif
+ } cn52xx;
+};
+
+union cvmx_gpio_boot_ena {
+ uint64_t u64;
+ struct cvmx_gpio_boot_ena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t boot_ena:4;
+ uint64_t reserved_0_7:8;
+#else
+ uint64_t reserved_0_7:8;
+ uint64_t boot_ena:4;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+};
+
+union cvmx_gpio_clk_genx {
+ uint64_t u64;
+ struct cvmx_gpio_clk_genx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t n:32;
+#else
+ uint64_t n:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_gpio_clk_qlmx {
+ uint64_t u64;
+ struct cvmx_gpio_clk_qlmx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t qlm_sel:3;
+ uint64_t reserved_3_7:5;
+ uint64_t div:1;
+ uint64_t lane_sel:2;
+#else
+ uint64_t lane_sel:2;
+ uint64_t div:1;
+ uint64_t reserved_3_7:5;
+ uint64_t qlm_sel:3;
+ uint64_t reserved_11_63:53;
+#endif
+ } s;
+ struct cvmx_gpio_clk_qlmx_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t qlm_sel:2;
+ uint64_t reserved_3_7:5;
+ uint64_t div:1;
+ uint64_t lane_sel:2;
+#else
+ uint64_t lane_sel:2;
+ uint64_t div:1;
+ uint64_t reserved_3_7:5;
+ uint64_t qlm_sel:2;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn61xx;
+ struct cvmx_gpio_clk_qlmx_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t div:1;
+ uint64_t lane_sel:2;
+#else
+ uint64_t lane_sel:2;
+ uint64_t div:1;
+ uint64_t reserved_3_63:61;
+#endif
+ } cn63xx;
+};
+
+union cvmx_gpio_dbg_ena {
+ uint64_t u64;
+ struct cvmx_gpio_dbg_ena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63:43;
+ uint64_t dbg_ena:21;
+#else
+ uint64_t dbg_ena:21;
+ uint64_t reserved_21_63:43;
+#endif
+ } s;
+};
+
+union cvmx_gpio_int_clr {
+ uint64_t u64;
+ struct cvmx_gpio_int_clr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t type:16;
+#else
+ uint64_t type:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_gpio_multi_cast {
+ uint64_t u64;
+ struct cvmx_gpio_multi_cast_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t en:1;
+#else
+ uint64_t en:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_gpio_pin_ena {
+ uint64_t u64;
+ struct cvmx_gpio_pin_ena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t ena19:1;
+ uint64_t ena18:1;
+ uint64_t reserved_0_17:18;
+#else
+ uint64_t reserved_0_17:18;
+ uint64_t ena18:1;
+ uint64_t ena19:1;
+ uint64_t reserved_20_63:44;
+#endif
+ } s;
+};
+
+union cvmx_gpio_rx_dat {
+ uint64_t u64;
+ struct cvmx_gpio_rx_dat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t dat:24;
+#else
+ uint64_t dat:24;
+ uint64_t reserved_24_63:40;
+#endif
+ } s;
+ struct cvmx_gpio_rx_dat_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t dat:16;
+#else
+ uint64_t dat:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } cn38xx;
+ struct cvmx_gpio_rx_dat_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t dat:20;
+#else
+ uint64_t dat:20;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn61xx;
+};
+
+union cvmx_gpio_tim_ctl {
+ uint64_t u64;
+ struct cvmx_gpio_tim_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t sel:4;
+#else
+ uint64_t sel:4;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_gpio_tx_clr {
+ uint64_t u64;
+ struct cvmx_gpio_tx_clr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t clr:24;
+#else
+ uint64_t clr:24;
+ uint64_t reserved_24_63:40;
+#endif
+ } s;
+ struct cvmx_gpio_tx_clr_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t clr:16;
+#else
+ uint64_t clr:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } cn38xx;
+ struct cvmx_gpio_tx_clr_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t clr:20;
+#else
+ uint64_t clr:20;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn61xx;
+};
+
+union cvmx_gpio_tx_set {
+ uint64_t u64;
+ struct cvmx_gpio_tx_set_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t set:24;
+#else
+ uint64_t set:24;
+ uint64_t reserved_24_63:40;
+#endif
+ } s;
+ struct cvmx_gpio_tx_set_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t set:16;
+#else
+ uint64_t set:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } cn38xx;
+ struct cvmx_gpio_tx_set_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t set:20;
+#else
+ uint64_t set:20;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn61xx;
+};
+
+union cvmx_gpio_xbit_cfgx {
+ uint64_t u64;
+ struct cvmx_gpio_xbit_cfgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t synce_sel:2;
+ uint64_t clk_gen:1;
+ uint64_t clk_sel:2;
+ uint64_t fil_sel:4;
+ uint64_t fil_cnt:4;
+ uint64_t int_type:1;
+ uint64_t int_en:1;
+ uint64_t rx_xor:1;
+ uint64_t tx_oe:1;
+#else
+ uint64_t tx_oe:1;
+ uint64_t rx_xor:1;
+ uint64_t int_en:1;
+ uint64_t int_type:1;
+ uint64_t fil_cnt:4;
+ uint64_t fil_sel:4;
+ uint64_t clk_sel:2;
+ uint64_t clk_gen:1;
+ uint64_t synce_sel:2;
+ uint64_t reserved_17_63:47;
+#endif
+ } s;
+ struct cvmx_gpio_xbit_cfgx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t fil_sel:4;
+ uint64_t fil_cnt:4;
+ uint64_t reserved_2_3:2;
+ uint64_t rx_xor:1;
+ uint64_t tx_oe:1;
+#else
+ uint64_t tx_oe:1;
+ uint64_t rx_xor:1;
+ uint64_t reserved_2_3:2;
+ uint64_t fil_cnt:4;
+ uint64_t fil_sel:4;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn30xx;
+};
+
+#endif
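/*
 * Illustrative sketch, not part of the patch: driving a GPIO line with the
 * registers defined above.  cvmx_read_csr()/cvmx_write_csr() are assumed
 * from <asm/octeon/cvmx.h>; all other symbols are defined in this header.
 */
static inline void example_gpio_drive(int pin, int value)
{
	union cvmx_gpio_bit_cfgx cfg;

	/* Enable the output driver on this pin. */
	cfg.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(pin));
	cfg.s.tx_oe = 1;
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(pin), cfg.u64);

	/* TX_SET and TX_CLR take a one-bit-per-pin mask. */
	if (value)
		cvmx_write_csr(CVMX_GPIO_TX_SET, 1ull << pin);
	else
		cvmx_write_csr(CVMX_GPIO_TX_CLR, 1ull << pin);
}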
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h
new file mode 100644
index 000000000..ce52aafe7
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h
@@ -0,0 +1,124 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * Helper functions to abstract board specific data about
+ * network ports from the rest of the cvmx-helper files.
+ *
+ */
+#ifndef __CVMX_HELPER_BOARD_H__
+#define __CVMX_HELPER_BOARD_H__
+
+#include <asm/octeon/cvmx-helper.h>
+
+enum cvmx_helper_board_usb_clock_types {
+ USB_CLOCK_TYPE_REF_12,
+ USB_CLOCK_TYPE_REF_24,
+ USB_CLOCK_TYPE_REF_48,
+ USB_CLOCK_TYPE_CRYSTAL_12,
+};
+
+typedef enum {
+ set_phy_link_flags_autoneg = 0x1,
+ set_phy_link_flags_flow_control_dont_touch = 0x0 << 1,
+ set_phy_link_flags_flow_control_enable = 0x1 << 1,
+ set_phy_link_flags_flow_control_disable = 0x2 << 1,
+ set_phy_link_flags_flow_control_mask = 0x3 << 1, /* Mask for 2 bit wide flow control field */
+} cvmx_helper_board_set_phy_link_flags_types_t;
+
+/*
+ * Fake IPD port; the RGMII/MII interface may use a different PHY, so use
+ * this macro to return the appropriate MIX address to read the PHY.
+ */
+#define CVMX_HELPER_BOARD_MGMT_IPD_PORT -10
+
+/**
+ * Return the MII PHY address associated with the given IPD
+ * port. A result of -1 means there isn't an MII-capable PHY
+ * connected to this port. On chips supporting multiple MII
+ * busses the bus number is encoded in bits <15:8>.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @ipd_port: Octeon IPD port to get the MII address for.
+ *
+ * Returns MII PHY address and bus number or -1.
+ */
+extern int cvmx_helper_board_get_mii_address(int ipd_port);
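/*
 * Illustrative sketch, not part of the patch: splitting the return value
 * into the bus number (bits <15:8>) and PHY address described above.  The
 * helper name is hypothetical.
 */
static inline int example_decode_mii_address(int ipd_port, int *bus, int *addr)
{
	int r = cvmx_helper_board_get_mii_address(ipd_port);

	if (r < 0)
		return -1;	/* no MII-capable PHY on this port */

	*bus = (r >> 8) & 0xff;
	*addr = r & 0xff;
	return 0;
}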
+
+/**
+ * This function is the board-specific method of determining an
+ * Ethernet port's link speed. Most Octeon boards have Marvell PHYs
+ * and are handled by the fall-through case. This function must be
+ * updated for boards that don't have the normal Marvell PHYs.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @ipd_port: IPD input port associated with the port we want to get link
+ * status for.
+ *
+ * Returns The port's link status. If the link isn't fully resolved, this must
+ * return zero.
+ */
+extern union cvmx_helper_link_info __cvmx_helper_board_link_get(int ipd_port);
+
+/**
+ * This function is called by cvmx_helper_interface_probe() after it
+ * determines the number of ports Octeon can support on a specific
+ * interface. This function is the per-board hook to override
+ * this value. It is called with the number of ports Octeon might
+ * support and should return the number of actual ports on the
+ * board.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @interface: Interface to probe
+ * @supported_ports:
+ * Number of ports Octeon supports.
+ *
+ * Returns Number of ports the actual board supports. Many times this will
+ * simply be "supported_ports".
+ */
+extern int __cvmx_helper_board_interface_probe(int interface,
+ int supported_ports);
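/*
 * Illustrative sketch, not part of the patch: the kind of clamp a board
 * override typically applies.  The cvmx_sysinfo board-type checks are
 * omitted and the port count of 2 is a made-up example value.
 */
static inline int example_board_interface_probe(int interface,
						int supported_ports)
{
	/* A hypothetical board that only wires up two of the ports. */
	return supported_ports > 2 ? 2 : supported_ports;
}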
+
+enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(void);
+
+#endif /* __CVMX_HELPER_BOARD_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-errata.h b/arch/mips/include/asm/octeon/cvmx-helper-errata.h
new file mode 100644
index 000000000..5fc99189f
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-helper-errata.h
@@ -0,0 +1,33 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_HELPER_ERRATA_H__
+#define __CVMX_HELPER_ERRATA_H__
+
+extern void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm);
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-jtag.h b/arch/mips/include/asm/octeon/cvmx-helper-jtag.h
new file mode 100644
index 000000000..29f016ddb
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-helper-jtag.h
@@ -0,0 +1,43 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Helper utilities for qlm_jtag.
+ *
+ */
+
+#ifndef __CVMX_HELPER_JTAG_H__
+#define __CVMX_HELPER_JTAG_H__
+
+extern void cvmx_helper_qlm_jtag_init(void);
+extern uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data);
+extern void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits);
+extern void cvmx_helper_qlm_jtag_update(int qlm);
+
+#endif /* __CVMX_HELPER_JTAG_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-loop.h b/arch/mips/include/asm/octeon/cvmx-helper-loop.h
new file mode 100644
index 000000000..077f0e9d3
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-helper-loop.h
@@ -0,0 +1,60 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as published by
+ * the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or NONINFRINGEMENT.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for LOOP initialization, configuration,
+ * and monitoring.
+ *
+ */
+#ifndef __CVMX_HELPER_LOOP_H__
+#define __CVMX_HELPER_LOOP_H__
+
+/**
+ * Probe a LOOP interface and determine the number of ports
+ * connected to it. The LOOP interface should still be down after
+ * this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_loop_probe(int interface);
+static inline int __cvmx_helper_loop_enumerate(int interface) { return 4; }
+
+/**
+ * Bring up and enable a LOOP interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_loop_enable(int interface);
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-npi.h b/arch/mips/include/asm/octeon/cvmx-helper-npi.h
new file mode 100644
index 000000000..8df4c7faf
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-helper-npi.h
@@ -0,0 +1,61 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for NPI initialization, configuration,
+ * and monitoring.
+ *
+ */
+#ifndef __CVMX_HELPER_NPI_H__
+#define __CVMX_HELPER_NPI_H__
+
+/**
+ * Probe an NPI interface and determine the number of ports
+ * connected to it. The NPI interface should still be down after
+ * this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_npi_probe(int interface);
+#define __cvmx_helper_npi_enumerate __cvmx_helper_npi_probe
+
+/**
+ * Bring up and enable an NPI interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_npi_enable(int interface);
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
new file mode 100644
index 000000000..3e79a7f89
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
@@ -0,0 +1,93 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for RGMII/GMII/MII initialization, configuration,
+ * and monitoring.
+ *
+ */
+#ifndef __CVMX_HELPER_RGMII_H__
+#define __CVMX_HELPER_RGMII_H__
+
+/**
+ * Probe RGMII ports and determine the number present
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of RGMII/GMII/MII ports (0-4).
+ */
+extern int __cvmx_helper_rgmii_probe(int interface);
+#define __cvmx_helper_rgmii_enumerate __cvmx_helper_rgmii_probe
+
+/**
+ * Put an RGMII interface in loopback mode. Internal packets sent
+ * out will be received back again on the same port. Externally
+ * received packets will echo back out.
+ *
+ * @port: IPD port number to loop.
+ */
+extern void cvmx_helper_rgmii_internal_loopback(int port);
+
+/**
+ * Configure all of the ASX, GMX, and PKO registers required
+ * to get RGMII to function on the supplied interface.
+ *
+ * @interface: PKO Interface to configure (0 or 1)
+ *
+ * Returns Zero on success
+ */
+extern int __cvmx_helper_rgmii_enable(int interface);
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+extern union cvmx_helper_link_info __cvmx_helper_rgmii_link_get(int ipd_port);
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get().
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_rgmii_link_set(int ipd_port,
+ union cvmx_helper_link_info link_info);
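/*
 * Illustrative sketch, not part of the patch: the poll-and-apply pattern
 * the two comments above describe.  The link_up field of
 * union cvmx_helper_link_info is assumed from cvmx-helper.h; the polling
 * function itself is hypothetical.
 */
static inline void example_rgmii_poll(int ipd_port)
{
	union cvmx_helper_link_info link;

	/* What did autonegotiation resolve to since the last set? */
	link = __cvmx_helper_rgmii_link_get(ipd_port);

	/* Program the GMX/ASX side to match it. */
	if (link.s.link_up)
		__cvmx_helper_rgmii_link_set(ipd_port, link);
}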
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h b/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h
new file mode 100644
index 000000000..8aac90f18
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h
@@ -0,0 +1,87 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for SGMII initialization, configuration,
+ * and monitoring.
+ *
+ */
+#ifndef __CVMX_HELPER_SGMII_H__
+#define __CVMX_HELPER_SGMII_H__
+
+/**
+ * Probe an SGMII interface and determine the number of ports
+ * connected to it. The SGMII interface should still be down after
+ * this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_sgmii_probe(int interface);
+extern int __cvmx_helper_sgmii_enumerate(int interface);
+
+/**
+ * Bring up and enable an SGMII interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_sgmii_enable(int interface);
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+extern union cvmx_helper_link_info __cvmx_helper_sgmii_link_get(int ipd_port);
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get().
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_sgmii_link_set(int ipd_port,
+ union cvmx_helper_link_info link_info);
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-spi.h b/arch/mips/include/asm/octeon/cvmx-helper-spi.h
new file mode 100644
index 000000000..bc8cab936
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-helper-spi.h
@@ -0,0 +1,84 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for SPI initialization, configuration,
+ * and monitoring.
+ */
+#ifndef __CVMX_HELPER_SPI_H__
+#define __CVMX_HELPER_SPI_H__
+
+/**
+ * Probe an SPI interface and determine the number of ports
+ * connected to it. The SPI interface should still be down after
+ * this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_spi_probe(int interface);
+extern int __cvmx_helper_spi_enumerate(int interface);
+
+/**
+ * Bring up and enable an SPI interface. After this call packet I/O
+ * should be fully functional. This is called with IPD enabled but
+ * PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_spi_enable(int interface);
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+extern union cvmx_helper_link_info __cvmx_helper_spi_link_get(int ipd_port);
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get().
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_spi_link_set(int ipd_port,
+ union cvmx_helper_link_info link_info);
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-util.h b/arch/mips/include/asm/octeon/cvmx-helper-util.h
new file mode 100644
index 000000000..97b27a07c
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-helper-util.h
@@ -0,0 +1,192 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Small helper utilities.
+ *
+ */
+
+#ifndef __CVMX_HELPER_UTIL_H__
+#define __CVMX_HELPER_UTIL_H__
+
+/**
+ * Convert an interface mode into a human-readable string
+ *
+ * @mode: Mode to convert
+ *
+ * Returns String
+ */
+extern const char
+ *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode);
+
+/**
+ * Set up Random Early Drop to automatically begin dropping packets.
+ *
+ * @pass_thresh:
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
+ * @drop_thresh:
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
+ * Returns Zero on success. Negative on failure
+ */
+extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
+
+/**
+ * Get the version of the CVMX libraries.
+ *
+ * Returns Version string. Note this buffer is allocated statically
+ * and will be shared by all callers.
+ */
+extern const char *cvmx_helper_get_version(void);
+
+/**
+ * Set up the common GMX settings that determine the number of
+ * ports. These settings apply to almost all configurations of all
+ * chips.
+ *
+ * @interface: Interface to configure
+ * @num_ports: Number of ports on the interface
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_setup_gmx(int interface, int num_ports);
+
+/**
+ * Returns the IPD/PKO port number for a port on the given
+ * interface.
+ *
+ * @interface: Interface to use
+ * @port: Port on the interface
+ *
+ * Returns IPD/PKO port number
+ */
+extern int cvmx_helper_get_ipd_port(int interface, int port);
+
+/**
+ * Returns the IPD/PKO port number for the first port on the given
+ * interface.
+ *
+ * @interface: Interface to use
+ *
+ * Returns IPD/PKO port number
+ */
+static inline int cvmx_helper_get_first_ipd_port(int interface)
+{
+ return cvmx_helper_get_ipd_port(interface, 0);
+}
+
+/**
+ * Returns the IPD/PKO port number for the last port on the given
+ * interface.
+ *
+ * @interface: Interface to use
+ *
+ * Returns IPD/PKO port number
+ */
+static inline int cvmx_helper_get_last_ipd_port(int interface)
+{
+ extern int cvmx_helper_ports_on_interface(int interface);
+
+ return cvmx_helper_get_first_ipd_port(interface) +
+ cvmx_helper_ports_on_interface(interface) - 1;
+}
+
+/**
+ * Free the packet buffers contained in a work queue entry.
+ * The work queue entry is not freed.
+ *
+ * @work: Work queue entry with packet to free
+ */
+static inline void cvmx_helper_free_packet_data(struct cvmx_wqe *work)
+{
+ uint64_t number_buffers;
+ union cvmx_buf_ptr buffer_ptr;
+ union cvmx_buf_ptr next_buffer_ptr;
+ uint64_t start_of_buffer;
+
+ number_buffers = work->word2.s.bufs;
+ if (number_buffers == 0)
+ return;
+ buffer_ptr = work->packet_ptr;
+
+ /*
+ * Since the number of buffers is not zero, we know this is
+ * not a dynamic short packet. We need to check if it is a
+ * packet received with IPD_CTL_STATUS[NO_WPTR]. If this is
+ * true, we need to free all buffers except for the first
+ * one. The caller doesn't expect their WQE pointer to be
+ * freed.
+ */
+ start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
+ if (cvmx_ptr_to_phys(work) == start_of_buffer) {
+ next_buffer_ptr =
+ *(union cvmx_buf_ptr *) cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
+ buffer_ptr = next_buffer_ptr;
+ number_buffers--;
+ }
+
+ while (number_buffers--) {
+ /*
+ * Remember the back pointer is in cache lines, not
+ * 64bit words
+ */
+ start_of_buffer =
+ ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
+ /*
+ * Read pointer to next buffer before we free the
+ * current buffer.
+ */
+ next_buffer_ptr =
+ *(union cvmx_buf_ptr *) cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
+ cvmx_fpa_free(cvmx_phys_to_ptr(start_of_buffer),
+ buffer_ptr.s.pool, 0);
+ buffer_ptr = next_buffer_ptr;
+ }
+}
+
+/**
+ * Returns the interface number for an IPD/PKO port number.
+ *
+ * @ipd_port: IPD/PKO port number
+ *
+ * Returns Interface number
+ */
+extern int cvmx_helper_get_interface_num(int ipd_port);
+
+/**
+ * Returns the interface index number for an IPD/PKO port
+ * number.
+ *
+ * @ipd_port: IPD/PKO port number
+ *
+ * Returns Interface index number
+ */
+extern int cvmx_helper_get_interface_index_num(int ipd_port);
+
+#endif /* __CVMX_HELPER_UTIL_H__ */
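
A hedged sketch of how the numbering helpers above combine in practice: once an interface has been probed, its IPD ports form the contiguous range [first, last], so per-port setup is usually a simple loop. The RED thresholds at the end are illustrative values only, not recommendations:

	int interface = 0;	/* example interface number */
	int ipd_port;

	for (ipd_port = cvmx_helper_get_first_ipd_port(interface);
	     ipd_port <= cvmx_helper_get_last_ipd_port(interface);
	     ipd_port++) {
		/* cvmx_helper_get_interface_index_num(ipd_port) recovers the
		 * port index within the interface if it is needed here. */
		/* ... per-port configuration goes here ... */
	}

	/* Start RED when fewer than 256 FPA 0 buffers remain; drop
	 * everything below 128 (both thresholds purely illustrative). */
	cvmx_helper_setup_red(256, 128);
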
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-xaui.h b/arch/mips/include/asm/octeon/cvmx-helper-xaui.h
new file mode 100644
index 000000000..c18da2eba
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-helper-xaui.h
@@ -0,0 +1,87 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for XAUI initialization, configuration,
+ * and monitoring.
+ *
+ */
+#ifndef __CVMX_HELPER_XAUI_H__
+#define __CVMX_HELPER_XAUI_H__
+
+/**
+ * Probe a XAUI interface and determine the number of ports
+ * connected to it. The XAUI interface should still be down
+ * after this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_xaui_probe(int interface);
+extern int __cvmx_helper_xaui_enumerate(int interface);
+
+/**
+ * Bring up and enable a XAUI interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_xaui_enable(int interface);
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+extern union cvmx_helper_link_info __cvmx_helper_xaui_link_get(int ipd_port);
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get().
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_xaui_link_set(int ipd_port,
+ union cvmx_helper_link_info link_info);
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-helper.h b/arch/mips/include/asm/octeon/cvmx-helper.h
new file mode 100644
index 000000000..c6c99e28e
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-helper.h
@@ -0,0 +1,178 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Helper functions for common, but complicated tasks.
+ *
+ */
+
+#ifndef __CVMX_HELPER_H__
+#define __CVMX_HELPER_H__
+
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-fpa.h>
+#include <asm/octeon/cvmx-wqe.h>
+
+typedef enum {
+ CVMX_HELPER_INTERFACE_MODE_DISABLED,
+ CVMX_HELPER_INTERFACE_MODE_RGMII,
+ CVMX_HELPER_INTERFACE_MODE_GMII,
+ CVMX_HELPER_INTERFACE_MODE_SPI,
+ CVMX_HELPER_INTERFACE_MODE_PCIE,
+ CVMX_HELPER_INTERFACE_MODE_XAUI,
+ CVMX_HELPER_INTERFACE_MODE_SGMII,
+ CVMX_HELPER_INTERFACE_MODE_PICMG,
+ CVMX_HELPER_INTERFACE_MODE_NPI,
+ CVMX_HELPER_INTERFACE_MODE_LOOP,
+} cvmx_helper_interface_mode_t;
+
+union cvmx_helper_link_info {
+ uint64_t u64;
+ struct {
+ uint64_t reserved_20_63:44;
+ uint64_t link_up:1; /**< Is the physical link up? */
+ uint64_t full_duplex:1; /**< 1 if the link is full duplex */
+ uint64_t speed:18; /**< Speed of the link in Mbps */
+ } s;
+};
+
+#include <asm/octeon/cvmx-helper-errata.h>
+#include <asm/octeon/cvmx-helper-loop.h>
+#include <asm/octeon/cvmx-helper-npi.h>
+#include <asm/octeon/cvmx-helper-rgmii.h>
+#include <asm/octeon/cvmx-helper-sgmii.h>
+#include <asm/octeon/cvmx-helper-spi.h>
+#include <asm/octeon/cvmx-helper-util.h>
+#include <asm/octeon/cvmx-helper-xaui.h>
+
+/**
+ * This function enables the IPD and also enables the packet interfaces.
+ * The packet interfaces (RGMII and SPI) must be enabled after the
+ * IPD. This should be called by the user program after any additional
+ * IPD configuration changes are made if CVMX_HELPER_ENABLE_IPD
+ * is not set in the executive-config.h file.
+ *
+ * Returns 0 on success
+ * -1 on failure
+ */
+extern int cvmx_helper_ipd_and_packet_input_enable(void);
+
+/**
+ * Initialize the PIP, IPD, and PKO hardware to support
+ * simple priority based queues for the ethernet ports. Each
+ * port is configured with a number of priority queues based
+ * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
+ * priority than the previous.
+ *
+ * Returns Zero on success, non-zero on failure
+ */
+extern int cvmx_helper_initialize_packet_io_global(void);
+
+/**
+ * Does core-local initialization for packet I/O
+ *
+ * Returns Zero on success, non-zero on failure
+ */
+extern int cvmx_helper_initialize_packet_io_local(void);
+
+/**
+ * Returns the number of ports on the given interface.
+ * The interface must be initialized before the port count
+ * can be returned.
+ *
+ * @interface: Which interface to return port count for.
+ *
+ * Returns Port count for interface
+ * -1 for uninitialized interface
+ */
+extern int cvmx_helper_ports_on_interface(int interface);
+
+/**
+ * Return the number of interfaces the chip has. Each interface
+ * may have multiple ports. Most chips support two interfaces,
+ * but the CNX0XX and CNX1XX are exceptions. These only support
+ * one interface.
+ *
+ * Returns Number of interfaces on chip
+ */
+extern int cvmx_helper_get_number_of_interfaces(void);
+
+/**
+ * Get the operating mode of an interface. Depending on the Octeon
+ * chip and configuration, this function returns an enumeration
+ * of the type of packet I/O supported by an interface.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Mode of the interface. Unknown or unsupported interfaces return
+ * DISABLED.
+ */
+extern cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
+ interface);
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+extern union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port);
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get().
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int cvmx_helper_link_set(int ipd_port,
+ union cvmx_helper_link_info link_info);
+
+/**
+ * This function probes an interface to determine the actual
+ * number of hardware ports connected to it. It doesn't set up the
+ * ports or enable them. The main goal here is to set the global
+ * interface_port_count[interface] correctly. Hardware setup of the
+ * ports will be performed later.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int cvmx_helper_interface_probe(int interface);
+extern int cvmx_helper_interface_enumerate(int interface);
+
+#endif /* __CVMX_HELPER_H__ */
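
Taken together, the declarations above imply the usual bring-up order: global initialization, per-core initialization, enabling IPD/packet input, and then keeping the hardware link configuration in step with auto-negotiation by feeding cvmx_helper_link_get() results straight back into cvmx_helper_link_set(). The function below is a hypothetical sketch under those assumptions; it only relies on declarations from the headers above, and error handling plus the exact point at which IPD is enabled depend on the executive configuration:

	#include <asm/octeon/cvmx-helper.h>

	/* Hypothetical example, not part of the SDK. */
	static int example_octeon_packet_io_init(void)
	{
		int num_interfaces, interface, ipd_port;

		if (cvmx_helper_initialize_packet_io_global() != 0)
			return -1;
		cvmx_helper_initialize_packet_io_local();
		cvmx_helper_ipd_and_packet_input_enable();

		num_interfaces = cvmx_helper_get_number_of_interfaces();
		for (interface = 0; interface < num_interfaces; interface++) {
			if (cvmx_helper_interface_get_mode(interface) ==
			    CVMX_HELPER_INTERFACE_MODE_DISABLED)
				continue;

			for (ipd_port = cvmx_helper_get_first_ipd_port(interface);
			     ipd_port <= cvmx_helper_get_last_ipd_port(interface);
			     ipd_port++) {
				/* Mirror the auto-negotiated state into the
				 * hardware, as cvmx_helper_link_set() requires. */
				union cvmx_helper_link_info link =
					cvmx_helper_link_get(ipd_port);

				if (link.s.link_up)
					cvmx_helper_link_set(ipd_port, link);
			}
		}
		return 0;
	}
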
diff --git a/arch/mips/include/asm/octeon/cvmx-iob-defs.h b/arch/mips/include/asm/octeon/cvmx-iob-defs.h
new file mode 100644
index 000000000..989b67bba
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-iob-defs.h
@@ -0,0 +1,903 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_IOB_DEFS_H__
+#define __CVMX_IOB_DEFS_H__
+
+#define CVMX_IOB_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011800F00007F8ull))
+#define CVMX_IOB_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011800F0000050ull))
+#define CVMX_IOB_DWB_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000028ull))
+#define CVMX_IOB_FAU_TIMEOUT (CVMX_ADD_IO_SEG(0x00011800F0000000ull))
+#define CVMX_IOB_I2C_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000010ull))
+#define CVMX_IOB_INB_CONTROL_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000078ull))
+#define CVMX_IOB_INB_CONTROL_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F0000088ull))
+#define CVMX_IOB_INB_DATA_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000070ull))
+#define CVMX_IOB_INB_DATA_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F0000080ull))
+#define CVMX_IOB_INT_ENB (CVMX_ADD_IO_SEG(0x00011800F0000060ull))
+#define CVMX_IOB_INT_SUM (CVMX_ADD_IO_SEG(0x00011800F0000058ull))
+#define CVMX_IOB_N2C_L2C_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000020ull))
+#define CVMX_IOB_N2C_RSP_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000008ull))
+#define CVMX_IOB_OUTB_COM_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000040ull))
+#define CVMX_IOB_OUTB_CONTROL_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000098ull))
+#define CVMX_IOB_OUTB_CONTROL_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F00000A8ull))
+#define CVMX_IOB_OUTB_DATA_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000090ull))
+#define CVMX_IOB_OUTB_DATA_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F00000A0ull))
+#define CVMX_IOB_OUTB_FPA_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000048ull))
+#define CVMX_IOB_OUTB_REQ_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000038ull))
+#define CVMX_IOB_P2C_REQ_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000018ull))
+#define CVMX_IOB_PKT_ERR (CVMX_ADD_IO_SEG(0x00011800F0000068ull))
+#define CVMX_IOB_TO_CMB_CREDITS (CVMX_ADD_IO_SEG(0x00011800F00000B0ull))
+#define CVMX_IOB_TO_NCB_DID_00_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000800ull))
+#define CVMX_IOB_TO_NCB_DID_111_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000B78ull))
+#define CVMX_IOB_TO_NCB_DID_223_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000EF8ull))
+#define CVMX_IOB_TO_NCB_DID_24_CREDITS (CVMX_ADD_IO_SEG(0x00011800F00008C0ull))
+#define CVMX_IOB_TO_NCB_DID_32_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000900ull))
+#define CVMX_IOB_TO_NCB_DID_40_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000940ull))
+#define CVMX_IOB_TO_NCB_DID_55_CREDITS (CVMX_ADD_IO_SEG(0x00011800F00009B8ull))
+#define CVMX_IOB_TO_NCB_DID_64_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000A00ull))
+#define CVMX_IOB_TO_NCB_DID_79_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000A78ull))
+#define CVMX_IOB_TO_NCB_DID_96_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000B00ull))
+#define CVMX_IOB_TO_NCB_DID_98_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000B10ull))
+
+union cvmx_iob_bist_status {
+ uint64_t u64;
+ struct cvmx_iob_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t ibd:1;
+ uint64_t icd:1;
+#else
+ uint64_t icd:1;
+ uint64_t ibd:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+ struct cvmx_iob_bist_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63:46;
+ uint64_t icnrcb:1;
+ uint64_t icr0:1;
+ uint64_t icr1:1;
+ uint64_t icnr1:1;
+ uint64_t icnr0:1;
+ uint64_t ibdr0:1;
+ uint64_t ibdr1:1;
+ uint64_t ibr0:1;
+ uint64_t ibr1:1;
+ uint64_t icnrt:1;
+ uint64_t ibrq0:1;
+ uint64_t ibrq1:1;
+ uint64_t icrn0:1;
+ uint64_t icrn1:1;
+ uint64_t icrp0:1;
+ uint64_t icrp1:1;
+ uint64_t ibd:1;
+ uint64_t icd:1;
+#else
+ uint64_t icd:1;
+ uint64_t ibd:1;
+ uint64_t icrp1:1;
+ uint64_t icrp0:1;
+ uint64_t icrn1:1;
+ uint64_t icrn0:1;
+ uint64_t ibrq1:1;
+ uint64_t ibrq0:1;
+ uint64_t icnrt:1;
+ uint64_t ibr1:1;
+ uint64_t ibr0:1;
+ uint64_t ibdr1:1;
+ uint64_t ibdr0:1;
+ uint64_t icnr0:1;
+ uint64_t icnr1:1;
+ uint64_t icr1:1;
+ uint64_t icr0:1;
+ uint64_t icnrcb:1;
+ uint64_t reserved_18_63:46;
+#endif
+ } cn30xx;
+ struct cvmx_iob_bist_status_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63:41;
+ uint64_t xmdfif:1;
+ uint64_t xmcfif:1;
+ uint64_t iorfif:1;
+ uint64_t rsdfif:1;
+ uint64_t iocfif:1;
+ uint64_t icnrcb:1;
+ uint64_t icr0:1;
+ uint64_t icr1:1;
+ uint64_t icnr1:1;
+ uint64_t icnr0:1;
+ uint64_t ibdr0:1;
+ uint64_t ibdr1:1;
+ uint64_t ibr0:1;
+ uint64_t ibr1:1;
+ uint64_t icnrt:1;
+ uint64_t ibrq0:1;
+ uint64_t ibrq1:1;
+ uint64_t icrn0:1;
+ uint64_t icrn1:1;
+ uint64_t icrp0:1;
+ uint64_t icrp1:1;
+ uint64_t ibd:1;
+ uint64_t icd:1;
+#else
+ uint64_t icd:1;
+ uint64_t ibd:1;
+ uint64_t icrp1:1;
+ uint64_t icrp0:1;
+ uint64_t icrn1:1;
+ uint64_t icrn0:1;
+ uint64_t ibrq1:1;
+ uint64_t ibrq0:1;
+ uint64_t icnrt:1;
+ uint64_t ibr1:1;
+ uint64_t ibr0:1;
+ uint64_t ibdr1:1;
+ uint64_t ibdr0:1;
+ uint64_t icnr0:1;
+ uint64_t icnr1:1;
+ uint64_t icr1:1;
+ uint64_t icr0:1;
+ uint64_t icnrcb:1;
+ uint64_t iocfif:1;
+ uint64_t rsdfif:1;
+ uint64_t iorfif:1;
+ uint64_t xmcfif:1;
+ uint64_t xmdfif:1;
+ uint64_t reserved_23_63:41;
+#endif
+ } cn61xx;
+ struct cvmx_iob_bist_status_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63:46;
+ uint64_t xmdfif:1;
+ uint64_t xmcfif:1;
+ uint64_t iorfif:1;
+ uint64_t rsdfif:1;
+ uint64_t iocfif:1;
+ uint64_t icnrcb:1;
+ uint64_t icr0:1;
+ uint64_t icr1:1;
+ uint64_t icnr0:1;
+ uint64_t ibr0:1;
+ uint64_t ibr1:1;
+ uint64_t icnrt:1;
+ uint64_t ibrq0:1;
+ uint64_t ibrq1:1;
+ uint64_t icrn0:1;
+ uint64_t icrn1:1;
+ uint64_t ibd:1;
+ uint64_t icd:1;
+#else
+ uint64_t icd:1;
+ uint64_t ibd:1;
+ uint64_t icrn1:1;
+ uint64_t icrn0:1;
+ uint64_t ibrq1:1;
+ uint64_t ibrq0:1;
+ uint64_t icnrt:1;
+ uint64_t ibr1:1;
+ uint64_t ibr0:1;
+ uint64_t icnr0:1;
+ uint64_t icr1:1;
+ uint64_t icr0:1;
+ uint64_t icnrcb:1;
+ uint64_t iocfif:1;
+ uint64_t rsdfif:1;
+ uint64_t iorfif:1;
+ uint64_t xmcfif:1;
+ uint64_t xmdfif:1;
+ uint64_t reserved_18_63:46;
+#endif
+ } cn68xx;
+};
+
+union cvmx_iob_ctl_status {
+ uint64_t u64;
+ struct cvmx_iob_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t fif_dly:1;
+ uint64_t xmc_per:4;
+ uint64_t reserved_5_5:1;
+ uint64_t outb_mat:1;
+ uint64_t inb_mat:1;
+ uint64_t pko_enb:1;
+ uint64_t dwb_enb:1;
+ uint64_t fau_end:1;
+#else
+ uint64_t fau_end:1;
+ uint64_t dwb_enb:1;
+ uint64_t pko_enb:1;
+ uint64_t inb_mat:1;
+ uint64_t outb_mat:1;
+ uint64_t reserved_5_5:1;
+ uint64_t xmc_per:4;
+ uint64_t fif_dly:1;
+ uint64_t reserved_11_63:53;
+#endif
+ } s;
+ struct cvmx_iob_ctl_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t outb_mat:1;
+ uint64_t inb_mat:1;
+ uint64_t pko_enb:1;
+ uint64_t dwb_enb:1;
+ uint64_t fau_end:1;
+#else
+ uint64_t fau_end:1;
+ uint64_t dwb_enb:1;
+ uint64_t pko_enb:1;
+ uint64_t inb_mat:1;
+ uint64_t outb_mat:1;
+ uint64_t reserved_5_63:59;
+#endif
+ } cn30xx;
+ struct cvmx_iob_ctl_status_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t rr_mode:1;
+ uint64_t outb_mat:1;
+ uint64_t inb_mat:1;
+ uint64_t pko_enb:1;
+ uint64_t dwb_enb:1;
+ uint64_t fau_end:1;
+#else
+ uint64_t fau_end:1;
+ uint64_t dwb_enb:1;
+ uint64_t pko_enb:1;
+ uint64_t inb_mat:1;
+ uint64_t outb_mat:1;
+ uint64_t rr_mode:1;
+ uint64_t reserved_6_63:58;
+#endif
+ } cn52xx;
+ struct cvmx_iob_ctl_status_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t fif_dly:1;
+ uint64_t xmc_per:4;
+ uint64_t rr_mode:1;
+ uint64_t outb_mat:1;
+ uint64_t inb_mat:1;
+ uint64_t pko_enb:1;
+ uint64_t dwb_enb:1;
+ uint64_t fau_end:1;
+#else
+ uint64_t fau_end:1;
+ uint64_t dwb_enb:1;
+ uint64_t pko_enb:1;
+ uint64_t inb_mat:1;
+ uint64_t outb_mat:1;
+ uint64_t rr_mode:1;
+ uint64_t xmc_per:4;
+ uint64_t fif_dly:1;
+ uint64_t reserved_11_63:53;
+#endif
+ } cn61xx;
+ struct cvmx_iob_ctl_status_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t xmc_per:4;
+ uint64_t rr_mode:1;
+ uint64_t outb_mat:1;
+ uint64_t inb_mat:1;
+ uint64_t pko_enb:1;
+ uint64_t dwb_enb:1;
+ uint64_t fau_end:1;
+#else
+ uint64_t fau_end:1;
+ uint64_t dwb_enb:1;
+ uint64_t pko_enb:1;
+ uint64_t inb_mat:1;
+ uint64_t outb_mat:1;
+ uint64_t rr_mode:1;
+ uint64_t xmc_per:4;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn63xx;
+ struct cvmx_iob_ctl_status_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t fif_dly:1;
+ uint64_t xmc_per:4;
+ uint64_t rsvr5:1;
+ uint64_t outb_mat:1;
+ uint64_t inb_mat:1;
+ uint64_t pko_enb:1;
+ uint64_t dwb_enb:1;
+ uint64_t fau_end:1;
+#else
+ uint64_t fau_end:1;
+ uint64_t dwb_enb:1;
+ uint64_t pko_enb:1;
+ uint64_t inb_mat:1;
+ uint64_t outb_mat:1;
+ uint64_t rsvr5:1;
+ uint64_t xmc_per:4;
+ uint64_t fif_dly:1;
+ uint64_t reserved_11_63:53;
+#endif
+ } cn68xx;
+};
+
+union cvmx_iob_dwb_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_dwb_pri_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t cnt_enb:1;
+ uint64_t cnt_val:15;
+#else
+ uint64_t cnt_val:15;
+ uint64_t cnt_enb:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_iob_fau_timeout {
+ uint64_t u64;
+ struct cvmx_iob_fau_timeout_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t tout_enb:1;
+ uint64_t tout_val:12;
+#else
+ uint64_t tout_val:12;
+ uint64_t tout_enb:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+};
+
+union cvmx_iob_i2c_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_i2c_pri_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t cnt_enb:1;
+ uint64_t cnt_val:15;
+#else
+ uint64_t cnt_val:15;
+ uint64_t cnt_enb:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_iob_inb_control_match {
+ uint64_t u64;
+ struct cvmx_iob_inb_control_match_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t mask:8;
+ uint64_t opc:4;
+ uint64_t dst:9;
+ uint64_t src:8;
+#else
+ uint64_t src:8;
+ uint64_t dst:9;
+ uint64_t opc:4;
+ uint64_t mask:8;
+ uint64_t reserved_29_63:35;
+#endif
+ } s;
+};
+
+union cvmx_iob_inb_control_match_enb {
+ uint64_t u64;
+ struct cvmx_iob_inb_control_match_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t mask:8;
+ uint64_t opc:4;
+ uint64_t dst:9;
+ uint64_t src:8;
+#else
+ uint64_t src:8;
+ uint64_t dst:9;
+ uint64_t opc:4;
+ uint64_t mask:8;
+ uint64_t reserved_29_63:35;
+#endif
+ } s;
+};
+
+union cvmx_iob_inb_data_match {
+ uint64_t u64;
+ struct cvmx_iob_inb_data_match_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } s;
+};
+
+union cvmx_iob_inb_data_match_enb {
+ uint64_t u64;
+ struct cvmx_iob_inb_data_match_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } s;
+};
+
+union cvmx_iob_int_enb {
+ uint64_t u64;
+ struct cvmx_iob_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t p_dat:1;
+ uint64_t np_dat:1;
+ uint64_t p_eop:1;
+ uint64_t p_sop:1;
+ uint64_t np_eop:1;
+ uint64_t np_sop:1;
+#else
+ uint64_t np_sop:1;
+ uint64_t np_eop:1;
+ uint64_t p_sop:1;
+ uint64_t p_eop:1;
+ uint64_t np_dat:1;
+ uint64_t p_dat:1;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+ struct cvmx_iob_int_enb_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t p_eop:1;
+ uint64_t p_sop:1;
+ uint64_t np_eop:1;
+ uint64_t np_sop:1;
+#else
+ uint64_t np_sop:1;
+ uint64_t np_eop:1;
+ uint64_t p_sop:1;
+ uint64_t p_eop:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } cn30xx;
+ struct cvmx_iob_int_enb_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } cn68xx;
+};
+
+union cvmx_iob_int_sum {
+ uint64_t u64;
+ struct cvmx_iob_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t p_dat:1;
+ uint64_t np_dat:1;
+ uint64_t p_eop:1;
+ uint64_t p_sop:1;
+ uint64_t np_eop:1;
+ uint64_t np_sop:1;
+#else
+ uint64_t np_sop:1;
+ uint64_t np_eop:1;
+ uint64_t p_sop:1;
+ uint64_t p_eop:1;
+ uint64_t np_dat:1;
+ uint64_t p_dat:1;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+ struct cvmx_iob_int_sum_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t p_eop:1;
+ uint64_t p_sop:1;
+ uint64_t np_eop:1;
+ uint64_t np_sop:1;
+#else
+ uint64_t np_sop:1;
+ uint64_t np_eop:1;
+ uint64_t p_sop:1;
+ uint64_t p_eop:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } cn30xx;
+ struct cvmx_iob_int_sum_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } cn68xx;
+};
+
+union cvmx_iob_n2c_l2c_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t cnt_enb:1;
+ uint64_t cnt_val:15;
+#else
+ uint64_t cnt_val:15;
+ uint64_t cnt_enb:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_iob_n2c_rsp_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t cnt_enb:1;
+ uint64_t cnt_val:15;
+#else
+ uint64_t cnt_val:15;
+ uint64_t cnt_enb:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_iob_outb_com_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_outb_com_pri_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t cnt_enb:1;
+ uint64_t cnt_val:15;
+#else
+ uint64_t cnt_val:15;
+ uint64_t cnt_enb:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_iob_outb_control_match {
+ uint64_t u64;
+ struct cvmx_iob_outb_control_match_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63:38;
+ uint64_t mask:8;
+ uint64_t eot:1;
+ uint64_t dst:8;
+ uint64_t src:9;
+#else
+ uint64_t src:9;
+ uint64_t dst:8;
+ uint64_t eot:1;
+ uint64_t mask:8;
+ uint64_t reserved_26_63:38;
+#endif
+ } s;
+};
+
+union cvmx_iob_outb_control_match_enb {
+ uint64_t u64;
+ struct cvmx_iob_outb_control_match_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63:38;
+ uint64_t mask:8;
+ uint64_t eot:1;
+ uint64_t dst:8;
+ uint64_t src:9;
+#else
+ uint64_t src:9;
+ uint64_t dst:8;
+ uint64_t eot:1;
+ uint64_t mask:8;
+ uint64_t reserved_26_63:38;
+#endif
+ } s;
+};
+
+union cvmx_iob_outb_data_match {
+ uint64_t u64;
+ struct cvmx_iob_outb_data_match_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } s;
+};
+
+union cvmx_iob_outb_data_match_enb {
+ uint64_t u64;
+ struct cvmx_iob_outb_data_match_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } s;
+};
+
+union cvmx_iob_outb_fpa_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_outb_fpa_pri_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t cnt_enb:1;
+ uint64_t cnt_val:15;
+#else
+ uint64_t cnt_val:15;
+ uint64_t cnt_enb:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_iob_outb_req_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_outb_req_pri_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t cnt_enb:1;
+ uint64_t cnt_val:15;
+#else
+ uint64_t cnt_val:15;
+ uint64_t cnt_enb:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_iob_p2c_req_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_p2c_req_pri_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t cnt_enb:1;
+ uint64_t cnt_val:15;
+#else
+ uint64_t cnt_val:15;
+ uint64_t cnt_enb:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_iob_pkt_err {
+ uint64_t u64;
+ struct cvmx_iob_pkt_err_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t vport:6;
+ uint64_t port:6;
+#else
+ uint64_t port:6;
+ uint64_t vport:6;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+ struct cvmx_iob_pkt_err_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t port:6;
+#else
+ uint64_t port:6;
+ uint64_t reserved_6_63:58;
+#endif
+ } cn30xx;
+};
+
+union cvmx_iob_to_cmb_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_cmb_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t ncb_rd:3;
+ uint64_t ncb_wr:3;
+#else
+ uint64_t ncb_wr:3;
+ uint64_t ncb_rd:3;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+ struct cvmx_iob_to_cmb_credits_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t pko_rd:3;
+ uint64_t ncb_rd:3;
+ uint64_t ncb_wr:3;
+#else
+ uint64_t ncb_wr:3;
+ uint64_t ncb_rd:3;
+ uint64_t pko_rd:3;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn52xx;
+ struct cvmx_iob_to_cmb_credits_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t dwb:3;
+ uint64_t ncb_rd:3;
+ uint64_t ncb_wr:3;
+#else
+ uint64_t ncb_wr:3;
+ uint64_t ncb_rd:3;
+ uint64_t dwb:3;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn68xx;
+};
+
+union cvmx_iob_to_ncb_did_00_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_00_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t crd:7;
+#else
+ uint64_t crd:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_iob_to_ncb_did_111_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_111_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t crd:7;
+#else
+ uint64_t crd:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_iob_to_ncb_did_223_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_223_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t crd:7;
+#else
+ uint64_t crd:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_iob_to_ncb_did_24_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_24_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t crd:7;
+#else
+ uint64_t crd:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_iob_to_ncb_did_32_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_32_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t crd:7;
+#else
+ uint64_t crd:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_iob_to_ncb_did_40_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_40_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t crd:7;
+#else
+ uint64_t crd:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_iob_to_ncb_did_55_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_55_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t crd:7;
+#else
+ uint64_t crd:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_iob_to_ncb_did_64_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_64_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t crd:7;
+#else
+ uint64_t crd:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_iob_to_ncb_did_79_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_79_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t crd:7;
+#else
+ uint64_t crd:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_iob_to_ncb_did_96_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_96_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t crd:7;
+#else
+ uint64_t crd:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_iob_to_ncb_did_98_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_ncb_did_98_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t crd:7;
+#else
+ uint64_t crd:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+#endif
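
Every register in this file follows the same pattern: a raw u64 view plus per-chip bitfield views whose member order flips under __BIG_ENDIAN_BITFIELD. A hedged sketch of the usual read-modify-write access, assuming the generic CSR accessors cvmx_read_csr()/cvmx_write_csr() from the main cvmx headers are available, and using an illustrative timeout value:

	union cvmx_iob_fau_timeout fau_timeout;

	/* Read-modify-write CVMX_IOB_FAU_TIMEOUT through its union view. */
	fau_timeout.u64 = cvmx_read_csr(CVMX_IOB_FAU_TIMEOUT);
	fau_timeout.s.tout_val = 0xfff;	/* illustrative timeout value */
	fau_timeout.s.tout_enb = 1;	/* enable the FAU timeout mechanism */
	cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
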
diff --git a/arch/mips/include/asm/octeon/cvmx-ipd-defs.h b/arch/mips/include/asm/octeon/cvmx-ipd-defs.h
new file mode 100644
index 000000000..c0a4ac7b4
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-ipd-defs.h
@@ -0,0 +1,1472 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_IPD_DEFS_H__
+#define __CVMX_IPD_DEFS_H__
+
+#define CVMX_IPD_1ST_MBUFF_SKIP (CVMX_ADD_IO_SEG(0x00014F0000000000ull))
+#define CVMX_IPD_1st_NEXT_PTR_BACK (CVMX_ADD_IO_SEG(0x00014F0000000150ull))
+#define CVMX_IPD_2nd_NEXT_PTR_BACK (CVMX_ADD_IO_SEG(0x00014F0000000158ull))
+#define CVMX_IPD_BIST_STATUS (CVMX_ADD_IO_SEG(0x00014F00000007F8ull))
+#define CVMX_IPD_BPIDX_MBUF_TH(offset) (CVMX_ADD_IO_SEG(0x00014F0000002000ull) + ((offset) & 63) * 8)
+#define CVMX_IPD_BPID_BP_COUNTERX(offset) (CVMX_ADD_IO_SEG(0x00014F0000003000ull) + ((offset) & 63) * 8)
+#define CVMX_IPD_BP_PRT_RED_END (CVMX_ADD_IO_SEG(0x00014F0000000328ull))
+#define CVMX_IPD_CLK_COUNT (CVMX_ADD_IO_SEG(0x00014F0000000338ull))
+#define CVMX_IPD_CREDITS (CVMX_ADD_IO_SEG(0x00014F0000004410ull))
+#define CVMX_IPD_CTL_STATUS (CVMX_ADD_IO_SEG(0x00014F0000000018ull))
+#define CVMX_IPD_ECC_CTL (CVMX_ADD_IO_SEG(0x00014F0000004408ull))
+#define CVMX_IPD_FREE_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000780ull))
+#define CVMX_IPD_FREE_PTR_VALUE (CVMX_ADD_IO_SEG(0x00014F0000000788ull))
+#define CVMX_IPD_HOLD_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000790ull))
+#define CVMX_IPD_INT_ENB (CVMX_ADD_IO_SEG(0x00014F0000000160ull))
+#define CVMX_IPD_INT_SUM (CVMX_ADD_IO_SEG(0x00014F0000000168ull))
+#define CVMX_IPD_NEXT_PKT_PTR (CVMX_ADD_IO_SEG(0x00014F00000007A0ull))
+#define CVMX_IPD_NEXT_WQE_PTR (CVMX_ADD_IO_SEG(0x00014F00000007A8ull))
+#define CVMX_IPD_NOT_1ST_MBUFF_SKIP (CVMX_ADD_IO_SEG(0x00014F0000000008ull))
+#define CVMX_IPD_ON_BP_DROP_PKTX(block_id) (CVMX_ADD_IO_SEG(0x00014F0000004100ull))
+#define CVMX_IPD_PACKET_MBUFF_SIZE (CVMX_ADD_IO_SEG(0x00014F0000000010ull))
+#define CVMX_IPD_PKT_ERR (CVMX_ADD_IO_SEG(0x00014F00000003F0ull))
+#define CVMX_IPD_PKT_PTR_VALID (CVMX_ADD_IO_SEG(0x00014F0000000358ull))
+#define CVMX_IPD_PORTX_BP_PAGE_CNT(offset) (CVMX_ADD_IO_SEG(0x00014F0000000028ull) + ((offset) & 63) * 8)
+#define CVMX_IPD_PORTX_BP_PAGE_CNT2(offset) (CVMX_ADD_IO_SEG(0x00014F0000000368ull) + ((offset) & 63) * 8 - 8*36)
+#define CVMX_IPD_PORTX_BP_PAGE_CNT3(offset) (CVMX_ADD_IO_SEG(0x00014F00000003D0ull) + ((offset) & 63) * 8 - 8*40)
+#define CVMX_IPD_PORT_BP_COUNTERS2_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000388ull) + ((offset) & 63) * 8 - 8*36)
+#define CVMX_IPD_PORT_BP_COUNTERS3_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F00000003B0ull) + ((offset) & 63) * 8 - 8*40)
+#define CVMX_IPD_PORT_BP_COUNTERS4_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000410ull) + ((offset) & 63) * 8 - 8*44)
+#define CVMX_IPD_PORT_BP_COUNTERS_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F00000001B8ull) + ((offset) & 63) * 8)
+#define CVMX_IPD_PORT_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000798ull))
+#define CVMX_IPD_PORT_QOS_INTX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000808ull) + ((offset) & 7) * 8)
+#define CVMX_IPD_PORT_QOS_INT_ENBX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000848ull) + ((offset) & 7) * 8)
+#define CVMX_IPD_PORT_QOS_X_CNT(offset) (CVMX_ADD_IO_SEG(0x00014F0000000888ull) + ((offset) & 511) * 8)
+#define CVMX_IPD_PORT_SOPX(block_id) (CVMX_ADD_IO_SEG(0x00014F0000004400ull))
+#define CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000348ull))
+#define CVMX_IPD_PRC_PORT_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000350ull))
+#define CVMX_IPD_PTR_COUNT (CVMX_ADD_IO_SEG(0x00014F0000000320ull))
+#define CVMX_IPD_PWP_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000340ull))
+#define CVMX_IPD_QOS0_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(0)
+#define CVMX_IPD_QOS1_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(1)
+#define CVMX_IPD_QOS2_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(2)
+#define CVMX_IPD_QOS3_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(3)
+#define CVMX_IPD_QOS4_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(4)
+#define CVMX_IPD_QOS5_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(5)
+#define CVMX_IPD_QOS6_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(6)
+#define CVMX_IPD_QOS7_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(7)
+#define CVMX_IPD_QOSX_RED_MARKS(offset) (CVMX_ADD_IO_SEG(0x00014F0000000178ull) + ((offset) & 7) * 8)
+#define CVMX_IPD_QUE0_FREE_PAGE_CNT (CVMX_ADD_IO_SEG(0x00014F0000000330ull))
+#define CVMX_IPD_RED_BPID_ENABLEX(block_id) (CVMX_ADD_IO_SEG(0x00014F0000004200ull))
+#define CVMX_IPD_RED_DELAY (CVMX_ADD_IO_SEG(0x00014F0000004300ull))
+#define CVMX_IPD_RED_PORT_ENABLE (CVMX_ADD_IO_SEG(0x00014F00000002D8ull))
+#define CVMX_IPD_RED_PORT_ENABLE2 (CVMX_ADD_IO_SEG(0x00014F00000003A8ull))
+#define CVMX_IPD_RED_QUE0_PARAM CVMX_IPD_RED_QUEX_PARAM(0)
+#define CVMX_IPD_RED_QUE1_PARAM CVMX_IPD_RED_QUEX_PARAM(1)
+#define CVMX_IPD_RED_QUE2_PARAM CVMX_IPD_RED_QUEX_PARAM(2)
+#define CVMX_IPD_RED_QUE3_PARAM CVMX_IPD_RED_QUEX_PARAM(3)
+#define CVMX_IPD_RED_QUE4_PARAM CVMX_IPD_RED_QUEX_PARAM(4)
+#define CVMX_IPD_RED_QUE5_PARAM CVMX_IPD_RED_QUEX_PARAM(5)
+#define CVMX_IPD_RED_QUE6_PARAM CVMX_IPD_RED_QUEX_PARAM(6)
+#define CVMX_IPD_RED_QUE7_PARAM CVMX_IPD_RED_QUEX_PARAM(7)
+#define CVMX_IPD_RED_QUEX_PARAM(offset) (CVMX_ADD_IO_SEG(0x00014F00000002E0ull) + ((offset) & 7) * 8)
+#define CVMX_IPD_REQ_WGT (CVMX_ADD_IO_SEG(0x00014F0000004418ull))
+#define CVMX_IPD_SUB_PORT_BP_PAGE_CNT (CVMX_ADD_IO_SEG(0x00014F0000000148ull))
+#define CVMX_IPD_SUB_PORT_FCS (CVMX_ADD_IO_SEG(0x00014F0000000170ull))
+#define CVMX_IPD_SUB_PORT_QOS_CNT (CVMX_ADD_IO_SEG(0x00014F0000000800ull))
+#define CVMX_IPD_WQE_FPA_QUEUE (CVMX_ADD_IO_SEG(0x00014F0000000020ull))
+#define CVMX_IPD_WQE_PTR_VALID (CVMX_ADD_IO_SEG(0x00014F0000000360ull))
+
+union cvmx_ipd_1st_mbuff_skip {
+ uint64_t u64;
+ struct cvmx_ipd_1st_mbuff_skip_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t skip_sz:6;
+#else
+ uint64_t skip_sz:6;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_ipd_1st_next_ptr_back {
+ uint64_t u64;
+ struct cvmx_ipd_1st_next_ptr_back_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t back:4;
+#else
+ uint64_t back:4;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_ipd_2nd_next_ptr_back {
+ uint64_t u64;
+ struct cvmx_ipd_2nd_next_ptr_back_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t back:4;
+#else
+ uint64_t back:4;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_ipd_bist_status {
+ uint64_t u64;
+ struct cvmx_ipd_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63:41;
+ uint64_t iiwo1:1;
+ uint64_t iiwo0:1;
+ uint64_t iio1:1;
+ uint64_t iio0:1;
+ uint64_t pbm4:1;
+ uint64_t csr_mem:1;
+ uint64_t csr_ncmd:1;
+ uint64_t pwq_wqed:1;
+ uint64_t pwq_wp1:1;
+ uint64_t pwq_pow:1;
+ uint64_t ipq_pbe1:1;
+ uint64_t ipq_pbe0:1;
+ uint64_t pbm3:1;
+ uint64_t pbm2:1;
+ uint64_t pbm1:1;
+ uint64_t pbm0:1;
+ uint64_t pbm_word:1;
+ uint64_t pwq1:1;
+ uint64_t pwq0:1;
+ uint64_t prc_off:1;
+ uint64_t ipd_old:1;
+ uint64_t ipd_new:1;
+ uint64_t pwp:1;
+#else
+ uint64_t pwp:1;
+ uint64_t ipd_new:1;
+ uint64_t ipd_old:1;
+ uint64_t prc_off:1;
+ uint64_t pwq0:1;
+ uint64_t pwq1:1;
+ uint64_t pbm_word:1;
+ uint64_t pbm0:1;
+ uint64_t pbm1:1;
+ uint64_t pbm2:1;
+ uint64_t pbm3:1;
+ uint64_t ipq_pbe0:1;
+ uint64_t ipq_pbe1:1;
+ uint64_t pwq_pow:1;
+ uint64_t pwq_wp1:1;
+ uint64_t pwq_wqed:1;
+ uint64_t csr_ncmd:1;
+ uint64_t csr_mem:1;
+ uint64_t pbm4:1;
+ uint64_t iio0:1;
+ uint64_t iio1:1;
+ uint64_t iiwo0:1;
+ uint64_t iiwo1:1;
+ uint64_t reserved_23_63:41;
+#endif
+ } s;
+ struct cvmx_ipd_bist_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t pwq_wqed:1;
+ uint64_t pwq_wp1:1;
+ uint64_t pwq_pow:1;
+ uint64_t ipq_pbe1:1;
+ uint64_t ipq_pbe0:1;
+ uint64_t pbm3:1;
+ uint64_t pbm2:1;
+ uint64_t pbm1:1;
+ uint64_t pbm0:1;
+ uint64_t pbm_word:1;
+ uint64_t pwq1:1;
+ uint64_t pwq0:1;
+ uint64_t prc_off:1;
+ uint64_t ipd_old:1;
+ uint64_t ipd_new:1;
+ uint64_t pwp:1;
+#else
+ uint64_t pwp:1;
+ uint64_t ipd_new:1;
+ uint64_t ipd_old:1;
+ uint64_t prc_off:1;
+ uint64_t pwq0:1;
+ uint64_t pwq1:1;
+ uint64_t pbm_word:1;
+ uint64_t pbm0:1;
+ uint64_t pbm1:1;
+ uint64_t pbm2:1;
+ uint64_t pbm3:1;
+ uint64_t ipq_pbe0:1;
+ uint64_t ipq_pbe1:1;
+ uint64_t pwq_pow:1;
+ uint64_t pwq_wp1:1;
+ uint64_t pwq_wqed:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } cn30xx;
+ struct cvmx_ipd_bist_status_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63:46;
+ uint64_t csr_mem:1;
+ uint64_t csr_ncmd:1;
+ uint64_t pwq_wqed:1;
+ uint64_t pwq_wp1:1;
+ uint64_t pwq_pow:1;
+ uint64_t ipq_pbe1:1;
+ uint64_t ipq_pbe0:1;
+ uint64_t pbm3:1;
+ uint64_t pbm2:1;
+ uint64_t pbm1:1;
+ uint64_t pbm0:1;
+ uint64_t pbm_word:1;
+ uint64_t pwq1:1;
+ uint64_t pwq0:1;
+ uint64_t prc_off:1;
+ uint64_t ipd_old:1;
+ uint64_t ipd_new:1;
+ uint64_t pwp:1;
+#else
+ uint64_t pwp:1;
+ uint64_t ipd_new:1;
+ uint64_t ipd_old:1;
+ uint64_t prc_off:1;
+ uint64_t pwq0:1;
+ uint64_t pwq1:1;
+ uint64_t pbm_word:1;
+ uint64_t pbm0:1;
+ uint64_t pbm1:1;
+ uint64_t pbm2:1;
+ uint64_t pbm3:1;
+ uint64_t ipq_pbe0:1;
+ uint64_t ipq_pbe1:1;
+ uint64_t pwq_pow:1;
+ uint64_t pwq_wp1:1;
+ uint64_t pwq_wqed:1;
+ uint64_t csr_ncmd:1;
+ uint64_t csr_mem:1;
+ uint64_t reserved_18_63:46;
+#endif
+ } cn52xx;
+};
+
+union cvmx_ipd_bp_prt_red_end {
+ uint64_t u64;
+ struct cvmx_ipd_bp_prt_red_end_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t prt_enb:48;
+#else
+ uint64_t prt_enb:48;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+ struct cvmx_ipd_bp_prt_red_end_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63:28;
+ uint64_t prt_enb:36;
+#else
+ uint64_t prt_enb:36;
+ uint64_t reserved_36_63:28;
+#endif
+ } cn30xx;
+ struct cvmx_ipd_bp_prt_red_end_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t prt_enb:40;
+#else
+ uint64_t prt_enb:40;
+ uint64_t reserved_40_63:24;
+#endif
+ } cn52xx;
+ struct cvmx_ipd_bp_prt_red_end_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t prt_enb:44;
+#else
+ uint64_t prt_enb:44;
+ uint64_t reserved_44_63:20;
+#endif
+ } cn63xx;
+};
+
+union cvmx_ipd_bpidx_mbuf_th {
+ uint64_t u64;
+ struct cvmx_ipd_bpidx_mbuf_th_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63:46;
+ uint64_t bp_enb:1;
+ uint64_t page_cnt:17;
+#else
+ uint64_t page_cnt:17;
+ uint64_t bp_enb:1;
+ uint64_t reserved_18_63:46;
+#endif
+ } s;
+};
+
+union cvmx_ipd_bpid_bp_counterx {
+ uint64_t u64;
+ struct cvmx_ipd_bpid_bp_counterx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t cnt_val:25;
+#else
+ uint64_t cnt_val:25;
+ uint64_t reserved_25_63:39;
+#endif
+ } s;
+};
+
+union cvmx_ipd_clk_count {
+ uint64_t u64;
+ struct cvmx_ipd_clk_count_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t clk_cnt:64;
+#else
+ uint64_t clk_cnt:64;
+#endif
+ } s;
+};
+
+union cvmx_ipd_credits {
+ uint64_t u64;
+ struct cvmx_ipd_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t iob_wrc:8;
+ uint64_t iob_wr:8;
+#else
+ uint64_t iob_wr:8;
+ uint64_t iob_wrc:8;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_ipd_ctl_status {
+ uint64_t u64;
+ struct cvmx_ipd_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63:46;
+ uint64_t use_sop:1;
+ uint64_t rst_done:1;
+ uint64_t clken:1;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+#else
+ uint64_t ipd_en:1;
+ uint64_t opc_mode:2;
+ uint64_t pbp_en:1;
+ uint64_t wqe_lend:1;
+ uint64_t pkt_lend:1;
+ uint64_t naddbuf:1;
+ uint64_t addpkt:1;
+ uint64_t reset:1;
+ uint64_t len_m8:1;
+ uint64_t pkt_off:1;
+ uint64_t ipd_full:1;
+ uint64_t pq_nabuf:1;
+ uint64_t pq_apkt:1;
+ uint64_t no_wptr:1;
+ uint64_t clken:1;
+ uint64_t rst_done:1;
+ uint64_t use_sop:1;
+ uint64_t reserved_18_63:46;
+#endif
+ } s;
+ struct cvmx_ipd_ctl_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+#else
+ uint64_t ipd_en:1;
+ uint64_t opc_mode:2;
+ uint64_t pbp_en:1;
+ uint64_t wqe_lend:1;
+ uint64_t pkt_lend:1;
+ uint64_t naddbuf:1;
+ uint64_t addpkt:1;
+ uint64_t reset:1;
+ uint64_t len_m8:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn30xx;
+ struct cvmx_ipd_ctl_status_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+#else
+ uint64_t ipd_en:1;
+ uint64_t opc_mode:2;
+ uint64_t pbp_en:1;
+ uint64_t wqe_lend:1;
+ uint64_t pkt_lend:1;
+ uint64_t naddbuf:1;
+ uint64_t addpkt:1;
+ uint64_t reset:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn38xxp2;
+ struct cvmx_ipd_ctl_status_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63:49;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+#else
+ uint64_t ipd_en:1;
+ uint64_t opc_mode:2;
+ uint64_t pbp_en:1;
+ uint64_t wqe_lend:1;
+ uint64_t pkt_lend:1;
+ uint64_t naddbuf:1;
+ uint64_t addpkt:1;
+ uint64_t reset:1;
+ uint64_t len_m8:1;
+ uint64_t pkt_off:1;
+ uint64_t ipd_full:1;
+ uint64_t pq_nabuf:1;
+ uint64_t pq_apkt:1;
+ uint64_t no_wptr:1;
+ uint64_t reserved_15_63:49;
+#endif
+ } cn50xx;
+ struct cvmx_ipd_ctl_status_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+#else
+ uint64_t ipd_en:1;
+ uint64_t opc_mode:2;
+ uint64_t pbp_en:1;
+ uint64_t wqe_lend:1;
+ uint64_t pkt_lend:1;
+ uint64_t naddbuf:1;
+ uint64_t addpkt:1;
+ uint64_t reset:1;
+ uint64_t len_m8:1;
+ uint64_t pkt_off:1;
+ uint64_t ipd_full:1;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn58xx;
+ struct cvmx_ipd_ctl_status_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t clken:1;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+#else
+ uint64_t ipd_en:1;
+ uint64_t opc_mode:2;
+ uint64_t pbp_en:1;
+ uint64_t wqe_lend:1;
+ uint64_t pkt_lend:1;
+ uint64_t naddbuf:1;
+ uint64_t addpkt:1;
+ uint64_t reset:1;
+ uint64_t len_m8:1;
+ uint64_t pkt_off:1;
+ uint64_t ipd_full:1;
+ uint64_t pq_nabuf:1;
+ uint64_t pq_apkt:1;
+ uint64_t no_wptr:1;
+ uint64_t clken:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } cn63xxp1;
+};
+
+union cvmx_ipd_ecc_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_ecc_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t pm3_syn:2;
+ uint64_t pm2_syn:2;
+ uint64_t pm1_syn:2;
+ uint64_t pm0_syn:2;
+#else
+ uint64_t pm0_syn:2;
+ uint64_t pm1_syn:2;
+ uint64_t pm2_syn:2;
+ uint64_t pm3_syn:2;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_ipd_free_ptr_fifo_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_free_ptr_fifo_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t max_cnts:7;
+ uint64_t wraddr:8;
+ uint64_t praddr:8;
+ uint64_t cena:1;
+ uint64_t raddr:8;
+#else
+ uint64_t raddr:8;
+ uint64_t cena:1;
+ uint64_t praddr:8;
+ uint64_t wraddr:8;
+ uint64_t max_cnts:7;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_ipd_free_ptr_value {
+ uint64_t u64;
+ struct cvmx_ipd_free_ptr_value_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63:31;
+ uint64_t ptr:33;
+#else
+ uint64_t ptr:33;
+ uint64_t reserved_33_63:31;
+#endif
+ } s;
+};
+
+union cvmx_ipd_hold_ptr_fifo_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_hold_ptr_fifo_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_43_63:21;
+ uint64_t ptr:33;
+ uint64_t max_pkt:3;
+ uint64_t praddr:3;
+ uint64_t cena:1;
+ uint64_t raddr:3;
+#else
+ uint64_t raddr:3;
+ uint64_t cena:1;
+ uint64_t praddr:3;
+ uint64_t max_pkt:3;
+ uint64_t ptr:33;
+ uint64_t reserved_43_63:21;
+#endif
+ } s;
+};
+
+union cvmx_ipd_int_enb {
+ uint64_t u64;
+ struct cvmx_ipd_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63:41;
+ uint64_t pw3_dbe:1;
+ uint64_t pw3_sbe:1;
+ uint64_t pw2_dbe:1;
+ uint64_t pw2_sbe:1;
+ uint64_t pw1_dbe:1;
+ uint64_t pw1_sbe:1;
+ uint64_t pw0_dbe:1;
+ uint64_t pw0_sbe:1;
+ uint64_t dat:1;
+ uint64_t eop:1;
+ uint64_t sop:1;
+ uint64_t pq_sub:1;
+ uint64_t pq_add:1;
+ uint64_t bc_ovr:1;
+ uint64_t d_coll:1;
+ uint64_t c_coll:1;
+ uint64_t cc_ovr:1;
+ uint64_t dc_ovr:1;
+ uint64_t bp_sub:1;
+ uint64_t prc_par3:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par0:1;
+#else
+ uint64_t prc_par0:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par3:1;
+ uint64_t bp_sub:1;
+ uint64_t dc_ovr:1;
+ uint64_t cc_ovr:1;
+ uint64_t c_coll:1;
+ uint64_t d_coll:1;
+ uint64_t bc_ovr:1;
+ uint64_t pq_add:1;
+ uint64_t pq_sub:1;
+ uint64_t sop:1;
+ uint64_t eop:1;
+ uint64_t dat:1;
+ uint64_t pw0_sbe:1;
+ uint64_t pw0_dbe:1;
+ uint64_t pw1_sbe:1;
+ uint64_t pw1_dbe:1;
+ uint64_t pw2_sbe:1;
+ uint64_t pw2_dbe:1;
+ uint64_t pw3_sbe:1;
+ uint64_t pw3_dbe:1;
+ uint64_t reserved_23_63:41;
+#endif
+ } s;
+ struct cvmx_ipd_int_enb_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t bp_sub:1;
+ uint64_t prc_par3:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par0:1;
+#else
+ uint64_t prc_par0:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par3:1;
+ uint64_t bp_sub:1;
+ uint64_t reserved_5_63:59;
+#endif
+ } cn30xx;
+ struct cvmx_ipd_int_enb_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t bc_ovr:1;
+ uint64_t d_coll:1;
+ uint64_t c_coll:1;
+ uint64_t cc_ovr:1;
+ uint64_t dc_ovr:1;
+ uint64_t bp_sub:1;
+ uint64_t prc_par3:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par0:1;
+#else
+ uint64_t prc_par0:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par3:1;
+ uint64_t bp_sub:1;
+ uint64_t dc_ovr:1;
+ uint64_t cc_ovr:1;
+ uint64_t c_coll:1;
+ uint64_t d_coll:1;
+ uint64_t bc_ovr:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn38xx;
+ struct cvmx_ipd_int_enb_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t pq_sub:1;
+ uint64_t pq_add:1;
+ uint64_t bc_ovr:1;
+ uint64_t d_coll:1;
+ uint64_t c_coll:1;
+ uint64_t cc_ovr:1;
+ uint64_t dc_ovr:1;
+ uint64_t bp_sub:1;
+ uint64_t prc_par3:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par0:1;
+#else
+ uint64_t prc_par0:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par3:1;
+ uint64_t bp_sub:1;
+ uint64_t dc_ovr:1;
+ uint64_t cc_ovr:1;
+ uint64_t c_coll:1;
+ uint64_t d_coll:1;
+ uint64_t bc_ovr:1;
+ uint64_t pq_add:1;
+ uint64_t pq_sub:1;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn52xx;
+};
+
+union cvmx_ipd_int_sum {
+ uint64_t u64;
+ struct cvmx_ipd_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63:41;
+ uint64_t pw3_dbe:1;
+ uint64_t pw3_sbe:1;
+ uint64_t pw2_dbe:1;
+ uint64_t pw2_sbe:1;
+ uint64_t pw1_dbe:1;
+ uint64_t pw1_sbe:1;
+ uint64_t pw0_dbe:1;
+ uint64_t pw0_sbe:1;
+ uint64_t dat:1;
+ uint64_t eop:1;
+ uint64_t sop:1;
+ uint64_t pq_sub:1;
+ uint64_t pq_add:1;
+ uint64_t bc_ovr:1;
+ uint64_t d_coll:1;
+ uint64_t c_coll:1;
+ uint64_t cc_ovr:1;
+ uint64_t dc_ovr:1;
+ uint64_t bp_sub:1;
+ uint64_t prc_par3:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par0:1;
+#else
+ uint64_t prc_par0:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par3:1;
+ uint64_t bp_sub:1;
+ uint64_t dc_ovr:1;
+ uint64_t cc_ovr:1;
+ uint64_t c_coll:1;
+ uint64_t d_coll:1;
+ uint64_t bc_ovr:1;
+ uint64_t pq_add:1;
+ uint64_t pq_sub:1;
+ uint64_t sop:1;
+ uint64_t eop:1;
+ uint64_t dat:1;
+ uint64_t pw0_sbe:1;
+ uint64_t pw0_dbe:1;
+ uint64_t pw1_sbe:1;
+ uint64_t pw1_dbe:1;
+ uint64_t pw2_sbe:1;
+ uint64_t pw2_dbe:1;
+ uint64_t pw3_sbe:1;
+ uint64_t pw3_dbe:1;
+ uint64_t reserved_23_63:41;
+#endif
+ } s;
+ struct cvmx_ipd_int_sum_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t bp_sub:1;
+ uint64_t prc_par3:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par0:1;
+#else
+ uint64_t prc_par0:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par3:1;
+ uint64_t bp_sub:1;
+ uint64_t reserved_5_63:59;
+#endif
+ } cn30xx;
+ struct cvmx_ipd_int_sum_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t bc_ovr:1;
+ uint64_t d_coll:1;
+ uint64_t c_coll:1;
+ uint64_t cc_ovr:1;
+ uint64_t dc_ovr:1;
+ uint64_t bp_sub:1;
+ uint64_t prc_par3:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par0:1;
+#else
+ uint64_t prc_par0:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par3:1;
+ uint64_t bp_sub:1;
+ uint64_t dc_ovr:1;
+ uint64_t cc_ovr:1;
+ uint64_t c_coll:1;
+ uint64_t d_coll:1;
+ uint64_t bc_ovr:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn38xx;
+ struct cvmx_ipd_int_sum_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t pq_sub:1;
+ uint64_t pq_add:1;
+ uint64_t bc_ovr:1;
+ uint64_t d_coll:1;
+ uint64_t c_coll:1;
+ uint64_t cc_ovr:1;
+ uint64_t dc_ovr:1;
+ uint64_t bp_sub:1;
+ uint64_t prc_par3:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par0:1;
+#else
+ uint64_t prc_par0:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par3:1;
+ uint64_t bp_sub:1;
+ uint64_t dc_ovr:1;
+ uint64_t cc_ovr:1;
+ uint64_t c_coll:1;
+ uint64_t d_coll:1;
+ uint64_t bc_ovr:1;
+ uint64_t pq_add:1;
+ uint64_t pq_sub:1;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn52xx;
+};
+
+union cvmx_ipd_next_pkt_ptr {
+ uint64_t u64;
+ struct cvmx_ipd_next_pkt_ptr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63:31;
+ uint64_t ptr:33;
+#else
+ uint64_t ptr:33;
+ uint64_t reserved_33_63:31;
+#endif
+ } s;
+};
+
+union cvmx_ipd_next_wqe_ptr {
+ uint64_t u64;
+ struct cvmx_ipd_next_wqe_ptr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63:31;
+ uint64_t ptr:33;
+#else
+ uint64_t ptr:33;
+ uint64_t reserved_33_63:31;
+#endif
+ } s;
+};
+
+union cvmx_ipd_not_1st_mbuff_skip {
+ uint64_t u64;
+ struct cvmx_ipd_not_1st_mbuff_skip_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t skip_sz:6;
+#else
+ uint64_t skip_sz:6;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_ipd_on_bp_drop_pktx {
+ uint64_t u64;
+ struct cvmx_ipd_on_bp_drop_pktx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t prt_enb:64;
+#else
+ uint64_t prt_enb:64;
+#endif
+ } s;
+};
+
+union cvmx_ipd_packet_mbuff_size {
+ uint64_t u64;
+ struct cvmx_ipd_packet_mbuff_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t mb_size:12;
+#else
+ uint64_t mb_size:12;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+};
+
+union cvmx_ipd_pkt_err {
+ uint64_t u64;
+ struct cvmx_ipd_pkt_err_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t reasm:6;
+#else
+ uint64_t reasm:6;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_ipd_pkt_ptr_valid {
+ uint64_t u64;
+ struct cvmx_ipd_pkt_ptr_valid_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t ptr:29;
+#else
+ uint64_t ptr:29;
+ uint64_t reserved_29_63:35;
+#endif
+ } s;
+};
+
+union cvmx_ipd_portx_bp_page_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_portx_bp_page_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63:46;
+ uint64_t bp_enb:1;
+ uint64_t page_cnt:17;
+#else
+ uint64_t page_cnt:17;
+ uint64_t bp_enb:1;
+ uint64_t reserved_18_63:46;
+#endif
+ } s;
+};
+
+union cvmx_ipd_portx_bp_page_cnt2 {
+ uint64_t u64;
+ struct cvmx_ipd_portx_bp_page_cnt2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63:46;
+ uint64_t bp_enb:1;
+ uint64_t page_cnt:17;
+#else
+ uint64_t page_cnt:17;
+ uint64_t bp_enb:1;
+ uint64_t reserved_18_63:46;
+#endif
+ } s;
+};
+
+union cvmx_ipd_portx_bp_page_cnt3 {
+ uint64_t u64;
+ struct cvmx_ipd_portx_bp_page_cnt3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63:46;
+ uint64_t bp_enb:1;
+ uint64_t page_cnt:17;
+#else
+ uint64_t page_cnt:17;
+ uint64_t bp_enb:1;
+ uint64_t reserved_18_63:46;
+#endif
+ } s;
+};
+
+union cvmx_ipd_port_bp_counters2_pairx {
+ uint64_t u64;
+ struct cvmx_ipd_port_bp_counters2_pairx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t cnt_val:25;
+#else
+ uint64_t cnt_val:25;
+ uint64_t reserved_25_63:39;
+#endif
+ } s;
+};
+
+union cvmx_ipd_port_bp_counters3_pairx {
+ uint64_t u64;
+ struct cvmx_ipd_port_bp_counters3_pairx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t cnt_val:25;
+#else
+ uint64_t cnt_val:25;
+ uint64_t reserved_25_63:39;
+#endif
+ } s;
+};
+
+union cvmx_ipd_port_bp_counters4_pairx {
+ uint64_t u64;
+ struct cvmx_ipd_port_bp_counters4_pairx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t cnt_val:25;
+#else
+ uint64_t cnt_val:25;
+ uint64_t reserved_25_63:39;
+#endif
+ } s;
+};
+
+union cvmx_ipd_port_bp_counters_pairx {
+ uint64_t u64;
+ struct cvmx_ipd_port_bp_counters_pairx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t cnt_val:25;
+#else
+ uint64_t cnt_val:25;
+ uint64_t reserved_25_63:39;
+#endif
+ } s;
+};
+
+union cvmx_ipd_port_ptr_fifo_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_port_ptr_fifo_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t ptr:33;
+ uint64_t max_pkt:7;
+ uint64_t cena:1;
+ uint64_t raddr:7;
+#else
+ uint64_t raddr:7;
+ uint64_t cena:1;
+ uint64_t max_pkt:7;
+ uint64_t ptr:33;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_ipd_port_qos_x_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_port_qos_x_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wmark:32;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t wmark:32;
+#endif
+ } s;
+};
+
+union cvmx_ipd_port_qos_intx {
+ uint64_t u64;
+ struct cvmx_ipd_port_qos_intx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t intr:64;
+#else
+ uint64_t intr:64;
+#endif
+ } s;
+};
+
+union cvmx_ipd_port_qos_int_enbx {
+ uint64_t u64;
+ struct cvmx_ipd_port_qos_int_enbx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t enb:64;
+#else
+ uint64_t enb:64;
+#endif
+ } s;
+};
+
+union cvmx_ipd_port_sopx {
+ uint64_t u64;
+ struct cvmx_ipd_port_sopx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t sop:64;
+#else
+ uint64_t sop:64;
+#endif
+ } s;
+};
+
+union cvmx_ipd_prc_hold_ptr_fifo_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63:25;
+ uint64_t max_pkt:3;
+ uint64_t praddr:3;
+ uint64_t ptr:29;
+ uint64_t cena:1;
+ uint64_t raddr:3;
+#else
+ uint64_t raddr:3;
+ uint64_t cena:1;
+ uint64_t ptr:29;
+ uint64_t praddr:3;
+ uint64_t max_pkt:3;
+ uint64_t reserved_39_63:25;
+#endif
+ } s;
+};
+
+union cvmx_ipd_prc_port_ptr_fifo_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t max_pkt:7;
+ uint64_t ptr:29;
+ uint64_t cena:1;
+ uint64_t raddr:7;
+#else
+ uint64_t raddr:7;
+ uint64_t cena:1;
+ uint64_t ptr:29;
+ uint64_t max_pkt:7;
+ uint64_t reserved_44_63:20;
+#endif
+ } s;
+};
+
+union cvmx_ipd_ptr_count {
+ uint64_t u64;
+ struct cvmx_ipd_ptr_count_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63:45;
+ uint64_t pktv_cnt:1;
+ uint64_t wqev_cnt:1;
+ uint64_t pfif_cnt:3;
+ uint64_t pkt_pcnt:7;
+ uint64_t wqe_pcnt:7;
+#else
+ uint64_t wqe_pcnt:7;
+ uint64_t pkt_pcnt:7;
+ uint64_t pfif_cnt:3;
+ uint64_t wqev_cnt:1;
+ uint64_t pktv_cnt:1;
+ uint64_t reserved_19_63:45;
+#endif
+ } s;
+};
+
+union cvmx_ipd_pwp_ptr_fifo_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63:3;
+ uint64_t max_cnts:7;
+ uint64_t wraddr:8;
+ uint64_t praddr:8;
+ uint64_t ptr:29;
+ uint64_t cena:1;
+ uint64_t raddr:8;
+#else
+ uint64_t raddr:8;
+ uint64_t cena:1;
+ uint64_t ptr:29;
+ uint64_t praddr:8;
+ uint64_t wraddr:8;
+ uint64_t max_cnts:7;
+ uint64_t reserved_61_63:3;
+#endif
+ } s;
+};
+
+union cvmx_ipd_qosx_red_marks {
+ uint64_t u64;
+ struct cvmx_ipd_qosx_red_marks_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t drop:32;
+ uint64_t pass:32;
+#else
+ uint64_t pass:32;
+ uint64_t drop:32;
+#endif
+ } s;
+};
+
+union cvmx_ipd_que0_free_page_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_que0_free_page_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t q0_pcnt:32;
+#else
+ uint64_t q0_pcnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_ipd_red_bpid_enablex {
+ uint64_t u64;
+ struct cvmx_ipd_red_bpid_enablex_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t prt_enb:64;
+#else
+ uint64_t prt_enb:64;
+#endif
+ } s;
+};
+
+union cvmx_ipd_red_delay {
+ uint64_t u64;
+ struct cvmx_ipd_red_delay_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t prb_dly:14;
+ uint64_t avg_dly:14;
+#else
+ uint64_t avg_dly:14;
+ uint64_t prb_dly:14;
+ uint64_t reserved_28_63:36;
+#endif
+ } s;
+};
+
+union cvmx_ipd_red_port_enable {
+ uint64_t u64;
+ struct cvmx_ipd_red_port_enable_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t prb_dly:14;
+ uint64_t avg_dly:14;
+ uint64_t prt_enb:36;
+#else
+ uint64_t prt_enb:36;
+ uint64_t avg_dly:14;
+ uint64_t prb_dly:14;
+#endif
+ } s;
+};
+
+union cvmx_ipd_red_port_enable2 {
+ uint64_t u64;
+ struct cvmx_ipd_red_port_enable2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t prt_enb:12;
+#else
+ uint64_t prt_enb:12;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+ struct cvmx_ipd_red_port_enable2_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t prt_enb:4;
+#else
+ uint64_t prt_enb:4;
+ uint64_t reserved_4_63:60;
+#endif
+ } cn52xx;
+ struct cvmx_ipd_red_port_enable2_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t prt_enb:8;
+#else
+ uint64_t prt_enb:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } cn63xx;
+};
+
+union cvmx_ipd_red_quex_param {
+ uint64_t u64;
+ struct cvmx_ipd_red_quex_param_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63:15;
+ uint64_t use_pcnt:1;
+ uint64_t new_con:8;
+ uint64_t avg_con:8;
+ uint64_t prb_con:32;
+#else
+ uint64_t prb_con:32;
+ uint64_t avg_con:8;
+ uint64_t new_con:8;
+ uint64_t use_pcnt:1;
+ uint64_t reserved_49_63:15;
+#endif
+ } s;
+};
+
+union cvmx_ipd_req_wgt {
+ uint64_t u64;
+ struct cvmx_ipd_req_wgt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wgt7:8;
+ uint64_t wgt6:8;
+ uint64_t wgt5:8;
+ uint64_t wgt4:8;
+ uint64_t wgt3:8;
+ uint64_t wgt2:8;
+ uint64_t wgt1:8;
+ uint64_t wgt0:8;
+#else
+ uint64_t wgt0:8;
+ uint64_t wgt1:8;
+ uint64_t wgt2:8;
+ uint64_t wgt3:8;
+ uint64_t wgt4:8;
+ uint64_t wgt5:8;
+ uint64_t wgt6:8;
+ uint64_t wgt7:8;
+#endif
+ } s;
+};
+
+union cvmx_ipd_sub_port_bp_page_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63:33;
+ uint64_t port:6;
+ uint64_t page_cnt:25;
+#else
+ uint64_t page_cnt:25;
+ uint64_t port:6;
+ uint64_t reserved_31_63:33;
+#endif
+ } s;
+};
+
+union cvmx_ipd_sub_port_fcs {
+ uint64_t u64;
+ struct cvmx_ipd_sub_port_fcs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t port_bit2:4;
+ uint64_t reserved_32_35:4;
+ uint64_t port_bit:32;
+#else
+ uint64_t port_bit:32;
+ uint64_t reserved_32_35:4;
+ uint64_t port_bit2:4;
+ uint64_t reserved_40_63:24;
+#endif
+ } s;
+ struct cvmx_ipd_sub_port_fcs_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t port_bit:3;
+#else
+ uint64_t port_bit:3;
+ uint64_t reserved_3_63:61;
+#endif
+ } cn30xx;
+ struct cvmx_ipd_sub_port_fcs_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t port_bit:32;
+#else
+ uint64_t port_bit:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn38xx;
+};
+
+union cvmx_ipd_sub_port_qos_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_sub_port_qos_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_41_63:23;
+ uint64_t port_qos:9;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t port_qos:9;
+ uint64_t reserved_41_63:23;
+#endif
+ } s;
+};
+
+union cvmx_ipd_wqe_fpa_queue {
+ uint64_t u64;
+ struct cvmx_ipd_wqe_fpa_queue_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t wqe_pool:3;
+#else
+ uint64_t wqe_pool:3;
+ uint64_t reserved_3_63:61;
+#endif
+ } s;
+};
+
+union cvmx_ipd_wqe_ptr_valid {
+ uint64_t u64;
+ struct cvmx_ipd_wqe_ptr_valid_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t ptr:29;
+#else
+ uint64_t ptr:29;
+ uint64_t reserved_29_63:35;
+#endif
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-ipd.h b/arch/mips/include/asm/octeon/cvmx-ipd.h
new file mode 100644
index 000000000..adab7b54c
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-ipd.h
@@ -0,0 +1,339 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * Interface to the hardware Input Packet Data unit.
+ */
+
+#ifndef __CVMX_IPD_H__
+#define __CVMX_IPD_H__
+
+#include <asm/octeon/octeon-feature.h>
+
+#include <asm/octeon/cvmx-ipd-defs.h>
+#include <asm/octeon/cvmx-pip-defs.h>
+
+enum cvmx_ipd_mode {
+ CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
+ CVMX_IPD_OPC_MODE_STF = 1LL, /* All blocks into L2 */
+ CVMX_IPD_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */
+ CVMX_IPD_OPC_MODE_STF2_STT = 3LL /* 1st, 2nd blocks L2, rest DRAM */
+};
+
+#ifndef CVMX_ENABLE_LEN_M8_FIX
+#define CVMX_ENABLE_LEN_M8_FIX 0
+#endif
+
+/* CSR typedefs have been moved to cvmx-csr-*.h */
+typedef union cvmx_ipd_1st_mbuff_skip cvmx_ipd_mbuff_first_skip_t;
+typedef union cvmx_ipd_1st_next_ptr_back cvmx_ipd_first_next_ptr_back_t;
+
+typedef cvmx_ipd_mbuff_first_skip_t cvmx_ipd_mbuff_not_first_skip_t;
+typedef cvmx_ipd_first_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;
+
+/**
+ * Configure IPD
+ *
+ * @mbuff_size: Packet buffer size in 8 byte words
+ * @first_mbuff_skip:
+ * Number of 8 byte words to skip in the first buffer
+ * @not_first_mbuff_skip:
+ * Number of 8 byte words to skip in each following buffer
+ * @first_back: Must be same as first_mbuff_skip / 128
+ * @second_back:
+ * Must be same as not_first_mbuff_skip / 128
+ * @wqe_fpa_pool:
+ * FPA pool to get work entries from
+ * @cache_mode:
+ * Write mode for received packet data: L2, DRAM or a mix
+ * (see enum cvmx_ipd_mode)
+ * @back_pres_enable_flag:
+ * Enable or disable port back pressure
+ */
+static inline void cvmx_ipd_config(uint64_t mbuff_size,
+ uint64_t first_mbuff_skip,
+ uint64_t not_first_mbuff_skip,
+ uint64_t first_back,
+ uint64_t second_back,
+ uint64_t wqe_fpa_pool,
+ enum cvmx_ipd_mode cache_mode,
+ uint64_t back_pres_enable_flag)
+{
+ cvmx_ipd_mbuff_first_skip_t first_skip;
+ cvmx_ipd_mbuff_not_first_skip_t not_first_skip;
+ union cvmx_ipd_packet_mbuff_size size;
+ cvmx_ipd_first_next_ptr_back_t first_back_struct;
+ cvmx_ipd_second_next_ptr_back_t second_back_struct;
+ union cvmx_ipd_wqe_fpa_queue wqe_pool;
+ union cvmx_ipd_ctl_status ipd_ctl_reg;
+
+ first_skip.u64 = 0;
+ first_skip.s.skip_sz = first_mbuff_skip;
+ cvmx_write_csr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64);
+
+ not_first_skip.u64 = 0;
+ not_first_skip.s.skip_sz = not_first_mbuff_skip;
+ cvmx_write_csr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64);
+
+ size.u64 = 0;
+ size.s.mb_size = mbuff_size;
+ cvmx_write_csr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64);
+
+ first_back_struct.u64 = 0;
+ first_back_struct.s.back = first_back;
+ cvmx_write_csr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64);
+
+ second_back_struct.u64 = 0;
+ second_back_struct.s.back = second_back;
+ cvmx_write_csr(CVMX_IPD_2nd_NEXT_PTR_BACK, second_back_struct.u64);
+
+ wqe_pool.u64 = 0;
+ wqe_pool.s.wqe_pool = wqe_fpa_pool;
+ cvmx_write_csr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64);
+
+ ipd_ctl_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ ipd_ctl_reg.s.opc_mode = cache_mode;
+ ipd_ctl_reg.s.pbp_en = back_pres_enable_flag;
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64);
+
+ /* Note: the example RED code that used to be here has been moved to
+ cvmx_helper_setup_red */
+}
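+
+/*
+ * Illustrative sketch, not part of the original header: one way a board
+ * setup routine might call cvmx_ipd_config(). The buffer geometry and the
+ * WQE pool number below are example values only; real code derives them
+ * from the FPA and helper configuration.
+ */
+static inline void example_ipd_setup(void)
+{
+ uint64_t mbuff_size_bytes = 2048; /* packet buffer size */
+ uint64_t first_skip_bytes = 184; /* skip in the first buffer */
+ uint64_t not_first_skip_bytes = 0; /* skip in later buffers */
+
+ cvmx_ipd_config(mbuff_size_bytes / 8,
+ first_skip_bytes / 8,
+ not_first_skip_bytes / 8,
+ first_skip_bytes / 128,
+ not_first_skip_bytes / 128,
+ 1, /* assumed WQE FPA pool number */
+ CVMX_IPD_OPC_MODE_STT,
+ 1); /* enable per-port backpressure */
+}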
+
+/**
+ * Enable IPD
+ */
+static inline void cvmx_ipd_enable(void)
+{
+ union cvmx_ipd_ctl_status ipd_reg;
+ ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ if (ipd_reg.s.ipd_en) {
+ cvmx_dprintf
+ ("Warning: Enabling IPD when IPD already enabled.\n");
+ }
+ ipd_reg.s.ipd_en = 1;
+#if CVMX_ENABLE_LEN_M8_FIX
+ if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
+ ipd_reg.s.len_m8 = TRUE;
+#endif
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
+}
+
+/**
+ * Disable IPD
+ */
+static inline void cvmx_ipd_disable(void)
+{
+ union cvmx_ipd_ctl_status ipd_reg;
+ ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ ipd_reg.s.ipd_en = 0;
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
+}
+
+/**
+ * Supportive function for cvmx_fpa_shutdown_pool.
+ */
+static inline void cvmx_ipd_free_ptr(void)
+{
+ /* Only CN38XX pass 1 and pass 2 cannot read pointers back out of the IPD */
+ if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1)
+ && !OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
+ int no_wptr = 0;
+ union cvmx_ipd_ptr_count ipd_ptr_count;
+ ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
+
+ /* Handle Work Queue Entry in cn56xx and cn52xx */
+ if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
+ union cvmx_ipd_ctl_status ipd_ctl_status;
+ ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ if (ipd_ctl_status.s.no_wptr)
+ no_wptr = 1;
+ }
+
+ /* Free the prefetched WQE */
+ if (ipd_ptr_count.s.wqev_cnt) {
+ union cvmx_ipd_wqe_ptr_valid ipd_wqe_ptr_valid;
+ ipd_wqe_ptr_valid.u64 =
+ cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID);
+ if (no_wptr)
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ ((uint64_t) ipd_wqe_ptr_valid.s.
+ ptr << 7), CVMX_FPA_PACKET_POOL,
+ 0);
+ else
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ ((uint64_t) ipd_wqe_ptr_valid.s.
+ ptr << 7), CVMX_FPA_WQE_POOL, 0);
+ }
+
+ /* Free all WQE in the fifo */
+ if (ipd_ptr_count.s.wqe_pcnt) {
+ int i;
+ union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl;
+ ipd_pwp_ptr_fifo_ctl.u64 =
+ cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
+ for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) {
+ ipd_pwp_ptr_fifo_ctl.s.cena = 0;
+ ipd_pwp_ptr_fifo_ctl.s.raddr =
+ ipd_pwp_ptr_fifo_ctl.s.max_cnts +
+ (ipd_pwp_ptr_fifo_ctl.s.wraddr +
+ i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
+ cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
+ ipd_pwp_ptr_fifo_ctl.u64);
+ ipd_pwp_ptr_fifo_ctl.u64 =
+ cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
+ if (no_wptr)
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ ((uint64_t)
+ ipd_pwp_ptr_fifo_ctl.s.
+ ptr << 7),
+ CVMX_FPA_PACKET_POOL, 0);
+ else
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ ((uint64_t)
+ ipd_pwp_ptr_fifo_ctl.s.
+ ptr << 7),
+ CVMX_FPA_WQE_POOL, 0);
+ }
+ ipd_pwp_ptr_fifo_ctl.s.cena = 1;
+ cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
+ ipd_pwp_ptr_fifo_ctl.u64);
+ }
+
+ /* Free the prefetched packet */
+ if (ipd_ptr_count.s.pktv_cnt) {
+ union cvmx_ipd_pkt_ptr_valid ipd_pkt_ptr_valid;
+ ipd_pkt_ptr_valid.u64 =
+ cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID);
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ (ipd_pkt_ptr_valid.s.ptr << 7),
+ CVMX_FPA_PACKET_POOL, 0);
+ }
+
+ /* Free the per port prefetched packets */
+ if (1) {
+ int i;
+ union cvmx_ipd_prc_port_ptr_fifo_ctl
+ ipd_prc_port_ptr_fifo_ctl;
+ ipd_prc_port_ptr_fifo_ctl.u64 =
+ cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
+
+ for (i = 0; i < ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
+ i++) {
+ ipd_prc_port_ptr_fifo_ctl.s.cena = 0;
+ ipd_prc_port_ptr_fifo_ctl.s.raddr =
+ i % ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
+ cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
+ ipd_prc_port_ptr_fifo_ctl.u64);
+ ipd_prc_port_ptr_fifo_ctl.u64 =
+ cvmx_read_csr
+ (CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ ((uint64_t)
+ ipd_prc_port_ptr_fifo_ctl.s.
+ ptr << 7), CVMX_FPA_PACKET_POOL,
+ 0);
+ }
+ ipd_prc_port_ptr_fifo_ctl.s.cena = 1;
+ cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
+ ipd_prc_port_ptr_fifo_ctl.u64);
+ }
+
+ /* Free all packets in the holding fifo */
+ if (ipd_ptr_count.s.pfif_cnt) {
+ int i;
+ union cvmx_ipd_prc_hold_ptr_fifo_ctl
+ ipd_prc_hold_ptr_fifo_ctl;
+
+ ipd_prc_hold_ptr_fifo_ctl.u64 =
+ cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
+
+ for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) {
+ ipd_prc_hold_ptr_fifo_ctl.s.cena = 0;
+ ipd_prc_hold_ptr_fifo_ctl.s.raddr =
+ (ipd_prc_hold_ptr_fifo_ctl.s.praddr +
+ i) % ipd_prc_hold_ptr_fifo_ctl.s.max_pkt;
+ cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
+ ipd_prc_hold_ptr_fifo_ctl.u64);
+ ipd_prc_hold_ptr_fifo_ctl.u64 =
+ cvmx_read_csr
+ (CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ ((uint64_t)
+ ipd_prc_hold_ptr_fifo_ctl.s.
+ ptr << 7), CVMX_FPA_PACKET_POOL,
+ 0);
+ }
+ ipd_prc_hold_ptr_fifo_ctl.s.cena = 1;
+ cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
+ ipd_prc_hold_ptr_fifo_ctl.u64);
+ }
+
+ /* Free all packets in the fifo */
+ if (ipd_ptr_count.s.pkt_pcnt) {
+ int i;
+ union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl;
+ ipd_pwp_ptr_fifo_ctl.u64 =
+ cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
+
+ for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) {
+ ipd_pwp_ptr_fifo_ctl.s.cena = 0;
+ ipd_pwp_ptr_fifo_ctl.s.raddr =
+ (ipd_pwp_ptr_fifo_ctl.s.praddr +
+ i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
+ cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
+ ipd_pwp_ptr_fifo_ctl.u64);
+ ipd_pwp_ptr_fifo_ctl.u64 =
+ cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ ((uint64_t) ipd_pwp_ptr_fifo_ctl.
+ s.ptr << 7),
+ CVMX_FPA_PACKET_POOL, 0);
+ }
+ ipd_pwp_ptr_fifo_ctl.s.cena = 1;
+ cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
+ ipd_pwp_ptr_fifo_ctl.u64);
+ }
+
+ /* Reset the IPD to get all buffers out of it */
+ {
+ union cvmx_ipd_ctl_status ipd_ctl_status;
+ ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ ipd_ctl_status.s.reset = 1;
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
+ }
+
+ /* Reset the PIP */
+ {
+ union cvmx_pip_sft_rst pip_sft_rst;
+ pip_sft_rst.u64 = cvmx_read_csr(CVMX_PIP_SFT_RST);
+ pip_sft_rst.s.rst = 1;
+ cvmx_write_csr(CVMX_PIP_SFT_RST, pip_sft_rst.u64);
+ }
+ }
+}
+
+#endif /* __CVMX_IPD_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
new file mode 100644
index 000000000..3ea84acf1
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
@@ -0,0 +1,239 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_L2C_DEFS_H__
+#define __CVMX_L2C_DEFS_H__
+
+#include <uapi/asm/bitfield.h>
+
+#define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull))
+#define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull))
+#define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull))
+#define CVMX_L2C_ERR_TDTX(block_id) \
+ (CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_ERR_TTGX(block_id) \
+ (CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull))
+#define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull))
+#define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull))
+#define CVMX_L2C_PFCX(offset) (CVMX_ADD_IO_SEG(0x0001180080000098ull) + \
+ ((offset) & 3) * 8)
+#define CVMX_L2C_PFC0 CVMX_L2C_PFCX(0)
+#define CVMX_L2C_PFC1 CVMX_L2C_PFCX(1)
+#define CVMX_L2C_PFC2 CVMX_L2C_PFCX(2)
+#define CVMX_L2C_PFC3 CVMX_L2C_PFCX(3)
+#define CVMX_L2C_SPAR0 (CVMX_ADD_IO_SEG(0x0001180080000068ull))
+#define CVMX_L2C_SPAR1 (CVMX_ADD_IO_SEG(0x0001180080000070ull))
+#define CVMX_L2C_SPAR2 (CVMX_ADD_IO_SEG(0x0001180080000078ull))
+#define CVMX_L2C_SPAR3 (CVMX_ADD_IO_SEG(0x0001180080000080ull))
+#define CVMX_L2C_SPAR4 (CVMX_ADD_IO_SEG(0x0001180080000088ull))
+#define CVMX_L2C_TADX_PFCX(offset, block_id) \
+ (CVMX_ADD_IO_SEG(0x0001180080A00400ull) + (((offset) & 3) + \
+ ((block_id) & 7) * 0x8000ull) * 8)
+#define CVMX_L2C_TADX_PFC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00400ull) + \
+ ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_TADX_PFC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00408ull) + \
+ ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_TADX_PFC2(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00410ull) + \
+ ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_TADX_PFC3(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00418ull) + \
+ ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_TADX_PRF(offset) (CVMX_ADD_IO_SEG(0x0001180080A00008ull) + \
+ ((offset) & 7) * 0x40000ull)
+#define CVMX_L2C_TADX_TAG(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00010ull) + \
+ ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_WPAR_IOBX(offset) (CVMX_ADD_IO_SEG(0x0001180080840200ull) + \
+ ((offset) & 1) * 8)
+#define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + \
+ ((offset) & 31) * 8)
+
+
+union cvmx_l2c_err_tdtx {
+ uint64_t u64;
+ struct cvmx_l2c_err_tdtx_s {
+ __BITFIELD_FIELD(uint64_t dbe:1,
+ __BITFIELD_FIELD(uint64_t sbe:1,
+ __BITFIELD_FIELD(uint64_t vdbe:1,
+ __BITFIELD_FIELD(uint64_t vsbe:1,
+ __BITFIELD_FIELD(uint64_t syn:10,
+ __BITFIELD_FIELD(uint64_t reserved_22_49:28,
+ __BITFIELD_FIELD(uint64_t wayidx:18,
+ __BITFIELD_FIELD(uint64_t reserved_2_3:2,
+ __BITFIELD_FIELD(uint64_t type:2,
+ ;)))))))))
+ } s;
+};
+
+union cvmx_l2c_err_ttgx {
+ uint64_t u64;
+ struct cvmx_l2c_err_ttgx_s {
+ __BITFIELD_FIELD(uint64_t dbe:1,
+ __BITFIELD_FIELD(uint64_t sbe:1,
+ __BITFIELD_FIELD(uint64_t noway:1,
+ __BITFIELD_FIELD(uint64_t reserved_56_60:5,
+ __BITFIELD_FIELD(uint64_t syn:6,
+ __BITFIELD_FIELD(uint64_t reserved_22_49:28,
+ __BITFIELD_FIELD(uint64_t wayidx:15,
+ __BITFIELD_FIELD(uint64_t reserved_2_6:5,
+ __BITFIELD_FIELD(uint64_t type:2,
+ ;)))))))))
+ } s;
+};
+
+union cvmx_l2c_cfg {
+ uint64_t u64;
+ struct cvmx_l2c_cfg_s {
+ __BITFIELD_FIELD(uint64_t reserved_20_63:44,
+ __BITFIELD_FIELD(uint64_t bstrun:1,
+ __BITFIELD_FIELD(uint64_t lbist:1,
+ __BITFIELD_FIELD(uint64_t xor_bank:1,
+ __BITFIELD_FIELD(uint64_t dpres1:1,
+ __BITFIELD_FIELD(uint64_t dpres0:1,
+ __BITFIELD_FIELD(uint64_t dfill_dis:1,
+ __BITFIELD_FIELD(uint64_t fpexp:4,
+ __BITFIELD_FIELD(uint64_t fpempty:1,
+ __BITFIELD_FIELD(uint64_t fpen:1,
+ __BITFIELD_FIELD(uint64_t idxalias:1,
+ __BITFIELD_FIELD(uint64_t mwf_crd:4,
+ __BITFIELD_FIELD(uint64_t rsp_arb_mode:1,
+ __BITFIELD_FIELD(uint64_t rfb_arb_mode:1,
+ __BITFIELD_FIELD(uint64_t lrf_arb_mode:1,
+ ;)))))))))))))))
+ } s;
+};
+
+union cvmx_l2c_ctl {
+ uint64_t u64;
+ struct cvmx_l2c_ctl_s {
+ __BITFIELD_FIELD(uint64_t reserved_30_63:34,
+ __BITFIELD_FIELD(uint64_t sepcmt:1,
+ __BITFIELD_FIELD(uint64_t rdf_fast:1,
+ __BITFIELD_FIELD(uint64_t disstgl2i:1,
+ __BITFIELD_FIELD(uint64_t l2dfsbe:1,
+ __BITFIELD_FIELD(uint64_t l2dfdbe:1,
+ __BITFIELD_FIELD(uint64_t discclk:1,
+ __BITFIELD_FIELD(uint64_t maxvab:4,
+ __BITFIELD_FIELD(uint64_t maxlfb:4,
+ __BITFIELD_FIELD(uint64_t rsp_arb_mode:1,
+ __BITFIELD_FIELD(uint64_t xmc_arb_mode:1,
+ __BITFIELD_FIELD(uint64_t ef_ena:1,
+ __BITFIELD_FIELD(uint64_t ef_cnt:7,
+ __BITFIELD_FIELD(uint64_t vab_thresh:4,
+ __BITFIELD_FIELD(uint64_t disecc:1,
+ __BITFIELD_FIELD(uint64_t disidxalias:1,
+ ;))))))))))))))))
+ } s;
+};
+
+union cvmx_l2c_dbg {
+ uint64_t u64;
+ struct cvmx_l2c_dbg_s {
+ __BITFIELD_FIELD(uint64_t reserved_15_63:49,
+ __BITFIELD_FIELD(uint64_t lfb_enum:4,
+ __BITFIELD_FIELD(uint64_t lfb_dmp:1,
+ __BITFIELD_FIELD(uint64_t ppnum:4,
+ __BITFIELD_FIELD(uint64_t set:3,
+ __BITFIELD_FIELD(uint64_t finv:1,
+ __BITFIELD_FIELD(uint64_t l2d:1,
+ __BITFIELD_FIELD(uint64_t l2t:1,
+ ;))))))))
+ } s;
+};
+
+union cvmx_l2c_pfctl {
+ uint64_t u64;
+ struct cvmx_l2c_pfctl_s {
+ __BITFIELD_FIELD(uint64_t reserved_36_63:28,
+ __BITFIELD_FIELD(uint64_t cnt3rdclr:1,
+ __BITFIELD_FIELD(uint64_t cnt2rdclr:1,
+ __BITFIELD_FIELD(uint64_t cnt1rdclr:1,
+ __BITFIELD_FIELD(uint64_t cnt0rdclr:1,
+ __BITFIELD_FIELD(uint64_t cnt3ena:1,
+ __BITFIELD_FIELD(uint64_t cnt3clr:1,
+ __BITFIELD_FIELD(uint64_t cnt3sel:6,
+ __BITFIELD_FIELD(uint64_t cnt2ena:1,
+ __BITFIELD_FIELD(uint64_t cnt2clr:1,
+ __BITFIELD_FIELD(uint64_t cnt2sel:6,
+ __BITFIELD_FIELD(uint64_t cnt1ena:1,
+ __BITFIELD_FIELD(uint64_t cnt1clr:1,
+ __BITFIELD_FIELD(uint64_t cnt1sel:6,
+ __BITFIELD_FIELD(uint64_t cnt0ena:1,
+ __BITFIELD_FIELD(uint64_t cnt0clr:1,
+ __BITFIELD_FIELD(uint64_t cnt0sel:6,
+ ;)))))))))))))))))
+ } s;
+};
+
+union cvmx_l2c_tadx_prf {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_prf_s {
+ __BITFIELD_FIELD(uint64_t reserved_32_63:32,
+ __BITFIELD_FIELD(uint64_t cnt3sel:8,
+ __BITFIELD_FIELD(uint64_t cnt2sel:8,
+ __BITFIELD_FIELD(uint64_t cnt1sel:8,
+ __BITFIELD_FIELD(uint64_t cnt0sel:8,
+ ;)))))
+ } s;
+};
+
+union cvmx_l2c_tadx_tag {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_tag_s {
+ __BITFIELD_FIELD(uint64_t reserved_46_63:18,
+ __BITFIELD_FIELD(uint64_t ecc:6,
+ __BITFIELD_FIELD(uint64_t reserved_36_39:4,
+ __BITFIELD_FIELD(uint64_t tag:19,
+ __BITFIELD_FIELD(uint64_t reserved_4_16:13,
+ __BITFIELD_FIELD(uint64_t use:1,
+ __BITFIELD_FIELD(uint64_t valid:1,
+ __BITFIELD_FIELD(uint64_t dirty:1,
+ __BITFIELD_FIELD(uint64_t lock:1,
+ ;)))))))))
+ } s;
+};
+
+union cvmx_l2c_lckbase {
+ uint64_t u64;
+ struct cvmx_l2c_lckbase_s {
+ __BITFIELD_FIELD(uint64_t reserved_31_63:33,
+ __BITFIELD_FIELD(uint64_t lck_base:27,
+ __BITFIELD_FIELD(uint64_t reserved_1_3:3,
+ __BITFIELD_FIELD(uint64_t lck_ena:1,
+ ;))))
+ } s;
+};
+
+union cvmx_l2c_lckoff {
+ uint64_t u64;
+ struct cvmx_l2c_lckoff_s {
+ __BITFIELD_FIELD(uint64_t reserved_10_63:54,
+ __BITFIELD_FIELD(uint64_t lck_offset:10,
+ ;))
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c.h b/arch/mips/include/asm/octeon/cvmx-l2c.h
new file mode 100644
index 000000000..4459a3200
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-l2c.h
@@ -0,0 +1,364 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Interface to the Level 2 Cache (L2C) control, measurement, and debugging
+ * facilities.
+ */
+
+#ifndef __CVMX_L2C_H__
+#define __CVMX_L2C_H__
+
+#include <uapi/asm/bitfield.h>
+
+#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro */
+#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits() /* Deprecated macro */
+#define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro */
+
+/* Based on 128 byte cache line size */
+#define CVMX_L2C_IDX_ADDR_SHIFT 7
+#define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1)
+
+/* Defines for index aliasing computations */
+#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + \
+ cvmx_l2c_get_set_bits())
+#define CVMX_L2C_ALIAS_MASK (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
+#define CVMX_L2C_MEMBANK_SELECT_SIZE 4096
+
+/* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
+#define CVMX_L2C_TADS 1
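+
+/*
+ * Illustrative sketch, not part of the original header: with index
+ * aliasing enabled, the macros above fold the upper alias bits of a
+ * physical address back onto the set index roughly as below. The
+ * supported interface for this is cvmx_l2c_address_to_index(), declared
+ * later in this file; set_bits would normally come from
+ * cvmx_l2c_get_set_bits().
+ */
+static inline uint64_t example_l2c_aliased_index(uint64_t paddr, int set_bits)
+{
+ uint64_t set_mask = (1ull << set_bits) - 1;
+ uint64_t idx = paddr >> CVMX_L2C_IDX_ADDR_SHIFT;
+
+ /* fold the alias bits above the index back onto it */
+ idx ^= (paddr >> (CVMX_L2C_IDX_ADDR_SHIFT + set_bits)) & set_mask;
+ return idx & set_mask;
+}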
+
+union cvmx_l2c_tag {
+ uint64_t u64;
+ struct {
+ __BITFIELD_FIELD(uint64_t reserved:28,
+ __BITFIELD_FIELD(uint64_t V:1,
+ __BITFIELD_FIELD(uint64_t D:1,
+ __BITFIELD_FIELD(uint64_t L:1,
+ __BITFIELD_FIELD(uint64_t U:1,
+ __BITFIELD_FIELD(uint64_t addr:32,
+ ;))))))
+ } s;
+};
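+
+/*
+ * Illustrative sketch, not part of the original header. The single
+ * letter fields above are not documented here; V, D, L and U are
+ * conventionally the valid, dirty, locked and used bits (assumption
+ * based on how the tag is used elsewhere), so a line that would need a
+ * writeback before eviction can be spotted like this:
+ */
+static inline int example_l2c_line_needs_writeback(union cvmx_l2c_tag tag)
+{
+ return tag.s.V && tag.s.D;
+}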
+
+/* L2C Performance Counter events. */
+enum cvmx_l2c_event {
+ CVMX_L2C_EVENT_CYCLES = 0,
+ CVMX_L2C_EVENT_INSTRUCTION_MISS = 1,
+ CVMX_L2C_EVENT_INSTRUCTION_HIT = 2,
+ CVMX_L2C_EVENT_DATA_MISS = 3,
+ CVMX_L2C_EVENT_DATA_HIT = 4,
+ CVMX_L2C_EVENT_MISS = 5,
+ CVMX_L2C_EVENT_HIT = 6,
+ CVMX_L2C_EVENT_VICTIM_HIT = 7,
+ CVMX_L2C_EVENT_INDEX_CONFLICT = 8,
+ CVMX_L2C_EVENT_TAG_PROBE = 9,
+ CVMX_L2C_EVENT_TAG_UPDATE = 10,
+ CVMX_L2C_EVENT_TAG_COMPLETE = 11,
+ CVMX_L2C_EVENT_TAG_DIRTY = 12,
+ CVMX_L2C_EVENT_DATA_STORE_NOP = 13,
+ CVMX_L2C_EVENT_DATA_STORE_READ = 14,
+ CVMX_L2C_EVENT_DATA_STORE_WRITE = 15,
+ CVMX_L2C_EVENT_FILL_DATA_VALID = 16,
+ CVMX_L2C_EVENT_WRITE_REQUEST = 17,
+ CVMX_L2C_EVENT_READ_REQUEST = 18,
+ CVMX_L2C_EVENT_WRITE_DATA_VALID = 19,
+ CVMX_L2C_EVENT_XMC_NOP = 20,
+ CVMX_L2C_EVENT_XMC_LDT = 21,
+ CVMX_L2C_EVENT_XMC_LDI = 22,
+ CVMX_L2C_EVENT_XMC_LDD = 23,
+ CVMX_L2C_EVENT_XMC_STF = 24,
+ CVMX_L2C_EVENT_XMC_STT = 25,
+ CVMX_L2C_EVENT_XMC_STP = 26,
+ CVMX_L2C_EVENT_XMC_STC = 27,
+ CVMX_L2C_EVENT_XMC_DWB = 28,
+ CVMX_L2C_EVENT_XMC_PL2 = 29,
+ CVMX_L2C_EVENT_XMC_PSL1 = 30,
+ CVMX_L2C_EVENT_XMC_IOBLD = 31,
+ CVMX_L2C_EVENT_XMC_IOBST = 32,
+ CVMX_L2C_EVENT_XMC_IOBDMA = 33,
+ CVMX_L2C_EVENT_XMC_IOBRSP = 34,
+ CVMX_L2C_EVENT_XMC_BUS_VALID = 35,
+ CVMX_L2C_EVENT_XMC_MEM_DATA = 36,
+ CVMX_L2C_EVENT_XMC_REFL_DATA = 37,
+ CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38,
+ CVMX_L2C_EVENT_RSC_NOP = 39,
+ CVMX_L2C_EVENT_RSC_STDN = 40,
+ CVMX_L2C_EVENT_RSC_FILL = 41,
+ CVMX_L2C_EVENT_RSC_REFL = 42,
+ CVMX_L2C_EVENT_RSC_STIN = 43,
+ CVMX_L2C_EVENT_RSC_SCIN = 44,
+ CVMX_L2C_EVENT_RSC_SCFL = 45,
+ CVMX_L2C_EVENT_RSC_SCDN = 46,
+ CVMX_L2C_EVENT_RSC_DATA_VALID = 47,
+ CVMX_L2C_EVENT_RSC_VALID_FILL = 48,
+ CVMX_L2C_EVENT_RSC_VALID_STRSP = 49,
+ CVMX_L2C_EVENT_RSC_VALID_REFL = 50,
+ CVMX_L2C_EVENT_LRF_REQ = 51,
+ CVMX_L2C_EVENT_DT_RD_ALLOC = 52,
+ CVMX_L2C_EVENT_DT_WR_INVAL = 53,
+ CVMX_L2C_EVENT_MAX
+};
+
+/* L2C Performance Counter events for Octeon2. */
+enum cvmx_l2c_tad_event {
+ CVMX_L2C_TAD_EVENT_NONE = 0,
+ CVMX_L2C_TAD_EVENT_TAG_HIT = 1,
+ CVMX_L2C_TAD_EVENT_TAG_MISS = 2,
+ CVMX_L2C_TAD_EVENT_TAG_NOALLOC = 3,
+ CVMX_L2C_TAD_EVENT_TAG_VICTIM = 4,
+ CVMX_L2C_TAD_EVENT_SC_FAIL = 5,
+ CVMX_L2C_TAD_EVENT_SC_PASS = 6,
+ CVMX_L2C_TAD_EVENT_LFB_VALID = 7,
+ CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB = 8,
+ CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB = 9,
+ CVMX_L2C_TAD_EVENT_QUAD0_INDEX = 128,
+ CVMX_L2C_TAD_EVENT_QUAD0_READ = 129,
+ CVMX_L2C_TAD_EVENT_QUAD0_BANK = 130,
+ CVMX_L2C_TAD_EVENT_QUAD0_WDAT = 131,
+ CVMX_L2C_TAD_EVENT_QUAD1_INDEX = 144,
+ CVMX_L2C_TAD_EVENT_QUAD1_READ = 145,
+ CVMX_L2C_TAD_EVENT_QUAD1_BANK = 146,
+ CVMX_L2C_TAD_EVENT_QUAD1_WDAT = 147,
+ CVMX_L2C_TAD_EVENT_QUAD2_INDEX = 160,
+ CVMX_L2C_TAD_EVENT_QUAD2_READ = 161,
+ CVMX_L2C_TAD_EVENT_QUAD2_BANK = 162,
+ CVMX_L2C_TAD_EVENT_QUAD2_WDAT = 163,
+ CVMX_L2C_TAD_EVENT_QUAD3_INDEX = 176,
+ CVMX_L2C_TAD_EVENT_QUAD3_READ = 177,
+ CVMX_L2C_TAD_EVENT_QUAD3_BANK = 178,
+ CVMX_L2C_TAD_EVENT_QUAD3_WDAT = 179,
+ CVMX_L2C_TAD_EVENT_MAX
+};
+
+/**
+ * Configure one of the four L2 Cache performance counters to capture event
+ * occurrences.
+ *
+ * @counter: The counter to configure. Range 0..3.
+ * @event: The type of L2 Cache event occurrence to count.
+ * @clear_on_read: When asserted, any read of the performance counter
+ * clears the counter.
+ *
+ * @note The routine does not clear the counter.
+ */
+void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
+ uint32_t clear_on_read);
+
+/**
+ * Read the given L2 Cache performance counter. The counter must be configured
+ * before reading, but this routine does not enforce this requirement.
+ *
+ * @counter: The counter to read. Range 0..3.
+ *
+ * Returns The current counter value.
+ */
+uint64_t cvmx_l2c_read_perf(uint32_t counter);
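+
+/*
+ * Illustrative sketch, not part of the original header: measuring L2
+ * misses across a workload with counter 0. The counter number and event
+ * are example choices; error handling is omitted.
+ */
+static inline uint64_t example_count_l2_misses(void (*workload)(void))
+{
+ uint64_t before, after;
+
+ /* count every L2 miss on counter 0, without clear-on-read */
+ cvmx_l2c_config_perf(0, CVMX_L2C_EVENT_MISS, 0);
+ before = cvmx_l2c_read_perf(0);
+ workload();
+ after = cvmx_l2c_read_perf(0);
+ return after - before;
+}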
+
+/**
+ * Return the L2 Cache way partitioning for a given core.
+ *
+ * @core: The core processor of interest.
+ *
+ * Returns The mask specifying the partitioning. 0 bits in the mask indicate
+ * the cache 'ways' that a core can evict from.
+ * -1 on error
+ */
+int cvmx_l2c_get_core_way_partition(uint32_t core);
+
+/**
+ * Partitions the L2 cache for a core
+ *
+ * @core: The core that the partitioning applies to.
+ * @mask: The partitioning of the ways expressed as a binary
+ * mask. A 0 bit allows the core to evict cache lines from
+ * a way, while a 1 bit blocks the core from evicting any
+ * lines from that way. There must be at least one allowed
+ * way (0 bit) in the mask.
+ *
+ * @note If any ways are blocked for all cores and the HW blocks, then
+ * those ways will never have any cache lines evicted from them.
+ * All cores and the hardware blocks are free to read from all
+ * ways regardless of the partitioning.
+ */
+int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
+
+/**
+ * Return the L2 Cache way partitioning for the hw blocks.
+ *
+ * Returns The mask specifying the reserved ways. 0 bits in the mask indicate
+ * the cache 'ways' that a core can evict from.
+ * -1 on error
+ */
+int cvmx_l2c_get_hw_way_partition(void);
+
+/**
+ * Partitions the L2 cache for the hardware blocks.
+ *
+ * @mask: The partitioning of the ways expressed as a binary
+ * mask. A 0 bit allows the core to evict cache lines from
+ * a way, while a 1 bit blocks the core from evicting any
+ * lines from that way. There must be at least one allowed
+ * way (0 bit) in the mask.
+ *
+ * @note If any ways are blocked for all cores and the HW blocks, then
+ * those ways will never have any cache lines evicted from them.
+ * All cores and the hardware blocks are free to read from all
+ * ways regardless of the partitioning.
+ */
+int cvmx_l2c_set_hw_way_partition(uint32_t mask);
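+
+/*
+ * Illustrative sketch, not part of the original header: keeping the
+ * hardware blocks out of ways 0 and 1 so that lines the cores place
+ * there are never evicted by I/O traffic. The mask value is an example
+ * only.
+ */
+static inline int example_keep_hw_out_of_low_ways(void)
+{
+ /* set bits mean "may not evict from this way" */
+ return cvmx_l2c_set_hw_way_partition(0x3);
+}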
+
+
+/**
+ * Locks a line in the L2 cache at the specified physical address
+ *
+ * @addr: physical address of line to lock
+ *
+ * Returns 0 on success,
+ * 1 if line not locked.
+ */
+int cvmx_l2c_lock_line(uint64_t addr);
+
+/**
+ * Locks a specified memory region in the L2 cache.
+ *
+ * Note that if not all lines can be locked, that means that all
+ * but one of the ways (associations) available to the locking
+ * core are locked. Having only 1 association available for
+ * normal caching may have a significant adverse effect on performance.
+ * Care should be taken to ensure that enough of the L2 cache is left
+ * unlocked to allow for normal caching of DRAM.
+ *
+ * @start: Physical address of the start of the region to lock
+ * @len: Length (in bytes) of region to lock
+ *
+ * Returns Number of requested lines that were not locked.
+ * 0 on success (all locked)
+ */
+int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len);
+
+/**
+ * Unlock and flush a cache line from the L2 cache.
+ * IMPORTANT: Must only be run by one core at a time due to use
+ * of L2C debug features.
+ * Note that this function will flush a matching but unlocked cache line.
+ * (If address is not in L2, no lines are flushed.)
+ *
+ * @address: Physical address to unlock
+ *
+ * Returns 0: line not unlocked
+ * 1: line unlocked
+ */
+int cvmx_l2c_unlock_line(uint64_t address);
+
+/**
+ * Unlocks a region of memory that is locked in the L2 cache
+ *
+ * @start: start physical address
+ * @len: length (in bytes) to unlock
+ *
+ * Returns Number of locked lines that the call unlocked
+ */
+int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
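+
+/*
+ * Illustrative sketch, not part of the original header: locking a
+ * physically contiguous region and backing out if it does not all fit,
+ * rather than running with a partially locked region.
+ */
+static inline int example_lock_region_or_back_out(uint64_t phys_start, uint64_t len)
+{
+ int not_locked = cvmx_l2c_lock_mem_region(phys_start, len);
+
+ if (not_locked)
+ cvmx_l2c_unlock_mem_region(phys_start, len);
+ return not_locked;
+}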
+
+/**
+ * Read the L2 controller tag for a given location in L2
+ *
+ * @association:
+ * Which association (way) to read the line from
+ * @index: Which set index to read from.
+ *
+ * Returns l2c tag structure for line requested.
+ */
+union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index);
+
+/* Wrapper providing a deprecated old function name */
+static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association,
+ uint32_t index)
+ __attribute__((deprecated));
+static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association,
+ uint32_t index)
+{
+ return cvmx_l2c_get_tag(association, index);
+}
+
+
+/**
+ * Returns the cache index for a given physical address
+ *
+ * @addr: physical address
+ *
+ * Returns L2 cache index
+ */
+uint32_t cvmx_l2c_address_to_index(uint64_t addr);
+
+/**
+ * Flushes (and unlocks) the entire L2 cache.
+ * IMPORTANT: Must only be run by one core at a time due to use
+ * of L2C debug features.
+ */
+void cvmx_l2c_flush(void);
+
+/**
+ *
+ * Returns the size of the L2 cache in bytes,
+ * -1 on error (unrecognized model)
+ */
+int cvmx_l2c_get_cache_size_bytes(void);
+
+/**
+ * Return the number of sets in the L2 Cache
+ *
+ * Returns the number of sets in the L2 cache.
+ */
+int cvmx_l2c_get_num_sets(void);
+
+/**
+ * Return log base 2 of the number of sets in the L2 cache
+ * Returns the number of set index bits.
+ */
+int cvmx_l2c_get_set_bits(void);
+/**
+ * Return the number of associations in the L2 Cache
+ *
+ * Returns the number of associations (ways).
+ */
+int cvmx_l2c_get_num_assoc(void);
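+
+/*
+ * Illustrative sketch, not part of the original header: the geometry
+ * helpers above should agree with the total cache size, given the
+ * 128 byte line size this file is based on.
+ */
+static inline int example_l2c_geometry_is_consistent(void)
+{
+ return cvmx_l2c_get_cache_size_bytes() ==
+ cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() * 128;
+}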
+
+/**
+ * Flush a line from the L2 cache
+ * This should only be called from one core at a time, as this routine
+ * sets the core to the 'debug' core in order to flush the line.
+ *
+ * @assoc: Association (or way) to flush
+ * @index: Index to flush
+ */
+void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index);
+
+#endif /* __CVMX_L2C_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
new file mode 100644
index 000000000..a951ad5d6
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
@@ -0,0 +1,60 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_L2D_DEFS_H__
+#define __CVMX_L2D_DEFS_H__
+
+#define CVMX_L2D_ERR (CVMX_ADD_IO_SEG(0x0001180080000010ull))
+#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
+
+
+union cvmx_l2d_err {
+ uint64_t u64;
+ struct cvmx_l2d_err_s {
+ __BITFIELD_FIELD(uint64_t reserved_6_63:58,
+ __BITFIELD_FIELD(uint64_t bmhclsel:1,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;)))))))
+ } s;
+};
+
+union cvmx_l2d_fus3 {
+ uint64_t u64;
+ struct cvmx_l2d_fus3_s {
+ __BITFIELD_FIELD(uint64_t reserved_40_63:24,
+ __BITFIELD_FIELD(uint64_t ema_ctl:3,
+ __BITFIELD_FIELD(uint64_t reserved_34_36:3,
+ __BITFIELD_FIELD(uint64_t q3fus:34,
+ ;))))
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-l2t-defs.h b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
new file mode 100644
index 000000000..06ea13251
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
@@ -0,0 +1,143 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_L2T_DEFS_H__
+#define __CVMX_L2T_DEFS_H__
+
+#include <uapi/asm/bitfield.h>
+
+#define CVMX_L2T_ERR (CVMX_ADD_IO_SEG(0x0001180080000008ull))
+
+
+union cvmx_l2t_err {
+ uint64_t u64;
+ struct cvmx_l2t_err_s {
+ __BITFIELD_FIELD(uint64_t reserved_29_63:35,
+ __BITFIELD_FIELD(uint64_t fadru:1,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t fset:3,
+ __BITFIELD_FIELD(uint64_t fadr:10,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;))))))))))))))
+ } s;
+ struct cvmx_l2t_err_cn30xx {
+ __BITFIELD_FIELD(uint64_t reserved_28_63:36,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t reserved_23_23:1,
+ __BITFIELD_FIELD(uint64_t fset:2,
+ __BITFIELD_FIELD(uint64_t reserved_19_20:2,
+ __BITFIELD_FIELD(uint64_t fadr:8,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;)))))))))))))))
+ } cn30xx;
+ struct cvmx_l2t_err_cn31xx {
+ __BITFIELD_FIELD(uint64_t reserved_28_63:36,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t reserved_23_23:1,
+ __BITFIELD_FIELD(uint64_t fset:2,
+ __BITFIELD_FIELD(uint64_t reserved_20_20:1,
+ __BITFIELD_FIELD(uint64_t fadr:9,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;)))))))))))))))
+ } cn31xx;
+ struct cvmx_l2t_err_cn38xx {
+ __BITFIELD_FIELD(uint64_t reserved_28_63:36,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t fset:3,
+ __BITFIELD_FIELD(uint64_t fadr:10,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;)))))))))))))
+ } cn38xx;
+ struct cvmx_l2t_err_cn50xx {
+ __BITFIELD_FIELD(uint64_t reserved_28_63:36,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t fset:3,
+ __BITFIELD_FIELD(uint64_t reserved_18_20:3,
+ __BITFIELD_FIELD(uint64_t fadr:7,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;))))))))))))))
+ } cn50xx;
+ struct cvmx_l2t_err_cn52xx {
+ __BITFIELD_FIELD(uint64_t reserved_28_63:36,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t fset:3,
+ __BITFIELD_FIELD(uint64_t reserved_20_20:1,
+ __BITFIELD_FIELD(uint64_t fadr:9,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;))))))))))))))
+ } cn52xx;
+};
+
+#endif
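
Editorial note, not part of the patch: the __BITFIELD_FIELD() wrapper from uapi/asm/bitfield.h emits the nested fields above in the correct order for either bit endianness, so callers simply view the raw 64-bit CSR through the union. A minimal sketch follows, assuming cvmx_read_csr()/cvmx_write_csr() from <asm/octeon/cvmx.h> as used elsewhere in the Octeon code; the error handling shown is purely illustrative.

/*
 * Illustrative sketch only (not part of the patch): read L2T_ERR and
 * report a tag ECC error through the cvmx_l2t_err union defined above.
 */
#include <linux/printk.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2t-defs.h>

static void l2t_err_example(void)
{
	union cvmx_l2t_err l2t_err;

	/* Fetch the raw 64-bit CSR and view it through the bitfield union. */
	l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);

	if (l2t_err.s.ded_err || l2t_err.s.sec_err)
		pr_err("L2 tag ECC error: fadr=0x%x fsyn=0x%x\n",
		       l2t_err.s.fadr, l2t_err.s.fsyn);
}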
diff --git a/arch/mips/include/asm/octeon/cvmx-led-defs.h b/arch/mips/include/asm/octeon/cvmx-led-defs.h
new file mode 100644
index 000000000..023790752
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-led-defs.h
@@ -0,0 +1,214 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_LED_DEFS_H__
+#define __CVMX_LED_DEFS_H__
+
+#define CVMX_LED_BLINK (CVMX_ADD_IO_SEG(0x0001180000001A48ull))
+#define CVMX_LED_CLK_PHASE (CVMX_ADD_IO_SEG(0x0001180000001A08ull))
+#define CVMX_LED_CYLON (CVMX_ADD_IO_SEG(0x0001180000001AF8ull))
+#define CVMX_LED_DBG (CVMX_ADD_IO_SEG(0x0001180000001A18ull))
+#define CVMX_LED_EN (CVMX_ADD_IO_SEG(0x0001180000001A00ull))
+#define CVMX_LED_POLARITY (CVMX_ADD_IO_SEG(0x0001180000001A50ull))
+#define CVMX_LED_PRT (CVMX_ADD_IO_SEG(0x0001180000001A10ull))
+#define CVMX_LED_PRT_FMT (CVMX_ADD_IO_SEG(0x0001180000001A30ull))
+#define CVMX_LED_PRT_STATUSX(offset) (CVMX_ADD_IO_SEG(0x0001180000001A80ull) + ((offset) & 7) * 8)
+#define CVMX_LED_UDD_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001180000001A20ull) + ((offset) & 1) * 8)
+#define CVMX_LED_UDD_DATX(offset) (CVMX_ADD_IO_SEG(0x0001180000001A38ull) + ((offset) & 1) * 8)
+#define CVMX_LED_UDD_DAT_CLRX(offset) (CVMX_ADD_IO_SEG(0x0001180000001AC8ull) + ((offset) & 1) * 16)
+#define CVMX_LED_UDD_DAT_SETX(offset) (CVMX_ADD_IO_SEG(0x0001180000001AC0ull) + ((offset) & 1) * 16)
+
+union cvmx_led_blink {
+ uint64_t u64;
+ struct cvmx_led_blink_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t rate:8;
+#else
+ uint64_t rate:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_led_clk_phase {
+ uint64_t u64;
+ struct cvmx_led_clk_phase_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t phase:7;
+#else
+ uint64_t phase:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_led_cylon {
+ uint64_t u64;
+ struct cvmx_led_cylon_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t rate:16;
+#else
+ uint64_t rate:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_led_dbg {
+ uint64_t u64;
+ struct cvmx_led_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t dbg_en:1;
+#else
+ uint64_t dbg_en:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_led_en {
+ uint64_t u64;
+ struct cvmx_led_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t en:1;
+#else
+ uint64_t en:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_led_polarity {
+ uint64_t u64;
+ struct cvmx_led_polarity_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t polarity:1;
+#else
+ uint64_t polarity:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_led_prt {
+ uint64_t u64;
+ struct cvmx_led_prt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t prt_en:8;
+#else
+ uint64_t prt_en:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_led_prt_fmt {
+ uint64_t u64;
+ struct cvmx_led_prt_fmt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t format:4;
+#else
+ uint64_t format:4;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_led_prt_statusx {
+ uint64_t u64;
+ struct cvmx_led_prt_statusx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t status:6;
+#else
+ uint64_t status:6;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_led_udd_cntx {
+ uint64_t u64;
+ struct cvmx_led_udd_cntx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t cnt:6;
+#else
+ uint64_t cnt:6;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_led_udd_datx {
+ uint64_t u64;
+ struct cvmx_led_udd_datx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t dat:32;
+#else
+ uint64_t dat:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_led_udd_dat_clrx {
+ uint64_t u64;
+ struct cvmx_led_udd_dat_clrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t clr:32;
+#else
+ uint64_t clr:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_led_udd_dat_setx {
+ uint64_t u64;
+ struct cvmx_led_udd_dat_setx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t set:32;
+#else
+ uint64_t set:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+#endif
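
Editorial note, not part of the patch: the LED unions above use explicit __BIG_ENDIAN_BITFIELD layouts rather than __BITFIELD_FIELD(), but the access pattern is the same union-based read-modify-write. A minimal sketch follows, again assuming cvmx_read_csr()/cvmx_write_csr() from <asm/octeon/cvmx.h>; the blink rate value is an arbitrary example, not a recommended setting.

/*
 * Illustrative sketch only (not part of the patch): enable the LED
 * controller and program a blink rate through the unions defined above.
 */
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-led-defs.h>

static void led_enable_example(void)
{
	union cvmx_led_en led_en;
	union cvmx_led_blink led_blink;

	/* Read-modify-write LED_EN to turn the LED interface on. */
	led_en.u64 = cvmx_read_csr(CVMX_LED_EN);
	led_en.s.en = 1;
	cvmx_write_csr(CVMX_LED_EN, led_en.u64);

	/* Program an example blink rate (hardware-defined units). */
	led_blink.u64 = 0;
	led_blink.s.rate = 0x20;	/* arbitrary illustrative value */
	cvmx_write_csr(CVMX_LED_BLINK, led_blink.u64);
}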
diff --git a/arch/mips/include/asm/octeon/cvmx-lmcx-defs.h b/arch/mips/include/asm/octeon/cvmx-lmcx-defs.h
new file mode 100644
index 000000000..4167a4c7a
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-lmcx-defs.h
@@ -0,0 +1,2943 @@
+/***********************license start***************
+ * Author: Cavium Inc.
+ *
+ * Contact: support@cavium.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Inc. for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_LMCX_DEFS_H__
+#define __CVMX_LMCX_DEFS_H__
+
+#define CVMX_LMCX_BIST_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000F0ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_BIST_RESULT(block_id) (CVMX_ADD_IO_SEG(0x00011800880000F8ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_CHAR_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000220ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CHAR_MASK0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000228ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CHAR_MASK1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000230ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CHAR_MASK2(block_id) (CVMX_ADD_IO_SEG(0x0001180088000238ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CHAR_MASK3(block_id) (CVMX_ADD_IO_SEG(0x0001180088000240ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CHAR_MASK4(block_id) (CVMX_ADD_IO_SEG(0x0001180088000318ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_COMP_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000028ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_COMP_CTL2(block_id) (CVMX_ADD_IO_SEG(0x00011800880001B8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CONFIG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000188ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CONTROL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000190ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000010ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000090ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DCLK_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001E0ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_DCLK_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000070ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DCLK_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000068ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DCLK_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000B8ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DDR2_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000018ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DDR_PLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000258ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_DELAY_CFG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000088ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DIMMX_PARAMS(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000270ull) + (((offset) & 1) + ((block_id) & 3) * 0x200000ull) * 8)
+#define CVMX_LMCX_DIMM_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000310ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_DLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000C0ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DLL_CTL2(block_id) (CVMX_ADD_IO_SEG(0x00011800880001C8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_DLL_CTL3(block_id) (CVMX_ADD_IO_SEG(0x0001180088000218ull) + ((block_id) & 3) * 0x1000000ull)
+static inline uint64_t CVMX_LMCX_DUAL_MEMCFG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000098ull) + (block_id) * 0x60000000ull;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000098ull) + (block_id) * 0x60000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000098ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180088000098ull) + (block_id) * 0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_ECC_SYND(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000038ull) + (block_id) * 0x60000000ull;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000038ull) + (block_id) * 0x60000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000038ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180088000038ull) + (block_id) * 0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_FADR(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000020ull) + (block_id) * 0x60000000ull;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000020ull) + (block_id) * 0x60000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000020ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180088000020ull) + (block_id) * 0x60000000ull;
+}
+
+#define CVMX_LMCX_IFB_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001D0ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_IFB_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000050ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_IFB_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000048ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_INT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001F0ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_INT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800880001E8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_MEM_CFG0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000000ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_MEM_CFG1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000008ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_MODEREG_PARAMS0(block_id) (CVMX_ADD_IO_SEG(0x00011800880001A8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_MODEREG_PARAMS1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000260ull) + ((block_id) & 3) * 0x1000000ull)
+static inline uint64_t CVMX_LMCX_NXM(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + (block_id) * 0x60000000ull;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + (block_id) * 0x60000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + (block_id) * 0x60000000ull;
+}
+
+#define CVMX_LMCX_OPS_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001D8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_OPS_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000060ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_OPS_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000058ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_PHY_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000210ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_PLL_BWCTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000040ull))
+#define CVMX_LMCX_PLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000A8ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_PLL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800880000B0ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_READ_LEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000140ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_READ_LEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000148ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_READ_LEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000100ull) + (((offset) & 3) + ((block_id) & 1) * 0xC000000ull) * 8)
+#define CVMX_LMCX_RESET_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000180ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_RLEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880002A0ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_RLEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x00011800880002A8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_RLEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000280ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8)
+#define CVMX_LMCX_RODT_COMP_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000A0ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_RODT_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000078ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_RODT_MASK(block_id) (CVMX_ADD_IO_SEG(0x0001180088000268ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_SCRAMBLED_FADR(block_id) (CVMX_ADD_IO_SEG(0x0001180088000330ull))
+#define CVMX_LMCX_SCRAMBLE_CFG0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000320ull))
+#define CVMX_LMCX_SCRAMBLE_CFG1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000328ull))
+#define CVMX_LMCX_SLOT_CTL0(block_id) (CVMX_ADD_IO_SEG(0x00011800880001F8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_SLOT_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000200ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_SLOT_CTL2(block_id) (CVMX_ADD_IO_SEG(0x0001180088000208ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_TIMING_PARAMS0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000198ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_TIMING_PARAMS1(block_id) (CVMX_ADD_IO_SEG(0x00011800880001A0ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_TRO_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000248ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_TRO_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180088000250ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_WLEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000300ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_WLEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000308ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_WLEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800880002B0ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8)
+#define CVMX_LMCX_WODT_CTL0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000030ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_WODT_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000080ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_WODT_MASK(block_id) (CVMX_ADD_IO_SEG(0x00011800880001B0ull) + ((block_id) & 3) * 0x1000000ull)
+
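Editorial note, not part of the patch: the macros and inline helpers above index a memory-controller instance by block_id (with the per-family stride handled either in the macro or in the switch on cvmx_get_octeon_family()), and the resulting address is read through the unions defined below. A minimal sketch, assuming cvmx_read_csr() from <asm/octeon/cvmx.h> and the cvmx_lmcx_config union defined later in this header:

/*
 * Illustrative sketch only (not part of the patch): check whether ECC is
 * enabled on a given LMC instance via the block_id-indexed CONFIG macro.
 */
static int lmc_ecc_enabled(int lmc)
{
	union cvmx_lmcx_config cfg;

	cfg.u64 = cvmx_read_csr(CVMX_LMCX_CONFIG(lmc));
	return cfg.s.ecc_ena;
}
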
+union cvmx_lmcx_bist_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_bist_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t start:1;
+#else
+ uint64_t start:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_bist_result {
+ uint64_t u64;
+ struct cvmx_lmcx_bist_result_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t csrd2e:1;
+ uint64_t csre2d:1;
+ uint64_t mwf:1;
+ uint64_t mwd:3;
+ uint64_t mwc:1;
+ uint64_t mrf:1;
+ uint64_t mrd:3;
+#else
+ uint64_t mrd:3;
+ uint64_t mrf:1;
+ uint64_t mwc:1;
+ uint64_t mwd:3;
+ uint64_t mwf:1;
+ uint64_t csre2d:1;
+ uint64_t csrd2e:1;
+ uint64_t reserved_11_63:53;
+#endif
+ } s;
+ struct cvmx_lmcx_bist_result_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t mwf:1;
+ uint64_t mwd:3;
+ uint64_t mwc:1;
+ uint64_t mrf:1;
+ uint64_t mrd:3;
+#else
+ uint64_t mrd:3;
+ uint64_t mrf:1;
+ uint64_t mwc:1;
+ uint64_t mwd:3;
+ uint64_t mwf:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn50xx;
+};
+
+union cvmx_lmcx_char_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_char_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t dr:1;
+ uint64_t skew_on:1;
+ uint64_t en:1;
+ uint64_t sel:1;
+ uint64_t prog:8;
+ uint64_t prbs:32;
+#else
+ uint64_t prbs:32;
+ uint64_t prog:8;
+ uint64_t sel:1;
+ uint64_t en:1;
+ uint64_t skew_on:1;
+ uint64_t dr:1;
+ uint64_t reserved_44_63:20;
+#endif
+ } s;
+ struct cvmx_lmcx_char_ctl_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63:22;
+ uint64_t en:1;
+ uint64_t sel:1;
+ uint64_t prog:8;
+ uint64_t prbs:32;
+#else
+ uint64_t prbs:32;
+ uint64_t prog:8;
+ uint64_t sel:1;
+ uint64_t en:1;
+ uint64_t reserved_42_63:22;
+#endif
+ } cn63xx;
+};
+
+union cvmx_lmcx_char_mask0 {
+ uint64_t u64;
+ struct cvmx_lmcx_char_mask0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask:64;
+#else
+ uint64_t mask:64;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_char_mask1 {
+ uint64_t u64;
+ struct cvmx_lmcx_char_mask1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t mask:8;
+#else
+ uint64_t mask:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_char_mask2 {
+ uint64_t u64;
+ struct cvmx_lmcx_char_mask2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask:64;
+#else
+ uint64_t mask:64;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_char_mask3 {
+ uint64_t u64;
+ struct cvmx_lmcx_char_mask3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t mask:8;
+#else
+ uint64_t mask:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_char_mask4 {
+ uint64_t u64;
+ struct cvmx_lmcx_char_mask4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63:31;
+ uint64_t reset_n_mask:1;
+ uint64_t a_mask:16;
+ uint64_t ba_mask:3;
+ uint64_t we_n_mask:1;
+ uint64_t cas_n_mask:1;
+ uint64_t ras_n_mask:1;
+ uint64_t odt1_mask:2;
+ uint64_t odt0_mask:2;
+ uint64_t cs1_n_mask:2;
+ uint64_t cs0_n_mask:2;
+ uint64_t cke_mask:2;
+#else
+ uint64_t cke_mask:2;
+ uint64_t cs0_n_mask:2;
+ uint64_t cs1_n_mask:2;
+ uint64_t odt0_mask:2;
+ uint64_t odt1_mask:2;
+ uint64_t ras_n_mask:1;
+ uint64_t cas_n_mask:1;
+ uint64_t we_n_mask:1;
+ uint64_t ba_mask:3;
+ uint64_t a_mask:16;
+ uint64_t reset_n_mask:1;
+ uint64_t reserved_33_63:31;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_comp_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_comp_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t nctl_csr:4;
+ uint64_t nctl_clk:4;
+ uint64_t nctl_cmd:4;
+ uint64_t nctl_dat:4;
+ uint64_t pctl_csr:4;
+ uint64_t pctl_clk:4;
+ uint64_t reserved_0_7:8;
+#else
+ uint64_t reserved_0_7:8;
+ uint64_t pctl_clk:4;
+ uint64_t pctl_csr:4;
+ uint64_t nctl_dat:4;
+ uint64_t nctl_cmd:4;
+ uint64_t nctl_clk:4;
+ uint64_t nctl_csr:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_comp_ctl_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t nctl_csr:4;
+ uint64_t nctl_clk:4;
+ uint64_t nctl_cmd:4;
+ uint64_t nctl_dat:4;
+ uint64_t pctl_csr:4;
+ uint64_t pctl_clk:4;
+ uint64_t pctl_cmd:4;
+ uint64_t pctl_dat:4;
+#else
+ uint64_t pctl_dat:4;
+ uint64_t pctl_cmd:4;
+ uint64_t pctl_clk:4;
+ uint64_t pctl_csr:4;
+ uint64_t nctl_dat:4;
+ uint64_t nctl_cmd:4;
+ uint64_t nctl_clk:4;
+ uint64_t nctl_csr:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_comp_ctl_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t nctl_csr:4;
+ uint64_t reserved_20_27:8;
+ uint64_t nctl_dat:4;
+ uint64_t pctl_csr:4;
+ uint64_t reserved_5_11:7;
+ uint64_t pctl_dat:5;
+#else
+ uint64_t pctl_dat:5;
+ uint64_t reserved_5_11:7;
+ uint64_t pctl_csr:4;
+ uint64_t nctl_dat:4;
+ uint64_t reserved_20_27:8;
+ uint64_t nctl_csr:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn50xx;
+ struct cvmx_lmcx_comp_ctl_cn58xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t nctl_csr:4;
+ uint64_t reserved_20_27:8;
+ uint64_t nctl_dat:4;
+ uint64_t pctl_csr:4;
+ uint64_t reserved_4_11:8;
+ uint64_t pctl_dat:4;
+#else
+ uint64_t pctl_dat:4;
+ uint64_t reserved_4_11:8;
+ uint64_t pctl_csr:4;
+ uint64_t nctl_dat:4;
+ uint64_t reserved_20_27:8;
+ uint64_t nctl_csr:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn58xxp1;
+};
+
+union cvmx_lmcx_comp_ctl2 {
+ uint64_t u64;
+ struct cvmx_lmcx_comp_ctl2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t ddr__ptune:4;
+ uint64_t ddr__ntune:4;
+ uint64_t m180:1;
+ uint64_t byp:1;
+ uint64_t ptune:4;
+ uint64_t ntune:4;
+ uint64_t rodt_ctl:4;
+ uint64_t cmd_ctl:4;
+ uint64_t ck_ctl:4;
+ uint64_t dqx_ctl:4;
+#else
+ uint64_t dqx_ctl:4;
+ uint64_t ck_ctl:4;
+ uint64_t cmd_ctl:4;
+ uint64_t rodt_ctl:4;
+ uint64_t ntune:4;
+ uint64_t ptune:4;
+ uint64_t byp:1;
+ uint64_t m180:1;
+ uint64_t ddr__ntune:4;
+ uint64_t ddr__ptune:4;
+ uint64_t reserved_34_63:30;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_config {
+ uint64_t u64;
+ struct cvmx_lmcx_config_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63:3;
+ uint64_t mode32b:1;
+ uint64_t scrz:1;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t init_status:4;
+ uint64_t mirrmask:4;
+ uint64_t rankmask:4;
+ uint64_t rank_ena:1;
+ uint64_t sref_with_dll:1;
+ uint64_t early_dqx:1;
+ uint64_t sequence:3;
+ uint64_t ref_zqcs_int:19;
+ uint64_t reset:1;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t init_start:1;
+#else
+ uint64_t init_start:1;
+ uint64_t ecc_ena:1;
+ uint64_t row_lsb:3;
+ uint64_t pbank_lsb:4;
+ uint64_t idlepower:3;
+ uint64_t forcewrite:4;
+ uint64_t ecc_adr:1;
+ uint64_t reset:1;
+ uint64_t ref_zqcs_int:19;
+ uint64_t sequence:3;
+ uint64_t early_dqx:1;
+ uint64_t sref_with_dll:1;
+ uint64_t rank_ena:1;
+ uint64_t rankmask:4;
+ uint64_t mirrmask:4;
+ uint64_t init_status:4;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t scrz:1;
+ uint64_t mode32b:1;
+ uint64_t reserved_61_63:3;
+#endif
+ } s;
+ struct cvmx_lmcx_config_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63:5;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t init_status:4;
+ uint64_t mirrmask:4;
+ uint64_t rankmask:4;
+ uint64_t rank_ena:1;
+ uint64_t sref_with_dll:1;
+ uint64_t early_dqx:1;
+ uint64_t sequence:3;
+ uint64_t ref_zqcs_int:19;
+ uint64_t reset:1;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t init_start:1;
+#else
+ uint64_t init_start:1;
+ uint64_t ecc_ena:1;
+ uint64_t row_lsb:3;
+ uint64_t pbank_lsb:4;
+ uint64_t idlepower:3;
+ uint64_t forcewrite:4;
+ uint64_t ecc_adr:1;
+ uint64_t reset:1;
+ uint64_t ref_zqcs_int:19;
+ uint64_t sequence:3;
+ uint64_t early_dqx:1;
+ uint64_t sref_with_dll:1;
+ uint64_t rank_ena:1;
+ uint64_t rankmask:4;
+ uint64_t mirrmask:4;
+ uint64_t init_status:4;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t reserved_59_63:5;
+#endif
+ } cn63xx;
+ struct cvmx_lmcx_config_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_55_63:9;
+ uint64_t init_status:4;
+ uint64_t mirrmask:4;
+ uint64_t rankmask:4;
+ uint64_t rank_ena:1;
+ uint64_t sref_with_dll:1;
+ uint64_t early_dqx:1;
+ uint64_t sequence:3;
+ uint64_t ref_zqcs_int:19;
+ uint64_t reset:1;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t init_start:1;
+#else
+ uint64_t init_start:1;
+ uint64_t ecc_ena:1;
+ uint64_t row_lsb:3;
+ uint64_t pbank_lsb:4;
+ uint64_t idlepower:3;
+ uint64_t forcewrite:4;
+ uint64_t ecc_adr:1;
+ uint64_t reset:1;
+ uint64_t ref_zqcs_int:19;
+ uint64_t sequence:3;
+ uint64_t early_dqx:1;
+ uint64_t sref_with_dll:1;
+ uint64_t rank_ena:1;
+ uint64_t rankmask:4;
+ uint64_t mirrmask:4;
+ uint64_t init_status:4;
+ uint64_t reserved_55_63:9;
+#endif
+ } cn63xxp1;
+ struct cvmx_lmcx_config_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63:4;
+ uint64_t scrz:1;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t init_status:4;
+ uint64_t mirrmask:4;
+ uint64_t rankmask:4;
+ uint64_t rank_ena:1;
+ uint64_t sref_with_dll:1;
+ uint64_t early_dqx:1;
+ uint64_t sequence:3;
+ uint64_t ref_zqcs_int:19;
+ uint64_t reset:1;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t init_start:1;
+#else
+ uint64_t init_start:1;
+ uint64_t ecc_ena:1;
+ uint64_t row_lsb:3;
+ uint64_t pbank_lsb:4;
+ uint64_t idlepower:3;
+ uint64_t forcewrite:4;
+ uint64_t ecc_adr:1;
+ uint64_t reset:1;
+ uint64_t ref_zqcs_int:19;
+ uint64_t sequence:3;
+ uint64_t early_dqx:1;
+ uint64_t sref_with_dll:1;
+ uint64_t rank_ena:1;
+ uint64_t rankmask:4;
+ uint64_t mirrmask:4;
+ uint64_t init_status:4;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t scrz:1;
+ uint64_t reserved_60_63:4;
+#endif
+ } cn66xx;
+};
+
+union cvmx_lmcx_control {
+ uint64_t u64;
+ struct cvmx_lmcx_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t scramble_ena:1;
+ uint64_t thrcnt:12;
+ uint64_t persub:8;
+ uint64_t thrmax:4;
+ uint64_t crm_cnt:5;
+ uint64_t crm_thr:5;
+ uint64_t crm_max:5;
+ uint64_t rodt_bprch:1;
+ uint64_t wodt_bprch:1;
+ uint64_t bprch:2;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t nxm_write_en:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t inorder_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t throttle_rd:1;
+ uint64_t fprch2:2;
+ uint64_t pocas:1;
+ uint64_t ddr2t:1;
+ uint64_t bwcnt:1;
+ uint64_t rdimm_ena:1;
+#else
+ uint64_t rdimm_ena:1;
+ uint64_t bwcnt:1;
+ uint64_t ddr2t:1;
+ uint64_t pocas:1;
+ uint64_t fprch2:2;
+ uint64_t throttle_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t inorder_wr:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t nxm_write_en:1;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t bprch:2;
+ uint64_t wodt_bprch:1;
+ uint64_t rodt_bprch:1;
+ uint64_t crm_max:5;
+ uint64_t crm_thr:5;
+ uint64_t crm_cnt:5;
+ uint64_t thrmax:4;
+ uint64_t persub:8;
+ uint64_t thrcnt:12;
+ uint64_t scramble_ena:1;
+#endif
+ } s;
+ struct cvmx_lmcx_control_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t rodt_bprch:1;
+ uint64_t wodt_bprch:1;
+ uint64_t bprch:2;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t nxm_write_en:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t inorder_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t throttle_rd:1;
+ uint64_t fprch2:2;
+ uint64_t pocas:1;
+ uint64_t ddr2t:1;
+ uint64_t bwcnt:1;
+ uint64_t rdimm_ena:1;
+#else
+ uint64_t rdimm_ena:1;
+ uint64_t bwcnt:1;
+ uint64_t ddr2t:1;
+ uint64_t pocas:1;
+ uint64_t fprch2:2;
+ uint64_t throttle_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t inorder_wr:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t nxm_write_en:1;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t bprch:2;
+ uint64_t wodt_bprch:1;
+ uint64_t rodt_bprch:1;
+ uint64_t reserved_24_63:40;
+#endif
+ } cn63xx;
+ struct cvmx_lmcx_control_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t scramble_ena:1;
+ uint64_t reserved_24_62:39;
+ uint64_t rodt_bprch:1;
+ uint64_t wodt_bprch:1;
+ uint64_t bprch:2;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t nxm_write_en:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t inorder_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t throttle_rd:1;
+ uint64_t fprch2:2;
+ uint64_t pocas:1;
+ uint64_t ddr2t:1;
+ uint64_t bwcnt:1;
+ uint64_t rdimm_ena:1;
+#else
+ uint64_t rdimm_ena:1;
+ uint64_t bwcnt:1;
+ uint64_t ddr2t:1;
+ uint64_t pocas:1;
+ uint64_t fprch2:2;
+ uint64_t throttle_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t inorder_wr:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t nxm_write_en:1;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t bprch:2;
+ uint64_t wodt_bprch:1;
+ uint64_t rodt_bprch:1;
+ uint64_t reserved_24_62:39;
+ uint64_t scramble_ena:1;
+#endif
+ } cn66xx;
+ struct cvmx_lmcx_control_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63_63:1;
+ uint64_t thrcnt:12;
+ uint64_t persub:8;
+ uint64_t thrmax:4;
+ uint64_t crm_cnt:5;
+ uint64_t crm_thr:5;
+ uint64_t crm_max:5;
+ uint64_t rodt_bprch:1;
+ uint64_t wodt_bprch:1;
+ uint64_t bprch:2;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t nxm_write_en:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t inorder_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t throttle_rd:1;
+ uint64_t fprch2:2;
+ uint64_t pocas:1;
+ uint64_t ddr2t:1;
+ uint64_t bwcnt:1;
+ uint64_t rdimm_ena:1;
+#else
+ uint64_t rdimm_ena:1;
+ uint64_t bwcnt:1;
+ uint64_t ddr2t:1;
+ uint64_t pocas:1;
+ uint64_t fprch2:2;
+ uint64_t throttle_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t inorder_wr:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t nxm_write_en:1;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t bprch:2;
+ uint64_t wodt_bprch:1;
+ uint64_t rodt_bprch:1;
+ uint64_t crm_max:5;
+ uint64_t crm_thr:5;
+ uint64_t crm_cnt:5;
+ uint64_t thrmax:4;
+ uint64_t persub:8;
+ uint64_t thrcnt:12;
+ uint64_t reserved_63_63:1;
+#endif
+ } cn68xx;
+};
+
+union cvmx_lmcx_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t pll_div2:1;
+ uint64_t pll_bypass:1;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t reserved_10_11:2;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+#else
+ uint64_t dic:2;
+ uint64_t qs_dic:2;
+ uint64_t tskw:2;
+ uint64_t sil_lat:2;
+ uint64_t bprch:1;
+ uint64_t fprch2:1;
+ uint64_t reserved_10_11:2;
+ uint64_t inorder_mrf:1;
+ uint64_t inorder_mwf:1;
+ uint64_t r2r_slot:1;
+ uint64_t rdimm_ena:1;
+ uint64_t pll_bypass:1;
+ uint64_t pll_div2:1;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t slow_scf:1;
+ uint64_t ddr__pctl:4;
+ uint64_t ddr__nctl:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_ctl_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t pll_div2:1;
+ uint64_t pll_bypass:1;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t dreset:1;
+ uint64_t mode32b:1;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+#else
+ uint64_t dic:2;
+ uint64_t qs_dic:2;
+ uint64_t tskw:2;
+ uint64_t sil_lat:2;
+ uint64_t bprch:1;
+ uint64_t fprch2:1;
+ uint64_t mode32b:1;
+ uint64_t dreset:1;
+ uint64_t inorder_mrf:1;
+ uint64_t inorder_mwf:1;
+ uint64_t r2r_slot:1;
+ uint64_t rdimm_ena:1;
+ uint64_t pll_bypass:1;
+ uint64_t pll_div2:1;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t slow_scf:1;
+ uint64_t ddr__pctl:4;
+ uint64_t ddr__nctl:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_ctl_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t reserved_16_17:2;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t set_zero:1;
+ uint64_t mode128b:1;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+#else
+ uint64_t dic:2;
+ uint64_t qs_dic:2;
+ uint64_t tskw:2;
+ uint64_t sil_lat:2;
+ uint64_t bprch:1;
+ uint64_t fprch2:1;
+ uint64_t mode128b:1;
+ uint64_t set_zero:1;
+ uint64_t inorder_mrf:1;
+ uint64_t inorder_mwf:1;
+ uint64_t r2r_slot:1;
+ uint64_t rdimm_ena:1;
+ uint64_t reserved_16_17:2;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t slow_scf:1;
+ uint64_t ddr__pctl:4;
+ uint64_t ddr__nctl:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn38xx;
+ struct cvmx_lmcx_ctl_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t reserved_17_17:1;
+ uint64_t pll_bypass:1;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t dreset:1;
+ uint64_t mode32b:1;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+#else
+ uint64_t dic:2;
+ uint64_t qs_dic:2;
+ uint64_t tskw:2;
+ uint64_t sil_lat:2;
+ uint64_t bprch:1;
+ uint64_t fprch2:1;
+ uint64_t mode32b:1;
+ uint64_t dreset:1;
+ uint64_t inorder_mrf:1;
+ uint64_t inorder_mwf:1;
+ uint64_t r2r_slot:1;
+ uint64_t rdimm_ena:1;
+ uint64_t pll_bypass:1;
+ uint64_t reserved_17_17:1;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t slow_scf:1;
+ uint64_t ddr__pctl:4;
+ uint64_t ddr__nctl:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn50xx;
+ struct cvmx_lmcx_ctl_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t reserved_16_17:2;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t dreset:1;
+ uint64_t mode32b:1;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+#else
+ uint64_t dic:2;
+ uint64_t qs_dic:2;
+ uint64_t tskw:2;
+ uint64_t sil_lat:2;
+ uint64_t bprch:1;
+ uint64_t fprch2:1;
+ uint64_t mode32b:1;
+ uint64_t dreset:1;
+ uint64_t inorder_mrf:1;
+ uint64_t inorder_mwf:1;
+ uint64_t r2r_slot:1;
+ uint64_t rdimm_ena:1;
+ uint64_t reserved_16_17:2;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t slow_scf:1;
+ uint64_t ddr__pctl:4;
+ uint64_t ddr__nctl:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn52xx;
+ struct cvmx_lmcx_ctl_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t reserved_16_17:2;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t dreset:1;
+ uint64_t mode128b:1;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+#else
+ uint64_t dic:2;
+ uint64_t qs_dic:2;
+ uint64_t tskw:2;
+ uint64_t sil_lat:2;
+ uint64_t bprch:1;
+ uint64_t fprch2:1;
+ uint64_t mode128b:1;
+ uint64_t dreset:1;
+ uint64_t inorder_mrf:1;
+ uint64_t inorder_mwf:1;
+ uint64_t r2r_slot:1;
+ uint64_t rdimm_ena:1;
+ uint64_t reserved_16_17:2;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t slow_scf:1;
+ uint64_t ddr__pctl:4;
+ uint64_t ddr__nctl:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn58xx;
+};
+
+union cvmx_lmcx_ctl1 {
+ uint64_t u64;
+ struct cvmx_lmcx_ctl1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63:43;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t sequence:3;
+ uint64_t sil_mode:1;
+ uint64_t dcc_enable:1;
+ uint64_t reserved_2_7:6;
+ uint64_t data_layout:2;
+#else
+ uint64_t data_layout:2;
+ uint64_t reserved_2_7:6;
+ uint64_t dcc_enable:1;
+ uint64_t sil_mode:1;
+ uint64_t sequence:3;
+ uint64_t idlepower:3;
+ uint64_t forcewrite:4;
+ uint64_t ecc_adr:1;
+ uint64_t reserved_21_63:43;
+#endif
+ } s;
+ struct cvmx_lmcx_ctl1_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t data_layout:2;
+#else
+ uint64_t data_layout:2;
+ uint64_t reserved_2_63:62;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_ctl1_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t sil_mode:1;
+ uint64_t dcc_enable:1;
+ uint64_t reserved_2_7:6;
+ uint64_t data_layout:2;
+#else
+ uint64_t data_layout:2;
+ uint64_t reserved_2_7:6;
+ uint64_t dcc_enable:1;
+ uint64_t sil_mode:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn50xx;
+ struct cvmx_lmcx_ctl1_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63:43;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t sequence:3;
+ uint64_t sil_mode:1;
+ uint64_t dcc_enable:1;
+ uint64_t reserved_0_7:8;
+#else
+ uint64_t reserved_0_7:8;
+ uint64_t dcc_enable:1;
+ uint64_t sil_mode:1;
+ uint64_t sequence:3;
+ uint64_t idlepower:3;
+ uint64_t forcewrite:4;
+ uint64_t ecc_adr:1;
+ uint64_t reserved_21_63:43;
+#endif
+ } cn52xx;
+ struct cvmx_lmcx_ctl1_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t sil_mode:1;
+ uint64_t dcc_enable:1;
+ uint64_t reserved_0_7:8;
+#else
+ uint64_t reserved_0_7:8;
+ uint64_t dcc_enable:1;
+ uint64_t sil_mode:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn58xx;
+};
+
+union cvmx_lmcx_dclk_cnt {
+ uint64_t u64;
+ struct cvmx_lmcx_dclk_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dclkcnt:64;
+#else
+ uint64_t dclkcnt:64;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_dclk_cnt_hi {
+ uint64_t u64;
+ struct cvmx_lmcx_dclk_cnt_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t dclkcnt_hi:32;
+#else
+ uint64_t dclkcnt_hi:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_dclk_cnt_lo {
+ uint64_t u64;
+ struct cvmx_lmcx_dclk_cnt_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t dclkcnt_lo:32;
+#else
+ uint64_t dclkcnt_lo:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_dclk_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_dclk_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t off90_ena:1;
+ uint64_t dclk90_byp:1;
+ uint64_t dclk90_ld:1;
+ uint64_t dclk90_vlu:5;
+#else
+ uint64_t dclk90_vlu:5;
+ uint64_t dclk90_ld:1;
+ uint64_t dclk90_byp:1;
+ uint64_t off90_ena:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_ddr2_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_ddr2_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t bank8:1;
+ uint64_t burst8:1;
+ uint64_t addlat:3;
+ uint64_t pocas:1;
+ uint64_t bwcnt:1;
+ uint64_t twr:3;
+ uint64_t silo_hc:1;
+ uint64_t ddr_eof:4;
+ uint64_t tfaw:5;
+ uint64_t crip_mode:1;
+ uint64_t ddr2t:1;
+ uint64_t odt_ena:1;
+ uint64_t qdll_ena:1;
+ uint64_t dll90_vlu:5;
+ uint64_t dll90_byp:1;
+ uint64_t rdqs:1;
+ uint64_t ddr2:1;
+#else
+ uint64_t ddr2:1;
+ uint64_t rdqs:1;
+ uint64_t dll90_byp:1;
+ uint64_t dll90_vlu:5;
+ uint64_t qdll_ena:1;
+ uint64_t odt_ena:1;
+ uint64_t ddr2t:1;
+ uint64_t crip_mode:1;
+ uint64_t tfaw:5;
+ uint64_t ddr_eof:4;
+ uint64_t silo_hc:1;
+ uint64_t twr:3;
+ uint64_t bwcnt:1;
+ uint64_t pocas:1;
+ uint64_t addlat:3;
+ uint64_t burst8:1;
+ uint64_t bank8:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_ddr2_ctl_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t bank8:1;
+ uint64_t burst8:1;
+ uint64_t addlat:3;
+ uint64_t pocas:1;
+ uint64_t bwcnt:1;
+ uint64_t twr:3;
+ uint64_t silo_hc:1;
+ uint64_t ddr_eof:4;
+ uint64_t tfaw:5;
+ uint64_t crip_mode:1;
+ uint64_t ddr2t:1;
+ uint64_t odt_ena:1;
+ uint64_t qdll_ena:1;
+ uint64_t dll90_vlu:5;
+ uint64_t dll90_byp:1;
+ uint64_t reserved_1_1:1;
+ uint64_t ddr2:1;
+#else
+ uint64_t ddr2:1;
+ uint64_t reserved_1_1:1;
+ uint64_t dll90_byp:1;
+ uint64_t dll90_vlu:5;
+ uint64_t qdll_ena:1;
+ uint64_t odt_ena:1;
+ uint64_t ddr2t:1;
+ uint64_t crip_mode:1;
+ uint64_t tfaw:5;
+ uint64_t ddr_eof:4;
+ uint64_t silo_hc:1;
+ uint64_t twr:3;
+ uint64_t bwcnt:1;
+ uint64_t pocas:1;
+ uint64_t addlat:3;
+ uint64_t burst8:1;
+ uint64_t bank8:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn30xx;
+};
+
+union cvmx_lmcx_ddr_pll_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_ddr_pll_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63:37;
+ uint64_t jtg_test_mode:1;
+ uint64_t dfm_div_reset:1;
+ uint64_t dfm_ps_en:3;
+ uint64_t ddr_div_reset:1;
+ uint64_t ddr_ps_en:3;
+ uint64_t diffamp:4;
+ uint64_t cps:3;
+ uint64_t cpb:3;
+ uint64_t reset_n:1;
+ uint64_t clkf:7;
+#else
+ uint64_t clkf:7;
+ uint64_t reset_n:1;
+ uint64_t cpb:3;
+ uint64_t cps:3;
+ uint64_t diffamp:4;
+ uint64_t ddr_ps_en:3;
+ uint64_t ddr_div_reset:1;
+ uint64_t dfm_ps_en:3;
+ uint64_t dfm_div_reset:1;
+ uint64_t jtg_test_mode:1;
+ uint64_t reserved_27_63:37;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_delay_cfg {
+ uint64_t u64;
+ struct cvmx_lmcx_delay_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63:49;
+ uint64_t dq:5;
+ uint64_t cmd:5;
+ uint64_t clk:5;
+#else
+ uint64_t clk:5;
+ uint64_t cmd:5;
+ uint64_t dq:5;
+ uint64_t reserved_15_63:49;
+#endif
+ } s;
+ struct cvmx_lmcx_delay_cfg_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t dq:4;
+ uint64_t reserved_9_9:1;
+ uint64_t cmd:4;
+ uint64_t reserved_4_4:1;
+ uint64_t clk:4;
+#else
+ uint64_t clk:4;
+ uint64_t reserved_4_4:1;
+ uint64_t cmd:4;
+ uint64_t reserved_9_9:1;
+ uint64_t dq:4;
+ uint64_t reserved_14_63:50;
+#endif
+ } cn38xx;
+};
+
+union cvmx_lmcx_dimmx_params {
+ uint64_t u64;
+ struct cvmx_lmcx_dimmx_params_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rc15:4;
+ uint64_t rc14:4;
+ uint64_t rc13:4;
+ uint64_t rc12:4;
+ uint64_t rc11:4;
+ uint64_t rc10:4;
+ uint64_t rc9:4;
+ uint64_t rc8:4;
+ uint64_t rc7:4;
+ uint64_t rc6:4;
+ uint64_t rc5:4;
+ uint64_t rc4:4;
+ uint64_t rc3:4;
+ uint64_t rc2:4;
+ uint64_t rc1:4;
+ uint64_t rc0:4;
+#else
+ uint64_t rc0:4;
+ uint64_t rc1:4;
+ uint64_t rc2:4;
+ uint64_t rc3:4;
+ uint64_t rc4:4;
+ uint64_t rc5:4;
+ uint64_t rc6:4;
+ uint64_t rc7:4;
+ uint64_t rc8:4;
+ uint64_t rc9:4;
+ uint64_t rc10:4;
+ uint64_t rc11:4;
+ uint64_t rc12:4;
+ uint64_t rc13:4;
+ uint64_t rc14:4;
+ uint64_t rc15:4;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_dimm_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_dimm_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63:18;
+ uint64_t parity:1;
+ uint64_t tcws:13;
+ uint64_t dimm1_wmask:16;
+ uint64_t dimm0_wmask:16;
+#else
+ uint64_t dimm0_wmask:16;
+ uint64_t dimm1_wmask:16;
+ uint64_t tcws:13;
+ uint64_t parity:1;
+ uint64_t reserved_46_63:18;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_dll_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_dll_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t dreset:1;
+ uint64_t dll90_byp:1;
+ uint64_t dll90_ena:1;
+ uint64_t dll90_vlu:5;
+#else
+ uint64_t dll90_vlu:5;
+ uint64_t dll90_ena:1;
+ uint64_t dll90_byp:1;
+ uint64_t dreset:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_dll_ctl2 {
+ uint64_t u64;
+ struct cvmx_lmcx_dll_ctl2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t intf_en:1;
+ uint64_t dll_bringup:1;
+ uint64_t dreset:1;
+ uint64_t quad_dll_ena:1;
+ uint64_t byp_sel:4;
+ uint64_t byp_setting:8;
+#else
+ uint64_t byp_setting:8;
+ uint64_t byp_sel:4;
+ uint64_t quad_dll_ena:1;
+ uint64_t dreset:1;
+ uint64_t dll_bringup:1;
+ uint64_t intf_en:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+ struct cvmx_lmcx_dll_ctl2_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63:49;
+ uint64_t dll_bringup:1;
+ uint64_t dreset:1;
+ uint64_t quad_dll_ena:1;
+ uint64_t byp_sel:4;
+ uint64_t byp_setting:8;
+#else
+ uint64_t byp_setting:8;
+ uint64_t byp_sel:4;
+ uint64_t quad_dll_ena:1;
+ uint64_t dreset:1;
+ uint64_t dll_bringup:1;
+ uint64_t reserved_15_63:49;
+#endif
+ } cn63xx;
+};
+
+union cvmx_lmcx_dll_ctl3 {
+ uint64_t u64;
+ struct cvmx_lmcx_dll_ctl3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_41_63:23;
+ uint64_t dclk90_fwd:1;
+ uint64_t ddr_90_dly_byp:1;
+ uint64_t dclk90_recal_dis:1;
+ uint64_t dclk90_byp_sel:1;
+ uint64_t dclk90_byp_setting:8;
+ uint64_t dll_fast:1;
+ uint64_t dll90_setting:8;
+ uint64_t fine_tune_mode:1;
+ uint64_t dll_mode:1;
+ uint64_t dll90_byte_sel:4;
+ uint64_t offset_ena:1;
+ uint64_t load_offset:1;
+ uint64_t mode_sel:2;
+ uint64_t byte_sel:4;
+ uint64_t offset:6;
+#else
+ uint64_t offset:6;
+ uint64_t byte_sel:4;
+ uint64_t mode_sel:2;
+ uint64_t load_offset:1;
+ uint64_t offset_ena:1;
+ uint64_t dll90_byte_sel:4;
+ uint64_t dll_mode:1;
+ uint64_t fine_tune_mode:1;
+ uint64_t dll90_setting:8;
+ uint64_t dll_fast:1;
+ uint64_t dclk90_byp_setting:8;
+ uint64_t dclk90_byp_sel:1;
+ uint64_t dclk90_recal_dis:1;
+ uint64_t ddr_90_dly_byp:1;
+ uint64_t dclk90_fwd:1;
+ uint64_t reserved_41_63:23;
+#endif
+ } s;
+ struct cvmx_lmcx_dll_ctl3_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t dll_fast:1;
+ uint64_t dll90_setting:8;
+ uint64_t fine_tune_mode:1;
+ uint64_t dll_mode:1;
+ uint64_t dll90_byte_sel:4;
+ uint64_t offset_ena:1;
+ uint64_t load_offset:1;
+ uint64_t mode_sel:2;
+ uint64_t byte_sel:4;
+ uint64_t offset:6;
+#else
+ uint64_t offset:6;
+ uint64_t byte_sel:4;
+ uint64_t mode_sel:2;
+ uint64_t load_offset:1;
+ uint64_t offset_ena:1;
+ uint64_t dll90_byte_sel:4;
+ uint64_t dll_mode:1;
+ uint64_t fine_tune_mode:1;
+ uint64_t dll90_setting:8;
+ uint64_t dll_fast:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn63xx;
+};
+
+union cvmx_lmcx_dual_memcfg {
+ uint64_t u64;
+ struct cvmx_lmcx_dual_memcfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t bank8:1;
+ uint64_t row_lsb:3;
+ uint64_t reserved_8_15:8;
+ uint64_t cs_mask:8;
+#else
+ uint64_t cs_mask:8;
+ uint64_t reserved_8_15:8;
+ uint64_t row_lsb:3;
+ uint64_t bank8:1;
+ uint64_t reserved_20_63:44;
+#endif
+ } s;
+ struct cvmx_lmcx_dual_memcfg_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63:45;
+ uint64_t row_lsb:3;
+ uint64_t reserved_8_15:8;
+ uint64_t cs_mask:8;
+#else
+ uint64_t cs_mask:8;
+ uint64_t reserved_8_15:8;
+ uint64_t row_lsb:3;
+ uint64_t reserved_19_63:45;
+#endif
+ } cn61xx;
+};
+
+union cvmx_lmcx_ecc_synd {
+ uint64_t u64;
+ struct cvmx_lmcx_ecc_synd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t mrdsyn3:8;
+ uint64_t mrdsyn2:8;
+ uint64_t mrdsyn1:8;
+ uint64_t mrdsyn0:8;
+#else
+ uint64_t mrdsyn0:8;
+ uint64_t mrdsyn1:8;
+ uint64_t mrdsyn2:8;
+ uint64_t mrdsyn3:8;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_fadr {
+ uint64_t u64;
+ struct cvmx_lmcx_fadr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_lmcx_fadr_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t fdimm:2;
+ uint64_t fbunk:1;
+ uint64_t fbank:3;
+ uint64_t frow:14;
+ uint64_t fcol:12;
+#else
+ uint64_t fcol:12;
+ uint64_t frow:14;
+ uint64_t fbank:3;
+ uint64_t fbunk:1;
+ uint64_t fdimm:2;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_fadr_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63:28;
+ uint64_t fdimm:2;
+ uint64_t fbunk:1;
+ uint64_t fbank:3;
+ uint64_t frow:16;
+ uint64_t fcol:14;
+#else
+ uint64_t fcol:14;
+ uint64_t frow:16;
+ uint64_t fbank:3;
+ uint64_t fbunk:1;
+ uint64_t fdimm:2;
+ uint64_t reserved_36_63:28;
+#endif
+ } cn61xx;
+};
+
+union cvmx_lmcx_ifb_cnt {
+ uint64_t u64;
+ struct cvmx_lmcx_ifb_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ifbcnt:64;
+#else
+ uint64_t ifbcnt:64;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_ifb_cnt_hi {
+ uint64_t u64;
+ struct cvmx_lmcx_ifb_cnt_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ifbcnt_hi:32;
+#else
+ uint64_t ifbcnt_hi:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_ifb_cnt_lo {
+ uint64_t u64;
+ struct cvmx_lmcx_ifb_cnt_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ifbcnt_lo:32;
+#else
+ uint64_t ifbcnt_lo:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_int {
+ uint64_t u64;
+ struct cvmx_lmcx_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t ded_err:4;
+ uint64_t sec_err:4;
+ uint64_t nxm_wr_err:1;
+#else
+ uint64_t nxm_wr_err:1;
+ uint64_t sec_err:4;
+ uint64_t ded_err:4;
+ uint64_t reserved_9_63:55;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_int_en {
+ uint64_t u64;
+ struct cvmx_lmcx_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t intr_ded_ena:1;
+ uint64_t intr_sec_ena:1;
+ uint64_t intr_nxm_wr_ena:1;
+#else
+ uint64_t intr_nxm_wr_ena:1;
+ uint64_t intr_sec_ena:1;
+ uint64_t intr_ded_ena:1;
+ uint64_t reserved_3_63:61;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_mem_cfg0 {
+ uint64_t u64;
+ struct cvmx_lmcx_mem_cfg0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t reset:1;
+ uint64_t silo_qc:1;
+ uint64_t bunk_ena:1;
+ uint64_t ded_err:4;
+ uint64_t sec_err:4;
+ uint64_t intr_ded_ena:1;
+ uint64_t intr_sec_ena:1;
+ uint64_t tcl:4;
+ uint64_t ref_int:6;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t init_start:1;
+#else
+ uint64_t init_start:1;
+ uint64_t ecc_ena:1;
+ uint64_t row_lsb:3;
+ uint64_t pbank_lsb:4;
+ uint64_t ref_int:6;
+ uint64_t tcl:4;
+ uint64_t intr_sec_ena:1;
+ uint64_t intr_ded_ena:1;
+ uint64_t sec_err:4;
+ uint64_t ded_err:4;
+ uint64_t bunk_ena:1;
+ uint64_t silo_qc:1;
+ uint64_t reset:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_mem_cfg1 {
+ uint64_t u64;
+ struct cvmx_lmcx_mem_cfg1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t comp_bypass:1;
+ uint64_t trrd:3;
+ uint64_t caslat:3;
+ uint64_t tmrd:3;
+ uint64_t trfc:5;
+ uint64_t trp:4;
+ uint64_t twtr:4;
+ uint64_t trcd:4;
+ uint64_t tras:5;
+#else
+ uint64_t tras:5;
+ uint64_t trcd:4;
+ uint64_t twtr:4;
+ uint64_t trp:4;
+ uint64_t trfc:5;
+ uint64_t tmrd:3;
+ uint64_t caslat:3;
+ uint64_t trrd:3;
+ uint64_t comp_bypass:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_mem_cfg1_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63:33;
+ uint64_t trrd:3;
+ uint64_t caslat:3;
+ uint64_t tmrd:3;
+ uint64_t trfc:5;
+ uint64_t trp:4;
+ uint64_t twtr:4;
+ uint64_t trcd:4;
+ uint64_t tras:5;
+#else
+ uint64_t tras:5;
+ uint64_t trcd:4;
+ uint64_t twtr:4;
+ uint64_t trp:4;
+ uint64_t trfc:5;
+ uint64_t tmrd:3;
+ uint64_t caslat:3;
+ uint64_t trrd:3;
+ uint64_t reserved_31_63:33;
+#endif
+ } cn38xx;
+};
+
+union cvmx_lmcx_modereg_params0 {
+ uint64_t u64;
+ struct cvmx_lmcx_modereg_params0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t ppd:1;
+ uint64_t wrp:3;
+ uint64_t dllr:1;
+ uint64_t tm:1;
+ uint64_t rbt:1;
+ uint64_t cl:4;
+ uint64_t bl:2;
+ uint64_t qoff:1;
+ uint64_t tdqs:1;
+ uint64_t wlev:1;
+ uint64_t al:2;
+ uint64_t dll:1;
+ uint64_t mpr:1;
+ uint64_t mprloc:2;
+ uint64_t cwl:3;
+#else
+ uint64_t cwl:3;
+ uint64_t mprloc:2;
+ uint64_t mpr:1;
+ uint64_t dll:1;
+ uint64_t al:2;
+ uint64_t wlev:1;
+ uint64_t tdqs:1;
+ uint64_t qoff:1;
+ uint64_t bl:2;
+ uint64_t cl:4;
+ uint64_t rbt:1;
+ uint64_t tm:1;
+ uint64_t dllr:1;
+ uint64_t wrp:3;
+ uint64_t ppd:1;
+ uint64_t reserved_25_63:39;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_modereg_params1 {
+ uint64_t u64;
+ struct cvmx_lmcx_modereg_params1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t rtt_nom_11:3;
+ uint64_t dic_11:2;
+ uint64_t rtt_wr_11:2;
+ uint64_t srt_11:1;
+ uint64_t asr_11:1;
+ uint64_t pasr_11:3;
+ uint64_t rtt_nom_10:3;
+ uint64_t dic_10:2;
+ uint64_t rtt_wr_10:2;
+ uint64_t srt_10:1;
+ uint64_t asr_10:1;
+ uint64_t pasr_10:3;
+ uint64_t rtt_nom_01:3;
+ uint64_t dic_01:2;
+ uint64_t rtt_wr_01:2;
+ uint64_t srt_01:1;
+ uint64_t asr_01:1;
+ uint64_t pasr_01:3;
+ uint64_t rtt_nom_00:3;
+ uint64_t dic_00:2;
+ uint64_t rtt_wr_00:2;
+ uint64_t srt_00:1;
+ uint64_t asr_00:1;
+ uint64_t pasr_00:3;
+#else
+ uint64_t pasr_00:3;
+ uint64_t asr_00:1;
+ uint64_t srt_00:1;
+ uint64_t rtt_wr_00:2;
+ uint64_t dic_00:2;
+ uint64_t rtt_nom_00:3;
+ uint64_t pasr_01:3;
+ uint64_t asr_01:1;
+ uint64_t srt_01:1;
+ uint64_t rtt_wr_01:2;
+ uint64_t dic_01:2;
+ uint64_t rtt_nom_01:3;
+ uint64_t pasr_10:3;
+ uint64_t asr_10:1;
+ uint64_t srt_10:1;
+ uint64_t rtt_wr_10:2;
+ uint64_t dic_10:2;
+ uint64_t rtt_nom_10:3;
+ uint64_t pasr_11:3;
+ uint64_t asr_11:1;
+ uint64_t srt_11:1;
+ uint64_t rtt_wr_11:2;
+ uint64_t dic_11:2;
+ uint64_t rtt_nom_11:3;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_nxm {
+ uint64_t u64;
+ struct cvmx_lmcx_nxm_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t mem_msb_d3_r1:4;
+ uint64_t mem_msb_d3_r0:4;
+ uint64_t mem_msb_d2_r1:4;
+ uint64_t mem_msb_d2_r0:4;
+ uint64_t mem_msb_d1_r1:4;
+ uint64_t mem_msb_d1_r0:4;
+ uint64_t mem_msb_d0_r1:4;
+ uint64_t mem_msb_d0_r0:4;
+ uint64_t cs_mask:8;
+#else
+ uint64_t cs_mask:8;
+ uint64_t mem_msb_d0_r0:4;
+ uint64_t mem_msb_d0_r1:4;
+ uint64_t mem_msb_d1_r0:4;
+ uint64_t mem_msb_d1_r1:4;
+ uint64_t mem_msb_d2_r0:4;
+ uint64_t mem_msb_d2_r1:4;
+ uint64_t mem_msb_d3_r0:4;
+ uint64_t mem_msb_d3_r1:4;
+ uint64_t reserved_40_63:24;
+#endif
+ } s;
+ struct cvmx_lmcx_nxm_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t cs_mask:8;
+#else
+ uint64_t cs_mask:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } cn52xx;
+};
+
+union cvmx_lmcx_ops_cnt {
+ uint64_t u64;
+ struct cvmx_lmcx_ops_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t opscnt:64;
+#else
+ uint64_t opscnt:64;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_ops_cnt_hi {
+ uint64_t u64;
+ struct cvmx_lmcx_ops_cnt_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t opscnt_hi:32;
+#else
+ uint64_t opscnt_hi:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_ops_cnt_lo {
+ uint64_t u64;
+ struct cvmx_lmcx_ops_cnt_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t opscnt_lo:32;
+#else
+ uint64_t opscnt_lo:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_phy_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_phy_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63:49;
+ uint64_t rx_always_on:1;
+ uint64_t lv_mode:1;
+ uint64_t ck_tune1:1;
+ uint64_t ck_dlyout1:4;
+ uint64_t ck_tune0:1;
+ uint64_t ck_dlyout0:4;
+ uint64_t loopback:1;
+ uint64_t loopback_pos:1;
+ uint64_t ts_stagger:1;
+#else
+ uint64_t ts_stagger:1;
+ uint64_t loopback_pos:1;
+ uint64_t loopback:1;
+ uint64_t ck_dlyout0:4;
+ uint64_t ck_tune0:1;
+ uint64_t ck_dlyout1:4;
+ uint64_t ck_tune1:1;
+ uint64_t lv_mode:1;
+ uint64_t rx_always_on:1;
+ uint64_t reserved_15_63:49;
+#endif
+ } s;
+ struct cvmx_lmcx_phy_ctl_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t lv_mode:1;
+ uint64_t ck_tune1:1;
+ uint64_t ck_dlyout1:4;
+ uint64_t ck_tune0:1;
+ uint64_t ck_dlyout0:4;
+ uint64_t loopback:1;
+ uint64_t loopback_pos:1;
+ uint64_t ts_stagger:1;
+#else
+ uint64_t ts_stagger:1;
+ uint64_t loopback_pos:1;
+ uint64_t loopback:1;
+ uint64_t ck_dlyout0:4;
+ uint64_t ck_tune0:1;
+ uint64_t ck_dlyout1:4;
+ uint64_t ck_tune1:1;
+ uint64_t lv_mode:1;
+ uint64_t reserved_14_63:50;
+#endif
+ } cn63xxp1;
+};
+
+union cvmx_lmcx_pll_bwctl {
+ uint64_t u64;
+ struct cvmx_lmcx_pll_bwctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t bwupd:1;
+ uint64_t bwctl:4;
+#else
+ uint64_t bwctl:4;
+ uint64_t bwupd:1;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_pll_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_pll_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_30_63:34;
+ uint64_t bypass:1;
+ uint64_t fasten_n:1;
+ uint64_t div_reset:1;
+ uint64_t reset_n:1;
+ uint64_t clkf:12;
+ uint64_t clkr:6;
+ uint64_t reserved_6_7:2;
+ uint64_t en16:1;
+ uint64_t en12:1;
+ uint64_t en8:1;
+ uint64_t en6:1;
+ uint64_t en4:1;
+ uint64_t en2:1;
+#else
+ uint64_t en2:1;
+ uint64_t en4:1;
+ uint64_t en6:1;
+ uint64_t en8:1;
+ uint64_t en12:1;
+ uint64_t en16:1;
+ uint64_t reserved_6_7:2;
+ uint64_t clkr:6;
+ uint64_t clkf:12;
+ uint64_t reset_n:1;
+ uint64_t div_reset:1;
+ uint64_t fasten_n:1;
+ uint64_t bypass:1;
+ uint64_t reserved_30_63:34;
+#endif
+ } s;
+ struct cvmx_lmcx_pll_ctl_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t fasten_n:1;
+ uint64_t div_reset:1;
+ uint64_t reset_n:1;
+ uint64_t clkf:12;
+ uint64_t clkr:6;
+ uint64_t reserved_6_7:2;
+ uint64_t en16:1;
+ uint64_t en12:1;
+ uint64_t en8:1;
+ uint64_t en6:1;
+ uint64_t en4:1;
+ uint64_t en2:1;
+#else
+ uint64_t en2:1;
+ uint64_t en4:1;
+ uint64_t en6:1;
+ uint64_t en8:1;
+ uint64_t en12:1;
+ uint64_t en16:1;
+ uint64_t reserved_6_7:2;
+ uint64_t clkr:6;
+ uint64_t clkf:12;
+ uint64_t reset_n:1;
+ uint64_t div_reset:1;
+ uint64_t fasten_n:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn50xx;
+ struct cvmx_lmcx_pll_ctl_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t div_reset:1;
+ uint64_t reset_n:1;
+ uint64_t clkf:12;
+ uint64_t clkr:6;
+ uint64_t reserved_6_7:2;
+ uint64_t en16:1;
+ uint64_t en12:1;
+ uint64_t en8:1;
+ uint64_t en6:1;
+ uint64_t en4:1;
+ uint64_t en2:1;
+#else
+ uint64_t en2:1;
+ uint64_t en4:1;
+ uint64_t en6:1;
+ uint64_t en8:1;
+ uint64_t en12:1;
+ uint64_t en16:1;
+ uint64_t reserved_6_7:2;
+ uint64_t clkr:6;
+ uint64_t clkf:12;
+ uint64_t reset_n:1;
+ uint64_t div_reset:1;
+ uint64_t reserved_28_63:36;
+#endif
+ } cn56xxp1;
+};
+
+union cvmx_lmcx_pll_status {
+ uint64_t u64;
+ struct cvmx_lmcx_pll_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:5;
+ uint64_t ddr__pctl:5;
+ uint64_t reserved_2_21:20;
+ uint64_t rfslip:1;
+ uint64_t fbslip:1;
+#else
+ uint64_t fbslip:1;
+ uint64_t rfslip:1;
+ uint64_t reserved_2_21:20;
+ uint64_t ddr__pctl:5;
+ uint64_t ddr__nctl:5;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_pll_status_cn58xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t rfslip:1;
+ uint64_t fbslip:1;
+#else
+ uint64_t fbslip:1;
+ uint64_t rfslip:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } cn58xxp1;
+};
+
+union cvmx_lmcx_read_level_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_read_level_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t rankmask:4;
+ uint64_t pattern:8;
+ uint64_t row:16;
+ uint64_t col:12;
+ uint64_t reserved_3_3:1;
+ uint64_t bnk:3;
+#else
+ uint64_t bnk:3;
+ uint64_t reserved_3_3:1;
+ uint64_t col:12;
+ uint64_t row:16;
+ uint64_t pattern:8;
+ uint64_t rankmask:4;
+ uint64_t reserved_44_63:20;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_read_level_dbg {
+ uint64_t u64;
+ struct cvmx_lmcx_read_level_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t bitmask:16;
+ uint64_t reserved_4_15:12;
+ uint64_t byte:4;
+#else
+ uint64_t byte:4;
+ uint64_t reserved_4_15:12;
+ uint64_t bitmask:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_read_level_rankx {
+ uint64_t u64;
+ struct cvmx_lmcx_read_level_rankx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63:26;
+ uint64_t status:2;
+ uint64_t byte8:4;
+ uint64_t byte7:4;
+ uint64_t byte6:4;
+ uint64_t byte5:4;
+ uint64_t byte4:4;
+ uint64_t byte3:4;
+ uint64_t byte2:4;
+ uint64_t byte1:4;
+ uint64_t byte0:4;
+#else
+ uint64_t byte0:4;
+ uint64_t byte1:4;
+ uint64_t byte2:4;
+ uint64_t byte3:4;
+ uint64_t byte4:4;
+ uint64_t byte5:4;
+ uint64_t byte6:4;
+ uint64_t byte7:4;
+ uint64_t byte8:4;
+ uint64_t status:2;
+ uint64_t reserved_38_63:26;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_reset_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_reset_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t ddr3psv:1;
+ uint64_t ddr3psoft:1;
+ uint64_t ddr3pwarm:1;
+ uint64_t ddr3rst:1;
+#else
+ uint64_t ddr3rst:1;
+ uint64_t ddr3pwarm:1;
+ uint64_t ddr3psoft:1;
+ uint64_t ddr3psv:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_rlevel_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_rlevel_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t delay_unload_3:1;
+ uint64_t delay_unload_2:1;
+ uint64_t delay_unload_1:1;
+ uint64_t delay_unload_0:1;
+ uint64_t bitmask:8;
+ uint64_t or_dis:1;
+ uint64_t offset_en:1;
+ uint64_t offset:4;
+ uint64_t byte:4;
+#else
+ uint64_t byte:4;
+ uint64_t offset:4;
+ uint64_t offset_en:1;
+ uint64_t or_dis:1;
+ uint64_t bitmask:8;
+ uint64_t delay_unload_0:1;
+ uint64_t delay_unload_1:1;
+ uint64_t delay_unload_2:1;
+ uint64_t delay_unload_3:1;
+ uint64_t reserved_22_63:42;
+#endif
+ } s;
+ struct cvmx_lmcx_rlevel_ctl_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t offset_en:1;
+ uint64_t offset:4;
+ uint64_t byte:4;
+#else
+ uint64_t byte:4;
+ uint64_t offset:4;
+ uint64_t offset_en:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn63xxp1;
+};
+
+union cvmx_lmcx_rlevel_dbg {
+ uint64_t u64;
+ struct cvmx_lmcx_rlevel_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bitmask:64;
+#else
+ uint64_t bitmask:64;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_rlevel_rankx {
+ uint64_t u64;
+ struct cvmx_lmcx_rlevel_rankx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63:8;
+ uint64_t status:2;
+ uint64_t byte8:6;
+ uint64_t byte7:6;
+ uint64_t byte6:6;
+ uint64_t byte5:6;
+ uint64_t byte4:6;
+ uint64_t byte3:6;
+ uint64_t byte2:6;
+ uint64_t byte1:6;
+ uint64_t byte0:6;
+#else
+ uint64_t byte0:6;
+ uint64_t byte1:6;
+ uint64_t byte2:6;
+ uint64_t byte3:6;
+ uint64_t byte4:6;
+ uint64_t byte5:6;
+ uint64_t byte6:6;
+ uint64_t byte7:6;
+ uint64_t byte8:6;
+ uint64_t status:2;
+ uint64_t reserved_56_63:8;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_rodt_comp_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_rodt_comp_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t enable:1;
+ uint64_t reserved_12_15:4;
+ uint64_t nctl:4;
+ uint64_t reserved_5_7:3;
+ uint64_t pctl:5;
+#else
+ uint64_t pctl:5;
+ uint64_t reserved_5_7:3;
+ uint64_t nctl:4;
+ uint64_t reserved_12_15:4;
+ uint64_t enable:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_rodt_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_rodt_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t rodt_hi3:4;
+ uint64_t rodt_hi2:4;
+ uint64_t rodt_hi1:4;
+ uint64_t rodt_hi0:4;
+ uint64_t rodt_lo3:4;
+ uint64_t rodt_lo2:4;
+ uint64_t rodt_lo1:4;
+ uint64_t rodt_lo0:4;
+#else
+ uint64_t rodt_lo0:4;
+ uint64_t rodt_lo1:4;
+ uint64_t rodt_lo2:4;
+ uint64_t rodt_lo3:4;
+ uint64_t rodt_hi0:4;
+ uint64_t rodt_hi1:4;
+ uint64_t rodt_hi2:4;
+ uint64_t rodt_hi3:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_rodt_mask {
+ uint64_t u64;
+ struct cvmx_lmcx_rodt_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rodt_d3_r1:8;
+ uint64_t rodt_d3_r0:8;
+ uint64_t rodt_d2_r1:8;
+ uint64_t rodt_d2_r0:8;
+ uint64_t rodt_d1_r1:8;
+ uint64_t rodt_d1_r0:8;
+ uint64_t rodt_d0_r1:8;
+ uint64_t rodt_d0_r0:8;
+#else
+ uint64_t rodt_d0_r0:8;
+ uint64_t rodt_d0_r1:8;
+ uint64_t rodt_d1_r0:8;
+ uint64_t rodt_d1_r1:8;
+ uint64_t rodt_d2_r0:8;
+ uint64_t rodt_d2_r1:8;
+ uint64_t rodt_d3_r0:8;
+ uint64_t rodt_d3_r1:8;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_scramble_cfg0 {
+ uint64_t u64;
+ struct cvmx_lmcx_scramble_cfg0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t key:64;
+#else
+ uint64_t key:64;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_scramble_cfg1 {
+ uint64_t u64;
+ struct cvmx_lmcx_scramble_cfg1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t key:64;
+#else
+ uint64_t key:64;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_scrambled_fadr {
+ uint64_t u64;
+ struct cvmx_lmcx_scrambled_fadr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63:28;
+ uint64_t fdimm:2;
+ uint64_t fbunk:1;
+ uint64_t fbank:3;
+ uint64_t frow:16;
+ uint64_t fcol:14;
+#else
+ uint64_t fcol:14;
+ uint64_t frow:16;
+ uint64_t fbank:3;
+ uint64_t fbunk:1;
+ uint64_t fdimm:2;
+ uint64_t reserved_36_63:28;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_slot_ctl0 {
+ uint64_t u64;
+ struct cvmx_lmcx_slot_ctl0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t w2w_init:6;
+ uint64_t w2r_init:6;
+ uint64_t r2w_init:6;
+ uint64_t r2r_init:6;
+#else
+ uint64_t r2r_init:6;
+ uint64_t r2w_init:6;
+ uint64_t w2r_init:6;
+ uint64_t w2w_init:6;
+ uint64_t reserved_24_63:40;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_slot_ctl1 {
+ uint64_t u64;
+ struct cvmx_lmcx_slot_ctl1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t w2w_xrank_init:6;
+ uint64_t w2r_xrank_init:6;
+ uint64_t r2w_xrank_init:6;
+ uint64_t r2r_xrank_init:6;
+#else
+ uint64_t r2r_xrank_init:6;
+ uint64_t r2w_xrank_init:6;
+ uint64_t w2r_xrank_init:6;
+ uint64_t w2w_xrank_init:6;
+ uint64_t reserved_24_63:40;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_slot_ctl2 {
+ uint64_t u64;
+ struct cvmx_lmcx_slot_ctl2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t w2w_xdimm_init:6;
+ uint64_t w2r_xdimm_init:6;
+ uint64_t r2w_xdimm_init:6;
+ uint64_t r2r_xdimm_init:6;
+#else
+ uint64_t r2r_xdimm_init:6;
+ uint64_t r2w_xdimm_init:6;
+ uint64_t w2r_xdimm_init:6;
+ uint64_t w2w_xdimm_init:6;
+ uint64_t reserved_24_63:40;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_timing_params0 {
+ uint64_t u64;
+ struct cvmx_lmcx_timing_params0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63:17;
+ uint64_t trp_ext:1;
+ uint64_t tcksre:4;
+ uint64_t trp:4;
+ uint64_t tzqinit:4;
+ uint64_t tdllk:4;
+ uint64_t tmod:4;
+ uint64_t tmrd:4;
+ uint64_t txpr:4;
+ uint64_t tcke:4;
+ uint64_t tzqcs:4;
+ uint64_t tckeon:10;
+#else
+ uint64_t tckeon:10;
+ uint64_t tzqcs:4;
+ uint64_t tcke:4;
+ uint64_t txpr:4;
+ uint64_t tmrd:4;
+ uint64_t tmod:4;
+ uint64_t tdllk:4;
+ uint64_t tzqinit:4;
+ uint64_t trp:4;
+ uint64_t tcksre:4;
+ uint64_t trp_ext:1;
+ uint64_t reserved_47_63:17;
+#endif
+ } s;
+ struct cvmx_lmcx_timing_params0_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63:17;
+ uint64_t trp_ext:1;
+ uint64_t tcksre:4;
+ uint64_t trp:4;
+ uint64_t tzqinit:4;
+ uint64_t tdllk:4;
+ uint64_t tmod:4;
+ uint64_t tmrd:4;
+ uint64_t txpr:4;
+ uint64_t tcke:4;
+ uint64_t tzqcs:4;
+ uint64_t reserved_0_9:10;
+#else
+ uint64_t reserved_0_9:10;
+ uint64_t tzqcs:4;
+ uint64_t tcke:4;
+ uint64_t txpr:4;
+ uint64_t tmrd:4;
+ uint64_t tmod:4;
+ uint64_t tdllk:4;
+ uint64_t tzqinit:4;
+ uint64_t trp:4;
+ uint64_t tcksre:4;
+ uint64_t trp_ext:1;
+ uint64_t reserved_47_63:17;
+#endif
+ } cn61xx;
+ struct cvmx_lmcx_timing_params0_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63:18;
+ uint64_t tcksre:4;
+ uint64_t trp:4;
+ uint64_t tzqinit:4;
+ uint64_t tdllk:4;
+ uint64_t tmod:4;
+ uint64_t tmrd:4;
+ uint64_t txpr:4;
+ uint64_t tcke:4;
+ uint64_t tzqcs:4;
+ uint64_t tckeon:10;
+#else
+ uint64_t tckeon:10;
+ uint64_t tzqcs:4;
+ uint64_t tcke:4;
+ uint64_t txpr:4;
+ uint64_t tmrd:4;
+ uint64_t tmod:4;
+ uint64_t tdllk:4;
+ uint64_t tzqinit:4;
+ uint64_t trp:4;
+ uint64_t tcksre:4;
+ uint64_t reserved_46_63:18;
+#endif
+ } cn63xxp1;
+};
+
+union cvmx_lmcx_timing_params1 {
+ uint64_t u64;
+ struct cvmx_lmcx_timing_params1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63:17;
+ uint64_t tras_ext:1;
+ uint64_t txpdll:5;
+ uint64_t tfaw:5;
+ uint64_t twldqsen:4;
+ uint64_t twlmrd:4;
+ uint64_t txp:3;
+ uint64_t trrd:3;
+ uint64_t trfc:5;
+ uint64_t twtr:4;
+ uint64_t trcd:4;
+ uint64_t tras:5;
+ uint64_t tmprr:4;
+#else
+ uint64_t tmprr:4;
+ uint64_t tras:5;
+ uint64_t trcd:4;
+ uint64_t twtr:4;
+ uint64_t trfc:5;
+ uint64_t trrd:3;
+ uint64_t txp:3;
+ uint64_t twlmrd:4;
+ uint64_t twldqsen:4;
+ uint64_t tfaw:5;
+ uint64_t txpdll:5;
+ uint64_t tras_ext:1;
+ uint64_t reserved_47_63:17;
+#endif
+ } s;
+ struct cvmx_lmcx_timing_params1_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63:18;
+ uint64_t txpdll:5;
+ uint64_t tfaw:5;
+ uint64_t twldqsen:4;
+ uint64_t twlmrd:4;
+ uint64_t txp:3;
+ uint64_t trrd:3;
+ uint64_t trfc:5;
+ uint64_t twtr:4;
+ uint64_t trcd:4;
+ uint64_t tras:5;
+ uint64_t tmprr:4;
+#else
+ uint64_t tmprr:4;
+ uint64_t tras:5;
+ uint64_t trcd:4;
+ uint64_t twtr:4;
+ uint64_t trfc:5;
+ uint64_t trrd:3;
+ uint64_t txp:3;
+ uint64_t twlmrd:4;
+ uint64_t twldqsen:4;
+ uint64_t tfaw:5;
+ uint64_t txpdll:5;
+ uint64_t reserved_46_63:18;
+#endif
+ } cn63xxp1;
+};
+
+union cvmx_lmcx_tro_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_tro_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63:31;
+ uint64_t rclk_cnt:32;
+ uint64_t treset:1;
+#else
+ uint64_t treset:1;
+ uint64_t rclk_cnt:32;
+ uint64_t reserved_33_63:31;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_tro_stat {
+ uint64_t u64;
+ struct cvmx_lmcx_tro_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ring_cnt:32;
+#else
+ uint64_t ring_cnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_wlevel_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_wlevel_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t rtt_nom:3;
+ uint64_t bitmask:8;
+ uint64_t or_dis:1;
+ uint64_t sset:1;
+ uint64_t lanemask:9;
+#else
+ uint64_t lanemask:9;
+ uint64_t sset:1;
+ uint64_t or_dis:1;
+ uint64_t bitmask:8;
+ uint64_t rtt_nom:3;
+ uint64_t reserved_22_63:42;
+#endif
+ } s;
+ struct cvmx_lmcx_wlevel_ctl_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t sset:1;
+ uint64_t lanemask:9;
+#else
+ uint64_t lanemask:9;
+ uint64_t sset:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn63xxp1;
+};
+
+union cvmx_lmcx_wlevel_dbg {
+ uint64_t u64;
+ struct cvmx_lmcx_wlevel_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t bitmask:8;
+ uint64_t byte:4;
+#else
+ uint64_t byte:4;
+ uint64_t bitmask:8;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_wlevel_rankx {
+ uint64_t u64;
+ struct cvmx_lmcx_wlevel_rankx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63:17;
+ uint64_t status:2;
+ uint64_t byte8:5;
+ uint64_t byte7:5;
+ uint64_t byte6:5;
+ uint64_t byte5:5;
+ uint64_t byte4:5;
+ uint64_t byte3:5;
+ uint64_t byte2:5;
+ uint64_t byte1:5;
+ uint64_t byte0:5;
+#else
+ uint64_t byte0:5;
+ uint64_t byte1:5;
+ uint64_t byte2:5;
+ uint64_t byte3:5;
+ uint64_t byte4:5;
+ uint64_t byte5:5;
+ uint64_t byte6:5;
+ uint64_t byte7:5;
+ uint64_t byte8:5;
+ uint64_t status:2;
+ uint64_t reserved_47_63:17;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_wodt_ctl0 {
+ uint64_t u64;
+ struct cvmx_lmcx_wodt_ctl0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t wodt_d1_r1:8;
+ uint64_t wodt_d1_r0:8;
+ uint64_t wodt_d0_r1:8;
+ uint64_t wodt_d0_r0:8;
+#else
+ uint64_t wodt_d0_r0:8;
+ uint64_t wodt_d0_r1:8;
+ uint64_t wodt_d1_r0:8;
+ uint64_t wodt_d1_r1:8;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_wodt_ctl0_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t wodt_hi3:4;
+ uint64_t wodt_hi2:4;
+ uint64_t wodt_hi1:4;
+ uint64_t wodt_hi0:4;
+ uint64_t wodt_lo3:4;
+ uint64_t wodt_lo2:4;
+ uint64_t wodt_lo1:4;
+ uint64_t wodt_lo0:4;
+#else
+ uint64_t wodt_lo0:4;
+ uint64_t wodt_lo1:4;
+ uint64_t wodt_lo2:4;
+ uint64_t wodt_lo3:4;
+ uint64_t wodt_hi0:4;
+ uint64_t wodt_hi1:4;
+ uint64_t wodt_hi2:4;
+ uint64_t wodt_hi3:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn38xx;
+};
+
+union cvmx_lmcx_wodt_ctl1 {
+ uint64_t u64;
+ struct cvmx_lmcx_wodt_ctl1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t wodt_d3_r1:8;
+ uint64_t wodt_d3_r0:8;
+ uint64_t wodt_d2_r1:8;
+ uint64_t wodt_d2_r0:8;
+#else
+ uint64_t wodt_d2_r0:8;
+ uint64_t wodt_d2_r1:8;
+ uint64_t wodt_d3_r0:8;
+ uint64_t wodt_d3_r1:8;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_lmcx_wodt_mask {
+ uint64_t u64;
+ struct cvmx_lmcx_wodt_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wodt_d3_r1:8;
+ uint64_t wodt_d3_r0:8;
+ uint64_t wodt_d2_r1:8;
+ uint64_t wodt_d2_r0:8;
+ uint64_t wodt_d1_r1:8;
+ uint64_t wodt_d1_r0:8;
+ uint64_t wodt_d0_r1:8;
+ uint64_t wodt_d0_r0:8;
+#else
+ uint64_t wodt_d0_r0:8;
+ uint64_t wodt_d0_r1:8;
+ uint64_t wodt_d1_r0:8;
+ uint64_t wodt_d1_r1:8;
+ uint64_t wodt_d2_r0:8;
+ uint64_t wodt_d2_r1:8;
+ uint64_t wodt_d3_r0:8;
+ uint64_t wodt_d3_r1:8;
+#endif
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-mio-defs.h b/arch/mips/include/asm/octeon/cvmx-mio-defs.h
new file mode 100644
index 000000000..4ad95d040
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-mio-defs.h
@@ -0,0 +1,4396 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_MIO_DEFS_H__
+#define __CVMX_MIO_DEFS_H__
+
+#define CVMX_MIO_BOOT_BIST_STAT (CVMX_ADD_IO_SEG(0x00011800000000F8ull))
+#define CVMX_MIO_BOOT_COMP (CVMX_ADD_IO_SEG(0x00011800000000B8ull))
+#define CVMX_MIO_BOOT_DMA_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001180000000100ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_BOOT_DMA_INTX(offset) (CVMX_ADD_IO_SEG(0x0001180000000138ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_BOOT_DMA_INT_ENX(offset) (CVMX_ADD_IO_SEG(0x0001180000000150ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_BOOT_DMA_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001180000000120ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_BOOT_ERR (CVMX_ADD_IO_SEG(0x00011800000000A0ull))
+#define CVMX_MIO_BOOT_INT (CVMX_ADD_IO_SEG(0x00011800000000A8ull))
+#define CVMX_MIO_BOOT_LOC_ADR (CVMX_ADD_IO_SEG(0x0001180000000090ull))
+#define CVMX_MIO_BOOT_LOC_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001180000000080ull) + ((offset) & 1) * 8)
+#define CVMX_MIO_BOOT_LOC_DAT (CVMX_ADD_IO_SEG(0x0001180000000098ull))
+#define CVMX_MIO_BOOT_PIN_DEFS (CVMX_ADD_IO_SEG(0x00011800000000C0ull))
+#define CVMX_MIO_BOOT_REG_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001180000000000ull) + ((offset) & 7) * 8)
+#define CVMX_MIO_BOOT_REG_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001180000000040ull) + ((offset) & 7) * 8)
+#define CVMX_MIO_BOOT_THR (CVMX_ADD_IO_SEG(0x00011800000000B0ull))
+#define CVMX_MIO_EMM_BUF_DAT (CVMX_ADD_IO_SEG(0x00011800000020E8ull))
+#define CVMX_MIO_EMM_BUF_IDX (CVMX_ADD_IO_SEG(0x00011800000020E0ull))
+#define CVMX_MIO_EMM_CFG (CVMX_ADD_IO_SEG(0x0001180000002000ull))
+#define CVMX_MIO_EMM_CMD (CVMX_ADD_IO_SEG(0x0001180000002058ull))
+#define CVMX_MIO_EMM_DMA (CVMX_ADD_IO_SEG(0x0001180000002050ull))
+#define CVMX_MIO_EMM_INT (CVMX_ADD_IO_SEG(0x0001180000002078ull))
+#define CVMX_MIO_EMM_INT_EN (CVMX_ADD_IO_SEG(0x0001180000002080ull))
+#define CVMX_MIO_EMM_MODEX(offset) (CVMX_ADD_IO_SEG(0x0001180000002008ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_EMM_RCA (CVMX_ADD_IO_SEG(0x00011800000020A0ull))
+#define CVMX_MIO_EMM_RSP_HI (CVMX_ADD_IO_SEG(0x0001180000002070ull))
+#define CVMX_MIO_EMM_RSP_LO (CVMX_ADD_IO_SEG(0x0001180000002068ull))
+#define CVMX_MIO_EMM_RSP_STS (CVMX_ADD_IO_SEG(0x0001180000002060ull))
+#define CVMX_MIO_EMM_SAMPLE (CVMX_ADD_IO_SEG(0x0001180000002090ull))
+#define CVMX_MIO_EMM_STS_MASK (CVMX_ADD_IO_SEG(0x0001180000002098ull))
+#define CVMX_MIO_EMM_SWITCH (CVMX_ADD_IO_SEG(0x0001180000002048ull))
+#define CVMX_MIO_EMM_WDOG (CVMX_ADD_IO_SEG(0x0001180000002088ull))
+#define CVMX_MIO_FUS_BNK_DATX(offset) (CVMX_ADD_IO_SEG(0x0001180000001520ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_FUS_DAT0 (CVMX_ADD_IO_SEG(0x0001180000001400ull))
+#define CVMX_MIO_FUS_DAT1 (CVMX_ADD_IO_SEG(0x0001180000001408ull))
+#define CVMX_MIO_FUS_DAT2 (CVMX_ADD_IO_SEG(0x0001180000001410ull))
+#define CVMX_MIO_FUS_DAT3 (CVMX_ADD_IO_SEG(0x0001180000001418ull))
+#define CVMX_MIO_FUS_EMA (CVMX_ADD_IO_SEG(0x0001180000001550ull))
+#define CVMX_MIO_FUS_PDF (CVMX_ADD_IO_SEG(0x0001180000001420ull))
+#define CVMX_MIO_FUS_PLL (CVMX_ADD_IO_SEG(0x0001180000001580ull))
+#define CVMX_MIO_FUS_PROG (CVMX_ADD_IO_SEG(0x0001180000001510ull))
+#define CVMX_MIO_FUS_PROG_TIMES (CVMX_ADD_IO_SEG(0x0001180000001518ull))
+#define CVMX_MIO_FUS_RCMD (CVMX_ADD_IO_SEG(0x0001180000001500ull))
+#define CVMX_MIO_FUS_READ_TIMES (CVMX_ADD_IO_SEG(0x0001180000001570ull))
+#define CVMX_MIO_FUS_REPAIR_RES0 (CVMX_ADD_IO_SEG(0x0001180000001558ull))
+#define CVMX_MIO_FUS_REPAIR_RES1 (CVMX_ADD_IO_SEG(0x0001180000001560ull))
+#define CVMX_MIO_FUS_REPAIR_RES2 (CVMX_ADD_IO_SEG(0x0001180000001568ull))
+#define CVMX_MIO_FUS_SPR_REPAIR_RES (CVMX_ADD_IO_SEG(0x0001180000001548ull))
+#define CVMX_MIO_FUS_SPR_REPAIR_SUM (CVMX_ADD_IO_SEG(0x0001180000001540ull))
+#define CVMX_MIO_FUS_TGG (CVMX_ADD_IO_SEG(0x0001180000001428ull))
+#define CVMX_MIO_FUS_UNLOCK (CVMX_ADD_IO_SEG(0x0001180000001578ull))
+#define CVMX_MIO_FUS_WADR (CVMX_ADD_IO_SEG(0x0001180000001508ull))
+#define CVMX_MIO_GPIO_COMP (CVMX_ADD_IO_SEG(0x00011800000000C8ull))
+#define CVMX_MIO_NDF_DMA_CFG (CVMX_ADD_IO_SEG(0x0001180000000168ull))
+#define CVMX_MIO_NDF_DMA_INT (CVMX_ADD_IO_SEG(0x0001180000000170ull))
+#define CVMX_MIO_NDF_DMA_INT_EN (CVMX_ADD_IO_SEG(0x0001180000000178ull))
+#define CVMX_MIO_PLL_CTL (CVMX_ADD_IO_SEG(0x0001180000001448ull))
+#define CVMX_MIO_PLL_SETTING (CVMX_ADD_IO_SEG(0x0001180000001440ull))
+#define CVMX_MIO_PTP_CKOUT_HI_INCR (CVMX_ADD_IO_SEG(0x0001070000000F40ull))
+#define CVMX_MIO_PTP_CKOUT_LO_INCR (CVMX_ADD_IO_SEG(0x0001070000000F48ull))
+#define CVMX_MIO_PTP_CKOUT_THRESH_HI (CVMX_ADD_IO_SEG(0x0001070000000F38ull))
+#define CVMX_MIO_PTP_CKOUT_THRESH_LO (CVMX_ADD_IO_SEG(0x0001070000000F30ull))
+#define CVMX_MIO_PTP_CLOCK_CFG (CVMX_ADD_IO_SEG(0x0001070000000F00ull))
+#define CVMX_MIO_PTP_CLOCK_COMP (CVMX_ADD_IO_SEG(0x0001070000000F18ull))
+#define CVMX_MIO_PTP_CLOCK_HI (CVMX_ADD_IO_SEG(0x0001070000000F10ull))
+#define CVMX_MIO_PTP_CLOCK_LO (CVMX_ADD_IO_SEG(0x0001070000000F08ull))
+#define CVMX_MIO_PTP_EVT_CNT (CVMX_ADD_IO_SEG(0x0001070000000F28ull))
+#define CVMX_MIO_PTP_PHY_1PPS_IN (CVMX_ADD_IO_SEG(0x0001070000000F70ull))
+#define CVMX_MIO_PTP_PPS_HI_INCR (CVMX_ADD_IO_SEG(0x0001070000000F60ull))
+#define CVMX_MIO_PTP_PPS_LO_INCR (CVMX_ADD_IO_SEG(0x0001070000000F68ull))
+#define CVMX_MIO_PTP_PPS_THRESH_HI (CVMX_ADD_IO_SEG(0x0001070000000F58ull))
+#define CVMX_MIO_PTP_PPS_THRESH_LO (CVMX_ADD_IO_SEG(0x0001070000000F50ull))
+#define CVMX_MIO_PTP_TIMESTAMP (CVMX_ADD_IO_SEG(0x0001070000000F20ull))
+#define CVMX_MIO_QLMX_CFG(offset) (CVMX_ADD_IO_SEG(0x0001180000001590ull) + ((offset) & 7) * 8)
+#define CVMX_MIO_RST_BOOT (CVMX_ADD_IO_SEG(0x0001180000001600ull))
+#define CVMX_MIO_RST_CFG (CVMX_ADD_IO_SEG(0x0001180000001610ull))
+#define CVMX_MIO_RST_CKILL (CVMX_ADD_IO_SEG(0x0001180000001638ull))
+#define CVMX_MIO_RST_CNTLX(offset) (CVMX_ADD_IO_SEG(0x0001180000001648ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_RST_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180000001618ull) + ((offset) & 1) * 8)
+#define CVMX_MIO_RST_DELAY (CVMX_ADD_IO_SEG(0x0001180000001608ull))
+#define CVMX_MIO_RST_INT (CVMX_ADD_IO_SEG(0x0001180000001628ull))
+#define CVMX_MIO_RST_INT_EN (CVMX_ADD_IO_SEG(0x0001180000001630ull))
+#define CVMX_MIO_TWSX_INT(offset) (CVMX_ADD_IO_SEG(0x0001180000001010ull) + ((offset) & 1) * 512)
+#define CVMX_MIO_TWSX_SW_TWSI(offset) (CVMX_ADD_IO_SEG(0x0001180000001000ull) + ((offset) & 1) * 512)
+#define CVMX_MIO_TWSX_SW_TWSI_EXT(offset) (CVMX_ADD_IO_SEG(0x0001180000001018ull) + ((offset) & 1) * 512)
+#define CVMX_MIO_TWSX_TWSI_SW(offset) (CVMX_ADD_IO_SEG(0x0001180000001008ull) + ((offset) & 1) * 512)
+#define CVMX_MIO_UART2_DLH (CVMX_ADD_IO_SEG(0x0001180000000488ull))
+#define CVMX_MIO_UART2_DLL (CVMX_ADD_IO_SEG(0x0001180000000480ull))
+#define CVMX_MIO_UART2_FAR (CVMX_ADD_IO_SEG(0x0001180000000520ull))
+#define CVMX_MIO_UART2_FCR (CVMX_ADD_IO_SEG(0x0001180000000450ull))
+#define CVMX_MIO_UART2_HTX (CVMX_ADD_IO_SEG(0x0001180000000708ull))
+#define CVMX_MIO_UART2_IER (CVMX_ADD_IO_SEG(0x0001180000000408ull))
+#define CVMX_MIO_UART2_IIR (CVMX_ADD_IO_SEG(0x0001180000000410ull))
+#define CVMX_MIO_UART2_LCR (CVMX_ADD_IO_SEG(0x0001180000000418ull))
+#define CVMX_MIO_UART2_LSR (CVMX_ADD_IO_SEG(0x0001180000000428ull))
+#define CVMX_MIO_UART2_MCR (CVMX_ADD_IO_SEG(0x0001180000000420ull))
+#define CVMX_MIO_UART2_MSR (CVMX_ADD_IO_SEG(0x0001180000000430ull))
+#define CVMX_MIO_UART2_RBR (CVMX_ADD_IO_SEG(0x0001180000000400ull))
+#define CVMX_MIO_UART2_RFL (CVMX_ADD_IO_SEG(0x0001180000000608ull))
+#define CVMX_MIO_UART2_RFW (CVMX_ADD_IO_SEG(0x0001180000000530ull))
+#define CVMX_MIO_UART2_SBCR (CVMX_ADD_IO_SEG(0x0001180000000620ull))
+#define CVMX_MIO_UART2_SCR (CVMX_ADD_IO_SEG(0x0001180000000438ull))
+#define CVMX_MIO_UART2_SFE (CVMX_ADD_IO_SEG(0x0001180000000630ull))
+#define CVMX_MIO_UART2_SRR (CVMX_ADD_IO_SEG(0x0001180000000610ull))
+#define CVMX_MIO_UART2_SRT (CVMX_ADD_IO_SEG(0x0001180000000638ull))
+#define CVMX_MIO_UART2_SRTS (CVMX_ADD_IO_SEG(0x0001180000000618ull))
+#define CVMX_MIO_UART2_STT (CVMX_ADD_IO_SEG(0x0001180000000700ull))
+#define CVMX_MIO_UART2_TFL (CVMX_ADD_IO_SEG(0x0001180000000600ull))
+#define CVMX_MIO_UART2_TFR (CVMX_ADD_IO_SEG(0x0001180000000528ull))
+#define CVMX_MIO_UART2_THR (CVMX_ADD_IO_SEG(0x0001180000000440ull))
+#define CVMX_MIO_UART2_USR (CVMX_ADD_IO_SEG(0x0001180000000538ull))
+#define CVMX_MIO_UARTX_DLH(offset) (CVMX_ADD_IO_SEG(0x0001180000000888ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_DLL(offset) (CVMX_ADD_IO_SEG(0x0001180000000880ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_FAR(offset) (CVMX_ADD_IO_SEG(0x0001180000000920ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_FCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000850ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_HTX(offset) (CVMX_ADD_IO_SEG(0x0001180000000B08ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_IER(offset) (CVMX_ADD_IO_SEG(0x0001180000000808ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_IIR(offset) (CVMX_ADD_IO_SEG(0x0001180000000810ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_LCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000818ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_LSR(offset) (CVMX_ADD_IO_SEG(0x0001180000000828ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_MCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000820ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_MSR(offset) (CVMX_ADD_IO_SEG(0x0001180000000830ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_RBR(offset) (CVMX_ADD_IO_SEG(0x0001180000000800ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_RFL(offset) (CVMX_ADD_IO_SEG(0x0001180000000A08ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_RFW(offset) (CVMX_ADD_IO_SEG(0x0001180000000930ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SBCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000A20ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000838ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SFE(offset) (CVMX_ADD_IO_SEG(0x0001180000000A30ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SRR(offset) (CVMX_ADD_IO_SEG(0x0001180000000A10ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SRT(offset) (CVMX_ADD_IO_SEG(0x0001180000000A38ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SRTS(offset) (CVMX_ADD_IO_SEG(0x0001180000000A18ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_STT(offset) (CVMX_ADD_IO_SEG(0x0001180000000B00ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_TFL(offset) (CVMX_ADD_IO_SEG(0x0001180000000A00ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_TFR(offset) (CVMX_ADD_IO_SEG(0x0001180000000928ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_THR(offset) (CVMX_ADD_IO_SEG(0x0001180000000840ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_USR(offset) (CVMX_ADD_IO_SEG(0x0001180000000938ull) + ((offset) & 1) * 1024)
+
+union cvmx_mio_boot_bist_stat {
+ uint64_t u64;
+ struct cvmx_mio_boot_bist_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_mio_boot_bist_stat_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t ncbo_1:1;
+ uint64_t ncbo_0:1;
+ uint64_t loc:1;
+ uint64_t ncbi:1;
+#else
+ uint64_t ncbi:1;
+ uint64_t loc:1;
+ uint64_t ncbo_0:1;
+ uint64_t ncbo_1:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } cn30xx;
+ struct cvmx_mio_boot_bist_stat_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t ncbo_0:1;
+ uint64_t loc:1;
+ uint64_t ncbi:1;
+#else
+ uint64_t ncbi:1;
+ uint64_t loc:1;
+ uint64_t ncbo_0:1;
+ uint64_t reserved_3_63:61;
+#endif
+ } cn38xx;
+ struct cvmx_mio_boot_bist_stat_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t pcm_1:1;
+ uint64_t pcm_0:1;
+ uint64_t ncbo_1:1;
+ uint64_t ncbo_0:1;
+ uint64_t loc:1;
+ uint64_t ncbi:1;
+#else
+ uint64_t ncbi:1;
+ uint64_t loc:1;
+ uint64_t ncbo_0:1;
+ uint64_t ncbo_1:1;
+ uint64_t pcm_0:1;
+ uint64_t pcm_1:1;
+ uint64_t reserved_6_63:58;
+#endif
+ } cn50xx;
+ struct cvmx_mio_boot_bist_stat_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t ndf:2;
+ uint64_t ncbo_0:1;
+ uint64_t dma:1;
+ uint64_t loc:1;
+ uint64_t ncbi:1;
+#else
+ uint64_t ncbi:1;
+ uint64_t loc:1;
+ uint64_t dma:1;
+ uint64_t ncbo_0:1;
+ uint64_t ndf:2;
+ uint64_t reserved_6_63:58;
+#endif
+ } cn52xx;
+ struct cvmx_mio_boot_bist_stat_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t ncbo_0:1;
+ uint64_t dma:1;
+ uint64_t loc:1;
+ uint64_t ncbi:1;
+#else
+ uint64_t ncbi:1;
+ uint64_t loc:1;
+ uint64_t dma:1;
+ uint64_t ncbo_0:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } cn52xxp1;
+ struct cvmx_mio_boot_bist_stat_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t stat:12;
+#else
+ uint64_t stat:12;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn61xx;
+ struct cvmx_mio_boot_bist_stat_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t stat:9;
+#else
+ uint64_t stat:9;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn63xx;
+ struct cvmx_mio_boot_bist_stat_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t stat:10;
+#else
+ uint64_t stat:10;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn66xx;
+};
+
+union cvmx_mio_boot_comp {
+ uint64_t u64;
+ struct cvmx_mio_boot_comp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_mio_boot_comp_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t pctl:5;
+ uint64_t nctl:5;
+#else
+ uint64_t nctl:5;
+ uint64_t pctl:5;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn50xx;
+ struct cvmx_mio_boot_comp_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t pctl:6;
+ uint64_t nctl:6;
+#else
+ uint64_t nctl:6;
+ uint64_t pctl:6;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn61xx;
+};
+
+union cvmx_mio_boot_dma_cfgx {
+ uint64_t u64;
+ struct cvmx_mio_boot_dma_cfgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t en:1;
+ uint64_t rw:1;
+ uint64_t clr:1;
+ uint64_t reserved_60_60:1;
+ uint64_t swap32:1;
+ uint64_t swap16:1;
+ uint64_t swap8:1;
+ uint64_t endian:1;
+ uint64_t size:20;
+ uint64_t adr:36;
+#else
+ uint64_t adr:36;
+ uint64_t size:20;
+ uint64_t endian:1;
+ uint64_t swap8:1;
+ uint64_t swap16:1;
+ uint64_t swap32:1;
+ uint64_t reserved_60_60:1;
+ uint64_t clr:1;
+ uint64_t rw:1;
+ uint64_t en:1;
+#endif
+ } s;
+};
+
+union cvmx_mio_boot_dma_intx {
+ uint64_t u64;
+ struct cvmx_mio_boot_dma_intx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t dmarq:1;
+ uint64_t done:1;
+#else
+ uint64_t done:1;
+ uint64_t dmarq:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_mio_boot_dma_int_enx {
+ uint64_t u64;
+ struct cvmx_mio_boot_dma_int_enx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t dmarq:1;
+ uint64_t done:1;
+#else
+ uint64_t done:1;
+ uint64_t dmarq:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_mio_boot_dma_timx {
+ uint64_t u64;
+ struct cvmx_mio_boot_dma_timx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dmack_pi:1;
+ uint64_t dmarq_pi:1;
+ uint64_t tim_mult:2;
+ uint64_t rd_dly:3;
+ uint64_t ddr:1;
+ uint64_t width:1;
+ uint64_t reserved_48_54:7;
+ uint64_t pause:6;
+ uint64_t dmack_h:6;
+ uint64_t we_n:6;
+ uint64_t we_a:6;
+ uint64_t oe_n:6;
+ uint64_t oe_a:6;
+ uint64_t dmack_s:6;
+ uint64_t dmarq:6;
+#else
+ uint64_t dmarq:6;
+ uint64_t dmack_s:6;
+ uint64_t oe_a:6;
+ uint64_t oe_n:6;
+ uint64_t we_a:6;
+ uint64_t we_n:6;
+ uint64_t dmack_h:6;
+ uint64_t pause:6;
+ uint64_t reserved_48_54:7;
+ uint64_t width:1;
+ uint64_t ddr:1;
+ uint64_t rd_dly:3;
+ uint64_t tim_mult:2;
+ uint64_t dmarq_pi:1;
+ uint64_t dmack_pi:1;
+#endif
+ } s;
+};
+
+union cvmx_mio_boot_err {
+ uint64_t u64;
+ struct cvmx_mio_boot_err_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t wait_err:1;
+ uint64_t adr_err:1;
+#else
+ uint64_t adr_err:1;
+ uint64_t wait_err:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_mio_boot_int {
+ uint64_t u64;
+ struct cvmx_mio_boot_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t wait_int:1;
+ uint64_t adr_int:1;
+#else
+ uint64_t adr_int:1;
+ uint64_t wait_int:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_mio_boot_loc_adr {
+ uint64_t u64;
+ struct cvmx_mio_boot_loc_adr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t adr:5;
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t adr:5;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_boot_loc_cfgx {
+ uint64_t u64;
+ struct cvmx_mio_boot_loc_cfgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t en:1;
+ uint64_t reserved_28_30:3;
+ uint64_t base:25;
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t base:25;
+ uint64_t reserved_28_30:3;
+ uint64_t en:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_mio_boot_loc_dat {
+ uint64_t u64;
+ struct cvmx_mio_boot_loc_dat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } s;
+};
+
+union cvmx_mio_boot_pin_defs {
+ uint64_t u64;
+ struct cvmx_mio_boot_pin_defs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t user1:16;
+ uint64_t ale:1;
+ uint64_t width:1;
+ uint64_t dmack_p2:1;
+ uint64_t dmack_p1:1;
+ uint64_t dmack_p0:1;
+ uint64_t term:2;
+ uint64_t nand:1;
+ uint64_t user0:8;
+#else
+ uint64_t user0:8;
+ uint64_t nand:1;
+ uint64_t term:2;
+ uint64_t dmack_p0:1;
+ uint64_t dmack_p1:1;
+ uint64_t dmack_p2:1;
+ uint64_t width:1;
+ uint64_t ale:1;
+ uint64_t user1:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_mio_boot_pin_defs_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t ale:1;
+ uint64_t width:1;
+ uint64_t reserved_13_13:1;
+ uint64_t dmack_p1:1;
+ uint64_t dmack_p0:1;
+ uint64_t term:2;
+ uint64_t nand:1;
+ uint64_t reserved_0_7:8;
+#else
+ uint64_t reserved_0_7:8;
+ uint64_t nand:1;
+ uint64_t term:2;
+ uint64_t dmack_p0:1;
+ uint64_t dmack_p1:1;
+ uint64_t reserved_13_13:1;
+ uint64_t width:1;
+ uint64_t ale:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } cn52xx;
+ struct cvmx_mio_boot_pin_defs_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t ale:1;
+ uint64_t width:1;
+ uint64_t dmack_p2:1;
+ uint64_t dmack_p1:1;
+ uint64_t dmack_p0:1;
+ uint64_t term:2;
+ uint64_t reserved_0_8:9;
+#else
+ uint64_t reserved_0_8:9;
+ uint64_t term:2;
+ uint64_t dmack_p0:1;
+ uint64_t dmack_p1:1;
+ uint64_t dmack_p2:1;
+ uint64_t width:1;
+ uint64_t ale:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } cn56xx;
+ struct cvmx_mio_boot_pin_defs_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t user1:16;
+ uint64_t ale:1;
+ uint64_t width:1;
+ uint64_t reserved_13_13:1;
+ uint64_t dmack_p1:1;
+ uint64_t dmack_p0:1;
+ uint64_t term:2;
+ uint64_t nand:1;
+ uint64_t user0:8;
+#else
+ uint64_t user0:8;
+ uint64_t nand:1;
+ uint64_t term:2;
+ uint64_t dmack_p0:1;
+ uint64_t dmack_p1:1;
+ uint64_t reserved_13_13:1;
+ uint64_t width:1;
+ uint64_t ale:1;
+ uint64_t user1:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn61xx;
+};
+
+union cvmx_mio_boot_reg_cfgx {
+ uint64_t u64;
+ struct cvmx_mio_boot_reg_cfgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t dmack:2;
+ uint64_t tim_mult:2;
+ uint64_t rd_dly:3;
+ uint64_t sam:1;
+ uint64_t we_ext:2;
+ uint64_t oe_ext:2;
+ uint64_t en:1;
+ uint64_t orbit:1;
+ uint64_t ale:1;
+ uint64_t width:1;
+ uint64_t size:12;
+ uint64_t base:16;
+#else
+ uint64_t base:16;
+ uint64_t size:12;
+ uint64_t width:1;
+ uint64_t ale:1;
+ uint64_t orbit:1;
+ uint64_t en:1;
+ uint64_t oe_ext:2;
+ uint64_t we_ext:2;
+ uint64_t sam:1;
+ uint64_t rd_dly:3;
+ uint64_t tim_mult:2;
+ uint64_t dmack:2;
+ uint64_t reserved_44_63:20;
+#endif
+ } s;
+ struct cvmx_mio_boot_reg_cfgx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63:27;
+ uint64_t sam:1;
+ uint64_t we_ext:2;
+ uint64_t oe_ext:2;
+ uint64_t en:1;
+ uint64_t orbit:1;
+ uint64_t ale:1;
+ uint64_t width:1;
+ uint64_t size:12;
+ uint64_t base:16;
+#else
+ uint64_t base:16;
+ uint64_t size:12;
+ uint64_t width:1;
+ uint64_t ale:1;
+ uint64_t orbit:1;
+ uint64_t en:1;
+ uint64_t oe_ext:2;
+ uint64_t we_ext:2;
+ uint64_t sam:1;
+ uint64_t reserved_37_63:27;
+#endif
+ } cn30xx;
+ struct cvmx_mio_boot_reg_cfgx_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t en:1;
+ uint64_t orbit:1;
+ uint64_t reserved_28_29:2;
+ uint64_t size:12;
+ uint64_t base:16;
+#else
+ uint64_t base:16;
+ uint64_t size:12;
+ uint64_t reserved_28_29:2;
+ uint64_t orbit:1;
+ uint64_t en:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn38xx;
+ struct cvmx_mio_boot_reg_cfgx_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63:22;
+ uint64_t tim_mult:2;
+ uint64_t rd_dly:3;
+ uint64_t sam:1;
+ uint64_t we_ext:2;
+ uint64_t oe_ext:2;
+ uint64_t en:1;
+ uint64_t orbit:1;
+ uint64_t ale:1;
+ uint64_t width:1;
+ uint64_t size:12;
+ uint64_t base:16;
+#else
+ uint64_t base:16;
+ uint64_t size:12;
+ uint64_t width:1;
+ uint64_t ale:1;
+ uint64_t orbit:1;
+ uint64_t en:1;
+ uint64_t oe_ext:2;
+ uint64_t we_ext:2;
+ uint64_t sam:1;
+ uint64_t rd_dly:3;
+ uint64_t tim_mult:2;
+ uint64_t reserved_42_63:22;
+#endif
+ } cn50xx;
+};
+
+union cvmx_mio_boot_reg_timx {
+ uint64_t u64;
+ struct cvmx_mio_boot_reg_timx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pagem:1;
+ uint64_t waitm:1;
+ uint64_t pages:2;
+ uint64_t ale:6;
+ uint64_t page:6;
+ uint64_t wait:6;
+ uint64_t pause:6;
+ uint64_t wr_hld:6;
+ uint64_t rd_hld:6;
+ uint64_t we:6;
+ uint64_t oe:6;
+ uint64_t ce:6;
+ uint64_t adr:6;
+#else
+ uint64_t adr:6;
+ uint64_t ce:6;
+ uint64_t oe:6;
+ uint64_t we:6;
+ uint64_t rd_hld:6;
+ uint64_t wr_hld:6;
+ uint64_t pause:6;
+ uint64_t wait:6;
+ uint64_t page:6;
+ uint64_t ale:6;
+ uint64_t pages:2;
+ uint64_t waitm:1;
+ uint64_t pagem:1;
+#endif
+ } s;
+ struct cvmx_mio_boot_reg_timx_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pagem:1;
+ uint64_t waitm:1;
+ uint64_t pages:2;
+ uint64_t reserved_54_59:6;
+ uint64_t page:6;
+ uint64_t wait:6;
+ uint64_t pause:6;
+ uint64_t wr_hld:6;
+ uint64_t rd_hld:6;
+ uint64_t we:6;
+ uint64_t oe:6;
+ uint64_t ce:6;
+ uint64_t adr:6;
+#else
+ uint64_t adr:6;
+ uint64_t ce:6;
+ uint64_t oe:6;
+ uint64_t we:6;
+ uint64_t rd_hld:6;
+ uint64_t wr_hld:6;
+ uint64_t pause:6;
+ uint64_t wait:6;
+ uint64_t page:6;
+ uint64_t reserved_54_59:6;
+ uint64_t pages:2;
+ uint64_t waitm:1;
+ uint64_t pagem:1;
+#endif
+ } cn38xx;
+};
+
+union cvmx_mio_boot_thr {
+ uint64_t u64;
+ struct cvmx_mio_boot_thr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t dma_thr:6;
+ uint64_t reserved_14_15:2;
+ uint64_t fif_cnt:6;
+ uint64_t reserved_6_7:2;
+ uint64_t fif_thr:6;
+#else
+ uint64_t fif_thr:6;
+ uint64_t reserved_6_7:2;
+ uint64_t fif_cnt:6;
+ uint64_t reserved_14_15:2;
+ uint64_t dma_thr:6;
+ uint64_t reserved_22_63:42;
+#endif
+ } s;
+ struct cvmx_mio_boot_thr_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t fif_cnt:6;
+ uint64_t reserved_6_7:2;
+ uint64_t fif_thr:6;
+#else
+ uint64_t fif_thr:6;
+ uint64_t reserved_6_7:2;
+ uint64_t fif_cnt:6;
+ uint64_t reserved_14_63:50;
+#endif
+ } cn30xx;
+};
+
+union cvmx_mio_emm_buf_dat {
+ uint64_t u64;
+ struct cvmx_mio_emm_buf_dat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dat:64;
+#else
+ uint64_t dat:64;
+#endif
+ } s;
+};
+
+union cvmx_mio_emm_buf_idx {
+ uint64_t u64;
+ struct cvmx_mio_emm_buf_idx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t inc:1;
+ uint64_t reserved_7_15:9;
+ uint64_t buf_num:1;
+ uint64_t offset:6;
+#else
+ uint64_t offset:6;
+ uint64_t buf_num:1;
+ uint64_t reserved_7_15:9;
+ uint64_t inc:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } s;
+};
+
+union cvmx_mio_emm_cfg {
+ uint64_t u64;
+ struct cvmx_mio_emm_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t boot_fail:1;
+ uint64_t reserved_4_15:12;
+ uint64_t bus_ena:4;
+#else
+ uint64_t bus_ena:4;
+ uint64_t reserved_4_15:12;
+ uint64_t boot_fail:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } s;
+};
+
+union cvmx_mio_emm_cmd {
+ uint64_t u64;
+ struct cvmx_mio_emm_cmd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ uint64_t bus_id:2;
+ uint64_t cmd_val:1;
+ uint64_t reserved_56_58:3;
+ uint64_t dbuf:1;
+ uint64_t offset:6;
+ uint64_t reserved_43_48:6;
+ uint64_t ctype_xor:2;
+ uint64_t rtype_xor:3;
+ uint64_t cmd_idx:6;
+ uint64_t arg:32;
+#else
+ uint64_t arg:32;
+ uint64_t cmd_idx:6;
+ uint64_t rtype_xor:3;
+ uint64_t ctype_xor:2;
+ uint64_t reserved_43_48:6;
+ uint64_t offset:6;
+ uint64_t dbuf:1;
+ uint64_t reserved_56_58:3;
+ uint64_t cmd_val:1;
+ uint64_t bus_id:2;
+ uint64_t reserved_62_63:2;
+#endif
+ } s;
+};
+
+union cvmx_mio_emm_dma {
+ uint64_t u64;
+ struct cvmx_mio_emm_dma_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ uint64_t bus_id:2;
+ uint64_t dma_val:1;
+ uint64_t sector:1;
+ uint64_t dat_null:1;
+ uint64_t thres:6;
+ uint64_t rel_wr:1;
+ uint64_t rw:1;
+ uint64_t multi:1;
+ uint64_t block_cnt:16;
+ uint64_t card_addr:32;
+#else
+ uint64_t card_addr:32;
+ uint64_t block_cnt:16;
+ uint64_t multi:1;
+ uint64_t rw:1;
+ uint64_t rel_wr:1;
+ uint64_t thres:6;
+ uint64_t dat_null:1;
+ uint64_t sector:1;
+ uint64_t dma_val:1;
+ uint64_t bus_id:2;
+ uint64_t reserved_62_63:2;
+#endif
+ } s;
+};
+
+union cvmx_mio_emm_int {
+ uint64_t u64;
+ struct cvmx_mio_emm_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t switch_err:1;
+ uint64_t switch_done:1;
+ uint64_t dma_err:1;
+ uint64_t cmd_err:1;
+ uint64_t dma_done:1;
+ uint64_t cmd_done:1;
+ uint64_t buf_done:1;
+#else
+ uint64_t buf_done:1;
+ uint64_t cmd_done:1;
+ uint64_t dma_done:1;
+ uint64_t cmd_err:1;
+ uint64_t dma_err:1;
+ uint64_t switch_done:1;
+ uint64_t switch_err:1;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_mio_emm_int_en {
+ uint64_t u64;
+ struct cvmx_mio_emm_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t switch_err:1;
+ uint64_t switch_done:1;
+ uint64_t dma_err:1;
+ uint64_t cmd_err:1;
+ uint64_t dma_done:1;
+ uint64_t cmd_done:1;
+ uint64_t buf_done:1;
+#else
+ uint64_t buf_done:1;
+ uint64_t cmd_done:1;
+ uint64_t dma_done:1;
+ uint64_t cmd_err:1;
+ uint64_t dma_err:1;
+ uint64_t switch_done:1;
+ uint64_t switch_err:1;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_mio_emm_modex {
+ uint64_t u64;
+ struct cvmx_mio_emm_modex_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63:15;
+ uint64_t hs_timing:1;
+ uint64_t reserved_43_47:5;
+ uint64_t bus_width:3;
+ uint64_t reserved_36_39:4;
+ uint64_t power_class:4;
+ uint64_t clk_hi:16;
+ uint64_t clk_lo:16;
+#else
+ uint64_t clk_lo:16;
+ uint64_t clk_hi:16;
+ uint64_t power_class:4;
+ uint64_t reserved_36_39:4;
+ uint64_t bus_width:3;
+ uint64_t reserved_43_47:5;
+ uint64_t hs_timing:1;
+ uint64_t reserved_49_63:15;
+#endif
+ } s;
+};
+
+union cvmx_mio_emm_rca {
+ uint64_t u64;
+ struct cvmx_mio_emm_rca_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t card_rca:16;
+#else
+ uint64_t card_rca:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_mio_emm_rsp_hi {
+ uint64_t u64;
+ struct cvmx_mio_emm_rsp_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dat:64;
+#else
+ uint64_t dat:64;
+#endif
+ } s;
+};
+
+union cvmx_mio_emm_rsp_lo {
+ uint64_t u64;
+ struct cvmx_mio_emm_rsp_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dat:64;
+#else
+ uint64_t dat:64;
+#endif
+ } s;
+};
+
+union cvmx_mio_emm_rsp_sts {
+ uint64_t u64;
+ struct cvmx_mio_emm_rsp_sts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ uint64_t bus_id:2;
+ uint64_t cmd_val:1;
+ uint64_t switch_val:1;
+ uint64_t dma_val:1;
+ uint64_t dma_pend:1;
+ uint64_t reserved_29_55:27;
+ uint64_t dbuf_err:1;
+ uint64_t reserved_24_27:4;
+ uint64_t dbuf:1;
+ uint64_t blk_timeout:1;
+ uint64_t blk_crc_err:1;
+ uint64_t rsp_busybit:1;
+ uint64_t stp_timeout:1;
+ uint64_t stp_crc_err:1;
+ uint64_t stp_bad_sts:1;
+ uint64_t stp_val:1;
+ uint64_t rsp_timeout:1;
+ uint64_t rsp_crc_err:1;
+ uint64_t rsp_bad_sts:1;
+ uint64_t rsp_val:1;
+ uint64_t rsp_type:3;
+ uint64_t cmd_type:2;
+ uint64_t cmd_idx:6;
+ uint64_t cmd_done:1;
+#else
+ uint64_t cmd_done:1;
+ uint64_t cmd_idx:6;
+ uint64_t cmd_type:2;
+ uint64_t rsp_type:3;
+ uint64_t rsp_val:1;
+ uint64_t rsp_bad_sts:1;
+ uint64_t rsp_crc_err:1;
+ uint64_t rsp_timeout:1;
+ uint64_t stp_val:1;
+ uint64_t stp_bad_sts:1;
+ uint64_t stp_crc_err:1;
+ uint64_t stp_timeout:1;
+ uint64_t rsp_busybit:1;
+ uint64_t blk_crc_err:1;
+ uint64_t blk_timeout:1;
+ uint64_t dbuf:1;
+ uint64_t reserved_24_27:4;
+ uint64_t dbuf_err:1;
+ uint64_t reserved_29_55:27;
+ uint64_t dma_pend:1;
+ uint64_t dma_val:1;
+ uint64_t switch_val:1;
+ uint64_t cmd_val:1;
+ uint64_t bus_id:2;
+ uint64_t reserved_62_63:2;
+#endif
+ } s;
+};
+
+union cvmx_mio_emm_sample {
+ uint64_t u64;
+ struct cvmx_mio_emm_sample_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63:38;
+ uint64_t cmd_cnt:10;
+ uint64_t reserved_10_15:6;
+ uint64_t dat_cnt:10;
+#else
+ uint64_t dat_cnt:10;
+ uint64_t reserved_10_15:6;
+ uint64_t cmd_cnt:10;
+ uint64_t reserved_26_63:38;
+#endif
+ } s;
+};
+
+union cvmx_mio_emm_sts_mask {
+ uint64_t u64;
+ struct cvmx_mio_emm_sts_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t sts_msk:32;
+#else
+ uint64_t sts_msk:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_mio_emm_switch {
+ uint64_t u64;
+ struct cvmx_mio_emm_switch_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ uint64_t bus_id:2;
+ uint64_t switch_exe:1;
+ uint64_t switch_err0:1;
+ uint64_t switch_err1:1;
+ uint64_t switch_err2:1;
+ uint64_t reserved_49_55:7;
+ uint64_t hs_timing:1;
+ uint64_t reserved_43_47:5;
+ uint64_t bus_width:3;
+ uint64_t reserved_36_39:4;
+ uint64_t power_class:4;
+ uint64_t clk_hi:16;
+ uint64_t clk_lo:16;
+#else
+ uint64_t clk_lo:16;
+ uint64_t clk_hi:16;
+ uint64_t power_class:4;
+ uint64_t reserved_36_39:4;
+ uint64_t bus_width:3;
+ uint64_t reserved_43_47:5;
+ uint64_t hs_timing:1;
+ uint64_t reserved_49_55:7;
+ uint64_t switch_err2:1;
+ uint64_t switch_err1:1;
+ uint64_t switch_err0:1;
+ uint64_t switch_exe:1;
+ uint64_t bus_id:2;
+ uint64_t reserved_62_63:2;
+#endif
+ } s;
+};
+
+union cvmx_mio_emm_wdog {
+ uint64_t u64;
+ struct cvmx_mio_emm_wdog_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63:38;
+ uint64_t clk_cnt:26;
+#else
+ uint64_t clk_cnt:26;
+ uint64_t reserved_26_63:38;
+#endif
+ } s;
+};
+
+union cvmx_mio_fus_bnk_datx {
+ uint64_t u64;
+ struct cvmx_mio_fus_bnk_datx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dat:64;
+#else
+ uint64_t dat:64;
+#endif
+ } s;
+};
+
+union cvmx_mio_fus_dat0 {
+ uint64_t u64;
+ struct cvmx_mio_fus_dat0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t man_info:32;
+#else
+ uint64_t man_info:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_mio_fus_dat1 {
+ uint64_t u64;
+ struct cvmx_mio_fus_dat1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t man_info:32;
+#else
+ uint64_t man_info:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_mio_fus_dat2 {
+ uint64_t u64;
+ struct cvmx_mio_fus_dat2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63:5;
+ uint64_t run_platform:3;
+ uint64_t gbl_pwr_throttle:8;
+ uint64_t fus118:1;
+ uint64_t rom_info:10;
+ uint64_t power_limit:2;
+ uint64_t dorm_crypto:1;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_30_31:2;
+ uint64_t nokasu:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t rst_sht:1;
+ uint64_t bist_dis:1;
+ uint64_t chip_id:8;
+ uint64_t reserved_0_15:16;
+#else
+ uint64_t reserved_0_15:16;
+ uint64_t chip_id:8;
+ uint64_t bist_dis:1;
+ uint64_t rst_sht:1;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t nokasu:1;
+ uint64_t reserved_30_31:2;
+ uint64_t raid_en:1;
+ uint64_t fus318:1;
+ uint64_t dorm_crypto:1;
+ uint64_t power_limit:2;
+ uint64_t rom_info:10;
+ uint64_t fus118:1;
+ uint64_t gbl_pwr_throttle:8;
+ uint64_t run_platform:3;
+ uint64_t reserved_59_63:5;
+#endif
+ } s;
+ struct cvmx_mio_fus_dat2_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t rst_sht:1;
+ uint64_t bist_dis:1;
+ uint64_t chip_id:8;
+ uint64_t pll_off:4;
+ uint64_t reserved_1_11:11;
+ uint64_t pp_dis:1;
+#else
+ uint64_t pp_dis:1;
+ uint64_t reserved_1_11:11;
+ uint64_t pll_off:4;
+ uint64_t chip_id:8;
+ uint64_t bist_dis:1;
+ uint64_t rst_sht:1;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn30xx;
+ struct cvmx_mio_fus_dat2_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t rst_sht:1;
+ uint64_t bist_dis:1;
+ uint64_t chip_id:8;
+ uint64_t pll_off:4;
+ uint64_t reserved_2_11:10;
+ uint64_t pp_dis:2;
+#else
+ uint64_t pp_dis:2;
+ uint64_t reserved_2_11:10;
+ uint64_t pll_off:4;
+ uint64_t chip_id:8;
+ uint64_t bist_dis:1;
+ uint64_t rst_sht:1;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn31xx;
+ struct cvmx_mio_fus_dat2_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t rst_sht:1;
+ uint64_t bist_dis:1;
+ uint64_t chip_id:8;
+ uint64_t pp_dis:16;
+#else
+ uint64_t pp_dis:16;
+ uint64_t chip_id:8;
+ uint64_t bist_dis:1;
+ uint64_t rst_sht:1;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn38xx;
+ struct cvmx_mio_fus_dat2_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_30_31:2;
+ uint64_t nokasu:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t rst_sht:1;
+ uint64_t bist_dis:1;
+ uint64_t chip_id:8;
+ uint64_t reserved_2_15:14;
+ uint64_t pp_dis:2;
+#else
+ uint64_t pp_dis:2;
+ uint64_t reserved_2_15:14;
+ uint64_t chip_id:8;
+ uint64_t bist_dis:1;
+ uint64_t rst_sht:1;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t nokasu:1;
+ uint64_t reserved_30_31:2;
+ uint64_t raid_en:1;
+ uint64_t fus318:1;
+ uint64_t reserved_34_63:30;
+#endif
+ } cn50xx;
+ struct cvmx_mio_fus_dat2_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_30_31:2;
+ uint64_t nokasu:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t rst_sht:1;
+ uint64_t bist_dis:1;
+ uint64_t chip_id:8;
+ uint64_t reserved_4_15:12;
+ uint64_t pp_dis:4;
+#else
+ uint64_t pp_dis:4;
+ uint64_t reserved_4_15:12;
+ uint64_t chip_id:8;
+ uint64_t bist_dis:1;
+ uint64_t rst_sht:1;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t nokasu:1;
+ uint64_t reserved_30_31:2;
+ uint64_t raid_en:1;
+ uint64_t fus318:1;
+ uint64_t reserved_34_63:30;
+#endif
+ } cn52xx;
+ struct cvmx_mio_fus_dat2_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_30_31:2;
+ uint64_t nokasu:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t rst_sht:1;
+ uint64_t bist_dis:1;
+ uint64_t chip_id:8;
+ uint64_t reserved_12_15:4;
+ uint64_t pp_dis:12;
+#else
+ uint64_t pp_dis:12;
+ uint64_t reserved_12_15:4;
+ uint64_t chip_id:8;
+ uint64_t bist_dis:1;
+ uint64_t rst_sht:1;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t nokasu:1;
+ uint64_t reserved_30_31:2;
+ uint64_t raid_en:1;
+ uint64_t fus318:1;
+ uint64_t reserved_34_63:30;
+#endif
+ } cn56xx;
+ struct cvmx_mio_fus_dat2_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_30_63:34;
+ uint64_t nokasu:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t rst_sht:1;
+ uint64_t bist_dis:1;
+ uint64_t chip_id:8;
+ uint64_t pp_dis:16;
+#else
+ uint64_t pp_dis:16;
+ uint64_t chip_id:8;
+ uint64_t bist_dis:1;
+ uint64_t rst_sht:1;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t nokasu:1;
+ uint64_t reserved_30_63:34;
+#endif
+ } cn58xx;
+ struct cvmx_mio_fus_dat2_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t fus118:1;
+ uint64_t rom_info:10;
+ uint64_t power_limit:2;
+ uint64_t dorm_crypto:1;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_29_31:3;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t reserved_24_25:2;
+ uint64_t chip_id:8;
+ uint64_t reserved_4_15:12;
+ uint64_t pp_dis:4;
+#else
+ uint64_t pp_dis:4;
+ uint64_t reserved_4_15:12;
+ uint64_t chip_id:8;
+ uint64_t reserved_24_25:2;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t reserved_29_31:3;
+ uint64_t raid_en:1;
+ uint64_t fus318:1;
+ uint64_t dorm_crypto:1;
+ uint64_t power_limit:2;
+ uint64_t rom_info:10;
+ uint64_t fus118:1;
+ uint64_t reserved_48_63:16;
+#endif
+ } cn61xx;
+ struct cvmx_mio_fus_dat2_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_35_63:29;
+ uint64_t dorm_crypto:1;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_29_31:3;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t reserved_24_25:2;
+ uint64_t chip_id:8;
+ uint64_t reserved_6_15:10;
+ uint64_t pp_dis:6;
+#else
+ uint64_t pp_dis:6;
+ uint64_t reserved_6_15:10;
+ uint64_t chip_id:8;
+ uint64_t reserved_24_25:2;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t reserved_29_31:3;
+ uint64_t raid_en:1;
+ uint64_t fus318:1;
+ uint64_t dorm_crypto:1;
+ uint64_t reserved_35_63:29;
+#endif
+ } cn63xx;
+ struct cvmx_mio_fus_dat2_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t fus118:1;
+ uint64_t rom_info:10;
+ uint64_t power_limit:2;
+ uint64_t dorm_crypto:1;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_29_31:3;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t reserved_24_25:2;
+ uint64_t chip_id:8;
+ uint64_t reserved_10_15:6;
+ uint64_t pp_dis:10;
+#else
+ uint64_t pp_dis:10;
+ uint64_t reserved_10_15:6;
+ uint64_t chip_id:8;
+ uint64_t reserved_24_25:2;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t reserved_29_31:3;
+ uint64_t raid_en:1;
+ uint64_t fus318:1;
+ uint64_t dorm_crypto:1;
+ uint64_t power_limit:2;
+ uint64_t rom_info:10;
+ uint64_t fus118:1;
+ uint64_t reserved_48_63:16;
+#endif
+ } cn66xx;
+ struct cvmx_mio_fus_dat2_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63:27;
+ uint64_t power_limit:2;
+ uint64_t dorm_crypto:1;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_29_31:3;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t reserved_24_25:2;
+ uint64_t chip_id:8;
+ uint64_t reserved_0_15:16;
+#else
+ uint64_t reserved_0_15:16;
+ uint64_t chip_id:8;
+ uint64_t reserved_24_25:2;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t reserved_29_31:3;
+ uint64_t raid_en:1;
+ uint64_t fus318:1;
+ uint64_t dorm_crypto:1;
+ uint64_t power_limit:2;
+ uint64_t reserved_37_63:27;
+#endif
+ } cn68xx;
+ struct cvmx_mio_fus_dat2_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t fus118:1;
+ uint64_t rom_info:10;
+ uint64_t power_limit:2;
+ uint64_t dorm_crypto:1;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_31_29:3;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t reserved_25_24:2;
+ uint64_t chip_id:8;
+ uint64_t reserved_15_0:16;
+#else
+ uint64_t reserved_15_0:16;
+ uint64_t chip_id:8;
+ uint64_t reserved_25_24:2;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t reserved_31_29:3;
+ uint64_t raid_en:1;
+ uint64_t fus318:1;
+ uint64_t dorm_crypto:1;
+ uint64_t power_limit:2;
+ uint64_t rom_info:10;
+ uint64_t fus118:1;
+ uint64_t reserved_48_63:16;
+#endif
+ } cn70xx;
+ struct cvmx_mio_fus_dat2_cn73xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63:5;
+ uint64_t run_platform:3;
+ uint64_t gbl_pwr_throttle:8;
+ uint64_t fus118:1;
+ uint64_t rom_info:10;
+ uint64_t power_limit:2;
+ uint64_t dorm_crypto:1;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_31_29:3;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t reserved_25_24:2;
+ uint64_t chip_id:8;
+ uint64_t reserved_15_0:16;
+#else
+ uint64_t reserved_15_0:16;
+ uint64_t chip_id:8;
+ uint64_t reserved_25_24:2;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t reserved_31_29:3;
+ uint64_t raid_en:1;
+ uint64_t fus318:1;
+ uint64_t dorm_crypto:1;
+ uint64_t power_limit:2;
+ uint64_t rom_info:10;
+ uint64_t fus118:1;
+ uint64_t gbl_pwr_throttle:8;
+ uint64_t run_platform:3;
+ uint64_t reserved_59_63:5;
+#endif
+ } cn73xx;
+ struct cvmx_mio_fus_dat2_cn78xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63:5;
+ uint64_t run_platform:3;
+ uint64_t reserved_48_55:8;
+ uint64_t fus118:1;
+ uint64_t rom_info:10;
+ uint64_t power_limit:2;
+ uint64_t dorm_crypto:1;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_31_29:3;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t reserved_25_24:2;
+ uint64_t chip_id:8;
+ uint64_t reserved_0_15:16;
+#else
+ uint64_t reserved_0_15:16;
+ uint64_t chip_id:8;
+ uint64_t reserved_25_24:2;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t reserved_31_29:3;
+ uint64_t raid_en:1;
+ uint64_t fus318:1;
+ uint64_t dorm_crypto:1;
+ uint64_t power_limit:2;
+ uint64_t rom_info:10;
+ uint64_t fus118:1;
+ uint64_t reserved_48_55:8;
+ uint64_t run_platform:3;
+ uint64_t reserved_59_63:5;
+#endif
+ } cn78xx;
+ struct cvmx_mio_fus_dat2_cn78xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63:5;
+ uint64_t run_platform:3;
+ uint64_t gbl_pwr_throttle:8;
+ uint64_t fus118:1;
+ uint64_t rom_info:10;
+ uint64_t power_limit:2;
+ uint64_t dorm_crypto:1;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_31_29:3;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t reserved_25_24:2;
+ uint64_t chip_id:8;
+ uint64_t reserved_0_15:16;
+#else
+ uint64_t reserved_0_15:16;
+ uint64_t chip_id:8;
+ uint64_t reserved_25_24:2;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t reserved_31_29:3;
+ uint64_t raid_en:1;
+ uint64_t fus318:1;
+ uint64_t dorm_crypto:1;
+ uint64_t power_limit:2;
+ uint64_t rom_info:10;
+ uint64_t fus118:1;
+ uint64_t gbl_pwr_throttle:8;
+ uint64_t run_platform:3;
+ uint64_t reserved_59_63:5;
+#endif
+ } cn78xxp2;
+};
+
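+/*
+ * Illustrative sketch only, not part of the hardware definition:
+ * MIO_FUS_DAT2 carries the fused chip ID and feature disables, so it
+ * can be decoded through this union.  The cvmx_read_csr() helper and
+ * the CVMX_MIO_FUS_DAT2 address macro are assumed to be available
+ * alongside these definitions.
+ *
+ *	union cvmx_mio_fus_dat2 fus2;
+ *
+ *	fus2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
+ *	pr_info("chip_id=%u, crypto %sabled\n",
+ *		(unsigned int)fus2.s.chip_id,
+ *		fus2.s.nocrypto ? "dis" : "en");
+ */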
+union cvmx_mio_fus_dat3 {
+ uint64_t u64;
+ struct cvmx_mio_fus_dat3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ema0:6;
+ uint64_t pll_ctl:10;
+ uint64_t dfa_info_dte:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t pll_alt_matrix:1;
+ uint64_t reserved_38_39:2;
+ uint64_t efus_lck_rsv:1;
+ uint64_t efus_lck_man:1;
+ uint64_t pll_half_dis:1;
+ uint64_t l2c_crip:3;
+ uint64_t reserved_28_31:4;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t reserved_0_23:24;
+#else
+ uint64_t reserved_0_23:24;
+ uint64_t nodfa_dte:1;
+ uint64_t nozip:1;
+ uint64_t efus_ign:1;
+ uint64_t efus_lck:1;
+ uint64_t reserved_28_31:4;
+ uint64_t l2c_crip:3;
+ uint64_t pll_half_dis:1;
+ uint64_t efus_lck_man:1;
+ uint64_t efus_lck_rsv:1;
+ uint64_t reserved_38_39:2;
+ uint64_t pll_alt_matrix:1;
+ uint64_t dfa_info_clm:4;
+ uint64_t dfa_info_dte:3;
+ uint64_t pll_ctl:10;
+ uint64_t ema0:6;
+#endif
+ } s;
+ struct cvmx_mio_fus_dat3_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t pll_div4:1;
+ uint64_t reserved_29_30:2;
+ uint64_t bar2_en:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t icache:24;
+#else
+ uint64_t icache:24;
+ uint64_t nodfa_dte:1;
+ uint64_t nozip:1;
+ uint64_t efus_ign:1;
+ uint64_t efus_lck:1;
+ uint64_t bar2_en:1;
+ uint64_t reserved_29_30:2;
+ uint64_t pll_div4:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn30xx;
+ struct cvmx_mio_fus_dat3_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t pll_div4:1;
+ uint64_t zip_crip:2;
+ uint64_t bar2_en:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t icache:24;
+#else
+ uint64_t icache:24;
+ uint64_t nodfa_dte:1;
+ uint64_t nozip:1;
+ uint64_t efus_ign:1;
+ uint64_t efus_lck:1;
+ uint64_t bar2_en:1;
+ uint64_t zip_crip:2;
+ uint64_t pll_div4:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn31xx;
+ struct cvmx_mio_fus_dat3_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63:33;
+ uint64_t zip_crip:2;
+ uint64_t bar2_en:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t icache:24;
+#else
+ uint64_t icache:24;
+ uint64_t nodfa_dte:1;
+ uint64_t nozip:1;
+ uint64_t efus_ign:1;
+ uint64_t efus_lck:1;
+ uint64_t bar2_en:1;
+ uint64_t zip_crip:2;
+ uint64_t reserved_31_63:33;
+#endif
+ } cn38xx;
+ struct cvmx_mio_fus_dat3_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t bar2_en:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t icache:24;
+#else
+ uint64_t icache:24;
+ uint64_t nodfa_dte:1;
+ uint64_t nozip:1;
+ uint64_t efus_ign:1;
+ uint64_t efus_lck:1;
+ uint64_t bar2_en:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn38xxp2;
+ struct cvmx_mio_fus_dat3_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63:6;
+ uint64_t pll_ctl:10;
+ uint64_t dfa_info_dte:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t reserved_40_40:1;
+ uint64_t ema:2;
+ uint64_t efus_lck_rsv:1;
+ uint64_t efus_lck_man:1;
+ uint64_t pll_half_dis:1;
+ uint64_t l2c_crip:3;
+ uint64_t reserved_31_31:1;
+ uint64_t zip_info:2;
+ uint64_t bar2_en:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t reserved_0_23:24;
+#else
+ uint64_t reserved_0_23:24;
+ uint64_t nodfa_dte:1;
+ uint64_t nozip:1;
+ uint64_t efus_ign:1;
+ uint64_t efus_lck:1;
+ uint64_t bar2_en:1;
+ uint64_t zip_info:2;
+ uint64_t reserved_31_31:1;
+ uint64_t l2c_crip:3;
+ uint64_t pll_half_dis:1;
+ uint64_t efus_lck_man:1;
+ uint64_t efus_lck_rsv:1;
+ uint64_t ema:2;
+ uint64_t reserved_40_40:1;
+ uint64_t dfa_info_clm:4;
+ uint64_t dfa_info_dte:3;
+ uint64_t pll_ctl:10;
+ uint64_t reserved_58_63:6;
+#endif
+ } cn61xx;
+ struct cvmx_mio_fus_dat3_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ema0:6;
+ uint64_t pll_ctl:10;
+ uint64_t dfa_info_dte:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t pll_alt_matrix:1;
+ uint64_t pll_bwadj_denom:2;
+ uint64_t efus_lck_rsv:1;
+ uint64_t efus_lck_man:1;
+ uint64_t pll_half_dis:1;
+ uint64_t l2c_crip:3;
+ uint64_t use_int_refclk:1;
+ uint64_t zip_info:2;
+ uint64_t bar2_sz_conf:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t ema1:6;
+ uint64_t reserved_0_17:18;
+#else
+ uint64_t reserved_0_17:18;
+ uint64_t ema1:6;
+ uint64_t nodfa_dte:1;
+ uint64_t nozip:1;
+ uint64_t efus_ign:1;
+ uint64_t efus_lck:1;
+ uint64_t bar2_sz_conf:1;
+ uint64_t zip_info:2;
+ uint64_t use_int_refclk:1;
+ uint64_t l2c_crip:3;
+ uint64_t pll_half_dis:1;
+ uint64_t efus_lck_man:1;
+ uint64_t efus_lck_rsv:1;
+ uint64_t pll_bwadj_denom:2;
+ uint64_t pll_alt_matrix:1;
+ uint64_t dfa_info_clm:4;
+ uint64_t dfa_info_dte:3;
+ uint64_t pll_ctl:10;
+ uint64_t ema0:6;
+#endif
+ } cn70xx;
+ struct cvmx_mio_fus_dat3_cn70xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ema0:6;
+ uint64_t pll_ctl:10;
+ uint64_t dfa_info_dte:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t reserved_38_40:3;
+ uint64_t efus_lck_rsv:1;
+ uint64_t efus_lck_man:1;
+ uint64_t pll_half_dis:1;
+ uint64_t l2c_crip:3;
+ uint64_t reserved_31_31:1;
+ uint64_t zip_info:2;
+ uint64_t bar2_sz_conf:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t ema1:6;
+ uint64_t reserved_0_17:18;
+#else
+ uint64_t reserved_0_17:18;
+ uint64_t ema1:6;
+ uint64_t nodfa_dte:1;
+ uint64_t nozip:1;
+ uint64_t efus_ign:1;
+ uint64_t efus_lck:1;
+ uint64_t bar2_sz_conf:1;
+ uint64_t zip_info:2;
+ uint64_t reserved_31_31:1;
+ uint64_t l2c_crip:3;
+ uint64_t pll_half_dis:1;
+ uint64_t efus_lck_man:1;
+ uint64_t efus_lck_rsv:1;
+ uint64_t reserved_38_40:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t dfa_info_dte:3;
+ uint64_t pll_ctl:10;
+ uint64_t ema0:6;
+#endif
+ } cn70xxp1;
+ struct cvmx_mio_fus_dat3_cn73xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ema0:6;
+ uint64_t pll_ctl:10;
+ uint64_t dfa_info_dte:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t pll_alt_matrix:1;
+ uint64_t pll_bwadj_denom:2;
+ uint64_t efus_lck_rsv:1;
+ uint64_t efus_lck_man:1;
+ uint64_t pll_half_dis:1;
+ uint64_t l2c_crip:3;
+ uint64_t use_int_refclk:1;
+ uint64_t zip_info:2;
+ uint64_t bar2_sz_conf:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t ema1:6;
+ uint64_t nohna_dte:1;
+ uint64_t hna_info_dte:3;
+ uint64_t hna_info_clm:4;
+ uint64_t reserved_9_9:1;
+ uint64_t core_pll_mul:5;
+ uint64_t pnr_pll_mul:4;
+#else
+ uint64_t pnr_pll_mul:4;
+ uint64_t core_pll_mul:5;
+ uint64_t reserved_9_9:1;
+ uint64_t hna_info_clm:4;
+ uint64_t hna_info_dte:3;
+ uint64_t nohna_dte:1;
+ uint64_t ema1:6;
+ uint64_t nodfa_dte:1;
+ uint64_t nozip:1;
+ uint64_t efus_ign:1;
+ uint64_t efus_lck:1;
+ uint64_t bar2_sz_conf:1;
+ uint64_t zip_info:2;
+ uint64_t use_int_refclk:1;
+ uint64_t l2c_crip:3;
+ uint64_t pll_half_dis:1;
+ uint64_t efus_lck_man:1;
+ uint64_t efus_lck_rsv:1;
+ uint64_t pll_bwadj_denom:2;
+ uint64_t pll_alt_matrix:1;
+ uint64_t dfa_info_clm:4;
+ uint64_t dfa_info_dte:3;
+ uint64_t pll_ctl:10;
+ uint64_t ema0:6;
+#endif
+ } cn73xx;
+ struct cvmx_mio_fus_dat3_cn78xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ema0:6;
+ uint64_t pll_ctl:10;
+ uint64_t dfa_info_dte:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t reserved_38_40:3;
+ uint64_t efus_lck_rsv:1;
+ uint64_t efus_lck_man:1;
+ uint64_t pll_half_dis:1;
+ uint64_t l2c_crip:3;
+ uint64_t reserved_31_31:1;
+ uint64_t zip_info:2;
+ uint64_t bar2_sz_conf:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t ema1:6;
+ uint64_t nohna_dte:1;
+ uint64_t hna_info_dte:3;
+ uint64_t hna_info_clm:4;
+ uint64_t reserved_0_9:10;
+#else
+ uint64_t reserved_0_9:10;
+ uint64_t hna_info_clm:4;
+ uint64_t hna_info_dte:3;
+ uint64_t nohna_dte:1;
+ uint64_t ema1:6;
+ uint64_t nodfa_dte:1;
+ uint64_t nozip:1;
+ uint64_t efus_ign:1;
+ uint64_t efus_lck:1;
+ uint64_t bar2_sz_conf:1;
+ uint64_t zip_info:2;
+ uint64_t reserved_31_31:1;
+ uint64_t l2c_crip:3;
+ uint64_t pll_half_dis:1;
+ uint64_t efus_lck_man:1;
+ uint64_t efus_lck_rsv:1;
+ uint64_t reserved_38_40:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t dfa_info_dte:3;
+ uint64_t pll_ctl:10;
+ uint64_t ema0:6;
+#endif
+ } cn78xx;
+ struct cvmx_mio_fus_dat3_cnf75xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ema0:6;
+ uint64_t pll_ctl:10;
+ uint64_t dfa_info_dte:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t pll_alt_matrix:1;
+ uint64_t pll_bwadj_denom:2;
+ uint64_t efus_lck_rsv:1;
+ uint64_t efus_lck_man:1;
+ uint64_t pll_half_dis:1;
+ uint64_t l2c_crip:3;
+ uint64_t use_int_refclk:1;
+ uint64_t zip_info:2;
+ uint64_t bar2_sz_conf:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t ema1:6;
+ uint64_t reserved_9_17:9;
+ uint64_t core_pll_mul:5;
+ uint64_t pnr_pll_mul:4;
+#else
+ uint64_t pnr_pll_mul:4;
+ uint64_t core_pll_mul:5;
+ uint64_t reserved_9_17:9;
+ uint64_t ema1:6;
+ uint64_t nodfa_dte:1;
+ uint64_t nozip:1;
+ uint64_t efus_ign:1;
+ uint64_t efus_lck:1;
+ uint64_t bar2_sz_conf:1;
+ uint64_t zip_info:2;
+ uint64_t use_int_refclk:1;
+ uint64_t l2c_crip:3;
+ uint64_t pll_half_dis:1;
+ uint64_t efus_lck_man:1;
+ uint64_t efus_lck_rsv:1;
+ uint64_t pll_bwadj_denom:2;
+ uint64_t pll_alt_matrix:1;
+ uint64_t dfa_info_clm:4;
+ uint64_t dfa_info_dte:3;
+ uint64_t pll_ctl:10;
+ uint64_t ema0:6;
+#endif
+ } cnf75xx;
+};
+
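+/*
+ * A minimal sketch, assuming cvmx_read_csr() and a CVMX_MIO_FUS_DAT3
+ * address macro: MIO_FUS_DAT3 reports per-unit fuse disables such as
+ * the ZIP and DFA engines, which the union above exposes as single-bit
+ * fields.
+ *
+ *	union cvmx_mio_fus_dat3 fus3;
+ *
+ *	fus3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
+ *	if (fus3.s.nozip)
+ *		pr_info("ZIP unit fused off\n");
+ */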
+union cvmx_mio_fus_ema {
+ uint64_t u64;
+ struct cvmx_mio_fus_ema_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t eff_ema:3;
+ uint64_t reserved_3_3:1;
+ uint64_t ema:3;
+#else
+ uint64_t ema:3;
+ uint64_t reserved_3_3:1;
+ uint64_t eff_ema:3;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+ struct cvmx_mio_fus_ema_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t ema:2;
+#else
+ uint64_t ema:2;
+ uint64_t reserved_2_63:62;
+#endif
+ } cn58xx;
+};
+
+union cvmx_mio_fus_pdf {
+ uint64_t u64;
+ struct cvmx_mio_fus_pdf_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pdf:64;
+#else
+ uint64_t pdf:64;
+#endif
+ } s;
+};
+
+union cvmx_mio_fus_pll {
+ uint64_t u64;
+ struct cvmx_mio_fus_pll_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t rclk_align_r:8;
+ uint64_t rclk_align_l:8;
+ uint64_t reserved_8_31:24;
+ uint64_t c_cout_rst:1;
+ uint64_t c_cout_sel:2;
+ uint64_t pnr_cout_rst:1;
+ uint64_t pnr_cout_sel:2;
+ uint64_t rfslip:1;
+ uint64_t fbslip:1;
+#else
+ uint64_t fbslip:1;
+ uint64_t rfslip:1;
+ uint64_t pnr_cout_sel:2;
+ uint64_t pnr_cout_rst:1;
+ uint64_t c_cout_sel:2;
+ uint64_t c_cout_rst:1;
+ uint64_t reserved_8_31:24;
+ uint64_t rclk_align_l:8;
+ uint64_t rclk_align_r:8;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+ struct cvmx_mio_fus_pll_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t rfslip:1;
+ uint64_t fbslip:1;
+#else
+ uint64_t fbslip:1;
+ uint64_t rfslip:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } cn50xx;
+ struct cvmx_mio_fus_pll_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t c_cout_rst:1;
+ uint64_t c_cout_sel:2;
+ uint64_t pnr_cout_rst:1;
+ uint64_t pnr_cout_sel:2;
+ uint64_t rfslip:1;
+ uint64_t fbslip:1;
+#else
+ uint64_t fbslip:1;
+ uint64_t rfslip:1;
+ uint64_t pnr_cout_sel:2;
+ uint64_t pnr_cout_rst:1;
+ uint64_t c_cout_sel:2;
+ uint64_t c_cout_rst:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } cn61xx;
+};
+
+union cvmx_mio_fus_prog {
+ uint64_t u64;
+ struct cvmx_mio_fus_prog_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t soft:1;
+ uint64_t prog:1;
+#else
+ uint64_t prog:1;
+ uint64_t soft:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+ struct cvmx_mio_fus_prog_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t prog:1;
+#else
+ uint64_t prog:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } cn30xx;
+};
+
+union cvmx_mio_fus_prog_times {
+ uint64_t u64;
+ struct cvmx_mio_fus_prog_times_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_35_63:29;
+ uint64_t vgate_pin:1;
+ uint64_t fsrc_pin:1;
+ uint64_t prog_pin:1;
+ uint64_t reserved_6_31:26;
+ uint64_t setup:6;
+#else
+ uint64_t setup:6;
+ uint64_t reserved_6_31:26;
+ uint64_t prog_pin:1;
+ uint64_t fsrc_pin:1;
+ uint64_t vgate_pin:1;
+ uint64_t reserved_35_63:29;
+#endif
+ } s;
+ struct cvmx_mio_fus_prog_times_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63:31;
+ uint64_t prog_pin:1;
+ uint64_t out:8;
+ uint64_t sclk_lo:4;
+ uint64_t sclk_hi:12;
+ uint64_t setup:8;
+#else
+ uint64_t setup:8;
+ uint64_t sclk_hi:12;
+ uint64_t sclk_lo:4;
+ uint64_t out:8;
+ uint64_t prog_pin:1;
+ uint64_t reserved_33_63:31;
+#endif
+ } cn50xx;
+ struct cvmx_mio_fus_prog_times_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_35_63:29;
+ uint64_t vgate_pin:1;
+ uint64_t fsrc_pin:1;
+ uint64_t prog_pin:1;
+ uint64_t out:7;
+ uint64_t sclk_lo:4;
+ uint64_t sclk_hi:15;
+ uint64_t setup:6;
+#else
+ uint64_t setup:6;
+ uint64_t sclk_hi:15;
+ uint64_t sclk_lo:4;
+ uint64_t out:7;
+ uint64_t prog_pin:1;
+ uint64_t fsrc_pin:1;
+ uint64_t vgate_pin:1;
+ uint64_t reserved_35_63:29;
+#endif
+ } cn61xx;
+};
+
+union cvmx_mio_fus_rcmd {
+ uint64_t u64;
+ struct cvmx_mio_fus_rcmd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t dat:8;
+ uint64_t reserved_13_15:3;
+ uint64_t pend:1;
+ uint64_t reserved_9_11:3;
+ uint64_t efuse:1;
+ uint64_t addr:8;
+#else
+ uint64_t addr:8;
+ uint64_t efuse:1;
+ uint64_t reserved_9_11:3;
+ uint64_t pend:1;
+ uint64_t reserved_13_15:3;
+ uint64_t dat:8;
+ uint64_t reserved_24_63:40;
+#endif
+ } s;
+ struct cvmx_mio_fus_rcmd_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t dat:8;
+ uint64_t reserved_13_15:3;
+ uint64_t pend:1;
+ uint64_t reserved_9_11:3;
+ uint64_t efuse:1;
+ uint64_t reserved_7_7:1;
+ uint64_t addr:7;
+#else
+ uint64_t addr:7;
+ uint64_t reserved_7_7:1;
+ uint64_t efuse:1;
+ uint64_t reserved_9_11:3;
+ uint64_t pend:1;
+ uint64_t reserved_13_15:3;
+ uint64_t dat:8;
+ uint64_t reserved_24_63:40;
+#endif
+ } cn30xx;
+};
+
+union cvmx_mio_fus_read_times {
+ uint64_t u64;
+ struct cvmx_mio_fus_read_times_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63:38;
+ uint64_t sch:4;
+ uint64_t fsh:4;
+ uint64_t prh:4;
+ uint64_t sdh:4;
+ uint64_t setup:10;
+#else
+ uint64_t setup:10;
+ uint64_t sdh:4;
+ uint64_t prh:4;
+ uint64_t fsh:4;
+ uint64_t sch:4;
+ uint64_t reserved_26_63:38;
+#endif
+ } s;
+};
+
+union cvmx_mio_fus_repair_res0 {
+ uint64_t u64;
+ struct cvmx_mio_fus_repair_res0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_55_63:9;
+ uint64_t too_many:1;
+ uint64_t repair2:18;
+ uint64_t repair1:18;
+ uint64_t repair0:18;
+#else
+ uint64_t repair0:18;
+ uint64_t repair1:18;
+ uint64_t repair2:18;
+ uint64_t too_many:1;
+ uint64_t reserved_55_63:9;
+#endif
+ } s;
+};
+
+union cvmx_mio_fus_repair_res1 {
+ uint64_t u64;
+ struct cvmx_mio_fus_repair_res1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63:10;
+ uint64_t repair5:18;
+ uint64_t repair4:18;
+ uint64_t repair3:18;
+#else
+ uint64_t repair3:18;
+ uint64_t repair4:18;
+ uint64_t repair5:18;
+ uint64_t reserved_54_63:10;
+#endif
+ } s;
+};
+
+union cvmx_mio_fus_repair_res2 {
+ uint64_t u64;
+ struct cvmx_mio_fus_repair_res2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63:46;
+ uint64_t repair6:18;
+#else
+ uint64_t repair6:18;
+ uint64_t reserved_18_63:46;
+#endif
+ } s;
+};
+
+union cvmx_mio_fus_spr_repair_res {
+ uint64_t u64;
+ struct cvmx_mio_fus_spr_repair_res_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63:22;
+ uint64_t repair2:14;
+ uint64_t repair1:14;
+ uint64_t repair0:14;
+#else
+ uint64_t repair0:14;
+ uint64_t repair1:14;
+ uint64_t repair2:14;
+ uint64_t reserved_42_63:22;
+#endif
+ } s;
+};
+
+union cvmx_mio_fus_spr_repair_sum {
+ uint64_t u64;
+ struct cvmx_mio_fus_spr_repair_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t too_many:1;
+#else
+ uint64_t too_many:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_mio_fus_tgg {
+ uint64_t u64;
+ struct cvmx_mio_fus_tgg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t val:1;
+ uint64_t dat:63;
+#else
+ uint64_t dat:63;
+ uint64_t val:1;
+#endif
+ } s;
+};
+
+union cvmx_mio_fus_unlock {
+ uint64_t u64;
+ struct cvmx_mio_fus_unlock_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t key:24;
+#else
+ uint64_t key:24;
+ uint64_t reserved_24_63:40;
+#endif
+ } s;
+};
+
+union cvmx_mio_fus_wadr {
+ uint64_t u64;
+ struct cvmx_mio_fus_wadr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t addr:10;
+#else
+ uint64_t addr:10;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+ struct cvmx_mio_fus_wadr_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t addr:2;
+#else
+ uint64_t addr:2;
+ uint64_t reserved_2_63:62;
+#endif
+ } cn50xx;
+ struct cvmx_mio_fus_wadr_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t addr:3;
+#else
+ uint64_t addr:3;
+ uint64_t reserved_3_63:61;
+#endif
+ } cn52xx;
+ struct cvmx_mio_fus_wadr_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t addr:4;
+#else
+ uint64_t addr:4;
+ uint64_t reserved_4_63:60;
+#endif
+ } cn61xx;
+};
+
+union cvmx_mio_gpio_comp {
+ uint64_t u64;
+ struct cvmx_mio_gpio_comp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t pctl:6;
+ uint64_t nctl:6;
+#else
+ uint64_t nctl:6;
+ uint64_t pctl:6;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+};
+
+union cvmx_mio_ndf_dma_cfg {
+ uint64_t u64;
+ struct cvmx_mio_ndf_dma_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t en:1;
+ uint64_t rw:1;
+ uint64_t clr:1;
+ uint64_t reserved_60_60:1;
+ uint64_t swap32:1;
+ uint64_t swap16:1;
+ uint64_t swap8:1;
+ uint64_t endian:1;
+ uint64_t size:20;
+ uint64_t adr:36;
+#else
+ uint64_t adr:36;
+ uint64_t size:20;
+ uint64_t endian:1;
+ uint64_t swap8:1;
+ uint64_t swap16:1;
+ uint64_t swap32:1;
+ uint64_t reserved_60_60:1;
+ uint64_t clr:1;
+ uint64_t rw:1;
+ uint64_t en:1;
+#endif
+ } s;
+};
+
+union cvmx_mio_ndf_dma_int {
+ uint64_t u64;
+ struct cvmx_mio_ndf_dma_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t done:1;
+#else
+ uint64_t done:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_mio_ndf_dma_int_en {
+ uint64_t u64;
+ struct cvmx_mio_ndf_dma_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t done:1;
+#else
+ uint64_t done:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_mio_pll_ctl {
+ uint64_t u64;
+ struct cvmx_mio_pll_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t bw_ctl:5;
+#else
+ uint64_t bw_ctl:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_mio_pll_setting {
+ uint64_t u64;
+ struct cvmx_mio_pll_setting_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t setting:17;
+#else
+ uint64_t setting:17;
+ uint64_t reserved_17_63:47;
+#endif
+ } s;
+};
+
+union cvmx_mio_ptp_ckout_hi_incr {
+ uint64_t u64;
+ struct cvmx_mio_ptp_ckout_hi_incr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec:32;
+ uint64_t frnanosec:32;
+#else
+ uint64_t frnanosec:32;
+ uint64_t nanosec:32;
+#endif
+ } s;
+};
+
+union cvmx_mio_ptp_ckout_lo_incr {
+ uint64_t u64;
+ struct cvmx_mio_ptp_ckout_lo_incr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec:32;
+ uint64_t frnanosec:32;
+#else
+ uint64_t frnanosec:32;
+ uint64_t nanosec:32;
+#endif
+ } s;
+};
+
+union cvmx_mio_ptp_ckout_thresh_hi {
+ uint64_t u64;
+ struct cvmx_mio_ptp_ckout_thresh_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec:64;
+#else
+ uint64_t nanosec:64;
+#endif
+ } s;
+};
+
+union cvmx_mio_ptp_ckout_thresh_lo {
+ uint64_t u64;
+ struct cvmx_mio_ptp_ckout_thresh_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t frnanosec:32;
+#else
+ uint64_t frnanosec:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_mio_ptp_clock_cfg {
+ uint64_t u64;
+ struct cvmx_mio_ptp_clock_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63:22;
+ uint64_t pps:1;
+ uint64_t ckout:1;
+ uint64_t ext_clk_edge:2;
+ uint64_t ckout_out4:1;
+ uint64_t pps_out:5;
+ uint64_t pps_inv:1;
+ uint64_t pps_en:1;
+ uint64_t ckout_out:4;
+ uint64_t ckout_inv:1;
+ uint64_t ckout_en:1;
+ uint64_t evcnt_in:6;
+ uint64_t evcnt_edge:1;
+ uint64_t evcnt_en:1;
+ uint64_t tstmp_in:6;
+ uint64_t tstmp_edge:1;
+ uint64_t tstmp_en:1;
+ uint64_t ext_clk_in:6;
+ uint64_t ext_clk_en:1;
+ uint64_t ptp_en:1;
+#else
+ uint64_t ptp_en:1;
+ uint64_t ext_clk_en:1;
+ uint64_t ext_clk_in:6;
+ uint64_t tstmp_en:1;
+ uint64_t tstmp_edge:1;
+ uint64_t tstmp_in:6;
+ uint64_t evcnt_en:1;
+ uint64_t evcnt_edge:1;
+ uint64_t evcnt_in:6;
+ uint64_t ckout_en:1;
+ uint64_t ckout_inv:1;
+ uint64_t ckout_out:4;
+ uint64_t pps_en:1;
+ uint64_t pps_inv:1;
+ uint64_t pps_out:5;
+ uint64_t ckout_out4:1;
+ uint64_t ext_clk_edge:2;
+ uint64_t ckout:1;
+ uint64_t pps:1;
+ uint64_t reserved_42_63:22;
+#endif
+ } s;
+ struct cvmx_mio_ptp_clock_cfg_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t evcnt_in:6;
+ uint64_t evcnt_edge:1;
+ uint64_t evcnt_en:1;
+ uint64_t tstmp_in:6;
+ uint64_t tstmp_edge:1;
+ uint64_t tstmp_en:1;
+ uint64_t ext_clk_in:6;
+ uint64_t ext_clk_en:1;
+ uint64_t ptp_en:1;
+#else
+ uint64_t ptp_en:1;
+ uint64_t ext_clk_en:1;
+ uint64_t ext_clk_in:6;
+ uint64_t tstmp_en:1;
+ uint64_t tstmp_edge:1;
+ uint64_t tstmp_in:6;
+ uint64_t evcnt_en:1;
+ uint64_t evcnt_edge:1;
+ uint64_t evcnt_in:6;
+ uint64_t reserved_24_63:40;
+#endif
+ } cn63xx;
+ struct cvmx_mio_ptp_clock_cfg_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t ext_clk_edge:2;
+ uint64_t ckout_out4:1;
+ uint64_t pps_out:5;
+ uint64_t pps_inv:1;
+ uint64_t pps_en:1;
+ uint64_t ckout_out:4;
+ uint64_t ckout_inv:1;
+ uint64_t ckout_en:1;
+ uint64_t evcnt_in:6;
+ uint64_t evcnt_edge:1;
+ uint64_t evcnt_en:1;
+ uint64_t tstmp_in:6;
+ uint64_t tstmp_edge:1;
+ uint64_t tstmp_en:1;
+ uint64_t ext_clk_in:6;
+ uint64_t ext_clk_en:1;
+ uint64_t ptp_en:1;
+#else
+ uint64_t ptp_en:1;
+ uint64_t ext_clk_en:1;
+ uint64_t ext_clk_in:6;
+ uint64_t tstmp_en:1;
+ uint64_t tstmp_edge:1;
+ uint64_t tstmp_in:6;
+ uint64_t evcnt_en:1;
+ uint64_t evcnt_edge:1;
+ uint64_t evcnt_in:6;
+ uint64_t ckout_en:1;
+ uint64_t ckout_inv:1;
+ uint64_t ckout_out:4;
+ uint64_t pps_en:1;
+ uint64_t pps_inv:1;
+ uint64_t pps_out:5;
+ uint64_t ckout_out4:1;
+ uint64_t ext_clk_edge:2;
+ uint64_t reserved_40_63:24;
+#endif
+ } cn66xx;
+};
+
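+/*
+ * Example usage (a sketch, assuming cvmx_read_csr()/cvmx_write_csr()
+ * and a CVMX_MIO_PTP_CLOCK_CFG address macro): enabling the
+ * free-running PTP clock is a read-modify-write of this register.
+ *
+ *	union cvmx_mio_ptp_clock_cfg cfg;
+ *
+ *	cfg.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
+ *	cfg.s.ptp_en = 1;
+ *	cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, cfg.u64);
+ */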
+union cvmx_mio_ptp_clock_comp {
+ uint64_t u64;
+ struct cvmx_mio_ptp_clock_comp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec:32;
+ uint64_t frnanosec:32;
+#else
+ uint64_t frnanosec:32;
+ uint64_t nanosec:32;
+#endif
+ } s;
+};
+
+union cvmx_mio_ptp_clock_hi {
+ uint64_t u64;
+ struct cvmx_mio_ptp_clock_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec:64;
+#else
+ uint64_t nanosec:64;
+#endif
+ } s;
+};
+
+union cvmx_mio_ptp_clock_lo {
+ uint64_t u64;
+ struct cvmx_mio_ptp_clock_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t frnanosec:32;
+#else
+ uint64_t frnanosec:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_mio_ptp_evt_cnt {
+ uint64_t u64;
+ struct cvmx_mio_ptp_evt_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t cntr:64;
+#else
+ uint64_t cntr:64;
+#endif
+ } s;
+};
+
+union cvmx_mio_ptp_phy_1pps_in {
+ uint64_t u64;
+ struct cvmx_mio_ptp_phy_1pps_in_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t sel:5;
+#else
+ uint64_t sel:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_mio_ptp_pps_hi_incr {
+ uint64_t u64;
+ struct cvmx_mio_ptp_pps_hi_incr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec:32;
+ uint64_t frnanosec:32;
+#else
+ uint64_t frnanosec:32;
+ uint64_t nanosec:32;
+#endif
+ } s;
+};
+
+union cvmx_mio_ptp_pps_lo_incr {
+ uint64_t u64;
+ struct cvmx_mio_ptp_pps_lo_incr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec:32;
+ uint64_t frnanosec:32;
+#else
+ uint64_t frnanosec:32;
+ uint64_t nanosec:32;
+#endif
+ } s;
+};
+
+union cvmx_mio_ptp_pps_thresh_hi {
+ uint64_t u64;
+ struct cvmx_mio_ptp_pps_thresh_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec:64;
+#else
+ uint64_t nanosec:64;
+#endif
+ } s;
+};
+
+union cvmx_mio_ptp_pps_thresh_lo {
+ uint64_t u64;
+ struct cvmx_mio_ptp_pps_thresh_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t frnanosec:32;
+#else
+ uint64_t frnanosec:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_mio_ptp_timestamp {
+ uint64_t u64;
+ struct cvmx_mio_ptp_timestamp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t nanosec:64;
+#else
+ uint64_t nanosec:64;
+#endif
+ } s;
+};
+
+union cvmx_mio_qlmx_cfg {
+ uint64_t u64;
+ struct cvmx_mio_qlmx_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63:49;
+ uint64_t prtmode:1;
+ uint64_t reserved_12_13:2;
+ uint64_t qlm_spd:4;
+ uint64_t reserved_4_7:4;
+ uint64_t qlm_cfg:4;
+#else
+ uint64_t qlm_cfg:4;
+ uint64_t reserved_4_7:4;
+ uint64_t qlm_spd:4;
+ uint64_t reserved_12_13:2;
+ uint64_t prtmode:1;
+ uint64_t reserved_15_63:49;
+#endif
+ } s;
+ struct cvmx_mio_qlmx_cfg_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63:49;
+ uint64_t prtmode:1;
+ uint64_t reserved_12_13:2;
+ uint64_t qlm_spd:4;
+ uint64_t reserved_2_7:6;
+ uint64_t qlm_cfg:2;
+#else
+ uint64_t qlm_cfg:2;
+ uint64_t reserved_2_7:6;
+ uint64_t qlm_spd:4;
+ uint64_t reserved_12_13:2;
+ uint64_t prtmode:1;
+ uint64_t reserved_15_63:49;
+#endif
+ } cn61xx;
+ struct cvmx_mio_qlmx_cfg_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t qlm_spd:4;
+ uint64_t reserved_4_7:4;
+ uint64_t qlm_cfg:4;
+#else
+ uint64_t qlm_cfg:4;
+ uint64_t reserved_4_7:4;
+ uint64_t qlm_spd:4;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn66xx;
+ struct cvmx_mio_qlmx_cfg_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t qlm_spd:4;
+ uint64_t reserved_3_7:5;
+ uint64_t qlm_cfg:3;
+#else
+ uint64_t qlm_cfg:3;
+ uint64_t reserved_3_7:5;
+ uint64_t qlm_spd:4;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn68xx;
+};
+
+union cvmx_mio_rst_boot {
+ uint64_t u64;
+ struct cvmx_mio_rst_boot_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t chipkill:1;
+ uint64_t jtcsrdis:1;
+ uint64_t ejtagdis:1;
+ uint64_t romen:1;
+ uint64_t ckill_ppdis:1;
+ uint64_t jt_tstmode:1;
+ uint64_t reserved_50_57:8;
+ uint64_t lboot_ext:2;
+ uint64_t reserved_44_47:4;
+ uint64_t qlm4_spd:4;
+ uint64_t qlm3_spd:4;
+ uint64_t c_mul:6;
+ uint64_t pnr_mul:6;
+ uint64_t qlm2_spd:4;
+ uint64_t qlm1_spd:4;
+ uint64_t qlm0_spd:4;
+ uint64_t lboot:10;
+ uint64_t rboot:1;
+ uint64_t rboot_pin:1;
+#else
+ uint64_t rboot_pin:1;
+ uint64_t rboot:1;
+ uint64_t lboot:10;
+ uint64_t qlm0_spd:4;
+ uint64_t qlm1_spd:4;
+ uint64_t qlm2_spd:4;
+ uint64_t pnr_mul:6;
+ uint64_t c_mul:6;
+ uint64_t qlm3_spd:4;
+ uint64_t qlm4_spd:4;
+ uint64_t reserved_44_47:4;
+ uint64_t lboot_ext:2;
+ uint64_t reserved_50_57:8;
+ uint64_t jt_tstmode:1;
+ uint64_t ckill_ppdis:1;
+ uint64_t romen:1;
+ uint64_t ejtagdis:1;
+ uint64_t jtcsrdis:1;
+ uint64_t chipkill:1;
+#endif
+ } s;
+ struct cvmx_mio_rst_boot_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t chipkill:1;
+ uint64_t jtcsrdis:1;
+ uint64_t ejtagdis:1;
+ uint64_t romen:1;
+ uint64_t ckill_ppdis:1;
+ uint64_t jt_tstmode:1;
+ uint64_t reserved_50_57:8;
+ uint64_t lboot_ext:2;
+ uint64_t reserved_36_47:12;
+ uint64_t c_mul:6;
+ uint64_t pnr_mul:6;
+ uint64_t qlm2_spd:4;
+ uint64_t qlm1_spd:4;
+ uint64_t qlm0_spd:4;
+ uint64_t lboot:10;
+ uint64_t rboot:1;
+ uint64_t rboot_pin:1;
+#else
+ uint64_t rboot_pin:1;
+ uint64_t rboot:1;
+ uint64_t lboot:10;
+ uint64_t qlm0_spd:4;
+ uint64_t qlm1_spd:4;
+ uint64_t qlm2_spd:4;
+ uint64_t pnr_mul:6;
+ uint64_t c_mul:6;
+ uint64_t reserved_36_47:12;
+ uint64_t lboot_ext:2;
+ uint64_t reserved_50_57:8;
+ uint64_t jt_tstmode:1;
+ uint64_t ckill_ppdis:1;
+ uint64_t romen:1;
+ uint64_t ejtagdis:1;
+ uint64_t jtcsrdis:1;
+ uint64_t chipkill:1;
+#endif
+ } cn61xx;
+ struct cvmx_mio_rst_boot_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63:28;
+ uint64_t c_mul:6;
+ uint64_t pnr_mul:6;
+ uint64_t qlm2_spd:4;
+ uint64_t qlm1_spd:4;
+ uint64_t qlm0_spd:4;
+ uint64_t lboot:10;
+ uint64_t rboot:1;
+ uint64_t rboot_pin:1;
+#else
+ uint64_t rboot_pin:1;
+ uint64_t rboot:1;
+ uint64_t lboot:10;
+ uint64_t qlm0_spd:4;
+ uint64_t qlm1_spd:4;
+ uint64_t qlm2_spd:4;
+ uint64_t pnr_mul:6;
+ uint64_t c_mul:6;
+ uint64_t reserved_36_63:28;
+#endif
+ } cn63xx;
+ struct cvmx_mio_rst_boot_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t chipkill:1;
+ uint64_t jtcsrdis:1;
+ uint64_t ejtagdis:1;
+ uint64_t romen:1;
+ uint64_t ckill_ppdis:1;
+ uint64_t reserved_50_58:9;
+ uint64_t lboot_ext:2;
+ uint64_t reserved_36_47:12;
+ uint64_t c_mul:6;
+ uint64_t pnr_mul:6;
+ uint64_t qlm2_spd:4;
+ uint64_t qlm1_spd:4;
+ uint64_t qlm0_spd:4;
+ uint64_t lboot:10;
+ uint64_t rboot:1;
+ uint64_t rboot_pin:1;
+#else
+ uint64_t rboot_pin:1;
+ uint64_t rboot:1;
+ uint64_t lboot:10;
+ uint64_t qlm0_spd:4;
+ uint64_t qlm1_spd:4;
+ uint64_t qlm2_spd:4;
+ uint64_t pnr_mul:6;
+ uint64_t c_mul:6;
+ uint64_t reserved_36_47:12;
+ uint64_t lboot_ext:2;
+ uint64_t reserved_50_58:9;
+ uint64_t ckill_ppdis:1;
+ uint64_t romen:1;
+ uint64_t ejtagdis:1;
+ uint64_t jtcsrdis:1;
+ uint64_t chipkill:1;
+#endif
+ } cn66xx;
+ struct cvmx_mio_rst_boot_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63:5;
+ uint64_t jt_tstmode:1;
+ uint64_t reserved_44_57:14;
+ uint64_t qlm4_spd:4;
+ uint64_t qlm3_spd:4;
+ uint64_t c_mul:6;
+ uint64_t pnr_mul:6;
+ uint64_t qlm2_spd:4;
+ uint64_t qlm1_spd:4;
+ uint64_t qlm0_spd:4;
+ uint64_t lboot:10;
+ uint64_t rboot:1;
+ uint64_t rboot_pin:1;
+#else
+ uint64_t rboot_pin:1;
+ uint64_t rboot:1;
+ uint64_t lboot:10;
+ uint64_t qlm0_spd:4;
+ uint64_t qlm1_spd:4;
+ uint64_t qlm2_spd:4;
+ uint64_t pnr_mul:6;
+ uint64_t c_mul:6;
+ uint64_t qlm3_spd:4;
+ uint64_t qlm4_spd:4;
+ uint64_t reserved_44_57:14;
+ uint64_t jt_tstmode:1;
+ uint64_t reserved_59_63:5;
+#endif
+ } cn68xx;
+ struct cvmx_mio_rst_boot_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t qlm4_spd:4;
+ uint64_t qlm3_spd:4;
+ uint64_t c_mul:6;
+ uint64_t pnr_mul:6;
+ uint64_t qlm2_spd:4;
+ uint64_t qlm1_spd:4;
+ uint64_t qlm0_spd:4;
+ uint64_t lboot:10;
+ uint64_t rboot:1;
+ uint64_t rboot_pin:1;
+#else
+ uint64_t rboot_pin:1;
+ uint64_t rboot:1;
+ uint64_t lboot:10;
+ uint64_t qlm0_spd:4;
+ uint64_t qlm1_spd:4;
+ uint64_t qlm2_spd:4;
+ uint64_t pnr_mul:6;
+ uint64_t c_mul:6;
+ uint64_t qlm3_spd:4;
+ uint64_t qlm4_spd:4;
+ uint64_t reserved_44_63:20;
+#endif
+ } cn68xxp1;
+};
+
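+/*
+ * A hedged sketch, assuming cvmx_read_csr() and a CVMX_MIO_RST_BOOT
+ * address macro: the latched boot state includes the core (C_MUL) and
+ * coprocessor (PNR_MUL) PLL multipliers, so on CN6XXX-class parts the
+ * core clock can be derived from the 50 MHz reference.
+ *
+ *	union cvmx_mio_rst_boot rst_boot;
+ *	uint64_t core_hz;
+ *
+ *	rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
+ *	core_hz = 50000000ull * rst_boot.s.c_mul;
+ */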
+union cvmx_mio_rst_cfg {
+ uint64_t u64;
+ struct cvmx_mio_rst_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t cntl_clr_bist:1;
+ uint64_t warm_clr_bist:1;
+ uint64_t soft_clr_bist:1;
+#else
+ uint64_t soft_clr_bist:1;
+ uint64_t warm_clr_bist:1;
+ uint64_t cntl_clr_bist:1;
+ uint64_t reserved_3_63:61;
+#endif
+ } s;
+ struct cvmx_mio_rst_cfg_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bist_delay:58;
+ uint64_t reserved_3_5:3;
+ uint64_t cntl_clr_bist:1;
+ uint64_t warm_clr_bist:1;
+ uint64_t soft_clr_bist:1;
+#else
+ uint64_t soft_clr_bist:1;
+ uint64_t warm_clr_bist:1;
+ uint64_t cntl_clr_bist:1;
+ uint64_t reserved_3_5:3;
+ uint64_t bist_delay:58;
+#endif
+ } cn61xx;
+ struct cvmx_mio_rst_cfg_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bist_delay:58;
+ uint64_t reserved_2_5:4;
+ uint64_t warm_clr_bist:1;
+ uint64_t soft_clr_bist:1;
+#else
+ uint64_t soft_clr_bist:1;
+ uint64_t warm_clr_bist:1;
+ uint64_t reserved_2_5:4;
+ uint64_t bist_delay:58;
+#endif
+ } cn63xxp1;
+ struct cvmx_mio_rst_cfg_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bist_delay:56;
+ uint64_t reserved_3_7:5;
+ uint64_t cntl_clr_bist:1;
+ uint64_t warm_clr_bist:1;
+ uint64_t soft_clr_bist:1;
+#else
+ uint64_t soft_clr_bist:1;
+ uint64_t warm_clr_bist:1;
+ uint64_t cntl_clr_bist:1;
+ uint64_t reserved_3_7:5;
+ uint64_t bist_delay:56;
+#endif
+ } cn68xx;
+};
+
+union cvmx_mio_rst_ckill {
+ uint64_t u64;
+ struct cvmx_mio_rst_ckill_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63:17;
+ uint64_t timer:47;
+#else
+ uint64_t timer:47;
+ uint64_t reserved_47_63:17;
+#endif
+ } s;
+};
+
+union cvmx_mio_rst_cntlx {
+ uint64_t u64;
+ struct cvmx_mio_rst_cntlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t in_rev_ln:1;
+ uint64_t rev_lanes:1;
+ uint64_t gen1_only:1;
+ uint64_t prst_link:1;
+ uint64_t rst_done:1;
+ uint64_t rst_link:1;
+ uint64_t host_mode:1;
+ uint64_t prtmode:2;
+ uint64_t rst_drv:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_val:1;
+#else
+ uint64_t rst_val:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_drv:1;
+ uint64_t prtmode:2;
+ uint64_t host_mode:1;
+ uint64_t rst_link:1;
+ uint64_t rst_done:1;
+ uint64_t prst_link:1;
+ uint64_t gen1_only:1;
+ uint64_t rev_lanes:1;
+ uint64_t in_rev_ln:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+ struct cvmx_mio_rst_cntlx_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t prst_link:1;
+ uint64_t rst_done:1;
+ uint64_t rst_link:1;
+ uint64_t host_mode:1;
+ uint64_t prtmode:2;
+ uint64_t rst_drv:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_val:1;
+#else
+ uint64_t rst_val:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_drv:1;
+ uint64_t prtmode:2;
+ uint64_t host_mode:1;
+ uint64_t rst_link:1;
+ uint64_t rst_done:1;
+ uint64_t prst_link:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn66xx;
+};
+
+union cvmx_mio_rst_ctlx {
+ uint64_t u64;
+ struct cvmx_mio_rst_ctlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t in_rev_ln:1;
+ uint64_t rev_lanes:1;
+ uint64_t gen1_only:1;
+ uint64_t prst_link:1;
+ uint64_t rst_done:1;
+ uint64_t rst_link:1;
+ uint64_t host_mode:1;
+ uint64_t prtmode:2;
+ uint64_t rst_drv:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_val:1;
+#else
+ uint64_t rst_val:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_drv:1;
+ uint64_t prtmode:2;
+ uint64_t host_mode:1;
+ uint64_t rst_link:1;
+ uint64_t rst_done:1;
+ uint64_t prst_link:1;
+ uint64_t gen1_only:1;
+ uint64_t rev_lanes:1;
+ uint64_t in_rev_ln:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+ struct cvmx_mio_rst_ctlx_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t prst_link:1;
+ uint64_t rst_done:1;
+ uint64_t rst_link:1;
+ uint64_t host_mode:1;
+ uint64_t prtmode:2;
+ uint64_t rst_drv:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_val:1;
+#else
+ uint64_t rst_val:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_drv:1;
+ uint64_t prtmode:2;
+ uint64_t host_mode:1;
+ uint64_t rst_link:1;
+ uint64_t rst_done:1;
+ uint64_t prst_link:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn63xx;
+ struct cvmx_mio_rst_ctlx_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t rst_done:1;
+ uint64_t rst_link:1;
+ uint64_t host_mode:1;
+ uint64_t prtmode:2;
+ uint64_t rst_drv:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_val:1;
+#else
+ uint64_t rst_val:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_drv:1;
+ uint64_t prtmode:2;
+ uint64_t host_mode:1;
+ uint64_t rst_link:1;
+ uint64_t rst_done:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn63xxp1;
+};
+
+union cvmx_mio_rst_delay {
+ uint64_t u64;
+ struct cvmx_mio_rst_delay_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t warm_rst_dly:16;
+ uint64_t soft_rst_dly:16;
+#else
+ uint64_t soft_rst_dly:16;
+ uint64_t warm_rst_dly:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_mio_rst_int {
+ uint64_t u64;
+ struct cvmx_mio_rst_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t perst1:1;
+ uint64_t perst0:1;
+ uint64_t reserved_4_7:4;
+ uint64_t rst_link3:1;
+ uint64_t rst_link2:1;
+ uint64_t rst_link1:1;
+ uint64_t rst_link0:1;
+#else
+ uint64_t rst_link0:1;
+ uint64_t rst_link1:1;
+ uint64_t rst_link2:1;
+ uint64_t rst_link3:1;
+ uint64_t reserved_4_7:4;
+ uint64_t perst0:1;
+ uint64_t perst1:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+ struct cvmx_mio_rst_int_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t perst1:1;
+ uint64_t perst0:1;
+ uint64_t reserved_2_7:6;
+ uint64_t rst_link1:1;
+ uint64_t rst_link0:1;
+#else
+ uint64_t rst_link0:1;
+ uint64_t rst_link1:1;
+ uint64_t reserved_2_7:6;
+ uint64_t perst0:1;
+ uint64_t perst1:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn61xx;
+};
+
+union cvmx_mio_rst_int_en {
+ uint64_t u64;
+ struct cvmx_mio_rst_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t perst1:1;
+ uint64_t perst0:1;
+ uint64_t reserved_4_7:4;
+ uint64_t rst_link3:1;
+ uint64_t rst_link2:1;
+ uint64_t rst_link1:1;
+ uint64_t rst_link0:1;
+#else
+ uint64_t rst_link0:1;
+ uint64_t rst_link1:1;
+ uint64_t rst_link2:1;
+ uint64_t rst_link3:1;
+ uint64_t reserved_4_7:4;
+ uint64_t perst0:1;
+ uint64_t perst1:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+ struct cvmx_mio_rst_int_en_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t perst1:1;
+ uint64_t perst0:1;
+ uint64_t reserved_2_7:6;
+ uint64_t rst_link1:1;
+ uint64_t rst_link0:1;
+#else
+ uint64_t rst_link0:1;
+ uint64_t rst_link1:1;
+ uint64_t reserved_2_7:6;
+ uint64_t perst0:1;
+ uint64_t perst1:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn61xx;
+};
+
+union cvmx_mio_twsx_int {
+ uint64_t u64;
+ struct cvmx_mio_twsx_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t scl:1;
+ uint64_t sda:1;
+ uint64_t scl_ovr:1;
+ uint64_t sda_ovr:1;
+ uint64_t reserved_7_7:1;
+ uint64_t core_en:1;
+ uint64_t ts_en:1;
+ uint64_t st_en:1;
+ uint64_t reserved_3_3:1;
+ uint64_t core_int:1;
+ uint64_t ts_int:1;
+ uint64_t st_int:1;
+#else
+ uint64_t st_int:1;
+ uint64_t ts_int:1;
+ uint64_t core_int:1;
+ uint64_t reserved_3_3:1;
+ uint64_t st_en:1;
+ uint64_t ts_en:1;
+ uint64_t core_en:1;
+ uint64_t reserved_7_7:1;
+ uint64_t sda_ovr:1;
+ uint64_t scl_ovr:1;
+ uint64_t sda:1;
+ uint64_t scl:1;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+ struct cvmx_mio_twsx_int_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t core_en:1;
+ uint64_t ts_en:1;
+ uint64_t st_en:1;
+ uint64_t reserved_3_3:1;
+ uint64_t core_int:1;
+ uint64_t ts_int:1;
+ uint64_t st_int:1;
+#else
+ uint64_t st_int:1;
+ uint64_t ts_int:1;
+ uint64_t core_int:1;
+ uint64_t reserved_3_3:1;
+ uint64_t st_en:1;
+ uint64_t ts_en:1;
+ uint64_t core_en:1;
+ uint64_t reserved_7_63:57;
+#endif
+ } cn38xxp2;
+};
+
+union cvmx_mio_twsx_sw_twsi {
+ uint64_t u64;
+ struct cvmx_mio_twsx_sw_twsi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t v:1;
+ uint64_t slonly:1;
+ uint64_t eia:1;
+ uint64_t op:4;
+ uint64_t r:1;
+ uint64_t sovr:1;
+ uint64_t size:3;
+ uint64_t scr:2;
+ uint64_t a:10;
+ uint64_t ia:5;
+ uint64_t eop_ia:3;
+ uint64_t d:32;
+#else
+ uint64_t d:32;
+ uint64_t eop_ia:3;
+ uint64_t ia:5;
+ uint64_t a:10;
+ uint64_t scr:2;
+ uint64_t size:3;
+ uint64_t sovr:1;
+ uint64_t r:1;
+ uint64_t op:4;
+ uint64_t eia:1;
+ uint64_t slonly:1;
+ uint64_t v:1;
+#endif
+ } s;
+};
+
+union cvmx_mio_twsx_sw_twsi_ext {
+ uint64_t u64;
+ struct cvmx_mio_twsx_sw_twsi_ext_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t ia:8;
+ uint64_t d:32;
+#else
+ uint64_t d:32;
+ uint64_t ia:8;
+ uint64_t reserved_40_63:24;
+#endif
+ } s;
+};
+
+union cvmx_mio_twsx_twsi_sw {
+ uint64_t u64;
+ struct cvmx_mio_twsx_twsi_sw_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t v:2;
+ uint64_t reserved_32_61:30;
+ uint64_t d:32;
+#else
+ uint64_t d:32;
+ uint64_t reserved_32_61:30;
+ uint64_t v:2;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_dlh {
+ uint64_t u64;
+ struct cvmx_mio_uartx_dlh_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t dlh:8;
+#else
+ uint64_t dlh:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_dll {
+ uint64_t u64;
+ struct cvmx_mio_uartx_dll_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t dll:8;
+#else
+ uint64_t dll:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_far {
+ uint64_t u64;
+ struct cvmx_mio_uartx_far_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t far:1;
+#else
+ uint64_t far:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_fcr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_fcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t rxtrig:2;
+ uint64_t txtrig:2;
+ uint64_t reserved_3_3:1;
+ uint64_t txfr:1;
+ uint64_t rxfr:1;
+ uint64_t en:1;
+#else
+ uint64_t en:1;
+ uint64_t rxfr:1;
+ uint64_t txfr:1;
+ uint64_t reserved_3_3:1;
+ uint64_t txtrig:2;
+ uint64_t rxtrig:2;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_htx {
+ uint64_t u64;
+ struct cvmx_mio_uartx_htx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t htx:1;
+#else
+ uint64_t htx:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_ier {
+ uint64_t u64;
+ struct cvmx_mio_uartx_ier_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t ptime:1;
+ uint64_t reserved_4_6:3;
+ uint64_t edssi:1;
+ uint64_t elsi:1;
+ uint64_t etbei:1;
+ uint64_t erbfi:1;
+#else
+ uint64_t erbfi:1;
+ uint64_t etbei:1;
+ uint64_t elsi:1;
+ uint64_t edssi:1;
+ uint64_t reserved_4_6:3;
+ uint64_t ptime:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_iir {
+ uint64_t u64;
+ struct cvmx_mio_uartx_iir_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t fen:2;
+ uint64_t reserved_4_5:2;
+ uint64_t iid:4;
+#else
+ uint64_t iid:4;
+ uint64_t reserved_4_5:2;
+ uint64_t fen:2;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_lcr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_lcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t dlab:1;
+ uint64_t brk:1;
+ uint64_t reserved_5_5:1;
+ uint64_t eps:1;
+ uint64_t pen:1;
+ uint64_t stop:1;
+ uint64_t cls:2;
+#else
+ uint64_t cls:2;
+ uint64_t stop:1;
+ uint64_t pen:1;
+ uint64_t eps:1;
+ uint64_t reserved_5_5:1;
+ uint64_t brk:1;
+ uint64_t dlab:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_lsr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_lsr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t ferr:1;
+ uint64_t temt:1;
+ uint64_t thre:1;
+ uint64_t bi:1;
+ uint64_t fe:1;
+ uint64_t pe:1;
+ uint64_t oe:1;
+ uint64_t dr:1;
+#else
+ uint64_t dr:1;
+ uint64_t oe:1;
+ uint64_t pe:1;
+ uint64_t fe:1;
+ uint64_t bi:1;
+ uint64_t thre:1;
+ uint64_t temt:1;
+ uint64_t ferr:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_mcr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_mcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t afce:1;
+ uint64_t loop:1;
+ uint64_t out2:1;
+ uint64_t out1:1;
+ uint64_t rts:1;
+ uint64_t dtr:1;
+#else
+ uint64_t dtr:1;
+ uint64_t rts:1;
+ uint64_t out1:1;
+ uint64_t out2:1;
+ uint64_t loop:1;
+ uint64_t afce:1;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_msr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_msr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t dcd:1;
+ uint64_t ri:1;
+ uint64_t dsr:1;
+ uint64_t cts:1;
+ uint64_t ddcd:1;
+ uint64_t teri:1;
+ uint64_t ddsr:1;
+ uint64_t dcts:1;
+#else
+ uint64_t dcts:1;
+ uint64_t ddsr:1;
+ uint64_t teri:1;
+ uint64_t ddcd:1;
+ uint64_t cts:1;
+ uint64_t dsr:1;
+ uint64_t ri:1;
+ uint64_t dcd:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_rbr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_rbr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t rbr:8;
+#else
+ uint64_t rbr:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_rfl {
+ uint64_t u64;
+ struct cvmx_mio_uartx_rfl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t rfl:7;
+#else
+ uint64_t rfl:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_rfw {
+ uint64_t u64;
+ struct cvmx_mio_uartx_rfw_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t rffe:1;
+ uint64_t rfpe:1;
+ uint64_t rfwd:8;
+#else
+ uint64_t rfwd:8;
+ uint64_t rfpe:1;
+ uint64_t rffe:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_sbcr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_sbcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t sbcr:1;
+#else
+ uint64_t sbcr:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_scr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_scr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t scr:8;
+#else
+ uint64_t scr:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_sfe {
+ uint64_t u64;
+ struct cvmx_mio_uartx_sfe_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t sfe:1;
+#else
+ uint64_t sfe:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_srr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_srr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t stfr:1;
+ uint64_t srfr:1;
+ uint64_t usr:1;
+#else
+ uint64_t usr:1;
+ uint64_t srfr:1;
+ uint64_t stfr:1;
+ uint64_t reserved_3_63:61;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_srt {
+ uint64_t u64;
+ struct cvmx_mio_uartx_srt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t srt:2;
+#else
+ uint64_t srt:2;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_srts {
+ uint64_t u64;
+ struct cvmx_mio_uartx_srts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t srts:1;
+#else
+ uint64_t srts:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_stt {
+ uint64_t u64;
+ struct cvmx_mio_uartx_stt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t stt:2;
+#else
+ uint64_t stt:2;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_tfl {
+ uint64_t u64;
+ struct cvmx_mio_uartx_tfl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t tfl:7;
+#else
+ uint64_t tfl:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_tfr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_tfr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t tfr:8;
+#else
+ uint64_t tfr:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uartx_thr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_thr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t thr:8;
+#else
+ uint64_t thr:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
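+/*
+ * Illustrative polled-transmit sketch, assuming cvmx_read_csr(),
+ * cvmx_write_csr() and the CVMX_MIO_UARTX_LSR()/CVMX_MIO_UARTX_THR()
+ * address macros provided with these definitions: wait for THRE in the
+ * line status register, then write one character to the holding
+ * register.
+ *
+ *	union cvmx_mio_uartx_lsr lsr;
+ *
+ *	do {
+ *		lsr.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(0));
+ *	} while (!lsr.s.thre);
+ *	cvmx_write_csr(CVMX_MIO_UARTX_THR(0), c);
+ */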
+union cvmx_mio_uartx_usr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_usr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t rff:1;
+ uint64_t rfne:1;
+ uint64_t tfe:1;
+ uint64_t tfnf:1;
+ uint64_t busy:1;
+#else
+ uint64_t busy:1;
+ uint64_t tfnf:1;
+ uint64_t tfe:1;
+ uint64_t rfne:1;
+ uint64_t rff:1;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_dlh {
+ uint64_t u64;
+ struct cvmx_mio_uart2_dlh_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t dlh:8;
+#else
+ uint64_t dlh:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_dll {
+ uint64_t u64;
+ struct cvmx_mio_uart2_dll_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t dll:8;
+#else
+ uint64_t dll:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_far {
+ uint64_t u64;
+ struct cvmx_mio_uart2_far_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t far:1;
+#else
+ uint64_t far:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_fcr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_fcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t rxtrig:2;
+ uint64_t txtrig:2;
+ uint64_t reserved_3_3:1;
+ uint64_t txfr:1;
+ uint64_t rxfr:1;
+ uint64_t en:1;
+#else
+ uint64_t en:1;
+ uint64_t rxfr:1;
+ uint64_t txfr:1;
+ uint64_t reserved_3_3:1;
+ uint64_t txtrig:2;
+ uint64_t rxtrig:2;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_htx {
+ uint64_t u64;
+ struct cvmx_mio_uart2_htx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t htx:1;
+#else
+ uint64_t htx:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_ier {
+ uint64_t u64;
+ struct cvmx_mio_uart2_ier_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t ptime:1;
+ uint64_t reserved_4_6:3;
+ uint64_t edssi:1;
+ uint64_t elsi:1;
+ uint64_t etbei:1;
+ uint64_t erbfi:1;
+#else
+ uint64_t erbfi:1;
+ uint64_t etbei:1;
+ uint64_t elsi:1;
+ uint64_t edssi:1;
+ uint64_t reserved_4_6:3;
+ uint64_t ptime:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_iir {
+ uint64_t u64;
+ struct cvmx_mio_uart2_iir_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t fen:2;
+ uint64_t reserved_4_5:2;
+ uint64_t iid:4;
+#else
+ uint64_t iid:4;
+ uint64_t reserved_4_5:2;
+ uint64_t fen:2;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_lcr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_lcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t dlab:1;
+ uint64_t brk:1;
+ uint64_t reserved_5_5:1;
+ uint64_t eps:1;
+ uint64_t pen:1;
+ uint64_t stop:1;
+ uint64_t cls:2;
+#else
+ uint64_t cls:2;
+ uint64_t stop:1;
+ uint64_t pen:1;
+ uint64_t eps:1;
+ uint64_t reserved_5_5:1;
+ uint64_t brk:1;
+ uint64_t dlab:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_lsr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_lsr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t ferr:1;
+ uint64_t temt:1;
+ uint64_t thre:1;
+ uint64_t bi:1;
+ uint64_t fe:1;
+ uint64_t pe:1;
+ uint64_t oe:1;
+ uint64_t dr:1;
+#else
+ uint64_t dr:1;
+ uint64_t oe:1;
+ uint64_t pe:1;
+ uint64_t fe:1;
+ uint64_t bi:1;
+ uint64_t thre:1;
+ uint64_t temt:1;
+ uint64_t ferr:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_mcr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_mcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t afce:1;
+ uint64_t loop:1;
+ uint64_t out2:1;
+ uint64_t out1:1;
+ uint64_t rts:1;
+ uint64_t dtr:1;
+#else
+ uint64_t dtr:1;
+ uint64_t rts:1;
+ uint64_t out1:1;
+ uint64_t out2:1;
+ uint64_t loop:1;
+ uint64_t afce:1;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_msr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_msr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t dcd:1;
+ uint64_t ri:1;
+ uint64_t dsr:1;
+ uint64_t cts:1;
+ uint64_t ddcd:1;
+ uint64_t teri:1;
+ uint64_t ddsr:1;
+ uint64_t dcts:1;
+#else
+ uint64_t dcts:1;
+ uint64_t ddsr:1;
+ uint64_t teri:1;
+ uint64_t ddcd:1;
+ uint64_t cts:1;
+ uint64_t dsr:1;
+ uint64_t ri:1;
+ uint64_t dcd:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_rbr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_rbr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t rbr:8;
+#else
+ uint64_t rbr:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_rfl {
+ uint64_t u64;
+ struct cvmx_mio_uart2_rfl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t rfl:7;
+#else
+ uint64_t rfl:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_rfw {
+ uint64_t u64;
+ struct cvmx_mio_uart2_rfw_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t rffe:1;
+ uint64_t rfpe:1;
+ uint64_t rfwd:8;
+#else
+ uint64_t rfwd:8;
+ uint64_t rfpe:1;
+ uint64_t rffe:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_sbcr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_sbcr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t sbcr:1;
+#else
+ uint64_t sbcr:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_scr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_scr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t scr:8;
+#else
+ uint64_t scr:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_sfe {
+ uint64_t u64;
+ struct cvmx_mio_uart2_sfe_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t sfe:1;
+#else
+ uint64_t sfe:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_srr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_srr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t stfr:1;
+ uint64_t srfr:1;
+ uint64_t usr:1;
+#else
+ uint64_t usr:1;
+ uint64_t srfr:1;
+ uint64_t stfr:1;
+ uint64_t reserved_3_63:61;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_srt {
+ uint64_t u64;
+ struct cvmx_mio_uart2_srt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t srt:2;
+#else
+ uint64_t srt:2;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_srts {
+ uint64_t u64;
+ struct cvmx_mio_uart2_srts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t srts:1;
+#else
+ uint64_t srts:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_stt {
+ uint64_t u64;
+ struct cvmx_mio_uart2_stt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t stt:2;
+#else
+ uint64_t stt:2;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_tfl {
+ uint64_t u64;
+ struct cvmx_mio_uart2_tfl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t tfl:7;
+#else
+ uint64_t tfl:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_tfr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_tfr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t tfr:8;
+#else
+ uint64_t tfr:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_thr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_thr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t thr:8;
+#else
+ uint64_t thr:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_mio_uart2_usr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_usr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t rff:1;
+ uint64_t rfne:1;
+ uint64_t tfe:1;
+ uint64_t tfnf:1;
+ uint64_t busy:1;
+#else
+ uint64_t busy:1;
+ uint64_t tfnf:1;
+ uint64_t tfe:1;
+ uint64_t rfne:1;
+ uint64_t rff:1;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+#endif
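
The UART unions above follow the usual OCTEON CSR pattern: the raw 64-bit register value lives in the .u64 member and the endianness-corrected bitfield view in .s. A minimal polled-transmit sketch, assuming the CVMX_MIO_UARTX_USR()/CVMX_MIO_UARTX_THR() address macros defined earlier in this header and the generic cvmx_read_csr()/cvmx_write_csr() accessors from asm/octeon/cvmx.h:

	/* Hypothetical helper, not part of this patch: busy-wait until the
	 * TX FIFO has room, then queue one byte on the given UART.
	 */
	static void uart_put_byte(int uart, uint8_t byte)
	{
		union cvmx_mio_uartx_usr usr;

		do {
			usr.u64 = cvmx_read_csr(CVMX_MIO_UARTX_USR(uart));
		} while (!usr.s.tfnf);	/* tfnf: transmit FIFO not full */

		cvmx_write_csr(CVMX_MIO_UARTX_THR(uart), byte);
	}
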
diff --git a/arch/mips/include/asm/octeon/cvmx-mixx-defs.h b/arch/mips/include/asm/octeon/cvmx-mixx-defs.h
new file mode 100644
index 000000000..cd60d43e8
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-mixx-defs.h
@@ -0,0 +1,430 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_MIXX_DEFS_H__
+#define __CVMX_MIXX_DEFS_H__
+
+#define CVMX_MIXX_BIST(offset) (CVMX_ADD_IO_SEG(0x0001070000100078ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_CTL(offset) (CVMX_ADD_IO_SEG(0x0001070000100020ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_INTENA(offset) (CVMX_ADD_IO_SEG(0x0001070000100050ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_IRCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000100030ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_IRHWM(offset) (CVMX_ADD_IO_SEG(0x0001070000100028ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_IRING1(offset) (CVMX_ADD_IO_SEG(0x0001070000100010ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_IRING2(offset) (CVMX_ADD_IO_SEG(0x0001070000100018ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_ISR(offset) (CVMX_ADD_IO_SEG(0x0001070000100048ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_ORCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000100040ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_ORHWM(offset) (CVMX_ADD_IO_SEG(0x0001070000100038ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_ORING1(offset) (CVMX_ADD_IO_SEG(0x0001070000100000ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_ORING2(offset) (CVMX_ADD_IO_SEG(0x0001070000100008ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_REMCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000100058ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_TSCTL(offset) (CVMX_ADD_IO_SEG(0x0001070000100068ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_TSTAMP(offset) (CVMX_ADD_IO_SEG(0x0001070000100060ull) + ((offset) & 1) * 2048)
+
+union cvmx_mixx_bist {
+ uint64_t u64;
+ struct cvmx_mixx_bist_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t opfdat:1;
+ uint64_t mrgdat:1;
+ uint64_t mrqdat:1;
+ uint64_t ipfdat:1;
+ uint64_t irfdat:1;
+ uint64_t orfdat:1;
+#else
+ uint64_t orfdat:1;
+ uint64_t irfdat:1;
+ uint64_t ipfdat:1;
+ uint64_t mrqdat:1;
+ uint64_t mrgdat:1;
+ uint64_t opfdat:1;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+ struct cvmx_mixx_bist_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t mrqdat:1;
+ uint64_t ipfdat:1;
+ uint64_t irfdat:1;
+ uint64_t orfdat:1;
+#else
+ uint64_t orfdat:1;
+ uint64_t irfdat:1;
+ uint64_t ipfdat:1;
+ uint64_t mrqdat:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } cn52xx;
+};
+
+union cvmx_mixx_ctl {
+ uint64_t u64;
+ struct cvmx_mixx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t ts_thresh:4;
+ uint64_t crc_strip:1;
+ uint64_t busy:1;
+ uint64_t en:1;
+ uint64_t reset:1;
+ uint64_t lendian:1;
+ uint64_t nbtarb:1;
+ uint64_t mrq_hwm:2;
+#else
+ uint64_t mrq_hwm:2;
+ uint64_t nbtarb:1;
+ uint64_t lendian:1;
+ uint64_t reset:1;
+ uint64_t en:1;
+ uint64_t busy:1;
+ uint64_t crc_strip:1;
+ uint64_t ts_thresh:4;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+ struct cvmx_mixx_ctl_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t crc_strip:1;
+ uint64_t busy:1;
+ uint64_t en:1;
+ uint64_t reset:1;
+ uint64_t lendian:1;
+ uint64_t nbtarb:1;
+ uint64_t mrq_hwm:2;
+#else
+ uint64_t mrq_hwm:2;
+ uint64_t nbtarb:1;
+ uint64_t lendian:1;
+ uint64_t reset:1;
+ uint64_t en:1;
+ uint64_t busy:1;
+ uint64_t crc_strip:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } cn52xx;
+};
+
+union cvmx_mixx_intena {
+ uint64_t u64;
+ struct cvmx_mixx_intena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t tsena:1;
+ uint64_t orunena:1;
+ uint64_t irunena:1;
+ uint64_t data_drpena:1;
+ uint64_t ithena:1;
+ uint64_t othena:1;
+ uint64_t ivfena:1;
+ uint64_t ovfena:1;
+#else
+ uint64_t ovfena:1;
+ uint64_t ivfena:1;
+ uint64_t othena:1;
+ uint64_t ithena:1;
+ uint64_t data_drpena:1;
+ uint64_t irunena:1;
+ uint64_t orunena:1;
+ uint64_t tsena:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+ struct cvmx_mixx_intena_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t orunena:1;
+ uint64_t irunena:1;
+ uint64_t data_drpena:1;
+ uint64_t ithena:1;
+ uint64_t othena:1;
+ uint64_t ivfena:1;
+ uint64_t ovfena:1;
+#else
+ uint64_t ovfena:1;
+ uint64_t ivfena:1;
+ uint64_t othena:1;
+ uint64_t ithena:1;
+ uint64_t data_drpena:1;
+ uint64_t irunena:1;
+ uint64_t orunena:1;
+ uint64_t reserved_7_63:57;
+#endif
+ } cn52xx;
+};
+
+union cvmx_mixx_ircnt {
+ uint64_t u64;
+ struct cvmx_mixx_ircnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t ircnt:20;
+#else
+ uint64_t ircnt:20;
+ uint64_t reserved_20_63:44;
+#endif
+ } s;
+};
+
+union cvmx_mixx_irhwm {
+ uint64_t u64;
+ struct cvmx_mixx_irhwm_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t ibplwm:20;
+ uint64_t irhwm:20;
+#else
+ uint64_t irhwm:20;
+ uint64_t ibplwm:20;
+ uint64_t reserved_40_63:24;
+#endif
+ } s;
+};
+
+union cvmx_mixx_iring1 {
+ uint64_t u64;
+ struct cvmx_mixx_iring1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63:4;
+ uint64_t isize:20;
+ uint64_t ibase:37;
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t ibase:37;
+ uint64_t isize:20;
+ uint64_t reserved_60_63:4;
+#endif
+ } s;
+ struct cvmx_mixx_iring1_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63:4;
+ uint64_t isize:20;
+ uint64_t reserved_36_39:4;
+ uint64_t ibase:33;
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t ibase:33;
+ uint64_t reserved_36_39:4;
+ uint64_t isize:20;
+ uint64_t reserved_60_63:4;
+#endif
+ } cn52xx;
+};
+
+union cvmx_mixx_iring2 {
+ uint64_t u64;
+ struct cvmx_mixx_iring2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63:12;
+ uint64_t itlptr:20;
+ uint64_t reserved_20_31:12;
+ uint64_t idbell:20;
+#else
+ uint64_t idbell:20;
+ uint64_t reserved_20_31:12;
+ uint64_t itlptr:20;
+ uint64_t reserved_52_63:12;
+#endif
+ } s;
+};
+
+union cvmx_mixx_isr {
+ uint64_t u64;
+ struct cvmx_mixx_isr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t ts:1;
+ uint64_t orun:1;
+ uint64_t irun:1;
+ uint64_t data_drp:1;
+ uint64_t irthresh:1;
+ uint64_t orthresh:1;
+ uint64_t idblovf:1;
+ uint64_t odblovf:1;
+#else
+ uint64_t odblovf:1;
+ uint64_t idblovf:1;
+ uint64_t orthresh:1;
+ uint64_t irthresh:1;
+ uint64_t data_drp:1;
+ uint64_t irun:1;
+ uint64_t orun:1;
+ uint64_t ts:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+ struct cvmx_mixx_isr_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t orun:1;
+ uint64_t irun:1;
+ uint64_t data_drp:1;
+ uint64_t irthresh:1;
+ uint64_t orthresh:1;
+ uint64_t idblovf:1;
+ uint64_t odblovf:1;
+#else
+ uint64_t odblovf:1;
+ uint64_t idblovf:1;
+ uint64_t orthresh:1;
+ uint64_t irthresh:1;
+ uint64_t data_drp:1;
+ uint64_t irun:1;
+ uint64_t orun:1;
+ uint64_t reserved_7_63:57;
+#endif
+ } cn52xx;
+};
+
+union cvmx_mixx_orcnt {
+ uint64_t u64;
+ struct cvmx_mixx_orcnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t orcnt:20;
+#else
+ uint64_t orcnt:20;
+ uint64_t reserved_20_63:44;
+#endif
+ } s;
+};
+
+union cvmx_mixx_orhwm {
+ uint64_t u64;
+ struct cvmx_mixx_orhwm_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t orhwm:20;
+#else
+ uint64_t orhwm:20;
+ uint64_t reserved_20_63:44;
+#endif
+ } s;
+};
+
+union cvmx_mixx_oring1 {
+ uint64_t u64;
+ struct cvmx_mixx_oring1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63:4;
+ uint64_t osize:20;
+ uint64_t obase:37;
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t obase:37;
+ uint64_t osize:20;
+ uint64_t reserved_60_63:4;
+#endif
+ } s;
+ struct cvmx_mixx_oring1_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63:4;
+ uint64_t osize:20;
+ uint64_t reserved_36_39:4;
+ uint64_t obase:33;
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t obase:33;
+ uint64_t reserved_36_39:4;
+ uint64_t osize:20;
+ uint64_t reserved_60_63:4;
+#endif
+ } cn52xx;
+};
+
+union cvmx_mixx_oring2 {
+ uint64_t u64;
+ struct cvmx_mixx_oring2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63:12;
+ uint64_t otlptr:20;
+ uint64_t reserved_20_31:12;
+ uint64_t odbell:20;
+#else
+ uint64_t odbell:20;
+ uint64_t reserved_20_31:12;
+ uint64_t otlptr:20;
+ uint64_t reserved_52_63:12;
+#endif
+ } s;
+};
+
+union cvmx_mixx_remcnt {
+ uint64_t u64;
+ struct cvmx_mixx_remcnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63:12;
+ uint64_t iremcnt:20;
+ uint64_t reserved_20_31:12;
+ uint64_t oremcnt:20;
+#else
+ uint64_t oremcnt:20;
+ uint64_t reserved_20_31:12;
+ uint64_t iremcnt:20;
+ uint64_t reserved_52_63:12;
+#endif
+ } s;
+};
+
+union cvmx_mixx_tsctl {
+ uint64_t u64;
+ struct cvmx_mixx_tsctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63:43;
+ uint64_t tsavl:5;
+ uint64_t reserved_13_15:3;
+ uint64_t tstot:5;
+ uint64_t reserved_5_7:3;
+ uint64_t tscnt:5;
+#else
+ uint64_t tscnt:5;
+ uint64_t reserved_5_7:3;
+ uint64_t tstot:5;
+ uint64_t reserved_13_15:3;
+ uint64_t tsavl:5;
+ uint64_t reserved_21_63:43;
+#endif
+ } s;
+};
+
+union cvmx_mixx_tstamp {
+ uint64_t u64;
+ struct cvmx_mixx_tstamp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t tstamp:64;
+#else
+ uint64_t tstamp:64;
+#endif
+ } s;
+};
+
+#endif
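
The MIX registers use the same union layout; the management-port code typically reads CVMX_MIXX_ISR(), inspects the cause bits through .s, and writes the value straight back to acknowledge them. A rough sketch, assuming the cvmx_read_csr()/cvmx_write_csr() accessors from asm/octeon/cvmx.h:

	/* Sketch only, not part of this patch: acknowledge any pending
	 * MIX interrupt causes on port 0.
	 */
	union cvmx_mixx_isr isr;

	isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(0));
	if (isr.s.odblovf || isr.s.idblovf)
		pr_debug("MIX doorbell overflow\n");
	cvmx_write_csr(CVMX_MIXX_ISR(0), isr.u64);	/* write back to clear */
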
diff --git a/arch/mips/include/asm/octeon/cvmx-npei-defs.h b/arch/mips/include/asm/octeon/cvmx-npei-defs.h
new file mode 100644
index 000000000..6a51b1ef8
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-npei-defs.h
@@ -0,0 +1,3925 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_NPEI_DEFS_H__
+#define __CVMX_NPEI_DEFS_H__
+
+#define CVMX_NPEI_BAR1_INDEXX(offset) (0x0000000000000000ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_BIST_STATUS (0x0000000000000580ull)
+#define CVMX_NPEI_BIST_STATUS2 (0x0000000000000680ull)
+#define CVMX_NPEI_CTL_PORT0 (0x0000000000000250ull)
+#define CVMX_NPEI_CTL_PORT1 (0x0000000000000260ull)
+#define CVMX_NPEI_CTL_STATUS (0x0000000000000570ull)
+#define CVMX_NPEI_CTL_STATUS2 (0x0000000000003C00ull)
+#define CVMX_NPEI_DATA_OUT_CNT (0x00000000000005F0ull)
+#define CVMX_NPEI_DBG_DATA (0x0000000000000510ull)
+#define CVMX_NPEI_DBG_SELECT (0x0000000000000500ull)
+#define CVMX_NPEI_DMA0_INT_LEVEL (0x00000000000005C0ull)
+#define CVMX_NPEI_DMA1_INT_LEVEL (0x00000000000005D0ull)
+#define CVMX_NPEI_DMAX_COUNTS(offset) (0x0000000000000450ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMAX_DBELL(offset) (0x00000000000003B0ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMAX_IBUFF_SADDR(offset) (0x0000000000000400ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMAX_NADDR(offset) (0x00000000000004A0ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMA_CNTS (0x00000000000005E0ull)
+#define CVMX_NPEI_DMA_CONTROL (0x00000000000003A0ull)
+#define CVMX_NPEI_DMA_PCIE_REQ_NUM (0x00000000000005B0ull)
+#define CVMX_NPEI_DMA_STATE1 (0x00000000000006C0ull)
+#define CVMX_NPEI_DMA_STATE1_P1 (0x0000000000000680ull)
+#define CVMX_NPEI_DMA_STATE2 (0x00000000000006D0ull)
+#define CVMX_NPEI_DMA_STATE2_P1 (0x0000000000000690ull)
+#define CVMX_NPEI_DMA_STATE3_P1 (0x00000000000006A0ull)
+#define CVMX_NPEI_DMA_STATE4_P1 (0x00000000000006B0ull)
+#define CVMX_NPEI_DMA_STATE5_P1 (0x00000000000006C0ull)
+#define CVMX_NPEI_INT_A_ENB (0x0000000000000560ull)
+#define CVMX_NPEI_INT_A_ENB2 (0x0000000000003CE0ull)
+#define CVMX_NPEI_INT_A_SUM (0x0000000000000550ull)
+#define CVMX_NPEI_INT_ENB (0x0000000000000540ull)
+#define CVMX_NPEI_INT_ENB2 (0x0000000000003CD0ull)
+#define CVMX_NPEI_INT_INFO (0x0000000000000590ull)
+#define CVMX_NPEI_INT_SUM (0x0000000000000530ull)
+#define CVMX_NPEI_INT_SUM2 (0x0000000000003CC0ull)
+#define CVMX_NPEI_LAST_WIN_RDATA0 (0x0000000000000600ull)
+#define CVMX_NPEI_LAST_WIN_RDATA1 (0x0000000000000610ull)
+#define CVMX_NPEI_MEM_ACCESS_CTL (0x00000000000004F0ull)
+#define CVMX_NPEI_MEM_ACCESS_SUBIDX(offset) (0x0000000000000280ull + ((offset) & 31) * 16 - 16*12)
+#define CVMX_NPEI_MSI_ENB0 (0x0000000000003C50ull)
+#define CVMX_NPEI_MSI_ENB1 (0x0000000000003C60ull)
+#define CVMX_NPEI_MSI_ENB2 (0x0000000000003C70ull)
+#define CVMX_NPEI_MSI_ENB3 (0x0000000000003C80ull)
+#define CVMX_NPEI_MSI_RCV0 (0x0000000000003C10ull)
+#define CVMX_NPEI_MSI_RCV1 (0x0000000000003C20ull)
+#define CVMX_NPEI_MSI_RCV2 (0x0000000000003C30ull)
+#define CVMX_NPEI_MSI_RCV3 (0x0000000000003C40ull)
+#define CVMX_NPEI_MSI_RD_MAP (0x0000000000003CA0ull)
+#define CVMX_NPEI_MSI_W1C_ENB0 (0x0000000000003CF0ull)
+#define CVMX_NPEI_MSI_W1C_ENB1 (0x0000000000003D00ull)
+#define CVMX_NPEI_MSI_W1C_ENB2 (0x0000000000003D10ull)
+#define CVMX_NPEI_MSI_W1C_ENB3 (0x0000000000003D20ull)
+#define CVMX_NPEI_MSI_W1S_ENB0 (0x0000000000003D30ull)
+#define CVMX_NPEI_MSI_W1S_ENB1 (0x0000000000003D40ull)
+#define CVMX_NPEI_MSI_W1S_ENB2 (0x0000000000003D50ull)
+#define CVMX_NPEI_MSI_W1S_ENB3 (0x0000000000003D60ull)
+#define CVMX_NPEI_MSI_WR_MAP (0x0000000000003C90ull)
+#define CVMX_NPEI_PCIE_CREDIT_CNT (0x0000000000003D70ull)
+#define CVMX_NPEI_PCIE_MSI_RCV (0x0000000000003CB0ull)
+#define CVMX_NPEI_PCIE_MSI_RCV_B1 (0x0000000000000650ull)
+#define CVMX_NPEI_PCIE_MSI_RCV_B2 (0x0000000000000660ull)
+#define CVMX_NPEI_PCIE_MSI_RCV_B3 (0x0000000000000670ull)
+#define CVMX_NPEI_PKTX_CNTS(offset) (0x0000000000002400ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_BADDR(offset) (0x0000000000002800ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) (0x0000000000002C00ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) (0x0000000000003000ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_HEADER(offset) (0x0000000000003400ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_IN_BP(offset) (0x0000000000003800ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_SLIST_BADDR(offset) (0x0000000000001400ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) (0x0000000000001800ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) (0x0000000000001C00ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKT_CNT_INT (0x0000000000001110ull)
+#define CVMX_NPEI_PKT_CNT_INT_ENB (0x0000000000001130ull)
+#define CVMX_NPEI_PKT_DATA_OUT_ES (0x00000000000010B0ull)
+#define CVMX_NPEI_PKT_DATA_OUT_NS (0x00000000000010A0ull)
+#define CVMX_NPEI_PKT_DATA_OUT_ROR (0x0000000000001090ull)
+#define CVMX_NPEI_PKT_DPADDR (0x0000000000001080ull)
+#define CVMX_NPEI_PKT_INPUT_CONTROL (0x0000000000001150ull)
+#define CVMX_NPEI_PKT_INSTR_ENB (0x0000000000001000ull)
+#define CVMX_NPEI_PKT_INSTR_RD_SIZE (0x0000000000001190ull)
+#define CVMX_NPEI_PKT_INSTR_SIZE (0x0000000000001020ull)
+#define CVMX_NPEI_PKT_INT_LEVELS (0x0000000000001100ull)
+#define CVMX_NPEI_PKT_IN_BP (0x00000000000006B0ull)
+#define CVMX_NPEI_PKT_IN_DONEX_CNTS(offset) (0x0000000000002000ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKT_IN_INSTR_COUNTS (0x00000000000006A0ull)
+#define CVMX_NPEI_PKT_IN_PCIE_PORT (0x00000000000011A0ull)
+#define CVMX_NPEI_PKT_IPTR (0x0000000000001070ull)
+#define CVMX_NPEI_PKT_OUTPUT_WMARK (0x0000000000001160ull)
+#define CVMX_NPEI_PKT_OUT_BMODE (0x00000000000010D0ull)
+#define CVMX_NPEI_PKT_OUT_ENB (0x0000000000001010ull)
+#define CVMX_NPEI_PKT_PCIE_PORT (0x00000000000010E0ull)
+#define CVMX_NPEI_PKT_PORT_IN_RST (0x0000000000000690ull)
+#define CVMX_NPEI_PKT_SLIST_ES (0x0000000000001050ull)
+#define CVMX_NPEI_PKT_SLIST_ID_SIZE (0x0000000000001180ull)
+#define CVMX_NPEI_PKT_SLIST_NS (0x0000000000001040ull)
+#define CVMX_NPEI_PKT_SLIST_ROR (0x0000000000001030ull)
+#define CVMX_NPEI_PKT_TIME_INT (0x0000000000001120ull)
+#define CVMX_NPEI_PKT_TIME_INT_ENB (0x0000000000001140ull)
+#define CVMX_NPEI_RSL_INT_BLOCKS (0x0000000000000520ull)
+#define CVMX_NPEI_SCRATCH_1 (0x0000000000000270ull)
+#define CVMX_NPEI_STATE1 (0x0000000000000620ull)
+#define CVMX_NPEI_STATE2 (0x0000000000000630ull)
+#define CVMX_NPEI_STATE3 (0x0000000000000640ull)
+#define CVMX_NPEI_WINDOW_CTL (0x0000000000000380ull)
+#define CVMX_NPEI_WIN_RD_ADDR (0x0000000000000210ull)
+#define CVMX_NPEI_WIN_RD_DATA (0x0000000000000240ull)
+#define CVMX_NPEI_WIN_WR_ADDR (0x0000000000000200ull)
+#define CVMX_NPEI_WIN_WR_DATA (0x0000000000000220ull)
+#define CVMX_NPEI_WIN_WR_MASK (0x0000000000000230ull)
+
+union cvmx_npei_bar1_indexx {
+ uint32_t u32;
+ struct cvmx_npei_bar1_indexx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_18_31:14;
+ uint32_t addr_idx:14;
+ uint32_t ca:1;
+ uint32_t end_swp:2;
+ uint32_t addr_v:1;
+#else
+ uint32_t addr_v:1;
+ uint32_t end_swp:2;
+ uint32_t ca:1;
+ uint32_t addr_idx:14;
+ uint32_t reserved_18_31:14;
+#endif
+ } s;
+};
+
+union cvmx_npei_bist_status {
+ uint64_t u64;
+ struct cvmx_npei_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pkt_rdf:1;
+ uint64_t reserved_60_62:3;
+ uint64_t pcr_gim:1;
+ uint64_t pkt_pif:1;
+ uint64_t pcsr_int:1;
+ uint64_t pcsr_im:1;
+ uint64_t pcsr_cnt:1;
+ uint64_t pcsr_id:1;
+ uint64_t pcsr_sl:1;
+ uint64_t reserved_50_52:3;
+ uint64_t pkt_ind:1;
+ uint64_t pkt_slm:1;
+ uint64_t reserved_36_47:12;
+ uint64_t d0_pst:1;
+ uint64_t d1_pst:1;
+ uint64_t d2_pst:1;
+ uint64_t d3_pst:1;
+ uint64_t reserved_31_31:1;
+ uint64_t n2p0_c:1;
+ uint64_t n2p0_o:1;
+ uint64_t n2p1_c:1;
+ uint64_t n2p1_o:1;
+ uint64_t cpl_p0:1;
+ uint64_t cpl_p1:1;
+ uint64_t p2n1_po:1;
+ uint64_t p2n1_no:1;
+ uint64_t p2n1_co:1;
+ uint64_t p2n0_po:1;
+ uint64_t p2n0_no:1;
+ uint64_t p2n0_co:1;
+ uint64_t p2n0_c0:1;
+ uint64_t p2n0_c1:1;
+ uint64_t p2n0_n:1;
+ uint64_t p2n0_p0:1;
+ uint64_t p2n0_p1:1;
+ uint64_t p2n1_c0:1;
+ uint64_t p2n1_c1:1;
+ uint64_t p2n1_n:1;
+ uint64_t p2n1_p0:1;
+ uint64_t p2n1_p1:1;
+ uint64_t csm0:1;
+ uint64_t csm1:1;
+ uint64_t dif0:1;
+ uint64_t dif1:1;
+ uint64_t dif2:1;
+ uint64_t dif3:1;
+ uint64_t reserved_2_2:1;
+ uint64_t msi:1;
+ uint64_t ncb_cmd:1;
+#else
+ uint64_t ncb_cmd:1;
+ uint64_t msi:1;
+ uint64_t reserved_2_2:1;
+ uint64_t dif3:1;
+ uint64_t dif2:1;
+ uint64_t dif1:1;
+ uint64_t dif0:1;
+ uint64_t csm1:1;
+ uint64_t csm0:1;
+ uint64_t p2n1_p1:1;
+ uint64_t p2n1_p0:1;
+ uint64_t p2n1_n:1;
+ uint64_t p2n1_c1:1;
+ uint64_t p2n1_c0:1;
+ uint64_t p2n0_p1:1;
+ uint64_t p2n0_p0:1;
+ uint64_t p2n0_n:1;
+ uint64_t p2n0_c1:1;
+ uint64_t p2n0_c0:1;
+ uint64_t p2n0_co:1;
+ uint64_t p2n0_no:1;
+ uint64_t p2n0_po:1;
+ uint64_t p2n1_co:1;
+ uint64_t p2n1_no:1;
+ uint64_t p2n1_po:1;
+ uint64_t cpl_p1:1;
+ uint64_t cpl_p0:1;
+ uint64_t n2p1_o:1;
+ uint64_t n2p1_c:1;
+ uint64_t n2p0_o:1;
+ uint64_t n2p0_c:1;
+ uint64_t reserved_31_31:1;
+ uint64_t d3_pst:1;
+ uint64_t d2_pst:1;
+ uint64_t d1_pst:1;
+ uint64_t d0_pst:1;
+ uint64_t reserved_36_47:12;
+ uint64_t pkt_slm:1;
+ uint64_t pkt_ind:1;
+ uint64_t reserved_50_52:3;
+ uint64_t pcsr_sl:1;
+ uint64_t pcsr_id:1;
+ uint64_t pcsr_cnt:1;
+ uint64_t pcsr_im:1;
+ uint64_t pcsr_int:1;
+ uint64_t pkt_pif:1;
+ uint64_t pcr_gim:1;
+ uint64_t reserved_60_62:3;
+ uint64_t pkt_rdf:1;
+#endif
+ } s;
+ struct cvmx_npei_bist_status_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pkt_rdf:1;
+ uint64_t reserved_60_62:3;
+ uint64_t pcr_gim:1;
+ uint64_t pkt_pif:1;
+ uint64_t pcsr_int:1;
+ uint64_t pcsr_im:1;
+ uint64_t pcsr_cnt:1;
+ uint64_t pcsr_id:1;
+ uint64_t pcsr_sl:1;
+ uint64_t pkt_imem:1;
+ uint64_t pkt_pfm:1;
+ uint64_t pkt_pof:1;
+ uint64_t reserved_48_49:2;
+ uint64_t pkt_pop0:1;
+ uint64_t pkt_pop1:1;
+ uint64_t d0_mem:1;
+ uint64_t d1_mem:1;
+ uint64_t d2_mem:1;
+ uint64_t d3_mem:1;
+ uint64_t d4_mem:1;
+ uint64_t ds_mem:1;
+ uint64_t reserved_36_39:4;
+ uint64_t d0_pst:1;
+ uint64_t d1_pst:1;
+ uint64_t d2_pst:1;
+ uint64_t d3_pst:1;
+ uint64_t d4_pst:1;
+ uint64_t n2p0_c:1;
+ uint64_t n2p0_o:1;
+ uint64_t n2p1_c:1;
+ uint64_t n2p1_o:1;
+ uint64_t cpl_p0:1;
+ uint64_t cpl_p1:1;
+ uint64_t p2n1_po:1;
+ uint64_t p2n1_no:1;
+ uint64_t p2n1_co:1;
+ uint64_t p2n0_po:1;
+ uint64_t p2n0_no:1;
+ uint64_t p2n0_co:1;
+ uint64_t p2n0_c0:1;
+ uint64_t p2n0_c1:1;
+ uint64_t p2n0_n:1;
+ uint64_t p2n0_p0:1;
+ uint64_t p2n0_p1:1;
+ uint64_t p2n1_c0:1;
+ uint64_t p2n1_c1:1;
+ uint64_t p2n1_n:1;
+ uint64_t p2n1_p0:1;
+ uint64_t p2n1_p1:1;
+ uint64_t csm0:1;
+ uint64_t csm1:1;
+ uint64_t dif0:1;
+ uint64_t dif1:1;
+ uint64_t dif2:1;
+ uint64_t dif3:1;
+ uint64_t dif4:1;
+ uint64_t msi:1;
+ uint64_t ncb_cmd:1;
+#else
+ uint64_t ncb_cmd:1;
+ uint64_t msi:1;
+ uint64_t dif4:1;
+ uint64_t dif3:1;
+ uint64_t dif2:1;
+ uint64_t dif1:1;
+ uint64_t dif0:1;
+ uint64_t csm1:1;
+ uint64_t csm0:1;
+ uint64_t p2n1_p1:1;
+ uint64_t p2n1_p0:1;
+ uint64_t p2n1_n:1;
+ uint64_t p2n1_c1:1;
+ uint64_t p2n1_c0:1;
+ uint64_t p2n0_p1:1;
+ uint64_t p2n0_p0:1;
+ uint64_t p2n0_n:1;
+ uint64_t p2n0_c1:1;
+ uint64_t p2n0_c0:1;
+ uint64_t p2n0_co:1;
+ uint64_t p2n0_no:1;
+ uint64_t p2n0_po:1;
+ uint64_t p2n1_co:1;
+ uint64_t p2n1_no:1;
+ uint64_t p2n1_po:1;
+ uint64_t cpl_p1:1;
+ uint64_t cpl_p0:1;
+ uint64_t n2p1_o:1;
+ uint64_t n2p1_c:1;
+ uint64_t n2p0_o:1;
+ uint64_t n2p0_c:1;
+ uint64_t d4_pst:1;
+ uint64_t d3_pst:1;
+ uint64_t d2_pst:1;
+ uint64_t d1_pst:1;
+ uint64_t d0_pst:1;
+ uint64_t reserved_36_39:4;
+ uint64_t ds_mem:1;
+ uint64_t d4_mem:1;
+ uint64_t d3_mem:1;
+ uint64_t d2_mem:1;
+ uint64_t d1_mem:1;
+ uint64_t d0_mem:1;
+ uint64_t pkt_pop1:1;
+ uint64_t pkt_pop0:1;
+ uint64_t reserved_48_49:2;
+ uint64_t pkt_pof:1;
+ uint64_t pkt_pfm:1;
+ uint64_t pkt_imem:1;
+ uint64_t pcsr_sl:1;
+ uint64_t pcsr_id:1;
+ uint64_t pcsr_cnt:1;
+ uint64_t pcsr_im:1;
+ uint64_t pcsr_int:1;
+ uint64_t pkt_pif:1;
+ uint64_t pcr_gim:1;
+ uint64_t reserved_60_62:3;
+ uint64_t pkt_rdf:1;
+#endif
+ } cn52xx;
+ struct cvmx_npei_bist_status_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63:18;
+ uint64_t d0_mem0:1;
+ uint64_t d1_mem1:1;
+ uint64_t d2_mem2:1;
+ uint64_t d3_mem3:1;
+ uint64_t dr0_mem:1;
+ uint64_t d0_mem:1;
+ uint64_t d1_mem:1;
+ uint64_t d2_mem:1;
+ uint64_t d3_mem:1;
+ uint64_t dr1_mem:1;
+ uint64_t d0_pst:1;
+ uint64_t d1_pst:1;
+ uint64_t d2_pst:1;
+ uint64_t d3_pst:1;
+ uint64_t dr2_mem:1;
+ uint64_t n2p0_c:1;
+ uint64_t n2p0_o:1;
+ uint64_t n2p1_c:1;
+ uint64_t n2p1_o:1;
+ uint64_t cpl_p0:1;
+ uint64_t cpl_p1:1;
+ uint64_t p2n1_po:1;
+ uint64_t p2n1_no:1;
+ uint64_t p2n1_co:1;
+ uint64_t p2n0_po:1;
+ uint64_t p2n0_no:1;
+ uint64_t p2n0_co:1;
+ uint64_t p2n0_c0:1;
+ uint64_t p2n0_c1:1;
+ uint64_t p2n0_n:1;
+ uint64_t p2n0_p0:1;
+ uint64_t p2n0_p1:1;
+ uint64_t p2n1_c0:1;
+ uint64_t p2n1_c1:1;
+ uint64_t p2n1_n:1;
+ uint64_t p2n1_p0:1;
+ uint64_t p2n1_p1:1;
+ uint64_t csm0:1;
+ uint64_t csm1:1;
+ uint64_t dif0:1;
+ uint64_t dif1:1;
+ uint64_t dif2:1;
+ uint64_t dif3:1;
+ uint64_t dr3_mem:1;
+ uint64_t msi:1;
+ uint64_t ncb_cmd:1;
+#else
+ uint64_t ncb_cmd:1;
+ uint64_t msi:1;
+ uint64_t dr3_mem:1;
+ uint64_t dif3:1;
+ uint64_t dif2:1;
+ uint64_t dif1:1;
+ uint64_t dif0:1;
+ uint64_t csm1:1;
+ uint64_t csm0:1;
+ uint64_t p2n1_p1:1;
+ uint64_t p2n1_p0:1;
+ uint64_t p2n1_n:1;
+ uint64_t p2n1_c1:1;
+ uint64_t p2n1_c0:1;
+ uint64_t p2n0_p1:1;
+ uint64_t p2n0_p0:1;
+ uint64_t p2n0_n:1;
+ uint64_t p2n0_c1:1;
+ uint64_t p2n0_c0:1;
+ uint64_t p2n0_co:1;
+ uint64_t p2n0_no:1;
+ uint64_t p2n0_po:1;
+ uint64_t p2n1_co:1;
+ uint64_t p2n1_no:1;
+ uint64_t p2n1_po:1;
+ uint64_t cpl_p1:1;
+ uint64_t cpl_p0:1;
+ uint64_t n2p1_o:1;
+ uint64_t n2p1_c:1;
+ uint64_t n2p0_o:1;
+ uint64_t n2p0_c:1;
+ uint64_t dr2_mem:1;
+ uint64_t d3_pst:1;
+ uint64_t d2_pst:1;
+ uint64_t d1_pst:1;
+ uint64_t d0_pst:1;
+ uint64_t dr1_mem:1;
+ uint64_t d3_mem:1;
+ uint64_t d2_mem:1;
+ uint64_t d1_mem:1;
+ uint64_t d0_mem:1;
+ uint64_t dr0_mem:1;
+ uint64_t d3_mem3:1;
+ uint64_t d2_mem2:1;
+ uint64_t d1_mem1:1;
+ uint64_t d0_mem0:1;
+ uint64_t reserved_46_63:18;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_bist_status_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63:6;
+ uint64_t pcsr_int:1;
+ uint64_t pcsr_im:1;
+ uint64_t pcsr_cnt:1;
+ uint64_t pcsr_id:1;
+ uint64_t pcsr_sl:1;
+ uint64_t pkt_pout:1;
+ uint64_t pkt_imem:1;
+ uint64_t pkt_cntm:1;
+ uint64_t pkt_ind:1;
+ uint64_t pkt_slm:1;
+ uint64_t pkt_odf:1;
+ uint64_t pkt_oif:1;
+ uint64_t pkt_out:1;
+ uint64_t pkt_i0:1;
+ uint64_t pkt_i1:1;
+ uint64_t pkt_s0:1;
+ uint64_t pkt_s1:1;
+ uint64_t d0_mem:1;
+ uint64_t d1_mem:1;
+ uint64_t d2_mem:1;
+ uint64_t d3_mem:1;
+ uint64_t d4_mem:1;
+ uint64_t d0_pst:1;
+ uint64_t d1_pst:1;
+ uint64_t d2_pst:1;
+ uint64_t d3_pst:1;
+ uint64_t d4_pst:1;
+ uint64_t n2p0_c:1;
+ uint64_t n2p0_o:1;
+ uint64_t n2p1_c:1;
+ uint64_t n2p1_o:1;
+ uint64_t cpl_p0:1;
+ uint64_t cpl_p1:1;
+ uint64_t p2n1_po:1;
+ uint64_t p2n1_no:1;
+ uint64_t p2n1_co:1;
+ uint64_t p2n0_po:1;
+ uint64_t p2n0_no:1;
+ uint64_t p2n0_co:1;
+ uint64_t p2n0_c0:1;
+ uint64_t p2n0_c1:1;
+ uint64_t p2n0_n:1;
+ uint64_t p2n0_p0:1;
+ uint64_t p2n0_p1:1;
+ uint64_t p2n1_c0:1;
+ uint64_t p2n1_c1:1;
+ uint64_t p2n1_n:1;
+ uint64_t p2n1_p0:1;
+ uint64_t p2n1_p1:1;
+ uint64_t csm0:1;
+ uint64_t csm1:1;
+ uint64_t dif0:1;
+ uint64_t dif1:1;
+ uint64_t dif2:1;
+ uint64_t dif3:1;
+ uint64_t dif4:1;
+ uint64_t msi:1;
+ uint64_t ncb_cmd:1;
+#else
+ uint64_t ncb_cmd:1;
+ uint64_t msi:1;
+ uint64_t dif4:1;
+ uint64_t dif3:1;
+ uint64_t dif2:1;
+ uint64_t dif1:1;
+ uint64_t dif0:1;
+ uint64_t csm1:1;
+ uint64_t csm0:1;
+ uint64_t p2n1_p1:1;
+ uint64_t p2n1_p0:1;
+ uint64_t p2n1_n:1;
+ uint64_t p2n1_c1:1;
+ uint64_t p2n1_c0:1;
+ uint64_t p2n0_p1:1;
+ uint64_t p2n0_p0:1;
+ uint64_t p2n0_n:1;
+ uint64_t p2n0_c1:1;
+ uint64_t p2n0_c0:1;
+ uint64_t p2n0_co:1;
+ uint64_t p2n0_no:1;
+ uint64_t p2n0_po:1;
+ uint64_t p2n1_co:1;
+ uint64_t p2n1_no:1;
+ uint64_t p2n1_po:1;
+ uint64_t cpl_p1:1;
+ uint64_t cpl_p0:1;
+ uint64_t n2p1_o:1;
+ uint64_t n2p1_c:1;
+ uint64_t n2p0_o:1;
+ uint64_t n2p0_c:1;
+ uint64_t d4_pst:1;
+ uint64_t d3_pst:1;
+ uint64_t d2_pst:1;
+ uint64_t d1_pst:1;
+ uint64_t d0_pst:1;
+ uint64_t d4_mem:1;
+ uint64_t d3_mem:1;
+ uint64_t d2_mem:1;
+ uint64_t d1_mem:1;
+ uint64_t d0_mem:1;
+ uint64_t pkt_s1:1;
+ uint64_t pkt_s0:1;
+ uint64_t pkt_i1:1;
+ uint64_t pkt_i0:1;
+ uint64_t pkt_out:1;
+ uint64_t pkt_oif:1;
+ uint64_t pkt_odf:1;
+ uint64_t pkt_slm:1;
+ uint64_t pkt_ind:1;
+ uint64_t pkt_cntm:1;
+ uint64_t pkt_imem:1;
+ uint64_t pkt_pout:1;
+ uint64_t pcsr_sl:1;
+ uint64_t pcsr_id:1;
+ uint64_t pcsr_cnt:1;
+ uint64_t pcsr_im:1;
+ uint64_t pcsr_int:1;
+ uint64_t reserved_58_63:6;
+#endif
+ } cn56xxp1;
+};
+
+union cvmx_npei_bist_status2 {
+ uint64_t u64;
+ struct cvmx_npei_bist_status2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t prd_tag:1;
+ uint64_t prd_st0:1;
+ uint64_t prd_st1:1;
+ uint64_t prd_err:1;
+ uint64_t nrd_st:1;
+ uint64_t nwe_st:1;
+ uint64_t nwe_wr0:1;
+ uint64_t nwe_wr1:1;
+ uint64_t pkt_rd:1;
+ uint64_t psc_p0:1;
+ uint64_t psc_p1:1;
+ uint64_t pkt_gd:1;
+ uint64_t pkt_gl:1;
+ uint64_t pkt_blk:1;
+#else
+ uint64_t pkt_blk:1;
+ uint64_t pkt_gl:1;
+ uint64_t pkt_gd:1;
+ uint64_t psc_p1:1;
+ uint64_t psc_p0:1;
+ uint64_t pkt_rd:1;
+ uint64_t nwe_wr1:1;
+ uint64_t nwe_wr0:1;
+ uint64_t nwe_st:1;
+ uint64_t nrd_st:1;
+ uint64_t prd_err:1;
+ uint64_t prd_st1:1;
+ uint64_t prd_st0:1;
+ uint64_t prd_tag:1;
+ uint64_t reserved_14_63:50;
+#endif
+ } s;
+};
+
+union cvmx_npei_ctl_port0 {
+ uint64_t u64;
+ struct cvmx_npei_ctl_port0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63:43;
+ uint64_t waitl_com:1;
+ uint64_t intd:1;
+ uint64_t intc:1;
+ uint64_t intb:1;
+ uint64_t inta:1;
+ uint64_t intd_map:2;
+ uint64_t intc_map:2;
+ uint64_t intb_map:2;
+ uint64_t inta_map:2;
+ uint64_t ctlp_ro:1;
+ uint64_t reserved_6_6:1;
+ uint64_t ptlp_ro:1;
+ uint64_t bar2_enb:1;
+ uint64_t bar2_esx:2;
+ uint64_t bar2_cax:1;
+ uint64_t wait_com:1;
+#else
+ uint64_t wait_com:1;
+ uint64_t bar2_cax:1;
+ uint64_t bar2_esx:2;
+ uint64_t bar2_enb:1;
+ uint64_t ptlp_ro:1;
+ uint64_t reserved_6_6:1;
+ uint64_t ctlp_ro:1;
+ uint64_t inta_map:2;
+ uint64_t intb_map:2;
+ uint64_t intc_map:2;
+ uint64_t intd_map:2;
+ uint64_t inta:1;
+ uint64_t intb:1;
+ uint64_t intc:1;
+ uint64_t intd:1;
+ uint64_t waitl_com:1;
+ uint64_t reserved_21_63:43;
+#endif
+ } s;
+};
+
+union cvmx_npei_ctl_port1 {
+ uint64_t u64;
+ struct cvmx_npei_ctl_port1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63:43;
+ uint64_t waitl_com:1;
+ uint64_t intd:1;
+ uint64_t intc:1;
+ uint64_t intb:1;
+ uint64_t inta:1;
+ uint64_t intd_map:2;
+ uint64_t intc_map:2;
+ uint64_t intb_map:2;
+ uint64_t inta_map:2;
+ uint64_t ctlp_ro:1;
+ uint64_t reserved_6_6:1;
+ uint64_t ptlp_ro:1;
+ uint64_t bar2_enb:1;
+ uint64_t bar2_esx:2;
+ uint64_t bar2_cax:1;
+ uint64_t wait_com:1;
+#else
+ uint64_t wait_com:1;
+ uint64_t bar2_cax:1;
+ uint64_t bar2_esx:2;
+ uint64_t bar2_enb:1;
+ uint64_t ptlp_ro:1;
+ uint64_t reserved_6_6:1;
+ uint64_t ctlp_ro:1;
+ uint64_t inta_map:2;
+ uint64_t intb_map:2;
+ uint64_t intc_map:2;
+ uint64_t intd_map:2;
+ uint64_t inta:1;
+ uint64_t intb:1;
+ uint64_t intc:1;
+ uint64_t intd:1;
+ uint64_t waitl_com:1;
+ uint64_t reserved_21_63:43;
+#endif
+ } s;
+};
+
+union cvmx_npei_ctl_status {
+ uint64_t u64;
+ struct cvmx_npei_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t p1_ntags:6;
+ uint64_t p0_ntags:6;
+ uint64_t cfg_rtry:16;
+ uint64_t ring_en:1;
+ uint64_t lnk_rst:1;
+ uint64_t arb:1;
+ uint64_t pkt_bp:4;
+ uint64_t host_mode:1;
+ uint64_t chip_rev:8;
+#else
+ uint64_t chip_rev:8;
+ uint64_t host_mode:1;
+ uint64_t pkt_bp:4;
+ uint64_t arb:1;
+ uint64_t lnk_rst:1;
+ uint64_t ring_en:1;
+ uint64_t cfg_rtry:16;
+ uint64_t p0_ntags:6;
+ uint64_t p1_ntags:6;
+ uint64_t reserved_44_63:20;
+#endif
+ } s;
+ struct cvmx_npei_ctl_status_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t p1_ntags:6;
+ uint64_t p0_ntags:6;
+ uint64_t cfg_rtry:16;
+ uint64_t reserved_15_15:1;
+ uint64_t lnk_rst:1;
+ uint64_t arb:1;
+ uint64_t reserved_9_12:4;
+ uint64_t host_mode:1;
+ uint64_t chip_rev:8;
+#else
+ uint64_t chip_rev:8;
+ uint64_t host_mode:1;
+ uint64_t reserved_9_12:4;
+ uint64_t arb:1;
+ uint64_t lnk_rst:1;
+ uint64_t reserved_15_15:1;
+ uint64_t cfg_rtry:16;
+ uint64_t p0_ntags:6;
+ uint64_t p1_ntags:6;
+ uint64_t reserved_44_63:20;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_ctl_status_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63:49;
+ uint64_t lnk_rst:1;
+ uint64_t arb:1;
+ uint64_t pkt_bp:4;
+ uint64_t host_mode:1;
+ uint64_t chip_rev:8;
+#else
+ uint64_t chip_rev:8;
+ uint64_t host_mode:1;
+ uint64_t pkt_bp:4;
+ uint64_t arb:1;
+ uint64_t lnk_rst:1;
+ uint64_t reserved_15_63:49;
+#endif
+ } cn56xxp1;
+};
+
+union cvmx_npei_ctl_status2 {
+ uint64_t u64;
+ struct cvmx_npei_ctl_status2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t mps:1;
+ uint64_t mrrs:3;
+ uint64_t c1_w_flt:1;
+ uint64_t c0_w_flt:1;
+ uint64_t c1_b1_s:3;
+ uint64_t c0_b1_s:3;
+ uint64_t c1_wi_d:1;
+ uint64_t c1_b0_d:1;
+ uint64_t c0_wi_d:1;
+ uint64_t c0_b0_d:1;
+#else
+ uint64_t c0_b0_d:1;
+ uint64_t c0_wi_d:1;
+ uint64_t c1_b0_d:1;
+ uint64_t c1_wi_d:1;
+ uint64_t c0_b1_s:3;
+ uint64_t c1_b1_s:3;
+ uint64_t c0_w_flt:1;
+ uint64_t c1_w_flt:1;
+ uint64_t mrrs:3;
+ uint64_t mps:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_npei_data_out_cnt {
+ uint64_t u64;
+ struct cvmx_npei_data_out_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t p1_ucnt:16;
+ uint64_t p1_fcnt:6;
+ uint64_t p0_ucnt:16;
+ uint64_t p0_fcnt:6;
+#else
+ uint64_t p0_fcnt:6;
+ uint64_t p0_ucnt:16;
+ uint64_t p1_fcnt:6;
+ uint64_t p1_ucnt:16;
+ uint64_t reserved_44_63:20;
+#endif
+ } s;
+};
+
+union cvmx_npei_dbg_data {
+ uint64_t u64;
+ struct cvmx_npei_dbg_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t qlm0_rev_lanes:1;
+ uint64_t reserved_25_26:2;
+ uint64_t qlm1_spd:2;
+ uint64_t c_mul:5;
+ uint64_t dsel_ext:1;
+ uint64_t data:17;
+#else
+ uint64_t data:17;
+ uint64_t dsel_ext:1;
+ uint64_t c_mul:5;
+ uint64_t qlm1_spd:2;
+ uint64_t reserved_25_26:2;
+ uint64_t qlm0_rev_lanes:1;
+ uint64_t reserved_28_63:36;
+#endif
+ } s;
+ struct cvmx_npei_dbg_data_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t qlm0_link_width:1;
+ uint64_t qlm0_rev_lanes:1;
+ uint64_t qlm1_mode:2;
+ uint64_t qlm1_spd:2;
+ uint64_t c_mul:5;
+ uint64_t dsel_ext:1;
+ uint64_t data:17;
+#else
+ uint64_t data:17;
+ uint64_t dsel_ext:1;
+ uint64_t c_mul:5;
+ uint64_t qlm1_spd:2;
+ uint64_t qlm1_mode:2;
+ uint64_t qlm0_rev_lanes:1;
+ uint64_t qlm0_link_width:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn52xx;
+ struct cvmx_npei_dbg_data_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t qlm2_rev_lanes:1;
+ uint64_t qlm0_rev_lanes:1;
+ uint64_t qlm3_spd:2;
+ uint64_t qlm1_spd:2;
+ uint64_t c_mul:5;
+ uint64_t dsel_ext:1;
+ uint64_t data:17;
+#else
+ uint64_t data:17;
+ uint64_t dsel_ext:1;
+ uint64_t c_mul:5;
+ uint64_t qlm1_spd:2;
+ uint64_t qlm3_spd:2;
+ uint64_t qlm0_rev_lanes:1;
+ uint64_t qlm2_rev_lanes:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn56xx;
+};
+
+union cvmx_npei_dbg_select {
+ uint64_t u64;
+ struct cvmx_npei_dbg_select_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t dbg_sel:16;
+#else
+ uint64_t dbg_sel:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_npei_dmax_counts {
+ uint64_t u64;
+ struct cvmx_npei_dmax_counts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63:25;
+ uint64_t fcnt:7;
+ uint64_t dbell:32;
+#else
+ uint64_t dbell:32;
+ uint64_t fcnt:7;
+ uint64_t reserved_39_63:25;
+#endif
+ } s;
+};
+
+union cvmx_npei_dmax_dbell {
+ uint32_t u32;
+ struct cvmx_npei_dmax_dbell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31:16;
+ uint32_t dbell:16;
+#else
+ uint32_t dbell:16;
+ uint32_t reserved_16_31:16;
+#endif
+ } s;
+};
+
+union cvmx_npei_dmax_ibuff_saddr {
+ uint64_t u64;
+ struct cvmx_npei_dmax_ibuff_saddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63:27;
+ uint64_t idle:1;
+ uint64_t saddr:29;
+ uint64_t reserved_0_6:7;
+#else
+ uint64_t reserved_0_6:7;
+ uint64_t saddr:29;
+ uint64_t idle:1;
+ uint64_t reserved_37_63:27;
+#endif
+ } s;
+ struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63:28;
+ uint64_t saddr:29;
+ uint64_t reserved_0_6:7;
+#else
+ uint64_t reserved_0_6:7;
+ uint64_t saddr:29;
+ uint64_t reserved_36_63:28;
+#endif
+ } cn52xxp1;
+};
+
+union cvmx_npei_dmax_naddr {
+ uint64_t u64;
+ struct cvmx_npei_dmax_naddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63:28;
+ uint64_t addr:36;
+#else
+ uint64_t addr:36;
+ uint64_t reserved_36_63:28;
+#endif
+ } s;
+};
+
+union cvmx_npei_dma0_int_level {
+ uint64_t u64;
+ struct cvmx_npei_dma0_int_level_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t time:32;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t time:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_dma1_int_level {
+ uint64_t u64;
+ struct cvmx_npei_dma1_int_level_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t time:32;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t time:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_dma_cnts {
+ uint64_t u64;
+ struct cvmx_npei_dma_cnts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dma1:32;
+ uint64_t dma0:32;
+#else
+ uint64_t dma0:32;
+ uint64_t dma1:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_dma_control {
+ uint64_t u64;
+ struct cvmx_npei_dma_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t p_32b_m:1;
+ uint64_t dma4_enb:1;
+ uint64_t dma3_enb:1;
+ uint64_t dma2_enb:1;
+ uint64_t dma1_enb:1;
+ uint64_t dma0_enb:1;
+ uint64_t b0_lend:1;
+ uint64_t dwb_denb:1;
+ uint64_t dwb_ichk:9;
+ uint64_t fpa_que:3;
+ uint64_t o_add1:1;
+ uint64_t o_ro:1;
+ uint64_t o_ns:1;
+ uint64_t o_es:2;
+ uint64_t o_mode:1;
+ uint64_t csize:14;
+#else
+ uint64_t csize:14;
+ uint64_t o_mode:1;
+ uint64_t o_es:2;
+ uint64_t o_ns:1;
+ uint64_t o_ro:1;
+ uint64_t o_add1:1;
+ uint64_t fpa_que:3;
+ uint64_t dwb_ichk:9;
+ uint64_t dwb_denb:1;
+ uint64_t b0_lend:1;
+ uint64_t dma0_enb:1;
+ uint64_t dma1_enb:1;
+ uint64_t dma2_enb:1;
+ uint64_t dma3_enb:1;
+ uint64_t dma4_enb:1;
+ uint64_t p_32b_m:1;
+ uint64_t reserved_40_63:24;
+#endif
+ } s;
+ struct cvmx_npei_dma_control_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63:26;
+ uint64_t dma3_enb:1;
+ uint64_t dma2_enb:1;
+ uint64_t dma1_enb:1;
+ uint64_t dma0_enb:1;
+ uint64_t b0_lend:1;
+ uint64_t dwb_denb:1;
+ uint64_t dwb_ichk:9;
+ uint64_t fpa_que:3;
+ uint64_t o_add1:1;
+ uint64_t o_ro:1;
+ uint64_t o_ns:1;
+ uint64_t o_es:2;
+ uint64_t o_mode:1;
+ uint64_t csize:14;
+#else
+ uint64_t csize:14;
+ uint64_t o_mode:1;
+ uint64_t o_es:2;
+ uint64_t o_ns:1;
+ uint64_t o_ro:1;
+ uint64_t o_add1:1;
+ uint64_t fpa_que:3;
+ uint64_t dwb_ichk:9;
+ uint64_t dwb_denb:1;
+ uint64_t b0_lend:1;
+ uint64_t dma0_enb:1;
+ uint64_t dma1_enb:1;
+ uint64_t dma2_enb:1;
+ uint64_t dma3_enb:1;
+ uint64_t reserved_38_63:26;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_dma_control_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63:25;
+ uint64_t dma4_enb:1;
+ uint64_t dma3_enb:1;
+ uint64_t dma2_enb:1;
+ uint64_t dma1_enb:1;
+ uint64_t dma0_enb:1;
+ uint64_t b0_lend:1;
+ uint64_t dwb_denb:1;
+ uint64_t dwb_ichk:9;
+ uint64_t fpa_que:3;
+ uint64_t o_add1:1;
+ uint64_t o_ro:1;
+ uint64_t o_ns:1;
+ uint64_t o_es:2;
+ uint64_t o_mode:1;
+ uint64_t csize:14;
+#else
+ uint64_t csize:14;
+ uint64_t o_mode:1;
+ uint64_t o_es:2;
+ uint64_t o_ns:1;
+ uint64_t o_ro:1;
+ uint64_t o_add1:1;
+ uint64_t fpa_que:3;
+ uint64_t dwb_ichk:9;
+ uint64_t dwb_denb:1;
+ uint64_t b0_lend:1;
+ uint64_t dma0_enb:1;
+ uint64_t dma1_enb:1;
+ uint64_t dma2_enb:1;
+ uint64_t dma3_enb:1;
+ uint64_t dma4_enb:1;
+ uint64_t reserved_39_63:25;
+#endif
+ } cn56xxp1;
+};
+
+union cvmx_npei_dma_pcie_req_num {
+ uint64_t u64;
+ struct cvmx_npei_dma_pcie_req_num_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dma_arb:1;
+ uint64_t reserved_53_62:10;
+ uint64_t pkt_cnt:5;
+ uint64_t reserved_45_47:3;
+ uint64_t dma4_cnt:5;
+ uint64_t reserved_37_39:3;
+ uint64_t dma3_cnt:5;
+ uint64_t reserved_29_31:3;
+ uint64_t dma2_cnt:5;
+ uint64_t reserved_21_23:3;
+ uint64_t dma1_cnt:5;
+ uint64_t reserved_13_15:3;
+ uint64_t dma0_cnt:5;
+ uint64_t reserved_5_7:3;
+ uint64_t dma_cnt:5;
+#else
+ uint64_t dma_cnt:5;
+ uint64_t reserved_5_7:3;
+ uint64_t dma0_cnt:5;
+ uint64_t reserved_13_15:3;
+ uint64_t dma1_cnt:5;
+ uint64_t reserved_21_23:3;
+ uint64_t dma2_cnt:5;
+ uint64_t reserved_29_31:3;
+ uint64_t dma3_cnt:5;
+ uint64_t reserved_37_39:3;
+ uint64_t dma4_cnt:5;
+ uint64_t reserved_45_47:3;
+ uint64_t pkt_cnt:5;
+ uint64_t reserved_53_62:10;
+ uint64_t dma_arb:1;
+#endif
+ } s;
+};
+
+union cvmx_npei_dma_state1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t d4_dwe:8;
+ uint64_t d3_dwe:8;
+ uint64_t d2_dwe:8;
+ uint64_t d1_dwe:8;
+ uint64_t d0_dwe:8;
+#else
+ uint64_t d0_dwe:8;
+ uint64_t d1_dwe:8;
+ uint64_t d2_dwe:8;
+ uint64_t d3_dwe:8;
+ uint64_t d4_dwe:8;
+ uint64_t reserved_40_63:24;
+#endif
+ } s;
+};
+
+union cvmx_npei_dma_state1_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state1_p1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63:4;
+ uint64_t d0_difst:7;
+ uint64_t d1_difst:7;
+ uint64_t d2_difst:7;
+ uint64_t d3_difst:7;
+ uint64_t d4_difst:7;
+ uint64_t d0_reqst:5;
+ uint64_t d1_reqst:5;
+ uint64_t d2_reqst:5;
+ uint64_t d3_reqst:5;
+ uint64_t d4_reqst:5;
+#else
+ uint64_t d4_reqst:5;
+ uint64_t d3_reqst:5;
+ uint64_t d2_reqst:5;
+ uint64_t d1_reqst:5;
+ uint64_t d0_reqst:5;
+ uint64_t d4_difst:7;
+ uint64_t d3_difst:7;
+ uint64_t d2_difst:7;
+ uint64_t d1_difst:7;
+ uint64_t d0_difst:7;
+ uint64_t reserved_60_63:4;
+#endif
+ } s;
+ struct cvmx_npei_dma_state1_p1_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63:4;
+ uint64_t d0_difst:7;
+ uint64_t d1_difst:7;
+ uint64_t d2_difst:7;
+ uint64_t d3_difst:7;
+ uint64_t reserved_25_31:7;
+ uint64_t d0_reqst:5;
+ uint64_t d1_reqst:5;
+ uint64_t d2_reqst:5;
+ uint64_t d3_reqst:5;
+ uint64_t reserved_0_4:5;
+#else
+ uint64_t reserved_0_4:5;
+ uint64_t d3_reqst:5;
+ uint64_t d2_reqst:5;
+ uint64_t d1_reqst:5;
+ uint64_t d0_reqst:5;
+ uint64_t reserved_25_31:7;
+ uint64_t d3_difst:7;
+ uint64_t d2_difst:7;
+ uint64_t d1_difst:7;
+ uint64_t d0_difst:7;
+ uint64_t reserved_60_63:4;
+#endif
+ } cn52xxp1;
+};
+
+union cvmx_npei_dma_state2 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t ndwe:4;
+ uint64_t reserved_21_23:3;
+ uint64_t ndre:5;
+ uint64_t reserved_10_15:6;
+ uint64_t prd:10;
+#else
+ uint64_t prd:10;
+ uint64_t reserved_10_15:6;
+ uint64_t ndre:5;
+ uint64_t reserved_21_23:3;
+ uint64_t ndwe:4;
+ uint64_t reserved_28_63:36;
+#endif
+ } s;
+};
+
+union cvmx_npei_dma_state2_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state2_p1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_45_63:19;
+ uint64_t d0_dffst:9;
+ uint64_t d1_dffst:9;
+ uint64_t d2_dffst:9;
+ uint64_t d3_dffst:9;
+ uint64_t d4_dffst:9;
+#else
+ uint64_t d4_dffst:9;
+ uint64_t d3_dffst:9;
+ uint64_t d2_dffst:9;
+ uint64_t d1_dffst:9;
+ uint64_t d0_dffst:9;
+ uint64_t reserved_45_63:19;
+#endif
+ } s;
+ struct cvmx_npei_dma_state2_p1_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_45_63:19;
+ uint64_t d0_dffst:9;
+ uint64_t d1_dffst:9;
+ uint64_t d2_dffst:9;
+ uint64_t d3_dffst:9;
+ uint64_t reserved_0_8:9;
+#else
+ uint64_t reserved_0_8:9;
+ uint64_t d3_dffst:9;
+ uint64_t d2_dffst:9;
+ uint64_t d1_dffst:9;
+ uint64_t d0_dffst:9;
+ uint64_t reserved_45_63:19;
+#endif
+ } cn52xxp1;
+};
+
+union cvmx_npei_dma_state3_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state3_p1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63:4;
+ uint64_t d0_drest:15;
+ uint64_t d1_drest:15;
+ uint64_t d2_drest:15;
+ uint64_t d3_drest:15;
+#else
+ uint64_t d3_drest:15;
+ uint64_t d2_drest:15;
+ uint64_t d1_drest:15;
+ uint64_t d0_drest:15;
+ uint64_t reserved_60_63:4;
+#endif
+ } s;
+};
+
+union cvmx_npei_dma_state4_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state4_p1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63:12;
+ uint64_t d0_dwest:13;
+ uint64_t d1_dwest:13;
+ uint64_t d2_dwest:13;
+ uint64_t d3_dwest:13;
+#else
+ uint64_t d3_dwest:13;
+ uint64_t d2_dwest:13;
+ uint64_t d1_dwest:13;
+ uint64_t d0_dwest:13;
+ uint64_t reserved_52_63:12;
+#endif
+ } s;
+};
+
+union cvmx_npei_dma_state5_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state5_p1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t d4_drest:15;
+ uint64_t d4_dwest:13;
+#else
+ uint64_t d4_dwest:13;
+ uint64_t d4_drest:15;
+ uint64_t reserved_28_63:36;
+#endif
+ } s;
+};
+
+union cvmx_npei_int_a_enb {
+ uint64_t u64;
+ struct cvmx_npei_int_a_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t pout_err:1;
+ uint64_t pin_bp:1;
+ uint64_t p1_rdlk:1;
+ uint64_t p0_rdlk:1;
+ uint64_t pgl_err:1;
+ uint64_t pdi_err:1;
+ uint64_t pop_err:1;
+ uint64_t pins_err:1;
+ uint64_t dma1_cpl:1;
+ uint64_t dma0_cpl:1;
+#else
+ uint64_t dma0_cpl:1;
+ uint64_t dma1_cpl:1;
+ uint64_t pins_err:1;
+ uint64_t pop_err:1;
+ uint64_t pdi_err:1;
+ uint64_t pgl_err:1;
+ uint64_t p0_rdlk:1;
+ uint64_t p1_rdlk:1;
+ uint64_t pin_bp:1;
+ uint64_t pout_err:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+ struct cvmx_npei_int_a_enb_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t dma1_cpl:1;
+ uint64_t dma0_cpl:1;
+#else
+ uint64_t dma0_cpl:1;
+ uint64_t dma1_cpl:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } cn52xxp1;
+};
+
+union cvmx_npei_int_a_enb2 {
+ uint64_t u64;
+ struct cvmx_npei_int_a_enb2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t pout_err:1;
+ uint64_t pin_bp:1;
+ uint64_t p1_rdlk:1;
+ uint64_t p0_rdlk:1;
+ uint64_t pgl_err:1;
+ uint64_t pdi_err:1;
+ uint64_t pop_err:1;
+ uint64_t pins_err:1;
+ uint64_t dma1_cpl:1;
+ uint64_t dma0_cpl:1;
+#else
+ uint64_t dma0_cpl:1;
+ uint64_t dma1_cpl:1;
+ uint64_t pins_err:1;
+ uint64_t pop_err:1;
+ uint64_t pdi_err:1;
+ uint64_t pgl_err:1;
+ uint64_t p0_rdlk:1;
+ uint64_t p1_rdlk:1;
+ uint64_t pin_bp:1;
+ uint64_t pout_err:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+ struct cvmx_npei_int_a_enb2_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t dma1_cpl:1;
+ uint64_t dma0_cpl:1;
+#else
+ uint64_t dma0_cpl:1;
+ uint64_t dma1_cpl:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } cn52xxp1;
+};
+
+union cvmx_npei_int_a_sum {
+ uint64_t u64;
+ struct cvmx_npei_int_a_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t pout_err:1;
+ uint64_t pin_bp:1;
+ uint64_t p1_rdlk:1;
+ uint64_t p0_rdlk:1;
+ uint64_t pgl_err:1;
+ uint64_t pdi_err:1;
+ uint64_t pop_err:1;
+ uint64_t pins_err:1;
+ uint64_t dma1_cpl:1;
+ uint64_t dma0_cpl:1;
+#else
+ uint64_t dma0_cpl:1;
+ uint64_t dma1_cpl:1;
+ uint64_t pins_err:1;
+ uint64_t pop_err:1;
+ uint64_t pdi_err:1;
+ uint64_t pgl_err:1;
+ uint64_t p0_rdlk:1;
+ uint64_t p1_rdlk:1;
+ uint64_t pin_bp:1;
+ uint64_t pout_err:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+ struct cvmx_npei_int_a_sum_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t dma1_cpl:1;
+ uint64_t dma0_cpl:1;
+#else
+ uint64_t dma0_cpl:1;
+ uint64_t dma1_cpl:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } cn52xxp1;
+};
+
+union cvmx_npei_int_enb {
+ uint64_t u64;
+ struct cvmx_npei_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mio_inta:1;
+ uint64_t reserved_62_62:1;
+ uint64_t int_a:1;
+ uint64_t c1_ldwn:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c1_hpint:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_wake:1;
+ uint64_t crs1_dr:1;
+ uint64_t c1_se:1;
+ uint64_t crs1_er:1;
+ uint64_t c1_aeri:1;
+ uint64_t c0_hpint:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_wake:1;
+ uint64_t crs0_dr:1;
+ uint64_t c0_se:1;
+ uint64_t crs0_er:1;
+ uint64_t c0_aeri:1;
+ uint64_t ptime:1;
+ uint64_t pcnt:1;
+ uint64_t pidbof:1;
+ uint64_t psldbof:1;
+ uint64_t dtime1:1;
+ uint64_t dtime0:1;
+ uint64_t dcnt1:1;
+ uint64_t dcnt0:1;
+ uint64_t dma1fi:1;
+ uint64_t dma0fi:1;
+ uint64_t dma4dbo:1;
+ uint64_t dma3dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma0dbo:1;
+ uint64_t iob2big:1;
+ uint64_t bar0_to:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t bar0_to:1;
+ uint64_t iob2big:1;
+ uint64_t dma0dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma3dbo:1;
+ uint64_t dma4dbo:1;
+ uint64_t dma0fi:1;
+ uint64_t dma1fi:1;
+ uint64_t dcnt0:1;
+ uint64_t dcnt1:1;
+ uint64_t dtime0:1;
+ uint64_t dtime1:1;
+ uint64_t psldbof:1;
+ uint64_t pidbof:1;
+ uint64_t pcnt:1;
+ uint64_t ptime:1;
+ uint64_t c0_aeri:1;
+ uint64_t crs0_er:1;
+ uint64_t c0_se:1;
+ uint64_t crs0_dr:1;
+ uint64_t c0_wake:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_hpint:1;
+ uint64_t c1_aeri:1;
+ uint64_t crs1_er:1;
+ uint64_t c1_se:1;
+ uint64_t crs1_dr:1;
+ uint64_t c1_wake:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_hpint:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_ldwn:1;
+ uint64_t int_a:1;
+ uint64_t reserved_62_62:1;
+ uint64_t mio_inta:1;
+#endif
+ } s;
+ struct cvmx_npei_int_enb_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mio_inta:1;
+ uint64_t reserved_62_62:1;
+ uint64_t int_a:1;
+ uint64_t c1_ldwn:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c1_hpint:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_wake:1;
+ uint64_t crs1_dr:1;
+ uint64_t c1_se:1;
+ uint64_t crs1_er:1;
+ uint64_t c1_aeri:1;
+ uint64_t c0_hpint:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_wake:1;
+ uint64_t crs0_dr:1;
+ uint64_t c0_se:1;
+ uint64_t crs0_er:1;
+ uint64_t c0_aeri:1;
+ uint64_t ptime:1;
+ uint64_t pcnt:1;
+ uint64_t pidbof:1;
+ uint64_t psldbof:1;
+ uint64_t dtime1:1;
+ uint64_t dtime0:1;
+ uint64_t dcnt1:1;
+ uint64_t dcnt0:1;
+ uint64_t dma1fi:1;
+ uint64_t dma0fi:1;
+ uint64_t reserved_8_8:1;
+ uint64_t dma3dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma0dbo:1;
+ uint64_t iob2big:1;
+ uint64_t bar0_to:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t bar0_to:1;
+ uint64_t iob2big:1;
+ uint64_t dma0dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma3dbo:1;
+ uint64_t reserved_8_8:1;
+ uint64_t dma0fi:1;
+ uint64_t dma1fi:1;
+ uint64_t dcnt0:1;
+ uint64_t dcnt1:1;
+ uint64_t dtime0:1;
+ uint64_t dtime1:1;
+ uint64_t psldbof:1;
+ uint64_t pidbof:1;
+ uint64_t pcnt:1;
+ uint64_t ptime:1;
+ uint64_t c0_aeri:1;
+ uint64_t crs0_er:1;
+ uint64_t c0_se:1;
+ uint64_t crs0_dr:1;
+ uint64_t c0_wake:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_hpint:1;
+ uint64_t c1_aeri:1;
+ uint64_t crs1_er:1;
+ uint64_t c1_se:1;
+ uint64_t crs1_dr:1;
+ uint64_t c1_wake:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_hpint:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_ldwn:1;
+ uint64_t int_a:1;
+ uint64_t reserved_62_62:1;
+ uint64_t mio_inta:1;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_int_enb_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mio_inta:1;
+ uint64_t reserved_61_62:2;
+ uint64_t c1_ldwn:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c1_hpint:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_wake:1;
+ uint64_t reserved_29_29:1;
+ uint64_t c1_se:1;
+ uint64_t reserved_27_27:1;
+ uint64_t c1_aeri:1;
+ uint64_t c0_hpint:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_wake:1;
+ uint64_t reserved_22_22:1;
+ uint64_t c0_se:1;
+ uint64_t reserved_20_20:1;
+ uint64_t c0_aeri:1;
+ uint64_t ptime:1;
+ uint64_t pcnt:1;
+ uint64_t pidbof:1;
+ uint64_t psldbof:1;
+ uint64_t dtime1:1;
+ uint64_t dtime0:1;
+ uint64_t dcnt1:1;
+ uint64_t dcnt0:1;
+ uint64_t dma1fi:1;
+ uint64_t dma0fi:1;
+ uint64_t dma4dbo:1;
+ uint64_t dma3dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma0dbo:1;
+ uint64_t iob2big:1;
+ uint64_t bar0_to:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t bar0_to:1;
+ uint64_t iob2big:1;
+ uint64_t dma0dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma3dbo:1;
+ uint64_t dma4dbo:1;
+ uint64_t dma0fi:1;
+ uint64_t dma1fi:1;
+ uint64_t dcnt0:1;
+ uint64_t dcnt1:1;
+ uint64_t dtime0:1;
+ uint64_t dtime1:1;
+ uint64_t psldbof:1;
+ uint64_t pidbof:1;
+ uint64_t pcnt:1;
+ uint64_t ptime:1;
+ uint64_t c0_aeri:1;
+ uint64_t reserved_20_20:1;
+ uint64_t c0_se:1;
+ uint64_t reserved_22_22:1;
+ uint64_t c0_wake:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_hpint:1;
+ uint64_t c1_aeri:1;
+ uint64_t reserved_27_27:1;
+ uint64_t c1_se:1;
+ uint64_t reserved_29_29:1;
+ uint64_t c1_wake:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_hpint:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_ldwn:1;
+ uint64_t reserved_61_62:2;
+ uint64_t mio_inta:1;
+#endif
+ } cn56xxp1;
+};
+
+union cvmx_npei_int_enb2 {
+ uint64_t u64;
+ struct cvmx_npei_int_enb2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ uint64_t int_a:1;
+ uint64_t c1_ldwn:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c1_hpint:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_wake:1;
+ uint64_t crs1_dr:1;
+ uint64_t c1_se:1;
+ uint64_t crs1_er:1;
+ uint64_t c1_aeri:1;
+ uint64_t c0_hpint:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_wake:1;
+ uint64_t crs0_dr:1;
+ uint64_t c0_se:1;
+ uint64_t crs0_er:1;
+ uint64_t c0_aeri:1;
+ uint64_t ptime:1;
+ uint64_t pcnt:1;
+ uint64_t pidbof:1;
+ uint64_t psldbof:1;
+ uint64_t dtime1:1;
+ uint64_t dtime0:1;
+ uint64_t dcnt1:1;
+ uint64_t dcnt0:1;
+ uint64_t dma1fi:1;
+ uint64_t dma0fi:1;
+ uint64_t dma4dbo:1;
+ uint64_t dma3dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma0dbo:1;
+ uint64_t iob2big:1;
+ uint64_t bar0_to:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t bar0_to:1;
+ uint64_t iob2big:1;
+ uint64_t dma0dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma3dbo:1;
+ uint64_t dma4dbo:1;
+ uint64_t dma0fi:1;
+ uint64_t dma1fi:1;
+ uint64_t dcnt0:1;
+ uint64_t dcnt1:1;
+ uint64_t dtime0:1;
+ uint64_t dtime1:1;
+ uint64_t psldbof:1;
+ uint64_t pidbof:1;
+ uint64_t pcnt:1;
+ uint64_t ptime:1;
+ uint64_t c0_aeri:1;
+ uint64_t crs0_er:1;
+ uint64_t c0_se:1;
+ uint64_t crs0_dr:1;
+ uint64_t c0_wake:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_hpint:1;
+ uint64_t c1_aeri:1;
+ uint64_t crs1_er:1;
+ uint64_t c1_se:1;
+ uint64_t crs1_dr:1;
+ uint64_t c1_wake:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_hpint:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_ldwn:1;
+ uint64_t int_a:1;
+ uint64_t reserved_62_63:2;
+#endif
+ } s;
+ struct cvmx_npei_int_enb2_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ uint64_t int_a:1;
+ uint64_t c1_ldwn:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c1_hpint:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_wake:1;
+ uint64_t crs1_dr:1;
+ uint64_t c1_se:1;
+ uint64_t crs1_er:1;
+ uint64_t c1_aeri:1;
+ uint64_t c0_hpint:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_wake:1;
+ uint64_t crs0_dr:1;
+ uint64_t c0_se:1;
+ uint64_t crs0_er:1;
+ uint64_t c0_aeri:1;
+ uint64_t ptime:1;
+ uint64_t pcnt:1;
+ uint64_t pidbof:1;
+ uint64_t psldbof:1;
+ uint64_t dtime1:1;
+ uint64_t dtime0:1;
+ uint64_t dcnt1:1;
+ uint64_t dcnt0:1;
+ uint64_t dma1fi:1;
+ uint64_t dma0fi:1;
+ uint64_t reserved_8_8:1;
+ uint64_t dma3dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma0dbo:1;
+ uint64_t iob2big:1;
+ uint64_t bar0_to:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t bar0_to:1;
+ uint64_t iob2big:1;
+ uint64_t dma0dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma3dbo:1;
+ uint64_t reserved_8_8:1;
+ uint64_t dma0fi:1;
+ uint64_t dma1fi:1;
+ uint64_t dcnt0:1;
+ uint64_t dcnt1:1;
+ uint64_t dtime0:1;
+ uint64_t dtime1:1;
+ uint64_t psldbof:1;
+ uint64_t pidbof:1;
+ uint64_t pcnt:1;
+ uint64_t ptime:1;
+ uint64_t c0_aeri:1;
+ uint64_t crs0_er:1;
+ uint64_t c0_se:1;
+ uint64_t crs0_dr:1;
+ uint64_t c0_wake:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_hpint:1;
+ uint64_t c1_aeri:1;
+ uint64_t crs1_er:1;
+ uint64_t c1_se:1;
+ uint64_t crs1_dr:1;
+ uint64_t c1_wake:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_hpint:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_ldwn:1;
+ uint64_t int_a:1;
+ uint64_t reserved_62_63:2;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_int_enb2_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63:3;
+ uint64_t c1_ldwn:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c1_hpint:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_wake:1;
+ uint64_t reserved_29_29:1;
+ uint64_t c1_se:1;
+ uint64_t reserved_27_27:1;
+ uint64_t c1_aeri:1;
+ uint64_t c0_hpint:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_wake:1;
+ uint64_t reserved_22_22:1;
+ uint64_t c0_se:1;
+ uint64_t reserved_20_20:1;
+ uint64_t c0_aeri:1;
+ uint64_t ptime:1;
+ uint64_t pcnt:1;
+ uint64_t pidbof:1;
+ uint64_t psldbof:1;
+ uint64_t dtime1:1;
+ uint64_t dtime0:1;
+ uint64_t dcnt1:1;
+ uint64_t dcnt0:1;
+ uint64_t dma1fi:1;
+ uint64_t dma0fi:1;
+ uint64_t dma4dbo:1;
+ uint64_t dma3dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma0dbo:1;
+ uint64_t iob2big:1;
+ uint64_t bar0_to:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t bar0_to:1;
+ uint64_t iob2big:1;
+ uint64_t dma0dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma3dbo:1;
+ uint64_t dma4dbo:1;
+ uint64_t dma0fi:1;
+ uint64_t dma1fi:1;
+ uint64_t dcnt0:1;
+ uint64_t dcnt1:1;
+ uint64_t dtime0:1;
+ uint64_t dtime1:1;
+ uint64_t psldbof:1;
+ uint64_t pidbof:1;
+ uint64_t pcnt:1;
+ uint64_t ptime:1;
+ uint64_t c0_aeri:1;
+ uint64_t reserved_20_20:1;
+ uint64_t c0_se:1;
+ uint64_t reserved_22_22:1;
+ uint64_t c0_wake:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_hpint:1;
+ uint64_t c1_aeri:1;
+ uint64_t reserved_27_27:1;
+ uint64_t c1_se:1;
+ uint64_t reserved_29_29:1;
+ uint64_t c1_wake:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_hpint:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_ldwn:1;
+ uint64_t reserved_61_63:3;
+#endif
+ } cn56xxp1;
+};
+
+union cvmx_npei_int_info {
+ uint64_t u64;
+ struct cvmx_npei_int_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t pidbof:6;
+ uint64_t psldbof:6;
+#else
+ uint64_t psldbof:6;
+ uint64_t pidbof:6;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+};
+
+union cvmx_npei_int_sum {
+ uint64_t u64;
+ struct cvmx_npei_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mio_inta:1;
+ uint64_t reserved_62_62:1;
+ uint64_t int_a:1;
+ uint64_t c1_ldwn:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c1_hpint:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_wake:1;
+ uint64_t crs1_dr:1;
+ uint64_t c1_se:1;
+ uint64_t crs1_er:1;
+ uint64_t c1_aeri:1;
+ uint64_t c0_hpint:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_wake:1;
+ uint64_t crs0_dr:1;
+ uint64_t c0_se:1;
+ uint64_t crs0_er:1;
+ uint64_t c0_aeri:1;
+ uint64_t ptime:1;
+ uint64_t pcnt:1;
+ uint64_t pidbof:1;
+ uint64_t psldbof:1;
+ uint64_t dtime1:1;
+ uint64_t dtime0:1;
+ uint64_t dcnt1:1;
+ uint64_t dcnt0:1;
+ uint64_t dma1fi:1;
+ uint64_t dma0fi:1;
+ uint64_t dma4dbo:1;
+ uint64_t dma3dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma0dbo:1;
+ uint64_t iob2big:1;
+ uint64_t bar0_to:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t bar0_to:1;
+ uint64_t iob2big:1;
+ uint64_t dma0dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma3dbo:1;
+ uint64_t dma4dbo:1;
+ uint64_t dma0fi:1;
+ uint64_t dma1fi:1;
+ uint64_t dcnt0:1;
+ uint64_t dcnt1:1;
+ uint64_t dtime0:1;
+ uint64_t dtime1:1;
+ uint64_t psldbof:1;
+ uint64_t pidbof:1;
+ uint64_t pcnt:1;
+ uint64_t ptime:1;
+ uint64_t c0_aeri:1;
+ uint64_t crs0_er:1;
+ uint64_t c0_se:1;
+ uint64_t crs0_dr:1;
+ uint64_t c0_wake:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_hpint:1;
+ uint64_t c1_aeri:1;
+ uint64_t crs1_er:1;
+ uint64_t c1_se:1;
+ uint64_t crs1_dr:1;
+ uint64_t c1_wake:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_hpint:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_ldwn:1;
+ uint64_t int_a:1;
+ uint64_t reserved_62_62:1;
+ uint64_t mio_inta:1;
+#endif
+ } s;
+ struct cvmx_npei_int_sum_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mio_inta:1;
+ uint64_t reserved_62_62:1;
+ uint64_t int_a:1;
+ uint64_t c1_ldwn:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c1_hpint:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_wake:1;
+ uint64_t crs1_dr:1;
+ uint64_t c1_se:1;
+ uint64_t crs1_er:1;
+ uint64_t c1_aeri:1;
+ uint64_t c0_hpint:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_wake:1;
+ uint64_t crs0_dr:1;
+ uint64_t c0_se:1;
+ uint64_t crs0_er:1;
+ uint64_t c0_aeri:1;
+ uint64_t reserved_15_18:4;
+ uint64_t dtime1:1;
+ uint64_t dtime0:1;
+ uint64_t dcnt1:1;
+ uint64_t dcnt0:1;
+ uint64_t dma1fi:1;
+ uint64_t dma0fi:1;
+ uint64_t reserved_8_8:1;
+ uint64_t dma3dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma0dbo:1;
+ uint64_t iob2big:1;
+ uint64_t bar0_to:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t bar0_to:1;
+ uint64_t iob2big:1;
+ uint64_t dma0dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma3dbo:1;
+ uint64_t reserved_8_8:1;
+ uint64_t dma0fi:1;
+ uint64_t dma1fi:1;
+ uint64_t dcnt0:1;
+ uint64_t dcnt1:1;
+ uint64_t dtime0:1;
+ uint64_t dtime1:1;
+ uint64_t reserved_15_18:4;
+ uint64_t c0_aeri:1;
+ uint64_t crs0_er:1;
+ uint64_t c0_se:1;
+ uint64_t crs0_dr:1;
+ uint64_t c0_wake:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_hpint:1;
+ uint64_t c1_aeri:1;
+ uint64_t crs1_er:1;
+ uint64_t c1_se:1;
+ uint64_t crs1_dr:1;
+ uint64_t c1_wake:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_hpint:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_ldwn:1;
+ uint64_t int_a:1;
+ uint64_t reserved_62_62:1;
+ uint64_t mio_inta:1;
+#endif
+ } cn52xxp1;
+ struct cvmx_npei_int_sum_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mio_inta:1;
+ uint64_t reserved_61_62:2;
+ uint64_t c1_ldwn:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c1_hpint:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_wake:1;
+ uint64_t reserved_29_29:1;
+ uint64_t c1_se:1;
+ uint64_t reserved_27_27:1;
+ uint64_t c1_aeri:1;
+ uint64_t c0_hpint:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_wake:1;
+ uint64_t reserved_22_22:1;
+ uint64_t c0_se:1;
+ uint64_t reserved_20_20:1;
+ uint64_t c0_aeri:1;
+ uint64_t reserved_15_18:4;
+ uint64_t dtime1:1;
+ uint64_t dtime0:1;
+ uint64_t dcnt1:1;
+ uint64_t dcnt0:1;
+ uint64_t dma1fi:1;
+ uint64_t dma0fi:1;
+ uint64_t dma4dbo:1;
+ uint64_t dma3dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma0dbo:1;
+ uint64_t iob2big:1;
+ uint64_t bar0_to:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t bar0_to:1;
+ uint64_t iob2big:1;
+ uint64_t dma0dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma3dbo:1;
+ uint64_t dma4dbo:1;
+ uint64_t dma0fi:1;
+ uint64_t dma1fi:1;
+ uint64_t dcnt0:1;
+ uint64_t dcnt1:1;
+ uint64_t dtime0:1;
+ uint64_t dtime1:1;
+ uint64_t reserved_15_18:4;
+ uint64_t c0_aeri:1;
+ uint64_t reserved_20_20:1;
+ uint64_t c0_se:1;
+ uint64_t reserved_22_22:1;
+ uint64_t c0_wake:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_hpint:1;
+ uint64_t c1_aeri:1;
+ uint64_t reserved_27_27:1;
+ uint64_t c1_se:1;
+ uint64_t reserved_29_29:1;
+ uint64_t c1_wake:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_hpint:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_ldwn:1;
+ uint64_t reserved_61_62:2;
+ uint64_t mio_inta:1;
+#endif
+ } cn56xxp1;
+};
+
+union cvmx_npei_int_sum2 {
+ uint64_t u64;
+ struct cvmx_npei_int_sum2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mio_inta:1;
+ uint64_t reserved_62_62:1;
+ uint64_t int_a:1;
+ uint64_t c1_ldwn:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c1_hpint:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_wake:1;
+ uint64_t crs1_dr:1;
+ uint64_t c1_se:1;
+ uint64_t crs1_er:1;
+ uint64_t c1_aeri:1;
+ uint64_t c0_hpint:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_wake:1;
+ uint64_t crs0_dr:1;
+ uint64_t c0_se:1;
+ uint64_t crs0_er:1;
+ uint64_t c0_aeri:1;
+ uint64_t reserved_15_18:4;
+ uint64_t dtime1:1;
+ uint64_t dtime0:1;
+ uint64_t dcnt1:1;
+ uint64_t dcnt0:1;
+ uint64_t dma1fi:1;
+ uint64_t dma0fi:1;
+ uint64_t reserved_8_8:1;
+ uint64_t dma3dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma0dbo:1;
+ uint64_t iob2big:1;
+ uint64_t bar0_to:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t bar0_to:1;
+ uint64_t iob2big:1;
+ uint64_t dma0dbo:1;
+ uint64_t dma1dbo:1;
+ uint64_t dma2dbo:1;
+ uint64_t dma3dbo:1;
+ uint64_t reserved_8_8:1;
+ uint64_t dma0fi:1;
+ uint64_t dma1fi:1;
+ uint64_t dcnt0:1;
+ uint64_t dcnt1:1;
+ uint64_t dtime0:1;
+ uint64_t dtime1:1;
+ uint64_t reserved_15_18:4;
+ uint64_t c0_aeri:1;
+ uint64_t crs0_er:1;
+ uint64_t c0_se:1;
+ uint64_t crs0_dr:1;
+ uint64_t c0_wake:1;
+ uint64_t c0_pmei:1;
+ uint64_t c0_hpint:1;
+ uint64_t c1_aeri:1;
+ uint64_t crs1_er:1;
+ uint64_t c1_se:1;
+ uint64_t crs1_dr:1;
+ uint64_t c1_wake:1;
+ uint64_t c1_pmei:1;
+ uint64_t c1_hpint:1;
+ uint64_t c0_up_b0:1;
+ uint64_t c0_up_b1:1;
+ uint64_t c0_up_b2:1;
+ uint64_t c0_up_wi:1;
+ uint64_t c0_up_bx:1;
+ uint64_t c0_un_b0:1;
+ uint64_t c0_un_b1:1;
+ uint64_t c0_un_b2:1;
+ uint64_t c0_un_wi:1;
+ uint64_t c0_un_bx:1;
+ uint64_t c1_up_b0:1;
+ uint64_t c1_up_b1:1;
+ uint64_t c1_up_b2:1;
+ uint64_t c1_up_wi:1;
+ uint64_t c1_up_bx:1;
+ uint64_t c1_un_b0:1;
+ uint64_t c1_un_b1:1;
+ uint64_t c1_un_b2:1;
+ uint64_t c1_un_wi:1;
+ uint64_t c1_un_bx:1;
+ uint64_t c0_un_wf:1;
+ uint64_t c1_un_wf:1;
+ uint64_t c0_up_wf:1;
+ uint64_t c1_up_wf:1;
+ uint64_t c0_exc:1;
+ uint64_t c1_exc:1;
+ uint64_t c0_ldwn:1;
+ uint64_t c1_ldwn:1;
+ uint64_t int_a:1;
+ uint64_t reserved_62_62:1;
+ uint64_t mio_inta:1;
+#endif
+ } s;
+};
+
+union cvmx_npei_last_win_rdata0 {
+ uint64_t u64;
+ struct cvmx_npei_last_win_rdata0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_last_win_rdata1 {
+ uint64_t u64;
+ struct cvmx_npei_last_win_rdata1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_mem_access_ctl {
+ uint64_t u64;
+ struct cvmx_npei_mem_access_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t max_word:4;
+ uint64_t timer:10;
+#else
+ uint64_t timer:10;
+ uint64_t max_word:4;
+ uint64_t reserved_14_63:50;
+#endif
+ } s;
+};
+
+union cvmx_npei_mem_access_subidx {
+ uint64_t u64;
+ struct cvmx_npei_mem_access_subidx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63:22;
+ uint64_t zero:1;
+ uint64_t port:2;
+ uint64_t nmerge:1;
+ uint64_t esr:2;
+ uint64_t esw:2;
+ uint64_t nsr:1;
+ uint64_t nsw:1;
+ uint64_t ror:1;
+ uint64_t row:1;
+ uint64_t ba:30;
+#else
+ uint64_t ba:30;
+ uint64_t row:1;
+ uint64_t ror:1;
+ uint64_t nsw:1;
+ uint64_t nsr:1;
+ uint64_t esw:2;
+ uint64_t esr:2;
+ uint64_t nmerge:1;
+ uint64_t port:2;
+ uint64_t zero:1;
+ uint64_t reserved_42_63:22;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_enb0 {
+ uint64_t u64;
+ struct cvmx_npei_msi_enb0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t enb:64;
+#else
+ uint64_t enb:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_enb1 {
+ uint64_t u64;
+ struct cvmx_npei_msi_enb1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t enb:64;
+#else
+ uint64_t enb:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_enb2 {
+ uint64_t u64;
+ struct cvmx_npei_msi_enb2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t enb:64;
+#else
+ uint64_t enb:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_enb3 {
+ uint64_t u64;
+ struct cvmx_npei_msi_enb3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t enb:64;
+#else
+ uint64_t enb:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_rcv0 {
+ uint64_t u64;
+ struct cvmx_npei_msi_rcv0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t intr:64;
+#else
+ uint64_t intr:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_rcv1 {
+ uint64_t u64;
+ struct cvmx_npei_msi_rcv1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t intr:64;
+#else
+ uint64_t intr:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_rcv2 {
+ uint64_t u64;
+ struct cvmx_npei_msi_rcv2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t intr:64;
+#else
+ uint64_t intr:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_rcv3 {
+ uint64_t u64;
+ struct cvmx_npei_msi_rcv3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t intr:64;
+#else
+ uint64_t intr:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_rd_map {
+ uint64_t u64;
+ struct cvmx_npei_msi_rd_map_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t rd_int:8;
+ uint64_t msi_int:8;
+#else
+ uint64_t msi_int:8;
+ uint64_t rd_int:8;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_w1c_enb0 {
+ uint64_t u64;
+ struct cvmx_npei_msi_w1c_enb0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t clr:64;
+#else
+ uint64_t clr:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_w1c_enb1 {
+ uint64_t u64;
+ struct cvmx_npei_msi_w1c_enb1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t clr:64;
+#else
+ uint64_t clr:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_w1c_enb2 {
+ uint64_t u64;
+ struct cvmx_npei_msi_w1c_enb2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t clr:64;
+#else
+ uint64_t clr:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_w1c_enb3 {
+ uint64_t u64;
+ struct cvmx_npei_msi_w1c_enb3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t clr:64;
+#else
+ uint64_t clr:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_w1s_enb0 {
+ uint64_t u64;
+ struct cvmx_npei_msi_w1s_enb0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t set:64;
+#else
+ uint64_t set:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_w1s_enb1 {
+ uint64_t u64;
+ struct cvmx_npei_msi_w1s_enb1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t set:64;
+#else
+ uint64_t set:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_w1s_enb2 {
+ uint64_t u64;
+ struct cvmx_npei_msi_w1s_enb2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t set:64;
+#else
+ uint64_t set:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_w1s_enb3 {
+ uint64_t u64;
+ struct cvmx_npei_msi_w1s_enb3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t set:64;
+#else
+ uint64_t set:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_msi_wr_map {
+ uint64_t u64;
+ struct cvmx_npei_msi_wr_map_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t ciu_int:8;
+ uint64_t msi_int:8;
+#else
+ uint64_t msi_int:8;
+ uint64_t ciu_int:8;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_npei_pcie_credit_cnt {
+ uint64_t u64;
+ struct cvmx_npei_pcie_credit_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t p1_ccnt:8;
+ uint64_t p1_ncnt:8;
+ uint64_t p1_pcnt:8;
+ uint64_t p0_ccnt:8;
+ uint64_t p0_ncnt:8;
+ uint64_t p0_pcnt:8;
+#else
+ uint64_t p0_pcnt:8;
+ uint64_t p0_ncnt:8;
+ uint64_t p0_ccnt:8;
+ uint64_t p1_pcnt:8;
+ uint64_t p1_ncnt:8;
+ uint64_t p1_ccnt:8;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_npei_pcie_msi_rcv {
+ uint64_t u64;
+ struct cvmx_npei_pcie_msi_rcv_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t intr:8;
+#else
+ uint64_t intr:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_npei_pcie_msi_rcv_b1 {
+ uint64_t u64;
+ struct cvmx_npei_pcie_msi_rcv_b1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t intr:8;
+ uint64_t reserved_0_7:8;
+#else
+ uint64_t reserved_0_7:8;
+ uint64_t intr:8;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_npei_pcie_msi_rcv_b2 {
+ uint64_t u64;
+ struct cvmx_npei_pcie_msi_rcv_b2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t intr:8;
+ uint64_t reserved_0_15:16;
+#else
+ uint64_t reserved_0_15:16;
+ uint64_t intr:8;
+ uint64_t reserved_24_63:40;
+#endif
+ } s;
+};
+
+union cvmx_npei_pcie_msi_rcv_b3 {
+ uint64_t u64;
+ struct cvmx_npei_pcie_msi_rcv_b3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t intr:8;
+ uint64_t reserved_0_23:24;
+#else
+ uint64_t reserved_0_23:24;
+ uint64_t intr:8;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pktx_cnts {
+ uint64_t u64;
+ struct cvmx_npei_pktx_cnts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63:10;
+ uint64_t timer:22;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t timer:22;
+ uint64_t reserved_54_63:10;
+#endif
+ } s;
+};
+
+union cvmx_npei_pktx_in_bp {
+ uint64_t u64;
+ struct cvmx_npei_pktx_in_bp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wmark:32;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t wmark:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pktx_instr_baddr {
+ uint64_t u64;
+ struct cvmx_npei_pktx_instr_baddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr:61;
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t addr:61;
+#endif
+ } s;
+};
+
+union cvmx_npei_pktx_instr_baoff_dbell {
+ uint64_t u64;
+ struct cvmx_npei_pktx_instr_baoff_dbell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t aoff:32;
+ uint64_t dbell:32;
+#else
+ uint64_t dbell:32;
+ uint64_t aoff:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pktx_instr_fifo_rsize {
+ uint64_t u64;
+ struct cvmx_npei_pktx_instr_fifo_rsize_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t max:9;
+ uint64_t rrp:9;
+ uint64_t wrp:9;
+ uint64_t fcnt:5;
+ uint64_t rsize:32;
+#else
+ uint64_t rsize:32;
+ uint64_t fcnt:5;
+ uint64_t wrp:9;
+ uint64_t rrp:9;
+ uint64_t max:9;
+#endif
+ } s;
+};
+
+union cvmx_npei_pktx_instr_header {
+ uint64_t u64;
+ struct cvmx_npei_pktx_instr_header_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t pbp:1;
+ uint64_t reserved_38_42:5;
+ uint64_t rparmode:2;
+ uint64_t reserved_35_35:1;
+ uint64_t rskp_len:7;
+ uint64_t reserved_22_27:6;
+ uint64_t use_ihdr:1;
+ uint64_t reserved_16_20:5;
+ uint64_t par_mode:2;
+ uint64_t reserved_13_13:1;
+ uint64_t skp_len:7;
+ uint64_t reserved_0_5:6;
+#else
+ uint64_t reserved_0_5:6;
+ uint64_t skp_len:7;
+ uint64_t reserved_13_13:1;
+ uint64_t par_mode:2;
+ uint64_t reserved_16_20:5;
+ uint64_t use_ihdr:1;
+ uint64_t reserved_22_27:6;
+ uint64_t rskp_len:7;
+ uint64_t reserved_35_35:1;
+ uint64_t rparmode:2;
+ uint64_t reserved_38_42:5;
+ uint64_t pbp:1;
+ uint64_t reserved_44_63:20;
+#endif
+ } s;
+};
+
+union cvmx_npei_pktx_slist_baddr {
+ uint64_t u64;
+ struct cvmx_npei_pktx_slist_baddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr:60;
+ uint64_t reserved_0_3:4;
+#else
+ uint64_t reserved_0_3:4;
+ uint64_t addr:60;
+#endif
+ } s;
+};
+
+union cvmx_npei_pktx_slist_baoff_dbell {
+ uint64_t u64;
+ struct cvmx_npei_pktx_slist_baoff_dbell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t aoff:32;
+ uint64_t dbell:32;
+#else
+ uint64_t dbell:32;
+ uint64_t aoff:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pktx_slist_fifo_rsize {
+ uint64_t u64;
+ struct cvmx_npei_pktx_slist_fifo_rsize_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t rsize:32;
+#else
+ uint64_t rsize:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_cnt_int {
+ uint64_t u64;
+ struct cvmx_npei_pkt_cnt_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t port:32;
+#else
+ uint64_t port:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_cnt_int_enb {
+ uint64_t u64;
+ struct cvmx_npei_pkt_cnt_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t port:32;
+#else
+ uint64_t port:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_data_out_es {
+ uint64_t u64;
+ struct cvmx_npei_pkt_data_out_es_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t es:64;
+#else
+ uint64_t es:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_data_out_ns {
+ uint64_t u64;
+ struct cvmx_npei_pkt_data_out_ns_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t nsr:32;
+#else
+ uint64_t nsr:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_data_out_ror {
+ uint64_t u64;
+ struct cvmx_npei_pkt_data_out_ror_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ror:32;
+#else
+ uint64_t ror:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_dpaddr {
+ uint64_t u64;
+ struct cvmx_npei_pkt_dpaddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t dptr:32;
+#else
+ uint64_t dptr:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_in_bp {
+ uint64_t u64;
+ struct cvmx_npei_pkt_in_bp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t bp:32;
+#else
+ uint64_t bp:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_in_donex_cnts {
+ uint64_t u64;
+ struct cvmx_npei_pkt_in_donex_cnts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_in_instr_counts {
+ uint64_t u64;
+ struct cvmx_npei_pkt_in_instr_counts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wr_cnt:32;
+ uint64_t rd_cnt:32;
+#else
+ uint64_t rd_cnt:32;
+ uint64_t wr_cnt:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_in_pcie_port {
+ uint64_t u64;
+ struct cvmx_npei_pkt_in_pcie_port_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pp:64;
+#else
+ uint64_t pp:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_input_control {
+ uint64_t u64;
+ struct cvmx_npei_pkt_input_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63:41;
+ uint64_t pkt_rr:1;
+ uint64_t pbp_dhi:13;
+ uint64_t d_nsr:1;
+ uint64_t d_esr:2;
+ uint64_t d_ror:1;
+ uint64_t use_csr:1;
+ uint64_t nsr:1;
+ uint64_t esr:2;
+ uint64_t ror:1;
+#else
+ uint64_t ror:1;
+ uint64_t esr:2;
+ uint64_t nsr:1;
+ uint64_t use_csr:1;
+ uint64_t d_ror:1;
+ uint64_t d_esr:2;
+ uint64_t d_nsr:1;
+ uint64_t pbp_dhi:13;
+ uint64_t pkt_rr:1;
+ uint64_t reserved_23_63:41;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_instr_enb {
+ uint64_t u64;
+ struct cvmx_npei_pkt_instr_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t enb:32;
+#else
+ uint64_t enb:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_instr_rd_size {
+ uint64_t u64;
+ struct cvmx_npei_pkt_instr_rd_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rdsize:64;
+#else
+ uint64_t rdsize:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_instr_size {
+ uint64_t u64;
+ struct cvmx_npei_pkt_instr_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t is_64b:32;
+#else
+ uint64_t is_64b:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_int_levels {
+ uint64_t u64;
+ struct cvmx_npei_pkt_int_levels_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63:10;
+ uint64_t time:22;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t time:22;
+ uint64_t reserved_54_63:10;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_iptr {
+ uint64_t u64;
+ struct cvmx_npei_pkt_iptr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t iptr:32;
+#else
+ uint64_t iptr:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_out_bmode {
+ uint64_t u64;
+ struct cvmx_npei_pkt_out_bmode_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t bmode:32;
+#else
+ uint64_t bmode:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_out_enb {
+ uint64_t u64;
+ struct cvmx_npei_pkt_out_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t enb:32;
+#else
+ uint64_t enb:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_output_wmark {
+ uint64_t u64;
+ struct cvmx_npei_pkt_output_wmark_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t wmark:32;
+#else
+ uint64_t wmark:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_pcie_port {
+ uint64_t u64;
+ struct cvmx_npei_pkt_pcie_port_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pp:64;
+#else
+ uint64_t pp:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_port_in_rst {
+ uint64_t u64;
+ struct cvmx_npei_pkt_port_in_rst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t in_rst:32;
+ uint64_t out_rst:32;
+#else
+ uint64_t out_rst:32;
+ uint64_t in_rst:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_slist_es {
+ uint64_t u64;
+ struct cvmx_npei_pkt_slist_es_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t es:64;
+#else
+ uint64_t es:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_slist_id_size {
+ uint64_t u64;
+ struct cvmx_npei_pkt_slist_id_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63:41;
+ uint64_t isize:7;
+ uint64_t bsize:16;
+#else
+ uint64_t bsize:16;
+ uint64_t isize:7;
+ uint64_t reserved_23_63:41;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_slist_ns {
+ uint64_t u64;
+ struct cvmx_npei_pkt_slist_ns_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t nsr:32;
+#else
+ uint64_t nsr:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_slist_ror {
+ uint64_t u64;
+ struct cvmx_npei_pkt_slist_ror_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ror:32;
+#else
+ uint64_t ror:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_time_int {
+ uint64_t u64;
+ struct cvmx_npei_pkt_time_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t port:32;
+#else
+ uint64_t port:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_pkt_time_int_enb {
+ uint64_t u64;
+ struct cvmx_npei_pkt_time_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t port:32;
+#else
+ uint64_t port:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npei_rsl_int_blocks {
+ uint64_t u64;
+ struct cvmx_npei_rsl_int_blocks_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63:33;
+ uint64_t iob:1;
+ uint64_t lmc1:1;
+ uint64_t agl:1;
+ uint64_t reserved_24_27:4;
+ uint64_t asxpcs1:1;
+ uint64_t asxpcs0:1;
+ uint64_t reserved_21_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc0:1;
+ uint64_t l2c:1;
+ uint64_t usb1:1;
+ uint64_t rad:1;
+ uint64_t usb:1;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t reserved_8_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npei:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+#else
+ uint64_t mio:1;
+ uint64_t gmx0:1;
+ uint64_t gmx1:1;
+ uint64_t npei:1;
+ uint64_t key:1;
+ uint64_t fpa:1;
+ uint64_t dfa:1;
+ uint64_t zip:1;
+ uint64_t reserved_8_8:1;
+ uint64_t ipd:1;
+ uint64_t pko:1;
+ uint64_t tim:1;
+ uint64_t pow:1;
+ uint64_t usb:1;
+ uint64_t rad:1;
+ uint64_t usb1:1;
+ uint64_t l2c:1;
+ uint64_t lmc0:1;
+ uint64_t spx0:1;
+ uint64_t spx1:1;
+ uint64_t pip:1;
+ uint64_t reserved_21_21:1;
+ uint64_t asxpcs0:1;
+ uint64_t asxpcs1:1;
+ uint64_t reserved_24_27:4;
+ uint64_t agl:1;
+ uint64_t lmc1:1;
+ uint64_t iob:1;
+ uint64_t reserved_31_63:33;
+#endif
+ } s;
+};
+
+union cvmx_npei_scratch_1 {
+ uint64_t u64;
+ struct cvmx_npei_scratch_1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_state1 {
+ uint64_t u64;
+ struct cvmx_npei_state1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t cpl1:12;
+ uint64_t cpl0:12;
+ uint64_t arb:1;
+ uint64_t csr:39;
+#else
+ uint64_t csr:39;
+ uint64_t arb:1;
+ uint64_t cpl0:12;
+ uint64_t cpl1:12;
+#endif
+ } s;
+};
+
+union cvmx_npei_state2 {
+ uint64_t u64;
+ struct cvmx_npei_state2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t npei:1;
+ uint64_t rac:1;
+ uint64_t csm1:15;
+ uint64_t csm0:15;
+ uint64_t nnp0:8;
+ uint64_t nnd:8;
+#else
+ uint64_t nnd:8;
+ uint64_t nnp0:8;
+ uint64_t csm0:15;
+ uint64_t csm1:15;
+ uint64_t rac:1;
+ uint64_t npei:1;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_npei_state3 {
+ uint64_t u64;
+ struct cvmx_npei_state3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63:8;
+ uint64_t psm1:15;
+ uint64_t psm0:15;
+ uint64_t nsm1:13;
+ uint64_t nsm0:13;
+#else
+ uint64_t nsm0:13;
+ uint64_t nsm1:13;
+ uint64_t psm0:15;
+ uint64_t psm1:15;
+ uint64_t reserved_56_63:8;
+#endif
+ } s;
+};
+
+union cvmx_npei_win_rd_addr {
+ uint64_t u64;
+ struct cvmx_npei_win_rd_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_51_63:13;
+ uint64_t ld_cmd:2;
+ uint64_t iobit:1;
+ uint64_t rd_addr:48;
+#else
+ uint64_t rd_addr:48;
+ uint64_t iobit:1;
+ uint64_t ld_cmd:2;
+ uint64_t reserved_51_63:13;
+#endif
+ } s;
+};
+
+union cvmx_npei_win_rd_data {
+ uint64_t u64;
+ struct cvmx_npei_win_rd_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rd_data:64;
+#else
+ uint64_t rd_data:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_win_wr_addr {
+ uint64_t u64;
+ struct cvmx_npei_win_wr_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63:15;
+ uint64_t iobit:1;
+ uint64_t wr_addr:46;
+ uint64_t reserved_0_1:2;
+#else
+ uint64_t reserved_0_1:2;
+ uint64_t wr_addr:46;
+ uint64_t iobit:1;
+ uint64_t reserved_49_63:15;
+#endif
+ } s;
+};
+
+union cvmx_npei_win_wr_data {
+ uint64_t u64;
+ struct cvmx_npei_win_wr_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wr_data:64;
+#else
+ uint64_t wr_data:64;
+#endif
+ } s;
+};
+
+union cvmx_npei_win_wr_mask {
+ uint64_t u64;
+ struct cvmx_npei_win_wr_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t wr_mask:8;
+#else
+ uint64_t wr_mask:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_npei_window_ctl {
+ uint64_t u64;
+ struct cvmx_npei_window_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t time:32;
+#else
+ uint64_t time:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-npi-defs.h b/arch/mips/include/asm/octeon/cvmx-npi-defs.h
new file mode 100644
index 000000000..ba4967fda
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-npi-defs.h
@@ -0,0 +1,2514 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_NPI_DEFS_H__
+#define __CVMX_NPI_DEFS_H__
+
+#define CVMX_NPI_BASE_ADDR_INPUT0 CVMX_NPI_BASE_ADDR_INPUTX(0)
+#define CVMX_NPI_BASE_ADDR_INPUT1 CVMX_NPI_BASE_ADDR_INPUTX(1)
+#define CVMX_NPI_BASE_ADDR_INPUT2 CVMX_NPI_BASE_ADDR_INPUTX(2)
+#define CVMX_NPI_BASE_ADDR_INPUT3 CVMX_NPI_BASE_ADDR_INPUTX(3)
+#define CVMX_NPI_BASE_ADDR_INPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000000070ull) + ((offset) & 3) * 16)
+#define CVMX_NPI_BASE_ADDR_OUTPUT0 CVMX_NPI_BASE_ADDR_OUTPUTX(0)
+#define CVMX_NPI_BASE_ADDR_OUTPUT1 CVMX_NPI_BASE_ADDR_OUTPUTX(1)
+#define CVMX_NPI_BASE_ADDR_OUTPUT2 CVMX_NPI_BASE_ADDR_OUTPUTX(2)
+#define CVMX_NPI_BASE_ADDR_OUTPUT3 CVMX_NPI_BASE_ADDR_OUTPUTX(3)
+#define CVMX_NPI_BASE_ADDR_OUTPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F00000000B8ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011F00000003F8ull))
+#define CVMX_NPI_BUFF_SIZE_OUTPUT0 CVMX_NPI_BUFF_SIZE_OUTPUTX(0)
+#define CVMX_NPI_BUFF_SIZE_OUTPUT1 CVMX_NPI_BUFF_SIZE_OUTPUTX(1)
+#define CVMX_NPI_BUFF_SIZE_OUTPUT2 CVMX_NPI_BUFF_SIZE_OUTPUTX(2)
+#define CVMX_NPI_BUFF_SIZE_OUTPUT3 CVMX_NPI_BUFF_SIZE_OUTPUTX(3)
+#define CVMX_NPI_BUFF_SIZE_OUTPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F00000000E0ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_COMP_CTL (CVMX_ADD_IO_SEG(0x00011F0000000218ull))
+#define CVMX_NPI_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011F0000000010ull))
+#define CVMX_NPI_DBG_SELECT (CVMX_ADD_IO_SEG(0x00011F0000000008ull))
+#define CVMX_NPI_DMA_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000000128ull))
+#define CVMX_NPI_DMA_HIGHP_COUNTS (CVMX_ADD_IO_SEG(0x00011F0000000148ull))
+#define CVMX_NPI_DMA_HIGHP_NADDR (CVMX_ADD_IO_SEG(0x00011F0000000158ull))
+#define CVMX_NPI_DMA_LOWP_COUNTS (CVMX_ADD_IO_SEG(0x00011F0000000140ull))
+#define CVMX_NPI_DMA_LOWP_NADDR (CVMX_ADD_IO_SEG(0x00011F0000000150ull))
+#define CVMX_NPI_HIGHP_DBELL (CVMX_ADD_IO_SEG(0x00011F0000000120ull))
+#define CVMX_NPI_HIGHP_IBUFF_SADDR (CVMX_ADD_IO_SEG(0x00011F0000000110ull))
+#define CVMX_NPI_INPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000000138ull))
+#define CVMX_NPI_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000000020ull))
+#define CVMX_NPI_INT_SUM (CVMX_ADD_IO_SEG(0x00011F0000000018ull))
+#define CVMX_NPI_LOWP_DBELL (CVMX_ADD_IO_SEG(0x00011F0000000118ull))
+#define CVMX_NPI_LOWP_IBUFF_SADDR (CVMX_ADD_IO_SEG(0x00011F0000000108ull))
+#define CVMX_NPI_MEM_ACCESS_SUBID3 CVMX_NPI_MEM_ACCESS_SUBIDX(3)
+#define CVMX_NPI_MEM_ACCESS_SUBID4 CVMX_NPI_MEM_ACCESS_SUBIDX(4)
+#define CVMX_NPI_MEM_ACCESS_SUBID5 CVMX_NPI_MEM_ACCESS_SUBIDX(5)
+#define CVMX_NPI_MEM_ACCESS_SUBID6 CVMX_NPI_MEM_ACCESS_SUBIDX(6)
+#define CVMX_NPI_MEM_ACCESS_SUBIDX(offset) (CVMX_ADD_IO_SEG(0x00011F0000000028ull) + ((offset) & 7) * 8 - 8*3)
+#define CVMX_NPI_MSI_RCV (0x0000000000000190ull)
+#define CVMX_NPI_NPI_MSI_RCV (CVMX_ADD_IO_SEG(0x00011F0000001190ull))
+#define CVMX_NPI_NUM_DESC_OUTPUT0 CVMX_NPI_NUM_DESC_OUTPUTX(0)
+#define CVMX_NPI_NUM_DESC_OUTPUT1 CVMX_NPI_NUM_DESC_OUTPUTX(1)
+#define CVMX_NPI_NUM_DESC_OUTPUT2 CVMX_NPI_NUM_DESC_OUTPUTX(2)
+#define CVMX_NPI_NUM_DESC_OUTPUT3 CVMX_NPI_NUM_DESC_OUTPUTX(3)
+#define CVMX_NPI_NUM_DESC_OUTPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000000050ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_OUTPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000000100ull))
+#define CVMX_NPI_P0_DBPAIR_ADDR CVMX_NPI_PX_DBPAIR_ADDR(0)
+#define CVMX_NPI_P0_INSTR_ADDR CVMX_NPI_PX_INSTR_ADDR(0)
+#define CVMX_NPI_P0_INSTR_CNTS CVMX_NPI_PX_INSTR_CNTS(0)
+#define CVMX_NPI_P0_PAIR_CNTS CVMX_NPI_PX_PAIR_CNTS(0)
+#define CVMX_NPI_P1_DBPAIR_ADDR CVMX_NPI_PX_DBPAIR_ADDR(1)
+#define CVMX_NPI_P1_INSTR_ADDR CVMX_NPI_PX_INSTR_ADDR(1)
+#define CVMX_NPI_P1_INSTR_CNTS CVMX_NPI_PX_INSTR_CNTS(1)
+#define CVMX_NPI_P1_PAIR_CNTS CVMX_NPI_PX_PAIR_CNTS(1)
+#define CVMX_NPI_P2_DBPAIR_ADDR CVMX_NPI_PX_DBPAIR_ADDR(2)
+#define CVMX_NPI_P2_INSTR_ADDR CVMX_NPI_PX_INSTR_ADDR(2)
+#define CVMX_NPI_P2_INSTR_CNTS CVMX_NPI_PX_INSTR_CNTS(2)
+#define CVMX_NPI_P2_PAIR_CNTS CVMX_NPI_PX_PAIR_CNTS(2)
+#define CVMX_NPI_P3_DBPAIR_ADDR CVMX_NPI_PX_DBPAIR_ADDR(3)
+#define CVMX_NPI_P3_INSTR_ADDR CVMX_NPI_PX_INSTR_ADDR(3)
+#define CVMX_NPI_P3_INSTR_CNTS CVMX_NPI_PX_INSTR_CNTS(3)
+#define CVMX_NPI_P3_PAIR_CNTS CVMX_NPI_PX_PAIR_CNTS(3)
+#define CVMX_NPI_PCI_BAR1_INDEXX(offset) (CVMX_ADD_IO_SEG(0x00011F0000001100ull) + ((offset) & 31) * 4)
+#define CVMX_NPI_PCI_BIST_REG (CVMX_ADD_IO_SEG(0x00011F00000011C0ull))
+#define CVMX_NPI_PCI_BURST_SIZE (CVMX_ADD_IO_SEG(0x00011F00000000D8ull))
+#define CVMX_NPI_PCI_CFG00 (CVMX_ADD_IO_SEG(0x00011F0000001800ull))
+#define CVMX_NPI_PCI_CFG01 (CVMX_ADD_IO_SEG(0x00011F0000001804ull))
+#define CVMX_NPI_PCI_CFG02 (CVMX_ADD_IO_SEG(0x00011F0000001808ull))
+#define CVMX_NPI_PCI_CFG03 (CVMX_ADD_IO_SEG(0x00011F000000180Cull))
+#define CVMX_NPI_PCI_CFG04 (CVMX_ADD_IO_SEG(0x00011F0000001810ull))
+#define CVMX_NPI_PCI_CFG05 (CVMX_ADD_IO_SEG(0x00011F0000001814ull))
+#define CVMX_NPI_PCI_CFG06 (CVMX_ADD_IO_SEG(0x00011F0000001818ull))
+#define CVMX_NPI_PCI_CFG07 (CVMX_ADD_IO_SEG(0x00011F000000181Cull))
+#define CVMX_NPI_PCI_CFG08 (CVMX_ADD_IO_SEG(0x00011F0000001820ull))
+#define CVMX_NPI_PCI_CFG09 (CVMX_ADD_IO_SEG(0x00011F0000001824ull))
+#define CVMX_NPI_PCI_CFG10 (CVMX_ADD_IO_SEG(0x00011F0000001828ull))
+#define CVMX_NPI_PCI_CFG11 (CVMX_ADD_IO_SEG(0x00011F000000182Cull))
+#define CVMX_NPI_PCI_CFG12 (CVMX_ADD_IO_SEG(0x00011F0000001830ull))
+#define CVMX_NPI_PCI_CFG13 (CVMX_ADD_IO_SEG(0x00011F0000001834ull))
+#define CVMX_NPI_PCI_CFG15 (CVMX_ADD_IO_SEG(0x00011F000000183Cull))
+#define CVMX_NPI_PCI_CFG16 (CVMX_ADD_IO_SEG(0x00011F0000001840ull))
+#define CVMX_NPI_PCI_CFG17 (CVMX_ADD_IO_SEG(0x00011F0000001844ull))
+#define CVMX_NPI_PCI_CFG18 (CVMX_ADD_IO_SEG(0x00011F0000001848ull))
+#define CVMX_NPI_PCI_CFG19 (CVMX_ADD_IO_SEG(0x00011F000000184Cull))
+#define CVMX_NPI_PCI_CFG20 (CVMX_ADD_IO_SEG(0x00011F0000001850ull))
+#define CVMX_NPI_PCI_CFG21 (CVMX_ADD_IO_SEG(0x00011F0000001854ull))
+#define CVMX_NPI_PCI_CFG22 (CVMX_ADD_IO_SEG(0x00011F0000001858ull))
+#define CVMX_NPI_PCI_CFG56 (CVMX_ADD_IO_SEG(0x00011F00000018E0ull))
+#define CVMX_NPI_PCI_CFG57 (CVMX_ADD_IO_SEG(0x00011F00000018E4ull))
+#define CVMX_NPI_PCI_CFG58 (CVMX_ADD_IO_SEG(0x00011F00000018E8ull))
+#define CVMX_NPI_PCI_CFG59 (CVMX_ADD_IO_SEG(0x00011F00000018ECull))
+#define CVMX_NPI_PCI_CFG60 (CVMX_ADD_IO_SEG(0x00011F00000018F0ull))
+#define CVMX_NPI_PCI_CFG61 (CVMX_ADD_IO_SEG(0x00011F00000018F4ull))
+#define CVMX_NPI_PCI_CFG62 (CVMX_ADD_IO_SEG(0x00011F00000018F8ull))
+#define CVMX_NPI_PCI_CFG63 (CVMX_ADD_IO_SEG(0x00011F00000018FCull))
+#define CVMX_NPI_PCI_CNT_REG (CVMX_ADD_IO_SEG(0x00011F00000011B8ull))
+#define CVMX_NPI_PCI_CTL_STATUS_2 (CVMX_ADD_IO_SEG(0x00011F000000118Cull))
+#define CVMX_NPI_PCI_INT_ARB_CFG (CVMX_ADD_IO_SEG(0x00011F0000000130ull))
+#define CVMX_NPI_PCI_INT_ENB2 (CVMX_ADD_IO_SEG(0x00011F00000011A0ull))
+#define CVMX_NPI_PCI_INT_SUM2 (CVMX_ADD_IO_SEG(0x00011F0000001198ull))
+#define CVMX_NPI_PCI_READ_CMD (CVMX_ADD_IO_SEG(0x00011F0000000048ull))
+#define CVMX_NPI_PCI_READ_CMD_6 (CVMX_ADD_IO_SEG(0x00011F0000001180ull))
+#define CVMX_NPI_PCI_READ_CMD_C (CVMX_ADD_IO_SEG(0x00011F0000001184ull))
+#define CVMX_NPI_PCI_READ_CMD_E (CVMX_ADD_IO_SEG(0x00011F0000001188ull))
+#define CVMX_NPI_PCI_SCM_REG (CVMX_ADD_IO_SEG(0x00011F00000011A8ull))
+#define CVMX_NPI_PCI_TSR_REG (CVMX_ADD_IO_SEG(0x00011F00000011B0ull))
+#define CVMX_NPI_PORT32_INSTR_HDR (CVMX_ADD_IO_SEG(0x00011F00000001F8ull))
+#define CVMX_NPI_PORT33_INSTR_HDR (CVMX_ADD_IO_SEG(0x00011F0000000200ull))
+#define CVMX_NPI_PORT34_INSTR_HDR (CVMX_ADD_IO_SEG(0x00011F0000000208ull))
+#define CVMX_NPI_PORT35_INSTR_HDR (CVMX_ADD_IO_SEG(0x00011F0000000210ull))
+#define CVMX_NPI_PORT_BP_CONTROL (CVMX_ADD_IO_SEG(0x00011F00000001F0ull))
+#define CVMX_NPI_PX_DBPAIR_ADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000000180ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_PX_INSTR_ADDR(offset) (CVMX_ADD_IO_SEG(0x00011F00000001C0ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_PX_INSTR_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F00000001A0ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_PX_PAIR_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000000160ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_RSL_INT_BLOCKS (CVMX_ADD_IO_SEG(0x00011F0000000000ull))
+#define CVMX_NPI_SIZE_INPUT0 CVMX_NPI_SIZE_INPUTX(0)
+#define CVMX_NPI_SIZE_INPUT1 CVMX_NPI_SIZE_INPUTX(1)
+#define CVMX_NPI_SIZE_INPUT2 CVMX_NPI_SIZE_INPUTX(2)
+#define CVMX_NPI_SIZE_INPUT3 CVMX_NPI_SIZE_INPUTX(3)
+#define CVMX_NPI_SIZE_INPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000000078ull) + ((offset) & 3) * 16)
+#define CVMX_NPI_WIN_READ_TO (CVMX_ADD_IO_SEG(0x00011F00000001E0ull))
+
+union cvmx_npi_base_addr_inputx {
+ uint64_t u64;
+ struct cvmx_npi_base_addr_inputx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t baddr:61;
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t baddr:61;
+#endif
+ } s;
+};
+
+union cvmx_npi_base_addr_outputx {
+ uint64_t u64;
+ struct cvmx_npi_base_addr_outputx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t baddr:61;
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t baddr:61;
+#endif
+ } s;
+};
+
+union cvmx_npi_bist_status {
+ uint64_t u64;
+ struct cvmx_npi_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t csr_bs:1;
+ uint64_t dif_bs:1;
+ uint64_t rdp_bs:1;
+ uint64_t pcnc_bs:1;
+ uint64_t pcn_bs:1;
+ uint64_t rdn_bs:1;
+ uint64_t pcac_bs:1;
+ uint64_t pcad_bs:1;
+ uint64_t rdnl_bs:1;
+ uint64_t pgf_bs:1;
+ uint64_t pig_bs:1;
+ uint64_t pof0_bs:1;
+ uint64_t pof1_bs:1;
+ uint64_t pof2_bs:1;
+ uint64_t pof3_bs:1;
+ uint64_t pos_bs:1;
+ uint64_t nus_bs:1;
+ uint64_t dob_bs:1;
+ uint64_t pdf_bs:1;
+ uint64_t dpi_bs:1;
+#else
+ uint64_t dpi_bs:1;
+ uint64_t pdf_bs:1;
+ uint64_t dob_bs:1;
+ uint64_t nus_bs:1;
+ uint64_t pos_bs:1;
+ uint64_t pof3_bs:1;
+ uint64_t pof2_bs:1;
+ uint64_t pof1_bs:1;
+ uint64_t pof0_bs:1;
+ uint64_t pig_bs:1;
+ uint64_t pgf_bs:1;
+ uint64_t rdnl_bs:1;
+ uint64_t pcad_bs:1;
+ uint64_t pcac_bs:1;
+ uint64_t rdn_bs:1;
+ uint64_t pcn_bs:1;
+ uint64_t pcnc_bs:1;
+ uint64_t rdp_bs:1;
+ uint64_t dif_bs:1;
+ uint64_t csr_bs:1;
+ uint64_t reserved_20_63:44;
+#endif
+ } s;
+ struct cvmx_npi_bist_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t csr_bs:1;
+ uint64_t dif_bs:1;
+ uint64_t rdp_bs:1;
+ uint64_t pcnc_bs:1;
+ uint64_t pcn_bs:1;
+ uint64_t rdn_bs:1;
+ uint64_t pcac_bs:1;
+ uint64_t pcad_bs:1;
+ uint64_t rdnl_bs:1;
+ uint64_t pgf_bs:1;
+ uint64_t pig_bs:1;
+ uint64_t pof0_bs:1;
+ uint64_t reserved_5_7:3;
+ uint64_t pos_bs:1;
+ uint64_t nus_bs:1;
+ uint64_t dob_bs:1;
+ uint64_t pdf_bs:1;
+ uint64_t dpi_bs:1;
+#else
+ uint64_t dpi_bs:1;
+ uint64_t pdf_bs:1;
+ uint64_t dob_bs:1;
+ uint64_t nus_bs:1;
+ uint64_t pos_bs:1;
+ uint64_t reserved_5_7:3;
+ uint64_t pof0_bs:1;
+ uint64_t pig_bs:1;
+ uint64_t pgf_bs:1;
+ uint64_t rdnl_bs:1;
+ uint64_t pcad_bs:1;
+ uint64_t pcac_bs:1;
+ uint64_t rdn_bs:1;
+ uint64_t pcn_bs:1;
+ uint64_t pcnc_bs:1;
+ uint64_t rdp_bs:1;
+ uint64_t dif_bs:1;
+ uint64_t csr_bs:1;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn30xx;
+ struct cvmx_npi_bist_status_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t csr_bs:1;
+ uint64_t dif_bs:1;
+ uint64_t rdp_bs:1;
+ uint64_t pcnc_bs:1;
+ uint64_t pcn_bs:1;
+ uint64_t rdn_bs:1;
+ uint64_t pcac_bs:1;
+ uint64_t pcad_bs:1;
+ uint64_t rdnl_bs:1;
+ uint64_t pgf_bs:1;
+ uint64_t pig_bs:1;
+ uint64_t pof0_bs:1;
+ uint64_t pof1_bs:1;
+ uint64_t reserved_5_6:2;
+ uint64_t pos_bs:1;
+ uint64_t nus_bs:1;
+ uint64_t dob_bs:1;
+ uint64_t pdf_bs:1;
+ uint64_t dpi_bs:1;
+#else
+ uint64_t dpi_bs:1;
+ uint64_t pdf_bs:1;
+ uint64_t dob_bs:1;
+ uint64_t nus_bs:1;
+ uint64_t pos_bs:1;
+ uint64_t reserved_5_6:2;
+ uint64_t pof1_bs:1;
+ uint64_t pof0_bs:1;
+ uint64_t pig_bs:1;
+ uint64_t pgf_bs:1;
+ uint64_t rdnl_bs:1;
+ uint64_t pcad_bs:1;
+ uint64_t pcac_bs:1;
+ uint64_t rdn_bs:1;
+ uint64_t pcn_bs:1;
+ uint64_t pcnc_bs:1;
+ uint64_t rdp_bs:1;
+ uint64_t dif_bs:1;
+ uint64_t csr_bs:1;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn50xx;
+};
+
+union cvmx_npi_buff_size_outputx {
+ uint64_t u64;
+ struct cvmx_npi_buff_size_outputx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63:41;
+ uint64_t isize:7;
+ uint64_t bsize:16;
+#else
+ uint64_t bsize:16;
+ uint64_t isize:7;
+ uint64_t reserved_23_63:41;
+#endif
+ } s;
+};
+
+union cvmx_npi_comp_ctl {
+ uint64_t u64;
+ struct cvmx_npi_comp_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t pctl:5;
+ uint64_t nctl:5;
+#else
+ uint64_t nctl:5;
+ uint64_t pctl:5;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+};
+
+union cvmx_npi_ctl_status {
+ uint64_t u64;
+ struct cvmx_npi_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63_63:1;
+ uint64_t chip_rev:8;
+ uint64_t dis_pniw:1;
+ uint64_t out3_enb:1;
+ uint64_t out2_enb:1;
+ uint64_t out1_enb:1;
+ uint64_t out0_enb:1;
+ uint64_t ins3_enb:1;
+ uint64_t ins2_enb:1;
+ uint64_t ins1_enb:1;
+ uint64_t ins0_enb:1;
+ uint64_t ins3_64b:1;
+ uint64_t ins2_64b:1;
+ uint64_t ins1_64b:1;
+ uint64_t ins0_64b:1;
+ uint64_t pci_wdis:1;
+ uint64_t wait_com:1;
+ uint64_t reserved_37_39:3;
+ uint64_t max_word:5;
+ uint64_t reserved_10_31:22;
+ uint64_t timer:10;
+#else
+ uint64_t timer:10;
+ uint64_t reserved_10_31:22;
+ uint64_t max_word:5;
+ uint64_t reserved_37_39:3;
+ uint64_t wait_com:1;
+ uint64_t pci_wdis:1;
+ uint64_t ins0_64b:1;
+ uint64_t ins1_64b:1;
+ uint64_t ins2_64b:1;
+ uint64_t ins3_64b:1;
+ uint64_t ins0_enb:1;
+ uint64_t ins1_enb:1;
+ uint64_t ins2_enb:1;
+ uint64_t ins3_enb:1;
+ uint64_t out0_enb:1;
+ uint64_t out1_enb:1;
+ uint64_t out2_enb:1;
+ uint64_t out3_enb:1;
+ uint64_t dis_pniw:1;
+ uint64_t chip_rev:8;
+ uint64_t reserved_63_63:1;
+#endif
+ } s;
+ struct cvmx_npi_ctl_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63_63:1;
+ uint64_t chip_rev:8;
+ uint64_t dis_pniw:1;
+ uint64_t reserved_51_53:3;
+ uint64_t out0_enb:1;
+ uint64_t reserved_47_49:3;
+ uint64_t ins0_enb:1;
+ uint64_t reserved_43_45:3;
+ uint64_t ins0_64b:1;
+ uint64_t pci_wdis:1;
+ uint64_t wait_com:1;
+ uint64_t reserved_37_39:3;
+ uint64_t max_word:5;
+ uint64_t reserved_10_31:22;
+ uint64_t timer:10;
+#else
+ uint64_t timer:10;
+ uint64_t reserved_10_31:22;
+ uint64_t max_word:5;
+ uint64_t reserved_37_39:3;
+ uint64_t wait_com:1;
+ uint64_t pci_wdis:1;
+ uint64_t ins0_64b:1;
+ uint64_t reserved_43_45:3;
+ uint64_t ins0_enb:1;
+ uint64_t reserved_47_49:3;
+ uint64_t out0_enb:1;
+ uint64_t reserved_51_53:3;
+ uint64_t dis_pniw:1;
+ uint64_t chip_rev:8;
+ uint64_t reserved_63_63:1;
+#endif
+ } cn30xx;
+ struct cvmx_npi_ctl_status_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63_63:1;
+ uint64_t chip_rev:8;
+ uint64_t dis_pniw:1;
+ uint64_t reserved_52_53:2;
+ uint64_t out1_enb:1;
+ uint64_t out0_enb:1;
+ uint64_t reserved_48_49:2;
+ uint64_t ins1_enb:1;
+ uint64_t ins0_enb:1;
+ uint64_t reserved_44_45:2;
+ uint64_t ins1_64b:1;
+ uint64_t ins0_64b:1;
+ uint64_t pci_wdis:1;
+ uint64_t wait_com:1;
+ uint64_t reserved_37_39:3;
+ uint64_t max_word:5;
+ uint64_t reserved_10_31:22;
+ uint64_t timer:10;
+#else
+ uint64_t timer:10;
+ uint64_t reserved_10_31:22;
+ uint64_t max_word:5;
+ uint64_t reserved_37_39:3;
+ uint64_t wait_com:1;
+ uint64_t pci_wdis:1;
+ uint64_t ins0_64b:1;
+ uint64_t ins1_64b:1;
+ uint64_t reserved_44_45:2;
+ uint64_t ins0_enb:1;
+ uint64_t ins1_enb:1;
+ uint64_t reserved_48_49:2;
+ uint64_t out0_enb:1;
+ uint64_t out1_enb:1;
+ uint64_t reserved_52_53:2;
+ uint64_t dis_pniw:1;
+ uint64_t chip_rev:8;
+ uint64_t reserved_63_63:1;
+#endif
+ } cn31xx;
+};
+
+union cvmx_npi_dbg_select {
+ uint64_t u64;
+ struct cvmx_npi_dbg_select_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t dbg_sel:16;
+#else
+ uint64_t dbg_sel:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_npi_dma_control {
+ uint64_t u64;
+ struct cvmx_npi_dma_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63:28;
+ uint64_t b0_lend:1;
+ uint64_t dwb_denb:1;
+ uint64_t dwb_ichk:9;
+ uint64_t fpa_que:3;
+ uint64_t o_add1:1;
+ uint64_t o_ro:1;
+ uint64_t o_ns:1;
+ uint64_t o_es:2;
+ uint64_t o_mode:1;
+ uint64_t hp_enb:1;
+ uint64_t lp_enb:1;
+ uint64_t csize:14;
+#else
+ uint64_t csize:14;
+ uint64_t lp_enb:1;
+ uint64_t hp_enb:1;
+ uint64_t o_mode:1;
+ uint64_t o_es:2;
+ uint64_t o_ns:1;
+ uint64_t o_ro:1;
+ uint64_t o_add1:1;
+ uint64_t fpa_que:3;
+ uint64_t dwb_ichk:9;
+ uint64_t dwb_denb:1;
+ uint64_t b0_lend:1;
+ uint64_t reserved_36_63:28;
+#endif
+ } s;
+};
+
+union cvmx_npi_dma_highp_counts {
+ uint64_t u64;
+ struct cvmx_npi_dma_highp_counts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63:25;
+ uint64_t fcnt:7;
+ uint64_t dbell:32;
+#else
+ uint64_t dbell:32;
+ uint64_t fcnt:7;
+ uint64_t reserved_39_63:25;
+#endif
+ } s;
+};
+
+union cvmx_npi_dma_highp_naddr {
+ uint64_t u64;
+ struct cvmx_npi_dma_highp_naddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t state:4;
+ uint64_t addr:36;
+#else
+ uint64_t addr:36;
+ uint64_t state:4;
+ uint64_t reserved_40_63:24;
+#endif
+ } s;
+};
+
+union cvmx_npi_dma_lowp_counts {
+ uint64_t u64;
+ struct cvmx_npi_dma_lowp_counts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63:25;
+ uint64_t fcnt:7;
+ uint64_t dbell:32;
+#else
+ uint64_t dbell:32;
+ uint64_t fcnt:7;
+ uint64_t reserved_39_63:25;
+#endif
+ } s;
+};
+
+union cvmx_npi_dma_lowp_naddr {
+ uint64_t u64;
+ struct cvmx_npi_dma_lowp_naddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t state:4;
+ uint64_t addr:36;
+#else
+ uint64_t addr:36;
+ uint64_t state:4;
+ uint64_t reserved_40_63:24;
+#endif
+ } s;
+};
+
+union cvmx_npi_highp_dbell {
+ uint64_t u64;
+ struct cvmx_npi_highp_dbell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t dbell:16;
+#else
+ uint64_t dbell:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_npi_highp_ibuff_saddr {
+ uint64_t u64;
+ struct cvmx_npi_highp_ibuff_saddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63:28;
+ uint64_t saddr:36;
+#else
+ uint64_t saddr:36;
+ uint64_t reserved_36_63:28;
+#endif
+ } s;
+};
+
+union cvmx_npi_input_control {
+ uint64_t u64;
+ struct cvmx_npi_input_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63:41;
+ uint64_t pkt_rr:1;
+ uint64_t pbp_dhi:13;
+ uint64_t d_nsr:1;
+ uint64_t d_esr:2;
+ uint64_t d_ror:1;
+ uint64_t use_csr:1;
+ uint64_t nsr:1;
+ uint64_t esr:2;
+ uint64_t ror:1;
+#else
+ uint64_t ror:1;
+ uint64_t esr:2;
+ uint64_t nsr:1;
+ uint64_t use_csr:1;
+ uint64_t d_ror:1;
+ uint64_t d_esr:2;
+ uint64_t d_nsr:1;
+ uint64_t pbp_dhi:13;
+ uint64_t pkt_rr:1;
+ uint64_t reserved_23_63:41;
+#endif
+ } s;
+ struct cvmx_npi_input_control_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t pbp_dhi:13;
+ uint64_t d_nsr:1;
+ uint64_t d_esr:2;
+ uint64_t d_ror:1;
+ uint64_t use_csr:1;
+ uint64_t nsr:1;
+ uint64_t esr:2;
+ uint64_t ror:1;
+#else
+ uint64_t ror:1;
+ uint64_t esr:2;
+ uint64_t nsr:1;
+ uint64_t use_csr:1;
+ uint64_t d_ror:1;
+ uint64_t d_esr:2;
+ uint64_t d_nsr:1;
+ uint64_t pbp_dhi:13;
+ uint64_t reserved_22_63:42;
+#endif
+ } cn30xx;
+};
+
+union cvmx_npi_int_enb {
+ uint64_t u64;
+ struct cvmx_npi_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ uint64_t q1_a_f:1;
+ uint64_t q1_s_e:1;
+ uint64_t pdf_p_f:1;
+ uint64_t pdf_p_e:1;
+ uint64_t pcf_p_f:1;
+ uint64_t pcf_p_e:1;
+ uint64_t rdx_s_e:1;
+ uint64_t rwx_s_e:1;
+ uint64_t pnc_a_f:1;
+ uint64_t pnc_s_e:1;
+ uint64_t com_a_f:1;
+ uint64_t com_s_e:1;
+ uint64_t q3_a_f:1;
+ uint64_t q3_s_e:1;
+ uint64_t q2_a_f:1;
+ uint64_t q2_s_e:1;
+ uint64_t pcr_a_f:1;
+ uint64_t pcr_s_e:1;
+ uint64_t fcr_a_f:1;
+ uint64_t fcr_s_e:1;
+ uint64_t iobdma:1;
+ uint64_t p_dperr:1;
+ uint64_t win_rto:1;
+ uint64_t i3_pperr:1;
+ uint64_t i2_pperr:1;
+ uint64_t i1_pperr:1;
+ uint64_t i0_pperr:1;
+ uint64_t p3_ptout:1;
+ uint64_t p2_ptout:1;
+ uint64_t p1_ptout:1;
+ uint64_t p0_ptout:1;
+ uint64_t p3_pperr:1;
+ uint64_t p2_pperr:1;
+ uint64_t p1_pperr:1;
+ uint64_t p0_pperr:1;
+ uint64_t g3_rtout:1;
+ uint64_t g2_rtout:1;
+ uint64_t g1_rtout:1;
+ uint64_t g0_rtout:1;
+ uint64_t p3_perr:1;
+ uint64_t p2_perr:1;
+ uint64_t p1_perr:1;
+ uint64_t p0_perr:1;
+ uint64_t p3_rtout:1;
+ uint64_t p2_rtout:1;
+ uint64_t p1_rtout:1;
+ uint64_t p0_rtout:1;
+ uint64_t i3_overf:1;
+ uint64_t i2_overf:1;
+ uint64_t i1_overf:1;
+ uint64_t i0_overf:1;
+ uint64_t i3_rtout:1;
+ uint64_t i2_rtout:1;
+ uint64_t i1_rtout:1;
+ uint64_t i0_rtout:1;
+ uint64_t po3_2sml:1;
+ uint64_t po2_2sml:1;
+ uint64_t po1_2sml:1;
+ uint64_t po0_2sml:1;
+ uint64_t pci_rsl:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t pci_rsl:1;
+ uint64_t po0_2sml:1;
+ uint64_t po1_2sml:1;
+ uint64_t po2_2sml:1;
+ uint64_t po3_2sml:1;
+ uint64_t i0_rtout:1;
+ uint64_t i1_rtout:1;
+ uint64_t i2_rtout:1;
+ uint64_t i3_rtout:1;
+ uint64_t i0_overf:1;
+ uint64_t i1_overf:1;
+ uint64_t i2_overf:1;
+ uint64_t i3_overf:1;
+ uint64_t p0_rtout:1;
+ uint64_t p1_rtout:1;
+ uint64_t p2_rtout:1;
+ uint64_t p3_rtout:1;
+ uint64_t p0_perr:1;
+ uint64_t p1_perr:1;
+ uint64_t p2_perr:1;
+ uint64_t p3_perr:1;
+ uint64_t g0_rtout:1;
+ uint64_t g1_rtout:1;
+ uint64_t g2_rtout:1;
+ uint64_t g3_rtout:1;
+ uint64_t p0_pperr:1;
+ uint64_t p1_pperr:1;
+ uint64_t p2_pperr:1;
+ uint64_t p3_pperr:1;
+ uint64_t p0_ptout:1;
+ uint64_t p1_ptout:1;
+ uint64_t p2_ptout:1;
+ uint64_t p3_ptout:1;
+ uint64_t i0_pperr:1;
+ uint64_t i1_pperr:1;
+ uint64_t i2_pperr:1;
+ uint64_t i3_pperr:1;
+ uint64_t win_rto:1;
+ uint64_t p_dperr:1;
+ uint64_t iobdma:1;
+ uint64_t fcr_s_e:1;
+ uint64_t fcr_a_f:1;
+ uint64_t pcr_s_e:1;
+ uint64_t pcr_a_f:1;
+ uint64_t q2_s_e:1;
+ uint64_t q2_a_f:1;
+ uint64_t q3_s_e:1;
+ uint64_t q3_a_f:1;
+ uint64_t com_s_e:1;
+ uint64_t com_a_f:1;
+ uint64_t pnc_s_e:1;
+ uint64_t pnc_a_f:1;
+ uint64_t rwx_s_e:1;
+ uint64_t rdx_s_e:1;
+ uint64_t pcf_p_e:1;
+ uint64_t pcf_p_f:1;
+ uint64_t pdf_p_e:1;
+ uint64_t pdf_p_f:1;
+ uint64_t q1_s_e:1;
+ uint64_t q1_a_f:1;
+ uint64_t reserved_62_63:2;
+#endif
+ } s;
+ struct cvmx_npi_int_enb_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ uint64_t q1_a_f:1;
+ uint64_t q1_s_e:1;
+ uint64_t pdf_p_f:1;
+ uint64_t pdf_p_e:1;
+ uint64_t pcf_p_f:1;
+ uint64_t pcf_p_e:1;
+ uint64_t rdx_s_e:1;
+ uint64_t rwx_s_e:1;
+ uint64_t pnc_a_f:1;
+ uint64_t pnc_s_e:1;
+ uint64_t com_a_f:1;
+ uint64_t com_s_e:1;
+ uint64_t q3_a_f:1;
+ uint64_t q3_s_e:1;
+ uint64_t q2_a_f:1;
+ uint64_t q2_s_e:1;
+ uint64_t pcr_a_f:1;
+ uint64_t pcr_s_e:1;
+ uint64_t fcr_a_f:1;
+ uint64_t fcr_s_e:1;
+ uint64_t iobdma:1;
+ uint64_t p_dperr:1;
+ uint64_t win_rto:1;
+ uint64_t reserved_36_38:3;
+ uint64_t i0_pperr:1;
+ uint64_t reserved_32_34:3;
+ uint64_t p0_ptout:1;
+ uint64_t reserved_28_30:3;
+ uint64_t p0_pperr:1;
+ uint64_t reserved_24_26:3;
+ uint64_t g0_rtout:1;
+ uint64_t reserved_20_22:3;
+ uint64_t p0_perr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t p0_rtout:1;
+ uint64_t reserved_12_14:3;
+ uint64_t i0_overf:1;
+ uint64_t reserved_8_10:3;
+ uint64_t i0_rtout:1;
+ uint64_t reserved_4_6:3;
+ uint64_t po0_2sml:1;
+ uint64_t pci_rsl:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t pci_rsl:1;
+ uint64_t po0_2sml:1;
+ uint64_t reserved_4_6:3;
+ uint64_t i0_rtout:1;
+ uint64_t reserved_8_10:3;
+ uint64_t i0_overf:1;
+ uint64_t reserved_12_14:3;
+ uint64_t p0_rtout:1;
+ uint64_t reserved_16_18:3;
+ uint64_t p0_perr:1;
+ uint64_t reserved_20_22:3;
+ uint64_t g0_rtout:1;
+ uint64_t reserved_24_26:3;
+ uint64_t p0_pperr:1;
+ uint64_t reserved_28_30:3;
+ uint64_t p0_ptout:1;
+ uint64_t reserved_32_34:3;
+ uint64_t i0_pperr:1;
+ uint64_t reserved_36_38:3;
+ uint64_t win_rto:1;
+ uint64_t p_dperr:1;
+ uint64_t iobdma:1;
+ uint64_t fcr_s_e:1;
+ uint64_t fcr_a_f:1;
+ uint64_t pcr_s_e:1;
+ uint64_t pcr_a_f:1;
+ uint64_t q2_s_e:1;
+ uint64_t q2_a_f:1;
+ uint64_t q3_s_e:1;
+ uint64_t q3_a_f:1;
+ uint64_t com_s_e:1;
+ uint64_t com_a_f:1;
+ uint64_t pnc_s_e:1;
+ uint64_t pnc_a_f:1;
+ uint64_t rwx_s_e:1;
+ uint64_t rdx_s_e:1;
+ uint64_t pcf_p_e:1;
+ uint64_t pcf_p_f:1;
+ uint64_t pdf_p_e:1;
+ uint64_t pdf_p_f:1;
+ uint64_t q1_s_e:1;
+ uint64_t q1_a_f:1;
+ uint64_t reserved_62_63:2;
+#endif
+ } cn30xx;
+ struct cvmx_npi_int_enb_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ uint64_t q1_a_f:1;
+ uint64_t q1_s_e:1;
+ uint64_t pdf_p_f:1;
+ uint64_t pdf_p_e:1;
+ uint64_t pcf_p_f:1;
+ uint64_t pcf_p_e:1;
+ uint64_t rdx_s_e:1;
+ uint64_t rwx_s_e:1;
+ uint64_t pnc_a_f:1;
+ uint64_t pnc_s_e:1;
+ uint64_t com_a_f:1;
+ uint64_t com_s_e:1;
+ uint64_t q3_a_f:1;
+ uint64_t q3_s_e:1;
+ uint64_t q2_a_f:1;
+ uint64_t q2_s_e:1;
+ uint64_t pcr_a_f:1;
+ uint64_t pcr_s_e:1;
+ uint64_t fcr_a_f:1;
+ uint64_t fcr_s_e:1;
+ uint64_t iobdma:1;
+ uint64_t p_dperr:1;
+ uint64_t win_rto:1;
+ uint64_t reserved_37_38:2;
+ uint64_t i1_pperr:1;
+ uint64_t i0_pperr:1;
+ uint64_t reserved_33_34:2;
+ uint64_t p1_ptout:1;
+ uint64_t p0_ptout:1;
+ uint64_t reserved_29_30:2;
+ uint64_t p1_pperr:1;
+ uint64_t p0_pperr:1;
+ uint64_t reserved_25_26:2;
+ uint64_t g1_rtout:1;
+ uint64_t g0_rtout:1;
+ uint64_t reserved_21_22:2;
+ uint64_t p1_perr:1;
+ uint64_t p0_perr:1;
+ uint64_t reserved_17_18:2;
+ uint64_t p1_rtout:1;
+ uint64_t p0_rtout:1;
+ uint64_t reserved_13_14:2;
+ uint64_t i1_overf:1;
+ uint64_t i0_overf:1;
+ uint64_t reserved_9_10:2;
+ uint64_t i1_rtout:1;
+ uint64_t i0_rtout:1;
+ uint64_t reserved_5_6:2;
+ uint64_t po1_2sml:1;
+ uint64_t po0_2sml:1;
+ uint64_t pci_rsl:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t pci_rsl:1;
+ uint64_t po0_2sml:1;
+ uint64_t po1_2sml:1;
+ uint64_t reserved_5_6:2;
+ uint64_t i0_rtout:1;
+ uint64_t i1_rtout:1;
+ uint64_t reserved_9_10:2;
+ uint64_t i0_overf:1;
+ uint64_t i1_overf:1;
+ uint64_t reserved_13_14:2;
+ uint64_t p0_rtout:1;
+ uint64_t p1_rtout:1;
+ uint64_t reserved_17_18:2;
+ uint64_t p0_perr:1;
+ uint64_t p1_perr:1;
+ uint64_t reserved_21_22:2;
+ uint64_t g0_rtout:1;
+ uint64_t g1_rtout:1;
+ uint64_t reserved_25_26:2;
+ uint64_t p0_pperr:1;
+ uint64_t p1_pperr:1;
+ uint64_t reserved_29_30:2;
+ uint64_t p0_ptout:1;
+ uint64_t p1_ptout:1;
+ uint64_t reserved_33_34:2;
+ uint64_t i0_pperr:1;
+ uint64_t i1_pperr:1;
+ uint64_t reserved_37_38:2;
+ uint64_t win_rto:1;
+ uint64_t p_dperr:1;
+ uint64_t iobdma:1;
+ uint64_t fcr_s_e:1;
+ uint64_t fcr_a_f:1;
+ uint64_t pcr_s_e:1;
+ uint64_t pcr_a_f:1;
+ uint64_t q2_s_e:1;
+ uint64_t q2_a_f:1;
+ uint64_t q3_s_e:1;
+ uint64_t q3_a_f:1;
+ uint64_t com_s_e:1;
+ uint64_t com_a_f:1;
+ uint64_t pnc_s_e:1;
+ uint64_t pnc_a_f:1;
+ uint64_t rwx_s_e:1;
+ uint64_t rdx_s_e:1;
+ uint64_t pcf_p_e:1;
+ uint64_t pcf_p_f:1;
+ uint64_t pdf_p_e:1;
+ uint64_t pdf_p_f:1;
+ uint64_t q1_s_e:1;
+ uint64_t q1_a_f:1;
+ uint64_t reserved_62_63:2;
+#endif
+ } cn31xx;
+ struct cvmx_npi_int_enb_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63:22;
+ uint64_t iobdma:1;
+ uint64_t p_dperr:1;
+ uint64_t win_rto:1;
+ uint64_t i3_pperr:1;
+ uint64_t i2_pperr:1;
+ uint64_t i1_pperr:1;
+ uint64_t i0_pperr:1;
+ uint64_t p3_ptout:1;
+ uint64_t p2_ptout:1;
+ uint64_t p1_ptout:1;
+ uint64_t p0_ptout:1;
+ uint64_t p3_pperr:1;
+ uint64_t p2_pperr:1;
+ uint64_t p1_pperr:1;
+ uint64_t p0_pperr:1;
+ uint64_t g3_rtout:1;
+ uint64_t g2_rtout:1;
+ uint64_t g1_rtout:1;
+ uint64_t g0_rtout:1;
+ uint64_t p3_perr:1;
+ uint64_t p2_perr:1;
+ uint64_t p1_perr:1;
+ uint64_t p0_perr:1;
+ uint64_t p3_rtout:1;
+ uint64_t p2_rtout:1;
+ uint64_t p1_rtout:1;
+ uint64_t p0_rtout:1;
+ uint64_t i3_overf:1;
+ uint64_t i2_overf:1;
+ uint64_t i1_overf:1;
+ uint64_t i0_overf:1;
+ uint64_t i3_rtout:1;
+ uint64_t i2_rtout:1;
+ uint64_t i1_rtout:1;
+ uint64_t i0_rtout:1;
+ uint64_t po3_2sml:1;
+ uint64_t po2_2sml:1;
+ uint64_t po1_2sml:1;
+ uint64_t po0_2sml:1;
+ uint64_t pci_rsl:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t pci_rsl:1;
+ uint64_t po0_2sml:1;
+ uint64_t po1_2sml:1;
+ uint64_t po2_2sml:1;
+ uint64_t po3_2sml:1;
+ uint64_t i0_rtout:1;
+ uint64_t i1_rtout:1;
+ uint64_t i2_rtout:1;
+ uint64_t i3_rtout:1;
+ uint64_t i0_overf:1;
+ uint64_t i1_overf:1;
+ uint64_t i2_overf:1;
+ uint64_t i3_overf:1;
+ uint64_t p0_rtout:1;
+ uint64_t p1_rtout:1;
+ uint64_t p2_rtout:1;
+ uint64_t p3_rtout:1;
+ uint64_t p0_perr:1;
+ uint64_t p1_perr:1;
+ uint64_t p2_perr:1;
+ uint64_t p3_perr:1;
+ uint64_t g0_rtout:1;
+ uint64_t g1_rtout:1;
+ uint64_t g2_rtout:1;
+ uint64_t g3_rtout:1;
+ uint64_t p0_pperr:1;
+ uint64_t p1_pperr:1;
+ uint64_t p2_pperr:1;
+ uint64_t p3_pperr:1;
+ uint64_t p0_ptout:1;
+ uint64_t p1_ptout:1;
+ uint64_t p2_ptout:1;
+ uint64_t p3_ptout:1;
+ uint64_t i0_pperr:1;
+ uint64_t i1_pperr:1;
+ uint64_t i2_pperr:1;
+ uint64_t i3_pperr:1;
+ uint64_t win_rto:1;
+ uint64_t p_dperr:1;
+ uint64_t iobdma:1;
+ uint64_t reserved_42_63:22;
+#endif
+ } cn38xxp2;
+};
+
+union cvmx_npi_int_sum {
+ uint64_t u64;
+ struct cvmx_npi_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ uint64_t q1_a_f:1;
+ uint64_t q1_s_e:1;
+ uint64_t pdf_p_f:1;
+ uint64_t pdf_p_e:1;
+ uint64_t pcf_p_f:1;
+ uint64_t pcf_p_e:1;
+ uint64_t rdx_s_e:1;
+ uint64_t rwx_s_e:1;
+ uint64_t pnc_a_f:1;
+ uint64_t pnc_s_e:1;
+ uint64_t com_a_f:1;
+ uint64_t com_s_e:1;
+ uint64_t q3_a_f:1;
+ uint64_t q3_s_e:1;
+ uint64_t q2_a_f:1;
+ uint64_t q2_s_e:1;
+ uint64_t pcr_a_f:1;
+ uint64_t pcr_s_e:1;
+ uint64_t fcr_a_f:1;
+ uint64_t fcr_s_e:1;
+ uint64_t iobdma:1;
+ uint64_t p_dperr:1;
+ uint64_t win_rto:1;
+ uint64_t i3_pperr:1;
+ uint64_t i2_pperr:1;
+ uint64_t i1_pperr:1;
+ uint64_t i0_pperr:1;
+ uint64_t p3_ptout:1;
+ uint64_t p2_ptout:1;
+ uint64_t p1_ptout:1;
+ uint64_t p0_ptout:1;
+ uint64_t p3_pperr:1;
+ uint64_t p2_pperr:1;
+ uint64_t p1_pperr:1;
+ uint64_t p0_pperr:1;
+ uint64_t g3_rtout:1;
+ uint64_t g2_rtout:1;
+ uint64_t g1_rtout:1;
+ uint64_t g0_rtout:1;
+ uint64_t p3_perr:1;
+ uint64_t p2_perr:1;
+ uint64_t p1_perr:1;
+ uint64_t p0_perr:1;
+ uint64_t p3_rtout:1;
+ uint64_t p2_rtout:1;
+ uint64_t p1_rtout:1;
+ uint64_t p0_rtout:1;
+ uint64_t i3_overf:1;
+ uint64_t i2_overf:1;
+ uint64_t i1_overf:1;
+ uint64_t i0_overf:1;
+ uint64_t i3_rtout:1;
+ uint64_t i2_rtout:1;
+ uint64_t i1_rtout:1;
+ uint64_t i0_rtout:1;
+ uint64_t po3_2sml:1;
+ uint64_t po2_2sml:1;
+ uint64_t po1_2sml:1;
+ uint64_t po0_2sml:1;
+ uint64_t pci_rsl:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t pci_rsl:1;
+ uint64_t po0_2sml:1;
+ uint64_t po1_2sml:1;
+ uint64_t po2_2sml:1;
+ uint64_t po3_2sml:1;
+ uint64_t i0_rtout:1;
+ uint64_t i1_rtout:1;
+ uint64_t i2_rtout:1;
+ uint64_t i3_rtout:1;
+ uint64_t i0_overf:1;
+ uint64_t i1_overf:1;
+ uint64_t i2_overf:1;
+ uint64_t i3_overf:1;
+ uint64_t p0_rtout:1;
+ uint64_t p1_rtout:1;
+ uint64_t p2_rtout:1;
+ uint64_t p3_rtout:1;
+ uint64_t p0_perr:1;
+ uint64_t p1_perr:1;
+ uint64_t p2_perr:1;
+ uint64_t p3_perr:1;
+ uint64_t g0_rtout:1;
+ uint64_t g1_rtout:1;
+ uint64_t g2_rtout:1;
+ uint64_t g3_rtout:1;
+ uint64_t p0_pperr:1;
+ uint64_t p1_pperr:1;
+ uint64_t p2_pperr:1;
+ uint64_t p3_pperr:1;
+ uint64_t p0_ptout:1;
+ uint64_t p1_ptout:1;
+ uint64_t p2_ptout:1;
+ uint64_t p3_ptout:1;
+ uint64_t i0_pperr:1;
+ uint64_t i1_pperr:1;
+ uint64_t i2_pperr:1;
+ uint64_t i3_pperr:1;
+ uint64_t win_rto:1;
+ uint64_t p_dperr:1;
+ uint64_t iobdma:1;
+ uint64_t fcr_s_e:1;
+ uint64_t fcr_a_f:1;
+ uint64_t pcr_s_e:1;
+ uint64_t pcr_a_f:1;
+ uint64_t q2_s_e:1;
+ uint64_t q2_a_f:1;
+ uint64_t q3_s_e:1;
+ uint64_t q3_a_f:1;
+ uint64_t com_s_e:1;
+ uint64_t com_a_f:1;
+ uint64_t pnc_s_e:1;
+ uint64_t pnc_a_f:1;
+ uint64_t rwx_s_e:1;
+ uint64_t rdx_s_e:1;
+ uint64_t pcf_p_e:1;
+ uint64_t pcf_p_f:1;
+ uint64_t pdf_p_e:1;
+ uint64_t pdf_p_f:1;
+ uint64_t q1_s_e:1;
+ uint64_t q1_a_f:1;
+ uint64_t reserved_62_63:2;
+#endif
+ } s;
+ struct cvmx_npi_int_sum_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ uint64_t q1_a_f:1;
+ uint64_t q1_s_e:1;
+ uint64_t pdf_p_f:1;
+ uint64_t pdf_p_e:1;
+ uint64_t pcf_p_f:1;
+ uint64_t pcf_p_e:1;
+ uint64_t rdx_s_e:1;
+ uint64_t rwx_s_e:1;
+ uint64_t pnc_a_f:1;
+ uint64_t pnc_s_e:1;
+ uint64_t com_a_f:1;
+ uint64_t com_s_e:1;
+ uint64_t q3_a_f:1;
+ uint64_t q3_s_e:1;
+ uint64_t q2_a_f:1;
+ uint64_t q2_s_e:1;
+ uint64_t pcr_a_f:1;
+ uint64_t pcr_s_e:1;
+ uint64_t fcr_a_f:1;
+ uint64_t fcr_s_e:1;
+ uint64_t iobdma:1;
+ uint64_t p_dperr:1;
+ uint64_t win_rto:1;
+ uint64_t reserved_36_38:3;
+ uint64_t i0_pperr:1;
+ uint64_t reserved_32_34:3;
+ uint64_t p0_ptout:1;
+ uint64_t reserved_28_30:3;
+ uint64_t p0_pperr:1;
+ uint64_t reserved_24_26:3;
+ uint64_t g0_rtout:1;
+ uint64_t reserved_20_22:3;
+ uint64_t p0_perr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t p0_rtout:1;
+ uint64_t reserved_12_14:3;
+ uint64_t i0_overf:1;
+ uint64_t reserved_8_10:3;
+ uint64_t i0_rtout:1;
+ uint64_t reserved_4_6:3;
+ uint64_t po0_2sml:1;
+ uint64_t pci_rsl:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t pci_rsl:1;
+ uint64_t po0_2sml:1;
+ uint64_t reserved_4_6:3;
+ uint64_t i0_rtout:1;
+ uint64_t reserved_8_10:3;
+ uint64_t i0_overf:1;
+ uint64_t reserved_12_14:3;
+ uint64_t p0_rtout:1;
+ uint64_t reserved_16_18:3;
+ uint64_t p0_perr:1;
+ uint64_t reserved_20_22:3;
+ uint64_t g0_rtout:1;
+ uint64_t reserved_24_26:3;
+ uint64_t p0_pperr:1;
+ uint64_t reserved_28_30:3;
+ uint64_t p0_ptout:1;
+ uint64_t reserved_32_34:3;
+ uint64_t i0_pperr:1;
+ uint64_t reserved_36_38:3;
+ uint64_t win_rto:1;
+ uint64_t p_dperr:1;
+ uint64_t iobdma:1;
+ uint64_t fcr_s_e:1;
+ uint64_t fcr_a_f:1;
+ uint64_t pcr_s_e:1;
+ uint64_t pcr_a_f:1;
+ uint64_t q2_s_e:1;
+ uint64_t q2_a_f:1;
+ uint64_t q3_s_e:1;
+ uint64_t q3_a_f:1;
+ uint64_t com_s_e:1;
+ uint64_t com_a_f:1;
+ uint64_t pnc_s_e:1;
+ uint64_t pnc_a_f:1;
+ uint64_t rwx_s_e:1;
+ uint64_t rdx_s_e:1;
+ uint64_t pcf_p_e:1;
+ uint64_t pcf_p_f:1;
+ uint64_t pdf_p_e:1;
+ uint64_t pdf_p_f:1;
+ uint64_t q1_s_e:1;
+ uint64_t q1_a_f:1;
+ uint64_t reserved_62_63:2;
+#endif
+ } cn30xx;
+ struct cvmx_npi_int_sum_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ uint64_t q1_a_f:1;
+ uint64_t q1_s_e:1;
+ uint64_t pdf_p_f:1;
+ uint64_t pdf_p_e:1;
+ uint64_t pcf_p_f:1;
+ uint64_t pcf_p_e:1;
+ uint64_t rdx_s_e:1;
+ uint64_t rwx_s_e:1;
+ uint64_t pnc_a_f:1;
+ uint64_t pnc_s_e:1;
+ uint64_t com_a_f:1;
+ uint64_t com_s_e:1;
+ uint64_t q3_a_f:1;
+ uint64_t q3_s_e:1;
+ uint64_t q2_a_f:1;
+ uint64_t q2_s_e:1;
+ uint64_t pcr_a_f:1;
+ uint64_t pcr_s_e:1;
+ uint64_t fcr_a_f:1;
+ uint64_t fcr_s_e:1;
+ uint64_t iobdma:1;
+ uint64_t p_dperr:1;
+ uint64_t win_rto:1;
+ uint64_t reserved_37_38:2;
+ uint64_t i1_pperr:1;
+ uint64_t i0_pperr:1;
+ uint64_t reserved_33_34:2;
+ uint64_t p1_ptout:1;
+ uint64_t p0_ptout:1;
+ uint64_t reserved_29_30:2;
+ uint64_t p1_pperr:1;
+ uint64_t p0_pperr:1;
+ uint64_t reserved_25_26:2;
+ uint64_t g1_rtout:1;
+ uint64_t g0_rtout:1;
+ uint64_t reserved_21_22:2;
+ uint64_t p1_perr:1;
+ uint64_t p0_perr:1;
+ uint64_t reserved_17_18:2;
+ uint64_t p1_rtout:1;
+ uint64_t p0_rtout:1;
+ uint64_t reserved_13_14:2;
+ uint64_t i1_overf:1;
+ uint64_t i0_overf:1;
+ uint64_t reserved_9_10:2;
+ uint64_t i1_rtout:1;
+ uint64_t i0_rtout:1;
+ uint64_t reserved_5_6:2;
+ uint64_t po1_2sml:1;
+ uint64_t po0_2sml:1;
+ uint64_t pci_rsl:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t pci_rsl:1;
+ uint64_t po0_2sml:1;
+ uint64_t po1_2sml:1;
+ uint64_t reserved_5_6:2;
+ uint64_t i0_rtout:1;
+ uint64_t i1_rtout:1;
+ uint64_t reserved_9_10:2;
+ uint64_t i0_overf:1;
+ uint64_t i1_overf:1;
+ uint64_t reserved_13_14:2;
+ uint64_t p0_rtout:1;
+ uint64_t p1_rtout:1;
+ uint64_t reserved_17_18:2;
+ uint64_t p0_perr:1;
+ uint64_t p1_perr:1;
+ uint64_t reserved_21_22:2;
+ uint64_t g0_rtout:1;
+ uint64_t g1_rtout:1;
+ uint64_t reserved_25_26:2;
+ uint64_t p0_pperr:1;
+ uint64_t p1_pperr:1;
+ uint64_t reserved_29_30:2;
+ uint64_t p0_ptout:1;
+ uint64_t p1_ptout:1;
+ uint64_t reserved_33_34:2;
+ uint64_t i0_pperr:1;
+ uint64_t i1_pperr:1;
+ uint64_t reserved_37_38:2;
+ uint64_t win_rto:1;
+ uint64_t p_dperr:1;
+ uint64_t iobdma:1;
+ uint64_t fcr_s_e:1;
+ uint64_t fcr_a_f:1;
+ uint64_t pcr_s_e:1;
+ uint64_t pcr_a_f:1;
+ uint64_t q2_s_e:1;
+ uint64_t q2_a_f:1;
+ uint64_t q3_s_e:1;
+ uint64_t q3_a_f:1;
+ uint64_t com_s_e:1;
+ uint64_t com_a_f:1;
+ uint64_t pnc_s_e:1;
+ uint64_t pnc_a_f:1;
+ uint64_t rwx_s_e:1;
+ uint64_t rdx_s_e:1;
+ uint64_t pcf_p_e:1;
+ uint64_t pcf_p_f:1;
+ uint64_t pdf_p_e:1;
+ uint64_t pdf_p_f:1;
+ uint64_t q1_s_e:1;
+ uint64_t q1_a_f:1;
+ uint64_t reserved_62_63:2;
+#endif
+ } cn31xx;
+ struct cvmx_npi_int_sum_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63:22;
+ uint64_t iobdma:1;
+ uint64_t p_dperr:1;
+ uint64_t win_rto:1;
+ uint64_t i3_pperr:1;
+ uint64_t i2_pperr:1;
+ uint64_t i1_pperr:1;
+ uint64_t i0_pperr:1;
+ uint64_t p3_ptout:1;
+ uint64_t p2_ptout:1;
+ uint64_t p1_ptout:1;
+ uint64_t p0_ptout:1;
+ uint64_t p3_pperr:1;
+ uint64_t p2_pperr:1;
+ uint64_t p1_pperr:1;
+ uint64_t p0_pperr:1;
+ uint64_t g3_rtout:1;
+ uint64_t g2_rtout:1;
+ uint64_t g1_rtout:1;
+ uint64_t g0_rtout:1;
+ uint64_t p3_perr:1;
+ uint64_t p2_perr:1;
+ uint64_t p1_perr:1;
+ uint64_t p0_perr:1;
+ uint64_t p3_rtout:1;
+ uint64_t p2_rtout:1;
+ uint64_t p1_rtout:1;
+ uint64_t p0_rtout:1;
+ uint64_t i3_overf:1;
+ uint64_t i2_overf:1;
+ uint64_t i1_overf:1;
+ uint64_t i0_overf:1;
+ uint64_t i3_rtout:1;
+ uint64_t i2_rtout:1;
+ uint64_t i1_rtout:1;
+ uint64_t i0_rtout:1;
+ uint64_t po3_2sml:1;
+ uint64_t po2_2sml:1;
+ uint64_t po1_2sml:1;
+ uint64_t po0_2sml:1;
+ uint64_t pci_rsl:1;
+ uint64_t rml_wto:1;
+ uint64_t rml_rto:1;
+#else
+ uint64_t rml_rto:1;
+ uint64_t rml_wto:1;
+ uint64_t pci_rsl:1;
+ uint64_t po0_2sml:1;
+ uint64_t po1_2sml:1;
+ uint64_t po2_2sml:1;
+ uint64_t po3_2sml:1;
+ uint64_t i0_rtout:1;
+ uint64_t i1_rtout:1;
+ uint64_t i2_rtout:1;
+ uint64_t i3_rtout:1;
+ uint64_t i0_overf:1;
+ uint64_t i1_overf:1;
+ uint64_t i2_overf:1;
+ uint64_t i3_overf:1;
+ uint64_t p0_rtout:1;
+ uint64_t p1_rtout:1;
+ uint64_t p2_rtout:1;
+ uint64_t p3_rtout:1;
+ uint64_t p0_perr:1;
+ uint64_t p1_perr:1;
+ uint64_t p2_perr:1;
+ uint64_t p3_perr:1;
+ uint64_t g0_rtout:1;
+ uint64_t g1_rtout:1;
+ uint64_t g2_rtout:1;
+ uint64_t g3_rtout:1;
+ uint64_t p0_pperr:1;
+ uint64_t p1_pperr:1;
+ uint64_t p2_pperr:1;
+ uint64_t p3_pperr:1;
+ uint64_t p0_ptout:1;
+ uint64_t p1_ptout:1;
+ uint64_t p2_ptout:1;
+ uint64_t p3_ptout:1;
+ uint64_t i0_pperr:1;
+ uint64_t i1_pperr:1;
+ uint64_t i2_pperr:1;
+ uint64_t i3_pperr:1;
+ uint64_t win_rto:1;
+ uint64_t p_dperr:1;
+ uint64_t iobdma:1;
+ uint64_t reserved_42_63:22;
+#endif
+ } cn38xxp2;
+};
+
+union cvmx_npi_lowp_dbell {
+ uint64_t u64;
+ struct cvmx_npi_lowp_dbell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t dbell:16;
+#else
+ uint64_t dbell:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_npi_lowp_ibuff_saddr {
+ uint64_t u64;
+ struct cvmx_npi_lowp_ibuff_saddr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63:28;
+ uint64_t saddr:36;
+#else
+ uint64_t saddr:36;
+ uint64_t reserved_36_63:28;
+#endif
+ } s;
+};
+
+union cvmx_npi_mem_access_subidx {
+ uint64_t u64;
+ struct cvmx_npi_mem_access_subidx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63:26;
+ uint64_t shortl:1;
+ uint64_t nmerge:1;
+ uint64_t esr:2;
+ uint64_t esw:2;
+ uint64_t nsr:1;
+ uint64_t nsw:1;
+ uint64_t ror:1;
+ uint64_t row:1;
+ uint64_t ba:28;
+#else
+ uint64_t ba:28;
+ uint64_t row:1;
+ uint64_t ror:1;
+ uint64_t nsw:1;
+ uint64_t nsr:1;
+ uint64_t esw:2;
+ uint64_t esr:2;
+ uint64_t nmerge:1;
+ uint64_t shortl:1;
+ uint64_t reserved_38_63:26;
+#endif
+ } s;
+ struct cvmx_npi_mem_access_subidx_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63:28;
+ uint64_t esr:2;
+ uint64_t esw:2;
+ uint64_t nsr:1;
+ uint64_t nsw:1;
+ uint64_t ror:1;
+ uint64_t row:1;
+ uint64_t ba:28;
+#else
+ uint64_t ba:28;
+ uint64_t row:1;
+ uint64_t ror:1;
+ uint64_t nsw:1;
+ uint64_t nsr:1;
+ uint64_t esw:2;
+ uint64_t esr:2;
+ uint64_t reserved_36_63:28;
+#endif
+ } cn31xx;
+};
+
+union cvmx_npi_msi_rcv {
+ uint64_t u64;
+ struct cvmx_npi_msi_rcv_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t int_vec:64;
+#else
+ uint64_t int_vec:64;
+#endif
+ } s;
+};
+
+union cvmx_npi_num_desc_outputx {
+ uint64_t u64;
+ struct cvmx_npi_num_desc_outputx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t size:32;
+#else
+ uint64_t size:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npi_output_control {
+ uint64_t u64;
+ struct cvmx_npi_output_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63:15;
+ uint64_t pkt_rr:1;
+ uint64_t p3_bmode:1;
+ uint64_t p2_bmode:1;
+ uint64_t p1_bmode:1;
+ uint64_t p0_bmode:1;
+ uint64_t o3_es:2;
+ uint64_t o3_ns:1;
+ uint64_t o3_ro:1;
+ uint64_t o2_es:2;
+ uint64_t o2_ns:1;
+ uint64_t o2_ro:1;
+ uint64_t o1_es:2;
+ uint64_t o1_ns:1;
+ uint64_t o1_ro:1;
+ uint64_t o0_es:2;
+ uint64_t o0_ns:1;
+ uint64_t o0_ro:1;
+ uint64_t o3_csrm:1;
+ uint64_t o2_csrm:1;
+ uint64_t o1_csrm:1;
+ uint64_t o0_csrm:1;
+ uint64_t reserved_20_23:4;
+ uint64_t iptr_o3:1;
+ uint64_t iptr_o2:1;
+ uint64_t iptr_o1:1;
+ uint64_t iptr_o0:1;
+ uint64_t esr_sl3:2;
+ uint64_t nsr_sl3:1;
+ uint64_t ror_sl3:1;
+ uint64_t esr_sl2:2;
+ uint64_t nsr_sl2:1;
+ uint64_t ror_sl2:1;
+ uint64_t esr_sl1:2;
+ uint64_t nsr_sl1:1;
+ uint64_t ror_sl1:1;
+ uint64_t esr_sl0:2;
+ uint64_t nsr_sl0:1;
+ uint64_t ror_sl0:1;
+#else
+ uint64_t ror_sl0:1;
+ uint64_t nsr_sl0:1;
+ uint64_t esr_sl0:2;
+ uint64_t ror_sl1:1;
+ uint64_t nsr_sl1:1;
+ uint64_t esr_sl1:2;
+ uint64_t ror_sl2:1;
+ uint64_t nsr_sl2:1;
+ uint64_t esr_sl2:2;
+ uint64_t ror_sl3:1;
+ uint64_t nsr_sl3:1;
+ uint64_t esr_sl3:2;
+ uint64_t iptr_o0:1;
+ uint64_t iptr_o1:1;
+ uint64_t iptr_o2:1;
+ uint64_t iptr_o3:1;
+ uint64_t reserved_20_23:4;
+ uint64_t o0_csrm:1;
+ uint64_t o1_csrm:1;
+ uint64_t o2_csrm:1;
+ uint64_t o3_csrm:1;
+ uint64_t o0_ro:1;
+ uint64_t o0_ns:1;
+ uint64_t o0_es:2;
+ uint64_t o1_ro:1;
+ uint64_t o1_ns:1;
+ uint64_t o1_es:2;
+ uint64_t o2_ro:1;
+ uint64_t o2_ns:1;
+ uint64_t o2_es:2;
+ uint64_t o3_ro:1;
+ uint64_t o3_ns:1;
+ uint64_t o3_es:2;
+ uint64_t p0_bmode:1;
+ uint64_t p1_bmode:1;
+ uint64_t p2_bmode:1;
+ uint64_t p3_bmode:1;
+ uint64_t pkt_rr:1;
+ uint64_t reserved_49_63:15;
+#endif
+ } s;
+ struct cvmx_npi_output_control_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_45_63:19;
+ uint64_t p0_bmode:1;
+ uint64_t reserved_32_43:12;
+ uint64_t o0_es:2;
+ uint64_t o0_ns:1;
+ uint64_t o0_ro:1;
+ uint64_t reserved_25_27:3;
+ uint64_t o0_csrm:1;
+ uint64_t reserved_17_23:7;
+ uint64_t iptr_o0:1;
+ uint64_t reserved_4_15:12;
+ uint64_t esr_sl0:2;
+ uint64_t nsr_sl0:1;
+ uint64_t ror_sl0:1;
+#else
+ uint64_t ror_sl0:1;
+ uint64_t nsr_sl0:1;
+ uint64_t esr_sl0:2;
+ uint64_t reserved_4_15:12;
+ uint64_t iptr_o0:1;
+ uint64_t reserved_17_23:7;
+ uint64_t o0_csrm:1;
+ uint64_t reserved_25_27:3;
+ uint64_t o0_ro:1;
+ uint64_t o0_ns:1;
+ uint64_t o0_es:2;
+ uint64_t reserved_32_43:12;
+ uint64_t p0_bmode:1;
+ uint64_t reserved_45_63:19;
+#endif
+ } cn30xx;
+ struct cvmx_npi_output_control_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63:18;
+ uint64_t p1_bmode:1;
+ uint64_t p0_bmode:1;
+ uint64_t reserved_36_43:8;
+ uint64_t o1_es:2;
+ uint64_t o1_ns:1;
+ uint64_t o1_ro:1;
+ uint64_t o0_es:2;
+ uint64_t o0_ns:1;
+ uint64_t o0_ro:1;
+ uint64_t reserved_26_27:2;
+ uint64_t o1_csrm:1;
+ uint64_t o0_csrm:1;
+ uint64_t reserved_18_23:6;
+ uint64_t iptr_o1:1;
+ uint64_t iptr_o0:1;
+ uint64_t reserved_8_15:8;
+ uint64_t esr_sl1:2;
+ uint64_t nsr_sl1:1;
+ uint64_t ror_sl1:1;
+ uint64_t esr_sl0:2;
+ uint64_t nsr_sl0:1;
+ uint64_t ror_sl0:1;
+#else
+ uint64_t ror_sl0:1;
+ uint64_t nsr_sl0:1;
+ uint64_t esr_sl0:2;
+ uint64_t ror_sl1:1;
+ uint64_t nsr_sl1:1;
+ uint64_t esr_sl1:2;
+ uint64_t reserved_8_15:8;
+ uint64_t iptr_o0:1;
+ uint64_t iptr_o1:1;
+ uint64_t reserved_18_23:6;
+ uint64_t o0_csrm:1;
+ uint64_t o1_csrm:1;
+ uint64_t reserved_26_27:2;
+ uint64_t o0_ro:1;
+ uint64_t o0_ns:1;
+ uint64_t o0_es:2;
+ uint64_t o1_ro:1;
+ uint64_t o1_ns:1;
+ uint64_t o1_es:2;
+ uint64_t reserved_36_43:8;
+ uint64_t p0_bmode:1;
+ uint64_t p1_bmode:1;
+ uint64_t reserved_46_63:18;
+#endif
+ } cn31xx;
+ struct cvmx_npi_output_control_cn38xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t p3_bmode:1;
+ uint64_t p2_bmode:1;
+ uint64_t p1_bmode:1;
+ uint64_t p0_bmode:1;
+ uint64_t o3_es:2;
+ uint64_t o3_ns:1;
+ uint64_t o3_ro:1;
+ uint64_t o2_es:2;
+ uint64_t o2_ns:1;
+ uint64_t o2_ro:1;
+ uint64_t o1_es:2;
+ uint64_t o1_ns:1;
+ uint64_t o1_ro:1;
+ uint64_t o0_es:2;
+ uint64_t o0_ns:1;
+ uint64_t o0_ro:1;
+ uint64_t o3_csrm:1;
+ uint64_t o2_csrm:1;
+ uint64_t o1_csrm:1;
+ uint64_t o0_csrm:1;
+ uint64_t reserved_20_23:4;
+ uint64_t iptr_o3:1;
+ uint64_t iptr_o2:1;
+ uint64_t iptr_o1:1;
+ uint64_t iptr_o0:1;
+ uint64_t esr_sl3:2;
+ uint64_t nsr_sl3:1;
+ uint64_t ror_sl3:1;
+ uint64_t esr_sl2:2;
+ uint64_t nsr_sl2:1;
+ uint64_t ror_sl2:1;
+ uint64_t esr_sl1:2;
+ uint64_t nsr_sl1:1;
+ uint64_t ror_sl1:1;
+ uint64_t esr_sl0:2;
+ uint64_t nsr_sl0:1;
+ uint64_t ror_sl0:1;
+#else
+ uint64_t ror_sl0:1;
+ uint64_t nsr_sl0:1;
+ uint64_t esr_sl0:2;
+ uint64_t ror_sl1:1;
+ uint64_t nsr_sl1:1;
+ uint64_t esr_sl1:2;
+ uint64_t ror_sl2:1;
+ uint64_t nsr_sl2:1;
+ uint64_t esr_sl2:2;
+ uint64_t ror_sl3:1;
+ uint64_t nsr_sl3:1;
+ uint64_t esr_sl3:2;
+ uint64_t iptr_o0:1;
+ uint64_t iptr_o1:1;
+ uint64_t iptr_o2:1;
+ uint64_t iptr_o3:1;
+ uint64_t reserved_20_23:4;
+ uint64_t o0_csrm:1;
+ uint64_t o1_csrm:1;
+ uint64_t o2_csrm:1;
+ uint64_t o3_csrm:1;
+ uint64_t o0_ro:1;
+ uint64_t o0_ns:1;
+ uint64_t o0_es:2;
+ uint64_t o1_ro:1;
+ uint64_t o1_ns:1;
+ uint64_t o1_es:2;
+ uint64_t o2_ro:1;
+ uint64_t o2_ns:1;
+ uint64_t o2_es:2;
+ uint64_t o3_ro:1;
+ uint64_t o3_ns:1;
+ uint64_t o3_es:2;
+ uint64_t p0_bmode:1;
+ uint64_t p1_bmode:1;
+ uint64_t p2_bmode:1;
+ uint64_t p3_bmode:1;
+ uint64_t reserved_48_63:16;
+#endif
+ } cn38xxp2;
+ struct cvmx_npi_output_control_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63:15;
+ uint64_t pkt_rr:1;
+ uint64_t reserved_46_47:2;
+ uint64_t p1_bmode:1;
+ uint64_t p0_bmode:1;
+ uint64_t reserved_36_43:8;
+ uint64_t o1_es:2;
+ uint64_t o1_ns:1;
+ uint64_t o1_ro:1;
+ uint64_t o0_es:2;
+ uint64_t o0_ns:1;
+ uint64_t o0_ro:1;
+ uint64_t reserved_26_27:2;
+ uint64_t o1_csrm:1;
+ uint64_t o0_csrm:1;
+ uint64_t reserved_18_23:6;
+ uint64_t iptr_o1:1;
+ uint64_t iptr_o0:1;
+ uint64_t reserved_8_15:8;
+ uint64_t esr_sl1:2;
+ uint64_t nsr_sl1:1;
+ uint64_t ror_sl1:1;
+ uint64_t esr_sl0:2;
+ uint64_t nsr_sl0:1;
+ uint64_t ror_sl0:1;
+#else
+ uint64_t ror_sl0:1;
+ uint64_t nsr_sl0:1;
+ uint64_t esr_sl0:2;
+ uint64_t ror_sl1:1;
+ uint64_t nsr_sl1:1;
+ uint64_t esr_sl1:2;
+ uint64_t reserved_8_15:8;
+ uint64_t iptr_o0:1;
+ uint64_t iptr_o1:1;
+ uint64_t reserved_18_23:6;
+ uint64_t o0_csrm:1;
+ uint64_t o1_csrm:1;
+ uint64_t reserved_26_27:2;
+ uint64_t o0_ro:1;
+ uint64_t o0_ns:1;
+ uint64_t o0_es:2;
+ uint64_t o1_ro:1;
+ uint64_t o1_ns:1;
+ uint64_t o1_es:2;
+ uint64_t reserved_36_43:8;
+ uint64_t p0_bmode:1;
+ uint64_t p1_bmode:1;
+ uint64_t reserved_46_47:2;
+ uint64_t pkt_rr:1;
+ uint64_t reserved_49_63:15;
+#endif
+ } cn50xx;
+};
+
+union cvmx_npi_px_dbpair_addr {
+ uint64_t u64;
+ struct cvmx_npi_px_dbpair_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63_63:1;
+ uint64_t state:2;
+ uint64_t naddr:61;
+#else
+ uint64_t naddr:61;
+ uint64_t state:2;
+ uint64_t reserved_63_63:1;
+#endif
+ } s;
+};
+
+union cvmx_npi_px_instr_addr {
+ uint64_t u64;
+ struct cvmx_npi_px_instr_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t state:3;
+ uint64_t naddr:61;
+#else
+ uint64_t naddr:61;
+ uint64_t state:3;
+#endif
+ } s;
+};
+
+union cvmx_npi_px_instr_cnts {
+ uint64_t u64;
+ struct cvmx_npi_px_instr_cnts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63:26;
+ uint64_t fcnt:6;
+ uint64_t avail:32;
+#else
+ uint64_t avail:32;
+ uint64_t fcnt:6;
+ uint64_t reserved_38_63:26;
+#endif
+ } s;
+};
+
+union cvmx_npi_px_pair_cnts {
+ uint64_t u64;
+ struct cvmx_npi_px_pair_cnts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63:27;
+ uint64_t fcnt:5;
+ uint64_t avail:32;
+#else
+ uint64_t avail:32;
+ uint64_t fcnt:5;
+ uint64_t reserved_37_63:27;
+#endif
+ } s;
+};
+
+union cvmx_npi_pci_burst_size {
+ uint64_t u64;
+ struct cvmx_npi_pci_burst_size_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t wr_brst:7;
+ uint64_t rd_brst:7;
+#else
+ uint64_t rd_brst:7;
+ uint64_t wr_brst:7;
+ uint64_t reserved_14_63:50;
+#endif
+ } s;
+};
+
+union cvmx_npi_pci_int_arb_cfg {
+ uint64_t u64;
+ struct cvmx_npi_pci_int_arb_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t hostmode:1;
+ uint64_t pci_ovr:4;
+ uint64_t reserved_5_7:3;
+ uint64_t en:1;
+ uint64_t park_mod:1;
+ uint64_t park_dev:3;
+#else
+ uint64_t park_dev:3;
+ uint64_t park_mod:1;
+ uint64_t en:1;
+ uint64_t reserved_5_7:3;
+ uint64_t pci_ovr:4;
+ uint64_t hostmode:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+ struct cvmx_npi_pci_int_arb_cfg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t en:1;
+ uint64_t park_mod:1;
+ uint64_t park_dev:3;
+#else
+ uint64_t park_dev:3;
+ uint64_t park_mod:1;
+ uint64_t en:1;
+ uint64_t reserved_5_63:59;
+#endif
+ } cn30xx;
+};
+
+union cvmx_npi_pci_read_cmd {
+ uint64_t u64;
+ struct cvmx_npi_pci_read_cmd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t cmd_size:11;
+#else
+ uint64_t cmd_size:11;
+ uint64_t reserved_11_63:53;
+#endif
+ } s;
+};
+
+union cvmx_npi_port32_instr_hdr {
+ uint64_t u64;
+ struct cvmx_npi_port32_instr_hdr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t pbp:1;
+ uint64_t rsv_f:5;
+ uint64_t rparmode:2;
+ uint64_t rsv_e:1;
+ uint64_t rskp_len:7;
+ uint64_t rsv_d:6;
+ uint64_t use_ihdr:1;
+ uint64_t rsv_c:5;
+ uint64_t par_mode:2;
+ uint64_t rsv_b:1;
+ uint64_t skp_len:7;
+ uint64_t rsv_a:6;
+#else
+ uint64_t rsv_a:6;
+ uint64_t skp_len:7;
+ uint64_t rsv_b:1;
+ uint64_t par_mode:2;
+ uint64_t rsv_c:5;
+ uint64_t use_ihdr:1;
+ uint64_t rsv_d:6;
+ uint64_t rskp_len:7;
+ uint64_t rsv_e:1;
+ uint64_t rparmode:2;
+ uint64_t rsv_f:5;
+ uint64_t pbp:1;
+ uint64_t reserved_44_63:20;
+#endif
+ } s;
+};
+
+union cvmx_npi_port33_instr_hdr {
+ uint64_t u64;
+ struct cvmx_npi_port33_instr_hdr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t pbp:1;
+ uint64_t rsv_f:5;
+ uint64_t rparmode:2;
+ uint64_t rsv_e:1;
+ uint64_t rskp_len:7;
+ uint64_t rsv_d:6;
+ uint64_t use_ihdr:1;
+ uint64_t rsv_c:5;
+ uint64_t par_mode:2;
+ uint64_t rsv_b:1;
+ uint64_t skp_len:7;
+ uint64_t rsv_a:6;
+#else
+ uint64_t rsv_a:6;
+ uint64_t skp_len:7;
+ uint64_t rsv_b:1;
+ uint64_t par_mode:2;
+ uint64_t rsv_c:5;
+ uint64_t use_ihdr:1;
+ uint64_t rsv_d:6;
+ uint64_t rskp_len:7;
+ uint64_t rsv_e:1;
+ uint64_t rparmode:2;
+ uint64_t rsv_f:5;
+ uint64_t pbp:1;
+ uint64_t reserved_44_63:20;
+#endif
+ } s;
+};
+
+union cvmx_npi_port34_instr_hdr {
+ uint64_t u64;
+ struct cvmx_npi_port34_instr_hdr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t pbp:1;
+ uint64_t rsv_f:5;
+ uint64_t rparmode:2;
+ uint64_t rsv_e:1;
+ uint64_t rskp_len:7;
+ uint64_t rsv_d:6;
+ uint64_t use_ihdr:1;
+ uint64_t rsv_c:5;
+ uint64_t par_mode:2;
+ uint64_t rsv_b:1;
+ uint64_t skp_len:7;
+ uint64_t rsv_a:6;
+#else
+ uint64_t rsv_a:6;
+ uint64_t skp_len:7;
+ uint64_t rsv_b:1;
+ uint64_t par_mode:2;
+ uint64_t rsv_c:5;
+ uint64_t use_ihdr:1;
+ uint64_t rsv_d:6;
+ uint64_t rskp_len:7;
+ uint64_t rsv_e:1;
+ uint64_t rparmode:2;
+ uint64_t rsv_f:5;
+ uint64_t pbp:1;
+ uint64_t reserved_44_63:20;
+#endif
+ } s;
+};
+
+union cvmx_npi_port35_instr_hdr {
+ uint64_t u64;
+ struct cvmx_npi_port35_instr_hdr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t pbp:1;
+ uint64_t rsv_f:5;
+ uint64_t rparmode:2;
+ uint64_t rsv_e:1;
+ uint64_t rskp_len:7;
+ uint64_t rsv_d:6;
+ uint64_t use_ihdr:1;
+ uint64_t rsv_c:5;
+ uint64_t par_mode:2;
+ uint64_t rsv_b:1;
+ uint64_t skp_len:7;
+ uint64_t rsv_a:6;
+#else
+ uint64_t rsv_a:6;
+ uint64_t skp_len:7;
+ uint64_t rsv_b:1;
+ uint64_t par_mode:2;
+ uint64_t rsv_c:5;
+ uint64_t use_ihdr:1;
+ uint64_t rsv_d:6;
+ uint64_t rskp_len:7;
+ uint64_t rsv_e:1;
+ uint64_t rparmode:2;
+ uint64_t rsv_f:5;
+ uint64_t pbp:1;
+ uint64_t reserved_44_63:20;
+#endif
+ } s;
+};
+
+union cvmx_npi_port_bp_control {
+ uint64_t u64;
+ struct cvmx_npi_port_bp_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t bp_on:4;
+ uint64_t enb:4;
+#else
+ uint64_t enb:4;
+ uint64_t bp_on:4;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_npi_rsl_int_blocks {
+ uint64_t u64;
+ struct cvmx_npi_rsl_int_blocks_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t rint_31:1;
+ uint64_t iob:1;
+ uint64_t reserved_28_29:2;
+ uint64_t rint_27:1;
+ uint64_t rint_26:1;
+ uint64_t rint_25:1;
+ uint64_t rint_24:1;
+ uint64_t asx1:1;
+ uint64_t asx0:1;
+ uint64_t rint_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc:1;
+ uint64_t l2c:1;
+ uint64_t rint_15:1;
+ uint64_t reserved_13_14:2;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t rint_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npi:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+#else
+ uint64_t mio:1;
+ uint64_t gmx0:1;
+ uint64_t gmx1:1;
+ uint64_t npi:1;
+ uint64_t key:1;
+ uint64_t fpa:1;
+ uint64_t dfa:1;
+ uint64_t zip:1;
+ uint64_t rint_8:1;
+ uint64_t ipd:1;
+ uint64_t pko:1;
+ uint64_t tim:1;
+ uint64_t pow:1;
+ uint64_t reserved_13_14:2;
+ uint64_t rint_15:1;
+ uint64_t l2c:1;
+ uint64_t lmc:1;
+ uint64_t spx0:1;
+ uint64_t spx1:1;
+ uint64_t pip:1;
+ uint64_t rint_21:1;
+ uint64_t asx0:1;
+ uint64_t asx1:1;
+ uint64_t rint_24:1;
+ uint64_t rint_25:1;
+ uint64_t rint_26:1;
+ uint64_t rint_27:1;
+ uint64_t reserved_28_29:2;
+ uint64_t iob:1;
+ uint64_t rint_31:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_npi_rsl_int_blocks_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t rint_31:1;
+ uint64_t iob:1;
+ uint64_t rint_29:1;
+ uint64_t rint_28:1;
+ uint64_t rint_27:1;
+ uint64_t rint_26:1;
+ uint64_t rint_25:1;
+ uint64_t rint_24:1;
+ uint64_t asx1:1;
+ uint64_t asx0:1;
+ uint64_t rint_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc:1;
+ uint64_t l2c:1;
+ uint64_t rint_15:1;
+ uint64_t rint_14:1;
+ uint64_t usb:1;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t rint_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npi:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+#else
+ uint64_t mio:1;
+ uint64_t gmx0:1;
+ uint64_t gmx1:1;
+ uint64_t npi:1;
+ uint64_t key:1;
+ uint64_t fpa:1;
+ uint64_t dfa:1;
+ uint64_t zip:1;
+ uint64_t rint_8:1;
+ uint64_t ipd:1;
+ uint64_t pko:1;
+ uint64_t tim:1;
+ uint64_t pow:1;
+ uint64_t usb:1;
+ uint64_t rint_14:1;
+ uint64_t rint_15:1;
+ uint64_t l2c:1;
+ uint64_t lmc:1;
+ uint64_t spx0:1;
+ uint64_t spx1:1;
+ uint64_t pip:1;
+ uint64_t rint_21:1;
+ uint64_t asx0:1;
+ uint64_t asx1:1;
+ uint64_t rint_24:1;
+ uint64_t rint_25:1;
+ uint64_t rint_26:1;
+ uint64_t rint_27:1;
+ uint64_t rint_28:1;
+ uint64_t rint_29:1;
+ uint64_t iob:1;
+ uint64_t rint_31:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn30xx;
+ struct cvmx_npi_rsl_int_blocks_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t rint_31:1;
+ uint64_t iob:1;
+ uint64_t rint_29:1;
+ uint64_t rint_28:1;
+ uint64_t rint_27:1;
+ uint64_t rint_26:1;
+ uint64_t rint_25:1;
+ uint64_t rint_24:1;
+ uint64_t asx1:1;
+ uint64_t asx0:1;
+ uint64_t rint_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc:1;
+ uint64_t l2c:1;
+ uint64_t rint_15:1;
+ uint64_t rint_14:1;
+ uint64_t rint_13:1;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t rint_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npi:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+#else
+ uint64_t mio:1;
+ uint64_t gmx0:1;
+ uint64_t gmx1:1;
+ uint64_t npi:1;
+ uint64_t key:1;
+ uint64_t fpa:1;
+ uint64_t dfa:1;
+ uint64_t zip:1;
+ uint64_t rint_8:1;
+ uint64_t ipd:1;
+ uint64_t pko:1;
+ uint64_t tim:1;
+ uint64_t pow:1;
+ uint64_t rint_13:1;
+ uint64_t rint_14:1;
+ uint64_t rint_15:1;
+ uint64_t l2c:1;
+ uint64_t lmc:1;
+ uint64_t spx0:1;
+ uint64_t spx1:1;
+ uint64_t pip:1;
+ uint64_t rint_21:1;
+ uint64_t asx0:1;
+ uint64_t asx1:1;
+ uint64_t rint_24:1;
+ uint64_t rint_25:1;
+ uint64_t rint_26:1;
+ uint64_t rint_27:1;
+ uint64_t rint_28:1;
+ uint64_t rint_29:1;
+ uint64_t iob:1;
+ uint64_t rint_31:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn38xx;
+ struct cvmx_npi_rsl_int_blocks_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63:33;
+ uint64_t iob:1;
+ uint64_t lmc1:1;
+ uint64_t agl:1;
+ uint64_t reserved_24_27:4;
+ uint64_t asx1:1;
+ uint64_t asx0:1;
+ uint64_t reserved_21_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc:1;
+ uint64_t l2c:1;
+ uint64_t reserved_15_15:1;
+ uint64_t rad:1;
+ uint64_t usb:1;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t reserved_8_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npi:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+#else
+ uint64_t mio:1;
+ uint64_t gmx0:1;
+ uint64_t gmx1:1;
+ uint64_t npi:1;
+ uint64_t key:1;
+ uint64_t fpa:1;
+ uint64_t dfa:1;
+ uint64_t zip:1;
+ uint64_t reserved_8_8:1;
+ uint64_t ipd:1;
+ uint64_t pko:1;
+ uint64_t tim:1;
+ uint64_t pow:1;
+ uint64_t usb:1;
+ uint64_t rad:1;
+ uint64_t reserved_15_15:1;
+ uint64_t l2c:1;
+ uint64_t lmc:1;
+ uint64_t spx0:1;
+ uint64_t spx1:1;
+ uint64_t pip:1;
+ uint64_t reserved_21_21:1;
+ uint64_t asx0:1;
+ uint64_t asx1:1;
+ uint64_t reserved_24_27:4;
+ uint64_t agl:1;
+ uint64_t lmc1:1;
+ uint64_t iob:1;
+ uint64_t reserved_31_63:33;
+#endif
+ } cn50xx;
+};
+
+union cvmx_npi_size_inputx {
+ uint64_t u64;
+ struct cvmx_npi_size_inputx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t size:32;
+#else
+ uint64_t size:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_npi_win_read_to {
+ uint64_t u64;
+ struct cvmx_npi_win_read_to_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t time:32;
+#else
+ uint64_t time:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-packet.h b/arch/mips/include/asm/octeon/cvmx-packet.h
new file mode 100644
index 000000000..895e93d68
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-packet.h
@@ -0,0 +1,69 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Packet buffer defines.
+ */
+
+#ifndef __CVMX_PACKET_H__
+#define __CVMX_PACKET_H__
+
+/**
+ * This structure defines a buffer pointer on Octeon
+ */
+union cvmx_buf_ptr {
+ void *ptr;
+ uint64_t u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* If set, invert the "free" selection for the overall
+ * packet. HW always sets this bit to 0 on inbound
+ * packets. */
+ uint64_t i:1;
+
+ /* Indicates the amount to back up to get to the
+ * buffer start in cache lines. In most cases this is
+ * less than one complete cache line, so the value is
+ * zero */
+ uint64_t back:4;
+ /* The pool that the buffer came from / goes to */
+ uint64_t pool:3;
+ /* The size of the segment pointed to by addr (in bytes) */
+ uint64_t size:16;
+ /* Pointer to the first byte of the data, NOT buffer */
+ uint64_t addr:40;
+#else
+ uint64_t addr:40;
+ uint64_t size:16;
+ uint64_t pool:3;
+ uint64_t back:4;
+ uint64_t i:1;
+#endif
+ } s;
+};
+
+#endif /* __CVMX_PACKET_H__ */
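The "back" field in cvmx_buf_ptr above is the distance, in 128-byte cache lines, from the data pointer back to the start of the buffer that is returned to pool "pool". As a minimal sketch (not part of the patch above) of how a consumer might recover the buffer start from such a pointer, the helper below is hypothetical, and cvmx_phys_to_ptr() is assumed to be provided by the wider cvmx headers rather than by this file:

	/* Hypothetical helper, for illustration only: compute the start of
	 * the buffer described by a cvmx_buf_ptr.  buf.s.addr is the
	 * physical address of the first data byte; back up buf.s.back
	 * whole 128-byte cache lines from the line containing addr. */
	static inline void *cvmx_buf_ptr_example_start(union cvmx_buf_ptr buf)
	{
		uint64_t start_phys = ((buf.s.addr >> 7) - buf.s.back) << 7;

		return cvmx_phys_to_ptr(start_phys);	/* assumed from cvmx.h */
	}

This mirrors the usual driver-side pattern of aligning the data address down to a cache line and stepping back "back" lines before freeing the buffer to its pool.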
diff --git a/arch/mips/include/asm/octeon/cvmx-pci-defs.h b/arch/mips/include/asm/octeon/cvmx-pci-defs.h
new file mode 100644
index 000000000..be56b693b
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-pci-defs.h
@@ -0,0 +1,2037 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_PCI_DEFS_H__
+#define __CVMX_PCI_DEFS_H__
+
+#define CVMX_PCI_BAR1_INDEXX(offset) (0x0000000000000100ull + ((offset) & 31) * 4)
+#define CVMX_PCI_BIST_REG (0x00000000000001C0ull)
+#define CVMX_PCI_CFG00 (0x0000000000000000ull)
+#define CVMX_PCI_CFG01 (0x0000000000000004ull)
+#define CVMX_PCI_CFG02 (0x0000000000000008ull)
+#define CVMX_PCI_CFG03 (0x000000000000000Cull)
+#define CVMX_PCI_CFG04 (0x0000000000000010ull)
+#define CVMX_PCI_CFG05 (0x0000000000000014ull)
+#define CVMX_PCI_CFG06 (0x0000000000000018ull)
+#define CVMX_PCI_CFG07 (0x000000000000001Cull)
+#define CVMX_PCI_CFG08 (0x0000000000000020ull)
+#define CVMX_PCI_CFG09 (0x0000000000000024ull)
+#define CVMX_PCI_CFG10 (0x0000000000000028ull)
+#define CVMX_PCI_CFG11 (0x000000000000002Cull)
+#define CVMX_PCI_CFG12 (0x0000000000000030ull)
+#define CVMX_PCI_CFG13 (0x0000000000000034ull)
+#define CVMX_PCI_CFG15 (0x000000000000003Cull)
+#define CVMX_PCI_CFG16 (0x0000000000000040ull)
+#define CVMX_PCI_CFG17 (0x0000000000000044ull)
+#define CVMX_PCI_CFG18 (0x0000000000000048ull)
+#define CVMX_PCI_CFG19 (0x000000000000004Cull)
+#define CVMX_PCI_CFG20 (0x0000000000000050ull)
+#define CVMX_PCI_CFG21 (0x0000000000000054ull)
+#define CVMX_PCI_CFG22 (0x0000000000000058ull)
+#define CVMX_PCI_CFG56 (0x00000000000000E0ull)
+#define CVMX_PCI_CFG57 (0x00000000000000E4ull)
+#define CVMX_PCI_CFG58 (0x00000000000000E8ull)
+#define CVMX_PCI_CFG59 (0x00000000000000ECull)
+#define CVMX_PCI_CFG60 (0x00000000000000F0ull)
+#define CVMX_PCI_CFG61 (0x00000000000000F4ull)
+#define CVMX_PCI_CFG62 (0x00000000000000F8ull)
+#define CVMX_PCI_CFG63 (0x00000000000000FCull)
+#define CVMX_PCI_CNT_REG (0x00000000000001B8ull)
+#define CVMX_PCI_CTL_STATUS_2 (0x000000000000018Cull)
+#define CVMX_PCI_DBELL_X(offset) (0x0000000000000080ull + ((offset) & 3) * 8)
+#define CVMX_PCI_DMA_CNT0 CVMX_PCI_DMA_CNTX(0)
+#define CVMX_PCI_DMA_CNT1 CVMX_PCI_DMA_CNTX(1)
+#define CVMX_PCI_DMA_CNTX(offset) (0x00000000000000A0ull + ((offset) & 1) * 8)
+#define CVMX_PCI_DMA_INT_LEV0 CVMX_PCI_DMA_INT_LEVX(0)
+#define CVMX_PCI_DMA_INT_LEV1 CVMX_PCI_DMA_INT_LEVX(1)
+#define CVMX_PCI_DMA_INT_LEVX(offset) (0x00000000000000A4ull + ((offset) & 1) * 8)
+#define CVMX_PCI_DMA_TIME0 CVMX_PCI_DMA_TIMEX(0)
+#define CVMX_PCI_DMA_TIME1 CVMX_PCI_DMA_TIMEX(1)
+#define CVMX_PCI_DMA_TIMEX(offset) (0x00000000000000B0ull + ((offset) & 1) * 4)
+#define CVMX_PCI_INSTR_COUNT0 CVMX_PCI_INSTR_COUNTX(0)
+#define CVMX_PCI_INSTR_COUNT1 CVMX_PCI_INSTR_COUNTX(1)
+#define CVMX_PCI_INSTR_COUNT2 CVMX_PCI_INSTR_COUNTX(2)
+#define CVMX_PCI_INSTR_COUNT3 CVMX_PCI_INSTR_COUNTX(3)
+#define CVMX_PCI_INSTR_COUNTX(offset) (0x0000000000000084ull + ((offset) & 3) * 8)
+#define CVMX_PCI_INT_ENB (0x0000000000000038ull)
+#define CVMX_PCI_INT_ENB2 (0x00000000000001A0ull)
+#define CVMX_PCI_INT_SUM (0x0000000000000030ull)
+#define CVMX_PCI_INT_SUM2 (0x0000000000000198ull)
+#define CVMX_PCI_MSI_RCV (0x00000000000000F0ull)
+#define CVMX_PCI_PKTS_SENT0 CVMX_PCI_PKTS_SENTX(0)
+#define CVMX_PCI_PKTS_SENT1 CVMX_PCI_PKTS_SENTX(1)
+#define CVMX_PCI_PKTS_SENT2 CVMX_PCI_PKTS_SENTX(2)
+#define CVMX_PCI_PKTS_SENT3 CVMX_PCI_PKTS_SENTX(3)
+#define CVMX_PCI_PKTS_SENTX(offset) (0x0000000000000040ull + ((offset) & 3) * 16)
+#define CVMX_PCI_PKTS_SENT_INT_LEV0 CVMX_PCI_PKTS_SENT_INT_LEVX(0)
+#define CVMX_PCI_PKTS_SENT_INT_LEV1 CVMX_PCI_PKTS_SENT_INT_LEVX(1)
+#define CVMX_PCI_PKTS_SENT_INT_LEV2 CVMX_PCI_PKTS_SENT_INT_LEVX(2)
+#define CVMX_PCI_PKTS_SENT_INT_LEV3 CVMX_PCI_PKTS_SENT_INT_LEVX(3)
+#define CVMX_PCI_PKTS_SENT_INT_LEVX(offset) (0x0000000000000048ull + ((offset) & 3) * 16)
+#define CVMX_PCI_PKTS_SENT_TIME0 CVMX_PCI_PKTS_SENT_TIMEX(0)
+#define CVMX_PCI_PKTS_SENT_TIME1 CVMX_PCI_PKTS_SENT_TIMEX(1)
+#define CVMX_PCI_PKTS_SENT_TIME2 CVMX_PCI_PKTS_SENT_TIMEX(2)
+#define CVMX_PCI_PKTS_SENT_TIME3 CVMX_PCI_PKTS_SENT_TIMEX(3)
+#define CVMX_PCI_PKTS_SENT_TIMEX(offset) (0x000000000000004Cull + ((offset) & 3) * 16)
+#define CVMX_PCI_PKT_CREDITS0 CVMX_PCI_PKT_CREDITSX(0)
+#define CVMX_PCI_PKT_CREDITS1 CVMX_PCI_PKT_CREDITSX(1)
+#define CVMX_PCI_PKT_CREDITS2 CVMX_PCI_PKT_CREDITSX(2)
+#define CVMX_PCI_PKT_CREDITS3 CVMX_PCI_PKT_CREDITSX(3)
+#define CVMX_PCI_PKT_CREDITSX(offset) (0x0000000000000044ull + ((offset) & 3) * 16)
+#define CVMX_PCI_READ_CMD_6 (0x0000000000000180ull)
+#define CVMX_PCI_READ_CMD_C (0x0000000000000184ull)
+#define CVMX_PCI_READ_CMD_E (0x0000000000000188ull)
+#define CVMX_PCI_READ_TIMEOUT (CVMX_ADD_IO_SEG(0x00011F00000000B0ull))
+#define CVMX_PCI_SCM_REG (0x00000000000001A8ull)
+#define CVMX_PCI_TSR_REG (0x00000000000001B0ull)
+#define CVMX_PCI_WIN_RD_ADDR (0x0000000000000008ull)
+#define CVMX_PCI_WIN_RD_DATA (0x0000000000000020ull)
+#define CVMX_PCI_WIN_WR_ADDR (0x0000000000000000ull)
+#define CVMX_PCI_WIN_WR_DATA (0x0000000000000010ull)
+#define CVMX_PCI_WIN_WR_MASK (0x0000000000000018ull)
+
+union cvmx_pci_bar1_indexx {
+ uint32_t u32;
+ struct cvmx_pci_bar1_indexx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_18_31:14;
+ uint32_t addr_idx:14;
+ uint32_t ca:1;
+ uint32_t end_swp:2;
+ uint32_t addr_v:1;
+#else
+ uint32_t addr_v:1;
+ uint32_t end_swp:2;
+ uint32_t ca:1;
+ uint32_t addr_idx:14;
+ uint32_t reserved_18_31:14;
+#endif
+ } s;
+};
+
+union cvmx_pci_bist_reg {
+ uint64_t u64;
+ struct cvmx_pci_bist_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t rsp_bs:1;
+ uint64_t dma0_bs:1;
+ uint64_t cmd0_bs:1;
+ uint64_t cmd_bs:1;
+ uint64_t csr2p_bs:1;
+ uint64_t csrr_bs:1;
+ uint64_t rsp2p_bs:1;
+ uint64_t csr2n_bs:1;
+ uint64_t dat2n_bs:1;
+ uint64_t dbg2n_bs:1;
+#else
+ uint64_t dbg2n_bs:1;
+ uint64_t dat2n_bs:1;
+ uint64_t csr2n_bs:1;
+ uint64_t rsp2p_bs:1;
+ uint64_t csrr_bs:1;
+ uint64_t csr2p_bs:1;
+ uint64_t cmd_bs:1;
+ uint64_t cmd0_bs:1;
+ uint64_t dma0_bs:1;
+ uint64_t rsp_bs:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg00 {
+ uint32_t u32;
+ struct cvmx_pci_cfg00_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t devid:16;
+ uint32_t vendid:16;
+#else
+ uint32_t vendid:16;
+ uint32_t devid:16;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg01 {
+ uint32_t u32;
+ struct cvmx_pci_cfg01_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dpe:1;
+ uint32_t sse:1;
+ uint32_t rma:1;
+ uint32_t rta:1;
+ uint32_t sta:1;
+ uint32_t devt:2;
+ uint32_t mdpe:1;
+ uint32_t fbb:1;
+ uint32_t reserved_22_22:1;
+ uint32_t m66:1;
+ uint32_t cle:1;
+ uint32_t i_stat:1;
+ uint32_t reserved_11_18:8;
+ uint32_t i_dis:1;
+ uint32_t fbbe:1;
+ uint32_t see:1;
+ uint32_t ads:1;
+ uint32_t pee:1;
+ uint32_t vps:1;
+ uint32_t mwice:1;
+ uint32_t scse:1;
+ uint32_t me:1;
+ uint32_t msae:1;
+ uint32_t isae:1;
+#else
+ uint32_t isae:1;
+ uint32_t msae:1;
+ uint32_t me:1;
+ uint32_t scse:1;
+ uint32_t mwice:1;
+ uint32_t vps:1;
+ uint32_t pee:1;
+ uint32_t ads:1;
+ uint32_t see:1;
+ uint32_t fbbe:1;
+ uint32_t i_dis:1;
+ uint32_t reserved_11_18:8;
+ uint32_t i_stat:1;
+ uint32_t cle:1;
+ uint32_t m66:1;
+ uint32_t reserved_22_22:1;
+ uint32_t fbb:1;
+ uint32_t mdpe:1;
+ uint32_t devt:2;
+ uint32_t sta:1;
+ uint32_t rta:1;
+ uint32_t rma:1;
+ uint32_t sse:1;
+ uint32_t dpe:1;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg02 {
+ uint32_t u32;
+ struct cvmx_pci_cfg02_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cc:24;
+ uint32_t rid:8;
+#else
+ uint32_t rid:8;
+ uint32_t cc:24;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg03 {
+ uint32_t u32;
+ struct cvmx_pci_cfg03_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t bcap:1;
+ uint32_t brb:1;
+ uint32_t reserved_28_29:2;
+ uint32_t bcod:4;
+ uint32_t ht:8;
+ uint32_t lt:8;
+ uint32_t cls:8;
+#else
+ uint32_t cls:8;
+ uint32_t lt:8;
+ uint32_t ht:8;
+ uint32_t bcod:4;
+ uint32_t reserved_28_29:2;
+ uint32_t brb:1;
+ uint32_t bcap:1;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg04 {
+ uint32_t u32;
+ struct cvmx_pci_cfg04_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lbase:20;
+ uint32_t lbasez:8;
+ uint32_t pf:1;
+ uint32_t typ:2;
+ uint32_t mspc:1;
+#else
+ uint32_t mspc:1;
+ uint32_t typ:2;
+ uint32_t pf:1;
+ uint32_t lbasez:8;
+ uint32_t lbase:20;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg05 {
+ uint32_t u32;
+ struct cvmx_pci_cfg05_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t hbase:32;
+#else
+ uint32_t hbase:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg06 {
+ uint32_t u32;
+ struct cvmx_pci_cfg06_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lbase:5;
+ uint32_t lbasez:23;
+ uint32_t pf:1;
+ uint32_t typ:2;
+ uint32_t mspc:1;
+#else
+ uint32_t mspc:1;
+ uint32_t typ:2;
+ uint32_t pf:1;
+ uint32_t lbasez:23;
+ uint32_t lbase:5;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg07 {
+ uint32_t u32;
+ struct cvmx_pci_cfg07_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t hbase:32;
+#else
+ uint32_t hbase:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg08 {
+ uint32_t u32;
+ struct cvmx_pci_cfg08_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t lbasez:28;
+ uint32_t pf:1;
+ uint32_t typ:2;
+ uint32_t mspc:1;
+#else
+ uint32_t mspc:1;
+ uint32_t typ:2;
+ uint32_t pf:1;
+ uint32_t lbasez:28;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg09 {
+ uint32_t u32;
+ struct cvmx_pci_cfg09_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t hbase:25;
+ uint32_t hbasez:7;
+#else
+ uint32_t hbasez:7;
+ uint32_t hbase:25;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg10 {
+ uint32_t u32;
+ struct cvmx_pci_cfg10_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cisp:32;
+#else
+ uint32_t cisp:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg11 {
+ uint32_t u32;
+ struct cvmx_pci_cfg11_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ssid:16;
+ uint32_t ssvid:16;
+#else
+ uint32_t ssvid:16;
+ uint32_t ssid:16;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg12 {
+ uint32_t u32;
+ struct cvmx_pci_cfg12_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t erbar:16;
+ uint32_t erbarz:5;
+ uint32_t reserved_1_10:10;
+ uint32_t erbar_en:1;
+#else
+ uint32_t erbar_en:1;
+ uint32_t reserved_1_10:10;
+ uint32_t erbarz:5;
+ uint32_t erbar:16;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg13 {
+ uint32_t u32;
+ struct cvmx_pci_cfg13_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_8_31:24;
+ uint32_t cp:8;
+#else
+ uint32_t cp:8;
+ uint32_t reserved_8_31:24;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg15 {
+ uint32_t u32;
+ struct cvmx_pci_cfg15_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t ml:8;
+ uint32_t mg:8;
+ uint32_t inta:8;
+ uint32_t il:8;
+#else
+ uint32_t il:8;
+ uint32_t inta:8;
+ uint32_t mg:8;
+ uint32_t ml:8;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg16 {
+ uint32_t u32;
+ struct cvmx_pci_cfg16_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t trdnpr:1;
+ uint32_t trdard:1;
+ uint32_t rdsati:1;
+ uint32_t trdrs:1;
+ uint32_t trtae:1;
+ uint32_t twsei:1;
+ uint32_t twsen:1;
+ uint32_t twtae:1;
+ uint32_t tmae:1;
+ uint32_t tslte:3;
+ uint32_t tilt:4;
+ uint32_t pbe:12;
+ uint32_t dppmr:1;
+ uint32_t reserved_2_2:1;
+ uint32_t tswc:1;
+ uint32_t mltd:1;
+#else
+ uint32_t mltd:1;
+ uint32_t tswc:1;
+ uint32_t reserved_2_2:1;
+ uint32_t dppmr:1;
+ uint32_t pbe:12;
+ uint32_t tilt:4;
+ uint32_t tslte:3;
+ uint32_t tmae:1;
+ uint32_t twtae:1;
+ uint32_t twsen:1;
+ uint32_t twsei:1;
+ uint32_t trtae:1;
+ uint32_t trdrs:1;
+ uint32_t rdsati:1;
+ uint32_t trdard:1;
+ uint32_t trdnpr:1;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg17 {
+ uint32_t u32;
+ struct cvmx_pci_cfg17_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t tscme:32;
+#else
+ uint32_t tscme:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg18 {
+ uint32_t u32;
+ struct cvmx_pci_cfg18_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t tdsrps:32;
+#else
+ uint32_t tdsrps:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg19 {
+ uint32_t u32;
+ struct cvmx_pci_cfg19_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t mrbcm:1;
+ uint32_t mrbci:1;
+ uint32_t mdwe:1;
+ uint32_t mdre:1;
+ uint32_t mdrimc:1;
+ uint32_t mdrrmc:3;
+ uint32_t tmes:8;
+ uint32_t teci:1;
+ uint32_t tmei:1;
+ uint32_t tmse:1;
+ uint32_t tmdpes:1;
+ uint32_t tmapes:1;
+ uint32_t reserved_9_10:2;
+ uint32_t tibcd:1;
+ uint32_t tibde:1;
+ uint32_t reserved_6_6:1;
+ uint32_t tidomc:1;
+ uint32_t tdomc:5;
+#else
+ uint32_t tdomc:5;
+ uint32_t tidomc:1;
+ uint32_t reserved_6_6:1;
+ uint32_t tibde:1;
+ uint32_t tibcd:1;
+ uint32_t reserved_9_10:2;
+ uint32_t tmapes:1;
+ uint32_t tmdpes:1;
+ uint32_t tmse:1;
+ uint32_t tmei:1;
+ uint32_t teci:1;
+ uint32_t tmes:8;
+ uint32_t mdrrmc:3;
+ uint32_t mdrimc:1;
+ uint32_t mdre:1;
+ uint32_t mdwe:1;
+ uint32_t mrbci:1;
+ uint32_t mrbcm:1;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg20 {
+ uint32_t u32;
+ struct cvmx_pci_cfg20_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t mdsp:32;
+#else
+ uint32_t mdsp:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg21 {
+ uint32_t u32;
+ struct cvmx_pci_cfg21_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t scmre:32;
+#else
+ uint32_t scmre:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg22 {
+ uint32_t u32;
+ struct cvmx_pci_cfg22_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t mac:7;
+ uint32_t reserved_19_24:6;
+ uint32_t flush:1;
+ uint32_t mra:1;
+ uint32_t mtta:1;
+ uint32_t mrv:8;
+ uint32_t mttv:8;
+#else
+ uint32_t mttv:8;
+ uint32_t mrv:8;
+ uint32_t mtta:1;
+ uint32_t mra:1;
+ uint32_t flush:1;
+ uint32_t reserved_19_24:6;
+ uint32_t mac:7;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg56 {
+ uint32_t u32;
+ struct cvmx_pci_cfg56_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_23_31:9;
+ uint32_t most:3;
+ uint32_t mmbc:2;
+ uint32_t roe:1;
+ uint32_t dpere:1;
+ uint32_t ncp:8;
+ uint32_t pxcid:8;
+#else
+ uint32_t pxcid:8;
+ uint32_t ncp:8;
+ uint32_t dpere:1;
+ uint32_t roe:1;
+ uint32_t mmbc:2;
+ uint32_t most:3;
+ uint32_t reserved_23_31:9;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg57 {
+ uint32_t u32;
+ struct cvmx_pci_cfg57_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_30_31:2;
+ uint32_t scemr:1;
+ uint32_t mcrsd:3;
+ uint32_t mostd:3;
+ uint32_t mmrbcd:2;
+ uint32_t dc:1;
+ uint32_t usc:1;
+ uint32_t scd:1;
+ uint32_t m133:1;
+ uint32_t w64:1;
+ uint32_t bn:8;
+ uint32_t dn:5;
+ uint32_t fn:3;
+#else
+ uint32_t fn:3;
+ uint32_t dn:5;
+ uint32_t bn:8;
+ uint32_t w64:1;
+ uint32_t m133:1;
+ uint32_t scd:1;
+ uint32_t usc:1;
+ uint32_t dc:1;
+ uint32_t mmrbcd:2;
+ uint32_t mostd:3;
+ uint32_t mcrsd:3;
+ uint32_t scemr:1;
+ uint32_t reserved_30_31:2;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg58 {
+ uint32_t u32;
+ struct cvmx_pci_cfg58_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pmes:5;
+ uint32_t d2s:1;
+ uint32_t d1s:1;
+ uint32_t auxc:3;
+ uint32_t dsi:1;
+ uint32_t reserved_20_20:1;
+ uint32_t pmec:1;
+ uint32_t pcimiv:3;
+ uint32_t ncp:8;
+ uint32_t pmcid:8;
+#else
+ uint32_t pmcid:8;
+ uint32_t ncp:8;
+ uint32_t pcimiv:3;
+ uint32_t pmec:1;
+ uint32_t reserved_20_20:1;
+ uint32_t dsi:1;
+ uint32_t auxc:3;
+ uint32_t d1s:1;
+ uint32_t d2s:1;
+ uint32_t pmes:5;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg59 {
+ uint32_t u32;
+ struct cvmx_pci_cfg59_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pmdia:8;
+ uint32_t bpccen:1;
+ uint32_t bd3h:1;
+ uint32_t reserved_16_21:6;
+ uint32_t pmess:1;
+ uint32_t pmedsia:2;
+ uint32_t pmds:4;
+ uint32_t pmeens:1;
+ uint32_t reserved_2_7:6;
+ uint32_t ps:2;
+#else
+ uint32_t ps:2;
+ uint32_t reserved_2_7:6;
+ uint32_t pmeens:1;
+ uint32_t pmds:4;
+ uint32_t pmedsia:2;
+ uint32_t pmess:1;
+ uint32_t reserved_16_21:6;
+ uint32_t bd3h:1;
+ uint32_t bpccen:1;
+ uint32_t pmdia:8;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg60 {
+ uint32_t u32;
+ struct cvmx_pci_cfg60_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_24_31:8;
+ uint32_t m64:1;
+ uint32_t mme:3;
+ uint32_t mmc:3;
+ uint32_t msien:1;
+ uint32_t ncp:8;
+ uint32_t msicid:8;
+#else
+ uint32_t msicid:8;
+ uint32_t ncp:8;
+ uint32_t msien:1;
+ uint32_t mmc:3;
+ uint32_t mme:3;
+ uint32_t m64:1;
+ uint32_t reserved_24_31:8;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg61 {
+ uint32_t u32;
+ struct cvmx_pci_cfg61_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t msi31t2:30;
+ uint32_t reserved_0_1:2;
+#else
+ uint32_t reserved_0_1:2;
+ uint32_t msi31t2:30;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg62 {
+ uint32_t u32;
+ struct cvmx_pci_cfg62_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t msi:32;
+#else
+ uint32_t msi:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_cfg63 {
+ uint32_t u32;
+ struct cvmx_pci_cfg63_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31:16;
+ uint32_t msimd:16;
+#else
+ uint32_t msimd:16;
+ uint32_t reserved_16_31:16;
+#endif
+ } s;
+};
+
+union cvmx_pci_cnt_reg {
+ uint64_t u64;
+ struct cvmx_pci_cnt_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63:26;
+ uint64_t hm_pcix:1;
+ uint64_t hm_speed:2;
+ uint64_t ap_pcix:1;
+ uint64_t ap_speed:2;
+ uint64_t pcicnt:32;
+#else
+ uint64_t pcicnt:32;
+ uint64_t ap_speed:2;
+ uint64_t ap_pcix:1;
+ uint64_t hm_speed:2;
+ uint64_t hm_pcix:1;
+ uint64_t reserved_38_63:26;
+#endif
+ } s;
+};
+
+union cvmx_pci_ctl_status_2 {
+ uint32_t u32;
+ struct cvmx_pci_ctl_status_2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_29_31:3;
+ uint32_t bb1_hole:3;
+ uint32_t bb1_siz:1;
+ uint32_t bb_ca:1;
+ uint32_t bb_es:2;
+ uint32_t bb1:1;
+ uint32_t bb0:1;
+ uint32_t erst_n:1;
+ uint32_t bar2pres:1;
+ uint32_t scmtyp:1;
+ uint32_t scm:1;
+ uint32_t en_wfilt:1;
+ uint32_t reserved_14_14:1;
+ uint32_t ap_pcix:1;
+ uint32_t ap_64ad:1;
+ uint32_t b12_bist:1;
+ uint32_t pmo_amod:1;
+ uint32_t pmo_fpc:3;
+ uint32_t tsr_hwm:3;
+ uint32_t bar2_enb:1;
+ uint32_t bar2_esx:2;
+ uint32_t bar2_cax:1;
+#else
+ uint32_t bar2_cax:1;
+ uint32_t bar2_esx:2;
+ uint32_t bar2_enb:1;
+ uint32_t tsr_hwm:3;
+ uint32_t pmo_fpc:3;
+ uint32_t pmo_amod:1;
+ uint32_t b12_bist:1;
+ uint32_t ap_64ad:1;
+ uint32_t ap_pcix:1;
+ uint32_t reserved_14_14:1;
+ uint32_t en_wfilt:1;
+ uint32_t scm:1;
+ uint32_t scmtyp:1;
+ uint32_t bar2pres:1;
+ uint32_t erst_n:1;
+ uint32_t bb0:1;
+ uint32_t bb1:1;
+ uint32_t bb_es:2;
+ uint32_t bb_ca:1;
+ uint32_t bb1_siz:1;
+ uint32_t bb1_hole:3;
+ uint32_t reserved_29_31:3;
+#endif
+ } s;
+ struct cvmx_pci_ctl_status_2_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_20_31:12;
+ uint32_t erst_n:1;
+ uint32_t bar2pres:1;
+ uint32_t scmtyp:1;
+ uint32_t scm:1;
+ uint32_t en_wfilt:1;
+ uint32_t reserved_14_14:1;
+ uint32_t ap_pcix:1;
+ uint32_t ap_64ad:1;
+ uint32_t b12_bist:1;
+ uint32_t pmo_amod:1;
+ uint32_t pmo_fpc:3;
+ uint32_t tsr_hwm:3;
+ uint32_t bar2_enb:1;
+ uint32_t bar2_esx:2;
+ uint32_t bar2_cax:1;
+#else
+ uint32_t bar2_cax:1;
+ uint32_t bar2_esx:2;
+ uint32_t bar2_enb:1;
+ uint32_t tsr_hwm:3;
+ uint32_t pmo_fpc:3;
+ uint32_t pmo_amod:1;
+ uint32_t b12_bist:1;
+ uint32_t ap_64ad:1;
+ uint32_t ap_pcix:1;
+ uint32_t reserved_14_14:1;
+ uint32_t en_wfilt:1;
+ uint32_t scm:1;
+ uint32_t scmtyp:1;
+ uint32_t bar2pres:1;
+ uint32_t erst_n:1;
+ uint32_t reserved_20_31:12;
+#endif
+ } cn31xx;
+};
+
+union cvmx_pci_dbellx {
+ uint32_t u32;
+ struct cvmx_pci_dbellx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_16_31:16;
+ uint32_t inc_val:16;
+#else
+ uint32_t inc_val:16;
+ uint32_t reserved_16_31:16;
+#endif
+ } s;
+};
+
+union cvmx_pci_dma_cntx {
+ uint32_t u32;
+ struct cvmx_pci_dma_cntx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dma_cnt:32;
+#else
+ uint32_t dma_cnt:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_dma_int_levx {
+ uint32_t u32;
+ struct cvmx_pci_dma_int_levx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pkt_cnt:32;
+#else
+ uint32_t pkt_cnt:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_dma_timex {
+ uint32_t u32;
+ struct cvmx_pci_dma_timex_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dma_time:32;
+#else
+ uint32_t dma_time:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_instr_countx {
+ uint32_t u32;
+ struct cvmx_pci_instr_countx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t icnt:32;
+#else
+ uint32_t icnt:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_int_enb {
+ uint64_t u64;
+ struct cvmx_pci_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t ill_rd:1;
+ uint64_t ill_wr:1;
+ uint64_t win_wr:1;
+ uint64_t dma1_fi:1;
+ uint64_t dma0_fi:1;
+ uint64_t idtime1:1;
+ uint64_t idtime0:1;
+ uint64_t idcnt1:1;
+ uint64_t idcnt0:1;
+ uint64_t iptime3:1;
+ uint64_t iptime2:1;
+ uint64_t iptime1:1;
+ uint64_t iptime0:1;
+ uint64_t ipcnt3:1;
+ uint64_t ipcnt2:1;
+ uint64_t ipcnt1:1;
+ uint64_t ipcnt0:1;
+ uint64_t irsl_int:1;
+ uint64_t ill_rrd:1;
+ uint64_t ill_rwr:1;
+ uint64_t idperr:1;
+ uint64_t iaperr:1;
+ uint64_t iserr:1;
+ uint64_t itsr_abt:1;
+ uint64_t imsc_msg:1;
+ uint64_t imsi_mabt:1;
+ uint64_t imsi_tabt:1;
+ uint64_t imsi_per:1;
+ uint64_t imr_tto:1;
+ uint64_t imr_abt:1;
+ uint64_t itr_abt:1;
+ uint64_t imr_wtto:1;
+ uint64_t imr_wabt:1;
+ uint64_t itr_wabt:1;
+#else
+ uint64_t itr_wabt:1;
+ uint64_t imr_wabt:1;
+ uint64_t imr_wtto:1;
+ uint64_t itr_abt:1;
+ uint64_t imr_abt:1;
+ uint64_t imr_tto:1;
+ uint64_t imsi_per:1;
+ uint64_t imsi_tabt:1;
+ uint64_t imsi_mabt:1;
+ uint64_t imsc_msg:1;
+ uint64_t itsr_abt:1;
+ uint64_t iserr:1;
+ uint64_t iaperr:1;
+ uint64_t idperr:1;
+ uint64_t ill_rwr:1;
+ uint64_t ill_rrd:1;
+ uint64_t irsl_int:1;
+ uint64_t ipcnt0:1;
+ uint64_t ipcnt1:1;
+ uint64_t ipcnt2:1;
+ uint64_t ipcnt3:1;
+ uint64_t iptime0:1;
+ uint64_t iptime1:1;
+ uint64_t iptime2:1;
+ uint64_t iptime3:1;
+ uint64_t idcnt0:1;
+ uint64_t idcnt1:1;
+ uint64_t idtime0:1;
+ uint64_t idtime1:1;
+ uint64_t dma0_fi:1;
+ uint64_t dma1_fi:1;
+ uint64_t win_wr:1;
+ uint64_t ill_wr:1;
+ uint64_t ill_rd:1;
+ uint64_t reserved_34_63:30;
+#endif
+ } s;
+ struct cvmx_pci_int_enb_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t ill_rd:1;
+ uint64_t ill_wr:1;
+ uint64_t win_wr:1;
+ uint64_t dma1_fi:1;
+ uint64_t dma0_fi:1;
+ uint64_t idtime1:1;
+ uint64_t idtime0:1;
+ uint64_t idcnt1:1;
+ uint64_t idcnt0:1;
+ uint64_t reserved_22_24:3;
+ uint64_t iptime0:1;
+ uint64_t reserved_18_20:3;
+ uint64_t ipcnt0:1;
+ uint64_t irsl_int:1;
+ uint64_t ill_rrd:1;
+ uint64_t ill_rwr:1;
+ uint64_t idperr:1;
+ uint64_t iaperr:1;
+ uint64_t iserr:1;
+ uint64_t itsr_abt:1;
+ uint64_t imsc_msg:1;
+ uint64_t imsi_mabt:1;
+ uint64_t imsi_tabt:1;
+ uint64_t imsi_per:1;
+ uint64_t imr_tto:1;
+ uint64_t imr_abt:1;
+ uint64_t itr_abt:1;
+ uint64_t imr_wtto:1;
+ uint64_t imr_wabt:1;
+ uint64_t itr_wabt:1;
+#else
+ uint64_t itr_wabt:1;
+ uint64_t imr_wabt:1;
+ uint64_t imr_wtto:1;
+ uint64_t itr_abt:1;
+ uint64_t imr_abt:1;
+ uint64_t imr_tto:1;
+ uint64_t imsi_per:1;
+ uint64_t imsi_tabt:1;
+ uint64_t imsi_mabt:1;
+ uint64_t imsc_msg:1;
+ uint64_t itsr_abt:1;
+ uint64_t iserr:1;
+ uint64_t iaperr:1;
+ uint64_t idperr:1;
+ uint64_t ill_rwr:1;
+ uint64_t ill_rrd:1;
+ uint64_t irsl_int:1;
+ uint64_t ipcnt0:1;
+ uint64_t reserved_18_20:3;
+ uint64_t iptime0:1;
+ uint64_t reserved_22_24:3;
+ uint64_t idcnt0:1;
+ uint64_t idcnt1:1;
+ uint64_t idtime0:1;
+ uint64_t idtime1:1;
+ uint64_t dma0_fi:1;
+ uint64_t dma1_fi:1;
+ uint64_t win_wr:1;
+ uint64_t ill_wr:1;
+ uint64_t ill_rd:1;
+ uint64_t reserved_34_63:30;
+#endif
+ } cn30xx;
+ struct cvmx_pci_int_enb_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t ill_rd:1;
+ uint64_t ill_wr:1;
+ uint64_t win_wr:1;
+ uint64_t dma1_fi:1;
+ uint64_t dma0_fi:1;
+ uint64_t idtime1:1;
+ uint64_t idtime0:1;
+ uint64_t idcnt1:1;
+ uint64_t idcnt0:1;
+ uint64_t reserved_23_24:2;
+ uint64_t iptime1:1;
+ uint64_t iptime0:1;
+ uint64_t reserved_19_20:2;
+ uint64_t ipcnt1:1;
+ uint64_t ipcnt0:1;
+ uint64_t irsl_int:1;
+ uint64_t ill_rrd:1;
+ uint64_t ill_rwr:1;
+ uint64_t idperr:1;
+ uint64_t iaperr:1;
+ uint64_t iserr:1;
+ uint64_t itsr_abt:1;
+ uint64_t imsc_msg:1;
+ uint64_t imsi_mabt:1;
+ uint64_t imsi_tabt:1;
+ uint64_t imsi_per:1;
+ uint64_t imr_tto:1;
+ uint64_t imr_abt:1;
+ uint64_t itr_abt:1;
+ uint64_t imr_wtto:1;
+ uint64_t imr_wabt:1;
+ uint64_t itr_wabt:1;
+#else
+ uint64_t itr_wabt:1;
+ uint64_t imr_wabt:1;
+ uint64_t imr_wtto:1;
+ uint64_t itr_abt:1;
+ uint64_t imr_abt:1;
+ uint64_t imr_tto:1;
+ uint64_t imsi_per:1;
+ uint64_t imsi_tabt:1;
+ uint64_t imsi_mabt:1;
+ uint64_t imsc_msg:1;
+ uint64_t itsr_abt:1;
+ uint64_t iserr:1;
+ uint64_t iaperr:1;
+ uint64_t idperr:1;
+ uint64_t ill_rwr:1;
+ uint64_t ill_rrd:1;
+ uint64_t irsl_int:1;
+ uint64_t ipcnt0:1;
+ uint64_t ipcnt1:1;
+ uint64_t reserved_19_20:2;
+ uint64_t iptime0:1;
+ uint64_t iptime1:1;
+ uint64_t reserved_23_24:2;
+ uint64_t idcnt0:1;
+ uint64_t idcnt1:1;
+ uint64_t idtime0:1;
+ uint64_t idtime1:1;
+ uint64_t dma0_fi:1;
+ uint64_t dma1_fi:1;
+ uint64_t win_wr:1;
+ uint64_t ill_wr:1;
+ uint64_t ill_rd:1;
+ uint64_t reserved_34_63:30;
+#endif
+ } cn31xx;
+};
+
+union cvmx_pci_int_enb2 {
+ uint64_t u64;
+ struct cvmx_pci_int_enb2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t ill_rd:1;
+ uint64_t ill_wr:1;
+ uint64_t win_wr:1;
+ uint64_t dma1_fi:1;
+ uint64_t dma0_fi:1;
+ uint64_t rdtime1:1;
+ uint64_t rdtime0:1;
+ uint64_t rdcnt1:1;
+ uint64_t rdcnt0:1;
+ uint64_t rptime3:1;
+ uint64_t rptime2:1;
+ uint64_t rptime1:1;
+ uint64_t rptime0:1;
+ uint64_t rpcnt3:1;
+ uint64_t rpcnt2:1;
+ uint64_t rpcnt1:1;
+ uint64_t rpcnt0:1;
+ uint64_t rrsl_int:1;
+ uint64_t ill_rrd:1;
+ uint64_t ill_rwr:1;
+ uint64_t rdperr:1;
+ uint64_t raperr:1;
+ uint64_t rserr:1;
+ uint64_t rtsr_abt:1;
+ uint64_t rmsc_msg:1;
+ uint64_t rmsi_mabt:1;
+ uint64_t rmsi_tabt:1;
+ uint64_t rmsi_per:1;
+ uint64_t rmr_tto:1;
+ uint64_t rmr_abt:1;
+ uint64_t rtr_abt:1;
+ uint64_t rmr_wtto:1;
+ uint64_t rmr_wabt:1;
+ uint64_t rtr_wabt:1;
+#else
+ uint64_t rtr_wabt:1;
+ uint64_t rmr_wabt:1;
+ uint64_t rmr_wtto:1;
+ uint64_t rtr_abt:1;
+ uint64_t rmr_abt:1;
+ uint64_t rmr_tto:1;
+ uint64_t rmsi_per:1;
+ uint64_t rmsi_tabt:1;
+ uint64_t rmsi_mabt:1;
+ uint64_t rmsc_msg:1;
+ uint64_t rtsr_abt:1;
+ uint64_t rserr:1;
+ uint64_t raperr:1;
+ uint64_t rdperr:1;
+ uint64_t ill_rwr:1;
+ uint64_t ill_rrd:1;
+ uint64_t rrsl_int:1;
+ uint64_t rpcnt0:1;
+ uint64_t rpcnt1:1;
+ uint64_t rpcnt2:1;
+ uint64_t rpcnt3:1;
+ uint64_t rptime0:1;
+ uint64_t rptime1:1;
+ uint64_t rptime2:1;
+ uint64_t rptime3:1;
+ uint64_t rdcnt0:1;
+ uint64_t rdcnt1:1;
+ uint64_t rdtime0:1;
+ uint64_t rdtime1:1;
+ uint64_t dma0_fi:1;
+ uint64_t dma1_fi:1;
+ uint64_t win_wr:1;
+ uint64_t ill_wr:1;
+ uint64_t ill_rd:1;
+ uint64_t reserved_34_63:30;
+#endif
+ } s;
+ struct cvmx_pci_int_enb2_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t ill_rd:1;
+ uint64_t ill_wr:1;
+ uint64_t win_wr:1;
+ uint64_t dma1_fi:1;
+ uint64_t dma0_fi:1;
+ uint64_t rdtime1:1;
+ uint64_t rdtime0:1;
+ uint64_t rdcnt1:1;
+ uint64_t rdcnt0:1;
+ uint64_t reserved_22_24:3;
+ uint64_t rptime0:1;
+ uint64_t reserved_18_20:3;
+ uint64_t rpcnt0:1;
+ uint64_t rrsl_int:1;
+ uint64_t ill_rrd:1;
+ uint64_t ill_rwr:1;
+ uint64_t rdperr:1;
+ uint64_t raperr:1;
+ uint64_t rserr:1;
+ uint64_t rtsr_abt:1;
+ uint64_t rmsc_msg:1;
+ uint64_t rmsi_mabt:1;
+ uint64_t rmsi_tabt:1;
+ uint64_t rmsi_per:1;
+ uint64_t rmr_tto:1;
+ uint64_t rmr_abt:1;
+ uint64_t rtr_abt:1;
+ uint64_t rmr_wtto:1;
+ uint64_t rmr_wabt:1;
+ uint64_t rtr_wabt:1;
+#else
+ uint64_t rtr_wabt:1;
+ uint64_t rmr_wabt:1;
+ uint64_t rmr_wtto:1;
+ uint64_t rtr_abt:1;
+ uint64_t rmr_abt:1;
+ uint64_t rmr_tto:1;
+ uint64_t rmsi_per:1;
+ uint64_t rmsi_tabt:1;
+ uint64_t rmsi_mabt:1;
+ uint64_t rmsc_msg:1;
+ uint64_t rtsr_abt:1;
+ uint64_t rserr:1;
+ uint64_t raperr:1;
+ uint64_t rdperr:1;
+ uint64_t ill_rwr:1;
+ uint64_t ill_rrd:1;
+ uint64_t rrsl_int:1;
+ uint64_t rpcnt0:1;
+ uint64_t reserved_18_20:3;
+ uint64_t rptime0:1;
+ uint64_t reserved_22_24:3;
+ uint64_t rdcnt0:1;
+ uint64_t rdcnt1:1;
+ uint64_t rdtime0:1;
+ uint64_t rdtime1:1;
+ uint64_t dma0_fi:1;
+ uint64_t dma1_fi:1;
+ uint64_t win_wr:1;
+ uint64_t ill_wr:1;
+ uint64_t ill_rd:1;
+ uint64_t reserved_34_63:30;
+#endif
+ } cn30xx;
+ struct cvmx_pci_int_enb2_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t ill_rd:1;
+ uint64_t ill_wr:1;
+ uint64_t win_wr:1;
+ uint64_t dma1_fi:1;
+ uint64_t dma0_fi:1;
+ uint64_t rdtime1:1;
+ uint64_t rdtime0:1;
+ uint64_t rdcnt1:1;
+ uint64_t rdcnt0:1;
+ uint64_t reserved_23_24:2;
+ uint64_t rptime1:1;
+ uint64_t rptime0:1;
+ uint64_t reserved_19_20:2;
+ uint64_t rpcnt1:1;
+ uint64_t rpcnt0:1;
+ uint64_t rrsl_int:1;
+ uint64_t ill_rrd:1;
+ uint64_t ill_rwr:1;
+ uint64_t rdperr:1;
+ uint64_t raperr:1;
+ uint64_t rserr:1;
+ uint64_t rtsr_abt:1;
+ uint64_t rmsc_msg:1;
+ uint64_t rmsi_mabt:1;
+ uint64_t rmsi_tabt:1;
+ uint64_t rmsi_per:1;
+ uint64_t rmr_tto:1;
+ uint64_t rmr_abt:1;
+ uint64_t rtr_abt:1;
+ uint64_t rmr_wtto:1;
+ uint64_t rmr_wabt:1;
+ uint64_t rtr_wabt:1;
+#else
+ uint64_t rtr_wabt:1;
+ uint64_t rmr_wabt:1;
+ uint64_t rmr_wtto:1;
+ uint64_t rtr_abt:1;
+ uint64_t rmr_abt:1;
+ uint64_t rmr_tto:1;
+ uint64_t rmsi_per:1;
+ uint64_t rmsi_tabt:1;
+ uint64_t rmsi_mabt:1;
+ uint64_t rmsc_msg:1;
+ uint64_t rtsr_abt:1;
+ uint64_t rserr:1;
+ uint64_t raperr:1;
+ uint64_t rdperr:1;
+ uint64_t ill_rwr:1;
+ uint64_t ill_rrd:1;
+ uint64_t rrsl_int:1;
+ uint64_t rpcnt0:1;
+ uint64_t rpcnt1:1;
+ uint64_t reserved_19_20:2;
+ uint64_t rptime0:1;
+ uint64_t rptime1:1;
+ uint64_t reserved_23_24:2;
+ uint64_t rdcnt0:1;
+ uint64_t rdcnt1:1;
+ uint64_t rdtime0:1;
+ uint64_t rdtime1:1;
+ uint64_t dma0_fi:1;
+ uint64_t dma1_fi:1;
+ uint64_t win_wr:1;
+ uint64_t ill_wr:1;
+ uint64_t ill_rd:1;
+ uint64_t reserved_34_63:30;
+#endif
+ } cn31xx;
+};
+
+union cvmx_pci_int_sum {
+ uint64_t u64;
+ struct cvmx_pci_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t ill_rd:1;
+ uint64_t ill_wr:1;
+ uint64_t win_wr:1;
+ uint64_t dma1_fi:1;
+ uint64_t dma0_fi:1;
+ uint64_t dtime1:1;
+ uint64_t dtime0:1;
+ uint64_t dcnt1:1;
+ uint64_t dcnt0:1;
+ uint64_t ptime3:1;
+ uint64_t ptime2:1;
+ uint64_t ptime1:1;
+ uint64_t ptime0:1;
+ uint64_t pcnt3:1;
+ uint64_t pcnt2:1;
+ uint64_t pcnt1:1;
+ uint64_t pcnt0:1;
+ uint64_t rsl_int:1;
+ uint64_t ill_rrd:1;
+ uint64_t ill_rwr:1;
+ uint64_t dperr:1;
+ uint64_t aperr:1;
+ uint64_t serr:1;
+ uint64_t tsr_abt:1;
+ uint64_t msc_msg:1;
+ uint64_t msi_mabt:1;
+ uint64_t msi_tabt:1;
+ uint64_t msi_per:1;
+ uint64_t mr_tto:1;
+ uint64_t mr_abt:1;
+ uint64_t tr_abt:1;
+ uint64_t mr_wtto:1;
+ uint64_t mr_wabt:1;
+ uint64_t tr_wabt:1;
+#else
+ uint64_t tr_wabt:1;
+ uint64_t mr_wabt:1;
+ uint64_t mr_wtto:1;
+ uint64_t tr_abt:1;
+ uint64_t mr_abt:1;
+ uint64_t mr_tto:1;
+ uint64_t msi_per:1;
+ uint64_t msi_tabt:1;
+ uint64_t msi_mabt:1;
+ uint64_t msc_msg:1;
+ uint64_t tsr_abt:1;
+ uint64_t serr:1;
+ uint64_t aperr:1;
+ uint64_t dperr:1;
+ uint64_t ill_rwr:1;
+ uint64_t ill_rrd:1;
+ uint64_t rsl_int:1;
+ uint64_t pcnt0:1;
+ uint64_t pcnt1:1;
+ uint64_t pcnt2:1;
+ uint64_t pcnt3:1;
+ uint64_t ptime0:1;
+ uint64_t ptime1:1;
+ uint64_t ptime2:1;
+ uint64_t ptime3:1;
+ uint64_t dcnt0:1;
+ uint64_t dcnt1:1;
+ uint64_t dtime0:1;
+ uint64_t dtime1:1;
+ uint64_t dma0_fi:1;
+ uint64_t dma1_fi:1;
+ uint64_t win_wr:1;
+ uint64_t ill_wr:1;
+ uint64_t ill_rd:1;
+ uint64_t reserved_34_63:30;
+#endif
+ } s;
+ struct cvmx_pci_int_sum_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t ill_rd:1;
+ uint64_t ill_wr:1;
+ uint64_t win_wr:1;
+ uint64_t dma1_fi:1;
+ uint64_t dma0_fi:1;
+ uint64_t dtime1:1;
+ uint64_t dtime0:1;
+ uint64_t dcnt1:1;
+ uint64_t dcnt0:1;
+ uint64_t reserved_22_24:3;
+ uint64_t ptime0:1;
+ uint64_t reserved_18_20:3;
+ uint64_t pcnt0:1;
+ uint64_t rsl_int:1;
+ uint64_t ill_rrd:1;
+ uint64_t ill_rwr:1;
+ uint64_t dperr:1;
+ uint64_t aperr:1;
+ uint64_t serr:1;
+ uint64_t tsr_abt:1;
+ uint64_t msc_msg:1;
+ uint64_t msi_mabt:1;
+ uint64_t msi_tabt:1;
+ uint64_t msi_per:1;
+ uint64_t mr_tto:1;
+ uint64_t mr_abt:1;
+ uint64_t tr_abt:1;
+ uint64_t mr_wtto:1;
+ uint64_t mr_wabt:1;
+ uint64_t tr_wabt:1;
+#else
+ uint64_t tr_wabt:1;
+ uint64_t mr_wabt:1;
+ uint64_t mr_wtto:1;
+ uint64_t tr_abt:1;
+ uint64_t mr_abt:1;
+ uint64_t mr_tto:1;
+ uint64_t msi_per:1;
+ uint64_t msi_tabt:1;
+ uint64_t msi_mabt:1;
+ uint64_t msc_msg:1;
+ uint64_t tsr_abt:1;
+ uint64_t serr:1;
+ uint64_t aperr:1;
+ uint64_t dperr:1;
+ uint64_t ill_rwr:1;
+ uint64_t ill_rrd:1;
+ uint64_t rsl_int:1;
+ uint64_t pcnt0:1;
+ uint64_t reserved_18_20:3;
+ uint64_t ptime0:1;
+ uint64_t reserved_22_24:3;
+ uint64_t dcnt0:1;
+ uint64_t dcnt1:1;
+ uint64_t dtime0:1;
+ uint64_t dtime1:1;
+ uint64_t dma0_fi:1;
+ uint64_t dma1_fi:1;
+ uint64_t win_wr:1;
+ uint64_t ill_wr:1;
+ uint64_t ill_rd:1;
+ uint64_t reserved_34_63:30;
+#endif
+ } cn30xx;
+ struct cvmx_pci_int_sum_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t ill_rd:1;
+ uint64_t ill_wr:1;
+ uint64_t win_wr:1;
+ uint64_t dma1_fi:1;
+ uint64_t dma0_fi:1;
+ uint64_t dtime1:1;
+ uint64_t dtime0:1;
+ uint64_t dcnt1:1;
+ uint64_t dcnt0:1;
+ uint64_t reserved_23_24:2;
+ uint64_t ptime1:1;
+ uint64_t ptime0:1;
+ uint64_t reserved_19_20:2;
+ uint64_t pcnt1:1;
+ uint64_t pcnt0:1;
+ uint64_t rsl_int:1;
+ uint64_t ill_rrd:1;
+ uint64_t ill_rwr:1;
+ uint64_t dperr:1;
+ uint64_t aperr:1;
+ uint64_t serr:1;
+ uint64_t tsr_abt:1;
+ uint64_t msc_msg:1;
+ uint64_t msi_mabt:1;
+ uint64_t msi_tabt:1;
+ uint64_t msi_per:1;
+ uint64_t mr_tto:1;
+ uint64_t mr_abt:1;
+ uint64_t tr_abt:1;
+ uint64_t mr_wtto:1;
+ uint64_t mr_wabt:1;
+ uint64_t tr_wabt:1;
+#else
+ uint64_t tr_wabt:1;
+ uint64_t mr_wabt:1;
+ uint64_t mr_wtto:1;
+ uint64_t tr_abt:1;
+ uint64_t mr_abt:1;
+ uint64_t mr_tto:1;
+ uint64_t msi_per:1;
+ uint64_t msi_tabt:1;
+ uint64_t msi_mabt:1;
+ uint64_t msc_msg:1;
+ uint64_t tsr_abt:1;
+ uint64_t serr:1;
+ uint64_t aperr:1;
+ uint64_t dperr:1;
+ uint64_t ill_rwr:1;
+ uint64_t ill_rrd:1;
+ uint64_t rsl_int:1;
+ uint64_t pcnt0:1;
+ uint64_t pcnt1:1;
+ uint64_t reserved_19_20:2;
+ uint64_t ptime0:1;
+ uint64_t ptime1:1;
+ uint64_t reserved_23_24:2;
+ uint64_t dcnt0:1;
+ uint64_t dcnt1:1;
+ uint64_t dtime0:1;
+ uint64_t dtime1:1;
+ uint64_t dma0_fi:1;
+ uint64_t dma1_fi:1;
+ uint64_t win_wr:1;
+ uint64_t ill_wr:1;
+ uint64_t ill_rd:1;
+ uint64_t reserved_34_63:30;
+#endif
+ } cn31xx;
+};
+
+union cvmx_pci_int_sum2 {
+ uint64_t u64;
+ struct cvmx_pci_int_sum2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t ill_rd:1;
+ uint64_t ill_wr:1;
+ uint64_t win_wr:1;
+ uint64_t dma1_fi:1;
+ uint64_t dma0_fi:1;
+ uint64_t dtime1:1;
+ uint64_t dtime0:1;
+ uint64_t dcnt1:1;
+ uint64_t dcnt0:1;
+ uint64_t ptime3:1;
+ uint64_t ptime2:1;
+ uint64_t ptime1:1;
+ uint64_t ptime0:1;
+ uint64_t pcnt3:1;
+ uint64_t pcnt2:1;
+ uint64_t pcnt1:1;
+ uint64_t pcnt0:1;
+ uint64_t rsl_int:1;
+ uint64_t ill_rrd:1;
+ uint64_t ill_rwr:1;
+ uint64_t dperr:1;
+ uint64_t aperr:1;
+ uint64_t serr:1;
+ uint64_t tsr_abt:1;
+ uint64_t msc_msg:1;
+ uint64_t msi_mabt:1;
+ uint64_t msi_tabt:1;
+ uint64_t msi_per:1;
+ uint64_t mr_tto:1;
+ uint64_t mr_abt:1;
+ uint64_t tr_abt:1;
+ uint64_t mr_wtto:1;
+ uint64_t mr_wabt:1;
+ uint64_t tr_wabt:1;
+#else
+ uint64_t tr_wabt:1;
+ uint64_t mr_wabt:1;
+ uint64_t mr_wtto:1;
+ uint64_t tr_abt:1;
+ uint64_t mr_abt:1;
+ uint64_t mr_tto:1;
+ uint64_t msi_per:1;
+ uint64_t msi_tabt:1;
+ uint64_t msi_mabt:1;
+ uint64_t msc_msg:1;
+ uint64_t tsr_abt:1;
+ uint64_t serr:1;
+ uint64_t aperr:1;
+ uint64_t dperr:1;
+ uint64_t ill_rwr:1;
+ uint64_t ill_rrd:1;
+ uint64_t rsl_int:1;
+ uint64_t pcnt0:1;
+ uint64_t pcnt1:1;
+ uint64_t pcnt2:1;
+ uint64_t pcnt3:1;
+ uint64_t ptime0:1;
+ uint64_t ptime1:1;
+ uint64_t ptime2:1;
+ uint64_t ptime3:1;
+ uint64_t dcnt0:1;
+ uint64_t dcnt1:1;
+ uint64_t dtime0:1;
+ uint64_t dtime1:1;
+ uint64_t dma0_fi:1;
+ uint64_t dma1_fi:1;
+ uint64_t win_wr:1;
+ uint64_t ill_wr:1;
+ uint64_t ill_rd:1;
+ uint64_t reserved_34_63:30;
+#endif
+ } s;
+ struct cvmx_pci_int_sum2_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t ill_rd:1;
+ uint64_t ill_wr:1;
+ uint64_t win_wr:1;
+ uint64_t dma1_fi:1;
+ uint64_t dma0_fi:1;
+ uint64_t dtime1:1;
+ uint64_t dtime0:1;
+ uint64_t dcnt1:1;
+ uint64_t dcnt0:1;
+ uint64_t reserved_22_24:3;
+ uint64_t ptime0:1;
+ uint64_t reserved_18_20:3;
+ uint64_t pcnt0:1;
+ uint64_t rsl_int:1;
+ uint64_t ill_rrd:1;
+ uint64_t ill_rwr:1;
+ uint64_t dperr:1;
+ uint64_t aperr:1;
+ uint64_t serr:1;
+ uint64_t tsr_abt:1;
+ uint64_t msc_msg:1;
+ uint64_t msi_mabt:1;
+ uint64_t msi_tabt:1;
+ uint64_t msi_per:1;
+ uint64_t mr_tto:1;
+ uint64_t mr_abt:1;
+ uint64_t tr_abt:1;
+ uint64_t mr_wtto:1;
+ uint64_t mr_wabt:1;
+ uint64_t tr_wabt:1;
+#else
+ uint64_t tr_wabt:1;
+ uint64_t mr_wabt:1;
+ uint64_t mr_wtto:1;
+ uint64_t tr_abt:1;
+ uint64_t mr_abt:1;
+ uint64_t mr_tto:1;
+ uint64_t msi_per:1;
+ uint64_t msi_tabt:1;
+ uint64_t msi_mabt:1;
+ uint64_t msc_msg:1;
+ uint64_t tsr_abt:1;
+ uint64_t serr:1;
+ uint64_t aperr:1;
+ uint64_t dperr:1;
+ uint64_t ill_rwr:1;
+ uint64_t ill_rrd:1;
+ uint64_t rsl_int:1;
+ uint64_t pcnt0:1;
+ uint64_t reserved_18_20:3;
+ uint64_t ptime0:1;
+ uint64_t reserved_22_24:3;
+ uint64_t dcnt0:1;
+ uint64_t dcnt1:1;
+ uint64_t dtime0:1;
+ uint64_t dtime1:1;
+ uint64_t dma0_fi:1;
+ uint64_t dma1_fi:1;
+ uint64_t win_wr:1;
+ uint64_t ill_wr:1;
+ uint64_t ill_rd:1;
+ uint64_t reserved_34_63:30;
+#endif
+ } cn30xx;
+ struct cvmx_pci_int_sum2_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t ill_rd:1;
+ uint64_t ill_wr:1;
+ uint64_t win_wr:1;
+ uint64_t dma1_fi:1;
+ uint64_t dma0_fi:1;
+ uint64_t dtime1:1;
+ uint64_t dtime0:1;
+ uint64_t dcnt1:1;
+ uint64_t dcnt0:1;
+ uint64_t reserved_23_24:2;
+ uint64_t ptime1:1;
+ uint64_t ptime0:1;
+ uint64_t reserved_19_20:2;
+ uint64_t pcnt1:1;
+ uint64_t pcnt0:1;
+ uint64_t rsl_int:1;
+ uint64_t ill_rrd:1;
+ uint64_t ill_rwr:1;
+ uint64_t dperr:1;
+ uint64_t aperr:1;
+ uint64_t serr:1;
+ uint64_t tsr_abt:1;
+ uint64_t msc_msg:1;
+ uint64_t msi_mabt:1;
+ uint64_t msi_tabt:1;
+ uint64_t msi_per:1;
+ uint64_t mr_tto:1;
+ uint64_t mr_abt:1;
+ uint64_t tr_abt:1;
+ uint64_t mr_wtto:1;
+ uint64_t mr_wabt:1;
+ uint64_t tr_wabt:1;
+#else
+ uint64_t tr_wabt:1;
+ uint64_t mr_wabt:1;
+ uint64_t mr_wtto:1;
+ uint64_t tr_abt:1;
+ uint64_t mr_abt:1;
+ uint64_t mr_tto:1;
+ uint64_t msi_per:1;
+ uint64_t msi_tabt:1;
+ uint64_t msi_mabt:1;
+ uint64_t msc_msg:1;
+ uint64_t tsr_abt:1;
+ uint64_t serr:1;
+ uint64_t aperr:1;
+ uint64_t dperr:1;
+ uint64_t ill_rwr:1;
+ uint64_t ill_rrd:1;
+ uint64_t rsl_int:1;
+ uint64_t pcnt0:1;
+ uint64_t pcnt1:1;
+ uint64_t reserved_19_20:2;
+ uint64_t ptime0:1;
+ uint64_t ptime1:1;
+ uint64_t reserved_23_24:2;
+ uint64_t dcnt0:1;
+ uint64_t dcnt1:1;
+ uint64_t dtime0:1;
+ uint64_t dtime1:1;
+ uint64_t dma0_fi:1;
+ uint64_t dma1_fi:1;
+ uint64_t win_wr:1;
+ uint64_t ill_wr:1;
+ uint64_t ill_rd:1;
+ uint64_t reserved_34_63:30;
+#endif
+ } cn31xx;
+};
+
+union cvmx_pci_msi_rcv {
+ uint32_t u32;
+ struct cvmx_pci_msi_rcv_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_6_31:26;
+ uint32_t intr:6;
+#else
+ uint32_t intr:6;
+ uint32_t reserved_6_31:26;
+#endif
+ } s;
+};
+
+union cvmx_pci_pkt_creditsx {
+ uint32_t u32;
+ struct cvmx_pci_pkt_creditsx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pkt_cnt:16;
+ uint32_t ptr_cnt:16;
+#else
+ uint32_t ptr_cnt:16;
+ uint32_t pkt_cnt:16;
+#endif
+ } s;
+};
+
+union cvmx_pci_pkts_sentx {
+ uint32_t u32;
+ struct cvmx_pci_pkts_sentx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pkt_cnt:32;
+#else
+ uint32_t pkt_cnt:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_pkts_sent_int_levx {
+ uint32_t u32;
+ struct cvmx_pci_pkts_sent_int_levx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pkt_cnt:32;
+#else
+ uint32_t pkt_cnt:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_pkts_sent_timex {
+ uint32_t u32;
+ struct cvmx_pci_pkts_sent_timex_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t pkt_time:32;
+#else
+ uint32_t pkt_time:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_read_cmd_6 {
+ uint32_t u32;
+ struct cvmx_pci_read_cmd_6_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_9_31:23;
+ uint32_t min_data:6;
+ uint32_t prefetch:3;
+#else
+ uint32_t prefetch:3;
+ uint32_t min_data:6;
+ uint32_t reserved_9_31:23;
+#endif
+ } s;
+};
+
+union cvmx_pci_read_cmd_c {
+ uint32_t u32;
+ struct cvmx_pci_read_cmd_c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_9_31:23;
+ uint32_t min_data:6;
+ uint32_t prefetch:3;
+#else
+ uint32_t prefetch:3;
+ uint32_t min_data:6;
+ uint32_t reserved_9_31:23;
+#endif
+ } s;
+};
+
+union cvmx_pci_read_cmd_e {
+ uint32_t u32;
+ struct cvmx_pci_read_cmd_e_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t reserved_9_31:23;
+ uint32_t min_data:6;
+ uint32_t prefetch:3;
+#else
+ uint32_t prefetch:3;
+ uint32_t min_data:6;
+ uint32_t reserved_9_31:23;
+#endif
+ } s;
+};
+
+union cvmx_pci_read_timeout {
+ uint64_t u64;
+ struct cvmx_pci_read_timeout_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t enb:1;
+ uint64_t cnt:31;
+#else
+ uint64_t cnt:31;
+ uint64_t enb:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_scm_reg {
+ uint64_t u64;
+ struct cvmx_pci_scm_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t scm:32;
+#else
+ uint64_t scm:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pci_tsr_reg {
+ uint64_t u64;
+ struct cvmx_pci_tsr_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63:28;
+ uint64_t tsr:36;
+#else
+ uint64_t tsr:36;
+ uint64_t reserved_36_63:28;
+#endif
+ } s;
+};
+
+union cvmx_pci_win_rd_addr {
+ uint64_t u64;
+ struct cvmx_pci_win_rd_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63:15;
+ uint64_t iobit:1;
+ uint64_t reserved_0_47:48;
+#else
+ uint64_t reserved_0_47:48;
+ uint64_t iobit:1;
+ uint64_t reserved_49_63:15;
+#endif
+ } s;
+ struct cvmx_pci_win_rd_addr_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63:15;
+ uint64_t iobit:1;
+ uint64_t rd_addr:46;
+ uint64_t reserved_0_1:2;
+#else
+ uint64_t reserved_0_1:2;
+ uint64_t rd_addr:46;
+ uint64_t iobit:1;
+ uint64_t reserved_49_63:15;
+#endif
+ } cn30xx;
+ struct cvmx_pci_win_rd_addr_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63:15;
+ uint64_t iobit:1;
+ uint64_t rd_addr:45;
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t rd_addr:45;
+ uint64_t iobit:1;
+ uint64_t reserved_49_63:15;
+#endif
+ } cn38xx;
+};
+
+union cvmx_pci_win_rd_data {
+ uint64_t u64;
+ struct cvmx_pci_win_rd_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rd_data:64;
+#else
+ uint64_t rd_data:64;
+#endif
+ } s;
+};
+
+union cvmx_pci_win_wr_addr {
+ uint64_t u64;
+ struct cvmx_pci_win_wr_addr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63:15;
+ uint64_t iobit:1;
+ uint64_t wr_addr:45;
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t wr_addr:45;
+ uint64_t iobit:1;
+ uint64_t reserved_49_63:15;
+#endif
+ } s;
+};
+
+union cvmx_pci_win_wr_data {
+ uint64_t u64;
+ struct cvmx_pci_win_wr_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wr_data:64;
+#else
+ uint64_t wr_data:64;
+#endif
+ } s;
+};
+
+union cvmx_pci_win_wr_mask {
+ uint64_t u64;
+ struct cvmx_pci_win_wr_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t wr_mask:8;
+#else
+ uint64_t wr_mask:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+#endif
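The definitions above are only byte offsets and bit layouts; driver code combines them by reading the raw register word into the union's u32/u64 member and then picking fields out of the endian-aware .s view. A minimal sketch of that pattern (illustrative only, not part of the diff; the accessor octeon_npi_read32() is a hypothetical stand-in for whatever NPI/PCI read helper the platform code actually uses):

	#include <stdint.h>

	/* Hypothetical accessor: reads one 32-bit NPI/PCI register at the
	 * byte offsets defined in this header. */
	extern uint32_t octeon_npi_read32(uint64_t offset);

	static int octeon_pci_bar2_enabled(void)
	{
		union cvmx_pci_ctl_status_2 ctl;

		ctl.u32 = octeon_npi_read32(CVMX_PCI_CTL_STATUS_2);

		/* Named fields are reached through the endian-aware struct view. */
		return ctl.s.bar2_enb;
	}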
diff --git a/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h b/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h
new file mode 100644
index 000000000..e2dce1acf
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h
@@ -0,0 +1,368 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information.
+ ***********************license end**************************************/
+
+#ifndef __CVMX_PCIERCX_DEFS_H__
+#define __CVMX_PCIERCX_DEFS_H__
+
+#include <uapi/asm/bitfield.h>
+
+#define CVMX_PCIERCX_CFG001(block_id) (0x0000000000000004ull)
+#define CVMX_PCIERCX_CFG006(block_id) (0x0000000000000018ull)
+#define CVMX_PCIERCX_CFG008(block_id) (0x0000000000000020ull)
+#define CVMX_PCIERCX_CFG009(block_id) (0x0000000000000024ull)
+#define CVMX_PCIERCX_CFG010(block_id) (0x0000000000000028ull)
+#define CVMX_PCIERCX_CFG011(block_id) (0x000000000000002Cull)
+#define CVMX_PCIERCX_CFG030(block_id) (0x0000000000000078ull)
+#define CVMX_PCIERCX_CFG031(block_id) (0x000000000000007Cull)
+#define CVMX_PCIERCX_CFG032(block_id) (0x0000000000000080ull)
+#define CVMX_PCIERCX_CFG034(block_id) (0x0000000000000088ull)
+#define CVMX_PCIERCX_CFG035(block_id) (0x000000000000008Cull)
+#define CVMX_PCIERCX_CFG040(block_id) (0x00000000000000A0ull)
+#define CVMX_PCIERCX_CFG066(block_id) (0x0000000000000108ull)
+#define CVMX_PCIERCX_CFG069(block_id) (0x0000000000000114ull)
+#define CVMX_PCIERCX_CFG070(block_id) (0x0000000000000118ull)
+#define CVMX_PCIERCX_CFG075(block_id) (0x000000000000012Cull)
+#define CVMX_PCIERCX_CFG448(block_id) (0x0000000000000700ull)
+#define CVMX_PCIERCX_CFG452(block_id) (0x0000000000000710ull)
+#define CVMX_PCIERCX_CFG455(block_id) (0x000000000000071Cull)
+#define CVMX_PCIERCX_CFG515(block_id) (0x000000000000080Cull)
+
+union cvmx_pciercx_cfg001 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg001_s {
+ __BITFIELD_FIELD(uint32_t dpe:1,
+ __BITFIELD_FIELD(uint32_t sse:1,
+ __BITFIELD_FIELD(uint32_t rma:1,
+ __BITFIELD_FIELD(uint32_t rta:1,
+ __BITFIELD_FIELD(uint32_t sta:1,
+ __BITFIELD_FIELD(uint32_t devt:2,
+ __BITFIELD_FIELD(uint32_t mdpe:1,
+ __BITFIELD_FIELD(uint32_t fbb:1,
+ __BITFIELD_FIELD(uint32_t reserved_22_22:1,
+ __BITFIELD_FIELD(uint32_t m66:1,
+ __BITFIELD_FIELD(uint32_t cl:1,
+ __BITFIELD_FIELD(uint32_t i_stat:1,
+ __BITFIELD_FIELD(uint32_t reserved_11_18:8,
+ __BITFIELD_FIELD(uint32_t i_dis:1,
+ __BITFIELD_FIELD(uint32_t fbbe:1,
+ __BITFIELD_FIELD(uint32_t see:1,
+ __BITFIELD_FIELD(uint32_t ids_wcc:1,
+ __BITFIELD_FIELD(uint32_t per:1,
+ __BITFIELD_FIELD(uint32_t vps:1,
+ __BITFIELD_FIELD(uint32_t mwice:1,
+ __BITFIELD_FIELD(uint32_t scse:1,
+ __BITFIELD_FIELD(uint32_t me:1,
+ __BITFIELD_FIELD(uint32_t msae:1,
+ __BITFIELD_FIELD(uint32_t isae:1,
+ ;))))))))))))))))))))))))
+ } s;
+};
+
+union cvmx_pciercx_cfg006 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg006_s {
+ __BITFIELD_FIELD(uint32_t slt:8,
+ __BITFIELD_FIELD(uint32_t subbnum:8,
+ __BITFIELD_FIELD(uint32_t sbnum:8,
+ __BITFIELD_FIELD(uint32_t pbnum:8,
+ ;))))
+ } s;
+};
+
+union cvmx_pciercx_cfg008 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg008_s {
+ __BITFIELD_FIELD(uint32_t ml_addr:12,
+ __BITFIELD_FIELD(uint32_t reserved_16_19:4,
+ __BITFIELD_FIELD(uint32_t mb_addr:12,
+ __BITFIELD_FIELD(uint32_t reserved_0_3:4,
+ ;))))
+ } s;
+};
+
+union cvmx_pciercx_cfg009 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg009_s {
+ __BITFIELD_FIELD(uint32_t lmem_limit:12,
+ __BITFIELD_FIELD(uint32_t reserved_17_19:3,
+ __BITFIELD_FIELD(uint32_t mem64b:1,
+ __BITFIELD_FIELD(uint32_t lmem_base:12,
+ __BITFIELD_FIELD(uint32_t reserved_1_3:3,
+ __BITFIELD_FIELD(uint32_t mem64a:1,
+ ;))))))
+ } s;
+};
+
+union cvmx_pciercx_cfg010 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg010_s {
+ uint32_t umem_base;
+ } s;
+};
+
+union cvmx_pciercx_cfg011 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg011_s {
+ uint32_t umem_limit;
+ } s;
+};
+
+union cvmx_pciercx_cfg030 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg030_s {
+ __BITFIELD_FIELD(uint32_t reserved_22_31:10,
+ __BITFIELD_FIELD(uint32_t tp:1,
+ __BITFIELD_FIELD(uint32_t ap_d:1,
+ __BITFIELD_FIELD(uint32_t ur_d:1,
+ __BITFIELD_FIELD(uint32_t fe_d:1,
+ __BITFIELD_FIELD(uint32_t nfe_d:1,
+ __BITFIELD_FIELD(uint32_t ce_d:1,
+ __BITFIELD_FIELD(uint32_t reserved_15_15:1,
+ __BITFIELD_FIELD(uint32_t mrrs:3,
+ __BITFIELD_FIELD(uint32_t ns_en:1,
+ __BITFIELD_FIELD(uint32_t ap_en:1,
+ __BITFIELD_FIELD(uint32_t pf_en:1,
+ __BITFIELD_FIELD(uint32_t etf_en:1,
+ __BITFIELD_FIELD(uint32_t mps:3,
+ __BITFIELD_FIELD(uint32_t ro_en:1,
+ __BITFIELD_FIELD(uint32_t ur_en:1,
+ __BITFIELD_FIELD(uint32_t fe_en:1,
+ __BITFIELD_FIELD(uint32_t nfe_en:1,
+ __BITFIELD_FIELD(uint32_t ce_en:1,
+ ;)))))))))))))))))))
+ } s;
+};
+
+union cvmx_pciercx_cfg031 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg031_s {
+ __BITFIELD_FIELD(uint32_t pnum:8,
+ __BITFIELD_FIELD(uint32_t reserved_23_23:1,
+ __BITFIELD_FIELD(uint32_t aspm:1,
+ __BITFIELD_FIELD(uint32_t lbnc:1,
+ __BITFIELD_FIELD(uint32_t dllarc:1,
+ __BITFIELD_FIELD(uint32_t sderc:1,
+ __BITFIELD_FIELD(uint32_t cpm:1,
+ __BITFIELD_FIELD(uint32_t l1el:3,
+ __BITFIELD_FIELD(uint32_t l0el:3,
+ __BITFIELD_FIELD(uint32_t aslpms:2,
+ __BITFIELD_FIELD(uint32_t mlw:6,
+ __BITFIELD_FIELD(uint32_t mls:4,
+ ;))))))))))))
+ } s;
+};
+
+union cvmx_pciercx_cfg032 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg032_s {
+ __BITFIELD_FIELD(uint32_t lab:1,
+ __BITFIELD_FIELD(uint32_t lbm:1,
+ __BITFIELD_FIELD(uint32_t dlla:1,
+ __BITFIELD_FIELD(uint32_t scc:1,
+ __BITFIELD_FIELD(uint32_t lt:1,
+ __BITFIELD_FIELD(uint32_t reserved_26_26:1,
+ __BITFIELD_FIELD(uint32_t nlw:6,
+ __BITFIELD_FIELD(uint32_t ls:4,
+ __BITFIELD_FIELD(uint32_t reserved_12_15:4,
+ __BITFIELD_FIELD(uint32_t lab_int_enb:1,
+ __BITFIELD_FIELD(uint32_t lbm_int_enb:1,
+ __BITFIELD_FIELD(uint32_t hawd:1,
+ __BITFIELD_FIELD(uint32_t ecpm:1,
+ __BITFIELD_FIELD(uint32_t es:1,
+ __BITFIELD_FIELD(uint32_t ccc:1,
+ __BITFIELD_FIELD(uint32_t rl:1,
+ __BITFIELD_FIELD(uint32_t ld:1,
+ __BITFIELD_FIELD(uint32_t rcb:1,
+ __BITFIELD_FIELD(uint32_t reserved_2_2:1,
+ __BITFIELD_FIELD(uint32_t aslpc:2,
+ ;))))))))))))))))))))
+ } s;
+};
+
+union cvmx_pciercx_cfg034 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg034_s {
+ __BITFIELD_FIELD(uint32_t reserved_25_31:7,
+ __BITFIELD_FIELD(uint32_t dlls_c:1,
+ __BITFIELD_FIELD(uint32_t emis:1,
+ __BITFIELD_FIELD(uint32_t pds:1,
+ __BITFIELD_FIELD(uint32_t mrlss:1,
+ __BITFIELD_FIELD(uint32_t ccint_d:1,
+ __BITFIELD_FIELD(uint32_t pd_c:1,
+ __BITFIELD_FIELD(uint32_t mrls_c:1,
+ __BITFIELD_FIELD(uint32_t pf_d:1,
+ __BITFIELD_FIELD(uint32_t abp_d:1,
+ __BITFIELD_FIELD(uint32_t reserved_13_15:3,
+ __BITFIELD_FIELD(uint32_t dlls_en:1,
+ __BITFIELD_FIELD(uint32_t emic:1,
+ __BITFIELD_FIELD(uint32_t pcc:1,
+ __BITFIELD_FIELD(uint32_t pic:1,
+ __BITFIELD_FIELD(uint32_t aic:1,
+ __BITFIELD_FIELD(uint32_t hpint_en:1,
+ __BITFIELD_FIELD(uint32_t ccint_en:1,
+ __BITFIELD_FIELD(uint32_t pd_en:1,
+ __BITFIELD_FIELD(uint32_t mrls_en:1,
+ __BITFIELD_FIELD(uint32_t pf_en:1,
+ __BITFIELD_FIELD(uint32_t abp_en:1,
+ ;))))))))))))))))))))))
+ } s;
+};
+
+union cvmx_pciercx_cfg035 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg035_s {
+ __BITFIELD_FIELD(uint32_t reserved_17_31:15,
+ __BITFIELD_FIELD(uint32_t crssv:1,
+ __BITFIELD_FIELD(uint32_t reserved_5_15:11,
+ __BITFIELD_FIELD(uint32_t crssve:1,
+ __BITFIELD_FIELD(uint32_t pmeie:1,
+ __BITFIELD_FIELD(uint32_t sefee:1,
+ __BITFIELD_FIELD(uint32_t senfee:1,
+ __BITFIELD_FIELD(uint32_t secee:1,
+ ;))))))))
+ } s;
+};
+
+union cvmx_pciercx_cfg040 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg040_s {
+ __BITFIELD_FIELD(uint32_t reserved_22_31:10,
+ __BITFIELD_FIELD(uint32_t ler:1,
+ __BITFIELD_FIELD(uint32_t ep3s:1,
+ __BITFIELD_FIELD(uint32_t ep2s:1,
+ __BITFIELD_FIELD(uint32_t ep1s:1,
+ __BITFIELD_FIELD(uint32_t eqc:1,
+ __BITFIELD_FIELD(uint32_t cdl:1,
+ __BITFIELD_FIELD(uint32_t cde:4,
+ __BITFIELD_FIELD(uint32_t csos:1,
+ __BITFIELD_FIELD(uint32_t emc:1,
+ __BITFIELD_FIELD(uint32_t tm:3,
+ __BITFIELD_FIELD(uint32_t sde:1,
+ __BITFIELD_FIELD(uint32_t hasd:1,
+ __BITFIELD_FIELD(uint32_t ec:1,
+ __BITFIELD_FIELD(uint32_t tls:4,
+ ;)))))))))))))))
+ } s;
+};
+
+union cvmx_pciercx_cfg070 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg070_s {
+ __BITFIELD_FIELD(uint32_t reserved_12_31:20,
+ __BITFIELD_FIELD(uint32_t tplp:1,
+ __BITFIELD_FIELD(uint32_t reserved_9_10:2,
+ __BITFIELD_FIELD(uint32_t ce:1,
+ __BITFIELD_FIELD(uint32_t cc:1,
+ __BITFIELD_FIELD(uint32_t ge:1,
+ __BITFIELD_FIELD(uint32_t gc:1,
+ __BITFIELD_FIELD(uint32_t fep:5,
+ ;))))))))
+ } s;
+};
+
+union cvmx_pciercx_cfg075 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg075_s {
+ __BITFIELD_FIELD(uint32_t reserved_3_31:29,
+ __BITFIELD_FIELD(uint32_t fere:1,
+ __BITFIELD_FIELD(uint32_t nfere:1,
+ __BITFIELD_FIELD(uint32_t cere:1,
+ ;))))
+ } s;
+};
+
+union cvmx_pciercx_cfg448 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg448_s {
+ __BITFIELD_FIELD(uint32_t rtl:16,
+ __BITFIELD_FIELD(uint32_t rtltl:16,
+ ;))
+ } s;
+};
+
+union cvmx_pciercx_cfg452 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg452_s {
+ __BITFIELD_FIELD(uint32_t reserved_26_31:6,
+ __BITFIELD_FIELD(uint32_t eccrc:1,
+ __BITFIELD_FIELD(uint32_t reserved_22_24:3,
+ __BITFIELD_FIELD(uint32_t lme:6,
+ __BITFIELD_FIELD(uint32_t reserved_12_15:4,
+ __BITFIELD_FIELD(uint32_t link_rate:4,
+ __BITFIELD_FIELD(uint32_t flm:1,
+ __BITFIELD_FIELD(uint32_t reserved_6_6:1,
+ __BITFIELD_FIELD(uint32_t dllle:1,
+ __BITFIELD_FIELD(uint32_t reserved_4_4:1,
+ __BITFIELD_FIELD(uint32_t ra:1,
+ __BITFIELD_FIELD(uint32_t le:1,
+ __BITFIELD_FIELD(uint32_t sd:1,
+ __BITFIELD_FIELD(uint32_t omr:1,
+ ;))))))))))))))
+ } s;
+};
+
+union cvmx_pciercx_cfg455 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg455_s {
+ __BITFIELD_FIELD(uint32_t m_cfg0_filt:1,
+ __BITFIELD_FIELD(uint32_t m_io_filt:1,
+ __BITFIELD_FIELD(uint32_t msg_ctrl:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_ecrc_filt:1,
+ __BITFIELD_FIELD(uint32_t m_ecrc_filt:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_len_err:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_attr_err:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_tc_err:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_fun_err:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_rid_err:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_tag_err:1,
+ __BITFIELD_FIELD(uint32_t m_lk_filt:1,
+ __BITFIELD_FIELD(uint32_t m_cfg1_filt:1,
+ __BITFIELD_FIELD(uint32_t m_bar_match:1,
+ __BITFIELD_FIELD(uint32_t m_pois_filt:1,
+ __BITFIELD_FIELD(uint32_t m_fun:1,
+ __BITFIELD_FIELD(uint32_t dfcwt:1,
+ __BITFIELD_FIELD(uint32_t reserved_11_14:4,
+ __BITFIELD_FIELD(uint32_t skpiv:11,
+ ;)))))))))))))))))))
+ } s;
+};
+
+union cvmx_pciercx_cfg515 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg515_s {
+ __BITFIELD_FIELD(uint32_t reserved_21_31:11,
+ __BITFIELD_FIELD(uint32_t s_d_e:1,
+ __BITFIELD_FIELD(uint32_t ctcrb:1,
+ __BITFIELD_FIELD(uint32_t cpyts:1,
+ __BITFIELD_FIELD(uint32_t dsc:1,
+ __BITFIELD_FIELD(uint32_t le:9,
+ __BITFIELD_FIELD(uint32_t n_fts:8,
+ ;)))))))
+ } s;
+};
+
+#endif
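Unlike cvmx-pci-defs.h above, which spells every bitfield struct out twice under #ifdef __BIG_ENDIAN_BITFIELD, this header declares each field once through the __BITFIELD_FIELD() macro pulled in from <uapi/asm/bitfield.h>. Fields are written most-significant first, and the macro's nesting lets the preprocessor reverse the order on little-endian builds. A simplified sketch of the idea (not the kernel macro verbatim; the real definition lives in arch/mips/include/uapi/asm/bitfield.h):

	#include <stdint.h>

	/* Simplified: keep MSB-first order on big-endian, reverse it otherwise. */
	#if defined(__MIPSEB__)
	#define __BITFIELD_FIELD(field, more)	field; more
	#else
	#define __BITFIELD_FIELD(field, more)	more field;
	#endif

	struct example {
		__BITFIELD_FIELD(uint32_t hi:16,
		__BITFIELD_FIELD(uint32_t lo:16,
		;))
	};

	/* Expands to "uint32_t hi:16; uint32_t lo:16;" on big-endian builds
	 * and to "uint32_t lo:16; uint32_t hi:16;" on little-endian builds,
	 * so the same source describes both layouts. */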
diff --git a/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h b/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
new file mode 100644
index 000000000..5f013269a
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
@@ -0,0 +1,826 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (C) 2003-2018 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information.
+ ***********************license end**************************************/
+
+#ifndef __CVMX_PCSX_DEFS_H__
+#define __CVMX_PCSX_DEFS_H__
+
+static inline uint64_t CVMX_PCSX_ANX_ADV_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001010ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001010ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001010ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001010ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001010ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
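These inline helpers exist because the PCS register blocks are laid out differently per OCTEON family: CN68XX uses a block_id multiplier of 0x4000 in the address formula where the other listed families use 0x20000, so most switch cases collapse to the same expression. A hedged usage sketch (assuming the cvmx_read_csr() accessor declared by the OCTEON cvmx headers is available):

	/* Read the PCS auto-negotiation advertisement CSR for one lane of one
	 * interface; the helper above resolves the per-family address. */
	static uint64_t read_pcs_an_adv(unsigned long lane, unsigned long interface)
	{
		return cvmx_read_csr(CVMX_PCSX_ANX_ADV_REG(lane, interface));
	}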
+static inline uint64_t CVMX_PCSX_ANX_EXT_ST_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001028ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001028ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001028ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001028ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001028ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
+static inline uint64_t CVMX_PCSX_ANX_LP_ABIL_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001018ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001018ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001018ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001018ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001018ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
+static inline uint64_t CVMX_PCSX_ANX_RESULTS_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001020ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001020ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001020ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001020ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001020ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
+static inline uint64_t CVMX_PCSX_INTX_EN_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001088ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001088ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001088ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001088ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001088ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
+static inline uint64_t CVMX_PCSX_INTX_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001080ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001080ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001080ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001080ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001080ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
+static inline uint64_t CVMX_PCSX_LINKX_TIMER_COUNT_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001040ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001040ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001040ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001040ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001040ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
+static inline uint64_t CVMX_PCSX_LOG_ANLX_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001090ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001090ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001090ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001090ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001090ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
+static inline uint64_t CVMX_PCSX_MISCX_CTL_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001078ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001078ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001078ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001078ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001078ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
+static inline uint64_t CVMX_PCSX_MRX_CONTROL_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001000ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001000ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001000ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001000ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001000ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
+static inline uint64_t CVMX_PCSX_MRX_STATUS_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001008ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001008ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001008ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001008ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001008ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
+static inline uint64_t CVMX_PCSX_RXX_STATES_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001058ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001058ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001058ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001058ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001058ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
+static inline uint64_t CVMX_PCSX_RXX_SYNC_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001050ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001050ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001050ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001050ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001050ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
+static inline uint64_t CVMX_PCSX_SGMX_AN_ADV_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001068ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001068ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001068ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001068ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001068ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
+static inline uint64_t CVMX_PCSX_SGMX_LP_ADV_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001070ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001070ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001070ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001070ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001070ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
+static inline uint64_t CVMX_PCSX_TXX_STATES_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001060ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001060ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001060ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001060ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001060ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
+static inline uint64_t CVMX_PCSX_TX_RXX_POLARITY_REG(unsigned long offset, unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001048ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001048ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001048ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0001048ull) + ((offset) + (block_id) * 0x4000ull) * 1024;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0001048ull) + ((offset) + (block_id) * 0x20000ull) * 1024;
+}
+
+void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
+
+union cvmx_pcsx_anx_adv_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_anx_adv_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t np:1;
+ uint64_t reserved_14_14:1;
+ uint64_t rem_flt:2;
+ uint64_t reserved_9_11:3;
+ uint64_t pause:2;
+ uint64_t hfd:1;
+ uint64_t fd:1;
+ uint64_t reserved_0_4:5;
+#else
+ uint64_t reserved_0_4:5;
+ uint64_t fd:1;
+ uint64_t hfd:1;
+ uint64_t pause:2;
+ uint64_t reserved_9_11:3;
+ uint64_t rem_flt:2;
+ uint64_t reserved_14_14:1;
+ uint64_t np:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pcsx_anx_ext_st_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_anx_ext_st_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t thou_xfd:1;
+ uint64_t thou_xhd:1;
+ uint64_t thou_tfd:1;
+ uint64_t thou_thd:1;
+ uint64_t reserved_0_11:12;
+#else
+ uint64_t reserved_0_11:12;
+ uint64_t thou_thd:1;
+ uint64_t thou_tfd:1;
+ uint64_t thou_xhd:1;
+ uint64_t thou_xfd:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pcsx_anx_lp_abil_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_anx_lp_abil_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t np:1;
+ uint64_t ack:1;
+ uint64_t rem_flt:2;
+ uint64_t reserved_9_11:3;
+ uint64_t pause:2;
+ uint64_t hfd:1;
+ uint64_t fd:1;
+ uint64_t reserved_0_4:5;
+#else
+ uint64_t reserved_0_4:5;
+ uint64_t fd:1;
+ uint64_t hfd:1;
+ uint64_t pause:2;
+ uint64_t reserved_9_11:3;
+ uint64_t rem_flt:2;
+ uint64_t ack:1;
+ uint64_t np:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pcsx_anx_results_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_anx_results_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t pause:2;
+ uint64_t spd:2;
+ uint64_t an_cpt:1;
+ uint64_t dup:1;
+ uint64_t link_ok:1;
+#else
+ uint64_t link_ok:1;
+ uint64_t dup:1;
+ uint64_t an_cpt:1;
+ uint64_t spd:2;
+ uint64_t pause:2;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_pcsx_intx_en_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_intx_en_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t dbg_sync_en:1;
+ uint64_t dup:1;
+ uint64_t sync_bad_en:1;
+ uint64_t an_bad_en:1;
+ uint64_t rxlock_en:1;
+ uint64_t rxbad_en:1;
+ uint64_t rxerr_en:1;
+ uint64_t txbad_en:1;
+ uint64_t txfifo_en:1;
+ uint64_t txfifu_en:1;
+ uint64_t an_err_en:1;
+ uint64_t xmit_en:1;
+ uint64_t lnkspd_en:1;
+#else
+ uint64_t lnkspd_en:1;
+ uint64_t xmit_en:1;
+ uint64_t an_err_en:1;
+ uint64_t txfifu_en:1;
+ uint64_t txfifo_en:1;
+ uint64_t txbad_en:1;
+ uint64_t rxerr_en:1;
+ uint64_t rxbad_en:1;
+ uint64_t rxlock_en:1;
+ uint64_t an_bad_en:1;
+ uint64_t sync_bad_en:1;
+ uint64_t dup:1;
+ uint64_t dbg_sync_en:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+ struct cvmx_pcsx_intx_en_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t dup:1;
+ uint64_t sync_bad_en:1;
+ uint64_t an_bad_en:1;
+ uint64_t rxlock_en:1;
+ uint64_t rxbad_en:1;
+ uint64_t rxerr_en:1;
+ uint64_t txbad_en:1;
+ uint64_t txfifo_en:1;
+ uint64_t txfifu_en:1;
+ uint64_t an_err_en:1;
+ uint64_t xmit_en:1;
+ uint64_t lnkspd_en:1;
+#else
+ uint64_t lnkspd_en:1;
+ uint64_t xmit_en:1;
+ uint64_t an_err_en:1;
+ uint64_t txfifu_en:1;
+ uint64_t txfifo_en:1;
+ uint64_t txbad_en:1;
+ uint64_t rxerr_en:1;
+ uint64_t rxbad_en:1;
+ uint64_t rxlock_en:1;
+ uint64_t an_bad_en:1;
+ uint64_t sync_bad_en:1;
+ uint64_t dup:1;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn52xx;
+};
+
+union cvmx_pcsx_intx_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_intx_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t dbg_sync:1;
+ uint64_t dup:1;
+ uint64_t sync_bad:1;
+ uint64_t an_bad:1;
+ uint64_t rxlock:1;
+ uint64_t rxbad:1;
+ uint64_t rxerr:1;
+ uint64_t txbad:1;
+ uint64_t txfifo:1;
+ uint64_t txfifu:1;
+ uint64_t an_err:1;
+ uint64_t xmit:1;
+ uint64_t lnkspd:1;
+#else
+ uint64_t lnkspd:1;
+ uint64_t xmit:1;
+ uint64_t an_err:1;
+ uint64_t txfifu:1;
+ uint64_t txfifo:1;
+ uint64_t txbad:1;
+ uint64_t rxerr:1;
+ uint64_t rxbad:1;
+ uint64_t rxlock:1;
+ uint64_t an_bad:1;
+ uint64_t sync_bad:1;
+ uint64_t dup:1;
+ uint64_t dbg_sync:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+ struct cvmx_pcsx_intx_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t dup:1;
+ uint64_t sync_bad:1;
+ uint64_t an_bad:1;
+ uint64_t rxlock:1;
+ uint64_t rxbad:1;
+ uint64_t rxerr:1;
+ uint64_t txbad:1;
+ uint64_t txfifo:1;
+ uint64_t txfifu:1;
+ uint64_t an_err:1;
+ uint64_t xmit:1;
+ uint64_t lnkspd:1;
+#else
+ uint64_t lnkspd:1;
+ uint64_t xmit:1;
+ uint64_t an_err:1;
+ uint64_t txfifu:1;
+ uint64_t txfifo:1;
+ uint64_t txbad:1;
+ uint64_t rxerr:1;
+ uint64_t rxbad:1;
+ uint64_t rxlock:1;
+ uint64_t an_bad:1;
+ uint64_t sync_bad:1;
+ uint64_t dup:1;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn52xx;
+};
+
+union cvmx_pcsx_linkx_timer_count_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_linkx_timer_count_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t count:16;
+#else
+ uint64_t count:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pcsx_log_anlx_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_log_anlx_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t lafifovfl:1;
+ uint64_t la_en:1;
+ uint64_t pkt_sz:2;
+#else
+ uint64_t pkt_sz:2;
+ uint64_t la_en:1;
+ uint64_t lafifovfl:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_pcsx_miscx_ctl_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_miscx_ctl_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t sgmii:1;
+ uint64_t gmxeno:1;
+ uint64_t loopbck2:1;
+ uint64_t mac_phy:1;
+ uint64_t mode:1;
+ uint64_t an_ovrd:1;
+ uint64_t samp_pt:7;
+#else
+ uint64_t samp_pt:7;
+ uint64_t an_ovrd:1;
+ uint64_t mode:1;
+ uint64_t mac_phy:1;
+ uint64_t loopbck2:1;
+ uint64_t gmxeno:1;
+ uint64_t sgmii:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+};
+
+union cvmx_pcsx_mrx_control_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_mrx_control_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t reset:1;
+ uint64_t loopbck1:1;
+ uint64_t spdlsb:1;
+ uint64_t an_en:1;
+ uint64_t pwr_dn:1;
+ uint64_t reserved_10_10:1;
+ uint64_t rst_an:1;
+ uint64_t dup:1;
+ uint64_t coltst:1;
+ uint64_t spdmsb:1;
+ uint64_t uni:1;
+ uint64_t reserved_0_4:5;
+#else
+ uint64_t reserved_0_4:5;
+ uint64_t uni:1;
+ uint64_t spdmsb:1;
+ uint64_t coltst:1;
+ uint64_t dup:1;
+ uint64_t rst_an:1;
+ uint64_t reserved_10_10:1;
+ uint64_t pwr_dn:1;
+ uint64_t an_en:1;
+ uint64_t spdlsb:1;
+ uint64_t loopbck1:1;
+ uint64_t reset:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pcsx_mrx_status_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_mrx_status_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t hun_t4:1;
+ uint64_t hun_xfd:1;
+ uint64_t hun_xhd:1;
+ uint64_t ten_fd:1;
+ uint64_t ten_hd:1;
+ uint64_t hun_t2fd:1;
+ uint64_t hun_t2hd:1;
+ uint64_t ext_st:1;
+ uint64_t reserved_7_7:1;
+ uint64_t prb_sup:1;
+ uint64_t an_cpt:1;
+ uint64_t rm_flt:1;
+ uint64_t an_abil:1;
+ uint64_t lnk_st:1;
+ uint64_t reserved_1_1:1;
+ uint64_t extnd:1;
+#else
+ uint64_t extnd:1;
+ uint64_t reserved_1_1:1;
+ uint64_t lnk_st:1;
+ uint64_t an_abil:1;
+ uint64_t rm_flt:1;
+ uint64_t an_cpt:1;
+ uint64_t prb_sup:1;
+ uint64_t reserved_7_7:1;
+ uint64_t ext_st:1;
+ uint64_t hun_t2hd:1;
+ uint64_t hun_t2fd:1;
+ uint64_t ten_hd:1;
+ uint64_t ten_fd:1;
+ uint64_t hun_xhd:1;
+ uint64_t hun_xfd:1;
+ uint64_t hun_t4:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pcsx_rxx_states_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_rxx_states_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t rx_bad:1;
+ uint64_t rx_st:5;
+ uint64_t sync_bad:1;
+ uint64_t sync:4;
+ uint64_t an_bad:1;
+ uint64_t an_st:4;
+#else
+ uint64_t an_st:4;
+ uint64_t an_bad:1;
+ uint64_t sync:4;
+ uint64_t sync_bad:1;
+ uint64_t rx_st:5;
+ uint64_t rx_bad:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pcsx_rxx_sync_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_rxx_sync_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t sync:1;
+ uint64_t bit_lock:1;
+#else
+ uint64_t bit_lock:1;
+ uint64_t sync:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_pcsx_sgmx_an_adv_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t link:1;
+ uint64_t ack:1;
+ uint64_t reserved_13_13:1;
+ uint64_t dup:1;
+ uint64_t speed:2;
+ uint64_t reserved_1_9:9;
+ uint64_t one:1;
+#else
+ uint64_t one:1;
+ uint64_t reserved_1_9:9;
+ uint64_t speed:2;
+ uint64_t dup:1;
+ uint64_t reserved_13_13:1;
+ uint64_t ack:1;
+ uint64_t link:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pcsx_sgmx_lp_adv_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t link:1;
+ uint64_t reserved_13_14:2;
+ uint64_t dup:1;
+ uint64_t speed:2;
+ uint64_t reserved_1_9:9;
+ uint64_t one:1;
+#else
+ uint64_t one:1;
+ uint64_t reserved_1_9:9;
+ uint64_t speed:2;
+ uint64_t dup:1;
+ uint64_t reserved_13_14:2;
+ uint64_t link:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pcsx_txx_states_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_txx_states_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t xmit:2;
+ uint64_t tx_bad:1;
+ uint64_t ord_st:4;
+#else
+ uint64_t ord_st:4;
+ uint64_t tx_bad:1;
+ uint64_t xmit:2;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_pcsx_tx_rxx_polarity_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t rxovrd:1;
+ uint64_t autorxpl:1;
+ uint64_t rxplrt:1;
+ uint64_t txplrt:1;
+#else
+ uint64_t txplrt:1;
+ uint64_t rxplrt:1;
+ uint64_t autorxpl:1;
+ uint64_t rxovrd:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+#endif
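
The per-lane PCS address helpers above are normally paired with the bitfield unions in the same header and the generic CSR accessors. A minimal usage sketch, assuming the cvmx_read_csr() helper from cvmx.h and caller-supplied SGMII index/interface values:

	/* Illustrative only: poll the MR status register for one SGMII lane.
	 * 'index' and 'interface' are hypothetical caller-supplied ints. */
	union cvmx_pcsx_mrx_status_reg mr_status;

	mr_status.u64 = cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG(index, interface));
	if (mr_status.s.lnk_st)
		pr_info("PCS lane %d on interface %d reports link up\n",
			index, interface);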
diff --git a/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h b/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
new file mode 100644
index 000000000..b353775ee
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
@@ -0,0 +1,664 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_PCSXX_DEFS_H__
+#define __CVMX_PCSXX_DEFS_H__
+
+static inline uint64_t CVMX_PCSXX_10GBX_STATUS_REG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000828ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000828ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000828ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0000828ull) + (block_id) * 0x1000000ull;
+}
+
+static inline uint64_t CVMX_PCSXX_BIST_STATUS_REG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000870ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000870ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000870ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0000870ull) + (block_id) * 0x1000000ull;
+}
+
+static inline uint64_t CVMX_PCSXX_BIT_LOCK_STATUS_REG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000850ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000850ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000850ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0000850ull) + (block_id) * 0x1000000ull;
+}
+
+static inline uint64_t CVMX_PCSXX_CONTROL1_REG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000800ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000800ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000800ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0000800ull) + (block_id) * 0x1000000ull;
+}
+
+static inline uint64_t CVMX_PCSXX_CONTROL2_REG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000818ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000818ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000818ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0000818ull) + (block_id) * 0x1000000ull;
+}
+
+static inline uint64_t CVMX_PCSXX_INT_EN_REG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000860ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000860ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000860ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0000860ull) + (block_id) * 0x1000000ull;
+}
+
+static inline uint64_t CVMX_PCSXX_INT_REG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000858ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000858ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000858ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0000858ull) + (block_id) * 0x1000000ull;
+}
+
+static inline uint64_t CVMX_PCSXX_LOG_ANL_REG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000868ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000868ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000868ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0000868ull) + (block_id) * 0x1000000ull;
+}
+
+static inline uint64_t CVMX_PCSXX_MISC_CTL_REG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000848ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000848ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000848ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0000848ull) + (block_id) * 0x1000000ull;
+}
+
+static inline uint64_t CVMX_PCSXX_RX_SYNC_STATES_REG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000838ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000838ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000838ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0000838ull) + (block_id) * 0x1000000ull;
+}
+
+static inline uint64_t CVMX_PCSXX_SPD_ABIL_REG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000810ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000810ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000810ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0000810ull) + (block_id) * 0x1000000ull;
+}
+
+static inline uint64_t CVMX_PCSXX_STATUS1_REG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000808ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000808ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000808ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0000808ull) + (block_id) * 0x1000000ull;
+}
+
+static inline uint64_t CVMX_PCSXX_STATUS2_REG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000820ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000820ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000820ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0000820ull) + (block_id) * 0x1000000ull;
+}
+
+static inline uint64_t CVMX_PCSXX_TX_RX_POLARITY_REG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000840ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000840ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000840ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0000840ull) + (block_id) * 0x1000000ull;
+}
+
+static inline uint64_t CVMX_PCSXX_TX_RX_STATES_REG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000830ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000830ull) + (block_id) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800B0000830ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800B0000830ull) + (block_id) * 0x1000000ull;
+}
+
+void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
+
+union cvmx_pcsxx_10gbx_status_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_10gbx_status_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t alignd:1;
+ uint64_t pattst:1;
+ uint64_t reserved_4_10:7;
+ uint64_t l3sync:1;
+ uint64_t l2sync:1;
+ uint64_t l1sync:1;
+ uint64_t l0sync:1;
+#else
+ uint64_t l0sync:1;
+ uint64_t l1sync:1;
+ uint64_t l2sync:1;
+ uint64_t l3sync:1;
+ uint64_t reserved_4_10:7;
+ uint64_t pattst:1;
+ uint64_t alignd:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+};
+
+union cvmx_pcsxx_bist_status_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_bist_status_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t bist_status:1;
+#else
+ uint64_t bist_status:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_pcsxx_bit_lock_status_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_bit_lock_status_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t bitlck3:1;
+ uint64_t bitlck2:1;
+ uint64_t bitlck1:1;
+ uint64_t bitlck0:1;
+#else
+ uint64_t bitlck0:1;
+ uint64_t bitlck1:1;
+ uint64_t bitlck2:1;
+ uint64_t bitlck3:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_pcsxx_control1_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_control1_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t reset:1;
+ uint64_t loopbck1:1;
+ uint64_t spdsel1:1;
+ uint64_t reserved_12_12:1;
+ uint64_t lo_pwr:1;
+ uint64_t reserved_7_10:4;
+ uint64_t spdsel0:1;
+ uint64_t spd:4;
+ uint64_t reserved_0_1:2;
+#else
+ uint64_t reserved_0_1:2;
+ uint64_t spd:4;
+ uint64_t spdsel0:1;
+ uint64_t reserved_7_10:4;
+ uint64_t lo_pwr:1;
+ uint64_t reserved_12_12:1;
+ uint64_t spdsel1:1;
+ uint64_t loopbck1:1;
+ uint64_t reset:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pcsxx_control2_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_control2_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t type:2;
+#else
+ uint64_t type:2;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_pcsxx_int_en_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_int_en_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t dbg_sync_en:1;
+ uint64_t algnlos_en:1;
+ uint64_t synlos_en:1;
+ uint64_t bitlckls_en:1;
+ uint64_t rxsynbad_en:1;
+ uint64_t rxbad_en:1;
+ uint64_t txflt_en:1;
+#else
+ uint64_t txflt_en:1;
+ uint64_t rxbad_en:1;
+ uint64_t rxsynbad_en:1;
+ uint64_t bitlckls_en:1;
+ uint64_t synlos_en:1;
+ uint64_t algnlos_en:1;
+ uint64_t dbg_sync_en:1;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+ struct cvmx_pcsxx_int_en_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t algnlos_en:1;
+ uint64_t synlos_en:1;
+ uint64_t bitlckls_en:1;
+ uint64_t rxsynbad_en:1;
+ uint64_t rxbad_en:1;
+ uint64_t txflt_en:1;
+#else
+ uint64_t txflt_en:1;
+ uint64_t rxbad_en:1;
+ uint64_t rxsynbad_en:1;
+ uint64_t bitlckls_en:1;
+ uint64_t synlos_en:1;
+ uint64_t algnlos_en:1;
+ uint64_t reserved_6_63:58;
+#endif
+ } cn52xx;
+};
+
+union cvmx_pcsxx_int_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t dbg_sync:1;
+ uint64_t algnlos:1;
+ uint64_t synlos:1;
+ uint64_t bitlckls:1;
+ uint64_t rxsynbad:1;
+ uint64_t rxbad:1;
+ uint64_t txflt:1;
+#else
+ uint64_t txflt:1;
+ uint64_t rxbad:1;
+ uint64_t rxsynbad:1;
+ uint64_t bitlckls:1;
+ uint64_t synlos:1;
+ uint64_t algnlos:1;
+ uint64_t dbg_sync:1;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+ struct cvmx_pcsxx_int_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t algnlos:1;
+ uint64_t synlos:1;
+ uint64_t bitlckls:1;
+ uint64_t rxsynbad:1;
+ uint64_t rxbad:1;
+ uint64_t txflt:1;
+#else
+ uint64_t txflt:1;
+ uint64_t rxbad:1;
+ uint64_t rxsynbad:1;
+ uint64_t bitlckls:1;
+ uint64_t synlos:1;
+ uint64_t algnlos:1;
+ uint64_t reserved_6_63:58;
+#endif
+ } cn52xx;
+};
+
+union cvmx_pcsxx_log_anl_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_log_anl_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t enc_mode:1;
+ uint64_t drop_ln:2;
+ uint64_t lafifovfl:1;
+ uint64_t la_en:1;
+ uint64_t pkt_sz:2;
+#else
+ uint64_t pkt_sz:2;
+ uint64_t la_en:1;
+ uint64_t lafifovfl:1;
+ uint64_t drop_ln:2;
+ uint64_t enc_mode:1;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_pcsxx_misc_ctl_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_misc_ctl_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t tx_swap:1;
+ uint64_t rx_swap:1;
+ uint64_t xaui:1;
+ uint64_t gmxeno:1;
+#else
+ uint64_t gmxeno:1;
+ uint64_t xaui:1;
+ uint64_t rx_swap:1;
+ uint64_t tx_swap:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_pcsxx_rx_sync_states_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_rx_sync_states_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t sync3st:4;
+ uint64_t sync2st:4;
+ uint64_t sync1st:4;
+ uint64_t sync0st:4;
+#else
+ uint64_t sync0st:4;
+ uint64_t sync1st:4;
+ uint64_t sync2st:4;
+ uint64_t sync3st:4;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pcsxx_spd_abil_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_spd_abil_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t tenpasst:1;
+ uint64_t tengb:1;
+#else
+ uint64_t tengb:1;
+ uint64_t tenpasst:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_pcsxx_status1_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_status1_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t flt:1;
+ uint64_t reserved_3_6:4;
+ uint64_t rcv_lnk:1;
+ uint64_t lpable:1;
+ uint64_t reserved_0_0:1;
+#else
+ uint64_t reserved_0_0:1;
+ uint64_t lpable:1;
+ uint64_t rcv_lnk:1;
+ uint64_t reserved_3_6:4;
+ uint64_t flt:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_pcsxx_status2_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_status2_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t dev:2;
+ uint64_t reserved_12_13:2;
+ uint64_t xmtflt:1;
+ uint64_t rcvflt:1;
+ uint64_t reserved_3_9:7;
+ uint64_t tengb_w:1;
+ uint64_t tengb_x:1;
+ uint64_t tengb_r:1;
+#else
+ uint64_t tengb_r:1;
+ uint64_t tengb_x:1;
+ uint64_t tengb_w:1;
+ uint64_t reserved_3_9:7;
+ uint64_t rcvflt:1;
+ uint64_t xmtflt:1;
+ uint64_t reserved_12_13:2;
+ uint64_t dev:2;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pcsxx_tx_rx_polarity_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t xor_rxplrt:4;
+ uint64_t xor_txplrt:4;
+ uint64_t rxplrt:1;
+ uint64_t txplrt:1;
+#else
+ uint64_t txplrt:1;
+ uint64_t rxplrt:1;
+ uint64_t xor_txplrt:4;
+ uint64_t xor_rxplrt:4;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t rxplrt:1;
+ uint64_t txplrt:1;
+#else
+ uint64_t txplrt:1;
+ uint64_t rxplrt:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } cn52xxp1;
+};
+
+union cvmx_pcsxx_tx_rx_states_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_tx_rx_states_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t term_err:1;
+ uint64_t syn3bad:1;
+ uint64_t syn2bad:1;
+ uint64_t syn1bad:1;
+ uint64_t syn0bad:1;
+ uint64_t rxbad:1;
+ uint64_t algn_st:3;
+ uint64_t rx_st:2;
+ uint64_t tx_st:3;
+#else
+ uint64_t tx_st:3;
+ uint64_t rx_st:2;
+ uint64_t algn_st:3;
+ uint64_t rxbad:1;
+ uint64_t syn0bad:1;
+ uint64_t syn1bad:1;
+ uint64_t syn2bad:1;
+ uint64_t syn3bad:1;
+ uint64_t term_err:1;
+ uint64_t reserved_14_63:50;
+#endif
+ } s;
+ struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t syn3bad:1;
+ uint64_t syn2bad:1;
+ uint64_t syn1bad:1;
+ uint64_t syn0bad:1;
+ uint64_t rxbad:1;
+ uint64_t algn_st:3;
+ uint64_t rx_st:2;
+ uint64_t tx_st:3;
+#else
+ uint64_t tx_st:3;
+ uint64_t rx_st:2;
+ uint64_t algn_st:3;
+ uint64_t rxbad:1;
+ uint64_t syn0bad:1;
+ uint64_t syn1bad:1;
+ uint64_t syn2bad:1;
+ uint64_t syn3bad:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } cn52xxp1;
+};
+
+#endif
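
The XAUI-level helpers that follow the same pattern are typically read the same way; a short sketch of a link check, again assuming cvmx_read_csr() and a caller-supplied interface number:

	/* Illustrative only: test the XAUI receive-link bit for 'interface'. */
	union cvmx_pcsxx_status1_reg xaui_status;

	xaui_status.u64 = cvmx_read_csr(CVMX_PCSXX_STATUS1_REG(interface));
	if (!xaui_status.s.rcv_lnk) {
		/* Receive link is down; a driver would typically restart
		 * lane alignment/training before passing traffic. */
	}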
diff --git a/arch/mips/include/asm/octeon/cvmx-pemx-defs.h b/arch/mips/include/asm/octeon/cvmx-pemx-defs.h
new file mode 100644
index 000000000..d2d6dba93
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-pemx-defs.h
@@ -0,0 +1,651 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_PEMX_DEFS_H__
+#define __CVMX_PEMX_DEFS_H__
+
+#define CVMX_PEMX_BAR1_INDEXX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C00000A8ull) + (((offset) & 15) + ((block_id) & 1) * 0x200000ull) * 8)
+#define CVMX_PEMX_BAR2_MASK(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000130ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_BAR_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000128ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000018ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_BIST_STATUS2(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000420ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_CFG_RD(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000030ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_CFG_WR(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000028ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_CPL_LUT_VALID(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000098ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_CTL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000000ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_DBG_INFO(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000008ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_DBG_INFO_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800C00000A0ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_DIAG_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000020ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_INB_READ_CREDITS(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000138ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_INT_ENB(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000410ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_INT_ENB_INT(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000418ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_INT_SUM(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000408ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_P2N_BAR0_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000080ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_P2N_BAR1_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000088ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_P2N_BAR2_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000090ull) + ((block_id) & 1) * 0x1000000ull)
+#define CVMX_PEMX_P2P_BARX_END(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C0000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x100000ull) * 16)
+#define CVMX_PEMX_P2P_BARX_START(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C0000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x100000ull) * 16)
+#define CVMX_PEMX_TLP_CREDITS(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000038ull) + ((block_id) & 1) * 0x1000000ull)
+
+union cvmx_pemx_bar1_indexx {
+ uint64_t u64;
+ struct cvmx_pemx_bar1_indexx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t addr_idx:16;
+ uint64_t ca:1;
+ uint64_t end_swp:2;
+ uint64_t addr_v:1;
+#else
+ uint64_t addr_v:1;
+ uint64_t end_swp:2;
+ uint64_t ca:1;
+ uint64_t addr_idx:16;
+ uint64_t reserved_20_63:44;
+#endif
+ } s;
+};
+
+union cvmx_pemx_bar2_mask {
+ uint64_t u64;
+ struct cvmx_pemx_bar2_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63:26;
+ uint64_t mask:35;
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t mask:35;
+ uint64_t reserved_38_63:26;
+#endif
+ } s;
+};
+
+union cvmx_pemx_bar_ctl {
+ uint64_t u64;
+ struct cvmx_pemx_bar_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t bar1_siz:3;
+ uint64_t bar2_enb:1;
+ uint64_t bar2_esx:2;
+ uint64_t bar2_cax:1;
+#else
+ uint64_t bar2_cax:1;
+ uint64_t bar2_esx:2;
+ uint64_t bar2_enb:1;
+ uint64_t bar1_siz:3;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+};
+
+union cvmx_pemx_bist_status {
+ uint64_t u64;
+ struct cvmx_pemx_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t retry:1;
+ uint64_t rqdata0:1;
+ uint64_t rqdata1:1;
+ uint64_t rqdata2:1;
+ uint64_t rqdata3:1;
+ uint64_t rqhdr1:1;
+ uint64_t rqhdr0:1;
+ uint64_t sot:1;
+#else
+ uint64_t sot:1;
+ uint64_t rqhdr0:1;
+ uint64_t rqhdr1:1;
+ uint64_t rqdata3:1;
+ uint64_t rqdata2:1;
+ uint64_t rqdata1:1;
+ uint64_t rqdata0:1;
+ uint64_t retry:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_pemx_bist_status2 {
+ uint64_t u64;
+ struct cvmx_pemx_bist_status2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t e2p_cpl:1;
+ uint64_t e2p_n:1;
+ uint64_t e2p_p:1;
+ uint64_t peai_p2e:1;
+ uint64_t pef_tpf1:1;
+ uint64_t pef_tpf0:1;
+ uint64_t pef_tnf:1;
+ uint64_t pef_tcf1:1;
+ uint64_t pef_tc0:1;
+ uint64_t ppf:1;
+#else
+ uint64_t ppf:1;
+ uint64_t pef_tc0:1;
+ uint64_t pef_tcf1:1;
+ uint64_t pef_tnf:1;
+ uint64_t pef_tpf0:1;
+ uint64_t pef_tpf1:1;
+ uint64_t peai_p2e:1;
+ uint64_t e2p_p:1;
+ uint64_t e2p_n:1;
+ uint64_t e2p_cpl:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+};
+
+union cvmx_pemx_cfg_rd {
+ uint64_t u64;
+ struct cvmx_pemx_cfg_rd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data:32;
+ uint64_t addr:32;
+#else
+ uint64_t addr:32;
+ uint64_t data:32;
+#endif
+ } s;
+};
+
+union cvmx_pemx_cfg_wr {
+ uint64_t u64;
+ struct cvmx_pemx_cfg_wr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data:32;
+ uint64_t addr:32;
+#else
+ uint64_t addr:32;
+ uint64_t data:32;
+#endif
+ } s;
+};
+
+union cvmx_pemx_cpl_lut_valid {
+ uint64_t u64;
+ struct cvmx_pemx_cpl_lut_valid_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t tag:32;
+#else
+ uint64_t tag:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pemx_ctl_status {
+ uint64_t u64;
+ struct cvmx_pemx_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t auto_sd:1;
+ uint64_t dnum:5;
+ uint64_t pbus:8;
+ uint64_t reserved_32_33:2;
+ uint64_t cfg_rtry:16;
+ uint64_t reserved_12_15:4;
+ uint64_t pm_xtoff:1;
+ uint64_t pm_xpme:1;
+ uint64_t ob_p_cmd:1;
+ uint64_t reserved_7_8:2;
+ uint64_t nf_ecrc:1;
+ uint64_t dly_one:1;
+ uint64_t lnk_enb:1;
+ uint64_t ro_ctlp:1;
+ uint64_t fast_lm:1;
+ uint64_t inv_ecrc:1;
+ uint64_t inv_lcrc:1;
+#else
+ uint64_t inv_lcrc:1;
+ uint64_t inv_ecrc:1;
+ uint64_t fast_lm:1;
+ uint64_t ro_ctlp:1;
+ uint64_t lnk_enb:1;
+ uint64_t dly_one:1;
+ uint64_t nf_ecrc:1;
+ uint64_t reserved_7_8:2;
+ uint64_t ob_p_cmd:1;
+ uint64_t pm_xpme:1;
+ uint64_t pm_xtoff:1;
+ uint64_t reserved_12_15:4;
+ uint64_t cfg_rtry:16;
+ uint64_t reserved_32_33:2;
+ uint64_t pbus:8;
+ uint64_t dnum:5;
+ uint64_t auto_sd:1;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_pemx_dbg_info {
+ uint64_t u64;
+ struct cvmx_pemx_dbg_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63:33;
+ uint64_t ecrc_e:1;
+ uint64_t rawwpp:1;
+ uint64_t racpp:1;
+ uint64_t ramtlp:1;
+ uint64_t rarwdns:1;
+ uint64_t caar:1;
+ uint64_t racca:1;
+ uint64_t racur:1;
+ uint64_t rauc:1;
+ uint64_t rqo:1;
+ uint64_t fcuv:1;
+ uint64_t rpe:1;
+ uint64_t fcpvwt:1;
+ uint64_t dpeoosd:1;
+ uint64_t rtwdle:1;
+ uint64_t rdwdle:1;
+ uint64_t mre:1;
+ uint64_t rte:1;
+ uint64_t acto:1;
+ uint64_t rvdm:1;
+ uint64_t rumep:1;
+ uint64_t rptamrc:1;
+ uint64_t rpmerc:1;
+ uint64_t rfemrc:1;
+ uint64_t rnfemrc:1;
+ uint64_t rcemrc:1;
+ uint64_t rpoison:1;
+ uint64_t recrce:1;
+ uint64_t rtlplle:1;
+ uint64_t rtlpmal:1;
+ uint64_t spoison:1;
+#else
+ uint64_t spoison:1;
+ uint64_t rtlpmal:1;
+ uint64_t rtlplle:1;
+ uint64_t recrce:1;
+ uint64_t rpoison:1;
+ uint64_t rcemrc:1;
+ uint64_t rnfemrc:1;
+ uint64_t rfemrc:1;
+ uint64_t rpmerc:1;
+ uint64_t rptamrc:1;
+ uint64_t rumep:1;
+ uint64_t rvdm:1;
+ uint64_t acto:1;
+ uint64_t rte:1;
+ uint64_t mre:1;
+ uint64_t rdwdle:1;
+ uint64_t rtwdle:1;
+ uint64_t dpeoosd:1;
+ uint64_t fcpvwt:1;
+ uint64_t rpe:1;
+ uint64_t fcuv:1;
+ uint64_t rqo:1;
+ uint64_t rauc:1;
+ uint64_t racur:1;
+ uint64_t racca:1;
+ uint64_t caar:1;
+ uint64_t rarwdns:1;
+ uint64_t ramtlp:1;
+ uint64_t racpp:1;
+ uint64_t rawwpp:1;
+ uint64_t ecrc_e:1;
+ uint64_t reserved_31_63:33;
+#endif
+ } s;
+};
+
+union cvmx_pemx_dbg_info_en {
+ uint64_t u64;
+ struct cvmx_pemx_dbg_info_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63:33;
+ uint64_t ecrc_e:1;
+ uint64_t rawwpp:1;
+ uint64_t racpp:1;
+ uint64_t ramtlp:1;
+ uint64_t rarwdns:1;
+ uint64_t caar:1;
+ uint64_t racca:1;
+ uint64_t racur:1;
+ uint64_t rauc:1;
+ uint64_t rqo:1;
+ uint64_t fcuv:1;
+ uint64_t rpe:1;
+ uint64_t fcpvwt:1;
+ uint64_t dpeoosd:1;
+ uint64_t rtwdle:1;
+ uint64_t rdwdle:1;
+ uint64_t mre:1;
+ uint64_t rte:1;
+ uint64_t acto:1;
+ uint64_t rvdm:1;
+ uint64_t rumep:1;
+ uint64_t rptamrc:1;
+ uint64_t rpmerc:1;
+ uint64_t rfemrc:1;
+ uint64_t rnfemrc:1;
+ uint64_t rcemrc:1;
+ uint64_t rpoison:1;
+ uint64_t recrce:1;
+ uint64_t rtlplle:1;
+ uint64_t rtlpmal:1;
+ uint64_t spoison:1;
+#else
+ uint64_t spoison:1;
+ uint64_t rtlpmal:1;
+ uint64_t rtlplle:1;
+ uint64_t recrce:1;
+ uint64_t rpoison:1;
+ uint64_t rcemrc:1;
+ uint64_t rnfemrc:1;
+ uint64_t rfemrc:1;
+ uint64_t rpmerc:1;
+ uint64_t rptamrc:1;
+ uint64_t rumep:1;
+ uint64_t rvdm:1;
+ uint64_t acto:1;
+ uint64_t rte:1;
+ uint64_t mre:1;
+ uint64_t rdwdle:1;
+ uint64_t rtwdle:1;
+ uint64_t dpeoosd:1;
+ uint64_t fcpvwt:1;
+ uint64_t rpe:1;
+ uint64_t fcuv:1;
+ uint64_t rqo:1;
+ uint64_t rauc:1;
+ uint64_t racur:1;
+ uint64_t racca:1;
+ uint64_t caar:1;
+ uint64_t rarwdns:1;
+ uint64_t ramtlp:1;
+ uint64_t racpp:1;
+ uint64_t rawwpp:1;
+ uint64_t ecrc_e:1;
+ uint64_t reserved_31_63:33;
+#endif
+ } s;
+};
+
+union cvmx_pemx_diag_status {
+ uint64_t u64;
+ struct cvmx_pemx_diag_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t pm_dst:1;
+ uint64_t pm_stat:1;
+ uint64_t pm_en:1;
+ uint64_t aux_en:1;
+#else
+ uint64_t aux_en:1;
+ uint64_t pm_en:1;
+ uint64_t pm_stat:1;
+ uint64_t pm_dst:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_pemx_inb_read_credits {
+ uint64_t u64;
+ struct cvmx_pemx_inb_read_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t num:6;
+#else
+ uint64_t num:6;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_pemx_int_enb {
+ uint64_t u64;
+ struct cvmx_pemx_int_enb_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t crs_dr:1;
+ uint64_t crs_er:1;
+ uint64_t rdlk:1;
+ uint64_t exc:1;
+ uint64_t un_bx:1;
+ uint64_t un_b2:1;
+ uint64_t un_b1:1;
+ uint64_t up_bx:1;
+ uint64_t up_b2:1;
+ uint64_t up_b1:1;
+ uint64_t pmem:1;
+ uint64_t pmei:1;
+ uint64_t se:1;
+ uint64_t aeri:1;
+#else
+ uint64_t aeri:1;
+ uint64_t se:1;
+ uint64_t pmei:1;
+ uint64_t pmem:1;
+ uint64_t up_b1:1;
+ uint64_t up_b2:1;
+ uint64_t up_bx:1;
+ uint64_t un_b1:1;
+ uint64_t un_b2:1;
+ uint64_t un_bx:1;
+ uint64_t exc:1;
+ uint64_t rdlk:1;
+ uint64_t crs_er:1;
+ uint64_t crs_dr:1;
+ uint64_t reserved_14_63:50;
+#endif
+ } s;
+};
+
+union cvmx_pemx_int_enb_int {
+ uint64_t u64;
+ struct cvmx_pemx_int_enb_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t crs_dr:1;
+ uint64_t crs_er:1;
+ uint64_t rdlk:1;
+ uint64_t exc:1;
+ uint64_t un_bx:1;
+ uint64_t un_b2:1;
+ uint64_t un_b1:1;
+ uint64_t up_bx:1;
+ uint64_t up_b2:1;
+ uint64_t up_b1:1;
+ uint64_t pmem:1;
+ uint64_t pmei:1;
+ uint64_t se:1;
+ uint64_t aeri:1;
+#else
+ uint64_t aeri:1;
+ uint64_t se:1;
+ uint64_t pmei:1;
+ uint64_t pmem:1;
+ uint64_t up_b1:1;
+ uint64_t up_b2:1;
+ uint64_t up_bx:1;
+ uint64_t un_b1:1;
+ uint64_t un_b2:1;
+ uint64_t un_bx:1;
+ uint64_t exc:1;
+ uint64_t rdlk:1;
+ uint64_t crs_er:1;
+ uint64_t crs_dr:1;
+ uint64_t reserved_14_63:50;
+#endif
+ } s;
+};
+
+union cvmx_pemx_int_sum {
+ uint64_t u64;
+ struct cvmx_pemx_int_sum_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t crs_dr:1;
+ uint64_t crs_er:1;
+ uint64_t rdlk:1;
+ uint64_t exc:1;
+ uint64_t un_bx:1;
+ uint64_t un_b2:1;
+ uint64_t un_b1:1;
+ uint64_t up_bx:1;
+ uint64_t up_b2:1;
+ uint64_t up_b1:1;
+ uint64_t pmem:1;
+ uint64_t pmei:1;
+ uint64_t se:1;
+ uint64_t aeri:1;
+#else
+ uint64_t aeri:1;
+ uint64_t se:1;
+ uint64_t pmei:1;
+ uint64_t pmem:1;
+ uint64_t up_b1:1;
+ uint64_t up_b2:1;
+ uint64_t up_bx:1;
+ uint64_t un_b1:1;
+ uint64_t un_b2:1;
+ uint64_t un_bx:1;
+ uint64_t exc:1;
+ uint64_t rdlk:1;
+ uint64_t crs_er:1;
+ uint64_t crs_dr:1;
+ uint64_t reserved_14_63:50;
+#endif
+ } s;
+};
+
+union cvmx_pemx_p2n_bar0_start {
+ uint64_t u64;
+ struct cvmx_pemx_p2n_bar0_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr:50;
+ uint64_t reserved_0_13:14;
+#else
+ uint64_t reserved_0_13:14;
+ uint64_t addr:50;
+#endif
+ } s;
+};
+
+union cvmx_pemx_p2n_bar1_start {
+ uint64_t u64;
+ struct cvmx_pemx_p2n_bar1_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr:38;
+ uint64_t reserved_0_25:26;
+#else
+ uint64_t reserved_0_25:26;
+ uint64_t addr:38;
+#endif
+ } s;
+};
+
+union cvmx_pemx_p2n_bar2_start {
+ uint64_t u64;
+ struct cvmx_pemx_p2n_bar2_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr:23;
+ uint64_t reserved_0_40:41;
+#else
+ uint64_t reserved_0_40:41;
+ uint64_t addr:23;
+#endif
+ } s;
+};
+
+union cvmx_pemx_p2p_barx_end {
+ uint64_t u64;
+ struct cvmx_pemx_p2p_barx_end_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr:52;
+ uint64_t reserved_0_11:12;
+#else
+ uint64_t reserved_0_11:12;
+ uint64_t addr:52;
+#endif
+ } s;
+};
+
+union cvmx_pemx_p2p_barx_start {
+ uint64_t u64;
+ struct cvmx_pemx_p2p_barx_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr:52;
+ uint64_t reserved_0_11:12;
+#else
+ uint64_t reserved_0_11:12;
+ uint64_t addr:52;
+#endif
+ } s;
+};
+
+union cvmx_pemx_tlp_credits {
+ uint64_t u64;
+ struct cvmx_pemx_tlp_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63:8;
+ uint64_t peai_ppf:8;
+ uint64_t pem_cpl:8;
+ uint64_t pem_np:8;
+ uint64_t pem_p:8;
+ uint64_t sli_cpl:8;
+ uint64_t sli_np:8;
+ uint64_t sli_p:8;
+#else
+ uint64_t sli_p:8;
+ uint64_t sli_np:8;
+ uint64_t sli_cpl:8;
+ uint64_t pem_p:8;
+ uint64_t pem_np:8;
+ uint64_t pem_cpl:8;
+ uint64_t peai_ppf:8;
+ uint64_t reserved_56_63:8;
+#endif
+ } s;
+ struct cvmx_pemx_tlp_credits_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63:8;
+ uint64_t peai_ppf:8;
+ uint64_t reserved_24_47:24;
+ uint64_t sli_cpl:8;
+ uint64_t sli_np:8;
+ uint64_t sli_p:8;
+#else
+ uint64_t sli_p:8;
+ uint64_t sli_np:8;
+ uint64_t sli_cpl:8;
+ uint64_t reserved_24_47:24;
+ uint64_t peai_ppf:8;
+ uint64_t reserved_56_63:8;
+#endif
+ } cn61xx;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pescx-defs.h b/arch/mips/include/asm/octeon/cvmx-pescx-defs.h
new file mode 100644
index 000000000..665610825
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-pescx-defs.h
@@ -0,0 +1,579 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_PESCX_DEFS_H__
+#define __CVMX_PESCX_DEFS_H__
+
+#define CVMX_PESCX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000018ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_BIST_STATUS2(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000418ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_CFG_RD(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000030ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_CFG_WR(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000028ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_CPL_LUT_VALID(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000098ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_CTL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000000ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_CTL_STATUS2(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000400ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_DBG_INFO(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000008ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_DBG_INFO_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800C80000A0ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_DIAG_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000020ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_P2N_BAR0_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000080ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_P2N_BAR1_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000088ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_P2N_BAR2_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000090ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_P2P_BARX_END(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x800000ull) * 16)
+#define CVMX_PESCX_P2P_BARX_START(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x800000ull) * 16)
+#define CVMX_PESCX_TLP_CREDITS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000038ull) + ((block_id) & 1) * 0x8000000ull)
+
+union cvmx_pescx_bist_status {
+ uint64_t u64;
+ struct cvmx_pescx_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t rqdata5:1;
+ uint64_t ctlp_or:1;
+ uint64_t ntlp_or:1;
+ uint64_t ptlp_or:1;
+ uint64_t retry:1;
+ uint64_t rqdata0:1;
+ uint64_t rqdata1:1;
+ uint64_t rqdata2:1;
+ uint64_t rqdata3:1;
+ uint64_t rqdata4:1;
+ uint64_t rqhdr1:1;
+ uint64_t rqhdr0:1;
+ uint64_t sot:1;
+#else
+ uint64_t sot:1;
+ uint64_t rqhdr0:1;
+ uint64_t rqhdr1:1;
+ uint64_t rqdata4:1;
+ uint64_t rqdata3:1;
+ uint64_t rqdata2:1;
+ uint64_t rqdata1:1;
+ uint64_t rqdata0:1;
+ uint64_t retry:1;
+ uint64_t ptlp_or:1;
+ uint64_t ntlp_or:1;
+ uint64_t ctlp_or:1;
+ uint64_t rqdata5:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+ struct cvmx_pescx_bist_status_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t ctlp_or:1;
+ uint64_t ntlp_or:1;
+ uint64_t ptlp_or:1;
+ uint64_t retry:1;
+ uint64_t rqdata0:1;
+ uint64_t rqdata1:1;
+ uint64_t rqdata2:1;
+ uint64_t rqdata3:1;
+ uint64_t rqdata4:1;
+ uint64_t rqhdr1:1;
+ uint64_t rqhdr0:1;
+ uint64_t sot:1;
+#else
+ uint64_t sot:1;
+ uint64_t rqhdr0:1;
+ uint64_t rqhdr1:1;
+ uint64_t rqdata4:1;
+ uint64_t rqdata3:1;
+ uint64_t rqdata2:1;
+ uint64_t rqdata1:1;
+ uint64_t rqdata0:1;
+ uint64_t retry:1;
+ uint64_t ptlp_or:1;
+ uint64_t ntlp_or:1;
+ uint64_t ctlp_or:1;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn52xxp1;
+};
+
+union cvmx_pescx_bist_status2 {
+ uint64_t u64;
+ struct cvmx_pescx_bist_status2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t cto_p2e:1;
+ uint64_t e2p_cpl:1;
+ uint64_t e2p_n:1;
+ uint64_t e2p_p:1;
+ uint64_t e2p_rsl:1;
+ uint64_t dbg_p2e:1;
+ uint64_t peai_p2e:1;
+ uint64_t rsl_p2e:1;
+ uint64_t pef_tpf1:1;
+ uint64_t pef_tpf0:1;
+ uint64_t pef_tnf:1;
+ uint64_t pef_tcf1:1;
+ uint64_t pef_tc0:1;
+ uint64_t ppf:1;
+#else
+ uint64_t ppf:1;
+ uint64_t pef_tc0:1;
+ uint64_t pef_tcf1:1;
+ uint64_t pef_tnf:1;
+ uint64_t pef_tpf0:1;
+ uint64_t pef_tpf1:1;
+ uint64_t rsl_p2e:1;
+ uint64_t peai_p2e:1;
+ uint64_t dbg_p2e:1;
+ uint64_t e2p_rsl:1;
+ uint64_t e2p_p:1;
+ uint64_t e2p_n:1;
+ uint64_t e2p_cpl:1;
+ uint64_t cto_p2e:1;
+ uint64_t reserved_14_63:50;
+#endif
+ } s;
+};
+
+union cvmx_pescx_cfg_rd {
+ uint64_t u64;
+ struct cvmx_pescx_cfg_rd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data:32;
+ uint64_t addr:32;
+#else
+ uint64_t addr:32;
+ uint64_t data:32;
+#endif
+ } s;
+};
+
+union cvmx_pescx_cfg_wr {
+ uint64_t u64;
+ struct cvmx_pescx_cfg_wr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data:32;
+ uint64_t addr:32;
+#else
+ uint64_t addr:32;
+ uint64_t data:32;
+#endif
+ } s;
+};
+
+union cvmx_pescx_cpl_lut_valid {
+ uint64_t u64;
+ struct cvmx_pescx_cpl_lut_valid_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t tag:32;
+#else
+ uint64_t tag:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pescx_ctl_status {
+ uint64_t u64;
+ struct cvmx_pescx_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t dnum:5;
+ uint64_t pbus:8;
+ uint64_t qlm_cfg:2;
+ uint64_t lane_swp:1;
+ uint64_t pm_xtoff:1;
+ uint64_t pm_xpme:1;
+ uint64_t ob_p_cmd:1;
+ uint64_t reserved_7_8:2;
+ uint64_t nf_ecrc:1;
+ uint64_t dly_one:1;
+ uint64_t lnk_enb:1;
+ uint64_t ro_ctlp:1;
+ uint64_t reserved_2_2:1;
+ uint64_t inv_ecrc:1;
+ uint64_t inv_lcrc:1;
+#else
+ uint64_t inv_lcrc:1;
+ uint64_t inv_ecrc:1;
+ uint64_t reserved_2_2:1;
+ uint64_t ro_ctlp:1;
+ uint64_t lnk_enb:1;
+ uint64_t dly_one:1;
+ uint64_t nf_ecrc:1;
+ uint64_t reserved_7_8:2;
+ uint64_t ob_p_cmd:1;
+ uint64_t pm_xpme:1;
+ uint64_t pm_xtoff:1;
+ uint64_t lane_swp:1;
+ uint64_t qlm_cfg:2;
+ uint64_t pbus:8;
+ uint64_t dnum:5;
+ uint64_t reserved_28_63:36;
+#endif
+ } s;
+ struct cvmx_pescx_ctl_status_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t dnum:5;
+ uint64_t pbus:8;
+ uint64_t qlm_cfg:2;
+ uint64_t reserved_12_12:1;
+ uint64_t pm_xtoff:1;
+ uint64_t pm_xpme:1;
+ uint64_t ob_p_cmd:1;
+ uint64_t reserved_7_8:2;
+ uint64_t nf_ecrc:1;
+ uint64_t dly_one:1;
+ uint64_t lnk_enb:1;
+ uint64_t ro_ctlp:1;
+ uint64_t reserved_2_2:1;
+ uint64_t inv_ecrc:1;
+ uint64_t inv_lcrc:1;
+#else
+ uint64_t inv_lcrc:1;
+ uint64_t inv_ecrc:1;
+ uint64_t reserved_2_2:1;
+ uint64_t ro_ctlp:1;
+ uint64_t lnk_enb:1;
+ uint64_t dly_one:1;
+ uint64_t nf_ecrc:1;
+ uint64_t reserved_7_8:2;
+ uint64_t ob_p_cmd:1;
+ uint64_t pm_xpme:1;
+ uint64_t pm_xtoff:1;
+ uint64_t reserved_12_12:1;
+ uint64_t qlm_cfg:2;
+ uint64_t pbus:8;
+ uint64_t dnum:5;
+ uint64_t reserved_28_63:36;
+#endif
+ } cn56xx;
+};
+
+union cvmx_pescx_ctl_status2 {
+ uint64_t u64;
+ struct cvmx_pescx_ctl_status2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t pclk_run:1;
+ uint64_t pcierst:1;
+#else
+ uint64_t pcierst:1;
+ uint64_t pclk_run:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+ struct cvmx_pescx_ctl_status2_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t pcierst:1;
+#else
+ uint64_t pcierst:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } cn52xxp1;
+};
+
+union cvmx_pescx_dbg_info {
+ uint64_t u64;
+ struct cvmx_pescx_dbg_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63:33;
+ uint64_t ecrc_e:1;
+ uint64_t rawwpp:1;
+ uint64_t racpp:1;
+ uint64_t ramtlp:1;
+ uint64_t rarwdns:1;
+ uint64_t caar:1;
+ uint64_t racca:1;
+ uint64_t racur:1;
+ uint64_t rauc:1;
+ uint64_t rqo:1;
+ uint64_t fcuv:1;
+ uint64_t rpe:1;
+ uint64_t fcpvwt:1;
+ uint64_t dpeoosd:1;
+ uint64_t rtwdle:1;
+ uint64_t rdwdle:1;
+ uint64_t mre:1;
+ uint64_t rte:1;
+ uint64_t acto:1;
+ uint64_t rvdm:1;
+ uint64_t rumep:1;
+ uint64_t rptamrc:1;
+ uint64_t rpmerc:1;
+ uint64_t rfemrc:1;
+ uint64_t rnfemrc:1;
+ uint64_t rcemrc:1;
+ uint64_t rpoison:1;
+ uint64_t recrce:1;
+ uint64_t rtlplle:1;
+ uint64_t rtlpmal:1;
+ uint64_t spoison:1;
+#else
+ uint64_t spoison:1;
+ uint64_t rtlpmal:1;
+ uint64_t rtlplle:1;
+ uint64_t recrce:1;
+ uint64_t rpoison:1;
+ uint64_t rcemrc:1;
+ uint64_t rnfemrc:1;
+ uint64_t rfemrc:1;
+ uint64_t rpmerc:1;
+ uint64_t rptamrc:1;
+ uint64_t rumep:1;
+ uint64_t rvdm:1;
+ uint64_t acto:1;
+ uint64_t rte:1;
+ uint64_t mre:1;
+ uint64_t rdwdle:1;
+ uint64_t rtwdle:1;
+ uint64_t dpeoosd:1;
+ uint64_t fcpvwt:1;
+ uint64_t rpe:1;
+ uint64_t fcuv:1;
+ uint64_t rqo:1;
+ uint64_t rauc:1;
+ uint64_t racur:1;
+ uint64_t racca:1;
+ uint64_t caar:1;
+ uint64_t rarwdns:1;
+ uint64_t ramtlp:1;
+ uint64_t racpp:1;
+ uint64_t rawwpp:1;
+ uint64_t ecrc_e:1;
+ uint64_t reserved_31_63:33;
+#endif
+ } s;
+};
+
+union cvmx_pescx_dbg_info_en {
+ uint64_t u64;
+ struct cvmx_pescx_dbg_info_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63:33;
+ uint64_t ecrc_e:1;
+ uint64_t rawwpp:1;
+ uint64_t racpp:1;
+ uint64_t ramtlp:1;
+ uint64_t rarwdns:1;
+ uint64_t caar:1;
+ uint64_t racca:1;
+ uint64_t racur:1;
+ uint64_t rauc:1;
+ uint64_t rqo:1;
+ uint64_t fcuv:1;
+ uint64_t rpe:1;
+ uint64_t fcpvwt:1;
+ uint64_t dpeoosd:1;
+ uint64_t rtwdle:1;
+ uint64_t rdwdle:1;
+ uint64_t mre:1;
+ uint64_t rte:1;
+ uint64_t acto:1;
+ uint64_t rvdm:1;
+ uint64_t rumep:1;
+ uint64_t rptamrc:1;
+ uint64_t rpmerc:1;
+ uint64_t rfemrc:1;
+ uint64_t rnfemrc:1;
+ uint64_t rcemrc:1;
+ uint64_t rpoison:1;
+ uint64_t recrce:1;
+ uint64_t rtlplle:1;
+ uint64_t rtlpmal:1;
+ uint64_t spoison:1;
+#else
+ uint64_t spoison:1;
+ uint64_t rtlpmal:1;
+ uint64_t rtlplle:1;
+ uint64_t recrce:1;
+ uint64_t rpoison:1;
+ uint64_t rcemrc:1;
+ uint64_t rnfemrc:1;
+ uint64_t rfemrc:1;
+ uint64_t rpmerc:1;
+ uint64_t rptamrc:1;
+ uint64_t rumep:1;
+ uint64_t rvdm:1;
+ uint64_t acto:1;
+ uint64_t rte:1;
+ uint64_t mre:1;
+ uint64_t rdwdle:1;
+ uint64_t rtwdle:1;
+ uint64_t dpeoosd:1;
+ uint64_t fcpvwt:1;
+ uint64_t rpe:1;
+ uint64_t fcuv:1;
+ uint64_t rqo:1;
+ uint64_t rauc:1;
+ uint64_t racur:1;
+ uint64_t racca:1;
+ uint64_t caar:1;
+ uint64_t rarwdns:1;
+ uint64_t ramtlp:1;
+ uint64_t racpp:1;
+ uint64_t rawwpp:1;
+ uint64_t ecrc_e:1;
+ uint64_t reserved_31_63:33;
+#endif
+ } s;
+};
+
+union cvmx_pescx_diag_status {
+ uint64_t u64;
+ struct cvmx_pescx_diag_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t pm_dst:1;
+ uint64_t pm_stat:1;
+ uint64_t pm_en:1;
+ uint64_t aux_en:1;
+#else
+ uint64_t aux_en:1;
+ uint64_t pm_en:1;
+ uint64_t pm_stat:1;
+ uint64_t pm_dst:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_pescx_p2n_bar0_start {
+ uint64_t u64;
+ struct cvmx_pescx_p2n_bar0_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr:50;
+ uint64_t reserved_0_13:14;
+#else
+ uint64_t reserved_0_13:14;
+ uint64_t addr:50;
+#endif
+ } s;
+};
+
+union cvmx_pescx_p2n_bar1_start {
+ uint64_t u64;
+ struct cvmx_pescx_p2n_bar1_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr:38;
+ uint64_t reserved_0_25:26;
+#else
+ uint64_t reserved_0_25:26;
+ uint64_t addr:38;
+#endif
+ } s;
+};
+
+union cvmx_pescx_p2n_bar2_start {
+ uint64_t u64;
+ struct cvmx_pescx_p2n_bar2_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr:25;
+ uint64_t reserved_0_38:39;
+#else
+ uint64_t reserved_0_38:39;
+ uint64_t addr:25;
+#endif
+ } s;
+};
+
+union cvmx_pescx_p2p_barx_end {
+ uint64_t u64;
+ struct cvmx_pescx_p2p_barx_end_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr:52;
+ uint64_t reserved_0_11:12;
+#else
+ uint64_t reserved_0_11:12;
+ uint64_t addr:52;
+#endif
+ } s;
+};
+
+union cvmx_pescx_p2p_barx_start {
+ uint64_t u64;
+ struct cvmx_pescx_p2p_barx_start_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t addr:52;
+ uint64_t reserved_0_11:12;
+#else
+ uint64_t reserved_0_11:12;
+ uint64_t addr:52;
+#endif
+ } s;
+};
+
+union cvmx_pescx_tlp_credits {
+ uint64_t u64;
+ struct cvmx_pescx_tlp_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_pescx_tlp_credits_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63:8;
+ uint64_t peai_ppf:8;
+ uint64_t pesc_cpl:8;
+ uint64_t pesc_np:8;
+ uint64_t pesc_p:8;
+ uint64_t npei_cpl:8;
+ uint64_t npei_np:8;
+ uint64_t npei_p:8;
+#else
+ uint64_t npei_p:8;
+ uint64_t npei_np:8;
+ uint64_t npei_cpl:8;
+ uint64_t pesc_p:8;
+ uint64_t pesc_np:8;
+ uint64_t pesc_cpl:8;
+ uint64_t peai_ppf:8;
+ uint64_t reserved_56_63:8;
+#endif
+ } cn52xx;
+ struct cvmx_pescx_tlp_credits_cn52xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63:26;
+ uint64_t peai_ppf:8;
+ uint64_t pesc_cpl:5;
+ uint64_t pesc_np:5;
+ uint64_t pesc_p:5;
+ uint64_t npei_cpl:5;
+ uint64_t npei_np:5;
+ uint64_t npei_p:5;
+#else
+ uint64_t npei_p:5;
+ uint64_t npei_np:5;
+ uint64_t npei_cpl:5;
+ uint64_t pesc_p:5;
+ uint64_t pesc_np:5;
+ uint64_t pesc_cpl:5;
+ uint64_t peai_ppf:8;
+ uint64_t reserved_38_63:26;
+#endif
+ } cn52xxp1;
+};
+
+#endif
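
For context, a minimal sketch (editorial, not part of the patch) of how the PESCX definitions above are typically consumed: the address macro selects the per-port CSR, the full 64-bit value moves through the union's .u64 member, and named fields are accessed through the endian-safe bitfield struct. The cvmx_read_csr()/cvmx_write_csr() accessors are assumed to come from <asm/octeon/cvmx.h>.

/*
 * Illustrative sketch only: PCIe config-space read via PESCX_CFG_RD,
 * assuming the standard cvmx_read_csr()/cvmx_write_csr() CSR accessors.
 */
static uint32_t pescx_cfg_read(int pcie_port, uint32_t cfg_addr)
{
	union cvmx_pescx_cfg_rd cfg_rd;

	cfg_rd.u64 = 0;
	cfg_rd.s.addr = cfg_addr;	/* config-space address to read */
	cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), cfg_rd.u64);
	cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
	return cfg_rd.s.data;		/* 32-bit read result */
}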
diff --git a/arch/mips/include/asm/octeon/cvmx-pexp-defs.h b/arch/mips/include/asm/octeon/cvmx-pexp-defs.h
new file mode 100644
index 000000000..eb673f351
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-pexp-defs.h
@@ -0,0 +1,224 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_PEXP_DEFS_H__
+#define __CVMX_PEXP_DEFS_H__
+
+#define CVMX_PEXP_NPEI_BAR1_INDEXX(offset) (CVMX_ADD_IO_SEG(0x00011F0000008000ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011F0000008580ull))
+#define CVMX_PEXP_NPEI_BIST_STATUS2 (CVMX_ADD_IO_SEG(0x00011F0000008680ull))
+#define CVMX_PEXP_NPEI_CTL_PORT0 (CVMX_ADD_IO_SEG(0x00011F0000008250ull))
+#define CVMX_PEXP_NPEI_CTL_PORT1 (CVMX_ADD_IO_SEG(0x00011F0000008260ull))
+#define CVMX_PEXP_NPEI_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011F0000008570ull))
+#define CVMX_PEXP_NPEI_CTL_STATUS2 (CVMX_ADD_IO_SEG(0x00011F000000BC00ull))
+#define CVMX_PEXP_NPEI_DATA_OUT_CNT (CVMX_ADD_IO_SEG(0x00011F00000085F0ull))
+#define CVMX_PEXP_NPEI_DBG_DATA (CVMX_ADD_IO_SEG(0x00011F0000008510ull))
+#define CVMX_PEXP_NPEI_DBG_SELECT (CVMX_ADD_IO_SEG(0x00011F0000008500ull))
+#define CVMX_PEXP_NPEI_DMA0_INT_LEVEL (CVMX_ADD_IO_SEG(0x00011F00000085C0ull))
+#define CVMX_PEXP_NPEI_DMA1_INT_LEVEL (CVMX_ADD_IO_SEG(0x00011F00000085D0ull))
+#define CVMX_PEXP_NPEI_DMAX_COUNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000008450ull) + ((offset) & 7) * 16)
+#define CVMX_PEXP_NPEI_DMAX_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F00000083B0ull) + ((offset) & 7) * 16)
+#define CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000008400ull) + ((offset) & 7) * 16)
+#define CVMX_PEXP_NPEI_DMAX_NADDR(offset) (CVMX_ADD_IO_SEG(0x00011F00000084A0ull) + ((offset) & 7) * 16)
+#define CVMX_PEXP_NPEI_DMA_CNTS (CVMX_ADD_IO_SEG(0x00011F00000085E0ull))
+#define CVMX_PEXP_NPEI_DMA_CONTROL (CVMX_ADD_IO_SEG(0x00011F00000083A0ull))
+#define CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM (CVMX_ADD_IO_SEG(0x00011F00000085B0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE1 (CVMX_ADD_IO_SEG(0x00011F00000086C0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE1_P1 (CVMX_ADD_IO_SEG(0x00011F0000008680ull))
+#define CVMX_PEXP_NPEI_DMA_STATE2 (CVMX_ADD_IO_SEG(0x00011F00000086D0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE2_P1 (CVMX_ADD_IO_SEG(0x00011F0000008690ull))
+#define CVMX_PEXP_NPEI_DMA_STATE3_P1 (CVMX_ADD_IO_SEG(0x00011F00000086A0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE4_P1 (CVMX_ADD_IO_SEG(0x00011F00000086B0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE5_P1 (CVMX_ADD_IO_SEG(0x00011F00000086C0ull))
+#define CVMX_PEXP_NPEI_INT_A_ENB (CVMX_ADD_IO_SEG(0x00011F0000008560ull))
+#define CVMX_PEXP_NPEI_INT_A_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BCE0ull))
+#define CVMX_PEXP_NPEI_INT_A_SUM (CVMX_ADD_IO_SEG(0x00011F0000008550ull))
+#define CVMX_PEXP_NPEI_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000008540ull))
+#define CVMX_PEXP_NPEI_INT_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BCD0ull))
+#define CVMX_PEXP_NPEI_INT_INFO (CVMX_ADD_IO_SEG(0x00011F0000008590ull))
+#define CVMX_PEXP_NPEI_INT_SUM (CVMX_ADD_IO_SEG(0x00011F0000008530ull))
+#define CVMX_PEXP_NPEI_INT_SUM2 (CVMX_ADD_IO_SEG(0x00011F000000BCC0ull))
+#define CVMX_PEXP_NPEI_LAST_WIN_RDATA0 (CVMX_ADD_IO_SEG(0x00011F0000008600ull))
+#define CVMX_PEXP_NPEI_LAST_WIN_RDATA1 (CVMX_ADD_IO_SEG(0x00011F0000008610ull))
+#define CVMX_PEXP_NPEI_MEM_ACCESS_CTL (CVMX_ADD_IO_SEG(0x00011F00000084F0ull))
+#define CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(offset) (CVMX_ADD_IO_SEG(0x00011F0000008280ull) + ((offset) & 31) * 16 - 16*12)
+#define CVMX_PEXP_NPEI_MSI_ENB0 (CVMX_ADD_IO_SEG(0x00011F000000BC50ull))
+#define CVMX_PEXP_NPEI_MSI_ENB1 (CVMX_ADD_IO_SEG(0x00011F000000BC60ull))
+#define CVMX_PEXP_NPEI_MSI_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BC70ull))
+#define CVMX_PEXP_NPEI_MSI_ENB3 (CVMX_ADD_IO_SEG(0x00011F000000BC80ull))
+#define CVMX_PEXP_NPEI_MSI_RCV0 (CVMX_ADD_IO_SEG(0x00011F000000BC10ull))
+#define CVMX_PEXP_NPEI_MSI_RCV1 (CVMX_ADD_IO_SEG(0x00011F000000BC20ull))
+#define CVMX_PEXP_NPEI_MSI_RCV2 (CVMX_ADD_IO_SEG(0x00011F000000BC30ull))
+#define CVMX_PEXP_NPEI_MSI_RCV3 (CVMX_ADD_IO_SEG(0x00011F000000BC40ull))
+#define CVMX_PEXP_NPEI_MSI_RD_MAP (CVMX_ADD_IO_SEG(0x00011F000000BCA0ull))
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB0 (CVMX_ADD_IO_SEG(0x00011F000000BCF0ull))
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB1 (CVMX_ADD_IO_SEG(0x00011F000000BD00ull))
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BD10ull))
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB3 (CVMX_ADD_IO_SEG(0x00011F000000BD20ull))
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB0 (CVMX_ADD_IO_SEG(0x00011F000000BD30ull))
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB1 (CVMX_ADD_IO_SEG(0x00011F000000BD40ull))
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BD50ull))
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB3 (CVMX_ADD_IO_SEG(0x00011F000000BD60ull))
+#define CVMX_PEXP_NPEI_MSI_WR_MAP (CVMX_ADD_IO_SEG(0x00011F000000BC90ull))
+#define CVMX_PEXP_NPEI_PCIE_CREDIT_CNT (CVMX_ADD_IO_SEG(0x00011F000000BD70ull))
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV (CVMX_ADD_IO_SEG(0x00011F000000BCB0ull))
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B1 (CVMX_ADD_IO_SEG(0x00011F0000008650ull))
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B2 (CVMX_ADD_IO_SEG(0x00011F0000008660ull))
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B3 (CVMX_ADD_IO_SEG(0x00011F0000008670ull))
+#define CVMX_PEXP_NPEI_PKTX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F000000A400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_INSTR_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F000000A800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F000000AC00ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F000000B000ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_INSTR_HEADER(offset) (CVMX_ADD_IO_SEG(0x00011F000000B400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_IN_BP(offset) (CVMX_ADD_IO_SEG(0x00011F000000B800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_SLIST_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000009400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F0000009800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000009C00ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKT_CNT_INT (CVMX_ADD_IO_SEG(0x00011F0000009110ull))
+#define CVMX_PEXP_NPEI_PKT_CNT_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000009130ull))
+#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ES (CVMX_ADD_IO_SEG(0x00011F00000090B0ull))
+#define CVMX_PEXP_NPEI_PKT_DATA_OUT_NS (CVMX_ADD_IO_SEG(0x00011F00000090A0ull))
+#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ROR (CVMX_ADD_IO_SEG(0x00011F0000009090ull))
+#define CVMX_PEXP_NPEI_PKT_DPADDR (CVMX_ADD_IO_SEG(0x00011F0000009080ull))
+#define CVMX_PEXP_NPEI_PKT_INPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000009150ull))
+#define CVMX_PEXP_NPEI_PKT_INSTR_ENB (CVMX_ADD_IO_SEG(0x00011F0000009000ull))
+#define CVMX_PEXP_NPEI_PKT_INSTR_RD_SIZE (CVMX_ADD_IO_SEG(0x00011F0000009190ull))
+#define CVMX_PEXP_NPEI_PKT_INSTR_SIZE (CVMX_ADD_IO_SEG(0x00011F0000009020ull))
+#define CVMX_PEXP_NPEI_PKT_INT_LEVELS (CVMX_ADD_IO_SEG(0x00011F0000009100ull))
+#define CVMX_PEXP_NPEI_PKT_IN_BP (CVMX_ADD_IO_SEG(0x00011F00000086B0ull))
+#define CVMX_PEXP_NPEI_PKT_IN_DONEX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F000000A000ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKT_IN_INSTR_COUNTS (CVMX_ADD_IO_SEG(0x00011F00000086A0ull))
+#define CVMX_PEXP_NPEI_PKT_IN_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000091A0ull))
+#define CVMX_PEXP_NPEI_PKT_IPTR (CVMX_ADD_IO_SEG(0x00011F0000009070ull))
+#define CVMX_PEXP_NPEI_PKT_OUTPUT_WMARK (CVMX_ADD_IO_SEG(0x00011F0000009160ull))
+#define CVMX_PEXP_NPEI_PKT_OUT_BMODE (CVMX_ADD_IO_SEG(0x00011F00000090D0ull))
+#define CVMX_PEXP_NPEI_PKT_OUT_ENB (CVMX_ADD_IO_SEG(0x00011F0000009010ull))
+#define CVMX_PEXP_NPEI_PKT_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000090E0ull))
+#define CVMX_PEXP_NPEI_PKT_PORT_IN_RST (CVMX_ADD_IO_SEG(0x00011F0000008690ull))
+#define CVMX_PEXP_NPEI_PKT_SLIST_ES (CVMX_ADD_IO_SEG(0x00011F0000009050ull))
+#define CVMX_PEXP_NPEI_PKT_SLIST_ID_SIZE (CVMX_ADD_IO_SEG(0x00011F0000009180ull))
+#define CVMX_PEXP_NPEI_PKT_SLIST_NS (CVMX_ADD_IO_SEG(0x00011F0000009040ull))
+#define CVMX_PEXP_NPEI_PKT_SLIST_ROR (CVMX_ADD_IO_SEG(0x00011F0000009030ull))
+#define CVMX_PEXP_NPEI_PKT_TIME_INT (CVMX_ADD_IO_SEG(0x00011F0000009120ull))
+#define CVMX_PEXP_NPEI_PKT_TIME_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000009140ull))
+#define CVMX_PEXP_NPEI_RSL_INT_BLOCKS (CVMX_ADD_IO_SEG(0x00011F0000008520ull))
+#define CVMX_PEXP_NPEI_SCRATCH_1 (CVMX_ADD_IO_SEG(0x00011F0000008270ull))
+#define CVMX_PEXP_NPEI_STATE1 (CVMX_ADD_IO_SEG(0x00011F0000008620ull))
+#define CVMX_PEXP_NPEI_STATE2 (CVMX_ADD_IO_SEG(0x00011F0000008630ull))
+#define CVMX_PEXP_NPEI_STATE3 (CVMX_ADD_IO_SEG(0x00011F0000008640ull))
+#define CVMX_PEXP_NPEI_WINDOW_CTL (CVMX_ADD_IO_SEG(0x00011F0000008380ull))
+#define CVMX_PEXP_SLI_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011F0000010580ull))
+#define CVMX_PEXP_SLI_CTL_PORTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000010050ull) + ((offset) & 3) * 16)
+#define CVMX_PEXP_SLI_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011F0000010570ull))
+#define CVMX_PEXP_SLI_DATA_OUT_CNT (CVMX_ADD_IO_SEG(0x00011F00000105F0ull))
+#define CVMX_PEXP_SLI_DBG_DATA (CVMX_ADD_IO_SEG(0x00011F0000010310ull))
+#define CVMX_PEXP_SLI_DBG_SELECT (CVMX_ADD_IO_SEG(0x00011F0000010300ull))
+#define CVMX_PEXP_SLI_DMAX_CNT(offset) (CVMX_ADD_IO_SEG(0x00011F0000010400ull) + ((offset) & 1) * 16)
+#define CVMX_PEXP_SLI_DMAX_INT_LEVEL(offset) (CVMX_ADD_IO_SEG(0x00011F00000103E0ull) + ((offset) & 1) * 16)
+#define CVMX_PEXP_SLI_DMAX_TIM(offset) (CVMX_ADD_IO_SEG(0x00011F0000010420ull) + ((offset) & 1) * 16)
+#define CVMX_PEXP_SLI_INT_ENB_CIU (CVMX_ADD_IO_SEG(0x00011F0000013CD0ull))
+#define CVMX_PEXP_SLI_INT_ENB_PORTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000010340ull) + ((offset) & 1) * 16)
+#define CVMX_PEXP_SLI_INT_SUM (CVMX_ADD_IO_SEG(0x00011F0000010330ull))
+#define CVMX_PEXP_SLI_LAST_WIN_RDATA0 (CVMX_ADD_IO_SEG(0x00011F0000010600ull))
+#define CVMX_PEXP_SLI_LAST_WIN_RDATA1 (CVMX_ADD_IO_SEG(0x00011F0000010610ull))
+#define CVMX_PEXP_SLI_LAST_WIN_RDATA2 (CVMX_ADD_IO_SEG(0x00011F00000106C0ull))
+#define CVMX_PEXP_SLI_LAST_WIN_RDATA3 (CVMX_ADD_IO_SEG(0x00011F00000106D0ull))
+#define CVMX_PEXP_SLI_MAC_CREDIT_CNT (CVMX_ADD_IO_SEG(0x00011F0000013D70ull))
+#define CVMX_PEXP_SLI_MAC_CREDIT_CNT2 (CVMX_ADD_IO_SEG(0x00011F0000013E10ull))
+#define CVMX_PEXP_SLI_MEM_ACCESS_CTL (CVMX_ADD_IO_SEG(0x00011F00000102F0ull))
+#define CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(offset) (CVMX_ADD_IO_SEG(0x00011F00000100E0ull) + ((offset) & 31) * 16 - 16*12)
+#define CVMX_PEXP_SLI_MSI_ENB0 (CVMX_ADD_IO_SEG(0x00011F0000013C50ull))
+#define CVMX_PEXP_SLI_MSI_ENB1 (CVMX_ADD_IO_SEG(0x00011F0000013C60ull))
+#define CVMX_PEXP_SLI_MSI_ENB2 (CVMX_ADD_IO_SEG(0x00011F0000013C70ull))
+#define CVMX_PEXP_SLI_MSI_ENB3 (CVMX_ADD_IO_SEG(0x00011F0000013C80ull))
+#define CVMX_PEXP_SLI_MSI_RCV0 (CVMX_ADD_IO_SEG(0x00011F0000013C10ull))
+#define CVMX_PEXP_SLI_MSI_RCV1 (CVMX_ADD_IO_SEG(0x00011F0000013C20ull))
+#define CVMX_PEXP_SLI_MSI_RCV2 (CVMX_ADD_IO_SEG(0x00011F0000013C30ull))
+#define CVMX_PEXP_SLI_MSI_RCV3 (CVMX_ADD_IO_SEG(0x00011F0000013C40ull))
+#define CVMX_PEXP_SLI_MSI_RD_MAP (CVMX_ADD_IO_SEG(0x00011F0000013CA0ull))
+#define CVMX_PEXP_SLI_MSI_W1C_ENB0 (CVMX_ADD_IO_SEG(0x00011F0000013CF0ull))
+#define CVMX_PEXP_SLI_MSI_W1C_ENB1 (CVMX_ADD_IO_SEG(0x00011F0000013D00ull))
+#define CVMX_PEXP_SLI_MSI_W1C_ENB2 (CVMX_ADD_IO_SEG(0x00011F0000013D10ull))
+#define CVMX_PEXP_SLI_MSI_W1C_ENB3 (CVMX_ADD_IO_SEG(0x00011F0000013D20ull))
+#define CVMX_PEXP_SLI_MSI_W1S_ENB0 (CVMX_ADD_IO_SEG(0x00011F0000013D30ull))
+#define CVMX_PEXP_SLI_MSI_W1S_ENB1 (CVMX_ADD_IO_SEG(0x00011F0000013D40ull))
+#define CVMX_PEXP_SLI_MSI_W1S_ENB2 (CVMX_ADD_IO_SEG(0x00011F0000013D50ull))
+#define CVMX_PEXP_SLI_MSI_W1S_ENB3 (CVMX_ADD_IO_SEG(0x00011F0000013D60ull))
+#define CVMX_PEXP_SLI_MSI_WR_MAP (CVMX_ADD_IO_SEG(0x00011F0000013C90ull))
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV (CVMX_ADD_IO_SEG(0x00011F0000013CB0ull))
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B1 (CVMX_ADD_IO_SEG(0x00011F0000010650ull))
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B2 (CVMX_ADD_IO_SEG(0x00011F0000010660ull))
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B3 (CVMX_ADD_IO_SEG(0x00011F0000010670ull))
+#define CVMX_PEXP_SLI_PKTX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000012400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_INSTR_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000012800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_INSTR_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F0000012C00ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_INSTR_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000013000ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_INSTR_HEADER(offset) (CVMX_ADD_IO_SEG(0x00011F0000013400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_IN_BP(offset) (CVMX_ADD_IO_SEG(0x00011F0000013800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_OUT_SIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000010C00ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_SLIST_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000011400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_SLIST_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F0000011800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_SLIST_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000011C00ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKT_CNT_INT (CVMX_ADD_IO_SEG(0x00011F0000011130ull))
+#define CVMX_PEXP_SLI_PKT_CNT_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000011150ull))
+#define CVMX_PEXP_SLI_PKT_CTL (CVMX_ADD_IO_SEG(0x00011F0000011220ull))
+#define CVMX_PEXP_SLI_PKT_DATA_OUT_ES (CVMX_ADD_IO_SEG(0x00011F00000110B0ull))
+#define CVMX_PEXP_SLI_PKT_DATA_OUT_NS (CVMX_ADD_IO_SEG(0x00011F00000110A0ull))
+#define CVMX_PEXP_SLI_PKT_DATA_OUT_ROR (CVMX_ADD_IO_SEG(0x00011F0000011090ull))
+#define CVMX_PEXP_SLI_PKT_DPADDR (CVMX_ADD_IO_SEG(0x00011F0000011080ull))
+#define CVMX_PEXP_SLI_PKT_INPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000011170ull))
+#define CVMX_PEXP_SLI_PKT_INSTR_ENB (CVMX_ADD_IO_SEG(0x00011F0000011000ull))
+#define CVMX_PEXP_SLI_PKT_INSTR_RD_SIZE (CVMX_ADD_IO_SEG(0x00011F00000111A0ull))
+#define CVMX_PEXP_SLI_PKT_INSTR_SIZE (CVMX_ADD_IO_SEG(0x00011F0000011020ull))
+#define CVMX_PEXP_SLI_PKT_INT_LEVELS (CVMX_ADD_IO_SEG(0x00011F0000011120ull))
+#define CVMX_PEXP_SLI_PKT_IN_BP (CVMX_ADD_IO_SEG(0x00011F0000011210ull))
+#define CVMX_PEXP_SLI_PKT_IN_DONEX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000012000ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKT_IN_INSTR_COUNTS (CVMX_ADD_IO_SEG(0x00011F0000011200ull))
+#define CVMX_PEXP_SLI_PKT_IN_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000111B0ull))
+#define CVMX_PEXP_SLI_PKT_IPTR (CVMX_ADD_IO_SEG(0x00011F0000011070ull))
+#define CVMX_PEXP_SLI_PKT_OUTPUT_WMARK (CVMX_ADD_IO_SEG(0x00011F0000011180ull))
+#define CVMX_PEXP_SLI_PKT_OUT_BMODE (CVMX_ADD_IO_SEG(0x00011F00000110D0ull))
+#define CVMX_PEXP_SLI_PKT_OUT_BP_EN (CVMX_ADD_IO_SEG(0x00011F0000011240ull))
+#define CVMX_PEXP_SLI_PKT_OUT_ENB (CVMX_ADD_IO_SEG(0x00011F0000011010ull))
+#define CVMX_PEXP_SLI_PKT_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000110E0ull))
+#define CVMX_PEXP_SLI_PKT_PORT_IN_RST (CVMX_ADD_IO_SEG(0x00011F00000111F0ull))
+#define CVMX_PEXP_SLI_PKT_SLIST_ES (CVMX_ADD_IO_SEG(0x00011F0000011050ull))
+#define CVMX_PEXP_SLI_PKT_SLIST_NS (CVMX_ADD_IO_SEG(0x00011F0000011040ull))
+#define CVMX_PEXP_SLI_PKT_SLIST_ROR (CVMX_ADD_IO_SEG(0x00011F0000011030ull))
+#define CVMX_PEXP_SLI_PKT_TIME_INT (CVMX_ADD_IO_SEG(0x00011F0000011140ull))
+#define CVMX_PEXP_SLI_PKT_TIME_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000011160ull))
+#define CVMX_PEXP_SLI_PORTX_PKIND(offset) (CVMX_ADD_IO_SEG(0x00011F0000010800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_S2M_PORTX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011F0000013D80ull) + ((offset) & 3) * 16)
+#define CVMX_PEXP_SLI_SCRATCH_1 (CVMX_ADD_IO_SEG(0x00011F00000103C0ull))
+#define CVMX_PEXP_SLI_SCRATCH_2 (CVMX_ADD_IO_SEG(0x00011F00000103D0ull))
+#define CVMX_PEXP_SLI_STATE1 (CVMX_ADD_IO_SEG(0x00011F0000010620ull))
+#define CVMX_PEXP_SLI_STATE2 (CVMX_ADD_IO_SEG(0x00011F0000010630ull))
+#define CVMX_PEXP_SLI_STATE3 (CVMX_ADD_IO_SEG(0x00011F0000010640ull))
+#define CVMX_PEXP_SLI_TX_PIPE (CVMX_ADD_IO_SEG(0x00011F0000011230ull))
+#define CVMX_PEXP_SLI_WINDOW_CTL (CVMX_ADD_IO_SEG(0x00011F00000102E0ull))
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pip-defs.h b/arch/mips/include/asm/octeon/cvmx-pip-defs.h
new file mode 100644
index 000000000..e42f411bd
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-pip-defs.h
@@ -0,0 +1,2734 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_PIP_DEFS_H__
+#define __CVMX_PIP_DEFS_H__
+
+/*
+ * Enumeration representing the amount of packet processing
+ * and validation performed by the input hardware.
+ */
+enum cvmx_pip_port_parse_mode {
+ /*
+ * Packet input doesn't perform any processing of the input
+ * packet.
+ */
+ CVMX_PIP_PORT_CFG_MODE_NONE = 0ull,
+ /*
+ * Full packet processing is performed with pointer starting
+ * at the L2 (ethernet MAC) header.
+ */
+ CVMX_PIP_PORT_CFG_MODE_SKIPL2 = 1ull,
+ /*
+	 * Input packets are assumed to be IP. Results for non-IP
+	 * packets are undefined. Pointers reference the beginning of
+ * the IP header.
+ */
+ CVMX_PIP_PORT_CFG_MODE_SKIPIP = 2ull
+};
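
A minimal sketch (editorial, not part of the patch) of how a parse mode from this enumeration might be applied to a port. It assumes the cvmx_pip_prt_cfgx union, defined further down in this header and outside this excerpt, exposes the port's 'mode' field, and that the usual cvmx_read_csr()/cvmx_write_csr() accessors from <asm/octeon/cvmx.h> are available.

/*
 * Illustrative sketch only: program a port's parse mode through
 * PIP_PRT_CFG.  The 'mode' field layout is assumed, not shown above.
 */
static void pip_set_parse_mode(int port, enum cvmx_pip_port_parse_mode mode)
{
	union cvmx_pip_prt_cfgx prt_cfg;

	prt_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(port));
	prt_cfg.s.mode = mode;	/* e.g. CVMX_PIP_PORT_CFG_MODE_SKIPL2 */
	cvmx_write_csr(CVMX_PIP_PRT_CFGX(port), prt_cfg.u64);
}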
+
+#define CVMX_PIP_ALT_SKIP_CFGX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002A00ull) + ((offset) & 3) * 8)
+#define CVMX_PIP_BCK_PRS (CVMX_ADD_IO_SEG(0x00011800A0000038ull))
+#define CVMX_PIP_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011800A0000000ull))
+#define CVMX_PIP_BSEL_EXT_CFGX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002800ull) + ((offset) & 3) * 16)
+#define CVMX_PIP_BSEL_EXT_POSX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002808ull) + ((offset) & 3) * 16)
+#define CVMX_PIP_BSEL_TBL_ENTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0003000ull) + ((offset) & 511) * 8)
+#define CVMX_PIP_CLKEN (CVMX_ADD_IO_SEG(0x00011800A0000040ull))
+#define CVMX_PIP_CRC_CTLX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000040ull) + ((offset) & 1) * 8)
+#define CVMX_PIP_CRC_IVX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000050ull) + ((offset) & 1) * 8)
+#define CVMX_PIP_DEC_IPSECX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000080ull) + ((offset) & 3) * 8)
+#define CVMX_PIP_DSA_SRC_GRP (CVMX_ADD_IO_SEG(0x00011800A0000190ull))
+#define CVMX_PIP_DSA_VID_GRP (CVMX_ADD_IO_SEG(0x00011800A0000198ull))
+#define CVMX_PIP_FRM_LEN_CHKX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000180ull) + ((offset) & 1) * 8)
+#define CVMX_PIP_GBL_CFG (CVMX_ADD_IO_SEG(0x00011800A0000028ull))
+#define CVMX_PIP_GBL_CTL (CVMX_ADD_IO_SEG(0x00011800A0000020ull))
+#define CVMX_PIP_HG_PRI_QOS (CVMX_ADD_IO_SEG(0x00011800A00001A0ull))
+#define CVMX_PIP_INT_EN (CVMX_ADD_IO_SEG(0x00011800A0000010ull))
+#define CVMX_PIP_INT_REG (CVMX_ADD_IO_SEG(0x00011800A0000008ull))
+#define CVMX_PIP_IP_OFFSET (CVMX_ADD_IO_SEG(0x00011800A0000060ull))
+#define CVMX_PIP_PRI_TBLX(offset) (CVMX_ADD_IO_SEG(0x00011800A0004000ull) + ((offset) & 255) * 8)
+#define CVMX_PIP_PRT_CFGBX(offset) (CVMX_ADD_IO_SEG(0x00011800A0008000ull) + ((offset) & 63) * 8)
+#define CVMX_PIP_PRT_CFGX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000200ull) + ((offset) & 63) * 8)
+#define CVMX_PIP_PRT_TAGX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000400ull) + ((offset) & 63) * 8)
+#define CVMX_PIP_QOS_DIFFX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000600ull) + ((offset) & 63) * 8)
+#define CVMX_PIP_QOS_VLANX(offset) (CVMX_ADD_IO_SEG(0x00011800A00000C0ull) + ((offset) & 7) * 8)
+#define CVMX_PIP_QOS_WATCHX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000100ull) + ((offset) & 7) * 8)
+#define CVMX_PIP_RAW_WORD (CVMX_ADD_IO_SEG(0x00011800A00000B0ull))
+#define CVMX_PIP_SFT_RST (CVMX_ADD_IO_SEG(0x00011800A0000030ull))
+#define CVMX_PIP_STAT0_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000800ull) + ((offset) & 63) * 80)
+#define CVMX_PIP_STAT0_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040000ull) + ((offset) & 63) * 128)
+#define CVMX_PIP_STAT10_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0001480ull) + ((offset) & 63) * 16)
+#define CVMX_PIP_STAT10_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040050ull) + ((offset) & 63) * 128)
+#define CVMX_PIP_STAT11_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0001488ull) + ((offset) & 63) * 16)
+#define CVMX_PIP_STAT11_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040058ull) + ((offset) & 63) * 128)
+#define CVMX_PIP_STAT1_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000808ull) + ((offset) & 63) * 80)
+#define CVMX_PIP_STAT1_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040008ull) + ((offset) & 63) * 128)
+#define CVMX_PIP_STAT2_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000810ull) + ((offset) & 63) * 80)
+#define CVMX_PIP_STAT2_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040010ull) + ((offset) & 63) * 128)
+#define CVMX_PIP_STAT3_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000818ull) + ((offset) & 63) * 80)
+#define CVMX_PIP_STAT3_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040018ull) + ((offset) & 63) * 128)
+#define CVMX_PIP_STAT4_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000820ull) + ((offset) & 63) * 80)
+#define CVMX_PIP_STAT4_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040020ull) + ((offset) & 63) * 128)
+#define CVMX_PIP_STAT5_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000828ull) + ((offset) & 63) * 80)
+#define CVMX_PIP_STAT5_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040028ull) + ((offset) & 63) * 128)
+#define CVMX_PIP_STAT6_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000830ull) + ((offset) & 63) * 80)
+#define CVMX_PIP_STAT6_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040030ull) + ((offset) & 63) * 128)
+#define CVMX_PIP_STAT7_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000838ull) + ((offset) & 63) * 80)
+#define CVMX_PIP_STAT7_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040038ull) + ((offset) & 63) * 128)
+#define CVMX_PIP_STAT8_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000840ull) + ((offset) & 63) * 80)
+#define CVMX_PIP_STAT8_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040040ull) + ((offset) & 63) * 128)
+#define CVMX_PIP_STAT9_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0000848ull) + ((offset) & 63) * 80)
+#define CVMX_PIP_STAT9_X(offset) (CVMX_ADD_IO_SEG(0x00011800A0040048ull) + ((offset) & 63) * 128)
+#define CVMX_PIP_STAT_CTL (CVMX_ADD_IO_SEG(0x00011800A0000018ull))
+#define CVMX_PIP_STAT_INB_ERRSX(offset) (CVMX_ADD_IO_SEG(0x00011800A0001A10ull) + ((offset) & 63) * 32)
+#define CVMX_PIP_STAT_INB_ERRS_PKNDX(offset) (CVMX_ADD_IO_SEG(0x00011800A0020010ull) + ((offset) & 63) * 32)
+#define CVMX_PIP_STAT_INB_OCTSX(offset) (CVMX_ADD_IO_SEG(0x00011800A0001A08ull) + ((offset) & 63) * 32)
+#define CVMX_PIP_STAT_INB_OCTS_PKNDX(offset) (CVMX_ADD_IO_SEG(0x00011800A0020008ull) + ((offset) & 63) * 32)
+#define CVMX_PIP_STAT_INB_PKTSX(offset) (CVMX_ADD_IO_SEG(0x00011800A0001A00ull) + ((offset) & 63) * 32)
+#define CVMX_PIP_STAT_INB_PKTS_PKNDX(offset) (CVMX_ADD_IO_SEG(0x00011800A0020000ull) + ((offset) & 63) * 32)
+#define CVMX_PIP_SUB_PKIND_FCSX(block_id) (CVMX_ADD_IO_SEG(0x00011800A0080000ull))
+#define CVMX_PIP_TAG_INCX(offset) (CVMX_ADD_IO_SEG(0x00011800A0001800ull) + ((offset) & 63) * 8)
+#define CVMX_PIP_TAG_MASK (CVMX_ADD_IO_SEG(0x00011800A0000070ull))
+#define CVMX_PIP_TAG_SECRET (CVMX_ADD_IO_SEG(0x00011800A0000068ull))
+#define CVMX_PIP_TODO_ENTRY (CVMX_ADD_IO_SEG(0x00011800A0000078ull))
+#define CVMX_PIP_VLAN_ETYPESX(offset) (CVMX_ADD_IO_SEG(0x00011800A00001C0ull) + ((offset) & 1) * 8)
+#define CVMX_PIP_XSTAT0_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002000ull) + ((offset) & 63) * 80 - 80*40)
+#define CVMX_PIP_XSTAT10_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0001700ull) + ((offset) & 63) * 16 - 16*40)
+#define CVMX_PIP_XSTAT11_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0001708ull) + ((offset) & 63) * 16 - 16*40)
+#define CVMX_PIP_XSTAT1_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002008ull) + ((offset) & 63) * 80 - 80*40)
+#define CVMX_PIP_XSTAT2_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002010ull) + ((offset) & 63) * 80 - 80*40)
+#define CVMX_PIP_XSTAT3_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002018ull) + ((offset) & 63) * 80 - 80*40)
+#define CVMX_PIP_XSTAT4_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002020ull) + ((offset) & 63) * 80 - 80*40)
+#define CVMX_PIP_XSTAT5_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002028ull) + ((offset) & 63) * 80 - 80*40)
+#define CVMX_PIP_XSTAT6_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002030ull) + ((offset) & 63) * 80 - 80*40)
+#define CVMX_PIP_XSTAT7_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002038ull) + ((offset) & 63) * 80 - 80*40)
+#define CVMX_PIP_XSTAT8_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002040ull) + ((offset) & 63) * 80 - 80*40)
+#define CVMX_PIP_XSTAT9_PRTX(offset) (CVMX_ADD_IO_SEG(0x00011800A0002048ull) + ((offset) & 63) * 80 - 80*40)
+
+union cvmx_pip_alt_skip_cfgx {
+ uint64_t u64;
+ struct cvmx_pip_alt_skip_cfgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_57_63:7;
+ uint64_t len:1;
+ uint64_t reserved_46_55:10;
+ uint64_t bit1:6;
+ uint64_t reserved_38_39:2;
+ uint64_t bit0:6;
+ uint64_t reserved_23_31:9;
+ uint64_t skip3:7;
+ uint64_t reserved_15_15:1;
+ uint64_t skip2:7;
+ uint64_t reserved_7_7:1;
+ uint64_t skip1:7;
+#else
+ uint64_t skip1:7;
+ uint64_t reserved_7_7:1;
+ uint64_t skip2:7;
+ uint64_t reserved_15_15:1;
+ uint64_t skip3:7;
+ uint64_t reserved_23_31:9;
+ uint64_t bit0:6;
+ uint64_t reserved_38_39:2;
+ uint64_t bit1:6;
+ uint64_t reserved_46_55:10;
+ uint64_t len:1;
+ uint64_t reserved_57_63:7;
+#endif
+ } s;
+};
+
+union cvmx_pip_bck_prs {
+ uint64_t u64;
+ struct cvmx_pip_bck_prs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bckprs:1;
+ uint64_t reserved_13_62:50;
+ uint64_t hiwater:5;
+ uint64_t reserved_5_7:3;
+ uint64_t lowater:5;
+#else
+ uint64_t lowater:5;
+ uint64_t reserved_5_7:3;
+ uint64_t hiwater:5;
+ uint64_t reserved_13_62:50;
+ uint64_t bckprs:1;
+#endif
+ } s;
+};
+
+union cvmx_pip_bist_status {
+ uint64_t u64;
+ struct cvmx_pip_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t bist:22;
+#else
+ uint64_t bist:22;
+ uint64_t reserved_22_63:42;
+#endif
+ } s;
+ struct cvmx_pip_bist_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63:46;
+ uint64_t bist:18;
+#else
+ uint64_t bist:18;
+ uint64_t reserved_18_63:46;
+#endif
+ } cn30xx;
+ struct cvmx_pip_bist_status_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t bist:17;
+#else
+ uint64_t bist:17;
+ uint64_t reserved_17_63:47;
+#endif
+ } cn50xx;
+ struct cvmx_pip_bist_status_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t bist:20;
+#else
+ uint64_t bist:20;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn61xx;
+};
+
+union cvmx_pip_bsel_ext_cfgx {
+ uint64_t u64;
+ struct cvmx_pip_bsel_ext_cfgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63:8;
+ uint64_t upper_tag:16;
+ uint64_t tag:8;
+ uint64_t reserved_25_31:7;
+ uint64_t offset:9;
+ uint64_t reserved_7_15:9;
+ uint64_t skip:7;
+#else
+ uint64_t skip:7;
+ uint64_t reserved_7_15:9;
+ uint64_t offset:9;
+ uint64_t reserved_25_31:7;
+ uint64_t tag:8;
+ uint64_t upper_tag:16;
+ uint64_t reserved_56_63:8;
+#endif
+ } s;
+};
+
+union cvmx_pip_bsel_ext_posx {
+ uint64_t u64;
+ struct cvmx_pip_bsel_ext_posx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pos7_val:1;
+ uint64_t pos7:7;
+ uint64_t pos6_val:1;
+ uint64_t pos6:7;
+ uint64_t pos5_val:1;
+ uint64_t pos5:7;
+ uint64_t pos4_val:1;
+ uint64_t pos4:7;
+ uint64_t pos3_val:1;
+ uint64_t pos3:7;
+ uint64_t pos2_val:1;
+ uint64_t pos2:7;
+ uint64_t pos1_val:1;
+ uint64_t pos1:7;
+ uint64_t pos0_val:1;
+ uint64_t pos0:7;
+#else
+ uint64_t pos0:7;
+ uint64_t pos0_val:1;
+ uint64_t pos1:7;
+ uint64_t pos1_val:1;
+ uint64_t pos2:7;
+ uint64_t pos2_val:1;
+ uint64_t pos3:7;
+ uint64_t pos3_val:1;
+ uint64_t pos4:7;
+ uint64_t pos4_val:1;
+ uint64_t pos5:7;
+ uint64_t pos5_val:1;
+ uint64_t pos6:7;
+ uint64_t pos6_val:1;
+ uint64_t pos7:7;
+ uint64_t pos7_val:1;
+#endif
+ } s;
+};
+
+union cvmx_pip_bsel_tbl_entx {
+ uint64_t u64;
+ struct cvmx_pip_bsel_tbl_entx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t tag_en:1;
+ uint64_t grp_en:1;
+ uint64_t tt_en:1;
+ uint64_t qos_en:1;
+ uint64_t reserved_40_59:20;
+ uint64_t tag:8;
+ uint64_t reserved_22_31:10;
+ uint64_t grp:6;
+ uint64_t reserved_10_15:6;
+ uint64_t tt:2;
+ uint64_t reserved_3_7:5;
+ uint64_t qos:3;
+#else
+ uint64_t qos:3;
+ uint64_t reserved_3_7:5;
+ uint64_t tt:2;
+ uint64_t reserved_10_15:6;
+ uint64_t grp:6;
+ uint64_t reserved_22_31:10;
+ uint64_t tag:8;
+ uint64_t reserved_40_59:20;
+ uint64_t qos_en:1;
+ uint64_t tt_en:1;
+ uint64_t grp_en:1;
+ uint64_t tag_en:1;
+#endif
+ } s;
+ struct cvmx_pip_bsel_tbl_entx_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t tag_en:1;
+ uint64_t grp_en:1;
+ uint64_t tt_en:1;
+ uint64_t qos_en:1;
+ uint64_t reserved_40_59:20;
+ uint64_t tag:8;
+ uint64_t reserved_20_31:12;
+ uint64_t grp:4;
+ uint64_t reserved_10_15:6;
+ uint64_t tt:2;
+ uint64_t reserved_3_7:5;
+ uint64_t qos:3;
+#else
+ uint64_t qos:3;
+ uint64_t reserved_3_7:5;
+ uint64_t tt:2;
+ uint64_t reserved_10_15:6;
+ uint64_t grp:4;
+ uint64_t reserved_20_31:12;
+ uint64_t tag:8;
+ uint64_t reserved_40_59:20;
+ uint64_t qos_en:1;
+ uint64_t tt_en:1;
+ uint64_t grp_en:1;
+ uint64_t tag_en:1;
+#endif
+ } cn61xx;
+};
+
+union cvmx_pip_clken {
+ uint64_t u64;
+ struct cvmx_pip_clken_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t clken:1;
+#else
+ uint64_t clken:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_pip_crc_ctlx {
+ uint64_t u64;
+ struct cvmx_pip_crc_ctlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t invres:1;
+ uint64_t reflect:1;
+#else
+ uint64_t reflect:1;
+ uint64_t invres:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_pip_crc_ivx {
+ uint64_t u64;
+ struct cvmx_pip_crc_ivx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t iv:32;
+#else
+ uint64_t iv:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_dec_ipsecx {
+ uint64_t u64;
+ struct cvmx_pip_dec_ipsecx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63:46;
+ uint64_t tcp:1;
+ uint64_t udp:1;
+ uint64_t dprt:16;
+#else
+ uint64_t dprt:16;
+ uint64_t udp:1;
+ uint64_t tcp:1;
+ uint64_t reserved_18_63:46;
+#endif
+ } s;
+};
+
+union cvmx_pip_dsa_src_grp {
+ uint64_t u64;
+ struct cvmx_pip_dsa_src_grp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t map15:4;
+ uint64_t map14:4;
+ uint64_t map13:4;
+ uint64_t map12:4;
+ uint64_t map11:4;
+ uint64_t map10:4;
+ uint64_t map9:4;
+ uint64_t map8:4;
+ uint64_t map7:4;
+ uint64_t map6:4;
+ uint64_t map5:4;
+ uint64_t map4:4;
+ uint64_t map3:4;
+ uint64_t map2:4;
+ uint64_t map1:4;
+ uint64_t map0:4;
+#else
+ uint64_t map0:4;
+ uint64_t map1:4;
+ uint64_t map2:4;
+ uint64_t map3:4;
+ uint64_t map4:4;
+ uint64_t map5:4;
+ uint64_t map6:4;
+ uint64_t map7:4;
+ uint64_t map8:4;
+ uint64_t map9:4;
+ uint64_t map10:4;
+ uint64_t map11:4;
+ uint64_t map12:4;
+ uint64_t map13:4;
+ uint64_t map14:4;
+ uint64_t map15:4;
+#endif
+ } s;
+};
+
+union cvmx_pip_dsa_vid_grp {
+ uint64_t u64;
+ struct cvmx_pip_dsa_vid_grp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t map15:4;
+ uint64_t map14:4;
+ uint64_t map13:4;
+ uint64_t map12:4;
+ uint64_t map11:4;
+ uint64_t map10:4;
+ uint64_t map9:4;
+ uint64_t map8:4;
+ uint64_t map7:4;
+ uint64_t map6:4;
+ uint64_t map5:4;
+ uint64_t map4:4;
+ uint64_t map3:4;
+ uint64_t map2:4;
+ uint64_t map1:4;
+ uint64_t map0:4;
+#else
+ uint64_t map0:4;
+ uint64_t map1:4;
+ uint64_t map2:4;
+ uint64_t map3:4;
+ uint64_t map4:4;
+ uint64_t map5:4;
+ uint64_t map6:4;
+ uint64_t map7:4;
+ uint64_t map8:4;
+ uint64_t map9:4;
+ uint64_t map10:4;
+ uint64_t map11:4;
+ uint64_t map12:4;
+ uint64_t map13:4;
+ uint64_t map14:4;
+ uint64_t map15:4;
+#endif
+ } s;
+};
+
+union cvmx_pip_frm_len_chkx {
+ uint64_t u64;
+ struct cvmx_pip_frm_len_chkx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t maxlen:16;
+ uint64_t minlen:16;
+#else
+ uint64_t minlen:16;
+ uint64_t maxlen:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_gbl_cfg {
+ uint64_t u64;
+ struct cvmx_pip_gbl_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63:45;
+ uint64_t tag_syn:1;
+ uint64_t ip6_udp:1;
+ uint64_t max_l2:1;
+ uint64_t reserved_11_15:5;
+ uint64_t raw_shf:3;
+ uint64_t reserved_3_7:5;
+ uint64_t nip_shf:3;
+#else
+ uint64_t nip_shf:3;
+ uint64_t reserved_3_7:5;
+ uint64_t raw_shf:3;
+ uint64_t reserved_11_15:5;
+ uint64_t max_l2:1;
+ uint64_t ip6_udp:1;
+ uint64_t tag_syn:1;
+ uint64_t reserved_19_63:45;
+#endif
+ } s;
+};
+
+union cvmx_pip_gbl_ctl {
+ uint64_t u64;
+ struct cvmx_pip_gbl_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t egrp_dis:1;
+ uint64_t ihmsk_dis:1;
+ uint64_t dsa_grp_tvid:1;
+ uint64_t dsa_grp_scmd:1;
+ uint64_t dsa_grp_sid:1;
+ uint64_t reserved_21_23:3;
+ uint64_t ring_en:1;
+ uint64_t reserved_17_19:3;
+ uint64_t ignrs:1;
+ uint64_t vs_wqe:1;
+ uint64_t vs_qos:1;
+ uint64_t l2_mal:1;
+ uint64_t tcp_flag:1;
+ uint64_t l4_len:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_mal:1;
+ uint64_t reserved_6_7:2;
+ uint64_t ip6_eext:2;
+ uint64_t ip4_opts:1;
+ uint64_t ip_hop:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_chk:1;
+#else
+ uint64_t ip_chk:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_hop:1;
+ uint64_t ip4_opts:1;
+ uint64_t ip6_eext:2;
+ uint64_t reserved_6_7:2;
+ uint64_t l4_mal:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_len:1;
+ uint64_t tcp_flag:1;
+ uint64_t l2_mal:1;
+ uint64_t vs_qos:1;
+ uint64_t vs_wqe:1;
+ uint64_t ignrs:1;
+ uint64_t reserved_17_19:3;
+ uint64_t ring_en:1;
+ uint64_t reserved_21_23:3;
+ uint64_t dsa_grp_sid:1;
+ uint64_t dsa_grp_scmd:1;
+ uint64_t dsa_grp_tvid:1;
+ uint64_t ihmsk_dis:1;
+ uint64_t egrp_dis:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } s;
+ struct cvmx_pip_gbl_ctl_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t ignrs:1;
+ uint64_t vs_wqe:1;
+ uint64_t vs_qos:1;
+ uint64_t l2_mal:1;
+ uint64_t tcp_flag:1;
+ uint64_t l4_len:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_mal:1;
+ uint64_t reserved_6_7:2;
+ uint64_t ip6_eext:2;
+ uint64_t ip4_opts:1;
+ uint64_t ip_hop:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_chk:1;
+#else
+ uint64_t ip_chk:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_hop:1;
+ uint64_t ip4_opts:1;
+ uint64_t ip6_eext:2;
+ uint64_t reserved_6_7:2;
+ uint64_t l4_mal:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_len:1;
+ uint64_t tcp_flag:1;
+ uint64_t l2_mal:1;
+ uint64_t vs_qos:1;
+ uint64_t vs_wqe:1;
+ uint64_t ignrs:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } cn30xx;
+ struct cvmx_pip_gbl_ctl_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63:37;
+ uint64_t dsa_grp_tvid:1;
+ uint64_t dsa_grp_scmd:1;
+ uint64_t dsa_grp_sid:1;
+ uint64_t reserved_21_23:3;
+ uint64_t ring_en:1;
+ uint64_t reserved_17_19:3;
+ uint64_t ignrs:1;
+ uint64_t vs_wqe:1;
+ uint64_t vs_qos:1;
+ uint64_t l2_mal:1;
+ uint64_t tcp_flag:1;
+ uint64_t l4_len:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_mal:1;
+ uint64_t reserved_6_7:2;
+ uint64_t ip6_eext:2;
+ uint64_t ip4_opts:1;
+ uint64_t ip_hop:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_chk:1;
+#else
+ uint64_t ip_chk:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_hop:1;
+ uint64_t ip4_opts:1;
+ uint64_t ip6_eext:2;
+ uint64_t reserved_6_7:2;
+ uint64_t l4_mal:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_len:1;
+ uint64_t tcp_flag:1;
+ uint64_t l2_mal:1;
+ uint64_t vs_qos:1;
+ uint64_t vs_wqe:1;
+ uint64_t ignrs:1;
+ uint64_t reserved_17_19:3;
+ uint64_t ring_en:1;
+ uint64_t reserved_21_23:3;
+ uint64_t dsa_grp_sid:1;
+ uint64_t dsa_grp_scmd:1;
+ uint64_t dsa_grp_tvid:1;
+ uint64_t reserved_27_63:37;
+#endif
+ } cn52xx;
+ struct cvmx_pip_gbl_ctl_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63:43;
+ uint64_t ring_en:1;
+ uint64_t reserved_17_19:3;
+ uint64_t ignrs:1;
+ uint64_t vs_wqe:1;
+ uint64_t vs_qos:1;
+ uint64_t l2_mal:1;
+ uint64_t tcp_flag:1;
+ uint64_t l4_len:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_mal:1;
+ uint64_t reserved_6_7:2;
+ uint64_t ip6_eext:2;
+ uint64_t ip4_opts:1;
+ uint64_t ip_hop:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_chk:1;
+#else
+ uint64_t ip_chk:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_hop:1;
+ uint64_t ip4_opts:1;
+ uint64_t ip6_eext:2;
+ uint64_t reserved_6_7:2;
+ uint64_t l4_mal:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_len:1;
+ uint64_t tcp_flag:1;
+ uint64_t l2_mal:1;
+ uint64_t vs_qos:1;
+ uint64_t vs_wqe:1;
+ uint64_t ignrs:1;
+ uint64_t reserved_17_19:3;
+ uint64_t ring_en:1;
+ uint64_t reserved_21_63:43;
+#endif
+ } cn56xxp1;
+ struct cvmx_pip_gbl_ctl_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t ihmsk_dis:1;
+ uint64_t dsa_grp_tvid:1;
+ uint64_t dsa_grp_scmd:1;
+ uint64_t dsa_grp_sid:1;
+ uint64_t reserved_21_23:3;
+ uint64_t ring_en:1;
+ uint64_t reserved_17_19:3;
+ uint64_t ignrs:1;
+ uint64_t vs_wqe:1;
+ uint64_t vs_qos:1;
+ uint64_t l2_mal:1;
+ uint64_t tcp_flag:1;
+ uint64_t l4_len:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_mal:1;
+ uint64_t reserved_6_7:2;
+ uint64_t ip6_eext:2;
+ uint64_t ip4_opts:1;
+ uint64_t ip_hop:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_chk:1;
+#else
+ uint64_t ip_chk:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_hop:1;
+ uint64_t ip4_opts:1;
+ uint64_t ip6_eext:2;
+ uint64_t reserved_6_7:2;
+ uint64_t l4_mal:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_len:1;
+ uint64_t tcp_flag:1;
+ uint64_t l2_mal:1;
+ uint64_t vs_qos:1;
+ uint64_t vs_wqe:1;
+ uint64_t ignrs:1;
+ uint64_t reserved_17_19:3;
+ uint64_t ring_en:1;
+ uint64_t reserved_21_23:3;
+ uint64_t dsa_grp_sid:1;
+ uint64_t dsa_grp_scmd:1;
+ uint64_t dsa_grp_tvid:1;
+ uint64_t ihmsk_dis:1;
+ uint64_t reserved_28_63:36;
+#endif
+ } cn61xx;
+ struct cvmx_pip_gbl_ctl_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t egrp_dis:1;
+ uint64_t ihmsk_dis:1;
+ uint64_t dsa_grp_tvid:1;
+ uint64_t dsa_grp_scmd:1;
+ uint64_t dsa_grp_sid:1;
+ uint64_t reserved_17_23:7;
+ uint64_t ignrs:1;
+ uint64_t vs_wqe:1;
+ uint64_t vs_qos:1;
+ uint64_t l2_mal:1;
+ uint64_t tcp_flag:1;
+ uint64_t l4_len:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_mal:1;
+ uint64_t reserved_6_7:2;
+ uint64_t ip6_eext:2;
+ uint64_t ip4_opts:1;
+ uint64_t ip_hop:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_chk:1;
+#else
+ uint64_t ip_chk:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_hop:1;
+ uint64_t ip4_opts:1;
+ uint64_t ip6_eext:2;
+ uint64_t reserved_6_7:2;
+ uint64_t l4_mal:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_len:1;
+ uint64_t tcp_flag:1;
+ uint64_t l2_mal:1;
+ uint64_t vs_qos:1;
+ uint64_t vs_wqe:1;
+ uint64_t ignrs:1;
+ uint64_t reserved_17_23:7;
+ uint64_t dsa_grp_sid:1;
+ uint64_t dsa_grp_scmd:1;
+ uint64_t dsa_grp_tvid:1;
+ uint64_t ihmsk_dis:1;
+ uint64_t egrp_dis:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn68xx;
+ struct cvmx_pip_gbl_ctl_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t ihmsk_dis:1;
+ uint64_t dsa_grp_tvid:1;
+ uint64_t dsa_grp_scmd:1;
+ uint64_t dsa_grp_sid:1;
+ uint64_t reserved_17_23:7;
+ uint64_t ignrs:1;
+ uint64_t vs_wqe:1;
+ uint64_t vs_qos:1;
+ uint64_t l2_mal:1;
+ uint64_t tcp_flag:1;
+ uint64_t l4_len:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_mal:1;
+ uint64_t reserved_6_7:2;
+ uint64_t ip6_eext:2;
+ uint64_t ip4_opts:1;
+ uint64_t ip_hop:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_chk:1;
+#else
+ uint64_t ip_chk:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_hop:1;
+ uint64_t ip4_opts:1;
+ uint64_t ip6_eext:2;
+ uint64_t reserved_6_7:2;
+ uint64_t l4_mal:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_len:1;
+ uint64_t tcp_flag:1;
+ uint64_t l2_mal:1;
+ uint64_t vs_qos:1;
+ uint64_t vs_wqe:1;
+ uint64_t ignrs:1;
+ uint64_t reserved_17_23:7;
+ uint64_t dsa_grp_sid:1;
+ uint64_t dsa_grp_scmd:1;
+ uint64_t dsa_grp_tvid:1;
+ uint64_t ihmsk_dis:1;
+ uint64_t reserved_28_63:36;
+#endif
+ } cn68xxp1;
+};
+
+union cvmx_pip_hg_pri_qos {
+ uint64_t u64;
+ struct cvmx_pip_hg_pri_qos_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t up_qos:1;
+ uint64_t reserved_11_11:1;
+ uint64_t qos:3;
+ uint64_t reserved_6_7:2;
+ uint64_t pri:6;
+#else
+ uint64_t pri:6;
+ uint64_t reserved_6_7:2;
+ uint64_t qos:3;
+ uint64_t reserved_11_11:1;
+ uint64_t up_qos:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+};
+
+union cvmx_pip_int_en {
+ uint64_t u64;
+ struct cvmx_pip_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t punyerr:1;
+ uint64_t lenerr:1;
+ uint64_t maxerr:1;
+ uint64_t minerr:1;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t crcerr:1;
+ uint64_t pktdrp:1;
+#else
+ uint64_t pktdrp:1;
+ uint64_t crcerr:1;
+ uint64_t bckprs:1;
+ uint64_t prtnxa:1;
+ uint64_t badtag:1;
+ uint64_t skprunt:1;
+ uint64_t todoovr:1;
+ uint64_t feperr:1;
+ uint64_t beperr:1;
+ uint64_t minerr:1;
+ uint64_t maxerr:1;
+ uint64_t lenerr:1;
+ uint64_t punyerr:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+ struct cvmx_pip_int_en_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t crcerr:1;
+ uint64_t pktdrp:1;
+#else
+ uint64_t pktdrp:1;
+ uint64_t crcerr:1;
+ uint64_t bckprs:1;
+ uint64_t prtnxa:1;
+ uint64_t badtag:1;
+ uint64_t skprunt:1;
+ uint64_t todoovr:1;
+ uint64_t feperr:1;
+ uint64_t beperr:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn30xx;
+ struct cvmx_pip_int_en_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t lenerr:1;
+ uint64_t maxerr:1;
+ uint64_t minerr:1;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t reserved_1_1:1;
+ uint64_t pktdrp:1;
+#else
+ uint64_t pktdrp:1;
+ uint64_t reserved_1_1:1;
+ uint64_t bckprs:1;
+ uint64_t prtnxa:1;
+ uint64_t badtag:1;
+ uint64_t skprunt:1;
+ uint64_t todoovr:1;
+ uint64_t feperr:1;
+ uint64_t beperr:1;
+ uint64_t minerr:1;
+ uint64_t maxerr:1;
+ uint64_t lenerr:1;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn50xx;
+ struct cvmx_pip_int_en_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t punyerr:1;
+ uint64_t lenerr:1;
+ uint64_t maxerr:1;
+ uint64_t minerr:1;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t reserved_1_1:1;
+ uint64_t pktdrp:1;
+#else
+ uint64_t pktdrp:1;
+ uint64_t reserved_1_1:1;
+ uint64_t bckprs:1;
+ uint64_t prtnxa:1;
+ uint64_t badtag:1;
+ uint64_t skprunt:1;
+ uint64_t todoovr:1;
+ uint64_t feperr:1;
+ uint64_t beperr:1;
+ uint64_t minerr:1;
+ uint64_t maxerr:1;
+ uint64_t lenerr:1;
+ uint64_t punyerr:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } cn52xx;
+ struct cvmx_pip_int_en_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t lenerr:1;
+ uint64_t maxerr:1;
+ uint64_t minerr:1;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t crcerr:1;
+ uint64_t pktdrp:1;
+#else
+ uint64_t pktdrp:1;
+ uint64_t crcerr:1;
+ uint64_t bckprs:1;
+ uint64_t prtnxa:1;
+ uint64_t badtag:1;
+ uint64_t skprunt:1;
+ uint64_t todoovr:1;
+ uint64_t feperr:1;
+ uint64_t beperr:1;
+ uint64_t minerr:1;
+ uint64_t maxerr:1;
+ uint64_t lenerr:1;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn56xxp1;
+ struct cvmx_pip_int_en_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t punyerr:1;
+ uint64_t reserved_9_11:3;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t crcerr:1;
+ uint64_t pktdrp:1;
+#else
+ uint64_t pktdrp:1;
+ uint64_t crcerr:1;
+ uint64_t bckprs:1;
+ uint64_t prtnxa:1;
+ uint64_t badtag:1;
+ uint64_t skprunt:1;
+ uint64_t todoovr:1;
+ uint64_t feperr:1;
+ uint64_t beperr:1;
+ uint64_t reserved_9_11:3;
+ uint64_t punyerr:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } cn58xx;
+};
+
+union cvmx_pip_int_reg {
+ uint64_t u64;
+ struct cvmx_pip_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t punyerr:1;
+ uint64_t lenerr:1;
+ uint64_t maxerr:1;
+ uint64_t minerr:1;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t crcerr:1;
+ uint64_t pktdrp:1;
+#else
+ uint64_t pktdrp:1;
+ uint64_t crcerr:1;
+ uint64_t bckprs:1;
+ uint64_t prtnxa:1;
+ uint64_t badtag:1;
+ uint64_t skprunt:1;
+ uint64_t todoovr:1;
+ uint64_t feperr:1;
+ uint64_t beperr:1;
+ uint64_t minerr:1;
+ uint64_t maxerr:1;
+ uint64_t lenerr:1;
+ uint64_t punyerr:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+ struct cvmx_pip_int_reg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t crcerr:1;
+ uint64_t pktdrp:1;
+#else
+ uint64_t pktdrp:1;
+ uint64_t crcerr:1;
+ uint64_t bckprs:1;
+ uint64_t prtnxa:1;
+ uint64_t badtag:1;
+ uint64_t skprunt:1;
+ uint64_t todoovr:1;
+ uint64_t feperr:1;
+ uint64_t beperr:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn30xx;
+ struct cvmx_pip_int_reg_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t lenerr:1;
+ uint64_t maxerr:1;
+ uint64_t minerr:1;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t reserved_1_1:1;
+ uint64_t pktdrp:1;
+#else
+ uint64_t pktdrp:1;
+ uint64_t reserved_1_1:1;
+ uint64_t bckprs:1;
+ uint64_t prtnxa:1;
+ uint64_t badtag:1;
+ uint64_t skprunt:1;
+ uint64_t todoovr:1;
+ uint64_t feperr:1;
+ uint64_t beperr:1;
+ uint64_t minerr:1;
+ uint64_t maxerr:1;
+ uint64_t lenerr:1;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn50xx;
+ struct cvmx_pip_int_reg_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t punyerr:1;
+ uint64_t lenerr:1;
+ uint64_t maxerr:1;
+ uint64_t minerr:1;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t reserved_1_1:1;
+ uint64_t pktdrp:1;
+#else
+ uint64_t pktdrp:1;
+ uint64_t reserved_1_1:1;
+ uint64_t bckprs:1;
+ uint64_t prtnxa:1;
+ uint64_t badtag:1;
+ uint64_t skprunt:1;
+ uint64_t todoovr:1;
+ uint64_t feperr:1;
+ uint64_t beperr:1;
+ uint64_t minerr:1;
+ uint64_t maxerr:1;
+ uint64_t lenerr:1;
+ uint64_t punyerr:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } cn52xx;
+ struct cvmx_pip_int_reg_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t lenerr:1;
+ uint64_t maxerr:1;
+ uint64_t minerr:1;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t crcerr:1;
+ uint64_t pktdrp:1;
+#else
+ uint64_t pktdrp:1;
+ uint64_t crcerr:1;
+ uint64_t bckprs:1;
+ uint64_t prtnxa:1;
+ uint64_t badtag:1;
+ uint64_t skprunt:1;
+ uint64_t todoovr:1;
+ uint64_t feperr:1;
+ uint64_t beperr:1;
+ uint64_t minerr:1;
+ uint64_t maxerr:1;
+ uint64_t lenerr:1;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn56xxp1;
+ struct cvmx_pip_int_reg_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t punyerr:1;
+ uint64_t reserved_9_11:3;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t crcerr:1;
+ uint64_t pktdrp:1;
+#else
+ uint64_t pktdrp:1;
+ uint64_t crcerr:1;
+ uint64_t bckprs:1;
+ uint64_t prtnxa:1;
+ uint64_t badtag:1;
+ uint64_t skprunt:1;
+ uint64_t todoovr:1;
+ uint64_t feperr:1;
+ uint64_t beperr:1;
+ uint64_t reserved_9_11:3;
+ uint64_t punyerr:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } cn58xx;
+};
+
+union cvmx_pip_ip_offset {
+ uint64_t u64;
+ struct cvmx_pip_ip_offset_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t offset:3;
+#else
+ uint64_t offset:3;
+ uint64_t reserved_3_63:61;
+#endif
+ } s;
+};
+
+union cvmx_pip_pri_tblx {
+ uint64_t u64;
+ struct cvmx_pip_pri_tblx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t diff2_padd:8;
+ uint64_t hg2_padd:8;
+ uint64_t vlan2_padd:8;
+ uint64_t reserved_38_39:2;
+ uint64_t diff2_bpid:6;
+ uint64_t reserved_30_31:2;
+ uint64_t hg2_bpid:6;
+ uint64_t reserved_22_23:2;
+ uint64_t vlan2_bpid:6;
+ uint64_t reserved_11_15:5;
+ uint64_t diff2_qos:3;
+ uint64_t reserved_7_7:1;
+ uint64_t hg2_qos:3;
+ uint64_t reserved_3_3:1;
+ uint64_t vlan2_qos:3;
+#else
+ uint64_t vlan2_qos:3;
+ uint64_t reserved_3_3:1;
+ uint64_t hg2_qos:3;
+ uint64_t reserved_7_7:1;
+ uint64_t diff2_qos:3;
+ uint64_t reserved_11_15:5;
+ uint64_t vlan2_bpid:6;
+ uint64_t reserved_22_23:2;
+ uint64_t hg2_bpid:6;
+ uint64_t reserved_30_31:2;
+ uint64_t diff2_bpid:6;
+ uint64_t reserved_38_39:2;
+ uint64_t vlan2_padd:8;
+ uint64_t hg2_padd:8;
+ uint64_t diff2_padd:8;
+#endif
+ } s;
+};
+
+union cvmx_pip_prt_cfgx {
+ uint64_t u64;
+ struct cvmx_pip_prt_cfgx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_55_63:9;
+ uint64_t ih_pri:1;
+ uint64_t len_chk_sel:1;
+ uint64_t pad_len:1;
+ uint64_t vlan_len:1;
+ uint64_t lenerr_en:1;
+ uint64_t maxerr_en:1;
+ uint64_t minerr_en:1;
+ uint64_t grp_wat_47:4;
+ uint64_t qos_wat_47:4;
+ uint64_t reserved_37_39:3;
+ uint64_t rawdrp:1;
+ uint64_t tag_inc:2;
+ uint64_t dyn_rs:1;
+ uint64_t inst_hdr:1;
+ uint64_t grp_wat:4;
+ uint64_t hg_qos:1;
+ uint64_t qos:3;
+ uint64_t qos_wat:4;
+ uint64_t qos_vsel:1;
+ uint64_t qos_vod:1;
+ uint64_t qos_diff:1;
+ uint64_t qos_vlan:1;
+ uint64_t reserved_13_15:3;
+ uint64_t crc_en:1;
+ uint64_t higig_en:1;
+ uint64_t dsa_en:1;
+ uint64_t mode:2;
+ uint64_t reserved_7_7:1;
+ uint64_t skip:7;
+#else
+ uint64_t skip:7;
+ uint64_t reserved_7_7:1;
+ uint64_t mode:2;
+ uint64_t dsa_en:1;
+ uint64_t higig_en:1;
+ uint64_t crc_en:1;
+ uint64_t reserved_13_15:3;
+ uint64_t qos_vlan:1;
+ uint64_t qos_diff:1;
+ uint64_t qos_vod:1;
+ uint64_t qos_vsel:1;
+ uint64_t qos_wat:4;
+ uint64_t qos:3;
+ uint64_t hg_qos:1;
+ uint64_t grp_wat:4;
+ uint64_t inst_hdr:1;
+ uint64_t dyn_rs:1;
+ uint64_t tag_inc:2;
+ uint64_t rawdrp:1;
+ uint64_t reserved_37_39:3;
+ uint64_t qos_wat_47:4;
+ uint64_t grp_wat_47:4;
+ uint64_t minerr_en:1;
+ uint64_t maxerr_en:1;
+ uint64_t lenerr_en:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t len_chk_sel:1;
+ uint64_t ih_pri:1;
+ uint64_t reserved_55_63:9;
+#endif
+ } s;
+ struct cvmx_pip_prt_cfgx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63:27;
+ uint64_t rawdrp:1;
+ uint64_t tag_inc:2;
+ uint64_t dyn_rs:1;
+ uint64_t inst_hdr:1;
+ uint64_t grp_wat:4;
+ uint64_t reserved_27_27:1;
+ uint64_t qos:3;
+ uint64_t qos_wat:4;
+ uint64_t reserved_18_19:2;
+ uint64_t qos_diff:1;
+ uint64_t qos_vlan:1;
+ uint64_t reserved_10_15:6;
+ uint64_t mode:2;
+ uint64_t reserved_7_7:1;
+ uint64_t skip:7;
+#else
+ uint64_t skip:7;
+ uint64_t reserved_7_7:1;
+ uint64_t mode:2;
+ uint64_t reserved_10_15:6;
+ uint64_t qos_vlan:1;
+ uint64_t qos_diff:1;
+ uint64_t reserved_18_19:2;
+ uint64_t qos_wat:4;
+ uint64_t qos:3;
+ uint64_t reserved_27_27:1;
+ uint64_t grp_wat:4;
+ uint64_t inst_hdr:1;
+ uint64_t dyn_rs:1;
+ uint64_t tag_inc:2;
+ uint64_t rawdrp:1;
+ uint64_t reserved_37_63:27;
+#endif
+ } cn30xx;
+ struct cvmx_pip_prt_cfgx_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63:27;
+ uint64_t rawdrp:1;
+ uint64_t tag_inc:2;
+ uint64_t dyn_rs:1;
+ uint64_t inst_hdr:1;
+ uint64_t grp_wat:4;
+ uint64_t reserved_27_27:1;
+ uint64_t qos:3;
+ uint64_t qos_wat:4;
+ uint64_t reserved_18_19:2;
+ uint64_t qos_diff:1;
+ uint64_t qos_vlan:1;
+ uint64_t reserved_13_15:3;
+ uint64_t crc_en:1;
+ uint64_t reserved_10_11:2;
+ uint64_t mode:2;
+ uint64_t reserved_7_7:1;
+ uint64_t skip:7;
+#else
+ uint64_t skip:7;
+ uint64_t reserved_7_7:1;
+ uint64_t mode:2;
+ uint64_t reserved_10_11:2;
+ uint64_t crc_en:1;
+ uint64_t reserved_13_15:3;
+ uint64_t qos_vlan:1;
+ uint64_t qos_diff:1;
+ uint64_t reserved_18_19:2;
+ uint64_t qos_wat:4;
+ uint64_t qos:3;
+ uint64_t reserved_27_27:1;
+ uint64_t grp_wat:4;
+ uint64_t inst_hdr:1;
+ uint64_t dyn_rs:1;
+ uint64_t tag_inc:2;
+ uint64_t rawdrp:1;
+ uint64_t reserved_37_63:27;
+#endif
+ } cn38xx;
+ struct cvmx_pip_prt_cfgx_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_53_63:11;
+ uint64_t pad_len:1;
+ uint64_t vlan_len:1;
+ uint64_t lenerr_en:1;
+ uint64_t maxerr_en:1;
+ uint64_t minerr_en:1;
+ uint64_t grp_wat_47:4;
+ uint64_t qos_wat_47:4;
+ uint64_t reserved_37_39:3;
+ uint64_t rawdrp:1;
+ uint64_t tag_inc:2;
+ uint64_t dyn_rs:1;
+ uint64_t inst_hdr:1;
+ uint64_t grp_wat:4;
+ uint64_t reserved_27_27:1;
+ uint64_t qos:3;
+ uint64_t qos_wat:4;
+ uint64_t reserved_19_19:1;
+ uint64_t qos_vod:1;
+ uint64_t qos_diff:1;
+ uint64_t qos_vlan:1;
+ uint64_t reserved_13_15:3;
+ uint64_t crc_en:1;
+ uint64_t reserved_10_11:2;
+ uint64_t mode:2;
+ uint64_t reserved_7_7:1;
+ uint64_t skip:7;
+#else
+ uint64_t skip:7;
+ uint64_t reserved_7_7:1;
+ uint64_t mode:2;
+ uint64_t reserved_10_11:2;
+ uint64_t crc_en:1;
+ uint64_t reserved_13_15:3;
+ uint64_t qos_vlan:1;
+ uint64_t qos_diff:1;
+ uint64_t qos_vod:1;
+ uint64_t reserved_19_19:1;
+ uint64_t qos_wat:4;
+ uint64_t qos:3;
+ uint64_t reserved_27_27:1;
+ uint64_t grp_wat:4;
+ uint64_t inst_hdr:1;
+ uint64_t dyn_rs:1;
+ uint64_t tag_inc:2;
+ uint64_t rawdrp:1;
+ uint64_t reserved_37_39:3;
+ uint64_t qos_wat_47:4;
+ uint64_t grp_wat_47:4;
+ uint64_t minerr_en:1;
+ uint64_t maxerr_en:1;
+ uint64_t lenerr_en:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t reserved_53_63:11;
+#endif
+ } cn50xx;
+ struct cvmx_pip_prt_cfgx_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_53_63:11;
+ uint64_t pad_len:1;
+ uint64_t vlan_len:1;
+ uint64_t lenerr_en:1;
+ uint64_t maxerr_en:1;
+ uint64_t minerr_en:1;
+ uint64_t grp_wat_47:4;
+ uint64_t qos_wat_47:4;
+ uint64_t reserved_37_39:3;
+ uint64_t rawdrp:1;
+ uint64_t tag_inc:2;
+ uint64_t dyn_rs:1;
+ uint64_t inst_hdr:1;
+ uint64_t grp_wat:4;
+ uint64_t hg_qos:1;
+ uint64_t qos:3;
+ uint64_t qos_wat:4;
+ uint64_t qos_vsel:1;
+ uint64_t qos_vod:1;
+ uint64_t qos_diff:1;
+ uint64_t qos_vlan:1;
+ uint64_t reserved_13_15:3;
+ uint64_t crc_en:1;
+ uint64_t higig_en:1;
+ uint64_t dsa_en:1;
+ uint64_t mode:2;
+ uint64_t reserved_7_7:1;
+ uint64_t skip:7;
+#else
+ uint64_t skip:7;
+ uint64_t reserved_7_7:1;
+ uint64_t mode:2;
+ uint64_t dsa_en:1;
+ uint64_t higig_en:1;
+ uint64_t crc_en:1;
+ uint64_t reserved_13_15:3;
+ uint64_t qos_vlan:1;
+ uint64_t qos_diff:1;
+ uint64_t qos_vod:1;
+ uint64_t qos_vsel:1;
+ uint64_t qos_wat:4;
+ uint64_t qos:3;
+ uint64_t hg_qos:1;
+ uint64_t grp_wat:4;
+ uint64_t inst_hdr:1;
+ uint64_t dyn_rs:1;
+ uint64_t tag_inc:2;
+ uint64_t rawdrp:1;
+ uint64_t reserved_37_39:3;
+ uint64_t qos_wat_47:4;
+ uint64_t grp_wat_47:4;
+ uint64_t minerr_en:1;
+ uint64_t maxerr_en:1;
+ uint64_t lenerr_en:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t reserved_53_63:11;
+#endif
+ } cn52xx;
+ struct cvmx_pip_prt_cfgx_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63:27;
+ uint64_t rawdrp:1;
+ uint64_t tag_inc:2;
+ uint64_t dyn_rs:1;
+ uint64_t inst_hdr:1;
+ uint64_t grp_wat:4;
+ uint64_t reserved_27_27:1;
+ uint64_t qos:3;
+ uint64_t qos_wat:4;
+ uint64_t reserved_19_19:1;
+ uint64_t qos_vod:1;
+ uint64_t qos_diff:1;
+ uint64_t qos_vlan:1;
+ uint64_t reserved_13_15:3;
+ uint64_t crc_en:1;
+ uint64_t reserved_10_11:2;
+ uint64_t mode:2;
+ uint64_t reserved_7_7:1;
+ uint64_t skip:7;
+#else
+ uint64_t skip:7;
+ uint64_t reserved_7_7:1;
+ uint64_t mode:2;
+ uint64_t reserved_10_11:2;
+ uint64_t crc_en:1;
+ uint64_t reserved_13_15:3;
+ uint64_t qos_vlan:1;
+ uint64_t qos_diff:1;
+ uint64_t qos_vod:1;
+ uint64_t reserved_19_19:1;
+ uint64_t qos_wat:4;
+ uint64_t qos:3;
+ uint64_t reserved_27_27:1;
+ uint64_t grp_wat:4;
+ uint64_t inst_hdr:1;
+ uint64_t dyn_rs:1;
+ uint64_t tag_inc:2;
+ uint64_t rawdrp:1;
+ uint64_t reserved_37_63:27;
+#endif
+ } cn58xx;
+ struct cvmx_pip_prt_cfgx_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_55_63:9;
+ uint64_t ih_pri:1;
+ uint64_t len_chk_sel:1;
+ uint64_t pad_len:1;
+ uint64_t vlan_len:1;
+ uint64_t lenerr_en:1;
+ uint64_t maxerr_en:1;
+ uint64_t minerr_en:1;
+ uint64_t grp_wat_47:4;
+ uint64_t qos_wat_47:4;
+ uint64_t reserved_37_39:3;
+ uint64_t rawdrp:1;
+ uint64_t tag_inc:2;
+ uint64_t dyn_rs:1;
+ uint64_t inst_hdr:1;
+ uint64_t grp_wat:4;
+ uint64_t hg_qos:1;
+ uint64_t qos:3;
+ uint64_t qos_wat:4;
+ uint64_t reserved_19_19:1;
+ uint64_t qos_vod:1;
+ uint64_t qos_diff:1;
+ uint64_t qos_vlan:1;
+ uint64_t reserved_13_15:3;
+ uint64_t crc_en:1;
+ uint64_t higig_en:1;
+ uint64_t dsa_en:1;
+ uint64_t mode:2;
+ uint64_t reserved_7_7:1;
+ uint64_t skip:7;
+#else
+ uint64_t skip:7;
+ uint64_t reserved_7_7:1;
+ uint64_t mode:2;
+ uint64_t dsa_en:1;
+ uint64_t higig_en:1;
+ uint64_t crc_en:1;
+ uint64_t reserved_13_15:3;
+ uint64_t qos_vlan:1;
+ uint64_t qos_diff:1;
+ uint64_t qos_vod:1;
+ uint64_t reserved_19_19:1;
+ uint64_t qos_wat:4;
+ uint64_t qos:3;
+ uint64_t hg_qos:1;
+ uint64_t grp_wat:4;
+ uint64_t inst_hdr:1;
+ uint64_t dyn_rs:1;
+ uint64_t tag_inc:2;
+ uint64_t rawdrp:1;
+ uint64_t reserved_37_39:3;
+ uint64_t qos_wat_47:4;
+ uint64_t grp_wat_47:4;
+ uint64_t minerr_en:1;
+ uint64_t maxerr_en:1;
+ uint64_t lenerr_en:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t len_chk_sel:1;
+ uint64_t ih_pri:1;
+ uint64_t reserved_55_63:9;
+#endif
+ } cn68xx;
+};
+
+union cvmx_pip_prt_cfgbx {
+ uint64_t u64;
+ struct cvmx_pip_prt_cfgbx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63:25;
+ uint64_t alt_skp_sel:2;
+ uint64_t alt_skp_en:1;
+ uint64_t reserved_35_35:1;
+ uint64_t bsel_num:2;
+ uint64_t bsel_en:1;
+ uint64_t reserved_24_31:8;
+ uint64_t base:8;
+ uint64_t reserved_6_15:10;
+ uint64_t bpid:6;
+#else
+ uint64_t bpid:6;
+ uint64_t reserved_6_15:10;
+ uint64_t base:8;
+ uint64_t reserved_24_31:8;
+ uint64_t bsel_en:1;
+ uint64_t bsel_num:2;
+ uint64_t reserved_35_35:1;
+ uint64_t alt_skp_en:1;
+ uint64_t alt_skp_sel:2;
+ uint64_t reserved_39_63:25;
+#endif
+ } s;
+ struct cvmx_pip_prt_cfgbx_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63:25;
+ uint64_t alt_skp_sel:2;
+ uint64_t alt_skp_en:1;
+ uint64_t reserved_35_35:1;
+ uint64_t bsel_num:2;
+ uint64_t bsel_en:1;
+ uint64_t reserved_0_31:32;
+#else
+ uint64_t reserved_0_31:32;
+ uint64_t bsel_en:1;
+ uint64_t bsel_num:2;
+ uint64_t reserved_35_35:1;
+ uint64_t alt_skp_en:1;
+ uint64_t alt_skp_sel:2;
+ uint64_t reserved_39_63:25;
+#endif
+ } cn61xx;
+ struct cvmx_pip_prt_cfgbx_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63:25;
+ uint64_t alt_skp_sel:2;
+ uint64_t alt_skp_en:1;
+ uint64_t reserved_0_35:36;
+#else
+ uint64_t reserved_0_35:36;
+ uint64_t alt_skp_en:1;
+ uint64_t alt_skp_sel:2;
+ uint64_t reserved_39_63:25;
+#endif
+ } cn66xx;
+ struct cvmx_pip_prt_cfgbx_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t base:8;
+ uint64_t reserved_6_15:10;
+ uint64_t bpid:6;
+#else
+ uint64_t bpid:6;
+ uint64_t reserved_6_15:10;
+ uint64_t base:8;
+ uint64_t reserved_24_63:40;
+#endif
+ } cn68xxp1;
+};
+
+union cvmx_pip_prt_tagx {
+ uint64_t u64;
+ struct cvmx_pip_prt_tagx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63:10;
+ uint64_t portadd_en:1;
+ uint64_t inc_hwchk:1;
+ uint64_t reserved_50_51:2;
+ uint64_t grptagbase_msb:2;
+ uint64_t reserved_46_47:2;
+ uint64_t grptagmask_msb:2;
+ uint64_t reserved_42_43:2;
+ uint64_t grp_msb:2;
+ uint64_t grptagbase:4;
+ uint64_t grptagmask:4;
+ uint64_t grptag:1;
+ uint64_t grptag_mskip:1;
+ uint64_t tag_mode:2;
+ uint64_t inc_vs:2;
+ uint64_t inc_vlan:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_src_flag:1;
+ uint64_t tcp6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t non_tag_type:2;
+ uint64_t grp:4;
+#else
+ uint64_t grp:4;
+ uint64_t non_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t tcp6_tag_type:2;
+ uint64_t ip4_src_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t inc_vlan:1;
+ uint64_t inc_vs:2;
+ uint64_t tag_mode:2;
+ uint64_t grptag_mskip:1;
+ uint64_t grptag:1;
+ uint64_t grptagmask:4;
+ uint64_t grptagbase:4;
+ uint64_t grp_msb:2;
+ uint64_t reserved_42_43:2;
+ uint64_t grptagmask_msb:2;
+ uint64_t reserved_46_47:2;
+ uint64_t grptagbase_msb:2;
+ uint64_t reserved_50_51:2;
+ uint64_t inc_hwchk:1;
+ uint64_t portadd_en:1;
+ uint64_t reserved_54_63:10;
+#endif
+ } s;
+ struct cvmx_pip_prt_tagx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t grptagbase:4;
+ uint64_t grptagmask:4;
+ uint64_t grptag:1;
+ uint64_t reserved_30_30:1;
+ uint64_t tag_mode:2;
+ uint64_t inc_vs:2;
+ uint64_t inc_vlan:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_src_flag:1;
+ uint64_t tcp6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t non_tag_type:2;
+ uint64_t grp:4;
+#else
+ uint64_t grp:4;
+ uint64_t non_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t tcp6_tag_type:2;
+ uint64_t ip4_src_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t inc_vlan:1;
+ uint64_t inc_vs:2;
+ uint64_t tag_mode:2;
+ uint64_t reserved_30_30:1;
+ uint64_t grptag:1;
+ uint64_t grptagmask:4;
+ uint64_t grptagbase:4;
+ uint64_t reserved_40_63:24;
+#endif
+ } cn30xx;
+ struct cvmx_pip_prt_tagx_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t grptagbase:4;
+ uint64_t grptagmask:4;
+ uint64_t grptag:1;
+ uint64_t grptag_mskip:1;
+ uint64_t tag_mode:2;
+ uint64_t inc_vs:2;
+ uint64_t inc_vlan:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_src_flag:1;
+ uint64_t tcp6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t non_tag_type:2;
+ uint64_t grp:4;
+#else
+ uint64_t grp:4;
+ uint64_t non_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t tcp6_tag_type:2;
+ uint64_t ip4_src_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t inc_vlan:1;
+ uint64_t inc_vs:2;
+ uint64_t tag_mode:2;
+ uint64_t grptag_mskip:1;
+ uint64_t grptag:1;
+ uint64_t grptagmask:4;
+ uint64_t grptagbase:4;
+ uint64_t reserved_40_63:24;
+#endif
+ } cn50xx;
+};
+
+union cvmx_pip_qos_diffx {
+ uint64_t u64;
+ struct cvmx_pip_qos_diffx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t qos:3;
+#else
+ uint64_t qos:3;
+ uint64_t reserved_3_63:61;
+#endif
+ } s;
+};
+
+union cvmx_pip_qos_vlanx {
+ uint64_t u64;
+ struct cvmx_pip_qos_vlanx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t qos1:3;
+ uint64_t reserved_3_3:1;
+ uint64_t qos:3;
+#else
+ uint64_t qos:3;
+ uint64_t reserved_3_3:1;
+ uint64_t qos1:3;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+ struct cvmx_pip_qos_vlanx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t qos:3;
+#else
+ uint64_t qos:3;
+ uint64_t reserved_3_63:61;
+#endif
+ } cn30xx;
+};
+
+union cvmx_pip_qos_watchx {
+ uint64_t u64;
+ struct cvmx_pip_qos_watchx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t mask:16;
+ uint64_t reserved_30_31:2;
+ uint64_t grp:6;
+ uint64_t reserved_23_23:1;
+ uint64_t qos:3;
+ uint64_t reserved_19_19:1;
+ uint64_t match_type:3;
+ uint64_t match_value:16;
+#else
+ uint64_t match_value:16;
+ uint64_t match_type:3;
+ uint64_t reserved_19_19:1;
+ uint64_t qos:3;
+ uint64_t reserved_23_23:1;
+ uint64_t grp:6;
+ uint64_t reserved_30_31:2;
+ uint64_t mask:16;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+ struct cvmx_pip_qos_watchx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t mask:16;
+ uint64_t reserved_28_31:4;
+ uint64_t grp:4;
+ uint64_t reserved_23_23:1;
+ uint64_t qos:3;
+ uint64_t reserved_18_19:2;
+ uint64_t match_type:2;
+ uint64_t match_value:16;
+#else
+ uint64_t match_value:16;
+ uint64_t match_type:2;
+ uint64_t reserved_18_19:2;
+ uint64_t qos:3;
+ uint64_t reserved_23_23:1;
+ uint64_t grp:4;
+ uint64_t reserved_28_31:4;
+ uint64_t mask:16;
+ uint64_t reserved_48_63:16;
+#endif
+ } cn30xx;
+ struct cvmx_pip_qos_watchx_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t mask:16;
+ uint64_t reserved_28_31:4;
+ uint64_t grp:4;
+ uint64_t reserved_23_23:1;
+ uint64_t qos:3;
+ uint64_t reserved_19_19:1;
+ uint64_t match_type:3;
+ uint64_t match_value:16;
+#else
+ uint64_t match_value:16;
+ uint64_t match_type:3;
+ uint64_t reserved_19_19:1;
+ uint64_t qos:3;
+ uint64_t reserved_23_23:1;
+ uint64_t grp:4;
+ uint64_t reserved_28_31:4;
+ uint64_t mask:16;
+ uint64_t reserved_48_63:16;
+#endif
+ } cn50xx;
+};
+
+union cvmx_pip_raw_word {
+ uint64_t u64;
+ struct cvmx_pip_raw_word_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63:8;
+ uint64_t word:56;
+#else
+ uint64_t word:56;
+ uint64_t reserved_56_63:8;
+#endif
+ } s;
+};
+
+union cvmx_pip_sft_rst {
+ uint64_t u64;
+ struct cvmx_pip_sft_rst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t rst:1;
+#else
+ uint64_t rst:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat0_x {
+ uint64_t u64;
+ struct cvmx_pip_stat0_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t drp_pkts:32;
+ uint64_t drp_octs:32;
+#else
+ uint64_t drp_octs:32;
+ uint64_t drp_pkts:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat0_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat0_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t drp_pkts:32;
+ uint64_t drp_octs:32;
+#else
+ uint64_t drp_octs:32;
+ uint64_t drp_pkts:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat10_x {
+ uint64_t u64;
+ struct cvmx_pip_stat10_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcast:32;
+ uint64_t mcast:32;
+#else
+ uint64_t mcast:32;
+ uint64_t bcast:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat10_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat10_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcast:32;
+ uint64_t mcast:32;
+#else
+ uint64_t mcast:32;
+ uint64_t bcast:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat11_x {
+ uint64_t u64;
+ struct cvmx_pip_stat11_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcast:32;
+ uint64_t mcast:32;
+#else
+ uint64_t mcast:32;
+ uint64_t bcast:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat11_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat11_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcast:32;
+ uint64_t mcast:32;
+#else
+ uint64_t mcast:32;
+ uint64_t bcast:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat1_x {
+ uint64_t u64;
+ struct cvmx_pip_stat1_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t octs:48;
+#else
+ uint64_t octs:48;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat1_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat1_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t octs:48;
+#else
+ uint64_t octs:48;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat2_x {
+ uint64_t u64;
+ struct cvmx_pip_stat2_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pkts:32;
+ uint64_t raw:32;
+#else
+ uint64_t raw:32;
+ uint64_t pkts:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat2_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat2_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pkts:32;
+ uint64_t raw:32;
+#else
+ uint64_t raw:32;
+ uint64_t pkts:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat3_x {
+ uint64_t u64;
+ struct cvmx_pip_stat3_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcst:32;
+ uint64_t mcst:32;
+#else
+ uint64_t mcst:32;
+ uint64_t bcst:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat3_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat3_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcst:32;
+ uint64_t mcst:32;
+#else
+ uint64_t mcst:32;
+ uint64_t bcst:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat4_x {
+ uint64_t u64;
+ struct cvmx_pip_stat4_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h65to127:32;
+ uint64_t h64:32;
+#else
+ uint64_t h64:32;
+ uint64_t h65to127:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat4_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat4_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h65to127:32;
+ uint64_t h64:32;
+#else
+ uint64_t h64:32;
+ uint64_t h65to127:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat5_x {
+ uint64_t u64;
+ struct cvmx_pip_stat5_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h256to511:32;
+ uint64_t h128to255:32;
+#else
+ uint64_t h128to255:32;
+ uint64_t h256to511:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat5_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat5_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h256to511:32;
+ uint64_t h128to255:32;
+#else
+ uint64_t h128to255:32;
+ uint64_t h256to511:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat6_x {
+ uint64_t u64;
+ struct cvmx_pip_stat6_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h1024to1518:32;
+ uint64_t h512to1023:32;
+#else
+ uint64_t h512to1023:32;
+ uint64_t h1024to1518:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat6_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat6_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h1024to1518:32;
+ uint64_t h512to1023:32;
+#else
+ uint64_t h512to1023:32;
+ uint64_t h1024to1518:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat7_x {
+ uint64_t u64;
+ struct cvmx_pip_stat7_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fcs:32;
+ uint64_t h1519:32;
+#else
+ uint64_t h1519:32;
+ uint64_t fcs:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat7_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat7_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fcs:32;
+ uint64_t h1519:32;
+#else
+ uint64_t h1519:32;
+ uint64_t fcs:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat8_x {
+ uint64_t u64;
+ struct cvmx_pip_stat8_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t frag:32;
+ uint64_t undersz:32;
+#else
+ uint64_t undersz:32;
+ uint64_t frag:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat8_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat8_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t frag:32;
+ uint64_t undersz:32;
+#else
+ uint64_t undersz:32;
+ uint64_t frag:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat9_x {
+ uint64_t u64;
+ struct cvmx_pip_stat9_x_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t jabber:32;
+ uint64_t oversz:32;
+#else
+ uint64_t oversz:32;
+ uint64_t jabber:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat9_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat9_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t jabber:32;
+ uint64_t oversz:32;
+#else
+ uint64_t oversz:32;
+ uint64_t jabber:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat_ctl {
+ uint64_t u64;
+ struct cvmx_pip_stat_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t mode:1;
+ uint64_t reserved_1_7:7;
+ uint64_t rdclr:1;
+#else
+ uint64_t rdclr:1;
+ uint64_t reserved_1_7:7;
+ uint64_t mode:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } s;
+ struct cvmx_pip_stat_ctl_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t rdclr:1;
+#else
+ uint64_t rdclr:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } cn30xx;
+};
+
+union cvmx_pip_stat_inb_errsx {
+ uint64_t u64;
+ struct cvmx_pip_stat_inb_errsx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t errs:16;
+#else
+ uint64_t errs:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat_inb_errs_pkndx {
+ uint64_t u64;
+ struct cvmx_pip_stat_inb_errs_pkndx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t errs:16;
+#else
+ uint64_t errs:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat_inb_octsx {
+ uint64_t u64;
+ struct cvmx_pip_stat_inb_octsx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t octs:48;
+#else
+ uint64_t octs:48;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat_inb_octs_pkndx {
+ uint64_t u64;
+ struct cvmx_pip_stat_inb_octs_pkndx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t octs:48;
+#else
+ uint64_t octs:48;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat_inb_pktsx {
+ uint64_t u64;
+ struct cvmx_pip_stat_inb_pktsx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t pkts:32;
+#else
+ uint64_t pkts:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_stat_inb_pkts_pkndx {
+ uint64_t u64;
+ struct cvmx_pip_stat_inb_pkts_pkndx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t pkts:32;
+#else
+ uint64_t pkts:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_sub_pkind_fcsx {
+ uint64_t u64;
+ struct cvmx_pip_sub_pkind_fcsx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t port_bit:64;
+#else
+ uint64_t port_bit:64;
+#endif
+ } s;
+};
+
+union cvmx_pip_tag_incx {
+ uint64_t u64;
+ struct cvmx_pip_tag_incx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t en:8;
+#else
+ uint64_t en:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_pip_tag_mask {
+ uint64_t u64;
+ struct cvmx_pip_tag_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t mask:16;
+#else
+ uint64_t mask:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pip_tag_secret {
+ uint64_t u64;
+ struct cvmx_pip_tag_secret_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t dst:16;
+ uint64_t src:16;
+#else
+ uint64_t src:16;
+ uint64_t dst:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_todo_entry {
+ uint64_t u64;
+ struct cvmx_pip_todo_entry_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t val:1;
+ uint64_t reserved_62_62:1;
+ uint64_t entry:62;
+#else
+ uint64_t entry:62;
+ uint64_t reserved_62_62:1;
+ uint64_t val:1;
+#endif
+ } s;
+};
+
+union cvmx_pip_vlan_etypesx {
+ uint64_t u64;
+ struct cvmx_pip_vlan_etypesx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t type3:16;
+ uint64_t type2:16;
+ uint64_t type1:16;
+ uint64_t type0:16;
+#else
+ uint64_t type0:16;
+ uint64_t type1:16;
+ uint64_t type2:16;
+ uint64_t type3:16;
+#endif
+ } s;
+};
+
+union cvmx_pip_xstat0_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat0_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t drp_pkts:32;
+ uint64_t drp_octs:32;
+#else
+ uint64_t drp_octs:32;
+ uint64_t drp_pkts:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_xstat10_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat10_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcast:32;
+ uint64_t mcast:32;
+#else
+ uint64_t mcast:32;
+ uint64_t bcast:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_xstat11_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat11_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcast:32;
+ uint64_t mcast:32;
+#else
+ uint64_t mcast:32;
+ uint64_t bcast:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_xstat1_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat1_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t octs:48;
+#else
+ uint64_t octs:48;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_pip_xstat2_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat2_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t pkts:32;
+ uint64_t raw:32;
+#else
+ uint64_t raw:32;
+ uint64_t pkts:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_xstat3_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat3_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bcst:32;
+ uint64_t mcst:32;
+#else
+ uint64_t mcst:32;
+ uint64_t bcst:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_xstat4_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat4_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h65to127:32;
+ uint64_t h64:32;
+#else
+ uint64_t h64:32;
+ uint64_t h65to127:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_xstat5_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat5_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h256to511:32;
+ uint64_t h128to255:32;
+#else
+ uint64_t h128to255:32;
+ uint64_t h256to511:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_xstat6_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat6_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t h1024to1518:32;
+ uint64_t h512to1023:32;
+#else
+ uint64_t h512to1023:32;
+ uint64_t h1024to1518:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_xstat7_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat7_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fcs:32;
+ uint64_t h1519:32;
+#else
+ uint64_t h1519:32;
+ uint64_t fcs:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_xstat8_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat8_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t frag:32;
+ uint64_t undersz:32;
+#else
+ uint64_t undersz:32;
+ uint64_t frag:32;
+#endif
+ } s;
+};
+
+union cvmx_pip_xstat9_prtx {
+ uint64_t u64;
+ struct cvmx_pip_xstat9_prtx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t jabber:32;
+ uint64_t oversz:32;
+#else
+ uint64_t oversz:32;
+ uint64_t jabber:32;
+#endif
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pip.h b/arch/mips/include/asm/octeon/cvmx-pip.h
new file mode 100644
index 000000000..01ca7267a
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-pip.h
@@ -0,0 +1,524 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Interface to the hardware Packet Input Processing unit.
+ *
+ */
+
+#ifndef __CVMX_PIP_H__
+#define __CVMX_PIP_H__
+
+#include <asm/octeon/cvmx-wqe.h>
+#include <asm/octeon/cvmx-fpa.h>
+#include <asm/octeon/cvmx-pip-defs.h>
+
+#define CVMX_PIP_NUM_INPUT_PORTS 48
+#define CVMX_PIP_NUM_WATCHERS 4
+
+/*
+ * Encodes the different error and exception codes
+ */
+typedef enum {
+ CVMX_PIP_L4_NO_ERR = 0ull,
+ /*
+ * 1 = TCP (UDP) packet not long enough to cover TCP (UDP)
+ * header
+ */
+ CVMX_PIP_L4_MAL_ERR = 1ull,
+ /* 2 = TCP/UDP checksum failure */
+ CVMX_PIP_CHK_ERR = 2ull,
+ /*
+ * 3 = TCP/UDP length check (TCP/UDP length does not match IP
+ * length).
+ */
+ CVMX_PIP_L4_LENGTH_ERR = 3ull,
+ /* 4 = illegal TCP/UDP port (either source or dest port is zero) */
+ CVMX_PIP_BAD_PRT_ERR = 4ull,
+ /* 8 = TCP flags = FIN only */
+ CVMX_PIP_TCP_FLG8_ERR = 8ull,
+ /* 9 = TCP flags = 0 */
+ CVMX_PIP_TCP_FLG9_ERR = 9ull,
+ /* 10 = TCP flags = FIN+RST+* */
+ CVMX_PIP_TCP_FLG10_ERR = 10ull,
+ /* 11 = TCP flags = SYN+URG+* */
+ CVMX_PIP_TCP_FLG11_ERR = 11ull,
+ /* 12 = TCP flags = SYN+RST+* */
+ CVMX_PIP_TCP_FLG12_ERR = 12ull,
+ /* 13 = TCP flags = SYN+FIN+* */
+ CVMX_PIP_TCP_FLG13_ERR = 13ull
+} cvmx_pip_l4_err_t;
+
+typedef enum {
+ CVMX_PIP_IP_NO_ERR = 0ull,
+ /* 1 = not IPv4 or IPv6 */
+ CVMX_PIP_NOT_IP = 1ull,
+ /* 2 = IPv4 header checksum violation */
+ CVMX_PIP_IPV4_HDR_CHK = 2ull,
+ /* 3 = malformed (packet not long enough to cover IP hdr) */
+ CVMX_PIP_IP_MAL_HDR = 3ull,
+ /* 4 = malformed (packet not long enough to cover len in IP hdr) */
+ CVMX_PIP_IP_MAL_PKT = 4ull,
+ /* 5 = TTL / hop count equal zero */
+ CVMX_PIP_TTL_HOP = 5ull,
+ /* 6 = IPv4 options / IPv6 early extension headers */
+ CVMX_PIP_OPTS = 6ull
+} cvmx_pip_ip_exc_t;
+
+/**
+ * NOTES
+ * late collision (data received before collision)
+ * late collisions cannot be detected by the receiver
+ * they would appear as JAM bits which would appear as bad FCS
+ * or carrier extend error which is CVMX_PIP_EXTEND_ERR
+ */
+typedef enum {
+ /* No error */
+ CVMX_PIP_RX_NO_ERR = 0ull,
+ /* RGM+SPI 1 = partially received packet (buffering/bandwidth
+ * not adequate) */
+ CVMX_PIP_PARTIAL_ERR = 1ull,
+ /* RGM+SPI 2 = receive packet too large and truncated */
+ CVMX_PIP_JABBER_ERR = 2ull,
+ /*
+ * RGM 3 = max frame error (pkt len > max frame len) (with FCS
+ * error)
+ */
+ CVMX_PIP_OVER_FCS_ERR = 3ull,
+ /* RGM+SPI 4 = max frame error (pkt len > max frame len) */
+ CVMX_PIP_OVER_ERR = 4ull,
+ /*
+ * RGM 5 = nibble error (data not byte multiple - 100M and 10M
+ * only)
+ */
+ CVMX_PIP_ALIGN_ERR = 5ull,
+ /*
+ * RGM 6 = min frame error (pkt len < min frame len) (with FCS
+ * error)
+ */
+ CVMX_PIP_UNDER_FCS_ERR = 6ull,
+ /* RGM 7 = FCS error */
+ CVMX_PIP_GMX_FCS_ERR = 7ull,
+ /* RGM+SPI 8 = min frame error (pkt len < min frame len) */
+ CVMX_PIP_UNDER_ERR = 8ull,
+ /* RGM 9 = Frame carrier extend error */
+ CVMX_PIP_EXTEND_ERR = 9ull,
+ /*
+ * RGM 10 = length mismatch (len did not match len in L2
+ * length/type)
+ */
+ CVMX_PIP_LENGTH_ERR = 10ull,
+ /* RGM 11 = Frame error (some or all data bits marked err) */
+ CVMX_PIP_DAT_ERR = 11ull,
+ /* SPI 11 = DIP4 error */
+ CVMX_PIP_DIP_ERR = 11ull,
+ /*
+ * RGM 12 = packet was not large enough to pass the skipper -
+ * no inspection could occur.
+ */
+ CVMX_PIP_SKIP_ERR = 12ull,
+ /*
+ * RGM 13 = studder error (data not repeated - 100M and 10M
+ * only)
+ */
+ CVMX_PIP_NIBBLE_ERR = 13ull,
+ /* RGM+SPI 16 = FCS error */
+	CVMX_PIP_PIP_FCS = 16ull,
+ /*
+ * RGM+SPI+PCI 17 = packet was not large enough to pass the
+ * skipper - no inspection could occur.
+ */
+	CVMX_PIP_PIP_SKIP_ERR = 17ull,
+ /*
+ * RGM+SPI+PCI 18 = malformed l2 (packet not long enough to
+ * cover L2 hdr).
+ */
+	CVMX_PIP_PIP_L2_MAL_HDR = 18ull
+ /*
+ * NOTES: xx = late collision (data received before collision)
+ * late collisions cannot be detected by the receiver
+ * they would appear as JAM bits which would appear as
+ * bad FCS or carrier extend error which is
+ * CVMX_PIP_EXTEND_ERR
+ */
+} cvmx_pip_rcv_err_t;
+
+/**
+ * This defines the err_code field values in the work queue entry
+ */
+typedef union {
+ cvmx_pip_l4_err_t l4_err;
+ cvmx_pip_ip_exc_t ip_exc;
+ cvmx_pip_rcv_err_t rcv_err;
+} cvmx_pip_err_t;
+
+/**
+ * Status statistics for a port
+ */
+typedef struct {
+ /* Inbound octets marked to be dropped by the IPD */
+ uint32_t dropped_octets;
+ /* Inbound packets marked to be dropped by the IPD */
+ uint32_t dropped_packets;
+ /* RAW PCI Packets received by PIP per port */
+ uint32_t pci_raw_packets;
+ /* Number of octets processed by PIP */
+ uint32_t octets;
+ /* Number of packets processed by PIP */
+ uint32_t packets;
+ /*
+ * Number of identified L2 multicast packets. Does not
+ * include broadcast packets. Only includes packets whose
+ * parse mode is SKIP_TO_L2
+ */
+ uint32_t multicast_packets;
+ /*
+ * Number of identified L2 broadcast packets. Does not
+ * include multicast packets. Only includes packets whose
+ * parse mode is SKIP_TO_L2
+ */
+ uint32_t broadcast_packets;
+ /* Number of 64B packets */
+ uint32_t len_64_packets;
+ /* Number of 65-127B packets */
+ uint32_t len_65_127_packets;
+ /* Number of 128-255B packets */
+ uint32_t len_128_255_packets;
+ /* Number of 256-511B packets */
+ uint32_t len_256_511_packets;
+ /* Number of 512-1023B packets */
+ uint32_t len_512_1023_packets;
+ /* Number of 1024-1518B packets */
+ uint32_t len_1024_1518_packets;
+ /* Number of 1519-max packets */
+ uint32_t len_1519_max_packets;
+ /* Number of packets with FCS or Align opcode errors */
+ uint32_t fcs_align_err_packets;
+ /* Number of packets with length < min */
+ uint32_t runt_packets;
+ /* Number of packets with length < min and FCS error */
+ uint32_t runt_crc_packets;
+ /* Number of packets with length > max */
+ uint32_t oversize_packets;
+ /* Number of packets with length > max and FCS error */
+ uint32_t oversize_crc_packets;
+ /* Number of packets without GMX/SPX/PCI errors received by PIP */
+ uint32_t inb_packets;
+ /*
+ * Total number of octets from all packets received by PIP,
+ * including CRC
+ */
+ uint64_t inb_octets;
+ /* Number of packets with GMX/SPX/PCI errors received by PIP */
+ uint16_t inb_errors;
+} cvmx_pip_port_status_t;
+
+/**
+ * Definition of the PIP custom header that can be prepended
+ * to a packet by external hardware.
+ */
+typedef union {
+ uint64_t u64;
+ struct {
+ /*
+ * Documented as R - Set if the Packet is RAWFULL. If
+ * set, this header must be the full 8 bytes.
+ */
+ uint64_t rawfull:1;
+ /* Must be zero */
+ uint64_t reserved0:5;
+ /* PIP parse mode for this packet */
+ uint64_t parse_mode:2;
+ /* Must be zero */
+ uint64_t reserved1:1;
+ /*
+ * Skip amount, including this header, to the
+ * beginning of the packet
+ */
+ uint64_t skip_len:7;
+ /* Must be zero */
+ uint64_t reserved2:6;
+ /* POW input queue for this packet */
+ uint64_t qos:3;
+ /* POW input group for this packet */
+ uint64_t grp:4;
+ /*
+ * Flag to store this packet in the work queue entry,
+ * if possible
+ */
+ uint64_t rs:1;
+ /* POW input tag type */
+ uint64_t tag_type:2;
+ /* POW input tag */
+ uint64_t tag:32;
+ } s;
+} cvmx_pip_pkt_inst_hdr_t;
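+
+/*
+ * Editor's illustration (not part of the original patch): a hedged
+ * sketch of how software preparing a RAW packet might fill in this
+ * header before handing the packet to PIP.  The skip length, parse
+ * mode and tag values are assumptions chosen only for the example.
+ *
+ *	cvmx_pip_pkt_inst_hdr_t inst;
+ *
+ *	inst.u64 = 0;
+ *	inst.s.rawfull = 1;	full 8-byte instruction header present
+ *	inst.s.parse_mode = 0;	assumed parse mode for the sketch
+ *	inst.s.skip_len = 8;	skip this header to reach the packet data
+ *	inst.s.qos = 0;
+ *	inst.s.grp = 0;
+ *	inst.s.tag_type = 0;
+ *	inst.s.tag = 0;
+ */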
+
+/* CSR typedefs have been moved to cvmx-csr-*.h */
+
+/**
+ * Configure an ethernet input port
+ *
+ * @port_num: Port number to configure
+ * @port_cfg: Port hardware configuration
+ * @port_tag_cfg:
+ * Port POW tagging configuration
+ */
+static inline void cvmx_pip_config_port(uint64_t port_num,
+ union cvmx_pip_prt_cfgx port_cfg,
+ union cvmx_pip_prt_tagx port_tag_cfg)
+{
+ cvmx_write_csr(CVMX_PIP_PRT_CFGX(port_num), port_cfg.u64);
+ cvmx_write_csr(CVMX_PIP_PRT_TAGX(port_num), port_tag_cfg.u64);
+}
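+
+/*
+ * Editor's illustration (not part of the original patch): a minimal,
+ * hypothetical way board setup code might use the helper above.  The
+ * port number and parse-mode value are assumptions for the sketch;
+ * the field names come from the unions in cvmx-pip-defs.h.
+ *
+ *	union cvmx_pip_prt_cfgx port_cfg;
+ *	union cvmx_pip_prt_tagx tag_cfg;
+ *
+ *	port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(port));
+ *	port_cfg.s.mode = 1;	assumed: parse packets starting at L2
+ *	port_cfg.s.qos = 0;
+ *	tag_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
+ *	tag_cfg.s.grp = 0;
+ *	cvmx_pip_config_port(port, port_cfg, tag_cfg);
+ */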
+#if 0
+/**
+ * @deprecated This function is a thin wrapper around the Pass1 version
+ * of the CVMX_PIP_QOS_WATCHX CSR; Pass2 has added a field for
+ * setting the group that is incompatible with this function,
+ * the preferred upgrade path is to use the CSR directly.
+ *
+ * Configure the global QoS packet watchers. Each watcher is
+ * capable of matching a field in a packet to determine the
+ * QoS queue for scheduling.
+ *
+ * @watcher: Watcher number to configure (0 - 3).
+ * @match_type: Watcher match type
+ * @match_value:
+ * Value the watcher will match against
+ * @qos: QoS queue for packets matching this watcher
+ */
+static inline void cvmx_pip_config_watcher(uint64_t watcher,
+ cvmx_pip_qos_watch_types match_type,
+ uint64_t match_value, uint64_t qos)
+{
+ cvmx_pip_port_watcher_cfg_t watcher_config;
+
+ watcher_config.u64 = 0;
+ watcher_config.s.match_type = match_type;
+ watcher_config.s.match_value = match_value;
+ watcher_config.s.qos = qos;
+
+ cvmx_write_csr(CVMX_PIP_QOS_WATCHX(watcher), watcher_config.u64);
+}
+#endif
+/**
+ * Configure the VLAN priority to QoS queue mapping.
+ *
+ * @vlan_priority:
+ * VLAN priority (0-7)
+ * @qos: QoS queue for packets matching this watcher
+ */
+static inline void cvmx_pip_config_vlan_qos(uint64_t vlan_priority,
+ uint64_t qos)
+{
+ union cvmx_pip_qos_vlanx pip_qos_vlanx;
+ pip_qos_vlanx.u64 = 0;
+ pip_qos_vlanx.s.qos = qos;
+ cvmx_write_csr(CVMX_PIP_QOS_VLANX(vlan_priority), pip_qos_vlanx.u64);
+}
+
+/**
+ * Configure the Diffserv to QoS queue mapping.
+ *
+ * @diffserv: Diffserv field value (0-63)
+ * @qos: QoS queue for packets matching this watcher
+ */
+static inline void cvmx_pip_config_diffserv_qos(uint64_t diffserv, uint64_t qos)
+{
+ union cvmx_pip_qos_diffx pip_qos_diffx;
+ pip_qos_diffx.u64 = 0;
+ pip_qos_diffx.s.qos = qos;
+ cvmx_write_csr(CVMX_PIP_QOS_DIFFX(diffserv), pip_qos_diffx.u64);
+}
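+
+/*
+ * Editor's illustration (not part of the original patch): the two
+ * helpers above can be combined at init time to build a simple
+ * priority scheme; the mappings below are arbitrary example values.
+ *
+ *	uint64_t prio, dscp;
+ *
+ *	for (prio = 0; prio < 8; prio++)
+ *		cvmx_pip_config_vlan_qos(prio, prio);
+ *	for (dscp = 0; dscp < 64; dscp++)
+ *		cvmx_pip_config_diffserv_qos(dscp, dscp >> 3);
+ */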
+
+/**
+ * Get the status counters for a port.
+ *
+ * @port_num: Port number to get statistics for.
+ * @clear: Set to 1 to clear the counters after they are read
+ * @status: Where to put the results.
+ */
+static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear,
+ cvmx_pip_port_status_t *status)
+{
+ union cvmx_pip_stat_ctl pip_stat_ctl;
+ union cvmx_pip_stat0_prtx stat0;
+ union cvmx_pip_stat1_prtx stat1;
+ union cvmx_pip_stat2_prtx stat2;
+ union cvmx_pip_stat3_prtx stat3;
+ union cvmx_pip_stat4_prtx stat4;
+ union cvmx_pip_stat5_prtx stat5;
+ union cvmx_pip_stat6_prtx stat6;
+ union cvmx_pip_stat7_prtx stat7;
+ union cvmx_pip_stat8_prtx stat8;
+ union cvmx_pip_stat9_prtx stat9;
+ union cvmx_pip_stat_inb_pktsx pip_stat_inb_pktsx;
+ union cvmx_pip_stat_inb_octsx pip_stat_inb_octsx;
+ union cvmx_pip_stat_inb_errsx pip_stat_inb_errsx;
+
+ pip_stat_ctl.u64 = 0;
+ pip_stat_ctl.s.rdclr = clear;
+ cvmx_write_csr(CVMX_PIP_STAT_CTL, pip_stat_ctl.u64);
+
+ stat0.u64 = cvmx_read_csr(CVMX_PIP_STAT0_PRTX(port_num));
+ stat1.u64 = cvmx_read_csr(CVMX_PIP_STAT1_PRTX(port_num));
+ stat2.u64 = cvmx_read_csr(CVMX_PIP_STAT2_PRTX(port_num));
+ stat3.u64 = cvmx_read_csr(CVMX_PIP_STAT3_PRTX(port_num));
+ stat4.u64 = cvmx_read_csr(CVMX_PIP_STAT4_PRTX(port_num));
+ stat5.u64 = cvmx_read_csr(CVMX_PIP_STAT5_PRTX(port_num));
+ stat6.u64 = cvmx_read_csr(CVMX_PIP_STAT6_PRTX(port_num));
+ stat7.u64 = cvmx_read_csr(CVMX_PIP_STAT7_PRTX(port_num));
+ stat8.u64 = cvmx_read_csr(CVMX_PIP_STAT8_PRTX(port_num));
+ stat9.u64 = cvmx_read_csr(CVMX_PIP_STAT9_PRTX(port_num));
+ pip_stat_inb_pktsx.u64 =
+ cvmx_read_csr(CVMX_PIP_STAT_INB_PKTSX(port_num));
+ pip_stat_inb_octsx.u64 =
+ cvmx_read_csr(CVMX_PIP_STAT_INB_OCTSX(port_num));
+ pip_stat_inb_errsx.u64 =
+ cvmx_read_csr(CVMX_PIP_STAT_INB_ERRSX(port_num));
+
+ status->dropped_octets = stat0.s.drp_octs;
+ status->dropped_packets = stat0.s.drp_pkts;
+ status->octets = stat1.s.octs;
+ status->pci_raw_packets = stat2.s.raw;
+ status->packets = stat2.s.pkts;
+ status->multicast_packets = stat3.s.mcst;
+ status->broadcast_packets = stat3.s.bcst;
+ status->len_64_packets = stat4.s.h64;
+ status->len_65_127_packets = stat4.s.h65to127;
+ status->len_128_255_packets = stat5.s.h128to255;
+ status->len_256_511_packets = stat5.s.h256to511;
+ status->len_512_1023_packets = stat6.s.h512to1023;
+ status->len_1024_1518_packets = stat6.s.h1024to1518;
+ status->len_1519_max_packets = stat7.s.h1519;
+ status->fcs_align_err_packets = stat7.s.fcs;
+ status->runt_packets = stat8.s.undersz;
+ status->runt_crc_packets = stat8.s.frag;
+ status->oversize_packets = stat9.s.oversz;
+ status->oversize_crc_packets = stat9.s.jabber;
+ status->inb_packets = pip_stat_inb_pktsx.s.pkts;
+ status->inb_octets = pip_stat_inb_octsx.s.octs;
+ status->inb_errors = pip_stat_inb_errsx.s.errs;
+
+ if (cvmx_octeon_is_pass1()) {
+ /*
+ * Kludge to fix Octeon Pass 1 errata - Drop counts
+ * don't work.
+ */
+ if (status->inb_packets > status->packets)
+ status->dropped_packets =
+ status->inb_packets - status->packets;
+ else
+ status->dropped_packets = 0;
+ if (status->inb_octets - status->inb_packets * 4 >
+ status->octets)
+ status->dropped_octets =
+ status->inb_octets - status->inb_packets * 4 -
+ status->octets;
+ else
+ status->dropped_octets = 0;
+ }
+}
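+
+/*
+ * Editor's illustration (not part of the original patch): reading the
+ * counters without clearing them; the printout is purely illustrative.
+ *
+ *	cvmx_pip_port_status_t stats;
+ *
+ *	cvmx_pip_get_port_status(port, 0, &stats);	0 = do not clear
+ *	pr_info("pip port %llu: %u packets, %u dropped\n",
+ *		(unsigned long long)port, stats.packets,
+ *		stats.dropped_packets);
+ */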
+
+/**
+ * Configure the hardware CRC engine
+ *
+ * @interface: Interface to configure (0 or 1)
+ * @invert_result:
+ * Invert the result of the CRC
+ * @reflect: Reflect
+ * @initialization_vector:
+ * CRC initialization vector
+ */
+static inline void cvmx_pip_config_crc(uint64_t interface,
+ uint64_t invert_result, uint64_t reflect,
+ uint32_t initialization_vector)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ union cvmx_pip_crc_ctlx config;
+ union cvmx_pip_crc_ivx pip_crc_ivx;
+
+ config.u64 = 0;
+ config.s.invres = invert_result;
+ config.s.reflect = reflect;
+ cvmx_write_csr(CVMX_PIP_CRC_CTLX(interface), config.u64);
+
+ pip_crc_ivx.u64 = 0;
+ pip_crc_ivx.s.iv = initialization_vector;
+ cvmx_write_csr(CVMX_PIP_CRC_IVX(interface), pip_crc_ivx.u64);
+ }
+}
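+
+/*
+ * Editor's illustration (not part of the original patch): configuring
+ * interface 0 with the conventional CRC-32 parameters (inverted and
+ * reflected result, all-ones initialization vector).  These values are
+ * an example, not a statement of what any given board requires.
+ *
+ *	cvmx_pip_config_crc(0, 1, 1, 0xffffffff);
+ */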
+
+/**
+ * Clear all bits in a tag mask. This should be called on
+ * startup before any calls to cvmx_pip_tag_mask_set. Each bit
+ * set in the final mask represents a byte used in the packet for
+ * tag generation.
+ *
+ * @mask_index: Which tag mask to clear (0..3)
+ */
+static inline void cvmx_pip_tag_mask_clear(uint64_t mask_index)
+{
+ uint64_t index;
+ union cvmx_pip_tag_incx pip_tag_incx;
+ pip_tag_incx.u64 = 0;
+ pip_tag_incx.s.en = 0;
+ for (index = mask_index * 16; index < (mask_index + 1) * 16; index++)
+ cvmx_write_csr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);
+}
+
+/**
+ * Sets a range of bits in the tag mask. The tag mask is used
+ * when the cvmx_pip_port_tag_cfg_t tag_mode is non-zero.
+ * There are four separate masks that can be configured.
+ *
+ * @mask_index: Which tag mask to modify (0..3)
+ * @offset: Offset into the bitmask to set bits at. Use the GCC macro
+ * offsetof() to determine the offsets into packet headers.
+ * For example, offsetof(struct ethhdr, h_proto) returns the offset
+ * of the Ethernet protocol field. The bitmask selects which
+ * bytes to include the tag, with bit offset X selecting
+ * byte at offset X from the beginning of the packet data.
+ * @len: Number of bytes to include. Usually this is the sizeof()
+ * the field.
+ */
+static inline void cvmx_pip_tag_mask_set(uint64_t mask_index, uint64_t offset,
+ uint64_t len)
+{
+ while (len--) {
+ union cvmx_pip_tag_incx pip_tag_incx;
+ uint64_t index = mask_index * 16 + offset / 8;
+ pip_tag_incx.u64 = cvmx_read_csr(CVMX_PIP_TAG_INCX(index));
+ pip_tag_incx.s.en |= 0x80 >> (offset & 0x7);
+ cvmx_write_csr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);
+ offset++;
+ }
+}
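+
+/*
+ * Editor's illustration (not part of the original patch): to fold the
+ * IPv4 source and destination addresses into the tag, one might clear
+ * mask 0 and then mark the eight address bytes.  The 14-byte Ethernet
+ * header offset assumes an untagged frame and is an example only.
+ *
+ *	cvmx_pip_tag_mask_clear(0);
+ *	cvmx_pip_tag_mask_set(0, 14 + 12, 8);	IPv4 src + dst addresses
+ */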
+
+#endif /* __CVMX_PIP_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-pko-defs.h b/arch/mips/include/asm/octeon/cvmx-pko-defs.h
new file mode 100644
index 000000000..7e14c0d32
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-pko-defs.h
@@ -0,0 +1,2205 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_PKO_DEFS_H__
+#define __CVMX_PKO_DEFS_H__
+
+#define CVMX_PKO_MEM_COUNT0 (CVMX_ADD_IO_SEG(0x0001180050001080ull))
+#define CVMX_PKO_MEM_COUNT1 (CVMX_ADD_IO_SEG(0x0001180050001088ull))
+#define CVMX_PKO_MEM_DEBUG0 (CVMX_ADD_IO_SEG(0x0001180050001100ull))
+#define CVMX_PKO_MEM_DEBUG1 (CVMX_ADD_IO_SEG(0x0001180050001108ull))
+#define CVMX_PKO_MEM_DEBUG10 (CVMX_ADD_IO_SEG(0x0001180050001150ull))
+#define CVMX_PKO_MEM_DEBUG11 (CVMX_ADD_IO_SEG(0x0001180050001158ull))
+#define CVMX_PKO_MEM_DEBUG12 (CVMX_ADD_IO_SEG(0x0001180050001160ull))
+#define CVMX_PKO_MEM_DEBUG13 (CVMX_ADD_IO_SEG(0x0001180050001168ull))
+#define CVMX_PKO_MEM_DEBUG14 (CVMX_ADD_IO_SEG(0x0001180050001170ull))
+#define CVMX_PKO_MEM_DEBUG2 (CVMX_ADD_IO_SEG(0x0001180050001110ull))
+#define CVMX_PKO_MEM_DEBUG3 (CVMX_ADD_IO_SEG(0x0001180050001118ull))
+#define CVMX_PKO_MEM_DEBUG4 (CVMX_ADD_IO_SEG(0x0001180050001120ull))
+#define CVMX_PKO_MEM_DEBUG5 (CVMX_ADD_IO_SEG(0x0001180050001128ull))
+#define CVMX_PKO_MEM_DEBUG6 (CVMX_ADD_IO_SEG(0x0001180050001130ull))
+#define CVMX_PKO_MEM_DEBUG7 (CVMX_ADD_IO_SEG(0x0001180050001138ull))
+#define CVMX_PKO_MEM_DEBUG8 (CVMX_ADD_IO_SEG(0x0001180050001140ull))
+#define CVMX_PKO_MEM_DEBUG9 (CVMX_ADD_IO_SEG(0x0001180050001148ull))
+#define CVMX_PKO_MEM_IPORT_PTRS (CVMX_ADD_IO_SEG(0x0001180050001030ull))
+#define CVMX_PKO_MEM_IPORT_QOS (CVMX_ADD_IO_SEG(0x0001180050001038ull))
+#define CVMX_PKO_MEM_IQUEUE_PTRS (CVMX_ADD_IO_SEG(0x0001180050001040ull))
+#define CVMX_PKO_MEM_IQUEUE_QOS (CVMX_ADD_IO_SEG(0x0001180050001048ull))
+#define CVMX_PKO_MEM_PORT_PTRS (CVMX_ADD_IO_SEG(0x0001180050001010ull))
+#define CVMX_PKO_MEM_PORT_QOS (CVMX_ADD_IO_SEG(0x0001180050001018ull))
+#define CVMX_PKO_MEM_PORT_RATE0 (CVMX_ADD_IO_SEG(0x0001180050001020ull))
+#define CVMX_PKO_MEM_PORT_RATE1 (CVMX_ADD_IO_SEG(0x0001180050001028ull))
+#define CVMX_PKO_MEM_QUEUE_PTRS (CVMX_ADD_IO_SEG(0x0001180050001000ull))
+#define CVMX_PKO_MEM_QUEUE_QOS (CVMX_ADD_IO_SEG(0x0001180050001008ull))
+#define CVMX_PKO_MEM_THROTTLE_INT (CVMX_ADD_IO_SEG(0x0001180050001058ull))
+#define CVMX_PKO_MEM_THROTTLE_PIPE (CVMX_ADD_IO_SEG(0x0001180050001050ull))
+#define CVMX_PKO_REG_BIST_RESULT (CVMX_ADD_IO_SEG(0x0001180050000080ull))
+#define CVMX_PKO_REG_CMD_BUF (CVMX_ADD_IO_SEG(0x0001180050000010ull))
+#define CVMX_PKO_REG_CRC_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180050000028ull) + ((offset) & 1) * 8)
+#define CVMX_PKO_REG_CRC_ENABLE (CVMX_ADD_IO_SEG(0x0001180050000020ull))
+#define CVMX_PKO_REG_CRC_IVX(offset) (CVMX_ADD_IO_SEG(0x0001180050000038ull) + ((offset) & 1) * 8)
+#define CVMX_PKO_REG_DEBUG0 (CVMX_ADD_IO_SEG(0x0001180050000098ull))
+#define CVMX_PKO_REG_DEBUG1 (CVMX_ADD_IO_SEG(0x00011800500000A0ull))
+#define CVMX_PKO_REG_DEBUG2 (CVMX_ADD_IO_SEG(0x00011800500000A8ull))
+#define CVMX_PKO_REG_DEBUG3 (CVMX_ADD_IO_SEG(0x00011800500000B0ull))
+#define CVMX_PKO_REG_DEBUG4 (CVMX_ADD_IO_SEG(0x00011800500000B8ull))
+#define CVMX_PKO_REG_ENGINE_INFLIGHT (CVMX_ADD_IO_SEG(0x0001180050000050ull))
+#define CVMX_PKO_REG_ENGINE_INFLIGHT1 (CVMX_ADD_IO_SEG(0x0001180050000318ull))
+#define CVMX_PKO_REG_ENGINE_STORAGEX(offset) (CVMX_ADD_IO_SEG(0x0001180050000300ull) + ((offset) & 1) * 8)
+#define CVMX_PKO_REG_ENGINE_THRESH (CVMX_ADD_IO_SEG(0x0001180050000058ull))
+#define CVMX_PKO_REG_ERROR (CVMX_ADD_IO_SEG(0x0001180050000088ull))
+#define CVMX_PKO_REG_FLAGS (CVMX_ADD_IO_SEG(0x0001180050000000ull))
+#define CVMX_PKO_REG_GMX_PORT_MODE (CVMX_ADD_IO_SEG(0x0001180050000018ull))
+#define CVMX_PKO_REG_INT_MASK (CVMX_ADD_IO_SEG(0x0001180050000090ull))
+#define CVMX_PKO_REG_LOOPBACK_BPID (CVMX_ADD_IO_SEG(0x0001180050000118ull))
+#define CVMX_PKO_REG_LOOPBACK_PKIND (CVMX_ADD_IO_SEG(0x0001180050000068ull))
+#define CVMX_PKO_REG_MIN_PKT (CVMX_ADD_IO_SEG(0x0001180050000070ull))
+#define CVMX_PKO_REG_PREEMPT (CVMX_ADD_IO_SEG(0x0001180050000110ull))
+#define CVMX_PKO_REG_QUEUE_MODE (CVMX_ADD_IO_SEG(0x0001180050000048ull))
+#define CVMX_PKO_REG_QUEUE_PREEMPT (CVMX_ADD_IO_SEG(0x0001180050000108ull))
+#define CVMX_PKO_REG_QUEUE_PTRS1 (CVMX_ADD_IO_SEG(0x0001180050000100ull))
+#define CVMX_PKO_REG_READ_IDX (CVMX_ADD_IO_SEG(0x0001180050000008ull))
+#define CVMX_PKO_REG_THROTTLE (CVMX_ADD_IO_SEG(0x0001180050000078ull))
+#define CVMX_PKO_REG_TIMESTAMP (CVMX_ADD_IO_SEG(0x0001180050000060ull))
+
+union cvmx_pko_mem_count0 {
+ uint64_t u64;
+ struct cvmx_pko_mem_count0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t count:32;
+#else
+ uint64_t count:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pko_mem_count1 {
+ uint64_t u64;
+ struct cvmx_pko_mem_count1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t count:48;
+#else
+ uint64_t count:48;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
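+
+/*
+ * Example (illustrative sketch): read the packet and octet counters for
+ * one port. Selecting the counter entry by first writing the port
+ * number to PKO_REG_READ_IDX is an assumption about how these indirect
+ * counter registers are addressed.
+ *
+ *	union cvmx_pko_reg_read_idx idx;
+ *	union cvmx_pko_mem_count0 count0;
+ *	union cvmx_pko_mem_count1 count1;
+ *
+ *	idx.u64 = 0;
+ *	idx.s.index = port;
+ *	cvmx_write_csr(CVMX_PKO_REG_READ_IDX, idx.u64);
+ *	count0.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT0);
+ *	count1.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT1);
+ *
+ * count0.s.count then holds packets sent and count1.s.count octets sent.
+ */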
+
+union cvmx_pko_mem_debug0 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fau:28;
+ uint64_t cmd:14;
+ uint64_t segs:6;
+ uint64_t size:16;
+#else
+ uint64_t size:16;
+ uint64_t segs:6;
+ uint64_t cmd:14;
+ uint64_t fau:28;
+#endif
+ } s;
+};
+
+union cvmx_pko_mem_debug1 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t i:1;
+ uint64_t back:4;
+ uint64_t pool:3;
+ uint64_t size:16;
+ uint64_t ptr:40;
+#else
+ uint64_t ptr:40;
+ uint64_t size:16;
+ uint64_t pool:3;
+ uint64_t back:4;
+ uint64_t i:1;
+#endif
+ } s;
+};
+
+union cvmx_pko_mem_debug10 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug10_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug10_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fau:28;
+ uint64_t cmd:14;
+ uint64_t segs:6;
+ uint64_t size:16;
+#else
+ uint64_t size:16;
+ uint64_t segs:6;
+ uint64_t cmd:14;
+ uint64_t fau:28;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug10_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63:15;
+ uint64_t ptrs1:17;
+ uint64_t reserved_17_31:15;
+ uint64_t ptrs2:17;
+#else
+ uint64_t ptrs2:17;
+ uint64_t reserved_17_31:15;
+ uint64_t ptrs1:17;
+ uint64_t reserved_49_63:15;
+#endif
+ } cn50xx;
+};
+
+union cvmx_pko_mem_debug11 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug11_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t i:1;
+ uint64_t back:4;
+ uint64_t pool:3;
+ uint64_t size:16;
+ uint64_t reserved_0_39:40;
+#else
+ uint64_t reserved_0_39:40;
+ uint64_t size:16;
+ uint64_t pool:3;
+ uint64_t back:4;
+ uint64_t i:1;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug11_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t i:1;
+ uint64_t back:4;
+ uint64_t pool:3;
+ uint64_t size:16;
+ uint64_t ptr:40;
+#else
+ uint64_t ptr:40;
+ uint64_t size:16;
+ uint64_t pool:3;
+ uint64_t back:4;
+ uint64_t i:1;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug11_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63:41;
+ uint64_t maj:1;
+ uint64_t uid:3;
+ uint64_t sop:1;
+ uint64_t len:1;
+ uint64_t chk:1;
+ uint64_t cnt:13;
+ uint64_t mod:3;
+#else
+ uint64_t mod:3;
+ uint64_t cnt:13;
+ uint64_t chk:1;
+ uint64_t len:1;
+ uint64_t sop:1;
+ uint64_t uid:3;
+ uint64_t maj:1;
+ uint64_t reserved_23_63:41;
+#endif
+ } cn50xx;
+};
+
+union cvmx_pko_mem_debug12 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug12_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug12_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug12_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t fau:28;
+ uint64_t cmd:14;
+ uint64_t segs:6;
+ uint64_t size:16;
+#else
+ uint64_t size:16;
+ uint64_t segs:6;
+ uint64_t cmd:14;
+ uint64_t fau:28;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug12_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t state:64;
+#else
+ uint64_t state:64;
+#endif
+ } cn68xx;
+};
+
+union cvmx_pko_mem_debug13 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug13_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug13_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_51_63:13;
+ uint64_t widx:17;
+ uint64_t ridx2:17;
+ uint64_t widx2:17;
+#else
+ uint64_t widx2:17;
+ uint64_t ridx2:17;
+ uint64_t widx:17;
+ uint64_t reserved_51_63:13;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug13_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t i:1;
+ uint64_t back:4;
+ uint64_t pool:3;
+ uint64_t size:16;
+ uint64_t ptr:40;
+#else
+ uint64_t ptr:40;
+ uint64_t size:16;
+ uint64_t pool:3;
+ uint64_t back:4;
+ uint64_t i:1;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug13_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t state:64;
+#else
+ uint64_t state:64;
+#endif
+ } cn68xx;
+};
+
+union cvmx_pko_mem_debug14 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug14_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug14_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t ridx:17;
+#else
+ uint64_t ridx:17;
+ uint64_t reserved_17_63:47;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug14_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } cn52xx;
+};
+
+union cvmx_pko_mem_debug2 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t i:1;
+ uint64_t back:4;
+ uint64_t pool:3;
+ uint64_t size:16;
+ uint64_t ptr:40;
+#else
+ uint64_t ptr:40;
+ uint64_t size:16;
+ uint64_t pool:3;
+ uint64_t back:4;
+ uint64_t i:1;
+#endif
+ } s;
+};
+
+union cvmx_pko_mem_debug3 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug3_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t i:1;
+ uint64_t back:4;
+ uint64_t pool:3;
+ uint64_t size:16;
+ uint64_t ptr:40;
+#else
+ uint64_t ptr:40;
+ uint64_t size:16;
+ uint64_t pool:3;
+ uint64_t back:4;
+ uint64_t i:1;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug3_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } cn50xx;
+};
+
+union cvmx_pko_mem_debug4 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug4_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug4_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t cmnd_segs:3;
+ uint64_t cmnd_siz:16;
+ uint64_t cmnd_off:6;
+ uint64_t uid:3;
+ uint64_t dread_sop:1;
+ uint64_t init_dwrite:1;
+ uint64_t chk_once:1;
+ uint64_t chk_mode:1;
+ uint64_t active:1;
+ uint64_t static_p:1;
+ uint64_t qos:3;
+ uint64_t qcb_ridx:5;
+ uint64_t qid_off_max:4;
+ uint64_t qid_off:4;
+ uint64_t qid_base:8;
+ uint64_t wait:1;
+ uint64_t minor:2;
+ uint64_t major:3;
+#else
+ uint64_t major:3;
+ uint64_t minor:2;
+ uint64_t wait:1;
+ uint64_t qid_base:8;
+ uint64_t qid_off:4;
+ uint64_t qid_off_max:4;
+ uint64_t qcb_ridx:5;
+ uint64_t qos:3;
+ uint64_t static_p:1;
+ uint64_t active:1;
+ uint64_t chk_mode:1;
+ uint64_t chk_once:1;
+ uint64_t init_dwrite:1;
+ uint64_t dread_sop:1;
+ uint64_t uid:3;
+ uint64_t cmnd_off:6;
+ uint64_t cmnd_siz:16;
+ uint64_t cmnd_segs:3;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug4_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t curr_siz:8;
+ uint64_t curr_off:16;
+ uint64_t cmnd_segs:6;
+ uint64_t cmnd_siz:16;
+ uint64_t cmnd_off:6;
+ uint64_t uid:2;
+ uint64_t dread_sop:1;
+ uint64_t init_dwrite:1;
+ uint64_t chk_once:1;
+ uint64_t chk_mode:1;
+ uint64_t wait:1;
+ uint64_t minor:2;
+ uint64_t major:3;
+#else
+ uint64_t major:3;
+ uint64_t minor:2;
+ uint64_t wait:1;
+ uint64_t chk_mode:1;
+ uint64_t chk_once:1;
+ uint64_t init_dwrite:1;
+ uint64_t dread_sop:1;
+ uint64_t uid:2;
+ uint64_t cmnd_off:6;
+ uint64_t cmnd_siz:16;
+ uint64_t cmnd_segs:6;
+ uint64_t curr_off:16;
+ uint64_t curr_siz:8;
+#endif
+ } cn52xx;
+};
+
+union cvmx_pko_mem_debug5 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug5_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug5_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dwri_mod:1;
+ uint64_t dwri_sop:1;
+ uint64_t dwri_len:1;
+ uint64_t dwri_cnt:13;
+ uint64_t cmnd_siz:16;
+ uint64_t uid:1;
+ uint64_t xfer_wor:1;
+ uint64_t xfer_dwr:1;
+ uint64_t cbuf_fre:1;
+ uint64_t reserved_27_27:1;
+ uint64_t chk_mode:1;
+ uint64_t active:1;
+ uint64_t qos:3;
+ uint64_t qcb_ridx:5;
+ uint64_t qid_off:3;
+ uint64_t qid_base:7;
+ uint64_t wait:1;
+ uint64_t minor:2;
+ uint64_t major:4;
+#else
+ uint64_t major:4;
+ uint64_t minor:2;
+ uint64_t wait:1;
+ uint64_t qid_base:7;
+ uint64_t qid_off:3;
+ uint64_t qcb_ridx:5;
+ uint64_t qos:3;
+ uint64_t active:1;
+ uint64_t chk_mode:1;
+ uint64_t reserved_27_27:1;
+ uint64_t cbuf_fre:1;
+ uint64_t xfer_dwr:1;
+ uint64_t xfer_wor:1;
+ uint64_t uid:1;
+ uint64_t cmnd_siz:16;
+ uint64_t dwri_cnt:13;
+ uint64_t dwri_len:1;
+ uint64_t dwri_sop:1;
+ uint64_t dwri_mod:1;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug5_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t curr_ptr:29;
+ uint64_t curr_siz:16;
+ uint64_t curr_off:16;
+ uint64_t cmnd_segs:3;
+#else
+ uint64_t cmnd_segs:3;
+ uint64_t curr_off:16;
+ uint64_t curr_siz:16;
+ uint64_t curr_ptr:29;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug5_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63:10;
+ uint64_t nxt_inflt:6;
+ uint64_t curr_ptr:40;
+ uint64_t curr_siz:8;
+#else
+ uint64_t curr_siz:8;
+ uint64_t curr_ptr:40;
+ uint64_t nxt_inflt:6;
+ uint64_t reserved_54_63:10;
+#endif
+ } cn52xx;
+ struct cvmx_pko_mem_debug5_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63:8;
+ uint64_t ptp:1;
+ uint64_t major_3:1;
+ uint64_t nxt_inflt:6;
+ uint64_t curr_ptr:40;
+ uint64_t curr_siz:8;
+#else
+ uint64_t curr_siz:8;
+ uint64_t curr_ptr:40;
+ uint64_t nxt_inflt:6;
+ uint64_t major_3:1;
+ uint64_t ptp:1;
+ uint64_t reserved_56_63:8;
+#endif
+ } cn61xx;
+ struct cvmx_pko_mem_debug5_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_57_63:7;
+ uint64_t uid_2:1;
+ uint64_t ptp:1;
+ uint64_t major_3:1;
+ uint64_t nxt_inflt:6;
+ uint64_t curr_ptr:40;
+ uint64_t curr_siz:8;
+#else
+ uint64_t curr_siz:8;
+ uint64_t curr_ptr:40;
+ uint64_t nxt_inflt:6;
+ uint64_t major_3:1;
+ uint64_t ptp:1;
+ uint64_t uid_2:1;
+ uint64_t reserved_57_63:7;
+#endif
+ } cn68xx;
+};
+
+union cvmx_pko_mem_debug6 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug6_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63:27;
+ uint64_t qid_offres:4;
+ uint64_t qid_offths:4;
+ uint64_t preempter:1;
+ uint64_t preemptee:1;
+ uint64_t preempted:1;
+ uint64_t active:1;
+ uint64_t statc:1;
+ uint64_t qos:3;
+ uint64_t qcb_ridx:5;
+ uint64_t qid_offmax:4;
+ uint64_t reserved_0_11:12;
+#else
+ uint64_t reserved_0_11:12;
+ uint64_t qid_offmax:4;
+ uint64_t qcb_ridx:5;
+ uint64_t qos:3;
+ uint64_t statc:1;
+ uint64_t active:1;
+ uint64_t preempted:1;
+ uint64_t preemptee:1;
+ uint64_t preempter:1;
+ uint64_t qid_offths:4;
+ uint64_t qid_offres:4;
+ uint64_t reserved_37_63:27;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug6_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t qid_offm:3;
+ uint64_t static_p:1;
+ uint64_t work_min:3;
+ uint64_t dwri_chk:1;
+ uint64_t dwri_uid:1;
+ uint64_t dwri_mod:2;
+#else
+ uint64_t dwri_mod:2;
+ uint64_t dwri_uid:1;
+ uint64_t dwri_chk:1;
+ uint64_t work_min:3;
+ uint64_t static_p:1;
+ uint64_t qid_offm:3;
+ uint64_t reserved_11_63:53;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug6_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t curr_ptr:11;
+#else
+ uint64_t curr_ptr:11;
+ uint64_t reserved_11_63:53;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug6_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63:27;
+ uint64_t qid_offres:4;
+ uint64_t qid_offths:4;
+ uint64_t preempter:1;
+ uint64_t preemptee:1;
+ uint64_t preempted:1;
+ uint64_t active:1;
+ uint64_t statc:1;
+ uint64_t qos:3;
+ uint64_t qcb_ridx:5;
+ uint64_t qid_offmax:4;
+ uint64_t qid_off:4;
+ uint64_t qid_base:8;
+#else
+ uint64_t qid_base:8;
+ uint64_t qid_off:4;
+ uint64_t qid_offmax:4;
+ uint64_t qcb_ridx:5;
+ uint64_t qos:3;
+ uint64_t statc:1;
+ uint64_t active:1;
+ uint64_t preempted:1;
+ uint64_t preemptee:1;
+ uint64_t preempter:1;
+ uint64_t qid_offths:4;
+ uint64_t qid_offres:4;
+ uint64_t reserved_37_63:27;
+#endif
+ } cn52xx;
+};
+
+union cvmx_pko_mem_debug7 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug7_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug7_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63:6;
+ uint64_t dwb:9;
+ uint64_t start:33;
+ uint64_t size:16;
+#else
+ uint64_t size:16;
+ uint64_t start:33;
+ uint64_t dwb:9;
+ uint64_t reserved_58_63:6;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug7_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t qos:5;
+ uint64_t tail:1;
+ uint64_t buf_siz:13;
+ uint64_t buf_ptr:33;
+ uint64_t qcb_widx:6;
+ uint64_t qcb_ridx:6;
+#else
+ uint64_t qcb_ridx:6;
+ uint64_t qcb_widx:6;
+ uint64_t buf_ptr:33;
+ uint64_t buf_siz:13;
+ uint64_t tail:1;
+ uint64_t qos:5;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug7_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t qos:3;
+ uint64_t tail:1;
+ uint64_t buf_siz:13;
+ uint64_t buf_ptr:33;
+ uint64_t qcb_widx:7;
+ uint64_t qcb_ridx:7;
+#else
+ uint64_t qcb_ridx:7;
+ uint64_t qcb_widx:7;
+ uint64_t buf_ptr:33;
+ uint64_t buf_siz:13;
+ uint64_t tail:1;
+ uint64_t qos:3;
+#endif
+ } cn68xx;
+};
+
+union cvmx_pko_mem_debug8 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug8_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63:5;
+ uint64_t tail:1;
+ uint64_t buf_siz:13;
+ uint64_t reserved_0_44:45;
+#else
+ uint64_t reserved_0_44:45;
+ uint64_t buf_siz:13;
+ uint64_t tail:1;
+ uint64_t reserved_59_63:5;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug8_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t qos:5;
+ uint64_t tail:1;
+ uint64_t buf_siz:13;
+ uint64_t buf_ptr:33;
+ uint64_t qcb_widx:6;
+ uint64_t qcb_ridx:6;
+#else
+ uint64_t qcb_ridx:6;
+ uint64_t qcb_widx:6;
+ uint64_t buf_ptr:33;
+ uint64_t buf_siz:13;
+ uint64_t tail:1;
+ uint64_t qos:5;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug8_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t doorbell:20;
+ uint64_t reserved_6_7:2;
+ uint64_t static_p:1;
+ uint64_t s_tail:1;
+ uint64_t static_q:1;
+ uint64_t qos:3;
+#else
+ uint64_t qos:3;
+ uint64_t static_q:1;
+ uint64_t s_tail:1;
+ uint64_t static_p:1;
+ uint64_t reserved_6_7:2;
+ uint64_t doorbell:20;
+ uint64_t reserved_28_63:36;
+#endif
+ } cn50xx;
+ struct cvmx_pko_mem_debug8_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t preempter:1;
+ uint64_t doorbell:20;
+ uint64_t reserved_7_7:1;
+ uint64_t preemptee:1;
+ uint64_t static_p:1;
+ uint64_t s_tail:1;
+ uint64_t static_q:1;
+ uint64_t qos:3;
+#else
+ uint64_t qos:3;
+ uint64_t static_q:1;
+ uint64_t s_tail:1;
+ uint64_t static_p:1;
+ uint64_t preemptee:1;
+ uint64_t reserved_7_7:1;
+ uint64_t doorbell:20;
+ uint64_t preempter:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn52xx;
+ struct cvmx_pko_mem_debug8_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63:22;
+ uint64_t qid_qqos:8;
+ uint64_t reserved_33_33:1;
+ uint64_t qid_idx:4;
+ uint64_t preempter:1;
+ uint64_t doorbell:20;
+ uint64_t reserved_7_7:1;
+ uint64_t preemptee:1;
+ uint64_t static_p:1;
+ uint64_t s_tail:1;
+ uint64_t static_q:1;
+ uint64_t qos:3;
+#else
+ uint64_t qos:3;
+ uint64_t static_q:1;
+ uint64_t s_tail:1;
+ uint64_t static_p:1;
+ uint64_t preemptee:1;
+ uint64_t reserved_7_7:1;
+ uint64_t doorbell:20;
+ uint64_t preempter:1;
+ uint64_t qid_idx:4;
+ uint64_t reserved_33_33:1;
+ uint64_t qid_qqos:8;
+ uint64_t reserved_42_63:22;
+#endif
+ } cn61xx;
+ struct cvmx_pko_mem_debug8_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63:27;
+ uint64_t preempter:1;
+ uint64_t doorbell:20;
+ uint64_t reserved_9_15:7;
+ uint64_t preemptee:1;
+ uint64_t static_p:1;
+ uint64_t s_tail:1;
+ uint64_t static_q:1;
+ uint64_t qos:5;
+#else
+ uint64_t qos:5;
+ uint64_t static_q:1;
+ uint64_t s_tail:1;
+ uint64_t static_p:1;
+ uint64_t preemptee:1;
+ uint64_t reserved_9_15:7;
+ uint64_t doorbell:20;
+ uint64_t preempter:1;
+ uint64_t reserved_37_63:27;
+#endif
+ } cn68xx;
+};
+
+union cvmx_pko_mem_debug9 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug9_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63:15;
+ uint64_t ptrs0:17;
+ uint64_t reserved_0_31:32;
+#else
+ uint64_t reserved_0_31:32;
+ uint64_t ptrs0:17;
+ uint64_t reserved_49_63:15;
+#endif
+ } s;
+ struct cvmx_pko_mem_debug9_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t doorbell:20;
+ uint64_t reserved_5_7:3;
+ uint64_t s_tail:1;
+ uint64_t static_q:1;
+ uint64_t qos:3;
+#else
+ uint64_t qos:3;
+ uint64_t static_q:1;
+ uint64_t s_tail:1;
+ uint64_t reserved_5_7:3;
+ uint64_t doorbell:20;
+ uint64_t reserved_28_63:36;
+#endif
+ } cn30xx;
+ struct cvmx_pko_mem_debug9_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t doorbell:20;
+ uint64_t reserved_6_7:2;
+ uint64_t static_p:1;
+ uint64_t s_tail:1;
+ uint64_t static_q:1;
+ uint64_t qos:3;
+#else
+ uint64_t qos:3;
+ uint64_t static_q:1;
+ uint64_t s_tail:1;
+ uint64_t static_p:1;
+ uint64_t reserved_6_7:2;
+ uint64_t doorbell:20;
+ uint64_t reserved_28_63:36;
+#endif
+ } cn38xx;
+ struct cvmx_pko_mem_debug9_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_49_63:15;
+ uint64_t ptrs0:17;
+ uint64_t reserved_17_31:15;
+ uint64_t ptrs3:17;
+#else
+ uint64_t ptrs3:17;
+ uint64_t reserved_17_31:15;
+ uint64_t ptrs0:17;
+ uint64_t reserved_49_63:15;
+#endif
+ } cn50xx;
+};
+
+union cvmx_pko_mem_iport_ptrs {
+ uint64_t u64;
+ struct cvmx_pko_mem_iport_ptrs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63_63:1;
+ uint64_t crc:1;
+ uint64_t static_p:1;
+ uint64_t qos_mask:8;
+ uint64_t min_pkt:3;
+ uint64_t reserved_31_49:19;
+ uint64_t pipe:7;
+ uint64_t reserved_21_23:3;
+ uint64_t intr:5;
+ uint64_t reserved_13_15:3;
+ uint64_t eid:5;
+ uint64_t reserved_7_7:1;
+ uint64_t ipid:7;
+#else
+ uint64_t ipid:7;
+ uint64_t reserved_7_7:1;
+ uint64_t eid:5;
+ uint64_t reserved_13_15:3;
+ uint64_t intr:5;
+ uint64_t reserved_21_23:3;
+ uint64_t pipe:7;
+ uint64_t reserved_31_49:19;
+ uint64_t min_pkt:3;
+ uint64_t qos_mask:8;
+ uint64_t static_p:1;
+ uint64_t crc:1;
+ uint64_t reserved_63_63:1;
+#endif
+ } s;
+};
+
+union cvmx_pko_mem_iport_qos {
+ uint64_t u64;
+ struct cvmx_pko_mem_iport_qos_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63:3;
+ uint64_t qos_mask:8;
+ uint64_t reserved_13_52:40;
+ uint64_t eid:5;
+ uint64_t reserved_7_7:1;
+ uint64_t ipid:7;
+#else
+ uint64_t ipid:7;
+ uint64_t reserved_7_7:1;
+ uint64_t eid:5;
+ uint64_t reserved_13_52:40;
+ uint64_t qos_mask:8;
+ uint64_t reserved_61_63:3;
+#endif
+ } s;
+};
+
+union cvmx_pko_mem_iqueue_ptrs {
+ uint64_t u64;
+ struct cvmx_pko_mem_iqueue_ptrs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t s_tail:1;
+ uint64_t static_p:1;
+ uint64_t static_q:1;
+ uint64_t qos_mask:8;
+ uint64_t buf_ptr:31;
+ uint64_t tail:1;
+ uint64_t index:5;
+ uint64_t reserved_15_15:1;
+ uint64_t ipid:7;
+ uint64_t qid:8;
+#else
+ uint64_t qid:8;
+ uint64_t ipid:7;
+ uint64_t reserved_15_15:1;
+ uint64_t index:5;
+ uint64_t tail:1;
+ uint64_t buf_ptr:31;
+ uint64_t qos_mask:8;
+ uint64_t static_q:1;
+ uint64_t static_p:1;
+ uint64_t s_tail:1;
+#endif
+ } s;
+};
+
+union cvmx_pko_mem_iqueue_qos {
+ uint64_t u64;
+ struct cvmx_pko_mem_iqueue_qos_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63:3;
+ uint64_t qos_mask:8;
+ uint64_t reserved_15_52:38;
+ uint64_t ipid:7;
+ uint64_t qid:8;
+#else
+ uint64_t qid:8;
+ uint64_t ipid:7;
+ uint64_t reserved_15_52:38;
+ uint64_t qos_mask:8;
+ uint64_t reserved_61_63:3;
+#endif
+ } s;
+};
+
+union cvmx_pko_mem_port_ptrs {
+ uint64_t u64;
+ struct cvmx_pko_mem_port_ptrs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ uint64_t static_p:1;
+ uint64_t qos_mask:8;
+ uint64_t reserved_16_52:37;
+ uint64_t bp_port:6;
+ uint64_t eid:4;
+ uint64_t pid:6;
+#else
+ uint64_t pid:6;
+ uint64_t eid:4;
+ uint64_t bp_port:6;
+ uint64_t reserved_16_52:37;
+ uint64_t qos_mask:8;
+ uint64_t static_p:1;
+ uint64_t reserved_62_63:2;
+#endif
+ } s;
+};
+
+union cvmx_pko_mem_port_qos {
+ uint64_t u64;
+ struct cvmx_pko_mem_port_qos_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63:3;
+ uint64_t qos_mask:8;
+ uint64_t reserved_10_52:43;
+ uint64_t eid:4;
+ uint64_t pid:6;
+#else
+ uint64_t pid:6;
+ uint64_t eid:4;
+ uint64_t reserved_10_52:43;
+ uint64_t qos_mask:8;
+ uint64_t reserved_61_63:3;
+#endif
+ } s;
+};
+
+union cvmx_pko_mem_port_rate0 {
+ uint64_t u64;
+ struct cvmx_pko_mem_port_rate0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_51_63:13;
+ uint64_t rate_word:19;
+ uint64_t rate_pkt:24;
+ uint64_t reserved_7_7:1;
+ uint64_t pid:7;
+#else
+ uint64_t pid:7;
+ uint64_t reserved_7_7:1;
+ uint64_t rate_pkt:24;
+ uint64_t rate_word:19;
+ uint64_t reserved_51_63:13;
+#endif
+ } s;
+ struct cvmx_pko_mem_port_rate0_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_51_63:13;
+ uint64_t rate_word:19;
+ uint64_t rate_pkt:24;
+ uint64_t reserved_6_7:2;
+ uint64_t pid:6;
+#else
+ uint64_t pid:6;
+ uint64_t reserved_6_7:2;
+ uint64_t rate_pkt:24;
+ uint64_t rate_word:19;
+ uint64_t reserved_51_63:13;
+#endif
+ } cn52xx;
+};
+
+union cvmx_pko_mem_port_rate1 {
+ uint64_t u64;
+ struct cvmx_pko_mem_port_rate1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t rate_lim:24;
+ uint64_t reserved_7_7:1;
+ uint64_t pid:7;
+#else
+ uint64_t pid:7;
+ uint64_t reserved_7_7:1;
+ uint64_t rate_lim:24;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_pko_mem_port_rate1_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t rate_lim:24;
+ uint64_t reserved_6_7:2;
+ uint64_t pid:6;
+#else
+ uint64_t pid:6;
+ uint64_t reserved_6_7:2;
+ uint64_t rate_lim:24;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn52xx;
+};
+
+union cvmx_pko_mem_queue_ptrs {
+ uint64_t u64;
+ struct cvmx_pko_mem_queue_ptrs_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t s_tail:1;
+ uint64_t static_p:1;
+ uint64_t static_q:1;
+ uint64_t qos_mask:8;
+ uint64_t buf_ptr:36;
+ uint64_t tail:1;
+ uint64_t index:3;
+ uint64_t port:6;
+ uint64_t queue:7;
+#else
+ uint64_t queue:7;
+ uint64_t port:6;
+ uint64_t index:3;
+ uint64_t tail:1;
+ uint64_t buf_ptr:36;
+ uint64_t qos_mask:8;
+ uint64_t static_q:1;
+ uint64_t static_p:1;
+ uint64_t s_tail:1;
+#endif
+ } s;
+};
+
+union cvmx_pko_mem_queue_qos {
+ uint64_t u64;
+ struct cvmx_pko_mem_queue_qos_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63:3;
+ uint64_t qos_mask:8;
+ uint64_t reserved_13_52:40;
+ uint64_t pid:6;
+ uint64_t qid:7;
+#else
+ uint64_t qid:7;
+ uint64_t pid:6;
+ uint64_t reserved_13_52:40;
+ uint64_t qos_mask:8;
+ uint64_t reserved_61_63:3;
+#endif
+ } s;
+};
+
+union cvmx_pko_mem_throttle_int {
+ uint64_t u64;
+ struct cvmx_pko_mem_throttle_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63:17;
+ uint64_t word:15;
+ uint64_t reserved_14_31:18;
+ uint64_t packet:6;
+ uint64_t reserved_5_7:3;
+ uint64_t intr:5;
+#else
+ uint64_t intr:5;
+ uint64_t reserved_5_7:3;
+ uint64_t packet:6;
+ uint64_t reserved_14_31:18;
+ uint64_t word:15;
+ uint64_t reserved_47_63:17;
+#endif
+ } s;
+};
+
+union cvmx_pko_mem_throttle_pipe {
+ uint64_t u64;
+ struct cvmx_pko_mem_throttle_pipe_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63:17;
+ uint64_t word:15;
+ uint64_t reserved_14_31:18;
+ uint64_t packet:6;
+ uint64_t reserved_7_7:1;
+ uint64_t pipe:7;
+#else
+ uint64_t pipe:7;
+ uint64_t reserved_7_7:1;
+ uint64_t packet:6;
+ uint64_t reserved_14_31:18;
+ uint64_t word:15;
+ uint64_t reserved_47_63:17;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_bist_result {
+ uint64_t u64;
+ struct cvmx_pko_reg_bist_result_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_pko_reg_bist_result_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63:37;
+ uint64_t psb2:5;
+ uint64_t count:1;
+ uint64_t rif:1;
+ uint64_t wif:1;
+ uint64_t ncb:1;
+ uint64_t out:1;
+ uint64_t crc:1;
+ uint64_t chk:1;
+ uint64_t qsb:2;
+ uint64_t qcb:2;
+ uint64_t pdb:4;
+ uint64_t psb:7;
+#else
+ uint64_t psb:7;
+ uint64_t pdb:4;
+ uint64_t qcb:2;
+ uint64_t qsb:2;
+ uint64_t chk:1;
+ uint64_t crc:1;
+ uint64_t out:1;
+ uint64_t ncb:1;
+ uint64_t wif:1;
+ uint64_t rif:1;
+ uint64_t count:1;
+ uint64_t psb2:5;
+ uint64_t reserved_27_63:37;
+#endif
+ } cn30xx;
+ struct cvmx_pko_reg_bist_result_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63:31;
+ uint64_t csr:1;
+ uint64_t iob:1;
+ uint64_t out_crc:1;
+ uint64_t out_ctl:3;
+ uint64_t out_sta:1;
+ uint64_t out_wif:1;
+ uint64_t prt_chk:3;
+ uint64_t prt_nxt:1;
+ uint64_t prt_psb:6;
+ uint64_t ncb_inb:2;
+ uint64_t prt_qcb:2;
+ uint64_t prt_qsb:3;
+ uint64_t dat_dat:4;
+ uint64_t dat_ptr:4;
+#else
+ uint64_t dat_ptr:4;
+ uint64_t dat_dat:4;
+ uint64_t prt_qsb:3;
+ uint64_t prt_qcb:2;
+ uint64_t ncb_inb:2;
+ uint64_t prt_psb:6;
+ uint64_t prt_nxt:1;
+ uint64_t prt_chk:3;
+ uint64_t out_wif:1;
+ uint64_t out_sta:1;
+ uint64_t out_ctl:3;
+ uint64_t out_crc:1;
+ uint64_t iob:1;
+ uint64_t csr:1;
+ uint64_t reserved_33_63:31;
+#endif
+ } cn50xx;
+ struct cvmx_pko_reg_bist_result_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_35_63:29;
+ uint64_t csr:1;
+ uint64_t iob:1;
+ uint64_t out_dat:1;
+ uint64_t out_ctl:3;
+ uint64_t out_sta:1;
+ uint64_t out_wif:1;
+ uint64_t prt_chk:3;
+ uint64_t prt_nxt:1;
+ uint64_t prt_psb:8;
+ uint64_t ncb_inb:2;
+ uint64_t prt_qcb:2;
+ uint64_t prt_qsb:3;
+ uint64_t prt_ctl:2;
+ uint64_t dat_dat:2;
+ uint64_t dat_ptr:4;
+#else
+ uint64_t dat_ptr:4;
+ uint64_t dat_dat:2;
+ uint64_t prt_ctl:2;
+ uint64_t prt_qsb:3;
+ uint64_t prt_qcb:2;
+ uint64_t ncb_inb:2;
+ uint64_t prt_psb:8;
+ uint64_t prt_nxt:1;
+ uint64_t prt_chk:3;
+ uint64_t out_wif:1;
+ uint64_t out_sta:1;
+ uint64_t out_ctl:3;
+ uint64_t out_dat:1;
+ uint64_t iob:1;
+ uint64_t csr:1;
+ uint64_t reserved_35_63:29;
+#endif
+ } cn52xx;
+ struct cvmx_pko_reg_bist_result_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63:28;
+ uint64_t crc:1;
+ uint64_t csr:1;
+ uint64_t iob:1;
+ uint64_t out_dat:1;
+ uint64_t reserved_31_31:1;
+ uint64_t out_ctl:2;
+ uint64_t out_sta:1;
+ uint64_t out_wif:1;
+ uint64_t prt_chk:3;
+ uint64_t prt_nxt:1;
+ uint64_t prt_psb7:1;
+ uint64_t reserved_21_21:1;
+ uint64_t prt_psb:6;
+ uint64_t ncb_inb:2;
+ uint64_t prt_qcb:2;
+ uint64_t prt_qsb:3;
+ uint64_t prt_ctl:2;
+ uint64_t dat_dat:2;
+ uint64_t dat_ptr:4;
+#else
+ uint64_t dat_ptr:4;
+ uint64_t dat_dat:2;
+ uint64_t prt_ctl:2;
+ uint64_t prt_qsb:3;
+ uint64_t prt_qcb:2;
+ uint64_t ncb_inb:2;
+ uint64_t prt_psb:6;
+ uint64_t reserved_21_21:1;
+ uint64_t prt_psb7:1;
+ uint64_t prt_nxt:1;
+ uint64_t prt_chk:3;
+ uint64_t out_wif:1;
+ uint64_t out_sta:1;
+ uint64_t out_ctl:2;
+ uint64_t reserved_31_31:1;
+ uint64_t out_dat:1;
+ uint64_t iob:1;
+ uint64_t csr:1;
+ uint64_t crc:1;
+ uint64_t reserved_36_63:28;
+#endif
+ } cn68xx;
+ struct cvmx_pko_reg_bist_result_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_35_63:29;
+ uint64_t csr:1;
+ uint64_t iob:1;
+ uint64_t out_dat:1;
+ uint64_t reserved_31_31:1;
+ uint64_t out_ctl:2;
+ uint64_t out_sta:1;
+ uint64_t out_wif:1;
+ uint64_t prt_chk:3;
+ uint64_t prt_nxt:1;
+ uint64_t prt_psb7:1;
+ uint64_t reserved_21_21:1;
+ uint64_t prt_psb:6;
+ uint64_t ncb_inb:2;
+ uint64_t prt_qcb:2;
+ uint64_t prt_qsb:3;
+ uint64_t prt_ctl:2;
+ uint64_t dat_dat:2;
+ uint64_t dat_ptr:4;
+#else
+ uint64_t dat_ptr:4;
+ uint64_t dat_dat:2;
+ uint64_t prt_ctl:2;
+ uint64_t prt_qsb:3;
+ uint64_t prt_qcb:2;
+ uint64_t ncb_inb:2;
+ uint64_t prt_psb:6;
+ uint64_t reserved_21_21:1;
+ uint64_t prt_psb7:1;
+ uint64_t prt_nxt:1;
+ uint64_t prt_chk:3;
+ uint64_t out_wif:1;
+ uint64_t out_sta:1;
+ uint64_t out_ctl:2;
+ uint64_t reserved_31_31:1;
+ uint64_t out_dat:1;
+ uint64_t iob:1;
+ uint64_t csr:1;
+ uint64_t reserved_35_63:29;
+#endif
+ } cn68xxp1;
+};
+
+union cvmx_pko_reg_cmd_buf {
+ uint64_t u64;
+ struct cvmx_pko_reg_cmd_buf_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_23_63:41;
+ uint64_t pool:3;
+ uint64_t reserved_13_19:7;
+ uint64_t size:13;
+#else
+ uint64_t size:13;
+ uint64_t reserved_13_19:7;
+ uint64_t pool:3;
+ uint64_t reserved_23_63:41;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_crc_ctlx {
+ uint64_t u64;
+ struct cvmx_pko_reg_crc_ctlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t invres:1;
+ uint64_t refin:1;
+#else
+ uint64_t refin:1;
+ uint64_t invres:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_crc_enable {
+ uint64_t u64;
+ struct cvmx_pko_reg_crc_enable_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t enable:32;
+#else
+ uint64_t enable:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_crc_ivx {
+ uint64_t u64;
+ struct cvmx_pko_reg_crc_ivx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t iv:32;
+#else
+ uint64_t iv:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_debug0 {
+ uint64_t u64;
+ struct cvmx_pko_reg_debug0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t asserts:64;
+#else
+ uint64_t asserts:64;
+#endif
+ } s;
+ struct cvmx_pko_reg_debug0_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t asserts:17;
+#else
+ uint64_t asserts:17;
+ uint64_t reserved_17_63:47;
+#endif
+ } cn30xx;
+};
+
+union cvmx_pko_reg_debug1 {
+ uint64_t u64;
+ struct cvmx_pko_reg_debug1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t asserts:64;
+#else
+ uint64_t asserts:64;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_debug2 {
+ uint64_t u64;
+ struct cvmx_pko_reg_debug2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t asserts:64;
+#else
+ uint64_t asserts:64;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_debug3 {
+ uint64_t u64;
+ struct cvmx_pko_reg_debug3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t asserts:64;
+#else
+ uint64_t asserts:64;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_debug4 {
+ uint64_t u64;
+ struct cvmx_pko_reg_debug4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t asserts:64;
+#else
+ uint64_t asserts:64;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_engine_inflight {
+ uint64_t u64;
+ struct cvmx_pko_reg_engine_inflight_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t engine15:4;
+ uint64_t engine14:4;
+ uint64_t engine13:4;
+ uint64_t engine12:4;
+ uint64_t engine11:4;
+ uint64_t engine10:4;
+ uint64_t engine9:4;
+ uint64_t engine8:4;
+ uint64_t engine7:4;
+ uint64_t engine6:4;
+ uint64_t engine5:4;
+ uint64_t engine4:4;
+ uint64_t engine3:4;
+ uint64_t engine2:4;
+ uint64_t engine1:4;
+ uint64_t engine0:4;
+#else
+ uint64_t engine0:4;
+ uint64_t engine1:4;
+ uint64_t engine2:4;
+ uint64_t engine3:4;
+ uint64_t engine4:4;
+ uint64_t engine5:4;
+ uint64_t engine6:4;
+ uint64_t engine7:4;
+ uint64_t engine8:4;
+ uint64_t engine9:4;
+ uint64_t engine10:4;
+ uint64_t engine11:4;
+ uint64_t engine12:4;
+ uint64_t engine13:4;
+ uint64_t engine14:4;
+ uint64_t engine15:4;
+#endif
+ } s;
+ struct cvmx_pko_reg_engine_inflight_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t engine9:4;
+ uint64_t engine8:4;
+ uint64_t engine7:4;
+ uint64_t engine6:4;
+ uint64_t engine5:4;
+ uint64_t engine4:4;
+ uint64_t engine3:4;
+ uint64_t engine2:4;
+ uint64_t engine1:4;
+ uint64_t engine0:4;
+#else
+ uint64_t engine0:4;
+ uint64_t engine1:4;
+ uint64_t engine2:4;
+ uint64_t engine3:4;
+ uint64_t engine4:4;
+ uint64_t engine5:4;
+ uint64_t engine6:4;
+ uint64_t engine7:4;
+ uint64_t engine8:4;
+ uint64_t engine9:4;
+ uint64_t reserved_40_63:24;
+#endif
+ } cn52xx;
+ struct cvmx_pko_reg_engine_inflight_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63:8;
+ uint64_t engine13:4;
+ uint64_t engine12:4;
+ uint64_t engine11:4;
+ uint64_t engine10:4;
+ uint64_t engine9:4;
+ uint64_t engine8:4;
+ uint64_t engine7:4;
+ uint64_t engine6:4;
+ uint64_t engine5:4;
+ uint64_t engine4:4;
+ uint64_t engine3:4;
+ uint64_t engine2:4;
+ uint64_t engine1:4;
+ uint64_t engine0:4;
+#else
+ uint64_t engine0:4;
+ uint64_t engine1:4;
+ uint64_t engine2:4;
+ uint64_t engine3:4;
+ uint64_t engine4:4;
+ uint64_t engine5:4;
+ uint64_t engine6:4;
+ uint64_t engine7:4;
+ uint64_t engine8:4;
+ uint64_t engine9:4;
+ uint64_t engine10:4;
+ uint64_t engine11:4;
+ uint64_t engine12:4;
+ uint64_t engine13:4;
+ uint64_t reserved_56_63:8;
+#endif
+ } cn61xx;
+ struct cvmx_pko_reg_engine_inflight_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t engine11:4;
+ uint64_t engine10:4;
+ uint64_t engine9:4;
+ uint64_t engine8:4;
+ uint64_t engine7:4;
+ uint64_t engine6:4;
+ uint64_t engine5:4;
+ uint64_t engine4:4;
+ uint64_t engine3:4;
+ uint64_t engine2:4;
+ uint64_t engine1:4;
+ uint64_t engine0:4;
+#else
+ uint64_t engine0:4;
+ uint64_t engine1:4;
+ uint64_t engine2:4;
+ uint64_t engine3:4;
+ uint64_t engine4:4;
+ uint64_t engine5:4;
+ uint64_t engine6:4;
+ uint64_t engine7:4;
+ uint64_t engine8:4;
+ uint64_t engine9:4;
+ uint64_t engine10:4;
+ uint64_t engine11:4;
+ uint64_t reserved_48_63:16;
+#endif
+ } cn63xx;
+};
+
+union cvmx_pko_reg_engine_inflight1 {
+ uint64_t u64;
+ struct cvmx_pko_reg_engine_inflight1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t engine19:4;
+ uint64_t engine18:4;
+ uint64_t engine17:4;
+ uint64_t engine16:4;
+#else
+ uint64_t engine16:4;
+ uint64_t engine17:4;
+ uint64_t engine18:4;
+ uint64_t engine19:4;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_engine_storagex {
+ uint64_t u64;
+ struct cvmx_pko_reg_engine_storagex_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t engine15:4;
+ uint64_t engine14:4;
+ uint64_t engine13:4;
+ uint64_t engine12:4;
+ uint64_t engine11:4;
+ uint64_t engine10:4;
+ uint64_t engine9:4;
+ uint64_t engine8:4;
+ uint64_t engine7:4;
+ uint64_t engine6:4;
+ uint64_t engine5:4;
+ uint64_t engine4:4;
+ uint64_t engine3:4;
+ uint64_t engine2:4;
+ uint64_t engine1:4;
+ uint64_t engine0:4;
+#else
+ uint64_t engine0:4;
+ uint64_t engine1:4;
+ uint64_t engine2:4;
+ uint64_t engine3:4;
+ uint64_t engine4:4;
+ uint64_t engine5:4;
+ uint64_t engine6:4;
+ uint64_t engine7:4;
+ uint64_t engine8:4;
+ uint64_t engine9:4;
+ uint64_t engine10:4;
+ uint64_t engine11:4;
+ uint64_t engine12:4;
+ uint64_t engine13:4;
+ uint64_t engine14:4;
+ uint64_t engine15:4;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_engine_thresh {
+ uint64_t u64;
+ struct cvmx_pko_reg_engine_thresh_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t mask:20;
+#else
+ uint64_t mask:20;
+ uint64_t reserved_20_63:44;
+#endif
+ } s;
+ struct cvmx_pko_reg_engine_thresh_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t mask:10;
+#else
+ uint64_t mask:10;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn52xx;
+ struct cvmx_pko_reg_engine_thresh_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t mask:14;
+#else
+ uint64_t mask:14;
+ uint64_t reserved_14_63:50;
+#endif
+ } cn61xx;
+ struct cvmx_pko_reg_engine_thresh_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t mask:12;
+#else
+ uint64_t mask:12;
+ uint64_t reserved_12_63:52;
+#endif
+ } cn63xx;
+};
+
+union cvmx_pko_reg_error {
+ uint64_t u64;
+ struct cvmx_pko_reg_error_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t loopback:1;
+ uint64_t currzero:1;
+ uint64_t doorbell:1;
+ uint64_t parity:1;
+#else
+ uint64_t parity:1;
+ uint64_t doorbell:1;
+ uint64_t currzero:1;
+ uint64_t loopback:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+ struct cvmx_pko_reg_error_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t doorbell:1;
+ uint64_t parity:1;
+#else
+ uint64_t parity:1;
+ uint64_t doorbell:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } cn30xx;
+ struct cvmx_pko_reg_error_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t currzero:1;
+ uint64_t doorbell:1;
+ uint64_t parity:1;
+#else
+ uint64_t parity:1;
+ uint64_t doorbell:1;
+ uint64_t currzero:1;
+ uint64_t reserved_3_63:61;
+#endif
+ } cn50xx;
+};
+
+union cvmx_pko_reg_flags {
+ uint64_t u64;
+ struct cvmx_pko_reg_flags_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t dis_perf3:1;
+ uint64_t dis_perf2:1;
+ uint64_t dis_perf1:1;
+ uint64_t dis_perf0:1;
+ uint64_t ena_throttle:1;
+ uint64_t reset:1;
+ uint64_t store_be:1;
+ uint64_t ena_dwb:1;
+ uint64_t ena_pko:1;
+#else
+ uint64_t ena_pko:1;
+ uint64_t ena_dwb:1;
+ uint64_t store_be:1;
+ uint64_t reset:1;
+ uint64_t ena_throttle:1;
+ uint64_t dis_perf0:1;
+ uint64_t dis_perf1:1;
+ uint64_t dis_perf2:1;
+ uint64_t dis_perf3:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } s;
+ struct cvmx_pko_reg_flags_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t reset:1;
+ uint64_t store_be:1;
+ uint64_t ena_dwb:1;
+ uint64_t ena_pko:1;
+#else
+ uint64_t ena_pko:1;
+ uint64_t ena_dwb:1;
+ uint64_t store_be:1;
+ uint64_t reset:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } cn30xx;
+ struct cvmx_pko_reg_flags_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t dis_perf3:1;
+ uint64_t dis_perf2:1;
+ uint64_t reserved_4_6:3;
+ uint64_t reset:1;
+ uint64_t store_be:1;
+ uint64_t ena_dwb:1;
+ uint64_t ena_pko:1;
+#else
+ uint64_t ena_pko:1;
+ uint64_t ena_dwb:1;
+ uint64_t store_be:1;
+ uint64_t reset:1;
+ uint64_t reserved_4_6:3;
+ uint64_t dis_perf2:1;
+ uint64_t dis_perf3:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn61xx;
+ struct cvmx_pko_reg_flags_cn68xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t dis_perf1:1;
+ uint64_t dis_perf0:1;
+ uint64_t ena_throttle:1;
+ uint64_t reset:1;
+ uint64_t store_be:1;
+ uint64_t ena_dwb:1;
+ uint64_t ena_pko:1;
+#else
+ uint64_t ena_pko:1;
+ uint64_t ena_dwb:1;
+ uint64_t store_be:1;
+ uint64_t reset:1;
+ uint64_t ena_throttle:1;
+ uint64_t dis_perf0:1;
+ uint64_t dis_perf1:1;
+ uint64_t reserved_7_63:57;
+#endif
+ } cn68xxp1;
+};
+
+union cvmx_pko_reg_gmx_port_mode {
+ uint64_t u64;
+ struct cvmx_pko_reg_gmx_port_mode_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t mode1:3;
+ uint64_t mode0:3;
+#else
+ uint64_t mode0:3;
+ uint64_t mode1:3;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_int_mask {
+ uint64_t u64;
+ struct cvmx_pko_reg_int_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t loopback:1;
+ uint64_t currzero:1;
+ uint64_t doorbell:1;
+ uint64_t parity:1;
+#else
+ uint64_t parity:1;
+ uint64_t doorbell:1;
+ uint64_t currzero:1;
+ uint64_t loopback:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+ struct cvmx_pko_reg_int_mask_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t doorbell:1;
+ uint64_t parity:1;
+#else
+ uint64_t parity:1;
+ uint64_t doorbell:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } cn30xx;
+ struct cvmx_pko_reg_int_mask_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t currzero:1;
+ uint64_t doorbell:1;
+ uint64_t parity:1;
+#else
+ uint64_t parity:1;
+ uint64_t doorbell:1;
+ uint64_t currzero:1;
+ uint64_t reserved_3_63:61;
+#endif
+ } cn50xx;
+};
+
+union cvmx_pko_reg_loopback_bpid {
+ uint64_t u64;
+ struct cvmx_pko_reg_loopback_bpid_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63:5;
+ uint64_t bpid7:6;
+ uint64_t reserved_52_52:1;
+ uint64_t bpid6:6;
+ uint64_t reserved_45_45:1;
+ uint64_t bpid5:6;
+ uint64_t reserved_38_38:1;
+ uint64_t bpid4:6;
+ uint64_t reserved_31_31:1;
+ uint64_t bpid3:6;
+ uint64_t reserved_24_24:1;
+ uint64_t bpid2:6;
+ uint64_t reserved_17_17:1;
+ uint64_t bpid1:6;
+ uint64_t reserved_10_10:1;
+ uint64_t bpid0:6;
+ uint64_t reserved_0_3:4;
+#else
+ uint64_t reserved_0_3:4;
+ uint64_t bpid0:6;
+ uint64_t reserved_10_10:1;
+ uint64_t bpid1:6;
+ uint64_t reserved_17_17:1;
+ uint64_t bpid2:6;
+ uint64_t reserved_24_24:1;
+ uint64_t bpid3:6;
+ uint64_t reserved_31_31:1;
+ uint64_t bpid4:6;
+ uint64_t reserved_38_38:1;
+ uint64_t bpid5:6;
+ uint64_t reserved_45_45:1;
+ uint64_t bpid6:6;
+ uint64_t reserved_52_52:1;
+ uint64_t bpid7:6;
+ uint64_t reserved_59_63:5;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_loopback_pkind {
+ uint64_t u64;
+ struct cvmx_pko_reg_loopback_pkind_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63:5;
+ uint64_t pkind7:6;
+ uint64_t reserved_52_52:1;
+ uint64_t pkind6:6;
+ uint64_t reserved_45_45:1;
+ uint64_t pkind5:6;
+ uint64_t reserved_38_38:1;
+ uint64_t pkind4:6;
+ uint64_t reserved_31_31:1;
+ uint64_t pkind3:6;
+ uint64_t reserved_24_24:1;
+ uint64_t pkind2:6;
+ uint64_t reserved_17_17:1;
+ uint64_t pkind1:6;
+ uint64_t reserved_10_10:1;
+ uint64_t pkind0:6;
+ uint64_t num_ports:4;
+#else
+ uint64_t num_ports:4;
+ uint64_t pkind0:6;
+ uint64_t reserved_10_10:1;
+ uint64_t pkind1:6;
+ uint64_t reserved_17_17:1;
+ uint64_t pkind2:6;
+ uint64_t reserved_24_24:1;
+ uint64_t pkind3:6;
+ uint64_t reserved_31_31:1;
+ uint64_t pkind4:6;
+ uint64_t reserved_38_38:1;
+ uint64_t pkind5:6;
+ uint64_t reserved_45_45:1;
+ uint64_t pkind6:6;
+ uint64_t reserved_52_52:1;
+ uint64_t pkind7:6;
+ uint64_t reserved_59_63:5;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_min_pkt {
+ uint64_t u64;
+ struct cvmx_pko_reg_min_pkt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t size7:8;
+ uint64_t size6:8;
+ uint64_t size5:8;
+ uint64_t size4:8;
+ uint64_t size3:8;
+ uint64_t size2:8;
+ uint64_t size1:8;
+ uint64_t size0:8;
+#else
+ uint64_t size0:8;
+ uint64_t size1:8;
+ uint64_t size2:8;
+ uint64_t size3:8;
+ uint64_t size4:8;
+ uint64_t size5:8;
+ uint64_t size6:8;
+ uint64_t size7:8;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_preempt {
+ uint64_t u64;
+ struct cvmx_pko_reg_preempt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t min_size:16;
+#else
+ uint64_t min_size:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_queue_mode {
+ uint64_t u64;
+ struct cvmx_pko_reg_queue_mode_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t mode:2;
+#else
+ uint64_t mode:2;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_queue_preempt {
+ uint64_t u64;
+ struct cvmx_pko_reg_queue_preempt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t preemptee:1;
+ uint64_t preempter:1;
+#else
+ uint64_t preempter:1;
+ uint64_t preemptee:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_queue_ptrs1 {
+ uint64_t u64;
+ struct cvmx_pko_reg_queue_ptrs1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t idx3:1;
+ uint64_t qid7:1;
+#else
+ uint64_t qid7:1;
+ uint64_t idx3:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_read_idx {
+ uint64_t u64;
+ struct cvmx_pko_reg_read_idx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t inc:8;
+ uint64_t index:8;
+#else
+ uint64_t index:8;
+ uint64_t inc:8;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_throttle {
+ uint64_t u64;
+ struct cvmx_pko_reg_throttle_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t int_mask:32;
+#else
+ uint64_t int_mask:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pko_reg_timestamp {
+ uint64_t u64;
+ struct cvmx_pko_reg_timestamp_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t wqe_word:4;
+#else
+ uint64_t wqe_word:4;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pko.h b/arch/mips/include/asm/octeon/cvmx-pko.h
new file mode 100644
index 000000000..03fb64b13
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-pko.h
@@ -0,0 +1,643 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * Interface to the hardware Packet Output unit.
+ *
+ * Starting with SDK 1.7.0, the PKO output functions now support
+ * two types of locking. CVMX_PKO_LOCK_ATOMIC_TAG continues to
+ * function similarly to previous SDKs by using POW atomic tags
+ * to preserve ordering and exclusivity. As a new option, you
+ * can now pass CVMX_PKO_LOCK_CMD_QUEUE, which uses ll/sc
+ * memory based locking instead. This locking has the advantage
+ * of not affecting the tag state but doesn't preserve packet
+ * ordering. CVMX_PKO_LOCK_CMD_QUEUE is appropriate in most
+ * generic code, while CVMX_PKO_LOCK_ATOMIC_TAG should be used
+ * with hand tuned fast path code.
+ *
+ * Some other SDK differences visible with command queuing:
+ * - PKO indexes are no longer stored in the FAU. A large
+ * percentage of the FAU register block used to be tied up
+ * maintaining PKO queue pointers. These are now stored in a
+ * global named block.
+ * - The PKO <b>use_locking</b> parameter can now have a global
+ * effect. Since all applications use the same named block,
+ * queue locking correctly applies across all operating
+ * systems when using CVMX_PKO_LOCK_CMD_QUEUE.
+ * - PKO 3 word commands are now supported. Use
+ * cvmx_pko_send_packet_finish3().
+ *
+ */
+
+#ifndef __CVMX_PKO_H__
+#define __CVMX_PKO_H__
+
+#include <asm/octeon/cvmx-fpa.h>
+#include <asm/octeon/cvmx-pow.h>
+#include <asm/octeon/cvmx-cmd-queue.h>
+#include <asm/octeon/cvmx-pko-defs.h>
+
+/* Adjust the command buffer size by 1 word so that in the case of using only
+ * two word PKO commands no command words straddle buffers. The useful values
+ * for this are 0 and 1. */
+#define CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST (1)
+
+#define CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC 256
+#define CVMX_PKO_MAX_OUTPUT_QUEUES ((OCTEON_IS_MODEL(OCTEON_CN31XX) || \
+ OCTEON_IS_MODEL(OCTEON_CN3010) || OCTEON_IS_MODEL(OCTEON_CN3005) || \
+ OCTEON_IS_MODEL(OCTEON_CN50XX)) ? 32 : \
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) || \
+ OCTEON_IS_MODEL(OCTEON_CN56XX)) ? 256 : 128)
+#define CVMX_PKO_NUM_OUTPUT_PORTS 40
+/* use this for queues that are not used */
+#define CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID 63
+#define CVMX_PKO_QUEUE_STATIC_PRIORITY 9
+#define CVMX_PKO_ILLEGAL_QUEUE 0xFFFF
+#define CVMX_PKO_MAX_QUEUE_DEPTH 0
+
+typedef enum {
+ CVMX_PKO_SUCCESS,
+ CVMX_PKO_INVALID_PORT,
+ CVMX_PKO_INVALID_QUEUE,
+ CVMX_PKO_INVALID_PRIORITY,
+ CVMX_PKO_NO_MEMORY,
+ CVMX_PKO_PORT_ALREADY_SETUP,
+ CVMX_PKO_CMD_QUEUE_INIT_ERROR
+} cvmx_pko_status_t;
+
+/**
+ * This enumeration represents the different locking modes supported by PKO.
+ */
+typedef enum {
+ /*
+ * PKO doesn't do any locking. It is the responsibility of the
+ * application to make sure that no other core is accessing
+ * the same queue at the same time
+ */
+ CVMX_PKO_LOCK_NONE = 0,
+ /*
+	 * PKO performs an atomic tagswitch to ensure exclusive
+ * to the output queue. This will maintain packet ordering on
+ * output.
+ */
+ CVMX_PKO_LOCK_ATOMIC_TAG = 1,
+ /*
+	 * PKO uses the common command queue locks to ensure exclusive
+ * access to the output queue. This is a memory based
+ * ll/sc. This is the most portable locking mechanism.
+ */
+ CVMX_PKO_LOCK_CMD_QUEUE = 2,
+} cvmx_pko_lock_t;
+
+typedef struct {
+ uint32_t packets;
+ uint64_t octets;
+ uint64_t doorbell;
+} cvmx_pko_port_status_t;
+
+/**
+ * This structure defines the address to use on a packet enqueue
+ */
+typedef union {
+ uint64_t u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		/* Must be CVMX_IO_SEG */
+ uint64_t mem_space:2;
+ /* Must be zero */
+ uint64_t reserved:13;
+ /* Must be one */
+ uint64_t is_io:1;
+ /* The ID of the device on the non-coherent bus */
+ uint64_t did:8;
+ /* Must be zero */
+ uint64_t reserved2:4;
+ /* Must be zero */
+ uint64_t reserved3:18;
+ /*
+ * The hardware likes to have the output port in
+		 * addition to the output queue.
+ */
+ uint64_t port:6;
+ /*
+ * The output queue to send the packet to (0-127 are
+ * legal)
+ */
+ uint64_t queue:9;
+ /* Must be zero */
+ uint64_t reserved4:3;
+#else
+ uint64_t reserved4:3;
+ uint64_t queue:9;
+		uint64_t port:6;
+		uint64_t reserved3:18;
+ uint64_t reserved2:4;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t reserved:13;
+ uint64_t mem_space:2;
+#endif
+ } s;
+} cvmx_pko_doorbell_address_t;
+
+/**
+ * Structure of the first packet output command word.
+ */
+union cvmx_pko_command_word0 {
+ uint64_t u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /*
+ * The size of the reg1 operation - could be 8, 16,
+ * 32, or 64 bits.
+ */
+ uint64_t size1:2;
+ /*
+ * The size of the reg0 operation - could be 8, 16,
+ * 32, or 64 bits.
+ */
+ uint64_t size0:2;
+ /*
+ * If set, subtract 1, if clear, subtract packet
+ * size.
+ */
+ uint64_t subone1:1;
+ /*
+		 * The FAU register to update; the subtract will be
+		 * done only if reg1 is non-zero.
+ */
+ uint64_t reg1:11;
+ /* If set, subtract 1, if clear, subtract packet size */
+ uint64_t subone0:1;
+		/* The FAU register to update; subtract only if reg0 is non-zero */
+ uint64_t reg0:11;
+ /*
+ * When set, interpret segment pointer and segment
+ * bytes in little endian order.
+ */
+ uint64_t le:1;
+ /*
+ * When set, packet data not allocated in L2 cache by
+ * PKO.
+ */
+ uint64_t n2:1;
+ /*
+ * If set and rsp is set, word3 contains a pointer to
+ * a work queue entry.
+ */
+ uint64_t wqp:1;
+ /* If set, the hardware will send a response when done */
+ uint64_t rsp:1;
+ /*
+ * If set, the supplied pkt_ptr is really a pointer to
+ * a list of pkt_ptr's.
+ */
+ uint64_t gather:1;
+ /*
+ * If ipoffp1 is non zero, (ipoffp1-1) is the number
+ * of bytes to IP header, and the hardware will
+ * calculate and insert the UDP/TCP checksum.
+ */
+ uint64_t ipoffp1:7;
+ /*
+ * If set, ignore the I bit (force to zero) from all
+ * pointer structures.
+ */
+ uint64_t ignore_i:1;
+ /*
+ * If clear, the hardware will attempt to free the
+ * buffers containing the packet.
+ */
+ uint64_t dontfree:1;
+ /*
+ * The total number of segs in the packet, if gather
+ * set, also gather list length.
+ */
+ uint64_t segs:6;
+ /* Including L2, but no trailing CRC */
+ uint64_t total_bytes:16;
+#else
+ uint64_t total_bytes:16;
+ uint64_t segs:6;
+ uint64_t dontfree:1;
+ uint64_t ignore_i:1;
+ uint64_t ipoffp1:7;
+ uint64_t gather:1;
+ uint64_t rsp:1;
+ uint64_t wqp:1;
+ uint64_t n2:1;
+ uint64_t le:1;
+ uint64_t reg0:11;
+ uint64_t subone0:1;
+ uint64_t reg1:11;
+ uint64_t subone1:1;
+ uint64_t size0:2;
+ uint64_t size1:2;
+#endif
+ } s;
+};
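+
+/*
+ * Example (illustrative sketch): fill in word0 for a single segment
+ * packet that the hardware should free after transmission. The
+ * packet_length value is a placeholder.
+ *
+ *	union cvmx_pko_command_word0 pko_command;
+ *
+ *	pko_command.u64 = 0;
+ *	pko_command.s.segs = 1;
+ *	pko_command.s.total_bytes = packet_length;
+ *	pko_command.s.dontfree = 0;
+ */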
+
+/* CSR typedefs have been moved to cvmx-csr-*.h */
+
+/**
+ * Definition of internal state for Packet output processing
+ */
+typedef struct {
+ /* ptr to start of buffer, offset kept in FAU reg */
+ uint64_t *start_ptr;
+} cvmx_pko_state_elem_t;
+
+/**
+ * Call before any other calls to initialize the packet
+ * output system.
+ */
+extern void cvmx_pko_initialize_global(void);
+extern int cvmx_pko_initialize_local(void);
+
+/**
+ * Enables the packet output hardware. It must already be
+ * configured.
+ */
+extern void cvmx_pko_enable(void);
+
+/**
+ * Disables the packet output. Does not affect any configuration.
+ */
+extern void cvmx_pko_disable(void);
+
+/**
+ * Shutdown and free resources required by packet output.
+ */
+extern void cvmx_pko_shutdown(void);
+
+/**
+ * Configure an output port and the associated queues for use.
+ *
+ * @port: Port to configure.
+ * @base_queue: First queue number to associate with this port.
+ * @num_queues: Number of queues to associate with this port.
+ * @priority: Array of priority levels for each queue. Values are
+ * allowed to be 1-8. A value of 8 gets 8 times the traffic
+ * of a value of 1. There must be num_queues elements in the
+ * array.
+ */
+extern cvmx_pko_status_t cvmx_pko_config_port(uint64_t port,
+ uint64_t base_queue,
+ uint64_t num_queues,
+ const uint64_t priority[]);
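+
+/*
+ * Example (sketch, with an illustrative priority layout): giving a
+ * port four queues of equal weight.
+ *
+ *	static const uint64_t prio[4] = { 1, 1, 1, 1 };
+ *
+ *	cvmx_pko_config_port(port, cvmx_pko_get_base_queue(port), 4, prio);
+ */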
+
+/**
+ * Ring the packet output doorbell. This tells the packet
+ * output hardware that "len" command words have been added
+ * to its pending list. This command includes the required
+ * CVMX_SYNCWS before the doorbell ring.
+ *
+ * @port: Port the packet is for
+ * @queue: Queue the packet is for
+ * @len: Length of the command in 64 bit words
+ */
+static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue,
+ uint64_t len)
+{
+ cvmx_pko_doorbell_address_t ptr;
+
+ ptr.u64 = 0;
+ ptr.s.mem_space = CVMX_IO_SEG;
+ ptr.s.did = CVMX_OCT_DID_PKT_SEND;
+ ptr.s.is_io = 1;
+ ptr.s.port = port;
+ ptr.s.queue = queue;
+ /*
+ * Need to make sure output queue data is in DRAM before
+ * doorbell write.
+ */
+ CVMX_SYNCWS;
+ cvmx_write_io(ptr.u64, len);
+}
+
+/**
+ * Prepare to send a packet. This may initiate a tag switch to
+ * get exclusive access to the output queue structure, and
+ * performs other prep work for the packet send operation.
+ *
+ * cvmx_pko_send_packet_finish() MUST be called after this function is called,
+ * and must be called with the same port/queue/use_locking arguments.
+ *
+ * The use_locking parameter allows the caller to use three
+ * possible locking modes.
+ * - CVMX_PKO_LOCK_NONE
+ * - PKO doesn't do any locking. It is the responsibility
+ * of the application to make sure that no other core
+ * is accessing the same queue at the same time.
+ * - CVMX_PKO_LOCK_ATOMIC_TAG
+ * - PKO performs an atomic tag switch to ensure exclusive
+ * access to the output queue. This will maintain
+ * packet ordering on output.
+ * - CVMX_PKO_LOCK_CMD_QUEUE
+ * - PKO uses the common command queue locks to ensure
+ * exclusive access to the output queue. This is a
+ * memory based ll/sc. This is the most portable
+ * locking mechanism.
+ *
+ * NOTE: If atomic locking is used, the POW entry CANNOT be
+ * descheduled, as it does not contain a valid WQE pointer.
+ *
+ * @port: Port to send it on
+ * @queue: Queue to use
+ * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
+ * CVMX_PKO_LOCK_CMD_QUEUE
+ */
+static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
+ cvmx_pko_lock_t use_locking)
+{
+ if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG) {
+ /*
+ * Must do a full switch here to handle all cases. We
+ * use a fake WQE pointer, as the POW does not access
+ * this memory. The WQE pointer and group are only
+ * used if this work is descheduled, which is not
+ * supported by the
+ * cvmx_pko_send_packet_prepare/cvmx_pko_send_packet_finish
+ * combination. Note that this is a special case in
+ * which these fake values can be used - this is not a
+ * general technique.
+ */
+ uint32_t tag =
+ CVMX_TAG_SW_BITS_INTERNAL << CVMX_TAG_SW_SHIFT |
+ CVMX_TAG_SUBGROUP_PKO << CVMX_TAG_SUBGROUP_SHIFT |
+ (CVMX_TAG_SUBGROUP_MASK & queue);
+ cvmx_pow_tag_sw_full((struct cvmx_wqe *) cvmx_phys_to_ptr(0x80), tag,
+ CVMX_POW_TAG_TYPE_ATOMIC, 0);
+ }
+}
+
+/**
+ * Complete packet output. cvmx_pko_send_packet_prepare() must be
+ * called exactly once before this, and the same parameters must be
+ * passed to both cvmx_pko_send_packet_prepare() and
+ * cvmx_pko_send_packet_finish().
+ *
+ * @port: Port to send it on
+ * @queue: Queue to use
+ * @pko_command:
+ * PKO HW command word
+ * @packet: Packet to send
+ * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
+ * CVMX_PKO_LOCK_CMD_QUEUE
+ *
+ * Returns: CVMX_PKO_SUCCESS on success, or error code on
+ * failure of output
+ */
+static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(
+ uint64_t port,
+ uint64_t queue,
+ union cvmx_pko_command_word0 pko_command,
+ union cvmx_buf_ptr packet,
+ cvmx_pko_lock_t use_locking)
+{
+ cvmx_cmd_queue_result_t result;
+ if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+ cvmx_pow_tag_sw_wait();
+ result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
+ (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
+ pko_command.u64, packet.u64);
+ if (likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
+ cvmx_pko_doorbell(port, queue, 2);
+ return CVMX_PKO_SUCCESS;
+ } else if ((result == CVMX_CMD_QUEUE_NO_MEMORY)
+ || (result == CVMX_CMD_QUEUE_FULL)) {
+ return CVMX_PKO_NO_MEMORY;
+ } else {
+ return CVMX_PKO_INVALID_QUEUE;
+ }
+}
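+
+/*
+ * Illustrative sketch only: a minimal send path built from the
+ * prepare/finish pair above, using CVMX_PKO_LOCK_CMD_QUEUE (for which
+ * the prepare step is a no-op, but calling it keeps the code correct
+ * for the other locking modes).  Building pko_command and packet is
+ * left to the caller.
+ */
+static inline cvmx_pko_status_t cvmx_pko_send_simple(
+ uint64_t port,
+ uint64_t queue,
+ union cvmx_pko_command_word0 pko_command,
+ union cvmx_buf_ptr packet)
+{
+ cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_CMD_QUEUE);
+ return cvmx_pko_send_packet_finish(port, queue, pko_command, packet,
+ CVMX_PKO_LOCK_CMD_QUEUE);
+}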
+
+/**
+ * Complete packet output. cvmx_pko_send_packet_prepare() must be
+ * called exactly once before this, and the same parameters must be
+ * passed to both cvmx_pko_send_packet_prepare() and
+ * cvmx_pko_send_packet_finish().
+ *
+ * @port: Port to send it on
+ * @queue: Queue to use
+ * @pko_command:
+ * PKO HW command word
+ * @packet: Packet to send
+ * @addr: Physical address of a work queue entry or physical address
+ * to zero on completion.
+ * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
+ * CVMX_PKO_LOCK_CMD_QUEUE
+ *
+ * Returns: CVMX_PKO_SUCCESS on success, or error code on
+ * failure of output
+ */
+static inline cvmx_pko_status_t cvmx_pko_send_packet_finish3(
+ uint64_t port,
+ uint64_t queue,
+ union cvmx_pko_command_word0 pko_command,
+ union cvmx_buf_ptr packet,
+ uint64_t addr,
+ cvmx_pko_lock_t use_locking)
+{
+ cvmx_cmd_queue_result_t result;
+ if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+ cvmx_pow_tag_sw_wait();
+ result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
+ (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
+ pko_command.u64, packet.u64, addr);
+ if (likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
+ cvmx_pko_doorbell(port, queue, 3);
+ return CVMX_PKO_SUCCESS;
+ } else if ((result == CVMX_CMD_QUEUE_NO_MEMORY)
+ || (result == CVMX_CMD_QUEUE_FULL)) {
+ return CVMX_PKO_NO_MEMORY;
+ } else {
+ return CVMX_PKO_INVALID_QUEUE;
+ }
+}
+
+/**
+ * Return the pko output queue associated with a port and a specific core.
+ * In normal mode (PKO lockless operation is disabled), the value returned
+ * is the base queue.
+ *
+ * @port: Port number
+ * @core: Core to get queue for
+ *
+ * Returns Core-specific output queue
+ */
+static inline int cvmx_pko_get_base_queue_per_core(int port, int core)
+{
+#ifndef CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
+#define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0 16
+#endif
+#ifndef CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
+#define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1 16
+#endif
+
+ if (port < CVMX_PKO_MAX_PORTS_INTERFACE0)
+ return port * CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + core;
+ else if (port >= 16 && port < 16 + CVMX_PKO_MAX_PORTS_INTERFACE1)
+ return CVMX_PKO_MAX_PORTS_INTERFACE0 *
+ CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + (port -
+ 16) *
+ CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + core;
+ else if ((port >= 32) && (port < 36))
+ return CVMX_PKO_MAX_PORTS_INTERFACE0 *
+ CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
+ CVMX_PKO_MAX_PORTS_INTERFACE1 *
+ CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + (port -
+ 32) *
+ CVMX_PKO_QUEUES_PER_PORT_PCI;
+ else if ((port >= 36) && (port < 40))
+ return CVMX_PKO_MAX_PORTS_INTERFACE0 *
+ CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
+ CVMX_PKO_MAX_PORTS_INTERFACE1 *
+ CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 +
+ 4 * CVMX_PKO_QUEUES_PER_PORT_PCI + (port -
+ 36) *
+ CVMX_PKO_QUEUES_PER_PORT_LOOP;
+ else
+ /* Given the limit on the number of ports we can map to
+ * CVMX_MAX_OUTPUT_QUEUES_STATIC queues (currently 256,
+ * divided among all cores), the remaining unmapped ports
+ * are assigned an illegal queue number */
+ return CVMX_PKO_ILLEGAL_QUEUE;
+}
+
+/**
+ * For a given port number, return the base pko output queue
+ * for the port.
+ *
+ * @port: Port number
+ * Returns Base output queue
+ */
+static inline int cvmx_pko_get_base_queue(int port)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ return port;
+
+ return cvmx_pko_get_base_queue_per_core(port, 0);
+}
+
+/**
+ * For a given port number, return the number of pko output queues.
+ *
+ * @port: Port number
+ * Returns Number of output queues
+ */
+static inline int cvmx_pko_get_num_queues(int port)
+{
+ if (port < 16)
+ return CVMX_PKO_QUEUES_PER_PORT_INTERFACE0;
+ else if (port < 32)
+ return CVMX_PKO_QUEUES_PER_PORT_INTERFACE1;
+ else if (port < 36)
+ return CVMX_PKO_QUEUES_PER_PORT_PCI;
+ else if (port < 40)
+ return CVMX_PKO_QUEUES_PER_PORT_LOOP;
+ else
+ return 0;
+}
+
+/**
+ * Get the status counters for a port.
+ *
+ * @port_num: Port number to get statistics for.
+ * @clear: Set to 1 to clear the counters after they are read
+ * @status: Where to put the results.
+ */
+static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
+ cvmx_pko_port_status_t *status)
+{
+ union cvmx_pko_reg_read_idx pko_reg_read_idx;
+ union cvmx_pko_mem_count0 pko_mem_count0;
+ union cvmx_pko_mem_count1 pko_mem_count1;
+
+ pko_reg_read_idx.u64 = 0;
+ pko_reg_read_idx.s.index = port_num;
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
+
+ pko_mem_count0.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT0);
+ status->packets = pko_mem_count0.s.count;
+ if (clear) {
+ pko_mem_count0.s.count = port_num;
+ cvmx_write_csr(CVMX_PKO_MEM_COUNT0, pko_mem_count0.u64);
+ }
+
+ pko_mem_count1.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT1);
+ status->octets = pko_mem_count1.s.count;
+ if (clear) {
+ pko_mem_count1.s.count = port_num;
+ cvmx_write_csr(CVMX_PKO_MEM_COUNT1, pko_mem_count1.u64);
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ union cvmx_pko_mem_debug9 debug9;
+ pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num);
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
+ debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
+ status->doorbell = debug9.cn38xx.doorbell;
+ } else {
+ union cvmx_pko_mem_debug8 debug8;
+ pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num);
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
+ debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
+ status->doorbell = debug8.cn50xx.doorbell;
+ }
+}
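+
+/*
+ * Example (sketch; the port number is purely illustrative): reading
+ * and clearing the counters for a port.
+ *
+ *	cvmx_pko_port_status_t status;
+ *
+ *	cvmx_pko_get_port_status(0, 1, &status);
+ *	pr_info("PKO port 0: %llu packets, %llu octets\n",
+ *		(unsigned long long)status.packets,
+ *		(unsigned long long)status.octets);
+ */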
+
+/**
+ * Rate limit a PKO port to a max packets/sec. This function is only
+ * supported on CN57XX, CN56XX, CN55XX, and CN54XX.
+ *
+ * @port: Port to rate limit
+ * @packets_s: Maximum packet/sec
+ * @burst: Maximum number of packets to burst in a row before rate
+ * limiting cuts in.
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst);
+
+/**
+ * Rate limit a PKO port to a max bits/sec. This function is only
+ * supported on CN57XX, CN56XX, CN55XX, and CN54XX.
+ *
+ * @port: Port to rate limit
+ * @bits_s: PKO rate limit in bits/sec
+ * @burst: Maximum number of bits to burst before rate
+ * limiting cuts in.
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst);
+
+#endif /* __CVMX_PKO_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-pow-defs.h b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
new file mode 100644
index 000000000..474dd5443
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
@@ -0,0 +1,1001 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_POW_DEFS_H__
+#define __CVMX_POW_DEFS_H__
+
+#define CVMX_POW_BIST_STAT (CVMX_ADD_IO_SEG(0x00016700000003F8ull))
+#define CVMX_POW_DS_PC (CVMX_ADD_IO_SEG(0x0001670000000398ull))
+#define CVMX_POW_ECC_ERR (CVMX_ADD_IO_SEG(0x0001670000000218ull))
+#define CVMX_POW_INT_CTL (CVMX_ADD_IO_SEG(0x0001670000000220ull))
+#define CVMX_POW_IQ_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000000340ull) + ((offset) & 7) * 8)
+#define CVMX_POW_IQ_COM_CNT (CVMX_ADD_IO_SEG(0x0001670000000388ull))
+#define CVMX_POW_IQ_INT (CVMX_ADD_IO_SEG(0x0001670000000238ull))
+#define CVMX_POW_IQ_INT_EN (CVMX_ADD_IO_SEG(0x0001670000000240ull))
+#define CVMX_POW_IQ_THRX(offset) (CVMX_ADD_IO_SEG(0x00016700000003A0ull) + ((offset) & 7) * 8)
+#define CVMX_POW_NOS_CNT (CVMX_ADD_IO_SEG(0x0001670000000228ull))
+#define CVMX_POW_NW_TIM (CVMX_ADD_IO_SEG(0x0001670000000210ull))
+#define CVMX_POW_PF_RST_MSK (CVMX_ADD_IO_SEG(0x0001670000000230ull))
+#define CVMX_POW_PP_GRP_MSKX(offset) (CVMX_ADD_IO_SEG(0x0001670000000000ull) + ((offset) & 15) * 8)
+#define CVMX_POW_QOS_RNDX(offset) (CVMX_ADD_IO_SEG(0x00016700000001C0ull) + ((offset) & 7) * 8)
+#define CVMX_POW_QOS_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000000180ull) + ((offset) & 7) * 8)
+#define CVMX_POW_TS_PC (CVMX_ADD_IO_SEG(0x0001670000000390ull))
+#define CVMX_POW_WA_COM_PC (CVMX_ADD_IO_SEG(0x0001670000000380ull))
+#define CVMX_POW_WA_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000000300ull) + ((offset) & 7) * 8)
+#define CVMX_POW_WQ_INT (CVMX_ADD_IO_SEG(0x0001670000000200ull))
+#define CVMX_POW_WQ_INT_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000000100ull) + ((offset) & 15) * 8)
+#define CVMX_POW_WQ_INT_PC (CVMX_ADD_IO_SEG(0x0001670000000208ull))
+#define CVMX_POW_WQ_INT_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000000080ull) + ((offset) & 15) * 8)
+#define CVMX_POW_WS_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000000280ull) + ((offset) & 15) * 8)
+
+#define CVMX_SSO_WQ_INT (CVMX_ADD_IO_SEG(0x0001670000001000ull))
+#define CVMX_SSO_WQ_IQ_DIS (CVMX_ADD_IO_SEG(0x0001670000001010ull))
+#define CVMX_SSO_WQ_INT_PC (CVMX_ADD_IO_SEG(0x0001670000001020ull))
+#define CVMX_SSO_PPX_GRP_MSK(offset) (CVMX_ADD_IO_SEG(0x0001670000006000ull) + ((offset) & 31) * 8)
+#define CVMX_SSO_WQ_INT_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000007000ull) + ((offset) & 63) * 8)
+
+union cvmx_pow_bist_stat {
+ uint64_t u64;
+ struct cvmx_pow_bist_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t pp:16;
+ uint64_t reserved_0_15:16;
+#else
+ uint64_t reserved_0_15:16;
+ uint64_t pp:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_pow_bist_stat_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t pp:1;
+ uint64_t reserved_9_15:7;
+ uint64_t cam:1;
+ uint64_t nbt1:1;
+ uint64_t nbt0:1;
+ uint64_t index:1;
+ uint64_t fidx:1;
+ uint64_t nbr1:1;
+ uint64_t nbr0:1;
+ uint64_t pend:1;
+ uint64_t adr:1;
+#else
+ uint64_t adr:1;
+ uint64_t pend:1;
+ uint64_t nbr0:1;
+ uint64_t nbr1:1;
+ uint64_t fidx:1;
+ uint64_t index:1;
+ uint64_t nbt0:1;
+ uint64_t nbt1:1;
+ uint64_t cam:1;
+ uint64_t reserved_9_15:7;
+ uint64_t pp:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } cn30xx;
+ struct cvmx_pow_bist_stat_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_18_63:46;
+ uint64_t pp:2;
+ uint64_t reserved_9_15:7;
+ uint64_t cam:1;
+ uint64_t nbt1:1;
+ uint64_t nbt0:1;
+ uint64_t index:1;
+ uint64_t fidx:1;
+ uint64_t nbr1:1;
+ uint64_t nbr0:1;
+ uint64_t pend:1;
+ uint64_t adr:1;
+#else
+ uint64_t adr:1;
+ uint64_t pend:1;
+ uint64_t nbr0:1;
+ uint64_t nbr1:1;
+ uint64_t fidx:1;
+ uint64_t index:1;
+ uint64_t nbt0:1;
+ uint64_t nbt1:1;
+ uint64_t cam:1;
+ uint64_t reserved_9_15:7;
+ uint64_t pp:2;
+ uint64_t reserved_18_63:46;
+#endif
+ } cn31xx;
+ struct cvmx_pow_bist_stat_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t pp:16;
+ uint64_t reserved_10_15:6;
+ uint64_t cam:1;
+ uint64_t nbt:1;
+ uint64_t index:1;
+ uint64_t fidx:1;
+ uint64_t nbr1:1;
+ uint64_t nbr0:1;
+ uint64_t pend1:1;
+ uint64_t pend0:1;
+ uint64_t adr1:1;
+ uint64_t adr0:1;
+#else
+ uint64_t adr0:1;
+ uint64_t adr1:1;
+ uint64_t pend0:1;
+ uint64_t pend1:1;
+ uint64_t nbr0:1;
+ uint64_t nbr1:1;
+ uint64_t fidx:1;
+ uint64_t index:1;
+ uint64_t nbt:1;
+ uint64_t cam:1;
+ uint64_t reserved_10_15:6;
+ uint64_t pp:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn38xx;
+ struct cvmx_pow_bist_stat_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t pp:4;
+ uint64_t reserved_9_15:7;
+ uint64_t cam:1;
+ uint64_t nbt1:1;
+ uint64_t nbt0:1;
+ uint64_t index:1;
+ uint64_t fidx:1;
+ uint64_t nbr1:1;
+ uint64_t nbr0:1;
+ uint64_t pend:1;
+ uint64_t adr:1;
+#else
+ uint64_t adr:1;
+ uint64_t pend:1;
+ uint64_t nbr0:1;
+ uint64_t nbr1:1;
+ uint64_t fidx:1;
+ uint64_t index:1;
+ uint64_t nbt0:1;
+ uint64_t nbt1:1;
+ uint64_t cam:1;
+ uint64_t reserved_9_15:7;
+ uint64_t pp:4;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn52xx;
+ struct cvmx_pow_bist_stat_cn56xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t pp:12;
+ uint64_t reserved_10_15:6;
+ uint64_t cam:1;
+ uint64_t nbt:1;
+ uint64_t index:1;
+ uint64_t fidx:1;
+ uint64_t nbr1:1;
+ uint64_t nbr0:1;
+ uint64_t pend1:1;
+ uint64_t pend0:1;
+ uint64_t adr1:1;
+ uint64_t adr0:1;
+#else
+ uint64_t adr0:1;
+ uint64_t adr1:1;
+ uint64_t pend0:1;
+ uint64_t pend1:1;
+ uint64_t nbr0:1;
+ uint64_t nbr1:1;
+ uint64_t fidx:1;
+ uint64_t index:1;
+ uint64_t nbt:1;
+ uint64_t cam:1;
+ uint64_t reserved_10_15:6;
+ uint64_t pp:12;
+ uint64_t reserved_28_63:36;
+#endif
+ } cn56xx;
+ struct cvmx_pow_bist_stat_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t pp:4;
+ uint64_t reserved_12_15:4;
+ uint64_t cam:1;
+ uint64_t nbr:3;
+ uint64_t nbt:4;
+ uint64_t index:1;
+ uint64_t fidx:1;
+ uint64_t pend:1;
+ uint64_t adr:1;
+#else
+ uint64_t adr:1;
+ uint64_t pend:1;
+ uint64_t fidx:1;
+ uint64_t index:1;
+ uint64_t nbt:4;
+ uint64_t nbr:3;
+ uint64_t cam:1;
+ uint64_t reserved_12_15:4;
+ uint64_t pp:4;
+ uint64_t reserved_20_63:44;
+#endif
+ } cn61xx;
+ struct cvmx_pow_bist_stat_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t pp:6;
+ uint64_t reserved_12_15:4;
+ uint64_t cam:1;
+ uint64_t nbr:3;
+ uint64_t nbt:4;
+ uint64_t index:1;
+ uint64_t fidx:1;
+ uint64_t pend:1;
+ uint64_t adr:1;
+#else
+ uint64_t adr:1;
+ uint64_t pend:1;
+ uint64_t fidx:1;
+ uint64_t index:1;
+ uint64_t nbt:4;
+ uint64_t nbr:3;
+ uint64_t cam:1;
+ uint64_t reserved_12_15:4;
+ uint64_t pp:6;
+ uint64_t reserved_22_63:42;
+#endif
+ } cn63xx;
+ struct cvmx_pow_bist_stat_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_26_63:38;
+ uint64_t pp:10;
+ uint64_t reserved_12_15:4;
+ uint64_t cam:1;
+ uint64_t nbr:3;
+ uint64_t nbt:4;
+ uint64_t index:1;
+ uint64_t fidx:1;
+ uint64_t pend:1;
+ uint64_t adr:1;
+#else
+ uint64_t adr:1;
+ uint64_t pend:1;
+ uint64_t fidx:1;
+ uint64_t index:1;
+ uint64_t nbt:4;
+ uint64_t nbr:3;
+ uint64_t cam:1;
+ uint64_t reserved_12_15:4;
+ uint64_t pp:10;
+ uint64_t reserved_26_63:38;
+#endif
+ } cn66xx;
+};
+
+union cvmx_pow_ds_pc {
+ uint64_t u64;
+ struct cvmx_pow_ds_pc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ds_pc:32;
+#else
+ uint64_t ds_pc:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pow_ecc_err {
+ uint64_t u64;
+ struct cvmx_pow_ecc_err_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_45_63:19;
+ uint64_t iop_ie:13;
+ uint64_t reserved_29_31:3;
+ uint64_t iop:13;
+ uint64_t reserved_14_15:2;
+ uint64_t rpe_ie:1;
+ uint64_t rpe:1;
+ uint64_t reserved_9_11:3;
+ uint64_t syn:5;
+ uint64_t dbe_ie:1;
+ uint64_t sbe_ie:1;
+ uint64_t dbe:1;
+ uint64_t sbe:1;
+#else
+ uint64_t sbe:1;
+ uint64_t dbe:1;
+ uint64_t sbe_ie:1;
+ uint64_t dbe_ie:1;
+ uint64_t syn:5;
+ uint64_t reserved_9_11:3;
+ uint64_t rpe:1;
+ uint64_t rpe_ie:1;
+ uint64_t reserved_14_15:2;
+ uint64_t iop:13;
+ uint64_t reserved_29_31:3;
+ uint64_t iop_ie:13;
+ uint64_t reserved_45_63:19;
+#endif
+ } s;
+ struct cvmx_pow_ecc_err_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t rpe_ie:1;
+ uint64_t rpe:1;
+ uint64_t reserved_9_11:3;
+ uint64_t syn:5;
+ uint64_t dbe_ie:1;
+ uint64_t sbe_ie:1;
+ uint64_t dbe:1;
+ uint64_t sbe:1;
+#else
+ uint64_t sbe:1;
+ uint64_t dbe:1;
+ uint64_t sbe_ie:1;
+ uint64_t dbe_ie:1;
+ uint64_t syn:5;
+ uint64_t reserved_9_11:3;
+ uint64_t rpe:1;
+ uint64_t rpe_ie:1;
+ uint64_t reserved_14_63:50;
+#endif
+ } cn31xx;
+};
+
+union cvmx_pow_int_ctl {
+ uint64_t u64;
+ struct cvmx_pow_int_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t pfr_dis:1;
+ uint64_t nbr_thr:5;
+#else
+ uint64_t nbr_thr:5;
+ uint64_t pfr_dis:1;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_pow_iq_cntx {
+ uint64_t u64;
+ struct cvmx_pow_iq_cntx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t iq_cnt:32;
+#else
+ uint64_t iq_cnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pow_iq_com_cnt {
+ uint64_t u64;
+ struct cvmx_pow_iq_com_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t iq_cnt:32;
+#else
+ uint64_t iq_cnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pow_iq_int {
+ uint64_t u64;
+ struct cvmx_pow_iq_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t iq_int:8;
+#else
+ uint64_t iq_int:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_pow_iq_int_en {
+ uint64_t u64;
+ struct cvmx_pow_iq_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t int_en:8;
+#else
+ uint64_t int_en:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_pow_iq_thrx {
+ uint64_t u64;
+ struct cvmx_pow_iq_thrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t iq_thr:32;
+#else
+ uint64_t iq_thr:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pow_nos_cnt {
+ uint64_t u64;
+ struct cvmx_pow_nos_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t nos_cnt:12;
+#else
+ uint64_t nos_cnt:12;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+ struct cvmx_pow_nos_cnt_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t nos_cnt:7;
+#else
+ uint64_t nos_cnt:7;
+ uint64_t reserved_7_63:57;
+#endif
+ } cn30xx;
+ struct cvmx_pow_nos_cnt_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t nos_cnt:9;
+#else
+ uint64_t nos_cnt:9;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn31xx;
+ struct cvmx_pow_nos_cnt_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t nos_cnt:10;
+#else
+ uint64_t nos_cnt:10;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn52xx;
+ struct cvmx_pow_nos_cnt_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t nos_cnt:11;
+#else
+ uint64_t nos_cnt:11;
+ uint64_t reserved_11_63:53;
+#endif
+ } cn63xx;
+};
+
+union cvmx_pow_nw_tim {
+ uint64_t u64;
+ struct cvmx_pow_nw_tim_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t nw_tim:10;
+#else
+ uint64_t nw_tim:10;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+};
+
+union cvmx_pow_pf_rst_msk {
+ uint64_t u64;
+ struct cvmx_pow_pf_rst_msk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t rst_msk:8;
+#else
+ uint64_t rst_msk:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_pow_pp_grp_mskx {
+ uint64_t u64;
+ struct cvmx_pow_pp_grp_mskx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t qos7_pri:4;
+ uint64_t qos6_pri:4;
+ uint64_t qos5_pri:4;
+ uint64_t qos4_pri:4;
+ uint64_t qos3_pri:4;
+ uint64_t qos2_pri:4;
+ uint64_t qos1_pri:4;
+ uint64_t qos0_pri:4;
+ uint64_t grp_msk:16;
+#else
+ uint64_t grp_msk:16;
+ uint64_t qos0_pri:4;
+ uint64_t qos1_pri:4;
+ uint64_t qos2_pri:4;
+ uint64_t qos3_pri:4;
+ uint64_t qos4_pri:4;
+ uint64_t qos5_pri:4;
+ uint64_t qos6_pri:4;
+ uint64_t qos7_pri:4;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+ struct cvmx_pow_pp_grp_mskx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t grp_msk:16;
+#else
+ uint64_t grp_msk:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } cn30xx;
+};
+
+union cvmx_pow_qos_rndx {
+ uint64_t u64;
+ struct cvmx_pow_qos_rndx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t rnd_p3:8;
+ uint64_t rnd_p2:8;
+ uint64_t rnd_p1:8;
+ uint64_t rnd:8;
+#else
+ uint64_t rnd:8;
+ uint64_t rnd_p1:8;
+ uint64_t rnd_p2:8;
+ uint64_t rnd_p3:8;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pow_qos_thrx {
+ uint64_t u64;
+ struct cvmx_pow_qos_thrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63:4;
+ uint64_t des_cnt:12;
+ uint64_t buf_cnt:12;
+ uint64_t free_cnt:12;
+ uint64_t reserved_23_23:1;
+ uint64_t max_thr:11;
+ uint64_t reserved_11_11:1;
+ uint64_t min_thr:11;
+#else
+ uint64_t min_thr:11;
+ uint64_t reserved_11_11:1;
+ uint64_t max_thr:11;
+ uint64_t reserved_23_23:1;
+ uint64_t free_cnt:12;
+ uint64_t buf_cnt:12;
+ uint64_t des_cnt:12;
+ uint64_t reserved_60_63:4;
+#endif
+ } s;
+ struct cvmx_pow_qos_thrx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_55_63:9;
+ uint64_t des_cnt:7;
+ uint64_t reserved_43_47:5;
+ uint64_t buf_cnt:7;
+ uint64_t reserved_31_35:5;
+ uint64_t free_cnt:7;
+ uint64_t reserved_18_23:6;
+ uint64_t max_thr:6;
+ uint64_t reserved_6_11:6;
+ uint64_t min_thr:6;
+#else
+ uint64_t min_thr:6;
+ uint64_t reserved_6_11:6;
+ uint64_t max_thr:6;
+ uint64_t reserved_18_23:6;
+ uint64_t free_cnt:7;
+ uint64_t reserved_31_35:5;
+ uint64_t buf_cnt:7;
+ uint64_t reserved_43_47:5;
+ uint64_t des_cnt:7;
+ uint64_t reserved_55_63:9;
+#endif
+ } cn30xx;
+ struct cvmx_pow_qos_thrx_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_57_63:7;
+ uint64_t des_cnt:9;
+ uint64_t reserved_45_47:3;
+ uint64_t buf_cnt:9;
+ uint64_t reserved_33_35:3;
+ uint64_t free_cnt:9;
+ uint64_t reserved_20_23:4;
+ uint64_t max_thr:8;
+ uint64_t reserved_8_11:4;
+ uint64_t min_thr:8;
+#else
+ uint64_t min_thr:8;
+ uint64_t reserved_8_11:4;
+ uint64_t max_thr:8;
+ uint64_t reserved_20_23:4;
+ uint64_t free_cnt:9;
+ uint64_t reserved_33_35:3;
+ uint64_t buf_cnt:9;
+ uint64_t reserved_45_47:3;
+ uint64_t des_cnt:9;
+ uint64_t reserved_57_63:7;
+#endif
+ } cn31xx;
+ struct cvmx_pow_qos_thrx_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_58_63:6;
+ uint64_t des_cnt:10;
+ uint64_t reserved_46_47:2;
+ uint64_t buf_cnt:10;
+ uint64_t reserved_34_35:2;
+ uint64_t free_cnt:10;
+ uint64_t reserved_21_23:3;
+ uint64_t max_thr:9;
+ uint64_t reserved_9_11:3;
+ uint64_t min_thr:9;
+#else
+ uint64_t min_thr:9;
+ uint64_t reserved_9_11:3;
+ uint64_t max_thr:9;
+ uint64_t reserved_21_23:3;
+ uint64_t free_cnt:10;
+ uint64_t reserved_34_35:2;
+ uint64_t buf_cnt:10;
+ uint64_t reserved_46_47:2;
+ uint64_t des_cnt:10;
+ uint64_t reserved_58_63:6;
+#endif
+ } cn52xx;
+ struct cvmx_pow_qos_thrx_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63:5;
+ uint64_t des_cnt:11;
+ uint64_t reserved_47_47:1;
+ uint64_t buf_cnt:11;
+ uint64_t reserved_35_35:1;
+ uint64_t free_cnt:11;
+ uint64_t reserved_22_23:2;
+ uint64_t max_thr:10;
+ uint64_t reserved_10_11:2;
+ uint64_t min_thr:10;
+#else
+ uint64_t min_thr:10;
+ uint64_t reserved_10_11:2;
+ uint64_t max_thr:10;
+ uint64_t reserved_22_23:2;
+ uint64_t free_cnt:11;
+ uint64_t reserved_35_35:1;
+ uint64_t buf_cnt:11;
+ uint64_t reserved_47_47:1;
+ uint64_t des_cnt:11;
+ uint64_t reserved_59_63:5;
+#endif
+ } cn63xx;
+};
+
+union cvmx_pow_ts_pc {
+ uint64_t u64;
+ struct cvmx_pow_ts_pc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ts_pc:32;
+#else
+ uint64_t ts_pc:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pow_wa_com_pc {
+ uint64_t u64;
+ struct cvmx_pow_wa_com_pc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t wa_pc:32;
+#else
+ uint64_t wa_pc:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pow_wa_pcx {
+ uint64_t u64;
+ struct cvmx_pow_wa_pcx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t wa_pc:32;
+#else
+ uint64_t wa_pc:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pow_wq_int {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t iq_dis:16;
+ uint64_t wq_int:16;
+#else
+ uint64_t wq_int:16;
+ uint64_t iq_dis:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_pow_wq_int_cntx {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_cntx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t tc_cnt:4;
+ uint64_t ds_cnt:12;
+ uint64_t iq_cnt:12;
+#else
+ uint64_t iq_cnt:12;
+ uint64_t ds_cnt:12;
+ uint64_t tc_cnt:4;
+ uint64_t reserved_28_63:36;
+#endif
+ } s;
+ struct cvmx_pow_wq_int_cntx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t tc_cnt:4;
+ uint64_t reserved_19_23:5;
+ uint64_t ds_cnt:7;
+ uint64_t reserved_7_11:5;
+ uint64_t iq_cnt:7;
+#else
+ uint64_t iq_cnt:7;
+ uint64_t reserved_7_11:5;
+ uint64_t ds_cnt:7;
+ uint64_t reserved_19_23:5;
+ uint64_t tc_cnt:4;
+ uint64_t reserved_28_63:36;
+#endif
+ } cn30xx;
+ struct cvmx_pow_wq_int_cntx_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t tc_cnt:4;
+ uint64_t reserved_21_23:3;
+ uint64_t ds_cnt:9;
+ uint64_t reserved_9_11:3;
+ uint64_t iq_cnt:9;
+#else
+ uint64_t iq_cnt:9;
+ uint64_t reserved_9_11:3;
+ uint64_t ds_cnt:9;
+ uint64_t reserved_21_23:3;
+ uint64_t tc_cnt:4;
+ uint64_t reserved_28_63:36;
+#endif
+ } cn31xx;
+ struct cvmx_pow_wq_int_cntx_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t tc_cnt:4;
+ uint64_t reserved_22_23:2;
+ uint64_t ds_cnt:10;
+ uint64_t reserved_10_11:2;
+ uint64_t iq_cnt:10;
+#else
+ uint64_t iq_cnt:10;
+ uint64_t reserved_10_11:2;
+ uint64_t ds_cnt:10;
+ uint64_t reserved_22_23:2;
+ uint64_t tc_cnt:4;
+ uint64_t reserved_28_63:36;
+#endif
+ } cn52xx;
+ struct cvmx_pow_wq_int_cntx_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t tc_cnt:4;
+ uint64_t reserved_23_23:1;
+ uint64_t ds_cnt:11;
+ uint64_t reserved_11_11:1;
+ uint64_t iq_cnt:11;
+#else
+ uint64_t iq_cnt:11;
+ uint64_t reserved_11_11:1;
+ uint64_t ds_cnt:11;
+ uint64_t reserved_23_23:1;
+ uint64_t tc_cnt:4;
+ uint64_t reserved_28_63:36;
+#endif
+ } cn63xx;
+};
+
+union cvmx_pow_wq_int_pc {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_pc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63:4;
+ uint64_t pc:28;
+ uint64_t reserved_28_31:4;
+ uint64_t pc_thr:20;
+ uint64_t reserved_0_7:8;
+#else
+ uint64_t reserved_0_7:8;
+ uint64_t pc_thr:20;
+ uint64_t reserved_28_31:4;
+ uint64_t pc:28;
+ uint64_t reserved_60_63:4;
+#endif
+ } s;
+};
+
+union cvmx_pow_wq_int_thrx {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_thrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_23_23:1;
+ uint64_t ds_thr:11;
+ uint64_t reserved_11_11:1;
+ uint64_t iq_thr:11;
+#else
+ uint64_t iq_thr:11;
+ uint64_t reserved_11_11:1;
+ uint64_t ds_thr:11;
+ uint64_t reserved_23_23:1;
+ uint64_t tc_thr:4;
+ uint64_t tc_en:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } s;
+ struct cvmx_pow_wq_int_thrx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_18_23:6;
+ uint64_t ds_thr:6;
+ uint64_t reserved_6_11:6;
+ uint64_t iq_thr:6;
+#else
+ uint64_t iq_thr:6;
+ uint64_t reserved_6_11:6;
+ uint64_t ds_thr:6;
+ uint64_t reserved_18_23:6;
+ uint64_t tc_thr:4;
+ uint64_t tc_en:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn30xx;
+ struct cvmx_pow_wq_int_thrx_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_20_23:4;
+ uint64_t ds_thr:8;
+ uint64_t reserved_8_11:4;
+ uint64_t iq_thr:8;
+#else
+ uint64_t iq_thr:8;
+ uint64_t reserved_8_11:4;
+ uint64_t ds_thr:8;
+ uint64_t reserved_20_23:4;
+ uint64_t tc_thr:4;
+ uint64_t tc_en:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn31xx;
+ struct cvmx_pow_wq_int_thrx_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_21_23:3;
+ uint64_t ds_thr:9;
+ uint64_t reserved_9_11:3;
+ uint64_t iq_thr:9;
+#else
+ uint64_t iq_thr:9;
+ uint64_t reserved_9_11:3;
+ uint64_t ds_thr:9;
+ uint64_t reserved_21_23:3;
+ uint64_t tc_thr:4;
+ uint64_t tc_en:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn52xx;
+ struct cvmx_pow_wq_int_thrx_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_22_23:2;
+ uint64_t ds_thr:10;
+ uint64_t reserved_10_11:2;
+ uint64_t iq_thr:10;
+#else
+ uint64_t iq_thr:10;
+ uint64_t reserved_10_11:2;
+ uint64_t ds_thr:10;
+ uint64_t reserved_22_23:2;
+ uint64_t tc_thr:4;
+ uint64_t tc_en:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn63xx;
+};
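+
+/*
+ * Example (sketch; the group and threshold values are arbitrary):
+ * raising the input-queue interrupt threshold for group 0 using the
+ * layout above.
+ *
+ *	union cvmx_pow_wq_int_thrx thr;
+ *
+ *	thr.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_THRX(0));
+ *	thr.s.iq_thr = 32;
+ *	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(0), thr.u64);
+ */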
+
+union cvmx_pow_ws_pcx {
+ uint64_t u64;
+ struct cvmx_pow_ws_pcx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ws_pc:32;
+#else
+ uint64_t ws_pc:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_sso_wq_int_thrx {
+ uint64_t u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63:31;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_26_27:2;
+ uint64_t ds_thr:12;
+ uint64_t reserved_12_13:2;
+ uint64_t iq_thr:12;
+#else
+ uint64_t iq_thr:12;
+ uint64_t reserved_12_13:2;
+ uint64_t ds_thr:12;
+ uint64_t reserved_26_27:2;
+ uint64_t tc_thr:4;
+ uint64_t tc_en:1;
+ uint64_t reserved_33_63:31;
+#endif
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pow.h b/arch/mips/include/asm/octeon/cvmx-pow.h
new file mode 100644
index 000000000..a3b23811e
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-pow.h
@@ -0,0 +1,2215 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * Interface to the hardware Packet Order / Work unit.
+ *
+ * New, starting with SDK 1.7.0, cvmx-pow supports a number of
+ * extended consistency checks. The define
+ * CVMX_ENABLE_POW_CHECKS controls the runtime insertion of POW
+ * internal state checks to find common programming errors. If
+ * CVMX_ENABLE_POW_CHECKS is not defined, checks are enabled by
+ * default. For example, cvmx-pow will check for the following
+ * programming errors or POW state inconsistencies.
+ * - Requesting a POW operation with an active tag switch in
+ * progress.
+ * - Waiting for a tag switch to complete for an excessively
+ * long period. This is normally a sign of an error in locking
+ * causing deadlock.
+ * - Illegal tag switches from NULL_NULL.
+ * - Illegal tag switches from NULL.
+ * - Illegal deschedule request.
+ * - WQE pointer not matching the one attached to the core by
+ * the POW.
+ *
+ */
+
+#ifndef __CVMX_POW_H__
+#define __CVMX_POW_H__
+
+#include <asm/octeon/cvmx-pow-defs.h>
+
+#include <asm/octeon/cvmx-scratch.h>
+#include <asm/octeon/cvmx-wqe.h>
+
+/* Default to having all POW consistency checks turned on */
+#ifndef CVMX_ENABLE_POW_CHECKS
+#define CVMX_ENABLE_POW_CHECKS 1
+#endif
+
+enum cvmx_pow_tag_type {
+ /* Tag ordering is maintained */
+ CVMX_POW_TAG_TYPE_ORDERED = 0L,
+ /* Tag ordering is maintained, and at most one PP has the tag */
+ CVMX_POW_TAG_TYPE_ATOMIC = 1L,
+ /*
+ * The work queue entry from the order - NEVER tag switch from
+ * NULL to NULL
+ */
+ CVMX_POW_TAG_TYPE_NULL = 2L,
+ /* A tag switch to NULL, and there is no space reserved in POW
+ * - NEVER tag switch to NULL_NULL
+ * - NEVER tag switch from NULL_NULL
+ * - NULL_NULL is entered at the beginning of time and on a deschedule.
+ * - NULL_NULL can be exited by a new work request. A NULL_SWITCH
+ * load can also switch the state to NULL
+ */
+ CVMX_POW_TAG_TYPE_NULL_NULL = 3L
+};
+
+/**
+ * Wait flag values for pow functions.
+ */
+typedef enum {
+ CVMX_POW_WAIT = 1,
+ CVMX_POW_NO_WAIT = 0,
+} cvmx_pow_wait_t;
+
+/**
+ * POW tag operations. These are used in the data stored to the POW.
+ */
+typedef enum {
+ /*
+ * switch the tag (only) for this PP
+ * - the previous tag should be non-NULL in this case
+ * - tag switch response required
+ * - fields used: op, type, tag
+ */
+ CVMX_POW_TAG_OP_SWTAG = 0L,
+ /*
+ * switch the tag for this PP, with full information
+ * - this should be used when the previous tag is NULL
+ * - tag switch response required
+ * - fields used: address, op, grp, type, tag
+ */
+ CVMX_POW_TAG_OP_SWTAG_FULL = 1L,
+ /*
+ * switch the tag (and/or group) for this PP and de-schedule
+ * - OK to keep the tag the same and only change the group
+ * - fields used: op, no_sched, grp, type, tag
+ */
+ CVMX_POW_TAG_OP_SWTAG_DESCH = 2L,
+ /*
+ * just de-schedule
+ * - fields used: op, no_sched
+ */
+ CVMX_POW_TAG_OP_DESCH = 3L,
+ /*
+ * create an entirely new work queue entry
+ * - fields used: address, op, qos, grp, type, tag
+ */
+ CVMX_POW_TAG_OP_ADDWQ = 4L,
+ /*
+ * just update the work queue pointer and grp for this PP
+ * - fields used: address, op, grp
+ */
+ CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,
+ /*
+ * set the no_sched bit on the de-schedule list
+ *
+ * - does nothing if the selected entry is not on the
+ * de-schedule list
+ *
+ * - does nothing if the stored work queue pointer does not
+ * match the address field
+ *
+ * - fields used: address, index, op
+ *
+ * Before issuing a *_NSCHED operation, SW must guarantee
+ * that all prior deschedules and set/clr NSCHED operations
+ * are complete and all prior switches are complete. The
+ * hardware provides the opsdone bit and swdone bit for SW
+ * polling. After issuing a *_NSCHED operation, SW must
+ * guarantee that the set/clr NSCHED is complete before any
+ * subsequent operations.
+ */
+ CVMX_POW_TAG_OP_SET_NSCHED = 6L,
+ /*
+ * clears the no_sched bit on the de-schedule list
+ *
+ * - does nothing if the selected entry is not on the
+ * de-schedule list
+ *
+ * - does nothing if the stored work queue pointer does not
+ * match the address field
+ *
+ * - fields used: address, index, op
+ *
+ * Before issuing a *_NSCHED operation, SW must guarantee that
+ * all prior deschedules and set/clr NSCHED operations are
+ * complete and all prior switches are complete. The hardware
+ * provides the opsdone bit and swdone bit for SW
+ * polling. After issuing a *_NSCHED operation, SW must
+ * guarantee that the set/clr NSCHED is complete before any
+ * subsequent operations.
+ */
+ CVMX_POW_TAG_OP_CLR_NSCHED = 7L,
+ /* do nothing */
+ CVMX_POW_TAG_OP_NOP = 15L
+} cvmx_pow_tag_op_t;
+
+/**
+ * This structure defines the store data on a store to POW
+ */
+typedef union {
+ uint64_t u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /*
+ * Don't reschedule this entry. no_sched is used for
+ * CVMX_POW_TAG_OP_SWTAG_DESCH and
+ * CVMX_POW_TAG_OP_DESCH
+ */
+ uint64_t no_sched:1;
+ uint64_t unused:2;
+ /* Contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
+ uint64_t index:13;
+ /* The operation to perform */
+ cvmx_pow_tag_op_t op:4;
+ uint64_t unused2:2;
+ /*
+ * The QOS level for the packet. qos is only used for
+ * CVMX_POW_TAG_OP_ADDWQ
+ */
+ uint64_t qos:3;
+ /*
+ * The group that the work queue entry will be
+ * scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ,
+ * CVMX_POW_TAG_OP_SWTAG_FULL,
+ * CVMX_POW_TAG_OP_SWTAG_DESCH, and
+ * CVMX_POW_TAG_OP_UPDATE_WQP_GRP
+ */
+ uint64_t grp:4;
+ /*
+ * The type of the tag. type is used for everything
+ * except CVMX_POW_TAG_OP_DESCH,
+ * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and
+ * CVMX_POW_TAG_OP_*_NSCHED
+ */
+ uint64_t type:3;
+ /*
+ * The actual tag. tag is used for everything except
+ * CVMX_POW_TAG_OP_DESCH,
+ * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and
+ * CVMX_POW_TAG_OP_*_NSCHED
+ */
+ uint64_t tag:32;
+#else
+ uint64_t tag:32;
+ uint64_t type:3;
+ uint64_t grp:4;
+ uint64_t qos:3;
+ uint64_t unused2:2;
+ cvmx_pow_tag_op_t op:4;
+ uint64_t index:13;
+ uint64_t unused:2;
+ uint64_t no_sched:1;
+#endif
+ } s;
+} cvmx_pow_tag_req_t;
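+
+/*
+ * Example (sketch; the tag value is arbitrary): filling in the store
+ * data for a plain SWTAG to an ordered tag, per the field descriptions
+ * above.  The actual store to the POW is performed by tag-switch
+ * helpers such as cvmx_pow_tag_sw_full().
+ *
+ *	cvmx_pow_tag_req_t tag_req;
+ *
+ *	tag_req.u64 = 0;
+ *	tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
+ *	tag_req.s.type = CVMX_POW_TAG_TYPE_ORDERED;
+ *	tag_req.s.tag = 0x1234;
+ */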
+
+/**
+ * This structure describes the address to load stuff from POW
+ */
+typedef union {
+ uint64_t u64;
+
+ /**
+ * Address for new work request loads (did<2:0> == 0)
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t mem_region:2;
+ /* Must be zero */
+ uint64_t reserved_49_61:13;
+ /* Must be one */
+ uint64_t is_io:1;
+ /* the ID of POW -- did<2:0> == 0 in this case */
+ uint64_t did:8;
+ /* Must be zero */
+ uint64_t reserved_4_39:36;
+ /*
+ * If set, don't return load response until work is
+ * available.
+ */
+ uint64_t wait:1;
+ /* Must be zero */
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t wait:1;
+ uint64_t reserved_4_39:36;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t reserved_49_61:13;
+ uint64_t mem_region:2;
+#endif
+ } swork;
+
+ /**
+ * Address for loads to get POW internal status
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t mem_region:2;
+ /* Must be zero */
+ uint64_t reserved_49_61:13;
+ /* Must be one */
+ uint64_t is_io:1;
+ /* the ID of POW -- did<2:0> == 1 in this case */
+ uint64_t did:8;
+ /* Must be zero */
+ uint64_t reserved_10_39:30;
+ /* The core id to get status for */
+ uint64_t coreid:4;
+ /*
+ * If set and get_cur is set, return reverse tag-list
+ * pointer rather than forward tag-list pointer.
+ */
+ uint64_t get_rev:1;
+ /*
+ * If set, return current status rather than pending
+ * status.
+ */
+ uint64_t get_cur:1;
+ /*
+ * If set, get the work-queue pointer rather than
+ * tag/type.
+ */
+ uint64_t get_wqp:1;
+ /* Must be zero */
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t get_wqp:1;
+ uint64_t get_cur:1;
+ uint64_t get_rev:1;
+ uint64_t coreid:4;
+ uint64_t reserved_10_39:30;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t reserved_49_61:13;
+ uint64_t mem_region:2;
+#endif
+ } sstatus;
+
+ /**
+ * Address for memory loads to get POW internal state
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t mem_region:2;
+ /* Must be zero */
+ uint64_t reserved_49_61:13;
+ /* Must be one */
+ uint64_t is_io:1;
+ /* the ID of POW -- did<2:0> == 2 in this case */
+ uint64_t did:8;
+ /* Must be zero */
+ uint64_t reserved_16_39:24;
+ /* POW memory index */
+ uint64_t index:11;
+ /*
+ * If set, return deschedule information rather than
+ * the standard response for work-queue index (invalid
+ * if the work-queue entry is not on the deschedule
+ * list).
+ */
+ uint64_t get_des:1;
+ /*
+ * If set, get the work-queue pointer rather than
+ * tag/type (no effect when get_des set).
+ */
+ uint64_t get_wqp:1;
+ /* Must be zero */
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t get_wqp:1;
+ uint64_t get_des:1;
+ uint64_t index:11;
+ uint64_t reserved_16_39:24;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t reserved_49_61:13;
+ uint64_t mem_region:2;
+#endif
+ } smemload;
+
+ /**
+ * Address for index/pointer loads
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t mem_region:2;
+ /* Must be zero */
+ uint64_t reserved_49_61:13;
+ /* Must be one */
+ uint64_t is_io:1;
+ /* the ID of POW -- did<2:0> == 3 in this case */
+ uint64_t did:8;
+ /* Must be zero */
+ uint64_t reserved_9_39:31;
+ /*
+ * when {get_rmt ==0 AND get_des_get_tail == 0}, this
+ * field selects one of eight POW internal-input
+ * queues (0-7), one per QOS level; values 8-15 are
+ * illegal in this case; when {get_rmt ==0 AND
+ * get_des_get_tail == 1}, this field selects one of
+ * 16 deschedule lists (per group); when get_rmt ==1,
+ * this field selects one of 16 memory-input queue
+ * lists. The two memory-input queue lists associated
+ * with each QOS level are:
+ *
+ * - qosgrp = 0, qosgrp = 8: QOS0
+ * - qosgrp = 1, qosgrp = 9: QOS1
+ * - qosgrp = 2, qosgrp = 10: QOS2
+ * - qosgrp = 3, qosgrp = 11: QOS3
+ * - qosgrp = 4, qosgrp = 12: QOS4
+ * - qosgrp = 5, qosgrp = 13: QOS5
+ * - qosgrp = 6, qosgrp = 14: QOS6
+ * - qosgrp = 7, qosgrp = 15: QOS7
+ */
+ uint64_t qosgrp:4;
+ /*
+ * If set and get_rmt is clear, return deschedule list
+ * indexes rather than indexes for the specified qos
+ * level; if set and get_rmt is set, return the tail
+ * pointer rather than the head pointer for the
+ * specified qos level.
+ */
+ uint64_t get_des_get_tail:1;
+ /*
+ * If set, return remote pointers rather than the
+ * local indexes for the specified qos level.
+ */
+ uint64_t get_rmt:1;
+ /* Must be zero */
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t get_rmt:1;
+ uint64_t get_des_get_tail:1;
+ uint64_t qosgrp:4;
+ uint64_t reserved_9_39:31;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t reserved_49_61:13;
+ uint64_t mem_region:2;
+#endif
+ } sindexload;
+
+ /**
+ * Address for NULL_RD request (did<2:0> == 4). When this is read,
+ * HW attempts to change the state to NULL if it is NULL_NULL (the
+ * hardware cannot switch from NULL_NULL to NULL if a POW entry is
+ * not available - software may need to recover by finishing
+ * another piece of work before a POW entry can ever become
+ * available.)
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t mem_region:2;
+ /* Must be zero */
+ uint64_t reserved_49_61:13;
+ /* Must be one */
+ uint64_t is_io:1;
+ /* the ID of POW -- did<2:0> == 4 in this case */
+ uint64_t did:8;
+ /* Must be zero */
+ uint64_t reserved_0_39:40;
+#else
+ uint64_t reserved_0_39:40;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t reserved_49_61:13;
+ uint64_t mem_region:2;
+#endif
+ } snull_rd;
+} cvmx_pow_load_addr_t;
+
+/**
+ * This structure defines the response to a load/SENDSINGLE to POW
+ * (except CSR reads)
+ */
+typedef union {
+ uint64_t u64;
+
+ /**
+ * Response to new work request loads
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /*
+ * Set when no new work queue entry was returned.
+ * If there was de-scheduled work, the HW will
+ * definitely return it. When this bit is set, it
+ * could mean either:
+ *
+ * - There was no work, or
+ *
+ * - There was no work that the HW could find. This
+ * case can happen, regardless of the wait bit value
+ * in the original request, when there is work in
+ * the IQ's that is too deep down the list.
+ */
+ uint64_t no_work:1;
+ /* Must be zero */
+ uint64_t reserved_40_62:23;
+ /* 36 in O1 -- the work queue pointer */
+ uint64_t addr:40;
+#else
+ uint64_t addr:40;
+ uint64_t reserved_40_62:23;
+ uint64_t no_work:1;
+#endif
+ } s_work;
+
+ /**
+ * Result for a POW Status Load (when get_cur==0 and get_wqp==0)
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ /* Set when there is a pending non-NULL SWTAG or
+ * SWTAG_FULL, and the POW entry has not left the list
+ * for the original tag. */
+ uint64_t pend_switch:1;
+ /* Set when SWTAG_FULL and pend_switch is set. */
+ uint64_t pend_switch_full:1;
+ /*
+ * Set when there is a pending NULL SWTAG, or an
+ * implicit switch to NULL.
+ */
+ uint64_t pend_switch_null:1;
+ /* Set when there is a pending DESCHED or SWTAG_DESCHED. */
+ uint64_t pend_desched:1;
+ /*
+ * Set when there is a pending SWTAG_DESCHED and
+ * pend_desched is set.
+ */
+ uint64_t pend_desched_switch:1;
+ /* Set when nosched is desired and pend_desched is set. */
+ uint64_t pend_nosched:1;
+ /* Set when there is a pending GET_WORK. */
+ uint64_t pend_new_work:1;
+ /*
+ * When pend_new_work is set, this bit indicates that
+ * the wait bit was set.
+ */
+ uint64_t pend_new_work_wait:1;
+ /* Set when there is a pending NULL_RD. */
+ uint64_t pend_null_rd:1;
+ /* Set when there is a pending CLR_NSCHED. */
+ uint64_t pend_nosched_clr:1;
+ uint64_t reserved_51:1;
+ /* This is the index when pend_nosched_clr is set. */
+ uint64_t pend_index:11;
+ /*
+ * This is the new_grp when (pend_desched AND
+ * pend_desched_switch) is set.
+ */
+ uint64_t pend_grp:4;
+ uint64_t reserved_34_35:2;
+ /*
+ * This is the tag type when pend_switch or
+ * (pend_desched AND pend_desched_switch) are set.
+ */
+ uint64_t pend_type:2;
+ /*
+ * - this is the tag when pend_switch or (pend_desched
+ * AND pend_desched_switch) are set.
+ */
+ uint64_t pend_tag:32;
+#else
+ uint64_t pend_tag:32;
+ uint64_t pend_type:2;
+ uint64_t reserved_34_35:2;
+ uint64_t pend_grp:4;
+ uint64_t pend_index:11;
+ uint64_t reserved_51:1;
+ uint64_t pend_nosched_clr:1;
+ uint64_t pend_null_rd:1;
+ uint64_t pend_new_work_wait:1;
+ uint64_t pend_new_work:1;
+ uint64_t pend_nosched:1;
+ uint64_t pend_desched_switch:1;
+ uint64_t pend_desched:1;
+ uint64_t pend_switch_null:1;
+ uint64_t pend_switch_full:1;
+ uint64_t pend_switch:1;
+ uint64_t reserved_62_63:2;
+#endif
+ } s_sstatus0;
+
+ /**
+ * Result for a POW Status Load (when get_cur==0 and get_wqp==1)
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ /*
+ * Set when there is a pending non-NULL SWTAG or
+ * SWTAG_FULL, and the POW entry has not left the list
+ * for the original tag.
+ */
+ uint64_t pend_switch:1;
+ /* Set when SWTAG_FULL and pend_switch is set. */
+ uint64_t pend_switch_full:1;
+ /*
+ * Set when there is a pending NULL SWTAG, or an
+ * implicit switch to NULL.
+ */
+ uint64_t pend_switch_null:1;
+ /*
+ * Set when there is a pending DESCHED or
+ * SWTAG_DESCHED.
+ */
+ uint64_t pend_desched:1;
+ /*
+ * Set when there is a pending SWTAG_DESCHED and
+ * pend_desched is set.
+ */
+ uint64_t pend_desched_switch:1;
+ /* Set when nosched is desired and pend_desched is set. */
+ uint64_t pend_nosched:1;
+ /* Set when there is a pending GET_WORK. */
+ uint64_t pend_new_work:1;
+ /*
+ * When pend_new_work is set, this bit indicates that
+ * the wait bit was set.
+ */
+ uint64_t pend_new_work_wait:1;
+ /* Set when there is a pending NULL_RD. */
+ uint64_t pend_null_rd:1;
+ /* Set when there is a pending CLR_NSCHED. */
+ uint64_t pend_nosched_clr:1;
+ uint64_t reserved_51:1;
+ /* This is the index when pend_nosched_clr is set. */
+ uint64_t pend_index:11;
+ /*
+ * This is the new_grp when (pend_desched AND
+ * pend_desched_switch) is set.
+ */
+ uint64_t pend_grp:4;
+ /* This is the wqp when pend_nosched_clr is set. */
+ uint64_t pend_wqp:36;
+#else
+ uint64_t pend_wqp:36;
+ uint64_t pend_grp:4;
+ uint64_t pend_index:11;
+ uint64_t reserved_51:1;
+ uint64_t pend_nosched_clr:1;
+ uint64_t pend_null_rd:1;
+ uint64_t pend_new_work_wait:1;
+ uint64_t pend_new_work:1;
+ uint64_t pend_nosched:1;
+ uint64_t pend_desched_switch:1;
+ uint64_t pend_desched:1;
+ uint64_t pend_switch_null:1;
+ uint64_t pend_switch_full:1;
+ uint64_t pend_switch:1;
+ uint64_t reserved_62_63:2;
+#endif
+ } s_sstatus1;
+
+ /**
+ * Result for a POW Status Load (when get_cur==1, get_wqp==0, and
+ * get_rev==0)
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ /*
+ * Points to the next POW entry in the tag list when
+ * tail == 0 (and tag_type is not NULL or NULL_NULL).
+ */
+ uint64_t link_index:11;
+ /* The POW entry attached to the core. */
+ uint64_t index:11;
+ /*
+ * The group attached to the core (updated when new
+ * tag list entered on SWTAG_FULL).
+ */
+ uint64_t grp:4;
+ /*
+ * Set when this POW entry is at the head of its tag
+ * list (also set when in the NULL or NULL_NULL
+ * state).
+ */
+ uint64_t head:1;
+ /*
+ * Set when this POW entry is at the tail of its tag
+ * list (also set when in the NULL or NULL_NULL
+ * state).
+ */
+ uint64_t tail:1;
+ /*
+ * The tag type attached to the core (updated when new
+ * tag list entered on SWTAG, SWTAG_FULL, or
+ * SWTAG_DESCHED).
+ */
+ uint64_t tag_type:2;
+ /*
+ * The tag attached to the core (updated when new tag
+ * list entered on SWTAG, SWTAG_FULL, or
+ * SWTAG_DESCHED).
+ */
+ uint64_t tag:32;
+#else
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t tail:1;
+ uint64_t head:1;
+ uint64_t grp:4;
+ uint64_t index:11;
+ uint64_t link_index:11;
+ uint64_t reserved_62_63:2;
+#endif
+ } s_sstatus2;
+
+ /**
+ * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1)
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ /*
+ * Points to the prior POW entry in the tag list when
+ * head == 0 (and tag_type is not NULL or
+ * NULL_NULL). This field is unpredictable when the
+ * core's state is NULL or NULL_NULL.
+ */
+ uint64_t revlink_index:11;
+ /* The POW entry attached to the core. */
+ uint64_t index:11;
+ /*
+ * The group attached to the core (updated when new
+ * tag list entered on SWTAG_FULL).
+ */
+ uint64_t grp:4;
+ /* Set when this POW entry is at the head of its tag
+ * list (also set when in the NULL or NULL_NULL
+ * state).
+ */
+ uint64_t head:1;
+ /*
+ * Set when this POW entry is at the tail of its tag
+ * list (also set when in the NULL or NULL_NULL
+ * state).
+ */
+ uint64_t tail:1;
+ /*
+ * The tag type attached to the core (updated when new
+ * tag list entered on SWTAG, SWTAG_FULL, or
+ * SWTAG_DESCHED).
+ */
+ uint64_t tag_type:2;
+ /*
+ * The tag attached to the core (updated when new tag
+ * list entered on SWTAG, SWTAG_FULL, or
+ * SWTAG_DESCHED).
+ */
+ uint64_t tag:32;
+#else
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t tail:1;
+ uint64_t head:1;
+ uint64_t grp:4;
+ uint64_t index:11;
+ uint64_t revlink_index:11;
+ uint64_t reserved_62_63:2;
+#endif
+ } s_sstatus3;
+
+ /**
+ * Result for a POW Status Load (when get_cur==1, get_wqp==1, and
+ * get_rev==0)
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ /*
+ * Points to the next POW entry in the tag list when
+ * tail == 0 (and tag_type is not NULL or NULL_NULL).
+ */
+ uint64_t link_index:11;
+ /* The POW entry attached to the core. */
+ uint64_t index:11;
+ /*
+ * The group attached to the core (updated when new
+ * tag list entered on SWTAG_FULL).
+ */
+ uint64_t grp:4;
+ /*
+ * The wqp attached to the core (updated when new tag
+ * list entered on SWTAG_FULL).
+ */
+ uint64_t wqp:36;
+#else
+ uint64_t wqp:36;
+ uint64_t grp:4;
+ uint64_t index:11;
+ uint64_t link_index:11;
+ uint64_t reserved_62_63:2;
+#endif
+ } s_sstatus4;
+
+ /**
+ * Result for a POW Status Load (when get_cur==1, get_wqp==1, and
+ * get_rev==1)
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_62_63:2;
+ /*
+ * Points to the prior POW entry in the tag list when
+ * head == 0 (and tag_type is not NULL or
+ * NULL_NULL). This field is unpredictable when the
+ * core's state is NULL or NULL_NULL.
+ */
+ uint64_t revlink_index:11;
+ /* The POW entry attached to the core. */
+ uint64_t index:11;
+ /*
+ * The group attached to the core (updated when new
+ * tag list entered on SWTAG_FULL).
+ */
+ uint64_t grp:4;
+ /*
+ * The wqp attached to the core (updated when new tag
+ * list entered on SWTAG_FULL).
+ */
+ uint64_t wqp:36;
+#else
+ uint64_t wqp:36;
+ uint64_t grp:4;
+ uint64_t index:11;
+ uint64_t revlink_index:11;
+ uint64_t reserved_62_63:2;
+#endif
+ } s_sstatus5;
+
+ /**
+ * Result For POW Memory Load (get_des == 0 and get_wqp == 0)
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_51_63:13;
+ /*
+ * The next entry in the input, free, descheduled_head
+ * list (unpredictable if entry is the tail of the
+ * list).
+ */
+ uint64_t next_index:11;
+ /* The group of the POW entry. */
+ uint64_t grp:4;
+ uint64_t reserved_35:1;
+ /*
+ * Set when this POW entry is at the tail of its tag
+ * list (also set when in the NULL or NULL_NULL
+ * state).
+ */
+ uint64_t tail:1;
+ /* The tag type of the POW entry. */
+ uint64_t tag_type:2;
+ /* The tag of the POW entry. */
+ uint64_t tag:32;
+#else
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t tail:1;
+ uint64_t reserved_35:1;
+ uint64_t grp:4;
+ uint64_t next_index:11;
+ uint64_t reserved_51_63:13;
+#endif
+ } s_smemload0;
+
+ /**
+ * Result For POW Memory Load (get_des == 0 and get_wqp == 1)
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_51_63:13;
+ /*
+ * The next entry in the input, free, descheduled_head
+ * list (unpredictable if entry is the tail of the
+ * list).
+ */
+ uint64_t next_index:11;
+ /* The group of the POW entry. */
+ uint64_t grp:4;
+ /* The WQP held in the POW entry. */
+ uint64_t wqp:36;
+#else
+ uint64_t wqp:36;
+ uint64_t grp:4;
+ uint64_t next_index:11;
+ uint64_t reserved_51_63:13;
+#endif
+ } s_smemload1;
+
+ /**
+ * Result For POW Memory Load (get_des == 1)
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_51_63:13;
+ /*
+ * The next entry in the tag list connected to the
+ * descheduled head.
+ */
+ uint64_t fwd_index:11;
+ /* The group of the POW entry. */
+ uint64_t grp:4;
+ /* The nosched bit for the POW entry. */
+ uint64_t nosched:1;
+ /* There is a pending tag switch */
+ uint64_t pend_switch:1;
+ /*
+ * The next tag type for the new tag list when
+ * pend_switch is set.
+ */
+ uint64_t pend_type:2;
+ /*
+ * The next tag for the new tag list when pend_switch
+ * is set.
+ */
+ uint64_t pend_tag:32;
+#else
+ uint64_t pend_tag:32;
+ uint64_t pend_type:2;
+ uint64_t pend_switch:1;
+ uint64_t nosched:1;
+ uint64_t grp:4;
+ uint64_t fwd_index:11;
+ uint64_t reserved_51_63:13;
+#endif
+ } s_smemload2;
+
+ /**
+ * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0)
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63:12;
+ /*
+		 * set when there are one or more POW entries on the
+ * free list.
+ */
+ uint64_t free_val:1;
+ /*
+ * set when there is exactly one POW entry on the free
+ * list.
+ */
+ uint64_t free_one:1;
+ uint64_t reserved_49:1;
+ /*
+ * when free_val is set, indicates the first entry on
+ * the free list.
+ */
+ uint64_t free_head:11;
+ uint64_t reserved_37:1;
+ /*
+ * when free_val is set, indicates the last entry on
+ * the free list.
+ */
+ uint64_t free_tail:11;
+ /*
+		 * set when there are one or more POW entries on the
+ * input Q list selected by qosgrp.
+ */
+ uint64_t loc_val:1;
+ /*
+ * set when there is exactly one POW entry on the
+ * input Q list selected by qosgrp.
+ */
+ uint64_t loc_one:1;
+ uint64_t reserved_23:1;
+ /*
+ * when loc_val is set, indicates the first entry on
+ * the input Q list selected by qosgrp.
+ */
+ uint64_t loc_head:11;
+ uint64_t reserved_11:1;
+ /*
+ * when loc_val is set, indicates the last entry on
+ * the input Q list selected by qosgrp.
+ */
+ uint64_t loc_tail:11;
+#else
+ uint64_t loc_tail:11;
+ uint64_t reserved_11:1;
+ uint64_t loc_head:11;
+ uint64_t reserved_23:1;
+ uint64_t loc_one:1;
+ uint64_t loc_val:1;
+ uint64_t free_tail:11;
+ uint64_t reserved_37:1;
+ uint64_t free_head:11;
+ uint64_t reserved_49:1;
+ uint64_t free_one:1;
+ uint64_t free_val:1;
+ uint64_t reserved_52_63:12;
+#endif
+ } sindexload0;
+
+ /**
+ * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1)
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63:12;
+ /*
+		 * set when there are one or more POW entries on the
+ * nosched list.
+ */
+ uint64_t nosched_val:1;
+ /*
+ * set when there is exactly one POW entry on the
+ * nosched list.
+ */
+ uint64_t nosched_one:1;
+ uint64_t reserved_49:1;
+ /*
+ * when nosched_val is set, indicates the first entry
+ * on the nosched list.
+ */
+ uint64_t nosched_head:11;
+ uint64_t reserved_37:1;
+ /*
+ * when nosched_val is set, indicates the last entry
+ * on the nosched list.
+ */
+ uint64_t nosched_tail:11;
+ /*
+		 * set when there are one or more descheduled heads on
+ * the descheduled list selected by qosgrp.
+ */
+ uint64_t des_val:1;
+ /*
+ * set when there is exactly one descheduled head on
+ * the descheduled list selected by qosgrp.
+ */
+ uint64_t des_one:1;
+ uint64_t reserved_23:1;
+ /*
+ * when des_val is set, indicates the first
+ * descheduled head on the descheduled list selected
+ * by qosgrp.
+ */
+ uint64_t des_head:11;
+ uint64_t reserved_11:1;
+ /*
+ * when des_val is set, indicates the last descheduled
+ * head on the descheduled list selected by qosgrp.
+ */
+ uint64_t des_tail:11;
+#else
+ uint64_t des_tail:11;
+ uint64_t reserved_11:1;
+ uint64_t des_head:11;
+ uint64_t reserved_23:1;
+ uint64_t des_one:1;
+ uint64_t des_val:1;
+ uint64_t nosched_tail:11;
+ uint64_t reserved_37:1;
+ uint64_t nosched_head:11;
+ uint64_t reserved_49:1;
+ uint64_t nosched_one:1;
+ uint64_t nosched_val:1;
+ uint64_t reserved_52_63:12;
+#endif
+ } sindexload1;
+
+ /**
+ * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0)
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63:25;
+ /*
+ * Set when this DRAM list is the current head
+ * (i.e. is the next to be reloaded when the POW
+ * hardware reloads a POW entry from DRAM). The POW
+ * hardware alternates between the two DRAM lists
+ * associated with a QOS level when it reloads work
+ * from DRAM into the POW unit.
+ */
+ uint64_t rmt_is_head:1;
+ /*
+ * Set when the DRAM portion of the input Q list
+ * selected by qosgrp contains one or more pieces of
+ * work.
+ */
+ uint64_t rmt_val:1;
+ /*
+ * Set when the DRAM portion of the input Q list
+ * selected by qosgrp contains exactly one piece of
+ * work.
+ */
+ uint64_t rmt_one:1;
+ /*
+ * When rmt_val is set, indicates the first piece of
+ * work on the DRAM input Q list selected by
+ * qosgrp.
+ */
+ uint64_t rmt_head:36;
+#else
+ uint64_t rmt_head:36;
+ uint64_t rmt_one:1;
+ uint64_t rmt_val:1;
+ uint64_t rmt_is_head:1;
+ uint64_t reserved_39_63:25;
+#endif
+ } sindexload2;
+
+ /**
+ * Result For POW Index/Pointer Load (get_rmt ==
+ * 1/get_des_get_tail == 1)
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_39_63:25;
+ /*
+ * set when this DRAM list is the current head
+ * (i.e. is the next to be reloaded when the POW
+ * hardware reloads a POW entry from DRAM). The POW
+ * hardware alternates between the two DRAM lists
+ * associated with a QOS level when it reloads work
+ * from DRAM into the POW unit.
+ */
+ uint64_t rmt_is_head:1;
+ /*
+ * set when the DRAM portion of the input Q list
+ * selected by qosgrp contains one or more pieces of
+ * work.
+ */
+ uint64_t rmt_val:1;
+ /*
+ * set when the DRAM portion of the input Q list
+ * selected by qosgrp contains exactly one piece of
+ * work.
+ */
+ uint64_t rmt_one:1;
+ /*
+ * when rmt_val is set, indicates the last piece of
+ * work on the DRAM input Q list selected by
+ * qosgrp.
+ */
+ uint64_t rmt_tail:36;
+#else
+ uint64_t rmt_tail:36;
+ uint64_t rmt_one:1;
+ uint64_t rmt_val:1;
+ uint64_t rmt_is_head:1;
+ uint64_t reserved_39_63:25;
+#endif
+ } sindexload3;
+
+ /**
+ * Response to NULL_RD request loads
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t unused:62;
+ /* of type cvmx_pow_tag_type_t. state is one of the
+ * following:
+ *
+ * - CVMX_POW_TAG_TYPE_ORDERED
+ * - CVMX_POW_TAG_TYPE_ATOMIC
+ * - CVMX_POW_TAG_TYPE_NULL
+ * - CVMX_POW_TAG_TYPE_NULL_NULL
+ */
+ uint64_t state:2;
+#else
+ uint64_t state:2;
+ uint64_t unused:62;
+#endif
+ } s_null_rd;
+
+} cvmx_pow_tag_load_resp_t;
+
+/**
+ * This structure describes the address used for stores to the POW.
+ * The store address is meaningful on stores to the POW. The
+ * hardware assumes that an aligned 64-bit store was used for all
+ * these stores. Note the assumption that the work queue entry is
+ * aligned on an 8-byte boundary (since the low-order 3 address bits
+ * must be zero). Note that not all fields are used by all
+ * operations.
+ *
+ * NOTE: The following is the behavior of the pending switch bit at the PP
+ * for POW stores (i.e. when did<7:3> == 0xc)
+ * - did<2:0> == 0 => pending switch bit is set
+ *	- did<2:0> == 1	 => no effect on the pending switch bit
+ * - did<2:0> == 3 => pending switch bit is cleared
+ *	- did<2:0> == 7	 => no effect on the pending switch bit
+ * - did<2:0> == others => must not be used
+ *	- No other loads/stores have an effect on the pending switch bit
+ * - The switch bus from POW can clear the pending switch bit
+ *
+ * NOTE: did<2:0> == 2 is used by the HW for a special single-cycle
+ * ADDWQ command that only contains the pointer. SW must never use
+ * did<2:0> == 2.
+ */
+typedef union {
+ /**
+ * Unsigned 64 bit integer representation of store address
+ */
+ uint64_t u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* Memory region. Should be CVMX_IO_SEG in most cases */
+ uint64_t mem_reg:2;
+ uint64_t reserved_49_61:13; /* Must be zero */
+ uint64_t is_io:1; /* Must be one */
+ /* Device ID of POW. Note that different sub-dids are used. */
+ uint64_t did:8;
+ uint64_t reserved_36_39:4; /* Must be zero */
+ /* Address field. addr<2:0> must be zero */
+ uint64_t addr:36;
+#else
+ uint64_t addr:36;
+ uint64_t reserved_36_39:4;
+ uint64_t did:8;
+ uint64_t is_io:1;
+ uint64_t reserved_49_61:13;
+ uint64_t mem_reg:2;
+#endif
+ } stag;
+} cvmx_pow_tag_store_addr_t;
+
+/**
+ * decode of the store data when an IOBDMA SENDSINGLE is sent to POW
+ */
+typedef union {
+ uint64_t u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /*
+ * the (64-bit word) location in scratchpad to write
+ * to (if len != 0)
+ */
+ uint64_t scraddr:8;
+ /* the number of words in the response (0 => no response) */
+ uint64_t len:8;
+ /* the ID of the device on the non-coherent bus */
+ uint64_t did:8;
+ uint64_t unused:36;
+ /* if set, don't return load response until work is available */
+ uint64_t wait:1;
+ uint64_t unused2:3;
+#else
+ uint64_t unused2:3;
+ uint64_t wait:1;
+ uint64_t unused:36;
+ uint64_t did:8;
+ uint64_t len:8;
+ uint64_t scraddr:8;
+#endif
+ } s;
+
+} cvmx_pow_iobdma_store_t;
+
+/* CSR typedefs have been moved to cvmx-csr-*.h */
+
+/**
+ * Get the POW tag for this core. This returns the current
+ * tag type, tag, group, and POW entry index associated with
+ * this core. Index is only valid if the tag type isn't NULL_NULL.
+ * If a tag switch is pending this routine returns the tag before
+ * the tag switch, not after.
+ *
+ * Returns Current tag
+ */
+static inline cvmx_pow_tag_req_t cvmx_pow_get_current_tag(void)
+{
+ cvmx_pow_load_addr_t load_addr;
+ cvmx_pow_tag_load_resp_t load_resp;
+ cvmx_pow_tag_req_t result;
+
+ load_addr.u64 = 0;
+ load_addr.sstatus.mem_region = CVMX_IO_SEG;
+ load_addr.sstatus.is_io = 1;
+ load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
+ load_addr.sstatus.coreid = cvmx_get_core_num();
+ load_addr.sstatus.get_cur = 1;
+ load_resp.u64 = cvmx_read_csr(load_addr.u64);
+ result.u64 = 0;
+ result.s.grp = load_resp.s_sstatus2.grp;
+ result.s.index = load_resp.s_sstatus2.index;
+ result.s.type = load_resp.s_sstatus2.tag_type;
+ result.s.tag = load_resp.s_sstatus2.tag;
+ return result;
+}
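+
+/*
+ * Illustrative usage sketch (not part of the original SDK header):
+ * check the tag currently held and switch to ATOMIC before touching
+ * per-flow shared state. The switch/wait helpers used here are
+ * defined later in this file.
+ *
+ *	cvmx_pow_tag_req_t cur = cvmx_pow_get_current_tag();
+ *
+ *	if (cur.s.type != CVMX_POW_TAG_TYPE_ATOMIC) {
+ *		cvmx_pow_tag_sw(cur.s.tag, CVMX_POW_TAG_TYPE_ATOMIC);
+ *		cvmx_pow_tag_sw_wait();
+ *	}
+ */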
+
+/**
+ * Get the POW WQE for this core. This returns the work queue
+ * entry currently associated with this core.
+ *
+ * Returns WQE pointer
+ */
+static inline struct cvmx_wqe *cvmx_pow_get_current_wqp(void)
+{
+ cvmx_pow_load_addr_t load_addr;
+ cvmx_pow_tag_load_resp_t load_resp;
+
+ load_addr.u64 = 0;
+ load_addr.sstatus.mem_region = CVMX_IO_SEG;
+ load_addr.sstatus.is_io = 1;
+ load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
+ load_addr.sstatus.coreid = cvmx_get_core_num();
+ load_addr.sstatus.get_cur = 1;
+ load_addr.sstatus.get_wqp = 1;
+ load_resp.u64 = cvmx_read_csr(load_addr.u64);
+ return (struct cvmx_wqe *) cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp);
+}
+
+#ifndef CVMX_MF_CHORD
+#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
+#endif
+
+/**
+ * Print a warning if a tag switch is pending for this core
+ *
+ * @function: Function name checking for a pending tag switch
+ */
+static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
+{
+ uint64_t switch_complete;
+ CVMX_MF_CHORD(switch_complete);
+ if (!switch_complete)
+ pr_warn("%s called with tag switch in progress\n", function);
+}
+
+/**
+ * Waits for a tag switch to complete by polling the completion bit.
+ * Note that switches to NULL complete immediately and do not need
+ * to be waited for.
+ */
+static inline void cvmx_pow_tag_sw_wait(void)
+{
+ const uint64_t MAX_CYCLES = 1ull << 31;
+ uint64_t switch_complete;
+ uint64_t start_cycle = cvmx_get_cycle();
+ while (1) {
+ CVMX_MF_CHORD(switch_complete);
+ if (unlikely(switch_complete))
+ break;
+ if (unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES)) {
+ pr_warn("Tag switch is taking a long time, possible deadlock\n");
+ start_cycle = -MAX_CYCLES - 1;
+ }
+ }
+}
+
+/**
+ * Synchronous work request. Requests work from the POW.
+ * This function does NOT wait for previous tag switches to complete,
+ * so the caller must ensure that there is not a pending tag switch.
+ *
+ * @wait: When set, call stalls until work becomes available, or times out.
+ * If not set, returns immediately.
+ *
+ * Returns: the WQE pointer from POW. Returns NULL if no work
+ * was available.
+ */
+static inline struct cvmx_wqe *cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t
+ wait)
+{
+ cvmx_pow_load_addr_t ptr;
+ cvmx_pow_tag_load_resp_t result;
+
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ ptr.u64 = 0;
+ ptr.swork.mem_region = CVMX_IO_SEG;
+ ptr.swork.is_io = 1;
+ ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;
+ ptr.swork.wait = wait;
+
+ result.u64 = cvmx_read_csr(ptr.u64);
+
+ if (result.s_work.no_work)
+ return NULL;
+ else
+ return (struct cvmx_wqe *) cvmx_phys_to_ptr(result.s_work.addr);
+}
+
+/**
+ * Synchronous work request. Requests work from the POW.
+ * This function waits for any previous tag switch to complete before
+ * requesting the new work.
+ *
+ * @wait: When set, call stalls until work becomes available, or times out.
+ * If not set, returns immediately.
+ *
+ * Returns: the WQE pointer from POW. Returns NULL if no work
+ * was available.
+ */
+static inline struct cvmx_wqe *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ /* Must not have a switch pending when requesting work */
+ cvmx_pow_tag_sw_wait();
+ return cvmx_pow_work_request_sync_nocheck(wait);
+
+}
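+
+/*
+ * Illustrative usage sketch (not part of the original SDK header): a
+ * minimal synchronous dispatch loop. CVMX_POW_WAIT is assumed to be
+ * the cvmx_pow_wait_t value defined earlier in this header;
+ * process_packet() stands in for an application-supplied handler.
+ *
+ *	for (;;) {
+ *		struct cvmx_wqe *wqp = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
+ *
+ *		if (!wqp)
+ *			continue;	(timed out, no work available)
+ *		process_packet(wqp);
+ *	}
+ */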
+
+/**
+ * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
+ * This function waits for any previous tag switch to complete before
+ * requesting the null_rd.
+ *
+ * Returns: the POW state of type cvmx_pow_tag_type_t.
+ */
+static inline enum cvmx_pow_tag_type cvmx_pow_work_request_null_rd(void)
+{
+ cvmx_pow_load_addr_t ptr;
+ cvmx_pow_tag_load_resp_t result;
+
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ /* Must not have a switch pending when requesting work */
+ cvmx_pow_tag_sw_wait();
+
+ ptr.u64 = 0;
+ ptr.snull_rd.mem_region = CVMX_IO_SEG;
+ ptr.snull_rd.is_io = 1;
+ ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD;
+
+ result.u64 = cvmx_read_csr(ptr.u64);
+
+ return (enum cvmx_pow_tag_type) result.s_null_rd.state;
+}
+
+/**
+ * Asynchronous work request. Work is requested from the POW unit,
+ * and should later be checked with function
+ * cvmx_pow_work_response_async. This function does NOT wait for
+ * previous tag switches to complete, so the caller must ensure that
+ * there is not a pending tag switch.
+ *
+ * @scr_addr: Scratch memory address that response will be returned
+ * to, which is either a valid WQE, or a response with the
+ * invalid bit set. Byte address, must be 8 byte aligned.
+ *
+ * @wait: 1 to cause response to wait for work to become available (or
+ * timeout), 0 to cause response to return immediately
+ */
+static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
+ cvmx_pow_wait_t wait)
+{
+ cvmx_pow_iobdma_store_t data;
+
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ /* scr_addr must be 8 byte aligned */
+ data.s.scraddr = scr_addr >> 3;
+ data.s.len = 1;
+ data.s.did = CVMX_OCT_DID_TAG_SWTAG;
+ data.s.wait = wait;
+ cvmx_send_single(data.u64);
+}
+
+/**
+ * Asynchronous work request. Work is requested from the POW unit,
+ * and should later be checked with function
+ * cvmx_pow_work_response_async. This function waits for any previous
+ * tag switch to complete before requesting the new work.
+ *
+ * @scr_addr: Scratch memory address that response will be returned
+ * to, which is either a valid WQE, or a response with the
+ * invalid bit set. Byte address, must be 8 byte aligned.
+ *
+ * @wait: 1 to cause response to wait for work to become available (or
+ * timeout), 0 to cause response to return immediately
+ */
+static inline void cvmx_pow_work_request_async(int scr_addr,
+ cvmx_pow_wait_t wait)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ /* Must not have a switch pending when requesting work */
+ cvmx_pow_tag_sw_wait();
+ cvmx_pow_work_request_async_nocheck(scr_addr, wait);
+}
+
+/**
+ * Gets result of asynchronous work request. Performs an IOBDMA sync
+ * to wait for the response.
+ *
+ * @scr_addr: Scratch memory address to get the result from. Byte
+ *	      address, must be 8 byte aligned.
+ *
+ * Returns: the WQE from the scratch register, or NULL if no
+ * work was available.
+ */
+static inline struct cvmx_wqe *cvmx_pow_work_response_async(int scr_addr)
+{
+ cvmx_pow_tag_load_resp_t result;
+
+ CVMX_SYNCIOBDMA;
+ result.u64 = cvmx_scratch_read64(scr_addr);
+
+ if (result.s_work.no_work)
+ return NULL;
+ else
+ return (struct cvmx_wqe *) cvmx_phys_to_ptr(result.s_work.addr);
+}
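+
+/*
+ * Illustrative usage sketch (not part of the original SDK header):
+ * prefetching the next piece of work with the asynchronous interface
+ * while the current one is processed. The scratch offset 8 is an
+ * arbitrary example value; it must be 8 byte aligned and reserved by
+ * the application, and CVMX_POW_WAIT is assumed as in the previous
+ * sketch.
+ *
+ *	cvmx_pow_work_request_async(8, CVMX_POW_WAIT);
+ *	for (;;) {
+ *		struct cvmx_wqe *wqp = cvmx_pow_work_response_async(8);
+ *
+ *		cvmx_pow_work_request_async(8, CVMX_POW_WAIT);
+ *		if (!wqp)
+ *			continue;
+ *		process_packet(wqp);	(application-supplied)
+ *	}
+ */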
+
+/**
+ * Checks if a work queue entry pointer returned by a work
+ * request is valid. It may be invalid due to no work
+ * being available or due to a timeout.
+ *
+ * @wqe_ptr: pointer to a work queue entry returned by the POW
+ *
+ * Returns 0 if pointer is valid
+ * 1 if invalid (no work was returned)
+ */
+static inline uint64_t cvmx_pow_work_invalid(struct cvmx_wqe *wqe_ptr)
+{
+ return wqe_ptr == NULL;
+}
+
+/**
+ * Starts a tag switch to the provided tag value and tag type.
+ * Completion for the tag switch must be checked for separately. This
+ * function does NOT update the work queue entry in dram to match tag
+ * value and type, so the application must keep track of these if they
+ * are important to the application. This tag switch command must not
+ * be used for switches to NULL, as the tag switch pending bit will be
+ * set by the switch request, but never cleared by the hardware.
+ *
+ * NOTE: This should not be used when switching from a NULL tag. Use
+ * cvmx_pow_tag_sw_full() instead.
+ *
+ * This function does no checks, so the caller must ensure that any
+ * previous tag switch has completed.
+ *
+ * @tag: new tag value
+ * @tag_type: new tag type (ordered or atomic)
+ */
+static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag,
+ enum cvmx_pow_tag_type tag_type)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ if (CVMX_ENABLE_POW_CHECKS) {
+ cvmx_pow_tag_req_t current_tag;
+ __cvmx_pow_warn_if_pending_switch(__func__);
+ current_tag = cvmx_pow_get_current_tag();
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
+ pr_warn("%s called with NULL_NULL tag\n", __func__);
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
+ pr_warn("%s called with NULL tag\n", __func__);
+ if ((current_tag.s.type == tag_type)
+ && (current_tag.s.tag == tag))
+ pr_warn("%s called to perform a tag switch to the same tag\n",
+ __func__);
+ if (tag_type == CVMX_POW_TAG_TYPE_NULL)
+ pr_warn("%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n",
+ __func__);
+ }
+
+ /*
+ * Note that WQE in DRAM is not updated here, as the POW does
+ * not read from DRAM once the WQE is in flight. See hardware
+ * manual for complete details. It is the application's
+ * responsibility to keep track of the current tag value if
+ * that is important.
+ */
+
+ tag_req.u64 = 0;
+ tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
+ tag_req.s.tag = tag;
+ tag_req.s.type = tag_type;
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
+
+	/*
+	 * once this store arrives at POW, it will attempt the switch;
+	 * software must wait for the switch to complete separately.
+	 */
+ cvmx_write_io(ptr.u64, tag_req.u64);
+}
+
+/**
+ * Starts a tag switch to the provided tag value and tag type.
+ * Completion for the tag switch must be checked for separately. This
+ * function does NOT update the work queue entry in dram to match tag
+ * value and type, so the application must keep track of these if they
+ * are important to the application. This tag switch command must not
+ * be used for switches to NULL, as the tag switch pending bit will be
+ * set by the switch request, but never cleared by the hardware.
+ *
+ * NOTE: This should not be used when switching from a NULL tag. Use
+ * cvmx_pow_tag_sw_full() instead.
+ *
+ * This function waits for any previous tag switch to complete, and also
+ * displays an error on tag switches to NULL.
+ *
+ * @tag: new tag value
+ * @tag_type: new tag type (ordered or atomic)
+ */
+static inline void cvmx_pow_tag_sw(uint32_t tag,
+ enum cvmx_pow_tag_type tag_type)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ /*
+ * Note that WQE in DRAM is not updated here, as the POW does
+ * not read from DRAM once the WQE is in flight. See hardware
+ * manual for complete details. It is the application's
+ * responsibility to keep track of the current tag value if
+ * that is important.
+ */
+
+ /*
+ * Ensure that there is not a pending tag switch, as a tag
+ * switch cannot be started if a previous switch is still
+ * pending.
+ */
+ cvmx_pow_tag_sw_wait();
+ cvmx_pow_tag_sw_nocheck(tag, tag_type);
+}
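+
+/*
+ * Illustrative usage sketch (not part of the original SDK header):
+ * serialize a critical section by switching the current ORDERED tag
+ * to ATOMIC with the same 32 bit tag value, then switch back once the
+ * exclusive update is done. tag and update_shared_flow_state() are
+ * supplied by the application.
+ *
+ *	cvmx_pow_tag_sw(tag, CVMX_POW_TAG_TYPE_ATOMIC);
+ *	cvmx_pow_tag_sw_wait();		(only one core now holds this tag)
+ *	update_shared_flow_state();
+ *	cvmx_pow_tag_sw(tag, CVMX_POW_TAG_TYPE_ORDERED);
+ */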
+
+/**
+ * Starts a tag switch to the provided tag value and tag type.
+ * Completion for the tag switch must be checked for separately. This
+ * function does NOT update the work queue entry in dram to match tag
+ * value and type, so the application must keep track of these if they
+ * are important to the application. This tag switch command must not
+ * be used for switches to NULL, as the tag switch pending bit will be
+ * set by the switch request, but never cleared by the hardware.
+ *
+ * This function must be used for tag switches from NULL.
+ *
+ * This function does no checks, so the caller must ensure that any
+ * previous tag switch has completed.
+ *
+ * @wqp: pointer to work queue entry to submit. This entry is
+ * updated to match the other parameters
+ * @tag: tag value to be assigned to work queue entry
+ * @tag_type: type of tag
+ * @group: group value for the work queue entry.
+ */
+static inline void cvmx_pow_tag_sw_full_nocheck(struct cvmx_wqe *wqp, uint32_t tag,
+ enum cvmx_pow_tag_type tag_type,
+ uint64_t group)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ if (CVMX_ENABLE_POW_CHECKS) {
+ cvmx_pow_tag_req_t current_tag;
+ __cvmx_pow_warn_if_pending_switch(__func__);
+ current_tag = cvmx_pow_get_current_tag();
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
+ pr_warn("%s called with NULL_NULL tag\n", __func__);
+ if ((current_tag.s.type == tag_type)
+ && (current_tag.s.tag == tag))
+ pr_warn("%s called to perform a tag switch to the same tag\n",
+ __func__);
+ if (tag_type == CVMX_POW_TAG_TYPE_NULL)
+ pr_warn("%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n",
+ __func__);
+ if (wqp != cvmx_phys_to_ptr(0x80))
+ if (wqp != cvmx_pow_get_current_wqp())
+ pr_warn("%s passed WQE(%p) doesn't match the address in the POW(%p)\n",
+ __func__, wqp,
+ cvmx_pow_get_current_wqp());
+ }
+
+ /*
+ * Note that WQE in DRAM is not updated here, as the POW does
+ * not read from DRAM once the WQE is in flight. See hardware
+ * manual for complete details. It is the application's
+ * responsibility to keep track of the current tag value if
+ * that is important.
+ */
+
+ tag_req.u64 = 0;
+ tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_FULL;
+ tag_req.s.tag = tag;
+ tag_req.s.type = tag_type;
+ tag_req.s.grp = group;
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
+ ptr.sio.offset = CAST64(wqp);
+
+ /*
+ * once this store arrives at POW, it will attempt the switch
+ * software must wait for the switch to complete separately.
+ */
+ cvmx_write_io(ptr.u64, tag_req.u64);
+}
+
+/**
+ * Starts a tag switch to the provided tag value and tag type.
+ * Completion for the tag switch must be checked for separately. This
+ * function does NOT update the work queue entry in dram to match tag
+ * value and type, so the application must keep track of these if they
+ * are important to the application. This tag switch command must not
+ * be used for switches to NULL, as the tag switch pending bit will be
+ * set by the switch request, but never cleared by the hardware.
+ *
+ * This function must be used for tag switches from NULL.
+ *
+ * This function waits for any pending tag switches to complete
+ * before requesting the tag switch.
+ *
+ * @wqp: pointer to work queue entry to submit. This entry is updated
+ * to match the other parameters
+ * @tag: tag value to be assigned to work queue entry
+ * @tag_type: type of tag
+ * @group: group value for the work queue entry.
+ */
+static inline void cvmx_pow_tag_sw_full(struct cvmx_wqe *wqp, uint32_t tag,
+ enum cvmx_pow_tag_type tag_type,
+ uint64_t group)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ /*
+ * Ensure that there is not a pending tag switch, as a tag
+ * switch cannot be started if a previous switch is still
+ * pending.
+ */
+ cvmx_pow_tag_sw_wait();
+ cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group);
+}
+
+/**
+ * Switch to a NULL tag, which ends any ordering or
+ * synchronization provided by the POW for the current
+ * work queue entry. This operation completes immediately,
+ * so completion should not be waited for.
+ * This function does NOT wait for previous tag switches to complete,
+ * so the caller must ensure that any previous tag switches have completed.
+ */
+static inline void cvmx_pow_tag_sw_null_nocheck(void)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ if (CVMX_ENABLE_POW_CHECKS) {
+ cvmx_pow_tag_req_t current_tag;
+ __cvmx_pow_warn_if_pending_switch(__func__);
+ current_tag = cvmx_pow_get_current_tag();
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
+ pr_warn("%s called with NULL_NULL tag\n", __func__);
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
+ pr_warn("%s called when we already have a NULL tag\n",
+ __func__);
+ }
+
+ tag_req.u64 = 0;
+ tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
+ tag_req.s.type = CVMX_POW_TAG_TYPE_NULL;
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
+
+ cvmx_write_io(ptr.u64, tag_req.u64);
+
+ /* switch to NULL completes immediately */
+}
+
+/**
+ * Switch to a NULL tag, which ends any ordering or
+ * synchronization provided by the POW for the current
+ * work queue entry. This operation completes immediately,
+ * so completion should not be waited for.
+ * This function waits for any pending tag switches to complete
+ * before requesting the switch to NULL.
+ */
+static inline void cvmx_pow_tag_sw_null(void)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ /*
+ * Ensure that there is not a pending tag switch, as a tag
+ * switch cannot be started if a previous switch is still
+ * pending.
+ */
+ cvmx_pow_tag_sw_wait();
+ cvmx_pow_tag_sw_null_nocheck();
+
+ /* switch to NULL completes immediately */
+}
+
+/**
+ * Submits work to an input queue. This function updates the work
+ * queue entry in DRAM to match the arguments given. Note that the
+ * tag provided is for the work queue entry submitted, and is
+ * unrelated to the tag that the core currently holds.
+ *
+ * @wqp: pointer to work queue entry to submit. This entry is
+ * updated to match the other parameters
+ * @tag: tag value to be assigned to work queue entry
+ * @tag_type: type of tag
+ * @qos: Input queue to add to.
+ * @grp: group value for the work queue entry.
+ */
+static inline void cvmx_pow_work_submit(struct cvmx_wqe *wqp, uint32_t tag,
+ enum cvmx_pow_tag_type tag_type,
+ uint64_t qos, uint64_t grp)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ wqp->word1.tag = tag;
+ wqp->word1.tag_type = tag_type;
+
+ cvmx_wqe_set_qos(wqp, qos);
+ cvmx_wqe_set_grp(wqp, grp);
+
+ tag_req.u64 = 0;
+ tag_req.s.op = CVMX_POW_TAG_OP_ADDWQ;
+ tag_req.s.type = tag_type;
+ tag_req.s.tag = tag;
+ tag_req.s.qos = qos;
+ tag_req.s.grp = grp;
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
+ ptr.sio.offset = cvmx_ptr_to_phys(wqp);
+
+ /*
+ * SYNC write to memory before the work submit. This is
+ * necessary as POW may read values from DRAM at this time.
+ */
+ CVMX_SYNCWS;
+ cvmx_write_io(ptr.u64, tag_req.u64);
+}
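+
+/*
+ * Illustrative usage sketch (not part of the original SDK header):
+ * inject locally generated work into input queue 0, group 0. The WQE
+ * buffer and the flow_hash value are assumed to come from the
+ * application (for example an FPA-managed pool and a header hash);
+ * cvmx_pow_tag_compose() is defined later in this file.
+ *
+ *	struct cvmx_wqe *wqp = alloc_wqe_buffer();	(hypothetical helper)
+ *
+ *	if (wqp)
+ *		cvmx_pow_work_submit(wqp, cvmx_pow_tag_compose(0x2, flow_hash),
+ *				     CVMX_POW_TAG_TYPE_ORDERED, 0, 0);
+ */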
+
+/**
+ * This function sets the group mask for a core. The group mask
+ * indicates which groups each core will accept work from. There are
+ * 16 groups.
+ *
+ * @core_num: core to apply mask to
+ * @mask: Group mask. There are 16 groups, so only bits 0-15 are valid,
+ * representing groups 0-15.
+ * Each 1 bit in the mask enables the core to accept work from
+ * the corresponding group.
+ */
+static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
+{
+ union cvmx_pow_pp_grp_mskx grp_msk;
+
+ grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
+ grp_msk.s.grp_msk = mask;
+ cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
+}
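+
+/*
+ * Illustrative usage sketch (not part of the original SDK header):
+ * during initialization, restrict the current core to accept work
+ * from groups 0 and 1 only.
+ *
+ *	cvmx_pow_set_group_mask(cvmx_get_core_num(), (1 << 0) | (1 << 1));
+ */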
+
+/**
+ * This function sets POW static priorities for a core. Each input queue has
+ * an associated priority value.
+ *
+ * @core_num: core to apply priorities to
+ * @priority: Vector of 8 priorities, one per POW Input Queue (0-7).
+ * Highest priority is 0 and lowest is 7. A priority value
+ * of 0xF instructs POW to skip the Input Queue when
+ * scheduling to this specific core.
+ * NOTE: priorities should not have gaps in values, meaning
+ * {0,1,1,1,1,1,1,1} is a valid configuration while
+ * {0,2,2,2,2,2,2,2} is not.
+ */
+static inline void cvmx_pow_set_priority(uint64_t core_num,
+ const uint8_t priority[])
+{
+ /* POW priorities are supported on CN5xxx and later */
+ if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ union cvmx_pow_pp_grp_mskx grp_msk;
+
+ grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
+ grp_msk.s.qos0_pri = priority[0];
+ grp_msk.s.qos1_pri = priority[1];
+ grp_msk.s.qos2_pri = priority[2];
+ grp_msk.s.qos3_pri = priority[3];
+ grp_msk.s.qos4_pri = priority[4];
+ grp_msk.s.qos5_pri = priority[5];
+ grp_msk.s.qos6_pri = priority[6];
+ grp_msk.s.qos7_pri = priority[7];
+
+ /* Detect gaps between priorities and flag error */
+ {
+ int i;
+ uint32_t prio_mask = 0;
+
+ for (i = 0; i < 8; i++)
+ if (priority[i] != 0xF)
+ prio_mask |= 1 << priority[i];
+
+ if (prio_mask ^ ((1 << cvmx_pop(prio_mask)) - 1)) {
+ pr_err("POW static priorities should be "
+ "contiguous (0x%llx)\n",
+ (unsigned long long)prio_mask);
+ return;
+ }
+ }
+
+ cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
+ }
+}
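+
+/*
+ * Illustrative usage sketch (not part of the original SDK header):
+ * give input queue 0 the highest priority and treat the remaining
+ * queues equally. Note the values are contiguous, as required above.
+ *
+ *	static const uint8_t prio[8] = { 0, 1, 1, 1, 1, 1, 1, 1 };
+ *
+ *	cvmx_pow_set_priority(cvmx_get_core_num(), prio);
+ */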
+
+/**
+ * Performs a tag switch and then an immediate deschedule. This completes
+ * immediately, so completion must not be waited for. This function does NOT
+ * update the wqe in DRAM to match arguments.
+ *
+ * This function does NOT wait for any prior tag switches to complete, so the
+ * calling code must do this.
+ *
+ * Note the following CAVEAT of the Octeon HW behavior when
+ * re-scheduling DE-SCHEDULEd items whose (next) state is
+ * ORDERED:
+ * - If there are no switches pending at the time that the
+ * HW executes the de-schedule, the HW will only re-schedule
+ * the head of the FIFO associated with the given tag. This
+ * means that in many respects, the HW treats this ORDERED
+ * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
+ * case (to an ORDERED tag), the HW will do the switch
+ * before the deschedule whenever it is possible to do
+ * the switch immediately, so it may often look like
+ * this case.
+ * - If there is a pending switch to ORDERED at the time
+ * the HW executes the de-schedule, the HW will perform
+ * the switch at the time it re-schedules, and will be
+ * able to reschedule any/all of the entries with the
+ * same tag.
+ * Due to this behavior, the RECOMMENDATION to software is
+ * that they have a (next) state of ATOMIC when they
+ * DE-SCHEDULE. If an ORDERED tag is what was really desired,
+ * SW can choose to immediately switch to an ORDERED tag
+ * after the work (that has an ATOMIC tag) is re-scheduled.
+ * Note that since there are never any tag switches pending
+ * when the HW re-schedules, this switch can be IMMEDIATE upon
+ * the reception of the pointer during the re-schedule.
+ *
+ * @tag: New tag value
+ * @tag_type: New tag type
+ * @group: New group value
+ * @no_sched: Control whether this work queue entry will be rescheduled.
+ * - 1 : don't schedule this work
+ * - 0 : allow this work to be scheduled.
+ */
+static inline void cvmx_pow_tag_sw_desched_nocheck(
+ uint32_t tag,
+ enum cvmx_pow_tag_type tag_type,
+ uint64_t group,
+ uint64_t no_sched)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ if (CVMX_ENABLE_POW_CHECKS) {
+ cvmx_pow_tag_req_t current_tag;
+ __cvmx_pow_warn_if_pending_switch(__func__);
+ current_tag = cvmx_pow_get_current_tag();
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
+ pr_warn("%s called with NULL_NULL tag\n", __func__);
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
+ pr_warn("%s called with NULL tag. Deschedule not allowed from NULL state\n",
+ __func__);
+ if ((current_tag.s.type != CVMX_POW_TAG_TYPE_ATOMIC)
+ && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC))
+ pr_warn("%s called where neither the before or after tag is ATOMIC\n",
+ __func__);
+ }
+
+ tag_req.u64 = 0;
+ tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
+ tag_req.s.tag = tag;
+ tag_req.s.type = tag_type;
+ tag_req.s.grp = group;
+ tag_req.s.no_sched = no_sched;
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
+ /*
+ * since TAG3 is used, this store will clear the local pending
+ * switch bit.
+ */
+ cvmx_write_io(ptr.u64, tag_req.u64);
+}
+
+/**
+ * Performs a tag switch and then an immediate deschedule. This completes
+ * immediately, so completion must not be waited for. This function does NOT
+ * update the wqe in DRAM to match arguments.
+ *
+ * This function waits for any prior tag switches to complete, so the
+ * calling code may call this function with a pending tag switch.
+ *
+ * Note the following CAVEAT of the Octeon HW behavior when
+ * re-scheduling DE-SCHEDULEd items whose (next) state is
+ * ORDERED:
+ * - If there are no switches pending at the time that the
+ * HW executes the de-schedule, the HW will only re-schedule
+ * the head of the FIFO associated with the given tag. This
+ * means that in many respects, the HW treats this ORDERED
+ * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
+ * case (to an ORDERED tag), the HW will do the switch
+ * before the deschedule whenever it is possible to do
+ * the switch immediately, so it may often look like
+ * this case.
+ * - If there is a pending switch to ORDERED at the time
+ * the HW executes the de-schedule, the HW will perform
+ * the switch at the time it re-schedules, and will be
+ * able to reschedule any/all of the entries with the
+ * same tag.
+ * Due to this behavior, the RECOMMENDATION to software is
+ * that they have a (next) state of ATOMIC when they
+ * DE-SCHEDULE. If an ORDERED tag is what was really desired,
+ * SW can choose to immediately switch to an ORDERED tag
+ * after the work (that has an ATOMIC tag) is re-scheduled.
+ * Note that since there are never any tag switches pending
+ * when the HW re-schedules, this switch can be IMMEDIATE upon
+ * the reception of the pointer during the re-schedule.
+ *
+ * @tag: New tag value
+ * @tag_type: New tag type
+ * @group: New group value
+ * @no_sched: Control whether this work queue entry will be rescheduled.
+ * - 1 : don't schedule this work
+ * - 0 : allow this work to be scheduled.
+ */
+static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
+ enum cvmx_pow_tag_type tag_type,
+ uint64_t group, uint64_t no_sched)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ /* Need to make sure any writes to the work queue entry are complete */
+ CVMX_SYNCWS;
+ /*
+ * Ensure that there is not a pending tag switch, as a tag
+ * switch cannot be started if a previous switch is still
+ * pending.
+ */
+ cvmx_pow_tag_sw_wait();
+ cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched);
+}
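+
+/*
+ * Illustrative usage sketch (not part of the original SDK header):
+ * following the recommendation above, deschedule with an ATOMIC
+ * (next) state and leave the work eligible for rescheduling. tag and
+ * grp are the new tag value and group chosen by the application.
+ *
+ *	cvmx_pow_tag_sw_desched(tag, CVMX_POW_TAG_TYPE_ATOMIC, grp, 0);
+ */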
+
+/**
+ * Deschedules the current work queue entry.
+ *
+ * @no_sched: no schedule flag value to be set on the work queue
+ * entry. If this is set the entry will not be
+ * rescheduled.
+ */
+static inline void cvmx_pow_desched(uint64_t no_sched)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ if (CVMX_ENABLE_POW_CHECKS) {
+ cvmx_pow_tag_req_t current_tag;
+ __cvmx_pow_warn_if_pending_switch(__func__);
+ current_tag = cvmx_pow_get_current_tag();
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
+ pr_warn("%s called with NULL_NULL tag\n", __func__);
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
+ pr_warn("%s called with NULL tag. Deschedule not expected from NULL state\n",
+ __func__);
+ }
+
+ /* Need to make sure any writes to the work queue entry are complete */
+ CVMX_SYNCWS;
+
+ tag_req.u64 = 0;
+ tag_req.s.op = CVMX_POW_TAG_OP_DESCH;
+ tag_req.s.no_sched = no_sched;
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
+ /*
+ * since TAG3 is used, this store will clear the local pending
+ * switch bit.
+ */
+ cvmx_write_io(ptr.u64, tag_req.u64);
+}
+
+/****************************************************
+* Define usage of bits within the 32 bit tag values.
+*****************************************************/
+
+/*
+ * Number of bits of the tag used by software. The SW bits are always
+ * a contiguous block of the high bits, starting at bit 31. The
+ * hardware bits are always the low bits. By default, the top 8 bits of the
+ * tag are reserved for software, and the low 24 are set by the IPD
+ * unit.
+ */
+#define CVMX_TAG_SW_BITS (8)
+#define CVMX_TAG_SW_SHIFT (32 - CVMX_TAG_SW_BITS)
+
+/* Below is the list of values for the top 8 bits of the tag. */
+/*
+ * Tag values with top byte of this value are reserved for internal
+ * executive uses.
+ */
+#define CVMX_TAG_SW_BITS_INTERNAL 0x1
+/* The executive divides the remaining 24 bits as follows:
+ * - the upper 8 bits (bits 23 - 16 of the tag) define a subgroup
+ *
+ * - the lower 16 bits (bits 15 - 0 of the tag) define the value
+ *   within the subgroup
+ *
+ * Note that this section describes the format of tags generated by
+ * software - refer to the hardware documentation for a description of
+ * the tags values generated by the packet input hardware. Subgroups
+ * are defined here.
+ */
+/* Mask for the value portion of the tag */
+#define CVMX_TAG_SUBGROUP_MASK 0xFFFF
+#define CVMX_TAG_SUBGROUP_SHIFT 16
+#define CVMX_TAG_SUBGROUP_PKO 0x1
+
+/* End of executive tag subgroup definitions */
+
+/*
+ * The remaining software bit values 0x2 - 0xff are available
+ * for application use.
+ */
+
+/**
+ * This function creates a 32 bit tag value from the two values provided.
+ *
+ * @sw_bits: The upper bits (number depends on configuration) are set
+ * to this value. The remainder of bits are set by the
+ * hw_bits parameter.
+ *
+ * @hw_bits: The lower bits (number depends on configuration) are set
+ * to this value. The remainder of bits are set by the
+ * sw_bits parameter.
+ *
+ * Returns 32 bit value of the combined hw and sw bits.
+ */
+static inline uint32_t cvmx_pow_tag_compose(uint64_t sw_bits, uint64_t hw_bits)
+{
+ return ((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) <<
+ CVMX_TAG_SW_SHIFT) |
+ (hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS));
+}
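+
+/*
+ * Illustrative example (not part of the original SDK header): with the
+ * default CVMX_TAG_SW_BITS of 8, composing sw_bits 0x1 with hw_bits
+ * 0x123456 yields (0x1 << 24) | 0x123456 == 0x01123456.
+ *
+ *	uint32_t tag = cvmx_pow_tag_compose(CVMX_TAG_SW_BITS_INTERNAL, 0x123456);
+ */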
+
+/**
+ * Extracts the bits allocated for software use from the tag
+ *
+ * @tag: 32 bit tag value
+ *
+ * Returns N bit software tag value, where N is configurable with the
+ * CVMX_TAG_SW_BITS define
+ */
+static inline uint32_t cvmx_pow_tag_get_sw_bits(uint64_t tag)
+{
+ return (tag >> (32 - CVMX_TAG_SW_BITS)) &
+ cvmx_build_mask(CVMX_TAG_SW_BITS);
+}
+
+/**
+ *
+ * Extracts the bits allocated for hardware use from the tag
+ *
+ * @tag: 32 bit tag value
+ *
+ * Returns (32 - N) bit software tag value, where N is configurable
+ * with the CVMX_TAG_SW_BITS define
+ */
+static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
+{
+ return tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS);
+}
+
+/**
+ * Store the current POW internal state into the supplied
+ * buffer. It is recommended that you pass a buffer of at least
+ * 128KB. The format of the capture may change based on SDK
+ * version and Octeon chip.
+ *
+ * @buffer: Buffer to store capture into
+ * @buffer_size:
+ * The size of the supplied buffer
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int cvmx_pow_capture(void *buffer, int buffer_size);
+
+/**
+ * Dump a POW capture to the console in a human readable format.
+ *
+ * @buffer: POW capture from cvmx_pow_capture()
+ * @buffer_size:
+ * Size of the buffer
+ */
+extern void cvmx_pow_display(void *buffer, int buffer_size);
+
+/**
+ * Return the number of POW entries supported by this chip
+ *
+ * Returns Number of POW entries
+ */
+extern int cvmx_pow_get_num_entries(void);
+
+#endif /* __CVMX_POW_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-rnm-defs.h b/arch/mips/include/asm/octeon/cvmx-rnm-defs.h
new file mode 100644
index 000000000..94295d2fe
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-rnm-defs.h
@@ -0,0 +1,171 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_RNM_DEFS_H__
+#define __CVMX_RNM_DEFS_H__
+
+#define CVMX_RNM_BIST_STATUS (CVMX_ADD_IO_SEG(0x0001180040000008ull))
+#define CVMX_RNM_CTL_STATUS (CVMX_ADD_IO_SEG(0x0001180040000000ull))
+#define CVMX_RNM_EER_DBG (CVMX_ADD_IO_SEG(0x0001180040000018ull))
+#define CVMX_RNM_EER_KEY (CVMX_ADD_IO_SEG(0x0001180040000010ull))
+#define CVMX_RNM_SERIAL_NUM (CVMX_ADD_IO_SEG(0x0001180040000020ull))
+
+union cvmx_rnm_bist_status {
+ uint64_t u64;
+ struct cvmx_rnm_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t rrc:1;
+ uint64_t mem:1;
+#else
+ uint64_t mem:1;
+ uint64_t rrc:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_rnm_ctl_status {
+ uint64_t u64;
+ struct cvmx_rnm_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t dis_mak:1;
+ uint64_t eer_lck:1;
+ uint64_t eer_val:1;
+ uint64_t ent_sel:4;
+ uint64_t exp_ent:1;
+ uint64_t rng_rst:1;
+ uint64_t rnm_rst:1;
+ uint64_t rng_en:1;
+ uint64_t ent_en:1;
+#else
+ uint64_t ent_en:1;
+ uint64_t rng_en:1;
+ uint64_t rnm_rst:1;
+ uint64_t rng_rst:1;
+ uint64_t exp_ent:1;
+ uint64_t ent_sel:4;
+ uint64_t eer_val:1;
+ uint64_t eer_lck:1;
+ uint64_t dis_mak:1;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+ struct cvmx_rnm_ctl_status_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t rng_rst:1;
+ uint64_t rnm_rst:1;
+ uint64_t rng_en:1;
+ uint64_t ent_en:1;
+#else
+ uint64_t ent_en:1;
+ uint64_t rng_en:1;
+ uint64_t rnm_rst:1;
+ uint64_t rng_rst:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } cn30xx;
+ struct cvmx_rnm_ctl_status_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t ent_sel:4;
+ uint64_t exp_ent:1;
+ uint64_t rng_rst:1;
+ uint64_t rnm_rst:1;
+ uint64_t rng_en:1;
+ uint64_t ent_en:1;
+#else
+ uint64_t ent_en:1;
+ uint64_t rng_en:1;
+ uint64_t rnm_rst:1;
+ uint64_t rng_rst:1;
+ uint64_t exp_ent:1;
+ uint64_t ent_sel:4;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn50xx;
+ struct cvmx_rnm_ctl_status_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t eer_lck:1;
+ uint64_t eer_val:1;
+ uint64_t ent_sel:4;
+ uint64_t exp_ent:1;
+ uint64_t rng_rst:1;
+ uint64_t rnm_rst:1;
+ uint64_t rng_en:1;
+ uint64_t ent_en:1;
+#else
+ uint64_t ent_en:1;
+ uint64_t rng_en:1;
+ uint64_t rnm_rst:1;
+ uint64_t rng_rst:1;
+ uint64_t exp_ent:1;
+ uint64_t ent_sel:4;
+ uint64_t eer_val:1;
+ uint64_t eer_lck:1;
+ uint64_t reserved_11_63:53;
+#endif
+ } cn63xx;
+};
+
+union cvmx_rnm_eer_dbg {
+ uint64_t u64;
+ struct cvmx_rnm_eer_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dat:64;
+#else
+ uint64_t dat:64;
+#endif
+ } s;
+};
+
+union cvmx_rnm_eer_key {
+ uint64_t u64;
+ struct cvmx_rnm_eer_key_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t key:64;
+#else
+ uint64_t key:64;
+#endif
+ } s;
+};
+
+union cvmx_rnm_serial_num {
+ uint64_t u64;
+ struct cvmx_rnm_serial_num_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dat:64;
+#else
+ uint64_t dat:64;
+#endif
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-rst-defs.h b/arch/mips/include/asm/octeon/cvmx-rst-defs.h
new file mode 100644
index 000000000..accc9977d
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-rst-defs.h
@@ -0,0 +1,278 @@
+/***********************license start***************
+ * Author: Cavium Inc.
+ *
+ * Contact: support@cavium.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2014 Cavium Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Inc. for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_RST_DEFS_H__
+#define __CVMX_RST_DEFS_H__
+
+#define CVMX_RST_BOOT (CVMX_ADD_IO_SEG(0x0001180006001600ull))
+#define CVMX_RST_CFG (CVMX_ADD_IO_SEG(0x0001180006001610ull))
+#define CVMX_RST_CKILL (CVMX_ADD_IO_SEG(0x0001180006001638ull))
+#define CVMX_RST_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180006001640ull) + ((offset) & 3) * 8)
+#define CVMX_RST_DELAY (CVMX_ADD_IO_SEG(0x0001180006001608ull))
+#define CVMX_RST_ECO (CVMX_ADD_IO_SEG(0x00011800060017B8ull))
+#define CVMX_RST_INT (CVMX_ADD_IO_SEG(0x0001180006001628ull))
+#define CVMX_RST_OCX (CVMX_ADD_IO_SEG(0x0001180006001618ull))
+#define CVMX_RST_POWER_DBG (CVMX_ADD_IO_SEG(0x0001180006001708ull))
+#define CVMX_RST_PP_POWER (CVMX_ADD_IO_SEG(0x0001180006001700ull))
+#define CVMX_RST_SOFT_PRSTX(offset) (CVMX_ADD_IO_SEG(0x00011800060016C0ull) + ((offset) & 3) * 8)
+#define CVMX_RST_SOFT_RST (CVMX_ADD_IO_SEG(0x0001180006001680ull))
+
+union cvmx_rst_boot {
+ uint64_t u64;
+ struct cvmx_rst_boot_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t chipkill:1;
+ uint64_t jtcsrdis:1;
+ uint64_t ejtagdis:1;
+ uint64_t romen:1;
+ uint64_t ckill_ppdis:1;
+ uint64_t jt_tstmode:1;
+ uint64_t vrm_err:1;
+ uint64_t reserved_37_56:20;
+ uint64_t c_mul:7;
+ uint64_t pnr_mul:6;
+ uint64_t reserved_21_23:3;
+ uint64_t lboot_oci:3;
+ uint64_t lboot_ext:6;
+ uint64_t lboot:10;
+ uint64_t rboot:1;
+ uint64_t rboot_pin:1;
+#else
+ uint64_t rboot_pin:1;
+ uint64_t rboot:1;
+ uint64_t lboot:10;
+ uint64_t lboot_ext:6;
+ uint64_t lboot_oci:3;
+ uint64_t reserved_21_23:3;
+ uint64_t pnr_mul:6;
+ uint64_t c_mul:7;
+ uint64_t reserved_37_56:20;
+ uint64_t vrm_err:1;
+ uint64_t jt_tstmode:1;
+ uint64_t ckill_ppdis:1;
+ uint64_t romen:1;
+ uint64_t ejtagdis:1;
+ uint64_t jtcsrdis:1;
+ uint64_t chipkill:1;
+#endif
+ } s;
+};
+
+union cvmx_rst_cfg {
+ uint64_t u64;
+ struct cvmx_rst_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bist_delay:58;
+ uint64_t reserved_3_5:3;
+ uint64_t cntl_clr_bist:1;
+ uint64_t warm_clr_bist:1;
+ uint64_t soft_clr_bist:1;
+#else
+ uint64_t soft_clr_bist:1;
+ uint64_t warm_clr_bist:1;
+ uint64_t cntl_clr_bist:1;
+ uint64_t reserved_3_5:3;
+ uint64_t bist_delay:58;
+#endif
+ } s;
+};
+
+union cvmx_rst_ckill {
+ uint64_t u64;
+ struct cvmx_rst_ckill_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63:17;
+ uint64_t timer:47;
+#else
+ uint64_t timer:47;
+ uint64_t reserved_47_63:17;
+#endif
+ } s;
+};
+
+union cvmx_rst_ctlx {
+ uint64_t u64;
+ struct cvmx_rst_ctlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t prst_link:1;
+ uint64_t rst_done:1;
+ uint64_t rst_link:1;
+ uint64_t host_mode:1;
+ uint64_t reserved_4_5:2;
+ uint64_t rst_drv:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_val:1;
+#else
+ uint64_t rst_val:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_drv:1;
+ uint64_t reserved_4_5:2;
+ uint64_t host_mode:1;
+ uint64_t rst_link:1;
+ uint64_t rst_done:1;
+ uint64_t prst_link:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+};
+
+union cvmx_rst_delay {
+ uint64_t u64;
+ struct cvmx_rst_delay_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t warm_rst_dly:16;
+ uint64_t soft_rst_dly:16;
+#else
+ uint64_t soft_rst_dly:16;
+ uint64_t warm_rst_dly:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_rst_eco {
+ uint64_t u64;
+ struct cvmx_rst_eco_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t eco_rw:32;
+#else
+ uint64_t eco_rw:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_rst_int {
+ uint64_t u64;
+ struct cvmx_rst_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t perst:4;
+ uint64_t reserved_4_7:4;
+ uint64_t rst_link:4;
+#else
+ uint64_t rst_link:4;
+ uint64_t reserved_4_7:4;
+ uint64_t perst:4;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+ struct cvmx_rst_int_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t perst:3;
+ uint64_t reserved_3_7:5;
+ uint64_t rst_link:3;
+#else
+ uint64_t rst_link:3;
+ uint64_t reserved_3_7:5;
+ uint64_t perst:3;
+ uint64_t reserved_11_63:53;
+#endif
+ } cn70xx;
+};
+
+union cvmx_rst_ocx {
+ uint64_t u64;
+ struct cvmx_rst_ocx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t rst_link:3;
+#else
+ uint64_t rst_link:3;
+ uint64_t reserved_3_63:61;
+#endif
+ } s;
+};
+
+union cvmx_rst_power_dbg {
+ uint64_t u64;
+ struct cvmx_rst_power_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t str:3;
+#else
+ uint64_t str:3;
+ uint64_t reserved_3_63:61;
+#endif
+ } s;
+};
+
+union cvmx_rst_pp_power {
+ uint64_t u64;
+ struct cvmx_rst_pp_power_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t gate:48;
+#else
+ uint64_t gate:48;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+ struct cvmx_rst_pp_power_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t gate:4;
+#else
+ uint64_t gate:4;
+ uint64_t reserved_4_63:60;
+#endif
+ } cn70xx;
+};
+
+union cvmx_rst_soft_prstx {
+ uint64_t u64;
+ struct cvmx_rst_soft_prstx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t soft_prst:1;
+#else
+ uint64_t soft_prst:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_rst_soft_rst {
+ uint64_t u64;
+ struct cvmx_rst_soft_rst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t soft_rst:1;
+#else
+ uint64_t soft_rst:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-scratch.h b/arch/mips/include/asm/octeon/cvmx-scratch.h
new file mode 100644
index 000000000..8d21cc5e4
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-scratch.h
@@ -0,0 +1,139 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * This file provides support for the processor local scratch memory.
+ * Scratch memory is byte addressable - all addresses are byte addresses.
+ *
+ */
+
+#ifndef __CVMX_SCRATCH_H__
+#define __CVMX_SCRATCH_H__
+
+/*
+ * Note: This define must be a long, not a long long in order to
+ * compile without warnings for both 32bit and 64bit.
+ */
+#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */
+
+/**
+ * Reads an 8 bit value from the processor local scratchpad memory.
+ *
+ * @address: byte address to read from
+ *
+ * Returns value read
+ */
+static inline uint8_t cvmx_scratch_read8(uint64_t address)
+{
+ return *CASTPTR(volatile uint8_t, CVMX_SCRATCH_BASE + address);
+}
+
+/**
+ * Reads a 16 bit value from the processor local scratchpad memory.
+ *
+ * @address: byte address to read from
+ *
+ * Returns value read
+ */
+static inline uint16_t cvmx_scratch_read16(uint64_t address)
+{
+ return *CASTPTR(volatile uint16_t, CVMX_SCRATCH_BASE + address);
+}
+
+/**
+ * Reads a 32 bit value from the processor local scratchpad memory.
+ *
+ * @address: byte address to read from
+ *
+ * Returns value read
+ */
+static inline uint32_t cvmx_scratch_read32(uint64_t address)
+{
+ return *CASTPTR(volatile uint32_t, CVMX_SCRATCH_BASE + address);
+}
+
+/**
+ * Reads a 64 bit value from the processor local scratchpad memory.
+ *
+ * @address: byte address to read from
+ *
+ * Returns value read
+ */
+static inline uint64_t cvmx_scratch_read64(uint64_t address)
+{
+ return *CASTPTR(volatile uint64_t, CVMX_SCRATCH_BASE + address);
+}
+
+/**
+ * Writes an 8 bit value to the processor local scratchpad memory.
+ *
+ * @address: byte address to write to
+ * @value: value to write
+ */
+static inline void cvmx_scratch_write8(uint64_t address, uint64_t value)
+{
+ *CASTPTR(volatile uint8_t, CVMX_SCRATCH_BASE + address) =
+ (uint8_t) value;
+}
+
+/**
+ * Writes a 16 bit value to the processor local scratchpad memory.
+ *
+ * @address: byte address to write to
+ * @value: value to write
+ */
+static inline void cvmx_scratch_write16(uint64_t address, uint64_t value)
+{
+ *CASTPTR(volatile uint16_t, CVMX_SCRATCH_BASE + address) =
+ (uint16_t) value;
+}
+
+/**
+ * Writes a 32 bit value to the processor local scratchpad memory.
+ *
+ * @address: byte address to write to
+ * @value: value to write
+ */
+static inline void cvmx_scratch_write32(uint64_t address, uint64_t value)
+{
+ *CASTPTR(volatile uint32_t, CVMX_SCRATCH_BASE + address) =
+ (uint32_t) value;
+}
+
+/**
+ * Writes a 64 bit value to the processor local scratchpad memory.
+ *
+ * @address: byte address to write to
+ * @value: value to write
+ */
+static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
+{
+ *CASTPTR(volatile uint64_t, CVMX_SCRATCH_BASE + address) = value;
+}
+
+#endif /* __CVMX_SCRATCH_H__ */
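A minimal usage sketch for the scratchpad accessors above, assuming this
header (and the core cvmx headers it relies on) is included; the byte
offset 8 is purely illustrative, and real code must coordinate scratch
layout with the rest of the SDK (e.g. IOBDMA result locations):

	/* Stash a 64-bit value at byte offset 8 of the local scratchpad
	 * and read it back.  All addresses are byte addresses, as noted
	 * in the header comment above. */
	static void scratch_roundtrip_example(void)
	{
		uint64_t val = 0x0123456789abcdefull;

		cvmx_scratch_write64(8, val);
		if (cvmx_scratch_read64(8) != val)
			; /* only possible if something else used the same offset */
	}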
diff --git a/arch/mips/include/asm/octeon/cvmx-sli-defs.h b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
new file mode 100644
index 000000000..5ef6c3815
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
@@ -0,0 +1,129 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_SLI_DEFS_H__
+#define __CVMX_SLI_DEFS_H__
+
+#include <uapi/asm/bitfield.h>
+
+#define CVMX_SLI_PCIE_MSI_RCV CVMX_SLI_PCIE_MSI_RCV_FUNC()
+static inline uint64_t CVMX_SLI_PCIE_MSI_RCV_FUNC(void)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ return 0x0000000000003CB0ull;
+ case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
+ return 0x0000000000003CB0ull;
+ fallthrough;
+ default:
+ return 0x0000000000023CB0ull;
+ }
+}
+
+
+union cvmx_sli_ctl_portx {
+ uint64_t u64;
+ struct cvmx_sli_ctl_portx_s {
+ __BITFIELD_FIELD(uint64_t reserved_22_63:42,
+ __BITFIELD_FIELD(uint64_t intd:1,
+ __BITFIELD_FIELD(uint64_t intc:1,
+ __BITFIELD_FIELD(uint64_t intb:1,
+ __BITFIELD_FIELD(uint64_t inta:1,
+ __BITFIELD_FIELD(uint64_t dis_port:1,
+ __BITFIELD_FIELD(uint64_t waitl_com:1,
+ __BITFIELD_FIELD(uint64_t intd_map:2,
+ __BITFIELD_FIELD(uint64_t intc_map:2,
+ __BITFIELD_FIELD(uint64_t intb_map:2,
+ __BITFIELD_FIELD(uint64_t inta_map:2,
+ __BITFIELD_FIELD(uint64_t ctlp_ro:1,
+ __BITFIELD_FIELD(uint64_t reserved_6_6:1,
+ __BITFIELD_FIELD(uint64_t ptlp_ro:1,
+ __BITFIELD_FIELD(uint64_t reserved_1_4:4,
+ __BITFIELD_FIELD(uint64_t wait_com:1,
+ ;))))))))))))))))
+ } s;
+};
+
+union cvmx_sli_mem_access_ctl {
+ uint64_t u64;
+ struct cvmx_sli_mem_access_ctl_s {
+ __BITFIELD_FIELD(uint64_t reserved_14_63:50,
+ __BITFIELD_FIELD(uint64_t max_word:4,
+ __BITFIELD_FIELD(uint64_t timer:10,
+ ;)))
+ } s;
+};
+
+union cvmx_sli_s2m_portx_ctl {
+ uint64_t u64;
+ struct cvmx_sli_s2m_portx_ctl_s {
+ __BITFIELD_FIELD(uint64_t reserved_5_63:59,
+ __BITFIELD_FIELD(uint64_t wind_d:1,
+ __BITFIELD_FIELD(uint64_t bar0_d:1,
+ __BITFIELD_FIELD(uint64_t mrrs:3,
+ ;))))
+ } s;
+};
+
+union cvmx_sli_mem_access_subidx {
+ uint64_t u64;
+ struct cvmx_sli_mem_access_subidx_s {
+ __BITFIELD_FIELD(uint64_t reserved_43_63:21,
+ __BITFIELD_FIELD(uint64_t zero:1,
+ __BITFIELD_FIELD(uint64_t port:3,
+ __BITFIELD_FIELD(uint64_t nmerge:1,
+ __BITFIELD_FIELD(uint64_t esr:2,
+ __BITFIELD_FIELD(uint64_t esw:2,
+ __BITFIELD_FIELD(uint64_t wtype:2,
+ __BITFIELD_FIELD(uint64_t rtype:2,
+ __BITFIELD_FIELD(uint64_t ba:30,
+ ;)))))))))
+ } s;
+ struct cvmx_sli_mem_access_subidx_cn68xx {
+ __BITFIELD_FIELD(uint64_t reserved_43_63:21,
+ __BITFIELD_FIELD(uint64_t zero:1,
+ __BITFIELD_FIELD(uint64_t port:3,
+ __BITFIELD_FIELD(uint64_t nmerge:1,
+ __BITFIELD_FIELD(uint64_t esr:2,
+ __BITFIELD_FIELD(uint64_t esw:2,
+ __BITFIELD_FIELD(uint64_t wtype:2,
+ __BITFIELD_FIELD(uint64_t rtype:2,
+ __BITFIELD_FIELD(uint64_t ba:28,
+ __BITFIELD_FIELD(uint64_t reserved_0_1:2,
+ ;))))))))))
+ } cn68xx;
+};
+
+#endif
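A hedged sketch of how the __BITFIELD_FIELD unions above are typically
consumed: the same field accesses work on both big- and little-endian
builds, and the raw value would normally come from cvmx_read_csr() on the
corresponding SLI register address (the field glosses in the comments are
informal):

	static void sli_s2m_port_decode_example(uint64_t raw)
	{
		union cvmx_sli_s2m_portx_ctl ctl;

		ctl.u64 = raw;		/* e.g. from cvmx_read_csr() */
		(void)ctl.s.mrrs;	/* max read request size encoding */
		(void)ctl.s.bar0_d;	/* BAR0 access disable */
		(void)ctl.s.wind_d;	/* window register access disable */
	}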
diff --git a/arch/mips/include/asm/octeon/cvmx-spi.h b/arch/mips/include/asm/octeon/cvmx-spi.h
new file mode 100644
index 000000000..d5038cc4b
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-spi.h
@@ -0,0 +1,269 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * This file contains defines for the SPI interface
+ */
+#ifndef __CVMX_SPI_H__
+#define __CVMX_SPI_H__
+
+#include <asm/octeon/cvmx-gmxx-defs.h>
+
+/* CSR typedefs have been moved to cvmx-csr-*.h */
+
+typedef enum {
+ CVMX_SPI_MODE_UNKNOWN = 0,
+ CVMX_SPI_MODE_TX_HALFPLEX = 1,
+ CVMX_SPI_MODE_RX_HALFPLEX = 2,
+ CVMX_SPI_MODE_DUPLEX = 3
+} cvmx_spi_mode_t;
+
+/** Callbacks structure to customize SPI4 initialization sequence */
+typedef struct {
+ /** Called to reset SPI4 DLL */
+ int (*reset_cb) (int interface, cvmx_spi_mode_t mode);
+
+ /** Called to setup calendar */
+ int (*calendar_setup_cb) (int interface, cvmx_spi_mode_t mode,
+ int num_ports);
+
+ /** Called for Tx and Rx clock detection */
+ int (*clock_detect_cb) (int interface, cvmx_spi_mode_t mode,
+ int timeout);
+
+ /** Called to perform link training */
+ int (*training_cb) (int interface, cvmx_spi_mode_t mode, int timeout);
+
+ /** Called for calendar data synchronization */
+ int (*calendar_sync_cb) (int interface, cvmx_spi_mode_t mode,
+ int timeout);
+
+ /** Called when interface is up */
+ int (*interface_up_cb) (int interface, cvmx_spi_mode_t mode);
+
+} cvmx_spi_callbacks_t;
+
+/**
+ * Return true if the supplied interface is configured for SPI
+ *
+ * @interface: Interface to check
+ * Returns True if interface is SPI
+ */
+static inline int cvmx_spi_is_spi_interface(int interface)
+{
+ uint64_t gmxState = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+ return (gmxState & 0x2) && (gmxState & 0x1);
+}
+
+/**
+ * Initialize and start the SPI interface.
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for clock synchronization in seconds
+ * @num_ports: Number of SPI ports to configure
+ *
+ * Returns Zero on success, negative on failure.
+ */
+extern int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode,
+ int timeout, int num_ports);
+
+/**
+ * This routine restarts the SPI interface after it has lost synchronization
+ * with its correspondent system.
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for clock synchronization in seconds
+ * Returns Zero on success, negative on failure.
+ */
+extern int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode,
+ int timeout);
+
+/**
+ * Return non-zero if the SPI interface has a SPI4000 attached
+ *
+ * @interface: SPI interface the SPI4000 is connected to
+ *
+ * Returns Non-zero if a SPI4000 is attached, zero otherwise
+ */
+static inline int cvmx_spi4000_is_present(int interface)
+{
+ return 0;
+}
+
+/**
+ * Initialize the SPI4000 for use
+ *
+ * @interface: SPI interface the SPI4000 is connected to
+ */
+static inline int cvmx_spi4000_initialize(int interface)
+{
+ return 0;
+}
+
+/**
+ * Poll a SPI4000 port and check its speed
+ *
+ * @interface: Interface the SPI4000 is on
+ * @port: Port to poll (0-9)
+ * Returns Status of the port: 0 = down, any other value means the port is up.
+ */
+static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
+ int interface,
+ int port)
+{
+ union cvmx_gmxx_rxx_rx_inbnd r;
+ r.u64 = 0;
+ return r;
+}
+
+/**
+ * Get current SPI4 initialization callbacks
+ *
+ * @callbacks: Pointer to the callbacks structure to fill
+ *
+ * Returns nothing; the current callbacks are copied into @callbacks.
+ */
+extern void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks);
+
+/**
+ * Set new SPI4 initialization callbacks
+ *
+ * @new_callbacks: Pointer to an updated callbacks structure.
+ */
+extern void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks);
+
+/**
+ * Callback to perform SPI4 reset
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+extern int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode);
+
+/**
+ * Callback to setup calendar and miscellaneous settings before clock
+ * detection
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @num_ports: Number of ports to configure on SPI
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+extern int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
+ int num_ports);
+
+/**
+ * Callback to perform clock detection
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for clock synchronization in seconds
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+extern int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode,
+ int timeout);
+
+/**
+ * Callback to perform link training
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for link to be trained (in seconds)
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+extern int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode,
+ int timeout);
+
+/**
+ * Callback to perform calendar data synchronization
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for calendar data in seconds
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+extern int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode,
+ int timeout);
+
+/**
+ * Callback to handle interface up
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+extern int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode);
+
+#endif /* __CVMX_SPI_H__ */
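A hedged sketch of the callback override flow declared above: fetch the
default callbacks, swap in a board-specific hook, and start the interface.
my_clock_detect() and the interface/timeout/port numbers are hypothetical:

	static int my_clock_detect(int interface, cvmx_spi_mode_t mode, int timeout)
	{
		/* board-specific work could go here; fall back to the stock step */
		return cvmx_spi_clock_detect_cb(interface, mode, timeout);
	}

	static void spi_bringup_example(void)
	{
		cvmx_spi_callbacks_t cb;

		cvmx_spi_get_callbacks(&cb);		/* start from the defaults */
		cb.clock_detect_cb = my_clock_detect;	/* override one stage */
		cvmx_spi_set_callbacks(&cb);

		/* interface 0, full duplex, 10s timeout, 16 calendar ports */
		if (cvmx_spi_start_interface(0, CVMX_SPI_MODE_DUPLEX, 10, 16) != 0)
			; /* negative return indicates the bring-up failed */
	}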
diff --git a/arch/mips/include/asm/octeon/cvmx-spinlock.h b/arch/mips/include/asm/octeon/cvmx-spinlock.h
new file mode 100644
index 000000000..4f09cff8b
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-spinlock.h
@@ -0,0 +1,232 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * Implementation of spinlocks for Octeon CVMX. Although similar in
+ * function to Linux kernel spinlocks, they are not compatible.
+ * Octeon CVMX spinlocks are only used to synchronize with the boot
+ * monitor and other non-Linux programs running in the system.
+ */
+
+#ifndef __CVMX_SPINLOCK_H__
+#define __CVMX_SPINLOCK_H__
+
+#include <asm/octeon/cvmx-asm.h>
+
+/* Spinlocks for Octeon */
+
+/* define these to enable recursive spinlock debugging */
+/*#define CVMX_SPINLOCK_DEBUG */
+
+/**
+ * Spinlocks for Octeon CVMX
+ */
+typedef struct {
+ volatile uint32_t value;
+} cvmx_spinlock_t;
+
+/* note - macros not expanded in inline ASM, so values hardcoded */
+#define CVMX_SPINLOCK_UNLOCKED_VAL 0
+#define CVMX_SPINLOCK_LOCKED_VAL 1
+
+#define CVMX_SPINLOCK_UNLOCKED_INITIALIZER {CVMX_SPINLOCK_UNLOCKED_VAL}
+
+/**
+ * Initialize a spinlock
+ *
+ * @lock: Lock to initialize
+ */
+static inline void cvmx_spinlock_init(cvmx_spinlock_t *lock)
+{
+ lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
+}
+
+/**
+ * Return non-zero if the spinlock is currently locked
+ *
+ * @lock: Lock to check
+ * Returns Non-zero if locked
+ */
+static inline int cvmx_spinlock_locked(cvmx_spinlock_t *lock)
+{
+ return lock->value != CVMX_SPINLOCK_UNLOCKED_VAL;
+}
+
+/**
+ * Releases lock
+ *
+ * @lock: pointer to lock structure
+ */
+static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock)
+{
+ CVMX_SYNCWS;
+ lock->value = 0;
+ CVMX_SYNCWS;
+}
+
+/**
+ * Attempts to take the lock, but does not spin if lock is not available.
+ * May take some time to acquire the lock even if it is available
+ * due to the ll/sc not succeeding.
+ *
+ * @lock: pointer to lock structure
+ *
+ * Returns 0: lock successfully taken
+ * 1: lock not taken, held by someone else
+ * These return values match the Linux semantics.
+ */
+
+static inline unsigned int cvmx_spinlock_trylock(cvmx_spinlock_t *lock)
+{
+ unsigned int tmp;
+
+ __asm__ __volatile__(".set noreorder \n"
+ "1: ll %[tmp], %[val] \n"
+ /* if lock held, fail immediately */
+ " bnez %[tmp], 2f \n"
+ " li %[tmp], 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " li %[tmp], 0 \n"
+ "2: \n"
+ ".set reorder \n" :
+ [val] "+m"(lock->value), [tmp] "=&r"(tmp)
+ : : "memory");
+
+ return tmp != 0; /* normalize to 0 or 1 */
+}
+
+/**
+ * Gets lock, spins until lock is taken
+ *
+ * @lock: pointer to lock structure
+ */
+static inline void cvmx_spinlock_lock(cvmx_spinlock_t *lock)
+{
+ unsigned int tmp;
+
+ __asm__ __volatile__(".set noreorder \n"
+ "1: ll %[tmp], %[val] \n"
+ " bnez %[tmp], 1b \n"
+ " li %[tmp], 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n" :
+ [val] "+m"(lock->value), [tmp] "=&r"(tmp)
+ : : "memory");
+
+}
+
+/** ********************************************************************
+ * Bit spinlocks
+ * These spinlocks use a single bit (bit 31) of a 32 bit word for locking.
+ * The rest of the bits in the word are left undisturbed. This enables more
+ * compact data structures as only 1 bit is consumed for the lock.
+ *
+ */
+
+/**
+ * Gets lock, spins until lock is taken
+ * Preserves the low 31 bits of the 32 bit
+ * word used for the lock.
+ *
+ *
+ * @word: word to lock bit 31 of
+ */
+static inline void cvmx_spinlock_bit_lock(uint32_t *word)
+{
+ unsigned int tmp;
+ unsigned int sav;
+
+ __asm__ __volatile__(".set noreorder \n"
+ ".set noat \n"
+ "1: ll %[tmp], %[val] \n"
+ " bbit1 %[tmp], 31, 1b \n"
+ " li $at, 1 \n"
+ " ins %[tmp], $at, 31, 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set at \n"
+ ".set reorder \n" :
+ [val] "+m"(*word), [tmp] "=&r"(tmp), [sav] "=&r"(sav)
+ : : "memory");
+
+}
+
+/**
+ * Attempts to get lock, returns immediately with success/failure
+ * Preserves the low 31 bits of the 32 bit
+ * word used for the lock.
+ *
+ *
+ * @word: word to lock bit 31 of
+ * Returns 0: lock successfully taken
+ * 1: lock not taken, held by someone else
+ * These return values match the Linux semantics.
+ */
+static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
+{
+ unsigned int tmp;
+
+ __asm__ __volatile__(".set noreorder\n\t"
+ ".set noat\n"
+ "1: ll %[tmp], %[val] \n"
+ /* if lock held, fail immediately */
+ " bbit1 %[tmp], 31, 2f \n"
+ " li $at, 1 \n"
+ " ins %[tmp], $at, 31, 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " li %[tmp], 0 \n"
+ "2: \n"
+ ".set at \n"
+ ".set reorder \n" :
+ [val] "+m"(*word), [tmp] "=&r"(tmp)
+ : : "memory");
+
+ return tmp != 0; /* normalize to 0 or 1 */
+}
+
+/**
+ * Releases bit lock
+ *
+ * Unconditionally clears bit 31 of the lock word. Note that this is
+ * done non-atomically, as this implementation assumes that the rest
+ * of the bits in the word are protected by the lock.
+ *
+ * @word: word to unlock bit 31 in
+ */
+static inline void cvmx_spinlock_bit_unlock(uint32_t *word)
+{
+ CVMX_SYNCWS;
+ *word &= ~(1UL << 31);
+ CVMX_SYNCWS;
+}
+
+#endif /* __CVMX_SPINLOCK_H__ */
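A hedged sketch of the two locking styles above: a full cvmx_spinlock_t for
a plain critical section, and a bit lock that keeps its data in the low 31
bits of the same word. Where the lock and counter actually live (e.g. a
named block shared with the boot monitor) is up to the caller:

	static cvmx_spinlock_t example_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
	static uint32_t example_word;	/* bit 31 = lock, bits 0-30 = counter */

	static void spinlock_examples(void)
	{
		cvmx_spinlock_lock(&example_lock);
		/* critical section */
		cvmx_spinlock_unlock(&example_lock);

		if (cvmx_spinlock_trylock(&example_lock) == 0) {
			/* 0 means the lock was taken (Linux semantics) */
			cvmx_spinlock_unlock(&example_lock);
		}

		/* increment the low 31 bits while holding the bit lock */
		cvmx_spinlock_bit_lock(&example_word);
		example_word = (example_word & (1UL << 31)) |
			       ((example_word + 1) & 0x7fffffff);
		cvmx_spinlock_bit_unlock(&example_word);
	}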
diff --git a/arch/mips/include/asm/octeon/cvmx-spxx-defs.h b/arch/mips/include/asm/octeon/cvmx-spxx-defs.h
new file mode 100644
index 000000000..8471ed2de
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-spxx-defs.h
@@ -0,0 +1,446 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (C) 2003-2018 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_SPXX_DEFS_H__
+#define __CVMX_SPXX_DEFS_H__
+
+#define CVMX_SPXX_BCKPRS_CNT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000340ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SPXX_BIST_STAT(block_id) (CVMX_ADD_IO_SEG(0x00011800900007F8ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SPXX_CLK_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000348ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SPXX_CLK_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000350ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SPXX_DBG_DESKEW_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000368ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SPXX_DBG_DESKEW_STATE(block_id) (CVMX_ADD_IO_SEG(0x0001180090000370ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SPXX_DRV_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000358ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SPXX_ERR_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000320ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SPXX_INT_DAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000318ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SPXX_INT_MSK(block_id) (CVMX_ADD_IO_SEG(0x0001180090000308ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SPXX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x0001180090000300ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SPXX_INT_SYNC(block_id) (CVMX_ADD_IO_SEG(0x0001180090000310ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SPXX_TPA_ACC(block_id) (CVMX_ADD_IO_SEG(0x0001180090000338ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SPXX_TPA_MAX(block_id) (CVMX_ADD_IO_SEG(0x0001180090000330ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SPXX_TPA_SEL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000328ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SPXX_TRN4_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000360ull) + ((block_id) & 1) * 0x8000000ull)
+
+void __cvmx_interrupt_spxx_int_msk_enable(int index);
+
+union cvmx_spxx_bckprs_cnt {
+ uint64_t u64;
+ struct cvmx_spxx_bckprs_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_spxx_bist_stat {
+ uint64_t u64;
+ struct cvmx_spxx_bist_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t stat2:1;
+ uint64_t stat1:1;
+ uint64_t stat0:1;
+#else
+ uint64_t stat0:1;
+ uint64_t stat1:1;
+ uint64_t stat2:1;
+ uint64_t reserved_3_63:61;
+#endif
+ } s;
+};
+
+union cvmx_spxx_clk_ctl {
+ uint64_t u64;
+ struct cvmx_spxx_clk_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t seetrn:1;
+ uint64_t reserved_12_15:4;
+ uint64_t clkdly:5;
+ uint64_t runbist:1;
+ uint64_t statdrv:1;
+ uint64_t statrcv:1;
+ uint64_t sndtrn:1;
+ uint64_t drptrn:1;
+ uint64_t rcvtrn:1;
+ uint64_t srxdlck:1;
+#else
+ uint64_t srxdlck:1;
+ uint64_t rcvtrn:1;
+ uint64_t drptrn:1;
+ uint64_t sndtrn:1;
+ uint64_t statrcv:1;
+ uint64_t statdrv:1;
+ uint64_t runbist:1;
+ uint64_t clkdly:5;
+ uint64_t reserved_12_15:4;
+ uint64_t seetrn:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } s;
+};
+
+union cvmx_spxx_clk_stat {
+ uint64_t u64;
+ struct cvmx_spxx_clk_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t stxcal:1;
+ uint64_t reserved_9_9:1;
+ uint64_t srxtrn:1;
+ uint64_t s4clk1:1;
+ uint64_t s4clk0:1;
+ uint64_t d4clk1:1;
+ uint64_t d4clk0:1;
+ uint64_t reserved_0_3:4;
+#else
+ uint64_t reserved_0_3:4;
+ uint64_t d4clk0:1;
+ uint64_t d4clk1:1;
+ uint64_t s4clk0:1;
+ uint64_t s4clk1:1;
+ uint64_t srxtrn:1;
+ uint64_t reserved_9_9:1;
+ uint64_t stxcal:1;
+ uint64_t reserved_11_63:53;
+#endif
+ } s;
+};
+
+union cvmx_spxx_dbg_deskew_ctl {
+ uint64_t u64;
+ struct cvmx_spxx_dbg_deskew_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_30_63:34;
+ uint64_t fallnop:1;
+ uint64_t fall8:1;
+ uint64_t reserved_26_27:2;
+ uint64_t sstep_go:1;
+ uint64_t sstep:1;
+ uint64_t reserved_22_23:2;
+ uint64_t clrdly:1;
+ uint64_t dec:1;
+ uint64_t inc:1;
+ uint64_t mux:1;
+ uint64_t offset:5;
+ uint64_t bitsel:5;
+ uint64_t offdly:6;
+ uint64_t dllfrc:1;
+ uint64_t dlldis:1;
+#else
+ uint64_t dlldis:1;
+ uint64_t dllfrc:1;
+ uint64_t offdly:6;
+ uint64_t bitsel:5;
+ uint64_t offset:5;
+ uint64_t mux:1;
+ uint64_t inc:1;
+ uint64_t dec:1;
+ uint64_t clrdly:1;
+ uint64_t reserved_22_23:2;
+ uint64_t sstep:1;
+ uint64_t sstep_go:1;
+ uint64_t reserved_26_27:2;
+ uint64_t fall8:1;
+ uint64_t fallnop:1;
+ uint64_t reserved_30_63:34;
+#endif
+ } s;
+};
+
+union cvmx_spxx_dbg_deskew_state {
+ uint64_t u64;
+ struct cvmx_spxx_dbg_deskew_state_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t testres:1;
+ uint64_t unxterm:1;
+ uint64_t muxsel:2;
+ uint64_t offset:5;
+#else
+ uint64_t offset:5;
+ uint64_t muxsel:2;
+ uint64_t unxterm:1;
+ uint64_t testres:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } s;
+};
+
+union cvmx_spxx_drv_ctl {
+ uint64_t u64;
+ struct cvmx_spxx_drv_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_spxx_drv_ctl_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t stx4ncmp:4;
+ uint64_t stx4pcmp:4;
+ uint64_t srx4cmp:8;
+#else
+ uint64_t srx4cmp:8;
+ uint64_t stx4pcmp:4;
+ uint64_t stx4ncmp:4;
+ uint64_t reserved_16_63:48;
+#endif
+ } cn38xx;
+ struct cvmx_spxx_drv_ctl_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t stx4ncmp:4;
+ uint64_t stx4pcmp:4;
+ uint64_t reserved_10_15:6;
+ uint64_t srx4cmp:10;
+#else
+ uint64_t srx4cmp:10;
+ uint64_t reserved_10_15:6;
+ uint64_t stx4pcmp:4;
+ uint64_t stx4ncmp:4;
+ uint64_t reserved_24_63:40;
+#endif
+ } cn58xx;
+};
+
+union cvmx_spxx_err_ctl {
+ uint64_t u64;
+ struct cvmx_spxx_err_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t prtnxa:1;
+ uint64_t dipcls:1;
+ uint64_t dippay:1;
+ uint64_t reserved_4_5:2;
+ uint64_t errcnt:4;
+#else
+ uint64_t errcnt:4;
+ uint64_t reserved_4_5:2;
+ uint64_t dippay:1;
+ uint64_t dipcls:1;
+ uint64_t prtnxa:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } s;
+};
+
+union cvmx_spxx_int_dat {
+ uint64_t u64;
+ struct cvmx_spxx_int_dat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t mul:1;
+ uint64_t reserved_14_30:17;
+ uint64_t calbnk:2;
+ uint64_t rsvop:4;
+ uint64_t prt:8;
+#else
+ uint64_t prt:8;
+ uint64_t rsvop:4;
+ uint64_t calbnk:2;
+ uint64_t reserved_14_30:17;
+ uint64_t mul:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_spxx_int_msk {
+ uint64_t u64;
+ struct cvmx_spxx_int_msk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t calerr:1;
+ uint64_t syncerr:1;
+ uint64_t diperr:1;
+ uint64_t tpaovr:1;
+ uint64_t rsverr:1;
+ uint64_t drwnng:1;
+ uint64_t clserr:1;
+ uint64_t spiovr:1;
+ uint64_t reserved_2_3:2;
+ uint64_t abnorm:1;
+ uint64_t prtnxa:1;
+#else
+ uint64_t prtnxa:1;
+ uint64_t abnorm:1;
+ uint64_t reserved_2_3:2;
+ uint64_t spiovr:1;
+ uint64_t clserr:1;
+ uint64_t drwnng:1;
+ uint64_t rsverr:1;
+ uint64_t tpaovr:1;
+ uint64_t diperr:1;
+ uint64_t syncerr:1;
+ uint64_t calerr:1;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+};
+
+union cvmx_spxx_int_reg {
+ uint64_t u64;
+ struct cvmx_spxx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t spf:1;
+ uint64_t reserved_12_30:19;
+ uint64_t calerr:1;
+ uint64_t syncerr:1;
+ uint64_t diperr:1;
+ uint64_t tpaovr:1;
+ uint64_t rsverr:1;
+ uint64_t drwnng:1;
+ uint64_t clserr:1;
+ uint64_t spiovr:1;
+ uint64_t reserved_2_3:2;
+ uint64_t abnorm:1;
+ uint64_t prtnxa:1;
+#else
+ uint64_t prtnxa:1;
+ uint64_t abnorm:1;
+ uint64_t reserved_2_3:2;
+ uint64_t spiovr:1;
+ uint64_t clserr:1;
+ uint64_t drwnng:1;
+ uint64_t rsverr:1;
+ uint64_t tpaovr:1;
+ uint64_t diperr:1;
+ uint64_t syncerr:1;
+ uint64_t calerr:1;
+ uint64_t reserved_12_30:19;
+ uint64_t spf:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_spxx_int_sync {
+ uint64_t u64;
+ struct cvmx_spxx_int_sync_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t calerr:1;
+ uint64_t syncerr:1;
+ uint64_t diperr:1;
+ uint64_t tpaovr:1;
+ uint64_t rsverr:1;
+ uint64_t drwnng:1;
+ uint64_t clserr:1;
+ uint64_t spiovr:1;
+ uint64_t reserved_2_3:2;
+ uint64_t abnorm:1;
+ uint64_t prtnxa:1;
+#else
+ uint64_t prtnxa:1;
+ uint64_t abnorm:1;
+ uint64_t reserved_2_3:2;
+ uint64_t spiovr:1;
+ uint64_t clserr:1;
+ uint64_t drwnng:1;
+ uint64_t rsverr:1;
+ uint64_t tpaovr:1;
+ uint64_t diperr:1;
+ uint64_t syncerr:1;
+ uint64_t calerr:1;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+};
+
+union cvmx_spxx_tpa_acc {
+ uint64_t u64;
+ struct cvmx_spxx_tpa_acc_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_spxx_tpa_max {
+ uint64_t u64;
+ struct cvmx_spxx_tpa_max_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t max:32;
+#else
+ uint64_t max:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_spxx_tpa_sel {
+ uint64_t u64;
+ struct cvmx_spxx_tpa_sel_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t prtsel:4;
+#else
+ uint64_t prtsel:4;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_spxx_trn4_ctl {
+ uint64_t u64;
+ struct cvmx_spxx_trn4_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t trntest:1;
+ uint64_t jitter:3;
+ uint64_t clr_boot:1;
+ uint64_t set_boot:1;
+ uint64_t maxdist:5;
+ uint64_t macro_en:1;
+ uint64_t mux_en:1;
+#else
+ uint64_t mux_en:1;
+ uint64_t macro_en:1;
+ uint64_t maxdist:5;
+ uint64_t set_boot:1;
+ uint64_t clr_boot:1;
+ uint64_t jitter:3;
+ uint64_t trntest:1;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+};
+
+#endif
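A hedged sketch of reading one SPX block's interrupt summary with the
address macros and unions above; cvmx_read_csr()/cvmx_write_csr() come from
the core cvmx headers, and the write-1-to-clear acknowledgement is an
assumption based on the usual Octeon interrupt register behaviour:

	static void spxx_int_example(int block)
	{
		union cvmx_spxx_int_reg reg;

		reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(block));
		if (reg.s.prtnxa)
			; /* a non-existent port was addressed */

		/* acknowledge whatever was latched (assumed W1C) */
		cvmx_write_csr(CVMX_SPXX_INT_REG(block), reg.u64);
	}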
diff --git a/arch/mips/include/asm/octeon/cvmx-sriox-defs.h b/arch/mips/include/asm/octeon/cvmx-sriox-defs.h
new file mode 100644
index 000000000..34d0fadb5
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-sriox-defs.h
@@ -0,0 +1,1614 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_SRIOX_DEFS_H__
+#define __CVMX_SRIOX_DEFS_H__
+
+#define CVMX_SRIOX_ACC_CTRL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000148ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_ASMBLY_ID(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000200ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_ASMBLY_INFO(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000208ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_BELL_RESP_CTRL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000310ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000108ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_IMSG_CTRL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000508ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_IMSG_INST_HDRX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000510ull) + (((offset) & 1) + ((block_id) & 3) * 0x200000ull) * 8)
+#define CVMX_SRIOX_IMSG_QOS_GRPX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000600ull) + (((offset) & 31) + ((block_id) & 3) * 0x200000ull) * 8)
+#define CVMX_SRIOX_IMSG_STATUSX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000700ull) + (((offset) & 31) + ((block_id) & 3) * 0x200000ull) * 8)
+#define CVMX_SRIOX_IMSG_VPORT_THR(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000500ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_IMSG_VPORT_THR2(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000528ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_INT2_ENABLE(block_id) (CVMX_ADD_IO_SEG(0x00011800C80003E0ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_INT2_REG(block_id) (CVMX_ADD_IO_SEG(0x00011800C80003E8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_INT_ENABLE(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000110ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_INT_INFO0(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000120ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_INT_INFO1(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000128ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_INT_INFO2(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000130ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_INT_INFO3(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000138ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000118ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_IP_FEATURE(block_id) (CVMX_ADD_IO_SEG(0x00011800C80003F8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_MAC_BUFFERS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000390ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_MAINT_OP(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000158ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_MAINT_RD_DATA(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000160ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_MCE_TX_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000240ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_MEM_OP_CTRL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000168ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_OMSG_CTRLX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000488ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64)
+#define CVMX_SRIOX_OMSG_DONE_COUNTSX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C80004B0ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64)
+#define CVMX_SRIOX_OMSG_FMP_MRX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000498ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64)
+#define CVMX_SRIOX_OMSG_NMP_MRX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C80004A0ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64)
+#define CVMX_SRIOX_OMSG_PORTX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000480ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64)
+#define CVMX_SRIOX_OMSG_SILO_THR(block_id) (CVMX_ADD_IO_SEG(0x00011800C80004F8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_OMSG_SP_MRX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000490ull) + (((offset) & 1) + ((block_id) & 3) * 0x40000ull) * 64)
+#define CVMX_SRIOX_PRIOX_IN_USE(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C80003C0ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8)
+#define CVMX_SRIOX_RX_BELL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000308ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_RX_BELL_SEQ(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000300ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_RX_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000380ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_S2M_TYPEX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000180ull) + (((offset) & 15) + ((block_id) & 3) * 0x200000ull) * 8)
+#define CVMX_SRIOX_SEQ(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000278ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_STATUS_REG(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000100ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_TAG_CTRL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000178ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_TLP_CREDITS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000150ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_TX_BELL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000280ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_TX_BELL_INFO(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000288ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_TX_CTRL(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000170ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_TX_EMPHASIS(block_id) (CVMX_ADD_IO_SEG(0x00011800C80003F0ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_TX_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000388ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_SRIOX_WR_DONE_COUNTS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000340ull) + ((block_id) & 3) * 0x1000000ull)
+
+union cvmx_sriox_acc_ctrl {
+ uint64_t u64;
+ struct cvmx_sriox_acc_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_7_63:57;
+ uint64_t deny_adr2:1;
+ uint64_t deny_adr1:1;
+ uint64_t deny_adr0:1;
+ uint64_t reserved_3_3:1;
+ uint64_t deny_bar2:1;
+ uint64_t deny_bar1:1;
+ uint64_t deny_bar0:1;
+#else
+ uint64_t deny_bar0:1;
+ uint64_t deny_bar1:1;
+ uint64_t deny_bar2:1;
+ uint64_t reserved_3_3:1;
+ uint64_t deny_adr0:1;
+ uint64_t deny_adr1:1;
+ uint64_t deny_adr2:1;
+ uint64_t reserved_7_63:57;
+#endif
+ } s;
+ struct cvmx_sriox_acc_ctrl_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t deny_bar2:1;
+ uint64_t deny_bar1:1;
+ uint64_t deny_bar0:1;
+#else
+ uint64_t deny_bar0:1;
+ uint64_t deny_bar1:1;
+ uint64_t deny_bar2:1;
+ uint64_t reserved_3_63:61;
+#endif
+ } cn63xx;
+};
+
+union cvmx_sriox_asmbly_id {
+ uint64_t u64;
+ struct cvmx_sriox_asmbly_id_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t assy_id:16;
+ uint64_t assy_ven:16;
+#else
+ uint64_t assy_ven:16;
+ uint64_t assy_id:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_sriox_asmbly_info {
+ uint64_t u64;
+ struct cvmx_sriox_asmbly_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t assy_rev:16;
+ uint64_t reserved_0_15:16;
+#else
+ uint64_t reserved_0_15:16;
+ uint64_t assy_rev:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_sriox_bell_resp_ctrl {
+ uint64_t u64;
+ struct cvmx_sriox_bell_resp_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t rp1_sid:1;
+ uint64_t rp0_sid:2;
+ uint64_t rp1_pid:1;
+ uint64_t rp0_pid:2;
+#else
+ uint64_t rp0_pid:2;
+ uint64_t rp1_pid:1;
+ uint64_t rp0_sid:2;
+ uint64_t rp1_sid:1;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_sriox_bist_status {
+ uint64_t u64;
+ struct cvmx_sriox_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_45_63:19;
+ uint64_t lram:1;
+ uint64_t mram:2;
+ uint64_t cram:2;
+ uint64_t bell:2;
+ uint64_t otag:2;
+ uint64_t itag:1;
+ uint64_t ofree:1;
+ uint64_t rtn:2;
+ uint64_t obulk:4;
+ uint64_t optrs:4;
+ uint64_t oarb2:2;
+ uint64_t rxbuf2:2;
+ uint64_t oarb:2;
+ uint64_t ispf:1;
+ uint64_t ospf:1;
+ uint64_t txbuf:2;
+ uint64_t rxbuf:2;
+ uint64_t imsg:5;
+ uint64_t omsg:7;
+#else
+ uint64_t omsg:7;
+ uint64_t imsg:5;
+ uint64_t rxbuf:2;
+ uint64_t txbuf:2;
+ uint64_t ospf:1;
+ uint64_t ispf:1;
+ uint64_t oarb:2;
+ uint64_t rxbuf2:2;
+ uint64_t oarb2:2;
+ uint64_t optrs:4;
+ uint64_t obulk:4;
+ uint64_t rtn:2;
+ uint64_t ofree:1;
+ uint64_t itag:1;
+ uint64_t otag:2;
+ uint64_t bell:2;
+ uint64_t cram:2;
+ uint64_t mram:2;
+ uint64_t lram:1;
+ uint64_t reserved_45_63:19;
+#endif
+ } s;
+ struct cvmx_sriox_bist_status_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t mram:2;
+ uint64_t cram:2;
+ uint64_t bell:2;
+ uint64_t otag:2;
+ uint64_t itag:1;
+ uint64_t ofree:1;
+ uint64_t rtn:2;
+ uint64_t obulk:4;
+ uint64_t optrs:4;
+ uint64_t oarb2:2;
+ uint64_t rxbuf2:2;
+ uint64_t oarb:2;
+ uint64_t ispf:1;
+ uint64_t ospf:1;
+ uint64_t txbuf:2;
+ uint64_t rxbuf:2;
+ uint64_t imsg:5;
+ uint64_t omsg:7;
+#else
+ uint64_t omsg:7;
+ uint64_t imsg:5;
+ uint64_t rxbuf:2;
+ uint64_t txbuf:2;
+ uint64_t ospf:1;
+ uint64_t ispf:1;
+ uint64_t oarb:2;
+ uint64_t rxbuf2:2;
+ uint64_t oarb2:2;
+ uint64_t optrs:4;
+ uint64_t obulk:4;
+ uint64_t rtn:2;
+ uint64_t ofree:1;
+ uint64_t itag:1;
+ uint64_t otag:2;
+ uint64_t bell:2;
+ uint64_t cram:2;
+ uint64_t mram:2;
+ uint64_t reserved_44_63:20;
+#endif
+ } cn63xx;
+ struct cvmx_sriox_bist_status_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t mram:2;
+ uint64_t cram:2;
+ uint64_t bell:2;
+ uint64_t otag:2;
+ uint64_t itag:1;
+ uint64_t ofree:1;
+ uint64_t rtn:2;
+ uint64_t obulk:4;
+ uint64_t optrs:4;
+ uint64_t reserved_20_23:4;
+ uint64_t oarb:2;
+ uint64_t ispf:1;
+ uint64_t ospf:1;
+ uint64_t txbuf:2;
+ uint64_t rxbuf:2;
+ uint64_t imsg:5;
+ uint64_t omsg:7;
+#else
+ uint64_t omsg:7;
+ uint64_t imsg:5;
+ uint64_t rxbuf:2;
+ uint64_t txbuf:2;
+ uint64_t ospf:1;
+ uint64_t ispf:1;
+ uint64_t oarb:2;
+ uint64_t reserved_20_23:4;
+ uint64_t optrs:4;
+ uint64_t obulk:4;
+ uint64_t rtn:2;
+ uint64_t ofree:1;
+ uint64_t itag:1;
+ uint64_t otag:2;
+ uint64_t bell:2;
+ uint64_t cram:2;
+ uint64_t mram:2;
+ uint64_t reserved_44_63:20;
+#endif
+ } cn63xxp1;
+};
+
+union cvmx_sriox_imsg_ctrl {
+ uint64_t u64;
+ struct cvmx_sriox_imsg_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t to_mode:1;
+ uint64_t reserved_30_30:1;
+ uint64_t rsp_thr:6;
+ uint64_t reserved_22_23:2;
+ uint64_t rp1_sid:1;
+ uint64_t rp0_sid:2;
+ uint64_t rp1_pid:1;
+ uint64_t rp0_pid:2;
+ uint64_t reserved_15_15:1;
+ uint64_t prt_sel:3;
+ uint64_t lttr:4;
+ uint64_t prio:4;
+ uint64_t mbox:4;
+#else
+ uint64_t mbox:4;
+ uint64_t prio:4;
+ uint64_t lttr:4;
+ uint64_t prt_sel:3;
+ uint64_t reserved_15_15:1;
+ uint64_t rp0_pid:2;
+ uint64_t rp1_pid:1;
+ uint64_t rp0_sid:2;
+ uint64_t rp1_sid:1;
+ uint64_t reserved_22_23:2;
+ uint64_t rsp_thr:6;
+ uint64_t reserved_30_30:1;
+ uint64_t to_mode:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_sriox_imsg_inst_hdrx {
+ uint64_t u64;
+ struct cvmx_sriox_imsg_inst_hdrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t r:1;
+ uint64_t reserved_58_62:5;
+ uint64_t pm:2;
+ uint64_t reserved_55_55:1;
+ uint64_t sl:7;
+ uint64_t reserved_46_47:2;
+ uint64_t nqos:1;
+ uint64_t ngrp:1;
+ uint64_t ntt:1;
+ uint64_t ntag:1;
+ uint64_t reserved_35_41:7;
+ uint64_t rs:1;
+ uint64_t tt:2;
+ uint64_t tag:32;
+#else
+ uint64_t tag:32;
+ uint64_t tt:2;
+ uint64_t rs:1;
+ uint64_t reserved_35_41:7;
+ uint64_t ntag:1;
+ uint64_t ntt:1;
+ uint64_t ngrp:1;
+ uint64_t nqos:1;
+ uint64_t reserved_46_47:2;
+ uint64_t sl:7;
+ uint64_t reserved_55_55:1;
+ uint64_t pm:2;
+ uint64_t reserved_58_62:5;
+ uint64_t r:1;
+#endif
+ } s;
+};
+
+union cvmx_sriox_imsg_qos_grpx {
+ uint64_t u64;
+ struct cvmx_sriox_imsg_qos_grpx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63_63:1;
+ uint64_t qos7:3;
+ uint64_t grp7:4;
+ uint64_t reserved_55_55:1;
+ uint64_t qos6:3;
+ uint64_t grp6:4;
+ uint64_t reserved_47_47:1;
+ uint64_t qos5:3;
+ uint64_t grp5:4;
+ uint64_t reserved_39_39:1;
+ uint64_t qos4:3;
+ uint64_t grp4:4;
+ uint64_t reserved_31_31:1;
+ uint64_t qos3:3;
+ uint64_t grp3:4;
+ uint64_t reserved_23_23:1;
+ uint64_t qos2:3;
+ uint64_t grp2:4;
+ uint64_t reserved_15_15:1;
+ uint64_t qos1:3;
+ uint64_t grp1:4;
+ uint64_t reserved_7_7:1;
+ uint64_t qos0:3;
+ uint64_t grp0:4;
+#else
+ uint64_t grp0:4;
+ uint64_t qos0:3;
+ uint64_t reserved_7_7:1;
+ uint64_t grp1:4;
+ uint64_t qos1:3;
+ uint64_t reserved_15_15:1;
+ uint64_t grp2:4;
+ uint64_t qos2:3;
+ uint64_t reserved_23_23:1;
+ uint64_t grp3:4;
+ uint64_t qos3:3;
+ uint64_t reserved_31_31:1;
+ uint64_t grp4:4;
+ uint64_t qos4:3;
+ uint64_t reserved_39_39:1;
+ uint64_t grp5:4;
+ uint64_t qos5:3;
+ uint64_t reserved_47_47:1;
+ uint64_t grp6:4;
+ uint64_t qos6:3;
+ uint64_t reserved_55_55:1;
+ uint64_t grp7:4;
+ uint64_t qos7:3;
+ uint64_t reserved_63_63:1;
+#endif
+ } s;
+};
+
+union cvmx_sriox_imsg_statusx {
+ uint64_t u64;
+ struct cvmx_sriox_imsg_statusx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t val1:1;
+ uint64_t err1:1;
+ uint64_t toe1:1;
+ uint64_t toc1:1;
+ uint64_t prt1:1;
+ uint64_t reserved_58_58:1;
+ uint64_t tt1:1;
+ uint64_t dis1:1;
+ uint64_t seg1:4;
+ uint64_t mbox1:2;
+ uint64_t lttr1:2;
+ uint64_t sid1:16;
+ uint64_t val0:1;
+ uint64_t err0:1;
+ uint64_t toe0:1;
+ uint64_t toc0:1;
+ uint64_t prt0:1;
+ uint64_t reserved_26_26:1;
+ uint64_t tt0:1;
+ uint64_t dis0:1;
+ uint64_t seg0:4;
+ uint64_t mbox0:2;
+ uint64_t lttr0:2;
+ uint64_t sid0:16;
+#else
+ uint64_t sid0:16;
+ uint64_t lttr0:2;
+ uint64_t mbox0:2;
+ uint64_t seg0:4;
+ uint64_t dis0:1;
+ uint64_t tt0:1;
+ uint64_t reserved_26_26:1;
+ uint64_t prt0:1;
+ uint64_t toc0:1;
+ uint64_t toe0:1;
+ uint64_t err0:1;
+ uint64_t val0:1;
+ uint64_t sid1:16;
+ uint64_t lttr1:2;
+ uint64_t mbox1:2;
+ uint64_t seg1:4;
+ uint64_t dis1:1;
+ uint64_t tt1:1;
+ uint64_t reserved_58_58:1;
+ uint64_t prt1:1;
+ uint64_t toc1:1;
+ uint64_t toe1:1;
+ uint64_t err1:1;
+ uint64_t val1:1;
+#endif
+ } s;
+};
+
+union cvmx_sriox_imsg_vport_thr {
+ uint64_t u64;
+ struct cvmx_sriox_imsg_vport_thr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_54_63:10;
+ uint64_t max_tot:6;
+ uint64_t reserved_46_47:2;
+ uint64_t max_s1:6;
+ uint64_t reserved_38_39:2;
+ uint64_t max_s0:6;
+ uint64_t sp_vport:1;
+ uint64_t reserved_20_30:11;
+ uint64_t buf_thr:4;
+ uint64_t reserved_14_15:2;
+ uint64_t max_p1:6;
+ uint64_t reserved_6_7:2;
+ uint64_t max_p0:6;
+#else
+ uint64_t max_p0:6;
+ uint64_t reserved_6_7:2;
+ uint64_t max_p1:6;
+ uint64_t reserved_14_15:2;
+ uint64_t buf_thr:4;
+ uint64_t reserved_20_30:11;
+ uint64_t sp_vport:1;
+ uint64_t max_s0:6;
+ uint64_t reserved_38_39:2;
+ uint64_t max_s1:6;
+ uint64_t reserved_46_47:2;
+ uint64_t max_tot:6;
+ uint64_t reserved_54_63:10;
+#endif
+ } s;
+};
+
+union cvmx_sriox_imsg_vport_thr2 {
+ uint64_t u64;
+ struct cvmx_sriox_imsg_vport_thr2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63:18;
+ uint64_t max_s3:6;
+ uint64_t reserved_38_39:2;
+ uint64_t max_s2:6;
+ uint64_t reserved_0_31:32;
+#else
+ uint64_t reserved_0_31:32;
+ uint64_t max_s2:6;
+ uint64_t reserved_38_39:2;
+ uint64_t max_s3:6;
+ uint64_t reserved_46_63:18;
+#endif
+ } s;
+};
+
+union cvmx_sriox_int2_enable {
+ uint64_t u64;
+ struct cvmx_sriox_int2_enable_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t pko_rst:1;
+#else
+ uint64_t pko_rst:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_sriox_int2_reg {
+ uint64_t u64;
+ struct cvmx_sriox_int2_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t int_sum:1;
+ uint64_t reserved_1_30:30;
+ uint64_t pko_rst:1;
+#else
+ uint64_t pko_rst:1;
+ uint64_t reserved_1_30:30;
+ uint64_t int_sum:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_sriox_int_enable {
+ uint64_t u64;
+ struct cvmx_sriox_int_enable_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63:37;
+ uint64_t zero_pkt:1;
+ uint64_t ttl_tout:1;
+ uint64_t fail:1;
+ uint64_t degrade:1;
+ uint64_t mac_buf:1;
+ uint64_t f_error:1;
+ uint64_t rtry_err:1;
+ uint64_t pko_err:1;
+ uint64_t omsg_err:1;
+ uint64_t omsg1:1;
+ uint64_t omsg0:1;
+ uint64_t link_up:1;
+ uint64_t link_dwn:1;
+ uint64_t phy_erb:1;
+ uint64_t log_erb:1;
+ uint64_t soft_rx:1;
+ uint64_t soft_tx:1;
+ uint64_t mce_rx:1;
+ uint64_t mce_tx:1;
+ uint64_t wr_done:1;
+ uint64_t sli_err:1;
+ uint64_t deny_wr:1;
+ uint64_t bar_err:1;
+ uint64_t maint_op:1;
+ uint64_t rxbell:1;
+ uint64_t bell_err:1;
+ uint64_t txbell:1;
+#else
+ uint64_t txbell:1;
+ uint64_t bell_err:1;
+ uint64_t rxbell:1;
+ uint64_t maint_op:1;
+ uint64_t bar_err:1;
+ uint64_t deny_wr:1;
+ uint64_t sli_err:1;
+ uint64_t wr_done:1;
+ uint64_t mce_tx:1;
+ uint64_t mce_rx:1;
+ uint64_t soft_tx:1;
+ uint64_t soft_rx:1;
+ uint64_t log_erb:1;
+ uint64_t phy_erb:1;
+ uint64_t link_dwn:1;
+ uint64_t link_up:1;
+ uint64_t omsg0:1;
+ uint64_t omsg1:1;
+ uint64_t omsg_err:1;
+ uint64_t pko_err:1;
+ uint64_t rtry_err:1;
+ uint64_t f_error:1;
+ uint64_t mac_buf:1;
+ uint64_t degrade:1;
+ uint64_t fail:1;
+ uint64_t ttl_tout:1;
+ uint64_t zero_pkt:1;
+ uint64_t reserved_27_63:37;
+#endif
+ } s;
+ struct cvmx_sriox_int_enable_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t f_error:1;
+ uint64_t rtry_err:1;
+ uint64_t pko_err:1;
+ uint64_t omsg_err:1;
+ uint64_t omsg1:1;
+ uint64_t omsg0:1;
+ uint64_t link_up:1;
+ uint64_t link_dwn:1;
+ uint64_t phy_erb:1;
+ uint64_t log_erb:1;
+ uint64_t soft_rx:1;
+ uint64_t soft_tx:1;
+ uint64_t mce_rx:1;
+ uint64_t mce_tx:1;
+ uint64_t wr_done:1;
+ uint64_t sli_err:1;
+ uint64_t deny_wr:1;
+ uint64_t bar_err:1;
+ uint64_t maint_op:1;
+ uint64_t rxbell:1;
+ uint64_t bell_err:1;
+ uint64_t txbell:1;
+#else
+ uint64_t txbell:1;
+ uint64_t bell_err:1;
+ uint64_t rxbell:1;
+ uint64_t maint_op:1;
+ uint64_t bar_err:1;
+ uint64_t deny_wr:1;
+ uint64_t sli_err:1;
+ uint64_t wr_done:1;
+ uint64_t mce_tx:1;
+ uint64_t mce_rx:1;
+ uint64_t soft_tx:1;
+ uint64_t soft_rx:1;
+ uint64_t log_erb:1;
+ uint64_t phy_erb:1;
+ uint64_t link_dwn:1;
+ uint64_t link_up:1;
+ uint64_t omsg0:1;
+ uint64_t omsg1:1;
+ uint64_t omsg_err:1;
+ uint64_t pko_err:1;
+ uint64_t rtry_err:1;
+ uint64_t f_error:1;
+ uint64_t reserved_22_63:42;
+#endif
+ } cn63xxp1;
+};
+
+union cvmx_sriox_int_info0 {
+ uint64_t u64;
+ struct cvmx_sriox_int_info0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t cmd:4;
+ uint64_t type:4;
+ uint64_t tag:8;
+ uint64_t reserved_42_47:6;
+ uint64_t length:10;
+ uint64_t status:3;
+ uint64_t reserved_16_28:13;
+ uint64_t be0:8;
+ uint64_t be1:8;
+#else
+ uint64_t be1:8;
+ uint64_t be0:8;
+ uint64_t reserved_16_28:13;
+ uint64_t status:3;
+ uint64_t length:10;
+ uint64_t reserved_42_47:6;
+ uint64_t tag:8;
+ uint64_t type:4;
+ uint64_t cmd:4;
+#endif
+ } s;
+};
+
+union cvmx_sriox_int_info1 {
+ uint64_t u64;
+ struct cvmx_sriox_int_info1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t info1:64;
+#else
+ uint64_t info1:64;
+#endif
+ } s;
+};
+
+union cvmx_sriox_int_info2 {
+ uint64_t u64;
+ struct cvmx_sriox_int_info2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t prio:2;
+ uint64_t tt:1;
+ uint64_t sis:1;
+ uint64_t ssize:4;
+ uint64_t did:16;
+ uint64_t xmbox:4;
+ uint64_t mbox:2;
+ uint64_t letter:2;
+ uint64_t rsrvd:30;
+ uint64_t lns:1;
+ uint64_t intr:1;
+#else
+ uint64_t intr:1;
+ uint64_t lns:1;
+ uint64_t rsrvd:30;
+ uint64_t letter:2;
+ uint64_t mbox:2;
+ uint64_t xmbox:4;
+ uint64_t did:16;
+ uint64_t ssize:4;
+ uint64_t sis:1;
+ uint64_t tt:1;
+ uint64_t prio:2;
+#endif
+ } s;
+};
+
+union cvmx_sriox_int_info3 {
+ uint64_t u64;
+ struct cvmx_sriox_int_info3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t prio:2;
+ uint64_t tt:2;
+ uint64_t type:4;
+ uint64_t other:48;
+ uint64_t reserved_0_7:8;
+#else
+ uint64_t reserved_0_7:8;
+ uint64_t other:48;
+ uint64_t type:4;
+ uint64_t tt:2;
+ uint64_t prio:2;
+#endif
+ } s;
+};
+
+union cvmx_sriox_int_reg {
+ uint64_t u64;
+ struct cvmx_sriox_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t int2_sum:1;
+ uint64_t reserved_27_30:4;
+ uint64_t zero_pkt:1;
+ uint64_t ttl_tout:1;
+ uint64_t fail:1;
+ uint64_t degrad:1;
+ uint64_t mac_buf:1;
+ uint64_t f_error:1;
+ uint64_t rtry_err:1;
+ uint64_t pko_err:1;
+ uint64_t omsg_err:1;
+ uint64_t omsg1:1;
+ uint64_t omsg0:1;
+ uint64_t link_up:1;
+ uint64_t link_dwn:1;
+ uint64_t phy_erb:1;
+ uint64_t log_erb:1;
+ uint64_t soft_rx:1;
+ uint64_t soft_tx:1;
+ uint64_t mce_rx:1;
+ uint64_t mce_tx:1;
+ uint64_t wr_done:1;
+ uint64_t sli_err:1;
+ uint64_t deny_wr:1;
+ uint64_t bar_err:1;
+ uint64_t maint_op:1;
+ uint64_t rxbell:1;
+ uint64_t bell_err:1;
+ uint64_t txbell:1;
+#else
+ uint64_t txbell:1;
+ uint64_t bell_err:1;
+ uint64_t rxbell:1;
+ uint64_t maint_op:1;
+ uint64_t bar_err:1;
+ uint64_t deny_wr:1;
+ uint64_t sli_err:1;
+ uint64_t wr_done:1;
+ uint64_t mce_tx:1;
+ uint64_t mce_rx:1;
+ uint64_t soft_tx:1;
+ uint64_t soft_rx:1;
+ uint64_t log_erb:1;
+ uint64_t phy_erb:1;
+ uint64_t link_dwn:1;
+ uint64_t link_up:1;
+ uint64_t omsg0:1;
+ uint64_t omsg1:1;
+ uint64_t omsg_err:1;
+ uint64_t pko_err:1;
+ uint64_t rtry_err:1;
+ uint64_t f_error:1;
+ uint64_t mac_buf:1;
+ uint64_t degrad:1;
+ uint64_t fail:1;
+ uint64_t ttl_tout:1;
+ uint64_t zero_pkt:1;
+ uint64_t reserved_27_30:4;
+ uint64_t int2_sum:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_sriox_int_reg_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t f_error:1;
+ uint64_t rtry_err:1;
+ uint64_t pko_err:1;
+ uint64_t omsg_err:1;
+ uint64_t omsg1:1;
+ uint64_t omsg0:1;
+ uint64_t link_up:1;
+ uint64_t link_dwn:1;
+ uint64_t phy_erb:1;
+ uint64_t log_erb:1;
+ uint64_t soft_rx:1;
+ uint64_t soft_tx:1;
+ uint64_t mce_rx:1;
+ uint64_t mce_tx:1;
+ uint64_t wr_done:1;
+ uint64_t sli_err:1;
+ uint64_t deny_wr:1;
+ uint64_t bar_err:1;
+ uint64_t maint_op:1;
+ uint64_t rxbell:1;
+ uint64_t bell_err:1;
+ uint64_t txbell:1;
+#else
+ uint64_t txbell:1;
+ uint64_t bell_err:1;
+ uint64_t rxbell:1;
+ uint64_t maint_op:1;
+ uint64_t bar_err:1;
+ uint64_t deny_wr:1;
+ uint64_t sli_err:1;
+ uint64_t wr_done:1;
+ uint64_t mce_tx:1;
+ uint64_t mce_rx:1;
+ uint64_t soft_tx:1;
+ uint64_t soft_rx:1;
+ uint64_t log_erb:1;
+ uint64_t phy_erb:1;
+ uint64_t link_dwn:1;
+ uint64_t link_up:1;
+ uint64_t omsg0:1;
+ uint64_t omsg1:1;
+ uint64_t omsg_err:1;
+ uint64_t pko_err:1;
+ uint64_t rtry_err:1;
+ uint64_t f_error:1;
+ uint64_t reserved_22_63:42;
+#endif
+ } cn63xxp1;
+};
+
+union cvmx_sriox_ip_feature {
+ uint64_t u64;
+ struct cvmx_sriox_ip_feature_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ops:32;
+ uint64_t reserved_15_31:17;
+ uint64_t no_vmin:1;
+ uint64_t a66:1;
+ uint64_t a50:1;
+ uint64_t reserved_11_11:1;
+ uint64_t tx_flow:1;
+ uint64_t pt_width:2;
+ uint64_t tx_pol:4;
+ uint64_t rx_pol:4;
+#else
+ uint64_t rx_pol:4;
+ uint64_t tx_pol:4;
+ uint64_t pt_width:2;
+ uint64_t tx_flow:1;
+ uint64_t reserved_11_11:1;
+ uint64_t a50:1;
+ uint64_t a66:1;
+ uint64_t no_vmin:1;
+ uint64_t reserved_15_31:17;
+ uint64_t ops:32;
+#endif
+ } s;
+ struct cvmx_sriox_ip_feature_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ops:32;
+ uint64_t reserved_14_31:18;
+ uint64_t a66:1;
+ uint64_t a50:1;
+ uint64_t reserved_11_11:1;
+ uint64_t tx_flow:1;
+ uint64_t pt_width:2;
+ uint64_t tx_pol:4;
+ uint64_t rx_pol:4;
+#else
+ uint64_t rx_pol:4;
+ uint64_t tx_pol:4;
+ uint64_t pt_width:2;
+ uint64_t tx_flow:1;
+ uint64_t reserved_11_11:1;
+ uint64_t a50:1;
+ uint64_t a66:1;
+ uint64_t reserved_14_31:18;
+ uint64_t ops:32;
+#endif
+ } cn63xx;
+};
+
+union cvmx_sriox_mac_buffers {
+ uint64_t u64;
+ struct cvmx_sriox_mac_buffers_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63:8;
+ uint64_t tx_enb:8;
+ uint64_t reserved_44_47:4;
+ uint64_t tx_inuse:4;
+ uint64_t tx_stat:8;
+ uint64_t reserved_24_31:8;
+ uint64_t rx_enb:8;
+ uint64_t reserved_12_15:4;
+ uint64_t rx_inuse:4;
+ uint64_t rx_stat:8;
+#else
+ uint64_t rx_stat:8;
+ uint64_t rx_inuse:4;
+ uint64_t reserved_12_15:4;
+ uint64_t rx_enb:8;
+ uint64_t reserved_24_31:8;
+ uint64_t tx_stat:8;
+ uint64_t tx_inuse:4;
+ uint64_t reserved_44_47:4;
+ uint64_t tx_enb:8;
+ uint64_t reserved_56_63:8;
+#endif
+ } s;
+};
+
+union cvmx_sriox_maint_op {
+ uint64_t u64;
+ struct cvmx_sriox_maint_op_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wr_data:32;
+ uint64_t reserved_27_31:5;
+ uint64_t fail:1;
+ uint64_t pending:1;
+ uint64_t op:1;
+ uint64_t addr:24;
+#else
+ uint64_t addr:24;
+ uint64_t op:1;
+ uint64_t pending:1;
+ uint64_t fail:1;
+ uint64_t reserved_27_31:5;
+ uint64_t wr_data:32;
+#endif
+ } s;
+};
+
+union cvmx_sriox_maint_rd_data {
+ uint64_t u64;
+ struct cvmx_sriox_maint_rd_data_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63:31;
+ uint64_t valid:1;
+ uint64_t rd_data:32;
+#else
+ uint64_t rd_data:32;
+ uint64_t valid:1;
+ uint64_t reserved_33_63:31;
+#endif
+ } s;
+};
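The MAINT_OP/MAINT_RD_DATA pair defined above implements indirect SRIO maintenance accesses: software posts an operation, polls PENDING, then collects the result. A hedged sketch of a read follows; it assumes the CVMX_SRIOX_MAINT_OP()/CVMX_SRIOX_MAINT_RD_DATA() address macros from earlier in this header, the cvmx_read_csr()/cvmx_write_csr() accessors from <asm/octeon/cvmx.h>, and OP=0 selecting a read (assumptions; check the hardware manual).

	/* Hedged sketch of a 32-bit maintenance read; not the driver's code. */
	static int srio_maint_read32(int srio_port, uint32_t addr, uint32_t *value)
	{
		union cvmx_sriox_maint_op op;
		union cvmx_sriox_maint_rd_data rd;

		op.u64 = 0;
		op.s.op = 0;		/* assumed: 0 requests a read */
		op.s.addr = addr;	/* 24-bit maintenance register address */
		cvmx_write_csr(CVMX_SRIOX_MAINT_OP(srio_port), op.u64);

		do {			/* wait for the access to retire */
			op.u64 = cvmx_read_csr(CVMX_SRIOX_MAINT_OP(srio_port));
		} while (op.s.pending);

		if (op.s.fail)
			return -EIO;

		rd.u64 = cvmx_read_csr(CVMX_SRIOX_MAINT_RD_DATA(srio_port));
		if (!rd.s.valid)
			return -EIO;

		*value = rd.s.rd_data;
		return 0;
	}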
+
+union cvmx_sriox_mce_tx_ctl {
+ uint64_t u64;
+ struct cvmx_sriox_mce_tx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t mce:1;
+#else
+ uint64_t mce:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_sriox_mem_op_ctrl {
+ uint64_t u64;
+ struct cvmx_sriox_mem_op_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t rr_ro:1;
+ uint64_t w_ro:1;
+ uint64_t reserved_6_7:2;
+ uint64_t rp1_sid:1;
+ uint64_t rp0_sid:2;
+ uint64_t rp1_pid:1;
+ uint64_t rp0_pid:2;
+#else
+ uint64_t rp0_pid:2;
+ uint64_t rp1_pid:1;
+ uint64_t rp0_sid:2;
+ uint64_t rp1_sid:1;
+ uint64_t reserved_6_7:2;
+ uint64_t w_ro:1;
+ uint64_t rr_ro:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+};
+
+union cvmx_sriox_omsg_ctrlx {
+ uint64_t u64;
+ struct cvmx_sriox_omsg_ctrlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t testmode:1;
+ uint64_t reserved_37_62:26;
+ uint64_t silo_max:5;
+ uint64_t rtry_thr:16;
+ uint64_t rtry_en:1;
+ uint64_t reserved_11_14:4;
+ uint64_t idm_tt:1;
+ uint64_t idm_sis:1;
+ uint64_t idm_did:1;
+ uint64_t lttr_sp:4;
+ uint64_t lttr_mp:4;
+#else
+ uint64_t lttr_mp:4;
+ uint64_t lttr_sp:4;
+ uint64_t idm_did:1;
+ uint64_t idm_sis:1;
+ uint64_t idm_tt:1;
+ uint64_t reserved_11_14:4;
+ uint64_t rtry_en:1;
+ uint64_t rtry_thr:16;
+ uint64_t silo_max:5;
+ uint64_t reserved_37_62:26;
+ uint64_t testmode:1;
+#endif
+ } s;
+ struct cvmx_sriox_omsg_ctrlx_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t testmode:1;
+ uint64_t reserved_32_62:31;
+ uint64_t rtry_thr:16;
+ uint64_t rtry_en:1;
+ uint64_t reserved_11_14:4;
+ uint64_t idm_tt:1;
+ uint64_t idm_sis:1;
+ uint64_t idm_did:1;
+ uint64_t lttr_sp:4;
+ uint64_t lttr_mp:4;
+#else
+ uint64_t lttr_mp:4;
+ uint64_t lttr_sp:4;
+ uint64_t idm_did:1;
+ uint64_t idm_sis:1;
+ uint64_t idm_tt:1;
+ uint64_t reserved_11_14:4;
+ uint64_t rtry_en:1;
+ uint64_t rtry_thr:16;
+ uint64_t reserved_32_62:31;
+ uint64_t testmode:1;
+#endif
+ } cn63xxp1;
+};
+
+union cvmx_sriox_omsg_done_countsx {
+ uint64_t u64;
+ struct cvmx_sriox_omsg_done_countsx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t bad:16;
+ uint64_t good:16;
+#else
+ uint64_t good:16;
+ uint64_t bad:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_sriox_omsg_fmp_mrx {
+ uint64_t u64;
+ struct cvmx_sriox_omsg_fmp_mrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63:49;
+ uint64_t ctlr_sp:1;
+ uint64_t ctlr_fmp:1;
+ uint64_t ctlr_nmp:1;
+ uint64_t id_sp:1;
+ uint64_t id_fmp:1;
+ uint64_t id_nmp:1;
+ uint64_t id_psd:1;
+ uint64_t mbox_sp:1;
+ uint64_t mbox_fmp:1;
+ uint64_t mbox_nmp:1;
+ uint64_t mbox_psd:1;
+ uint64_t all_sp:1;
+ uint64_t all_fmp:1;
+ uint64_t all_nmp:1;
+ uint64_t all_psd:1;
+#else
+ uint64_t all_psd:1;
+ uint64_t all_nmp:1;
+ uint64_t all_fmp:1;
+ uint64_t all_sp:1;
+ uint64_t mbox_psd:1;
+ uint64_t mbox_nmp:1;
+ uint64_t mbox_fmp:1;
+ uint64_t mbox_sp:1;
+ uint64_t id_psd:1;
+ uint64_t id_nmp:1;
+ uint64_t id_fmp:1;
+ uint64_t id_sp:1;
+ uint64_t ctlr_nmp:1;
+ uint64_t ctlr_fmp:1;
+ uint64_t ctlr_sp:1;
+ uint64_t reserved_15_63:49;
+#endif
+ } s;
+};
+
+union cvmx_sriox_omsg_nmp_mrx {
+ uint64_t u64;
+ struct cvmx_sriox_omsg_nmp_mrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63:49;
+ uint64_t ctlr_sp:1;
+ uint64_t ctlr_fmp:1;
+ uint64_t ctlr_nmp:1;
+ uint64_t id_sp:1;
+ uint64_t id_fmp:1;
+ uint64_t id_nmp:1;
+ uint64_t reserved_8_8:1;
+ uint64_t mbox_sp:1;
+ uint64_t mbox_fmp:1;
+ uint64_t mbox_nmp:1;
+ uint64_t reserved_4_4:1;
+ uint64_t all_sp:1;
+ uint64_t all_fmp:1;
+ uint64_t all_nmp:1;
+ uint64_t reserved_0_0:1;
+#else
+ uint64_t reserved_0_0:1;
+ uint64_t all_nmp:1;
+ uint64_t all_fmp:1;
+ uint64_t all_sp:1;
+ uint64_t reserved_4_4:1;
+ uint64_t mbox_nmp:1;
+ uint64_t mbox_fmp:1;
+ uint64_t mbox_sp:1;
+ uint64_t reserved_8_8:1;
+ uint64_t id_nmp:1;
+ uint64_t id_fmp:1;
+ uint64_t id_sp:1;
+ uint64_t ctlr_nmp:1;
+ uint64_t ctlr_fmp:1;
+ uint64_t ctlr_sp:1;
+ uint64_t reserved_15_63:49;
+#endif
+ } s;
+};
+
+union cvmx_sriox_omsg_portx {
+ uint64_t u64;
+ struct cvmx_sriox_omsg_portx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t enable:1;
+ uint64_t reserved_3_30:28;
+ uint64_t port:3;
+#else
+ uint64_t port:3;
+ uint64_t reserved_3_30:28;
+ uint64_t enable:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_sriox_omsg_portx_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t enable:1;
+ uint64_t reserved_2_30:29;
+ uint64_t port:2;
+#else
+ uint64_t port:2;
+ uint64_t reserved_2_30:29;
+ uint64_t enable:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn63xx;
+};
+
+union cvmx_sriox_omsg_silo_thr {
+ uint64_t u64;
+ struct cvmx_sriox_omsg_silo_thr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t tot_silo:5;
+#else
+ uint64_t tot_silo:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_sriox_omsg_sp_mrx {
+ uint64_t u64;
+ struct cvmx_sriox_omsg_sp_mrx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t xmbox_sp:1;
+ uint64_t ctlr_sp:1;
+ uint64_t ctlr_fmp:1;
+ uint64_t ctlr_nmp:1;
+ uint64_t id_sp:1;
+ uint64_t id_fmp:1;
+ uint64_t id_nmp:1;
+ uint64_t id_psd:1;
+ uint64_t mbox_sp:1;
+ uint64_t mbox_fmp:1;
+ uint64_t mbox_nmp:1;
+ uint64_t mbox_psd:1;
+ uint64_t all_sp:1;
+ uint64_t all_fmp:1;
+ uint64_t all_nmp:1;
+ uint64_t all_psd:1;
+#else
+ uint64_t all_psd:1;
+ uint64_t all_nmp:1;
+ uint64_t all_fmp:1;
+ uint64_t all_sp:1;
+ uint64_t mbox_psd:1;
+ uint64_t mbox_nmp:1;
+ uint64_t mbox_fmp:1;
+ uint64_t mbox_sp:1;
+ uint64_t id_psd:1;
+ uint64_t id_nmp:1;
+ uint64_t id_fmp:1;
+ uint64_t id_sp:1;
+ uint64_t ctlr_nmp:1;
+ uint64_t ctlr_fmp:1;
+ uint64_t ctlr_sp:1;
+ uint64_t xmbox_sp:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_sriox_priox_in_use {
+ uint64_t u64;
+ struct cvmx_sriox_priox_in_use_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t end_cnt:16;
+ uint64_t start_cnt:16;
+#else
+ uint64_t start_cnt:16;
+ uint64_t end_cnt:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_sriox_rx_bell {
+ uint64_t u64;
+ struct cvmx_sriox_rx_bell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t data:16;
+ uint64_t src_id:16;
+ uint64_t count:8;
+ uint64_t reserved_5_7:3;
+ uint64_t dest_id:1;
+ uint64_t id16:1;
+ uint64_t reserved_2_2:1;
+ uint64_t priority:2;
+#else
+ uint64_t priority:2;
+ uint64_t reserved_2_2:1;
+ uint64_t id16:1;
+ uint64_t dest_id:1;
+ uint64_t reserved_5_7:3;
+ uint64_t count:8;
+ uint64_t src_id:16;
+ uint64_t data:16;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_sriox_rx_bell_seq {
+ uint64_t u64;
+ struct cvmx_sriox_rx_bell_seq_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t count:8;
+ uint64_t seq:32;
+#else
+ uint64_t seq:32;
+ uint64_t count:8;
+ uint64_t reserved_40_63:24;
+#endif
+ } s;
+};
+
+union cvmx_sriox_rx_status {
+ uint64_t u64;
+ struct cvmx_sriox_rx_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rtn_pr3:8;
+ uint64_t rtn_pr2:8;
+ uint64_t rtn_pr1:8;
+ uint64_t reserved_28_39:12;
+ uint64_t mbox:4;
+ uint64_t comp:8;
+ uint64_t reserved_13_15:3;
+ uint64_t n_post:5;
+ uint64_t post:8;
+#else
+ uint64_t post:8;
+ uint64_t n_post:5;
+ uint64_t reserved_13_15:3;
+ uint64_t comp:8;
+ uint64_t mbox:4;
+ uint64_t reserved_28_39:12;
+ uint64_t rtn_pr1:8;
+ uint64_t rtn_pr2:8;
+ uint64_t rtn_pr3:8;
+#endif
+ } s;
+};
+
+union cvmx_sriox_s2m_typex {
+ uint64_t u64;
+ struct cvmx_sriox_s2m_typex_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63:45;
+ uint64_t wr_op:3;
+ uint64_t reserved_15_15:1;
+ uint64_t rd_op:3;
+ uint64_t wr_prior:2;
+ uint64_t rd_prior:2;
+ uint64_t reserved_6_7:2;
+ uint64_t src_id:1;
+ uint64_t id16:1;
+ uint64_t reserved_2_3:2;
+ uint64_t iaow_sel:2;
+#else
+ uint64_t iaow_sel:2;
+ uint64_t reserved_2_3:2;
+ uint64_t id16:1;
+ uint64_t src_id:1;
+ uint64_t reserved_6_7:2;
+ uint64_t rd_prior:2;
+ uint64_t wr_prior:2;
+ uint64_t rd_op:3;
+ uint64_t reserved_15_15:1;
+ uint64_t wr_op:3;
+ uint64_t reserved_19_63:45;
+#endif
+ } s;
+};
+
+union cvmx_sriox_seq {
+ uint64_t u64;
+ struct cvmx_sriox_seq_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t seq:32;
+#else
+ uint64_t seq:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_sriox_status_reg {
+ uint64_t u64;
+ struct cvmx_sriox_status_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t access:1;
+ uint64_t srio:1;
+#else
+ uint64_t srio:1;
+ uint64_t access:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } s;
+};
+
+union cvmx_sriox_tag_ctrl {
+ uint64_t u64;
+ struct cvmx_sriox_tag_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t o_clr:1;
+ uint64_t reserved_13_15:3;
+ uint64_t otag:5;
+ uint64_t reserved_5_7:3;
+ uint64_t itag:5;
+#else
+ uint64_t itag:5;
+ uint64_t reserved_5_7:3;
+ uint64_t otag:5;
+ uint64_t reserved_13_15:3;
+ uint64_t o_clr:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } s;
+};
+
+union cvmx_sriox_tlp_credits {
+ uint64_t u64;
+ struct cvmx_sriox_tlp_credits_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t mbox:4;
+ uint64_t comp:8;
+ uint64_t reserved_13_15:3;
+ uint64_t n_post:5;
+ uint64_t post:8;
+#else
+ uint64_t post:8;
+ uint64_t n_post:5;
+ uint64_t reserved_13_15:3;
+ uint64_t comp:8;
+ uint64_t mbox:4;
+ uint64_t reserved_28_63:36;
+#endif
+ } s;
+};
+
+union cvmx_sriox_tx_bell {
+ uint64_t u64;
+ struct cvmx_sriox_tx_bell_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t data:16;
+ uint64_t dest_id:16;
+ uint64_t reserved_9_15:7;
+ uint64_t pending:1;
+ uint64_t reserved_5_7:3;
+ uint64_t src_id:1;
+ uint64_t id16:1;
+ uint64_t reserved_2_2:1;
+ uint64_t priority:2;
+#else
+ uint64_t priority:2;
+ uint64_t reserved_2_2:1;
+ uint64_t id16:1;
+ uint64_t src_id:1;
+ uint64_t reserved_5_7:3;
+ uint64_t pending:1;
+ uint64_t reserved_9_15:7;
+ uint64_t dest_id:16;
+ uint64_t data:16;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_sriox_tx_bell_info {
+ uint64_t u64;
+ struct cvmx_sriox_tx_bell_info_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t data:16;
+ uint64_t dest_id:16;
+ uint64_t reserved_8_15:8;
+ uint64_t timeout:1;
+ uint64_t error:1;
+ uint64_t retry:1;
+ uint64_t src_id:1;
+ uint64_t id16:1;
+ uint64_t reserved_2_2:1;
+ uint64_t priority:2;
+#else
+ uint64_t priority:2;
+ uint64_t reserved_2_2:1;
+ uint64_t id16:1;
+ uint64_t src_id:1;
+ uint64_t retry:1;
+ uint64_t error:1;
+ uint64_t timeout:1;
+ uint64_t reserved_8_15:8;
+ uint64_t dest_id:16;
+ uint64_t data:16;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+};
+
+union cvmx_sriox_tx_ctrl {
+ uint64_t u64;
+ struct cvmx_sriox_tx_ctrl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_53_63:11;
+ uint64_t tag_th2:5;
+ uint64_t reserved_45_47:3;
+ uint64_t tag_th1:5;
+ uint64_t reserved_37_39:3;
+ uint64_t tag_th0:5;
+ uint64_t reserved_20_31:12;
+ uint64_t tx_th2:4;
+ uint64_t reserved_12_15:4;
+ uint64_t tx_th1:4;
+ uint64_t reserved_4_7:4;
+ uint64_t tx_th0:4;
+#else
+ uint64_t tx_th0:4;
+ uint64_t reserved_4_7:4;
+ uint64_t tx_th1:4;
+ uint64_t reserved_12_15:4;
+ uint64_t tx_th2:4;
+ uint64_t reserved_20_31:12;
+ uint64_t tag_th0:5;
+ uint64_t reserved_37_39:3;
+ uint64_t tag_th1:5;
+ uint64_t reserved_45_47:3;
+ uint64_t tag_th2:5;
+ uint64_t reserved_53_63:11;
+#endif
+ } s;
+};
+
+union cvmx_sriox_tx_emphasis {
+ uint64_t u64;
+ struct cvmx_sriox_tx_emphasis_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t emph:4;
+#else
+ uint64_t emph:4;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_sriox_tx_status {
+ uint64_t u64;
+ struct cvmx_sriox_tx_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t s2m_pr3:8;
+ uint64_t s2m_pr2:8;
+ uint64_t s2m_pr1:8;
+ uint64_t s2m_pr0:8;
+#else
+ uint64_t s2m_pr0:8;
+ uint64_t s2m_pr1:8;
+ uint64_t s2m_pr2:8;
+ uint64_t s2m_pr3:8;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_sriox_wr_done_counts {
+ uint64_t u64;
+ struct cvmx_sriox_wr_done_counts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t bad:16;
+ uint64_t good:16;
+#else
+ uint64_t good:16;
+ uint64_t bad:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-srxx-defs.h b/arch/mips/include/asm/octeon/cvmx-srxx-defs.h
new file mode 100644
index 000000000..76b2a42f5
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-srxx-defs.h
@@ -0,0 +1,140 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_SRXX_DEFS_H__
+#define __CVMX_SRXX_DEFS_H__
+
+#define CVMX_SRXX_COM_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000200ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SRXX_IGN_RX_FULL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000218ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SRXX_SPI4_CALX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180090000000ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8)
+#define CVMX_SRXX_SPI4_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000208ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SRXX_SW_TICK_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000220ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_SRXX_SW_TICK_DAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000228ull) + ((block_id) & 1) * 0x8000000ull)
+
+union cvmx_srxx_com_ctl {
+ uint64_t u64;
+ struct cvmx_srxx_com_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t prts:4;
+ uint64_t st_en:1;
+ uint64_t reserved_1_2:2;
+ uint64_t inf_en:1;
+#else
+ uint64_t inf_en:1;
+ uint64_t reserved_1_2:2;
+ uint64_t st_en:1;
+ uint64_t prts:4;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_srxx_ign_rx_full {
+ uint64_t u64;
+ struct cvmx_srxx_ign_rx_full_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t ignore:16;
+#else
+ uint64_t ignore:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_srxx_spi4_calx {
+ uint64_t u64;
+ struct cvmx_srxx_spi4_calx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t oddpar:1;
+ uint64_t prt3:4;
+ uint64_t prt2:4;
+ uint64_t prt1:4;
+ uint64_t prt0:4;
+#else
+ uint64_t prt0:4;
+ uint64_t prt1:4;
+ uint64_t prt2:4;
+ uint64_t prt3:4;
+ uint64_t oddpar:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } s;
+};
+
+union cvmx_srxx_spi4_stat {
+ uint64_t u64;
+ struct cvmx_srxx_spi4_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t m:8;
+ uint64_t reserved_7_7:1;
+ uint64_t len:7;
+#else
+ uint64_t len:7;
+ uint64_t reserved_7_7:1;
+ uint64_t m:8;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_srxx_sw_tick_ctl {
+ uint64_t u64;
+ struct cvmx_srxx_sw_tick_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t eop:1;
+ uint64_t sop:1;
+ uint64_t mod:4;
+ uint64_t opc:4;
+ uint64_t adr:4;
+#else
+ uint64_t adr:4;
+ uint64_t opc:4;
+ uint64_t mod:4;
+ uint64_t sop:1;
+ uint64_t eop:1;
+ uint64_t reserved_14_63:50;
+#endif
+ } s;
+};
+
+union cvmx_srxx_sw_tick_dat {
+ uint64_t u64;
+ struct cvmx_srxx_sw_tick_dat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dat:64;
+#else
+ uint64_t dat:64;
+#endif
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-stxx-defs.h b/arch/mips/include/asm/octeon/cvmx-stxx-defs.h
new file mode 100644
index 000000000..f49d82145
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-stxx-defs.h
@@ -0,0 +1,330 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (C) 2003-2018 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_STXX_DEFS_H__
+#define __CVMX_STXX_DEFS_H__
+
+#define CVMX_STXX_ARB_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000608ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_STXX_BCKPRS_CNT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000688ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_STXX_COM_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000600ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_STXX_DIP_CNT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000690ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_STXX_IGN_CAL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000610ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_STXX_INT_MSK(block_id) (CVMX_ADD_IO_SEG(0x00011800900006A0ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_STXX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x0001180090000698ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_STXX_INT_SYNC(block_id) (CVMX_ADD_IO_SEG(0x00011800900006A8ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_STXX_MIN_BST(block_id) (CVMX_ADD_IO_SEG(0x0001180090000618ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_STXX_SPI4_CALX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180090000400ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8)
+#define CVMX_STXX_SPI4_DAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000628ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_STXX_SPI4_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000630ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_STXX_STAT_BYTES_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180090000648ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_STXX_STAT_BYTES_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180090000680ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_STXX_STAT_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000638ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_STXX_STAT_PKT_XMT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000640ull) + ((block_id) & 1) * 0x8000000ull)
+
+void __cvmx_interrupt_stxx_int_msk_enable(int index);
+
+union cvmx_stxx_arb_ctl {
+ uint64_t u64;
+ struct cvmx_stxx_arb_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t mintrn:1;
+ uint64_t reserved_4_4:1;
+ uint64_t igntpa:1;
+ uint64_t reserved_0_2:3;
+#else
+ uint64_t reserved_0_2:3;
+ uint64_t igntpa:1;
+ uint64_t reserved_4_4:1;
+ uint64_t mintrn:1;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_stxx_bckprs_cnt {
+ uint64_t u64;
+ struct cvmx_stxx_bckprs_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_stxx_com_ctl {
+ uint64_t u64;
+ struct cvmx_stxx_com_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t st_en:1;
+ uint64_t reserved_1_2:2;
+ uint64_t inf_en:1;
+#else
+ uint64_t inf_en:1;
+ uint64_t reserved_1_2:2;
+ uint64_t st_en:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+};
+
+union cvmx_stxx_dip_cnt {
+ uint64_t u64;
+ struct cvmx_stxx_dip_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t frmmax:4;
+ uint64_t dipmax:4;
+#else
+ uint64_t dipmax:4;
+ uint64_t frmmax:4;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_stxx_ign_cal {
+ uint64_t u64;
+ struct cvmx_stxx_ign_cal_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t igntpa:16;
+#else
+ uint64_t igntpa:16;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_stxx_int_msk {
+ uint64_t u64;
+ struct cvmx_stxx_int_msk_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t frmerr:1;
+ uint64_t unxfrm:1;
+ uint64_t nosync:1;
+ uint64_t diperr:1;
+ uint64_t datovr:1;
+ uint64_t ovrbst:1;
+ uint64_t calpar1:1;
+ uint64_t calpar0:1;
+#else
+ uint64_t calpar0:1;
+ uint64_t calpar1:1;
+ uint64_t ovrbst:1;
+ uint64_t datovr:1;
+ uint64_t diperr:1;
+ uint64_t nosync:1;
+ uint64_t unxfrm:1;
+ uint64_t frmerr:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_stxx_int_reg {
+ uint64_t u64;
+ struct cvmx_stxx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t syncerr:1;
+ uint64_t frmerr:1;
+ uint64_t unxfrm:1;
+ uint64_t nosync:1;
+ uint64_t diperr:1;
+ uint64_t datovr:1;
+ uint64_t ovrbst:1;
+ uint64_t calpar1:1;
+ uint64_t calpar0:1;
+#else
+ uint64_t calpar0:1;
+ uint64_t calpar1:1;
+ uint64_t ovrbst:1;
+ uint64_t datovr:1;
+ uint64_t diperr:1;
+ uint64_t nosync:1;
+ uint64_t unxfrm:1;
+ uint64_t frmerr:1;
+ uint64_t syncerr:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } s;
+};
+
+union cvmx_stxx_int_sync {
+ uint64_t u64;
+ struct cvmx_stxx_int_sync_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t frmerr:1;
+ uint64_t unxfrm:1;
+ uint64_t nosync:1;
+ uint64_t diperr:1;
+ uint64_t datovr:1;
+ uint64_t ovrbst:1;
+ uint64_t calpar1:1;
+ uint64_t calpar0:1;
+#else
+ uint64_t calpar0:1;
+ uint64_t calpar1:1;
+ uint64_t ovrbst:1;
+ uint64_t datovr:1;
+ uint64_t diperr:1;
+ uint64_t nosync:1;
+ uint64_t unxfrm:1;
+ uint64_t frmerr:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_stxx_min_bst {
+ uint64_t u64;
+ struct cvmx_stxx_min_bst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t minb:9;
+#else
+ uint64_t minb:9;
+ uint64_t reserved_9_63:55;
+#endif
+ } s;
+};
+
+union cvmx_stxx_spi4_calx {
+ uint64_t u64;
+ struct cvmx_stxx_spi4_calx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t oddpar:1;
+ uint64_t prt3:4;
+ uint64_t prt2:4;
+ uint64_t prt1:4;
+ uint64_t prt0:4;
+#else
+ uint64_t prt0:4;
+ uint64_t prt1:4;
+ uint64_t prt2:4;
+ uint64_t prt3:4;
+ uint64_t oddpar:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } s;
+};
+
+union cvmx_stxx_spi4_dat {
+ uint64_t u64;
+ struct cvmx_stxx_spi4_dat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t alpha:16;
+ uint64_t max_t:16;
+#else
+ uint64_t max_t:16;
+ uint64_t alpha:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_stxx_spi4_stat {
+ uint64_t u64;
+ struct cvmx_stxx_spi4_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t m:8;
+ uint64_t reserved_7_7:1;
+ uint64_t len:7;
+#else
+ uint64_t len:7;
+ uint64_t reserved_7_7:1;
+ uint64_t m:8;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+};
+
+union cvmx_stxx_stat_bytes_hi {
+ uint64_t u64;
+ struct cvmx_stxx_stat_bytes_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_stxx_stat_bytes_lo {
+ uint64_t u64;
+ struct cvmx_stxx_stat_bytes_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_stxx_stat_ctl {
+ uint64_t u64;
+ struct cvmx_stxx_stat_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t clr:1;
+ uint64_t bckprs:4;
+#else
+ uint64_t bckprs:4;
+ uint64_t clr:1;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_stxx_stat_pkt_xmt {
+ uint64_t u64;
+ struct cvmx_stxx_stat_pkt_xmt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+#else
+ uint64_t cnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-sysinfo.h b/arch/mips/include/asm/octeon/cvmx-sysinfo.h
new file mode 100644
index 000000000..c6c3ee39c
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-sysinfo.h
@@ -0,0 +1,125 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * This module provides system/board information obtained by the bootloader.
+ */
+
+#ifndef __CVMX_SYSINFO_H__
+#define __CVMX_SYSINFO_H__
+
+#include "cvmx-coremask.h"
+
+#define OCTEON_SERIAL_LEN 20
+/**
+ * Structure describing application specific information.
+ * __cvmx_app_init() populates this from the cvmx boot descriptor.
+ * This structure is private to simple executive applications, so
+ * no versioning is required.
+ *
+ * This structure must be provided with some fields set in order to
+ * use simple executive functions in other applications (Linux kernel,
+ * u-boot, etc.). The cvmx_sysinfo_minimal_initialize() function is
+ * provided to set the required values in these cases.
+ */
+struct cvmx_sysinfo {
+ /* System wide variables */
+ /* installed DRAM in system, in bytes */
+ uint64_t system_dram_size;
+
+ /* ptr to memory descriptor block */
+ uint64_t phy_mem_desc_addr;
+
+ /* Application image specific variables */
+ /* stack top address (virtual) */
+ uint64_t stack_top;
+ /* heap base address (virtual) */
+ uint64_t heap_base;
+ /* stack size in bytes */
+ uint32_t stack_size;
+ /* heap size in bytes */
+ uint32_t heap_size;
+ /* coremask defining cores running application */
+ struct cvmx_coremask core_mask;
+ /* Deprecated, use cvmx_coremask_first_core() to select init core */
+ uint32_t init_core;
+
+ /* exception base address, as set by bootloader */
+ uint64_t exception_base_addr;
+
+ /* cpu clock speed in hz */
+ uint32_t cpu_clock_hz;
+
+	/* dram data rate in hz (data rate = 2 * clock rate) */
+ uint32_t dram_data_rate_hz;
+
+
+ uint16_t board_type;
+ uint8_t board_rev_major;
+ uint8_t board_rev_minor;
+ uint8_t mac_addr_base[6];
+ uint8_t mac_addr_count;
+ char board_serial_number[OCTEON_SERIAL_LEN];
+ /*
+ * Several boards support compact flash on the Octeon boot
+ * bus. The CF memory spaces may be mapped to different
+ * addresses on different boards. These values will be 0 if
+ * CF is not present. Note that these addresses are physical
+ * addresses, and it is up to the application to use the
+ * proper addressing mode (XKPHYS, KSEG0, etc.)
+ */
+ uint64_t compact_flash_common_base_addr;
+ uint64_t compact_flash_attribute_base_addr;
+ /*
+ * Base address of the LED display (as on EBT3000 board) This
+ * will be 0 if LED display not present. Note that this
+ * address is a physical address, and it is up to the
+ * application to use the proper addressing mode (XKPHYS,
+ * KSEG0, etc.)
+ */
+ uint64_t led_display_base_addr;
+ /* DFA reference clock in hz (if applicable)*/
+ uint32_t dfa_ref_clock_hz;
+ /* configuration flags from bootloader */
+ uint32_t bootloader_config_flags;
+
+ /* Uart number used for console */
+ uint8_t console_uart_num;
+};
+
+/**
+ * This function returns the system/board information as obtained
+ * by the bootloader.
+ *
+ *
+ * Returns Pointer to the boot information structure
+ *
+ */
+
+extern struct cvmx_sysinfo *cvmx_sysinfo_get(void);
+
+#endif /* __CVMX_SYSINFO_H__ */
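A short usage sketch for the accessor declared above, assuming the kernel's pr_info(); the fields come from struct cvmx_sysinfo as filled in by the bootloader:

	/* Hedged sketch: report a few bootloader-provided board parameters. */
	static void print_board_info(void)
	{
		struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();

		pr_info("board type %u rev %u.%u, %llu MB DRAM, CPU %u MHz\n",
			sysinfo->board_type,
			sysinfo->board_rev_major, sysinfo->board_rev_minor,
			(unsigned long long)(sysinfo->system_dram_size >> 20),
			sysinfo->cpu_clock_hz / 1000000);
	}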
diff --git a/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h b/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h
new file mode 100644
index 000000000..6cf228016
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h
@@ -0,0 +1,386 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_UCTLX_DEFS_H__
+#define __CVMX_UCTLX_DEFS_H__
+
+#define CVMX_UCTLX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x000118006F0000A0ull))
+#define CVMX_UCTLX_CLK_RST_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000000ull))
+#define CVMX_UCTLX_EHCI_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000080ull))
+#define CVMX_UCTLX_EHCI_FLA(block_id) (CVMX_ADD_IO_SEG(0x000118006F0000A8ull))
+#define CVMX_UCTLX_ERTO_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000090ull))
+#define CVMX_UCTLX_IF_ENA(block_id) (CVMX_ADD_IO_SEG(0x000118006F000030ull))
+#define CVMX_UCTLX_INT_ENA(block_id) (CVMX_ADD_IO_SEG(0x000118006F000028ull))
+#define CVMX_UCTLX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x000118006F000020ull))
+#define CVMX_UCTLX_OHCI_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000088ull))
+#define CVMX_UCTLX_ORTO_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000098ull))
+#define CVMX_UCTLX_PPAF_WM(block_id) (CVMX_ADD_IO_SEG(0x000118006F000038ull))
+#define CVMX_UCTLX_UPHY_CTL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x000118006F000008ull))
+#define CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(offset, block_id) (CVMX_ADD_IO_SEG(0x000118006F000010ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8)
+
+union cvmx_uctlx_bist_status {
+ uint64_t u64;
+ struct cvmx_uctlx_bist_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t data_bis:1;
+ uint64_t desc_bis:1;
+ uint64_t erbm_bis:1;
+ uint64_t orbm_bis:1;
+ uint64_t wrbm_bis:1;
+ uint64_t ppaf_bis:1;
+#else
+ uint64_t ppaf_bis:1;
+ uint64_t wrbm_bis:1;
+ uint64_t orbm_bis:1;
+ uint64_t erbm_bis:1;
+ uint64_t desc_bis:1;
+ uint64_t data_bis:1;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_uctlx_clk_rst_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_clk_rst_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t clear_bist:1;
+ uint64_t start_bist:1;
+ uint64_t ehci_sm:1;
+ uint64_t ohci_clkcktrst:1;
+ uint64_t ohci_sm:1;
+ uint64_t ohci_susp_lgcy:1;
+ uint64_t app_start_clk:1;
+ uint64_t o_clkdiv_rst:1;
+ uint64_t h_clkdiv_byp:1;
+ uint64_t h_clkdiv_rst:1;
+ uint64_t h_clkdiv_en:1;
+ uint64_t o_clkdiv_en:1;
+ uint64_t h_div:4;
+ uint64_t p_refclk_sel:2;
+ uint64_t p_refclk_div:2;
+ uint64_t reserved_4_4:1;
+ uint64_t p_com_on:1;
+ uint64_t p_por:1;
+ uint64_t p_prst:1;
+ uint64_t hrst:1;
+#else
+ uint64_t hrst:1;
+ uint64_t p_prst:1;
+ uint64_t p_por:1;
+ uint64_t p_com_on:1;
+ uint64_t reserved_4_4:1;
+ uint64_t p_refclk_div:2;
+ uint64_t p_refclk_sel:2;
+ uint64_t h_div:4;
+ uint64_t o_clkdiv_en:1;
+ uint64_t h_clkdiv_en:1;
+ uint64_t h_clkdiv_rst:1;
+ uint64_t h_clkdiv_byp:1;
+ uint64_t o_clkdiv_rst:1;
+ uint64_t app_start_clk:1;
+ uint64_t ohci_susp_lgcy:1;
+ uint64_t ohci_sm:1;
+ uint64_t ohci_clkcktrst:1;
+ uint64_t ehci_sm:1;
+ uint64_t start_bist:1;
+ uint64_t clear_bist:1;
+ uint64_t reserved_25_63:39;
+#endif
+ } s;
+};
+
+union cvmx_uctlx_ehci_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_ehci_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t desc_rbm:1;
+ uint64_t reg_nb:1;
+ uint64_t l2c_dc:1;
+ uint64_t l2c_bc:1;
+ uint64_t l2c_0pag:1;
+ uint64_t l2c_stt:1;
+ uint64_t l2c_buff_emod:2;
+ uint64_t l2c_desc_emod:2;
+ uint64_t inv_reg_a2:1;
+ uint64_t ehci_64b_addr_en:1;
+ uint64_t l2c_addr_msb:8;
+#else
+ uint64_t l2c_addr_msb:8;
+ uint64_t ehci_64b_addr_en:1;
+ uint64_t inv_reg_a2:1;
+ uint64_t l2c_desc_emod:2;
+ uint64_t l2c_buff_emod:2;
+ uint64_t l2c_stt:1;
+ uint64_t l2c_0pag:1;
+ uint64_t l2c_bc:1;
+ uint64_t l2c_dc:1;
+ uint64_t reg_nb:1;
+ uint64_t desc_rbm:1;
+ uint64_t reserved_20_63:44;
+#endif
+ } s;
+};
+
+union cvmx_uctlx_ehci_fla {
+ uint64_t u64;
+ struct cvmx_uctlx_ehci_fla_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_6_63:58;
+ uint64_t fla:6;
+#else
+ uint64_t fla:6;
+ uint64_t reserved_6_63:58;
+#endif
+ } s;
+};
+
+union cvmx_uctlx_erto_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_erto_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t to_val:27;
+ uint64_t reserved_0_4:5;
+#else
+ uint64_t reserved_0_4:5;
+ uint64_t to_val:27;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_uctlx_if_ena {
+ uint64_t u64;
+ struct cvmx_uctlx_if_ena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t en:1;
+#else
+ uint64_t en:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+};
+
+union cvmx_uctlx_int_ena {
+ uint64_t u64;
+ struct cvmx_uctlx_int_ena_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t ec_ovf_e:1;
+ uint64_t oc_ovf_e:1;
+ uint64_t wb_pop_e:1;
+ uint64_t wb_psh_f:1;
+ uint64_t cf_psh_f:1;
+ uint64_t or_psh_f:1;
+ uint64_t er_psh_f:1;
+ uint64_t pp_psh_f:1;
+#else
+ uint64_t pp_psh_f:1;
+ uint64_t er_psh_f:1;
+ uint64_t or_psh_f:1;
+ uint64_t cf_psh_f:1;
+ uint64_t wb_psh_f:1;
+ uint64_t wb_pop_e:1;
+ uint64_t oc_ovf_e:1;
+ uint64_t ec_ovf_e:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_uctlx_int_reg {
+ uint64_t u64;
+ struct cvmx_uctlx_int_reg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t ec_ovf_e:1;
+ uint64_t oc_ovf_e:1;
+ uint64_t wb_pop_e:1;
+ uint64_t wb_psh_f:1;
+ uint64_t cf_psh_f:1;
+ uint64_t or_psh_f:1;
+ uint64_t er_psh_f:1;
+ uint64_t pp_psh_f:1;
+#else
+ uint64_t pp_psh_f:1;
+ uint64_t er_psh_f:1;
+ uint64_t or_psh_f:1;
+ uint64_t cf_psh_f:1;
+ uint64_t wb_psh_f:1;
+ uint64_t wb_pop_e:1;
+ uint64_t oc_ovf_e:1;
+ uint64_t ec_ovf_e:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+};
+
+union cvmx_uctlx_ohci_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_ohci_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63:45;
+ uint64_t reg_nb:1;
+ uint64_t l2c_dc:1;
+ uint64_t l2c_bc:1;
+ uint64_t l2c_0pag:1;
+ uint64_t l2c_stt:1;
+ uint64_t l2c_buff_emod:2;
+ uint64_t l2c_desc_emod:2;
+ uint64_t inv_reg_a2:1;
+ uint64_t reserved_8_8:1;
+ uint64_t l2c_addr_msb:8;
+#else
+ uint64_t l2c_addr_msb:8;
+ uint64_t reserved_8_8:1;
+ uint64_t inv_reg_a2:1;
+ uint64_t l2c_desc_emod:2;
+ uint64_t l2c_buff_emod:2;
+ uint64_t l2c_stt:1;
+ uint64_t l2c_0pag:1;
+ uint64_t l2c_bc:1;
+ uint64_t l2c_dc:1;
+ uint64_t reg_nb:1;
+ uint64_t reserved_19_63:45;
+#endif
+ } s;
+};
+
+union cvmx_uctlx_orto_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_orto_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t to_val:24;
+ uint64_t reserved_0_7:8;
+#else
+ uint64_t reserved_0_7:8;
+ uint64_t to_val:24;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+};
+
+union cvmx_uctlx_ppaf_wm {
+ uint64_t u64;
+ struct cvmx_uctlx_ppaf_wm_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t wm:5;
+#else
+ uint64_t wm:5;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+};
+
+union cvmx_uctlx_uphy_ctl_status {
+ uint64_t u64;
+ struct cvmx_uctlx_uphy_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t bist_done:1;
+ uint64_t bist_err:1;
+ uint64_t hsbist:1;
+ uint64_t fsbist:1;
+ uint64_t lsbist:1;
+ uint64_t siddq:1;
+ uint64_t vtest_en:1;
+ uint64_t uphy_bist:1;
+ uint64_t bist_en:1;
+ uint64_t ate_reset:1;
+#else
+ uint64_t ate_reset:1;
+ uint64_t bist_en:1;
+ uint64_t uphy_bist:1;
+ uint64_t vtest_en:1;
+ uint64_t siddq:1;
+ uint64_t lsbist:1;
+ uint64_t fsbist:1;
+ uint64_t hsbist:1;
+ uint64_t bist_err:1;
+ uint64_t bist_done:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+};
+
+union cvmx_uctlx_uphy_portx_ctl_status {
+ uint64_t u64;
+ struct cvmx_uctlx_uphy_portx_ctl_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_43_63:21;
+ uint64_t tdata_out:4;
+ uint64_t txbiststuffenh:1;
+ uint64_t txbiststuffen:1;
+ uint64_t dmpulldown:1;
+ uint64_t dppulldown:1;
+ uint64_t vbusvldext:1;
+ uint64_t portreset:1;
+ uint64_t txhsvxtune:2;
+ uint64_t txvreftune:4;
+ uint64_t txrisetune:1;
+ uint64_t txpreemphasistune:1;
+ uint64_t txfslstune:4;
+ uint64_t sqrxtune:3;
+ uint64_t compdistune:3;
+ uint64_t loop_en:1;
+ uint64_t tclk:1;
+ uint64_t tdata_sel:1;
+ uint64_t taddr_in:4;
+ uint64_t tdata_in:8;
+#else
+ uint64_t tdata_in:8;
+ uint64_t taddr_in:4;
+ uint64_t tdata_sel:1;
+ uint64_t tclk:1;
+ uint64_t loop_en:1;
+ uint64_t compdistune:3;
+ uint64_t sqrxtune:3;
+ uint64_t txfslstune:4;
+ uint64_t txpreemphasistune:1;
+ uint64_t txrisetune:1;
+ uint64_t txvreftune:4;
+ uint64_t txhsvxtune:2;
+ uint64_t portreset:1;
+ uint64_t vbusvldext:1;
+ uint64_t dppulldown:1;
+ uint64_t dmpulldown:1;
+ uint64_t txbiststuffen:1;
+ uint64_t txbiststuffenh:1;
+ uint64_t tdata_out:4;
+ uint64_t reserved_43_63:21;
+#endif
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-wqe.h b/arch/mips/include/asm/octeon/cvmx-wqe.h
new file mode 100644
index 000000000..9cec2299b
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-wqe.h
@@ -0,0 +1,658 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * This header file defines the work queue entry (wqe) data structure.
+ * Since this is a commonly used structure that depends on structures
+ * from several hardware blocks, those definitions have been placed
+ * in this file to create a single point of definition of the wqe
+ * format.
+ * Data structures are still named according to the block that they
+ * relate to.
+ *
+ */
+
+#ifndef __CVMX_WQE_H__
+#define __CVMX_WQE_H__
+
+#include <asm/octeon/cvmx-packet.h>
+
+
+#define OCT_TAG_TYPE_STRING(x) \
+ (((x) == CVMX_POW_TAG_TYPE_ORDERED) ? "ORDERED" : \
+ (((x) == CVMX_POW_TAG_TYPE_ATOMIC) ? "ATOMIC" : \
+ (((x) == CVMX_POW_TAG_TYPE_NULL) ? "NULL" : \
+ "NULL_NULL")))
+
+/**
+ * HW decode / err_code in work queue entry
+ */
+typedef union {
+ uint64_t u64;
+
+ /* Use this struct if the hardware determines that the packet is IP */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* HW sets this to the number of buffers used by this packet */
+ uint64_t bufs:8;
+ /* HW sets to the number of L2 bytes prior to the IP */
+ uint64_t ip_offset:8;
+ /* set to 1 if we found DSA/VLAN in the L2 */
+ uint64_t vlan_valid:1;
+ /* Set to 1 if the DSA/VLAN tag is stacked */
+ uint64_t vlan_stacked:1;
+ uint64_t unassigned:1;
+ /* HW sets to the DSA/VLAN CFI flag (valid when vlan_valid) */
+ uint64_t vlan_cfi:1;
+ /* HW sets to the DSA/VLAN_ID field (valid when vlan_valid) */
+ uint64_t vlan_id:12;
+ /* Ring Identifier (if PCIe). Requires PIP_GBL_CTL[RING_EN]=1 */
+ uint64_t pr:4;
+ uint64_t unassigned2:8;
+ /* the packet needs to be decompressed */
+ uint64_t dec_ipcomp:1;
+ /* the packet is either TCP or UDP */
+ uint64_t tcp_or_udp:1;
+ /* the packet needs to be decrypted (ESP or AH) */
+ uint64_t dec_ipsec:1;
+ /* the packet is IPv6 */
+ uint64_t is_v6:1;
+
+ /*
+ * (rcv_error, not_IP, IP_exc, is_frag, L4_error,
+ * software, etc.).
+ */
+
+ /*
+ * reserved for software use, hardware will clear on
+ * packet creation.
+ */
+ uint64_t software:1;
+ /* exceptional conditions below */
+ /* the receive interface hardware detected an L4 error
+ * (only applies if !is_frag) (only applies if
+ * !rcv_error && !not_IP && !IP_exc && !is_frag)
+ * failure indicated in err_code below, decode:
+ *
+ * - 1 = Malformed L4
+	 *    - 2 = L4 Checksum Error: the L4 checksum value is incorrect.
+ * - 3 = UDP Length Error: The UDP length field would
+ * make the UDP data longer than what remains in
+ * the IP packet (as defined by the IP header
+ * length field).
+ * - 4 = Bad L4 Port: either the source or destination
+ * TCP/UDP port is 0.
+ * - 8 = TCP FIN Only: the packet is TCP and only the
+ * FIN flag set.
+ * - 9 = TCP No Flags: the packet is TCP and no flags
+ * are set.
+ * - 10 = TCP FIN RST: the packet is TCP and both FIN
+ * and RST are set.
+ * - 11 = TCP SYN URG: the packet is TCP and both SYN
+ * and URG are set.
+ * - 12 = TCP SYN RST: the packet is TCP and both SYN
+ * and RST are set.
+ * - 13 = TCP SYN FIN: the packet is TCP and both SYN
+ * and FIN are set.
+ */
+ uint64_t L4_error:1;
+ /* set if the packet is a fragment */
+ uint64_t is_frag:1;
+ /* the receive interface hardware detected an IP error
+ * / exception (only applies if !rcv_error && !not_IP)
+ * failure indicated in err_code below, decode:
+ *
+ * - 1 = Not IP: the IP version field is neither 4 nor
+ * 6.
+ * - 2 = IPv4 Header Checksum Error: the IPv4 header
+ * has a checksum violation.
+ * - 3 = IP Malformed Header: the packet is not long
+ * enough to contain the IP header.
+ * - 4 = IP Malformed: the packet is not long enough
+ * to contain the bytes indicated by the IP
+ * header. Pad is allowed.
+ * - 5 = IP TTL Hop: the IPv4 TTL field or the IPv6
+ * Hop Count field are zero.
+ * - 6 = IP Options
+ */
+ uint64_t IP_exc:1;
+ /*
+ * Set if the hardware determined that the packet is a
+ * broadcast.
+ */
+ uint64_t is_bcast:1;
+ /*
+		 * Set if the hardware determined that the packet is a
+ * multi-cast.
+ */
+ uint64_t is_mcast:1;
+ /*
+ * Set if the packet may not be IP (must be zero in
+ * this case).
+ */
+ uint64_t not_IP:1;
+ /*
+ * The receive interface hardware detected a receive
+ * error (must be zero in this case).
+ */
+ uint64_t rcv_error:1;
+ /* lower err_code = first-level descriptor of the
+ * work */
+ /* zero for packet submitted by hardware that isn't on
+ * the slow path */
+ /* type is cvmx_pip_err_t */
+ uint64_t err_code:8;
+#else
+ uint64_t err_code:8;
+ uint64_t rcv_error:1;
+ uint64_t not_IP:1;
+ uint64_t is_mcast:1;
+ uint64_t is_bcast:1;
+ uint64_t IP_exc:1;
+ uint64_t is_frag:1;
+ uint64_t L4_error:1;
+ uint64_t software:1;
+ uint64_t is_v6:1;
+ uint64_t dec_ipsec:1;
+ uint64_t tcp_or_udp:1;
+ uint64_t dec_ipcomp:1;
+ uint64_t unassigned2:4;
+ uint64_t unassigned2a:4;
+ uint64_t pr:4;
+ uint64_t vlan_id:12;
+ uint64_t vlan_cfi:1;
+ uint64_t unassigned:1;
+ uint64_t vlan_stacked:1;
+ uint64_t vlan_valid:1;
+ uint64_t ip_offset:8;
+ uint64_t bufs:8;
+#endif
+ } s;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bufs:8;
+ uint64_t ip_offset:8;
+ uint64_t vlan_valid:1;
+ uint64_t vlan_stacked:1;
+ uint64_t unassigned:1;
+ uint64_t vlan_cfi:1;
+ uint64_t vlan_id:12;
+ uint64_t port:12; /* MAC/PIP port number. */
+ uint64_t dec_ipcomp:1;
+ uint64_t tcp_or_udp:1;
+ uint64_t dec_ipsec:1;
+ uint64_t is_v6:1;
+ uint64_t software:1;
+ uint64_t L4_error:1;
+ uint64_t is_frag:1;
+ uint64_t IP_exc:1;
+ uint64_t is_bcast:1;
+ uint64_t is_mcast:1;
+ uint64_t not_IP:1;
+ uint64_t rcv_error:1;
+ uint64_t err_code:8;
+#else
+ uint64_t err_code:8;
+ uint64_t rcv_error:1;
+ uint64_t not_IP:1;
+ uint64_t is_mcast:1;
+ uint64_t is_bcast:1;
+ uint64_t IP_exc:1;
+ uint64_t is_frag:1;
+ uint64_t L4_error:1;
+ uint64_t software:1;
+ uint64_t is_v6:1;
+ uint64_t dec_ipsec:1;
+ uint64_t tcp_or_udp:1;
+ uint64_t dec_ipcomp:1;
+ uint64_t port:12;
+ uint64_t vlan_id:12;
+ uint64_t vlan_cfi:1;
+ uint64_t unassigned:1;
+ uint64_t vlan_stacked:1;
+ uint64_t vlan_valid:1;
+ uint64_t ip_offset:8;
+ uint64_t bufs:8;
+#endif
+ } s_cn68xx;
+
+ /* use this to get at the 16 vlan bits */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t unused1:16;
+ uint64_t vlan:16;
+ uint64_t unused2:32;
+#else
+ uint64_t unused2:32;
+ uint64_t vlan:16;
+ uint64_t unused1:16;
+
+#endif
+ } svlan;
+
+ /*
+ * use this struct if the hardware could not determine that
+ * the packet is ip.
+ */
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /*
+ * HW sets this to the number of buffers used by this
+ * packet.
+ */
+ uint64_t bufs:8;
+ uint64_t unused:8;
+ /* set to 1 if we found DSA/VLAN in the L2 */
+ uint64_t vlan_valid:1;
+ /* Set to 1 if the DSA/VLAN tag is stacked */
+ uint64_t vlan_stacked:1;
+ uint64_t unassigned:1;
+ /*
+ * HW sets to the DSA/VLAN CFI flag (valid when
+ * vlan_valid)
+ */
+ uint64_t vlan_cfi:1;
+ /*
+ * HW sets to the DSA/VLAN_ID field (valid when
+ * vlan_valid).
+ */
+ uint64_t vlan_id:12;
+ /*
+ * Ring Identifier (if PCIe). Requires
+ * PIP_GBL_CTL[RING_EN]=1
+ */
+ uint64_t pr:4;
+ uint64_t unassigned2:12;
+ /*
+ * reserved for software use, hardware will clear on
+ * packet creation.
+ */
+ uint64_t software:1;
+ uint64_t unassigned3:1;
+ /*
+ * set if the hardware determined that the packet is
+ * rarp.
+ */
+ uint64_t is_rarp:1;
+ /*
+ * set if the hardware determined that the packet is
+ * arp
+ */
+ uint64_t is_arp:1;
+ /*
+ * set if the hardware determined that the packet is a
+ * broadcast.
+ */
+ uint64_t is_bcast:1;
+ /*
+ * set if the hardware determined that the packet is a
+ * multi-cast
+ */
+ uint64_t is_mcast:1;
+ /*
+ * set if the packet may not be IP (must be one in
+ * this case)
+ */
+ uint64_t not_IP:1;
+ /* The receive interface hardware detected a receive
+ * error. Failure indicated in err_code below,
+ * decode:
+ *
+ * - 1 = partial error: a packet was partially
+ * received, but internal buffering / bandwidth
+ * was not adequate to receive the entire
+ * packet.
+ * - 2 = jabber error: the RGMII packet was too large
+ * and is truncated.
+ * - 3 = overrun error: the RGMII packet is longer
+ * than allowed and had an FCS error.
+ * - 4 = oversize error: the RGMII packet is longer
+ * than allowed.
+ * - 5 = alignment error: the RGMII packet is not an
+ * integer number of bytes
+ * and had an FCS error (100M and 10M only).
+ * - 6 = fragment error: the RGMII packet is shorter
+ * than allowed and had an FCS error.
+ * - 7 = GMX FCS error: the RGMII packet had an FCS
+ * error.
+ * - 8 = undersize error: the RGMII packet is shorter
+ * than allowed.
+ * - 9 = extend error: the RGMII packet had an extend
+ * error.
+ * - 10 = length mismatch error: the RGMII packet had
+ * a length that did not match the length field
+ * in the L2 HDR.
+ * - 11 = RGMII RX error/SPI4 DIP4 Error: the RGMII
+ * packet had one or more data reception errors
+ * (RXERR) or the SPI4 packet had one or more
+ * DIP4 errors.
+ * - 12 = RGMII skip error/SPI4 Abort Error: the RGMII
+ * packet was not large enough to cover the
+ * skipped bytes or the SPI4 packet was
+		 *	  terminated with an Abort EOPS.
+ * - 13 = RGMII nibble error/SPI4 Port NXA Error: the
+		 *	  RGMII packet had a stutter error (data not
+ * repeated - 10/100M only) or the SPI4 packet
+ * was sent to an NXA.
+ * - 16 = FCS error: a SPI4.2 packet had an FCS error.
+ * - 17 = Skip error: a packet was not large enough to
+ * cover the skipped bytes.
+ * - 18 = L2 header malformed: the packet is not long
+ * enough to contain the L2.
+ */
+
+ uint64_t rcv_error:1;
+ /*
+ * lower err_code = first-level descriptor of the
+ * work
+ */
+ /*
+ * zero for packet submitted by hardware that isn't on
+ * the slow path
+ */
+		/* type is cvmx_pip_err_t (union, so can't use directly) */
+ uint64_t err_code:8;
+#else
+ uint64_t err_code:8;
+ uint64_t rcv_error:1;
+ uint64_t not_IP:1;
+ uint64_t is_mcast:1;
+ uint64_t is_bcast:1;
+ uint64_t is_arp:1;
+ uint64_t is_rarp:1;
+ uint64_t unassigned3:1;
+ uint64_t software:1;
+ uint64_t unassigned2:4;
+ uint64_t unassigned2a:8;
+ uint64_t pr:4;
+ uint64_t vlan_id:12;
+ uint64_t vlan_cfi:1;
+ uint64_t unassigned:1;
+ uint64_t vlan_stacked:1;
+ uint64_t vlan_valid:1;
+ uint64_t unused:8;
+ uint64_t bufs:8;
+#endif
+ } snoip;
+
+} cvmx_pip_wqe_word2;
+
+union cvmx_pip_wqe_word0 {
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /**
+ * raw chksum result generated by the HW
+ */
+ uint16_t hw_chksum;
+ /**
+ * Field unused by hardware - available for software
+ */
+ uint8_t unused;
+ /**
+ * Next pointer used by hardware for list maintenance.
+ * May be written/read by HW before the work queue
+ * entry is scheduled to a PP (Only 36 bits used in
+ * Octeon 1)
+ */
+ uint64_t next_ptr:40;
+#else
+ uint64_t next_ptr:40;
+ uint8_t unused;
+ uint16_t hw_chksum;
+#endif
+ } cn38xx;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t l4ptr:8; /* 56..63 */
+ uint64_t unused0:8; /* 48..55 */
+ uint64_t l3ptr:8; /* 40..47 */
+ uint64_t l2ptr:8; /* 32..39 */
+ uint64_t unused1:18; /* 14..31 */
+ uint64_t bpid:6; /* 8..13 */
+ uint64_t unused2:2; /* 6..7 */
+ uint64_t pknd:6; /* 0..5 */
+#else
+ uint64_t pknd:6; /* 0..5 */
+ uint64_t unused2:2; /* 6..7 */
+ uint64_t bpid:6; /* 8..13 */
+ uint64_t unused1:18; /* 14..31 */
+ uint64_t l2ptr:8; /* 32..39 */
+ uint64_t l3ptr:8; /* 40..47 */
+ uint64_t unused0:8; /* 48..55 */
+ uint64_t l4ptr:8; /* 56..63 */
+#endif
+ } cn68xx;
+};
+
+union cvmx_wqe_word0 {
+ uint64_t u64;
+ union cvmx_pip_wqe_word0 pip;
+};
+
+union cvmx_wqe_word1 {
+ uint64_t u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t len:16;
+ uint64_t varies:14;
+ /**
+ * the type of the tag (ORDERED, ATOMIC, NULL)
+ */
+ uint64_t tag_type:2;
+ uint64_t tag:32;
+#else
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t varies:14;
+ uint64_t len:16;
+#endif
+ };
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t len:16;
+ uint64_t zero_0:1;
+ /**
+ * HW sets this to what it thought the priority of
+ * the input packet was
+ */
+ uint64_t qos:3;
+
+ uint64_t zero_1:1;
+ /**
+ * the group that the work queue entry will be scheduled to
+ */
+ uint64_t grp:6;
+ uint64_t zero_2:3;
+ uint64_t tag_type:2;
+ uint64_t tag:32;
+#else
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t zero_2:3;
+ uint64_t grp:6;
+ uint64_t zero_1:1;
+ uint64_t qos:3;
+ uint64_t zero_0:1;
+ uint64_t len:16;
+#endif
+ } cn68xx;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /**
+ * HW sets to the total number of bytes in the packet
+ */
+ uint64_t len:16;
+ /**
+ * HW sets this to input physical port
+ */
+ uint64_t ipprt:6;
+
+ /**
+ * HW sets this to what it thought the priority of
+ * the input packet was
+ */
+ uint64_t qos:3;
+
+ /**
+ * the group that the work queue entry will be scheduled to
+ */
+ uint64_t grp:4;
+ /**
+ * the type of the tag (ORDERED, ATOMIC, NULL)
+ */
+ uint64_t tag_type:3;
+ /**
+ * the synchronization/ordering tag
+ */
+ uint64_t tag:32;
+#else
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t zero_2:1;
+ uint64_t grp:4;
+ uint64_t qos:3;
+ uint64_t ipprt:6;
+ uint64_t len:16;
+#endif
+ } cn38xx;
+};
+
+/**
+ * Work queue entry format
+ *
+ * must be 8-byte aligned
+ */
+struct cvmx_wqe {
+
+ /*****************************************************************
+ * WORD 0
+ * HW WRITE: the following 64 bits are filled by HW when a packet arrives
+ */
+ union cvmx_wqe_word0 word0;
+
+ /*****************************************************************
+ * WORD 1
+ * HW WRITE: the following 64 bits are filled by HW when a packet arrives
+ */
+ union cvmx_wqe_word1 word1;
+
+ /**
+ * WORD 2 HW WRITE: the following 64-bits are filled in by
+ * hardware when a packet arrives This indicates a variety of
+ * status and error conditions.
+ */
+ cvmx_pip_wqe_word2 word2;
+
+ /**
+ * Pointer to the first segment of the packet.
+ */
+ union cvmx_buf_ptr packet_ptr;
+
+ /**
+ * HW WRITE: octeon will fill in a programmable amount from the
+	 * packet, at most (and possibly less than) the amount
+	 * needed to fill the work queue entry to 128 bytes
+ *
+	 * If the packet is recognized to be IP, the hardware starts
+	 * writing here at the start of the IP header (the IPv4 header
+	 * is padded for appropriate alignment). If the packet is not
+	 * recognized to be IP, the hardware starts writing the
+	 * beginning of the packet here.
+ */
+ uint8_t packet_data[96];
+
+ /**
+ * If desired, SW can make the work Q entry any length. For the
+	 * purposes of discussion here, assume 128B always, as this is all that
+ * the hardware deals with.
+ *
+ */
+
+} CVMX_CACHE_LINE_ALIGNED;
+
+static inline int cvmx_wqe_get_port(struct cvmx_wqe *work)
+{
+ int port;
+
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ port = work->word2.s_cn68xx.port;
+ else
+ port = work->word1.cn38xx.ipprt;
+
+ return port;
+}
+
+static inline void cvmx_wqe_set_port(struct cvmx_wqe *work, int port)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ work->word2.s_cn68xx.port = port;
+ else
+ work->word1.cn38xx.ipprt = port;
+}
+
+static inline int cvmx_wqe_get_grp(struct cvmx_wqe *work)
+{
+ int grp;
+
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ grp = work->word1.cn68xx.grp;
+ else
+ grp = work->word1.cn38xx.grp;
+
+ return grp;
+}
+
+static inline void cvmx_wqe_set_grp(struct cvmx_wqe *work, int grp)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ work->word1.cn68xx.grp = grp;
+ else
+ work->word1.cn38xx.grp = grp;
+}
+
+static inline int cvmx_wqe_get_qos(struct cvmx_wqe *work)
+{
+ int qos;
+
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ qos = work->word1.cn68xx.qos;
+ else
+ qos = work->word1.cn38xx.qos;
+
+ return qos;
+}
+
+static inline void cvmx_wqe_set_qos(struct cvmx_wqe *work, int qos)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+ work->word1.cn68xx.qos = qos;
+ else
+ work->word1.cn38xx.qos = qos;
+}
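+
+/*
+ * Usage sketch (illustrative only): code that consumes a freshly
+ * received work queue entry should go through these accessors rather
+ * than touching word1/word2 directly, so the same source handles both
+ * the CN38XX and CN68XX layouts, e.g.:
+ *
+ *	int port = cvmx_wqe_get_port(work);
+ *	int grp = cvmx_wqe_get_grp(work);
+ *	cvmx_wqe_set_qos(work, 0);
+ */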
+
+#endif /* __CVMX_WQE_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
new file mode 100644
index 000000000..25854abc9
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -0,0 +1,495 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_H__
+#define __CVMX_H__
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+
+enum cvmx_mips_space {
+ CVMX_MIPS_SPACE_XKSEG = 3LL,
+ CVMX_MIPS_SPACE_XKPHYS = 2LL,
+ CVMX_MIPS_SPACE_XSSEG = 1LL,
+ CVMX_MIPS_SPACE_XUSEG = 0LL
+};
+
+/* These macros are for use when using 32 bit pointers. */
+#define CVMX_MIPS32_SPACE_KSEG0 1l
+#define CVMX_ADD_SEG32(segment, add) \
+ (((int32_t)segment << 31) | (int32_t)(add))
+
+#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS
+
+/* These macros simplify the process of creating common IO addresses */
+#define CVMX_ADD_SEG(segment, add) \
+ ((((uint64_t)segment) << 62) | (add))
+#ifndef CVMX_ADD_IO_SEG
+#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add))
+#endif
+
+#include <asm/octeon/cvmx-asm.h>
+#include <asm/octeon/cvmx-packet.h>
+#include <asm/octeon/cvmx-sysinfo.h>
+
+#include <asm/octeon/cvmx-ciu-defs.h>
+#include <asm/octeon/cvmx-ciu3-defs.h>
+#include <asm/octeon/cvmx-gpio-defs.h>
+#include <asm/octeon/cvmx-iob-defs.h>
+#include <asm/octeon/cvmx-ipd-defs.h>
+#include <asm/octeon/cvmx-l2c-defs.h>
+#include <asm/octeon/cvmx-l2d-defs.h>
+#include <asm/octeon/cvmx-l2t-defs.h>
+#include <asm/octeon/cvmx-led-defs.h>
+#include <asm/octeon/cvmx-mio-defs.h>
+#include <asm/octeon/cvmx-pow-defs.h>
+
+#include <asm/octeon/cvmx-bootinfo.h>
+#include <asm/octeon/cvmx-bootmem.h>
+#include <asm/octeon/cvmx-l2c.h>
+
+#ifndef CVMX_ENABLE_DEBUG_PRINTS
+#define CVMX_ENABLE_DEBUG_PRINTS 1
+#endif
+
+#if CVMX_ENABLE_DEBUG_PRINTS
+#define cvmx_dprintf printk
+#else
+#define cvmx_dprintf(...) {}
+#endif
+
+#define CVMX_MAX_CORES (16)
+#define CVMX_CACHE_LINE_SIZE (128) /* In bytes */
+#define CVMX_CACHE_LINE_MASK (CVMX_CACHE_LINE_SIZE - 1) /* In bytes */
+#define CVMX_CACHE_LINE_ALIGNED __attribute__ ((aligned(CVMX_CACHE_LINE_SIZE)))
+#define CAST64(v) ((long long)(long)(v))
+#define CASTPTR(type, v) ((type *)(long)(v))
+
+/*
+ * Returns processor ID, different Linux and simple exec versions
+ * provided in the cvmx-app-init*.c files.
+ */
+static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
+static inline uint32_t cvmx_get_proc_id(void)
+{
+ uint32_t id;
+ asm("mfc0 %0, $15,0" : "=r"(id));
+ return id;
+}
+
+/* turn the variable name into a string */
+#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
+#define CVMX_TMP_STR2(x) #x
+
+/**
+ * Builds a bit mask given the required size in bits.
+ *
+ * @bits: Number of bits in the mask
+ * Returns The mask
+ */
+static inline uint64_t cvmx_build_mask(uint64_t bits)
+{
+ return ~((~0x0ull) << bits);
+}
+
+/**
+ * Builds a memory address for I/O based on the Major and Sub DID.
+ *
+ * @major_did: 5 bit major did
+ * @sub_did: 3 bit sub did
+ * Returns I/O base address
+ */
+static inline uint64_t cvmx_build_io_address(uint64_t major_did,
+ uint64_t sub_did)
+{
+ return (0x1ull << 48) | (major_did << 43) | (sub_did << 40);
+}
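+
+/*
+ * As a purely arithmetic illustration of the layout above, major DID 6
+ * with sub DID 0 yields 0x0001300000000000 (bit 48 set, plus 6 << 43).
+ */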
+
+/**
+ * Perform mask and shift to place the supplied value into
+ * the supplied bit range.
+ *
+ * Example: cvmx_build_bits(39,24,value)
+ * <pre>
+ * 6 5 4 3 3 2 1
+ * 3 5 7 9 1 3 5 7 0
+ * +-------+-------+-------+-------+-------+-------+-------+------+
+ * 000000000000000000000000___________value000000000000000000000000
+ * </pre>
+ *
+ * @high_bit: Highest bit value can occupy (inclusive) 0-63
+ * @low_bit: Lowest bit value can occupy inclusive 0-high_bit
+ * @value: Value to use
+ * Returns Value masked and shifted
+ */
+static inline uint64_t cvmx_build_bits(uint64_t high_bit,
+ uint64_t low_bit, uint64_t value)
+{
+ return (value & cvmx_build_mask(high_bit - low_bit + 1)) << low_bit;
+}
+
+/**
+ * Convert a memory pointer (void*) into a hardware compatible
+ * memory address (uint64_t). Octeon hardware widgets don't
+ * understand logical addresses.
+ *
+ * @ptr: C style memory pointer
+ * Returns Hardware physical address
+ */
+static inline uint64_t cvmx_ptr_to_phys(void *ptr)
+{
+ if (sizeof(void *) == 8) {
+ /*
+ * We're running in 64 bit mode. Normally this means
+ * that we can use 40 bits of address space (the
+ * hardware limit). Unfortunately there is one case
+		 * where we need to limit this to 30 bits, sign
+ * extended 32 bit. Although these are 64 bits wide,
+ * only 30 bits can be used.
+ */
+ if ((CAST64(ptr) >> 62) == 3)
+ return CAST64(ptr) & cvmx_build_mask(30);
+ else
+ return CAST64(ptr) & cvmx_build_mask(40);
+ } else {
+ return (long)(ptr) & 0x1fffffff;
+ }
+}
+
+/**
+ * Convert a hardware physical address (uint64_t) into a
+ * memory pointer (void *).
+ *
+ * @physical_address:
+ * Hardware physical address to memory
+ * Returns Pointer to memory
+ */
+static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
+{
+ if (sizeof(void *) == 8) {
+ /* Just set the top bit, avoiding any TLB ugliness */
+ return CASTPTR(void,
+ CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ physical_address));
+ } else {
+ return CASTPTR(void,
+ CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0,
+ physical_address));
+ }
+}
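+
+/*
+ * Illustrative round trip (the buffer name is hypothetical): hardware
+ * blocks are handed physical addresses, and the CPU converts them back
+ * to usable pointers:
+ *
+ *	uint64_t phys = cvmx_ptr_to_phys(buf);
+ *	void *virt = cvmx_phys_to_ptr(phys);	(virt aliases buf)
+ */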
+
+/* The following #if controls the definition of the macro
+ CVMX_BUILD_WRITE64. This macro is used to build a store operation to
+ a full 64bit address. With a 64bit ABI, this can be done with a simple
+ pointer access. 32bit ABIs require more complicated assembly */
+
+/* We have a full 64bit ABI. Writing to a 64bit address can be done with
+ a simple volatile pointer */
+#define CVMX_BUILD_WRITE64(TYPE, ST) \
+static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
+{ \
+ *CASTPTR(volatile TYPE##_t, addr) = val; \
+}
+
+
+/* The following #if controls the definition of the macro
+ CVMX_BUILD_READ64. This macro is used to build a load operation from
+ a full 64bit address. With a 64bit ABI, this can be done with a simple
+ pointer access. 32bit ABIs require more complicated assembly */
+
+/* We have a full 64bit ABI. Reading from a 64bit address can be done with
+ a simple volatile pointer */
+#define CVMX_BUILD_READ64(TYPE, LT) \
+static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr) \
+{ \
+ return *CASTPTR(volatile TYPE##_t, addr); \
+}
+
+
+/* The following defines 8 functions for writing to a 64bit address. Each
+ takes two arguments, the address and the value to write.
+ cvmx_write64_int64 cvmx_write64_uint64
+ cvmx_write64_int32 cvmx_write64_uint32
+ cvmx_write64_int16 cvmx_write64_uint16
+ cvmx_write64_int8 cvmx_write64_uint8 */
+CVMX_BUILD_WRITE64(int64, "sd");
+CVMX_BUILD_WRITE64(int32, "sw");
+CVMX_BUILD_WRITE64(int16, "sh");
+CVMX_BUILD_WRITE64(int8, "sb");
+CVMX_BUILD_WRITE64(uint64, "sd");
+CVMX_BUILD_WRITE64(uint32, "sw");
+CVMX_BUILD_WRITE64(uint16, "sh");
+CVMX_BUILD_WRITE64(uint8, "sb");
+#define cvmx_write64 cvmx_write64_uint64
+
+/* The following defines 8 functions for reading from a 64bit address. Each
+ takes the address as the only argument
+ cvmx_read64_int64 cvmx_read64_uint64
+ cvmx_read64_int32 cvmx_read64_uint32
+ cvmx_read64_int16 cvmx_read64_uint16
+ cvmx_read64_int8 cvmx_read64_uint8 */
+CVMX_BUILD_READ64(int64, "ld");
+CVMX_BUILD_READ64(int32, "lw");
+CVMX_BUILD_READ64(int16, "lh");
+CVMX_BUILD_READ64(int8, "lb");
+CVMX_BUILD_READ64(uint64, "ld");
+CVMX_BUILD_READ64(uint32, "lw");
+CVMX_BUILD_READ64(uint16, "lhu");
+CVMX_BUILD_READ64(uint8, "lbu");
+#define cvmx_read64 cvmx_read64_uint64
+
+
+static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
+{
+ cvmx_write64(csr_addr, val);
+
+ /*
+ * Perform an immediate read after every write to an RSL
+ * register to force the write to complete. It doesn't matter
+ * what RSL read we do, so we choose CVMX_MIO_BOOT_BIST_STAT
+ * because it is fast and harmless.
+ */
+ if (((csr_addr >> 40) & 0x7ffff) == (0x118))
+ cvmx_read64(CVMX_MIO_BOOT_BIST_STAT);
+}
+
+static inline void cvmx_writeq_csr(void __iomem *csr_addr, uint64_t val)
+{
+ cvmx_write_csr((__force uint64_t)csr_addr, val);
+}
+
+static inline void cvmx_write_io(uint64_t io_addr, uint64_t val)
+{
+ cvmx_write64(io_addr, val);
+
+}
+
+static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
+{
+ uint64_t val = cvmx_read64(csr_addr);
+ return val;
+}
+
+static inline uint64_t cvmx_readq_csr(void __iomem *csr_addr)
+{
+ return cvmx_read_csr((__force uint64_t) csr_addr);
+}
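+
+/*
+ * Minimal usage sketch (the register is chosen only for illustration):
+ * CSRs are addressed by their 64-bit XKPHYS I/O addresses, so a
+ * read-modify-write is simply
+ *
+ *	uint64_t v = cvmx_read_csr(CVMX_MIO_BOOT_BIST_STAT);
+ *	cvmx_write_csr(CVMX_MIO_BOOT_BIST_STAT, v);
+ */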
+
+static inline void cvmx_send_single(uint64_t data)
+{
+ const uint64_t CVMX_IOBDMA_SENDSINGLE = 0xffffffffffffa200ull;
+ cvmx_write64(CVMX_IOBDMA_SENDSINGLE, data);
+}
+
+static inline void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr)
+{
+ union {
+ uint64_t u64;
+ struct {
+ uint64_t scraddr:8;
+ uint64_t len:8;
+ uint64_t addr:48;
+ } s;
+ } addr;
+ addr.u64 = csr_addr;
+ addr.s.scraddr = scraddr >> 3;
+ addr.s.len = 1;
+ cvmx_send_single(addr.u64);
+}
+
+/* Return true if Octeon is CN38XX pass 1 */
+static inline int cvmx_octeon_is_pass1(void)
+{
+#if OCTEON_IS_COMMON_BINARY()
+ return 0; /* Pass 1 isn't supported for common binaries */
+#else
+/* Now that we know we're built for a specific model, only check CN38XX */
+#if OCTEON_IS_MODEL(OCTEON_CN38XX)
+ return cvmx_get_proc_id() == OCTEON_CN38XX_PASS1;
+#else
+ return 0; /* Built for non CN38XX chip, we're not CN38XX pass1 */
+#endif
+#endif
+}
+
+static inline unsigned int cvmx_get_core_num(void)
+{
+ unsigned int core_num;
+ CVMX_RDHWRNV(core_num, 0);
+ return core_num;
+}
+
+/* Maximum # of bits to define core in node */
+#define CVMX_NODE_NO_SHIFT 7
+#define CVMX_NODE_MASK 0x3
+static inline unsigned int cvmx_get_node_num(void)
+{
+ unsigned int core_num = cvmx_get_core_num();
+
+ return (core_num >> CVMX_NODE_NO_SHIFT) & CVMX_NODE_MASK;
+}
+
+static inline unsigned int cvmx_get_local_core_num(void)
+{
+ return cvmx_get_core_num() & ((1 << CVMX_NODE_NO_SHIFT) - 1);
+}
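+
+/*
+ * Worked example: on a multi-node system, core number 0x85 decomposes
+ * into node (0x85 >> 7) & 0x3 = 1 and local core 0x85 & 0x7f = 5,
+ * which is exactly what the two helpers above return.
+ */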
+
+#define CVMX_NODE_BITS (2) /* Number of bits to define a node */
+#define CVMX_MAX_NODES (1 << CVMX_NODE_BITS)
+#define CVMX_NODE_IO_SHIFT (36)
+#define CVMX_NODE_MEM_SHIFT (40)
+#define CVMX_NODE_IO_MASK ((uint64_t)CVMX_NODE_MASK << CVMX_NODE_IO_SHIFT)
+
+static inline void cvmx_write_csr_node(uint64_t node, uint64_t csr_addr,
+ uint64_t val)
+{
+ uint64_t composite_csr_addr, node_addr;
+
+ node_addr = (node & CVMX_NODE_MASK) << CVMX_NODE_IO_SHIFT;
+ composite_csr_addr = (csr_addr & ~CVMX_NODE_IO_MASK) | node_addr;
+
+ cvmx_write64_uint64(composite_csr_addr, val);
+ if (((csr_addr >> 40) & 0x7ffff) == (0x118))
+ cvmx_read64_uint64(CVMX_MIO_BOOT_BIST_STAT | node_addr);
+}
+
+static inline uint64_t cvmx_read_csr_node(uint64_t node, uint64_t csr_addr)
+{
+ uint64_t node_addr;
+
+ node_addr = (csr_addr & ~CVMX_NODE_IO_MASK) |
+ (node & CVMX_NODE_MASK) << CVMX_NODE_IO_SHIFT;
+ return cvmx_read_csr(node_addr);
+}
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for POP instruction.
+ *
+ * @val: 32 bit value to count set bits in
+ *
+ * Returns Number of bits set
+ */
+static inline uint32_t cvmx_pop(uint32_t val)
+{
+ uint32_t pop;
+ CVMX_POP(pop, val);
+ return pop;
+}
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for DPOP instruction.
+ *
+ * @val: 64 bit value to count set bits in
+ *
+ * Returns Number of bits set
+ */
+static inline int cvmx_dpop(uint64_t val)
+{
+ int pop;
+ CVMX_DPOP(pop, val);
+ return pop;
+}
+
+/**
+ * Provide current cycle counter as a return value
+ *
+ * Returns current cycle counter
+ */
+
+static inline uint64_t cvmx_get_cycle(void)
+{
+ uint64_t cycle;
+ CVMX_RDHWR(cycle, 31);
+ return cycle;
+}
+
+/**
+ * Reads a chip global cycle counter. This counts CPU cycles since
+ * chip reset. The counter is 64 bit.
+ * This register does not exist on CN38XX pass 1 silicon.
+ *
+ * Returns Global chip cycle count since chip reset.
+ */
+static inline uint64_t cvmx_get_cycle_global(void)
+{
+ if (cvmx_octeon_is_pass1())
+ return 0;
+ else
+ return cvmx_read64(CVMX_IPD_CLK_COUNT);
+}
+
+/**
+ * This macro spins on a field waiting for it to reach a value. It
+ * is common in code to need to wait for a specific field in a CSR
+ * to match a specific value. Conceptually this macro expands to:
+ *
+ * 1) read csr at "address" with a csr typedef of "type"
+ * 2) Check if ("type".s."field" "op" "value")
+ * 3) If #2 isn't true loop to #1 unless too much time has passed.
+ */
+#define CVMX_WAIT_FOR_FIELD64(address, type, field, op, value, timeout_usec)\
+ ( \
+{ \
+ int result; \
+ do { \
+ uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
+ cvmx_sysinfo_get()->cpu_clock_hz / 1000000; \
+ type c; \
+ while (1) { \
+ c.u64 = cvmx_read_csr(address); \
+ if ((c.s.field) op(value)) { \
+ result = 0; \
+ break; \
+ } else if (cvmx_get_cycle() > done) { \
+ result = -1; \
+ break; \
+ } else \
+ __delay(100); \
+ } \
+ } while (0); \
+ result; \
+})
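+
+/*
+ * Typical use (register and field names below are illustrative only):
+ * wait up to 1000us for an enable bit to read back as set, treating a
+ * non-zero return as a timeout:
+ *
+ *	if (CVMX_WAIT_FOR_FIELD64(CVMX_IPD_CTL_STATUS,
+ *				  union cvmx_ipd_ctl_status,
+ *				  ipd_en, ==, 1, 1000))
+ *		cvmx_dprintf("IPD did not enable\n");
+ */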
+
+/***************************************************************************/
+
+/* Return the number of cores available in the chip */
+static inline uint32_t cvmx_octeon_num_cores(void)
+{
+ u64 ciu_fuse_reg;
+ u64 ciu_fuse;
+
+ if (OCTEON_IS_OCTEON3() && !OCTEON_IS_MODEL(OCTEON_CN70XX))
+ ciu_fuse_reg = CVMX_CIU3_FUSE;
+ else
+ ciu_fuse_reg = CVMX_CIU_FUSE;
+ ciu_fuse = cvmx_read_csr(ciu_fuse_reg);
+ return cvmx_dpop(ciu_fuse);
+}
+
+#endif /* __CVMX_H__ */
diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h
new file mode 100644
index 000000000..a19ca3b27
--- /dev/null
+++ b/arch/mips/include/asm/octeon/octeon-feature.h
@@ -0,0 +1,213 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * File defining checks for different Octeon features.
+ */
+
+#ifndef __OCTEON_FEATURE_H__
+#define __OCTEON_FEATURE_H__
+#include <asm/octeon/cvmx-mio-defs.h>
+#include <asm/octeon/cvmx-rnm-defs.h>
+
+enum octeon_feature {
+ /* CN68XX uses port kinds for packet interface */
+ OCTEON_FEATURE_PKND,
+ /* CN68XX has different fields in word0 - word2 */
+ OCTEON_FEATURE_CN68XX_WQE,
+ /*
+ * Octeon models in the CN5XXX family and higher support
+ * atomic add instructions to memory (saa/saad).
+ */
+ OCTEON_FEATURE_SAAD,
+ /* Does this Octeon support the ZIP offload engine? */
+ OCTEON_FEATURE_ZIP,
+ OCTEON_FEATURE_DORM_CRYPTO,
+ /* Does this Octeon support PCI express? */
+ OCTEON_FEATURE_PCIE,
+ /* Does this Octeon support SRIOs */
+ OCTEON_FEATURE_SRIO,
+ /* Does this Octeon support Interlaken */
+ OCTEON_FEATURE_ILK,
+ /* Some Octeon models support internal memory for storing
+ * cryptographic keys */
+ OCTEON_FEATURE_KEY_MEMORY,
+ /* Octeon has a LED controller for banks of external LEDs */
+ OCTEON_FEATURE_LED_CONTROLLER,
+ /* Octeon has a trace buffer */
+ OCTEON_FEATURE_TRA,
+ /* Octeon has a management port */
+ OCTEON_FEATURE_MGMT_PORT,
+ /* Octeon has a raid unit */
+ OCTEON_FEATURE_RAID,
+ /* Octeon has a builtin USB */
+ OCTEON_FEATURE_USB,
+ /* Octeon IPD can run without using work queue entries */
+ OCTEON_FEATURE_NO_WPTR,
+ /* Octeon has DFA state machines */
+ OCTEON_FEATURE_DFA,
+ /* Octeon MDIO block supports clause 45 transactions for 10
+ * Gig support */
+ OCTEON_FEATURE_MDIO_CLAUSE_45,
+ /*
+ * CN52XX and CN56XX used a block named NPEI for PCIe
+ * access. Newer chips replaced this with SLI+DPI.
+ */
+ OCTEON_FEATURE_NPEI,
+ OCTEON_FEATURE_HFA,
+ OCTEON_FEATURE_DFM,
+ OCTEON_FEATURE_CIU2,
+ OCTEON_FEATURE_CIU3,
+ /* Octeon has FPA first seen on 78XX */
+ OCTEON_FEATURE_FPA3,
+ OCTEON_FEATURE_FAU,
+ OCTEON_MAX_FEATURE
+};
+
+enum octeon_feature_bits {
+ OCTEON_HAS_CRYPTO = 0x0001, /* Crypto acceleration using COP2 */
+};
+extern enum octeon_feature_bits __octeon_feature_bits;
+
+/**
+ * octeon_has_crypto() - Check if this OCTEON has crypto acceleration support.
+ *
+ * Returns: Non-zero if the feature exists. Zero if the feature does not exist.
+ */
+static inline int octeon_has_crypto(void)
+{
+ return __octeon_feature_bits & OCTEON_HAS_CRYPTO;
+}
+
+/**
+ * Determine if the current Octeon supports a specific feature. These
+ * checks have been optimized to be fairly quick, but they should still
+ * be kept out of fast path code.
+ *
+ * @feature: Feature to check for. This should always be a constant so the
+ * compiler can remove the switch statement through optimization.
+ *
+ * Returns Non zero if the feature exists. Zero if the feature does not
+ * exist.
+ */
+static inline bool octeon_has_feature(enum octeon_feature feature)
+{
+ switch (feature) {
+ case OCTEON_FEATURE_SAAD:
+ return !OCTEON_IS_MODEL(OCTEON_CN3XXX);
+
+ case OCTEON_FEATURE_DORM_CRYPTO:
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ union cvmx_mio_fus_dat2 fus_2;
+ fus_2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
+ return !fus_2.s.nocrypto && !fus_2.s.nomul && fus_2.s.dorm_crypto;
+ } else {
+ return false;
+ }
+
+ case OCTEON_FEATURE_PCIE:
+ return OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX)
+ || OCTEON_IS_MODEL(OCTEON_CN6XXX)
+ || OCTEON_IS_MODEL(OCTEON_CN7XXX);
+
+ case OCTEON_FEATURE_SRIO:
+ return OCTEON_IS_MODEL(OCTEON_CN63XX)
+ || OCTEON_IS_MODEL(OCTEON_CN66XX);
+
+ case OCTEON_FEATURE_ILK:
+ return (OCTEON_IS_MODEL(OCTEON_CN68XX));
+
+ case OCTEON_FEATURE_KEY_MEMORY:
+ return OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)
+ || OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN6XXX);
+
+ case OCTEON_FEATURE_LED_CONTROLLER:
+ return OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)
+ || OCTEON_IS_MODEL(OCTEON_CN56XX);
+
+ case OCTEON_FEATURE_TRA:
+ return !(OCTEON_IS_MODEL(OCTEON_CN30XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX));
+ case OCTEON_FEATURE_MGMT_PORT:
+ return OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX)
+ || OCTEON_IS_MODEL(OCTEON_CN6XXX);
+
+ case OCTEON_FEATURE_RAID:
+ return OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX)
+ || OCTEON_IS_MODEL(OCTEON_CN6XXX);
+
+ case OCTEON_FEATURE_USB:
+ return !(OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX));
+
+ case OCTEON_FEATURE_NO_WPTR:
+ return (OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX)
+ || OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
+ && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X);
+
+ case OCTEON_FEATURE_MDIO_CLAUSE_45:
+ return !(OCTEON_IS_MODEL(OCTEON_CN3XXX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX));
+
+ case OCTEON_FEATURE_NPEI:
+ return OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX);
+
+ case OCTEON_FEATURE_PKND:
+ return OCTEON_IS_MODEL(OCTEON_CN68XX);
+
+ case OCTEON_FEATURE_CN68XX_WQE:
+ return OCTEON_IS_MODEL(OCTEON_CN68XX);
+
+ case OCTEON_FEATURE_CIU2:
+ return OCTEON_IS_MODEL(OCTEON_CN68XX);
+ case OCTEON_FEATURE_CIU3:
+ case OCTEON_FEATURE_FPA3:
+ return OCTEON_IS_MODEL(OCTEON_CN78XX)
+ || OCTEON_IS_MODEL(OCTEON_CNF75XX)
+ || OCTEON_IS_MODEL(OCTEON_CN73XX);
+ case OCTEON_FEATURE_FAU:
+ return !(OCTEON_IS_MODEL(OCTEON_CN78XX)
+ || OCTEON_IS_MODEL(OCTEON_CNF75XX)
+ || OCTEON_IS_MODEL(OCTEON_CN73XX));
+
+ default:
+ break;
+ }
+ return false;
+}
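+
+/*
+ * Example (illustrative; the helper name is hypothetical): callers
+ * simply branch on the feature at runtime, and with a constant
+ * argument the compiler folds the switch above down to the model
+ * checks for that single feature:
+ *
+ *	if (octeon_has_feature(OCTEON_FEATURE_PCIE))
+ *		octeon_setup_pcie();
+ */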
+
+#endif /* __OCTEON_FEATURE_H__ */
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h
new file mode 100644
index 000000000..6c68517c2
--- /dev/null
+++ b/arch/mips/include/asm/octeon/octeon-model.h
@@ -0,0 +1,409 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2010 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+#ifndef __OCTEON_MODEL_H__
+#define __OCTEON_MODEL_H__
+
+/*
+ * The defines below should be used with the OCTEON_IS_MODEL() macro
+ * to determine what model of chip the software is running on. Models
+ * ending in 'XX' match multiple models (families), while specific
+ * models match only that model. If a pass (revision) is specified,
+ * then only that revision will be matched. Care should be taken when
+ * checking for both specific models and families that the specific
+ * models are checked for first. While these defines are similar to
+ * the processor ID, they are not intended to be used by anything
+ * other than the OCTEON_IS_MODEL framework, and the values are
+ * subject to change at any time without notice.
+ *
+ * NOTE: only the OCTEON_IS_MODEL() macro/function and the OCTEON_CN*
+ * macros should be used outside of this file. All other macros are
+ * for internal use only, and may change without notice.
+ */
+
+#define OCTEON_FAMILY_MASK 0x00ffff00
+#define OCTEON_PRID_MASK 0x00ffffff
+
+/* Flag bits in top byte */
+/* Ignores revision in model checks */
+#define OM_IGNORE_REVISION 0x01000000
+/* Check submodels */
+#define OM_CHECK_SUBMODEL 0x02000000
+/* Match all models previous than the one specified */
+#define OM_MATCH_PREVIOUS_MODELS 0x04000000
+/* Ignores the minor revision on newer parts */
+#define OM_IGNORE_MINOR_REVISION 0x08000000
+#define OM_FLAG_MASK 0xff000000
+
+/* Match all cn5XXX Octeon models. */
+#define OM_MATCH_5XXX_FAMILY_MODELS 0x20000000
+/* Match all cn6XXX Octeon models. */
+#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000
+/* Match all cnf7XXX Octeon models. */
+#define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000
+/* Match all cn7XXX Octeon models. */
+#define OM_MATCH_7XXX_FAMILY_MODELS 0x10000000
+#define OM_MATCH_FAMILY_MODELS (OM_MATCH_5XXX_FAMILY_MODELS | \
+ OM_MATCH_6XXX_FAMILY_MODELS | \
+ OM_MATCH_F7XXX_FAMILY_MODELS | \
+ OM_MATCH_7XXX_FAMILY_MODELS)
+/*
+ * CN7XXX models with new revision encoding
+ */
+
+#define OCTEON_CNF75XX_PASS1_0 0x000d9800
+#define OCTEON_CNF75XX (OCTEON_CNF75XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CNF75XX_PASS1_X (OCTEON_CNF75XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN73XX_PASS1_0 0x000d9700
+#define OCTEON_CN73XX_PASS1_1 0x000d9701
+#define OCTEON_CN73XX (OCTEON_CN73XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN73XX_PASS1_X (OCTEON_CN73XX_PASS1_0 | \
+ OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN70XX_PASS1_0 0x000d9600
+#define OCTEON_CN70XX_PASS1_1 0x000d9601
+#define OCTEON_CN70XX_PASS1_2 0x000d9602
+
+#define OCTEON_CN70XX_PASS2_0 0x000d9608
+
+#define OCTEON_CN70XX (OCTEON_CN70XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN70XX_PASS1_X (OCTEON_CN70XX_PASS1_0 | \
+ OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN70XX_PASS2_X (OCTEON_CN70XX_PASS2_0 | \
+ OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN71XX OCTEON_CN70XX
+
+#define OCTEON_CN78XX_PASS1_0 0x000d9500
+#define OCTEON_CN78XX_PASS1_1 0x000d9501
+#define OCTEON_CN78XX_PASS2_0 0x000d9508
+
+#define OCTEON_CN78XX (OCTEON_CN78XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN78XX_PASS1_X (OCTEON_CN78XX_PASS1_0 | \
+ OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN78XX_PASS2_X (OCTEON_CN78XX_PASS2_0 | \
+ OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN76XX (0x000d9540 | OM_CHECK_SUBMODEL)
+
+/*
+ * CNF7XXX models with new revision encoding
+ */
+#define OCTEON_CNF71XX_PASS1_0 0x000d9400
+#define OCTEON_CNF71XX_PASS1_1 0x000d9401
+
+#define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+
+/*
+ * CN6XXX models with new revision encoding
+ */
+#define OCTEON_CN68XX_PASS1_0 0x000d9100
+#define OCTEON_CN68XX_PASS1_1 0x000d9101
+#define OCTEON_CN68XX_PASS1_2 0x000d9102
+#define OCTEON_CN68XX_PASS2_0 0x000d9108
+#define OCTEON_CN68XX_PASS2_1 0x000d9109
+#define OCTEON_CN68XX_PASS2_2 0x000d910a
+
+#define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN68XX_PASS2_X (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN68XX_PASS1 OCTEON_CN68XX_PASS1_X
+#define OCTEON_CN68XX_PASS2 OCTEON_CN68XX_PASS2_X
+
+#define OCTEON_CN66XX_PASS1_0 0x000d9200
+#define OCTEON_CN66XX_PASS1_2 0x000d9202
+
+#define OCTEON_CN66XX (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN66XX_PASS1_X (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN63XX_PASS1_0 0x000d9000
+#define OCTEON_CN63XX_PASS1_1 0x000d9001
+#define OCTEON_CN63XX_PASS1_2 0x000d9002
+#define OCTEON_CN63XX_PASS2_0 0x000d9008
+#define OCTEON_CN63XX_PASS2_1 0x000d9009
+#define OCTEON_CN63XX_PASS2_2 0x000d900a
+
+#define OCTEON_CN63XX (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+
+/* CN62XX is same as CN63XX with 1 MB cache */
+#define OCTEON_CN62XX OCTEON_CN63XX
+
+#define OCTEON_CN61XX_PASS1_0 0x000d9300
+#define OCTEON_CN61XX_PASS1_1 0x000d9301
+
+#define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+
+/* CN60XX is same as CN61XX with 512 KB cache */
+#define OCTEON_CN60XX OCTEON_CN61XX
+
+/*
+ * CN5XXX models with new revision encoding
+ */
+#define OCTEON_CN58XX_PASS1_0 0x000d0300
+#define OCTEON_CN58XX_PASS1_1 0x000d0301
+#define OCTEON_CN58XX_PASS1_2 0x000d0303
+#define OCTEON_CN58XX_PASS2_0 0x000d0308
+#define OCTEON_CN58XX_PASS2_1 0x000d0309
+#define OCTEON_CN58XX_PASS2_2 0x000d030a
+#define OCTEON_CN58XX_PASS2_3 0x000d030b
+
+#define OCTEON_CN58XX (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X
+#define OCTEON_CN58XX_PASS2 OCTEON_CN58XX_PASS2_X
+
+#define OCTEON_CN56XX_PASS1_0 0x000d0400
+#define OCTEON_CN56XX_PASS1_1 0x000d0401
+#define OCTEON_CN56XX_PASS2_0 0x000d0408
+#define OCTEON_CN56XX_PASS2_1 0x000d0409
+
+#define OCTEON_CN56XX (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN56XX_PASS1_X (OCTEON_CN56XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN56XX_PASS2_X (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN56XX_PASS1 OCTEON_CN56XX_PASS1_X
+#define OCTEON_CN56XX_PASS2 OCTEON_CN56XX_PASS2_X
+
+#define OCTEON_CN57XX OCTEON_CN56XX
+#define OCTEON_CN57XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN57XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN55XX OCTEON_CN56XX
+#define OCTEON_CN55XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN55XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN54XX OCTEON_CN56XX
+#define OCTEON_CN54XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN54XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN50XX_PASS1_0 0x000d0600
+
+#define OCTEON_CN50XX (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN50XX_PASS1_X (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN50XX_PASS1 OCTEON_CN50XX_PASS1_X
+
+/*
+ * NOTE: Octeon CN5000F model is not identifiable using the
+ * OCTEON_IS_MODEL() functions, but is treated as CN50XX.
+ */
+
+#define OCTEON_CN52XX_PASS1_0 0x000d0700
+#define OCTEON_CN52XX_PASS2_0 0x000d0708
+
+#define OCTEON_CN52XX (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN52XX_PASS1_X (OCTEON_CN52XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN52XX_PASS2_X (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN52XX_PASS1 OCTEON_CN52XX_PASS1_X
+#define OCTEON_CN52XX_PASS2 OCTEON_CN52XX_PASS2_X
+
+/*
+ * CN3XXX models with old revision enconding
+ */
+#define OCTEON_CN38XX_PASS1 0x000d0000
+#define OCTEON_CN38XX_PASS2 0x000d0001
+#define OCTEON_CN38XX_PASS3 0x000d0003
+#define OCTEON_CN38XX (OCTEON_CN38XX_PASS3 | OM_IGNORE_REVISION)
+
+#define OCTEON_CN36XX OCTEON_CN38XX
+#define OCTEON_CN36XX_PASS2 OCTEON_CN38XX_PASS2
+#define OCTEON_CN36XX_PASS3 OCTEON_CN38XX_PASS3
+
+/* The OCTEON_CN31XX matches CN31XX models and the CN3020 */
+#define OCTEON_CN31XX_PASS1 0x000d0100
+#define OCTEON_CN31XX_PASS1_1 0x000d0102
+#define OCTEON_CN31XX (OCTEON_CN31XX_PASS1 | OM_IGNORE_REVISION)
+
+/*
+ * This model is only used for internal checks, it is not a valid
+ * model for the OCTEON_MODEL environment variable. This matches the
+ * CN3010 and CN3005 but NOT the CN3020.
+ */
+#define OCTEON_CN30XX_PASS1 0x000d0200
+#define OCTEON_CN30XX_PASS1_1 0x000d0202
+#define OCTEON_CN30XX (OCTEON_CN30XX_PASS1 | OM_IGNORE_REVISION)
+
+#define OCTEON_CN3005_PASS1 (0x000d0210 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1_0 (0x000d0210 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1_1 (0x000d0212 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005 (OCTEON_CN3005_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+
+#define OCTEON_CN3010_PASS1 (0x000d0200 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1_0 (0x000d0200 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1_1 (0x000d0202 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010 (OCTEON_CN3010_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+
+#define OCTEON_CN3020_PASS1 (0x000d0110 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1_0 (0x000d0110 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1_1 (0x000d0112 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020 (OCTEON_CN3020_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+
+/*
+ * This matches the complete family of CN3xxx CPUs, and not subsequent
+ * models
+ */
+#define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
+#define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
+#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
+#define OCTEON_CNF7XXX (OCTEON_CNF71XX_PASS1_0 | \
+ OM_MATCH_F7XXX_FAMILY_MODELS)
+#define OCTEON_CN7XXX (OCTEON_CN78XX_PASS1_0 | \
+ OM_MATCH_7XXX_FAMILY_MODELS)
+
+/* The revision byte (low byte) has two different encodings.
+ * CN3XXX:
+ *
+ * bits
+ * <7:5>: reserved (0)
+ * <4>: alternate package
+ * <3:0>: revision
+ *
+ * CN5XXX and newer models:
+ *
+ * bits
+ * <7>: reserved (0)
+ * <6>: alternate package
+ * <5:3>: major revision
+ * <2:0>: minor revision
+ *
+ */
+
+/* Masks used for the various types of model/family/revision matching */
+#define OCTEON_38XX_FAMILY_MASK 0x00ffff00
+#define OCTEON_38XX_FAMILY_REV_MASK 0x00ffff0f
+#define OCTEON_38XX_MODEL_MASK 0x00ffff10
+#define OCTEON_38XX_MODEL_REV_MASK (OCTEON_38XX_FAMILY_REV_MASK | OCTEON_38XX_MODEL_MASK)
+
+/* CN5XXX and later use different layout of bits in the revision ID field */
+#define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK
+#define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f
+#define OCTEON_58XX_MODEL_MASK 0x00ffff40
+#define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK)
+#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00ffff38)
+#define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0
+
+static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
+static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
+
+#define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z)))
+
+/*
+ * __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model)
+ * returns true if chip_model is identical to or belongs to the OCTEON
+ * model group specified in arg_model.
+ */
+/* NOTE: This for internal use only! */
+#define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) \
+((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \
+ ((((arg_model) & (OM_FLAG_MASK)) == (OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_38XX_MODEL_MASK)) || \
+ ((((arg_model) & (OM_FLAG_MASK)) == 0) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_38XX_FAMILY_REV_MASK)) || \
+ ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_38XX_FAMILY_MASK)) || \
+ ((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_38XX_MODEL_REV_MASK)) || \
+ ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
+ && (((chip_model) & OCTEON_38XX_MODEL_MASK) < ((arg_model) & OCTEON_38XX_MODEL_MASK))) \
+ )) || \
+ (((arg_model & OCTEON_38XX_FAMILY_MASK) >= OCTEON_CN58XX_PASS1_0) && ( \
+ ((((arg_model) & (OM_FLAG_MASK)) == (OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \
+ ((((arg_model) & (OM_FLAG_MASK)) == 0) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_REV_MASK)) || \
+ ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_MINOR_REVISION) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MINOR_REV_MASK)) || \
+ ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \
+ ((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL) \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \
+ ((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \
+ && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN58XX_PASS1_0) \
+ && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN63XX_PASS1_0)) || \
+ ((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \
+ && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN63XX_PASS1_0) \
+ && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CNF71XX_PASS1_0)) || \
+ ((((arg_model) & (OM_MATCH_F7XXX_FAMILY_MODELS)) == OM_MATCH_F7XXX_FAMILY_MODELS) \
+ && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CNF71XX_PASS1_0) \
+ && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN78XX_PASS1_0)) || \
+ ((((arg_model) & (OM_MATCH_7XXX_FAMILY_MODELS)) == OM_MATCH_7XXX_FAMILY_MODELS) \
+ && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN78XX_PASS1_0)) || \
+ ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
+ && (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \
+ )))
+
+/* NOTE: This for internal use only!!!!! */
+static inline int __octeon_is_model_runtime__(uint32_t model)
+{
+ uint32_t cpuid = cvmx_get_proc_id();
+
+ return __OCTEON_IS_MODEL_COMPILE__(model, cpuid);
+}
+
+/*
+ * The OCTEON_IS_MODEL macro should be used for all Octeon model checking done
+ * in a program.
+ * This should be kept runtime if at all possible and must be conditionalized
+ * with OCTEON_IS_COMMON_BINARY() if runtime checking support is required.
+ *
+ * Use of the macro in preprocessor directives ( #if OCTEON_IS_MODEL(...) )
+ * is NOT SUPPORTED, and should be replaced with CVMX_COMPILED_FOR()
+ * I.e.:
+ * #if OCTEON_IS_MODEL(OCTEON_CN56XX) -> #if CVMX_COMPILED_FOR(OCTEON_CN56XX)
+ */
+#define OCTEON_IS_MODEL(x) __octeon_is_model_runtime__(x)
+#define OCTEON_IS_COMMON_BINARY() 1
+#undef OCTEON_MODEL
+
+#define OCTEON_IS_OCTEON1() OCTEON_IS_MODEL(OCTEON_CN3XXX)
+#define OCTEON_IS_OCTEONPLUS() OCTEON_IS_MODEL(OCTEON_CN5XXX)
+#define OCTEON_IS_OCTEON2() \
+ (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+
+#define OCTEON_IS_OCTEON3() OCTEON_IS_MODEL(OCTEON_CN7XXX)
+
+#define OCTEON_IS_OCTEON1PLUS() (OCTEON_IS_OCTEON1() || OCTEON_IS_OCTEONPLUS())
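+
+/*
+ * Illustrative example (the helper names are hypothetical): runtime
+ * model checks compose naturally, with specific models tested before
+ * families:
+ *
+ *	if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X))
+ *		apply_cn68xx_pass2_workaround();
+ *	else if (OCTEON_IS_OCTEON2())
+ *		use_generic_octeon2_path();
+ */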
+
+const char *__init octeon_model_get_string(uint32_t chip_id);
+
+/*
+ * Return the octeon family, i.e., ProcessorID of the PrID register.
+ *
+ * @return the octeon family on success, ((uint32_t)-1) on error.
+ */
+static inline uint32_t cvmx_get_octeon_family(void)
+{
+ return cvmx_get_proc_id() & OCTEON_FAMILY_MASK;
+}
+
+#include <asm/octeon/octeon-feature.h>
+
+#endif /* __OCTEON_MODEL_H__ */
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
new file mode 100644
index 000000000..08d48f37c
--- /dev/null
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -0,0 +1,366 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2008 Cavium Networks
+ */
+#ifndef __ASM_OCTEON_OCTEON_H
+#define __ASM_OCTEON_OCTEON_H
+
+#include <asm/octeon/cvmx.h>
+#include <asm/bitfield.h>
+
+extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size,
+ uint64_t alignment,
+ uint64_t min_addr,
+ uint64_t max_addr,
+ int do_locking);
+extern void *octeon_bootmem_alloc(uint64_t size, uint64_t alignment,
+ int do_locking);
+extern void *octeon_bootmem_alloc_range(uint64_t size, uint64_t alignment,
+ uint64_t min_addr, uint64_t max_addr,
+ int do_locking);
+extern void *octeon_bootmem_alloc_named(uint64_t size, uint64_t alignment,
+ char *name);
+extern void *octeon_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr,
+ uint64_t max_addr, uint64_t align,
+ char *name);
+extern void *octeon_bootmem_alloc_named_address(uint64_t size, uint64_t address,
+ char *name);
+extern int octeon_bootmem_free_named(char *name);
+extern void octeon_bootmem_lock(void);
+extern void octeon_bootmem_unlock(void);
+
+extern int octeon_is_simulation(void);
+extern int octeon_is_pci_host(void);
+extern int octeon_usb_is_ref_clk(void);
+extern uint64_t octeon_get_clock_rate(void);
+extern u64 octeon_get_io_clock_rate(void);
+extern const char *octeon_board_type_string(void);
+extern const char *octeon_get_pci_interrupts(void);
+extern int octeon_get_southbridge_interrupt(void);
+extern int octeon_get_boot_coremask(void);
+extern int octeon_get_boot_num_arguments(void);
+extern const char *octeon_get_boot_argument(int arg);
+extern void octeon_hal_setup_reserved32(void);
+extern void octeon_user_io_init(void);
+
+extern void octeon_init_cvmcount(void);
+extern void octeon_setup_delays(void);
+extern void octeon_io_clk_delay(unsigned long);
+
+#define OCTEON_ARGV_MAX_ARGS 64
+#define OCTEON_SERIAL_LEN 20
+
+struct octeon_boot_descriptor {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* Start of block referenced by assembly code - do not change! */
+ uint32_t desc_version;
+ uint32_t desc_size;
+ uint64_t stack_top;
+ uint64_t heap_base;
+ uint64_t heap_end;
+ /* Only used by bootloader */
+ uint64_t entry_point;
+ uint64_t desc_vaddr;
+	/* End of this block referenced by assembly code - do not change! */
+ uint32_t exception_base_addr;
+ uint32_t stack_size;
+ uint32_t heap_size;
+ /* Argc count for application. */
+ uint32_t argc;
+ uint32_t argv[OCTEON_ARGV_MAX_ARGS];
+
+#define BOOT_FLAG_INIT_CORE (1 << 0)
+#define OCTEON_BL_FLAG_DEBUG (1 << 1)
+#define OCTEON_BL_FLAG_NO_MAGIC (1 << 2)
+ /* If set, use uart1 for console */
+#define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3)
+ /* If set, use PCI console */
+#define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4)
+ /* Call exit on break on serial port */
+#define OCTEON_BL_FLAG_BREAK (1 << 5)
+
+ uint32_t flags;
+ uint32_t core_mask;
+	/* DRAM size in megabytes. */
+ uint32_t dram_size;
+ /* physical address of free memory descriptor block. */
+ uint32_t phy_mem_desc_addr;
+ /* used to pass flags from app to debugger. */
+ uint32_t debugger_flags_base_addr;
+ /* CPU clock speed, in hz. */
+ uint32_t eclock_hz;
+ /* DRAM clock speed, in hz. */
+ uint32_t dclock_hz;
+ /* SPI4 clock in hz. */
+ uint32_t spi_clock_hz;
+ uint16_t board_type;
+ uint8_t board_rev_major;
+ uint8_t board_rev_minor;
+ uint16_t chip_type;
+ uint8_t chip_rev_major;
+ uint8_t chip_rev_minor;
+ char board_serial_number[OCTEON_SERIAL_LEN];
+ uint8_t mac_addr_base[6];
+ uint8_t mac_addr_count;
+ uint64_t cvmx_desc_vaddr;
+#else
+ uint32_t desc_size;
+ uint32_t desc_version;
+ uint64_t stack_top;
+ uint64_t heap_base;
+ uint64_t heap_end;
+ /* Only used by bootloader */
+ uint64_t entry_point;
+ uint64_t desc_vaddr;
+	/* End of this block referenced by assembly code - do not change! */
+ uint32_t stack_size;
+ uint32_t exception_base_addr;
+ uint32_t argc;
+ uint32_t heap_size;
+ /*
+ * Argc count for application.
+ * Warning low bit scrambled in little-endian.
+ */
+ uint32_t argv[OCTEON_ARGV_MAX_ARGS];
+
+#define BOOT_FLAG_INIT_CORE (1 << 0)
+#define OCTEON_BL_FLAG_DEBUG (1 << 1)
+#define OCTEON_BL_FLAG_NO_MAGIC (1 << 2)
+ /* If set, use uart1 for console */
+#define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3)
+ /* If set, use PCI console */
+#define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4)
+ /* Call exit on break on serial port */
+#define OCTEON_BL_FLAG_BREAK (1 << 5)
+
+ uint32_t core_mask;
+ uint32_t flags;
+ /* physical address of free memory descriptor block. */
+ uint32_t phy_mem_desc_addr;
+	/* DRAM size in megabytes. */
+ uint32_t dram_size;
+ /* CPU clock speed, in hz. */
+ uint32_t eclock_hz;
+ /* used to pass flags from app to debugger. */
+ uint32_t debugger_flags_base_addr;
+ /* SPI4 clock in hz. */
+ uint32_t spi_clock_hz;
+ /* DRAM clock speed, in hz. */
+ uint32_t dclock_hz;
+ uint8_t chip_rev_minor;
+ uint8_t chip_rev_major;
+ uint16_t chip_type;
+ uint8_t board_rev_minor;
+ uint8_t board_rev_major;
+ uint16_t board_type;
+
+ uint64_t unused1[4]; /* Not even filled in by bootloader. */
+
+ uint64_t cvmx_desc_vaddr;
+#endif
+};
+
+union octeon_cvmemctl {
+ uint64_t u64;
+ struct {
+ /* RO 1 = BIST fail, 0 = BIST pass */
+ __BITFIELD_FIELD(uint64_t tlbbist:1,
+ /* RO 1 = BIST fail, 0 = BIST pass */
+ __BITFIELD_FIELD(uint64_t l1cbist:1,
+ /* RO 1 = BIST fail, 0 = BIST pass */
+ __BITFIELD_FIELD(uint64_t l1dbist:1,
+ /* RO 1 = BIST fail, 0 = BIST pass */
+ __BITFIELD_FIELD(uint64_t dcmbist:1,
+ /* RO 1 = BIST fail, 0 = BIST pass */
+ __BITFIELD_FIELD(uint64_t ptgbist:1,
+ /* RO 1 = BIST fail, 0 = BIST pass */
+ __BITFIELD_FIELD(uint64_t wbfbist:1,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved:17,
+ /* OCTEON II - TLB replacement policy: 0 = bitmask LRU; 1 = NLU.
+ * This field selects between the TLB replacement policies:
+ * bitmask LRU or NLU. Bitmask LRU maintains a mask of
+ * recently used TLB entries and avoids them as new entries
+ * are allocated. NLU simply guarantees that the next
+ * allocation is not the last used TLB entry. */
+ __BITFIELD_FIELD(uint64_t tlbnlu:1,
+ /* OCTEON II - Selects the bit in the counter used for
+		 * releasing a PAUSE. This counter trips every 2^(8+PAUSETIME)
+		 * cycles. If not already released, the cnMIPS II core will
+		 * always release a given PAUSE instruction within
+		 * 2^(8+PAUSETIME) cycles. If the counter trip happens to line up,
+ * the cnMIPS II core may release the PAUSE instantly. */
+ __BITFIELD_FIELD(uint64_t pausetime:3,
+ /* OCTEON II - This field is an extension of
+ * CvmMemCtl[DIDTTO] */
+ __BITFIELD_FIELD(uint64_t didtto2:1,
+ /* R/W If set, marked write-buffer entries time out
+ * the same as other entries; if clear, marked
+ * write-buffer entries use the maximum timeout. */
+ __BITFIELD_FIELD(uint64_t dismarkwblongto:1,
+ /* R/W If set, a merged store does not clear the
+ * write-buffer entry timeout state. */
+ __BITFIELD_FIELD(uint64_t dismrgclrwbto:1,
+ /* R/W Two bits that are the MSBs of the resultant
+ * CVMSEG LM word location for an IOBDMA. The other 8
+ * bits come from the SCRADDR field of the IOBDMA. */
+ __BITFIELD_FIELD(uint64_t iobdmascrmsb:2,
+ /* R/W If set, SYNCWS and SYNCS only order marked
+ * stores; if clear, SYNCWS and SYNCS only order
+ * unmarked stores. SYNCWSMARKED has no effect when
+ * DISSYNCWS is set. */
+ __BITFIELD_FIELD(uint64_t syncwsmarked:1,
+ /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as
+ * SYNC. */
+ __BITFIELD_FIELD(uint64_t dissyncws:1,
+ /* R/W If set, no stall happens on write buffer
+ * full. */
+ __BITFIELD_FIELD(uint64_t diswbfst:1,
+ /* R/W If set (and SX set), supervisor-level
+ * loads/stores can use XKPHYS addresses with
+ * VA<48>==0 */
+ __BITFIELD_FIELD(uint64_t xkmemenas:1,
+ /* R/W If set (and UX set), user-level loads/stores
+ * can use XKPHYS addresses with VA<48>==0 */
+ __BITFIELD_FIELD(uint64_t xkmemenau:1,
+ /* R/W If set (and SX set), supervisor-level
+ * loads/stores can use XKPHYS addresses with
+ * VA<48>==1 */
+ __BITFIELD_FIELD(uint64_t xkioenas:1,
+ /* R/W If set (and UX set), user-level loads/stores
+ * can use XKPHYS addresses with VA<48>==1 */
+ __BITFIELD_FIELD(uint64_t xkioenau:1,
+ /* R/W If set, all stores act as SYNCW (NOMERGE must
+ * be set when this is set) RW, reset to 0. */
+ __BITFIELD_FIELD(uint64_t allsyncw:1,
+ /* R/W If set, no stores merge, and all stores reach
+ * the coherent bus in order. */
+ __BITFIELD_FIELD(uint64_t nomerge:1,
+ /* R/W Selects the bit in the counter used for DID
+		 * time-outs: 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 =
+		 * 2^14. Actual time-out is between 1x and 2x this
+ * interval. For example, with DIDTTO=3, expiration
+ * interval is between 16K and 32K. */
+ __BITFIELD_FIELD(uint64_t didtto:2,
+ /* R/W If set, the (mem) CSR clock never turns off. */
+ __BITFIELD_FIELD(uint64_t csrckalwys:1,
+ /* R/W If set, mclk never turns off. */
+ __BITFIELD_FIELD(uint64_t mclkalwys:1,
+ /* R/W Selects the bit in the counter used for write
+		 * buffer flush time-outs; (WBFLT+11) is the bit
+ * position in an internal counter used to determine
+ * expiration. The write buffer expires between 1x and
+ * 2x this interval. For example, with WBFLT = 0, a
+ * write buffer expires between 2K and 4K cycles after
+ * the write buffer entry is allocated. */
+ __BITFIELD_FIELD(uint64_t wbfltime:3,
+ /* R/W If set, do not put Istream in the L2 cache. */
+ __BITFIELD_FIELD(uint64_t istrnol2:1,
+ /* R/W The write buffer threshold. */
+ __BITFIELD_FIELD(uint64_t wbthresh:4,
+ /* Reserved */
+ __BITFIELD_FIELD(uint64_t reserved2:2,
+ /* R/W If set, CVMSEG is available for loads/stores in
+ * kernel/debug mode. */
+ __BITFIELD_FIELD(uint64_t cvmsegenak:1,
+ /* R/W If set, CVMSEG is available for loads/stores in
+ * supervisor mode. */
+ __BITFIELD_FIELD(uint64_t cvmsegenas:1,
+ /* R/W If set, CVMSEG is available for loads/stores in
+ * user mode. */
+ __BITFIELD_FIELD(uint64_t cvmsegenau:1,
+ /* R/W Size of local memory in cache blocks, 54 (6912
+ * bytes) is max legal value. */
+ __BITFIELD_FIELD(uint64_t lmemsz:6,
+ ;)))))))))))))))))))))))))))))))))
+ } s;
+};
+
+extern void octeon_check_cpu_bist(void);
+
+int octeon_prune_device_tree(void);
+extern const char __appended_dtb;
+extern const char __dtb_octeon_3xxx_begin;
+extern const char __dtb_octeon_68xx_begin;
+
+/**
+ * Write a 32bit value to the Octeon NPI register space
+ *
+ * @address: Address to write to
+ * @val: Value to write
+ */
+static inline void octeon_npi_write32(uint64_t address, uint32_t val)
+{
+ cvmx_write64_uint32(address ^ 4, val);
+ cvmx_read64_uint32(address ^ 4);
+}
+
+#ifdef CONFIG_SMP
+void octeon_setup_smp(void);
+#else
+static inline void octeon_setup_smp(void) {}
+#endif
+
+struct irq_domain;
+struct device_node;
+struct irq_data;
+struct irq_chip;
+void octeon_ciu3_mbox_send(int cpu, unsigned int mbox);
+int octeon_irq_ciu3_xlat(struct irq_domain *d,
+ struct device_node *node,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type);
+void octeon_irq_ciu3_enable(struct irq_data *data);
+void octeon_irq_ciu3_disable(struct irq_data *data);
+void octeon_irq_ciu3_ack(struct irq_data *data);
+void octeon_irq_ciu3_mask(struct irq_data *data);
+void octeon_irq_ciu3_mask_ack(struct irq_data *data);
+int octeon_irq_ciu3_mapx(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw, struct irq_chip *chip);
+
+/* Octeon multiplier save/restore routines from octeon_switch.S */
+void octeon_mult_save(void);
+void octeon_mult_restore(void);
+void octeon_mult_save_end(void);
+void octeon_mult_restore_end(void);
+void octeon_mult_save3(void);
+void octeon_mult_save3_end(void);
+void octeon_mult_save2(void);
+void octeon_mult_save2_end(void);
+void octeon_mult_restore3(void);
+void octeon_mult_restore3_end(void);
+void octeon_mult_restore2(void);
+void octeon_mult_restore2_end(void);
+
+/**
+ * Read a 32bit value from the Octeon NPI register space
+ *
+ * @address: Address to read
+ * Returns The result
+ */
+static inline uint32_t octeon_npi_read32(uint64_t address)
+{
+ return cvmx_read64_uint32(address ^ 4);
+}
+
+extern struct cvmx_bootinfo *octeon_bootinfo;
+
+extern uint64_t octeon_bootloader_entry_addr;
+
+extern void (*octeon_irq_setup_secondary)(void);
+
+typedef void (*octeon_irq_ip4_handler_t)(void);
+void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t);
+
+extern void octeon_fixup_irqs(void);
+
+extern struct semaphore octeon_bootbus_sem;
+
+struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block);
+
+#endif /* __ASM_OCTEON_OCTEON_H */
diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
new file mode 100644
index 000000000..b12d9a3fb
--- /dev/null
+++ b/arch/mips/include/asm/octeon/pci-octeon.h
@@ -0,0 +1,69 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2009 Cavium Networks
+ */
+
+#ifndef __PCI_OCTEON_H__
+#define __PCI_OCTEON_H__
+
+#include <linux/pci.h>
+
+/*
+ * The physical memory base mapped by BAR1. 256MB at the end of the
+ * first 4GB.
+ */
+#define CVMX_PCIE_BAR1_PHYS_BASE ((1ull << 32) - (1ull << 28))
+#define CVMX_PCIE_BAR1_PHYS_SIZE (1ull << 28)
+
+/*
+ * The RC base of BAR1. Gen1 has a 39-bit BAR2 and gen2 has a 41-bit
+ * BAR2; place BAR1 so it is the same for both.
+ */
+#define CVMX_PCIE_BAR1_RC_BASE (1ull << 41)
+
+/*
+ * pcibios_map_irq() is defined inside pci-octeon.c. All it does is
+ * call the Octeon specific version pointed to by this variable. This
+ * function needs to change for PCI or PCIe based hosts.
+ */
+extern int (*octeon_pcibios_map_irq)(const struct pci_dev *dev,
+ u8 slot, u8 pin);
+
+/*
+ * For PCI (not PCIe) the BAR2 base address.
+ */
+#define OCTEON_BAR2_PCI_ADDRESS 0x8000000000ull
+
+/*
+ * For PCI (not PCIe) the base of the memory mapped by BAR1
+ */
+extern u64 octeon_bar1_pci_phys;
+
+/*
+ * The following defines are used when octeon_dma_bar_type =
+ * OCTEON_DMA_BAR_TYPE_BIG
+ */
+#define OCTEON_PCI_BAR1_HOLE_BITS 5
+#define OCTEON_PCI_BAR1_HOLE_SIZE (1ul<<(OCTEON_PCI_BAR1_HOLE_BITS+3))
+
+enum octeon_dma_bar_type {
+ OCTEON_DMA_BAR_TYPE_INVALID,
+ OCTEON_DMA_BAR_TYPE_SMALL,
+ OCTEON_DMA_BAR_TYPE_BIG,
+ OCTEON_DMA_BAR_TYPE_PCIE,
+ OCTEON_DMA_BAR_TYPE_PCIE2
+};
+
+/*
+ * This tells the DMA mapping system in dma-octeon.c how to map PCI
+ * DMA addresses.
+ */
+extern enum octeon_dma_bar_type octeon_dma_bar_type;
+
+void octeon_pci_dma_init(void);
+extern char *octeon_swiotlb;
+
+#endif
diff --git a/arch/mips/include/asm/paccess.h b/arch/mips/include/asm/paccess.h
new file mode 100644
index 000000000..af81ab0da
--- /dev/null
+++ b/arch/mips/include/asm/paccess.h
@@ -0,0 +1,114 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1997, 1998, 1999, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ *
+ * Protected memory access. Used for everything that might take revenge
+ * by sending a DBE error, such as accessing possibly non-existent memory or
+ * devices.
+ */
+#ifndef _ASM_PACCESS_H
+#define _ASM_PACCESS_H
+
+#include <linux/errno.h>
+
+#ifdef CONFIG_32BIT
+#define __PA_ADDR ".word"
+#endif
+#ifdef CONFIG_64BIT
+#define __PA_ADDR ".dword"
+#endif
+
+extern asmlinkage void handle_ibe(void);
+extern asmlinkage void handle_dbe(void);
+
+#define put_dbe(x, ptr) __put_dbe((x), (ptr), sizeof(*(ptr)))
+#define get_dbe(x, ptr) __get_dbe((x), (ptr), sizeof(*(ptr)))
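+
+/*
+ * Sketch of the intended use (the address variable is hypothetical):
+ * probing a register that may bus-error returns -EFAULT instead of
+ * taking an unhandled DBE exception:
+ *
+ *	u32 id;
+ *	if (get_dbe(id, (u32 *)probe_addr))
+ *		return -ENODEV;
+ */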
+
+struct __large_pstruct { unsigned long buf[100]; };
+#define __mp(x) (*(struct __large_pstruct *)(x))
+
+#define __get_dbe(x, ptr, size) \
+({ \
+ long __gu_err; \
+ __typeof__(*(ptr)) __gu_val; \
+ unsigned long __gu_addr; \
+ __asm__("":"=r" (__gu_val)); \
+ __gu_addr = (unsigned long) (ptr); \
+ __asm__("":"=r" (__gu_err)); \
+ switch (size) { \
+ case 1: __get_dbe_asm("lb"); break; \
+ case 2: __get_dbe_asm("lh"); break; \
+ case 4: __get_dbe_asm("lw"); break; \
+ case 8: __get_dbe_asm("ld"); break; \
+ default: __get_dbe_unknown(); break; \
+ } \
+ x = (__typeof__(*(ptr))) __gu_val; \
+ __gu_err; \
+})
+
+#define __get_dbe_asm(insn) \
+{ \
+ __asm__ __volatile__( \
+ "1:\t" insn "\t%1,%2\n\t" \
+ "move\t%0,$0\n" \
+ "2:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n" \
+ "3:\tli\t%0,%3\n\t" \
+ "move\t%1,$0\n\t" \
+ "j\t2b\n\t" \
+ ".previous\n\t" \
+ ".section\t__dbe_table,\"a\"\n\t" \
+ __PA_ADDR "\t1b, 3b\n\t" \
+ ".previous" \
+ :"=r" (__gu_err), "=r" (__gu_val) \
+ :"o" (__mp(__gu_addr)), "i" (-EFAULT)); \
+}
+
+extern void __get_dbe_unknown(void);
+
+#define __put_dbe(x, ptr, size) \
+({ \
+ long __pu_err; \
+ __typeof__(*(ptr)) __pu_val; \
+ long __pu_addr; \
+ __pu_val = (x); \
+ __pu_addr = (long) (ptr); \
+ __asm__("":"=r" (__pu_err)); \
+ switch (size) { \
+ case 1: __put_dbe_asm("sb"); break; \
+ case 2: __put_dbe_asm("sh"); break; \
+ case 4: __put_dbe_asm("sw"); break; \
+ case 8: __put_dbe_asm("sd"); break; \
+ default: __put_dbe_unknown(); break; \
+ } \
+ __pu_err; \
+})
+
+#define __put_dbe_asm(insn) \
+{ \
+ __asm__ __volatile__( \
+ "1:\t" insn "\t%1,%2\n\t" \
+ "move\t%0,$0\n" \
+ "2:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n" \
+ "3:\tli\t%0,%3\n\t" \
+ "j\t2b\n\t" \
+ ".previous\n\t" \
+ ".section\t__dbe_table,\"a\"\n\t" \
+ __PA_ADDR "\t1b, 3b\n\t" \
+ ".previous" \
+ : "=r" (__pu_err) \
+ : "r" (__pu_val), "o" (__mp(__pu_addr)), "i" (-EFAULT)); \
+}
+
+extern void __put_dbe_unknown(void);
+
+extern unsigned long search_dbe_table(unsigned long addr);
+
+#endif /* _ASM_PACCESS_H */
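/*
 * Illustrative sketch (not part of the patch above): the intended use of
 * get_dbe() is to probe a register that may not exist without letting the
 * resulting bus error bring the system down. Function name and error code
 * are assumptions for illustration only.
 */
static int my_probe_reg(volatile unsigned int *reg, unsigned int *val)
{
	if (get_dbe(*val, reg))		/* non-zero: a DBE was taken and fixed up */
		return -ENODEV;		/* nothing answered at this address */

	return 0;
}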
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
new file mode 100644
index 000000000..6a77bc4a6
--- /dev/null
+++ b/arch/mips/include/asm/page.h
@@ -0,0 +1,261 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000, 03 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_PAGE_H
+#define _ASM_PAGE_H
+
+#include <spaces.h>
+#include <linux/const.h>
+#include <linux/kernel.h>
+#include <asm/mipsregs.h>
+
+/*
+ * PAGE_SHIFT determines the page size
+ */
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define PAGE_SHIFT 12
+#endif
+#ifdef CONFIG_PAGE_SIZE_8KB
+#define PAGE_SHIFT 13
+#endif
+#ifdef CONFIG_PAGE_SIZE_16KB
+#define PAGE_SHIFT 14
+#endif
+#ifdef CONFIG_PAGE_SIZE_32KB
+#define PAGE_SHIFT 15
+#endif
+#ifdef CONFIG_PAGE_SIZE_64KB
+#define PAGE_SHIFT 16
+#endif
+#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+
+/*
+ * This is used for calculating the real page sizes
+ * for FTLB or VTLB + FTLB configurations.
+ */
+static inline unsigned int page_size_ftlb(unsigned int mmuextdef)
+{
+ switch (mmuextdef) {
+ case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
+ if (PAGE_SIZE == (1 << 30))
+ return 5;
+ if (PAGE_SIZE == (1llu << 32))
+ return 6;
+ if (PAGE_SIZE > (256 << 10))
+ return 7; /* reserved */
+ fallthrough;
+ case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
+ return (PAGE_SHIFT - 10) / 2;
+ default:
+ panic("Invalid FTLB configuration with Conf4_mmuextdef=%d value\n",
+ mmuextdef >> 14);
+ }
+}
+
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
+#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+#else /* !CONFIG_MIPS_HUGE_TLB_SUPPORT */
+#define HPAGE_SHIFT ({BUILD_BUG(); 0; })
+#define HPAGE_SIZE ({BUILD_BUG(); 0; })
+#define HPAGE_MASK ({BUILD_BUG(); 0; })
+#define HUGETLB_PAGE_ORDER ({BUILD_BUG(); 0; })
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
+
+#include <linux/pfn.h>
+
+extern void build_clear_page(void);
+extern void build_copy_page(void);
+
+/*
+ * It's normally defined only for FLATMEM config but it's
+ * used in our early mem init code for all memory models.
+ * So always define it.
+ */
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+extern unsigned long ARCH_PFN_OFFSET;
+# define ARCH_PFN_OFFSET ARCH_PFN_OFFSET
+#else
+# define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
+#endif
+
+extern void clear_page(void * page);
+extern void copy_page(void * to, void * from);
+
+extern unsigned long shm_align_mask;
+
+static inline unsigned long pages_do_alias(unsigned long addr1,
+ unsigned long addr2)
+{
+ return (addr1 ^ addr2) & shm_align_mask;
+}
+
+struct page;
+
+static inline void clear_user_page(void *addr, unsigned long vaddr,
+ struct page *page)
+{
+ extern void (*flush_data_cache_page)(unsigned long addr);
+
+ clear_page(addr);
+ if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
+ flush_data_cache_page((unsigned long)addr);
+}
+
+struct vm_area_struct;
+extern void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+/*
+ * These are used to make use of C type-checking.
+ */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ #ifdef CONFIG_CPU_MIPS32
+ typedef struct { unsigned long pte_low, pte_high; } pte_t;
+ #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+ #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
+ #else
+ typedef struct { unsigned long long pte; } pte_t;
+ #define pte_val(x) ((x).pte)
+ #define __pte(x) ((pte_t) { (x) } )
+ #endif
+#else
+typedef struct { unsigned long pte; } pte_t;
+#define pte_val(x) ((x).pte)
+#define __pte(x) ((pte_t) { (x) } )
+#endif
+typedef struct page *pgtable_t;
+
+/*
+ * Right now we don't support 4-level pagetables, so all pud-related
+ * definitions come from <asm-generic/pgtable-nopud.h>.
+ */
+
+/*
+ * Finally, the top of the hierarchy: the pgd
+ */
+typedef struct { unsigned long pgd; } pgd_t;
+#define pgd_val(x) ((x).pgd)
+#define __pgd(x) ((pgd_t) { (x) } )
+
+/*
+ * Manipulate page protection bits
+ */
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define pgprot_val(x) ((x).pgprot)
+#define __pgprot(x) ((pgprot_t) { (x) } )
+#define pte_pgprot(x) __pgprot(pte_val(x) & ~_PFN_MASK)
+
+/*
+ * On R4000-style MMUs where a TLB entry is mapping an adjacent even / odd
+ * pair of pages we only have a single global bit per pair of pages. When
+ * writing to the TLB make sure we always have the bit set for both pages
+ * or none. This macro is used to access the `buddy' of the pte we're just
+ * working on.
+ */
+#define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
+
+/*
+ * __pa()/__va() should be used only during mem init.
+ */
+static inline unsigned long ___pa(unsigned long x)
+{
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ /*
+ * For MIPS64 the virtual address may either be in one of
+ * the compatibility segments ckseg0 or ckseg1, or it may
+ * be in xkphys.
+ */
+ return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
+ }
+
+ if (!IS_ENABLED(CONFIG_EVA)) {
+ /*
+ * We're using the standard MIPS32 legacy memory map, i.e.
+ * the address x is going to be in kseg0 or kseg1. We can
+ * handle either case by masking out the desired bits using
+ * CPHYSADDR.
+ */
+ return CPHYSADDR(x);
+ }
+
+ /*
+ * EVA is in use so the memory map could be anything, making it not
+ * safe to just mask out bits.
+ */
+ return x - PAGE_OFFSET + PHYS_OFFSET;
+}
+#define __pa(x) ___pa((unsigned long)(x))
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+#include <asm/io.h>
+
+/*
+ * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
+ * (lmo) resp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The
+ * discussion can be found in lkml posting
+ * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is
+ * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
+ *
+ * It is unclear if the miscompilations mentioned in
+ * http://lkml.org/lkml/2010/8/8/138 also affect MIPS, so we keep this one
+ * until GCC 3.x has been retired; only then can we apply
+ * https://patchwork.linux-mips.org/patch/1541/
+ */
+
+#ifndef __pa_symbol
+#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
+#endif
+
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+#ifdef CONFIG_FLATMEM
+
+static inline int pfn_valid(unsigned long pfn)
+{
+ /* avoid <linux/mm.h> include hell */
+ extern unsigned long max_mapnr;
+ unsigned long pfn_offset = ARCH_PFN_OFFSET;
+
+ return pfn >= pfn_offset && pfn < max_mapnr;
+}
+
+#elif defined(CONFIG_SPARSEMEM)
+
+/* pfn_valid is defined in linux/mmzone.h */
+
+#elif defined(CONFIG_NEED_MULTIPLE_NODES)
+
+#define pfn_valid(pfn) \
+({ \
+ unsigned long __pfn = (pfn); \
+ int __n = pfn_to_nid(__pfn); \
+ ((__n >= 0) ? (__pfn < NODE_DATA(__n)->node_start_pfn + \
+ NODE_DATA(__n)->node_spanned_pages) \
+ : 0); \
+})
+
+#endif
+
+#define virt_to_pfn(kaddr) PFN_DOWN(virt_to_phys((void *)(kaddr)))
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
+
+extern bool __virt_addr_valid(const volatile void *kaddr);
+#define virt_addr_valid(kaddr) \
+ __virt_addr_valid((const volatile void *) (kaddr))
+
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* _ASM_PAGE_H */
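/*
 * Worked example (not part of the patch above), assuming the classic
 * non-EVA 32-bit layout with PAGE_OFFSET = 0x80000000 and PHYS_OFFSET = 0:
 * the __pa()/__va() pair defined in this header round-trips a kseg0 address.
 */
static void __init my_pa_va_demo(void)
{
	void *v = (void *)0x80100000UL;	/* a kseg0 virtual address              */
	unsigned long p = __pa(v);	/* CPHYSADDR() strips kseg0: 0x00100000 */
	void *v2 = __va(p);		/* adds PAGE_OFFSET back: 0x80100000    */

	(void)v2;
}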
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
new file mode 100644
index 000000000..6f4864920
--- /dev/null
+++ b/arch/mips/include/asm/pci.h
@@ -0,0 +1,149 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef _ASM_PCI_H
+#define _ASM_PCI_H
+
+#include <linux/mm.h>
+
+#ifdef __KERNEL__
+
+/*
+ * This file essentially defines the interface between board-specific
+ * PCI code and MIPS common PCI code. Should potentially be put into
+ * the include/asm/pci.h file.
+ */
+
+#include <linux/ioport.h>
+#include <linux/list.h>
+#include <linux/of.h>
+
+#ifdef CONFIG_PCI_DRIVERS_LEGACY
+
+/*
+ * Each PCI channel is a top-level PCI bus seen by the CPU. A machine with
+ * multiple PCI channels may have multiple PCI host controllers or a
+ * single controller supporting multiple channels.
+ */
+struct pci_controller {
+ struct list_head list;
+ struct pci_bus *bus;
+ struct device_node *of_node;
+
+ struct pci_ops *pci_ops;
+ struct resource *mem_resource;
+ unsigned long mem_offset;
+ struct resource *io_resource;
+ unsigned long io_offset;
+ unsigned long io_map_base;
+ struct resource *busn_resource;
+
+#ifndef CONFIG_PCI_DOMAINS_GENERIC
+ unsigned int index;
+ /* For compatibility with current (as of July 2003) pciutils
+ and XFree86. Eventually will be removed. */
+ unsigned int need_domain_info;
+#endif
+
+ /* Optional access methods for reading/writing the bus number
+ of the PCI controller */
+ int (*get_busno)(void);
+ void (*set_busno)(int busno);
+};
+
+/*
+ * Used by boards to register their PCI busses before the actual scanning.
+ */
+extern void register_pci_controller(struct pci_controller *hose);
+
+/*
+ * board supplied pci irq fixup routine
+ */
+extern int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
+
+/* Do platform specific device initialization at pci_enable_device() time */
+extern int pcibios_plat_dev_init(struct pci_dev *dev);
+
+extern char * (*pcibios_plat_setup)(char *str);
+
+#ifdef CONFIG_OF
+/* this function parses memory ranges from a device node */
+extern void pci_load_of_ranges(struct pci_controller *hose,
+ struct device_node *node);
+#else
+static inline void pci_load_of_ranges(struct pci_controller *hose,
+ struct device_node *node) {}
+#endif
+
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+static inline void set_pci_need_domain_info(struct pci_controller *hose,
+ int need_domain_info)
+{
+ /* nothing to do */
+}
+#elif defined(CONFIG_PCI_DOMAINS)
+static inline void set_pci_need_domain_info(struct pci_controller *hose,
+ int need_domain_info)
+{
+ hose->need_domain_info = need_domain_info;
+}
+#endif /* CONFIG_PCI_DOMAINS */
+
+#endif
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ already-configured bus numbers - to be used for buggy BIOSes
+ or architectures with incomplete PCI setup by the loader */
+static inline unsigned int pcibios_assign_all_busses(void)
+{
+ return 1;
+}
+
+extern unsigned long PCIBIOS_MIN_IO;
+extern unsigned long PCIBIOS_MIN_MEM;
+
+#define PCIBIOS_MIN_CARDBUS_IO 0x4000
+
+#define HAVE_PCI_MMAP
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+
+/*
+ * Dynamic DMA mapping stuff.
+ * MIPS has everything mapped statically.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ return pci_domain_nr(bus);
+}
+#elif defined(CONFIG_PCI_DOMAINS)
+#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
+
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+ return hose->need_domain_info;
+}
+#endif /* CONFIG_PCI_DOMAINS */
+
+#endif /* __KERNEL__ */
+
+/* Do platform specific device initialization at pci_enable_device() time */
+extern int pcibios_plat_dev_init(struct pci_dev *dev);
+
+/* Chances are this interrupt is wired PC-style ... */
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ return channel ? 15 : 14;
+}
+
+#endif /* _ASM_PCI_H */
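/*
 * Illustrative sketch (not part of the patch above): with the legacy
 * registration path a board fills in a struct pci_controller and hands it
 * to register_pci_controller(). The resource windows and my_pci_ops are
 * assumptions, not values from any real board.
 */
extern struct pci_ops my_pci_ops;		/* board-provided config accessors */

static struct resource my_pci_mem = {
	.name	= "PCI memory",
	.start	= 0x10000000,
	.end	= 0x17ffffff,
	.flags	= IORESOURCE_MEM,
};

static struct resource my_pci_io = {
	.name	= "PCI I/O",
	.start	= 0x00001000,
	.end	= 0x000fffff,
	.flags	= IORESOURCE_IO,
};

static struct pci_controller my_hose = {
	.pci_ops	= &my_pci_ops,
	.mem_resource	= &my_pci_mem,
	.io_resource	= &my_pci_io,
};

static int __init my_board_pci_init(void)
{
	register_pci_controller(&my_hose);
	return 0;
}
arch_initcall(my_board_pci_init);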
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
new file mode 100644
index 000000000..9c476a040
--- /dev/null
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -0,0 +1,825 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * bridge.h - bridge chip header file, derived from IRIX <sys/PCI/bridge.h>,
+ * revision 1.76.
+ *
+ * Copyright (C) 1996, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
+ */
+#ifndef _ASM_PCI_BRIDGE_H
+#define _ASM_PCI_BRIDGE_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <asm/xtalk/xwidget.h> /* generic widget header */
+#include <asm/sn/types.h>
+
+/* I/O page size */
+
+#define IOPFNSHIFT 12 /* 4K per mapped page */
+
+#define IOPGSIZE (1 << IOPFNSHIFT)
+#define IOPG(x) ((x) >> IOPFNSHIFT)
+#define IOPGOFF(x) ((x) & (IOPGSIZE-1))
+
+/* Bridge RAM sizes */
+
+#define BRIDGE_ATE_RAM_SIZE 0x00000400 /* 1kB ATE RAM */
+
+#define BRIDGE_CONFIG_BASE 0x20000
+#define BRIDGE_CONFIG1_BASE 0x28000
+#define BRIDGE_CONFIG_END 0x30000
+#define BRIDGE_CONFIG_SLOT_SIZE 0x1000
+
+#define BRIDGE_SSRAM_512K 0x00080000 /* 512kB */
+#define BRIDGE_SSRAM_128K 0x00020000 /* 128kB */
+#define BRIDGE_SSRAM_64K 0x00010000 /* 64kB */
+#define BRIDGE_SSRAM_0K 0x00000000 /* 0kB */
+
+/* ========================================================================
+ * Bridge address map
+ */
+
+#ifndef __ASSEMBLY__
+
+#define ATE_V 0x01
+#define ATE_CO 0x02
+#define ATE_PREC 0x04
+#define ATE_PREF 0x08
+#define ATE_BAR 0x10
+
+#define ATE_PFNSHIFT 12
+#define ATE_TIDSHIFT 8
+#define ATE_RMFSHIFT 48
+
+#define mkate(xaddr, xid, attr) (((xaddr) & 0x0000fffffffff000ULL) | \
+ ((xid)<<ATE_TIDSHIFT) | \
+ (attr))
+
+#define BRIDGE_INTERNAL_ATES 128
+
+/*
+ * It is generally preferred that hardware registers on the bridge
+ * are accessed from C code via this structure.
+ *
+ * Generated from Bridge spec dated 04oct95
+ */
+
+struct bridge_regs {
+ /* Local Registers 0x000000-0x00FFFF */
+
+ /* standard widget configuration 0x000000-0x000057 */
+ widget_cfg_t b_widget; /* 0x000000 */
+
+ /* helper fieldnames for accessing bridge widget */
+
+#define b_wid_id b_widget.w_id
+#define b_wid_stat b_widget.w_status
+#define b_wid_err_upper b_widget.w_err_upper_addr
+#define b_wid_err_lower b_widget.w_err_lower_addr
+#define b_wid_control b_widget.w_control
+#define b_wid_req_timeout b_widget.w_req_timeout
+#define b_wid_int_upper b_widget.w_intdest_upper_addr
+#define b_wid_int_lower b_widget.w_intdest_lower_addr
+#define b_wid_err_cmdword b_widget.w_err_cmd_word
+#define b_wid_llp b_widget.w_llp_cfg
+#define b_wid_tflush b_widget.w_tflush
+
+ /* bridge-specific widget configuration 0x000058-0x00007F */
+ u32 _pad_000058;
+ u32 b_wid_aux_err; /* 0x00005C */
+ u32 _pad_000060;
+ u32 b_wid_resp_upper; /* 0x000064 */
+ u32 _pad_000068;
+ u32 b_wid_resp_lower; /* 0x00006C */
+ u32 _pad_000070;
+ u32 b_wid_tst_pin_ctrl; /* 0x000074 */
+ u32 _pad_000078[2];
+
+ /* PMU & Map 0x000080-0x00008F */
+ u32 _pad_000080;
+ u32 b_dir_map; /* 0x000084 */
+ u32 _pad_000088[2];
+
+ /* SSRAM 0x000090-0x00009F */
+ u32 _pad_000090;
+ u32 b_ram_perr; /* 0x000094 */
+ u32 _pad_000098[2];
+
+ /* Arbitration 0x0000A0-0x0000AF */
+ u32 _pad_0000A0;
+ u32 b_arb; /* 0x0000A4 */
+ u32 _pad_0000A8[2];
+
+ /* Number In A Can 0x0000B0-0x0000BF */
+ u32 _pad_0000B0;
+ u32 b_nic; /* 0x0000B4 */
+ u32 _pad_0000B8[2];
+
+ /* PCI/GIO 0x0000C0-0x0000FF */
+ u32 _pad_0000C0;
+ u32 b_bus_timeout; /* 0x0000C4 */
+#define b_pci_bus_timeout b_bus_timeout
+
+ u32 _pad_0000C8;
+ u32 b_pci_cfg; /* 0x0000CC */
+ u32 _pad_0000D0;
+ u32 b_pci_err_upper; /* 0x0000D4 */
+ u32 _pad_0000D8;
+ u32 b_pci_err_lower; /* 0x0000DC */
+ u32 _pad_0000E0[8];
+#define b_gio_err_lower b_pci_err_lower
+#define b_gio_err_upper b_pci_err_upper
+
+ /* Interrupt 0x000100-0x0001FF */
+ u32 _pad_000100;
+ u32 b_int_status; /* 0x000104 */
+ u32 _pad_000108;
+ u32 b_int_enable; /* 0x00010C */
+ u32 _pad_000110;
+ u32 b_int_rst_stat; /* 0x000114 */
+ u32 _pad_000118;
+ u32 b_int_mode; /* 0x00011C */
+ u32 _pad_000120;
+ u32 b_int_device; /* 0x000124 */
+ u32 _pad_000128;
+ u32 b_int_host_err; /* 0x00012C */
+
+ struct {
+ u32 __pad; /* 0x0001{30,,,68} */
+ u32 addr; /* 0x0001{34,,,6C} */
+ } b_int_addr[8]; /* 0x000130 */
+
+ u32 _pad_000170[36];
+
+ /* Device 0x000200-0x0003FF */
+ struct {
+ u32 __pad; /* 0x0002{00,,,38} */
+ u32 reg; /* 0x0002{04,,,3C} */
+ } b_device[8]; /* 0x000200 */
+
+ struct {
+ u32 __pad; /* 0x0002{40,,,78} */
+ u32 reg; /* 0x0002{44,,,7C} */
+ } b_wr_req_buf[8]; /* 0x000240 */
+
+ struct {
+ u32 __pad; /* 0x0002{80,,,88} */
+ u32 reg; /* 0x0002{84,,,8C} */
+ } b_rrb_map[2]; /* 0x000280 */
+#define b_even_resp b_rrb_map[0].reg /* 0x000284 */
+#define b_odd_resp b_rrb_map[1].reg /* 0x00028C */
+
+ u32 _pad_000290;
+ u32 b_resp_status; /* 0x000294 */
+ u32 _pad_000298;
+ u32 b_resp_clear; /* 0x00029C */
+
+ u32 _pad_0002A0[24];
+
+ char _pad_000300[0x10000 - 0x000300];
+
+ /* Internal Address Translation Entry RAM 0x010000-0x0103FF */
+ union {
+ u64 wr; /* write-only */
+ struct {
+ u32 _p_pad;
+ u32 rd; /* read-only */
+ } hi;
+ } b_int_ate_ram[128];
+
+ char _pad_010400[0x11000 - 0x010400];
+
+ /* Internal Address Translation Entry RAM LOW 0x011000-0x0113FF */
+ struct {
+ u32 _p_pad;
+ u32 rd; /* read-only */
+ } b_int_ate_ram_lo[128];
+
+ char _pad_011400[0x20000 - 0x011400];
+
+ /* PCI Device Configuration Spaces 0x020000-0x027FFF */
+ union { /* make all access sizes available. */
+ u8 c[0x1000 / 1];
+ u16 s[0x1000 / 2];
+ u32 l[0x1000 / 4];
+ u64 d[0x1000 / 8];
+ union {
+ u8 c[0x100 / 1];
+ u16 s[0x100 / 2];
+ u32 l[0x100 / 4];
+ u64 d[0x100 / 8];
+ } f[8];
+ } b_type0_cfg_dev[8]; /* 0x020000 */
+
+ /* PCI Type 1 Configuration Space 0x028000-0x028FFF */
+ union { /* make all access sizes available. */
+ u8 c[0x1000 / 1];
+ u16 s[0x1000 / 2];
+ u32 l[0x1000 / 4];
+ u64 d[0x1000 / 8];
+ } b_type1_cfg; /* 0x028000-0x029000 */
+
+ char _pad_029000[0x007000]; /* 0x029000-0x030000 */
+
+ /* PCI Interrupt Acknowledge Cycle 0x030000 */
+ union {
+ u8 c[8 / 1];
+ u16 s[8 / 2];
+ u32 l[8 / 4];
+ u64 d[8 / 8];
+ } b_pci_iack; /* 0x030000 */
+
+ u8 _pad_030007[0x04fff8]; /* 0x030008-0x07FFFF */
+
+ /* External Address Translation Entry RAM 0x080000-0x0FFFFF */
+ u64 b_ext_ate_ram[0x10000];
+
+ /* Reserved 0x100000-0x1FFFFF */
+ char _pad_100000[0x200000-0x100000];
+
+ /* PCI/GIO Device Spaces 0x200000-0xBFFFFF */
+ union { /* make all access sizes available. */
+ u8 c[0x100000 / 1];
+ u16 s[0x100000 / 2];
+ u32 l[0x100000 / 4];
+ u64 d[0x100000 / 8];
+ } b_devio_raw[10]; /* 0x200000 */
+
+ /* b_devio macro is a bit strange; it reflects the
+ * fact that the Bridge ASIC provides 2M for the
+ * first two DevIO windows and 1M for the other six.
+ */
+#define b_devio(n) b_devio_raw[((n)<2)?(n*2):(n+2)]
+
+ /* External Flash Proms 1,0 0xC00000-0xFFFFFF */
+ union { /* make all access sizes available. */
+ u8 c[0x400000 / 1]; /* read-only */
+ u16 s[0x400000 / 2]; /* read-write */
+ u32 l[0x400000 / 4]; /* read-only */
+ u64 d[0x400000 / 8]; /* read-only */
+ } b_external_flash; /* 0xC00000 */
+};
+
+/*
+ * Field formats for Error Command Word and Auxiliary Error Command Word
+ * of bridge.
+ */
+struct bridge_err_cmdword {
+ union {
+ u32 cmd_word;
+ struct {
+ u32 didn:4, /* Destination ID */
+ sidn:4, /* Source ID */
+ pactyp:4, /* Packet type */
+ tnum:5, /* Trans Number */
+ coh:1, /* Coh Transaction */
+ ds:2, /* Data size */
+ gbr:1, /* GBR enable */
+ vbpm:1, /* VBPM message */
+ error:1, /* Error occurred */
+ barr:1, /* Barrier op */
+ rsvd:8;
+ } berr_st;
+ } berr_un;
+};
+
+#define berr_field berr_un.berr_st
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * The values of these macros can and should be crosschecked
+ * regularly against the offsets of the like-named fields
+ * within the bridge_regs structure above.
+ */
+
+/* Byte offset macros for Bridge internal registers */
+
+#define BRIDGE_WID_ID WIDGET_ID
+#define BRIDGE_WID_STAT WIDGET_STATUS
+#define BRIDGE_WID_ERR_UPPER WIDGET_ERR_UPPER_ADDR
+#define BRIDGE_WID_ERR_LOWER WIDGET_ERR_LOWER_ADDR
+#define BRIDGE_WID_CONTROL WIDGET_CONTROL
+#define BRIDGE_WID_REQ_TIMEOUT WIDGET_REQ_TIMEOUT
+#define BRIDGE_WID_INT_UPPER WIDGET_INTDEST_UPPER_ADDR
+#define BRIDGE_WID_INT_LOWER WIDGET_INTDEST_LOWER_ADDR
+#define BRIDGE_WID_ERR_CMDWORD WIDGET_ERR_CMD_WORD
+#define BRIDGE_WID_LLP WIDGET_LLP_CFG
+#define BRIDGE_WID_TFLUSH WIDGET_TFLUSH
+
+#define BRIDGE_WID_AUX_ERR 0x00005C /* Aux Error Command Word */
+#define BRIDGE_WID_RESP_UPPER 0x000064 /* Response Buf Upper Addr */
+#define BRIDGE_WID_RESP_LOWER 0x00006C /* Response Buf Lower Addr */
+#define BRIDGE_WID_TST_PIN_CTRL 0x000074 /* Test pin control */
+
+#define BRIDGE_DIR_MAP 0x000084 /* Direct Map reg */
+
+#define BRIDGE_RAM_PERR 0x000094 /* SSRAM Parity Error */
+
+#define BRIDGE_ARB 0x0000A4 /* Arbitration Priority reg */
+
+#define BRIDGE_NIC 0x0000B4 /* Number In A Can */
+
+#define BRIDGE_BUS_TIMEOUT 0x0000C4 /* Bus Timeout Register */
+#define BRIDGE_PCI_BUS_TIMEOUT BRIDGE_BUS_TIMEOUT
+#define BRIDGE_PCI_CFG 0x0000CC /* PCI Type 1 Config reg */
+#define BRIDGE_PCI_ERR_UPPER 0x0000D4 /* PCI error Upper Addr */
+#define BRIDGE_PCI_ERR_LOWER 0x0000DC /* PCI error Lower Addr */
+
+#define BRIDGE_INT_STATUS 0x000104 /* Interrupt Status */
+#define BRIDGE_INT_ENABLE 0x00010C /* Interrupt Enables */
+#define BRIDGE_INT_RST_STAT 0x000114 /* Reset Intr Status */
+#define BRIDGE_INT_MODE 0x00011C /* Interrupt Mode */
+#define BRIDGE_INT_DEVICE 0x000124 /* Interrupt Device */
+#define BRIDGE_INT_HOST_ERR 0x00012C /* Host Error Field */
+
+#define BRIDGE_INT_ADDR0 0x000134 /* Host Address Reg */
+#define BRIDGE_INT_ADDR_OFF 0x000008 /* Host Addr offset (1..7) */
+#define BRIDGE_INT_ADDR(x) (BRIDGE_INT_ADDR0+(x)*BRIDGE_INT_ADDR_OFF)
+
+#define BRIDGE_DEVICE0 0x000204 /* Device 0 */
+#define BRIDGE_DEVICE_OFF 0x000008 /* Device offset (1..7) */
+#define BRIDGE_DEVICE(x) (BRIDGE_DEVICE0+(x)*BRIDGE_DEVICE_OFF)
+
+#define BRIDGE_WR_REQ_BUF0 0x000244 /* Write Request Buffer 0 */
+#define BRIDGE_WR_REQ_BUF_OFF 0x000008 /* Buffer Offset (1..7) */
+#define BRIDGE_WR_REQ_BUF(x) (BRIDGE_WR_REQ_BUF0+(x)*BRIDGE_WR_REQ_BUF_OFF)
+
+#define BRIDGE_EVEN_RESP 0x000284 /* Even Device Response Buf */
+#define BRIDGE_ODD_RESP 0x00028C /* Odd Device Response Buf */
+
+#define BRIDGE_RESP_STATUS 0x000294 /* Read Response Status reg */
+#define BRIDGE_RESP_CLEAR 0x00029C /* Read Response Clear reg */
+
+/* Byte offset macros for Bridge I/O space */
+
+#define BRIDGE_ATE_RAM 0x00010000 /* Internal Addr Xlat Ram */
+
+#define BRIDGE_TYPE0_CFG_DEV0 0x00020000 /* Type 0 Cfg, Device 0 */
+#define BRIDGE_TYPE0_CFG_SLOT_OFF 0x00001000 /* Type 0 Cfg Slot Offset (1..7) */
+#define BRIDGE_TYPE0_CFG_FUNC_OFF 0x00000100 /* Type 0 Cfg Func Offset (1..7) */
+#define BRIDGE_TYPE0_CFG_DEV(s) (BRIDGE_TYPE0_CFG_DEV0+\
+ (s)*BRIDGE_TYPE0_CFG_SLOT_OFF)
+#define BRIDGE_TYPE0_CFG_DEVF(s, f) (BRIDGE_TYPE0_CFG_DEV0+\
+ (s)*BRIDGE_TYPE0_CFG_SLOT_OFF+\
+ (f)*BRIDGE_TYPE0_CFG_FUNC_OFF)
+
+#define BRIDGE_TYPE1_CFG 0x00028000 /* Type 1 Cfg space */
+
+#define BRIDGE_PCI_IACK 0x00030000 /* PCI Interrupt Ack */
+#define BRIDGE_EXT_SSRAM 0x00080000 /* Extern SSRAM (ATE) */
+
+/* Byte offset macros for Bridge device IO spaces */
+
+#define BRIDGE_DEV_CNT 8 /* Up to 8 devices per bridge */
+#define BRIDGE_DEVIO0 0x00200000 /* Device IO 0 Addr */
+#define BRIDGE_DEVIO1 0x00400000 /* Device IO 1 Addr */
+#define BRIDGE_DEVIO2 0x00600000 /* Device IO 2 Addr */
+#define BRIDGE_DEVIO_OFF 0x00100000 /* Device IO Offset (3..7) */
+
+#define BRIDGE_DEVIO_2MB 0x00200000 /* Device IO Offset (0..1) */
+#define BRIDGE_DEVIO_1MB 0x00100000 /* Device IO Offset (2..7) */
+
+#define BRIDGE_DEVIO(x) ((x)<=1 ? BRIDGE_DEVIO0+(x)*BRIDGE_DEVIO_2MB : BRIDGE_DEVIO2+((x)-2)*BRIDGE_DEVIO_1MB)
+
+#define BRIDGE_EXTERNAL_FLASH 0x00C00000 /* External Flash PROMS */
+
+/* ========================================================================
+ * Bridge register bit field definitions
+ */
+
+/* Widget part number of bridge */
+#define BRIDGE_WIDGET_PART_NUM 0xc002
+#define XBRIDGE_WIDGET_PART_NUM 0xd002
+
+/* Manufacturer of bridge */
+#define BRIDGE_WIDGET_MFGR_NUM 0x036
+#define XBRIDGE_WIDGET_MFGR_NUM 0x024
+
+/* Revision numbers for known Bridge revisions */
+#define BRIDGE_REV_A 0x1
+#define BRIDGE_REV_B 0x2
+#define BRIDGE_REV_C 0x3
+#define BRIDGE_REV_D 0x4
+
+/* Bridge widget status register bits definition */
+
+#define BRIDGE_STAT_LLP_REC_CNT (0xFFu << 24)
+#define BRIDGE_STAT_LLP_TX_CNT (0xFF << 16)
+#define BRIDGE_STAT_FLASH_SELECT (0x1 << 6)
+#define BRIDGE_STAT_PCI_GIO_N (0x1 << 5)
+#define BRIDGE_STAT_PENDING (0x1F << 0)
+
+/* Bridge widget control register bits definition */
+#define BRIDGE_CTRL_FLASH_WR_EN (0x1ul << 31)
+#define BRIDGE_CTRL_EN_CLK50 (0x1 << 30)
+#define BRIDGE_CTRL_EN_CLK40 (0x1 << 29)
+#define BRIDGE_CTRL_EN_CLK33 (0x1 << 28)
+#define BRIDGE_CTRL_RST(n) ((n) << 24)
+#define BRIDGE_CTRL_RST_MASK (BRIDGE_CTRL_RST(0xF))
+#define BRIDGE_CTRL_RST_PIN(x) (BRIDGE_CTRL_RST(0x1 << (x)))
+#define BRIDGE_CTRL_IO_SWAP (0x1 << 23)
+#define BRIDGE_CTRL_MEM_SWAP (0x1 << 22)
+#define BRIDGE_CTRL_PAGE_SIZE (0x1 << 21)
+#define BRIDGE_CTRL_SS_PAR_BAD (0x1 << 20)
+#define BRIDGE_CTRL_SS_PAR_EN (0x1 << 19)
+#define BRIDGE_CTRL_SSRAM_SIZE(n) ((n) << 17)
+#define BRIDGE_CTRL_SSRAM_SIZE_MASK (BRIDGE_CTRL_SSRAM_SIZE(0x3))
+#define BRIDGE_CTRL_SSRAM_512K (BRIDGE_CTRL_SSRAM_SIZE(0x3))
+#define BRIDGE_CTRL_SSRAM_128K (BRIDGE_CTRL_SSRAM_SIZE(0x2))
+#define BRIDGE_CTRL_SSRAM_64K (BRIDGE_CTRL_SSRAM_SIZE(0x1))
+#define BRIDGE_CTRL_SSRAM_1K (BRIDGE_CTRL_SSRAM_SIZE(0x0))
+#define BRIDGE_CTRL_F_BAD_PKT (0x1 << 16)
+#define BRIDGE_CTRL_LLP_XBAR_CRD(n) ((n) << 12)
+#define BRIDGE_CTRL_LLP_XBAR_CRD_MASK (BRIDGE_CTRL_LLP_XBAR_CRD(0xf))
+#define BRIDGE_CTRL_CLR_RLLP_CNT (0x1 << 11)
+#define BRIDGE_CTRL_CLR_TLLP_CNT (0x1 << 10)
+#define BRIDGE_CTRL_SYS_END (0x1 << 9)
+#define BRIDGE_CTRL_MAX_TRANS(n) ((n) << 4)
+#define BRIDGE_CTRL_MAX_TRANS_MASK (BRIDGE_CTRL_MAX_TRANS(0x1f))
+#define BRIDGE_CTRL_WIDGET_ID(n) ((n) << 0)
+#define BRIDGE_CTRL_WIDGET_ID_MASK (BRIDGE_CTRL_WIDGET_ID(0xf))
+
+/* Bridge Response buffer Error Upper Register bit fields definition */
+#define BRIDGE_RESP_ERRUPPR_DEVNUM_SHFT (20)
+#define BRIDGE_RESP_ERRUPPR_DEVNUM_MASK (0x7 << BRIDGE_RESP_ERRUPPR_DEVNUM_SHFT)
+#define BRIDGE_RESP_ERRUPPR_BUFNUM_SHFT (16)
+#define BRIDGE_RESP_ERRUPPR_BUFNUM_MASK (0xF << BRIDGE_RESP_ERRUPPR_BUFNUM_SHFT)
+#define BRIDGE_RESP_ERRRUPPR_BUFMASK (0xFFFF)
+
+#define BRIDGE_RESP_ERRUPPR_BUFNUM(x) \
+ (((x) & BRIDGE_RESP_ERRUPPR_BUFNUM_MASK) >> \
+ BRIDGE_RESP_ERRUPPR_BUFNUM_SHFT)
+
+#define BRIDGE_RESP_ERRUPPR_DEVICE(x) \
+ (((x) & BRIDGE_RESP_ERRUPPR_DEVNUM_MASK) >> \
+ BRIDGE_RESP_ERRUPPR_DEVNUM_SHFT)
+
+/* Bridge direct mapping register bits definition */
+#define BRIDGE_DIRMAP_W_ID_SHFT 20
+#define BRIDGE_DIRMAP_W_ID (0xf << BRIDGE_DIRMAP_W_ID_SHFT)
+#define BRIDGE_DIRMAP_RMF_64 (0x1 << 18)
+#define BRIDGE_DIRMAP_ADD512 (0x1 << 17)
+#define BRIDGE_DIRMAP_OFF (0x1ffff << 0)
+#define BRIDGE_DIRMAP_OFF_ADDRSHFT (31) /* lsbit of DIRMAP_OFF is xtalk address bit 31 */
+
+/* Bridge Arbitration register bits definition */
+#define BRIDGE_ARB_REQ_WAIT_TICK(x) ((x) << 16)
+#define BRIDGE_ARB_REQ_WAIT_TICK_MASK BRIDGE_ARB_REQ_WAIT_TICK(0x3)
+#define BRIDGE_ARB_REQ_WAIT_EN(x) ((x) << 8)
+#define BRIDGE_ARB_REQ_WAIT_EN_MASK BRIDGE_ARB_REQ_WAIT_EN(0xff)
+#define BRIDGE_ARB_FREEZE_GNT (1 << 6)
+#define BRIDGE_ARB_HPRI_RING_B2 (1 << 5)
+#define BRIDGE_ARB_HPRI_RING_B1 (1 << 4)
+#define BRIDGE_ARB_HPRI_RING_B0 (1 << 3)
+#define BRIDGE_ARB_LPRI_RING_B2 (1 << 2)
+#define BRIDGE_ARB_LPRI_RING_B1 (1 << 1)
+#define BRIDGE_ARB_LPRI_RING_B0 (1 << 0)
+
+/* Bridge Bus time-out register bits definition */
+#define BRIDGE_BUS_PCI_RETRY_HLD(x) ((x) << 16)
+#define BRIDGE_BUS_PCI_RETRY_HLD_MASK BRIDGE_BUS_PCI_RETRY_HLD(0x1f)
+#define BRIDGE_BUS_GIO_TIMEOUT (1 << 12)
+#define BRIDGE_BUS_PCI_RETRY_CNT(x) ((x) << 0)
+#define BRIDGE_BUS_PCI_RETRY_MASK BRIDGE_BUS_PCI_RETRY_CNT(0x3ff)
+
+/* Bridge interrupt status register bits definition */
+#define BRIDGE_ISR_MULTI_ERR (0x1u << 31)
+#define BRIDGE_ISR_PMU_ESIZE_FAULT (0x1 << 30)
+#define BRIDGE_ISR_UNEXP_RESP (0x1 << 29)
+#define BRIDGE_ISR_BAD_XRESP_PKT (0x1 << 28)
+#define BRIDGE_ISR_BAD_XREQ_PKT (0x1 << 27)
+#define BRIDGE_ISR_RESP_XTLK_ERR (0x1 << 26)
+#define BRIDGE_ISR_REQ_XTLK_ERR (0x1 << 25)
+#define BRIDGE_ISR_INVLD_ADDR (0x1 << 24)
+#define BRIDGE_ISR_UNSUPPORTED_XOP (0x1 << 23)
+#define BRIDGE_ISR_XREQ_FIFO_OFLOW (0x1 << 22)
+#define BRIDGE_ISR_LLP_REC_SNERR (0x1 << 21)
+#define BRIDGE_ISR_LLP_REC_CBERR (0x1 << 20)
+#define BRIDGE_ISR_LLP_RCTY (0x1 << 19)
+#define BRIDGE_ISR_LLP_TX_RETRY (0x1 << 18)
+#define BRIDGE_ISR_LLP_TCTY (0x1 << 17)
+#define BRIDGE_ISR_SSRAM_PERR (0x1 << 16)
+#define BRIDGE_ISR_PCI_ABORT (0x1 << 15)
+#define BRIDGE_ISR_PCI_PARITY (0x1 << 14)
+#define BRIDGE_ISR_PCI_SERR (0x1 << 13)
+#define BRIDGE_ISR_PCI_PERR (0x1 << 12)
+#define BRIDGE_ISR_PCI_MST_TIMEOUT (0x1 << 11)
+#define BRIDGE_ISR_GIO_MST_TIMEOUT BRIDGE_ISR_PCI_MST_TIMEOUT
+#define BRIDGE_ISR_PCI_RETRY_CNT (0x1 << 10)
+#define BRIDGE_ISR_XREAD_REQ_TIMEOUT (0x1 << 9)
+#define BRIDGE_ISR_GIO_B_ENBL_ERR (0x1 << 8)
+#define BRIDGE_ISR_INT_MSK (0xff << 0)
+#define BRIDGE_ISR_INT(x) (0x1 << (x))
+
+#define BRIDGE_ISR_LINK_ERROR \
+ (BRIDGE_ISR_LLP_REC_SNERR|BRIDGE_ISR_LLP_REC_CBERR| \
+ BRIDGE_ISR_LLP_RCTY|BRIDGE_ISR_LLP_TX_RETRY| \
+ BRIDGE_ISR_LLP_TCTY)
+
+#define BRIDGE_ISR_PCIBUS_PIOERR \
+ (BRIDGE_ISR_PCI_MST_TIMEOUT|BRIDGE_ISR_PCI_ABORT)
+
+#define BRIDGE_ISR_PCIBUS_ERROR \
+ (BRIDGE_ISR_PCIBUS_PIOERR|BRIDGE_ISR_PCI_PERR| \
+ BRIDGE_ISR_PCI_SERR|BRIDGE_ISR_PCI_RETRY_CNT| \
+ BRIDGE_ISR_PCI_PARITY)
+
+#define BRIDGE_ISR_XTALK_ERROR \
+ (BRIDGE_ISR_XREAD_REQ_TIMEOUT|BRIDGE_ISR_XREQ_FIFO_OFLOW|\
+ BRIDGE_ISR_UNSUPPORTED_XOP|BRIDGE_ISR_INVLD_ADDR| \
+ BRIDGE_ISR_REQ_XTLK_ERR|BRIDGE_ISR_RESP_XTLK_ERR| \
+ BRIDGE_ISR_BAD_XREQ_PKT|BRIDGE_ISR_BAD_XRESP_PKT| \
+ BRIDGE_ISR_UNEXP_RESP)
+
+#define BRIDGE_ISR_ERRORS \
+ (BRIDGE_ISR_LINK_ERROR|BRIDGE_ISR_PCIBUS_ERROR| \
+ BRIDGE_ISR_XTALK_ERROR|BRIDGE_ISR_SSRAM_PERR| \
+ BRIDGE_ISR_PMU_ESIZE_FAULT)
+
+/*
+ * List of Errors which are fatal and kill the system
+ */
+#define BRIDGE_ISR_ERROR_FATAL \
+ ((BRIDGE_ISR_XTALK_ERROR & ~BRIDGE_ISR_XREAD_REQ_TIMEOUT)|\
+ BRIDGE_ISR_PCI_SERR|BRIDGE_ISR_PCI_PARITY )
+
+#define BRIDGE_ISR_ERROR_DUMP \
+ (BRIDGE_ISR_PCIBUS_ERROR|BRIDGE_ISR_PMU_ESIZE_FAULT| \
+ BRIDGE_ISR_XTALK_ERROR|BRIDGE_ISR_SSRAM_PERR)
+
+/* Bridge interrupt enable register bits definition */
+#define BRIDGE_IMR_UNEXP_RESP BRIDGE_ISR_UNEXP_RESP
+#define BRIDGE_IMR_PMU_ESIZE_FAULT BRIDGE_ISR_PMU_ESIZE_FAULT
+#define BRIDGE_IMR_BAD_XRESP_PKT BRIDGE_ISR_BAD_XRESP_PKT
+#define BRIDGE_IMR_BAD_XREQ_PKT BRIDGE_ISR_BAD_XREQ_PKT
+#define BRIDGE_IMR_RESP_XTLK_ERR BRIDGE_ISR_RESP_XTLK_ERR
+#define BRIDGE_IMR_REQ_XTLK_ERR BRIDGE_ISR_REQ_XTLK_ERR
+#define BRIDGE_IMR_INVLD_ADDR BRIDGE_ISR_INVLD_ADDR
+#define BRIDGE_IMR_UNSUPPORTED_XOP BRIDGE_ISR_UNSUPPORTED_XOP
+#define BRIDGE_IMR_XREQ_FIFO_OFLOW BRIDGE_ISR_XREQ_FIFO_OFLOW
+#define BRIDGE_IMR_LLP_REC_SNERR BRIDGE_ISR_LLP_REC_SNERR
+#define BRIDGE_IMR_LLP_REC_CBERR BRIDGE_ISR_LLP_REC_CBERR
+#define BRIDGE_IMR_LLP_RCTY BRIDGE_ISR_LLP_RCTY
+#define BRIDGE_IMR_LLP_TX_RETRY BRIDGE_ISR_LLP_TX_RETRY
+#define BRIDGE_IMR_LLP_TCTY BRIDGE_ISR_LLP_TCTY
+#define BRIDGE_IMR_SSRAM_PERR BRIDGE_ISR_SSRAM_PERR
+#define BRIDGE_IMR_PCI_ABORT BRIDGE_ISR_PCI_ABORT
+#define BRIDGE_IMR_PCI_PARITY BRIDGE_ISR_PCI_PARITY
+#define BRIDGE_IMR_PCI_SERR BRIDGE_ISR_PCI_SERR
+#define BRIDGE_IMR_PCI_PERR BRIDGE_ISR_PCI_PERR
+#define BRIDGE_IMR_PCI_MST_TIMEOUT BRIDGE_ISR_PCI_MST_TIMEOUT
+#define BRIDGE_IMR_GIO_MST_TIMEOUT BRIDGE_ISR_GIO_MST_TIMEOUT
+#define BRIDGE_IMR_PCI_RETRY_CNT BRIDGE_ISR_PCI_RETRY_CNT
+#define BRIDGE_IMR_XREAD_REQ_TIMEOUT BRIDGE_ISR_XREAD_REQ_TIMEOUT
+#define BRIDGE_IMR_GIO_B_ENBL_ERR BRIDGE_ISR_GIO_B_ENBL_ERR
+#define BRIDGE_IMR_INT_MSK BRIDGE_ISR_INT_MSK
+#define BRIDGE_IMR_INT(x) BRIDGE_ISR_INT(x)
+
+/* Bridge interrupt reset register bits definition */
+#define BRIDGE_IRR_MULTI_CLR (0x1 << 6)
+#define BRIDGE_IRR_CRP_GRP_CLR (0x1 << 5)
+#define BRIDGE_IRR_RESP_BUF_GRP_CLR (0x1 << 4)
+#define BRIDGE_IRR_REQ_DSP_GRP_CLR (0x1 << 3)
+#define BRIDGE_IRR_LLP_GRP_CLR (0x1 << 2)
+#define BRIDGE_IRR_SSRAM_GRP_CLR (0x1 << 1)
+#define BRIDGE_IRR_PCI_GRP_CLR (0x1 << 0)
+#define BRIDGE_IRR_GIO_GRP_CLR (0x1 << 0)
+#define BRIDGE_IRR_ALL_CLR 0x7f
+
+#define BRIDGE_IRR_CRP_GRP (BRIDGE_ISR_UNEXP_RESP | \
+ BRIDGE_ISR_XREQ_FIFO_OFLOW)
+#define BRIDGE_IRR_RESP_BUF_GRP (BRIDGE_ISR_BAD_XRESP_PKT | \
+ BRIDGE_ISR_RESP_XTLK_ERR | \
+ BRIDGE_ISR_XREAD_REQ_TIMEOUT)
+#define BRIDGE_IRR_REQ_DSP_GRP (BRIDGE_ISR_UNSUPPORTED_XOP | \
+ BRIDGE_ISR_BAD_XREQ_PKT | \
+ BRIDGE_ISR_REQ_XTLK_ERR | \
+ BRIDGE_ISR_INVLD_ADDR)
+#define BRIDGE_IRR_LLP_GRP (BRIDGE_ISR_LLP_REC_SNERR | \
+ BRIDGE_ISR_LLP_REC_CBERR | \
+ BRIDGE_ISR_LLP_RCTY | \
+ BRIDGE_ISR_LLP_TX_RETRY | \
+ BRIDGE_ISR_LLP_TCTY)
+#define BRIDGE_IRR_SSRAM_GRP (BRIDGE_ISR_SSRAM_PERR | \
+ BRIDGE_ISR_PMU_ESIZE_FAULT)
+#define BRIDGE_IRR_PCI_GRP (BRIDGE_ISR_PCI_ABORT | \
+ BRIDGE_ISR_PCI_PARITY | \
+ BRIDGE_ISR_PCI_SERR | \
+ BRIDGE_ISR_PCI_PERR | \
+ BRIDGE_ISR_PCI_MST_TIMEOUT | \
+ BRIDGE_ISR_PCI_RETRY_CNT)
+
+#define BRIDGE_IRR_GIO_GRP (BRIDGE_ISR_GIO_B_ENBL_ERR | \
+ BRIDGE_ISR_GIO_MST_TIMEOUT)
+
+/* Bridge INT_DEV register bits definition */
+#define BRIDGE_INT_DEV_SHFT(n) ((n)*3)
+#define BRIDGE_INT_DEV_MASK(n) (0x7 << BRIDGE_INT_DEV_SHFT(n))
+#define BRIDGE_INT_DEV_SET(_dev, _line) (_dev << BRIDGE_INT_DEV_SHFT(_line))
+
+/* Bridge interrupt(x) register bits definition */
+#define BRIDGE_INT_ADDR_HOST 0x0003FF00
+#define BRIDGE_INT_ADDR_FLD 0x000000FF
+
+#define BRIDGE_TMO_PCI_RETRY_HLD_MASK 0x1f0000
+#define BRIDGE_TMO_GIO_TIMEOUT_MASK 0x001000
+#define BRIDGE_TMO_PCI_RETRY_CNT_MASK 0x0003ff
+
+#define BRIDGE_TMO_PCI_RETRY_CNT_MAX 0x3ff
+
+/*
+ * The NASID should be shifted by this amount and stored into the
+ * interrupt(x) register.
+ */
+#define BRIDGE_INT_ADDR_NASID_SHFT 8
+
+/*
+ * The BRIDGE_INT_ADDR_DEST_IO bit should be set to send an interrupt to
+ * memory.
+ */
+#define BRIDGE_INT_ADDR_DEST_IO (1 << 17)
+#define BRIDGE_INT_ADDR_DEST_MEM 0
+#define BRIDGE_INT_ADDR_MASK (1 << 17)
+
+/* Bridge device(x) register bits definition */
+#define BRIDGE_DEV_ERR_LOCK_EN 0x10000000
+#define BRIDGE_DEV_PAGE_CHK_DIS 0x08000000
+#define BRIDGE_DEV_FORCE_PCI_PAR 0x04000000
+#define BRIDGE_DEV_VIRTUAL_EN 0x02000000
+#define BRIDGE_DEV_PMU_WRGA_EN 0x01000000
+#define BRIDGE_DEV_DIR_WRGA_EN 0x00800000
+#define BRIDGE_DEV_DEV_SIZE 0x00400000
+#define BRIDGE_DEV_RT 0x00200000
+#define BRIDGE_DEV_SWAP_PMU 0x00100000
+#define BRIDGE_DEV_SWAP_DIR 0x00080000
+#define BRIDGE_DEV_PREF 0x00040000
+#define BRIDGE_DEV_PRECISE 0x00020000
+#define BRIDGE_DEV_COH 0x00010000
+#define BRIDGE_DEV_BARRIER 0x00008000
+#define BRIDGE_DEV_GBR 0x00004000
+#define BRIDGE_DEV_DEV_SWAP 0x00002000
+#define BRIDGE_DEV_DEV_IO_MEM 0x00001000
+#define BRIDGE_DEV_OFF_MASK 0x00000fff
+#define BRIDGE_DEV_OFF_ADDR_SHFT 20
+
+#define BRIDGE_DEV_PMU_BITS (BRIDGE_DEV_PMU_WRGA_EN | \
+ BRIDGE_DEV_SWAP_PMU)
+#define BRIDGE_DEV_D32_BITS (BRIDGE_DEV_DIR_WRGA_EN | \
+ BRIDGE_DEV_SWAP_DIR | \
+ BRIDGE_DEV_PREF | \
+ BRIDGE_DEV_PRECISE | \
+ BRIDGE_DEV_COH | \
+ BRIDGE_DEV_BARRIER)
+#define BRIDGE_DEV_D64_BITS (BRIDGE_DEV_DIR_WRGA_EN | \
+ BRIDGE_DEV_SWAP_DIR | \
+ BRIDGE_DEV_COH | \
+ BRIDGE_DEV_BARRIER)
+
+/* Bridge Error Upper register bit field definition */
+#define BRIDGE_ERRUPPR_DEVMASTER (0x1 << 20) /* Device was master */
+#define BRIDGE_ERRUPPR_PCIVDEV (0x1 << 19) /* Virtual Req value */
+#define BRIDGE_ERRUPPR_DEVNUM_SHFT (16)
+#define BRIDGE_ERRUPPR_DEVNUM_MASK (0x7 << BRIDGE_ERRUPPR_DEVNUM_SHFT)
+#define BRIDGE_ERRUPPR_DEVICE(err) (((err) >> BRIDGE_ERRUPPR_DEVNUM_SHFT) & 0x7)
+#define BRIDGE_ERRUPPR_ADDRMASK (0xFFFF)
+
+/* Bridge interrupt mode register bits definition */
+#define BRIDGE_INTMODE_CLR_PKT_EN(x) (0x1 << (x))
+
+/* this should be written to the xbow's link_control(x) register */
+#define BRIDGE_CREDIT 3
+
+/* RRB assignment register */
+#define BRIDGE_RRB_EN 0x8 /* after shifting down */
+#define BRIDGE_RRB_DEV 0x7 /* after shifting down */
+#define BRIDGE_RRB_VDEV 0x4 /* after shifting down */
+#define BRIDGE_RRB_PDEV 0x3 /* after shifting down */
+
+/* RRB status register */
+#define BRIDGE_RRB_VALID(r) (0x00010000<<(r))
+#define BRIDGE_RRB_INUSE(r) (0x00000001<<(r))
+
+/* RRB clear register */
+#define BRIDGE_RRB_CLEAR(r) (0x00000001<<(r))
+
+/* xbox system controller declarations */
+#define XBOX_BRIDGE_WID 8
+#define FLASH_PROM1_BASE 0xE00000 /* To read the xbox sysctlr status */
+#define XBOX_RPS_EXISTS 1 << 6 /* RPS bit in status register */
+#define XBOX_RPS_FAIL 1 << 4 /* RPS status bit in register */
+
+/* ========================================================================
+ */
+/*
+ * Macros for Xtalk to Bridge bus (PCI/GIO) PIO.
+ * Refer to section 4.2.1 of the Bridge Spec for xtalk to PCI/GIO PIO mappings.
+ */
+/* XTALK addresses that map into Bridge Bus addr space */
+#define BRIDGE_PIO32_XTALK_ALIAS_BASE 0x000040000000L
+#define BRIDGE_PIO32_XTALK_ALIAS_LIMIT 0x00007FFFFFFFL
+#define BRIDGE_PIO64_XTALK_ALIAS_BASE 0x000080000000L
+#define BRIDGE_PIO64_XTALK_ALIAS_LIMIT 0x0000BFFFFFFFL
+#define BRIDGE_PCIIO_XTALK_ALIAS_BASE 0x000100000000L
+#define BRIDGE_PCIIO_XTALK_ALIAS_LIMIT 0x0001FFFFFFFFL
+
+/* Ranges of PCI bus space that can be accessed via PIO from xtalk */
+#define BRIDGE_MIN_PIO_ADDR_MEM 0x00000000 /* 1G PCI memory space */
+#define BRIDGE_MAX_PIO_ADDR_MEM 0x3fffffff
+#define BRIDGE_MIN_PIO_ADDR_IO 0x00000000 /* 4G PCI IO space */
+#define BRIDGE_MAX_PIO_ADDR_IO 0xffffffff
+
+/* XTALK addresses that map into PCI addresses */
+#define BRIDGE_PCI_MEM32_BASE BRIDGE_PIO32_XTALK_ALIAS_BASE
+#define BRIDGE_PCI_MEM32_LIMIT BRIDGE_PIO32_XTALK_ALIAS_LIMIT
+#define BRIDGE_PCI_MEM64_BASE BRIDGE_PIO64_XTALK_ALIAS_BASE
+#define BRIDGE_PCI_MEM64_LIMIT BRIDGE_PIO64_XTALK_ALIAS_LIMIT
+#define BRIDGE_PCI_IO_BASE BRIDGE_PCIIO_XTALK_ALIAS_BASE
+#define BRIDGE_PCI_IO_LIMIT BRIDGE_PCIIO_XTALK_ALIAS_LIMIT
+
+/*
+ * Macros for Bridge bus (PCI/GIO) to Xtalk DMA
+ */
+/* Bridge Bus DMA addresses */
+#define BRIDGE_LOCAL_BASE 0
+#define BRIDGE_DMA_MAPPED_BASE 0x40000000
+#define BRIDGE_DMA_MAPPED_SIZE 0x40000000 /* 1G Bytes */
+#define BRIDGE_DMA_DIRECT_BASE 0x80000000
+#define BRIDGE_DMA_DIRECT_SIZE 0x80000000 /* 2G Bytes */
+
+#define PCI32_LOCAL_BASE BRIDGE_LOCAL_BASE
+
+/* PCI addresses of regions decoded by Bridge for DMA */
+#define PCI32_MAPPED_BASE BRIDGE_DMA_MAPPED_BASE
+#define PCI32_DIRECT_BASE BRIDGE_DMA_DIRECT_BASE
+
+#define IS_PCI32_LOCAL(x) ((ulong_t)(x) < PCI32_MAPPED_BASE)
+#define IS_PCI32_MAPPED(x) ((ulong_t)(x) < PCI32_DIRECT_BASE && \
+ (ulong_t)(x) >= PCI32_MAPPED_BASE)
+#define IS_PCI32_DIRECT(x) ((ulong_t)(x) >= PCI32_MAPPED_BASE)
+#define IS_PCI64(x) ((ulong_t)(x) >= PCI64_BASE)
+
+/*
+ * The GIO address space.
+ */
+/* Xtalk to GIO PIO */
+#define BRIDGE_GIO_MEM32_BASE BRIDGE_PIO32_XTALK_ALIAS_BASE
+#define BRIDGE_GIO_MEM32_LIMIT BRIDGE_PIO32_XTALK_ALIAS_LIMIT
+
+#define GIO_LOCAL_BASE BRIDGE_LOCAL_BASE
+
+/* GIO addresses of regions decoded by Bridge for DMA */
+#define GIO_MAPPED_BASE BRIDGE_DMA_MAPPED_BASE
+#define GIO_DIRECT_BASE BRIDGE_DMA_DIRECT_BASE
+
+#define IS_GIO_LOCAL(x) ((ulong_t)(x) < GIO_MAPPED_BASE)
+#define IS_GIO_MAPPED(x) ((ulong_t)(x) < GIO_DIRECT_BASE && \
+ (ulong_t)(x) >= GIO_MAPPED_BASE)
+#define IS_GIO_DIRECT(x) ((ulong_t)(x) >= GIO_MAPPED_BASE)
+
+/* PCI to xtalk mapping */
+
+/* Given a DIR_OFF value and a PCI/GIO 32-bit direct address, determine
+ * which xtalk address is accessed.
+ */
+#define BRIDGE_DIRECT_32_SEG_SIZE BRIDGE_DMA_DIRECT_SIZE
+#define BRIDGE_DIRECT_32_TO_XTALK(dir_off,adr) \
+ ((dir_off) * BRIDGE_DIRECT_32_SEG_SIZE + \
+ ((adr) & (BRIDGE_DIRECT_32_SEG_SIZE - 1)) + PHYS_RAMBASE)
+
+/* 64-bit address attribute masks */
+#define PCI64_ATTR_TARG_MASK 0xf000000000000000
+#define PCI64_ATTR_TARG_SHFT 60
+#define PCI64_ATTR_PREF 0x0800000000000000
+#define PCI64_ATTR_PREC 0x0400000000000000
+#define PCI64_ATTR_VIRTUAL 0x0200000000000000
+#define PCI64_ATTR_BAR 0x0100000000000000
+#define PCI64_ATTR_RMF_MASK 0x00ff000000000000
+#define PCI64_ATTR_RMF_SHFT 48
+
+struct bridge_controller {
+ struct resource busn;
+ struct bridge_regs *base;
+ unsigned long baddr;
+ unsigned long intr_addr;
+ struct irq_domain *domain;
+ unsigned int pci_int[8][2];
+ unsigned int int_mapping[8][2];
+ u32 ioc3_sid[8];
+ nasid_t nasid;
+};
+
+#define BRIDGE_CONTROLLER(bus) \
+ ((struct bridge_controller *)((bus)->sysdata))
+
+#define bridge_read(bc, reg) __raw_readl(&bc->base->reg)
+#define bridge_write(bc, reg, val) __raw_writel(val, &bc->base->reg)
+#define bridge_set(bc, reg, val) \
+ __raw_writel(__raw_readl(&bc->base->reg) | (val), &bc->base->reg)
+#define bridge_clr(bc, reg, val) \
+ __raw_writel(__raw_readl(&bc->base->reg) & ~(val), &bc->base->reg)
+
+#endif /* _ASM_PCI_BRIDGE_H */
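/*
 * Illustrative sketch (not part of the patch above): the bridge_read()/
 * bridge_write() accessors at the end of this header are the intended way
 * to touch Bridge registers. Handler name and register choice are
 * assumptions for illustration.
 */
static void my_bridge_clear_pci_errors(struct pci_bus *bus)
{
	struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
	u32 isr = bridge_read(bc, b_int_status);	/* latch current status */

	if (isr & BRIDGE_ISR_PCIBUS_ERROR)
		bridge_write(bc, b_int_rst_stat, BRIDGE_IRR_PCI_GRP_CLR);
}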
diff --git a/arch/mips/include/asm/perf_event.h b/arch/mips/include/asm/perf_event.h
new file mode 100644
index 000000000..0babf6bbb
--- /dev/null
+++ b/arch/mips/include/asm/perf_event.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/arch/mips/include/asm/perf_event.h
+ *
+ * Copyright (C) 2010 MIPS Technologies, Inc.
+ * Author: Deng-Cheng Zhu
+ */
+
+#ifndef __MIPS_PERF_EVENT_H__
+#define __MIPS_PERF_EVENT_H__
+/* Leave it empty here. The file is required by linux/perf_event.h */
+#endif /* __MIPS_PERF_EVENT_H__ */
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
new file mode 100644
index 000000000..71153c369
--- /dev/null
+++ b/arch/mips/include/asm/pgalloc.h
@@ -0,0 +1,112 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 2001, 2003 by Ralf Baechle
+ * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_PGALLOC_H
+#define _ASM_PGALLOC_H
+
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#define __HAVE_ARCH_PMD_ALLOC_ONE
+#define __HAVE_ARCH_PUD_ALLOC_ONE
+#define __HAVE_ARCH_PGD_FREE
+#include <asm-generic/pgalloc.h>
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+{
+ set_pmd(pmd, __pmd((unsigned long)pte));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ pgtable_t pte)
+{
+ set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
+}
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+/*
+ * Initialize a new pmd table with invalid pointers.
+ */
+extern void pmd_init(unsigned long page, unsigned long pagetable);
+
+#ifndef __PAGETABLE_PMD_FOLDED
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ set_pud(pud, __pud((unsigned long)pmd));
+}
+#endif
+
+/*
+ * Initialize a new pgd / pmd table with invalid pointers.
+ */
+extern void pgd_init(unsigned long page);
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_pages((unsigned long)pgd, PGD_ORDER);
+}
+
+#define __pte_free_tlb(tlb,pte,address) \
+do { \
+ pgtable_pte_page_dtor(pte); \
+ tlb_remove_page((tlb), pte); \
+} while (0)
+
+#ifndef __PAGETABLE_PMD_FOLDED
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pmd_t *pmd;
+ struct page *pg;
+
+ pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER);
+ if (!pg)
+ return NULL;
+
+ if (!pgtable_pmd_page_ctor(pg)) {
+ __free_pages(pg, PMD_ORDER);
+ return NULL;
+ }
+
+ pmd = (pmd_t *)page_address(pg);
+ pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
+ return pmd;
+}
+
+#define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x)
+
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pud_t *pud;
+
+ pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER);
+ if (pud)
+ pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
+ return pud;
+}
+
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
+{
+ set_p4d(p4d, __p4d((unsigned long)pud));
+}
+
+#define __pud_free_tlb(tlb, x, addr) pud_free((tlb)->mm, x)
+
+#endif /* __PAGETABLE_PUD_FOLDED */
+
+extern void pagetable_init(void);
+
+#endif /* _ASM_PGALLOC_H */
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
new file mode 100644
index 000000000..6c0532d7b
--- /dev/null
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -0,0 +1,248 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_PGTABLE_32_H
+#define _ASM_PGTABLE_32_H
+
+#include <asm/addrspace.h>
+#include <asm/page.h>
+
+#include <linux/linkage.h>
+#include <asm/cachectl.h>
+#include <asm/fixmap.h>
+
+#include <asm-generic/pgtable-nopmd.h>
+
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+#endif
+
+/*
+ * Regarding 32-bit MIPS huge page support (and the tradeoff it entails):
+ *
+ * We use the same huge page sizes as 64-bit MIPS. Assuming a 4KB page size,
+ * our 2-level table layout would normally have a PGD entry cover a contiguous
+ * 4MB virtual address region (pointing to a 4KB PTE page of 1,024 32-bit pte_t
+ * pointers, each pointing to a 4KB physical page). The problem is that 4MB,
+ * spanning both halves of a TLB EntryLo0,1 pair, requires 2MB hardware page
+ * support, not one of the standard supported sizes (1MB,4MB,16MB,...).
+ * To correct for this, when huge pages are enabled, we halve the number of
+ * pointers a PTE page holds, making its last half go to waste. Correspondingly,
+ * we double the number of PGD pages. Overall, page table memory overhead
+ * increases to match 64-bit MIPS, but PTE lookups remain CPU cache-friendly.
+ *
+ * NOTE: We don't yet support huge pages if extended-addressing is enabled
+ * (i.e. EVA, XPA, 36-bit Alchemy/Netlogic).
+ */
+
+extern int temp_tlb_entry;
+
+/*
+ * - add_temporary_entry() adds a temporary TLB entry. We use TLB entries
+ * starting at the top and working down. This is for populating the
+ * TLB before trap_init() puts the TLB miss handler in place. It
+ * should be used only for entries matching the actual page tables,
+ * to prevent inconsistencies.
+ */
+extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
+ unsigned long entryhi, unsigned long pagemask);
+
+/*
+ * Basically we have the same two-level (the logical three-level Linux
+ * page table layout, folded) page tables as the i386. Some day, when we
+ * have proper page coloring support, we can have a 1% quicker TLB
+ * refill handling mechanism; for now it is a bit slower, but it works
+ * even with the cache aliasing problem the R4k and above have.
+ */
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
+# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2 - 1)
+#else
+# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
+#endif
+
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * Entries per page directory level: we use two-level, so
+ * we don't really have any PUD/PMD directory physically.
+ */
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
+# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1)
+#else
+# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
+#endif
+
+#define PGD_ORDER (__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
+#define PUD_ORDER aieeee_attempt_to_allocate_pud
+#define PMD_ORDER aieeee_attempt_to_allocate_pmd
+#define PTE_ORDER 0
+
+#define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2)
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
+# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t) / 2)
+#else
+# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+#endif
+
+#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0UL
+
+#define VMALLOC_START MAP_BASE
+
+#define PKMAP_END ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
+#define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP)
+
+#ifdef CONFIG_HIGHMEM
+# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
+#else
+# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
+#endif
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
+#else
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#endif
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+extern void load_pgd(unsigned long pg_dir);
+
+extern pte_t invalid_pte_table[PTRS_PER_PTE];
+
+/*
+ * Empty pgd/pmd entries point to the invalid_pte_table.
+ */
+static inline int pmd_none(pmd_t pmd)
+{
+ return pmd_val(pmd) == (unsigned long) invalid_pte_table;
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ /* pmd_huge(pmd) but inline */
+ if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+ return 0;
+#endif
+
+ if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
+ return 1;
+
+ return 0;
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+ return pmd_val(pmd) != (unsigned long) invalid_pte_table;
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
+}
+
+#if defined(CONFIG_XPA)
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
+#define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
+static inline pte_t
+pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+ pte_t pte;
+
+ pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
+ (pgprot_val(prot) & ~_PFNX_MASK);
+ pte.pte_high = (pfn << _PFN_SHIFT) |
+ (pgprot_val(prot) & ~_PFN_MASK);
+ return pte;
+}
+
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
+#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))
+
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+ pte_t pte;
+
+ pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
+ pte.pte_low = pgprot_val(prot);
+
+ return pte;
+}
+
+#else
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#ifdef CONFIG_CPU_VR41XX
+#define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
+#define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
+#else
+#define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT))
+#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#endif
+#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+#if defined(CONFIG_CPU_R3K_TLB)
+
+/* Swap entries must have VALID bit cleared. */
+#define __swp_type(x) (((x).val >> 10) & 0x1f)
+#define __swp_offset(x) ((x).val >> 15)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#else
+
+#if defined(CONFIG_XPA)
+
+/* Swap entries must have VALID and GLOBAL bits cleared. */
+#define __swp_type(x) (((x).val >> 4) & 0x1f)
+#define __swp_offset(x) ((x).val >> 9)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 9) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
+#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
+
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
+/* Swap entries must have VALID and GLOBAL bits cleared. */
+#define __swp_type(x) (((x).val >> 2) & 0x1f)
+#define __swp_offset(x) ((x).val >> 7)
+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
+#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
+
+#else
+/*
+ * Constraints:
+ * _PAGE_PRESENT at bit 0
+ * _PAGE_MODIFIED at bit 4
+ * _PAGE_GLOBAL at bit 6
+ * _PAGE_VALID at bit 7
+ */
+#define __swp_type(x) (((x).val >> 8) & 0x1f)
+#define __swp_offset(x) ((x).val >> 13)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
+
+#endif /* defined(CONFIG_CPU_R3K_TLB) */
+
+#endif /* _ASM_PGTABLE_32_H */
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
new file mode 100644
index 000000000..b865edff2
--- /dev/null
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -0,0 +1,346 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_PGTABLE_64_H
+#define _ASM_PGTABLE_64_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+
+#include <asm/addrspace.h>
+#include <asm/page.h>
+#include <asm/cachectl.h>
+#include <asm/fixmap.h>
+
+#if CONFIG_PGTABLE_LEVELS == 2
+#include <asm-generic/pgtable-nopmd.h>
+#elif CONFIG_PGTABLE_LEVELS == 3
+#include <asm-generic/pgtable-nopud.h>
+#else
+#include <asm-generic/pgtable-nop4d.h>
+#endif
+
+/*
+ * Each address space has 2 4K pages as its page directory, giving 1024
+ * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
+ * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
+ * tables. Each page table is also a single 4K page, giving 512 (==
+ * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
+ * invalid_pmd_table, each pmd entry is initialized to point to
+ * invalid_pte_table, each pte is initialized to 0.
+ *
+ * Kernel mappings: kernel mappings are held in the swapper_pg_table.
+ * The layout is identical to userspace except it's indexed with the
+ * fault address - VMALLOC_START.
+ */
+
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#ifdef __PAGETABLE_PMD_FOLDED
+#define PGDIR_SHIFT (PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
+#else
+
+/* PMD_SHIFT determines the size of the area a second-level page table can map */
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+# ifdef __PAGETABLE_PUD_FOLDED
+# define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+# endif
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
+#endif
+
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * For 4kB page size we use a 3 level page tree and an 8kB pgd, which
+ * permits mapping 40 bits of virtual address space.
+ *
+ * We used to implement 41 bits by having an order 1 pmd level but that seemed
+ * rather pointless.
+ *
+ * For 8kB page size we use a 3 level page tree which permits a total of
+ * 8TB of address space. Alternatively a 33-bit / 8GB organization using
+ * two levels would be easy to implement.
+ *
+ * For 16kB page size we use a 2 level page tree which permits a total of
+ * 36 bits of virtual address space. We could add a third level but it seems
+ * like at the moment there's no need for this.
+ *
+ * For 64kB page size we use a 2 level page table tree for a total of 42 bits
+ * of virtual address space.
+ */
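As a quick sanity check of the 40-bit figure above, here is a small stand-alone
sketch (ordinary user-space C, not part of this header) that re-derives the
shift arithmetic for the default 4kB-page, 3-level layout; the constants mirror
PAGE_SHIFT == 12, PTE_ORDER == 0, PMD_ORDER == 0 and PGD_ORDER == 1 below.

	#include <stdio.h>

	int main(void)
	{
		const int page_shift = 12;		/* 4kB pages */
		const int pte_order = 0, pmd_order = 0, pgd_order = 1;
		/* PMD_SHIFT = PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3) */
		int pmd_shift = page_shift + (page_shift + pte_order - 3);	/* 21 */
		/* PGDIR_SHIFT = PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3) */
		int pgdir_shift = pmd_shift + (page_shift + pmd_order - 3);	/* 30 */
		/* PTRS_PER_PGD = (PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t) */
		long ptrs_per_pgd = ((1L << page_shift) << pgd_order) / 8;	/* 1024 */

		/* 1024 PGD entries, each covering 1 << 30 bytes: 2^40 bytes of VA. */
		printf("PMD_SHIFT=%d PGDIR_SHIFT=%d PTRS_PER_PGD=%ld\n",
		       pmd_shift, pgdir_shift, ptrs_per_pgd);
		return 0;
	}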
+#ifdef CONFIG_PAGE_SIZE_4KB
+# ifdef CONFIG_MIPS_VA_BITS_48
+# define PGD_ORDER 0
+# define PUD_ORDER 0
+# else
+# define PGD_ORDER 1
+# define PUD_ORDER aieeee_attempt_to_allocate_pud
+# endif
+#define PMD_ORDER 0
+#define PTE_ORDER 0
+#endif
+#ifdef CONFIG_PAGE_SIZE_8KB
+#define PGD_ORDER 0
+#define PUD_ORDER aieeee_attempt_to_allocate_pud
+#define PMD_ORDER 0
+#define PTE_ORDER 0
+#endif
+#ifdef CONFIG_PAGE_SIZE_16KB
+#ifdef CONFIG_MIPS_VA_BITS_48
+#define PGD_ORDER 1
+#else
+#define PGD_ORDER 0
+#endif
+#define PUD_ORDER aieeee_attempt_to_allocate_pud
+#define PMD_ORDER 0
+#define PTE_ORDER 0
+#endif
+#ifdef CONFIG_PAGE_SIZE_32KB
+#define PGD_ORDER 0
+#define PUD_ORDER aieeee_attempt_to_allocate_pud
+#define PMD_ORDER 0
+#define PTE_ORDER 0
+#endif
+#ifdef CONFIG_PAGE_SIZE_64KB
+#define PGD_ORDER 0
+#define PUD_ORDER aieeee_attempt_to_allocate_pud
+#ifdef CONFIG_MIPS_VA_BITS_48
+#define PMD_ORDER 0
+#else
+#define PMD_ORDER aieeee_attempt_to_allocate_pmd
+#endif
+#define PTE_ORDER 0
+#endif
+
+#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#ifndef __PAGETABLE_PUD_FOLDED
+#define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) / sizeof(pud_t))
+#endif
+#ifndef __PAGETABLE_PMD_FOLDED
+#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
+#endif
+#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+
+#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
+#define FIRST_USER_ADDRESS 0UL
+
+/*
+ * TLB refill handlers also map the vmalloc area into xuseg. Avoid
+ * the first couple of pages so NULL pointer dereferences will still
+ * reliably trap.
+ */
+#define VMALLOC_START (MAP_BASE + (2 * PAGE_SIZE))
+#define VMALLOC_END \
+ (MAP_BASE + \
+ min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
+ (1UL << cpu_vmbits)) - (1UL << 32))
+
+#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
+ VMALLOC_START != CKSSEG
+/* Load modules into 32bit-compatible segment. */
+#define MODULE_START CKSSEG
+#define MODULE_END (FIXADDR_START-2*PAGE_SIZE)
+#endif
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#endif
+#ifndef __PAGETABLE_PUD_FOLDED
+#define pud_ERROR(e) \
+ printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+#endif
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+extern pte_t invalid_pte_table[PTRS_PER_PTE];
+
+#ifndef __PAGETABLE_PUD_FOLDED
+/*
+ * For 4-level pagetables we define these ourselves, for 3-level the
+ * definitions are below and for 2-level the definitions are supplied
+ * by <asm-generic/pgtable-nopmd.h>.
+ */
+typedef struct { unsigned long pud; } pud_t;
+#define pud_val(x) ((x).pud)
+#define __pud(x) ((pud_t) { (x) })
+
+extern pud_t invalid_pud_table[PTRS_PER_PUD];
+
+/*
+ * Empty pgd entries point to the invalid_pud_table.
+ */
+static inline int p4d_none(p4d_t p4d)
+{
+ return p4d_val(p4d) == (unsigned long)invalid_pud_table;
+}
+
+static inline int p4d_bad(p4d_t p4d)
+{
+ if (unlikely(p4d_val(p4d) & ~PAGE_MASK))
+ return 1;
+
+ return 0;
+}
+
+static inline int p4d_present(p4d_t p4d)
+{
+ return p4d_val(p4d) != (unsigned long)invalid_pud_table;
+}
+
+static inline void p4d_clear(p4d_t *p4dp)
+{
+ p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
+}
+
+static inline pud_t *p4d_pgtable(p4d_t p4d)
+{
+ return (pud_t *)p4d_val(p4d);
+}
+
+#define p4d_phys(p4d) virt_to_phys((void *)p4d_val(p4d))
+#define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
+
+#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
+
+static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
+{
+ *p4d = p4dval;
+}
+
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+/*
+ * For 3-level pagetables we define these ourselves, for 2-level the
+ * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
+ */
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) } )
+
+
+extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
+#endif
+
+/*
+ * Empty pgd/pmd entries point to the invalid_pte_table.
+ */
+static inline int pmd_none(pmd_t pmd)
+{
+ return pmd_val(pmd) == (unsigned long) invalid_pte_table;
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ /* pmd_huge(pmd) but inline */
+ if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+ return 0;
+#endif
+
+ if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
+ return 1;
+
+ return 0;
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+ return pmd_val(pmd) & _PAGE_PRESENT;
+#endif
+
+ return pmd_val(pmd) != (unsigned long) invalid_pte_table;
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
+}
+#ifndef __PAGETABLE_PMD_FOLDED
+
+/*
+ * Empty pud entries point to the invalid_pmd_table.
+ */
+static inline int pud_none(pud_t pud)
+{
+ return pud_val(pud) == (unsigned long) invalid_pmd_table;
+}
+
+static inline int pud_bad(pud_t pud)
+{
+ return pud_val(pud) & ~PAGE_MASK;
+}
+
+static inline int pud_present(pud_t pud)
+{
+ return pud_val(pud) != (unsigned long) invalid_pmd_table;
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+ pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
+}
+#endif
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+#ifdef CONFIG_CPU_VR41XX
+#define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
+#define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
+#else
+#define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT))
+#define pfn_pte(pfn, prot) __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+static inline pmd_t *pud_pgtable(pud_t pud)
+{
+ return (pmd_t *)pud_val(pud);
+}
+#define pud_phys(pud) virt_to_phys((void *)pud_val(pud))
+#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
+
+#endif
+
+/*
+ * Initialize a new pgd / pmd table with invalid pointers.
+ */
+extern void pgd_init(unsigned long page);
+extern void pud_init(unsigned long page, unsigned long pagetable);
+extern void pmd_init(unsigned long page, unsigned long pagetable);
+
+/*
+ * Non-present pages: high 40 bits are offset, next 8 bits type,
+ * low 16 bits zero.
+ */
+static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
+{ pte_t pte; pte_val(pte) = (type << 16) | (offset << 24); return pte; }
+
+#define __swp_type(x) (((x).val >> 16) & 0xff)
+#define __swp_offset(x) ((x).val >> 24)
+#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
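For illustration only (hypothetical values, not part of this header), packing
and unpacking a swap entry with the 64-bit layout defined above works out as:

	/* __swp_entry(3, 0x1234): type in bits 16..23, offset from bit 24 up */
	unsigned long val  = (3UL << 16) | (0x1234UL << 24);
	unsigned long type = (val >> 16) & 0xff;	/* __swp_type()   -> 3      */
	unsigned long off  = val >> 24;			/* __swp_offset() -> 0x1234 */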
+
+#endif /* _ASM_PGTABLE_64_H */
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
new file mode 100644
index 000000000..2362842ee
--- /dev/null
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -0,0 +1,285 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 2002 by Ralf Baechle
+ * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
+ * Copyright (C) 2002 Maciej W. Rozycki
+ */
+#ifndef _ASM_PGTABLE_BITS_H
+#define _ASM_PGTABLE_BITS_H
+
+
+/*
+ * Note that we shift the lower 32bits of each EntryLo[01] entry
+ * 6 bits to the left. That way we can convert the PFN into the
+ * physical address by a single 'and' operation and gain 6 additional
+ * bits for storing information which isn't present in a normal
+ * MIPS page table.
+ *
+ * Similar to the Alpha port, we need to keep track of the ref
+ * and mod bits in software. We have a software "yeah you can read
+ * from this page" bit, and a hardware one which actually lets the
+ * process read from the page. By the same token we have a software
+ * writable bit and the real hardware one which actually lets the
+ * process write to the page; together they maintain the mod bit via
+ * the hardware dirty bit.
+ *
+ * Certain revisions of the R4000 and R5000 have a bug where if a
+ * certain sequence occurs in the last 3 instructions of an executable
+ * page, and the following page is not mapped, the cpu can do
+ * unpredictable things. The code (when it is written) to deal with
+ * this problem will be in the update_mmu_cache() code for the r4k.
+ */
+#if defined(CONFIG_XPA)
+
+/*
+ * Page table bit offsets used for 64 bit physical addressing on
+ * MIPS32r5 with XPA.
+ */
+enum pgtable_bits {
+ /* Used by TLB hardware (placed in EntryLo*) */
+ _PAGE_NO_EXEC_SHIFT,
+ _PAGE_NO_READ_SHIFT,
+ _PAGE_GLOBAL_SHIFT,
+ _PAGE_VALID_SHIFT,
+ _PAGE_DIRTY_SHIFT,
+ _CACHE_SHIFT,
+
+ /* Used only by software (masked out before writing EntryLo*) */
+ _PAGE_PRESENT_SHIFT = 24,
+ _PAGE_WRITE_SHIFT,
+ _PAGE_ACCESSED_SHIFT,
+ _PAGE_MODIFIED_SHIFT,
+#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
+ _PAGE_SPECIAL_SHIFT,
+#endif
+#if defined(CONFIG_HAVE_ARCH_SOFT_DIRTY)
+ _PAGE_SOFT_DIRTY_SHIFT,
+#endif
+};
+
+/*
+ * Bits for extended EntryLo0/EntryLo1 registers
+ */
+#define _PFNX_MASK 0xffffff
+
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
+/*
+ * Page table bit offsets used for 36 bit physical addressing on MIPS32,
+ * for example with Alchemy or Netlogic XLP/XLR.
+ */
+enum pgtable_bits {
+ /* Used by TLB hardware (placed in EntryLo*) */
+ _PAGE_GLOBAL_SHIFT,
+ _PAGE_VALID_SHIFT,
+ _PAGE_DIRTY_SHIFT,
+ _CACHE_SHIFT,
+
+ /* Used only by software (masked out before writing EntryLo*) */
+ _PAGE_PRESENT_SHIFT = _CACHE_SHIFT + 3,
+ _PAGE_NO_READ_SHIFT,
+ _PAGE_WRITE_SHIFT,
+ _PAGE_ACCESSED_SHIFT,
+ _PAGE_MODIFIED_SHIFT,
+#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
+ _PAGE_SPECIAL_SHIFT,
+#endif
+#if defined(CONFIG_HAVE_ARCH_SOFT_DIRTY)
+ _PAGE_SOFT_DIRTY_SHIFT,
+#endif
+};
+
+#elif defined(CONFIG_CPU_R3K_TLB)
+
+/* Page table bits used for r3k systems */
+enum pgtable_bits {
+ /* Used only by software (writes to EntryLo ignored) */
+ _PAGE_PRESENT_SHIFT,
+ _PAGE_NO_READ_SHIFT,
+ _PAGE_WRITE_SHIFT,
+ _PAGE_ACCESSED_SHIFT,
+ _PAGE_MODIFIED_SHIFT,
+#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
+ _PAGE_SPECIAL_SHIFT,
+#endif
+#if defined(CONFIG_HAVE_ARCH_SOFT_DIRTY)
+ _PAGE_SOFT_DIRTY_SHIFT,
+#endif
+
+ /* Used by TLB hardware (placed in EntryLo) */
+ _PAGE_GLOBAL_SHIFT = 8,
+ _PAGE_VALID_SHIFT,
+ _PAGE_DIRTY_SHIFT,
+ _CACHE_UNCACHED_SHIFT,
+};
+
+#else
+
+/* Page table bits used for r4k systems */
+enum pgtable_bits {
+ /* Used only by software (masked out before writing EntryLo*) */
+ _PAGE_PRESENT_SHIFT,
+#if !defined(CONFIG_CPU_HAS_RIXI)
+ _PAGE_NO_READ_SHIFT,
+#endif
+ _PAGE_WRITE_SHIFT,
+ _PAGE_ACCESSED_SHIFT,
+ _PAGE_MODIFIED_SHIFT,
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
+ _PAGE_HUGE_SHIFT,
+#endif
+#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
+ _PAGE_SPECIAL_SHIFT,
+#endif
+#if defined(CONFIG_HAVE_ARCH_SOFT_DIRTY)
+ _PAGE_SOFT_DIRTY_SHIFT,
+#endif
+ /* Used by TLB hardware (placed in EntryLo*) */
+#if defined(CONFIG_CPU_HAS_RIXI)
+ _PAGE_NO_EXEC_SHIFT,
+ _PAGE_NO_READ_SHIFT,
+#endif
+ _PAGE_GLOBAL_SHIFT,
+ _PAGE_VALID_SHIFT,
+ _PAGE_DIRTY_SHIFT,
+ _CACHE_SHIFT,
+};
+
+#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
+
+/* Used only by software */
+#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
+#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
+#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
+# define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
+#endif
+#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
+# define _PAGE_SPECIAL (1 << _PAGE_SPECIAL_SHIFT)
+#else
+# define _PAGE_SPECIAL 0
+#endif
+#if defined(CONFIG_HAVE_ARCH_SOFT_DIRTY)
+# define _PAGE_SOFT_DIRTY (1 << _PAGE_SOFT_DIRTY_SHIFT)
+#else
+# define _PAGE_SOFT_DIRTY 0
+#endif
+
+/* Used by TLB hardware (placed in EntryLo*) */
+#if defined(CONFIG_XPA)
+# define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT)
+#elif defined(CONFIG_CPU_HAS_RIXI)
+# define _PAGE_NO_EXEC (cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
+#endif
+#define _PAGE_NO_READ (1 << _PAGE_NO_READ_SHIFT)
+#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
+#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
+#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
+#if defined(CONFIG_CPU_R3K_TLB)
+# define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT)
+# define _CACHE_MASK _CACHE_UNCACHED
+# define _PFN_SHIFT PAGE_SHIFT
+#else
+# define _CACHE_MASK (7 << _CACHE_SHIFT)
+# define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
+#endif
+
+#ifndef _PAGE_NO_EXEC
+#define _PAGE_NO_EXEC 0
+#endif
+
+#define _PAGE_SILENT_READ _PAGE_VALID
+#define _PAGE_SILENT_WRITE _PAGE_DIRTY
+
+#define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1))
+
+/*
+ * The final layouts of the PTE bits are:
+ *
+ * 64-bit, R1 or earlier: CCC D V G [S H] M A W R P
+ * 32-bit, R1 or earlier: CCC D V G M A W R P
+ * 64-bit, R2 or later: CCC D V G RI/R XI [S H] M A W P
+ * 32-bit, R2 or later: CCC D V G RI/R XI M A W P
+ */
+
+
+/*
+ * pte_to_entrylo converts a page table entry (PTE) into a MIPS
+ * EntryLo0/1 value.
+ */
+static inline uint64_t pte_to_entrylo(unsigned long pte_val)
+{
+#ifdef CONFIG_CPU_HAS_RIXI
+ if (cpu_has_rixi) {
+ int sa;
+#ifdef CONFIG_32BIT
+ sa = 31 - _PAGE_NO_READ_SHIFT;
+#else
+ sa = 63 - _PAGE_NO_READ_SHIFT;
+#endif
+ /*
+ * C has no way to express that this is a DSRL
+ * _PAGE_NO_EXEC_SHIFT followed by a ROTR 2. Luckily
+ * in the fast path this is done in assembly
+ */
+ return (pte_val >> _PAGE_GLOBAL_SHIFT) |
+ ((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
+ }
+#endif
+
+ return pte_val >> _PAGE_GLOBAL_SHIFT;
+}
+
+/*
+ * Cache attributes
+ */
+#if defined(CONFIG_CPU_R3K_TLB)
+
+#define _CACHE_CACHABLE_NONCOHERENT 0
+#define _CACHE_UNCACHED_ACCELERATED _CACHE_UNCACHED
+
+#elif defined(CONFIG_CPU_SB1)
+
+/* No penalty for being coherent on the SB1, so just
+ use it for "noncoherent" spaces, too. Shouldn't hurt. */
+
+#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)
+
+#endif
+
+#ifndef _CACHE_CACHABLE_NO_WA
+#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_CACHABLE_WA
+#define _CACHE_CACHABLE_WA (1<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_UNCACHED
+#define _CACHE_UNCACHED (2<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_CACHABLE_NONCOHERENT
+#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_CACHABLE_CE
+#define _CACHE_CACHABLE_CE (4<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_CACHABLE_COW
+#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_CACHABLE_CUW
+#define _CACHE_CACHABLE_CUW (6<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_UNCACHED_ACCELERATED
+#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
+#endif
+
+#define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED)
+#define __WRITEABLE (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)
+
+#define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED | \
+ _PAGE_SOFT_DIRTY | _PFN_MASK | _CACHE_MASK)
+
+#endif /* _ASM_PGTABLE_BITS_H */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
new file mode 100644
index 000000000..e5ef0fdd4
--- /dev/null
+++ b/arch/mips/include/asm/pgtable.h
@@ -0,0 +1,744 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Ralf Baechle
+ */
+#ifndef _ASM_PGTABLE_H
+#define _ASM_PGTABLE_H
+
+#include <linux/mm_types.h>
+#include <linux/mmzone.h>
+#ifdef CONFIG_32BIT
+#include <asm/pgtable-32.h>
+#endif
+#ifdef CONFIG_64BIT
+#include <asm/pgtable-64.h>
+#endif
+
+#include <asm/cmpxchg.h>
+#include <asm/io.h>
+#include <asm/pgtable-bits.h>
+#include <asm/cpu-features.h>
+
+struct mm_struct;
+struct vm_area_struct;
+
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
+ _page_cachable_default)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
+ _page_cachable_default)
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
+ _page_cachable_default)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | \
+ _page_cachable_default)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+ _PAGE_GLOBAL | _page_cachable_default)
+#define PAGE_KERNEL_NC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+ _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
+#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
+ __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
+
+/*
+ * If _PAGE_NO_EXEC is not defined, we can't enforce execute
+ * protection and treat it the same as read. Also, write
+ * permissions imply read permissions. This is the closest we can get
+ * by reasonable means.
+ */
+
+/*
+ * Dummy values to fill the table in mmap.c
+ * The real values will be generated at runtime
+ */
+#define __P000 __pgprot(0)
+#define __P001 __pgprot(0)
+#define __P010 __pgprot(0)
+#define __P011 __pgprot(0)
+#define __P100 __pgprot(0)
+#define __P101 __pgprot(0)
+#define __P110 __pgprot(0)
+#define __P111 __pgprot(0)
+
+#define __S000 __pgprot(0)
+#define __S001 __pgprot(0)
+#define __S010 __pgprot(0)
+#define __S011 __pgprot(0)
+#define __S100 __pgprot(0)
+#define __S101 __pgprot(0)
+#define __S110 __pgprot(0)
+#define __S111 __pgprot(0)
+
+extern unsigned long _page_cachable_default;
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero; used
+ * for zero-mapped memory areas, etc.
+ */
+
+extern unsigned long empty_zero_page;
+extern unsigned long zero_page_mask;
+
+#define ZERO_PAGE(vaddr) \
+ (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
+#define __HAVE_COLOR_ZERO_PAGE
+
+extern void paging_init(void);
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
+
+#define __pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_page(pmd) __pmd_page(pmd)
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#define pmd_page_vaddr(pmd) pmd_val(pmd)
+
+#define htw_stop() \
+do { \
+ unsigned long flags; \
+ \
+ if (cpu_has_htw) { \
+ local_irq_save(flags); \
+ if(!raw_current_cpu_data.htw_seq++) { \
+ write_c0_pwctl(read_c0_pwctl() & \
+ ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \
+ back_to_back_c0_hazard(); \
+ } \
+ local_irq_restore(flags); \
+ } \
+} while(0)
+
+#define htw_start() \
+do { \
+ unsigned long flags; \
+ \
+ if (cpu_has_htw) { \
+ local_irq_save(flags); \
+ if (!--raw_current_cpu_data.htw_seq) { \
+ write_c0_pwctl(read_c0_pwctl() | \
+ (1 << MIPS_PWCTL_PWEN_SHIFT)); \
+ back_to_back_c0_hazard(); \
+ } \
+ local_irq_restore(flags); \
+ } \
+} while(0)
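A minimal usage sketch (hypothetical helper, not part of this header): the
hardware page table walker is paused across a software PTE update, exactly as
pte_clear() does further down, so the walker never observes a half-written
entry; set_pte_at() is the helper declared just below.

	static inline void example_update_pte(struct mm_struct *mm,
					      unsigned long addr,
					      pte_t *ptep, pte_t newval)
	{
		htw_stop();
		set_pte_at(mm, addr, ptep, newval);
		htw_start();
	}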
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval);
+
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
+#ifdef CONFIG_XPA
+# define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL))
+#else
+# define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
+#endif
+
+#define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT)
+#define pte_no_exec(pte) ((pte).pte_low & _PAGE_NO_EXEC)
+
+static inline void set_pte(pte_t *ptep, pte_t pte)
+{
+ ptep->pte_high = pte.pte_high;
+ smp_wmb();
+ ptep->pte_low = pte.pte_low;
+
+#ifdef CONFIG_XPA
+ if (pte.pte_high & _PAGE_GLOBAL) {
+#else
+ if (pte.pte_low & _PAGE_GLOBAL) {
+#endif
+ pte_t *buddy = ptep_buddy(ptep);
+ /*
+ * Make sure the buddy is global too (if it's !none,
+ * it better already be global)
+ */
+ if (pte_none(*buddy)) {
+ if (!IS_ENABLED(CONFIG_XPA))
+ buddy->pte_low |= _PAGE_GLOBAL;
+ buddy->pte_high |= _PAGE_GLOBAL;
+ }
+ }
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_t null = __pte(0);
+
+ htw_stop();
+ /* Preserve global status for the pair */
+ if (IS_ENABLED(CONFIG_XPA)) {
+ if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
+ null.pte_high = _PAGE_GLOBAL;
+ } else {
+ if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
+ null.pte_low = null.pte_high = _PAGE_GLOBAL;
+ }
+
+ set_pte_at(mm, addr, ptep, null);
+ htw_start();
+}
+#else
+
+#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
+#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+#define pte_no_exec(pte) (pte_val(pte) & _PAGE_NO_EXEC)
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+#if !defined(CONFIG_CPU_R3K_TLB)
+ if (pte_val(pteval) & _PAGE_GLOBAL) {
+ pte_t *buddy = ptep_buddy(ptep);
+ /*
+ * Make sure the buddy is global too (if it's !none,
+ * it better already be global)
+ */
+# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
+ cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
+# else
+ cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
+# endif
+ }
+#endif
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ htw_stop();
+#if !defined(CONFIG_CPU_R3K_TLB)
+ /* Preserve global status for the pair */
+ if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
+ set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
+ else
+#endif
+ set_pte_at(mm, addr, ptep, __pte(0));
+ htw_start();
+}
+#endif
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+{
+ extern void __update_cache(unsigned long address, pte_t pte);
+
+ if (!pte_present(pteval))
+ goto cache_sync_done;
+
+ if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
+ goto cache_sync_done;
+
+ __update_cache(addr, pteval);
+cache_sync_done:
+ set_pte(ptep, pteval);
+}
+
+/*
+ * (pmds are folded into puds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)
+
+#ifndef __PAGETABLE_PMD_FOLDED
+/*
+ * (puds are folded into pgds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
+#endif
+
+#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
+#define PMD_T_LOG2 (__builtin_ffs(sizeof(pmd_t)) - 1)
+#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)
+
+/*
+ * We used to declare this array with a size, but gcc 3.3 and older cannot
+ * determine that the size expression is a constant, so the size is dropped.
+ */
+extern pgd_t swapper_pg_dir[];
+
+/*
+ * Platform specific pte_special() and pte_mkspecial() definitions
+ * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
+ */
+#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+static inline int pte_special(pte_t pte)
+{
+ return pte.pte_low & _PAGE_SPECIAL;
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ pte.pte_low |= _PAGE_SPECIAL;
+ return pte;
+}
+#else
+static inline int pte_special(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SPECIAL;
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_SPECIAL;
+ return pte;
+}
+#endif
+#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; }
+static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; }
+static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte.pte_low &= ~_PAGE_WRITE;
+ if (!IS_ENABLED(CONFIG_XPA))
+ pte.pte_low &= ~_PAGE_SILENT_WRITE;
+ pte.pte_high &= ~_PAGE_SILENT_WRITE;
+ return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte.pte_low &= ~_PAGE_MODIFIED;
+ if (!IS_ENABLED(CONFIG_XPA))
+ pte.pte_low &= ~_PAGE_SILENT_WRITE;
+ pte.pte_high &= ~_PAGE_SILENT_WRITE;
+ return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte.pte_low &= ~_PAGE_ACCESSED;
+ if (!IS_ENABLED(CONFIG_XPA))
+ pte.pte_low &= ~_PAGE_SILENT_READ;
+ pte.pte_high &= ~_PAGE_SILENT_READ;
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte.pte_low |= _PAGE_WRITE;
+ if (pte.pte_low & _PAGE_MODIFIED) {
+ if (!IS_ENABLED(CONFIG_XPA))
+ pte.pte_low |= _PAGE_SILENT_WRITE;
+ pte.pte_high |= _PAGE_SILENT_WRITE;
+ }
+ return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte.pte_low |= _PAGE_MODIFIED;
+ if (pte.pte_low & _PAGE_WRITE) {
+ if (!IS_ENABLED(CONFIG_XPA))
+ pte.pte_low |= _PAGE_SILENT_WRITE;
+ pte.pte_high |= _PAGE_SILENT_WRITE;
+ }
+ return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte.pte_low |= _PAGE_ACCESSED;
+ if (!(pte.pte_low & _PAGE_NO_READ)) {
+ if (!IS_ENABLED(CONFIG_XPA))
+ pte.pte_low |= _PAGE_SILENT_READ;
+ pte.pte_high |= _PAGE_SILENT_READ;
+ }
+ return pte;
+}
+#else
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
+ return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
+ return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_WRITE;
+ if (pte_val(pte) & _PAGE_MODIFIED)
+ pte_val(pte) |= _PAGE_SILENT_WRITE;
+ return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
+ if (pte_val(pte) & _PAGE_WRITE)
+ pte_val(pte) |= _PAGE_SILENT_WRITE;
+ return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_ACCESSED;
+ if (!(pte_val(pte) & _PAGE_NO_READ))
+ pte_val(pte) |= _PAGE_SILENT_READ;
+ return pte;
+}
+
+#define pte_sw_mkyoung pte_mkyoung
+
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; }
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_HUGE;
+ return pte;
+}
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline bool pte_soft_dirty(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SOFT_DIRTY;
+}
+#define pte_swp_soft_dirty pte_soft_dirty
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_SOFT_DIRTY;
+ return pte;
+}
+#define pte_swp_mksoft_dirty pte_mksoft_dirty
+
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_SOFT_DIRTY);
+ return pte;
+}
+#define pte_swp_clear_soft_dirty pte_clear_soft_dirty
+
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
+#endif
+
+/*
+ * Macro to mark a page protection value as "uncacheable". Note
+ * that "protection" is really a misnomer here as the protection value
+ * contains the memory attribute bits, dirty bits, and various other
+ * bits as well.
+ */
+#define pgprot_noncached pgprot_noncached
+
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
+
+ return __pgprot(prot);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ /* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
+ prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;
+
+ return __pgprot(prot);
+}
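As a quick illustration (hypothetical caller, not from this file), the helpers
above only swap the cache-attribute field of an existing protection value:

	pgprot_t uc = pgprot_noncached(PAGE_KERNEL);	/* kernel mapping, uncached       */
	pgprot_t wc = pgprot_writecombine(PAGE_KERNEL);	/* kernel mapping, write-combined */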
+
+static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
+ unsigned long address)
+{
+}
+
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ return pte_val(pte_a) == pte_val(pte_b);
+}
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+static inline int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty)
+{
+ if (!pte_same(*ptep, entry))
+ set_pte_at(vma->vm_mm, address, ptep, entry);
+ /*
+ * update_mmu_cache will unconditionally execute, handling both
+ * the case that the PTE changed and the spurious fault case.
+ */
+ return true;
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+#if defined(CONFIG_XPA)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
+ pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
+ pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
+ pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
+ return pte;
+}
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte.pte_low &= _PAGE_CHG_MASK;
+ pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
+ pte.pte_low |= pgprot_val(newprot);
+ pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
+ return pte;
+}
+#else
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte_val(pte) &= _PAGE_CHG_MASK;
+ pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
+ if ((pte_val(pte) & _PAGE_ACCESSED) && !(pte_val(pte) & _PAGE_NO_READ))
+ pte_val(pte) |= _PAGE_SILENT_READ;
+ return pte;
+}
+#endif
+
+
+extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
+ pte_t pte);
+
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ __update_tlb(vma, address, pte);
+}
+
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#define update_mmu_tlb update_mmu_cache
+
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ pte_t pte = *(pte_t *)pmdp;
+
+ __update_tlb(vma, address, pte);
+}
+
+#define kern_addr_valid(addr) (1)
+
+/*
+ * Allow physical addresses to be fixed up to help 36-bit peripherals.
+ */
+#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
+phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
+int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
+ unsigned long pfn, unsigned long size, pgprot_t prot);
+#define io_remap_pfn_range io_remap_pfn_range
+#else
+#define fixup_bigphys_addr(addr, size) (addr)
+#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+/* We don't have hardware dirty/accessed bits; generic_pmdp_establish is fine. */
+#define pmdp_establish generic_pmdp_establish
+
+#define has_transparent_hugepage has_transparent_hugepage
+extern int has_transparent_hugepage(void);
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_HUGE);
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+ pmd_val(pmd) |= _PAGE_HUGE;
+
+ return pmd;
+}
+
+extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd);
+
+#define pmd_write pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_WRITE);
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
+ return pmd;
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+ pmd_val(pmd) |= _PAGE_WRITE;
+ if (pmd_val(pmd) & _PAGE_MODIFIED)
+ pmd_val(pmd) |= _PAGE_SILENT_WRITE;
+
+ return pmd;
+}
+
+static inline int pmd_dirty(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_MODIFIED);
+}
+
+static inline pmd_t pmd_mkclean(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
+ return pmd;
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+ pmd_val(pmd) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
+ if (pmd_val(pmd) & _PAGE_WRITE)
+ pmd_val(pmd) |= _PAGE_SILENT_WRITE;
+
+ return pmd;
+}
+
+static inline int pmd_young(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
+
+ return pmd;
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+ pmd_val(pmd) |= _PAGE_ACCESSED;
+
+ if (!(pmd_val(pmd) & _PAGE_NO_READ))
+ pmd_val(pmd) |= _PAGE_SILENT_READ;
+
+ return pmd;
+}
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline int pmd_soft_dirty(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_SOFT_DIRTY);
+}
+
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+{
+ pmd_val(pmd) |= _PAGE_SOFT_DIRTY;
+ return pmd;
+}
+
+static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~(_PAGE_SOFT_DIRTY);
+ return pmd;
+}
+
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
+/* Extern to avoid header file madness */
+extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+ return pmd_val(pmd) >> _PFN_SHIFT;
+}
+
+static inline struct page *pmd_page(pmd_t pmd)
+{
+ if (pmd_trans_huge(pmd))
+ return pfn_to_page(pmd_pfn(pmd));
+
+ return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
+}
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
+ (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
+ return pmd;
+}
+
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);
+
+ return pmd;
+}
+
+/*
+ * The generic pmdp_huge_get_and_clear() uses a version of pmd_clear()
+ * with a different prototype.
+ */
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t old = *pmdp;
+
+ pmd_clear(pmdp);
+
+ return old;
+}
+
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifdef _PAGE_HUGE
+#define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0)
+#define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0)
+#endif
+
+#define gup_fast_permitted(start, end) (!cpu_has_dc_aliases)
+
+/*
+ * We provide our own get_unmapped_area() to cope with the virtual aliasing
+ * constraints placed on us by the cache architecture.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
+#endif /* _ASM_PGTABLE_H */
diff --git a/arch/mips/include/asm/pm-cps.h b/arch/mips/include/asm/pm-cps.h
new file mode 100644
index 000000000..efd96e919
--- /dev/null
+++ b/arch/mips/include/asm/pm-cps.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#ifndef __MIPS_ASM_PM_CPS_H__
+#define __MIPS_ASM_PM_CPS_H__
+
+/*
+ * The CM & CPC can only handle coherence & power control on a per-core basis,
+ * thus in an MT system the VP(E)s within each core are coupled and can only
+ * enter or exit states requiring CM or CPC assistance in unison.
+ */
+#if defined(CONFIG_CPU_MIPSR6)
+# define coupled_coherence cpu_has_vp
+#elif defined(CONFIG_MIPS_MT)
+# define coupled_coherence cpu_has_mipsmt
+#else
+# define coupled_coherence 0
+#endif
+
+/* Enumeration of possible PM states */
+enum cps_pm_state {
+ CPS_PM_NC_WAIT, /* MIPS wait instruction, non-coherent */
+ CPS_PM_CLOCK_GATED, /* Core clock gated */
+ CPS_PM_POWER_GATED, /* Core power gated */
+ CPS_PM_STATE_COUNT,
+};
+
+/**
+ * cps_pm_support_state - determine whether the system supports a PM state
+ * @state: the state to test for support
+ *
+ * Returns true if the system supports the given state, otherwise false.
+ */
+extern bool cps_pm_support_state(enum cps_pm_state state);
+
+/**
+ * cps_pm_enter_state - enter a PM state
+ * @state: the state to enter
+ *
+ * Enter the given PM state. If coupled_coherence is non-zero then it is
+ * expected that this function be called at approximately the same time on
+ * each coupled CPU. Returns 0 on successful entry & exit, otherwise -errno.
+ */
+extern int cps_pm_enter_state(enum cps_pm_state state);
+
+#endif /* __MIPS_ASM_PM_CPS_H__ */
diff --git a/arch/mips/include/asm/pm.h b/arch/mips/include/asm/pm.h
new file mode 100644
index 000000000..10bb7b640
--- /dev/null
+++ b/arch/mips/include/asm/pm.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2014 Imagination Technologies Ltd
+ *
+ * PM helper macros for CPU power off (e.g. Suspend-to-RAM).
+ */
+
+#ifndef __ASM_PM_H
+#define __ASM_PM_H
+
+#ifdef __ASSEMBLY__
+
+#include <asm/asm-offsets.h>
+#include <asm/asm.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+
+/* Save CPU state to stack for suspend to RAM */
+.macro SUSPEND_SAVE_REGS
+ subu sp, PT_SIZE
+ /* Call preserved GPRs */
+ LONG_S $16, PT_R16(sp)
+ LONG_S $17, PT_R17(sp)
+ LONG_S $18, PT_R18(sp)
+ LONG_S $19, PT_R19(sp)
+ LONG_S $20, PT_R20(sp)
+ LONG_S $21, PT_R21(sp)
+ LONG_S $22, PT_R22(sp)
+ LONG_S $23, PT_R23(sp)
+ LONG_S $28, PT_R28(sp)
+ LONG_S $30, PT_R30(sp)
+ LONG_S $31, PT_R31(sp)
+ /* A couple of CP0 registers with space in pt_regs */
+ mfc0 k0, CP0_STATUS
+ LONG_S k0, PT_STATUS(sp)
+.endm
+
+/* Restore CPU state from stack after resume from RAM */
+.macro RESUME_RESTORE_REGS_RETURN
+ .set push
+ .set noreorder
+ /* A couple of CP0 registers with space in pt_regs */
+ LONG_L k0, PT_STATUS(sp)
+ mtc0 k0, CP0_STATUS
+ /* Call preserved GPRs */
+ LONG_L $16, PT_R16(sp)
+ LONG_L $17, PT_R17(sp)
+ LONG_L $18, PT_R18(sp)
+ LONG_L $19, PT_R19(sp)
+ LONG_L $20, PT_R20(sp)
+ LONG_L $21, PT_R21(sp)
+ LONG_L $22, PT_R22(sp)
+ LONG_L $23, PT_R23(sp)
+ LONG_L $28, PT_R28(sp)
+ LONG_L $30, PT_R30(sp)
+ LONG_L $31, PT_R31(sp)
+ /* Pop and return */
+ jr ra
+ addiu sp, PT_SIZE
+ .set pop
+.endm
+
+/* Get address of static suspend state into t1 */
+.macro LA_STATIC_SUSPEND
+ la t1, mips_static_suspend_state
+.endm
+
+/* Save important CPU state for early restoration to global data */
+.macro SUSPEND_SAVE_STATIC
+#ifdef CONFIG_EVA
+ /*
+	 * Segment configuration is saved in global data where it can be easily
+	 * reloaded without depending on the segment configuration itself
+	 * having already been restored.
+ */
+ mfc0 k0, CP0_PAGEMASK, 2 /* SegCtl0 */
+ LONG_S k0, SSS_SEGCTL0(t1)
+ mfc0 k0, CP0_PAGEMASK, 3 /* SegCtl1 */
+ LONG_S k0, SSS_SEGCTL1(t1)
+ mfc0 k0, CP0_PAGEMASK, 4 /* SegCtl2 */
+ LONG_S k0, SSS_SEGCTL2(t1)
+#endif
+ /* save stack pointer (pointing to GPRs) */
+ LONG_S sp, SSS_SP(t1)
+.endm
+
+/* Restore important CPU state early from global data */
+.macro RESUME_RESTORE_STATIC
+#ifdef CONFIG_EVA
+ /*
+ * Segment configuration must be restored prior to any access to
+ * allocated memory, as it may reside outside of the legacy kernel
+ * segments.
+ */
+ LONG_L k0, SSS_SEGCTL0(t1)
+ mtc0 k0, CP0_PAGEMASK, 2 /* SegCtl0 */
+ LONG_L k0, SSS_SEGCTL1(t1)
+ mtc0 k0, CP0_PAGEMASK, 3 /* SegCtl1 */
+ LONG_L k0, SSS_SEGCTL2(t1)
+ mtc0 k0, CP0_PAGEMASK, 4 /* SegCtl2 */
+ tlbw_use_hazard
+#endif
+ /* restore stack pointer (pointing to GPRs) */
+ LONG_L sp, SSS_SP(t1)
+.endm
+
+/* flush caches to make sure context has reached memory */
+.macro SUSPEND_CACHE_FLUSH
+ .extern __wback_cache_all
+ .set push
+ .set noreorder
+ la t1, __wback_cache_all
+ LONG_L t0, 0(t1)
+ jalr t0
+ nop
+ .set pop
+ .endm
+
+/* Save suspend state and flush data caches to RAM */
+.macro SUSPEND_SAVE
+ SUSPEND_SAVE_REGS
+ LA_STATIC_SUSPEND
+ SUSPEND_SAVE_STATIC
+ SUSPEND_CACHE_FLUSH
+.endm
+
+/* Restore saved state after resume from RAM and return */
+.macro RESUME_RESTORE_RETURN
+ LA_STATIC_SUSPEND
+ RESUME_RESTORE_STATIC
+ RESUME_RESTORE_REGS_RETURN
+.endm
+
+#else /* __ASSEMBLY__ */
+
+/**
+ * struct mips_static_suspend_state - Core saved CPU state across S2R.
+ * @segctl: CP0 Segment control registers.
+ * @sp: Stack frame where GP register context is saved.
+ *
+ * This structure contains the minimal CPU state that must be saved in static
+ * kernel data in order to be able to restore the rest of the state. This
+ * includes the segment configuration when EVA is enabled, as it must be
+ * restored before any kmalloc'd memory is referenced (even the stack
+ * pointer).
+ */
+struct mips_static_suspend_state {
+#ifdef CONFIG_EVA
+ unsigned long segctl[3];
+#endif
+ unsigned long sp;
+};
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_PM_H */
diff --git a/arch/mips/include/asm/prefetch.h b/arch/mips/include/asm/prefetch.h
new file mode 100644
index 000000000..a56594f36
--- /dev/null
+++ b/arch/mips/include/asm/prefetch.h
@@ -0,0 +1,87 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 by Ralf Baechle
+ */
+#ifndef __ASM_PREFETCH_H
+#define __ASM_PREFETCH_H
+
+
+/*
+ * R5000 and RM5200 implement pref and prefx instructions but they're nops, so
+ * rather than wasting time we pretend these processors don't support
+ * prefetching at all.
+ *
+ * R5432 implements Load, Store, LoadStreamed, StoreStreamed, LoadRetained,
+ * StoreRetained and WriteBackInvalidate but not Pref_PrepareForStore.
+ *
+ * Hell (and the book on my shelf I can't open ...) knows what the R8000 does.
+ *
+ * RM7000 version 1.0 interprets all hints as Pref_Load; version 2.0 implements
+ * Pref_PrepareForStore also.
+ *
+ * RM9000 is MIPS IV but implements prefetching like MIPS32/MIPS64; its
+ * Pref_WriteBackInvalidate is a nop and Pref_PrepareForStore is broken in
+ * current versions due to erratum G105.
+ *
+ * VR5500 (including VR5701 and VR7701) only implement load prefetch.
+ *
+ * Finally MIPS32 and MIPS64 implement all of the following hints.
+ */
+
+#define Pref_Load 0
+#define Pref_Store 1
+ /* 2 and 3 are reserved */
+#define Pref_LoadStreamed 4
+#define Pref_StoreStreamed 5
+#define Pref_LoadRetained 6
+#define Pref_StoreRetained 7
+ /* 8 ... 24 are reserved */
+#define Pref_WriteBackInvalidate 25
+#define Pref_PrepareForStore 30
+
+#ifdef __ASSEMBLY__
+
+ .macro __pref hint addr
+#ifdef CONFIG_CPU_HAS_PREFETCH
+ pref \hint, \addr
+#endif
+ .endm
+
+ .macro pref_load addr
+ __pref Pref_Load, \addr
+ .endm
+
+ .macro pref_store addr
+ __pref Pref_Store, \addr
+ .endm
+
+ .macro pref_load_streamed addr
+ __pref Pref_LoadStreamed, \addr
+ .endm
+
+ .macro pref_store_streamed addr
+ __pref Pref_StoreStreamed, \addr
+ .endm
+
+ .macro pref_load_retained addr
+ __pref Pref_LoadRetained, \addr
+ .endm
+
+ .macro pref_store_retained addr
+ __pref Pref_StoreRetained, \addr
+ .endm
+
+ .macro pref_wback_inv addr
+ __pref Pref_WriteBackInvalidate, \addr
+ .endm
+
+ .macro pref_prepare_for_store addr
+ __pref Pref_PrepareForStore, \addr
+ .endm
+
+#endif
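An illustrative use of the assembler macros above (a hypothetical copy loop,
not taken from this file), prefetching one cache line ahead of the pointers:

	pref_load		32(a0)		# warm the next source line
	pref_prepare_for_store	32(a1)		# allocate the next destination line
	lw	t0, 0(a0)
	sw	t0, 0(a1)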
+
+#endif /* __ASM_PREFETCH_H */
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
new file mode 100644
index 000000000..7834e7c0c
--- /dev/null
+++ b/arch/mips/include/asm/processor.h
@@ -0,0 +1,425 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 Waldorf GMBH
+ * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001, 2002, 2003 Ralf Baechle
+ * Copyright (C) 1996 Paul M. Antoine
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_PROCESSOR_H
+#define _ASM_PROCESSOR_H
+
+#include <linux/atomic.h>
+#include <linux/cpumask.h>
+#include <linux/sizes.h>
+#include <linux/threads.h>
+
+#include <asm/cachectl.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/dsemul.h>
+#include <asm/mipsregs.h>
+#include <asm/prefetch.h>
+#include <asm/vdso/processor.h>
+
+/*
+ * System setup and hardware flags..
+ */
+
+extern unsigned int vced_count, vcei_count;
+extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+
+#ifdef CONFIG_32BIT
+#ifdef CONFIG_KVM_GUEST
+/* User space process size is limited to 1GB in KVM Guest Mode */
+#define TASK_SIZE 0x3fff8000UL
+#else
+/*
+ * User space process size: 2GB. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+#define TASK_SIZE 0x80000000UL
+#endif
+
+#define STACK_TOP_MAX TASK_SIZE
+
+#define TASK_IS_32BIT_ADDR 1
+
+#endif
+
+#ifdef CONFIG_64BIT
+/*
+ * User space process size: 1TB. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing. TASK_SIZE
+ * is limited to 1TB by the R4000 architecture; R10000 and better can
+ * support 16TB; the architectural reserve for future expansion is
+ * 8192EB ...
+ */
+#define TASK_SIZE32 0x7fff8000UL
+#ifdef CONFIG_MIPS_VA_BITS_48
+#define TASK_SIZE64 (0x1UL << ((cpu_data[0].vmbits>48)?48:cpu_data[0].vmbits))
+#else
+#define TASK_SIZE64 0x10000000000UL
+#endif
+#define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
+#define STACK_TOP_MAX TASK_SIZE64
+
+#define TASK_SIZE_OF(tsk) \
+ (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
+
+#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)
+
+#endif
+
+#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)
+
+extern unsigned long mips_stack_top(void);
+#define STACK_TOP mips_stack_top()
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
+
+
+#define NUM_FPU_REGS 32
+
+#ifdef CONFIG_CPU_HAS_MSA
+# define FPU_REG_WIDTH 128
+#else
+# define FPU_REG_WIDTH 64
+#endif
+
+union fpureg {
+ __u32 val32[FPU_REG_WIDTH / 32];
+ __u64 val64[FPU_REG_WIDTH / 64];
+};
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# define FPR_IDX(width, idx) (idx)
+#else
+# define FPR_IDX(width, idx) ((idx) ^ ((64 / (width)) - 1))
+#endif
+
+#define BUILD_FPR_ACCESS(width) \
+static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx) \
+{ \
+ return fpr->val##width[FPR_IDX(width, idx)]; \
+} \
+ \
+static inline void set_fpr##width(union fpureg *fpr, unsigned idx, \
+ u##width val) \
+{ \
+ fpr->val##width[FPR_IDX(width, idx)] = val; \
+}
+
+BUILD_FPR_ACCESS(32)
+BUILD_FPR_ACCESS(64)
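The generated accessors can then be used as in this hypothetical fragment
(the task and register index are made up for illustration):

	union fpureg *fpr = &current->thread.fpu.fpr[0];
	u32 lo = get_fpr32(fpr, 0);		/* low 32 bits of FP register 0 */
	set_fpr64(fpr, 0, 0x1234abcdULL);	/* write the full 64-bit value  */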
+
+/*
+ * It would be nice to add some more fields for emulator statistics,
+ * the additional information is private to the FPU emulator for now.
+ * See arch/mips/include/asm/fpu_emulator.h.
+ */
+
+struct mips_fpu_struct {
+ union fpureg fpr[NUM_FPU_REGS];
+ unsigned int fcr31;
+ unsigned int msacsr;
+};
+
+#define NUM_DSP_REGS 6
+
+typedef unsigned long dspreg_t;
+
+struct mips_dsp_state {
+ dspreg_t dspr[NUM_DSP_REGS];
+ unsigned int dspcontrol;
+};
+
+#define INIT_CPUMASK { \
+ {0,} \
+}
+
+struct mips3264_watch_reg_state {
+ /* The width of watchlo is 32 in a 32 bit kernel and 64 in a
+ 64 bit kernel. We use unsigned long as it has the same
+ property. */
+ unsigned long watchlo[NUM_WATCH_REGS];
+ /* Only the mask and IRW bits from watchhi. */
+ u16 watchhi[NUM_WATCH_REGS];
+};
+
+union mips_watch_reg_state {
+ struct mips3264_watch_reg_state mips3264;
+};
+
+#if defined(CONFIG_CPU_CAVIUM_OCTEON)
+
+struct octeon_cop2_state {
+ /* DMFC2 rt, 0x0201 */
+ unsigned long cop2_crc_iv;
+ /* DMFC2 rt, 0x0202 (Set with DMTC2 rt, 0x1202) */
+ unsigned long cop2_crc_length;
+ /* DMFC2 rt, 0x0200 (set with DMTC2 rt, 0x4200) */
+ unsigned long cop2_crc_poly;
+ /* DMFC2 rt, 0x0402; DMFC2 rt, 0x040A */
+ unsigned long cop2_llm_dat[2];
+ /* DMFC2 rt, 0x0084 */
+ unsigned long cop2_3des_iv;
+ /* DMFC2 rt, 0x0080; DMFC2 rt, 0x0081; DMFC2 rt, 0x0082 */
+ unsigned long cop2_3des_key[3];
+ /* DMFC2 rt, 0x0088 (Set with DMTC2 rt, 0x0098) */
+ unsigned long cop2_3des_result;
+ /* DMFC2 rt, 0x0111 (FIXME: Read Pass1 Errata) */
+ unsigned long cop2_aes_inp0;
+ /* DMFC2 rt, 0x0102; DMFC2 rt, 0x0103 */
+ unsigned long cop2_aes_iv[2];
+ /* DMFC2 rt, 0x0104; DMFC2 rt, 0x0105; DMFC2 rt, 0x0106; DMFC2
+ * rt, 0x0107 */
+ unsigned long cop2_aes_key[4];
+ /* DMFC2 rt, 0x0110 */
+ unsigned long cop2_aes_keylen;
+ /* DMFC2 rt, 0x0100; DMFC2 rt, 0x0101 */
+ unsigned long cop2_aes_result[2];
+ /* DMFC2 rt, 0x0240; DMFC2 rt, 0x0241; DMFC2 rt, 0x0242; DMFC2
+ * rt, 0x0243; DMFC2 rt, 0x0244; DMFC2 rt, 0x0245; DMFC2 rt,
+ * 0x0246; DMFC2 rt, 0x0247; DMFC2 rt, 0x0248; DMFC2 rt,
+ * 0x0249; DMFC2 rt, 0x024A; DMFC2 rt, 0x024B; DMFC2 rt,
+ * 0x024C; DMFC2 rt, 0x024D; DMFC2 rt, 0x024E - Pass2 */
+ unsigned long cop2_hsh_datw[15];
+ /* DMFC2 rt, 0x0250; DMFC2 rt, 0x0251; DMFC2 rt, 0x0252; DMFC2
+ * rt, 0x0253; DMFC2 rt, 0x0254; DMFC2 rt, 0x0255; DMFC2 rt,
+ * 0x0256; DMFC2 rt, 0x0257 - Pass2 */
+ unsigned long cop2_hsh_ivw[8];
+ /* DMFC2 rt, 0x0258; DMFC2 rt, 0x0259 - Pass2 */
+ unsigned long cop2_gfm_mult[2];
+ /* DMFC2 rt, 0x025E - Pass2 */
+ unsigned long cop2_gfm_poly;
+ /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
+ unsigned long cop2_gfm_result[2];
+ /* DMFC2 rt, 0x24F, DMFC2 rt, 0x50, OCTEON III */
+ unsigned long cop2_sha3[2];
+};
+#define COP2_INIT \
+ .cp2 = {0,},
+
+struct octeon_cvmseg_state {
+ unsigned long cvmseg[CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE]
+ [cpu_dcache_line_size() / sizeof(unsigned long)];
+};
+
+#elif defined(CONFIG_CPU_XLP)
+struct nlm_cop2_state {
+ u64 rx[4];
+ u64 tx[4];
+ u32 tx_msg_status;
+ u32 rx_msg_status;
+};
+
+#define COP2_INIT \
+ .cp2 = {{0}, {0}, 0, 0},
+#else
+#define COP2_INIT
+#endif
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+#ifdef CONFIG_CPU_HAS_MSA
+# define ARCH_MIN_TASKALIGN 16
+# define FPU_ALIGN __aligned(16)
+#else
+# define ARCH_MIN_TASKALIGN 8
+# define FPU_ALIGN
+#endif
+
+struct mips_abi;
+
+/*
+ * If you change thread_struct remember to change the #defines below too!
+ */
+struct thread_struct {
+ /* Saved main processor registers. */
+ unsigned long reg16;
+ unsigned long reg17, reg18, reg19, reg20, reg21, reg22, reg23;
+ unsigned long reg29, reg30, reg31;
+
+ /* Saved cp0 stuff. */
+ unsigned long cp0_status;
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ /* Saved fpu/fpu emulator stuff. */
+ struct mips_fpu_struct fpu FPU_ALIGN;
+ /* Assigned branch delay slot 'emulation' frame */
+ atomic_t bd_emu_frame;
+ /* PC of the branch from a branch delay slot 'emulation' */
+ unsigned long bd_emu_branch_pc;
+ /* PC to continue from following a branch delay slot 'emulation' */
+ unsigned long bd_emu_cont_pc;
+#endif
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* Emulated instruction count */
+ unsigned long emulated_fp;
+ /* Saved per-thread scheduler affinity mask */
+ cpumask_t user_cpus_allowed;
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ /* Saved state of the DSP ASE, if available. */
+ struct mips_dsp_state dsp;
+
+ /* Saved watch register state, if available. */
+ union mips_watch_reg_state watch;
+
+ /* Other stuff associated with the thread. */
+ unsigned long cp0_badvaddr; /* Last user fault */
+ unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */
+ unsigned long error_code;
+ unsigned long trap_nr;
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128)));
+ struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128)));
+#endif
+#ifdef CONFIG_CPU_XLP
+ struct nlm_cop2_state cp2;
+#endif
+ struct mips_abi *abi;
+};
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+#define FPAFF_INIT \
+ .emulated_fp = 0, \
+ .user_cpus_allowed = INIT_CPUMASK,
+#else
+#define FPAFF_INIT
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+# define FPU_INIT \
+ .fpu = { \
+ .fpr = {{{0,},},}, \
+ .fcr31 = 0, \
+ .msacsr = 0, \
+ }, \
+ /* Delay slot emulation */ \
+ .bd_emu_frame = ATOMIC_INIT(BD_EMUFRAME_NONE), \
+ .bd_emu_branch_pc = 0, \
+ .bd_emu_cont_pc = 0,
+#else
+# define FPU_INIT
+#endif
+
+#define INIT_THREAD { \
+ /* \
+ * Saved main processor registers \
+ */ \
+ .reg16 = 0, \
+ .reg17 = 0, \
+ .reg18 = 0, \
+ .reg19 = 0, \
+ .reg20 = 0, \
+ .reg21 = 0, \
+ .reg22 = 0, \
+ .reg23 = 0, \
+ .reg29 = 0, \
+ .reg30 = 0, \
+ .reg31 = 0, \
+ /* \
+ * Saved cp0 stuff \
+ */ \
+ .cp0_status = 0, \
+ /* \
+ * Saved FPU/FPU emulator stuff \
+ */ \
+ FPU_INIT \
+ /* \
+ * FPU affinity state (null if not FPAFF) \
+ */ \
+ FPAFF_INIT \
+ /* \
+ * Saved DSP stuff \
+ */ \
+ .dsp = { \
+ .dspr = {0, }, \
+ .dspcontrol = 0, \
+ }, \
+ /* \
+ * saved watch register stuff \
+ */ \
+ .watch = {{{0,},},}, \
+ /* \
+ * Other stuff associated with the process \
+ */ \
+ .cp0_badvaddr = 0, \
+ .cp0_baduaddr = 0, \
+ .error_code = 0, \
+ .trap_nr = 0, \
+ /* \
+ * Platform specific cop2 registers(null if no COP2) \
+ */ \
+ COP2_INIT \
+}
+
+struct task_struct;
+
+/* Free all resources held by a thread. */
+#define release_thread(thread) do { } while(0)
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ */
+extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp);
+
+static inline void flush_thread(void)
+{
+}
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
+ THREAD_SIZE - 32 - sizeof(struct pt_regs))
+#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
+#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
+
+/*
+ * Return_address is a replacement for __builtin_return_address(count)
+ * which on certain architectures cannot reasonably be implemented in GCC
+ * (MIPS, Alpha) or is unusable with -fomit-frame-pointer (i386).
+ * Note that __builtin_return_address(x>=1) is forbidden because GCC
+ * aborts compilation on some CPUs. It's simply not possible to unwind
+ * some CPU's stackframes.
+ *
+ * __builtin_return_address works only for non-leaf functions. We avoid the
+ * overhead of a function call by forcing the compiler to save the return
+ * address register on the stack.
+ */
+#define return_address() ({__asm__ __volatile__("":::"$31");__builtin_return_address(0);})
+
+#ifdef CONFIG_CPU_HAS_PREFETCH
+
+#define ARCH_HAS_PREFETCH
+#define prefetch(x) __builtin_prefetch((x), 0, 1)
+
+#define ARCH_HAS_PREFETCHW
+#define prefetchw(x) __builtin_prefetch((x), 1, 1)
+
+#endif
+
+/*
+ * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options
+ * to the prctl syscall.
+ */
+extern int mips_get_process_fp_mode(struct task_struct *task);
+extern int mips_set_process_fp_mode(struct task_struct *task,
+ unsigned int value);
+
+#define GET_FP_MODE(task) mips_get_process_fp_mode(task)
+#define SET_FP_MODE(task,value) mips_set_process_fp_mode(task, value)
+
+#endif /* _ASM_PROCESSOR_H */
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
new file mode 100644
index 000000000..c42e07671
--- /dev/null
+++ b/arch/mips/include/asm/prom.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * arch/mips/include/asm/prom.h
+ *
+ * Copyright (C) 2010 Cisco Systems Inc. <dediao@cisco.com>
+ */
+#ifndef __ASM_PROM_H
+#define __ASM_PROM_H
+
+#ifdef CONFIG_USE_OF
+#include <linux/bug.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <asm/bootinfo.h>
+
+extern void device_tree_init(void);
+
+struct boot_param_header;
+
+extern void __dt_setup_arch(void *bph);
+extern int __dt_register_buses(const char *bus0, const char *bus1);
+
+#else /* CONFIG_OF */
+static inline void device_tree_init(void) { }
+#endif /* CONFIG_OF */
+
+extern char *mips_get_machine_name(void);
+extern void mips_set_machine_name(const char *name);
+
+#endif /* __ASM_PROM_H */
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
new file mode 100644
index 000000000..1e76774b3
--- /dev/null
+++ b/arch/mips/include/asm/ptrace.h
@@ -0,0 +1,189 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_PTRACE_H
+#define _ASM_PTRACE_H
+
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <asm/isadep.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+#include <uapi/asm/ptrace.h>
+
+/*
+ * This struct defines the way the registers are stored on the stack during a
+ * system call/exception. As usual the registers k0/k1 aren't being saved.
+ *
+ * If you add a register here, also add it to regoffset_table[] in
+ * arch/mips/kernel/ptrace.c.
+ */
+struct pt_regs {
+#ifdef CONFIG_32BIT
+ /* Pad bytes for argument save space on the stack. */
+ unsigned long pad0[8];
+#endif
+
+ /* Saved main processor registers. */
+ unsigned long regs[32];
+
+ /* Saved special registers. */
+ unsigned long cp0_status;
+ unsigned long hi;
+ unsigned long lo;
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ unsigned long acx;
+#endif
+ unsigned long cp0_badvaddr;
+ unsigned long cp0_cause;
+ unsigned long cp0_epc;
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ unsigned long long mpl[6]; /* MTM{0-5} */
+ unsigned long long mtp[6]; /* MTP{0-5} */
+#endif
+ unsigned long __last[0];
+} __aligned(8);
+
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+	return regs->regs[29];
+}
+
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->cp0_epc = val;
+}
+
+/* Query offset/name of register from its name/offset */
+extern int regs_query_register_offset(const char *name);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which register value is gotten.
+ * @offset: offset number of the register.
+ *
+ * regs_get_register returns the value of a register. The @offset is the
+ * offset of the register within the struct pt_regs pointed to by @regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks whether @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+static inline int regs_within_kernel_stack(struct pt_regs *regs,
+ unsigned long addr)
+{
+ return ((addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
+
+struct task_struct;
+
+extern int ptrace_getregs(struct task_struct *child,
+ struct user_pt_regs __user *data);
+extern int ptrace_setregs(struct task_struct *child,
+ struct user_pt_regs __user *data);
+
+extern int ptrace_getfpregs(struct task_struct *child, __u32 __user *data);
+extern int ptrace_setfpregs(struct task_struct *child, __u32 __user *data);
+
+extern int ptrace_get_watch_regs(struct task_struct *child,
+ struct pt_watch_regs __user *addr);
+extern int ptrace_set_watch_regs(struct task_struct *child,
+ struct pt_watch_regs __user *addr);
+
+/*
+ * Does the process account for user or for system time?
+ */
+#define user_mode(regs) (((regs)->cp0_status & KU_MASK) == KU_USER)
+
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+ return !regs->regs[7];
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ if (is_syscall_success(regs) || !user_mode(regs))
+ return regs->regs[2];
+ else
+ return -regs->regs[2];
+}
+
+#define instruction_pointer(regs) ((regs)->cp0_epc)
+#define profile_pc(regs) instruction_pointer(regs)
+
+extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);
+extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
+
+extern void die(const char *, struct pt_regs *) __noreturn;
+
+static inline void die_if_kernel(const char *str, struct pt_regs *regs)
+{
+ if (unlikely(!user_mode(regs)))
+ die(str, regs);
+}
+
+#define current_pt_regs() \
+({ \
+ unsigned long sp = (unsigned long)__builtin_frame_address(0); \
+ (struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1 - 32) - 1; \
+})
+
+/* Helpers for working with the user stack pointer */
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ return regs->regs[29];
+}
+
+static inline void user_stack_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->regs[29] = val;
+}
+
+#endif /* _ASM_PTRACE_H */
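
A minimal sketch showing how the register accessors above can be combined,
e.g. from a kprobes-style handler that receives a struct pt_regs pointer; the
helper name and the pr_info() reporting are illustrative and includes are
omitted:

	static void show_named_reg(struct pt_regs *regs, const char *name)
	{
		int offset = regs_query_register_offset(name);

		if (offset < 0)
			return;		/* unknown register name */

		pr_info("%s = %#lx\n", name,
			regs_get_register(regs, (unsigned int)offset));
	}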
diff --git a/arch/mips/include/asm/r4k-timer.h b/arch/mips/include/asm/r4k-timer.h
new file mode 100644
index 000000000..6e7361629
--- /dev/null
+++ b/arch/mips/include/asm/r4k-timer.h
@@ -0,0 +1,30 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 by Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_R4K_TIMER_H
+#define __ASM_R4K_TIMER_H
+
+#include <linux/compiler.h>
+
+#ifdef CONFIG_SYNC_R4K
+
+extern void synchronise_count_master(int cpu);
+extern void synchronise_count_slave(int cpu);
+
+#else
+
+static inline void synchronise_count_master(int cpu)
+{
+}
+
+static inline void synchronise_count_slave(int cpu)
+{
+}
+
+#endif
+
+#endif /* __ASM_R4K_TIMER_H */
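
A minimal sketch of the intended pairing of the two hooks above during SMP
bring-up; the surrounding function names are hypothetical:

	/* On the boot CPU, once a secondary CPU has come online: */
	static void example_sync_counters_master(int cpu)
	{
		synchronise_count_master(cpu);
	}

	/* On the secondary CPU itself, early in its startup path: */
	static void example_sync_counters_slave(void)
	{
		synchronise_count_slave(smp_processor_id());
	}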
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
new file mode 100644
index 000000000..15ab16f99
--- /dev/null
+++ b/arch/mips/include/asm/r4kcache.h
@@ -0,0 +1,379 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Inline assembly cache operations.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef _ASM_R4KCACHE_H
+#define _ASM_R4KCACHE_H
+
+#include <linux/stringify.h>
+
+#include <asm/asm.h>
+#include <asm/asm-eva.h>
+#include <asm/cacheops.h>
+#include <asm/compiler.h>
+#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mmzone.h>
+#include <asm/unroll.h>
+#include <linux/uaccess.h> /* for uaccess_kernel() */
+
+extern void (*r4k_blast_dcache)(void);
+extern void (*r4k_blast_icache)(void);
+
+/*
+ * This macro returns a properly sign-extended address suitable as a base address
+ * for indexed cache operations. Two issues here:
+ *
+ * - The MIPS32 and MIPS64 specs permit an implementation to directly derive
+ * the index bits from the virtual address. This breaks with tradition
+ * set by the R4000. To keep unpleasant surprises from happening we pick
+ * an address in KSEG0 / CKSEG0.
+ * - We need a properly sign extended address for 64-bit code. To get away
+ * without ifdefs we let the compiler do it by a type cast.
+ */
+#define INDEX_BASE CKSEG0
+
+#define _cache_op(insn, op, addr) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
+ " " insn("%0", "%1") " \n" \
+ " .set pop \n" \
+ : \
+ : "i" (op), "R" (*(unsigned char *)(addr)))
+
+#define cache_op(op, addr) \
+ _cache_op(kernel_cache, op, addr)
+
+static inline void flush_icache_line_indexed(unsigned long addr)
+{
+ cache_op(Index_Invalidate_I, addr);
+}
+
+static inline void flush_dcache_line_indexed(unsigned long addr)
+{
+ cache_op(Index_Writeback_Inv_D, addr);
+}
+
+static inline void flush_scache_line_indexed(unsigned long addr)
+{
+ cache_op(Index_Writeback_Inv_SD, addr);
+}
+
+static inline void flush_icache_line(unsigned long addr)
+{
+ switch (boot_cpu_type()) {
+ case CPU_LOONGSON2EF:
+ cache_op(Hit_Invalidate_I_Loongson2, addr);
+ break;
+
+ default:
+ cache_op(Hit_Invalidate_I, addr);
+ break;
+ }
+}
+
+static inline void flush_dcache_line(unsigned long addr)
+{
+ cache_op(Hit_Writeback_Inv_D, addr);
+}
+
+static inline void invalidate_dcache_line(unsigned long addr)
+{
+ cache_op(Hit_Invalidate_D, addr);
+}
+
+static inline void invalidate_scache_line(unsigned long addr)
+{
+ cache_op(Hit_Invalidate_SD, addr);
+}
+
+static inline void flush_scache_line(unsigned long addr)
+{
+ cache_op(Hit_Writeback_Inv_SD, addr);
+}
+
+#define protected_cache_op(op,addr) \
+({ \
+ int __err = 0; \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
+ "1: cache %1, (%2) \n" \
+ "2: .insn \n" \
+ " .set pop \n" \
+ " .section .fixup,\"ax\" \n" \
+ "3: li %0, %3 \n" \
+ " j 2b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "STR(PTR)" 1b, 3b \n" \
+ " .previous" \
+ : "+r" (__err) \
+ : "i" (op), "r" (addr), "i" (-EFAULT)); \
+ __err; \
+})
+
+
+#define protected_cachee_op(op,addr) \
+({ \
+ int __err = 0; \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set mips0 \n" \
+ " .set eva \n" \
+ "1: cachee %1, (%2) \n" \
+ "2: .insn \n" \
+ " .set pop \n" \
+ " .section .fixup,\"ax\" \n" \
+ "3: li %0, %3 \n" \
+ " j 2b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "STR(PTR)" 1b, 3b \n" \
+ " .previous" \
+ : "+r" (__err) \
+ : "i" (op), "r" (addr), "i" (-EFAULT)); \
+ __err; \
+})
+
+/*
+ * The next two are for badland addresses like signal trampolines.
+ */
+static inline int protected_flush_icache_line(unsigned long addr)
+{
+ switch (boot_cpu_type()) {
+ case CPU_LOONGSON2EF:
+ return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
+
+ default:
+#ifdef CONFIG_EVA
+ return protected_cachee_op(Hit_Invalidate_I, addr);
+#else
+ return protected_cache_op(Hit_Invalidate_I, addr);
+#endif
+ }
+}
+
+/*
+ * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
+ * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
+ * caches. Only one cache line gets invalidated unnecessarily here, so the
+ * penalty is minor.
+ */
+static inline int protected_writeback_dcache_line(unsigned long addr)
+{
+#ifdef CONFIG_EVA
+ return protected_cachee_op(Hit_Writeback_Inv_D, addr);
+#else
+ return protected_cache_op(Hit_Writeback_Inv_D, addr);
+#endif
+}
+
+static inline int protected_writeback_scache_line(unsigned long addr)
+{
+#ifdef CONFIG_EVA
+ return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
+#else
+ return protected_cache_op(Hit_Writeback_Inv_SD, addr);
+#endif
+}
+
+/*
+ * This one is RM7000-specific
+ */
+static inline void invalidate_tcache_page(unsigned long addr)
+{
+ cache_op(Page_Invalidate_T, addr);
+}
+
+#define cache_unroll(times, insn, op, addr, lsize) do { \
+ int i = 0; \
+ unroll(times, _cache_op, insn, op, (addr) + (i++ * (lsize))); \
+} while (0)
+
+/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
+#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
+static inline void extra##blast_##pfx##cache##lsize(void) \
+{ \
+ unsigned long start = INDEX_BASE; \
+ unsigned long end = start + current_cpu_data.desc.waysize; \
+ unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
+ unsigned long ws_end = current_cpu_data.desc.ways << \
+ current_cpu_data.desc.waybit; \
+ unsigned long ws, addr; \
+ \
+ for (ws = 0; ws < ws_end; ws += ws_inc) \
+ for (addr = start; addr < end; addr += lsize * 32) \
+ cache_unroll(32, kernel_cache, indexop, \
+ addr | ws, lsize); \
+} \
+ \
+static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
+{ \
+ unsigned long start = page; \
+ unsigned long end = page + PAGE_SIZE; \
+ \
+ do { \
+ cache_unroll(32, kernel_cache, hitop, start, lsize); \
+ start += lsize * 32; \
+ } while (start < end); \
+} \
+ \
+static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
+{ \
+ unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
+ unsigned long start = INDEX_BASE + (page & indexmask); \
+ unsigned long end = start + PAGE_SIZE; \
+ unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
+ unsigned long ws_end = current_cpu_data.desc.ways << \
+ current_cpu_data.desc.waybit; \
+ unsigned long ws, addr; \
+ \
+ for (ws = 0; ws < ws_end; ws += ws_inc) \
+ for (addr = start; addr < end; addr += lsize * 32) \
+ cache_unroll(32, kernel_cache, indexop, \
+ addr | ws, lsize); \
+}
+
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
+
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
+
+#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
+static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
+{ \
+ unsigned long start = page; \
+ unsigned long end = page + PAGE_SIZE; \
+ \
+ do { \
+ cache_unroll(32, user_cache, hitop, start, lsize); \
+ start += lsize * 32; \
+ } while (start < end); \
+}
+
+__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
+ 16)
+__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
+__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
+ 32)
+__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
+__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
+ 64)
+__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
+
+/* build blast_xxx_range, protected_blast_xxx_range */
+#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
+static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
+ unsigned long end) \
+{ \
+ unsigned long lsize = cpu_##desc##_line_size(); \
+ unsigned long addr = start & ~(lsize - 1); \
+ unsigned long aend = (end - 1) & ~(lsize - 1); \
+ \
+ while (1) { \
+ prot##cache_op(hitop, addr); \
+ if (addr == aend) \
+ break; \
+ addr += lsize; \
+ } \
+}
+
+#ifndef CONFIG_EVA
+
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
+
+#else
+
+#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop) \
+static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
+ unsigned long end) \
+{ \
+ unsigned long lsize = cpu_##desc##_line_size(); \
+ unsigned long addr = start & ~(lsize - 1); \
+ unsigned long aend = (end - 1) & ~(lsize - 1); \
+ \
+ if (!uaccess_kernel()) { \
+ while (1) { \
+ protected_cachee_op(hitop, addr); \
+ if (addr == aend) \
+ break; \
+ addr += lsize; \
+ } \
+ } else { \
+ while (1) { \
+ protected_cache_op(hitop, addr); \
+ if (addr == aend) \
+ break; \
+ addr += lsize; \
+ } \
+ \
+ } \
+}
+
+__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
+__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)
+
+#endif
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
+ protected_, loongson2_)
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
+/* blast_inv_dcache_range */
+__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
+__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
+
+/* Currently, this is very specific to Loongson-3 */
+#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \
+static inline void blast_##pfx##cache##lsize##_node(long node) \
+{ \
+ unsigned long start = CAC_BASE | nid_to_addrbase(node); \
+ unsigned long end = start + current_cpu_data.desc.waysize; \
+ unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
+ unsigned long ws_end = current_cpu_data.desc.ways << \
+ current_cpu_data.desc.waybit; \
+ unsigned long ws, addr; \
+ \
+ for (ws = 0; ws < ws_end; ws += ws_inc) \
+ for (addr = start; addr < end; addr += lsize * 32) \
+ cache_unroll(32, kernel_cache, indexop, \
+ addr | ws, lsize); \
+}
+
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
+
+#endif /* _ASM_R4KCACHE_H */
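
A minimal sketch using the range helpers generated by
__BUILD_BLAST_CACHE_RANGE() above, e.g. to make freshly written kernel text
visible to the instruction fetch unit; the function name is illustrative:

	static void example_flush_new_text(unsigned long start, unsigned long end)
	{
		/* Write back and invalidate the D-cache lines covering the range. */
		blast_dcache_range(start, end);
		/* Then invalidate the corresponding I-cache lines. */
		blast_icache_range(start, end);
	}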
diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
new file mode 100644
index 000000000..e48c0bfab
--- /dev/null
+++ b/arch/mips/include/asm/reboot.h
@@ -0,0 +1,15 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1997, 1999, 2001, 06 by Ralf Baechle
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+#ifndef _ASM_REBOOT_H
+#define _ASM_REBOOT_H
+
+extern void (*_machine_restart)(char *command);
+extern void (*_machine_halt)(void);
+
+#endif /* _ASM_REBOOT_H */
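
A minimal sketch of how a board's setup code typically wires its reset routine
into the hook above; the function names and the reset mechanism are
hypothetical:

	static void myboard_machine_restart(char *command)
	{
		/* Poke a board-specific watchdog or reset register here. */
	}

	static int __init myboard_reboot_setup(void)
	{
		_machine_restart = myboard_machine_restart;
		return 0;
	}
	arch_initcall(myboard_reboot_setup);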
diff --git a/arch/mips/include/asm/reg.h b/arch/mips/include/asm/reg.h
new file mode 100644
index 000000000..84dc7e2e2
--- /dev/null
+++ b/arch/mips/include/asm/reg.h
@@ -0,0 +1 @@
+#include <uapi/asm/reg.h>
diff --git a/arch/mips/include/asm/regdef.h b/arch/mips/include/asm/regdef.h
new file mode 100644
index 000000000..3c687df1d
--- /dev/null
+++ b/arch/mips/include/asm/regdef.h
@@ -0,0 +1,106 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1985 MIPS Computer Systems, Inc.
+ * Copyright (C) 1994, 95, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1990 - 1992, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2011 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef _ASM_REGDEF_H
+#define _ASM_REGDEF_H
+
+#include <asm/sgidefs.h>
+
+#if _MIPS_SIM == _MIPS_SIM_ABI32
+
+/*
+ * Symbolic register names for 32 bit ABI
+ */
+#define zero $0 /* wired zero */
+#define AT $1 /* assembler temp - uppercase because of ".set at" */
+#define v0 $2 /* return value */
+#define v1 $3
+#define a0 $4 /* argument registers */
+#define a1 $5
+#define a2 $6
+#define a3 $7
+#define t0 $8 /* caller saved */
+#define t1 $9
+#define t2 $10
+#define t3 $11
+#define t4 $12
+#define ta0 $12
+#define t5 $13
+#define ta1 $13
+#define t6 $14
+#define ta2 $14
+#define t7 $15
+#define ta3 $15
+#define s0 $16 /* callee saved */
+#define s1 $17
+#define s2 $18
+#define s3 $19
+#define s4 $20
+#define s5 $21
+#define s6 $22
+#define s7 $23
+#define t8 $24 /* caller saved */
+#define t9 $25
+#define jp $25 /* PIC jump register */
+#define k0 $26 /* kernel scratch */
+#define k1 $27
+#define gp $28 /* global pointer */
+#define sp $29 /* stack pointer */
+#define fp $30 /* frame pointer */
+#define s8 $30 /* same like fp! */
+#define ra $31 /* return address */
+
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
+
+#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
+
+#define zero $0 /* wired zero */
+#define AT $at /* assembler temp - uppercase because of ".set at" */
+#define v0 $2 /* return value - caller saved */
+#define v1 $3
+#define a0 $4 /* argument registers */
+#define a1 $5
+#define a2 $6
+#define a3 $7
+#define a4 $8 /* arg reg 64 bit; caller saved in 32 bit */
+#define ta0 $8
+#define a5 $9
+#define ta1 $9
+#define a6 $10
+#define ta2 $10
+#define a7 $11
+#define ta3 $11
+#define t0 $12 /* caller saved */
+#define t1 $13
+#define t2 $14
+#define t3 $15
+#define s0 $16 /* callee saved */
+#define s1 $17
+#define s2 $18
+#define s3 $19
+#define s4 $20
+#define s5 $21
+#define s6 $22
+#define s7 $23
+#define t8 $24 /* caller saved */
+#define t9 $25 /* callee address for PIC/temp */
+#define jp $25 /* PIC jump register */
+#define k0 $26 /* kernel temporary */
+#define k1 $27
+#define gp $28 /* global pointer - caller saved for PIC */
+#define sp $29 /* stack pointer */
+#define fp $30 /* frame pointer */
+#define s8 $30 /* callee saved */
+#define ra $31 /* return address */
+
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
+
+#endif /* _ASM_REGDEF_H */
diff --git a/arch/mips/include/asm/rtlx.h b/arch/mips/include/asm/rtlx.h
new file mode 100644
index 000000000..c10206548
--- /dev/null
+++ b/arch/mips/include/asm/rtlx.h
@@ -0,0 +1,88 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#ifndef __ASM_RTLX_H_
+#define __ASM_RTLX_H_
+
+#include <irq.h>
+
+#define RTLX_MODULE_NAME "rtlx"
+
+#define LX_NODE_BASE 10
+
+#define MIPS_CPU_RTLX_IRQ 0
+
+#define RTLX_VERSION 2
+#define RTLX_xID 0x12345600
+#define RTLX_ID (RTLX_xID | RTLX_VERSION)
+#define RTLX_BUFFER_SIZE 2048
+#define RTLX_CHANNELS 8
+
+#define RTLX_CHANNEL_STDIO 0
+#define RTLX_CHANNEL_DBG 1
+#define RTLX_CHANNEL_SYSIO 2
+
+void rtlx_starting(int vpe);
+void rtlx_stopping(int vpe);
+
+int rtlx_open(int index, int can_sleep);
+int rtlx_release(int index);
+ssize_t rtlx_read(int index, void __user *buff, size_t count);
+ssize_t rtlx_write(int index, const void __user *buffer, size_t count);
+unsigned int rtlx_read_poll(int index, int can_sleep);
+unsigned int rtlx_write_poll(int index);
+
+int __init rtlx_module_init(void);
+void __exit rtlx_module_exit(void);
+
+void _interrupt_sp(void);
+
+extern struct vpe_notifications rtlx_notify;
+extern const struct file_operations rtlx_fops;
+extern void (*aprp_hook)(void);
+
+enum rtlx_state {
+ RTLX_STATE_UNUSED = 0,
+ RTLX_STATE_INITIALISED,
+ RTLX_STATE_REMOTE_READY,
+ RTLX_STATE_OPENED
+};
+
+extern struct chan_waitqueues {
+ wait_queue_head_t rt_queue;
+ wait_queue_head_t lx_queue;
+ atomic_t in_open;
+ struct mutex mutex;
+} channel_wqs[RTLX_CHANNELS];
+
+/*
+ * Each channel supports read and write:
+ *   Linux (VPE0) reads lx_buffer and writes rt_buffer;
+ *   the SP (VPE1) reads rt_buffer and writes lx_buffer.
+ */
+struct rtlx_channel {
+ enum rtlx_state rt_state;
+ enum rtlx_state lx_state;
+
+ int buffer_size;
+
+ /* read and write indexes per buffer */
+ int rt_write, rt_read;
+ char *rt_buffer;
+
+ int lx_write, lx_read;
+ char *lx_buffer;
+};
+
+extern struct rtlx_info {
+ unsigned long id;
+ enum rtlx_state state;
+ int ap_int_pending; /* Status of 0 or 1 for CONFIG_MIPS_CMP only */
+
+ struct rtlx_channel channel[RTLX_CHANNELS];
+} *rtlx;
+#endif /* __ASM_RTLX_H_ */
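
A minimal sketch of how a character-device read method might route into the
channel API declared above; the minor-number-to-channel mapping and blocking
behaviour shown here are assumptions for illustration only:

	static ssize_t example_rtlx_file_read(struct file *file, char __user *buf,
					      size_t count, loff_t *ppos)
	{
		int channel = iminor(file_inode(file));

		/* Block (can_sleep = 1) until the remote side has queued data. */
		if (!rtlx_read_poll(channel, 1))
			return 0;

		return rtlx_read(channel, buf, count);
	}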
diff --git a/arch/mips/include/asm/seccomp.h b/arch/mips/include/asm/seccomp.h
new file mode 100644
index 000000000..aa809589a
--- /dev/null
+++ b/arch/mips/include/asm/seccomp.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SECCOMP_H
+#define __ASM_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#ifdef CONFIG_COMPAT
+static inline const int *get_compat_mode1_syscalls(void)
+{
+ static const int syscalls_O32[] = {
+ __NR_O32_Linux + 3, __NR_O32_Linux + 4,
+ __NR_O32_Linux + 1, __NR_O32_Linux + 193,
+ -1, /* negative terminated */
+ };
+ static const int syscalls_N32[] = {
+ __NR_N32_Linux + 0, __NR_N32_Linux + 1,
+ __NR_N32_Linux + 58, __NR_N32_Linux + 211,
+ -1, /* negative terminated */
+ };
+
+ if (IS_ENABLED(CONFIG_MIPS32_O32) && test_thread_flag(TIF_32BIT_REGS))
+ return syscalls_O32;
+
+ if (IS_ENABLED(CONFIG_MIPS32_N32))
+ return syscalls_N32;
+
+ BUG();
+}
+
+#define get_compat_mode1_syscalls get_compat_mode1_syscalls
+
+#endif /* CONFIG_COMPAT */
+
+#include <asm-generic/seccomp.h>
+
+#endif /* __ASM_SECCOMP_H */
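
A minimal sketch of how a strict-mode (mode 1) seccomp check might walk the
negative-terminated tables returned above; the function is illustrative and
assumes CONFIG_COMPAT, where get_compat_mode1_syscalls() is defined:

	static bool example_mode1_syscall_allowed(int this_syscall)
	{
		const int *allowed = get_compat_mode1_syscalls();

		for (; *allowed != -1; allowed++)
			if (*allowed == this_syscall)
				return true;

		return false;
	}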
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
new file mode 100644
index 000000000..8c56b862f
--- /dev/null
+++ b/arch/mips/include/asm/setup.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MIPS_SETUP_H
+#define _MIPS_SETUP_H
+
+#include <linux/types.h>
+#include <uapi/asm/setup.h>
+
+extern void prom_putchar(char);
+extern void setup_early_printk(void);
+
+#ifdef CONFIG_EARLY_PRINTK_8250
+extern void setup_8250_early_printk_port(unsigned long base,
+ unsigned int reg_shift, unsigned int timeout);
+#else
+static inline void setup_8250_early_printk_port(unsigned long base,
+ unsigned int reg_shift, unsigned int timeout) {}
+#endif
+
+void set_handler(unsigned long offset, const void *addr, unsigned long len);
+extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
+
+typedef void (*vi_handler_t)(void);
+extern void *set_vi_handler(int n, vi_handler_t addr);
+
+extern void *set_except_vector(int n, void *addr);
+extern unsigned long ebase;
+extern unsigned int hwrena;
+extern void per_cpu_trap_init(bool);
+extern void cpu_cache_init(void);
+
+#endif /* _MIPS_SETUP_H */
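
A minimal sketch of a board's early-console bring-up using the declarations
above; the UART base address and register shift are illustrative values only:

	static void __init example_setup_early_console(void)
	{
		/* 8250-compatible UART, registers on 4-byte boundaries (shift = 2). */
		setup_8250_early_printk_port(CKSEG1ADDR(0x18000300), 2, 0);
		setup_early_printk();
	}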
diff --git a/arch/mips/include/asm/sgi/gio.h b/arch/mips/include/asm/sgi/gio.h
new file mode 100644
index 000000000..24be2b425
--- /dev/null
+++ b/arch/mips/include/asm/sgi/gio.h
@@ -0,0 +1,86 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * gio.h: Definitions for SGI GIO bus
+ *
+ * Copyright (C) 2002 Ladislav Michl
+ */
+
+#ifndef _SGI_GIO_H
+#define _SGI_GIO_H
+
+/*
+ * GIO bus addresses
+ *
+ * The Indigo and Indy have two GIO bus connectors. The Indigo2 (all models) has
+ * three physical connectors, but only two slots, GFX and EXP0.
+ *
+ * There is 10MB of GIO address space for GIO64 slot devices
+ * slot# slot type address range size
+ * ----- --------- ----------------------- -----
+ * 0 GFX 0x1f000000 - 0x1f3fffff 4MB
+ * 1 EXP0 0x1f400000 - 0x1f5fffff 2MB
+ * 2 EXP1 0x1f600000 - 0x1f9fffff 4MB
+ *
+ * There are also un-slotted devices (HPC, I/O and misc devices), which are
+ * grouped into the HPC address space.
+ * - MISC 0x1fb00000 - 0x1fbfffff 1MB
+ *
+ * The following space is reserved and unused:
+ * - RESERVED 0x18000000 - 0x1effffff 112MB
+ *
+ * GIO bus IDs
+ *
+ * Each GIO bus device identifies itself to the system by answering a
+ * read with an "ID" value. IDs are either 8 or 32 bits long. IDs less
+ * than 128 are 8 bits long, with the most significant 24 bits read from
+ * the slot undefined.
+ *
+ * 32-bit IDs are divided into
+ * bits 0:6 the product ID; ranges from 0x00 to 0x7F.
+ * bit 7 0=GIO Product ID is 8 bits wide
+ * 1=GIO Product ID is 32 bits wide.
+ * bits 8:15 manufacturer version for the product.
+ * bit 16 0=GIO32 and GIO32-bis, 1=GIO64.
+ * bit 17 0=no ROM present
+ * 1=ROM present on this board AND next three words
+ * space define the ROM.
+ * bits 18:31 up to manufacturer.
+ *
+ * IDs above 0x50/0xd0 belong to 3rd-party boards.
+ *
+ * 8-bit IDs
+ * 0x01 XPI low cost FDDI
+ * 0x02 GTR TokenRing
+ * 0x04 Synchronous ISDN
+ * 0x05 ATM board [*]
+ * 0x06 Canon Interface
+ * 0x07 16 bit SCSI Card [*]
+ * 0x08 JPEG (Double Wide)
+ * 0x09 JPEG (Single Wide)
+ * 0x0a XPI mez. FDDI device 0
+ * 0x0b XPI mez. FDDI device 1
+ * 0x0c SMPTE 259M Video [*]
+ * 0x0d Babblefish Compression [*]
+ * 0x0e E-Plex 8-port Ethernet
+ * 0x30 Lyon Lamb IVAS
+ * 0xb8 GIO 100BaseTX Fast Ethernet (gfe)
+ *
+ * [*] These devices provide a 32-bit ID.
+ *
+ */
+
+#define GIO_ID(x) (x & 0x7f)
+#define GIO_32BIT_ID 0x80
+#define GIO_REV(x) ((x >> 8) & 0xff)
+#define GIO_64BIT_IFACE 0x10000
+#define GIO_ROM_PRESENT 0x20000
+#define GIO_VENDOR_CODE(x) ((x >> 18) & 0x3fff)
+
+#define GIO_SLOT_GFX_BASE 0x1f000000
+#define GIO_SLOT_EXP0_BASE 0x1f400000
+#define GIO_SLOT_EXP1_BASE 0x1f600000
+
+#endif /* _SGI_GIO_H */
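
A minimal sketch decoding a 32-bit GIO ID word with the helpers above; the
function and the pr_info() reporting are illustrative:

	static void example_describe_gio_id(unsigned int id)
	{
		if (!(id & GIO_32BIT_ID)) {
			pr_info("8-bit GIO product id 0x%02x\n", GIO_ID(id));
			return;
		}

		pr_info("GIO product 0x%02x rev 0x%02x vendor 0x%04x%s%s\n",
			GIO_ID(id), GIO_REV(id), GIO_VENDOR_CODE(id),
			(id & GIO_64BIT_IFACE) ? ", GIO64" : "",
			(id & GIO_ROM_PRESENT) ? ", ROM present" : "");
	}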
diff --git a/arch/mips/include/asm/sgi/heart.h b/arch/mips/include/asm/sgi/heart.h
new file mode 100644
index 000000000..0d0375195
--- /dev/null
+++ b/arch/mips/include/asm/sgi/heart.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * HEART chip definitions
+ *
+ * Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
+ * 2009 Johannes Dickgreber <tanzy@gmx.de>
+ * 2007-2015 Joshua Kinard <kumba@gentoo.org>
+ */
+#ifndef __ASM_SGI_HEART_H
+#define __ASM_SGI_HEART_H
+
+#include <linux/types.h>
+#include <linux/time.h>
+
+/*
+ * There are 8 DIMM slots on an IP30 system
+ * board, which are grouped into four banks
+ */
+#define HEART_MEMORY_BANKS 4
+
+/* HEART can support up to four CPUs */
+#define HEART_MAX_CPUS 4
+
+#define HEART_XKPHYS_BASE ((void *)(IO_BASE | 0x000000000ff00000ULL))
+
+/**
+ * struct ip30_heart_regs - struct that maps IP30 HEART registers.
+ * @mode: HEART_MODE - Purpose Unknown, machine reset called from here.
+ * @sdram_mode: HEART_SDRAM_MODE - purpose unknown.
+ * @mem_refresh: HEART_MEM_REF - purpose unknown.
+ * @mem_req_arb: HEART_MEM_REQ_ARB - purpose unknown.
+ * @mem_cfg.q: union for 64bit access to HEART_MEMCFG - 4x 64bit registers.
+ * @mem_cfg.l: union for 32bit access to HEART_MEMCFG - 8x 32bit registers.
+ * @fc_mode: HEART_FC_MODE - Purpose Unknown, possibly for GFX flow control.
+ * @fc_timer_limit: HEART_FC_TIMER_LIMIT - purpose unknown.
+ * @fc_addr: HEART_FC0_ADDR, HEART_FC1_ADDR - purpose unknown.
+ * @fc_credit_cnt: HEART_FC0_CR_CNT, HEART_FC1_CR_CNT - purpose unknown.
+ * @fc_timer: HEART_FC0_TIMER, HEART_FC1_TIMER - purpose unknown.
+ * @status: HEART_STATUS - HEART status information.
+ * @bus_err_addr: HEART_BERR_ADDR - likely contains addr of recent SIGBUS.
+ * @bus_err_misc: HEART_BERR_MISC - purpose unknown.
+ * @mem_err_addr: HEART_MEMERR_ADDR - likely contains addr of recent mem err.
+ * @mem_err_data: HEART_MEMERR_DATA - purpose unknown.
+ * @piur_acc_err: HEART_PIUR_ACC_ERR - likely for access err to HEART regs.
+ * @mlan_clock_div: HEART_MLAN_CLK_DIV - MicroLAN clock divider.
+ * @mlan_ctrl: HEART_MLAN_CTL - MicroLAN control.
+ * @__pad0: 0x0f40 bytes of padding -> next HEART register 0x01000.
+ * @undefined: Undefined/diag register, write to it triggers PIUR_ACC_ERR.
+ * @__pad1: 0xeff8 bytes of padding -> next HEART register 0x10000.
+ * @imr: HEART_IMR0 to HEART_IMR3 - per-cpu interrupt mask register.
+ * @set_isr: HEART_SET_ISR - set interrupt status register.
+ * @clear_isr: HEART_CLR_ISR - clear interrupt status register.
+ * @isr: HEART_ISR - interrupt status register (read-only).
+ * @imsr: HEART_IMSR - purpose unknown.
+ * @cause: HEART_CAUSE - HEART cause information.
+ * @__pad2: 0xffb8 bytes of padding -> next HEART register 0x20000.
+ * @count: HEART_COUNT - 52-bit counter.
+ * @__pad3: 0xfff8 bytes of padding -> next HEART register 0x30000.
+ * @compare: HEART_COMPARE - 24-bit compare.
+ * @__pad4: 0xfff8 bytes of padding -> next HEART register 0x40000.
+ * @trigger: HEART_TRIGGER - purpose unknown.
+ * @__pad5: 0xfff8 bytes of padding -> next HEART register 0x50000.
+ * @cpuid: HEART_PRID - contains CPU ID of CPU currently accessing HEART.
+ * @__pad6: 0xfff8 bytes of padding -> next HEART register 0x60000.
+ * @sync: HEART_SYNC - purpose unknown.
+ *
+ * HEART is the main system controller ASIC for the IP30 system. It incorporates
+ * a memory controller, interrupt status/cause/set/clear management, basic
+ * timer with count/compare, and other functionality. For Linux, not all of
+ * HEART's functions are fully understood.
+ *
+ * Implementation note: All HEART registers are 64bits-wide, but the mem_cfg
+ * register only reports correct values if queried in 32bits. Hence the need
+ * for a union. Even though mem_cfg.l has 8 array slots, we only ever query
+ * up to 4 of those. IP30 has 8 DIMM slots arranged into 4 banks, w/ 2 DIMMs
+ * per bank. Each 32bit read accesses one of these banks. Perhaps HEART was
+ * designed to address up to 8 banks (16 DIMMs)? We may never know.
+ */
+struct ip30_heart_regs { /* 0x0ff00000 */
+ u64 mode; /* + 0x00000 */
+ /* Memory */
+ u64 sdram_mode; /* + 0x00008 */
+ u64 mem_refresh; /* + 0x00010 */
+ u64 mem_req_arb; /* + 0x00018 */
+ union {
+ u64 q[HEART_MEMORY_BANKS]; /* readq() */
+ u32 l[HEART_MEMORY_BANKS * 2]; /* readl() */
+ } mem_cfg; /* + 0x00020 */
+ /* Flow control (gfx?) */
+ u64 fc_mode; /* + 0x00040 */
+ u64 fc_timer_limit; /* + 0x00048 */
+ u64 fc_addr[2]; /* + 0x00050 */
+ u64 fc_credit_cnt[2]; /* + 0x00060 */
+ u64 fc_timer[2]; /* + 0x00070 */
+ /* Status */
+ u64 status; /* + 0x00080 */
+ /* Bus error */
+ u64 bus_err_addr; /* + 0x00088 */
+ u64 bus_err_misc; /* + 0x00090 */
+ /* Memory error */
+ u64 mem_err_addr; /* + 0x00098 */
+ u64 mem_err_data; /* + 0x000a0 */
+ /* Misc */
+ u64 piur_acc_err; /* + 0x000a8 */
+ u64 mlan_clock_div; /* + 0x000b0 */
+ u64 mlan_ctrl; /* + 0x000b8 */
+ u64 __pad0[0x01e8]; /* + 0x000c0 + 0x0f40 */
+ /* Undefined */
+ u64 undefined; /* + 0x01000 */
+ u64 __pad1[0x1dff]; /* + 0x01008 + 0xeff8 */
+ /* Interrupts */
+ u64 imr[HEART_MAX_CPUS]; /* + 0x10000 */
+ u64 set_isr; /* + 0x10020 */
+ u64 clear_isr; /* + 0x10028 */
+ u64 isr; /* + 0x10030 */
+ u64 imsr; /* + 0x10038 */
+ u64 cause; /* + 0x10040 */
+ u64 __pad2[0x1ff7]; /* + 0x10048 + 0xffb8 */
+ /* Timer */
+ u64 count; /* + 0x20000 */
+ u64 __pad3[0x1fff]; /* + 0x20008 + 0xfff8 */
+ u64 compare; /* + 0x30000 */
+ u64 __pad4[0x1fff]; /* + 0x30008 + 0xfff8 */
+ u64 trigger; /* + 0x40000 */
+ u64 __pad5[0x1fff]; /* + 0x40008 + 0xfff8 */
+ /* Misc */
+ u64 cpuid; /* + 0x50000 */
+ u64 __pad6[0x1fff]; /* + 0x50008 + 0xfff8 */
+ u64 sync; /* + 0x60000 */
+};
+
+
+/* For timer-related bits. */
+#define HEART_NS_PER_CYCLE 80
+#define HEART_CYCLES_PER_SEC (NSEC_PER_SEC / HEART_NS_PER_CYCLE)
+
+
+/*
+ * XXX: Everything below this comment will either go away or be cleaned
+ * up to fit in better with Linux. A lot of the bit definitions for
+ * HEART were derived from IRIX's sys/RACER/heart.h header file.
+ */
+
+/* HEART Masks */
+#define HEART_ATK_MASK 0x0007ffffffffffff /* HEART attack mask */
+#define HEART_ACK_ALL_MASK 0xffffffffffffffff /* Ack everything */
+#define HEART_CLR_ALL_MASK 0x0000000000000000 /* Clear all */
+#define HEART_BR_ERR_MASK 0x7ff8000000000000 /* BRIDGE error mask */
+#define HEART_CPU0_ERR_MASK 0x8ff8000000000000 /* CPU0 error mask */
+#define HEART_CPU1_ERR_MASK 0x97f8000000000000 /* CPU1 error mask */
+#define HEART_CPU2_ERR_MASK 0xa7f8000000000000 /* CPU2 error mask */
+#define HEART_CPU3_ERR_MASK 0xc7f8000000000000 /* CPU3 error mask */
+#define HEART_ERR_MASK 0x1ff /* HEART error mask */
+#define HEART_ERR_MASK_START 51 /* HEART error start */
+#define HEART_ERR_MASK_END 63 /* HEART error end */
+
+/* Bits in the HEART_MODE register. */
+#define HM_PROC_DISABLE_SHFT 60
+#define HM_PROC_DISABLE_MSK (0xfUL << HM_PROC_DISABLE_SHFT)
+#define HM_PROC_DISABLE(x)	(0x1UL << ((x) + HM_PROC_DISABLE_SHFT))
+#define HM_MAX_PSR (0x7UL << 57)
+#define HM_MAX_IOSR (0x7UL << 54)
+#define HM_MAX_PEND_IOSR (0x7UL << 51)
+#define HM_TRIG_SRC_SEL_MSK (0x7UL << 48)
+#define HM_TRIG_HEART_EXC (0x0UL << 48)
+#define HM_TRIG_REG_BIT (0x1UL << 48)
+#define HM_TRIG_SYSCLK (0x2UL << 48)
+#define HM_TRIG_MEMCLK_2X (0x3UL << 48)
+#define HM_TRIG_MEMCLK (0x4UL << 48)
+#define HM_TRIG_IOCLK (0x5UL << 48)
+#define HM_PIU_TEST_MODE (0xfUL << 40)
+#define HM_GP_FLAG_MSK (0xfUL << 36)
+#define HM_GP_FLAG(x) BIT((x) + 36)
+#define HM_MAX_PROC_HYST (0xfUL << 32)
+#define HM_LLP_WRST_AFTER_RST BIT(28)
+#define HM_LLP_LINK_RST BIT(27)
+#define HM_LLP_WARM_RST BIT(26)
+#define HM_COR_ECC_LCK BIT(25)
+#define HM_REDUCED_PWR BIT(24)
+#define HM_COLD_RST BIT(23)
+#define HM_SW_RST BIT(22)
+#define HM_MEM_FORCE_WR BIT(21)
+#define HM_DB_ERR_GEN BIT(20)
+#define HM_SB_ERR_GEN BIT(19)
+#define HM_CACHED_PIO_EN BIT(18)
+#define HM_CACHED_PROM_EN BIT(17)
+#define HM_PE_SYS_COR_ERE BIT(16)
+#define HM_GLOBAL_ECC_EN BIT(15)
+#define HM_IO_COH_EN BIT(14)
+#define HM_INT_EN BIT(13)
+#define HM_DATA_CHK_EN BIT(12)
+#define HM_REF_EN BIT(11)
+#define HM_BAD_SYSWR_ERE BIT(10)
+#define HM_BAD_SYSRD_ERE BIT(9)
+#define HM_SYSSTATE_ERE BIT(8)
+#define HM_SYSCMD_ERE BIT(7)
+#define HM_NCOR_SYS_ERE BIT(6)
+#define HM_COR_SYS_ERE BIT(5)
+#define HM_DATA_ELMNT_ERE BIT(4)
+#define HM_MEM_ADDR_PROC_ERE BIT(3)
+#define HM_MEM_ADDR_IO_ERE BIT(2)
+#define HM_NCOR_MEM_ERE BIT(1)
+#define HM_COR_MEM_ERE BIT(0)
+
+/* Bits in the HEART_MEM_REF register. */
+#define HEART_MEMREF_REFS(x) ((0xfUL & (x)) << 16)
+#define HEART_MEMREF_PERIOD(x) ((0xffffUL & (x)))
+#define HEART_MEMREF_REFS_VAL HEART_MEMREF_REFS(8)
+#define HEART_MEMREF_PERIOD_VAL HEART_MEMREF_PERIOD(0x4000)
+#define HEART_MEMREF_VAL (HEART_MEMREF_REFS_VAL | \
+ HEART_MEMREF_PERIOD_VAL)
+
+/* Bits in the HEART_MEM_REQ_ARB register. */
+#define HEART_MEMARB_IODIS (1 << 20)
+#define HEART_MEMARB_MAXPMWRQS (15 << 16)
+#define HEART_MEMARB_MAXPMRRQS (15 << 12)
+#define HEART_MEMARB_MAXPMRQS (15 << 8)
+#define HEART_MEMARB_MAXRRRQS (15 << 4)
+#define HEART_MEMARB_MAXGBRRQS (15)
+
+/* Bits in the HEART_MEMCFG<x> registers. */
+#define HEART_MEMCFG_VALID 0x80000000 /* Bank is valid */
+#define HEART_MEMCFG_DENSITY 0x01c00000 /* Mem density */
+#define HEART_MEMCFG_SIZE_MASK 0x003f0000 /* Mem size mask */
+#define HEART_MEMCFG_ADDR_MASK 0x000001ff /* Base addr mask */
+#define HEART_MEMCFG_SIZE_SHIFT 16 /* Mem size shift */
+#define HEART_MEMCFG_DENSITY_SHIFT 22 /* Density Shift */
+#define HEART_MEMCFG_UNIT_SHIFT 25 /* Unit Shift, 32MB */
+
+/* Bits in the HEART_STATUS register */
+#define HEART_STAT_HSTL_SDRV BIT(14)
+#define HEART_STAT_FC_CR_OUT(x) BIT((x) + 12)
+#define HEART_STAT_DIR_CNNCT BIT(11)
+#define HEART_STAT_TRITON BIT(10)
+#define HEART_STAT_R4K BIT(9)
+#define HEART_STAT_BIG_ENDIAN BIT(8)
+#define HEART_STAT_PROC_SHFT 4
+#define HEART_STAT_PROC_MSK (0xfUL << HEART_STAT_PROC_SHFT)
+#define HEART_STAT_PROC_ACTIVE(x) (0x1UL << ((x) + HEART_STAT_PROC_SHFT))
+#define HEART_STAT_WIDGET_ID 0xf
+
+/* Bits in the HEART_CAUSE register */
+#define HC_PE_SYS_COR_ERR_MSK (0xfUL << 60)
+#define HC_PE_SYS_COR_ERR(x) BIT((x) + 60)
+#define HC_PIOWDB_OFLOW BIT(44)
+#define HC_PIORWRB_OFLOW BIT(43)
+#define HC_PIUR_ACC_ERR BIT(42)
+#define HC_BAD_SYSWR_ERR BIT(41)
+#define HC_BAD_SYSRD_ERR BIT(40)
+#define HC_SYSSTATE_ERR_MSK (0xfUL << 36)
+#define HC_SYSSTATE_ERR(x) BIT((x) + 36)
+#define HC_SYSCMD_ERR_MSK (0xfUL << 32)
+#define HC_SYSCMD_ERR(x) BIT((x) + 32)
+#define HC_NCOR_SYSAD_ERR_MSK (0xfUL << 28)
+#define HC_NCOR_SYSAD_ERR(x) BIT((x) + 28)
+#define HC_COR_SYSAD_ERR_MSK (0xfUL << 24)
+#define HC_COR_SYSAD_ERR(x) BIT((x) + 24)
+#define HC_DATA_ELMNT_ERR_MSK (0xfUL << 20)
+#define HC_DATA_ELMNT_ERR(x) BIT((x) + 20)
+#define HC_WIDGET_ERR BIT(16)
+#define HC_MEM_ADDR_ERR_PROC_MSK (0xfUL << 4)
+#define HC_MEM_ADDR_ERR_PROC(x) BIT((x) + 4)
+#define HC_MEM_ADDR_ERR_IO BIT(2)
+#define HC_NCOR_MEM_ERR BIT(1)
+#define HC_COR_MEM_ERR BIT(0)
+
+/*
+ * HEART has 64 interrupt vectors available to it, subdivided into five
+ * priority levels. They are numbered 0 to 63.
+ */
+#define HEART_NUM_IRQS 64
+
+/*
+ * These are the five interrupt priority levels and their corresponding
+ * CPU IPx interrupt pins.
+ *
+ * Level 4 - Error Interrupts.
+ * Level 3 - HEART timer interrupt.
+ * Level 2 - CPU IPI, CPU debug, power button, general device interrupts.
+ * Level 1 - General device interrupts.
+ * Level 0 - General device GFX flow control interrupts.
+ */
+#define HEART_L4_INT_MASK 0xfff8000000000000ULL /* IP6 */
+#define HEART_L3_INT_MASK 0x0004000000000000ULL /* IP5 */
+#define HEART_L2_INT_MASK 0x0003ffff00000000ULL /* IP4 */
+#define HEART_L1_INT_MASK 0x00000000ffff0000ULL /* IP3 */
+#define HEART_L0_INT_MASK 0x000000000000ffffULL /* IP2 */
+
+/* HEART L0 Interrupts (Low Priority) */
+#define HEART_L0_INT_GENERIC 0
+#define HEART_L0_INT_FLOW_CTRL_HWTR_0 1
+#define HEART_L0_INT_FLOW_CTRL_HWTR_1 2
+
+/* HEART L2 Interrupts (High Priority) */
+#define HEART_L2_INT_RESCHED_CPU_0 46
+#define HEART_L2_INT_RESCHED_CPU_1 47
+#define HEART_L2_INT_CALL_CPU_0 48
+#define HEART_L2_INT_CALL_CPU_1 49
+
+/* HEART L3 Interrupts (Compare/Counter Timer) */
+#define HEART_L3_INT_TIMER 50
+
+/* HEART L4 Interrupts (Errors) */
+#define HEART_L4_INT_XWID_ERR_9 51
+#define HEART_L4_INT_XWID_ERR_A 52
+#define HEART_L4_INT_XWID_ERR_B 53
+#define HEART_L4_INT_XWID_ERR_C 54
+#define HEART_L4_INT_XWID_ERR_D 55
+#define HEART_L4_INT_XWID_ERR_E 56
+#define HEART_L4_INT_XWID_ERR_F 57
+#define HEART_L4_INT_XWID_ERR_XBOW 58
+#define HEART_L4_INT_CPU_BUS_ERR_0 59
+#define HEART_L4_INT_CPU_BUS_ERR_1 60
+#define HEART_L4_INT_CPU_BUS_ERR_2 61
+#define HEART_L4_INT_CPU_BUS_ERR_3 62
+#define HEART_L4_INT_HEART_EXCP 63
+
+extern struct ip30_heart_regs __iomem *heart_regs;
+
+#define heart_read ____raw_readq
+#define heart_write ____raw_writeq
+
+#endif /* __ASM_SGI_HEART_H */
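
A minimal sketch of masking and acknowledging one HEART interrupt source with
the register layout and accessors above; the function name is illustrative and
error handling is omitted:

	static void example_heart_mask_and_ack(int cpu, unsigned int hwirq)
	{
		u64 imr;

		/* Clear the source's bit in this CPU's interrupt mask register... */
		imr = heart_read(&heart_regs->imr[cpu]);
		heart_write(imr & ~BIT_ULL(hwirq), &heart_regs->imr[cpu]);

		/* ...and acknowledge it in the global interrupt status register. */
		heart_write(BIT_ULL(hwirq), &heart_regs->clear_isr);
	}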
diff --git a/arch/mips/include/asm/sgi/hpc3.h b/arch/mips/include/asm/sgi/hpc3.h
new file mode 100644
index 000000000..c0e3dc029
--- /dev/null
+++ b/arch/mips/include/asm/sgi/hpc3.h
@@ -0,0 +1,317 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * hpc3.h: Definitions for SGI HPC3 controller
+ *
+ * Copyright (C) 1996 David S. Miller
+ * Copyright (C) 1998 Ralf Baechle
+ */
+
+#ifndef _SGI_HPC3_H
+#define _SGI_HPC3_H
+
+#include <linux/types.h>
+#include <asm/page.h>
+
+/* An HPC DMA descriptor. */
+struct hpc_dma_desc {
+ u32 pbuf; /* physical address of data buffer */
+ u32 cntinfo; /* counter and info bits */
+#define HPCDMA_EOX 0x80000000 /* last desc in chain for tx */
+#define HPCDMA_EOR 0x80000000 /* last desc in chain for rx */
+#define HPCDMA_EOXP 0x40000000 /* end of packet for tx */
+#define HPCDMA_EORP 0x40000000 /* end of packet for rx */
+#define HPCDMA_XIE 0x20000000 /* irq generated when at end of this desc */
+#define HPCDMA_XIU 0x01000000 /* Tx buffer in use by CPU. */
+#define HPCDMA_EIPC 0x00ff0000 /* SEEQ ethernet special xternal bytecount */
+#define HPCDMA_ETXD 0x00008000 /* set to one by HPC when packet tx'd */
+#define HPCDMA_OWN 0x00004000 /* Denotes ring buffer ownership on rx */
+#define HPCDMA_BCNT 0x00003fff /* size in bytes of this dma buffer */
+
+ u32 pnext; /* paddr of next hpc_dma_desc if any */
+};
+
+/* The set of regs for each HPC3 PBUS DMA channel. */
+struct hpc3_pbus_dmacregs {
+ volatile u32 pbdma_bptr; /* pbus dma channel buffer ptr */
+ volatile u32 pbdma_dptr; /* pbus dma channel desc ptr */
+ u32 _unused0[0x1000/4 - 2]; /* padding */
+ volatile u32 pbdma_ctrl; /* pbus dma channel control register has
+ * completely different meaning for read
+ * compared with write */
+ /* read */
+#define HPC3_PDMACTRL_INT 0x00000001 /* interrupt (cleared after read) */
+#define HPC3_PDMACTRL_ISACT 0x00000002 /* channel active */
+ /* write */
+#define HPC3_PDMACTRL_SEL 0x00000002 /* little endian transfer */
+#define HPC3_PDMACTRL_RCV 0x00000004 /* direction is receive */
+#define HPC3_PDMACTRL_FLSH 0x00000008 /* enable flush for receive DMA */
+#define HPC3_PDMACTRL_ACT 0x00000010 /* start dma transfer */
+#define HPC3_PDMACTRL_LD 0x00000020 /* load enable for ACT */
+#define HPC3_PDMACTRL_RT 0x00000040 /* Use realtime GIO bus servicing */
+#define HPC3_PDMACTRL_HW 0x0000ff00 /* DMA High-water mark */
+#define HPC3_PDMACTRL_FB 0x003f0000 /* Ptr to beginning of fifo */
+#define HPC3_PDMACTRL_FE 0x3f000000 /* Ptr to end of fifo */
+
+ u32 _unused1[0x1000/4 - 1]; /* padding */
+};
+
+/* The HPC3 SCSI registers, this does not include external ones. */
+struct hpc3_scsiregs {
+ volatile u32 cbptr; /* current dma buffer ptr, diagnostic use only */
+ volatile u32 ndptr; /* next dma descriptor ptr */
+ u32 _unused0[0x1000/4 - 2]; /* padding */
+ volatile u32 bcd; /* byte count info */
+#define HPC3_SBCD_BCNTMSK 0x00003fff /* bytes to transfer from/to memory */
+#define HPC3_SBCD_XIE 0x00004000 /* Send IRQ when done with cur buf */
+#define HPC3_SBCD_EOX 0x00008000 /* Indicates this is last buf in chain */
+
+ volatile u32 ctrl; /* control register */
+#define HPC3_SCTRL_IRQ 0x01 /* IRQ asserted, either dma done or parity */
+#define HPC3_SCTRL_ENDIAN 0x02 /* DMA endian mode, 0=big 1=little */
+#define HPC3_SCTRL_DIR 0x04 /* DMA direction, 1=dev2mem 0=mem2dev */
+#define HPC3_SCTRL_FLUSH 0x08 /* Tells HPC3 to flush scsi fifos */
+#define HPC3_SCTRL_ACTIVE 0x10 /* SCSI DMA channel is active */
+#define HPC3_SCTRL_AMASK 0x20 /* DMA active inhibits PIO */
+#define HPC3_SCTRL_CRESET 0x40 /* Resets dma channel and external controller */
+#define HPC3_SCTRL_PERR 0x80 /* Bad parity on HPC3 iface to scsi controller */
+
+ volatile u32 gfptr; /* current GIO fifo ptr */
+ volatile u32 dfptr; /* current device fifo ptr */
+ volatile u32 dconfig; /* DMA configuration register */
+#define HPC3_SDCFG_HCLK 0x00001 /* Enable DMA half clock mode */
+#define HPC3_SDCFG_D1 0x00006 /* Cycles to spend in D1 state */
+#define HPC3_SDCFG_D2 0x00038 /* Cycles to spend in D2 state */
+#define HPC3_SDCFG_D3 0x001c0 /* Cycles to spend in D3 state */
+#define HPC3_SDCFG_HWAT 0x00e00 /* DMA high water mark */
+#define HPC3_SDCFG_HW 0x01000 /* Enable 16-bit halfword DMA accesses to scsi */
+#define HPC3_SDCFG_SWAP 0x02000 /* Byte swap all DMA accesses */
+#define HPC3_SDCFG_EPAR 0x04000 /* Enable parity checking for DMA */
+#define HPC3_SDCFG_POLL 0x08000 /* hd_dreq polarity control */
+#define HPC3_SDCFG_ERLY 0x30000 /* hd_dreq behavior control bits */
+
+ volatile u32 pconfig; /* PIO configuration register */
+#define HPC3_SPCFG_P3 0x0003 /* Cycles to spend in P3 state */
+#define HPC3_SPCFG_P2W 0x001c /* Cycles to spend in P2 state for writes */
+#define HPC3_SPCFG_P2R 0x01e0 /* Cycles to spend in P2 state for reads */
+#define HPC3_SPCFG_P1 0x0e00 /* Cycles to spend in P1 state */
+#define HPC3_SPCFG_HW 0x1000 /* Enable 16-bit halfword PIO accesses to scsi */
+#define HPC3_SPCFG_SWAP 0x2000 /* Byte swap all PIO accesses */
+#define HPC3_SPCFG_EPAR 0x4000 /* Enable parity checking for PIO */
+#define HPC3_SPCFG_FUJI 0x8000 /* Fujitsu scsi controller mode for faster dma/pio */
+
+ u32 _unused1[0x1000/4 - 6]; /* padding */
+};
+
+/* SEEQ ethernet HPC3 registers, only one seeq per HPC3. */
+struct hpc3_ethregs {
+ /* Receiver registers. */
+ volatile u32 rx_cbptr; /* current dma buffer ptr, diagnostic use only */
+ volatile u32 rx_ndptr; /* next dma descriptor ptr */
+ u32 _unused0[0x1000/4 - 2]; /* padding */
+ volatile u32 rx_bcd; /* byte count info */
+#define HPC3_ERXBCD_BCNTMSK 0x00003fff /* bytes to be sent to memory */
+#define HPC3_ERXBCD_XIE 0x20000000 /* HPC3 interrupts cpu at end of this buf */
+#define HPC3_ERXBCD_EOX 0x80000000 /* flags this as end of descriptor chain */
+
+ volatile u32 rx_ctrl; /* control register */
+#define HPC3_ERXCTRL_STAT50 0x0000003f /* Receive status reg bits of Seeq8003 */
+#define HPC3_ERXCTRL_STAT6 0x00000040 /* Rdonly irq status */
+#define HPC3_ERXCTRL_STAT7	0x00000080 /* Rdonly old/new status bit from Seeq */
+#define HPC3_ERXCTRL_ENDIAN 0x00000100 /* Endian for dma channel, little=1 big=0 */
+#define HPC3_ERXCTRL_ACTIVE 0x00000200 /* Tells if DMA transfer is in progress */
+#define HPC3_ERXCTRL_AMASK 0x00000400 /* Tells if ACTIVE inhibits PIO's to hpc3 */
+#define HPC3_ERXCTRL_RBO 0x00000800 /* Receive buffer overflow if set to 1 */
+
+ volatile u32 rx_gfptr; /* current GIO fifo ptr */
+ volatile u32 rx_dfptr; /* current device fifo ptr */
+ u32 _unused1; /* padding */
+ volatile u32 reset; /* reset register */
+#define HPC3_ERST_CRESET 0x1 /* Reset dma channel and external controller */
+#define HPC3_ERST_CLRIRQ 0x2 /* Clear channel interrupt */
+#define HPC3_ERST_LBACK 0x4 /* Enable diagnostic loopback mode of Seeq8003 */
+
+ volatile u32 dconfig; /* DMA configuration register */
+#define HPC3_EDCFG_D1 0x0000f /* Cycles to spend in D1 state for PIO */
+#define HPC3_EDCFG_D2 0x000f0 /* Cycles to spend in D2 state for PIO */
+#define HPC3_EDCFG_D3 0x00f00 /* Cycles to spend in D3 state for PIO */
+#define HPC3_EDCFG_WCTRL 0x01000 /* Enable writes of desc into ex ctrl port */
+#define HPC3_EDCFG_FRXDC 0x02000 /* Clear eop stat bits upon rxdc, hw seeq fix */
+#define HPC3_EDCFG_FEOP 0x04000 /* Bad packet marker timeout enable */
+#define HPC3_EDCFG_FIRQ 0x08000 /* Another bad packet timeout enable */
+#define HPC3_EDCFG_PTO 0x30000 /* Programmed timeout value for above two */
+
+ volatile u32 pconfig; /* PIO configuration register */
+#define HPC3_EPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */
+#define HPC3_EPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */
+#define HPC3_EPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */
+#define HPC3_EPCFG_TST 0x1000 /* Diagnostic ram test feature bit */
+
+ u32 _unused2[0x1000/4 - 8]; /* padding */
+
+ /* Transmitter registers. */
+ volatile u32 tx_cbptr; /* current dma buffer ptr, diagnostic use only */
+ volatile u32 tx_ndptr; /* next dma descriptor ptr */
+ u32 _unused3[0x1000/4 - 2]; /* padding */
+ volatile u32 tx_bcd; /* byte count info */
+#define HPC3_ETXBCD_BCNTMSK 0x00003fff /* bytes to be read from memory */
+#define HPC3_ETXBCD_ESAMP 0x10000000 /* if set, too late to add descriptor */
+#define HPC3_ETXBCD_XIE 0x20000000 /* Interrupt cpu at end of cur desc */
+#define HPC3_ETXBCD_EOP 0x40000000 /* Last byte of cur buf is end of packet */
+#define HPC3_ETXBCD_EOX 0x80000000 /* This buf is the end of desc chain */
+
+ volatile u32 tx_ctrl; /* control register */
+#define HPC3_ETXCTRL_STAT30 0x0000000f /* Rdonly copy of seeq tx stat reg */
+#define HPC3_ETXCTRL_STAT4 0x00000010 /* Indicate late collision occurred */
+#define HPC3_ETXCTRL_STAT75 0x000000e0 /* Rdonly irq status from seeq */
+#define HPC3_ETXCTRL_ENDIAN 0x00000100 /* DMA channel endian mode, 1=little 0=big */
+#define HPC3_ETXCTRL_ACTIVE 0x00000200 /* DMA tx channel is active */
+#define HPC3_ETXCTRL_AMASK 0x00000400 /* Indicates ACTIVE inhibits PIO's */
+
+ volatile u32 tx_gfptr; /* current GIO fifo ptr */
+ volatile u32 tx_dfptr; /* current device fifo ptr */
+ u32 _unused4[0x1000/4 - 4]; /* padding */
+};
+
+struct hpc3_regs {
+ /* First regs for the PBUS 8 dma channels. */
+ struct hpc3_pbus_dmacregs pbdma[8];
+
+ /* Now the HPC scsi registers, we get two scsi reg sets. */
+ struct hpc3_scsiregs scsi_chan0, scsi_chan1;
+
+ /* The SEEQ hpc3 ethernet dma/control registers. */
+ struct hpc3_ethregs ethregs;
+
+ /* Here are where the hpc3 fifo's can be directly accessed
+ * via PIO accesses. Under normal operation we never stick
+ * our grubby paws in here so it's just padding. */
+ u32 _unused0[0x18000/4];
+
+ /* HPC3 irq status regs. Due to a peculiar bug you need to
+ * look at two different register addresses to get at all of
+ * the status bits. The first reg can only reliably report
+ * bits 4:0 of the status, and the second reg can only
+ * reliably report bits 9:5 of the hpc3 irq status. I told
+ * you it was a peculiar bug. ;-)
+ */
+ volatile u32 istat0; /* Irq status, only bits <4:0> reliable. */
+#define HPC3_ISTAT_PBIMASK 0x0ff /* irq bits for pbus devs 0 --> 7 */
+#define HPC3_ISTAT_SC0MASK 0x100 /* irq bit for scsi channel 0 */
+#define HPC3_ISTAT_SC1MASK 0x200 /* irq bit for scsi channel 1 */
+
+ volatile u32 gio_misc; /* GIO misc control bits. */
+#define HPC3_GIOMISC_ERTIME 0x1 /* Enable external timer real time. */
+#define HPC3_GIOMISC_DENDIAN 0x2 /* dma descriptor endian, 1=lit 0=big */
+
+ u32 eeprom; /* EEPROM data reg. */
+#define HPC3_EEPROM_EPROT 0x01 /* Protect register enable */
+#define HPC3_EEPROM_CSEL 0x02 /* Chip select */
+#define HPC3_EEPROM_ECLK 0x04 /* EEPROM clock */
+#define HPC3_EEPROM_DATO 0x08 /* Data out */
+#define HPC3_EEPROM_DATI 0x10 /* Data in */
+
+ volatile u32 istat1; /* Irq status, only bits <9:5> reliable. */
+ volatile u32 bestat; /* Bus error interrupt status reg. */
+#define HPC3_BESTAT_BLMASK 0x000ff /* Bus lane where bad parity occurred */
+#define HPC3_BESTAT_CTYPE 0x00100 /* Bus cycle type, 0=PIO 1=DMA */
+#define HPC3_BESTAT_PIDSHIFT 9
+#define HPC3_BESTAT_PIDMASK 0x3f700 /* DMA channel parity identifier */
+
+ u32 _unused1[0x14000/4 - 5]; /* padding */
+
+ /* Now direct PIO per-HPC3 peripheral access to external regs. */
+ volatile u32 scsi0_ext[256]; /* SCSI channel 0 external regs */
+ u32 _unused2[0x7c00/4];
+ volatile u32 scsi1_ext[256]; /* SCSI channel 1 external regs */
+ u32 _unused3[0x7c00/4];
+ volatile u32 eth_ext[320]; /* Ethernet external registers */
+ u32 _unused4[0x3b00/4];
+
+ /* Per-peripheral device external registers and DMA/PIO control. */
+ volatile u32 pbus_extregs[16][256];
+ volatile u32 pbus_dmacfg[8][128];
+ /* Cycles to spend in D3 for reads */
+#define HPC3_DMACFG_D3R_MASK 0x00000001
+#define HPC3_DMACFG_D3R_SHIFT 0
+ /* Cycles to spend in D4 for reads */
+#define HPC3_DMACFG_D4R_MASK 0x0000001e
+#define HPC3_DMACFG_D4R_SHIFT 1
+ /* Cycles to spend in D5 for reads */
+#define HPC3_DMACFG_D5R_MASK 0x000001e0
+#define HPC3_DMACFG_D5R_SHIFT 5
+ /* Cycles to spend in D3 for writes */
+#define HPC3_DMACFG_D3W_MASK 0x00000200
+#define HPC3_DMACFG_D3W_SHIFT 9
+ /* Cycles to spend in D4 for writes */
+#define HPC3_DMACFG_D4W_MASK 0x00003c00
+#define HPC3_DMACFG_D4W_SHIFT 10
+ /* Cycles to spend in D5 for writes */
+#define HPC3_DMACFG_D5W_MASK 0x0003c000
+#define HPC3_DMACFG_D5W_SHIFT 14
+ /* Enable 16-bit DMA access mode */
+#define HPC3_DMACFG_DS16 0x00040000
+ /* Places halfwords on high 16 bits of bus */
+#define HPC3_DMACFG_EVENHI 0x00080000
+ /* Make this device real time */
+#define HPC3_DMACFG_RTIME 0x00200000
+ /* 5 bit burst count for DMA device */
+#define HPC3_DMACFG_BURST_MASK 0x07c00000
+#define HPC3_DMACFG_BURST_SHIFT 22
+ /* Use live pbus_dreq unsynchronized signal */
+#define HPC3_DMACFG_DRQLIVE 0x08000000
+ volatile u32 pbus_piocfg[16][64];
+ /* Cycles to spend in P2 state for reads */
+#define HPC3_PIOCFG_P2R_MASK 0x00001
+#define HPC3_PIOCFG_P2R_SHIFT 0
+ /* Cycles to spend in P3 state for reads */
+#define HPC3_PIOCFG_P3R_MASK 0x0001e
+#define HPC3_PIOCFG_P3R_SHIFT 1
+ /* Cycles to spend in P4 state for reads */
+#define HPC3_PIOCFG_P4R_MASK 0x001e0
+#define HPC3_PIOCFG_P4R_SHIFT 5
+ /* Cycles to spend in P2 state for writes */
+#define HPC3_PIOCFG_P2W_MASK 0x00200
+#define HPC3_PIOCFG_P2W_SHIFT 9
+ /* Cycles to spend in P3 state for writes */
+#define HPC3_PIOCFG_P3W_MASK 0x03c00
+#define HPC3_PIOCFG_P3W_SHIFT 10
+ /* Cycles to spend in P4 state for writes */
+#define HPC3_PIOCFG_P4W_MASK 0x3c000
+#define HPC3_PIOCFG_P4W_SHIFT 14
+ /* Enable 16-bit PIO accesses */
+#define HPC3_PIOCFG_DS16 0x40000
+ /* Place even address bits in bits <15:8> */
+#define HPC3_PIOCFG_EVENHI 0x80000
+
+ /* PBUS PROM control regs. */
+ volatile u32 pbus_promwe; /* PROM write enable register */
+#define HPC3_PROM_WENAB 0x1 /* Enable writes to the PROM */
+
+ u32 _unused5[0x0800/4 - 1];
+ volatile u32 pbus_promswap; /* Chip select swap reg */
+#define HPC3_PROM_SWAP 0x1 /* invert GIO addr bit to select prom0 or prom1 */
+
+ u32 _unused6[0x0800/4 - 1];
+ volatile u32 pbus_gout; /* PROM general purpose output reg */
+#define HPC3_PROM_STAT 0x1 /* General purpose status bit in gout */
+
+ u32 _unused7[0x1000/4 - 1];
+ volatile u32 rtcregs[14]; /* Dallas clock registers */
+ u32 _unused8[50];
+ volatile u32 bbram[8192-50-14]; /* Battery backed ram */
+};
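/*
 * Illustrative sketch, not part of the original header: decoding a bus error
 * status word and composing a pbus_dmacfg value from the field definitions
 * above. The helper names and the example cycle counts are made up for
 * illustration only.
 */
static inline unsigned int hpc3_bestat_dma_pid(u32 bestat)
{
	/* DMA channel parity identifier from the bus error status reg */
	return (bestat & HPC3_BESTAT_PIDMASK) >> HPC3_BESTAT_PIDSHIFT;
}

static inline u32 hpc3_example_dmacfg(void)
{
	/* 16-bit DMA, burst of 4, with example D3/D4/D5 strobe timings */
	return (1 << HPC3_DMACFG_D3R_SHIFT) |
	       (3 << HPC3_DMACFG_D4R_SHIFT) |
	       (3 << HPC3_DMACFG_D5R_SHIFT) |
	       (4 << HPC3_DMACFG_BURST_SHIFT) |
	       HPC3_DMACFG_DS16;
}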
+
+/*
+ * It is possible to have two HPC3's within the address space on
+ * one machine, though only having one is more likely on an Indy.
+ */
+extern struct hpc3_regs *hpc3c0, *hpc3c1;
+#define HPC3_CHIP0_BASE 0x1fb80000 /* physical */
+#define HPC3_CHIP1_BASE 0x1fb00000 /* physical */
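/*
 * Illustrative sketch, not part of the original header: the chip bases above
 * are physical addresses and are normally reached through an uncached KSEG1
 * mapping (KSEG1ADDR() comes from <asm/addrspace.h>); the helper name is
 * hypothetical.
 */
static inline void hpc3_example_map_chips(void)
{
	hpc3c0 = (struct hpc3_regs *) KSEG1ADDR(HPC3_CHIP0_BASE);
	hpc3c1 = (struct hpc3_regs *) KSEG1ADDR(HPC3_CHIP1_BASE);
}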
+
+extern void sgihpc_init(void);
+
+#endif /* _SGI_HPC3_H */
diff --git a/arch/mips/include/asm/sgi/ioc.h b/arch/mips/include/asm/sgi/ioc.h
new file mode 100644
index 000000000..53c6b1ca6
--- /dev/null
+++ b/arch/mips/include/asm/sgi/ioc.h
@@ -0,0 +1,200 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * ioc.h: Definitions for SGI I/O Controller
+ *
+ * Copyright (C) 1996 David S. Miller
+ * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle
+ * Copyright (C) 2001, 2003 Ladislav Michl
+ */
+
+#ifndef _SGI_IOC_H
+#define _SGI_IOC_H
+
+#include <linux/types.h>
+#include <asm/sgi/pi1.h>
+
+/*
+ * All registers are 8 bits wide, aligned on a 32-bit boundary. Bad things
+ * happen if you try to access them as words. You have been warned.
+ */
+
+struct sgioc_uart_regs {
+ u8 _ctrl1[3];
+ volatile u8 ctrl1;
+ u8 _data1[3];
+ volatile u8 data1;
+ u8 _ctrl2[3];
+ volatile u8 ctrl2;
+ u8 _data2[3];
+ volatile u8 data2;
+};
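/*
 * Illustrative sketch, not part of the original header: the three pad bytes
 * in front of each register put the live byte at the last address of its
 * 32-bit word, so only byte accesses are safe; the helper name is
 * hypothetical.
 */
static inline void sgioc_uart_example(struct sgioc_uart_regs *uart)
{
	u8 c = uart->ctrl1;		/* 8-bit load: fine		*/
	uart->ctrl1 = c | 0x01;		/* 8-bit store: fine		*/
	/* A 32-bit access to the same word is exactly the "bad thing". */
}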
+
+struct sgioc_keyb_regs {
+ u8 _data[3];
+ volatile u8 data;
+ u8 _command[3];
+ volatile u8 command;
+};
+
+struct sgint_regs {
+ u8 _istat0[3];
+ volatile u8 istat0; /* Interrupt status zero */
+#define SGINT_ISTAT0_FFULL 0x01
+#define SGINT_ISTAT0_SCSI0 0x02
+#define SGINT_ISTAT0_SCSI1 0x04
+#define SGINT_ISTAT0_ENET 0x08
+#define SGINT_ISTAT0_GFXDMA 0x10
+#define SGINT_ISTAT0_PPORT 0x20
+#define SGINT_ISTAT0_HPC2 0x40
+#define SGINT_ISTAT0_LIO2 0x80
+ u8 _imask0[3];
+ volatile u8 imask0; /* Interrupt mask zero */
+ u8 _istat1[3];
+ volatile u8 istat1; /* Interrupt status one */
+#define SGINT_ISTAT1_ISDNI 0x01
+#define SGINT_ISTAT1_PWR 0x02
+#define SGINT_ISTAT1_ISDNH 0x04
+#define SGINT_ISTAT1_LIO3 0x08
+#define SGINT_ISTAT1_HPC3 0x10
+#define SGINT_ISTAT1_AFAIL 0x20
+#define SGINT_ISTAT1_VIDEO 0x40
+#define SGINT_ISTAT1_GIO2 0x80
+ u8 _imask1[3];
+ volatile u8 imask1; /* Interrupt mask one */
+ u8 _vmeistat[3];
+ volatile u8 vmeistat; /* VME interrupt status */
+ u8 _cmeimask0[3];
+ volatile u8 cmeimask0; /* VME interrupt mask zero */
+ u8 _cmeimask1[3];
+ volatile u8 cmeimask1; /* VME interrupt mask one */
+ u8 _cmepol[3];
+ volatile u8 cmepol; /* VME polarity */
+ u8 _tclear[3];
+ volatile u8 tclear;
+ u8 _errstat[3];
+ volatile u8 errstat; /* Error status reg, reserved on INT2 */
+ u32 _unused0[2];
+ u8 _tcnt0[3];
+ volatile u8 tcnt0; /* counter 0 */
+ u8 _tcnt1[3];
+ volatile u8 tcnt1; /* counter 1 */
+ u8 _tcnt2[3];
+ volatile u8 tcnt2; /* counter 2 */
+ u8 _tcword[3];
+ volatile u8 tcword; /* control word */
+#define SGINT_TCWORD_BCD 0x01 /* Use BCD mode for counters */
+#define SGINT_TCWORD_MMASK 0x0e /* Mode bitmask. */
+#define SGINT_TCWORD_MITC 0x00 /* IRQ on terminal count (doesn't work) */
+#define SGINT_TCWORD_MOS 0x02 /* One-shot IRQ mode. */
+#define SGINT_TCWORD_MRGEN 0x04 /* Normal rate generation */
+#define SGINT_TCWORD_MSWGEN 0x06 /* Square wave generator mode */
+#define SGINT_TCWORD_MSWST 0x08 /* Software strobe */
+#define SGINT_TCWORD_MHWST 0x0a /* Hardware strobe */
+#define SGINT_TCWORD_CMASK 0x30 /* Command mask */
+#define SGINT_TCWORD_CLAT 0x00 /* Latch command */
+#define SGINT_TCWORD_CLSB 0x10 /* LSB read/write */
+#define SGINT_TCWORD_CMSB 0x20 /* MSB read/write */
+#define SGINT_TCWORD_CALL 0x30 /* Full counter read/write */
+#define SGINT_TCWORD_CNT0 0x00 /* Select counter zero */
+#define SGINT_TCWORD_CNT1 0x40 /* Select counter one */
+#define SGINT_TCWORD_CNT2 0x80 /* Select counter two */
+#define SGINT_TCWORD_CRBCK 0xc0 /* Readback command */
+};
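/*
 * Illustrative sketch, not part of the original header: programming 8254
 * counter 2 as a rate generator with a full 16-bit count, using the tcword
 * bits defined above; the helper name is hypothetical.
 */
static inline void sgint_example_start_counter2(struct sgint_regs *regs,
						u16 count)
{
	regs->tcword = SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL |
		       SGINT_TCWORD_MRGEN;
	regs->tcnt2 = count & 0xff;		/* LSB first	*/
	regs->tcnt2 = (count >> 8) & 0xff;	/* then MSB	*/
}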
+
+/*
+ * The timer is the good old 8254. Unlike in PCs, it is clocked at exactly 1 MHz.
+ */
+#define SGINT_TIMER_CLOCK 1000000
+
+/*
+ * This is the constant we're using for calibrating the counter.
+ */
+#define SGINT_TCSAMP_COUNTER ((SGINT_TIMER_CLOCK / HZ) + 255)
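/*
 * Worked example, not part of the original header: with the 1 MHz input
 * clock and, say, HZ = 100, one timer-interrupt period is
 * SGINT_TIMER_CLOCK / HZ = 10000 counts (10 ms), so SGINT_TCSAMP_COUNTER
 * samples a little more than one full period (10000 + 255 counts) during
 * calibration.
 */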
+
+/* We need software copies of these because they are write only. */
+extern u8 sgi_ioc_reset, sgi_ioc_write;
+
+struct sgioc_regs {
+ struct pi1_regs pport;
+ u32 _unused0[2];
+ struct sgioc_uart_regs uart;
+ struct sgioc_keyb_regs kbdmouse;
+ u8 _gcsel[3];
+ volatile u8 gcsel;
+ u8 _genctrl[3];
+ volatile u8 genctrl;
+ u8 _panel[3];
+ volatile u8 panel;
+#define SGIOC_PANEL_POWERON 0x01
+#define SGIOC_PANEL_POWERINTR 0x02
+#define SGIOC_PANEL_VOLDNINTR 0x10
+#define SGIOC_PANEL_VOLDNHOLD 0x20
+#define SGIOC_PANEL_VOLUPINTR 0x40
+#define SGIOC_PANEL_VOLUPHOLD 0x80
+ u32 _unused1;
+ u8 _sysid[3];
+ volatile u8 sysid;
+#define SGIOC_SYSID_FULLHOUSE 0x01
+#define SGIOC_SYSID_BOARDREV(x) (((x) & 0x1e) >> 1)
+#define SGIOC_SYSID_CHIPREV(x) (((x) & 0xe0) >> 5)
+ u32 _unused2;
+ u8 _read[3];
+ volatile u8 read;
+ u32 _unused3;
+ u8 _dmasel[3];
+ volatile u8 dmasel;
+#define SGIOC_DMASEL_SCLK10MHZ 0x00 /* use 10MHZ serial clock */
+#define SGIOC_DMASEL_ISDNB 0x01 /* enable isdn B */
+#define SGIOC_DMASEL_ISDNA 0x02 /* enable isdn A */
+#define SGIOC_DMASEL_PPORT 0x04 /* use parallel DMA */
+#define SGIOC_DMASEL_SCLK667MHZ 0x10 /* use 6.67MHZ serial clock */
+#define SGIOC_DMASEL_SCLKEXT 0x20 /* use external serial clock */
+ u32 _unused4;
+ u8 _reset[3];
+ volatile u8 reset;
+#define SGIOC_RESET_PPORT	0x01	/* 0=parport reset, 1=normal */
+#define SGIOC_RESET_KBDMOUSE 0x02 /* 0=kbdmouse reset, 1=normal */
+#define SGIOC_RESET_EISA 0x04 /* 0=eisa reset, 1=normal */
+#define SGIOC_RESET_ISDN 0x08 /* 0=isdn reset, 1=normal */
+#define SGIOC_RESET_LC0OFF 0x10 /* guiness: turn led off (red, else green) */
+#define SGIOC_RESET_LC1OFF 0x20 /* guiness: turn led off (green, else amber) */
+ u32 _unused5;
+ u8 _write[3];
+ volatile u8 write;
+#define SGIOC_WRITE_NTHRESH 0x01 /* use 4.5db threshold */
+#define SGIOC_WRITE_TPSPEED 0x02 /* use 100ohm TP speed */
+#define SGIOC_WRITE_EPSEL 0x04 /* force cable mode: 1=AUI 0=TP */
+#define SGIOC_WRITE_EASEL 0x08 /* 1=autoselect 0=manual cable selection */
+#define SGIOC_WRITE_U1AMODE 0x10 /* 1=PC 0=MAC UART mode */
+#define SGIOC_WRITE_U0AMODE 0x20 /* 1=PC 0=MAC UART mode */
+#define SGIOC_WRITE_MLO 0x40 /* 1=4.75V 0=+5V */
+#define SGIOC_WRITE_MHI 0x80 /* 1=5.25V 0=+5V */
+ u32 _unused6;
+ struct sgint_regs int3;
+ u32 _unused7[16];
+ volatile u32 extio; /* FullHouse only */
+#define EXTIO_S0_IRQ_3 0x8000 /* S0: vid.vsync */
+#define EXTIO_S0_IRQ_2 0x4000 /* S0: gfx.fifofull */
+#define EXTIO_S0_IRQ_1 0x2000 /* S0: gfx.int */
+#define EXTIO_S0_RETRACE 0x1000
+#define EXTIO_SG_IRQ_3 0x0800 /* SG: vid.vsync */
+#define EXTIO_SG_IRQ_2 0x0400 /* SG: gfx.fifofull */
+#define EXTIO_SG_IRQ_1 0x0200 /* SG: gfx.int */
+#define EXTIO_SG_RETRACE 0x0100
+#define EXTIO_GIO_33MHZ 0x0080
+#define EXTIO_EISA_BUSERR 0x0040
+#define EXTIO_MC_BUSERR 0x0020
+#define EXTIO_HPC3_BUSERR 0x0010
+#define EXTIO_S0_STAT_1 0x0008
+#define EXTIO_S0_STAT_0 0x0004
+#define EXTIO_SG_STAT_1 0x0002
+#define EXTIO_SG_STAT_0 0x0001
+};
+
+extern struct sgioc_regs *sgioc;
+extern struct sgint_regs *sgint;
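/*
 * Illustrative sketch, not part of the original header: because the reset
 * and write registers are write only, updates go through the software copies
 * declared above; the helper name is hypothetical.
 */
static inline void sgioc_example_set_reset_bits(u8 bits)
{
	sgi_ioc_reset |= bits;		/* keep the shadow copy current	*/
	sgioc->reset = sgi_ioc_reset;	/* write-only hardware register	*/
}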
+
+#endif
diff --git a/arch/mips/include/asm/sgi/ip22.h b/arch/mips/include/asm/sgi/ip22.h
new file mode 100644
index 000000000..87ec9eaa0
--- /dev/null
+++ b/arch/mips/include/asm/sgi/ip22.h
@@ -0,0 +1,80 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * ip22.h: Definitions for SGI IP22 machines
+ *
+ * Copyright (C) 1996 David S. Miller
+ * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle
+ */
+
+#ifndef _SGI_IP22_H
+#define _SGI_IP22_H
+
+/*
+ * These are the virtual IRQ numbers. We divide all IRQs into 'spaces';
+ * the 'space' determines where and how to enable/disable a particular
+ * IRQ on an SGI machine. HPC DMA and MC DMA interrupts are not supported
+ * this way. A driver is supposed to allocate the HPC/MC interrupt as
+ * shareable and then check the proper status bit (see the HAL2 driver).
+ * This will prevent many complications, trust me ;-)
+ */
+
+#include <irq.h>
+#include <asm/sgi/ioc.h>
+
+#define SGINT_EISA 0 /* 16 EISA irq levels (Indigo2) */
+#define SGINT_CPU MIPS_CPU_IRQ_BASE /* MIPS CPU define 8 interrupt sources */
+#define SGINT_LOCAL0 (SGINT_CPU+8) /* 8 local0 irq levels */
+#define SGINT_LOCAL1 (SGINT_CPU+16) /* 8 local1 irq levels */
+#define SGINT_LOCAL2 (SGINT_CPU+24) /* 8 local2 vectored irq levels */
+#define SGINT_LOCAL3 (SGINT_CPU+32) /* 8 local3 vectored irq levels */
+#define SGINT_END (SGINT_CPU+40) /* End of 'spaces' */
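/*
 * Illustrative sketch, not part of the original header: a status bit in one
 * of the local interrupt registers maps to a virtual IRQ simply by adding
 * the bit number to the base of its 'space', e.g. bit 3 of istat0 (the
 * ethernet source) becomes SGINT_LOCAL0 + 3, i.e. SGI_ENET_IRQ below. The
 * helper name is hypothetical.
 */
static inline int sgint_local0_bit_to_irq(int bit)
{
	return SGINT_LOCAL0 + bit;
}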
+
+/*
+ * Individual interrupt definitions for the Indy and Indigo2
+ */
+
+#define SGI_SOFT_0_IRQ SGINT_CPU + 0
+#define SGI_SOFT_1_IRQ SGINT_CPU + 1
+#define SGI_LOCAL_0_IRQ SGINT_CPU + 2
+#define SGI_LOCAL_1_IRQ SGINT_CPU + 3
+#define SGI_8254_0_IRQ SGINT_CPU + 4
+#define SGI_8254_1_IRQ SGINT_CPU + 5
+#define SGI_BUSERR_IRQ SGINT_CPU + 6
+#define SGI_TIMER_IRQ SGINT_CPU + 7
+
+#define SGI_FIFO_IRQ SGINT_LOCAL0 + 0 /* FIFO full */
+#define SGI_GIO_0_IRQ SGI_FIFO_IRQ /* GIO-0 */
+#define SGI_WD93_0_IRQ SGINT_LOCAL0 + 1 /* 1st onboard WD93 */
+#define SGI_WD93_1_IRQ SGINT_LOCAL0 + 2 /* 2nd onboard WD93 */
+#define SGI_ENET_IRQ SGINT_LOCAL0 + 3 /* onboard ethernet */
+#define SGI_MCDMA_IRQ SGINT_LOCAL0 + 4 /* MC DMA done */
+#define SGI_PARPORT_IRQ SGINT_LOCAL0 + 5 /* Parallel port */
+#define SGI_GIO_1_IRQ SGINT_LOCAL0 + 6 /* GE / GIO-1 / 2nd-HPC */
+#define SGI_MAP_0_IRQ SGINT_LOCAL0 + 7 /* Mappable interrupt 0 */
+
+#define SGI_GPL0_IRQ SGINT_LOCAL1 + 0 /* General Purpose LOCAL1_N<0> */
+#define SGI_PANEL_IRQ SGINT_LOCAL1 + 1 /* front panel */
+#define SGI_GPL2_IRQ SGINT_LOCAL1 + 2 /* General Purpose LOCAL1_N<2> */
+#define SGI_MAP_1_IRQ SGINT_LOCAL1 + 3 /* Mappable interrupt 1 */
+#define SGI_HPCDMA_IRQ SGINT_LOCAL1 + 4 /* HPC DMA done */
+#define SGI_ACFAIL_IRQ SGINT_LOCAL1 + 5 /* AC fail */
+#define SGI_VINO_IRQ SGINT_LOCAL1 + 6 /* Indy VINO */
+#define SGI_GIO_2_IRQ SGINT_LOCAL1 + 7 /* Vert retrace / GIO-2 */
+
+/* Mapped interrupts. These interrupts may be mapped to either 0, or 1 */
+#define SGI_VERT_IRQ SGINT_LOCAL2 + 0 /* INT3: newport vertical status */
+#define SGI_EISA_IRQ SGINT_LOCAL2 + 3 /* EISA interrupts */
+#define SGI_KEYBD_IRQ SGINT_LOCAL2 + 4 /* keyboard */
+#define SGI_SERIAL_IRQ SGINT_LOCAL2 + 5 /* onboard serial */
+#define SGI_GIOEXP0_IRQ (SGINT_LOCAL2 + 6) /* Indy GIO EXP0 */
+#define SGI_GIOEXP1_IRQ (SGINT_LOCAL2 + 7) /* Indy GIO EXP1 */
+
+#define ip22_is_fullhouse() (sgioc->sysid & SGIOC_SYSID_FULLHOUSE)
+
+extern unsigned short ip22_eeprom_read(unsigned int *ctrl, int reg);
+extern unsigned short ip22_nvram_read(int reg);
+
+#endif
diff --git a/arch/mips/include/asm/sgi/mc.h b/arch/mips/include/asm/sgi/mc.h
new file mode 100644
index 000000000..3a070cec9
--- /dev/null
+++ b/arch/mips/include/asm/sgi/mc.h
@@ -0,0 +1,231 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * mc.h: Definitions for SGI Memory Controller
+ *
+ * Copyright (C) 1996 David S. Miller
+ * Copyright (C) 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+
+#ifndef _SGI_MC_H
+#define _SGI_MC_H
+
+struct sgimc_regs {
+ u32 _unused0;
+ volatile u32 cpuctrl0; /* CPU control register 0, readwrite */
+#define SGIMC_CCTRL0_REFS 0x0000000f /* REFS mask */
+#define SGIMC_CCTRL0_EREFRESH 0x00000010 /* Memory refresh enable */
+#define SGIMC_CCTRL0_EPERRGIO 0x00000020 /* GIO parity error enable */
+#define SGIMC_CCTRL0_EPERRMEM 0x00000040 /* Main mem parity error enable */
+#define SGIMC_CCTRL0_EPERRCPU 0x00000080 /* CPU bus parity error enable */
+#define SGIMC_CCTRL0_WDOG 0x00000100 /* Watchdog timer enable */
+#define SGIMC_CCTRL0_SYSINIT 0x00000200 /* System init bit */
+#define SGIMC_CCTRL0_GFXRESET 0x00000400 /* Graphics interface reset */
+#define SGIMC_CCTRL0_EISALOCK 0x00000800 /* Lock CPU from memory for EISA */
+#define SGIMC_CCTRL0_EPERRSCMD 0x00001000 /* SysCMD bus parity error enable */
+#define SGIMC_CCTRL0_IENAB 0x00002000 /* Allow interrupts from MC */
+#define SGIMC_CCTRL0_ESNOOP 0x00004000 /* Snooping I/O enable */
+#define SGIMC_CCTRL0_EPROMWR 0x00008000 /* Prom writes from cpu enable */
+#define SGIMC_CCTRL0_WRESETPMEM 0x00010000 /* Perform warm reset, preserves mem */
+#define SGIMC_CCTRL0_LENDIAN 0x00020000 /* Put MC in little-endian mode */
+#define SGIMC_CCTRL0_WRESETDMEM 0x00040000 /* Warm reset, destroys mem contents */
+#define SGIMC_CCTRL0_CMEMBADPAR 0x02000000 /* Generate bad perr from cpu to mem */
+#define SGIMC_CCTRL0_R4KNOCHKPARR 0x04000000 /* Don't chk parity on mem data reads */
+#define SGIMC_CCTRL0_GIOBTOB 0x08000000 /* Allow GIO back to back writes */
+ u32 _unused1;
+ volatile u32 cpuctrl1; /* CPU control register 1, readwrite */
+#define SGIMC_CCTRL1_EGIOTIMEO 0x00000010 /* GIO bus timeout enable */
+#define SGIMC_CCTRL1_FIXEDEHPC 0x00001000 /* Fixed HPC endianness */
+#define SGIMC_CCTRL1_LITTLEHPC 0x00002000 /* Little endian HPC */
+#define SGIMC_CCTRL1_FIXEDEEXP0 0x00004000 /* Fixed EXP0 endianness */
+#define SGIMC_CCTRL1_LITTLEEXP0 0x00008000 /* Little endian EXP0 */
+#define SGIMC_CCTRL1_FIXEDEEXP1 0x00010000 /* Fixed EXP1 endianness */
+#define SGIMC_CCTRL1_LITTLEEXP1 0x00020000 /* Little endian EXP1 */
+
+ u32 _unused2;
+ volatile u32 watchdogt; /* Watchdog reg rdonly, write clears */
+
+ u32 _unused3;
+ volatile u32 systemid; /* MC system ID register, readonly */
+#define SGIMC_SYSID_MASKREV 0x0000000f /* Revision of MC controller */
+#define SGIMC_SYSID_EPRESENT 0x00000010 /* Indicates presence of EISA bus */
+
+ u32 _unused4[3];
+ volatile u32 divider; /* Divider reg for RPSS */
+
+ u32 _unused5;
+ u32 eeprom; /* EEPROM byte reg for r4k */
+#define SGIMC_EEPROM_PRE 0x00000001 /* eeprom chip PRE pin assertion */
+#define SGIMC_EEPROM_CSEL 0x00000002 /* Active high, eeprom chip select */
+#define SGIMC_EEPROM_SECLOCK 0x00000004 /* EEPROM serial clock */
+#define SGIMC_EEPROM_SDATAO 0x00000008 /* Serial EEPROM data-out */
+#define SGIMC_EEPROM_SDATAI 0x00000010 /* Serial EEPROM data-in */
+
+ u32 _unused6[3];
+ volatile u32 rcntpre; /* Preload refresh counter */
+
+ u32 _unused7;
+ volatile u32 rcounter; /* Readonly refresh counter */
+
+ u32 _unused8[13];
+ volatile u32 giopar; /* Parameter word for GIO64 */
+#define SGIMC_GIOPAR_HPC64 0x00000001 /* HPC talks to GIO using 64-bits */
+#define SGIMC_GIOPAR_GFX64 0x00000002 /* GFX talks to GIO using 64-bits */
+#define SGIMC_GIOPAR_EXP064 0x00000004 /* EXP(slot0) talks using 64-bits */
+#define SGIMC_GIOPAR_EXP164 0x00000008 /* EXP(slot1) talks using 64-bits */
+#define SGIMC_GIOPAR_EISA64 0x00000010 /* EISA bus talks 64-bits to GIO */
+#define SGIMC_GIOPAR_HPC264	0x00000020 /* 2nd HPC talks 64-bits to GIO */
+#define SGIMC_GIOPAR_RTIMEGFX 0x00000040 /* GFX device has realtime attr */
+#define SGIMC_GIOPAR_RTIMEEXP0 0x00000080 /* EXP(slot0) has realtime attr */
+#define SGIMC_GIOPAR_RTIMEEXP1 0x00000100 /* EXP(slot1) has realtime attr */
+#define SGIMC_GIOPAR_MASTEREISA 0x00000200 /* EISA bus can act as bus master */
+#define SGIMC_GIOPAR_ONEBUS 0x00000400 /* Exists one GIO64 pipelined bus */
+#define SGIMC_GIOPAR_MASTERGFX 0x00000800 /* GFX can act as a bus master */
+#define SGIMC_GIOPAR_MASTEREXP0 0x00001000 /* EXP(slot0) can bus master */
+#define SGIMC_GIOPAR_MASTEREXP1 0x00002000 /* EXP(slot1) can bus master */
+#define SGIMC_GIOPAR_PLINEEXP0 0x00004000 /* EXP(slot0) has pipeline attr */
+#define SGIMC_GIOPAR_PLINEEXP1 0x00008000 /* EXP(slot1) has pipeline attr */
+
+ u32 _unused9;
+ volatile u32 cputp; /* CPU bus arb time period */
+
+ u32 _unused10[3];
+ volatile u32 lbursttp; /* Time period for long bursts */
+
+	/* The MC chip can drive up to 4 banks of 4 SIMMs each. All SIMMs in a
+	 * bank must be the same size. The size encoding for supported SIMMs
+	 * is below (a field-extraction sketch follows this register block). */
+ u32 _unused11[9];
+ volatile u32 mconfig0; /* Memory config register zero */
+ u32 _unused12;
+ volatile u32 mconfig1; /* Memory config register one */
+#define SGIMC_MCONFIG_BASEADDR 0x000000ff /* Base address of bank*/
+#define SGIMC_MCONFIG_RMASK 0x00001f00 /* Ram config bitmask */
+#define SGIMC_MCONFIG_BVALID 0x00002000 /* Bank is valid */
+#define SGIMC_MCONFIG_SBANKS 0x00004000 /* Number of subbanks */
+
+ u32 _unused13;
+ volatile u32 cmacc; /* Mem access config for CPU */
+ u32 _unused14;
+ volatile u32 gmacc; /* Mem access config for GIO */
+
+ /* This define applies to both cmacc and gmacc registers above. */
+#define SGIMC_MACC_ALIASBIG 0x20000000 /* 512MB home for alias */
+
+ /* Error address/status regs from GIO and CPU perspectives. */
+ u32 _unused15;
+ volatile u32 cerr; /* Error address reg for CPU */
+ u32 _unused16;
+ volatile u32 cstat; /* Status reg for CPU */
+#define SGIMC_CSTAT_RD 0x00000100 /* read parity error */
+#define SGIMC_CSTAT_PAR 0x00000200 /* CPU parity error */
+#define SGIMC_CSTAT_ADDR 0x00000400 /* memory bus error bad addr */
+#define SGIMC_CSTAT_SYSAD_PAR 0x00000800 /* sysad parity error */
+#define SGIMC_CSTAT_SYSCMD_PAR 0x00001000 /* syscmd parity error */
+#define SGIMC_CSTAT_BAD_DATA 0x00002000 /* bad data identifier */
+#define SGIMC_CSTAT_PAR_MASK 0x00001f00 /* parity error mask */
+#define SGIMC_CSTAT_RD_PAR (SGIMC_CSTAT_RD | SGIMC_CSTAT_PAR)
+
+ u32 _unused17;
+ volatile u32 gerr; /* Error address reg for GIO */
+ u32 _unused18;
+ volatile u32 gstat; /* Status reg for GIO */
+#define SGIMC_GSTAT_RD 0x00000100 /* read parity error */
+#define SGIMC_GSTAT_WR 0x00000200 /* write parity error */
+#define SGIMC_GSTAT_TIME 0x00000400 /* GIO bus timed out */
+#define SGIMC_GSTAT_PROM 0x00000800 /* write to PROM when PROM_EN not set */
+#define SGIMC_GSTAT_ADDR 0x00001000 /* parity error on addr cycle */
+#define SGIMC_GSTAT_BC 0x00002000 /* parity error on byte count cycle */
+#define SGIMC_GSTAT_PIO_RD 0x00004000 /* read data parity on pio */
+#define SGIMC_GSTAT_PIO_WR 0x00008000 /* write data parity on pio */
+
+ /* Special hard bus locking registers. */
+ u32 _unused19;
+ volatile u32 syssembit; /* Uni-bit system semaphore */
+ u32 _unused20;
+ volatile u32 mlock; /* Global GIO memory access lock */
+ u32 _unused21;
+ volatile u32 elock; /* Locks EISA from GIO accesses */
+
+ /* GIO dma control registers. */
+ u32 _unused22[15];
+ volatile u32 gio_dma_trans; /* DMA mask to translation GIO addrs */
+ u32 _unused23;
+ volatile u32 gio_dma_sbits; /* DMA GIO addr substitution bits */
+ u32 _unused24;
+ volatile u32 dma_intr_cause; /* DMA IRQ cause indicator bits */
+ u32 _unused25;
+ volatile u32 dma_ctrl; /* Main DMA control reg */
+
+ /* DMA TLB entry 0 */
+ u32 _unused26[5];
+ volatile u32 dtlb_hi0;
+ u32 _unused27;
+ volatile u32 dtlb_lo0;
+
+ /* DMA TLB entry 1 */
+ u32 _unused28;
+ volatile u32 dtlb_hi1;
+ u32 _unused29;
+ volatile u32 dtlb_lo1;
+
+ /* DMA TLB entry 2 */
+ u32 _unused30;
+ volatile u32 dtlb_hi2;
+ u32 _unused31;
+ volatile u32 dtlb_lo2;
+
+ /* DMA TLB entry 3 */
+ u32 _unused32;
+ volatile u32 dtlb_hi3;
+ u32 _unused33;
+ volatile u32 dtlb_lo3;
+
+ u32 _unused34[0x0392];
+
+ u32 _unused35;
+ volatile u32 rpsscounter; /* Chirps at 100ns */
+
+ u32 _unused36[0x1000/4-2*4];
+
+ u32 _unused37;
+ volatile u32 maddronly; /* Address DMA goes at */
+ u32 _unused38;
+ volatile u32 maddrpdeflts; /* Same as above, plus set defaults */
+ u32 _unused39;
+ volatile u32 dmasz; /* DMA count */
+ u32 _unused40;
+ volatile u32 ssize; /* DMA stride size */
+ u32 _unused41;
+ volatile u32 gmaddronly; /* Set GIO DMA but don't start trans */
+ u32 _unused42;
+ volatile u32 dmaddnpgo; /* Set GIO DMA addr + start transfer */
+ u32 _unused43;
+ volatile u32 dmamode; /* DMA mode config bit settings */
+ u32 _unused44;
+ volatile u32 dmaccount; /* Zoom and byte count for DMA */
+ u32 _unused45;
+ volatile u32 dmastart; /* Pedal to the metal. */
+ u32 _unused46;
+ volatile u32 dmarunning; /* DMA op is in progress */
+ u32 _unused47;
+ volatile u32 maddrdefstart; /* Set dma addr, defaults, and kick it */
+};
+
+extern struct sgimc_regs *sgimc;
+#define SGIMC_BASE 0x1fa00000 /* physical */
+
+/* Base location of the two ram banks found in IP2[0268] machines. */
+#define SGIMC_SEG0_BADDR 0x08000000
+#define SGIMC_SEG1_BADDR 0x20000000
+
+/* Maximum size of the above banks are per machine. */
+#define SGIMC_SEG0_SIZE_ALL 0x10000000 /* 256MB */
+#define SGIMC_SEG1_SIZE_IP20_IP22 0x08000000 /* 128MB */
+#define SGIMC_SEG1_SIZE_IP26_IP28 0x20000000 /* 512MB */
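/*
 * Illustrative sketch, not part of the original header: extracting the raw
 * fields from an mconfig word. How the base and size fields scale to byte
 * addresses within the SEG0/SEG1 windows above depends on the MC revision,
 * so only the raw extraction is shown; the helper names are hypothetical.
 */
static inline int sgimc_bank_valid(u32 mconfig)
{
	return (mconfig & SGIMC_MCONFIG_BVALID) != 0;
}

static inline u32 sgimc_bank_base_field(u32 mconfig)
{
	return mconfig & SGIMC_MCONFIG_BASEADDR;	/* raw base field */
}

static inline u32 sgimc_bank_size_field(u32 mconfig)
{
	return (mconfig & SGIMC_MCONFIG_RMASK) >> 8;	/* raw size code */
}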
+
+extern void sgimc_init(void);
+
+#endif /* _SGI_MC_H */
diff --git a/arch/mips/include/asm/sgi/pi1.h b/arch/mips/include/asm/sgi/pi1.h
new file mode 100644
index 000000000..88b814ef3
--- /dev/null
+++ b/arch/mips/include/asm/sgi/pi1.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * pi1.h: Definitions for SGI PI1 parallel port
+ */
+
+#ifndef _SGI_PI1_H
+#define _SGI_PI1_H
+
+struct pi1_regs {
+ u8 _data[3];
+ volatile u8 data;
+ u8 _ctrl[3];
+ volatile u8 ctrl;
+#define PI1_CTRL_STROBE_N 0x01
+#define PI1_CTRL_AFD_N 0x02
+#define PI1_CTRL_INIT_N 0x04
+#define PI1_CTRL_SLIN_N 0x08
+#define PI1_CTRL_IRQ_ENA 0x10
+#define PI1_CTRL_DIR 0x20
+#define PI1_CTRL_SEL 0x40
+ u8 _status[3];
+ volatile u8 status;
+#define PI1_STAT_DEVID 0x03 /* bits 0-1 */
+#define PI1_STAT_NOINK 0x04 /* SGI MODE only */
+#define PI1_STAT_ERROR 0x08
+#define PI1_STAT_ONLINE 0x10
+#define PI1_STAT_PE 0x20
+#define PI1_STAT_ACK 0x40
+#define PI1_STAT_BUSY 0x80
+ u8 _dmactrl[3];
+ volatile u8 dmactrl;
+#define PI1_DMACTRL_FIFO_EMPTY 0x01 /* fifo empty R/O */
+#define PI1_DMACTRL_ABORT 0x02 /* reset DMA and internal fifo W/O */
+#define PI1_DMACTRL_STDMODE 0x00 /* bits 2-3 */
+#define PI1_DMACTRL_SGIMODE 0x04 /* bits 2-3 */
+#define PI1_DMACTRL_RICOHMODE 0x08 /* bits 2-3 */
+#define PI1_DMACTRL_HPMODE 0x0c /* bits 2-3 */
+#define PI1_DMACTRL_BLKMODE 0x10 /* block mode */
+#define PI1_DMACTRL_FIFO_CLEAR 0x20 /* clear fifo W/O */
+#define PI1_DMACTRL_READ 0x40 /* read */
+#define PI1_DMACTRL_RUN 0x80 /* pedal to the metal */
+ u8 _intstat[3];
+ volatile u8 intstat;
+#define PI1_INTSTAT_ACK 0x04
+#define PI1_INTSTAT_FEMPTY 0x08
+#define PI1_INTSTAT_NOINK 0x10
+#define PI1_INTSTAT_ONLINE 0x20
+#define PI1_INTSTAT_ERR 0x40
+#define PI1_INTSTAT_PE 0x80
+ u8 _intmask[3];
+	volatile u8 intmask;	/* enabled low, reset high */
+#define PI1_INTMASK_ACK 0x04
+#define PI1_INTMASK_FIFO_EMPTY 0x08
+#define PI1_INTMASK_NOINK 0x10
+#define PI1_INTMASK_ONLINE 0x20
+#define PI1_INTMASK_ERR 0x40
+#define PI1_INTMASK_PE 0x80
+ u8 _timer1[3];
+ volatile u8 timer1;
+#define PI1_TIME1 0x27
+ u8 _timer2[3];
+ volatile u8 timer2;
+#define PI1_TIME2 0x13
+ u8 _timer3[3];
+ volatile u8 timer3;
+#define PI1_TIME3 0x10
+ u8 _timer4[3];
+ volatile u8 timer4;
+#define PI1_TIME4 0x00
+};
+
+#endif
diff --git a/arch/mips/include/asm/sgi/seeq.h b/arch/mips/include/asm/sgi/seeq.h
new file mode 100644
index 000000000..af0ffd768
--- /dev/null
+++ b/arch/mips/include/asm/sgi/seeq.h
@@ -0,0 +1,21 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 by Ralf Baechle
+ */
+#ifndef __ASM_SGI_SEEQ_H
+#define __ASM_SGI_SEEQ_H
+
+#include <linux/if_ether.h>
+
+#include <asm/sgi/hpc3.h>
+
+struct sgiseeq_platform_data {
+ struct hpc3_regs *hpc;
+ unsigned int irq;
+ unsigned char mac[ETH_ALEN];
+};
+
+#endif /* __ASM_SGI_SEEQ_H */
diff --git a/arch/mips/include/asm/sgi/wd.h b/arch/mips/include/asm/sgi/wd.h
new file mode 100644
index 000000000..0d6c3a4da
--- /dev/null
+++ b/arch/mips/include/asm/sgi/wd.h
@@ -0,0 +1,20 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 by Ralf Baechle
+ */
+#ifndef __ASM_SGI_WD_H
+#define __ASM_SGI_WD_H
+
+#include <asm/sgi/hpc3.h>
+
+struct sgiwd93_platform_data {
+ unsigned int unit;
+ unsigned int irq;
+ struct hpc3_scsiregs *hregs;
+ unsigned char *wdregs;
+};
+
+#endif /* __ASM_SGI_WD_H */
diff --git a/arch/mips/include/asm/sgialib.h b/arch/mips/include/asm/sgialib.h
new file mode 100644
index 000000000..80f900417
--- /dev/null
+++ b/arch/mips/include/asm/sgialib.h
@@ -0,0 +1,61 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI ARCS firmware interface library for the Linux kernel.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2001, 2002 Ralf Baechle (ralf@gnu.org)
+ */
+#ifndef _ASM_SGIALIB_H
+#define _ASM_SGIALIB_H
+
+#include <linux/compiler.h>
+#include <asm/sgiarcs.h>
+
+extern struct linux_romvec *romvec;
+
+extern int prom_flags;
+
+#define PROM_FLAG_ARCS 1
+#define PROM_FLAG_USE_AS_CONSOLE 2
+#define PROM_FLAG_DONT_FREE_TEMP 4
+
+/* Simple char-by-char console I/O. */
+extern char prom_getchar(void);
+
+/* Get the next memory descriptor after CURR; returns the first descriptor
+ * in the chain if CURR is NULL.
+ */
+extern struct linux_mdesc *prom_getmdesc(struct linux_mdesc *curr);
+#define PROM_NULL_MDESC ((struct linux_mdesc *) 0)
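/*
 * Illustrative sketch, not part of the original header: walking the PROM
 * memory descriptor chain, assuming prom_getmdesc() returns PROM_NULL_MDESC
 * once the chain is exhausted. The helper name is hypothetical.
 */
static inline void prom_example_walk_mdescs(void)
{
	struct linux_mdesc *p = PROM_NULL_MDESC;

	while ((p = prom_getmdesc(p)) != PROM_NULL_MDESC) {
		/* p->base and p->pages describe one chunk of memory */
		;
	}
}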
+
+/* Called by prom_init to setup the physical memory pmemblock
+ * array.
+ */
+extern void prom_meminit(void);
+
+/* PROM device tree library routines. */
+#define PROM_NULL_COMPONENT ((pcomponent *) 0)
+
+/* This is called at prom_init time to identify the
+ * ARC architecture we are running on.
+ */
+extern void prom_identify_arch(void);
+
+/* Environment variable routines. */
+extern PCHAR ArcGetEnvironmentVariable(PCHAR name);
+
+/* ARCS command line parsing. */
+extern void prom_init_cmdline(int argc, LONG *argv);
+
+/* File operations. */
+extern LONG ArcRead(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
+extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
+
+/* Misc. routines. */
+extern VOID ArcEnterInteractiveMode(VOID) __noreturn;
+extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID);
+
+#endif /* _ASM_SGIALIB_H */
diff --git a/arch/mips/include/asm/sgiarcs.h b/arch/mips/include/asm/sgiarcs.h
new file mode 100644
index 000000000..e1512cab1
--- /dev/null
+++ b/arch/mips/include/asm/sgiarcs.h
@@ -0,0 +1,505 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * ARC firmware interface defines.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1999, 2001 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_SGIARCS_H
+#define _ASM_SGIARCS_H
+
+#include <linux/kernel.h>
+
+#include <asm/types.h>
+#include <asm/fw/arc/types.h>
+
+/* Various ARCS error codes. */
+#define PROM_ESUCCESS 0x00
+#define PROM_E2BIG 0x01
+#define PROM_EACCESS 0x02
+#define PROM_EAGAIN 0x03
+#define PROM_EBADF 0x04
+#define PROM_EBUSY 0x05
+#define PROM_EFAULT 0x06
+#define PROM_EINVAL 0x07
+#define PROM_EIO 0x08
+#define PROM_EISDIR 0x09
+#define PROM_EMFILE 0x0a
+#define PROM_EMLINK 0x0b
+#define PROM_ENAMETOOLONG 0x0c
+#define PROM_ENODEV 0x0d
+#define PROM_ENOENT 0x0e
+#define PROM_ENOEXEC 0x0f
+#define PROM_ENOMEM 0x10
+#define PROM_ENOSPC 0x11
+#define PROM_ENOTDIR 0x12
+#define PROM_ENOTTY 0x13
+#define PROM_ENXIO 0x14
+#define PROM_EROFS 0x15
+/* SGI ARCS specific errno's. */
+#define PROM_EADDRNOTAVAIL 0x1f
+#define PROM_ETIMEDOUT 0x20
+#define PROM_ECONNABORTED 0x21
+#define PROM_ENOCONNECT 0x22
+
+/* Device classes, types, and identifiers for prom
+ * device inventory queries.
+ */
+enum linux_devclass {
+ system, processor, cache, adapter, controller, peripheral, memory
+};
+
+enum linux_devtypes {
+ /* Generic stuff. */
+ Arc, Cpu, Fpu,
+
+ /* Primary insn and data caches. */
+ picache, pdcache,
+
+ /* Secondary insn, data, and combined caches. */
+ sicache, sdcache, sccache,
+
+ memdev, eisa_adapter, tc_adapter, scsi_adapter, dti_adapter,
+ multifunc_adapter, dsk_controller, tp_controller, cdrom_controller,
+ worm_controller, serial_controller, net_controller, disp_controller,
+ parallel_controller, ptr_controller, kbd_controller, audio_controller,
+ misc_controller, disk_peripheral, flpy_peripheral, tp_peripheral,
+ modem_peripheral, monitor_peripheral, printer_peripheral,
+ ptr_peripheral, kbd_peripheral, term_peripheral, line_peripheral,
+ net_peripheral, misc_peripheral, anon
+};
+
+enum linux_identifier {
+ bogus, ronly, removable, consin, consout, input, output
+};
+
+/* A prom device tree component. */
+struct linux_component {
+ enum linux_devclass class; /* node class */
+ enum linux_devtypes type; /* node type */
+ enum linux_identifier iflags; /* node flags */
+ USHORT vers; /* node version */
+ USHORT rev; /* node revision */
+ ULONG key; /* completely magic */
+ ULONG amask; /* XXX affinity mask??? */
+ ULONG cdsize; /* size of configuration data */
+ ULONG ilen; /* length of string identifier */
+ _PULONG iname; /* string identifier */
+};
+typedef struct linux_component pcomponent;
+
+struct linux_sysid {
+ char vend[8], prod[8];
+};
+
+/* ARCS prom memory descriptors. */
+enum arcs_memtypes {
+ arcs_eblock, /* exception block */
+ arcs_rvpage, /* ARCS romvec page */
+ arcs_fcontig, /* Contiguous and free */
+ arcs_free, /* Generic free memory */
+ arcs_bmem, /* Borken memory, don't use */
+ arcs_prog, /* A loaded program resides here */
+ arcs_atmp, /* ARCS temporary storage area, wish Sparc OpenBoot told this */
+ arcs_aperm, /* ARCS permanent storage... */
+};
+
+/* ARC has slightly different types than ARCS */
+enum arc_memtypes {
+ arc_eblock, /* exception block */
+ arc_rvpage, /* romvec page */
+ arc_free, /* Generic free memory */
+ arc_bmem, /* Borken memory, don't use */
+ arc_prog, /* A loaded program resides here */
+ arc_atmp, /* temporary storage area */
+ arc_aperm, /* permanent storage */
+ arc_fcontig, /* Contiguous and free */
+};
+
+union linux_memtypes {
+ enum arcs_memtypes arcs;
+ enum arc_memtypes arc;
+};
+
+struct linux_mdesc {
+ union linux_memtypes type;
+ ULONG base;
+ ULONG pages;
+};
+
+/* Time of day descriptor. */
+struct linux_tinfo {
+ unsigned short yr;
+ unsigned short mnth;
+ unsigned short day;
+ unsigned short hr;
+ unsigned short min;
+ unsigned short sec;
+ unsigned short msec;
+};
+
+/* ARCS virtual dirents. */
+struct linux_vdirent {
+ ULONG namelen;
+ unsigned char attr;
+ char fname[32]; /* XXX empirical, should be a define */
+};
+
+/* Other stuff for files. */
+enum linux_omode {
+ rdonly, wronly, rdwr, wronly_creat, rdwr_creat,
+ wronly_ssede, rdwr_ssede, dirent, dirent_creat
+};
+
+enum linux_seekmode {
+ absolute, relative
+};
+
+enum linux_mountops {
+ media_load, media_unload
+};
+
+/* This prom has a bolixed design. */
+struct linux_bigint {
+#ifdef __MIPSEL__
+ u32 lo;
+ s32 hi;
+#else /* !(__MIPSEL__) */
+ s32 hi;
+ u32 lo;
+#endif
+};
+
+struct linux_finfo {
+ struct linux_bigint begin;
+ struct linux_bigint end;
+ struct linux_bigint cur;
+ enum linux_devtypes dtype;
+ unsigned long namelen;
+ unsigned char attr;
+	char name[32];		/* XXX empirical, should be a define */
+};
+
+/* This describes the vector containing function pointers to the ARC
+ firmware functions. */
+struct linux_romvec {
+ LONG load; /* Load an executable image. */
+	LONG invoke;		/* Invoke a standalone image. */
+ LONG exec; /* Load and begin execution of a
+ standalone image. */
+ LONG halt; /* Halt the machine. */
+ LONG pdown; /* Power down the machine. */
+ LONG restart; /* XXX soft reset??? */
+ LONG reboot; /* Reboot the machine. */
+ LONG imode; /* Enter PROM interactive mode. */
+ LONG _unused1; /* Was ReturnFromMain(). */
+
+ /* PROM device tree interface. */
+ LONG next_component;
+ LONG child_component;
+ LONG parent_component;
+ LONG component_data;
+ LONG child_add;
+ LONG comp_del;
+ LONG component_by_path;
+
+ /* Misc. stuff. */
+ LONG cfg_save;
+ LONG get_sysid;
+
+ /* Probing for memory. */
+ LONG get_mdesc;
+ LONG _unused2; /* was Signal() */
+
+ LONG get_tinfo;
+ LONG get_rtime;
+
+ /* File type operations. */
+ LONG get_vdirent;
+ LONG open;
+ LONG close;
+ LONG read;
+ LONG get_rstatus;
+ LONG write;
+ LONG seek;
+ LONG mount;
+
+ /* Dealing with firmware environment variables. */
+ LONG get_evar;
+ LONG set_evar;
+
+ LONG get_finfo;
+ LONG set_finfo;
+
+ /* Miscellaneous. */
+ LONG cache_flush;
+ LONG TestUnicodeCharacter; /* ARC; not sure if ARCS too */
+ LONG GetDisplayStatus;
+};
+
+/* The SGI ARCS parameter block is in a fixed location for standalone
+ * programs to access PROM facilities easily.
+ */
+typedef struct _SYSTEM_PARAMETER_BLOCK {
+ ULONG magic; /* magic cookie */
+#define PROMBLOCK_MAGIC 0x53435241
+
+ ULONG len; /* length of parm block */
+ USHORT ver; /* ARCS firmware version */
+ USHORT rev; /* ARCS firmware revision */
+ _PLONG rs_block; /* Restart block. */
+ _PLONG dbg_block; /* Debug block. */
+ _PLONG gevect; /* XXX General vector??? */
+ _PLONG utlbvect; /* XXX UTLB vector??? */
+ ULONG rveclen; /* Size of romvec struct. */
+ _PVOID romvec; /* Function interface. */
+ ULONG pveclen; /* Length of private vector. */
+ _PVOID pvector; /* Private vector. */
+ ULONG adap_cnt; /* Adapter count. */
+ ULONG adap_typ0; /* First adapter type. */
+ ULONG adap_vcnt0; /* Adapter 0 vector count. */
+ _PVOID adap_vector; /* Adapter 0 vector ptr. */
+ ULONG adap_typ1; /* Second adapter type. */
+ ULONG adap_vcnt1; /* Adapter 1 vector count. */
+ _PVOID adap_vector1; /* Adapter 1 vector ptr. */
+ /* More adapter vectors go here... */
+} SYSTEM_PARAMETER_BLOCK, *PSYSTEM_PARAMETER_BLOCK;
+
+#define PROMBLOCK ((PSYSTEM_PARAMETER_BLOCK) (int)0xA0001000)
+#define ROMVECTOR ((struct linux_romvec *) (long)(PROMBLOCK)->romvec)
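/*
 * Illustrative sketch, not part of the original header: a standalone program
 * can sanity-check that an ARCS PROM is present by verifying the magic
 * cookie before dereferencing ROMVECTOR; the helper name is hypothetical.
 */
static inline int arcs_prom_present(void)
{
	return PROMBLOCK->magic == PROMBLOCK_MAGIC;
}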
+
+/* Cache layout parameter block. */
+union linux_cache_key {
+ struct param {
+#ifdef __MIPSEL__
+ unsigned short size;
+ unsigned char lsize;
+ unsigned char bsize;
+#else /* !(__MIPSEL__) */
+ unsigned char bsize;
+ unsigned char lsize;
+ unsigned short size;
+#endif
+ } info;
+ unsigned long allinfo;
+};
+
+/* Configuration data. */
+struct linux_cdata {
+ char *name;
+ int mlen;
+ enum linux_devtypes type;
+};
+
+/* Common SGI ARCS firmware file descriptors. */
+#define SGIPROM_STDIN 0
+#define SGIPROM_STDOUT 1
+
+/* Common SGI ARCS firmware file types. */
+#define SGIPROM_ROFILE 0x01 /* read-only file */
+#define SGIPROM_HFILE 0x02 /* hidden file */
+#define SGIPROM_SFILE 0x04 /* System file */
+#define SGIPROM_AFILE 0x08 /* Archive file */
+#define SGIPROM_DFILE 0x10 /* Directory file */
+#define SGIPROM_DELFILE 0x20 /* Deleted file */
+
+/* SGI ARCS boot record information. */
+struct sgi_partition {
+ unsigned char flag;
+#define SGIPART_UNUSED 0x00
+#define SGIPART_ACTIVE 0x80
+
+ unsigned char shead, ssect, scyl; /* unused */
+ unsigned char systype; /* OS type, Irix or NT */
+ unsigned char ehead, esect, ecyl; /* unused */
+ unsigned char rsect0, rsect1, rsect2, rsect3;
+ unsigned char tsect0, tsect1, tsect2, tsect3;
+};
+
+#define SGIBBLOCK_MAGIC 0xaa55
+#define SGIBBLOCK_MAXPART 0x0004
+
+struct sgi_bootblock {
+ unsigned char _unused[446];
+ struct sgi_partition partitions[SGIBBLOCK_MAXPART];
+ unsigned short magic;
+};
+
+/* BIOS parameter block. */
+struct sgi_bparm_block {
+ unsigned short bytes_sect; /* bytes per sector */
+ unsigned char sect_clust; /* sectors per cluster */
+ unsigned short sect_resv; /* reserved sectors */
+ unsigned char nfats; /* # of allocation tables */
+ unsigned short nroot_dirents; /* # of root directory entries */
+ unsigned short sect_volume; /* sectors in volume */
+ unsigned char media_type; /* media descriptor */
+ unsigned short sect_fat; /* sectors per allocation table */
+ unsigned short sect_track; /* sectors per track */
+ unsigned short nheads; /* # of heads */
+ unsigned short nhsects; /* # of hidden sectors */
+};
+
+struct sgi_bsector {
+ unsigned char jmpinfo[3];
+ unsigned char manuf_name[8];
+ struct sgi_bparm_block info;
+};
+
+/* Debugging block used with SGI symmon symbolic debugger. */
+#define SMB_DEBUG_MAGIC 0xfeeddead
+struct linux_smonblock {
+ unsigned long magic;
+ void (*handler)(void); /* Breakpoint routine. */
+ unsigned long dtable_base; /* Base addr of dbg table. */
+ int (*printf)(const char *fmt, ...);
+ unsigned long btable_base; /* Breakpoint table. */
+ unsigned long mpflushreqs; /* SMP cache flush request list. */
+ unsigned long ntab; /* Name table. */
+ unsigned long stab; /* Symbol table. */
+ int smax; /* Max # of symbols. */
+};
+
+/*
+ * Macros for calling a 32-bit ARC implementation from 64-bit code
+ */
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32)
+
+extern long call_o32(long vec, void *stack, ...);
+
+extern u64 o32_stk[4096];
+#define O32_STK (&o32_stk[ARRAY_SIZE(o32_stk)])
+
+#define ARC_CALL0(dest) \
+({ long __res; \
+ long __vec = (long) romvec->dest; \
+ __res = call_o32(__vec, O32_STK); \
+ __res; \
+})
+
+#define ARC_CALL1(dest, a1) \
+({ long __res; \
+ int __a1 = (int) (long) (a1); \
+ long __vec = (long) romvec->dest; \
+ __res = call_o32(__vec, O32_STK, __a1); \
+ __res; \
+})
+
+#define ARC_CALL2(dest, a1, a2) \
+({ long __res; \
+ int __a1 = (int) (long) (a1); \
+ int __a2 = (int) (long) (a2); \
+ long __vec = (long) romvec->dest; \
+ __res = call_o32(__vec, O32_STK, __a1, __a2); \
+ __res; \
+})
+
+#define ARC_CALL3(dest, a1, a2, a3) \
+({ long __res; \
+ int __a1 = (int) (long) (a1); \
+ int __a2 = (int) (long) (a2); \
+ int __a3 = (int) (long) (a3); \
+ long __vec = (long) romvec->dest; \
+ __res = call_o32(__vec, O32_STK, __a1, __a2, __a3); \
+ __res; \
+})
+
+#define ARC_CALL4(dest, a1, a2, a3, a4) \
+({ long __res; \
+ int __a1 = (int) (long) (a1); \
+ int __a2 = (int) (long) (a2); \
+ int __a3 = (int) (long) (a3); \
+ int __a4 = (int) (long) (a4); \
+ long __vec = (long) romvec->dest; \
+ __res = call_o32(__vec, O32_STK, __a1, __a2, __a3, __a4); \
+ __res; \
+})
+
+#define ARC_CALL5(dest, a1, a2, a3, a4, a5) \
+({ long __res; \
+ int __a1 = (int) (long) (a1); \
+ int __a2 = (int) (long) (a2); \
+ int __a3 = (int) (long) (a3); \
+ int __a4 = (int) (long) (a4); \
+ int __a5 = (int) (long) (a5); \
+ long __vec = (long) romvec->dest; \
+ __res = call_o32(__vec, O32_STK, __a1, __a2, __a3, __a4, __a5); \
+ __res; \
+})
+
+#endif /* defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32) */
+
+#if (defined(CONFIG_32BIT) && defined(CONFIG_FW_ARC32)) || \
+ (defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC64))
+
+#define ARC_CALL0(dest) \
+({ long __res; \
+ long (*__vec)(void) = (void *) romvec->dest; \
+ \
+ __res = __vec(); \
+ __res; \
+})
+
+#define ARC_CALL1(dest, a1) \
+({ long __res; \
+ long __a1 = (long) (a1); \
+ long (*__vec)(long) = (void *) romvec->dest; \
+ \
+ __res = __vec(__a1); \
+ __res; \
+})
+
+#define ARC_CALL2(dest, a1, a2) \
+({ long __res; \
+ long __a1 = (long) (a1); \
+ long __a2 = (long) (a2); \
+ long (*__vec)(long, long) = (void *) romvec->dest; \
+ \
+ __res = __vec(__a1, __a2); \
+ __res; \
+})
+
+#define ARC_CALL3(dest, a1, a2, a3) \
+({ long __res; \
+ long __a1 = (long) (a1); \
+ long __a2 = (long) (a2); \
+ long __a3 = (long) (a3); \
+ long (*__vec)(long, long, long) = (void *) romvec->dest; \
+ \
+ __res = __vec(__a1, __a2, __a3); \
+ __res; \
+})
+
+#define ARC_CALL4(dest, a1, a2, a3, a4) \
+({ long __res; \
+ long __a1 = (long) (a1); \
+ long __a2 = (long) (a2); \
+ long __a3 = (long) (a3); \
+ long __a4 = (long) (a4); \
+ long (*__vec)(long, long, long, long) = (void *) romvec->dest; \
+ \
+ __res = __vec(__a1, __a2, __a3, __a4); \
+ __res; \
+})
+
+#define ARC_CALL5(dest, a1, a2, a3, a4, a5) \
+({ long __res; \
+ long __a1 = (long) (a1); \
+ long __a2 = (long) (a2); \
+ long __a3 = (long) (a3); \
+ long __a4 = (long) (a4); \
+ long __a5 = (long) (a5); \
+ long (*__vec)(long, long, long, long, long); \
+ __vec = (void *) romvec->dest; \
+ \
+ __res = __vec(__a1, __a2, __a3, __a4, __a5); \
+ __res; \
+})
+#endif /* both kernel and ARC either 32-bit or 64-bit */
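/*
 * Illustrative sketch, not part of the original header: a wrapper around one
 * of the PROM vectors looks roughly like this, assuming one of the
 * configuration branches above provided ARC_CALL4; the function name is
 * hypothetical ('read' is the romvec slot declared in struct linux_romvec).
 */
static inline LONG arc_example_read(ULONG fd, PVOID buf, ULONG num, PULONG cnt)
{
	return ARC_CALL4(read, fd, buf, num, cnt);
}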
+
+#endif /* _ASM_SGIARCS_H */
diff --git a/arch/mips/include/asm/shmparam.h b/arch/mips/include/asm/shmparam.h
new file mode 100644
index 000000000..324d04042
--- /dev/null
+++ b/arch/mips/include/asm/shmparam.h
@@ -0,0 +1,13 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef _ASM_SHMPARAM_H
+#define _ASM_SHMPARAM_H
+
+#define __ARCH_FORCE_SHMLBA 1
+
+#define SHMLBA 0x40000 /* attach addr a multiple of this */
+
+#endif /* _ASM_SHMPARAM_H */
diff --git a/arch/mips/include/asm/sibyte/bcm1480_int.h b/arch/mips/include/asm/sibyte/bcm1480_int.h
new file mode 100644
index 000000000..18cf4b105
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/bcm1480_int.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * BCM1280/BCM1480 Board Support Package
+ *
+ * Interrupt Mapper definitions File: bcm1480_int.h
+ *
+ * This module contains constants for manipulating the
+ * BCM1255/BCM1280/BCM1455/BCM1480's interrupt mapper and
+ * definitions for the interrupt sources.
+ *
+ * BCM1480 specification level: 1X55_1X80-UM100-D4 (11/24/03)
+ *
+ *********************************************************************
+ *
+ * Copyright 2000,2001,2002,2003
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+
+#ifndef _BCM1480_INT_H
+#define _BCM1480_INT_H
+
+#include <asm/sibyte/sb1250_defs.h>
+
+/* *********************************************************************
+ * Interrupt Mapper Constants
+ ********************************************************************* */
+
+/*
+ * The interrupt mapper deals with 128-bit logical registers that are
+ * implemented as pairs of 64-bit registers, with the "low" 64 bits in
+ * a register that has an address 0x1000 higher(!) than the
+ * corresponding "high" register.
+ *
+ * For appropriate registers, bit 0 of the "high" register is a
+ * cascade bit that summarizes (as a bit-OR) the 64 bits of the "low"
+ * register.
+ */
+
+/*
+ * This entire file uses _BCM1480_ in all the symbols because it is
+ * entirely BCM1480 specific.
+ */
+
+/*
+ * Interrupt sources (Table 22)
+ */
+
+#define K_BCM1480_INT_SOURCES 128
+
+#define _BCM1480_INT_HIGH(k) (k)
+#define _BCM1480_INT_LOW(k) ((k)+64)
+
+#define K_BCM1480_INT_ADDR_TRAP _BCM1480_INT_HIGH(1)
+#define K_BCM1480_INT_GPIO_0 _BCM1480_INT_HIGH(4)
+#define K_BCM1480_INT_GPIO_1 _BCM1480_INT_HIGH(5)
+#define K_BCM1480_INT_GPIO_2 _BCM1480_INT_HIGH(6)
+#define K_BCM1480_INT_GPIO_3 _BCM1480_INT_HIGH(7)
+#define K_BCM1480_INT_PCI_INTA _BCM1480_INT_HIGH(8)
+#define K_BCM1480_INT_PCI_INTB _BCM1480_INT_HIGH(9)
+#define K_BCM1480_INT_PCI_INTC _BCM1480_INT_HIGH(10)
+#define K_BCM1480_INT_PCI_INTD _BCM1480_INT_HIGH(11)
+#define K_BCM1480_INT_CYCLE_CP0 _BCM1480_INT_HIGH(12)
+#define K_BCM1480_INT_CYCLE_CP1 _BCM1480_INT_HIGH(13)
+#define K_BCM1480_INT_CYCLE_CP2 _BCM1480_INT_HIGH(14)
+#define K_BCM1480_INT_CYCLE_CP3 _BCM1480_INT_HIGH(15)
+#define K_BCM1480_INT_TIMER_0 _BCM1480_INT_HIGH(20)
+#define K_BCM1480_INT_TIMER_1 _BCM1480_INT_HIGH(21)
+#define K_BCM1480_INT_TIMER_2 _BCM1480_INT_HIGH(22)
+#define K_BCM1480_INT_TIMER_3 _BCM1480_INT_HIGH(23)
+#define K_BCM1480_INT_DM_CH_0 _BCM1480_INT_HIGH(28)
+#define K_BCM1480_INT_DM_CH_1 _BCM1480_INT_HIGH(29)
+#define K_BCM1480_INT_DM_CH_2 _BCM1480_INT_HIGH(30)
+#define K_BCM1480_INT_DM_CH_3 _BCM1480_INT_HIGH(31)
+#define K_BCM1480_INT_MAC_0 _BCM1480_INT_HIGH(36)
+#define K_BCM1480_INT_MAC_0_CH1 _BCM1480_INT_HIGH(37)
+#define K_BCM1480_INT_MAC_1 _BCM1480_INT_HIGH(38)
+#define K_BCM1480_INT_MAC_1_CH1 _BCM1480_INT_HIGH(39)
+#define K_BCM1480_INT_MAC_2 _BCM1480_INT_HIGH(40)
+#define K_BCM1480_INT_MAC_2_CH1 _BCM1480_INT_HIGH(41)
+#define K_BCM1480_INT_MAC_3 _BCM1480_INT_HIGH(42)
+#define K_BCM1480_INT_MAC_3_CH1 _BCM1480_INT_HIGH(43)
+#define K_BCM1480_INT_PMI_LOW _BCM1480_INT_HIGH(52)
+#define K_BCM1480_INT_PMI_HIGH _BCM1480_INT_HIGH(53)
+#define K_BCM1480_INT_PMO_LOW _BCM1480_INT_HIGH(54)
+#define K_BCM1480_INT_PMO_HIGH _BCM1480_INT_HIGH(55)
+#define K_BCM1480_INT_MBOX_0_0 _BCM1480_INT_HIGH(56)
+#define K_BCM1480_INT_MBOX_0_1 _BCM1480_INT_HIGH(57)
+#define K_BCM1480_INT_MBOX_0_2 _BCM1480_INT_HIGH(58)
+#define K_BCM1480_INT_MBOX_0_3 _BCM1480_INT_HIGH(59)
+#define K_BCM1480_INT_MBOX_1_0 _BCM1480_INT_HIGH(60)
+#define K_BCM1480_INT_MBOX_1_1 _BCM1480_INT_HIGH(61)
+#define K_BCM1480_INT_MBOX_1_2 _BCM1480_INT_HIGH(62)
+#define K_BCM1480_INT_MBOX_1_3 _BCM1480_INT_HIGH(63)
+
+#define K_BCM1480_INT_BAD_ECC _BCM1480_INT_LOW(1)
+#define K_BCM1480_INT_COR_ECC _BCM1480_INT_LOW(2)
+#define K_BCM1480_INT_IO_BUS _BCM1480_INT_LOW(3)
+#define K_BCM1480_INT_PERF_CNT _BCM1480_INT_LOW(4)
+#define K_BCM1480_INT_SW_PERF_CNT _BCM1480_INT_LOW(5)
+#define K_BCM1480_INT_TRACE_FREEZE _BCM1480_INT_LOW(6)
+#define K_BCM1480_INT_SW_TRACE_FREEZE _BCM1480_INT_LOW(7)
+#define K_BCM1480_INT_WATCHDOG_TIMER_0 _BCM1480_INT_LOW(8)
+#define K_BCM1480_INT_WATCHDOG_TIMER_1 _BCM1480_INT_LOW(9)
+#define K_BCM1480_INT_WATCHDOG_TIMER_2 _BCM1480_INT_LOW(10)
+#define K_BCM1480_INT_WATCHDOG_TIMER_3 _BCM1480_INT_LOW(11)
+#define K_BCM1480_INT_PCI_ERROR _BCM1480_INT_LOW(16)
+#define K_BCM1480_INT_PCI_RESET _BCM1480_INT_LOW(17)
+#define K_BCM1480_INT_NODE_CONTROLLER _BCM1480_INT_LOW(18)
+#define K_BCM1480_INT_HOST_BRIDGE _BCM1480_INT_LOW(19)
+#define K_BCM1480_INT_PORT_0_FATAL _BCM1480_INT_LOW(20)
+#define K_BCM1480_INT_PORT_0_NONFATAL _BCM1480_INT_LOW(21)
+#define K_BCM1480_INT_PORT_1_FATAL _BCM1480_INT_LOW(22)
+#define K_BCM1480_INT_PORT_1_NONFATAL _BCM1480_INT_LOW(23)
+#define K_BCM1480_INT_PORT_2_FATAL _BCM1480_INT_LOW(24)
+#define K_BCM1480_INT_PORT_2_NONFATAL _BCM1480_INT_LOW(25)
+#define K_BCM1480_INT_LDT_SMI _BCM1480_INT_LOW(32)
+#define K_BCM1480_INT_LDT_NMI _BCM1480_INT_LOW(33)
+#define K_BCM1480_INT_LDT_INIT _BCM1480_INT_LOW(34)
+#define K_BCM1480_INT_LDT_STARTUP _BCM1480_INT_LOW(35)
+#define K_BCM1480_INT_LDT_EXT _BCM1480_INT_LOW(36)
+#define K_BCM1480_INT_SMB_0 _BCM1480_INT_LOW(40)
+#define K_BCM1480_INT_SMB_1 _BCM1480_INT_LOW(41)
+#define K_BCM1480_INT_PCMCIA _BCM1480_INT_LOW(42)
+#define K_BCM1480_INT_UART_0 _BCM1480_INT_LOW(44)
+#define K_BCM1480_INT_UART_1 _BCM1480_INT_LOW(45)
+#define K_BCM1480_INT_UART_2 _BCM1480_INT_LOW(46)
+#define K_BCM1480_INT_UART_3 _BCM1480_INT_LOW(47)
+#define K_BCM1480_INT_GPIO_4 _BCM1480_INT_LOW(52)
+#define K_BCM1480_INT_GPIO_5 _BCM1480_INT_LOW(53)
+#define K_BCM1480_INT_GPIO_6 _BCM1480_INT_LOW(54)
+#define K_BCM1480_INT_GPIO_7 _BCM1480_INT_LOW(55)
+#define K_BCM1480_INT_GPIO_8 _BCM1480_INT_LOW(56)
+#define K_BCM1480_INT_GPIO_9 _BCM1480_INT_LOW(57)
+#define K_BCM1480_INT_GPIO_10 _BCM1480_INT_LOW(58)
+#define K_BCM1480_INT_GPIO_11 _BCM1480_INT_LOW(59)
+#define K_BCM1480_INT_GPIO_12 _BCM1480_INT_LOW(60)
+#define K_BCM1480_INT_GPIO_13 _BCM1480_INT_LOW(61)
+#define K_BCM1480_INT_GPIO_14 _BCM1480_INT_LOW(62)
+#define K_BCM1480_INT_GPIO_15 _BCM1480_INT_LOW(63)
+
+/*
+ * Mask values for each interrupt
+ */
+
+#define _BCM1480_INT_MASK(w, n) _SB_MAKEMASK(w, ((n) & 0x3F))
+#define _BCM1480_INT_MASK1(n) _SB_MAKEMASK1(((n) & 0x3F))
+#define _BCM1480_INT_OFFSET(n) (((n) & 0x40) << 6)
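/*
 * Illustrative sketch, not part of the original header: for a source number
 * k, _BCM1480_INT_MASK1(k) yields the bit within the 64-bit half-register
 * and _BCM1480_INT_OFFSET(k) yields the extra 0x1000 byte offset that
 * selects the "low" half described above. The helper name is hypothetical.
 */
static inline void bcm1480_int_example(int k, unsigned long long *bit,
				       unsigned int *byte_off)
{
	*bit = _BCM1480_INT_MASK1(k);
	*byte_off = _BCM1480_INT_OFFSET(k);
}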
+
+#define M_BCM1480_INT_CASCADE _BCM1480_INT_MASK1(_BCM1480_INT_HIGH(0))
+
+#define M_BCM1480_INT_ADDR_TRAP _BCM1480_INT_MASK1(K_BCM1480_INT_ADDR_TRAP)
+#define M_BCM1480_INT_GPIO_0 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_0)
+#define M_BCM1480_INT_GPIO_1 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_1)
+#define M_BCM1480_INT_GPIO_2 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_2)
+#define M_BCM1480_INT_GPIO_3 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_3)
+#define M_BCM1480_INT_PCI_INTA _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTA)
+#define M_BCM1480_INT_PCI_INTB _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTB)
+#define M_BCM1480_INT_PCI_INTC _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTC)
+#define M_BCM1480_INT_PCI_INTD _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTD)
+#define M_BCM1480_INT_CYCLE_CP0 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP0)
+#define M_BCM1480_INT_CYCLE_CP1 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP1)
+#define M_BCM1480_INT_CYCLE_CP2 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP2)
+#define M_BCM1480_INT_CYCLE_CP3 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP3)
+#define M_BCM1480_INT_TIMER_0 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_0)
+#define M_BCM1480_INT_TIMER_1 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_1)
+#define M_BCM1480_INT_TIMER_2 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_2)
+#define M_BCM1480_INT_TIMER_3 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_3)
+#define M_BCM1480_INT_DM_CH_0 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_0)
+#define M_BCM1480_INT_DM_CH_1 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_1)
+#define M_BCM1480_INT_DM_CH_2 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_2)
+#define M_BCM1480_INT_DM_CH_3 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_3)
+#define M_BCM1480_INT_MAC_0 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_0)
+#define M_BCM1480_INT_MAC_0_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_0_CH1)
+#define M_BCM1480_INT_MAC_1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_1)
+#define M_BCM1480_INT_MAC_1_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_1_CH1)
+#define M_BCM1480_INT_MAC_2 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_2)
+#define M_BCM1480_INT_MAC_2_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_2_CH1)
+#define M_BCM1480_INT_MAC_3 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_3)
+#define M_BCM1480_INT_MAC_3_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_3_CH1)
+#define M_BCM1480_INT_PMI_LOW _BCM1480_INT_MASK1(K_BCM1480_INT_PMI_LOW)
+#define M_BCM1480_INT_PMI_HIGH _BCM1480_INT_MASK1(K_BCM1480_INT_PMI_HIGH)
+#define M_BCM1480_INT_PMO_LOW _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_LOW)
+#define M_BCM1480_INT_PMO_HIGH _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_HIGH)
+#define M_BCM1480_INT_MBOX_ALL _BCM1480_INT_MASK(8, K_BCM1480_INT_MBOX_0_0)
+#define M_BCM1480_INT_MBOX_0_0 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_0)
+#define M_BCM1480_INT_MBOX_0_1 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_1)
+#define M_BCM1480_INT_MBOX_0_2 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_2)
+#define M_BCM1480_INT_MBOX_0_3 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_3)
+#define M_BCM1480_INT_MBOX_1_0 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_0)
+#define M_BCM1480_INT_MBOX_1_1 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_1)
+#define M_BCM1480_INT_MBOX_1_2 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_2)
+#define M_BCM1480_INT_MBOX_1_3 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_3)
+#define M_BCM1480_INT_BAD_ECC _BCM1480_INT_MASK1(K_BCM1480_INT_BAD_ECC)
+#define M_BCM1480_INT_COR_ECC _BCM1480_INT_MASK1(K_BCM1480_INT_COR_ECC)
+#define M_BCM1480_INT_IO_BUS _BCM1480_INT_MASK1(K_BCM1480_INT_IO_BUS)
+#define M_BCM1480_INT_PERF_CNT _BCM1480_INT_MASK1(K_BCM1480_INT_PERF_CNT)
+#define M_BCM1480_INT_SW_PERF_CNT _BCM1480_INT_MASK1(K_BCM1480_INT_SW_PERF_CNT)
+#define M_BCM1480_INT_TRACE_FREEZE _BCM1480_INT_MASK1(K_BCM1480_INT_TRACE_FREEZE)
+#define M_BCM1480_INT_SW_TRACE_FREEZE _BCM1480_INT_MASK1(K_BCM1480_INT_SW_TRACE_FREEZE)
+#define M_BCM1480_INT_WATCHDOG_TIMER_0 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_0)
+#define M_BCM1480_INT_WATCHDOG_TIMER_1 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_1)
+#define M_BCM1480_INT_WATCHDOG_TIMER_2 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_2)
+#define M_BCM1480_INT_WATCHDOG_TIMER_3 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_3)
+#define M_BCM1480_INT_PCI_ERROR _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_ERROR)
+#define M_BCM1480_INT_PCI_RESET _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_RESET)
+#define M_BCM1480_INT_NODE_CONTROLLER _BCM1480_INT_MASK1(K_BCM1480_INT_NODE_CONTROLLER)
+#define M_BCM1480_INT_HOST_BRIDGE _BCM1480_INT_MASK1(K_BCM1480_INT_HOST_BRIDGE)
+#define M_BCM1480_INT_PORT_0_FATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_0_FATAL)
+#define M_BCM1480_INT_PORT_0_NONFATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_0_NONFATAL)
+#define M_BCM1480_INT_PORT_1_FATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_1_FATAL)
+#define M_BCM1480_INT_PORT_1_NONFATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_1_NONFATAL)
+#define M_BCM1480_INT_PORT_2_FATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_2_FATAL)
+#define M_BCM1480_INT_PORT_2_NONFATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_2_NONFATAL)
+#define M_BCM1480_INT_LDT_SMI _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_SMI)
+#define M_BCM1480_INT_LDT_NMI _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_NMI)
+#define M_BCM1480_INT_LDT_INIT _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_INIT)
+#define M_BCM1480_INT_LDT_STARTUP _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_STARTUP)
+#define M_BCM1480_INT_LDT_EXT _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_EXT)
+#define M_BCM1480_INT_SMB_0 _BCM1480_INT_MASK1(K_BCM1480_INT_SMB_0)
+#define M_BCM1480_INT_SMB_1 _BCM1480_INT_MASK1(K_BCM1480_INT_SMB_1)
+#define M_BCM1480_INT_PCMCIA _BCM1480_INT_MASK1(K_BCM1480_INT_PCMCIA)
+#define M_BCM1480_INT_UART_0 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_0)
+#define M_BCM1480_INT_UART_1 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_1)
+#define M_BCM1480_INT_UART_2 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_2)
+#define M_BCM1480_INT_UART_3 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_3)
+#define M_BCM1480_INT_GPIO_4 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_4)
+#define M_BCM1480_INT_GPIO_5 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_5)
+#define M_BCM1480_INT_GPIO_6 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_6)
+#define M_BCM1480_INT_GPIO_7 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_7)
+#define M_BCM1480_INT_GPIO_8 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_8)
+#define M_BCM1480_INT_GPIO_9 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_9)
+#define M_BCM1480_INT_GPIO_10 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_10)
+#define M_BCM1480_INT_GPIO_11 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_11)
+#define M_BCM1480_INT_GPIO_12 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_12)
+#define M_BCM1480_INT_GPIO_13 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_13)
+#define M_BCM1480_INT_GPIO_14 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_14)
+#define M_BCM1480_INT_GPIO_15 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_15)
+
+/*
+ * Interrupt mappings (Table 18)
+ */
+
+#define K_BCM1480_INT_MAP_I0 0 /* interrupt pins on processor */
+#define K_BCM1480_INT_MAP_I1 1
+#define K_BCM1480_INT_MAP_I2 2
+#define K_BCM1480_INT_MAP_I3 3
+#define K_BCM1480_INT_MAP_I4 4
+#define K_BCM1480_INT_MAP_I5 5
+#define K_BCM1480_INT_MAP_NMI 6 /* nonmaskable */
+#define K_BCM1480_INT_MAP_DINT 7 /* debug interrupt */
+
+/*
+ * Interrupt LDT Set Register (Table 19)
+ */
+
+#define S_BCM1480_INT_HT_INTMSG 0
+#define M_BCM1480_INT_HT_INTMSG _SB_MAKEMASK(3, S_BCM1480_INT_HT_INTMSG)
+#define V_BCM1480_INT_HT_INTMSG(x) _SB_MAKEVALUE(x, S_BCM1480_INT_HT_INTMSG)
+#define G_BCM1480_INT_HT_INTMSG(x) _SB_GETVALUE(x, S_BCM1480_INT_HT_INTMSG, M_BCM1480_INT_HT_INTMSG)
+
+#define K_BCM1480_INT_HT_INTMSG_FIXED 0
+#define K_BCM1480_INT_HT_INTMSG_ARBITRATED 1
+#define K_BCM1480_INT_HT_INTMSG_SMI 2
+#define K_BCM1480_INT_HT_INTMSG_NMI 3
+#define K_BCM1480_INT_HT_INTMSG_INIT 4
+#define K_BCM1480_INT_HT_INTMSG_STARTUP 5
+#define K_BCM1480_INT_HT_INTMSG_EXTINT 6
+#define K_BCM1480_INT_HT_INTMSG_RESERVED 7
+
+#define M_BCM1480_INT_HT_TRIGGERMODE _SB_MAKEMASK1(3)
+#define V_BCM1480_INT_HT_EDGETRIGGER 0
+#define V_BCM1480_INT_HT_LEVELTRIGGER M_BCM1480_INT_HT_TRIGGERMODE
+
+#define M_BCM1480_INT_HT_DESTMODE _SB_MAKEMASK1(4)
+#define V_BCM1480_INT_HT_PHYSICALDEST 0
+#define V_BCM1480_INT_HT_LOGICALDEST M_BCM1480_INT_HT_DESTMODE
+
+#define S_BCM1480_INT_HT_INTDEST 5
+#define M_BCM1480_INT_HT_INTDEST _SB_MAKEMASK(8, S_BCM1480_INT_HT_INTDEST)
+#define V_BCM1480_INT_HT_INTDEST(x) _SB_MAKEVALUE(x, S_BCM1480_INT_HT_INTDEST)
+#define G_BCM1480_INT_HT_INTDEST(x) _SB_GETVALUE(x, S_BCM1480_INT_HT_INTDEST, M_BCM1480_INT_HT_INTDEST)
+
+#define S_BCM1480_INT_HT_VECTOR 13
+#define M_BCM1480_INT_HT_VECTOR _SB_MAKEMASK(8, S_BCM1480_INT_HT_VECTOR)
+#define V_BCM1480_INT_HT_VECTOR(x) _SB_MAKEVALUE(x, S_BCM1480_INT_HT_VECTOR)
+#define G_BCM1480_INT_HT_VECTOR(x) _SB_GETVALUE(x, S_BCM1480_INT_HT_VECTOR, M_BCM1480_INT_HT_VECTOR)
+
+/*
+ * Vector prefix (Table 4-7)
+ */
+
+#define M_BCM1480_HTVECT_RAISE_INTLDT_HIGH 0x00
+#define M_BCM1480_HTVECT_RAISE_MBOX_0 0x40
+#define M_BCM1480_HTVECT_RAISE_INTLDT_LO 0x80
+#define M_BCM1480_HTVECT_RAISE_MBOX_1 0xC0
+
+#endif /* _BCM1480_INT_H */
diff --git a/arch/mips/include/asm/sibyte/bcm1480_l2c.h b/arch/mips/include/asm/sibyte/bcm1480_l2c.h
new file mode 100644
index 000000000..f6f3a1989
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/bcm1480_l2c.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * BCM1280/BCM1480 Board Support Package
+ *
+ * L2 Cache constants and macros File: bcm1480_l2c.h
+ *
+ * This module contains constants useful for manipulating the
+ * level 2 cache.
+ *
+ * BCM1400 specification level: 1280-UM100-D2 (11/14/03)
+ *
+ *********************************************************************
+ *
+ * Copyright 2000,2001,2002,2003
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+
+#ifndef _BCM1480_L2C_H
+#define _BCM1480_L2C_H
+
+#include <asm/sibyte/sb1250_defs.h>
+
+/*
+ * Format of level 2 cache management address (Table 55)
+ */
+
+#define S_BCM1480_L2C_MGMT_INDEX 5
+#define M_BCM1480_L2C_MGMT_INDEX _SB_MAKEMASK(12, S_BCM1480_L2C_MGMT_INDEX)
+#define V_BCM1480_L2C_MGMT_INDEX(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_INDEX)
+#define G_BCM1480_L2C_MGMT_INDEX(x) _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_INDEX, M_BCM1480_L2C_MGMT_INDEX)
+
+#define S_BCM1480_L2C_MGMT_WAY 17
+#define M_BCM1480_L2C_MGMT_WAY _SB_MAKEMASK(3, S_BCM1480_L2C_MGMT_WAY)
+#define V_BCM1480_L2C_MGMT_WAY(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_WAY)
+#define G_BCM1480_L2C_MGMT_WAY(x) _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_WAY, M_BCM1480_L2C_MGMT_WAY)
+
+#define M_BCM1480_L2C_MGMT_DIRTY _SB_MAKEMASK1(20)
+#define M_BCM1480_L2C_MGMT_VALID _SB_MAKEMASK1(21)
+
+#define S_BCM1480_L2C_MGMT_ECC_DIAG 22
+#define M_BCM1480_L2C_MGMT_ECC_DIAG _SB_MAKEMASK(2, S_BCM1480_L2C_MGMT_ECC_DIAG)
+#define V_BCM1480_L2C_MGMT_ECC_DIAG(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_ECC_DIAG)
+#define G_BCM1480_L2C_MGMT_ECC_DIAG(x) _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_ECC_DIAG, M_BCM1480_L2C_MGMT_ECC_DIAG)
+
+#define A_BCM1480_L2C_MGMT_TAG_BASE 0x00D0000000
+
+#define BCM1480_L2C_ENTRIES_PER_WAY 4096
+#define BCM1480_L2C_NUM_WAYS 8
+
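+/*
+ * Illustrative sketch only (not part of the original header): a
+ * management-space address for a given way/index pair would be composed
+ * from the fields above roughly as below; "way" and "index" are
+ * placeholder parameters.
+ */
+#if 0
+static inline uint64_t bcm1480_l2c_mgmt_addr_sketch(unsigned int way,
+						     unsigned int index)
+{
+	/* way < BCM1480_L2C_NUM_WAYS, index < BCM1480_L2C_ENTRIES_PER_WAY */
+	return A_BCM1480_L2C_MGMT_TAG_BASE |
+	       V_BCM1480_L2C_MGMT_WAY(way) |
+	       V_BCM1480_L2C_MGMT_INDEX(index);
+}
+#endif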
+
+/*
+ * Level 2 Cache Tag register (Table 59)
+ */
+
+#define S_BCM1480_L2C_TAG_MBZ 0
+#define M_BCM1480_L2C_TAG_MBZ _SB_MAKEMASK(5, S_BCM1480_L2C_TAG_MBZ)
+
+#define S_BCM1480_L2C_TAG_INDEX 5
+#define M_BCM1480_L2C_TAG_INDEX _SB_MAKEMASK(12, S_BCM1480_L2C_TAG_INDEX)
+#define V_BCM1480_L2C_TAG_INDEX(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_INDEX)
+#define G_BCM1480_L2C_TAG_INDEX(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_INDEX, M_BCM1480_L2C_TAG_INDEX)
+
+/* Note that index bit 16 is also tag bit 40 */
+#define S_BCM1480_L2C_TAG_TAG 17
+#define M_BCM1480_L2C_TAG_TAG _SB_MAKEMASK(23, S_BCM1480_L2C_TAG_TAG)
+#define V_BCM1480_L2C_TAG_TAG(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_TAG)
+#define G_BCM1480_L2C_TAG_TAG(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_TAG, M_BCM1480_L2C_TAG_TAG)
+
+#define S_BCM1480_L2C_TAG_ECC 40
+#define M_BCM1480_L2C_TAG_ECC _SB_MAKEMASK(6, S_BCM1480_L2C_TAG_ECC)
+#define V_BCM1480_L2C_TAG_ECC(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_ECC)
+#define G_BCM1480_L2C_TAG_ECC(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_ECC, M_BCM1480_L2C_TAG_ECC)
+
+#define S_BCM1480_L2C_TAG_WAY 46
+#define M_BCM1480_L2C_TAG_WAY _SB_MAKEMASK(3, S_BCM1480_L2C_TAG_WAY)
+#define V_BCM1480_L2C_TAG_WAY(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_WAY)
+#define G_BCM1480_L2C_TAG_WAY(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_WAY, M_BCM1480_L2C_TAG_WAY)
+
+#define M_BCM1480_L2C_TAG_DIRTY _SB_MAKEMASK1(49)
+#define M_BCM1480_L2C_TAG_VALID _SB_MAKEMASK1(50)
+
+#define S_BCM1480_L2C_DATA_ECC 51
+#define M_BCM1480_L2C_DATA_ECC _SB_MAKEMASK(10, S_BCM1480_L2C_DATA_ECC)
+#define V_BCM1480_L2C_DATA_ECC(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_DATA_ECC)
+#define G_BCM1480_L2C_DATA_ECC(x) _SB_GETVALUE(x, S_BCM1480_L2C_DATA_ECC, M_BCM1480_L2C_DATA_ECC)
+
+
+/*
+ * L2 Misc0 Value Register (Table 60)
+ */
+
+#define S_BCM1480_L2C_MISC0_WAY_REMOTE 0
+#define M_BCM1480_L2C_MISC0_WAY_REMOTE _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_REMOTE)
+#define G_BCM1480_L2C_MISC0_WAY_REMOTE(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_WAY_REMOTE, M_BCM1480_L2C_MISC0_WAY_REMOTE)
+
+#define S_BCM1480_L2C_MISC0_WAY_LOCAL 8
+#define M_BCM1480_L2C_MISC0_WAY_LOCAL _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_LOCAL)
+#define G_BCM1480_L2C_MISC0_WAY_LOCAL(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_WAY_LOCAL, M_BCM1480_L2C_MISC0_WAY_LOCAL)
+
+#define S_BCM1480_L2C_MISC0_WAY_ENABLE 16
+#define M_BCM1480_L2C_MISC0_WAY_ENABLE _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_ENABLE)
+#define G_BCM1480_L2C_MISC0_WAY_ENABLE(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_WAY_ENABLE, M_BCM1480_L2C_MISC0_WAY_ENABLE)
+
+#define S_BCM1480_L2C_MISC0_CACHE_DISABLE 24
+#define M_BCM1480_L2C_MISC0_CACHE_DISABLE _SB_MAKEMASK(2, S_BCM1480_L2C_MISC0_CACHE_DISABLE)
+#define G_BCM1480_L2C_MISC0_CACHE_DISABLE(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_CACHE_DISABLE, M_BCM1480_L2C_MISC0_CACHE_DISABLE)
+
+#define S_BCM1480_L2C_MISC0_CACHE_QUAD 26
+#define M_BCM1480_L2C_MISC0_CACHE_QUAD _SB_MAKEMASK(2, S_BCM1480_L2C_MISC0_CACHE_QUAD)
+#define G_BCM1480_L2C_MISC0_CACHE_QUAD(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_CACHE_QUAD, M_BCM1480_L2C_MISC0_CACHE_QUAD)
+
+#define S_BCM1480_L2C_MISC0_MC_PRIORITY 30
+#define M_BCM1480_L2C_MISC0_MC_PRIORITY _SB_MAKEMASK1(S_BCM1480_L2C_MISC0_MC_PRIORITY)
+
+#define S_BCM1480_L2C_MISC0_ECC_CLEANUP 31
+#define M_BCM1480_L2C_MISC0_ECC_CLEANUP _SB_MAKEMASK1(S_BCM1480_L2C_MISC0_ECC_CLEANUP)
+
+
+/*
+ * L2 Misc1 Value Register (Table 60)
+ */
+
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_0 0
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_0 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_0)
+#define G_BCM1480_L2C_MISC1_WAY_AGENT_0(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_0, M_BCM1480_L2C_MISC1_WAY_AGENT_0)
+
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_1 8
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_1 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_1)
+#define G_BCM1480_L2C_MISC1_WAY_AGENT_1(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_1, M_BCM1480_L2C_MISC1_WAY_AGENT_1)
+
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_2 16
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_2 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_2)
+#define G_BCM1480_L2C_MISC1_WAY_AGENT_2(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_2, M_BCM1480_L2C_MISC1_WAY_AGENT_2)
+
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_3 24
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_3 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_3)
+#define G_BCM1480_L2C_MISC1_WAY_AGENT_3(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_3, M_BCM1480_L2C_MISC1_WAY_AGENT_3)
+
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_4 32
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_4 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_4)
+#define G_BCM1480_L2C_MISC1_WAY_AGENT_4(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_4, M_BCM1480_L2C_MISC1_WAY_AGENT_4)
+
+
+/*
+ * L2 Misc2 Value Register (Table 60)
+ */
+
+#define S_BCM1480_L2C_MISC2_WAY_AGENT_8 0
+#define M_BCM1480_L2C_MISC2_WAY_AGENT_8 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_8)
+#define G_BCM1480_L2C_MISC2_WAY_AGENT_8(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC2_WAY_AGENT_8, M_BCM1480_L2C_MISC2_WAY_AGENT_8)
+
+#define S_BCM1480_L2C_MISC2_WAY_AGENT_9 8
+#define M_BCM1480_L2C_MISC2_WAY_AGENT_9 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_9)
+#define G_BCM1480_L2C_MISC2_WAY_AGENT_9(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC2_WAY_AGENT_9, M_BCM1480_L2C_MISC2_WAY_AGENT_9)
+
+#define S_BCM1480_L2C_MISC2_WAY_AGENT_A 16
+#define M_BCM1480_L2C_MISC2_WAY_AGENT_A _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_A)
+#define G_BCM1480_L2C_MISC2_WAY_AGENT_A(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC2_WAY_AGENT_A, M_BCM1480_L2C_MISC2_WAY_AGENT_A)
+
+
+#endif /* _BCM1480_L2C_H */
diff --git a/arch/mips/include/asm/sibyte/bcm1480_mc.h b/arch/mips/include/asm/sibyte/bcm1480_mc.h
new file mode 100644
index 000000000..1d9b62d47
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/bcm1480_mc.h
@@ -0,0 +1,971 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * BCM1280/BCM1480 Board Support Package
+ *
+ * Memory Controller constants File: bcm1480_mc.h
+ *
+ * This module contains constants and macros useful for
+ * programming the memory controller.
+ *
+ * BCM1400 specification level: 1280-UM100-D1 (11/14/03 Review Copy)
+ *
+ *********************************************************************
+ *
+ * Copyright 2000,2001,2002,2003
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+
+#ifndef _BCM1480_MC_H
+#define _BCM1480_MC_H
+
+#include <asm/sibyte/sb1250_defs.h>
+
+/*
+ * Memory Channel Configuration Register (Table 81)
+ */
+
+#define S_BCM1480_MC_INTLV0 0
+#define M_BCM1480_MC_INTLV0 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV0)
+#define V_BCM1480_MC_INTLV0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV0)
+#define G_BCM1480_MC_INTLV0(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV0, M_BCM1480_MC_INTLV0)
+#define V_BCM1480_MC_INTLV0_DEFAULT V_BCM1480_MC_INTLV0(0)
+
+#define S_BCM1480_MC_INTLV1 8
+#define M_BCM1480_MC_INTLV1 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV1)
+#define V_BCM1480_MC_INTLV1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV1)
+#define G_BCM1480_MC_INTLV1(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV1, M_BCM1480_MC_INTLV1)
+#define V_BCM1480_MC_INTLV1_DEFAULT V_BCM1480_MC_INTLV1(0)
+
+#define S_BCM1480_MC_INTLV2 16
+#define M_BCM1480_MC_INTLV2 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV2)
+#define V_BCM1480_MC_INTLV2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV2)
+#define G_BCM1480_MC_INTLV2(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV2, M_BCM1480_MC_INTLV2)
+#define V_BCM1480_MC_INTLV2_DEFAULT V_BCM1480_MC_INTLV2(0)
+
+#define S_BCM1480_MC_CS_MODE 32
+#define M_BCM1480_MC_CS_MODE _SB_MAKEMASK(8, S_BCM1480_MC_CS_MODE)
+#define V_BCM1480_MC_CS_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS_MODE)
+#define G_BCM1480_MC_CS_MODE(x) _SB_GETVALUE(x, S_BCM1480_MC_CS_MODE, M_BCM1480_MC_CS_MODE)
+#define V_BCM1480_MC_CS_MODE_DEFAULT V_BCM1480_MC_CS_MODE(0)
+
+#define V_BCM1480_MC_CONFIG_DEFAULT (V_BCM1480_MC_INTLV0_DEFAULT | \
+ V_BCM1480_MC_INTLV1_DEFAULT | \
+ V_BCM1480_MC_INTLV2_DEFAULT | \
+ V_BCM1480_MC_CS_MODE_DEFAULT)
+
+#define K_BCM1480_MC_CS01_MODE 0x03
+#define K_BCM1480_MC_CS02_MODE 0x05
+#define K_BCM1480_MC_CS0123_MODE 0x0F
+#define K_BCM1480_MC_CS0246_MODE 0x55
+#define K_BCM1480_MC_CS0145_MODE 0x33
+#define K_BCM1480_MC_CS0167_MODE 0xC3
+#define K_BCM1480_MC_CSFULL_MODE 0xFF
+
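+/*
+ * Illustrative sketch only (not part of the original header): a channel
+ * configuration value using the CS0/CS1 mode constant might be built
+ * from the fields above as below; the interleave bit values (0) are
+ * placeholders.
+ */
+#if 0
+static inline uint64_t bcm1480_mc_config_sketch(void)
+{
+	return V_BCM1480_MC_INTLV0(0) |
+	       V_BCM1480_MC_INTLV1(0) |
+	       V_BCM1480_MC_INTLV2(0) |
+	       V_BCM1480_MC_CS_MODE(K_BCM1480_MC_CS01_MODE);
+}
+#endif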
+/*
+ * Chip Select Start Address Register (Table 82)
+ */
+
+#define S_BCM1480_MC_CS0_START 0
+#define M_BCM1480_MC_CS0_START _SB_MAKEMASK(12, S_BCM1480_MC_CS0_START)
+#define V_BCM1480_MC_CS0_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS0_START)
+#define G_BCM1480_MC_CS0_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS0_START, M_BCM1480_MC_CS0_START)
+
+#define S_BCM1480_MC_CS1_START 16
+#define M_BCM1480_MC_CS1_START _SB_MAKEMASK(12, S_BCM1480_MC_CS1_START)
+#define V_BCM1480_MC_CS1_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS1_START)
+#define G_BCM1480_MC_CS1_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS1_START, M_BCM1480_MC_CS1_START)
+
+#define S_BCM1480_MC_CS2_START 32
+#define M_BCM1480_MC_CS2_START _SB_MAKEMASK(12, S_BCM1480_MC_CS2_START)
+#define V_BCM1480_MC_CS2_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS2_START)
+#define G_BCM1480_MC_CS2_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS2_START, M_BCM1480_MC_CS2_START)
+
+#define S_BCM1480_MC_CS3_START 48
+#define M_BCM1480_MC_CS3_START _SB_MAKEMASK(12, S_BCM1480_MC_CS3_START)
+#define V_BCM1480_MC_CS3_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS3_START)
+#define G_BCM1480_MC_CS3_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS3_START, M_BCM1480_MC_CS3_START)
+
+/*
+ * Chip Select End Address Register (Table 83)
+ */
+
+#define S_BCM1480_MC_CS0_END 0
+#define M_BCM1480_MC_CS0_END _SB_MAKEMASK(12, S_BCM1480_MC_CS0_END)
+#define V_BCM1480_MC_CS0_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS0_END)
+#define G_BCM1480_MC_CS0_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS0_END, M_BCM1480_MC_CS0_END)
+
+#define S_BCM1480_MC_CS1_END 16
+#define M_BCM1480_MC_CS1_END _SB_MAKEMASK(12, S_BCM1480_MC_CS1_END)
+#define V_BCM1480_MC_CS1_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS1_END)
+#define G_BCM1480_MC_CS1_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS1_END, M_BCM1480_MC_CS1_END)
+
+#define S_BCM1480_MC_CS2_END 32
+#define M_BCM1480_MC_CS2_END _SB_MAKEMASK(12, S_BCM1480_MC_CS2_END)
+#define V_BCM1480_MC_CS2_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS2_END)
+#define G_BCM1480_MC_CS2_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS2_END, M_BCM1480_MC_CS2_END)
+
+#define S_BCM1480_MC_CS3_END 48
+#define M_BCM1480_MC_CS3_END _SB_MAKEMASK(12, S_BCM1480_MC_CS3_END)
+#define V_BCM1480_MC_CS3_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS3_END)
+#define G_BCM1480_MC_CS3_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS3_END, M_BCM1480_MC_CS3_END)
+
+/*
+ * Row Address Bit Select Register 0 (Table 84)
+ */
+
+#define S_BCM1480_MC_ROW00 0
+#define M_BCM1480_MC_ROW00 _SB_MAKEMASK(6, S_BCM1480_MC_ROW00)
+#define V_BCM1480_MC_ROW00(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW00)
+#define G_BCM1480_MC_ROW00(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW00, M_BCM1480_MC_ROW00)
+
+#define S_BCM1480_MC_ROW01 8
+#define M_BCM1480_MC_ROW01 _SB_MAKEMASK(6, S_BCM1480_MC_ROW01)
+#define V_BCM1480_MC_ROW01(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW01)
+#define G_BCM1480_MC_ROW01(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW01, M_BCM1480_MC_ROW01)
+
+#define S_BCM1480_MC_ROW02 16
+#define M_BCM1480_MC_ROW02 _SB_MAKEMASK(6, S_BCM1480_MC_ROW02)
+#define V_BCM1480_MC_ROW02(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW02)
+#define G_BCM1480_MC_ROW02(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW02, M_BCM1480_MC_ROW02)
+
+#define S_BCM1480_MC_ROW03 24
+#define M_BCM1480_MC_ROW03 _SB_MAKEMASK(6, S_BCM1480_MC_ROW03)
+#define V_BCM1480_MC_ROW03(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW03)
+#define G_BCM1480_MC_ROW03(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW03, M_BCM1480_MC_ROW03)
+
+#define S_BCM1480_MC_ROW04 32
+#define M_BCM1480_MC_ROW04 _SB_MAKEMASK(6, S_BCM1480_MC_ROW04)
+#define V_BCM1480_MC_ROW04(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW04)
+#define G_BCM1480_MC_ROW04(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW04, M_BCM1480_MC_ROW04)
+
+#define S_BCM1480_MC_ROW05 40
+#define M_BCM1480_MC_ROW05 _SB_MAKEMASK(6, S_BCM1480_MC_ROW05)
+#define V_BCM1480_MC_ROW05(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW05)
+#define G_BCM1480_MC_ROW05(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW05, M_BCM1480_MC_ROW05)
+
+#define S_BCM1480_MC_ROW06 48
+#define M_BCM1480_MC_ROW06 _SB_MAKEMASK(6, S_BCM1480_MC_ROW06)
+#define V_BCM1480_MC_ROW06(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW06)
+#define G_BCM1480_MC_ROW06(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW06, M_BCM1480_MC_ROW06)
+
+#define S_BCM1480_MC_ROW07 56
+#define M_BCM1480_MC_ROW07 _SB_MAKEMASK(6, S_BCM1480_MC_ROW07)
+#define V_BCM1480_MC_ROW07(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW07)
+#define G_BCM1480_MC_ROW07(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW07, M_BCM1480_MC_ROW07)
+
+/*
+ * Row Address Bit Select Register 1 (Table 85)
+ */
+
+#define S_BCM1480_MC_ROW08 0
+#define M_BCM1480_MC_ROW08 _SB_MAKEMASK(6, S_BCM1480_MC_ROW08)
+#define V_BCM1480_MC_ROW08(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW08)
+#define G_BCM1480_MC_ROW08(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW08, M_BCM1480_MC_ROW08)
+
+#define S_BCM1480_MC_ROW09 8
+#define M_BCM1480_MC_ROW09 _SB_MAKEMASK(6, S_BCM1480_MC_ROW09)
+#define V_BCM1480_MC_ROW09(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW09)
+#define G_BCM1480_MC_ROW09(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW09, M_BCM1480_MC_ROW09)
+
+#define S_BCM1480_MC_ROW10 16
+#define M_BCM1480_MC_ROW10 _SB_MAKEMASK(6, S_BCM1480_MC_ROW10)
+#define V_BCM1480_MC_ROW10(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW10)
+#define G_BCM1480_MC_ROW10(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW10, M_BCM1480_MC_ROW10)
+
+#define S_BCM1480_MC_ROW11 24
+#define M_BCM1480_MC_ROW11 _SB_MAKEMASK(6, S_BCM1480_MC_ROW11)
+#define V_BCM1480_MC_ROW11(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW11)
+#define G_BCM1480_MC_ROW11(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW11, M_BCM1480_MC_ROW11)
+
+#define S_BCM1480_MC_ROW12 32
+#define M_BCM1480_MC_ROW12 _SB_MAKEMASK(6, S_BCM1480_MC_ROW12)
+#define V_BCM1480_MC_ROW12(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW12)
+#define G_BCM1480_MC_ROW12(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW12, M_BCM1480_MC_ROW12)
+
+#define S_BCM1480_MC_ROW13 40
+#define M_BCM1480_MC_ROW13 _SB_MAKEMASK(6, S_BCM1480_MC_ROW13)
+#define V_BCM1480_MC_ROW13(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW13)
+#define G_BCM1480_MC_ROW13(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW13, M_BCM1480_MC_ROW13)
+
+#define S_BCM1480_MC_ROW14 48
+#define M_BCM1480_MC_ROW14 _SB_MAKEMASK(6, S_BCM1480_MC_ROW14)
+#define V_BCM1480_MC_ROW14(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW14)
+#define G_BCM1480_MC_ROW14(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW14, M_BCM1480_MC_ROW14)
+
+#define K_BCM1480_MC_ROWX_BIT_SPACING 8
+
+/*
+ * Column Address Bit Select Register 0 (Table 86)
+ */
+
+#define S_BCM1480_MC_COL00 0
+#define M_BCM1480_MC_COL00 _SB_MAKEMASK(6, S_BCM1480_MC_COL00)
+#define V_BCM1480_MC_COL00(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL00)
+#define G_BCM1480_MC_COL00(x) _SB_GETVALUE(x, S_BCM1480_MC_COL00, M_BCM1480_MC_COL00)
+
+#define S_BCM1480_MC_COL01 8
+#define M_BCM1480_MC_COL01 _SB_MAKEMASK(6, S_BCM1480_MC_COL01)
+#define V_BCM1480_MC_COL01(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL01)
+#define G_BCM1480_MC_COL01(x) _SB_GETVALUE(x, S_BCM1480_MC_COL01, M_BCM1480_MC_COL01)
+
+#define S_BCM1480_MC_COL02 16
+#define M_BCM1480_MC_COL02 _SB_MAKEMASK(6, S_BCM1480_MC_COL02)
+#define V_BCM1480_MC_COL02(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL02)
+#define G_BCM1480_MC_COL02(x) _SB_GETVALUE(x, S_BCM1480_MC_COL02, M_BCM1480_MC_COL02)
+
+#define S_BCM1480_MC_COL03 24
+#define M_BCM1480_MC_COL03 _SB_MAKEMASK(6, S_BCM1480_MC_COL03)
+#define V_BCM1480_MC_COL03(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL03)
+#define G_BCM1480_MC_COL03(x) _SB_GETVALUE(x, S_BCM1480_MC_COL03, M_BCM1480_MC_COL03)
+
+#define S_BCM1480_MC_COL04 32
+#define M_BCM1480_MC_COL04 _SB_MAKEMASK(6, S_BCM1480_MC_COL04)
+#define V_BCM1480_MC_COL04(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL04)
+#define G_BCM1480_MC_COL04(x) _SB_GETVALUE(x, S_BCM1480_MC_COL04, M_BCM1480_MC_COL04)
+
+#define S_BCM1480_MC_COL05 40
+#define M_BCM1480_MC_COL05 _SB_MAKEMASK(6, S_BCM1480_MC_COL05)
+#define V_BCM1480_MC_COL05(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL05)
+#define G_BCM1480_MC_COL05(x) _SB_GETVALUE(x, S_BCM1480_MC_COL05, M_BCM1480_MC_COL05)
+
+#define S_BCM1480_MC_COL06 48
+#define M_BCM1480_MC_COL06 _SB_MAKEMASK(6, S_BCM1480_MC_COL06)
+#define V_BCM1480_MC_COL06(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL06)
+#define G_BCM1480_MC_COL06(x) _SB_GETVALUE(x, S_BCM1480_MC_COL06, M_BCM1480_MC_COL06)
+
+#define S_BCM1480_MC_COL07 56
+#define M_BCM1480_MC_COL07 _SB_MAKEMASK(6, S_BCM1480_MC_COL07)
+#define V_BCM1480_MC_COL07(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL07)
+#define G_BCM1480_MC_COL07(x) _SB_GETVALUE(x, S_BCM1480_MC_COL07, M_BCM1480_MC_COL07)
+
+/*
+ * Column Address Bit Select Register 1 (Table 87)
+ */
+
+#define S_BCM1480_MC_COL08 0
+#define M_BCM1480_MC_COL08 _SB_MAKEMASK(6, S_BCM1480_MC_COL08)
+#define V_BCM1480_MC_COL08(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL08)
+#define G_BCM1480_MC_COL08(x) _SB_GETVALUE(x, S_BCM1480_MC_COL08, M_BCM1480_MC_COL08)
+
+#define S_BCM1480_MC_COL09 8
+#define M_BCM1480_MC_COL09 _SB_MAKEMASK(6, S_BCM1480_MC_COL09)
+#define V_BCM1480_MC_COL09(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL09)
+#define G_BCM1480_MC_COL09(x) _SB_GETVALUE(x, S_BCM1480_MC_COL09, M_BCM1480_MC_COL09)
+
+#define S_BCM1480_MC_COL10 16 /* not a valid position, must be programmed as 0 */
+
+#define S_BCM1480_MC_COL11 24
+#define M_BCM1480_MC_COL11 _SB_MAKEMASK(6, S_BCM1480_MC_COL11)
+#define V_BCM1480_MC_COL11(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL11)
+#define G_BCM1480_MC_COL11(x) _SB_GETVALUE(x, S_BCM1480_MC_COL11, M_BCM1480_MC_COL11)
+
+#define S_BCM1480_MC_COL12 32
+#define M_BCM1480_MC_COL12 _SB_MAKEMASK(6, S_BCM1480_MC_COL12)
+#define V_BCM1480_MC_COL12(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL12)
+#define G_BCM1480_MC_COL12(x) _SB_GETVALUE(x, S_BCM1480_MC_COL12, M_BCM1480_MC_COL12)
+
+#define S_BCM1480_MC_COL13 40
+#define M_BCM1480_MC_COL13 _SB_MAKEMASK(6, S_BCM1480_MC_COL13)
+#define V_BCM1480_MC_COL13(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL13)
+#define G_BCM1480_MC_COL13(x) _SB_GETVALUE(x, S_BCM1480_MC_COL13, M_BCM1480_MC_COL13)
+
+#define S_BCM1480_MC_COL14 48
+#define M_BCM1480_MC_COL14 _SB_MAKEMASK(6, S_BCM1480_MC_COL14)
+#define V_BCM1480_MC_COL14(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL14)
+#define G_BCM1480_MC_COL14(x) _SB_GETVALUE(x, S_BCM1480_MC_COL14, M_BCM1480_MC_COL14)
+
+#define K_BCM1480_MC_COLX_BIT_SPACING 8
+
+/*
+ * CS0 and CS1 Bank Address Bit Select Register (Table 88)
+ */
+
+#define S_BCM1480_MC_CS01_BANK0 0
+#define M_BCM1480_MC_CS01_BANK0 _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK0)
+#define V_BCM1480_MC_CS01_BANK0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK0)
+#define G_BCM1480_MC_CS01_BANK0(x) _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK0, M_BCM1480_MC_CS01_BANK0)
+
+#define S_BCM1480_MC_CS01_BANK1 8
+#define M_BCM1480_MC_CS01_BANK1 _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK1)
+#define V_BCM1480_MC_CS01_BANK1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK1)
+#define G_BCM1480_MC_CS01_BANK1(x) _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK1, M_BCM1480_MC_CS01_BANK1)
+
+#define S_BCM1480_MC_CS01_BANK2 16
+#define M_BCM1480_MC_CS01_BANK2 _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK2)
+#define V_BCM1480_MC_CS01_BANK2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK2)
+#define G_BCM1480_MC_CS01_BANK2(x) _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK2, M_BCM1480_MC_CS01_BANK2)
+
+/*
+ * CS2 and CS3 Bank Address Bit Select Register (Table 89)
+ */
+
+#define S_BCM1480_MC_CS23_BANK0 0
+#define M_BCM1480_MC_CS23_BANK0 _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK0)
+#define V_BCM1480_MC_CS23_BANK0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK0)
+#define G_BCM1480_MC_CS23_BANK0(x) _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK0, M_BCM1480_MC_CS23_BANK0)
+
+#define S_BCM1480_MC_CS23_BANK1 8
+#define M_BCM1480_MC_CS23_BANK1 _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK1)
+#define V_BCM1480_MC_CS23_BANK1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK1)
+#define G_BCM1480_MC_CS23_BANK1(x) _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK1, M_BCM1480_MC_CS23_BANK1)
+
+#define S_BCM1480_MC_CS23_BANK2 16
+#define M_BCM1480_MC_CS23_BANK2 _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK2)
+#define V_BCM1480_MC_CS23_BANK2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK2)
+#define G_BCM1480_MC_CS23_BANK2(x) _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK2, M_BCM1480_MC_CS23_BANK2)
+
+#define K_BCM1480_MC_CSXX_BANKX_BIT_SPACING 8
+
+/*
+ * DRAM Command Register (Table 90)
+ */
+
+#define S_BCM1480_MC_COMMAND 0
+#define M_BCM1480_MC_COMMAND _SB_MAKEMASK(4, S_BCM1480_MC_COMMAND)
+#define V_BCM1480_MC_COMMAND(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COMMAND)
+#define G_BCM1480_MC_COMMAND(x) _SB_GETVALUE(x, S_BCM1480_MC_COMMAND, M_BCM1480_MC_COMMAND)
+
+#define K_BCM1480_MC_COMMAND_EMRS 0
+#define K_BCM1480_MC_COMMAND_MRS 1
+#define K_BCM1480_MC_COMMAND_PRE 2
+#define K_BCM1480_MC_COMMAND_AR 3
+#define K_BCM1480_MC_COMMAND_SETRFSH 4
+#define K_BCM1480_MC_COMMAND_CLRRFSH 5
+#define K_BCM1480_MC_COMMAND_SETPWRDN 6
+#define K_BCM1480_MC_COMMAND_CLRPWRDN 7
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define K_BCM1480_MC_COMMAND_EMRS2 8
+#define K_BCM1480_MC_COMMAND_EMRS3 9
+#define K_BCM1480_MC_COMMAND_ENABLE_MCLK 10
+#define K_BCM1480_MC_COMMAND_DISABLE_MCLK 11
+#endif
+
+#define V_BCM1480_MC_COMMAND_EMRS V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS)
+#define V_BCM1480_MC_COMMAND_MRS V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_MRS)
+#define V_BCM1480_MC_COMMAND_PRE V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_PRE)
+#define V_BCM1480_MC_COMMAND_AR V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_AR)
+#define V_BCM1480_MC_COMMAND_SETRFSH V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_SETRFSH)
+#define V_BCM1480_MC_COMMAND_CLRRFSH V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_CLRRFSH)
+#define V_BCM1480_MC_COMMAND_SETPWRDN V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_SETPWRDN)
+#define V_BCM1480_MC_COMMAND_CLRPWRDN V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_CLRPWRDN)
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define V_BCM1480_MC_COMMAND_EMRS2 V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS2)
+#define V_BCM1480_MC_COMMAND_EMRS3 V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS3)
+#define V_BCM1480_MC_COMMAND_ENABLE_MCLK V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_ENABLE_MCLK)
+#define V_BCM1480_MC_COMMAND_DISABLE_MCLK V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_DISABLE_MCLK)
+#endif
+
+#define S_BCM1480_MC_CS0 4
+#define M_BCM1480_MC_CS0 _SB_MAKEMASK1(4)
+#define M_BCM1480_MC_CS1 _SB_MAKEMASK1(5)
+#define M_BCM1480_MC_CS2 _SB_MAKEMASK1(6)
+#define M_BCM1480_MC_CS3 _SB_MAKEMASK1(7)
+#define M_BCM1480_MC_CS4 _SB_MAKEMASK1(8)
+#define M_BCM1480_MC_CS5 _SB_MAKEMASK1(9)
+#define M_BCM1480_MC_CS6 _SB_MAKEMASK1(10)
+#define M_BCM1480_MC_CS7 _SB_MAKEMASK1(11)
+
+#define M_BCM1480_MC_CS _SB_MAKEMASK(8, S_BCM1480_MC_CS0)
+#define V_BCM1480_MC_CS(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS0)
+#define G_BCM1480_MC_CS(x) _SB_GETVALUE(x, S_BCM1480_MC_CS0, M_BCM1480_MC_CS)
+
+#define M_BCM1480_MC_CMD_ACTIVE _SB_MAKEMASK1(16)
+
+/*
+ * DRAM Mode Register (Table 91)
+ */
+
+#define S_BCM1480_MC_EMODE 0
+#define M_BCM1480_MC_EMODE _SB_MAKEMASK(15, S_BCM1480_MC_EMODE)
+#define V_BCM1480_MC_EMODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_EMODE)
+#define G_BCM1480_MC_EMODE(x) _SB_GETVALUE(x, S_BCM1480_MC_EMODE, M_BCM1480_MC_EMODE)
+#define V_BCM1480_MC_EMODE_DEFAULT V_BCM1480_MC_EMODE(0)
+
+#define S_BCM1480_MC_MODE 16
+#define M_BCM1480_MC_MODE _SB_MAKEMASK(15, S_BCM1480_MC_MODE)
+#define V_BCM1480_MC_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_MODE)
+#define G_BCM1480_MC_MODE(x) _SB_GETVALUE(x, S_BCM1480_MC_MODE, M_BCM1480_MC_MODE)
+#define V_BCM1480_MC_MODE_DEFAULT V_BCM1480_MC_MODE(0)
+
+#define S_BCM1480_MC_DRAM_TYPE 32
+#define M_BCM1480_MC_DRAM_TYPE _SB_MAKEMASK(4, S_BCM1480_MC_DRAM_TYPE)
+#define V_BCM1480_MC_DRAM_TYPE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DRAM_TYPE)
+#define G_BCM1480_MC_DRAM_TYPE(x) _SB_GETVALUE(x, S_BCM1480_MC_DRAM_TYPE, M_BCM1480_MC_DRAM_TYPE)
+
+#define K_BCM1480_MC_DRAM_TYPE_JEDEC 0
+#define K_BCM1480_MC_DRAM_TYPE_FCRAM 1
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define K_BCM1480_MC_DRAM_TYPE_DDR2 2
+#endif
+
+#define K_BCM1480_MC_DRAM_TYPE_DDR2_PASS1 0
+
+#define V_BCM1480_MC_DRAM_TYPE_JEDEC V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_JEDEC)
+#define V_BCM1480_MC_DRAM_TYPE_FCRAM V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_FCRAM)
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define V_BCM1480_MC_DRAM_TYPE_DDR2 V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_DDR2)
+#endif
+
+#define M_BCM1480_MC_GANGED _SB_MAKEMASK1(36)
+#define M_BCM1480_MC_BY9_INTF _SB_MAKEMASK1(37)
+#define M_BCM1480_MC_FORCE_ECC64 _SB_MAKEMASK1(38)
+#define M_BCM1480_MC_ECC_DISABLE _SB_MAKEMASK1(39)
+
+#define S_BCM1480_MC_PG_POLICY 40
+#define M_BCM1480_MC_PG_POLICY _SB_MAKEMASK(2, S_BCM1480_MC_PG_POLICY)
+#define V_BCM1480_MC_PG_POLICY(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PG_POLICY)
+#define G_BCM1480_MC_PG_POLICY(x) _SB_GETVALUE(x, S_BCM1480_MC_PG_POLICY, M_BCM1480_MC_PG_POLICY)
+
+#define K_BCM1480_MC_PG_POLICY_CLOSED 0
+#define K_BCM1480_MC_PG_POLICY_CAS_TIME_CHK 1
+
+#define V_BCM1480_MC_PG_POLICY_CLOSED V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CLOSED)
+#define V_BCM1480_MC_PG_POLICY_CAS_TIME_CHK V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CAS_TIME_CHK)
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define M_BCM1480_MC_2T_CMD _SB_MAKEMASK1(42)
+#define M_BCM1480_MC_ECC_COR_DIS _SB_MAKEMASK1(43)
+#endif
+
+#define V_BCM1480_MC_DRAMMODE_DEFAULT (V_BCM1480_MC_EMODE_DEFAULT | V_BCM1480_MC_MODE_DEFAULT | V_BCM1480_MC_DRAM_TYPE_JEDEC | \
+ V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CAS_TIME_CHK))
+
+/*
+ * Memory Clock Configuration Register (Table 92)
+ */
+
+#define S_BCM1480_MC_CLK_RATIO 0
+#define M_BCM1480_MC_CLK_RATIO _SB_MAKEMASK(6, S_BCM1480_MC_CLK_RATIO)
+#define V_BCM1480_MC_CLK_RATIO(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CLK_RATIO)
+#define G_BCM1480_MC_CLK_RATIO(x) _SB_GETVALUE(x, S_BCM1480_MC_CLK_RATIO, M_BCM1480_MC_CLK_RATIO)
+
+#define V_BCM1480_MC_CLK_RATIO_DEFAULT V_BCM1480_MC_CLK_RATIO(10)
+
+#define S_BCM1480_MC_REF_RATE 8
+#define M_BCM1480_MC_REF_RATE _SB_MAKEMASK(8, S_BCM1480_MC_REF_RATE)
+#define V_BCM1480_MC_REF_RATE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_REF_RATE)
+#define G_BCM1480_MC_REF_RATE(x) _SB_GETVALUE(x, S_BCM1480_MC_REF_RATE, M_BCM1480_MC_REF_RATE)
+
+#define K_BCM1480_MC_REF_RATE_100MHz 0x31
+#define K_BCM1480_MC_REF_RATE_200MHz 0x62
+#define K_BCM1480_MC_REF_RATE_400MHz 0xC4
+
+#define V_BCM1480_MC_REF_RATE_100MHz V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_100MHz)
+#define V_BCM1480_MC_REF_RATE_200MHz V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_200MHz)
+#define V_BCM1480_MC_REF_RATE_400MHz V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_400MHz)
+#define V_BCM1480_MC_REF_RATE_DEFAULT V_BCM1480_MC_REF_RATE_400MHz
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define M_BCM1480_MC_AUTO_REF_DIS _SB_MAKEMASK1(16)
+#endif
+
+/*
+ * ODT Register (Table 99)
+ */
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define M_BCM1480_MC_RD_ODT0_CS0 _SB_MAKEMASK1(0)
+#define M_BCM1480_MC_RD_ODT0_CS2 _SB_MAKEMASK1(1)
+#define M_BCM1480_MC_RD_ODT0_CS4 _SB_MAKEMASK1(2)
+#define M_BCM1480_MC_RD_ODT0_CS6 _SB_MAKEMASK1(3)
+#define M_BCM1480_MC_WR_ODT0_CS0 _SB_MAKEMASK1(4)
+#define M_BCM1480_MC_WR_ODT0_CS2 _SB_MAKEMASK1(5)
+#define M_BCM1480_MC_WR_ODT0_CS4 _SB_MAKEMASK1(6)
+#define M_BCM1480_MC_WR_ODT0_CS6 _SB_MAKEMASK1(7)
+#define M_BCM1480_MC_RD_ODT2_CS0 _SB_MAKEMASK1(8)
+#define M_BCM1480_MC_RD_ODT2_CS2 _SB_MAKEMASK1(9)
+#define M_BCM1480_MC_RD_ODT2_CS4 _SB_MAKEMASK1(10)
+#define M_BCM1480_MC_RD_ODT2_CS6 _SB_MAKEMASK1(11)
+#define M_BCM1480_MC_WR_ODT2_CS0 _SB_MAKEMASK1(12)
+#define M_BCM1480_MC_WR_ODT2_CS2 _SB_MAKEMASK1(13)
+#define M_BCM1480_MC_WR_ODT2_CS4 _SB_MAKEMASK1(14)
+#define M_BCM1480_MC_WR_ODT2_CS6 _SB_MAKEMASK1(15)
+#define M_BCM1480_MC_RD_ODT4_CS0 _SB_MAKEMASK1(16)
+#define M_BCM1480_MC_RD_ODT4_CS2 _SB_MAKEMASK1(17)
+#define M_BCM1480_MC_RD_ODT4_CS4 _SB_MAKEMASK1(18)
+#define M_BCM1480_MC_RD_ODT4_CS6 _SB_MAKEMASK1(19)
+#define M_BCM1480_MC_WR_ODT4_CS0 _SB_MAKEMASK1(20)
+#define M_BCM1480_MC_WR_ODT4_CS2 _SB_MAKEMASK1(21)
+#define M_BCM1480_MC_WR_ODT4_CS4 _SB_MAKEMASK1(22)
+#define M_BCM1480_MC_WR_ODT4_CS6 _SB_MAKEMASK1(23)
+#define M_BCM1480_MC_RD_ODT6_CS0 _SB_MAKEMASK1(24)
+#define M_BCM1480_MC_RD_ODT6_CS2 _SB_MAKEMASK1(25)
+#define M_BCM1480_MC_RD_ODT6_CS4 _SB_MAKEMASK1(26)
+#define M_BCM1480_MC_RD_ODT6_CS6 _SB_MAKEMASK1(27)
+#define M_BCM1480_MC_WR_ODT6_CS0 _SB_MAKEMASK1(28)
+#define M_BCM1480_MC_WR_ODT6_CS2 _SB_MAKEMASK1(29)
+#define M_BCM1480_MC_WR_ODT6_CS4 _SB_MAKEMASK1(30)
+#define M_BCM1480_MC_WR_ODT6_CS6 _SB_MAKEMASK1(31)
+
+#define M_BCM1480_MC_CS_ODD_ODT_EN _SB_MAKEMASK1(32)
+
+#define S_BCM1480_MC_ODT0 0
+#define M_BCM1480_MC_ODT0 _SB_MAKEMASK(8, S_BCM1480_MC_ODT0)
+#define V_BCM1480_MC_ODT0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ODT0)
+
+#define S_BCM1480_MC_ODT2 8
+#define M_BCM1480_MC_ODT2 _SB_MAKEMASK(8, S_BCM1480_MC_ODT2)
+#define V_BCM1480_MC_ODT2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ODT2)
+
+#define S_BCM1480_MC_ODT4 16
+#define M_BCM1480_MC_ODT4 _SB_MAKEMASK(8, S_BCM1480_MC_ODT4)
+#define V_BCM1480_MC_ODT4(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ODT4)
+
+#define S_BCM1480_MC_ODT6 24
+#define M_BCM1480_MC_ODT6 _SB_MAKEMASK(8, S_BCM1480_MC_ODT6)
+#define V_BCM1480_MC_ODT6(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ODT6)
+#endif
+
+/*
+ * Memory DLL Configuration Register (Table 93)
+ */
+
+#define S_BCM1480_MC_ADDR_COARSE_ADJ 0
+#define M_BCM1480_MC_ADDR_COARSE_ADJ _SB_MAKEMASK(6, S_BCM1480_MC_ADDR_COARSE_ADJ)
+#define V_BCM1480_MC_ADDR_COARSE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_COARSE_ADJ)
+#define G_BCM1480_MC_ADDR_COARSE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_ADDR_COARSE_ADJ, M_BCM1480_MC_ADDR_COARSE_ADJ)
+#define V_BCM1480_MC_ADDR_COARSE_ADJ_DEFAULT V_BCM1480_MC_ADDR_COARSE_ADJ(0x0)
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define S_BCM1480_MC_ADDR_FREQ_RANGE 8
+#define M_BCM1480_MC_ADDR_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_ADDR_FREQ_RANGE)
+#define V_BCM1480_MC_ADDR_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_FREQ_RANGE)
+#define G_BCM1480_MC_ADDR_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_ADDR_FREQ_RANGE, M_BCM1480_MC_ADDR_FREQ_RANGE)
+#define V_BCM1480_MC_ADDR_FREQ_RANGE_DEFAULT V_BCM1480_MC_ADDR_FREQ_RANGE(0x4)
+#endif
+
+#define S_BCM1480_MC_ADDR_FINE_ADJ 8
+#define M_BCM1480_MC_ADDR_FINE_ADJ _SB_MAKEMASK(4, S_BCM1480_MC_ADDR_FINE_ADJ)
+#define V_BCM1480_MC_ADDR_FINE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_FINE_ADJ)
+#define G_BCM1480_MC_ADDR_FINE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_ADDR_FINE_ADJ, M_BCM1480_MC_ADDR_FINE_ADJ)
+#define V_BCM1480_MC_ADDR_FINE_ADJ_DEFAULT V_BCM1480_MC_ADDR_FINE_ADJ(0x8)
+
+#define S_BCM1480_MC_DQI_COARSE_ADJ 16
+#define M_BCM1480_MC_DQI_COARSE_ADJ _SB_MAKEMASK(6, S_BCM1480_MC_DQI_COARSE_ADJ)
+#define V_BCM1480_MC_DQI_COARSE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_COARSE_ADJ)
+#define G_BCM1480_MC_DQI_COARSE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQI_COARSE_ADJ, M_BCM1480_MC_DQI_COARSE_ADJ)
+#define V_BCM1480_MC_DQI_COARSE_ADJ_DEFAULT V_BCM1480_MC_DQI_COARSE_ADJ(0x0)
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define S_BCM1480_MC_DQI_FREQ_RANGE 24
+#define M_BCM1480_MC_DQI_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_DQI_FREQ_RANGE)
+#define V_BCM1480_MC_DQI_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_FREQ_RANGE)
+#define G_BCM1480_MC_DQI_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_DQI_FREQ_RANGE, M_BCM1480_MC_DQI_FREQ_RANGE)
+#define V_BCM1480_MC_DQI_FREQ_RANGE_DEFAULT V_BCM1480_MC_DQI_FREQ_RANGE(0x4)
+#endif
+
+#define S_BCM1480_MC_DQI_FINE_ADJ 24
+#define M_BCM1480_MC_DQI_FINE_ADJ _SB_MAKEMASK(4, S_BCM1480_MC_DQI_FINE_ADJ)
+#define V_BCM1480_MC_DQI_FINE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_FINE_ADJ)
+#define G_BCM1480_MC_DQI_FINE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQI_FINE_ADJ, M_BCM1480_MC_DQI_FINE_ADJ)
+#define V_BCM1480_MC_DQI_FINE_ADJ_DEFAULT V_BCM1480_MC_DQI_FINE_ADJ(0x8)
+
+#define S_BCM1480_MC_DQO_COARSE_ADJ 32
+#define M_BCM1480_MC_DQO_COARSE_ADJ _SB_MAKEMASK(6, S_BCM1480_MC_DQO_COARSE_ADJ)
+#define V_BCM1480_MC_DQO_COARSE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_COARSE_ADJ)
+#define G_BCM1480_MC_DQO_COARSE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQO_COARSE_ADJ, M_BCM1480_MC_DQO_COARSE_ADJ)
+#define V_BCM1480_MC_DQO_COARSE_ADJ_DEFAULT V_BCM1480_MC_DQO_COARSE_ADJ(0x0)
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define S_BCM1480_MC_DQO_FREQ_RANGE 40
+#define M_BCM1480_MC_DQO_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_DQO_FREQ_RANGE)
+#define V_BCM1480_MC_DQO_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_FREQ_RANGE)
+#define G_BCM1480_MC_DQO_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_DQO_FREQ_RANGE, M_BCM1480_MC_DQO_FREQ_RANGE)
+#define V_BCM1480_MC_DQO_FREQ_RANGE_DEFAULT V_BCM1480_MC_DQO_FREQ_RANGE(0x4)
+#endif
+
+#define S_BCM1480_MC_DQO_FINE_ADJ 40
+#define M_BCM1480_MC_DQO_FINE_ADJ _SB_MAKEMASK(4, S_BCM1480_MC_DQO_FINE_ADJ)
+#define V_BCM1480_MC_DQO_FINE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_FINE_ADJ)
+#define G_BCM1480_MC_DQO_FINE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQO_FINE_ADJ, M_BCM1480_MC_DQO_FINE_ADJ)
+#define V_BCM1480_MC_DQO_FINE_ADJ_DEFAULT V_BCM1480_MC_DQO_FINE_ADJ(0x8)
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define S_BCM1480_MC_DLL_PDSEL 44
+#define M_BCM1480_MC_DLL_PDSEL _SB_MAKEMASK(2, S_BCM1480_MC_DLL_PDSEL)
+#define V_BCM1480_MC_DLL_PDSEL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_PDSEL)
+#define G_BCM1480_MC_DLL_PDSEL(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_PDSEL, M_BCM1480_MC_DLL_PDSEL)
+#define V_BCM1480_MC_DLL_DEFAULT_PDSEL V_BCM1480_MC_DLL_PDSEL(0x0)
+
+#define M_BCM1480_MC_DLL_REGBYPASS _SB_MAKEMASK1(46)
+#define M_BCM1480_MC_DQO_SHIFT _SB_MAKEMASK1(47)
+#endif
+
+#define S_BCM1480_MC_DLL_DEFAULT 48
+#define M_BCM1480_MC_DLL_DEFAULT _SB_MAKEMASK(6, S_BCM1480_MC_DLL_DEFAULT)
+#define V_BCM1480_MC_DLL_DEFAULT(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_DEFAULT)
+#define G_BCM1480_MC_DLL_DEFAULT(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_DEFAULT, M_BCM1480_MC_DLL_DEFAULT)
+#define V_BCM1480_MC_DLL_DEFAULT_DEFAULT V_BCM1480_MC_DLL_DEFAULT(0x10)
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define S_BCM1480_MC_DLL_REGCTRL 54
+#define M_BCM1480_MC_DLL_REGCTRL _SB_MAKEMASK(2, S_BCM1480_MC_DLL_REGCTRL)
+#define V_BCM1480_MC_DLL_REGCTRL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_REGCTRL)
+#define G_BCM1480_MC_DLL_REGCTRL(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_REGCTRL, M_BCM1480_MC_DLL_REGCTRL)
+#define V_BCM1480_MC_DLL_DEFAULT_REGCTRL V_BCM1480_MC_DLL_REGCTRL(0x0)
+#endif
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define S_BCM1480_MC_DLL_FREQ_RANGE 56
+#define M_BCM1480_MC_DLL_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_DLL_FREQ_RANGE)
+#define V_BCM1480_MC_DLL_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_FREQ_RANGE)
+#define G_BCM1480_MC_DLL_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_FREQ_RANGE, M_BCM1480_MC_DLL_FREQ_RANGE)
+#define V_BCM1480_MC_DLL_FREQ_RANGE_DEFAULT V_BCM1480_MC_DLL_FREQ_RANGE(0x4)
+#endif
+
+#define S_BCM1480_MC_DLL_STEP_SIZE 56
+#define M_BCM1480_MC_DLL_STEP_SIZE _SB_MAKEMASK(4, S_BCM1480_MC_DLL_STEP_SIZE)
+#define V_BCM1480_MC_DLL_STEP_SIZE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_STEP_SIZE)
+#define G_BCM1480_MC_DLL_STEP_SIZE(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_STEP_SIZE, M_BCM1480_MC_DLL_STEP_SIZE)
+#define V_BCM1480_MC_DLL_STEP_SIZE_DEFAULT V_BCM1480_MC_DLL_STEP_SIZE(0x8)
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define S_BCM1480_MC_DLL_BGCTRL 60
+#define M_BCM1480_MC_DLL_BGCTRL _SB_MAKEMASK(2, S_BCM1480_MC_DLL_BGCTRL)
+#define V_BCM1480_MC_DLL_BGCTRL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_BGCTRL)
+#define G_BCM1480_MC_DLL_BGCTRL(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_BGCTRL, M_BCM1480_MC_DLL_BGCTRL)
+#define V_BCM1480_MC_DLL_DEFAULT_BGCTRL V_BCM1480_MC_DLL_BGCTRL(0x0)
+#endif
+
+#define M_BCM1480_MC_DLL_BYPASS _SB_MAKEMASK1(63)
+
+/*
+ * Memory Drive Configuration Register (Table 94)
+ */
+
+#define S_BCM1480_MC_RTT_BYP_PULLDOWN 0
+#define M_BCM1480_MC_RTT_BYP_PULLDOWN _SB_MAKEMASK(3, S_BCM1480_MC_RTT_BYP_PULLDOWN)
+#define V_BCM1480_MC_RTT_BYP_PULLDOWN(x) _SB_MAKEVALUE(x, S_BCM1480_MC_RTT_BYP_PULLDOWN)
+#define G_BCM1480_MC_RTT_BYP_PULLDOWN(x) _SB_GETVALUE(x, S_BCM1480_MC_RTT_BYP_PULLDOWN, M_BCM1480_MC_RTT_BYP_PULLDOWN)
+
+#define S_BCM1480_MC_RTT_BYP_PULLUP 6
+#define M_BCM1480_MC_RTT_BYP_PULLUP _SB_MAKEMASK(3, S_BCM1480_MC_RTT_BYP_PULLUP)
+#define V_BCM1480_MC_RTT_BYP_PULLUP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_RTT_BYP_PULLUP)
+#define G_BCM1480_MC_RTT_BYP_PULLUP(x) _SB_GETVALUE(x, S_BCM1480_MC_RTT_BYP_PULLUP, M_BCM1480_MC_RTT_BYP_PULLUP)
+
+#define M_BCM1480_MC_RTT_BYPASS _SB_MAKEMASK1(8)
+#define M_BCM1480_MC_RTT_COMP_MOV_AVG _SB_MAKEMASK1(9)
+
+#define S_BCM1480_MC_PVT_BYP_C1_PULLDOWN 10
+#define M_BCM1480_MC_PVT_BYP_C1_PULLDOWN _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C1_PULLDOWN)
+#define V_BCM1480_MC_PVT_BYP_C1_PULLDOWN(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PVT_BYP_C1_PULLDOWN)
+#define G_BCM1480_MC_PVT_BYP_C1_PULLDOWN(x) _SB_GETVALUE(x, S_BCM1480_MC_PVT_BYP_C1_PULLDOWN, M_BCM1480_MC_PVT_BYP_C1_PULLDOWN)
+
+#define S_BCM1480_MC_PVT_BYP_C1_PULLUP 15
+#define M_BCM1480_MC_PVT_BYP_C1_PULLUP _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C1_PULLUP)
+#define V_BCM1480_MC_PVT_BYP_C1_PULLUP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PVT_BYP_C1_PULLUP)
+#define G_BCM1480_MC_PVT_BYP_C1_PULLUP(x) _SB_GETVALUE(x, S_BCM1480_MC_PVT_BYP_C1_PULLUP, M_BCM1480_MC_PVT_BYP_C1_PULLUP)
+
+#define S_BCM1480_MC_PVT_BYP_C2_PULLDOWN 20
+#define M_BCM1480_MC_PVT_BYP_C2_PULLDOWN _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C2_PULLDOWN)
+#define V_BCM1480_MC_PVT_BYP_C2_PULLDOWN(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PVT_BYP_C2_PULLDOWN)
+#define G_BCM1480_MC_PVT_BYP_C2_PULLDOWN(x) _SB_GETVALUE(x, S_BCM1480_MC_PVT_BYP_C2_PULLDOWN, M_BCM1480_MC_PVT_BYP_C2_PULLDOWN)
+
+#define S_BCM1480_MC_PVT_BYP_C2_PULLUP 25
+#define M_BCM1480_MC_PVT_BYP_C2_PULLUP _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C2_PULLUP)
+#define V_BCM1480_MC_PVT_BYP_C2_PULLUP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PVT_BYP_C2_PULLUP)
+#define G_BCM1480_MC_PVT_BYP_C2_PULLUP(x) _SB_GETVALUE(x, S_BCM1480_MC_PVT_BYP_C2_PULLUP, M_BCM1480_MC_PVT_BYP_C2_PULLUP)
+
+#define M_BCM1480_MC_PVT_BYPASS _SB_MAKEMASK1(30)
+#define M_BCM1480_MC_PVT_COMP_MOV_AVG _SB_MAKEMASK1(31)
+
+#define M_BCM1480_MC_CLK_CLASS _SB_MAKEMASK1(34)
+#define M_BCM1480_MC_DATA_CLASS _SB_MAKEMASK1(35)
+#define M_BCM1480_MC_ADDR_CLASS _SB_MAKEMASK1(36)
+
+#define M_BCM1480_MC_DQ_ODT_75 _SB_MAKEMASK1(37)
+#define M_BCM1480_MC_DQ_ODT_150 _SB_MAKEMASK1(38)
+#define M_BCM1480_MC_DQS_ODT_75 _SB_MAKEMASK1(39)
+#define M_BCM1480_MC_DQS_ODT_150 _SB_MAKEMASK1(40)
+#define M_BCM1480_MC_DQS_DIFF _SB_MAKEMASK1(41)
+
+/*
+ * ECC Test Data Register (Table 95)
+ */
+
+#define S_BCM1480_MC_DATA_INVERT 0
+#define M_DATA_ECC_INVERT _SB_MAKEMASK(64, S_BCM1480_MC_DATA_INVERT)
+
+/*
+ * ECC Test ECC Register (Table 96)
+ */
+
+#define S_BCM1480_MC_ECC_INVERT 0
+#define M_BCM1480_MC_ECC_INVERT _SB_MAKEMASK(8, S_BCM1480_MC_ECC_INVERT)
+
+/*
+ * SDRAM Timing Register (Table 97)
+ */
+
+#define S_BCM1480_MC_tRCD 0
+#define M_BCM1480_MC_tRCD _SB_MAKEMASK(4, S_BCM1480_MC_tRCD)
+#define V_BCM1480_MC_tRCD(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRCD)
+#define G_BCM1480_MC_tRCD(x) _SB_GETVALUE(x, S_BCM1480_MC_tRCD, M_BCM1480_MC_tRCD)
+#define K_BCM1480_MC_tRCD_DEFAULT 3
+#define V_BCM1480_MC_tRCD_DEFAULT V_BCM1480_MC_tRCD(K_BCM1480_MC_tRCD_DEFAULT)
+
+#define S_BCM1480_MC_tCL 4
+#define M_BCM1480_MC_tCL _SB_MAKEMASK(4, S_BCM1480_MC_tCL)
+#define V_BCM1480_MC_tCL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tCL)
+#define G_BCM1480_MC_tCL(x) _SB_GETVALUE(x, S_BCM1480_MC_tCL, M_BCM1480_MC_tCL)
+#define K_BCM1480_MC_tCL_DEFAULT 2
+#define V_BCM1480_MC_tCL_DEFAULT V_BCM1480_MC_tCL(K_BCM1480_MC_tCL_DEFAULT)
+
+#define M_BCM1480_MC_tCrDh _SB_MAKEMASK1(8)
+
+#define S_BCM1480_MC_tWR 9
+#define M_BCM1480_MC_tWR _SB_MAKEMASK(3, S_BCM1480_MC_tWR)
+#define V_BCM1480_MC_tWR(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tWR)
+#define G_BCM1480_MC_tWR(x) _SB_GETVALUE(x, S_BCM1480_MC_tWR, M_BCM1480_MC_tWR)
+#define K_BCM1480_MC_tWR_DEFAULT 2
+#define V_BCM1480_MC_tWR_DEFAULT V_BCM1480_MC_tWR(K_BCM1480_MC_tWR_DEFAULT)
+
+#define S_BCM1480_MC_tCwD 12
+#define M_BCM1480_MC_tCwD _SB_MAKEMASK(4, S_BCM1480_MC_tCwD)
+#define V_BCM1480_MC_tCwD(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tCwD)
+#define G_BCM1480_MC_tCwD(x) _SB_GETVALUE(x, S_BCM1480_MC_tCwD, M_BCM1480_MC_tCwD)
+#define K_BCM1480_MC_tCwD_DEFAULT 1
+#define V_BCM1480_MC_tCwD_DEFAULT V_BCM1480_MC_tCwD(K_BCM1480_MC_tCwD_DEFAULT)
+
+#define S_BCM1480_MC_tRP 16
+#define M_BCM1480_MC_tRP _SB_MAKEMASK(4, S_BCM1480_MC_tRP)
+#define V_BCM1480_MC_tRP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRP)
+#define G_BCM1480_MC_tRP(x) _SB_GETVALUE(x, S_BCM1480_MC_tRP, M_BCM1480_MC_tRP)
+#define K_BCM1480_MC_tRP_DEFAULT 4
+#define V_BCM1480_MC_tRP_DEFAULT V_BCM1480_MC_tRP(K_BCM1480_MC_tRP_DEFAULT)
+
+#define S_BCM1480_MC_tRRD 20
+#define M_BCM1480_MC_tRRD _SB_MAKEMASK(4, S_BCM1480_MC_tRRD)
+#define V_BCM1480_MC_tRRD(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRRD)
+#define G_BCM1480_MC_tRRD(x) _SB_GETVALUE(x, S_BCM1480_MC_tRRD, M_BCM1480_MC_tRRD)
+#define K_BCM1480_MC_tRRD_DEFAULT 2
+#define V_BCM1480_MC_tRRD_DEFAULT V_BCM1480_MC_tRRD(K_BCM1480_MC_tRRD_DEFAULT)
+
+#define S_BCM1480_MC_tRCw 24
+#define M_BCM1480_MC_tRCw _SB_MAKEMASK(5, S_BCM1480_MC_tRCw)
+#define V_BCM1480_MC_tRCw(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRCw)
+#define G_BCM1480_MC_tRCw(x) _SB_GETVALUE(x, S_BCM1480_MC_tRCw, M_BCM1480_MC_tRCw)
+#define K_BCM1480_MC_tRCw_DEFAULT 10
+#define V_BCM1480_MC_tRCw_DEFAULT V_BCM1480_MC_tRCw(K_BCM1480_MC_tRCw_DEFAULT)
+
+#define S_BCM1480_MC_tRCr 32
+#define M_BCM1480_MC_tRCr _SB_MAKEMASK(5, S_BCM1480_MC_tRCr)
+#define V_BCM1480_MC_tRCr(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRCr)
+#define G_BCM1480_MC_tRCr(x) _SB_GETVALUE(x, S_BCM1480_MC_tRCr, M_BCM1480_MC_tRCr)
+#define K_BCM1480_MC_tRCr_DEFAULT 9
+#define V_BCM1480_MC_tRCr_DEFAULT V_BCM1480_MC_tRCr(K_BCM1480_MC_tRCr_DEFAULT)
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define S_BCM1480_MC_tFAW 40
+#define M_BCM1480_MC_tFAW _SB_MAKEMASK(6, S_BCM1480_MC_tFAW)
+#define V_BCM1480_MC_tFAW(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tFAW)
+#define G_BCM1480_MC_tFAW(x) _SB_GETVALUE(x, S_BCM1480_MC_tFAW, M_BCM1480_MC_tFAW)
+#define K_BCM1480_MC_tFAW_DEFAULT 0
+#define V_BCM1480_MC_tFAW_DEFAULT V_BCM1480_MC_tFAW(K_BCM1480_MC_tFAW_DEFAULT)
+#endif
+
+#define S_BCM1480_MC_tRFC 48
+#define M_BCM1480_MC_tRFC _SB_MAKEMASK(7, S_BCM1480_MC_tRFC)
+#define V_BCM1480_MC_tRFC(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRFC)
+#define G_BCM1480_MC_tRFC(x) _SB_GETVALUE(x, S_BCM1480_MC_tRFC, M_BCM1480_MC_tRFC)
+#define K_BCM1480_MC_tRFC_DEFAULT 12
+#define V_BCM1480_MC_tRFC_DEFAULT V_BCM1480_MC_tRFC(K_BCM1480_MC_tRFC_DEFAULT)
+
+#define S_BCM1480_MC_tFIFO 56
+#define M_BCM1480_MC_tFIFO _SB_MAKEMASK(2, S_BCM1480_MC_tFIFO)
+#define V_BCM1480_MC_tFIFO(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tFIFO)
+#define G_BCM1480_MC_tFIFO(x) _SB_GETVALUE(x, S_BCM1480_MC_tFIFO, M_BCM1480_MC_tFIFO)
+#define K_BCM1480_MC_tFIFO_DEFAULT 0
+#define V_BCM1480_MC_tFIFO_DEFAULT V_BCM1480_MC_tFIFO(K_BCM1480_MC_tFIFO_DEFAULT)
+
+#define S_BCM1480_MC_tW2R 58
+#define M_BCM1480_MC_tW2R _SB_MAKEMASK(2, S_BCM1480_MC_tW2R)
+#define V_BCM1480_MC_tW2R(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tW2R)
+#define G_BCM1480_MC_tW2R(x) _SB_GETVALUE(x, S_BCM1480_MC_tW2R, M_BCM1480_MC_tW2R)
+#define K_BCM1480_MC_tW2R_DEFAULT 1
+#define V_BCM1480_MC_tW2R_DEFAULT V_BCM1480_MC_tW2R(K_BCM1480_MC_tW2R_DEFAULT)
+
+#define S_BCM1480_MC_tR2W 60
+#define M_BCM1480_MC_tR2W _SB_MAKEMASK(2, S_BCM1480_MC_tR2W)
+#define V_BCM1480_MC_tR2W(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tR2W)
+#define G_BCM1480_MC_tR2W(x) _SB_GETVALUE(x, S_BCM1480_MC_tR2W, M_BCM1480_MC_tR2W)
+#define K_BCM1480_MC_tR2W_DEFAULT 0
+#define V_BCM1480_MC_tR2W_DEFAULT V_BCM1480_MC_tR2W(K_BCM1480_MC_tR2W_DEFAULT)
+
+#define M_BCM1480_MC_tR2R _SB_MAKEMASK1(62)
+
+#define V_BCM1480_MC_TIMING_DEFAULT (M_BCM1480_MC_tR2R | \
+ V_BCM1480_MC_tFIFO_DEFAULT | \
+ V_BCM1480_MC_tR2W_DEFAULT | \
+ V_BCM1480_MC_tW2R_DEFAULT | \
+ V_BCM1480_MC_tRFC_DEFAULT | \
+ V_BCM1480_MC_tRCr_DEFAULT | \
+ V_BCM1480_MC_tRCw_DEFAULT | \
+ V_BCM1480_MC_tRRD_DEFAULT | \
+ V_BCM1480_MC_tRP_DEFAULT | \
+ V_BCM1480_MC_tCwD_DEFAULT | \
+ V_BCM1480_MC_tWR_DEFAULT | \
+ M_BCM1480_MC_tCrDh | \
+ V_BCM1480_MC_tCL_DEFAULT | \
+ V_BCM1480_MC_tRCD_DEFAULT)
+
+/*
+ * SDRAM Timing Register 2
+ */
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+
+#define S_BCM1480_MC_tAL 0
+#define M_BCM1480_MC_tAL _SB_MAKEMASK(4, S_BCM1480_MC_tAL)
+#define V_BCM1480_MC_tAL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tAL)
+#define G_BCM1480_MC_tAL(x) _SB_GETVALUE(x, S_BCM1480_MC_tAL, M_BCM1480_MC_tAL)
+#define K_BCM1480_MC_tAL_DEFAULT 0
+#define V_BCM1480_MC_tAL_DEFAULT V_BCM1480_MC_tAL(K_BCM1480_MC_tAL_DEFAULT)
+
+#define S_BCM1480_MC_tRTP 4
+#define M_BCM1480_MC_tRTP _SB_MAKEMASK(3, S_BCM1480_MC_tRTP)
+#define V_BCM1480_MC_tRTP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRTP)
+#define G_BCM1480_MC_tRTP(x) _SB_GETVALUE(x, S_BCM1480_MC_tRTP, M_BCM1480_MC_tRTP)
+#define K_BCM1480_MC_tRTP_DEFAULT 2
+#define V_BCM1480_MC_tRTP_DEFAULT V_BCM1480_MC_tRTP(K_BCM1480_MC_tRTP_DEFAULT)
+
+#define S_BCM1480_MC_tW2W 8
+#define M_BCM1480_MC_tW2W _SB_MAKEMASK(2, S_BCM1480_MC_tW2W)
+#define V_BCM1480_MC_tW2W(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tW2W)
+#define G_BCM1480_MC_tW2W(x) _SB_GETVALUE(x, S_BCM1480_MC_tW2W, M_BCM1480_MC_tW2W)
+#define K_BCM1480_MC_tW2W_DEFAULT 0
+#define V_BCM1480_MC_tW2W_DEFAULT V_BCM1480_MC_tW2W(K_BCM1480_MC_tW2W_DEFAULT)
+
+#define S_BCM1480_MC_tRAP 12
+#define M_BCM1480_MC_tRAP _SB_MAKEMASK(4, S_BCM1480_MC_tRAP)
+#define V_BCM1480_MC_tRAP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRAP)
+#define G_BCM1480_MC_tRAP(x) _SB_GETVALUE(x, S_BCM1480_MC_tRAP, M_BCM1480_MC_tRAP)
+#define K_BCM1480_MC_tRAP_DEFAULT 0
+#define V_BCM1480_MC_tRAP_DEFAULT V_BCM1480_MC_tRAP(K_BCM1480_MC_tRAP_DEFAULT)
+
+#endif
+
+
+
+/*
+ * Global Registers: single instances per BCM1480
+ */
+
+/*
+ * Global Configuration Register (Table 99)
+ */
+
+#define S_BCM1480_MC_BLK_SET_MARK 8
+#define M_BCM1480_MC_BLK_SET_MARK _SB_MAKEMASK(4, S_BCM1480_MC_BLK_SET_MARK)
+#define V_BCM1480_MC_BLK_SET_MARK(x) _SB_MAKEVALUE(x, S_BCM1480_MC_BLK_SET_MARK)
+#define G_BCM1480_MC_BLK_SET_MARK(x) _SB_GETVALUE(x, S_BCM1480_MC_BLK_SET_MARK, M_BCM1480_MC_BLK_SET_MARK)
+
+#define S_BCM1480_MC_BLK_CLR_MARK 12
+#define M_BCM1480_MC_BLK_CLR_MARK _SB_MAKEMASK(4, S_BCM1480_MC_BLK_CLR_MARK)
+#define V_BCM1480_MC_BLK_CLR_MARK(x) _SB_MAKEVALUE(x, S_BCM1480_MC_BLK_CLR_MARK)
+#define G_BCM1480_MC_BLK_CLR_MARK(x) _SB_GETVALUE(x, S_BCM1480_MC_BLK_CLR_MARK, M_BCM1480_MC_BLK_CLR_MARK)
+
+#define M_BCM1480_MC_PKT_PRIORITY _SB_MAKEMASK1(16)
+
+#define S_BCM1480_MC_MAX_AGE 20
+#define M_BCM1480_MC_MAX_AGE _SB_MAKEMASK(4, S_BCM1480_MC_MAX_AGE)
+#define V_BCM1480_MC_MAX_AGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_MAX_AGE)
+#define G_BCM1480_MC_MAX_AGE(x) _SB_GETVALUE(x, S_BCM1480_MC_MAX_AGE, M_BCM1480_MC_MAX_AGE)
+
+#define M_BCM1480_MC_BERR_DISABLE _SB_MAKEMASK1(29)
+#define M_BCM1480_MC_FORCE_SEQ _SB_MAKEMASK1(30)
+#define M_BCM1480_MC_VGEN _SB_MAKEMASK1(32)
+
+#define S_BCM1480_MC_SLEW 33
+#define M_BCM1480_MC_SLEW _SB_MAKEMASK(2, S_BCM1480_MC_SLEW)
+#define V_BCM1480_MC_SLEW(x) _SB_MAKEVALUE(x, S_BCM1480_MC_SLEW)
+#define G_BCM1480_MC_SLEW(x) _SB_GETVALUE(x, S_BCM1480_MC_SLEW, M_BCM1480_MC_SLEW)
+
+#define M_BCM1480_MC_SSTL_VOLTAGE _SB_MAKEMASK1(35)
+
+/*
+ * Global Channel Interleave Register (Table 100)
+ */
+
+#define S_BCM1480_MC_INTLV0 0
+#define M_BCM1480_MC_INTLV0 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV0)
+#define V_BCM1480_MC_INTLV0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV0)
+#define G_BCM1480_MC_INTLV0(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV0, M_BCM1480_MC_INTLV0)
+
+#define S_BCM1480_MC_INTLV1 8
+#define M_BCM1480_MC_INTLV1 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV1)
+#define V_BCM1480_MC_INTLV1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV1)
+#define G_BCM1480_MC_INTLV1(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV1, M_BCM1480_MC_INTLV1)
+
+#define S_BCM1480_MC_INTLV_MODE 16
+#define M_BCM1480_MC_INTLV_MODE _SB_MAKEMASK(3, S_BCM1480_MC_INTLV_MODE)
+#define V_BCM1480_MC_INTLV_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV_MODE)
+#define G_BCM1480_MC_INTLV_MODE(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV_MODE, M_BCM1480_MC_INTLV_MODE)
+
+#define K_BCM1480_MC_INTLV_MODE_NONE 0x0
+#define K_BCM1480_MC_INTLV_MODE_01 0x1
+#define K_BCM1480_MC_INTLV_MODE_23 0x2
+#define K_BCM1480_MC_INTLV_MODE_01_23 0x3
+#define K_BCM1480_MC_INTLV_MODE_0123 0x4
+
+#define V_BCM1480_MC_INTLV_MODE_NONE V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_NONE)
+#define V_BCM1480_MC_INTLV_MODE_01 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_01)
+#define V_BCM1480_MC_INTLV_MODE_23 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_23)
+#define V_BCM1480_MC_INTLV_MODE_01_23 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_01_23)
+#define V_BCM1480_MC_INTLV_MODE_0123 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_0123)
+
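+/*
+ * Illustrative sketch only (not part of the original header): selecting
+ * four-way channel interleaving would combine the mode and address-bit
+ * fields above roughly as below; the bit numbers are placeholders.
+ */
+#if 0
+static inline uint64_t bcm1480_mc_glb_intlv_sketch(void)
+{
+	return V_BCM1480_MC_INTLV_MODE_0123 |
+	       V_BCM1480_MC_INTLV0(7) |		/* placeholder address bit */
+	       V_BCM1480_MC_INTLV1(8);		/* placeholder address bit */
+}
+#endif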
+/*
+ * ECC Status Register
+ */
+
+#define S_BCM1480_MC_ECC_ERR_ADDR 0
+#define M_BCM1480_MC_ECC_ERR_ADDR _SB_MAKEMASK(37, S_BCM1480_MC_ECC_ERR_ADDR)
+#define V_BCM1480_MC_ECC_ERR_ADDR(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_ERR_ADDR)
+#define G_BCM1480_MC_ECC_ERR_ADDR(x) _SB_GETVALUE(x, S_BCM1480_MC_ECC_ERR_ADDR, M_BCM1480_MC_ECC_ERR_ADDR)
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define M_BCM1480_MC_ECC_ERR_RMW _SB_MAKEMASK1(60)
+#endif
+
+#define M_BCM1480_MC_ECC_MULT_ERR_DET _SB_MAKEMASK1(61)
+#define M_BCM1480_MC_ECC_UERR_DET _SB_MAKEMASK1(62)
+#define M_BCM1480_MC_ECC_CERR_DET _SB_MAKEMASK1(63)
+
+/*
+ * Global ECC Address Register (Table 102)
+ */
+
+#define S_BCM1480_MC_ECC_CORR_ADDR 0
+#define M_BCM1480_MC_ECC_CORR_ADDR _SB_MAKEMASK(37, S_BCM1480_MC_ECC_CORR_ADDR)
+#define V_BCM1480_MC_ECC_CORR_ADDR(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_CORR_ADDR)
+#define G_BCM1480_MC_ECC_CORR_ADDR(x) _SB_GETVALUE(x, S_BCM1480_MC_ECC_CORR_ADDR, M_BCM1480_MC_ECC_CORR_ADDR)
+
+/*
+ * Global ECC Correction Register (Table 103)
+ */
+
+#define S_BCM1480_MC_ECC_CORRECT 0
+#define M_BCM1480_MC_ECC_CORRECT _SB_MAKEMASK(64, S_BCM1480_MC_ECC_CORRECT)
+#define V_BCM1480_MC_ECC_CORRECT(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_CORRECT)
+#define G_BCM1480_MC_ECC_CORRECT(x) _SB_GETVALUE(x, S_BCM1480_MC_ECC_CORRECT, M_BCM1480_MC_ECC_CORRECT)
+
+/*
+ * Global ECC Performance Counters Control Register (Table 104)
+ */
+
+#define S_BCM1480_MC_CHANNEL_SELECT 0
+#define M_BCM1480_MC_CHANNEL_SELECT _SB_MAKEMASK(4, S_BCM1480_MC_CHANNEL_SELECT)
+#define V_BCM1480_MC_CHANNEL_SELECT(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CHANNEL_SELECT)
+#define G_BCM1480_MC_CHANNEL_SELECT(x) _SB_GETVALUE(x, S_BCM1480_MC_CHANNEL_SELECT, M_BCM1480_MC_CHANNEL_SELECT)
+#define K_BCM1480_MC_CHANNEL_SELECT_0 0x1
+#define K_BCM1480_MC_CHANNEL_SELECT_1 0x2
+#define K_BCM1480_MC_CHANNEL_SELECT_2 0x4
+#define K_BCM1480_MC_CHANNEL_SELECT_3 0x8
+
+#endif /* _BCM1480_MC_H */
diff --git a/arch/mips/include/asm/sibyte/bcm1480_regs.h b/arch/mips/include/asm/sibyte/bcm1480_regs.h
new file mode 100644
index 000000000..ef12ede26
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/bcm1480_regs.h
@@ -0,0 +1,889 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * BCM1255/BCM1280/BCM1455/BCM1480 Board Support Package
+ *
+ * Register Definitions File: bcm1480_regs.h
+ *
+ * This module contains the addresses of the on-chip peripherals
+ * on the BCM1280 and BCM1480.
+ *
+ * BCM1480 specification level: 1X55_1X80-UM100-D4 (11/24/03)
+ *
+ *********************************************************************
+ *
+ * Copyright 2000,2001,2002,2003
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+#ifndef _BCM1480_REGS_H
+#define _BCM1480_REGS_H
+
+#include <asm/sibyte/sb1250_defs.h>
+
+/* *********************************************************************
+ * Pull in the BCM1250's registers since a great deal of the 1480's
+ * functions are the same as the BCM1250.
+ ********************************************************************* */
+
+#include <asm/sibyte/sb1250_regs.h>
+
+
+/* *********************************************************************
+ * Some general notes:
+ *
+ * Register addresses are grouped by function and follow the order
+ * of the User Manual.
+ *
+ * For the most part, when there is more than one peripheral
+ * of the same type on the SOC, the constants below will be
+ * offsets from the base of each peripheral. For example,
+ * the MAC registers are described as offsets from the first
+ * MAC register, and there will be a MAC_REGISTER() macro
+ * to calculate the base address of a given MAC.
+ *
+ * The information in this file is based on the BCM1X55/BCM1X80
+ * User Manual, Document 1X55_1X80-UM100-R, 22/12/03.
+ *
+ * This file is basically a "what's new" header file. Since the
+ * BCM1250 and the new BCM1480 (and derivatives) share many common
+ * features, this file contains only what's new or changed from
+ * the 1250. (above, you can see that we include the 1250 symbols
+ * to get the base functionality).
+ *
+ * In software, be sure to use the correct symbols, particularly
+ * for blocks that are different between the two chip families.
+ * All BCM1480-specific symbols have _BCM1480_ in their names,
+ * and all BCM1250-specific and "base" functions that are common in
+ * both chips have no special names (this is for compatibility with
+ * older include files). Therefore, if you're working with the
+ * SCD, which is very different on each chip, A_SCD_xxx implies
+ * the BCM1250 version and A_BCM1480_SCD_xxx implies the BCM1480
+ * version.
+ ********************************************************************* */
+
+
+/* *********************************************************************
+ * Memory Controller Registers (Section 6)
+ ********************************************************************* */
+
+#define A_BCM1480_MC_BASE_0 0x0010050000
+#define A_BCM1480_MC_BASE_1 0x0010051000
+#define A_BCM1480_MC_BASE_2 0x0010052000
+#define A_BCM1480_MC_BASE_3 0x0010053000
+#define BCM1480_MC_REGISTER_SPACING 0x1000
+
+#define A_BCM1480_MC_BASE(ctlid) (A_BCM1480_MC_BASE_0+(ctlid)*BCM1480_MC_REGISTER_SPACING)
+#define A_BCM1480_MC_REGISTER(ctlid, reg) (A_BCM1480_MC_BASE(ctlid)+(reg))
+
+#define R_BCM1480_MC_CONFIG 0x0000000100
+#define R_BCM1480_MC_CS_START 0x0000000120
+#define R_BCM1480_MC_CS_END 0x0000000140
+#define S_BCM1480_MC_CS_STARTEND 24
+
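+/*
+ * Illustrative sketch only (not part of the original header): the
+ * R_BCM1480_MC_* offsets are meant to be combined with
+ * A_BCM1480_MC_REGISTER(); the example below resolves to
+ * A_BCM1480_MC_BASE_1 + R_BCM1480_MC_CONFIG (0x0010051100).
+ */
+#if 0
+static inline uint64_t bcm1480_mc_cfg_reg_sketch(void)
+{
+	return A_BCM1480_MC_REGISTER(1, R_BCM1480_MC_CONFIG);
+}
+#endif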
+#define R_BCM1480_MC_CS01_ROW0 0x0000000180
+#define R_BCM1480_MC_CS01_ROW1 0x00000001A0
+#define R_BCM1480_MC_CS23_ROW0 0x0000000200
+#define R_BCM1480_MC_CS23_ROW1 0x0000000220
+#define R_BCM1480_MC_CS01_COL0 0x0000000280
+#define R_BCM1480_MC_CS01_COL1 0x00000002A0
+#define R_BCM1480_MC_CS23_COL0 0x0000000300
+#define R_BCM1480_MC_CS23_COL1 0x0000000320
+
+#define R_BCM1480_MC_CSX_BASE 0x0000000180
+#define R_BCM1480_MC_CSX_ROW0 0x0000000000 /* relative to CSX_BASE */
+#define R_BCM1480_MC_CSX_ROW1 0x0000000020 /* relative to CSX_BASE */
+#define R_BCM1480_MC_CSX_COL0 0x0000000100 /* relative to CSX_BASE */
+#define R_BCM1480_MC_CSX_COL1 0x0000000120 /* relative to CSX_BASE */
+#define BCM1480_MC_CSX_SPACING 0x0000000080 /* CS23 relative to CS01 */
+
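+/*
+ * Illustrative note (not part of the original header): the relative
+ * offsets above compose with R_BCM1480_MC_CSX_BASE and
+ * BCM1480_MC_CSX_SPACING, e.g.
+ *   R_BCM1480_MC_CSX_BASE + BCM1480_MC_CSX_SPACING + R_BCM1480_MC_CSX_ROW0
+ * equals R_BCM1480_MC_CS23_ROW0 (0x0000000200).
+ */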
+#define R_BCM1480_MC_CS01_BA 0x0000000380
+#define R_BCM1480_MC_CS23_BA 0x00000003A0
+#define R_BCM1480_MC_DRAMCMD 0x0000000400
+#define R_BCM1480_MC_DRAMMODE 0x0000000420
+#define R_BCM1480_MC_CLOCK_CFG 0x0000000440
+#define R_BCM1480_MC_MCLK_CFG R_BCM1480_MC_CLOCK_CFG
+#define R_BCM1480_MC_TEST_DATA 0x0000000480
+#define R_BCM1480_MC_TEST_ECC 0x00000004A0
+#define R_BCM1480_MC_TIMING1 0x00000004C0
+#define R_BCM1480_MC_TIMING2 0x00000004E0
+#define R_BCM1480_MC_DLL_CFG 0x0000000500
+#define R_BCM1480_MC_DRIVE_CFG 0x0000000520
+
+#if SIBYTE_HDR_FEATURE(1480, PASS2)
+#define R_BCM1480_MC_ODT 0x0000000460
+#define R_BCM1480_MC_ECC_STATUS 0x0000000540
+#endif
+
+/* Global registers (single instance) */
+#define A_BCM1480_MC_GLB_CONFIG 0x0010054100
+#define A_BCM1480_MC_GLB_INTLV 0x0010054120
+#define A_BCM1480_MC_GLB_ECC_STATUS 0x0010054140
+#define A_BCM1480_MC_GLB_ECC_ADDR 0x0010054160
+#define A_BCM1480_MC_GLB_ECC_CORRECT 0x0010054180
+#define A_BCM1480_MC_GLB_PERF_CNT_CONTROL 0x00100541A0
+
+/* *********************************************************************
+ * L2 Cache Control Registers (Section 5)
+ ********************************************************************* */
+
+#define A_BCM1480_L2_BASE 0x0010040000
+
+#define A_BCM1480_L2_READ_TAG 0x0010040018
+#define A_BCM1480_L2_ECC_TAG 0x0010040038
+#define A_BCM1480_L2_MISC0_VALUE 0x0010040058
+#define A_BCM1480_L2_MISC1_VALUE 0x0010040078
+#define A_BCM1480_L2_MISC2_VALUE 0x0010040098
+#define A_BCM1480_L2_MISC_CONFIG 0x0010040040 /* x040 */
+#define A_BCM1480_L2_CACHE_DISABLE 0x0010040060 /* x060 */
+#define A_BCM1480_L2_MAKECACHEDISABLE(x) (A_BCM1480_L2_CACHE_DISABLE | (((x)&0xF) << 12))
+#define A_BCM1480_L2_WAY_ENABLE_3_0 0x0010040080 /* x080 */
+#define A_BCM1480_L2_WAY_ENABLE_7_4 0x00100400A0 /* x0A0 */
+#define A_BCM1480_L2_MAKE_WAY_ENABLE_LO(x) (A_BCM1480_L2_WAY_ENABLE_3_0 | (((x)&0xF) << 12))
+#define A_BCM1480_L2_MAKE_WAY_ENABLE_HI(x) (A_BCM1480_L2_WAY_ENABLE_7_4 | (((x)&0xF) << 12))
+#define A_BCM1480_L2_MAKE_WAY_DISABLE_LO(x) (A_BCM1480_L2_WAY_ENABLE_3_0 | (((~x)&0xF) << 12))
+#define A_BCM1480_L2_MAKE_WAY_DISABLE_HI(x) (A_BCM1480_L2_WAY_ENABLE_7_4 | (((~x)&0xF) << 12))
+#define A_BCM1480_L2_WAY_LOCAL_3_0 0x0010040100 /* x100 */
+#define A_BCM1480_L2_WAY_LOCAL_7_4 0x0010040120 /* x120 */
+#define A_BCM1480_L2_WAY_REMOTE_3_0 0x0010040140 /* x140 */
+#define A_BCM1480_L2_WAY_REMOTE_7_4 0x0010040160 /* x160 */
+#define A_BCM1480_L2_WAY_AGENT_3_0 0x00100400C0 /* xxC0 */
+#define A_BCM1480_L2_WAY_AGENT_7_4 0x00100400E0 /* xxE0 */
+#define A_BCM1480_L2_WAY_ENABLE(A, banks) (A | (((~(banks))&0x0F) << 8))
+#define A_BCM1480_L2_BANK_BASE 0x00D0300000
+#define A_BCM1480_L2_BANK_ADDRESS(b) (A_BCM1480_L2_BANK_BASE | (((b)&0x7)<<17))
+#define A_BCM1480_L2_MGMT_TAG_BASE 0x00D0000000
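+/*
+ * Example expansion of the bank-address macro above:
+ *   A_BCM1480_L2_BANK_ADDRESS(1) = 0x00D0300000 | (1 << 17) = 0x00D0320000,
+ * i.e. each of the 8 banks occupies its own 0x20000-byte window above
+ * A_BCM1480_L2_BANK_BASE.
+ */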
+
+
+/* *********************************************************************
+ * PCI-X Interface Registers (Section 7)
+ ********************************************************************* */
+
+#define A_BCM1480_PCI_BASE 0x0010061400
+
+#define A_BCM1480_PCI_RESET 0x0010061400
+#define A_BCM1480_PCI_DLL 0x0010061500
+
+#define A_BCM1480_PCI_TYPE00_HEADER 0x002E000000
+
+/* *********************************************************************
+ * Ethernet MAC Registers (Section 11) and DMA Registers (Section 10.6)
+ ********************************************************************* */
+
+/* No register changes with Rev.C BCM1250, but one additional MAC */
+
+#define A_BCM1480_MAC_BASE_2 0x0010066000
+
+#ifndef A_MAC_BASE_2
+#define A_MAC_BASE_2 A_BCM1480_MAC_BASE_2
+#endif
+
+#define A_BCM1480_MAC_BASE_3 0x0010067000
+#define A_MAC_BASE_3 A_BCM1480_MAC_BASE_3
+
+#define R_BCM1480_MAC_DMA_OODPKTLOST 0x00000038
+
+#ifndef R_MAC_DMA_OODPKTLOST
+#define R_MAC_DMA_OODPKTLOST R_BCM1480_MAC_DMA_OODPKTLOST
+#endif
+
+
+/* *********************************************************************
+ * DUART Registers (Section 14)
+ ********************************************************************* */
+
+/* No significant differences from BCM1250, two DUARTs */
+
+/* Conventions, per user manual:
+ * DUART generic, channels A,B,C,D
+ * DUART0 implementing channels A,B
+ * DUART1 implementing channels C,D
+ */
+
+#define BCM1480_DUART_NUM_PORTS 4
+
+#define A_BCM1480_DUART0 0x0010060000
+#define A_BCM1480_DUART1 0x0010060400
+#define A_BCM1480_DUART(chan) ((((chan)&2) == 0)? A_BCM1480_DUART0 : A_BCM1480_DUART1)
+
+#define BCM1480_DUART_CHANREG_SPACING 0x100
+#define A_BCM1480_DUART_CHANREG(chan, reg) \
+ (A_BCM1480_DUART(chan) + \
+ BCM1480_DUART_CHANREG_SPACING * (((chan) & 1) + 1) + (reg))
+#define A_BCM1480_DUART_CTRLREG(chan, reg) \
+ (A_BCM1480_DUART(chan) + \
+ BCM1480_DUART_CHANREG_SPACING * 3 + (reg))
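+/*
+ * With the channel numbering above (0..3 = A..D), A_BCM1480_DUART(chan)
+ * selects DUART0 for channels A/B and DUART1 for channels C/D; the
+ * CHANREG/CTRLREG macros then add the per-channel or shared-control
+ * offset within the selected DUART.
+ */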
+
+#define DUART_IMRISR_SPACING 0x20
+#define DUART_INCHNG_SPACING 0x10
+
+#define R_BCM1480_DUART_IMRREG(chan) \
+ (R_DUART_IMR_A + ((chan) & 1) * DUART_IMRISR_SPACING)
+#define R_BCM1480_DUART_ISRREG(chan) \
+ (R_DUART_ISR_A + ((chan) & 1) * DUART_IMRISR_SPACING)
+#define R_BCM1480_DUART_INCHREG(chan) \
+ (R_DUART_IN_CHNG_A + ((chan) & 1) * DUART_INCHNG_SPACING)
+
+#define A_BCM1480_DUART_IMRREG(chan) \
+ (A_BCM1480_DUART_CTRLREG((chan), R_BCM1480_DUART_IMRREG(chan)))
+#define A_BCM1480_DUART_ISRREG(chan) \
+ (A_BCM1480_DUART_CTRLREG((chan), R_BCM1480_DUART_ISRREG(chan)))
+
+#define A_BCM1480_DUART_IN_PORT(chan) \
+ (A_BCM1480_DUART_CTRLREG((chan), R_DUART_IN_PORT))
+
+/*
+ * These constants are the absolute addresses.
+ */
+
+#define A_BCM1480_DUART_MODE_REG_1_C 0x0010060400
+#define A_BCM1480_DUART_MODE_REG_2_C 0x0010060410
+#define A_BCM1480_DUART_STATUS_C 0x0010060420
+#define A_BCM1480_DUART_CLK_SEL_C 0x0010060430
+#define A_BCM1480_DUART_FULL_CTL_C 0x0010060440
+#define A_BCM1480_DUART_CMD_C 0x0010060450
+#define A_BCM1480_DUART_RX_HOLD_C 0x0010060460
+#define A_BCM1480_DUART_TX_HOLD_C 0x0010060470
+#define A_BCM1480_DUART_OPCR_C 0x0010060480
+#define A_BCM1480_DUART_AUX_CTRL_C 0x0010060490
+
+#define A_BCM1480_DUART_MODE_REG_1_D 0x0010060500
+#define A_BCM1480_DUART_MODE_REG_2_D 0x0010060510
+#define A_BCM1480_DUART_STATUS_D 0x0010060520
+#define A_BCM1480_DUART_CLK_SEL_D 0x0010060530
+#define A_BCM1480_DUART_FULL_CTL_D 0x0010060540
+#define A_BCM1480_DUART_CMD_D 0x0010060550
+#define A_BCM1480_DUART_RX_HOLD_D 0x0010060560
+#define A_BCM1480_DUART_TX_HOLD_D 0x0010060570
+#define A_BCM1480_DUART_OPCR_D 0x0010060580
+#define A_BCM1480_DUART_AUX_CTRL_D 0x0010060590
+
+#define A_BCM1480_DUART_INPORT_CHNG_CD 0x0010060600
+#define A_BCM1480_DUART_AUX_CTRL_CD 0x0010060610
+#define A_BCM1480_DUART_ISR_C 0x0010060620
+#define A_BCM1480_DUART_IMR_C 0x0010060630
+#define A_BCM1480_DUART_ISR_D 0x0010060640
+#define A_BCM1480_DUART_IMR_D 0x0010060650
+#define A_BCM1480_DUART_OUT_PORT_CD 0x0010060660
+#define A_BCM1480_DUART_OPCR_CD 0x0010060670
+#define A_BCM1480_DUART_IN_PORT_CD 0x0010060680
+#define A_BCM1480_DUART_ISR_CD 0x0010060690
+#define A_BCM1480_DUART_IMR_CD 0x00100606A0
+#define A_BCM1480_DUART_SET_OPR_CD 0x00100606B0
+#define A_BCM1480_DUART_CLEAR_OPR_CD 0x00100606C0
+#define A_BCM1480_DUART_INPORT_CHNG_C 0x00100606D0
+#define A_BCM1480_DUART_INPORT_CHNG_D 0x00100606E0
+
+
+/* *********************************************************************
+ * Generic Bus Registers (Section 15) and PCMCIA Registers (Section 16)
+ ********************************************************************* */
+
+#define A_BCM1480_IO_PCMCIA_CFG_B 0x0010061A58
+#define A_BCM1480_IO_PCMCIA_STATUS_B 0x0010061A68
+
+/* *********************************************************************
+ * GPIO Registers (Section 17)
+ ********************************************************************* */
+
+/* One additional GPIO register, placed _before_ the BCM1250's GPIO block base */
+
+#define A_BCM1480_GPIO_INT_ADD_TYPE 0x0010061A78
+#define R_BCM1480_GPIO_INT_ADD_TYPE (-8)
+
+#define A_GPIO_INT_ADD_TYPE A_BCM1480_GPIO_INT_ADD_TYPE
+#define R_GPIO_INT_ADD_TYPE R_BCM1480_GPIO_INT_ADD_TYPE
+
+/* *********************************************************************
+ * SMBus Registers (Section 18)
+ ********************************************************************* */
+
+/* No changes from BCM1250 */
+
+/* *********************************************************************
+ * Timer Registers (Section 4.6)
+ ********************************************************************* */
+
+/* BCM1480 has two additional watchdogs */
+
+/* Watchdog timers */
+
+#define A_BCM1480_SCD_WDOG_2 0x0010022050
+#define A_BCM1480_SCD_WDOG_3 0x0010022150
+
+#define BCM1480_SCD_NUM_WDOGS 4
+
+#define A_BCM1480_SCD_WDOG_BASE(w) (A_BCM1480_SCD_WDOG_0+((w)&2)*0x1000 + ((w)&1)*0x100)
+#define A_BCM1480_SCD_WDOG_REGISTER(w, r) (A_BCM1480_SCD_WDOG_BASE(w) + (r))
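+/*
+ * Sketch of an expansion, assuming A_BCM1480_SCD_WDOG_0 resolves to the
+ * 1250-era first watchdog at 0x0010020050:
+ *   A_BCM1480_SCD_WDOG_BASE(2) = 0x0010020050 + 0x2000 = 0x0010022050,
+ * matching A_BCM1480_SCD_WDOG_INIT_2 below.
+ */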
+
+#define A_BCM1480_SCD_WDOG_INIT_2 0x0010022050
+#define A_BCM1480_SCD_WDOG_CNT_2 0x0010022058
+#define A_BCM1480_SCD_WDOG_CFG_2 0x0010022060
+
+#define A_BCM1480_SCD_WDOG_INIT_3 0x0010022150
+#define A_BCM1480_SCD_WDOG_CNT_3 0x0010022158
+#define A_BCM1480_SCD_WDOG_CFG_3 0x0010022160
+
+/* BCM1480 has two additional compare registers */
+
+#define A_BCM1480_SCD_ZBBUS_CYCLE_COUNT A_SCD_ZBBUS_CYCLE_COUNT
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP_BASE 0x0010020C00
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP0 A_SCD_ZBBUS_CYCLE_CP0
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP1 A_SCD_ZBBUS_CYCLE_CP1
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP2 0x0010020C10
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP3 0x0010020C18
+
+/* *********************************************************************
+ * System Control Registers (Section 4.2)
+ ********************************************************************* */
+
+/* Scratch register in different place */
+
+#define A_BCM1480_SCD_SCRATCH 0x100200A0
+
+/* *********************************************************************
+ * System Address Trap Registers (Section 4.9)
+ ********************************************************************* */
+
+/* No changes from BCM1250 */
+
+/* *********************************************************************
+ * System Interrupt Mapper Registers (Sections 4.3-4.5)
+ ********************************************************************* */
+
+#define A_BCM1480_IMR_CPU0_BASE 0x0010020000
+#define A_BCM1480_IMR_CPU1_BASE 0x0010022000
+#define A_BCM1480_IMR_CPU2_BASE 0x0010024000
+#define A_BCM1480_IMR_CPU3_BASE 0x0010026000
+#define BCM1480_IMR_REGISTER_SPACING 0x2000
+#define BCM1480_IMR_REGISTER_SPACING_SHIFT 13
+
+#define A_BCM1480_IMR_MAPPER(cpu) (A_BCM1480_IMR_CPU0_BASE+(cpu)*BCM1480_IMR_REGISTER_SPACING)
+#define A_BCM1480_IMR_REGISTER(cpu, reg) (A_BCM1480_IMR_MAPPER(cpu)+(reg))
+
+/* Most IMR registers are 128 bits, implemented as non-contiguous
+ 64-bit registers high (_H) and low (_L) */
+#define BCM1480_IMR_HL_SPACING 0x1000
+
+#define R_BCM1480_IMR_INTERRUPT_DIAG_H 0x0010
+#define R_BCM1480_IMR_LDT_INTERRUPT_H 0x0018
+#define R_BCM1480_IMR_LDT_INTERRUPT_CLR_H 0x0020
+#define R_BCM1480_IMR_INTERRUPT_MASK_H 0x0028
+#define R_BCM1480_IMR_INTERRUPT_TRACE_H 0x0038
+#define R_BCM1480_IMR_INTERRUPT_SOURCE_STATUS_H 0x0040
+#define R_BCM1480_IMR_LDT_INTERRUPT_SET 0x0048
+#define R_BCM1480_IMR_MAILBOX_0_CPU 0x00C0
+#define R_BCM1480_IMR_MAILBOX_0_SET_CPU 0x00C8
+#define R_BCM1480_IMR_MAILBOX_0_CLR_CPU 0x00D0
+#define R_BCM1480_IMR_MAILBOX_1_CPU 0x00E0
+#define R_BCM1480_IMR_MAILBOX_1_SET_CPU 0x00E8
+#define R_BCM1480_IMR_MAILBOX_1_CLR_CPU 0x00F0
+#define R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H 0x0100
+#define BCM1480_IMR_INTERRUPT_STATUS_COUNT 8
+#define R_BCM1480_IMR_INTERRUPT_MAP_BASE_H 0x0200
+#define BCM1480_IMR_INTERRUPT_MAP_COUNT 64
+
+#define R_BCM1480_IMR_INTERRUPT_DIAG_L 0x1010
+#define R_BCM1480_IMR_LDT_INTERRUPT_L 0x1018
+#define R_BCM1480_IMR_LDT_INTERRUPT_CLR_L 0x1020
+#define R_BCM1480_IMR_INTERRUPT_MASK_L 0x1028
+#define R_BCM1480_IMR_INTERRUPT_TRACE_L 0x1038
+#define R_BCM1480_IMR_INTERRUPT_SOURCE_STATUS_L 0x1040
+#define R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L 0x1100
+#define R_BCM1480_IMR_INTERRUPT_MAP_BASE_L 0x1200
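+/*
+ * Example expansion: the high half of CPU 1's interrupt mask register is
+ *   A_BCM1480_IMR_REGISTER(1, R_BCM1480_IMR_INTERRUPT_MASK_H)
+ *     = 0x0010020000 + 1*0x2000 + 0x0028 = 0x0010022028;
+ * the low half sits BCM1480_IMR_HL_SPACING (0x1000) further on, at
+ * R_BCM1480_IMR_INTERRUPT_MASK_L.
+ */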
+
+#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU0_BASE 0x0010028000
+#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU1_BASE 0x0010028100
+#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU2_BASE 0x0010028200
+#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU3_BASE 0x0010028300
+#define BCM1480_IMR_ALIAS_MAILBOX_SPACING 0x100
+
+#define A_BCM1480_IMR_ALIAS_MAILBOX(cpu) (A_BCM1480_IMR_ALIAS_MAILBOX_CPU0_BASE + \
+ (cpu)*BCM1480_IMR_ALIAS_MAILBOX_SPACING)
+#define A_BCM1480_IMR_ALIAS_MAILBOX_REGISTER(cpu, reg) (A_BCM1480_IMR_ALIAS_MAILBOX(cpu)+(reg))
+
+#define R_BCM1480_IMR_ALIAS_MAILBOX_0 0x0000
+#define R_BCM1480_IMR_ALIAS_MAILBOX_0_SET 0x0008
+
+/*
+ * These macros work together to build the address of a mailbox
+ * register, e.g. A_BCM1480_MAILBOX_REGISTER(0, R_BCM1480_IMR_MAILBOX_SET, 2)
+ * for mbox_0_set_cpu2 yields 0x00100240C8.
+ */
+#define R_BCM1480_IMR_MAILBOX_CPU 0x00
+#define R_BCM1480_IMR_MAILBOX_SET 0x08
+#define R_BCM1480_IMR_MAILBOX_CLR 0x10
+#define R_BCM1480_IMR_MAILBOX_NUM_SPACING 0x20
+#define A_BCM1480_MAILBOX_REGISTER(num, reg, cpu) \
+ (A_BCM1480_IMR_CPU0_BASE + \
+ (num * R_BCM1480_IMR_MAILBOX_NUM_SPACING) + \
+ (cpu * BCM1480_IMR_REGISTER_SPACING) + \
+ (R_BCM1480_IMR_MAILBOX_0_CPU + reg))
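+/*
+ * A second worked example: the "clear" register of mailbox 1 for CPU 3 is
+ *   A_BCM1480_MAILBOX_REGISTER(1, R_BCM1480_IMR_MAILBOX_CLR, 3)
+ *     = 0x0010020000 + 1*0x20 + 3*0x2000 + (0x00C0 + 0x10) = 0x00100260F0,
+ * i.e. R_BCM1480_IMR_MAILBOX_1_CLR_CPU in CPU 3's mapper.  A typical
+ * (illustrative) use is an IPI: write the message bits to the _SET address
+ * of the target CPU's mailbox and let that CPU acknowledge via _CLR.
+ */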
+
+/* *********************************************************************
+ * System Performance Counter Registers (Section 4.7)
+ ********************************************************************* */
+
+/* BCM1480 has four more performance counter registers, and two control
+ registers. */
+
+#define A_BCM1480_SCD_PERF_CNT_BASE 0x00100204C0
+
+#define A_BCM1480_SCD_PERF_CNT_CFG0 0x00100204C0
+#define A_BCM1480_SCD_PERF_CNT_CFG_0 A_BCM1480_SCD_PERF_CNT_CFG0
+#define A_BCM1480_SCD_PERF_CNT_CFG1 0x00100204C8
+#define A_BCM1480_SCD_PERF_CNT_CFG_1 A_BCM1480_SCD_PERF_CNT_CFG1
+
+#define A_BCM1480_SCD_PERF_CNT_0 A_SCD_PERF_CNT_0
+#define A_BCM1480_SCD_PERF_CNT_1 A_SCD_PERF_CNT_1
+#define A_BCM1480_SCD_PERF_CNT_2 A_SCD_PERF_CNT_2
+#define A_BCM1480_SCD_PERF_CNT_3 A_SCD_PERF_CNT_3
+
+#define A_BCM1480_SCD_PERF_CNT_4 0x00100204F0
+#define A_BCM1480_SCD_PERF_CNT_5 0x00100204F8
+#define A_BCM1480_SCD_PERF_CNT_6 0x0010020500
+#define A_BCM1480_SCD_PERF_CNT_7 0x0010020508
+
+#define BCM1480_SCD_NUM_PERF_CNT 8
+#define BCM1480_SCD_PERF_CNT_SPACING 8
+#define A_BCM1480_SCD_PERF_CNT(n) (A_SCD_PERF_CNT_0+(n*BCM1480_SCD_PERF_CNT_SPACING))
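+/*
+ * Sketch, assuming A_SCD_PERF_CNT_0 (from the 1250 headers) is
+ * 0x00100204D0:
+ *   A_BCM1480_SCD_PERF_CNT(4) = 0x00100204D0 + 4*8 = 0x00100204F0,
+ * i.e. A_BCM1480_SCD_PERF_CNT_4 above.
+ */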
+
+/* *********************************************************************
+ * System Bus Watcher Registers (Section 4.8)
+ ********************************************************************* */
+
+
+/* Same as 1250 except BUS_ERR_STATUS_DEBUG is in a different place. */
+
+#define A_BCM1480_BUS_ERR_STATUS_DEBUG 0x00100208D8
+
+/* *********************************************************************
+ * System Debug Controller Registers (Section 19)
+ ********************************************************************* */
+
+/* Same as 1250 */
+
+/* *********************************************************************
+ * System Trace Unit Registers (Section 4.10)
+ ********************************************************************* */
+
+/* Same as 1250 */
+
+/* *********************************************************************
+ * Data Mover DMA Registers (Section 10.7)
+ ********************************************************************* */
+
+/* Same as 1250 */
+
+
+/* *********************************************************************
+ * HyperTransport Interface Registers (Section 8)
+ ********************************************************************* */
+
+#define BCM1480_HT_NUM_PORTS 3
+#define BCM1480_HT_PORT_SPACING 0x800
+#define A_BCM1480_HT_PORT_HEADER(x) (A_BCM1480_HT_PORT0_HEADER + ((x)*BCM1480_HT_PORT_SPACING))
+
+#define A_BCM1480_HT_PORT0_HEADER 0x00FE000000
+#define A_BCM1480_HT_PORT1_HEADER 0x00FE000800
+#define A_BCM1480_HT_PORT2_HEADER 0x00FE001000
+#define A_BCM1480_HT_TYPE00_HEADER 0x00FE002000
+
+
+/* *********************************************************************
+ * Node Controller Registers (Section 9)
+ ********************************************************************* */
+
+#define A_BCM1480_NC_BASE 0x00DFBD0000
+
+#define A_BCM1480_NC_RLD_FIELD 0x00DFBD0000
+#define A_BCM1480_NC_RLD_TRIGGER 0x00DFBD0020
+#define A_BCM1480_NC_RLD_BAD_ERROR 0x00DFBD0040
+#define A_BCM1480_NC_RLD_COR_ERROR 0x00DFBD0060
+#define A_BCM1480_NC_RLD_ECC_STATUS 0x00DFBD0080
+#define A_BCM1480_NC_RLD_WAY_ENABLE 0x00DFBD00A0
+#define A_BCM1480_NC_RLD_RANDOM_LFSR 0x00DFBD00C0
+
+#define A_BCM1480_NC_INTERRUPT_STATUS 0x00DFBD00E0
+#define A_BCM1480_NC_INTERRUPT_ENABLE 0x00DFBD0100
+#define A_BCM1480_NC_TIMEOUT_COUNTER 0x00DFBD0120
+#define A_BCM1480_NC_TIMEOUT_COUNTER_SEL 0x00DFBD0140
+
+#define A_BCM1480_NC_CREDIT_STATUS_REG0 0x00DFBD0200
+#define A_BCM1480_NC_CREDIT_STATUS_REG1 0x00DFBD0220
+#define A_BCM1480_NC_CREDIT_STATUS_REG2 0x00DFBD0240
+#define A_BCM1480_NC_CREDIT_STATUS_REG3 0x00DFBD0260
+#define A_BCM1480_NC_CREDIT_STATUS_REG4 0x00DFBD0280
+#define A_BCM1480_NC_CREDIT_STATUS_REG5 0x00DFBD02A0
+#define A_BCM1480_NC_CREDIT_STATUS_REG6 0x00DFBD02C0
+#define A_BCM1480_NC_CREDIT_STATUS_REG7 0x00DFBD02E0
+#define A_BCM1480_NC_CREDIT_STATUS_REG8 0x00DFBD0300
+#define A_BCM1480_NC_CREDIT_STATUS_REG9 0x00DFBD0320
+#define A_BCM1480_NC_CREDIT_STATUS_REG10 0x00DFBE0000
+#define A_BCM1480_NC_CREDIT_STATUS_REG11 0x00DFBE0020
+#define A_BCM1480_NC_CREDIT_STATUS_REG12 0x00DFBE0040
+
+#define A_BCM1480_NC_SR_TIMEOUT_COUNTER 0x00DFBE0060
+#define A_BCM1480_NC_SR_TIMEOUT_COUNTER_SEL 0x00DFBE0080
+
+
+/* *********************************************************************
+ * H&R Block Configuration Registers (Section 12.4)
+ ********************************************************************* */
+
+#define A_BCM1480_HR_BASE_0 0x00DF820000
+#define A_BCM1480_HR_BASE_1 0x00DF8A0000
+#define A_BCM1480_HR_BASE_2 0x00DF920000
+#define BCM1480_HR_REGISTER_SPACING 0x80000
+
+#define A_BCM1480_HR_BASE(idx) (A_BCM1480_HR_BASE_0 + ((idx)*BCM1480_HR_REGISTER_SPACING))
+#define A_BCM1480_HR_REGISTER(idx, reg) (A_BCM1480_HR_BASE(idx) + (reg))
+
+#define R_BCM1480_HR_CFG 0x0000000000
+
+#define R_BCM1480_HR_MAPPING 0x0000010010
+
+#define BCM1480_HR_RULE_SPACING 0x0000000010
+#define BCM1480_HR_NUM_RULES 16
+#define BCM1480_HR_OP_OFFSET 0x0000000100
+#define BCM1480_HR_TYPE_OFFSET 0x0000000108
+#define R_BCM1480_HR_RULE_OP(idx) (BCM1480_HR_OP_OFFSET + ((idx)*BCM1480_HR_RULE_SPACING))
+#define R_BCM1480_HR_RULE_TYPE(idx) (BCM1480_HR_TYPE_OFFSET + ((idx)*BCM1480_HR_RULE_SPACING))
+
+#define BCM1480_HR_LEAF_SPACING 0x0000000010
+#define BCM1480_HR_NUM_LEAVES 10
+#define BCM1480_HR_LEAF_OFFSET 0x0000000300
+#define R_BCM1480_HR_HA_LEAF0(idx) (BCM1480_HR_LEAF_OFFSET + ((idx)*BCM1480_HR_LEAF_SPACING))
+
+#define R_BCM1480_HR_EX_LEAF0 0x00000003A0
+
+#define BCM1480_HR_PATH_SPACING 0x0000000010
+#define BCM1480_HR_NUM_PATHS 16
+#define BCM1480_HR_PATH_OFFSET 0x0000000600
+#define R_BCM1480_HR_PATH(idx) (BCM1480_HR_PATH_OFFSET + ((idx)*BCM1480_HR_PATH_SPACING))
+
+#define R_BCM1480_HR_PATH_DEFAULT 0x0000000700
+
+#define BCM1480_HR_ROUTE_SPACING 8
+#define BCM1480_HR_NUM_ROUTES 512
+#define BCM1480_HR_ROUTE_OFFSET 0x0000001000
+#define R_BCM1480_HR_RT_WORD(idx) (BCM1480_HR_ROUTE_OFFSET + ((idx)*BCM1480_HR_ROUTE_SPACING))
+
+
+/* checked to here - ehs */
+/* *********************************************************************
+ * Packet Manager DMA Registers (Section 12.5)
+ ********************************************************************* */
+
+#define A_BCM1480_PM_BASE 0x0010056000
+
+#define A_BCM1480_PMI_LCL_0 0x0010058000
+#define A_BCM1480_PMO_LCL_0 0x001005C000
+#define A_BCM1480_PMI_OFFSET_0 (A_BCM1480_PMI_LCL_0 - A_BCM1480_PM_BASE)
+#define A_BCM1480_PMO_OFFSET_0 (A_BCM1480_PMO_LCL_0 - A_BCM1480_PM_BASE)
+
+#define BCM1480_PM_LCL_REGISTER_SPACING 0x100
+#define BCM1480_PM_NUM_CHANNELS 32
+
+#define A_BCM1480_PMI_LCL_BASE(idx) (A_BCM1480_PMI_LCL_0 + ((idx)*BCM1480_PM_LCL_REGISTER_SPACING))
+#define A_BCM1480_PMI_LCL_REGISTER(idx, reg) (A_BCM1480_PMI_LCL_BASE(idx) + (reg))
+#define A_BCM1480_PMO_LCL_BASE(idx) (A_BCM1480_PMO_LCL_0 + ((idx)*BCM1480_PM_LCL_REGISTER_SPACING))
+#define A_BCM1480_PMO_LCL_REGISTER(idx, reg) (A_BCM1480_PMO_LCL_BASE(idx) + (reg))
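+/*
+ * Example expansion (using R_BCM1480_PM_CNT from the per-channel list
+ * below): the CNT register of input DMA channel 2 is
+ *   A_BCM1480_PMI_LCL_REGISTER(2, R_BCM1480_PM_CNT)
+ *     = 0x0010058000 + 2*0x100 + 0x08 = 0x0010058208
+ */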
+
+#define BCM1480_PM_INT_PACKING 8
+#define BCM1480_PM_INT_FUNCTION_SPACING 0x40
+#define BCM1480_PM_INT_NUM_FUNCTIONS 3
+
+/*
+ * DMA channel registers relative to A_BCM1480_PMI_LCL_BASE(n) and A_BCM1480_PMO_LCL_BASE(n)
+ */
+
+#define R_BCM1480_PM_BASE_SIZE 0x0000000000
+#define R_BCM1480_PM_CNT 0x0000000008
+#define R_BCM1480_PM_PFCNT 0x0000000010
+#define R_BCM1480_PM_LAST 0x0000000018
+#define R_BCM1480_PM_PFINDX 0x0000000020
+#define R_BCM1480_PM_INT_WMK 0x0000000028
+#define R_BCM1480_PM_CONFIG0 0x0000000030
+#define R_BCM1480_PM_LOCALDEBUG 0x0000000078
+#define R_BCM1480_PM_CACHEABILITY 0x0000000080 /* PMI only */
+#define R_BCM1480_PM_INT_CNFG 0x0000000088
+#define R_BCM1480_PM_DESC_MERGE_TIMER 0x0000000090
+#define R_BCM1480_PM_LOCALDEBUG_PIB 0x00000000F8 /* PMI only */
+#define R_BCM1480_PM_LOCALDEBUG_POB 0x00000000F8 /* PMO only */
+
+/*
+ * Global Registers (Not Channelized)
+ */
+
+#define A_BCM1480_PMI_GLB_0 0x0010056000
+#define A_BCM1480_PMO_GLB_0 0x0010057000
+
+/*
+ * PM to TX Mapping Register relative to A_BCM1480_PMI_GLB_0 and A_BCM1480_PMO_GLB_0
+ */
+
+#define R_BCM1480_PM_PMO_MAPPING 0x00000008C8 /* PMO only */
+
+#define A_BCM1480_PM_PMO_MAPPING (A_BCM1480_PMO_GLB_0 + R_BCM1480_PM_PMO_MAPPING)
+
+/*
+ * Interrupt mapping registers
+ */
+
+
+#define A_BCM1480_PMI_INT_0 0x0010056800
+#define A_BCM1480_PMI_INT(q) (A_BCM1480_PMI_INT_0 + ((q>>8)<<8))
+#define A_BCM1480_PMI_INT_OFFSET_0 (A_BCM1480_PMI_INT_0 - A_BCM1480_PM_BASE)
+#define A_BCM1480_PMO_INT_0 0x0010057800
+#define A_BCM1480_PMO_INT(q) (A_BCM1480_PMO_INT_0 + ((q>>8)<<8))
+#define A_BCM1480_PMO_INT_OFFSET_0 (A_BCM1480_PMO_INT_0 - A_BCM1480_PM_BASE)
+
+/*
+ * Interrupt registers relative to A_BCM1480_PMI_INT_0 and A_BCM1480_PMO_INT_0
+ */
+
+#define R_BCM1480_PM_INT_ST 0x0000000000
+#define R_BCM1480_PM_INT_MSK 0x0000000040
+#define R_BCM1480_PM_INT_CLR 0x0000000080
+#define R_BCM1480_PM_MRGD_INT 0x00000000C0
+
+/*
+ * Debug registers (global)
+ */
+
+#define A_BCM1480_PM_GLOBALDEBUGMODE_PMI 0x0010056000
+#define A_BCM1480_PM_GLOBALDEBUG_PID 0x00100567F8
+#define A_BCM1480_PM_GLOBALDEBUG_PIB 0x0010056FF8
+#define A_BCM1480_PM_GLOBALDEBUGMODE_PMO 0x0010057000
+#define A_BCM1480_PM_GLOBALDEBUG_POD 0x00100577F8
+#define A_BCM1480_PM_GLOBALDEBUG_POB 0x0010057FF8
+
+/* *********************************************************************
+ * Switch performance counters
+ ********************************************************************* */
+
+#define A_BCM1480_SWPERF_CFG 0xdfb91800
+#define A_BCM1480_SWPERF_CNT0 0xdfb91880
+#define A_BCM1480_SWPERF_CNT1 0xdfb91888
+#define A_BCM1480_SWPERF_CNT2 0xdfb91890
+#define A_BCM1480_SWPERF_CNT3 0xdfb91898
+
+
+/* *********************************************************************
+ * Switch Trace Unit
+ ********************************************************************* */
+
+#define A_BCM1480_SWTRC_MATCH_CONTROL_0 0xDFB91000
+#define A_BCM1480_SWTRC_MATCH_DATA_VALUE_0 0xDFB91100
+#define A_BCM1480_SWTRC_MATCH_DATA_MASK_0 0xDFB91108
+#define A_BCM1480_SWTRC_MATCH_TAG_VALUE_0 0xDFB91200
+#define A_BCM1480_SWTRC_MATCH_TAG_MASK_0 0xDFB91208
+#define A_BCM1480_SWTRC_EVENT_0 0xDFB91300
+#define A_BCM1480_SWTRC_SEQUENCE_0 0xDFB91400
+
+#define A_BCM1480_SWTRC_CFG 0xDFB91500
+#define A_BCM1480_SWTRC_READ 0xDFB91508
+
+#define A_BCM1480_SWDEBUG_SCHEDSTOP 0xDFB92000
+
+#define A_BCM1480_SWTRC_MATCH_CONTROL(x) (A_BCM1480_SWTRC_MATCH_CONTROL_0 + ((x)*8))
+#define A_BCM1480_SWTRC_EVENT(x) (A_BCM1480_SWTRC_EVENT_0 + ((x)*8))
+#define A_BCM1480_SWTRC_SEQUENCE(x) (A_BCM1480_SWTRC_SEQUENCE_0 + ((x)*8))
+
+#define A_BCM1480_SWTRC_MATCH_DATA_VALUE(x) (A_BCM1480_SWTRC_MATCH_DATA_VALUE_0 + ((x)*16))
+#define A_BCM1480_SWTRC_MATCH_DATA_MASK(x) (A_BCM1480_SWTRC_MATCH_DATA_MASK_0 + ((x)*16))
+#define A_BCM1480_SWTRC_MATCH_TAG_VALUE(x) (A_BCM1480_SWTRC_MATCH_TAG_VALUE_0 + ((x)*16))
+#define A_BCM1480_SWTRC_MATCH_TAG_MASK(x) (A_BCM1480_SWTRC_MATCH_TAG_MASK_0 + ((x)*16))
+
+
+
+/* *********************************************************************
+ * High-Speed Port Registers (Section 13)
+ ********************************************************************* */
+
+#define A_BCM1480_HSP_BASE_0 0x00DF810000
+#define A_BCM1480_HSP_BASE_1 0x00DF890000
+#define A_BCM1480_HSP_BASE_2 0x00DF910000
+#define BCM1480_HSP_REGISTER_SPACING 0x80000
+
+#define A_BCM1480_HSP_BASE(idx) (A_BCM1480_HSP_BASE_0 + ((idx)*BCM1480_HSP_REGISTER_SPACING))
+#define A_BCM1480_HSP_REGISTER(idx, reg) (A_BCM1480_HSP_BASE(idx) + (reg))
+
+#define R_BCM1480_HSP_RX_SPI4_CFG_0 0x0000000000
+#define R_BCM1480_HSP_RX_SPI4_CFG_1 0x0000000008
+#define R_BCM1480_HSP_RX_SPI4_DESKEW_OVERRIDE 0x0000000010
+#define R_BCM1480_HSP_RX_SPI4_DESKEW_DATAPATH 0x0000000018
+#define R_BCM1480_HSP_RX_SPI4_PORT_INT_EN 0x0000000020
+#define R_BCM1480_HSP_RX_SPI4_PORT_INT_STATUS 0x0000000028
+
+#define R_BCM1480_HSP_RX_SPI4_CALENDAR_0 0x0000000200
+#define R_BCM1480_HSP_RX_SPI4_CALENDAR_1 0x0000000208
+
+#define R_BCM1480_HSP_RX_PLL_CNFG 0x0000000800
+#define R_BCM1480_HSP_RX_CALIBRATION 0x0000000808
+#define R_BCM1480_HSP_RX_TEST 0x0000000810
+#define R_BCM1480_HSP_RX_DIAG_DETAILS 0x0000000818
+#define R_BCM1480_HSP_RX_DIAG_CRC_0 0x0000000820
+#define R_BCM1480_HSP_RX_DIAG_CRC_1 0x0000000828
+#define R_BCM1480_HSP_RX_DIAG_HTCMD 0x0000000830
+#define R_BCM1480_HSP_RX_DIAG_PKTCTL 0x0000000838
+
+#define R_BCM1480_HSP_RX_VIS_FLCTRL_COUNTER 0x0000000870
+
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_0 0x0000020020
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_1 0x0000020028
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_2 0x0000020030
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_3 0x0000020038
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_4 0x0000020040
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_5 0x0000020048
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_6 0x0000020050
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_7 0x0000020058
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC(idx) (R_BCM1480_HSP_RX_PKT_RAMALLOC_0 + 8*(idx))
+
+/* XXX Following registers were shuffled. Renamed/renumbered per errata. */
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_0 0x0000020078
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_1 0x0000020080
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_2 0x0000020088
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_3 0x0000020090
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_4 0x0000020098
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_5 0x00000200A0
+
+#define R_BCM1480_HSP_RX_SPI_WATERMARK_0 0x00000200B0
+#define R_BCM1480_HSP_RX_SPI_WATERMARK_1 0x00000200B8
+#define R_BCM1480_HSP_RX_SPI_WATERMARK_2 0x00000200C0
+#define R_BCM1480_HSP_RX_SPI_WATERMARK_3 0x00000200C8
+#define R_BCM1480_HSP_RX_SPI_WATERMARK_4 0x00000200D0
+#define R_BCM1480_HSP_RX_SPI_WATERMARK_5 0x00000200D8
+#define R_BCM1480_HSP_RX_SPI_WATERMARK_6 0x00000200E0
+#define R_BCM1480_HSP_RX_SPI_WATERMARK_7 0x00000200E8
+#define R_BCM1480_HSP_RX_SPI_WATERMARK(idx) (R_BCM1480_HSP_RX_SPI_WATERMARK_0 + 8*(idx))
+
+#define R_BCM1480_HSP_RX_VIS_CMDQ_0 0x00000200F0
+#define R_BCM1480_HSP_RX_VIS_CMDQ_1 0x00000200F8
+#define R_BCM1480_HSP_RX_VIS_CMDQ_2 0x0000020100
+#define R_BCM1480_HSP_RX_RAM_READCTL 0x0000020108
+#define R_BCM1480_HSP_RX_RAM_READWINDOW 0x0000020110
+#define R_BCM1480_HSP_RX_RF_READCTL 0x0000020118
+#define R_BCM1480_HSP_RX_RF_READWINDOW 0x0000020120
+
+#define R_BCM1480_HSP_TX_SPI4_CFG_0 0x0000040000
+#define R_BCM1480_HSP_TX_SPI4_CFG_1 0x0000040008
+#define R_BCM1480_HSP_TX_SPI4_TRAINING_FMT 0x0000040010
+
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_0 0x0000040020
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_1 0x0000040028
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_2 0x0000040030
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_3 0x0000040038
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_4 0x0000040040
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_5 0x0000040048
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_6 0x0000040050
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_7 0x0000040058
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC(idx) (R_BCM1480_HSP_TX_PKT_RAMALLOC_0 + 8*(idx))
+#define R_BCM1480_HSP_TX_NPC_RAMALLOC 0x0000040078
+#define R_BCM1480_HSP_TX_RSP_RAMALLOC 0x0000040080
+#define R_BCM1480_HSP_TX_PC_RAMALLOC 0x0000040088
+#define R_BCM1480_HSP_TX_HTCC_RAMALLOC_0 0x0000040090
+#define R_BCM1480_HSP_TX_HTCC_RAMALLOC_1 0x0000040098
+#define R_BCM1480_HSP_TX_HTCC_RAMALLOC_2 0x00000400A0
+
+#define R_BCM1480_HSP_TX_PKT_RXPHITCNT_0 0x00000400B0
+#define R_BCM1480_HSP_TX_PKT_RXPHITCNT_1 0x00000400B8
+#define R_BCM1480_HSP_TX_PKT_RXPHITCNT_2 0x00000400C0
+#define R_BCM1480_HSP_TX_PKT_RXPHITCNT_3 0x00000400C8
+#define R_BCM1480_HSP_TX_PKT_RXPHITCNT(idx) (R_BCM1480_HSP_TX_PKT_RXPHITCNT_0 + 8*(idx))
+#define R_BCM1480_HSP_TX_HTIO_RXPHITCNT 0x00000400D0
+#define R_BCM1480_HSP_TX_HTCC_RXPHITCNT 0x00000400D8
+
+#define R_BCM1480_HSP_TX_PKT_TXPHITCNT_0 0x00000400E0
+#define R_BCM1480_HSP_TX_PKT_TXPHITCNT_1 0x00000400E8
+#define R_BCM1480_HSP_TX_PKT_TXPHITCNT_2 0x00000400F0
+#define R_BCM1480_HSP_TX_PKT_TXPHITCNT_3 0x00000400F8
+#define R_BCM1480_HSP_TX_PKT_TXPHITCNT(idx) (R_BCM1480_HSP_TX_PKT_TXPHITCNT_0 + 8*(idx))
+#define R_BCM1480_HSP_TX_HTIO_TXPHITCNT 0x0000040100
+#define R_BCM1480_HSP_TX_HTCC_TXPHITCNT 0x0000040108
+
+#define R_BCM1480_HSP_TX_SPI4_CALENDAR_0 0x0000040200
+#define R_BCM1480_HSP_TX_SPI4_CALENDAR_1 0x0000040208
+
+#define R_BCM1480_HSP_TX_PLL_CNFG 0x0000040800
+#define R_BCM1480_HSP_TX_CALIBRATION 0x0000040808
+#define R_BCM1480_HSP_TX_TEST 0x0000040810
+
+#define R_BCM1480_HSP_TX_VIS_CMDQ_0 0x0000040840
+#define R_BCM1480_HSP_TX_VIS_CMDQ_1 0x0000040848
+#define R_BCM1480_HSP_TX_VIS_CMDQ_2 0x0000040850
+#define R_BCM1480_HSP_TX_RAM_READCTL 0x0000040860
+#define R_BCM1480_HSP_TX_RAM_READWINDOW 0x0000040868
+#define R_BCM1480_HSP_TX_RF_READCTL 0x0000040870
+#define R_BCM1480_HSP_TX_RF_READWINDOW 0x0000040878
+
+#define R_BCM1480_HSP_TX_SPI4_PORT_INT_STATUS 0x0000040880
+#define R_BCM1480_HSP_TX_SPI4_PORT_INT_EN 0x0000040888
+
+#define R_BCM1480_HSP_TX_NEXT_ADDR_BASE 0x0000040400
+#define R_BCM1480_HSP_TX_NEXT_ADDR_REGISTER(x) (R_BCM1480_HSP_TX_NEXT_ADDR_BASE+ 8*(x))
+
+
+
+/* *********************************************************************
+ * Physical Address Map (Table 10 and Figure 7)
+ ********************************************************************* */
+
+#define A_BCM1480_PHYS_MEMORY_0 _SB_MAKE64(0x0000000000)
+#define A_BCM1480_PHYS_MEMORY_SIZE _SB_MAKE64((256*1024*1024))
+#define A_BCM1480_PHYS_SYSTEM_CTL _SB_MAKE64(0x0010000000)
+#define A_BCM1480_PHYS_IO_SYSTEM _SB_MAKE64(0x0010060000)
+#define A_BCM1480_PHYS_GENBUS _SB_MAKE64(0x0010090000)
+#define A_BCM1480_PHYS_GENBUS_END _SB_MAKE64(0x0028000000)
+#define A_BCM1480_PHYS_PCI_MISC_MATCH_BYTES _SB_MAKE64(0x0028000000)
+#define A_BCM1480_PHYS_PCI_IACK_MATCH_BYTES _SB_MAKE64(0x0029000000)
+#define A_BCM1480_PHYS_PCI_IO_MATCH_BYTES _SB_MAKE64(0x002C000000)
+#define A_BCM1480_PHYS_PCI_CFG_MATCH_BYTES _SB_MAKE64(0x002E000000)
+#define A_BCM1480_PHYS_PCI_OMAP_MATCH_BYTES _SB_MAKE64(0x002F000000)
+#define A_BCM1480_PHYS_PCI_MEM_MATCH_BYTES _SB_MAKE64(0x0030000000)
+#define A_BCM1480_PHYS_HT_MEM_MATCH_BYTES _SB_MAKE64(0x0040000000)
+#define A_BCM1480_PHYS_HT_MEM_MATCH_BITS _SB_MAKE64(0x0060000000)
+#define A_BCM1480_PHYS_MEMORY_1 _SB_MAKE64(0x0080000000)
+#define A_BCM1480_PHYS_MEMORY_2 _SB_MAKE64(0x0090000000)
+#define A_BCM1480_PHYS_PCI_MISC_MATCH_BITS _SB_MAKE64(0x00A8000000)
+#define A_BCM1480_PHYS_PCI_IACK_MATCH_BITS _SB_MAKE64(0x00A9000000)
+#define A_BCM1480_PHYS_PCI_IO_MATCH_BITS _SB_MAKE64(0x00AC000000)
+#define A_BCM1480_PHYS_PCI_CFG_MATCH_BITS _SB_MAKE64(0x00AE000000)
+#define A_BCM1480_PHYS_PCI_OMAP_MATCH_BITS _SB_MAKE64(0x00AF000000)
+#define A_BCM1480_PHYS_PCI_MEM_MATCH_BITS _SB_MAKE64(0x00B0000000)
+#define A_BCM1480_PHYS_MEMORY_3 _SB_MAKE64(0x00C0000000)
+#define A_BCM1480_PHYS_L2_CACHE_TEST _SB_MAKE64(0x00D0000000)
+#define A_BCM1480_PHYS_HT_SPECIAL_MATCH_BYTES _SB_MAKE64(0x00D8000000)
+#define A_BCM1480_PHYS_HT_IO_MATCH_BYTES _SB_MAKE64(0x00DC000000)
+#define A_BCM1480_PHYS_HT_CFG_MATCH_BYTES _SB_MAKE64(0x00DE000000)
+#define A_BCM1480_PHYS_HS_SUBSYS _SB_MAKE64(0x00DF000000)
+#define A_BCM1480_PHYS_HT_SPECIAL_MATCH_BITS _SB_MAKE64(0x00F8000000)
+#define A_BCM1480_PHYS_HT_IO_MATCH_BITS _SB_MAKE64(0x00FC000000)
+#define A_BCM1480_PHYS_HT_CFG_MATCH_BITS _SB_MAKE64(0x00FE000000)
+#define A_BCM1480_PHYS_MEMORY_EXP _SB_MAKE64(0x0100000000)
+#define A_BCM1480_PHYS_MEMORY_EXP_SIZE _SB_MAKE64((508*1024*1024*1024))
+#define A_BCM1480_PHYS_PCI_UPPER _SB_MAKE64(0x1000000000)
+#define A_BCM1480_PHYS_HT_UPPER_MATCH_BYTES _SB_MAKE64(0x2000000000)
+#define A_BCM1480_PHYS_HT_UPPER_MATCH_BITS _SB_MAKE64(0x3000000000)
+#define A_BCM1480_PHYS_HT_NODE_ALIAS _SB_MAKE64(0x4000000000)
+#define A_BCM1480_PHYS_HT_FULLACCESS _SB_MAKE64(0xF000000000)
+
+
+/* *********************************************************************
+ * L2 Cache as RAM (Table 54)
+ ********************************************************************* */
+
+#define A_BCM1480_PHYS_L2CACHE_WAY_SIZE _SB_MAKE64(0x0000020000)
+#define BCM1480_PHYS_L2CACHE_NUM_WAYS 8
+#define A_BCM1480_PHYS_L2CACHE_TOTAL_SIZE _SB_MAKE64(0x0000100000)
+#define A_BCM1480_PHYS_L2CACHE_WAY0 _SB_MAKE64(0x00D0300000)
+#define A_BCM1480_PHYS_L2CACHE_WAY1 _SB_MAKE64(0x00D0320000)
+#define A_BCM1480_PHYS_L2CACHE_WAY2 _SB_MAKE64(0x00D0340000)
+#define A_BCM1480_PHYS_L2CACHE_WAY3 _SB_MAKE64(0x00D0360000)
+#define A_BCM1480_PHYS_L2CACHE_WAY4 _SB_MAKE64(0x00D0380000)
+#define A_BCM1480_PHYS_L2CACHE_WAY5 _SB_MAKE64(0x00D03A0000)
+#define A_BCM1480_PHYS_L2CACHE_WAY6 _SB_MAKE64(0x00D03C0000)
+#define A_BCM1480_PHYS_L2CACHE_WAY7 _SB_MAKE64(0x00D03E0000)
+
+#endif /* _BCM1480_REGS_H */
diff --git a/arch/mips/include/asm/sibyte/bcm1480_scd.h b/arch/mips/include/asm/sibyte/bcm1480_scd.h
new file mode 100644
index 000000000..87f37086d
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/bcm1480_scd.h
@@ -0,0 +1,393 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * BCM1280/BCM1400 Board Support Package
+ *
+ * SCD Constants and Macros File: bcm1480_scd.h
+ *
+ * This module contains constants and macros useful for
+ * manipulating the System Control and Debug module.
+ *
+ * BCM1400 specification level: 1X55_1X80-UM100-R (12/18/03)
+ *
+ *********************************************************************
+ *
+ * Copyright 2000,2001,2002,2003,2004,2005
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+#ifndef _BCM1480_SCD_H
+#define _BCM1480_SCD_H
+
+#include <asm/sibyte/sb1250_defs.h>
+
+/* *********************************************************************
+ * Pull in the BCM1250's SCD since lots of stuff is the same.
+ ********************************************************************* */
+
+#include <asm/sibyte/sb1250_scd.h>
+
+/* *********************************************************************
+ * Some general notes:
+ *
+ * This file is basically a "what's new" header file. Since the
+ * BCM1250 and the new BCM1480 (and derivatives) share many common
+ * features, this file contains only what's new or changed from
+ * the 1250. (above, you can see that we include the 1250 symbols
+ * to get the base functionality).
+ *
+ * In software, be sure to use the correct symbols, particularly
+ * for blocks that are different between the two chip families.
+ * All BCM1480-specific symbols have _BCM1480_ in their names,
+ * and all BCM1250-specific and "base" functions that are common in
+ * both chips have no special names (this is for compatibility with
+ * older include files). Therefore, if you're working with the
+ * SCD, which is very different on each chip, A_SCD_xxx implies
+ * the BCM1250 version and A_BCM1480_SCD_xxx implies the BCM1480
+ * version.
+ ********************************************************************* */
+
+/* *********************************************************************
+ * System control/debug registers
+ ********************************************************************* */
+
+/*
+ * System Identification and Revision Register (Table 12)
+ * Register: SCD_SYSTEM_REVISION
+ * This register is field compatible with the 1250.
+ */
+
+/*
+ * New part definitions
+ */
+
+#define K_SYS_PART_BCM1480 0x1406
+#define K_SYS_PART_BCM1280 0x1206
+#define K_SYS_PART_BCM1455 0x1407
+#define K_SYS_PART_BCM1255 0x1257
+#define K_SYS_PART_BCM1158 0x1156
+
+/*
+ * Manufacturing Information Register (Table 14)
+ * Register: SCD_SYSTEM_MANUF
+ */
+
+/*
+ * System Configuration Register (Table 15)
+ * Register: SCD_SYSTEM_CFG
+ * Entire register is different from 1250, all new constants below
+ */
+
+#define M_BCM1480_SYS_RESERVED0 _SB_MAKEMASK1(0)
+#define M_BCM1480_SYS_HT_MINRSTCNT _SB_MAKEMASK1(1)
+#define M_BCM1480_SYS_RESERVED2 _SB_MAKEMASK1(2)
+#define M_BCM1480_SYS_RESERVED3 _SB_MAKEMASK1(3)
+#define M_BCM1480_SYS_RESERVED4 _SB_MAKEMASK1(4)
+#define M_BCM1480_SYS_IOB_DIV _SB_MAKEMASK1(5)
+
+#define S_BCM1480_SYS_PLL_DIV _SB_MAKE64(6)
+#define M_BCM1480_SYS_PLL_DIV _SB_MAKEMASK(5, S_BCM1480_SYS_PLL_DIV)
+#define V_BCM1480_SYS_PLL_DIV(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_PLL_DIV)
+#define G_BCM1480_SYS_PLL_DIV(x) _SB_GETVALUE(x, S_BCM1480_SYS_PLL_DIV, M_BCM1480_SYS_PLL_DIV)
+
+#define S_BCM1480_SYS_SW_DIV _SB_MAKE64(11)
+#define M_BCM1480_SYS_SW_DIV _SB_MAKEMASK(5, S_BCM1480_SYS_SW_DIV)
+#define V_BCM1480_SYS_SW_DIV(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_SW_DIV)
+#define G_BCM1480_SYS_SW_DIV(x) _SB_GETVALUE(x, S_BCM1480_SYS_SW_DIV, M_BCM1480_SYS_SW_DIV)
+
+#define M_BCM1480_SYS_PCMCIA_ENABLE _SB_MAKEMASK1(16)
+#define M_BCM1480_SYS_DUART1_ENABLE _SB_MAKEMASK1(17)
+
+#define S_BCM1480_SYS_BOOT_MODE _SB_MAKE64(18)
+#define M_BCM1480_SYS_BOOT_MODE _SB_MAKEMASK(2, S_BCM1480_SYS_BOOT_MODE)
+#define V_BCM1480_SYS_BOOT_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_BOOT_MODE)
+#define G_BCM1480_SYS_BOOT_MODE(x) _SB_GETVALUE(x, S_BCM1480_SYS_BOOT_MODE, M_BCM1480_SYS_BOOT_MODE)
+#define K_BCM1480_SYS_BOOT_MODE_ROM32 0
+#define K_BCM1480_SYS_BOOT_MODE_ROM8 1
+#define K_BCM1480_SYS_BOOT_MODE_SMBUS_SMALL 2
+#define K_BCM1480_SYS_BOOT_MODE_SMBUS_BIG 3
+#define M_BCM1480_SYS_BOOT_MODE_SMBUS _SB_MAKEMASK1(19)
+
+#define M_BCM1480_SYS_PCI_HOST _SB_MAKEMASK1(20)
+#define M_BCM1480_SYS_PCI_ARBITER _SB_MAKEMASK1(21)
+#define M_BCM1480_SYS_BIG_ENDIAN _SB_MAKEMASK1(22)
+#define M_BCM1480_SYS_GENCLK_EN _SB_MAKEMASK1(23)
+#define M_BCM1480_SYS_GEN_PARITY_EN _SB_MAKEMASK1(24)
+#define M_BCM1480_SYS_RESERVED25 _SB_MAKEMASK1(25)
+
+#define S_BCM1480_SYS_CONFIG 26
+#define M_BCM1480_SYS_CONFIG _SB_MAKEMASK(6, S_BCM1480_SYS_CONFIG)
+#define V_BCM1480_SYS_CONFIG(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_CONFIG)
+#define G_BCM1480_SYS_CONFIG(x) _SB_GETVALUE(x, S_BCM1480_SYS_CONFIG, M_BCM1480_SYS_CONFIG)
+
+#define M_BCM1480_SYS_RESERVED32 _SB_MAKEMASK(15, 32)
+
+#define S_BCM1480_SYS_NODEID 47
+#define M_BCM1480_SYS_NODEID _SB_MAKEMASK(4, S_BCM1480_SYS_NODEID)
+#define V_BCM1480_SYS_NODEID(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_NODEID)
+#define G_BCM1480_SYS_NODEID(x) _SB_GETVALUE(x, S_BCM1480_SYS_NODEID, M_BCM1480_SYS_NODEID)
+
+#define M_BCM1480_SYS_CCNUMA_EN _SB_MAKEMASK1(51)
+#define M_BCM1480_SYS_CPU_RESET_0 _SB_MAKEMASK1(52)
+#define M_BCM1480_SYS_CPU_RESET_1 _SB_MAKEMASK1(53)
+#define M_BCM1480_SYS_CPU_RESET_2 _SB_MAKEMASK1(54)
+#define M_BCM1480_SYS_CPU_RESET_3 _SB_MAKEMASK1(55)
+#define S_BCM1480_SYS_DISABLECPU0 56
+#define M_BCM1480_SYS_DISABLECPU0 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU0)
+#define S_BCM1480_SYS_DISABLECPU1 57
+#define M_BCM1480_SYS_DISABLECPU1 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU1)
+#define S_BCM1480_SYS_DISABLECPU2 58
+#define M_BCM1480_SYS_DISABLECPU2 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU2)
+#define S_BCM1480_SYS_DISABLECPU3 59
+#define M_BCM1480_SYS_DISABLECPU3 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU3)
+
+#define M_BCM1480_SYS_SB_SOFTRES _SB_MAKEMASK1(60)
+#define M_BCM1480_SYS_EXT_RESET _SB_MAKEMASK1(61)
+#define M_BCM1480_SYS_SYSTEM_RESET _SB_MAKEMASK1(62)
+#define M_BCM1480_SYS_SW_FLAG _SB_MAKEMASK1(63)
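+/*
+ * A minimal usage sketch (not from the manual): read the system
+ * configuration register and decode a couple of the fields above.
+ * A_SCD_SYSTEM_CFG and IOADDR() come from the shared 1250 headers; this
+ * assumes the 1480 keeps the register at the same address.
+ *
+ *   uint64_t cfg = __raw_readq(IOADDR(A_SCD_SYSTEM_CFG));
+ *   unsigned int boot_mode = G_BCM1480_SYS_BOOT_MODE(cfg);
+ *   unsigned int node_id   = G_BCM1480_SYS_NODEID(cfg);
+ *   if (boot_mode == K_BCM1480_SYS_BOOT_MODE_ROM8)
+ *           ... ;   /* booted from an 8-bit ROM */
+ */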
+
+/*
+ * Scratch Register (Table 16)
+ * Register: SCD_SYSTEM_SCRATCH
+ * Same as BCM1250
+ */
+
+
+/*
+ * Mailbox Registers (Table 17)
+ * Registers: SCD_MBOX_{0,1}_CPU_x
+ * Same as BCM1250
+ */
+
+
+/*
+ * See bcm1480_int.h for interrupt mapper registers.
+ */
+
+
+/*
+ * Watchdog Timer Initial Count Registers (Table 23)
+ * Registers: SCD_WDOG_INIT_CNT_x
+ *
+ * The watchdogs are almost the same as the 1250, except
+ * the configuration register has more bits to control the
+ * other CPUs.
+ */
+
+
+/*
+ * Watchdog Timer Configuration Registers (Table 25)
+ * Registers: SCD_WDOG_CFG_x
+ */
+
+#define M_BCM1480_SCD_WDOG_ENABLE _SB_MAKEMASK1(0)
+
+#define S_BCM1480_SCD_WDOG_RESET_TYPE 2
+#define M_BCM1480_SCD_WDOG_RESET_TYPE _SB_MAKEMASK(5, S_BCM1480_SCD_WDOG_RESET_TYPE)
+#define V_BCM1480_SCD_WDOG_RESET_TYPE(x) _SB_MAKEVALUE(x, S_BCM1480_SCD_WDOG_RESET_TYPE)
+#define G_BCM1480_SCD_WDOG_RESET_TYPE(x) _SB_GETVALUE(x, S_BCM1480_SCD_WDOG_RESET_TYPE, M_BCM1480_SCD_WDOG_RESET_TYPE)
+
+#define K_BCM1480_SCD_WDOG_RESET_FULL 0 /* actually, (x & 1) == 0 */
+#define K_BCM1480_SCD_WDOG_RESET_SOFT 1
+#define K_BCM1480_SCD_WDOG_RESET_CPU0 3
+#define K_BCM1480_SCD_WDOG_RESET_CPU1 5
+#define K_BCM1480_SCD_WDOG_RESET_CPU2 9
+#define K_BCM1480_SCD_WDOG_RESET_CPU3 17
+#define K_BCM1480_SCD_WDOG_RESET_ALL_CPUS 31
+
+
+#define M_BCM1480_SCD_WDOG_HAS_RESET _SB_MAKEMASK1(8)
+
+/*
+ * General Timer Initial Count Registers (Table 26)
+ * Registers: SCD_TIMER_INIT_x
+ *
+ * The timer registers are the same as the BCM1250
+ */
+
+
+/*
+ * ZBbus Count Register (Table 29)
+ * Register: ZBBUS_CYCLE_COUNT
+ *
+ * Same as BCM1250
+ */
+
+/*
+ * ZBbus Compare Registers (Table 30)
+ * Registers: ZBBUS_CYCLE_CPx
+ *
+ * Same as BCM1250
+ */
+
+
+/*
+ * System Performance Counter Configuration Register (Table 31)
+ * Register: PERF_CNT_CFG_0
+ *
+ * SPC_CFG_SRC[0-3] is the same as the 1250.
+ * SPC_CFG_SRC[4-7] only exist on the 1480
+ * The clear/enable bits are in different locations on the 1250 and 1480.
+ */
+
+#define S_SPC_CFG_SRC4 32
+#define M_SPC_CFG_SRC4 _SB_MAKEMASK(8, S_SPC_CFG_SRC4)
+#define V_SPC_CFG_SRC4(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC4)
+#define G_SPC_CFG_SRC4(x) _SB_GETVALUE(x, S_SPC_CFG_SRC4, M_SPC_CFG_SRC4)
+
+#define S_SPC_CFG_SRC5 40
+#define M_SPC_CFG_SRC5 _SB_MAKEMASK(8, S_SPC_CFG_SRC5)
+#define V_SPC_CFG_SRC5(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC5)
+#define G_SPC_CFG_SRC5(x) _SB_GETVALUE(x, S_SPC_CFG_SRC5, M_SPC_CFG_SRC5)
+
+#define S_SPC_CFG_SRC6 48
+#define M_SPC_CFG_SRC6 _SB_MAKEMASK(8, S_SPC_CFG_SRC6)
+#define V_SPC_CFG_SRC6(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC6)
+#define G_SPC_CFG_SRC6(x) _SB_GETVALUE(x, S_SPC_CFG_SRC6, M_SPC_CFG_SRC6)
+
+#define S_SPC_CFG_SRC7 56
+#define M_SPC_CFG_SRC7 _SB_MAKEMASK(8, S_SPC_CFG_SRC7)
+#define V_SPC_CFG_SRC7(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC7)
+#define G_SPC_CFG_SRC7(x) _SB_GETVALUE(x, S_SPC_CFG_SRC7, M_SPC_CFG_SRC7)
+
+/*
+ * System Performance Counter Control Register (Table 32)
+ * Register: PERF_CNT_CFG_1
+ * BCM1480 specific
+ */
+#define M_BCM1480_SPC_CFG_CLEAR _SB_MAKEMASK1(0)
+#define M_BCM1480_SPC_CFG_ENABLE _SB_MAKEMASK1(1)
+#if SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_SPC_CFG_CLEAR M_BCM1480_SPC_CFG_CLEAR
+#define M_SPC_CFG_ENABLE M_BCM1480_SPC_CFG_ENABLE
+#endif
+
+/*
+ * System Performance Counters (Table 33)
+ * Registers: PERF_CNT_x
+ */
+
+#define S_BCM1480_SPC_CNT_COUNT 0
+#define M_BCM1480_SPC_CNT_COUNT _SB_MAKEMASK(40, S_BCM1480_SPC_CNT_COUNT)
+#define V_BCM1480_SPC_CNT_COUNT(x) _SB_MAKEVALUE(x, S_BCM1480_SPC_CNT_COUNT)
+#define G_BCM1480_SPC_CNT_COUNT(x) _SB_GETVALUE(x, S_BCM1480_SPC_CNT_COUNT, M_BCM1480_SPC_CNT_COUNT)
+
+#define M_BCM1480_SPC_CNT_OFLOW _SB_MAKEMASK1(40)
+
+
+/*
+ * Bus Watcher Error Status Register (Tables 36, 37)
+ * Registers: BUS_ERR_STATUS, BUS_ERR_STATUS_DEBUG
+ * Same as BCM1250.
+ */
+
+/*
+ * Bus Watcher Error Data Registers (Table 38)
+ * Registers: BUS_ERR_DATA_x
+ * Same as BCM1250.
+ */
+
+/*
+ * Bus Watcher L2 ECC Counter Register (Table 39)
+ * Register: BUS_L2_ERRORS
+ * Same as BCM1250.
+ */
+
+
+/*
+ * Bus Watcher Memory and I/O Error Counter Register (Table 40)
+ * Register: BUS_MEM_IO_ERRORS
+ * Same as BCM1250.
+ */
+
+
+/*
+ * Address Trap Registers
+ *
+ * Register layout is almost the same as the BCM1250.  The bus agents
+ * are different, and the address trap configuration bits differ
+ * slightly.
+ */
+
+#define M_BCM1480_ATRAP_INDEX _SB_MAKEMASK(4, 0)
+#define M_BCM1480_ATRAP_ADDRESS _SB_MAKEMASK(40, 0)
+
+#define S_BCM1480_ATRAP_CFG_CNT 0
+#define M_BCM1480_ATRAP_CFG_CNT _SB_MAKEMASK(3, S_BCM1480_ATRAP_CFG_CNT)
+#define V_BCM1480_ATRAP_CFG_CNT(x) _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_CNT)
+#define G_BCM1480_ATRAP_CFG_CNT(x) _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_CNT, M_BCM1480_ATRAP_CFG_CNT)
+
+#define M_BCM1480_ATRAP_CFG_WRITE _SB_MAKEMASK1(3)
+#define M_BCM1480_ATRAP_CFG_ALL _SB_MAKEMASK1(4)
+#define M_BCM1480_ATRAP_CFG_INV _SB_MAKEMASK1(5)
+#define M_BCM1480_ATRAP_CFG_USESRC _SB_MAKEMASK1(6)
+#define M_BCM1480_ATRAP_CFG_SRCINV _SB_MAKEMASK1(7)
+
+#define S_BCM1480_ATRAP_CFG_AGENTID 8
+#define M_BCM1480_ATRAP_CFG_AGENTID _SB_MAKEMASK(4, S_BCM1480_ATRAP_CFG_AGENTID)
+#define V_BCM1480_ATRAP_CFG_AGENTID(x) _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_AGENTID)
+#define G_BCM1480_ATRAP_CFG_AGENTID(x) _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_AGENTID, M_BCM1480_ATRAP_CFG_AGENTID)
+
+
+#define K_BCM1480_BUS_AGENT_CPU0 0
+#define K_BCM1480_BUS_AGENT_CPU1 1
+#define K_BCM1480_BUS_AGENT_NC 2
+#define K_BCM1480_BUS_AGENT_IOB 3
+#define K_BCM1480_BUS_AGENT_SCD 4
+#define K_BCM1480_BUS_AGENT_L2C 6
+#define K_BCM1480_BUS_AGENT_MC 7
+#define K_BCM1480_BUS_AGENT_CPU2 8
+#define K_BCM1480_BUS_AGENT_CPU3 9
+#define K_BCM1480_BUS_AGENT_PM 10
+
+#define S_BCM1480_ATRAP_CFG_CATTR 12
+#define M_BCM1480_ATRAP_CFG_CATTR _SB_MAKEMASK(2, S_BCM1480_ATRAP_CFG_CATTR)
+#define V_BCM1480_ATRAP_CFG_CATTR(x) _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_CATTR)
+#define G_BCM1480_ATRAP_CFG_CATTR(x) _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_CATTR, M_BCM1480_ATRAP_CFG_CATTR)
+
+#define K_BCM1480_ATRAP_CFG_CATTR_IGNORE 0
+#define K_BCM1480_ATRAP_CFG_CATTR_UNC 1
+#define K_BCM1480_ATRAP_CFG_CATTR_NONCOH 2
+#define K_BCM1480_ATRAP_CFG_CATTR_COHERENT 3
+
+#define M_BCM1480_ATRAP_CFG_CATTRINV _SB_MAKEMASK1(14)
+
+
+/*
+ * Trace Event Registers (Table 47)
+ * Same as BCM1250.
+ */
+
+/*
+ * Trace Sequence Control Registers (Table 48)
+ * Registers: TRACE_SEQUENCE_x
+ *
+ * Same as BCM1250 except for two new fields.
+ */
+
+
+#define M_BCM1480_SCD_TRSEQ_TID_MATCH_EN _SB_MAKEMASK1(25)
+
+#define S_BCM1480_SCD_TRSEQ_SWFUNC 26
+#define M_BCM1480_SCD_TRSEQ_SWFUNC _SB_MAKEMASK(2, S_BCM1480_SCD_TRSEQ_SWFUNC)
+#define V_BCM1480_SCD_TRSEQ_SWFUNC(x) _SB_MAKEVALUE(x, S_BCM1480_SCD_TRSEQ_SWFUNC)
+#define G_BCM1480_SCD_TRSEQ_SWFUNC(x) _SB_GETVALUE(x, S_BCM1480_SCD_TRSEQ_SWFUNC, M_BCM1480_SCD_TRSEQ_SWFUNC)
+
+/*
+ * Trace Control Register (Table 49)
+ * Register: TRACE_CFG
+ *
+ * BCM1480 changes to this register (other than location of the CUR_ADDR field)
+ * are defined below.
+ */
+
+#define S_BCM1480_SCD_TRACE_CFG_MODE 16
+#define M_BCM1480_SCD_TRACE_CFG_MODE _SB_MAKEMASK(2, S_BCM1480_SCD_TRACE_CFG_MODE)
+#define V_BCM1480_SCD_TRACE_CFG_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_SCD_TRACE_CFG_MODE)
+#define G_BCM1480_SCD_TRACE_CFG_MODE(x) _SB_GETVALUE(x, S_BCM1480_SCD_TRACE_CFG_MODE, M_BCM1480_SCD_TRACE_CFG_MODE)
+
+#define K_BCM1480_SCD_TRACE_CFG_MODE_BLOCKERS 0
+#define K_BCM1480_SCD_TRACE_CFG_MODE_BYTEEN_INT 1
+#define K_BCM1480_SCD_TRACE_CFG_MODE_FLOW_ID 2
+
+#endif /* _BCM1480_SCD_H */
diff --git a/arch/mips/include/asm/sibyte/bigsur.h b/arch/mips/include/asm/sibyte/bigsur.h
new file mode 100644
index 000000000..86c876196
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/bigsur.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2000,2001,2002,2003,2004 Broadcom Corporation
+ */
+#ifndef __ASM_SIBYTE_BIGSUR_H
+#define __ASM_SIBYTE_BIGSUR_H
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/bcm1480_int.h>
+
+#ifdef CONFIG_SIBYTE_BIGSUR
+#define SIBYTE_BOARD_NAME "BCM91x80A/B (BigSur)"
+#define SIBYTE_HAVE_PCMCIA 1
+#define SIBYTE_HAVE_IDE 1
+#endif
+
+/* Generic bus chip selects */
+#define LEDS_CS 3
+#define LEDS_PHYS 0x100a0000
+
+#ifdef SIBYTE_HAVE_IDE
+#define IDE_CS 4
+#define IDE_PHYS 0x100b0000
+#define K_GPIO_GB_IDE 4
+#define K_INT_GB_IDE (K_INT_GPIO_0 + K_GPIO_GB_IDE)
+#endif
+
+#ifdef SIBYTE_HAVE_PCMCIA
+#define PCMCIA_CS 6
+#define PCMCIA_PHYS 0x11000000
+#define K_GPIO_PC_READY 9
+#define K_INT_PC_READY (K_INT_GPIO_0 + K_GPIO_PC_READY)
+#endif
+
+#endif /* __ASM_SIBYTE_BIGSUR_H */
diff --git a/arch/mips/include/asm/sibyte/board.h b/arch/mips/include/asm/sibyte/board.h
new file mode 100644
index 000000000..20fe2f16c
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/board.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2000,2001,2002,2003,2004 Broadcom Corporation
+ */
+
+#ifndef _SIBYTE_BOARD_H
+#define _SIBYTE_BOARD_H
+
+#if defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_CRHONE) || \
+ defined(CONFIG_SIBYTE_CRHINE) || defined(CONFIG_SIBYTE_LITTLESUR)
+#include <asm/sibyte/swarm.h>
+#endif
+
+#if defined(CONFIG_SIBYTE_SENTOSA) || defined(CONFIG_SIBYTE_RHONE)
+#include <asm/sibyte/sentosa.h>
+#endif
+
+#ifdef CONFIG_SIBYTE_CARMEL
+#include <asm/sibyte/carmel.h>
+#endif
+
+#ifdef CONFIG_SIBYTE_BIGSUR
+#include <asm/sibyte/bigsur.h>
+#endif
+
+#ifdef __ASSEMBLY__
+
+#ifdef LEDS_PHYS
+#define setleds(t0, t1, c0, c1, c2, c3) \
+ li t0, (LEDS_PHYS|0xa0000000); \
+ li t1, c0; \
+ sb t1, 0x18(t0); \
+ li t1, c1; \
+ sb t1, 0x10(t0); \
+ li t1, c2; \
+ sb t1, 0x08(t0); \
+ li t1, c3; \
+ sb t1, 0x00(t0)
+#else
+#define setleds(t0, t1, c0, c1, c2, c3)
+#endif /* LEDS_PHYS */
+
+#else
+
+void swarm_setup(void);
+
+#ifdef LEDS_PHYS
+extern void setleds(char *str);
+#else
+#define setleds(s) do { } while (0)
+#endif /* LEDS_PHYS */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _SIBYTE_BOARD_H */
diff --git a/arch/mips/include/asm/sibyte/carmel.h b/arch/mips/include/asm/sibyte/carmel.h
new file mode 100644
index 000000000..c6730d7a6
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/carmel.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2002 Broadcom Corporation
+ */
+#ifndef __ASM_SIBYTE_CARMEL_H
+#define __ASM_SIBYTE_CARMEL_H
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_int.h>
+
+#define SIBYTE_BOARD_NAME "Carmel"
+
+#define GPIO_PHY_INTERRUPT 2
+#define GPIO_NONMASKABLE_INT 3
+#define GPIO_CF_INSERTED 6
+#define GPIO_MONTEREY_RESET 7
+#define GPIO_QUADUART_INT 8
+#define GPIO_CF_INT 9
+#define GPIO_FPGA_CCLK 10
+#define GPIO_FPGA_DOUT 11
+#define GPIO_FPGA_DIN 12
+#define GPIO_FPGA_PGM 13
+#define GPIO_FPGA_DONE 14
+#define GPIO_FPGA_INIT 15
+
+#define LEDS_CS 2
+#define LEDS_PHYS 0x100C0000
+#define MLEDS_CS 3
+#define MLEDS_PHYS 0x100A0000
+#define UART_CS 4
+#define UART_PHYS 0x100D0000
+#define ARAVALI_CS 5
+#define ARAVALI_PHYS 0x11000000
+#define IDE_CS 6
+#define IDE_PHYS 0x100B0000
+#define ARAVALI2_CS 7
+#define ARAVALI2_PHYS 0x100E0000
+
+#if defined(CONFIG_SIBYTE_CARMEL)
+#define K_GPIO_GB_IDE 9
+#define K_INT_GB_IDE (K_INT_GPIO_0 + K_GPIO_GB_IDE)
+#endif
+
+
+#endif /* __ASM_SIBYTE_CARMEL_H */
diff --git a/arch/mips/include/asm/sibyte/sb1250.h b/arch/mips/include/asm/sibyte/sb1250.h
new file mode 100644
index 000000000..dbde5f93f
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/sb1250.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
+ */
+
+#ifndef _ASM_SIBYTE_SB1250_H
+#define _ASM_SIBYTE_SB1250_H
+
+/*
+ * yymmddpp: year, month, day, patch.
+ * should sync with Makefile EXTRAVERSION
+ */
+#define SIBYTE_RELEASE 0x02111403
+
+#define SB1250_NR_IRQS 64
+
+#define BCM1480_NR_IRQS 128
+#define BCM1480_NR_IRQS_HALF 64
+
+#define SB1250_DUART_MINOR_BASE 64
+
+#ifndef __ASSEMBLY__
+
+#include <asm/addrspace.h>
+
+/* For revision/pass information */
+#include <asm/sibyte/sb1250_scd.h>
+#include <asm/sibyte/bcm1480_scd.h>
+extern unsigned int sb1_pass;
+extern unsigned int soc_pass;
+extern unsigned int soc_type;
+extern unsigned int periph_rev;
+extern unsigned int zbbus_mhz;
+
+extern void sb1250_time_init(void);
+extern void sb1250_mask_irq(int cpu, int irq);
+extern void sb1250_unmask_irq(int cpu, int irq);
+
+extern void bcm1480_time_init(void);
+extern void bcm1480_mask_irq(int cpu, int irq);
+extern void bcm1480_unmask_irq(int cpu, int irq);
+
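+/*
+ * AT_spin parks the calling CPU in a tight loop: $at is loaded with 0,
+ * so the beqz always branches back to itself (presumably a debugging
+ * aid / last-resort halt).
+ */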
+#define AT_spin \
+ __asm__ __volatile__ ( \
+ ".set noat\n" \
+ "li $at, 0\n" \
+ "1: beqz $at, 1b\n" \
+ ".set at\n" \
+ )
+
+#endif
+
+#define IOADDR(a) ((void __iomem *)(IO_BASE + (a)))
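+/*
+ * IOADDR turns one of the physical A_* constants from these headers into
+ * a kernel virtual address suitable for __raw_readq()/__raw_writeq().
+ * A sketch, using A_SCD_SYSTEM_REVISION from sb1250_scd.h:
+ *
+ *   u64 rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION));
+ */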
+
+#endif
diff --git a/arch/mips/include/asm/sibyte/sb1250_defs.h b/arch/mips/include/asm/sibyte/sb1250_defs.h
new file mode 100644
index 000000000..68cd7c0b3
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/sb1250_defs.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * SB1250 Board Support Package
+ *
+ * Global constants and macros File: sb1250_defs.h
+ *
+ * This file contains macros and definitions used by the other
+ * include files.
+ *
+ * SB1250 specification level: User's manual 1/02/02
+ *
+ *********************************************************************
+ *
+ * Copyright 2000,2001,2002,2003
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+#ifndef _SB1250_DEFS_H
+#define _SB1250_DEFS_H
+
+/*
+ * These headers require ANSI C89 string concatenation, and GCC or other
+ * 'long long' (64-bit integer) support.
+ */
+#if !defined(__STDC__) && !defined(_MSC_VER)
+#error SiByte headers require ANSI C89 support
+#endif
+
+
+/* *********************************************************************
+ * Macros for feature tests, used to enable include file features
+ * for chip features only present in certain chip revisions.
+ *
+ * SIBYTE_HDR_FEATURES may be defined to be the mask value chip/revision
+ * which is to be exposed by the headers. If undefined, it defaults to
+ * "all features."
+ *
+ * Use like:
+ *
+ * #define SIBYTE_HDR_FEATURES SIBYTE_HDR_FMASK_112x_PASS1
+ *
+ * Generate defines only for that revision of chip.
+ *
+ * #if SIBYTE_HDR_FEATURE(chip,pass)
+ *
+ * True if header features for that revision or later of
+ * that particular chip type are enabled in SIBYTE_HDR_FEATURES.
+ * (Use this to bracket #defines for features present in a given
+ * revision and later.)
+ *
+ * Note that there is no implied ordering between chip types.
+ *
+ * Note also that 'chip' and 'pass' must textually exactly
+ * match the defines below. So, for example,
+ * SIBYTE_HDR_FEATURE(112x, PASS1) is OK, but
+ * SIBYTE_HDR_FEATURE(1120, pass1) is not (for two reasons).
+ *
+ * #if SIBYTE_HDR_FEATURE_UP_TO(chip,pass)
+ *
+ * Same as SIBYTE_HDR_FEATURE, but true for the named revision
+ * and earlier revisions of the named chip type.
+ *
+ * #if SIBYTE_HDR_FEATURE_EXACT(chip,pass)
+ *
+ * Same as SIBYTE_HDR_FEATURE, but only true for the named
+ * revision of the named chip type. (Note that this CANNOT
+ * be used to verify that you're compiling only for that
+ * particular chip/revision. It will be true any time this
+ * chip/revision is included in SIBYTE_HDR_FEATURES.)
+ *
+ * #if SIBYTE_HDR_FEATURE_CHIP(chip)
+ *
+ * True if header features for (any revision of) that chip type
+ * are enabled in SIBYTE_HDR_FEATURES. (Use this to bracket
+ * #defines for features specific to a given chip type.)
+ *
+ * Mask values currently include room for additional revisions of each
+ * chip type, but can be renumbered at will. Note that they MUST fit
+ * into 31 bits and may not include C type constructs, for safe use in
+ * CPP conditionals. Bit positions within chip types DO indicate
+ * ordering, so be careful when adding support for new minor revs.
+ ********************************************************************* */
+
+#define SIBYTE_HDR_FMASK_1250_ALL 0x000000ff
+#define SIBYTE_HDR_FMASK_1250_PASS1 0x00000001
+#define SIBYTE_HDR_FMASK_1250_PASS2 0x00000002
+#define SIBYTE_HDR_FMASK_1250_PASS3 0x00000004
+
+#define SIBYTE_HDR_FMASK_112x_ALL 0x00000f00
+#define SIBYTE_HDR_FMASK_112x_PASS1 0x00000100
+
+#define SIBYTE_HDR_FMASK_1480_ALL 0x0000f000
+#define SIBYTE_HDR_FMASK_1480_PASS1 0x00001000
+#define SIBYTE_HDR_FMASK_1480_PASS2 0x00002000
+
+/* Bit mask for chip/revision. (use _ALL for all revisions of a chip). */
+#define SIBYTE_HDR_FMASK(chip, pass) \
+ (SIBYTE_HDR_FMASK_ ## chip ## _ ## pass)
+#define SIBYTE_HDR_FMASK_ALLREVS(chip) \
+ (SIBYTE_HDR_FMASK_ ## chip ## _ALL)
+
+/* Default constant value for all chips, all revisions */
+#define SIBYTE_HDR_FMASK_ALL \
+ (SIBYTE_HDR_FMASK_1250_ALL | SIBYTE_HDR_FMASK_112x_ALL \
+ | SIBYTE_HDR_FMASK_1480_ALL)
+
+/* This one is used for the "original" BCM1250/BCM112x chips. We use this
+ to weed out constants and macros that do not exist on later chips like
+ the BCM1480 */
+#define SIBYTE_HDR_FMASK_1250_112x_ALL \
+ (SIBYTE_HDR_FMASK_1250_ALL | SIBYTE_HDR_FMASK_112x_ALL)
+#define SIBYTE_HDR_FMASK_1250_112x SIBYTE_HDR_FMASK_1250_112x_ALL
+
+#ifndef SIBYTE_HDR_FEATURES
+#define SIBYTE_HDR_FEATURES SIBYTE_HDR_FMASK_ALL
+#endif
+
+
+/* Bit mask for revisions of chip exclusively before the named revision. */
+#define SIBYTE_HDR_FMASK_BEFORE(chip, pass) \
+ ((SIBYTE_HDR_FMASK(chip, pass) - 1) & SIBYTE_HDR_FMASK_ALLREVS(chip))
+
+/* Bit mask for revisions of chip exclusively after the named revision. */
+#define SIBYTE_HDR_FMASK_AFTER(chip, pass) \
+ (~(SIBYTE_HDR_FMASK(chip, pass) \
+ | (SIBYTE_HDR_FMASK(chip, pass) - 1)) & SIBYTE_HDR_FMASK_ALLREVS(chip))
+
+
+/* True if header features enabled for (any revision of) that chip type. */
+#define SIBYTE_HDR_FEATURE_CHIP(chip) \
+ (!! (SIBYTE_HDR_FMASK_ALLREVS(chip) & SIBYTE_HDR_FEATURES))
+
+/* True for all versions of the BCM1250 and BCM1125, but not true for
+   anything else. */
+#define SIBYTE_HDR_FEATURE_1250_112x \
+ (SIBYTE_HDR_FEATURE_CHIP(1250) || SIBYTE_HDR_FEATURE_CHIP(112x))
+/* (!! (SIBYTE_HDR_FEATURES & SIBYTE_HDR_FMASK_1250_112x)) */
+
+/* True if header features enabled for that rev or later, inclusive. */
+#define SIBYTE_HDR_FEATURE(chip, pass) \
+ (!! ((SIBYTE_HDR_FMASK(chip, pass) \
+ | SIBYTE_HDR_FMASK_AFTER(chip, pass)) & SIBYTE_HDR_FEATURES))
+
+/* True if header features enabled for exactly that rev. */
+#define SIBYTE_HDR_FEATURE_EXACT(chip, pass) \
+ (!! (SIBYTE_HDR_FMASK(chip, pass) & SIBYTE_HDR_FEATURES))
+
+/* True if header features enabled for that rev or before, inclusive. */
+#define SIBYTE_HDR_FEATURE_UP_TO(chip, pass) \
+ (!! ((SIBYTE_HDR_FMASK(chip, pass) \
+ | SIBYTE_HDR_FMASK_BEFORE(chip, pass)) & SIBYTE_HDR_FEATURES))
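+
+/*
+ * Illustrative sketch (not part of the original header): a hypothetical
+ * constant bracketed with the feature-test macros above so that it is
+ * only visible when header features for BCM1250 pass 2 or a later 1250
+ * revision are enabled.  The name and value are made up for the example.
+ *
+ *	#if SIBYTE_HDR_FEATURE(1250, PASS2)
+ *	#define K_EXAMPLE_PASS2_CODE	2
+ *	#endif
+ *
+ * With SIBYTE_HDR_FEATURES left at its default (SIBYTE_HDR_FMASK_ALL) the
+ * block is compiled in; with it set to SIBYTE_HDR_FMASK_1250_PASS1 it is
+ * not.
+ */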
+
+
+/* *********************************************************************
+ * Naming schemes for constants in these files:
+ *
+ * M_xxx MASK constant (identifies bits in a register).
+ * For multi-bit fields, all bits in the field will
+ * be set.
+ *
+ * K_xxx "Code" constant (value for data in a multi-bit
+ * field). The value is right justified.
+ *
+ * V_xxx "Value" constant. This is the same as the
+ * corresponding "K_xxx" constant, except it is
+ * shifted to the correct position in the register.
+ *
+ * S_xxx SHIFT constant. This is the number of bits that
+ * a field value (code) needs to be shifted
+ * (towards the left) to put the value in the right
+ * position for the register.
+ *
+ * A_xxx ADDRESS constant. This will be a physical
+ * address. Use the PHYS_TO_K1 macro to generate
+ * a K1SEG address.
+ *
+ * R_xxx RELATIVE offset constant. This is an offset from
+ * an A_xxx constant (usually the first register in
+ * a group).
+ *
+ * G_xxx(X) GET value. This macro obtains a multi-bit field
+ * from a register, masks it, and shifts it to
+ * the bottom of the register (retrieving a K_xxx
+ * value, for example).
+ *
+ * V_xxx(X) VALUE. This macro computes the value of a
+ * K_xxx constant shifted to the correct position
+ * in the register.
+ ********************************************************************* */
+
+
+
+
+/*
+ * Cast to a 64-bit number. In assembly language, where casts are not
+ * available, the value is left as-is.
+ *
+ * Note: you'll need to define uint32_t and uint64_t in your headers.
+ */
+
+#if !defined(__ASSEMBLY__)
+#define _SB_MAKE64(x) ((uint64_t)(x))
+#define _SB_MAKE32(x) ((uint32_t)(x))
+#else
+#define _SB_MAKE64(x) (x)
+#define _SB_MAKE32(x) (x)
+#endif
+
+
+/*
+ * Make a mask for 1 bit at position 'n'
+ */
+
+#define _SB_MAKEMASK1(n) (_SB_MAKE64(1) << _SB_MAKE64(n))
+#define _SB_MAKEMASK1_32(n) (_SB_MAKE32(1) << _SB_MAKE32(n))
+
+/*
+ * Make a mask for 'v' bits at position 'n'
+ */
+
+#define _SB_MAKEMASK(v, n) (_SB_MAKE64((_SB_MAKE64(1)<<(v))-1) << _SB_MAKE64(n))
+#define _SB_MAKEMASK_32(v, n) (_SB_MAKE32((_SB_MAKE32(1)<<(v))-1) << _SB_MAKE32(n))
+
+/*
+ * Make a value 'v' at bit position 'n'
+ */
+
+#define _SB_MAKEVALUE(v, n) (_SB_MAKE64(v) << _SB_MAKE64(n))
+#define _SB_MAKEVALUE_32(v, n) (_SB_MAKE32(v) << _SB_MAKE32(n))
+
+#define _SB_GETVALUE(v, n, m) ((_SB_MAKE64(v) & _SB_MAKE64(m)) >> _SB_MAKE64(n))
+#define _SB_GETVALUE_32(v, n, m) ((_SB_MAKE32(v) & _SB_MAKE32(m)) >> _SB_MAKE32(n))
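+
+/*
+ * Worked example (illustrative only; "FOO" is a made-up field name): a
+ * 4-bit field at bit position 8 would typically be described with the
+ * helpers above as
+ *
+ *	#define S_FOO		_SB_MAKE64(8)
+ *	#define M_FOO		_SB_MAKEMASK(4, S_FOO)
+ *	#define V_FOO(x)	_SB_MAKEVALUE(x, S_FOO)
+ *	#define G_FOO(x)	_SB_GETVALUE(x, S_FOO, M_FOO)
+ *
+ * giving M_FOO == 0x0000000000000F00, V_FOO(0x5) == 0x500, and G_FOO(reg)
+ * recovering the 4-bit code from a register value, matching the
+ * S_/M_/V_/G_ naming scheme described earlier.
+ */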
+
+/*
+ * Macros to read/write on-chip registers
+ * XXX should we do the PHYS_TO_K1 here?
+ */
+
+
+#if defined(__mips64) && !defined(__ASSEMBLY__)
+#define SBWRITECSR(csr, val) *((volatile uint64_t *) PHYS_TO_K1(csr)) = (val)
+#define SBREADCSR(csr) (*((volatile uint64_t *) PHYS_TO_K1(csr)))
+#endif /* __mips64 && !__ASSEMBLY__ */
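+
+/*
+ * Usage sketch (illustrative only): reading and modifying a CSR given its
+ * physical address.  "A_SOME_CSR" stands in for one of the A_xxx
+ * physical-address constants from the other SiByte headers, and
+ * PHYS_TO_K1() is assumed to be provided elsewhere.
+ *
+ *	uint64_t v = SBREADCSR(A_SOME_CSR);
+ *	SBWRITECSR(A_SOME_CSR, v | _SB_MAKEMASK1(0));
+ */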
+
+#endif
diff --git a/arch/mips/include/asm/sibyte/sb1250_dma.h b/arch/mips/include/asm/sibyte/sb1250_dma.h
new file mode 100644
index 000000000..d9678b98c
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/sb1250_dma.h
@@ -0,0 +1,581 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * SB1250 Board Support Package
+ *
+ * DMA definitions File: sb1250_dma.h
+ *
+ * This module contains constants and macros useful for
+ * programming the SB1250's DMA controllers, both the data mover
+ * and the Ethernet DMA.
+ *
+ * SB1250 specification level: User's manual 10/21/02
+ * BCM1280 specification level: User's manual 11/24/03
+ *
+ *********************************************************************
+ *
+ * Copyright 2000,2001,2002,2003
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+
+#ifndef _SB1250_DMA_H
+#define _SB1250_DMA_H
+
+
+#include <asm/sibyte/sb1250_defs.h>
+
+/* *********************************************************************
+ * DMA Registers
+ ********************************************************************* */
+
+/*
+ * Ethernet and Serial DMA Configuration Register 0 (Table 7-4)
+ * Registers: DMA_CONFIG0_MAC_x_RX_CH_0
+ * Registers: DMA_CONFIG0_MAC_x_TX_CH_0
+ * Registers: DMA_CONFIG0_SER_x_RX
+ * Registers: DMA_CONFIG0_SER_x_TX
+ */
+
+
+#define M_DMA_DROP _SB_MAKEMASK1(0)
+
+#define M_DMA_CHAIN_SEL _SB_MAKEMASK1(1)
+#define M_DMA_RESERVED1 _SB_MAKEMASK1(2)
+
+#define S_DMA_DESC_TYPE _SB_MAKE64(1)
+#define M_DMA_DESC_TYPE _SB_MAKEMASK(2, S_DMA_DESC_TYPE)
+#define V_DMA_DESC_TYPE(x) _SB_MAKEVALUE(x, S_DMA_DESC_TYPE)
+#define G_DMA_DESC_TYPE(x) _SB_GETVALUE(x, S_DMA_DESC_TYPE, M_DMA_DESC_TYPE)
+
+#define K_DMA_DESC_TYPE_RING_AL 0
+#define K_DMA_DESC_TYPE_CHAIN_AL 1
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define K_DMA_DESC_TYPE_RING_UAL_WI 2
+#define K_DMA_DESC_TYPE_RING_UAL_RMW 3
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+#define M_DMA_EOP_INT_EN _SB_MAKEMASK1(3)
+#define M_DMA_HWM_INT_EN _SB_MAKEMASK1(4)
+#define M_DMA_LWM_INT_EN _SB_MAKEMASK1(5)
+#define M_DMA_TBX_EN _SB_MAKEMASK1(6)
+#define M_DMA_TDX_EN _SB_MAKEMASK1(7)
+
+#define S_DMA_INT_PKTCNT _SB_MAKE64(8)
+#define M_DMA_INT_PKTCNT _SB_MAKEMASK(8, S_DMA_INT_PKTCNT)
+#define V_DMA_INT_PKTCNT(x) _SB_MAKEVALUE(x, S_DMA_INT_PKTCNT)
+#define G_DMA_INT_PKTCNT(x) _SB_GETVALUE(x, S_DMA_INT_PKTCNT, M_DMA_INT_PKTCNT)
+
+#define S_DMA_RINGSZ _SB_MAKE64(16)
+#define M_DMA_RINGSZ _SB_MAKEMASK(16, S_DMA_RINGSZ)
+#define V_DMA_RINGSZ(x) _SB_MAKEVALUE(x, S_DMA_RINGSZ)
+#define G_DMA_RINGSZ(x) _SB_GETVALUE(x, S_DMA_RINGSZ, M_DMA_RINGSZ)
+
+#define S_DMA_HIGH_WATERMARK _SB_MAKE64(32)
+#define M_DMA_HIGH_WATERMARK _SB_MAKEMASK(16, S_DMA_HIGH_WATERMARK)
+#define V_DMA_HIGH_WATERMARK(x) _SB_MAKEVALUE(x, S_DMA_HIGH_WATERMARK)
+#define G_DMA_HIGH_WATERMARK(x) _SB_GETVALUE(x, S_DMA_HIGH_WATERMARK, M_DMA_HIGH_WATERMARK)
+
+#define S_DMA_LOW_WATERMARK _SB_MAKE64(48)
+#define M_DMA_LOW_WATERMARK _SB_MAKEMASK(16, S_DMA_LOW_WATERMARK)
+#define V_DMA_LOW_WATERMARK(x) _SB_MAKEVALUE(x, S_DMA_LOW_WATERMARK)
+#define G_DMA_LOW_WATERMARK(x) _SB_GETVALUE(x, S_DMA_LOW_WATERMARK, M_DMA_LOW_WATERMARK)
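+
+/*
+ * Illustrative sketch (not from the original header): composing a
+ * DMA_CONFIG0 value from the fields above.  The ring size and watermark
+ * numbers are arbitrary example values.
+ *
+ *	uint64_t cfg0 = V_DMA_RINGSZ(32) |
+ *			V_DMA_HIGH_WATERMARK(24) |
+ *			V_DMA_LOW_WATERMARK(8) |
+ *			M_DMA_EOP_INT_EN;
+ *
+ * G_DMA_RINGSZ(cfg0) then reads back 32.
+ */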
+
+/*
+ * Ethernet and Serial DMA Configuration Register 1 (Table 7-5)
+ * Registers: DMA_CONFIG1_MAC_x_RX_CH_0
+ * Registers: DMA_CONFIG1_MAC_x_TX_CH_0
+ * Registers: DMA_CONFIG1_SER_x_RX
+ * Registers: DMA_CONFIG1_SER_x_TX
+ */
+
+#define M_DMA_HDR_CF_EN _SB_MAKEMASK1(0)
+#define M_DMA_ASIC_XFR_EN _SB_MAKEMASK1(1)
+#define M_DMA_PRE_ADDR_EN _SB_MAKEMASK1(2)
+#define M_DMA_FLOW_CTL_EN _SB_MAKEMASK1(3)
+#define M_DMA_NO_DSCR_UPDT _SB_MAKEMASK1(4)
+#define M_DMA_L2CA _SB_MAKEMASK1(5)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_DMA_RX_XTRA_STATUS _SB_MAKEMASK1(6)
+#define M_DMA_TX_CPU_PAUSE _SB_MAKEMASK1(6)
+#define M_DMA_TX_FC_PAUSE_EN _SB_MAKEMASK1(7)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+#define M_DMA_MBZ1 _SB_MAKEMASK(6, 15)
+
+#define S_DMA_HDR_SIZE _SB_MAKE64(21)
+#define M_DMA_HDR_SIZE _SB_MAKEMASK(9, S_DMA_HDR_SIZE)
+#define V_DMA_HDR_SIZE(x) _SB_MAKEVALUE(x, S_DMA_HDR_SIZE)
+#define G_DMA_HDR_SIZE(x) _SB_GETVALUE(x, S_DMA_HDR_SIZE, M_DMA_HDR_SIZE)
+
+#define M_DMA_MBZ2 _SB_MAKEMASK(5, 32)
+
+#define S_DMA_ASICXFR_SIZE _SB_MAKE64(37)
+#define M_DMA_ASICXFR_SIZE _SB_MAKEMASK(9, S_DMA_ASICXFR_SIZE)
+#define V_DMA_ASICXFR_SIZE(x) _SB_MAKEVALUE(x, S_DMA_ASICXFR_SIZE)
+#define G_DMA_ASICXFR_SIZE(x) _SB_GETVALUE(x, S_DMA_ASICXFR_SIZE, M_DMA_ASICXFR_SIZE)
+
+#define S_DMA_INT_TIMEOUT _SB_MAKE64(48)
+#define M_DMA_INT_TIMEOUT _SB_MAKEMASK(16, S_DMA_INT_TIMEOUT)
+#define V_DMA_INT_TIMEOUT(x) _SB_MAKEVALUE(x, S_DMA_INT_TIMEOUT)
+#define G_DMA_INT_TIMEOUT(x) _SB_GETVALUE(x, S_DMA_INT_TIMEOUT, M_DMA_INT_TIMEOUT)
+
+/*
+ * Ethernet and Serial DMA Descriptor base address (Table 7-6)
+ */
+
+#define M_DMA_DSCRBASE_MBZ _SB_MAKEMASK(4, 0)
+
+
+/*
+ * ASIC Mode Base Address (Table 7-7)
+ */
+
+#define M_DMA_ASIC_BASE_MBZ _SB_MAKEMASK(20, 0)
+
+/*
+ * DMA Descriptor Count Registers (Table 7-8)
+ */
+
+/* No bitfields */
+
+
+/*
+ * Current Descriptor Address Register (Table 7-11)
+ */
+
+#define S_DMA_CURDSCR_ADDR _SB_MAKE64(0)
+#define M_DMA_CURDSCR_ADDR _SB_MAKEMASK(40, S_DMA_CURDSCR_ADDR)
+#define S_DMA_CURDSCR_COUNT _SB_MAKE64(40)
+#define M_DMA_CURDSCR_COUNT _SB_MAKEMASK(16, S_DMA_CURDSCR_COUNT)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_DMA_TX_CH_PAUSE_ON _SB_MAKEMASK1(56)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+/*
+ * Receive Packet Drop Registers
+ */
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define S_DMA_OODLOST_RX _SB_MAKE64(0)
+#define M_DMA_OODLOST_RX _SB_MAKEMASK(16, S_DMA_OODLOST_RX)
+#define G_DMA_OODLOST_RX(x) _SB_GETVALUE(x, S_DMA_OODLOST_RX, M_DMA_OODLOST_RX)
+
+#define S_DMA_EOP_COUNT_RX _SB_MAKE64(16)
+#define M_DMA_EOP_COUNT_RX _SB_MAKEMASK(8, S_DMA_EOP_COUNT_RX)
+#define G_DMA_EOP_COUNT_RX(x) _SB_GETVALUE(x, S_DMA_EOP_COUNT_RX, M_DMA_EOP_COUNT_RX)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+/* *********************************************************************
+ * DMA Descriptors
+ ********************************************************************* */
+
+/*
+ * Descriptor doubleword "A" (Table 7-12)
+ */
+
+#define S_DMA_DSCRA_OFFSET _SB_MAKE64(0)
+#define M_DMA_DSCRA_OFFSET _SB_MAKEMASK(5, S_DMA_DSCRA_OFFSET)
+#define V_DMA_DSCRA_OFFSET(x) _SB_MAKEVALUE(x, S_DMA_DSCRA_OFFSET)
+#define G_DMA_DSCRA_OFFSET(x) _SB_GETVALUE(x, S_DMA_DSCRA_OFFSET, M_DMA_DSCRA_OFFSET)
+
+/* Note: Don't shift the address over, just mask it with the mask below */
+#define S_DMA_DSCRA_A_ADDR _SB_MAKE64(5)
+#define M_DMA_DSCRA_A_ADDR _SB_MAKEMASK(35, S_DMA_DSCRA_A_ADDR)
+
+#define M_DMA_DSCRA_A_ADDR_OFFSET (M_DMA_DSCRA_OFFSET | M_DMA_DSCRA_A_ADDR)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define S_DMA_DSCRA_A_ADDR_UA _SB_MAKE64(0)
+#define M_DMA_DSCRA_A_ADDR_UA _SB_MAKEMASK(40, S_DMA_DSCRA_A_ADDR_UA)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+#define S_DMA_DSCRA_A_SIZE _SB_MAKE64(40)
+#define M_DMA_DSCRA_A_SIZE _SB_MAKEMASK(9, S_DMA_DSCRA_A_SIZE)
+#define V_DMA_DSCRA_A_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRA_A_SIZE)
+#define G_DMA_DSCRA_A_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRA_A_SIZE, M_DMA_DSCRA_A_SIZE)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define S_DMA_DSCRA_DSCR_CNT _SB_MAKE64(40)
+#define M_DMA_DSCRA_DSCR_CNT _SB_MAKEMASK(8, S_DMA_DSCRA_DSCR_CNT)
+#define G_DMA_DSCRA_DSCR_CNT(x) _SB_GETVALUE(x, S_DMA_DSCRA_DSCR_CNT, M_DMA_DSCRA_DSCR_CNT)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+#define M_DMA_DSCRA_INTERRUPT _SB_MAKEMASK1(49)
+#define M_DMA_DSCRA_OFFSETB _SB_MAKEMASK1(50)
+
+#define S_DMA_DSCRA_STATUS _SB_MAKE64(51)
+#define M_DMA_DSCRA_STATUS _SB_MAKEMASK(13, S_DMA_DSCRA_STATUS)
+#define V_DMA_DSCRA_STATUS(x) _SB_MAKEVALUE(x, S_DMA_DSCRA_STATUS)
+#define G_DMA_DSCRA_STATUS(x) _SB_GETVALUE(x, S_DMA_DSCRA_STATUS, M_DMA_DSCRA_STATUS)
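+
+/*
+ * Illustrative sketch (not from the original header): building the "A"
+ * doubleword of a descriptor.  As noted above, the buffer's physical
+ * address is masked in place rather than shifted; "buf_phys" and the
+ * size value are made-up example inputs (size units per the user's
+ * manual).
+ *
+ *	uint64_t dscra = (buf_phys & M_DMA_DSCRA_A_ADDR) |
+ *			 V_DMA_DSCRA_A_SIZE(4) |
+ *			 M_DMA_DSCRA_INTERRUPT;
+ */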
+
+/*
+ * Descriptor doubleword "B" (Table 7-13)
+ */
+
+
+#define S_DMA_DSCRB_OPTIONS _SB_MAKE64(0)
+#define M_DMA_DSCRB_OPTIONS _SB_MAKEMASK(4, S_DMA_DSCRB_OPTIONS)
+#define V_DMA_DSCRB_OPTIONS(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_OPTIONS)
+#define G_DMA_DSCRB_OPTIONS(x) _SB_GETVALUE(x, S_DMA_DSCRB_OPTIONS, M_DMA_DSCRB_OPTIONS)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define S_DMA_DSCRB_A_SIZE _SB_MAKE64(8)
+#define M_DMA_DSCRB_A_SIZE _SB_MAKEMASK(14, S_DMA_DSCRB_A_SIZE)
+#define V_DMA_DSCRB_A_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_A_SIZE)
+#define G_DMA_DSCRB_A_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRB_A_SIZE, M_DMA_DSCRB_A_SIZE)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+#define R_DMA_DSCRB_ADDR _SB_MAKE64(0x10)
+
+/* Note: Don't shift the address over, just mask it with the mask below */
+#define S_DMA_DSCRB_B_ADDR _SB_MAKE64(5)
+#define M_DMA_DSCRB_B_ADDR _SB_MAKEMASK(35, S_DMA_DSCRB_B_ADDR)
+
+#define S_DMA_DSCRB_B_SIZE _SB_MAKE64(40)
+#define M_DMA_DSCRB_B_SIZE _SB_MAKEMASK(9, S_DMA_DSCRB_B_SIZE)
+#define V_DMA_DSCRB_B_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_B_SIZE)
+#define G_DMA_DSCRB_B_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRB_B_SIZE, M_DMA_DSCRB_B_SIZE)
+
+#define M_DMA_DSCRB_B_VALID _SB_MAKEMASK1(49)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define S_DMA_DSCRB_PKT_SIZE_MSB _SB_MAKE64(48)
+#define M_DMA_DSCRB_PKT_SIZE_MSB _SB_MAKEMASK(2, S_DMA_DSCRB_PKT_SIZE_MSB)
+#define V_DMA_DSCRB_PKT_SIZE_MSB(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_PKT_SIZE_MSB)
+#define G_DMA_DSCRB_PKT_SIZE_MSB(x) _SB_GETVALUE(x, S_DMA_DSCRB_PKT_SIZE_MSB, M_DMA_DSCRB_PKT_SIZE_MSB)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+#define S_DMA_DSCRB_PKT_SIZE _SB_MAKE64(50)
+#define M_DMA_DSCRB_PKT_SIZE _SB_MAKEMASK(14, S_DMA_DSCRB_PKT_SIZE)
+#define V_DMA_DSCRB_PKT_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_PKT_SIZE)
+#define G_DMA_DSCRB_PKT_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRB_PKT_SIZE, M_DMA_DSCRB_PKT_SIZE)
+
+/*
+ * From pass 2 onward, some bits in dscr_b are also used for RX status.
+ */
+#define S_DMA_DSCRB_STATUS _SB_MAKE64(0)
+#define M_DMA_DSCRB_STATUS _SB_MAKEMASK(1, S_DMA_DSCRB_STATUS)
+#define V_DMA_DSCRB_STATUS(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_STATUS)
+#define G_DMA_DSCRB_STATUS(x) _SB_GETVALUE(x, S_DMA_DSCRB_STATUS, M_DMA_DSCRB_STATUS)
+
+/*
+ * Ethernet Descriptor Status Bits (Table 7-15)
+ */
+
+#define M_DMA_ETHRX_BADIP4CS _SB_MAKEMASK1(51)
+#define M_DMA_ETHRX_DSCRERR _SB_MAKEMASK1(52)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+/* Note: This bit is in the DSCR_B options field */
+#define M_DMA_ETHRX_BADTCPCS _SB_MAKEMASK1(0)
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+/* Note: These bits are in the DSCR_B options field */
+#define M_DMA_ETH_VLAN_FLAG _SB_MAKEMASK1(1)
+#define M_DMA_ETH_CRC_FLAG _SB_MAKEMASK1(2)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+#define S_DMA_ETHRX_RXCH 53
+#define M_DMA_ETHRX_RXCH _SB_MAKEMASK(2, S_DMA_ETHRX_RXCH)
+#define V_DMA_ETHRX_RXCH(x) _SB_MAKEVALUE(x, S_DMA_ETHRX_RXCH)
+#define G_DMA_ETHRX_RXCH(x) _SB_GETVALUE(x, S_DMA_ETHRX_RXCH, M_DMA_ETHRX_RXCH)
+
+#define S_DMA_ETHRX_PKTTYPE 55
+#define M_DMA_ETHRX_PKTTYPE _SB_MAKEMASK(3, S_DMA_ETHRX_PKTTYPE)
+#define V_DMA_ETHRX_PKTTYPE(x) _SB_MAKEVALUE(x, S_DMA_ETHRX_PKTTYPE)
+#define G_DMA_ETHRX_PKTTYPE(x) _SB_GETVALUE(x, S_DMA_ETHRX_PKTTYPE, M_DMA_ETHRX_PKTTYPE)
+
+#define K_DMA_ETHRX_PKTTYPE_IPV4 0
+#define K_DMA_ETHRX_PKTTYPE_ARPV4 1
+#define K_DMA_ETHRX_PKTTYPE_802 2
+#define K_DMA_ETHRX_PKTTYPE_OTHER 3
+#define K_DMA_ETHRX_PKTTYPE_USER0 4
+#define K_DMA_ETHRX_PKTTYPE_USER1 5
+#define K_DMA_ETHRX_PKTTYPE_USER2 6
+#define K_DMA_ETHRX_PKTTYPE_USER3 7
+
+#define M_DMA_ETHRX_MATCH_HASH _SB_MAKEMASK1(58)
+#define M_DMA_ETHRX_MATCH_EXACT _SB_MAKEMASK1(59)
+#define M_DMA_ETHRX_BCAST _SB_MAKEMASK1(60)
+#define M_DMA_ETHRX_MCAST _SB_MAKEMASK1(61)
+#define M_DMA_ETHRX_BAD _SB_MAKEMASK1(62)
+#define M_DMA_ETHRX_SOP _SB_MAKEMASK1(63)
+
+/*
+ * Ethernet Transmit Status Bits (Table 7-16)
+ */
+
+#define M_DMA_ETHTX_SOP _SB_MAKEMASK1(63)
+
+/*
+ * Ethernet Transmit Options (Table 7-17)
+ */
+
+#define K_DMA_ETHTX_NOTSOP _SB_MAKE64(0x00)
+#define K_DMA_ETHTX_APPENDCRC _SB_MAKE64(0x01)
+#define K_DMA_ETHTX_REPLACECRC _SB_MAKE64(0x02)
+#define K_DMA_ETHTX_APPENDCRC_APPENDPAD _SB_MAKE64(0x03)
+#define K_DMA_ETHTX_APPENDVLAN_REPLACECRC _SB_MAKE64(0x04)
+#define K_DMA_ETHTX_REMOVEVLAN_REPLACECRC _SB_MAKE64(0x05)
+#define K_DMA_ETHTX_REPLACEVLAN_REPLACECRC _SB_MAKE64(0x06)
+#define K_DMA_ETHTX_NOMODS _SB_MAKE64(0x07)
+#define K_DMA_ETHTX_RESERVED1 _SB_MAKE64(0x08)
+#define K_DMA_ETHTX_REPLACESADDR_APPENDCRC _SB_MAKE64(0x09)
+#define K_DMA_ETHTX_REPLACESADDR_REPLACECRC _SB_MAKE64(0x0A)
+#define K_DMA_ETHTX_REPLACESADDR_APPENDCRC_APPENDPAD _SB_MAKE64(0x0B)
+#define K_DMA_ETHTX_REPLACESADDR_APPENDVLAN_REPLACECRC _SB_MAKE64(0x0C)
+#define K_DMA_ETHTX_REPLACESADDR_REMOVEVLAN_REPLACECRC _SB_MAKE64(0x0D)
+#define K_DMA_ETHTX_REPLACESADDR_REPLACEVLAN_REPLACECRC _SB_MAKE64(0x0E)
+#define K_DMA_ETHTX_RESERVED2 _SB_MAKE64(0x0F)
+
+/*
+ * Serial Receive Options (Table 7-18)
+ */
+#define M_DMA_SERRX_CRC_ERROR _SB_MAKEMASK1(56)
+#define M_DMA_SERRX_ABORT _SB_MAKEMASK1(57)
+#define M_DMA_SERRX_OCTET_ERROR _SB_MAKEMASK1(58)
+#define M_DMA_SERRX_LONGFRAME_ERROR _SB_MAKEMASK1(59)
+#define M_DMA_SERRX_SHORTFRAME_ERROR _SB_MAKEMASK1(60)
+#define M_DMA_SERRX_OVERRUN_ERROR _SB_MAKEMASK1(61)
+#define M_DMA_SERRX_GOOD _SB_MAKEMASK1(62)
+#define M_DMA_SERRX_SOP _SB_MAKEMASK1(63)
+
+/*
+ * Serial Transmit Status Bits (Table 7-20)
+ */
+
+#define M_DMA_SERTX_FLAG _SB_MAKEMASK1(63)
+
+/*
+ * Serial Transmit Options (Table 7-21)
+ */
+
+#define K_DMA_SERTX_RESERVED _SB_MAKEMASK1(0)
+#define K_DMA_SERTX_APPENDCRC _SB_MAKEMASK1(1)
+#define K_DMA_SERTX_APPENDPAD _SB_MAKEMASK1(2)
+#define K_DMA_SERTX_ABORT _SB_MAKEMASK1(3)
+
+
+/* *********************************************************************
+ * Data Mover Registers
+ ********************************************************************* */
+
+/*
+ * Data Mover Descriptor Base Address Register (Table 7-22)
+ * Register: DM_DSCR_BASE_0
+ * Register: DM_DSCR_BASE_1
+ * Register: DM_DSCR_BASE_2
+ * Register: DM_DSCR_BASE_3
+ */
+
+#define M_DM_DSCR_BASE_MBZ _SB_MAKEMASK(4, 0)
+
+/* Note: Just mask the base address and then OR it in. */
+#define S_DM_DSCR_BASE_ADDR _SB_MAKE64(4)
+#define M_DM_DSCR_BASE_ADDR _SB_MAKEMASK(36, S_DM_DSCR_BASE_ADDR)
+
+#define S_DM_DSCR_BASE_RINGSZ _SB_MAKE64(40)
+#define M_DM_DSCR_BASE_RINGSZ _SB_MAKEMASK(16, S_DM_DSCR_BASE_RINGSZ)
+#define V_DM_DSCR_BASE_RINGSZ(x) _SB_MAKEVALUE(x, S_DM_DSCR_BASE_RINGSZ)
+#define G_DM_DSCR_BASE_RINGSZ(x) _SB_GETVALUE(x, S_DM_DSCR_BASE_RINGSZ, M_DM_DSCR_BASE_RINGSZ)
+
+#define S_DM_DSCR_BASE_PRIORITY _SB_MAKE64(56)
+#define M_DM_DSCR_BASE_PRIORITY _SB_MAKEMASK(3, S_DM_DSCR_BASE_PRIORITY)
+#define V_DM_DSCR_BASE_PRIORITY(x) _SB_MAKEVALUE(x, S_DM_DSCR_BASE_PRIORITY)
+#define G_DM_DSCR_BASE_PRIORITY(x) _SB_GETVALUE(x, S_DM_DSCR_BASE_PRIORITY, M_DM_DSCR_BASE_PRIORITY)
+
+#define K_DM_DSCR_BASE_PRIORITY_1 0
+#define K_DM_DSCR_BASE_PRIORITY_2 1
+#define K_DM_DSCR_BASE_PRIORITY_4 2
+#define K_DM_DSCR_BASE_PRIORITY_8 3
+#define K_DM_DSCR_BASE_PRIORITY_16 4
+
+#define M_DM_DSCR_BASE_ACTIVE _SB_MAKEMASK1(59)
+#define M_DM_DSCR_BASE_INTERRUPT _SB_MAKEMASK1(60)
+#define M_DM_DSCR_BASE_RESET _SB_MAKEMASK1(61) /* write register */
+#define M_DM_DSCR_BASE_ERROR _SB_MAKEMASK1(61) /* read register */
+#define M_DM_DSCR_BASE_ABORT _SB_MAKEMASK1(62)
+#define M_DM_DSCR_BASE_ENABL _SB_MAKEMASK1(63)
+
+/*
+ * Data Mover Descriptor Count Register (Table 7-25)
+ */
+
+/* no bitfields */
+
+/*
+ * Data Mover Current Descriptor Address (Table 7-24)
+ * Register: DM_CUR_DSCR_ADDR_0
+ * Register: DM_CUR_DSCR_ADDR_1
+ * Register: DM_CUR_DSCR_ADDR_2
+ * Register: DM_CUR_DSCR_ADDR_3
+ */
+
+#define S_DM_CUR_DSCR_DSCR_ADDR _SB_MAKE64(0)
+#define M_DM_CUR_DSCR_DSCR_ADDR _SB_MAKEMASK(40, S_DM_CUR_DSCR_DSCR_ADDR)
+
+#define S_DM_CUR_DSCR_DSCR_COUNT _SB_MAKE64(48)
+#define M_DM_CUR_DSCR_DSCR_COUNT _SB_MAKEMASK(16, S_DM_CUR_DSCR_DSCR_COUNT)
+#define V_DM_CUR_DSCR_DSCR_COUNT(r) _SB_MAKEVALUE(r, S_DM_CUR_DSCR_DSCR_COUNT)
+#define G_DM_CUR_DSCR_DSCR_COUNT(r) _SB_GETVALUE(r, S_DM_CUR_DSCR_DSCR_COUNT,\
+ M_DM_CUR_DSCR_DSCR_COUNT)
+
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+/*
+ * Data Mover Channel Partial Result Registers
+ * Register: DM_PARTIAL_0
+ * Register: DM_PARTIAL_1
+ * Register: DM_PARTIAL_2
+ * Register: DM_PARTIAL_3
+ */
+#define S_DM_PARTIAL_CRC_PARTIAL _SB_MAKE64(0)
+#define M_DM_PARTIAL_CRC_PARTIAL _SB_MAKEMASK(32, S_DM_PARTIAL_CRC_PARTIAL)
+#define V_DM_PARTIAL_CRC_PARTIAL(r) _SB_MAKEVALUE(r, S_DM_PARTIAL_CRC_PARTIAL)
+#define G_DM_PARTIAL_CRC_PARTIAL(r) _SB_GETVALUE(r, S_DM_PARTIAL_CRC_PARTIAL,\
+ M_DM_PARTIAL_CRC_PARTIAL)
+
+#define S_DM_PARTIAL_TCPCS_PARTIAL _SB_MAKE64(32)
+#define M_DM_PARTIAL_TCPCS_PARTIAL _SB_MAKEMASK(16, S_DM_PARTIAL_TCPCS_PARTIAL)
+#define V_DM_PARTIAL_TCPCS_PARTIAL(r) _SB_MAKEVALUE(r, S_DM_PARTIAL_TCPCS_PARTIAL)
+#define G_DM_PARTIAL_TCPCS_PARTIAL(r) _SB_GETVALUE(r, S_DM_PARTIAL_TCPCS_PARTIAL,\
+ M_DM_PARTIAL_TCPCS_PARTIAL)
+
+#define M_DM_PARTIAL_ODD_BYTE _SB_MAKEMASK1(48)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+/*
+ * Data Mover CRC Definition Registers
+ * Register: CRC_DEF_0
+ * Register: CRC_DEF_1
+ */
+#define S_CRC_DEF_CRC_INIT _SB_MAKE64(0)
+#define M_CRC_DEF_CRC_INIT _SB_MAKEMASK(32, S_CRC_DEF_CRC_INIT)
+#define V_CRC_DEF_CRC_INIT(r) _SB_MAKEVALUE(r, S_CRC_DEF_CRC_INIT)
+#define G_CRC_DEF_CRC_INIT(r) _SB_GETVALUE(r, S_CRC_DEF_CRC_INIT,\
+ M_CRC_DEF_CRC_INIT)
+
+#define S_CRC_DEF_CRC_POLY _SB_MAKE64(32)
+#define M_CRC_DEF_CRC_POLY _SB_MAKEMASK(32, S_CRC_DEF_CRC_POLY)
+#define V_CRC_DEF_CRC_POLY(r) _SB_MAKEVALUE(r, S_CRC_DEF_CRC_POLY)
+#define G_CRC_DEF_CRC_POLY(r) _SB_GETVALUE(r, S_CRC_DEF_CRC_POLY,\
+ M_CRC_DEF_CRC_POLY)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+/*
+ * Data Mover CRC/Checksum Definition Registers
+ * Register: CTCP_DEF_0
+ * Register: CTCP_DEF_1
+ */
+#define S_CTCP_DEF_CRC_TXOR _SB_MAKE64(0)
+#define M_CTCP_DEF_CRC_TXOR _SB_MAKEMASK(32, S_CTCP_DEF_CRC_TXOR)
+#define V_CTCP_DEF_CRC_TXOR(r) _SB_MAKEVALUE(r, S_CTCP_DEF_CRC_TXOR)
+#define G_CTCP_DEF_CRC_TXOR(r) _SB_GETVALUE(r, S_CTCP_DEF_CRC_TXOR,\
+ M_CTCP_DEF_CRC_TXOR)
+
+#define S_CTCP_DEF_TCPCS_INIT _SB_MAKE64(32)
+#define M_CTCP_DEF_TCPCS_INIT _SB_MAKEMASK(16, S_CTCP_DEF_TCPCS_INIT)
+#define V_CTCP_DEF_TCPCS_INIT(r) _SB_MAKEVALUE(r, S_CTCP_DEF_TCPCS_INIT)
+#define G_CTCP_DEF_TCPCS_INIT(r) _SB_GETVALUE(r, S_CTCP_DEF_TCPCS_INIT,\
+ M_CTCP_DEF_TCPCS_INIT)
+
+#define S_CTCP_DEF_CRC_WIDTH _SB_MAKE64(48)
+#define M_CTCP_DEF_CRC_WIDTH _SB_MAKEMASK(2, S_CTCP_DEF_CRC_WIDTH)
+#define V_CTCP_DEF_CRC_WIDTH(r) _SB_MAKEVALUE(r, S_CTCP_DEF_CRC_WIDTH)
+#define G_CTCP_DEF_CRC_WIDTH(r) _SB_GETVALUE(r, S_CTCP_DEF_CRC_WIDTH,\
+ M_CTCP_DEF_CRC_WIDTH)
+
+#define K_CTCP_DEF_CRC_WIDTH_4 0
+#define K_CTCP_DEF_CRC_WIDTH_2 1
+#define K_CTCP_DEF_CRC_WIDTH_1 2
+
+#define M_CTCP_DEF_CRC_BIT_ORDER _SB_MAKEMASK1(50)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+
+/*
+ * Data Mover Descriptor Doubleword "A" (Table 7-26)
+ */
+
+#define S_DM_DSCRA_DST_ADDR _SB_MAKE64(0)
+#define M_DM_DSCRA_DST_ADDR _SB_MAKEMASK(40, S_DM_DSCRA_DST_ADDR)
+
+#define M_DM_DSCRA_UN_DEST _SB_MAKEMASK1(40)
+#define M_DM_DSCRA_UN_SRC _SB_MAKEMASK1(41)
+#define M_DM_DSCRA_INTERRUPT _SB_MAKEMASK1(42)
+#if SIBYTE_HDR_FEATURE_UP_TO(1250, PASS1)
+#define M_DM_DSCRA_THROTTLE _SB_MAKEMASK1(43)
+#endif /* up to 1250 PASS1 */
+
+#define S_DM_DSCRA_DIR_DEST _SB_MAKE64(44)
+#define M_DM_DSCRA_DIR_DEST _SB_MAKEMASK(2, S_DM_DSCRA_DIR_DEST)
+#define V_DM_DSCRA_DIR_DEST(x) _SB_MAKEVALUE(x, S_DM_DSCRA_DIR_DEST)
+#define G_DM_DSCRA_DIR_DEST(x) _SB_GETVALUE(x, S_DM_DSCRA_DIR_DEST, M_DM_DSCRA_DIR_DEST)
+
+#define K_DM_DSCRA_DIR_DEST_INCR 0
+#define K_DM_DSCRA_DIR_DEST_DECR 1
+#define K_DM_DSCRA_DIR_DEST_CONST 2
+
+#define V_DM_DSCRA_DIR_DEST_INCR _SB_MAKEVALUE(K_DM_DSCRA_DIR_DEST_INCR, S_DM_DSCRA_DIR_DEST)
+#define V_DM_DSCRA_DIR_DEST_DECR _SB_MAKEVALUE(K_DM_DSCRA_DIR_DEST_DECR, S_DM_DSCRA_DIR_DEST)
+#define V_DM_DSCRA_DIR_DEST_CONST _SB_MAKEVALUE(K_DM_DSCRA_DIR_DEST_CONST, S_DM_DSCRA_DIR_DEST)
+
+#define S_DM_DSCRA_DIR_SRC _SB_MAKE64(46)
+#define M_DM_DSCRA_DIR_SRC _SB_MAKEMASK(2, S_DM_DSCRA_DIR_SRC)
+#define V_DM_DSCRA_DIR_SRC(x) _SB_MAKEVALUE(x, S_DM_DSCRA_DIR_SRC)
+#define G_DM_DSCRA_DIR_SRC(x) _SB_GETVALUE(x, S_DM_DSCRA_DIR_SRC, M_DM_DSCRA_DIR_SRC)
+
+#define K_DM_DSCRA_DIR_SRC_INCR 0
+#define K_DM_DSCRA_DIR_SRC_DECR 1
+#define K_DM_DSCRA_DIR_SRC_CONST 2
+
+#define V_DM_DSCRA_DIR_SRC_INCR _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_INCR, S_DM_DSCRA_DIR_SRC)
+#define V_DM_DSCRA_DIR_SRC_DECR _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_DECR, S_DM_DSCRA_DIR_SRC)
+#define V_DM_DSCRA_DIR_SRC_CONST _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_CONST, S_DM_DSCRA_DIR_SRC)
+
+
+#define M_DM_DSCRA_ZERO_MEM _SB_MAKEMASK1(48)
+#define M_DM_DSCRA_PREFETCH _SB_MAKEMASK1(49)
+#define M_DM_DSCRA_L2C_DEST _SB_MAKEMASK1(50)
+#define M_DM_DSCRA_L2C_SRC _SB_MAKEMASK1(51)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_DM_DSCRA_RD_BKOFF _SB_MAKEMASK1(52)
+#define M_DM_DSCRA_WR_BKOFF _SB_MAKEMASK1(53)
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_DM_DSCRA_TCPCS_EN _SB_MAKEMASK1(54)
+#define M_DM_DSCRA_TCPCS_RES _SB_MAKEMASK1(55)
+#define M_DM_DSCRA_TCPCS_AP _SB_MAKEMASK1(56)
+#define M_DM_DSCRA_CRC_EN _SB_MAKEMASK1(57)
+#define M_DM_DSCRA_CRC_RES _SB_MAKEMASK1(58)
+#define M_DM_DSCRA_CRC_AP _SB_MAKEMASK1(59)
+#define M_DM_DSCRA_CRC_DFN _SB_MAKEMASK1(60)
+#define M_DM_DSCRA_CRC_XBIT _SB_MAKEMASK1(61)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+#define M_DM_DSCRA_RESERVED2 _SB_MAKEMASK(3, 61)
+
+/*
+ * Data Mover Descriptor Doubleword "B" (Table 7-25)
+ */
+
+#define S_DM_DSCRB_SRC_ADDR _SB_MAKE64(0)
+#define M_DM_DSCRB_SRC_ADDR _SB_MAKEMASK(40, S_DM_DSCRB_SRC_ADDR)
+
+#define S_DM_DSCRB_SRC_LENGTH _SB_MAKE64(40)
+#define M_DM_DSCRB_SRC_LENGTH _SB_MAKEMASK(20, S_DM_DSCRB_SRC_LENGTH)
+#define V_DM_DSCRB_SRC_LENGTH(x) _SB_MAKEVALUE(x, S_DM_DSCRB_SRC_LENGTH)
+#define G_DM_DSCRB_SRC_LENGTH(x) _SB_GETVALUE(x, S_DM_DSCRB_SRC_LENGTH, M_DM_DSCRB_SRC_LENGTH)
+
+
+#endif
diff --git a/arch/mips/include/asm/sibyte/sb1250_genbus.h b/arch/mips/include/asm/sibyte/sb1250_genbus.h
new file mode 100644
index 000000000..ddeb8edf5
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/sb1250_genbus.h
@@ -0,0 +1,461 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * SB1250 Board Support Package
+ *
+ * Generic Bus Constants File: sb1250_genbus.h
+ *
+ * This module contains constants and macros useful for
+ * manipulating the SB1250's Generic Bus interface.
+ *
+ * SB1250 specification level: User's manual 10/21/02
+ * BCM1280 specification level: User's Manual 11/14/03
+ *
+ *********************************************************************
+ *
+ * Copyright 2000, 2001, 2002, 2003
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+
+#ifndef _SB1250_GENBUS_H
+#define _SB1250_GENBUS_H
+
+#include <asm/sibyte/sb1250_defs.h>
+
+/*
+ * Generic Bus Region Configuration Registers (Table 11-4)
+ */
+
+#define S_IO_RDY_ACTIVE 0
+#define M_IO_RDY_ACTIVE _SB_MAKEMASK1(S_IO_RDY_ACTIVE)
+
+#define S_IO_ENA_RDY 1
+#define M_IO_ENA_RDY _SB_MAKEMASK1(S_IO_ENA_RDY)
+
+#define S_IO_WIDTH_SEL 2
+#define M_IO_WIDTH_SEL _SB_MAKEMASK(2, S_IO_WIDTH_SEL)
+#define K_IO_WIDTH_SEL_1 0
+#define K_IO_WIDTH_SEL_2 1
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
+ || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define K_IO_WIDTH_SEL_1L 2
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+#define K_IO_WIDTH_SEL_4 3
+#define V_IO_WIDTH_SEL(x) _SB_MAKEVALUE(x, S_IO_WIDTH_SEL)
+#define G_IO_WIDTH_SEL(x) _SB_GETVALUE(x, S_IO_WIDTH_SEL, M_IO_WIDTH_SEL)
+
+#define S_IO_PARITY_ENA 4
+#define M_IO_PARITY_ENA _SB_MAKEMASK1(S_IO_PARITY_ENA)
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
+ || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define S_IO_BURST_EN 5
+#define M_IO_BURST_EN _SB_MAKEMASK1(S_IO_BURST_EN)
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+#define S_IO_PARITY_ODD 6
+#define M_IO_PARITY_ODD _SB_MAKEMASK1(S_IO_PARITY_ODD)
+#define S_IO_NONMUX 7
+#define M_IO_NONMUX _SB_MAKEMASK1(S_IO_NONMUX)
+
+#define S_IO_TIMEOUT 8
+#define M_IO_TIMEOUT _SB_MAKEMASK(8, S_IO_TIMEOUT)
+#define V_IO_TIMEOUT(x) _SB_MAKEVALUE(x, S_IO_TIMEOUT)
+#define G_IO_TIMEOUT(x) _SB_GETVALUE(x, S_IO_TIMEOUT, M_IO_TIMEOUT)
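+
+/*
+ * Illustrative sketch (not from the original header): composing a Generic
+ * Bus region configuration value from the fields above.  The width,
+ * timeout and flag choices are arbitrary example values.
+ *
+ *	uint64_t cfg = V_IO_WIDTH_SEL(K_IO_WIDTH_SEL_2) |
+ *		       M_IO_NONMUX |
+ *		       V_IO_TIMEOUT(0xff);
+ */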
+
+/*
+ * Generic Bus Region Size register (Table 11-5)
+ */
+
+#define S_IO_MULT_SIZE 0
+#define M_IO_MULT_SIZE _SB_MAKEMASK(12, S_IO_MULT_SIZE)
+#define V_IO_MULT_SIZE(x) _SB_MAKEVALUE(x, S_IO_MULT_SIZE)
+#define G_IO_MULT_SIZE(x) _SB_GETVALUE(x, S_IO_MULT_SIZE, M_IO_MULT_SIZE)
+
+#define S_IO_REGSIZE 16 /* # bits to shift size for this reg */
+
+/*
+ * Generic Bus Region Address (Table 11-6)
+ */
+
+#define S_IO_START_ADDR 0
+#define M_IO_START_ADDR _SB_MAKEMASK(14, S_IO_START_ADDR)
+#define V_IO_START_ADDR(x) _SB_MAKEVALUE(x, S_IO_START_ADDR)
+#define G_IO_START_ADDR(x) _SB_GETVALUE(x, S_IO_START_ADDR, M_IO_START_ADDR)
+
+#define S_IO_ADDRBASE 16 /* # bits to shift addr for this reg */
+
+#define M_IO_BLK_CACHE _SB_MAKEMASK1(15)
+
+
+/*
+ * Generic Bus Timing 0 Registers (Table 11-7)
+ */
+
+#define S_IO_ALE_WIDTH 0
+#define M_IO_ALE_WIDTH _SB_MAKEMASK(3, S_IO_ALE_WIDTH)
+#define V_IO_ALE_WIDTH(x) _SB_MAKEVALUE(x, S_IO_ALE_WIDTH)
+#define G_IO_ALE_WIDTH(x) _SB_GETVALUE(x, S_IO_ALE_WIDTH, M_IO_ALE_WIDTH)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
+ || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_IO_EARLY_CS _SB_MAKEMASK1(3)
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+
+#define S_IO_ALE_TO_CS 4
+#define M_IO_ALE_TO_CS _SB_MAKEMASK(2, S_IO_ALE_TO_CS)
+#define V_IO_ALE_TO_CS(x) _SB_MAKEVALUE(x, S_IO_ALE_TO_CS)
+#define G_IO_ALE_TO_CS(x) _SB_GETVALUE(x, S_IO_ALE_TO_CS, M_IO_ALE_TO_CS)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
+ || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define S_IO_BURST_WIDTH _SB_MAKE64(6)
+#define M_IO_BURST_WIDTH _SB_MAKEMASK(2, S_IO_BURST_WIDTH)
+#define V_IO_BURST_WIDTH(x) _SB_MAKEVALUE(x, S_IO_BURST_WIDTH)
+#define G_IO_BURST_WIDTH(x) _SB_GETVALUE(x, S_IO_BURST_WIDTH, M_IO_BURST_WIDTH)
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+
+#define S_IO_CS_WIDTH 8
+#define M_IO_CS_WIDTH _SB_MAKEMASK(5, S_IO_CS_WIDTH)
+#define V_IO_CS_WIDTH(x) _SB_MAKEVALUE(x, S_IO_CS_WIDTH)
+#define G_IO_CS_WIDTH(x) _SB_GETVALUE(x, S_IO_CS_WIDTH, M_IO_CS_WIDTH)
+
+#define S_IO_RDY_SMPLE 13
+#define M_IO_RDY_SMPLE _SB_MAKEMASK(3, S_IO_RDY_SMPLE)
+#define V_IO_RDY_SMPLE(x) _SB_MAKEVALUE(x, S_IO_RDY_SMPLE)
+#define G_IO_RDY_SMPLE(x) _SB_GETVALUE(x, S_IO_RDY_SMPLE, M_IO_RDY_SMPLE)
+
+
+/*
+ * Generic Bus Timing 1 Registers (Table 11-8)
+ */
+
+#define S_IO_ALE_TO_WRITE 0
+#define M_IO_ALE_TO_WRITE _SB_MAKEMASK(3, S_IO_ALE_TO_WRITE)
+#define V_IO_ALE_TO_WRITE(x) _SB_MAKEVALUE(x, S_IO_ALE_TO_WRITE)
+#define G_IO_ALE_TO_WRITE(x) _SB_GETVALUE(x, S_IO_ALE_TO_WRITE, M_IO_ALE_TO_WRITE)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
+ || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_IO_RDY_SYNC _SB_MAKEMASK1(3)
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+
+#define S_IO_WRITE_WIDTH 4
+#define M_IO_WRITE_WIDTH _SB_MAKEMASK(4, S_IO_WRITE_WIDTH)
+#define V_IO_WRITE_WIDTH(x) _SB_MAKEVALUE(x, S_IO_WRITE_WIDTH)
+#define G_IO_WRITE_WIDTH(x) _SB_GETVALUE(x, S_IO_WRITE_WIDTH, M_IO_WRITE_WIDTH)
+
+#define S_IO_IDLE_CYCLE 8
+#define M_IO_IDLE_CYCLE _SB_MAKEMASK(4, S_IO_IDLE_CYCLE)
+#define V_IO_IDLE_CYCLE(x) _SB_MAKEVALUE(x, S_IO_IDLE_CYCLE)
+#define G_IO_IDLE_CYCLE(x) _SB_GETVALUE(x, S_IO_IDLE_CYCLE, M_IO_IDLE_CYCLE)
+
+#define S_IO_OE_TO_CS 12
+#define M_IO_OE_TO_CS _SB_MAKEMASK(2, S_IO_OE_TO_CS)
+#define V_IO_OE_TO_CS(x) _SB_MAKEVALUE(x, S_IO_OE_TO_CS)
+#define G_IO_OE_TO_CS(x) _SB_GETVALUE(x, S_IO_OE_TO_CS, M_IO_OE_TO_CS)
+
+#define S_IO_CS_TO_OE 14
+#define M_IO_CS_TO_OE _SB_MAKEMASK(2, S_IO_CS_TO_OE)
+#define V_IO_CS_TO_OE(x) _SB_MAKEVALUE(x, S_IO_CS_TO_OE)
+#define G_IO_CS_TO_OE(x) _SB_GETVALUE(x, S_IO_CS_TO_OE, M_IO_CS_TO_OE)
+
+/*
+ * Generic Bus Interrupt Status Register (Table 11-9)
+ */
+
+#define M_IO_CS_ERR_INT _SB_MAKEMASK(8, 0)
+#define M_IO_CS0_ERR_INT _SB_MAKEMASK1(0)
+#define M_IO_CS1_ERR_INT _SB_MAKEMASK1(1)
+#define M_IO_CS2_ERR_INT _SB_MAKEMASK1(2)
+#define M_IO_CS3_ERR_INT _SB_MAKEMASK1(3)
+#define M_IO_CS4_ERR_INT _SB_MAKEMASK1(4)
+#define M_IO_CS5_ERR_INT _SB_MAKEMASK1(5)
+#define M_IO_CS6_ERR_INT _SB_MAKEMASK1(6)
+#define M_IO_CS7_ERR_INT _SB_MAKEMASK1(7)
+
+#define M_IO_RD_PAR_INT _SB_MAKEMASK1(9)
+#define M_IO_TIMEOUT_INT _SB_MAKEMASK1(10)
+#define M_IO_ILL_ADDR_INT _SB_MAKEMASK1(11)
+#define M_IO_MULT_CS_INT _SB_MAKEMASK1(12)
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_IO_COH_ERR _SB_MAKEMASK1(14)
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+
+
+/*
+ * Generic Bus Output Drive Control Register 0 (Table 14-18)
+ */
+
+#define S_IO_SLEW0 0
+#define M_IO_SLEW0 _SB_MAKEMASK(2, S_IO_SLEW0)
+#define V_IO_SLEW0(x) _SB_MAKEVALUE(x, S_IO_SLEW0)
+#define G_IO_SLEW0(x) _SB_GETVALUE(x, S_IO_SLEW0, M_IO_SLEW0)
+
+#define S_IO_DRV_A 2
+#define M_IO_DRV_A _SB_MAKEMASK(2, S_IO_DRV_A)
+#define V_IO_DRV_A(x) _SB_MAKEVALUE(x, S_IO_DRV_A)
+#define G_IO_DRV_A(x) _SB_GETVALUE(x, S_IO_DRV_A, M_IO_DRV_A)
+
+#define S_IO_DRV_B 6
+#define M_IO_DRV_B _SB_MAKEMASK(2, S_IO_DRV_B)
+#define V_IO_DRV_B(x) _SB_MAKEVALUE(x, S_IO_DRV_B)
+#define G_IO_DRV_B(x) _SB_GETVALUE(x, S_IO_DRV_B, M_IO_DRV_B)
+
+#define S_IO_DRV_C 10
+#define M_IO_DRV_C _SB_MAKEMASK(2, S_IO_DRV_C)
+#define V_IO_DRV_C(x) _SB_MAKEVALUE(x, S_IO_DRV_C)
+#define G_IO_DRV_C(x) _SB_GETVALUE(x, S_IO_DRV_C, M_IO_DRV_C)
+
+#define S_IO_DRV_D 14
+#define M_IO_DRV_D _SB_MAKEMASK(2, S_IO_DRV_D)
+#define V_IO_DRV_D(x) _SB_MAKEVALUE(x, S_IO_DRV_D)
+#define G_IO_DRV_D(x) _SB_GETVALUE(x, S_IO_DRV_D, M_IO_DRV_D)
+
+/*
+ * Generic Bus Output Drive Control Register 1 (Table 14-19)
+ */
+
+#define S_IO_DRV_E 2
+#define M_IO_DRV_E _SB_MAKEMASK(2, S_IO_DRV_E)
+#define V_IO_DRV_E(x) _SB_MAKEVALUE(x, S_IO_DRV_E)
+#define G_IO_DRV_E(x) _SB_GETVALUE(x, S_IO_DRV_E, M_IO_DRV_E)
+
+#define S_IO_DRV_F 6
+#define M_IO_DRV_F _SB_MAKEMASK(2, S_IO_DRV_F)
+#define V_IO_DRV_F(x) _SB_MAKEVALUE(x, S_IO_DRV_F)
+#define G_IO_DRV_F(x) _SB_GETVALUE(x, S_IO_DRV_F, M_IO_DRV_F)
+
+#define S_IO_SLEW1 8
+#define M_IO_SLEW1 _SB_MAKEMASK(2, S_IO_SLEW1)
+#define V_IO_SLEW1(x) _SB_MAKEVALUE(x, S_IO_SLEW1)
+#define G_IO_SLEW1(x) _SB_GETVALUE(x, S_IO_SLEW1, M_IO_SLEW1)
+
+#define S_IO_DRV_G 10
+#define M_IO_DRV_G _SB_MAKEMASK(2, S_IO_DRV_G)
+#define V_IO_DRV_G(x) _SB_MAKEVALUE(x, S_IO_DRV_G)
+#define G_IO_DRV_G(x) _SB_GETVALUE(x, S_IO_DRV_G, M_IO_DRV_G)
+
+#define S_IO_SLEW2 12
+#define M_IO_SLEW2 _SB_MAKEMASK(2, S_IO_SLEW2)
+#define V_IO_SLEW2(x) _SB_MAKEVALUE(x, S_IO_SLEW2)
+#define G_IO_SLEW2(x) _SB_GETVALUE(x, S_IO_SLEW2, M_IO_SLEW2)
+
+#define S_IO_DRV_H 14
+#define M_IO_DRV_H _SB_MAKEMASK(2, S_IO_DRV_H)
+#define V_IO_DRV_H(x) _SB_MAKEVALUE(x, S_IO_DRV_H)
+#define G_IO_DRV_H(x) _SB_GETVALUE(x, S_IO_DRV_H, M_IO_DRV_H)
+
+/*
+ * Generic Bus Output Drive Control Register 2 (Table 14-20)
+ */
+
+#define S_IO_DRV_J 2
+#define M_IO_DRV_J _SB_MAKEMASK(2, S_IO_DRV_J)
+#define V_IO_DRV_J(x) _SB_MAKEVALUE(x, S_IO_DRV_J)
+#define G_IO_DRV_J(x) _SB_GETVALUE(x, S_IO_DRV_J, M_IO_DRV_J)
+
+#define S_IO_DRV_K 6
+#define M_IO_DRV_K _SB_MAKEMASK(2, S_IO_DRV_K)
+#define V_IO_DRV_K(x) _SB_MAKEVALUE(x, S_IO_DRV_K)
+#define G_IO_DRV_K(x) _SB_GETVALUE(x, S_IO_DRV_K, M_IO_DRV_K)
+
+#define S_IO_DRV_L 10
+#define M_IO_DRV_L _SB_MAKEMASK(2, S_IO_DRV_L)
+#define V_IO_DRV_L(x) _SB_MAKEVALUE(x, S_IO_DRV_L)
+#define G_IO_DRV_L(x) _SB_GETVALUE(x, S_IO_DRV_L, M_IO_DRV_L)
+
+#define S_IO_DRV_M 14
+#define M_IO_DRV_M _SB_MAKEMASK(2, S_IO_DRV_M)
+#define V_IO_DRV_M(x) _SB_MAKEVALUE(x, S_IO_DRV_M)
+#define G_IO_DRV_M(x) _SB_GETVALUE(x, S_IO_DRV_M, M_IO_DRV_M)
+
+/*
+ * Generic Bus Output Drive Control Register 3 (Table 14-21)
+ */
+
+#define S_IO_SLEW3 0
+#define M_IO_SLEW3 _SB_MAKEMASK(2, S_IO_SLEW3)
+#define V_IO_SLEW3(x) _SB_MAKEVALUE(x, S_IO_SLEW3)
+#define G_IO_SLEW3(x) _SB_GETVALUE(x, S_IO_SLEW3, M_IO_SLEW3)
+
+#define S_IO_DRV_N 2
+#define M_IO_DRV_N _SB_MAKEMASK(2, S_IO_DRV_N)
+#define V_IO_DRV_N(x) _SB_MAKEVALUE(x, S_IO_DRV_N)
+#define G_IO_DRV_N(x) _SB_GETVALUE(x, S_IO_DRV_N, M_IO_DRV_N)
+
+#define S_IO_DRV_P 6
+#define M_IO_DRV_P _SB_MAKEMASK(2, S_IO_DRV_P)
+#define V_IO_DRV_P(x) _SB_MAKEVALUE(x, S_IO_DRV_P)
+#define G_IO_DRV_P(x) _SB_GETVALUE(x, S_IO_DRV_P, M_IO_DRV_P)
+
+#define S_IO_DRV_Q 10
+#define M_IO_DRV_Q _SB_MAKEMASK(2, S_IO_DRV_Q)
+#define V_IO_DRV_Q(x) _SB_MAKEVALUE(x, S_IO_DRV_Q)
+#define G_IO_DRV_Q(x) _SB_GETVALUE(x, S_IO_DRV_Q, M_IO_DRV_Q)
+
+#define S_IO_DRV_R 14
+#define M_IO_DRV_R _SB_MAKEMASK(2, S_IO_DRV_R)
+#define V_IO_DRV_R(x) _SB_MAKEVALUE(x, S_IO_DRV_R)
+#define G_IO_DRV_R(x) _SB_GETVALUE(x, S_IO_DRV_R, M_IO_DRV_R)
+
+
+/*
+ * PCMCIA configuration register (Table 12-6)
+ */
+
+#define M_PCMCIA_CFG_ATTRMEM _SB_MAKEMASK1(0)
+#define M_PCMCIA_CFG_3VEN _SB_MAKEMASK1(1)
+#define M_PCMCIA_CFG_5VEN _SB_MAKEMASK1(2)
+#define M_PCMCIA_CFG_VPPEN _SB_MAKEMASK1(3)
+#define M_PCMCIA_CFG_RESET _SB_MAKEMASK1(4)
+#define M_PCMCIA_CFG_APWRONEN _SB_MAKEMASK1(5)
+#define M_PCMCIA_CFG_CDMASK _SB_MAKEMASK1(6)
+#define M_PCMCIA_CFG_WPMASK _SB_MAKEMASK1(7)
+#define M_PCMCIA_CFG_RDYMASK _SB_MAKEMASK1(8)
+#define M_PCMCIA_CFG_PWRCTL _SB_MAKEMASK1(9)
+
+#if SIBYTE_HDR_FEATURE_CHIP(1480)
+#define S_PCMCIA_MODE 16
+#define M_PCMCIA_MODE _SB_MAKEMASK(3, S_PCMCIA_MODE)
+#define V_PCMCIA_MODE(x) _SB_MAKEVALUE(x, S_PCMCIA_MODE)
+#define G_PCMCIA_MODE(x) _SB_GETVALUE(x, S_PCMCIA_MODE, M_PCMCIA_MODE)
+
+#define K_PCMCIA_MODE_PCMA_NOB 0 /* standard PCMCIA "A", no "B" */
+#define K_PCMCIA_MODE_IDEA_NOB 1 /* IDE "A", no "B" */
+#define K_PCMCIA_MODE_PCMIOA_NOB 2 /* PCMCIA with I/O "A", no "B" */
+#define K_PCMCIA_MODE_PCMA_PCMB 4 /* standard PCMCIA "A", standard PCMCIA "B" */
+#define K_PCMCIA_MODE_IDEA_PCMB 5 /* IDE "A", standard PCMCIA "B" */
+#define K_PCMCIA_MODE_PCMA_IDEB 6 /* standard PCMCIA "A", IDE "B" */
+#define K_PCMCIA_MODE_IDEA_IDEB 7 /* IDE "A", IDE "B" */
+#endif
+
+
+/*
+ * PCMCIA status register (Table 12-7)
+ */
+
+#define M_PCMCIA_STATUS_CD1 _SB_MAKEMASK1(0)
+#define M_PCMCIA_STATUS_CD2 _SB_MAKEMASK1(1)
+#define M_PCMCIA_STATUS_VS1 _SB_MAKEMASK1(2)
+#define M_PCMCIA_STATUS_VS2 _SB_MAKEMASK1(3)
+#define M_PCMCIA_STATUS_WP _SB_MAKEMASK1(4)
+#define M_PCMCIA_STATUS_RDY _SB_MAKEMASK1(5)
+#define M_PCMCIA_STATUS_3VEN _SB_MAKEMASK1(6)
+#define M_PCMCIA_STATUS_5VEN _SB_MAKEMASK1(7)
+#define M_PCMCIA_STATUS_CDCHG _SB_MAKEMASK1(8)
+#define M_PCMCIA_STATUS_WPCHG _SB_MAKEMASK1(9)
+#define M_PCMCIA_STATUS_RDYCHG _SB_MAKEMASK1(10)
+
+/*
+ * GPIO Interrupt Type Register (table 13-3)
+ */
+
+#define K_GPIO_INTR_DISABLE 0
+#define K_GPIO_INTR_EDGE 1
+#define K_GPIO_INTR_LEVEL 2
+#define K_GPIO_INTR_SPLIT 3
+
+#define S_GPIO_INTR_TYPEX(n) (((n)/2)*2)
+#define M_GPIO_INTR_TYPEX(n) _SB_MAKEMASK(2, S_GPIO_INTR_TYPEX(n))
+#define V_GPIO_INTR_TYPEX(n, x) _SB_MAKEVALUE(x, S_GPIO_INTR_TYPEX(n))
+#define G_GPIO_INTR_TYPEX(n, x) _SB_GETVALUE(x, S_GPIO_INTR_TYPEX(n), M_GPIO_INTR_TYPEX(n))
+
+#define S_GPIO_INTR_TYPE0 0
+#define M_GPIO_INTR_TYPE0 _SB_MAKEMASK(2, S_GPIO_INTR_TYPE0)
+#define V_GPIO_INTR_TYPE0(x) _SB_MAKEVALUE(x, S_GPIO_INTR_TYPE0)
+#define G_GPIO_INTR_TYPE0(x) _SB_GETVALUE(x, S_GPIO_INTR_TYPE0, M_GPIO_INTR_TYPE0)
+
+#define S_GPIO_INTR_TYPE2 2
+#define M_GPIO_INTR_TYPE2 _SB_MAKEMASK(2, S_GPIO_INTR_TYPE2)
+#define V_GPIO_INTR_TYPE2(x) _SB_MAKEVALUE(x, S_GPIO_INTR_TYPE2)
+#define G_GPIO_INTR_TYPE2(x) _SB_GETVALUE(x, S_GPIO_INTR_TYPE2, M_GPIO_INTR_TYPE2)
+
+#define S_GPIO_INTR_TYPE4 4
+#define M_GPIO_INTR_TYPE4 _SB_MAKEMASK(2, S_GPIO_INTR_TYPE4)
+#define V_GPIO_INTR_TYPE4(x) _SB_MAKEVALUE(x, S_GPIO_INTR_TYPE4)
+#define G_GPIO_INTR_TYPE4(x) _SB_GETVALUE(x, S_GPIO_INTR_TYPE4, M_GPIO_INTR_TYPE4)
+
+#define S_GPIO_INTR_TYPE6 6
+#define M_GPIO_INTR_TYPE6 _SB_MAKEMASK(2, S_GPIO_INTR_TYPE6)
+#define V_GPIO_INTR_TYPE6(x) _SB_MAKEVALUE(x, S_GPIO_INTR_TYPE6)
+#define G_GPIO_INTR_TYPE6(x) _SB_GETVALUE(x, S_GPIO_INTR_TYPE6, M_GPIO_INTR_TYPE6)
+
+#define S_GPIO_INTR_TYPE8 8
+#define M_GPIO_INTR_TYPE8 _SB_MAKEMASK(2, S_GPIO_INTR_TYPE8)
+#define V_GPIO_INTR_TYPE8(x) _SB_MAKEVALUE(x, S_GPIO_INTR_TYPE8)
+#define G_GPIO_INTR_TYPE8(x) _SB_GETVALUE(x, S_GPIO_INTR_TYPE8, M_GPIO_INTR_TYPE8)
+
+#define S_GPIO_INTR_TYPE10 10
+#define M_GPIO_INTR_TYPE10 _SB_MAKEMASK(2, S_GPIO_INTR_TYPE10)
+#define V_GPIO_INTR_TYPE10(x) _SB_MAKEVALUE(x, S_GPIO_INTR_TYPE10)
+#define G_GPIO_INTR_TYPE10(x) _SB_GETVALUE(x, S_GPIO_INTR_TYPE10, M_GPIO_INTR_TYPE10)
+
+#define S_GPIO_INTR_TYPE12 12
+#define M_GPIO_INTR_TYPE12 _SB_MAKEMASK(2, S_GPIO_INTR_TYPE12)
+#define V_GPIO_INTR_TYPE12(x) _SB_MAKEVALUE(x, S_GPIO_INTR_TYPE12)
+#define G_GPIO_INTR_TYPE12(x) _SB_GETVALUE(x, S_GPIO_INTR_TYPE12, M_GPIO_INTR_TYPE12)
+
+#define S_GPIO_INTR_TYPE14 14
+#define M_GPIO_INTR_TYPE14 _SB_MAKEMASK(2, S_GPIO_INTR_TYPE14)
+#define V_GPIO_INTR_TYPE14(x) _SB_MAKEVALUE(x, S_GPIO_INTR_TYPE14)
+#define G_GPIO_INTR_TYPE14(x) _SB_GETVALUE(x, S_GPIO_INTR_TYPE14, M_GPIO_INTR_TYPE14)
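+
+/*
+ * Usage sketch (illustrative only): selecting a level-triggered interrupt
+ * type in one of the 2-bit fields above with a read-modify-write; "t" is
+ * a made-up variable holding the register contents.
+ *
+ *	t = (t & ~M_GPIO_INTR_TYPE4) |
+ *	    V_GPIO_INTR_TYPE4(K_GPIO_INTR_LEVEL);
+ */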
+
+#if SIBYTE_HDR_FEATURE_CHIP(1480)
+
+/*
+ * GPIO Interrupt Additional Type Register
+ */
+
+#define K_GPIO_INTR_BOTHEDGE 0
+#define K_GPIO_INTR_RISEEDGE 1
+#define K_GPIO_INTR_UNPRED1 2
+#define K_GPIO_INTR_UNPRED2 3
+
+#define S_GPIO_INTR_ATYPEX(n) (((n)/2)*2)
+#define M_GPIO_INTR_ATYPEX(n) _SB_MAKEMASK(2, S_GPIO_INTR_ATYPEX(n))
+#define V_GPIO_INTR_ATYPEX(n, x) _SB_MAKEVALUE(x, S_GPIO_INTR_ATYPEX(n))
+#define G_GPIO_INTR_ATYPEX(n, x) _SB_GETVALUE(x, S_GPIO_INTR_ATYPEX(n), M_GPIO_INTR_ATYPEX(n))
+
+#define S_GPIO_INTR_ATYPE0 0
+#define M_GPIO_INTR_ATYPE0 _SB_MAKEMASK(2, S_GPIO_INTR_ATYPE0)
+#define V_GPIO_INTR_ATYPE0(x) _SB_MAKEVALUE(x, S_GPIO_INTR_ATYPE0)
+#define G_GPIO_INTR_ATYPE0(x) _SB_GETVALUE(x, S_GPIO_INTR_ATYPE0, M_GPIO_INTR_ATYPE0)
+
+#define S_GPIO_INTR_ATYPE2 2
+#define M_GPIO_INTR_ATYPE2 _SB_MAKEMASK(2, S_GPIO_INTR_ATYPE2)
+#define V_GPIO_INTR_ATYPE2(x) _SB_MAKEVALUE(x, S_GPIO_INTR_ATYPE2)
+#define G_GPIO_INTR_ATYPE2(x) _SB_GETVALUE(x, S_GPIO_INTR_ATYPE2, M_GPIO_INTR_ATYPE2)
+
+#define S_GPIO_INTR_ATYPE4 4
+#define M_GPIO_INTR_ATYPE4 _SB_MAKEMASK(2, S_GPIO_INTR_ATYPE4)
+#define V_GPIO_INTR_ATYPE4(x) _SB_MAKEVALUE(x, S_GPIO_INTR_ATYPE4)
+#define G_GPIO_INTR_ATYPE4(x) _SB_GETVALUE(x, S_GPIO_INTR_ATYPE4, M_GPIO_INTR_ATYPE4)
+
+#define S_GPIO_INTR_ATYPE6 6
+#define M_GPIO_INTR_ATYPE6 _SB_MAKEMASK(2, S_GPIO_INTR_ATYPE6)
+#define V_GPIO_INTR_ATYPE6(x) _SB_MAKEVALUE(x, S_GPIO_INTR_ATYPE6)
+#define G_GPIO_INTR_ATYPE6(x) _SB_GETVALUE(x, S_GPIO_INTR_ATYPE6, M_GPIO_INTR_ATYPE6)
+
+#define S_GPIO_INTR_ATYPE8 8
+#define M_GPIO_INTR_ATYPE8 _SB_MAKEMASK(2, S_GPIO_INTR_ATYPE8)
+#define V_GPIO_INTR_ATYPE8(x) _SB_MAKEVALUE(x, S_GPIO_INTR_ATYPE8)
+#define G_GPIO_INTR_ATYPE8(x) _SB_GETVALUE(x, S_GPIO_INTR_ATYPE8, M_GPIO_INTR_ATYPE8)
+
+#define S_GPIO_INTR_ATYPE10 10
+#define M_GPIO_INTR_ATYPE10 _SB_MAKEMASK(2, S_GPIO_INTR_ATYPE10)
+#define V_GPIO_INTR_ATYPE10(x) _SB_MAKEVALUE(x, S_GPIO_INTR_ATYPE10)
+#define G_GPIO_INTR_ATYPE10(x) _SB_GETVALUE(x, S_GPIO_INTR_ATYPE10, M_GPIO_INTR_ATYPE10)
+
+#define S_GPIO_INTR_ATYPE12 12
+#define M_GPIO_INTR_ATYPE12 _SB_MAKEMASK(2, S_GPIO_INTR_ATYPE12)
+#define V_GPIO_INTR_ATYPE12(x) _SB_MAKEVALUE(x, S_GPIO_INTR_ATYPE12)
+#define G_GPIO_INTR_ATYPE12(x) _SB_GETVALUE(x, S_GPIO_INTR_ATYPE12, M_GPIO_INTR_ATYPE12)
+
+#define S_GPIO_INTR_ATYPE14 14
+#define M_GPIO_INTR_ATYPE14 _SB_MAKEMASK(2, S_GPIO_INTR_ATYPE14)
+#define V_GPIO_INTR_ATYPE14(x) _SB_MAKEVALUE(x, S_GPIO_INTR_ATYPE14)
+#define G_GPIO_INTR_ATYPE14(x) _SB_GETVALUE(x, S_GPIO_INTR_ATYPE14, M_GPIO_INTR_ATYPE14)
+#endif
+
+
+#endif
diff --git a/arch/mips/include/asm/sibyte/sb1250_int.h b/arch/mips/include/asm/sibyte/sb1250_int.h
new file mode 100644
index 000000000..2e0adb055
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/sb1250_int.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * SB1250 Board Support Package
+ *
+ * Interrupt Mapper definitions File: sb1250_int.h
+ *
+ * This module contains constants for manipulating the SB1250's
+ * interrupt mapper and definitions for the interrupt sources.
+ *
+ * SB1250 specification level: User's manual 1/02/02
+ *
+ *********************************************************************
+ *
+ * Copyright 2000, 2001, 2002, 2003
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+
+#ifndef _SB1250_INT_H
+#define _SB1250_INT_H
+
+#include <asm/sibyte/sb1250_defs.h>
+
+/* *********************************************************************
+ * Interrupt Mapper Constants
+ ********************************************************************* */
+
+/*
+ * Interrupt sources (Table 4-8, UM 0.2)
+ *
+ * First, the interrupt numbers.
+ */
+
+#define K_INT_SOURCES 64
+
+#define K_INT_WATCHDOG_TIMER_0 0
+#define K_INT_WATCHDOG_TIMER_1 1
+#define K_INT_TIMER_0 2
+#define K_INT_TIMER_1 3
+#define K_INT_TIMER_2 4
+#define K_INT_TIMER_3 5
+#define K_INT_SMB_0 6
+#define K_INT_SMB_1 7
+#define K_INT_UART_0 8
+#define K_INT_UART_1 9
+#define K_INT_SER_0 10
+#define K_INT_SER_1 11
+#define K_INT_PCMCIA 12
+#define K_INT_ADDR_TRAP 13
+#define K_INT_PERF_CNT 14
+#define K_INT_TRACE_FREEZE 15
+#define K_INT_BAD_ECC 16
+#define K_INT_COR_ECC 17
+#define K_INT_IO_BUS 18
+#define K_INT_MAC_0 19
+#define K_INT_MAC_1 20
+#define K_INT_MAC_2 21
+#define K_INT_DM_CH_0 22
+#define K_INT_DM_CH_1 23
+#define K_INT_DM_CH_2 24
+#define K_INT_DM_CH_3 25
+#define K_INT_MBOX_0 26
+#define K_INT_MBOX_1 27
+#define K_INT_MBOX_2 28
+#define K_INT_MBOX_3 29
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define K_INT_CYCLE_CP0_INT 30
+#define K_INT_CYCLE_CP1_INT 31
+#endif /* 1250 PASS2 || 112x PASS1 */
+#define K_INT_GPIO_0 32
+#define K_INT_GPIO_1 33
+#define K_INT_GPIO_2 34
+#define K_INT_GPIO_3 35
+#define K_INT_GPIO_4 36
+#define K_INT_GPIO_5 37
+#define K_INT_GPIO_6 38
+#define K_INT_GPIO_7 39
+#define K_INT_GPIO_8 40
+#define K_INT_GPIO_9 41
+#define K_INT_GPIO_10 42
+#define K_INT_GPIO_11 43
+#define K_INT_GPIO_12 44
+#define K_INT_GPIO_13 45
+#define K_INT_GPIO_14 46
+#define K_INT_GPIO_15 47
+#define K_INT_LDT_FATAL 48
+#define K_INT_LDT_NONFATAL 49
+#define K_INT_LDT_SMI 50
+#define K_INT_LDT_NMI 51
+#define K_INT_LDT_INIT 52
+#define K_INT_LDT_STARTUP 53
+#define K_INT_LDT_EXT 54
+#define K_INT_PCI_ERROR 55
+#define K_INT_PCI_INTA 56
+#define K_INT_PCI_INTB 57
+#define K_INT_PCI_INTC 58
+#define K_INT_PCI_INTD 59
+#define K_INT_SPARE_2 60
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define K_INT_MAC_0_CH1 61
+#define K_INT_MAC_1_CH1 62
+#define K_INT_MAC_2_CH1 63
+#endif /* 1250 PASS2 || 112x PASS1 */
+
+/*
+ * Mask values for each interrupt
+ */
+
+#define M_INT_WATCHDOG_TIMER_0 _SB_MAKEMASK1(K_INT_WATCHDOG_TIMER_0)
+#define M_INT_WATCHDOG_TIMER_1 _SB_MAKEMASK1(K_INT_WATCHDOG_TIMER_1)
+#define M_INT_TIMER_0 _SB_MAKEMASK1(K_INT_TIMER_0)
+#define M_INT_TIMER_1 _SB_MAKEMASK1(K_INT_TIMER_1)
+#define M_INT_TIMER_2 _SB_MAKEMASK1(K_INT_TIMER_2)
+#define M_INT_TIMER_3 _SB_MAKEMASK1(K_INT_TIMER_3)
+#define M_INT_SMB_0 _SB_MAKEMASK1(K_INT_SMB_0)
+#define M_INT_SMB_1 _SB_MAKEMASK1(K_INT_SMB_1)
+#define M_INT_UART_0 _SB_MAKEMASK1(K_INT_UART_0)
+#define M_INT_UART_1 _SB_MAKEMASK1(K_INT_UART_1)
+#define M_INT_SER_0 _SB_MAKEMASK1(K_INT_SER_0)
+#define M_INT_SER_1 _SB_MAKEMASK1(K_INT_SER_1)
+#define M_INT_PCMCIA _SB_MAKEMASK1(K_INT_PCMCIA)
+#define M_INT_ADDR_TRAP _SB_MAKEMASK1(K_INT_ADDR_TRAP)
+#define M_INT_PERF_CNT _SB_MAKEMASK1(K_INT_PERF_CNT)
+#define M_INT_TRACE_FREEZE _SB_MAKEMASK1(K_INT_TRACE_FREEZE)
+#define M_INT_BAD_ECC _SB_MAKEMASK1(K_INT_BAD_ECC)
+#define M_INT_COR_ECC _SB_MAKEMASK1(K_INT_COR_ECC)
+#define M_INT_IO_BUS _SB_MAKEMASK1(K_INT_IO_BUS)
+#define M_INT_MAC_0 _SB_MAKEMASK1(K_INT_MAC_0)
+#define M_INT_MAC_1 _SB_MAKEMASK1(K_INT_MAC_1)
+#define M_INT_MAC_2 _SB_MAKEMASK1(K_INT_MAC_2)
+#define M_INT_DM_CH_0 _SB_MAKEMASK1(K_INT_DM_CH_0)
+#define M_INT_DM_CH_1 _SB_MAKEMASK1(K_INT_DM_CH_1)
+#define M_INT_DM_CH_2 _SB_MAKEMASK1(K_INT_DM_CH_2)
+#define M_INT_DM_CH_3 _SB_MAKEMASK1(K_INT_DM_CH_3)
+#define M_INT_MBOX_0 _SB_MAKEMASK1(K_INT_MBOX_0)
+#define M_INT_MBOX_1 _SB_MAKEMASK1(K_INT_MBOX_1)
+#define M_INT_MBOX_2 _SB_MAKEMASK1(K_INT_MBOX_2)
+#define M_INT_MBOX_3 _SB_MAKEMASK1(K_INT_MBOX_3)
+#define M_INT_MBOX_ALL _SB_MAKEMASK(4, K_INT_MBOX_0)
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define M_INT_CYCLE_CP0_INT _SB_MAKEMASK1(K_INT_CYCLE_CP0_INT)
+#define M_INT_CYCLE_CP1_INT _SB_MAKEMASK1(K_INT_CYCLE_CP1_INT)
+#endif /* 1250 PASS2 || 112x PASS1 */
+#define M_INT_GPIO_0 _SB_MAKEMASK1(K_INT_GPIO_0)
+#define M_INT_GPIO_1 _SB_MAKEMASK1(K_INT_GPIO_1)
+#define M_INT_GPIO_2 _SB_MAKEMASK1(K_INT_GPIO_2)
+#define M_INT_GPIO_3 _SB_MAKEMASK1(K_INT_GPIO_3)
+#define M_INT_GPIO_4 _SB_MAKEMASK1(K_INT_GPIO_4)
+#define M_INT_GPIO_5 _SB_MAKEMASK1(K_INT_GPIO_5)
+#define M_INT_GPIO_6 _SB_MAKEMASK1(K_INT_GPIO_6)
+#define M_INT_GPIO_7 _SB_MAKEMASK1(K_INT_GPIO_7)
+#define M_INT_GPIO_8 _SB_MAKEMASK1(K_INT_GPIO_8)
+#define M_INT_GPIO_9 _SB_MAKEMASK1(K_INT_GPIO_9)
+#define M_INT_GPIO_10 _SB_MAKEMASK1(K_INT_GPIO_10)
+#define M_INT_GPIO_11 _SB_MAKEMASK1(K_INT_GPIO_11)
+#define M_INT_GPIO_12 _SB_MAKEMASK1(K_INT_GPIO_12)
+#define M_INT_GPIO_13 _SB_MAKEMASK1(K_INT_GPIO_13)
+#define M_INT_GPIO_14 _SB_MAKEMASK1(K_INT_GPIO_14)
+#define M_INT_GPIO_15 _SB_MAKEMASK1(K_INT_GPIO_15)
+#define M_INT_LDT_FATAL _SB_MAKEMASK1(K_INT_LDT_FATAL)
+#define M_INT_LDT_NONFATAL _SB_MAKEMASK1(K_INT_LDT_NONFATAL)
+#define M_INT_LDT_SMI _SB_MAKEMASK1(K_INT_LDT_SMI)
+#define M_INT_LDT_NMI _SB_MAKEMASK1(K_INT_LDT_NMI)
+#define M_INT_LDT_INIT _SB_MAKEMASK1(K_INT_LDT_INIT)
+#define M_INT_LDT_STARTUP _SB_MAKEMASK1(K_INT_LDT_STARTUP)
+#define M_INT_LDT_EXT _SB_MAKEMASK1(K_INT_LDT_EXT)
+#define M_INT_PCI_ERROR _SB_MAKEMASK1(K_INT_PCI_ERROR)
+#define M_INT_PCI_INTA _SB_MAKEMASK1(K_INT_PCI_INTA)
+#define M_INT_PCI_INTB _SB_MAKEMASK1(K_INT_PCI_INTB)
+#define M_INT_PCI_INTC _SB_MAKEMASK1(K_INT_PCI_INTC)
+#define M_INT_PCI_INTD _SB_MAKEMASK1(K_INT_PCI_INTD)
+#define M_INT_SPARE_2 _SB_MAKEMASK1(K_INT_SPARE_2)
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define M_INT_MAC_0_CH1 _SB_MAKEMASK1(K_INT_MAC_0_CH1)
+#define M_INT_MAC_1_CH1 _SB_MAKEMASK1(K_INT_MAC_1_CH1)
+#define M_INT_MAC_2_CH1 _SB_MAKEMASK1(K_INT_MAC_2_CH1)
+#endif /* 1250 PASS2 || 112x PASS1 */
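+
+/*
+ * Usage sketch (illustrative only): the single-bit masks above can be
+ * OR-ed together to describe a set of interrupt sources, e.g.
+ *
+ *	uint64_t mask = M_INT_TIMER_0 | M_INT_UART_0 | M_INT_MBOX_ALL;
+ */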
+
+/*
+ * Interrupt mappings
+ */
+
+#define K_INT_MAP_I0 0 /* interrupt pins on processor */
+#define K_INT_MAP_I1 1
+#define K_INT_MAP_I2 2
+#define K_INT_MAP_I3 3
+#define K_INT_MAP_I4 4
+#define K_INT_MAP_I5 5
+#define K_INT_MAP_NMI 6 /* nonmaskable */
+#define K_INT_MAP_DINT 7 /* debug interrupt */
+
+/*
+ * LDT Interrupt Set Register (table 4-5)
+ */
+
+#define S_INT_LDT_INTMSG 0
+#define M_INT_LDT_INTMSG _SB_MAKEMASK(3, S_INT_LDT_INTMSG)
+#define V_INT_LDT_INTMSG(x) _SB_MAKEVALUE(x, S_INT_LDT_INTMSG)
+#define G_INT_LDT_INTMSG(x) _SB_GETVALUE(x, S_INT_LDT_INTMSG, M_INT_LDT_INTMSG)
+
+#define K_INT_LDT_INTMSG_FIXED 0
+#define K_INT_LDT_INTMSG_ARBITRATED 1
+#define K_INT_LDT_INTMSG_SMI 2
+#define K_INT_LDT_INTMSG_NMI 3
+#define K_INT_LDT_INTMSG_INIT 4
+#define K_INT_LDT_INTMSG_STARTUP 5
+#define K_INT_LDT_INTMSG_EXTINT 6
+#define K_INT_LDT_INTMSG_RESERVED 7
+
+#define M_INT_LDT_EDGETRIGGER 0
+#define M_INT_LDT_LEVELTRIGGER _SB_MAKEMASK1(3)
+
+#define M_INT_LDT_PHYSICALDEST 0
+#define M_INT_LDT_LOGICALDEST _SB_MAKEMASK1(4)
+
+#define S_INT_LDT_INTDEST 5
+#define M_INT_LDT_INTDEST _SB_MAKEMASK(10, S_INT_LDT_INTDEST)
+#define V_INT_LDT_INTDEST(x) _SB_MAKEVALUE(x, S_INT_LDT_INTDEST)
+#define G_INT_LDT_INTDEST(x) _SB_GETVALUE(x, S_INT_LDT_INTDEST, M_INT_LDT_INTDEST)
+
+#define S_INT_LDT_VECTOR 13
+#define M_INT_LDT_VECTOR _SB_MAKEMASK(8, S_INT_LDT_VECTOR)
+#define V_INT_LDT_VECTOR(x) _SB_MAKEVALUE(x, S_INT_LDT_VECTOR)
+#define G_INT_LDT_VECTOR(x) _SB_GETVALUE(x, S_INT_LDT_VECTOR, M_INT_LDT_VECTOR)
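+
+/*
+ * Illustrative sketch (not from the original header): composing an LDT
+ * interrupt set value from the fields above.  The destination and vector
+ * numbers are arbitrary example values.
+ *
+ *	uint64_t ldt_int = V_INT_LDT_INTMSG(K_INT_LDT_INTMSG_FIXED) |
+ *			   M_INT_LDT_LEVELTRIGGER |
+ *			   M_INT_LDT_LOGICALDEST |
+ *			   V_INT_LDT_INTDEST(1) |
+ *			   V_INT_LDT_VECTOR(0x20);
+ */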
+
+/*
+ * Vector format (Table 4-6)
+ */
+
+#define M_LDTVECT_RAISEINT 0x00
+#define M_LDTVECT_RAISEMBOX 0x40
+
+
+#endif /* _SB1250_INT_H */
diff --git a/arch/mips/include/asm/sibyte/sb1250_l2c.h b/arch/mips/include/asm/sibyte/sb1250_l2c.h
new file mode 100644
index 000000000..cd8de844b
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/sb1250_l2c.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * SB1250 Board Support Package
+ *
+ * L2 Cache constants and macros File: sb1250_l2c.h
+ *
+ * This module contains constants useful for manipulating the
+ * level 2 cache.
+ *
+ * SB1250 specification level: User's manual 1/02/02
+ *
+ *********************************************************************
+ *
+ * Copyright 2000,2001,2002,2003
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+
+#ifndef _SB1250_L2C_H
+#define _SB1250_L2C_H
+
+#include <asm/sibyte/sb1250_defs.h>
+
+/*
+ * Level 2 Cache Tag register (Table 5-3)
+ */
+
+#define S_L2C_TAG_MBZ 0
+#define M_L2C_TAG_MBZ _SB_MAKEMASK(5, S_L2C_TAG_MBZ)
+
+#define S_L2C_TAG_INDEX 5
+#define M_L2C_TAG_INDEX _SB_MAKEMASK(12, S_L2C_TAG_INDEX)
+#define V_L2C_TAG_INDEX(x) _SB_MAKEVALUE(x, S_L2C_TAG_INDEX)
+#define G_L2C_TAG_INDEX(x) _SB_GETVALUE(x, S_L2C_TAG_INDEX, M_L2C_TAG_INDEX)
+
+#define S_L2C_TAG_TAG 17
+#define M_L2C_TAG_TAG _SB_MAKEMASK(23, S_L2C_TAG_TAG)
+#define V_L2C_TAG_TAG(x) _SB_MAKEVALUE(x, S_L2C_TAG_TAG)
+#define G_L2C_TAG_TAG(x) _SB_GETVALUE(x, S_L2C_TAG_TAG, M_L2C_TAG_TAG)
+
+#define S_L2C_TAG_ECC 40
+#define M_L2C_TAG_ECC _SB_MAKEMASK(6, S_L2C_TAG_ECC)
+#define V_L2C_TAG_ECC(x) _SB_MAKEVALUE(x, S_L2C_TAG_ECC)
+#define G_L2C_TAG_ECC(x) _SB_GETVALUE(x, S_L2C_TAG_ECC, M_L2C_TAG_ECC)
+
+#define S_L2C_TAG_WAY 46
+#define M_L2C_TAG_WAY _SB_MAKEMASK(2, S_L2C_TAG_WAY)
+#define V_L2C_TAG_WAY(x) _SB_MAKEVALUE(x, S_L2C_TAG_WAY)
+#define G_L2C_TAG_WAY(x) _SB_GETVALUE(x, S_L2C_TAG_WAY, M_L2C_TAG_WAY)
+
+#define M_L2C_TAG_DIRTY _SB_MAKEMASK1(48)
+#define M_L2C_TAG_VALID _SB_MAKEMASK1(49)
+
+/*
+ * Format of level 2 cache management address (table 5-2)
+ */
+
+#define S_L2C_MGMT_INDEX 5
+#define M_L2C_MGMT_INDEX _SB_MAKEMASK(12, S_L2C_MGMT_INDEX)
+#define V_L2C_MGMT_INDEX(x) _SB_MAKEVALUE(x, S_L2C_MGMT_INDEX)
+#define G_L2C_MGMT_INDEX(x) _SB_GETVALUE(x, S_L2C_MGMT_INDEX, M_L2C_MGMT_INDEX)
+
+#define S_L2C_MGMT_QUADRANT 15
+#define M_L2C_MGMT_QUADRANT _SB_MAKEMASK(2, S_L2C_MGMT_QUADRANT)
+#define V_L2C_MGMT_QUADRANT(x) _SB_MAKEVALUE(x, S_L2C_MGMT_QUADRANT)
+#define G_L2C_MGMT_QUADRANT(x) _SB_GETVALUE(x, S_L2C_MGMT_QUADRANT, M_L2C_MGMT_QUADRANT)
+
+#define S_L2C_MGMT_HALF 16
+#define M_L2C_MGMT_HALF _SB_MAKEMASK(1, S_L2C_MGMT_HALF)
+
+#define S_L2C_MGMT_WAY 17
+#define M_L2C_MGMT_WAY _SB_MAKEMASK(2, S_L2C_MGMT_WAY)
+#define V_L2C_MGMT_WAY(x) _SB_MAKEVALUE(x, S_L2C_MGMT_WAY)
+#define G_L2C_MGMT_WAY(x) _SB_GETVALUE(x, S_L2C_MGMT_WAY, M_L2C_MGMT_WAY)
+
+#define S_L2C_MGMT_ECC_DIAG 21
+#define M_L2C_MGMT_ECC_DIAG _SB_MAKEMASK(2, S_L2C_MGMT_ECC_DIAG)
+#define V_L2C_MGMT_ECC_DIAG(x) _SB_MAKEVALUE(x, S_L2C_MGMT_ECC_DIAG)
+#define G_L2C_MGMT_ECC_DIAG(x) _SB_GETVALUE(x, S_L2C_MGMT_ECC_DIAG, M_L2C_MGMT_ECC_DIAG)
+
+#define S_L2C_MGMT_TAG 23
+#define M_L2C_MGMT_TAG _SB_MAKEMASK(4, S_L2C_MGMT_TAG)
+#define V_L2C_MGMT_TAG(x) _SB_MAKEVALUE(x, S_L2C_MGMT_TAG)
+#define G_L2C_MGMT_TAG(x) _SB_GETVALUE(x, S_L2C_MGMT_TAG, M_L2C_MGMT_TAG)
+
+#define M_L2C_MGMT_DIRTY _SB_MAKEMASK1(19)
+#define M_L2C_MGMT_VALID _SB_MAKEMASK1(20)
+
+#define A_L2C_MGMT_TAG_BASE 0x00D0000000
+
+#define L2C_ENTRIES_PER_WAY 4096
+#define L2C_NUM_WAYS 4
+
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
+/*
+ * L2 Read Misc. register (A_L2_READ_MISC)
+ */
+#define S_L2C_MISC_NO_WAY 10
+#define M_L2C_MISC_NO_WAY _SB_MAKEMASK(4, S_L2C_MISC_NO_WAY)
+#define V_L2C_MISC_NO_WAY(x) _SB_MAKEVALUE(x, S_L2C_MISC_NO_WAY)
+#define G_L2C_MISC_NO_WAY(x) _SB_GETVALUE(x, S_L2C_MISC_NO_WAY, M_L2C_MISC_NO_WAY)
+
+#define M_L2C_MISC_ECC_CLEANUP_DIS _SB_MAKEMASK1(9)
+#define M_L2C_MISC_MC_PRIO_LOW _SB_MAKEMASK1(8)
+#define M_L2C_MISC_SOFT_DISABLE_T _SB_MAKEMASK1(7)
+#define M_L2C_MISC_SOFT_DISABLE_B _SB_MAKEMASK1(6)
+#define M_L2C_MISC_SOFT_DISABLE_R _SB_MAKEMASK1(5)
+#define M_L2C_MISC_SOFT_DISABLE_L _SB_MAKEMASK1(4)
+#define M_L2C_MISC_SCACHE_DISABLE_T _SB_MAKEMASK1(3)
+#define M_L2C_MISC_SCACHE_DISABLE_B _SB_MAKEMASK1(2)
+#define M_L2C_MISC_SCACHE_DISABLE_R _SB_MAKEMASK1(1)
+#define M_L2C_MISC_SCACHE_DISABLE_L _SB_MAKEMASK1(0)
+#endif /* 1250 PASS3 || 112x PASS1 */
+
+
+#endif
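As a usage illustration, the management-address and tag fields above compose in the obvious way: build an address in the A_L2C_MGMT_TAG_BASE window with the V_L2C_MGMT_* helpers, do an uncached 64-bit load, and pick the result apart with the G_L2C_TAG_* getters. A sketch under those assumptions (the uncached XKPHYS mapping via PHYS_TO_XKSEG_UNCACHED is an assumption about the surrounding MIPS64 kernel, not something this header provides):

#include <linux/types.h>
#include <asm/addrspace.h>
#include <asm/sibyte/sb1250_l2c.h>

/* Read one L2 tag entry; way < L2C_NUM_WAYS, index < L2C_ENTRIES_PER_WAY. */
static u64 l2c_read_tag(unsigned int way, unsigned int index)
{
	u64 addr = A_L2C_MGMT_TAG_BASE |
		   V_L2C_MGMT_WAY(way) |
		   V_L2C_MGMT_INDEX(index);
	u64 tag = *(volatile u64 *)PHYS_TO_XKSEG_UNCACHED(addr);

	return G_L2C_TAG_TAG(tag);	/* tag (upper address) bits of the line */
}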
diff --git a/arch/mips/include/asm/sibyte/sb1250_ldt.h b/arch/mips/include/asm/sibyte/sb1250_ldt.h
new file mode 100644
index 000000000..c22df8dbb
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/sb1250_ldt.h
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * SB1250 Board Support Package
+ *
+ * LDT constants File: sb1250_ldt.h
+ *
+ * This module contains constants and macros to describe
+ * the LDT interface on the SB1250.
+ *
+ * SB1250 specification level: User's manual 1/02/02
+ *
+ *********************************************************************
+ *
+ * Copyright 2000, 2001, 2002, 2003
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+
+#ifndef _SB1250_LDT_H
+#define _SB1250_LDT_H
+
+#include <asm/sibyte/sb1250_defs.h>
+
+#define K_LDT_VENDOR_SIBYTE 0x166D
+#define K_LDT_DEVICE_SB1250 0x0002
+
+/*
+ * LDT Interface Type 1 (bridge) configuration header
+ */
+
+#define R_LDT_TYPE1_DEVICEID 0x0000
+#define R_LDT_TYPE1_CMDSTATUS 0x0004
+#define R_LDT_TYPE1_CLASSREV 0x0008
+#define R_LDT_TYPE1_DEVHDR 0x000C
+#define R_LDT_TYPE1_BAR0 0x0010 /* not used */
+#define R_LDT_TYPE1_BAR1 0x0014 /* not used */
+
+#define R_LDT_TYPE1_BUSID 0x0018 /* bus ID register */
+#define R_LDT_TYPE1_SECSTATUS 0x001C /* secondary status / I/O base/limit */
+#define R_LDT_TYPE1_MEMLIMIT 0x0020
+#define R_LDT_TYPE1_PREFETCH 0x0024
+#define R_LDT_TYPE1_PREF_BASE 0x0028
+#define R_LDT_TYPE1_PREF_LIMIT 0x002C
+#define R_LDT_TYPE1_IOLIMIT 0x0030
+#define R_LDT_TYPE1_CAPPTR 0x0034
+#define R_LDT_TYPE1_ROMADDR 0x0038
+#define R_LDT_TYPE1_BRCTL 0x003C
+#define R_LDT_TYPE1_CMD 0x0040
+#define R_LDT_TYPE1_LINKCTRL 0x0044
+#define R_LDT_TYPE1_LINKFREQ 0x0048
+#define R_LDT_TYPE1_RESERVED1 0x004C
+#define R_LDT_TYPE1_SRICMD 0x0050
+#define R_LDT_TYPE1_SRITXNUM 0x0054
+#define R_LDT_TYPE1_SRIRXNUM 0x0058
+#define R_LDT_TYPE1_ERRSTATUS 0x0068
+#define R_LDT_TYPE1_SRICTRL 0x006C
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define R_LDT_TYPE1_ADDSTATUS 0x0070
+#endif /* 1250 PASS2 || 112x PASS1 */
+#define R_LDT_TYPE1_TXBUFCNT 0x00C8
+#define R_LDT_TYPE1_EXPCRC 0x00DC
+#define R_LDT_TYPE1_RXCRC 0x00F0
+
+
+/*
+ * LDT Device ID register
+ */
+
+#define S_LDT_DEVICEID_VENDOR 0
+#define M_LDT_DEVICEID_VENDOR _SB_MAKEMASK_32(16, S_LDT_DEVICEID_VENDOR)
+#define V_LDT_DEVICEID_VENDOR(x) _SB_MAKEVALUE_32(x, S_LDT_DEVICEID_VENDOR)
+#define G_LDT_DEVICEID_VENDOR(x) _SB_GETVALUE_32(x, S_LDT_DEVICEID_VENDOR, M_LDT_DEVICEID_VENDOR)
+
+#define S_LDT_DEVICEID_DEVICEID 16
+#define M_LDT_DEVICEID_DEVICEID _SB_MAKEMASK_32(16, S_LDT_DEVICEID_DEVICEID)
+#define V_LDT_DEVICEID_DEVICEID(x) _SB_MAKEVALUE_32(x, S_LDT_DEVICEID_DEVICEID)
+#define G_LDT_DEVICEID_DEVICEID(x) _SB_GETVALUE_32(x, S_LDT_DEVICEID_DEVICEID, M_LDT_DEVICEID_DEVICEID)
+
+
+/*
+ * LDT Command Register (Table 8-13)
+ */
+
+#define M_LDT_CMD_IOSPACE_EN _SB_MAKEMASK1_32(0)
+#define M_LDT_CMD_MEMSPACE_EN _SB_MAKEMASK1_32(1)
+#define M_LDT_CMD_MASTER_EN _SB_MAKEMASK1_32(2)
+#define M_LDT_CMD_SPECCYC_EN _SB_MAKEMASK1_32(3)
+#define M_LDT_CMD_MEMWRINV_EN _SB_MAKEMASK1_32(4)
+#define M_LDT_CMD_VGAPALSNP_EN _SB_MAKEMASK1_32(5)
+#define M_LDT_CMD_PARERRRESP _SB_MAKEMASK1_32(6)
+#define M_LDT_CMD_WAITCYCCTRL _SB_MAKEMASK1_32(7)
+#define M_LDT_CMD_SERR_EN _SB_MAKEMASK1_32(8)
+#define M_LDT_CMD_FASTB2B_EN _SB_MAKEMASK1_32(9)
+
+/*
+ * LDT class and revision registers
+ */
+
+#define S_LDT_CLASSREV_REV 0
+#define M_LDT_CLASSREV_REV _SB_MAKEMASK_32(8, S_LDT_CLASSREV_REV)
+#define V_LDT_CLASSREV_REV(x) _SB_MAKEVALUE_32(x, S_LDT_CLASSREV_REV)
+#define G_LDT_CLASSREV_REV(x) _SB_GETVALUE_32(x, S_LDT_CLASSREV_REV, M_LDT_CLASSREV_REV)
+
+#define S_LDT_CLASSREV_CLASS 8
+#define M_LDT_CLASSREV_CLASS _SB_MAKEMASK_32(24, S_LDT_CLASSREV_CLASS)
+#define V_LDT_CLASSREV_CLASS(x) _SB_MAKEVALUE_32(x, S_LDT_CLASSREV_CLASS)
+#define G_LDT_CLASSREV_CLASS(x) _SB_GETVALUE_32(x, S_LDT_CLASSREV_CLASS, M_LDT_CLASSREV_CLASS)
+
+#define K_LDT_REV 0x01
+#define K_LDT_CLASS 0x060000
+
+/*
+ * Device Header (offset 0x0C)
+ */
+
+#define S_LDT_DEVHDR_CLINESZ 0
+#define M_LDT_DEVHDR_CLINESZ _SB_MAKEMASK_32(8, S_LDT_DEVHDR_CLINESZ)
+#define V_LDT_DEVHDR_CLINESZ(x) _SB_MAKEVALUE_32(x, S_LDT_DEVHDR_CLINESZ)
+#define G_LDT_DEVHDR_CLINESZ(x) _SB_GETVALUE_32(x, S_LDT_DEVHDR_CLINESZ, M_LDT_DEVHDR_CLINESZ)
+
+#define S_LDT_DEVHDR_LATTMR 8
+#define M_LDT_DEVHDR_LATTMR _SB_MAKEMASK_32(8, S_LDT_DEVHDR_LATTMR)
+#define V_LDT_DEVHDR_LATTMR(x) _SB_MAKEVALUE_32(x, S_LDT_DEVHDR_LATTMR)
+#define G_LDT_DEVHDR_LATTMR(x) _SB_GETVALUE_32(x, S_LDT_DEVHDR_LATTMR, M_LDT_DEVHDR_LATTMR)
+
+#define S_LDT_DEVHDR_HDRTYPE 16
+#define M_LDT_DEVHDR_HDRTYPE _SB_MAKEMASK_32(8, S_LDT_DEVHDR_HDRTYPE)
+#define V_LDT_DEVHDR_HDRTYPE(x) _SB_MAKEVALUE_32(x, S_LDT_DEVHDR_HDRTYPE)
+#define G_LDT_DEVHDR_HDRTYPE(x) _SB_GETVALUE_32(x, S_LDT_DEVHDR_HDRTYPE, M_LDT_DEVHDR_HDRTYPE)
+
+#define K_LDT_DEVHDR_HDRTYPE_TYPE1 1
+
+#define S_LDT_DEVHDR_BIST 24
+#define M_LDT_DEVHDR_BIST _SB_MAKEMASK_32(8, S_LDT_DEVHDR_BIST)
+#define V_LDT_DEVHDR_BIST(x) _SB_MAKEVALUE_32(x, S_LDT_DEVHDR_BIST)
+#define G_LDT_DEVHDR_BIST(x) _SB_GETVALUE_32(x, S_LDT_DEVHDR_BIST, M_LDT_DEVHDR_BIST)
+
+
+
+/*
+ * LDT Status Register (Table 8-14). Note that these constants
+ * assume you've read the command and status register
+ * together (32-bit read at offset 0x04)
+ *
+ * These bits also apply to the secondary status
+ * register (Table 8-15), offset 0x1C
+ */
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define M_LDT_STATUS_VGAEN _SB_MAKEMASK1_32(3)
+#endif /* 1250 PASS2 || 112x PASS1 */
+#define M_LDT_STATUS_CAPLIST _SB_MAKEMASK1_32(20)
+#define M_LDT_STATUS_66MHZCAP _SB_MAKEMASK1_32(21)
+#define M_LDT_STATUS_RESERVED2 _SB_MAKEMASK1_32(22)
+#define M_LDT_STATUS_FASTB2BCAP _SB_MAKEMASK1_32(23)
+#define M_LDT_STATUS_MSTRDPARERR _SB_MAKEMASK1_32(24)
+
+#define S_LDT_STATUS_DEVSELTIMING 25
+#define M_LDT_STATUS_DEVSELTIMING _SB_MAKEMASK_32(2, S_LDT_STATUS_DEVSELTIMING)
+#define V_LDT_STATUS_DEVSELTIMING(x) _SB_MAKEVALUE_32(x, S_LDT_STATUS_DEVSELTIMING)
+#define G_LDT_STATUS_DEVSELTIMING(x) _SB_GETVALUE_32(x, S_LDT_STATUS_DEVSELTIMING, M_LDT_STATUS_DEVSELTIMING)
+
+#define M_LDT_STATUS_SIGDTGTABORT _SB_MAKEMASK1_32(27)
+#define M_LDT_STATUS_RCVDTGTABORT _SB_MAKEMASK1_32(28)
+#define M_LDT_STATUS_RCVDMSTRABORT _SB_MAKEMASK1_32(29)
+#define M_LDT_STATUS_SIGDSERR _SB_MAKEMASK1_32(30)
+#define M_LDT_STATUS_DETPARERR _SB_MAKEMASK1_32(31)
+
+/*
+ * Bridge Control Register (Table 8-16). Note that these
+ * constants assume you've read the register as a 32-bit
+ * read (offset 0x3C)
+ */
+
+#define M_LDT_BRCTL_PARERRRESP_EN _SB_MAKEMASK1_32(16)
+#define M_LDT_BRCTL_SERR_EN _SB_MAKEMASK1_32(17)
+#define M_LDT_BRCTL_ISA_EN _SB_MAKEMASK1_32(18)
+#define M_LDT_BRCTL_VGA_EN _SB_MAKEMASK1_32(19)
+#define M_LDT_BRCTL_MSTRABORTMODE _SB_MAKEMASK1_32(21)
+#define M_LDT_BRCTL_SECBUSRESET _SB_MAKEMASK1_32(22)
+#define M_LDT_BRCTL_FASTB2B_EN _SB_MAKEMASK1_32(23)
+#define M_LDT_BRCTL_PRIDISCARD _SB_MAKEMASK1_32(24)
+#define M_LDT_BRCTL_SECDISCARD _SB_MAKEMASK1_32(25)
+#define M_LDT_BRCTL_DISCARDSTAT _SB_MAKEMASK1_32(26)
+#define M_LDT_BRCTL_DISCARDSERR_EN _SB_MAKEMASK1_32(27)
+
+/*
+ * LDT Command Register (Table 8-17). Note that these constants
+ * assume you've read the command and status register together
+ * (32-bit read at offset 0x40)
+ */
+
+#define M_LDT_CMD_WARMRESET _SB_MAKEMASK1_32(16)
+#define M_LDT_CMD_DOUBLEENDED _SB_MAKEMASK1_32(17)
+
+#define S_LDT_CMD_CAPTYPE 29
+#define M_LDT_CMD_CAPTYPE _SB_MAKEMASK_32(3, S_LDT_CMD_CAPTYPE)
+#define V_LDT_CMD_CAPTYPE(x) _SB_MAKEVALUE_32(x, S_LDT_CMD_CAPTYPE)
+#define G_LDT_CMD_CAPTYPE(x) _SB_GETVALUE_32(x, S_LDT_CMD_CAPTYPE, M_LDT_CMD_CAPTYPE)
+
+/*
+ * LDT link control register (Tables 8-18 and 8-19)
+ */
+
+#define M_LDT_LINKCTRL_CAPSYNCFLOOD_EN _SB_MAKEMASK1_32(1)
+#define M_LDT_LINKCTRL_CRCSTARTTEST _SB_MAKEMASK1_32(2)
+#define M_LDT_LINKCTRL_CRCFORCEERR _SB_MAKEMASK1_32(3)
+#define M_LDT_LINKCTRL_LINKFAIL _SB_MAKEMASK1_32(4)
+#define M_LDT_LINKCTRL_INITDONE _SB_MAKEMASK1_32(5)
+#define M_LDT_LINKCTRL_EOC _SB_MAKEMASK1_32(6)
+#define M_LDT_LINKCTRL_XMITOFF _SB_MAKEMASK1_32(7)
+
+#define S_LDT_LINKCTRL_CRCERR 8
+#define M_LDT_LINKCTRL_CRCERR _SB_MAKEMASK_32(4, S_LDT_LINKCTRL_CRCERR)
+#define V_LDT_LINKCTRL_CRCERR(x) _SB_MAKEVALUE_32(x, S_LDT_LINKCTRL_CRCERR)
+#define G_LDT_LINKCTRL_CRCERR(x) _SB_GETVALUE_32(x, S_LDT_LINKCTRL_CRCERR, M_LDT_LINKCTRL_CRCERR)
+
+#define S_LDT_LINKCTRL_MAXIN 16
+#define M_LDT_LINKCTRL_MAXIN _SB_MAKEMASK_32(3, S_LDT_LINKCTRL_MAXIN)
+#define V_LDT_LINKCTRL_MAXIN(x) _SB_MAKEVALUE_32(x, S_LDT_LINKCTRL_MAXIN)
+#define G_LDT_LINKCTRL_MAXIN(x) _SB_GETVALUE_32(x, S_LDT_LINKCTRL_MAXIN, M_LDT_LINKCTRL_MAXIN)
+
+#define M_LDT_LINKCTRL_DWFCLN _SB_MAKEMASK1_32(19)
+
+#define S_LDT_LINKCTRL_MAXOUT 20
+#define M_LDT_LINKCTRL_MAXOUT _SB_MAKEMASK_32(3, S_LDT_LINKCTRL_MAXOUT)
+#define V_LDT_LINKCTRL_MAXOUT(x) _SB_MAKEVALUE_32(x, S_LDT_LINKCTRL_MAXOUT)
+#define G_LDT_LINKCTRL_MAXOUT(x) _SB_GETVALUE_32(x, S_LDT_LINKCTRL_MAXOUT, M_LDT_LINKCTRL_MAXOUT)
+
+#define M_LDT_LINKCTRL_DWFCOUT _SB_MAKEMASK1_32(23)
+
+#define S_LDT_LINKCTRL_WIDTHIN 24
+#define M_LDT_LINKCTRL_WIDTHIN _SB_MAKEMASK_32(3, S_LDT_LINKCTRL_WIDTHIN)
+#define V_LDT_LINKCTRL_WIDTHIN(x) _SB_MAKEVALUE_32(x, S_LDT_LINKCTRL_WIDTHIN)
+#define G_LDT_LINKCTRL_WIDTHIN(x) _SB_GETVALUE_32(x, S_LDT_LINKCTRL_WIDTHIN, M_LDT_LINKCTRL_WIDTHIN)
+
+#define M_LDT_LINKCTRL_DWFCLIN_EN _SB_MAKEMASK1_32(27)
+
+#define S_LDT_LINKCTRL_WIDTHOUT 28
+#define M_LDT_LINKCTRL_WIDTHOUT _SB_MAKEMASK_32(3, S_LDT_LINKCTRL_WIDTHOUT)
+#define V_LDT_LINKCTRL_WIDTHOUT(x) _SB_MAKEVALUE_32(x, S_LDT_LINKCTRL_WIDTHOUT)
+#define G_LDT_LINKCTRL_WIDTHOUT(x) _SB_GETVALUE_32(x, S_LDT_LINKCTRL_WIDTHOUT, M_LDT_LINKCTRL_WIDTHOUT)
+
+#define M_LDT_LINKCTRL_DWFCOUT_EN _SB_MAKEMASK1_32(31)
+
+/*
+ * LDT Link frequency register (Table 8-20) offset 0x48
+ */
+
+#define S_LDT_LINKFREQ_FREQ 8
+#define M_LDT_LINKFREQ_FREQ _SB_MAKEMASK_32(4, S_LDT_LINKFREQ_FREQ)
+#define V_LDT_LINKFREQ_FREQ(x) _SB_MAKEVALUE_32(x, S_LDT_LINKFREQ_FREQ)
+#define G_LDT_LINKFREQ_FREQ(x) _SB_GETVALUE_32(x, S_LDT_LINKFREQ_FREQ, M_LDT_LINKFREQ_FREQ)
+
+#define K_LDT_LINKFREQ_200MHZ 0
+#define K_LDT_LINKFREQ_300MHZ 1
+#define K_LDT_LINKFREQ_400MHZ 2
+#define K_LDT_LINKFREQ_500MHZ 3
+#define K_LDT_LINKFREQ_600MHZ 4
+#define K_LDT_LINKFREQ_800MHZ 5
+#define K_LDT_LINKFREQ_1000MHZ 6
+
+/*
+ * LDT SRI Command Register (Table 8-21). Note that these constants
+ * assume you've read the command and status register together
+ * (32-bit read at offset 0x50)
+ */
+
+#define M_LDT_SRICMD_SIPREADY _SB_MAKEMASK1_32(16)
+#define M_LDT_SRICMD_SYNCPTRCTL _SB_MAKEMASK1_32(17)
+#define M_LDT_SRICMD_REDUCESYNCZERO _SB_MAKEMASK1_32(18)
+#if SIBYTE_HDR_FEATURE_UP_TO(1250, PASS1)
+#define M_LDT_SRICMD_DISSTARVATIONCNT _SB_MAKEMASK1_32(19) /* PASS1 */
+#endif /* up to 1250 PASS1 */
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define M_LDT_SRICMD_DISMULTTXVLD _SB_MAKEMASK1_32(19)
+#define M_LDT_SRICMD_EXPENDIAN _SB_MAKEMASK1_32(26)
+#endif /* 1250 PASS2 || 112x PASS1 */
+
+
+#define S_LDT_SRICMD_RXMARGIN 20
+#define M_LDT_SRICMD_RXMARGIN _SB_MAKEMASK_32(5, S_LDT_SRICMD_RXMARGIN)
+#define V_LDT_SRICMD_RXMARGIN(x) _SB_MAKEVALUE_32(x, S_LDT_SRICMD_RXMARGIN)
+#define G_LDT_SRICMD_RXMARGIN(x) _SB_GETVALUE_32(x, S_LDT_SRICMD_RXMARGIN, M_LDT_SRICMD_RXMARGIN)
+
+#define M_LDT_SRICMD_LDTPLLCOMPAT _SB_MAKEMASK1_32(25)
+
+#define S_LDT_SRICMD_TXINITIALOFFSET 28
+#define M_LDT_SRICMD_TXINITIALOFFSET _SB_MAKEMASK_32(3, S_LDT_SRICMD_TXINITIALOFFSET)
+#define V_LDT_SRICMD_TXINITIALOFFSET(x) _SB_MAKEVALUE_32(x, S_LDT_SRICMD_TXINITIALOFFSET)
+#define G_LDT_SRICMD_TXINITIALOFFSET(x) _SB_GETVALUE_32(x, S_LDT_SRICMD_TXINITIALOFFSET, M_LDT_SRICMD_TXINITIALOFFSET)
+
+#define M_LDT_SRICMD_LINKFREQDIRECT _SB_MAKEMASK1_32(31)
+
+/*
+ * LDT Error control and status register (Tables 8-22 and 8-23)
+ */
+
+#define M_LDT_ERRCTL_PROTFATAL_EN _SB_MAKEMASK1_32(0)
+#define M_LDT_ERRCTL_PROTNONFATAL_EN _SB_MAKEMASK1_32(1)
+#define M_LDT_ERRCTL_PROTSYNCFLOOD_EN _SB_MAKEMASK1_32(2)
+#define M_LDT_ERRCTL_OVFFATAL_EN _SB_MAKEMASK1_32(3)
+#define M_LDT_ERRCTL_OVFNONFATAL_EN _SB_MAKEMASK1_32(4)
+#define M_LDT_ERRCTL_OVFSYNCFLOOD_EN _SB_MAKEMASK1_32(5)
+#define M_LDT_ERRCTL_EOCNXAFATAL_EN _SB_MAKEMASK1_32(6)
+#define M_LDT_ERRCTL_EOCNXANONFATAL_EN _SB_MAKEMASK1_32(7)
+#define M_LDT_ERRCTL_EOCNXASYNCFLOOD_EN _SB_MAKEMASK1_32(8)
+#define M_LDT_ERRCTL_CRCFATAL_EN _SB_MAKEMASK1_32(9)
+#define M_LDT_ERRCTL_CRCNONFATAL_EN _SB_MAKEMASK1_32(10)
+#define M_LDT_ERRCTL_SERRFATAL_EN _SB_MAKEMASK1_32(11)
+#define M_LDT_ERRCTL_SRCTAGFATAL_EN _SB_MAKEMASK1_32(12)
+#define M_LDT_ERRCTL_SRCTAGNONFATAL_EN _SB_MAKEMASK1_32(13)
+#define M_LDT_ERRCTL_SRCTAGSYNCFLOOD_EN _SB_MAKEMASK1_32(14)
+#define M_LDT_ERRCTL_MAPNXAFATAL_EN _SB_MAKEMASK1_32(15)
+#define M_LDT_ERRCTL_MAPNXANONFATAL_EN _SB_MAKEMASK1_32(16)
+#define M_LDT_ERRCTL_MAPNXASYNCFLOOD_EN _SB_MAKEMASK1_32(17)
+
+#define M_LDT_ERRCTL_PROTOERR _SB_MAKEMASK1_32(24)
+#define M_LDT_ERRCTL_OVFERR _SB_MAKEMASK1_32(25)
+#define M_LDT_ERRCTL_EOCNXAERR _SB_MAKEMASK1_32(26)
+#define M_LDT_ERRCTL_SRCTAGERR _SB_MAKEMASK1_32(27)
+#define M_LDT_ERRCTL_MAPNXAERR _SB_MAKEMASK1_32(28)
+
+/*
+ * SRI Control register (Tables 8-24 and 8-25), offset 0x6C
+ */
+
+#define S_LDT_SRICTRL_NEEDRESP 0
+#define M_LDT_SRICTRL_NEEDRESP _SB_MAKEMASK_32(2, S_LDT_SRICTRL_NEEDRESP)
+#define V_LDT_SRICTRL_NEEDRESP(x) _SB_MAKEVALUE_32(x, S_LDT_SRICTRL_NEEDRESP)
+#define G_LDT_SRICTRL_NEEDRESP(x) _SB_GETVALUE_32(x, S_LDT_SRICTRL_NEEDRESP, M_LDT_SRICTRL_NEEDRESP)
+
+#define S_LDT_SRICTRL_NEEDNPREQ 2
+#define M_LDT_SRICTRL_NEEDNPREQ _SB_MAKEMASK_32(2, S_LDT_SRICTRL_NEEDNPREQ)
+#define V_LDT_SRICTRL_NEEDNPREQ(x) _SB_MAKEVALUE_32(x, S_LDT_SRICTRL_NEEDNPREQ)
+#define G_LDT_SRICTRL_NEEDNPREQ(x) _SB_GETVALUE_32(x, S_LDT_SRICTRL_NEEDNPREQ, M_LDT_SRICTRL_NEEDNPREQ)
+
+#define S_LDT_SRICTRL_NEEDPREQ 4
+#define M_LDT_SRICTRL_NEEDPREQ _SB_MAKEMASK_32(2, S_LDT_SRICTRL_NEEDPREQ)
+#define V_LDT_SRICTRL_NEEDPREQ(x) _SB_MAKEVALUE_32(x, S_LDT_SRICTRL_NEEDPREQ)
+#define G_LDT_SRICTRL_NEEDPREQ(x) _SB_GETVALUE_32(x, S_LDT_SRICTRL_NEEDPREQ, M_LDT_SRICTRL_NEEDPREQ)
+
+#define S_LDT_SRICTRL_WANTRESP 8
+#define M_LDT_SRICTRL_WANTRESP _SB_MAKEMASK_32(2, S_LDT_SRICTRL_WANTRESP)
+#define V_LDT_SRICTRL_WANTRESP(x) _SB_MAKEVALUE_32(x, S_LDT_SRICTRL_WANTRESP)
+#define G_LDT_SRICTRL_WANTRESP(x) _SB_GETVALUE_32(x, S_LDT_SRICTRL_WANTRESP, M_LDT_SRICTRL_WANTRESP)
+
+#define S_LDT_SRICTRL_WANTNPREQ 10
+#define M_LDT_SRICTRL_WANTNPREQ _SB_MAKEMASK_32(2, S_LDT_SRICTRL_WANTNPREQ)
+#define V_LDT_SRICTRL_WANTNPREQ(x) _SB_MAKEVALUE_32(x, S_LDT_SRICTRL_WANTNPREQ)
+#define G_LDT_SRICTRL_WANTNPREQ(x) _SB_GETVALUE_32(x, S_LDT_SRICTRL_WANTNPREQ, M_LDT_SRICTRL_WANTNPREQ)
+
+#define S_LDT_SRICTRL_WANTPREQ 12
+#define M_LDT_SRICTRL_WANTPREQ _SB_MAKEMASK_32(2, S_LDT_SRICTRL_WANTPREQ)
+#define V_LDT_SRICTRL_WANTPREQ(x) _SB_MAKEVALUE_32(x, S_LDT_SRICTRL_WANTPREQ)
+#define G_LDT_SRICTRL_WANTPREQ(x) _SB_GETVALUE_32(x, S_LDT_SRICTRL_WANTPREQ, M_LDT_SRICTRL_WANTPREQ)
+
+#define S_LDT_SRICTRL_BUFRELSPACE 16
+#define M_LDT_SRICTRL_BUFRELSPACE _SB_MAKEMASK_32(4, S_LDT_SRICTRL_BUFRELSPACE)
+#define V_LDT_SRICTRL_BUFRELSPACE(x) _SB_MAKEVALUE_32(x, S_LDT_SRICTRL_BUFRELSPACE)
+#define G_LDT_SRICTRL_BUFRELSPACE(x) _SB_GETVALUE_32(x, S_LDT_SRICTRL_BUFRELSPACE, M_LDT_SRICTRL_BUFRELSPACE)
+
+/*
+ * LDT SRI Transmit Buffer Count register (Table 8-26)
+ */
+
+#define S_LDT_TXBUFCNT_PCMD 0
+#define M_LDT_TXBUFCNT_PCMD _SB_MAKEMASK_32(4, S_LDT_TXBUFCNT_PCMD)
+#define V_LDT_TXBUFCNT_PCMD(x) _SB_MAKEVALUE_32(x, S_LDT_TXBUFCNT_PCMD)
+#define G_LDT_TXBUFCNT_PCMD(x) _SB_GETVALUE_32(x, S_LDT_TXBUFCNT_PCMD, M_LDT_TXBUFCNT_PCMD)
+
+#define S_LDT_TXBUFCNT_PDATA 4
+#define M_LDT_TXBUFCNT_PDATA _SB_MAKEMASK_32(4, S_LDT_TXBUFCNT_PDATA)
+#define V_LDT_TXBUFCNT_PDATA(x) _SB_MAKEVALUE_32(x, S_LDT_TXBUFCNT_PDATA)
+#define G_LDT_TXBUFCNT_PDATA(x) _SB_GETVALUE_32(x, S_LDT_TXBUFCNT_PDATA, M_LDT_TXBUFCNT_PDATA)
+
+#define S_LDT_TXBUFCNT_NPCMD 8
+#define M_LDT_TXBUFCNT_NPCMD _SB_MAKEMASK_32(4, S_LDT_TXBUFCNT_NPCMD)
+#define V_LDT_TXBUFCNT_NPCMD(x) _SB_MAKEVALUE_32(x, S_LDT_TXBUFCNT_NPCMD)
+#define G_LDT_TXBUFCNT_NPCMD(x) _SB_GETVALUE_32(x, S_LDT_TXBUFCNT_NPCMD, M_LDT_TXBUFCNT_NPCMD)
+
+#define S_LDT_TXBUFCNT_NPDATA 12
+#define M_LDT_TXBUFCNT_NPDATA _SB_MAKEMASK_32(4, S_LDT_TXBUFCNT_NPDATA)
+#define V_LDT_TXBUFCNT_NPDATA(x) _SB_MAKEVALUE_32(x, S_LDT_TXBUFCNT_NPDATA)
+#define G_LDT_TXBUFCNT_NPDATA(x) _SB_GETVALUE_32(x, S_LDT_TXBUFCNT_NPDATA, M_LDT_TXBUFCNT_NPDATA)
+
+#define S_LDT_TXBUFCNT_RCMD 16
+#define M_LDT_TXBUFCNT_RCMD _SB_MAKEMASK_32(4, S_LDT_TXBUFCNT_RCMD)
+#define V_LDT_TXBUFCNT_RCMD(x) _SB_MAKEVALUE_32(x, S_LDT_TXBUFCNT_RCMD)
+#define G_LDT_TXBUFCNT_RCMD(x) _SB_GETVALUE_32(x, S_LDT_TXBUFCNT_RCMD, M_LDT_TXBUFCNT_RCMD)
+
+#define S_LDT_TXBUFCNT_RDATA 20
+#define M_LDT_TXBUFCNT_RDATA _SB_MAKEMASK_32(4, S_LDT_TXBUFCNT_RDATA)
+#define V_LDT_TXBUFCNT_RDATA(x) _SB_MAKEVALUE_32(x, S_LDT_TXBUFCNT_RDATA)
+#define G_LDT_TXBUFCNT_RDATA(x) _SB_GETVALUE_32(x, S_LDT_TXBUFCNT_RDATA, M_LDT_TXBUFCNT_RDATA)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+/*
+ * Additional Status Register
+ */
+
+#define S_LDT_ADDSTATUS_TGTDONE 0
+#define M_LDT_ADDSTATUS_TGTDONE _SB_MAKEMASK_32(8, S_LDT_ADDSTATUS_TGTDONE)
+#define V_LDT_ADDSTATUS_TGTDONE(x) _SB_MAKEVALUE_32(x, S_LDT_ADDSTATUS_TGTDONE)
+#define G_LDT_ADDSTATUS_TGTDONE(x) _SB_GETVALUE_32(x, S_LDT_ADDSTATUS_TGTDONE, M_LDT_ADDSTATUS_TGTDONE)
+#endif /* 1250 PASS2 || 112x PASS1 */
+
+#endif
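By way of example, the link control and link frequency fields above are the ones firmware or a bridge driver typically inspects after link initialization. A hedged sketch, with the configuration-space accessor passed in as a callback because this header does not define how the Type 1 header is actually reached:

#include <linux/types.h>
#include <linux/printk.h>
#include <asm/sibyte/sb1250_ldt.h>

/* cfg_read32 is a stand-in for the platform's LDT config-space read routine. */
static void ldt_report_link(u32 (*cfg_read32)(unsigned int off))
{
	u32 linkctrl = cfg_read32(R_LDT_TYPE1_LINKCTRL);
	u32 linkfreq = cfg_read32(R_LDT_TYPE1_LINKFREQ);

	pr_info("LDT link: width in=%u out=%u, freq code=%u, init done=%d\n",
		G_LDT_LINKCTRL_WIDTHIN(linkctrl),
		G_LDT_LINKCTRL_WIDTHOUT(linkctrl),
		G_LDT_LINKFREQ_FREQ(linkfreq),
		!!(linkctrl & M_LDT_LINKCTRL_INITDONE));
}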
diff --git a/arch/mips/include/asm/sibyte/sb1250_mac.h b/arch/mips/include/asm/sibyte/sb1250_mac.h
new file mode 100644
index 000000000..3ddbd4b5d
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/sb1250_mac.h
@@ -0,0 +1,643 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * SB1250 Board Support Package
+ *
+ * MAC constants and macros File: sb1250_mac.h
+ *
+ * This module contains constants and macros for the SB1250's
+ * ethernet controllers.
+ *
+ * SB1250 specification level: User's manual 1/02/02
+ *
+ *********************************************************************
+ *
+ * Copyright 2000,2001,2002,2003
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+
+#ifndef _SB1250_MAC_H
+#define _SB1250_MAC_H
+
+#include <asm/sibyte/sb1250_defs.h>
+
+/* *********************************************************************
+ * Ethernet MAC Registers
+ ********************************************************************* */
+
+/*
+ * MAC Configuration Register (Table 9-13)
+ * Register: MAC_CFG_0
+ * Register: MAC_CFG_1
+ * Register: MAC_CFG_2
+ */
+
+
+#define M_MAC_RESERVED0 _SB_MAKEMASK1(0)
+#define M_MAC_TX_HOLD_SOP_EN _SB_MAKEMASK1(1)
+#define M_MAC_RETRY_EN _SB_MAKEMASK1(2)
+#define M_MAC_RET_DRPREQ_EN _SB_MAKEMASK1(3)
+#define M_MAC_RET_UFL_EN _SB_MAKEMASK1(4)
+#define M_MAC_BURST_EN _SB_MAKEMASK1(5)
+
+#define S_MAC_TX_PAUSE _SB_MAKE64(6)
+#define M_MAC_TX_PAUSE_CNT _SB_MAKEMASK(3, S_MAC_TX_PAUSE)
+#define V_MAC_TX_PAUSE_CNT(x) _SB_MAKEVALUE(x, S_MAC_TX_PAUSE)
+
+#define K_MAC_TX_PAUSE_CNT_512 0
+#define K_MAC_TX_PAUSE_CNT_1K 1
+#define K_MAC_TX_PAUSE_CNT_2K 2
+#define K_MAC_TX_PAUSE_CNT_4K 3
+#define K_MAC_TX_PAUSE_CNT_8K 4
+#define K_MAC_TX_PAUSE_CNT_16K 5
+#define K_MAC_TX_PAUSE_CNT_32K 6
+#define K_MAC_TX_PAUSE_CNT_64K 7
+
+#define V_MAC_TX_PAUSE_CNT_512 V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_512)
+#define V_MAC_TX_PAUSE_CNT_1K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_1K)
+#define V_MAC_TX_PAUSE_CNT_2K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_2K)
+#define V_MAC_TX_PAUSE_CNT_4K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_4K)
+#define V_MAC_TX_PAUSE_CNT_8K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_8K)
+#define V_MAC_TX_PAUSE_CNT_16K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_16K)
+#define V_MAC_TX_PAUSE_CNT_32K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_32K)
+#define V_MAC_TX_PAUSE_CNT_64K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_64K)
+
+#define M_MAC_RESERVED1 _SB_MAKEMASK(8, 9)
+
+#define M_MAC_AP_STAT_EN _SB_MAKEMASK1(17)
+
+#if SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_MAC_TIMESTAMP _SB_MAKEMASK1(18)
+#endif
+#define M_MAC_DRP_ERRPKT_EN _SB_MAKEMASK1(19)
+#define M_MAC_DRP_FCSERRPKT_EN _SB_MAKEMASK1(20)
+#define M_MAC_DRP_CODEERRPKT_EN _SB_MAKEMASK1(21)
+#define M_MAC_DRP_DRBLERRPKT_EN _SB_MAKEMASK1(22)
+#define M_MAC_DRP_RNTPKT_EN _SB_MAKEMASK1(23)
+#define M_MAC_DRP_OSZPKT_EN _SB_MAKEMASK1(24)
+#define M_MAC_DRP_LENERRPKT_EN _SB_MAKEMASK1(25)
+
+#define M_MAC_RESERVED3 _SB_MAKEMASK(6, 26)
+
+#define M_MAC_BYPASS_SEL _SB_MAKEMASK1(32)
+#define M_MAC_HDX_EN _SB_MAKEMASK1(33)
+
+#define S_MAC_SPEED_SEL _SB_MAKE64(34)
+#define M_MAC_SPEED_SEL _SB_MAKEMASK(2, S_MAC_SPEED_SEL)
+#define V_MAC_SPEED_SEL(x) _SB_MAKEVALUE(x, S_MAC_SPEED_SEL)
+#define G_MAC_SPEED_SEL(x) _SB_GETVALUE(x, S_MAC_SPEED_SEL, M_MAC_SPEED_SEL)
+
+#define K_MAC_SPEED_SEL_10MBPS 0
+#define K_MAC_SPEED_SEL_100MBPS 1
+#define K_MAC_SPEED_SEL_1000MBPS 2
+#define K_MAC_SPEED_SEL_RESERVED 3
+
+#define V_MAC_SPEED_SEL_10MBPS V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_10MBPS)
+#define V_MAC_SPEED_SEL_100MBPS V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_100MBPS)
+#define V_MAC_SPEED_SEL_1000MBPS V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_1000MBPS)
+#define V_MAC_SPEED_SEL_RESERVED V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_RESERVED)
+
+#define M_MAC_TX_CLK_EDGE_SEL _SB_MAKEMASK1(36)
+#define M_MAC_LOOPBACK_SEL _SB_MAKEMASK1(37)
+#define M_MAC_FAST_SYNC _SB_MAKEMASK1(38)
+#define M_MAC_SS_EN _SB_MAKEMASK1(39)
+
+#define S_MAC_BYPASS_CFG _SB_MAKE64(40)
+#define M_MAC_BYPASS_CFG _SB_MAKEMASK(2, S_MAC_BYPASS_CFG)
+#define V_MAC_BYPASS_CFG(x) _SB_MAKEVALUE(x, S_MAC_BYPASS_CFG)
+#define G_MAC_BYPASS_CFG(x) _SB_GETVALUE(x, S_MAC_BYPASS_CFG, M_MAC_BYPASS_CFG)
+
+#define K_MAC_BYPASS_GMII 0
+#define K_MAC_BYPASS_ENCODED 1
+#define K_MAC_BYPASS_SOP 2
+#define K_MAC_BYPASS_EOP 3
+
+#define M_MAC_BYPASS_16 _SB_MAKEMASK1(42)
+#define M_MAC_BYPASS_FCS_CHK _SB_MAKEMASK1(43)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_MAC_RX_CH_SEL_MSB _SB_MAKEMASK1(44)
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_MAC_SPLIT_CH_SEL _SB_MAKEMASK1(45)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+#define S_MAC_BYPASS_IFG _SB_MAKE64(46)
+#define M_MAC_BYPASS_IFG _SB_MAKEMASK(8, S_MAC_BYPASS_IFG)
+#define V_MAC_BYPASS_IFG(x) _SB_MAKEVALUE(x, S_MAC_BYPASS_IFG)
+#define G_MAC_BYPASS_IFG(x) _SB_GETVALUE(x, S_MAC_BYPASS_IFG, M_MAC_BYPASS_IFG)
+
+#define K_MAC_FC_CMD_DISABLED 0
+#define K_MAC_FC_CMD_ENABLED 1
+#define K_MAC_FC_CMD_ENAB_FALSECARR 2
+
+#define V_MAC_FC_CMD_DISABLED V_MAC_FC_CMD(K_MAC_FC_CMD_DISABLED)
+#define V_MAC_FC_CMD_ENABLED V_MAC_FC_CMD(K_MAC_FC_CMD_ENABLED)
+#define V_MAC_FC_CMD_ENAB_FALSECARR V_MAC_FC_CMD(K_MAC_FC_CMD_ENAB_FALSECARR)
+
+#define M_MAC_FC_SEL _SB_MAKEMASK1(54)
+
+#define S_MAC_FC_CMD _SB_MAKE64(55)
+#define M_MAC_FC_CMD _SB_MAKEMASK(2, S_MAC_FC_CMD)
+#define V_MAC_FC_CMD(x) _SB_MAKEVALUE(x, S_MAC_FC_CMD)
+#define G_MAC_FC_CMD(x) _SB_GETVALUE(x, S_MAC_FC_CMD, M_MAC_FC_CMD)
+
+#define S_MAC_RX_CH_SEL _SB_MAKE64(57)
+#define M_MAC_RX_CH_SEL _SB_MAKEMASK(7, S_MAC_RX_CH_SEL)
+#define V_MAC_RX_CH_SEL(x) _SB_MAKEVALUE(x, S_MAC_RX_CH_SEL)
+#define G_MAC_RX_CH_SEL(x) _SB_GETVALUE(x, S_MAC_RX_CH_SEL, M_MAC_RX_CH_SEL)
+
+
+/*
+ * MAC Enable Registers
+ * Register: MAC_ENABLE_0
+ * Register: MAC_ENABLE_1
+ * Register: MAC_ENABLE_2
+ */
+
+#define M_MAC_RXDMA_EN0 _SB_MAKEMASK1(0)
+#define M_MAC_RXDMA_EN1 _SB_MAKEMASK1(1)
+#define M_MAC_TXDMA_EN0 _SB_MAKEMASK1(4)
+#define M_MAC_TXDMA_EN1 _SB_MAKEMASK1(5)
+
+#define M_MAC_PORT_RESET _SB_MAKEMASK1(8)
+
+#if (SIBYTE_HDR_FEATURE_CHIP(1250) || SIBYTE_HDR_FEATURE_CHIP(112x))
+#define M_MAC_RX_ENABLE _SB_MAKEMASK1(10)
+#define M_MAC_TX_ENABLE _SB_MAKEMASK1(11)
+#define M_MAC_BYP_RX_ENABLE _SB_MAKEMASK1(12)
+#define M_MAC_BYP_TX_ENABLE _SB_MAKEMASK1(13)
+#endif
+
+/*
+ * MAC reset information register (1280/1255)
+ */
+#if SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_MAC_RX_CH0_PAUSE_ON _SB_MAKEMASK1(8)
+#define M_MAC_RX_CH1_PAUSE_ON _SB_MAKEMASK1(16)
+#define M_MAC_TX_CH0_PAUSE_ON _SB_MAKEMASK1(24)
+#define M_MAC_TX_CH1_PAUSE_ON _SB_MAKEMASK1(32)
+#endif
+
+/*
+ * MAC DMA Control Register
+ * Register: MAC_TXD_CTL_0
+ * Register: MAC_TXD_CTL_1
+ * Register: MAC_TXD_CTL_2
+ */
+
+#define S_MAC_TXD_WEIGHT0 _SB_MAKE64(0)
+#define M_MAC_TXD_WEIGHT0 _SB_MAKEMASK(4, S_MAC_TXD_WEIGHT0)
+#define V_MAC_TXD_WEIGHT0(x) _SB_MAKEVALUE(x, S_MAC_TXD_WEIGHT0)
+#define G_MAC_TXD_WEIGHT0(x) _SB_GETVALUE(x, S_MAC_TXD_WEIGHT0, M_MAC_TXD_WEIGHT0)
+
+#define S_MAC_TXD_WEIGHT1 _SB_MAKE64(4)
+#define M_MAC_TXD_WEIGHT1 _SB_MAKEMASK(4, S_MAC_TXD_WEIGHT1)
+#define V_MAC_TXD_WEIGHT1(x) _SB_MAKEVALUE(x, S_MAC_TXD_WEIGHT1)
+#define G_MAC_TXD_WEIGHT1(x) _SB_GETVALUE(x, S_MAC_TXD_WEIGHT1, M_MAC_TXD_WEIGHT1)
+
+/*
+ * MAC Fifo Threshold registers (Table 9-14)
+ * Register: MAC_THRSH_CFG_0
+ * Register: MAC_THRSH_CFG_1
+ * Register: MAC_THRSH_CFG_2
+ */
+
+#define S_MAC_TX_WR_THRSH _SB_MAKE64(0)
+#if SIBYTE_HDR_FEATURE_UP_TO(1250, PASS1)
+/* XXX: Can't enable, as it has the same name as a pass2+ define below. */
+/* #define M_MAC_TX_WR_THRSH _SB_MAKEMASK(6, S_MAC_TX_WR_THRSH) */
+#endif /* up to 1250 PASS1 */
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_MAC_TX_WR_THRSH _SB_MAKEMASK(7, S_MAC_TX_WR_THRSH)
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+#define V_MAC_TX_WR_THRSH(x) _SB_MAKEVALUE(x, S_MAC_TX_WR_THRSH)
+#define G_MAC_TX_WR_THRSH(x) _SB_GETVALUE(x, S_MAC_TX_WR_THRSH, M_MAC_TX_WR_THRSH)
+
+#define S_MAC_TX_RD_THRSH _SB_MAKE64(8)
+#if SIBYTE_HDR_FEATURE_UP_TO(1250, PASS1)
+/* XXX: Can't enable, as it has the same name as a pass2+ define below. */
+/* #define M_MAC_TX_RD_THRSH _SB_MAKEMASK(6, S_MAC_TX_RD_THRSH) */
+#endif /* up to 1250 PASS1 */
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_MAC_TX_RD_THRSH _SB_MAKEMASK(7, S_MAC_TX_RD_THRSH)
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+#define V_MAC_TX_RD_THRSH(x) _SB_MAKEVALUE(x, S_MAC_TX_RD_THRSH)
+#define G_MAC_TX_RD_THRSH(x) _SB_GETVALUE(x, S_MAC_TX_RD_THRSH, M_MAC_TX_RD_THRSH)
+
+#define S_MAC_TX_RL_THRSH _SB_MAKE64(16)
+#define M_MAC_TX_RL_THRSH _SB_MAKEMASK(4, S_MAC_TX_RL_THRSH)
+#define V_MAC_TX_RL_THRSH(x) _SB_MAKEVALUE(x, S_MAC_TX_RL_THRSH)
+#define G_MAC_TX_RL_THRSH(x) _SB_GETVALUE(x, S_MAC_TX_RL_THRSH, M_MAC_TX_RL_THRSH)
+
+#define S_MAC_RX_PL_THRSH _SB_MAKE64(24)
+#define M_MAC_RX_PL_THRSH _SB_MAKEMASK(6, S_MAC_RX_PL_THRSH)
+#define V_MAC_RX_PL_THRSH(x) _SB_MAKEVALUE(x, S_MAC_RX_PL_THRSH)
+#define G_MAC_RX_PL_THRSH(x) _SB_GETVALUE(x, S_MAC_RX_PL_THRSH, M_MAC_RX_PL_THRSH)
+
+#define S_MAC_RX_RD_THRSH _SB_MAKE64(32)
+#define M_MAC_RX_RD_THRSH _SB_MAKEMASK(6, S_MAC_RX_RD_THRSH)
+#define V_MAC_RX_RD_THRSH(x) _SB_MAKEVALUE(x, S_MAC_RX_RD_THRSH)
+#define G_MAC_RX_RD_THRSH(x) _SB_GETVALUE(x, S_MAC_RX_RD_THRSH, M_MAC_RX_RD_THRSH)
+
+#define S_MAC_RX_RL_THRSH _SB_MAKE64(40)
+#define M_MAC_RX_RL_THRSH _SB_MAKEMASK(6, S_MAC_RX_RL_THRSH)
+#define V_MAC_RX_RL_THRSH(x) _SB_MAKEVALUE(x, S_MAC_RX_RL_THRSH)
+#define G_MAC_RX_RL_THRSH(x) _SB_GETVALUE(x, S_MAC_RX_RL_THRSH, M_MAC_RX_RL_THRSH)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define S_MAC_ENC_FC_THRSH _SB_MAKE64(56)
+#define M_MAC_ENC_FC_THRSH _SB_MAKEMASK(6, S_MAC_ENC_FC_THRSH)
+#define V_MAC_ENC_FC_THRSH(x) _SB_MAKEVALUE(x, S_MAC_ENC_FC_THRSH)
+#define G_MAC_ENC_FC_THRSH(x) _SB_GETVALUE(x, S_MAC_ENC_FC_THRSH, M_MAC_ENC_FC_THRSH)
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+
+/*
+ * MAC Frame Configuration Registers (Table 9-15)
+ * Register: MAC_FRAME_CFG_0
+ * Register: MAC_FRAME_CFG_1
+ * Register: MAC_FRAME_CFG_2
+ */
+
+/* XXXCGD: ??? Unused in pass2? */
+#define S_MAC_IFG_RX _SB_MAKE64(0)
+#define M_MAC_IFG_RX _SB_MAKEMASK(6, S_MAC_IFG_RX)
+#define V_MAC_IFG_RX(x) _SB_MAKEVALUE(x, S_MAC_IFG_RX)
+#define G_MAC_IFG_RX(x) _SB_GETVALUE(x, S_MAC_IFG_RX, M_MAC_IFG_RX)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define S_MAC_PRE_LEN _SB_MAKE64(0)
+#define M_MAC_PRE_LEN _SB_MAKEMASK(6, S_MAC_PRE_LEN)
+#define V_MAC_PRE_LEN(x) _SB_MAKEVALUE(x, S_MAC_PRE_LEN)
+#define G_MAC_PRE_LEN(x) _SB_GETVALUE(x, S_MAC_PRE_LEN, M_MAC_PRE_LEN)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+#define S_MAC_IFG_TX _SB_MAKE64(6)
+#define M_MAC_IFG_TX _SB_MAKEMASK(6, S_MAC_IFG_TX)
+#define V_MAC_IFG_TX(x) _SB_MAKEVALUE(x, S_MAC_IFG_TX)
+#define G_MAC_IFG_TX(x) _SB_GETVALUE(x, S_MAC_IFG_TX, M_MAC_IFG_TX)
+
+#define S_MAC_IFG_THRSH _SB_MAKE64(12)
+#define M_MAC_IFG_THRSH _SB_MAKEMASK(6, S_MAC_IFG_THRSH)
+#define V_MAC_IFG_THRSH(x) _SB_MAKEVALUE(x, S_MAC_IFG_THRSH)
+#define G_MAC_IFG_THRSH(x) _SB_GETVALUE(x, S_MAC_IFG_THRSH, M_MAC_IFG_THRSH)
+
+#define S_MAC_BACKOFF_SEL _SB_MAKE64(18)
+#define M_MAC_BACKOFF_SEL _SB_MAKEMASK(4, S_MAC_BACKOFF_SEL)
+#define V_MAC_BACKOFF_SEL(x) _SB_MAKEVALUE(x, S_MAC_BACKOFF_SEL)
+#define G_MAC_BACKOFF_SEL(x) _SB_GETVALUE(x, S_MAC_BACKOFF_SEL, M_MAC_BACKOFF_SEL)
+
+#define S_MAC_LFSR_SEED _SB_MAKE64(22)
+#define M_MAC_LFSR_SEED _SB_MAKEMASK(8, S_MAC_LFSR_SEED)
+#define V_MAC_LFSR_SEED(x) _SB_MAKEVALUE(x, S_MAC_LFSR_SEED)
+#define G_MAC_LFSR_SEED(x) _SB_GETVALUE(x, S_MAC_LFSR_SEED, M_MAC_LFSR_SEED)
+
+#define S_MAC_SLOT_SIZE _SB_MAKE64(30)
+#define M_MAC_SLOT_SIZE _SB_MAKEMASK(10, S_MAC_SLOT_SIZE)
+#define V_MAC_SLOT_SIZE(x) _SB_MAKEVALUE(x, S_MAC_SLOT_SIZE)
+#define G_MAC_SLOT_SIZE(x) _SB_GETVALUE(x, S_MAC_SLOT_SIZE, M_MAC_SLOT_SIZE)
+
+#define S_MAC_MIN_FRAMESZ _SB_MAKE64(40)
+#define M_MAC_MIN_FRAMESZ _SB_MAKEMASK(8, S_MAC_MIN_FRAMESZ)
+#define V_MAC_MIN_FRAMESZ(x) _SB_MAKEVALUE(x, S_MAC_MIN_FRAMESZ)
+#define G_MAC_MIN_FRAMESZ(x) _SB_GETVALUE(x, S_MAC_MIN_FRAMESZ, M_MAC_MIN_FRAMESZ)
+
+#define S_MAC_MAX_FRAMESZ _SB_MAKE64(48)
+#define M_MAC_MAX_FRAMESZ _SB_MAKEMASK(16, S_MAC_MAX_FRAMESZ)
+#define V_MAC_MAX_FRAMESZ(x) _SB_MAKEVALUE(x, S_MAC_MAX_FRAMESZ)
+#define G_MAC_MAX_FRAMESZ(x) _SB_GETVALUE(x, S_MAC_MAX_FRAMESZ, M_MAC_MAX_FRAMESZ)
+
+/*
+ * These constants are used to configure the fields within the Frame
+ * Configuration Register.
+ */
+
+#define K_MAC_IFG_RX_10 _SB_MAKE64(0) /* See table 176, not used */
+#define K_MAC_IFG_RX_100 _SB_MAKE64(0)
+#define K_MAC_IFG_RX_1000 _SB_MAKE64(0)
+
+#define K_MAC_IFG_TX_10 _SB_MAKE64(20)
+#define K_MAC_IFG_TX_100 _SB_MAKE64(20)
+#define K_MAC_IFG_TX_1000 _SB_MAKE64(8)
+
+#define K_MAC_IFG_THRSH_10 _SB_MAKE64(4)
+#define K_MAC_IFG_THRSH_100 _SB_MAKE64(4)
+#define K_MAC_IFG_THRSH_1000 _SB_MAKE64(0)
+
+#define K_MAC_SLOT_SIZE_10 _SB_MAKE64(0)
+#define K_MAC_SLOT_SIZE_100 _SB_MAKE64(0)
+#define K_MAC_SLOT_SIZE_1000 _SB_MAKE64(0)
+
+#define V_MAC_IFG_RX_10 V_MAC_IFG_RX(K_MAC_IFG_RX_10)
+#define V_MAC_IFG_RX_100 V_MAC_IFG_RX(K_MAC_IFG_RX_100)
+#define V_MAC_IFG_RX_1000 V_MAC_IFG_RX(K_MAC_IFG_RX_1000)
+
+#define V_MAC_IFG_TX_10 V_MAC_IFG_TX(K_MAC_IFG_TX_10)
+#define V_MAC_IFG_TX_100 V_MAC_IFG_TX(K_MAC_IFG_TX_100)
+#define V_MAC_IFG_TX_1000 V_MAC_IFG_TX(K_MAC_IFG_TX_1000)
+
+#define V_MAC_IFG_THRSH_10 V_MAC_IFG_THRSH(K_MAC_IFG_THRSH_10)
+#define V_MAC_IFG_THRSH_100 V_MAC_IFG_THRSH(K_MAC_IFG_THRSH_100)
+#define V_MAC_IFG_THRSH_1000 V_MAC_IFG_THRSH(K_MAC_IFG_THRSH_1000)
+
+#define V_MAC_SLOT_SIZE_10 V_MAC_SLOT_SIZE(K_MAC_SLOT_SIZE_10)
+#define V_MAC_SLOT_SIZE_100 V_MAC_SLOT_SIZE(K_MAC_SLOT_SIZE_100)
+#define V_MAC_SLOT_SIZE_1000 V_MAC_SLOT_SIZE(K_MAC_SLOT_SIZE_1000)
+
+#define K_MAC_MIN_FRAMESZ_FIFO _SB_MAKE64(9)
+#define K_MAC_MIN_FRAMESZ_DEFAULT _SB_MAKE64(64)
+#define K_MAC_MAX_FRAMESZ_DEFAULT _SB_MAKE64(1518)
+#define K_MAC_MAX_FRAMESZ_JUMBO _SB_MAKE64(9216)
+
+#define V_MAC_MIN_FRAMESZ_FIFO V_MAC_MIN_FRAMESZ(K_MAC_MIN_FRAMESZ_FIFO)
+#define V_MAC_MIN_FRAMESZ_DEFAULT V_MAC_MIN_FRAMESZ(K_MAC_MIN_FRAMESZ_DEFAULT)
+#define V_MAC_MAX_FRAMESZ_DEFAULT V_MAC_MAX_FRAMESZ(K_MAC_MAX_FRAMESZ_DEFAULT)
+#define V_MAC_MAX_FRAMESZ_JUMBO V_MAC_MAX_FRAMESZ(K_MAC_MAX_FRAMESZ_JUMBO)
+
+/*
+ * MAC VLAN Tag Registers (Table 9-16)
+ * Register: MAC_VLANTAG_0
+ * Register: MAC_VLANTAG_1
+ * Register: MAC_VLANTAG_2
+ */
+
+#define S_MAC_VLAN_TAG _SB_MAKE64(0)
+#define M_MAC_VLAN_TAG _SB_MAKEMASK(32, S_MAC_VLAN_TAG)
+#define V_MAC_VLAN_TAG(x) _SB_MAKEVALUE(x, S_MAC_VLAN_TAG)
+#define G_MAC_VLAN_TAG(x) _SB_GETVALUE(x, S_MAC_VLAN_TAG, M_MAC_VLAN_TAG)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define S_MAC_TX_PKT_OFFSET _SB_MAKE64(32)
+#define M_MAC_TX_PKT_OFFSET _SB_MAKEMASK(8, S_MAC_TX_PKT_OFFSET)
+#define V_MAC_TX_PKT_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_TX_PKT_OFFSET)
+#define G_MAC_TX_PKT_OFFSET(x) _SB_GETVALUE(x, S_MAC_TX_PKT_OFFSET, M_MAC_TX_PKT_OFFSET)
+
+#define S_MAC_TX_CRC_OFFSET _SB_MAKE64(40)
+#define M_MAC_TX_CRC_OFFSET _SB_MAKEMASK(8, S_MAC_TX_CRC_OFFSET)
+#define V_MAC_TX_CRC_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_TX_CRC_OFFSET)
+#define G_MAC_TX_CRC_OFFSET(x) _SB_GETVALUE(x, S_MAC_TX_CRC_OFFSET, M_MAC_TX_CRC_OFFSET)
+
+#define M_MAC_CH_BASE_FC_EN _SB_MAKEMASK1(48)
+#endif /* 1250 PASS3 || 112x PASS1 */
+
+/*
+ * MAC Status Registers (Table 9-17)
+ * Also used for the MAC Interrupt Mask Register (Table 9-18)
+ * Register: MAC_STATUS_0
+ * Register: MAC_STATUS_1
+ * Register: MAC_STATUS_2
+ * Register: MAC_INT_MASK_0
+ * Register: MAC_INT_MASK_1
+ * Register: MAC_INT_MASK_2
+ */
+
+/*
+ * Use these constants to shift the appropriate channel
+ * into the CH0 position so the same tests can be used
+ * on each channel.
+ */
+
+#define S_MAC_RX_CH0 _SB_MAKE64(0)
+#define S_MAC_RX_CH1 _SB_MAKE64(8)
+#define S_MAC_TX_CH0 _SB_MAKE64(16)
+#define S_MAC_TX_CH1 _SB_MAKE64(24)
+
+#define S_MAC_TXCHANNELS _SB_MAKE64(16) /* this is 1st TX chan */
+#define S_MAC_CHANWIDTH _SB_MAKE64(8) /* bits between channels */
+
+/*
+ * These are the same as RX channel 0. The idea here
+ * is that you'll use one of the "S_" shift constants above
+ * and pass just the per-channel status bits to a DMA-channel-specific ISR.
+ */
+#define M_MAC_INT_CHANNEL _SB_MAKEMASK(8, 0)
+#define M_MAC_INT_EOP_COUNT _SB_MAKEMASK1(0)
+#define M_MAC_INT_EOP_TIMER _SB_MAKEMASK1(1)
+#define M_MAC_INT_EOP_SEEN _SB_MAKEMASK1(2)
+#define M_MAC_INT_HWM _SB_MAKEMASK1(3)
+#define M_MAC_INT_LWM _SB_MAKEMASK1(4)
+#define M_MAC_INT_DSCR _SB_MAKEMASK1(5)
+#define M_MAC_INT_ERR _SB_MAKEMASK1(6)
+#define M_MAC_INT_DZERO _SB_MAKEMASK1(7) /* only for TX channels */
+#define M_MAC_INT_DROP _SB_MAKEMASK1(7) /* only for RX channels */
+
+/*
+ * In the following definitions we use ch (0/1) and txrx (TX=1, RX=0, see
+ * also DMA_TX/DMA_RX in sb_regs.h).
+ */
+#define S_MAC_STATUS_CH_OFFSET(ch, txrx) _SB_MAKE64(((ch) + 2 * (txrx)) * S_MAC_CHANWIDTH)
+
+#define M_MAC_STATUS_CHANNEL(ch, txrx) _SB_MAKEVALUE(_SB_MAKEMASK(8, 0), S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_EOP_COUNT(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_EOP_COUNT, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_EOP_TIMER(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_EOP_TIMER, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_EOP_SEEN(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_EOP_SEEN, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_HWM(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_HWM, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_LWM(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_LWM, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_DSCR(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DSCR, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_ERR(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_ERR, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_DZERO(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DZERO, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_DROP(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DROP, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_OTHER_ERR _SB_MAKEVALUE(_SB_MAKEMASK(7, 0), 40)
+
+
+#define M_MAC_RX_UNDRFL _SB_MAKEMASK1(40)
+#define M_MAC_RX_OVRFL _SB_MAKEMASK1(41)
+#define M_MAC_TX_UNDRFL _SB_MAKEMASK1(42)
+#define M_MAC_TX_OVRFL _SB_MAKEMASK1(43)
+#define M_MAC_LTCOL_ERR _SB_MAKEMASK1(44)
+#define M_MAC_EXCOL_ERR _SB_MAKEMASK1(45)
+#define M_MAC_CNTR_OVRFL_ERR _SB_MAKEMASK1(46)
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_MAC_SPLIT_EN _SB_MAKEMASK1(47) /* interrupt mask only */
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+
+#define S_MAC_COUNTER_ADDR _SB_MAKE64(47)
+#define M_MAC_COUNTER_ADDR _SB_MAKEMASK(5, S_MAC_COUNTER_ADDR)
+#define V_MAC_COUNTER_ADDR(x) _SB_MAKEVALUE(x, S_MAC_COUNTER_ADDR)
+#define G_MAC_COUNTER_ADDR(x) _SB_GETVALUE(x, S_MAC_COUNTER_ADDR, M_MAC_COUNTER_ADDR)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_MAC_TX_PAUSE_ON _SB_MAKEMASK1(52)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+/*
+ * MAC Fifo Pointer Registers (Table 9-19) [Debug register]
+ * Register: MAC_FIFO_PTRS_0
+ * Register: MAC_FIFO_PTRS_1
+ * Register: MAC_FIFO_PTRS_2
+ */
+
+#define S_MAC_TX_WRPTR _SB_MAKE64(0)
+#define M_MAC_TX_WRPTR _SB_MAKEMASK(6, S_MAC_TX_WRPTR)
+#define V_MAC_TX_WRPTR(x) _SB_MAKEVALUE(x, S_MAC_TX_WRPTR)
+#define G_MAC_TX_WRPTR(x) _SB_GETVALUE(x, S_MAC_TX_WRPTR, M_MAC_TX_WRPTR)
+
+#define S_MAC_TX_RDPTR _SB_MAKE64(8)
+#define M_MAC_TX_RDPTR _SB_MAKEMASK(6, S_MAC_TX_RDPTR)
+#define V_MAC_TX_RDPTR(x) _SB_MAKEVALUE(x, S_MAC_TX_RDPTR)
+#define G_MAC_TX_RDPTR(x) _SB_GETVALUE(x, S_MAC_TX_RDPTR, M_MAC_TX_RDPTR)
+
+#define S_MAC_RX_WRPTR _SB_MAKE64(16)
+#define M_MAC_RX_WRPTR _SB_MAKEMASK(6, S_MAC_RX_WRPTR)
+#define V_MAC_RX_WRPTR(x) _SB_MAKEVALUE(x, S_MAC_RX_WRPTR)
+#define G_MAC_RX_WRPTR(x) _SB_GETVALUE(x, S_MAC_RX_WRPTR, M_MAC_RX_WRPTR)
+
+#define S_MAC_RX_RDPTR _SB_MAKE64(24)
+#define M_MAC_RX_RDPTR _SB_MAKEMASK(6, S_MAC_RX_RDPTR)
+#define V_MAC_RX_RDPTR(x) _SB_MAKEVALUE(x, S_MAC_RX_RDPTR)
+#define G_MAC_RX_RDPTR(x) _SB_GETVALUE(x, S_MAC_RX_RDPTR, M_MAC_RX_RDPTR)
+
+/*
+ * MAC Fifo End Of Packet Count Registers (Table 9-20) [Debug register]
+ * Register: MAC_EOPCNT_0
+ * Register: MAC_EOPCNT_1
+ * Register: MAC_EOPCNT_2
+ */
+
+#define S_MAC_TX_EOP_COUNTER _SB_MAKE64(0)
+#define M_MAC_TX_EOP_COUNTER _SB_MAKEMASK(6, S_MAC_TX_EOP_COUNTER)
+#define V_MAC_TX_EOP_COUNTER(x) _SB_MAKEVALUE(x, S_MAC_TX_EOP_COUNTER)
+#define G_MAC_TX_EOP_COUNTER(x) _SB_GETVALUE(x, S_MAC_TX_EOP_COUNTER, M_MAC_TX_EOP_COUNTER)
+
+#define S_MAC_RX_EOP_COUNTER _SB_MAKE64(8)
+#define M_MAC_RX_EOP_COUNTER _SB_MAKEMASK(6, S_MAC_RX_EOP_COUNTER)
+#define V_MAC_RX_EOP_COUNTER(x) _SB_MAKEVALUE(x, S_MAC_RX_EOP_COUNTER)
+#define G_MAC_RX_EOP_COUNTER(x) _SB_GETVALUE(x, S_MAC_RX_EOP_COUNTER, M_MAC_RX_EOP_COUNTER)
+
+/*
+ * MAC Receive Address Filter Exact Match Registers (Table 9-21)
+ * Registers: MAC_ADDR0_0 through MAC_ADDR7_0
+ * Registers: MAC_ADDR0_1 through MAC_ADDR7_1
+ * Registers: MAC_ADDR0_2 through MAC_ADDR7_2
+ */
+
+/* No bitfields */
+
+/*
+ * MAC Receive Address Filter Mask Registers
+ * Registers: MAC_ADDRMASK0_0 and MAC_ADDRMASK0_1
+ * Registers: MAC_ADDRMASK1_0 and MAC_ADDRMASK1_1
+ * Registers: MAC_ADDRMASK2_0 and MAC_ADDRMASK2_1
+ */
+
+/* No bitfields */
+
+/*
+ * MAC Receive Address Filter Hash Match Registers (Table 9-22)
+ * Registers: MAC_HASH0_0 through MAC_HASH7_0
+ * Registers: MAC_HASH0_1 through MAC_HASH7_1
+ * Registers: MAC_HASH0_2 through MAC_HASH7_2
+ */
+
+/* No bitfields */
+
+/*
+ * MAC Transmit Source Address Registers (Table 9-23)
+ * Register: MAC_ETHERNET_ADDR_0
+ * Register: MAC_ETHERNET_ADDR_1
+ * Register: MAC_ETHERNET_ADDR_2
+ */
+
+/* No bitfields */
+
+/*
+ * MAC Packet Type Configuration Register
+ * Register: MAC_TYPE_CFG_0
+ * Register: MAC_TYPE_CFG_1
+ * Register: MAC_TYPE_CFG_2
+ */
+
+#define S_TYPECFG_TYPESIZE _SB_MAKE64(16)
+
+#define S_TYPECFG_TYPE0 _SB_MAKE64(0)
+#define M_TYPECFG_TYPE0 _SB_MAKEMASK(16, S_TYPECFG_TYPE0)
+#define V_TYPECFG_TYPE0(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE0)
+#define G_TYPECFG_TYPE0(x) _SB_GETVALUE(x, S_TYPECFG_TYPE0, M_TYPECFG_TYPE0)
+
+#define S_TYPECFG_TYPE1 _SB_MAKE64(0)
+#define M_TYPECFG_TYPE1 _SB_MAKEMASK(16, S_TYPECFG_TYPE1)
+#define V_TYPECFG_TYPE1(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE1)
+#define G_TYPECFG_TYPE1(x) _SB_GETVALUE(x, S_TYPECFG_TYPE1, M_TYPECFG_TYPE1)
+
+#define S_TYPECFG_TYPE2 _SB_MAKE64(0)
+#define M_TYPECFG_TYPE2 _SB_MAKEMASK(16, S_TYPECFG_TYPE2)
+#define V_TYPECFG_TYPE2(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE2)
+#define G_TYPECFG_TYPE2(x) _SB_GETVALUE(x, S_TYPECFG_TYPE2, M_TYPECFG_TYPE2)
+
+#define S_TYPECFG_TYPE3 _SB_MAKE64(0)
+#define M_TYPECFG_TYPE3 _SB_MAKEMASK(16, S_TYPECFG_TYPE3)
+#define V_TYPECFG_TYPE3(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE3)
+#define G_TYPECFG_TYPE3(x) _SB_GETVALUE(x, S_TYPECFG_TYPE3, M_TYPECFG_TYPE3)
+
+/*
+ * MAC Receive Address Filter Control Registers (Table 9-24)
+ * Register: MAC_ADFILTER_CFG_0
+ * Register: MAC_ADFILTER_CFG_1
+ * Register: MAC_ADFILTER_CFG_2
+ */
+
+#define M_MAC_ALLPKT_EN _SB_MAKEMASK1(0)
+#define M_MAC_UCAST_EN _SB_MAKEMASK1(1)
+#define M_MAC_UCAST_INV _SB_MAKEMASK1(2)
+#define M_MAC_MCAST_EN _SB_MAKEMASK1(3)
+#define M_MAC_MCAST_INV _SB_MAKEMASK1(4)
+#define M_MAC_BCAST_EN _SB_MAKEMASK1(5)
+#define M_MAC_DIRECT_INV _SB_MAKEMASK1(6)
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_MAC_ALLMCAST_EN _SB_MAKEMASK1(7)
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+
+#define S_MAC_IPHDR_OFFSET _SB_MAKE64(8)
+#define M_MAC_IPHDR_OFFSET _SB_MAKEMASK(8, S_MAC_IPHDR_OFFSET)
+#define V_MAC_IPHDR_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_IPHDR_OFFSET)
+#define G_MAC_IPHDR_OFFSET(x) _SB_GETVALUE(x, S_MAC_IPHDR_OFFSET, M_MAC_IPHDR_OFFSET)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define S_MAC_RX_CRC_OFFSET _SB_MAKE64(16)
+#define M_MAC_RX_CRC_OFFSET _SB_MAKEMASK(8, S_MAC_RX_CRC_OFFSET)
+#define V_MAC_RX_CRC_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_RX_CRC_OFFSET)
+#define G_MAC_RX_CRC_OFFSET(x) _SB_GETVALUE(x, S_MAC_RX_CRC_OFFSET, M_MAC_RX_CRC_OFFSET)
+
+#define S_MAC_RX_PKT_OFFSET _SB_MAKE64(24)
+#define M_MAC_RX_PKT_OFFSET _SB_MAKEMASK(8, S_MAC_RX_PKT_OFFSET)
+#define V_MAC_RX_PKT_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_RX_PKT_OFFSET)
+#define G_MAC_RX_PKT_OFFSET(x) _SB_GETVALUE(x, S_MAC_RX_PKT_OFFSET, M_MAC_RX_PKT_OFFSET)
+
+#define M_MAC_FWDPAUSE_EN _SB_MAKEMASK1(32)
+#define M_MAC_VLAN_DET_EN _SB_MAKEMASK1(33)
+
+#define S_MAC_RX_CH_MSN_SEL _SB_MAKE64(34)
+#define M_MAC_RX_CH_MSN_SEL _SB_MAKEMASK(8, S_MAC_RX_CH_MSN_SEL)
+#define V_MAC_RX_CH_MSN_SEL(x) _SB_MAKEVALUE(x, S_MAC_RX_CH_MSN_SEL)
+#define G_MAC_RX_CH_MSN_SEL(x) _SB_GETVALUE(x, S_MAC_RX_CH_MSN_SEL, M_MAC_RX_CH_MSN_SEL)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+/*
+ * MAC Receive Channel Select Registers (Table 9-25)
+ */
+
+/* no bitfields */
+
+/*
+ * MAC MII Management Interface Registers (Table 9-26)
+ * Register: MAC_MDIO_0
+ * Register: MAC_MDIO_1
+ * Register: MAC_MDIO_2
+ */
+
+#define S_MAC_MDC 0
+#define S_MAC_MDIO_DIR 1
+#define S_MAC_MDIO_OUT 2
+#define S_MAC_GENC 3
+#define S_MAC_MDIO_IN 4
+
+#define M_MAC_MDC _SB_MAKEMASK1(S_MAC_MDC)
+#define M_MAC_MDIO_DIR _SB_MAKEMASK1(S_MAC_MDIO_DIR)
+#define M_MAC_MDIO_DIR_INPUT _SB_MAKEMASK1(S_MAC_MDIO_DIR)
+#define M_MAC_MDIO_OUT _SB_MAKEMASK1(S_MAC_MDIO_OUT)
+#define M_MAC_GENC _SB_MAKEMASK1(S_MAC_GENC)
+#define M_MAC_MDIO_IN _SB_MAKEMASK1(S_MAC_MDIO_IN)
+
+#endif
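To make the per-channel status machinery concrete: the (ch, txrx) helpers shift the common M_MAC_INT_* bits into the right byte of MAC_STATUS, so one routine can test any of the four DMA channels. A sketch (the status value is assumed to have been read from the MAC's MAC_STATUS register by the caller; the print calls are only illustrative):

#include <linux/types.h>
#include <linux/printk.h>
#include <asm/sibyte/sb1250_mac.h>

/* Check both TX DMA channels of one MAC; txrx = 1 selects the TX half. */
static void mac_check_tx_channels(u64 status)
{
	int ch;

	for (ch = 0; ch < 2; ch++) {
		if (status & M_MAC_STATUS_ERR(ch, 1))
			pr_warn("MAC TX channel %d: DMA error\n", ch);
		if (status & M_MAC_STATUS_EOP_COUNT(ch, 1))
			pr_debug("MAC TX channel %d: descriptor(s) completed\n", ch);
	}
}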
diff --git a/arch/mips/include/asm/sibyte/sb1250_mc.h b/arch/mips/include/asm/sibyte/sb1250_mc.h
new file mode 100644
index 000000000..c02fe823e
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/sb1250_mc.h
@@ -0,0 +1,537 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * SB1250 Board Support Package
+ *
+ * Memory Controller constants File: sb1250_mc.h
+ *
+ * This module contains constants and macros useful for
+ * programming the memory controller.
+ *
+ * SB1250 specification level: User's manual 1/02/02
+ *
+ *********************************************************************
+ *
+ * Copyright 2000, 2001, 2002, 2003
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+
+#ifndef _SB1250_MC_H
+#define _SB1250_MC_H
+
+#include <asm/sibyte/sb1250_defs.h>
+
+/*
+ * Memory Channel Config Register (Table 6-14)
+ */
+
+#define S_MC_RESERVED0 0
+#define M_MC_RESERVED0 _SB_MAKEMASK(8, S_MC_RESERVED0)
+
+#define S_MC_CHANNEL_SEL 8
+#define M_MC_CHANNEL_SEL _SB_MAKEMASK(8, S_MC_CHANNEL_SEL)
+#define V_MC_CHANNEL_SEL(x) _SB_MAKEVALUE(x, S_MC_CHANNEL_SEL)
+#define G_MC_CHANNEL_SEL(x) _SB_GETVALUE(x, S_MC_CHANNEL_SEL, M_MC_CHANNEL_SEL)
+
+#define S_MC_BANK0_MAP 16
+#define M_MC_BANK0_MAP _SB_MAKEMASK(4, S_MC_BANK0_MAP)
+#define V_MC_BANK0_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK0_MAP)
+#define G_MC_BANK0_MAP(x) _SB_GETVALUE(x, S_MC_BANK0_MAP, M_MC_BANK0_MAP)
+
+#define K_MC_BANK0_MAP_DEFAULT 0x00
+#define V_MC_BANK0_MAP_DEFAULT V_MC_BANK0_MAP(K_MC_BANK0_MAP_DEFAULT)
+
+#define S_MC_BANK1_MAP 20
+#define M_MC_BANK1_MAP _SB_MAKEMASK(4, S_MC_BANK1_MAP)
+#define V_MC_BANK1_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK1_MAP)
+#define G_MC_BANK1_MAP(x) _SB_GETVALUE(x, S_MC_BANK1_MAP, M_MC_BANK1_MAP)
+
+#define K_MC_BANK1_MAP_DEFAULT 0x08
+#define V_MC_BANK1_MAP_DEFAULT V_MC_BANK1_MAP(K_MC_BANK1_MAP_DEFAULT)
+
+#define S_MC_BANK2_MAP 24
+#define M_MC_BANK2_MAP _SB_MAKEMASK(4, S_MC_BANK2_MAP)
+#define V_MC_BANK2_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK2_MAP)
+#define G_MC_BANK2_MAP(x) _SB_GETVALUE(x, S_MC_BANK2_MAP, M_MC_BANK2_MAP)
+
+#define K_MC_BANK2_MAP_DEFAULT 0x09
+#define V_MC_BANK2_MAP_DEFAULT V_MC_BANK2_MAP(K_MC_BANK2_MAP_DEFAULT)
+
+#define S_MC_BANK3_MAP 28
+#define M_MC_BANK3_MAP _SB_MAKEMASK(4, S_MC_BANK3_MAP)
+#define V_MC_BANK3_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK3_MAP)
+#define G_MC_BANK3_MAP(x) _SB_GETVALUE(x, S_MC_BANK3_MAP, M_MC_BANK3_MAP)
+
+#define K_MC_BANK3_MAP_DEFAULT 0x0C
+#define V_MC_BANK3_MAP_DEFAULT V_MC_BANK3_MAP(K_MC_BANK3_MAP_DEFAULT)
+
+#define M_MC_RESERVED1 _SB_MAKEMASK(8, 32)
+
+#define S_MC_QUEUE_SIZE 40
+#define M_MC_QUEUE_SIZE _SB_MAKEMASK(4, S_MC_QUEUE_SIZE)
+#define V_MC_QUEUE_SIZE(x) _SB_MAKEVALUE(x, S_MC_QUEUE_SIZE)
+#define G_MC_QUEUE_SIZE(x) _SB_GETVALUE(x, S_MC_QUEUE_SIZE, M_MC_QUEUE_SIZE)
+#define V_MC_QUEUE_SIZE_DEFAULT V_MC_QUEUE_SIZE(0x0A)
+
+#define S_MC_AGE_LIMIT 44
+#define M_MC_AGE_LIMIT _SB_MAKEMASK(4, S_MC_AGE_LIMIT)
+#define V_MC_AGE_LIMIT(x) _SB_MAKEVALUE(x, S_MC_AGE_LIMIT)
+#define G_MC_AGE_LIMIT(x) _SB_GETVALUE(x, S_MC_AGE_LIMIT, M_MC_AGE_LIMIT)
+#define V_MC_AGE_LIMIT_DEFAULT V_MC_AGE_LIMIT(8)
+
+#define S_MC_WR_LIMIT 48
+#define M_MC_WR_LIMIT _SB_MAKEMASK(4, S_MC_WR_LIMIT)
+#define V_MC_WR_LIMIT(x) _SB_MAKEVALUE(x, S_MC_WR_LIMIT)
+#define G_MC_WR_LIMIT(x) _SB_GETVALUE(x, S_MC_WR_LIMIT, M_MC_WR_LIMIT)
+#define V_MC_WR_LIMIT_DEFAULT V_MC_WR_LIMIT(5)
+
+#define M_MC_IOB1HIGHPRIORITY _SB_MAKEMASK1(52)
+
+#define M_MC_RESERVED2 _SB_MAKEMASK(3, 53)
+
+#define S_MC_CS_MODE 56
+#define M_MC_CS_MODE _SB_MAKEMASK(4, S_MC_CS_MODE)
+#define V_MC_CS_MODE(x) _SB_MAKEVALUE(x, S_MC_CS_MODE)
+#define G_MC_CS_MODE(x) _SB_GETVALUE(x, S_MC_CS_MODE, M_MC_CS_MODE)
+
+#define K_MC_CS_MODE_MSB_CS 0
+#define K_MC_CS_MODE_INTLV_CS 15
+#define K_MC_CS_MODE_MIXED_CS_10 12
+#define K_MC_CS_MODE_MIXED_CS_30 6
+#define K_MC_CS_MODE_MIXED_CS_32 3
+
+#define V_MC_CS_MODE_MSB_CS V_MC_CS_MODE(K_MC_CS_MODE_MSB_CS)
+#define V_MC_CS_MODE_INTLV_CS V_MC_CS_MODE(K_MC_CS_MODE_INTLV_CS)
+#define V_MC_CS_MODE_MIXED_CS_10 V_MC_CS_MODE(K_MC_CS_MODE_MIXED_CS_10)
+#define V_MC_CS_MODE_MIXED_CS_30 V_MC_CS_MODE(K_MC_CS_MODE_MIXED_CS_30)
+#define V_MC_CS_MODE_MIXED_CS_32 V_MC_CS_MODE(K_MC_CS_MODE_MIXED_CS_32)
+
+#define M_MC_ECC_DISABLE _SB_MAKEMASK1(60)
+#define M_MC_BERR_DISABLE _SB_MAKEMASK1(61)
+#define M_MC_FORCE_SEQ _SB_MAKEMASK1(62)
+#define M_MC_DEBUG _SB_MAKEMASK1(63)
+
+#define V_MC_CONFIG_DEFAULT V_MC_WR_LIMIT_DEFAULT | V_MC_AGE_LIMIT_DEFAULT | \
+ V_MC_BANK0_MAP_DEFAULT | V_MC_BANK1_MAP_DEFAULT | \
+ V_MC_BANK2_MAP_DEFAULT | V_MC_BANK3_MAP_DEFAULT | V_MC_CHANNEL_SEL(0) | \
+ M_MC_IOB1HIGHPRIORITY | V_MC_QUEUE_SIZE_DEFAULT
+
+
+/*
+ * Memory clock config register (Table 6-15)
+ *
+ * Note: this field has been updated to be consistent with the errata to 0.2
+ */
+
+#define S_MC_CLK_RATIO 0
+#define M_MC_CLK_RATIO _SB_MAKEMASK(4, S_MC_CLK_RATIO)
+#define V_MC_CLK_RATIO(x) _SB_MAKEVALUE(x, S_MC_CLK_RATIO)
+#define G_MC_CLK_RATIO(x) _SB_GETVALUE(x, S_MC_CLK_RATIO, M_MC_CLK_RATIO)
+
+#define K_MC_CLK_RATIO_2X 4
+#define K_MC_CLK_RATIO_25X 5
+#define K_MC_CLK_RATIO_3X 6
+#define K_MC_CLK_RATIO_35X 7
+#define K_MC_CLK_RATIO_4X 8
+#define K_MC_CLK_RATIO_45X 9
+
+#define V_MC_CLK_RATIO_2X V_MC_CLK_RATIO(K_MC_CLK_RATIO_2X)
+#define V_MC_CLK_RATIO_25X V_MC_CLK_RATIO(K_MC_CLK_RATIO_25X)
+#define V_MC_CLK_RATIO_3X V_MC_CLK_RATIO(K_MC_CLK_RATIO_3X)
+#define V_MC_CLK_RATIO_35X V_MC_CLK_RATIO(K_MC_CLK_RATIO_35X)
+#define V_MC_CLK_RATIO_4X V_MC_CLK_RATIO(K_MC_CLK_RATIO_4X)
+#define V_MC_CLK_RATIO_45X V_MC_CLK_RATIO(K_MC_CLK_RATIO_45X)
+#define V_MC_CLK_RATIO_DEFAULT V_MC_CLK_RATIO_25X
+
+#define S_MC_REF_RATE 8
+#define M_MC_REF_RATE _SB_MAKEMASK(8, S_MC_REF_RATE)
+#define V_MC_REF_RATE(x) _SB_MAKEVALUE(x, S_MC_REF_RATE)
+#define G_MC_REF_RATE(x) _SB_GETVALUE(x, S_MC_REF_RATE, M_MC_REF_RATE)
+
+#define K_MC_REF_RATE_100MHz 0x62
+#define K_MC_REF_RATE_133MHz 0x81
+#define K_MC_REF_RATE_200MHz 0xC4
+
+#define V_MC_REF_RATE_100MHz V_MC_REF_RATE(K_MC_REF_RATE_100MHz)
+#define V_MC_REF_RATE_133MHz V_MC_REF_RATE(K_MC_REF_RATE_133MHz)
+#define V_MC_REF_RATE_200MHz V_MC_REF_RATE(K_MC_REF_RATE_200MHz)
+#define V_MC_REF_RATE_DEFAULT V_MC_REF_RATE_100MHz
+
+#define S_MC_CLOCK_DRIVE 16
+#define M_MC_CLOCK_DRIVE _SB_MAKEMASK(4, S_MC_CLOCK_DRIVE)
+#define V_MC_CLOCK_DRIVE(x) _SB_MAKEVALUE(x, S_MC_CLOCK_DRIVE)
+#define G_MC_CLOCK_DRIVE(x) _SB_GETVALUE(x, S_MC_CLOCK_DRIVE, M_MC_CLOCK_DRIVE)
+#define V_MC_CLOCK_DRIVE_DEFAULT V_MC_CLOCK_DRIVE(0xF)
+
+#define S_MC_DATA_DRIVE 20
+#define M_MC_DATA_DRIVE _SB_MAKEMASK(4, S_MC_DATA_DRIVE)
+#define V_MC_DATA_DRIVE(x) _SB_MAKEVALUE(x, S_MC_DATA_DRIVE)
+#define G_MC_DATA_DRIVE(x) _SB_GETVALUE(x, S_MC_DATA_DRIVE, M_MC_DATA_DRIVE)
+#define V_MC_DATA_DRIVE_DEFAULT V_MC_DATA_DRIVE(0x0)
+
+#define S_MC_ADDR_DRIVE 24
+#define M_MC_ADDR_DRIVE _SB_MAKEMASK(4, S_MC_ADDR_DRIVE)
+#define V_MC_ADDR_DRIVE(x) _SB_MAKEVALUE(x, S_MC_ADDR_DRIVE)
+#define G_MC_ADDR_DRIVE(x) _SB_GETVALUE(x, S_MC_ADDR_DRIVE, M_MC_ADDR_DRIVE)
+#define V_MC_ADDR_DRIVE_DEFAULT V_MC_ADDR_DRIVE(0x0)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define M_MC_REF_DISABLE _SB_MAKEMASK1(30)
+#endif /* 1250 PASS3 || 112x PASS1 */
+
+#define M_MC_DLL_BYPASS _SB_MAKEMASK1(31)
+
+#define S_MC_DQI_SKEW 32
+#define M_MC_DQI_SKEW _SB_MAKEMASK(8, S_MC_DQI_SKEW)
+#define V_MC_DQI_SKEW(x) _SB_MAKEVALUE(x, S_MC_DQI_SKEW)
+#define G_MC_DQI_SKEW(x) _SB_GETVALUE(x, S_MC_DQI_SKEW, M_MC_DQI_SKEW)
+#define V_MC_DQI_SKEW_DEFAULT V_MC_DQI_SKEW(0)
+
+#define S_MC_DQO_SKEW 40
+#define M_MC_DQO_SKEW _SB_MAKEMASK(8, S_MC_DQO_SKEW)
+#define V_MC_DQO_SKEW(x) _SB_MAKEVALUE(x, S_MC_DQO_SKEW)
+#define G_MC_DQO_SKEW(x) _SB_GETVALUE(x, S_MC_DQO_SKEW, M_MC_DQO_SKEW)
+#define V_MC_DQO_SKEW_DEFAULT V_MC_DQO_SKEW(0)
+
+#define S_MC_ADDR_SKEW 48
+#define M_MC_ADDR_SKEW _SB_MAKEMASK(8, S_MC_ADDR_SKEW)
+#define V_MC_ADDR_SKEW(x) _SB_MAKEVALUE(x, S_MC_ADDR_SKEW)
+#define G_MC_ADDR_SKEW(x) _SB_GETVALUE(x, S_MC_ADDR_SKEW, M_MC_ADDR_SKEW)
+#define V_MC_ADDR_SKEW_DEFAULT V_MC_ADDR_SKEW(0x0F)
+
+#define S_MC_DLL_DEFAULT 56
+#define M_MC_DLL_DEFAULT _SB_MAKEMASK(8, S_MC_DLL_DEFAULT)
+#define V_MC_DLL_DEFAULT(x) _SB_MAKEVALUE(x, S_MC_DLL_DEFAULT)
+#define G_MC_DLL_DEFAULT(x) _SB_GETVALUE(x, S_MC_DLL_DEFAULT, M_MC_DLL_DEFAULT)
+#define V_MC_DLL_DEFAULT_DEFAULT V_MC_DLL_DEFAULT(0x10)
+
+#define V_MC_CLKCONFIG_DEFAULT V_MC_DLL_DEFAULT_DEFAULT | \
+ V_MC_ADDR_SKEW_DEFAULT | \
+ V_MC_DQO_SKEW_DEFAULT | \
+ V_MC_DQI_SKEW_DEFAULT | \
+ V_MC_ADDR_DRIVE_DEFAULT | \
+ V_MC_DATA_DRIVE_DEFAULT | \
+ V_MC_CLOCK_DRIVE_DEFAULT | \
+ V_MC_REF_RATE_DEFAULT
+
+
+
+/*
+ * DRAM Command Register (Table 6-13)
+ */
+
+#define S_MC_COMMAND 0
+#define M_MC_COMMAND _SB_MAKEMASK(4, S_MC_COMMAND)
+#define V_MC_COMMAND(x) _SB_MAKEVALUE(x, S_MC_COMMAND)
+#define G_MC_COMMAND(x) _SB_GETVALUE(x, S_MC_COMMAND, M_MC_COMMAND)
+
+#define K_MC_COMMAND_EMRS 0
+#define K_MC_COMMAND_MRS 1
+#define K_MC_COMMAND_PRE 2
+#define K_MC_COMMAND_AR 3
+#define K_MC_COMMAND_SETRFSH 4
+#define K_MC_COMMAND_CLRRFSH 5
+#define K_MC_COMMAND_SETPWRDN 6
+#define K_MC_COMMAND_CLRPWRDN 7
+
+#define V_MC_COMMAND_EMRS V_MC_COMMAND(K_MC_COMMAND_EMRS)
+#define V_MC_COMMAND_MRS V_MC_COMMAND(K_MC_COMMAND_MRS)
+#define V_MC_COMMAND_PRE V_MC_COMMAND(K_MC_COMMAND_PRE)
+#define V_MC_COMMAND_AR V_MC_COMMAND(K_MC_COMMAND_AR)
+#define V_MC_COMMAND_SETRFSH V_MC_COMMAND(K_MC_COMMAND_SETRFSH)
+#define V_MC_COMMAND_CLRRFSH V_MC_COMMAND(K_MC_COMMAND_CLRRFSH)
+#define V_MC_COMMAND_SETPWRDN V_MC_COMMAND(K_MC_COMMAND_SETPWRDN)
+#define V_MC_COMMAND_CLRPWRDN V_MC_COMMAND(K_MC_COMMAND_CLRPWRDN)
+
+#define M_MC_CS0 _SB_MAKEMASK1(4)
+#define M_MC_CS1 _SB_MAKEMASK1(5)
+#define M_MC_CS2 _SB_MAKEMASK1(6)
+#define M_MC_CS3 _SB_MAKEMASK1(7)
+
+/*
+ * DRAM Mode Register (Table 6-14)
+ */
+
+#define S_MC_EMODE 0
+#define M_MC_EMODE _SB_MAKEMASK(15, S_MC_EMODE)
+#define V_MC_EMODE(x) _SB_MAKEVALUE(x, S_MC_EMODE)
+#define G_MC_EMODE(x) _SB_GETVALUE(x, S_MC_EMODE, M_MC_EMODE)
+#define V_MC_EMODE_DEFAULT V_MC_EMODE(0)
+
+#define S_MC_MODE 16
+#define M_MC_MODE _SB_MAKEMASK(15, S_MC_MODE)
+#define V_MC_MODE(x) _SB_MAKEVALUE(x, S_MC_MODE)
+#define G_MC_MODE(x) _SB_GETVALUE(x, S_MC_MODE, M_MC_MODE)
+#define V_MC_MODE_DEFAULT V_MC_MODE(0x22)
+
+#define S_MC_DRAM_TYPE 32
+#define M_MC_DRAM_TYPE _SB_MAKEMASK(3, S_MC_DRAM_TYPE)
+#define V_MC_DRAM_TYPE(x) _SB_MAKEVALUE(x, S_MC_DRAM_TYPE)
+#define G_MC_DRAM_TYPE(x) _SB_GETVALUE(x, S_MC_DRAM_TYPE, M_MC_DRAM_TYPE)
+
+#define K_MC_DRAM_TYPE_JEDEC 0
+#define K_MC_DRAM_TYPE_FCRAM 1
+#define K_MC_DRAM_TYPE_SGRAM 2
+
+#define V_MC_DRAM_TYPE_JEDEC V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_JEDEC)
+#define V_MC_DRAM_TYPE_FCRAM V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_FCRAM)
+#define V_MC_DRAM_TYPE_SGRAM V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_SGRAM)
+
+#define M_MC_EXTERNALDECODE _SB_MAKEMASK1(35)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define M_MC_PRE_ON_A8 _SB_MAKEMASK1(36)
+#define M_MC_RAM_WITH_A13 _SB_MAKEMASK1(37)
+#endif /* 1250 PASS3 || 112x PASS1 */
+
+
+
+/*
+ * SDRAM Timing Register (Table 6-15)
+ */
+
+#define M_MC_w2rIDLE_TWOCYCLES _SB_MAKEMASK1(60)
+#define M_MC_r2wIDLE_TWOCYCLES _SB_MAKEMASK1(61)
+#define M_MC_r2rIDLE_TWOCYCLES _SB_MAKEMASK1(62)
+
+#define S_MC_tFIFO 56
+#define M_MC_tFIFO _SB_MAKEMASK(4, S_MC_tFIFO)
+#define V_MC_tFIFO(x) _SB_MAKEVALUE(x, S_MC_tFIFO)
+#define G_MC_tFIFO(x) _SB_GETVALUE(x, S_MC_tFIFO, M_MC_tFIFO)
+#define K_MC_tFIFO_DEFAULT 1
+#define V_MC_tFIFO_DEFAULT V_MC_tFIFO(K_MC_tFIFO_DEFAULT)
+
+#define S_MC_tRFC 52
+#define M_MC_tRFC _SB_MAKEMASK(4, S_MC_tRFC)
+#define V_MC_tRFC(x) _SB_MAKEVALUE(x, S_MC_tRFC)
+#define G_MC_tRFC(x) _SB_GETVALUE(x, S_MC_tRFC, M_MC_tRFC)
+#define K_MC_tRFC_DEFAULT 12
+#define V_MC_tRFC_DEFAULT V_MC_tRFC(K_MC_tRFC_DEFAULT)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3)
+#define M_MC_tRFC_PLUS16 _SB_MAKEMASK1(51) /* 1250C3 and later. */
+#endif
+
+#define S_MC_tCwCr 40
+#define M_MC_tCwCr _SB_MAKEMASK(4, S_MC_tCwCr)
+#define V_MC_tCwCr(x) _SB_MAKEVALUE(x, S_MC_tCwCr)
+#define G_MC_tCwCr(x) _SB_GETVALUE(x, S_MC_tCwCr, M_MC_tCwCr)
+#define K_MC_tCwCr_DEFAULT 4
+#define V_MC_tCwCr_DEFAULT V_MC_tCwCr(K_MC_tCwCr_DEFAULT)
+
+#define S_MC_tRCr 28
+#define M_MC_tRCr _SB_MAKEMASK(4, S_MC_tRCr)
+#define V_MC_tRCr(x) _SB_MAKEVALUE(x, S_MC_tRCr)
+#define G_MC_tRCr(x) _SB_GETVALUE(x, S_MC_tRCr, M_MC_tRCr)
+#define K_MC_tRCr_DEFAULT 9
+#define V_MC_tRCr_DEFAULT V_MC_tRCr(K_MC_tRCr_DEFAULT)
+
+#define S_MC_tRCw 24
+#define M_MC_tRCw _SB_MAKEMASK(4, S_MC_tRCw)
+#define V_MC_tRCw(x) _SB_MAKEVALUE(x, S_MC_tRCw)
+#define G_MC_tRCw(x) _SB_GETVALUE(x, S_MC_tRCw, M_MC_tRCw)
+#define K_MC_tRCw_DEFAULT 10
+#define V_MC_tRCw_DEFAULT V_MC_tRCw(K_MC_tRCw_DEFAULT)
+
+#define S_MC_tRRD 20
+#define M_MC_tRRD _SB_MAKEMASK(4, S_MC_tRRD)
+#define V_MC_tRRD(x) _SB_MAKEVALUE(x, S_MC_tRRD)
+#define G_MC_tRRD(x) _SB_GETVALUE(x, S_MC_tRRD, M_MC_tRRD)
+#define K_MC_tRRD_DEFAULT 2
+#define V_MC_tRRD_DEFAULT V_MC_tRRD(K_MC_tRRD_DEFAULT)
+
+#define S_MC_tRP 16
+#define M_MC_tRP _SB_MAKEMASK(4, S_MC_tRP)
+#define V_MC_tRP(x) _SB_MAKEVALUE(x, S_MC_tRP)
+#define G_MC_tRP(x) _SB_GETVALUE(x, S_MC_tRP, M_MC_tRP)
+#define K_MC_tRP_DEFAULT 4
+#define V_MC_tRP_DEFAULT V_MC_tRP(K_MC_tRP_DEFAULT)
+
+#define S_MC_tCwD 8
+#define M_MC_tCwD _SB_MAKEMASK(4, S_MC_tCwD)
+#define V_MC_tCwD(x) _SB_MAKEVALUE(x, S_MC_tCwD)
+#define G_MC_tCwD(x) _SB_GETVALUE(x, S_MC_tCwD, M_MC_tCwD)
+#define K_MC_tCwD_DEFAULT 1
+#define V_MC_tCwD_DEFAULT V_MC_tCwD(K_MC_tCwD_DEFAULT)
+
+#define M_tCrDh _SB_MAKEMASK1(7)
+#define M_MC_tCrDh M_tCrDh
+
+#define S_MC_tCrD 4
+#define M_MC_tCrD _SB_MAKEMASK(3, S_MC_tCrD)
+#define V_MC_tCrD(x) _SB_MAKEVALUE(x, S_MC_tCrD)
+#define G_MC_tCrD(x) _SB_GETVALUE(x, S_MC_tCrD, M_MC_tCrD)
+#define K_MC_tCrD_DEFAULT 2
+#define V_MC_tCrD_DEFAULT V_MC_tCrD(K_MC_tCrD_DEFAULT)
+
+#define S_MC_tRCD 0
+#define M_MC_tRCD _SB_MAKEMASK(4, S_MC_tRCD)
+#define V_MC_tRCD(x) _SB_MAKEVALUE(x, S_MC_tRCD)
+#define G_MC_tRCD(x) _SB_GETVALUE(x, S_MC_tRCD, M_MC_tRCD)
+#define K_MC_tRCD_DEFAULT 3
+#define V_MC_tRCD_DEFAULT V_MC_tRCD(K_MC_tRCD_DEFAULT)
+
+#define V_MC_TIMING_DEFAULT V_MC_tFIFO(K_MC_tFIFO_DEFAULT) | \
+ V_MC_tRFC(K_MC_tRFC_DEFAULT) | \
+ V_MC_tCwCr(K_MC_tCwCr_DEFAULT) | \
+ V_MC_tRCr(K_MC_tRCr_DEFAULT) | \
+ V_MC_tRCw(K_MC_tRCw_DEFAULT) | \
+ V_MC_tRRD(K_MC_tRRD_DEFAULT) | \
+ V_MC_tRP(K_MC_tRP_DEFAULT) | \
+ V_MC_tCwD(K_MC_tCwD_DEFAULT) | \
+ V_MC_tCrD(K_MC_tCrD_DEFAULT) | \
+ V_MC_tRCD(K_MC_tRCD_DEFAULT) | \
+ M_MC_r2rIDLE_TWOCYCLES
+
+/*
+ * Errata says these are not the default
+ * M_MC_w2rIDLE_TWOCYCLES | \
+ * M_MC_r2wIDLE_TWOCYCLES | \
+ */
+
+
+/*
+ * Chip Select Start Address Register (Table 6-17)
+ */
+
+#define S_MC_CS0_START 0
+#define M_MC_CS0_START _SB_MAKEMASK(16, S_MC_CS0_START)
+#define V_MC_CS0_START(x) _SB_MAKEVALUE(x, S_MC_CS0_START)
+#define G_MC_CS0_START(x) _SB_GETVALUE(x, S_MC_CS0_START, M_MC_CS0_START)
+
+#define S_MC_CS1_START 16
+#define M_MC_CS1_START _SB_MAKEMASK(16, S_MC_CS1_START)
+#define V_MC_CS1_START(x) _SB_MAKEVALUE(x, S_MC_CS1_START)
+#define G_MC_CS1_START(x) _SB_GETVALUE(x, S_MC_CS1_START, M_MC_CS1_START)
+
+#define S_MC_CS2_START 32
+#define M_MC_CS2_START _SB_MAKEMASK(16, S_MC_CS2_START)
+#define V_MC_CS2_START(x) _SB_MAKEVALUE(x, S_MC_CS2_START)
+#define G_MC_CS2_START(x) _SB_GETVALUE(x, S_MC_CS2_START, M_MC_CS2_START)
+
+#define S_MC_CS3_START 48
+#define M_MC_CS3_START _SB_MAKEMASK(16, S_MC_CS3_START)
+#define V_MC_CS3_START(x) _SB_MAKEVALUE(x, S_MC_CS3_START)
+#define G_MC_CS3_START(x) _SB_GETVALUE(x, S_MC_CS3_START, M_MC_CS3_START)
+
+/*
+ * Chip Select End Address Register (Table 6-18)
+ */
+
+#define S_MC_CS0_END 0
+#define M_MC_CS0_END _SB_MAKEMASK(16, S_MC_CS0_END)
+#define V_MC_CS0_END(x) _SB_MAKEVALUE(x, S_MC_CS0_END)
+#define G_MC_CS0_END(x) _SB_GETVALUE(x, S_MC_CS0_END, M_MC_CS0_END)
+
+#define S_MC_CS1_END 16
+#define M_MC_CS1_END _SB_MAKEMASK(16, S_MC_CS1_END)
+#define V_MC_CS1_END(x) _SB_MAKEVALUE(x, S_MC_CS1_END)
+#define G_MC_CS1_END(x) _SB_GETVALUE(x, S_MC_CS1_END, M_MC_CS1_END)
+
+#define S_MC_CS2_END 32
+#define M_MC_CS2_END _SB_MAKEMASK(16, S_MC_CS2_END)
+#define V_MC_CS2_END(x) _SB_MAKEVALUE(x, S_MC_CS2_END)
+#define G_MC_CS2_END(x) _SB_GETVALUE(x, S_MC_CS2_END, M_MC_CS2_END)
+
+#define S_MC_CS3_END 48
+#define M_MC_CS3_END _SB_MAKEMASK(16, S_MC_CS3_END)
+#define V_MC_CS3_END(x) _SB_MAKEVALUE(x, S_MC_CS3_END)
+#define G_MC_CS3_END(x) _SB_GETVALUE(x, S_MC_CS3_END, M_MC_CS3_END)
+
+/*
+ * Chip Select Interleave Register (Table 6-19)
+ */
+
+#define S_MC_INTLV_RESERVED 0
+#define M_MC_INTLV_RESERVED _SB_MAKEMASK(5, S_MC_INTLV_RESERVED)
+
+#define S_MC_INTERLEAVE 7
+#define M_MC_INTERLEAVE _SB_MAKEMASK(18, S_MC_INTERLEAVE)
+#define V_MC_INTERLEAVE(x) _SB_MAKEVALUE(x, S_MC_INTERLEAVE)
+
+#define S_MC_INTLV_MBZ 25
+#define M_MC_INTLV_MBZ _SB_MAKEMASK(39, S_MC_INTLV_MBZ)
+
+/*
+ * Row Address Bits Register (Table 6-20)
+ */
+
+#define S_MC_RAS_RESERVED 0
+#define M_MC_RAS_RESERVED _SB_MAKEMASK(5, S_MC_RAS_RESERVED)
+
+#define S_MC_RAS_SELECT 12
+#define M_MC_RAS_SELECT _SB_MAKEMASK(25, S_MC_RAS_SELECT)
+#define V_MC_RAS_SELECT(x) _SB_MAKEVALUE(x, S_MC_RAS_SELECT)
+
+#define S_MC_RAS_MBZ 37
+#define M_MC_RAS_MBZ _SB_MAKEMASK(27, S_MC_RAS_MBZ)
+
+
+/*
+ * Column Address Bits Register (Table 6-21)
+ */
+
+#define S_MC_CAS_RESERVED 0
+#define M_MC_CAS_RESERVED _SB_MAKEMASK(5, S_MC_CAS_RESERVED)
+
+#define S_MC_CAS_SELECT 5
+#define M_MC_CAS_SELECT _SB_MAKEMASK(18, S_MC_CAS_SELECT)
+#define V_MC_CAS_SELECT(x) _SB_MAKEVALUE(x, S_MC_CAS_SELECT)
+
+#define S_MC_CAS_MBZ 23
+#define M_MC_CAS_MBZ _SB_MAKEMASK(41, S_MC_CAS_MBZ)
+
+
+/*
+ * Bank Address Bits Register (Table 6-22)
+ */
+
+#define S_MC_BA_RESERVED 0
+#define M_MC_BA_RESERVED _SB_MAKEMASK(5, S_MC_BA_RESERVED)
+
+#define S_MC_BA_SELECT 5
+#define M_MC_BA_SELECT _SB_MAKEMASK(20, S_MC_BA_SELECT)
+#define V_MC_BA_SELECT(x) _SB_MAKEVALUE(x, S_MC_BA_SELECT)
+
+#define S_MC_BA_MBZ 25
+#define M_MC_BA_MBZ _SB_MAKEMASK(39, S_MC_BA_MBZ)
+
+/*
+ * Chip Select Attribute Register (Table 6-23)
+ */
+
+#define K_MC_CS_ATTR_CLOSED 0
+#define K_MC_CS_ATTR_CASCHECK 1
+#define K_MC_CS_ATTR_HINT 2
+#define K_MC_CS_ATTR_OPEN 3
+
+#define S_MC_CS0_PAGE 0
+#define M_MC_CS0_PAGE _SB_MAKEMASK(2, S_MC_CS0_PAGE)
+#define V_MC_CS0_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS0_PAGE)
+#define G_MC_CS0_PAGE(x) _SB_GETVALUE(x, S_MC_CS0_PAGE, M_MC_CS0_PAGE)
+
+#define S_MC_CS1_PAGE 16
+#define M_MC_CS1_PAGE _SB_MAKEMASK(2, S_MC_CS1_PAGE)
+#define V_MC_CS1_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS1_PAGE)
+#define G_MC_CS1_PAGE(x) _SB_GETVALUE(x, S_MC_CS1_PAGE, M_MC_CS1_PAGE)
+
+#define S_MC_CS2_PAGE 32
+#define M_MC_CS2_PAGE _SB_MAKEMASK(2, S_MC_CS2_PAGE)
+#define V_MC_CS2_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS2_PAGE)
+#define G_MC_CS2_PAGE(x) _SB_GETVALUE(x, S_MC_CS2_PAGE, M_MC_CS2_PAGE)
+
+#define S_MC_CS3_PAGE 48
+#define M_MC_CS3_PAGE _SB_MAKEMASK(2, S_MC_CS3_PAGE)
+#define V_MC_CS3_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS3_PAGE)
+#define G_MC_CS3_PAGE(x) _SB_GETVALUE(x, S_MC_CS3_PAGE, M_MC_CS3_PAGE)
+
+/*
+ * ECC Test ECC Register (Table 6-25)
+ */
+
+#define S_MC_ECC_INVERT 0
+#define M_MC_ECC_INVERT _SB_MAKEMASK(8, S_MC_ECC_INVERT)
+
+
+#endif
diff --git a/arch/mips/include/asm/sibyte/sb1250_regs.h b/arch/mips/include/asm/sibyte/sb1250_regs.h
new file mode 100644
index 000000000..cdac01832
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/sb1250_regs.h
@@ -0,0 +1,880 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * SB1250 Board Support Package
+ *
+ * Register Definitions File: sb1250_regs.h
+ *
+ * This module contains the addresses of the on-chip peripherals
+ * on the SB1250.
+ *
+ * SB1250 specification level: 01/02/2002
+ *
+ *********************************************************************
+ *
+ * Copyright 2000,2001,2002,2003
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+
+#ifndef _SB1250_REGS_H
+#define _SB1250_REGS_H
+
+#include <asm/sibyte/sb1250_defs.h>
+
+
+/* *********************************************************************
+ * Some general notes:
+ *
+ * For the most part, when there is more than one peripheral
+ * of the same type on the SOC, the constants below will be
+ * offsets from the base of each peripheral. For example,
+ * the MAC registers are described as offsets from the first
+ * MAC register, and there will be a MAC_REGISTER() macro
+ * to calculate the base address of a given MAC.
+ *
+ * The information in this file is based on the SB1250 SOC
+ * manual version 0.2, July 2000.
+ ********************************************************************* */
+
+
+/* *********************************************************************
+ * Memory Controller Registers
+ ********************************************************************* */
+
+/*
+ * XXX: can't remove MC base 0 on the 112x, since it's used by other macros
+ * and there is one register there (though that register could get its own
+ * addr/offset constant).
+ */
+
+#if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */
+#define A_MC_BASE_0 0x0010051000
+#define A_MC_BASE_1 0x0010052000
+#define MC_REGISTER_SPACING 0x1000
+
+#define A_MC_BASE(ctlid) ((ctlid)*MC_REGISTER_SPACING+A_MC_BASE_0)
+#define A_MC_REGISTER(ctlid, reg) (A_MC_BASE(ctlid)+(reg))
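+
+/*
+ * Worked example of the base+offset convention described in the general
+ * notes above: A_MC_REGISTER(1, R_MC_CONFIG) (R_MC_CONFIG = 0x100, below)
+ * expands to A_MC_BASE(1) + 0x100 = 0x0010052000 + 0x100 = 0x0010052100,
+ * i.e. the CONFIG register of memory controller 1.
+ */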
+
+#define R_MC_CONFIG 0x0000000100
+#define R_MC_DRAMCMD 0x0000000120
+#define R_MC_DRAMMODE 0x0000000140
+#define R_MC_TIMING1 0x0000000160
+#define R_MC_TIMING2 0x0000000180
+#define R_MC_CS_START 0x00000001A0
+#define R_MC_CS_END 0x00000001C0
+#define R_MC_CS_INTERLEAVE 0x00000001E0
+#define S_MC_CS_STARTEND 16
+
+#define R_MC_CSX_BASE 0x0000000200
+#define R_MC_CSX_ROW 0x0000000000 /* relative to CSX_BASE, above */
+#define R_MC_CSX_COL 0x0000000020 /* relative to CSX_BASE, above */
+#define R_MC_CSX_BA 0x0000000040 /* relative to CSX_BASE, above */
+#define MC_CSX_SPACING 0x0000000060 /* relative to CSX_BASE, above */
+
+#define R_MC_CS0_ROW 0x0000000200
+#define R_MC_CS0_COL 0x0000000220
+#define R_MC_CS0_BA 0x0000000240
+#define R_MC_CS1_ROW 0x0000000260
+#define R_MC_CS1_COL 0x0000000280
+#define R_MC_CS1_BA 0x00000002A0
+#define R_MC_CS2_ROW 0x00000002C0
+#define R_MC_CS2_COL 0x00000002E0
+#define R_MC_CS2_BA 0x0000000300
+#define R_MC_CS3_ROW 0x0000000320
+#define R_MC_CS3_COL 0x0000000340
+#define R_MC_CS3_BA 0x0000000360
+#define R_MC_CS_ATTR 0x0000000380
+#define R_MC_TEST_DATA 0x0000000400
+#define R_MC_TEST_ECC 0x0000000420
+#define R_MC_MCLK_CFG 0x0000000500
+
+#endif /* 1250 & 112x */
+
+/* *********************************************************************
+ * L2 Cache Control Registers
+ ********************************************************************* */
+
+#if SIBYTE_HDR_FEATURE_1250_112x /* This L2C only on 1250/112x */
+
+#define A_L2_READ_TAG 0x0010040018
+#define A_L2_ECC_TAG 0x0010040038
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define A_L2_READ_MISC 0x0010040058
+#endif /* 1250 PASS3 || 112x PASS1 */
+#define A_L2_WAY_DISABLE 0x0010041000
+#define A_L2_MAKEDISABLE(x) (A_L2_WAY_DISABLE | (((~(x))&0x0F) << 8))
+#define A_L2_MGMT_TAG_BASE 0x00D0000000
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define A_L2_CACHE_DISABLE 0x0010042000
+#define A_L2_MAKECACHEDISABLE(x) (A_L2_CACHE_DISABLE | (((x)&0x0F) << 8))
+#define A_L2_MISC_CONFIG 0x0010043000
+#endif /* 1250 PASS2 || 112x PASS1 */
+
+/* Backward-compatibility definitions. */
+/* XXX: discourage people from using these constants. */
+#define A_L2_READ_ADDRESS A_L2_READ_TAG
+#define A_L2_EEC_ADDRESS A_L2_ECC_TAG
+
+#endif
+
+
+/* *********************************************************************
+ * PCI Interface Registers
+ ********************************************************************* */
+
+#if SIBYTE_HDR_FEATURE_1250_112x /* This PCI/HT only on 1250/112x */
+#define A_PCI_TYPE00_HEADER 0x00DE000000
+#define A_PCI_TYPE01_HEADER 0x00DE000800
+#endif
+
+
+/* *********************************************************************
+ * Ethernet DMA and MACs
+ ********************************************************************* */
+
+#define A_MAC_BASE_0 0x0010064000
+#define A_MAC_BASE_1 0x0010065000
+#if SIBYTE_HDR_FEATURE_CHIP(1250)
+#define A_MAC_BASE_2 0x0010066000
+#endif /* 1250 */
+
+#define MAC_SPACING 0x1000
+#define MAC_DMA_TXRX_SPACING 0x0400
+#define MAC_DMA_CHANNEL_SPACING 0x0100
+#define DMA_RX 0
+#define DMA_TX 1
+#define MAC_NUM_DMACHAN 2 /* channels per direction */
+
+/* XXX: not correct; depends on SOC type. */
+#define MAC_NUM_PORTS 3
+
+#define A_MAC_CHANNEL_BASE(macnum) \
+ (A_MAC_BASE_0 + \
+ MAC_SPACING*(macnum))
+
+#define A_MAC_REGISTER(macnum,reg) \
+ (A_MAC_BASE_0 + \
+ MAC_SPACING*(macnum) + (reg))
+
+
+#define R_MAC_DMA_CHANNELS 0x800 /* Relative to A_MAC_CHANNEL_BASE */
+
+#define A_MAC_DMA_CHANNEL_BASE(macnum, txrx, chan) \
+ ((A_MAC_CHANNEL_BASE(macnum)) + \
+ R_MAC_DMA_CHANNELS + \
+ (MAC_DMA_TXRX_SPACING*(txrx)) + \
+ (MAC_DMA_CHANNEL_SPACING*(chan)))
+
+#define R_MAC_DMA_CHANNEL_BASE(txrx, chan) \
+ (R_MAC_DMA_CHANNELS + \
+ (MAC_DMA_TXRX_SPACING*(txrx)) + \
+ (MAC_DMA_CHANNEL_SPACING*(chan)))
+
+#define A_MAC_DMA_REGISTER(macnum, txrx, chan, reg) \
+ (A_MAC_DMA_CHANNEL_BASE(macnum, txrx, chan) + \
+ (reg))
+
+#define R_MAC_DMA_REGISTER(txrx, chan, reg) \
+ (R_MAC_DMA_CHANNEL_BASE(txrx, chan) + \
+ (reg))
+
+/*
+ * DMA channel registers, relative to A_MAC_DMA_CHANNEL_BASE
+ */
+
+#define R_MAC_DMA_CONFIG0 0x00000000
+#define R_MAC_DMA_CONFIG1 0x00000008
+#define R_MAC_DMA_DSCR_BASE 0x00000010
+#define R_MAC_DMA_DSCR_CNT 0x00000018
+#define R_MAC_DMA_CUR_DSCRA 0x00000020
+#define R_MAC_DMA_CUR_DSCRB 0x00000028
+#define R_MAC_DMA_CUR_DSCRADDR 0x00000030
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define R_MAC_DMA_OODPKTLOST_RX 0x00000038 /* rx only */
+#endif /* 1250 PASS3 || 112x PASS1 */
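+
+/*
+ * Worked example of the MAC/DMA addressing macros above: for MAC 1,
+ * transmit side, channel 0,
+ *   A_MAC_DMA_CHANNEL_BASE(1, DMA_TX, 0)
+ *     = A_MAC_BASE_1 + R_MAC_DMA_CHANNELS + 0x400*1 + 0x100*0
+ *     = 0x0010065000 + 0x800 + 0x400 = 0x0010065C00,
+ * and A_MAC_DMA_REGISTER(1, DMA_TX, 0, R_MAC_DMA_DSCR_BASE) adds the
+ * per-channel offset 0x10 to give 0x0010065C10.
+ */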
+
+/*
+ * RMON Counters
+ */
+
+#define R_MAC_RMON_TX_BYTES 0x00000000
+#define R_MAC_RMON_COLLISIONS 0x00000008
+#define R_MAC_RMON_LATE_COL 0x00000010
+#define R_MAC_RMON_EX_COL 0x00000018
+#define R_MAC_RMON_FCS_ERROR 0x00000020
+#define R_MAC_RMON_TX_ABORT 0x00000028
+/* Counter #6 (0x30) now reserved */
+#define R_MAC_RMON_TX_BAD 0x00000038
+#define R_MAC_RMON_TX_GOOD 0x00000040
+#define R_MAC_RMON_TX_RUNT 0x00000048
+#define R_MAC_RMON_TX_OVERSIZE 0x00000050
+#define R_MAC_RMON_RX_BYTES 0x00000080
+#define R_MAC_RMON_RX_MCAST 0x00000088
+#define R_MAC_RMON_RX_BCAST 0x00000090
+#define R_MAC_RMON_RX_BAD 0x00000098
+#define R_MAC_RMON_RX_GOOD 0x000000A0
+#define R_MAC_RMON_RX_RUNT 0x000000A8
+#define R_MAC_RMON_RX_OVERSIZE 0x000000B0
+#define R_MAC_RMON_RX_FCS_ERROR 0x000000B8
+#define R_MAC_RMON_RX_LENGTH_ERROR 0x000000C0
+#define R_MAC_RMON_RX_CODE_ERROR 0x000000C8
+#define R_MAC_RMON_RX_ALIGN_ERROR 0x000000D0
+
+/* Updated to spec 0.2 */
+#define R_MAC_CFG 0x00000100
+#define R_MAC_THRSH_CFG 0x00000108
+#define R_MAC_VLANTAG 0x00000110
+#define R_MAC_FRAMECFG 0x00000118
+#define R_MAC_EOPCNT 0x00000120
+#define R_MAC_FIFO_PTRS 0x00000128
+#define R_MAC_ADFILTER_CFG 0x00000200
+#define R_MAC_ETHERNET_ADDR 0x00000208
+#define R_MAC_PKT_TYPE 0x00000210
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define R_MAC_ADMASK0 0x00000218
+#define R_MAC_ADMASK1 0x00000220
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+#define R_MAC_HASH_BASE 0x00000240
+#define R_MAC_ADDR_BASE 0x00000280
+#define R_MAC_CHLO0_BASE 0x00000300
+#define R_MAC_CHUP0_BASE 0x00000320
+#define R_MAC_ENABLE 0x00000400
+#define R_MAC_STATUS 0x00000408
+#define R_MAC_INT_MASK 0x00000410
+#define R_MAC_TXD_CTL 0x00000420
+#define R_MAC_MDIO 0x00000428
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define R_MAC_STATUS1 0x00000430
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+#define R_MAC_DEBUG_STATUS 0x00000448
+
+#define MAC_HASH_COUNT 8
+#define MAC_ADDR_COUNT 8
+#define MAC_CHMAP_COUNT 4
+
+
+/* *********************************************************************
+ * DUART Registers
+ ********************************************************************* */
+
+
+#if SIBYTE_HDR_FEATURE_1250_112x	/* This DUART only on 1250 & 112x */
+#define R_DUART_NUM_PORTS 2
+
+#define A_DUART 0x0010060000
+
+#define DUART_CHANREG_SPACING 0x100
+
+#define A_DUART_CHANREG(chan, reg) \
+ (A_DUART + DUART_CHANREG_SPACING * ((chan) + 1) + (reg))
+#endif /* 1250 & 112x */
+
+#define R_DUART_MODE_REG_1 0x000
+#define R_DUART_MODE_REG_2 0x010
+#define R_DUART_STATUS 0x020
+#define R_DUART_CLK_SEL 0x030
+#define R_DUART_CMD 0x050
+#define R_DUART_RX_HOLD 0x060
+#define R_DUART_TX_HOLD 0x070
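+
+/*
+ * Worked example: channel A registers start at offset 0x100 and channel B
+ * at 0x200, hence the "(chan) + 1" in A_DUART_CHANREG above.
+ *   A_DUART_CHANREG(0, R_DUART_MODE_REG_1) = 0x0010060000 + 0x100*1 + 0x000
+ *     = 0x0010060100,
+ *   A_DUART_CHANREG(1, R_DUART_STATUS)     = 0x0010060000 + 0x100*2 + 0x020
+ *     = 0x0010060220,
+ * matching the absolute A_DUART_* constants further down.
+ */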
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define R_DUART_FULL_CTL 0x040
+#define R_DUART_OPCR_X 0x080
+#define R_DUART_AUXCTL_X 0x090
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+
+
+/*
+ * The IMR and ISR can't be addressed with A_DUART_CHANREG,
+ * so use these macros instead.
+ */
+
+#if SIBYTE_HDR_FEATURE_1250_112x	/* This DUART only on 1250 & 112x */
+#define DUART_IMRISR_SPACING 0x20
+#define DUART_INCHNG_SPACING 0x10
+
+#define A_DUART_CTRLREG(reg) \
+ (A_DUART + DUART_CHANREG_SPACING * 3 + (reg))
+
+#define R_DUART_IMRREG(chan) \
+ (R_DUART_IMR_A + (chan) * DUART_IMRISR_SPACING)
+#define R_DUART_ISRREG(chan) \
+ (R_DUART_ISR_A + (chan) * DUART_IMRISR_SPACING)
+#define R_DUART_INCHREG(chan) \
+ (R_DUART_IN_CHNG_A + (chan) * DUART_INCHNG_SPACING)
+
+#define A_DUART_IMRREG(chan) A_DUART_CTRLREG(R_DUART_IMRREG(chan))
+#define A_DUART_ISRREG(chan) A_DUART_CTRLREG(R_DUART_ISRREG(chan))
+#define A_DUART_INCHREG(chan) A_DUART_CTRLREG(R_DUART_INCHREG(chan))
+#endif /* 1250 & 112x */
+
+#define R_DUART_AUX_CTRL 0x010
+#define R_DUART_ISR_A 0x020
+#define R_DUART_IMR_A 0x030
+#define R_DUART_ISR_B 0x040
+#define R_DUART_IMR_B 0x050
+#define R_DUART_OUT_PORT 0x060
+#define R_DUART_OPCR 0x070
+#define R_DUART_IN_PORT 0x080
+
+#define R_DUART_SET_OPR 0x0B0
+#define R_DUART_CLEAR_OPR 0x0C0
+#define R_DUART_IN_CHNG_A 0x0D0
+#define R_DUART_IN_CHNG_B 0x0E0
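+
+/*
+ * Worked example for the control-register macros above: the shared
+ * control block sits at A_DUART + 0x300, so
+ *   R_DUART_IMRREG(1) = R_DUART_IMR_A + 1*0x20 = 0x050 = R_DUART_IMR_B,
+ *   A_DUART_IMRREG(1) = A_DUART_CTRLREG(0x050) = 0x0010060300 + 0x050
+ *                     = 0x0010060350, matching A_DUART_IMR_B below.
+ */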
+
+
+/*
+ * These constants are the absolute addresses.
+ */
+
+#define A_DUART_MODE_REG_1_A 0x0010060100
+#define A_DUART_MODE_REG_2_A 0x0010060110
+#define A_DUART_STATUS_A 0x0010060120
+#define A_DUART_CLK_SEL_A 0x0010060130
+#define A_DUART_CMD_A 0x0010060150
+#define A_DUART_RX_HOLD_A 0x0010060160
+#define A_DUART_TX_HOLD_A 0x0010060170
+
+#define A_DUART_MODE_REG_1_B 0x0010060200
+#define A_DUART_MODE_REG_2_B 0x0010060210
+#define A_DUART_STATUS_B 0x0010060220
+#define A_DUART_CLK_SEL_B 0x0010060230
+#define A_DUART_CMD_B 0x0010060250
+#define A_DUART_RX_HOLD_B 0x0010060260
+#define A_DUART_TX_HOLD_B 0x0010060270
+
+#define A_DUART_INPORT_CHNG 0x0010060300
+#define A_DUART_AUX_CTRL 0x0010060310
+#define A_DUART_ISR_A 0x0010060320
+#define A_DUART_IMR_A 0x0010060330
+#define A_DUART_ISR_B 0x0010060340
+#define A_DUART_IMR_B 0x0010060350
+#define A_DUART_OUT_PORT 0x0010060360
+#define A_DUART_OPCR 0x0010060370
+#define A_DUART_IN_PORT 0x0010060380
+#define A_DUART_ISR 0x0010060390
+#define A_DUART_IMR 0x00100603A0
+#define A_DUART_SET_OPR 0x00100603B0
+#define A_DUART_CLEAR_OPR 0x00100603C0
+#define A_DUART_INPORT_CHNG_A 0x00100603D0
+#define A_DUART_INPORT_CHNG_B 0x00100603E0
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define A_DUART_FULL_CTL_A 0x0010060140
+#define A_DUART_FULL_CTL_B 0x0010060240
+
+#define A_DUART_OPCR_A 0x0010060180
+#define A_DUART_OPCR_B 0x0010060280
+
+#define A_DUART_INPORT_CHNG_DEBUG 0x00100603F0
+#endif /* 1250 PASS2 || 112x PASS1 */
+
+
+/* *********************************************************************
+ * Synchronous Serial Registers
+ ********************************************************************* */
+
+
+#if SIBYTE_HDR_FEATURE_1250_112x /* sync serial only on 1250/112x */
+
+#define A_SER_BASE_0 0x0010060400
+#define A_SER_BASE_1 0x0010060800
+#define SER_SPACING 0x400
+
+#define SER_DMA_TXRX_SPACING 0x80
+
+#define SER_NUM_PORTS 2
+
+#define A_SER_CHANNEL_BASE(sernum) \
+ (A_SER_BASE_0 + \
+ SER_SPACING*(sernum))
+
+#define A_SER_REGISTER(sernum,reg) \
+ (A_SER_BASE_0 + \
+ SER_SPACING*(sernum) + (reg))
+
+
+#define R_SER_DMA_CHANNELS 0 /* Relative to A_SER_BASE_x */
+
+#define A_SER_DMA_CHANNEL_BASE(sernum,txrx) \
+ ((A_SER_CHANNEL_BASE(sernum)) + \
+ R_SER_DMA_CHANNELS + \
+ (SER_DMA_TXRX_SPACING*(txrx)))
+
+#define A_SER_DMA_REGISTER(sernum, txrx, reg) \
+ (A_SER_DMA_CHANNEL_BASE(sernum, txrx) + \
+ (reg))
+
+
+/*
+ * DMA channel registers, relative to A_SER_DMA_CHANNEL_BASE
+ */
+
+#define R_SER_DMA_CONFIG0 0x00000000
+#define R_SER_DMA_CONFIG1 0x00000008
+#define R_SER_DMA_DSCR_BASE 0x00000010
+#define R_SER_DMA_DSCR_CNT 0x00000018
+#define R_SER_DMA_CUR_DSCRA 0x00000020
+#define R_SER_DMA_CUR_DSCRB 0x00000028
+#define R_SER_DMA_CUR_DSCRADDR 0x00000030
+
+#define R_SER_DMA_CONFIG0_RX 0x00000000
+#define R_SER_DMA_CONFIG1_RX 0x00000008
+#define R_SER_DMA_DSCR_BASE_RX 0x00000010
+#define R_SER_DMA_DSCR_COUNT_RX 0x00000018
+#define R_SER_DMA_CUR_DSCR_A_RX 0x00000020
+#define R_SER_DMA_CUR_DSCR_B_RX 0x00000028
+#define R_SER_DMA_CUR_DSCR_ADDR_RX 0x00000030
+
+#define R_SER_DMA_CONFIG0_TX 0x00000080
+#define R_SER_DMA_CONFIG1_TX 0x00000088
+#define R_SER_DMA_DSCR_BASE_TX 0x00000090
+#define R_SER_DMA_DSCR_COUNT_TX 0x00000098
+#define R_SER_DMA_CUR_DSCR_A_TX 0x000000A0
+#define R_SER_DMA_CUR_DSCR_B_TX 0x000000A8
+#define R_SER_DMA_CUR_DSCR_ADDR_TX 0x000000B0
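+
+/*
+ * Worked example: A_SER_DMA_CHANNEL_BASE(1, DMA_TX)
+ *   = A_SER_BASE_0 + SER_SPACING*1 + SER_DMA_TXRX_SPACING*1
+ *   = 0x0010060400 + 0x400 + 0x80 = 0x0010060880.
+ * Note that the *_TX offsets above appear to fold in the 0x80 TX spacing,
+ * so R_SER_DMA_CONFIG0_TX (0x80) relative to the channel base reaches the
+ * same register as R_SER_DMA_CONFIG0 relative to the TX DMA base.
+ */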
+
+#define R_SER_MODE 0x00000100
+#define R_SER_MINFRM_SZ 0x00000108
+#define R_SER_MAXFRM_SZ 0x00000110
+#define R_SER_ADDR 0x00000118
+#define R_SER_USR0_ADDR 0x00000120
+#define R_SER_USR1_ADDR 0x00000128
+#define R_SER_USR2_ADDR 0x00000130
+#define R_SER_USR3_ADDR 0x00000138
+#define R_SER_CMD 0x00000140
+#define R_SER_TX_RD_THRSH 0x00000160
+#define R_SER_TX_WR_THRSH 0x00000168
+#define R_SER_RX_RD_THRSH 0x00000170
+#define R_SER_LINE_MODE 0x00000178
+#define R_SER_DMA_ENABLE 0x00000180
+#define R_SER_INT_MASK 0x00000190
+#define R_SER_STATUS 0x00000188
+#define R_SER_STATUS_DEBUG 0x000001A8
+#define R_SER_RX_TABLE_BASE 0x00000200
+#define SER_RX_TABLE_COUNT 16
+#define R_SER_TX_TABLE_BASE 0x00000300
+#define SER_TX_TABLE_COUNT 16
+
+/* RMON Counters */
+#define R_SER_RMON_TX_BYTE_LO 0x000001C0
+#define R_SER_RMON_TX_BYTE_HI 0x000001C8
+#define R_SER_RMON_RX_BYTE_LO 0x000001D0
+#define R_SER_RMON_RX_BYTE_HI 0x000001D8
+#define R_SER_RMON_TX_UNDERRUN 0x000001E0
+#define R_SER_RMON_RX_OVERFLOW 0x000001E8
+#define R_SER_RMON_RX_ERRORS 0x000001F0
+#define R_SER_RMON_RX_BADADDR 0x000001F8
+
+#endif /* 1250/112x */
+
+/* *********************************************************************
+ * Generic Bus Registers
+ ********************************************************************* */
+
+#define IO_EXT_CFG_COUNT 8
+
+#define A_IO_EXT_BASE 0x0010061000
+#define A_IO_EXT_REG(r) (A_IO_EXT_BASE + (r))
+
+#define A_IO_EXT_CFG_BASE 0x0010061000
+#define A_IO_EXT_MULT_SIZE_BASE 0x0010061100
+#define A_IO_EXT_START_ADDR_BASE 0x0010061200
+#define A_IO_EXT_TIME_CFG0_BASE 0x0010061600
+#define A_IO_EXT_TIME_CFG1_BASE 0x0010061700
+
+#define IO_EXT_REGISTER_SPACING 8
+#define A_IO_EXT_CS_BASE(cs) (A_IO_EXT_CFG_BASE+IO_EXT_REGISTER_SPACING*(cs))
+#define R_IO_EXT_REG(reg, cs) ((cs)*IO_EXT_REGISTER_SPACING + (reg))
+
+#define R_IO_EXT_CFG 0x0000
+#define R_IO_EXT_MULT_SIZE 0x0100
+#define R_IO_EXT_START_ADDR 0x0200
+#define R_IO_EXT_TIME_CFG0 0x0600
+#define R_IO_EXT_TIME_CFG1 0x0700
+
+
+#define A_IO_INTERRUPT_STATUS 0x0010061A00
+#define A_IO_INTERRUPT_DATA0 0x0010061A10
+#define A_IO_INTERRUPT_DATA1 0x0010061A18
+#define A_IO_INTERRUPT_DATA2 0x0010061A20
+#define A_IO_INTERRUPT_DATA3 0x0010061A28
+#define A_IO_INTERRUPT_ADDR0 0x0010061A30
+#define A_IO_INTERRUPT_ADDR1 0x0010061A40
+#define A_IO_INTERRUPT_PARITY 0x0010061A50
+#define A_IO_PCMCIA_CFG 0x0010061A60
+#define A_IO_PCMCIA_STATUS 0x0010061A70
+#define A_IO_DRIVE_0 0x0010061300
+#define A_IO_DRIVE_1 0x0010061308
+#define A_IO_DRIVE_2 0x0010061310
+#define A_IO_DRIVE_3 0x0010061318
+#define A_IO_DRIVE_BASE A_IO_DRIVE_0
+#define IO_DRIVE_REGISTER_SPACING 8
+#define R_IO_DRIVE(x) ((x)*IO_DRIVE_REGISTER_SPACING)
+#define A_IO_DRIVE(x) (A_IO_DRIVE_BASE + R_IO_DRIVE(x))
+
+#define R_IO_INTERRUPT_STATUS 0x0A00
+#define R_IO_INTERRUPT_DATA0 0x0A10
+#define R_IO_INTERRUPT_DATA1 0x0A18
+#define R_IO_INTERRUPT_DATA2 0x0A20
+#define R_IO_INTERRUPT_DATA3 0x0A28
+#define R_IO_INTERRUPT_ADDR0 0x0A30
+#define R_IO_INTERRUPT_ADDR1 0x0A40
+#define R_IO_INTERRUPT_PARITY 0x0A50
+#define R_IO_PCMCIA_CFG 0x0A60
+#define R_IO_PCMCIA_STATUS 0x0A70
+
+/* *********************************************************************
+ * GPIO Registers
+ ********************************************************************* */
+
+#define A_GPIO_CLR_EDGE 0x0010061A80
+#define A_GPIO_INT_TYPE 0x0010061A88
+#define A_GPIO_INPUT_INVERT 0x0010061A90
+#define A_GPIO_GLITCH 0x0010061A98
+#define A_GPIO_READ 0x0010061AA0
+#define A_GPIO_DIRECTION 0x0010061AA8
+#define A_GPIO_PIN_CLR 0x0010061AB0
+#define A_GPIO_PIN_SET 0x0010061AB8
+
+#define A_GPIO_BASE 0x0010061A80
+
+#define R_GPIO_CLR_EDGE 0x00
+#define R_GPIO_INT_TYPE 0x08
+#define R_GPIO_INPUT_INVERT 0x10
+#define R_GPIO_GLITCH 0x18
+#define R_GPIO_READ 0x20
+#define R_GPIO_DIRECTION 0x28
+#define R_GPIO_PIN_CLR 0x30
+#define R_GPIO_PIN_SET 0x38
+
+/* *********************************************************************
+ * SMBus Registers
+ ********************************************************************* */
+
+#define A_SMB_XTRA_0 0x0010060000
+#define A_SMB_XTRA_1 0x0010060008
+#define A_SMB_FREQ_0 0x0010060010
+#define A_SMB_FREQ_1 0x0010060018
+#define A_SMB_STATUS_0 0x0010060020
+#define A_SMB_STATUS_1 0x0010060028
+#define A_SMB_CMD_0 0x0010060030
+#define A_SMB_CMD_1 0x0010060038
+#define A_SMB_START_0 0x0010060040
+#define A_SMB_START_1 0x0010060048
+#define A_SMB_DATA_0 0x0010060050
+#define A_SMB_DATA_1 0x0010060058
+#define A_SMB_CONTROL_0 0x0010060060
+#define A_SMB_CONTROL_1 0x0010060068
+#define A_SMB_PEC_0 0x0010060070
+#define A_SMB_PEC_1 0x0010060078
+
+#define A_SMB_0 0x0010060000
+#define A_SMB_1 0x0010060008
+#define SMB_REGISTER_SPACING 0x8
+#define A_SMB_BASE(idx) (A_SMB_0+(idx)*SMB_REGISTER_SPACING)
+#define A_SMB_REGISTER(idx, reg) (A_SMB_BASE(idx)+(reg))
+
+#define R_SMB_XTRA 0x0000000000
+#define R_SMB_FREQ 0x0000000010
+#define R_SMB_STATUS 0x0000000020
+#define R_SMB_CMD 0x0000000030
+#define R_SMB_START 0x0000000040
+#define R_SMB_DATA 0x0000000050
+#define R_SMB_CONTROL 0x0000000060
+#define R_SMB_PEC 0x0000000070
+
+/* *********************************************************************
+ * Timer Registers
+ ********************************************************************* */
+
+/*
+ * Watchdog timers
+ */
+
+#define A_SCD_WDOG_0 0x0010020050
+#define A_SCD_WDOG_1 0x0010020150
+#define SCD_WDOG_SPACING 0x100
+#define SCD_NUM_WDOGS 2
+#define A_SCD_WDOG_BASE(w) (A_SCD_WDOG_0+SCD_WDOG_SPACING*(w))
+#define A_SCD_WDOG_REGISTER(w, r) (A_SCD_WDOG_BASE(w) + (r))
+
+#define R_SCD_WDOG_INIT 0x0000000000
+#define R_SCD_WDOG_CNT 0x0000000008
+#define R_SCD_WDOG_CFG 0x0000000010
+
+#define A_SCD_WDOG_INIT_0 0x0010020050
+#define A_SCD_WDOG_CNT_0 0x0010020058
+#define A_SCD_WDOG_CFG_0 0x0010020060
+
+#define A_SCD_WDOG_INIT_1 0x0010020150
+#define A_SCD_WDOG_CNT_1 0x0010020158
+#define A_SCD_WDOG_CFG_1 0x0010020160
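+
+/*
+ * Worked example: A_SCD_WDOG_REGISTER(1, R_SCD_WDOG_CFG)
+ *   = A_SCD_WDOG_0 + SCD_WDOG_SPACING*1 + 0x10
+ *   = 0x0010020050 + 0x100 + 0x10 = 0x0010020160,
+ * which is the A_SCD_WDOG_CFG_1 constant above.
+ */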
+
+/*
+ * Generic timers
+ */
+
+#define A_SCD_TIMER_0 0x0010020070
+#define A_SCD_TIMER_1 0x0010020078
+#define A_SCD_TIMER_2 0x0010020170
+#define A_SCD_TIMER_3 0x0010020178
+#define SCD_NUM_TIMERS 4
+#define A_SCD_TIMER_BASE(w) (A_SCD_TIMER_0+0x08*((w)&1)+0x100*(((w)&2)>>1))
+#define A_SCD_TIMER_REGISTER(w, r) (A_SCD_TIMER_BASE(w) + (r))
+
+#define R_SCD_TIMER_INIT 0x0000000000
+#define R_SCD_TIMER_CNT 0x0000000010
+#define R_SCD_TIMER_CFG 0x0000000020
+
+#define A_SCD_TIMER_INIT_0 0x0010020070
+#define A_SCD_TIMER_CNT_0 0x0010020080
+#define A_SCD_TIMER_CFG_0 0x0010020090
+
+#define A_SCD_TIMER_INIT_1 0x0010020078
+#define A_SCD_TIMER_CNT_1 0x0010020088
+#define A_SCD_TIMER_CFG_1 0x0010020098
+
+#define A_SCD_TIMER_INIT_2 0x0010020170
+#define A_SCD_TIMER_CNT_2 0x0010020180
+#define A_SCD_TIMER_CFG_2 0x0010020190
+
+#define A_SCD_TIMER_INIT_3 0x0010020178
+#define A_SCD_TIMER_CNT_3 0x0010020188
+#define A_SCD_TIMER_CFG_3 0x0010020198
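+
+/*
+ * The timer bases are not evenly spaced: timers 0/1 sit at 0x0010020070
+ * and 0x0010020078, timers 2/3 at 0x0010020170 and 0x0010020178, which is
+ * why A_SCD_TIMER_BASE() selects with (w)&1 and ((w)&2)>>1.  For example,
+ *   A_SCD_TIMER_BASE(2) = 0x0010020070 + 0x08*0 + 0x100*1 = 0x0010020170,
+ *   A_SCD_TIMER_REGISTER(2, R_SCD_TIMER_CNT) = 0x0010020170 + 0x10
+ *     = 0x0010020180 (A_SCD_TIMER_CNT_2 above).
+ */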
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define A_SCD_SCRATCH 0x0010020C10
+#endif /* 1250 PASS2 || 112x PASS1 */
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define A_SCD_ZBBUS_CYCLE_COUNT 0x0010030000
+#define A_SCD_ZBBUS_CYCLE_CP0 0x0010020C00
+#define A_SCD_ZBBUS_CYCLE_CP1 0x0010020C08
+#endif
+
+/* *********************************************************************
+ * System Control Registers
+ ********************************************************************* */
+
+#define A_SCD_SYSTEM_REVISION 0x0010020000
+#define A_SCD_SYSTEM_CFG 0x0010020008
+#define A_SCD_SYSTEM_MANUF 0x0010038000
+
+/* *********************************************************************
+ * System Address Trap Registers
+ ********************************************************************* */
+
+#define A_ADDR_TRAP_INDEX 0x00100200B0
+#define A_ADDR_TRAP_REG 0x00100200B8
+#define A_ADDR_TRAP_UP_0 0x0010020400
+#define A_ADDR_TRAP_UP_1 0x0010020408
+#define A_ADDR_TRAP_UP_2 0x0010020410
+#define A_ADDR_TRAP_UP_3 0x0010020418
+#define A_ADDR_TRAP_DOWN_0 0x0010020420
+#define A_ADDR_TRAP_DOWN_1 0x0010020428
+#define A_ADDR_TRAP_DOWN_2 0x0010020430
+#define A_ADDR_TRAP_DOWN_3 0x0010020438
+#define A_ADDR_TRAP_CFG_0 0x0010020440
+#define A_ADDR_TRAP_CFG_1 0x0010020448
+#define A_ADDR_TRAP_CFG_2 0x0010020450
+#define A_ADDR_TRAP_CFG_3 0x0010020458
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define A_ADDR_TRAP_REG_DEBUG 0x0010020460
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+
+#define ADDR_TRAP_SPACING 8
+#define NUM_ADDR_TRAP 4
+#define A_ADDR_TRAP_UP(n) (A_ADDR_TRAP_UP_0 + ((n) * ADDR_TRAP_SPACING))
+#define A_ADDR_TRAP_DOWN(n) (A_ADDR_TRAP_DOWN_0 + ((n) * ADDR_TRAP_SPACING))
+#define A_ADDR_TRAP_CFG(n) (A_ADDR_TRAP_CFG_0 + ((n) * ADDR_TRAP_SPACING))
+
+
+/* *********************************************************************
+ * System Interrupt Mapper Registers
+ ********************************************************************* */
+
+#define A_IMR_CPU0_BASE 0x0010020000
+#define A_IMR_CPU1_BASE 0x0010022000
+#define IMR_REGISTER_SPACING 0x2000
+#define IMR_REGISTER_SPACING_SHIFT 13
+
+#define A_IMR_MAPPER(cpu) (A_IMR_CPU0_BASE+(cpu)*IMR_REGISTER_SPACING)
+#define A_IMR_REGISTER(cpu, reg) (A_IMR_MAPPER(cpu)+(reg))
+
+#define R_IMR_INTERRUPT_DIAG 0x0010
+#define R_IMR_INTERRUPT_LDT 0x0018
+#define R_IMR_INTERRUPT_MASK 0x0028
+#define R_IMR_INTERRUPT_TRACE 0x0038
+#define R_IMR_INTERRUPT_SOURCE_STATUS 0x0040
+#define R_IMR_LDT_INTERRUPT_SET 0x0048
+#define R_IMR_LDT_INTERRUPT 0x0018
+#define R_IMR_LDT_INTERRUPT_CLR 0x0020
+#define R_IMR_MAILBOX_CPU 0x00c0
+#define R_IMR_ALIAS_MAILBOX_CPU 0x1000
+#define R_IMR_MAILBOX_SET_CPU 0x00C8
+#define R_IMR_ALIAS_MAILBOX_SET_CPU 0x1008
+#define R_IMR_MAILBOX_CLR_CPU 0x00D0
+#define R_IMR_INTERRUPT_STATUS_BASE 0x0100
+#define R_IMR_INTERRUPT_STATUS_COUNT 7
+#define R_IMR_INTERRUPT_MAP_BASE 0x0200
+#define R_IMR_INTERRUPT_MAP_COUNT 64
+
+/*
+ * These macros work together to build the address of a mailbox
+ * register, e.g., A_MAILBOX_REGISTER(R_IMR_MAILBOX_SET_CPU, 1)
+ * (mbox_0_set_cpu2) expands to 0x0010020000 + 1*0x2000 + 0xC8
+ * = 0x00100220C8
+ */
+#define A_MAILBOX_REGISTER(reg,cpu) \
+ (A_IMR_CPU0_BASE + (cpu * IMR_REGISTER_SPACING) + reg)
+
+/* *********************************************************************
+ * System Performance Counter Registers
+ ********************************************************************* */
+
+#define A_SCD_PERF_CNT_CFG 0x00100204C0
+#define A_SCD_PERF_CNT_0 0x00100204D0
+#define A_SCD_PERF_CNT_1 0x00100204D8
+#define A_SCD_PERF_CNT_2 0x00100204E0
+#define A_SCD_PERF_CNT_3 0x00100204E8
+
+#define SCD_NUM_PERF_CNT 4
+#define SCD_PERF_CNT_SPACING 8
+#define A_SCD_PERF_CNT(n) (A_SCD_PERF_CNT_0+(n*SCD_PERF_CNT_SPACING))
+
+/* *********************************************************************
+ * System Bus Watcher Registers
+ ********************************************************************* */
+
+#define A_SCD_BUS_ERR_STATUS 0x0010020880
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define A_SCD_BUS_ERR_STATUS_DEBUG 0x00100208D0
+#define A_BUS_ERR_STATUS_DEBUG 0x00100208D0
+#endif /* 1250 PASS2 || 112x PASS1 */
+#define A_BUS_ERR_DATA_0 0x00100208A0
+#define A_BUS_ERR_DATA_1 0x00100208A8
+#define A_BUS_ERR_DATA_2 0x00100208B0
+#define A_BUS_ERR_DATA_3 0x00100208B8
+#define A_BUS_L2_ERRORS 0x00100208C0
+#define A_BUS_MEM_IO_ERRORS 0x00100208C8
+
+/* *********************************************************************
+ * System Debug Controller Registers
+ ********************************************************************* */
+
+#define A_SCD_JTAG_BASE 0x0010000000
+
+/* *********************************************************************
+ * System Trace Buffer Registers
+ ********************************************************************* */
+
+#define A_SCD_TRACE_CFG 0x0010020A00
+#define A_SCD_TRACE_READ 0x0010020A08
+#define A_SCD_TRACE_EVENT_0 0x0010020A20
+#define A_SCD_TRACE_EVENT_1 0x0010020A28
+#define A_SCD_TRACE_EVENT_2 0x0010020A30
+#define A_SCD_TRACE_EVENT_3 0x0010020A38
+#define A_SCD_TRACE_SEQUENCE_0 0x0010020A40
+#define A_SCD_TRACE_SEQUENCE_1 0x0010020A48
+#define A_SCD_TRACE_SEQUENCE_2 0x0010020A50
+#define A_SCD_TRACE_SEQUENCE_3 0x0010020A58
+#define A_SCD_TRACE_EVENT_4 0x0010020A60
+#define A_SCD_TRACE_EVENT_5 0x0010020A68
+#define A_SCD_TRACE_EVENT_6 0x0010020A70
+#define A_SCD_TRACE_EVENT_7 0x0010020A78
+#define A_SCD_TRACE_SEQUENCE_4 0x0010020A80
+#define A_SCD_TRACE_SEQUENCE_5 0x0010020A88
+#define A_SCD_TRACE_SEQUENCE_6 0x0010020A90
+#define A_SCD_TRACE_SEQUENCE_7 0x0010020A98
+
+#define TRACE_REGISTER_SPACING 8
+#define TRACE_NUM_REGISTERS 8
+#define A_SCD_TRACE_EVENT(n) (((n) & 4) ? \
+ (A_SCD_TRACE_EVENT_4 + (((n) & 3) * TRACE_REGISTER_SPACING)) : \
+ (A_SCD_TRACE_EVENT_0 + ((n) * TRACE_REGISTER_SPACING)))
+#define A_SCD_TRACE_SEQUENCE(n) (((n) & 4) ? \
+ (A_SCD_TRACE_SEQUENCE_4 + (((n) & 3) * TRACE_REGISTER_SPACING)) : \
+ (A_SCD_TRACE_SEQUENCE_0 + ((n) * TRACE_REGISTER_SPACING)))
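+
+/*
+ * Worked example: events 4-7 are not contiguous with events 0-3 (the
+ * sequence registers sit in between), hence the (n) & 4 test above.
+ *   A_SCD_TRACE_EVENT(6) = A_SCD_TRACE_EVENT_4 + (6 & 3)*8
+ *     = 0x0010020A60 + 0x10 = 0x0010020A70 (A_SCD_TRACE_EVENT_6),
+ *   A_SCD_TRACE_EVENT(2) = A_SCD_TRACE_EVENT_0 + 2*8 = 0x0010020A30.
+ */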
+
+/* *********************************************************************
+ * System Generic DMA Registers
+ ********************************************************************* */
+
+#define A_DM_0 0x0010020B00
+#define A_DM_1 0x0010020B20
+#define A_DM_2 0x0010020B40
+#define A_DM_3 0x0010020B60
+#define DM_REGISTER_SPACING 0x20
+#define DM_NUM_CHANNELS 4
+#define A_DM_BASE(idx) (A_DM_0 + ((idx) * DM_REGISTER_SPACING))
+#define A_DM_REGISTER(idx, reg) (A_DM_BASE(idx) + (reg))
+
+#define R_DM_DSCR_BASE 0x0000000000
+#define R_DM_DSCR_COUNT 0x0000000008
+#define R_DM_CUR_DSCR_ADDR 0x0000000010
+#define R_DM_DSCR_BASE_DEBUG 0x0000000018
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define A_DM_PARTIAL_0 0x0010020ba0
+#define A_DM_PARTIAL_1 0x0010020ba8
+#define A_DM_PARTIAL_2 0x0010020bb0
+#define A_DM_PARTIAL_3 0x0010020bb8
+#define DM_PARTIAL_REGISTER_SPACING 0x8
+#define A_DM_PARTIAL(idx) (A_DM_PARTIAL_0 + ((idx) * DM_PARTIAL_REGISTER_SPACING))
+#endif /* 1250 PASS3 || 112x PASS1 */
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define A_DM_CRC_0 0x0010020b80
+#define A_DM_CRC_1 0x0010020b90
+#define DM_CRC_REGISTER_SPACING 0x10
+#define DM_CRC_NUM_CHANNELS 2
+#define A_DM_CRC_BASE(idx) (A_DM_CRC_0 + ((idx) * DM_CRC_REGISTER_SPACING))
+#define A_DM_CRC_REGISTER(idx, reg) (A_DM_CRC_BASE(idx) + (reg))
+
+#define R_CRC_DEF_0 0x00
+#define R_CTCP_DEF_0 0x08
+#endif /* 1250 PASS3 || 112x PASS1 */
+
+/* *********************************************************************
+ * Physical Address Map
+ ********************************************************************* */
+
+#if SIBYTE_HDR_FEATURE_1250_112x
+#define A_PHYS_MEMORY_0 _SB_MAKE64(0x0000000000)
+#define A_PHYS_MEMORY_SIZE _SB_MAKE64((256*1024*1024))
+#define A_PHYS_SYSTEM_CTL _SB_MAKE64(0x0010000000)
+#define A_PHYS_IO_SYSTEM _SB_MAKE64(0x0010060000)
+#define A_PHYS_GENBUS _SB_MAKE64(0x0010090000)
+#define A_PHYS_GENBUS_END _SB_MAKE64(0x0040000000)
+#define A_PHYS_LDTPCI_IO_MATCH_BYTES_32 _SB_MAKE64(0x0040000000)
+#define A_PHYS_LDTPCI_IO_MATCH_BITS_32 _SB_MAKE64(0x0060000000)
+#define A_PHYS_MEMORY_1 _SB_MAKE64(0x0080000000)
+#define A_PHYS_MEMORY_2 _SB_MAKE64(0x0090000000)
+#define A_PHYS_MEMORY_3 _SB_MAKE64(0x00C0000000)
+#define A_PHYS_L2_CACHE_TEST _SB_MAKE64(0x00D0000000)
+#define A_PHYS_LDT_SPECIAL_MATCH_BYTES _SB_MAKE64(0x00D8000000)
+#define A_PHYS_LDTPCI_IO_MATCH_BYTES _SB_MAKE64(0x00DC000000)
+#define A_PHYS_LDTPCI_CFG_MATCH_BYTES _SB_MAKE64(0x00DE000000)
+#define A_PHYS_LDT_SPECIAL_MATCH_BITS _SB_MAKE64(0x00F8000000)
+#define A_PHYS_LDTPCI_IO_MATCH_BITS _SB_MAKE64(0x00FC000000)
+#define A_PHYS_LDTPCI_CFG_MATCH_BITS _SB_MAKE64(0x00FE000000)
+#define A_PHYS_MEMORY_EXP _SB_MAKE64(0x0100000000)
+#define A_PHYS_MEMORY_EXP_SIZE _SB_MAKE64((508*1024*1024*1024))
+#define A_PHYS_LDT_EXP _SB_MAKE64(0x8000000000)
+#define A_PHYS_PCI_FULLACCESS_BYTES _SB_MAKE64(0xF000000000)
+#define A_PHYS_PCI_FULLACCESS_BITS _SB_MAKE64(0xF100000000)
+#define A_PHYS_RESERVED _SB_MAKE64(0xF200000000)
+#define A_PHYS_RESERVED_SPECIAL_LDT _SB_MAKE64(0xFD00000000)
+
+#define A_PHYS_L2CACHE_WAY_SIZE _SB_MAKE64(0x0000020000)
+#define PHYS_L2CACHE_NUM_WAYS 4
+#define A_PHYS_L2CACHE_TOTAL_SIZE _SB_MAKE64(0x0000080000)
+#define A_PHYS_L2CACHE_WAY0 _SB_MAKE64(0x00D0180000)
+#define A_PHYS_L2CACHE_WAY1 _SB_MAKE64(0x00D01A0000)
+#define A_PHYS_L2CACHE_WAY2 _SB_MAKE64(0x00D01C0000)
+#define A_PHYS_L2CACHE_WAY3 _SB_MAKE64(0x00D01E0000)
+#endif
+
+
+#endif
diff --git a/arch/mips/include/asm/sibyte/sb1250_scd.h b/arch/mips/include/asm/sibyte/sb1250_scd.h
new file mode 100644
index 000000000..d099dcbef
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/sb1250_scd.h
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * SB1250 Board Support Package
+ *
+ * SCD Constants and Macros File: sb1250_scd.h
+ *
+ * This module contains constants and macros useful for
+ * manipulating the System Control and Debug module on the 1250.
+ *
+ * SB1250 specification level: User's manual 1/02/02
+ *
+ *********************************************************************
+ *
+ * Copyright 2000,2001,2002,2003,2004,2005
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+#ifndef _SB1250_SCD_H
+#define _SB1250_SCD_H
+
+#include <asm/sibyte/sb1250_defs.h>
+
+/* *********************************************************************
+ * System control/debug registers
+ ********************************************************************* */
+
+/*
+ * System Revision Register (Table 4-1)
+ */
+
+#define M_SYS_RESERVED _SB_MAKEMASK(8, 0)
+
+#define S_SYS_REVISION _SB_MAKE64(8)
+#define M_SYS_REVISION _SB_MAKEMASK(8, S_SYS_REVISION)
+#define V_SYS_REVISION(x) _SB_MAKEVALUE(x, S_SYS_REVISION)
+#define G_SYS_REVISION(x) _SB_GETVALUE(x, S_SYS_REVISION, M_SYS_REVISION)
+
+#define K_SYS_REVISION_BCM1250_PASS1 0x01
+
+#define K_SYS_REVISION_BCM1250_PASS2 0x03
+#define K_SYS_REVISION_BCM1250_A1 0x03 /* Pass 2.0 WB */
+#define K_SYS_REVISION_BCM1250_A2 0x04 /* Pass 2.0 FC */
+#define K_SYS_REVISION_BCM1250_A3 0x05 /* Pass 2.1 FC */
+#define K_SYS_REVISION_BCM1250_A4 0x06 /* Pass 2.1 WB */
+#define K_SYS_REVISION_BCM1250_A6 0x07 /* OR 0x04 (A2) w/WID != 0 */
+#define K_SYS_REVISION_BCM1250_A8 0x0b /* A8/A10 */
+#define K_SYS_REVISION_BCM1250_A9 0x08
+#define K_SYS_REVISION_BCM1250_A10 K_SYS_REVISION_BCM1250_A8
+
+#define K_SYS_REVISION_BCM1250_PASS2_2 0x10
+#define K_SYS_REVISION_BCM1250_B0 K_SYS_REVISION_BCM1250_B1
+#define K_SYS_REVISION_BCM1250_B1 0x10
+#define K_SYS_REVISION_BCM1250_B2 0x11
+
+#define K_SYS_REVISION_BCM1250_C0 0x20
+#define K_SYS_REVISION_BCM1250_C1 0x21
+#define K_SYS_REVISION_BCM1250_C2 0x22
+#define K_SYS_REVISION_BCM1250_C3 0x23
+
+#if SIBYTE_HDR_FEATURE_CHIP(1250)
+/* XXX: discourage people from using these constants. */
+#define K_SYS_REVISION_PASS1 K_SYS_REVISION_BCM1250_PASS1
+#define K_SYS_REVISION_PASS2 K_SYS_REVISION_BCM1250_PASS2
+#define K_SYS_REVISION_PASS2_2 K_SYS_REVISION_BCM1250_PASS2_2
+#define K_SYS_REVISION_PASS3 K_SYS_REVISION_BCM1250_PASS3
+#define K_SYS_REVISION_BCM1250_PASS3 K_SYS_REVISION_BCM1250_C0
+#endif /* 1250 */
+
+#define K_SYS_REVISION_BCM112x_A1 0x20
+#define K_SYS_REVISION_BCM112x_A2 0x21
+#define K_SYS_REVISION_BCM112x_A3 0x22
+#define K_SYS_REVISION_BCM112x_A4 0x23
+#define K_SYS_REVISION_BCM112x_B0 0x30
+
+#define K_SYS_REVISION_BCM1480_S0 0x01
+#define K_SYS_REVISION_BCM1480_A1 0x02
+#define K_SYS_REVISION_BCM1480_A2 0x03
+#define K_SYS_REVISION_BCM1480_A3 0x04
+#define K_SYS_REVISION_BCM1480_B0 0x11
+
+/* Cache size - bits 23:20 of the revision register */
+#define S_SYS_L2C_SIZE _SB_MAKE64(20)
+#define M_SYS_L2C_SIZE _SB_MAKEMASK(4, S_SYS_L2C_SIZE)
+#define V_SYS_L2C_SIZE(x) _SB_MAKEVALUE(x, S_SYS_L2C_SIZE)
+#define G_SYS_L2C_SIZE(x) _SB_GETVALUE(x, S_SYS_L2C_SIZE, M_SYS_L2C_SIZE)
+
+#define K_SYS_L2C_SIZE_1MB 0
+#define K_SYS_L2C_SIZE_512KB 5
+#define K_SYS_L2C_SIZE_256KB 2
+#define K_SYS_L2C_SIZE_128KB 1
+
+#define K_SYS_L2C_SIZE_BCM1250 K_SYS_L2C_SIZE_512KB
+#define K_SYS_L2C_SIZE_BCM1125 K_SYS_L2C_SIZE_256KB
+#define K_SYS_L2C_SIZE_BCM1122 K_SYS_L2C_SIZE_128KB
+
+
+/* Number of CPU cores - bits 27:24 of the revision register */
+#define S_SYS_NUM_CPUS _SB_MAKE64(24)
+#define M_SYS_NUM_CPUS _SB_MAKEMASK(4, S_SYS_NUM_CPUS)
+#define V_SYS_NUM_CPUS(x) _SB_MAKEVALUE(x, S_SYS_NUM_CPUS)
+#define G_SYS_NUM_CPUS(x) _SB_GETVALUE(x, S_SYS_NUM_CPUS, M_SYS_NUM_CPUS)
+
+
+/* XXX: discourage people from using these constants. */
+#define S_SYS_PART _SB_MAKE64(16)
+#define M_SYS_PART _SB_MAKEMASK(16, S_SYS_PART)
+#define V_SYS_PART(x) _SB_MAKEVALUE(x, S_SYS_PART)
+#define G_SYS_PART(x) _SB_GETVALUE(x, S_SYS_PART, M_SYS_PART)
+
+/* XXX: discourage people from using these constants. */
+#define K_SYS_PART_SB1250 0x1250
+#define K_SYS_PART_BCM1120 0x1121
+#define K_SYS_PART_BCM1125 0x1123
+#define K_SYS_PART_BCM1125H 0x1124
+#define K_SYS_PART_BCM1122 0x1113
+
+
+/* The "peripheral set" (SOC type) is the low 4 bits of the "part" field. */
+#define S_SYS_SOC_TYPE _SB_MAKE64(16)
+#define M_SYS_SOC_TYPE _SB_MAKEMASK(4, S_SYS_SOC_TYPE)
+#define V_SYS_SOC_TYPE(x) _SB_MAKEVALUE(x, S_SYS_SOC_TYPE)
+#define G_SYS_SOC_TYPE(x) _SB_GETVALUE(x, S_SYS_SOC_TYPE, M_SYS_SOC_TYPE)
+
+#define K_SYS_SOC_TYPE_BCM1250 0x0
+#define K_SYS_SOC_TYPE_BCM1120 0x1
+#define K_SYS_SOC_TYPE_BCM1250_ALT 0x2 /* 1250pass2 w/ 1/4 L2. */
+#define K_SYS_SOC_TYPE_BCM1125 0x3
+#define K_SYS_SOC_TYPE_BCM1125H 0x4
+#define K_SYS_SOC_TYPE_BCM1250_ALT2 0x5 /* 1250pass2 w/ 1/2 L2. */
+#define K_SYS_SOC_TYPE_BCM1x80 0x6
+#define K_SYS_SOC_TYPE_BCM1x55 0x7
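+
+/*
+ * Worked example of how the part field decomposes into the subfields
+ * above: K_SYS_PART_BCM1125 (0x1123), read as four nibbles from bit 16
+ * upward, gives SOC type 0x3 (K_SYS_SOC_TYPE_BCM1125), L2 cache size
+ * code 0x2 (K_SYS_L2C_SIZE_256KB) and a CPU count of 1, with 0x1 left
+ * in bits 31:28.
+ */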
+
+/*
+ * Calculate the correct SOC type given a copy of the system revision register.
+ *
+ * (For the assembler version, sysrev and dest may be the same register.
+ * Also, it clobbers AT.)
+ */
+#ifdef __ASSEMBLER__
+#define SYS_SOC_TYPE(dest, sysrev) \
+ .set push ; \
+ .set reorder ; \
+ dsrl dest, sysrev, S_SYS_SOC_TYPE ; \
+ andi dest, dest, (M_SYS_SOC_TYPE >> S_SYS_SOC_TYPE); \
+ beq dest, K_SYS_SOC_TYPE_BCM1250_ALT, 991f ; \
+ beq dest, K_SYS_SOC_TYPE_BCM1250_ALT2, 991f ; \
+ b 992f ; \
+991: li dest, K_SYS_SOC_TYPE_BCM1250 ; \
+992: \
+ .set pop
+#else
+#define SYS_SOC_TYPE(sysrev) \
+ ((G_SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1250_ALT \
+ || G_SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1250_ALT2) \
+ ? K_SYS_SOC_TYPE_BCM1250 : G_SYS_SOC_TYPE(sysrev))
+#endif
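+
+/*
+ * Example (C form): if G_SYS_SOC_TYPE(sysrev) is K_SYS_SOC_TYPE_BCM1250_ALT
+ * (0x2) or K_SYS_SOC_TYPE_BCM1250_ALT2 (0x5), SYS_SOC_TYPE(sysrev) yields
+ * K_SYS_SOC_TYPE_BCM1250 (0x0); any other SOC type is returned unchanged.
+ * This lets callers treat the reduced-L2 1250 variants as plain 1250s.
+ */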
+
+#define S_SYS_WID _SB_MAKE64(32)
+#define M_SYS_WID _SB_MAKEMASK(32, S_SYS_WID)
+#define V_SYS_WID(x) _SB_MAKEVALUE(x, S_SYS_WID)
+#define G_SYS_WID(x) _SB_GETVALUE(x, S_SYS_WID, M_SYS_WID)
+
+/*
+ * System Manufacturing Register
+ * Register: SCD_SYSTEM_MANUF
+ */
+
+#if SIBYTE_HDR_FEATURE_1250_112x
+/* Wafer ID: bits 31:0 */
+#define S_SYS_WAFERID1_200 _SB_MAKE64(0)
+#define M_SYS_WAFERID1_200 _SB_MAKEMASK(32, S_SYS_WAFERID1_200)
+#define V_SYS_WAFERID1_200(x) _SB_MAKEVALUE(x, S_SYS_WAFERID1_200)
+#define G_SYS_WAFERID1_200(x) _SB_GETVALUE(x, S_SYS_WAFERID1_200, M_SYS_WAFERID1_200)
+
+#define S_SYS_BIN _SB_MAKE64(32)
+#define M_SYS_BIN _SB_MAKEMASK(4, S_SYS_BIN)
+#define V_SYS_BIN(x) _SB_MAKEVALUE(x, S_SYS_BIN)
+#define G_SYS_BIN(x) _SB_GETVALUE(x, S_SYS_BIN, M_SYS_BIN)
+
+/* Wafer ID: bits 39:36 */
+#define S_SYS_WAFERID2_200 _SB_MAKE64(36)
+#define M_SYS_WAFERID2_200 _SB_MAKEMASK(4, S_SYS_WAFERID2_200)
+#define V_SYS_WAFERID2_200(x) _SB_MAKEVALUE(x, S_SYS_WAFERID2_200)
+#define G_SYS_WAFERID2_200(x) _SB_GETVALUE(x, S_SYS_WAFERID2_200, M_SYS_WAFERID2_200)
+
+/* Wafer ID: bits 39:0 */
+#define S_SYS_WAFERID_300 _SB_MAKE64(0)
+#define M_SYS_WAFERID_300 _SB_MAKEMASK(40, S_SYS_WAFERID_300)
+#define V_SYS_WAFERID_300(x) _SB_MAKEVALUE(x, S_SYS_WAFERID_300)
+#define G_SYS_WAFERID_300(x) _SB_GETVALUE(x, S_SYS_WAFERID_300, M_SYS_WAFERID_300)
+
+#define S_SYS_XPOS _SB_MAKE64(40)
+#define M_SYS_XPOS _SB_MAKEMASK(6, S_SYS_XPOS)
+#define V_SYS_XPOS(x) _SB_MAKEVALUE(x, S_SYS_XPOS)
+#define G_SYS_XPOS(x) _SB_GETVALUE(x, S_SYS_XPOS, M_SYS_XPOS)
+
+#define S_SYS_YPOS _SB_MAKE64(46)
+#define M_SYS_YPOS _SB_MAKEMASK(6, S_SYS_YPOS)
+#define V_SYS_YPOS(x) _SB_MAKEVALUE(x, S_SYS_YPOS)
+#define G_SYS_YPOS(x) _SB_GETVALUE(x, S_SYS_YPOS, M_SYS_YPOS)
+#endif
+
+
+/*
+ * System Config Register (Table 4-2)
+ * Register: SCD_SYSTEM_CFG
+ */
+
+#if SIBYTE_HDR_FEATURE_1250_112x
+#define M_SYS_LDT_PLL_BYP _SB_MAKEMASK1(3)
+#define M_SYS_PCI_SYNC_TEST_MODE _SB_MAKEMASK1(4)
+#define M_SYS_IOB0_DIV _SB_MAKEMASK1(5)
+#define M_SYS_IOB1_DIV _SB_MAKEMASK1(6)
+
+#define S_SYS_PLL_DIV _SB_MAKE64(7)
+#define M_SYS_PLL_DIV _SB_MAKEMASK(5, S_SYS_PLL_DIV)
+#define V_SYS_PLL_DIV(x) _SB_MAKEVALUE(x, S_SYS_PLL_DIV)
+#define G_SYS_PLL_DIV(x) _SB_GETVALUE(x, S_SYS_PLL_DIV, M_SYS_PLL_DIV)
+
+#define M_SYS_SER0_ENABLE _SB_MAKEMASK1(12)
+#define M_SYS_SER0_RSTB_EN _SB_MAKEMASK1(13)
+#define M_SYS_SER1_ENABLE _SB_MAKEMASK1(14)
+#define M_SYS_SER1_RSTB_EN _SB_MAKEMASK1(15)
+#define M_SYS_PCMCIA_ENABLE _SB_MAKEMASK1(16)
+
+#define S_SYS_BOOT_MODE _SB_MAKE64(17)
+#define M_SYS_BOOT_MODE _SB_MAKEMASK(2, S_SYS_BOOT_MODE)
+#define V_SYS_BOOT_MODE(x) _SB_MAKEVALUE(x, S_SYS_BOOT_MODE)
+#define G_SYS_BOOT_MODE(x) _SB_GETVALUE(x, S_SYS_BOOT_MODE, M_SYS_BOOT_MODE)
+#define K_SYS_BOOT_MODE_ROM32 0
+#define K_SYS_BOOT_MODE_ROM8 1
+#define K_SYS_BOOT_MODE_SMBUS_SMALL 2
+#define K_SYS_BOOT_MODE_SMBUS_BIG 3
+
+#define M_SYS_PCI_HOST _SB_MAKEMASK1(19)
+#define M_SYS_PCI_ARBITER _SB_MAKEMASK1(20)
+#define M_SYS_SOUTH_ON_LDT _SB_MAKEMASK1(21)
+#define M_SYS_BIG_ENDIAN _SB_MAKEMASK1(22)
+#define M_SYS_GENCLK_EN _SB_MAKEMASK1(23)
+#define M_SYS_LDT_TEST_EN _SB_MAKEMASK1(24)
+#define M_SYS_GEN_PARITY_EN _SB_MAKEMASK1(25)
+
+#define S_SYS_CONFIG 26
+#define M_SYS_CONFIG _SB_MAKEMASK(6, S_SYS_CONFIG)
+#define V_SYS_CONFIG(x) _SB_MAKEVALUE(x, S_SYS_CONFIG)
+#define G_SYS_CONFIG(x) _SB_GETVALUE(x, S_SYS_CONFIG, M_SYS_CONFIG)
+
+/* The following bits are writeable by JTAG only. */
+
+#define M_SYS_CLKSTOP _SB_MAKEMASK1(32)
+#define M_SYS_CLKSTEP _SB_MAKEMASK1(33)
+
+#define S_SYS_CLKCOUNT 34
+#define M_SYS_CLKCOUNT _SB_MAKEMASK(8, S_SYS_CLKCOUNT)
+#define V_SYS_CLKCOUNT(x) _SB_MAKEVALUE(x, S_SYS_CLKCOUNT)
+#define G_SYS_CLKCOUNT(x) _SB_GETVALUE(x, S_SYS_CLKCOUNT, M_SYS_CLKCOUNT)
+
+#define M_SYS_PLL_BYPASS _SB_MAKEMASK1(42)
+
+#define S_SYS_PLL_IREF 43
+#define M_SYS_PLL_IREF _SB_MAKEMASK(2, S_SYS_PLL_IREF)
+
+#define S_SYS_PLL_VCO 45
+#define M_SYS_PLL_VCO _SB_MAKEMASK(2, S_SYS_PLL_VCO)
+
+#define S_SYS_PLL_VREG 47
+#define M_SYS_PLL_VREG _SB_MAKEMASK(2, S_SYS_PLL_VREG)
+
+#define M_SYS_MEM_RESET _SB_MAKEMASK1(49)
+#define M_SYS_L2C_RESET _SB_MAKEMASK1(50)
+#define M_SYS_IO_RESET_0 _SB_MAKEMASK1(51)
+#define M_SYS_IO_RESET_1 _SB_MAKEMASK1(52)
+#define M_SYS_SCD_RESET _SB_MAKEMASK1(53)
+
+/* End of bits writable by JTAG only. */
+
+#define M_SYS_CPU_RESET_0 _SB_MAKEMASK1(54)
+#define M_SYS_CPU_RESET_1 _SB_MAKEMASK1(55)
+
+#define M_SYS_UNICPU0 _SB_MAKEMASK1(56)
+#define M_SYS_UNICPU1 _SB_MAKEMASK1(57)
+
+#define M_SYS_SB_SOFTRES _SB_MAKEMASK1(58)
+#define M_SYS_EXT_RESET _SB_MAKEMASK1(59)
+#define M_SYS_SYSTEM_RESET _SB_MAKEMASK1(60)
+
+#define M_SYS_MISR_MODE _SB_MAKEMASK1(61)
+#define M_SYS_MISR_RESET _SB_MAKEMASK1(62)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
+#define M_SYS_SW_FLAG _SB_MAKEMASK1(63)
+#endif /* 1250 PASS2 || 112x PASS1 */
+
+#endif
+
+
+/*
+ * Mailbox Registers (Table 4-3)
+ * Registers: SCD_MBOX_CPU_x
+ */
+
+#define S_MBOX_INT_3 0
+#define M_MBOX_INT_3 _SB_MAKEMASK(16, S_MBOX_INT_3)
+#define S_MBOX_INT_2 16
+#define M_MBOX_INT_2 _SB_MAKEMASK(16, S_MBOX_INT_2)
+#define S_MBOX_INT_1 32
+#define M_MBOX_INT_1 _SB_MAKEMASK(16, S_MBOX_INT_1)
+#define S_MBOX_INT_0 48
+#define M_MBOX_INT_0 _SB_MAKEMASK(16, S_MBOX_INT_0)
+
+/*
+ * Watchdog Registers (Table 4-8) (Table 4-9) (Table 4-10)
+ * Registers: SCD_WDOG_INIT_CNT_x
+ */
+
+#define V_SCD_WDOG_FREQ 1000000
+
+#define S_SCD_WDOG_INIT 0
+#define M_SCD_WDOG_INIT _SB_MAKEMASK(23, S_SCD_WDOG_INIT)
+
+#define S_SCD_WDOG_CNT 0
+#define M_SCD_WDOG_CNT _SB_MAKEMASK(23, S_SCD_WDOG_CNT)
+
+#define S_SCD_WDOG_ENABLE 0
+#define M_SCD_WDOG_ENABLE _SB_MAKEMASK1(S_SCD_WDOG_ENABLE)
+
+#define S_SCD_WDOG_RESET_TYPE 2
+#define M_SCD_WDOG_RESET_TYPE _SB_MAKEMASK(3, S_SCD_WDOG_RESET_TYPE)
+#define V_SCD_WDOG_RESET_TYPE(x) _SB_MAKEVALUE(x, S_SCD_WDOG_RESET_TYPE)
+#define G_SCD_WDOG_RESET_TYPE(x) _SB_GETVALUE(x, S_SCD_WDOG_RESET_TYPE, M_SCD_WDOG_RESET_TYPE)
+
+#define K_SCD_WDOG_RESET_FULL 0 /* actually, (x & 1) == 0 */
+#define K_SCD_WDOG_RESET_SOFT 1
+#define K_SCD_WDOG_RESET_CPU0 3
+#define K_SCD_WDOG_RESET_CPU1 5
+#define K_SCD_WDOG_RESET_BOTH_CPUS 7
+
+/* This feature is present in 1250 C0 and later, but *not* in 112x A revs. */
+#if SIBYTE_HDR_FEATURE(1250, PASS3)
+#define S_SCD_WDOG_HAS_RESET 8
+#define M_SCD_WDOG_HAS_RESET _SB_MAKEMASK1(S_SCD_WDOG_HAS_RESET)
+#endif
+
+
+/*
+ * Timer Registers (Table 4-11) (Table 4-12) (Table 4-13)
+ */
+
+#define V_SCD_TIMER_FREQ 1000000
+
+#define S_SCD_TIMER_INIT 0
+#define M_SCD_TIMER_INIT _SB_MAKEMASK(23, S_SCD_TIMER_INIT)
+#define V_SCD_TIMER_INIT(x) _SB_MAKEVALUE(x, S_SCD_TIMER_INIT)
+#define G_SCD_TIMER_INIT(x) _SB_GETVALUE(x, S_SCD_TIMER_INIT, M_SCD_TIMER_INIT)
+
+#define V_SCD_TIMER_WIDTH 23
+#define S_SCD_TIMER_CNT 0
+#define M_SCD_TIMER_CNT _SB_MAKEMASK(V_SCD_TIMER_WIDTH, S_SCD_TIMER_CNT)
+#define V_SCD_TIMER_CNT(x) _SB_MAKEVALUE(x, S_SCD_TIMER_CNT)
+#define G_SCD_TIMER_CNT(x) _SB_GETVALUE(x, S_SCD_TIMER_CNT, M_SCD_TIMER_CNT)
+
+#define M_SCD_TIMER_ENABLE _SB_MAKEMASK1(0)
+#define M_SCD_TIMER_MODE _SB_MAKEMASK1(1)
+#define M_SCD_TIMER_MODE_CONTINUOUS M_SCD_TIMER_MODE
+
+/*
+ * System Performance Counters
+ */
+
+#define S_SPC_CFG_SRC0 0
+#define M_SPC_CFG_SRC0 _SB_MAKEMASK(8, S_SPC_CFG_SRC0)
+#define V_SPC_CFG_SRC0(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC0)
+#define G_SPC_CFG_SRC0(x) _SB_GETVALUE(x, S_SPC_CFG_SRC0, M_SPC_CFG_SRC0)
+
+#define S_SPC_CFG_SRC1 8
+#define M_SPC_CFG_SRC1 _SB_MAKEMASK(8, S_SPC_CFG_SRC1)
+#define V_SPC_CFG_SRC1(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC1)
+#define G_SPC_CFG_SRC1(x) _SB_GETVALUE(x, S_SPC_CFG_SRC1, M_SPC_CFG_SRC1)
+
+#define S_SPC_CFG_SRC2 16
+#define M_SPC_CFG_SRC2 _SB_MAKEMASK(8, S_SPC_CFG_SRC2)
+#define V_SPC_CFG_SRC2(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC2)
+#define G_SPC_CFG_SRC2(x) _SB_GETVALUE(x, S_SPC_CFG_SRC2, M_SPC_CFG_SRC2)
+
+#define S_SPC_CFG_SRC3 24
+#define M_SPC_CFG_SRC3 _SB_MAKEMASK(8, S_SPC_CFG_SRC3)
+#define V_SPC_CFG_SRC3(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC3)
+#define G_SPC_CFG_SRC3(x) _SB_GETVALUE(x, S_SPC_CFG_SRC3, M_SPC_CFG_SRC3)
+
+#if SIBYTE_HDR_FEATURE_1250_112x
+#define M_SPC_CFG_CLEAR _SB_MAKEMASK1(32)
+#define M_SPC_CFG_ENABLE _SB_MAKEMASK1(33)
+#endif
+
+
+/*
+ * Bus Watcher
+ */
+
+#define S_SCD_BERR_TID 8
+#define M_SCD_BERR_TID _SB_MAKEMASK(10, S_SCD_BERR_TID)
+#define V_SCD_BERR_TID(x) _SB_MAKEVALUE(x, S_SCD_BERR_TID)
+#define G_SCD_BERR_TID(x) _SB_GETVALUE(x, S_SCD_BERR_TID, M_SCD_BERR_TID)
+
+#define S_SCD_BERR_RID 18
+#define M_SCD_BERR_RID _SB_MAKEMASK(4, S_SCD_BERR_RID)
+#define V_SCD_BERR_RID(x) _SB_MAKEVALUE(x, S_SCD_BERR_RID)
+#define G_SCD_BERR_RID(x) _SB_GETVALUE(x, S_SCD_BERR_RID, M_SCD_BERR_RID)
+
+#define S_SCD_BERR_DCODE 22
+#define M_SCD_BERR_DCODE _SB_MAKEMASK(3, S_SCD_BERR_DCODE)
+#define V_SCD_BERR_DCODE(x) _SB_MAKEVALUE(x, S_SCD_BERR_DCODE)
+#define G_SCD_BERR_DCODE(x) _SB_GETVALUE(x, S_SCD_BERR_DCODE, M_SCD_BERR_DCODE)
+
+#define M_SCD_BERR_MULTERRS _SB_MAKEMASK1(30)
+
+
+#define S_SCD_L2ECC_CORR_D 0
+#define M_SCD_L2ECC_CORR_D _SB_MAKEMASK(8, S_SCD_L2ECC_CORR_D)
+#define V_SCD_L2ECC_CORR_D(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_CORR_D)
+#define G_SCD_L2ECC_CORR_D(x) _SB_GETVALUE(x, S_SCD_L2ECC_CORR_D, M_SCD_L2ECC_CORR_D)
+
+#define S_SCD_L2ECC_BAD_D 8
+#define M_SCD_L2ECC_BAD_D _SB_MAKEMASK(8, S_SCD_L2ECC_BAD_D)
+#define V_SCD_L2ECC_BAD_D(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_BAD_D)
+#define G_SCD_L2ECC_BAD_D(x) _SB_GETVALUE(x, S_SCD_L2ECC_BAD_D, M_SCD_L2ECC_BAD_D)
+
+#define S_SCD_L2ECC_CORR_T 16
+#define M_SCD_L2ECC_CORR_T _SB_MAKEMASK(8, S_SCD_L2ECC_CORR_T)
+#define V_SCD_L2ECC_CORR_T(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_CORR_T)
+#define G_SCD_L2ECC_CORR_T(x) _SB_GETVALUE(x, S_SCD_L2ECC_CORR_T, M_SCD_L2ECC_CORR_T)
+
+#define S_SCD_L2ECC_BAD_T 24
+#define M_SCD_L2ECC_BAD_T _SB_MAKEMASK(8, S_SCD_L2ECC_BAD_T)
+#define V_SCD_L2ECC_BAD_T(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_BAD_T)
+#define G_SCD_L2ECC_BAD_T(x) _SB_GETVALUE(x, S_SCD_L2ECC_BAD_T, M_SCD_L2ECC_BAD_T)
+
+#define S_SCD_MEM_ECC_CORR 0
+#define M_SCD_MEM_ECC_CORR _SB_MAKEMASK(8, S_SCD_MEM_ECC_CORR)
+#define V_SCD_MEM_ECC_CORR(x) _SB_MAKEVALUE(x, S_SCD_MEM_ECC_CORR)
+#define G_SCD_MEM_ECC_CORR(x) _SB_GETVALUE(x, S_SCD_MEM_ECC_CORR, M_SCD_MEM_ECC_CORR)
+
+#define S_SCD_MEM_ECC_BAD 8
+#define M_SCD_MEM_ECC_BAD _SB_MAKEMASK(8, S_SCD_MEM_ECC_BAD)
+#define V_SCD_MEM_ECC_BAD(x) _SB_MAKEVALUE(x, S_SCD_MEM_ECC_BAD)
+#define G_SCD_MEM_ECC_BAD(x) _SB_GETVALUE(x, S_SCD_MEM_ECC_BAD, M_SCD_MEM_ECC_BAD)
+
+#define S_SCD_MEM_BUSERR 16
+#define M_SCD_MEM_BUSERR _SB_MAKEMASK(8, S_SCD_MEM_BUSERR)
+#define V_SCD_MEM_BUSERR(x) _SB_MAKEVALUE(x, S_SCD_MEM_BUSERR)
+#define G_SCD_MEM_BUSERR(x) _SB_GETVALUE(x, S_SCD_MEM_BUSERR, M_SCD_MEM_BUSERR)
+
+
+/*
+ * Address Trap Registers
+ */
+
+#if SIBYTE_HDR_FEATURE_1250_112x
+#define M_ATRAP_INDEX _SB_MAKEMASK(4, 0)
+#define M_ATRAP_ADDRESS _SB_MAKEMASK(40, 0)
+
+#define S_ATRAP_CFG_CNT 0
+#define M_ATRAP_CFG_CNT _SB_MAKEMASK(3, S_ATRAP_CFG_CNT)
+#define V_ATRAP_CFG_CNT(x) _SB_MAKEVALUE(x, S_ATRAP_CFG_CNT)
+#define G_ATRAP_CFG_CNT(x) _SB_GETVALUE(x, S_ATRAP_CFG_CNT, M_ATRAP_CFG_CNT)
+
+#define M_ATRAP_CFG_WRITE _SB_MAKEMASK1(3)
+#define M_ATRAP_CFG_ALL _SB_MAKEMASK1(4)
+#define M_ATRAP_CFG_INV _SB_MAKEMASK1(5)
+#define M_ATRAP_CFG_USESRC _SB_MAKEMASK1(6)
+#define M_ATRAP_CFG_SRCINV _SB_MAKEMASK1(7)
+
+#define S_ATRAP_CFG_AGENTID 8
+#define M_ATRAP_CFG_AGENTID _SB_MAKEMASK(4, S_ATRAP_CFG_AGENTID)
+#define V_ATRAP_CFG_AGENTID(x) _SB_MAKEVALUE(x, S_ATRAP_CFG_AGENTID)
+#define G_ATRAP_CFG_AGENTID(x) _SB_GETVALUE(x, S_ATRAP_CFG_AGENTID, M_ATRAP_CFG_AGENTID)
+
+#define K_BUS_AGENT_CPU0 0
+#define K_BUS_AGENT_CPU1 1
+#define K_BUS_AGENT_IOB0 2
+#define K_BUS_AGENT_IOB1 3
+#define K_BUS_AGENT_SCD 4
+#define K_BUS_AGENT_L2C 6
+#define K_BUS_AGENT_MC 7
+
+#define S_ATRAP_CFG_CATTR 12
+#define M_ATRAP_CFG_CATTR _SB_MAKEMASK(3, S_ATRAP_CFG_CATTR)
+#define V_ATRAP_CFG_CATTR(x) _SB_MAKEVALUE(x, S_ATRAP_CFG_CATTR)
+#define G_ATRAP_CFG_CATTR(x) _SB_GETVALUE(x, S_ATRAP_CFG_CATTR, M_ATRAP_CFG_CATTR)
+
+#define K_ATRAP_CFG_CATTR_IGNORE 0
+#define K_ATRAP_CFG_CATTR_UNC 1
+#define K_ATRAP_CFG_CATTR_CACHEABLE 2
+#define K_ATRAP_CFG_CATTR_NONCOH 3
+#define K_ATRAP_CFG_CATTR_COHERENT 4
+#define K_ATRAP_CFG_CATTR_NOTUNC 5
+#define K_ATRAP_CFG_CATTR_NOTNONCOH 6
+#define K_ATRAP_CFG_CATTR_NOTCOHERENT 7
+
+#endif /* 1250/112x */
+
+/*
+ * Trace Buffer Config register
+ */
+
+#define M_SCD_TRACE_CFG_RESET _SB_MAKEMASK1(0)
+#define M_SCD_TRACE_CFG_START_READ _SB_MAKEMASK1(1)
+#define M_SCD_TRACE_CFG_START _SB_MAKEMASK1(2)
+#define M_SCD_TRACE_CFG_STOP _SB_MAKEMASK1(3)
+#define M_SCD_TRACE_CFG_FREEZE _SB_MAKEMASK1(4)
+#define M_SCD_TRACE_CFG_FREEZE_FULL _SB_MAKEMASK1(5)
+#define M_SCD_TRACE_CFG_DEBUG_FULL _SB_MAKEMASK1(6)
+#define M_SCD_TRACE_CFG_FULL _SB_MAKEMASK1(7)
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define M_SCD_TRACE_CFG_FORCECNT _SB_MAKEMASK1(8)
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+
+/*
+ * This field is the same on the 1250/112x and 1480, just located in
+ * a slightly different place in the register.
+ */
+#if SIBYTE_HDR_FEATURE_1250_112x
+#define S_SCD_TRACE_CFG_CUR_ADDR 10
+#else
+#if SIBYTE_HDR_FEATURE_CHIP(1480)
+#define S_SCD_TRACE_CFG_CUR_ADDR 24
+#endif /* 1480 */
+#endif /* 1250/112x */
+
+#define M_SCD_TRACE_CFG_CUR_ADDR _SB_MAKEMASK(8, S_SCD_TRACE_CFG_CUR_ADDR)
+#define V_SCD_TRACE_CFG_CUR_ADDR(x) _SB_MAKEVALUE(x, S_SCD_TRACE_CFG_CUR_ADDR)
+#define G_SCD_TRACE_CFG_CUR_ADDR(x) _SB_GETVALUE(x, S_SCD_TRACE_CFG_CUR_ADDR, M_SCD_TRACE_CFG_CUR_ADDR)
+
+/*
+ * Trace Event registers
+ */
+
+#define S_SCD_TREVT_ADDR_MATCH 0
+#define M_SCD_TREVT_ADDR_MATCH _SB_MAKEMASK(4, S_SCD_TREVT_ADDR_MATCH)
+#define V_SCD_TREVT_ADDR_MATCH(x) _SB_MAKEVALUE(x, S_SCD_TREVT_ADDR_MATCH)
+#define G_SCD_TREVT_ADDR_MATCH(x) _SB_GETVALUE(x, S_SCD_TREVT_ADDR_MATCH, M_SCD_TREVT_ADDR_MATCH)
+
+#define M_SCD_TREVT_REQID_MATCH _SB_MAKEMASK1(4)
+#define M_SCD_TREVT_DATAID_MATCH _SB_MAKEMASK1(5)
+#define M_SCD_TREVT_RESPID_MATCH _SB_MAKEMASK1(6)
+#define M_SCD_TREVT_INTERRUPT _SB_MAKEMASK1(7)
+#define M_SCD_TREVT_DEBUG_PIN _SB_MAKEMASK1(9)
+#define M_SCD_TREVT_WRITE _SB_MAKEMASK1(10)
+#define M_SCD_TREVT_READ _SB_MAKEMASK1(11)
+
+#define S_SCD_TREVT_REQID 12
+#define M_SCD_TREVT_REQID _SB_MAKEMASK(4, S_SCD_TREVT_REQID)
+#define V_SCD_TREVT_REQID(x) _SB_MAKEVALUE(x, S_SCD_TREVT_REQID)
+#define G_SCD_TREVT_REQID(x) _SB_GETVALUE(x, S_SCD_TREVT_REQID, M_SCD_TREVT_REQID)
+
+#define S_SCD_TREVT_RESPID 16
+#define M_SCD_TREVT_RESPID _SB_MAKEMASK(4, S_SCD_TREVT_RESPID)
+#define V_SCD_TREVT_RESPID(x) _SB_MAKEVALUE(x, S_SCD_TREVT_RESPID)
+#define G_SCD_TREVT_RESPID(x) _SB_GETVALUE(x, S_SCD_TREVT_RESPID, M_SCD_TREVT_RESPID)
+
+#define S_SCD_TREVT_DATAID 20
+#define M_SCD_TREVT_DATAID _SB_MAKEMASK(4, S_SCD_TREVT_DATAID)
+#define V_SCD_TREVT_DATAID(x) _SB_MAKEVALUE(x, S_SCD_TREVT_DATAID)
+#define G_SCD_TREVT_DATAID(x) _SB_GETVALUE(x, S_SCD_TREVT_DATAID, M_SCD_TREVT_DATAID)
+
+#define S_SCD_TREVT_COUNT 24
+#define M_SCD_TREVT_COUNT _SB_MAKEMASK(8, S_SCD_TREVT_COUNT)
+#define V_SCD_TREVT_COUNT(x) _SB_MAKEVALUE(x, S_SCD_TREVT_COUNT)
+#define G_SCD_TREVT_COUNT(x) _SB_GETVALUE(x, S_SCD_TREVT_COUNT, M_SCD_TREVT_COUNT)
+
+/*
+ * Trace Sequence registers
+ */
+
+#define S_SCD_TRSEQ_EVENT4 0
+#define M_SCD_TRSEQ_EVENT4 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT4)
+#define V_SCD_TRSEQ_EVENT4(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT4)
+#define G_SCD_TRSEQ_EVENT4(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT4, M_SCD_TRSEQ_EVENT4)
+
+#define S_SCD_TRSEQ_EVENT3 4
+#define M_SCD_TRSEQ_EVENT3 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT3)
+#define V_SCD_TRSEQ_EVENT3(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT3)
+#define G_SCD_TRSEQ_EVENT3(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT3, M_SCD_TRSEQ_EVENT3)
+
+#define S_SCD_TRSEQ_EVENT2 8
+#define M_SCD_TRSEQ_EVENT2 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT2)
+#define V_SCD_TRSEQ_EVENT2(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT2)
+#define G_SCD_TRSEQ_EVENT2(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT2, M_SCD_TRSEQ_EVENT2)
+
+#define S_SCD_TRSEQ_EVENT1 12
+#define M_SCD_TRSEQ_EVENT1 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT1)
+#define V_SCD_TRSEQ_EVENT1(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT1)
+#define G_SCD_TRSEQ_EVENT1(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT1, M_SCD_TRSEQ_EVENT1)
+
+#define K_SCD_TRSEQ_E0 0
+#define K_SCD_TRSEQ_E1 1
+#define K_SCD_TRSEQ_E2 2
+#define K_SCD_TRSEQ_E3 3
+#define K_SCD_TRSEQ_E0_E1 4
+#define K_SCD_TRSEQ_E1_E2 5
+#define K_SCD_TRSEQ_E2_E3 6
+#define K_SCD_TRSEQ_E0_E1_E2 7
+#define K_SCD_TRSEQ_E0_E1_E2_E3 8
+#define K_SCD_TRSEQ_E0E1 9
+#define K_SCD_TRSEQ_E0E1E2 10
+#define K_SCD_TRSEQ_E0E1E2E3 11
+#define K_SCD_TRSEQ_E0E1_E2 12
+#define K_SCD_TRSEQ_E0E1_E2E3 13
+#define K_SCD_TRSEQ_E0E1_E2_E3 14
+#define K_SCD_TRSEQ_IGNORED 15
+
+#define K_SCD_TRSEQ_TRIGGER_ALL (V_SCD_TRSEQ_EVENT1(K_SCD_TRSEQ_IGNORED) | \
+ V_SCD_TRSEQ_EVENT2(K_SCD_TRSEQ_IGNORED) | \
+ V_SCD_TRSEQ_EVENT3(K_SCD_TRSEQ_IGNORED) | \
+ V_SCD_TRSEQ_EVENT4(K_SCD_TRSEQ_IGNORED))
+
+#define S_SCD_TRSEQ_FUNCTION 16
+#define M_SCD_TRSEQ_FUNCTION _SB_MAKEMASK(4, S_SCD_TRSEQ_FUNCTION)
+#define V_SCD_TRSEQ_FUNCTION(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_FUNCTION)
+#define G_SCD_TRSEQ_FUNCTION(x) _SB_GETVALUE(x, S_SCD_TRSEQ_FUNCTION, M_SCD_TRSEQ_FUNCTION)
+
+#define K_SCD_TRSEQ_FUNC_NOP 0
+#define K_SCD_TRSEQ_FUNC_START 1
+#define K_SCD_TRSEQ_FUNC_STOP 2
+#define K_SCD_TRSEQ_FUNC_FREEZE 3
+
+#define V_SCD_TRSEQ_FUNC_NOP V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_NOP)
+#define V_SCD_TRSEQ_FUNC_START V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_START)
+#define V_SCD_TRSEQ_FUNC_STOP V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_STOP)
+#define V_SCD_TRSEQ_FUNC_FREEZE V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_FREEZE)
+
+#define M_SCD_TRSEQ_ASAMPLE _SB_MAKEMASK1(18)
+#define M_SCD_TRSEQ_DSAMPLE _SB_MAKEMASK1(19)
+#define M_SCD_TRSEQ_DEBUGPIN _SB_MAKEMASK1(20)
+#define M_SCD_TRSEQ_DEBUGCPU _SB_MAKEMASK1(21)
+#define M_SCD_TRSEQ_CLEARUSE _SB_MAKEMASK1(22)
+#define M_SCD_TRSEQ_ALLD_A _SB_MAKEMASK1(23)
+#define M_SCD_TRSEQ_ALL_A _SB_MAKEMASK1(24)
+
+#endif
diff --git a/arch/mips/include/asm/sibyte/sb1250_smbus.h b/arch/mips/include/asm/sibyte/sb1250_smbus.h
new file mode 100644
index 000000000..e854f96ff
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/sb1250_smbus.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * SB1250 Board Support Package
+ *
+ * SMBUS Constants File: sb1250_smbus.h
+ *
+ * This module contains constants and macros useful for
+ * manipulating the SB1250's SMbus devices.
+ *
+ * SB1250 specification level: 10/21/02
+ * BCM1280 specification level: 11/24/03
+ *
+ *********************************************************************
+ *
+ * Copyright 2000,2001,2002,2003
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+
+#ifndef _SB1250_SMBUS_H
+#define _SB1250_SMBUS_H
+
+#include <asm/sibyte/sb1250_defs.h>
+
+/*
+ * SMBus Clock Frequency Register (Table 14-2)
+ */
+
+#define S_SMB_FREQ_DIV 0
+#define M_SMB_FREQ_DIV _SB_MAKEMASK(13, S_SMB_FREQ_DIV)
+#define V_SMB_FREQ_DIV(x) _SB_MAKEVALUE(x, S_SMB_FREQ_DIV)
+
+#define K_SMB_FREQ_400KHZ 0x1F
+#define K_SMB_FREQ_100KHZ 0x7D
+#define K_SMB_FREQ_10KHZ 1250
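+
+/*
+ * Editorial note (not part of the original header): the three divisors
+ * above are consistent with SCL = 100 MHz / (8 * divisor), i.e.
+ * divisor ~= 12,500,000 / desired SCL frequency (125 -> 100 kHz,
+ * 1250 -> 10 kHz).  A driver would typically program the register with,
+ * for example, V_SMB_FREQ_DIV(K_SMB_FREQ_100KHZ).  Treat the derivation
+ * as an inference from these constants, not a documented formula.
+ */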
+
+#define S_SMB_CMD 0
+#define M_SMB_CMD _SB_MAKEMASK(8, S_SMB_CMD)
+#define V_SMB_CMD(x) _SB_MAKEVALUE(x, S_SMB_CMD)
+
+/*
+ * SMBus control register (Table 14-4)
+ */
+
+#define M_SMB_ERR_INTR _SB_MAKEMASK1(0)
+#define M_SMB_FINISH_INTR _SB_MAKEMASK1(1)
+
+#define S_SMB_DATA_OUT 4
+#define M_SMB_DATA_OUT _SB_MAKEMASK1(S_SMB_DATA_OUT)
+#define V_SMB_DATA_OUT(x) _SB_MAKEVALUE(x, S_SMB_DATA_OUT)
+
+#define M_SMB_DATA_DIR _SB_MAKEMASK1(5)
+#define M_SMB_DATA_DIR_OUTPUT M_SMB_DATA_DIR
+#define M_SMB_CLK_OUT _SB_MAKEMASK1(6)
+#define M_SMB_DIRECT_ENABLE _SB_MAKEMASK1(7)
+
+/*
+ * SMBus status registers (Table 14-5)
+ */
+
+#define M_SMB_BUSY _SB_MAKEMASK1(0)
+#define M_SMB_ERROR _SB_MAKEMASK1(1)
+#define M_SMB_ERROR_TYPE _SB_MAKEMASK1(2)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+#define S_SMB_SCL_IN 5
+#define M_SMB_SCL_IN _SB_MAKEMASK1(S_SMB_SCL_IN)
+#define V_SMB_SCL_IN(x) _SB_MAKEVALUE(x, S_SMB_SCL_IN)
+#define G_SMB_SCL_IN(x) _SB_GETVALUE(x, S_SMB_SCL_IN, M_SMB_SCL_IN)
+#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
+
+#define S_SMB_REF 6
+#define M_SMB_REF _SB_MAKEMASK1(S_SMB_REF)
+#define V_SMB_REF(x) _SB_MAKEVALUE(x, S_SMB_REF)
+#define G_SMB_REF(x) _SB_GETVALUE(x, S_SMB_REF, M_SMB_REF)
+
+#define S_SMB_DATA_IN 7
+#define M_SMB_DATA_IN _SB_MAKEMASK1(S_SMB_DATA_IN)
+#define V_SMB_DATA_IN(x) _SB_MAKEVALUE(x, S_SMB_DATA_IN)
+#define G_SMB_DATA_IN(x) _SB_GETVALUE(x, S_SMB_DATA_IN, M_SMB_DATA_IN)
+
+/*
+ * SMBus Start/Command registers (Table 14-9)
+ */
+
+#define S_SMB_ADDR 0
+#define M_SMB_ADDR _SB_MAKEMASK(7, S_SMB_ADDR)
+#define V_SMB_ADDR(x) _SB_MAKEVALUE(x, S_SMB_ADDR)
+#define G_SMB_ADDR(x) _SB_GETVALUE(x, S_SMB_ADDR, M_SMB_ADDR)
+
+#define M_SMB_QDATA _SB_MAKEMASK1(7)
+
+#define S_SMB_TT 8
+#define M_SMB_TT _SB_MAKEMASK(3, S_SMB_TT)
+#define V_SMB_TT(x) _SB_MAKEVALUE(x, S_SMB_TT)
+#define G_SMB_TT(x) _SB_GETVALUE(x, S_SMB_TT, M_SMB_TT)
+
+#define K_SMB_TT_WR1BYTE 0
+#define K_SMB_TT_WR2BYTE 1
+#define K_SMB_TT_WR3BYTE 2
+#define K_SMB_TT_CMD_RD1BYTE 3
+#define K_SMB_TT_CMD_RD2BYTE 4
+#define K_SMB_TT_RD1BYTE 5
+#define K_SMB_TT_QUICKCMD 6
+#define K_SMB_TT_EEPROMREAD 7
+
+#define V_SMB_TT_WR1BYTE V_SMB_TT(K_SMB_TT_WR1BYTE)
+#define V_SMB_TT_WR2BYTE V_SMB_TT(K_SMB_TT_WR2BYTE)
+#define V_SMB_TT_WR3BYTE V_SMB_TT(K_SMB_TT_WR3BYTE)
+#define V_SMB_TT_CMD_RD1BYTE V_SMB_TT(K_SMB_TT_CMD_RD1BYTE)
+#define V_SMB_TT_CMD_RD2BYTE V_SMB_TT(K_SMB_TT_CMD_RD2BYTE)
+#define V_SMB_TT_RD1BYTE V_SMB_TT(K_SMB_TT_RD1BYTE)
+#define V_SMB_TT_QUICKCMD V_SMB_TT(K_SMB_TT_QUICKCMD)
+#define V_SMB_TT_EEPROMREAD V_SMB_TT(K_SMB_TT_EEPROMREAD)
+
+#define M_SMB_PEC _SB_MAKEMASK1(15)
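+
+/*
+ * Example (editorial sketch, not part of the original header): a start
+ * register value for a single-byte write to a device at SMBus address
+ * 0x50 (the address is purely illustrative) could be composed as
+ *
+ *	V_SMB_ADDR(0x50) | V_SMB_TT_WR1BYTE
+ *
+ * with the byte to send placed in the SMBus data register.
+ */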
+
+/*
+ * SMBus Data Register (Table 14-6) and SMBus Extra Register (Table 14-7)
+ */
+
+#define S_SMB_LB 0
+#define M_SMB_LB _SB_MAKEMASK(8, S_SMB_LB)
+#define V_SMB_LB(x) _SB_MAKEVALUE(x, S_SMB_LB)
+
+#define S_SMB_MB 8
+#define M_SMB_MB _SB_MAKEMASK(8, S_SMB_MB)
+#define V_SMB_MB(x) _SB_MAKEVALUE(x, S_SMB_MB)
+
+
+/*
+ * SMBus Packet Error Check register (Table 14-8)
+ */
+
+#define S_SPEC_PEC 0
+#define M_SPEC_PEC _SB_MAKEMASK(8, S_SPEC_PEC)
+#define V_SPEC_MB(x) _SB_MAKEVALUE(x, S_SPEC_PEC)
+
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+
+#define S_SMB_CMDH 8
+#define M_SMB_CMDH _SB_MAKEMASK(8, S_SMB_CMDH)
+#define V_SMB_CMDH(x) _SB_MAKEVALUE(x, S_SMB_CMDH)
+
+#define M_SMB_EXTEND _SB_MAKEMASK1(14)
+
+#define S_SMB_DFMT 8
+#define M_SMB_DFMT _SB_MAKEMASK(3, S_SMB_DFMT)
+#define V_SMB_DFMT(x) _SB_MAKEVALUE(x, S_SMB_DFMT)
+#define G_SMB_DFMT(x) _SB_GETVALUE(x, S_SMB_DFMT, M_SMB_DFMT)
+
+#define K_SMB_DFMT_1BYTE 0
+#define K_SMB_DFMT_2BYTE 1
+#define K_SMB_DFMT_3BYTE 2
+#define K_SMB_DFMT_4BYTE 3
+#define K_SMB_DFMT_NODATA 4
+#define K_SMB_DFMT_CMD4BYTE 5
+#define K_SMB_DFMT_CMD5BYTE 6
+#define K_SMB_DFMT_RESERVED 7
+
+#define V_SMB_DFMT_1BYTE V_SMB_DFMT(K_SMB_DFMT_1BYTE)
+#define V_SMB_DFMT_2BYTE V_SMB_DFMT(K_SMB_DFMT_2BYTE)
+#define V_SMB_DFMT_3BYTE V_SMB_DFMT(K_SMB_DFMT_3BYTE)
+#define V_SMB_DFMT_4BYTE V_SMB_DFMT(K_SMB_DFMT_4BYTE)
+#define V_SMB_DFMT_NODATA V_SMB_DFMT(K_SMB_DFMT_NODATA)
+#define V_SMB_DFMT_CMD4BYTE V_SMB_DFMT(K_SMB_DFMT_CMD4BYTE)
+#define V_SMB_DFMT_CMD5BYTE V_SMB_DFMT(K_SMB_DFMT_CMD5BYTE)
+#define V_SMB_DFMT_RESERVED V_SMB_DFMT(K_SMB_DFMT_RESERVED)
+
+#define S_SMB_AFMT 11
+#define M_SMB_AFMT _SB_MAKEMASK(2, S_SMB_AFMT)
+#define V_SMB_AFMT(x) _SB_MAKEVALUE(x, S_SMB_AFMT)
+#define G_SMB_AFMT(x) _SB_GETVALUE(x, S_SMB_AFMT, M_SMB_AFMT)
+
+#define K_SMB_AFMT_NONE 0
+#define K_SMB_AFMT_ADDR 1
+#define K_SMB_AFMT_ADDR_CMD1BYTE 2
+#define K_SMB_AFMT_ADDR_CMD2BYTE 3
+
+#define V_SMB_AFMT_NONE V_SMB_AFMT(K_SMB_AFMT_NONE)
+#define V_SMB_AFMT_ADDR V_SMB_AFMT(K_SMB_AFMT_ADDR)
+#define V_SMB_AFMT_ADDR_CMD1BYTE V_SMB_AFMT(K_SMB_AFMT_ADDR_CMD1BYTE)
+#define V_SMB_AFMT_ADDR_CMD2BYTE V_SMB_AFMT(K_SMB_AFMT_ADDR_CMD2BYTE)
+
+#define M_SMB_DIR _SB_MAKEMASK1(13)
+
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+
+#endif
diff --git a/arch/mips/include/asm/sibyte/sb1250_syncser.h b/arch/mips/include/asm/sibyte/sb1250_syncser.h
new file mode 100644
index 000000000..8b40e3f05
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/sb1250_syncser.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * SB1250 Board Support Package
+ *
+ * Synchronous Serial Constants File: sb1250_syncser.h
+ *
+ * This module contains constants and macros useful for
+ * manipulating the SB1250's Synchronous Serial interfaces.
+ *
+ * SB1250 specification level: User's manual 1/02/02
+ *
+ *********************************************************************
+ *
+ * Copyright 2000,2001,2002,2003
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+
+#ifndef _SB1250_SYNCSER_H
+#define _SB1250_SYNCSER_H
+
+#include <asm/sibyte/sb1250_defs.h>
+
+/*
+ * Serial Mode Configuration Register
+ */
+
+#define M_SYNCSER_CRC_MODE _SB_MAKEMASK1(0)
+#define M_SYNCSER_MSB_FIRST _SB_MAKEMASK1(1)
+
+#define S_SYNCSER_FLAG_NUM 2
+#define M_SYNCSER_FLAG_NUM _SB_MAKEMASK(4, S_SYNCSER_FLAG_NUM)
+#define V_SYNCSER_FLAG_NUM(x) _SB_MAKEVALUE(x, S_SYNCSER_FLAG_NUM)
+
+#define M_SYNCSER_FLAG_EN _SB_MAKEMASK1(6)
+#define M_SYNCSER_HDLC_EN _SB_MAKEMASK1(7)
+#define M_SYNCSER_LOOP_MODE _SB_MAKEMASK1(8)
+#define M_SYNCSER_LOOPBACK _SB_MAKEMASK1(9)
+
+/*
+ * Serial Clock Source and Line Interface Mode Register
+ */
+
+#define M_SYNCSER_RXCLK_INV _SB_MAKEMASK1(0)
+#define M_SYNCSER_RXCLK_EXT _SB_MAKEMASK1(1)
+
+#define S_SYNCSER_RXSYNC_DLY 2
+#define M_SYNCSER_RXSYNC_DLY _SB_MAKEMASK(2, S_SYNCSER_RXSYNC_DLY)
+#define V_SYNCSER_RXSYNC_DLY(x) _SB_MAKEVALUE(x, S_SYNCSER_RXSYNC_DLY)
+
+#define M_SYNCSER_RXSYNC_LOW _SB_MAKEMASK1(4)
+#define M_SYNCSER_RXSTRB_LOW _SB_MAKEMASK1(5)
+
+#define M_SYNCSER_RXSYNC_EDGE _SB_MAKEMASK1(6)
+#define M_SYNCSER_RXSYNC_INT _SB_MAKEMASK1(7)
+
+#define M_SYNCSER_TXCLK_INV _SB_MAKEMASK1(8)
+#define M_SYNCSER_TXCLK_EXT _SB_MAKEMASK1(9)
+
+#define S_SYNCSER_TXSYNC_DLY 10
+#define M_SYNCSER_TXSYNC_DLY _SB_MAKEMASK(2, S_SYNCSER_TXSYNC_DLY)
+#define V_SYNCSER_TXSYNC_DLY(x) _SB_MAKEVALUE(x, S_SYNCSER_TXSYNC_DLY)
+
+#define M_SYNCSER_TXSYNC_LOW _SB_MAKEMASK1(12)
+#define M_SYNCSER_TXSTRB_LOW _SB_MAKEMASK1(13)
+
+#define M_SYNCSER_TXSYNC_EDGE _SB_MAKEMASK1(14)
+#define M_SYNCSER_TXSYNC_INT _SB_MAKEMASK1(15)
+
+/*
+ * Serial Command Register
+ */
+
+#define M_SYNCSER_CMD_RX_EN _SB_MAKEMASK1(0)
+#define M_SYNCSER_CMD_TX_EN _SB_MAKEMASK1(1)
+#define M_SYNCSER_CMD_RX_RESET _SB_MAKEMASK1(2)
+#define M_SYNCSER_CMD_TX_RESET _SB_MAKEMASK1(3)
+#define M_SYNCSER_CMD_TX_PAUSE _SB_MAKEMASK1(5)
+
+/*
+ * Serial DMA Enable Register
+ */
+
+#define M_SYNCSER_DMA_RX_EN _SB_MAKEMASK1(0)
+#define M_SYNCSER_DMA_TX_EN _SB_MAKEMASK1(4)
+
+/*
+ * Serial Status Register
+ */
+
+#define M_SYNCSER_RX_CRCERR _SB_MAKEMASK1(0)
+#define M_SYNCSER_RX_ABORT _SB_MAKEMASK1(1)
+#define M_SYNCSER_RX_OCTET _SB_MAKEMASK1(2)
+#define M_SYNCSER_RX_LONGFRM _SB_MAKEMASK1(3)
+#define M_SYNCSER_RX_SHORTFRM _SB_MAKEMASK1(4)
+#define M_SYNCSER_RX_OVERRUN _SB_MAKEMASK1(5)
+#define M_SYNCSER_RX_SYNC_ERR _SB_MAKEMASK1(6)
+#define M_SYNCSER_TX_CRCERR _SB_MAKEMASK1(8)
+#define M_SYNCSER_TX_UNDERRUN _SB_MAKEMASK1(9)
+#define M_SYNCSER_TX_SYNC_ERR _SB_MAKEMASK1(10)
+#define M_SYNCSER_TX_PAUSE_COMPLETE _SB_MAKEMASK1(11)
+#define M_SYNCSER_RX_EOP_COUNT _SB_MAKEMASK1(16)
+#define M_SYNCSER_RX_EOP_TIMER _SB_MAKEMASK1(17)
+#define M_SYNCSER_RX_EOP_SEEN _SB_MAKEMASK1(18)
+#define M_SYNCSER_RX_HWM _SB_MAKEMASK1(19)
+#define M_SYNCSER_RX_LWM _SB_MAKEMASK1(20)
+#define M_SYNCSER_RX_DSCR _SB_MAKEMASK1(21)
+#define M_SYNCSER_RX_DERR _SB_MAKEMASK1(22)
+#define M_SYNCSER_TX_EOP_COUNT _SB_MAKEMASK1(24)
+#define M_SYNCSER_TX_EOP_TIMER _SB_MAKEMASK1(25)
+#define M_SYNCSER_TX_EOP_SEEN _SB_MAKEMASK1(26)
+#define M_SYNCSER_TX_HWM _SB_MAKEMASK1(27)
+#define M_SYNCSER_TX_LWM _SB_MAKEMASK1(28)
+#define M_SYNCSER_TX_DSCR _SB_MAKEMASK1(29)
+#define M_SYNCSER_TX_DERR _SB_MAKEMASK1(30)
+#define M_SYNCSER_TX_DZERO _SB_MAKEMASK1(31)
+
+/*
+ * Sequencer Table Entry format
+ */
+
+#define M_SYNCSER_SEQ_LAST _SB_MAKEMASK1(0)
+#define M_SYNCSER_SEQ_BYTE _SB_MAKEMASK1(1)
+
+#define S_SYNCSER_SEQ_COUNT 2
+#define M_SYNCSER_SEQ_COUNT _SB_MAKEMASK(4, S_SYNCSER_SEQ_COUNT)
+#define V_SYNCSER_SEQ_COUNT(x) _SB_MAKEVALUE(x, S_SYNCSER_SEQ_COUNT)
+
+#define M_SYNCSER_SEQ_ENABLE _SB_MAKEMASK1(6)
+#define M_SYNCSER_SEQ_STROBE _SB_MAKEMASK1(7)
+
+#endif
diff --git a/arch/mips/include/asm/sibyte/sb1250_uart.h b/arch/mips/include/asm/sibyte/sb1250_uart.h
new file mode 100644
index 000000000..da782e643
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/sb1250_uart.h
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* *********************************************************************
+ * SB1250 Board Support Package
+ *
+ * UART Constants File: sb1250_uart.h
+ *
+ * This module contains constants and macros useful for
+ * manipulating the SB1250's UARTs
+ *
+ * SB1250 specification level: User's manual 1/02/02
+ *
+ *********************************************************************
+ *
+ * Copyright 2000,2001,2002,2003
+ * Broadcom Corporation. All rights reserved.
+ *
+ ********************************************************************* */
+
+
+#ifndef _SB1250_UART_H
+#define _SB1250_UART_H
+
+#include <asm/sibyte/sb1250_defs.h>
+
+/* **********************************************************************
+ * DUART Registers
+ ********************************************************************** */
+
+/*
+ * DUART Mode Register #1 (Table 10-3)
+ * Register: DUART_MODE_REG_1_A
+ * Register: DUART_MODE_REG_1_B
+ */
+
+#define S_DUART_BITS_PER_CHAR 0
+#define M_DUART_BITS_PER_CHAR _SB_MAKEMASK(2, S_DUART_BITS_PER_CHAR)
+#define V_DUART_BITS_PER_CHAR(x) _SB_MAKEVALUE(x, S_DUART_BITS_PER_CHAR)
+
+#define K_DUART_BITS_PER_CHAR_RSV0 0
+#define K_DUART_BITS_PER_CHAR_RSV1 1
+#define K_DUART_BITS_PER_CHAR_7 2
+#define K_DUART_BITS_PER_CHAR_8 3
+
+#define V_DUART_BITS_PER_CHAR_RSV0 V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_RSV0)
+#define V_DUART_BITS_PER_CHAR_RSV1 V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_RSV1)
+#define V_DUART_BITS_PER_CHAR_7 V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_7)
+#define V_DUART_BITS_PER_CHAR_8 V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_8)
+
+
+#define M_DUART_PARITY_TYPE_EVEN 0x00
+#define M_DUART_PARITY_TYPE_ODD _SB_MAKEMASK1(2)
+
+#define S_DUART_PARITY_MODE 3
+#define M_DUART_PARITY_MODE _SB_MAKEMASK(2, S_DUART_PARITY_MODE)
+#define V_DUART_PARITY_MODE(x) _SB_MAKEVALUE(x, S_DUART_PARITY_MODE)
+
+#define K_DUART_PARITY_MODE_ADD 0
+#define K_DUART_PARITY_MODE_ADD_FIXED 1
+#define K_DUART_PARITY_MODE_NONE 2
+
+#define V_DUART_PARITY_MODE_ADD V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD)
+#define V_DUART_PARITY_MODE_ADD_FIXED V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD_FIXED)
+#define V_DUART_PARITY_MODE_NONE V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_NONE)
+
+#define M_DUART_TX_IRQ_SEL_TXRDY 0
+#define M_DUART_TX_IRQ_SEL_TXEMPT _SB_MAKEMASK1(5)
+
+#define M_DUART_RX_IRQ_SEL_RXRDY 0
+#define M_DUART_RX_IRQ_SEL_RXFULL _SB_MAKEMASK1(6)
+
+#define M_DUART_RX_RTS_ENA _SB_MAKEMASK1(7)
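+
+/*
+ * Example (editorial sketch, not part of the original header): a typical
+ * 8N1 setting for mode register 1 could be composed as
+ *
+ *	V_DUART_BITS_PER_CHAR_8 | V_DUART_PARITY_MODE_NONE
+ *
+ * with the stop-bit length selected in mode register 2 below
+ * (M_DUART_STOP_BIT_LEN_1 is the zero/default encoding).
+ */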
+
+/*
+ * DUART Mode Register #2 (Table 10-4)
+ * Register: DUART_MODE_REG_2_A
+ * Register: DUART_MODE_REG_2_B
+ */
+
+#define M_DUART_MODE_RESERVED1 _SB_MAKEMASK(3, 0) /* ignored */
+
+#define M_DUART_STOP_BIT_LEN_2 _SB_MAKEMASK1(3)
+#define M_DUART_STOP_BIT_LEN_1 0
+
+#define M_DUART_TX_CTS_ENA _SB_MAKEMASK1(4)
+
+
+#define M_DUART_MODE_RESERVED2 _SB_MAKEMASK1(5) /* must be zero */
+
+#define S_DUART_CHAN_MODE 6
+#define M_DUART_CHAN_MODE _SB_MAKEMASK(2, S_DUART_CHAN_MODE)
+#define V_DUART_CHAN_MODE(x) _SB_MAKEVALUE(x, S_DUART_CHAN_MODE)
+
+#define K_DUART_CHAN_MODE_NORMAL 0
+#define K_DUART_CHAN_MODE_LCL_LOOP 2
+#define K_DUART_CHAN_MODE_REM_LOOP 3
+
+#define V_DUART_CHAN_MODE_NORMAL V_DUART_CHAN_MODE(K_DUART_CHAN_MODE_NORMAL)
+#define V_DUART_CHAN_MODE_LCL_LOOP V_DUART_CHAN_MODE(K_DUART_CHAN_MODE_LCL_LOOP)
+#define V_DUART_CHAN_MODE_REM_LOOP V_DUART_CHAN_MODE(K_DUART_CHAN_MODE_REM_LOOP)
+
+/*
+ * DUART Command Register (Table 10-5)
+ * Register: DUART_CMD_A
+ * Register: DUART_CMD_B
+ */
+
+#define M_DUART_RX_EN _SB_MAKEMASK1(0)
+#define M_DUART_RX_DIS _SB_MAKEMASK1(1)
+#define M_DUART_TX_EN _SB_MAKEMASK1(2)
+#define M_DUART_TX_DIS _SB_MAKEMASK1(3)
+
+#define S_DUART_MISC_CMD 4
+#define M_DUART_MISC_CMD _SB_MAKEMASK(3, S_DUART_MISC_CMD)
+#define V_DUART_MISC_CMD(x) _SB_MAKEVALUE(x, S_DUART_MISC_CMD)
+
+#define K_DUART_MISC_CMD_NOACTION0 0
+#define K_DUART_MISC_CMD_NOACTION1 1
+#define K_DUART_MISC_CMD_RESET_RX 2
+#define K_DUART_MISC_CMD_RESET_TX 3
+#define K_DUART_MISC_CMD_NOACTION4 4
+#define K_DUART_MISC_CMD_RESET_BREAK_INT 5
+#define K_DUART_MISC_CMD_START_BREAK 6
+#define K_DUART_MISC_CMD_STOP_BREAK 7
+
+#define V_DUART_MISC_CMD_NOACTION0 V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION0)
+#define V_DUART_MISC_CMD_NOACTION1 V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION1)
+#define V_DUART_MISC_CMD_RESET_RX V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_RX)
+#define V_DUART_MISC_CMD_RESET_TX V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_TX)
+#define V_DUART_MISC_CMD_NOACTION4 V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION4)
+#define V_DUART_MISC_CMD_RESET_BREAK_INT V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_BREAK_INT)
+#define V_DUART_MISC_CMD_START_BREAK V_DUART_MISC_CMD(K_DUART_MISC_CMD_START_BREAK)
+#define V_DUART_MISC_CMD_STOP_BREAK V_DUART_MISC_CMD(K_DUART_MISC_CMD_STOP_BREAK)
+
+#define M_DUART_CMD_RESERVED _SB_MAKEMASK1(7)
+
+/*
+ * DUART Status Register (Table 10-6)
+ * Register: DUART_STATUS_A
+ * Register: DUART_STATUS_B
+ * READ-ONLY
+ */
+
+#define M_DUART_RX_RDY _SB_MAKEMASK1(0)
+#define M_DUART_RX_FFUL _SB_MAKEMASK1(1)
+#define M_DUART_TX_RDY _SB_MAKEMASK1(2)
+#define M_DUART_TX_EMT _SB_MAKEMASK1(3)
+#define M_DUART_OVRUN_ERR _SB_MAKEMASK1(4)
+#define M_DUART_PARITY_ERR _SB_MAKEMASK1(5)
+#define M_DUART_FRM_ERR _SB_MAKEMASK1(6)
+#define M_DUART_RCVD_BRK _SB_MAKEMASK1(7)
+
+/*
+ * DUART Baud Rate Register (Table 10-7)
+ * Register: DUART_CLK_SEL_A
+ * Register: DUART_CLK_SEL_B
+ */
+
+#define M_DUART_CLK_COUNTER _SB_MAKEMASK(12, 0)
+#define V_DUART_BAUD_RATE(x) (100000000/((x)*20)-1)
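+
+/*
+ * Worked example (editorial, not part of the original header): for a
+ * 115200 baud console, V_DUART_BAUD_RATE(115200) evaluates to
+ * 100000000 / (115200 * 20) - 1 = 43 - 1 = 42, which fits within
+ * M_DUART_CLK_COUNTER.  The formula assumes the 100 MHz reference
+ * clock that appears in the macro itself.
+ */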
+
+/*
+ * DUART Data Registers (Table 10-8 and 10-9)
+ * Register: DUART_RX_HOLD_A
+ * Register: DUART_RX_HOLD_B
+ * Register: DUART_TX_HOLD_A
+ * Register: DUART_TX_HOLD_B
+ */
+
+#define M_DUART_RX_DATA _SB_MAKEMASK(8, 0)
+#define M_DUART_TX_DATA _SB_MAKEMASK(8, 0)
+
+/*
+ * DUART Input Port Register (Table 10-10)
+ * Register: DUART_IN_PORT
+ */
+
+#define M_DUART_IN_PIN0_VAL _SB_MAKEMASK1(0)
+#define M_DUART_IN_PIN1_VAL _SB_MAKEMASK1(1)
+#define M_DUART_IN_PIN2_VAL _SB_MAKEMASK1(2)
+#define M_DUART_IN_PIN3_VAL _SB_MAKEMASK1(3)
+#define M_DUART_IN_PIN4_VAL _SB_MAKEMASK1(4)
+#define M_DUART_IN_PIN5_VAL _SB_MAKEMASK1(5)
+#define M_DUART_RIN0_PIN _SB_MAKEMASK1(6)
+#define M_DUART_RIN1_PIN _SB_MAKEMASK1(7)
+
+/*
+ * DUART Input Port Change Status Register (Tables 10-11, 10-12, and 10-13)
+ * Register: DUART_INPORT_CHNG
+ */
+
+#define S_DUART_IN_PIN_VAL 0
+#define M_DUART_IN_PIN_VAL _SB_MAKEMASK(4, S_DUART_IN_PIN_VAL)
+
+#define S_DUART_IN_PIN_CHNG 4
+#define M_DUART_IN_PIN_CHNG _SB_MAKEMASK(4, S_DUART_IN_PIN_CHNG)
+
+
+/*
+ * DUART Output port control register (Table 10-14)
+ * Register: DUART_OPCR
+ */
+
+#define M_DUART_OPCR_RESERVED0 _SB_MAKEMASK1(0) /* must be zero */
+#define M_DUART_OPC2_SEL _SB_MAKEMASK1(1)
+#define M_DUART_OPCR_RESERVED1 _SB_MAKEMASK1(2) /* must be zero */
+#define M_DUART_OPC3_SEL _SB_MAKEMASK1(3)
+#define M_DUART_OPCR_RESERVED2 _SB_MAKEMASK(4, 4) /* must be zero */
+
+/*
+ * DUART Aux Control Register (Table 10-15)
+ * Register: DUART_AUX_CTRL
+ */
+
+#define M_DUART_IP0_CHNG_ENA _SB_MAKEMASK1(0)
+#define M_DUART_IP1_CHNG_ENA _SB_MAKEMASK1(1)
+#define M_DUART_IP2_CHNG_ENA _SB_MAKEMASK1(2)
+#define M_DUART_IP3_CHNG_ENA _SB_MAKEMASK1(3)
+#define M_DUART_ACR_RESERVED _SB_MAKEMASK(4, 4)
+
+#define M_DUART_CTS_CHNG_ENA _SB_MAKEMASK1(0)
+#define M_DUART_CIN_CHNG_ENA _SB_MAKEMASK1(2)
+
+/*
+ * DUART Interrupt Status Register (Table 10-16)
+ * Register: DUART_ISR
+ */
+
+#define M_DUART_ISR_TX_A _SB_MAKEMASK1(0)
+
+#define S_DUART_ISR_RX_A 1
+#define M_DUART_ISR_RX_A _SB_MAKEMASK1(S_DUART_ISR_RX_A)
+#define V_DUART_ISR_RX_A(x) _SB_MAKEVALUE(x, S_DUART_ISR_RX_A)
+#define G_DUART_ISR_RX_A(x) _SB_GETVALUE(x, S_DUART_ISR_RX_A, M_DUART_ISR_RX_A)
+
+#define M_DUART_ISR_BRK_A _SB_MAKEMASK1(2)
+#define M_DUART_ISR_IN_A _SB_MAKEMASK1(3)
+#define M_DUART_ISR_ALL_A _SB_MAKEMASK(4, 0)
+
+#define M_DUART_ISR_TX_B _SB_MAKEMASK1(4)
+#define M_DUART_ISR_RX_B _SB_MAKEMASK1(5)
+#define M_DUART_ISR_BRK_B _SB_MAKEMASK1(6)
+#define M_DUART_ISR_IN_B _SB_MAKEMASK1(7)
+#define M_DUART_ISR_ALL_B _SB_MAKEMASK(4, 4)
+
+/*
+ * DUART Channel A Interrupt Status Register (Table 10-17)
+ * DUART Channel B Interrupt Status Register (Table 10-18)
+ * Register: DUART_ISR_A
+ * Register: DUART_ISR_B
+ */
+
+#define M_DUART_ISR_TX _SB_MAKEMASK1(0)
+#define M_DUART_ISR_RX _SB_MAKEMASK1(1)
+#define M_DUART_ISR_BRK _SB_MAKEMASK1(2)
+#define M_DUART_ISR_IN _SB_MAKEMASK1(3)
+#define M_DUART_ISR_ALL _SB_MAKEMASK(4, 0)
+#define M_DUART_ISR_RESERVED _SB_MAKEMASK(4, 4)
+
+/*
+ * DUART Interrupt Mask Register (Table 10-19)
+ * Register: DUART_IMR
+ */
+
+#define M_DUART_IMR_TX_A _SB_MAKEMASK1(0)
+#define M_DUART_IMR_RX_A _SB_MAKEMASK1(1)
+#define M_DUART_IMR_BRK_A _SB_MAKEMASK1(2)
+#define M_DUART_IMR_IN_A _SB_MAKEMASK1(3)
+#define M_DUART_IMR_ALL_A _SB_MAKEMASK(4, 0)
+
+#define M_DUART_IMR_TX_B _SB_MAKEMASK1(4)
+#define M_DUART_IMR_RX_B _SB_MAKEMASK1(5)
+#define M_DUART_IMR_BRK_B _SB_MAKEMASK1(6)
+#define M_DUART_IMR_IN_B _SB_MAKEMASK1(7)
+#define M_DUART_IMR_ALL_B _SB_MAKEMASK(4, 4)
+
+/*
+ * DUART Channel A Interrupt Mask Register (Table 10-20)
+ * DUART Channel B Interrupt Mask Register (Table 10-21)
+ * Register: DUART_IMR_A
+ * Register: DUART_IMR_B
+ */
+
+#define M_DUART_IMR_TX _SB_MAKEMASK1(0)
+#define M_DUART_IMR_RX _SB_MAKEMASK1(1)
+#define M_DUART_IMR_BRK _SB_MAKEMASK1(2)
+#define M_DUART_IMR_IN _SB_MAKEMASK1(3)
+#define M_DUART_IMR_ALL _SB_MAKEMASK(4, 0)
+#define M_DUART_IMR_RESERVED _SB_MAKEMASK(4, 4)
+
+
+/*
+ * DUART Output Port Set Register (Table 10-22)
+ * Register: DUART_SET_OPR
+ */
+
+#define M_DUART_SET_OPR0 _SB_MAKEMASK1(0)
+#define M_DUART_SET_OPR1 _SB_MAKEMASK1(1)
+#define M_DUART_SET_OPR2 _SB_MAKEMASK1(2)
+#define M_DUART_SET_OPR3 _SB_MAKEMASK1(3)
+#define M_DUART_OPSR_RESERVED _SB_MAKEMASK(4, 4)
+
+/*
+ * DUART Output Port Clear Register (Table 10-23)
+ * Register: DUART_CLEAR_OPR
+ */
+
+#define M_DUART_CLR_OPR0 _SB_MAKEMASK1(0)
+#define M_DUART_CLR_OPR1 _SB_MAKEMASK1(1)
+#define M_DUART_CLR_OPR2 _SB_MAKEMASK1(2)
+#define M_DUART_CLR_OPR3 _SB_MAKEMASK1(3)
+#define M_DUART_OPCR_RESERVED _SB_MAKEMASK(4, 4)
+
+/*
+ * DUART Output Port RTS Register (Table 10-24)
+ * Register: DUART_OUT_PORT
+ */
+
+#define M_DUART_OUT_PIN_SET0 _SB_MAKEMASK1(0)
+#define M_DUART_OUT_PIN_SET1 _SB_MAKEMASK1(1)
+#define M_DUART_OUT_PIN_CLR0 _SB_MAKEMASK1(2)
+#define M_DUART_OUT_PIN_CLR1 _SB_MAKEMASK1(3)
+#define M_DUART_OPRR_RESERVED _SB_MAKEMASK(4, 4)
+
+#define M_DUART_OUT_PIN_SET(chan) \
+ (chan == 0 ? M_DUART_OUT_PIN_SET0 : M_DUART_OUT_PIN_SET1)
+#define M_DUART_OUT_PIN_CLR(chan) \
+ (chan == 0 ? M_DUART_OUT_PIN_CLR0 : M_DUART_OUT_PIN_CLR1)
+
+#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
+/*
+ * Full Interrupt Control Register
+ */
+
+#define S_DUART_SIG_FULL _SB_MAKE64(0)
+#define M_DUART_SIG_FULL _SB_MAKEMASK(4, S_DUART_SIG_FULL)
+#define V_DUART_SIG_FULL(x) _SB_MAKEVALUE(x, S_DUART_SIG_FULL)
+#define G_DUART_SIG_FULL(x) _SB_GETVALUE(x, S_DUART_SIG_FULL, M_DUART_SIG_FULL)
+
+#define S_DUART_INT_TIME _SB_MAKE64(4)
+#define M_DUART_INT_TIME _SB_MAKEMASK(4, S_DUART_INT_TIME)
+#define V_DUART_INT_TIME(x) _SB_MAKEVALUE(x, S_DUART_INT_TIME)
+#define G_DUART_INT_TIME(x) _SB_GETVALUE(x, S_DUART_INT_TIME, M_DUART_INT_TIME)
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
+
+
+/* ********************************************************************** */
+
+
+#endif
diff --git a/arch/mips/include/asm/sibyte/sentosa.h b/arch/mips/include/asm/sibyte/sentosa.h
new file mode 100644
index 000000000..a27cda344
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/sentosa.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2000, 2001 Broadcom Corporation
+ */
+#ifndef __ASM_SIBYTE_SENTOSA_H
+#define __ASM_SIBYTE_SENTOSA_H
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_int.h>
+
+#ifdef CONFIG_SIBYTE_SENTOSA
+#define SIBYTE_BOARD_NAME "BCM91250E (Sentosa)"
+#endif
+#ifdef CONFIG_SIBYTE_RHONE
+#define SIBYTE_BOARD_NAME "BCM91125E (Rhone)"
+#endif
+
+/* Generic bus chip selects */
+#ifdef CONFIG_SIBYTE_RHONE
+#define LEDS_CS 6
+#define LEDS_PHYS 0x1d0a0000
+#endif
+
+/* GPIOs */
+#define K_GPIO_DBG_LED 0
+
+#endif /* __ASM_SIBYTE_SENTOSA_H */
diff --git a/arch/mips/include/asm/sibyte/swarm.h b/arch/mips/include/asm/sibyte/swarm.h
new file mode 100644
index 000000000..947122f48
--- /dev/null
+++ b/arch/mips/include/asm/sibyte/swarm.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
+ */
+#ifndef __ASM_SIBYTE_SWARM_H
+#define __ASM_SIBYTE_SWARM_H
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_int.h>
+
+#ifdef CONFIG_SIBYTE_SWARM
+#define SIBYTE_BOARD_NAME "BCM91250A (SWARM)"
+#define SIBYTE_HAVE_PCMCIA 1
+#define SIBYTE_HAVE_IDE 1
+#endif
+#ifdef CONFIG_SIBYTE_LITTLESUR
+#define SIBYTE_BOARD_NAME "BCM91250C2 (LittleSur)"
+#define SIBYTE_HAVE_PCMCIA 0
+#define SIBYTE_HAVE_IDE 1
+#define SIBYTE_DEFAULT_CONSOLE "cfe0"
+#endif
+#ifdef CONFIG_SIBYTE_CRHONE
+#define SIBYTE_BOARD_NAME "BCM91125C (CRhone)"
+#define SIBYTE_HAVE_PCMCIA 0
+#define SIBYTE_HAVE_IDE 0
+#endif
+#ifdef CONFIG_SIBYTE_CRHINE
+#define SIBYTE_BOARD_NAME "BCM91120C (CRhine)"
+#define SIBYTE_HAVE_PCMCIA 0
+#define SIBYTE_HAVE_IDE 0
+#endif
+
+/* Generic bus chip selects */
+#define LEDS_CS 3
+#define LEDS_PHYS 0x100a0000
+
+#ifdef SIBYTE_HAVE_IDE
+#define IDE_CS 4
+#define IDE_PHYS 0x100b0000
+#define K_GPIO_GB_IDE 4
+#define K_INT_GB_IDE (K_INT_GPIO_0 + K_GPIO_GB_IDE)
+#endif
+
+#ifdef SIBYTE_HAVE_PCMCIA
+#define PCMCIA_CS 6
+#define PCMCIA_PHYS 0x11000000
+#define K_GPIO_PC_READY 9
+#define K_INT_PC_READY (K_INT_GPIO_0 + K_GPIO_PC_READY)
+#endif
+
+#endif /* __ASM_SIBYTE_SWARM_H */
diff --git a/arch/mips/include/asm/sigcontext.h b/arch/mips/include/asm/sigcontext.h
new file mode 100644
index 000000000..eeeb0f48c
--- /dev/null
+++ b/arch/mips/include/asm/sigcontext.h
@@ -0,0 +1,37 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1997, 1999 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_SIGCONTEXT_H
+#define _ASM_SIGCONTEXT_H
+
+#include <uapi/asm/sigcontext.h>
+
+#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
+
+struct sigcontext32 {
+ __u32 sc_regmask; /* Unused */
+ __u32 sc_status; /* Unused */
+ __u64 sc_pc;
+ __u64 sc_regs[32];
+ __u64 sc_fpregs[32];
+ __u32 sc_acx; /* Only MIPS32; was sc_ownedfp */
+ __u32 sc_fpc_csr;
+ __u32 sc_fpc_eir; /* Unused */
+ __u32 sc_used_math;
+ __u32 sc_dsp; /* dsp status, was sc_ssflags */
+ __u64 sc_mdhi;
+ __u64 sc_mdlo;
+ __u32 sc_hi1; /* Was sc_cause */
+ __u32 sc_lo1; /* Was sc_badvaddr */
+ __u32 sc_hi2; /* Was sc_sigset[4] */
+ __u32 sc_lo2;
+ __u32 sc_hi3;
+ __u32 sc_lo3;
+};
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
+#endif /* _ASM_SIGCONTEXT_H */
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h
new file mode 100644
index 000000000..23d6b8015
--- /dev/null
+++ b/arch/mips/include/asm/signal.h
@@ -0,0 +1,35 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_SIGNAL_H
+#define _ASM_SIGNAL_H
+
+#include <uapi/asm/signal.h>
+
+#ifdef CONFIG_MIPS32_O32
+extern struct mips_abi mips_abi_32;
+
+#define sig_uses_siginfo(ka, abi) \
+ ((abi != &mips_abi_32) ? 1 : \
+ ((ka)->sa.sa_flags & SA_SIGINFO))
+#else
+#define sig_uses_siginfo(ka, abi) \
+ (IS_ENABLED(CONFIG_64BIT) ? 1 : \
+ (IS_ENABLED(CONFIG_TRAD_SIGNALS) ? \
+ ((ka)->sa.sa_flags & SA_SIGINFO) : 1) )
+#endif
+
+#include <asm/sigcontext.h>
+#include <asm/siginfo.h>
+
+#define __ARCH_HAS_IRIX_SIGACTION
+
+extern int protected_save_fp_context(void __user *sc);
+extern int protected_restore_fp_context(void __user *sc);
+
+#endif /* _ASM_SIGNAL_H */
diff --git a/arch/mips/include/asm/sim.h b/arch/mips/include/asm/sim.h
new file mode 100644
index 000000000..59f31a95f
--- /dev/null
+++ b/arch/mips/include/asm/sim.h
@@ -0,0 +1,70 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999, 2000, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_SIM_H
+#define _ASM_SIM_H
+
+
+#include <asm/asm-offsets.h>
+
+#define __str2(x) #x
+#define __str(x) __str2(x)
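+
+/*
+ * Editorial note (not part of the original header): save_static_function()
+ * below emits an assembly stub named __<symbol> which stores the
+ * callee-saved ("static") registers $16-$23 and $30 into the pt_regs
+ * frame on the kernel stack and then jumps to <symbol>.  It is used for
+ * system calls such as fork/clone that need the caller's full register
+ * state preserved.
+ */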
+
+#ifdef CONFIG_32BIT
+
+#define save_static_function(symbol) \
+__asm__( \
+ ".text\n\t" \
+ ".globl\t__" #symbol "\n\t" \
+ ".align\t2\n\t" \
+ ".type\t__" #symbol ", @function\n\t" \
+ ".ent\t__" #symbol ", 0\n__" \
+ #symbol":\n\t" \
+ ".frame\t$29, 0, $31\n\t" \
+ "sw\t$16,"__str(PT_R16)"($29)\t\t\t# save_static_function\n\t" \
+ "sw\t$17,"__str(PT_R17)"($29)\n\t" \
+ "sw\t$18,"__str(PT_R18)"($29)\n\t" \
+ "sw\t$19,"__str(PT_R19)"($29)\n\t" \
+ "sw\t$20,"__str(PT_R20)"($29)\n\t" \
+ "sw\t$21,"__str(PT_R21)"($29)\n\t" \
+ "sw\t$22,"__str(PT_R22)"($29)\n\t" \
+ "sw\t$23,"__str(PT_R23)"($29)\n\t" \
+ "sw\t$30,"__str(PT_R30)"($29)\n\t" \
+ "j\t" #symbol "\n\t" \
+ ".end\t__" #symbol "\n\t" \
+ ".size\t__" #symbol",. - __" #symbol)
+
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_64BIT
+
+#define save_static_function(symbol) \
+__asm__( \
+ ".text\n\t" \
+ ".globl\t__" #symbol "\n\t" \
+ ".align\t2\n\t" \
+ ".type\t__" #symbol ", @function\n\t" \
+ ".ent\t__" #symbol ", 0\n__" \
+ #symbol":\n\t" \
+ ".frame\t$29, 0, $31\n\t" \
+ "sd\t$16,"__str(PT_R16)"($29)\t\t\t# save_static_function\n\t" \
+ "sd\t$17,"__str(PT_R17)"($29)\n\t" \
+ "sd\t$18,"__str(PT_R18)"($29)\n\t" \
+ "sd\t$19,"__str(PT_R19)"($29)\n\t" \
+ "sd\t$20,"__str(PT_R20)"($29)\n\t" \
+ "sd\t$21,"__str(PT_R21)"($29)\n\t" \
+ "sd\t$22,"__str(PT_R22)"($29)\n\t" \
+ "sd\t$23,"__str(PT_R23)"($29)\n\t" \
+ "sd\t$30,"__str(PT_R30)"($29)\n\t" \
+ "j\t" #symbol "\n\t" \
+ ".end\t__" #symbol "\n\t" \
+ ".size\t__" #symbol",. - __" #symbol)
+
+#endif /* CONFIG_64BIT */
+
+#endif /* _ASM_SIM_H */
diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h
new file mode 100644
index 000000000..7e5b9411f
--- /dev/null
+++ b/arch/mips/include/asm/smp-cps.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#ifndef __MIPS_ASM_SMP_CPS_H__
+#define __MIPS_ASM_SMP_CPS_H__
+
+#ifndef __ASSEMBLY__
+
+struct vpe_boot_config {
+ unsigned long pc;
+ unsigned long sp;
+ unsigned long gp;
+};
+
+struct core_boot_config {
+ atomic_t vpe_mask;
+ struct vpe_boot_config *vpe_config;
+};
+
+extern struct core_boot_config *mips_cps_core_bootcfg;
+
+extern void mips_cps_core_entry(void);
+extern void mips_cps_core_init(void);
+
+extern void mips_cps_boot_vpes(struct core_boot_config *cfg, unsigned vpe);
+
+extern void mips_cps_pm_save(void);
+extern void mips_cps_pm_restore(void);
+
+#ifdef CONFIG_MIPS_CPS
+
+extern bool mips_cps_smp_in_use(void);
+
+#else /* !CONFIG_MIPS_CPS */
+
+static inline bool mips_cps_smp_in_use(void) { return false; }
+
+#endif /* !CONFIG_MIPS_CPS */
+
+#else /* __ASSEMBLY__ */
+
+.extern mips_cps_bootcfg;
+
+#endif /* __ASSEMBLY__ */
+#endif /* __MIPS_ASM_SMP_CPS_H__ */
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
new file mode 100644
index 000000000..65618ff12
--- /dev/null
+++ b/arch/mips/include/asm/smp-ops.h
@@ -0,0 +1,121 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
+ * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
+ * Copyright (C) 2000, 2001, 2002 Ralf Baechle
+ * Copyright (C) 2000, 2001 Broadcom Corporation
+ */
+#ifndef __ASM_SMP_OPS_H
+#define __ASM_SMP_OPS_H
+
+#include <linux/errno.h>
+
+#include <asm/mips-cps.h>
+
+#ifdef CONFIG_SMP
+
+#include <linux/cpumask.h>
+
+struct task_struct;
+
+struct plat_smp_ops {
+ void (*send_ipi_single)(int cpu, unsigned int action);
+ void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
+ void (*init_secondary)(void);
+ void (*smp_finish)(void);
+ int (*boot_secondary)(int cpu, struct task_struct *idle);
+ void (*smp_setup)(void);
+ void (*prepare_cpus)(unsigned int max_cpus);
+ void (*prepare_boot_cpu)(void);
+#ifdef CONFIG_HOTPLUG_CPU
+ int (*cpu_disable)(void);
+ void (*cpu_die)(unsigned int cpu);
+#endif
+#ifdef CONFIG_KEXEC
+ void (*kexec_nonboot_cpu)(void);
+#endif
+};
+
+extern void register_smp_ops(const struct plat_smp_ops *ops);
+
+static inline void plat_smp_setup(void)
+{
+ extern const struct plat_smp_ops *mp_ops; /* private */
+
+ mp_ops->smp_setup();
+}
+
+extern void mips_smp_send_ipi_single(int cpu, unsigned int action);
+extern void mips_smp_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action);
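+
+/*
+ * Example (editorial sketch, not part of the original header): a platform
+ * typically provides a static plat_smp_ops instance and registers it early
+ * in boot, e.g. from its prom_init()/plat_mem_setup() path:
+ *
+ *	static const struct plat_smp_ops myboard_smp_ops = {
+ *		.send_ipi_single = myboard_send_ipi_single,
+ *		.send_ipi_mask	 = myboard_send_ipi_mask,
+ *		.init_secondary	 = myboard_init_secondary,
+ *		.smp_finish	 = myboard_smp_finish,
+ *		.boot_secondary	 = myboard_boot_secondary,
+ *		.smp_setup	 = myboard_smp_setup,
+ *	};
+ *
+ *	register_smp_ops(&myboard_smp_ops);
+ *
+ * All myboard_* names above are hypothetical.
+ */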
+
+#else /* !CONFIG_SMP */
+
+struct plat_smp_ops;
+
+static inline void plat_smp_setup(void)
+{
+ /* UP, nothing to do ... */
+}
+
+static inline void register_smp_ops(const struct plat_smp_ops *ops)
+{
+}
+
+#endif /* !CONFIG_SMP */
+
+static inline int register_up_smp_ops(void)
+{
+#ifdef CONFIG_SMP_UP
+ extern const struct plat_smp_ops up_smp_ops;
+
+ register_smp_ops(&up_smp_ops);
+
+ return 0;
+#else
+ return -ENODEV;
+#endif
+}
+
+static inline int register_cmp_smp_ops(void)
+{
+#ifdef CONFIG_MIPS_CMP
+ extern const struct plat_smp_ops cmp_smp_ops;
+
+ if (!mips_cm_present())
+ return -ENODEV;
+
+ register_smp_ops(&cmp_smp_ops);
+
+ return 0;
+#else
+ return -ENODEV;
+#endif
+}
+
+static inline int register_vsmp_smp_ops(void)
+{
+#ifdef CONFIG_MIPS_MT_SMP
+ extern const struct plat_smp_ops vsmp_smp_ops;
+
+ register_smp_ops(&vsmp_smp_ops);
+
+ return 0;
+#else
+ return -ENODEV;
+#endif
+}
+
+#ifdef CONFIG_MIPS_CPS
+extern int register_cps_smp_ops(void);
+#else
+static inline int register_cps_smp_ops(void)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __ASM_SMP_OPS_H */
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
new file mode 100644
index 000000000..5d9ff6100
--- /dev/null
+++ b/arch/mips/include/asm/smp.h
@@ -0,0 +1,138 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
+ * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
+ * Copyright (C) 2000, 2001, 2002 Ralf Baechle
+ * Copyright (C) 2000, 2001 Broadcom Corporation
+ */
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#include <linux/bitops.h>
+#include <linux/linkage.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+
+#include <linux/atomic.h>
+#include <asm/smp-ops.h>
+
+extern int smp_num_siblings;
+extern cpumask_t cpu_sibling_map[];
+extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map[];
+
+static inline int raw_smp_processor_id(void)
+{
+#if defined(__VDSO__)
+ extern int vdso_smp_processor_id(void)
+ __compiletime_error("VDSO should not call smp_processor_id()");
+ return vdso_smp_processor_id();
+#else
+ return current_thread_info()->cpu;
+#endif
+}
+#define raw_smp_processor_id raw_smp_processor_id
+
+/* Map from cpu id to sequential logical cpu number.  This mapping only
+   differs from the identity when CPUs have failed to come on-line. */
+extern int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];
+#define cpu_number_map(cpu) __cpu_number_map[cpu]
+
+/* The reverse map from sequential logical cpu number to cpu id. */
+extern int __cpu_logical_map[NR_CPUS];
+#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
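+
+/*
+ * Example (editorial, not part of the original header): iterating over the
+ * online CPUs and recovering each one's physical id, using the iterator
+ * from <linux/cpumask.h> included above:
+ *
+ *	for_each_online_cpu(cpu)
+ *		physid = cpu_logical_map(cpu);
+ */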
+
+#define NO_PROC_ID (-1)
+
+#define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
+#define SMP_CALL_FUNCTION 0x2
+/* Octeon - Tell another core to flush its icache */
+#define SMP_ICACHE_FLUSH 0x4
+#define SMP_ASK_C0COUNT 0x8
+
+/* Mask of CPUs which are currently definitely operating coherently */
+extern cpumask_t cpu_coherent_mask;
+
+extern asmlinkage void smp_bootstrap(void);
+
+extern void calculate_cpu_foreign_map(void);
+
+/*
+ * this function sends a 'reschedule' IPI to another CPU.
+ * it goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+static inline void smp_send_reschedule(int cpu)
+{
+ extern const struct plat_smp_ops *mp_ops; /* private */
+
+ mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static inline int __cpu_disable(void)
+{
+ extern const struct plat_smp_ops *mp_ops; /* private */
+
+ return mp_ops->cpu_disable();
+}
+
+static inline void __cpu_die(unsigned int cpu)
+{
+ extern const struct plat_smp_ops *mp_ops; /* private */
+
+ mp_ops->cpu_die(cpu);
+}
+
+extern void play_dead(void);
+#endif
+
+#ifdef CONFIG_KEXEC
+static inline void kexec_nonboot_cpu(void)
+{
+ extern const struct plat_smp_ops *mp_ops; /* private */
+
+ return mp_ops->kexec_nonboot_cpu();
+}
+
+static inline void *kexec_nonboot_cpu_func(void)
+{
+ extern const struct plat_smp_ops *mp_ops; /* private */
+
+ return mp_ops->kexec_nonboot_cpu;
+}
+#endif
+
+/*
+ * This function will set up the necessary IPIs for Linux to communicate
+ * with the CPUs in mask.
+ * Return 0 on success.
+ */
+int mips_smp_ipi_allocate(const struct cpumask *mask);
+
+/*
+ * This function will free up IPIs allocated with mips_smp_ipi_allocate to the
+ * CPUs in mask, which must be a subset of the IPIs that have been configured.
+ * Return 0 on success.
+ */
+int mips_smp_ipi_free(const struct cpumask *mask);
+
+static inline void arch_send_call_function_single_ipi(int cpu)
+{
+ extern const struct plat_smp_ops *mp_ops; /* private */
+
+ mp_ops->send_ipi_single(cpu, SMP_CALL_FUNCTION);
+}
+
+static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ extern const struct plat_smp_ops *mp_ops; /* private */
+
+ mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
+}
+
+#endif /* __ASM_SMP_H */
diff --git a/arch/mips/include/asm/sn/addrs.h b/arch/mips/include/asm/sn/addrs.h
new file mode 100644
index 000000000..837d23e24
--- /dev/null
+++ b/arch/mips/include/asm/sn/addrs.h
@@ -0,0 +1,377 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 1999, 2000 by Ralf Baechle
+ */
+#ifndef _ASM_SN_ADDRS_H
+#define _ASM_SN_ADDRS_H
+
+
+#ifndef __ASSEMBLY__
+#include <linux/smp.h>
+#include <linux/types.h>
+#endif /* !__ASSEMBLY__ */
+
+#include <asm/addrspace.h>
+#include <asm/sn/kldir.h>
+
+#if defined(CONFIG_SGI_IP27)
+#include <asm/sn/sn0/addrs.h>
+#elif defined(CONFIG_SGI_IP35)
+#include <asm/sn/sn1/addrs.h>
+#endif
+
+
+#ifndef __ASSEMBLY__
+
+#define UINT64_CAST (unsigned long)
+
+#else /* __ASSEMBLY__ */
+
+#define UINT64_CAST
+
+#endif /* __ASSEMBLY__ */
+
+
+#define NASID_GET_META(_n) ((_n) >> NASID_LOCAL_BITS)
+#ifdef CONFIG_SGI_IP27
+#define NASID_GET_LOCAL(_n) ((_n) & 0xf)
+#endif
+#define NASID_MAKE(_m, _l) (((_m) << NASID_LOCAL_BITS) | (_l))
+
+#define NODE_ADDRSPACE_MASK (NODE_ADDRSPACE_SIZE - 1)
+#define TO_NODE_ADDRSPACE(_pa) (UINT64_CAST (_pa) & NODE_ADDRSPACE_MASK)
+
+#define CHANGE_ADDR_NASID(_pa, _nasid) \
+ ((UINT64_CAST(_pa) & ~NASID_MASK) | \
+ (UINT64_CAST(_nasid) << NASID_SHFT))
+
+
+/*
+ * The following macros are used to index to the beginning of a specific
+ * node's address space.
+ */
+
+#define NODE_OFFSET(_n) (UINT64_CAST (_n) << NODE_SIZE_BITS)
+
+#define NODE_CAC_BASE(_n) (CAC_BASE + NODE_OFFSET(_n))
+#define NODE_HSPEC_BASE(_n) (HSPEC_BASE + NODE_OFFSET(_n))
+#define NODE_IO_BASE(_n) (IO_BASE + NODE_OFFSET(_n))
+#define NODE_MSPEC_BASE(_n) (MSPEC_BASE + NODE_OFFSET(_n))
+#define NODE_UNCAC_BASE(_n) (UNCAC_BASE + NODE_OFFSET(_n))
+
+#define TO_NODE(_n, _x) (NODE_OFFSET(_n) | ((_x) ))
+#define TO_NODE_CAC(_n, _x) (NODE_CAC_BASE(_n) | ((_x) & TO_PHYS_MASK))
+#define TO_NODE_UNCAC(_n, _x) (NODE_UNCAC_BASE(_n) | ((_x) & TO_PHYS_MASK))
+#define TO_NODE_MSPEC(_n, _x) (NODE_MSPEC_BASE(_n) | ((_x) & TO_PHYS_MASK))
+#define TO_NODE_HSPEC(_n, _x) (NODE_HSPEC_BASE(_n) | ((_x) & TO_PHYS_MASK))
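+
+/*
+ * Example (editorial, not part of the original header): the cached and
+ * uncached XKPHYS views of a physical offset _pa on node _n would be
+ * TO_NODE_CAC(_n, _pa) and TO_NODE_UNCAC(_n, _pa) respectively; both
+ * simply OR the node offset into the corresponding base window.
+ */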
+
+
+#define RAW_NODE_SWIN_BASE(nasid, widget) \
+ (NODE_IO_BASE(nasid) + (UINT64_CAST(widget) << SWIN_SIZE_BITS))
+
+#define WIDGETID_GET(addr) ((unsigned char)((addr >> SWIN_SIZE_BITS) & 0xff))
+
+/*
+ * The following definitions pertain to the IO special address
+ * space. They define the location of the big and little windows
+ * of any given node.
+ */
+
+#define SWIN_SIZE_BITS 24
+#define SWIN_SIZE (UINT64_CAST 1 << 24)
+#define SWIN_SIZEMASK (SWIN_SIZE - 1)
+#define SWIN_WIDGET_MASK 0xF
+
+/*
+ * Convert smallwindow address to xtalk address.
+ *
+ * 'addr' can be a physical or virtual address, but will be converted
+ * to an Xtalk address in the range 0 -> SWIN_SIZEMASK
+ */
+#define SWIN_WIDGETADDR(addr) ((addr) & SWIN_SIZEMASK)
+#define SWIN_WIDGETNUM(addr) (((addr) >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
+/*
+ * Check whether addr belongs to the small window address space of the
+ * node identified by "nasid".
+ *
+ * NOTE: "addr" is expected to be an XKPHYS address, NOT a physical
+ * address.
+ */
+#define NODE_SWIN_ADDR(nasid, addr) \
+ (((addr) >= NODE_SWIN_BASE(nasid, 0)) && \
+ ((addr) < (NODE_SWIN_BASE(nasid, HUB_NUM_WIDGET) + SWIN_SIZE)\
+ ))
+
+/*
+ * The following define the major position-independent aliases used
+ * in SN.
+ * UALIAS -- 256MB in size, reads in the UALIAS result in
+ * uncached references to the memory of the reader's node.
+ * CPU_UALIAS -- 128kb in size, the bottom part of UALIAS is flipped
+ * depending on which CPU does the access to provide
+ * all CPUs with unique uncached memory at low addresses.
+ * LBOOT -- 256MB in size, reads in the LBOOT area result in
+ * uncached references to the local hub's boot prom and
+ * other directory-bus connected devices.
+ * IALIAS -- 8MB in size, reads in the IALIAS result in uncached
+ * references to the local hub's registers.
+ */
+
+#define UALIAS_BASE HSPEC_BASE
+#define UALIAS_SIZE 0x10000000 /* 256 Megabytes */
+#define UALIAS_LIMIT (UALIAS_BASE + UALIAS_SIZE)
+
+/*
+ * The bottom of ualias space is flipped depending on whether you're
+ * processor 0 or 1 within a node.
+ */
+#ifdef CONFIG_SGI_IP27
+#define UALIAS_FLIP_BASE UALIAS_BASE
+#define UALIAS_FLIP_SIZE 0x20000
+#define UALIAS_FLIP_BIT 0x10000
+#define UALIAS_FLIP_ADDR(_x) (cputoslice(smp_processor_id()) ? \
+ (_x) ^ UALIAS_FLIP_BIT : (_x))
+
+#define LBOOT_BASE (HSPEC_BASE + 0x10000000)
+#define LBOOT_SIZE 0x10000000
+#define LBOOT_LIMIT (LBOOT_BASE + LBOOT_SIZE)
+#define LBOOT_STRIDE 0 /* IP27 has only one CPU PROM */
+
+#endif
+
+#define HUB_REGISTER_WIDGET 1
+#define IALIAS_BASE NODE_SWIN_BASE(0, HUB_REGISTER_WIDGET)
+#define IALIAS_SIZE 0x800000 /* 8 Megabytes */
+#define IS_IALIAS(_a) (((_a) >= IALIAS_BASE) && \
+ ((_a) < (IALIAS_BASE + IALIAS_SIZE)))
+
+/*
+ * Macro for referring to Hub's RBOOT space
+ */
+
+#ifdef CONFIG_SGI_IP27
+#define RBOOT_SIZE 0x10000000 /* 256 Megabytes */
+#define NODE_RBOOT_BASE(_n) (NODE_HSPEC_BASE(_n) + 0x30000000)
+#define NODE_RBOOT_LIMIT(_n) (NODE_RBOOT_BASE(_n) + RBOOT_SIZE)
+
+#endif
+
+/*
+ * Macros for referring the Hub's back door space
+ *
+ * These macros correctly process addresses in any node's space.
+ * WARNING: They won't work in assembler.
+ *
+ * BDDIR_ENTRY_LO returns the address of the low double-word of the dir
+ * entry corresponding to a physical (Cac or Uncac) address.
+ * BDDIR_ENTRY_HI returns the address of the high double-word of the entry.
+ * BDPRT_ENTRY returns the address of the double-word protection entry
+ * corresponding to the page containing the physical address.
+ * BDPRT_ENTRY_S Stores the value into the protection entry.
+ * BDPRT_ENTRY_L Load the value from the protection entry.
+ * BDECC_ENTRY returns the address of the ECC byte corresponding to a
+ * double-word at a specified physical address.
+ * BDECC_ENTRY_H returns the address of the two ECC bytes corresponding to a
+ * quad-word at a specified physical address.
+ */
+#define NODE_BDOOR_BASE(_n) (NODE_HSPEC_BASE(_n) + (NODE_ADDRSPACE_SIZE/2))
+
+#define NODE_BDECC_BASE(_n) (NODE_BDOOR_BASE(_n))
+#define NODE_BDDIR_BASE(_n) (NODE_BDOOR_BASE(_n) + (NODE_ADDRSPACE_SIZE/4))
+#ifdef CONFIG_SGI_IP27
+#define BDDIR_ENTRY_LO(_pa) ((HSPEC_BASE + \
+ NODE_ADDRSPACE_SIZE * 3 / 4 + \
+ 0x200) | \
+ UINT64_CAST(_pa) & NASID_MASK | \
+ UINT64_CAST(_pa) >> 2 & BDDIR_UPPER_MASK | \
+ UINT64_CAST(_pa) >> 3 & 0x1f << 4)
+
+#define BDDIR_ENTRY_HI(_pa) ((HSPEC_BASE + \
+ NODE_ADDRSPACE_SIZE * 3 / 4 + \
+ 0x208) | \
+ UINT64_CAST(_pa) & NASID_MASK | \
+ UINT64_CAST(_pa) >> 2 & BDDIR_UPPER_MASK | \
+ UINT64_CAST(_pa) >> 3 & 0x1f << 4)
+
+#define BDPRT_ENTRY(_pa, _rgn) ((HSPEC_BASE + \
+ NODE_ADDRSPACE_SIZE * 3 / 4) | \
+ UINT64_CAST(_pa) & NASID_MASK | \
+ UINT64_CAST(_pa) >> 2 & BDDIR_UPPER_MASK | \
+ (_rgn) << 3)
+#define BDPRT_ENTRY_ADDR(_pa, _rgn) (BDPRT_ENTRY((_pa), (_rgn)))
+#define BDPRT_ENTRY_S(_pa, _rgn, _val) (*(__psunsigned_t *)BDPRT_ENTRY((_pa), (_rgn))=(_val))
+#define BDPRT_ENTRY_L(_pa, _rgn) (*(__psunsigned_t *)BDPRT_ENTRY((_pa), (_rgn)))
+
+#define BDECC_ENTRY(_pa) ((HSPEC_BASE + \
+ NODE_ADDRSPACE_SIZE / 2) | \
+ UINT64_CAST(_pa) & NASID_MASK | \
+ UINT64_CAST(_pa) >> 2 & BDECC_UPPER_MASK | \
+ UINT64_CAST(_pa) >> 3 & 3)
+
+/*
+ * Macro to convert a back door directory or protection address into the
+ * raw physical address of the associated cache line or protection page.
+ */
+#define BDADDR_IS_DIR(_ba) ((UINT64_CAST (_ba) & 0x200) != 0)
+#define BDADDR_IS_PRT(_ba) ((UINT64_CAST (_ba) & 0x200) == 0)
+
+#define BDDIR_TO_MEM(_ba) (UINT64_CAST (_ba) & NASID_MASK | \
+ (UINT64_CAST(_ba) & BDDIR_UPPER_MASK)<<2 | \
+ (UINT64_CAST(_ba) & 0x1f << 4) << 3)
+
+#define BDPRT_TO_MEM(_ba) (UINT64_CAST (_ba) & NASID_MASK | \
+ (UINT64_CAST(_ba) & BDDIR_UPPER_MASK)<<2)
+
+#define BDECC_TO_MEM(_ba) (UINT64_CAST (_ba) & NASID_MASK | \
+ (UINT64_CAST(_ba) & BDECC_UPPER_MASK)<<2 | \
+ (UINT64_CAST(_ba) & 3) << 3)
+#endif /* CONFIG_SGI_IP27 */
+
+
+/*
+ * The following macros produce the correct base virtual address for
+ * the hub registers. The LOCAL_HUB_* macros produce the appropriate
+ * address for the local registers. The REMOTE_HUB_* macro produce
+ * the address for the specified hub's registers. The intent is
+ * that the appropriate PI, MD, NI, or II register would be substituted
+ * for _x.
+ */
+
+/*
+ * WARNING:
+ * When certain Hub chip workaround are defined, it's not sufficient
+ * to dereference the *_HUB_ADDR() macros. You should instead use
+ * HUB_L() and HUB_S() if you must deal with pointers to hub registers.
+ * Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
+ * They're always safe.
+ */
+#define LOCAL_HUB_ADDR(_x) (IALIAS_BASE + (_x))
+#define REMOTE_HUB_ADDR(_n, _x) ((NODE_SWIN_BASE(_n, 1) + 0x800000 + (_x)))
+
+#ifndef __ASSEMBLY__
+
+#define LOCAL_HUB_PTR(_x) ((u64 *)LOCAL_HUB_ADDR((_x)))
+#define REMOTE_HUB_PTR(_n, _x) ((u64 *)REMOTE_HUB_ADDR((_n), (_x)))
+
+#define LOCAL_HUB_L(_r) __raw_readq(LOCAL_HUB_PTR(_r))
+#define LOCAL_HUB_S(_r, _d) __raw_writeq((_d), LOCAL_HUB_PTR(_r))
+#define REMOTE_HUB_L(_n, _r) __raw_readq(REMOTE_HUB_PTR((_n), (_r)))
+#define REMOTE_HUB_S(_n, _r, _d) __raw_writeq((_d), \
+ REMOTE_HUB_PTR((_n), (_r)))
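+
+/*
+ * Example (editorial sketch, not part of the original header): reading and
+ * writing hub registers through the safe accessors, e.g.
+ *
+ *	val = LOCAL_HUB_L(reg);
+ *	REMOTE_HUB_S(nasid, reg, val);
+ *
+ * where "reg" stands for a register offset defined in the hub register
+ * headers (the names here are placeholders).
+ */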
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Software structure locations -- permanently fixed
+ * See diagram in kldir.h
+ */
+
+#define PHYS_RAMBASE 0x0
+#define K0_RAMBASE PHYS_TO_K0(PHYS_RAMBASE)
+
+#define EX_HANDLER_OFFSET(slice) ((slice) << 16)
+#define EX_HANDLER_ADDR(nasid, slice) \
+ PHYS_TO_K0(NODE_OFFSET(nasid) | EX_HANDLER_OFFSET(slice))
+#define EX_HANDLER_SIZE 0x0400
+
+#define EX_FRAME_OFFSET(slice) ((slice) << 16 | 0x400)
+#define EX_FRAME_ADDR(nasid, slice) \
+ PHYS_TO_K0(NODE_OFFSET(nasid) | EX_FRAME_OFFSET(slice))
+#define EX_FRAME_SIZE 0x0c00
+
+#define ARCS_SPB_OFFSET 0x1000
+#define ARCS_SPB_ADDR(nasid) \
+ PHYS_TO_K0(NODE_OFFSET(nasid) | ARCS_SPB_OFFSET)
+#define ARCS_SPB_SIZE 0x0400
+
+#define KLDIR_OFFSET 0x2000
+#define KLDIR_ADDR(nasid) \
+ TO_NODE_UNCAC((nasid), KLDIR_OFFSET)
+#define KLDIR_SIZE 0x0400
+
+
+/*
+ * Software structure locations -- indirected through KLDIR
+ * See diagram in kldir.h
+ *
+ * Important: All low memory structures must only be accessed
+ * uncached, except for the symmon stacks.
+ */
+
+#define KLI_LAUNCH 0 /* Dir. entries */
+#define KLI_KLCONFIG 1
+#define KLI_NMI 2
+#define KLI_GDA 3
+#define KLI_FREEMEM 4
+#define KLI_SYMMON_STK 5
+#define KLI_PI_ERROR 6
+#define KLI_KERN_VARS 7
+#define KLI_KERN_XP 8
+#define KLI_KERN_PARTID 9
+
+#ifndef __ASSEMBLY__
+
+#define KLD_BASE(nasid) ((kldir_ent_t *) KLDIR_ADDR(nasid))
+#define KLD_LAUNCH(nasid) (KLD_BASE(nasid) + KLI_LAUNCH)
+#define KLD_NMI(nasid) (KLD_BASE(nasid) + KLI_NMI)
+#define KLD_KLCONFIG(nasid) (KLD_BASE(nasid) + KLI_KLCONFIG)
+#define KLD_PI_ERROR(nasid) (KLD_BASE(nasid) + KLI_PI_ERROR)
+#define KLD_GDA(nasid) (KLD_BASE(nasid) + KLI_GDA)
+#define KLD_SYMMON_STK(nasid) (KLD_BASE(nasid) + KLI_SYMMON_STK)
+#define KLD_FREEMEM(nasid) (KLD_BASE(nasid) + KLI_FREEMEM)
+#define KLD_KERN_VARS(nasid) (KLD_BASE(nasid) + KLI_KERN_VARS)
+#define KLD_KERN_XP(nasid) (KLD_BASE(nasid) + KLI_KERN_XP)
+#define KLD_KERN_PARTID(nasid) (KLD_BASE(nasid) + KLI_KERN_PARTID)
+
+#define LAUNCH_OFFSET(nasid, slice) \
+ (KLD_LAUNCH(nasid)->offset + \
+ KLD_LAUNCH(nasid)->stride * (slice))
+#define LAUNCH_ADDR(nasid, slice) \
+ TO_NODE_UNCAC((nasid), LAUNCH_OFFSET(nasid, slice))
+#define LAUNCH_SIZE(nasid) KLD_LAUNCH(nasid)->size
+
+#define SN_NMI_OFFSET(nasid, slice) \
+ (KLD_NMI(nasid)->offset + \
+ KLD_NMI(nasid)->stride * (slice))
+#define NMI_ADDR(nasid, slice) \
+ TO_NODE_UNCAC((nasid), SN_NMI_OFFSET(nasid, slice))
+#define NMI_SIZE(nasid) KLD_NMI(nasid)->size
+
+#define KLCONFIG_OFFSET(nasid) KLD_KLCONFIG(nasid)->offset
+#define KLCONFIG_ADDR(nasid) \
+ TO_NODE_UNCAC((nasid), KLCONFIG_OFFSET(nasid))
+#define KLCONFIG_SIZE(nasid) KLD_KLCONFIG(nasid)->size
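+
+/*
+ * Example (editorial, not part of the original header): the KLCONFIG area
+ * of a node is typically located by converting its KLDIR-supplied offset
+ * into an uncached node address, i.e. KLCONFIG_ADDR(nasid), and must be
+ * accessed uncached as noted above.
+ */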
+
+#define GDA_ADDR(nasid) KLD_GDA(nasid)->pointer
+#define GDA_SIZE(nasid) KLD_GDA(nasid)->size
+
+#define SYMMON_STK_OFFSET(nasid, slice) \
+ (KLD_SYMMON_STK(nasid)->offset + \
+ KLD_SYMMON_STK(nasid)->stride * (slice))
+#define SYMMON_STK_STRIDE(nasid) KLD_SYMMON_STK(nasid)->stride
+
+#define SYMMON_STK_ADDR(nasid, slice) \
+ TO_NODE_CAC((nasid), SYMMON_STK_OFFSET(nasid, slice))
+
+#define SYMMON_STK_SIZE(nasid) KLD_SYMMON_STK(nasid)->stride
+
+#define SYMMON_STK_END(nasid) (SYMMON_STK_ADDR(nasid, 0) + KLD_SYMMON_STK(nasid)->size)
+
+#define NODE_OFFSET_TO_K0(_nasid, _off) \
+ PHYS_TO_K0((NODE_OFFSET(_nasid) + (_off)) | CAC_BASE)
+#define NODE_OFFSET_TO_K1(_nasid, _off) \
+ TO_UNCAC((NODE_OFFSET(_nasid) + (_off)) | UNCAC_BASE)
+
+#define KERN_VARS_ADDR(nasid) KLD_KERN_VARS(nasid)->pointer
+#define KERN_VARS_SIZE(nasid) KLD_KERN_VARS(nasid)->size
+
+#endif /* !__ASSEMBLY__ */
+
+
+#endif /* _ASM_SN_ADDRS_H */
diff --git a/arch/mips/include/asm/sn/agent.h b/arch/mips/include/asm/sn/agent.h
new file mode 100644
index 000000000..7e9b32717
--- /dev/null
+++ b/arch/mips/include/asm/sn/agent.h
@@ -0,0 +1,45 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This file has definitions for the hub and snac interfaces.
+ *
+ * Copyright (C) 1992 - 1997, 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
+ */
+#ifndef _ASM_SGI_SN_AGENT_H
+#define _ASM_SGI_SN_AGENT_H
+
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+
+#if defined(CONFIG_SGI_IP27)
+#include <asm/sn/sn0/hub.h>
+#elif defined(CONFIG_SGI_IP35)
+#include <asm/sn/sn1/hub.h>
+#endif /* !CONFIG_SGI_IP27 && !CONFIG_SGI_IP35 */
+
+/*
+ * NIC register macros
+ */
+
+#if defined(CONFIG_SGI_IP27)
+#define HUB_NIC_ADDR(_cpuid) \
+ REMOTE_HUB_ADDR(cpu_to_node(_cpuid), \
+ MD_MLAN_CTL)
+#endif
+
+#define SET_HUB_NIC(_my_cpuid, _val) \
+ (HUB_S(HUB_NIC_ADDR(_my_cpuid), (_val)))
+
+#define SET_MY_HUB_NIC(_v) \
+ SET_HUB_NIC(cpuid(), (_v))
+
+#define GET_HUB_NIC(_my_cpuid) \
+ (HUB_L(HUB_NIC_ADDR(_my_cpuid)))
+
+#define GET_MY_HUB_NIC() \
+ GET_HUB_NIC(cpuid())
+
+#endif /* _ASM_SGI_SN_AGENT_H */
diff --git a/arch/mips/include/asm/sn/arch.h b/arch/mips/include/asm/sn/arch.h
new file mode 100644
index 000000000..9a9682543
--- /dev/null
+++ b/arch/mips/include/asm/sn/arch.h
@@ -0,0 +1,28 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI specific setup.
+ *
+ * Copyright (C) 1995 - 1997, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
+ */
+#ifndef _ASM_SN_ARCH_H
+#define _ASM_SN_ARCH_H
+
+#include <linux/types.h>
+#include <asm/sn/types.h>
+#ifdef CONFIG_SGI_IP27
+#include <asm/sn/sn0/arch.h>
+#endif
+
+#define cputonasid(cpu) (sn_cpu_info[(cpu)].p_nasid)
+#define cputoslice(cpu) (sn_cpu_info[(cpu)].p_slice)
+
+#define INVALID_NASID (nasid_t)-1
+#define INVALID_PNODEID (pnodeid_t)-1
+#define INVALID_MODULE (moduleid_t)-1
+#define INVALID_PARTID (partid_t)-1
+
+#endif /* _ASM_SN_ARCH_H */
diff --git a/arch/mips/include/asm/sn/fru.h b/arch/mips/include/asm/sn/fru.h
new file mode 100644
index 000000000..bbb83257c
--- /dev/null
+++ b/arch/mips/include/asm/sn/fru.h
@@ -0,0 +1,44 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from IRIX <sys/SN/SN0/sn0_fru.h>
+ *
+ * Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999, 2006 Ralf Baechle (ralf@linux-mips)
+ */
+#ifndef __ASM_SN_FRU_H
+#define __ASM_SN_FRU_H
+
+#define MAX_DIMMS 8 /* max # of dimm banks */
+#define MAX_PCIDEV 8 /* max # of pci devices on a pci bus */
+
+typedef unsigned char confidence_t;
+
+typedef struct kf_mem_s {
+ confidence_t km_confidence; /* confidence level that the memory is bad
+				     * is this necessary?
+ */
+ confidence_t km_dimm[MAX_DIMMS];
+ /* confidence level that dimm[i] is bad
+				     * I think this is the right number
+ */
+
+} kf_mem_t;
+
+typedef struct kf_cpu_s {
+ confidence_t kc_confidence; /* confidence level that cpu is bad */
+ confidence_t kc_icache; /* confidence level that instr. cache is bad */
+ confidence_t kc_dcache; /* confidence level that data cache is bad */
+ confidence_t kc_scache; /* confidence level that sec. cache is bad */
+ confidence_t kc_sysbus; /* confidence level that sysad/cmd/state bus is bad */
+} kf_cpu_t;
+
+typedef struct kf_pci_bus_s {
+ confidence_t kpb_belief; /* confidence level that the pci bus is bad */
+ confidence_t kpb_pcidev_belief[MAX_PCIDEV];
+ /* confidence level that the pci dev is bad */
+} kf_pci_bus_t;
+
+#endif /* __ASM_SN_FRU_H */
diff --git a/arch/mips/include/asm/sn/gda.h b/arch/mips/include/asm/sn/gda.h
new file mode 100644
index 000000000..d52f81620
--- /dev/null
+++ b/arch/mips/include/asm/sn/gda.h
@@ -0,0 +1,105 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from IRIX <sys/SN/gda.h>.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ *
+ * gda.h -- Contains the data structure for the global data area,
+ * The GDA contains information communicated between the
+ * PROM, SYMMON, and the kernel.
+ */
+#ifndef _ASM_SN_GDA_H
+#define _ASM_SN_GDA_H
+
+#include <asm/sn/addrs.h>
+
+#define GDA_MAGIC 0x58464552
+
+/*
+ * GDA Version History
+ *
+ * Version # | Change
+ * -------------+-------------------------------------------------------
+ * 1 | Initial SN0 version
+ * 2 | Prom sets g_partid field to the partition number. 0 IS
+ * | a valid partition #.
+ */
+
+#define GDA_VERSION 2 /* Current GDA version # */
+
+#define G_MAGICOFF 0
+#define G_VERSIONOFF 4
+#define G_PROMOPOFF 6
+#define G_MASTEROFF 8
+#define G_VDSOFF 12
+#define G_HKDNORMOFF 16
+#define G_HKDUTLBOFF 24
+#define G_HKDXUTLBOFF 32
+#define G_PARTIDOFF 40
+#define G_TABLEOFF 128
+
+#ifndef __ASSEMBLY__
+
+typedef struct gda {
+ u32 g_magic; /* GDA magic number */
+ u16 g_version; /* Version of this structure */
+ u16 g_masterid; /* The NASID:CPUNUM of the master cpu */
+ u32 g_promop; /* Passes requests from the kernel to prom */
+ u32 g_vds; /* Store the virtual dipswitches here */
+ void **g_hooked_norm;/* ptr to pda loc for norm hndlr */
+ void **g_hooked_utlb;/* ptr to pda loc for utlb hndlr */
+ void **g_hooked_xtlb;/* ptr to pda loc for xtlb hndlr */
+ int g_partid; /* partition id */
+ int g_symmax; /* Max symbols in name table. */
+ void *g_dbstab; /* Address of idbg symbol table */
+ char *g_nametab; /* Address of idbg name table */
+ void *g_ktext_repmask;
+ /* Pointer to a mask of nodes with copies
+ * of the kernel. */
+ char g_padding[56]; /* pad out to 128 bytes */
+ nasid_t g_nasidtable[MAX_NUMNODES]; /* NASID of each node */
+} gda_t;
+
+#define GDA ((gda_t*) GDA_ADDR(get_nasid()))
+
+#endif /* !__ASSEMBLY__ */
+/*
+ * Define: PART_GDA_VERSION
+ * Purpose: Define the minimum version of the GDA required, lower
+ * revisions assume GDA is NOT set up, and read partition
+ * information from the board info.
+ */
+#define PART_GDA_VERSION 2
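+/*
+ * Minimal sketch of the intended check (the surrounding code and the
+ * fallback are assumptions):
+ *
+ *	gda_t *gda = GDA;
+ *
+ *	if (gda->g_magic == GDA_MAGIC && gda->g_version >= PART_GDA_VERSION)
+ *		partid = gda->g_partid;
+ *	else
+ *		partid = partid_from_board_info();	// hypothetical fallback
+ */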
+
+/*
+ * The following requests can be sent to the PROM during startup.
+ */
+
+#define PROMOP_MAGIC 0x0ead0000
+#define PROMOP_MAGIC_MASK 0x0fff0000
+
+#define PROMOP_BIST_SHIFT 11
+#define PROMOP_BIST_MASK (0x3 << 11)
+
+#define PROMOP_REG PI_ERR_STACK_ADDR_A
+
+#define PROMOP_INVALID (PROMOP_MAGIC | 0x00)
+#define PROMOP_HALT (PROMOP_MAGIC | 0x10)
+#define PROMOP_POWERDOWN (PROMOP_MAGIC | 0x20)
+#define PROMOP_RESTART (PROMOP_MAGIC | 0x30)
+#define PROMOP_REBOOT (PROMOP_MAGIC | 0x40)
+#define PROMOP_IMODE (PROMOP_MAGIC | 0x50)
+
+#define PROMOP_CMD_MASK 0x00f0
+#define PROMOP_OPTIONS_MASK 0xfff0
+
+#define PROMOP_SKIP_DIAGS 0x0100 /* don't bother running diags */
+#define PROMOP_SKIP_MEMINIT 0x0200 /* don't bother initing memory */
+#define PROMOP_SKIP_DEVINIT 0x0400 /* don't bother initing devices */
+#define PROMOP_BIST1 0x0800 /* keep track of which BIST ran */
+#define PROMOP_BIST2 0x1000 /* keep track of which BIST ran */
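+/*
+ * A request value combines the magic, one command and optional flags,
+ * e.g. PROMOP_RESTART | PROMOP_SKIP_DIAGS (illustrative). The PROM
+ * matches the upper bits against PROMOP_MAGIC_MASK and decodes the
+ * command with PROMOP_CMD_MASK.
+ */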
+
+#endif /* _ASM_SN_GDA_H */
diff --git a/arch/mips/include/asm/sn/intr.h b/arch/mips/include/asm/sn/intr.h
new file mode 100644
index 000000000..3d6954d37
--- /dev/null
+++ b/arch/mips/include/asm/sn/intr.h
@@ -0,0 +1,112 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997 Silicon Graphics, Inc.
+ */
+#ifndef __ASM_SN_INTR_H
+#define __ASM_SN_INTR_H
+
+/*
+ * Macros to manipulate the interrupt register on the calling hub chip.
+ */
+
+#define LOCAL_HUB_SEND_INTR(level) \
+ LOCAL_HUB_S(PI_INT_PEND_MOD, (0x100 | (level)))
+#define REMOTE_HUB_SEND_INTR(hub, level) \
+ REMOTE_HUB_S((hub), PI_INT_PEND_MOD, (0x100 | (level)))
+
+/*
+ * When clearing the interrupt, make sure this clear does make it
+ * to the hub. Otherwise we could end up losing interrupts.
+ * We do an uncached load of the int_pend0 register to ensure this.
+ */
+
+#define LOCAL_HUB_CLR_INTR(level) \
+do { \
+ LOCAL_HUB_S(PI_INT_PEND_MOD, (level)); \
+ LOCAL_HUB_L(PI_INT_PEND0); \
+} while (0)
+
+#define REMOTE_HUB_CLR_INTR(hub, level) \
+do { \
+ nasid_t __hub = (hub); \
+ \
+ REMOTE_HUB_S(__hub, PI_INT_PEND_MOD, (level)); \
+ REMOTE_HUB_L(__hub, PI_INT_PEND0); \
+} while (0)
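+
+/*
+ * Typical usage from interrupt handling code (sketch; the calling
+ * context is an assumption):
+ *
+ *	LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
+ *
+ * The uncached read-back of PI_INT_PEND0 inside the macro makes sure
+ * the clear has reached the hub before execution continues.
+ */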
+
+/*
+ * Hard-coded interrupt levels:
+ */
+
+/*
+ * L0 = SW1
+ * L1 = SW2
+ * L2 = INT_PEND0
+ * L3 = INT_PEND1
+ * L4 = RTC
+ * L5 = Profiling Timer
+ * L6 = Hub Errors
+ * L7 = Count/Compare (T5 counters)
+ */
+
+
+/*
+ * INT_PEND0 hard-coded bits.
+ */
+
+/*
+ * INT_PEND0 bits determined by hardware:
+ */
+#define RESERVED_INTR 0 /* What is this bit? */
+#define GFX_INTR_A 1
+#define GFX_INTR_B 2
+#define PG_MIG_INTR 3
+#define UART_INTR 4
+#define CC_PEND_A 5
+#define CC_PEND_B 6
+
+/*
+ * INT_PEND0 used by the kernel for itself ...
+ */
+#define CPU_RESCHED_A_IRQ 7
+#define CPU_RESCHED_B_IRQ 8
+#define CPU_CALL_A_IRQ 9
+#define CPU_CALL_B_IRQ 10
+
+/*
+ * INT_PEND1 hard-coded bits:
+ */
+#define NI_BRDCAST_ERR_A 39
+#define NI_BRDCAST_ERR_B 40
+
+#define LLP_PFAIL_INTR_A 41 /* see ml/SN/SN0/sysctlr.c */
+#define LLP_PFAIL_INTR_B 42
+
+#define TLB_INTR_A 43 /* used for tlb flush random */
+#define TLB_INTR_B 44
+
+#define IP27_INTR_0 45 /* Reserved for PROM use */
+#define IP27_INTR_1 46 /* do not use in Kernel */
+#define IP27_INTR_2 47
+#define IP27_INTR_3 48
+#define IP27_INTR_4 49
+#define IP27_INTR_5 50
+#define IP27_INTR_6 51
+#define IP27_INTR_7 52
+
+#define BRIDGE_ERROR_INTR 53 /* Setup by PROM to catch */
+ /* Bridge Errors */
+#define DEBUG_INTR_A 54
+#define DEBUG_INTR_B 55 /* Used by symmon to stop all cpus */
+#define IO_ERROR_INTR 57 /* Setup by PROM */
+#define CLK_ERR_INTR 58
+#define COR_ERR_INTR_A 59
+#define COR_ERR_INTR_B 60
+#define MD_COR_ERR_INTR 61
+#define NI_ERROR_INTR 62
+#define MSC_PANIC_INTR 63
+
+#endif /* __ASM_SN_INTR_H */
diff --git a/arch/mips/include/asm/sn/io.h b/arch/mips/include/asm/sn/io.h
new file mode 100644
index 000000000..211f1e83b
--- /dev/null
+++ b/arch/mips/include/asm/sn/io.h
@@ -0,0 +1,59 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000, 2003 Ralf Baechle
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_SN_IO_H
+#define _ASM_SN_IO_H
+
+#if defined(CONFIG_SGI_IP27)
+#include <asm/sn/sn0/hubio.h>
+#endif
+
+
+#define IIO_ITTE_BASE 0x400160 /* base of translation table entries */
+#define IIO_ITTE(bigwin) (IIO_ITTE_BASE + 8*(bigwin))
+
+#define IIO_ITTE_OFFSET_BITS 5 /* size of offset field */
+#define IIO_ITTE_OFFSET_MASK ((1<<IIO_ITTE_OFFSET_BITS)-1)
+#define IIO_ITTE_OFFSET_SHIFT 0
+
+#define IIO_ITTE_WIDGET_BITS 4 /* size of widget field */
+#define IIO_ITTE_WIDGET_MASK ((1<<IIO_ITTE_WIDGET_BITS)-1)
+#define IIO_ITTE_WIDGET_SHIFT 8
+
+#define IIO_ITTE_IOSP 1 /* I/O Space bit */
+#define IIO_ITTE_IOSP_MASK 1
+#define IIO_ITTE_IOSP_SHIFT 12
+#define HUB_PIO_MAP_TO_MEM 0
+#define HUB_PIO_MAP_TO_IO 1
+
+#define IIO_ITTE_INVALID_WIDGET 3 /* an invalid widget */
+
+#define IIO_ITTE_PUT(nasid, bigwin, io_or_mem, widget, addr) \
+ REMOTE_HUB_S((nasid), IIO_ITTE(bigwin), \
+ (((((addr) >> BWIN_SIZE_BITS) & \
+ IIO_ITTE_OFFSET_MASK) << IIO_ITTE_OFFSET_SHIFT) | \
+ (io_or_mem << IIO_ITTE_IOSP_SHIFT) | \
+ (((widget) & IIO_ITTE_WIDGET_MASK) << IIO_ITTE_WIDGET_SHIFT)))
+
+#define IIO_ITTE_DISABLE(nasid, bigwin) \
+ IIO_ITTE_PUT((nasid), HUB_PIO_MAP_TO_MEM, \
+ (bigwin), IIO_ITTE_INVALID_WIDGET, 0)
+
+#define IIO_ITTE_GET(nasid, bigwin) REMOTE_HUB_PTR((nasid), IIO_ITTE(bigwin))
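+
+/*
+ * Sketch of setting up and tearing down big window 0 of a node
+ * (the widget number and offset are illustrative):
+ *
+ *	IIO_ITTE_PUT(nasid, 0, HUB_PIO_MAP_TO_MEM, 8, 0);
+ *	...
+ *	IIO_ITTE_DISABLE(nasid, 0);
+ */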
+
+/*
+ * Macro which takes a widget number and returns the
+ * IO PRB address of that widget.
+ * The value _x is expected to be a widget number in the
+ * range 0, or 8 - 0xF.
+ */
+#define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \
+ (_x) : \
+ (_x) - (HUB_WIDGET_ID_MIN-1)) << 3) )
+
+#endif /* _ASM_SN_IO_H */
diff --git a/arch/mips/include/asm/sn/ioc3.h b/arch/mips/include/asm/sn/ioc3.h
new file mode 100644
index 000000000..2c09c17ca
--- /dev/null
+++ b/arch/mips/include/asm/sn/ioc3.h
@@ -0,0 +1,606 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1999, 2000 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef MIPS_SN_IOC3_H
+#define MIPS_SN_IOC3_H
+
+#include <linux/types.h>
+
+/* serial port register map */
+struct ioc3_serialregs {
+ u32 sscr;
+ u32 stpir;
+ u32 stcir;
+ u32 srpir;
+ u32 srcir;
+ u32 srtr;
+ u32 shadow;
+};
+
+/* SUPERIO uart register map */
+struct ioc3_uartregs {
+ u8 iu_lcr;
+ union {
+ u8 iu_iir; /* read only */
+ u8 iu_fcr; /* write only */
+ };
+ union {
+ u8 iu_ier; /* DLAB == 0 */
+ u8 iu_dlm; /* DLAB == 1 */
+ };
+ union {
+ u8 iu_rbr; /* read only, DLAB == 0 */
+ u8 iu_thr; /* write only, DLAB == 0 */
+ u8 iu_dll; /* DLAB == 1 */
+ };
+ u8 iu_scr;
+ u8 iu_msr;
+ u8 iu_lsr;
+ u8 iu_mcr;
+};
+
+struct ioc3_sioregs {
+ u8 fill[0x141]; /* starts at 0x141 */
+
+ u8 kbdcg;
+ u8 uartc;
+
+ u8 fill0[0x151 - 0x142 - 1];
+
+ u8 pp_dcr;
+ u8 pp_dsr;
+ u8 pp_data;
+
+ u8 fill1[0x159 - 0x153 - 1];
+
+ u8 pp_ecr;
+ u8 pp_cfgb;
+ u8 pp_fifa;
+
+ u8 fill2[0x16a - 0x15b - 1];
+
+ u8 rtcdat;
+ u8 rtcad;
+
+ u8 fill3[0x170 - 0x16b - 1];
+
+ struct ioc3_uartregs uartb; /* 0x20170 */
+ struct ioc3_uartregs uarta; /* 0x20178 */
+};
+
+struct ioc3_ethregs {
+ u32 emcr; /* 0x000f0 */
+ u32 eisr; /* 0x000f4 */
+ u32 eier; /* 0x000f8 */
+ u32 ercsr; /* 0x000fc */
+ u32 erbr_h; /* 0x00100 */
+ u32 erbr_l; /* 0x00104 */
+ u32 erbar; /* 0x00108 */
+ u32 ercir; /* 0x0010c */
+ u32 erpir; /* 0x00110 */
+ u32 ertr; /* 0x00114 */
+ u32 etcsr; /* 0x00118 */
+ u32 ersr; /* 0x0011c */
+ u32 etcdc; /* 0x00120 */
+ u32 ebir; /* 0x00124 */
+ u32 etbr_h; /* 0x00128 */
+ u32 etbr_l; /* 0x0012c */
+ u32 etcir; /* 0x00130 */
+ u32 etpir; /* 0x00134 */
+ u32 emar_h; /* 0x00138 */
+ u32 emar_l; /* 0x0013c */
+ u32 ehar_h; /* 0x00140 */
+ u32 ehar_l; /* 0x00144 */
+ u32 micr; /* 0x00148 */
+ u32 midr_r; /* 0x0014c */
+ u32 midr_w; /* 0x00150 */
+};
+
+struct ioc3_serioregs {
+ u32 km_csr; /* 0x0009c */
+ u32 k_rd; /* 0x000a0 */
+ u32 m_rd; /* 0x000a4 */
+ u32 k_wd; /* 0x000a8 */
+ u32 m_wd; /* 0x000ac */
+};
+
+/* Register layout of IOC3 in configuration space. */
+struct ioc3 {
+ /* PCI Config Space registers */
+ u32 pci_id; /* 0x00000 */
+ u32 pci_scr; /* 0x00004 */
+ u32 pci_rev; /* 0x00008 */
+ u32 pci_lat; /* 0x0000c */
+ u32 pci_addr; /* 0x00010 */
+ u32 pci_err_addr_l; /* 0x00014 */
+ u32 pci_err_addr_h; /* 0x00018 */
+
+ u32 sio_ir; /* 0x0001c */
+ u32 sio_ies; /* 0x00020 */
+ u32 sio_iec; /* 0x00024 */
+ u32 sio_cr; /* 0x00028 */
+ u32 int_out; /* 0x0002c */
+ u32 mcr; /* 0x00030 */
+
+ /* General Purpose I/O registers */
+ u32 gpcr_s; /* 0x00034 */
+ u32 gpcr_c; /* 0x00038 */
+ u32 gpdr; /* 0x0003c */
+ u32 gppr[16]; /* 0x00040 */
+
+ /* Parallel Port Registers */
+ u32 ppbr_h_a; /* 0x00080 */
+ u32 ppbr_l_a; /* 0x00084 */
+ u32 ppcr_a; /* 0x00088 */
+ u32 ppcr; /* 0x0008c */
+ u32 ppbr_h_b; /* 0x00090 */
+ u32 ppbr_l_b; /* 0x00094 */
+ u32 ppcr_b; /* 0x00098 */
+
+ /* Keyboard and Mouse Registers */
+ struct ioc3_serioregs serio;
+
+ /* Serial Port Registers */
+ u32 sbbr_h; /* 0x000b0 */
+ u32 sbbr_l; /* 0x000b4 */
+ struct ioc3_serialregs port_a;
+ struct ioc3_serialregs port_b;
+
+ /* Ethernet Registers */
+ struct ioc3_ethregs eth;
+ u32 pad1[(0x20000 - 0x00154) / 4];
+
+ /* SuperIO Registers XXX */
+ struct ioc3_sioregs sregs; /* 0x20000 */
+ u32 pad2[(0x40000 - 0x20180) / 4];
+
+ /* SSRAM Diagnostic Access */
+ u32 ssram[(0x80000 - 0x40000) / 4];
+
+ /* Bytebus device offsets
+ 0x80000 - Access to the generic devices selected with DEV0
+ 0x9FFFF bytebus DEV_SEL_0
+ 0xA0000 - Access to the generic devices selected with DEV1
+ 0xBFFFF bytebus DEV_SEL_1
+ 0xC0000 - Access to the generic devices selected with DEV2
+ 0xDFFFF bytebus DEV_SEL_2
+ 0xE0000 - Access to the generic devices selected with DEV3
+ 0xFFFFF bytebus DEV_SEL_3 */
+};
+
+
+#define PCI_LAT 0xc /* Latency Timer */
+#define PCI_SCR_DROP_MODE_EN 0x00008000 /* drop pios on parity err */
+#define UARTA_BASE 0x178
+#define UARTB_BASE 0x170
+
+/*
+ * Bytebus device space
+ */
+#define IOC3_BYTEBUS_DEV0 0x80000L
+#define IOC3_BYTEBUS_DEV1 0xa0000L
+#define IOC3_BYTEBUS_DEV2 0xc0000L
+#define IOC3_BYTEBUS_DEV3 0xe0000L
+
+/*
+ * Ethernet RX Buffer
+ */
+struct ioc3_erxbuf {
+ u32 w0; /* first word (valid,bcnt,cksum) */
+ u32 err; /* second word various errors */
+ /* next comes n bytes of padding */
+ /* then the received ethernet frame itself */
+};
+
+#define ERXBUF_IPCKSUM_MASK 0x0000ffff
+#define ERXBUF_BYTECNT_MASK 0x07ff0000
+#define ERXBUF_BYTECNT_SHIFT 16
+#define ERXBUF_V 0x80000000
+
+#define ERXBUF_CRCERR 0x00000001 /* aka RSV15 */
+#define ERXBUF_FRAMERR 0x00000002 /* aka RSV14 */
+#define ERXBUF_CODERR 0x00000004 /* aka RSV13 */
+#define ERXBUF_INVPREAMB 0x00000008 /* aka RSV18 */
+#define ERXBUF_LOLEN 0x00007000 /* aka RSV2_0 */
+#define ERXBUF_HILEN 0x03ff0000 /* aka RSV12_3 */
+#define ERXBUF_MULTICAST 0x04000000 /* aka RSV16 */
+#define ERXBUF_BROADCAST 0x08000000 /* aka RSV17 */
+#define ERXBUF_LONGEVENT 0x10000000 /* aka RSV19 */
+#define ERXBUF_BADPKT 0x20000000 /* aka RSV20 */
+#define ERXBUF_GOODPKT 0x40000000 /* aka RSV21 */
+#define ERXBUF_CARRIER 0x80000000 /* aka RSV22 */
+
+/*
+ * Ethernet TX Descriptor
+ */
+#define ETXD_DATALEN 104
+struct ioc3_etxd {
+ u32 cmd; /* command field */
+ u32 bufcnt; /* buffer counts field */
+ u64 p1; /* buffer pointer 1 */
+ u64 p2; /* buffer pointer 2 */
+ u8 data[ETXD_DATALEN]; /* opt. tx data */
+};
+
+#define ETXD_BYTECNT_MASK 0x000007ff /* total byte count */
+#define ETXD_INTWHENDONE 0x00001000 /* intr when done */
+#define ETXD_D0V 0x00010000 /* data 0 valid */
+#define ETXD_B1V 0x00020000 /* buf 1 valid */
+#define ETXD_B2V 0x00040000 /* buf 2 valid */
+#define ETXD_DOCHECKSUM 0x00080000 /* insert ip cksum */
+#define ETXD_CHKOFF_MASK 0x07f00000 /* cksum byte offset */
+#define ETXD_CHKOFF_SHIFT 20
+
+#define ETXD_D0CNT_MASK 0x0000007f
+#define ETXD_B1CNT_MASK 0x0007ff00
+#define ETXD_B1CNT_SHIFT 8
+#define ETXD_B2CNT_MASK 0x7ff00000
+#define ETXD_B2CNT_SHIFT 20
+
+/* ------------------------------------------------------------------------- */
+
+/* Superio Registers (PIO Access) */
+#define IOC3_SIO_BASE 0x20000
+#define IOC3_SIO_UARTC (IOC3_SIO_BASE+0x141) /* UART Config */
+#define IOC3_SIO_KBDCG (IOC3_SIO_BASE+0x142) /* KBD Config */
+#define IOC3_SIO_PP_BASE (IOC3_SIO_BASE+PP_BASE) /* Parallel Port */
+#define IOC3_SIO_RTC_BASE (IOC3_SIO_BASE+0x168) /* Real Time Clock */
+#define IOC3_SIO_UB_BASE (IOC3_SIO_BASE+UARTB_BASE) /* UART B */
+#define IOC3_SIO_UA_BASE (IOC3_SIO_BASE+UARTA_BASE) /* UART A */
+
+/* SSRAM Diagnostic Access */
+#define IOC3_SSRAM IOC3_RAM_OFF /* base of SSRAM diagnostic access */
+#define IOC3_SSRAM_LEN 0x40000 /* 256kb (addrspc sz, may not be populated) */
+#define IOC3_SSRAM_DM 0x0000ffff /* data mask */
+#define IOC3_SSRAM_PM 0x00010000 /* parity mask */
+
+/* bitmasks for PCI_SCR */
+#define PCI_SCR_PAR_RESP_EN 0x00000040 /* enb PCI parity checking */
+#define PCI_SCR_SERR_EN 0x00000100 /* enable the SERR# driver */
+#define PCI_SCR_DROP_MODE_EN 0x00008000 /* drop pios on parity err */
+#define PCI_SCR_RX_SERR (0x1 << 16)
+#define PCI_SCR_DROP_MODE (0x1 << 17)
+#define PCI_SCR_SIG_PAR_ERR (0x1 << 24)
+#define PCI_SCR_SIG_TAR_ABRT (0x1 << 27)
+#define PCI_SCR_RX_TAR_ABRT (0x1 << 28)
+#define PCI_SCR_SIG_MST_ABRT (0x1 << 29)
+#define PCI_SCR_SIG_SERR (0x1 << 30)
+#define PCI_SCR_PAR_ERR (0x1 << 31)
+
+/* bitmasks for IOC3_KM_CSR */
+#define KM_CSR_K_WRT_PEND 0x00000001 /* kbd port xmitting or resetting */
+#define KM_CSR_M_WRT_PEND 0x00000002 /* mouse port xmitting or resetting */
+#define KM_CSR_K_LCB 0x00000004 /* Line Cntrl Bit for last KBD write */
+#define KM_CSR_M_LCB 0x00000008 /* same for mouse */
+#define KM_CSR_K_DATA 0x00000010 /* state of kbd data line */
+#define KM_CSR_K_CLK 0x00000020 /* state of kbd clock line */
+#define KM_CSR_K_PULL_DATA 0x00000040 /* pull kbd data line low */
+#define KM_CSR_K_PULL_CLK 0x00000080 /* pull kbd clock line low */
+#define KM_CSR_M_DATA 0x00000100 /* state of ms data line */
+#define KM_CSR_M_CLK 0x00000200 /* state of ms clock line */
+#define KM_CSR_M_PULL_DATA 0x00000400 /* pull ms data line low */
+#define KM_CSR_M_PULL_CLK 0x00000800 /* pull ms clock line low */
+#define KM_CSR_EMM_MODE 0x00001000 /* emulation mode */
+#define KM_CSR_SIM_MODE 0x00002000 /* clock X8 */
+#define KM_CSR_K_SM_IDLE 0x00004000 /* Keyboard is idle */
+#define KM_CSR_M_SM_IDLE 0x00008000 /* Mouse is idle */
+#define KM_CSR_K_TO 0x00010000 /* Keyboard trying to send/receive */
+#define KM_CSR_M_TO 0x00020000 /* Mouse trying to send/receive */
+#define KM_CSR_K_TO_EN 0x00040000 /* KM_CSR_K_TO + KM_CSR_K_TO_EN = cause
+ SIO_IR to assert */
+#define KM_CSR_M_TO_EN 0x00080000 /* KM_CSR_M_TO + KM_CSR_M_TO_EN = cause
+ SIO_IR to assert */
+#define KM_CSR_K_CLAMP_1 0x00100000 /* Pull K_CLK low aft recv 1 char */
+#define KM_CSR_M_CLAMP_1 0x00200000 /* Pull M_CLK low aft recv 1 char */
+#define KM_CSR_K_CLAMP_3 0x00400000 /* Pull K_CLK low aft recv 3 chars */
+#define KM_CSR_M_CLAMP_3 0x00800000 /* Pull M_CLK low aft recv 3 chars */
+
+/* bitmasks for IOC3_K_RD and IOC3_M_RD */
+#define KM_RD_DATA_2 0x000000ff /* 3rd char recvd since last read */
+#define KM_RD_DATA_2_SHIFT 0
+#define KM_RD_DATA_1 0x0000ff00 /* 2nd char recvd since last read */
+#define KM_RD_DATA_1_SHIFT 8
+#define KM_RD_DATA_0 0x00ff0000 /* 1st char recvd since last read */
+#define KM_RD_DATA_0_SHIFT 16
+#define KM_RD_FRAME_ERR_2 0x01000000 /* framing or parity error in byte 2 */
+#define KM_RD_FRAME_ERR_1 0x02000000 /* same for byte 1 */
+#define KM_RD_FRAME_ERR_0 0x04000000 /* same for byte 0 */
+
+#define KM_RD_KBD_MSE 0x08000000 /* 0 if from kbd, 1 if from mouse */
+#define KM_RD_OFLO 0x10000000 /* 4th char recvd before this read */
+#define KM_RD_VALID_2 0x20000000 /* DATA_2 valid */
+#define KM_RD_VALID_1 0x40000000 /* DATA_1 valid */
+#define KM_RD_VALID_0 0x80000000 /* DATA_0 valid */
+#define KM_RD_VALID_ALL (KM_RD_VALID_0|KM_RD_VALID_1|KM_RD_VALID_2)
+
+/* bitmasks for IOC3_K_WD & IOC3_M_WD */
+#define KM_WD_WRT_DATA 0x000000ff /* write to keyboard/mouse port */
+#define KM_WD_WRT_DATA_SHIFT 0
+
+/* bitmasks for serial RX status byte */
+#define RXSB_OVERRUN 0x01 /* char(s) lost */
+#define RXSB_PAR_ERR 0x02 /* parity error */
+#define RXSB_FRAME_ERR 0x04 /* framing error */
+#define RXSB_BREAK 0x08 /* break character */
+#define RXSB_CTS 0x10 /* state of CTS */
+#define RXSB_DCD 0x20 /* state of DCD */
+#define RXSB_MODEM_VALID 0x40 /* DCD, CTS and OVERRUN are valid */
+#define RXSB_DATA_VALID		0x80	/* data byte, FRAME_ERR, PAR_ERR & BREAK valid */
+
+/* bitmasks for serial TX control byte */
+#define TXCB_INT_WHEN_DONE 0x20 /* interrupt after this byte is sent */
+#define TXCB_INVALID 0x00 /* byte is invalid */
+#define TXCB_VALID 0x40 /* byte is valid */
+#define TXCB_MCR 0x80 /* data<7:0> to modem control register */
+#define TXCB_DELAY 0xc0 /* delay data<7:0> mSec */
+
+/* bitmasks for IOC3_SBBR_L */
+#define SBBR_L_SIZE 0x00000001 /* 0 == 1KB rings, 1 == 4KB rings */
+#define SBBR_L_BASE 0xfffff000 /* lower serial ring base addr */
+
+/* bitmasks for IOC3_SSCR_<A:B> */
+#define SSCR_RX_THRESHOLD 0x000001ff /* hiwater mark */
+#define SSCR_TX_TIMER_BUSY 0x00010000 /* TX timer in progress */
+#define SSCR_HFC_EN 0x00020000 /* hardware flow control enabled */
+#define SSCR_RX_RING_DCD 0x00040000 /* post RX record on delta-DCD */
+#define SSCR_RX_RING_CTS 0x00080000 /* post RX record on delta-CTS */
+#define SSCR_HIGH_SPD 0x00100000 /* 4X speed */
+#define SSCR_DIAG 0x00200000 /* bypass clock divider for sim */
+#define SSCR_RX_DRAIN 0x08000000 /* drain RX buffer to memory */
+#define SSCR_DMA_EN 0x10000000 /* enable ring buffer DMA */
+#define SSCR_DMA_PAUSE 0x20000000 /* pause DMA */
+#define SSCR_PAUSE_STATE 0x40000000 /* sets when PAUSE takes effect */
+#define SSCR_RESET 0x80000000 /* reset DMA channels */
+
+/* all producer/consumer pointers are the same bitfield */
+#define PROD_CONS_PTR_4K 0x00000ff8 /* for 4K buffers */
+#define PROD_CONS_PTR_1K 0x000003f8 /* for 1K buffers */
+#define PROD_CONS_PTR_OFF 3
+
+/* bitmasks for IOC3_SRCIR_<A:B> */
+#define SRCIR_ARM 0x80000000 /* arm RX timer */
+
+/* bitmasks for IOC3_SRPIR_<A:B> */
+#define SRPIR_BYTE_CNT 0x07000000 /* bytes in packer */
+#define SRPIR_BYTE_CNT_SHIFT 24
+
+/* bitmasks for IOC3_STCIR_<A:B> */
+#define STCIR_BYTE_CNT 0x0f000000 /* bytes in unpacker */
+#define STCIR_BYTE_CNT_SHIFT 24
+
+/* bitmasks for IOC3_SHADOW_<A:B> */
+#define SHADOW_DR 0x00000001 /* data ready */
+#define SHADOW_OE 0x00000002 /* overrun error */
+#define SHADOW_PE 0x00000004 /* parity error */
+#define SHADOW_FE 0x00000008 /* framing error */
+#define SHADOW_BI 0x00000010 /* break interrupt */
+#define SHADOW_THRE 0x00000020 /* transmit holding register empty */
+#define SHADOW_TEMT 0x00000040 /* transmit shift register empty */
+#define SHADOW_RFCE 0x00000080 /* char in RX fifo has an error */
+#define SHADOW_DCTS 0x00010000 /* delta clear to send */
+#define SHADOW_DDCD 0x00080000 /* delta data carrier detect */
+#define SHADOW_CTS 0x00100000 /* clear to send */
+#define SHADOW_DCD 0x00800000 /* data carrier detect */
+#define SHADOW_DTR 0x01000000 /* data terminal ready */
+#define SHADOW_RTS 0x02000000 /* request to send */
+#define SHADOW_OUT1 0x04000000 /* 16550 OUT1 bit */
+#define SHADOW_OUT2 0x08000000 /* 16550 OUT2 bit */
+#define SHADOW_LOOP 0x10000000 /* loopback enabled */
+
+/* bitmasks for IOC3_SRTR_<A:B> */
+#define SRTR_CNT 0x00000fff /* reload value for RX timer */
+#define SRTR_CNT_VAL 0x0fff0000 /* current value of RX timer */
+#define SRTR_CNT_VAL_SHIFT 16
+#define SRTR_HZ 16000 /* SRTR clock frequency */
+
+/* bitmasks for IOC3_SIO_IR, IOC3_SIO_IEC and IOC3_SIO_IES */
+#define SIO_IR_SA_TX_MT 0x00000001 /* Serial port A TX empty */
+#define SIO_IR_SA_RX_FULL 0x00000002 /* port A RX buf full */
+#define SIO_IR_SA_RX_HIGH 0x00000004 /* port A RX hiwat */
+#define SIO_IR_SA_RX_TIMER 0x00000008 /* port A RX timeout */
+#define SIO_IR_SA_DELTA_DCD 0x00000010 /* port A delta DCD */
+#define SIO_IR_SA_DELTA_CTS 0x00000020 /* port A delta CTS */
+#define SIO_IR_SA_INT 0x00000040 /* port A pass-thru intr */
+#define SIO_IR_SA_TX_EXPLICIT 0x00000080 /* port A explicit TX thru */
+#define SIO_IR_SA_MEMERR 0x00000100 /* port A PCI error */
+#define SIO_IR_SB_TX_MT 0x00000200 /* */
+#define SIO_IR_SB_RX_FULL 0x00000400 /* */
+#define SIO_IR_SB_RX_HIGH 0x00000800 /* */
+#define SIO_IR_SB_RX_TIMER 0x00001000 /* */
+#define SIO_IR_SB_DELTA_DCD 0x00002000 /* */
+#define SIO_IR_SB_DELTA_CTS 0x00004000 /* */
+#define SIO_IR_SB_INT 0x00008000 /* */
+#define SIO_IR_SB_TX_EXPLICIT 0x00010000 /* */
+#define SIO_IR_SB_MEMERR 0x00020000 /* */
+#define SIO_IR_PP_INT 0x00040000 /* P port pass-thru intr */
+#define SIO_IR_PP_INTA 0x00080000 /* PP context A thru */
+#define SIO_IR_PP_INTB 0x00100000 /* PP context B thru */
+#define SIO_IR_PP_MEMERR 0x00200000 /* PP PCI error */
+#define SIO_IR_KBD_INT 0x00400000 /* kbd/mouse intr */
+#define SIO_IR_RT_INT 0x08000000 /* RT output pulse */
+#define SIO_IR_GEN_INT1 0x10000000 /* RT input pulse */
+#define SIO_IR_GEN_INT_SHIFT 28
+
+/* per device interrupt masks */
+#define SIO_IR_SA (SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL | \
+ SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER | \
+ SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS | \
+ SIO_IR_SA_INT | SIO_IR_SA_TX_EXPLICIT | \
+ SIO_IR_SA_MEMERR)
+#define SIO_IR_SB (SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL | \
+ SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER | \
+ SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS | \
+ SIO_IR_SB_INT | SIO_IR_SB_TX_EXPLICIT | \
+ SIO_IR_SB_MEMERR)
+#define SIO_IR_PP (SIO_IR_PP_INT | SIO_IR_PP_INTA | \
+ SIO_IR_PP_INTB | SIO_IR_PP_MEMERR)
+#define SIO_IR_RT (SIO_IR_RT_INT | SIO_IR_GEN_INT1)
+
+/* bitmasks for SIO_CR */
+#define SIO_CR_SIO_RESET 0x00000001 /* reset the SIO */
+#define SIO_CR_SER_A_BASE 0x000000fe /* DMA poll addr port A */
+#define SIO_CR_SER_A_BASE_SHIFT 1
+#define SIO_CR_SER_B_BASE 0x00007f00 /* DMA poll addr port B */
+#define SIO_CR_SER_B_BASE_SHIFT 8
+#define SIO_SR_CMD_PULSE 0x00078000 /* byte bus strobe length */
+#define SIO_CR_CMD_PULSE_SHIFT 15
+#define SIO_CR_ARB_DIAG		0x00380000	/* cur !enet PCI request (ro) */
+#define SIO_CR_ARB_DIAG_TXA 0x00000000
+#define SIO_CR_ARB_DIAG_RXA 0x00080000
+#define SIO_CR_ARB_DIAG_TXB 0x00100000
+#define SIO_CR_ARB_DIAG_RXB 0x00180000
+#define SIO_CR_ARB_DIAG_PP 0x00200000
+#define SIO_CR_ARB_DIAG_IDLE 0x00400000 /* 0 -> active request (ro) */
+
+/* bitmasks for INT_OUT */
+#define INT_OUT_COUNT 0x0000ffff /* pulse interval timer */
+#define INT_OUT_MODE 0x00070000 /* mode mask */
+#define INT_OUT_MODE_0 0x00000000 /* set output to 0 */
+#define INT_OUT_MODE_1 0x00040000 /* set output to 1 */
+#define INT_OUT_MODE_1PULSE 0x00050000 /* send 1 pulse */
+#define INT_OUT_MODE_PULSES 0x00060000 /* send 1 pulse every interval */
+#define INT_OUT_MODE_SQW 0x00070000 /* toggle output every interval */
+#define INT_OUT_DIAG 0x40000000 /* diag mode */
+#define INT_OUT_INT_OUT 0x80000000 /* current state of INT_OUT */
+
+/* time constants for INT_OUT */
+#define INT_OUT_NS_PER_TICK (30 * 260) /* 30 ns PCI clock, divisor=260 */
+#define INT_OUT_TICKS_PER_PULSE 3 /* outgoing pulse lasts 3 ticks */
+#define INT_OUT_US_TO_COUNT(x) /* convert uS to a count value */ \
+ (((x) * 10 + INT_OUT_NS_PER_TICK / 200) * \
+ 100 / INT_OUT_NS_PER_TICK - 1)
+#define INT_OUT_COUNT_TO_US(x) /* convert count value to uS */ \
+ (((x) + 1) * INT_OUT_NS_PER_TICK / 1000)
+#define INT_OUT_MIN_TICKS 3 /* min period is width of pulse in "ticks" */
+#define INT_OUT_MAX_TICKS INT_OUT_COUNT /* largest possible count */
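+
+/*
+ * Worked example (integer arithmetic, illustrative): with 30 ns per PCI
+ * clock and a divisor of 260, one count is 7800 ns, so
+ * INT_OUT_US_TO_COUNT(1000) evaluates to 127 and
+ * INT_OUT_COUNT_TO_US(127) maps back to roughly 998 us.
+ */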
+
+/* bitmasks for GPCR */
+#define GPCR_DIR 0x000000ff /* tristate pin input or output */
+#define GPCR_DIR_PIN(x) (1<<(x)) /* access one of the DIR bits */
+#define GPCR_EDGE 0x000f0000 /* extint edge or level sensitive */
+#define GPCR_EDGE_PIN(x) (1<<((x)+15)) /* access one of the EDGE bits */
+
+/* values for GPCR */
+#define GPCR_INT_OUT_EN 0x00100000 /* enable INT_OUT to pin 0 */
+#define GPCR_MLAN_EN 0x00200000 /* enable MCR to pin 8 */
+#define GPCR_DIR_SERA_XCVR 0x00000080 /* Port A Transceiver select enable */
+#define GPCR_DIR_SERB_XCVR 0x00000040 /* Port B Transceiver select enable */
+#define GPCR_DIR_PHY_RST 0x00000020 /* ethernet PHY reset enable */
+
+/* defs for some of the generic I/O pins */
+#define GPCR_PHY_RESET 0x20 /* pin is output to PHY reset */
+#define GPCR_UARTB_MODESEL 0x40 /* pin is output to port B mode sel */
+#define GPCR_UARTA_MODESEL 0x80 /* pin is output to port A mode sel */
+
+#define GPPR_PHY_RESET_PIN 5 /* GIO pin cntrlling phy reset */
+#define GPPR_UARTB_MODESEL_PIN 6 /* GIO pin cntrlling uart b mode sel */
+#define GPPR_UARTA_MODESEL_PIN 7 /* GIO pin cntrlling uart a mode sel */
+
+/* ethernet */
+#define EMCR_DUPLEX 0x00000001
+#define EMCR_PROMISC 0x00000002
+#define EMCR_PADEN 0x00000004
+#define EMCR_RXOFF_MASK 0x000001f8
+#define EMCR_RXOFF_SHIFT 3
+#define EMCR_RAMPAR 0x00000200
+#define EMCR_BADPAR 0x00000800
+#define EMCR_BUFSIZ 0x00001000
+#define EMCR_TXDMAEN 0x00002000
+#define EMCR_TXEN 0x00004000
+#define EMCR_RXDMAEN 0x00008000
+#define EMCR_RXEN 0x00010000
+#define EMCR_LOOPBACK 0x00020000
+#define EMCR_ARB_DIAG 0x001c0000
+#define EMCR_ARB_DIAG_IDLE 0x00200000
+#define EMCR_RST 0x80000000
+
+#define EISR_RXTIMERINT 0x00000001
+#define EISR_RXTHRESHINT 0x00000002
+#define EISR_RXOFLO 0x00000004
+#define EISR_RXBUFOFLO 0x00000008
+#define EISR_RXMEMERR 0x00000010
+#define EISR_RXPARERR 0x00000020
+#define EISR_TXEMPTY 0x00010000
+#define EISR_TXRTRY 0x00020000
+#define EISR_TXEXDEF 0x00040000
+#define EISR_TXLCOL 0x00080000
+#define EISR_TXGIANT 0x00100000
+#define EISR_TXBUFUFLO 0x00200000
+#define EISR_TXEXPLICIT 0x00400000
+#define EISR_TXCOLLWRAP 0x00800000
+#define EISR_TXDEFERWRAP 0x01000000
+#define EISR_TXMEMERR 0x02000000
+#define EISR_TXPARERR 0x04000000
+
+#define ERCSR_THRESH_MASK 0x000001ff /* enet RX threshold */
+#define ERCSR_RX_TMR 0x40000000 /* simulation only */
+#define ERCSR_DIAG_OFLO 0x80000000 /* simulation only */
+
+#define ERBR_ALIGNMENT 4096
+#define ERBR_L_RXRINGBASE_MASK 0xfffff000
+
+#define ERBAR_BARRIER_BIT 0x0100
+#define ERBAR_RXBARR_MASK 0xffff0000
+#define ERBAR_RXBARR_SHIFT 16
+
+#define ERCIR_RXCONSUME_MASK 0x00000fff
+
+#define ERPIR_RXPRODUCE_MASK 0x00000fff
+#define ERPIR_ARM 0x80000000
+
+#define ERTR_CNT_MASK 0x000007ff
+
+#define ETCSR_IPGT_MASK 0x0000007f
+#define ETCSR_IPGR1_MASK 0x00007f00
+#define ETCSR_IPGR1_SHIFT 8
+#define ETCSR_IPGR2_MASK 0x007f0000
+#define ETCSR_IPGR2_SHIFT 16
+#define ETCSR_NOTXCLK 0x80000000
+
+#define ETCDC_COLLCNT_MASK 0x0000ffff
+#define ETCDC_DEFERCNT_MASK 0xffff0000
+#define ETCDC_DEFERCNT_SHIFT 16
+
+#define ETBR_ALIGNMENT (64*1024)
+#define ETBR_L_RINGSZ_MASK 0x00000001
+#define ETBR_L_RINGSZ128 0
+#define ETBR_L_RINGSZ512 1
+#define ETBR_L_TXRINGBASE_MASK 0xffffc000
+
+#define ETCIR_TXCONSUME_MASK 0x0000ffff
+#define ETCIR_IDLE 0x80000000
+
+#define ETPIR_TXPRODUCE_MASK 0x0000ffff
+
+#define EBIR_TXBUFPROD_MASK 0x0000001f
+#define EBIR_TXBUFCONS_MASK 0x00001f00
+#define EBIR_TXBUFCONS_SHIFT 8
+#define EBIR_RXBUFPROD_MASK 0x007fc000
+#define EBIR_RXBUFPROD_SHIFT 14
+#define EBIR_RXBUFCONS_MASK 0xff800000
+#define EBIR_RXBUFCONS_SHIFT 23
+
+#define MICR_REGADDR_MASK 0x0000001f
+#define MICR_PHYADDR_MASK 0x000003e0
+#define MICR_PHYADDR_SHIFT 5
+#define MICR_READTRIG 0x00000400
+#define MICR_BUSY 0x00000800
+
+#define MIDR_DATA_MASK 0x0000ffff
+
+/* subsystem IDs supplied by card detection in pci-xtalk-bridge */
+#define IOC3_SUBSYS_IP27_BASEIO6G 0xc300
+#define IOC3_SUBSYS_IP27_MIO 0xc301
+#define IOC3_SUBSYS_IP27_BASEIO 0xc302
+#define IOC3_SUBSYS_IP29_SYSBOARD 0xc303
+#define IOC3_SUBSYS_IP30_SYSBOARD 0xc304
+#define IOC3_SUBSYS_MENET 0xc305
+#define IOC3_SUBSYS_MENET4 0xc306
+#define IOC3_SUBSYS_IO7 0xc307
+#define IOC3_SUBSYS_IO8 0xc308
+#define IOC3_SUBSYS_IO9 0xc309
+#define IOC3_SUBSYS_IP34_SYSBOARD 0xc30A
+
+#endif /* MIPS_SN_IOC3_H */
diff --git a/arch/mips/include/asm/sn/irq_alloc.h b/arch/mips/include/asm/sn/irq_alloc.h
new file mode 100644
index 000000000..09b89cecf
--- /dev/null
+++ b/arch/mips/include/asm/sn/irq_alloc.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SN_IRQ_ALLOC_H
+#define __ASM_SN_IRQ_ALLOC_H
+
+struct irq_alloc_info {
+ void *ctrl;
+ nasid_t nasid;
+ int pin;
+};
+
+#endif /* __ASM_SN_IRQ_ALLOC_H */
diff --git a/arch/mips/include/asm/sn/klconfig.h b/arch/mips/include/asm/sn/klconfig.h
new file mode 100644
index 000000000..117f85e4b
--- /dev/null
+++ b/arch/mips/include/asm/sn/klconfig.h
@@ -0,0 +1,894 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from IRIX <sys/SN/klconfig.h>.
+ *
+ * Copyright (C) 1992 - 1997, 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 1999, 2000 by Ralf Baechle
+ */
+#ifndef _ASM_SN_KLCONFIG_H
+#define _ASM_SN_KLCONFIG_H
+
+/*
+ * The KLCONFIG structures store info about the various BOARDs found
+ * during Hardware Discovery. In addition, it stores info about the
+ * components found on the BOARDs.
+ */
+
+/*
+ * WARNING:
+ * Certain assembly language routines (notably xxxxx.s) in the IP27PROM
+ * will depend on the format of the data structures in this file. In
+ * most cases, rearranging the fields can seriously break things.
+ * Adding fields in the beginning or middle can also break things.
+ * Add fields if necessary, to the end of a struct in such a way
+ * that offsets of existing fields do not change.
+ */
+
+#include <linux/types.h>
+#include <asm/sn/types.h>
+
+#if defined(CONFIG_SGI_IP27)
+
+#include <asm/sn/sn0/addrs.h>
+//#include <sys/SN/router.h>
+// XXX Stolen from <sys/SN/router.h>:
+#define MAX_ROUTER_PORTS (6) /* Max. number of ports on a router */
+#include <asm/sn/fru.h>
+//#include <sys/graph.h>
+//#include <sys/xtalk/xbow.h>
+
+#elif defined(CONFIG_SGI_IP35)
+
+#include <asm/sn/sn1/addrs.h>
+#include <sys/sn/router.h>
+#include <sys/graph.h>
+#include <asm/xtalk/xbow.h>
+
+#endif /* !CONFIG_SGI_IP27 && !CONFIG_SGI_IP35 */
+
+#if defined(CONFIG_SGI_IP27) || defined(CONFIG_SGI_IP35)
+#include <asm/sn/agent.h>
+#include <asm/fw/arc/types.h>
+#include <asm/fw/arc/hinv.h>
+#if defined(CONFIG_SGI_IP35)
+// The hack file has to be before vector and after sn0_fru....
+#include <asm/hack.h>
+#include <asm/sn/vector.h>
+#include <asm/xtalk/xtalk.h>
+#endif /* CONFIG_SGI_IP35 */
+#endif /* CONFIG_SGI_IP27 || CONFIG_SGI_IP35 */
+
+typedef u64 nic_t;
+
+#define KLCFGINFO_MAGIC 0xbeedbabe
+
+typedef s32 klconf_off_t;
+
+/*
+ * Some IMPORTANT OFFSETS. These are the offsets on all NODES.
+ */
+#define MAX_MODULE_ID 255
+#define SIZE_PAD 4096 /* 4k padding for structures */
+/*
+ * 1 NODE brd, 2 Router brd (1 8p, 1 meta), 6 Widgets,
+ * 2 Midplanes assuming no pci card cages
+ */
+#define MAX_SLOTS_PER_NODE (1 + 2 + 6 + 2)
+
+/* XXX if each node is guaranteed to have some memory */
+
+#define MAX_PCI_DEVS 8
+
+/* lboard_t->brd_flags fields */
+/* All bits in this field are currently used. Try the pad fields if
+ * you need more flag bits */
+
+#define ENABLE_BOARD 0x01
+#define FAILED_BOARD 0x02
+#define DUPLICATE_BOARD 0x04 /* Boards like midplanes/routers which
+ are discovered twice. Use one of them */
+#define VISITED_BOARD 0x08 /* Used for compact hub numbering. */
+#define LOCAL_MASTER_IO6 0x10 /* master io6 for that node */
+#define GLOBAL_MASTER_IO6 0x20
+#define THIRD_NIC_PRESENT 0x40 /* for future use */
+#define SECOND_NIC_PRESENT 0x80 /* addons like MIO are present */
+
+/* klinfo->flags fields */
+
+#define KLINFO_ENABLE 0x01 /* This component is enabled */
+#define KLINFO_FAILED 0x02 /* This component failed */
+#define KLINFO_DEVICE 0x04 /* This component is a device */
+#define KLINFO_VISITED 0x08 /* This component has been visited */
+#define KLINFO_CONTROLLER 0x10 /* This component is a device controller */
+#define KLINFO_INSTALL 0x20 /* Install a driver */
+#define KLINFO_HEADLESS 0x40 /* Headless (or hubless) component */
+#define IS_CONSOLE_IOC3(i) ((((klinfo_t *)i)->flags) & KLINFO_INSTALL)
+
+#define GB2 0x80000000
+
+#define MAX_RSV_PTRS 32
+
+/* Structures to manage various data storage areas */
+/* The numbers must be contiguous since the array index i
+ * is used in the code to allocate various areas.
+ */
+
+#define BOARD_STRUCT 0
+#define COMPONENT_STRUCT 1
+#define ERRINFO_STRUCT 2
+#define KLMALLOC_TYPE_MAX (ERRINFO_STRUCT + 1)
+#define DEVICE_STRUCT 3
+
+
+typedef struct console_s {
+ unsigned long uart_base;
+ unsigned long config_base;
+ unsigned long memory_base;
+ short baud;
+ short flag;
+ int type;
+ nasid_t nasid;
+ char wid;
+ char npci;
+ nic_t baseio_nic;
+} console_t;
+
+typedef struct klc_malloc_hdr {
+ klconf_off_t km_base;
+ klconf_off_t km_limit;
+ klconf_off_t km_current;
+} klc_malloc_hdr_t;
+
+/* Functions/macros needed to use this structure */
+
+typedef struct kl_config_hdr {
+ u64 ch_magic; /* set this to KLCFGINFO_MAGIC */
+ u32 ch_version; /* structure version number */
+ klconf_off_t ch_malloc_hdr_off; /* offset of ch_malloc_hdr */
+ klconf_off_t ch_cons_off; /* offset of ch_cons */
+ klconf_off_t ch_board_info; /* the link list of boards */
+ console_t ch_cons_info; /* address info of the console */
+ klc_malloc_hdr_t ch_malloc_hdr[KLMALLOC_TYPE_MAX];
+ confidence_t ch_sw_belief; /* confidence that software is bad*/
+ confidence_t ch_sn0net_belief; /* confidence that sn0net is bad */
+} kl_config_hdr_t;
+
+
+#define KL_CONFIG_HDR(_nasid) ((kl_config_hdr_t *)(KLCONFIG_ADDR(_nasid)))
+#define KL_CONFIG_INFO_OFFSET(_nasid) \
+ (KL_CONFIG_HDR(_nasid)->ch_board_info)
+#define KL_CONFIG_INFO_SET_OFFSET(_nasid, _off) \
+ (KL_CONFIG_HDR(_nasid)->ch_board_info = (_off))
+
+#define KL_CONFIG_INFO(_nasid) \
+ (lboard_t *)((KL_CONFIG_HDR(_nasid)->ch_board_info) ? \
+ NODE_OFFSET_TO_K1((_nasid), KL_CONFIG_HDR(_nasid)->ch_board_info) : \
+ 0)
+#define KL_CONFIG_MAGIC(_nasid) (KL_CONFIG_HDR(_nasid)->ch_magic)
+
+#define KL_CONFIG_CHECK_MAGIC(_nasid) \
+ (KL_CONFIG_HDR(_nasid)->ch_magic == KLCFGINFO_MAGIC)
+
+#define KL_CONFIG_HDR_INIT_MAGIC(_nasid) \
+ (KL_CONFIG_HDR(_nasid)->ch_magic = KLCFGINFO_MAGIC)
+
+/* --- New Macros for the changed kl_config_hdr_t structure --- */
+
+#define PTR_CH_MALLOC_HDR(_k) ((klc_malloc_hdr_t *)\
+ ((unsigned long)_k + (_k->ch_malloc_hdr_off)))
+
+#define KL_CONFIG_CH_MALLOC_HDR(_n) PTR_CH_MALLOC_HDR(KL_CONFIG_HDR(_n))
+
+#define PTR_CH_CONS_INFO(_k) ((console_t *)\
+ ((unsigned long)_k + (_k->ch_cons_off)))
+
+#define KL_CONFIG_CH_CONS_INFO(_n) PTR_CH_CONS_INFO(KL_CONFIG_HDR(_n))
+
+/* ------------------------------------------------------------- */
+
+#define KL_CONFIG_INFO_START(_nasid) \
+ (klconf_off_t)(KLCONFIG_OFFSET(_nasid) + sizeof(kl_config_hdr_t))
+
+#define KL_CONFIG_BOARD_NASID(_brd) ((_brd)->brd_nasid)
+#define KL_CONFIG_BOARD_SET_NEXT(_brd, _off) ((_brd)->brd_next = (_off))
+
+#define KL_CONFIG_DUPLICATE_BOARD(_brd) ((_brd)->brd_flags & DUPLICATE_BOARD)
+
+#define XBOW_PORT_TYPE_HUB(_xbowp, _link) \
+ ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_HUB)
+#define XBOW_PORT_TYPE_IO(_xbowp, _link) \
+ ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_IO)
+
+#define XBOW_PORT_IS_ENABLED(_xbowp, _link) \
+ ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_ENABLE)
+#define XBOW_PORT_NASID(_xbowp, _link) \
+ ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_nasid)
+
+#define XBOW_PORT_IO 0x1
+#define XBOW_PORT_HUB 0x2
+#define XBOW_PORT_ENABLE 0x4
+
+#define SN0_PORT_FENCE_SHFT 0
+#define SN0_PORT_FENCE_MASK (1 << SN0_PORT_FENCE_SHFT)
+
+/*
+ * The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD
+ * can be either 'LOCAL' or 'REMOTE'. LOCAL means it is attached to
+ * the LOCAL/current NODE. REMOTE means it is attached to a different
+ * node.(TBD - Need a way to treat ROUTER boards.)
+ *
+ * There are 2 different structures to represent these boards -
+ * lboard - Local board, rboard - remote board. These 2 structures
+ * can be arbitrarily mixed in the LINKED LIST of BOARDs. (Refer
+ * Figure below). The first byte of the rboard or lboard structure
+ * is used to find out its type - no unions are used.
+ * If it is a lboard, then the config info of this board will be found
+ * on the local node. (LOCAL NODE BASE + offset value gives pointer to
+ * the structure.)
+ * If it is a rboard, the local structure contains the node number
+ * and the offset of the beginning of the LINKED LIST on the remote node.
+ * The details of the hardware on a remote node can be built locally,
+ * if required, by reading the LINKED LIST on the remote node and
+ * ignoring all the rboards on that node.
+ *
+ * The local node uses the REMOTE NODE NUMBER + OFFSET to point to the
+ * First board info on the remote node. The remote node list is
+ * traversed as the local list, using the REMOTE BASE ADDRESS and not
+ * the local base address and ignoring all rboard values.
+ *
+ *
+ KLCONFIG
+
+ +------------+ +------------+ +------------+ +------------+
+ | lboard | +-->| lboard | +-->| rboard | +-->| lboard |
+ +------------+ | +------------+ | +------------+ | +------------+
+ | board info | | | board info | | |errinfo,bptr| | | board info |
+ +------------+ | +------------+ | +------------+ | +------------+
+ | offset |--+ | offset |--+ | offset |--+ |offset=NULL |
+ +------------+ +------------+ +------------+ +------------+
+
+
+ +------------+
+ | board info |
+ +------------+ +--------------------------------+
+ | compt 1 |------>| type, rev, diaginfo, size ... | (CPU)
+ +------------+ +--------------------------------+
+ | compt 2 |--+
+ +------------+ | +--------------------------------+
+ | ... | +--->| type, rev, diaginfo, size ... | (MEM_BANK)
+ +------------+ +--------------------------------+
+ | errinfo |--+
+ +------------+ | +--------------------------------+
+ +--->|r/l brd errinfo,compt err flags |
+ +--------------------------------+
+
+ *
+ * Each BOARD consists of COMPONENTs and the BOARD structure has
+ * pointers (offsets) to its COMPONENT structure.
+ * The COMPONENT structure has version info, size and speed info, revision,
+ * error info and the NIC info. This structure can accommodate any
+ * BOARD with arbitrary COMPONENT composition.
+ *
+ * The ERRORINFO part of each BOARD has error information
+ * that describes errors about the BOARD itself. It also has flags to
+ * indicate the COMPONENT(s) on the board that have errors. The error
+ * information specific to the COMPONENT is present in the respective
+ * COMPONENT structure.
+ *
+ * The ERRORINFO structure is also treated like a COMPONENT, ie. the
+ * BOARD has pointers(offset) to the ERRORINFO structure. The rboard
+ * structure also has a pointer to the ERRORINFO structure. This is
+ * the place to store ERRORINFO about a REMOTE NODE, if the HUB on
+ * that NODE is not working or if the REMOTE MEMORY is BAD. In cases where
+ * only the CPU of the REMOTE NODE is disabled, the ERRORINFO pointer can
+ * be a NODE NUMBER, REMOTE OFFSET combination, pointing to error info
+ * which is present on the REMOTE NODE.(TBD)
+ * REMOTE ERRINFO can be stored on any of the nearest nodes
+ * or on all the nearest nodes.(TBD)
+ * Like BOARD structures, REMOTE ERRINFO structures can be built locally
+ * using the rboard errinfo pointer.
+ *
+ * In order to get useful information from this Data organization, a set of
+ * interface routines are provided (TBD). The important thing to remember while
+ * manipulating the structures, is that, the NODE number information should
+ * be used. If the NODE is non-zero (remote) then each offset should
+ * be added to the REMOTE BASE ADDR else it should be added to the LOCAL BASE ADDR.
+ * This includes offsets for BOARDS, COMPONENTS and ERRORINFO.
+ *
+ * Note that these structures do not provide much info about connectivity.
+ * That info will be part of HWGRAPH, which is an extension of the cfg_t
+ * data structure. (ref IP27prom/cfg.h) It has to be extended to include
+ * the IO part of the Network(TBD).
+ *
+ * The data structures below define the above concepts.
+ */
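+
+/*
+ * Minimal traversal sketch using the accessors defined below (the loop
+ * body is an assumption):
+ *
+ *	lboard_t *brd;
+ *
+ *	for (brd = KL_CONFIG_INFO(nasid); brd; brd = KLCF_NEXT(brd)) {
+ *		if (KLCF_REMOTE(brd))
+ *			continue;	// details live on the remote node
+ *		// inspect KLCF_CLASS(brd) / KLCF_TYPE(brd), then
+ *		// KLCF_COMP(brd, i) for i < KLCF_NUM_COMPS(brd)
+ *	}
+ */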
+
+/*
+ * Values for CPU types
+ */
+#define KL_CPU_R4000 0x1 /* Standard R4000 */
+#define KL_CPU_TFP 0x2 /* TFP processor */
+#define KL_CPU_R10000 0x3 /* R10000 (T5) */
+#define KL_CPU_NONE (-1) /* no cpu present in slot */
+
+/*
+ * IP27 BOARD classes
+ */
+
+#define KLCLASS_MASK 0xf0
+#define KLCLASS_NONE 0x00
+#define KLCLASS_NODE 0x10 /* CPU, Memory and HUB board */
+#define KLCLASS_CPU KLCLASS_NODE
+#define KLCLASS_IO 0x20 /* BaseIO, 4 ch SCSI, ethernet, FDDI
+ and the non-graphics widget boards */
+#define KLCLASS_ROUTER 0x30 /* Router board */
+#define KLCLASS_MIDPLANE 0x40 /* We need to treat this as a board
+ so that we can record error info */
+#define KLCLASS_GFX 0x50 /* graphics boards */
+
+#define KLCLASS_PSEUDO_GFX 0x60 /* HDTV type cards that use a gfx
+ * hw ifc to xtalk and are not gfx
+ * class for sw purposes */
+
+#define KLCLASS_MAX 7 /* Bump this if a new CLASS is added */
+#define KLTYPE_MAX 10 /* Bump this if a new CLASS is added */
+
+#define KLCLASS_UNKNOWN 0xf0
+
+#define KLCLASS(_x) ((_x) & KLCLASS_MASK)
+
+/*
+ * IP27 board types
+ */
+
+#define KLTYPE_MASK 0x0f
+#define KLTYPE_NONE 0x00
+#define KLTYPE_EMPTY 0x00
+
+#define KLTYPE_WEIRDCPU (KLCLASS_CPU | 0x0)
+#define KLTYPE_IP27 (KLCLASS_CPU | 0x1) /* 2 CPUs(R10K) per board */
+
+#define KLTYPE_WEIRDIO (KLCLASS_IO | 0x0)
+#define KLTYPE_BASEIO (KLCLASS_IO | 0x1) /* IOC3, SuperIO, Bridge, SCSI */
+#define KLTYPE_IO6 KLTYPE_BASEIO /* Additional name */
+#define KLTYPE_4CHSCSI (KLCLASS_IO | 0x2)
+#define KLTYPE_MSCSI KLTYPE_4CHSCSI /* Additional name */
+#define KLTYPE_ETHERNET (KLCLASS_IO | 0x3)
+#define KLTYPE_MENET KLTYPE_ETHERNET /* Additional name */
+#define KLTYPE_FDDI (KLCLASS_IO | 0x4)
+#define KLTYPE_UNUSED (KLCLASS_IO | 0x5) /* XXX UNUSED */
+#define KLTYPE_HAROLD (KLCLASS_IO | 0x6) /* PCI SHOE BOX */
+#define KLTYPE_PCI KLTYPE_HAROLD
+#define KLTYPE_VME (KLCLASS_IO | 0x7) /* Any 3rd party VME card */
+#define KLTYPE_MIO (KLCLASS_IO | 0x8)
+#define KLTYPE_FC (KLCLASS_IO | 0x9)
+#define KLTYPE_LINC (KLCLASS_IO | 0xA)
+#define KLTYPE_TPU (KLCLASS_IO | 0xB) /* Tensor Processing Unit */
+#define KLTYPE_GSN_A (KLCLASS_IO | 0xC) /* Main GSN board */
+#define KLTYPE_GSN_B (KLCLASS_IO | 0xD) /* Auxiliary GSN board */
+
+#define KLTYPE_GFX (KLCLASS_GFX | 0x0) /* unknown graphics type */
+#define KLTYPE_GFX_KONA (KLCLASS_GFX | 0x1) /* KONA graphics on IP27 */
+#define KLTYPE_GFX_MGRA (KLCLASS_GFX | 0x3) /* MGRAS graphics on IP27 */
+
+#define KLTYPE_WEIRDROUTER (KLCLASS_ROUTER | 0x0)
+#define KLTYPE_ROUTER (KLCLASS_ROUTER | 0x1)
+#define KLTYPE_ROUTER2 KLTYPE_ROUTER /* Obsolete! */
+#define KLTYPE_NULL_ROUTER (KLCLASS_ROUTER | 0x2)
+#define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3)
+
+#define KLTYPE_WEIRDMIDPLANE (KLCLASS_MIDPLANE | 0x0)
+#define KLTYPE_MIDPLANE8 (KLCLASS_MIDPLANE | 0x1) /* 8 slot backplane */
+#define KLTYPE_MIDPLANE KLTYPE_MIDPLANE8
+#define KLTYPE_PBRICK_XBOW (KLCLASS_MIDPLANE | 0x2)
+
+#define KLTYPE_IOBRICK (KLCLASS_IOBRICK | 0x0)
+#define KLTYPE_IBRICK (KLCLASS_IOBRICK | 0x1)
+#define KLTYPE_PBRICK (KLCLASS_IOBRICK | 0x2)
+#define KLTYPE_XBRICK (KLCLASS_IOBRICK | 0x3)
+
+#define KLTYPE_PBRICK_BRIDGE KLTYPE_PBRICK
+
+/* The value of type should be more than 8 so that hinv prints
+ * out the board name from the NIC string. For values less than
+ * 8 the name of the board needs to be hard coded in a few places.
+ * When bringup started, NIC names had not been standardized, so we
+ * had to hard-code them. (For people interested in history.)
+ */
+#define KLTYPE_XTHD (KLCLASS_PSEUDO_GFX | 0x9)
+
+#define KLTYPE_UNKNOWN (KLCLASS_UNKNOWN | 0xf)
+
+#define KLTYPE(_x) ((_x) & KLTYPE_MASK)
+#define IS_MIO_PRESENT(l) ((l->brd_type == KLTYPE_BASEIO) && \
+ (l->brd_flags & SECOND_NIC_PRESENT))
+#define IS_MIO_IOC3(l, n) (IS_MIO_PRESENT(l) && (n > 2))
+
+/*
+ * board structures
+ */
+
+#define MAX_COMPTS_PER_BRD 24
+
+#define LOCAL_BOARD 1
+#define REMOTE_BOARD 2
+
+#define LBOARD_STRUCT_VERSION 2
+
+typedef struct lboard_s {
+ klconf_off_t brd_next; /* Next BOARD */
+ unsigned char struct_type; /* type of structure, local or remote */
+ unsigned char brd_type; /* type+class */
+ unsigned char brd_sversion; /* version of this structure */
+ unsigned char brd_brevision; /* board revision */
+ unsigned char brd_promver; /* board prom version, if any */
+ unsigned char brd_flags; /* Enabled, Disabled etc */
+ unsigned char brd_slot; /* slot number */
+ unsigned short brd_debugsw; /* Debug switches */
+ moduleid_t brd_module; /* module to which it belongs */
+ partid_t brd_partition; /* Partition number */
+ unsigned short brd_diagval; /* diagnostic value */
+ unsigned short brd_diagparm; /* diagnostic parameter */
+ unsigned char brd_inventory; /* inventory history */
+ unsigned char brd_numcompts; /* Number of components */
+ nic_t brd_nic; /* Number in CAN */
+ nasid_t brd_nasid; /* passed parameter */
+ klconf_off_t brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
+ klconf_off_t brd_errinfo; /* Board's error information */
+ struct lboard_s *brd_parent; /* Logical parent for this brd */
+ vertex_hdl_t brd_graph_link; /* vertex hdl to connect extern compts */
+ confidence_t brd_confidence; /* confidence that the board is bad */
+ nasid_t brd_owner; /* who owns this board */
+ unsigned char brd_nic_flags; /* To handle 8 more NICs */
+ char brd_name[32];
+} lboard_t;
+
+
+/*
+ * Make sure we pass back the calias space address for local boards.
+ * klconfig board traversal and error structure extraction defines.
+ */
+
+#define BOARD_SLOT(_brd) ((_brd)->brd_slot)
+
+#define KLCF_CLASS(_brd) KLCLASS((_brd)->brd_type)
+#define KLCF_TYPE(_brd) KLTYPE((_brd)->brd_type)
+#define KLCF_REMOTE(_brd) (((_brd)->struct_type & LOCAL_BOARD) ? 0 : 1)
+#define KLCF_NUM_COMPS(_brd) ((_brd)->brd_numcompts)
+#define KLCF_MODULE_ID(_brd) ((_brd)->brd_module)
+
+#define KLCF_NEXT(_brd) \
+ ((_brd)->brd_next ? \
+ (lboard_t *)(NODE_OFFSET_TO_K1(NASID_GET(_brd), (_brd)->brd_next)):\
+ NULL)
+#define KLCF_COMP(_brd, _ndx) \
+ (klinfo_t *)(NODE_OFFSET_TO_K1(NASID_GET(_brd), \
+ (_brd)->brd_compts[(_ndx)]))
+
+#define KLCF_COMP_ERROR(_brd, _comp) \
+ (NODE_OFFSET_TO_K1(NASID_GET(_brd), (_comp)->errinfo))
+
+#define KLCF_COMP_TYPE(_comp) ((_comp)->struct_type)
+#define KLCF_BRIDGE_W_ID(_comp) ((_comp)->physid) /* Widget ID */
+
+
+
+/*
+ * Generic info structure. This stores common info about a
+ * component.
+ */
+
+typedef struct klinfo_s { /* Generic info */
+ unsigned char struct_type; /* type of this structure */
+ unsigned char struct_version; /* version of this structure */
+ unsigned char flags; /* Enabled, disabled etc */
+ unsigned char revision; /* component revision */
+ unsigned short diagval; /* result of diagnostics */
+ unsigned short diagparm; /* diagnostic parameter */
+ unsigned char inventory; /* previous inventory status */
+	nic_t		nic;		/* Must be aligned properly */
+ unsigned char physid; /* physical id of component */
+ unsigned int virtid; /* virtual id as seen by system */
+ unsigned char widid; /* Widget id - if applicable */
+ nasid_t nasid; /* node number - from parent */
+ char pad1; /* pad out structure. */
+ char pad2; /* pad out structure. */
+ COMPONENT *arcs_compt; /* ptr to the arcs struct for ease*/
+ klconf_off_t errinfo; /* component specific errors */
+ unsigned short pad3; /* pci fields have moved over to */
+ unsigned short pad4; /* klbri_t */
+} klinfo_t ;
+
+#define KLCONFIG_INFO_ENABLED(_i) ((_i)->flags & KLINFO_ENABLE)
+/*
+ * Component structures.
+ * Following are the currently identified components:
+ * CPU, HUB, MEM_BANK,
+ * XBOW(consists of 16 WIDGETs, each of which can be HUB or GRAPHICS or BRIDGE)
+ * BRIDGE, IOC3, SuperIO, SCSI, FDDI
+ * ROUTER
+ * GRAPHICS
+ */
+#define KLSTRUCT_UNKNOWN 0
+#define KLSTRUCT_CPU 1
+#define KLSTRUCT_HUB 2
+#define KLSTRUCT_MEMBNK 3
+#define KLSTRUCT_XBOW 4
+#define KLSTRUCT_BRI 5
+#define KLSTRUCT_IOC3 6
+#define KLSTRUCT_PCI 7
+#define KLSTRUCT_VME 8
+#define KLSTRUCT_ROU 9
+#define KLSTRUCT_GFX 10
+#define KLSTRUCT_SCSI 11
+#define KLSTRUCT_FDDI 12
+#define KLSTRUCT_MIO 13
+#define KLSTRUCT_DISK 14
+#define KLSTRUCT_TAPE 15
+#define KLSTRUCT_CDROM 16
+#define KLSTRUCT_HUB_UART 17
+#define KLSTRUCT_IOC3ENET 18
+#define KLSTRUCT_IOC3UART 19
+#define KLSTRUCT_UNUSED 20 /* XXX UNUSED */
+#define KLSTRUCT_IOC3PCKM 21
+#define KLSTRUCT_RAD 22
+#define KLSTRUCT_HUB_TTY 23
+#define KLSTRUCT_IOC3_TTY 24
+
+/* Early Access IO proms are compatible
+ only with KLSTRUCT values up to 24. */
+
+#define KLSTRUCT_FIBERCHANNEL 25
+#define KLSTRUCT_MOD_SERIAL_NUM 26
+#define KLSTRUCT_IOC3MS 27
+#define KLSTRUCT_TPU 28
+#define KLSTRUCT_GSN_A 29
+#define KLSTRUCT_GSN_B 30
+#define KLSTRUCT_XTHD 31
+
+/*
+ * These are the indices of various components within a lboard structure.
+ */
+
+#define IP27_CPU0_INDEX 0
+#define IP27_CPU1_INDEX 1
+#define IP27_HUB_INDEX 2
+#define IP27_MEM_INDEX 3
+
+#define BASEIO_BRIDGE_INDEX 0
+#define BASEIO_IOC3_INDEX 1
+#define BASEIO_SCSI1_INDEX 2
+#define BASEIO_SCSI2_INDEX 3
+
+#define MIDPLANE_XBOW_INDEX 0
+#define ROUTER_COMPONENT_INDEX 0
+
+#define CH4SCSI_BRIDGE_INDEX 0
+
+/* Info holders for various hardware components */
+
+typedef u64 *pci_t;
+typedef u64 *vmeb_t;
+typedef u64 *vmed_t;
+typedef u64 *fddi_t;
+typedef u64 *scsi_t;
+typedef u64 *mio_t;
+typedef u64 *graphics_t;
+typedef u64 *router_t;
+
+/*
+ * The port info in the ip27_cfg area translates to an lboard_t in the
+ * KLCONFIG area. But since KLCONFIG does not use pointers, the lboard_t
+ * is stored in terms of a nasid and an offset from the start of the
+ * KLCONFIG area on that nasid.
+ */
+typedef struct klport_s {
+ nasid_t port_nasid;
+ unsigned char port_flag;
+ klconf_off_t port_offset;
+} klport_t;
+
+typedef struct klcpu_s { /* CPU */
+ klinfo_t cpu_info;
+ unsigned short cpu_prid; /* Processor PRID value */
+ unsigned short cpu_fpirr; /* FPU IRR value */
+ unsigned short cpu_speed; /* Speed in MHZ */
+ unsigned short cpu_scachesz; /* secondary cache size in MB */
+ unsigned short cpu_scachespeed;/* secondary cache speed in MHz */
+} klcpu_t ;
+
+#define CPU_STRUCT_VERSION 2
+
+typedef struct klhub_s { /* HUB */
+ klinfo_t hub_info;
+ unsigned int hub_flags; /* PCFG_HUB_xxx flags */
+ klport_t hub_port; /* hub is connected to this */
+ nic_t hub_box_nic; /* nic of containing box */
+ klconf_off_t hub_mfg_nic; /* MFG NIC string */
+ u64 hub_speed; /* Speed of hub in HZ */
+} klhub_t ;
+
+typedef struct klhub_uart_s { /* HUB */
+ klinfo_t hubuart_info;
+ unsigned int hubuart_flags; /* PCFG_HUB_xxx flags */
+ nic_t hubuart_box_nic; /* nic of containing box */
+} klhub_uart_t ;
+
+#define MEMORY_STRUCT_VERSION 2
+
+typedef struct klmembnk_s { /* MEMORY BANK */
+ klinfo_t membnk_info;
+ short membnk_memsz; /* Total memory in megabytes */
+ short membnk_dimm_select; /* bank to physical addr mapping*/
+ short membnk_bnksz[MD_MEM_BANKS]; /* Memory bank sizes */
+ short membnk_attr;
+} klmembnk_t ;
+
+#define KLCONFIG_MEMBNK_SIZE(_info, _bank) \
+ ((_info)->membnk_bnksz[(_bank)])
+
+
+#define MEMBNK_PREMIUM 1
+#define KLCONFIG_MEMBNK_PREMIUM(_info, _bank) \
+ ((_info)->membnk_attr & (MEMBNK_PREMIUM << (_bank)))
+
+#define MAX_SERIAL_NUM_SIZE 10
+
+typedef struct klmod_serial_num_s {
+ klinfo_t snum_info;
+ union {
+ char snum_str[MAX_SERIAL_NUM_SIZE];
+ unsigned long long snum_int;
+ } snum;
+} klmod_serial_num_t;
+
+/* Macros needed to access serial number structure in lboard_t.
+ Hard coded values are necessary since we cannot treat
+ serial number struct as a component without losing compatibility
+ between prom versions. */
+
+#define GET_SNUM_COMP(_l) ((klmod_serial_num_t *)\
+ KLCF_COMP(_l, _l->brd_numcompts))
+
+#define MAX_XBOW_LINKS 16
+
+typedef struct klxbow_s { /* XBOW */
+ klinfo_t xbow_info ;
+ klport_t xbow_port_info[MAX_XBOW_LINKS] ; /* Module number */
+ int xbow_master_hub_link;
+ /* type of brd connected+component struct ptr+flags */
+} klxbow_t ;
+
+#define MAX_PCI_SLOTS 8
+
+typedef struct klpci_device_s {
+ s32 pci_device_id; /* 32 bits of vendor/device ID. */
+ s32 pci_device_pad; /* 32 bits of padding. */
+} klpci_device_t;
+
+#define BRIDGE_STRUCT_VERSION 2
+
+typedef struct klbri_s { /* BRIDGE */
+ klinfo_t bri_info ;
+ unsigned char bri_eprominfo ; /* IO6prom connected to bridge */
+ unsigned char bri_bustype ; /* PCI/VME BUS bridge/GIO */
+ pci_t pci_specific ; /* PCI Board config info */
+ klpci_device_t bri_devices[MAX_PCI_DEVS] ; /* PCI IDs */
+ klconf_off_t bri_mfg_nic ;
+} klbri_t ;
+
+#define MAX_IOC3_TTY 2
+
+typedef struct klioc3_s { /* IOC3 */
+ klinfo_t ioc3_info ;
+ unsigned char ioc3_ssram ; /* Info about ssram */
+ unsigned char ioc3_nvram ; /* Info about nvram */
+ klinfo_t ioc3_superio ; /* Info about superio */
+ klconf_off_t ioc3_tty_off ;
+ klinfo_t ioc3_enet ;
+ klconf_off_t ioc3_enet_off ;
+ klconf_off_t ioc3_kbd_off ;
+} klioc3_t ;
+
+#define MAX_VME_SLOTS 8
+
+typedef struct klvmeb_s { /* VME BRIDGE - PCI CTLR */
+ klinfo_t vmeb_info ;
+ vmeb_t vmeb_specific ;
+ klconf_off_t vmeb_brdinfo[MAX_VME_SLOTS] ; /* VME Board config info */
+} klvmeb_t ;
+
+typedef struct klvmed_s { /* VME DEVICE - VME BOARD */
+ klinfo_t vmed_info ;
+ vmed_t vmed_specific ;
+ klconf_off_t vmed_brdinfo[MAX_VME_SLOTS] ; /* VME Board config info */
+} klvmed_t ;
+
+#define ROUTER_VECTOR_VERS 2
+
+/* XXX - Don't we need the number of ports here?!? */
+typedef struct klrou_s { /* ROUTER */
+ klinfo_t rou_info ;
+ unsigned int rou_flags ; /* PCFG_ROUTER_xxx flags */
+ nic_t rou_box_nic ; /* nic of the containing module */
+ klport_t rou_port[MAX_ROUTER_PORTS + 1] ; /* array index 1 to 6 */
+ klconf_off_t rou_mfg_nic ; /* MFG NIC string */
+ u64 rou_vector; /* vector from master node */
+} klrou_t ;
+
+/*
+ * Graphics Controller/Device
+ *
+ * (IP27/IO6) Prom versions 6.13 (and 6.5.1 kernels) and earlier
+ * used a couple different structures to store graphics information.
+ * For compatibility reasons, the newer data structure preserves some
+ * of the layout so that fields that are used in the old versions remain
+ * in the same place (with the same info). Determination of what version
+ * of this structure we have is done by checking the cookie field.
+ */
+#define KLGFX_COOKIE 0x0c0de000
+
+typedef struct klgfx_s { /* GRAPHICS Device */
+ klinfo_t gfx_info;
+ klconf_off_t old_gndevs; /* for compatibility with older proms */
+ klconf_off_t old_gdoff0; /* for compatibility with older proms */
+ unsigned int cookie; /* for compatibility with older proms */
+ unsigned int moduleslot;
+ struct klgfx_s *gfx_next_pipe;
+ graphics_t gfx_specific;
+ klconf_off_t pad0; /* for compatibility with older proms */
+ klconf_off_t gfx_mfg_nic;
+} klgfx_t;
+
+typedef struct klxthd_s {
+ klinfo_t xthd_info ;
+ klconf_off_t xthd_mfg_nic ; /* MFG NIC string */
+} klxthd_t ;
+
+typedef struct kltpu_s { /* TPU board */
+ klinfo_t tpu_info ;
+ klconf_off_t tpu_mfg_nic ; /* MFG NIC string */
+} kltpu_t ;
+
+typedef struct klgsn_s { /* GSN board */
+ klinfo_t gsn_info ;
+ klconf_off_t gsn_mfg_nic ; /* MFG NIC string */
+} klgsn_t ;
+
+#define MAX_SCSI_DEVS 16
+
+/*
+ * NOTE: This is the maximum-sized kl* structure and is used in klmalloc.c
+ * to allocate space of type COMPONENT. Make sure that if the size of
+ * any other component struct becomes more than this, then redefine
+ * that as the size to be klmalloced.
+ */
+
+typedef struct klscsi_s { /* SCSI Controller */
+ klinfo_t scsi_info ;
+ scsi_t scsi_specific ;
+ unsigned char scsi_numdevs ;
+ klconf_off_t scsi_devinfo[MAX_SCSI_DEVS] ;
+} klscsi_t ;
+
+typedef struct klscdev_s { /* SCSI device */
+ klinfo_t scdev_info ;
+ struct scsidisk_data *scdev_cfg ; /* driver fills up this */
+} klscdev_t ;
+
+typedef struct klttydev_s { /* TTY device */
+ klinfo_t ttydev_info ;
+ struct terminal_data *ttydev_cfg ; /* driver fills up this */
+} klttydev_t ;
+
+typedef struct klenetdev_s { /* ENET device */
+ klinfo_t enetdev_info ;
+ struct net_data *enetdev_cfg ; /* driver fills up this */
+} klenetdev_t ;
+
+typedef struct klkbddev_s { /* KBD device */
+ klinfo_t kbddev_info ;
+ struct keyboard_data *kbddev_cfg ; /* driver fills up this */
+} klkbddev_t ;
+
+typedef struct klmsdev_s { /* mouse device */
+ klinfo_t msdev_info ;
+ void *msdev_cfg ;
+} klmsdev_t ;
+
+#define MAX_FDDI_DEVS 10 /* XXX Is this true */
+
+typedef struct klfddi_s { /* FDDI */
+ klinfo_t fddi_info ;
+ fddi_t fddi_specific ;
+ klconf_off_t fddi_devinfo[MAX_FDDI_DEVS] ;
+} klfddi_t ;
+
+typedef struct klmio_s { /* MIO */
+ klinfo_t mio_info ;
+ mio_t mio_specific ;
+} klmio_t ;
+
+
+typedef union klcomp_s {
+ klcpu_t kc_cpu;
+ klhub_t kc_hub;
+ klmembnk_t kc_mem;
+ klxbow_t kc_xbow;
+ klbri_t kc_bri;
+ klioc3_t kc_ioc3;
+ klvmeb_t kc_vmeb;
+ klvmed_t kc_vmed;
+ klrou_t kc_rou;
+ klgfx_t kc_gfx;
+ klscsi_t kc_scsi;
+ klscdev_t kc_scsi_dev;
+ klfddi_t kc_fddi;
+ klmio_t kc_mio;
+ klmod_serial_num_t kc_snum ;
+} klcomp_t;
+
+typedef union kldev_s { /* for device structure allocation */
+ klscdev_t kc_scsi_dev ;
+ klttydev_t kc_tty_dev ;
+ klenetdev_t kc_enet_dev ;
+ klkbddev_t kc_kbd_dev ;
+} kldev_t ;
+
+/* Data structure interface routines. TBD */
+
+/* Include launch info in this file itself? TBD */
+
+/*
+ * TBD - Can the ARCS and device driver related info also be included in the
+ * KLCONFIG area?  On the IO4PROM, prom device driver info is part of the
+ * cfgnode_t structure, i.e. private to the IO4prom.
+ */
+
+/*
+ * TBD - Allocation issues.
+ *
+ * Do we need to mark off separate heaps for lboard_t, rboard_t, component,
+ * errinfo and allocate from them, or have a single heap and allocate all
+ * structures from it?  Debugging is easier in the former method since we can
+ * dump all similar structs in one command, but there will be lots of holes
+ * in memory, and max limits are needed for the number of structures.
+ * Another way to keep it organized is to have a union of all components
+ * and allocate an aligned chunk of memory greater than the biggest
+ * component.
+ */
+
+typedef union {
+ lboard_t *lbinfo ;
+} biptr_t ;
+
+
+#define BRI_PER_XBOW 6
+#define PCI_PER_BRI 8
+#define DEV_PER_PCI 16
+
+
+/* Virtual dipswitch values (starting from switch "7"): */
+
+#define VDS_NOGFX 0x8000 /* Don't enable gfx and autoboot */
+#define VDS_NOMP 0x100 /* Don't start slave processors */
+#define VDS_MANUMODE 0x80 /* Manufacturing mode */
+#define VDS_NOARB 0x40 /* No bootmaster arbitration */
+#define VDS_PODMODE 0x20 /* Go straight to POD mode */
+#define VDS_NO_DIAGS 0x10 /* Don't run any diags after BM arb */
+#define VDS_DEFAULTS 0x08 /* Use default environment values */
+#define VDS_NOMEMCLEAR 0x04 /* Don't run mem cfg code */
+#define VDS_2ND_IO4 0x02 /* Boot from the second IO4 */
+#define VDS_DEBUG_PROM 0x01 /* Print PROM debugging messages */
+
+/* external declarations of Linux kernel functions. */
+
+extern lboard_t *find_lboard(lboard_t *start, unsigned char type);
+extern klinfo_t *find_component(lboard_t *brd, klinfo_t *kli, unsigned char type);
+extern klinfo_t *find_first_component(lboard_t *brd, unsigned char type);
+extern lboard_t *find_lboard_class(lboard_t *start, unsigned char brd_class);
+
+#endif /* _ASM_SN_KLCONFIG_H */
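
As a hedged usage sketch (not part of the patch): the lookup helpers declared
just above can walk a node's board list and enumerate its CPU components.
KL_CONFIG_INFO() and the KLTYPE_IP27 board type are assumed to come from the
earlier, unshown part of klconfig.h and may differ in detail; printk() is the
usual kernel logging call.

    /* Hedged sketch only -- not part of the patch. */
    static void list_node_cpus(nasid_t nasid)
    {
    	lboard_t *brd;
    	klcpu_t *acpu;

    	/* KL_CONFIG_INFO() and KLTYPE_IP27 are assumptions here. */
    	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
    	if (!brd)
    		return;

    	for (acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU);
    	     acpu;
    	     acpu = (klcpu_t *)find_component(brd, &acpu->cpu_info,
    					      KLSTRUCT_CPU)) {
    		if (KLCONFIG_INFO_ENABLED(&acpu->cpu_info))
    			printk("CPU PRID %#x at %u MHz\n",
    			       acpu->cpu_prid, acpu->cpu_speed);
    	}
    }
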
diff --git a/arch/mips/include/asm/sn/kldir.h b/arch/mips/include/asm/sn/kldir.h
new file mode 100644
index 000000000..245f59bf3
--- /dev/null
+++ b/arch/mips/include/asm/sn/kldir.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_SN_KLDIR_H
+#define _ASM_SN_KLDIR_H
+
+#define KLDIR_MAGIC 0x434d5f53505f5357
+
+#define KLDIR_OFF_MAGIC 0x00
+#define KLDIR_OFF_OFFSET 0x08
+#define KLDIR_OFF_POINTER 0x10
+#define KLDIR_OFF_SIZE 0x18
+#define KLDIR_OFF_COUNT 0x20
+#define KLDIR_OFF_STRIDE 0x28
+
+#define KLDIR_ENT_SIZE 0x40
+#define KLDIR_MAX_ENTRIES (0x400 / 0x40)
+
+#ifndef __ASSEMBLY__
+typedef struct kldir_ent_s {
+ u64 magic; /* Indicates validity of entry */
+ off_t offset; /* Offset from start of node space */
+ unsigned long pointer; /* Pointer to area in some cases */
+ size_t size; /* Size in bytes */
+ u64 count; /* Repeat count if array, 1 if not */
+ size_t stride; /* Stride if array, 0 if not */
+ char rsvd[16]; /* Pad entry to 0x40 bytes */
+ /* NOTE: These 16 bytes are used in the Partition KLDIR
+ entry to store partition info. Refer to klpart.h for this. */
+} kldir_ent_t;
+#endif /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_SGI_IP27
+#include <asm/sn/sn0/kldir.h>
+#endif
+
+#endif /* _ASM_SN_KLDIR_H */
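
A hedged sketch (not part of the patch): KLDIR entries sit KLDIR_ENT_SIZE
apart from the start of a node's KLDIR area and are valid only when their
magic matches KLDIR_MAGIC. The kldir_base parameter below is a placeholder;
the real base address comes from the sn0-specific header included above.

    /* Hedged sketch only -- not part of the patch. */
    static kldir_ent_t *kldir_entry(void *kldir_base, int index)
    {
    	kldir_ent_t *ent;

    	if (index < 0 || index >= KLDIR_MAX_ENTRIES)
    		return NULL;

    	ent = (kldir_ent_t *)((char *)kldir_base + index * KLDIR_ENT_SIZE);
    	return (ent->magic == KLDIR_MAGIC) ? ent : NULL;
    }
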
diff --git a/arch/mips/include/asm/sn/klkernvars.h b/arch/mips/include/asm/sn/klkernvars.h
new file mode 100644
index 000000000..ea6b21795
--- /dev/null
+++ b/arch/mips/include/asm/sn/klkernvars.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * File ported from IRIX to Linux by Kanoj Sarcar, 06/08/00.
+ * Copyright 2000 Silicon Graphics, Inc.
+ */
+#ifndef __ASM_SN_KLKERNVARS_H
+#define __ASM_SN_KLKERNVARS_H
+
+#define KV_MAGIC_OFFSET 0x0
+#define KV_RO_NASID_OFFSET 0x4
+#define KV_RW_NASID_OFFSET 0x6
+
+#define KV_MAGIC 0x5f4b565f
+
+#ifndef __ASSEMBLY__
+
+#include <asm/sn/types.h>
+
+typedef struct kern_vars_s {
+ int kv_magic;
+ nasid_t kv_ro_nasid;
+ nasid_t kv_rw_nasid;
+ unsigned long kv_ro_baseaddr;
+ unsigned long kv_rw_baseaddr;
+} kern_vars_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_SN_KLKERNVARS_H */
diff --git a/arch/mips/include/asm/sn/launch.h b/arch/mips/include/asm/sn/launch.h
new file mode 100644
index 000000000..04226d8d3
--- /dev/null
+++ b/arch/mips/include/asm/sn/launch.h
@@ -0,0 +1,106 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2000 by Colin Ngam
+ */
+#ifndef _ASM_SN_LAUNCH_H
+#define _ASM_SN_LAUNCH_H
+
+#include <asm/sn/types.h>
+#include <asm/sn/addrs.h>
+
+/*
+ * The launch data structure resides at a fixed place in each node's memory
+ * and is used to communicate between the master processor and the slave
+ * processors.
+ *
+ * The master stores launch parameters in the launch structure
+ * corresponding to a target processor that is in a slave loop, then sends
+ * an interrupt to the slave processor. The slave calls the desired
+ * function, then returns to the slave loop. The master may poll or wait
+ * for the slaves to finish.
+ *
+ * There is an array of launch structures, one per CPU on the node. One
+ * interrupt level is used per local CPU.
+ */
+
+#define LAUNCH_MAGIC 0xaddbead2addbead3
+#ifdef CONFIG_SGI_IP27
+#define LAUNCH_SIZEOF 0x100
+#define LAUNCH_PADSZ 0xa0
+#endif
+
+#define LAUNCH_OFF_MAGIC 0x00 /* Struct offsets for assembly */
+#define LAUNCH_OFF_BUSY 0x08
+#define LAUNCH_OFF_CALL 0x10
+#define LAUNCH_OFF_CALLC 0x18
+#define LAUNCH_OFF_CALLPARM 0x20
+#define LAUNCH_OFF_STACK 0x28
+#define LAUNCH_OFF_GP 0x30
+#define LAUNCH_OFF_BEVUTLB 0x38
+#define LAUNCH_OFF_BEVNORMAL 0x40
+#define LAUNCH_OFF_BEVECC 0x48
+
+#define LAUNCH_STATE_DONE 0 /* Return value of LAUNCH_POLL */
+#define LAUNCH_STATE_SENT 1
+#define LAUNCH_STATE_RECD 2
+
+/*
+ * The launch routine is called only if the complement address is correct.
+ *
+ * Before control is transferred to a routine, the complement address
+ * is zeroed (invalidated) to prevent an accidental call from a spurious
+ * interrupt.
+ *
+ * The slave_launch routine turns on the BUSY flag, and the slave loop
+ * clears the BUSY flag after control is returned to it.
+ */
+
+#ifndef __ASSEMBLY__
+
+typedef int launch_state_t;
+typedef void (*launch_proc_t)(u64 call_parm);
+
+typedef struct launch_s {
+ volatile u64 magic; /* Magic number */
+ volatile u64 busy; /* Slave currently active */
+ volatile launch_proc_t call_addr; /* Func. for slave to call */
+ volatile u64 call_addr_c; /* 1's complement of call_addr*/
+ volatile u64 call_parm; /* Single parm passed to call*/
+ volatile void *stack_addr; /* Stack pointer for slave function */
+ volatile void *gp_addr; /* Global pointer for slave func. */
+ volatile char *bevutlb;/* Address of bev utlb ex handler */
+ volatile char *bevnormal;/*Address of bev normal ex handler */
+ volatile char *bevecc;/* Address of bev cache err handler */
+ volatile char pad[160]; /* Pad to LAUNCH_SIZEOF */
+} launch_t;
+
+/*
+ * PROM entry points for launch routines are determined by IPxxprom/start.s
+ */
+
+#define LAUNCH_SLAVE (*(void (*)(int nasid, int cpu, \
+ launch_proc_t call_addr, \
+ u64 call_parm, \
+ void *stack_addr, \
+ void *gp_addr)) \
+ IP27PROM_LAUNCHSLAVE)
+
+#define LAUNCH_WAIT (*(void (*)(int nasid, int cpu, int timeout_msec)) \
+ IP27PROM_WAITSLAVE)
+
+#define LAUNCH_POLL (*(launch_state_t (*)(int nasid, int cpu)) \
+ IP27PROM_POLLSLAVE)
+
+#define LAUNCH_LOOP (*(void (*)(void)) \
+ IP27PROM_SLAVELOOP)
+
+#define LAUNCH_FLASH (*(void (*)(void)) \
+ IP27PROM_FLASHLEDS)
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_SN_LAUNCH_H */
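
A hedged sketch (not part of the patch) of how the master side might use the
PROM vectors above: hand a function to a slave CPU, then poll until the slave
has returned to its loop. The busy-wait is illustrative; callers could use
LAUNCH_WAIT with a timeout instead.

    /* Hedged sketch only -- not part of the patch. */
    static void run_on_slave(int nasid, int cpu, launch_proc_t fn, u64 parm,
    			     void *stack, void *gp)
    {
    	/* Ask the PROM to make the slave call fn(parm). */
    	LAUNCH_SLAVE(nasid, cpu, fn, parm, stack, gp);

    	/* Poll until the slave has finished and is back in its loop. */
    	while (LAUNCH_POLL(nasid, cpu) != LAUNCH_STATE_DONE)
    		;
    }
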
diff --git a/arch/mips/include/asm/sn/mapped_kernel.h b/arch/mips/include/asm/sn/mapped_kernel.h
new file mode 100644
index 000000000..3f1049807
--- /dev/null
+++ b/arch/mips/include/asm/sn/mapped_kernel.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * File created by Kanoj Sarcar 06/06/00.
+ * Copyright 2000 Silicon Graphics, Inc.
+ */
+#ifndef __ASM_SN_MAPPED_KERNEL_H
+#define __ASM_SN_MAPPED_KERNEL_H
+
+#include <linux/mmzone.h>
+
+/*
+ * Note on how mapped kernels work: the text and data section is
+ * compiled at cksseg segment (LOADADDR = 0xc001c000), and the
+ * init/setup/data section gets a 16M virtual address bump in the
+ * ld.script file (so that tlblo0 and tlblo1 map the sections).
+ * The vmlinux.64 section addresses are put in the xkseg range
+ * using the change-addresses makefile option. Use elfdump -of
+ * on IRIX to see where the sections go. The Origin loader loads
+ * the two sections contiguously in physical memory. The loader
+ * sets the entry point into kernel_entry using a xkphys address,
+ * but instead of using 0xa800000001160000, it uses the address
+ * 0xa800000000160000, which is where it physically loaded that
+ * code. So no jumps can be done before we have switched to using
+ * cksseg addresses.
+ */
+#include <asm/addrspace.h>
+
+#define REP_BASE CAC_BASE
+
+#ifdef CONFIG_MAPPED_KERNEL
+
+#define MAPPED_ADDR_RO_TO_PHYS(x) (x - REP_BASE)
+#define MAPPED_ADDR_RW_TO_PHYS(x) (x - REP_BASE - 16777216)
+
+#define MAPPED_KERN_RO_PHYSBASE(n) (hub_data(n)->kern_vars.kv_ro_baseaddr)
+#define MAPPED_KERN_RW_PHYSBASE(n) (hub_data(n)->kern_vars.kv_rw_baseaddr)
+
+#define MAPPED_KERN_RO_TO_PHYS(x) \
+ ((unsigned long)MAPPED_ADDR_RO_TO_PHYS(x) | \
+ MAPPED_KERN_RO_PHYSBASE(get_nasid()))
+#define MAPPED_KERN_RW_TO_PHYS(x) \
+ ((unsigned long)MAPPED_ADDR_RW_TO_PHYS(x) | \
+ MAPPED_KERN_RW_PHYSBASE(get_nasid()))
+
+#else /* CONFIG_MAPPED_KERNEL */
+
+#define MAPPED_KERN_RO_TO_PHYS(x) (x - REP_BASE)
+#define MAPPED_KERN_RW_TO_PHYS(x) (x - REP_BASE)
+
+#endif /* CONFIG_MAPPED_KERNEL */
+
+#define MAPPED_KERN_RO_TO_K0(x) PHYS_TO_K0(MAPPED_KERN_RO_TO_PHYS(x))
+#define MAPPED_KERN_RW_TO_K0(x) PHYS_TO_K0(MAPPED_KERN_RW_TO_PHYS(x))
+
+#endif /* __ASM_SN_MAPPED_KERNEL_H */
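
A hedged sketch (not part of the patch): the macros above convert kernel
read-only (text) and read-write (init/data) virtual addresses into CKSEG0
aliases on the calling node. PHYS_TO_K0() and get_nasid() come from other
sn headers.

    /* Hedged sketch only -- not part of the patch. */
    static inline unsigned long kern_ro_to_k0(unsigned long vaddr)
    {
    	/* Mapped-kernel case: strip REP_BASE, fold in this node's RO base. */
    	return MAPPED_KERN_RO_TO_K0(vaddr);
    }

    static inline unsigned long kern_rw_to_k0(unsigned long vaddr)
    {
    	/* The RW section sits 16 MB higher in the mapped-kernel layout. */
    	return MAPPED_KERN_RW_TO_K0(vaddr);
    }
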
diff --git a/arch/mips/include/asm/sn/nmi.h b/arch/mips/include/asm/sn/nmi.h
new file mode 100644
index 000000000..12ac210f1
--- /dev/null
+++ b/arch/mips/include/asm/sn/nmi.h
@@ -0,0 +1,125 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from IRIX <sys/SN/nmi.h>, Revision 1.5.
+ *
+ * Copyright (C) 1992 - 1997 Silicon Graphics, Inc.
+ */
+#ifndef __ASM_SN_NMI_H
+#define __ASM_SN_NMI_H
+
+#include <asm/sn/addrs.h>
+
+/*
+ * The launch data structure resides at a fixed place in each node's memory
+ * and is used to communicate between the master processor and the slave
+ * processors.
+ *
+ * The master stores launch parameters in the launch structure
+ * corresponding to a target processor that is in a slave loop, then sends
+ * an interrupt to the slave processor. The slave calls the desired
+ * function, followed by an optional rendezvous function, then returns to
+ * the slave loop. The master does not wait for the slaves before
+ * returning.
+ *
+ * There is an array of launch structures, one per CPU on the node. One
+ * interrupt level is used per CPU.
+ */
+
+#define NMI_MAGIC 0x48414d4d455201
+#define NMI_SIZEOF 0x40
+
+#define NMI_OFF_MAGIC 0x00 /* Struct offsets for assembly */
+#define NMI_OFF_FLAGS 0x08
+#define NMI_OFF_CALL 0x10
+#define NMI_OFF_CALLC 0x18
+#define NMI_OFF_CALLPARM 0x20
+#define NMI_OFF_GMASTER 0x28
+
+/*
+ * The NMI routine is called only if the complement address is
+ * correct.
+ *
+ * Before control is transferred to a routine, the complement address
+ * is zeroed (invalidated) to prevent an accidental call from a spurious
+ * interrupt.
+ *
+ */
+
+#ifndef __ASSEMBLY__
+
+typedef struct nmi_s {
+ volatile unsigned long magic; /* Magic number */
+ volatile unsigned long flags; /* Combination of flags above */
+ volatile void *call_addr; /* Routine for slave to call */
+ volatile void *call_addr_c; /* 1's complement of address */
+ volatile void *call_parm; /* Single parm passed to call */
+ volatile unsigned long gmaster; /* Flag true only on global master*/
+} nmi_t;
+
+#endif /* !__ASSEMBLY__ */
+
+/* Following definitions are needed both in the prom & the kernel
+ * to identify the format of the nmi cpu register save area in the
+ * low memory on each node.
+ */
+#ifndef __ASSEMBLY__
+
+struct reg_struct {
+ unsigned long gpr[32];
+ unsigned long sr;
+ unsigned long cause;
+ unsigned long epc;
+ unsigned long badva;
+ unsigned long error_epc;
+ unsigned long cache_err;
+ unsigned long nmi_sr;
+};
+
+#endif /* !__ASSEMBLY__ */
+
+/* These are the assembly language offsets into the reg_struct structure */
+
+#define R0_OFF 0x0
+#define R1_OFF 0x8
+#define R2_OFF 0x10
+#define R3_OFF 0x18
+#define R4_OFF 0x20
+#define R5_OFF 0x28
+#define R6_OFF 0x30
+#define R7_OFF 0x38
+#define R8_OFF 0x40
+#define R9_OFF 0x48
+#define R10_OFF 0x50
+#define R11_OFF 0x58
+#define R12_OFF 0x60
+#define R13_OFF 0x68
+#define R14_OFF 0x70
+#define R15_OFF 0x78
+#define R16_OFF 0x80
+#define R17_OFF 0x88
+#define R18_OFF 0x90
+#define R19_OFF 0x98
+#define R20_OFF 0xa0
+#define R21_OFF 0xa8
+#define R22_OFF 0xb0
+#define R23_OFF 0xb8
+#define R24_OFF 0xc0
+#define R25_OFF 0xc8
+#define R26_OFF 0xd0
+#define R27_OFF 0xd8
+#define R28_OFF 0xe0
+#define R29_OFF 0xe8
+#define R30_OFF 0xf0
+#define R31_OFF 0xf8
+#define SR_OFF 0x100
+#define CAUSE_OFF 0x108
+#define EPC_OFF 0x110
+#define BADVA_OFF 0x118
+#define ERROR_EPC_OFF 0x120
+#define CACHE_ERR_OFF 0x128
+#define NMISR_OFF 0x130
+
+#endif /* __ASM_SN_NMI_H */
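
A hedged sketch (not part of the patch): the R*_OFF assembly offsets above
must stay in sync with struct reg_struct; a compile-time check using the
standard kernel BUILD_BUG_ON() helper could look like this.

    /* Hedged sketch only -- not part of the patch. */
    #include <linux/bug.h>
    #include <linux/stddef.h>

    static inline void nmi_check_reg_offsets(void)
    {
    	BUILD_BUG_ON(R0_OFF != offsetof(struct reg_struct, gpr[0]));
    	BUILD_BUG_ON(R31_OFF != offsetof(struct reg_struct, gpr[31]));
    	BUILD_BUG_ON(SR_OFF != offsetof(struct reg_struct, sr));
    	BUILD_BUG_ON(CAUSE_OFF != offsetof(struct reg_struct, cause));
    	BUILD_BUG_ON(EPC_OFF != offsetof(struct reg_struct, epc));
    	BUILD_BUG_ON(NMISR_OFF != offsetof(struct reg_struct, nmi_sr));
    }
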
diff --git a/arch/mips/include/asm/sn/sn0/addrs.h b/arch/mips/include/asm/sn/sn0/addrs.h
new file mode 100644
index 000000000..f13df84ed
--- /dev/null
+++ b/arch/mips/include/asm/sn/sn0/addrs.h
@@ -0,0 +1,283 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from IRIX <sys/SN/SN0/addrs.h>, revision 1.126.
+ *
+ * Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_SN_SN0_ADDRS_H
+#define _ASM_SN_SN0_ADDRS_H
+
+
+/*
+ * SN0 (on a T5) Address map
+ *
+ * This file contains a set of definitions and macros which are used
+ * to reference into the major address spaces (CAC, HSPEC, IO, MSPEC,
+ * and UNCAC) used by the SN0 architecture. It also contains addresses
+ * for "major" statically locatable PROM/Kernel data structures, such as
+ * the partition table, the configuration data structure, etc.
+ * We make an implicit assumption that the processor using this file
+ * follows the R10K's provisions for specifying uncached attributes;
+ * should this change, the base registers may very well become processor-
+ * dependent.
+ *
+ * For more information on the address spaces, see the "Local Resources"
+ * chapter of the Hub specification.
+ *
+ * NOTE: This header file is included both by C and by assembler source
+ * files. Please bracket any language-dependent definitions
+ * appropriately.
+ */
+
+/*
+ * Some of the macros here need to be cast to appropriate types when used
+ * from C. They definitely must not be cast from assembly language, so we
+ * use some new ANSI preprocessor stuff to paste these on where needed.
+ */
+
+/*
+ * The following couple of definitions will eventually need to be variables,
+ * since the amount of address space assigned to each node depends on
+ * whether the system is running in N-mode (more nodes with less memory)
+ * or M-mode (fewer nodes with more memory). We expect that it will
+ * be a while before we need to make this decision dynamically, though,
+ * so for now we just use defines bracketed by an ifdef.
+ */
+
+#ifdef CONFIG_SGI_SN_N_MODE
+
+#define NODE_SIZE_BITS 31
+#define BWIN_SIZE_BITS 28
+
+#define NASID_BITS 9
+#define NASID_BITMASK (0x1ffLL)
+#define NASID_SHFT 31
+#define NASID_META_BITS 5
+#define NASID_LOCAL_BITS 4
+
+#define BDDIR_UPPER_MASK (UINT64_CAST 0x7ffff << 10)
+#define BDECC_UPPER_MASK (UINT64_CAST 0x3ffffff << 3)
+
+#else /* !defined(CONFIG_SGI_SN_N_MODE), assume that M-mode is desired */
+
+#define NODE_SIZE_BITS 32
+#define BWIN_SIZE_BITS 29
+
+#define NASID_BITMASK (0xffLL)
+#define NASID_BITS 8
+#define NASID_SHFT 32
+#define NASID_META_BITS 4
+#define NASID_LOCAL_BITS 4
+
+#define BDDIR_UPPER_MASK (UINT64_CAST 0xfffff << 10)
+#define BDECC_UPPER_MASK (UINT64_CAST 0x7ffffff << 3)
+
+#endif /* !defined(CONFIG_SGI_SN_N_MODE) */
+
+#define NODE_ADDRSPACE_SIZE (UINT64_CAST 1 << NODE_SIZE_BITS)
+
+#define NASID_MASK (UINT64_CAST NASID_BITMASK << NASID_SHFT)
+#define NASID_GET(_pa) (int) ((UINT64_CAST (_pa) >> \
+ NASID_SHFT) & NASID_BITMASK)
+
+#if !defined(__ASSEMBLY__)
+
+#define NODE_SWIN_BASE(nasid, widget) \
+ ((widget == 0) ? NODE_BWIN_BASE((nasid), SWIN0_BIGWIN) \
+ : RAW_NODE_SWIN_BASE(nasid, widget))
+#else /* __ASSEMBLY__ */
+#define NODE_SWIN_BASE(nasid, widget) \
+ (NODE_IO_BASE(nasid) + (UINT64_CAST(widget) << SWIN_SIZE_BITS))
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The following definitions pertain to the IO special address
+ * space. They define the location of the big and little windows
+ * of any given node.
+ */
+
+#define BWIN_INDEX_BITS 3
+#define BWIN_SIZE (UINT64_CAST 1 << BWIN_SIZE_BITS)
+#define BWIN_SIZEMASK (BWIN_SIZE - 1)
+#define BWIN_WIDGET_MASK 0x7
+#define NODE_BWIN_BASE0(nasid) (NODE_IO_BASE(nasid) + BWIN_SIZE)
+#define NODE_BWIN_BASE(nasid, bigwin) (NODE_BWIN_BASE0(nasid) + \
+ (UINT64_CAST(bigwin) << BWIN_SIZE_BITS))
+
+#define BWIN_WIDGETADDR(addr) ((addr) & BWIN_SIZEMASK)
+#define BWIN_WINDOWNUM(addr) (((addr) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
+/*
+ * Verify whether addr belongs to the large window address space of the
+ * node with the given "nasid".
+ *
+ * NOTE: "addr" is expected to be an XKPHYS address, NOT a physical
+ * address.
+ */
+
+#define NODE_BWIN_ADDR(nasid, addr) \
+ (((addr) >= NODE_BWIN_BASE0(nasid)) && \
+ ((addr) < (NODE_BWIN_BASE(nasid, HUB_NUM_BIG_WINDOW) + \
+ BWIN_SIZE)))
+
+/*
+ * The following define the major position-independent aliases used
+ * in SN0.
+ * CALIAS -- Varies in size, points to the first n bytes of memory
+ * on the reader's node.
+ */
+
+#define CALIAS_BASE CAC_BASE
+
+#define SN0_WIDGET_BASE(_nasid, _wid) (NODE_SWIN_BASE((_nasid), (_wid)))
+
+/* Turn on sable logging for the processors whose bits are set. */
+#define SABLE_LOG_TRIGGER(_map)
+
+#ifndef __ASSEMBLY__
+#define KERN_NMI_ADDR(nasid, slice) \
+ TO_NODE_UNCAC((nasid), IP27_NMI_KREGS_OFFSET + \
+ (IP27_NMI_KREGS_CPU_SIZE * (slice)))
+#endif /* !__ASSEMBLY__ */
+
+#ifdef PROM
+
+#define MISC_PROM_BASE PHYS_TO_K0(0x01300000)
+#define MISC_PROM_SIZE 0x200000
+
+#define DIAG_BASE PHYS_TO_K0(0x01500000)
+#define DIAG_SIZE 0x300000
+
+#define ROUTE_BASE PHYS_TO_K0(0x01800000)
+#define ROUTE_SIZE 0x200000
+
+#define IP27PROM_FLASH_HDR PHYS_TO_K0(0x01300000)
+#define IP27PROM_FLASH_DATA PHYS_TO_K0(0x01301000)
+#define IP27PROM_CORP_MAX 32
+#define IP27PROM_CORP PHYS_TO_K0(0x01800000)
+#define IP27PROM_CORP_SIZE 0x10000
+#define IP27PROM_CORP_STK PHYS_TO_K0(0x01810000)
+#define IP27PROM_CORP_STKSIZE 0x2000
+#define IP27PROM_DECOMP_BUF PHYS_TO_K0(0x01900000)
+#define IP27PROM_DECOMP_SIZE 0xfff00
+
+#define IP27PROM_BASE PHYS_TO_K0(0x01a00000)
+#define IP27PROM_BASE_MAPPED (UNCAC_BASE | 0x1fc00000)
+#define IP27PROM_SIZE_MAX 0x100000
+
+#define IP27PROM_PCFG PHYS_TO_K0(0x01b00000)
+#define IP27PROM_PCFG_SIZE 0xd0000
+#define IP27PROM_ERRDMP PHYS_TO_K1(0x01bd0000)
+#define IP27PROM_ERRDMP_SIZE 0xf000
+
+#define IP27PROM_INIT_START PHYS_TO_K1(0x01bd0000)
+#define IP27PROM_CONSOLE PHYS_TO_K1(0x01bdf000)
+#define IP27PROM_CONSOLE_SIZE 0x200
+#define IP27PROM_NETUART PHYS_TO_K1(0x01bdf200)
+#define IP27PROM_NETUART_SIZE 0x100
+#define IP27PROM_UNUSED1 PHYS_TO_K1(0x01bdf300)
+#define IP27PROM_UNUSED1_SIZE 0x500
+#define IP27PROM_ELSC_BASE_A PHYS_TO_K0(0x01bdf800)
+#define IP27PROM_ELSC_BASE_B PHYS_TO_K0(0x01bdfc00)
+#define IP27PROM_STACK_A PHYS_TO_K0(0x01be0000)
+#define IP27PROM_STACK_B PHYS_TO_K0(0x01bf0000)
+#define IP27PROM_STACK_SHFT 16
+#define IP27PROM_STACK_SIZE (1 << IP27PROM_STACK_SHFT)
+#define IP27PROM_INIT_END PHYS_TO_K0(0x01c00000)
+
+#define SLAVESTACK_BASE PHYS_TO_K0(0x01580000)
+#define SLAVESTACK_SIZE 0x40000
+
+#define ENETBUFS_BASE PHYS_TO_K0(0x01f80000)
+#define ENETBUFS_SIZE 0x20000
+
+#define IO6PROM_BASE PHYS_TO_K0(0x01c00000)
+#define IO6PROM_SIZE 0x400000
+#define IO6PROM_BASE_MAPPED (UNCAC_BASE | 0x11c00000)
+#define IO6DPROM_BASE PHYS_TO_K0(0x01c00000)
+#define IO6DPROM_SIZE 0x200000
+
+#define NODEBUGUNIX_ADDR PHYS_TO_K0(0x00019000)
+#define DEBUGUNIX_ADDR PHYS_TO_K0(0x00100000)
+
+#define IP27PROM_INT_LAUNCH 10 /* and 11 */
+#define IP27PROM_INT_NETUART 12 /* through 17 */
+
+#endif /* PROM */
+
+/*
+ * needed by symmon so it needs to be outside #if PROM
+ */
+#define IP27PROM_ELSC_SHFT 10
+#define IP27PROM_ELSC_SIZE (1 << IP27PROM_ELSC_SHFT)
+
+/*
+ * This address is used by IO6PROM to build MemoryDescriptors of
+ * free memory. This address is important since unix gets loaded
+ * at this address, and this memory has to be FREE if unix is to
+ * be loaded.
+ */
+
+#define FREEMEM_BASE PHYS_TO_K0(0x2000000)
+
+#define IO6PROM_STACK_SHFT 14 /* stack per cpu */
+#define IO6PROM_STACK_SIZE (1 << IO6PROM_STACK_SHFT)
+
+/*
+ * IP27 PROM vectors
+ */
+
+#define IP27PROM_ENTRY PHYS_TO_COMPATK1(0x1fc00000)
+#define IP27PROM_RESTART PHYS_TO_COMPATK1(0x1fc00008)
+#define IP27PROM_SLAVELOOP PHYS_TO_COMPATK1(0x1fc00010)
+#define IP27PROM_PODMODE PHYS_TO_COMPATK1(0x1fc00018)
+#define IP27PROM_IOC3UARTPOD PHYS_TO_COMPATK1(0x1fc00020)
+#define IP27PROM_FLASHLEDS PHYS_TO_COMPATK1(0x1fc00028)
+#define IP27PROM_REPOD PHYS_TO_COMPATK1(0x1fc00030)
+#define IP27PROM_LAUNCHSLAVE PHYS_TO_COMPATK1(0x1fc00038)
+#define IP27PROM_WAITSLAVE PHYS_TO_COMPATK1(0x1fc00040)
+#define IP27PROM_POLLSLAVE PHYS_TO_COMPATK1(0x1fc00048)
+
+#define KL_UART_BASE LOCAL_HUB_ADDR(MD_UREG0_0) /* base of UART regs */
+#define KL_UART_CMD LOCAL_HUB_ADDR(MD_UREG0_0) /* UART command reg */
+#define KL_UART_DATA LOCAL_HUB_ADDR(MD_UREG0_1) /* UART data reg */
+#define KL_I2C_REG MD_UREG0_0 /* I2C reg */
+
+#ifndef __ASSEMBLY__
+
+/* Address 0x400 to 0x1000 ualias points to cache error eframe + misc
+ * CACHE_ERR_SP_PTR could either contain an address to the stack, or
+ * the stack could start at CACHE_ERR_SP_PTR
+ */
+#if defined(HUB_ERR_STS_WAR)
+#define CACHE_ERR_EFRAME 0x480
+#else /* HUB_ERR_STS_WAR */
+#define CACHE_ERR_EFRAME 0x400
+#endif /* HUB_ERR_STS_WAR */
+
+#define CACHE_ERR_ECCFRAME (CACHE_ERR_EFRAME + EF_SIZE)
+#define CACHE_ERR_SP_PTR (0x1000 - 32) /* why -32? TBD */
+#define CACHE_ERR_IBASE_PTR (0x1000 - 40)
+#define CACHE_ERR_SP (CACHE_ERR_SP_PTR - 16)
+#define CACHE_ERR_AREA_SIZE (ARCS_SPB_OFFSET - CACHE_ERR_EFRAME)
+
+#endif /* !__ASSEMBLY__ */
+
+#define _ARCSPROM
+
+#if defined(HUB_ERR_STS_WAR)
+
+#define ERR_STS_WAR_REGISTER IIO_IIBUSERR
+#define ERR_STS_WAR_ADDR LOCAL_HUB_ADDR(IIO_IIBUSERR)
+#define ERR_STS_WAR_PHYSADDR TO_PHYS((__psunsigned_t)ERR_STS_WAR_ADDR)
+ /* Used to match addr in error reg. */
+#define OLD_ERR_STS_WAR_OFFSET ((MD_MEM_BANKS * MD_BANK_SIZE) - 0x100)
+
+#endif /* HUB_ERR_STS_WAR */
+
+#endif /* _ASM_SN_SN0_ADDRS_H */
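
A hedged sketch (not part of the patch): NASID_GET() isolates the node number
embedded in a physical address, with the shift and mask depending on whether
the kernel is built for N-mode or M-mode.

    /* Hedged sketch only -- not part of the patch. */
    static inline int pa_nasid(unsigned long pa)
    {
    	/* M-mode: (pa >> 32) & 0xff;  N-mode: (pa >> 31) & 0x1ff. */
    	return NASID_GET(pa);
    }
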
diff --git a/arch/mips/include/asm/sn/sn0/arch.h b/arch/mips/include/asm/sn/sn0/arch.h
new file mode 100644
index 000000000..12f4c4649
--- /dev/null
+++ b/arch/mips/include/asm/sn/sn0/arch.h
@@ -0,0 +1,56 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI IP27 specific setup.
+ *
+ * Copyright (C) 1995 - 1997, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
+ */
+#ifndef _ASM_SN_SN0_ARCH_H
+#define _ASM_SN_SN0_ARCH_H
+
+
+/*
+ * MAXCPUS refers to the maximum number of CPUs in a single kernel.
+ * This is not necessarily the same as MAXNODES * CPUS_PER_NODE
+ */
+#define MAXCPUS (MAX_NUMNODES * CPUS_PER_NODE)
+
+/*
+ * This is the maximum number of NASIDS that can be present in a system.
+ * (Highest NASID plus one.)
+ */
+#define MAX_NASIDS 256
+
+/*
+ * MAX_REGIONS refers to the maximum number of hardware partitioned regions.
+ */
+#define MAX_REGIONS 64
+#define MAX_NONPREMIUM_REGIONS 16
+#define MAX_PREMIUM_REGIONS MAX_REGIONS
+
+/*
+ * MAX_PARTITIONS refers to the maximum number of logically defined
+ * partitions the system can support.
+ */
+#define MAX_PARTITIONS MAX_REGIONS
+
+#define NASID_MASK_BYTES ((MAX_NASIDS + 7) / 8)
+
+/*
+ * Slot constants for SN0
+ */
+#ifdef CONFIG_SGI_SN_N_MODE
+#define MAX_MEM_SLOTS 16 /* max slots per node */
+#else /* !CONFIG_SGI_SN_N_MODE, assume CONFIG_SGI_SN_M_MODE */
+#define MAX_MEM_SLOTS 32 /* max slots per node */
+#endif /* CONFIG_SGI_SN_M_MODE */
+
+#define SLOT_SHIFT (27)
+#define SLOT_MIN_MEM_SIZE (32*1024*1024)
+
+#define CPUS_PER_NODE 2 /* CPUs on a single hub */
+
+#endif /* _ASM_SN_SN0_ARCH_H */
diff --git a/arch/mips/include/asm/sn/sn0/hub.h b/arch/mips/include/asm/sn/sn0/hub.h
new file mode 100644
index 000000000..c84adde36
--- /dev/null
+++ b/arch/mips/include/asm/sn/sn0/hub.h
@@ -0,0 +1,62 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_SN_SN0_HUB_H
+#define _ASM_SN_SN0_HUB_H
+
+/* The secret password; used to release protection */
+#define HUB_PASSWORD 0x53474972756c6573ull
+
+#define CHIPID_HUB 0
+#define CHIPID_ROUTER 1
+
+#define HUB_REV_1_0 1
+#define HUB_REV_2_0 2
+#define HUB_REV_2_1 3
+#define HUB_REV_2_2 4
+#define HUB_REV_2_3 5
+#define HUB_REV_2_4 6
+
+#define MAX_HUB_PATH 80
+
+#include <asm/sn/sn0/addrs.h>
+#include <asm/sn/sn0/hubpi.h>
+#include <asm/sn/sn0/hubmd.h>
+#include <asm/sn/sn0/hubio.h>
+#include <asm/sn/sn0/hubni.h>
+//#include <asm/sn/sn0/hubcore.h>
+
+/* Translation of uncached attributes */
+#define UATTR_HSPEC 0
+#define UATTR_IO 1
+#define UATTR_MSPEC 2
+#define UATTR_UNCAC 3
+
+#ifdef __ASSEMBLY__
+/*
+ * Returns the local nasid into res.
+ */
+ .macro GET_NASID_ASM res
+ dli \res, LOCAL_HUB_ADDR(NI_STATUS_REV_ID)
+ ld \res, (\res)
+ and \res, NSRI_NODEID_MASK
+ dsrl \res, NSRI_NODEID_SHFT
+ .endm
+#else
+
+/*
+ * get_nasid() returns the physical node id number of the caller.
+ */
+static inline nasid_t get_nasid(void)
+{
+ return (nasid_t)((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_NODEID_MASK)
+ >> NSRI_NODEID_SHFT);
+}
+#endif
+
+#endif /* _ASM_SN_SN0_HUB_H */
diff --git a/arch/mips/include/asm/sn/sn0/hubio.h b/arch/mips/include/asm/sn/sn0/hubio.h
new file mode 100644
index 000000000..57ece90f8
--- /dev/null
+++ b/arch/mips/include/asm/sn/sn0/hubio.h
@@ -0,0 +1,972 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from IRIX <sys/SN/SN0/hubio.h>, Revision 1.80.
+ *
+ * Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_SGI_SN_SN0_HUBIO_H
+#define _ASM_SGI_SN_SN0_HUBIO_H
+
+/*
+ * Hub I/O interface registers
+ *
+ * All registers in this file are subject to change until Hub chip tapeout.
+ * In general, the longer software name should be used when available.
+ */
+
+/*
+ * Slightly friendlier names for some common registers.
+ * The hardware definitions follow.
+ */
+#define IIO_WIDGET IIO_WID /* Widget identification */
+#define IIO_WIDGET_STAT IIO_WSTAT /* Widget status register */
+#define IIO_WIDGET_CTRL IIO_WCR /* Widget control register */
+#define IIO_WIDGET_TOUT IIO_WRTO /* Widget request timeout */
+#define IIO_WIDGET_FLUSH IIO_WTFR /* Widget target flush */
+#define IIO_PROTECT IIO_ILAPR /* IO interface protection */
+#define IIO_PROTECT_OVRRD IIO_ILAPO /* IO protect override */
+#define IIO_OUTWIDGET_ACCESS IIO_IOWA /* Outbound widget access */
+#define IIO_INWIDGET_ACCESS IIO_IIWA /* Inbound widget access */
+#define IIO_INDEV_ERR_MASK IIO_IIDEM /* Inbound device error mask */
+#define IIO_LLP_CSR IIO_ILCSR /* LLP control and status */
+#define IIO_LLP_LOG IIO_ILLR /* LLP log */
+#define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout*/
+#define IIO_XTALKTT_TOUT IIO_IXTT /* Xtalk tail timeout */
+#define IIO_IO_ERR_CLR IIO_IECLR /* IO error clear */
+#define IIO_BTE_CRB_CNT IIO_IBCN /* IO BTE CRB count */
+
+#define IIO_LLP_CSR_IS_UP 0x00002000
+#define IIO_LLP_CSR_LLP_STAT_MASK 0x00003000
+#define IIO_LLP_CSR_LLP_STAT_SHFT 12
+
+/* key to IIO_PROTECT_OVRRD */
+#define IIO_PROTECT_OVRRD_KEY 0x53474972756c6573ull /* "SGIrules" */
+
+/* BTE register names */
+#define IIO_BTE_STAT_0 IIO_IBLS_0 /* Also BTE length/status 0 */
+#define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */
+#define IIO_BTE_DEST_0 IIO_IBDA_0 /* Also BTE dest. address 0 */
+#define IIO_BTE_CTRL_0 IIO_IBCT_0 /* Also BTE control/terminate 0 */
+#define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */
+#define IIO_BTE_INT_0 IIO_IBIA_0 /* Also BTE interrupt 0 */
+#define IIO_BTE_OFF_0 0 /* Base offset from BTE 0 regs. */
+#define IIO_BTE_OFF_1 IIO_IBLS_1 - IIO_IBLS_0 /* Offset from base to BTE 1 */
+
+/* BTE register offsets from base */
+#define BTEOFF_STAT 0
+#define BTEOFF_SRC (IIO_BTE_SRC_0 - IIO_BTE_STAT_0)
+#define BTEOFF_DEST (IIO_BTE_DEST_0 - IIO_BTE_STAT_0)
+#define BTEOFF_CTRL (IIO_BTE_CTRL_0 - IIO_BTE_STAT_0)
+#define BTEOFF_NOTIFY (IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0)
+#define BTEOFF_INT (IIO_BTE_INT_0 - IIO_BTE_STAT_0)
+
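
A hedged sketch (not part of the patch): the BTE register blocks for engines
0 and 1 have identical layouts, so a register address can be computed from
IIO_BTE_STAT_0 (the base), IIO_BTE_OFF_0/1 (the per-engine stride) and the
BTEOFF_* offsets above.

    /* Hedged sketch only -- not part of the patch. */
    static inline unsigned long bte_reg_addr(int bte, unsigned long bteoff)
    {
    	/* e.g. bte_reg_addr(1, BTEOFF_SRC) == IIO_IBSA_1 (0x420008). */
    	return IIO_BTE_STAT_0 +
    	       (bte ? IIO_BTE_OFF_1 : IIO_BTE_OFF_0) + bteoff;
    }
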
+
+/*
+ * The following definitions use the names defined in the IO interface
+ * document for ease of reference. When possible, software should
+ * generally use the longer but clearer names defined above.
+ */
+
+#define IIO_BASE 0x400000
+#define IIO_BASE_BTE0 0x410000
+#define IIO_BASE_BTE1 0x420000
+#define IIO_BASE_PERF 0x430000
+#define IIO_PERF_CNT 0x430008
+
+#define IO_PERF_SETS 32
+
+#define IIO_WID 0x400000 /* Widget identification */
+#define IIO_WSTAT 0x400008 /* Widget status */
+#define IIO_WCR 0x400020 /* Widget control */
+
+#define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */
+#define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */
+#define IIO_WSTAT_TXRETRY_MASK (0x7F)
+#define IIO_WSTAT_TXRETRY_SHFT (16)
+#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
+ IIO_WSTAT_TXRETRY_MASK)
+
+#define IIO_ILAPR 0x400100 /* Local Access Protection */
+#define IIO_ILAPO 0x400108 /* Protection override */
+#define IIO_IOWA 0x400110 /* outbound widget access */
+#define IIO_IIWA 0x400118 /* inbound widget access */
+#define IIO_IIDEM 0x400120 /* Inbound Device Error Mask */
+#define IIO_ILCSR 0x400128 /* LLP control and status */
+#define IIO_ILLR 0x400130 /* LLP Log */
+#define IIO_IIDSR 0x400138 /* Interrupt destination */
+
+#define IIO_IIBUSERR 0x1400208 /* Reads here cause a bus error. */
+
+/* IO Interrupt Destination Register */
+#define IIO_IIDSR_SENT_SHIFT 28
+#define IIO_IIDSR_SENT_MASK 0x10000000
+#define IIO_IIDSR_ENB_SHIFT 24
+#define IIO_IIDSR_ENB_MASK 0x01000000
+#define IIO_IIDSR_NODE_SHIFT 8
+#define IIO_IIDSR_NODE_MASK 0x0000ff00
+#define IIO_IIDSR_LVL_SHIFT 0
+#define IIO_IIDSR_LVL_MASK 0x0000003f
+
+
+/* GFX Flow Control Node/Widget Register */
+#define IIO_IGFX_0 0x400140 /* gfx node/widget register 0 */
+#define IIO_IGFX_1 0x400148 /* gfx node/widget register 1 */
+#define IIO_IGFX_W_NUM_BITS 4 /* size of widget num field */
+#define IIO_IGFX_W_NUM_MASK ((1<<IIO_IGFX_W_NUM_BITS)-1)
+#define IIO_IGFX_W_NUM_SHIFT 0
+#define IIO_IGFX_N_NUM_BITS 9 /* size of node num field */
+#define IIO_IGFX_N_NUM_MASK ((1<<IIO_IGFX_N_NUM_BITS)-1)
+#define IIO_IGFX_N_NUM_SHIFT 4
+#define IIO_IGFX_P_NUM_BITS 1 /* size of processor num field */
+#define IIO_IGFX_P_NUM_MASK ((1<<IIO_IGFX_P_NUM_BITS)-1)
+#define IIO_IGFX_P_NUM_SHIFT 16
+#define IIO_IGFX_VLD_BITS 1 /* size of valid field */
+#define IIO_IGFX_VLD_MASK ((1<<IIO_IGFX_VLD_BITS)-1)
+#define IIO_IGFX_VLD_SHIFT 20
+#define IIO_IGFX_INIT(widget, node, cpu, valid) (\
+ (((widget) & IIO_IGFX_W_NUM_MASK) << IIO_IGFX_W_NUM_SHIFT) | \
+ (((node) & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) | \
+ (((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT) | \
+ (((valid) & IIO_IGFX_VLD_MASK) << IIO_IGFX_VLD_SHIFT) )
+
+/* Scratch registers (not all bits available) */
+#define IIO_SCRATCH_REG0 0x400150
+#define IIO_SCRATCH_REG1 0x400158
+#define IIO_SCRATCH_MASK 0x0000000f00f11fff
+
+#define IIO_SCRATCH_BIT0_0 0x0000000800000000
+#define IIO_SCRATCH_BIT0_1 0x0000000400000000
+#define IIO_SCRATCH_BIT0_2 0x0000000200000000
+#define IIO_SCRATCH_BIT0_3 0x0000000100000000
+#define IIO_SCRATCH_BIT0_4 0x0000000000800000
+#define IIO_SCRATCH_BIT0_5 0x0000000000400000
+#define IIO_SCRATCH_BIT0_6 0x0000000000200000
+#define IIO_SCRATCH_BIT0_7 0x0000000000100000
+#define IIO_SCRATCH_BIT0_8 0x0000000000010000
+#define IIO_SCRATCH_BIT0_9 0x0000000000001000
+#define IIO_SCRATCH_BIT0_R 0x0000000000000fff
+
+/* IO Translation Table Entries */
+#define IIO_NUM_ITTES 7 /* ITTEs numbered 0..6 */
+ /* Hw manuals number them 1..7! */
+
+/*
+ * As a permanent workaround for a bug in the PI side of the hub, we've
+ * redefined big window 7 as small window 0.
+ */
+#define HUB_NUM_BIG_WINDOW IIO_NUM_ITTES - 1
+
+/*
+ * Use the top big window as a surrogate for the first small window
+ */
+#define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW
+
+#define ILCSR_WARM_RESET 0x100
+/*
+ * The IO LLP control status register and widget control register
+ */
+#ifndef __ASSEMBLY__
+
+typedef union hubii_wid_u {
+ u64 wid_reg_value;
+ struct {
+ u64 wid_rsvd: 32, /* unused */
+ wid_rev_num: 4, /* revision number */
+ wid_part_num: 16, /* the widget type: hub=c101 */
+ wid_mfg_num: 11, /* Manufacturer id (IBM) */
+ wid_rsvd1: 1; /* Reserved */
+ } wid_fields_s;
+} hubii_wid_t;
+
+
+typedef union hubii_wcr_u {
+ u64 wcr_reg_value;
+ struct {
+ u64 wcr_rsvd: 41, /* unused */
+ wcr_e_thresh: 5, /* elasticity threshold */
+ wcr_dir_con: 1, /* widget direct connect */
+ wcr_f_bad_pkt: 1, /* Force bad llp pkt enable */
+ wcr_xbar_crd: 3, /* LLP crossbar credit */
+ wcr_rsvd1: 8, /* Reserved */
+ wcr_tag_mode: 1, /* Tag mode */
+ wcr_widget_id: 4; /* Widget ID */
+ } wcr_fields_s;
+} hubii_wcr_t;
+
+#define iwcr_dir_con wcr_fields_s.wcr_dir_con
+
+typedef union hubii_wstat_u {
+ u64 reg_value;
+ struct {
+ u64 rsvd1: 31,
+ crazy: 1, /* Crazy bit */
+ rsvd2: 8,
+ llp_tx_cnt: 8, /* LLP Xmit retry counter */
+ rsvd3: 6,
+ tx_max_rtry: 1, /* LLP Retry Timeout Signal */
+ rsvd4: 2,
+ xt_tail_to: 1, /* Xtalk Tail Timeout */
+ xt_crd_to: 1, /* Xtalk Credit Timeout */
+ pending: 4; /* Pending Requests */
+ } wstat_fields_s;
+} hubii_wstat_t;
+
+
+typedef union hubii_ilcsr_u {
+ u64 icsr_reg_value;
+ struct {
+ u64 icsr_rsvd: 22, /* unused */
+ icsr_max_burst: 10, /* max burst */
+ icsr_rsvd4: 6, /* reserved */
+ icsr_max_retry: 10, /* max retry */
+ icsr_rsvd3: 2, /* reserved */
+ icsr_lnk_stat: 2, /* link status */
+ icsr_bm8: 1, /* Bit mode 8 */
+ icsr_llp_en: 1, /* LLP enable bit */
+ icsr_rsvd2: 1, /* reserved */
+ icsr_wrm_reset: 1, /* Warm reset bit */
+ icsr_rsvd1: 2, /* Data ready offset */
+ icsr_null_to: 6; /* Null timeout */
+
+ } icsr_fields_s;
+} hubii_ilcsr_t;
+
+
+typedef union hubii_iowa_u {
+ u64 iowa_reg_value;
+ struct {
+ u64 iowa_rsvd: 48, /* unused */
+ iowa_wxoac: 8, /* xtalk widget access bits */
+ iowa_rsvd1: 7, /* reserved */
+ iowa_w0oac: 1; /* xtalk widget access bits */
+ } iowa_fields_s;
+} hubii_iowa_t;
+
+typedef union hubii_iiwa_u {
+ u64 iiwa_reg_value;
+ struct {
+ u64 iiwa_rsvd: 48, /* unused */
+ iiwa_wxiac: 8, /* hub wid access bits */
+ iiwa_rsvd1: 7, /* reserved */
+ iiwa_w0iac: 1; /* hub wid0 access */
+ } iiwa_fields_s;
+} hubii_iiwa_t;
+
+typedef union hubii_illr_u {
+ u64 illr_reg_value;
+ struct {
+ u64 illr_rsvd: 32, /* unused */
+ illr_cb_cnt: 16, /* checkbit error count */
+ illr_sn_cnt: 16; /* sequence number count */
+ } illr_fields_s;
+} hubii_illr_t;
+
+/* The structures below are defined to extract and modify the ii
+performance registers */
+
+/* io_perf_sel allows the caller to specify what tests will be
+ performed */
+typedef union io_perf_sel {
+ u64 perf_sel_reg;
+ struct {
+ u64 perf_rsvd : 48,
+ perf_icct : 8,
+ perf_ippr1 : 4,
+ perf_ippr0 : 4;
+ } perf_sel_bits;
+} io_perf_sel_t;
+
+/* io_perf_cnt is used to extract the count from the hub registers. Due to
+ hardware problems there is only one counter, not two. */
+
+typedef union io_perf_cnt {
+ u64 perf_cnt;
+ struct {
+ u64 perf_rsvd1 : 32,
+ perf_rsvd2 : 12,
+ perf_cnt : 20;
+ } perf_cnt_bits;
+} io_perf_cnt_t;
+
+#endif /* !__ASSEMBLY__ */
+
+
+#define LNK_STAT_WORKING 0x2
+
+#define IIO_LLP_CB_MAX 0xffff
+#define IIO_LLP_SN_MAX 0xffff
+
+/* IO PRB Entries */
+#define IIO_NUM_IPRBS (9)
+#define IIO_IOPRB_0 0x400198 /* PRB entry 0 */
+#define IIO_IOPRB_8 0x4001a0 /* PRB entry 8 */
+#define IIO_IOPRB_9 0x4001a8 /* PRB entry 9 */
+#define IIO_IOPRB_A 0x4001b0 /* PRB entry a */
+#define IIO_IOPRB_B 0x4001b8 /* PRB entry b */
+#define IIO_IOPRB_C 0x4001c0 /* PRB entry c */
+#define IIO_IOPRB_D 0x4001c8 /* PRB entry d */
+#define IIO_IOPRB_E 0x4001d0 /* PRB entry e */
+#define IIO_IOPRB_F 0x4001d8 /* PRB entry f */
+
+
+#define IIO_IXCC 0x4001e0 /* Crosstalk credit count timeout */
+#define IIO_IXTCC IIO_IXCC
+#define IIO_IMEM 0x4001e8 /* Miscellaneous Enable Mask */
+#define IIO_IXTT 0x4001f0 /* Crosstalk tail timeout */
+#define IIO_IECLR 0x4001f8 /* IO error clear */
+#define IIO_IBCN 0x400200 /* IO BTE CRB count */
+
+/*
+ * IIO_IMEM Register fields.
+ */
+#define IIO_IMEM_W0ESD 0x1 /* Widget 0 shut down due to error */
+#define IIO_IMEM_B0ESD (1 << 4) /* BTE 0 shut down due to error */
+#define IIO_IMEM_B1ESD (1 << 8) /* BTE 1 Shut down due to error */
+
+/* PIO Read address Table Entries */
+#define IIO_IPCA 0x400300 /* PRB Counter adjust */
+#define IIO_NUM_PRTES 8 /* Total number of PRB table entries */
+#define IIO_PRTE_0 0x400308 /* PIO Read address table entry 0 */
+#define IIO_PRTE(_x) (IIO_PRTE_0 + (8 * (_x)))
+#define IIO_WIDPRTE(x) IIO_PRTE(((x) - 8)) /* widget ID to its PRTE num */
+#define IIO_IPDR 0x400388 /* PIO table entry deallocation */
+#define IIO_ICDR 0x400390 /* CRB Entry Deallocation */
+#define IIO_IFDR 0x400398 /* IOQ FIFO Depth */
+#define IIO_IIAP 0x4003a0 /* IIQ Arbitration Parameters */
+#define IIO_IMMR IIO_IIAP
+#define IIO_ICMR 0x4003a8 /* CRB Management Register */
+#define IIO_ICCR 0x4003b0 /* CRB Control Register */
+#define IIO_ICTO 0x4003b8 /* CRB Time Out Register */
+#define IIO_ICTP 0x4003c0 /* CRB Time Out Prescalar */
+
+
+/*
+ * ICMR register fields
+ */
+#define IIO_ICMR_PC_VLD_SHFT 36
+#define IIO_ICMR_PC_VLD_MASK (0x7fffUL << IIO_ICMR_PC_VLD_SHFT)
+
+#define IIO_ICMR_CRB_VLD_SHFT 20
+#define IIO_ICMR_CRB_VLD_MASK (0x7fffUL << IIO_ICMR_CRB_VLD_SHFT)
+
+#define IIO_ICMR_FC_CNT_SHFT 16
+#define IIO_ICMR_FC_CNT_MASK (0xf << IIO_ICMR_FC_CNT_SHFT)
+
+#define IIO_ICMR_C_CNT_SHFT 4
+#define IIO_ICMR_C_CNT_MASK (0xf << IIO_ICMR_C_CNT_SHFT)
+
+#define IIO_ICMR_P_CNT_SHFT 0
+#define IIO_ICMR_P_CNT_MASK (0xf << IIO_ICMR_P_CNT_SHFT)
+
+#define IIO_ICMR_PRECISE (1UL << 52)
+#define IIO_ICMR_CLR_RPPD (1UL << 13)
+#define IIO_ICMR_CLR_RQPD (1UL << 12)
+
+/*
+ * IIO PIO Deallocation register field masks : (IIO_IPDR)
+ */
+#define IIO_IPDR_PND (1 << 4)
+
+/*
+ * IIO CRB deallocation register field masks: (IIO_ICDR)
+ */
+#define IIO_ICDR_PND (1 << 4)
+
+/*
+ * IIO CRB control register Fields: IIO_ICCR
+ */
+#define IIO_ICCR_PENDING (0x10000)
+#define IIO_ICCR_CMD_MASK (0xFF)
+#define IIO_ICCR_CMD_SHFT (7)
+#define IIO_ICCR_CMD_NOP (0x0) /* No Op */
+#define IIO_ICCR_CMD_WAKE (0x100) /* Reactivate CRB entry and process */
+#define IIO_ICCR_CMD_TIMEOUT (0x200) /* Make CRB timeout & mark invalid */
+#define IIO_ICCR_CMD_EJECT (0x400) /* Contents of entry written to memory
+ * via a WB
+ */
+#define IIO_ICCR_CMD_FLUSH (0x800)
+
+/*
+ * CRB manipulation macros
+ * The CRB macros are slightly complicated, since there are up to
+ * four registers associated with each CRB entry.
+ */
+#define IIO_NUM_CRBS 15 /* Number of CRBs */
+#define IIO_NUM_NORMAL_CRBS 12 /* Number of regular CRB entries */
+#define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */
+#define IIO_ICRB_OFFSET 8
+#define IIO_ICRB_0 0x400400
+/* XXX - This is now tuneable:
+ #define IIO_FIRST_PC_ENTRY 12
+ */
+
+#define IIO_ICRB_A(_x) (IIO_ICRB_0 + (4 * IIO_ICRB_OFFSET * (_x)))
+#define IIO_ICRB_B(_x) (IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET)
+#define IIO_ICRB_C(_x) (IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET)
+#define IIO_ICRB_D(_x) (IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET)
+
+/* XXX - IBUE register coming for Hub 2 */
+
+/*
+ *
+ * CRB Register description.
+ *
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ *
+ * Many of the fields in CRB are status bits used by hardware
+ * for implementation of the protocol. It's very dangerous to
+ * mess around with the CRB registers.
+ *
+ * It's OK to read the CRB registers and try to make sense out of the
+ * fields in CRB.
+ *
+ * Updating a CRB requires all activities in Hub IIO to be quiesced;
+ * otherwise, a write to a CRB could corrupt other CRB entries.
+ * CRBs are here only as a back-door peek into hub IIO's status.
+ * Quiescing implies no DMAs and no PIOs, either directly from the cpu
+ * or from sn0net; this is not something that can be done easily.
+ * So, AVOID updating CRBs.
+ */
+
+/*
+ * Fields in CRB Register A
+ */
+#ifndef __ASSEMBLY__
+typedef union icrba_u {
+ u64 reg_value;
+ struct {
+ u64 resvd: 6,
+ stall_bte0: 1, /* Stall BTE 0 */
+ stall_bte1: 1, /* Stall BTE 1 */
+ error: 1, /* CRB has an error */
+ ecode: 3, /* Error Code */
+ lnetuce: 1, /* SN0net Uncorrectable error */
+ mark: 1, /* CRB Has been marked */
+ xerr: 1, /* Error bit set in xtalk header */
+ sidn: 4, /* SIDN field from xtalk */
+ tnum: 5, /* TNUM field in xtalk */
+ addr: 38, /* Address of request */
+ valid: 1, /* Valid status */
+ iow: 1; /* IO Write operation */
+ } icrba_fields_s;
+} icrba_t;
+
+/* This is an alternate typedef for the HUB1 CRB A in order to allow
+ runtime selection of the format based on the REV_ID field of the
+ NI_STATUS_REV_ID register. */
+typedef union h1_icrba_u {
+ u64 reg_value;
+
+ struct {
+ u64 resvd: 6,
+ unused: 1, /* Unused but RW!! */
+ error: 1, /* CRB has an error */
+ ecode: 4, /* Error Code */
+ lnetuce: 1, /* SN0net Uncorrectable error */
+ mark: 1, /* CRB Has been marked */
+ xerr: 1, /* Error bit set in xtalk header */
+ sidn: 4, /* SIDN field from xtalk */
+ tnum: 5, /* TNUM field in xtalk */
+ addr: 38, /* Address of request */
+ valid: 1, /* Valid status */
+ iow: 1; /* IO Write operation */
+ } h1_icrba_fields_s;
+} h1_icrba_t;
+
+/* XXX - Is this still right? Check the spec. */
+#define ICRBN_A_CERR_SHFT 54
+#define ICRBN_A_ERR_MASK 0x3ff
+
+#endif /* !__ASSEMBLY__ */
+
+#define IIO_ICRB_ADDR_SHFT 2 /* Shift to get proper address */
+
+/*
+ * values for "ecode" field
+ */
+#define IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */
+#define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */
+#define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access
+ * e.g. WINV to a Read only line.
+ */
+#define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */
+#define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */
+#define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */
+#define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */
+#define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */
+
+
+
+/*
+ * Fields in CRB Register B
+ */
+#ifndef __ASSEMBLY__
+typedef union icrbb_u {
+ u64 reg_value;
+ struct {
+ u64 rsvd1: 5,
+ btenum: 1, /* BTE to which this entry belongs */
+ cohtrans: 1, /* Coherent transaction */
+ xtsize: 2, /* Xtalk operation size
+ * 0: Double Word
+ * 1: 32 Bytes.
+ * 2: 128 Bytes,
+ * 3: Reserved.
+ */
+ srcnode: 9, /* Source Node ID */
+ srcinit: 2, /* Source Initiator:
+ * See below for field values.
+ */
+ useold: 1, /* Use OLD command for processing */
+ imsgtype: 2, /* Incoming message type
+ * see below for field values
+ */
+ imsg: 8, /* Incoming message */
+ initator: 3, /* Initiator of original request
+ * See below for field values.
+ */
+ reqtype: 5, /* Identifies type of request
+ * See below for field values.
+ */
+ rsvd2: 7,
+ ackcnt: 11, /* Invalidate ack count */
+ resp: 1, /* data response given to processor */
+ ack: 1, /* indicates data ack received */
+ hold: 1, /* entry is gathering inval acks */
+ wb_pend:1, /* waiting for writeback to complete */
+ intvn: 1, /* Intervention */
+ stall_ib: 1, /* Stall Ibuf (from crosstalk) */
+ stall_intr: 1; /* Stall internal interrupts */
+ } icrbb_field_s;
+} icrbb_t;
+
+/* This is an alternate typedef for the HUB1 CRB B in order to allow
+ runtime selection of the format based on the REV_ID field of the
+ NI_STATUS_REV_ID register. */
+typedef union h1_icrbb_u {
+ u64 reg_value;
+ struct {
+ u64 rsvd1: 5,
+ btenum: 1, /* BTE to which this entry belongs */
+ cohtrans: 1, /* Coherent transaction */
+ xtsize: 2, /* Xtalk operation size
+ * 0: Double Word
+ * 1: 32 Bytes.
+ * 2: 128 Bytes,
+ * 3: Reserved.
+ */
+ srcnode: 9, /* Source Node ID */
+ srcinit: 2, /* Source Initiator:
+ * See below for field values.
+ */
+ useold: 1, /* Use OLD command for processing */
+ imsgtype: 2, /* Incoming message type
+ * see below for field values
+ */
+ imsg: 8, /* Incoming message */
+ initator: 3, /* Initiator of original request
+ * See below for field values.
+ */
+ rsvd2: 1,
+ pcache: 1, /* entry belongs to partial cache */
+ reqtype: 5, /* Identifies type of request
+ * See below for field values.
+ */
+ stl_ib: 1, /* stall Ibus coming from xtalk */
+ stl_intr: 1, /* Stall internal interrupts */
+ stl_bte0: 1, /* Stall BTE 0 */
+ stl_bte1: 1, /* Stall BTE 1 */
+ intrvn: 1, /* Req was target of intervention */
+ ackcnt: 11, /* Invalidate ack count */
+ resp: 1, /* data response given to processor */
+ ack: 1, /* indicates data ack received */
+ hold: 1, /* entry is gathering inval acks */
+ wb_pend:1, /* waiting for writeback to complete */
+ sleep: 1, /* xtalk req sleeping till IO-sync */
+ pnd_reply: 1, /* replies not issued due to IOQ full */
+ pnd_req: 1; /* reqs not issued due to IOQ full */
+ } h1_icrbb_field_s;
+} h1_icrbb_t;
+
+
+#define b_imsgtype icrbb_field_s.imsgtype
+#define b_btenum icrbb_field_s.btenum
+#define b_cohtrans icrbb_field_s.cohtrans
+#define b_xtsize icrbb_field_s.xtsize
+#define b_srcnode icrbb_field_s.srcnode
+#define b_srcinit icrbb_field_s.srcinit
+#define b_imsgtype icrbb_field_s.imsgtype
+#define b_imsg icrbb_field_s.imsg
+#define b_initiator icrbb_field_s.initator
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * values for field xtsize
+ */
+#define IIO_ICRB_XTSIZE_DW 0 /* Xtalk operation size is 8 bytes */
+#define IIO_ICRB_XTSIZE_32 1 /* Xtalk operation size is 32 bytes */
+#define IIO_ICRB_XTSIZE_128 2 /* Xtalk operation size is 128 bytes */
+
+/*
+ * values for field srcinit
+ */
+#define IIO_ICRB_PROC0 0 /* Source of request is Proc 0 */
+#define IIO_ICRB_PROC1 1 /* Source of request is Proc 1 */
+#define IIO_ICRB_GB_REQ 2 /* Source is Guaranteed BW request */
+#define IIO_ICRB_IO_REQ 3 /* Source is Normal IO request */
+
+/*
+ * Values for field imsgtype
+ */
+#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Message from Xtalk */
+#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */
+#define IIO_ICRB_IMSGT_SN0NET 2 /* Incoming message from SN0 net */
+#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */
+
+/*
+ * values for field initiator.
+ */
+#define IIO_ICRB_INIT_XTALK 0 /* Message originated in xtalk */
+#define IIO_ICRB_INIT_BTE0 0x1 /* Message originated in BTE 0 */
+#define IIO_ICRB_INIT_SN0NET 0x2 /* Message originated in SN0net */
+#define IIO_ICRB_INIT_CRB 0x3 /* Message originated in CRB ? */
+#define IIO_ICRB_INIT_BTE1 0x5 /* Message originated in BTE 1 */
+
+/*
+ * Values for field reqtype.
+ */
+/* XXX - Need to fix this for Hub 2 */
+#define IIO_ICRB_REQ_DWRD 0 /* Request type double word */
+#define IIO_ICRB_REQ_QCLRD 1 /* Request is Qrtr Cache line Rd */
+#define IIO_ICRB_REQ_BLKRD 2 /* Request is block read */
+#define IIO_ICRB_REQ_RSHU 6 /* Request is BTE block read */
+#define IIO_ICRB_REQ_REXU 7 /* request is BTE Excl Read */
+#define IIO_ICRB_REQ_RDEX 8 /* Request is Read Exclusive */
+#define IIO_ICRB_REQ_WINC 9 /* Request is Write Invalidate */
+#define IIO_ICRB_REQ_BWINV 10 /* Request is BTE Winv */
+#define IIO_ICRB_REQ_PIORD 11 /* Request is PIO read */
+#define IIO_ICRB_REQ_PIOWR 12 /* Request is PIO Write */
+#define IIO_ICRB_REQ_PRDM 13 /* Request is Fetch&Op */
+#define IIO_ICRB_REQ_PWRM 14 /* Request is Store &Op */
+#define IIO_ICRB_REQ_PTPWR 15 /* Request is Peer to peer */
+#define IIO_ICRB_REQ_WB 16 /* Request is Write back */
+#define IIO_ICRB_REQ_DEX 17 /* Retained DEX Cache line */
+
+/*
+ * Fields in CRB Register C
+ */
+
+#ifndef __ASSEMBLY__
+
+typedef union icrbc_s {
+ u64 reg_value;
+ struct {
+ u64 rsvd: 6,
+ sleep: 1,
+ pricnt: 4, /* Priority count sent with Read req */
+ pripsc: 4, /* Priority Pre scalar */
+ bteop: 1, /* BTE Operation */
+ push_be: 34, /* Push address Byte enable
+ * Holds push addr, if CRB is for BTE
+ * If CRB belongs to Partial cache,
+ * this contains byte enables bits
+ * ([47:46] = 0)
+ */
+ suppl: 11, /* Supplemental field */
+ barrop: 1, /* Barrier Op bit set in xtalk req */
+ doresp: 1, /* Xtalk req needs a response */
+ gbr: 1; /* GBR bit set in xtalk packet */
+ } icrbc_field_s;
+} icrbc_t;
+
+#define c_pricnt icrbc_field_s.pricnt
+#define c_pripsc icrbc_field_s.pripsc
+#define c_bteop icrbc_field_s.bteop
+#define c_bteaddr icrbc_field_s.push_be /* push_be field has 2 names */
+#define c_benable icrbc_field_s.push_be /* push_be field has 2 names */
+#define c_suppl icrbc_field_s.suppl
+#define c_barrop icrbc_field_s.barrop
+#define c_doresp icrbc_field_s.doresp
+#define c_gbr icrbc_field_s.gbr
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Fields in CRB Register D
+ */
+
+#ifndef __ASSEMBLY__
+typedef union icrbd_s {
+ u64 reg_value;
+ struct {
+ u64 rsvd: 38,
+ toutvld: 1, /* Timeout in progress for this CRB */
+ ctxtvld: 1, /* Context field below is valid */
+ rsvd2: 1,
+ context: 15, /* Bit vector:
+ * Has a bit set for each CRB entry
+ * which needs to be deallocated
+ * before this CRB entry is processed.
+ * Set only for barrier operations.
+ */
+ timeout: 8; /* Timeout Upper 8 bits */
+ } icrbd_field_s;
+} icrbd_t;
+
+#define icrbd_toutvld icrbd_field_s.toutvld
+#define icrbd_ctxtvld icrbd_field_s.ctxtvld
+#define icrbd_context icrbd_field_s.context
+
+
+typedef union hubii_ifdr_u {
+ u64 hi_ifdr_value;
+ struct {
+ u64 ifdr_rsvd: 49,
+ ifdr_maxrp: 7,
+ ifdr_rsvd1: 1,
+ ifdr_maxrq: 7;
+ } hi_ifdr_fields;
+} hubii_ifdr_t;
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Hardware designed names for the BTE control registers.
+ */
+#define IIO_IBLS_0 0x410000 /* BTE length/status 0 */
+#define IIO_IBSA_0 0x410008 /* BTE source address 0 */
+#define IIO_IBDA_0 0x410010 /* BTE destination address 0 */
+#define IIO_IBCT_0 0x410018 /* BTE control/terminate 0 */
+#define IIO_IBNA_0 0x410020 /* BTE notification address 0 */
+#define IIO_IBNR_0 IIO_IBNA_0
+#define IIO_IBIA_0 0x410028 /* BTE interrupt address 0 */
+
+#define IIO_IBLS_1 0x420000 /* BTE length/status 1 */
+#define IIO_IBSA_1 0x420008 /* BTE source address 1 */
+#define IIO_IBDA_1 0x420010 /* BTE destination address 1 */
+#define IIO_IBCT_1 0x420018 /* BTE control/terminate 1 */
+#define IIO_IBNA_1 0x420020 /* BTE notification address 1 */
+#define IIO_IBNR_1 IIO_IBNA_1
+#define IIO_IBIA_1 0x420028 /* BTE interrupt address 1 */
+
+/*
+ * More miscellaneous registers
+ */
+#define IIO_IPCR 0x430000 /* Performance Control */
+#define IIO_IPPR 0x430008 /* Performance Profiling */
+
+/*
+ * IO Error Clear register bit field definitions
+ */
+#define IECLR_BTE1 (1 << 18) /* clear bte error 1 ??? */
+#define IECLR_BTE0 (1 << 17) /* clear bte error 0 ??? */
+#define IECLR_CRAZY (1 << 16) /* clear crazy bit in wstat reg */
+#define IECLR_PRB_F (1 << 15) /* clear err bit in PRB_F reg */
+#define IECLR_PRB_E (1 << 14) /* clear err bit in PRB_E reg */
+#define IECLR_PRB_D (1 << 13) /* clear err bit in PRB_D reg */
+#define IECLR_PRB_C (1 << 12) /* clear err bit in PRB_C reg */
+#define IECLR_PRB_B (1 << 11) /* clear err bit in PRB_B reg */
+#define IECLR_PRB_A (1 << 10) /* clear err bit in PRB_A reg */
+#define IECLR_PRB_9 (1 << 9) /* clear err bit in PRB_9 reg */
+#define IECLR_PRB_8 (1 << 8) /* clear err bit in PRB_8 reg */
+#define IECLR_PRB_0 (1 << 0) /* clear err bit in PRB_0 reg */
+
+/*
+ * IO PIO Read Table Entry format
+ */
+
+#ifndef __ASSEMBLY__
+
+typedef union iprte_a {
+ u64 entry;
+ struct {
+ u64 rsvd1 : 7, /* Reserved field */
+ valid : 1, /* Maps to a timeout entry */
+ rsvd2 : 1,
+ srcnode : 9, /* Node which did this PIO */
+ initiator : 2, /* If T5A or T5B or IO */
+ rsvd3 : 3,
+ addr : 38, /* Physical address of PIO */
+ rsvd4 : 3;
+ } iprte_fields;
+} iprte_a_t;
+
+#define iprte_valid iprte_fields.valid
+#define iprte_timeout iprte_fields.timeout
+#define iprte_srcnode iprte_fields.srcnode
+#define iprte_init iprte_fields.initiator
+#define iprte_addr iprte_fields.addr
+
+#endif /* !__ASSEMBLY__ */
+
+#define IPRTE_ADDRSHFT 3
+
+/*
+ * Hub IIO PRB Register format.
+ */
+
+#ifndef __ASSEMBLY__
+/*
+ * Note: The bnakctr, anakctr, xtalkctrmode and ovflow fields are "Status"
+ * fields, and should only be used when cleaning up after errors.
+ */
+
+typedef union iprb_u {
+ u64 reg_value;
+ struct {
+ u64 rsvd1: 15,
+ error: 1, /* Widget rcvd wr resp pkt w/ error */
+ ovflow: 5, /* Overflow count. perf measurement */
+ fire_and_forget: 1, /* Launch Write without response */
+ mode: 2, /* Widget operation Mode */
+ rsvd2: 2,
+ bnakctr: 14,
+ rsvd3: 2,
+ anakctr: 14,
+ xtalkctr: 8;
+ } iprb_fields_s;
+} iprb_t;
+
+#define iprb_regval reg_value
+
+#define iprb_error iprb_fields_s.error
+#define iprb_ovflow iprb_fields_s.ovflow
+#define iprb_ff iprb_fields_s.fire_and_forget
+#define iprb_mode iprb_fields_s.mode
+#define iprb_bnakctr iprb_fields_s.bnakctr
+#define iprb_anakctr iprb_fields_s.anakctr
+#define iprb_xtalkctr iprb_fields_s.xtalkctr
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * values for mode field in iprb_t.
+ * For details of the meanings of NAK and Accept, refer to the PIO flow
+ * document
+ */
+#define IPRB_MODE_NORMAL (0)
+#define IPRB_MODE_COLLECT_A (1) /* PRB in collect A mode */
+#define IPRB_MODE_SERVICE_A (2) /* NAK B and Accept A */
+#define IPRB_MODE_SERVICE_B (3) /* NAK A and Accept B */
+
+/*
+ * IO CRB entry C_A to E_A : Partial (cache) CRBS
+ */
+#ifndef __ASSEMBLY__
+typedef union icrbp_a {
+ u64 ip_reg; /* the entire register value */
+ struct {
+ u64 error: 1, /* 63, error occurred */
+ ln_uce: 1, /* 62: uncorrectable memory */
+ ln_ae: 1, /* 61: protection violation */
+ ln_werr:1, /* 60: write access error */
+ ln_aerr:1, /* 59: sn0net: Address error */
+ ln_perr:1, /* 58: sn0net: poison error */
+ timeout:1, /* 57: CRB timed out */
+ l_bdpkt:1, /* 56: truncated pkt on sn0net */
+ c_bdpkt:1, /* 55: truncated pkt on xtalk */
+ c_err: 1, /* 54: incoming xtalk req, err set*/
+ rsvd1: 12, /* 53-42: reserved */
+ valid: 1, /* 41: Valid status */
+ sidn: 4, /* 40-37: SIDN field of xtalk rqst */
+ tnum: 5, /* 36-32: TNUM of xtalk request */
+ bo: 1, /* 31: barrier op set in xtalk rqst*/
+ resprqd:1, /* 30: xtalk rqst requires response*/
+ gbr: 1, /* 29: gbr bit set in xtalk rqst */
+ size: 2, /* 28-27: size of xtalk request */
+ excl: 4, /* 26-23: exclusive bit(s) */
+ stall: 3, /* 22-20: stall (xtalk, bte 0/1) */
+ intvn: 1, /* 19: rqst target of intervention*/
+ resp: 1, /* 18: Data response given to t5 */
+ ack: 1, /* 17: Data ack received. */
+ hold: 1, /* 16: crb gathering invalidate acks*/
+ wb: 1, /* 15: writeback pending. */
+ ack_cnt:11, /* 14-04: counter of invalidate acks*/
+ tscaler:4; /* 03-00: Timeout prescaler */
+ } ip_fmt;
+} icrbp_a_t;
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * A couple of defines to go with the above structure.
+ */
+#define ICRBP_A_CERR_SHFT 54
+#define ICRBP_A_ERR_MASK 0x3ff
+
+#ifndef __ASSEMBLY__
+typedef union hubii_idsr {
+ u64 iin_reg;
+ struct {
+ u64 rsvd1 : 35,
+ isent : 1,
+ rsvd2 : 3,
+ ienable: 1,
+ rsvd : 7,
+ node : 9,
+ rsvd4 : 1,
+ level : 7;
+ } iin_fmt;
+} hubii_idsr_t;
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * IO BTE Length/Status (IIO_IBLS) register bit field definitions
+ */
+#define IBLS_BUSY (0x1 << 20)
+#define IBLS_ERROR_SHFT 16
+#define IBLS_ERROR (0x1 << IBLS_ERROR_SHFT)
+#define IBLS_LENGTH_MASK 0xffff
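+
+/*
+ * Rough sketch of how these BTE registers are typically programmed
+ * (illustrative only, not from the original header; the exact handshake is
+ * not documented here): write the source, destination and notification
+ * addresses, then write the length to IIO_IBLS_x with IBLS_BUSY set and
+ * wait for the busy bit to clear.
+ *
+ *	REMOTE_HUB_S(nasid, IIO_IBSA_0, src_phys);
+ *	REMOTE_HUB_S(nasid, IIO_IBDA_0, dst_phys);
+ *	REMOTE_HUB_S(nasid, IIO_IBNA_0, notify_phys);
+ *	REMOTE_HUB_S(nasid, IIO_IBLS_0, IBLS_BUSY | (len & IBLS_LENGTH_MASK));
+ */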
+
+/*
+ * IO BTE Control/Terminate register (IBCT) register bit field definitions
+ */
+#define IBCT_POISON (0x1 << 8)
+#define IBCT_NOTIFY (0x1 << 4)
+#define IBCT_ZFIL_MODE (0x1 << 0)
+
+/*
+ * IO BTE Interrupt Address Register (IBIA) register bit field definitions
+ */
+#define IBIA_LEVEL_SHFT 16
+#define IBIA_LEVEL_MASK (0x7f << IBIA_LEVEL_SHFT)
+#define IBIA_NODE_ID_SHFT 0
+#define IBIA_NODE_ID_MASK (0x1ff)
+
+/*
+ * Miscellaneous hub constants
+ */
+
+/* Number of widgets supported by hub */
+#define HUB_NUM_WIDGET 9
+#define HUB_WIDGET_ID_MIN 0x8
+#define HUB_WIDGET_ID_MAX 0xf
+
+#define HUB_WIDGET_PART_NUM 0xc101
+#define MAX_HUBS_PER_XBOW 2
+
+/*
+ * Get a hub's widget id from widget control register
+ */
+#define IIO_WCR_WID_GET(nasid) (REMOTE_HUB_L(nasid, III_WCR) & 0xf)
+#define IIO_WST_ERROR_MASK (UINT64_CAST 1 << 32) /* Widget status error */
+
+/*
+ * Number of credits Hub widget has while sending req/response to
+ * xbow.
+ * A value of 3 is required by Xbow 1.1.
+ * We may be able to increase this to 4 with Xbow 1.2.
+ */
+#define HUBII_XBOW_CREDIT 3
+#define HUBII_XBOW_REV2_CREDIT 4
+
+#endif /* _ASM_SGI_SN_SN0_HUBIO_H */
diff --git a/arch/mips/include/asm/sn/sn0/hubmd.h b/arch/mips/include/asm/sn/sn0/hubmd.h
new file mode 100644
index 000000000..305d002be
--- /dev/null
+++ b/arch/mips/include/asm/sn/sn0/hubmd.h
@@ -0,0 +1,789 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from IRIX <sys/SN/SN0/hubmd.h>, revision 1.59.
+ *
+ * Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_SN_SN0_HUBMD_H
+#define _ASM_SN_SN0_HUBMD_H
+
+
+/*
+ * Hub Memory/Directory interface registers
+ */
+#define CACHE_SLINE_SIZE 128 /* Secondary cache line size on SN0 */
+
+#define MAX_REGIONS 64
+
+/* Hardware page size and shift */
+
+#define MD_PAGE_SIZE 4096 /* Page size in bytes */
+#define MD_PAGE_NUM_SHFT 12 /* Address to page number shift */
+
+/* Register offsets from LOCAL_HUB or REMOTE_HUB */
+
+#define MD_BASE 0x200000
+#define MD_BASE_PERF 0x210000
+#define MD_BASE_JUNK 0x220000
+
+#define MD_IO_PROTECT 0x200000 /* MD and core register protection */
+#define MD_IO_PROT_OVRRD 0x200008 /* Clear my bit in MD_IO_PROTECT */
+#define MD_HSPEC_PROTECT 0x200010 /* BDDIR, LBOOT, RBOOT protection */
+#define MD_MEMORY_CONFIG 0x200018 /* Memory/Directory DIMM control */
+#define MD_REFRESH_CONTROL 0x200020 /* Memory/Directory refresh ctrl */
+#define MD_FANDOP_CAC_STAT 0x200028 /* Fetch-and-op cache status */
+#define MD_MIG_DIFF_THRESH 0x200030 /* Page migr. count diff thresh. */
+#define MD_MIG_VALUE_THRESH 0x200038 /* Page migr. count abs. thresh. */
+#define MD_MIG_CANDIDATE 0x200040 /* Latest page migration candidate */
+#define MD_MIG_CANDIDATE_CLR 0x200048 /* Clear page migration candidate */
+#define MD_DIR_ERROR 0x200050 /* Directory DIMM error */
+#define MD_DIR_ERROR_CLR 0x200058 /* Directory DIMM error clear */
+#define MD_PROTOCOL_ERROR 0x200060 /* Directory protocol error */
+#define MD_PROTOCOL_ERROR_CLR 0x200068 /* Directory protocol error clear */
+#define MD_MEM_ERROR 0x200070 /* Memory DIMM error */
+#define MD_MEM_ERROR_CLR 0x200078 /* Memory DIMM error clear */
+#define MD_MISC_ERROR 0x200080 /* Miscellaneous MD error */
+#define MD_MISC_ERROR_CLR 0x200088 /* Miscellaneous MD error clear */
+#define MD_MEM_DIMM_INIT	0x200090 /* Memory DIMM mode initialization */
+#define MD_DIR_DIMM_INIT 0x200098 /* Directory DIMM mode init. */
+#define MD_MOQ_SIZE 0x2000a0 /* MD outgoing queue size */
+#define MD_MLAN_CTL 0x2000a8 /* NIC (Microlan) control register */
+
+#define MD_PERF_SEL 0x210000 /* Select perf monitor events */
+#define MD_PERF_CNT0 0x210010 /* Performance counter 0 */
+#define MD_PERF_CNT1 0x210018 /* Performance counter 1 */
+#define MD_PERF_CNT2 0x210020 /* Performance counter 2 */
+#define MD_PERF_CNT3 0x210028 /* Performance counter 3 */
+#define MD_PERF_CNT4 0x210030 /* Performance counter 4 */
+#define MD_PERF_CNT5 0x210038 /* Performance counter 5 */
+
+#define MD_UREG0_0 0x220000 /* uController/UART 0 register */
+#define MD_UREG0_1 0x220008 /* uController/UART 0 register */
+#define MD_UREG0_2 0x220010 /* uController/UART 0 register */
+#define MD_UREG0_3 0x220018 /* uController/UART 0 register */
+#define MD_UREG0_4 0x220020 /* uController/UART 0 register */
+#define MD_UREG0_5 0x220028 /* uController/UART 0 register */
+#define MD_UREG0_6 0x220030 /* uController/UART 0 register */
+#define MD_UREG0_7 0x220038 /* uController/UART 0 register */
+
+#define MD_SLOTID_USTAT 0x220048 /* Hub slot ID & UART/uCtlr status */
+#define MD_LED0 0x220050 /* Eight-bit LED for CPU A */
+#define MD_LED1 0x220058 /* Eight-bit LED for CPU B */
+
+#define MD_UREG1_0 0x220080 /* uController/UART 1 register */
+#define MD_UREG1_1 0x220088 /* uController/UART 1 register */
+#define MD_UREG1_2 0x220090 /* uController/UART 1 register */
+#define MD_UREG1_3 0x220098 /* uController/UART 1 register */
+#define MD_UREG1_4 0x2200a0 /* uController/UART 1 register */
+#define MD_UREG1_5 0x2200a8 /* uController/UART 1 register */
+#define MD_UREG1_6 0x2200b0 /* uController/UART 1 register */
+#define MD_UREG1_7 0x2200b8 /* uController/UART 1 register */
+#define MD_UREG1_8 0x2200c0 /* uController/UART 1 register */
+#define MD_UREG1_9 0x2200c8 /* uController/UART 1 register */
+#define MD_UREG1_10 0x2200d0 /* uController/UART 1 register */
+#define MD_UREG1_11 0x2200d8 /* uController/UART 1 register */
+#define MD_UREG1_12 0x2200e0 /* uController/UART 1 register */
+#define MD_UREG1_13 0x2200e8 /* uController/UART 1 register */
+#define MD_UREG1_14 0x2200f0 /* uController/UART 1 register */
+#define MD_UREG1_15 0x2200f8 /* uController/UART 1 register */
+
+#ifdef CONFIG_SGI_SN_N_MODE
+#define MD_MEM_BANKS 4 /* 4 banks of memory max in N mode */
+#else
+#define MD_MEM_BANKS 8 /* 8 banks of memory max in M mode */
+#endif
+
+/*
+ * MD_MEMORY_CONFIG fields
+ *
+ * MD_SIZE_xxx are useful for representing the size of a SIMM or bank
+ * (SIMM pair). They correspond to the values needed for the bit
+ * triplets (MMC_BANK_MASK) in the MD_MEMORY_CONFIG register for bank size.
+ * Bits not used by the MD are used by software.
+ */
+
+#define MD_SIZE_EMPTY 0 /* Valid in MEMORY_CONFIG */
+#define MD_SIZE_8MB 1
+#define MD_SIZE_16MB 2
+#define MD_SIZE_32MB 3 /* Broken in Hub 1 */
+#define MD_SIZE_64MB 4 /* Valid in MEMORY_CONFIG */
+#define MD_SIZE_128MB 5 /* Valid in MEMORY_CONFIG */
+#define MD_SIZE_256MB 6
+#define MD_SIZE_512MB 7 /* Valid in MEMORY_CONFIG */
+#define MD_SIZE_1GB 8
+#define MD_SIZE_2GB 9
+#define MD_SIZE_4GB 10
+
+#define MD_SIZE_BYTES(size) ((size) == 0 ? 0 : 0x400000L << (size))
+#define MD_SIZE_MBYTES(size) ((size) == 0 ? 0 : 4 << (size))
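+
+/*
+ * Illustrative decode of a bank size (not part of the original header;
+ * 'mmc' and 'code' are hypothetical names, and MMC_BANK_MASK/MMC_BANK_SHFT
+ * are defined further below): read MD_MEMORY_CONFIG, isolate a bank's
+ * 3-bit size code and convert it with the macros above.
+ *
+ *	u64 mmc	 = REMOTE_HUB_L(nasid, MD_MEMORY_CONFIG);
+ *	int code = (mmc & MMC_BANK_MASK(0)) >> MMC_BANK_SHFT(0);
+ *	u64 size = MD_SIZE_BYTES(code);
+ *	(e.g. code 4 gives 0x4000000 bytes, i.e. 64 MB)
+ */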
+
+#define MMC_FPROM_CYC_SHFT 49 /* Have to use UINT64_CAST, instead */
+#define MMC_FPROM_CYC_MASK (UINT64_CAST 31 << 49) /* of 'L' suffix, */
+#define MMC_FPROM_WR_SHFT 44 /* for assembler */
+#define MMC_FPROM_WR_MASK (UINT64_CAST 31 << 44)
+#define MMC_UCTLR_CYC_SHFT 39
+#define MMC_UCTLR_CYC_MASK (UINT64_CAST 31 << 39)
+#define MMC_UCTLR_WR_SHFT 34
+#define MMC_UCTLR_WR_MASK (UINT64_CAST 31 << 34)
+#define MMC_DIMM0_SEL_SHFT 32
+#define MMC_DIMM0_SEL_MASK (UINT64_CAST 3 << 32)
+#define MMC_IO_PROT_EN_SHFT 31
+#define MMC_IO_PROT_EN_MASK (UINT64_CAST 1 << 31)
+#define MMC_IO_PROT (UINT64_CAST 1 << 31)
+#define MMC_ARB_MLSS_SHFT 30
+#define MMC_ARB_MLSS_MASK (UINT64_CAST 1 << 30)
+#define MMC_ARB_MLSS (UINT64_CAST 1 << 30)
+#define MMC_IGNORE_ECC_SHFT 29
+#define MMC_IGNORE_ECC_MASK (UINT64_CAST 1 << 29)
+#define MMC_IGNORE_ECC (UINT64_CAST 1 << 29)
+#define MMC_DIR_PREMIUM_SHFT 28
+#define MMC_DIR_PREMIUM_MASK (UINT64_CAST 1 << 28)
+#define MMC_DIR_PREMIUM (UINT64_CAST 1 << 28)
+#define MMC_REPLY_GUAR_SHFT 24
+#define MMC_REPLY_GUAR_MASK (UINT64_CAST 15 << 24)
+#define MMC_BANK_SHFT(_b) ((_b) * 3)
+#define MMC_BANK_MASK(_b) (UINT64_CAST 7 << MMC_BANK_SHFT(_b))
+#define MMC_BANK_ALL_MASK 0xffffff
+#define MMC_RESET_DEFAULTS (UINT64_CAST 0x0f << MMC_FPROM_CYC_SHFT | \
+ UINT64_CAST 0x07 << MMC_FPROM_WR_SHFT | \
+ UINT64_CAST 0x1f << MMC_UCTLR_CYC_SHFT | \
+ UINT64_CAST 0x0f << MMC_UCTLR_WR_SHFT | \
+ MMC_IGNORE_ECC | MMC_DIR_PREMIUM | \
+ UINT64_CAST 0x0f << MMC_REPLY_GUAR_SHFT | \
+ MMC_BANK_ALL_MASK)
+
+/* MD_REFRESH_CONTROL fields */
+
+#define MRC_ENABLE_SHFT 63
+#define MRC_ENABLE_MASK (UINT64_CAST 1 << 63)
+#define MRC_ENABLE (UINT64_CAST 1 << 63)
+#define MRC_COUNTER_SHFT 12
+#define MRC_COUNTER_MASK (UINT64_CAST 0xfff << 12)
+#define MRC_CNT_THRESH_MASK 0xfff
+#define MRC_RESET_DEFAULTS (UINT64_CAST 0x400)
+
+/* MD_MEM_DIMM_INIT and MD_DIR_DIMM_INIT fields */
+
+#define MDI_SELECT_SHFT 32
+#define MDI_SELECT_MASK (UINT64_CAST 0x0f << 32)
+#define MDI_DIMM_MODE_MASK (UINT64_CAST 0xfff)
+
+/* MD_MOQ_SIZE fields */
+
+#define MMS_RP_SIZE_SHFT 8
+#define MMS_RP_SIZE_MASK (UINT64_CAST 0x3f << 8)
+#define MMS_RQ_SIZE_SHFT 0
+#define MMS_RQ_SIZE_MASK (UINT64_CAST 0x1f)
+#define MMS_RESET_DEFAULTS (0x32 << 8 | 0x12)
+
+/* MD_FANDOP_CAC_STAT fields */
+
+#define MFC_VALID_SHFT 63
+#define MFC_VALID_MASK (UINT64_CAST 1 << 63)
+#define MFC_VALID (UINT64_CAST 1 << 63)
+#define MFC_ADDR_SHFT 6
+#define MFC_ADDR_MASK (UINT64_CAST 0x3ffffff)
+
+/* MD_MLAN_CTL fields */
+
+#define MLAN_PHI1_SHFT 27
+#define MLAN_PHI1_MASK (UINT64_CAST 0x7f << 27)
+#define MLAN_PHI0_SHFT 20
+#define MLAN_PHI0_MASK		(UINT64_CAST 0x7f << 20)
+#define MLAN_PULSE_SHFT 10
+#define MLAN_PULSE_MASK (UINT64_CAST 0x3ff << 10)
+#define MLAN_SAMPLE_SHFT 2
+#define MLAN_SAMPLE_MASK (UINT64_CAST 0xff << 2)
+#define MLAN_DONE_SHFT 1
+#define MLAN_DONE_MASK 2
+#define MLAN_DONE (UINT64_CAST 0x02)
+#define MLAN_RD_DATA (UINT64_CAST 0x01)
+#define MLAN_RESET_DEFAULTS (UINT64_CAST 0x31 << MLAN_PHI1_SHFT | \
+ UINT64_CAST 0x31 << MLAN_PHI0_SHFT)
+
+/* MD_SLOTID_USTAT bit definitions */
+
+#define MSU_CORECLK_TST_SHFT 7 /* You don't wanna know */
+#define MSU_CORECLK_TST_MASK (UINT64_CAST 1 << 7)
+#define MSU_CORECLK_TST (UINT64_CAST 1 << 7)
+#define MSU_CORECLK_SHFT 6 /* You don't wanna know */
+#define MSU_CORECLK_MASK (UINT64_CAST 1 << 6)
+#define MSU_CORECLK (UINT64_CAST 1 << 6)
+#define MSU_NETSYNC_SHFT 5 /* You don't wanna know */
+#define MSU_NETSYNC_MASK (UINT64_CAST 1 << 5)
+#define MSU_NETSYNC (UINT64_CAST 1 << 5)
+#define MSU_FPROMRDY_SHFT 4 /* Flash PROM ready bit */
+#define MSU_FPROMRDY_MASK (UINT64_CAST 1 << 4)
+#define MSU_FPROMRDY (UINT64_CAST 1 << 4)
+#define MSU_I2CINTR_SHFT 3 /* I2C interrupt bit */
+#define MSU_I2CINTR_MASK (UINT64_CAST 1 << 3)
+#define MSU_I2CINTR (UINT64_CAST 1 << 3)
+#define MSU_SLOTID_MASK 0xff
+#define MSU_SN0_SLOTID_SHFT 0 /* Slot ID */
+#define MSU_SN0_SLOTID_MASK (UINT64_CAST 7)
+#define MSU_SN00_SLOTID_SHFT 7
+#define MSU_SN00_SLOTID_MASK (UINT64_CAST 0x80)
+
+#define MSU_PIMM_PSC_SHFT 4
+#define MSU_PIMM_PSC_MASK (0xf << MSU_PIMM_PSC_SHFT)
+
+/* MD_MIG_DIFF_THRESH bit definitions */
+
+#define MD_MIG_DIFF_THRES_VALID_MASK (UINT64_CAST 0x1 << 63)
+#define MD_MIG_DIFF_THRES_VALID_SHFT 63
+#define MD_MIG_DIFF_THRES_VALUE_MASK (UINT64_CAST 0xfffff)
+
+/* MD_MIG_VALUE_THRESH bit definitions */
+
+#define MD_MIG_VALUE_THRES_VALID_MASK (UINT64_CAST 0x1 << 63)
+#define MD_MIG_VALUE_THRES_VALID_SHFT 63
+#define MD_MIG_VALUE_THRES_VALUE_MASK (UINT64_CAST 0xfffff)
+
+/* MD_MIG_CANDIDATE bit definitions */
+
+#define MD_MIG_CANDIDATE_VALID_MASK (UINT64_CAST 0x1 << 63)
+#define MD_MIG_CANDIDATE_VALID_SHFT 63
+#define MD_MIG_CANDIDATE_TYPE_MASK (UINT64_CAST 0x1 << 30)
+#define MD_MIG_CANDIDATE_TYPE_SHFT 30
+#define MD_MIG_CANDIDATE_OVERRUN_MASK (UINT64_CAST 0x1 << 29)
+#define MD_MIG_CANDIDATE_OVERRUN_SHFT 29
+#define MD_MIG_CANDIDATE_INITIATOR_MASK (UINT64_CAST 0x7ff << 18)
+#define MD_MIG_CANDIDATE_INITIATOR_SHFT 18
+#define MD_MIG_CANDIDATE_NODEID_MASK (UINT64_CAST 0x1ff << 20)
+#define MD_MIG_CANDIDATE_NODEID_SHFT 20
+#define MD_MIG_CANDIDATE_ADDR_MASK (UINT64_CAST 0x3ffff)
+#define MD_MIG_CANDIDATE_ADDR_SHFT 14 /* The address starts at bit 14 */
+
+/* Other MD definitions */
+
+#define MD_BANK_SHFT 29 /* log2(512 MB) */
+#define MD_BANK_MASK (UINT64_CAST 7 << 29)
+#define MD_BANK_SIZE (UINT64_CAST 1 << MD_BANK_SHFT) /* 512 MB */
+#define MD_BANK_OFFSET(_b) (UINT64_CAST (_b) << MD_BANK_SHFT)
+
+/*
+ * The following definitions cover the bit field definitions for the
+ * various MD registers. For multi-bit registers, we define both
+ * a shift amount and a mask value. By convention, if you want to
+ * isolate a field, you should mask the field and then shift it down,
+ * since this makes the masks useful without a shift.
+ */
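+
+/*
+ * Example of the convention described above (illustrative, not from the
+ * original header): to isolate the state field of a premium directory low
+ * word, mask first and then shift down.
+ *
+ *	state = (entry_lo & MD_PDIR_STATE_MASK) >> MD_PDIR_STATE_SHFT;
+ *	busy  = (state == MD_DIR_BUSY_SHARED || state == MD_DIR_BUSY_EXCL);
+ */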
+
+/* Directory entry states for both premium and standard SIMMs. */
+
+#define MD_DIR_SHARED (UINT64_CAST 0x0) /* 000 */
+#define MD_DIR_POISONED (UINT64_CAST 0x1) /* 001 */
+#define MD_DIR_EXCLUSIVE (UINT64_CAST 0x2) /* 010 */
+#define MD_DIR_BUSY_SHARED (UINT64_CAST 0x3) /* 011 */
+#define MD_DIR_BUSY_EXCL (UINT64_CAST 0x4) /* 100 */
+#define MD_DIR_WAIT (UINT64_CAST 0x5) /* 101 */
+#define MD_DIR_UNOWNED (UINT64_CAST 0x7) /* 111 */
+
+/*
+ * The MD_DIR_FORCE_ECC bit can be added to directory entry write data
+ * to force the ECC to be written as-is instead of being recalculated.
+ */
+
+#define MD_DIR_FORCE_ECC (UINT64_CAST 1 << 63)
+
+/*
+ * Premium SIMM directory entry shifts and masks. Each is valid only in the
+ * context(s) indicated, where A, B, and C indicate the directory entry format
+ * as shown, and low and/or high indicates which double-word of the entry.
+ *
+ * Format A: STATE = shared, FINE = 1
+ * Format B: STATE = shared, FINE = 0
+ * Format C: STATE != shared (FINE must be 0)
+ */
+
+#define MD_PDIR_MASK 0xffffffffffff /* Whole entry */
+#define MD_PDIR_ECC_SHFT 0 /* ABC low or high */
+#define MD_PDIR_ECC_MASK 0x7f
+#define MD_PDIR_PRIO_SHFT 8 /* ABC low */
+#define MD_PDIR_PRIO_MASK (0xf << 8)
+#define MD_PDIR_AX_SHFT 7 /* ABC low */
+#define MD_PDIR_AX_MASK (1 << 7)
+#define MD_PDIR_AX (1 << 7)
+#define MD_PDIR_FINE_SHFT 12 /* ABC low */
+#define MD_PDIR_FINE_MASK (1 << 12)
+#define MD_PDIR_FINE (1 << 12)
+#define MD_PDIR_OCT_SHFT 13 /* A low */
+#define MD_PDIR_OCT_MASK (7 << 13)
+#define MD_PDIR_STATE_SHFT 13 /* BC low */
+#define MD_PDIR_STATE_MASK (7 << 13)
+#define MD_PDIR_ONECNT_SHFT 16 /* BC low */
+#define MD_PDIR_ONECNT_MASK (0x3f << 16)
+#define MD_PDIR_PTR_SHFT 22 /* C low */
+#define MD_PDIR_PTR_MASK (UINT64_CAST 0x7ff << 22)
+#define MD_PDIR_VECMSB_SHFT 22 /* AB low */
+#define MD_PDIR_VECMSB_BITMASK 0x3ffffff
+#define MD_PDIR_VECMSB_BITSHFT 27
+#define MD_PDIR_VECMSB_MASK (UINT64_CAST MD_PDIR_VECMSB_BITMASK << 22)
+#define MD_PDIR_CWOFF_SHFT 7 /* C high */
+#define MD_PDIR_CWOFF_MASK (7 << 7)
+#define MD_PDIR_VECLSB_SHFT 10 /* AB high */
+#define MD_PDIR_VECLSB_BITMASK (UINT64_CAST 0x3fffffffff)
+#define MD_PDIR_VECLSB_BITSHFT 0
+#define MD_PDIR_VECLSB_MASK (MD_PDIR_VECLSB_BITMASK << 10)
+
+/*
+ * Directory initialization values
+ */
+
+#define MD_PDIR_INIT_LO (MD_DIR_UNOWNED << MD_PDIR_STATE_SHFT | \
+ MD_PDIR_AX)
+#define MD_PDIR_INIT_HI 0
+#define MD_PDIR_INIT_PROT (MD_PROT_RW << MD_PPROT_IO_SHFT | \
+ MD_PROT_RW << MD_PPROT_SHFT)
+
+/*
+ * Standard SIMM directory entry shifts and masks. Each is valid only in the
+ * context(s) indicated, where A and C indicate the directory entry format
+ * as shown, and low and/or high indicates which double-word of the entry.
+ *
+ * Format A: STATE == shared
+ * Format C: STATE != shared
+ */
+
+#define MD_SDIR_MASK 0xffff /* Whole entry */
+#define MD_SDIR_ECC_SHFT 0 /* AC low or high */
+#define MD_SDIR_ECC_MASK 0x1f
+#define MD_SDIR_PRIO_SHFT 6 /* AC low */
+#define MD_SDIR_PRIO_MASK (1 << 6)
+#define MD_SDIR_AX_SHFT 5 /* AC low */
+#define MD_SDIR_AX_MASK (1 << 5)
+#define MD_SDIR_AX (1 << 5)
+#define MD_SDIR_STATE_SHFT 7 /* AC low */
+#define MD_SDIR_STATE_MASK (7 << 7)
+#define MD_SDIR_PTR_SHFT 10 /* C low */
+#define MD_SDIR_PTR_MASK (0x3f << 10)
+#define MD_SDIR_CWOFF_SHFT 5 /* C high */
+#define MD_SDIR_CWOFF_MASK (7 << 5)
+#define MD_SDIR_VECMSB_SHFT 11 /* A low */
+#define MD_SDIR_VECMSB_BITMASK 0x1f
+#define MD_SDIR_VECMSB_BITSHFT 7
+#define MD_SDIR_VECMSB_MASK (MD_SDIR_VECMSB_BITMASK << 11)
+#define MD_SDIR_VECLSB_SHFT 5 /* A high */
+#define MD_SDIR_VECLSB_BITMASK 0x7ff
+#define MD_SDIR_VECLSB_BITSHFT 0
+#define MD_SDIR_VECLSB_MASK (MD_SDIR_VECLSB_BITMASK << 5)
+
+/*
+ * Directory initialization values
+ */
+
+#define MD_SDIR_INIT_LO (MD_DIR_UNOWNED << MD_SDIR_STATE_SHFT | \
+ MD_SDIR_AX)
+#define MD_SDIR_INIT_HI 0
+#define MD_SDIR_INIT_PROT (MD_PROT_RW << MD_SPROT_SHFT)
+
+/* Protection and migration field values */
+
+#define MD_PROT_RW (UINT64_CAST 0x6)
+#define MD_PROT_RO (UINT64_CAST 0x3)
+#define MD_PROT_NO (UINT64_CAST 0x0)
+#define MD_PROT_BAD (UINT64_CAST 0x5)
+
+/* Premium SIMM protection entry shifts and masks. */
+
+#define MD_PPROT_SHFT 0 /* Prot. field */
+#define MD_PPROT_MASK 7
+#define MD_PPROT_MIGMD_SHFT 3 /* Migration mode */
+#define MD_PPROT_MIGMD_MASK (3 << 3)
+#define MD_PPROT_REFCNT_SHFT 5 /* Reference count */
+#define MD_PPROT_REFCNT_WIDTH 0x7ffff
+#define MD_PPROT_REFCNT_MASK (MD_PPROT_REFCNT_WIDTH << 5)
+
+#define MD_PPROT_IO_SHFT 45 /* I/O Prot field */
+#define MD_PPROT_IO_MASK (UINT64_CAST 7 << 45)
+
+/* Standard SIMM protection entry shifts and masks. */
+
+#define MD_SPROT_SHFT 0 /* Prot. field */
+#define MD_SPROT_MASK 7
+#define MD_SPROT_MIGMD_SHFT 3 /* Migration mode */
+#define MD_SPROT_MIGMD_MASK (3 << 3)
+#define MD_SPROT_REFCNT_SHFT 5 /* Reference count */
+#define MD_SPROT_REFCNT_WIDTH 0x7ff
+#define MD_SPROT_REFCNT_MASK (MD_SPROT_REFCNT_WIDTH << 5)
+
+/* Migration modes used in protection entries */
+
+#define MD_PROT_MIGMD_IREL (UINT64_CAST 0x3 << 3)
+#define MD_PROT_MIGMD_IABS (UINT64_CAST 0x2 << 3)
+#define MD_PROT_MIGMD_PREL (UINT64_CAST 0x1 << 3)
+#define MD_PROT_MIGMD_OFF (UINT64_CAST 0x0 << 3)
+
+
+/*
+ * Operations on page migration threshold register
+ */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * LED register macros
+ */
+
+#define CPU_LED_ADDR(_nasid, _slice) \
+ (private.p_sn00 ? \
+ REMOTE_HUB_ADDR((_nasid), MD_UREG1_0 + ((_slice) << 5)) : \
+ REMOTE_HUB_ADDR((_nasid), MD_LED0 + ((_slice) << 3)))
+
+#define SET_CPU_LEDS(_nasid, _slice, _val) \
+ (HUB_S(CPU_LED_ADDR(_nasid, _slice), (_val)))
+
+#define SET_MY_LEDS(_v) \
+ SET_CPU_LEDS(get_nasid(), get_slice(), (_v))
+
+/*
+ * Operations on Memory/Directory DIMM control register
+ */
+
+#define DIRTYPE_PREMIUM 1
+#define DIRTYPE_STANDARD 0
+#define MD_MEMORY_CONFIG_DIR_TYPE_GET(region) (\
+ (REMOTE_HUB_L(region, MD_MEMORY_CONFIG) & MMC_DIR_PREMIUM_MASK) >> \
+ MMC_DIR_PREMIUM_SHFT)
+
+
+/*
+ * Operations on page migration count difference and absolute threshold
+ * registers
+ */
+
+#define MD_MIG_DIFF_THRESH_GET(region) ( \
+ REMOTE_HUB_L((region), MD_MIG_DIFF_THRESH) & \
+ MD_MIG_DIFF_THRES_VALUE_MASK)
+
+#define MD_MIG_DIFF_THRESH_SET(region, value) ( \
+ REMOTE_HUB_S((region), MD_MIG_DIFF_THRESH, \
+ MD_MIG_DIFF_THRES_VALID_MASK | (value)))
+
+#define MD_MIG_DIFF_THRESH_DISABLE(region) ( \
+ REMOTE_HUB_S((region), MD_MIG_DIFF_THRESH, \
+ REMOTE_HUB_L((region), MD_MIG_DIFF_THRESH) \
+ & ~MD_MIG_DIFF_THRES_VALID_MASK))
+
+#define MD_MIG_DIFF_THRESH_ENABLE(region) ( \
+ REMOTE_HUB_S((region), MD_MIG_DIFF_THRESH, \
+ REMOTE_HUB_L((region), MD_MIG_DIFF_THRESH) \
+ | MD_MIG_DIFF_THRES_VALID_MASK))
+
+#define MD_MIG_DIFF_THRESH_IS_ENABLED(region) ( \
+ REMOTE_HUB_L((region), MD_MIG_DIFF_THRESH) & \
+ MD_MIG_DIFF_THRES_VALID_MASK)
+
+#define MD_MIG_VALUE_THRESH_GET(region) ( \
+ REMOTE_HUB_L((region), MD_MIG_VALUE_THRESH) & \
+ MD_MIG_VALUE_THRES_VALUE_MASK)
+
+#define MD_MIG_VALUE_THRESH_SET(region, value) ( \
+ REMOTE_HUB_S((region), MD_MIG_VALUE_THRESH, \
+ MD_MIG_VALUE_THRES_VALID_MASK | (value)))
+
+#define MD_MIG_VALUE_THRESH_DISABLE(region) ( \
+ REMOTE_HUB_S((region), MD_MIG_VALUE_THRESH, \
+ REMOTE_HUB_L(region, MD_MIG_VALUE_THRESH) \
+ & ~MD_MIG_VALUE_THRES_VALID_MASK))
+
+#define MD_MIG_VALUE_THRESH_ENABLE(region) ( \
+ REMOTE_HUB_S((region), MD_MIG_VALUE_THRESH, \
+ REMOTE_HUB_L((region), MD_MIG_VALUE_THRESH) \
+ | MD_MIG_VALUE_THRES_VALID_MASK))
+
+#define MD_MIG_VALUE_THRESH_IS_ENABLED(region) ( \
+ REMOTE_HUB_L((region), MD_MIG_VALUE_THRESH) & \
+ MD_MIG_VALUE_THRES_VALID_MASK)
+
+/*
+ * Operations on page migration candidate register
+ */
+
+#define MD_MIG_CANDIDATE_GET(my_region_id) ( \
+ REMOTE_HUB_L((my_region_id), MD_MIG_CANDIDATE_CLR))
+
+#define MD_MIG_CANDIDATE_HWPFN(value) ((value) & MD_MIG_CANDIDATE_ADDR_MASK)
+
+#define MD_MIG_CANDIDATE_NODEID(value) ( \
+ ((value) & MD_MIG_CANDIDATE_NODEID_MASK) >> MD_MIG_CANDIDATE_NODEID_SHFT)
+
+#define MD_MIG_CANDIDATE_TYPE(value) ( \
+ ((value) & MD_MIG_CANDIDATE_TYPE_MASK) >> MD_MIG_CANDIDATE_TYPE_SHFT)
+
+#define MD_MIG_CANDIDATE_VALID(value) ( \
+ ((value) & MD_MIG_CANDIDATE_VALID_MASK) >> MD_MIG_CANDIDATE_VALID_SHFT)
+
+/*
+ * Macros to retrieve fields in the protection entry
+ */
+
+/* for Premium SIMM */
+#define MD_PPROT_REFCNT_GET(value) ( \
+ ((value) & MD_PPROT_REFCNT_MASK) >> MD_PPROT_REFCNT_SHFT)
+
+#define MD_PPROT_MIGMD_GET(value) ( \
+ ((value) & MD_PPROT_MIGMD_MASK) >> MD_PPROT_MIGMD_SHFT)
+
+/* for Standard SIMM */
+#define MD_SPROT_REFCNT_GET(value) ( \
+ ((value) & MD_SPROT_REFCNT_MASK) >> MD_SPROT_REFCNT_SHFT)
+
+#define MD_SPROT_MIGMD_GET(value) ( \
+ ((value) & MD_SPROT_MIGMD_MASK) >> MD_SPROT_MIGMD_SHFT)
+
+/*
+ * Format of dir_error, mem_error, protocol_error and misc_error registers
+ */
+
+struct dir_error_reg {
+ u64 uce_vld: 1, /* 63: valid directory uce */
+ ae_vld: 1, /* 62: valid dir prot ecc error */
+ ce_vld: 1, /* 61: valid correctable ECC err*/
+ rsvd1: 19, /* 60-42: reserved */
+ bad_prot: 3, /* 41-39: encoding, bad access rights*/
+ bad_syn: 7, /* 38-32: bad dir syndrome */
+ rsvd2: 2, /* 31-30: reserved */
+ hspec_addr:27, /* 29-03: bddir space bad entry */
+ uce_ovr: 1, /* 2: multiple dir uce's */
+ ae_ovr: 1, /* 1: multiple prot ecc errs*/
+ ce_ovr: 1; /* 0: multiple correctable errs */
+};
+
+typedef union md_dir_error {
+ u64 derr_reg; /* the entire register */
+ struct dir_error_reg derr_fmt; /* the register format */
+} md_dir_error_t;
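+
+/*
+ * Illustrative decode of MD_DIR_ERROR via the union above (a sketch only;
+ * the variable names are hypothetical):
+ *
+ *	md_dir_error_t derr;
+ *
+ *	derr.derr_reg = REMOTE_HUB_L(nasid, MD_DIR_ERROR);
+ *	if (derr.derr_fmt.uce_vld)
+ *		the bddir entry indicated by derr.derr_fmt.hspec_addr took an
+ *		uncorrectable error; the condition is acknowledged through
+ *		MD_DIR_ERROR_CLR.
+ */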
+
+
+struct mem_error_reg {
+ u64 uce_vld: 1, /* 63: valid memory uce */
+ ce_vld: 1, /* 62: valid correctable ECC err*/
+ rsvd1: 22, /* 61-40: reserved */
+ bad_syn: 8, /* 39-32: bad mem ecc syndrome */
+ address: 29, /* 31-03: bad entry pointer */
+ rsvd2: 1, /* 2: reserved */
+ uce_ovr: 1, /* 1: multiple mem uce's */
+ ce_ovr: 1; /* 0: multiple correctable errs */
+};
+
+
+typedef union md_mem_error {
+ u64 merr_reg; /* the entire register */
+ struct mem_error_reg merr_fmt; /* format of the mem_error reg */
+} md_mem_error_t;
+
+
+struct proto_error_reg {
+ u64 valid: 1, /* 63: valid protocol error */
+ rsvd1: 2, /* 62-61: reserved */
+ initiator:11, /* 60-50: id of request initiator*/
+ backoff: 2, /* 49-48: backoff control */
+ msg_type: 8, /* 47-40: type of request */
+ access: 2, /* 39-38: access rights of initiator*/
+ priority: 1, /* 37: priority level of requestor*/
+ dir_state: 4, /* 36-33: state of directory */
+ pointer_me:1, /* 32: initiator same as dir ptr */
+ address: 29, /* 31-03: request address */
+ rsvd2: 2, /* 02-01: reserved */
+ overrun: 1; /* 0: multiple protocol errs */
+};
+
+typedef union md_proto_error {
+ u64 perr_reg; /* the entire register */
+ struct proto_error_reg perr_fmt; /* format of the register */
+} md_proto_error_t;
+
+
+struct md_sdir_high_fmt {
+ unsigned short sd_hi_bvec : 11,
+ sd_hi_ecc : 5;
+};
+
+
+typedef union md_sdir_high {
+ /* The 16 bits of standard directory, upper word */
+ unsigned short sd_hi_val;
+ struct md_sdir_high_fmt sd_hi_fmt;
+} md_sdir_high_t;
+
+
+struct md_sdir_low_shared_fmt {
+ /* The meaning of lower directory, shared */
+ unsigned short sds_lo_bvec : 5,
+ sds_lo_unused: 1,
+ sds_lo_state : 3,
+ sds_lo_prio : 1,
+ sds_lo_ax : 1,
+ sds_lo_ecc : 5;
+};
+
+struct md_sdir_low_exclusive_fmt {
+ /* The meaning of lower directory, exclusive */
+ unsigned short sde_lo_ptr : 6,
+ sde_lo_state : 3,
+ sde_lo_prio : 1,
+ sde_lo_ax : 1,
+ sde_lo_ecc : 5;
+};
+
+
+typedef union md_sdir_low {
+ /* The 16 bits of standard directory, lower word */
+ unsigned short sd_lo_val;
+ struct md_sdir_low_exclusive_fmt sde_lo_fmt;
+ struct md_sdir_low_shared_fmt sds_lo_fmt;
+} md_sdir_low_t;
+
+
+
+struct md_pdir_high_fmt {
+ u64 pd_hi_unused : 16,
+ pd_hi_bvec : 38,
+ pd_hi_unused1 : 3,
+ pd_hi_ecc : 7;
+};
+
+
+typedef union md_pdir_high {
+	/* The 48 bits of premium directory, upper word */
+ u64 pd_hi_val;
+ struct md_pdir_high_fmt pd_hi_fmt;
+} md_pdir_high_t;
+
+
+struct md_pdir_low_shared_fmt {
+ /* The meaning of lower directory, shared */
+ u64 pds_lo_unused : 16,
+ pds_lo_bvec : 26,
+ pds_lo_cnt : 6,
+ pds_lo_state : 3,
+ pds_lo_ste : 1,
+ pds_lo_prio : 4,
+ pds_lo_ax : 1,
+ pds_lo_ecc : 7;
+};
+
+struct md_pdir_low_exclusive_fmt {
+ /* The meaning of lower directory, exclusive */
+ u64 pde_lo_unused : 31,
+ pde_lo_ptr : 11,
+ pde_lo_unused1 : 6,
+ pde_lo_state : 3,
+ pde_lo_ste : 1,
+ pde_lo_prio : 4,
+ pde_lo_ax : 1,
+ pde_lo_ecc : 7;
+};
+
+
+typedef union md_pdir_loent {
+ /* The 48 bits of premium directory, lower word */
+ u64 pd_lo_val;
+ struct md_pdir_low_exclusive_fmt pde_lo_fmt;
+ struct md_pdir_low_shared_fmt pds_lo_fmt;
+} md_pdir_low_t;
+
+
+/*
+ * The following two "union" definitions and two
+ * "struct" definitions are used in vmdump.c to
+ * represent directory memory information.
+ */
+
+typedef union md_dir_high {
+ md_sdir_high_t md_sdir_high;
+ md_pdir_high_t md_pdir_high;
+} md_dir_high_t;
+
+typedef union md_dir_low {
+ md_sdir_low_t md_sdir_low;
+ md_pdir_low_t md_pdir_low;
+} md_dir_low_t;
+
+typedef struct bddir_entry {
+ md_dir_low_t md_dir_low;
+ md_dir_high_t md_dir_high;
+} bddir_entry_t;
+
+typedef struct dir_mem_entry {
+ u64 prcpf[MAX_REGIONS];
+ bddir_entry_t directory_words[MD_PAGE_SIZE/CACHE_SLINE_SIZE];
+} dir_mem_entry_t;
+
+
+
+typedef union md_perf_sel {
+ u64 perf_sel_reg;
+ struct {
+ u64 perf_rsvd : 60,
+ perf_en : 1,
+ perf_sel : 3;
+ } perf_sel_bits;
+} md_perf_sel_t;
+
+typedef union md_perf_cnt {
+ u64 perf_cnt;
+ struct {
+ u64 perf_rsvd : 44,
+ perf_cnt : 20;
+ } perf_cnt_bits;
+} md_perf_cnt_t;
+
+
+#endif /* !__ASSEMBLY__ */
+
+
+#define DIR_ERROR_VALID_MASK 0xe000000000000000
+#define DIR_ERROR_VALID_SHFT 61
+#define DIR_ERROR_VALID_UCE 0x8000000000000000
+#define DIR_ERROR_VALID_AE 0x4000000000000000
+#define DIR_ERROR_VALID_CE 0x2000000000000000
+
+#define MEM_ERROR_VALID_MASK 0xc000000000000000
+#define MEM_ERROR_VALID_SHFT 62
+#define MEM_ERROR_VALID_UCE 0x8000000000000000
+#define MEM_ERROR_VALID_CE 0x4000000000000000
+
+#define PROTO_ERROR_VALID_MASK 0x8000000000000000
+
+#define MISC_ERROR_VALID_MASK 0x3ff
+
+/*
+ * Mask for hspec address that is stored in the dir error register.
+ * This represents bits 29 through 3.
+ */
+#define DIR_ERR_HSPEC_MASK 0x3ffffff8
+#define ERROR_HSPEC_MASK 0x3ffffff8
+#define ERROR_HSPEC_SHFT 3
+#define ERROR_ADDR_MASK 0xfffffff8
+#define ERROR_ADDR_SHFT 3
+
+/*
+ * MD_MISC_ERROR register defines.
+ */
+
+#define MMCE_VALID_MASK 0x3ff
+#define MMCE_ILL_MSG_SHFT 8
+#define MMCE_ILL_MSG_MASK (UINT64_CAST 0x03 << MMCE_ILL_MSG_SHFT)
+#define MMCE_ILL_REV_SHFT 6
+#define MMCE_ILL_REV_MASK (UINT64_CAST 0x03 << MMCE_ILL_REV_SHFT)
+#define MMCE_LONG_PACK_SHFT 4
+#define MMCE_LONG_PACK_MASK	(UINT64_CAST 0x03 << MMCE_LONG_PACK_SHFT)
+#define MMCE_SHORT_PACK_SHFT 2
+#define MMCE_SHORT_PACK_MASK (UINT64_CAST 0x03 << MMCE_SHORT_PACK_SHFT)
+#define MMCE_BAD_DATA_SHFT 0
+#define MMCE_BAD_DATA_MASK (UINT64_CAST 0x03 << MMCE_BAD_DATA_SHFT)
+
+
+#define MD_PERF_COUNTERS 6
+#define MD_PERF_SETS 6
+
+#define MEM_DIMM_MASK 0xe0000000
+#define MEM_DIMM_SHFT 29
+
+#endif /* _ASM_SN_SN0_HUBMD_H */
diff --git a/arch/mips/include/asm/sn/sn0/hubni.h b/arch/mips/include/asm/sn/sn0/hubni.h
new file mode 100644
index 000000000..b8253142c
--- /dev/null
+++ b/arch/mips/include/asm/sn/sn0/hubni.h
@@ -0,0 +1,263 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from IRIX <sys/SN/SN0/hubni.h>, Revision 1.27.
+ *
+ * Copyright (C) 1992-1997, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_SGI_SN0_HUBNI_H
+#define _ASM_SGI_SN0_HUBNI_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#endif
+
+/*
+ * Hub Network Interface registers
+ *
+ * All registers in this file are subject to change until Hub chip tapeout.
+ */
+
+#define NI_BASE 0x600000
+#define NI_BASE_TABLES 0x630000
+
+#define NI_STATUS_REV_ID 0x600000 /* Hub network status, rev, and ID */
+#define NI_PORT_RESET 0x600008 /* Reset the network interface */
+#define NI_PROTECTION 0x600010 /* NI register access permissions */
+#define NI_GLOBAL_PARMS 0x600018 /* LLP parameters */
+#define NI_SCRATCH_REG0 0x600100 /* Scratch register 0 (64 bits) */
+#define NI_SCRATCH_REG1 0x600108 /* Scratch register 1 (64 bits) */
+#define NI_DIAG_PARMS 0x600110 /* Parameters for diags */
+
+#define NI_VECTOR_PARMS 0x600200 /* Vector PIO routing parameters */
+#define NI_VECTOR 0x600208 /* Vector PIO route */
+#define NI_VECTOR_DATA 0x600210 /* Vector PIO data */
+#define NI_VECTOR_STATUS 0x600300 /* Vector PIO return status */
+#define NI_RETURN_VECTOR 0x600308 /* Vector PIO return vector */
+#define NI_VECTOR_READ_DATA 0x600310 /* Vector PIO read data */
+#define NI_VECTOR_CLEAR 0x600380 /* Vector PIO read & clear status */
+
+#define NI_IO_PROTECT 0x600400 /* PIO protection bits */
+#define NI_IO_PROT_OVRRD 0x600408 /* PIO protection bit override */
+
+#define NI_AGE_CPU0_MEMORY 0x600500 /* CPU 0 memory age control */
+#define NI_AGE_CPU0_PIO 0x600508 /* CPU 0 PIO age control */
+#define NI_AGE_CPU1_MEMORY 0x600510 /* CPU 1 memory age control */
+#define NI_AGE_CPU1_PIO 0x600518 /* CPU 1 PIO age control */
+#define NI_AGE_GBR_MEMORY 0x600520 /* GBR memory age control */
+#define NI_AGE_GBR_PIO 0x600528 /* GBR PIO age control */
+#define NI_AGE_IO_MEMORY 0x600530 /* IO memory age control */
+#define NI_AGE_IO_PIO 0x600538 /* IO PIO age control */
+#define NI_AGE_REG_MIN NI_AGE_CPU0_MEMORY
+#define NI_AGE_REG_MAX NI_AGE_IO_PIO
+
+#define NI_PORT_PARMS 0x608000 /* LLP Parameters */
+#define NI_PORT_ERROR 0x608008 /* LLP Errors */
+#define NI_PORT_ERROR_CLEAR 0x608088 /* Clear the error bits */
+
+#define NI_META_TABLE0 0x638000 /* First meta routing table entry */
+#define NI_META_TABLE(_x) (NI_META_TABLE0 + (8 * (_x)))
+#define NI_META_ENTRIES 32
+
+#define NI_LOCAL_TABLE0 0x638100 /* First local routing table entry */
+#define NI_LOCAL_TABLE(_x) (NI_LOCAL_TABLE0 + (8 * (_x)))
+#define NI_LOCAL_ENTRIES 16
+
+/*
+ * NI_STATUS_REV_ID mask and shift definitions
+ * Have to use UINT64_CAST instead of 'L' suffix, for assembler.
+ */
+
+#define NSRI_8BITMODE_SHFT 30
+#define NSRI_8BITMODE_MASK (UINT64_CAST 0x1 << 30)
+#define NSRI_LINKUP_SHFT 29
+#define NSRI_LINKUP_MASK (UINT64_CAST 0x1 << 29)
+#define NSRI_DOWNREASON_SHFT 28 /* 0=failed, 1=never came */
+#define NSRI_DOWNREASON_MASK (UINT64_CAST 0x1 << 28) /* out of reset. */
+#define NSRI_MORENODES_SHFT 18
+#define NSRI_MORENODES_MASK (UINT64_CAST 1 << 18) /* Max. # of nodes */
+#define MORE_MEMORY 0
+#define MORE_NODES 1
+#define NSRI_REGIONSIZE_SHFT 17
+#define NSRI_REGIONSIZE_MASK (UINT64_CAST 1 << 17) /* Granularity */
+#define REGIONSIZE_FINE 1
+#define REGIONSIZE_COARSE 0
+#define NSRI_NODEID_SHFT 8
+#define NSRI_NODEID_MASK (UINT64_CAST 0x1ff << 8)/* Node (Hub) ID */
+#define NSRI_REV_SHFT 4
+#define NSRI_REV_MASK (UINT64_CAST 0xf << 4) /* Chip Revision */
+#define NSRI_CHIPID_SHFT 0
+#define NSRI_CHIPID_MASK (UINT64_CAST 0xf) /* Chip type ID */
+
+/*
+ * In fine mode, each node is a region. In coarse mode, there are
+ * eight nodes per region.
+ */
+#define NASID_TO_FINEREG_SHFT 0
+#define NASID_TO_COARSEREG_SHFT 3
+
+/* NI_PORT_RESET mask definitions */
+
+#define NPR_PORTRESET (UINT64_CAST 1 << 7) /* Send warm reset */
+#define NPR_LINKRESET (UINT64_CAST 1 << 1) /* Send link reset */
+#define NPR_LOCALRESET (UINT64_CAST 1) /* Reset entire hub */
+
+/* NI_PROTECTION mask and shift definitions */
+
+#define NPROT_RESETOK (UINT64_CAST 1)
+
+/* NI_GLOBAL_PARMS mask and shift definitions */
+
+#define NGP_MAXRETRY_SHFT 48 /* Maximum retries */
+#define NGP_MAXRETRY_MASK (UINT64_CAST 0x3ff << 48)
+#define NGP_TAILTOWRAP_SHFT 32 /* Tail timeout wrap */
+#define NGP_TAILTOWRAP_MASK (UINT64_CAST 0xffff << 32)
+
+#define NGP_CREDITTOVAL_SHFT	16			/* Credit timeout value */
+#define NGP_CREDITTOVAL_MASK (UINT64_CAST 0xf << 16)
+#define NGP_TAILTOVAL_SHFT 4 /* Tail timeout value */
+#define NGP_TAILTOVAL_MASK (UINT64_CAST 0xf << 4)
+
+/* NI_DIAG_PARMS mask and shift definitions */
+
+#define NDP_PORTTORESET (UINT64_CAST 1 << 18) /* Port tmout reset */
+#define NDP_LLP8BITMODE (UINT64_CAST 1 << 12) /* LLP 8-bit mode */
+#define NDP_PORTDISABLE (UINT64_CAST 1 << 6) /* Port disable */
+#define NDP_SENDERROR (UINT64_CAST 1) /* Send data error */
+
+/*
+ * NI_VECTOR_PARMS mask and shift definitions.
+ * TYPE may be any of the first four PIOTYPEs defined under NI_VECTOR_STATUS.
+ */
+
+#define NVP_PIOID_SHFT 40
+#define NVP_PIOID_MASK (UINT64_CAST 0x3ff << 40)
+#define NVP_WRITEID_SHFT 32
+#define NVP_WRITEID_MASK (UINT64_CAST 0xff << 32)
+#define NVP_ADDRESS_MASK (UINT64_CAST 0xffff8) /* Bits 19:3 */
+#define NVP_TYPE_SHFT 0
+#define NVP_TYPE_MASK (UINT64_CAST 0x3)
+
+/* NI_VECTOR_STATUS mask and shift definitions */
+
+#define NVS_VALID (UINT64_CAST 1 << 63)
+#define NVS_OVERRUN (UINT64_CAST 1 << 62)
+#define NVS_TARGET_SHFT 51
+#define NVS_TARGET_MASK (UINT64_CAST 0x3ff << 51)
+#define NVS_PIOID_SHFT 40
+#define NVS_PIOID_MASK (UINT64_CAST 0x3ff << 40)
+#define NVS_WRITEID_SHFT 32
+#define NVS_WRITEID_MASK (UINT64_CAST 0xff << 32)
+#define NVS_ADDRESS_MASK (UINT64_CAST 0xfffffff8) /* Bits 31:3 */
+#define NVS_TYPE_SHFT 0
+#define NVS_TYPE_MASK (UINT64_CAST 0x7)
+#define NVS_ERROR_MASK (UINT64_CAST 0x4) /* bit set means error */
+
+
+#define PIOTYPE_READ 0 /* VECTOR_PARMS and VECTOR_STATUS */
+#define PIOTYPE_WRITE 1 /* VECTOR_PARMS and VECTOR_STATUS */
+#define PIOTYPE_UNDEFINED 2 /* VECTOR_PARMS and VECTOR_STATUS */
+#define PIOTYPE_EXCHANGE 3 /* VECTOR_PARMS and VECTOR_STATUS */
+#define PIOTYPE_ADDR_ERR 4 /* VECTOR_STATUS only */
+#define PIOTYPE_CMD_ERR 5 /* VECTOR_STATUS only */
+#define PIOTYPE_PROT_ERR 6 /* VECTOR_STATUS only */
+#define PIOTYPE_UNKNOWN 7 /* VECTOR_STATUS only */
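+
+/*
+ * Illustrative composition of an NI_VECTOR_PARMS value from the fields
+ * defined above (a sketch, not taken from the original header; 'pioid',
+ * 'writeid' and 'addr' are hypothetical):
+ *
+ *	parms = ((u64)pioid << NVP_PIOID_SHFT) |
+ *		((u64)writeid << NVP_WRITEID_SHFT) |
+ *		(addr & NVP_ADDRESS_MASK) |
+ *		(PIOTYPE_READ << NVP_TYPE_SHFT);
+ */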
+
+/* NI_AGE_XXX mask and shift definitions */
+
+#define NAGE_VCH_SHFT 10
+#define NAGE_VCH_MASK (UINT64_CAST 3 << 10)
+#define NAGE_CC_SHFT 8
+#define NAGE_CC_MASK (UINT64_CAST 3 << 8)
+#define NAGE_AGE_SHFT 0
+#define NAGE_AGE_MASK (UINT64_CAST 0xff)
+#define NAGE_MASK (NAGE_VCH_MASK | NAGE_CC_MASK | NAGE_AGE_MASK)
+
+#define VCHANNEL_A 0
+#define VCHANNEL_B 1
+#define VCHANNEL_ANY 2
+
+/* NI_PORT_PARMS mask and shift definitions */
+
+#define NPP_NULLTO_SHFT 10
+#define NPP_NULLTO_MASK		(UINT64_CAST 0x3f << 10)
+#define NPP_MAXBURST_SHFT 0
+#define NPP_MAXBURST_MASK (UINT64_CAST 0x3ff)
+#define NPP_RESET_DFLT_HUB20 ((UINT64_CAST 1 << NPP_NULLTO_SHFT) | \
+ (UINT64_CAST 0x3f0 << NPP_MAXBURST_SHFT))
+#define NPP_RESET_DEFAULTS ((UINT64_CAST 6 << NPP_NULLTO_SHFT) | \
+ (UINT64_CAST 0x3f0 << NPP_MAXBURST_SHFT))
+
+
+/* NI_PORT_ERROR mask and shift definitions */
+
+#define NPE_LINKRESET (UINT64_CAST 1 << 37)
+#define NPE_INTERNALERROR (UINT64_CAST 1 << 36)
+#define NPE_BADMESSAGE (UINT64_CAST 1 << 35)
+#define NPE_BADDEST (UINT64_CAST 1 << 34)
+#define NPE_FIFOOVERFLOW (UINT64_CAST 1 << 33)
+#define NPE_CREDITTO_SHFT 28
+#define NPE_CREDITTO_MASK (UINT64_CAST 0xf << 28)
+#define NPE_TAILTO_SHFT 24
+#define NPE_TAILTO_MASK (UINT64_CAST 0xf << 24)
+#define NPE_RETRYCOUNT_SHFT 16
+#define NPE_RETRYCOUNT_MASK (UINT64_CAST 0xff << 16)
+#define NPE_CBERRCOUNT_SHFT 8
+#define NPE_CBERRCOUNT_MASK (UINT64_CAST 0xff << 8)
+#define NPE_SNERRCOUNT_SHFT 0
+#define NPE_SNERRCOUNT_MASK (UINT64_CAST 0xff << 0)
+#define NPE_MASK 0x3effffffff
+
+#define NPE_COUNT_MAX 0xff
+
+#define NPE_FATAL_ERRORS (NPE_LINKRESET | NPE_INTERNALERROR | \
+ NPE_BADMESSAGE | NPE_BADDEST | \
+ NPE_FIFOOVERFLOW | NPE_CREDITTO_MASK | \
+ NPE_TAILTO_MASK)
+
+/* NI_META_TABLE mask and shift definitions */
+
+#define NMT_EXIT_PORT_MASK (UINT64_CAST 0xf)
+
+/* NI_LOCAL_TABLE mask and shift definitions */
+
+#define NLT_EXIT_PORT_MASK (UINT64_CAST 0xf)
+
+#ifndef __ASSEMBLY__
+
+typedef union hubni_port_error_u {
+ u64 nipe_reg_value;
+ struct {
+ u64 nipe_rsvd: 26, /* unused */
+ nipe_lnk_reset: 1, /* link reset */
+ nipe_intl_err: 1, /* internal error */
+ nipe_bad_msg: 1, /* bad message */
+ nipe_bad_dest: 1, /* bad dest */
+ nipe_fifo_ovfl: 1, /* fifo overflow */
+ nipe_rsvd1: 1, /* unused */
+ nipe_credit_to: 4, /* credit timeout */
+ nipe_tail_to: 4, /* tail timeout */
+ nipe_retry_cnt: 8, /* retry error count */
+ nipe_cb_cnt: 8, /* checkbit error count */
+ nipe_sn_cnt: 8; /* sequence number count */
+ } nipe_fields_s;
+} hubni_port_error_t;
+
+#define NI_LLP_RETRY_MAX 0xff
+#define NI_LLP_CB_MAX 0xff
+#define NI_LLP_SN_MAX 0xff
+
+static inline int get_region_shift(void)
+{
+ if (LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
+ return NASID_TO_FINEREG_SHFT;
+
+ return NASID_TO_COARSEREG_SHFT;
+}
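+
+/*
+ * Illustrative use of the helper above (not in the original header): in
+ * coarse mode eight nodes share a region, so a region number can be derived
+ * from a NASID as
+ *
+ *	region = nasid >> get_region_shift();
+ */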
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_SGI_SN0_HUBNI_H */
diff --git a/arch/mips/include/asm/sn/sn0/hubpi.h b/arch/mips/include/asm/sn/sn0/hubpi.h
new file mode 100644
index 000000000..7b8365591
--- /dev/null
+++ b/arch/mips/include/asm/sn/sn0/hubpi.h
@@ -0,0 +1,409 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from IRIX <sys/SN/SN0/hubpi.h>, revision 1.28.
+ *
+ * Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_SN_SN0_HUBPI_H
+#define _ASM_SN_SN0_HUBPI_H
+
+#include <linux/types.h>
+
+/*
+ * Hub I/O interface registers
+ *
+ * All registers in this file are subject to change until Hub chip tapeout.
+ * All register "addresses" are actually offsets. Use the LOCAL_HUB
+ * or REMOTE_HUB macros to synthesize an actual address
+ */
+
+#define PI_BASE 0x000000
+
+/* General protection and control registers */
+
+#define PI_CPU_PROTECT 0x000000 /* CPU Protection */
+#define PI_PROT_OVERRD 0x000008 /* Clear CPU Protection bit */
+#define PI_IO_PROTECT 0x000010 /* Interrupt Pending Protection */
+#define PI_REGION_PRESENT 0x000018 /* Indicates whether region exists */
+#define PI_CPU_NUM 0x000020 /* CPU Number ID */
+#define PI_CALIAS_SIZE 0x000028 /* Cached Alias Size */
+#define PI_MAX_CRB_TIMEOUT 0x000030 /* Maximum Timeout for CRB */
+#define PI_CRB_SFACTOR 0x000038 /* Scale factor for CRB timeout */
+
+/* CALIAS values */
+#define PI_CALIAS_SIZE_0 0
+#define PI_CALIAS_SIZE_4K 1
+#define PI_CALIAS_SIZE_8K 2
+#define PI_CALIAS_SIZE_16K 3
+#define PI_CALIAS_SIZE_32K 4
+#define PI_CALIAS_SIZE_64K 5
+#define PI_CALIAS_SIZE_128K 6
+#define PI_CALIAS_SIZE_256K 7
+#define PI_CALIAS_SIZE_512K 8
+#define PI_CALIAS_SIZE_1M 9
+#define PI_CALIAS_SIZE_2M 10
+#define PI_CALIAS_SIZE_4M 11
+#define PI_CALIAS_SIZE_8M 12
+#define PI_CALIAS_SIZE_16M 13
+#define PI_CALIAS_SIZE_32M 14
+#define PI_CALIAS_SIZE_64M 15
+
+/* Processor control and status checking */
+
+#define PI_CPU_PRESENT_A 0x000040 /* CPU Present A */
+#define PI_CPU_PRESENT_B 0x000048 /* CPU Present B */
+#define PI_CPU_ENABLE_A 0x000050 /* CPU Enable A */
+#define PI_CPU_ENABLE_B 0x000058 /* CPU Enable B */
+#define PI_REPLY_LEVEL 0x000060 /* Reply Level */
+#define PI_HARDRESET_BIT 0x020068 /* Bit cleared by s/w on SR */
+#define PI_NMI_A 0x000070 /* NMI to CPU A */
+#define PI_NMI_B 0x000078 /* NMI to CPU B */
+#define PI_NMI_OFFSET (PI_NMI_B - PI_NMI_A)
+#define PI_SOFTRESET 0x000080 /* Softreset (to both CPUs) */
+
+/* Regular Interrupt register checking. */
+
+#define PI_INT_PEND_MOD 0x000090 /* Write to set pending ints */
+#define PI_INT_PEND0 0x000098 /* Read to get pending ints */
+#define PI_INT_PEND1 0x0000a0 /* Read to get pending ints */
+#define PI_INT_MASK0_A 0x0000a8 /* Interrupt Mask 0 for CPU A */
+#define PI_INT_MASK1_A 0x0000b0 /* Interrupt Mask 1 for CPU A */
+#define PI_INT_MASK0_B 0x0000b8 /* Interrupt Mask 0 for CPU B */
+#define PI_INT_MASK1_B 0x0000c0 /* Interrupt Mask 1 for CPU B */
+
+#define PI_INT_MASK_OFFSET 0x10 /* Offset from A to B */
+
+/* Crosscall interrupts */
+
+#define PI_CC_PEND_SET_A 0x0000c8 /* CC Interrupt Pending Set, CPU A */
+#define PI_CC_PEND_SET_B 0x0000d0 /* CC Interrupt Pending Set, CPU B */
+#define PI_CC_PEND_CLR_A 0x0000d8 /* CC Interrupt Pending Clr, CPU A */
+#define PI_CC_PEND_CLR_B 0x0000e0 /* CC Interrupt Pending Clr, CPU B */
+#define PI_CC_MASK 0x0000e8 /* CC Interrupt mask */
+
+#define PI_INT_SET_OFFSET 0x08 /* Offset from A to B */
+
+/* Realtime Counter and Profiler control registers */
+
+#define PI_RT_COUNT 0x030100 /* Real Time Counter */
+#define PI_RT_COMPARE_A 0x000108 /* Real Time Compare A */
+#define PI_RT_COMPARE_B 0x000110 /* Real Time Compare B */
+#define PI_PROFILE_COMPARE 0x000118 /* L5 int to both cpus when == RTC */
+#define PI_RT_PEND_A 0x000120 /* Set if RT int for A pending */
+#define PI_RT_PEND_B 0x000128 /* Set if RT int for B pending */
+#define PI_PROF_PEND_A 0x000130 /* Set if Prof int for A pending */
+#define PI_PROF_PEND_B 0x000138 /* Set if Prof int for B pending */
+#define PI_RT_EN_A 0x000140 /* RT int for CPU A enable */
+#define PI_RT_EN_B 0x000148 /* RT int for CPU B enable */
+#define PI_PROF_EN_A 0x000150 /* PROF int for CPU A enable */
+#define PI_PROF_EN_B 0x000158 /* PROF int for CPU B enable */
+#define PI_RT_LOCAL_CTRL 0x000160 /* RT control register */
+#define PI_RT_FILTER_CTRL 0x000168 /* GCLK Filter control register */
+
+#define PI_COUNT_OFFSET 0x08 /* A to B offset for all counts */
+
+/* Built-In Self Test support */
+
+#define PI_BIST_WRITE_DATA 0x000200 /* BIST write data */
+#define PI_BIST_READ_DATA 0x000208 /* BIST read data */
+#define PI_BIST_COUNT_TARG 0x000210 /* BIST Count and Target */
+#define PI_BIST_READY 0x000218 /* BIST Ready indicator */
+#define PI_BIST_SHIFT_LOAD 0x000220 /* BIST control */
+#define PI_BIST_SHIFT_UNLOAD 0x000228 /* BIST control */
+#define PI_BIST_ENTER_RUN 0x000230 /* BIST control */
+
+/* Graphics control registers */
+
+#define PI_GFX_PAGE_A 0x000300 /* Graphics page A */
+#define PI_GFX_CREDIT_CNTR_A 0x000308 /* Graphics credit counter A */
+#define PI_GFX_BIAS_A 0x000310 /* Graphics bias A */
+#define PI_GFX_INT_CNTR_A 0x000318 /* Graphics interrupt counter A */
+#define PI_GFX_INT_CMP_A 0x000320 /* Graphics interrupt comparator A */
+#define PI_GFX_PAGE_B 0x000328 /* Graphics page B */
+#define PI_GFX_CREDIT_CNTR_B 0x000330 /* Graphics credit counter B */
+#define PI_GFX_BIAS_B 0x000338 /* Graphics bias B */
+#define PI_GFX_INT_CNTR_B 0x000340 /* Graphics interrupt counter B */
+#define PI_GFX_INT_CMP_B 0x000348 /* Graphics interrupt comparator B */
+
+#define PI_GFX_OFFSET (PI_GFX_PAGE_B - PI_GFX_PAGE_A)
+#define PI_GFX_PAGE_ENABLE 0x0000010000000000LL
+
+/* Error and timeout registers */
+#define PI_ERR_INT_PEND 0x000400 /* Error Interrupt Pending */
+#define PI_ERR_INT_MASK_A 0x000408 /* Error Interrupt mask for CPU A */
+#define PI_ERR_INT_MASK_B 0x000410 /* Error Interrupt mask for CPU B */
+#define PI_ERR_STACK_ADDR_A 0x000418 /* Error stack address for CPU A */
+#define PI_ERR_STACK_ADDR_B 0x000420 /* Error stack address for CPU B */
+#define PI_ERR_STACK_SIZE 0x000428 /* Error Stack Size */
+#define PI_ERR_STATUS0_A 0x000430 /* Error Status 0A */
+#define PI_ERR_STATUS0_A_RCLR 0x000438 /* Error Status 0A clear on read */
+#define PI_ERR_STATUS1_A 0x000440 /* Error Status 1A */
+#define PI_ERR_STATUS1_A_RCLR 0x000448 /* Error Status 1A clear on read */
+#define PI_ERR_STATUS0_B 0x000450 /* Error Status 0B */
+#define PI_ERR_STATUS0_B_RCLR 0x000458 /* Error Status 0B clear on read */
+#define PI_ERR_STATUS1_B 0x000460 /* Error Status 1B */
+#define PI_ERR_STATUS1_B_RCLR 0x000468 /* Error Status 1B clear on read */
+#define PI_SPOOL_CMP_A 0x000470 /* Spool compare for CPU A */
+#define PI_SPOOL_CMP_B 0x000478 /* Spool compare for CPU B */
+#define PI_CRB_TIMEOUT_A 0x000480 /* Timed out CRB entries for A */
+#define PI_CRB_TIMEOUT_B 0x000488 /* Timed out CRB entries for B */
+#define PI_SYSAD_ERRCHK_EN 0x000490 /* Enables SYSAD error checking */
+#define PI_BAD_CHECK_BIT_A 0x000498 /* Force SYSAD check bit error */
+#define PI_BAD_CHECK_BIT_B 0x0004a0 /* Force SYSAD check bit error */
+#define PI_NACK_CNT_A 0x0004a8 /* Consecutive NACK counter */
+#define PI_NACK_CNT_B 0x0004b0 /* " " for CPU B */
+#define PI_NACK_CMP 0x0004b8 /* NACK count compare */
+#define PI_STACKADDR_OFFSET (PI_ERR_STACK_ADDR_B - PI_ERR_STACK_ADDR_A)
+#define PI_ERRSTAT_OFFSET (PI_ERR_STATUS0_B - PI_ERR_STATUS0_A)
+#define PI_RDCLR_OFFSET (PI_ERR_STATUS0_A_RCLR - PI_ERR_STATUS0_A)
+
+/* Bits in PI_ERR_INT_PEND */
+#define PI_ERR_SPOOL_CMP_B 0x00000001 /* Spool end hit high water */
+#define PI_ERR_SPOOL_CMP_A 0x00000002
+#define PI_ERR_SPUR_MSG_B 0x00000004 /* Spurious message intr. */
+#define PI_ERR_SPUR_MSG_A 0x00000008
+#define PI_ERR_WRB_TERR_B 0x00000010 /* WRB TERR */
+#define PI_ERR_WRB_TERR_A 0x00000020
+#define PI_ERR_WRB_WERR_B 0x00000040 /* WRB WERR */
+#define PI_ERR_WRB_WERR_A 0x00000080
+#define PI_ERR_SYSSTATE_B 0x00000100 /* SysState parity error */
+#define PI_ERR_SYSSTATE_A 0x00000200
+#define PI_ERR_SYSAD_DATA_B 0x00000400 /* SysAD data parity error */
+#define PI_ERR_SYSAD_DATA_A 0x00000800
+#define PI_ERR_SYSAD_ADDR_B 0x00001000 /* SysAD addr parity error */
+#define PI_ERR_SYSAD_ADDR_A 0x00002000
+#define PI_ERR_SYSCMD_DATA_B 0x00004000 /* SysCmd data parity error */
+#define PI_ERR_SYSCMD_DATA_A 0x00008000
+#define PI_ERR_SYSCMD_ADDR_B 0x00010000 /* SysCmd addr parity error */
+#define PI_ERR_SYSCMD_ADDR_A 0x00020000
+#define PI_ERR_BAD_SPOOL_B 0x00040000 /* Error spooling to memory */
+#define PI_ERR_BAD_SPOOL_A 0x00080000
+#define PI_ERR_UNCAC_UNCORR_B 0x00100000 /* Uncached uncorrectable */
+#define PI_ERR_UNCAC_UNCORR_A 0x00200000
+#define PI_ERR_SYSSTATE_TAG_B 0x00400000 /* SysState tag parity error */
+#define PI_ERR_SYSSTATE_TAG_A 0x00800000
+#define PI_ERR_MD_UNCORR 0x01000000 /* Must be cleared in MD */
+
+#define PI_ERR_CLEAR_ALL_A 0x00aaaaaa
+#define PI_ERR_CLEAR_ALL_B 0x00555555
+
+
+/*
+ * The following three macros define all possible error int pends.
+ */
+
+#define PI_FATAL_ERR_CPU_A (PI_ERR_SYSSTATE_TAG_A | \
+ PI_ERR_BAD_SPOOL_A | \
+ PI_ERR_SYSCMD_ADDR_A | \
+ PI_ERR_SYSCMD_DATA_A | \
+ PI_ERR_SYSAD_ADDR_A | \
+ PI_ERR_SYSAD_DATA_A | \
+ PI_ERR_SYSSTATE_A)
+
+#define PI_MISC_ERR_CPU_A (PI_ERR_UNCAC_UNCORR_A | \
+ PI_ERR_WRB_WERR_A | \
+ PI_ERR_WRB_TERR_A | \
+ PI_ERR_SPUR_MSG_A | \
+ PI_ERR_SPOOL_CMP_A)
+
+#define PI_FATAL_ERR_CPU_B (PI_ERR_SYSSTATE_TAG_B | \
+ PI_ERR_BAD_SPOOL_B | \
+ PI_ERR_SYSCMD_ADDR_B | \
+ PI_ERR_SYSCMD_DATA_B | \
+ PI_ERR_SYSAD_ADDR_B | \
+ PI_ERR_SYSAD_DATA_B | \
+ PI_ERR_SYSSTATE_B)
+
+#define PI_MISC_ERR_CPU_B (PI_ERR_UNCAC_UNCORR_B | \
+ PI_ERR_WRB_WERR_B | \
+ PI_ERR_WRB_TERR_B | \
+ PI_ERR_SPUR_MSG_B | \
+ PI_ERR_SPOOL_CMP_B)
+
+#define PI_ERR_GENERIC (PI_ERR_MD_UNCORR)
+
+/*
+ * Error types for PI_ERR_STATUS0_[AB] and error stack:
+ * Use the write types if WRBRRB is 1 else use the read types
+ */
+
+/* Fields in PI_ERR_STATUS0_[AB] */
+#define PI_ERR_ST0_TYPE_MASK 0x0000000000000007
+#define PI_ERR_ST0_TYPE_SHFT 0
+#define PI_ERR_ST0_REQNUM_MASK 0x0000000000000038
+#define PI_ERR_ST0_REQNUM_SHFT 3
+#define PI_ERR_ST0_SUPPL_MASK 0x000000000001ffc0
+#define PI_ERR_ST0_SUPPL_SHFT 6
+#define PI_ERR_ST0_CMD_MASK 0x0000000001fe0000
+#define PI_ERR_ST0_CMD_SHFT 17
+#define PI_ERR_ST0_ADDR_MASK 0x3ffffffffe000000
+#define PI_ERR_ST0_ADDR_SHFT 25
+#define PI_ERR_ST0_OVERRUN_MASK 0x4000000000000000
+#define PI_ERR_ST0_OVERRUN_SHFT 62
+#define PI_ERR_ST0_VALID_MASK 0x8000000000000000
+#define PI_ERR_ST0_VALID_SHFT 63
+
+/* Fields in PI_ERR_STATUS1_[AB] */
+#define PI_ERR_ST1_SPOOL_MASK 0x00000000001fffff
+#define PI_ERR_ST1_SPOOL_SHFT 0
+#define PI_ERR_ST1_TOUTCNT_MASK 0x000000001fe00000
+#define PI_ERR_ST1_TOUTCNT_SHFT 21
+#define PI_ERR_ST1_INVCNT_MASK 0x0000007fe0000000
+#define PI_ERR_ST1_INVCNT_SHFT 29
+#define PI_ERR_ST1_CRBNUM_MASK 0x0000038000000000
+#define PI_ERR_ST1_CRBNUM_SHFT 39
+#define PI_ERR_ST1_WRBRRB_MASK 0x0000040000000000
+#define PI_ERR_ST1_WRBRRB_SHFT 42
+#define PI_ERR_ST1_CRBSTAT_MASK 0x001ff80000000000
+#define PI_ERR_ST1_CRBSTAT_SHFT 43
+#define PI_ERR_ST1_MSGSRC_MASK 0xffe0000000000000
+#define PI_ERR_ST1_MSGSRC_SHFT 53
+
+/* Fields in the error stack */
+#define PI_ERR_STK_TYPE_MASK 0x0000000000000003
+#define PI_ERR_STK_TYPE_SHFT 0
+#define PI_ERR_STK_SUPPL_MASK 0x0000000000000038
+#define PI_ERR_STK_SUPPL_SHFT 3
+#define PI_ERR_STK_REQNUM_MASK 0x00000000000001c0
+#define PI_ERR_STK_REQNUM_SHFT 6
+#define PI_ERR_STK_CRBNUM_MASK 0x0000000000000e00
+#define PI_ERR_STK_CRBNUM_SHFT 9
+#define PI_ERR_STK_WRBRRB_MASK 0x0000000000001000
+#define PI_ERR_STK_WRBRRB_SHFT 12
+#define PI_ERR_STK_CRBSTAT_MASK 0x00000000007fe000
+#define PI_ERR_STK_CRBSTAT_SHFT 13
+#define PI_ERR_STK_CMD_MASK 0x000000007f800000
+#define PI_ERR_STK_CMD_SHFT 23
+#define PI_ERR_STK_ADDR_MASK 0xffffffff80000000
+#define PI_ERR_STK_ADDR_SHFT 31
+
+/* Error type in the error status or stack on Read CRBs */
+#define PI_ERR_RD_PRERR 1
+#define PI_ERR_RD_DERR 2
+#define PI_ERR_RD_TERR 3
+
+/* Error type in the error status or stack on Write CRBs */
+#define PI_ERR_WR_WERR 0
+#define PI_ERR_WR_PWERR 1
+#define PI_ERR_WR_TERR 3
+
+/* Read or Write CRB in error status or stack */
+#define PI_ERR_RRB 0
+#define PI_ERR_WRB 1
+#define PI_ERR_ANY_CRB 2
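+
+/*
+ * Illustrative interpretation (a sketch, not from the original header): the
+ * WRBRRB bit of error status 1 selects which error-type encoding applies.
+ *
+ *	if (stat1 & PI_ERR_ST1_WRBRRB_MASK)
+ *		the type field uses the PI_ERR_WR_* (write CRB) encodings;
+ *	else
+ *		the type field uses the PI_ERR_RD_* (read CRB) encodings;
+ */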
+
+/* Address masks in the error status and error stack are not the same */
+#define ERR_STK_ADDR_SHFT 7
+#define ERR_STAT0_ADDR_SHFT 3
+
+#define PI_MIN_STACK_SIZE 4096 /* For figuring out the size to set */
+#define PI_STACK_SIZE_SHFT 12 /* 4k */
+
+#define ERR_STACK_SIZE_BYTES(_sz) \
+ ((_sz) ? (PI_MIN_STACK_SIZE << ((_sz) - 1)) : 0)
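+
+/*
+ * Worked example (illustrative): the encoded size is a power-of-two
+ * multiple of PI_MIN_STACK_SIZE, with an encoding of 0 yielding 0 bytes.
+ *
+ *	ERR_STACK_SIZE_BYTES(0) == 0
+ *	ERR_STACK_SIZE_BYTES(1) == 4096
+ *	ERR_STACK_SIZE_BYTES(3) == 16384
+ */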
+
+#ifndef __ASSEMBLY__
+/*
+ * format of error stack and error status registers.
+ */
+
+struct err_stack_format {
+ u64 sk_addr : 33, /* address */
+ sk_cmd : 8, /* message command */
+ sk_crb_sts : 10, /* status from RRB or WRB */
+ sk_rw_rb : 1, /* RRB == 0, WRB == 1 */
+ sk_crb_num : 3, /* WRB (0 to 7) or RRB (0 to 4) */
+ sk_t5_req : 3, /* RRB T5 request number */
+ sk_suppl : 3, /* lowest 3 bit of supplemental */
+ sk_err_type: 3; /* error type */
+};
+
+typedef union pi_err_stack {
+ u64 pi_stk_word;
+ struct err_stack_format pi_stk_fmt;
+} pi_err_stack_t;
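+
+/*
+ * Illustrative sketch (not part of the original header): a handler walking
+ * the spooled error stack might decode one entry roughly as below; the
+ * read_spool_word() and handle_write_error() helpers are hypothetical.
+ *
+ *   pi_err_stack_t entry;
+ *
+ *   entry.pi_stk_word = read_spool_word();
+ *   if (entry.pi_stk_fmt.sk_rw_rb == PI_ERR_WRB)
+ *           handle_write_error(entry.pi_stk_fmt.sk_err_type);
+ */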
+
+struct err_status0_format {
+ u64 s0_valid : 1, /* Valid */
+ s0_ovr_run : 1, /* Overrun, spooled to memory */
+ s0_addr : 37, /* address */
+ s0_cmd : 8, /* message command */
+ s0_supl : 11, /* message supplemental field */
+ s0_t5_req : 3, /* RRB T5 request number */
+ s0_err_type: 3; /* error type */
+};
+
+typedef union pi_err_stat0 {
+ u64 pi_stat0_word;
+ struct err_status0_format pi_stat0_fmt;
+} pi_err_stat0_t;
+
+struct err_status1_format {
+ u64 s1_src : 11, /* message source */
+ s1_crb_sts : 10, /* status from RRB or WRB */
+ s1_rw_rb : 1, /* RRB == 0, WRB == 1 */
+ s1_crb_num : 3, /* WRB (0 to 7) or RRB (0 to 4) */
+ s1_inval_cnt:10, /* signed invalidate counter RRB */
+ s1_to_cnt : 8, /* crb timeout counter */
+ s1_spl_cnt : 21; /* number spooled to memory */
+};
+
+typedef union pi_err_stat1 {
+ u64 pi_stat1_word;
+ struct err_status1_format pi_stat1_fmt;
+} pi_err_stat1_t;
+
+typedef u64 rtc_time_t;
+
+#endif /* !__ASSEMBLY__ */
+
+
+/* Bits in PI_SYSAD_ERRCHK_EN */
+#define PI_SYSAD_ERRCHK_ECCGEN 0x01 /* Enable ECC generation */
+#define PI_SYSAD_ERRCHK_QUALGEN 0x02 /* Enable data quality signal gen. */
+#define PI_SYSAD_ERRCHK_SADP 0x04 /* Enable SysAD parity checking */
+#define PI_SYSAD_ERRCHK_CMDP 0x08 /* Enable SysCmd parity checking */
+#define PI_SYSAD_ERRCHK_STATE 0x10 /* Enable SysState parity checking */
+#define PI_SYSAD_ERRCHK_QUAL 0x20 /* Enable data quality checking */
+#define PI_SYSAD_CHECK_ALL 0x3f /* Generate and check all signals. */
+
+/* Interrupt pending bits on R10000 */
+
+#define HUB_IP_PEND0 0x0400
+#define HUB_IP_PEND1_CC 0x0800
+#define HUB_IP_RT 0x1000
+#define HUB_IP_PROF 0x2000
+#define HUB_IP_ERROR 0x4000
+#define HUB_IP_MASK 0x7c00
+
+/* PI_RT_LOCAL_CTRL mask and shift definitions */
+
+#define PRLC_USE_INT_SHFT 16
+#define PRLC_USE_INT_MASK (UINT64_CAST 1 << 16)
+#define PRLC_USE_INT (UINT64_CAST 1 << 16)
+#define PRLC_GCLK_SHFT 15
+#define PRLC_GCLK_MASK (UINT64_CAST 1 << 15)
+#define PRLC_GCLK (UINT64_CAST 1 << 15)
+#define PRLC_GCLK_COUNT_SHFT 8
+#define PRLC_GCLK_COUNT_MASK (UINT64_CAST 0x7f << 8)
+#define PRLC_MAX_COUNT_SHFT 1
+#define PRLC_MAX_COUNT_MASK (UINT64_CAST 0x7f << 1)
+#define PRLC_GCLK_EN_SHFT 0
+#define PRLC_GCLK_EN_MASK (UINT64_CAST 1)
+#define PRLC_GCLK_EN (UINT64_CAST 1)
+
+/* PI_RT_FILTER_CTRL mask and shift definitions */
+
+/*
+ * Bits for NACK_CNT_A/B and NACK_CMP
+ */
+#define PI_NACK_CNT_EN_SHFT 20
+#define PI_NACK_CNT_EN_MASK 0x100000
+#define PI_NACK_CNT_MASK 0x0fffff
+#define PI_NACK_CNT_MAX 0x0fffff
+
+#endif /* _ASM_SN_SN0_HUBPI_H */
diff --git a/arch/mips/include/asm/sn/sn0/kldir.h b/arch/mips/include/asm/sn/sn0/kldir.h
new file mode 100644
index 000000000..1b10af6cb
--- /dev/null
+++ b/arch/mips/include/asm/sn/sn0/kldir.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Derived from IRIX <sys/SN/kldir.h>, revision 1.21.
+ *
+ * Copyright (C) 1992 - 1997, 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 1999, 2000 by Ralf Baechle
+ */
+#ifndef _ASM_SN_SN0_KLDIR_H
+#define _ASM_SN_SN0_KLDIR_H
+
+
+/*
+ * The kldir memory area resides at a fixed place in each node's memory and
+ * provides pointers to most other IP27 memory areas. This allows us to
+ * resize and/or relocate memory areas at a later time without breaking all
+ * firmware and kernels that use them. Indices in the array are
+ * permanently dedicated to areas listed below. Some memory areas (marked
+ * below) reside at a permanently fixed location, but are included in the
+ * directory for completeness.
+ */
+
+/*
+ * The upper portion of the memory map applies during boot
+ * only and is overwritten by IRIX/SYMMON.
+ *
+ * MEMORY MAP PER NODE
+ *
+ * 0x2000000 (32M) +-----------------------------------------+
+ * | IO6 BUFFERS FOR FLASH ENET IOC3 |
+ * 0x1F80000 (31.5M) +-----------------------------------------+
+ * | IO6 TEXT/DATA/BSS/stack |
+ * 0x1E00000 (30M) +-----------------------------------------+
+ * | IO6 PROM DEBUG TEXT/DATA/BSS/stack |
+ * 0x1C00000 (28M) +-----------------------------------------+
+ * | IP27 PROM TEXT/DATA/BSS/stack |
+ * 0x1B00000 (27M) +-----------------------------------------+
+ * | IP27 CFG |
+ * 0x1A00000 (26M) +-----------------------------------------+
+ * | Graphics PROM |
+ * 0x1800000 (24M) +-----------------------------------------+
+ * | 3rd Party PROM drivers |
+ * 0x1600000 (22M) +-----------------------------------------+
+ * | |
+ * | Free |
+ * | |
+ * +-----------------------------------------+
+ * | UNIX DEBUG Version |
+ * 0x190000 (2M--) +-----------------------------------------+
+ * | SYMMON |
+ * | (For UNIX Debug only) |
+ * 0x34000 (208K) +-----------------------------------------+
+ * | SYMMON STACK [NUM_CPU_PER_NODE] |
+ * | (For UNIX Debug only) |
+ * 0x25000 (148K) +-----------------------------------------+
+ * | KLCONFIG - II (temp) |
+ * | |
+ * | ---------------------------- |
+ * | |
+ * | UNIX NON-DEBUG Version |
+ * 0x19000 (100K) +-----------------------------------------+
+ *
+ *
+ * The lower portion of the memory map contains information that is
+ * permanent and is used by the IP27PROM, IO6PROM and IRIX.
+ *
+ * 0x19000 (100K) +-----------------------------------------+
+ * | |
+ * | PI Error Spools (32K) |
+ * | |
+ * 0x12000 (72K) +-----------------------------------------+
+ * | Unused |
+ * 0x11c00 (71K) +-----------------------------------------+
+ * | CPU 1 NMI Eframe area |
+ * 0x11a00 (70.5K) +-----------------------------------------+
+ * | CPU 0 NMI Eframe area |
+ * 0x11800 (70K) +-----------------------------------------+
+ * | CPU 1 NMI Register save area |
+ * 0x11600 (69.5K) +-----------------------------------------+
+ * | CPU 0 NMI Register save area |
+ * 0x11400 (69K) +-----------------------------------------+
+ * | GDA (1k) |
+ * 0x11000 (68K) +-----------------------------------------+
+ * | Early cache Exception stack |
+ * | and/or |
+ * | kernel/io6prom nmi registers |
+ * 0x10800 (66k) +-----------------------------------------+
+ * | cache error eframe |
+ * 0x10400 (65K) +-----------------------------------------+
+ * | Exception Handlers (UALIAS copy) |
+ * 0x10000 (64K) +-----------------------------------------+
+ * | |
+ * | |
+ * | KLCONFIG - I (permanent) (48K) |
+ * | |
+ * | |
+ * | |
+ * 0x4000 (16K) +-----------------------------------------+
+ * | NMI Handler (Protected Page) |
+ * 0x3000 (12K) +-----------------------------------------+
+ * | ARCS PVECTORS (master node only) |
+ * 0x2c00 (11K) +-----------------------------------------+
+ * | ARCS TVECTORS (master node only) |
+ * 0x2800 (10K) +-----------------------------------------+
+ * | LAUNCH [NUM_CPU] |
+ * 0x2400 (9K) +-----------------------------------------+
+ * | Low memory directory (KLDIR) |
+ * 0x2000 (8K) +-----------------------------------------+
+ * | ARCS SPB (1K) |
+ * 0x1000 (4K) +-----------------------------------------+
+ * | Early cache Exception stack |
+ * | and/or |
+ * | kernel/io6prom nmi registers |
+ * 0x800 (2k) +-----------------------------------------+
+ * | cache error eframe |
+ * 0x400 (1K) +-----------------------------------------+
+ * | Exception Handlers |
+ * 0x0 (0K) +-----------------------------------------+
+ */
+
+/*
+ * This is defined here because IP27_SYMMON_STK_SIZE must be at least what
+ * we define here; since it's set up in the PROM, we can't redefine it later
+ * and expect more space to be allocated. The way to find out the true size
+ * of the symmon stacks is to divide SYMMON_STK_SIZE by SYMMON_STK_STRIDE
+ * for a particular node.
+ */
+#define SYMMON_STACK_SIZE 0x8000
+
+#if defined(PROM)
+
+/*
+ * These defines are prom version dependent. No code other than the IP27
+ * prom should attempt to use these values.
+ */
+#define IP27_LAUNCH_OFFSET 0x2400
+#define IP27_LAUNCH_SIZE 0x400
+#define IP27_LAUNCH_COUNT 2
+#define IP27_LAUNCH_STRIDE 0x200
+
+#define IP27_KLCONFIG_OFFSET 0x4000
+#define IP27_KLCONFIG_SIZE 0xc000
+#define IP27_KLCONFIG_COUNT 1
+#define IP27_KLCONFIG_STRIDE 0
+
+#define IP27_NMI_OFFSET 0x3000
+#define IP27_NMI_SIZE 0x40
+#define IP27_NMI_COUNT 2
+#define IP27_NMI_STRIDE 0x40
+
+#define IP27_PI_ERROR_OFFSET 0x12000
+#define IP27_PI_ERROR_SIZE 0x4000
+#define IP27_PI_ERROR_COUNT 1
+#define IP27_PI_ERROR_STRIDE 0
+
+#define IP27_SYMMON_STK_OFFSET 0x25000
+#define IP27_SYMMON_STK_SIZE 0xe000
+#define IP27_SYMMON_STK_COUNT 2
+/* IP27_SYMMON_STK_STRIDE must be >= SYMMON_STACK_SIZE */
+#define IP27_SYMMON_STK_STRIDE 0x7000
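+
+/*
+ * Worked example (not part of the original header): with the values above,
+ * IP27_SYMMON_STK_SIZE / IP27_SYMMON_STK_STRIDE = 0xe000 / 0x7000 = 2
+ * symmon stacks per node, matching IP27_SYMMON_STK_COUNT.
+ */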
+
+#define IP27_FREEMEM_OFFSET 0x19000
+#define IP27_FREEMEM_SIZE -1
+#define IP27_FREEMEM_COUNT 1
+#define IP27_FREEMEM_STRIDE 0
+
+#endif /* PROM */
+/*
+ * There will be only one of these in a partition so the IO6 must set it up.
+ */
+#define IO6_GDA_OFFSET 0x11000
+#define IO6_GDA_SIZE 0x400
+#define IO6_GDA_COUNT 1
+#define IO6_GDA_STRIDE 0
+
+/*
+ * save area of kernel nmi regs in the prom format
+ */
+#define IP27_NMI_KREGS_OFFSET 0x11400
+#define IP27_NMI_KREGS_CPU_SIZE 0x200
+/*
+ * save area of kernel nmi regs in eframe format
+ */
+#define IP27_NMI_EFRAME_OFFSET 0x11800
+#define IP27_NMI_EFRAME_SIZE 0x200
+
+#endif /* _ASM_SN_SN0_KLDIR_H */
diff --git a/arch/mips/include/asm/sn/types.h b/arch/mips/include/asm/sn/types.h
new file mode 100644
index 000000000..451ba1ee4
--- /dev/null
+++ b/arch/mips/include/asm/sn/types.h
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_SN_TYPES_H
+#define _ASM_SN_TYPES_H
+
+#include <linux/types.h>
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long cpuid_t;
+typedef signed short nasid_t; /* node id in numa-as-id space */
+typedef signed char partid_t; /* partition ID type */
+typedef signed short moduleid_t; /* user-visible module number type */
+
+typedef dev_t vertex_hdl_t; /* hardware graph vertex handle */
+
+#endif
+
+#endif /* _ASM_SN_TYPES_H */
diff --git a/arch/mips/include/asm/sni.h b/arch/mips/include/asm/sni.h
new file mode 100644
index 000000000..7dfa297ce
--- /dev/null
+++ b/arch/mips/include/asm/sni.h
@@ -0,0 +1,246 @@
+/*
+ * SNI specific definitions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1997, 1998 by Ralf Baechle
+ * Copyright (C) 2006 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ */
+#ifndef __ASM_SNI_H
+#define __ASM_SNI_H
+
+#include <linux/irqreturn.h>
+
+extern unsigned int sni_brd_type;
+
+#define SNI_BRD_10 2
+#define SNI_BRD_10NEW 3
+#define SNI_BRD_TOWER_OASIC 4
+#define SNI_BRD_MINITOWER 5
+#define SNI_BRD_PCI_TOWER 6
+#define SNI_BRD_RM200 7
+#define SNI_BRD_PCI_MTOWER 8
+#define SNI_BRD_PCI_DESKTOP 9
+#define SNI_BRD_PCI_TOWER_CPLUS 10
+#define SNI_BRD_PCI_MTOWER_CPLUS 11
+
+/* RM400 cpu types */
+#define SNI_CPU_M8021 0x01
+#define SNI_CPU_M8030 0x04
+#define SNI_CPU_M8031 0x06
+#define SNI_CPU_M8034 0x0f
+#define SNI_CPU_M8037 0x07
+#define SNI_CPU_M8040 0x05
+#define SNI_CPU_M8043 0x09
+#define SNI_CPU_M8050 0x0b
+#define SNI_CPU_M8053 0x0d
+
+#define SNI_PORT_BASE CKSEG1ADDR(0xb4000000)
+
+#ifndef __MIPSEL__
+/*
+ * ASIC PCI registers for big endian configuration.
+ */
+#define PCIMT_UCONF CKSEG1ADDR(0xbfff0004)
+#define PCIMT_IOADTIMEOUT2 CKSEG1ADDR(0xbfff000c)
+#define PCIMT_IOMEMCONF CKSEG1ADDR(0xbfff0014)
+#define PCIMT_IOMMU CKSEG1ADDR(0xbfff001c)
+#define PCIMT_IOADTIMEOUT1 CKSEG1ADDR(0xbfff0024)
+#define PCIMT_DMAACCESS CKSEG1ADDR(0xbfff002c)
+#define PCIMT_DMAHIT CKSEG1ADDR(0xbfff0034)
+#define PCIMT_ERRSTATUS CKSEG1ADDR(0xbfff003c)
+#define PCIMT_ERRADDR CKSEG1ADDR(0xbfff0044)
+#define PCIMT_SYNDROME CKSEG1ADDR(0xbfff004c)
+#define PCIMT_ITPEND CKSEG1ADDR(0xbfff0054)
+#define IT_INT2 0x01
+#define IT_INTD 0x02
+#define IT_INTC 0x04
+#define IT_INTB 0x08
+#define IT_INTA 0x10
+#define IT_EISA 0x20
+#define IT_SCSI 0x40
+#define IT_ETH 0x80
+#define PCIMT_IRQSEL CKSEG1ADDR(0xbfff005c)
+#define PCIMT_TESTMEM CKSEG1ADDR(0xbfff0064)
+#define PCIMT_ECCREG CKSEG1ADDR(0xbfff006c)
+#define PCIMT_CONFIG_ADDRESS CKSEG1ADDR(0xbfff0074)
+#define PCIMT_ASIC_ID CKSEG1ADDR(0xbfff007c) /* read */
+#define PCIMT_SOFT_RESET CKSEG1ADDR(0xbfff007c) /* write */
+#define PCIMT_PIA_OE CKSEG1ADDR(0xbfff0084)
+#define PCIMT_PIA_DATAOUT CKSEG1ADDR(0xbfff008c)
+#define PCIMT_PIA_DATAIN CKSEG1ADDR(0xbfff0094)
+#define PCIMT_CACHECONF CKSEG1ADDR(0xbfff009c)
+#define PCIMT_INVSPACE CKSEG1ADDR(0xbfff00a4)
+#else
+/*
+ * ASIC PCI registers for little endian configuration.
+ */
+#define PCIMT_UCONF CKSEG1ADDR(0xbfff0000)
+#define PCIMT_IOADTIMEOUT2 CKSEG1ADDR(0xbfff0008)
+#define PCIMT_IOMEMCONF CKSEG1ADDR(0xbfff0010)
+#define PCIMT_IOMMU CKSEG1ADDR(0xbfff0018)
+#define PCIMT_IOADTIMEOUT1 CKSEG1ADDR(0xbfff0020)
+#define PCIMT_DMAACCESS CKSEG1ADDR(0xbfff0028)
+#define PCIMT_DMAHIT CKSEG1ADDR(0xbfff0030)
+#define PCIMT_ERRSTATUS CKSEG1ADDR(0xbfff0038)
+#define PCIMT_ERRADDR CKSEG1ADDR(0xbfff0040)
+#define PCIMT_SYNDROME CKSEG1ADDR(0xbfff0048)
+#define PCIMT_ITPEND CKSEG1ADDR(0xbfff0050)
+#define IT_INT2 0x01
+#define IT_INTD 0x02
+#define IT_INTC 0x04
+#define IT_INTB 0x08
+#define IT_INTA 0x10
+#define IT_EISA 0x20
+#define IT_SCSI 0x40
+#define IT_ETH 0x80
+#define PCIMT_IRQSEL CKSEG1ADDR(0xbfff0058)
+#define PCIMT_TESTMEM CKSEG1ADDR(0xbfff0060)
+#define PCIMT_ECCREG CKSEG1ADDR(0xbfff0068)
+#define PCIMT_CONFIG_ADDRESS CKSEG1ADDR(0xbfff0070)
+#define PCIMT_ASIC_ID CKSEG1ADDR(0xbfff0078) /* read */
+#define PCIMT_SOFT_RESET CKSEG1ADDR(0xbfff0078) /* write */
+#define PCIMT_PIA_OE CKSEG1ADDR(0xbfff0080)
+#define PCIMT_PIA_DATAOUT CKSEG1ADDR(0xbfff0088)
+#define PCIMT_PIA_DATAIN CKSEG1ADDR(0xbfff0090)
+#define PCIMT_CACHECONF CKSEG1ADDR(0xbfff0098)
+#define PCIMT_INVSPACE CKSEG1ADDR(0xbfff00a0)
+#endif
+
+#define PCIMT_PCI_CONF CKSEG1ADDR(0xbfff0100)
+
+/*
+ * Data port for the PCI bus in IO space
+ */
+#define PCIMT_CONFIG_DATA 0x0cfc
+
+/*
+ * Board specific registers
+ */
+#define PCIMT_CSMSR CKSEG1ADDR(0xbfd00000)
+#define PCIMT_CSSWITCH CKSEG1ADDR(0xbfd10000)
+#define PCIMT_CSITPEND CKSEG1ADDR(0xbfd20000)
+#define PCIMT_AUTO_PO_EN CKSEG1ADDR(0xbfd30000)
+#define PCIMT_CLR_TEMP CKSEG1ADDR(0xbfd40000)
+#define PCIMT_AUTO_PO_DIS CKSEG1ADDR(0xbfd50000)
+#define PCIMT_EXMSR CKSEG1ADDR(0xbfd60000)
+#define PCIMT_UNUSED1 CKSEG1ADDR(0xbfd70000)
+#define PCIMT_CSWCSM CKSEG1ADDR(0xbfd80000)
+#define PCIMT_UNUSED2 CKSEG1ADDR(0xbfd90000)
+#define PCIMT_CSLED CKSEG1ADDR(0xbfda0000)
+#define PCIMT_CSMAPISA CKSEG1ADDR(0xbfdb0000)
+#define PCIMT_CSRSTBP CKSEG1ADDR(0xbfdc0000)
+#define PCIMT_CLRPOFF CKSEG1ADDR(0xbfdd0000)
+#define PCIMT_CSTIMER CKSEG1ADDR(0xbfde0000)
+#define PCIMT_PWDN CKSEG1ADDR(0xbfdf0000)
+
+/*
+ * A20R based boards
+ */
+#define A20R_PT_CLOCK_BASE CKSEG1ADDR(0xbc040000)
+#define A20R_PT_TIM0_ACK CKSEG1ADDR(0xbc050000)
+#define A20R_PT_TIM1_ACK CKSEG1ADDR(0xbc060000)
+
+#define SNI_A20R_IRQ_BASE MIPS_CPU_IRQ_BASE
+#define SNI_A20R_IRQ_TIMER (SNI_A20R_IRQ_BASE+5)
+
+#define SNI_PCIT_INT_REG CKSEG1ADDR(0xbfff000c)
+
+#define SNI_PCIT_INT_START 24
+#define SNI_PCIT_INT_END 30
+
+#define PCIT_IRQ_ETHERNET (MIPS_CPU_IRQ_BASE + 5)
+#define PCIT_IRQ_INTA (SNI_PCIT_INT_START + 0)
+#define PCIT_IRQ_INTB (SNI_PCIT_INT_START + 1)
+#define PCIT_IRQ_INTC (SNI_PCIT_INT_START + 2)
+#define PCIT_IRQ_INTD (SNI_PCIT_INT_START + 3)
+#define PCIT_IRQ_SCSI0 (SNI_PCIT_INT_START + 4)
+#define PCIT_IRQ_SCSI1 (SNI_PCIT_INT_START + 5)
+
+
+/*
+ * Interrupts 0-15 are EISA interrupts. Interrupts from 16 on are assigned
+ * to the other interrupts generated by the ASIC PCI.
+ *
+ * INT2 is a wired-or of the push button interrupt, the high temperature
+ * interrupt and the ASIC PCI interrupt.
+ */
+#define PCIMT_KEYBOARD_IRQ 1
+#define PCIMT_IRQ_INT2 24
+#define PCIMT_IRQ_INTD 25
+#define PCIMT_IRQ_INTC 26
+#define PCIMT_IRQ_INTB 27
+#define PCIMT_IRQ_INTA 28
+#define PCIMT_IRQ_EISA 29
+#define PCIMT_IRQ_SCSI 30
+
+#define PCIMT_IRQ_ETHERNET (MIPS_CPU_IRQ_BASE+6)
+
+#if 0
+#define PCIMT_IRQ_TEMPERATURE 24
+#define PCIMT_IRQ_EISA_NMI 25
+#define PCIMT_IRQ_POWER_OFF 26
+#define PCIMT_IRQ_BUTTON 27
+#endif
+
+/*
+ * Base address for the mapped 16mb EISA bus segment.
+ */
+#define PCIMT_EISA_BASE CKSEG1ADDR(0xb0000000)
+
+/* PCI EISA Interrupt acknowledge */
+#define PCIMT_INT_ACKNOWLEDGE CKSEG1ADDR(0xba000000)
+
+/*
+ * SNI ID PROM
+ *
+ * SNI_IDPROM_MEMSIZE Memsize in 16MB quantities
+ * SNI_IDPROM_BRDTYPE Board Type
+ * SNI_IDPROM_CPUTYPE CPU Type on RM400
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define __SNI_END 0
+#endif
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define __SNI_END 3
+#endif
+#define SNI_IDPROM_BASE CKSEG1ADDR(0x1ff00000)
+#define SNI_IDPROM_MEMSIZE (SNI_IDPROM_BASE + (0x28 ^ __SNI_END))
+#define SNI_IDPROM_BRDTYPE (SNI_IDPROM_BASE + (0x29 ^ __SNI_END))
+#define SNI_IDPROM_CPUTYPE (SNI_IDPROM_BASE + (0x30 ^ __SNI_END))
+
+#define SNI_IDPROM_SIZE 0x1000
+
+/* board specific init functions */
+extern void sni_a20r_init(void);
+extern void sni_pcit_init(void);
+extern void sni_rm200_init(void);
+extern void sni_pcimt_init(void);
+
+/* board specific irq init functions */
+extern void sni_a20r_irq_init(void);
+extern void sni_pcit_irq_init(void);
+extern void sni_pcit_cplus_irq_init(void);
+extern void sni_rm200_irq_init(void);
+extern void sni_pcimt_irq_init(void);
+
+/* timer inits */
+extern void sni_cpu_time_init(void);
+
+/* eisa init for RM200/400 */
+#ifdef CONFIG_EISA
+extern int sni_eisa_root_init(void);
+#else
+static inline int sni_eisa_root_init(void)
+{
+ return 0;
+}
+#endif
+
+/* common irq stuff */
+extern void (*sni_hwint)(void);
+extern irqreturn_t sni_isa_irq_handler(int dummy, void *p);
+
+#endif /* __ASM_SNI_H */
diff --git a/arch/mips/include/asm/socket.h b/arch/mips/include/asm/socket.h
new file mode 100644
index 000000000..4724a563c
--- /dev/null
+++ b/arch/mips/include/asm/socket.h
@@ -0,0 +1,50 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1997, 1999, 2000, 2001 Ralf Baechle
+ * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_SOCKET_H
+#define _ASM_SOCKET_H
+
+#include <uapi/asm/socket.h>
+
+
+/** sock_type - Socket types
+ *
+ * Please notice that for binary compat reasons MIPS has to
+ * override the enum sock_type in include/linux/net.h, so
+ * we define ARCH_HAS_SOCKET_TYPES here.
+ *
+ * @SOCK_DGRAM - datagram (conn.less) socket
+ * @SOCK_STREAM - stream (connection) socket
+ * @SOCK_RAW - raw socket
+ * @SOCK_RDM - reliably-delivered message
+ * @SOCK_SEQPACKET - sequential packet socket
+ * @SOCK_PACKET - linux specific way of getting packets at the dev level.
+ * For writing rarp and other similar things on the user level.
+ */
+enum sock_type {
+ SOCK_DGRAM = 1,
+ SOCK_STREAM = 2,
+ SOCK_RAW = 3,
+ SOCK_RDM = 4,
+ SOCK_SEQPACKET = 5,
+ SOCK_DCCP = 6,
+ SOCK_PACKET = 10,
+};
+
+#define SOCK_MAX (SOCK_PACKET + 1)
+/* Mask which covers at least up to SOCK_MAX-1. The
+ * remaining bits are used as flags. */
+#define SOCK_TYPE_MASK 0xf
+
+/* Flags for socket, socketpair, paccept */
+#define SOCK_CLOEXEC O_CLOEXEC
+#define SOCK_NONBLOCK O_NONBLOCK
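+
+/*
+ * Illustrative sketch (not part of the original header): socket(2) callers
+ * may OR SOCK_CLOEXEC/SOCK_NONBLOCK into the type argument; the socket core
+ * then splits type and flags roughly as:
+ *
+ *   flags = type & ~SOCK_TYPE_MASK;
+ *   type &= SOCK_TYPE_MASK;
+ */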
+
+#define ARCH_HAS_SOCKET_TYPES 1
+
+#endif /* _ASM_SOCKET_H */
diff --git a/arch/mips/include/asm/sparsemem.h b/arch/mips/include/asm/sparsemem.h
new file mode 100644
index 000000000..b0686ca3d
--- /dev/null
+++ b/arch/mips/include/asm/sparsemem.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MIPS_SPARSEMEM_H
+#define _MIPS_SPARSEMEM_H
+#ifdef CONFIG_SPARSEMEM
+
+/*
+ * SECTION_SIZE_BITS 2^N: how big each section will be
+ * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
+ */
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && defined(CONFIG_PAGE_SIZE_64KB)
+# define SECTION_SIZE_BITS 29
+#else
+# define SECTION_SIZE_BITS 28
+#endif
+#define MAX_PHYSMEM_BITS 48
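+
+/*
+ * Illustrative arithmetic (not part of the original header): 28 bits gives
+ * 256 MB sections (29 bits, i.e. 512 MB, with 64 KB huge pages), while
+ * 48 physical address bits allow up to 256 TB of memory to be described.
+ */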
+
+#endif /* CONFIG_SPARSEMEM */
+#endif /* _MIPS_SPARSEMEM_H */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
new file mode 100644
index 000000000..8a88eb265
--- /dev/null
+++ b/arch/mips/include/asm/spinlock.h
@@ -0,0 +1,31 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_SPINLOCK_H
+#define _ASM_SPINLOCK_H
+
+#include <asm/processor.h>
+#include <asm/qrwlock.h>
+
+#include <asm-generic/qspinlock_types.h>
+
+#define queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+ /* This could be optimised with ARCH_HAS_MMIOWB */
+ mmiowb();
+ smp_store_release(&lock->locked, 0);
+}
+
+#include <asm/qspinlock.h>
+
+#endif /* _ASM_SPINLOCK_H */
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h
new file mode 100644
index 000000000..28fd4b140
--- /dev/null
+++ b/arch/mips/include/asm/spinlock_types.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SPINLOCK_TYPES_H
+#define _ASM_SPINLOCK_TYPES_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>
+
+#endif
diff --git a/arch/mips/include/asm/spram.h b/arch/mips/include/asm/spram.h
new file mode 100644
index 000000000..63cb90fd4
--- /dev/null
+++ b/arch/mips/include/asm/spram.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MIPS_SPRAM_H
+#define _MIPS_SPRAM_H
+
+#if defined(CONFIG_MIPS_SPRAM)
+extern __init void spram_config(void);
+#else
+static inline void spram_config(void) { };
+#endif /* CONFIG_MIPS_SPRAM */
+
+#endif /* _MIPS_SPRAM_H */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
new file mode 100644
index 000000000..aa430a6c6
--- /dev/null
+++ b/arch/mips/include/asm/stackframe.h
@@ -0,0 +1,492 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
+ * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2007 Maciej W. Rozycki
+ */
+#ifndef _ASM_STACKFRAME_H
+#define _ASM_STACKFRAME_H
+
+#include <linux/threads.h>
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/mipsregs.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+/* Make the addition of cfi info a little easier. */
+ .macro cfi_rel_offset reg offset=0 docfi=0
+ .if \docfi
+ .cfi_rel_offset \reg, \offset
+ .endif
+ .endm
+
+ .macro cfi_st reg offset=0 docfi=0
+ LONG_S \reg, \offset(sp)
+ cfi_rel_offset \reg, \offset, \docfi
+ .endm
+
+ .macro cfi_restore reg offset=0 docfi=0
+ .if \docfi
+ .cfi_restore \reg
+ .endif
+ .endm
+
+ .macro cfi_ld reg offset=0 docfi=0
+ LONG_L \reg, \offset(sp)
+ cfi_restore \reg \offset \docfi
+ .endm
+
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+#define STATMASK 0x3f
+#else
+#define STATMASK 0x1f
+#endif
+
+ .macro SAVE_AT docfi=0
+ .set push
+ .set noat
+ cfi_st $1, PT_R1, \docfi
+ .set pop
+ .endm
+
+ .macro SAVE_TEMP docfi=0
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ mflhxu v1
+ LONG_S v1, PT_LO(sp)
+ mflhxu v1
+ LONG_S v1, PT_HI(sp)
+ mflhxu v1
+ LONG_S v1, PT_ACX(sp)
+#elif !defined(CONFIG_CPU_MIPSR6)
+ mfhi v1
+#endif
+#ifdef CONFIG_32BIT
+ cfi_st $8, PT_R8, \docfi
+ cfi_st $9, PT_R9, \docfi
+#endif
+ cfi_st $10, PT_R10, \docfi
+ cfi_st $11, PT_R11, \docfi
+ cfi_st $12, PT_R12, \docfi
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
+ LONG_S v1, PT_HI(sp)
+ mflo v1
+#endif
+ cfi_st $13, PT_R13, \docfi
+ cfi_st $14, PT_R14, \docfi
+ cfi_st $15, PT_R15, \docfi
+ cfi_st $24, PT_R24, \docfi
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
+ LONG_S v1, PT_LO(sp)
+#endif
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ /*
+ * The Octeon multiplier state is affected by general
+ * multiply instructions. It must be saved here, before
+ * kernel code can corrupt it.
+ */
+ jal octeon_mult_save
+#endif
+ .endm
+
+ .macro SAVE_STATIC docfi=0
+ cfi_st $16, PT_R16, \docfi
+ cfi_st $17, PT_R17, \docfi
+ cfi_st $18, PT_R18, \docfi
+ cfi_st $19, PT_R19, \docfi
+ cfi_st $20, PT_R20, \docfi
+ cfi_st $21, PT_R21, \docfi
+ cfi_st $22, PT_R22, \docfi
+ cfi_st $23, PT_R23, \docfi
+ cfi_st $30, PT_R30, \docfi
+ .endm
+
+/*
+ * get_saved_sp returns the SP for the current CPU by looking in the
+ * kernelsp array for it. If tosp is set, it stores the current sp in
+ * k0 and loads the new value in sp. If not, it clobbers k0 and
+ * stores the new value in k1, leaving sp unaffected.
+ */
+#ifdef CONFIG_SMP
+
+ /* SMP variation */
+ .macro get_saved_sp docfi=0 tosp=0
+ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+ lui k1, %hi(kernelsp)
+#else
+ lui k1, %highest(kernelsp)
+ daddiu k1, %higher(kernelsp)
+ dsll k1, 16
+ daddiu k1, %hi(kernelsp)
+ dsll k1, 16
+#endif
+ LONG_SRL k0, SMP_CPUID_PTRSHIFT
+ LONG_ADDU k1, k0
+ .if \tosp
+ move k0, sp
+ .if \docfi
+ .cfi_register sp, k0
+ .endif
+ LONG_L sp, %lo(kernelsp)(k1)
+ .else
+ LONG_L k1, %lo(kernelsp)(k1)
+ .endif
+ .endm
+
+ .macro set_saved_sp stackp temp temp2
+ ASM_CPUID_MFC0 \temp, ASM_SMP_CPUID_REG
+ LONG_SRL \temp, SMP_CPUID_PTRSHIFT
+ LONG_S \stackp, kernelsp(\temp)
+ .endm
+#else /* !CONFIG_SMP */
+ /* Uniprocessor variation */
+ .macro get_saved_sp docfi=0 tosp=0
+#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
+ /*
+ * Clear BTB (branch target buffer) and forbid RAS (return address
+ * stack) to work around the out-of-order issue in Loongson2F
+ * via its diagnostic register.
+ */
+ move k0, ra
+ jal 1f
+ nop
+1: jal 1f
+ nop
+1: jal 1f
+ nop
+1: jal 1f
+ nop
+1: move ra, k0
+ li k0, 3
+ mtc0 k0, $22
+#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+ lui k1, %hi(kernelsp)
+#else
+ lui k1, %highest(kernelsp)
+ daddiu k1, %higher(kernelsp)
+ dsll k1, k1, 16
+ daddiu k1, %hi(kernelsp)
+ dsll k1, k1, 16
+#endif
+ .if \tosp
+ move k0, sp
+ .if \docfi
+ .cfi_register sp, k0
+ .endif
+ LONG_L sp, %lo(kernelsp)(k1)
+ .else
+ LONG_L k1, %lo(kernelsp)(k1)
+ .endif
+ .endm
+
+ .macro set_saved_sp stackp temp temp2
+ LONG_S \stackp, kernelsp
+ .endm
+#endif
+
+ .macro SAVE_SOME docfi=0
+ .set push
+ .set noat
+ .set reorder
+ mfc0 k0, CP0_STATUS
+ sll k0, 3 /* extract cu0 bit */
+ .set noreorder
+ bltz k0, 8f
+ move k0, sp
+ .if \docfi
+ .cfi_register sp, k0
+ .endif
+#ifdef CONFIG_EVA
+ /*
+ * Flush interAptiv's Return Prediction Stack (RPS) by writing
+ * EntryHi. Toggling Config7.RPS is slower and less portable.
+ *
+ * The RPS isn't automatically flushed when exceptions are
+ * taken, which can result in kernel mode speculative accesses
+ * to user addresses if the RPS mispredicts. That's harmless
+ * when user and kernel share the same address space, but with
+ * EVA the same user segments may be unmapped to kernel mode,
+ * even containing sensitive MMIO regions or invalid memory.
+ *
+ * This can happen when the kernel sets the return address to
+ * ret_from_* and jr's to the exception handler, which looks
+ * more like a tail call than a function call. If nested calls
+ * don't evict the last user address in the RPS, it will
+ * mispredict the return and fetch from a user controlled
+ * address into the icache.
+ *
+ * More recent EVA-capable cores with MAAR to restrict
+ * speculative accesses aren't affected.
+ */
+ MFC0 k0, CP0_ENTRYHI
+ MTC0 k0, CP0_ENTRYHI
+#endif
+ .set reorder
+ /* Called from user mode, new stack. */
+ get_saved_sp docfi=\docfi tosp=1
+8:
+#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
+ .set at=k1
+#endif
+ PTR_SUBU sp, PT_SIZE
+#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
+ .set noat
+#endif
+ .if \docfi
+ .cfi_def_cfa sp,0
+ .endif
+ cfi_st k0, PT_R29, \docfi
+ cfi_rel_offset sp, PT_R29, \docfi
+ cfi_st v1, PT_R3, \docfi
+ /*
+ * You might think that you don't need to save $0,
+ * but the FPU emulator and gdb remote debug stub
+ * need it to operate correctly
+ */
+ LONG_S $0, PT_R0(sp)
+ mfc0 v1, CP0_STATUS
+ cfi_st v0, PT_R2, \docfi
+ LONG_S v1, PT_STATUS(sp)
+ cfi_st $4, PT_R4, \docfi
+ mfc0 v1, CP0_CAUSE
+ cfi_st $5, PT_R5, \docfi
+ LONG_S v1, PT_CAUSE(sp)
+ cfi_st $6, PT_R6, \docfi
+ cfi_st ra, PT_R31, \docfi
+ MFC0 ra, CP0_EPC
+ cfi_st $7, PT_R7, \docfi
+#ifdef CONFIG_64BIT
+ cfi_st $8, PT_R8, \docfi
+ cfi_st $9, PT_R9, \docfi
+#endif
+ LONG_S ra, PT_EPC(sp)
+ .if \docfi
+ .cfi_rel_offset ra, PT_EPC
+ .endif
+ cfi_st $25, PT_R25, \docfi
+ cfi_st $28, PT_R28, \docfi
+
+ /* Set thread_info if we're coming from user mode */
+ mfc0 k0, CP0_STATUS
+ sll k0, 3 /* extract cu0 bit */
+ bltz k0, 9f
+
+ ori $28, sp, _THREAD_MASK
+ xori $28, _THREAD_MASK
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ .set mips64
+ pref 0, 0($28) /* Prefetch the current pointer */
+#endif
+9:
+ .set pop
+ .endm
+
+ .macro SAVE_ALL docfi=0
+ SAVE_SOME \docfi
+ SAVE_AT \docfi
+ SAVE_TEMP \docfi
+ SAVE_STATIC \docfi
+ .endm
+
+ .macro RESTORE_AT docfi=0
+ .set push
+ .set noat
+ cfi_ld $1, PT_R1, \docfi
+ .set pop
+ .endm
+
+ .macro RESTORE_TEMP docfi=0
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ /* Restore the Octeon multiplier state */
+ jal octeon_mult_restore
+#endif
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ LONG_L $24, PT_ACX(sp)
+ mtlhx $24
+ LONG_L $24, PT_HI(sp)
+ mtlhx $24
+ LONG_L $24, PT_LO(sp)
+ mtlhx $24
+#elif !defined(CONFIG_CPU_MIPSR6)
+ LONG_L $24, PT_LO(sp)
+ mtlo $24
+ LONG_L $24, PT_HI(sp)
+ mthi $24
+#endif
+#ifdef CONFIG_32BIT
+ cfi_ld $8, PT_R8, \docfi
+ cfi_ld $9, PT_R9, \docfi
+#endif
+ cfi_ld $10, PT_R10, \docfi
+ cfi_ld $11, PT_R11, \docfi
+ cfi_ld $12, PT_R12, \docfi
+ cfi_ld $13, PT_R13, \docfi
+ cfi_ld $14, PT_R14, \docfi
+ cfi_ld $15, PT_R15, \docfi
+ cfi_ld $24, PT_R24, \docfi
+ .endm
+
+ .macro RESTORE_STATIC docfi=0
+ cfi_ld $16, PT_R16, \docfi
+ cfi_ld $17, PT_R17, \docfi
+ cfi_ld $18, PT_R18, \docfi
+ cfi_ld $19, PT_R19, \docfi
+ cfi_ld $20, PT_R20, \docfi
+ cfi_ld $21, PT_R21, \docfi
+ cfi_ld $22, PT_R22, \docfi
+ cfi_ld $23, PT_R23, \docfi
+ cfi_ld $30, PT_R30, \docfi
+ .endm
+
+ .macro RESTORE_SP docfi=0
+ cfi_ld sp, PT_R29, \docfi
+ .endm
+
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+
+ .macro RESTORE_SOME docfi=0
+ .set push
+ .set reorder
+ .set noat
+ mfc0 a0, CP0_STATUS
+ li v1, ST0_CU1 | ST0_IM
+ ori a0, STATMASK
+ xori a0, STATMASK
+ mtc0 a0, CP0_STATUS
+ and a0, v1
+ LONG_L v0, PT_STATUS(sp)
+ nor v1, $0, v1
+ and v0, v1
+ or v0, a0
+ mtc0 v0, CP0_STATUS
+ cfi_ld $31, PT_R31, \docfi
+ cfi_ld $28, PT_R28, \docfi
+ cfi_ld $25, PT_R25, \docfi
+ cfi_ld $7, PT_R7, \docfi
+ cfi_ld $6, PT_R6, \docfi
+ cfi_ld $5, PT_R5, \docfi
+ cfi_ld $4, PT_R4, \docfi
+ cfi_ld $3, PT_R3, \docfi
+ cfi_ld $2, PT_R2, \docfi
+ .set pop
+ .endm
+
+ .macro RESTORE_SP_AND_RET docfi=0
+ .set push
+ .set noreorder
+ LONG_L k0, PT_EPC(sp)
+ RESTORE_SP \docfi
+ jr k0
+ rfe
+ .set pop
+ .endm
+
+#else
+ .macro RESTORE_SOME docfi=0
+ .set push
+ .set reorder
+ .set noat
+ mfc0 a0, CP0_STATUS
+ ori a0, STATMASK
+ xori a0, STATMASK
+ mtc0 a0, CP0_STATUS
+ li v1, ST0_CU1 | ST0_FR | ST0_IM
+ and a0, v1
+ LONG_L v0, PT_STATUS(sp)
+ nor v1, $0, v1
+ and v0, v1
+ or v0, a0
+ mtc0 v0, CP0_STATUS
+ LONG_L v1, PT_EPC(sp)
+ MTC0 v1, CP0_EPC
+ cfi_ld $31, PT_R31, \docfi
+ cfi_ld $28, PT_R28, \docfi
+ cfi_ld $25, PT_R25, \docfi
+#ifdef CONFIG_64BIT
+ cfi_ld $8, PT_R8, \docfi
+ cfi_ld $9, PT_R9, \docfi
+#endif
+ cfi_ld $7, PT_R7, \docfi
+ cfi_ld $6, PT_R6, \docfi
+ cfi_ld $5, PT_R5, \docfi
+ cfi_ld $4, PT_R4, \docfi
+ cfi_ld $3, PT_R3, \docfi
+ cfi_ld $2, PT_R2, \docfi
+ .set pop
+ .endm
+
+ .macro RESTORE_SP_AND_RET docfi=0
+ RESTORE_SP \docfi
+#if defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6)
+ eretnc
+#else
+ .set push
+ .set arch=r4000
+ eret
+ .set pop
+#endif
+ .endm
+
+#endif
+
+ .macro RESTORE_ALL docfi=0
+ RESTORE_TEMP \docfi
+ RESTORE_STATIC \docfi
+ RESTORE_AT \docfi
+ RESTORE_SOME \docfi
+ RESTORE_SP \docfi
+ .endm
+
+/*
+ * Move to kernel mode and disable interrupts.
+ * Set cp0 enable bit as sign that we're running on the kernel stack
+ */
+ .macro CLI
+ mfc0 t0, CP0_STATUS
+ li t1, ST0_KERNEL_CUMASK | STATMASK
+ or t0, t1
+ xori t0, STATMASK
+ mtc0 t0, CP0_STATUS
+ irq_disable_hazard
+ .endm
+
+/*
+ * Move to kernel mode and enable interrupts.
+ * Set cp0 enable bit as sign that we're running on the kernel stack
+ */
+ .macro STI
+ mfc0 t0, CP0_STATUS
+ li t1, ST0_KERNEL_CUMASK | STATMASK
+ or t0, t1
+ xori t0, STATMASK & ~1
+ mtc0 t0, CP0_STATUS
+ irq_enable_hazard
+ .endm
+
+/*
+ * Just move to kernel mode and leave interrupts as they are. Note
+ * for the R3000 this means copying the previous enable from IEp.
+ * Set cp0 enable bit as sign that we're running on the kernel stack
+ */
+ .macro KMODE
+ mfc0 t0, CP0_STATUS
+ li t1, ST0_KERNEL_CUMASK | (STATMASK & ~1)
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ andi t2, t0, ST0_IEP
+ srl t2, 2
+ or t0, t2
+#endif
+ or t0, t1
+ xori t0, STATMASK & ~1
+ mtc0 t0, CP0_STATUS
+ irq_disable_hazard
+ .endm
+
+#endif /* _ASM_STACKFRAME_H */
diff --git a/arch/mips/include/asm/stackprotector.h b/arch/mips/include/asm/stackprotector.h
new file mode 100644
index 000000000..68d4be9e1
--- /dev/null
+++ b/arch/mips/include/asm/stackprotector.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCC stack protector support.
+ *
+ * (This is directly adopted from the ARM implementation)
+ *
+ * Stack protector works by putting a predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function. The pattern is called the stack canary
+ * and gcc expects it to be defined by a global variable called
+ * "__stack_chk_guard" on MIPS. This unfortunately means that on SMP
+ * we cannot have a different canary value per task.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary;
+
+ /* Try to get a semi random initial value. */
+ get_random_bytes(&canary, sizeof(canary));
+ canary ^= LINUX_VERSION_CODE;
+
+ current->stack_canary = canary;
+ __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/mips/include/asm/stacktrace.h b/arch/mips/include/asm/stacktrace.h
new file mode 100644
index 000000000..8ad25c25b
--- /dev/null
+++ b/arch/mips/include/asm/stacktrace.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_STACKTRACE_H
+#define _ASM_STACKTRACE_H
+
+#include <asm/ptrace.h>
+#include <asm/asm.h>
+#include <linux/stringify.h>
+
+#ifdef CONFIG_KALLSYMS
+extern int raw_show_trace;
+extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
+ unsigned long pc, unsigned long *ra);
+extern unsigned long unwind_stack_by_address(unsigned long stack_page,
+ unsigned long *sp,
+ unsigned long pc,
+ unsigned long *ra);
+#else
+#define raw_show_trace 1
+static inline unsigned long unwind_stack(struct task_struct *task,
+ unsigned long *sp, unsigned long pc, unsigned long *ra)
+{
+ return 0;
+}
+#endif
+
+#define STR_PTR_LA __stringify(PTR_LA)
+#define STR_LONG_S __stringify(LONG_S)
+#define STR_LONG_L __stringify(LONG_L)
+#define STR_LONGSIZE __stringify(LONGSIZE)
+
+#define STORE_ONE_REG(r) \
+ STR_LONG_S " $" __stringify(r)",("STR_LONGSIZE"*"__stringify(r)")(%1)\n\t"
+
+static __always_inline void prepare_frametrace(struct pt_regs *regs)
+{
+#ifndef CONFIG_KALLSYMS
+ /*
+ * Remove any garbage that may be in regs (especially function
+ * addresses) so that show_raw_backtrace() doesn't report them.
+ */
+ memset(regs, 0, sizeof(*regs));
+#endif
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noat\n\t"
+ /* Store $1 so we can use it */
+ STR_LONG_S " $1,"STR_LONGSIZE"(%1)\n\t"
+ /* Store the PC */
+ "1: " STR_PTR_LA " $1, 1b\n\t"
+ STR_LONG_S " $1,%0\n\t"
+ STORE_ONE_REG(2)
+ STORE_ONE_REG(3)
+ STORE_ONE_REG(4)
+ STORE_ONE_REG(5)
+ STORE_ONE_REG(6)
+ STORE_ONE_REG(7)
+ STORE_ONE_REG(8)
+ STORE_ONE_REG(9)
+ STORE_ONE_REG(10)
+ STORE_ONE_REG(11)
+ STORE_ONE_REG(12)
+ STORE_ONE_REG(13)
+ STORE_ONE_REG(14)
+ STORE_ONE_REG(15)
+ STORE_ONE_REG(16)
+ STORE_ONE_REG(17)
+ STORE_ONE_REG(18)
+ STORE_ONE_REG(19)
+ STORE_ONE_REG(20)
+ STORE_ONE_REG(21)
+ STORE_ONE_REG(22)
+ STORE_ONE_REG(23)
+ STORE_ONE_REG(24)
+ STORE_ONE_REG(25)
+ STORE_ONE_REG(26)
+ STORE_ONE_REG(27)
+ STORE_ONE_REG(28)
+ STORE_ONE_REG(29)
+ STORE_ONE_REG(30)
+ STORE_ONE_REG(31)
+ /* Restore $1 */
+ STR_LONG_L " $1,"STR_LONGSIZE"(%1)\n\t"
+ ".set pop\n\t"
+ : "=m" (regs->cp0_epc)
+ : "r" (regs->regs)
+ : "memory");
+}
+
+#endif /* _ASM_STACKTRACE_H */
diff --git a/arch/mips/include/asm/string.h b/arch/mips/include/asm/string.h
new file mode 100644
index 000000000..1de3bbce8
--- /dev/null
+++ b/arch/mips/include/asm/string.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1994, 95, 96, 97, 98, 2000, 01 Ralf Baechle
+ * Copyright (c) 2000 by Silicon Graphics, Inc.
+ * Copyright (c) 2001 MIPS Technologies, Inc.
+ */
+#ifndef _ASM_STRING_H
+#define _ASM_STRING_H
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *__s, int __c, size_t __count);
+
+#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+
+#endif /* _ASM_STRING_H */
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
new file mode 100644
index 000000000..a4374b4cb
--- /dev/null
+++ b/arch/mips/include/asm/switch_to.h
@@ -0,0 +1,142 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#ifndef _ASM_SWITCH_TO_H
+#define _ASM_SWITCH_TO_H
+
+#include <asm/cpu-features.h>
+#include <asm/watch.h>
+#include <asm/dsp.h>
+#include <asm/cop2.h>
+#include <asm/fpu.h>
+
+struct task_struct;
+
+/**
+ * resume - resume execution of a task
+ * @prev: The task previously executed.
+ * @next: The task to begin executing.
+ * @next_ti: task_thread_info(next).
+ *
+ * This function is used whilst scheduling to save the context of prev & load
+ * the context of next. Returns prev.
+ */
+extern asmlinkage struct task_struct *resume(struct task_struct *prev,
+ struct task_struct *next, struct thread_info *next_ti);
+
+extern unsigned int ll_bit;
+extern struct task_struct *ll_task;
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+
+/*
+ * Handle the scheduler resume end of FPU affinity management. We do this
+ * inline to try to keep the overhead down. If we have been forced to run on
+ * a "CPU" with an FPU because of a previous high level of FP computation,
+ * but did not actually use the FPU during the most recent time-slice (CU1
+ * isn't set), we undo the restriction on cpus_mask.
+ *
+ * We're not calling set_cpus_allowed() here, because we have no need to
+ * force prompt migration - we're already switching the current CPU to a
+ * different thread.
+ */
+
+#define __mips_mt_fpaff_switch_to(prev) \
+do { \
+ struct thread_info *__prev_ti = task_thread_info(prev); \
+ \
+ if (cpu_has_fpu && \
+ test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \
+ (!(KSTK_STATUS(prev) & ST0_CU1))) { \
+ clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \
+ prev->cpus_mask = prev->thread.user_cpus_allowed; \
+ } \
+ next->thread.emulated_fp = 0; \
+} while(0)
+
+#else
+#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
+#endif
+
+/*
+ * Clear LLBit during context switches on MIPSr5+ such that eretnc can be used
+ * unconditionally when returning to userland in entry.S.
+ */
+#define __clear_r5_hw_ll_bit() do { \
+ if (cpu_has_mips_r5 || cpu_has_mips_r6) \
+ write_c0_lladdr(0); \
+} while (0)
+
+#define __clear_software_ll_bit() do { \
+ if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \
+ ll_bit = 0; \
+} while (0)
+
+/*
+ * Check FCSR for any unmasked exceptions pending set with `ptrace',
+ * clear them and send a signal.
+ */
+#ifdef CONFIG_MIPS_FP_SUPPORT
+# define __sanitize_fcr31(next) \
+do { \
+ unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31); \
+ void __user *pc; \
+ \
+ if (unlikely(fcr31)) { \
+ pc = (void __user *)task_pt_regs(next)->cp0_epc; \
+ next->thread.fpu.fcr31 &= ~fcr31; \
+ force_fcr31_sig(fcr31, pc, next); \
+ } \
+} while (0)
+#else
+# define __sanitize_fcr31(next)
+#endif
+
+/*
+ * For newly created kernel threads switch_to() will return to
+ * ret_from_kernel_thread, newly created user threads to ret_from_fork.
+ * That is, everything following resume() will be skipped for new threads.
+ * So everything that matters to new threads should be placed before resume().
+ */
+#define switch_to(prev, next, last) \
+do { \
+ __mips_mt_fpaff_switch_to(prev); \
+ lose_fpu_inatomic(1, prev); \
+ if (tsk_used_math(next)) \
+ __sanitize_fcr31(next); \
+ if (cpu_has_dsp) { \
+ __save_dsp(prev); \
+ __restore_dsp(next); \
+ } \
+ if (cop2_present) { \
+ u32 status = read_c0_status(); \
+ \
+ set_c0_status(ST0_CU2); \
+ if ((KSTK_STATUS(prev) & ST0_CU2)) { \
+ if (cop2_lazy_restore) \
+ KSTK_STATUS(prev) &= ~ST0_CU2; \
+ cop2_save(prev); \
+ } \
+ if (KSTK_STATUS(next) & ST0_CU2 && \
+ !cop2_lazy_restore) { \
+ cop2_restore(next); \
+ } \
+ write_c0_status(status); \
+ } \
+ __clear_r5_hw_ll_bit(); \
+ __clear_software_ll_bit(); \
+ if (cpu_has_userlocal) \
+ write_c0_userlocal(task_thread_info(next)->tp_value); \
+ __restore_watch(next); \
+ (last) = resume(prev, next, task_thread_info(next)); \
+} while (0)
+
+#endif /* _ASM_SWITCH_TO_H */
diff --git a/arch/mips/include/asm/sync.h b/arch/mips/include/asm/sync.h
new file mode 100644
index 000000000..aabd09793
--- /dev/null
+++ b/arch/mips/include/asm/sync.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __MIPS_ASM_SYNC_H__
+#define __MIPS_ASM_SYNC_H__
+
+/*
+ * sync types are defined by the MIPS64 Instruction Set documentation in Volume
+ * II-A of the MIPS Architecture Reference Manual, which can be found here:
+ *
+ * https://www.mips.com/?do-download=the-mips64-instruction-set-v6-06
+ *
+ * Two types of barrier are provided:
+ *
+ * 1) Completion barriers, which ensure that a memory operation has actually
+ * completed & often involve stalling the CPU pipeline to do so.
+ *
+ * 2) Ordering barriers, which only ensure that affected memory operations
+ * won't be reordered in the CPU pipeline in a manner that violates the
+ * restrictions imposed by the barrier.
+ *
+ * Ordering barriers can be more efficient than completion barriers, since:
+ *
+ * a) Ordering barriers only require memory access instructions which precede
+ * them in program order (older instructions) to reach a point in the
+ * load/store datapath beyond which reordering is not possible before
+ * allowing memory access instructions which follow them (younger
+ * instructions) to be performed. That is, older instructions don't
+ * actually need to complete - they just need to get far enough that all
+ * other coherent CPUs will observe their completion before they observe
+ * the effects of younger instructions.
+ *
+ * b) Multiple variants of ordering barrier are provided which allow the
+ * effects to be restricted to different combinations of older or younger
+ * loads or stores. By way of example, if we only care that stores older
+ * than a barrier are observed prior to stores that are younger than a
+ * barrier & don't care about the ordering of loads then the 'wmb'
+ * ordering barrier can be used. Limiting the barrier's effects to stores
+ * allows loads to continue unaffected & potentially allows the CPU to
+ * make progress faster than if younger loads had to wait for older stores
+ * to complete.
+ */
+
+/*
+ * No sync instruction at all; used to allow code to nullify the effect of the
+ * __SYNC() macro without needing lots of #ifdefery.
+ */
+#define __SYNC_none -1
+
+/*
+ * A full completion barrier; all memory accesses appearing prior to this sync
+ * instruction in program order must complete before any memory accesses
+ * appearing after this sync instruction in program order.
+ */
+#define __SYNC_full 0x00
+
+/*
+ * For now we use a full completion barrier to implement all sync types, until
+ * we're satisfied that lightweight ordering barriers defined by MIPSr6 are
+ * sufficient to uphold our desired memory model.
+ */
+#define __SYNC_aq __SYNC_full
+#define __SYNC_rl __SYNC_full
+#define __SYNC_mb __SYNC_full
+
+/*
+ * ...except on Cavium Octeon CPUs, which have been using the 'wmb' ordering
+ * barrier since 2010 & omit 'rmb' barriers because the CPUs don't perform
+ * speculative reads.
+ */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+# define __SYNC_rmb __SYNC_none
+# define __SYNC_wmb 0x04
+#else
+# define __SYNC_rmb __SYNC_full
+# define __SYNC_wmb __SYNC_full
+#endif
+
+/*
+ * A GINV sync is a little different; it doesn't relate directly to loads or
+ * stores, but instead causes synchronization of an icache or TLB global
+ * invalidation operation triggered by the ginvi or ginvt instructions
+ * respectively. In cases where we need to know that a ginvi or ginvt operation
+ * has been performed by all coherent CPUs, we must issue a sync instruction of
+ * this type. Once this instruction graduates all coherent CPUs will have
+ * observed the invalidation.
+ */
+#define __SYNC_ginv 0x14
+
+/* Trivial; indicate that we always need this sync instruction. */
+#define __SYNC_always (1 << 0)
+
+/*
+ * Indicate that we need this sync instruction only on systems with weakly
+ * ordered memory access. In general this is most MIPS systems, but there are
+ * exceptions which provide strongly ordered memory.
+ */
+#ifdef CONFIG_WEAK_ORDERING
+# define __SYNC_weak_ordering (1 << 1)
+#else
+# define __SYNC_weak_ordering 0
+#endif
+
+/*
+ * Indicate that we need this sync instruction only on systems where LL/SC
+ * don't implicitly provide a memory barrier. In general this is most MIPS
+ * systems.
+ */
+#ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
+# define __SYNC_weak_llsc (1 << 2)
+#else
+# define __SYNC_weak_llsc 0
+#endif
+
+/*
+ * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
+ * store or prefetch) in between an LL & SC can cause the SC instruction to
+ * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
+ * containing such sequences, this bug bites harder than we might otherwise
+ * expect due to reordering & speculation:
+ *
+ * 1) A memory access appearing prior to the LL in program order may actually
+ * be executed after the LL - this is the reordering case.
+ *
+ * In order to avoid this we need to place a memory barrier (ie. a SYNC
+ * instruction) prior to every LL instruction, in between it and any earlier
+ * memory access instructions.
+ *
+ * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
+ *
+ * 2) If a conditional branch exists between an LL & SC with a target outside
+ * of the LL-SC loop, for example an exit upon value mismatch in cmpxchg()
+ * or similar, then misprediction of the branch may allow speculative
+ * execution of memory accesses from outside of the LL-SC loop.
+ *
+ * In order to avoid this we need a memory barrier (ie. a SYNC instruction)
+ * at each affected branch target.
+ *
+ * This case affects all current Loongson 3 CPUs.
+ *
+ * The above described cases cause an error in the cache coherence protocol;
+ * such that the Invalidate of a competing LL-SC goes 'missing' and SC
+ * erroneously observes its core still has Exclusive state and lets the SC
+ * proceed.
+ *
+ * Therefore the error only occurs on SMP systems.
+ */
+#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
+# define __SYNC_loongson3_war (1 << 31)
+#else
+# define __SYNC_loongson3_war 0
+#endif
+
+/*
+ * Some Cavium Octeon CPUs suffer from a bug that causes a single wmb ordering
+ * barrier to be ineffective, requiring the use of 2 in sequence to provide an
+ * effective barrier as noted by commit 6b07d38aaa52 ("MIPS: Octeon: Use
+ * optimized memory barrier primitives."). Here we specify that the affected
+ * sync instructions should be emitted twice.
+ * Note that this expression is evaluated by the assembler (not the compiler),
+ * and that the assembler evaluates '==' as 0 or -1, not 0 or 1.
+ */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+# define __SYNC_rpt(type) (1 - (type == __SYNC_wmb))
+#else
+# define __SYNC_rpt(type) 1
+#endif
+
+/*
+ * The main event. Here we actually emit a sync instruction of a given type, if
+ * reason is non-zero.
+ *
+ * In future we have the option of emitting entries in a fixups-style table
+ * here that would allow us to opportunistically remove some sync instructions
+ * when we detect at runtime that we're running on a CPU that doesn't need
+ * them.
+ */
+#ifdef CONFIG_CPU_HAS_SYNC
+# define ____SYNC(_type, _reason, _else) \
+ .if (( _type ) != -1) && ( _reason ); \
+ .set push; \
+ .set MIPS_ISA_LEVEL_RAW; \
+ .rept __SYNC_rpt(_type); \
+ sync _type; \
+ .endr; \
+ .set pop; \
+ .else; \
+ _else; \
+ .endif
+#else
+# define ____SYNC(_type, _reason, _else)
+#endif
+
+/*
+ * Preprocessor magic to expand macros used as arguments before we insert them
+ * into assembly code.
+ */
+#ifdef __ASSEMBLY__
+# define ___SYNC(type, reason, else) \
+ ____SYNC(type, reason, else)
+#else
+# define ___SYNC(type, reason, else) \
+ __stringify(____SYNC(type, reason, else))
+#endif
+
+#define __SYNC(type, reason) \
+ ___SYNC(__SYNC_##type, __SYNC_##reason, )
+#define __SYNC_ELSE(type, reason, else) \
+ ___SYNC(__SYNC_##type, __SYNC_##reason, else)
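+
+/*
+ * Illustrative usage (not part of this header): a C-level write barrier
+ * built on these macros might look roughly like the sketch below; the
+ * actual barrier definitions live in <asm/barrier.h>.
+ *
+ *   #define __wmb() __asm__ __volatile__(__SYNC(wmb, always) : : : "memory")
+ */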
+
+#endif /* __MIPS_ASM_SYNC_H__ */
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
new file mode 100644
index 000000000..ebdf4d910
--- /dev/null
+++ b/arch/mips/include/asm/syscall.h
@@ -0,0 +1,160 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ *
+ * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
+ */
+
+#ifndef __ASM_MIPS_SYSCALL_H
+#define __ASM_MIPS_SYSCALL_H
+
+#include <linux/compiler.h>
+#include <uapi/linux/audit.h>
+#include <linux/elf-em.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/unistd.h>
+
+#ifndef __NR_syscall /* Only defined if _MIPS_SIM == _MIPS_SIM_ABI32 */
+#define __NR_syscall 4000
+#endif
+
+static inline bool mips_syscall_is_indirect(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
+ return (IS_ENABLED(CONFIG_32BIT) ||
+ test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
+ (regs->regs[2] == __NR_syscall);
+}
+
+static inline long syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return task_thread_info(task)->syscall;
+}
+
+static inline void mips_syscall_update_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ /*
+ * v0 is the system call number, except for O32 ABI syscall(), where it
+ * ends up in a0.
+ */
+ if (mips_syscall_is_indirect(task, regs))
+ task_thread_info(task)->syscall = regs->regs[4];
+ else
+ task_thread_info(task)->syscall = regs->regs[2];
+}
+
+static inline void mips_get_syscall_arg(unsigned long *arg,
+ struct task_struct *task, struct pt_regs *regs, unsigned int n)
+{
+ unsigned long usp __maybe_unused = regs->regs[29];
+
+ switch (n) {
+ case 0: case 1: case 2: case 3:
+ *arg = regs->regs[4 + n];
+
+ return;
+
+#ifdef CONFIG_32BIT
+ case 4: case 5: case 6: case 7:
+ get_user(*arg, (int *)usp + n);
+ return;
+#endif
+
+#ifdef CONFIG_64BIT
+ case 4: case 5: case 6: case 7:
+#ifdef CONFIG_MIPS32_O32
+ if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
+ get_user(*arg, (int *)usp + n);
+ else
+#endif
+ *arg = regs->regs[4 + n];
+
+ return;
+#endif
+
+ default:
+ BUG();
+ }
+
+ unreachable();
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->regs[7] ? -regs->regs[2] : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->regs[2];
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ /* Do nothing */
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ if (error) {
+ regs->regs[2] = -error;
+ regs->regs[7] = 1;
+ } else {
+ regs->regs[2] = val;
+ regs->regs[7] = 0;
+ }
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ unsigned int i = 0;
+ unsigned int n = 6;
+
+ /* O32 ABI syscall() */
+ if (mips_syscall_is_indirect(task, regs))
+ i++;
+
+ while (n--)
+ mips_get_syscall_arg(args++, task, regs, i++);
+}
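+
+/*
+ * Illustrative usage (not part of the original header): generic code such as
+ * a tracer typically fetches all six arguments in one call, e.g.
+ *
+ *   unsigned long args[6];
+ *
+ *   syscall_get_arguments(current, task_pt_regs(current), args);
+ */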
+
+extern const unsigned long sys_call_table[];
+extern const unsigned long sys32_call_table[];
+extern const unsigned long sysn32_call_table[];
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+ int arch = AUDIT_ARCH_MIPS;
+#ifdef CONFIG_64BIT
+ if (!test_tsk_thread_flag(task, TIF_32BIT_REGS)) {
+ arch |= __AUDIT_ARCH_64BIT;
+ /* N32 sets only TIF_32BIT_ADDR */
+ if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
+ arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
+ }
+#endif
+#if defined(__LITTLE_ENDIAN)
+ arch |= __AUDIT_ARCH_LE;
+#endif
+ return arch;
+}
+
+#endif /* __ASM_MIPS_SYSCALL_H */
diff --git a/arch/mips/include/asm/termios.h b/arch/mips/include/asm/termios.h
new file mode 100644
index 000000000..bc29eeacc
--- /dev/null
+++ b/arch/mips/include/asm/termios.h
@@ -0,0 +1,105 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1996, 2000, 2001 by Ralf Baechle
+ * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_TERMIOS_H
+#define _ASM_TERMIOS_H
+
+#include <linux/uaccess.h>
+#include <uapi/asm/termios.h>
+
+/*
+ * intr=^C quit=^\ erase=del kill=^U
+ * vmin=\1 vtime=\0 eol2=\0 swtc=\0
+ * start=^Q stop=^S susp=^Z vdsusp=
+ * reprint=^R discard=^U werase=^W lnext=^V
+ * eof=^D eol=\0
+ */
+#define INIT_C_CC "\003\034\177\025\1\0\0\0\021\023\032\0\022\017\027\026\004\0"
+
+#include <linux/string.h>
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+static inline int user_termio_to_kernel_termios(struct ktermios *termios,
+ struct termio __user *termio)
+{
+ unsigned short iflag, oflag, cflag, lflag;
+ unsigned int err;
+
+ if (!access_ok(termio, sizeof(struct termio)))
+ return -EFAULT;
+
+ err = __get_user(iflag, &termio->c_iflag);
+ termios->c_iflag = (termios->c_iflag & 0xffff0000) | iflag;
+ err |=__get_user(oflag, &termio->c_oflag);
+ termios->c_oflag = (termios->c_oflag & 0xffff0000) | oflag;
+ err |=__get_user(cflag, &termio->c_cflag);
+ termios->c_cflag = (termios->c_cflag & 0xffff0000) | cflag;
+ err |=__get_user(lflag, &termio->c_lflag);
+ termios->c_lflag = (termios->c_lflag & 0xffff0000) | lflag;
+ err |=__get_user(termios->c_line, &termio->c_line);
+ if (err)
+ return -EFAULT;
+
+ if (__copy_from_user(termios->c_cc, termio->c_cc, NCC))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+static inline int kernel_termios_to_user_termio(struct termio __user *termio,
+ struct ktermios *termios)
+{
+ int err;
+
+ if (!access_ok(termio, sizeof(struct termio)))
+ return -EFAULT;
+
+ err = __put_user(termios->c_iflag, &termio->c_iflag);
+ err |= __put_user(termios->c_oflag, &termio->c_oflag);
+ err |= __put_user(termios->c_cflag, &termio->c_cflag);
+ err |= __put_user(termios->c_lflag, &termio->c_lflag);
+ err |= __put_user(termios->c_line, &termio->c_line);
+ if (err)
+ return -EFAULT;
+
+ if (__copy_to_user(termio->c_cc, termios->c_cc, NCC))
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int user_termios_to_kernel_termios(struct ktermios *k,
+ struct termios2 __user *u)
+{
+ return copy_from_user(k, u, sizeof(struct termios2)) ? -EFAULT : 0;
+}
+
+static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
+ struct ktermios *k)
+{
+ return copy_to_user(u, k, sizeof(struct termios2)) ? -EFAULT : 0;
+}
+
+static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
+ struct termios __user *u)
+{
+ return copy_from_user(k, u, sizeof(struct termios)) ? -EFAULT : 0;
+}
+
+static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
+ struct ktermios *k)
+{
+ return copy_to_user(u, k, sizeof(struct termios)) ? -EFAULT : 0;
+}
+
+#endif /* _ASM_TERMIOS_H */
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
new file mode 100644
index 000000000..e2c352da3
--- /dev/null
+++ b/arch/mips/include/asm/thread_info.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* thread_info.h: MIPS low-level thread information
+ *
+ * Copyright (C) 2002 David Howells (dhowells@redhat.com)
+ * - Incorporating suggestions made by Linus Torvalds and Dave Miller
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+
+#ifndef __ASSEMBLY__
+
+#include <asm/processor.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ unsigned long flags; /* low level flags */
+ unsigned long tp_value; /* thread pointer */
+ __u32 cpu; /* current CPU */
+ int preempt_count; /* 0 => preemptable, <0 => BUG */
+ mm_segment_t addr_limit; /*
+ * thread address space limit:
+ * 0x7fffffff for user-thread
+ * 0xffffffff for kernel-thread
+ */
+ struct pt_regs *regs;
+ long syscall; /* syscall number */
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .flags = _TIF_FIXADE, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+}
+
+/*
+ * A pointer to the struct thread_info for the currently executing thread is
+ * held in register $28/$gp.
+ *
+ * We declare __current_thread_info as a global register variable rather than a
+ * local register variable within current_thread_info() because clang doesn't
+ * support explicit local register variables.
+ *
+ * When building the VDSO we take care not to declare the global register
+ * variable because this causes GCC to not preserve the value of $28/$gp in
+ * functions that change its value (which is common in the PIC VDSO when
+ * accessing the GOT). Since the VDSO shouldn't be accessing
+ * __current_thread_info anyway we declare it extern in order to cause a link
+ * failure if it's referenced.
+ */
+#ifdef __VDSO__
+extern struct thread_info *__current_thread_info;
+#else
+register struct thread_info *__current_thread_info __asm__("$28");
+#endif
+
+static inline struct thread_info *current_thread_info(void)
+{
+ return __current_thread_info;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+/* thread information allocation */
+#if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT)
+#define THREAD_SIZE_ORDER (1)
+#endif
+#if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_64BIT)
+#define THREAD_SIZE_ORDER (2)
+#endif
+#ifdef CONFIG_PAGE_SIZE_8KB
+#define THREAD_SIZE_ORDER (1)
+#endif
+#ifdef CONFIG_PAGE_SIZE_16KB
+#define THREAD_SIZE_ORDER (0)
+#endif
+#ifdef CONFIG_PAGE_SIZE_32KB
+#define THREAD_SIZE_ORDER (0)
+#endif
+#ifdef CONFIG_PAGE_SIZE_64KB
+#define THREAD_SIZE_ORDER (0)
+#endif
+
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+#define THREAD_MASK (THREAD_SIZE - 1UL)
+
+#define STACK_WARN (THREAD_SIZE / 8)
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SIGPENDING 1 /* signal pending */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */
+#define TIF_SECCOMP 4 /* secure computing */
+#define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
+#define TIF_UPROBE 6 /* breakpointed or singlestepping */
+#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
+#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
+#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
+#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
+#define TIF_NOHZ 19 /* in adaptive nohz mode */
+#define TIF_FIXADE 20 /* Fix address errors in software */
+#define TIF_LOGADE 21 /* Log address errors to syslog */
+#define TIF_32BIT_REGS 22 /* 32-bit general purpose registers */
+#define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
+#define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
+#define TIF_LOAD_WATCH 25 /* If set, load watch registers */
+#define TIF_SYSCALL_TRACEPOINT 26 /* syscall tracepoint instrumentation */
+#define TIF_32BIT_FPREGS 27 /* 32-bit floating point registers */
+#define TIF_HYBRID_FPREGS 28 /* 64b FP registers, odd singles in bits 63:32 of even doubles */
+#define TIF_USEDMSA 29 /* MSA has been used this quantum */
+#define TIF_MSA_CTX_LIVE 30 /* MSA context must be preserved */
+#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP (1<<TIF_SECCOMP)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_UPROBE (1<<TIF_UPROBE)
+#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL)
+#define _TIF_USEDFPU (1<<TIF_USEDFPU)
+#define _TIF_NOHZ (1<<TIF_NOHZ)
+#define _TIF_FIXADE (1<<TIF_FIXADE)
+#define _TIF_LOGADE (1<<TIF_LOGADE)
+#define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS)
+#define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
+#define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
+#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
+#define _TIF_32BIT_FPREGS (1<<TIF_32BIT_FPREGS)
+#define _TIF_HYBRID_FPREGS (1<<TIF_HYBRID_FPREGS)
+#define _TIF_USEDMSA (1<<TIF_USEDMSA)
+#define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+
+#define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
+ _TIF_SYSCALL_AUDIT | \
+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
+
+/* work to do in syscall_trace_leave() */
+#define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
+
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK \
+ (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME | \
+ _TIF_UPROBE | _TIF_NOTIFY_SIGNAL)
+/* work to do on any return to u-space */
+#define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
+ _TIF_WORK_SYSCALL_EXIT | \
+ _TIF_SYSCALL_TRACEPOINT)
+
+/*
+ * We stash processor id into a COP0 register to retrieve it fast
+ * at kernel exception entry.
+ */
+#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
+#define SMP_CPUID_REG 20, 0 /* XCONTEXT */
+#define ASM_SMP_CPUID_REG $20
+#define SMP_CPUID_PTRSHIFT 48
+#else
+#define SMP_CPUID_REG 4, 0 /* CONTEXT */
+#define ASM_SMP_CPUID_REG $4
+#define SMP_CPUID_PTRSHIFT 23
+#endif
+
+#ifdef CONFIG_64BIT
+#define SMP_CPUID_REGSHIFT (SMP_CPUID_PTRSHIFT + 3)
+#else
+#define SMP_CPUID_REGSHIFT (SMP_CPUID_PTRSHIFT + 2)
+#endif
+
+#define ASM_CPUID_MFC0 MFC0
+#define UASM_i_CPUID_MFC0 UASM_i_MFC0
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_THREAD_INFO_H */
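
A minimal sketch of how the THREAD_SIZE/THREAD_MASK definitions above are typically exploited: kernel stacks are THREAD_SIZE bytes and THREAD_SIZE aligned, so masking any stack address with ~THREAD_MASK yields the stack base. stack_base_of() is a hypothetical helper, not part of this patch:

#include <asm/thread_info.h>

static inline unsigned long stack_base_of(unsigned long sp)
{
	/* THREAD_MASK == THREAD_SIZE - 1, and stacks are THREAD_SIZE aligned */
	return sp & ~THREAD_MASK;
}
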
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
new file mode 100644
index 000000000..e855a3611
--- /dev/null
+++ b/arch/mips/include/asm/time.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2001, 2002, MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ * Copyright (c) 2003 Maciej W. Rozycki
+ *
+ * include/asm-mips/time.h
+ * header file for the new style time.c file and time services.
+ */
+#ifndef _ASM_TIME_H
+#define _ASM_TIME_H
+
+#include <linux/rtc.h>
+#include <linux/spinlock.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+
+extern spinlock_t rtc_lock;
+
+/*
+ * board specific routines required by time_init().
+ */
+extern void plat_time_init(void);
+
+/*
+ * mips_hpt_frequency - must be set if you intend to use an R4k-compatible
+ * counter as a timer interrupt source.
+ */
+extern unsigned int mips_hpt_frequency;
+
+/*
+ * The performance counter IRQ on MIPS is a close relative to the timer IRQ
+ * so it lives here.
+ */
+extern int (*perf_irq)(void);
+extern int __weak get_c0_perfcount_int(void);
+
+/*
+ * Initialize the calling CPU's compare interrupt as clockevent device
+ */
+extern unsigned int get_c0_compare_int(void);
+extern int r4k_clockevent_init(void);
+
+static inline int mips_clockevent_init(void)
+{
+#ifdef CONFIG_CEVT_R4K
+ return r4k_clockevent_init();
+#else
+ return -ENXIO;
+#endif
+}
+
+/*
+ * Initialize the count register as a clocksource
+ */
+extern int init_r4k_clocksource(void);
+
+static inline int init_mips_clocksource(void)
+{
+#ifdef CONFIG_CSRC_R4K
+ return init_r4k_clocksource();
+#else
+ return 0;
+#endif
+}
+
+static inline void clockevent_set_clock(struct clock_event_device *cd,
+ unsigned int clock)
+{
+ clockevents_calc_mult_shift(cd, clock, 4);
+}
+
+#endif /* _ASM_TIME_H */
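
A minimal sketch of the board-level plat_time_init() hook required by the header above; the 100 MHz count rate is a placeholder, not a value taken from any board in this patch:

#include <linux/init.h>
#include <asm/time.h>

void __init plat_time_init(void)
{
	/* placeholder: CP0 count rate of the (hypothetical) board */
	mips_hpt_frequency = 100000000;
}
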
diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h
new file mode 100644
index 000000000..2e107886f
--- /dev/null
+++ b/arch/mips/include/asm/timex.h
@@ -0,0 +1,102 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998, 1999, 2003 by Ralf Baechle
+ * Copyright (C) 2014 by Maciej W. Rozycki
+ */
+#ifndef _ASM_TIMEX_H
+#define _ASM_TIMEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/mipsregs.h>
+#include <asm/cpu-type.h>
+
+/*
+ * This is the clock rate of the i8253 PIT. A MIPS system may not have
+ * a PIT but the symbol is used all over the kernel, including some APIs.
+ * So keeping it defined to the number for the PIT is the only sane thing
+ * for now.
+ */
+#define CLOCK_TICK_RATE 1193182
+
+/*
+ * Standard way to access the cycle counter.
+ * Currently only used on SMP for scheduling.
+ *
+ * Only the low 32 bits are available as a continuously counting entity.
+ * But this only means we'll force a reschedule every 8 seconds or so,
+ * which isn't an evil thing.
+ *
+ * We know that all SMP capable CPUs have cycle counters.
+ */
+
+typedef unsigned int cycles_t;
+
+/*
+ * On R4000/R4400 an erratum exists such that if the cycle counter is
+ * read in the exact moment that it is matching the compare register,
+ * no interrupt will be generated.
+ *
+ * There is a suggested workaround and also the erratum can't strike if
+ * the compare interrupt isn't being used as the clock source device.
+ * However, for now the implementation of this function doesn't get these
+ * fine details right.
+ */
+static inline int can_use_mips_counter(unsigned int prid)
+{
+ int comp = (prid & PRID_COMP_MASK) != PRID_COMP_LEGACY;
+
+ if (__builtin_constant_p(cpu_has_counter) && !cpu_has_counter)
+ return 0;
+ else if (__builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r)
+ return 1;
+ else if (likely(!__builtin_constant_p(cpu_has_mips_r) && comp))
+ return 1;
+ /* Make sure we don't peek at cpu_data[0].options in the fast path! */
+ if (!__builtin_constant_p(cpu_has_counter))
+ asm volatile("" : "=m" (cpu_data[0].options));
+ if (likely(cpu_has_counter &&
+ prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15))))
+ return 1;
+ else
+ return 0;
+}
+
+static inline cycles_t get_cycles(void)
+{
+ if (can_use_mips_counter(read_c0_prid()))
+ return read_c0_count();
+ else
+ return 0; /* no usable counter */
+}
+#define get_cycles get_cycles
+
+/*
+ * Like get_cycles - but where c0_count is not available we desperately
+ * use c0_random in an attempt to get at least a little bit of entropy.
+ */
+static inline unsigned long random_get_entropy(void)
+{
+ unsigned int c0_random;
+
+ if (can_use_mips_counter(read_c0_prid()))
+ return read_c0_count();
+
+ if (cpu_has_3kex)
+ c0_random = (read_c0_random() >> 8) & 0x3f;
+ else
+ c0_random = read_c0_random() & 0x3f;
+ return (random_get_entropy_fallback() << 6) | (0x3f - c0_random);
+}
+#define random_get_entropy random_get_entropy
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_TIMEX_H */
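
A minimal sketch of timing a callback in CP0 count cycles with the get_cycles() helper above; it is only meaningful when can_use_mips_counter() returns true (otherwise both reads are 0), and measure_cycles() is a hypothetical helper:

#include <asm/timex.h>

static unsigned int measure_cycles(void (*fn)(void))
{
	cycles_t before = get_cycles();

	fn();
	/* cycles_t is 32 bits here, so the difference wraps modulo 2^32 */
	return (unsigned int)(get_cycles() - before);
}
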
diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h
new file mode 100644
index 000000000..90f3ad76d
--- /dev/null
+++ b/arch/mips/include/asm/tlb.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_TLB_H
+#define __ASM_TLB_H
+
+#include <asm/cpu-features.h>
+#include <asm/mipsregs.h>
+
+#define _UNIQUE_ENTRYHI(base, idx) \
+ (((base) + ((idx) << (PAGE_SHIFT + 1))) | \
+ (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
+#define UNIQUE_ENTRYHI(idx) _UNIQUE_ENTRYHI(CKSEG0, idx)
+#define UNIQUE_GUEST_ENTRYHI(idx) _UNIQUE_ENTRYHI(CKSEG1, idx)
+
+static inline unsigned int num_wired_entries(void)
+{
+ unsigned int wired = read_c0_wired();
+
+ if (cpu_has_mips_r6)
+ wired &= MIPSR6_WIRED_WIRED;
+
+ return wired;
+}
+
+#include <asm-generic/tlb.h>
+
+#endif /* __ASM_TLB_H */
diff --git a/arch/mips/include/asm/tlbdebug.h b/arch/mips/include/asm/tlbdebug.h
new file mode 100644
index 000000000..3a25a8780
--- /dev/null
+++ b/arch/mips/include/asm/tlbdebug.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002 by Ralf Baechle
+ */
+#ifndef __ASM_TLBDEBUG_H
+#define __ASM_TLBDEBUG_H
+
+/*
+ * TLB debugging functions:
+ */
+extern void dump_tlb_regs(void);
+extern void dump_tlb_all(void);
+
+#endif /* __ASM_TLBDEBUG_H */
diff --git a/arch/mips/include/asm/tlbex.h b/arch/mips/include/asm/tlbex.h
new file mode 100644
index 000000000..6d97e23f3
--- /dev/null
+++ b/arch/mips/include/asm/tlbex.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_TLBEX_H
+#define __ASM_TLBEX_H
+
+#include <asm/uasm.h>
+
+/*
+ * Write a random or indexed TLB entry, taking care of the hazards from
+ * the preceding mtc0 and for the following eret.
+ */
+enum tlb_write_entry {
+ tlb_random,
+ tlb_indexed
+};
+
+extern int pgd_reg;
+
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+ unsigned int tmp, unsigned int ptr);
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr);
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr);
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep);
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+ struct uasm_reloc **r,
+ enum tlb_write_entry wmode);
+
+extern void handle_tlbl(void);
+extern char handle_tlbl_end[];
+
+extern void handle_tlbs(void);
+extern char handle_tlbs_end[];
+
+extern void handle_tlbm(void);
+extern char handle_tlbm_end[];
+
+#endif /* __ASM_TLBEX_H */
diff --git a/arch/mips/include/asm/tlbflush.h b/arch/mips/include/asm/tlbflush.h
new file mode 100644
index 000000000..9789e7a32
--- /dev/null
+++ b/arch/mips/include/asm/tlbflush.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_TLBFLUSH_H
+#define __ASM_TLBFLUSH_H
+
+#include <linux/mm.h>
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb_all() flushes all processes' TLB entries
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ */
+extern void local_flush_tlb_all(void);
+extern void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+extern void local_flush_tlb_kernel_range(unsigned long start,
+ unsigned long end);
+extern void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long page);
+extern void local_flush_tlb_one(unsigned long vaddr);
+
+#include <asm/mmu_context.h>
+
+#ifdef CONFIG_SMP
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long,
+ unsigned long);
+extern void flush_tlb_kernel_range(unsigned long, unsigned long);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_one(unsigned long vaddr);
+
+#else /* CONFIG_SMP */
+
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_mm(mm) drop_mmu_context(mm)
+#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end)
+#define flush_tlb_kernel_range(vmaddr,end) \
+ local_flush_tlb_kernel_range(vmaddr, end)
+#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
+#define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr)
+
+#endif /* CONFIG_SMP */
+
+#endif /* __ASM_TLBFLUSH_H */
diff --git a/arch/mips/include/asm/tlbmisc.h b/arch/mips/include/asm/tlbmisc.h
new file mode 100644
index 000000000..c1a540669
--- /dev/null
+++ b/arch/mips/include/asm/tlbmisc.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_TLBMISC_H
+#define __ASM_TLBMISC_H
+
+/*
+ * - add_wired_entry() adds a fixed TLB entry and moves the wired register
+ */
+extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
+ unsigned long entryhi, unsigned long pagemask);
+
+#endif /* __ASM_TLBMISC_H */
diff --git a/arch/mips/include/asm/topology.h b/arch/mips/include/asm/topology.h
new file mode 100644
index 000000000..0673d2d0f
--- /dev/null
+++ b/arch/mips/include/asm/topology.h
@@ -0,0 +1,21 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 by Ralf Baechle
+ */
+#ifndef __ASM_TOPOLOGY_H
+#define __ASM_TOPOLOGY_H
+
+#include <topology.h>
+#include <linux/smp.h>
+
+#ifdef CONFIG_SMP
+#define topology_physical_package_id(cpu) (cpu_data[cpu].package)
+#define topology_core_id(cpu) (cpu_core(&cpu_data[cpu]))
+#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
+#define topology_sibling_cpumask(cpu) (&cpu_sibling_map[cpu])
+#endif
+
+#endif /* __ASM_TOPOLOGY_H */
diff --git a/arch/mips/include/asm/traps.h b/arch/mips/include/asm/traps.h
new file mode 100644
index 000000000..6a0864bb6
--- /dev/null
+++ b/arch/mips/include/asm/traps.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Trap handling definitions.
+ *
+ * Copyright (C) 2002, 2003 Maciej W. Rozycki
+ */
+#ifndef _ASM_TRAPS_H
+#define _ASM_TRAPS_H
+
+/*
+ * Possible status responses for a board_be_handler backend.
+ */
+#define MIPS_BE_DISCARD 0 /* return with no action */
+#define MIPS_BE_FIXUP 1 /* return to the fixup code */
+#define MIPS_BE_FATAL 2 /* treat as an unrecoverable error */
+
+extern void (*board_be_init)(void);
+extern int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
+
+extern void (*board_nmi_handler_setup)(void);
+extern void (*board_ejtag_handler_setup)(void);
+extern void (*board_bind_eic_interrupt)(int irq, int regset);
+extern void (*board_ebase_setup)(void);
+extern void (*board_cache_error_setup)(void);
+
+extern int register_nmi_notifier(struct notifier_block *nb);
+
+#define nmi_notifier(fn, pri) \
+({ \
+ static struct notifier_block fn##_nb = { \
+ .notifier_call = fn, \
+ .priority = pri \
+ }; \
+ \
+ register_nmi_notifier(&fn##_nb); \
+})
+
+#endif /* _ASM_TRAPS_H */
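
A minimal sketch of registering an NMI handler through the nmi_notifier() convenience macro above; my_nmi_handler() and my_board_nmi_setup() are hypothetical names:

#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/traps.h>

static int my_nmi_handler(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	/* board-specific NMI handling would go here */
	return NOTIFY_OK;
}

static int __init my_board_nmi_setup(void)
{
	/* the macro declares the notifier_block and registers it */
	nmi_notifier(my_nmi_handler, 0);
	return 0;
}
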
diff --git a/arch/mips/include/asm/txx9/boards.h b/arch/mips/include/asm/txx9/boards.h
new file mode 100644
index 000000000..d45237bef
--- /dev/null
+++ b/arch/mips/include/asm/txx9/boards.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifdef CONFIG_TOSHIBA_JMR3927
+BOARD_VEC(jmr3927_vec)
+#endif
+#ifdef CONFIG_TOSHIBA_RBTX4927
+BOARD_VEC(rbtx4927_vec)
+BOARD_VEC(rbtx4937_vec)
+#endif
+#ifdef CONFIG_TOSHIBA_RBTX4938
+BOARD_VEC(rbtx4938_vec)
+#endif
+#ifdef CONFIG_TOSHIBA_RBTX4939
+BOARD_VEC(rbtx4939_vec)
+#endif
diff --git a/arch/mips/include/asm/txx9/dmac.h b/arch/mips/include/asm/txx9/dmac.h
new file mode 100644
index 000000000..b47ef5fe7
--- /dev/null
+++ b/arch/mips/include/asm/txx9/dmac.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * TXx9 SoC DMA Controller
+ */
+
+#ifndef __ASM_TXX9_DMAC_H
+#define __ASM_TXX9_DMAC_H
+
+#include <linux/dmaengine.h>
+
+#define TXX9_DMA_MAX_NR_CHANNELS 4
+
+/**
+ * struct txx9dmac_platform_data - Controller configuration parameters
+ * @memcpy_chan: Channel used for DMA_MEMCPY
+ * @have_64bit_regs: DMAC has 64-bit registers
+ */
+struct txx9dmac_platform_data {
+ int memcpy_chan;
+ bool have_64bit_regs;
+};
+
+/**
+ * struct txx9dmac_chan_platform_data - Channel configuration parameters
+ * @dmac_dev: A platform device for DMAC
+ */
+struct txx9dmac_chan_platform_data {
+ struct platform_device *dmac_dev;
+};
+
+/**
+ * struct txx9dmac_slave - Controller-specific information about a slave
+ * @tx_reg: physical address of data register used for
+ * memory-to-peripheral transfers
+ * @rx_reg: physical address of data register used for
+ * peripheral-to-memory transfers
+ * @reg_width: peripheral register width
+ */
+struct txx9dmac_slave {
+ u64 tx_reg;
+ u64 rx_reg;
+ unsigned int reg_width;
+};
+
+void txx9_dmac_init(int id, unsigned long baseaddr, int irq,
+ const struct txx9dmac_platform_data *pdata);
+
+#endif /* __ASM_TXX9_DMAC_H */
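
A minimal sketch of wiring up the DMAC with this platform data; the base address, IRQ and memcpy channel are placeholders, not values from any real board in this patch:

#include <linux/init.h>
#include <asm/txx9/dmac.h>

static const struct txx9dmac_platform_data board_dmac_pdata = {
	.memcpy_chan	= 3,		/* placeholder channel for DMA_MEMCPY */
	.have_64bit_regs = false,
};

static void __init board_dmac_setup(void)
{
	/* placeholder base address and IRQ */
	txx9_dmac_init(0, 0x1f801000UL, 17, &board_dmac_pdata);
}
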
diff --git a/arch/mips/include/asm/txx9/generic.h b/arch/mips/include/asm/txx9/generic.h
new file mode 100644
index 000000000..9a2c47bf3
--- /dev/null
+++ b/arch/mips/include/asm/txx9/generic.h
@@ -0,0 +1,98 @@
+/*
+ * linux/include/asm-mips/txx9/generic.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_TXX9_GENERIC_H
+#define __ASM_TXX9_GENERIC_H
+
+#include <linux/init.h>
+#include <linux/ioport.h> /* for struct resource */
+
+extern struct resource txx9_ce_res[];
+#define TXX9_CE(n) (unsigned long)(txx9_ce_res[(n)].start)
+extern unsigned int txx9_pcode;
+extern char txx9_pcode_str[8];
+void txx9_reg_res_init(unsigned int pcode, unsigned long base,
+ unsigned long size);
+
+extern unsigned int txx9_master_clock;
+extern unsigned int txx9_cpu_clock;
+extern unsigned int txx9_gbus_clock;
+#define TXX9_IMCLK (txx9_gbus_clock / 2)
+
+extern int txx9_ccfg_toeon;
+struct uart_port;
+int early_serial_txx9_setup(struct uart_port *port);
+
+struct pci_dev;
+struct txx9_board_vec {
+ const char *system;
+ void (*prom_init)(void);
+ void (*mem_setup)(void);
+ void (*irq_setup)(void);
+ void (*time_init)(void);
+ void (*arch_init)(void);
+ void (*device_init)(void);
+#ifdef CONFIG_PCI
+ int (*pci_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
+#endif
+};
+extern struct txx9_board_vec *txx9_board_vec;
+extern int (*txx9_irq_dispatch)(int pending);
+const char *prom_getenv(const char *name);
+void txx9_wdt_init(unsigned long base);
+void txx9_wdt_now(unsigned long base);
+void txx9_spi_init(int busid, unsigned long base, int irq);
+void txx9_ethaddr_init(unsigned int id, unsigned char *ethaddr);
+void txx9_sio_init(unsigned long baseaddr, int irq,
+ unsigned int line, unsigned int sclk, int nocts);
+#ifdef CONFIG_EARLY_PRINTK
+extern void (*txx9_prom_putchar)(char c);
+void txx9_sio_putchar_init(unsigned long baseaddr);
+#else
+static inline void txx9_sio_putchar_init(unsigned long baseaddr)
+{
+}
+#endif
+
+struct physmap_flash_data;
+void txx9_physmap_flash_init(int no, unsigned long addr, unsigned long size,
+ const struct physmap_flash_data *pdata);
+
+/* 8 bit version of __fls(): find the most significant set bit (returns 0..7) */
+static inline unsigned int __fls8(unsigned char x)
+{
+ int r = 7;
+
+ if (!(x & 0xf0)) {
+ r -= 4;
+ x <<= 4;
+ }
+ if (!(x & 0xc0)) {
+ r -= 2;
+ x <<= 2;
+ }
+ if (!(x & 0x80))
+ r -= 1;
+ return r;
+}
+
+void txx9_iocled_init(unsigned long baseaddr,
+ int basenum, unsigned int num, int lowactive,
+ const char *color, char **deftriggers);
+
+/* 7SEG LED */
+void txx9_7segled_init(unsigned int num,
+ void (*putc)(unsigned int pos, unsigned char val));
+int txx9_7segled_putc(unsigned int pos, char c);
+
+void __init txx9_aclc_init(unsigned long baseaddr, int irq,
+ unsigned int dmac_id,
+ unsigned int dma_chan_out,
+ unsigned int dma_chan_in);
+void __init txx9_sramc_init(struct resource *r);
+
+#endif /* __ASM_TXX9_GENERIC_H */
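
A minimal sketch of how __fls8() is used to pick the highest-priority pending bit out of an 8-bit interrupt status word, the way the TXx9 board interrupt code does; handle_one() is a hypothetical callback:

#include <asm/txx9/generic.h>

static void dispatch_highest(unsigned char pending,
			     void (*handle_one)(unsigned int bit))
{
	if (!pending)
		return;
	/* __fls8() returns the index (7..0) of the most significant set bit */
	handle_one(__fls8(pending));
}
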
diff --git a/arch/mips/include/asm/txx9/jmr3927.h b/arch/mips/include/asm/txx9/jmr3927.h
new file mode 100644
index 000000000..aab959dc3
--- /dev/null
+++ b/arch/mips/include/asm/txx9/jmr3927.h
@@ -0,0 +1,179 @@
+/*
+ * Defines for the TJSYS JMR-TX3927
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ */
+#ifndef __ASM_TXX9_JMR3927_H
+#define __ASM_TXX9_JMR3927_H
+
+#include <asm/txx9/tx3927.h>
+#include <asm/addrspace.h>
+#include <asm/txx9irq.h>
+
+/* CS */
+#define JMR3927_ROMCE0 0x1fc00000 /* 4M */
+#define JMR3927_ROMCE1 0x1e000000 /* 4M */
+#define JMR3927_ROMCE2 0x14000000 /* 16M */
+#define JMR3927_ROMCE3 0x10000000 /* 64M */
+#define JMR3927_ROMCE5 0x1d000000 /* 4M */
+#define JMR3927_SDCS0 0x00000000 /* 32M */
+#define JMR3927_SDCS1 0x02000000 /* 32M */
+/* PCI Direct Mappings */
+
+#define JMR3927_PCIMEM 0x08000000
+#define JMR3927_PCIMEM_SIZE 0x08000000 /* 128M */
+#define JMR3927_PCIIO 0x15000000
+#define JMR3927_PCIIO_SIZE 0x01000000 /* 16M */
+
+#define JMR3927_SDRAM_SIZE 0x02000000 /* 32M */
+#define JMR3927_PORT_BASE KSEG1
+
+/* Address map (virtual address) */
+#define JMR3927_ROM0_BASE (KSEG1 + JMR3927_ROMCE0)
+#define JMR3927_ROM1_BASE (KSEG1 + JMR3927_ROMCE1)
+#define JMR3927_IOC_BASE (KSEG1 + JMR3927_ROMCE2)
+#define JMR3927_PCIMEM_BASE (KSEG1 + JMR3927_PCIMEM)
+#define JMR3927_PCIIO_BASE (KSEG1 + JMR3927_PCIIO)
+
+#define JMR3927_IOC_REV_ADDR (JMR3927_IOC_BASE + 0x00000000)
+#define JMR3927_IOC_NVRAMB_ADDR (JMR3927_IOC_BASE + 0x00010000)
+#define JMR3927_IOC_LED_ADDR (JMR3927_IOC_BASE + 0x00020000)
+#define JMR3927_IOC_DIPSW_ADDR (JMR3927_IOC_BASE + 0x00030000)
+#define JMR3927_IOC_BREV_ADDR (JMR3927_IOC_BASE + 0x00040000)
+#define JMR3927_IOC_DTR_ADDR (JMR3927_IOC_BASE + 0x00050000)
+#define JMR3927_IOC_INTS1_ADDR (JMR3927_IOC_BASE + 0x00080000)
+#define JMR3927_IOC_INTS2_ADDR (JMR3927_IOC_BASE + 0x00090000)
+#define JMR3927_IOC_INTM_ADDR (JMR3927_IOC_BASE + 0x000a0000)
+#define JMR3927_IOC_INTP_ADDR (JMR3927_IOC_BASE + 0x000b0000)
+#define JMR3927_IOC_RESET_ADDR (JMR3927_IOC_BASE + 0x000f0000)
+
+/* Flash ROM */
+#define JMR3927_FLASH_BASE (JMR3927_ROM0_BASE)
+#define JMR3927_FLASH_SIZE 0x00400000
+
+/* bits for IOC_REV/IOC_BREV (high byte) */
+#define JMR3927_IDT_MASK 0xfc
+#define JMR3927_REV_MASK 0x03
+#define JMR3927_IOC_IDT 0xe0
+
+/* bits for IOC_INTS1/IOC_INTS2/IOC_INTM/IOC_INTP (high byte) */
+#define JMR3927_IOC_INTB_PCIA 0
+#define JMR3927_IOC_INTB_PCIB 1
+#define JMR3927_IOC_INTB_PCIC 2
+#define JMR3927_IOC_INTB_PCID 3
+#define JMR3927_IOC_INTB_MODEM 4
+#define JMR3927_IOC_INTB_INT6 5
+#define JMR3927_IOC_INTB_INT7 6
+#define JMR3927_IOC_INTB_SOFT 7
+#define JMR3927_IOC_INTF_PCIA (1 << JMR3927_IOC_INTB_PCIA)
+#define JMR3927_IOC_INTF_PCIB (1 << JMR3927_IOC_INTB_PCIB)
+#define JMR3927_IOC_INTF_PCIC (1 << JMR3927_IOC_INTB_PCIC)
+#define JMR3927_IOC_INTF_PCID (1 << JMR3927_IOC_INTB_PCID)
+#define JMR3927_IOC_INTF_MODEM (1 << JMR3927_IOC_INTB_MODEM)
+#define JMR3927_IOC_INTF_INT6 (1 << JMR3927_IOC_INTB_INT6)
+#define JMR3927_IOC_INTF_INT7 (1 << JMR3927_IOC_INTB_INT7)
+#define JMR3927_IOC_INTF_SOFT (1 << JMR3927_IOC_INTB_SOFT)
+
+/* bits for IOC_RESET (high byte) */
+#define JMR3927_IOC_RESET_CPU 1
+#define JMR3927_IOC_RESET_PCI 2
+
+#if defined(__BIG_ENDIAN)
+#define jmr3927_ioc_reg_out(d, a) ((*(volatile unsigned char *)(a)) = (d))
+#define jmr3927_ioc_reg_in(a) (*(volatile unsigned char *)(a))
+#elif defined(__LITTLE_ENDIAN)
+#define jmr3927_ioc_reg_out(d, a) ((*(volatile unsigned char *)((a)^1)) = (d))
+#define jmr3927_ioc_reg_in(a) (*(volatile unsigned char *)((a)^1))
+#else
+#error "No Endian"
+#endif
+
+/* LED macro */
+#define jmr3927_led_set(n/*0-16*/) jmr3927_ioc_reg_out(~(n), JMR3927_IOC_LED_ADDR)
+
+#define jmr3927_led_and_set(n/*0-16*/) jmr3927_ioc_reg_out((~(n)) & jmr3927_ioc_reg_in(JMR3927_IOC_LED_ADDR), JMR3927_IOC_LED_ADDR)
+
+/* DIPSW4 macro */
+#define jmr3927_dipsw1() (gpio_get_value(11) == 0)
+#define jmr3927_dipsw2() (gpio_get_value(10) == 0)
+#define jmr3927_dipsw3() ((jmr3927_ioc_reg_in(JMR3927_IOC_DIPSW_ADDR) & 2) == 0)
+#define jmr3927_dipsw4() ((jmr3927_ioc_reg_in(JMR3927_IOC_DIPSW_ADDR) & 1) == 0)
+
+/*
+ * IRQ mappings
+ */
+
+/* These are the virtual IRQ numbers; we divide all IRQs into
+ * 'spaces', and the 'space' determines where and how to enable/disable
+ * that particular IRQ on a JMR machine. Add new 'spaces' as new
+ * IRQ hardware is supported.
+ */
+#define JMR3927_NR_IRQ_IRC 16 /* On-Chip IRC */
+#define JMR3927_NR_IRQ_IOC 8 /* PCI/MODEM/INT[6:7] */
+
+#define JMR3927_IRQ_IRC TXX9_IRQ_BASE
+#define JMR3927_IRQ_IOC (JMR3927_IRQ_IRC + JMR3927_NR_IRQ_IRC)
+#define JMR3927_IRQ_END (JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC)
+
+#define JMR3927_IRQ_IRC_INT0 (JMR3927_IRQ_IRC + TX3927_IR_INT0)
+#define JMR3927_IRQ_IRC_INT1 (JMR3927_IRQ_IRC + TX3927_IR_INT1)
+#define JMR3927_IRQ_IRC_INT2 (JMR3927_IRQ_IRC + TX3927_IR_INT2)
+#define JMR3927_IRQ_IRC_INT3 (JMR3927_IRQ_IRC + TX3927_IR_INT3)
+#define JMR3927_IRQ_IRC_INT4 (JMR3927_IRQ_IRC + TX3927_IR_INT4)
+#define JMR3927_IRQ_IRC_INT5 (JMR3927_IRQ_IRC + TX3927_IR_INT5)
+#define JMR3927_IRQ_IRC_SIO0 (JMR3927_IRQ_IRC + TX3927_IR_SIO0)
+#define JMR3927_IRQ_IRC_SIO1 (JMR3927_IRQ_IRC + TX3927_IR_SIO1)
+#define JMR3927_IRQ_IRC_SIO(ch) (JMR3927_IRQ_IRC + TX3927_IR_SIO(ch))
+#define JMR3927_IRQ_IRC_DMA (JMR3927_IRQ_IRC + TX3927_IR_DMA)
+#define JMR3927_IRQ_IRC_PIO (JMR3927_IRQ_IRC + TX3927_IR_PIO)
+#define JMR3927_IRQ_IRC_PCI (JMR3927_IRQ_IRC + TX3927_IR_PCI)
+#define JMR3927_IRQ_IRC_TMR(ch) (JMR3927_IRQ_IRC + TX3927_IR_TMR(ch))
+#define JMR3927_IRQ_IOC_PCIA (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_PCIA)
+#define JMR3927_IRQ_IOC_PCIB (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_PCIB)
+#define JMR3927_IRQ_IOC_PCIC (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_PCIC)
+#define JMR3927_IRQ_IOC_PCID (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_PCID)
+#define JMR3927_IRQ_IOC_MODEM (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_MODEM)
+#define JMR3927_IRQ_IOC_INT6 (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_INT6)
+#define JMR3927_IRQ_IOC_INT7 (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_INT7)
+#define JMR3927_IRQ_IOC_SOFT (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_SOFT)
+
+/* IOC (PCI, MODEM) */
+#define JMR3927_IRQ_IOCINT JMR3927_IRQ_IRC_INT1
+/* TC35815 100M Ether (JMR-TX3912:JPW4:2-3 Short) */
+#define JMR3927_IRQ_ETHER0 JMR3927_IRQ_IRC_INT3
+
+/* Clocks */
+#define JMR3927_CORECLK 132710400 /* 132.7MHz */
+
+/*
+ * TX3927 Pin Configuration:
+ *
+ * PCFG bits Avail Dead
+ * SELSIO[1:0]:11 RXD[1:0], TXD[1:0] PIO[6:3]
+ * SELSIOC[0]:1 CTS[0], RTS[0] INT[5:4]
+ * SELSIOC[1]:0,SELDSF:0, GSDAO[0],GPCST[3] CTS[1], RTS[1],DSF,
+ * GDBGE* PIO[2:1]
+ * SELDMA[2]:1 DMAREQ[2],DMAACK[2] PIO[13:12]
+ * SELTMR[2:0]:000 TIMER[1:0]
+ * SELCS:0,SELDMA[1]:0 PIO[11:10] SDCS_CE[7:6],
+ * DMAREQ[1],DMAACK[1]
+ * SELDMA[0]:1 DMAREQ[0],DMAACK[0] PIO[9:8]
+ * SELDMA[3]:1 DMAREQ[3],DMAACK[3] PIO[15:14]
+ * SELDONE:1 DMADONE PIO[7]
+ *
+ * Usable pins are:
+ * RXD[1:0],TXD[1:0],CTS[0],RTS[0],
+ * DMAREQ[0,2,3],DMAACK[0,2,3],DMADONE,PIO[0,10,11]
+ * INT[3:0]
+ */
+
+void jmr3927_prom_init(void);
+void jmr3927_irq_setup(void);
+struct pci_dev;
+int jmr3927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
+
+#endif /* __ASM_TXX9_JMR3927_H */
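
A minimal sketch of driving the board's user LEDs through the endianness-aware accessor and the jmr3927_led_set() macro above; the function name and stage value are illustrative only:

#include <asm/txx9/jmr3927.h>

static void jmr3927_show_boot_stage(unsigned int stage)
{
	/* the macro inverts the value because the LEDs are active low */
	jmr3927_led_set(stage & 0x0f);
}
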
diff --git a/arch/mips/include/asm/txx9/pci.h b/arch/mips/include/asm/txx9/pci.h
new file mode 100644
index 000000000..3d3252906
--- /dev/null
+++ b/arch/mips/include/asm/txx9/pci.h
@@ -0,0 +1,39 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_TXX9_PCI_H
+#define __ASM_TXX9_PCI_H
+
+#include <linux/pci.h>
+
+extern struct pci_controller txx9_primary_pcic;
+struct pci_controller *
+txx9_alloc_pci_controller(struct pci_controller *pcic,
+ unsigned long mem_base, unsigned long mem_size,
+ unsigned long io_base, unsigned long io_size);
+
+int txx9_pci66_check(struct pci_controller *hose, int top_bus,
+ int current_bus);
+extern int txx9_pci_mem_high __initdata;
+
+extern int txx9_pci_option;
+#define TXX9_PCI_OPT_PICMG 0x0002
+#define TXX9_PCI_OPT_CLK_33 0x0008
+#define TXX9_PCI_OPT_CLK_66 0x0010
+#define TXX9_PCI_OPT_CLK_MASK \
+ (TXX9_PCI_OPT_CLK_33 | TXX9_PCI_OPT_CLK_66)
+#define TXX9_PCI_OPT_CLK_AUTO TXX9_PCI_OPT_CLK_MASK
+
+enum txx9_pci_err_action {
+ TXX9_PCI_ERR_REPORT,
+ TXX9_PCI_ERR_IGNORE,
+ TXX9_PCI_ERR_PANIC,
+};
+extern enum txx9_pci_err_action txx9_pci_err_action;
+
+extern char * (*txx9_board_pcibios_setup)(char *str);
+char *txx9_pcibios_setup(char *str);
+
+#endif /* __ASM_TXX9_PCI_H */
diff --git a/arch/mips/include/asm/txx9/rbtx4927.h b/arch/mips/include/asm/txx9/rbtx4927.h
new file mode 100644
index 000000000..4060ad26c
--- /dev/null
+++ b/arch/mips/include/asm/txx9/rbtx4927.h
@@ -0,0 +1,92 @@
+/*
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * Copyright 2001-2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __ASM_TXX9_RBTX4927_H
+#define __ASM_TXX9_RBTX4927_H
+
+#include <asm/txx9/tx4927.h>
+
+#define RBTX4927_PCIMEM 0x08000000
+#define RBTX4927_PCIMEM_SIZE 0x08000000
+#define RBTX4927_PCIIO 0x16000000
+#define RBTX4927_PCIIO_SIZE 0x01000000
+
+#define RBTX4927_LED_ADDR (IO_BASE + TXX9_CE(2) + 0x00001000)
+#define RBTX4927_IMASK_ADDR (IO_BASE + TXX9_CE(2) + 0x00002000)
+#define RBTX4927_IMSTAT_ADDR (IO_BASE + TXX9_CE(2) + 0x00002006)
+#define RBTX4927_SOFTINT_ADDR (IO_BASE + TXX9_CE(2) + 0x00003000)
+#define RBTX4927_SOFTRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x0000f000)
+#define RBTX4927_SOFTRESETLOCK_ADDR (IO_BASE + TXX9_CE(2) + 0x0000f002)
+#define RBTX4927_PCIRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x0000f006)
+#define RBTX4927_BRAMRTC_BASE (IO_BASE + TXX9_CE(2) + 0x00010000)
+#define RBTX4927_ETHER_BASE (IO_BASE + TXX9_CE(2) + 0x00020000)
+
+/* Ethernet port address */
+#define RBTX4927_ETHER_ADDR (RBTX4927_ETHER_BASE + 0x280)
+
+#define rbtx4927_imask_addr ((__u8 __iomem *)RBTX4927_IMASK_ADDR)
+#define rbtx4927_imstat_addr ((__u8 __iomem *)RBTX4927_IMSTAT_ADDR)
+#define rbtx4927_softint_addr ((__u8 __iomem *)RBTX4927_SOFTINT_ADDR)
+#define rbtx4927_softreset_addr ((__u8 __iomem *)RBTX4927_SOFTRESET_ADDR)
+#define rbtx4927_softresetlock_addr \
+ ((__u8 __iomem *)RBTX4927_SOFTRESETLOCK_ADDR)
+#define rbtx4927_pcireset_addr ((__u8 __iomem *)RBTX4927_PCIRESET_ADDR)
+
+/* bits for ISTAT/IMASK/IMSTAT */
+#define RBTX4927_INTB_PCID 0
+#define RBTX4927_INTB_PCIC 1
+#define RBTX4927_INTB_PCIB 2
+#define RBTX4927_INTB_PCIA 3
+#define RBTX4927_INTF_PCID (1 << RBTX4927_INTB_PCID)
+#define RBTX4927_INTF_PCIC (1 << RBTX4927_INTB_PCIC)
+#define RBTX4927_INTF_PCIB (1 << RBTX4927_INTB_PCIB)
+#define RBTX4927_INTF_PCIA (1 << RBTX4927_INTB_PCIA)
+
+#define RBTX4927_NR_IRQ_IOC 8 /* IOC */
+
+#define RBTX4927_IRQ_IOC (TXX9_IRQ_BASE + TX4927_NUM_IR)
+#define RBTX4927_IRQ_IOC_PCID (RBTX4927_IRQ_IOC + RBTX4927_INTB_PCID)
+#define RBTX4927_IRQ_IOC_PCIC (RBTX4927_IRQ_IOC + RBTX4927_INTB_PCIC)
+#define RBTX4927_IRQ_IOC_PCIB (RBTX4927_IRQ_IOC + RBTX4927_INTB_PCIB)
+#define RBTX4927_IRQ_IOC_PCIA (RBTX4927_IRQ_IOC + RBTX4927_INTB_PCIA)
+
+#define RBTX4927_IRQ_IOCINT (TXX9_IRQ_BASE + TX4927_IR_INT(1))
+
+#ifdef CONFIG_PCI
+#define RBTX4927_ISA_IO_OFFSET RBTX4927_PCIIO
+#else
+#define RBTX4927_ISA_IO_OFFSET 0
+#endif
+
+#define RBTX4927_RTL_8019_BASE (RBTX4927_ETHER_ADDR - mips_io_port_base)
+#define RBTX4927_RTL_8019_IRQ (TXX9_IRQ_BASE + TX4927_IR_INT(3))
+
+void rbtx4927_prom_init(void);
+void rbtx4927_irq_setup(void);
+struct pci_dev;
+int rbtx4927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
+
+#endif /* __ASM_TXX9_RBTX4927_H */
diff --git a/arch/mips/include/asm/txx9/rbtx4938.h b/arch/mips/include/asm/txx9/rbtx4938.h
new file mode 100644
index 000000000..9c969dd3c
--- /dev/null
+++ b/arch/mips/include/asm/txx9/rbtx4938.h
@@ -0,0 +1,145 @@
+/*
+ * Definitions for TX4937/TX4938
+ *
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
+ */
+#ifndef __ASM_TXX9_RBTX4938_H
+#define __ASM_TXX9_RBTX4938_H
+
+#include <asm/addrspace.h>
+#include <asm/txx9irq.h>
+#include <asm/txx9/tx4938.h>
+
+/* Address map */
+#define RBTX4938_FPGA_REG_ADDR (IO_BASE + TXX9_CE(2) + 0x00000000)
+#define RBTX4938_FPGA_REV_ADDR (IO_BASE + TXX9_CE(2) + 0x00000002)
+#define RBTX4938_CONFIG1_ADDR (IO_BASE + TXX9_CE(2) + 0x00000004)
+#define RBTX4938_CONFIG2_ADDR (IO_BASE + TXX9_CE(2) + 0x00000006)
+#define RBTX4938_CONFIG3_ADDR (IO_BASE + TXX9_CE(2) + 0x00000008)
+#define RBTX4938_LED_ADDR (IO_BASE + TXX9_CE(2) + 0x00001000)
+#define RBTX4938_DIPSW_ADDR (IO_BASE + TXX9_CE(2) + 0x00001002)
+#define RBTX4938_BDIPSW_ADDR (IO_BASE + TXX9_CE(2) + 0x00001004)
+#define RBTX4938_IMASK_ADDR (IO_BASE + TXX9_CE(2) + 0x00002000)
+#define RBTX4938_IMASK2_ADDR (IO_BASE + TXX9_CE(2) + 0x00002002)
+#define RBTX4938_INTPOL_ADDR (IO_BASE + TXX9_CE(2) + 0x00002004)
+#define RBTX4938_ISTAT_ADDR (IO_BASE + TXX9_CE(2) + 0x00002006)
+#define RBTX4938_ISTAT2_ADDR (IO_BASE + TXX9_CE(2) + 0x00002008)
+#define RBTX4938_IMSTAT_ADDR (IO_BASE + TXX9_CE(2) + 0x0000200a)
+#define RBTX4938_IMSTAT2_ADDR (IO_BASE + TXX9_CE(2) + 0x0000200c)
+#define RBTX4938_SOFTINT_ADDR (IO_BASE + TXX9_CE(2) + 0x00003000)
+#define RBTX4938_PIOSEL_ADDR (IO_BASE + TXX9_CE(2) + 0x00005000)
+#define RBTX4938_SPICS_ADDR (IO_BASE + TXX9_CE(2) + 0x00005002)
+#define RBTX4938_SFPWR_ADDR (IO_BASE + TXX9_CE(2) + 0x00005008)
+#define RBTX4938_SFVOL_ADDR (IO_BASE + TXX9_CE(2) + 0x0000500a)
+#define RBTX4938_SOFTRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x00007000)
+#define RBTX4938_SOFTRESETLOCK_ADDR (IO_BASE + TXX9_CE(2) + 0x00007002)
+#define RBTX4938_PCIRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x00007004)
+#define RBTX4938_ETHER_BASE (IO_BASE + TXX9_CE(2) + 0x00020000)
+
+/* Ethernet port address (Jumperless Mode (W12:Open)) */
+#define RBTX4938_ETHER_ADDR (RBTX4938_ETHER_BASE + 0x280)
+
+/* bits for ISTAT/IMASK/IMSTAT */
+#define RBTX4938_INTB_PCID 0
+#define RBTX4938_INTB_PCIC 1
+#define RBTX4938_INTB_PCIB 2
+#define RBTX4938_INTB_PCIA 3
+#define RBTX4938_INTB_RTC 4
+#define RBTX4938_INTB_ATA 5
+#define RBTX4938_INTB_MODEM 6
+#define RBTX4938_INTB_SWINT 7
+#define RBTX4938_INTF_PCID (1 << RBTX4938_INTB_PCID)
+#define RBTX4938_INTF_PCIC (1 << RBTX4938_INTB_PCIC)
+#define RBTX4938_INTF_PCIB (1 << RBTX4938_INTB_PCIB)
+#define RBTX4938_INTF_PCIA (1 << RBTX4938_INTB_PCIA)
+#define RBTX4938_INTF_RTC (1 << RBTX4938_INTB_RTC)
+#define RBTX4938_INTF_ATA (1 << RBTX4938_INTB_ATA)
+#define RBTX4938_INTF_MODEM (1 << RBTX4938_INTB_MODEM)
+#define RBTX4938_INTF_SWINT (1 << RBTX4938_INTB_SWINT)
+
+#define rbtx4938_fpga_rev_addr ((__u8 __iomem *)RBTX4938_FPGA_REV_ADDR)
+#define rbtx4938_led_addr ((__u8 __iomem *)RBTX4938_LED_ADDR)
+#define rbtx4938_dipsw_addr ((__u8 __iomem *)RBTX4938_DIPSW_ADDR)
+#define rbtx4938_bdipsw_addr ((__u8 __iomem *)RBTX4938_BDIPSW_ADDR)
+#define rbtx4938_imask_addr ((__u8 __iomem *)RBTX4938_IMASK_ADDR)
+#define rbtx4938_imask2_addr ((__u8 __iomem *)RBTX4938_IMASK2_ADDR)
+#define rbtx4938_intpol_addr ((__u8 __iomem *)RBTX4938_INTPOL_ADDR)
+#define rbtx4938_istat_addr ((__u8 __iomem *)RBTX4938_ISTAT_ADDR)
+#define rbtx4938_istat2_addr ((__u8 __iomem *)RBTX4938_ISTAT2_ADDR)
+#define rbtx4938_imstat_addr ((__u8 __iomem *)RBTX4938_IMSTAT_ADDR)
+#define rbtx4938_imstat2_addr ((__u8 __iomem *)RBTX4938_IMSTAT2_ADDR)
+#define rbtx4938_softint_addr ((__u8 __iomem *)RBTX4938_SOFTINT_ADDR)
+#define rbtx4938_piosel_addr ((__u8 __iomem *)RBTX4938_PIOSEL_ADDR)
+#define rbtx4938_spics_addr ((__u8 __iomem *)RBTX4938_SPICS_ADDR)
+#define rbtx4938_sfpwr_addr ((__u8 __iomem *)RBTX4938_SFPWR_ADDR)
+#define rbtx4938_sfvol_addr ((__u8 __iomem *)RBTX4938_SFVOL_ADDR)
+#define rbtx4938_softreset_addr ((__u8 __iomem *)RBTX4938_SOFTRESET_ADDR)
+#define rbtx4938_softresetlock_addr \
+ ((__u8 __iomem *)RBTX4938_SOFTRESETLOCK_ADDR)
+#define rbtx4938_pcireset_addr ((__u8 __iomem *)RBTX4938_PCIRESET_ADDR)
+
+/*
+ * IRQ mappings
+ */
+
+#define RBTX4938_SOFT_INT0 0 /* not used */
+#define RBTX4938_SOFT_INT1 1 /* not used */
+#define RBTX4938_IRC_INT 2
+#define RBTX4938_TIMER_INT 7
+
+/* These are the virtual IRQ numbers; we divide all IRQs into
+ * 'spaces', and the 'space' determines where and how to enable/disable
+ * that particular IRQ on an RBTX4938 machine. Add new 'spaces' as new
+ * IRQ hardware is supported.
+ */
+#define RBTX4938_NR_IRQ_IOC 8
+
+#define RBTX4938_IRQ_IRC TXX9_IRQ_BASE
+#define RBTX4938_IRQ_IOC (TXX9_IRQ_BASE + TX4938_NUM_IR)
+#define RBTX4938_IRQ_END (RBTX4938_IRQ_IOC + RBTX4938_NR_IRQ_IOC)
+
+#define RBTX4938_IRQ_IRC_ECCERR (RBTX4938_IRQ_IRC + TX4938_IR_ECCERR)
+#define RBTX4938_IRQ_IRC_WTOERR (RBTX4938_IRQ_IRC + TX4938_IR_WTOERR)
+#define RBTX4938_IRQ_IRC_INT(n) (RBTX4938_IRQ_IRC + TX4938_IR_INT(n))
+#define RBTX4938_IRQ_IRC_SIO(n) (RBTX4938_IRQ_IRC + TX4938_IR_SIO(n))
+#define RBTX4938_IRQ_IRC_DMA(ch, n) (RBTX4938_IRQ_IRC + TX4938_IR_DMA(ch, n))
+#define RBTX4938_IRQ_IRC_PIO (RBTX4938_IRQ_IRC + TX4938_IR_PIO)
+#define RBTX4938_IRQ_IRC_PDMAC (RBTX4938_IRQ_IRC + TX4938_IR_PDMAC)
+#define RBTX4938_IRQ_IRC_PCIC (RBTX4938_IRQ_IRC + TX4938_IR_PCIC)
+#define RBTX4938_IRQ_IRC_TMR(n) (RBTX4938_IRQ_IRC + TX4938_IR_TMR(n))
+#define RBTX4938_IRQ_IRC_NDFMC (RBTX4938_IRQ_IRC + TX4938_IR_NDFMC)
+#define RBTX4938_IRQ_IRC_PCIERR (RBTX4938_IRQ_IRC + TX4938_IR_PCIERR)
+#define RBTX4938_IRQ_IRC_PCIPME (RBTX4938_IRQ_IRC + TX4938_IR_PCIPME)
+#define RBTX4938_IRQ_IRC_ACLC (RBTX4938_IRQ_IRC + TX4938_IR_ACLC)
+#define RBTX4938_IRQ_IRC_ACLCPME (RBTX4938_IRQ_IRC + TX4938_IR_ACLCPME)
+#define RBTX4938_IRQ_IRC_PCIC1 (RBTX4938_IRQ_IRC + TX4938_IR_PCIC1)
+#define RBTX4938_IRQ_IRC_SPI (RBTX4938_IRQ_IRC + TX4938_IR_SPI)
+#define RBTX4938_IRQ_IOC_PCID (RBTX4938_IRQ_IOC + RBTX4938_INTB_PCID)
+#define RBTX4938_IRQ_IOC_PCIC (RBTX4938_IRQ_IOC + RBTX4938_INTB_PCIC)
+#define RBTX4938_IRQ_IOC_PCIB (RBTX4938_IRQ_IOC + RBTX4938_INTB_PCIB)
+#define RBTX4938_IRQ_IOC_PCIA (RBTX4938_IRQ_IOC + RBTX4938_INTB_PCIA)
+#define RBTX4938_IRQ_IOC_RTC (RBTX4938_IRQ_IOC + RBTX4938_INTB_RTC)
+#define RBTX4938_IRQ_IOC_ATA (RBTX4938_IRQ_IOC + RBTX4938_INTB_ATA)
+#define RBTX4938_IRQ_IOC_MODEM (RBTX4938_IRQ_IOC + RBTX4938_INTB_MODEM)
+#define RBTX4938_IRQ_IOC_SWINT (RBTX4938_IRQ_IOC + RBTX4938_INTB_SWINT)
+
+
+/* IOC (PCI, etc) */
+#define RBTX4938_IRQ_IOCINT (TXX9_IRQ_BASE + TX4938_IR_INT(0))
+/* Onboard 10M Ether */
+#define RBTX4938_IRQ_ETHER (TXX9_IRQ_BASE + TX4938_IR_INT(1))
+
+#define RBTX4938_RTL_8019_BASE (RBTX4938_ETHER_ADDR - mips_io_port_base)
+#define RBTX4938_RTL_8019_IRQ (RBTX4938_IRQ_ETHER)
+
+void rbtx4938_prom_init(void);
+void rbtx4938_irq_setup(void);
+struct pci_dev;
+int rbtx4938_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
+
+#endif /* __ASM_TXX9_RBTX4938_H */
diff --git a/arch/mips/include/asm/txx9/rbtx4939.h b/arch/mips/include/asm/txx9/rbtx4939.h
new file mode 100644
index 000000000..6157bfd90
--- /dev/null
+++ b/arch/mips/include/asm/txx9/rbtx4939.h
@@ -0,0 +1,142 @@
+/*
+ * Definitions for RBTX4939
+ *
+ * (C) Copyright TOSHIBA CORPORATION 2005-2006
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifndef __ASM_TXX9_RBTX4939_H
+#define __ASM_TXX9_RBTX4939_H
+
+#include <asm/addrspace.h>
+#include <asm/txx9irq.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/tx4939.h>
+
+/* Address map */
+#define RBTX4939_IOC_REG_ADDR (IO_BASE + TXX9_CE(1) + 0x00000000)
+#define RBTX4939_BOARD_REV_ADDR (IO_BASE + TXX9_CE(1) + 0x00000000)
+#define RBTX4939_IOC_REV_ADDR (IO_BASE + TXX9_CE(1) + 0x00000002)
+#define RBTX4939_CONFIG1_ADDR (IO_BASE + TXX9_CE(1) + 0x00000004)
+#define RBTX4939_CONFIG2_ADDR (IO_BASE + TXX9_CE(1) + 0x00000006)
+#define RBTX4939_CONFIG3_ADDR (IO_BASE + TXX9_CE(1) + 0x00000008)
+#define RBTX4939_CONFIG4_ADDR (IO_BASE + TXX9_CE(1) + 0x0000000a)
+#define RBTX4939_USTAT_ADDR (IO_BASE + TXX9_CE(1) + 0x00001000)
+#define RBTX4939_UDIPSW_ADDR (IO_BASE + TXX9_CE(1) + 0x00001002)
+#define RBTX4939_BDIPSW_ADDR (IO_BASE + TXX9_CE(1) + 0x00001004)
+#define RBTX4939_IEN_ADDR (IO_BASE + TXX9_CE(1) + 0x00002000)
+#define RBTX4939_IPOL_ADDR (IO_BASE + TXX9_CE(1) + 0x00002002)
+#define RBTX4939_IFAC1_ADDR (IO_BASE + TXX9_CE(1) + 0x00002004)
+#define RBTX4939_IFAC2_ADDR (IO_BASE + TXX9_CE(1) + 0x00002006)
+#define RBTX4939_SOFTINT_ADDR (IO_BASE + TXX9_CE(1) + 0x00003000)
+#define RBTX4939_ISASTAT_ADDR (IO_BASE + TXX9_CE(1) + 0x00004000)
+#define RBTX4939_PCISTAT_ADDR (IO_BASE + TXX9_CE(1) + 0x00004002)
+#define RBTX4939_ROME_ADDR (IO_BASE + TXX9_CE(1) + 0x00004004)
+#define RBTX4939_SPICS_ADDR (IO_BASE + TXX9_CE(1) + 0x00004006)
+#define RBTX4939_AUDI_ADDR (IO_BASE + TXX9_CE(1) + 0x00004008)
+#define RBTX4939_ISAGPIO_ADDR (IO_BASE + TXX9_CE(1) + 0x0000400a)
+#define RBTX4939_PE1_ADDR (IO_BASE + TXX9_CE(1) + 0x00005000)
+#define RBTX4939_PE2_ADDR (IO_BASE + TXX9_CE(1) + 0x00005002)
+#define RBTX4939_PE3_ADDR (IO_BASE + TXX9_CE(1) + 0x00005004)
+#define RBTX4939_VP_ADDR (IO_BASE + TXX9_CE(1) + 0x00005006)
+#define RBTX4939_VPRESET_ADDR (IO_BASE + TXX9_CE(1) + 0x00005008)
+#define RBTX4939_VPSOUT_ADDR (IO_BASE + TXX9_CE(1) + 0x0000500a)
+#define RBTX4939_VPSIN_ADDR (IO_BASE + TXX9_CE(1) + 0x0000500c)
+#define RBTX4939_7SEG_ADDR(s, ch) \
+ (IO_BASE + TXX9_CE(1) + 0x00006000 + (s) * 16 + ((ch) & 3) * 2)
+#define RBTX4939_SOFTRESET_ADDR (IO_BASE + TXX9_CE(1) + 0x00007000)
+#define RBTX4939_RESETEN_ADDR (IO_BASE + TXX9_CE(1) + 0x00007002)
+#define RBTX4939_RESETSTAT_ADDR (IO_BASE + TXX9_CE(1) + 0x00007004)
+#define RBTX4939_ETHER_BASE (IO_BASE + TXX9_CE(1) + 0x00020000)
+
+/* Ethernet port address */
+#define RBTX4939_ETHER_ADDR (RBTX4939_ETHER_BASE + 0x300)
+
+/* bits for IEN/IPOL/IFAC */
+#define RBTX4938_INTB_ISA0 0
+#define RBTX4938_INTB_ISA11 1
+#define RBTX4938_INTB_ISA12 2
+#define RBTX4938_INTB_ISA15 3
+#define RBTX4938_INTB_I2S 4
+#define RBTX4938_INTB_SW 5
+#define RBTX4938_INTF_ISA0 (1 << RBTX4938_INTB_ISA0)
+#define RBTX4938_INTF_ISA11 (1 << RBTX4938_INTB_ISA11)
+#define RBTX4938_INTF_ISA12 (1 << RBTX4938_INTB_ISA12)
+#define RBTX4938_INTF_ISA15 (1 << RBTX4938_INTB_ISA15)
+#define RBTX4938_INTF_I2S (1 << RBTX4938_INTB_I2S)
+#define RBTX4938_INTF_SW (1 << RBTX4938_INTB_SW)
+
+/* bits for PE1,PE2,PE3 */
+#define RBTX4939_PE1_ATA(ch) (0x01 << (ch))
+#define RBTX4939_PE1_RMII(ch) (0x04 << (ch))
+#define RBTX4939_PE2_SIO0 0x01
+#define RBTX4939_PE2_SIO2 0x02
+#define RBTX4939_PE2_SIO3 0x04
+#define RBTX4939_PE2_CIR 0x08
+#define RBTX4939_PE2_SPI 0x10
+#define RBTX4939_PE2_GPIO 0x20
+#define RBTX4939_PE3_VP 0x01
+#define RBTX4939_PE3_VP_P 0x02
+#define RBTX4939_PE3_VP_S 0x04
+
+#define rbtx4939_board_rev_addr ((u8 __iomem *)RBTX4939_BOARD_REV_ADDR)
+#define rbtx4939_ioc_rev_addr ((u8 __iomem *)RBTX4939_IOC_REV_ADDR)
+#define rbtx4939_config1_addr ((u8 __iomem *)RBTX4939_CONFIG1_ADDR)
+#define rbtx4939_config2_addr ((u8 __iomem *)RBTX4939_CONFIG2_ADDR)
+#define rbtx4939_config3_addr ((u8 __iomem *)RBTX4939_CONFIG3_ADDR)
+#define rbtx4939_config4_addr ((u8 __iomem *)RBTX4939_CONFIG4_ADDR)
+#define rbtx4939_ustat_addr ((u8 __iomem *)RBTX4939_USTAT_ADDR)
+#define rbtx4939_udipsw_addr ((u8 __iomem *)RBTX4939_UDIPSW_ADDR)
+#define rbtx4939_bdipsw_addr ((u8 __iomem *)RBTX4939_BDIPSW_ADDR)
+#define rbtx4939_ien_addr ((u8 __iomem *)RBTX4939_IEN_ADDR)
+#define rbtx4939_ipol_addr ((u8 __iomem *)RBTX4939_IPOL_ADDR)
+#define rbtx4939_ifac1_addr ((u8 __iomem *)RBTX4939_IFAC1_ADDR)
+#define rbtx4939_ifac2_addr ((u8 __iomem *)RBTX4939_IFAC2_ADDR)
+#define rbtx4939_softint_addr ((u8 __iomem *)RBTX4939_SOFTINT_ADDR)
+#define rbtx4939_isastat_addr ((u8 __iomem *)RBTX4939_ISASTAT_ADDR)
+#define rbtx4939_pcistat_addr ((u8 __iomem *)RBTX4939_PCISTAT_ADDR)
+#define rbtx4939_rome_addr ((u8 __iomem *)RBTX4939_ROME_ADDR)
+#define rbtx4939_spics_addr ((u8 __iomem *)RBTX4939_SPICS_ADDR)
+#define rbtx4939_audi_addr ((u8 __iomem *)RBTX4939_AUDI_ADDR)
+#define rbtx4939_isagpio_addr ((u8 __iomem *)RBTX4939_ISAGPIO_ADDR)
+#define rbtx4939_pe1_addr ((u8 __iomem *)RBTX4939_PE1_ADDR)
+#define rbtx4939_pe2_addr ((u8 __iomem *)RBTX4939_PE2_ADDR)
+#define rbtx4939_pe3_addr ((u8 __iomem *)RBTX4939_PE3_ADDR)
+#define rbtx4939_vp_addr ((u8 __iomem *)RBTX4939_VP_ADDR)
+#define rbtx4939_vpreset_addr ((u8 __iomem *)RBTX4939_VPRESET_ADDR)
+#define rbtx4939_vpsout_addr ((u8 __iomem *)RBTX4939_VPSOUT_ADDR)
+#define rbtx4939_vpsin_addr ((u8 __iomem *)RBTX4939_VPSIN_ADDR)
+#define rbtx4939_7seg_addr(s, ch) \
+ ((u8 __iomem *)RBTX4939_7SEG_ADDR(s, ch))
+#define rbtx4939_softreset_addr ((u8 __iomem *)RBTX4939_SOFTRESET_ADDR)
+#define rbtx4939_reseten_addr ((u8 __iomem *)RBTX4939_RESETEN_ADDR)
+#define rbtx4939_resetstat_addr ((u8 __iomem *)RBTX4939_RESETSTAT_ADDR)
+
+/*
+ * IRQ mappings
+ */
+#define RBTX4939_NR_IRQ_IOC 8
+
+#define RBTX4939_IRQ_IOC (TXX9_IRQ_BASE + TX4939_NUM_IR)
+#define RBTX4939_IRQ_END (RBTX4939_IRQ_IOC + RBTX4939_NR_IRQ_IOC)
+
+/* IOC (ISA, etc) */
+#define RBTX4939_IRQ_IOCINT (TXX9_IRQ_BASE + TX4939_IR_INT(0))
+/* Onboard 10M Ether */
+#define RBTX4939_IRQ_ETHER (TXX9_IRQ_BASE + TX4939_IR_INT(1))
+
+void rbtx4939_prom_init(void);
+void rbtx4939_irq_setup(void);
+
+struct mtd_partition;
+struct map_info;
+struct rbtx4939_flash_data {
+ unsigned int width;
+ unsigned int nr_parts;
+ struct mtd_partition *parts;
+ void (*map_init)(struct map_info *map);
+};
+
+#endif /* __ASM_TXX9_RBTX4939_H */
diff --git a/arch/mips/include/asm/txx9/smsc_fdc37m81x.h b/arch/mips/include/asm/txx9/smsc_fdc37m81x.h
new file mode 100644
index 000000000..926d08f18
--- /dev/null
+++ b/arch/mips/include/asm/txx9/smsc_fdc37m81x.h
@@ -0,0 +1,68 @@
+/*
+ * Interface for smsc fdc37m81x Super IO chip
+ *
+ * Author: MontaVista Software, Inc. source@mvista.com
+ *
+ * 2001-2003 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Copyright (C) 2004 MontaVista Software Inc.
+ * Manish Lachwani, mlachwani@mvista.com
+ */
+
+#ifndef _SMSC_FDC37M81X_H_
+#define _SMSC_FDC37M81X_H_
+
+/* Common Registers */
+#define SMSC_FDC37M81X_CONFIG_INDEX 0x00
+#define SMSC_FDC37M81X_CONFIG_DATA 0x01
+#define SMSC_FDC37M81X_CONF 0x02
+#define SMSC_FDC37M81X_INDEX 0x03
+#define SMSC_FDC37M81X_DNUM 0x07
+#define SMSC_FDC37M81X_DID 0x20
+#define SMSC_FDC37M81X_DREV 0x21
+#define SMSC_FDC37M81X_PCNT 0x22
+#define SMSC_FDC37M81X_PMGT 0x23
+#define SMSC_FDC37M81X_OSC 0x24
+#define SMSC_FDC37M81X_CONFPA0 0x26
+#define SMSC_FDC37M81X_CONFPA1 0x27
+#define SMSC_FDC37M81X_TEST4 0x2B
+#define SMSC_FDC37M81X_TEST5 0x2C
+#define SMSC_FDC37M81X_TEST1 0x2D
+#define SMSC_FDC37M81X_TEST2 0x2E
+#define SMSC_FDC37M81X_TEST3 0x2F
+
+/* Logical device numbers */
+#define SMSC_FDC37M81X_FDD 0x00
+#define SMSC_FDC37M81X_PARALLEL 0x03
+#define SMSC_FDC37M81X_SERIAL1 0x04
+#define SMSC_FDC37M81X_SERIAL2 0x05
+#define SMSC_FDC37M81X_KBD 0x07
+#define SMSC_FDC37M81X_AUXIO 0x08
+#define SMSC_FDC37M81X_NONE 0xff
+
+/* Logical device Config Registers */
+#define SMSC_FDC37M81X_ACTIVE 0x30
+#define SMSC_FDC37M81X_BASEADDR0 0x60
+#define SMSC_FDC37M81X_BASEADDR1 0x61
+#define SMSC_FDC37M81X_INT 0x70
+#define SMSC_FDC37M81X_INT2 0x72
+#define SMSC_FDC37M81X_LDCR_F0 0xF0
+
+/* Chip Config Values */
+#define SMSC_FDC37M81X_CONFIG_ENTER 0x55
+#define SMSC_FDC37M81X_CONFIG_EXIT 0xaa
+#define SMSC_FDC37M81X_CHIP_ID 0x4d
+
+unsigned long smsc_fdc37m81x_init(unsigned long port);
+
+void smsc_fdc37m81x_config_beg(void);
+
+void smsc_fdc37m81x_config_end(void);
+
+u8 smsc_fdc37m81x_config_get(u8 reg);
+void smsc_fdc37m81x_config_set(u8 reg, u8 val);
+
+#endif
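
A minimal sketch of probing for the Super I/O chip with the config helpers above, reading the device-id register inside a config-entry/exit sequence; fdc37m81x_present() is a hypothetical name:

#include <linux/types.h>
#include <asm/txx9/smsc_fdc37m81x.h>

static int fdc37m81x_present(void)
{
	u8 id;

	smsc_fdc37m81x_config_beg();
	id = smsc_fdc37m81x_config_get(SMSC_FDC37M81X_DID);
	smsc_fdc37m81x_config_end();

	return id == SMSC_FDC37M81X_CHIP_ID;
}
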
diff --git a/arch/mips/include/asm/txx9/spi.h b/arch/mips/include/asm/txx9/spi.h
new file mode 100644
index 000000000..0d727f354
--- /dev/null
+++ b/arch/mips/include/asm/txx9/spi.h
@@ -0,0 +1,34 @@
+/*
+ * Definitions for TX4937/TX4938 SPI
+ *
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
+ */
+#ifndef __ASM_TXX9_SPI_H
+#define __ASM_TXX9_SPI_H
+
+#include <linux/errno.h>
+
+#ifdef CONFIG_SPI
+int spi_eeprom_register(int busid, int chipid, int size);
+int spi_eeprom_read(int busid, int chipid,
+ int address, unsigned char *buf, int len);
+#else
+static inline int spi_eeprom_register(int busid, int chipid, int size)
+{
+ return -ENODEV;
+}
+static inline int spi_eeprom_read(int busid, int chipid,
+ int address, unsigned char *buf, int len)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __ASM_TXX9_SPI_H */
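
A minimal sketch of reading a few bytes from the boot SEEPROM with spi_eeprom_read(); the bus/chip ids and offset are placeholders, and read_board_serial() is a hypothetical helper:

#include <asm/txx9/spi.h>

static int read_board_serial(unsigned char *buf, int len)
{
	/* placeholder bus id 0, chip id 0, offset 0x10 */
	return spi_eeprom_read(0, 0, 0x10, buf, len);
}
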
diff --git a/arch/mips/include/asm/txx9/tx3927.h b/arch/mips/include/asm/txx9/tx3927.h
new file mode 100644
index 000000000..149fab4f8
--- /dev/null
+++ b/arch/mips/include/asm/txx9/tx3927.h
@@ -0,0 +1,341 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Toshiba Corporation
+ */
+#ifndef __ASM_TXX9_TX3927_H
+#define __ASM_TXX9_TX3927_H
+
+#define TX3927_REG_BASE 0xfffe0000UL
+#define TX3927_REG_SIZE 0x00010000
+#define TX3927_SDRAMC_REG (TX3927_REG_BASE + 0x8000)
+#define TX3927_ROMC_REG (TX3927_REG_BASE + 0x9000)
+#define TX3927_DMA_REG (TX3927_REG_BASE + 0xb000)
+#define TX3927_IRC_REG (TX3927_REG_BASE + 0xc000)
+#define TX3927_PCIC_REG (TX3927_REG_BASE + 0xd000)
+#define TX3927_CCFG_REG (TX3927_REG_BASE + 0xe000)
+#define TX3927_NR_TMR 3
+#define TX3927_TMR_REG(ch) (TX3927_REG_BASE + 0xf000 + (ch) * 0x100)
+#define TX3927_NR_SIO 2
+#define TX3927_SIO_REG(ch) (TX3927_REG_BASE + 0xf300 + (ch) * 0x100)
+#define TX3927_PIO_REG (TX3927_REG_BASE + 0xf500)
+
+struct tx3927_sdramc_reg {
+ volatile unsigned long cr[8];
+ volatile unsigned long tr[3];
+ volatile unsigned long cmd;
+ volatile unsigned long smrs[2];
+};
+
+struct tx3927_romc_reg {
+ volatile unsigned long cr[8];
+};
+
+struct tx3927_dma_reg {
+ struct tx3927_dma_ch_reg {
+ volatile unsigned long cha;
+ volatile unsigned long sar;
+ volatile unsigned long dar;
+ volatile unsigned long cntr;
+ volatile unsigned long sair;
+ volatile unsigned long dair;
+ volatile unsigned long ccr;
+ volatile unsigned long csr;
+ } ch[4];
+ volatile unsigned long dbr[8];
+ volatile unsigned long tdhr;
+ volatile unsigned long mcr;
+ volatile unsigned long unused0;
+};
+
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
+#define endian_def_s2(e1, e2) \
+ volatile unsigned short e1, e2
+#define endian_def_sb2(e1, e2, e3) \
+	volatile unsigned short e1; volatile unsigned char e2, e3
+#define endian_def_b2s(e1, e2, e3) \
+	volatile unsigned char e1, e2; volatile unsigned short e3
+#define endian_def_b4(e1, e2, e3, e4) \
+ volatile unsigned char e1, e2, e3, e4
+#else
+#define endian_def_s2(e1, e2) \
+ volatile unsigned short e2, e1
+#define endian_def_sb2(e1, e2, e3) \
+	volatile unsigned char e3, e2; volatile unsigned short e1
+#define endian_def_b2s(e1, e2, e3) \
+	volatile unsigned short e3; volatile unsigned char e2, e1
+#define endian_def_b4(e1, e2, e3, e4) \
+ volatile unsigned char e4, e3, e2, e1
+#endif
+
+struct tx3927_pcic_reg {
+ endian_def_s2(did, vid);
+ endian_def_s2(pcistat, pcicmd);
+ endian_def_b4(cc, scc, rpli, rid);
+ endian_def_b4(unused0, ht, mlt, cls);
+ volatile unsigned long ioba; /* +10 */
+ volatile unsigned long mba;
+ volatile unsigned long unused1[5];
+ endian_def_s2(svid, ssvid);
+ volatile unsigned long unused2; /* +30 */
+ endian_def_sb2(unused3, unused4, capptr);
+ volatile unsigned long unused5;
+ endian_def_b4(ml, mg, ip, il);
+ volatile unsigned long unused6; /* +40 */
+ volatile unsigned long istat;
+ volatile unsigned long iim;
+ volatile unsigned long rrt;
+ volatile unsigned long unused7[3]; /* +50 */
+ volatile unsigned long ipbmma;
+ volatile unsigned long ipbioma; /* +60 */
+ volatile unsigned long ilbmma;
+ volatile unsigned long ilbioma;
+ volatile unsigned long unused8[9];
+ volatile unsigned long tc; /* +90 */
+ volatile unsigned long tstat;
+ volatile unsigned long tim;
+ volatile unsigned long tccmd;
+ volatile unsigned long pcirrt; /* +a0 */
+ volatile unsigned long pcirrt_cmd;
+ volatile unsigned long pcirrdt;
+ volatile unsigned long unused9[3];
+ volatile unsigned long tlboap;
+ volatile unsigned long tlbiap;
+ volatile unsigned long tlbmma; /* +c0 */
+ volatile unsigned long tlbioma;
+ volatile unsigned long sc_msg;
+ volatile unsigned long sc_be;
+ volatile unsigned long tbl; /* +d0 */
+ volatile unsigned long unused10[3];
+ volatile unsigned long pwmng; /* +e0 */
+ volatile unsigned long pwmngs;
+ volatile unsigned long unused11[6];
+ volatile unsigned long req_trace; /* +100 */
+ volatile unsigned long pbapmc;
+ volatile unsigned long pbapms;
+ volatile unsigned long pbapmim;
+ volatile unsigned long bm; /* +110 */
+ volatile unsigned long cpcibrs;
+ volatile unsigned long cpcibgs;
+ volatile unsigned long pbacs;
+ volatile unsigned long iobas; /* +120 */
+ volatile unsigned long mbas;
+ volatile unsigned long lbc;
+ volatile unsigned long lbstat;
+ volatile unsigned long lbim; /* +130 */
+ volatile unsigned long pcistatim;
+ volatile unsigned long ica;
+ volatile unsigned long icd;
+ volatile unsigned long iiadp; /* +140 */
+ volatile unsigned long iscdp;
+ volatile unsigned long mmas;
+ volatile unsigned long iomas;
+ volatile unsigned long ipciaddr; /* +150 */
+ volatile unsigned long ipcidata;
+ volatile unsigned long ipcibe;
+};
+
+struct tx3927_ccfg_reg {
+ volatile unsigned long ccfg;
+ volatile unsigned long crir;
+ volatile unsigned long pcfg;
+ volatile unsigned long tear;
+ volatile unsigned long pdcr;
+};
+
+/*
+ * SDRAMC
+ */
+
+/*
+ * ROMC
+ */
+
+/*
+ * DMA
+ */
+/* bits for MCR */
+#define TX3927_DMA_MCR_EIS(ch) (0x10000000<<(ch))
+#define TX3927_DMA_MCR_DIS(ch) (0x01000000<<(ch))
+#define TX3927_DMA_MCR_RSFIF 0x00000080
+#define TX3927_DMA_MCR_FIFUM(ch) (0x00000008<<(ch))
+#define TX3927_DMA_MCR_LE 0x00000004
+#define TX3927_DMA_MCR_RPRT 0x00000002
+#define TX3927_DMA_MCR_MSTEN 0x00000001
+
+/* bits for CCRn */
+#define TX3927_DMA_CCR_DBINH 0x04000000
+#define TX3927_DMA_CCR_SBINH 0x02000000
+#define TX3927_DMA_CCR_CHRST 0x01000000
+#define TX3927_DMA_CCR_RVBYTE 0x00800000
+#define TX3927_DMA_CCR_ACKPOL 0x00400000
+#define TX3927_DMA_CCR_REQPL 0x00200000
+#define TX3927_DMA_CCR_EGREQ 0x00100000
+#define TX3927_DMA_CCR_CHDN 0x00080000
+#define TX3927_DMA_CCR_DNCTL 0x00060000
+#define TX3927_DMA_CCR_EXTRQ 0x00010000
+#define TX3927_DMA_CCR_INTRQD 0x0000e000
+#define TX3927_DMA_CCR_INTENE 0x00001000
+#define TX3927_DMA_CCR_INTENC 0x00000800
+#define TX3927_DMA_CCR_INTENT 0x00000400
+#define TX3927_DMA_CCR_CHNEN 0x00000200
+#define TX3927_DMA_CCR_XFACT 0x00000100
+#define TX3927_DMA_CCR_SNOP 0x00000080
+#define TX3927_DMA_CCR_DSTINC 0x00000040
+#define TX3927_DMA_CCR_SRCINC 0x00000020
+#define TX3927_DMA_CCR_XFSZ(order) (((order) << 2) & 0x0000001c)
+#define TX3927_DMA_CCR_XFSZ_1W TX3927_DMA_CCR_XFSZ(2)
+#define TX3927_DMA_CCR_XFSZ_4W TX3927_DMA_CCR_XFSZ(4)
+#define TX3927_DMA_CCR_XFSZ_8W TX3927_DMA_CCR_XFSZ(5)
+#define TX3927_DMA_CCR_XFSZ_16W TX3927_DMA_CCR_XFSZ(6)
+#define TX3927_DMA_CCR_XFSZ_32W TX3927_DMA_CCR_XFSZ(7)
+#define TX3927_DMA_CCR_MEMIO 0x00000002
+#define TX3927_DMA_CCR_ONEAD 0x00000001
+
+/* bits for CSRn */
+#define TX3927_DMA_CSR_CHNACT 0x00000100
+#define TX3927_DMA_CSR_ABCHC 0x00000080
+#define TX3927_DMA_CSR_NCHNC 0x00000040
+#define TX3927_DMA_CSR_NTRNFC 0x00000020
+#define TX3927_DMA_CSR_EXTDN 0x00000010
+#define TX3927_DMA_CSR_CFERR 0x00000008
+#define TX3927_DMA_CSR_CHERR 0x00000004
+#define TX3927_DMA_CSR_DESERR 0x00000002
+#define TX3927_DMA_CSR_SORERR 0x00000001
+
+/*
+ * IRC
+ */
+#define TX3927_IR_INT0 0
+#define TX3927_IR_INT1 1
+#define TX3927_IR_INT2 2
+#define TX3927_IR_INT3 3
+#define TX3927_IR_INT4 4
+#define TX3927_IR_INT5 5
+#define TX3927_IR_SIO0 6
+#define TX3927_IR_SIO1 7
+#define TX3927_IR_SIO(ch) (6 + (ch))
+#define TX3927_IR_DMA 8
+#define TX3927_IR_PIO 9
+#define TX3927_IR_PCI 10
+#define TX3927_IR_TMR(ch) (13 + (ch))
+#define TX3927_NUM_IR 16
+
+/*
+ * PCIC
+ */
+/* bits for PCICMD */
+/* see PCI_COMMAND_XXX in linux/pci.h */
+
+/* bits for PCISTAT */
+/* see PCI_STATUS_XXX in linux/pci.h */
+#define PCI_STATUS_NEW_CAP 0x0010
+
+/* bits for ISTAT/IIM */
+#define TX3927_PCIC_IIM_ALL 0x00001600
+
+/* bits for TC */
+#define TX3927_PCIC_TC_OF16E 0x00000020
+#define TX3927_PCIC_TC_IF8E 0x00000010
+#define TX3927_PCIC_TC_OF8E 0x00000008
+
+/* bits for TSTAT/TIM */
+#define TX3927_PCIC_TIM_ALL 0x0003ffff
+
+/* bits for IOBA/MBA */
+/* see PCI_BASE_ADDRESS_XXX in linux/pci.h */
+
+/* bits for PBAPMC */
+#define TX3927_PCIC_PBAPMC_RPBA 0x00000004
+#define TX3927_PCIC_PBAPMC_PBAEN 0x00000002
+#define TX3927_PCIC_PBAPMC_BMCEN 0x00000001
+
+/* bits for LBSTAT/LBIM */
+#define TX3927_PCIC_LBIM_ALL 0x0000003e
+
+/* bits for PCISTATIM (see also PCI_STATUS_XXX in linux/pci.h) */
+#define TX3927_PCIC_PCISTATIM_ALL 0x0000f900
+
+/* bits for LBC */
+#define TX3927_PCIC_LBC_IBSE 0x00004000
+#define TX3927_PCIC_LBC_TIBSE 0x00002000
+#define TX3927_PCIC_LBC_TMFBSE 0x00001000
+#define TX3927_PCIC_LBC_HRST 0x00000800
+#define TX3927_PCIC_LBC_SRST 0x00000400
+#define TX3927_PCIC_LBC_EPCAD 0x00000200
+#define TX3927_PCIC_LBC_MSDSE 0x00000100
+#define TX3927_PCIC_LBC_CRR 0x00000080
+#define TX3927_PCIC_LBC_ILMDE 0x00000040
+#define TX3927_PCIC_LBC_ILIDE 0x00000020
+
+#define TX3927_PCIC_IDSEL_AD_TO_SLOT(ad) ((ad) - 11)
+#define TX3927_PCIC_MAX_DEVNU TX3927_PCIC_IDSEL_AD_TO_SLOT(32)
+
+/*
+ * CCFG
+ */
+/* CCFG : Chip Configuration */
+#define TX3927_CCFG_TLBOFF 0x00020000
+#define TX3927_CCFG_BEOW 0x00010000
+#define TX3927_CCFG_WR 0x00008000
+#define TX3927_CCFG_TOE 0x00004000
+#define TX3927_CCFG_PCIXARB 0x00002000
+#define TX3927_CCFG_PCI3 0x00001000
+#define TX3927_CCFG_PSNP 0x00000800
+#define TX3927_CCFG_PPRI 0x00000400
+#define TX3927_CCFG_PLLM 0x00000030
+#define TX3927_CCFG_ENDIAN 0x00000004
+#define TX3927_CCFG_HALT 0x00000002
+#define TX3927_CCFG_ACEHOLD 0x00000001
+
+/* PCFG : Pin Configuration */
+#define TX3927_PCFG_SYSCLKEN 0x08000000
+#define TX3927_PCFG_SDRCLKEN_ALL 0x07c00000
+#define TX3927_PCFG_SDRCLKEN(ch) (0x00400000<<(ch))
+#define TX3927_PCFG_PCICLKEN_ALL 0x003c0000
+#define TX3927_PCFG_PCICLKEN(ch) (0x00040000<<(ch))
+#define TX3927_PCFG_SELALL 0x0003ffff
+#define TX3927_PCFG_SELCS 0x00020000
+#define TX3927_PCFG_SELDSF 0x00010000
+#define TX3927_PCFG_SELSIOC_ALL 0x0000c000
+#define TX3927_PCFG_SELSIOC(ch) (0x00004000<<(ch))
+#define TX3927_PCFG_SELSIO_ALL 0x00003000
+#define TX3927_PCFG_SELSIO(ch) (0x00001000<<(ch))
+#define TX3927_PCFG_SELTMR_ALL 0x00000e00
+#define TX3927_PCFG_SELTMR(ch) (0x00000200<<(ch))
+#define TX3927_PCFG_SELDONE 0x00000100
+#define TX3927_PCFG_INTDMA_ALL 0x000000f0
+#define TX3927_PCFG_INTDMA(ch) (0x00000010<<(ch))
+#define TX3927_PCFG_SELDMA_ALL 0x0000000f
+#define TX3927_PCFG_SELDMA(ch) (0x00000001<<(ch))
+
+#define tx3927_sdramcptr ((struct tx3927_sdramc_reg *)TX3927_SDRAMC_REG)
+#define tx3927_romcptr ((struct tx3927_romc_reg *)TX3927_ROMC_REG)
+#define tx3927_dmaptr ((struct tx3927_dma_reg *)TX3927_DMA_REG)
+#define tx3927_pcicptr ((struct tx3927_pcic_reg *)TX3927_PCIC_REG)
+#define tx3927_ccfgptr ((struct tx3927_ccfg_reg *)TX3927_CCFG_REG)
+#define tx3927_sioptr(ch) ((struct txx927_sio_reg *)TX3927_SIO_REG(ch))
+#define tx3927_pioptr ((struct txx9_pio_reg __iomem *)TX3927_PIO_REG)
+
+#define TX3927_REV_PCODE() (tx3927_ccfgptr->crir >> 16)
+#define TX3927_ROMC_BA(ch) (tx3927_romcptr->cr[(ch)] & 0xfff00000)
+#define TX3927_ROMC_SIZE(ch) \
+ (0x00100000 << ((tx3927_romcptr->cr[(ch)] >> 8) & 0xf))
+#define TX3927_ROMC_WIDTH(ch) (32 >> ((tx3927_romcptr->cr[(ch)] >> 7) & 0x1))
+
+void tx3927_wdt_init(void);
+void tx3927_setup(void);
+void tx3927_time_init(unsigned int evt_tmrnr, unsigned int src_tmrnr);
+void tx3927_sio_init(unsigned int sclk, unsigned int cts_mask);
+struct pci_controller;
+void tx3927_pcic_setup(struct pci_controller *channel,
+ unsigned long sdram_size, int extarb);
+void tx3927_setup_pcierr_irq(void);
+void tx3927_irq_init(void);
+void tx3927_mtd_init(int ch);
+
+#endif /* __ASM_TXX9_TX3927_H */
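
The TX3927_ROMC_* macros above decode the base address, size, and bus width of each ROM-controller channel straight out of its cr[] register. A small illustrative dump routine, assuming the header above is available as <asm/txx9/tx3927.h>:

	#include <linux/kernel.h>
	#include <asm/txx9/tx3927.h>

	/* Illustrative only: print every ROMC channel (cr[] has 8 entries). */
	static void dump_tx3927_romc(void)
	{
		int ch;

		for (ch = 0; ch < 8; ch++)
			pr_info("ROMC%d: base %08lx size %08lx width %d\n",
				ch, TX3927_ROMC_BA(ch),
				(unsigned long)TX3927_ROMC_SIZE(ch),
				TX3927_ROMC_WIDTH(ch));
	}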
diff --git a/arch/mips/include/asm/txx9/tx4927.h b/arch/mips/include/asm/txx9/tx4927.h
new file mode 100644
index 000000000..284eea752
--- /dev/null
+++ b/arch/mips/include/asm/txx9/tx4927.h
@@ -0,0 +1,273 @@
+/*
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * Copyright 2001-2006 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __ASM_TXX9_TX4927_H
+#define __ASM_TXX9_TX4927_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <asm/txx9irq.h>
+#include <asm/txx9/tx4927pcic.h>
+
+#ifdef CONFIG_64BIT
+#define TX4927_REG_BASE 0xffffffffff1f0000UL
+#else
+#define TX4927_REG_BASE 0xff1f0000UL
+#endif
+#define TX4927_REG_SIZE 0x00010000
+
+#define TX4927_SDRAMC_REG (TX4927_REG_BASE + 0x8000)
+#define TX4927_EBUSC_REG (TX4927_REG_BASE + 0x9000)
+#define TX4927_DMA_REG (TX4927_REG_BASE + 0xb000)
+#define TX4927_PCIC_REG (TX4927_REG_BASE + 0xd000)
+#define TX4927_CCFG_REG (TX4927_REG_BASE + 0xe000)
+#define TX4927_IRC_REG (TX4927_REG_BASE + 0xf600)
+#define TX4927_NR_TMR 3
+#define TX4927_TMR_REG(ch) (TX4927_REG_BASE + 0xf000 + (ch) * 0x100)
+#define TX4927_NR_SIO 2
+#define TX4927_SIO_REG(ch) (TX4927_REG_BASE + 0xf300 + (ch) * 0x100)
+#define TX4927_PIO_REG (TX4927_REG_BASE + 0xf500)
+#define TX4927_ACLC_REG (TX4927_REG_BASE + 0xf700)
+
+#define TX4927_IR_ECCERR 0
+#define TX4927_IR_WTOERR 1
+#define TX4927_NUM_IR_INT 6
+#define TX4927_IR_INT(n) (2 + (n))
+#define TX4927_NUM_IR_SIO 2
+#define TX4927_IR_SIO(n) (8 + (n))
+#define TX4927_NUM_IR_DMA 4
+#define TX4927_IR_DMA(n) (10 + (n))
+#define TX4927_IR_PIO 14
+#define TX4927_IR_PDMAC 15
+#define TX4927_IR_PCIC 16
+#define TX4927_NUM_IR_TMR 3
+#define TX4927_IR_TMR(n) (17 + (n))
+#define TX4927_IR_PCIERR 22
+#define TX4927_IR_PCIPME 23
+#define TX4927_IR_ACLC 24
+#define TX4927_IR_ACLCPME 25
+#define TX4927_NUM_IR 32
+
+#define TX4927_IRC_INT 2 /* IP[2] in Status register */
+
+#define TX4927_NUM_PIO 16
+
+struct tx4927_sdramc_reg {
+ u64 cr[4];
+ u64 unused0[4];
+ u64 tr;
+ u64 unused1[2];
+ u64 cmd;
+};
+
+struct tx4927_ebusc_reg {
+ u64 cr[8];
+};
+
+struct tx4927_ccfg_reg {
+ u64 ccfg;
+ u64 crir;
+ u64 pcfg;
+ u64 toea;
+ u64 clkctr;
+ u64 unused0;
+ u64 garbc;
+ u64 unused1;
+ u64 unused2;
+ u64 ramp;
+};
+
+/*
+ * CCFG
+ */
+/* CCFG : Chip Configuration */
+#define TX4927_CCFG_WDRST 0x0000020000000000ULL
+#define TX4927_CCFG_WDREXEN 0x0000010000000000ULL
+#define TX4927_CCFG_BCFG_MASK 0x000000ff00000000ULL
+#define TX4927_CCFG_TINTDIS 0x01000000
+#define TX4927_CCFG_PCI66 0x00800000
+#define TX4927_CCFG_PCIMODE 0x00400000
+#define TX4927_CCFG_DIVMODE_MASK 0x000e0000
+#define TX4927_CCFG_DIVMODE_8 (0x0 << 17)
+#define TX4927_CCFG_DIVMODE_12 (0x1 << 17)
+#define TX4927_CCFG_DIVMODE_16 (0x2 << 17)
+#define TX4927_CCFG_DIVMODE_10 (0x3 << 17)
+#define TX4927_CCFG_DIVMODE_2 (0x4 << 17)
+#define TX4927_CCFG_DIVMODE_3 (0x5 << 17)
+#define TX4927_CCFG_DIVMODE_4 (0x6 << 17)
+#define TX4927_CCFG_DIVMODE_2_5 (0x7 << 17)
+#define TX4927_CCFG_BEOW 0x00010000
+#define TX4927_CCFG_WR 0x00008000
+#define TX4927_CCFG_TOE 0x00004000
+#define TX4927_CCFG_PCIARB 0x00002000
+#define TX4927_CCFG_PCIDIVMODE_MASK 0x00001800
+#define TX4927_CCFG_PCIDIVMODE_2_5 0x00000000
+#define TX4927_CCFG_PCIDIVMODE_3 0x00000800
+#define TX4927_CCFG_PCIDIVMODE_5 0x00001000
+#define TX4927_CCFG_PCIDIVMODE_6 0x00001800
+#define TX4927_CCFG_SYSSP_MASK 0x000000c0
+#define TX4927_CCFG_ENDIAN 0x00000004
+#define TX4927_CCFG_HALT 0x00000002
+#define TX4927_CCFG_ACEHOLD 0x00000001
+#define TX4927_CCFG_W1CBITS (TX4927_CCFG_WDRST | TX4927_CCFG_BEOW)
+
+/* PCFG : Pin Configuration */
+#define TX4927_PCFG_SDCLKDLY_MASK 0x30000000
+#define TX4927_PCFG_SDCLKDLY(d) ((d)<<28)
+#define TX4927_PCFG_SYSCLKEN 0x08000000
+#define TX4927_PCFG_SDCLKEN_ALL 0x07800000
+#define TX4927_PCFG_SDCLKEN(ch) (0x00800000<<(ch))
+#define TX4927_PCFG_PCICLKEN_ALL 0x003f0000
+#define TX4927_PCFG_PCICLKEN(ch) (0x00010000<<(ch))
+#define TX4927_PCFG_SEL2 0x00000200
+#define TX4927_PCFG_SEL1 0x00000100
+#define TX4927_PCFG_DMASEL_ALL 0x000000ff
+#define TX4927_PCFG_DMASEL0_MASK 0x00000003
+#define TX4927_PCFG_DMASEL1_MASK 0x0000000c
+#define TX4927_PCFG_DMASEL2_MASK 0x00000030
+#define TX4927_PCFG_DMASEL3_MASK 0x000000c0
+#define TX4927_PCFG_DMASEL0_DRQ0 0x00000000
+#define TX4927_PCFG_DMASEL0_SIO1 0x00000001
+#define TX4927_PCFG_DMASEL0_ACL0 0x00000002
+#define TX4927_PCFG_DMASEL0_ACL2 0x00000003
+#define TX4927_PCFG_DMASEL1_DRQ1 0x00000000
+#define TX4927_PCFG_DMASEL1_SIO1 0x00000004
+#define TX4927_PCFG_DMASEL1_ACL1 0x00000008
+#define TX4927_PCFG_DMASEL1_ACL3 0x0000000c
+#define TX4927_PCFG_DMASEL2_DRQ2 0x00000000 /* SEL2=0 */
+#define TX4927_PCFG_DMASEL2_SIO0 0x00000010 /* SEL2=0 */
+#define TX4927_PCFG_DMASEL2_ACL1 0x00000000 /* SEL2=1 */
+#define TX4927_PCFG_DMASEL2_ACL2 0x00000020 /* SEL2=1 */
+#define TX4927_PCFG_DMASEL2_ACL0 0x00000030 /* SEL2=1 */
+#define TX4927_PCFG_DMASEL3_DRQ3 0x00000000
+#define TX4927_PCFG_DMASEL3_SIO0 0x00000040
+#define TX4927_PCFG_DMASEL3_ACL3 0x00000080
+#define TX4927_PCFG_DMASEL3_ACL1 0x000000c0
+
+/* CLKCTR : Clock Control */
+#define TX4927_CLKCTR_ACLCKD 0x02000000
+#define TX4927_CLKCTR_PIOCKD 0x01000000
+#define TX4927_CLKCTR_DMACKD 0x00800000
+#define TX4927_CLKCTR_PCICKD 0x00400000
+#define TX4927_CLKCTR_TM0CKD 0x00100000
+#define TX4927_CLKCTR_TM1CKD 0x00080000
+#define TX4927_CLKCTR_TM2CKD 0x00040000
+#define TX4927_CLKCTR_SIO0CKD 0x00020000
+#define TX4927_CLKCTR_SIO1CKD 0x00010000
+#define TX4927_CLKCTR_ACLRST 0x00000200
+#define TX4927_CLKCTR_PIORST 0x00000100
+#define TX4927_CLKCTR_DMARST 0x00000080
+#define TX4927_CLKCTR_PCIRST 0x00000040
+#define TX4927_CLKCTR_TM0RST 0x00000010
+#define TX4927_CLKCTR_TM1RST 0x00000008
+#define TX4927_CLKCTR_TM2RST 0x00000004
+#define TX4927_CLKCTR_SIO0RST 0x00000002
+#define TX4927_CLKCTR_SIO1RST 0x00000001
+
+#define tx4927_sdramcptr \
+ ((struct tx4927_sdramc_reg __iomem *)TX4927_SDRAMC_REG)
+#define tx4927_pcicptr \
+ ((struct tx4927_pcic_reg __iomem *)TX4927_PCIC_REG)
+#define tx4927_ccfgptr \
+ ((struct tx4927_ccfg_reg __iomem *)TX4927_CCFG_REG)
+#define tx4927_ebuscptr \
+ ((struct tx4927_ebusc_reg __iomem *)TX4927_EBUSC_REG)
+#define tx4927_pioptr ((struct txx9_pio_reg __iomem *)TX4927_PIO_REG)
+
+#define TX4927_REV_PCODE() \
+ ((__u32)__raw_readq(&tx4927_ccfgptr->crir) >> 16)
+
+#define TX4927_SDRAMC_CR(ch) __raw_readq(&tx4927_sdramcptr->cr[(ch)])
+#define TX4927_SDRAMC_BA(ch) ((TX4927_SDRAMC_CR(ch) >> 49) << 21)
+#define TX4927_SDRAMC_SIZE(ch) \
+ ((((TX4927_SDRAMC_CR(ch) >> 33) & 0x7fff) + 1) << 21)
+
+#define TX4927_EBUSC_CR(ch) __raw_readq(&tx4927_ebuscptr->cr[(ch)])
+#define TX4927_EBUSC_BA(ch) ((TX4927_EBUSC_CR(ch) >> 48) << 20)
+#define TX4927_EBUSC_SIZE(ch) \
+ (0x00100000 << ((unsigned long)(TX4927_EBUSC_CR(ch) >> 8) & 0xf))
+#define TX4927_EBUSC_WIDTH(ch) \
+ (64 >> ((__u32)(TX4927_EBUSC_CR(ch) >> 20) & 0x3))
+
+/* utilities */
+static inline void txx9_clear64(__u64 __iomem *adr, __u64 bits)
+{
+#ifdef CONFIG_32BIT
+ unsigned long flags;
+ local_irq_save(flags);
+#endif
+ ____raw_writeq(____raw_readq(adr) & ~bits, adr);
+#ifdef CONFIG_32BIT
+ local_irq_restore(flags);
+#endif
+}
+static inline void txx9_set64(__u64 __iomem *adr, __u64 bits)
+{
+#ifdef CONFIG_32BIT
+ unsigned long flags;
+ local_irq_save(flags);
+#endif
+ ____raw_writeq(____raw_readq(adr) | bits, adr);
+#ifdef CONFIG_32BIT
+ local_irq_restore(flags);
+#endif
+}
+
+/* These functions are not interrupt safe. */
+static inline void tx4927_ccfg_clear(__u64 bits)
+{
+ ____raw_writeq(____raw_readq(&tx4927_ccfgptr->ccfg)
+ & ~(TX4927_CCFG_W1CBITS | bits),
+ &tx4927_ccfgptr->ccfg);
+}
+static inline void tx4927_ccfg_set(__u64 bits)
+{
+ ____raw_writeq((____raw_readq(&tx4927_ccfgptr->ccfg)
+ & ~TX4927_CCFG_W1CBITS) | bits,
+ &tx4927_ccfgptr->ccfg);
+}
+static inline void tx4927_ccfg_change(__u64 change, __u64 new)
+{
+ ____raw_writeq((____raw_readq(&tx4927_ccfgptr->ccfg)
+ & ~(TX4927_CCFG_W1CBITS | change)) |
+ new,
+ &tx4927_ccfgptr->ccfg);
+}
+
+unsigned int tx4927_get_mem_size(void);
+void tx4927_wdt_init(void);
+void tx4927_setup(void);
+void tx4927_time_init(unsigned int tmrnr);
+void tx4927_sio_init(unsigned int sclk, unsigned int cts_mask);
+int tx4927_report_pciclk(void);
+int tx4927_pciclk66_setup(void);
+void tx4927_setup_pcierr_irq(void);
+void tx4927_irq_init(void);
+void tx4927_mtd_init(int ch);
+void tx4927_dmac_init(int memcpy_chan);
+void tx4927_aclc_init(unsigned int dma_chan_out, unsigned int dma_chan_in);
+
+#endif /* __ASM_TXX9_TX4927_H */
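
The tx4927_ccfg_clear/set/change helpers above are read-modify-write wrappers that always mask TX4927_CCFG_W1CBITS first, so touching CCFG does not accidentally acknowledge the write-1-to-clear WDRST/BEOW flags. A sketch of the typical use, selecting a PCI clock divisor (the value chosen here is purely illustrative):

	#include <asm/txx9/tx4927.h>

	/* Sketch: program PCIDIVMODE to "divide by 3" via the safe RMW helper. */
	static void example_set_pcidivmode(void)
	{
		tx4927_ccfg_change(TX4927_CCFG_PCIDIVMODE_MASK,
				   TX4927_CCFG_PCIDIVMODE_3);
	}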
diff --git a/arch/mips/include/asm/txx9/tx4927pcic.h b/arch/mips/include/asm/txx9/tx4927pcic.h
new file mode 100644
index 000000000..9eab2698c
--- /dev/null
+++ b/arch/mips/include/asm/txx9/tx4927pcic.h
@@ -0,0 +1,203 @@
+/*
+ * include/asm-mips/txx9/tx4927pcic.h
+ * TX4927 PCI controller definitions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_TXX9_TX4927PCIC_H
+#define __ASM_TXX9_TX4927PCIC_H
+
+#include <linux/pci.h>
+#include <linux/irqreturn.h>
+
+struct tx4927_pcic_reg {
+ u32 pciid;
+ u32 pcistatus;
+ u32 pciccrev;
+ u32 pcicfg1;
+ u32 p2gm0plbase; /* +10 */
+ u32 p2gm0pubase;
+ u32 p2gm1plbase;
+ u32 p2gm1pubase;
+ u32 p2gm2pbase; /* +20 */
+ u32 p2giopbase;
+ u32 unused0;
+ u32 pcisid;
+ u32 unused1; /* +30 */
+ u32 pcicapptr;
+ u32 unused2;
+ u32 pcicfg2;
+ u32 g2ptocnt; /* +40 */
+ u32 unused3[15];
+ u32 g2pstatus; /* +80 */
+ u32 g2pmask;
+ u32 pcisstatus;
+ u32 pcimask;
+ u32 p2gcfg; /* +90 */
+ u32 p2gstatus;
+ u32 p2gmask;
+ u32 p2gccmd;
+ u32 unused4[24]; /* +a0 */
+ u32 pbareqport; /* +100 */
+ u32 pbacfg;
+ u32 pbastatus;
+ u32 pbamask;
+ u32 pbabm; /* +110 */
+ u32 pbacreq;
+ u32 pbacgnt;
+ u32 pbacstate;
+ u64 g2pmgbase[3]; /* +120 */
+ u64 g2piogbase;
+ u32 g2pmmask[3]; /* +140 */
+ u32 g2piomask;
+ u64 g2pmpbase[3]; /* +150 */
+ u64 g2piopbase;
+ u32 pciccfg; /* +170 */
+ u32 pcicstatus;
+ u32 pcicmask;
+ u32 unused5;
+ u64 p2gmgbase[3]; /* +180 */
+ u64 p2giogbase;
+ u32 g2pcfgadrs; /* +1a0 */
+ u32 g2pcfgdata;
+ u32 unused6[8];
+ u32 g2pintack;
+ u32 g2pspc;
+ u32 unused7[12]; /* +1d0 */
+ u64 pdmca; /* +200 */
+ u64 pdmga;
+ u64 pdmpa;
+ u64 pdmctr;
+ u64 pdmcfg; /* +220 */
+ u64 pdmsts;
+};
+
+/* bits for PCICMD */
+/* see PCI_COMMAND_XXX in linux/pci_regs.h */
+
+/* bits for PCISTAT */
+/* see PCI_STATUS_XXX in linux/pci_regs.h */
+
+/* bits for IOBA/MBA */
+/* see PCI_BASE_ADDRESS_XXX in linux/pci_regs.h */
+
+/* bits for G2PSTATUS/G2PMASK */
+#define TX4927_PCIC_G2PSTATUS_ALL 0x00000003
+#define TX4927_PCIC_G2PSTATUS_TTOE 0x00000002
+#define TX4927_PCIC_G2PSTATUS_RTOE 0x00000001
+
+/* bits for PCIMASK (see also PCI_STATUS_XXX in linux/pci_regs.h) */
+#define TX4927_PCIC_PCISTATUS_ALL 0x0000f900
+
+/* bits for PBACFG */
+#define TX4927_PCIC_PBACFG_FIXPA 0x00000008
+#define TX4927_PCIC_PBACFG_RPBA 0x00000004
+#define TX4927_PCIC_PBACFG_PBAEN 0x00000002
+#define TX4927_PCIC_PBACFG_BMCEN 0x00000001
+
+/* bits for PBASTATUS/PBAMASK */
+#define TX4927_PCIC_PBASTATUS_ALL 0x00000001
+#define TX4927_PCIC_PBASTATUS_BM 0x00000001
+
+/* bits for G2PMnGBASE */
+#define TX4927_PCIC_G2PMnGBASE_BSDIS 0x0000002000000000ULL
+#define TX4927_PCIC_G2PMnGBASE_ECHG 0x0000001000000000ULL
+
+/* bits for G2PIOGBASE */
+#define TX4927_PCIC_G2PIOGBASE_BSDIS 0x0000002000000000ULL
+#define TX4927_PCIC_G2PIOGBASE_ECHG 0x0000001000000000ULL
+
+/* bits for PCICSTATUS/PCICMASK */
+#define TX4927_PCIC_PCICSTATUS_ALL 0x000007b8
+#define TX4927_PCIC_PCICSTATUS_PME 0x00000400
+#define TX4927_PCIC_PCICSTATUS_TLB 0x00000200
+#define TX4927_PCIC_PCICSTATUS_NIB 0x00000100
+#define TX4927_PCIC_PCICSTATUS_ZIB 0x00000080
+#define TX4927_PCIC_PCICSTATUS_PERR 0x00000020
+#define TX4927_PCIC_PCICSTATUS_SERR 0x00000010
+#define TX4927_PCIC_PCICSTATUS_GBE 0x00000008
+#define TX4927_PCIC_PCICSTATUS_IWB 0x00000002
+#define TX4927_PCIC_PCICSTATUS_E2PDONE 0x00000001
+
+/* bits for PCICCFG */
+#define TX4927_PCIC_PCICCFG_GBWC_MASK 0x0fff0000
+#define TX4927_PCIC_PCICCFG_HRST 0x00000800
+#define TX4927_PCIC_PCICCFG_SRST 0x00000400
+#define TX4927_PCIC_PCICCFG_IRBER 0x00000200
+#define TX4927_PCIC_PCICCFG_G2PMEN(ch) (0x00000100>>(ch))
+#define TX4927_PCIC_PCICCFG_G2PM0EN 0x00000100
+#define TX4927_PCIC_PCICCFG_G2PM1EN 0x00000080
+#define TX4927_PCIC_PCICCFG_G2PM2EN 0x00000040
+#define TX4927_PCIC_PCICCFG_G2PIOEN 0x00000020
+#define TX4927_PCIC_PCICCFG_TCAR 0x00000010
+#define TX4927_PCIC_PCICCFG_ICAEN 0x00000008
+
+/* bits for P2GMnGBASE */
+#define TX4927_PCIC_P2GMnGBASE_TMEMEN 0x0000004000000000ULL
+#define TX4927_PCIC_P2GMnGBASE_TBSDIS 0x0000002000000000ULL
+#define TX4927_PCIC_P2GMnGBASE_TECHG 0x0000001000000000ULL
+
+/* bits for P2GIOGBASE */
+#define TX4927_PCIC_P2GIOGBASE_TIOEN 0x0000004000000000ULL
+#define TX4927_PCIC_P2GIOGBASE_TBSDIS 0x0000002000000000ULL
+#define TX4927_PCIC_P2GIOGBASE_TECHG 0x0000001000000000ULL
+
+#define TX4927_PCIC_IDSEL_AD_TO_SLOT(ad) ((ad) - 11)
+#define TX4927_PCIC_MAX_DEVNU TX4927_PCIC_IDSEL_AD_TO_SLOT(32)
+
+/* bits for PDMCFG */
+#define TX4927_PCIC_PDMCFG_RSTFIFO 0x00200000
+#define TX4927_PCIC_PDMCFG_EXFER 0x00100000
+#define TX4927_PCIC_PDMCFG_REQDLY_MASK 0x00003800
+#define TX4927_PCIC_PDMCFG_REQDLY_NONE (0 << 11)
+#define TX4927_PCIC_PDMCFG_REQDLY_16 (1 << 11)
+#define TX4927_PCIC_PDMCFG_REQDLY_32 (2 << 11)
+#define TX4927_PCIC_PDMCFG_REQDLY_64 (3 << 11)
+#define TX4927_PCIC_PDMCFG_REQDLY_128 (4 << 11)
+#define TX4927_PCIC_PDMCFG_REQDLY_256 (5 << 11)
+#define TX4927_PCIC_PDMCFG_REQDLY_512 (6 << 11)
+#define TX4927_PCIC_PDMCFG_REQDLY_1024 (7 << 11)
+#define TX4927_PCIC_PDMCFG_ERRIE 0x00000400
+#define TX4927_PCIC_PDMCFG_NCCMPIE 0x00000200
+#define TX4927_PCIC_PDMCFG_NTCMPIE 0x00000100
+#define TX4927_PCIC_PDMCFG_CHNEN 0x00000080
+#define TX4927_PCIC_PDMCFG_XFRACT 0x00000040
+#define TX4927_PCIC_PDMCFG_BSWAP 0x00000020
+#define TX4927_PCIC_PDMCFG_XFRSIZE_MASK 0x0000000c
+#define TX4927_PCIC_PDMCFG_XFRSIZE_1DW 0x00000000
+#define TX4927_PCIC_PDMCFG_XFRSIZE_1QW 0x00000004
+#define TX4927_PCIC_PDMCFG_XFRSIZE_4QW 0x00000008
+#define TX4927_PCIC_PDMCFG_XFRDIRC 0x00000002
+#define TX4927_PCIC_PDMCFG_CHRST 0x00000001
+
+/* bits for PDMSTS */
+#define TX4927_PCIC_PDMSTS_REQCNT_MASK 0x3f000000
+#define TX4927_PCIC_PDMSTS_FIFOCNT_MASK 0x00f00000
+#define TX4927_PCIC_PDMSTS_FIFOWP_MASK 0x000c0000
+#define TX4927_PCIC_PDMSTS_FIFORP_MASK 0x00030000
+#define TX4927_PCIC_PDMSTS_ERRINT 0x00000800
+#define TX4927_PCIC_PDMSTS_DONEINT 0x00000400
+#define TX4927_PCIC_PDMSTS_CHNEN 0x00000200
+#define TX4927_PCIC_PDMSTS_XFRACT 0x00000100
+#define TX4927_PCIC_PDMSTS_ACCMP 0x00000080
+#define TX4927_PCIC_PDMSTS_NCCMP 0x00000040
+#define TX4927_PCIC_PDMSTS_NTCMP 0x00000020
+#define TX4927_PCIC_PDMSTS_CFGERR 0x00000008
+#define TX4927_PCIC_PDMSTS_PCIERR 0x00000004
+#define TX4927_PCIC_PDMSTS_CHNERR 0x00000002
+#define TX4927_PCIC_PDMSTS_DATAERR 0x00000001
+#define TX4927_PCIC_PDMSTS_ALL_CMP 0x000000e0
+#define TX4927_PCIC_PDMSTS_ALL_ERR 0x0000000f
+
+struct tx4927_pcic_reg __iomem *get_tx4927_pcicptr(
+ struct pci_controller *channel);
+void tx4927_pcic_setup(struct tx4927_pcic_reg __iomem *pcicptr,
+ struct pci_controller *channel, int extarb);
+void tx4927_report_pcic_status(void);
+char *tx4927_pcibios_setup(char *str);
+void tx4927_dump_pcic_settings(void);
+irqreturn_t tx4927_pcierr_interrupt(int irq, void *dev_id);
+
+#endif /* __ASM_TXX9_TX4927PCIC_H */
diff --git a/arch/mips/include/asm/txx9/tx4938.h b/arch/mips/include/asm/txx9/tx4938.h
new file mode 100644
index 000000000..6ca767ee6
--- /dev/null
+++ b/arch/mips/include/asm/txx9/tx4938.h
@@ -0,0 +1,312 @@
+/*
+ * Definitions for TX4937/TX4938
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
+ */
+#ifndef __ASM_TXX9_TX4938_H
+#define __ASM_TXX9_TX4938_H
+
+/* some controllers are compatible with 4927 */
+#include <asm/txx9/tx4927.h>
+
+#ifdef CONFIG_64BIT
+#define TX4938_REG_BASE 0xffffffffff1f0000UL /* == TX4937_REG_BASE */
+#else
+#define TX4938_REG_BASE 0xff1f0000UL /* == TX4937_REG_BASE */
+#endif
+#define TX4938_REG_SIZE 0x00010000 /* == TX4937_REG_SIZE */
+
+/* NDFMC, SRAMC, PCIC1, SPIC: TX4938 only */
+#define TX4938_NDFMC_REG (TX4938_REG_BASE + 0x5000)
+#define TX4938_SRAMC_REG (TX4938_REG_BASE + 0x6000)
+#define TX4938_PCIC1_REG (TX4938_REG_BASE + 0x7000)
+#define TX4938_SDRAMC_REG (TX4938_REG_BASE + 0x8000)
+#define TX4938_EBUSC_REG (TX4938_REG_BASE + 0x9000)
+#define TX4938_DMA_REG(ch) (TX4938_REG_BASE + 0xb000 + (ch) * 0x800)
+#define TX4938_PCIC_REG (TX4938_REG_BASE + 0xd000)
+#define TX4938_CCFG_REG (TX4938_REG_BASE + 0xe000)
+#define TX4938_NR_TMR 3
+#define TX4938_TMR_REG(ch) ((TX4938_REG_BASE + 0xf000) + (ch) * 0x100)
+#define TX4938_NR_SIO 2
+#define TX4938_SIO_REG(ch) ((TX4938_REG_BASE + 0xf300) + (ch) * 0x100)
+#define TX4938_PIO_REG (TX4938_REG_BASE + 0xf500)
+#define TX4938_IRC_REG (TX4938_REG_BASE + 0xf600)
+#define TX4938_ACLC_REG (TX4938_REG_BASE + 0xf700)
+#define TX4938_SPI_REG (TX4938_REG_BASE + 0xf800)
+
+struct tx4938_sramc_reg {
+ u64 cr;
+};
+
+struct tx4938_ccfg_reg {
+ u64 ccfg;
+ u64 crir;
+ u64 pcfg;
+ u64 toea;
+ u64 clkctr;
+ u64 unused0;
+ u64 garbc;
+ u64 unused1;
+ u64 unused2;
+ u64 ramp;
+ u64 unused3;
+ u64 jmpadr;
+};
+
+/*
+ * IRC
+ */
+
+#define TX4938_IR_ECCERR 0
+#define TX4938_IR_WTOERR 1
+#define TX4938_NUM_IR_INT 6
+#define TX4938_IR_INT(n) (2 + (n))
+#define TX4938_NUM_IR_SIO 2
+#define TX4938_IR_SIO(n) (8 + (n))
+#define TX4938_NUM_IR_DMA 4
+#define TX4938_IR_DMA(ch, n) ((ch ? 27 : 10) + (n)) /* 10-13, 27-30 */
+#define TX4938_IR_PIO 14
+#define TX4938_IR_PDMAC 15
+#define TX4938_IR_PCIC 16
+#define TX4938_NUM_IR_TMR 3
+#define TX4938_IR_TMR(n) (17 + (n))
+#define TX4938_IR_NDFMC 21
+#define TX4938_IR_PCIERR 22
+#define TX4938_IR_PCIPME 23
+#define TX4938_IR_ACLC 24
+#define TX4938_IR_ACLCPME 25
+#define TX4938_IR_PCIC1 26
+#define TX4938_IR_SPI 31
+#define TX4938_NUM_IR 32
+/* multiplex */
+#define TX4938_IR_ETH0 TX4938_IR_INT(4)
+#define TX4938_IR_ETH1 TX4938_IR_INT(3)
+
+#define TX4938_IRC_INT 2 /* IP[2] in Status register */
+
+#define TX4938_NUM_PIO 16
+
+/*
+ * CCFG
+ */
+/* CCFG : Chip Configuration */
+#define TX4938_CCFG_WDRST 0x0000020000000000ULL
+#define TX4938_CCFG_WDREXEN 0x0000010000000000ULL
+#define TX4938_CCFG_BCFG_MASK 0x000000ff00000000ULL
+#define TX4938_CCFG_TINTDIS 0x01000000
+#define TX4938_CCFG_PCI66 0x00800000
+#define TX4938_CCFG_PCIMODE 0x00400000
+#define TX4938_CCFG_PCI1_66 0x00200000
+#define TX4938_CCFG_DIVMODE_MASK 0x001e0000
+#define TX4938_CCFG_DIVMODE_2 (0x4 << 17)
+#define TX4938_CCFG_DIVMODE_2_5 (0xf << 17)
+#define TX4938_CCFG_DIVMODE_3 (0x5 << 17)
+#define TX4938_CCFG_DIVMODE_4 (0x6 << 17)
+#define TX4938_CCFG_DIVMODE_4_5 (0xd << 17)
+#define TX4938_CCFG_DIVMODE_8 (0x0 << 17)
+#define TX4938_CCFG_DIVMODE_10 (0xb << 17)
+#define TX4938_CCFG_DIVMODE_12 (0x1 << 17)
+#define TX4938_CCFG_DIVMODE_16 (0x2 << 17)
+#define TX4938_CCFG_DIVMODE_18 (0x9 << 17)
+#define TX4938_CCFG_BEOW 0x00010000
+#define TX4938_CCFG_WR 0x00008000
+#define TX4938_CCFG_TOE 0x00004000
+#define TX4938_CCFG_PCIARB 0x00002000
+#define TX4938_CCFG_PCIDIVMODE_MASK 0x00001c00
+#define TX4938_CCFG_PCIDIVMODE_4 (0x1 << 10)
+#define TX4938_CCFG_PCIDIVMODE_4_5 (0x3 << 10)
+#define TX4938_CCFG_PCIDIVMODE_5 (0x5 << 10)
+#define TX4938_CCFG_PCIDIVMODE_5_5 (0x7 << 10)
+#define TX4938_CCFG_PCIDIVMODE_8 (0x0 << 10)
+#define TX4938_CCFG_PCIDIVMODE_9 (0x2 << 10)
+#define TX4938_CCFG_PCIDIVMODE_10 (0x4 << 10)
+#define TX4938_CCFG_PCIDIVMODE_11 (0x6 << 10)
+#define TX4938_CCFG_PCI1DMD 0x00000100
+#define TX4938_CCFG_SYSSP_MASK 0x000000c0
+#define TX4938_CCFG_ENDIAN 0x00000004
+#define TX4938_CCFG_HALT 0x00000002
+#define TX4938_CCFG_ACEHOLD 0x00000001
+
+/* PCFG : Pin Configuration */
+#define TX4938_PCFG_ETH0_SEL 0x8000000000000000ULL
+#define TX4938_PCFG_ETH1_SEL 0x4000000000000000ULL
+#define TX4938_PCFG_ATA_SEL 0x2000000000000000ULL
+#define TX4938_PCFG_ISA_SEL 0x1000000000000000ULL
+#define TX4938_PCFG_SPI_SEL 0x0800000000000000ULL
+#define TX4938_PCFG_NDF_SEL 0x0400000000000000ULL
+#define TX4938_PCFG_SDCLKDLY_MASK 0x30000000
+#define TX4938_PCFG_SDCLKDLY(d) ((d)<<28)
+#define TX4938_PCFG_SYSCLKEN 0x08000000
+#define TX4938_PCFG_SDCLKEN_ALL 0x07800000
+#define TX4938_PCFG_SDCLKEN(ch) (0x00800000<<(ch))
+#define TX4938_PCFG_PCICLKEN_ALL 0x003f0000
+#define TX4938_PCFG_PCICLKEN(ch) (0x00010000<<(ch))
+#define TX4938_PCFG_SEL2 0x00000200
+#define TX4938_PCFG_SEL1 0x00000100
+#define TX4938_PCFG_DMASEL_ALL 0x0000000f
+#define TX4938_PCFG_DMASEL0_DRQ0 0x00000000
+#define TX4938_PCFG_DMASEL0_SIO1 0x00000001
+#define TX4938_PCFG_DMASEL1_DRQ1 0x00000000
+#define TX4938_PCFG_DMASEL1_SIO1 0x00000002
+#define TX4938_PCFG_DMASEL2_DRQ2 0x00000000
+#define TX4938_PCFG_DMASEL2_SIO0 0x00000004
+#define TX4938_PCFG_DMASEL3_DRQ3 0x00000000
+#define TX4938_PCFG_DMASEL3_SIO0 0x00000008
+
+/* CLKCTR : Clock Control */
+#define TX4938_CLKCTR_NDFCKD 0x0001000000000000ULL
+#define TX4938_CLKCTR_NDFRST 0x0000000100000000ULL
+#define TX4938_CLKCTR_ETH1CKD 0x80000000
+#define TX4938_CLKCTR_ETH0CKD 0x40000000
+#define TX4938_CLKCTR_SPICKD 0x20000000
+#define TX4938_CLKCTR_SRAMCKD 0x10000000
+#define TX4938_CLKCTR_PCIC1CKD 0x08000000
+#define TX4938_CLKCTR_DMA1CKD 0x04000000
+#define TX4938_CLKCTR_ACLCKD 0x02000000
+#define TX4938_CLKCTR_PIOCKD 0x01000000
+#define TX4938_CLKCTR_DMACKD 0x00800000
+#define TX4938_CLKCTR_PCICKD 0x00400000
+#define TX4938_CLKCTR_TM0CKD 0x00100000
+#define TX4938_CLKCTR_TM1CKD 0x00080000
+#define TX4938_CLKCTR_TM2CKD 0x00040000
+#define TX4938_CLKCTR_SIO0CKD 0x00020000
+#define TX4938_CLKCTR_SIO1CKD 0x00010000
+#define TX4938_CLKCTR_ETH1RST 0x00008000
+#define TX4938_CLKCTR_ETH0RST 0x00004000
+#define TX4938_CLKCTR_SPIRST 0x00002000
+#define TX4938_CLKCTR_SRAMRST 0x00001000
+#define TX4938_CLKCTR_PCIC1RST 0x00000800
+#define TX4938_CLKCTR_DMA1RST 0x00000400
+#define TX4938_CLKCTR_ACLRST 0x00000200
+#define TX4938_CLKCTR_PIORST 0x00000100
+#define TX4938_CLKCTR_DMARST 0x00000080
+#define TX4938_CLKCTR_PCIRST 0x00000040
+#define TX4938_CLKCTR_TM0RST 0x00000010
+#define TX4938_CLKCTR_TM1RST 0x00000008
+#define TX4938_CLKCTR_TM2RST 0x00000004
+#define TX4938_CLKCTR_SIO0RST 0x00000002
+#define TX4938_CLKCTR_SIO1RST 0x00000001
+
+/*
+ * DMA
+ */
+/* bits for MCR */
+#define TX4938_DMA_MCR_EIS(ch) (0x10000000<<(ch))
+#define TX4938_DMA_MCR_DIS(ch) (0x01000000<<(ch))
+#define TX4938_DMA_MCR_RSFIF 0x00000080
+#define TX4938_DMA_MCR_FIFUM(ch) (0x00000008<<(ch))
+#define TX4938_DMA_MCR_RPRT 0x00000002
+#define TX4938_DMA_MCR_MSTEN 0x00000001
+
+/* bits for CCRn */
+#define TX4938_DMA_CCR_IMMCHN 0x20000000
+#define TX4938_DMA_CCR_USEXFSZ 0x10000000
+#define TX4938_DMA_CCR_LE 0x08000000
+#define TX4938_DMA_CCR_DBINH 0x04000000
+#define TX4938_DMA_CCR_SBINH 0x02000000
+#define TX4938_DMA_CCR_CHRST 0x01000000
+#define TX4938_DMA_CCR_RVBYTE 0x00800000
+#define TX4938_DMA_CCR_ACKPOL 0x00400000
+#define TX4938_DMA_CCR_REQPL 0x00200000
+#define TX4938_DMA_CCR_EGREQ 0x00100000
+#define TX4938_DMA_CCR_CHDN 0x00080000
+#define TX4938_DMA_CCR_DNCTL 0x00060000
+#define TX4938_DMA_CCR_EXTRQ 0x00010000
+#define TX4938_DMA_CCR_INTRQD 0x0000e000
+#define TX4938_DMA_CCR_INTENE 0x00001000
+#define TX4938_DMA_CCR_INTENC 0x00000800
+#define TX4938_DMA_CCR_INTENT 0x00000400
+#define TX4938_DMA_CCR_CHNEN 0x00000200
+#define TX4938_DMA_CCR_XFACT 0x00000100
+#define TX4938_DMA_CCR_SMPCHN 0x00000020
+#define TX4938_DMA_CCR_XFSZ(order) (((order) << 2) & 0x0000001c)
+#define TX4938_DMA_CCR_XFSZ_1W TX4938_DMA_CCR_XFSZ(2)
+#define TX4938_DMA_CCR_XFSZ_2W TX4938_DMA_CCR_XFSZ(3)
+#define TX4938_DMA_CCR_XFSZ_4W TX4938_DMA_CCR_XFSZ(4)
+#define TX4938_DMA_CCR_XFSZ_8W TX4938_DMA_CCR_XFSZ(5)
+#define TX4938_DMA_CCR_XFSZ_16W TX4938_DMA_CCR_XFSZ(6)
+#define TX4938_DMA_CCR_XFSZ_32W TX4938_DMA_CCR_XFSZ(7)
+#define TX4938_DMA_CCR_MEMIO 0x00000002
+#define TX4938_DMA_CCR_SNGAD 0x00000001
+
+/* bits for CSRn */
+#define TX4938_DMA_CSR_CHNEN 0x00000400
+#define TX4938_DMA_CSR_STLXFER 0x00000200
+#define TX4938_DMA_CSR_CHNACT 0x00000100
+#define TX4938_DMA_CSR_ABCHC 0x00000080
+#define TX4938_DMA_CSR_NCHNC 0x00000040
+#define TX4938_DMA_CSR_NTRNFC 0x00000020
+#define TX4938_DMA_CSR_EXTDN 0x00000010
+#define TX4938_DMA_CSR_CFERR 0x00000008
+#define TX4938_DMA_CSR_CHERR 0x00000004
+#define TX4938_DMA_CSR_DESERR 0x00000002
+#define TX4938_DMA_CSR_SORERR 0x00000001
+
+#define tx4938_sdramcptr tx4927_sdramcptr
+#define tx4938_ebuscptr tx4927_ebuscptr
+#define tx4938_pcicptr tx4927_pcicptr
+#define tx4938_pcic1ptr \
+ ((struct tx4927_pcic_reg __iomem *)TX4938_PCIC1_REG)
+#define tx4938_ccfgptr \
+ ((struct tx4938_ccfg_reg __iomem *)TX4938_CCFG_REG)
+#define tx4938_pioptr ((struct txx9_pio_reg __iomem *)TX4938_PIO_REG)
+#define tx4938_sramcptr \
+ ((struct tx4938_sramc_reg __iomem *)TX4938_SRAMC_REG)
+
+
+#define TX4938_REV_PCODE() \
+ ((__u32)__raw_readq(&tx4938_ccfgptr->crir) >> 16)
+
+#define tx4938_ccfg_clear(bits) tx4927_ccfg_clear(bits)
+#define tx4938_ccfg_set(bits) tx4927_ccfg_set(bits)
+#define tx4938_ccfg_change(change, new) tx4927_ccfg_change(change, new)
+
+#define TX4938_SDRAMC_CR(ch) TX4927_SDRAMC_CR(ch)
+#define TX4938_SDRAMC_BA(ch) TX4927_SDRAMC_BA(ch)
+#define TX4938_SDRAMC_SIZE(ch) TX4927_SDRAMC_SIZE(ch)
+
+#define TX4938_EBUSC_CR(ch) TX4927_EBUSC_CR(ch)
+#define TX4938_EBUSC_BA(ch) TX4927_EBUSC_BA(ch)
+#define TX4938_EBUSC_SIZE(ch) TX4927_EBUSC_SIZE(ch)
+#define TX4938_EBUSC_WIDTH(ch) TX4927_EBUSC_WIDTH(ch)
+
+#define tx4938_get_mem_size() tx4927_get_mem_size()
+void tx4938_wdt_init(void);
+void tx4938_setup(void);
+void tx4938_time_init(unsigned int tmrnr);
+void tx4938_sio_init(unsigned int sclk, unsigned int cts_mask);
+void tx4938_spi_init(int busid);
+void tx4938_ethaddr_init(unsigned char *addr0, unsigned char *addr1);
+int tx4938_report_pciclk(void);
+void tx4938_report_pci1clk(void);
+int tx4938_pciclk66_setup(void);
+struct pci_dev;
+int tx4938_pcic1_map_irq(const struct pci_dev *dev, u8 slot);
+void tx4938_setup_pcierr_irq(void);
+void tx4938_irq_init(void);
+void tx4938_mtd_init(int ch);
+void tx4938_ndfmc_init(unsigned int hold, unsigned int spw);
+
+struct tx4938ide_platform_info {
+ /*
+ * I/O port shift, for platforms with ports that are
+ * constantly spaced and need larger than the 1-byte
+ * spacing used by ata_std_ports().
+ */
+ unsigned int ioport_shift;
+ unsigned int gbus_clock; /* 0 means no PIO mode tuning. */
+ unsigned int ebus_ch;
+};
+
+void tx4938_ata_init(unsigned int irq, unsigned int shift, int tune);
+void tx4938_dmac_init(int memcpy_chan0, int memcpy_chan1);
+void tx4938_aclc_init(void);
+void tx4938_sramc_init(void);
+
+#endif
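
Most of the TX4938-only peripherals are pin-multiplexed, and the PCFG *_SEL bits above choose which function owns the pads. A hedged sketch of routing the pins to the two on-chip ethernet MACs with the txx9_set64() helper inherited from tx4927.h; whether a board can actually do this depends on what else shares those pins:

	#include <asm/txx9/tx4938.h>

	/* Illustrative: claim the pads for ETH0/ETH1 by setting their SEL bits. */
	static void example_route_eth_pins(void)
	{
		txx9_set64(&tx4938_ccfgptr->pcfg,
			   TX4938_PCFG_ETH0_SEL | TX4938_PCFG_ETH1_SEL);
	}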
diff --git a/arch/mips/include/asm/txx9/tx4939.h b/arch/mips/include/asm/txx9/tx4939.h
new file mode 100644
index 000000000..abf980af9
--- /dev/null
+++ b/arch/mips/include/asm/txx9/tx4939.h
@@ -0,0 +1,524 @@
+/*
+ * Definitions for TX4939
+ *
+ * Copyright (C) 2000-2001,2005-2006 Toshiba Corporation
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifndef __ASM_TXX9_TX4939_H
+#define __ASM_TXX9_TX4939_H
+
+/* some controllers are compatible with 4927/4938 */
+#include <asm/txx9/tx4938.h>
+
+#ifdef CONFIG_64BIT
+#define TX4939_REG_BASE 0xffffffffff1f0000UL /* == TX4938_REG_BASE */
+#else
+#define TX4939_REG_BASE 0xff1f0000UL /* == TX4938_REG_BASE */
+#endif
+#define TX4939_REG_SIZE 0x00010000 /* == TX4938_REG_SIZE */
+
+#define TX4939_ATA_REG(ch) (TX4939_REG_BASE + 0x3000 + (ch) * 0x1000)
+#define TX4939_NDFMC_REG (TX4939_REG_BASE + 0x5000)
+#define TX4939_SRAMC_REG (TX4939_REG_BASE + 0x6000)
+#define TX4939_CRYPTO_REG (TX4939_REG_BASE + 0x6800)
+#define TX4939_PCIC1_REG (TX4939_REG_BASE + 0x7000)
+#define TX4939_DDRC_REG (TX4939_REG_BASE + 0x8000)
+#define TX4939_EBUSC_REG (TX4939_REG_BASE + 0x9000)
+#define TX4939_VPC_REG (TX4939_REG_BASE + 0xa000)
+#define TX4939_DMA_REG(ch) (TX4939_REG_BASE + 0xb000 + (ch) * 0x800)
+#define TX4939_PCIC_REG (TX4939_REG_BASE + 0xd000)
+#define TX4939_CCFG_REG (TX4939_REG_BASE + 0xe000)
+#define TX4939_IRC_REG (TX4939_REG_BASE + 0xe800)
+#define TX4939_NR_TMR 6 /* 0xf000,0xf100,0xf200,0xfd00,0xfe00,0xff00 */
+#define TX4939_TMR_REG(ch) \
+ (TX4939_REG_BASE + 0xf000 + ((ch) + ((ch) >= 3) * 10) * 0x100)
+#define TX4939_NR_SIO 4 /* 0xf300, 0xf400, 0xf380, 0xf480 */
+#define TX4939_SIO_REG(ch) \
+ (TX4939_REG_BASE + 0xf300 + (((ch) & 1) << 8) + (((ch) & 2) << 6))
+#define TX4939_ACLC_REG (TX4939_REG_BASE + 0xf700)
+#define TX4939_SPI_REG (TX4939_REG_BASE + 0xf800)
+#define TX4939_I2C_REG (TX4939_REG_BASE + 0xf900)
+#define TX4939_I2S_REG (TX4939_REG_BASE + 0xfa00)
+#define TX4939_RTC_REG (TX4939_REG_BASE + 0xfb00)
+#define TX4939_CIR_REG (TX4939_REG_BASE + 0xfc00)
+
+#define TX4939_RNG_REG (TX4939_CRYPTO_REG + 0xb0)
+
+struct tx4939_le_reg {
+ __u32 r;
+ __u32 unused;
+};
+
+struct tx4939_ddrc_reg {
+ struct tx4939_le_reg ctl[47];
+ __u64 unused0[17];
+ __u64 winen;
+ __u64 win[4];
+};
+
+struct tx4939_ccfg_reg {
+ __u64 ccfg;
+ __u64 crir;
+ __u64 pcfg;
+ __u64 toea;
+ __u64 clkctr;
+ __u64 unused0;
+ __u64 garbc;
+ __u64 unused1[2];
+ __u64 ramp;
+ __u64 unused2[2];
+ __u64 dskwctrl;
+ __u64 mclkosc;
+ __u64 mclkctl;
+ __u64 unused3[17];
+ struct {
+ __u64 mr;
+ __u64 dr;
+ } gpio[2];
+};
+
+struct tx4939_irc_reg {
+ struct tx4939_le_reg den;
+ struct tx4939_le_reg scipb;
+ struct tx4939_le_reg dm[2];
+ struct tx4939_le_reg lvl[16];
+ struct tx4939_le_reg msk;
+ struct tx4939_le_reg edc;
+ struct tx4939_le_reg pnd0;
+ struct tx4939_le_reg cs;
+ struct tx4939_le_reg pnd1;
+ struct tx4939_le_reg dm2[2];
+ struct tx4939_le_reg dbr[2];
+ struct tx4939_le_reg dben;
+ struct tx4939_le_reg unused0[2];
+ struct tx4939_le_reg flag[2];
+ struct tx4939_le_reg pol;
+ struct tx4939_le_reg cnt;
+ struct tx4939_le_reg maskint;
+ struct tx4939_le_reg maskext;
+};
+
+struct tx4939_crypto_reg {
+ struct tx4939_le_reg csr;
+ struct tx4939_le_reg idesptr;
+ struct tx4939_le_reg cdesptr;
+ struct tx4939_le_reg buserr;
+ struct tx4939_le_reg cip_tout;
+ struct tx4939_le_reg cir;
+ union {
+ struct {
+ struct tx4939_le_reg data[8];
+ struct tx4939_le_reg ctrl;
+ } gen;
+ struct {
+ struct {
+ struct tx4939_le_reg l;
+ struct tx4939_le_reg u;
+ } key[3], ini;
+ struct tx4939_le_reg ctrl;
+ } des;
+ struct {
+ struct tx4939_le_reg key[4];
+ struct tx4939_le_reg ini[4];
+ struct tx4939_le_reg ctrl;
+ } aes;
+ struct {
+ struct {
+ struct tx4939_le_reg l;
+ struct tx4939_le_reg u;
+ } cnt;
+ struct tx4939_le_reg ini[5];
+ struct tx4939_le_reg unused;
+ struct tx4939_le_reg ctrl;
+ } hash;
+ } cdr;
+ struct tx4939_le_reg unused0[7];
+ struct tx4939_le_reg rcsr;
+ struct tx4939_le_reg rpr;
+ __u64 rdr;
+ __u64 ror[3];
+ struct tx4939_le_reg unused1[2];
+ struct tx4939_le_reg xorslr;
+ struct tx4939_le_reg xorsur;
+};
+
+struct tx4939_crypto_desc {
+ __u32 src;
+ __u32 dst;
+ __u32 next;
+ __u32 ctrl;
+ __u32 index;
+ __u32 xor;
+};
+
+struct tx4939_vpc_reg {
+ struct tx4939_le_reg csr;
+ struct {
+ struct tx4939_le_reg ctrlA;
+ struct tx4939_le_reg ctrlB;
+ struct tx4939_le_reg idesptr;
+ struct tx4939_le_reg cdesptr;
+ } port[3];
+ struct tx4939_le_reg buserr;
+};
+
+struct tx4939_vpc_desc {
+ __u32 src;
+ __u32 next;
+ __u32 ctrl1;
+ __u32 ctrl2;
+};
+
+/*
+ * IRC
+ */
+#define TX4939_IR_NONE 0
+#define TX4939_IR_DDR 1
+#define TX4939_IR_WTOERR 2
+#define TX4939_NUM_IR_INT 3
+#define TX4939_IR_INT(n) (3 + (n))
+#define TX4939_NUM_IR_ETH 2
+#define TX4939_IR_ETH(n) ((n) ? 43 : 6)
+#define TX4939_IR_VIDEO 7
+#define TX4939_IR_CIR 8
+#define TX4939_NUM_IR_SIO 4
+#define TX4939_IR_SIO(n) ((n) ? 43 + (n) : 9) /* 9,44-46 */
+#define TX4939_NUM_IR_DMA 4
+#define TX4939_IR_DMA(ch, n) (((ch) ? 22 : 10) + (n)) /* 10-13,22-25 */
+#define TX4939_IR_IRC 14
+#define TX4939_IR_PDMAC 15
+#define TX4939_NUM_IR_TMR 6
+#define TX4939_IR_TMR(n) (((n) >= 3 ? 45 : 16) + (n)) /* 16-18,48-50 */
+#define TX4939_NUM_IR_ATA 2
+#define TX4939_IR_ATA(n) (19 + (n))
+#define TX4939_IR_ACLC 21
+#define TX4939_IR_CIPHER 26
+#define TX4939_IR_INTA 27
+#define TX4939_IR_INTB 28
+#define TX4939_IR_INTC 29
+#define TX4939_IR_INTD 30
+#define TX4939_IR_I2C 33
+#define TX4939_IR_SPI 34
+#define TX4939_IR_PCIC 35
+#define TX4939_IR_PCIC1 36
+#define TX4939_IR_PCIERR 37
+#define TX4939_IR_PCIPME 38
+#define TX4939_IR_NDFMC 39
+#define TX4939_IR_ACLCPME 40
+#define TX4939_IR_RTC 41
+#define TX4939_IR_RND 42
+#define TX4939_IR_I2S 47
+#define TX4939_NUM_IR 64
+
+#define TX4939_IRC_INT 2 /* IP[2] in Status register */
+
+/*
+ * CCFG
+ */
+/* CCFG : Chip Configuration */
+#define TX4939_CCFG_PCIBOOT 0x0000040000000000ULL
+#define TX4939_CCFG_WDRST 0x0000020000000000ULL
+#define TX4939_CCFG_WDREXEN 0x0000010000000000ULL
+#define TX4939_CCFG_BCFG_MASK 0x000000ff00000000ULL
+#define TX4939_CCFG_GTOT_MASK 0x06000000
+#define TX4939_CCFG_GTOT_4096 0x06000000
+#define TX4939_CCFG_GTOT_2048 0x04000000
+#define TX4939_CCFG_GTOT_1024 0x02000000
+#define TX4939_CCFG_GTOT_512 0x00000000
+#define TX4939_CCFG_TINTDIS 0x01000000
+#define TX4939_CCFG_PCI66 0x00800000
+#define TX4939_CCFG_PCIMODE 0x00400000
+#define TX4939_CCFG_SSCG 0x00100000
+#define TX4939_CCFG_MULCLK_MASK 0x000e0000
+#define TX4939_CCFG_MULCLK_8 (0x7 << 17)
+#define TX4939_CCFG_MULCLK_9 (0x0 << 17)
+#define TX4939_CCFG_MULCLK_10 (0x1 << 17)
+#define TX4939_CCFG_MULCLK_11 (0x2 << 17)
+#define TX4939_CCFG_MULCLK_12 (0x3 << 17)
+#define TX4939_CCFG_MULCLK_13 (0x4 << 17)
+#define TX4939_CCFG_MULCLK_14 (0x5 << 17)
+#define TX4939_CCFG_MULCLK_15 (0x6 << 17)
+#define TX4939_CCFG_BEOW 0x00010000
+#define TX4939_CCFG_WR 0x00008000
+#define TX4939_CCFG_TOE 0x00004000
+#define TX4939_CCFG_PCIARB 0x00002000
+#define TX4939_CCFG_YDIVMODE_MASK 0x00001c00
+#define TX4939_CCFG_YDIVMODE_2 (0x0 << 10)
+#define TX4939_CCFG_YDIVMODE_3 (0x1 << 10)
+#define TX4939_CCFG_YDIVMODE_5 (0x6 << 10)
+#define TX4939_CCFG_YDIVMODE_6 (0x7 << 10)
+#define TX4939_CCFG_PTSEL 0x00000200
+#define TX4939_CCFG_BESEL 0x00000100
+#define TX4939_CCFG_SYSSP_MASK 0x000000c0
+#define TX4939_CCFG_ACKSEL 0x00000020
+#define TX4939_CCFG_ROMW 0x00000010
+#define TX4939_CCFG_ENDIAN 0x00000004
+#define TX4939_CCFG_ARMODE 0x00000002
+#define TX4939_CCFG_ACEHOLD 0x00000001
+
+/* PCFG : Pin Configuration */
+#define TX4939_PCFG_SIO2MODE_MASK 0xc000000000000000ULL
+#define TX4939_PCFG_SIO2MODE_GPIO 0x8000000000000000ULL
+#define TX4939_PCFG_SIO2MODE_SIO2 0x4000000000000000ULL
+#define TX4939_PCFG_SIO2MODE_SIO0 0x0000000000000000ULL
+#define TX4939_PCFG_SPIMODE 0x2000000000000000ULL
+#define TX4939_PCFG_I2CMODE 0x1000000000000000ULL
+#define TX4939_PCFG_I2SMODE_MASK 0x0c00000000000000ULL
+#define TX4939_PCFG_I2SMODE_GPIO 0x0c00000000000000ULL
+#define TX4939_PCFG_I2SMODE_I2S 0x0800000000000000ULL
+#define TX4939_PCFG_I2SMODE_I2S_ALT 0x0400000000000000ULL
+#define TX4939_PCFG_I2SMODE_ACLC 0x0000000000000000ULL
+#define TX4939_PCFG_SIO3MODE 0x0200000000000000ULL
+#define TX4939_PCFG_DMASEL3 0x0004000000000000ULL
+#define TX4939_PCFG_DMASEL3_SIO0 0x0004000000000000ULL
+#define TX4939_PCFG_DMASEL3_NDFC 0x0000000000000000ULL
+#define TX4939_PCFG_VSSMODE 0x0000200000000000ULL
+#define TX4939_PCFG_VPSMODE 0x0000100000000000ULL
+#define TX4939_PCFG_ET1MODE 0x0000080000000000ULL
+#define TX4939_PCFG_ET0MODE 0x0000040000000000ULL
+#define TX4939_PCFG_ATA1MODE 0x0000020000000000ULL
+#define TX4939_PCFG_ATA0MODE 0x0000010000000000ULL
+#define TX4939_PCFG_BP_PLL 0x0000000100000000ULL
+
+#define TX4939_PCFG_SYSCLKEN 0x08000000
+#define TX4939_PCFG_PCICLKEN_ALL 0x000f0000
+#define TX4939_PCFG_PCICLKEN(ch) (0x00010000<<(ch))
+#define TX4939_PCFG_SPEED1 0x00002000
+#define TX4939_PCFG_SPEED0 0x00001000
+#define TX4939_PCFG_ITMODE 0x00000300
+#define TX4939_PCFG_DMASEL_ALL (0x00000007 | TX4939_PCFG_DMASEL3)
+#define TX4939_PCFG_DMASEL2 0x00000004
+#define TX4939_PCFG_DMASEL2_DRQ2 0x00000000
+#define TX4939_PCFG_DMASEL2_SIO0 0x00000004
+#define TX4939_PCFG_DMASEL1 0x00000002
+#define TX4939_PCFG_DMASEL1_DRQ1 0x00000000
+#define TX4939_PCFG_DMASEL0 0x00000001
+#define TX4939_PCFG_DMASEL0_DRQ0 0x00000000
+
+/* CLKCTR : Clock Control */
+#define TX4939_CLKCTR_IOSCKD 0x8000000000000000ULL
+#define TX4939_CLKCTR_SYSCKD 0x4000000000000000ULL
+#define TX4939_CLKCTR_TM5CKD 0x2000000000000000ULL
+#define TX4939_CLKCTR_TM4CKD 0x1000000000000000ULL
+#define TX4939_CLKCTR_TM3CKD 0x0800000000000000ULL
+#define TX4939_CLKCTR_CIRCKD 0x0400000000000000ULL
+#define TX4939_CLKCTR_SIO3CKD 0x0200000000000000ULL
+#define TX4939_CLKCTR_SIO2CKD 0x0100000000000000ULL
+#define TX4939_CLKCTR_SIO1CKD 0x0080000000000000ULL
+#define TX4939_CLKCTR_VPCCKD 0x0040000000000000ULL
+#define TX4939_CLKCTR_EPCICKD 0x0020000000000000ULL
+#define TX4939_CLKCTR_ETH1CKD 0x0008000000000000ULL
+#define TX4939_CLKCTR_ATA1CKD 0x0004000000000000ULL
+#define TX4939_CLKCTR_BROMCKD 0x0002000000000000ULL
+#define TX4939_CLKCTR_NDCCKD 0x0001000000000000ULL
+#define TX4939_CLKCTR_I2CCKD 0x0000800000000000ULL
+#define TX4939_CLKCTR_ETH0CKD 0x0000400000000000ULL
+#define TX4939_CLKCTR_SPICKD 0x0000200000000000ULL
+#define TX4939_CLKCTR_SRAMCKD 0x0000100000000000ULL
+#define TX4939_CLKCTR_PCI1CKD 0x0000080000000000ULL
+#define TX4939_CLKCTR_DMA1CKD 0x0000040000000000ULL
+#define TX4939_CLKCTR_ACLCKD 0x0000020000000000ULL
+#define TX4939_CLKCTR_ATA0CKD 0x0000010000000000ULL
+#define TX4939_CLKCTR_DMA0CKD 0x0000008000000000ULL
+#define TX4939_CLKCTR_PCICCKD 0x0000004000000000ULL
+#define TX4939_CLKCTR_I2SCKD 0x0000002000000000ULL
+#define TX4939_CLKCTR_TM0CKD 0x0000001000000000ULL
+#define TX4939_CLKCTR_TM1CKD 0x0000000800000000ULL
+#define TX4939_CLKCTR_TM2CKD 0x0000000400000000ULL
+#define TX4939_CLKCTR_SIO0CKD 0x0000000200000000ULL
+#define TX4939_CLKCTR_CYPCKD 0x0000000100000000ULL
+#define TX4939_CLKCTR_IOSRST 0x80000000
+#define TX4939_CLKCTR_SYSRST 0x40000000
+#define TX4939_CLKCTR_TM5RST 0x20000000
+#define TX4939_CLKCTR_TM4RST 0x10000000
+#define TX4939_CLKCTR_TM3RST 0x08000000
+#define TX4939_CLKCTR_CIRRST 0x04000000
+#define TX4939_CLKCTR_SIO3RST 0x02000000
+#define TX4939_CLKCTR_SIO2RST 0x01000000
+#define TX4939_CLKCTR_SIO1RST 0x00800000
+#define TX4939_CLKCTR_VPCRST 0x00400000
+#define TX4939_CLKCTR_EPCIRST 0x00200000
+#define TX4939_CLKCTR_ETH1RST 0x00080000
+#define TX4939_CLKCTR_ATA1RST 0x00040000
+#define TX4939_CLKCTR_BROMRST 0x00020000
+#define TX4939_CLKCTR_NDCRST 0x00010000
+#define TX4939_CLKCTR_I2CRST 0x00008000
+#define TX4939_CLKCTR_ETH0RST 0x00004000
+#define TX4939_CLKCTR_SPIRST 0x00002000
+#define TX4939_CLKCTR_SRAMRST 0x00001000
+#define TX4939_CLKCTR_PCI1RST 0x00000800
+#define TX4939_CLKCTR_DMA1RST 0x00000400
+#define TX4939_CLKCTR_ACLRST 0x00000200
+#define TX4939_CLKCTR_ATA0RST 0x00000100
+#define TX4939_CLKCTR_DMA0RST 0x00000080
+#define TX4939_CLKCTR_PCICRST 0x00000040
+#define TX4939_CLKCTR_I2SRST 0x00000020
+#define TX4939_CLKCTR_TM0RST 0x00000010
+#define TX4939_CLKCTR_TM1RST 0x00000008
+#define TX4939_CLKCTR_TM2RST 0x00000004
+#define TX4939_CLKCTR_SIO0RST 0x00000002
+#define TX4939_CLKCTR_CYPRST 0x00000001
+
+/*
+ * CRYPTO
+ */
+#define TX4939_CRYPTO_CSR_SAESO 0x08000000
+#define TX4939_CRYPTO_CSR_SAESI 0x04000000
+#define TX4939_CRYPTO_CSR_SDESO 0x02000000
+#define TX4939_CRYPTO_CSR_SDESI 0x01000000
+#define TX4939_CRYPTO_CSR_INDXBST_MASK 0x00700000
+#define TX4939_CRYPTO_CSR_INDXBST(n) ((n) << 20)
+#define TX4939_CRYPTO_CSR_TOINT 0x00080000
+#define TX4939_CRYPTO_CSR_DCINT 0x00040000
+#define TX4939_CRYPTO_CSR_GBINT 0x00010000
+#define TX4939_CRYPTO_CSR_INDXAST_MASK 0x0000e000
+#define TX4939_CRYPTO_CSR_INDXAST(n) ((n) << 13)
+#define TX4939_CRYPTO_CSR_CSWAP_MASK 0x00001800
+#define TX4939_CRYPTO_CSR_CSWAP_NONE 0x00000000
+#define TX4939_CRYPTO_CSR_CSWAP_IN 0x00000800
+#define TX4939_CRYPTO_CSR_CSWAP_OUT 0x00001000
+#define TX4939_CRYPTO_CSR_CSWAP_BOTH 0x00001800
+#define TX4939_CRYPTO_CSR_CDIV_MASK 0x00000600
+#define TX4939_CRYPTO_CSR_CDIV_DIV2 0x00000000
+#define TX4939_CRYPTO_CSR_CDIV_DIV1 0x00000200
+#define TX4939_CRYPTO_CSR_CDIV_DIV2ALT 0x00000400
+#define TX4939_CRYPTO_CSR_CDIV_DIV1ALT 0x00000600
+#define TX4939_CRYPTO_CSR_PDINT_MASK 0x000000c0
+#define TX4939_CRYPTO_CSR_PDINT_ALL 0x00000000
+#define TX4939_CRYPTO_CSR_PDINT_END 0x00000040
+#define TX4939_CRYPTO_CSR_PDINT_NEXT 0x00000080
+#define TX4939_CRYPTO_CSR_PDINT_NONE 0x000000c0
+#define TX4939_CRYPTO_CSR_GINTE 0x00000008
+#define TX4939_CRYPTO_CSR_RSTD 0x00000004
+#define TX4939_CRYPTO_CSR_RSTC 0x00000002
+#define TX4939_CRYPTO_CSR_ENCR 0x00000001
+
+/* bits for tx4939_crypto_reg.cdr.gen.ctrl */
+#define TX4939_CRYPTO_CTX_ENGINE_MASK 0x00000003
+#define TX4939_CRYPTO_CTX_ENGINE_DES 0x00000000
+#define TX4939_CRYPTO_CTX_ENGINE_AES 0x00000001
+#define TX4939_CRYPTO_CTX_ENGINE_MD5 0x00000002
+#define TX4939_CRYPTO_CTX_ENGINE_SHA1 0x00000003
+#define TX4939_CRYPTO_CTX_TDMS 0x00000010
+#define TX4939_CRYPTO_CTX_CMS 0x00000020
+#define TX4939_CRYPTO_CTX_DMS 0x00000040
+#define TX4939_CRYPTO_CTX_UPDATE 0x00000080
+
+/* bits for tx4939_crypto_desc.ctrl */
+#define TX4939_CRYPTO_DESC_OB_CNT_MASK 0xffe00000
+#define TX4939_CRYPTO_DESC_OB_CNT(cnt) ((cnt) << 21)
+#define TX4939_CRYPTO_DESC_IB_CNT_MASK 0x001ffc00
+#define TX4939_CRYPTO_DESC_IB_CNT(cnt) ((cnt) << 10)
+#define TX4939_CRYPTO_DESC_START 0x00000200
+#define TX4939_CRYPTO_DESC_END 0x00000100
+#define TX4939_CRYPTO_DESC_XOR 0x00000010
+#define TX4939_CRYPTO_DESC_LAST 0x00000008
+#define TX4939_CRYPTO_DESC_ERR_MASK 0x00000006
+#define TX4939_CRYPTO_DESC_ERR_NONE 0x00000000
+#define TX4939_CRYPTO_DESC_ERR_TOUT 0x00000002
+#define TX4939_CRYPTO_DESC_ERR_DIGEST 0x00000004
+#define TX4939_CRYPTO_DESC_OWN 0x00000001
+
+/* bits for tx4939_crypto_desc.index */
+#define TX4939_CRYPTO_DESC_HASH_IDX_MASK 0x00000070
+#define TX4939_CRYPTO_DESC_HASH_IDX(idx) ((idx) << 4)
+#define TX4939_CRYPTO_DESC_ENCRYPT_IDX_MASK 0x00000007
+#define TX4939_CRYPTO_DESC_ENCRYPT_IDX(idx) ((idx) << 0)
+
+#define TX4939_CRYPTO_NR_SET 6
+
+#define TX4939_CRYPTO_RCSR_INTE 0x00000008
+#define TX4939_CRYPTO_RCSR_RST 0x00000004
+#define TX4939_CRYPTO_RCSR_FIN 0x00000002
+#define TX4939_CRYPTO_RCSR_ST 0x00000001
+
+/*
+ * VPC
+ */
+#define TX4939_VPC_CSR_GBINT 0x00010000
+#define TX4939_VPC_CSR_SWAPO 0x00000020
+#define TX4939_VPC_CSR_SWAPI 0x00000010
+#define TX4939_VPC_CSR_GINTE 0x00000008
+#define TX4939_VPC_CSR_RSTD 0x00000004
+#define TX4939_VPC_CSR_RSTVPC 0x00000002
+
+#define TX4939_VPC_CTRLA_VDPSN 0x00000200
+#define TX4939_VPC_CTRLA_PBUSY 0x00000100
+#define TX4939_VPC_CTRLA_DCINT 0x00000080
+#define TX4939_VPC_CTRLA_UOINT 0x00000040
+#define TX4939_VPC_CTRLA_PDINT_MASK 0x00000030
+#define TX4939_VPC_CTRLA_PDINT_ALL 0x00000000
+#define TX4939_VPC_CTRLA_PDINT_NEXT 0x00000010
+#define TX4939_VPC_CTRLA_PDINT_NONE 0x00000030
+#define TX4939_VPC_CTRLA_VDVLDP 0x00000008
+#define TX4939_VPC_CTRLA_VDMODE 0x00000004
+#define TX4939_VPC_CTRLA_VDFOR 0x00000002
+#define TX4939_VPC_CTRLA_ENVPC 0x00000001
+
+/* bits for tx4939_vpc_desc.ctrl1 */
+#define TX4939_VPC_DESC_CTRL1_ERR_MASK 0x00000006
+#define TX4939_VPC_DESC_CTRL1_OWN 0x00000001
+
+#define tx4939_ddrcptr ((struct tx4939_ddrc_reg __iomem *)TX4939_DDRC_REG)
+#define tx4939_ebuscptr tx4938_ebuscptr
+#define tx4939_ircptr \
+ ((struct tx4939_irc_reg __iomem *)TX4939_IRC_REG)
+#define tx4939_pcicptr tx4938_pcicptr
+#define tx4939_pcic1ptr tx4938_pcic1ptr
+#define tx4939_ccfgptr \
+ ((struct tx4939_ccfg_reg __iomem *)TX4939_CCFG_REG)
+#define tx4939_sramcptr tx4938_sramcptr
+#define tx4939_cryptoptr \
+ ((struct tx4939_crypto_reg __iomem *)TX4939_CRYPTO_REG)
+#define tx4939_vpcptr ((struct tx4939_vpc_reg __iomem *)TX4939_VPC_REG)
+
+#define TX4939_REV_MAJ_MIN() \
+ ((__u32)__raw_readq(&tx4939_ccfgptr->crir) & 0x00ff)
+#define TX4939_REV_PCODE() \
+ ((__u32)__raw_readq(&tx4939_ccfgptr->crir) >> 16)
+#define TX4939_CCFG_BCFG() \
+ ((__u32)((__raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_BCFG_MASK) \
+ >> 32))
+
+#define tx4939_ccfg_clear(bits) tx4938_ccfg_clear(bits)
+#define tx4939_ccfg_set(bits) tx4938_ccfg_set(bits)
+#define tx4939_ccfg_change(change, new) tx4938_ccfg_change(change, new)
+
+#define TX4939_EBUSC_CR(ch) TX4927_EBUSC_CR(ch)
+#define TX4939_EBUSC_BA(ch) TX4927_EBUSC_BA(ch)
+#define TX4939_EBUSC_SIZE(ch) TX4927_EBUSC_SIZE(ch)
+#define TX4939_EBUSC_WIDTH(ch) \
+ (16 >> ((__u32)(TX4939_EBUSC_CR(ch) >> 20) & 0x1))
+
+/* SCLK0 = MSTCLK * 429/19 * 16/245 / 2 (14.745MHz for MST 20MHz) */
+#define TX4939_SCLK0(mst) \
+ ((((mst) + 245/2) / 245UL * 429 * 16 + 19) / 19 / 2)
+
+void tx4939_wdt_init(void);
+void tx4939_setup(void);
+void tx4939_time_init(unsigned int tmrnr);
+void tx4939_sio_init(unsigned int sclk, unsigned int cts_mask);
+void tx4939_spi_init(int busid);
+void tx4939_ethaddr_init(unsigned char *addr0, unsigned char *addr1);
+int tx4939_report_pciclk(void);
+void tx4939_report_pci1clk(void);
+struct pci_dev;
+int tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot);
+int tx4939_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
+void tx4939_setup_pcierr_irq(void);
+void tx4939_irq_init(void);
+int tx4939_irq(void);
+void tx4939_mtd_init(int ch);
+void tx4939_ata_init(void);
+void tx4939_rtc_init(void);
+void tx4939_ndfmc_init(unsigned int hold, unsigned int spw,
+ unsigned char ch_mask, unsigned char wide_mask);
+void tx4939_dmac_init(int memcpy_chan0, int memcpy_chan1);
+void tx4939_aclc_init(void);
+void tx4939_sramc_init(void);
+void tx4939_rng_init(void);
+
+#endif /* __ASM_TXX9_TX4939_H */
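As a quick check of the SCLK0 comment above, plugging a 20 MHz master clock into the TX4939_SCLK0() formula reproduces the ~14.745 MHz serial clock it claims. A small stand-alone sketch of that integer arithmetic (hosted C, purely illustrative, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long mst = 20000000;	/* 20 MHz master clock */
		/* same expression as the TX4939_SCLK0() macro */
		unsigned long sclk0 =
			(((mst + 245/2) / 245UL * 429 * 16 + 19) / 19 / 2);

		printf("SCLK0 = %lu Hz\n", sclk0);	/* prints 14745498, i.e. ~14.745 MHz */
		return 0;
	}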
diff --git a/arch/mips/include/asm/txx9irq.h b/arch/mips/include/asm/txx9irq.h
new file mode 100644
index 000000000..68a6650a4
--- /dev/null
+++ b/arch/mips/include/asm/txx9irq.h
@@ -0,0 +1,34 @@
+/*
+ * include/asm-mips/txx9irq.h
+ * TX39/TX49 interrupt controller definitions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_TXX9IRQ_H
+#define __ASM_TXX9IRQ_H
+
+#include <irq.h>
+
+#ifdef CONFIG_IRQ_MIPS_CPU
+#define TXX9_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8)
+#else
+#ifdef CONFIG_I8259
+#define TXX9_IRQ_BASE (I8259A_IRQ_BASE + 16)
+#else
+#define TXX9_IRQ_BASE 0
+#endif
+#endif
+
+#ifdef CONFIG_CPU_TX39XX
+#define TXx9_MAX_IR 16
+#else
+#define TXx9_MAX_IR 32
+#endif
+
+void txx9_irq_init(unsigned long baseaddr);
+int txx9_irq(void);
+int txx9_irq_set_pri(int irc_irq, int new_pri);
+
+#endif /* __ASM_TXX9IRQ_H */
diff --git a/arch/mips/include/asm/txx9pio.h b/arch/mips/include/asm/txx9pio.h
new file mode 100644
index 000000000..3d6fa9f8d
--- /dev/null
+++ b/arch/mips/include/asm/txx9pio.h
@@ -0,0 +1,29 @@
+/*
+ * include/asm-mips/txx9pio.h
+ * TX39/TX49 PIO controller definitions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_TXX9PIO_H
+#define __ASM_TXX9PIO_H
+
+#include <linux/types.h>
+
+struct txx9_pio_reg {
+ __u32 dout;
+ __u32 din;
+ __u32 dir;
+ __u32 od;
+ __u32 flag[2];
+ __u32 pol;
+ __u32 intc;
+ __u32 maskcpu;
+ __u32 maskext;
+};
+
+int txx9_gpio_init(unsigned long baseaddr,
+ unsigned int base, unsigned int num);
+
+#endif /* __ASM_TXX9PIO_H */
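struct txx9_pio_reg above is simply the memory-mapped layout of the PIO block (data out/in, direction, open-drain, flag, polarity, interrupt and mask registers). A rough sketch of driving one pin as an output through that layout; the helper name, the ioremap'd pointer and the bit numbering are assumptions for illustration, only the register layout comes from this header:

	/* Hypothetical: set PIO pin "pin" to "value" and make it an output. */
	static void demo_pio_set(struct txx9_pio_reg __iomem *pioptr,
				 unsigned int pin, int value)
	{
		u32 dout = __raw_readl(&pioptr->dout);

		if (value)
			dout |= 1 << pin;
		else
			dout &= ~(1 << pin);
		__raw_writel(dout, &pioptr->dout);
		/* direction bit set = output (assumption for this sketch) */
		__raw_writel(__raw_readl(&pioptr->dir) | (1 << pin), &pioptr->dir);
	}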
diff --git a/arch/mips/include/asm/txx9tmr.h b/arch/mips/include/asm/txx9tmr.h
new file mode 100644
index 000000000..466a3def3
--- /dev/null
+++ b/arch/mips/include/asm/txx9tmr.h
@@ -0,0 +1,67 @@
+/*
+ * include/asm-mips/txx9tmr.h
+ * TX39/TX49 timer controller definitions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_TXX9TMR_H
+#define __ASM_TXX9TMR_H
+
+#include <linux/types.h>
+
+struct txx9_tmr_reg {
+ u32 tcr;
+ u32 tisr;
+ u32 cpra;
+ u32 cprb;
+ u32 itmr;
+ u32 unused0[3];
+ u32 ccdr;
+ u32 unused1[3];
+ u32 pgmr;
+ u32 unused2[3];
+ u32 wtmr;
+ u32 unused3[43];
+ u32 trr;
+};
+
+/* TMTCR : Timer Control */
+#define TXx9_TMTCR_TCE 0x00000080
+#define TXx9_TMTCR_CCDE 0x00000040
+#define TXx9_TMTCR_CRE 0x00000020
+#define TXx9_TMTCR_ECES 0x00000008
+#define TXx9_TMTCR_CCS 0x00000004
+#define TXx9_TMTCR_TMODE_MASK 0x00000003
+#define TXx9_TMTCR_TMODE_ITVL 0x00000000
+#define TXx9_TMTCR_TMODE_PGEN 0x00000001
+#define TXx9_TMTCR_TMODE_WDOG 0x00000002
+
+/* TMTISR : Timer Int. Status */
+#define TXx9_TMTISR_TPIBS 0x00000004
+#define TXx9_TMTISR_TPIAS 0x00000002
+#define TXx9_TMTISR_TIIS 0x00000001
+
+/* TMITMR : Interval Timer Mode */
+#define TXx9_TMITMR_TIIE 0x00008000
+#define TXx9_TMITMR_TZCE 0x00000001
+
+/* TMWTMR : Watchdog Timer Mode */
+#define TXx9_TMWTMR_TWIE 0x00008000
+#define TXx9_TMWTMR_WDIS 0x00000080
+#define TXx9_TMWTMR_TWC 0x00000001
+
+void txx9_clocksource_init(unsigned long baseaddr,
+ unsigned int imbusclk);
+void txx9_clockevent_init(unsigned long baseaddr, int irq,
+ unsigned int imbusclk);
+void txx9_tmr_init(unsigned long baseaddr);
+
+#ifdef CONFIG_CPU_TX39XX
+#define TXX9_TIMER_BITS 24
+#else
+#define TXX9_TIMER_BITS 32
+#endif
+
+#endif /* __ASM_TXX9TMR_H */
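The register block and TMTCR/TMITMR bits above are enough to run a channel in simple interval mode. A rough sketch of such a setup; the helper name, the ioremap'd pointer and the divide setting are assumptions, only the layout and bit definitions come from this header:

	/* Hypothetical: raise an interval interrupt every "period" counts. */
	static void demo_start_interval(struct txx9_tmr_reg __iomem *tmrptr,
					unsigned int period)
	{
		__raw_writel(0, &tmrptr->tcr);		/* stop, known state */
		__raw_writel(0, &tmrptr->ccdr);		/* smallest divide ratio (CCD=0) */
		__raw_writel(period, &tmrptr->cpra);	/* compare value A */
		/* interrupt on compare match, clear counter at match */
		__raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE, &tmrptr->itmr);
		/* start counting from the divided clock in interval mode */
		__raw_writel(TXx9_TMTCR_TCE | TXx9_TMTCR_CCDE | TXx9_TMTCR_TMODE_ITVL,
			     &tmrptr->tcr);
	}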
diff --git a/arch/mips/include/asm/types.h b/arch/mips/include/asm/types.h
new file mode 100644
index 000000000..148d42a17
--- /dev/null
+++ b/arch/mips/include/asm/types.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle
+ * Copyright (C) 2008 Wind River Systems,
+ * written by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_TYPES_H
+#define _ASM_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
+
+#endif /* _ASM_TYPES_H */
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
new file mode 100644
index 000000000..61fc01f17
--- /dev/null
+++ b/arch/mips/include/asm/uaccess.h
@@ -0,0 +1,791 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2007 Maciej W. Rozycki
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ */
+#ifndef _ASM_UACCESS_H
+#define _ASM_UACCESS_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/asm-eva.h>
+#include <asm/extable.h>
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed; with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+#ifdef CONFIG_32BIT
+
+#ifdef CONFIG_KVM_GUEST
+#define __UA_LIMIT 0x40000000UL
+#else
+#define __UA_LIMIT 0x80000000UL
+#endif
+
+#define __UA_ADDR ".word"
+#define __UA_LA "la"
+#define __UA_ADDU "addu"
+#define __UA_t0 "$8"
+#define __UA_t1 "$9"
+
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_64BIT
+
+extern u64 __ua_limit;
+
+#define __UA_LIMIT __ua_limit
+
+#define __UA_ADDR ".dword"
+#define __UA_LA "dla"
+#define __UA_ADDU "daddu"
+#define __UA_t0 "$12"
+#define __UA_t1 "$13"
+
+#endif /* CONFIG_64BIT */
+
+/*
+ * USER_DS is a bitmask that has the bits set that may not be set in a valid
+ * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but
+ * the arithmetic we're doing only works if the limit is a power of two, so
+ * we use 0x80000000 here on 32-bit kernels. If a process passes an invalid
+ * address in this range it's the process's problem, not ours :-)
+ */
+
+#ifdef CONFIG_KVM_GUEST
+#define KERNEL_DS ((mm_segment_t) { 0x80000000UL })
+#define USER_DS ((mm_segment_t) { 0xC0000000UL })
+#else
+#define KERNEL_DS ((mm_segment_t) { 0UL })
+#define USER_DS ((mm_segment_t) { __UA_LIMIT })
+#endif
+
+#define get_fs() (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
+
+/*
+ * eva_kernel_access() - determine whether this is a kernel memory access on an EVA system
+ *
+ * Determines whether memory accesses should be performed to kernel memory
+ * on a system using Extended Virtual Addressing (EVA).
+ *
+ * Return: true if a kernel memory access on an EVA system, else false.
+ */
+static inline bool eva_kernel_access(void)
+{
+ if (!IS_ENABLED(CONFIG_EVA))
+ return false;
+
+ return uaccess_kernel();
+}
+
+/*
+ * Is an address valid? This does a straightforward calculation rather
+ * than tests.
+ *
+ * Address valid if:
+ * - "addr" doesn't have any high-bits set
+ * - AND "size" doesn't have any high-bits set
+ * - AND "addr+size" doesn't have any high-bits set
+ * - OR we are in kernel mode.
+ *
+ * __ua_size() is a trick to avoid runtime checking of positive constant
+ * sizes; for those we already know at compile time that the size is ok.
+ */
+#define __ua_size(size) \
+ ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
+
+/*
+ * access_ok: - Checks if a user space pointer is valid
+ * @addr: User space pointer to start of block to check
+ * @size: Size of block to check
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * Checks if a pointer to a block of memory in user space is valid.
+ *
+ * Returns true (nonzero) if the memory block may be valid, false (zero)
+ * if it is definitely invalid.
+ *
+ * Note that, depending on architecture, this function probably just
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+
+static inline int __access_ok(const void __user *p, unsigned long size)
+{
+ unsigned long addr = (unsigned long)p;
+ return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
+}
+
+#define access_ok(addr, size) \
+ likely(__access_ok((addr), (size)))
+
+/*
+ * put_user: - Write a simple value into user space.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define put_user(x,ptr) \
+ __put_user_check((x), (ptr), sizeof(*(ptr)))
+
+/*
+ * get_user: - Get a simple variable from user space.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define get_user(x,ptr) \
+ __get_user_check((x), (ptr), sizeof(*(ptr)))
+
+/*
+ * __put_user: - Write a simple value into user space, with less checking.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define __put_user(x,ptr) \
+ __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+/*
+ * __get_user: - Get a simple variable from user space, with less checking.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define __get_user(x,ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct __user *)(x))
+
+/*
+ * Yuck. We need two variants, one for 64bit operation and one
+ * for 32 bit mode and old iron.
+ */
+#ifndef CONFIG_EVA
+#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
+#else
+/*
+ * Kernel specific functions for EVA. We need to use normal load instructions
+ * to read data from kernel when operating in EVA mode. We use these macros to
+ * avoid redefining __get_user_asm for EVA.
+ */
+#undef _loadd
+#undef _loadw
+#undef _loadh
+#undef _loadb
+#ifdef CONFIG_32BIT
+#define _loadd _loadw
+#else
+#define _loadd(reg, addr) "ld " reg ", " addr
+#endif
+#define _loadw(reg, addr) "lw " reg ", " addr
+#define _loadh(reg, addr) "lh " reg ", " addr
+#define _loadb(reg, addr) "lb " reg ", " addr
+
+#define __get_kernel_common(val, size, ptr) \
+do { \
+ switch (size) { \
+ case 1: __get_data_asm(val, _loadb, ptr); break; \
+ case 2: __get_data_asm(val, _loadh, ptr); break; \
+ case 4: __get_data_asm(val, _loadw, ptr); break; \
+ case 8: __GET_DW(val, _loadd, ptr); break; \
+ default: __get_user_unknown(); break; \
+ } \
+} while (0)
+#endif
+
+#ifdef CONFIG_32BIT
+#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
+#endif
+#ifdef CONFIG_64BIT
+#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
+#endif
+
+extern void __get_user_unknown(void);
+
+#define __get_user_common(val, size, ptr) \
+do { \
+ switch (size) { \
+ case 1: __get_data_asm(val, user_lb, ptr); break; \
+ case 2: __get_data_asm(val, user_lh, ptr); break; \
+ case 4: __get_data_asm(val, user_lw, ptr); break; \
+ case 8: __GET_DW(val, user_ld, ptr); break; \
+ default: __get_user_unknown(); break; \
+ } \
+} while (0)
+
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ int __gu_err; \
+ \
+ if (eva_kernel_access()) { \
+ __get_kernel_common((x), size, ptr); \
+ } else { \
+ __chk_user_ptr(ptr); \
+ __get_user_common((x), size, ptr); \
+ } \
+ __gu_err; \
+})
+
+#define __get_user_check(x, ptr, size) \
+({ \
+ int __gu_err = -EFAULT; \
+ const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
+ \
+ might_fault(); \
+ if (likely(access_ok( __gu_ptr, size))) { \
+ if (eva_kernel_access()) \
+ __get_kernel_common((x), size, __gu_ptr); \
+ else \
+ __get_user_common((x), size, __gu_ptr); \
+ } else \
+ (x) = 0; \
+ \
+ __gu_err; \
+})
+
+#define __get_data_asm(val, insn, addr) \
+{ \
+ long __gu_tmp; \
+ \
+ __asm__ __volatile__( \
+ "1: "insn("%1", "%3")" \n" \
+ "2: \n" \
+ " .insn \n" \
+ " .section .fixup,\"ax\" \n" \
+ "3: li %0, %4 \n" \
+ " move %1, $0 \n" \
+ " j 2b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "__UA_ADDR "\t1b, 3b \n" \
+ " .previous \n" \
+ : "=r" (__gu_err), "=r" (__gu_tmp) \
+ : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
+ \
+ (val) = (__typeof__(*(addr))) __gu_tmp; \
+}
+
+/*
+ * Get a 64-bit long long using 32-bit registers.
+ */
+#define __get_data_asm_ll32(val, insn, addr) \
+{ \
+ union { \
+ unsigned long long l; \
+ __typeof__(*(addr)) t; \
+ } __gu_tmp; \
+ \
+ __asm__ __volatile__( \
+ "1: " insn("%1", "(%3)")" \n" \
+ "2: " insn("%D1", "4(%3)")" \n" \
+ "3: \n" \
+ " .insn \n" \
+ " .section .fixup,\"ax\" \n" \
+ "4: li %0, %4 \n" \
+ " move %1, $0 \n" \
+ " move %D1, $0 \n" \
+ " j 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " " __UA_ADDR " 1b, 4b \n" \
+ " " __UA_ADDR " 2b, 4b \n" \
+ " .previous \n" \
+ : "=r" (__gu_err), "=&r" (__gu_tmp.l) \
+ : "0" (0), "r" (addr), "i" (-EFAULT)); \
+ \
+ (val) = __gu_tmp.t; \
+}
+
+#ifndef CONFIG_EVA
+#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
+#else
+/*
+ * Kernel specific functions for EVA. We need to use normal store instructions
+ * to write data to the kernel when operating in EVA mode. We use these macros
+ * to avoid redefining __put_data_asm for EVA.
+ */
+#undef _stored
+#undef _storew
+#undef _storeh
+#undef _storeb
+#ifdef CONFIG_32BIT
+#define _stored _storew
+#else
+#define _stored(reg, addr) "ld " reg ", " addr
+#endif
+
+#define _storew(reg, addr) "sw " reg ", " addr
+#define _storeh(reg, addr) "sh " reg ", " addr
+#define _storeb(reg, addr) "sb " reg ", " addr
+
+#define __put_kernel_common(ptr, size) \
+do { \
+ switch (size) { \
+ case 1: __put_data_asm(_storeb, ptr); break; \
+ case 2: __put_data_asm(_storeh, ptr); break; \
+ case 4: __put_data_asm(_storew, ptr); break; \
+ case 8: __PUT_DW(_stored, ptr); break; \
+ default: __put_user_unknown(); break; \
+ } \
+} while(0)
+#endif
+
+/*
+ * Yuck. We need two variants, one for 64bit operation and one
+ * for 32 bit mode and old iron.
+ */
+#ifdef CONFIG_32BIT
+#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
+#endif
+#ifdef CONFIG_64BIT
+#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
+#endif
+
+#define __put_user_common(ptr, size) \
+do { \
+ switch (size) { \
+ case 1: __put_data_asm(user_sb, ptr); break; \
+ case 2: __put_data_asm(user_sh, ptr); break; \
+ case 4: __put_data_asm(user_sw, ptr); break; \
+ case 8: __PUT_DW(user_sd, ptr); break; \
+ default: __put_user_unknown(); break; \
+ } \
+} while (0)
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+ __typeof__(*(ptr)) __pu_val; \
+ int __pu_err = 0; \
+ \
+ __pu_val = (x); \
+ if (eva_kernel_access()) { \
+ __put_kernel_common(ptr, size); \
+ } else { \
+ __chk_user_ptr(ptr); \
+ __put_user_common(ptr, size); \
+ } \
+ __pu_err; \
+})
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ int __pu_err = -EFAULT; \
+ \
+ might_fault(); \
+ if (likely(access_ok( __pu_addr, size))) { \
+ if (eva_kernel_access()) \
+ __put_kernel_common(__pu_addr, size); \
+ else \
+ __put_user_common(__pu_addr, size); \
+ } \
+ \
+ __pu_err; \
+})
+
+#define __put_data_asm(insn, ptr) \
+{ \
+ __asm__ __volatile__( \
+ "1: "insn("%z2", "%3")" # __put_data_asm \n" \
+ "2: \n" \
+ " .insn \n" \
+ " .section .fixup,\"ax\" \n" \
+ "3: li %0, %4 \n" \
+ " j 2b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " " __UA_ADDR " 1b, 3b \n" \
+ " .previous \n" \
+ : "=r" (__pu_err) \
+ : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
+ "i" (-EFAULT)); \
+}
+
+#define __put_data_asm_ll32(insn, ptr) \
+{ \
+ __asm__ __volatile__( \
+ "1: "insn("%2", "(%3)")" # __put_data_asm_ll32 \n" \
+ "2: "insn("%D2", "4(%3)")" \n" \
+ "3: \n" \
+ " .insn \n" \
+ " .section .fixup,\"ax\" \n" \
+ "4: li %0, %4 \n" \
+ " j 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " " __UA_ADDR " 1b, 4b \n" \
+ " " __UA_ADDR " 2b, 4b \n" \
+ " .previous" \
+ : "=r" (__pu_err) \
+ : "0" (0), "r" (__pu_val), "r" (ptr), \
+ "i" (-EFAULT)); \
+}
+
+extern void __put_user_unknown(void);
+
+/*
+ * We're generating jumps to subroutines which may be outside the range of the
+ * jump instructions.
+ */
+#ifdef MODULE
+#define __MODULE_JAL(destination) \
+ ".set\tnoat\n\t" \
+ __UA_LA "\t$1, " #destination "\n\t" \
+ "jalr\t$1\n\t" \
+ ".set\tat\n\t"
+#else
+#define __MODULE_JAL(destination) \
+ "jal\t" #destination "\n\t"
+#endif
+
+#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) && \
+ defined(CONFIG_CPU_HAS_PREFETCH))
+#define DADDI_SCRATCH "$3"
+#else
+#define DADDI_SCRATCH "$0"
+#endif
+
+extern size_t __copy_user(void *__to, const void *__from, size_t __n);
+
+#define __invoke_copy_from(func, to, from, n) \
+({ \
+ register void *__cu_to_r __asm__("$4"); \
+ register const void __user *__cu_from_r __asm__("$5"); \
+ register long __cu_len_r __asm__("$6"); \
+ \
+ __cu_to_r = (to); \
+ __cu_from_r = (from); \
+ __cu_len_r = (n); \
+ __asm__ __volatile__( \
+ ".set\tnoreorder\n\t" \
+ __MODULE_JAL(func) \
+ ".set\tnoat\n\t" \
+ __UA_ADDU "\t$1, %1, %2\n\t" \
+ ".set\tat\n\t" \
+ ".set\treorder" \
+ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
+ : \
+ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
+ DADDI_SCRATCH, "memory"); \
+ __cu_len_r; \
+})
+
+#define __invoke_copy_to(func, to, from, n) \
+({ \
+ register void __user *__cu_to_r __asm__("$4"); \
+ register const void *__cu_from_r __asm__("$5"); \
+ register long __cu_len_r __asm__("$6"); \
+ \
+ __cu_to_r = (to); \
+ __cu_from_r = (from); \
+ __cu_len_r = (n); \
+ __asm__ __volatile__( \
+ __MODULE_JAL(func) \
+ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
+ : \
+ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
+ DADDI_SCRATCH, "memory"); \
+ __cu_len_r; \
+})
+
+#define __invoke_copy_from_kernel(to, from, n) \
+ __invoke_copy_from(__copy_user, to, from, n)
+
+#define __invoke_copy_to_kernel(to, from, n) \
+ __invoke_copy_to(__copy_user, to, from, n)
+
+#define ___invoke_copy_in_kernel(to, from, n) \
+ __invoke_copy_from(__copy_user, to, from, n)
+
+#ifndef CONFIG_EVA
+#define __invoke_copy_from_user(to, from, n) \
+ __invoke_copy_from(__copy_user, to, from, n)
+
+#define __invoke_copy_to_user(to, from, n) \
+ __invoke_copy_to(__copy_user, to, from, n)
+
+#define ___invoke_copy_in_user(to, from, n) \
+ __invoke_copy_from(__copy_user, to, from, n)
+
+#else
+
+/* EVA specific functions */
+
+extern size_t __copy_from_user_eva(void *__to, const void *__from,
+ size_t __n);
+extern size_t __copy_to_user_eva(void *__to, const void *__from,
+ size_t __n);
+extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
+
+/*
+ * Source or destination address is in userland. We need to go through
+ * the TLB
+ */
+#define __invoke_copy_from_user(to, from, n) \
+ __invoke_copy_from(__copy_from_user_eva, to, from, n)
+
+#define __invoke_copy_to_user(to, from, n) \
+ __invoke_copy_to(__copy_to_user_eva, to, from, n)
+
+#define ___invoke_copy_in_user(to, from, n) \
+ __invoke_copy_from(__copy_in_user_eva, to, from, n)
+
+#endif /* CONFIG_EVA */
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (eva_kernel_access())
+ return __invoke_copy_to_kernel(to, from, n);
+ else
+ return __invoke_copy_to_user(to, from, n);
+}
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ if (eva_kernel_access())
+ return __invoke_copy_from_kernel(to, from, n);
+ else
+ return __invoke_copy_from_user(to, from, n);
+}
+
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+
+static inline unsigned long
+raw_copy_in_user(void __user*to, const void __user *from, unsigned long n)
+{
+ if (eva_kernel_access())
+ return ___invoke_copy_in_kernel(to, from, n);
+ else
+ return ___invoke_copy_in_user(to, from, n);
+}
+
+extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
+extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
+
+/*
+ * __clear_user: - Zero a block of memory in user space, with less checking.
+ * @to: Destination address, in user space.
+ * @n: Number of bytes to zero.
+ *
+ * Zero a block of memory in user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be cleared.
+ * On success, this will be zero.
+ */
+static inline __kernel_size_t
+__clear_user(void __user *addr, __kernel_size_t size)
+{
+ __kernel_size_t res;
+
+#ifdef CONFIG_CPU_MICROMIPS
+/* micromips memset / bzero also clobber t7 & t8 */
+#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
+#else
+#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
+#endif /* CONFIG_CPU_MICROMIPS */
+
+ if (eva_kernel_access()) {
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, $0\n\t"
+ "move\t$6, %2\n\t"
+ __MODULE_JAL(__bzero_kernel)
+ "move\t%0, $6"
+ : "=r" (res)
+ : "r" (addr), "r" (size)
+ : bzero_clobbers);
+ } else {
+ might_fault();
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, $0\n\t"
+ "move\t$6, %2\n\t"
+ __MODULE_JAL(__bzero)
+ "move\t%0, $6"
+ : "=r" (res)
+ : "r" (addr), "r" (size)
+ : bzero_clobbers);
+ }
+
+ return res;
+}
+
+#define clear_user(addr,n) \
+({ \
+ void __user * __cl_addr = (addr); \
+ unsigned long __cl_size = (n); \
+ if (__cl_size && access_ok(__cl_addr, __cl_size)) \
+ __cl_size = __clear_user(__cl_addr, __cl_size); \
+ __cl_size; \
+})
+
+extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len);
+extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);
+
+/*
+ * strncpy_from_user: - Copy a NUL terminated string from userspace.
+ * @dst: Destination address, in kernel space. This buffer must be at
+ * least @count bytes long.
+ * @src: Source address, in user space.
+ * @count: Maximum number of bytes to copy, including the trailing NUL.
+ *
+ * Copies a NUL-terminated string from userspace to kernel space.
+ *
+ * On success, returns the length of the string (not including the trailing
+ * NUL).
+ *
+ * If access to userspace fails, returns -EFAULT (some data may have been
+ * copied).
+ *
+ * If @count is smaller than the length of the string, copies @count bytes
+ * and returns @count.
+ */
+static inline long
+strncpy_from_user(char *__to, const char __user *__from, long __len)
+{
+ long res;
+
+ if (eva_kernel_access()) {
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ "move\t$6, %3\n\t"
+ __MODULE_JAL(__strncpy_from_kernel_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (__to), "r" (__from), "r" (__len)
+ : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
+ } else {
+ might_fault();
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ "move\t$6, %3\n\t"
+ __MODULE_JAL(__strncpy_from_user_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (__to), "r" (__from), "r" (__len)
+ : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
+ }
+
+ return res;
+}
+
+extern long __strnlen_kernel_asm(const char __user *s, long n);
+extern long __strnlen_user_asm(const char __user *s, long n);
+
+/*
+ * strnlen_user: - Get the size of a string in user space.
+ * @str: The string to measure.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * On exception, returns 0.
+ * If the string is too long, returns a value greater than @n.
+ */
+static inline long strnlen_user(const char __user *s, long n)
+{
+ long res;
+
+ might_fault();
+ if (eva_kernel_access()) {
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ __MODULE_JAL(__strnlen_kernel_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (s), "r" (n)
+ : "$2", "$4", "$5", __UA_t0, "$31");
+ } else {
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ __MODULE_JAL(__strnlen_user_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (s), "r" (n)
+ : "$2", "$4", "$5", __UA_t0, "$31");
+ }
+
+ return res;
+}
+
+#endif /* _ASM_UACCESS_H */
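The accessors declared above follow the usual kernel conventions: access_ok() validates the user range, get_user()/put_user() move single scalars and return 0 or -EFAULT, and the raw copy routines return the number of bytes left uncopied. A minimal sketch of a handler using them; the ioctl-style function and the demo_args layout are hypothetical, only the uaccess interfaces come from this header:

	struct demo_args {		/* hypothetical user-visible layout */
		__u32 in;
		__u32 out;
	};

	static long demo_ioctl(void __user *uptr)
	{
		struct demo_args args;

		if (!access_ok(uptr, sizeof(args)))
			return -EFAULT;

		/* raw_copy_from_user() returns the number of bytes it could NOT copy */
		if (raw_copy_from_user(&args, uptr, sizeof(args)))
			return -EFAULT;

		args.out = args.in + 1;

		/* put_user()/get_user() return 0 on success or -EFAULT on a fault */
		if (put_user(args.out, &((struct demo_args __user *)uptr)->out))
			return -EFAULT;

		return 0;
	}

clear_user() and strncpy_from_user() keep their own conventions from the comments above (bytes left un-cleared, and string length or -EFAULT respectively), so their results are checked differently from the 0/-EFAULT pattern in this sketch.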
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
new file mode 100644
index 000000000..f7effca79
--- /dev/null
+++ b/arch/mips/include/asm/uasm.h
@@ -0,0 +1,325 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
+ */
+
+#ifndef __ASM_UASM_H
+#define __ASM_UASM_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_EXPORT_UASM
+#include <linux/export.h>
+#define UASM_EXPORT_SYMBOL(sym) EXPORT_SYMBOL(sym)
+#else
+#define UASM_EXPORT_SYMBOL(sym)
+#endif
+
+#define Ip_u1u2u3(op) \
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+
+#define Ip_u2u1u3(op) \
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+
+#define Ip_u3u2u1(op) \
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+
+#define Ip_u3u1u2(op) \
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+
+#define Ip_u1u2s3(op) \
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+
+#define Ip_u2s3u1(op) \
+void uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
+
+#define Ip_s3s1s2(op) \
+void uasm_i##op(u32 **buf, int a, int b, int c)
+
+#define Ip_u2u1s3(op) \
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+
+#define Ip_u2u1msbu3(op) \
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
+ unsigned int d)
+
+#define Ip_u1u2(op) \
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
+
+#define Ip_u2u1(op) \
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
+
+#define Ip_u1s2(op) \
+void uasm_i##op(u32 **buf, unsigned int a, signed int b)
+
+#define Ip_u1(op) void uasm_i##op(u32 **buf, unsigned int a)
+
+#define Ip_0(op) void uasm_i##op(u32 **buf)
+
+Ip_u2u1s3(_addiu);
+Ip_u3u1u2(_addu);
+Ip_u3u1u2(_and);
+Ip_u2u1u3(_andi);
+Ip_u1u2s3(_bbit0);
+Ip_u1u2s3(_bbit1);
+Ip_u1u2s3(_beq);
+Ip_u1u2s3(_beql);
+Ip_u1s2(_bgez);
+Ip_u1s2(_bgezl);
+Ip_u1s2(_bgtz);
+Ip_u1s2(_blez);
+Ip_u1s2(_bltz);
+Ip_u1s2(_bltzl);
+Ip_u1u2s3(_bne);
+Ip_u1(_break);
+Ip_u2s3u1(_cache);
+Ip_u1u2(_cfc1);
+Ip_u2u1(_cfcmsa);
+Ip_u1u2(_ctc1);
+Ip_u2u1(_ctcmsa);
+Ip_u2u1s3(_daddiu);
+Ip_u3u1u2(_daddu);
+Ip_u1u2(_ddivu);
+Ip_u3u1u2(_ddivu_r6);
+Ip_u1(_di);
+Ip_u2u1msbu3(_dins);
+Ip_u2u1msbu3(_dinsm);
+Ip_u2u1msbu3(_dinsu);
+Ip_u1u2(_divu);
+Ip_u3u1u2(_divu_r6);
+Ip_u1u2u3(_dmfc0);
+Ip_u3u1u2(_dmodu);
+Ip_u1u2u3(_dmtc0);
+Ip_u1u2(_dmultu);
+Ip_u3u1u2(_dmulu);
+Ip_u2u1u3(_drotr);
+Ip_u2u1u3(_drotr32);
+Ip_u2u1(_dsbh);
+Ip_u2u1(_dshd);
+Ip_u2u1u3(_dsll);
+Ip_u2u1u3(_dsll32);
+Ip_u3u2u1(_dsllv);
+Ip_u2u1u3(_dsra);
+Ip_u2u1u3(_dsra32);
+Ip_u3u2u1(_dsrav);
+Ip_u2u1u3(_dsrl);
+Ip_u2u1u3(_dsrl32);
+Ip_u3u2u1(_dsrlv);
+Ip_u3u1u2(_dsubu);
+Ip_0(_eret);
+Ip_u2u1msbu3(_ext);
+Ip_u2u1msbu3(_ins);
+Ip_u1(_j);
+Ip_u1(_jal);
+Ip_u2u1(_jalr);
+Ip_u1(_jr);
+Ip_u2s3u1(_lb);
+Ip_u2s3u1(_lbu);
+Ip_u2s3u1(_ld);
+Ip_u3u1u2(_ldx);
+Ip_u2s3u1(_lh);
+Ip_u2s3u1(_lhu);
+Ip_u2s3u1(_ll);
+Ip_u2s3u1(_lld);
+Ip_u1s2(_lui);
+Ip_u2s3u1(_lw);
+Ip_u2s3u1(_lwu);
+Ip_u3u1u2(_lwx);
+Ip_u1u2u3(_mfc0);
+Ip_u1u2u3(_mfhc0);
+Ip_u1(_mfhi);
+Ip_u1(_mflo);
+Ip_u3u1u2(_modu);
+Ip_u3u1u2(_movn);
+Ip_u3u1u2(_movz);
+Ip_u1u2u3(_mtc0);
+Ip_u1u2u3(_mthc0);
+Ip_u1(_mthi);
+Ip_u1(_mtlo);
+Ip_u3u1u2(_mul);
+Ip_u1u2(_multu);
+Ip_u3u1u2(_mulu);
+Ip_u3u1u2(_nor);
+Ip_u3u1u2(_or);
+Ip_u2u1u3(_ori);
+Ip_u2s3u1(_pref);
+Ip_0(_rfe);
+Ip_u2u1u3(_rotr);
+Ip_u2s3u1(_sb);
+Ip_u2s3u1(_sc);
+Ip_u2s3u1(_scd);
+Ip_u2s3u1(_sd);
+Ip_u3u1u2(_seleqz);
+Ip_u3u1u2(_selnez);
+Ip_u2s3u1(_sh);
+Ip_u2u1u3(_sll);
+Ip_u3u2u1(_sllv);
+Ip_s3s1s2(_slt);
+Ip_u2u1s3(_slti);
+Ip_u2u1s3(_sltiu);
+Ip_u3u1u2(_sltu);
+Ip_u2u1u3(_sra);
+Ip_u3u2u1(_srav);
+Ip_u2u1u3(_srl);
+Ip_u3u2u1(_srlv);
+Ip_u3u1u2(_subu);
+Ip_u2s3u1(_sw);
+Ip_u1(_sync);
+Ip_u1(_syscall);
+Ip_0(_tlbp);
+Ip_0(_tlbr);
+Ip_0(_tlbwi);
+Ip_0(_tlbwr);
+Ip_u1(_wait);
+Ip_u2u1(_wsbh);
+Ip_u3u1u2(_xor);
+Ip_u2u1u3(_xori);
+Ip_u2u1(_yield);
+Ip_u1u2(_ldpte);
+Ip_u2u1u3(_lddir);
+
+/* Handle labels. */
+struct uasm_label {
+ u32 *addr;
+ int lab;
+};
+
+void uasm_build_label(struct uasm_label **lab, u32 *addr,
+ int lid);
+#ifdef CONFIG_64BIT
+int uasm_in_compat_space_p(long addr);
+#endif
+int uasm_rel_hi(long val);
+int uasm_rel_lo(long val);
+void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
+void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
+
+#define UASM_L_LA(lb) \
+static inline void uasm_l##lb(struct uasm_label **lab, u32 *addr) \
+{ \
+ uasm_build_label(lab, addr, label##lb); \
+}
+
+/* convenience macros for instructions */
+#ifdef CONFIG_64BIT
+# define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_daddiu(buf, rs, rt, val)
+# define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_daddu(buf, rs, rt, rd)
+# define UASM_i_LL(buf, rs, rt, off) uasm_i_lld(buf, rs, rt, off)
+# define UASM_i_LW(buf, rs, rt, off) uasm_i_ld(buf, rs, rt, off)
+# define UASM_i_LWX(buf, rs, rt, rd) uasm_i_ldx(buf, rs, rt, rd)
+# define UASM_i_MFC0(buf, rt, rd...) uasm_i_dmfc0(buf, rt, rd)
+# define UASM_i_MTC0(buf, rt, rd...) uasm_i_dmtc0(buf, rt, rd)
+# define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_drotr(buf, rs, rt, sh)
+# define UASM_i_SC(buf, rs, rt, off) uasm_i_scd(buf, rs, rt, off)
+# define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh)
+# define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh)
+# define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh)
+# define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_dsrl_safe(buf, rs, rt, sh)
+# define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_dsubu(buf, rs, rt, rd)
+# define UASM_i_SW(buf, rs, rt, off) uasm_i_sd(buf, rs, rt, off)
+#else
+# define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_addiu(buf, rs, rt, val)
+# define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_addu(buf, rs, rt, rd)
+# define UASM_i_LL(buf, rs, rt, off) uasm_i_ll(buf, rs, rt, off)
+# define UASM_i_LW(buf, rs, rt, off) uasm_i_lw(buf, rs, rt, off)
+# define UASM_i_LWX(buf, rs, rt, rd) uasm_i_lwx(buf, rs, rt, rd)
+# define UASM_i_MFC0(buf, rt, rd...) uasm_i_mfc0(buf, rt, rd)
+# define UASM_i_MTC0(buf, rt, rd...) uasm_i_mtc0(buf, rt, rd)
+# define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_rotr(buf, rs, rt, sh)
+# define UASM_i_SC(buf, rs, rt, off) uasm_i_sc(buf, rs, rt, off)
+# define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh)
+# define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh)
+# define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
+# define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
+# define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_subu(buf, rs, rt, rd)
+# define UASM_i_SW(buf, rs, rt, off) uasm_i_sw(buf, rs, rt, off)
+#endif
+
+#define uasm_i_b(buf, off) uasm_i_beq(buf, 0, 0, off)
+#define uasm_i_beqz(buf, rs, off) uasm_i_beq(buf, rs, 0, off)
+#define uasm_i_beqzl(buf, rs, off) uasm_i_beql(buf, rs, 0, off)
+#define uasm_i_bnez(buf, rs, off) uasm_i_bne(buf, rs, 0, off)
+#define uasm_i_bnezl(buf, rs, off) uasm_i_bnel(buf, rs, 0, off)
+#define uasm_i_ehb(buf) uasm_i_sll(buf, 0, 0, 3)
+#define uasm_i_move(buf, a, b) UASM_i_ADDU(buf, a, 0, b)
+#define uasm_i_nop(buf) uasm_i_sll(buf, 0, 0, 0)
+#define uasm_i_ssnop(buf) uasm_i_sll(buf, 0, 0, 1)
+
+static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1,
+ unsigned int a2, unsigned int a3)
+{
+ if (a3 < 32)
+ uasm_i_drotr(p, a1, a2, a3);
+ else
+ uasm_i_drotr32(p, a1, a2, a3 - 32);
+}
+
+static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1,
+ unsigned int a2, unsigned int a3)
+{
+ if (a3 < 32)
+ uasm_i_dsll(p, a1, a2, a3);
+ else
+ uasm_i_dsll32(p, a1, a2, a3 - 32);
+}
+
+static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
+ unsigned int a2, unsigned int a3)
+{
+ if (a3 < 32)
+ uasm_i_dsrl(p, a1, a2, a3);
+ else
+ uasm_i_dsrl32(p, a1, a2, a3 - 32);
+}
+
+static inline void uasm_i_dsra_safe(u32 **p, unsigned int a1,
+ unsigned int a2, unsigned int a3)
+{
+ if (a3 < 32)
+ uasm_i_dsra(p, a1, a2, a3);
+ else
+ uasm_i_dsra32(p, a1, a2, a3 - 32);
+}
+
+/* Handle relocations. */
+struct uasm_reloc {
+ u32 *addr;
+ unsigned int type;
+ int lab;
+};
+
+/* This is zero so we can use zeroed label arrays. */
+#define UASM_LABEL_INVALID 0
+
+void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid);
+void uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
+void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off);
+void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off);
+void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
+ u32 *first, u32 *end, u32 *target);
+int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);
+
+/* Convenience functions for labeled branches. */
+void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
+void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ unsigned int bit, int lid);
+void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ unsigned int bit, int lid);
+void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1,
+ unsigned int r2, int lid);
+void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+ unsigned int reg2, int lid);
+void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+
+#endif /* __ASM_UASM_H */
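uasm emits MIPS instructions into a buffer at run time; labels and relocations let branch targets be fixed up after the code is laid out. A rough sketch of emitting a tiny fragment with the interfaces above; the buffer handling, register choices and label id are invented for the example:

	enum { label_done = 1 };	/* any non-zero id; 0 is UASM_LABEL_INVALID */
	UASM_L_LA(_done)

	static u32 *demo_build(u32 *p)
	{
		struct uasm_label labels[4] = { };
		struct uasm_reloc relocs[4] = { };
		struct uasm_label *l = labels;
		struct uasm_reloc *r = relocs;

		/* if (a0 == 0) goto done; else v0 = a0 + 1 */
		uasm_il_beqz(&p, &r, 4, label_done);	/* $4 is a0 */
		uasm_i_nop(&p);				/* branch delay slot */
		uasm_i_addiu(&p, 2, 4, 1);		/* $2 = $4 + 1 */
		uasm_l_done(&l, p);
		uasm_i_jr(&p, 31);			/* return via ra */
		uasm_i_nop(&p);

		uasm_resolve_relocs(relocs, labels);	/* patch the beqz offset */
		return p;				/* first free slot */
	}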
diff --git a/arch/mips/include/asm/unaligned-emul.h b/arch/mips/include/asm/unaligned-emul.h
new file mode 100644
index 000000000..2022b1894
--- /dev/null
+++ b/arch/mips/include/asm/unaligned-emul.h
@@ -0,0 +1,779 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_MIPS_UNALIGNED_EMUL_H
+#define _ASM_MIPS_UNALIGNED_EMUL_H
+
+#include <asm/asm.h>
+
+#ifdef __BIG_ENDIAN
+#define _LoadHW(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ (".set\tnoat\n" \
+ "1:\t"type##_lb("%0", "0(%2)")"\n" \
+ "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\t.set\tat\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
+#define _LoadW(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ ( \
+ "1:\t"type##_lwl("%0", "(%2)")"\n" \
+ "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
+/* For CPUs without lwl instruction */
+#define _LoadW(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n" \
+ ".set\tnoat\n\t" \
+ "1:"type##_lb("%0", "0(%2)")"\n\t" \
+ "2:"type##_lbu("$1", "1(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:"type##_lbu("$1", "2(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:"type##_lbu("$1", "3(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tpop\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
+
+#define _LoadHWU(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\t"type##_lbu("%0", "0(%2)")"\n" \
+ "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".set\tat\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
+#define _LoadWU(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ ( \
+ "1:\t"type##_lwl("%0", "(%2)")"\n" \
+ "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
+ "dsll\t%0, %0, 32\n\t" \
+ "dsrl\t%0, %0, 32\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ "\t.section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#define _LoadDW(addr, value, res) \
+do { \
+ __asm__ __volatile__ ( \
+ "1:\tldl\t%0, (%2)\n" \
+ "2:\tldr\t%0, 7(%2)\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ "\t.section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
+/* For CPUs without lwl and ldl instructions */
+#define _LoadWU(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+ "1:"type##_lbu("%0", "0(%2)")"\n\t" \
+ "2:"type##_lbu("$1", "1(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:"type##_lbu("$1", "2(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:"type##_lbu("$1", "3(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tpop\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#define _LoadDW(addr, value, res) \
+do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+ "1:lb\t%0, 0(%2)\n\t" \
+ "2:lbu\t $1, 1(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:lbu\t$1, 2(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:lbu\t$1, 3(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "5:lbu\t$1, 4(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "6:lbu\t$1, 5(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "7:lbu\t$1, 6(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "8:lbu\t$1, 7(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tpop\n\t" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR)"\t5b, 11b\n\t" \
+ STR(PTR)"\t6b, 11b\n\t" \
+ STR(PTR)"\t7b, 11b\n\t" \
+ STR(PTR)"\t8b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
+
+
+#define _StoreHW(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\t"type##_sb("%1", "1(%2)")"\n" \
+ "srl\t$1, %1, 0x8\n" \
+ "2:\t"type##_sb("$1", "0(%2)")"\n" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT));\
+} while (0)
+
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
+#define _StoreW(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ ( \
+ "1:\t"type##_swl("%1", "(%2)")"\n" \
+ "2:\t"type##_swr("%1", "3(%2)")"\n\t"\
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#define _StoreDW(addr, value, res) \
+do { \
+ __asm__ __volatile__ ( \
+ "1:\tsdl\t%1,(%2)\n" \
+ "2:\tsdr\t%1, 7(%2)\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
+#define _StoreW(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+ "1:"type##_sb("%1", "3(%2)")"\n\t" \
+ "srl\t$1, %1, 0x8\n\t" \
+ "2:"type##_sb("$1", "2(%2)")"\n\t" \
+ "srl\t$1, $1, 0x8\n\t" \
+ "3:"type##_sb("$1", "1(%2)")"\n\t" \
+ "srl\t$1, $1, 0x8\n\t" \
+ "4:"type##_sb("$1", "0(%2)")"\n\t" \
+ ".set\tpop\n\t" \
+ "li\t%0, 0\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%0, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory"); \
+} while (0)
+
+#define _StoreDW(addr, value, res) \
+do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+ "1:sb\t%1, 7(%2)\n\t" \
+ "dsrl\t$1, %1, 0x8\n\t" \
+ "2:sb\t$1, 6(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n\t" \
+ "3:sb\t$1, 5(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n\t" \
+ "4:sb\t$1, 4(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n\t" \
+ "5:sb\t$1, 3(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n\t" \
+ "6:sb\t$1, 2(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n\t" \
+ "7:sb\t$1, 1(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n\t" \
+ "8:sb\t$1, 0(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n\t" \
+ ".set\tpop\n\t" \
+ "li\t%0, 0\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%0, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR)"\t5b, 11b\n\t" \
+ STR(PTR)"\t6b, 11b\n\t" \
+ STR(PTR)"\t7b, 11b\n\t" \
+ STR(PTR)"\t8b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory"); \
+} while (0)
+
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
+
+#else /* __BIG_ENDIAN */
+
+#define _LoadHW(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ (".set\tnoat\n" \
+ "1:\t"type##_lb("%0", "1(%2)")"\n" \
+ "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\t.set\tat\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
+#define _LoadW(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ ( \
+ "1:\t"type##_lwl("%0", "3(%2)")"\n" \
+ "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
+/* For CPUs without lwl instruction */
+#define _LoadW(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n" \
+ ".set\tnoat\n\t" \
+ "1:"type##_lb("%0", "3(%2)")"\n\t" \
+ "2:"type##_lbu("$1", "2(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:"type##_lbu("$1", "1(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:"type##_lbu("$1", "0(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tpop\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
+
+
+#define _LoadHWU(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\t"type##_lbu("%0", "1(%2)")"\n" \
+ "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".set\tat\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
+#define _LoadWU(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ ( \
+ "1:\t"type##_lwl("%0", "3(%2)")"\n" \
+ "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
+ "dsll\t%0, %0, 32\n\t" \
+ "dsrl\t%0, %0, 32\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ "\t.section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#define _LoadDW(addr, value, res) \
+do { \
+ __asm__ __volatile__ ( \
+ "1:\tldl\t%0, 7(%2)\n" \
+ "2:\tldr\t%0, (%2)\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ "\t.section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
+/* For CPUs without lwl and ldl instructions */
+#define _LoadWU(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+ "1:"type##_lbu("%0", "3(%2)")"\n\t" \
+ "2:"type##_lbu("$1", "2(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:"type##_lbu("$1", "1(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:"type##_lbu("$1", "0(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tpop\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#define _LoadDW(addr, value, res) \
+do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+ "1:lb\t%0, 7(%2)\n\t" \
+ "2:lbu\t$1, 6(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:lbu\t$1, 5(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:lbu\t$1, 4(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "5:lbu\t$1, 3(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "6:lbu\t$1, 2(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "7:lbu\t$1, 1(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "8:lbu\t$1, 0(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tpop\n\t" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR)"\t5b, 11b\n\t" \
+ STR(PTR)"\t6b, 11b\n\t" \
+ STR(PTR)"\t7b, 11b\n\t" \
+ STR(PTR)"\t8b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT)); \
+} while (0)
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
+
+#define _StoreHW(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\t"type##_sb("%1", "0(%2)")"\n" \
+ "srl\t$1,%1, 0x8\n" \
+ "2:\t"type##_sb("$1", "1(%2)")"\n" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT));\
+} while (0)
+
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
+#define _StoreW(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ ( \
+ "1:\t"type##_swl("%1", "3(%2)")"\n" \
+ "2:\t"type##_swr("%1", "(%2)")"\n\t"\
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#define _StoreDW(addr, value, res) \
+do { \
+ __asm__ __volatile__ ( \
+ "1:\tsdl\t%1, 7(%2)\n" \
+ "2:\tsdr\t%1, (%2)\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
+} while (0)
+
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
+/* For CPUs without swl and sdl instructions */
+#define _StoreW(addr, value, res, type) \
+do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+ "1:"type##_sb("%1", "0(%2)")"\n\t" \
+ "srl\t$1, %1, 0x8\n\t" \
+ "2:"type##_sb("$1", "1(%2)")"\n\t" \
+ "srl\t$1, $1, 0x8\n\t" \
+ "3:"type##_sb("$1", "2(%2)")"\n\t" \
+ "srl\t$1, $1, 0x8\n\t" \
+ "4:"type##_sb("$1", "3(%2)")"\n\t" \
+ ".set\tpop\n\t" \
+ "li\t%0, 0\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%0, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory"); \
+} while (0)
+
+#define _StoreDW(addr, value, res) \
+do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+ "1:sb\t%1, 0(%2)\n\t" \
+ "dsrl\t$1, %1, 0x8\n\t" \
+ "2:sb\t$1, 1(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n\t" \
+ "3:sb\t$1, 2(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n\t" \
+ "4:sb\t$1, 3(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n\t" \
+ "5:sb\t$1, 4(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n\t" \
+ "6:sb\t$1, 5(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n\t" \
+ "7:sb\t$1, 6(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n\t" \
+ "8:sb\t$1, 7(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n\t" \
+ ".set\tpop\n\t" \
+ "li\t%0, 0\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%0, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR)"\t5b, 11b\n\t" \
+ STR(PTR)"\t6b, 11b\n\t" \
+ STR(PTR)"\t7b, 11b\n\t" \
+ STR(PTR)"\t8b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory"); \
+} while (0)
+
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
+#endif
+
+#define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel)
+#define LoadHWUE(addr, value, res) _LoadHWU(addr, value, res, user)
+#define LoadWU(addr, value, res) _LoadWU(addr, value, res, kernel)
+#define LoadWUE(addr, value, res) _LoadWU(addr, value, res, user)
+#define LoadHW(addr, value, res) _LoadHW(addr, value, res, kernel)
+#define LoadHWE(addr, value, res) _LoadHW(addr, value, res, user)
+#define LoadW(addr, value, res) _LoadW(addr, value, res, kernel)
+#define LoadWE(addr, value, res) _LoadW(addr, value, res, user)
+#define LoadDW(addr, value, res) _LoadDW(addr, value, res)
+
+#define StoreHW(addr, value, res) _StoreHW(addr, value, res, kernel)
+#define StoreHWE(addr, value, res) _StoreHW(addr, value, res, user)
+#define StoreW(addr, value, res) _StoreW(addr, value, res, kernel)
+#define StoreWE(addr, value, res) _StoreW(addr, value, res, user)
+#define StoreDW(addr, value, res) _StoreDW(addr, value, res)
+
+#endif /* _ASM_MIPS_UNALIGNED_EMUL_H */
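A minimal usage sketch for the LoadW()/StoreW() wrappers defined above, illustrative only and not part of the patch. The caller below and its names are assumptions; the only contract taken from the header itself is that the macros set res to 0 on success and to -EFAULT when a fault is taken.

/* Illustrative caller; not part of the patch. */
static int emulate_store_word(unsigned long addr, unsigned int value)
{
	int res;

	/*
	 * StoreW() expands to swl/swr (or to byte stores when
	 * CONFIG_CPU_NO_LOAD_STORE_LR is set) with exception-table fixups.
	 */
	StoreW(addr, value, res);

	return res;	/* 0 on success, -EFAULT if a fault was taken */
}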
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
new file mode 100644
index 000000000..5d70babfc
--- /dev/null
+++ b/arch/mips/include/asm/unistd.h
@@ -0,0 +1,62 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 96, 97, 98, 99, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ *
+ * Changed system call macros _syscall5 - _syscall7 to push args 5 to 7 onto
+ * the stack. Robin Farine for ACN S.A, Copyright (C) 1996 by ACN S.A
+ */
+#ifndef _ASM_UNISTD_H
+#define _ASM_UNISTD_H
+
+#include <uapi/asm/unistd.h>
+#include <asm/unistd_nr_n32.h>
+#include <asm/unistd_nr_n64.h>
+#include <asm/unistd_nr_o32.h>
+
+#ifdef CONFIG_MIPS32_N32
+#define NR_syscalls (__NR_N32_Linux + __NR_N32_Linux_syscalls)
+#elif defined(CONFIG_64BIT)
+#define NR_syscalls (__NR_64_Linux + __NR_64_Linux_syscalls)
+#else
+#define NR_syscalls (__NR_O32_Linux + __NR_O32_Linux_syscalls)
+#endif
+
+#ifndef __ASSEMBLY__
+
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_IPC
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_UTIME32
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_SYS_OLD_UNAME
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+# ifdef CONFIG_32BIT
+# define __ARCH_WANT_STAT64
+# define __ARCH_WANT_SYS_TIME32
+# endif
+# ifdef CONFIG_MIPS32_O32
+# define __ARCH_WANT_SYS_TIME32
+# endif
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
+
+/* whitelists for checksyscalls */
+#define __IGNORE_fadvise64_64
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_UNISTD_H */
diff --git a/arch/mips/include/asm/unroll.h b/arch/mips/include/asm/unroll.h
new file mode 100644
index 000000000..6f4ac854b
--- /dev/null
+++ b/arch/mips/include/asm/unroll.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_UNROLL_H__
+#define __ASM_UNROLL_H__
+
+/*
+ * Explicitly unroll a loop, for use in cases where doing so is performance
+ * critical.
+ *
+ * Ideally we'd rely upon the compiler to provide this but there's no commonly
+ * available means to do so. For example GCC's "#pragma GCC unroll"
+ * functionality would be ideal but is only available from GCC 8 onwards. Using
+ * -funroll-loops is an option but GCC tends to make poor choices when
+ * compiling our string functions. -funroll-all-loops leads to massive code
+ * bloat, even if only applied to the string functions.
+ */
+#define unroll(times, fn, ...) do { \
+ extern void bad_unroll(void) \
+ __compiletime_error("Unsupported unroll"); \
+ \
+ /* \
+ * We can't unroll if the number of iterations isn't \
+ * compile-time constant. Unfortunately clang versions \
+ * up until 8.0 tend to miss obvious constants & cause \
+ * this check to fail, even though they go on to \
+ * generate reasonable code for the switch statement, \
+ * so we skip the sanity check for those compilers. \
+ */ \
+ BUILD_BUG_ON(!__builtin_constant_p(times)); \
+ \
+ switch (times) { \
+ case 32: fn(__VA_ARGS__); fallthrough; \
+ case 31: fn(__VA_ARGS__); fallthrough; \
+ case 30: fn(__VA_ARGS__); fallthrough; \
+ case 29: fn(__VA_ARGS__); fallthrough; \
+ case 28: fn(__VA_ARGS__); fallthrough; \
+ case 27: fn(__VA_ARGS__); fallthrough; \
+ case 26: fn(__VA_ARGS__); fallthrough; \
+ case 25: fn(__VA_ARGS__); fallthrough; \
+ case 24: fn(__VA_ARGS__); fallthrough; \
+ case 23: fn(__VA_ARGS__); fallthrough; \
+ case 22: fn(__VA_ARGS__); fallthrough; \
+ case 21: fn(__VA_ARGS__); fallthrough; \
+ case 20: fn(__VA_ARGS__); fallthrough; \
+ case 19: fn(__VA_ARGS__); fallthrough; \
+ case 18: fn(__VA_ARGS__); fallthrough; \
+ case 17: fn(__VA_ARGS__); fallthrough; \
+ case 16: fn(__VA_ARGS__); fallthrough; \
+ case 15: fn(__VA_ARGS__); fallthrough; \
+ case 14: fn(__VA_ARGS__); fallthrough; \
+ case 13: fn(__VA_ARGS__); fallthrough; \
+ case 12: fn(__VA_ARGS__); fallthrough; \
+ case 11: fn(__VA_ARGS__); fallthrough; \
+ case 10: fn(__VA_ARGS__); fallthrough; \
+ case 9: fn(__VA_ARGS__); fallthrough; \
+ case 8: fn(__VA_ARGS__); fallthrough; \
+ case 7: fn(__VA_ARGS__); fallthrough; \
+ case 6: fn(__VA_ARGS__); fallthrough; \
+ case 5: fn(__VA_ARGS__); fallthrough; \
+ case 4: fn(__VA_ARGS__); fallthrough; \
+ case 3: fn(__VA_ARGS__); fallthrough; \
+ case 2: fn(__VA_ARGS__); fallthrough; \
+ case 1: fn(__VA_ARGS__); fallthrough; \
+ case 0: break; \
+ \
+ default: \
+ /* \
+ * Either the iteration count is unreasonable \
+ * or we need to add more cases above. \
+ */ \
+ bad_unroll(); \
+ break; \
+ } \
+} while (0)
+
+#endif /* __ASM_UNROLL_H__ */
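A hedged usage sketch for unroll(); the helper and macro names below are invented for illustration and are not part of the patch. The points it shows are that the iteration count must be a compile-time constant and that each expanded call re-evaluates its arguments, so a post-increment can advance an offset once per call.

/*
 * Illustrative only; touch_line() and touch_lines() are not part of the
 * patch.
 */
static inline void touch_line(const volatile unsigned char *p)
{
	(void)*p;	/* read one byte so the cache line is brought in */
}

#define touch_lines(n, base, lsize) do {				\
	int i = 0;							\
	unroll(n, touch_line,						\
	       (const volatile unsigned char *)(base) + (i++ * (lsize))); \
} while (0)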
diff --git a/arch/mips/include/asm/uprobes.h b/arch/mips/include/asm/uprobes.h
new file mode 100644
index 000000000..b86d1ae07
--- /dev/null
+++ b/arch/mips/include/asm/uprobes.h
@@ -0,0 +1,45 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_UPROBES_H
+#define __ASM_UPROBES_H
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+#include <asm/break.h>
+#include <asm/inst.h>
+
+/*
+ * We want this to be defined as union mips_instruction but that makes the
+ * generic code blow up.
+ */
+typedef u32 uprobe_opcode_t;
+
+/*
+ * Classic MIPS (note this implementation doesn't consider microMIPS yet)
+ * instructions are always 4 bytes but in order to deal with branches and
+ * their delay slots, we treat instructions as having 8 bytes maximum.
+ */
+#define MAX_UINSN_BYTES 8
+#define UPROBE_XOL_SLOT_BYTES 128 /* Max. cache line size */
+
+#define UPROBE_BRK_UPROBE 0x000d000d /* break 13 */
+#define UPROBE_BRK_UPROBE_XOL 0x000e000d /* break 14 */
+
+#define UPROBE_SWBP_INSN UPROBE_BRK_UPROBE
+#define UPROBE_SWBP_INSN_SIZE 4
+
+struct arch_uprobe {
+ unsigned long resume_epc;
+ u32 insn[2];
+ u32 ixol[2];
+};
+
+struct arch_uprobe_task {
+ unsigned long saved_trap_nr;
+};
+
+#endif /* __ASM_UPROBES_H */
diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h
new file mode 100644
index 000000000..cc7b51612
--- /dev/null
+++ b/arch/mips/include/asm/vdso.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#ifndef __ASM_VDSO_H
+#define __ASM_VDSO_H
+
+#include <linux/mm_types.h>
+#include <vdso/datapage.h>
+
+#include <asm/barrier.h>
+
+/**
+ * struct mips_vdso_image - Details of a VDSO image.
+ * @data: Pointer to VDSO image data (page-aligned).
+ * @size: Size of the VDSO image data (page-aligned).
+ * @off_sigreturn: Offset of the sigreturn() trampoline.
+ * @off_rt_sigreturn: Offset of the rt_sigreturn() trampoline.
+ * @mapping: Special mapping structure.
+ *
+ * This structure contains details of a VDSO image, including the image data
+ * and offsets of certain symbols required by the kernel. It is generated as
+ * part of the VDSO build process, aside from the mapping page array, which is
+ * populated at runtime.
+ */
+struct mips_vdso_image {
+ void *data;
+ unsigned long size;
+
+ unsigned long off_sigreturn;
+ unsigned long off_rt_sigreturn;
+
+ struct vm_special_mapping mapping;
+};
+
+/*
+ * The following structures are auto-generated as part of the build for each
+ * ABI by genvdso, see arch/mips/vdso/Makefile.
+ */
+
+extern struct mips_vdso_image vdso_image;
+
+#ifdef CONFIG_MIPS32_O32
+extern struct mips_vdso_image vdso_image_o32;
+#endif
+
+#ifdef CONFIG_MIPS32_N32
+extern struct mips_vdso_image vdso_image_n32;
+#endif
+
+union mips_vdso_data {
+ struct vdso_data data[CS_BASES];
+ u8 page[PAGE_SIZE];
+};
+
+#endif /* __ASM_VDSO_H */
diff --git a/arch/mips/include/asm/vdso/clocksource.h b/arch/mips/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000..510e1671d
--- /dev/null
+++ b/arch/mips/include/asm/vdso/clocksource.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_R4K, \
+ VDSO_CLOCKMODE_GIC
+
+#endif /* __ASM_VDSOCLOCKSOURCE_H */
diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h
new file mode 100644
index 000000000..44a45f3fa
--- /dev/null
+++ b/arch/mips/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2018 ARM Limited
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/vdso/vdso.h>
+#include <asm/clocksource.h>
+#include <asm/unistd.h>
+#include <asm/vdso.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+#if MIPS_ISA_REV < 6
+#define VDSO_SYSCALL_CLOBBERS "hi", "lo",
+#else
+#define VDSO_SYSCALL_CLOBBERS
+#endif
+
+static __always_inline long gettimeofday_fallback(
+ struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct timezone *tz asm("a1") = _tz;
+ register struct __kernel_old_timeval *tv asm("a0") = _tv;
+ register long ret asm("v0");
+ register long nr asm("v0") = __NR_gettimeofday;
+ register long error asm("a3");
+
+ asm volatile(
+ " syscall\n"
+ : "=r" (ret), "=r" (error)
+ : "r" (tv), "r" (tz), "r" (nr)
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25",
+ VDSO_SYSCALL_CLOBBERS
+ "memory");
+
+ return error ? -ret : ret;
+}
+
+static __always_inline long clock_gettime_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register clockid_t clkid asm("a0") = _clkid;
+ register long ret asm("v0");
+#if _MIPS_SIM == _MIPS_SIM_ABI64
+ register long nr asm("v0") = __NR_clock_gettime;
+#else
+ register long nr asm("v0") = __NR_clock_gettime64;
+#endif
+ register long error asm("a3");
+
+ asm volatile(
+ " syscall\n"
+ : "=r" (ret), "=r" (error)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25",
+ VDSO_SYSCALL_CLOBBERS
+ "memory");
+
+ return error ? -ret : ret;
+}
+
+static __always_inline int clock_getres_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register clockid_t clkid asm("a0") = _clkid;
+ register long ret asm("v0");
+#if _MIPS_SIM == _MIPS_SIM_ABI64
+ register long nr asm("v0") = __NR_clock_getres;
+#else
+ register long nr asm("v0") = __NR_clock_getres_time64;
+#endif
+ register long error asm("a3");
+
+ asm volatile(
+ " syscall\n"
+ : "=r" (ret), "=r" (error)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25",
+ VDSO_SYSCALL_CLOBBERS
+ "memory");
+
+ return error ? -ret : ret;
+}
+
+#if _MIPS_SIM != _MIPS_SIM_ABI64
+
+static __always_inline long clock_gettime32_fallback(
+ clockid_t _clkid,
+ struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("a1") = _ts;
+ register clockid_t clkid asm("a0") = _clkid;
+ register long ret asm("v0");
+ register long nr asm("v0") = __NR_clock_gettime;
+ register long error asm("a3");
+
+ asm volatile(
+ " syscall\n"
+ : "=r" (ret), "=r" (error)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25",
+ VDSO_SYSCALL_CLOBBERS
+ "memory");
+
+ return error ? -ret : ret;
+}
+
+static __always_inline int clock_getres32_fallback(
+ clockid_t _clkid,
+ struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("a1") = _ts;
+ register clockid_t clkid asm("a0") = _clkid;
+ register long ret asm("v0");
+ register long nr asm("v0") = __NR_clock_getres;
+ register long error asm("a3");
+
+ asm volatile(
+ " syscall\n"
+ : "=r" (ret), "=r" (error)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25",
+ VDSO_SYSCALL_CLOBBERS
+ "memory");
+
+ return error ? -ret : ret;
+}
+#endif
+
+#ifdef CONFIG_CSRC_R4K
+
+static __always_inline u64 read_r4k_count(void)
+{
+ unsigned int count;
+
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set mips32r2\n"
+ " rdhwr %0, $2\n"
+ " .set pop\n"
+ : "=r" (count));
+
+ return count;
+}
+
+#endif
+
+#ifdef CONFIG_CLKSRC_MIPS_GIC
+
+static __always_inline u64 read_gic_count(const struct vdso_data *data)
+{
+ void __iomem *gic = get_gic(data);
+ u32 hi, hi2, lo;
+
+ do {
+ hi = __raw_readl(gic + sizeof(lo));
+ lo = __raw_readl(gic);
+ hi2 = __raw_readl(gic + sizeof(lo));
+ } while (hi2 != hi);
+
+ return (((u64)hi) << 32) + lo;
+}
+
+#endif
+
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+ const struct vdso_data *vd)
+{
+#ifdef CONFIG_CSRC_R4K
+ if (clock_mode == VDSO_CLOCKMODE_R4K)
+ return read_r4k_count();
+#endif
+#ifdef CONFIG_CLKSRC_MIPS_GIC
+ if (clock_mode == VDSO_CLOCKMODE_GIC)
+ return read_gic_count(vd);
+#endif
+	/*
+	 * Core checks the mode already, so this raced against a concurrent
+	 * update. Return something; core will do another round, see the
+	 * change and fall back to the syscall.
+	 */
+ return 0;
+}
+
+static inline bool mips_vdso_hres_capable(void)
+{
+ return IS_ENABLED(CONFIG_CSRC_R4K) ||
+ IS_ENABLED(CONFIG_CLKSRC_MIPS_GIC);
+}
+#define __arch_vdso_hres_capable mips_vdso_hres_capable
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+ return get_vdso_data();
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/mips/include/asm/vdso/processor.h b/arch/mips/include/asm/vdso/processor.h
new file mode 100644
index 000000000..511c95d73
--- /dev/null
+++ b/arch/mips/include/asm/vdso/processor.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_CPU_LOONGSON64
+/*
+ * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
+ * tight read loop is executed, because reads take priority over writes & the
+ * hardware (incorrectly) doesn't ensure that writes will eventually occur.
+ *
+ * Since spin loops of any kind should have a cpu_relax() in them, force an SFB
+ * flush from cpu_relax() such that any pending writes will become visible as
+ * expected.
+ */
+#define cpu_relax() smp_mb()
+#else
+#define cpu_relax() barrier()
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/mips/include/asm/vdso/vdso.h b/arch/mips/include/asm/vdso/vdso.h
new file mode 100644
index 000000000..a327ca212
--- /dev/null
+++ b/arch/mips/include/asm/vdso/vdso.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <asm/sgidefs.h>
+
+#ifndef __ASSEMBLY__
+
+#include <asm/asm.h>
+#include <asm/page.h>
+#include <asm/vdso.h>
+
+static inline unsigned long get_vdso_base(void)
+{
+ unsigned long addr;
+
+ /*
+ * We can't use cpu_has_mips_r6 since it needs the cpu_data[]
+ * kernel symbol.
+ */
+#ifdef CONFIG_CPU_MIPSR6
+ /*
+ * lapc <symbol> is an alias to addiupc reg, <symbol> - .
+ *
+ * We can't use addiupc because there is no label-label
+ * support for the addiupc reloc
+ */
+ __asm__("lapc %0, _start \n"
+ : "=r" (addr) : :);
+#else
+ /*
+ * Get the base load address of the VDSO. We have to avoid generating
+	 * relocations and references to the GOT because ld.so does not perform
+ * relocations on the VDSO. We use the current offset from the VDSO base
+ * and perform a PC-relative branch which gives the absolute address in
+ * ra, and take the difference. The assembler chokes on
+ * "li %0, _start - .", so embed the offset as a word and branch over
+ * it.
+	 */
+
+ __asm__(
+ " .set push \n"
+ " .set noreorder \n"
+ " bal 1f \n"
+ " nop \n"
+ " .word _start - . \n"
+ "1: lw %0, 0($31) \n"
+ " " STR(PTR_ADDU) " %0, $31, %0 \n"
+ " .set pop \n"
+ : "=r" (addr)
+ :
+ : "$31");
+#endif /* CONFIG_CPU_MIPSR6 */
+
+ return addr;
+}
+
+static inline const struct vdso_data *get_vdso_data(void)
+{
+ return (const struct vdso_data *)(get_vdso_base() - PAGE_SIZE);
+}
+
+#ifdef CONFIG_CLKSRC_MIPS_GIC
+
+static inline void __iomem *get_gic(const struct vdso_data *data)
+{
+ return (void __iomem *)((unsigned long)data & PAGE_MASK) - PAGE_SIZE;
+}
+
+#endif /* CONFIG_CLKSRC_MIPS_GIC */
+
+#endif /* __ASSEMBLY__ */
diff --git a/arch/mips/include/asm/vdso/vsyscall.h b/arch/mips/include/asm/vdso/vsyscall.h
new file mode 100644
index 000000000..47168aaf1
--- /dev/null
+++ b/arch/mips/include/asm/vdso/vsyscall.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+
+extern struct vdso_data *vdso_data;
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+static __always_inline
+struct vdso_data *__mips_get_k_vdso_data(void)
+{
+ return vdso_data;
+}
+#define __arch_get_k_vdso_data __mips_get_k_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/mips/include/asm/vermagic.h b/arch/mips/include/asm/vermagic.h
new file mode 100644
index 000000000..4d2dae0c7
--- /dev/null
+++ b/arch/mips/include/asm/vermagic.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#ifdef CONFIG_CPU_BMIPS
+#define MODULE_PROC_FAMILY "BMIPS "
+#elif defined CONFIG_CPU_MIPS32_R1
+#define MODULE_PROC_FAMILY "MIPS32_R1 "
+#elif defined CONFIG_CPU_MIPS32_R2
+#define MODULE_PROC_FAMILY "MIPS32_R2 "
+#elif defined CONFIG_CPU_MIPS32_R5
+#define MODULE_PROC_FAMILY "MIPS32_R5 "
+#elif defined CONFIG_CPU_MIPS32_R6
+#define MODULE_PROC_FAMILY "MIPS32_R6 "
+#elif defined CONFIG_CPU_MIPS64_R1
+#define MODULE_PROC_FAMILY "MIPS64_R1 "
+#elif defined CONFIG_CPU_MIPS64_R2
+#define MODULE_PROC_FAMILY "MIPS64_R2 "
+#elif defined CONFIG_CPU_MIPS64_R5
+#define MODULE_PROC_FAMILY "MIPS64_R5 "
+#elif defined CONFIG_CPU_MIPS64_R6
+#define MODULE_PROC_FAMILY "MIPS64_R6 "
+#elif defined CONFIG_CPU_R3000
+#define MODULE_PROC_FAMILY "R3000 "
+#elif defined CONFIG_CPU_TX39XX
+#define MODULE_PROC_FAMILY "TX39XX "
+#elif defined CONFIG_CPU_VR41XX
+#define MODULE_PROC_FAMILY "VR41XX "
+#elif defined CONFIG_CPU_R4X00
+#define MODULE_PROC_FAMILY "R4X00 "
+#elif defined CONFIG_CPU_TX49XX
+#define MODULE_PROC_FAMILY "TX49XX "
+#elif defined CONFIG_CPU_R5000
+#define MODULE_PROC_FAMILY "R5000 "
+#elif defined CONFIG_CPU_R5500
+#define MODULE_PROC_FAMILY "R5500 "
+#elif defined CONFIG_CPU_NEVADA
+#define MODULE_PROC_FAMILY "NEVADA "
+#elif defined CONFIG_CPU_R10000
+#define MODULE_PROC_FAMILY "R10000 "
+#elif defined CONFIG_CPU_RM7000
+#define MODULE_PROC_FAMILY "RM7000 "
+#elif defined CONFIG_CPU_SB1
+#define MODULE_PROC_FAMILY "SB1 "
+#elif defined CONFIG_CPU_LOONGSON32
+#define MODULE_PROC_FAMILY "LOONGSON32 "
+#elif defined CONFIG_CPU_LOONGSON2EF
+#define MODULE_PROC_FAMILY "LOONGSON2EF "
+#elif defined CONFIG_CPU_LOONGSON64
+#define MODULE_PROC_FAMILY "LOONGSON64 "
+#elif defined CONFIG_CPU_CAVIUM_OCTEON
+#define MODULE_PROC_FAMILY "OCTEON "
+#elif defined CONFIG_CPU_P5600
+#define MODULE_PROC_FAMILY "P5600 "
+#elif defined CONFIG_CPU_XLR
+#define MODULE_PROC_FAMILY "XLR "
+#elif defined CONFIG_CPU_XLP
+#define MODULE_PROC_FAMILY "XLP "
+#else
+#error MODULE_PROC_FAMILY undefined for your processor configuration
+#endif
+
+#ifdef CONFIG_32BIT
+#define MODULE_KERNEL_TYPE "32BIT "
+#elif defined CONFIG_64BIT
+#define MODULE_KERNEL_TYPE "64BIT "
+#endif
+
+#define MODULE_ARCH_VERMAGIC \
+ MODULE_PROC_FAMILY MODULE_KERNEL_TYPE
+
+#endif /* _ASM_VERMAGIC_H */
diff --git a/arch/mips/include/asm/vga.h b/arch/mips/include/asm/vga.h
new file mode 100644
index 000000000..0136e0366
--- /dev/null
+++ b/arch/mips/include/asm/vga.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ */
+#ifndef _ASM_VGA_H
+#define _ASM_VGA_H
+
+#include <linux/string.h>
+#include <asm/addrspace.h>
+#include <asm/byteorder.h>
+
+/*
+ * On the PC, we can just recalculate addresses and then
+ * access the videoram directly without any black magic.
+ */
+
+#define VGA_MAP_MEM(x, s) CKSEG1ADDR(0x10000000L + (unsigned long)(x))
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x, y) (*(y) = (x))
+
+#define VT_BUF_HAVE_RW
+/*
+ * These are only needed for supporting VGA or MDA text mode, which use little
+ * endian byte ordering.
+ * In other cases, we can optimize by using native byte ordering and
+ * <linux/vt_buffer.h> has already done the right job for us.
+ */
+
+#undef scr_writew
+#undef scr_readw
+
+static inline void scr_writew(u16 val, volatile u16 *addr)
+{
+ *addr = cpu_to_le16(val);
+}
+
+static inline u16 scr_readw(volatile const u16 *addr)
+{
+ return le16_to_cpu(*addr);
+}
+
+static inline void scr_memsetw(u16 *s, u16 v, unsigned int count)
+{
+ memset16(s, cpu_to_le16(v), count / 2);
+}
+
+#define scr_memcpyw(d, s, c) memcpy(d, s, c)
+#define scr_memmovew(d, s, c) memmove(d, s, c)
+#define VT_BUF_HAVE_MEMCPYW
+#define VT_BUF_HAVE_MEMMOVEW
+#define VT_BUF_HAVE_MEMSETW
+
+#endif /* _ASM_VGA_H */
diff --git a/arch/mips/include/asm/vmalloc.h b/arch/mips/include/asm/vmalloc.h
new file mode 100644
index 000000000..25dc09b25
--- /dev/null
+++ b/arch/mips/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_MIPS_VMALLOC_H
+#define _ASM_MIPS_VMALLOC_H
+
+#endif /* _ASM_MIPS_VMALLOC_H */
diff --git a/arch/mips/include/asm/vpe.h b/arch/mips/include/asm/vpe.h
new file mode 100644
index 000000000..012731546
--- /dev/null
+++ b/arch/mips/include/asm/vpe.h
@@ -0,0 +1,129 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#ifndef _ASM_VPE_H
+#define _ASM_VPE_H
+
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+
+#define VPE_MODULE_NAME "vpe"
+#define VPE_MODULE_MINOR 1
+
+/* grab the likely amount of memory we will need. */
+#ifdef CONFIG_MIPS_VPE_LOADER_TOM
+#define P_SIZE (2 * 1024 * 1024)
+#else
+/* add an overhead to the max kmalloc size for non-stripped symbols/etc */
+#define P_SIZE (256 * 1024)
+#endif
+
+#define MAX_VPES 16
+#define VPE_PATH_MAX 256
+
+static inline int aprp_cpu_index(void)
+{
+#ifdef CONFIG_MIPS_CMP
+ return setup_max_cpus;
+#else
+ extern int tclimit;
+ return tclimit;
+#endif
+}
+
+enum vpe_state {
+ VPE_STATE_UNUSED = 0,
+ VPE_STATE_INUSE,
+ VPE_STATE_RUNNING
+};
+
+enum tc_state {
+ TC_STATE_UNUSED = 0,
+ TC_STATE_INUSE,
+ TC_STATE_RUNNING,
+ TC_STATE_DYNAMIC
+};
+
+struct vpe {
+ enum vpe_state state;
+
+ /* (device) minor associated with this vpe */
+ int minor;
+
+ /* elfloader stuff */
+ void *load_addr;
+ unsigned long len;
+ char *pbuffer;
+ unsigned long plen;
+ char cwd[VPE_PATH_MAX];
+
+ unsigned long __start;
+
+ /* tc's associated with this vpe */
+ struct list_head tc;
+
+ /* The list of vpe's */
+ struct list_head list;
+
+ /* shared symbol address */
+ void *shared_ptr;
+
+ /* the list of who wants to know when something major happens */
+ struct list_head notify;
+
+ unsigned int ntcs;
+};
+
+struct tc {
+ enum tc_state state;
+ int index;
+
+ struct vpe *pvpe; /* parent VPE */
+ struct list_head tc; /* The list of TC's with this VPE */
+ struct list_head list; /* The global list of tc's */
+};
+
+struct vpe_notifications {
+ void (*start)(int vpe);
+ void (*stop)(int vpe);
+
+ struct list_head list;
+};
+
+struct vpe_control {
+ spinlock_t vpe_list_lock;
+ struct list_head vpe_list; /* Virtual processing elements */
+ spinlock_t tc_list_lock;
+ struct list_head tc_list; /* Thread contexts */
+};
+
+extern struct vpe_control vpecontrol;
+extern const struct file_operations vpe_fops;
+
+int vpe_notify(int index, struct vpe_notifications *notify);
+
+void *vpe_get_shared(int index);
+char *vpe_getcwd(int index);
+
+struct vpe *get_vpe(int minor);
+struct tc *get_tc(int index);
+struct vpe *alloc_vpe(int minor);
+struct tc *alloc_tc(int index);
+void release_vpe(struct vpe *v);
+
+void *alloc_progmem(unsigned long len);
+void release_progmem(void *ptr);
+
+int vpe_run(struct vpe *v);
+void cleanup_tc(struct tc *tc);
+
+int __init vpe_module_init(void);
+void __exit vpe_module_exit(void);
+#endif /* _ASM_VPE_H */
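A sketch, under stated assumptions, of a client registering for VPE start/stop notifications through the API declared above. The callback bodies and the init-function name are hypothetical and not part of the patch.

static void my_vpe_started(int vpe)
{
	/* e.g. map buffers shared with the firmware running on the VPE */
}

static void my_vpe_stopped(int vpe)
{
	/* e.g. release those buffers again */
}

static struct vpe_notifications my_vpe_notifier = {
	.start	= my_vpe_started,
	.stop	= my_vpe_stopped,
};

static int __init my_vpe_client_init(void)
{
	/* aprp_cpu_index() names the TC/VPE managed by the loader */
	return vpe_notify(aprp_cpu_index(), &my_vpe_notifier);
}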
diff --git a/arch/mips/include/asm/vr41xx/capcella.h b/arch/mips/include/asm/vr41xx/capcella.h
new file mode 100644
index 000000000..d45a33969
--- /dev/null
+++ b/arch/mips/include/asm/vr41xx/capcella.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * capcella.h, Include file for ZAO Networks Capcella.
+ *
+ * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#ifndef __ZAO_CAPCELLA_H
+#define __ZAO_CAPCELLA_H
+
+#include <asm/vr41xx/irq.h>
+
+/*
+ * General-Purpose I/O Pin Number
+ */
+#define PC104PLUS_INTA_PIN 2
+#define PC104PLUS_INTB_PIN 3
+#define PC104PLUS_INTC_PIN 4
+#define PC104PLUS_INTD_PIN 5
+
+/*
+ * Interrupt Number
+ */
+#define RTL8139_1_IRQ GIU_IRQ(PC104PLUS_INTC_PIN)
+#define RTL8139_2_IRQ GIU_IRQ(PC104PLUS_INTD_PIN)
+#define PC104PLUS_INTA_IRQ GIU_IRQ(PC104PLUS_INTA_PIN)
+#define PC104PLUS_INTB_IRQ GIU_IRQ(PC104PLUS_INTB_PIN)
+#define PC104PLUS_INTC_IRQ GIU_IRQ(PC104PLUS_INTC_PIN)
+#define PC104PLUS_INTD_IRQ GIU_IRQ(PC104PLUS_INTD_PIN)
+
+#endif /* __ZAO_CAPCELLA_H */
diff --git a/arch/mips/include/asm/vr41xx/giu.h b/arch/mips/include/asm/vr41xx/giu.h
new file mode 100644
index 000000000..0211fa898
--- /dev/null
+++ b/arch/mips/include/asm/vr41xx/giu.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Include file for NEC VR4100 series General-purpose I/O Unit.
+ *
+ * Copyright (C) 2005-2009 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#ifndef __NEC_VR41XX_GIU_H
+#define __NEC_VR41XX_GIU_H
+
+/*
+ * NEC VR4100 series GIU platform device IDs.
+ */
+enum {
+ GPIO_50PINS_PULLUPDOWN,
+ GPIO_36PINS,
+ GPIO_48PINS_EDGE_SELECT,
+};
+
+typedef enum {
+ IRQ_TRIGGER_LEVEL,
+ IRQ_TRIGGER_EDGE,
+ IRQ_TRIGGER_EDGE_FALLING,
+ IRQ_TRIGGER_EDGE_RISING,
+} irq_trigger_t;
+
+typedef enum {
+ IRQ_SIGNAL_THROUGH,
+ IRQ_SIGNAL_HOLD,
+} irq_signal_t;
+
+extern void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger,
+ irq_signal_t signal);
+
+typedef enum {
+ IRQ_LEVEL_LOW,
+ IRQ_LEVEL_HIGH,
+} irq_level_t;
+
+extern void vr41xx_set_irq_level(unsigned int pin, irq_level_t level);
+
+#endif /* __NEC_VR41XX_GIU_H */
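An illustrative sketch of configuring a GIU pin as an interrupt source with the helpers above. The pin number and the trigger, signal and level choices are assumptions for the example only, not taken from this patch.

static void __init board_setup_giu_irq(void)
{
	/* Pin 10: level-sensitive, active-low, signal passed through. */
	vr41xx_set_irq_trigger(10, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH);
	vr41xx_set_irq_level(10, IRQ_LEVEL_LOW);
}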
diff --git a/arch/mips/include/asm/vr41xx/irq.h b/arch/mips/include/asm/vr41xx/irq.h
new file mode 100644
index 000000000..2f3d552f9
--- /dev/null
+++ b/arch/mips/include/asm/vr41xx/irq.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-mips/vr41xx/irq.h
+ *
+ * Interrupt numbers for NEC VR4100 series.
+ *
+ * Copyright (C) 1999 Michael Klar
+ * Copyright (C) 2001, 2002 Paul Mundt
+ * Copyright (C) 2002 MontaVista Software, Inc.
+ * Copyright (C) 2002 TimeSys Corp.
+ * Copyright (C) 2003-2006 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#ifndef __NEC_VR41XX_IRQ_H
+#define __NEC_VR41XX_IRQ_H
+
+/*
+ * CPU core Interrupt Numbers
+ */
+#define MIPS_CPU_IRQ_BASE 0
+#define MIPS_CPU_IRQ(x) (MIPS_CPU_IRQ_BASE + (x))
+#define MIPS_SOFTINT0_IRQ MIPS_CPU_IRQ(0)
+#define MIPS_SOFTINT1_IRQ MIPS_CPU_IRQ(1)
+#define INT0_IRQ MIPS_CPU_IRQ(2)
+#define INT1_IRQ MIPS_CPU_IRQ(3)
+#define INT2_IRQ MIPS_CPU_IRQ(4)
+#define INT3_IRQ MIPS_CPU_IRQ(5)
+#define INT4_IRQ MIPS_CPU_IRQ(6)
+#define TIMER_IRQ MIPS_CPU_IRQ(7)
+
+/*
+ * SYSINT1 Interrupt Numbers
+ */
+#define SYSINT1_IRQ_BASE 8
+#define SYSINT1_IRQ(x) (SYSINT1_IRQ_BASE + (x))
+#define BATTRY_IRQ SYSINT1_IRQ(0)
+#define POWER_IRQ SYSINT1_IRQ(1)
+#define RTCLONG1_IRQ SYSINT1_IRQ(2)
+#define ELAPSEDTIME_IRQ SYSINT1_IRQ(3)
+/* RFU */
+#define PIU_IRQ SYSINT1_IRQ(5)
+#define AIU_IRQ SYSINT1_IRQ(6)
+#define KIU_IRQ SYSINT1_IRQ(7)
+#define GIUINT_IRQ SYSINT1_IRQ(8)
+#define SIU_IRQ SYSINT1_IRQ(9)
+#define BUSERR_IRQ SYSINT1_IRQ(10)
+#define SOFTINT_IRQ SYSINT1_IRQ(11)
+#define CLKRUN_IRQ SYSINT1_IRQ(12)
+#define DOZEPIU_IRQ SYSINT1_IRQ(13)
+#define SYSINT1_IRQ_LAST DOZEPIU_IRQ
+
+/*
+ * SYSINT2 Interrupt Numbers
+ */
+#define SYSINT2_IRQ_BASE 24
+#define SYSINT2_IRQ(x) (SYSINT2_IRQ_BASE + (x))
+#define RTCLONG2_IRQ SYSINT2_IRQ(0)
+#define LED_IRQ SYSINT2_IRQ(1)
+#define HSP_IRQ SYSINT2_IRQ(2)
+#define TCLOCK_IRQ SYSINT2_IRQ(3)
+#define FIR_IRQ SYSINT2_IRQ(4)
+#define CEU_IRQ SYSINT2_IRQ(4) /* same number as FIR_IRQ */
+#define DSIU_IRQ SYSINT2_IRQ(5)
+#define PCI_IRQ SYSINT2_IRQ(6)
+#define SCU_IRQ SYSINT2_IRQ(7)
+#define CSI_IRQ SYSINT2_IRQ(8)
+#define BCU_IRQ SYSINT2_IRQ(9)
+#define ETHERNET_IRQ SYSINT2_IRQ(10)
+#define SYSINT2_IRQ_LAST ETHERNET_IRQ
+
+/*
+ * GIU Interrupt Numbers
+ */
+#define GIU_IRQ_BASE 40
+#define GIU_IRQ(x) (GIU_IRQ_BASE + (x)) /* IRQ 40-71 */
+#define GIU_IRQ_LAST GIU_IRQ(31)
+
+/*
+ * VRC4173 Interrupt Numbers
+ */
+#define VRC4173_IRQ_BASE 72
+#define VRC4173_IRQ(x) (VRC4173_IRQ_BASE + (x))
+#define VRC4173_USB_IRQ VRC4173_IRQ(0)
+#define VRC4173_PCMCIA2_IRQ VRC4173_IRQ(1)
+#define VRC4173_PCMCIA1_IRQ VRC4173_IRQ(2)
+#define VRC4173_PS2CH2_IRQ VRC4173_IRQ(3)
+#define VRC4173_PS2CH1_IRQ VRC4173_IRQ(4)
+#define VRC4173_PIU_IRQ VRC4173_IRQ(5)
+#define VRC4173_AIU_IRQ VRC4173_IRQ(6)
+#define VRC4173_KIU_IRQ VRC4173_IRQ(7)
+#define VRC4173_GIU_IRQ VRC4173_IRQ(8)
+#define VRC4173_AC97_IRQ VRC4173_IRQ(9)
+#define VRC4173_AC97INT1_IRQ VRC4173_IRQ(10)
+/* RFU */
+#define VRC4173_DOZEPIU_IRQ VRC4173_IRQ(13)
+#define VRC4173_IRQ_LAST VRC4173_DOZEPIU_IRQ
+
+#endif /* __NEC_VR41XX_IRQ_H */
diff --git a/arch/mips/include/asm/vr41xx/mpc30x.h b/arch/mips/include/asm/vr41xx/mpc30x.h
new file mode 100644
index 000000000..9f977e18d
--- /dev/null
+++ b/arch/mips/include/asm/vr41xx/mpc30x.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * mpc30x.h, Include file for Victor MP-C303/304.
+ *
+ * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#ifndef __VICTOR_MPC30X_H
+#define __VICTOR_MPC30X_H
+
+#include <asm/vr41xx/irq.h>
+
+/*
+ * General-Purpose I/O Pin Number
+ */
+#define VRC4173_PIN 1
+#define MQ200_PIN 4
+
+/*
+ * Interrupt Number
+ */
+#define VRC4173_CASCADE_IRQ GIU_IRQ(VRC4173_PIN)
+#define MQ200_IRQ GIU_IRQ(MQ200_PIN)
+
+#endif /* __VICTOR_MPC30X_H */
diff --git a/arch/mips/include/asm/vr41xx/pci.h b/arch/mips/include/asm/vr41xx/pci.h
new file mode 100644
index 000000000..ad93b5e89
--- /dev/null
+++ b/arch/mips/include/asm/vr41xx/pci.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Include file for NEC VR4100 series PCI Control Unit.
+ *
+ * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#ifndef __NEC_VR41XX_PCI_H
+#define __NEC_VR41XX_PCI_H
+
+#define PCI_MASTER_ADDRESS_MASK 0x7fffffffU
+
+struct pci_master_address_conversion {
+ uint32_t bus_base_address;
+ uint32_t address_mask;
+ uint32_t pci_base_address;
+};
+
+struct pci_target_address_conversion {
+ uint32_t address_mask;
+ uint32_t bus_base_address;
+};
+
+typedef enum {
+ CANNOT_LOCK_FROM_DEVICE,
+ CAN_LOCK_FROM_DEVICE,
+} pci_exclusive_access_t;
+
+struct pci_mailbox_address {
+ uint32_t base_address;
+};
+
+struct pci_target_address_window {
+ uint32_t base_address;
+};
+
+typedef enum {
+ PCI_ARBITRATION_MODE_FAIR,
+ PCI_ARBITRATION_MODE_ALTERNATE_0,
+ PCI_ARBITRATION_MODE_ALTERNATE_B,
+} pci_arbiter_priority_control_t;
+
+typedef enum {
+ PCI_TAKE_AWAY_GNT_DISABLE,
+ PCI_TAKE_AWAY_GNT_ENABLE,
+} pci_take_away_gnt_mode_t;
+
+struct pci_controller_unit_setup {
+ struct pci_master_address_conversion *master_memory1;
+ struct pci_master_address_conversion *master_memory2;
+
+ struct pci_target_address_conversion *target_memory1;
+ struct pci_target_address_conversion *target_memory2;
+
+ struct pci_master_address_conversion *master_io;
+
+ pci_exclusive_access_t exclusive_access;
+
+ uint32_t pci_clock_max;
+ uint8_t wait_time_limit_from_irdy_to_trdy; /* Only VR4122 is supported */
+
+ struct pci_mailbox_address *mailbox;
+ struct pci_target_address_window *target_window1;
+ struct pci_target_address_window *target_window2;
+
+ uint8_t master_latency_timer;
+ uint8_t retry_limit;
+
+ pci_arbiter_priority_control_t arbiter_priority_control;
+ pci_take_away_gnt_mode_t take_away_gnt_mode;
+
+ struct resource *mem_resource;
+ struct resource *io_resource;
+};
+
+extern void vr41xx_pciu_setup(struct pci_controller_unit_setup *setup);
+
+#endif /* __NEC_VR41XX_PCI_H */
diff --git a/arch/mips/include/asm/vr41xx/siu.h b/arch/mips/include/asm/vr41xx/siu.h
new file mode 100644
index 000000000..e920cd2cf
--- /dev/null
+++ b/arch/mips/include/asm/vr41xx/siu.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Include file for NEC VR4100 series Serial Interface Unit.
+ *
+ * Copyright (C) 2005-2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#ifndef __NEC_VR41XX_SIU_H
+#define __NEC_VR41XX_SIU_H
+
+#define SIU_PORTS_MAX 2
+
+typedef enum {
+ SIU_INTERFACE_RS232C,
+ SIU_INTERFACE_IRDA,
+} siu_interface_t;
+
+extern void vr41xx_select_siu_interface(siu_interface_t interface);
+
+typedef enum {
+ SIU_USE_IRDA,
+ FIR_USE_IRDA,
+} irda_use_t;
+
+extern void vr41xx_use_irda(irda_use_t use);
+
+typedef enum {
+ SHARP_IRDA,
+ TEMIC_IRDA,
+ HP_IRDA,
+} irda_module_t;
+
+typedef enum {
+ IRDA_TX_1_5MBPS,
+ IRDA_TX_4MBPS,
+} irda_speed_t;
+
+extern void vr41xx_select_irda_module(irda_module_t module, irda_speed_t speed);
+
+#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
+extern void vr41xx_siu_early_setup(struct uart_port *port);
+#else
+static inline void vr41xx_siu_early_setup(struct uart_port *port) {}
+#endif
+
+#endif /* __NEC_VR41XX_SIU_H */
diff --git a/arch/mips/include/asm/vr41xx/tb0219.h b/arch/mips/include/asm/vr41xx/tb0219.h
new file mode 100644
index 000000000..01e96d6c2
--- /dev/null
+++ b/arch/mips/include/asm/vr41xx/tb0219.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * tb0219.h, Include file for TANBAC TB0219.
+ *
+ * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ * Modified for TANBAC TB0219:
+ * Copyright (C) 2003 Megasolution Inc. <matsu@megasolution.jp>
+ */
+#ifndef __TANBAC_TB0219_H
+#define __TANBAC_TB0219_H
+
+#include <asm/vr41xx/irq.h>
+
+/*
+ * General-Purpose I/O Pin Number
+ */
+#define TB0219_PCI_SLOT1_PIN 2
+#define TB0219_PCI_SLOT2_PIN 3
+#define TB0219_PCI_SLOT3_PIN 4
+
+/*
+ * Interrupt Number
+ */
+#define TB0219_PCI_SLOT1_IRQ GIU_IRQ(TB0219_PCI_SLOT1_PIN)
+#define TB0219_PCI_SLOT2_IRQ GIU_IRQ(TB0219_PCI_SLOT2_PIN)
+#define TB0219_PCI_SLOT3_IRQ GIU_IRQ(TB0219_PCI_SLOT3_PIN)
+
+#endif /* __TANBAC_TB0219_H */
diff --git a/arch/mips/include/asm/vr41xx/tb0226.h b/arch/mips/include/asm/vr41xx/tb0226.h
new file mode 100644
index 000000000..64993d149
--- /dev/null
+++ b/arch/mips/include/asm/vr41xx/tb0226.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * tb0226.h, Include file for TANBAC TB0226.
+ *
+ * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#ifndef __TANBAC_TB0226_H
+#define __TANBAC_TB0226_H
+
+#include <asm/vr41xx/irq.h>
+
+/*
+ * General-Purpose I/O Pin Number
+ */
+#define GD82559_1_PIN 2
+#define GD82559_2_PIN 3
+#define UPD720100_INTA_PIN 4
+#define UPD720100_INTB_PIN 8
+#define UPD720100_INTC_PIN 13
+
+/*
+ * Interrupt Number
+ */
+#define GD82559_1_IRQ GIU_IRQ(GD82559_1_PIN)
+#define GD82559_2_IRQ GIU_IRQ(GD82559_2_PIN)
+#define UPD720100_INTA_IRQ GIU_IRQ(UPD720100_INTA_PIN)
+#define UPD720100_INTB_IRQ GIU_IRQ(UPD720100_INTB_PIN)
+#define UPD720100_INTC_IRQ GIU_IRQ(UPD720100_INTC_PIN)
+
+#endif /* __TANBAC_TB0226_H */
diff --git a/arch/mips/include/asm/vr41xx/tb0287.h b/arch/mips/include/asm/vr41xx/tb0287.h
new file mode 100644
index 000000000..3ddc91386
--- /dev/null
+++ b/arch/mips/include/asm/vr41xx/tb0287.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * tb0287.h, Include file for TANBAC TB0287 mini-ITX board.
+ *
+ * Copyright (C) 2005 Media Lab Inc. <ito@mlb.co.jp>
+ *
+ * This code is largely based on tb0219.h.
+ */
+#ifndef __TANBAC_TB0287_H
+#define __TANBAC_TB0287_H
+
+#include <asm/vr41xx/irq.h>
+
+/*
+ * General-Purpose I/O Pin Number
+ */
+#define TB0287_PCI_SLOT_PIN 2
+#define TB0287_SM501_PIN 3
+#define TB0287_SIL680A_PIN 8
+#define TB0287_RTL8110_PIN 13
+
+/*
+ * Interrupt Number
+ */
+#define TB0287_PCI_SLOT_IRQ GIU_IRQ(TB0287_PCI_SLOT_PIN)
+#define TB0287_SM501_IRQ GIU_IRQ(TB0287_SM501_PIN)
+#define TB0287_SIL680A_IRQ GIU_IRQ(TB0287_SIL680A_PIN)
+#define TB0287_RTL8110_IRQ GIU_IRQ(TB0287_RTL8110_PIN)
+
+#endif /* __TANBAC_TB0287_H */
diff --git a/arch/mips/include/asm/vr41xx/vr41xx.h b/arch/mips/include/asm/vr41xx/vr41xx.h
new file mode 100644
index 000000000..9a4b36b75
--- /dev/null
+++ b/arch/mips/include/asm/vr41xx/vr41xx.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-mips/vr41xx/vr41xx.h
+ *
+ * Include file for NEC VR4100 series.
+ *
+ * Copyright (C) 1999 Michael Klar
+ * Copyright (C) 2001, 2002 Paul Mundt
+ * Copyright (C) 2002 MontaVista Software, Inc.
+ * Copyright (C) 2002 TimeSys Corp.
+ * Copyright (C) 2003-2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#ifndef __NEC_VR41XX_H
+#define __NEC_VR41XX_H
+
+#include <linux/interrupt.h>
+
+/*
+ * CPU Revision
+ */
+/* VR4122 0x00000c70-0x00000c72 */
+#define PRID_VR4122_REV1_0 0x00000c70
+#define PRID_VR4122_REV2_0 0x00000c70
+#define PRID_VR4122_REV2_1 0x00000c70
+#define PRID_VR4122_REV3_0 0x00000c71
+#define PRID_VR4122_REV3_1 0x00000c72
+
+/* VR4181A 0x00000c73-0x00000c7f */
+#define PRID_VR4181A_REV1_0 0x00000c73
+#define PRID_VR4181A_REV1_1 0x00000c74
+
+/* VR4131 0x00000c80-0x00000c83 */
+#define PRID_VR4131_REV1_2 0x00000c80
+#define PRID_VR4131_REV2_0 0x00000c81
+#define PRID_VR4131_REV2_1 0x00000c82
+#define PRID_VR4131_REV2_2 0x00000c83
+
+/* VR4133 0x00000c84- */
+#define PRID_VR4133 0x00000c84
+
+/*
+ * Bus Control Unit
+ */
+extern unsigned long vr41xx_calculate_clock_frequency(void);
+extern unsigned long vr41xx_get_vtclock_frequency(void);
+extern unsigned long vr41xx_get_tclock_frequency(void);
+
+/*
+ * Clock Mask Unit
+ */
+typedef enum {
+ PIU_CLOCK,
+ SIU_CLOCK,
+ AIU_CLOCK,
+ KIU_CLOCK,
+ FIR_CLOCK,
+ DSIU_CLOCK,
+ CSI_CLOCK,
+ PCIU_CLOCK,
+ HSP_CLOCK,
+ PCI_CLOCK,
+ CEU_CLOCK,
+ ETHER0_CLOCK,
+ ETHER1_CLOCK
+} vr41xx_clock_t;
+
+extern void vr41xx_supply_clock(vr41xx_clock_t clock);
+extern void vr41xx_mask_clock(vr41xx_clock_t clock);
+
+/*
+ * Interrupt Control Unit
+ */
+extern int vr41xx_set_intassign(unsigned int irq, unsigned char intassign);
+extern int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int));
+
+#define PIUINT_COMMAND 0x0040
+#define PIUINT_DATA 0x0020
+#define PIUINT_PAGE1 0x0010
+#define PIUINT_PAGE0 0x0008
+#define PIUINT_DATALOST 0x0004
+#define PIUINT_STATUSCHANGE 0x0001
+
+extern void vr41xx_enable_piuint(uint16_t mask);
+extern void vr41xx_disable_piuint(uint16_t mask);
+
+#define AIUINT_INPUT_DMAEND 0x0800
+#define AIUINT_INPUT_DMAHALT 0x0400
+#define AIUINT_INPUT_DATALOST 0x0200
+#define AIUINT_INPUT_DATA 0x0100
+#define AIUINT_OUTPUT_DMAEND 0x0008
+#define AIUINT_OUTPUT_DMAHALT 0x0004
+#define AIUINT_OUTPUT_NODATA 0x0002
+
+extern void vr41xx_enable_aiuint(uint16_t mask);
+extern void vr41xx_disable_aiuint(uint16_t mask);
+
+#define KIUINT_DATALOST 0x0004
+#define KIUINT_DATAREADY 0x0002
+#define KIUINT_SCAN 0x0001
+
+extern void vr41xx_enable_kiuint(uint16_t mask);
+extern void vr41xx_disable_kiuint(uint16_t mask);
+
+#define DSIUINT_CTS 0x0800
+#define DSIUINT_RXERR 0x0400
+#define DSIUINT_RX 0x0200
+#define DSIUINT_TX 0x0100
+#define DSIUINT_ALL 0x0f00
+
+extern void vr41xx_enable_dsiuint(uint16_t mask);
+extern void vr41xx_disable_dsiuint(uint16_t mask);
+
+#define FIRINT_UNIT 0x0010
+#define FIRINT_RX_DMAEND 0x0008
+#define FIRINT_RX_DMAHALT 0x0004
+#define FIRINT_TX_DMAEND 0x0002
+#define FIRINT_TX_DMAHALT 0x0001
+
+extern void vr41xx_enable_firint(uint16_t mask);
+extern void vr41xx_disable_firint(uint16_t mask);
+
+extern void vr41xx_enable_pciint(void);
+extern void vr41xx_disable_pciint(void);
+
+extern void vr41xx_enable_scuint(void);
+extern void vr41xx_disable_scuint(void);
+
+#define CSIINT_TX_DMAEND 0x0040
+#define CSIINT_TX_DMAHALT 0x0020
+#define CSIINT_TX_DATA 0x0010
+#define CSIINT_TX_FIFOEMPTY 0x0008
+#define CSIINT_RX_DMAEND 0x0004
+#define CSIINT_RX_DMAHALT 0x0002
+#define CSIINT_RX_FIFOEMPTY 0x0001
+
+extern void vr41xx_enable_csiint(uint16_t mask);
+extern void vr41xx_disable_csiint(uint16_t mask);
+
+extern void vr41xx_enable_bcuint(void);
+extern void vr41xx_disable_bcuint(void);
+
+#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
+extern void vr41xx_siu_setup(void);
+#else
+static inline void vr41xx_siu_setup(void) {}
+#endif
+
+#endif /* __NEC_VR41XX_H */
diff --git a/arch/mips/include/asm/war.h b/arch/mips/include/asm/war.h
new file mode 100644
index 000000000..21443f096
--- /dev/null
+++ b/arch/mips/include/asm/war.h
@@ -0,0 +1,73 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle
+ * Copyright (C) 2007 Maciej W. Rozycki
+ */
+#ifndef _ASM_WAR_H
+#define _ASM_WAR_H
+
+/*
+ * Work around certain R4000 CPU errata (as implemented by GCC):
+ *
+ * - A double-word or a variable shift may give an incorrect result
+ * if executed immediately after starting an integer division:
+ * "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0",
+ * erratum #28
+ * "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0", erratum
+ * #19
+ *
+ * - A double-word or a variable shift may give an incorrect result
+ * if executed while an integer multiplication is in progress:
+ * "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0",
+ * errata #16 & #28
+ *
+ * - An integer division may give an incorrect result if started in
+ * a delay slot of a taken branch or a jump:
+ * "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0",
+ * erratum #52
+ */
+#ifdef CONFIG_CPU_R4000_WORKAROUNDS
+#define R4000_WAR 1
+#else
+#define R4000_WAR 0
+#endif
+
+/*
+ * Work around certain R4400 CPU errata (as implemented by GCC):
+ *
+ * - A double-word or a variable shift may give an incorrect result
+ * if executed immediately after starting an integer division:
+ * "MIPS R4400MC Errata, Processor Revision 1.0", erratum #10
+ * "MIPS R4400MC Errata, Processor Revision 2.0 & 3.0", erratum #4
+ */
+#ifdef CONFIG_CPU_R4400_WORKAROUNDS
+#define R4400_WAR 1
+#else
+#define R4400_WAR 0
+#endif
+
+/*
+ * Work around the "daddi" and "daddiu" CPU errata:
+ *
+ * - The `daddi' instruction fails to trap on overflow.
+ * "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0",
+ * erratum #23
+ *
+ * - The `daddiu' instruction can produce an incorrect result.
+ * "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0",
+ * erratum #41
+ * "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0", erratum
+ * #15
+ * "MIPS R4400PC/SC Errata, Processor Revision 1.0", erratum #7
+ * "MIPS R4400MC Errata, Processor Revision 1.0", erratum #5
+ */
+#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
+#define DADDI_WAR 1
+#else
+#define DADDI_WAR 0
+#endif
+
+#endif /* _ASM_WAR_H */
diff --git a/arch/mips/include/asm/watch.h b/arch/mips/include/asm/watch.h
new file mode 100644
index 000000000..6ffe3eadf
--- /dev/null
+++ b/arch/mips/include/asm/watch.h
@@ -0,0 +1,32 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 David Daney
+ */
+#ifndef _ASM_WATCH_H
+#define _ASM_WATCH_H
+
+#include <linux/bitops.h>
+
+#include <asm/mipsregs.h>
+
+void mips_install_watch_registers(struct task_struct *t);
+void mips_read_watch_registers(void);
+void mips_clear_watch_registers(void);
+void mips_probe_watch_registers(struct cpuinfo_mips *c);
+
+#ifdef CONFIG_HARDWARE_WATCHPOINTS
+#define __restore_watch(task) do { \
+ if (unlikely(test_bit(TIF_LOAD_WATCH, \
+ &task_thread_info(task)->flags))) { \
+ mips_install_watch_registers(task); \
+ } \
+} while (0)
+
+#else
+#define __restore_watch(task) do {} while (0)
+#endif
+
+#endif /* _ASM_WATCH_H */
diff --git a/arch/mips/include/asm/wbflush.h b/arch/mips/include/asm/wbflush.h
new file mode 100644
index 000000000..eadc0ac47
--- /dev/null
+++ b/arch/mips/include/asm/wbflush.h
@@ -0,0 +1,34 @@
+/*
+ * Header file for using the wbflush routine
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1998 Harald Koerfgen
+ * Copyright (C) 2002 Maciej W. Rozycki
+ */
+#ifndef _ASM_WBFLUSH_H
+#define _ASM_WBFLUSH_H
+
+
+#ifdef CONFIG_CPU_HAS_WB
+
+extern void (*__wbflush)(void);
+extern void wbflush_setup(void);
+
+#define wbflush() \
+ do { \
+ __sync(); \
+ __wbflush(); \
+ } while (0)
+
+#else /* !CONFIG_CPU_HAS_WB */
+
+#define wbflush_setup() do { } while (0)
+
+#define wbflush() fast_iob()
+
+#endif /* !CONFIG_CPU_HAS_WB */
+
+#endif /* _ASM_WBFLUSH_H */
diff --git a/arch/mips/include/asm/xtalk/xtalk.h b/arch/mips/include/asm/xtalk/xtalk.h
new file mode 100644
index 000000000..680e7efeb
--- /dev/null
+++ b/arch/mips/include/asm/xtalk/xtalk.h
@@ -0,0 +1,52 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * xtalk.h -- platform-independent crosstalk interface, derived from
+ * IRIX <sys/PCI/bridge.h>, revision 1.38.
+ *
+ * Copyright (C) 1995 - 1997, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
+ */
+#ifndef _ASM_XTALK_XTALK_H
+#define _ASM_XTALK_XTALK_H
+
+#ifndef __ASSEMBLY__
+/*
+ * User-level device driver visible types
+ */
+typedef char xwidgetnum_t; /* xtalk widget number (0..15) */
+
+#define XWIDGET_NONE -1
+
+typedef int xwidget_part_num_t; /* xtalk widget part number */
+
+#define XWIDGET_PART_NUM_NONE -1
+
+typedef int xwidget_rev_num_t; /* xtalk widget revision number */
+
+#define XWIDGET_REV_NUM_NONE -1
+
+typedef int xwidget_mfg_num_t; /* xtalk widget manufacturing ID */
+
+#define XWIDGET_MFG_NUM_NONE -1
+
+typedef struct xtalk_piomap_s *xtalk_piomap_t;
+
+/* It is often convenient to fold the XIO target port
+ * number into the XIO address.
+ */
+#define XIO_NOWHERE (0xFFFFFFFFFFFFFFFFull)
+#define XIO_ADDR_BITS (0x0000FFFFFFFFFFFFull)
+#define XIO_PORT_BITS (0xF000000000000000ull)
+#define XIO_PORT_SHIFT (60)
+
+#define XIO_PACKED(x) (((x)&XIO_PORT_BITS) != 0)
+#define XIO_ADDR(x) ((x)&XIO_ADDR_BITS)
+#define XIO_PORT(x) ((xwidgetnum_t)(((x)&XIO_PORT_BITS) >> XIO_PORT_SHIFT))
+#define XIO_PACK(p, o) ((((uint64_t)(p))<<XIO_PORT_SHIFT) | ((o)&XIO_ADDR_BITS))
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_XTALK_XTALK_H */
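A small worked example of the XIO packing macros above; the port and offset values are arbitrary, and the function exists only for illustration.

static inline void xio_pack_example(void)
{
	unsigned long long xio = XIO_PACK(0xA, 0x1234ull);

	/*
	 * XIO_PACKED(xio) is non-zero (the port field is set),
	 * XIO_PORT(xio) yields 0xA and XIO_ADDR(xio) yields 0x1234.
	 */
	(void)xio;
}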
diff --git a/arch/mips/include/asm/xtalk/xwidget.h b/arch/mips/include/asm/xtalk/xwidget.h
new file mode 100644
index 000000000..24f121da6
--- /dev/null
+++ b/arch/mips/include/asm/xtalk/xwidget.h
@@ -0,0 +1,279 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * xwidget.h - generic crosstalk widget header file, derived from IRIX
+ * <sys/xtalk/xtalkwidget.h>, revision 1.32.
+ *
+ * Copyright (C) 1996, 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
+ */
+#ifndef _ASM_XTALK_XWIDGET_H
+#define _ASM_XTALK_XWIDGET_H
+
+#include <linux/types.h>
+#include <asm/xtalk/xtalk.h>
+
+#define WIDGET_ID 0x04
+#define WIDGET_STATUS 0x0c
+#define WIDGET_ERR_UPPER_ADDR 0x14
+#define WIDGET_ERR_LOWER_ADDR 0x1c
+#define WIDGET_CONTROL 0x24
+#define WIDGET_REQ_TIMEOUT 0x2c
+#define WIDGET_INTDEST_UPPER_ADDR 0x34
+#define WIDGET_INTDEST_LOWER_ADDR 0x3c
+#define WIDGET_ERR_CMD_WORD 0x44
+#define WIDGET_LLP_CFG 0x4c
+#define WIDGET_TFLUSH 0x54
+
+/* WIDGET_ID */
+#define WIDGET_REV_NUM 0xf0000000
+#define WIDGET_PART_NUM 0x0ffff000
+#define WIDGET_MFG_NUM 0x00000ffe
+#define WIDGET_REV_NUM_SHFT 28
+#define WIDGET_PART_NUM_SHFT 12
+#define WIDGET_MFG_NUM_SHFT 1
+
+#define XWIDGET_PART_NUM(widgetid) (((widgetid) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT)
+#define XWIDGET_REV_NUM(widgetid) (((widgetid) & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT)
+#define XWIDGET_MFG_NUM(widgetid) (((widgetid) & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT)
+
+/* WIDGET_STATUS */
+#define WIDGET_LLP_REC_CNT 0xff000000
+#define WIDGET_LLP_TX_CNT 0x00ff0000
+#define WIDGET_PENDING 0x0000001f
+
+/* WIDGET_ERR_UPPER_ADDR */
+#define WIDGET_ERR_UPPER_ADDR_ONLY 0x0000ffff
+
+/* WIDGET_CONTROL */
+#define WIDGET_F_BAD_PKT 0x00010000
+#define WIDGET_LLP_XBAR_CRD 0x0000f000
+#define WIDGET_LLP_XBAR_CRD_SHFT 12
+#define WIDGET_CLR_RLLP_CNT 0x00000800
+#define WIDGET_CLR_TLLP_CNT 0x00000400
+#define WIDGET_SYS_END 0x00000200
+#define WIDGET_MAX_TRANS 0x000001f0
+#define WIDGET_WIDGET_ID 0x0000000f
+
+/* WIDGET_INTDEST_UPPER_ADDR */
+#define WIDGET_INT_VECTOR 0xff000000
+#define WIDGET_INT_VECTOR_SHFT 24
+#define WIDGET_TARGET_ID 0x000f0000
+#define WIDGET_TARGET_ID_SHFT 16
+#define WIDGET_UPP_ADDR 0x0000ffff
+
+/* WIDGET_ERR_CMD_WORD */
+#define WIDGET_DIDN 0xf0000000
+#define WIDGET_SIDN 0x0f000000
+#define WIDGET_PACTYP 0x00f00000
+#define WIDGET_TNUM 0x000f8000
+#define WIDGET_COHERENT 0x00004000
+#define WIDGET_DS 0x00003000
+#define WIDGET_GBR 0x00000800
+#define WIDGET_VBPM 0x00000400
+#define WIDGET_ERROR 0x00000200
+#define WIDGET_BARRIER 0x00000100
+
+/* WIDGET_LLP_CFG */
+#define WIDGET_LLP_MAXRETRY 0x03ff0000
+#define WIDGET_LLP_MAXRETRY_SHFT 16
+#define WIDGET_LLP_NULLTIMEOUT 0x0000fc00
+#define WIDGET_LLP_NULLTIMEOUT_SHFT 10
+#define WIDGET_LLP_MAXBURST 0x000003ff
+#define WIDGET_LLP_MAXBURST_SHFT 0
+
+/* Xtalk Widget Device Mfgr Nums */
+#define WIDGET_XBOW_MFGR_NUM 0x0 /* IP30 XBow Chip */
+#define WIDGET_XXBOW_MFGR_NUM 0x0 /* IP35 Xbow + XBridge Chip */
+#define WIDGET_ODYS_MFGR_NUM 0x023 /* Odyssey / VPro GFX */
+#define WIDGET_TPU_MFGR_NUM 0x024 /* Tensor Processor Unit */
+#define WIDGET_XBRDG_MFGR_NUM 0x024 /* IP35 XBridge Chip */
+#define WIDGET_HEART_MFGR_NUM 0x036 /* IP30 HEART Chip */
+#define WIDGET_BRIDG_MFGR_NUM 0x036 /* PCI Bridge */
+#define WIDGET_HUB_MFGR_NUM 0x036 /* IP27 Hub Chip */
+#define WIDGET_BDRCK_MFGR_NUM 0x036 /* IP35 Bedrock Chip */
+#define WIDGET_IMPCT_MFGR_NUM 0x2aa /* HQ4 / Impact GFX */
+#define WIDGET_KONA_MFGR_NUM 0x2aa /* InfiniteReality3 / Kona GFX */
+#define WIDGET_NULL_MFGR_NUM -1 /* NULL */
+
+/* Xtalk Widget Device Part Nums */
+#define WIDGET_XBOW_PART_NUM 0x0000
+#define WIDGET_HEART_PART_NUM 0xc001
+#define WIDGET_BRIDG_PART_NUM 0xc002
+#define WIDGET_IMPCT_PART_NUM 0xc003
+#define WIDGET_ODYS_PART_NUM 0xc013
+#define WIDGET_HUB_PART_NUM 0xc101
+#define WIDGET_KONA_PART_NUM 0xc102
+#define WIDGET_BDRCK_PART_NUM 0xc110
+#define WIDGET_TPU_PART_NUM 0xc202
+#define WIDGET_XXBOW_PART_NUM 0xd000
+#define WIDGET_XBRDG_PART_NUM 0xd002
+#define WIDGET_NULL_PART_NUM -1
+
+/* For Xtalk Widget identification */
+struct widget_ident {
+ u32 mfgr;
+ u32 part;
+ char *name;
+ char *revs[16];
+};
+
+/* Known Xtalk Widgets */
+static const struct widget_ident __initconst widget_idents[] = {
+ {
+ WIDGET_XBOW_MFGR_NUM,
+ WIDGET_XBOW_PART_NUM,
+ "xbow",
+ {NULL, "1.0", "1.1", "1.2", "1.3", "2.0", NULL},
+ },
+ {
+ WIDGET_HEART_MFGR_NUM,
+ WIDGET_HEART_PART_NUM,
+ "heart",
+ {NULL, "A", "B", "C", "D", "E", "F", NULL},
+ },
+ {
+ WIDGET_BRIDG_MFGR_NUM,
+ WIDGET_BRIDG_PART_NUM,
+ "bridge",
+ {NULL, "A", "B", "C", "D", NULL},
+ },
+ {
+ WIDGET_IMPCT_MFGR_NUM,
+ WIDGET_IMPCT_PART_NUM,
+ "impact",
+ {NULL, "A", "B", NULL},
+ },
+ {
+ WIDGET_ODYS_MFGR_NUM,
+ WIDGET_ODYS_PART_NUM,
+ "odyssey",
+ {NULL, "A", "B", NULL},
+ },
+ {
+ WIDGET_HUB_MFGR_NUM,
+ WIDGET_HUB_PART_NUM,
+ "hub",
+ {NULL, "1.0", "2.0", "2.1", "2.2", "2.3", "2.4", NULL},
+ },
+ {
+ WIDGET_KONA_MFGR_NUM,
+ WIDGET_KONA_PART_NUM,
+ "kona",
+ {NULL},
+ },
+ {
+ WIDGET_BDRCK_MFGR_NUM,
+ WIDGET_BDRCK_PART_NUM,
+ "bedrock",
+ {NULL, "1.0", "1.1", NULL},
+ },
+ {
+ WIDGET_TPU_MFGR_NUM,
+ WIDGET_TPU_PART_NUM,
+ "tpu",
+ {"0", NULL},
+ },
+ {
+ WIDGET_XXBOW_MFGR_NUM,
+ WIDGET_XXBOW_PART_NUM,
+ "xxbow",
+ {NULL, "1.0", "2.0", NULL},
+ },
+ {
+ WIDGET_XBRDG_MFGR_NUM,
+ WIDGET_XBRDG_PART_NUM,
+ "xbridge",
+ {NULL, "A", "B", NULL},
+ },
+ {
+ WIDGET_NULL_MFGR_NUM,
+ WIDGET_NULL_PART_NUM,
+ NULL,
+ {NULL},
+ }
+};
+
+/*
+ * According to the crosstalk spec, only 32-bit accesses to the widget
+ * configuration registers are allowed. Some widgets may allow 64-bit
+ * accesses, but software should not depend on it. Registers beyond the
+ * widget target flush register are widget dependent and thus are not
+ * defined here.
+ */
+#ifndef __ASSEMBLY__
+typedef u32 widgetreg_t;
+
+/* widget configuration registers */
+typedef volatile struct widget_cfg {
+ widgetreg_t w_pad_0; /* 0x00 */
+ widgetreg_t w_id; /* 0x04 */
+ widgetreg_t w_pad_1; /* 0x08 */
+ widgetreg_t w_status; /* 0x0c */
+ widgetreg_t w_pad_2; /* 0x10 */
+ widgetreg_t w_err_upper_addr; /* 0x14 */
+ widgetreg_t w_pad_3; /* 0x18 */
+ widgetreg_t w_err_lower_addr; /* 0x1c */
+ widgetreg_t w_pad_4; /* 0x20 */
+ widgetreg_t w_control; /* 0x24 */
+ widgetreg_t w_pad_5; /* 0x28 */
+ widgetreg_t w_req_timeout; /* 0x2c */
+ widgetreg_t w_pad_6; /* 0x30 */
+ widgetreg_t w_intdest_upper_addr; /* 0x34 */
+ widgetreg_t w_pad_7; /* 0x38 */
+ widgetreg_t w_intdest_lower_addr; /* 0x3c */
+ widgetreg_t w_pad_8; /* 0x40 */
+ widgetreg_t w_err_cmd_word; /* 0x44 */
+ widgetreg_t w_pad_9; /* 0x48 */
+ widgetreg_t w_llp_cfg; /* 0x4c */
+ widgetreg_t w_pad_10; /* 0x50 */
+ widgetreg_t w_tflush; /* 0x54 */
+} widget_cfg_t;
+
+typedef struct {
+ unsigned didn:4;
+ unsigned sidn:4;
+ unsigned pactyp:4;
+ unsigned tnum:5;
+ unsigned ct:1;
+ unsigned ds:2;
+ unsigned gbr:1;
+ unsigned vbpm:1;
+ unsigned error:1;
+ unsigned bo:1;
+ unsigned other:8;
+} w_err_cmd_word_f;
+
+typedef union {
+ widgetreg_t r;
+ w_err_cmd_word_f f;
+} w_err_cmd_word_u;
+
+typedef struct xwidget_info_s *xwidget_info_t;
+
+/*
+ * Crosstalk Widget Hardware Identification, as defined in the Crosstalk spec.
+ */
+typedef struct xwidget_hwid_s {
+ xwidget_part_num_t part_num;
+ xwidget_rev_num_t rev_num;
+ xwidget_mfg_num_t mfg_num;
+} *xwidget_hwid_t;
+
+
+/*
+ * Returns 1 if a driver that handles devices described by hwid1 is able
+ * to manage a device with hardwareid hwid2. NOTE: We don't check rev
+ * numbers at all.
+ */
+#define XWIDGET_HARDWARE_ID_MATCH(hwid1, hwid2) \
+ (((hwid1)->part_num == (hwid2)->part_num) && \
+ (((hwid1)->mfg_num == XWIDGET_MFG_NUM_NONE) || \
+ ((hwid2)->mfg_num == XWIDGET_MFG_NUM_NONE) || \
+ ((hwid1)->mfg_num == (hwid2)->mfg_num)))
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_XTALK_XWIDGET_H */
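A sketch of using XWIDGET_HARDWARE_ID_MATCH() to check a probed widget against the IDs a driver handles; the helper name is hypothetical, and, as the comment above the macro notes, revision numbers are deliberately ignored.

static int widget_is_bridge(const struct xwidget_hwid_s *probed)
{
	struct xwidget_hwid_s bridge_id = {
		.part_num = WIDGET_BRIDG_PART_NUM,
		.mfg_num  = WIDGET_BRIDG_MFGR_NUM,
	};

	return XWIDGET_HARDWARE_ID_MATCH(&bridge_id, probed);
}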
diff --git a/arch/mips/include/asm/yamon-dt.h b/arch/mips/include/asm/yamon-dt.h
new file mode 100644
index 000000000..e20475540
--- /dev/null
+++ b/arch/mips/include/asm/yamon-dt.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#ifndef __MIPS_ASM_YAMON_DT_H__
+#define __MIPS_ASM_YAMON_DT_H__
+
+#include <linux/types.h>
+
+/**
+ * struct yamon_mem_region - Represents a contiguous range of physical RAM.
+ * @start: Start physical address.
+ * @size: Maximum size of region.
+ * @discard: Length of additional memory to discard after the region.
+ */
+struct yamon_mem_region {
+ phys_addr_t start;
+ phys_addr_t size;
+ phys_addr_t discard;
+};
+
+/**
+ * yamon_dt_append_cmdline() - Append YAMON-provided command line to /chosen
+ * @fdt: the FDT blob
+ *
+ * Write the YAMON-provided command line to the bootargs property of the
+ * /chosen node in @fdt.
+ *
+ * Return: 0 on success, else -errno
+ */
+extern __init int yamon_dt_append_cmdline(void *fdt);
+
+/**
+ * yamon_dt_append_memory() - Append YAMON-provided memory info to /memory
+ * @fdt: the FDT blob
+ * @regions: zero-size-terminated array of physical memory regions
+ *
+ * Generate a /memory node in @fdt based upon memory size information provided
+ * by YAMON in its environment and the @regions array.
+ *
+ * Return: 0 on success, else -errno
+ */
+extern __init int yamon_dt_append_memory(void *fdt,
+ const struct yamon_mem_region *regions);
+
+/**
+ * yamon_dt_serial_config() - Append YAMON-provided serial config to /chosen
+ * @fdt: the FDT blob
+ *
+ * Generate a stdout-path property in the /chosen node of @fdt, based upon
+ * information provided in the YAMON environment about the UART configuration
+ * of the system.
+ *
+ * Return: 0 on success, else -errno
+ */
+extern __init int yamon_dt_serial_config(void *fdt);
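+
+/*
+ * Illustrative sketch (not part of this header): a board's FDT fixup would
+ * typically call the three helpers in sequence. The my_fixup_fdt() function
+ * and my_mem_regions[] array are hypothetical; the region sizes are made up.
+ */
+#if 0
+static int __init my_fixup_fdt(void *fdt)
+{
+ static const struct yamon_mem_region my_mem_regions[] = {
+ { .start = 0, .size = 0x10000000 }, /* first 256 MiB */
+ { 0 } /* zero size terminates the array */
+ };
+ int err;
+
+ err = yamon_dt_append_cmdline(fdt);
+ if (err)
+ return err;
+
+ err = yamon_dt_append_memory(fdt, my_mem_regions);
+ if (err)
+ return err;
+
+ return yamon_dt_serial_config(fdt);
+}
+#endif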
+
+#endif /* __MIPS_ASM_YAMON_DT_H__ */
diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild
new file mode 100644
index 000000000..6db08385d
--- /dev/null
+++ b/arch/mips/include/uapi/asm/Kbuild
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+generated-y += unistd_n32.h
+generated-y += unistd_n64.h
+generated-y += unistd_o32.h
+generated-y += unistd_nr_n32.h
+generated-y += unistd_nr_n64.h
+generated-y += unistd_nr_o32.h
+
+generic-y += kvm_para.h
diff --git a/arch/mips/include/uapi/asm/auxvec.h b/arch/mips/include/uapi/asm/auxvec.h
new file mode 100644
index 000000000..612c2c41f
--- /dev/null
+++ b/arch/mips/include/uapi/asm/auxvec.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __ASM_AUXVEC_H
+#define __ASM_AUXVEC_H
+
+/* Location of VDSO image. */
+#define AT_SYSINFO_EHDR 33
+
+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
+#endif /* __ASM_AUXVEC_H */
diff --git a/arch/mips/include/uapi/asm/bitfield.h b/arch/mips/include/uapi/asm/bitfield.h
new file mode 100644
index 000000000..b11713d87
--- /dev/null
+++ b/arch/mips/include/uapi/asm/bitfield.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __UAPI_ASM_BITFIELD_H
+#define __UAPI_ASM_BITFIELD_H
+
+/*
+ * Damn ... bitfields depend on byteorder :-(
+ */
+#ifdef __MIPSEB__
+#define __BITFIELD_FIELD(field, more) \
+ field; \
+ more
+
+#elif defined(__MIPSEL__)
+
+#define __BITFIELD_FIELD(field, more) \
+ more \
+ field;
+
+#else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */
+#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
+#endif
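+
+/*
+ * Illustrative sketch (not part of this header): __BITFIELD_FIELD() lets a
+ * layout be written once, most-significant field first, and reverses the
+ * declaration order automatically on little-endian builds. The example_reg
+ * structure below is hypothetical.
+ */
+#if 0
+struct example_reg {
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int operand : 26,
+ ;))
+};
+#endif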
+
+#endif /* __UAPI_ASM_BITFIELD_H */
diff --git a/arch/mips/include/uapi/asm/bitsperlong.h b/arch/mips/include/uapi/asm/bitsperlong.h
new file mode 100644
index 000000000..7268380d8
--- /dev/null
+++ b/arch/mips/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_MIPS_BITSPERLONG_H
+#define __ASM_MIPS_BITSPERLONG_H
+
+#define __BITS_PER_LONG _MIPS_SZLONG
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_MIPS_BITSPERLONG_H */
diff --git a/arch/mips/include/uapi/asm/break.h b/arch/mips/include/uapi/asm/break.h
new file mode 100644
index 000000000..10380b1bc
--- /dev/null
+++ b/arch/mips/include/uapi/asm/break.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 2003 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#ifndef __UAPI_ASM_BREAK_H
+#define __UAPI_ASM_BREAK_H
+
+/*
+ * The following break codes are or were in use for specific purposes in
+ * other MIPS operating systems. Linux/MIPS doesn't use all of them. The
+ * unused ones are here as placeholders; we might encounter them in
+ * non-Linux/MIPS object files or make use of them in the future.
+ */
+#define BRK_USERBP 0 /* User bp (used by debuggers) */
+#define BRK_SSTEPBP 5 /* User bp (used by debuggers) */
+#define BRK_OVERFLOW 6 /* Overflow check */
+#define BRK_DIVZERO 7 /* Divide by zero check */
+#define BRK_RANGE 8 /* Range error check */
+#define BRK_BUG 12 /* Used by BUG() */
+#define BRK_UPROBE 13 /* See <asm/uprobes.h> */
+#define BRK_UPROBE_XOL 14 /* See <asm/uprobes.h> */
+#define BRK_MEMU 514 /* Used by FPU emulator */
+#define BRK_KPROBE_BP 515 /* Kprobe break */
+#define BRK_KPROBE_SSTEPBP 516 /* Kprobe single step software implementation */
+#define BRK_MULOVF 1023 /* Multiply overflow */
+
+#endif /* __UAPI_ASM_BREAK_H */
diff --git a/arch/mips/include/uapi/asm/byteorder.h b/arch/mips/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000..b4edc85f9
--- /dev/null
+++ b/arch/mips/include/uapi/asm/byteorder.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 99, 2003 by Ralf Baechle
+ */
+#ifndef _ASM_BYTEORDER_H
+#define _ASM_BYTEORDER_H
+
+#if defined(__MIPSEB__)
+#include <linux/byteorder/big_endian.h>
+#elif defined(__MIPSEL__)
+#include <linux/byteorder/little_endian.h>
+#else
+# error "MIPS, but neither __MIPSEB__, nor __MIPSEL__???"
+#endif
+
+#endif /* _ASM_BYTEORDER_H */
diff --git a/arch/mips/include/uapi/asm/cachectl.h b/arch/mips/include/uapi/asm/cachectl.h
new file mode 100644
index 000000000..af7639ff4
--- /dev/null
+++ b/arch/mips/include/uapi/asm/cachectl.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995, 1996 by Ralf Baechle
+ */
+#ifndef _ASM_CACHECTL
+#define _ASM_CACHECTL
+
+/*
+ * Options for cacheflush system call
+ */
+#define ICACHE (1<<0) /* flush instruction cache */
+#define DCACHE (1<<1) /* writeback and flush data cache */
+#define BCACHE (ICACHE|DCACHE) /* flush both caches */
+
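+/*
+ * Illustrative sketch (not part of this header): userspace that has just
+ * written instructions into a buffer would typically flush both caches for
+ * that range before executing it. The sync_icache() helper is hypothetical;
+ * the cacheflush() prototype comes from <sys/cachectl.h> in userspace.
+ */
+#if 0
+#include <sys/cachectl.h>
+
+static int sync_icache(void *buf, int nbytes)
+{
+ /* write back the D-cache and invalidate the I-cache for the range */
+ return cacheflush(buf, nbytes, BCACHE);
+}
+#endif
+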
+/*
+ * Caching modes for the cachectl(2) call
+ *
+ * cachectl(2) is currently not supported and returns ENOSYS.
+ */
+#define CACHEABLE 0 /* make pages cacheable */
+#define UNCACHEABLE 1 /* make pages uncacheable */
+
+#endif /* _ASM_CACHECTL */
diff --git a/arch/mips/include/uapi/asm/errno.h b/arch/mips/include/uapi/asm/errno.h
new file mode 100644
index 000000000..2fb714e2d
--- /dev/null
+++ b/arch/mips/include/uapi/asm/errno.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1999, 2001, 2002 by Ralf Baechle
+ */
+#ifndef _UAPI_ASM_ERRNO_H
+#define _UAPI_ASM_ERRNO_H
+
+/*
+ * These error numbers are intended to be MIPS ABI compatible
+ */
+
+#include <asm-generic/errno-base.h>
+
+#define ENOMSG 35 /* No message of desired type */
+#define EIDRM 36 /* Identifier removed */
+#define ECHRNG 37 /* Channel number out of range */
+#define EL2NSYNC 38 /* Level 2 not synchronized */
+#define EL3HLT 39 /* Level 3 halted */
+#define EL3RST 40 /* Level 3 reset */
+#define ELNRNG 41 /* Link number out of range */
+#define EUNATCH 42 /* Protocol driver not attached */
+#define ENOCSI 43 /* No CSI structure available */
+#define EL2HLT 44 /* Level 2 halted */
+#define EDEADLK 45 /* Resource deadlock would occur */
+#define ENOLCK 46 /* No record locks available */
+#define EBADE 50 /* Invalid exchange */
+#define EBADR 51 /* Invalid request descriptor */
+#define EXFULL 52 /* Exchange full */
+#define ENOANO 53 /* No anode */
+#define EBADRQC 54 /* Invalid request code */
+#define EBADSLT 55 /* Invalid slot */
+#define EDEADLOCK 56 /* File locking deadlock error */
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EDOTDOT 73 /* RFS specific error */
+#define EMULTIHOP 74 /* Multihop attempted */
+#define EBADMSG 77 /* Not a data message */
+#define ENAMETOOLONG 78 /* File name too long */
+#define EOVERFLOW 79 /* Value too large for defined data type */
+#define ENOTUNIQ 80 /* Name not unique on network */
+#define EBADFD 81 /* File descriptor in bad state */
+#define EREMCHG 82 /* Remote address changed */
+#define ELIBACC 83 /* Can not access a needed shared library */
+#define ELIBBAD 84 /* Accessing a corrupted shared library */
+#define ELIBSCN 85 /* .lib section in a.out corrupted */
+#define ELIBMAX 86 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 87 /* Cannot exec a shared library directly */
+#define EILSEQ 88 /* Illegal byte sequence */
+#define ENOSYS 89 /* Function not implemented */
+#define ELOOP 90 /* Too many symbolic links encountered */
+#define ERESTART 91 /* Interrupted system call should be restarted */
+#define ESTRPIPE 92 /* Streams pipe error */
+#define ENOTEMPTY 93 /* Directory not empty */
+#define EUSERS 94 /* Too many users */
+#define ENOTSOCK 95 /* Socket operation on non-socket */
+#define EDESTADDRREQ 96 /* Destination address required */
+#define EMSGSIZE 97 /* Message too long */
+#define EPROTOTYPE 98 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 99 /* Protocol not available */
+#define EPROTONOSUPPORT 120 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 121 /* Socket type not supported */
+#define EOPNOTSUPP 122 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 123 /* Protocol family not supported */
+#define EAFNOSUPPORT 124 /* Address family not supported by protocol */
+#define EADDRINUSE 125 /* Address already in use */
+#define EADDRNOTAVAIL 126 /* Cannot assign requested address */
+#define ENETDOWN 127 /* Network is down */
+#define ENETUNREACH 128 /* Network is unreachable */
+#define ENETRESET 129 /* Network dropped connection because of reset */
+#define ECONNABORTED 130 /* Software caused connection abort */
+#define ECONNRESET 131 /* Connection reset by peer */
+#define ENOBUFS 132 /* No buffer space available */
+#define EISCONN 133 /* Transport endpoint is already connected */
+#define ENOTCONN 134 /* Transport endpoint is not connected */
+#define EUCLEAN 135 /* Structure needs cleaning */
+#define ENOTNAM 137 /* Not a XENIX named type file */
+#define ENAVAIL 138 /* No XENIX semaphores available */
+#define EISNAM 139 /* Is a named type file */
+#define EREMOTEIO 140 /* Remote I/O error */
+#define EINIT 141 /* Reserved */
+#define EREMDEV 142 /* Error 142 */
+#define ESHUTDOWN 143 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 144 /* Too many references: cannot splice */
+#define ETIMEDOUT 145 /* Connection timed out */
+#define ECONNREFUSED 146 /* Connection refused */
+#define EHOSTDOWN 147 /* Host is down */
+#define EHOSTUNREACH 148 /* No route to host */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EALREADY 149 /* Operation already in progress */
+#define EINPROGRESS 150 /* Operation now in progress */
+#define ESTALE 151 /* Stale file handle */
+#define ECANCELED 158 /* AIO operation canceled */
+
+/*
+ * These errors are Linux extensions.
+ */
+#define ENOMEDIUM 159 /* No medium found */
+#define EMEDIUMTYPE 160 /* Wrong medium type */
+#define ENOKEY 161 /* Required key not available */
+#define EKEYEXPIRED 162 /* Key has expired */
+#define EKEYREVOKED 163 /* Key has been revoked */
+#define EKEYREJECTED 164 /* Key was rejected by service */
+
+/* for robust mutexes */
+#define EOWNERDEAD 165 /* Owner died */
+#define ENOTRECOVERABLE 166 /* State not recoverable */
+
+#define ERFKILL 167 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 168 /* Memory page has hardware error */
+
+#define EDQUOT 1133 /* Quota exceeded */
+
+
+#endif /* _UAPI_ASM_ERRNO_H */
diff --git a/arch/mips/include/uapi/asm/fcntl.h b/arch/mips/include/uapi/asm/fcntl.h
new file mode 100644
index 000000000..42e13dead
--- /dev/null
+++ b/arch/mips/include/uapi/asm/fcntl.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 96, 97, 98, 99, 2003, 05 Ralf Baechle
+ */
+#ifndef _UAPI_ASM_FCNTL_H
+#define _UAPI_ASM_FCNTL_H
+
+#include <asm/sgidefs.h>
+
+#define O_APPEND 0x0008
+#define O_DSYNC 0x0010 /* used to be O_SYNC, see below */
+#define O_NONBLOCK 0x0080
+#define O_CREAT 0x0100 /* not fcntl */
+#define O_TRUNC 0x0200 /* not fcntl */
+#define O_EXCL 0x0400 /* not fcntl */
+#define O_NOCTTY 0x0800 /* not fcntl */
+#define FASYNC 0x1000 /* fcntl, for BSD compatibility */
+#define O_LARGEFILE 0x2000 /* allow large file opens */
+/*
+ * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using
+ * the O_SYNC flag. We continue to use the existing numerical value
+ * for O_DSYNC semantics now, but using the correct symbolic name for it.
+ * This new value is used to request true Posix O_SYNC semantics. It is
+ * defined in this strange way to make sure applications compiled against
+ * new headers get at least O_DSYNC semantics on older kernels.
+ *
+ * This has the nice side-effect that we can simply test for O_DSYNC
+ * wherever we do not care if O_DSYNC or O_SYNC is used.
+ *
+ * Note: __O_SYNC must never be used directly.
+ */
+#define __O_SYNC 0x4000
+#define O_SYNC (__O_SYNC|O_DSYNC)
+#define O_DIRECT 0x8000 /* direct disk access hint */
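+
+/*
+ * Illustrative sketch (not part of this header): because O_SYNC includes the
+ * O_DSYNC bit, code that only cares whether some data-integrity sync was
+ * requested can test O_DSYNC alone. The wants_data_sync() helper is
+ * hypothetical.
+ */
+#if 0
+static inline int wants_data_sync(int flags)
+{
+ /* true for both O_DSYNC and O_SYNC opens */
+ return (flags & O_DSYNC) != 0;
+}
+#endif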
+
+#define F_GETLK 14
+#define F_SETLK 6
+#define F_SETLKW 7
+
+#define F_SETOWN 24 /* for sockets. */
+#define F_GETOWN 23 /* for sockets. */
+
+#ifndef __mips64
+#define F_GETLK64 33 /* using 'struct flock64' */
+#define F_SETLK64 34
+#define F_SETLKW64 35
+#endif
+
+/*
+ * The flavours of struct flock. "struct flock" is the ABI-compliant
+ * variant, while struct flock64 is the LFS variant. As a historic
+ * accident and an inconsistency with the ABI definition, it doesn't
+ * contain all the same fields as struct flock.
+ */
+
+#if _MIPS_SIM != _MIPS_SIM_ABI64
+
+#include <linux/types.h>
+
+struct flock {
+ short l_type;
+ short l_whence;
+ __kernel_off_t l_start;
+ __kernel_off_t l_len;
+ long l_sysid;
+ __kernel_pid_t l_pid;
+ long pad[4];
+};
+
+#define HAVE_ARCH_STRUCT_FLOCK
+
+#endif /* _MIPS_SIM != _MIPS_SIM_ABI64 */
+
+#include <asm-generic/fcntl.h>
+
+#endif /* _UAPI_ASM_FCNTL_H */
diff --git a/arch/mips/include/uapi/asm/hwcap.h b/arch/mips/include/uapi/asm/hwcap.h
new file mode 100644
index 000000000..b7e02bdc1
--- /dev/null
+++ b/arch/mips/include/uapi/asm/hwcap.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_HWCAP_H
+#define _UAPI_ASM_HWCAP_H
+
+/* HWCAP flags */
+#define HWCAP_MIPS_R6 (1 << 0)
+#define HWCAP_MIPS_MSA (1 << 1)
+#define HWCAP_MIPS_CRC32 (1 << 2)
+#define HWCAP_MIPS_MIPS16 (1 << 3)
+#define HWCAP_MIPS_MDMX (1 << 4)
+#define HWCAP_MIPS_MIPS3D (1 << 5)
+#define HWCAP_MIPS_SMARTMIPS (1 << 6)
+#define HWCAP_MIPS_DSP (1 << 7)
+#define HWCAP_MIPS_DSP2 (1 << 8)
+#define HWCAP_MIPS_DSP3 (1 << 9)
+#define HWCAP_MIPS_MIPS16E2 (1 << 10)
+#define HWCAP_LOONGSON_MMI (1 << 11)
+#define HWCAP_LOONGSON_EXT (1 << 12)
+#define HWCAP_LOONGSON_EXT2 (1 << 13)
+#define HWCAP_LOONGSON_CPUCFG (1 << 14)
+
+#endif /* _UAPI_ASM_HWCAP_H */
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
new file mode 100644
index 000000000..43d1faa02
--- /dev/null
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -0,0 +1,1141 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Format of an instruction in memory.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 2000 by Ralf Baechle
+ * Copyright (C) 2006 by Thiemo Seufer
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ */
+#ifndef _UAPI_ASM_INST_H
+#define _UAPI_ASM_INST_H
+
+#include <asm/bitfield.h>
+
+/*
+ * Major opcodes; before MIPS IV cop1x was called cop3.
+ */
+enum major_op {
+ spec_op, bcond_op, j_op, jal_op,
+ beq_op, bne_op, blez_op, bgtz_op,
+ addi_op, pop10_op = addi_op, addiu_op, slti_op, sltiu_op,
+ andi_op, ori_op, xori_op, lui_op,
+ cop0_op, cop1_op, cop2_op, cop1x_op,
+ beql_op, bnel_op, blezl_op, bgtzl_op,
+ daddi_op, pop30_op = daddi_op, daddiu_op, ldl_op, ldr_op,
+ spec2_op, jalx_op, mdmx_op, msa_op = mdmx_op, spec3_op,
+ lb_op, lh_op, lwl_op, lw_op,
+ lbu_op, lhu_op, lwr_op, lwu_op,
+ sb_op, sh_op, swl_op, sw_op,
+ sdl_op, sdr_op, swr_op, cache_op,
+ ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op,
+ lld_op, ldc1_op, ldc2_op, pop66_op = ldc2_op, ld_op,
+ sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op,
+ scd_op, sdc1_op, sdc2_op, pop76_op = sdc2_op, sd_op
+};
+
+/*
+ * func field of spec opcode.
+ */
+enum spec_op {
+ sll_op, movc_op, srl_op, sra_op,
+ sllv_op, pmon_op, srlv_op, srav_op,
+ jr_op, jalr_op, movz_op, movn_op,
+ syscall_op, break_op, spim_op, sync_op,
+ mfhi_op, mthi_op, mflo_op, mtlo_op,
+ dsllv_op, spec2_unused_op, dsrlv_op, dsrav_op,
+ mult_op, multu_op, div_op, divu_op,
+ dmult_op, dmultu_op, ddiv_op, ddivu_op,
+ add_op, addu_op, sub_op, subu_op,
+ and_op, or_op, xor_op, nor_op,
+ spec3_unused_op, spec4_unused_op, slt_op, sltu_op,
+ dadd_op, daddu_op, dsub_op, dsubu_op,
+ tge_op, tgeu_op, tlt_op, tltu_op,
+ teq_op, seleqz_op, tne_op, selnez_op,
+ dsll_op, spec5_unused_op, dsrl_op, dsra_op,
+ dsll32_op, spec6_unused_op, dsrl32_op, dsra32_op
+};
+
+/*
+ * func field of spec2 opcode.
+ */
+enum spec2_op {
+ madd_op, maddu_op, mul_op, spec2_3_unused_op,
+ msub_op, msubu_op, /* more unused ops */
+ clz_op = 0x20, clo_op,
+ dclz_op = 0x24, dclo_op,
+ sdbpp_op = 0x3f
+};
+
+/*
+ * func field of spec3 opcode.
+ */
+enum spec3_op {
+ ext_op, dextm_op, dextu_op, dext_op,
+ ins_op, dinsm_op, dinsu_op, dins_op,
+ yield_op = 0x09, lx_op = 0x0a,
+ lwle_op = 0x19, lwre_op = 0x1a,
+ cachee_op = 0x1b, sbe_op = 0x1c,
+ she_op = 0x1d, sce_op = 0x1e,
+ swe_op = 0x1f, bshfl_op = 0x20,
+ swle_op = 0x21, swre_op = 0x22,
+ prefe_op = 0x23, dbshfl_op = 0x24,
+ cache6_op = 0x25, sc6_op = 0x26,
+ scd6_op = 0x27, lbue_op = 0x28,
+ lhue_op = 0x29, lbe_op = 0x2c,
+ lhe_op = 0x2d, lle_op = 0x2e,
+ lwe_op = 0x2f, pref6_op = 0x35,
+ ll6_op = 0x36, lld6_op = 0x37,
+ rdhwr_op = 0x3b
+};
+
+/*
+ * Bits 10-6 minor opcode for r6 spec mult/div encodings
+ */
+enum mult_op {
+ mult_mult_op = 0x0,
+ mult_mul_op = 0x2,
+ mult_muh_op = 0x3,
+};
+enum multu_op {
+ multu_multu_op = 0x0,
+ multu_mulu_op = 0x2,
+ multu_muhu_op = 0x3,
+};
+enum div_op {
+ div_div_op = 0x0,
+ div_div6_op = 0x2,
+ div_mod_op = 0x3,
+};
+enum divu_op {
+ divu_divu_op = 0x0,
+ divu_divu6_op = 0x2,
+ divu_modu_op = 0x3,
+};
+enum dmult_op {
+ dmult_dmult_op = 0x0,
+ dmult_dmul_op = 0x2,
+ dmult_dmuh_op = 0x3,
+};
+enum dmultu_op {
+ dmultu_dmultu_op = 0x0,
+ dmultu_dmulu_op = 0x2,
+ dmultu_dmuhu_op = 0x3,
+};
+enum ddiv_op {
+ ddiv_ddiv_op = 0x0,
+ ddiv_ddiv6_op = 0x2,
+ ddiv_dmod_op = 0x3,
+};
+enum ddivu_op {
+ ddivu_ddivu_op = 0x0,
+ ddivu_ddivu6_op = 0x2,
+ ddivu_dmodu_op = 0x3,
+};
+
+/*
+ * rt field of bcond opcodes.
+ */
+enum rt_op {
+ bltz_op, bgez_op, bltzl_op, bgezl_op,
+ spimi_op, unused_rt_op_0x05, unused_rt_op_0x06, unused_rt_op_0x07,
+ tgei_op, tgeiu_op, tlti_op, tltiu_op,
+ teqi_op, unused_0x0d_rt_op, tnei_op, unused_0x0f_rt_op,
+ bltzal_op, bgezal_op, bltzall_op, bgezall_op,
+ rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17,
+ rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b,
+ bposge32_op, rt_op_0x1d, rt_op_0x1e, synci_op
+};
+
+/*
+ * rs field of cop opcodes.
+ */
+enum cop_op {
+ mfc_op = 0x00, dmfc_op = 0x01,
+ cfc_op = 0x02, mfhc0_op = 0x02,
+ mfhc_op = 0x03, mtc_op = 0x04,
+ dmtc_op = 0x05, ctc_op = 0x06,
+ mthc0_op = 0x06, mthc_op = 0x07,
+ bc_op = 0x08, bc1eqz_op = 0x09,
+ mfmc0_op = 0x0b, bc1nez_op = 0x0d,
+ wrpgpr_op = 0x0e, cop_op = 0x10,
+ copm_op = 0x18
+};
+
+/*
+ * rt field of cop.bc_op opcodes
+ */
+enum bcop_op {
+ bcf_op, bct_op, bcfl_op, bctl_op
+};
+
+/*
+ * func field of cop0 coi opcodes.
+ */
+enum cop0_coi_func {
+ tlbr_op = 0x01, tlbwi_op = 0x02,
+ tlbwr_op = 0x06, tlbp_op = 0x08,
+ rfe_op = 0x10, eret_op = 0x18,
+ wait_op = 0x20, hypcall_op = 0x28
+};
+
+/*
+ * func field of cop0 com opcodes.
+ */
+enum cop0_com_func {
+ tlbr1_op = 0x01, tlbw_op = 0x02,
+ tlbp1_op = 0x08, dctr_op = 0x09,
+ dctw_op = 0x0a
+};
+
+/*
+ * fmt field of cop1 opcodes.
+ */
+enum cop1_fmt {
+ s_fmt, d_fmt, e_fmt, q_fmt,
+ w_fmt, l_fmt
+};
+
+/*
+ * func field of cop1 instructions using d, s or w format.
+ */
+enum cop1_sdw_func {
+ fadd_op = 0x00, fsub_op = 0x01,
+ fmul_op = 0x02, fdiv_op = 0x03,
+ fsqrt_op = 0x04, fabs_op = 0x05,
+ fmov_op = 0x06, fneg_op = 0x07,
+ froundl_op = 0x08, ftruncl_op = 0x09,
+ fceill_op = 0x0a, ffloorl_op = 0x0b,
+ fround_op = 0x0c, ftrunc_op = 0x0d,
+ fceil_op = 0x0e, ffloor_op = 0x0f,
+ fsel_op = 0x10,
+ fmovc_op = 0x11, fmovz_op = 0x12,
+ fmovn_op = 0x13, fseleqz_op = 0x14,
+ frecip_op = 0x15, frsqrt_op = 0x16,
+ fselnez_op = 0x17, fmaddf_op = 0x18,
+ fmsubf_op = 0x19, frint_op = 0x1a,
+ fclass_op = 0x1b, fmin_op = 0x1c,
+ fmina_op = 0x1d, fmax_op = 0x1e,
+ fmaxa_op = 0x1f, fcvts_op = 0x20,
+ fcvtd_op = 0x21, fcvte_op = 0x22,
+ fcvtw_op = 0x24, fcvtl_op = 0x25,
+ fcmp_op = 0x30
+};
+
+/*
+ * func field of cop1x opcodes (MIPS IV).
+ */
+enum cop1x_func {
+ lwxc1_op = 0x00, ldxc1_op = 0x01,
+ swxc1_op = 0x08, sdxc1_op = 0x09,
+ pfetch_op = 0x0f, madd_s_op = 0x20,
+ madd_d_op = 0x21, madd_e_op = 0x22,
+ msub_s_op = 0x28, msub_d_op = 0x29,
+ msub_e_op = 0x2a, nmadd_s_op = 0x30,
+ nmadd_d_op = 0x31, nmadd_e_op = 0x32,
+ nmsub_s_op = 0x38, nmsub_d_op = 0x39,
+ nmsub_e_op = 0x3a
+};
+
+/*
+ * func field for mad opcodes (MIPS IV).
+ */
+enum mad_func {
+ madd_fp_op = 0x08, msub_fp_op = 0x0a,
+ nmadd_fp_op = 0x0c, nmsub_fp_op = 0x0e
+};
+
+/*
+ * func field for page table walker (Loongson-3).
+ */
+enum ptw_func {
+ lwdir_op = 0x00,
+ lwpte_op = 0x01,
+ lddir_op = 0x02,
+ ldpte_op = 0x03,
+};
+
+/*
+ * func field for special3 lx opcodes (Cavium Octeon).
+ */
+enum lx_func {
+ lwx_op = 0x00,
+ lhx_op = 0x04,
+ lbux_op = 0x06,
+ ldx_op = 0x08,
+ lwux_op = 0x10,
+ lhux_op = 0x14,
+ lbx_op = 0x16,
+};
+
+/*
+ * BSHFL opcodes
+ */
+enum bshfl_func {
+ wsbh_op = 0x2,
+ seb_op = 0x10,
+ seh_op = 0x18,
+};
+
+/*
+ * DBSHFL opcodes
+ */
+enum dbshfl_func {
+ dsbh_op = 0x2,
+ dshd_op = 0x5,
+};
+
+/*
+ * MSA minor opcodes.
+ */
+enum msa_func {
+ msa_elm_op = 0x19,
+};
+
+/*
+ * MSA ELM opcodes.
+ */
+enum msa_elm {
+ msa_ctc_op = 0x3e,
+ msa_cfc_op = 0x7e,
+};
+
+/*
+ * func field for MSA MI10 format.
+ */
+enum msa_mi10_func {
+ msa_ld_op = 8,
+ msa_st_op = 9,
+};
+
+/*
+ * MSA 2 bit format fields.
+ */
+enum msa_2b_fmt {
+ msa_fmt_b = 0,
+ msa_fmt_h = 1,
+ msa_fmt_w = 2,
+ msa_fmt_d = 3,
+};
+
+/*
+ * (microMIPS) Major opcodes.
+ */
+enum mm_major_op {
+ mm_pool32a_op, mm_pool16a_op, mm_lbu16_op, mm_move16_op,
+ mm_addi32_op, mm_lbu32_op, mm_sb32_op, mm_lb32_op,
+ mm_pool32b_op, mm_pool16b_op, mm_lhu16_op, mm_andi16_op,
+ mm_addiu32_op, mm_lhu32_op, mm_sh32_op, mm_lh32_op,
+ mm_pool32i_op, mm_pool16c_op, mm_lwsp16_op, mm_pool16d_op,
+ mm_ori32_op, mm_pool32f_op, mm_pool32s_op, mm_reserved2_op,
+ mm_pool32c_op, mm_lwgp16_op, mm_lw16_op, mm_pool16e_op,
+ mm_xori32_op, mm_jals32_op, mm_addiupc_op, mm_reserved3_op,
+ mm_reserved4_op, mm_pool16f_op, mm_sb16_op, mm_beqz16_op,
+ mm_slti32_op, mm_beq32_op, mm_swc132_op, mm_lwc132_op,
+ mm_reserved5_op, mm_reserved6_op, mm_sh16_op, mm_bnez16_op,
+ mm_sltiu32_op, mm_bne32_op, mm_sdc132_op, mm_ldc132_op,
+ mm_reserved7_op, mm_reserved8_op, mm_swsp16_op, mm_b16_op,
+ mm_andi32_op, mm_j32_op, mm_sd32_op, mm_ld32_op,
+ mm_reserved11_op, mm_reserved12_op, mm_sw16_op, mm_li16_op,
+ mm_jalx32_op, mm_jal32_op, mm_sw32_op, mm_lw32_op,
+};
+
+/*
+ * (microMIPS) POOL32I minor opcodes.
+ */
+enum mm_32i_minor_op {
+ mm_bltz_op, mm_bltzal_op, mm_bgez_op, mm_bgezal_op,
+ mm_blez_op, mm_bnezc_op, mm_bgtz_op, mm_beqzc_op,
+ mm_tlti_op, mm_tgei_op, mm_tltiu_op, mm_tgeiu_op,
+ mm_tnei_op, mm_lui_op, mm_teqi_op, mm_reserved13_op,
+ mm_synci_op, mm_bltzals_op, mm_reserved14_op, mm_bgezals_op,
+ mm_bc2f_op, mm_bc2t_op, mm_reserved15_op, mm_reserved16_op,
+ mm_reserved17_op, mm_reserved18_op, mm_bposge64_op, mm_bposge32_op,
+ mm_bc1f_op, mm_bc1t_op, mm_reserved19_op, mm_reserved20_op,
+ mm_bc1any2f_op, mm_bc1any2t_op, mm_bc1any4f_op, mm_bc1any4t_op,
+};
+
+/*
+ * (microMIPS) POOL32A minor opcodes.
+ */
+enum mm_32a_minor_op {
+ mm_sll32_op = 0x000,
+ mm_ins_op = 0x00c,
+ mm_sllv32_op = 0x010,
+ mm_ext_op = 0x02c,
+ mm_pool32axf_op = 0x03c,
+ mm_srl32_op = 0x040,
+ mm_srlv32_op = 0x050,
+ mm_sra_op = 0x080,
+ mm_srav_op = 0x090,
+ mm_rotr_op = 0x0c0,
+ mm_lwxs_op = 0x118,
+ mm_addu32_op = 0x150,
+ mm_subu32_op = 0x1d0,
+ mm_wsbh_op = 0x1ec,
+ mm_mul_op = 0x210,
+ mm_and_op = 0x250,
+ mm_or32_op = 0x290,
+ mm_xor32_op = 0x310,
+ mm_slt_op = 0x350,
+ mm_sltu_op = 0x390,
+};
+
+/*
+ * (microMIPS) POOL32B functions.
+ */
+enum mm_32b_func {
+ mm_lwc2_func = 0x0,
+ mm_lwp_func = 0x1,
+ mm_ldc2_func = 0x2,
+ mm_ldp_func = 0x4,
+ mm_lwm32_func = 0x5,
+ mm_cache_func = 0x6,
+ mm_ldm_func = 0x7,
+ mm_swc2_func = 0x8,
+ mm_swp_func = 0x9,
+ mm_sdc2_func = 0xa,
+ mm_sdp_func = 0xc,
+ mm_swm32_func = 0xd,
+ mm_sdm_func = 0xf,
+};
+
+/*
+ * (microMIPS) POOL32C functions.
+ */
+enum mm_32c_func {
+ mm_pref_func = 0x2,
+ mm_ll_func = 0x3,
+ mm_swr_func = 0x9,
+ mm_sc_func = 0xb,
+ mm_lwu_func = 0xe,
+};
+
+/*
+ * (microMIPS) POOL32AXF minor opcodes.
+ */
+enum mm_32axf_minor_op {
+ mm_mfc0_op = 0x003,
+ mm_mtc0_op = 0x00b,
+ mm_tlbp_op = 0x00d,
+ mm_mfhi32_op = 0x035,
+ mm_jalr_op = 0x03c,
+ mm_tlbr_op = 0x04d,
+ mm_mflo32_op = 0x075,
+ mm_jalrhb_op = 0x07c,
+ mm_tlbwi_op = 0x08d,
+ mm_mthi32_op = 0x0b5,
+ mm_tlbwr_op = 0x0cd,
+ mm_mtlo32_op = 0x0f5,
+ mm_di_op = 0x11d,
+ mm_jalrs_op = 0x13c,
+ mm_jalrshb_op = 0x17c,
+ mm_sync_op = 0x1ad,
+ mm_syscall_op = 0x22d,
+ mm_wait_op = 0x24d,
+ mm_eret_op = 0x3cd,
+ mm_divu_op = 0x5dc,
+};
+
+/*
+ * (microMIPS) POOL32F minor opcodes.
+ */
+enum mm_32f_minor_op {
+ mm_32f_00_op = 0x00,
+ mm_32f_01_op = 0x01,
+ mm_32f_02_op = 0x02,
+ mm_32f_10_op = 0x08,
+ mm_32f_11_op = 0x09,
+ mm_32f_12_op = 0x0a,
+ mm_32f_20_op = 0x10,
+ mm_32f_30_op = 0x18,
+ mm_32f_40_op = 0x20,
+ mm_32f_41_op = 0x21,
+ mm_32f_42_op = 0x22,
+ mm_32f_50_op = 0x28,
+ mm_32f_51_op = 0x29,
+ mm_32f_52_op = 0x2a,
+ mm_32f_60_op = 0x30,
+ mm_32f_70_op = 0x38,
+ mm_32f_73_op = 0x3b,
+ mm_32f_74_op = 0x3c,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_10_minor_op {
+ mm_lwxc1_op = 0x1,
+ mm_swxc1_op,
+ mm_ldxc1_op,
+ mm_sdxc1_op,
+ mm_luxc1_op,
+ mm_suxc1_op,
+};
+
+enum mm_32f_func {
+ mm_lwxc1_func = 0x048,
+ mm_swxc1_func = 0x088,
+ mm_ldxc1_func = 0x0c8,
+ mm_sdxc1_func = 0x108,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_40_minor_op {
+ mm_fmovf_op,
+ mm_fmovt_op,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_60_minor_op {
+ mm_fadd_op,
+ mm_fsub_op,
+ mm_fmul_op,
+ mm_fdiv_op,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_70_minor_op {
+ mm_fmovn_op,
+ mm_fmovz_op,
+};
+
+/*
+ * (microMIPS) POOL32FXF secondary minor opcodes for POOL32F.
+ */
+enum mm_32f_73_minor_op {
+ mm_fmov0_op = 0x01,
+ mm_fcvtl_op = 0x04,
+ mm_movf0_op = 0x05,
+ mm_frsqrt_op = 0x08,
+ mm_ffloorl_op = 0x0c,
+ mm_fabs0_op = 0x0d,
+ mm_fcvtw_op = 0x24,
+ mm_movt0_op = 0x25,
+ mm_fsqrt_op = 0x28,
+ mm_ffloorw_op = 0x2c,
+ mm_fneg0_op = 0x2d,
+ mm_cfc1_op = 0x40,
+ mm_frecip_op = 0x48,
+ mm_fceill_op = 0x4c,
+ mm_fcvtd0_op = 0x4d,
+ mm_ctc1_op = 0x60,
+ mm_fceilw_op = 0x6c,
+ mm_fcvts0_op = 0x6d,
+ mm_mfc1_op = 0x80,
+ mm_fmov1_op = 0x81,
+ mm_movf1_op = 0x85,
+ mm_ftruncl_op = 0x8c,
+ mm_fabs1_op = 0x8d,
+ mm_mtc1_op = 0xa0,
+ mm_movt1_op = 0xa5,
+ mm_ftruncw_op = 0xac,
+ mm_fneg1_op = 0xad,
+ mm_mfhc1_op = 0xc0,
+ mm_froundl_op = 0xcc,
+ mm_fcvtd1_op = 0xcd,
+ mm_mthc1_op = 0xe0,
+ mm_froundw_op = 0xec,
+ mm_fcvts1_op = 0xed,
+};
+
+/*
+ * (microMIPS) POOL32S minor opcodes.
+ */
+enum mm_32s_minor_op {
+ mm_32s_elm_op = 0x16,
+};
+
+/*
+ * (microMIPS) POOL16C minor opcodes.
+ */
+enum mm_16c_minor_op {
+ mm_lwm16_op = 0x04,
+ mm_swm16_op = 0x05,
+ mm_jr16_op = 0x0c,
+ mm_jrc_op = 0x0d,
+ mm_jalr16_op = 0x0e,
+ mm_jalrs16_op = 0x0f,
+ mm_jraddiusp_op = 0x18,
+};
+
+/*
+ * (microMIPS) POOL16D minor opcodes.
+ */
+enum mm_16d_minor_op {
+ mm_addius5_func,
+ mm_addiusp_func,
+};
+
+/*
+ * (MIPS16e) opcodes.
+ */
+enum MIPS16e_ops {
+ MIPS16e_jal_op = 003,
+ MIPS16e_ld_op = 007,
+ MIPS16e_i8_op = 014,
+ MIPS16e_sd_op = 017,
+ MIPS16e_lb_op = 020,
+ MIPS16e_lh_op = 021,
+ MIPS16e_lwsp_op = 022,
+ MIPS16e_lw_op = 023,
+ MIPS16e_lbu_op = 024,
+ MIPS16e_lhu_op = 025,
+ MIPS16e_lwpc_op = 026,
+ MIPS16e_lwu_op = 027,
+ MIPS16e_sb_op = 030,
+ MIPS16e_sh_op = 031,
+ MIPS16e_swsp_op = 032,
+ MIPS16e_sw_op = 033,
+ MIPS16e_rr_op = 035,
+ MIPS16e_extend_op = 036,
+ MIPS16e_i64_op = 037,
+};
+
+enum MIPS16e_i64_func {
+ MIPS16e_ldsp_func,
+ MIPS16e_sdsp_func,
+ MIPS16e_sdrasp_func,
+ MIPS16e_dadjsp_func,
+ MIPS16e_ldpc_func,
+};
+
+enum MIPS16e_rr_func {
+ MIPS16e_jr_func,
+};
+
+enum MIPS6e_i8_func {
+ MIPS16e_swrasp_func = 02,
+};
+
+/*
+ * (microMIPS) NOP instruction.
+ */
+#define MM_NOP16 0x0c00
+
+struct j_format {
+ __BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */
+ __BITFIELD_FIELD(unsigned int target : 26,
+ ;))
+};
+
+struct i_format { /* signed immediate format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(signed int simmediate : 16,
+ ;))))
+};
+
+struct u_format { /* unsigned immediate format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int uimmediate : 16,
+ ;))))
+};
+
+struct c_format { /* Cache (>= R6000) format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int c_op : 3,
+ __BITFIELD_FIELD(unsigned int cache : 2,
+ __BITFIELD_FIELD(unsigned int simmediate : 16,
+ ;)))))
+};
+
+struct r_format { /* Register format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int re : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct c0r_format { /* C0 register format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int z: 8,
+ __BITFIELD_FIELD(unsigned int sel : 3,
+ ;))))))
+};
+
+struct mfmc0_format { /* MFMC0 register format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int re : 5,
+ __BITFIELD_FIELD(unsigned int sc : 1,
+ __BITFIELD_FIELD(unsigned int : 2,
+ __BITFIELD_FIELD(unsigned int sel : 3,
+ ;))))))))
+};
+
+struct co_format { /* C0 CO format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int co : 1,
+ __BITFIELD_FIELD(unsigned int code : 19,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))
+};
+
+struct p_format { /* Performance counter format (R10000) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int re : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct f_format { /* FPU register format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int : 1,
+ __BITFIELD_FIELD(unsigned int fmt : 4,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int re : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;)))))))
+};
+
+struct ma_format { /* FPU multiply and add format (MIPS IV) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int fr : 5,
+ __BITFIELD_FIELD(unsigned int ft : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int func : 4,
+ __BITFIELD_FIELD(unsigned int fmt : 2,
+ ;)))))))
+};
+
+struct b_format { /* BREAK and SYSCALL */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int code : 20,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;)))
+};
+
+struct ps_format { /* MIPS-3D / paired single format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int ft : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct v_format { /* MDMX vector format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int sel : 4,
+ __BITFIELD_FIELD(unsigned int fmt : 1,
+ __BITFIELD_FIELD(unsigned int vt : 5,
+ __BITFIELD_FIELD(unsigned int vs : 5,
+ __BITFIELD_FIELD(unsigned int vd : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;)))))))
+};
+
+struct msa_mi10_format { /* MSA MI10 */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(signed int s10 : 10,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int wd : 5,
+ __BITFIELD_FIELD(unsigned int func : 4,
+ __BITFIELD_FIELD(unsigned int df : 2,
+ ;))))))
+};
+
+struct dsp_format { /* SPEC3 DSP format instructions */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int base : 5,
+ __BITFIELD_FIELD(unsigned int index : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int op : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct spec3_format { /* SPEC3 */
+ __BITFIELD_FIELD(unsigned int opcode:6,
+ __BITFIELD_FIELD(unsigned int rs:5,
+ __BITFIELD_FIELD(unsigned int rt:5,
+ __BITFIELD_FIELD(signed int simmediate:9,
+ __BITFIELD_FIELD(unsigned int func:7,
+ ;)))))
+};
+
+/*
+ * microMIPS instruction formats (32-bit length)
+ *
+ * NOTE:
+ * Parentheses denote whether the format is a microMIPS instruction or
+ * a MIPS32 instruction re-encoded for use in the microMIPS ASE.
+ */
+struct fb_format { /* FPU branch format (MIPS32) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int bc : 5,
+ __BITFIELD_FIELD(unsigned int cc : 3,
+ __BITFIELD_FIELD(unsigned int flag : 2,
+ __BITFIELD_FIELD(signed int simmediate : 16,
+ ;)))))
+};
+
+struct fp0_format { /* FPU multiply and add format (MIPS32) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int fmt : 5,
+ __BITFIELD_FIELD(unsigned int ft : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct mm_fp0_format { /* FPU multiply and add format (microMIPS) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int ft : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int fmt : 3,
+ __BITFIELD_FIELD(unsigned int op : 2,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;)))))))
+};
+
+struct fp1_format { /* FPU mfc1 and cfc1 format (MIPS32) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int op : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct mm_fp1_format { /* FPU mfc1 and cfc1 format (microMIPS) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fmt : 2,
+ __BITFIELD_FIELD(unsigned int op : 8,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct mm_fp2_format { /* FPU movt and movf format (microMIPS) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int cc : 3,
+ __BITFIELD_FIELD(unsigned int zero : 2,
+ __BITFIELD_FIELD(unsigned int fmt : 2,
+ __BITFIELD_FIELD(unsigned int op : 3,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))))
+};
+
+struct mm_fp3_format { /* FPU abs and neg format (microMIPS) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fmt : 3,
+ __BITFIELD_FIELD(unsigned int op : 7,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct mm_fp4_format { /* FPU c.cond format (microMIPS) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int cc : 3,
+ __BITFIELD_FIELD(unsigned int fmt : 3,
+ __BITFIELD_FIELD(unsigned int cond : 4,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;)))))))
+};
+
+struct mm_fp5_format { /* FPU lwxc1 and swxc1 format (microMIPS) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int index : 5,
+ __BITFIELD_FIELD(unsigned int base : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int op : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct fp6_format { /* FPU madd and msub format (MIPS IV) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int fr : 5,
+ __BITFIELD_FIELD(unsigned int ft : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct mm_fp6_format { /* FPU madd and msub format (microMIPS) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int ft : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int fr : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct mm_i_format { /* Immediate format (microMIPS) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(signed int simmediate : 16,
+ ;))))
+};
+
+struct mm_m_format { /* Multi-word load/store format (microMIPS) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int base : 5,
+ __BITFIELD_FIELD(unsigned int func : 4,
+ __BITFIELD_FIELD(signed int simmediate : 12,
+ ;)))))
+};
+
+struct mm_x_format { /* Scaled indexed load format (microMIPS) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int index : 5,
+ __BITFIELD_FIELD(unsigned int base : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int func : 11,
+ ;)))))
+};
+
+struct mm_a_format { /* ADDIUPC format (microMIPS) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 3,
+ __BITFIELD_FIELD(signed int simmediate : 23,
+ ;)))
+};
+
+/*
+ * microMIPS instruction formats (16-bit length)
+ */
+struct mm_b0_format { /* Unconditional branch format (microMIPS) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(signed int simmediate : 10,
+ __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ ;)))
+};
+
+struct mm_b1_format { /* Conditional branch format (microMIPS) */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 3,
+ __BITFIELD_FIELD(signed int simmediate : 7,
+ __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ ;))))
+};
+
+struct mm16_m_format { /* Multi-word load/store format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int func : 4,
+ __BITFIELD_FIELD(unsigned int rlist : 2,
+ __BITFIELD_FIELD(unsigned int imm : 4,
+ __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ ;)))))
+};
+
+struct mm16_rb_format { /* Signed immediate format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rt : 3,
+ __BITFIELD_FIELD(unsigned int base : 3,
+ __BITFIELD_FIELD(signed int simmediate : 4,
+ __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ ;)))))
+};
+
+struct mm16_r3_format { /* Load from global pointer format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rt : 3,
+ __BITFIELD_FIELD(signed int simmediate : 7,
+ __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ ;))))
+};
+
+struct mm16_r5_format { /* Load/store from stack pointer format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int imm : 5,
+ __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ ;))))
+};
+
+/*
+ * Loongson-3 overridden COP2 instruction formats (32-bit length)
+ */
+struct loongson3_lswc2_format { /* Loongson-3 overridden lwc2/swc2 Load/Store format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int base : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int fr : 1,
+ __BITFIELD_FIELD(unsigned int offset : 9,
+ __BITFIELD_FIELD(unsigned int ls : 1,
+ __BITFIELD_FIELD(unsigned int rq : 5,
+ ;)))))))
+};
+
+struct loongson3_lsdc2_format { /* Loongson-3 overridden ldc2/sdc2 Load/Store format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int base : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int index : 5,
+ __BITFIELD_FIELD(unsigned int offset : 8,
+ __BITFIELD_FIELD(unsigned int opcode1 : 3,
+ ;))))))
+};
+
+struct loongson3_lscsr_format { /* Loongson-3 CPUCFG&CSR read/write format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int fr : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+/*
+ * MIPS16e instruction formats (16-bit length)
+ */
+struct m16e_rr {
+ __BITFIELD_FIELD(unsigned int opcode : 5,
+ __BITFIELD_FIELD(unsigned int rx : 3,
+ __BITFIELD_FIELD(unsigned int nd : 1,
+ __BITFIELD_FIELD(unsigned int l : 1,
+ __BITFIELD_FIELD(unsigned int ra : 1,
+ __BITFIELD_FIELD(unsigned int func : 5,
+ ;))))))
+};
+
+struct m16e_jal {
+ __BITFIELD_FIELD(unsigned int opcode : 5,
+ __BITFIELD_FIELD(unsigned int x : 1,
+ __BITFIELD_FIELD(unsigned int imm20_16 : 5,
+ __BITFIELD_FIELD(signed int imm25_21 : 5,
+ ;))))
+};
+
+struct m16e_i64 {
+ __BITFIELD_FIELD(unsigned int opcode : 5,
+ __BITFIELD_FIELD(unsigned int func : 3,
+ __BITFIELD_FIELD(unsigned int imm : 8,
+ ;)))
+};
+
+struct m16e_ri64 {
+ __BITFIELD_FIELD(unsigned int opcode : 5,
+ __BITFIELD_FIELD(unsigned int func : 3,
+ __BITFIELD_FIELD(unsigned int ry : 3,
+ __BITFIELD_FIELD(unsigned int imm : 5,
+ ;))))
+};
+
+struct m16e_ri {
+ __BITFIELD_FIELD(unsigned int opcode : 5,
+ __BITFIELD_FIELD(unsigned int rx : 3,
+ __BITFIELD_FIELD(unsigned int imm : 8,
+ ;)))
+};
+
+struct m16e_rri {
+ __BITFIELD_FIELD(unsigned int opcode : 5,
+ __BITFIELD_FIELD(unsigned int rx : 3,
+ __BITFIELD_FIELD(unsigned int ry : 3,
+ __BITFIELD_FIELD(unsigned int imm : 5,
+ ;))))
+};
+
+struct m16e_i8 {
+ __BITFIELD_FIELD(unsigned int opcode : 5,
+ __BITFIELD_FIELD(unsigned int func : 3,
+ __BITFIELD_FIELD(unsigned int imm : 8,
+ ;)))
+};
+
+union mips_instruction {
+ unsigned int word;
+ unsigned short halfword[2];
+ unsigned char byte[4];
+ struct j_format j_format;
+ struct i_format i_format;
+ struct u_format u_format;
+ struct c_format c_format;
+ struct r_format r_format;
+ struct c0r_format c0r_format;
+ struct mfmc0_format mfmc0_format;
+ struct co_format co_format;
+ struct p_format p_format;
+ struct f_format f_format;
+ struct ma_format ma_format;
+ struct msa_mi10_format msa_mi10_format;
+ struct b_format b_format;
+ struct ps_format ps_format;
+ struct v_format v_format;
+ struct dsp_format dsp_format;
+ struct spec3_format spec3_format;
+ struct fb_format fb_format;
+ struct fp0_format fp0_format;
+ struct mm_fp0_format mm_fp0_format;
+ struct fp1_format fp1_format;
+ struct mm_fp1_format mm_fp1_format;
+ struct mm_fp2_format mm_fp2_format;
+ struct mm_fp3_format mm_fp3_format;
+ struct mm_fp4_format mm_fp4_format;
+ struct mm_fp5_format mm_fp5_format;
+ struct fp6_format fp6_format;
+ struct mm_fp6_format mm_fp6_format;
+ struct mm_i_format mm_i_format;
+ struct mm_m_format mm_m_format;
+ struct mm_x_format mm_x_format;
+ struct mm_a_format mm_a_format;
+ struct mm_b0_format mm_b0_format;
+ struct mm_b1_format mm_b1_format;
+ struct mm16_m_format mm16_m_format;
+ struct mm16_rb_format mm16_rb_format;
+ struct mm16_r3_format mm16_r3_format;
+ struct mm16_r5_format mm16_r5_format;
+ struct loongson3_lswc2_format loongson3_lswc2_format;
+ struct loongson3_lsdc2_format loongson3_lsdc2_format;
+ struct loongson3_lscsr_format loongson3_lscsr_format;
+};
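+
+/*
+ * Illustrative sketch (not part of this header): union mips_instruction is
+ * typically used to decode a fetched instruction word by selecting the view
+ * that matches its major opcode. The is_jal() helper below is hypothetical.
+ */
+#if 0
+static inline int is_jal(unsigned int word)
+{
+ union mips_instruction insn;
+
+ insn.word = word;
+ return insn.j_format.opcode == jal_op;
+}
+#endif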
+
+union mips16e_instruction {
+ unsigned int full : 16;
+ struct m16e_rr rr;
+ struct m16e_jal jal;
+ struct m16e_i64 i64;
+ struct m16e_ri64 ri64;
+ struct m16e_ri ri;
+ struct m16e_rri rri;
+ struct m16e_i8 i8;
+};
+
+#endif /* _UAPI_ASM_INST_H */
diff --git a/arch/mips/include/uapi/asm/ioctl.h b/arch/mips/include/uapi/asm/ioctl.h
new file mode 100644
index 000000000..1050a6ea2
--- /dev/null
+++ b/arch/mips/include/uapi/asm/ioctl.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 96, 99, 2001 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2009 Wind River Systems
+ * Written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_IOCTL_H
+#define __ASM_IOCTL_H
+
+#define _IOC_SIZEBITS 13
+#define _IOC_DIRBITS 3
+
+/*
+ * Direction bits. _IOC_NONE could be 0, but OSF/1 gives it a bit,
+ * and this turns out to be useful for catching old ioctl numbers in
+ * header files.
+ */
+#define _IOC_NONE 1U
+#define _IOC_READ 2U
+#define _IOC_WRITE 4U
+
+#include <asm-generic/ioctl.h>
+
+#endif /* __ASM_IOCTL_H */
diff --git a/arch/mips/include/uapi/asm/ioctls.h b/arch/mips/include/uapi/asm/ioctls.h
new file mode 100644
index 000000000..16aa8a766
--- /dev/null
+++ b/arch/mips/include/uapi/asm/ioctls.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1996, 2001 Ralf Baechle
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+#ifndef __ASM_IOCTLS_H
+#define __ASM_IOCTLS_H
+
+#include <asm/ioctl.h>
+
+#define TCGETA 0x5401
+#define TCSETA 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
+#define TCSETAW 0x5403
+#define TCSETAF 0x5404
+
+#define TCSBRK 0x5405
+#define TCXONC 0x5406
+#define TCFLSH 0x5407
+
+#define TCGETS 0x540d
+#define TCSETS 0x540e
+#define TCSETSW 0x540f
+#define TCSETSF 0x5410
+
+#define TIOCEXCL 0x740d /* set exclusive use of tty */
+#define TIOCNXCL 0x740e /* reset exclusive use of tty */
+#define TIOCOUTQ 0x7472 /* output queue size */
+#define TIOCSTI 0x5472 /* simulate terminal input */
+#define TIOCMGET 0x741d /* get all modem bits */
+#define TIOCMBIS 0x741b /* bis modem bits */
+#define TIOCMBIC 0x741c /* bic modem bits */
+#define TIOCMSET 0x741a /* set all modem bits */
+#define TIOCPKT 0x5470 /* pty: set/clear packet mode */
+#define TIOCPKT_DATA 0x00 /* data packet */
+#define TIOCPKT_FLUSHREAD 0x01 /* flush packet */
+#define TIOCPKT_FLUSHWRITE 0x02 /* flush packet */
+#define TIOCPKT_STOP 0x04 /* stop output */
+#define TIOCPKT_START 0x08 /* start output */
+#define TIOCPKT_NOSTOP 0x10 /* no more ^S, ^Q */
+#define TIOCPKT_DOSTOP 0x20 /* now do ^S ^Q */
+#define TIOCPKT_IOCTL 0x40 /* state change of pty driver */
+#define TIOCSWINSZ _IOW('t', 103, struct winsize) /* set window size */
+#define TIOCGWINSZ _IOR('t', 104, struct winsize) /* get window size */
+#define TIOCNOTTY 0x5471 /* void tty association */
+#define TIOCSETD 0x7401
+#define TIOCGETD 0x7400
+
+#define FIOCLEX 0x6601
+#define FIONCLEX 0x6602
+#define FIOASYNC 0x667d
+#define FIONBIO 0x667e
+#define FIOQSIZE 0x667f
+
+#define TIOCGLTC 0x7474 /* get special local chars */
+#define TIOCSLTC 0x7475 /* set special local chars */
+#define TIOCSPGRP _IOW('t', 118, int) /* set pgrp of tty */
+#define TIOCGPGRP _IOR('t', 119, int) /* get pgrp of tty */
+#define TIOCCONS _IOW('t', 120, int) /* become virtual console */
+
+#define FIONREAD 0x467f
+#define TIOCINQ FIONREAD
+
+#define TIOCGETP 0x7408
+#define TIOCSETP 0x7409
+#define TIOCSETN 0x740a /* TIOCSETP wo flush */
+
+/* #define TIOCSETA _IOW('t', 20, struct termios) set termios struct */
+/* #define TIOCSETAW _IOW('t', 21, struct termios) drain output, set */
+/* #define TIOCSETAF _IOW('t', 22, struct termios) drn out, fls in, set */
+/* #define TIOCGETD _IOR('t', 26, int) get line discipline */
+/* #define TIOCSETD _IOW('t', 27, int) set line discipline */
+ /* 127-124 compat */
+
+#define TIOCSBRK 0x5427 /* BSD compatibility */
+#define TIOCCBRK 0x5428 /* BSD compatibility */
+#define TIOCGSID 0x7416 /* Return the session ID of FD */
+#define TCGETS2 _IOR('T', 0x2A, struct termios2)
+#define TCSETS2 _IOW('T', 0x2B, struct termios2)
+#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
+#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
+#define TIOCGRS485 _IOR('T', 0x2E, struct serial_rs485)
+#define TIOCSRS485 _IOWR('T', 0x2F, struct serial_rs485)
+#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
+#define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */
+#define TIOCSIG _IOW('T', 0x36, int) /* Generate signal on Pty slave */
+#define TIOCVHANGUP 0x5437
+#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */
+#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
+#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
+#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */
+#define TIOCGISO7816 _IOR('T', 0x42, struct serial_iso7816)
+#define TIOCSISO7816 _IOWR('T', 0x43, struct serial_iso7816)
+
+/* I hope the range from 0x5480 on is free ... */
+#define TIOCSCTTY 0x5480 /* become controlling tty */
+#define TIOCGSOFTCAR 0x5481
+#define TIOCSSOFTCAR 0x5482
+#define TIOCLINUX 0x5483
+#define TIOCGSERIAL 0x5484
+#define TIOCSSERIAL 0x5485
+#define TCSBRKP 0x5486 /* Needed for POSIX tcsendbreak() */
+#define TIOCSERCONFIG 0x5488
+#define TIOCSERGWILD 0x5489
+#define TIOCSERSWILD 0x548a
+#define TIOCGLCKTRMIOS 0x548b
+#define TIOCSLCKTRMIOS 0x548c
+#define TIOCSERGSTRUCT 0x548d /* For debugging only */
+#define TIOCSERGETLSR 0x548e /* Get line status register */
+#define TIOCSERGETMULTI 0x548f /* Get multiport config */
+#define TIOCSERSETMULTI 0x5490 /* Set multiport config */
+#define TIOCMIWAIT 0x5491 /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x5492 /* read serial port inline interrupt counts */
+
+#endif /* __ASM_IOCTLS_H */
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h
new file mode 100644
index 000000000..edcf717c4
--- /dev/null
+++ b/arch/mips/include/uapi/asm/kvm.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Cavium, Inc.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#ifndef __LINUX_KVM_MIPS_H
+#define __LINUX_KVM_MIPS_H
+
+#include <linux/types.h>
+
+/*
+ * KVM MIPS specific structures and definitions.
+ *
+ * Some parts derived from the x86 version of this file.
+ */
+
+#define __KVM_HAVE_READONLY_MEM
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
+/*
+ * for KVM_GET_REGS and KVM_SET_REGS
+ *
+ * If Config[AT] is zero (32-bit CPU), the register contents are
+ * stored in the lower 32 bits of the struct kvm_regs fields and
+ * sign-extended to 64 bits.
+ */
+struct kvm_regs {
+ /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+ __u64 gpr[32];
+ __u64 hi;
+ __u64 lo;
+ __u64 pc;
+};
+
+/*
+ * for KVM_GET_FPU and KVM_SET_FPU
+ */
+struct kvm_fpu {
+};
+
+
+/*
+ * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
+ * registers. The id field is broken down as follows:
+ *
+ * bits[63..52] - As per linux/kvm.h
+ * bits[51..32] - Must be zero.
+ * bits[31..16] - Register set.
+ *
+ * Register set = 0: GP registers from kvm_regs (see definitions below).
+ *
+ * Register set = 1: CP0 registers.
+ * bits[15..8] - COP0 register set.
+ *
+ * COP0 register set = 0: Main CP0 registers.
+ * bits[7..3] - Register 'rd' index.
+ * bits[2..0] - Register 'sel' index.
+ *
+ * COP0 register set = 1: MAARs.
+ * bits[7..0] - MAAR index.
+ *
+ * Register set = 2: KVM specific registers (see definitions below).
+ *
+ * Register set = 3: FPU / MSA registers (see definitions below).
+ *
+ * Other register sets may be added in the future. Each set would
+ * have its own identifier in bits[31..16].
+ */
+
+#define KVM_REG_MIPS_GP (KVM_REG_MIPS | 0x0000000000000000ULL)
+#define KVM_REG_MIPS_CP0 (KVM_REG_MIPS | 0x0000000000010000ULL)
+#define KVM_REG_MIPS_KVM (KVM_REG_MIPS | 0x0000000000020000ULL)
+#define KVM_REG_MIPS_FPU (KVM_REG_MIPS | 0x0000000000030000ULL)
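+
+/*
+ * Illustrative sketch (not part of this header): following the id layout
+ * described above, an id for a main CP0 register can be assembled from its
+ * rd/sel pair. The mips_cp0_one_reg_id() helper is hypothetical; the caller
+ * must still OR in a KVM_REG_SIZE_* flag from <linux/kvm.h>.
+ */
+#if 0
+static inline __u64 mips_cp0_one_reg_id(unsigned int rd, unsigned int sel)
+{
+ /* register set 1 (CP0), COP0 register set 0 (main registers) */
+ return KVM_REG_MIPS_CP0 | (rd << 3) | sel;
+}
+#endif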
+
+
+/*
+ * KVM_REG_MIPS_GP - General purpose registers from kvm_regs.
+ */
+
+#define KVM_REG_MIPS_R0 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_MIPS_R1 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_MIPS_R2 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_MIPS_R3 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_MIPS_R4 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_MIPS_R5 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_MIPS_R6 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 6)
+#define KVM_REG_MIPS_R7 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 7)
+#define KVM_REG_MIPS_R8 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 8)
+#define KVM_REG_MIPS_R9 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 9)
+#define KVM_REG_MIPS_R10 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 10)
+#define KVM_REG_MIPS_R11 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 11)
+#define KVM_REG_MIPS_R12 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 12)
+#define KVM_REG_MIPS_R13 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 13)
+#define KVM_REG_MIPS_R14 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 14)
+#define KVM_REG_MIPS_R15 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 15)
+#define KVM_REG_MIPS_R16 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 16)
+#define KVM_REG_MIPS_R17 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 17)
+#define KVM_REG_MIPS_R18 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 18)
+#define KVM_REG_MIPS_R19 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 19)
+#define KVM_REG_MIPS_R20 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 20)
+#define KVM_REG_MIPS_R21 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 21)
+#define KVM_REG_MIPS_R22 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 22)
+#define KVM_REG_MIPS_R23 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 23)
+#define KVM_REG_MIPS_R24 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 24)
+#define KVM_REG_MIPS_R25 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 25)
+#define KVM_REG_MIPS_R26 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 26)
+#define KVM_REG_MIPS_R27 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 27)
+#define KVM_REG_MIPS_R28 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 28)
+#define KVM_REG_MIPS_R29 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 29)
+#define KVM_REG_MIPS_R30 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 30)
+#define KVM_REG_MIPS_R31 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 31)
+
+#define KVM_REG_MIPS_HI (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 32)
+#define KVM_REG_MIPS_LO (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 33)
+#define KVM_REG_MIPS_PC (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 34)
+
+
+/*
+ * KVM_REG_MIPS_CP0 - Coprocessor 0 registers.
+ */
+
+#define KVM_REG_MIPS_MAAR (KVM_REG_MIPS_CP0 | (1 << 8))
+#define KVM_REG_MIPS_CP0_MAAR(n) (KVM_REG_MIPS_MAAR | \
+ KVM_REG_SIZE_U64 | (n))
+
+
+/*
+ * KVM_REG_MIPS_KVM - KVM specific control registers.
+ */
+
+/*
+ * CP0_Count control
+ * DC: Set 0: Master disable CP0_Count and set COUNT_RESUME to now
+ * Set 1: Master re-enable CP0_Count with unchanged bias, handling timer
+ * interrupts since COUNT_RESUME
+ * This can be used to freeze the timer to get a consistent snapshot of
+ * the CP0_Count and timer interrupt pending state, while also resuming
+ * safely without losing time or guest timer interrupts.
+ * Other: Reserved, do not change.
+ */
+#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_MIPS_COUNT_CTL_DC 0x00000001
+
+/*
+ * CP0_Count resume monotonic nanoseconds
+ * The monotonic nanosecond time of the last set of COUNT_CTL.DC (master
+ * disable). Any reads and writes of Count related registers while
+ * COUNT_CTL.DC=1 will appear to occur at this time. When COUNT_CTL.DC is
+ * cleared again (master enable) any timer interrupts since this time will be
+ * emulated.
+ * Modifications to times in the future are rejected.
+ */
+#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 1)
+/*
+ * CP0_Count rate in Hz
+ * Specifies the rate of the CP0_Count timer in Hz. Modifications occur without
+ * discontinuities in CP0_Count.
+ */
+#define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 2)
+
+
+/*
+ * KVM_REG_MIPS_FPU - Floating Point and MIPS SIMD Architecture (MSA) registers.
+ *
+ * bits[15..8] - Register subset (see definitions below).
+ * bits[7..5] - Must be zero.
+ * bits[4..0] - Register number within register subset.
+ */
+
+#define KVM_REG_MIPS_FPR (KVM_REG_MIPS_FPU | 0x0000000000000000ULL)
+#define KVM_REG_MIPS_FCR (KVM_REG_MIPS_FPU | 0x0000000000000100ULL)
+#define KVM_REG_MIPS_MSACR (KVM_REG_MIPS_FPU | 0x0000000000000200ULL)
+
+/*
+ * KVM_REG_MIPS_FPR - Floating point / Vector registers.
+ */
+#define KVM_REG_MIPS_FPR_32(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U32 | (n))
+#define KVM_REG_MIPS_FPR_64(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U64 | (n))
+#define KVM_REG_MIPS_VEC_128(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U128 | (n))
+
+/*
+ * KVM_REG_MIPS_FCR - Floating point control registers.
+ */
+#define KVM_REG_MIPS_FCR_IR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 0)
+#define KVM_REG_MIPS_FCR_CSR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 31)
+
+/*
+ * KVM_REG_MIPS_MSACR - MIPS SIMD Architecture (MSA) control registers.
+ */
+#define KVM_REG_MIPS_MSA_IR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 0)
+#define KVM_REG_MIPS_MSA_CSR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 1)
+
+
+/*
+ * KVM MIPS specific structures and definitions
+ *
+ */
+struct kvm_debug_exit_arch {
+ __u64 epc;
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+/* dummy definition */
+struct kvm_sregs {
+};
+
+struct kvm_mips_interrupt {
+ /* in */
+ __u32 cpu;
+ __u32 irq;
+};
+
+#endif /* __LINUX_KVM_MIPS_H */
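
The register-id layout documented above is what user space feeds to KVM_GET_ONE_REG / KVM_SET_ONE_REG. As a minimal sketch (not part of this patch; it assumes vcpu_fd came from an earlier KVM_CREATE_VCPU ioctl and that the build uses these MIPS uapi headers), reading the guest PC could look like:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Fetch the guest program counter through the ONE_REG interface.
 * KVM_REG_MIPS_PC encodes: arch = MIPS, set = GP, size = U64, index 34. */
static int read_guest_pc(int vcpu_fd, uint64_t *pc)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_PC,
		.addr = (uintptr_t)pc,	/* kernel writes the value here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}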
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
new file mode 100644
index 000000000..57dc2ac4f
--- /dev/null
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1999, 2002 by Ralf Baechle
+ */
+#ifndef _ASM_MMAN_H
+#define _ASM_MMAN_H
+
+/*
+ * Protections are chosen from these bits, OR'd together. The
+ * implementation does not necessarily support PROT_EXEC or PROT_WRITE
+ * without PROT_READ. The only guarantees are that no writing will be
+ * allowed without PROT_WRITE and no access will be allowed for PROT_NONE.
+ */
+#define PROT_NONE 0x00 /* page can not be accessed */
+#define PROT_READ 0x01 /* page can be read */
+#define PROT_WRITE 0x02 /* page can be written */
+#define PROT_EXEC 0x04 /* page can be executed */
+/* 0x08 reserved for PROT_EXEC_NOFLUSH */
+#define PROT_SEM 0x10 /* page may be used for atomic ops */
+#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
+#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
+
+/*
+ * Flags for mmap
+ */
+/* 0x01 - 0x03 are defined in linux/mman.h */
+#define MAP_TYPE 0x00f /* Mask for type of mapping */
+#define MAP_FIXED 0x010 /* Interpret addr exactly */
+
+/* not used by linux, but here to make sure we don't clash with ABI defines */
+#define MAP_RENAME 0x020 /* Assign page to file */
+#define MAP_AUTOGROW 0x040 /* File may grow by writing */
+#define MAP_LOCAL 0x080 /* Copy on fork/sproc */
+#define MAP_AUTORSRV 0x100 /* Logical swap reserved on demand */
+
+/* These are linux-specific */
+#define MAP_NORESERVE 0x0400 /* don't check for reservations */
+#define MAP_ANONYMOUS 0x0800 /* don't use a file */
+#define MAP_GROWSDOWN 0x1000 /* stack-like segment */
+#define MAP_DENYWRITE 0x2000 /* ETXTBSY */
+#define MAP_EXECUTABLE 0x4000 /* mark it as an executable */
+#define MAP_LOCKED 0x8000 /* pages are locked */
+#define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
+#define MAP_NONBLOCK 0x20000 /* do not block on IO */
+#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB 0x80000 /* create a huge page mapping */
+#define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */
+
+/*
+ * Flags for msync
+ */
+#define MS_ASYNC 0x0001 /* sync memory asynchronously */
+#define MS_INVALIDATE 0x0002 /* invalidate mappings & caches */
+#define MS_SYNC 0x0004 /* synchronous memory sync */
+
+/*
+ * Flags for mlockall
+ */
+#define MCL_CURRENT 1 /* lock all current mappings */
+#define MCL_FUTURE 2 /* lock all future mappings */
+#define MCL_ONFAULT 4 /* lock all pages that are faulted in */
+
+/*
+ * Flags for mlock
+ */
+#define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */
+
+#define MADV_NORMAL 0 /* no further special treatment */
+#define MADV_RANDOM 1 /* expect random page references */
+#define MADV_SEQUENTIAL 2 /* expect sequential page references */
+#define MADV_WILLNEED 3 /* will need these pages */
+#define MADV_DONTNEED 4 /* don't need these pages */
+
+/* common parameters: try to keep these consistent across architectures */
+#define MADV_FREE 8 /* free pages only if memory pressure */
+#define MADV_REMOVE 9 /* remove these pages & resources */
+#define MADV_DONTFORK 10 /* don't inherit across fork */
+#define MADV_DOFORK 11 /* do inherit across fork */
+
+#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
+#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
+#define MADV_HWPOISON 100 /* poison a page for testing */
+
+#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */
+
+#define MADV_DONTDUMP 16 /* Explicitly exclude from the core dump,
+ overrides the coredump filter bits */
+#define MADV_DODUMP 17 /* Clear the MADV_DONTDUMP flag */
+
+#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
+
+/* compatibility flags */
+#define MAP_FILE 0
+
+#define PKEY_DISABLE_ACCESS 0x1
+#define PKEY_DISABLE_WRITE 0x2
+#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
+ PKEY_DISABLE_WRITE)
+
+#endif /* _ASM_MMAN_H */
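
The MADV_* values above plug straight into madvise(2). A hedged usage sketch (not part of this patch; error handling omitted) that maps an anonymous region and excludes it from core dumps:

#include <stddef.h>
#include <sys/mman.h>

/* Map an anonymous read/write region and mark it MADV_DONTDUMP so it is
 * left out of core dumps, overriding the coredump filter bits. */
static void *map_nodump(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p != MAP_FAILED)
		madvise(p, len, MADV_DONTDUMP);
	return p;
}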
diff --git a/arch/mips/include/uapi/asm/msgbuf.h b/arch/mips/include/uapi/asm/msgbuf.h
new file mode 100644
index 000000000..128af72f2
--- /dev/null
+++ b/arch/mips/include/uapi/asm/msgbuf.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_MSGBUF_H
+#define _ASM_MSGBUF_H
+
+#include <asm/ipcbuf.h>
+
+/*
+ * The msqid64_ds structure for the MIPS architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous unsigned long values
+ */
+
+#if defined(__mips64)
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ long msg_stime; /* last msgsnd time */
+ long msg_rtime; /* last msgrcv time */
+ long msg_ctime; /* last change time */
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+#elif defined (__MIPSEB__)
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ unsigned long msg_stime_high;
+ unsigned long msg_stime; /* last msgsnd time */
+ unsigned long msg_rtime_high;
+ unsigned long msg_rtime; /* last msgrcv time */
+ unsigned long msg_ctime_high;
+ unsigned long msg_ctime; /* last change time */
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+#elif defined (__MIPSEL__)
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ unsigned long msg_stime; /* last msgsnd time */
+ unsigned long msg_stime_high;
+ unsigned long msg_rtime; /* last msgrcv time */
+ unsigned long msg_rtime_high;
+ unsigned long msg_ctime; /* last change time */
+ unsigned long msg_ctime_high;
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+#else
+#warning no endianness set
+#endif
+
+#endif /* _ASM_MSGBUF_H */
diff --git a/arch/mips/include/uapi/asm/param.h b/arch/mips/include/uapi/asm/param.h
new file mode 100644
index 000000000..3f337ed66
--- /dev/null
+++ b/arch/mips/include/uapi/asm/param.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright 1994 - 2000, 2002 Ralf Baechle (ralf@gnu.org)
+ * Copyright 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_PARAM_H
+#define _ASM_PARAM_H
+
+#define EXEC_PAGESIZE 65536
+
+#include <asm-generic/param.h>
+
+#endif /* _ASM_PARAM_H */
diff --git a/arch/mips/include/uapi/asm/poll.h b/arch/mips/include/uapi/asm/poll.h
new file mode 100644
index 000000000..ad289d7b7
--- /dev/null
+++ b/arch/mips/include/uapi/asm/poll.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_POLL_H
+#define __ASM_POLL_H
+
+#define POLLWRNORM POLLOUT
+#define POLLWRBAND 0x0100
+
+#include <asm-generic/poll.h>
+
+#endif /* __ASM_POLL_H */
diff --git a/arch/mips/include/uapi/asm/posix_types.h b/arch/mips/include/uapi/asm/posix_types.h
new file mode 100644
index 000000000..f0ccb5b90
--- /dev/null
+++ b/arch/mips/include/uapi/asm/posix_types.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 97, 98, 99, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_POSIX_TYPES_H
+#define _ASM_POSIX_TYPES_H
+
+#include <asm/sgidefs.h>
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef long __kernel_daddr_t;
+#define __kernel_daddr_t __kernel_daddr_t
+
+#include <asm-generic/posix_types.h>
+
+#endif /* _ASM_POSIX_TYPES_H */
diff --git a/arch/mips/include/uapi/asm/ptrace.h b/arch/mips/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000..f3c025445
--- /dev/null
+++ b/arch/mips/include/uapi/asm/ptrace.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _UAPI_ASM_PTRACE_H
+#define _UAPI_ASM_PTRACE_H
+
+#include <linux/types.h>
+
+/* 0 - 31 are integer registers, 32 - 63 are fp registers. */
+#define FPR_BASE 32
+#define PC 64
+#define CAUSE 65
+#define BADVADDR 66
+#define MMHI 67
+#define MMLO 68
+#define FPC_CSR 69
+#define FPC_EIR 70
+#define DSP_BASE 71 /* 3 more hi / lo register pairs */
+#define DSP_CONTROL 77
+#define ACX 78
+
+/*
+ * This struct defines the registers as used by PTRACE_{GET,SET}REGS. The
+ * format is the same for both 32- and 64-bit processes. Registers for 32-bit
+ * processes are sign extended.
+ */
+#ifdef __KERNEL__
+struct user_pt_regs {
+#else
+struct pt_regs {
+#endif
+ /* Saved main processor registers. */
+ __u64 regs[32];
+
+ /* Saved special registers. */
+ __u64 lo;
+ __u64 hi;
+ __u64 cp0_epc;
+ __u64 cp0_badvaddr;
+ __u64 cp0_status;
+ __u64 cp0_cause;
+} __attribute__ ((aligned (8)));
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETFPREGS 14
+#define PTRACE_SETFPREGS 15
+/* #define PTRACE_GETFPXREGS 18 */
+/* #define PTRACE_SETFPXREGS 19 */
+
+#define PTRACE_OLDSETOPTIONS 21
+
+#define PTRACE_GET_THREAD_AREA 25
+#define PTRACE_SET_THREAD_AREA 26
+
+/* Calls to trace a 64bit program from a 32bit program. */
+#define PTRACE_PEEKTEXT_3264 0xc0
+#define PTRACE_PEEKDATA_3264 0xc1
+#define PTRACE_POKETEXT_3264 0xc2
+#define PTRACE_POKEDATA_3264 0xc3
+#define PTRACE_GET_THREAD_AREA_3264 0xc4
+
+/* Read and write watchpoint registers. */
+enum pt_watch_style {
+ pt_watch_style_mips32,
+ pt_watch_style_mips64
+};
+struct mips32_watch_regs {
+ unsigned int watchlo[8];
+ /* Lower 16 bits of watchhi. */
+ unsigned short watchhi[8];
+ /* Valid mask and I R W bits.
+ * bit 0 -- 1 if W bit is usable.
+ * bit 1 -- 1 if R bit is usable.
+ * bit 2 -- 1 if I bit is usable.
+ * bits 3 - 11 -- Valid watchhi mask bits.
+ */
+ unsigned short watch_masks[8];
+ /* The number of valid watch register pairs. */
+ unsigned int num_valid;
+} __attribute__((aligned(8)));
+
+struct mips64_watch_regs {
+ unsigned long long watchlo[8];
+ unsigned short watchhi[8];
+ unsigned short watch_masks[8];
+ unsigned int num_valid;
+} __attribute__((aligned(8)));
+
+struct pt_watch_regs {
+ enum pt_watch_style style;
+ union {
+ struct mips32_watch_regs mips32;
+ struct mips64_watch_regs mips64;
+ };
+};
+
+#define PTRACE_GET_WATCH_REGS 0xd0
+#define PTRACE_SET_WATCH_REGS 0xd1
+
+
+#endif /* _UAPI_ASM_PTRACE_H */
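
struct pt_watch_regs is exchanged with the kernel through the two requests above. A sketch (not part of this patch; on MIPS the structure pointer travels in ptrace's addr argument, and the child is assumed to be stopped under ptrace already):

#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>		/* struct pt_watch_regs, PTRACE_GET_WATCH_REGS */

/* Read the hardware watchpoint state of a stopped tracee. */
static long get_watch_regs(pid_t child, struct pt_watch_regs *wr)
{
	return ptrace(PTRACE_GET_WATCH_REGS, child, wr, (void *)0);
}

On return, wr->style tells the caller whether the mips32 or mips64 union member holds the valid data.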
diff --git a/arch/mips/include/uapi/asm/reg.h b/arch/mips/include/uapi/asm/reg.h
new file mode 100644
index 000000000..56d15cb81
--- /dev/null
+++ b/arch/mips/include/uapi/asm/reg.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Various register offset definitions for debuggers, core file
+ * examiners and whatnot.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1999 Ralf Baechle
+ * Copyright (C) 1995, 1999 Silicon Graphics
+ */
+#ifndef __UAPI_ASM_MIPS_REG_H
+#define __UAPI_ASM_MIPS_REG_H
+
+#define MIPS32_EF_R0 6
+#define MIPS32_EF_R1 7
+#define MIPS32_EF_R2 8
+#define MIPS32_EF_R3 9
+#define MIPS32_EF_R4 10
+#define MIPS32_EF_R5 11
+#define MIPS32_EF_R6 12
+#define MIPS32_EF_R7 13
+#define MIPS32_EF_R8 14
+#define MIPS32_EF_R9 15
+#define MIPS32_EF_R10 16
+#define MIPS32_EF_R11 17
+#define MIPS32_EF_R12 18
+#define MIPS32_EF_R13 19
+#define MIPS32_EF_R14 20
+#define MIPS32_EF_R15 21
+#define MIPS32_EF_R16 22
+#define MIPS32_EF_R17 23
+#define MIPS32_EF_R18 24
+#define MIPS32_EF_R19 25
+#define MIPS32_EF_R20 26
+#define MIPS32_EF_R21 27
+#define MIPS32_EF_R22 28
+#define MIPS32_EF_R23 29
+#define MIPS32_EF_R24 30
+#define MIPS32_EF_R25 31
+
+/*
+ * k0/k1 unsaved
+ */
+#define MIPS32_EF_R26 32
+#define MIPS32_EF_R27 33
+
+#define MIPS32_EF_R28 34
+#define MIPS32_EF_R29 35
+#define MIPS32_EF_R30 36
+#define MIPS32_EF_R31 37
+
+/*
+ * Saved special registers
+ */
+#define MIPS32_EF_LO 38
+#define MIPS32_EF_HI 39
+
+#define MIPS32_EF_CP0_EPC 40
+#define MIPS32_EF_CP0_BADVADDR 41
+#define MIPS32_EF_CP0_STATUS 42
+#define MIPS32_EF_CP0_CAUSE 43
+#define MIPS32_EF_UNUSED0 44
+
+#define MIPS32_EF_SIZE 180
+
+#define MIPS64_EF_R0 0
+#define MIPS64_EF_R1 1
+#define MIPS64_EF_R2 2
+#define MIPS64_EF_R3 3
+#define MIPS64_EF_R4 4
+#define MIPS64_EF_R5 5
+#define MIPS64_EF_R6 6
+#define MIPS64_EF_R7 7
+#define MIPS64_EF_R8 8
+#define MIPS64_EF_R9 9
+#define MIPS64_EF_R10 10
+#define MIPS64_EF_R11 11
+#define MIPS64_EF_R12 12
+#define MIPS64_EF_R13 13
+#define MIPS64_EF_R14 14
+#define MIPS64_EF_R15 15
+#define MIPS64_EF_R16 16
+#define MIPS64_EF_R17 17
+#define MIPS64_EF_R18 18
+#define MIPS64_EF_R19 19
+#define MIPS64_EF_R20 20
+#define MIPS64_EF_R21 21
+#define MIPS64_EF_R22 22
+#define MIPS64_EF_R23 23
+#define MIPS64_EF_R24 24
+#define MIPS64_EF_R25 25
+
+/*
+ * k0/k1 unsaved
+ */
+#define MIPS64_EF_R26 26
+#define MIPS64_EF_R27 27
+
+
+#define MIPS64_EF_R28 28
+#define MIPS64_EF_R29 29
+#define MIPS64_EF_R30 30
+#define MIPS64_EF_R31 31
+
+/*
+ * Saved special registers
+ */
+#define MIPS64_EF_LO 32
+#define MIPS64_EF_HI 33
+
+#define MIPS64_EF_CP0_EPC 34
+#define MIPS64_EF_CP0_BADVADDR 35
+#define MIPS64_EF_CP0_STATUS 36
+#define MIPS64_EF_CP0_CAUSE 37
+
+#define MIPS64_EF_SIZE 304 /* size in bytes */
+
+#if _MIPS_SIM == _MIPS_SIM_ABI32
+
+#define EF_R0 MIPS32_EF_R0
+#define EF_R1 MIPS32_EF_R1
+#define EF_R2 MIPS32_EF_R2
+#define EF_R3 MIPS32_EF_R3
+#define EF_R4 MIPS32_EF_R4
+#define EF_R5 MIPS32_EF_R5
+#define EF_R6 MIPS32_EF_R6
+#define EF_R7 MIPS32_EF_R7
+#define EF_R8 MIPS32_EF_R8
+#define EF_R9 MIPS32_EF_R9
+#define EF_R10 MIPS32_EF_R10
+#define EF_R11 MIPS32_EF_R11
+#define EF_R12 MIPS32_EF_R12
+#define EF_R13 MIPS32_EF_R13
+#define EF_R14 MIPS32_EF_R14
+#define EF_R15 MIPS32_EF_R15
+#define EF_R16 MIPS32_EF_R16
+#define EF_R17 MIPS32_EF_R17
+#define EF_R18 MIPS32_EF_R18
+#define EF_R19 MIPS32_EF_R19
+#define EF_R20 MIPS32_EF_R20
+#define EF_R21 MIPS32_EF_R21
+#define EF_R22 MIPS32_EF_R22
+#define EF_R23 MIPS32_EF_R23
+#define EF_R24 MIPS32_EF_R24
+#define EF_R25 MIPS32_EF_R25
+#define EF_R26 MIPS32_EF_R26
+#define EF_R27 MIPS32_EF_R27
+#define EF_R28 MIPS32_EF_R28
+#define EF_R29 MIPS32_EF_R29
+#define EF_R30 MIPS32_EF_R30
+#define EF_R31 MIPS32_EF_R31
+#define EF_LO MIPS32_EF_LO
+#define EF_HI MIPS32_EF_HI
+#define EF_CP0_EPC MIPS32_EF_CP0_EPC
+#define EF_CP0_BADVADDR MIPS32_EF_CP0_BADVADDR
+#define EF_CP0_STATUS MIPS32_EF_CP0_STATUS
+#define EF_CP0_CAUSE MIPS32_EF_CP0_CAUSE
+#define EF_UNUSED0 MIPS32_EF_UNUSED0
+#define EF_SIZE MIPS32_EF_SIZE
+
+#elif _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
+
+#define EF_R0 MIPS64_EF_R0
+#define EF_R1 MIPS64_EF_R1
+#define EF_R2 MIPS64_EF_R2
+#define EF_R3 MIPS64_EF_R3
+#define EF_R4 MIPS64_EF_R4
+#define EF_R5 MIPS64_EF_R5
+#define EF_R6 MIPS64_EF_R6
+#define EF_R7 MIPS64_EF_R7
+#define EF_R8 MIPS64_EF_R8
+#define EF_R9 MIPS64_EF_R9
+#define EF_R10 MIPS64_EF_R10
+#define EF_R11 MIPS64_EF_R11
+#define EF_R12 MIPS64_EF_R12
+#define EF_R13 MIPS64_EF_R13
+#define EF_R14 MIPS64_EF_R14
+#define EF_R15 MIPS64_EF_R15
+#define EF_R16 MIPS64_EF_R16
+#define EF_R17 MIPS64_EF_R17
+#define EF_R18 MIPS64_EF_R18
+#define EF_R19 MIPS64_EF_R19
+#define EF_R20 MIPS64_EF_R20
+#define EF_R21 MIPS64_EF_R21
+#define EF_R22 MIPS64_EF_R22
+#define EF_R23 MIPS64_EF_R23
+#define EF_R24 MIPS64_EF_R24
+#define EF_R25 MIPS64_EF_R25
+#define EF_R26 MIPS64_EF_R26
+#define EF_R27 MIPS64_EF_R27
+#define EF_R28 MIPS64_EF_R28
+#define EF_R29 MIPS64_EF_R29
+#define EF_R30 MIPS64_EF_R30
+#define EF_R31 MIPS64_EF_R31
+#define EF_LO MIPS64_EF_LO
+#define EF_HI MIPS64_EF_HI
+#define EF_CP0_EPC MIPS64_EF_CP0_EPC
+#define EF_CP0_BADVADDR MIPS64_EF_CP0_BADVADDR
+#define EF_CP0_STATUS MIPS64_EF_CP0_STATUS
+#define EF_CP0_CAUSE MIPS64_EF_CP0_CAUSE
+#define EF_SIZE MIPS64_EF_SIZE
+
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
+
+#endif /* __UAPI_ASM_MIPS_REG_H */
diff --git a/arch/mips/include/uapi/asm/resource.h b/arch/mips/include/uapi/asm/resource.h
new file mode 100644
index 000000000..372ff8f4b
--- /dev/null
+++ b/arch/mips/include/uapi/asm/resource.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 96, 98, 99, 2000 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_RESOURCE_H
+#define _ASM_RESOURCE_H
+
+
+/*
+ * These five resource limit IDs have a MIPS/Linux-specific ordering,
+ * the rest comes from the generic header:
+ */
+#define RLIMIT_NOFILE 5 /* max number of open files */
+#define RLIMIT_AS 6 /* address space limit */
+#define RLIMIT_RSS 7 /* max resident set size */
+#define RLIMIT_NPROC 8 /* max number of processes */
+#define RLIMIT_MEMLOCK 9 /* max locked-in-memory address space */
+
+/*
+ * SuS says limits have to be unsigned,
+ * which makes a ton more sense anyway,
+ * but we keep the old value on MIPS32
+ * for compatibility:
+ */
+#ifndef __mips64
+# define RLIM_INFINITY 0x7fffffffUL
+#endif
+
+#include <asm-generic/resource.h>
+
+#endif /* _ASM_RESOURCE_H */
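
Portable code never notices the MIPS-specific numbering above because it only uses the symbolic names, as in this small sketch (not part of this patch):

#include <stdio.h>
#include <sys/resource.h>

/* Print the soft and hard open-file limits; RLIMIT_NOFILE is 5 here
 * instead of the generic 7, but the symbolic name hides that. */
static void show_nofile_limit(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) == 0)
		printf("soft=%llu hard=%llu\n",
		       (unsigned long long)rl.rlim_cur,
		       (unsigned long long)rl.rlim_max);
}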
diff --git a/arch/mips/include/uapi/asm/sembuf.h b/arch/mips/include/uapi/asm/sembuf.h
new file mode 100644
index 000000000..ba7fe0c89
--- /dev/null
+++ b/arch/mips/include/uapi/asm/sembuf.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_SEMBUF_H
+#define _ASM_SEMBUF_H
+
+#include <asm/ipcbuf.h>
+
+/*
+ * The semid64_ds structure for the MIPS architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for 2 miscellaneous 64-bit values on mips64,
+ * but is used for the upper 32 bits of the time values on mips32.
+ */
+
+#ifdef __mips64
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+ long sem_otime; /* last semop time */
+ long sem_ctime; /* last change time */
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+#else
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+ unsigned long sem_otime; /* last semop time */
+ unsigned long sem_ctime; /* last change time */
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long sem_otime_high;
+ unsigned long sem_ctime_high;
+};
+#endif
+
+#endif /* _ASM_SEMBUF_H */
diff --git a/arch/mips/include/uapi/asm/setup.h b/arch/mips/include/uapi/asm/setup.h
new file mode 100644
index 000000000..7d48c433b
--- /dev/null
+++ b/arch/mips/include/uapi/asm/setup.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_MIPS_SETUP_H
+#define _UAPI_MIPS_SETUP_H
+
+#define COMMAND_LINE_SIZE 4096
+
+
+#endif /* _UAPI_MIPS_SETUP_H */
diff --git a/arch/mips/include/uapi/asm/sgidefs.h b/arch/mips/include/uapi/asm/sgidefs.h
new file mode 100644
index 000000000..69c3de90c
--- /dev/null
+++ b/arch/mips/include/uapi/asm/sgidefs.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1999, 2001 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+#ifndef __ASM_SGIDEFS_H
+#define __ASM_SGIDEFS_H
+
+/*
+ * Definitions for the ISA levels
+ *
+ * With the introduction of the MIPS32 / MIPS64 instruction set definitions,
+ * the MIPS ISAs are no longer subsets of each other.  Therefore, comparing
+ * these symbols with anything other than == may produce unexpected results
+ * and is forbidden!
+ */
+#define _MIPS_ISA_MIPS1 1
+#define _MIPS_ISA_MIPS2 2
+#define _MIPS_ISA_MIPS3 3
+#define _MIPS_ISA_MIPS4 4
+#define _MIPS_ISA_MIPS5 5
+#define _MIPS_ISA_MIPS32 6
+#define _MIPS_ISA_MIPS64 7
+
+/*
+ * Subprogram calling convention
+ */
+#define _MIPS_SIM_ABI32 1
+#define _MIPS_SIM_NABI32 2
+#define _MIPS_SIM_ABI64 3
+
+#endif /* __ASM_SGIDEFS_H */
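
The only sanctioned use of these symbols is equality tests, typically in preprocessor ABI selection. A hedged illustration (not part of this patch; _MIPS_SIM itself is provided by the compiler, not by this header, and mips_reg_t is a made-up name for the example):

#include <asm/sgidefs.h>

/* Select a register-sized type per ABI using == comparisons only. */
#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
typedef unsigned long long mips_reg_t;	/* 64-bit register view */
#elif _MIPS_SIM == _MIPS_SIM_ABI32
typedef unsigned long mips_reg_t;	/* o32: 32-bit registers */
#endif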
diff --git a/arch/mips/include/uapi/asm/shmbuf.h b/arch/mips/include/uapi/asm/shmbuf.h
new file mode 100644
index 000000000..680bb95b2
--- /dev/null
+++ b/arch/mips/include/uapi/asm/shmbuf.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_SHMBUF_H
+#define _ASM_SHMBUF_H
+
+/*
+ * The shmid64_ds structure for the MIPS architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * As MIPS was lacking proper padding after shm_?time, we use 48 bits
+ * of the padding at the end to store a few additional bits of the time.
+ * libc implementations need to take care to convert this into a proper
+ * data structure when moving to 64-bit time_t.
+ */
+
+#ifdef __mips64
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ long shm_atime; /* last attach time */
+ long shm_dtime; /* last detach time */
+ long shm_ctime; /* last change time */
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+#else
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ unsigned long shm_atime; /* last attach time */
+ unsigned long shm_dtime; /* last detach time */
+ unsigned long shm_ctime; /* last change time */
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned short shm_atime_high;
+ unsigned short shm_dtime_high;
+ unsigned short shm_ctime_high;
+ unsigned short __unused1;
+};
+#endif
+
+struct shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _ASM_SHMBUF_H */
diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000..d0a540e88
--- /dev/null
+++ b/arch/mips/include/uapi/asm/sigcontext.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1997, 1999 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#ifndef _UAPI_ASM_SIGCONTEXT_H
+#define _UAPI_ASM_SIGCONTEXT_H
+
+#include <linux/types.h>
+#include <asm/sgidefs.h>
+
+/* scalar FP context was used */
+#define USED_FP (1 << 0)
+
+/* the value of Status.FR when context was saved */
+#define USED_FR1 (1 << 1)
+
+/* FR=1, but with odd singles in bits 63:32 of preceding even double */
+#define USED_HYBRID_FPRS (1 << 2)
+
+/* extended context was used, see struct extcontext for details */
+#define USED_EXTCONTEXT (1 << 3)
+
+#if _MIPS_SIM == _MIPS_SIM_ABI32
+
+/*
+ * Keep this struct definition in sync with the sigcontext fragment
+ * in arch/mips/kernel/asm-offsets.c
+ */
+struct sigcontext {
+ unsigned int sc_regmask; /* Unused */
+ unsigned int sc_status; /* Unused */
+ unsigned long long sc_pc;
+ unsigned long long sc_regs[32];
+ unsigned long long sc_fpregs[32];
+ unsigned int sc_acx; /* Was sc_ownedfp */
+ unsigned int sc_fpc_csr;
+ unsigned int sc_fpc_eir; /* Unused */
+ unsigned int sc_used_math;
+ unsigned int sc_dsp; /* dsp status, was sc_ssflags */
+ unsigned long long sc_mdhi;
+ unsigned long long sc_mdlo;
+ unsigned long sc_hi1; /* Was sc_cause */
+ unsigned long sc_lo1; /* Was sc_badvaddr */
+ unsigned long sc_hi2; /* Was sc_sigset[4] */
+ unsigned long sc_lo2;
+ unsigned long sc_hi3;
+ unsigned long sc_lo3;
+};
+
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
+
+#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
+
+#include <linux/posix_types.h>
+/*
+ * Keep this struct definition in sync with the sigcontext fragment
+ * in arch/mips/kernel/asm-offsets.c
+ *
+ * Warning: this structure was ill-defined, with sc_badvaddr being just an
+ * unsigned int, so it was changed to unsigned long in 2.6.0-test1. This may
+ * break binary compatibility - no prisoners.
+ * The DSP ASE was added in 2.6.12-rc4: sc_mdhi and sc_mdlo each became an
+ * array of four entries, and sc_dsp and sc_reserved were added for padding.
+ * No prisoners.
+ */
+struct sigcontext {
+ __u64 sc_regs[32];
+ __u64 sc_fpregs[32];
+ __u64 sc_mdhi;
+ __u64 sc_hi1;
+ __u64 sc_hi2;
+ __u64 sc_hi3;
+ __u64 sc_mdlo;
+ __u64 sc_lo1;
+ __u64 sc_lo2;
+ __u64 sc_lo3;
+ __u64 sc_pc;
+ __u32 sc_fpc_csr;
+ __u32 sc_used_math;
+ __u32 sc_dsp;
+ __u32 sc_reserved;
+};
+
+
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
+
+#endif /* _UAPI_ASM_SIGCONTEXT_H */
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
new file mode 100644
index 000000000..c34c7eef0
--- /dev/null
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998, 1999, 2001, 2003 Ralf Baechle
+ * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
+ */
+#ifndef _UAPI_ASM_SIGINFO_H
+#define _UAPI_ASM_SIGINFO_H
+
+
+#define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(long) + 2*sizeof(int))
+#undef __ARCH_SI_TRAPNO /* exception code needs to fill this ... */
+
+#define __ARCH_HAS_SWAPPED_SIGINFO
+
+#include <asm-generic/siginfo.h>
+
+/*
+ * si_code values
+ * Again these have been chosen to be IRIX compatible.
+ */
+#undef SI_ASYNCIO
+#undef SI_TIMER
+#undef SI_MESGQ
+#define SI_ASYNCIO -2 /* sent by AIO completion */
+#define SI_TIMER -3 /* sent by timer expiration */
+#define SI_MESGQ -4 /* sent by real time mesq state change */
+
+#endif /* _UAPI_ASM_SIGINFO_H */
diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h
new file mode 100644
index 000000000..53104b10a
--- /dev/null
+++ b/arch/mips/include/uapi/asm/signal.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#ifndef _UAPI_ASM_SIGNAL_H
+#define _UAPI_ASM_SIGNAL_H
+
+#include <linux/types.h>
+
+#define _NSIG 128
+#define _NSIG_BPW (sizeof(unsigned long) * 8)
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+#define SIGHUP 1 /* Hangup (POSIX). */
+#define SIGINT 2 /* Interrupt (ANSI). */
+#define SIGQUIT 3 /* Quit (POSIX). */
+#define SIGILL 4 /* Illegal instruction (ANSI). */
+#define SIGTRAP 5 /* Trace trap (POSIX). */
+#define SIGIOT 6 /* IOT trap (4.2 BSD). */
+#define SIGABRT SIGIOT /* Abort (ANSI). */
+#define SIGEMT 7
+#define SIGFPE 8 /* Floating-point exception (ANSI). */
+#define SIGKILL 9 /* Kill, unblockable (POSIX). */
+#define SIGBUS 10 /* BUS error (4.2 BSD). */
+#define SIGSEGV 11 /* Segmentation violation (ANSI). */
+#define SIGSYS 12
+#define SIGPIPE 13 /* Broken pipe (POSIX). */
+#define SIGALRM 14 /* Alarm clock (POSIX). */
+#define SIGTERM 15 /* Termination (ANSI). */
+#define SIGUSR1 16 /* User-defined signal 1 (POSIX). */
+#define SIGUSR2 17 /* User-defined signal 2 (POSIX). */
+#define SIGCHLD 18 /* Child status has changed (POSIX). */
+#define SIGCLD SIGCHLD /* Same as SIGCHLD (System V). */
+#define SIGPWR 19 /* Power failure restart (System V). */
+#define SIGWINCH 20 /* Window size change (4.3 BSD, Sun). */
+#define SIGURG 21 /* Urgent condition on socket (4.2 BSD). */
+#define SIGIO 22 /* I/O now possible (4.2 BSD). */
+#define SIGPOLL SIGIO /* Pollable event occurred (System V). */
+#define SIGSTOP 23 /* Stop, unblockable (POSIX). */
+#define SIGTSTP 24 /* Keyboard stop (POSIX). */
+#define SIGCONT 25 /* Continue (POSIX). */
+#define SIGTTIN 26 /* Background read from tty (POSIX). */
+#define SIGTTOU 27 /* Background write to tty (POSIX). */
+#define SIGVTALRM 28 /* Virtual alarm clock (4.2 BSD). */
+#define SIGPROF 29 /* Profiling alarm clock (4.2 BSD). */
+#define SIGXCPU 30 /* CPU limit exceeded (4.2 BSD). */
+#define SIGXFSZ 31 /* File size limit exceeded (4.2 BSD). */
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX _NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ *
+ * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever
+ * supported its use and no libc was using it, so the entire sa-restorer
+ * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48
+ * retaining only the SA_RESTORER definition as a reminder to avoid
+ * accidental reuse of the mask bit.
+ */
+#define SA_ONSTACK 0x08000000
+#define SA_RESETHAND 0x80000000
+#define SA_RESTART 0x10000000
+#define SA_SIGINFO 0x00000008
+#define SA_NODEFER 0x40000000
+#define SA_NOCLDWAIT 0x00010000
+#define SA_NOCLDSTOP 0x00000001
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
+
+#define SIG_BLOCK 1 /* for blocking signals */
+#define SIG_UNBLOCK 2 /* for unblocking signals */
+#define SIG_SETMASK 3 /* for setting the signal mask */
+
+#include <asm-generic/signal-defs.h>
+
+#ifndef __KERNEL__
+struct sigaction {
+ unsigned int sa_flags;
+ __sighandler_t sa_handler;
+ sigset_t sa_mask;
+};
+#endif
+
+/* IRIX compatible stack_t */
+typedef struct sigaltstack {
+ void __user *ss_sp;
+ size_t ss_size;
+ int ss_flags;
+} stack_t;
+
+
+#endif /* _UAPI_ASM_SIGNAL_H */
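
User space normally reaches these signal numbers and SA_* flags through the libc sigaction wrapper rather than the raw kernel struct sigaction laid out above. A minimal, hedged sketch (not part of this patch) of installing a handler with SA_SIGINFO | SA_RESTART:

#include <string.h>
#include <stddef.h>
#include <signal.h>

static void on_sigsegv(int sig, siginfo_t *info, void *ucontext)
{
	(void)sig; (void)info; (void)ucontext;	/* e.g. inspect info->si_addr */
}

/* Install the handler via the libc wrapper; the kernel side uses the
 * MIPS-specific layout defined in this header. */
static int install_sigsegv_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_sigsegv;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGSEGV, &sa, NULL);
}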
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
new file mode 100644
index 000000000..d0a9ed2ca
--- /dev/null
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1997, 1999, 2000, 2001 Ralf Baechle
+ * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
+ */
+#ifndef _UAPI_ASM_SOCKET_H
+#define _UAPI_ASM_SOCKET_H
+
+#include <linux/posix_types.h>
+#include <asm/sockios.h>
+
+/*
+ * For setsockopt(2)
+ *
+ * These defines are ABI conformant as far as Linux supports them ...
+ */
+#define SOL_SOCKET 0xffff
+
+#define SO_DEBUG 0x0001 /* Record debugging information. */
+#define SO_REUSEADDR 0x0004 /* Allow reuse of local addresses. */
+#define SO_KEEPALIVE 0x0008 /* Keep connections alive and send
+ SIGPIPE when they die. */
+#define SO_DONTROUTE 0x0010 /* Don't do local routing. */
+#define SO_BROADCAST 0x0020 /* Allow transmission of
+ broadcast messages. */
+#define SO_LINGER 0x0080 /* Block on close of a reliable
+ socket to transmit pending data. */
+#define SO_OOBINLINE 0x0100 /* Receive out-of-band data in-band. */
+#define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */
+
+#define SO_TYPE 0x1008 /* Compatible name for SO_STYLE. */
+#define SO_STYLE SO_TYPE /* Synonym */
+#define SO_ERROR 0x1007 /* get error status and clear */
+#define SO_SNDBUF 0x1001 /* Send buffer size. */
+#define SO_RCVBUF 0x1002 /* Receive buffer. */
+#define SO_SNDLOWAT 0x1003 /* send low-water mark */
+#define SO_RCVLOWAT 0x1004 /* receive low-water mark */
+#define SO_SNDTIMEO_OLD 0x1005 /* send timeout */
+#define SO_RCVTIMEO_OLD 0x1006 /* receive timeout */
+#define SO_ACCEPTCONN 0x1009
+#define SO_PROTOCOL 0x1028 /* protocol type */
+#define SO_DOMAIN 0x1029 /* domain/socket family */
+
+/* linux-specific, might as well be the same as on i386 */
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_BSDCOMPAT 14
+
+#define SO_PASSCRED 17
+#define SO_PEERCRED 18
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION 22
+#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
+#define SO_SECURITY_ENCRYPTION_NETWORK 24
+
+#define SO_BINDTODEVICE 25
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER 26
+#define SO_DETACH_FILTER 27
+#define SO_GET_FILTER SO_ATTACH_FILTER
+
+#define SO_PEERNAME 28
+
+#define SO_PEERSEC 30
+#define SO_SNDBUFFORCE 31
+#define SO_RCVBUFFORCE 33
+#define SO_PASSSEC 34
+
+#define SO_MARK 36
+
+#define SO_RXQ_OVFL 40
+
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+#define SO_PEEK_OFF 42
+
+/* Instruct lower device to use last 4-bytes of skb data as FCS */
+#define SO_NOFCS 43
+
+#define SO_LOCK_FILTER 44
+
+#define SO_SELECT_ERR_QUEUE 45
+
+#define SO_BUSY_POLL 46
+
+#define SO_MAX_PACING_RATE 47
+
+#define SO_BPF_EXTENSIONS 48
+
+#define SO_INCOMING_CPU 49
+
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
+#define SO_ATTACH_REUSEPORT_CBPF 51
+#define SO_ATTACH_REUSEPORT_EBPF 52
+
+#define SO_CNX_ADVICE 53
+
+#define SCM_TIMESTAMPING_OPT_STATS 54
+
+#define SO_MEMINFO 55
+
+#define SO_INCOMING_NAPI_ID 56
+
+#define SO_COOKIE 57
+
+#define SCM_TIMESTAMPING_PKTINFO 58
+
+#define SO_PEERGROUPS 59
+
+#define SO_ZEROCOPY 60
+
+#define SO_TXTIME 61
+#define SCM_TXTIME SO_TXTIME
+
+#define SO_BINDTOIFINDEX 62
+
+#define SO_TIMESTAMP_OLD 29
+#define SO_TIMESTAMPNS_OLD 35
+#define SO_TIMESTAMPING_OLD 37
+
+#define SO_TIMESTAMP_NEW 63
+#define SO_TIMESTAMPNS_NEW 64
+#define SO_TIMESTAMPING_NEW 65
+
+#define SO_RCVTIMEO_NEW 66
+#define SO_SNDTIMEO_NEW 67
+
+#define SO_DETACH_REUSEPORT_BPF 68
+
+#if !defined(__KERNEL__)
+
+#if __BITS_PER_LONG == 64
+#define SO_TIMESTAMP SO_TIMESTAMP_OLD
+#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD
+#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD
+
+#define SO_RCVTIMEO SO_RCVTIMEO_OLD
+#define SO_SNDTIMEO SO_SNDTIMEO_OLD
+#else
+#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW)
+#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW)
+#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW)
+
+#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW)
+#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW)
+#endif
+
+#define SCM_TIMESTAMP SO_TIMESTAMP
+#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
+#define SCM_TIMESTAMPING SO_TIMESTAMPING
+
+#endif
+
+#endif /* _UAPI_ASM_SOCKET_H */
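
These option numbers differ from most architectures (SOL_SOCKET is 0xffff, SO_REUSEADDR is 0x0004, and so on), but they are consumed through the usual setsockopt(2) interface. A small sketch, not part of this patch:

#include <sys/socket.h>

/* Allow quick rebinding of a listening address; SO_REUSEADDR resolves
 * to the MIPS-specific value above when built against these headers. */
static int enable_reuseaddr(int fd)
{
	int one = 1;

	return setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
}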
diff --git a/arch/mips/include/uapi/asm/sockios.h b/arch/mips/include/uapi/asm/sockios.h
new file mode 100644
index 000000000..66f60234f
--- /dev/null
+++ b/arch/mips/include/uapi/asm/sockios.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Socket-level I/O control calls.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 by Ralf Baechle
+ */
+#ifndef _ASM_SOCKIOS_H
+#define _ASM_SOCKIOS_H
+
+#include <asm/ioctl.h>
+
+/* Socket-level I/O control calls. */
+#define FIOGETOWN _IOR('f', 123, int)
+#define FIOSETOWN _IOW('f', 124, int)
+
+#define SIOCATMARK _IOR('s', 7, int)
+#define SIOCSPGRP _IOW('s', 8, pid_t)
+#define SIOCGPGRP _IOR('s', 9, pid_t)
+
+#define SIOCGSTAMP_OLD 0x8906 /* Get stamp (timeval) */
+#define SIOCGSTAMPNS_OLD 0x8907 /* Get stamp (timespec) */
+
+#endif /* _ASM_SOCKIOS_H */
diff --git a/arch/mips/include/uapi/asm/stat.h b/arch/mips/include/uapi/asm/stat.h
new file mode 100644
index 000000000..3d2a3b718
--- /dev/null
+++ b/arch/mips/include/uapi/asm/stat.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1999, 2000 Ralf Baechle
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_STAT_H
+#define _ASM_STAT_H
+
+#include <linux/types.h>
+
+#include <asm/sgidefs.h>
+
+#if (_MIPS_SIM == _MIPS_SIM_ABI32) || (_MIPS_SIM == _MIPS_SIM_NABI32)
+
+struct stat {
+ unsigned st_dev;
+ long st_pad1[3]; /* Reserved for network id */
+ ino_t st_ino;
+ mode_t st_mode;
+ __u32 st_nlink;
+ uid_t st_uid;
+ gid_t st_gid;
+ unsigned st_rdev;
+ long st_pad2[2];
+ long st_size;
+ long st_pad3;
+ /*
+ * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
+ * but we don't have it under Linux.
+ */
+ long st_atime;
+ long st_atime_nsec;
+ long st_mtime;
+ long st_mtime_nsec;
+ long st_ctime;
+ long st_ctime_nsec;
+ long st_blksize;
+ long st_blocks;
+ long st_pad4[14];
+};
+
+/*
+ * This matches struct stat64 in glibc2.1, hence the absolutely insane
+ * amounts of padding around dev_t's. The memory layout is the same as that
+ * of struct stat of the 64-bit kernel.
+ */
+
+struct stat64 {
+ unsigned long st_dev;
+ unsigned long st_pad0[3]; /* Reserved for st_dev expansion */
+
+ unsigned long long st_ino;
+
+ mode_t st_mode;
+ __u32 st_nlink;
+
+ uid_t st_uid;
+ gid_t st_gid;
+
+ unsigned long st_rdev;
+ unsigned long st_pad1[3]; /* Reserved for st_rdev expansion */
+
+ long long st_size;
+
+ /*
+ * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
+ * but we don't have it under Linux.
+ */
+ long st_atime;
+ unsigned long st_atime_nsec; /* Reserved for st_atime expansion */
+
+ long st_mtime;
+ unsigned long st_mtime_nsec; /* Reserved for st_mtime expansion */
+
+ long st_ctime;
+ unsigned long st_ctime_nsec; /* Reserved for st_ctime expansion */
+
+ unsigned long st_blksize;
+ unsigned long st_pad2;
+
+ long long st_blocks;
+};
+
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
+
+#if _MIPS_SIM == _MIPS_SIM_ABI64
+
+/* The memory layout is the same as that of struct stat64 of the 32-bit kernel. */
+struct stat {
+ unsigned int st_dev;
+ unsigned int st_pad0[3]; /* Reserved for st_dev expansion */
+
+ unsigned long st_ino;
+
+ mode_t st_mode;
+ __u32 st_nlink;
+
+ uid_t st_uid;
+ gid_t st_gid;
+
+ unsigned int st_rdev;
+ unsigned int st_pad1[3]; /* Reserved for st_rdev expansion */
+
+ long st_size;
+
+ /*
+ * Actually this should be timestruc_t st_atime, st_mtime and st_ctime
+ * but we don't have it under Linux.
+ */
+ unsigned int st_atime;
+ unsigned int st_atime_nsec;
+
+ unsigned int st_mtime;
+ unsigned int st_mtime_nsec;
+
+ unsigned int st_ctime;
+ unsigned int st_ctime_nsec;
+
+ unsigned int st_blksize;
+ unsigned int st_pad2;
+
+ unsigned long st_blocks;
+};
+
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
+
+#define STAT_HAVE_NSEC 1
+
+#endif /* _ASM_STAT_H */
diff --git a/arch/mips/include/uapi/asm/statfs.h b/arch/mips/include/uapi/asm/statfs.h
new file mode 100644
index 000000000..f4174dcae
--- /dev/null
+++ b/arch/mips/include/uapi/asm/statfs.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1999 by Ralf Baechle
+ */
+#ifndef _ASM_STATFS_H
+#define _ASM_STATFS_H
+
+#include <linux/posix_types.h>
+#include <asm/sgidefs.h>
+
+#ifndef __KERNEL_STRICT_NAMES
+
+#include <linux/types.h>
+
+typedef __kernel_fsid_t fsid_t;
+
+#endif
+
+struct statfs {
+ long f_type;
+#define f_fstyp f_type
+ long f_bsize;
+ long f_frsize; /* Fragment size - unsupported */
+ long f_blocks;
+ long f_bfree;
+ long f_files;
+ long f_ffree;
+ long f_bavail;
+
+ /* Linux specials */
+ __kernel_fsid_t f_fsid;
+ long f_namelen;
+ long f_flags;
+ long f_spare[5];
+};
+
+#if (_MIPS_SIM == _MIPS_SIM_ABI32) || (_MIPS_SIM == _MIPS_SIM_NABI32)
+
+/*
+ * Unlike the traditional version the LFAPI version has none of the ABI junk
+ */
+struct statfs64 {
+ __u32 f_type;
+ __u32 f_bsize;
+ __u32 f_frsize; /* Fragment size - unsupported */
+ __u32 __pad;
+ __u64 f_blocks;
+ __u64 f_bfree;
+ __u64 f_files;
+ __u64 f_ffree;
+ __u64 f_bavail;
+ __kernel_fsid_t f_fsid;
+ __u32 f_namelen;
+ __u32 f_flags;
+ __u32 f_spare[5];
+};
+
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
+
+#if _MIPS_SIM == _MIPS_SIM_ABI64
+
+struct statfs64 { /* Same as struct statfs */
+ long f_type;
+ long f_bsize;
+ long f_frsize; /* Fragment size - unsupported */
+ long f_blocks;
+ long f_bfree;
+ long f_files;
+ long f_ffree;
+ long f_bavail;
+
+ /* Linux specials */
+ __kernel_fsid_t f_fsid;
+ long f_namelen;
+ long f_flags;
+ long f_spare[5];
+};
+
+struct compat_statfs64 {
+ __u32 f_type;
+ __u32 f_bsize;
+ __u32 f_frsize; /* Fragment size - unsupported */
+ __u32 __pad;
+ __u64 f_blocks;
+ __u64 f_bfree;
+ __u64 f_files;
+ __u64 f_ffree;
+ __u64 f_bavail;
+ __kernel_fsid_t f_fsid;
+ __u32 f_namelen;
+ __u32 f_flags;
+ __u32 f_spare[5];
+};
+
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
+
+#endif /* _ASM_STATFS_H */
diff --git a/arch/mips/include/uapi/asm/swab.h b/arch/mips/include/uapi/asm/swab.h
new file mode 100644
index 000000000..d6795fe4e
--- /dev/null
+++ b/arch/mips/include/uapi/asm/swab.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 99, 2003 by Ralf Baechle
+ */
+#ifndef _ASM_SWAB_H
+#define _ASM_SWAB_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#define __SWAB_64_THRU_32__
+
+#if !defined(__mips16) && \
+ ((defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \
+ defined(_MIPS_ARCH_LOONGSON3A))
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+ __asm__(
+ " .set push \n"
+ " .set arch=mips32r2 \n"
+ " wsbh %0, %1 \n"
+ " .set pop \n"
+ : "=r" (x)
+ : "r" (x));
+
+ return x;
+}
+#define __arch_swab16 __arch_swab16
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+ __asm__(
+ " .set push \n"
+ " .set arch=mips32r2 \n"
+ " wsbh %0, %1 \n"
+ " rotr %0, %0, 16 \n"
+ " .set pop \n"
+ : "=r" (x)
+ : "r" (x));
+
+ return x;
+}
+#define __arch_swab32 __arch_swab32
+
+/*
+ * Having already checked for MIPS R2, enable the optimized version for
+ * 64-bit kernel on r2 CPUs.
+ */
+#ifdef __mips64
+static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
+{
+ __asm__(
+ " .set push \n"
+ " .set arch=mips64r2 \n"
+ " dsbh %0, %1 \n"
+ " dshd %0, %0 \n"
+ " .set pop \n"
+ : "=r" (x)
+ : "r" (x));
+
+ return x;
+}
+#define __arch_swab64 __arch_swab64
+#endif /* __mips64 */
+#endif /* (not __mips16) and (MIPS R2 or newer or Loongson 3A) */
+#endif /* _ASM_SWAB_H */
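
For reference, the wsbh/rotr (and dsbh/dshd) sequences above must produce the same results as a plain C byte swap. A portable sketch of the intended semantics, not part of this patch:

#include <stdint.h>

/* Reference byte swap: swab32_ref(0x12345678) == 0x78563412.
 * wsbh swaps the bytes within each halfword; rotr by 16 then swaps the
 * halfwords, giving the full 32-bit reversal. */
static inline uint32_t swab32_ref(uint32_t x)
{
	return (x >> 24) |
	       ((x >> 8) & 0x0000ff00u) |
	       ((x << 8) & 0x00ff0000u) |
	       (x << 24);
}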
diff --git a/arch/mips/include/uapi/asm/sysmips.h b/arch/mips/include/uapi/asm/sysmips.h
new file mode 100644
index 000000000..4c009e10d
--- /dev/null
+++ b/arch/mips/include/uapi/asm/sysmips.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Definitions for the MIPS sysmips(2) call
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 by Ralf Baechle
+ */
+#ifndef _ASM_SYSMIPS_H
+#define _ASM_SYSMIPS_H
+
+/*
+ * Commands for the sysmips(2) call
+ *
+ * sysmips(2) is deprecated - though some existing software uses it.
+ * We only support the following commands.
+ */
+#define SETNAME 1 /* set hostname */
+#define FLUSH_CACHE 3 /* writeback and invalidate caches */
+#define MIPS_FIXADE 7 /* control address error fixing */
+#define MIPS_RDNVRAM 10 /* read NVRAM */
+#define MIPS_ATOMIC_SET 2001 /* atomically set variable */
+
+#endif /* _ASM_SYSMIPS_H */
diff --git a/arch/mips/include/uapi/asm/termbits.h b/arch/mips/include/uapi/asm/termbits.h
new file mode 100644
index 000000000..dfeffba72
--- /dev/null
+++ b/arch/mips/include/uapi/asm/termbits.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 96, 99, 2001, 06 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+#ifndef _ASM_TERMBITS_H
+#define _ASM_TERMBITS_H
+
+#include <linux/posix_types.h>
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+/*
+ * The ABI says nothing about NCC but seems to use NCCS as
+ * a replacement for it in struct termio
+ */
+#define NCCS 23
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+};
+
+struct termios2 {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+struct ktermios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+/* c_cc characters */
+#define VINTR 0 /* Interrupt character [ISIG]. */
+#define VQUIT 1 /* Quit character [ISIG]. */
+#define VERASE 2 /* Erase character [ICANON]. */
+#define VKILL 3 /* Kill-line character [ICANON]. */
+#define VMIN 4 /* Minimum number of bytes read at once [!ICANON]. */
+#define VTIME 5 /* Time-out value (tenths of a second) [!ICANON]. */
+#define VEOL2 6 /* Second EOL character [ICANON]. */
+#define VSWTC 7 /* ??? */
+#define VSWTCH VSWTC
+#define VSTART 8 /* Start (X-ON) character [IXON, IXOFF]. */
+#define VSTOP 9 /* Stop (X-OFF) character [IXON, IXOFF]. */
+#define VSUSP 10 /* Suspend character [ISIG]. */
+#if 0
+/*
+ * VDSUSP is not supported
+ */
+#define VDSUSP 11 /* Delayed suspend character [ISIG]. */
+#endif
+#define VREPRINT 12 /* Reprint-line character [ICANON]. */
+#define VDISCARD 13 /* Discard character [IEXTEN]. */
+#define VWERASE 14 /* Word-erase character [ICANON]. */
+#define VLNEXT 15 /* Literal-next character [IEXTEN]. */
+#define VEOF 16 /* End-of-file character [ICANON]. */
+#define VEOL 17 /* End-of-line character [ICANON]. */
+
+/* c_iflag bits */
+#define IGNBRK 0000001 /* Ignore break condition. */
+#define BRKINT 0000002 /* Signal interrupt on break. */
+#define IGNPAR 0000004 /* Ignore characters with parity errors. */
+#define PARMRK 0000010 /* Mark parity and framing errors. */
+#define INPCK 0000020 /* Enable input parity check. */
+#define ISTRIP 0000040 /* Strip 8th bit off characters. */
+#define INLCR 0000100 /* Map NL to CR on input. */
+#define IGNCR 0000200 /* Ignore CR. */
+#define ICRNL 0000400 /* Map CR to NL on input. */
+#define IUCLC 0001000 /* Map upper case to lower case on input. */
+#define IXON 0002000 /* Enable start/stop output control. */
+#define IXANY 0004000 /* Any character will restart after stop. */
+#define IXOFF 0010000 /* Enable start/stop input control. */
+#define IMAXBEL 0020000 /* Ring bell when input queue is full. */
+#define IUTF8 0040000 /* Input is UTF-8 */
+
+/* c_oflag bits */
+#define OPOST 0000001 /* Perform output processing. */
+#define OLCUC 0000002 /* Map lower case to upper case on output. */
+#define ONLCR 0000004 /* Map NL to CR-NL on output. */
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+#define OFILL 0000100
+#define OFDEL 0000200
+#define NLDLY 0000400
+#define NL0 0000000
+#define NL1 0000400
+#define CRDLY 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
+#define TABDLY 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
+#define BSDLY 0020000
+#define BS0 0000000
+#define BS1 0020000
+#define VTDLY 0040000
+#define VT0 0000000
+#define VT1 0040000
+#define FFDLY 0100000
+#define FF0 0000000
+#define FF1 0100000
+/*
+#define PAGEOUT ???
+#define WRAP ???
+ */
+
+/* c_cflag bit meaning */
+#define CBAUD 0010017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE 0000060 /* Number of bits per byte (mask). */
+#define CS5 0000000 /* 5 bits per byte. */
+#define CS6 0000020 /* 6 bits per byte. */
+#define CS7 0000040 /* 7 bits per byte. */
+#define CS8 0000060 /* 8 bits per byte. */
+#define CSTOPB 0000100 /* Two stop bits instead of one. */
+#define CREAD 0000200 /* Enable receiver. */
+#define PARENB 0000400 /* Parity enable. */
+#define PARODD 0001000 /* Odd parity instead of even. */
+#define HUPCL 0002000 /* Hang up on last close. */
+#define CLOCAL 0004000 /* Ignore modem status lines. */
+#define CBAUDEX 0010000
+#define BOTHER 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define B460800 0010004
+#define B500000 0010005
+#define B576000 0010006
+#define B921600 0010007
+#define B1000000 0010010
+#define B1152000 0010011
+#define B1500000 0010012
+#define B2000000 0010013
+#define B2500000 0010014
+#define B3000000 0010015
+#define B3500000 0010016
+#define B4000000 0010017
+#define CIBAUD 002003600000 /* input baud rate */
+#define CMSPAR 010000000000 /* mark or space (stick) parity */
+#define CRTSCTS 020000000000 /* flow control */
+
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
+/* c_lflag bits */
+#define ISIG 0000001 /* Enable signals. */
+#define ICANON 0000002 /* Do erase and kill processing. */
+#define XCASE 0000004
+#define ECHO 0000010 /* Enable echo. */
+#define ECHOE 0000020 /* Visual erase for ERASE. */
+#define ECHOK 0000040 /* Echo NL after KILL. */
+#define ECHONL 0000100 /* Echo NL even if ECHO is off. */
+#define NOFLSH 0000200 /* Disable flush after interrupt. */
+#define IEXTEN 0000400 /* Enable DISCARD and LNEXT. */
+#define ECHOCTL 0001000 /* Echo control characters as ^X. */
+#define ECHOPRT 0002000 /* Hardcopy visual erase. */
+#define ECHOKE 0004000 /* Visual erase for KILL. */
+#define FLUSHO 0020000
+#define PENDIN 0040000 /* Retype pending input (state). */
+#define TOSTOP 0100000 /* Send SIGTTOU for background output. */
+#define ITOSTOP TOSTOP
+#define EXTPROC 0200000 /* External processing on pty */
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+
+/* tcflow() and TCXONC use these */
+#define TCOOFF 0 /* Suspend output. */
+#define TCOON 1 /* Restart suspended output. */
+#define TCIOFF 2 /* Send a STOP character. */
+#define TCION 3 /* Send a START character. */
+
+/* tcflush() and TCFLSH use these */
+#define TCIFLUSH 0 /* Discard data received but not yet read. */
+#define TCOFLUSH 1 /* Discard data written but not yet sent. */
+#define TCIOFLUSH 2 /* Discard all pending data. */
+
+/* tcsetattr uses these */
+#define TCSANOW TCSETS /* Change immediately. */
+#define TCSADRAIN TCSETSW /* Change when pending output is written. */
+#define TCSAFLUSH TCSETSF /* Flush pending input before changing. */
+
+#endif /* _ASM_TERMBITS_H */
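As a rough illustration of how the constants above fit together (not part of the patch, using the standard userspace termios wrappers rather than raw TCSETS ioctls; the device path is hypothetical), a port can be switched to raw 8N1 operation like this:

    #include <fcntl.h>
    #include <termios.h>
    #include <unistd.h>

    int open_raw_8n1(const char *path)              /* e.g. "/dev/ttyS0" */
    {
            struct termios tio;
            int fd = open(path, O_RDWR | O_NOCTTY);

            if (fd < 0)
                    return -1;
            if (tcgetattr(fd, &tio) < 0) {
                    close(fd);
                    return -1;
            }

            tio.c_cflag &= ~(CSIZE | PARENB | CSTOPB);
            tio.c_cflag |= CS8 | CREAD | CLOCAL;    /* 8 data bits, receiver on */
            tio.c_lflag &= ~(ICANON | ECHO | ISIG); /* raw input, no echo/signals */
            tio.c_iflag &= ~(IXON | IXOFF | ICRNL); /* no flow control, no CR->NL */
            tio.c_oflag &= ~OPOST;                  /* no output post-processing */
            tio.c_cc[VMIN] = 1;                     /* wait for at least one byte */
            tio.c_cc[VTIME] = 0;                    /* no inter-byte timeout */

            if (tcsetattr(fd, TCSANOW, &tio) < 0) { /* TCSANOW maps to TCSETS */
                    close(fd);
                    return -1;
            }
            return fd;
    }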
diff --git a/arch/mips/include/uapi/asm/termios.h b/arch/mips/include/uapi/asm/termios.h
new file mode 100644
index 000000000..d6c576794
--- /dev/null
+++ b/arch/mips/include/uapi/asm/termios.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1996, 2000, 2001 by Ralf Baechle
+ * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
+ */
+#ifndef _UAPI_ASM_TERMIOS_H
+#define _UAPI_ASM_TERMIOS_H
+
+#include <linux/errno.h>
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct sgttyb {
+ char sg_ispeed;
+ char sg_ospeed;
+ char sg_erase;
+ char sg_kill;
+ int sg_flags; /* SGI special - int, not short */
+};
+
+struct tchars {
+ char t_intrc;
+ char t_quitc;
+ char t_startc;
+ char t_stopc;
+ char t_eofc;
+ char t_brkc;
+};
+
+struct ltchars {
+ char t_suspc; /* stop process signal */
+ char t_dsuspc; /* delayed stop process signal */
+ char t_rprntc; /* reprint line */
+ char t_flushc; /* flush output (toggles) */
+ char t_werasc; /* word erase */
+ char t_lnextc; /* literal next character */
+};
+
+/* TIOCGSIZE, TIOCSSIZE not defined yet. Only needed for SunOS source
+ compatibility anyway ... */
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ char c_line; /* line discipline */
+ unsigned char c_cc[NCCS]; /* control characters */
+};
+
+
+/* modem lines */
+#define TIOCM_LE 0x001 /* line enable */
+#define TIOCM_DTR 0x002 /* data terminal ready */
+#define TIOCM_RTS 0x004 /* request to send */
+#define TIOCM_ST 0x010 /* secondary transmit */
+#define TIOCM_SR 0x020 /* secondary receive */
+#define TIOCM_CTS 0x040 /* clear to send */
+#define TIOCM_CAR 0x100 /* carrier detect */
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RNG 0x200 /* ring */
+#define TIOCM_RI TIOCM_RNG
+#define TIOCM_DSR 0x400 /* data set ready */
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+
+#endif /* _UAPI_ASM_TERMIOS_H */
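For context (again a userspace sketch, not part of the patch), the TIOCM_* bits above are read and set through the standard modem-control ioctls on an already-open terminal descriptor:

    #include <sys/ioctl.h>

    /* Returns 1 if carrier detect is asserted, 0 if not, -1 on error. */
    int carrier_present(int fd)
    {
            int bits;

            if (ioctl(fd, TIOCMGET, &bits) < 0)
                    return -1;
            return !!(bits & TIOCM_CAR);            /* TIOCM_CD is an alias */
    }

    /* Assert DTR and RTS without touching the other lines. */
    int raise_dtr_rts(int fd)
    {
            int bits = TIOCM_DTR | TIOCM_RTS;

            return ioctl(fd, TIOCMBIS, &bits);
    }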
diff --git a/arch/mips/include/uapi/asm/types.h b/arch/mips/include/uapi/asm/types.h
new file mode 100644
index 000000000..6b2150484
--- /dev/null
+++ b/arch/mips/include/uapi/asm/types.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle
+ * Copyright (C) 2008 Wind River Systems,
+ * written by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#ifndef _UAPI_ASM_TYPES_H
+#define _UAPI_ASM_TYPES_H
+
+/*
+ * We don't use int-l64.h for the kernel anymore but still use it for
+ * userspace to avoid code changes.
+ *
+ * However, some user programs (e.g. perf) may not want this. They can
+ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
+ */
+#ifndef __KERNEL__
+# if _MIPS_SZLONG == 64 && !defined(__SANE_USERSPACE_TYPES__)
+# include <asm-generic/int-l64.h>
+# else
+# include <asm-generic/int-ll64.h>
+# endif
+#endif
+
+
+#endif /* _UAPI_ASM_TYPES_H */
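The comment above matters mostly for 64-bit userspace: without the flag, __u64 ends up as a plain unsigned long on n64, which hurts printf format portability. A minimal sketch of the opt-in (userspace, not part of the patch):

    #define __SANE_USERSPACE_TYPES__        /* must precede any uapi include */
    #include <linux/types.h>
    #include <stdio.h>

    static void print_u64(__u64 v)
    {
            /* __u64 is now unsigned long long on all MIPS ABIs */
            printf("%llu\n", (unsigned long long)v);
    }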
diff --git a/arch/mips/include/uapi/asm/ucontext.h b/arch/mips/include/uapi/asm/ucontext.h
new file mode 100644
index 000000000..2d3bf8eeb
--- /dev/null
+++ b/arch/mips/include/uapi/asm/ucontext.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __MIPS_UAPI_ASM_UCONTEXT_H
+#define __MIPS_UAPI_ASM_UCONTEXT_H
+
+/**
+ * struct extcontext - extended context header structure
+ * @magic: magic value identifying the type of extended context
+ * @size: the size in bytes of the enclosing structure
+ *
+ * Extended context structures provide context which does not fit within struct
+ * sigcontext. They are placed sequentially in memory at the end of struct
+ * ucontext and struct sigframe, with each extended context structure beginning
+ * with a header defined by this struct. The type of context represented is
+ * indicated by the magic field. Userland may check each extended context
+ * structure against magic values that it recognises. The size field allows any
+ * unrecognised context to be skipped, allowing for future expansion. The end
+ * of the extended context data is indicated by the magic value
+ * END_EXTCONTEXT_MAGIC.
+ */
+struct extcontext {
+ unsigned int magic;
+ unsigned int size;
+};
+
+/**
+ * struct msa_extcontext - MSA extended context structure
+ * @ext: the extended context header, with magic == MSA_EXTCONTEXT_MAGIC
+ * @wr: the most significant 64 bits of each MSA vector register
+ * @csr: the value of the MSA control & status register
+ *
+ * If MSA context is live for a task at the time a signal is delivered to it,
+ * this structure will hold the MSA context of the task as it was prior to the
+ * signal delivery.
+ */
+struct msa_extcontext {
+ struct extcontext ext;
+#define MSA_EXTCONTEXT_MAGIC 0x784d5341 /* xMSA */
+
+ unsigned long long wr[32];
+ unsigned int csr;
+};
+
+#define END_EXTCONTEXT_MAGIC 0x78454e44 /* xEND */
+
+/**
+ * struct ucontext - user context structure
+ * @uc_flags: flags describing the context
+ * @uc_link: context to resume when this one returns
+ * @uc_stack: stack used by this context
+ * @uc_mcontext: holds basic processor state
+ * @uc_sigmask: signals blocked while this context runs
+ * @uc_extcontext: holds extended processor state
+ */
+struct ucontext {
+ /* Historic fields matching asm-generic */
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask;
+
+ /* Extended context structures may follow ucontext */
+ unsigned long long uc_extcontext[0];
+};
+
+#endif /* __MIPS_UAPI_ASM_UCONTEXT_H */
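A minimal sketch of the walk described in the extcontext comment above (userspace, not part of the patch; assumes the ucontext pointer comes from the third argument of an SA_SIGINFO signal handler):

    static const struct msa_extcontext *find_msa_ctx(const struct ucontext *uc)
    {
            const char *p = (const char *)uc->uc_extcontext;

            for (;;) {
                    const struct extcontext *ext = (const struct extcontext *)p;

                    if (ext->magic == END_EXTCONTEXT_MAGIC)
                            return NULL;            /* no MSA record present */
                    if (ext->magic == MSA_EXTCONTEXT_MAGIC)
                            return (const struct msa_extcontext *)ext;
                    p += ext->size;                 /* skip unrecognised record */
            }
    }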
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
new file mode 100644
index 000000000..4abe38754
--- /dev/null
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 96, 97, 98, 99, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ *
+ * Changed system calls macros _syscall5 - _syscall7 to push args 5 to 7 onto
+ * the stack. Robin Farine for ACN S.A, Copyright (C) 1996 by ACN S.A
+ */
+#ifndef _UAPI_ASM_UNISTD_H
+#define _UAPI_ASM_UNISTD_H
+
+#include <asm/sgidefs.h>
+
+#if _MIPS_SIM == _MIPS_SIM_ABI32
+
+#define __NR_Linux 4000
+#include <asm/unistd_o32.h>
+
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
+
+#if _MIPS_SIM == _MIPS_SIM_ABI64
+
+#define __NR_Linux 5000
+#include <asm/unistd_n64.h>
+
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
+
+#if _MIPS_SIM == _MIPS_SIM_NABI32
+
+#define __NR_Linux 6000
+#include <asm/unistd_n32.h>
+
+#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
+
+#endif /* _UAPI_ASM_UNISTD_H */
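Each generated per-ABI header defines its syscall numbers as offsets from the __NR_Linux base selected above (4000 for o32, 5000 for n64, 6000 for n32), so userspace can invoke them directly; a trivial sketch (not part of the patch):

    #include <unistd.h>
    #include <sys/syscall.h>

    static long raw_getpid(void)
    {
            /* On o32 this resolves to 4000 plus the offset from unistd_o32.h. */
            return syscall(__NR_getpid);
    }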
diff --git a/arch/mips/ingenic/Kconfig b/arch/mips/ingenic/Kconfig
new file mode 100644
index 000000000..3238e16fe
--- /dev/null
+++ b/arch/mips/ingenic/Kconfig
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config MACH_INGENIC_GENERIC
+ bool
+ select MACH_INGENIC
+ select MACH_JZ4740
+ select MACH_JZ4770
+ select MACH_JZ4780
+ select MACH_X1000
+
+choice
+ prompt "Machine type"
+ depends on MACH_INGENIC_SOC
+ default INGENIC_GENERIC_BOARD
+
+config INGENIC_GENERIC_BOARD
+ bool "Generic board"
+ select MACH_INGENIC_GENERIC
+
+config JZ4740_QI_LB60
+ bool "Qi Hardware Ben NanoNote"
+ select MACH_JZ4740
+
+config JZ4740_RS90
+ bool "YLM RetroMini (RS-90)"
+ select MACH_JZ4725B
+
+config JZ4770_GCW0
+ bool "Game Consoles Worldwide GCW Zero"
+ select MACH_JZ4770
+
+config JZ4780_CI20
+ bool "MIPS Creator CI20"
+ select MACH_JZ4780
+
+config X1000_CU1000_NEO
+ bool "YSH & ATIL CU1000 Module with Neo backplane"
+ select MACH_X1000
+
+config X1830_CU1830_NEO
+ bool "YSH & ATIL CU1830 Module with Neo backplane"
+ select MACH_X1830
+
+endchoice
+
+config MACH_JZ4725B
+ bool
+ select SYS_HAS_CPU_MIPS32_R1
+
+config MACH_JZ4740
+ bool
+ select SYS_HAS_CPU_MIPS32_R1
+
+config MACH_JZ4770
+ bool
+ select MIPS_CPU_SCACHE
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_SUPPORTS_HIGHMEM
+
+config MACH_JZ4780
+ bool
+ select MIPS_CPU_SCACHE
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_SUPPORTS_HIGHMEM
+
+config MACH_X1000
+ bool
+ select MIPS_CPU_SCACHE
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_SUPPORTS_HIGHMEM
+
+config MACH_X1830
+ bool
+ select MIPS_CPU_SCACHE
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_SUPPORTS_HIGHMEM
diff --git a/arch/mips/jazz/Kconfig b/arch/mips/jazz/Kconfig
new file mode 100644
index 000000000..06838f80a
--- /dev/null
+++ b/arch/mips/jazz/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+config ACER_PICA_61
+ bool "Support for Acer PICA 1 chipset"
+ depends on MACH_JAZZ
+ select DMA_NONCOHERENT
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ help
+ This is a machine with a R4400 133/150 MHz CPU. To compile a Linux
+ kernel that runs on these, say Y here. For details about Linux on
+ the MIPS architecture, check out the Linux/MIPS FAQ on the WWW at
+ <http://www.linux-mips.org/>.
+
+config MIPS_MAGNUM_4000
+ bool "Support for MIPS Magnum 4000"
+ depends on MACH_JAZZ
+ select DMA_NONCOHERENT
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ help
+ This is a machine with a R4000 100 MHz CPU. To compile a Linux
+ kernel that runs on these, say Y here. For details about Linux on
+ the MIPS architecture, check out the Linux/MIPS FAQ on the WWW at
+ <http://www.linux-mips.org/>.
+
+config OLIVETTI_M700
+ bool "Support for Olivetti M700-10"
+ depends on MACH_JAZZ
+ select DMA_NONCOHERENT
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ help
+ This is a machine with a R4000 100 MHz CPU. To compile a Linux
+ kernel that runs on these, say Y here. For details about Linux on
+ the MIPS architecture, check out the Linux/MIPS FAQ on the WWW at
+ <http://www.linux-mips.org/>.
diff --git a/arch/mips/jazz/Makefile b/arch/mips/jazz/Makefile
new file mode 100644
index 000000000..5815e1cb3
--- /dev/null
+++ b/arch/mips/jazz/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Jazz family specific parts of the kernel
+#
+
+obj-y := irq.o jazzdma.o reset.o setup.o
diff --git a/arch/mips/jazz/Platform b/arch/mips/jazz/Platform
new file mode 100644
index 000000000..eb0490ae8
--- /dev/null
+++ b/arch/mips/jazz/Platform
@@ -0,0 +1,5 @@
+#
+# Acer PICA 61, Mips Magnum 4000 and Olivetti M700.
+#
+cflags-$(CONFIG_MACH_JAZZ) += -I$(srctree)/arch/mips/include/asm/mach-jazz
+load-$(CONFIG_MACH_JAZZ) += 0xffffffff80080000
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
new file mode 100644
index 000000000..495ba7cc5
--- /dev/null
+++ b/arch/mips/jazz/irq.c
@@ -0,0 +1,149 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2001, 2003, 07 Ralf Baechle
+ */
+#include <linux/clockchips.h>
+#include <linux/i8253.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/pgtable.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/i8259.h>
+#include <asm/io.h>
+#include <asm/jazz.h>
+#include <asm/tlbmisc.h>
+
+static DEFINE_RAW_SPINLOCK(r4030_lock);
+
+static void enable_r4030_irq(struct irq_data *d)
+{
+ unsigned int mask = 1 << (d->irq - JAZZ_IRQ_START);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&r4030_lock, flags);
+ mask |= r4030_read_reg16(JAZZ_IO_IRQ_ENABLE);
+ r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, mask);
+ raw_spin_unlock_irqrestore(&r4030_lock, flags);
+}
+
+void disable_r4030_irq(struct irq_data *d)
+{
+ unsigned int mask = ~(1 << (d->irq - JAZZ_IRQ_START));
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&r4030_lock, flags);
+ mask &= r4030_read_reg16(JAZZ_IO_IRQ_ENABLE);
+ r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, mask);
+ raw_spin_unlock_irqrestore(&r4030_lock, flags);
+}
+
+static struct irq_chip r4030_irq_type = {
+ .name = "R4030",
+ .irq_mask = disable_r4030_irq,
+ .irq_unmask = enable_r4030_irq,
+};
+
+void __init init_r4030_ints(void)
+{
+ int i;
+
+ for (i = JAZZ_IRQ_START; i <= JAZZ_IRQ_END; i++)
+ irq_set_chip_and_handler(i, &r4030_irq_type, handle_level_irq);
+
+ r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, 0);
+ r4030_read_reg16(JAZZ_IO_IRQ_SOURCE); /* clear pending IRQs */
+ r4030_read_reg32(JAZZ_R4030_INVAL_ADDR); /* clear error bits */
+}
+
+/*
+ * On systems with i8259-style interrupt controllers we assume, for driver
+ * compatibility, that interrupts 0 - 15 are the i8259 interrupts even if
+ * the hardware uses a different interrupt numbering.
+ */
+void __init arch_init_irq(void)
+{
+ /*
+ * this is a hack to get back the still needed wired mapping
+ * killed by init_mm()
+ */
+
+ /* Map 0xe0000000 -> 0x0:800005C0, 0xe0010000 -> 0x1:30000580 */
+ add_wired_entry(0x02000017, 0x03c00017, 0xe0000000, PM_64K);
+ /* Map 0xe2000000 -> 0x0:900005C0, 0xe3010000 -> 0x0:910005C0 */
+ add_wired_entry(0x02400017, 0x02440017, 0xe2000000, PM_16M);
+ /* Map 0xe4000000 -> 0x0:600005C0, 0xe4100000 -> 400005C0 */
+ add_wired_entry(0x01800017, 0x01000017, 0xe4000000, PM_4M);
+
+ init_i8259_irqs(); /* Integrated i8259 */
+ mips_cpu_irq_init();
+ init_r4030_ints();
+
+ change_c0_status(ST0_IM, IE_IRQ2 | IE_IRQ1);
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned int pending = read_c0_cause() & read_c0_status();
+ unsigned int irq;
+
+ if (pending & IE_IRQ4) {
+ r4030_read_reg32(JAZZ_TIMER_REGISTER);
+ do_IRQ(JAZZ_TIMER_IRQ);
+ } else if (pending & IE_IRQ2) {
+ irq = *(volatile u8 *)JAZZ_EISA_IRQ_ACK;
+ do_IRQ(irq);
+ } else if (pending & IE_IRQ1) {
+ irq = *(volatile u8 *)JAZZ_IO_IRQ_SOURCE >> 2;
+ if (likely(irq > 0))
+ do_IRQ(irq + JAZZ_IRQ_START - 1);
+ else
+ panic("Unimplemented loc_no_irq handler");
+ }
+}
+
+struct clock_event_device r4030_clockevent = {
+ .name = "r4030",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .rating = 300,
+ .irq = JAZZ_TIMER_IRQ,
+};
+
+static irqreturn_t r4030_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = dev_id;
+
+ cd->event_handler(cd);
+ return IRQ_HANDLED;
+}
+
+void __init plat_time_init(void)
+{
+ struct clock_event_device *cd = &r4030_clockevent;
+ unsigned int cpu = smp_processor_id();
+
+ BUG_ON(HZ != 100);
+
+ cd->cpumask = cpumask_of(cpu);
+ clockevents_register_device(cd);
+ if (request_irq(JAZZ_TIMER_IRQ, r4030_timer_interrupt, IRQF_TIMER,
+ "R4030 timer", cd))
+ pr_err("Failed to register R4030 timer interrupt\n");
+
+ /*
+ * Set clock to 100Hz.
+ *
+ * The R4030 timer receives an input clock of 1 kHz which is divided by
+ * a programmable 4-bit divider. This makes it fairly inflexible.
+ */
+ r4030_write_reg32(JAZZ_TIMER_INTERVAL, 9);
+ setup_pit_timer();
+}
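Reading the comment above, the value 9 appears to follow from the fixed 1 kHz input clock: the interval register presumably counts N+1 milliseconds per tick, so for a periodic rate of HZ the programmed value would be 1000/HZ - 1, i.e. 9 at HZ == 100 (which is also why the BUG_ON(HZ != 100) is there; the 4-bit divider cannot express most other rates). A sketch of that arithmetic, under the stated assumption:

    /* Assumed relation between the interval register and the tick rate. */
    static inline unsigned int r4030_timer_interval(unsigned int hz)
    {
            return (1000 / hz) - 1;         /* 1 kHz input, 9 for hz == 100 */
    }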
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
new file mode 100644
index 000000000..461457b28
--- /dev/null
+++ b/arch/mips/jazz/jazzdma.c
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Mips Jazz DMA controller support
+ * Copyright (C) 1995, 1996 by Andreas Busse
+ *
+ * NOTE: Some of the argument checking could be removed when
+ * things have settled down. Also, instead of returning 0xffffffff
+ * on failure of vdma_alloc() one could leave page #0 unused
+ * and return the more usual NULL pointer as logical address.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/spinlock.h>
+#include <linux/gfp.h>
+#include <linux/dma-map-ops.h>
+#include <asm/mipsregs.h>
+#include <asm/jazz.h>
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <asm/dma.h>
+#include <asm/jazzdma.h>
+
+/*
+ * Set this to one to enable additional vdma debug code.
+ */
+#define CONF_DEBUG_VDMA 0
+
+static VDMA_PGTBL_ENTRY *pgtbl;
+
+static DEFINE_SPINLOCK(vdma_lock);
+
+/*
+ * Debug stuff
+ */
+#define vdma_debug ((CONF_DEBUG_VDMA) ? debuglvl : 0)
+
+static int debuglvl = 3;
+
+/*
+ * Initialize the pagetable with a one-to-one mapping of
+ * the first 16 Mbytes of main memory and declare all
+ * entries to be unused. Using this method will at least
+ * allow some early device driver operations to work.
+ */
+static inline void vdma_pgtbl_init(void)
+{
+ unsigned long paddr = 0;
+ int i;
+
+ for (i = 0; i < VDMA_PGTBL_ENTRIES; i++) {
+ pgtbl[i].frame = paddr;
+ pgtbl[i].owner = VDMA_PAGE_EMPTY;
+ paddr += VDMA_PAGESIZE;
+ }
+}
+
+/*
+ * Initialize the Jazz R4030 dma controller
+ */
+static int __init vdma_init(void)
+{
+ /*
+ * Allocate 32k of memory for DMA page tables. This needs to be page
+ * aligned and should be uncached to avoid cache flushing after every
+ * update.
+ */
+ pgtbl = (VDMA_PGTBL_ENTRY *)__get_free_pages(GFP_KERNEL | GFP_DMA,
+ get_order(VDMA_PGTBL_SIZE));
+ BUG_ON(!pgtbl);
+ dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
+ pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
+
+ /*
+ * Clear the R4030 translation table
+ */
+ vdma_pgtbl_init();
+
+ r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
+ CPHYSADDR((unsigned long)pgtbl));
+ r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
+ r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
+
+ printk(KERN_INFO "VDMA: R4030 DMA pagetables initialized.\n");
+ return 0;
+}
+arch_initcall(vdma_init);
+
+/*
+ * Allocate DMA pagetables using a simple first-fit algorithm
+ */
+unsigned long vdma_alloc(unsigned long paddr, unsigned long size)
+{
+ int first, last, pages, frame, i;
+ unsigned long laddr, flags;
+
+ /* check arguments */
+
+ if (paddr > 0x1fffffff) {
+ if (vdma_debug)
+ printk("vdma_alloc: Invalid physical address: %08lx\n",
+ paddr);
+ return DMA_MAPPING_ERROR; /* invalid physical address */
+ }
+ if (size > 0x400000 || size == 0) {
+ if (vdma_debug)
+ printk("vdma_alloc: Invalid size: %08lx\n", size);
+ return DMA_MAPPING_ERROR; /* invalid physical address */
+ }
+
+ spin_lock_irqsave(&vdma_lock, flags);
+ /*
+ * Find free chunk
+ */
+ pages = VDMA_PAGE(paddr + size) - VDMA_PAGE(paddr) + 1;
+ first = 0;
+ while (1) {
+ while (pgtbl[first].owner != VDMA_PAGE_EMPTY &&
+ first < VDMA_PGTBL_ENTRIES) first++;
+ if (first + pages > VDMA_PGTBL_ENTRIES) { /* nothing free */
+ spin_unlock_irqrestore(&vdma_lock, flags);
+ return DMA_MAPPING_ERROR;
+ }
+
+ last = first + 1;
+ while (pgtbl[last].owner == VDMA_PAGE_EMPTY
+ && last - first < pages)
+ last++;
+
+ if (last - first == pages)
+ break; /* found */
+ first = last + 1;
+ }
+
+ /*
+ * Mark pages as allocated
+ */
+ laddr = (first << 12) + (paddr & (VDMA_PAGESIZE - 1));
+ frame = paddr & ~(VDMA_PAGESIZE - 1);
+
+ for (i = first; i < last; i++) {
+ pgtbl[i].frame = frame;
+ pgtbl[i].owner = laddr;
+ frame += VDMA_PAGESIZE;
+ }
+
+ /*
+ * Update translation table and return logical start address
+ */
+ r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
+
+ if (vdma_debug > 1)
+ printk("vdma_alloc: Allocated %d pages starting from %08lx\n",
+ pages, laddr);
+
+ if (vdma_debug > 2) {
+ printk("LADDR: ");
+ for (i = first; i < last; i++)
+ printk("%08x ", i << 12);
+ printk("\nPADDR: ");
+ for (i = first; i < last; i++)
+ printk("%08x ", pgtbl[i].frame);
+ printk("\nOWNER: ");
+ for (i = first; i < last; i++)
+ printk("%08x ", pgtbl[i].owner);
+ printk("\n");
+ }
+
+ spin_unlock_irqrestore(&vdma_lock, flags);
+
+ return laddr;
+}
+
+EXPORT_SYMBOL(vdma_alloc);
+
+/*
+ * Free previously allocated DMA translation pages.
+ * Note that this does NOT change the translation table,
+ * it just marks the freed pages as unused!
+ */
+int vdma_free(unsigned long laddr)
+{
+ int i;
+
+ i = laddr >> 12;
+
+ if (pgtbl[i].owner != laddr) {
+ printk
+ ("vdma_free: trying to free other's dma pages, laddr=%8lx\n",
+ laddr);
+ return -1;
+ }
+
+ while (i < VDMA_PGTBL_ENTRIES && pgtbl[i].owner == laddr) {
+ pgtbl[i].owner = VDMA_PAGE_EMPTY;
+ i++;
+ }
+
+ if (vdma_debug > 1)
+ printk("vdma_free: freed %ld pages starting from %08lx\n",
+ i - (laddr >> 12), laddr);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vdma_free);
+
+/*
+ * Translate a physical address to a logical address.
+ * This will return the logical address of the first
+ * match.
+ */
+unsigned long vdma_phys2log(unsigned long paddr)
+{
+ int i;
+ int frame;
+
+ frame = paddr & ~(VDMA_PAGESIZE - 1);
+
+ for (i = 0; i < VDMA_PGTBL_ENTRIES; i++) {
+ if (pgtbl[i].frame == frame)
+ break;
+ }
+
+ if (i == VDMA_PGTBL_ENTRIES)
+ return ~0UL;
+
+ return (i << 12) + (paddr & (VDMA_PAGESIZE - 1));
+}
+
+EXPORT_SYMBOL(vdma_phys2log);
+
+/*
+ * Translate a logical DMA address to a physical address
+ */
+unsigned long vdma_log2phys(unsigned long laddr)
+{
+ return pgtbl[laddr >> 12].frame + (laddr & (VDMA_PAGESIZE - 1));
+}
+
+EXPORT_SYMBOL(vdma_log2phys);
+
+/*
+ * Print DMA statistics
+ */
+void vdma_stats(void)
+{
+ int i;
+
+ printk("vdma_stats: CONFIG: %08x\n",
+ r4030_read_reg32(JAZZ_R4030_CONFIG));
+ printk("R4030 translation table base: %08x\n",
+ r4030_read_reg32(JAZZ_R4030_TRSTBL_BASE));
+ printk("R4030 translation table limit: %08x\n",
+ r4030_read_reg32(JAZZ_R4030_TRSTBL_LIM));
+ printk("vdma_stats: INV_ADDR: %08x\n",
+ r4030_read_reg32(JAZZ_R4030_INV_ADDR));
+ printk("vdma_stats: R_FAIL_ADDR: %08x\n",
+ r4030_read_reg32(JAZZ_R4030_R_FAIL_ADDR));
+ printk("vdma_stats: M_FAIL_ADDR: %08x\n",
+ r4030_read_reg32(JAZZ_R4030_M_FAIL_ADDR));
+ printk("vdma_stats: IRQ_SOURCE: %08x\n",
+ r4030_read_reg32(JAZZ_R4030_IRQ_SOURCE));
+ printk("vdma_stats: I386_ERROR: %08x\n",
+ r4030_read_reg32(JAZZ_R4030_I386_ERROR));
+ printk("vdma_chnl_modes: ");
+ for (i = 0; i < 8; i++)
+ printk("%04x ",
+ (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_MODE +
+ (i << 5)));
+ printk("\n");
+ printk("vdma_chnl_enables: ");
+ for (i = 0; i < 8; i++)
+ printk("%04x ",
+ (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
+ (i << 5)));
+ printk("\n");
+}
+
+/*
+ * DMA transfer functions
+ */
+
+/*
+ * Enable a DMA channel. Also clear any error conditions.
+ */
+void vdma_enable(int channel)
+{
+ int status;
+
+ if (vdma_debug)
+ printk("vdma_enable: channel %d\n", channel);
+
+ /*
+ * Check error conditions first
+ */
+ status = r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5));
+ if (status & 0x400)
+ printk("VDMA: Channel %d: Address error!\n", channel);
+ if (status & 0x200)
+ printk("VDMA: Channel %d: Memory error!\n", channel);
+
+ /*
+ * Clear all interrupt flags
+ */
+ r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
+ r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
+ (channel << 5)) | R4030_TC_INTR
+ | R4030_MEM_INTR | R4030_ADDR_INTR);
+
+ /*
+ * Enable the desired channel
+ */
+ r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
+ r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
+ (channel << 5)) |
+ R4030_CHNL_ENABLE);
+}
+
+EXPORT_SYMBOL(vdma_enable);
+
+/*
+ * Disable a DMA channel
+ */
+void vdma_disable(int channel)
+{
+ if (vdma_debug) {
+ int status =
+ r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
+ (channel << 5));
+
+ printk("vdma_disable: channel %d\n", channel);
+ printk("VDMA: channel %d status: %04x (%s) mode: "
+ "%02x addr: %06x count: %06x\n",
+ channel, status,
+ ((status & 0x600) ? "ERROR" : "OK"),
+ (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_MODE +
+ (channel << 5)),
+ (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_ADDR +
+ (channel << 5)),
+ (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_COUNT +
+ (channel << 5)));
+ }
+
+ r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
+ r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
+ (channel << 5)) &
+ ~R4030_CHNL_ENABLE);
+
+ /*
+ * After disabling a DMA channel a remote bus register should be
+ * read to ensure that the current DMA acknowledge cycle is completed.
+ */
+ *((volatile unsigned int *) JAZZ_DUMMY_DEVICE);
+}
+
+EXPORT_SYMBOL(vdma_disable);
+
+/*
+ * Set DMA mode. This function accepts the mode values used
+ * to set a PC-style DMA controller. For the SCSI and FDC
+ * channels, we also set the default modes each time we're
+ * called.
+ * NOTE: The FAST and BURST dma modes are supported by the
+ * R4030 Rev. 2 and PICA chipsets only. I leave them disabled
+ * for now.
+ */
+void vdma_set_mode(int channel, int mode)
+{
+ if (vdma_debug)
+ printk("vdma_set_mode: channel %d, mode 0x%x\n", channel,
+ mode);
+
+ switch (channel) {
+ case JAZZ_SCSI_DMA: /* scsi */
+ r4030_write_reg32(JAZZ_R4030_CHNL_MODE + (channel << 5),
+/* R4030_MODE_FAST | */
+/* R4030_MODE_BURST | */
+ R4030_MODE_INTR_EN |
+ R4030_MODE_WIDTH_16 |
+ R4030_MODE_ATIME_80);
+ break;
+
+ case JAZZ_FLOPPY_DMA: /* floppy */
+ r4030_write_reg32(JAZZ_R4030_CHNL_MODE + (channel << 5),
+/* R4030_MODE_FAST | */
+/* R4030_MODE_BURST | */
+ R4030_MODE_INTR_EN |
+ R4030_MODE_WIDTH_8 |
+ R4030_MODE_ATIME_120);
+ break;
+
+ case JAZZ_AUDIOL_DMA:
+ case JAZZ_AUDIOR_DMA:
+ printk("VDMA: Audio DMA not supported yet.\n");
+ break;
+
+ default:
+ printk
+ ("VDMA: vdma_set_mode() called with unsupported channel %d!\n",
+ channel);
+ }
+
+ switch (mode) {
+ case DMA_MODE_READ:
+ r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
+ r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
+ (channel << 5)) &
+ ~R4030_CHNL_WRITE);
+ break;
+
+ case DMA_MODE_WRITE:
+ r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
+ r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
+ (channel << 5)) |
+ R4030_CHNL_WRITE);
+ break;
+
+ default:
+ printk
+ ("VDMA: vdma_set_mode() called with unknown dma mode 0x%x\n",
+ mode);
+ }
+}
+
+EXPORT_SYMBOL(vdma_set_mode);
+
+/*
+ * Set Transfer Address
+ */
+void vdma_set_addr(int channel, long addr)
+{
+ if (vdma_debug)
+ printk("vdma_set_addr: channel %d, addr %lx\n", channel,
+ addr);
+
+ r4030_write_reg32(JAZZ_R4030_CHNL_ADDR + (channel << 5), addr);
+}
+
+EXPORT_SYMBOL(vdma_set_addr);
+
+/*
+ * Set Transfer Count
+ */
+void vdma_set_count(int channel, int count)
+{
+ if (vdma_debug)
+ printk("vdma_set_count: channel %d, count %08x\n", channel,
+ (unsigned) count);
+
+ r4030_write_reg32(JAZZ_R4030_CHNL_COUNT + (channel << 5), count);
+}
+
+EXPORT_SYMBOL(vdma_set_count);
+
+/*
+ * Get Residual
+ */
+int vdma_get_residue(int channel)
+{
+ int residual;
+
+ residual = r4030_read_reg32(JAZZ_R4030_CHNL_COUNT + (channel << 5));
+
+ if (vdma_debug)
+ printk("vdma_get_residual: channel %d: residual=%d\n",
+ channel, residual);
+
+ return residual;
+}
+
+/*
+ * Get DMA channel enable register
+ */
+int vdma_get_enable(int channel)
+{
+ int enable;
+
+ enable = r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5));
+
+ if (vdma_debug)
+ printk("vdma_get_enable: channel %d: enable=%d\n", channel,
+ enable);
+
+ return enable;
+}
+
+static void *jazz_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+ struct page *page;
+ void *ret;
+
+ if (attrs & DMA_ATTR_NO_WARN)
+ gfp |= __GFP_NOWARN;
+
+ size = PAGE_ALIGN(size);
+ page = alloc_pages(gfp, get_order(size));
+ if (!page)
+ return NULL;
+ ret = page_address(page);
+ memset(ret, 0, size);
+ *dma_handle = vdma_alloc(virt_to_phys(ret), size);
+ if (*dma_handle == DMA_MAPPING_ERROR)
+ goto out_free_pages;
+ arch_dma_prep_coherent(page, size);
+ return (void *)(UNCAC_BASE + __pa(ret));
+
+out_free_pages:
+ __free_pages(page, get_order(size));
+ return NULL;
+}
+
+static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ vdma_free(dma_handle);
+ __free_pages(virt_to_page(vaddr), get_order(size));
+}
+
+static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ phys_addr_t phys = page_to_phys(page) + offset;
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(phys, size, dir);
+ return vdma_alloc(phys, size);
+}
+
+static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_cpu(vdma_log2phys(dma_addr), size, dir);
+ vdma_free(dma_addr);
+}
+
+static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nents, i) {
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(sg_phys(sg), sg->length,
+ dir);
+ sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
+ if (sg->dma_address == DMA_MAPPING_ERROR)
+ return 0;
+ sg_dma_len(sg) = sg->length;
+ }
+
+ return nents;
+}
+
+static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nents, i) {
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
+ vdma_free(sg->dma_address);
+ }
+}
+
+static void jazz_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+ arch_sync_dma_for_device(vdma_log2phys(addr), size, dir);
+}
+
+static void jazz_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+ arch_sync_dma_for_cpu(vdma_log2phys(addr), size, dir);
+}
+
+static void jazz_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
+}
+
+static void jazz_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
+}
+
+const struct dma_map_ops jazz_dma_ops = {
+ .alloc = jazz_dma_alloc,
+ .free = jazz_dma_free,
+ .map_page = jazz_dma_map_page,
+ .unmap_page = jazz_dma_unmap_page,
+ .map_sg = jazz_dma_map_sg,
+ .unmap_sg = jazz_dma_unmap_sg,
+ .sync_single_for_cpu = jazz_dma_sync_single_for_cpu,
+ .sync_single_for_device = jazz_dma_sync_single_for_device,
+ .sync_sg_for_cpu = jazz_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = jazz_dma_sync_sg_for_device,
+ .mmap = dma_common_mmap,
+ .get_sgtable = dma_common_get_sgtable,
+ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
+};
+EXPORT_SYMBOL(jazz_dma_ops);
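To make the mapping scheme above explicit: VDMA pages are VDMA_PAGESIZE wide (4 KiB, matching the << 12 shifts used above), so a logical DMA address is simply a page-table index in the upper bits plus the in-page offset, which is what vdma_alloc() builds and vdma_log2phys() reverses. A minimal restatement of that arithmetic (sketch only):

    /* Logical address composed by vdma_alloc(): index << 12 | offset. */
    static unsigned long vdma_make_laddr(int first_entry, unsigned long paddr)
    {
            return ((unsigned long)first_entry << 12) |
                   (paddr & (VDMA_PAGESIZE - 1));
    }

    /* Reverse translation, as in vdma_log2phys(). */
    static unsigned long vdma_resolve(unsigned long laddr)
    {
            return pgtbl[laddr >> 12].frame + (laddr & (VDMA_PAGESIZE - 1));
    }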
diff --git a/arch/mips/jazz/reset.c b/arch/mips/jazz/reset.c
new file mode 100644
index 000000000..052b01f5d
--- /dev/null
+++ b/arch/mips/jazz/reset.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Reset a Jazz machine.
+ *
+ * We don't trust the firmware so we do it the classic way by poking and
+ * stabbing at the keyboard controller ...
+ */
+#include <linux/jiffies.h>
+#include <asm/jazz.h>
+
+#define KBD_STAT_IBF 0x02 /* Keyboard input buffer full */
+
+static void jazz_write_output(unsigned char val)
+{
+ int status;
+
+ do {
+ status = jazz_kh->command;
+ } while (status & KBD_STAT_IBF);
+ jazz_kh->data = val;
+}
+
+static void jazz_write_command(unsigned char val)
+{
+ int status;
+
+ do {
+ status = jazz_kh->command;
+ } while (status & KBD_STAT_IBF);
+ jazz_kh->command = val;
+}
+
+static unsigned char jazz_read_status(void)
+{
+ return jazz_kh->command;
+}
+
+static inline void kb_wait(void)
+{
+ unsigned long start = jiffies;
+ unsigned long timeout = start + HZ/2;
+
+ do {
+ if (! (jazz_read_status() & 0x02))
+ return;
+ } while (time_before_eq(jiffies, timeout));
+}
+
+void jazz_machine_restart(char *command)
+{
+ while(1) {
+ kb_wait();
+ jazz_write_command(0xd1);
+ kb_wait();
+ jazz_write_output(0x00);
+ }
+}
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
new file mode 100644
index 000000000..04aab419a
--- /dev/null
+++ b/arch/mips/jazz/setup.c
@@ -0,0 +1,212 @@
+/*
+ * Setup pointers to hardware-dependent routines.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1997, 1998, 2001, 07, 08 by Ralf Baechle
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ * Copyright (C) 2007 by Thomas Bogendoerfer
+ */
+#include <linux/eisa.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/console.h>
+#include <linux/screen_info.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+#include <linux/dma-mapping.h>
+#include <linux/pgtable.h>
+
+#include <asm/jazz.h>
+#include <asm/jazzdma.h>
+#include <asm/reboot.h>
+#include <asm/tlbmisc.h>
+
+extern asmlinkage void jazz_handle_int(void);
+
+extern void jazz_machine_restart(char *command);
+
+static struct resource jazz_io_resources[] = {
+ {
+ .start = 0x00,
+ .end = 0x1f,
+ .name = "dma1",
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ }, {
+ .start = 0x40,
+ .end = 0x5f,
+ .name = "timer",
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ }, {
+ .start = 0x80,
+ .end = 0x8f,
+ .name = "dma page reg",
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ }, {
+ .start = 0xc0,
+ .end = 0xdf,
+ .name = "dma2",
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ }
+};
+
+void __init plat_mem_setup(void)
+{
+ int i;
+
+ /* Map 0xe0000000 -> 0x0:800005C0, 0xe0010000 -> 0x1:30000580 */
+ add_wired_entry(0x02000017, 0x03c00017, 0xe0000000, PM_64K);
+ /* Map 0xe2000000 -> 0x0:900005C0, 0xe3010000 -> 0x0:910005C0 */
+ add_wired_entry(0x02400017, 0x02440017, 0xe2000000, PM_16M);
+ /* Map 0xe4000000 -> 0x0:600005C0, 0xe4100000 -> 400005C0 */
+ add_wired_entry(0x01800017, 0x01000017, 0xe4000000, PM_4M);
+
+ set_io_port_base(JAZZ_PORT_BASE);
+#ifdef CONFIG_EISA
+ EISA_bus = 1;
+#endif
+
+ /* request I/O space for devices used on all i[345]86 PCs */
+ for (i = 0; i < ARRAY_SIZE(jazz_io_resources); i++)
+ request_resource(&ioport_resource, jazz_io_resources + i);
+
+ /* The RTC is outside the port address space */
+
+ _machine_restart = jazz_machine_restart;
+
+#ifdef CONFIG_VT
+ screen_info = (struct screen_info) {
+ .orig_video_cols = 160,
+ .orig_video_lines = 64,
+ .orig_video_points = 16,
+ };
+#endif
+
+ add_preferred_console("ttyS", 0, "9600");
+}
+
+#ifdef CONFIG_OLIVETTI_M700
+#define UART_CLK 1843200
+#else
+/* Some Jazz machines seem to have an 8MHz crystal clock but I don't know
+ exactly which ones ... XXX */
+#define UART_CLK (8000000 / 16) /* ( 3072000 / 16) */
+#endif
+
+#define MEMPORT(_base, _irq) \
+ { \
+ .mapbase = (_base), \
+ .membase = (void *)(_base), \
+ .irq = (_irq), \
+ .uartclk = UART_CLK, \
+ .iotype = UPIO_MEM, \
+ .flags = UPF_BOOT_AUTOCONF, \
+ }
+
+static struct plat_serial8250_port jazz_serial_data[] = {
+ MEMPORT(JAZZ_SERIAL1_BASE, JAZZ_SERIAL1_IRQ),
+ MEMPORT(JAZZ_SERIAL2_BASE, JAZZ_SERIAL2_IRQ),
+ { },
+};
+
+static struct platform_device jazz_serial8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = jazz_serial_data,
+ },
+};
+
+static struct resource jazz_esp_rsrc[] = {
+ {
+ .start = JAZZ_SCSI_BASE,
+ .end = JAZZ_SCSI_BASE + 31,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .start = JAZZ_SCSI_DMA,
+ .end = JAZZ_SCSI_DMA,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .start = JAZZ_SCSI_IRQ,
+ .end = JAZZ_SCSI_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static u64 jazz_esp_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device jazz_esp_pdev = {
+ .name = "jazz_esp",
+ .num_resources = ARRAY_SIZE(jazz_esp_rsrc),
+ .resource = jazz_esp_rsrc,
+ .dev = {
+ .dma_mask = &jazz_esp_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
+};
+
+static struct resource jazz_sonic_rsrc[] = {
+ {
+ .start = JAZZ_ETHERNET_BASE,
+ .end = JAZZ_ETHERNET_BASE + 0xff,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .start = JAZZ_ETHERNET_IRQ,
+ .end = JAZZ_ETHERNET_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static u64 jazz_sonic_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device jazz_sonic_pdev = {
+ .name = "jazzsonic",
+ .num_resources = ARRAY_SIZE(jazz_sonic_rsrc),
+ .resource = jazz_sonic_rsrc,
+ .dev = {
+ .dma_mask = &jazz_sonic_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
+};
+
+static struct resource jazz_cmos_rsrc[] = {
+ {
+ .start = 0x70,
+ .end = 0x71,
+ .flags = IORESOURCE_IO
+ },
+ {
+ .start = 8,
+ .end = 8,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct platform_device jazz_cmos_pdev = {
+ .name = "rtc_cmos",
+ .num_resources = ARRAY_SIZE(jazz_cmos_rsrc),
+ .resource = jazz_cmos_rsrc
+};
+
+static struct platform_device pcspeaker_pdev = {
+ .name = "pcspkr",
+ .id = -1,
+};
+
+static int __init jazz_setup_devinit(void)
+{
+ platform_device_register(&jazz_serial8250_device);
+ platform_device_register(&jazz_esp_pdev);
+ platform_device_register(&jazz_sonic_pdev);
+ platform_device_register(&jazz_cmos_pdev);
+ platform_device_register(&pcspeaker_pdev);
+
+ return 0;
+}
+
+device_initcall(jazz_setup_devinit);
diff --git a/arch/mips/kernel/.gitignore b/arch/mips/kernel/.gitignore
new file mode 100644
index 000000000..bbb90f92d
--- /dev/null
+++ b/arch/mips/kernel/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vmlinux.lds
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
new file mode 100644
index 000000000..2a05b923f
--- /dev/null
+++ b/arch/mips/kernel/Makefile
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux/MIPS kernel.
+#
+
+extra-y := head.o vmlinux.lds
+
+obj-y += branch.o cmpxchg.o elf.o entry.o genex.o idle.o irq.o \
+ process.o prom.o ptrace.o reset.o setup.o signal.o \
+ syscall.o time.o topology.o traps.o unaligned.o watch.o \
+ vdso.o cacheinfo.o
+
+ifdef CONFIG_CPU_R3K_TLB
+obj-y += cpu-r3k-probe.o
+else
+obj-y += cpu-probe.o
+endif
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_early_printk.o = -pg
+CFLAGS_REMOVE_perf_event.o = -pg
+CFLAGS_REMOVE_perf_event_mipsxx.o = -pg
+endif
+
+obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
+obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
+obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
+obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
+obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
+obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
+obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o
+obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o
+obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
+obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
+obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
+
+obj-$(CONFIG_DEBUG_FS) += segment.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_MODULES) += module.o
+
+obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
+
+sw-y := r4k_switch.o
+sw-$(CONFIG_CPU_R3000) := r2300_switch.o
+sw-$(CONFIG_CPU_TX39XX) := r2300_switch.o
+sw-$(CONFIG_CPU_CAVIUM_OCTEON) := octeon_switch.o
+obj-y += $(sw-y)
+
+obj-$(CONFIG_MIPS_FP_SUPPORT) += fpu-probe.o
+obj-$(CONFIG_CPU_R2300_FPU) += r2300_fpu.o
+obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o
+
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SMP_UP) += smp-up.o
+obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o bmips_5xxx_init.o
+
+obj-$(CONFIG_MIPS_MT) += mips-mt.o
+obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
+obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
+obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
+obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
+obj-$(CONFIG_MIPS_CPS_NS16550) += cps-vec-ns16550.o
+obj-$(CONFIG_MIPS_SPRAM) += spram.o
+
+obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
+obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
+obj-$(CONFIG_MIPS_VPE_LOADER_MT) += vpe-mt.o
+obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
+obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o
+obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o
+
+obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
+obj-$(CONFIG_MIPS_MSC) += irq-msc01.o
+obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
+obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o
+
+obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_32BIT) += scall32-o32.o
+obj-$(CONFIG_64BIT) += scall64-n64.o
+obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o
+obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o
+obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o signal_o32.o
+
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
+
+obj-$(CONFIG_CPU_R4X00_BUGS64) += r4k-bugs64.o
+
+obj-$(CONFIG_I8253) += i8253.o
+
+obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o
+
+obj-$(CONFIG_RELOCATABLE) += relocate.o
+
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o
+obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
+obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR) += mips-r2-to-r6-emul.o
+
+CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
+
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_UPROBES) += uprobes.o
+
+obj-$(CONFIG_MIPS_CM) += mips-cm.o
+obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
+
+obj-$(CONFIG_CPU_PM) += pm.o
+obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o
+
+CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
new file mode 100644
index 000000000..aebfda811
--- /dev/null
+++ b/arch/mips/kernel/asm-offsets.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * asm-offsets.c: Calculate pt_regs and task_struct offsets.
+ *
+ * Copyright (C) 1996 David S. Miller
+ * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ *
+ * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <linux/compat.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kbuild.h>
+#include <linux/suspend.h>
+#include <asm/cpu-info.h>
+#include <asm/pm.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/smp-cps.h>
+
+#include <linux/kvm_host.h>
+
+void output_ptreg_defines(void)
+{
+ COMMENT("MIPS pt_regs offsets.");
+ OFFSET(PT_R0, pt_regs, regs[0]);
+ OFFSET(PT_R1, pt_regs, regs[1]);
+ OFFSET(PT_R2, pt_regs, regs[2]);
+ OFFSET(PT_R3, pt_regs, regs[3]);
+ OFFSET(PT_R4, pt_regs, regs[4]);
+ OFFSET(PT_R5, pt_regs, regs[5]);
+ OFFSET(PT_R6, pt_regs, regs[6]);
+ OFFSET(PT_R7, pt_regs, regs[7]);
+ OFFSET(PT_R8, pt_regs, regs[8]);
+ OFFSET(PT_R9, pt_regs, regs[9]);
+ OFFSET(PT_R10, pt_regs, regs[10]);
+ OFFSET(PT_R11, pt_regs, regs[11]);
+ OFFSET(PT_R12, pt_regs, regs[12]);
+ OFFSET(PT_R13, pt_regs, regs[13]);
+ OFFSET(PT_R14, pt_regs, regs[14]);
+ OFFSET(PT_R15, pt_regs, regs[15]);
+ OFFSET(PT_R16, pt_regs, regs[16]);
+ OFFSET(PT_R17, pt_regs, regs[17]);
+ OFFSET(PT_R18, pt_regs, regs[18]);
+ OFFSET(PT_R19, pt_regs, regs[19]);
+ OFFSET(PT_R20, pt_regs, regs[20]);
+ OFFSET(PT_R21, pt_regs, regs[21]);
+ OFFSET(PT_R22, pt_regs, regs[22]);
+ OFFSET(PT_R23, pt_regs, regs[23]);
+ OFFSET(PT_R24, pt_regs, regs[24]);
+ OFFSET(PT_R25, pt_regs, regs[25]);
+ OFFSET(PT_R26, pt_regs, regs[26]);
+ OFFSET(PT_R27, pt_regs, regs[27]);
+ OFFSET(PT_R28, pt_regs, regs[28]);
+ OFFSET(PT_R29, pt_regs, regs[29]);
+ OFFSET(PT_R30, pt_regs, regs[30]);
+ OFFSET(PT_R31, pt_regs, regs[31]);
+ OFFSET(PT_LO, pt_regs, lo);
+ OFFSET(PT_HI, pt_regs, hi);
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ OFFSET(PT_ACX, pt_regs, acx);
+#endif
+ OFFSET(PT_EPC, pt_regs, cp0_epc);
+ OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr);
+ OFFSET(PT_STATUS, pt_regs, cp0_status);
+ OFFSET(PT_CAUSE, pt_regs, cp0_cause);
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ OFFSET(PT_MPL, pt_regs, mpl);
+ OFFSET(PT_MTP, pt_regs, mtp);
+#endif /* CONFIG_CPU_CAVIUM_OCTEON */
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ BLANK();
+}
+
+void output_task_defines(void)
+{
+ COMMENT("MIPS task_struct offsets.");
+ OFFSET(TASK_STATE, task_struct, state);
+ OFFSET(TASK_THREAD_INFO, task_struct, stack);
+ OFFSET(TASK_FLAGS, task_struct, flags);
+ OFFSET(TASK_MM, task_struct, mm);
+ OFFSET(TASK_PID, task_struct, pid);
+#if defined(CONFIG_STACKPROTECTOR)
+ OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
+#endif
+ DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
+ BLANK();
+}
+
+void output_thread_info_defines(void)
+{
+ COMMENT("MIPS thread_info offsets.");
+ OFFSET(TI_TASK, thread_info, task);
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_TP_VALUE, thread_info, tp_value);
+ OFFSET(TI_CPU, thread_info, cpu);
+ OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+ OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
+ OFFSET(TI_REGS, thread_info, regs);
+ DEFINE(_THREAD_SIZE, THREAD_SIZE);
+ DEFINE(_THREAD_MASK, THREAD_MASK);
+ DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+ DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
+ BLANK();
+}
+
+void output_thread_defines(void)
+{
+ COMMENT("MIPS specific thread_struct offsets.");
+ OFFSET(THREAD_REG16, task_struct, thread.reg16);
+ OFFSET(THREAD_REG17, task_struct, thread.reg17);
+ OFFSET(THREAD_REG18, task_struct, thread.reg18);
+ OFFSET(THREAD_REG19, task_struct, thread.reg19);
+ OFFSET(THREAD_REG20, task_struct, thread.reg20);
+ OFFSET(THREAD_REG21, task_struct, thread.reg21);
+ OFFSET(THREAD_REG22, task_struct, thread.reg22);
+ OFFSET(THREAD_REG23, task_struct, thread.reg23);
+ OFFSET(THREAD_REG29, task_struct, thread.reg29);
+ OFFSET(THREAD_REG30, task_struct, thread.reg30);
+ OFFSET(THREAD_REG31, task_struct, thread.reg31);
+ OFFSET(THREAD_STATUS, task_struct,
+ thread.cp0_status);
+
+ OFFSET(THREAD_BVADDR, task_struct, \
+ thread.cp0_badvaddr);
+ OFFSET(THREAD_BUADDR, task_struct, \
+ thread.cp0_baduaddr);
+ OFFSET(THREAD_ECODE, task_struct, \
+ thread.error_code);
+ OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr);
+ BLANK();
+}
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+void output_thread_fpu_defines(void)
+{
+ OFFSET(THREAD_FPU, task_struct, thread.fpu);
+
+ OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]);
+ OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]);
+ OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]);
+ OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]);
+ OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]);
+ OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]);
+ OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]);
+ OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]);
+ OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]);
+ OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]);
+ OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]);
+ OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]);
+ OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]);
+ OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]);
+ OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]);
+ OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]);
+ OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]);
+ OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]);
+ OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]);
+ OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]);
+ OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]);
+ OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]);
+ OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]);
+ OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]);
+ OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]);
+ OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]);
+ OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]);
+ OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]);
+ OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]);
+ OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]);
+ OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
+ OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
+
+ OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
+ OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr);
+ BLANK();
+}
+#endif
+
+void output_mm_defines(void)
+{
+ COMMENT("Size of struct page");
+ DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
+ BLANK();
+ COMMENT("Linux mm_struct offsets.");
+ OFFSET(MM_USERS, mm_struct, mm_users);
+ OFFSET(MM_PGD, mm_struct, pgd);
+ OFFSET(MM_CONTEXT, mm_struct, context);
+ BLANK();
+ DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
+ DEFINE(_PMD_T_SIZE, sizeof(pmd_t));
+ DEFINE(_PTE_T_SIZE, sizeof(pte_t));
+ BLANK();
+ DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
+#ifndef __PAGETABLE_PMD_FOLDED
+ DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
+#endif
+ DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
+ BLANK();
+ DEFINE(_PGD_ORDER, PGD_ORDER);
+#ifndef __PAGETABLE_PMD_FOLDED
+ DEFINE(_PMD_ORDER, PMD_ORDER);
+#endif
+ DEFINE(_PTE_ORDER, PTE_ORDER);
+ BLANK();
+ DEFINE(_PMD_SHIFT, PMD_SHIFT);
+ DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
+ BLANK();
+ DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
+ DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
+ DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
+ BLANK();
+ DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
+ DEFINE(_PAGE_SIZE, PAGE_SIZE);
+ BLANK();
+}
+
+#ifdef CONFIG_32BIT
+void output_sc_defines(void)
+{
+ COMMENT("Linux sigcontext offsets.");
+ OFFSET(SC_REGS, sigcontext, sc_regs);
+ OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
+ OFFSET(SC_ACX, sigcontext, sc_acx);
+ OFFSET(SC_MDHI, sigcontext, sc_mdhi);
+ OFFSET(SC_MDLO, sigcontext, sc_mdlo);
+ OFFSET(SC_PC, sigcontext, sc_pc);
+ OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
+ OFFSET(SC_FPC_EIR, sigcontext, sc_fpc_eir);
+ OFFSET(SC_HI1, sigcontext, sc_hi1);
+ OFFSET(SC_LO1, sigcontext, sc_lo1);
+ OFFSET(SC_HI2, sigcontext, sc_hi2);
+ OFFSET(SC_LO2, sigcontext, sc_lo2);
+ OFFSET(SC_HI3, sigcontext, sc_hi3);
+ OFFSET(SC_LO3, sigcontext, sc_lo3);
+ BLANK();
+}
+#endif
+
+#ifdef CONFIG_64BIT
+void output_sc_defines(void)
+{
+ COMMENT("Linux sigcontext offsets.");
+ OFFSET(SC_REGS, sigcontext, sc_regs);
+ OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
+ OFFSET(SC_MDHI, sigcontext, sc_mdhi);
+ OFFSET(SC_MDLO, sigcontext, sc_mdlo);
+ OFFSET(SC_PC, sigcontext, sc_pc);
+ OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
+ BLANK();
+}
+#endif
+
+void output_signal_defined(void)
+{
+ COMMENT("Linux signal numbers.");
+ DEFINE(_SIGHUP, SIGHUP);
+ DEFINE(_SIGINT, SIGINT);
+ DEFINE(_SIGQUIT, SIGQUIT);
+ DEFINE(_SIGILL, SIGILL);
+ DEFINE(_SIGTRAP, SIGTRAP);
+ DEFINE(_SIGIOT, SIGIOT);
+ DEFINE(_SIGABRT, SIGABRT);
+ DEFINE(_SIGEMT, SIGEMT);
+ DEFINE(_SIGFPE, SIGFPE);
+ DEFINE(_SIGKILL, SIGKILL);
+ DEFINE(_SIGBUS, SIGBUS);
+ DEFINE(_SIGSEGV, SIGSEGV);
+ DEFINE(_SIGSYS, SIGSYS);
+ DEFINE(_SIGPIPE, SIGPIPE);
+ DEFINE(_SIGALRM, SIGALRM);
+ DEFINE(_SIGTERM, SIGTERM);
+ DEFINE(_SIGUSR1, SIGUSR1);
+ DEFINE(_SIGUSR2, SIGUSR2);
+ DEFINE(_SIGCHLD, SIGCHLD);
+ DEFINE(_SIGPWR, SIGPWR);
+ DEFINE(_SIGWINCH, SIGWINCH);
+ DEFINE(_SIGURG, SIGURG);
+ DEFINE(_SIGIO, SIGIO);
+ DEFINE(_SIGSTOP, SIGSTOP);
+ DEFINE(_SIGTSTP, SIGTSTP);
+ DEFINE(_SIGCONT, SIGCONT);
+ DEFINE(_SIGTTIN, SIGTTIN);
+ DEFINE(_SIGTTOU, SIGTTOU);
+ DEFINE(_SIGVTALRM, SIGVTALRM);
+ DEFINE(_SIGPROF, SIGPROF);
+ DEFINE(_SIGXCPU, SIGXCPU);
+ DEFINE(_SIGXFSZ, SIGXFSZ);
+ BLANK();
+}
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+void output_octeon_cop2_state_defines(void)
+{
+ COMMENT("Octeon specific octeon_cop2_state offsets.");
+ OFFSET(OCTEON_CP2_CRC_IV, octeon_cop2_state, cop2_crc_iv);
+ OFFSET(OCTEON_CP2_CRC_LENGTH, octeon_cop2_state, cop2_crc_length);
+ OFFSET(OCTEON_CP2_CRC_POLY, octeon_cop2_state, cop2_crc_poly);
+ OFFSET(OCTEON_CP2_LLM_DAT, octeon_cop2_state, cop2_llm_dat);
+ OFFSET(OCTEON_CP2_3DES_IV, octeon_cop2_state, cop2_3des_iv);
+ OFFSET(OCTEON_CP2_3DES_KEY, octeon_cop2_state, cop2_3des_key);
+ OFFSET(OCTEON_CP2_3DES_RESULT, octeon_cop2_state, cop2_3des_result);
+ OFFSET(OCTEON_CP2_AES_INP0, octeon_cop2_state, cop2_aes_inp0);
+ OFFSET(OCTEON_CP2_AES_IV, octeon_cop2_state, cop2_aes_iv);
+ OFFSET(OCTEON_CP2_AES_KEY, octeon_cop2_state, cop2_aes_key);
+ OFFSET(OCTEON_CP2_AES_KEYLEN, octeon_cop2_state, cop2_aes_keylen);
+ OFFSET(OCTEON_CP2_AES_RESULT, octeon_cop2_state, cop2_aes_result);
+ OFFSET(OCTEON_CP2_GFM_MULT, octeon_cop2_state, cop2_gfm_mult);
+ OFFSET(OCTEON_CP2_GFM_POLY, octeon_cop2_state, cop2_gfm_poly);
+ OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result);
+ OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw);
+ OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw);
+ OFFSET(OCTEON_CP2_SHA3, octeon_cop2_state, cop2_sha3);
+ OFFSET(THREAD_CP2, task_struct, thread.cp2);
+ OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg);
+ BLANK();
+}
+#endif
+
+#ifdef CONFIG_HIBERNATION
+void output_pbe_defines(void)
+{
+ COMMENT(" Linux struct pbe offsets. ");
+ OFFSET(PBE_ADDRESS, pbe, address);
+ OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address);
+ OFFSET(PBE_NEXT, pbe, next);
+ DEFINE(PBE_SIZE, sizeof(struct pbe));
+ BLANK();
+}
+#endif
+
+#ifdef CONFIG_CPU_PM
+void output_pm_defines(void)
+{
+ COMMENT(" PM offsets. ");
+#ifdef CONFIG_EVA
+ OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]);
+ OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]);
+ OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]);
+#endif
+ OFFSET(SSS_SP, mips_static_suspend_state, sp);
+ BLANK();
+}
+#endif
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+void output_kvm_defines(void)
+{
+ COMMENT(" KVM/MIPS Specific offsets. ");
+
+ OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
+ OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
+ OFFSET(VCPU_FPR2, kvm_vcpu_arch, fpu.fpr[2]);
+ OFFSET(VCPU_FPR3, kvm_vcpu_arch, fpu.fpr[3]);
+ OFFSET(VCPU_FPR4, kvm_vcpu_arch, fpu.fpr[4]);
+ OFFSET(VCPU_FPR5, kvm_vcpu_arch, fpu.fpr[5]);
+ OFFSET(VCPU_FPR6, kvm_vcpu_arch, fpu.fpr[6]);
+ OFFSET(VCPU_FPR7, kvm_vcpu_arch, fpu.fpr[7]);
+ OFFSET(VCPU_FPR8, kvm_vcpu_arch, fpu.fpr[8]);
+ OFFSET(VCPU_FPR9, kvm_vcpu_arch, fpu.fpr[9]);
+ OFFSET(VCPU_FPR10, kvm_vcpu_arch, fpu.fpr[10]);
+ OFFSET(VCPU_FPR11, kvm_vcpu_arch, fpu.fpr[11]);
+ OFFSET(VCPU_FPR12, kvm_vcpu_arch, fpu.fpr[12]);
+ OFFSET(VCPU_FPR13, kvm_vcpu_arch, fpu.fpr[13]);
+ OFFSET(VCPU_FPR14, kvm_vcpu_arch, fpu.fpr[14]);
+ OFFSET(VCPU_FPR15, kvm_vcpu_arch, fpu.fpr[15]);
+ OFFSET(VCPU_FPR16, kvm_vcpu_arch, fpu.fpr[16]);
+ OFFSET(VCPU_FPR17, kvm_vcpu_arch, fpu.fpr[17]);
+ OFFSET(VCPU_FPR18, kvm_vcpu_arch, fpu.fpr[18]);
+ OFFSET(VCPU_FPR19, kvm_vcpu_arch, fpu.fpr[19]);
+ OFFSET(VCPU_FPR20, kvm_vcpu_arch, fpu.fpr[20]);
+ OFFSET(VCPU_FPR21, kvm_vcpu_arch, fpu.fpr[21]);
+ OFFSET(VCPU_FPR22, kvm_vcpu_arch, fpu.fpr[22]);
+ OFFSET(VCPU_FPR23, kvm_vcpu_arch, fpu.fpr[23]);
+ OFFSET(VCPU_FPR24, kvm_vcpu_arch, fpu.fpr[24]);
+ OFFSET(VCPU_FPR25, kvm_vcpu_arch, fpu.fpr[25]);
+ OFFSET(VCPU_FPR26, kvm_vcpu_arch, fpu.fpr[26]);
+ OFFSET(VCPU_FPR27, kvm_vcpu_arch, fpu.fpr[27]);
+ OFFSET(VCPU_FPR28, kvm_vcpu_arch, fpu.fpr[28]);
+ OFFSET(VCPU_FPR29, kvm_vcpu_arch, fpu.fpr[29]);
+ OFFSET(VCPU_FPR30, kvm_vcpu_arch, fpu.fpr[30]);
+ OFFSET(VCPU_FPR31, kvm_vcpu_arch, fpu.fpr[31]);
+
+ OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31);
+ OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr);
+ BLANK();
+}
+#endif
+
+#ifdef CONFIG_MIPS_CPS
+void output_cps_defines(void)
+{
+ COMMENT(" MIPS CPS offsets. ");
+
+ OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask);
+ OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config);
+ DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config));
+
+ OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc);
+ OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp);
+ OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp);
+ DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config));
+}
+#endif
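
Note on the asm-offsets machinery used by the output_*_defines() functions above: the OFFSET/DEFINE/COMMENT/BLANK helpers are defined elsewhere in the tree, not in this hunk. As a rough, self-contained sketch of the usual technique (the macro bodies below are an assumption modelled on the common asm-offsets pattern, not the kernel's verbatim definitions), each invocation plants a marker string in the compiler's assembly output, and the build later rewrites those markers into a generated header of #defines that hand-written assembly can include:

/* sketch.c: compile with "gcc -S sketch.c" and inspect the .ascii markers */
#include <stddef.h>

struct pbe { void *address; void *orig_address; struct pbe *next; };

/* Embed "->NAME value" into the generated assembly; a build script later
 * turns each marker into "#define NAME value". */
#define DEFINE(sym, val) \
	__asm__ volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))
#define BLANK() __asm__ volatile("\n.ascii \"->\"")

void output_pbe_defines(void)
{
	OFFSET(PBE_ADDRESS, pbe, address);
	OFFSET(PBE_NEXT, pbe, next);
	DEFINE(PBE_SIZE, sizeof(struct pbe));
	BLANK();
}

The generated header then lets assembly code refer to structure layouts (for example THREAD_CP2 or PT_SIZE) without hard-coding offsets.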
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
new file mode 100644
index 000000000..c4441416e
--- /dev/null
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for n32 Linux/MIPS ELF binaries.
+ * Author: Ralf Baechle (ralf@linux-mips.org)
+ *
+ * Copyright (C) 1999, 2001 Ralf Baechle
+ * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
+ *
+ * Heavily inspired by the 32-bit Sparc compat code which is
+ * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#define ELF_ARCH EM_MIPS
+#define ELF_CLASS ELFCLASS32
+#ifdef __MIPSEB__
+#define ELF_DATA ELFDATA2MSB;
+#else /* __MIPSEL__ */
+#define ELF_DATA ELFDATA2LSB;
+#endif
+
+/* ELF register definitions */
+#define ELF_NGREG 45
+#define ELF_NFPREG 33
+
+typedef unsigned long elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef double elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch elfn32_check_arch
+
+#define TASK32_SIZE 0x7fff8000UL
+#undef ELF_ET_DYN_BASE
+#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
+#include <asm/processor.h>
+#include <linux/elfcore.h>
+#include <linux/compat.h>
+#include <linux/math64.h>
+
+#define elf_prstatus elf_prstatus32
+struct elf_prstatus32
+{
+ struct elf_siginfo pr_info; /* Info associated with signal */
+ short pr_cursig; /* Current signal */
+ unsigned int pr_sigpend; /* Set of pending signals */
+ unsigned int pr_sighold; /* Set of held signals */
+ pid_t pr_pid;
+ pid_t pr_ppid;
+ pid_t pr_pgrp;
+ pid_t pr_sid;
+ struct old_timeval32 pr_utime; /* User time */
+ struct old_timeval32 pr_stime; /* System time */
+ struct old_timeval32 pr_cutime;/* Cumulative user time */
+ struct old_timeval32 pr_cstime;/* Cumulative system time */
+ elf_gregset_t pr_reg; /* GP registers */
+ int pr_fpvalid; /* True if math co-processor being used. */
+};
+
+#define elf_prpsinfo elf_prpsinfo32
+struct elf_prpsinfo32
+{
+ char pr_state; /* numeric process state */
+ char pr_sname; /* char for pr_state */
+ char pr_zomb; /* zombie */
+ char pr_nice; /* nice val */
+ unsigned int pr_flag; /* flags */
+ __kernel_uid_t pr_uid;
+ __kernel_gid_t pr_gid;
+ pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+ /* Lots missing */
+ char pr_fname[16]; /* filename of executable */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+};
+
+#define elf_caddr_t u32
+#define init_elf_binfmt init_elfn32_binfmt
+
+#define jiffies_to_timeval jiffies_to_old_timeval32
+static __inline__ void
+jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
+{
+ /*
+	 * Convert jiffies to nanoseconds, then split seconds from the
+	 * sub-second remainder with a single divide.
+ */
+ u64 nsec = (u64)jiffies * TICK_NSEC;
+ u32 rem;
+ value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
+ value->tv_usec = rem / NSEC_PER_USEC;
+}
+
+#define ELF_CORE_EFLAGS EF_MIPS_ABI2
+
+#undef TASK_SIZE
+#define TASK_SIZE TASK_SIZE32
+
+#undef ns_to_kernel_old_timeval
+#define ns_to_kernel_old_timeval ns_to_old_timeval32
+
+/*
+ * Some data types as stored in coredump.
+ */
+#define user_long_t compat_long_t
+#define user_siginfo_t compat_siginfo_t
+#define copy_siginfo_to_external copy_siginfo_to_external32
+
+#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
new file mode 100644
index 000000000..7b2a23f48
--- /dev/null
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for o32 Linux/MIPS ELF binaries.
+ * Author: Ralf Baechle (ralf@linux-mips.org)
+ *
+ * Copyright (C) 1999, 2001 Ralf Baechle
+ * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
+ *
+ * Heavily inspired by the 32-bit Sparc compat code which is
+ * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#define ELF_ARCH EM_MIPS
+#define ELF_CLASS ELFCLASS32
+#ifdef __MIPSEB__
+#define ELF_DATA ELFDATA2MSB;
+#else /* __MIPSEL__ */
+#define ELF_DATA ELFDATA2LSB;
+#endif
+
+/* ELF register definitions */
+#define ELF_NGREG 45
+#define ELF_NFPREG 33
+
+typedef unsigned int elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef double elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch elfo32_check_arch
+
+#ifdef CONFIG_KVM_GUEST
+#define TASK32_SIZE 0x3fff8000UL
+#else
+#define TASK32_SIZE 0x7fff8000UL
+#endif
+#undef ELF_ET_DYN_BASE
+#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
+#include <asm/processor.h>
+
+#include <linux/elfcore.h>
+#include <linux/compat.h>
+#include <linux/math64.h>
+
+#define elf_prstatus elf_prstatus32
+struct elf_prstatus32
+{
+ struct elf_siginfo pr_info; /* Info associated with signal */
+ short pr_cursig; /* Current signal */
+ unsigned int pr_sigpend; /* Set of pending signals */
+ unsigned int pr_sighold; /* Set of held signals */
+ pid_t pr_pid;
+ pid_t pr_ppid;
+ pid_t pr_pgrp;
+ pid_t pr_sid;
+ struct old_timeval32 pr_utime; /* User time */
+ struct old_timeval32 pr_stime; /* System time */
+ struct old_timeval32 pr_cutime;/* Cumulative user time */
+ struct old_timeval32 pr_cstime;/* Cumulative system time */
+ elf_gregset_t pr_reg; /* GP registers */
+ int pr_fpvalid; /* True if math co-processor being used. */
+};
+
+#define elf_prpsinfo elf_prpsinfo32
+struct elf_prpsinfo32
+{
+ char pr_state; /* numeric process state */
+ char pr_sname; /* char for pr_state */
+ char pr_zomb; /* zombie */
+ char pr_nice; /* nice val */
+ unsigned int pr_flag; /* flags */
+ __kernel_uid_t pr_uid;
+ __kernel_gid_t pr_gid;
+ pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+ /* Lots missing */
+ char pr_fname[16]; /* filename of executable */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+};
+
+#define elf_caddr_t u32
+#define init_elf_binfmt init_elf32_binfmt
+
+#define jiffies_to_timeval jiffies_to_old_timeval32
+static inline void
+jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
+{
+ /*
+	 * Convert jiffies to nanoseconds, then split seconds from the
+	 * sub-second remainder with a single divide.
+ */
+ u64 nsec = (u64)jiffies * TICK_NSEC;
+ u32 rem;
+ value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
+ value->tv_usec = rem / NSEC_PER_USEC;
+}
+
+#undef TASK_SIZE
+#define TASK_SIZE TASK_SIZE32
+
+#undef ns_to_kernel_old_timeval
+#define ns_to_kernel_old_timeval ns_to_old_timeval32
+
+/*
+ * Some data types as stored in coredump.
+ */
+#define user_long_t compat_long_t
+#define user_siginfo_t compat_siginfo_t
+#define copy_siginfo_to_external copy_siginfo_to_external32
+
+#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/bmips_5xxx_init.S b/arch/mips/kernel/bmips_5xxx_init.S
new file mode 100644
index 000000000..9e422d186
--- /dev/null
+++ b/arch/mips/kernel/bmips_5xxx_init.S
@@ -0,0 +1,747 @@
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011-2012 by Broadcom Corporation
+ *
+ * Init for bmips 5000.
+ * Used to init the second core in dual-core 5000s.
+ */
+
+#include <linux/init.h>
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/addrspace.h>
+#include <asm/hazards.h>
+#include <asm/bmips.h>
+
+#ifdef CONFIG_CPU_BMIPS5000
+
+
+#define cacheop(kva, size, linesize, op) \
+ .set noreorder ; \
+ addu t1, kva, size ; \
+ subu t2, linesize, 1 ; \
+ not t2 ; \
+ and t0, kva, t2 ; \
+ addiu t1, t1, -1 ; \
+ and t1, t2 ; \
+9: cache op, 0(t0) ; \
+ bne t0, t1, 9b ; \
+ addu t0, linesize ; \
+ .set reorder ;
+
+
+
+#define IS_SHIFT 22
+#define IL_SHIFT 19
+#define IA_SHIFT 16
+#define DS_SHIFT 13
+#define DL_SHIFT 10
+#define DA_SHIFT 7
+#define IS_MASK 7
+#define IL_MASK 7
+#define IA_MASK 7
+#define DS_MASK 7
+#define DL_MASK 7
+#define DA_MASK 7
+#define ICE_MASK 0x80000000
+#define DCE_MASK 0x40000000
+
+#define CP0_BRCM_CONFIG0 $22, 0
+#define CP0_BRCM_MODE $22, 1
+#define CP0_CONFIG_K0_MASK 7
+
+#define CP0_ICACHE_TAG_LO $28
+#define CP0_ICACHE_DATA_LO $28, 1
+#define CP0_DCACHE_TAG_LO $28, 2
+#define CP0_D_SEC_CACHE_DATA_LO $28, 3
+#define CP0_ICACHE_TAG_HI $29
+#define CP0_ICACHE_DATA_HI $29, 1
+#define CP0_DCACHE_TAG_HI $29, 2
+
+#define CP0_BRCM_MODE_Luc_MASK (1 << 11)
+#define CP0_BRCM_CONFIG0_CWF_MASK (1 << 20)
+#define CP0_BRCM_CONFIG0_TSE_MASK (1 << 19)
+#define CP0_BRCM_MODE_SET_MASK (1 << 7)
+#define CP0_BRCM_MODE_ClkRATIO_MASK (7 << 4)
+#define CP0_BRCM_MODE_BrPRED_MASK (3 << 24)
+#define CP0_BRCM_MODE_BrPRED_SHIFT 24
+#define CP0_BRCM_MODE_BrHIST_MASK (0x1f << 20)
+#define CP0_BRCM_MODE_BrHIST_SHIFT 20
+
+/* ZSC L2 Cache Register Access Register Definitions */
+#define BRCM_ZSC_ALL_REGS_SELECT 0x7 << 24
+
+#define BRCM_ZSC_CONFIG_REG 0 << 3
+#define BRCM_ZSC_REQ_BUFFER_REG 2 << 3
+#define BRCM_ZSC_RBUS_ADDR_MAPPING_REG0 4 << 3
+#define BRCM_ZSC_RBUS_ADDR_MAPPING_REG1 6 << 3
+#define BRCM_ZSC_RBUS_ADDR_MAPPING_REG2 8 << 3
+
+#define BRCM_ZSC_SCB0_ADDR_MAPPING_REG0 0xa << 3
+#define BRCM_ZSC_SCB0_ADDR_MAPPING_REG1 0xc << 3
+
+#define BRCM_ZSC_SCB1_ADDR_MAPPING_REG0 0xe << 3
+#define BRCM_ZSC_SCB1_ADDR_MAPPING_REG1 0x10 << 3
+
+#define BRCM_ZSC_CONFIG_LMB1En 1 << (15)
+#define BRCM_ZSC_CONFIG_LMB0En 1 << (14)
+
+/* branch prediction values */
+
+#define BRCM_BrPRED_ALL_TAKEN (0x0)
+#define BRCM_BrPRED_ALL_NOT_TAKEN (0x1)
+#define BRCM_BrPRED_BHT_ENABLE (0x2)
+#define BRCM_BrPRED_PREDICT_BACKWARD (0x3)
+
+
+
+.align 2
+/*
+ * Function: size_i_cache
+ * Arguments: None
+ * Returns:	v0 = I-cache size, v1 = I-cache line size
+ * Description: compute the I-cache size and I-cache line size
+ * Trashes: v0, v1, a0, t0
+ *
+ * pseudo code:
+ *
+ */
+
+LEAF(size_i_cache)
+ .set noreorder
+
+ mfc0 a0, CP0_CONFIG, 1
+ move t0, a0
+
+ /*
+ * Determine sets per way: IS
+ *
+ * This field contains the number of sets (i.e., indices) per way of
+ * the instruction cache:
+ * i) 0x0: 64, ii) 0x1: 128, iii) 0x2: 256, iv) 0x3: 512, v) 0x4: 1k
+ * vi) 0x5 - 0x7: Reserved.
+ */
+
+ srl a0, a0, IS_SHIFT
+ and a0, a0, IS_MASK
+
+ /* sets per way = (64<<IS) */
+
+ li v0, 0x40
+ sllv v0, v0, a0
+
+ /*
+ * Determine line size
+ *
+ * This field contains the line size of the instruction cache:
+	 * i) 0x0: No I-cache present, ii) 0x3: 16 bytes, iii) 0x4: 32 bytes,
+	 * iv) 0x5: 64 bytes, v) the rest: Reserved.
+ */
+
+ move a0, t0
+
+ srl a0, a0, IL_SHIFT
+ and a0, a0, IL_MASK
+
+ beqz a0, no_i_cache
+ nop
+
+ /* line size = 2 ^ (IL+1) */
+
+ addi a0, a0, 1
+ li v1, 1
+ sll v1, v1, a0
+
+	/* v0 now has sets per way; multiply it by the line size
+	 * to get the set size
+ */
+
+ sll v0, v0, a0
+
+ /*
+ * Determine set associativity
+ *
+ * This field contains the set associativity of the instruction cache.
+ * i) 0x0: Direct mapped, ii) 0x1: 2-way, iii) 0x2: 3-way, iv) 0x3:
+ * 4-way, v) 0x4 - 0x7: Reserved.
+ */
+
+ move a0, t0
+
+ srl a0, a0, IA_SHIFT
+ and a0, a0, IA_MASK
+ addi a0, a0, 0x1
+
+ /* v0 has the set size, multiply it by
+	 * set associativity, to get the cache size
+ */
+
+ multu v0, a0 /*multu is interlocked, so no need to insert nops */
+ mflo v0
+ b 1f
+ nop
+
+no_i_cache:
+ move v0, zero
+ move v1, zero
+1:
+ jr ra
+ nop
+ .set reorder
+
+END(size_i_cache)
+
+/*
+ * Function: size_d_cache
+ * Arguments: None
+ * Returns:	v0 = D-cache size, v1 = D-cache line size
+ * Description: compute the D-cache size and D-cache line size.
+ * Trashes: v0, v1, a0, t0
+ *
+ */
+
+LEAF(size_d_cache)
+ .set noreorder
+
+ mfc0 a0, CP0_CONFIG, 1
+ move t0, a0
+
+ /*
+	 * Determine sets per way: DS
+	 *
+	 * This field contains the number of sets (i.e., indices) per way of
+	 * the data cache:
+ * i) 0x0: 64, ii) 0x1: 128, iii) 0x2: 256, iv) 0x3: 512, v) 0x4: 1k
+ * vi) 0x5 - 0x7: Reserved.
+ */
+
+ srl a0, a0, DS_SHIFT
+ and a0, a0, DS_MASK
+
+	/* sets per way = (64<<DS) */
+
+ li v0, 0x40
+ sllv v0, v0, a0
+
+ /*
+ * Determine line size
+ *
+	 * This field contains the line size of the data cache:
+	 * i) 0x0: No D-cache present, ii) 0x3: 16 bytes, iii) 0x4: 32 bytes,
+	 * iv) 0x5: 64 bytes, v) the rest: Reserved.
+ */
+ move a0, t0
+
+ srl a0, a0, DL_SHIFT
+ and a0, a0, DL_MASK
+
+ beqz a0, no_d_cache
+ nop
+
+	/* line size = 2 ^ (DL+1) */
+
+ addi a0, a0, 1
+ li v1, 1
+ sll v1, v1, a0
+
+	/* v0 now has sets per way; multiply it by the line size
+	 * to get the set size
+ */
+
+ sll v0, v0, a0
+
+ /* determine set associativity
+ *
+	 * This field contains the set associativity of the data cache.
+ * i) 0x0: Direct mapped, ii) 0x1: 2-way, iii) 0x2: 3-way, iv) 0x3:
+ * 4-way, v) 0x4 - 0x7: Reserved.
+ */
+
+ move a0, t0
+
+ srl a0, a0, DA_SHIFT
+ and a0, a0, DA_MASK
+ addi a0, a0, 0x1
+
+ /* v0 has the set size, multiply it by
+	 * set associativity, to get the cache size
+ */
+
+ multu v0, a0 /*multu is interlocked, so no need to insert nops */
+ mflo v0
+
+ b 1f
+ nop
+
+no_d_cache:
+ move v0, zero
+ move v1, zero
+1:
+ jr ra
+ nop
+ .set reorder
+
+END(size_d_cache)
+
+
+/*
+ * Function: enable_ID
+ * Arguments: None
+ * Returns: None
+ * Description: Enable I and D caches, initialize I and D-caches, also set
+ * hardware delay for d-cache (TP0).
+ * Trashes: t0
+ *
+ */
+ .global enable_ID
+ .ent enable_ID
+ .set noreorder
+enable_ID:
+ mfc0 t0, CP0_BRCM_CONFIG0
+ or t0, t0, (ICE_MASK | DCE_MASK)
+ mtc0 t0, CP0_BRCM_CONFIG0
+ jr ra
+ nop
+
+ .end enable_ID
+ .set reorder
+
+
+/*
+ * Function: l1_init
+ * Arguments: None
+ * Returns: None
+ * Description: Enable I and D caches, and initialize I and D-caches
+ * Trashes: a0, v0, v1, t0, t1, t2, t8
+ *
+ */
+ .globl l1_init
+ .ent l1_init
+ .set noreorder
+l1_init:
+
+ /* save return address */
+ move t8, ra
+
+
+ /* initialize I and D cache Data and Tag registers. */
+ mtc0 zero, CP0_ICACHE_TAG_LO
+ mtc0 zero, CP0_ICACHE_TAG_HI
+ mtc0 zero, CP0_ICACHE_DATA_LO
+ mtc0 zero, CP0_ICACHE_DATA_HI
+ mtc0 zero, CP0_DCACHE_TAG_LO
+ mtc0 zero, CP0_DCACHE_TAG_HI
+
+ /* Enable Caches before Clearing. If the caches are disabled
+ * then the cache operations to clear the cache will be ignored
+ */
+
+ jal enable_ID
+ nop
+
+ jal size_i_cache /* v0 = i-cache size, v1 = i-cache line size */
+ nop
+
+ /* run uncached in kseg 1 */
+ la k0, 1f
+ lui k1, 0x2000
+ or k0, k1, k0
+ jr k0
+ nop
+1:
+
+ /*
+ * set K0 cache mode
+ */
+
+ mfc0 t0, CP0_CONFIG
+ and t0, t0, ~CP0_CONFIG_K0_MASK
+ or t0, t0, 3 /* Write Back mode */
+ mtc0 t0, CP0_CONFIG
+
+ /*
+ * Initialize instruction cache.
+ */
+
+ li a0, KSEG0
+ cacheop(a0, v0, v1, Index_Store_Tag_I)
+
+ /*
+ * Now we can run from I-$, kseg 0
+ */
+ la k0, 1f
+ lui k1, 0x2000
+ or k0, k1, k0
+ xor k0, k1, k0
+ jr k0
+ nop
+1:
+ /*
+ * Initialize data cache.
+ */
+
+ jal size_d_cache /* v0 = d-cache size, v1 = d-cache line size */
+ nop
+
+
+ li a0, KSEG0
+ cacheop(a0, v0, v1, Index_Store_Tag_D)
+
+ jr t8
+ nop
+
+ .end l1_init
+ .set reorder
+
+
+/*
+ * Function: set_other_config
+ * Arguments: none
+ * Returns: None
+ * Description: initialize the remaining configuration to defaults.
+ * Trashes: t0, t1
+ *
+ * pseudo code:
+ *
+ */
+LEAF(set_other_config)
+ .set noreorder
+
+ /* enable Bus error for I-fetch */
+ mfc0 t0, CP0_CACHEERR, 0
+ li t1, 0x4
+ or t0, t1
+ mtc0 t0, CP0_CACHEERR, 0
+
+ /* enable Bus error for Load */
+ mfc0 t0, CP0_CACHEERR, 1
+ li t1, 0x4
+ or t0, t1
+ mtc0 t0, CP0_CACHEERR, 1
+
+ /* enable Bus Error for Store */
+ mfc0 t0, CP0_CACHEERR, 2
+ li t1, 0x4
+ or t0, t1
+ mtc0 t0, CP0_CACHEERR, 2
+
+ jr ra
+ nop
+ .set reorder
+END(set_other_config)
+
+/*
+ * Function: set_branch_pred
+ * Arguments: none
+ * Returns: None
+ * Description:
+ * Trashes: t0, t1
+ *
+ * pseudo code:
+ *
+ */
+
+LEAF(set_branch_pred)
+ .set noreorder
+ mfc0 t0, CP0_BRCM_MODE
+ li t1, ~(CP0_BRCM_MODE_BrPRED_MASK | CP0_BRCM_MODE_BrHIST_MASK )
+ and t0, t0, t1
+
+ /* enable Branch prediction */
+ li t1, BRCM_BrPRED_BHT_ENABLE
+ sll t1, CP0_BRCM_MODE_BrPRED_SHIFT
+ or t0, t0, t1
+
+ /* set history count to 8 */
+ li t1, 8
+ sll t1, CP0_BRCM_MODE_BrHIST_SHIFT
+ or t0, t0, t1
+
+ mtc0 t0, CP0_BRCM_MODE
+ jr ra
+ nop
+ .set reorder
+END(set_branch_pred)
+
+
+/*
+ * Function: set_luc
+ * Arguments: set link uncached.
+ * Returns: None
+ * Description:
+ * Trashes: t0, t1
+ *
+ */
+LEAF(set_luc)
+ .set noreorder
+ mfc0 t0, CP0_BRCM_MODE
+ li t1, ~(CP0_BRCM_MODE_Luc_MASK)
+ and t0, t0, t1
+
+ /* set Luc */
+ ori t0, t0, CP0_BRCM_MODE_Luc_MASK
+
+ mtc0 t0, CP0_BRCM_MODE
+ jr ra
+ nop
+ .set reorder
+END(set_luc)
+
+/*
+ * Function: set_cwf_tse
+ * Arguments: set CWF and TSE bits
+ * Returns: None
+ * Description:
+ * Trashes: t0, t1
+ *
+ */
+LEAF(set_cwf_tse)
+ .set noreorder
+ mfc0 t0, CP0_BRCM_CONFIG0
+ li t1, (CP0_BRCM_CONFIG0_CWF_MASK | CP0_BRCM_CONFIG0_TSE_MASK)
+ or t0, t0, t1
+
+ mtc0 t0, CP0_BRCM_CONFIG0
+ jr ra
+ nop
+ .set reorder
+END(set_cwf_tse)
+
+/*
+ * Function: set_clock_ratio
+ * Arguments: set clock ratio specified by a0
+ * Returns: None
+ * Description:
+ * Trashes: v0, v1, a0, a1
+ *
+ * pseudo code:
+ *
+ */
+LEAF(set_clock_ratio)
+ .set noreorder
+
+ mfc0 t0, CP0_BRCM_MODE
+ li t1, ~(CP0_BRCM_MODE_SET_MASK | CP0_BRCM_MODE_ClkRATIO_MASK)
+ and t0, t0, t1
+ li t1, CP0_BRCM_MODE_SET_MASK
+ or t0, t0, t1
+ or t0, t0, a0
+ mtc0 t0, CP0_BRCM_MODE
+ jr ra
+ nop
+ .set reorder
+END(set_clock_ratio)
+/*
+ * Function: set_zephyr
+ * Arguments: None
+ * Returns: None
+ * Description: Set any zephyr bits
+ * Trashes: t0 & t1
+ *
+ */
+LEAF(set_zephyr)
+ .set noreorder
+
+ /* enable read/write of CP0 #22 sel. 8 */
+ li t0, 0x5a455048
+ .word 0x4088b00f /* mtc0 t0, $22, 15 */
+
+ .word 0x4008b008 /* mfc0 t0, $22, 8 */
+ li t1, 0x09008000 /* turn off pref, jtb */
+ or t0, t0, t1
+ .word 0x4088b008 /* mtc0 t0, $22, 8 */
+ sync
+
+ /* disable read/write of CP0 #22 sel 8 */
+ li t0, 0x0
+ .word 0x4088b00f /* mtc0 t0, $22, 15 */
+
+
+ jr ra
+ nop
+ .set reorder
+
+END(set_zephyr)
+
+
+/*
+ * Function: set_llmb
+ * Arguments: a0=0 disable llmb, a0=1 enables llmb
+ * Returns: None
+ * Description:
+ * Trashes: t0, t1, t2
+ *
+ * pseudo code:
+ *
+ */
+LEAF(set_llmb)
+ .set noreorder
+
+ li t2, 0x90000000 | BRCM_ZSC_ALL_REGS_SELECT | BRCM_ZSC_CONFIG_REG
+ sync
+ cache 0x7, 0x0(t2)
+ sync
+ mfc0 t0, CP0_D_SEC_CACHE_DATA_LO
+ li t1, ~(BRCM_ZSC_CONFIG_LMB1En | BRCM_ZSC_CONFIG_LMB0En)
+ and t0, t0, t1
+
+ beqz a0, svlmb
+ nop
+
+enable_lmb:
+ li t1, (BRCM_ZSC_CONFIG_LMB1En | BRCM_ZSC_CONFIG_LMB0En)
+ or t0, t0, t1
+
+svlmb:
+ mtc0 t0, CP0_D_SEC_CACHE_DATA_LO
+ sync
+ cache 0xb, 0x0(t2)
+ sync
+
+ jr ra
+ nop
+ .set reorder
+
+END(set_llmb)
+/*
+ * Function: core_init
+ * Arguments: none
+ * Returns: None
+ * Description: initialize core related configuration
+ * Trashes: v0,v1,a0,a1,t8
+ *
+ * pseudo code:
+ *
+ */
+ .globl core_init
+ .ent core_init
+ .set noreorder
+core_init:
+ move t8, ra
+
+ /* set Zephyr bits. */
+ bal set_zephyr
+ nop
+
+ /* set low latency memory bus */
+ li a0, 1
+ bal set_llmb
+ nop
+
+ /* set branch prediction (TP0 only) */
+ bal set_branch_pred
+ nop
+
+ /* set link uncached */
+ bal set_luc
+ nop
+
+ /* set CWF and TSE */
+ bal set_cwf_tse
+ nop
+
+ /*
+	 * set the clock ratio by setting 'set' to 1
+	 * and ClkRatio to 0 (TP0 only)
+ */
+ li a0, 0
+ bal set_clock_ratio
+ nop
+
+ /* set other configuration to defaults */
+ bal set_other_config
+ nop
+
+ move ra, t8
+ jr ra
+ nop
+
+ .set reorder
+ .end core_init
+
+/*
+ * Function: clear_jump_target_buffer
+ * Arguments: None
+ * Returns: None
+ * Description:
+ * Trashes: t0, t1, t2
+ *
+ */
+#define RESET_CALL_RETURN_STACK_THIS_THREAD (0x06<<16)
+#define RESET_JUMP_TARGET_BUFFER_THIS_THREAD (0x04<<16)
+#define JTB_CS_CNTL_MASK (0xFF<<16)
+
+ .globl clear_jump_target_buffer
+ .ent clear_jump_target_buffer
+ .set noreorder
+clear_jump_target_buffer:
+
+ mfc0 t0, $22, 2
+ nop
+ nop
+
+ li t1, ~JTB_CS_CNTL_MASK
+ and t0, t0, t1
+ li t2, RESET_CALL_RETURN_STACK_THIS_THREAD
+ or t0, t0, t2
+ mtc0 t0, $22, 2
+ nop
+ nop
+
+ and t0, t0, t1
+ li t2, RESET_JUMP_TARGET_BUFFER_THIS_THREAD
+ or t0, t0, t2
+ mtc0 t0, $22, 2
+ nop
+ nop
+ jr ra
+ nop
+
+ .end clear_jump_target_buffer
+ .set reorder
+/*
+ * Function: bmips_cache_init
+ * Arguments: None
+ * Returns: None
+ * Description: Enable I and D caches, and initialize I and D-caches
+ * Trashes: v0, v1, t0, t1, t2, t5, t7, t8
+ *
+ */
+ .globl bmips_5xxx_init
+ .ent bmips_5xxx_init
+ .set noreorder
+bmips_5xxx_init:
+
+ /* save return address and A0 */
+ move t7, ra
+ move t5, a0
+
+ jal l1_init
+ nop
+
+ jal core_init
+ nop
+
+ jal clear_jump_target_buffer
+ nop
+
+ mtc0 zero, CP0_CAUSE
+
+ move a0, t5
+ jr t7
+ nop
+
+ .end bmips_5xxx_init
+ .set reorder
+
+
+#endif
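
size_i_cache and size_d_cache above both implement the standard MIPS32 Config1 geometry decode: sets per way = 64 << S-field, line size = 2^(L-field + 1) bytes with 0 meaning no cache, associativity = A-field + 1, and the total size is the product of the three. A hypothetical C rendering of the same computation, using the I-cache field positions from the #defines above and a fabricated Config1 value:

#include <stdio.h>

#define IS_SHIFT 22
#define IL_SHIFT 19
#define IA_SHIFT 16
#define FIELD(cfg, shift) (((cfg) >> (shift)) & 7u)

int main(void)
{
	/* assumed fields: IS=2 (256 sets/way), IL=4 (32-byte line), IA=3 (4-way) */
	unsigned int config1 = (2u << IS_SHIFT) | (4u << IL_SHIFT) | (3u << IA_SHIFT);
	unsigned int sets = 64u << FIELD(config1, IS_SHIFT);
	unsigned int il   = FIELD(config1, IL_SHIFT);
	unsigned int line = il ? 1u << (il + 1) : 0;
	unsigned int ways = FIELD(config1, IA_SHIFT) + 1;

	if (!line)
		printf("no I-cache\n");
	else
		printf("I-cache: %u bytes (%u sets/way, %u-byte lines, %u-way)\n",
		       sets * line * ways, sets, line, ways);
	return 0;
}

With those assumed fields the result is 32768 bytes, i.e. a 32 KB 4-way instruction cache, which matches what the assembly leaves in v0 (total size) and v1 (line size).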
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
new file mode 100644
index 000000000..921a5fa55
--- /dev/null
+++ b/arch/mips/kernel/bmips_vec.S
@@ -0,0 +1,322 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
+ *
+ * Reset/NMI/re-entry vectors for BMIPS processors
+ */
+
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/cpu.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/addrspace.h>
+#include <asm/hazards.h>
+#include <asm/bmips.h>
+
+ .macro BARRIER
+ .set mips32
+ _ssnop
+ _ssnop
+ _ssnop
+ .set mips0
+ .endm
+
+/***********************************************************************
+ * Alternate CPU1 startup vector for BMIPS4350
+ *
+ * On some systems the bootloader has already started CPU1 and configured
+ * it to resume execution at 0x8000_0200 (!BEV IV vector) when it is
+ * triggered by the SW1 interrupt. If that is the case we try to move
+ * it to a more convenient place: BMIPS_WARM_RESTART_VEC @ 0x8000_0380.
+ ***********************************************************************/
+
+LEAF(bmips_smp_movevec)
+ la k0, 1f
+ li k1, CKSEG1
+ or k0, k1
+ jr k0
+
+1:
+ /* clear IV, pending IPIs */
+ mtc0 zero, CP0_CAUSE
+
+ /* re-enable IRQs to wait for SW1 */
+ li k0, ST0_IE | ST0_BEV | STATUSF_IP1
+ mtc0 k0, CP0_STATUS
+
+ /* set up CPU1 CBR; move BASE to 0xa000_0000 */
+ li k0, 0xff400000
+ mtc0 k0, $22, 6
+ /* set up relocation vector address based on thread ID */
+ mfc0 k1, $22, 3
+ srl k1, 16
+ andi k1, 0x8000
+ or k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0
+ or k0, k1
+ li k1, 0xa0080000
+ sw k1, 0(k0)
+
+ /* wait here for SW1 interrupt from bmips_boot_secondary() */
+ wait
+
+ la k0, bmips_reset_nmi_vec
+ li k1, CKSEG1
+ or k0, k1
+ jr k0
+END(bmips_smp_movevec)
+
+/***********************************************************************
+ * Reset/NMI vector
+ * For BMIPS processors that can relocate their exception vectors, this
+ * entire function gets copied to 0x8000_0000.
+ ***********************************************************************/
+
+NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
+ .set push
+ .set noat
+ .align 4
+
+#ifdef CONFIG_SMP
+ /* if the NMI bit is clear, assume this is a CPU1 reset instead */
+ li k1, (1 << 19)
+ mfc0 k0, CP0_STATUS
+ and k0, k1
+ beqz k0, soft_reset
+
+#if defined(CONFIG_CPU_BMIPS5000)
+ mfc0 k0, CP0_PRID
+ li k1, PRID_IMP_BMIPS5000
+ /* mask with PRID_IMP_BMIPS5000 to cover both variants */
+ andi k0, PRID_IMP_BMIPS5000
+ bne k0, k1, 1f
+
+ /* if we're not on core 0, this must be the SMP boot signal */
+ li k1, (3 << 25)
+ mfc0 k0, $22
+ and k0, k1
+ bnez k0, bmips_smp_entry
+1:
+#endif /* CONFIG_CPU_BMIPS5000 */
+#endif /* CONFIG_SMP */
+
+ /* nope, it's just a regular NMI */
+ SAVE_ALL
+ move a0, sp
+
+ /* clear EXL, ERL, BEV so that TLB refills still work */
+ mfc0 k0, CP0_STATUS
+ li k1, ST0_ERL | ST0_EXL | ST0_BEV | ST0_IE
+ or k0, k1
+ xor k0, k1
+ mtc0 k0, CP0_STATUS
+ BARRIER
+
+ /* jump to the NMI handler function */
+ la k0, nmi_handler
+ jr k0
+
+ RESTORE_ALL
+ .set arch=r4000
+ eret
+
+#ifdef CONFIG_SMP
+soft_reset:
+
+#if defined(CONFIG_CPU_BMIPS5000)
+ mfc0 k0, CP0_PRID
+ andi k0, 0xff00
+ li k1, PRID_IMP_BMIPS5200
+ bne k0, k1, bmips_smp_entry
+
+ /* if running on TP 1, jump to bmips_smp_entry */
+ mfc0 k0, $22
+ li k1, (1 << 24)
+ and k1, k0
+ bnez k1, bmips_smp_entry
+ nop
+
+ /*
+	 * running on TP0; this cannot be core 0 (the boot core).
+	 * Check for soft reset, which indicates a warm boot.
+ */
+ mfc0 k0, $12
+ li k1, (1 << 20)
+ and k0, k1
+ beqz k0, bmips_smp_entry
+
+ /*
+ * Warm boot.
+ * Cache init is only done on TP0
+ */
+ la k0, bmips_5xxx_init
+ jalr k0
+ nop
+
+ b bmips_smp_entry
+ nop
+#endif
+
+/***********************************************************************
+ * CPU1 reset vector (used for the initial boot only)
+ * This is still part of bmips_reset_nmi_vec().
+ ***********************************************************************/
+
+bmips_smp_entry:
+
+ /* set up CP0 STATUS; enable FPU */
+ li k0, 0x30000000
+ mtc0 k0, CP0_STATUS
+ BARRIER
+
+ /* set local CP0 CONFIG to make kseg0 cacheable, write-back */
+ mfc0 k0, CP0_CONFIG
+ ori k0, 0x07
+ xori k0, 0x04
+ mtc0 k0, CP0_CONFIG
+
+ mfc0 k0, CP0_PRID
+ andi k0, 0xff00
+#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
+ li k1, PRID_IMP_BMIPS43XX
+ bne k0, k1, 2f
+
+ /* initialize CPU1's local I-cache */
+ li k0, 0x80000000
+ li k1, 0x80010000
+ mtc0 zero, $28
+ mtc0 zero, $28, 1
+ BARRIER
+
+1: cache Index_Store_Tag_I, 0(k0)
+ addiu k0, 16
+ bne k0, k1, 1b
+
+ b 3f
+2:
+#endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */
+#if defined(CONFIG_CPU_BMIPS5000)
+ /* mask with PRID_IMP_BMIPS5000 to cover both variants */
+ li k1, PRID_IMP_BMIPS5000
+ andi k0, PRID_IMP_BMIPS5000
+ bne k0, k1, 3f
+
+ /* set exception vector base */
+ la k0, ebase
+ lw k0, 0(k0)
+ mtc0 k0, $15, 1
+ BARRIER
+#endif /* CONFIG_CPU_BMIPS5000 */
+3:
+ /* jump back to kseg0 in case we need to remap the kseg1 area */
+ la k0, 1f
+ jr k0
+1:
+ la k0, bmips_enable_xks01
+ jalr k0
+
+ /* use temporary stack to set up upper memory TLB */
+ li sp, BMIPS_WARM_RESTART_VEC
+ la k0, plat_wired_tlb_setup
+ jalr k0
+
+ /* switch to permanent stack and continue booting */
+
+ .global bmips_secondary_reentry
+bmips_secondary_reentry:
+ la k0, bmips_smp_boot_sp
+ lw sp, 0(k0)
+ la k0, bmips_smp_boot_gp
+ lw gp, 0(k0)
+ la k0, start_secondary
+ jr k0
+
+#endif /* CONFIG_SMP */
+
+ .align 4
+ .global bmips_reset_nmi_vec_end
+bmips_reset_nmi_vec_end:
+
+END(bmips_reset_nmi_vec)
+
+ .set pop
+
+/***********************************************************************
+ * CPU1 warm restart vector (used for second and subsequent boots).
+ * Also used for S2 standby recovery (PM).
+ * This entire function gets copied to (BMIPS_WARM_RESTART_VEC)
+ ***********************************************************************/
+
+LEAF(bmips_smp_int_vec)
+
+ .align 4
+ mfc0 k0, CP0_STATUS
+ ori k0, 0x01
+ xori k0, 0x01
+ mtc0 k0, CP0_STATUS
+ eret
+
+ .align 4
+ .global bmips_smp_int_vec_end
+bmips_smp_int_vec_end:
+
+END(bmips_smp_int_vec)
+
+/***********************************************************************
+ * XKS01 support
+ * Certain CPUs support extending kseg0 to 1024MB.
+ ***********************************************************************/
+
+LEAF(bmips_enable_xks01)
+
+#if defined(CONFIG_XKS01)
+ mfc0 t0, CP0_PRID
+ andi t2, t0, 0xff00
+#if defined(CONFIG_CPU_BMIPS4380)
+ li t1, PRID_IMP_BMIPS43XX
+ bne t2, t1, 1f
+
+ andi t0, 0xff
+ addiu t1, t0, -PRID_REV_BMIPS4380_HI
+ bgtz t1, 2f
+ addiu t0, -PRID_REV_BMIPS4380_LO
+ bltz t0, 2f
+
+ mfc0 t0, $22, 3
+ li t1, 0x1ff0
+ li t2, (1 << 12) | (1 << 9)
+ or t0, t1
+ xor t0, t1
+ or t0, t2
+ mtc0 t0, $22, 3
+ BARRIER
+ b 2f
+1:
+#endif /* CONFIG_CPU_BMIPS4380 */
+#if defined(CONFIG_CPU_BMIPS5000)
+ li t1, PRID_IMP_BMIPS5000
+ /* mask with PRID_IMP_BMIPS5000 to cover both variants */
+ andi t2, PRID_IMP_BMIPS5000
+ bne t2, t1, 2f
+
+ mfc0 t0, $22, 5
+ li t1, 0x01ff
+ li t2, (1 << 8) | (1 << 5)
+ or t0, t1
+ xor t0, t1
+ or t0, t2
+ mtc0 t0, $22, 5
+ BARRIER
+#endif /* CONFIG_CPU_BMIPS5000 */
+2:
+#endif /* defined(CONFIG_XKS01) */
+
+ jr ra
+
+END(bmips_enable_xks01)
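
Several places in the two assembly files above bounce between the cached and uncached views of the same code, for example the la/lui/or sequence in l1_init and the CKSEG1 OR in bmips_smp_movevec. The trick relies on the fixed MIPS32 mapping: kseg0 (0x80000000, cached) and kseg1 (0xa0000000, uncached) alias the same physical memory and differ only in bit 29, so setting or clearing that bit retargets a virtual address with no TLB involvement. A minimal sketch of the address arithmetic (the sample address is made up):

#include <stdint.h>
#include <stdio.h>

#define KSEG1_BIT 0x20000000u   /* bit that distinguishes kseg1 from kseg0 */

static uint32_t to_kseg1(uint32_t kseg0_addr) { return kseg0_addr | KSEG1_BIT; }
static uint32_t to_kseg0(uint32_t kseg1_addr) { return kseg1_addr & ~KSEG1_BIT; }

int main(void)
{
	uint32_t cached = 0x80010400u;                   /* assumed kseg0 address */

	printf("uncached alias: 0x%08x\n", to_kseg1(cached));           /* 0xa0010400 */
	printf("back to cached: 0x%08x\n", to_kseg0(to_kseg1(cached))); /* 0x80010400 */
	return 0;
}

The or/xor pair in l1_init is the assembly counterpart of the mask-clear above: OR sets bit 29, and XOR with the same constant then clears it.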
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
new file mode 100644
index 000000000..0216ff24c
--- /dev/null
+++ b/arch/mips/kernel/branch.c
@@ -0,0 +1,908 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 97, 2000, 2001 by Ralf Baechle
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/signal.h>
+#include <linux/export.h>
+#include <asm/branch.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
+#include <asm/inst.h>
+#include <asm/mips-r2-to-r6-emul.h>
+#include <asm/ptrace.h>
+#include <linux/uaccess.h>
+
+#include "probes-common.h"
+
+/*
+ * Calculate and return exception PC in case of branch delay slot
+ * for microMIPS and MIPS16e. It does not clear the ISA mode bit.
+ */
+int __isa_exception_epc(struct pt_regs *regs)
+{
+ unsigned short inst;
+ long epc = regs->cp0_epc;
+
+ /* Calculate exception PC in branch delay slot. */
+ if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) {
+ /* This should never happen because delay slot was checked. */
+ force_sig(SIGSEGV);
+ return epc;
+ }
+ if (cpu_has_mips16) {
+ union mips16e_instruction inst_mips16e;
+
+ inst_mips16e.full = inst;
+ if (inst_mips16e.ri.opcode == MIPS16e_jal_op)
+ epc += 4;
+ else
+ epc += 2;
+ } else if (mm_insn_16bit(inst))
+ epc += 2;
+ else
+ epc += 4;
+
+ return epc;
+}
+
+/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
+static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
+
+int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ unsigned long *contpc)
+{
+ union mips_instruction insn = (union mips_instruction)dec_insn.insn;
+ int __maybe_unused bc_false = 0;
+
+ if (!cpu_has_mmips)
+ return 0;
+
+ switch (insn.mm_i_format.opcode) {
+ case mm_pool32a_op:
+ if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
+ mm_pool32axf_op) {
+ switch (insn.mm_i_format.simmediate >>
+ MM_POOL32A_MINOR_SHIFT) {
+ case mm_jalr_op:
+ case mm_jalrhb_op:
+ case mm_jalrs_op:
+ case mm_jalrshb_op:
+ if (insn.mm_i_format.rt != 0) /* Not mm_jr */
+ regs->regs[insn.mm_i_format.rt] =
+ regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ *contpc = regs->regs[insn.mm_i_format.rs];
+ return 1;
+ }
+ }
+ break;
+ case mm_pool32i_op:
+ switch (insn.mm_i_format.rt) {
+ case mm_bltzals_op:
+ case mm_bltzal_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ fallthrough;
+ case mm_bltz_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] < 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_bgezals_op:
+ case mm_bgezal_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ fallthrough;
+ case mm_bgez_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_blez_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_bgtz_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case mm_bc2f_op:
+ case mm_bc1f_op: {
+ unsigned int fcr31;
+ unsigned int bit;
+
+ bc_false = 1;
+ fallthrough;
+ case mm_bc2t_op:
+ case mm_bc1t_op:
+ preempt_disable();
+ if (is_fpu_owner())
+ fcr31 = read_32bit_cp1_register(CP1_STATUS);
+ else
+ fcr31 = current->thread.fpu.fcr31;
+ preempt_enable();
+
+ if (bc_false)
+ fcr31 = ~fcr31;
+
+ bit = (insn.mm_i_format.rs >> 2);
+ bit += (bit != 0);
+ bit += 23;
+ if (fcr31 & (1 << bit))
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ }
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+ }
+ break;
+ case mm_pool16c_op:
+ switch (insn.mm_i_format.rt) {
+ case mm_jalr16_op:
+ case mm_jalrs16_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ fallthrough;
+ case mm_jr16_op:
+ *contpc = regs->regs[insn.mm_i_format.rs];
+ return 1;
+ }
+ break;
+ case mm_beqz16_op:
+ if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_b1_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ case mm_bnez16_op:
+ if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_b1_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ case mm_b16_op:
+ *contpc = regs->cp0_epc + dec_insn.pc_inc +
+ (insn.mm_b0_format.simmediate << 1);
+ return 1;
+ case mm_beq32_op:
+ if (regs->regs[insn.mm_i_format.rs] ==
+ regs->regs[insn.mm_i_format.rt])
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_bne32_op:
+ if (regs->regs[insn.mm_i_format.rs] !=
+ regs->regs[insn.mm_i_format.rt])
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ case mm_jalx32_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ *contpc = regs->cp0_epc + dec_insn.pc_inc;
+ *contpc >>= 28;
+ *contpc <<= 28;
+ *contpc |= (insn.j_format.target << 2);
+ return 1;
+ case mm_jals32_op:
+ case mm_jal32_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ fallthrough;
+ case mm_j32_op:
+ *contpc = regs->cp0_epc + dec_insn.pc_inc;
+ *contpc >>= 27;
+ *contpc <<= 27;
+ *contpc |= (insn.j_format.target << 1);
+ set_isa16_mode(*contpc);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Compute return address and emulate branch in microMIPS mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __microMIPS_compute_return_epc(struct pt_regs *regs)
+{
+ u16 __user *pc16;
+ u16 halfword;
+ unsigned int word;
+ unsigned long contpc;
+ struct mm_decoded_insn mminsn = { 0 };
+
+ mminsn.micro_mips_mode = 1;
+
+ /* This load never faults. */
+ pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 2;
+ word = ((unsigned int)halfword << 16);
+ mminsn.pc_inc = 2;
+
+ if (!mm_insn_16bit(halfword)) {
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 4;
+ mminsn.pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.insn = word;
+
+ if (get_user(halfword, pc16))
+ goto sigsegv;
+ mminsn.next_pc_inc = 2;
+ word = ((unsigned int)halfword << 16);
+
+ if (!mm_insn_16bit(halfword)) {
+ pc16++;
+ if (get_user(halfword, pc16))
+ goto sigsegv;
+ mminsn.next_pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.next_insn = word;
+
+ mm_isBranchInstr(regs, mminsn, &contpc);
+
+ regs->cp0_epc = contpc;
+
+ return 0;
+
+sigsegv:
+ force_sig(SIGSEGV);
+ return -EFAULT;
+}
+
+/*
+ * Compute return address and emulate branch in MIPS16e mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __MIPS16e_compute_return_epc(struct pt_regs *regs)
+{
+ u16 __user *addr;
+ union mips16e_instruction inst;
+ u16 inst2;
+ u32 fullinst;
+ long epc;
+
+ epc = regs->cp0_epc;
+
+ /* Read the instruction. */
+ addr = (u16 __user *)msk_isa16_mode(epc);
+ if (__get_user(inst.full, addr)) {
+ force_sig(SIGSEGV);
+ return -EFAULT;
+ }
+
+ switch (inst.ri.opcode) {
+ case MIPS16e_extend_op:
+ regs->cp0_epc += 4;
+ return 0;
+
+ /*
+ * JAL and JALX in MIPS16e mode
+ */
+ case MIPS16e_jal_op:
+ addr += 1;
+ if (__get_user(inst2, addr)) {
+ force_sig(SIGSEGV);
+ return -EFAULT;
+ }
+ fullinst = ((unsigned)inst.full << 16) | inst2;
+ regs->regs[31] = epc + 6;
+ epc += 4;
+ epc >>= 28;
+ epc <<= 28;
+ /*
+ * JAL:5 X:1 TARGET[20-16]:5 TARGET[25:21]:5 TARGET[15:0]:16
+ *
+ * ......TARGET[15:0].................TARGET[20:16]...........
+ * ......TARGET[25:21]
+ */
+ epc |=
+ ((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) |
+ ((fullinst & 0x1f0000) << 7);
+ if (!inst.jal.x)
+ set_isa16_mode(epc); /* Set ISA mode bit. */
+ regs->cp0_epc = epc;
+ return 0;
+
+ /*
+ * J(AL)R(C)
+ */
+ case MIPS16e_rr_op:
+ if (inst.rr.func == MIPS16e_jr_func) {
+
+ if (inst.rr.ra)
+ regs->cp0_epc = regs->regs[31];
+ else
+ regs->cp0_epc =
+ regs->regs[reg16to32[inst.rr.rx]];
+
+ if (inst.rr.l) {
+ if (inst.rr.nd)
+ regs->regs[31] = epc + 2;
+ else
+ regs->regs[31] = epc + 4;
+ }
+ return 0;
+ }
+ break;
+ }
+
+ /*
+ * All other cases have no branch delay slot and are 16-bits.
+ * Branches do not cause an exception.
+ */
+ regs->cp0_epc += 2;
+
+ return 0;
+}
+
+/**
+ * __compute_return_epc_for_insn - Compute the return address and emulate the
+ * branch, if required.
+ *
+ * @regs: Pointer to pt_regs
+ * @insn: branch instruction to decode
+ * Return: -EFAULT on error and forces SIGILL, and on success
+ * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
+ * evaluating the branch.
+ *
+ * MIPS R6 Compact branches and forbidden slots:
+ * Compact branches do not throw exceptions because they do
+ * not have delay slots. The forbidden slot instruction ($PC+4)
+ * is only executed if the branch was not taken. Otherwise the
+ * forbidden slot is skipped entirely. This means that the
+ * only way to get here because of a MIPS R6 compact branch
+ * instruction is that its forbidden slot raised the exception.
+ * In that case the branch was not taken, so the EPC can be safely
+ * set to EPC + 8.
+ */
+int __compute_return_epc_for_insn(struct pt_regs *regs,
+ union mips_instruction insn)
+{
+ long epc = regs->cp0_epc;
+ unsigned int dspcontrol;
+ int ret = 0;
+
+ switch (insn.i_format.opcode) {
+ /*
+ * jr and jalr are in r_format format.
+ */
+ case spec_op:
+ switch (insn.r_format.func) {
+ case jalr_op:
+ regs->regs[insn.r_format.rd] = epc + 8;
+ fallthrough;
+ case jr_op:
+ if (NO_R6EMU && insn.r_format.func == jr_op)
+ goto sigill_r2r6;
+ regs->cp0_epc = regs->regs[insn.r_format.rs];
+ break;
+ }
+ break;
+
+ /*
+ * This group contains:
+ * bltz_op, bgez_op, bltzl_op, bgezl_op,
+ * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+ */
+ case bcond_op:
+ switch (insn.i_format.rt) {
+ case bltzl_op:
+ if (NO_R6EMU)
+ goto sigill_r2r6;
+ fallthrough;
+ case bltz_op:
+ if ((long)regs->regs[insn.i_format.rs] < 0) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ if (insn.i_format.rt == bltzl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case bgezl_op:
+ if (NO_R6EMU)
+ goto sigill_r2r6;
+ fallthrough;
+ case bgez_op:
+ if ((long)regs->regs[insn.i_format.rs] >= 0) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ if (insn.i_format.rt == bgezl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case bltzal_op:
+ case bltzall_op:
+ if (NO_R6EMU && (insn.i_format.rs ||
+ insn.i_format.rt == bltzall_op))
+ goto sigill_r2r6;
+ regs->regs[31] = epc + 8;
+ /*
+ * OK we are here either because we hit a NAL
+ * instruction or because we are emulating an
+ * old bltzal{,l} one. Let's figure out what the
+ * case really is.
+ */
+ if (!insn.i_format.rs) {
+ /*
+ * NAL or BLTZAL with rs == 0
+ * Doesn't matter if we are R6 or not. The
+ * result is the same
+ */
+ regs->cp0_epc += 4 +
+ (insn.i_format.simmediate << 2);
+ break;
+ }
+ /* Now do the real thing for non-R6 BLTZAL{,L} */
+ if ((long)regs->regs[insn.i_format.rs] < 0) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ if (insn.i_format.rt == bltzall_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case bgezal_op:
+ case bgezall_op:
+ if (NO_R6EMU && (insn.i_format.rs ||
+ insn.i_format.rt == bgezall_op))
+ goto sigill_r2r6;
+ regs->regs[31] = epc + 8;
+ /*
+ * OK we are here either because we hit a BAL
+ * instruction or because we are emulating an
+ * old bgezal{,l} one. Let's figure out what the
+ * case really is.
+ */
+ if (!insn.i_format.rs) {
+ /*
+ * BAL or BGEZAL with rs == 0
+ * Doesn't matter if we are R6 or not. The
+ * result is the same
+ */
+ regs->cp0_epc += 4 +
+ (insn.i_format.simmediate << 2);
+ break;
+ }
+ /* Now do the real thing for non-R6 BGEZAL{,L} */
+ if ((long)regs->regs[insn.i_format.rs] >= 0) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ if (insn.i_format.rt == bgezall_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case bposge32_op:
+ if (!cpu_has_dsp)
+ goto sigill_dsp;
+
+ dspcontrol = rddsp(0x01);
+
+ if (dspcontrol >= 32) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ }
+ break;
+
+ /*
+ * These are unconditional and in j_format.
+ */
+ case jalx_op:
+ case jal_op:
+ regs->regs[31] = regs->cp0_epc + 8;
+ fallthrough;
+ case j_op:
+ epc += 4;
+ epc >>= 28;
+ epc <<= 28;
+ epc |= (insn.j_format.target << 2);
+ regs->cp0_epc = epc;
+ if (insn.i_format.opcode == jalx_op)
+ set_isa16_mode(regs->cp0_epc);
+ break;
+
+ /*
+ * These are conditional and in i_format.
+ */
+ case beql_op:
+ if (NO_R6EMU)
+ goto sigill_r2r6;
+ fallthrough;
+ case beq_op:
+ if (regs->regs[insn.i_format.rs] ==
+ regs->regs[insn.i_format.rt]) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ if (insn.i_format.opcode == beql_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case bnel_op:
+ if (NO_R6EMU)
+ goto sigill_r2r6;
+ fallthrough;
+ case bne_op:
+ if (regs->regs[insn.i_format.rs] !=
+ regs->regs[insn.i_format.rt]) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ if (insn.i_format.opcode == bnel_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case blezl_op: /* not really i_format */
+ if (!insn.i_format.rt && NO_R6EMU)
+ goto sigill_r2r6;
+ fallthrough;
+ case blez_op:
+ /*
+ * Compact branches for R6 for the
+ * blez and blezl opcodes.
+ * BLEZ | rs = 0 | rt != 0 == BLEZALC
+ * BLEZ | rs = rt != 0 == BGEZALC
+ * BLEZ | rs != 0 | rt != 0 == BGEUC
+ * BLEZL | rs = 0 | rt != 0 == BLEZC
+ * BLEZL | rs = rt != 0 == BGEZC
+ * BLEZL | rs != 0 | rt != 0 == BGEC
+ *
+ * For real BLEZ{,L}, rt is always 0.
+ */
+
+ if (cpu_has_mips_r6 && insn.i_format.rt) {
+ if ((insn.i_format.opcode == blez_op) &&
+ ((!insn.i_format.rs && insn.i_format.rt) ||
+ (insn.i_format.rs == insn.i_format.rt)))
+ regs->regs[31] = epc + 4;
+ regs->cp0_epc += 8;
+ break;
+ }
+ /* rt field assumed to be zero */
+ if ((long)regs->regs[insn.i_format.rs] <= 0) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ if (insn.i_format.opcode == blezl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case bgtzl_op:
+ if (!insn.i_format.rt && NO_R6EMU)
+ goto sigill_r2r6;
+ fallthrough;
+ case bgtz_op:
+ /*
+ * Compact branches for R6 for the
+ * bgtz and bgtzl opcodes.
+ * BGTZ | rs = 0 | rt != 0 == BGTZALC
+ * BGTZ | rs = rt != 0 == BLTZALC
+ * BGTZ | rs != 0 | rt != 0 == BLTUC
+ * BGTZL | rs = 0 | rt != 0 == BGTZC
+ * BGTZL | rs = rt != 0 == BLTZC
+ * BGTZL | rs != 0 | rt != 0 == BLTC
+ *
+	 * The *ZALC variants apply when BGTZ has rt != 0.
+	 * For real BGTZ{,L}, rt is always 0.
+ */
+ if (cpu_has_mips_r6 && insn.i_format.rt) {
+ if ((insn.i_format.opcode == blez_op) &&
+ ((!insn.i_format.rs && insn.i_format.rt) ||
+ (insn.i_format.rs == insn.i_format.rt)))
+ regs->regs[31] = epc + 4;
+ regs->cp0_epc += 8;
+ break;
+ }
+
+ /* rt field assumed to be zero */
+ if ((long)regs->regs[insn.i_format.rs] > 0) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ if (insn.i_format.opcode == bgtzl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ /*
+ * And now the FPA/cp1 branch instructions.
+ */
+ case cop1_op: {
+ unsigned int bit, fcr31, reg;
+
+ if (cpu_has_mips_r6 &&
+ ((insn.i_format.rs == bc1eqz_op) ||
+ (insn.i_format.rs == bc1nez_op))) {
+ if (!init_fp_ctx(current))
+ lose_fpu(1);
+ reg = insn.i_format.rt;
+ bit = get_fpr32(&current->thread.fpu.fpr[reg], 0) & 0x1;
+ if (insn.i_format.rs == bc1eqz_op)
+ bit = !bit;
+ own_fpu(1);
+ if (bit)
+ epc = epc + 4 +
+ (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+
+ break;
+ } else {
+
+ preempt_disable();
+ if (is_fpu_owner())
+ fcr31 = read_32bit_cp1_register(CP1_STATUS);
+ else
+ fcr31 = current->thread.fpu.fcr31;
+ preempt_enable();
+
+ bit = (insn.i_format.rt >> 2);
+ bit += (bit != 0);
+ bit += 23;
+ switch (insn.i_format.rt & 3) {
+ case 0: /* bc1f */
+ case 2: /* bc1fl */
+ if (~fcr31 & (1 << bit)) {
+ epc = epc + 4 +
+ (insn.i_format.simmediate << 2);
+ if (insn.i_format.rt == 2)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case 1: /* bc1t */
+ case 3: /* bc1tl */
+ if (fcr31 & (1 << bit)) {
+ epc = epc + 4 +
+ (insn.i_format.simmediate << 2);
+ if (insn.i_format.rt == 3)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ }
+ break;
+ }
+ }
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+ == 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ case ldc2_op: /* This is bbit032 on Octeon */
+ if ((regs->regs[insn.i_format.rs] &
+ (1ull<<(insn.i_format.rt+32))) == 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ case swc2_op: /* This is bbit1 on Octeon */
+ if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ case sdc2_op: /* This is bbit132 on Octeon */
+ if (regs->regs[insn.i_format.rs] &
+ (1ull<<(insn.i_format.rt+32)))
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+#else
+ case bc6_op:
+ /* Only valid for MIPS R6 */
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
+ regs->cp0_epc += 8;
+ break;
+ case balc6_op:
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
+ /* Compact branch: BALC */
+ regs->regs[31] = epc + 4;
+ epc += 4 + (insn.i_format.simmediate << 2);
+ regs->cp0_epc = epc;
+ break;
+ case pop66_op:
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
+ /* Compact branch: BEQZC || JIC */
+ regs->cp0_epc += 8;
+ break;
+ case pop76_op:
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
+ /* Compact branch: BNEZC || JIALC */
+ if (!insn.i_format.rs) {
+ /* JIALC: set $31/ra */
+ regs->regs[31] = epc + 4;
+ }
+ regs->cp0_epc += 8;
+ break;
+#endif
+ case pop10_op:
+ case pop30_op:
+ /* Only valid for MIPS R6 */
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
+ /*
+ * Compact branches:
+	 * bovc, beqc, beqzalc, bnvc, bnec, bnezalc
+ */
+ if (insn.i_format.rt && !insn.i_format.rs)
+ regs->regs[31] = epc + 4;
+ regs->cp0_epc += 8;
+ break;
+ }
+
+ return ret;
+
+sigill_dsp:
+ pr_debug("%s: DSP branch but not DSP ASE - sending SIGILL.\n",
+ current->comm);
+ force_sig(SIGILL);
+ return -EFAULT;
+sigill_r2r6:
+ pr_debug("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n",
+ current->comm);
+ force_sig(SIGILL);
+ return -EFAULT;
+sigill_r6:
+ pr_debug("%s: R6 branch but no MIPSr6 ISA support - sending SIGILL.\n",
+ current->comm);
+ force_sig(SIGILL);
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
+
+int __compute_return_epc(struct pt_regs *regs)
+{
+ unsigned int __user *addr;
+ long epc;
+ union mips_instruction insn;
+
+ epc = regs->cp0_epc;
+ if (epc & 3)
+ goto unaligned;
+
+ /*
+ * Read the instruction
+ */
+ addr = (unsigned int __user *) epc;
+ if (__get_user(insn.word, addr)) {
+ force_sig(SIGSEGV);
+ return -EFAULT;
+ }
+
+ return __compute_return_epc_for_insn(regs, insn);
+
+unaligned:
+ printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
+ force_sig(SIGBUS);
+ return -EFAULT;
+}
+
+#if (defined CONFIG_KPROBES) || (defined CONFIG_UPROBES)
+
+int __insn_is_compact_branch(union mips_instruction insn)
+{
+ if (!cpu_has_mips_r6)
+ return 0;
+
+ switch (insn.i_format.opcode) {
+ case blezl_op:
+ case bgtzl_op:
+ case blez_op:
+ case bgtz_op:
+ /*
+ * blez[l] and bgtz[l] opcodes with non-zero rt
+ * are MIPS R6 compact branches
+ */
+ if (insn.i_format.rt)
+ return 1;
+ break;
+ case bc6_op:
+ case balc6_op:
+ case pop10_op:
+ case pop30_op:
+ case pop66_op:
+ case pop76_op:
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__insn_is_compact_branch);
+
+#endif /* CONFIG_KPROBES || CONFIG_UPROBES */
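
Almost every case in __compute_return_epc_for_insn() above reduces to the same address arithmetic: a taken conditional branch lands at EPC + 4 + (signed 16-bit immediate << 2), a branch that is not taken (or an R6 compact branch that trapped in its forbidden slot) resumes at EPC + 8 past the delay or forbidden slot, and J/JAL keep the top four bits of PC + 4 and replace the rest with the 26-bit target shifted left by 2. A hypothetical standalone helper mirroring that arithmetic with made-up values:

#include <stdint.h>
#include <stdio.h>

static long long branch_target(long long epc, int16_t simmediate, int taken)
{
	return taken ? epc + 4 + ((long long)simmediate << 2) /* into the branch */
		     : epc + 8;                               /* skip the delay slot */
}

static long long jump_target(long long epc, uint32_t j_target_field)
{
	/* same effect as the "epc >>= 28; epc <<= 28" pair for these addresses */
	return ((epc + 4) & ~0x0fffffffLL) | ((long long)j_target_field << 2);
}

int main(void)
{
	printf("taken:     0x%llx\n",
	       (unsigned long long)branch_target(0x80001000LL, -4, 1)); /* 0x80000ff4 */
	printf("not taken: 0x%llx\n",
	       (unsigned long long)branch_target(0x80001000LL, -4, 0)); /* 0x80001008 */
	printf("jump:      0x%llx\n",
	       (unsigned long long)jump_target(0x80001000LL, 0x40));    /* 0x80000100 */
	return 0;
}

This is only the common pattern; the function above additionally writes the link register for the *AL variants and handles the branch-likely and R6 special cases.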
diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
new file mode 100644
index 000000000..529dab855
--- /dev/null
+++ b/arch/mips/kernel/cacheinfo.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MIPS cacheinfo support
+ */
+#include <linux/cacheinfo.h>
+
+/* Populates leaf and increments to next leaf */
+#define populate_cache(cache, leaf, c_level, c_type) \
+do { \
+ leaf->type = c_type; \
+ leaf->level = c_level; \
+ leaf->coherency_line_size = c->cache.linesz; \
+ leaf->number_of_sets = c->cache.sets; \
+ leaf->ways_of_associativity = c->cache.ways; \
+ leaf->size = c->cache.linesz * c->cache.sets * \
+ c->cache.ways; \
+ leaf++; \
+} while (0)
+
+int init_cache_level(unsigned int cpu)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ int levels = 0, leaves = 0;
+
+ /*
+ * If Dcache is not set, we assume the cache structures
+ * are not properly initialized.
+ */
+ if (c->dcache.waysize)
+ levels += 1;
+ else
+ return -ENOENT;
+
+
+ leaves += (c->icache.waysize) ? 2 : 1;
+
+ if (c->scache.waysize) {
+ levels++;
+ leaves++;
+ }
+
+ if (c->tcache.waysize) {
+ levels++;
+ leaves++;
+ }
+
+ this_cpu_ci->num_levels = levels;
+ this_cpu_ci->num_leaves = leaves;
+ return 0;
+}
+
+static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
+{
+ int cpu1;
+
+ for_each_possible_cpu(cpu1)
+ if (cpus_are_siblings(cpu, cpu1))
+ cpumask_set_cpu(cpu1, cpu_map);
+}
+
+static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
+{
+ int cpu1;
+ int cluster = cpu_cluster(&cpu_data[cpu]);
+
+ for_each_possible_cpu(cpu1)
+ if (cpu_cluster(&cpu_data[cpu1]) == cluster)
+ cpumask_set_cpu(cpu1, cpu_map);
+}
+
+int populate_cache_leaves(unsigned int cpu)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+
+ if (c->icache.waysize) {
+ /* L1 caches are per core */
+ fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
+ populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA);
+ fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
+ populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST);
+ } else {
+ populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED);
+ }
+
+ if (c->scache.waysize) {
+ /* L2 cache is per cluster */
+ fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
+ populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED);
+ }
+
+ if (c->tcache.waysize)
+ populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
+
+ this_cpu_ci->cpu_map_populated = true;
+
+ return 0;
+}
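
The populate_cache() macro above derives each leaf's size from the per-cache descriptor as linesz * sets * ways. A small, self-contained illustration of that geometry arithmetic (the numbers are assumed, not taken from any particular MIPS CPU):

#include <stdio.h>

struct cache_desc { unsigned int linesz, sets, ways; };

static unsigned int cache_size(const struct cache_desc *d)
{
	return d->linesz * d->sets * d->ways;   /* same formula as leaf->size */
}

int main(void)
{
	struct cache_desc l1d = { .linesz = 32,  .sets = 256, .ways = 4 };
	struct cache_desc l2  = { .linesz = 128, .sets = 512, .ways = 8 };

	printf("L1D: %u KiB\n", cache_size(&l1d) / 1024);   /* 32 KiB  */
	printf("L2:  %u KiB\n", cache_size(&l2) / 1024);    /* 512 KiB */
	return 0;
}

init_cache_level() above then counts one leaf per present cache (two at level 1 when the I and D caches are split) so the generic cacheinfo code can expose them through sysfs.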
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
new file mode 100644
index 000000000..d39a2963b
--- /dev/null
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000,2001,2004 Broadcom Corporation
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+#include <asm/sibyte/bcm1480_scd.h>
+
+#include <asm/sibyte/sb1250.h>
+
+#define IMR_IP2_VAL K_BCM1480_INT_MAP_I0
+#define IMR_IP3_VAL K_BCM1480_INT_MAP_I1
+#define IMR_IP4_VAL K_BCM1480_INT_MAP_I2
+
+/*
+ * The general purpose timer ticks at 1MHz independently of
+ * the rest of the system.
+ */
+
+static int sibyte_set_periodic(struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg, *init;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
+
+ __raw_writeq(0, cfg);
+ __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
+ return 0;
+}
+
+static int sibyte_shutdown(struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+
+ /* Stop the timer until we actually program a shot */
+ __raw_writeq(0, cfg);
+ return 0;
+}
+
+static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg, *init;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
+
+ __raw_writeq(0, cfg);
+ __raw_writeq(delta - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE, cfg);
+
+ return 0;
+}
+
+static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd = dev_id;
+ void __iomem *cfg;
+ unsigned long tmode;
+
+ if (clockevent_state_periodic(cd))
+ tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
+ else
+ tmode = 0;
+
+ /* ACK interrupt */
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ ____raw_writeq(tmode, cfg);
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
+static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
+
+void sb1480_clockevent_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
+ struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
+ unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
+ unsigned long flags = IRQF_PERCPU | IRQF_TIMER;
+
+ BUG_ON(cpu > 3); /* Only have 4 general purpose timers */
+
+ sprintf(name, "bcm1480-counter-%d", cpu);
+ cd->name = name;
+ cd->features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT;
+ clockevent_set_clock(cd, V_SCD_TIMER_FREQ);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd);
+ cd->max_delta_ticks = 0x7fffff;
+ cd->min_delta_ns = clockevent_delta2ns(2, cd);
+ cd->min_delta_ticks = 2;
+ cd->rating = 200;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = sibyte_next_event;
+ cd->set_state_shutdown = sibyte_shutdown;
+ cd->set_state_periodic = sibyte_set_periodic;
+ cd->set_state_oneshot = sibyte_shutdown;
+ clockevents_register_device(cd);
+
+ bcm1480_mask_irq(cpu, irq);
+
+ /*
+ * Map the timer interrupt to IP[4] of this cpu
+ */
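+ /* Each interrupt map entry is a 64-bit register, hence the (irq << 3) byte offset */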
+ __raw_writeq(IMR_IP4_VAL,
+ IOADDR(A_BCM1480_IMR_REGISTER(cpu,
+ R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) + (irq << 3)));
+
+ bcm1480_unmask_irq(cpu, irq);
+
+ irq_set_affinity(irq, cpumask_of(cpu));
+ if (request_irq(irq, sibyte_counter_handler, flags, name, cd))
+ pr_err("Failed to request irq %d (%s)\n", irq, name);
+}
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
new file mode 100644
index 000000000..9a47fbcd4
--- /dev/null
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DS1287 clockevent driver
+ *
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mc146818rtc.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+
+int ds1287_timer_state(void)
+{
+ return (CMOS_READ(RTC_REG_C) & RTC_PF) != 0;
+}
+
+int ds1287_set_base_clock(unsigned int hz)
+{
+ u8 rate;
+
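+ /* RTC_REG_A rate select: 0x9 = 128 Hz, 0x8 = 256 Hz, 0x6 = 1024 Hz (32.768 kHz base / 2^(rate-1)) */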
+ switch (hz) {
+ case 128:
+ rate = 0x9;
+ break;
+ case 256:
+ rate = 0x8;
+ break;
+ case 1024:
+ rate = 0x6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ CMOS_WRITE(RTC_REF_CLCK_32KHZ | rate, RTC_REG_A);
+
+ return 0;
+}
+
+static int ds1287_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ return -EINVAL;
+}
+
+static int ds1287_shutdown(struct clock_event_device *evt)
+{
+ u8 val;
+
+ spin_lock(&rtc_lock);
+
+ val = CMOS_READ(RTC_REG_B);
+ val &= ~RTC_PIE;
+ CMOS_WRITE(val, RTC_REG_B);
+
+ spin_unlock(&rtc_lock);
+ return 0;
+}
+
+static int ds1287_set_periodic(struct clock_event_device *evt)
+{
+ u8 val;
+
+ spin_lock(&rtc_lock);
+
+ val = CMOS_READ(RTC_REG_B);
+ val |= RTC_PIE;
+ CMOS_WRITE(val, RTC_REG_B);
+
+ spin_unlock(&rtc_lock);
+ return 0;
+}
+
+static void ds1287_event_handler(struct clock_event_device *dev)
+{
+}
+
+static struct clock_event_device ds1287_clockevent = {
+ .name = "ds1287",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .set_next_event = ds1287_set_next_event,
+ .set_state_shutdown = ds1287_shutdown,
+ .set_state_periodic = ds1287_set_periodic,
+ .tick_resume = ds1287_shutdown,
+ .event_handler = ds1287_event_handler,
+};
+
+static irqreturn_t ds1287_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = &ds1287_clockevent;
+
+ /* Ack the RTC interrupt. */
+ CMOS_READ(RTC_REG_C);
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+int __init ds1287_clockevent_init(int irq)
+{
+ unsigned long flags = IRQF_PERCPU | IRQF_TIMER;
+ struct clock_event_device *cd;
+
+ cd = &ds1287_clockevent;
+ cd->rating = 100;
+ cd->irq = irq;
+ clockevent_set_clock(cd, 32768);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->max_delta_ticks = 0x7fffffff;
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+ cd->min_delta_ticks = 0x300;
+ cd->cpumask = cpumask_of(0);
+
+ clockevents_register_device(&ds1287_clockevent);
+
+ return request_irq(irq, ds1287_interrupt, flags, "ds1287", NULL);
+}
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
new file mode 100644
index 000000000..5b132e8c5
--- /dev/null
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * GT641xx clockevent routines.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+
+#include <asm/gt64120.h>
+#include <asm/time.h>
+
+static DEFINE_RAW_SPINLOCK(gt641xx_timer_lock);
+static unsigned int gt641xx_base_clock;
+
+void gt641xx_set_base_clock(unsigned int clock)
+{
+ gt641xx_base_clock = clock;
+}
+
+int gt641xx_timer0_state(void)
+{
+ if (GT_READ(GT_TC0_OFS))
+ return 0;
+
+ GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ);
+ GT_WRITE(GT_TC_CONTROL_OFS, GT_TC_CONTROL_ENTC0_MSK);
+
+ return 1;
+}
+
+static int gt641xx_timer0_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ raw_spin_lock(&gt641xx_timer_lock);
+
+ ctrl = GT_READ(GT_TC_CONTROL_OFS);
+ ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
+ ctrl |= GT_TC_CONTROL_ENTC0_MSK;
+
+ GT_WRITE(GT_TC0_OFS, delta);
+ GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+ raw_spin_unlock(&gt641xx_timer_lock);
+
+ return 0;
+}
+
+static int gt641xx_timer0_shutdown(struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ raw_spin_lock(&gt641xx_timer_lock);
+
+ ctrl = GT_READ(GT_TC_CONTROL_OFS);
+ ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
+ GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+ raw_spin_unlock(&gt641xx_timer_lock);
+ return 0;
+}
+
+static int gt641xx_timer0_set_oneshot(struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ raw_spin_lock(&gt641xx_timer_lock);
+
+ ctrl = GT_READ(GT_TC_CONTROL_OFS);
+ ctrl &= ~GT_TC_CONTROL_SELTC0_MSK;
+ ctrl |= GT_TC_CONTROL_ENTC0_MSK;
+ GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+ raw_spin_unlock(&gt641xx_timer_lock);
+ return 0;
+}
+
+static int gt641xx_timer0_set_periodic(struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ raw_spin_lock(&gt641xx_timer_lock);
+
+ ctrl = GT_READ(GT_TC_CONTROL_OFS);
+ ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
+ GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+ raw_spin_unlock(&gt641xx_timer_lock);
+ return 0;
+}
+
+static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
+{
+}
+
+static struct clock_event_device gt641xx_timer0_clockevent = {
+ .name = "gt641xx-timer0",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .irq = GT641XX_TIMER0_IRQ,
+ .set_next_event = gt641xx_timer0_set_next_event,
+ .set_state_shutdown = gt641xx_timer0_shutdown,
+ .set_state_periodic = gt641xx_timer0_set_periodic,
+ .set_state_oneshot = gt641xx_timer0_set_oneshot,
+ .tick_resume = gt641xx_timer0_shutdown,
+ .event_handler = gt641xx_timer0_event_handler,
+};
+
+static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = &gt641xx_timer0_clockevent;
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static int __init gt641xx_timer0_clockevent_init(void)
+{
+ struct clock_event_device *cd;
+
+ if (!gt641xx_base_clock)
+ return 0;
+
+ GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ);
+
+ cd = &gt641xx_timer0_clockevent;
+ cd->rating = 200 + gt641xx_base_clock / 10000000;
+ clockevent_set_clock(cd, gt641xx_base_clock);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->max_delta_ticks = 0x7fffffff;
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+ cd->min_delta_ticks = 0x300;
+ cd->cpumask = cpumask_of(0);
+
+ clockevents_register_device(&gt641xx_timer0_clockevent);
+
+ return request_irq(GT641XX_TIMER0_IRQ, gt641xx_timer0_interrupt,
+ IRQF_PERCPU | IRQF_TIMER, "gt641xx_timer0", NULL);
+}
+arch_initcall(gt641xx_timer0_clockevent_init);
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
new file mode 100644
index 000000000..995ad9e69
--- /dev/null
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -0,0 +1,345 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/cpufreq.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+static int mips_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned int cnt;
+ int res;
+
+ cnt = read_c0_count();
+ cnt += delta;
+ write_c0_compare(cnt);
+ res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
+ return res;
+}
+
+/**
+ * calculate_min_delta() - Calculate a good minimum delta for mips_next_event().
+ *
+ * Running under virtualisation can introduce overhead into mips_next_event() in
+ * the form of hypervisor emulation of CP0_Count/CP0_Compare registers,
+ * potentially with an unnatural frequency, which makes a fixed min_delta_ns
+ * value inappropriate as it may be too small.
+ *
+ * It can also introduce occasional latency from the guest being descheduled.
+ *
+ * This function calculates a good minimum delta based roughly on the 75th
+ * percentile of the time taken to do the mips_next_event() sequence, in order
+ * to handle potentially higher overhead while also eliminating outliers due to
+ * unpredictable hypervisor latency (which can be handled by retries).
+ *
+ * Return: An appropriate minimum delta for the clock event device.
+ */
+static unsigned int calculate_min_delta(void)
+{
+ unsigned int cnt, i, j, k, l;
+ unsigned int buf1[4], buf2[3];
+ unsigned int min_delta;
+
+ /*
+ * Calculate the median of 5 75th percentiles of 5 samples of how long
+ * it takes to set CP0_Compare = CP0_Count + delta.
+ */
+ for (i = 0; i < 5; ++i) {
+ for (j = 0; j < 5; ++j) {
+ /*
+ * This is like the code in mips_next_event(), and
+ * directly measures the borderline "safe" delta.
+ */
+ cnt = read_c0_count();
+ write_c0_compare(cnt);
+ cnt = read_c0_count() - cnt;
+
+ /* Sorted insert into buf1 */
+ for (k = 0; k < j; ++k) {
+ if (cnt < buf1[k]) {
+ l = min_t(unsigned int,
+ j, ARRAY_SIZE(buf1) - 1);
+ for (; l > k; --l)
+ buf1[l] = buf1[l - 1];
+ break;
+ }
+ }
+ if (k < ARRAY_SIZE(buf1))
+ buf1[k] = cnt;
+ }
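+ /* buf1 now holds the 4 smallest of the 5 samples in ascending order, so buf1[3] approximates the 75th percentile */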
+
+ /* Sorted insert of 75th percentile into buf2 */
+ for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) {
+ if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
+ l = min_t(unsigned int,
+ i, ARRAY_SIZE(buf2) - 1);
+ for (; l > k; --l)
+ buf2[l] = buf2[l - 1];
+ break;
+ }
+ }
+ if (k < ARRAY_SIZE(buf2))
+ buf2[k] = buf1[ARRAY_SIZE(buf1) - 1];
+ }
+
+ /* Use 2 * median of 75th percentiles */
+ min_delta = buf2[ARRAY_SIZE(buf2) - 1] * 2;
+
+ /* Don't go too low */
+ if (min_delta < 0x300)
+ min_delta = 0x300;
+
+ pr_debug("%s: median 75th percentile=%#x, min_delta=%#x\n",
+ __func__, buf2[ARRAY_SIZE(buf2) - 1], min_delta);
+ return min_delta;
+}
+
+DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+int cp0_timer_irq_installed;
+
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+static inline int handle_perf_irq(int r2)
+{
+ /*
+ * The performance counter overflow interrupt may be shared with the
+ * timer interrupt (cp0_perfcount_irq < 0). If it is and a
+ * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+ * and we can't reliably determine if a counter interrupt has also
+ * happened (!r2) then don't check for a timer interrupt.
+ */
+ return (cp0_perfcount_irq < 0) &&
+ perf_irq() == IRQ_HANDLED &&
+ !r2;
+}
+
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+{
+ const int r2 = cpu_has_mips_r2_r6;
+ struct clock_event_device *cd;
+ int cpu = smp_processor_id();
+
+ /*
+ * Suckage alert:
+ * Before R2 of the architecture there was no way to see if a
+ * performance counter interrupt was pending, so we have to run
+ * the performance counter interrupt handler anyway.
+ */
+ if (handle_perf_irq(r2))
+ return IRQ_HANDLED;
+
+ /*
+ * The same applies to performance counter interrupts. But with the
+ * above we now know that the reason we got here must be a timer
+ * interrupt. Being the paranoiacs we are we check anyway.
+ */
+ if (!r2 || (read_c0_cause() & CAUSEF_TI)) {
+ /* Clear Count/Compare Interrupt */
+ write_c0_compare(read_c0_compare());
+ cd = &per_cpu(mips_clockevent_device, cpu);
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+struct irqaction c0_compare_irqaction = {
+ .handler = c0_compare_interrupt,
+ /*
+ * IRQF_SHARED: The timer interrupt may be shared with other interrupts
+ * such as perf counter and FDC interrupts.
+ */
+ .flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED,
+ .name = "timer",
+};
+
+
+void mips_event_handler(struct clock_event_device *dev)
+{
+}
+
+/*
+ * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
+ */
+static int c0_compare_int_pending(void)
+{
+ /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */
+ return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
+}
+
+/*
+ * Compare interrupt can be routed and latched outside the core,
+ * so wait up to worst case number of cycle counter ticks for timer interrupt
+ * changes to propagate to the cause register.
+ */
+#define COMPARE_INT_SEEN_TICKS 50
+
+int c0_compare_int_usable(void)
+{
+ unsigned int delta;
+ unsigned int cnt;
+
+#ifdef CONFIG_KVM_GUEST
+ return 1;
+#endif
+
+ /*
+ * IP7 already pending? Try to clear it by acking the timer.
+ */
+ if (c0_compare_int_pending()) {
+ cnt = read_c0_count();
+ write_c0_compare(cnt);
+ back_to_back_c0_hazard();
+ while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
+ if (!c0_compare_int_pending())
+ break;
+ if (c0_compare_int_pending())
+ return 0;
+ }
+
+ for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
+ cnt = read_c0_count();
+ cnt += delta;
+ write_c0_compare(cnt);
+ back_to_back_c0_hazard();
+ if ((int)(read_c0_count() - cnt) < 0)
+ break;
+ /* increase delta if the timer was already expired */
+ }
+
+ while ((int)(read_c0_count() - cnt) <= 0)
+ ; /* Wait for expiry */
+
+ while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
+ if (c0_compare_int_pending())
+ break;
+ if (!c0_compare_int_pending())
+ return 0;
+ cnt = read_c0_count();
+ write_c0_compare(cnt);
+ back_to_back_c0_hazard();
+ while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
+ if (!c0_compare_int_pending())
+ break;
+ if (c0_compare_int_pending())
+ return 0;
+
+ /*
+ * Feels like a real count / compare timer.
+ */
+ return 1;
+}
+
+unsigned int __weak get_c0_compare_int(void)
+{
+ return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+}
+
+#ifdef CONFIG_CPU_FREQ
+
+static unsigned long mips_ref_freq;
+
+static int r4k_cpufreq_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ struct clock_event_device *cd;
+ unsigned long rate;
+ int cpu;
+
+ if (!mips_ref_freq)
+ mips_ref_freq = freq->old;
+
+ if (val == CPUFREQ_POSTCHANGE) {
+ rate = cpufreq_scale(mips_hpt_frequency, mips_ref_freq,
+ freq->new);
+
+ for_each_cpu(cpu, freq->policy->cpus) {
+ cd = &per_cpu(mips_clockevent_device, cpu);
+
+ clockevents_update_freq(cd, rate);
+ }
+ }
+
+ return 0;
+}
+
+static struct notifier_block r4k_cpufreq_notifier = {
+ .notifier_call = r4k_cpufreq_callback,
+};
+
+static int __init r4k_register_cpufreq_notifier(void)
+{
+ return cpufreq_register_notifier(&r4k_cpufreq_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+}
+core_initcall(r4k_register_cpufreq_notifier);
+
+#endif /* !CONFIG_CPU_FREQ */
+
+int r4k_clockevent_init(void)
+{
+ unsigned long flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+ unsigned int irq, min_delta;
+
+ if (!cpu_has_counter || !mips_hpt_frequency)
+ return -ENXIO;
+
+ if (!c0_compare_int_usable())
+ return -ENXIO;
+
+ /*
+ * With vectored interrupts things are getting platform specific.
+ * get_c0_compare_int is a hook to allow a platform to return the
+ * interrupt number of its liking.
+ */
+ irq = get_c0_compare_int();
+
+ cd = &per_cpu(mips_clockevent_device, cpu);
+
+ cd->name = "MIPS";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_C3STOP |
+ CLOCK_EVT_FEAT_PERCPU;
+
+ min_delta = calculate_min_delta();
+
+ cd->rating = 300;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = mips_next_event;
+ cd->event_handler = mips_event_handler;
+
+ clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff);
+
+ if (cp0_timer_irq_installed)
+ return 0;
+
+ cp0_timer_irq_installed = 1;
+
+ if (request_irq(irq, c0_compare_interrupt, flags, "timer",
+ c0_compare_interrupt))
+ pr_err("Failed to request irq %d (timer)\n", irq);
+
+ return 0;
+}
+
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
new file mode 100644
index 000000000..0451273fa
--- /dev/null
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000, 2001 Broadcom Corporation
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+#include <asm/sibyte/sb1250_scd.h>
+
+#define IMR_IP2_VAL K_INT_MAP_I0
+#define IMR_IP3_VAL K_INT_MAP_I1
+#define IMR_IP4_VAL K_INT_MAP_I2
+
+/*
+ * The general purpose timer ticks at 1MHz independently of
+ * the rest of the system.
+ */
+
+static int sibyte_shutdown(struct clock_event_device *evt)
+{
+ void __iomem *cfg;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(smp_processor_id(), R_SCD_TIMER_CFG));
+
+ /* Stop the timer until we actually program a shot */
+ __raw_writeq(0, cfg);
+
+ return 0;
+}
+
+static int sibyte_set_periodic(struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg, *init;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
+
+ __raw_writeq(0, cfg);
+ __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
+
+ return 0;
+}
+
+static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg, *init;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
+
+ __raw_writeq(0, cfg);
+ __raw_writeq(delta - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE, cfg);
+
+ return 0;
+}
+
+static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd = dev_id;
+ void __iomem *cfg;
+ unsigned long tmode;
+
+ if (clockevent_state_periodic(cd))
+ tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
+ else
+ tmode = 0;
+
+ /* ACK interrupt */
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ ____raw_writeq(tmode, cfg);
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
+static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
+
+void sb1250_clockevent_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned int irq = K_INT_TIMER_0 + cpu;
+ struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
+ unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
+ unsigned long flags = IRQF_PERCPU | IRQF_TIMER;
+
+ /* Only have 4 general purpose timers, and we use last one as hpt */
+ BUG_ON(cpu > 2);
+
+ sprintf(name, "sb1250-counter-%d", cpu);
+ cd->name = name;
+ cd->features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT;
+ clockevent_set_clock(cd, V_SCD_TIMER_FREQ);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd);
+ cd->max_delta_ticks = 0x7fffff;
+ cd->min_delta_ns = clockevent_delta2ns(2, cd);
+ cd->min_delta_ticks = 2;
+ cd->rating = 200;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = sibyte_next_event;
+ cd->set_state_shutdown = sibyte_shutdown;
+ cd->set_state_periodic = sibyte_set_periodic;
+ cd->set_state_oneshot = sibyte_shutdown;
+ clockevents_register_device(cd);
+
+ sb1250_mask_irq(cpu, irq);
+
+ /*
+ * Map the timer interrupt to IP[4] of this cpu
+ */
+ __raw_writeq(IMR_IP4_VAL,
+ IOADDR(A_IMR_REGISTER(cpu, R_IMR_INTERRUPT_MAP_BASE) +
+ (irq << 3)));
+
+ sb1250_unmask_irq(cpu, irq);
+
+ irq_set_affinity(irq, cpumask_of(cpu));
+ if (request_irq(irq, sibyte_counter_handler, flags, name, cd))
+ pr_err("Failed to request irq %d (%s)\n", irq, name);
+}
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
new file mode 100644
index 000000000..5709469c2
--- /dev/null
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -0,0 +1,220 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Based on linux/arch/mips/kernel/cevt-r4k.c,
+ * linux/arch/mips/jmr3927/rbhma3100/setup.c
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/sched_clock.h>
+#include <asm/time.h>
+#include <asm/txx9tmr.h>
+
+#define TCR_BASE (TXx9_TMTCR_CCDE | TXx9_TMTCR_CRE | TXx9_TMTCR_TMODE_ITVL)
+#define TIMER_CCD 0 /* 1/2 */
+#define TIMER_CLK(imclk) ((imclk) / (2 << TIMER_CCD))
+
+struct txx9_clocksource {
+ struct clocksource cs;
+ struct txx9_tmr_reg __iomem *tmrptr;
+};
+
+static u64 txx9_cs_read(struct clocksource *cs)
+{
+ struct txx9_clocksource *txx9_cs =
+ container_of(cs, struct txx9_clocksource, cs);
+ return __raw_readl(&txx9_cs->tmrptr->trr);
+}
+
+/* Use 1 bit smaller width to use full bits in that width */
+#define TXX9_CLOCKSOURCE_BITS (TXX9_TIMER_BITS - 1)
+
+static struct txx9_clocksource txx9_clocksource = {
+ .cs = {
+ .name = "TXx9",
+ .rating = 200,
+ .read = txx9_cs_read,
+ .mask = CLOCKSOURCE_MASK(TXX9_CLOCKSOURCE_BITS),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ },
+};
+
+static u64 notrace txx9_read_sched_clock(void)
+{
+ return __raw_readl(&txx9_clocksource.tmrptr->trr);
+}
+
+void __init txx9_clocksource_init(unsigned long baseaddr,
+ unsigned int imbusclk)
+{
+ struct txx9_tmr_reg __iomem *tmrptr;
+
+ clocksource_register_hz(&txx9_clocksource.cs, TIMER_CLK(imbusclk));
+
+ tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
+ __raw_writel(TCR_BASE, &tmrptr->tcr);
+ __raw_writel(0, &tmrptr->tisr);
+ __raw_writel(TIMER_CCD, &tmrptr->ccdr);
+ __raw_writel(TXx9_TMITMR_TZCE, &tmrptr->itmr);
+ __raw_writel(1 << TXX9_CLOCKSOURCE_BITS, &tmrptr->cpra);
+ __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ txx9_clocksource.tmrptr = tmrptr;
+
+ sched_clock_register(txx9_read_sched_clock, TXX9_CLOCKSOURCE_BITS,
+ TIMER_CLK(imbusclk));
+}
+
+struct txx9_clock_event_device {
+ struct clock_event_device cd;
+ struct txx9_tmr_reg __iomem *tmrptr;
+};
+
+static void txx9tmr_stop_and_clear(struct txx9_tmr_reg __iomem *tmrptr)
+{
+ /* stop and reset counter */
+ __raw_writel(TCR_BASE, &tmrptr->tcr);
+ /* clear pending interrupt */
+ __raw_writel(0, &tmrptr->tisr);
+}
+
+static int txx9tmr_set_state_periodic(struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+
+ __raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE, &tmrptr->itmr);
+ /* start timer */
+ __raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >> evt->shift,
+ &tmrptr->cpra);
+ __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ return 0;
+}
+
+static int txx9tmr_set_state_oneshot(struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ __raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr);
+ return 0;
+}
+
+static int txx9tmr_set_state_shutdown(struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ __raw_writel(0, &tmrptr->itmr);
+ return 0;
+}
+
+static int txx9tmr_tick_resume(struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ __raw_writel(TIMER_CCD, &tmrptr->ccdr);
+ __raw_writel(0, &tmrptr->itmr);
+ return 0;
+}
+
+static int txx9tmr_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ /* start timer */
+ __raw_writel(delta, &tmrptr->cpra);
+ __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ return 0;
+}
+
+static struct txx9_clock_event_device txx9_clock_event_device = {
+ .cd = {
+ .name = "TXx9",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_state_shutdown = txx9tmr_set_state_shutdown,
+ .set_state_periodic = txx9tmr_set_state_periodic,
+ .set_state_oneshot = txx9tmr_set_state_oneshot,
+ .tick_resume = txx9tmr_tick_resume,
+ .set_next_event = txx9tmr_set_next_event,
+ },
+};
+
+static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id)
+{
+ struct txx9_clock_event_device *txx9_cd = dev_id;
+ struct clock_event_device *cd = &txx9_cd->cd;
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ __raw_writel(0, &tmrptr->tisr); /* ack interrupt */
+ cd->event_handler(cd);
+ return IRQ_HANDLED;
+}
+
+void __init txx9_clockevent_init(unsigned long baseaddr, int irq,
+ unsigned int imbusclk)
+{
+ struct clock_event_device *cd = &txx9_clock_event_device.cd;
+ struct txx9_tmr_reg __iomem *tmrptr;
+
+ tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
+ txx9tmr_stop_and_clear(tmrptr);
+ __raw_writel(TIMER_CCD, &tmrptr->ccdr);
+ __raw_writel(0, &tmrptr->itmr);
+ txx9_clock_event_device.tmrptr = tmrptr;
+
+ clockevent_set_clock(cd, TIMER_CLK(imbusclk));
+ cd->max_delta_ns =
+ clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd);
+ cd->max_delta_ticks = 0xffffffff >> (32 - TXX9_TIMER_BITS);
+ cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
+ cd->min_delta_ticks = 0xf;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(0);
+ clockevents_register_device(cd);
+ if (request_irq(irq, txx9tmr_interrupt, IRQF_PERCPU | IRQF_TIMER,
+ "txx9tmr", &txx9_clock_event_device))
+ pr_err("Failed to request irq %d (txx9tmr)\n", irq);
+ printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n",
+ baseaddr, irq);
+}
+
+void __init txx9_tmr_init(unsigned long baseaddr)
+{
+ struct txx9_tmr_reg __iomem *tmrptr;
+
+ tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
+ /* Start once to make CounterResetEnable effective */
+ __raw_writel(TXx9_TMTCR_CRE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ /* Stop and reset the counter */
+ __raw_writel(TXx9_TMTCR_CRE, &tmrptr->tcr);
+ __raw_writel(0, &tmrptr->tisr);
+ __raw_writel(0xffffffff, &tmrptr->cpra);
+ __raw_writel(0, &tmrptr->itmr);
+ __raw_writel(0, &tmrptr->ccdr);
+ __raw_writel(0, &tmrptr->pgmr);
+ iounmap(tmrptr);
+}
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
new file mode 100644
index 000000000..89107deb0
--- /dev/null
+++ b/arch/mips/kernel/cmpxchg.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/bitops.h>
+#include <asm/cmpxchg.h>
+
+unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
+{
+ u32 old32, new32, load32, mask;
+ volatile u32 *ptr32;
+ unsigned int shift;
+
+ /* Check that ptr is naturally aligned */
+ WARN_ON((unsigned long)ptr & (size - 1));
+
+ /* Mask value to the correct size. */
+ mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+ val &= mask;
+
+ /*
+ * Calculate a shift & mask that correspond to the value we wish to
+ * exchange within the naturally aligned 4 byte integer that includes
+ * it.
+ */
+ shift = (unsigned long)ptr & 0x3;
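+ /* On big-endian CPUs the lowest address holds the most significant byte, so mirror the byte offset within the word */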
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ shift ^= sizeof(u32) - size;
+ shift *= BITS_PER_BYTE;
+ mask <<= shift;
+
+ /*
+ * Calculate a pointer to the naturally aligned 4 byte integer that
+ * includes our byte of interest, and load its value.
+ */
+ ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+ load32 = *ptr32;
+
+ do {
+ old32 = load32;
+ new32 = (load32 & ~mask) | (val << shift);
+ load32 = cmpxchg(ptr32, old32, new32);
+ } while (load32 != old32);
+
+ return (load32 & mask) >> shift;
+}
+
+unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size)
+{
+ u32 mask, old32, new32, load32, load;
+ volatile u32 *ptr32;
+ unsigned int shift;
+
+ /* Check that ptr is naturally aligned */
+ WARN_ON((unsigned long)ptr & (size - 1));
+
+ /* Mask inputs to the correct size. */
+ mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+ old &= mask;
+ new &= mask;
+
+ /*
+ * Calculate a shift & mask that correspond to the value we wish to
+ * compare & exchange within the naturally aligned 4 byte integer
+ * that includes it.
+ */
+ shift = (unsigned long)ptr & 0x3;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ shift ^= sizeof(u32) - size;
+ shift *= BITS_PER_BYTE;
+ mask <<= shift;
+
+ /*
+ * Calculate a pointer to the naturally aligned 4 byte integer that
+ * includes our byte of interest, and load its value.
+ */
+ ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+ load32 = *ptr32;
+
+ while (true) {
+ /*
+ * Ensure the byte we want to exchange matches the expected
+ * old value, and if not then bail.
+ */
+ load = (load32 & mask) >> shift;
+ if (load != old)
+ return load;
+
+ /*
+ * Calculate the old & new values of the naturally aligned
+ * 4 byte integer that include the byte we want to exchange.
+ * Attempt to exchange the old value for the new value, and
+ * return if we succeed.
+ */
+ old32 = (load32 & ~mask) | (old << shift);
+ new32 = (load32 & ~mask) | (new << shift);
+ load32 = cmpxchg(ptr32, old32, new32);
+ if (load32 == old32)
+ return old;
+ }
+}
diff --git a/arch/mips/kernel/cps-vec-ns16550.S b/arch/mips/kernel/cps-vec-ns16550.S
new file mode 100644
index 000000000..30725e1df
--- /dev/null
+++ b/arch/mips/kernel/cps-vec-ns16550.S
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <linux/serial_reg.h>
+
+#define UART_TX_OFS (UART_TX << CONFIG_MIPS_CPS_NS16550_SHIFT)
+#define UART_LSR_OFS (UART_LSR << CONFIG_MIPS_CPS_NS16550_SHIFT)
+
+#if CONFIG_MIPS_CPS_NS16550_WIDTH == 1
+# define UART_L lb
+# define UART_S sb
+#elif CONFIG_MIPS_CPS_NS16550_WIDTH == 2
+# define UART_L lh
+# define UART_S sh
+#elif CONFIG_MIPS_CPS_NS16550_WIDTH == 4
+# define UART_L lw
+# define UART_S sw
+#else
+# define UART_L lb
+# define UART_S sb
+#endif
+
+/**
+ * _mips_cps_putc() - write a character to the UART
+ * @a0: ASCII character to write
+ * @t9: UART base address
+ */
+LEAF(_mips_cps_putc)
+1: UART_L t0, UART_LSR_OFS(t9)
+ andi t0, t0, UART_LSR_TEMT
+ beqz t0, 1b
+ UART_S a0, UART_TX_OFS(t9)
+ jr ra
+ END(_mips_cps_putc)
+
+/**
+ * _mips_cps_puts() - write a string to the UART
+ * @a0: pointer to NULL-terminated ASCII string
+ * @t9: UART base address
+ *
+ * Write a null-terminated ASCII string to the UART.
+ */
+NESTED(_mips_cps_puts, 0, ra)
+ move s7, ra
+ move s6, a0
+
+1: lb a0, 0(s6)
+ beqz a0, 2f
+ jal _mips_cps_putc
+ PTR_ADDIU s6, s6, 1
+ b 1b
+
+2: jr s7
+ END(_mips_cps_puts)
+
+/**
+ * _mips_cps_putx4 - write a 4b hex value to the UART
+ * @a0: the 4b value to write to the UART
+ * @t9: UART base address
+ *
+ * Write a single hexadecimal character to the UART.
+ */
+NESTED(_mips_cps_putx4, 0, ra)
+ andi a0, a0, 0xf
+ li t0, '0'
+ blt a0, 10, 1f
+ li t0, 'a'
+ addiu a0, a0, -10
+1: addu a0, a0, t0
+ b _mips_cps_putc
+ END(_mips_cps_putx4)
+
+/**
+ * _mips_cps_putx8 - write an 8b hex value to the UART
+ * @a0: the 8b value to write to the UART
+ * @t9: UART base address
+ *
+ * Write an 8 bit value (ie. 2 hexadecimal characters) to the UART.
+ */
+NESTED(_mips_cps_putx8, 0, ra)
+ move s3, ra
+ move s2, a0
+ srl a0, a0, 4
+ jal _mips_cps_putx4
+ move a0, s2
+ move ra, s3
+ b _mips_cps_putx4
+ END(_mips_cps_putx8)
+
+/**
+ * _mips_cps_putx16 - write a 16b hex value to the UART
+ * @a0: the 16b value to write to the UART
+ * @t9: UART base address
+ *
+ * Write a 16 bit value (ie. 4 hexadecimal characters) to the UART.
+ */
+NESTED(_mips_cps_putx16, 0, ra)
+ move s5, ra
+ move s4, a0
+ srl a0, a0, 8
+ jal _mips_cps_putx8
+ move a0, s4
+ move ra, s5
+ b _mips_cps_putx8
+ END(_mips_cps_putx16)
+
+/**
+ * _mips_cps_putx32 - write a 32b hex value to the UART
+ * @a0: the 32b value to write to the UART
+ * @t9: UART base address
+ *
+ * Write a 32 bit value (ie. 8 hexadecimal characters) to the UART.
+ */
+NESTED(_mips_cps_putx32, 0, ra)
+ move s7, ra
+ move s6, a0
+ srl a0, a0, 16
+ jal _mips_cps_putx16
+ move a0, s6
+ move ra, s7
+ b _mips_cps_putx16
+ END(_mips_cps_putx32)
+
+#ifdef CONFIG_64BIT
+
+/**
+ * _mips_cps_putx64 - write a 64b hex value to the UART
+ * @a0: the 64b value to write to the UART
+ * @t9: UART base address
+ *
+ * Write a 64 bit value (ie. 16 hexadecimal characters) to the UART.
+ */
+NESTED(_mips_cps_putx64, 0, ra)
+ move sp, ra
+ move s8, a0
+ dsrl32 a0, a0, 0
+ jal _mips_cps_putx32
+ move a0, s8
+ move ra, sp
+ b _mips_cps_putx32
+ END(_mips_cps_putx64)
+
+#define _mips_cps_putxlong _mips_cps_putx64
+
+#else /* !CONFIG_64BIT */
+
+#define _mips_cps_putxlong _mips_cps_putx32
+
+#endif /* !CONFIG_64BIT */
+
+/**
+ * mips_cps_bev_dump() - dump relevant exception state to UART
+ * @a0: pointer to NULL-terminated ASCII string naming the exception
+ *
+ * Write information that may be useful in debugging an exception to the
+ * UART configured by CONFIG_MIPS_CPS_NS16550_*. As this BEV exception
+ * will only be run if something goes horribly wrong very early during
+ * the bringup of a core and it is very likely to be unsafe to perform
+ * memory accesses at that point (cache state indeterminate, EVA may not
+ * be configured, coherence may be disabled) let alone have a stack,
+ * this is all written in assembly using only registers & unmapped
+ * uncached access to the UART registers.
+ */
+LEAF(mips_cps_bev_dump)
+ move s0, ra
+ move s1, a0
+
+ li t9, CKSEG1ADDR(CONFIG_MIPS_CPS_NS16550_BASE)
+
+ PTR_LA a0, str_newline
+ jal _mips_cps_puts
+ PTR_LA a0, str_bev
+ jal _mips_cps_puts
+ move a0, s1
+ jal _mips_cps_puts
+ PTR_LA a0, str_newline
+ jal _mips_cps_puts
+ PTR_LA a0, str_newline
+ jal _mips_cps_puts
+
+#define DUMP_COP0_REG(reg, name, sz, _mfc0) \
+ PTR_LA a0, 8f; \
+ jal _mips_cps_puts; \
+ _mfc0 a0, reg; \
+ jal _mips_cps_putx##sz; \
+ PTR_LA a0, str_newline; \
+ jal _mips_cps_puts; \
+ TEXT(name)
+
+ DUMP_COP0_REG(CP0_CAUSE, "Cause: 0x", 32, mfc0)
+ DUMP_COP0_REG(CP0_STATUS, "Status: 0x", 32, mfc0)
+ DUMP_COP0_REG(CP0_EBASE, "EBase: 0x", long, MFC0)
+ DUMP_COP0_REG(CP0_BADVADDR, "BadVAddr: 0x", long, MFC0)
+ DUMP_COP0_REG(CP0_BADINSTR, "BadInstr: 0x", 32, mfc0)
+
+ PTR_LA a0, str_newline
+ jal _mips_cps_puts
+ jr s0
+ END(mips_cps_bev_dump)
+
+.pushsection .data
+str_bev: .asciiz "BEV Exception: "
+str_newline: .asciiz "\r\n"
+.popsection
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
new file mode 100644
index 000000000..4db7ff055
--- /dev/null
+++ b/arch/mips/kernel/cps-vec.S
@@ -0,0 +1,630 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/eva.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/pm.h>
+
+#define GCR_CPC_BASE_OFS 0x0088
+#define GCR_CL_COHERENCE_OFS 0x2008
+#define GCR_CL_ID_OFS 0x2028
+
+#define CPC_CL_VC_STOP_OFS 0x2020
+#define CPC_CL_VC_RUN_OFS 0x2028
+
+.extern mips_cm_base
+
+.set noreorder
+
+#ifdef CONFIG_64BIT
+# define STATUS_BITDEPS ST0_KX
+#else
+# define STATUS_BITDEPS 0
+#endif
+
+#ifdef CONFIG_MIPS_CPS_NS16550
+
+#define DUMP_EXCEP(name) \
+ PTR_LA a0, 8f; \
+ jal mips_cps_bev_dump; \
+ nop; \
+ TEXT(name)
+
+#else /* !CONFIG_MIPS_CPS_NS16550 */
+
+#define DUMP_EXCEP(name)
+
+#endif /* !CONFIG_MIPS_CPS_NS16550 */
+
+ /*
+ * Set dest to non-zero if the core supports the MT ASE, else zero. If
+ * MT is not supported then branch to nomt.
+ */
+ .macro has_mt dest, nomt
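+ /* Bit 31 (M) of each Config register indicates whether the next Config register is present */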
+ mfc0 \dest, CP0_CONFIG, 1
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 2
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 3
+ andi \dest, \dest, MIPS_CONF3_MT
+ beqz \dest, \nomt
+ nop
+ .endm
+
+ /*
+ * Set dest to non-zero if the core supports MIPSr6 multithreading
+ * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
+ * branch to nomt.
+ */
+ .macro has_vp dest, nomt
+ mfc0 \dest, CP0_CONFIG, 1
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 2
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 3
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 4
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 5
+ andi \dest, \dest, MIPS_CONF5_VP
+ beqz \dest, \nomt
+ nop
+ .endm
+
+ /* Calculate an uncached address for the CM GCRs */
+ .macro cmgcrb dest
+ .set push
+ .set noat
+ MFC0 $1, CP0_CMGCRBASE
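+ /* CMGCRBase holds the GCR physical base address shifted right by 4 */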
+ PTR_SLL $1, $1, 4
+ PTR_LI \dest, UNCAC_BASE
+ PTR_ADDU \dest, \dest, $1
+ .set pop
+ .endm
+
+.section .text.cps-vec
+.balign 0x1000
+
+LEAF(mips_cps_core_entry)
+ /*
+ * These first 4 bytes will be patched by cps_smp_setup to load the
+ * CCA to use into register s0.
+ */
+ .word 0
+
+ /* Check whether we're here due to an NMI */
+ mfc0 k0, CP0_STATUS
+ and k0, k0, ST0_NMI
+ beqz k0, not_nmi
+ nop
+
+ /* This is an NMI */
+ PTR_LA k0, nmi_handler
+ jr k0
+ nop
+
+not_nmi:
+ /* Setup Cause */
+ li t0, CAUSEF_IV
+ mtc0 t0, CP0_CAUSE
+
+ /* Setup Status */
+ li t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
+ mtc0 t0, CP0_STATUS
+
+ /* Skip cache & coherence setup if we're already coherent */
+ cmgcrb v1
+ lw s7, GCR_CL_COHERENCE_OFS(v1)
+ bnez s7, 1f
+ nop
+
+ /* Initialize the L1 caches */
+ jal mips_cps_cache_init
+ nop
+
+ /* Enter the coherent domain */
+ li t0, 0xff
+ sw t0, GCR_CL_COHERENCE_OFS(v1)
+ ehb
+
+ /* Set Kseg0 CCA to that in s0 */
+1: mfc0 t0, CP0_CONFIG
+ ori t0, 0x7
+ xori t0, 0x7
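+ /* ori then xori with 0x7 has cleared the Config.K0 CCA field */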
+ or t0, t0, s0
+ mtc0 t0, CP0_CONFIG
+ ehb
+
+ /* Jump to kseg0 */
+ PTR_LA t0, 1f
+ jr t0
+ nop
+
+ /*
+ * We're up, cached & coherent. Perform any EVA initialization necessary
+ * before we access memory.
+ */
+1: eva_init
+
+ /* Retrieve boot configuration pointers */
+ jal mips_cps_get_bootcfg
+ nop
+
+ /* Skip core-level init if we started up coherent */
+ bnez s7, 1f
+ nop
+
+ /* Perform any further required core-level initialisation */
+ jal mips_cps_core_init
+ nop
+
+ /*
+ * Boot any other VPEs within this core that should be online, and
+ * deactivate this VPE if it should be offline.
+ */
+ move a1, t9
+ jal mips_cps_boot_vpes
+ move a0, v0
+
+ /* Off we go! */
+1: PTR_L t1, VPEBOOTCFG_PC(v1)
+ PTR_L gp, VPEBOOTCFG_GP(v1)
+ PTR_L sp, VPEBOOTCFG_SP(v1)
+ jr t1
+ nop
+ END(mips_cps_core_entry)
+
+.org 0x200
+LEAF(excep_tlbfill)
+ DUMP_EXCEP("TLB Fill")
+ b .
+ nop
+ END(excep_tlbfill)
+
+.org 0x280
+LEAF(excep_xtlbfill)
+ DUMP_EXCEP("XTLB Fill")
+ b .
+ nop
+ END(excep_xtlbfill)
+
+.org 0x300
+LEAF(excep_cache)
+ DUMP_EXCEP("Cache")
+ b .
+ nop
+ END(excep_cache)
+
+.org 0x380
+LEAF(excep_genex)
+ DUMP_EXCEP("General")
+ b .
+ nop
+ END(excep_genex)
+
+.org 0x400
+LEAF(excep_intex)
+ DUMP_EXCEP("Interrupt")
+ b .
+ nop
+ END(excep_intex)
+
+.org 0x480
+LEAF(excep_ejtag)
+ PTR_LA k0, ejtag_debug_handler
+ jr k0
+ nop
+ END(excep_ejtag)
+
+LEAF(mips_cps_core_init)
+#ifdef CONFIG_MIPS_MT_SMP
+ /* Check that the core implements the MT ASE */
+ has_mt t0, 3f
+
+ .set push
+ .set MIPS_ISA_LEVEL_RAW
+ .set mt
+
+ /* Only allow 1 TC per VPE to execute... */
+ dmt
+
+ /* ...and for the moment only 1 VPE */
+ dvpe
+ PTR_LA t1, 1f
+ jr.hb t1
+ nop
+
+ /* Enter VPE configuration state */
+1: mfc0 t0, CP0_MVPCONTROL
+ ori t0, t0, MVPCONTROL_VPC
+ mtc0 t0, CP0_MVPCONTROL
+
+ /* Retrieve the number of VPEs within the core */
+ mfc0 t0, CP0_MVPCONF0
+ srl t0, t0, MVPCONF0_PVPE_SHIFT
+ andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
+ addiu ta3, t0, 1
+
+ /* If there's only 1, we're done */
+ beqz t0, 2f
+ nop
+
+ /* Loop through each VPE within this core */
+ li ta1, 1
+
+1: /* Operate on the appropriate TC */
+ mtc0 ta1, CP0_VPECONTROL
+ ehb
+
+ /* Bind TC to VPE (1:1 TC:VPE mapping) */
+ mttc0 ta1, CP0_TCBIND
+
+ /* Set exclusive TC, non-active, master */
+ li t0, VPECONF0_MVP
+ sll t1, ta1, VPECONF0_XTC_SHIFT
+ or t0, t0, t1
+ mttc0 t0, CP0_VPECONF0
+
+ /* Set TC non-active, non-allocatable */
+ mttc0 zero, CP0_TCSTATUS
+
+ /* Set TC halted */
+ li t0, TCHALT_H
+ mttc0 t0, CP0_TCHALT
+
+ /* Next VPE */
+ addiu ta1, ta1, 1
+ slt t0, ta1, ta3
+ bnez t0, 1b
+ nop
+
+ /* Leave VPE configuration state */
+2: mfc0 t0, CP0_MVPCONTROL
+ xori t0, t0, MVPCONTROL_VPC
+ mtc0 t0, CP0_MVPCONTROL
+
+3: .set pop
+#endif
+ jr ra
+ nop
+ END(mips_cps_core_init)
+
+/**
+ * mips_cps_get_bootcfg() - retrieve boot configuration pointers
+ *
+ * Returns: pointer to struct core_boot_config in v0, pointer to
+ * struct vpe_boot_config in v1, VPE ID in t9
+ */
+LEAF(mips_cps_get_bootcfg)
+ /* Calculate a pointer to this core's struct core_boot_config */
+ cmgcrb t0
+ lw t0, GCR_CL_ID_OFS(t0)
+ li t1, COREBOOTCFG_SIZE
+ mul t0, t0, t1
+ PTR_LA t1, mips_cps_core_bootcfg
+ PTR_L t1, 0(t1)
+ PTR_ADDU v0, t0, t1
+
+ /* Calculate this VPE's ID. If the core doesn't support MT use 0 */
+ li t9, 0
+#if defined(CONFIG_CPU_MIPSR6)
+ has_vp ta2, 1f
+
+ /*
+ * Assume non-contiguous numbering. Perhaps some day we'll need
+ * to handle contiguous VP numbering, but no such systems yet
+ * exist.
+ */
+ mfc0 t9, CP0_GLOBALNUMBER
+ andi t9, t9, MIPS_GLOBALNUMBER_VP
+#elif defined(CONFIG_MIPS_MT_SMP)
+ has_mt ta2, 1f
+
+ /* Find the number of VPEs present in the core */
+ mfc0 t1, CP0_MVPCONF0
+ srl t1, t1, MVPCONF0_PVPE_SHIFT
+ andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
+ addiu t1, t1, 1
+
+ /* Calculate a mask for the VPE ID from EBase.CPUNum */
+ clz t1, t1
+ li t2, 31
+ subu t1, t2, t1
+ li t2, 1
+ sll t1, t2, t1
+ addiu t1, t1, -1
+
+ /* Retrieve the VPE ID from EBase.CPUNum */
+ mfc0 t9, $15, 1
+ and t9, t9, t1
+#endif
+
+1: /* Calculate a pointer to this VPE's struct vpe_boot_config */
+ li t1, VPEBOOTCFG_SIZE
+ mul v1, t9, t1
+ PTR_L ta3, COREBOOTCFG_VPECONFIG(v0)
+ PTR_ADDU v1, v1, ta3
+
+ jr ra
+ nop
+ END(mips_cps_get_bootcfg)
+
+LEAF(mips_cps_boot_vpes)
+ lw ta2, COREBOOTCFG_VPEMASK(a0)
+ PTR_L ta3, COREBOOTCFG_VPECONFIG(a0)
+
+#if defined(CONFIG_CPU_MIPSR6)
+
+ has_vp t0, 5f
+
+ /* Find base address of CPC */
+ cmgcrb t3
+ PTR_L t1, GCR_CPC_BASE_OFS(t3)
+ PTR_LI t2, ~0x7fff
+ and t1, t1, t2
+ PTR_LI t2, UNCAC_BASE
+ PTR_ADD t1, t1, t2
+
+ /* Start any other VPs that ought to be running */
+ PTR_S ta2, CPC_CL_VC_RUN_OFS(t1)
+
+ /* Ensure this VP stops running if it shouldn't be */
+ not ta2
+ PTR_S ta2, CPC_CL_VC_STOP_OFS(t1)
+ ehb
+
+#elif defined(CONFIG_MIPS_MT)
+
+ /* If the core doesn't support MT then return */
+ has_mt t0, 5f
+
+ /* Enter VPE configuration state */
+ .set push
+ .set MIPS_ISA_LEVEL_RAW
+ .set mt
+ dvpe
+ .set pop
+
+ PTR_LA t1, 1f
+ jr.hb t1
+ nop
+1: mfc0 t1, CP0_MVPCONTROL
+ ori t1, t1, MVPCONTROL_VPC
+ mtc0 t1, CP0_MVPCONTROL
+ ehb
+
+ /* Loop through each VPE */
+ move t8, ta2
+ li ta1, 0
+
+ /* Check whether the VPE should be running. If not, skip it */
+1: andi t0, ta2, 1
+ beqz t0, 2f
+ nop
+
+ /* Operate on the appropriate TC */
+ mfc0 t0, CP0_VPECONTROL
+ ori t0, t0, VPECONTROL_TARGTC
+ xori t0, t0, VPECONTROL_TARGTC
+ or t0, t0, ta1
+ mtc0 t0, CP0_VPECONTROL
+ ehb
+
+ .set push
+ .set MIPS_ISA_LEVEL_RAW
+ .set mt
+
+ /* Skip the VPE if its TC is not halted */
+ mftc0 t0, CP0_TCHALT
+ beqz t0, 2f
+ nop
+
+ /* Calculate a pointer to the VPE's struct vpe_boot_config */
+ li t0, VPEBOOTCFG_SIZE
+ mul t0, t0, ta1
+ addu t0, t0, ta3
+
+ /* Set the TC restart PC */
+ lw t1, VPEBOOTCFG_PC(t0)
+ mttc0 t1, CP0_TCRESTART
+
+ /* Set the TC stack pointer */
+ lw t1, VPEBOOTCFG_SP(t0)
+ mttgpr t1, sp
+
+ /* Set the TC global pointer */
+ lw t1, VPEBOOTCFG_GP(t0)
+ mttgpr t1, gp
+
+ /* Copy config from this VPE */
+ mfc0 t0, CP0_CONFIG
+ mttc0 t0, CP0_CONFIG
+
+ /*
+ * Copy the EVA config from this VPE if the CPU supports it.
+ * CONFIG3 must exist to be running MT startup - just read it.
+ */
+ mfc0 t0, CP0_CONFIG, 3
+ and t0, t0, MIPS_CONF3_SC
+ beqz t0, 3f
+ nop
+ mfc0 t0, CP0_SEGCTL0
+ mttc0 t0, CP0_SEGCTL0
+ mfc0 t0, CP0_SEGCTL1
+ mttc0 t0, CP0_SEGCTL1
+ mfc0 t0, CP0_SEGCTL2
+ mttc0 t0, CP0_SEGCTL2
+3:
+ /* Ensure no software interrupts are pending */
+ mttc0 zero, CP0_CAUSE
+ mttc0 zero, CP0_STATUS
+
+ /* Set TC active, not interrupt exempt */
+ mftc0 t0, CP0_TCSTATUS
+ li t1, ~TCSTATUS_IXMT
+ and t0, t0, t1
+ ori t0, t0, TCSTATUS_A
+ mttc0 t0, CP0_TCSTATUS
+
+ /* Clear the TC halt bit */
+ mttc0 zero, CP0_TCHALT
+
+ /* Set VPE active */
+ mftc0 t0, CP0_VPECONF0
+ ori t0, t0, VPECONF0_VPA
+ mttc0 t0, CP0_VPECONF0
+
+ /* Next VPE */
+2: srl ta2, ta2, 1
+ addiu ta1, ta1, 1
+ bnez ta2, 1b
+ nop
+
+ /* Leave VPE configuration state */
+ mfc0 t1, CP0_MVPCONTROL
+ xori t1, t1, MVPCONTROL_VPC
+ mtc0 t1, CP0_MVPCONTROL
+ ehb
+ evpe
+
+ .set pop
+
+ /* Check whether this VPE is meant to be running */
+ li t0, 1
+ sll t0, t0, a1
+ and t0, t0, t8
+ bnez t0, 2f
+ nop
+
+ /* This VPE should be offline, halt the TC */
+ li t0, TCHALT_H
+ mtc0 t0, CP0_TCHALT
+ PTR_LA t0, 1f
+1: jr.hb t0
+ nop
+
+2:
+
+#endif /* CONFIG_MIPS_MT_SMP */
+
+ /* Return */
+5: jr ra
+ nop
+ END(mips_cps_boot_vpes)
+
+LEAF(mips_cps_cache_init)
+ /*
+ * Clear the bits used to index the caches. Note that the architecture
+ * dictates that writes to any of the TagLo or TagHi registers with
+ * select 0 or 2 should be valid for all MIPS32 CPUs, even those for
+ * which said writes are unnecessary.
+ */
+ mtc0 zero, CP0_TAGLO, 0
+ mtc0 zero, CP0_TAGHI, 0
+ mtc0 zero, CP0_TAGLO, 2
+ mtc0 zero, CP0_TAGHI, 2
+ ehb
+
+ /* Primary cache configuration is indicated by Config1 */
+ mfc0 v0, CP0_CONFIG, 1
+
+ /* Detect I-cache line size */
+ _EXT t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
+ beqz t0, icache_done
+ li t1, 2
+ sllv t0, t1, t0
+
+ /* Detect I-cache size */
+ _EXT t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
+ xori t2, t1, 0x7
+ beqz t2, 1f
+ li t3, 32
+ addiu t1, t1, 1
+ sllv t1, t3, t1
+1: /* At this point t1 == I-cache sets per way */
+ _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
+ addiu t2, t2, 1
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+ li a0, CKSEG0
+ PTR_ADD a1, a0, t1
+1: cache Index_Store_Tag_I, 0(a0)
+ PTR_ADD a0, a0, t0
+ bne a0, a1, 1b
+ nop
+icache_done:
+
+ /* Detect D-cache line size */
+ _EXT t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
+ beqz t0, dcache_done
+ li t1, 2
+ sllv t0, t1, t0
+
+ /* Detect D-cache size */
+ _EXT t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
+ xori t2, t1, 0x7
+ beqz t2, 1f
+ li t3, 32
+ addiu t1, t1, 1
+ sllv t1, t3, t1
+1: /* At this point t1 == D-cache sets per way */
+ _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
+ addiu t2, t2, 1
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+ li a0, CKSEG0
+ PTR_ADDU a1, a0, t1
+ PTR_SUBU a1, a1, t0
+1: cache Index_Store_Tag_D, 0(a0)
+ bne a0, a1, 1b
+ PTR_ADD a0, a0, t0
+dcache_done:
+
+ jr ra
+ nop
+ END(mips_cps_cache_init)
+
+#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
+
+ /* Calculate a pointer to this CPU's struct mips_static_suspend_state */
+ .macro psstate dest
+ .set push
+ .set noat
+ lw $1, TI_CPU(gp)
+ sll $1, $1, LONGLOG
+ PTR_LA \dest, __per_cpu_offset
+ addu $1, $1, \dest
+ lw $1, 0($1)
+ PTR_LA \dest, cps_cpu_state
+ addu \dest, \dest, $1
+ .set pop
+ .endm
+
+LEAF(mips_cps_pm_save)
+ /* Save CPU state */
+ SUSPEND_SAVE_REGS
+ psstate t1
+ SUSPEND_SAVE_STATIC
+ jr v0
+ nop
+ END(mips_cps_pm_save)
+
+LEAF(mips_cps_pm_restore)
+ /* Restore CPU state */
+ psstate t1
+ RESUME_RESTORE_STATIC
+ RESUME_RESTORE_REGS_RETURN
+ END(mips_cps_pm_restore)
+
+#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
new file mode 100644
index 000000000..f8d1933bf
--- /dev/null
+++ b/arch/mips/kernel/cpu-probe.c
@@ -0,0 +1,2170 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Processor capabilities determination functions.
+ *
+ * Copyright (C) xxxx the Anonymous
+ * Copyright (C) 1994 - 2006 Ralf Baechle
+ * Copyright (C) 2003, 2004 Maciej W. Rozycki
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/export.h>
+
+#include <asm/bugs.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/msa.h>
+#include <asm/watch.h>
+#include <asm/elf.h>
+#include <asm/pgtable-bits.h>
+#include <asm/spram.h>
+#include <linux/uaccess.h>
+
+#include "fpu-probe.h"
+
+#include <asm/mach-loongson64/cpucfg-emul.h>
+
+/* Hardware capabilities */
+unsigned int elf_hwcap __read_mostly;
+EXPORT_SYMBOL_GPL(elf_hwcap);
+
+static inline unsigned long cpu_get_msa_id(void)
+{
+ unsigned long status, msa_id;
+
+ status = read_c0_status();
+ __enable_fpu(FPU_64BIT);
+ enable_msa();
+ msa_id = read_msa_ir();
+ disable_msa();
+ write_c0_status(status);
+ return msa_id;
+}
+
+static int mips_dsp_disabled;
+
+static int __init dsp_disable(char *s)
+{
+ cpu_data[0].ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P);
+ mips_dsp_disabled = 1;
+
+ return 1;
+}
+
+__setup("nodsp", dsp_disable);
+
+static int mips_htw_disabled;
+
+static int __init htw_disable(char *s)
+{
+ mips_htw_disabled = 1;
+ cpu_data[0].options &= ~MIPS_CPU_HTW;
+ write_c0_pwctl(read_c0_pwctl() &
+ ~(1 << MIPS_PWCTL_PWEN_SHIFT));
+
+ return 1;
+}
+
+__setup("nohtw", htw_disable);
+
+static int mips_ftlb_disabled;
+static int mips_has_ftlb_configured;
+
+enum ftlb_flags {
+ FTLB_EN = 1 << 0,
+ FTLB_SET_PROB = 1 << 1,
+};
+
+static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags);
+
+static int __init ftlb_disable(char *s)
+{
+ unsigned int config4, mmuextdef;
+
+ /*
+ * If the core hasn't done any FTLB configuration, there is nothing
+ * for us to do here.
+ */
+ if (!mips_has_ftlb_configured)
+ return 1;
+
+ /* Disable it in the boot cpu */
+ if (set_ftlb_enable(&cpu_data[0], 0)) {
+ pr_warn("Can't turn FTLB off\n");
+ return 1;
+ }
+
+ config4 = read_c0_config4();
+
+ /* Check that FTLB has been disabled */
+ mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+ /* MMUSIZEEXT == VTLB ON, FTLB OFF */
+ if (mmuextdef == MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT) {
+ /* This should never happen */
+ pr_warn("FTLB could not be disabled!\n");
+ return 1;
+ }
+
+ mips_ftlb_disabled = 1;
+ mips_has_ftlb_configured = 0;
+
+ /*
+ * noftlb is mainly used for debug purposes so print
+ * an informative message instead of using pr_debug()
+ */
+ pr_info("FTLB has been disabled\n");
+
+ /*
+ * Some of these bits are duplicated in decode_config4().
+ * MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT is the only possible case
+ * once FTLB has been disabled so undo what decode_config4 did.
+ */
+ cpu_data[0].tlbsize -= cpu_data[0].tlbsizeftlbways *
+ cpu_data[0].tlbsizeftlbsets;
+ cpu_data[0].tlbsizeftlbsets = 0;
+ cpu_data[0].tlbsizeftlbways = 0;
+
+ return 1;
+}
+
+__setup("noftlb", ftlb_disable);
+
+/*
+ * Check if the CPU has per tc perf counters
+ */
+static inline void cpu_set_mt_per_tc_perf(struct cpuinfo_mips *c)
+{
+ if (read_c0_config7() & MTI_CONF7_PTC)
+ c->options |= MIPS_CPU_MT_PER_TC_PERF_COUNTERS;
+}
+
+static inline void check_errata(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ switch (current_cpu_type()) {
+ case CPU_34K:
+ /*
+ * Erratum "RPS May Cause Incorrect Instruction Execution"
+ * This code only handles VPE0, any SMP/RTOS code
+ * making use of VPE1 will be responsible for that VPE.
+ */
+ if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
+ write_c0_config7(read_c0_config7() | MIPS_CONF7_RPS);
+ break;
+ default:
+ break;
+ }
+}
+
+void __init check_bugs32(void)
+{
+ check_errata();
+}
+
+/*
+ * Probe whether cpu has config register by trying to play with
+ * alternate cache bit and see whether it matters.
+ * It's used by cpu_probe to distinguish between R3000A and R3081.
+ */
+static inline int cpu_has_confreg(void)
+{
+#ifdef CONFIG_CPU_R3000
+ extern unsigned long r3k_cache_size(unsigned long);
+ unsigned long size1, size2;
+ unsigned long cfg = read_c0_conf();
+
+ size1 = r3k_cache_size(ST0_ISC);
+ write_c0_conf(cfg ^ R30XX_CONF_AC);
+ size2 = r3k_cache_size(ST0_ISC);
+ write_c0_conf(cfg);
+ return size1 != size2;
+#else
+ return 0;
+#endif
+}
+
+static inline void set_elf_platform(int cpu, const char *plat)
+{
+ if (cpu == 0)
+ __elf_platform = plat;
+}
+
+static inline void set_elf_base_platform(const char *plat)
+{
+ if (__elf_base_platform == NULL) {
+ __elf_base_platform = plat;
+ }
+}
+
+static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
+{
+#ifdef __NEED_VMBITS_PROBE
+ write_c0_entryhi(0x3fffffffffffe000ULL);
+ back_to_back_c0_hazard();
+ c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
+#endif
+}
+
+static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
+{
+ switch (isa) {
+ case MIPS_CPU_ISA_M64R5:
+ c->isa_level |= MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5;
+ set_elf_base_platform("mips64r5");
+ fallthrough;
+ case MIPS_CPU_ISA_M64R2:
+ c->isa_level |= MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2;
+ set_elf_base_platform("mips64r2");
+ fallthrough;
+ case MIPS_CPU_ISA_M64R1:
+ c->isa_level |= MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1;
+ set_elf_base_platform("mips64");
+ fallthrough;
+ case MIPS_CPU_ISA_V:
+ c->isa_level |= MIPS_CPU_ISA_V;
+ set_elf_base_platform("mips5");
+ fallthrough;
+ case MIPS_CPU_ISA_IV:
+ c->isa_level |= MIPS_CPU_ISA_IV;
+ set_elf_base_platform("mips4");
+ fallthrough;
+ case MIPS_CPU_ISA_III:
+ c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III;
+ set_elf_base_platform("mips3");
+ break;
+
+ /* R6 incompatible with everything else */
+ case MIPS_CPU_ISA_M64R6:
+ c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6;
+ set_elf_base_platform("mips64r6");
+ fallthrough;
+ case MIPS_CPU_ISA_M32R6:
+ c->isa_level |= MIPS_CPU_ISA_M32R6;
+ set_elf_base_platform("mips32r6");
+ /* Break here so we don't add incompatible ISAs */
+ break;
+ case MIPS_CPU_ISA_M32R5:
+ c->isa_level |= MIPS_CPU_ISA_M32R5;
+ set_elf_base_platform("mips32r5");
+ fallthrough;
+ case MIPS_CPU_ISA_M32R2:
+ c->isa_level |= MIPS_CPU_ISA_M32R2;
+ set_elf_base_platform("mips32r2");
+ fallthrough;
+ case MIPS_CPU_ISA_M32R1:
+ c->isa_level |= MIPS_CPU_ISA_M32R1;
+ set_elf_base_platform("mips32");
+ fallthrough;
+ case MIPS_CPU_ISA_II:
+ c->isa_level |= MIPS_CPU_ISA_II;
+ set_elf_base_platform("mips2");
+ break;
+ }
+}
+
+static char unknown_isa[] = KERN_ERR \
+ "Unsupported ISA type, c0.config0: %d.";
+
+static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
+{
+
+ unsigned int probability = c->tlbsize / c->tlbsizevtlb;
+
+ /*
+ * 0 = All TLBWR instructions go to FTLB
+ * 1 = 15:1: For every 16 TLBWR instructions, 15 go to the
+ * FTLB and 1 goes to the VTLB.
+ * 2 = 7:1: As above with 7:1 ratio.
+ * 3 = 3:1: As above with 3:1 ratio.
+ *
+ * Use the linear midpoint as the probability threshold.
+ */
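+ /*
+ * Worked example (hypothetical sizes): a 64-entry VTLB plus a
+ * 512-entry FTLB gives tlbsize = 576, so probability = 576 / 64 = 9,
+ * which falls between the thresholds of 6 and 12 below and selects
+ * the 7:1 split (value 2).
+ */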
+ if (probability >= 12)
+ return 1;
+ else if (probability >= 6)
+ return 2;
+ else
+ /*
+ * So FTLB is less than 4 times bigger than VTLB.
+ * A 3:1 ratio can still be useful though.
+ */
+ return 3;
+}
+
+static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags)
+{
+ unsigned int config;
+
+ /* It's implementation dependent how the FTLB can be enabled */
+ switch (c->cputype) {
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ case CPU_P6600:
+ /* proAptiv & related cores use Config6 to enable the FTLB */
+ config = read_c0_config6();
+
+ if (flags & FTLB_EN)
+ config |= MTI_CONF6_FTLBEN;
+ else
+ config &= ~MTI_CONF6_FTLBEN;
+
+ if (flags & FTLB_SET_PROB) {
+ config &= ~(3 << MTI_CONF6_FTLBP_SHIFT);
+ config |= calculate_ftlb_probability(c)
+ << MTI_CONF6_FTLBP_SHIFT;
+ }
+
+ write_c0_config6(config);
+ back_to_back_c0_hazard();
+ break;
+ case CPU_I6400:
+ case CPU_I6500:
+ /* There's no way to disable the FTLB */
+ if (!(flags & FTLB_EN))
+ return 1;
+ return 0;
+ case CPU_LOONGSON64:
+ /* Flush ITLB, DTLB, VTLB and FTLB */
+ write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB |
+ LOONGSON_DIAG_VTLB | LOONGSON_DIAG_FTLB);
+ /* Loongson-3 cores use Config6 to enable the FTLB */
+ config = read_c0_config6();
+ if (flags & FTLB_EN)
+ /* Enable FTLB */
+ write_c0_config6(config & ~LOONGSON_CONF6_FTLBDIS);
+ else
+ /* Disable FTLB */
+ write_c0_config6(config | LOONGSON_CONF6_FTLBDIS);
+ break;
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+static int mm_config(struct cpuinfo_mips *c)
+{
+ unsigned int config0, update, mm;
+
+ config0 = read_c0_config();
+ mm = config0 & MIPS_CONF_MM;
+
+ /*
+ * It's implementation dependent what type of write-merge is supported
+ * and whether it can be enabled/disabled. If it is settable, let's allow
+ * merging by default. Some platforms might not support write-through
+ * caching; in that case just ignore the
+ * CP0.Config.MM bit field value.
+ */
+ switch (c->cputype) {
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_74K:
+ case CPU_P5600:
+ case CPU_P6600:
+ c->options |= MIPS_CPU_MM_FULL;
+ update = MIPS_CONF_MM_FULL;
+ break;
+ case CPU_1004K:
+ case CPU_1074K:
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ mm = 0;
+ fallthrough;
+ default:
+ update = 0;
+ break;
+ }
+
+ if (update) {
+ config0 = (config0 & ~MIPS_CONF_MM) | update;
+ write_c0_config(config0);
+ } else if (mm == MIPS_CONF_MM_SYSAD) {
+ c->options |= MIPS_CPU_MM_SYSAD;
+ } else if (mm == MIPS_CONF_MM_FULL) {
+ c->options |= MIPS_CPU_MM_FULL;
+ }
+
+ return 0;
+}
+
+static inline unsigned int decode_config0(struct cpuinfo_mips *c)
+{
+ unsigned int config0;
+ int isa, mt;
+
+ config0 = read_c0_config();
+
+ /*
+ * Look for Standard TLB or Dual VTLB and FTLB
+ */
+ mt = config0 & MIPS_CONF_MT;
+ if (mt == MIPS_CONF_MT_TLB)
+ c->options |= MIPS_CPU_TLB;
+ else if (mt == MIPS_CONF_MT_FTLB)
+ c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB;
+
+ isa = (config0 & MIPS_CONF_AT) >> 13;
+ switch (isa) {
+ case 0:
+ switch ((config0 & MIPS_CONF_AR) >> 10) {
+ case 0:
+ set_isa(c, MIPS_CPU_ISA_M32R1);
+ break;
+ case 1:
+ set_isa(c, MIPS_CPU_ISA_M32R2);
+ break;
+ case 2:
+ set_isa(c, MIPS_CPU_ISA_M32R6);
+ break;
+ default:
+ goto unknown;
+ }
+ break;
+ case 2:
+ switch ((config0 & MIPS_CONF_AR) >> 10) {
+ case 0:
+ set_isa(c, MIPS_CPU_ISA_M64R1);
+ break;
+ case 1:
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ break;
+ case 2:
+ set_isa(c, MIPS_CPU_ISA_M64R6);
+ break;
+ default:
+ goto unknown;
+ }
+ break;
+ default:
+ goto unknown;
+ }
+
+ return config0 & MIPS_CONF_M;
+
+unknown:
+ panic(unknown_isa, config0);
+}
+
+static inline unsigned int decode_config1(struct cpuinfo_mips *c)
+{
+ unsigned int config1;
+
+ config1 = read_c0_config1();
+
+ if (config1 & MIPS_CONF1_MD)
+ c->ases |= MIPS_ASE_MDMX;
+ if (config1 & MIPS_CONF1_PC)
+ c->options |= MIPS_CPU_PERF;
+ if (config1 & MIPS_CONF1_WR)
+ c->options |= MIPS_CPU_WATCH;
+ if (config1 & MIPS_CONF1_CA)
+ c->ases |= MIPS_ASE_MIPS16;
+ if (config1 & MIPS_CONF1_EP)
+ c->options |= MIPS_CPU_EJTAG;
+ if (config1 & MIPS_CONF1_FP) {
+ c->options |= MIPS_CPU_FPU;
+ c->options |= MIPS_CPU_32FPR;
+ }
+ if (cpu_has_tlb) {
+ c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1;
+ c->tlbsizevtlb = c->tlbsize;
+ c->tlbsizeftlbsets = 0;
+ }
+
+ return config1 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config2(struct cpuinfo_mips *c)
+{
+ unsigned int config2;
+
+ config2 = read_c0_config2();
+
+ if (config2 & MIPS_CONF2_SL)
+ c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+
+ return config2 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config3(struct cpuinfo_mips *c)
+{
+ unsigned int config3;
+
+ config3 = read_c0_config3();
+
+ if (config3 & MIPS_CONF3_SM) {
+ c->ases |= MIPS_ASE_SMARTMIPS;
+ c->options |= MIPS_CPU_RIXI | MIPS_CPU_CTXTC;
+ }
+ if (config3 & MIPS_CONF3_RXI)
+ c->options |= MIPS_CPU_RIXI;
+ if (config3 & MIPS_CONF3_CTXTC)
+ c->options |= MIPS_CPU_CTXTC;
+ if (config3 & MIPS_CONF3_DSP)
+ c->ases |= MIPS_ASE_DSP;
+ if (config3 & MIPS_CONF3_DSP2P) {
+ c->ases |= MIPS_ASE_DSP2P;
+ if (cpu_has_mips_r6)
+ c->ases |= MIPS_ASE_DSP3;
+ }
+ if (config3 & MIPS_CONF3_VINT)
+ c->options |= MIPS_CPU_VINT;
+ if (config3 & MIPS_CONF3_VEIC)
+ c->options |= MIPS_CPU_VEIC;
+ if (config3 & MIPS_CONF3_LPA)
+ c->options |= MIPS_CPU_LPA;
+ if (config3 & MIPS_CONF3_MT)
+ c->ases |= MIPS_ASE_MIPSMT;
+ if (config3 & MIPS_CONF3_ULRI)
+ c->options |= MIPS_CPU_ULRI;
+ if (config3 & MIPS_CONF3_ISA)
+ c->options |= MIPS_CPU_MICROMIPS;
+ if (config3 & MIPS_CONF3_VZ)
+ c->ases |= MIPS_ASE_VZ;
+ if (config3 & MIPS_CONF3_SC)
+ c->options |= MIPS_CPU_SEGMENTS;
+ if (config3 & MIPS_CONF3_BI)
+ c->options |= MIPS_CPU_BADINSTR;
+ if (config3 & MIPS_CONF3_BP)
+ c->options |= MIPS_CPU_BADINSTRP;
+ if (config3 & MIPS_CONF3_MSA)
+ c->ases |= MIPS_ASE_MSA;
+ if (config3 & MIPS_CONF3_PW) {
+ c->htw_seq = 0;
+ c->options |= MIPS_CPU_HTW;
+ }
+ if (config3 & MIPS_CONF3_CDMM)
+ c->options |= MIPS_CPU_CDMM;
+ if (config3 & MIPS_CONF3_SP)
+ c->options |= MIPS_CPU_SP;
+
+ return config3 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config4(struct cpuinfo_mips *c)
+{
+ unsigned int config4;
+ unsigned int newcf4;
+ unsigned int mmuextdef;
+ unsigned int ftlb_page = MIPS_CONF4_FTLBPAGESIZE;
+ unsigned long asid_mask;
+
+ config4 = read_c0_config4();
+
+ if (cpu_has_tlb) {
+ if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
+ c->options |= MIPS_CPU_TLBINV;
+
+ /*
+ * R6 has dropped the MMUExtDef field from config4.
+ * On R6 the fields always describe the FTLB, and only if it is
+ * present according to Config.MT.
+ */
+ if (!cpu_has_mips_r6)
+ mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+ else if (cpu_has_ftlb)
+ mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT;
+ else
+ mmuextdef = 0;
+
+ switch (mmuextdef) {
+ case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
+ c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
+ c->tlbsizevtlb = c->tlbsize;
+ break;
+ case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
+ c->tlbsizevtlb +=
+ ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
+ MIPS_CONF4_VTLBSIZEEXT_SHIFT) * 0x40;
+ c->tlbsize = c->tlbsizevtlb;
+ ftlb_page = MIPS_CONF4_VFTLBPAGESIZE;
+ fallthrough;
+ case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
+ if (mips_ftlb_disabled)
+ break;
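+ /*
+ * Program the FTLB page size to match the kernel's PAGE_SIZE,
+ * then read Config4 back to verify that the hardware accepted
+ * the new value.
+ */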
+ newcf4 = (config4 & ~ftlb_page) |
+ (page_size_ftlb(mmuextdef) <<
+ MIPS_CONF4_FTLBPAGESIZE_SHIFT);
+ write_c0_config4(newcf4);
+ back_to_back_c0_hazard();
+ config4 = read_c0_config4();
+ if (config4 != newcf4) {
+ pr_err("PAGE_SIZE 0x%lx is not supported by FTLB (config4=0x%x)\n",
+ PAGE_SIZE, config4);
+ /* Switch FTLB off */
+ set_ftlb_enable(c, 0);
+ mips_ftlb_disabled = 1;
+ break;
+ }
+ c->tlbsizeftlbsets = 1 <<
+ ((config4 & MIPS_CONF4_FTLBSETS) >>
+ MIPS_CONF4_FTLBSETS_SHIFT);
+ c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >>
+ MIPS_CONF4_FTLBWAYS_SHIFT) + 2;
+ c->tlbsize += c->tlbsizeftlbways * c->tlbsizeftlbsets;
+ mips_has_ftlb_configured = 1;
+ break;
+ }
+ }
+
+ c->kscratch_mask = (config4 & MIPS_CONF4_KSCREXIST)
+ >> MIPS_CONF4_KSCREXIST_SHIFT;
+
+ asid_mask = MIPS_ENTRYHI_ASID;
+ if (config4 & MIPS_CONF4_AE)
+ asid_mask |= MIPS_ENTRYHI_ASIDX;
+ set_cpu_asid_mask(c, asid_mask);
+
+ /*
+ * Warn if the computed ASID mask doesn't match the mask the kernel
+ * is built for. This may indicate either a serious problem or an
+ * easy optimisation opportunity, but either way should be addressed.
+ */
+ WARN_ON(asid_mask != cpu_asid_mask(c));
+
+ return config4 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config5(struct cpuinfo_mips *c)
+{
+ unsigned int config5, max_mmid_width;
+ unsigned long asid_mask;
+
+ config5 = read_c0_config5();
+ config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE);
+
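+ /*
+ * On R6, attempt to enable MemoryMapID (MMID) support unless the
+ * kernel was built with cpu_has_mmid hardcoded to 0; whether the
+ * MI bit actually sticks is checked further down, once the write
+ * to Config5 has taken effect.
+ */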
+ if (cpu_has_mips_r6) {
+ if (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid)
+ config5 |= MIPS_CONF5_MI;
+ else
+ config5 &= ~MIPS_CONF5_MI;
+ }
+
+ write_c0_config5(config5);
+
+ if (config5 & MIPS_CONF5_EVA)
+ c->options |= MIPS_CPU_EVA;
+ if (config5 & MIPS_CONF5_MRP)
+ c->options |= MIPS_CPU_MAAR;
+ if (config5 & MIPS_CONF5_LLB)
+ c->options |= MIPS_CPU_RW_LLB;
+ if (config5 & MIPS_CONF5_MVH)
+ c->options |= MIPS_CPU_MVH;
+ if (cpu_has_mips_r6 && (config5 & MIPS_CONF5_VP))
+ c->options |= MIPS_CPU_VP;
+ if (config5 & MIPS_CONF5_CA2)
+ c->ases |= MIPS_ASE_MIPS16E2;
+
+ if (config5 & MIPS_CONF5_CRCP)
+ elf_hwcap |= HWCAP_MIPS_CRC32;
+
+ if (cpu_has_mips_r6) {
+ /* Ensure the write to config5 above takes effect */
+ back_to_back_c0_hazard();
+
+ /* Check whether we successfully enabled MMID support */
+ config5 = read_c0_config5();
+ if (config5 & MIPS_CONF5_MI)
+ c->options |= MIPS_CPU_MMID;
+
+ /*
+ * Warn if we've hardcoded cpu_has_mmid to a value unsuitable
+ * for the CPU we're running on, or if CPUs in an SMP system
+ * have inconsistent MMID support.
+ */
+ WARN_ON(!!cpu_has_mmid != !!(config5 & MIPS_CONF5_MI));
+
+ if (cpu_has_mmid) {
+ write_c0_memorymapid(~0ul);
+ back_to_back_c0_hazard();
+ asid_mask = read_c0_memorymapid();
+
+ /*
+ * We maintain a bitmap to track MMID allocation, and
+ * need a sensible upper bound on the size of that
+ * bitmap. The initial CPU with MMID support (I6500)
+ * supports 16 bit MMIDs, which gives us an 8KiB
+ * bitmap. The architecture recommends that hardware
+ * support 32 bit MMIDs, which would give us a 512MiB
+ * bitmap - that's too big in most cases.
+ *
+ * Cap MMID width at 16 bits for now & we can revisit
+ * this if & when hardware supports anything wider.
+ */
+ max_mmid_width = 16;
+ if (asid_mask > GENMASK(max_mmid_width - 1, 0)) {
+ pr_info("Capping MMID width at %d bits",
+ max_mmid_width);
+ asid_mask = GENMASK(max_mmid_width - 1, 0);
+ }
+
+ set_cpu_asid_mask(c, asid_mask);
+ }
+ }
+
+ return config5 & MIPS_CONF_M;
+}
+
+static void decode_configs(struct cpuinfo_mips *c)
+{
+ int ok;
+
+ /* MIPS32 or MIPS64 compliant CPU. */
+ c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
+ MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
+
+ c->scache.flags = MIPS_CACHE_NOT_PRESENT;
+
+ /* Enable FTLB if present and not disabled */
+ set_ftlb_enable(c, mips_ftlb_disabled ? 0 : FTLB_EN);
+
+ ok = decode_config0(c); /* Read Config registers. */
+ BUG_ON(!ok); /* Arch spec violation! */
+ if (ok)
+ ok = decode_config1(c);
+ if (ok)
+ ok = decode_config2(c);
+ if (ok)
+ ok = decode_config3(c);
+ if (ok)
+ ok = decode_config4(c);
+ if (ok)
+ ok = decode_config5(c);
+
+ /* Probe the EBase.WG bit */
+ if (cpu_has_mips_r2_r6) {
+ u64 ebase;
+ unsigned int status;
+
+ /* {read,write}_c0_ebase_64() may be UNDEFINED prior to r6 */
+ ebase = cpu_has_mips64r6 ? read_c0_ebase_64()
+ : (s32)read_c0_ebase();
+ if (ebase & MIPS_EBASE_WG) {
+ /* WG bit already set, we can avoid the clumsy probe */
+ c->options |= MIPS_CPU_EBASE_WG;
+ } else {
+ /* It's UNDEFINED to change EBase while BEV=0 */
+ status = read_c0_status();
+ write_c0_status(status | ST0_BEV);
+ irq_enable_hazard();
+ /*
+ * On pre-r6 cores, this may well clobber the upper bits
+ * of EBase. This is hard to avoid without potentially
+ * hitting UNDEFINED dm*c0 behaviour if EBase is 32-bit.
+ */
+ if (cpu_has_mips64r6)
+ write_c0_ebase_64(ebase | MIPS_EBASE_WG);
+ else
+ write_c0_ebase(ebase | MIPS_EBASE_WG);
+ back_to_back_c0_hazard();
+ /* Restore BEV */
+ write_c0_status(status);
+ if (read_c0_ebase() & MIPS_EBASE_WG) {
+ c->options |= MIPS_CPU_EBASE_WG;
+ write_c0_ebase(ebase);
+ }
+ }
+ }
+
+ /* configure the FTLB write probability */
+ set_ftlb_enable(c, (mips_ftlb_disabled ? 0 : FTLB_EN) | FTLB_SET_PROB);
+
+ mips_probe_watch_registers(c);
+
+#ifndef CONFIG_MIPS_CPS
+ if (cpu_has_mips_r2_r6) {
+ unsigned int core;
+
+ core = get_ebase_cpunum();
+ if (cpu_has_mipsmt)
+ core >>= fls(core_nvpes()) - 1;
+ cpu_set_core(c, core);
+ }
+#endif
+}
+
+/*
+ * Probe for certain guest capabilities by writing config bits and reading back.
+ * Finally write back the original value.
+ */
+#define probe_gc0_config(name, maxconf, bits) \
+do { \
+ unsigned int tmp; \
+ tmp = read_gc0_##name(); \
+ write_gc0_##name(tmp | (bits)); \
+ back_to_back_c0_hazard(); \
+ maxconf = read_gc0_##name(); \
+ write_gc0_##name(tmp); \
+} while (0)
+
+/*
+ * Probe for dynamic guest capabilities by changing certain config bits and
+ * reading back to see if they change. Finally write back the original value.
+ */
+#define probe_gc0_config_dyn(name, maxconf, dynconf, bits) \
+do { \
+ maxconf = read_gc0_##name(); \
+ write_gc0_##name(maxconf ^ (bits)); \
+ back_to_back_c0_hazard(); \
+ dynconf = maxconf ^ read_gc0_##name(); \
+ write_gc0_##name(maxconf); \
+ maxconf |= dynconf; \
+} while (0)
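+/*
+ * After probe_gc0_config_dyn(), dynconf holds the bits the guest can
+ * toggle, while maxconf holds the union of the bits that were already
+ * set and those that proved writable.
+ */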
+
+static inline unsigned int decode_guest_config0(struct cpuinfo_mips *c)
+{
+ unsigned int config0;
+
+ probe_gc0_config(config, config0, MIPS_CONF_M);
+
+ if (config0 & MIPS_CONF_M)
+ c->guest.conf |= BIT(1);
+ return config0 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config1(struct cpuinfo_mips *c)
+{
+ unsigned int config1, config1_dyn;
+
+ probe_gc0_config_dyn(config1, config1, config1_dyn,
+ MIPS_CONF_M | MIPS_CONF1_PC | MIPS_CONF1_WR |
+ MIPS_CONF1_FP);
+
+ if (config1 & MIPS_CONF1_FP)
+ c->guest.options |= MIPS_CPU_FPU;
+ if (config1_dyn & MIPS_CONF1_FP)
+ c->guest.options_dyn |= MIPS_CPU_FPU;
+
+ if (config1 & MIPS_CONF1_WR)
+ c->guest.options |= MIPS_CPU_WATCH;
+ if (config1_dyn & MIPS_CONF1_WR)
+ c->guest.options_dyn |= MIPS_CPU_WATCH;
+
+ if (config1 & MIPS_CONF1_PC)
+ c->guest.options |= MIPS_CPU_PERF;
+ if (config1_dyn & MIPS_CONF1_PC)
+ c->guest.options_dyn |= MIPS_CPU_PERF;
+
+ if (config1 & MIPS_CONF_M)
+ c->guest.conf |= BIT(2);
+ return config1 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config2(struct cpuinfo_mips *c)
+{
+ unsigned int config2;
+
+ probe_gc0_config(config2, config2, MIPS_CONF_M);
+
+ if (config2 & MIPS_CONF_M)
+ c->guest.conf |= BIT(3);
+ return config2 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c)
+{
+ unsigned int config3, config3_dyn;
+
+ probe_gc0_config_dyn(config3, config3, config3_dyn,
+ MIPS_CONF_M | MIPS_CONF3_MSA | MIPS_CONF3_ULRI |
+ MIPS_CONF3_CTXTC);
+
+ if (config3 & MIPS_CONF3_CTXTC)
+ c->guest.options |= MIPS_CPU_CTXTC;
+ if (config3_dyn & MIPS_CONF3_CTXTC)
+ c->guest.options_dyn |= MIPS_CPU_CTXTC;
+
+ if (config3 & MIPS_CONF3_PW)
+ c->guest.options |= MIPS_CPU_HTW;
+
+ if (config3 & MIPS_CONF3_ULRI)
+ c->guest.options |= MIPS_CPU_ULRI;
+
+ if (config3 & MIPS_CONF3_SC)
+ c->guest.options |= MIPS_CPU_SEGMENTS;
+
+ if (config3 & MIPS_CONF3_BI)
+ c->guest.options |= MIPS_CPU_BADINSTR;
+ if (config3 & MIPS_CONF3_BP)
+ c->guest.options |= MIPS_CPU_BADINSTRP;
+
+ if (config3 & MIPS_CONF3_MSA)
+ c->guest.ases |= MIPS_ASE_MSA;
+ if (config3_dyn & MIPS_CONF3_MSA)
+ c->guest.ases_dyn |= MIPS_ASE_MSA;
+
+ if (config3 & MIPS_CONF_M)
+ c->guest.conf |= BIT(4);
+ return config3 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config4(struct cpuinfo_mips *c)
+{
+ unsigned int config4;
+
+ probe_gc0_config(config4, config4,
+ MIPS_CONF_M | MIPS_CONF4_KSCREXIST);
+
+ c->guest.kscratch_mask = (config4 & MIPS_CONF4_KSCREXIST)
+ >> MIPS_CONF4_KSCREXIST_SHIFT;
+
+ if (config4 & MIPS_CONF_M)
+ c->guest.conf |= BIT(5);
+ return config4 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c)
+{
+ unsigned int config5, config5_dyn;
+
+ probe_gc0_config_dyn(config5, config5, config5_dyn,
+ MIPS_CONF_M | MIPS_CONF5_MVH | MIPS_CONF5_MRP);
+
+ if (config5 & MIPS_CONF5_MRP)
+ c->guest.options |= MIPS_CPU_MAAR;
+ if (config5_dyn & MIPS_CONF5_MRP)
+ c->guest.options_dyn |= MIPS_CPU_MAAR;
+
+ if (config5 & MIPS_CONF5_LLB)
+ c->guest.options |= MIPS_CPU_RW_LLB;
+
+ if (config5 & MIPS_CONF5_MVH)
+ c->guest.options |= MIPS_CPU_MVH;
+
+ if (config5 & MIPS_CONF_M)
+ c->guest.conf |= BIT(6);
+ return config5 & MIPS_CONF_M;
+}
+
+static inline void decode_guest_configs(struct cpuinfo_mips *c)
+{
+ unsigned int ok;
+
+ ok = decode_guest_config0(c);
+ if (ok)
+ ok = decode_guest_config1(c);
+ if (ok)
+ ok = decode_guest_config2(c);
+ if (ok)
+ ok = decode_guest_config3(c);
+ if (ok)
+ ok = decode_guest_config4(c);
+ if (ok)
+ decode_guest_config5(c);
+}
+
+static inline void cpu_probe_guestctl0(struct cpuinfo_mips *c)
+{
+ unsigned int guestctl0, temp;
+
+ guestctl0 = read_c0_guestctl0();
+
+ if (guestctl0 & MIPS_GCTL0_G0E)
+ c->options |= MIPS_CPU_GUESTCTL0EXT;
+ if (guestctl0 & MIPS_GCTL0_G1)
+ c->options |= MIPS_CPU_GUESTCTL1;
+ if (guestctl0 & MIPS_GCTL0_G2)
+ c->options |= MIPS_CPU_GUESTCTL2;
+ if (!(guestctl0 & MIPS_GCTL0_RAD)) {
+ c->options |= MIPS_CPU_GUESTID;
+
+ /*
+ * Probe for Direct Root to Guest (DRG). Set GuestCtl1.RID = 0
+ * first, otherwise all data accesses will be fully virtualised
+ * as if they were performed in guest mode.
+ */
+ write_c0_guestctl1(0);
+ tlbw_use_hazard();
+
+ write_c0_guestctl0(guestctl0 | MIPS_GCTL0_DRG);
+ back_to_back_c0_hazard();
+ temp = read_c0_guestctl0();
+
+ if (temp & MIPS_GCTL0_DRG) {
+ write_c0_guestctl0(guestctl0);
+ c->options |= MIPS_CPU_DRG;
+ }
+ }
+}
+
+static inline void cpu_probe_guestctl1(struct cpuinfo_mips *c)
+{
+ if (cpu_has_guestid) {
+ /* determine the number of bits of GuestID available */
+ write_c0_guestctl1(MIPS_GCTL1_ID);
+ back_to_back_c0_hazard();
+ c->guestid_mask = (read_c0_guestctl1() & MIPS_GCTL1_ID)
+ >> MIPS_GCTL1_ID_SHIFT;
+ write_c0_guestctl1(0);
+ }
+}
+
+static inline void cpu_probe_gtoffset(struct cpuinfo_mips *c)
+{
+ /* determine the number of bits of GTOffset available */
+ write_c0_gtoffset(0xffffffff);
+ back_to_back_c0_hazard();
+ c->gtoffset_mask = read_c0_gtoffset();
+ write_c0_gtoffset(0);
+}
+
+static inline void cpu_probe_vz(struct cpuinfo_mips *c)
+{
+ cpu_probe_guestctl0(c);
+ if (cpu_has_guestctl1)
+ cpu_probe_guestctl1(c);
+
+ cpu_probe_gtoffset(c);
+
+ decode_guest_configs(c);
+}
+
+#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
+ | MIPS_CPU_COUNTER)
+
+static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_R2000:
+ c->cputype = CPU_R2000;
+ __cpu_name[cpu] = "R2000";
+ c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS;
+ c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
+ MIPS_CPU_NOFPUEX;
+ if (__cpu_has_fpu())
+ c->options |= MIPS_CPU_FPU;
+ c->tlbsize = 64;
+ break;
+ case PRID_IMP_R3000:
+ if ((c->processor_id & PRID_REV_MASK) == PRID_REV_R3000A) {
+ if (cpu_has_confreg()) {
+ c->cputype = CPU_R3081E;
+ __cpu_name[cpu] = "R3081";
+ } else {
+ c->cputype = CPU_R3000A;
+ __cpu_name[cpu] = "R3000A";
+ }
+ } else {
+ c->cputype = CPU_R3000;
+ __cpu_name[cpu] = "R3000";
+ }
+ c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS;
+ c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
+ MIPS_CPU_NOFPUEX;
+ if (__cpu_has_fpu())
+ c->options |= MIPS_CPU_FPU;
+ c->tlbsize = 64;
+ break;
+ case PRID_IMP_R4000:
+ if (read_c0_config() & CONF_SC) {
+ if ((c->processor_id & PRID_REV_MASK) >=
+ PRID_REV_R4400) {
+ c->cputype = CPU_R4400PC;
+ __cpu_name[cpu] = "R4400PC";
+ } else {
+ c->cputype = CPU_R4000PC;
+ __cpu_name[cpu] = "R4000PC";
+ }
+ } else {
+ int cca = read_c0_config() & CONF_CM_CMASK;
+ int mc;
+
+ /*
+ * SC and MC versions can't be reliably told apart,
+ * but only the latter supports coherent caching
+ * modes, so assume the firmware has set the KSEG0
+ * coherency attribute reasonably (if uncached, we
+ * assume SC).
+ */
+ switch (cca) {
+ case CONF_CM_CACHABLE_CE:
+ case CONF_CM_CACHABLE_COW:
+ case CONF_CM_CACHABLE_CUW:
+ mc = 1;
+ break;
+ default:
+ mc = 0;
+ break;
+ }
+ if ((c->processor_id & PRID_REV_MASK) >=
+ PRID_REV_R4400) {
+ c->cputype = mc ? CPU_R4400MC : CPU_R4400SC;
+ __cpu_name[cpu] = mc ? "R4400MC" : "R4400SC";
+ } else {
+ c->cputype = mc ? CPU_R4000MC : CPU_R4000SC;
+ __cpu_name[cpu] = mc ? "R4000MC" : "R4000SC";
+ }
+ }
+
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_WATCH | MIPS_CPU_VCE |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 48;
+ break;
+ case PRID_IMP_VR41XX:
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ c->options = R4K_OPTS;
+ c->tlbsize = 32;
+ switch (c->processor_id & 0xf0) {
+ case PRID_REV_VR4111:
+ c->cputype = CPU_VR4111;
+ __cpu_name[cpu] = "NEC VR4111";
+ break;
+ case PRID_REV_VR4121:
+ c->cputype = CPU_VR4121;
+ __cpu_name[cpu] = "NEC VR4121";
+ break;
+ case PRID_REV_VR4122:
+ if ((c->processor_id & 0xf) < 0x3) {
+ c->cputype = CPU_VR4122;
+ __cpu_name[cpu] = "NEC VR4122";
+ } else {
+ c->cputype = CPU_VR4181A;
+ __cpu_name[cpu] = "NEC VR4181A";
+ }
+ break;
+ case PRID_REV_VR4130:
+ if ((c->processor_id & 0xf) < 0x4) {
+ c->cputype = CPU_VR4131;
+ __cpu_name[cpu] = "NEC VR4131";
+ } else {
+ c->cputype = CPU_VR4133;
+ c->options |= MIPS_CPU_LLSC;
+ __cpu_name[cpu] = "NEC VR4133";
+ }
+ break;
+ default:
+ printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n");
+ c->cputype = CPU_VR41XX;
+ __cpu_name[cpu] = "NEC Vr41xx";
+ break;
+ }
+ break;
+ case PRID_IMP_R4600:
+ c->cputype = CPU_R4600;
+ __cpu_name[cpu] = "R4600";
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 48;
+ break;
+ #if 0
+ case PRID_IMP_R4650:
+ /*
+ * This processor doesn't have an MMU, so it's not
+ * "real easy" to run Linux on it. It is left purely
+ * for documentation. Commented out because it shares
+ * its c0_prid ID number with the TX3900.
+ */
+ c->cputype = CPU_R4650;
+ __cpu_name[cpu] = "R4650";
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
+ c->tlbsize = 48;
+ break;
+ #endif
+ case PRID_IMP_TX39:
+ c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS;
+ c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE;
+
+ if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) {
+ c->cputype = CPU_TX3927;
+ __cpu_name[cpu] = "TX3927";
+ c->tlbsize = 64;
+ } else {
+ switch (c->processor_id & PRID_REV_MASK) {
+ case PRID_REV_TX3912:
+ c->cputype = CPU_TX3912;
+ __cpu_name[cpu] = "TX3912";
+ c->tlbsize = 32;
+ break;
+ case PRID_REV_TX3922:
+ c->cputype = CPU_TX3922;
+ __cpu_name[cpu] = "TX3922";
+ c->tlbsize = 64;
+ break;
+ }
+ }
+ break;
+ case PRID_IMP_R4700:
+ c->cputype = CPU_R4700;
+ __cpu_name[cpu] = "R4700";
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 48;
+ break;
+ case PRID_IMP_TX49:
+ c->cputype = CPU_TX49XX;
+ __cpu_name[cpu] = "R49XX";
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ c->options = R4K_OPTS | MIPS_CPU_LLSC;
+ if (!(c->processor_id & 0x08))
+ c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR;
+ c->tlbsize = 48;
+ break;
+ case PRID_IMP_R5000:
+ c->cputype = CPU_R5000;
+ __cpu_name[cpu] = "R5000";
+ set_isa(c, MIPS_CPU_ISA_IV);
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 48;
+ break;
+ case PRID_IMP_R5500:
+ c->cputype = CPU_R5500;
+ __cpu_name[cpu] = "R5500";
+ set_isa(c, MIPS_CPU_ISA_IV);
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_WATCH | MIPS_CPU_LLSC;
+ c->tlbsize = 48;
+ break;
+ case PRID_IMP_NEVADA:
+ c->cputype = CPU_NEVADA;
+ __cpu_name[cpu] = "Nevada";
+ set_isa(c, MIPS_CPU_ISA_IV);
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
+ c->tlbsize = 48;
+ break;
+ case PRID_IMP_RM7000:
+ c->cputype = CPU_RM7000;
+ __cpu_name[cpu] = "RM7000";
+ set_isa(c, MIPS_CPU_ISA_IV);
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_LLSC;
+ /*
+ * Undocumented RM7000: Bit 29 in the info register of
+ * the RM7000 v2.0 indicates if the TLB has 48 or 64
+ * entries.
+ *
+ * Info[29] == 1 => 64-entry JTLB
+ * Info[29] == 0 => 48-entry JTLB
+ */
+ c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
+ break;
+ case PRID_IMP_R10000:
+ c->cputype = CPU_R10000;
+ __cpu_name[cpu] = "R10000";
+ set_isa(c, MIPS_CPU_ISA_IV);
+ c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 64;
+ break;
+ case PRID_IMP_R12000:
+ c->cputype = CPU_R12000;
+ __cpu_name[cpu] = "R12000";
+ set_isa(c, MIPS_CPU_ISA_IV);
+ c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 64;
+ write_c0_r10k_diag(read_c0_r10k_diag() | R10K_DIAG_E_GHIST);
+ break;
+ case PRID_IMP_R14000:
+ if (((c->processor_id >> 4) & 0x0f) > 2) {
+ c->cputype = CPU_R16000;
+ __cpu_name[cpu] = "R16000";
+ } else {
+ c->cputype = CPU_R14000;
+ __cpu_name[cpu] = "R14000";
+ }
+ set_isa(c, MIPS_CPU_ISA_IV);
+ c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 64;
+ write_c0_r10k_diag(read_c0_r10k_diag() | R10K_DIAG_E_GHIST);
+ break;
+ case PRID_IMP_LOONGSON_64C: /* Loongson-2/3 */
+ switch (c->processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON2E:
+ c->cputype = CPU_LOONGSON2EF;
+ __cpu_name[cpu] = "ICT Loongson-2";
+ set_elf_platform(cpu, "loongson2e");
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ break;
+ case PRID_REV_LOONGSON2F:
+ c->cputype = CPU_LOONGSON2EF;
+ __cpu_name[cpu] = "ICT Loongson-2";
+ set_elf_platform(cpu, "loongson2f");
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ break;
+ case PRID_REV_LOONGSON3A_R1:
+ c->cputype = CPU_LOONGSON64;
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ set_isa(c, MIPS_CPU_ISA_M64R1);
+ c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+ MIPS_ASE_LOONGSON_EXT);
+ break;
+ case PRID_REV_LOONGSON3B_R1:
+ case PRID_REV_LOONGSON3B_R2:
+ c->cputype = CPU_LOONGSON64;
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3b");
+ set_isa(c, MIPS_CPU_ISA_M64R1);
+ c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+ MIPS_ASE_LOONGSON_EXT);
+ break;
+ }
+
+ c->options = R4K_OPTS |
+ MIPS_CPU_FPU | MIPS_CPU_LLSC |
+ MIPS_CPU_32FPR;
+ c->tlbsize = 64;
+ set_cpu_asid_mask(c, MIPS_ENTRYHI_ASID);
+ c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ break;
+ case PRID_IMP_LOONGSON_32: /* Loongson-1 */
+ decode_configs(c);
+
+ c->cputype = CPU_LOONGSON32;
+
+ switch (c->processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON1B:
+ __cpu_name[cpu] = "Loongson 1B";
+ break;
+ }
+
+ break;
+ }
+}
+
+static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_QEMU_GENERIC:
+ c->writecombine = _CACHE_UNCACHED;
+ c->cputype = CPU_QEMU_GENERIC;
+ __cpu_name[cpu] = "MIPS GENERIC QEMU";
+ break;
+ case PRID_IMP_4KC:
+ c->cputype = CPU_4KC;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 4Kc";
+ break;
+ case PRID_IMP_4KEC:
+ case PRID_IMP_4KECR2:
+ c->cputype = CPU_4KEC;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 4KEc";
+ break;
+ case PRID_IMP_4KSC:
+ case PRID_IMP_4KSD:
+ c->cputype = CPU_4KSC;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 4KSc";
+ break;
+ case PRID_IMP_5KC:
+ c->cputype = CPU_5KC;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 5Kc";
+ break;
+ case PRID_IMP_5KE:
+ c->cputype = CPU_5KE;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 5KE";
+ break;
+ case PRID_IMP_20KC:
+ c->cputype = CPU_20KC;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 20Kc";
+ break;
+ case PRID_IMP_24K:
+ c->cputype = CPU_24K;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 24Kc";
+ break;
+ case PRID_IMP_24KE:
+ c->cputype = CPU_24K;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 24KEc";
+ break;
+ case PRID_IMP_25KF:
+ c->cputype = CPU_25KF;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 25Kc";
+ break;
+ case PRID_IMP_34K:
+ c->cputype = CPU_34K;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 34Kc";
+ cpu_set_mt_per_tc_perf(c);
+ break;
+ case PRID_IMP_74K:
+ c->cputype = CPU_74K;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 74Kc";
+ break;
+ case PRID_IMP_M14KC:
+ c->cputype = CPU_M14KC;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS M14Kc";
+ break;
+ case PRID_IMP_M14KEC:
+ c->cputype = CPU_M14KEC;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS M14KEc";
+ break;
+ case PRID_IMP_1004K:
+ c->cputype = CPU_1004K;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 1004Kc";
+ cpu_set_mt_per_tc_perf(c);
+ break;
+ case PRID_IMP_1074K:
+ c->cputype = CPU_1074K;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 1074Kc";
+ break;
+ case PRID_IMP_INTERAPTIV_UP:
+ c->cputype = CPU_INTERAPTIV;
+ __cpu_name[cpu] = "MIPS interAptiv";
+ cpu_set_mt_per_tc_perf(c);
+ break;
+ case PRID_IMP_INTERAPTIV_MP:
+ c->cputype = CPU_INTERAPTIV;
+ __cpu_name[cpu] = "MIPS interAptiv (multi)";
+ cpu_set_mt_per_tc_perf(c);
+ break;
+ case PRID_IMP_PROAPTIV_UP:
+ c->cputype = CPU_PROAPTIV;
+ __cpu_name[cpu] = "MIPS proAptiv";
+ break;
+ case PRID_IMP_PROAPTIV_MP:
+ c->cputype = CPU_PROAPTIV;
+ __cpu_name[cpu] = "MIPS proAptiv (multi)";
+ break;
+ case PRID_IMP_P5600:
+ c->cputype = CPU_P5600;
+ __cpu_name[cpu] = "MIPS P5600";
+ break;
+ case PRID_IMP_P6600:
+ c->cputype = CPU_P6600;
+ __cpu_name[cpu] = "MIPS P6600";
+ break;
+ case PRID_IMP_I6400:
+ c->cputype = CPU_I6400;
+ __cpu_name[cpu] = "MIPS I6400";
+ break;
+ case PRID_IMP_I6500:
+ c->cputype = CPU_I6500;
+ __cpu_name[cpu] = "MIPS I6500";
+ break;
+ case PRID_IMP_M5150:
+ c->cputype = CPU_M5150;
+ __cpu_name[cpu] = "MIPS M5150";
+ break;
+ case PRID_IMP_M6250:
+ c->cputype = CPU_M6250;
+ __cpu_name[cpu] = "MIPS M6250";
+ break;
+ }
+
+ decode_configs(c);
+
+ spram_config();
+
+ mm_config(c);
+
+ switch (__get_cpu_type(c->cputype)) {
+ case CPU_M5150:
+ case CPU_P5600:
+ set_isa(c, MIPS_CPU_ISA_M32R5);
+ break;
+ case CPU_I6500:
+ c->options |= MIPS_CPU_SHARED_FTLB_ENTRIES;
+ fallthrough;
+ case CPU_I6400:
+ c->options |= MIPS_CPU_SHARED_FTLB_RAM;
+ fallthrough;
+ default:
+ break;
+ }
+
+ /* Recent MIPS cores use the implementation-dependent ExcCode 16 for
+ * cache/FTLB parity exceptions.
+ */
+ switch (__get_cpu_type(c->cputype)) {
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ case CPU_P6600:
+ case CPU_I6400:
+ case CPU_I6500:
+ c->options |= MIPS_CPU_FTLBPAREX;
+ break;
+ }
+}
+
+static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_AU1_REV1:
+ case PRID_IMP_AU1_REV2:
+ c->cputype = CPU_ALCHEMY;
+ switch ((c->processor_id >> 24) & 0xff) {
+ case 0:
+ __cpu_name[cpu] = "Au1000";
+ break;
+ case 1:
+ __cpu_name[cpu] = "Au1500";
+ break;
+ case 2:
+ __cpu_name[cpu] = "Au1100";
+ break;
+ case 3:
+ __cpu_name[cpu] = "Au1550";
+ break;
+ case 4:
+ __cpu_name[cpu] = "Au1200";
+ if ((c->processor_id & PRID_REV_MASK) == 2)
+ __cpu_name[cpu] = "Au1250";
+ break;
+ case 5:
+ __cpu_name[cpu] = "Au1210";
+ break;
+ default:
+ __cpu_name[cpu] = "Au1xxx";
+ break;
+ }
+ break;
+ }
+}
+
+static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+
+ c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_SB1:
+ c->cputype = CPU_SB1;
+ __cpu_name[cpu] = "SiByte SB1";
+ /* FPU in pass1 is known to have issues. */
+ if ((c->processor_id & PRID_REV_MASK) < 0x02)
+ c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR);
+ break;
+ case PRID_IMP_SB1A:
+ c->cputype = CPU_SB1A;
+ __cpu_name[cpu] = "SiByte SB1A";
+ break;
+ }
+}
+
+static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_SR71000:
+ c->cputype = CPU_SR71000;
+ __cpu_name[cpu] = "Sandcraft SR71000";
+ c->scache.ways = 8;
+ c->tlbsize = 64;
+ break;
+ }
+}
+
+static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_PR4450:
+ c->cputype = CPU_PR4450;
+ __cpu_name[cpu] = "Philips PR4450";
+ set_isa(c, MIPS_CPU_ISA_M32R1);
+ break;
+ }
+}
+
+static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_BMIPS32_REV4:
+ case PRID_IMP_BMIPS32_REV8:
+ c->cputype = CPU_BMIPS32;
+ __cpu_name[cpu] = "Broadcom BMIPS32";
+ set_elf_platform(cpu, "bmips32");
+ break;
+ case PRID_IMP_BMIPS3300:
+ case PRID_IMP_BMIPS3300_ALT:
+ case PRID_IMP_BMIPS3300_BUG:
+ c->cputype = CPU_BMIPS3300;
+ __cpu_name[cpu] = "Broadcom BMIPS3300";
+ set_elf_platform(cpu, "bmips3300");
+ break;
+ case PRID_IMP_BMIPS43XX: {
+ int rev = c->processor_id & PRID_REV_MASK;
+
+ if (rev >= PRID_REV_BMIPS4380_LO &&
+ rev <= PRID_REV_BMIPS4380_HI) {
+ c->cputype = CPU_BMIPS4380;
+ __cpu_name[cpu] = "Broadcom BMIPS4380";
+ set_elf_platform(cpu, "bmips4380");
+ c->options |= MIPS_CPU_RIXI;
+ } else {
+ c->cputype = CPU_BMIPS4350;
+ __cpu_name[cpu] = "Broadcom BMIPS4350";
+ set_elf_platform(cpu, "bmips4350");
+ }
+ break;
+ }
+ case PRID_IMP_BMIPS5000:
+ case PRID_IMP_BMIPS5200:
+ c->cputype = CPU_BMIPS5000;
+ if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_BMIPS5200)
+ __cpu_name[cpu] = "Broadcom BMIPS5200";
+ else
+ __cpu_name[cpu] = "Broadcom BMIPS5000";
+ set_elf_platform(cpu, "bmips5000");
+ c->options |= MIPS_CPU_ULRI | MIPS_CPU_RIXI;
+ break;
+ }
+}
+
+static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_CAVIUM_CN38XX:
+ case PRID_IMP_CAVIUM_CN31XX:
+ case PRID_IMP_CAVIUM_CN30XX:
+ c->cputype = CPU_CAVIUM_OCTEON;
+ __cpu_name[cpu] = "Cavium Octeon";
+ goto platform;
+ case PRID_IMP_CAVIUM_CN58XX:
+ case PRID_IMP_CAVIUM_CN56XX:
+ case PRID_IMP_CAVIUM_CN50XX:
+ case PRID_IMP_CAVIUM_CN52XX:
+ c->cputype = CPU_CAVIUM_OCTEON_PLUS;
+ __cpu_name[cpu] = "Cavium Octeon+";
+platform:
+ set_elf_platform(cpu, "octeon");
+ break;
+ case PRID_IMP_CAVIUM_CN61XX:
+ case PRID_IMP_CAVIUM_CN63XX:
+ case PRID_IMP_CAVIUM_CN66XX:
+ case PRID_IMP_CAVIUM_CN68XX:
+ case PRID_IMP_CAVIUM_CNF71XX:
+ c->cputype = CPU_CAVIUM_OCTEON2;
+ __cpu_name[cpu] = "Cavium Octeon II";
+ set_elf_platform(cpu, "octeon2");
+ break;
+ case PRID_IMP_CAVIUM_CN70XX:
+ case PRID_IMP_CAVIUM_CN73XX:
+ case PRID_IMP_CAVIUM_CNF75XX:
+ case PRID_IMP_CAVIUM_CN78XX:
+ c->cputype = CPU_CAVIUM_OCTEON3;
+ __cpu_name[cpu] = "Cavium Octeon III";
+ set_elf_platform(cpu, "octeon3");
+ break;
+ default:
+ printk(KERN_INFO "Unknown Octeon chip!\n");
+ c->cputype = CPU_UNKNOWN;
+ break;
+ }
+}
+
+#ifdef CONFIG_CPU_LOONGSON64
+#include <loongson_regs.h>
+
+static inline void decode_cpucfg(struct cpuinfo_mips *c)
+{
+ u32 cfg1 = read_cpucfg(LOONGSON_CFG1);
+ u32 cfg2 = read_cpucfg(LOONGSON_CFG2);
+ u32 cfg3 = read_cpucfg(LOONGSON_CFG3);
+
+ if (cfg1 & LOONGSON_CFG1_MMI)
+ c->ases |= MIPS_ASE_LOONGSON_MMI;
+
+ if (cfg2 & LOONGSON_CFG2_LEXT1)
+ c->ases |= MIPS_ASE_LOONGSON_EXT;
+
+ if (cfg2 & LOONGSON_CFG2_LEXT2)
+ c->ases |= MIPS_ASE_LOONGSON_EXT2;
+
+ if (cfg2 & LOONGSON_CFG2_LSPW) {
+ c->options |= MIPS_CPU_LDPTE;
+ c->guest.options |= MIPS_CPU_LDPTE;
+ }
+
+ if (cfg3 & LOONGSON_CFG3_LCAMP)
+ c->ases |= MIPS_ASE_LOONGSON_CAM;
+}
+
+static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ c->cputype = CPU_LOONGSON64;
+
+ /* All Loongson processors covered here define ExcCode 16 as GSExc. */
+ decode_configs(c);
+ c->options |= MIPS_CPU_GSEXCEX;
+
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_LOONGSON_64R: /* Loongson-64 Reduced */
+ switch (c->processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON2K_R1_0:
+ case PRID_REV_LOONGSON2K_R1_1:
+ case PRID_REV_LOONGSON2K_R1_2:
+ case PRID_REV_LOONGSON2K_R1_3:
+ __cpu_name[cpu] = "Loongson-2K";
+ set_elf_platform(cpu, "gs264e");
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ break;
+ }
+ c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT |
+ MIPS_ASE_LOONGSON_EXT2);
+ break;
+ case PRID_IMP_LOONGSON_64C: /* Loongson-3 Classic */
+ switch (c->processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON3A_R2_0:
+ case PRID_REV_LOONGSON3A_R2_1:
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ break;
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ break;
+ }
+ /*
+ * Loongson-3 Classic did not implement MIPS standard TLBINV
+ * but implemented TLBINVF and EHINV. Since we currently use only
+ * these two features, enable MIPS_CPU_TLBINV as well.
+ *
+ * Also, some early Loongson-3A2000 parts had the wrong TLB type in
+ * the Config register; we correct that here.
+ */
+ c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
+ c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+ MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
+ c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
+ break;
+ case PRID_IMP_LOONGSON_64G:
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ decode_cpucfg(c);
+ break;
+ default:
+ panic("Unknown Loongson Processor ID!");
+ break;
+ }
+}
+#else
+static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
+#endif
+
+static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+
+ /*
+ * XBurst lacks a config2 register, so the config3 decode was skipped in
+ * decode_configs().
+ */
+ decode_config3(c);
+
+ /* XBurst does not implement the CP0 counter. */
+ c->options &= ~MIPS_CPU_COUNTER;
+ BUG_ON(__builtin_constant_p(cpu_has_counter) && cpu_has_counter);
+
+ /* XBurst has virtually tagged icache */
+ c->icache.flags |= MIPS_CACHE_VTAG;
+
+ switch (c->processor_id & PRID_IMP_MASK) {
+
+ /* XBurst®1 with MXU1.0/MXU1.1 SIMD ISA */
+ case PRID_IMP_XBURST_REV1:
+
+ /*
+ * The XBurst core by default attempts to avoid branch target
+ * buffer lookups by detecting & special casing loops. This
+ * feature causes BogoMIPS and lpj to be calculated incorrectly.
+ * Set cp0 config7 bit 4 to disable this feature.
+ */
+ set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
+
+ switch (c->processor_id & PRID_COMP_MASK) {
+
+ /*
+ * XBurst CPUs with a processor ID of PRID_COMP_INGENIC_D0 report
+ * themselves as MIPS32r2 compatible in config0, but they don't
+ * actually support this ISA.
+ */
+ case PRID_COMP_INGENIC_D0:
+ c->isa_level &= ~MIPS_CPU_ISA_M32R2;
+
+ /* FPU is not properly detected on JZ4760(B). */
+ if (c->processor_id == 0x2ed0024f)
+ c->options |= MIPS_CPU_FPU;
+
+ fallthrough;
+
+ /*
+ * XBurst CPUs with a processor ID of PRID_COMP_INGENIC_D0 or
+ * PRID_COMP_INGENIC_D1 have an abandoned huge page TLB mode. This
+ * mode is not compatible with the MIPS standard and causes a TLB
+ * miss to loop forever (line 21 in tlb-funcs.S) when starting the
+ * init process. After chip reset the default is HPTLB mode, so
+ * write 0xa9000000 to cp0 register 5 sel 4 to switch back to VTLB
+ * mode and prevent getting stuck.
+ */
+ case PRID_COMP_INGENIC_D1:
+ write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS);
+ break;
+
+ default:
+ break;
+ }
+ fallthrough;
+
+ /* XBurst®1 with MXU2.0 SIMD ISA */
+ case PRID_IMP_XBURST_REV2:
+ /* Ingenic uses the WA bit to achieve write-combine memory writes */
+ c->writecombine = _CACHE_CACHABLE_WA;
+ c->cputype = CPU_XBURST;
+ __cpu_name[cpu] = "Ingenic XBurst";
+ break;
+
+ /* XBurst®2 with MXU2.1 SIMD ISA */
+ case PRID_IMP_XBURST2:
+ c->cputype = CPU_XBURST;
+ __cpu_name[cpu] = "Ingenic XBurst II";
+ break;
+
+ default:
+ panic("Unknown Ingenic Processor ID!");
+ break;
+ }
+}
+
+static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
+{
+ decode_configs(c);
+
+ if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_NETLOGIC_AU13XX) {
+ c->cputype = CPU_ALCHEMY;
+ __cpu_name[cpu] = "Au1300";
+ /* the rest of this function does not apply to Alchemy */
+ return;
+ }
+
+ c->options = (MIPS_CPU_TLB |
+ MIPS_CPU_4KEX |
+ MIPS_CPU_COUNTER |
+ MIPS_CPU_DIVEC |
+ MIPS_CPU_WATCH |
+ MIPS_CPU_EJTAG |
+ MIPS_CPU_LLSC);
+
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_NETLOGIC_XLP2XX:
+ case PRID_IMP_NETLOGIC_XLP9XX:
+ case PRID_IMP_NETLOGIC_XLP5XX:
+ c->cputype = CPU_XLP;
+ __cpu_name[cpu] = "Broadcom XLPII";
+ break;
+
+ case PRID_IMP_NETLOGIC_XLP8XX:
+ case PRID_IMP_NETLOGIC_XLP3XX:
+ c->cputype = CPU_XLP;
+ __cpu_name[cpu] = "Netlogic XLP";
+ break;
+
+ case PRID_IMP_NETLOGIC_XLR732:
+ case PRID_IMP_NETLOGIC_XLR716:
+ case PRID_IMP_NETLOGIC_XLR532:
+ case PRID_IMP_NETLOGIC_XLR308:
+ case PRID_IMP_NETLOGIC_XLR532C:
+ case PRID_IMP_NETLOGIC_XLR516C:
+ case PRID_IMP_NETLOGIC_XLR508C:
+ case PRID_IMP_NETLOGIC_XLR308C:
+ c->cputype = CPU_XLR;
+ __cpu_name[cpu] = "Netlogic XLR";
+ break;
+
+ case PRID_IMP_NETLOGIC_XLS608:
+ case PRID_IMP_NETLOGIC_XLS408:
+ case PRID_IMP_NETLOGIC_XLS404:
+ case PRID_IMP_NETLOGIC_XLS208:
+ case PRID_IMP_NETLOGIC_XLS204:
+ case PRID_IMP_NETLOGIC_XLS108:
+ case PRID_IMP_NETLOGIC_XLS104:
+ case PRID_IMP_NETLOGIC_XLS616B:
+ case PRID_IMP_NETLOGIC_XLS608B:
+ case PRID_IMP_NETLOGIC_XLS416B:
+ case PRID_IMP_NETLOGIC_XLS412B:
+ case PRID_IMP_NETLOGIC_XLS408B:
+ case PRID_IMP_NETLOGIC_XLS404B:
+ c->cputype = CPU_XLR;
+ __cpu_name[cpu] = "Netlogic XLS";
+ break;
+
+ default:
+ pr_info("Unknown Netlogic chip id [%02x]!\n",
+ c->processor_id);
+ c->cputype = CPU_XLR;
+ break;
+ }
+
+ if (c->cputype == CPU_XLP) {
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ c->options |= (MIPS_CPU_FPU | MIPS_CPU_ULRI | MIPS_CPU_MCHECK);
+ /* This will be updated again after all threads are woken up */
+ c->tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+ } else {
+ set_isa(c, MIPS_CPU_ISA_M64R1);
+ c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1;
+ }
+ c->kscratch_mask = 0xf;
+}
+
+#ifdef CONFIG_64BIT
+/* For use by uaccess.h */
+u64 __ua_limit;
+EXPORT_SYMBOL(__ua_limit);
+#endif
+
+const char *__cpu_name[NR_CPUS];
+const char *__elf_platform;
+const char *__elf_base_platform;
+
+void cpu_probe(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int cpu = smp_processor_id();
+
+ /*
+ * Set a default elf platform; cpu probe may later
+ * overwrite it with a more precise value.
+ */
+ set_elf_platform(cpu, "mips");
+
+ c->processor_id = PRID_IMP_UNKNOWN;
+ c->fpu_id = FPIR_IMP_NONE;
+ c->cputype = CPU_UNKNOWN;
+ c->writecombine = _CACHE_UNCACHED;
+
+ c->fpu_csr31 = FPU_CSR_RN;
+ c->fpu_msk31 = FPU_CSR_RSVD | FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+
+ c->processor_id = read_c0_prid();
+ switch (c->processor_id & PRID_COMP_MASK) {
+ case PRID_COMP_LEGACY:
+ cpu_probe_legacy(c, cpu);
+ break;
+ case PRID_COMP_MIPS:
+ cpu_probe_mips(c, cpu);
+ break;
+ case PRID_COMP_ALCHEMY:
+ cpu_probe_alchemy(c, cpu);
+ break;
+ case PRID_COMP_SIBYTE:
+ cpu_probe_sibyte(c, cpu);
+ break;
+ case PRID_COMP_BROADCOM:
+ cpu_probe_broadcom(c, cpu);
+ break;
+ case PRID_COMP_SANDCRAFT:
+ cpu_probe_sandcraft(c, cpu);
+ break;
+ case PRID_COMP_NXP:
+ cpu_probe_nxp(c, cpu);
+ break;
+ case PRID_COMP_CAVIUM:
+ cpu_probe_cavium(c, cpu);
+ break;
+ case PRID_COMP_LOONGSON:
+ cpu_probe_loongson(c, cpu);
+ break;
+ case PRID_COMP_INGENIC_13:
+ case PRID_COMP_INGENIC_D0:
+ case PRID_COMP_INGENIC_D1:
+ case PRID_COMP_INGENIC_E1:
+ cpu_probe_ingenic(c, cpu);
+ break;
+ case PRID_COMP_NETLOGIC:
+ cpu_probe_netlogic(c, cpu);
+ break;
+ }
+
+ BUG_ON(!__cpu_name[cpu]);
+ BUG_ON(c->cputype == CPU_UNKNOWN);
+
+ /*
+ * Platform code can force the cpu type to optimize code
+ * generation. In that case be sure the cpu type is set up
+ * correctly by hand, otherwise it could trigger some nasty bugs.
+ */
+ BUG_ON(current_cpu_type() != c->cputype);
+
+ if (cpu_has_rixi) {
+ /* Enable the RIXI exceptions */
+ set_c0_pagegrain(PG_IEC);
+ back_to_back_c0_hazard();
+ /* Verify the IEC bit is set */
+ if (read_c0_pagegrain() & PG_IEC)
+ c->options |= MIPS_CPU_RIXIEX;
+ }
+
+ if (mips_fpu_disabled)
+ c->options &= ~MIPS_CPU_FPU;
+
+ if (mips_dsp_disabled)
+ c->ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P);
+
+ if (mips_htw_disabled) {
+ c->options &= ~MIPS_CPU_HTW;
+ write_c0_pwctl(read_c0_pwctl() &
+ ~(1 << MIPS_PWCTL_PWEN_SHIFT));
+ }
+
+ if (c->options & MIPS_CPU_FPU)
+ cpu_set_fpu_opts(c);
+ else
+ cpu_set_nofpu_opts(c);
+
+ if (cpu_has_mips_r2_r6) {
+ c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
+ /* R2 has Performance Counter Interrupt indicator */
+ c->options |= MIPS_CPU_PCI;
+ }
+ else
+ c->srsets = 1;
+
+ if (cpu_has_mips_r6)
+ elf_hwcap |= HWCAP_MIPS_R6;
+
+ if (cpu_has_msa) {
+ c->msa_id = cpu_get_msa_id();
+ WARN(c->msa_id & MSA_IR_WRPF,
+ "Vector register partitioning unimplemented!");
+ elf_hwcap |= HWCAP_MIPS_MSA;
+ }
+
+ if (cpu_has_mips16)
+ elf_hwcap |= HWCAP_MIPS_MIPS16;
+
+ if (cpu_has_mdmx)
+ elf_hwcap |= HWCAP_MIPS_MDMX;
+
+ if (cpu_has_mips3d)
+ elf_hwcap |= HWCAP_MIPS_MIPS3D;
+
+ if (cpu_has_smartmips)
+ elf_hwcap |= HWCAP_MIPS_SMARTMIPS;
+
+ if (cpu_has_dsp)
+ elf_hwcap |= HWCAP_MIPS_DSP;
+
+ if (cpu_has_dsp2)
+ elf_hwcap |= HWCAP_MIPS_DSP2;
+
+ if (cpu_has_dsp3)
+ elf_hwcap |= HWCAP_MIPS_DSP3;
+
+ if (cpu_has_mips16e2)
+ elf_hwcap |= HWCAP_MIPS_MIPS16E2;
+
+ if (cpu_has_loongson_mmi)
+ elf_hwcap |= HWCAP_LOONGSON_MMI;
+
+ if (cpu_has_loongson_ext)
+ elf_hwcap |= HWCAP_LOONGSON_EXT;
+
+ if (cpu_has_loongson_ext2)
+ elf_hwcap |= HWCAP_LOONGSON_EXT2;
+
+ if (cpu_has_vz)
+ cpu_probe_vz(c);
+
+ cpu_probe_vmbits(c);
+
+ /* Synthesize CPUCFG data if running on Loongson processors;
+ * no-op otherwise.
+ *
+ * This looks at previously probed features, so keep this at bottom.
+ */
+ loongson3_cpucfg_synthesize_data(c);
+
+#ifdef CONFIG_64BIT
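+ /* Derive the user-space access limit from the virtual address bits probed above */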
+ if (cpu == 0)
+ __ua_limit = ~((1ull << cpu_vmbits) - 1);
+#endif
+}
+
+void cpu_report(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ pr_info("CPU%d revision is: %08x (%s)\n",
+ smp_processor_id(), c->processor_id, cpu_name_string());
+ if (c->options & MIPS_CPU_FPU)
+ printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
+ if (cpu_has_msa)
+ pr_info("MSA revision is: %08x\n", c->msa_id);
+}
+
+void cpu_set_cluster(struct cpuinfo_mips *cpuinfo, unsigned int cluster)
+{
+ /* Ensure the cluster number fits in the field */
+ WARN_ON(cluster > (MIPS_GLOBALNUMBER_CLUSTER >>
+ MIPS_GLOBALNUMBER_CLUSTER_SHF));
+
+ cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_CLUSTER;
+ cpuinfo->globalnumber |= cluster << MIPS_GLOBALNUMBER_CLUSTER_SHF;
+}
+
+void cpu_set_core(struct cpuinfo_mips *cpuinfo, unsigned int core)
+{
+ /* Ensure the core number fits in the field */
+ WARN_ON(core > (MIPS_GLOBALNUMBER_CORE >> MIPS_GLOBALNUMBER_CORE_SHF));
+
+ cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_CORE;
+ cpuinfo->globalnumber |= core << MIPS_GLOBALNUMBER_CORE_SHF;
+}
+
+void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo, unsigned int vpe)
+{
+ /* Ensure the VP(E) ID fits in the field */
+ WARN_ON(vpe > (MIPS_GLOBALNUMBER_VP >> MIPS_GLOBALNUMBER_VP_SHF));
+
+ /* Ensure we're not using VP(E)s without support */
+ WARN_ON(vpe && !IS_ENABLED(CONFIG_MIPS_MT_SMP) &&
+ !IS_ENABLED(CONFIG_CPU_MIPSR6));
+
+ cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_VP;
+ cpuinfo->globalnumber |= vpe << MIPS_GLOBALNUMBER_VP_SHF;
+}
diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
new file mode 100644
index 000000000..abdbbe8c5
--- /dev/null
+++ b/arch/mips/kernel/cpu-r3k-probe.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Processor capabilities determination functions.
+ *
+ * Copyright (C) xxxx the Anonymous
+ * Copyright (C) 1994 - 2006 Ralf Baechle
+ * Copyright (C) 2003, 2004 Maciej W. Rozycki
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/export.h>
+
+#include <asm/bugs.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+#include <asm/elf.h>
+
+#include "fpu-probe.h"
+
+/* Hardware capabilities */
+unsigned int elf_hwcap __read_mostly;
+EXPORT_SYMBOL_GPL(elf_hwcap);
+
+void __init check_bugs32(void)
+{
+
+}
+
+/*
+ * Probe whether the cpu has a config register by toggling the
+ * alternate cache bit and seeing whether it matters.
+ * It's used by cpu_probe to distinguish between R3000A and R3081.
+ */
+static inline int cpu_has_confreg(void)
+{
+#ifdef CONFIG_CPU_R3000
+ extern unsigned long r3k_cache_size(unsigned long);
+ unsigned long size1, size2;
+ unsigned long cfg = read_c0_conf();
+
+ size1 = r3k_cache_size(ST0_ISC);
+ write_c0_conf(cfg ^ R30XX_CONF_AC);
+ size2 = r3k_cache_size(ST0_ISC);
+ write_c0_conf(cfg);
+ return size1 != size2;
+#else
+ return 0;
+#endif
+}
+
+static inline void set_elf_platform(int cpu, const char *plat)
+{
+ if (cpu == 0)
+ __elf_platform = plat;
+}
+
+const char *__cpu_name[NR_CPUS];
+const char *__elf_platform;
+const char *__elf_base_platform;
+
+void cpu_probe(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int cpu = smp_processor_id();
+
+ /*
+ * Set a default elf platform; cpu probe may later
+ * overwrite it with a more precise value.
+ */
+ set_elf_platform(cpu, "mips");
+
+ c->processor_id = PRID_IMP_UNKNOWN;
+ c->fpu_id = FPIR_IMP_NONE;
+ c->cputype = CPU_UNKNOWN;
+ c->writecombine = _CACHE_UNCACHED;
+
+ c->fpu_csr31 = FPU_CSR_RN;
+ c->fpu_msk31 = FPU_CSR_RSVD | FPU_CSR_ABS2008 | FPU_CSR_NAN2008 |
+ FPU_CSR_CONDX | FPU_CSR_FS;
+
+ c->srsets = 1;
+
+ c->processor_id = read_c0_prid();
+ switch (c->processor_id & (PRID_COMP_MASK | PRID_IMP_MASK)) {
+ case PRID_COMP_LEGACY | PRID_IMP_R2000:
+ c->cputype = CPU_R2000;
+ __cpu_name[cpu] = "R2000";
+ c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
+ MIPS_CPU_NOFPUEX;
+ if (__cpu_has_fpu())
+ c->options |= MIPS_CPU_FPU;
+ c->tlbsize = 64;
+ break;
+ case PRID_COMP_LEGACY | PRID_IMP_R3000:
+ if ((c->processor_id & PRID_REV_MASK) == PRID_REV_R3000A) {
+ if (cpu_has_confreg()) {
+ c->cputype = CPU_R3081E;
+ __cpu_name[cpu] = "R3081";
+ } else {
+ c->cputype = CPU_R3000A;
+ __cpu_name[cpu] = "R3000A";
+ }
+ } else {
+ c->cputype = CPU_R3000;
+ __cpu_name[cpu] = "R3000";
+ }
+ c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
+ MIPS_CPU_NOFPUEX;
+ if (__cpu_has_fpu())
+ c->options |= MIPS_CPU_FPU;
+ c->tlbsize = 64;
+ break;
+ case PRID_COMP_LEGACY | PRID_IMP_TX39:
+ c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE;
+
+ if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) {
+ c->cputype = CPU_TX3927;
+ __cpu_name[cpu] = "TX3927";
+ c->tlbsize = 64;
+ } else {
+ switch (c->processor_id & PRID_REV_MASK) {
+ case PRID_REV_TX3912:
+ c->cputype = CPU_TX3912;
+ __cpu_name[cpu] = "TX3912";
+ c->tlbsize = 32;
+ break;
+ case PRID_REV_TX3922:
+ c->cputype = CPU_TX3922;
+ __cpu_name[cpu] = "TX3922";
+ c->tlbsize = 64;
+ break;
+ }
+ }
+ break;
+ }
+
+ BUG_ON(!__cpu_name[cpu]);
+ BUG_ON(c->cputype == CPU_UNKNOWN);
+
+ /*
+ * Platform code can force the cpu type to optimize code
+ * generation. In that case be sure the cpu type is set up
+ * correctly by hand, otherwise it could trigger some nasty bugs.
+ */
+ BUG_ON(current_cpu_type() != c->cputype);
+
+ if (mips_fpu_disabled)
+ c->options &= ~MIPS_CPU_FPU;
+
+ if (c->options & MIPS_CPU_FPU)
+ cpu_set_fpu_opts(c);
+ else
+ cpu_set_nofpu_opts(c);
+}
+
+void cpu_report(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ pr_info("CPU%d revision is: %08x (%s)\n",
+ smp_processor_id(), c->processor_id, cpu_name_string());
+ if (c->options & MIPS_CPU_FPU)
+ pr_info("FPU revision is: %08x\n", c->fpu_id);
+}
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
new file mode 100644
index 000000000..81845ba04
--- /dev/null
+++ b/arch/mips/kernel/crash.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/reboot.h>
+#include <linux/kexec.h>
+#include <linux/memblock.h>
+#include <linux/crash_dump.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+/* This keeps track of which cpu is crashing. */
+static int crashing_cpu = -1;
+static cpumask_t cpus_in_crash = CPU_MASK_NONE;
+
+#ifdef CONFIG_SMP
+static void crash_shutdown_secondary(void *passed_regs)
+{
+ struct pt_regs *regs = passed_regs;
+ int cpu = smp_processor_id();
+
+ /*
+ * If we are passed registers, use those. Otherwise get the
+ * regs from the last interrupt, which should be correct, as
+ * we are in an interrupt. But if the regs are not there,
+ * pull them from the top of the stack. They are probably
+ * wrong, but we need something to keep from crashing again.
+ */
+ if (!regs)
+ regs = get_irq_regs();
+ if (!regs)
+ regs = task_pt_regs(current);
+
+ if (!cpu_online(cpu))
+ return;
+
+ /* We won't be sent IPIs any more. */
+ set_cpu_online(cpu, false);
+
+ local_irq_disable();
+ if (!cpumask_test_cpu(cpu, &cpus_in_crash))
+ crash_save_cpu(regs, cpu);
+ cpumask_set_cpu(cpu, &cpus_in_crash);
+
+ while (!atomic_read(&kexec_ready_to_reboot))
+ cpu_relax();
+
+ kexec_reboot();
+
+ /* NOTREACHED */
+}
+
+static void crash_kexec_prepare_cpus(void)
+{
+ static int cpus_stopped;
+ unsigned int msecs;
+ unsigned int ncpus;
+
+ if (cpus_stopped)
+ return;
+
+ ncpus = num_online_cpus() - 1; /* Excluding the panic cpu */
+
+ smp_call_function(crash_shutdown_secondary, NULL, 0);
+ smp_wmb();
+
+ /*
+ * The crash CPU sends an IPI and waits up to about 10 seconds
+ * for the other CPUs to respond.
+ */
+ pr_emerg("Sending IPI to other cpus...\n");
+ msecs = 10000;
+ while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
+ cpu_relax();
+ mdelay(1);
+ }
+
+ cpus_stopped = 1;
+}
+
+/* Override the weak function in kernel/panic.c */
+void crash_smp_send_stop(void)
+{
+ if (_crash_smp_send_stop)
+ _crash_smp_send_stop();
+
+ crash_kexec_prepare_cpus();
+}
+
+#else /* !defined(CONFIG_SMP) */
+static void crash_kexec_prepare_cpus(void) {}
+#endif /* !defined(CONFIG_SMP) */
+
+void default_machine_crash_shutdown(struct pt_regs *regs)
+{
+ local_irq_disable();
+ crashing_cpu = smp_processor_id();
+ crash_save_cpu(regs, crashing_cpu);
+ crash_kexec_prepare_cpus();
+ cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
+}
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
new file mode 100644
index 000000000..01b2bd95b
--- /dev/null
+++ b/arch/mips/kernel/crash_dump.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/crash_dump.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+
+static void *kdump_buf_page;
+
+/**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ * space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ * otherwise @buf is in kernel address space, use memcpy().
+ *
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel.
+ *
+ * Calling copy_to_user() in atomic context is not desirable. Hence we
+ * first copy the data to a pre-allocated kernel page and then copy it to
+ * user space in non-atomic context.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset, int userbuf)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = kmap_atomic_pfn(pfn);
+
+ if (!userbuf) {
+ memcpy(buf, (vaddr + offset), csize);
+ kunmap_atomic(vaddr);
+ } else {
+ if (!kdump_buf_page) {
+ pr_warn("Kdump: Kdump buffer page not allocated\n");
+
+ return -EFAULT;
+ }
+ copy_page(kdump_buf_page, vaddr);
+ kunmap_atomic(vaddr);
+ if (copy_to_user(buf, (kdump_buf_page + offset), csize))
+ return -EFAULT;
+ }
+
+ return csize;
+}
+
+static int __init kdump_buf_page_init(void)
+{
+ int ret = 0;
+
+ kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!kdump_buf_page) {
+ pr_warn("Kdump: Failed to allocate kdump buffer page\n");
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+arch_initcall(kdump_buf_page_init);
diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c
new file mode 100644
index 000000000..6c18a138f
--- /dev/null
+++ b/arch/mips/kernel/csrc-bcm1480.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000,2001,2004 Broadcom Corporation
+ */
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+#include <asm/sibyte/bcm1480_scd.h>
+
+#include <asm/sibyte/sb1250.h>
+
+static u64 bcm1480_hpt_read(struct clocksource *cs)
+{
+ return (u64) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
+}
+
+struct clocksource bcm1480_clocksource = {
+ .name = "zbbus-cycles",
+ .rating = 200,
+ .read = bcm1480_hpt_read,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u64 notrace sb1480_read_sched_clock(void)
+{
+ return __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
+}
+
+void __init sb1480_clocksource_init(void)
+{
+ struct clocksource *cs = &bcm1480_clocksource;
+ unsigned int plldiv;
+ unsigned long zbbus;
+
+ plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
+ zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000);
+ clocksource_register_hz(cs, zbbus);
+
+ sched_clock_register(sb1480_read_sched_clock, 64, zbbus);
+}
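The ZBbus rate derived in sb1480_clocksource_init() is simply 25 MHz per PLL divider step: each pair of steps contributes 50 MHz and an odd step adds another 25 MHz. A standalone sketch with an invented divider value (not read from any real SCD register) shows the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int plldiv = 5;	/* hypothetical G_BCM1480_SYS_PLL_DIV() result */
	unsigned long zbbus;

	/* Same computation as above: 25 MHz per divider step. */
	zbbus = ((plldiv >> 1) * 50000000UL) + ((plldiv & 1) * 25000000UL);

	printf("ZBbus clock: %lu Hz\n", zbbus);	/* 125000000 for plldiv == 5 */
	return 0;
}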
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c
new file mode 100644
index 000000000..bad740ad3
--- /dev/null
+++ b/arch/mips/kernel/csrc-ioasic.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DEC I/O ASIC's counter clocksource
+ *
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/init.h>
+
+#include <asm/ds1287.h>
+#include <asm/time.h>
+#include <asm/dec/ioasic.h>
+#include <asm/dec/ioasic_addrs.h>
+
+static u64 dec_ioasic_hpt_read(struct clocksource *cs)
+{
+ return ioasic_read(IO_REG_FCTR);
+}
+
+static struct clocksource clocksource_dec = {
+ .name = "dec-ioasic",
+ .read = dec_ioasic_hpt_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u64 notrace dec_ioasic_read_sched_clock(void)
+{
+ return ioasic_read(IO_REG_FCTR);
+}
+
+int __init dec_ioasic_clocksource_init(void)
+{
+ unsigned int freq;
+ u32 start, end;
+ int i = HZ / 8;
+
+ ds1287_timer_state();
+ while (!ds1287_timer_state())
+ ;
+
+ start = dec_ioasic_hpt_read(&clocksource_dec);
+
+ while (i--)
+ while (!ds1287_timer_state())
+ ;
+
+ end = dec_ioasic_hpt_read(&clocksource_dec);
+
+ freq = (end - start) * 8;
+
+ /* An early revision of the I/O ASIC didn't have the counter. */
+ if (!freq)
+ return -ENXIO;
+
+ printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);
+
+ clocksource_dec.rating = 200 + freq / 10000000;
+ clocksource_register_hz(&clocksource_dec, freq);
+
+ sched_clock_register(dec_ioasic_read_sched_clock, 32, freq);
+
+ return 0;
+}
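dec_ioasic_clocksource_init() calibrates the free-running counter against the DS1287 RTC: it samples the counter, waits HZ/8 RTC interrupt periods (one eighth of a second when the RTC ticks at HZ), samples again and scales the delta by 8 to get a rate in Hz. A self-contained example with made-up counter readings (nothing below is measured from hardware):

#include <stdio.h>

int main(void)
{
	unsigned int start = 100000;	/* counter at the first RTC tick (assumed) */
	unsigned int end = 3225000;	/* counter one eighth of a second later (assumed) */
	unsigned int freq = (end - start) * 8;

	printf("I/O ASIC clock frequency %uHz\n", freq);		/* 25000000 */
	printf("clocksource rating: %u\n", 200 + freq / 10000000);	/* 202 */
	return 0;
}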
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
new file mode 100644
index 000000000..edc4afc08
--- /dev/null
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -0,0 +1,130 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 by Ralf Baechle
+ */
+#include <linux/clocksource.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/sched_clock.h>
+
+#include <asm/time.h>
+
+static u64 c0_hpt_read(struct clocksource *cs)
+{
+ return read_c0_count();
+}
+
+static struct clocksource clocksource_mips = {
+ .name = "MIPS",
+ .read = c0_hpt_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u64 __maybe_unused notrace r4k_read_sched_clock(void)
+{
+ return read_c0_count();
+}
+
+static inline unsigned int rdhwr_count(void)
+{
+ unsigned int count;
+
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set mips32r2\n"
+ " rdhwr %0, $2\n"
+ " .set pop\n"
+ : "=r" (count));
+
+ return count;
+}
+
+static bool rdhwr_count_usable(void)
+{
+ unsigned int prev, curr, i;
+
+ /*
+ * Older QEMUs have a broken implementation of RDHWR for the CP0 count
+ * which always returns a constant value. Try to identify this and don't
+ * use it in the VDSO if it is broken. This workaround can be removed
+ * once the fix has been in QEMU stable for a reasonable amount of time.
+ */
+ for (i = 0, prev = rdhwr_count(); i < 100; i++) {
+ curr = rdhwr_count();
+
+ if (curr != prev)
+ return true;
+
+ prev = curr;
+ }
+
+ pr_warn("Not using R4K clocksource in VDSO due to broken RDHWR\n");
+ return false;
+}
+
+#ifdef CONFIG_CPU_FREQ
+
+static bool __read_mostly r4k_clock_unstable;
+
+static void r4k_clocksource_unstable(char *reason)
+{
+ if (r4k_clock_unstable)
+ return;
+
+ r4k_clock_unstable = true;
+
+ pr_info("R4K timer is unstable due to %s\n", reason);
+
+ clocksource_mark_unstable(&clocksource_mips);
+}
+
+static int r4k_cpufreq_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ if (val == CPUFREQ_POSTCHANGE)
+ r4k_clocksource_unstable("CPU frequency change");
+
+ return 0;
+}
+
+static struct notifier_block r4k_cpufreq_notifier = {
+ .notifier_call = r4k_cpufreq_callback,
+};
+
+static int __init r4k_register_cpufreq_notifier(void)
+{
+ return cpufreq_register_notifier(&r4k_cpufreq_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+}
+core_initcall(r4k_register_cpufreq_notifier);
+
+#endif /* CONFIG_CPU_FREQ */
+
+int __init init_r4k_clocksource(void)
+{
+ if (!cpu_has_counter || !mips_hpt_frequency)
+ return -ENXIO;
+
+ /* Calculate a somewhat reasonable rating value */
+ clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
+
+ /*
+ * R2 onwards makes the count accessible to user mode so it can be used
+ * by the VDSO (HWREna is configured by configure_hwrena()).
+ */
+ if (cpu_has_mips_r2_r6 && rdhwr_count_usable())
+ clocksource_mips.vdso_clock_mode = VDSO_CLOCKMODE_R4K;
+
+ clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
+
+#ifndef CONFIG_CPU_FREQ
+ sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
+#endif
+
+ return 0;
+}
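rdhwr_count_usable() simply checks that consecutive reads of the hardware counter ever differ. Below is the same probe pattern reduced to portable C, with a hypothetical read_counter() in place of the RDHWR $2 access (stubbed here as a stuck counter so the example is self-contained):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for rdhwr_count(); this stub models the broken
 * emulation case where the counter never advances. */
static unsigned int read_counter(void)
{
	return 42;
}

static bool counter_is_usable(void)
{
	unsigned int prev = read_counter(), curr;
	int i;

	for (i = 0; i < 100; i++) {
		curr = read_counter();
		if (curr != prev)
			return true;	/* counter moves: safe to expose via the VDSO */
		prev = curr;
	}
	return false;			/* stuck counter: keep it out of the VDSO */
}

int main(void)
{
	printf("usable: %d\n", counter_is_usable());	/* prints 0 for this stub */
	return 0;
}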
diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c
new file mode 100644
index 000000000..fa2fa3e10
--- /dev/null
+++ b/arch/mips/kernel/csrc-sb1250.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000, 2001 Broadcom Corporation
+ */
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+#include <asm/sibyte/sb1250_scd.h>
+
+#define SB1250_HPT_NUM 3
+#define SB1250_HPT_VALUE M_SCD_TIMER_CNT /* max value */
+
+/*
+ * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
+ * again.
+ */
+static inline u64 sb1250_hpt_get_cycles(void)
+{
+ unsigned int count;
+ void __iomem *addr;
+
+ addr = IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT));
+ count = G_SCD_TIMER_CNT(__raw_readq(addr));
+
+ return SB1250_HPT_VALUE - count;
+}
+
+static u64 sb1250_hpt_read(struct clocksource *cs)
+{
+ return sb1250_hpt_get_cycles();
+}
+
+struct clocksource bcm1250_clocksource = {
+ .name = "bcm1250-counter-3",
+ .rating = 200,
+ .read = sb1250_hpt_read,
+ .mask = CLOCKSOURCE_MASK(23),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u64 notrace sb1250_read_sched_clock(void)
+{
+ return sb1250_hpt_get_cycles();
+}
+
+void __init sb1250_clocksource_init(void)
+{
+ struct clocksource *cs = &bcm1250_clocksource;
+
+ /* Setup hpt using timer #3 but do not enable irq for it */
+ __raw_writeq(0,
+ IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
+ R_SCD_TIMER_CFG)));
+ __raw_writeq(SB1250_HPT_VALUE,
+ IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
+ R_SCD_TIMER_INIT)));
+ __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
+ IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
+ R_SCD_TIMER_CFG)));
+
+ clocksource_register_hz(cs, V_SCD_TIMER_FREQ);
+
+ sched_clock_register(sb1250_read_sched_clock, 23, V_SCD_TIMER_FREQ);
+}
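Timer 3 counts down from SB1250_HPT_VALUE to zero, so sb1250_hpt_get_cycles() subtracts the raw register value to present a monotonically increasing count to the clocksource core. A small illustration with invented register readings:

#include <stdio.h>

#define HPT_MAX 0x7fffff	/* 23-bit counter, mirrors M_SCD_TIMER_CNT */

int main(void)
{
	unsigned int raw_earlier = 0x7ffff0;	/* assumed earlier register value */
	unsigned int raw_later = 0x7fff00;	/* assumed later register value */

	/* The raw value shrinks over time, the reported cycle count grows. */
	printf("cycles earlier: %u\n", HPT_MAX - raw_earlier);	/* 15 */
	printf("cycles later:   %u\n", HPT_MAX - raw_later);	/* 255 */
	return 0;
}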
diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c
new file mode 100644
index 000000000..4a1647ddf
--- /dev/null
+++ b/arch/mips/kernel/early_printk.c
@@ -0,0 +1,41 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2003, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * written by Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/printk.h>
+#include <linux/init.h>
+
+#include <asm/setup.h>
+
+static void early_console_write(struct console *con, const char *s, unsigned n)
+{
+ while (n-- && *s) {
+ if (*s == '\n')
+ prom_putchar('\r');
+ prom_putchar(*s);
+ s++;
+ }
+}
+
+static struct console early_console_prom = {
+ .name = "early",
+ .write = early_console_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1
+};
+
+void __init setup_early_printk(void)
+{
+ if (early_console)
+ return;
+ early_console = &early_console_prom;
+
+ register_console(&early_console_prom);
+}
diff --git a/arch/mips/kernel/early_printk_8250.c b/arch/mips/kernel/early_printk_8250.c
new file mode 100644
index 000000000..567c6ec0c
--- /dev/null
+++ b/arch/mips/kernel/early_printk_8250.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * 8250/16550-type serial ports prom_putchar()
+ *
+ * Copyright (C) 2010 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/io.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <asm/setup.h>
+
+static void __iomem *serial8250_base;
+static unsigned int serial8250_reg_shift;
+static unsigned int serial8250_tx_timeout;
+
+void setup_8250_early_printk_port(unsigned long base, unsigned int reg_shift,
+ unsigned int timeout)
+{
+ serial8250_base = (void __iomem *)base;
+ serial8250_reg_shift = reg_shift;
+ serial8250_tx_timeout = timeout;
+}
+
+static inline u8 serial_in(int offset)
+{
+ return readb(serial8250_base + (offset << serial8250_reg_shift));
+}
+
+static inline void serial_out(int offset, char value)
+{
+ writeb(value, serial8250_base + (offset << serial8250_reg_shift));
+}
+
+void prom_putchar(char c)
+{
+ unsigned int timeout;
+ int status, bits;
+
+ if (!serial8250_base)
+ return;
+
+ timeout = serial8250_tx_timeout;
+ bits = UART_LSR_TEMT | UART_LSR_THRE;
+
+ do {
+ status = serial_in(UART_LSR);
+
+ if (--timeout == 0)
+ break;
+ } while ((status & bits) != bits);
+
+ if (timeout)
+ serial_out(UART_TX, c);
+}
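Nothing is printed until a platform registers its UART, because prom_putchar() bails out while serial8250_base is NULL. The sketch below shows how early platform code might wire this up; the MMIO base, register shift and poll budget are placeholders rather than values for any real board, and the prototype is assumed to come from asm/setup.h as in mainline:

#include <linux/init.h>
#include <asm/setup.h>

/* Hypothetical board hook; none of the values are board-specific reality. */
void __init my_board_setup_early_uart(void)
{
	/* 16550 at an uncached KSEG1 address, registers on a 4-byte stride
	 * (reg_shift = 2), give up after ~10000 LSR polls per character. */
	setup_8250_early_printk_port(0xb8000300UL, 2, 10000);
}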
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
new file mode 100644
index 000000000..7b045d2a0
--- /dev/null
+++ b/arch/mips/kernel/elf.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+
+#include <asm/cpu-features.h>
+#include <asm/cpu-info.h>
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+/* Whether to accept legacy-NaN and 2008-NaN user binaries. */
+bool mips_use_nan_legacy;
+bool mips_use_nan_2008;
+
+/* FPU modes */
+enum {
+ FP_FRE,
+ FP_FR0,
+ FP_FR1,
+};
+
+/**
+ * struct mode_req - ABI FPU mode requirements
+ * @single: The program being loaded needs an FPU but it will only issue
+ * single precision instructions meaning that it can execute in
+ * either FR0 or FR1.
+ * @soft: The soft(-float) requirement means that the program being
+ * loaded has no FPU dependency at all (i.e. it has no
+ * FPU instructions).
+ * @fr1: The program being loaded depends on FPU being in FR=1 mode.
+ * @frdefault: The program being loaded depends on the default FPU mode.
+ * That is FR0 for O32 and FR1 for N32/N64.
+ * @fre: The program being loaded depends on FPU with FRE=1. This mode is
+ * a bridge which uses FR=1 whilst still being able to maintain
+ * full compatibility with pre-existing code using the O32 FP32
+ * ABI.
+ *
+ * More information about the FP ABIs can be found here:
+ *
+ * https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up
+ *
+ */
+
+struct mode_req {
+ bool single;
+ bool soft;
+ bool fr1;
+ bool frdefault;
+ bool fre;
+};
+
+static const struct mode_req fpu_reqs[] = {
+ [MIPS_ABI_FP_ANY] = { true, true, true, true, true },
+ [MIPS_ABI_FP_DOUBLE] = { false, false, false, true, true },
+ [MIPS_ABI_FP_SINGLE] = { true, false, false, false, false },
+ [MIPS_ABI_FP_SOFT] = { false, true, false, false, false },
+ [MIPS_ABI_FP_OLD_64] = { false, false, false, false, false },
+ [MIPS_ABI_FP_XX] = { false, false, true, true, true },
+ [MIPS_ABI_FP_64] = { false, false, true, false, false },
+ [MIPS_ABI_FP_64A] = { false, false, true, false, true }
+};
+
+/*
+ * Mode requirements when .MIPS.abiflags is not present in the ELF.
+ * Not present means that everything is acceptable except FR1.
+ */
+static struct mode_req none_req = { true, true, false, true, true };
+
+int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
+ bool is_interp, struct arch_elf_state *state)
+{
+ union {
+ struct elf32_hdr e32;
+ struct elf64_hdr e64;
+ } *ehdr = _ehdr;
+ struct elf32_phdr *phdr32 = _phdr;
+ struct elf64_phdr *phdr64 = _phdr;
+ struct mips_elf_abiflags_v0 abiflags;
+ bool elf32;
+ u32 flags;
+ int ret;
+ loff_t pos;
+
+ elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
+ flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags;
+
+ /* Let's see if this is an O32 ELF */
+ if (elf32) {
+ if (flags & EF_MIPS_FP64) {
+ /*
+ * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
+ * later if needed
+ */
+ if (is_interp)
+ state->interp_fp_abi = MIPS_ABI_FP_OLD_64;
+ else
+ state->fp_abi = MIPS_ABI_FP_OLD_64;
+ }
+ if (phdr32->p_type != PT_MIPS_ABIFLAGS)
+ return 0;
+
+ if (phdr32->p_filesz < sizeof(abiflags))
+ return -EINVAL;
+ pos = phdr32->p_offset;
+ } else {
+ if (phdr64->p_type != PT_MIPS_ABIFLAGS)
+ return 0;
+ if (phdr64->p_filesz < sizeof(abiflags))
+ return -EINVAL;
+ pos = phdr64->p_offset;
+ }
+
+ ret = kernel_read(elf, &abiflags, sizeof(abiflags), &pos);
+ if (ret < 0)
+ return ret;
+ if (ret != sizeof(abiflags))
+ return -EIO;
+
+ /* Record the required FP ABIs for use by mips_check_elf */
+ if (is_interp)
+ state->interp_fp_abi = abiflags.fp_abi;
+ else
+ state->fp_abi = abiflags.fp_abi;
+
+ return 0;
+}
+
+int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
+ struct arch_elf_state *state)
+{
+ union {
+ struct elf32_hdr e32;
+ struct elf64_hdr e64;
+ } *ehdr = _ehdr;
+ union {
+ struct elf32_hdr e32;
+ struct elf64_hdr e64;
+ } *iehdr = _interp_ehdr;
+ struct mode_req prog_req, interp_req;
+ int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
+ bool elf32;
+ u32 flags;
+
+ elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
+ flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags;
+
+ /*
+ * Determine the NaN personality, reject the binary if not allowed.
+ * Also ensure that any interpreter matches the executable.
+ */
+ if (flags & EF_MIPS_NAN2008) {
+ if (mips_use_nan_2008)
+ state->nan_2008 = 1;
+ else
+ return -ENOEXEC;
+ } else {
+ if (mips_use_nan_legacy)
+ state->nan_2008 = 0;
+ else
+ return -ENOEXEC;
+ }
+ if (has_interpreter) {
+ bool ielf32;
+ u32 iflags;
+
+ ielf32 = iehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
+ iflags = ielf32 ? iehdr->e32.e_flags : iehdr->e64.e_flags;
+
+ if ((flags ^ iflags) & EF_MIPS_NAN2008)
+ return -ELIBBAD;
+ }
+
+ if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
+ return 0;
+
+ fp_abi = state->fp_abi;
+
+ if (has_interpreter) {
+ interp_fp_abi = state->interp_fp_abi;
+
+ abi0 = min(fp_abi, interp_fp_abi);
+ abi1 = max(fp_abi, interp_fp_abi);
+ } else {
+ abi0 = abi1 = fp_abi;
+ }
+
+ if (elf32 && !(flags & EF_MIPS_ABI2)) {
+ /* Default to a mode capable of running code expecting FR=0 */
+ state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0;
+
+ /* Allow all ABIs we know about */
+ max_abi = MIPS_ABI_FP_64A;
+ } else {
+ /* MIPS64 code always uses FR=1, thus the default is easy */
+ state->overall_fp_mode = FP_FR1;
+
+ /* Disallow access to the various FPXX & FP64 ABIs */
+ max_abi = MIPS_ABI_FP_SOFT;
+ }
+
+ if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
+ (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
+ return -ELIBBAD;
+
+ /* It's time to determine the FPU mode requirements */
+ prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0];
+ interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1];
+
+ /*
+ * Check whether the program's and interp's ABIs have a matching FPU
+ * mode requirement.
+ */
+ prog_req.single = interp_req.single && prog_req.single;
+ prog_req.soft = interp_req.soft && prog_req.soft;
+ prog_req.fr1 = interp_req.fr1 && prog_req.fr1;
+ prog_req.frdefault = interp_req.frdefault && prog_req.frdefault;
+ prog_req.fre = interp_req.fre && prog_req.fre;
+
+ /*
+ * Determine the desired FPU mode
+ *
+ * Decision making:
+ *
+ * - We want FR_FRE if FRE=1 and both FR=1 and FR=0 are false. This
+ * means that we have a combination of program and interpreter
+ * that inherently require the hybrid FP mode.
+ * - If FR1 and FRDEFAULT are both true, that means we hit the any-abi or
+ * fpxx case. This is because, in any-ABI (or no-ABI) we have no FPU
+ * instructions so we don't care about the mode. We will simply use
+ * the one preferred by the hardware. In fpxx case, that ABI can
+ * handle both FR=1 and FR=0, so, again, we simply choose the one
+ * preferred by the hardware. Next, if we only use single-precision
+ * FPU instructions, and the default ABI FPU mode is not good
+ * (i.e. a single + any-ABI combination), we again set the FPU mode to
+ * the one preferred by the hardware. Next, if we know that the code
+ * will only use single-precision instructions, shown by single being
+ * true but frdefault being false, then we again set the FPU mode to
+ * the one that is preferred by the hardware.
+ * - We want FP_FR1 if that's the only matching mode and the default one
+ * is not good.
+ * - Return with -ELIBBAD if we can't find a matching FPU mode.
+ */
+ if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1)
+ state->overall_fp_mode = FP_FRE;
+ else if ((prog_req.fr1 && prog_req.frdefault) ||
+ (prog_req.single && !prog_req.frdefault))
+ /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
+ state->overall_fp_mode = ((raw_current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+ cpu_has_mips_r2_r6) ?
+ FP_FR1 : FP_FR0;
+ else if (prog_req.fr1)
+ state->overall_fp_mode = FP_FR1;
+ else if (!prog_req.fre && !prog_req.frdefault &&
+ !prog_req.fr1 && !prog_req.single && !prog_req.soft)
+ return -ELIBBAD;
+
+ return 0;
+}
+
+static inline void set_thread_fp_mode(int hybrid, int regs32)
+{
+ if (hybrid)
+ set_thread_flag(TIF_HYBRID_FPREGS);
+ else
+ clear_thread_flag(TIF_HYBRID_FPREGS);
+ if (regs32)
+ set_thread_flag(TIF_32BIT_FPREGS);
+ else
+ clear_thread_flag(TIF_32BIT_FPREGS);
+}
+
+void mips_set_personality_fp(struct arch_elf_state *state)
+{
+ /*
+ * This function is only ever called for O32 ELFs so we should
+ * not be worried about N32/N64 binaries.
+ */
+
+ if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
+ return;
+
+ switch (state->overall_fp_mode) {
+ case FP_FRE:
+ set_thread_fp_mode(1, 0);
+ break;
+ case FP_FR0:
+ set_thread_fp_mode(0, 1);
+ break;
+ case FP_FR1:
+ set_thread_fp_mode(0, 0);
+ break;
+ default:
+ BUG();
+ }
+}
+
+/*
+ * Select the IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode
+ * in FCSR according to the ELF NaN personality.
+ */
+void mips_set_personality_nan(struct arch_elf_state *state)
+{
+ struct cpuinfo_mips *c = &boot_cpu_data;
+ struct task_struct *t = current;
+
+ t->thread.fpu.fcr31 = c->fpu_csr31;
+ switch (state->nan_2008) {
+ case 0:
+ break;
+ case 1:
+ if (!(c->fpu_msk31 & FPU_CSR_NAN2008))
+ t->thread.fpu.fcr31 |= FPU_CSR_NAN2008;
+ if (!(c->fpu_msk31 & FPU_CSR_ABS2008))
+ t->thread.fpu.fcr31 |= FPU_CSR_ABS2008;
+ break;
+ default:
+ BUG();
+ }
+}
+
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+int mips_elf_read_implies_exec(void *elf_ex, int exstack)
+{
+ if (exstack != EXSTACK_DISABLE_X) {
+ /* The binary doesn't request a non-executable stack */
+ return 1;
+ }
+
+ if (!cpu_has_rixi) {
+ /* The CPU doesn't support non-executable memory */
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mips_elf_read_implies_exec);
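To make the requirement intersection in arch_check_elf() concrete, here is a toy walk-through for one hypothetical pairing, an FPXX object combined with an FP64A one. The two initialisers are copied from the fpu_reqs[] table above; everything else is standalone illustration, not kernel code:

#include <stdbool.h>
#include <stdio.h>

struct req { bool single, soft, fr1, frdefault, fre; };

int main(void)
{
	struct req a = { false, false, true, true, true };	/* MIPS_ABI_FP_XX */
	struct req b = { false, false, true, false, true };	/* MIPS_ABI_FP_64A */

	/* The same element-wise AND that arch_check_elf() performs. */
	a.single = a.single && b.single;
	a.soft = a.soft && b.soft;
	a.fr1 = a.fr1 && b.fr1;
	a.frdefault = a.frdefault && b.frdefault;
	a.fre = a.fre && b.fre;

	/* frdefault drops out while fr1 survives, so the decision chain
	 * above settles on FP_FR1 for this pairing. */
	printf("single=%d soft=%d fr1=%d frdefault=%d fre=%d\n",
	       a.single, a.soft, a.fr1, a.frdefault, a.fre);
	return 0;
}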
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
new file mode 100644
index 000000000..4b896f502
--- /dev/null
+++ b/arch/mips/kernel/entry.S
@@ -0,0 +1,186 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/compiler.h>
+#include <asm/irqflags.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/isadep.h>
+#include <asm/thread_info.h>
+#include <asm/war.h>
+
+#ifndef CONFIG_PREEMPTION
+#define resume_kernel restore_all
+#else
+#define __ret_from_irq ret_from_exception
+#endif
+
+ .text
+ .align 5
+#ifndef CONFIG_PREEMPTION
+FEXPORT(ret_from_exception)
+ local_irq_disable # preempt stop
+ b __ret_from_irq
+#endif
+FEXPORT(ret_from_irq)
+ LONG_S s0, TI_REGS($28)
+FEXPORT(__ret_from_irq)
+/*
+ * We can be coming here from a syscall done in the kernel space,
+ * e.g. a failed kernel_execve().
+ */
+resume_userspace_check:
+ LONG_L t0, PT_STATUS(sp) # returning to kernel mode?
+ andi t0, t0, KU_USER
+ beqz t0, resume_kernel
+
+resume_userspace:
+ local_irq_disable # make sure we don't miss an
+ # interrupt setting need_resched
+ # between sampling and return
+ LONG_L a2, TI_FLAGS($28) # current->work
+ andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace)
+ bnez t0, work_pending
+ j restore_all
+
+#ifdef CONFIG_PREEMPTION
+resume_kernel:
+ local_irq_disable
+ lw t0, TI_PRE_COUNT($28)
+ bnez t0, restore_all
+ LONG_L t0, TI_FLAGS($28)
+ andi t1, t0, _TIF_NEED_RESCHED
+ beqz t1, restore_all
+ LONG_L t0, PT_STATUS(sp) # Interrupts off?
+ andi t0, 1
+ beqz t0, restore_all
+ PTR_LA ra, restore_all
+ j preempt_schedule_irq
+#endif
+
+FEXPORT(ret_from_kernel_thread)
+ jal schedule_tail # a0 = struct task_struct *prev
+ move a0, s1
+ jal s0
+ j syscall_exit
+
+FEXPORT(ret_from_fork)
+ jal schedule_tail # a0 = struct task_struct *prev
+
+FEXPORT(syscall_exit)
+#ifdef CONFIG_DEBUG_RSEQ
+ move a0, sp
+ jal rseq_syscall
+#endif
+ local_irq_disable # make sure need_resched and
+ # signals don't change between
+ # sampling and return
+ LONG_L a2, TI_FLAGS($28) # current->work
+ li t0, _TIF_ALLWORK_MASK
+ and t0, a2, t0
+ bnez t0, syscall_exit_work
+
+restore_all: # restore full frame
+ .set noat
+ RESTORE_TEMP
+ RESTORE_AT
+ RESTORE_STATIC
+restore_partial: # restore partial frame
+#ifdef CONFIG_TRACE_IRQFLAGS
+ SAVE_STATIC
+ SAVE_AT
+ SAVE_TEMP
+ LONG_L v0, PT_STATUS(sp)
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ and v0, ST0_IEP
+#else
+ and v0, ST0_IE
+#endif
+ beqz v0, 1f
+ jal trace_hardirqs_on
+ b 2f
+1: jal trace_hardirqs_off
+2:
+ RESTORE_TEMP
+ RESTORE_AT
+ RESTORE_STATIC
+#endif
+ RESTORE_SOME
+ RESTORE_SP_AND_RET
+ .set at
+
+work_pending:
+ andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
+ beqz t0, work_notifysig
+work_resched:
+ TRACE_IRQS_OFF
+ jal schedule
+
+ local_irq_disable # make sure need_resched and
+ # signals don't change between
+ # sampling and return
+ LONG_L a2, TI_FLAGS($28)
+ andi t0, a2, _TIF_WORK_MASK # is there any work to be done
+ # other than syscall tracing?
+ beqz t0, restore_all
+ andi t0, a2, _TIF_NEED_RESCHED
+ bnez t0, work_resched
+
+work_notifysig: # deal with pending signals and
+ # notify-resume requests
+ move a0, sp
+ li a1, 0
+ jal do_notify_resume # a2 already loaded
+ j resume_userspace_check
+
+FEXPORT(syscall_exit_partial)
+#ifdef CONFIG_DEBUG_RSEQ
+ move a0, sp
+ jal rseq_syscall
+#endif
+ local_irq_disable # make sure need_resched doesn't
+ # change between sampling and return
+ LONG_L a2, TI_FLAGS($28) # current->work
+ li t0, _TIF_ALLWORK_MASK
+ and t0, a2
+ beqz t0, restore_partial
+ SAVE_STATIC
+syscall_exit_work:
+ LONG_L t0, PT_STATUS(sp) # returning to kernel mode?
+ andi t0, t0, KU_USER
+ beqz t0, resume_kernel
+ li t0, _TIF_WORK_SYSCALL_EXIT
+ and t0, a2 # a2 is preloaded with TI_FLAGS
+ beqz t0, work_pending # trace bit set?
+ local_irq_enable # could let syscall_trace_leave()
+ # call schedule() instead
+ TRACE_IRQS_ON
+ move a0, sp
+ jal syscall_trace_leave
+ b resume_userspace
+
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
+ defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_MIPS_MT)
+
+/*
+ * MIPS32R2 Instruction Hazard Barrier - must be called
+ *
+ * For C code use the inline version named instruction_hazard().
+ */
+LEAF(mips_ihb)
+ .set MIPS_ISA_LEVEL_RAW
+ jr.hb ra
+ nop
+ END(mips_ihb)
+
+#endif /* CONFIG_CPU_MIPSR2 - CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
diff --git a/arch/mips/kernel/fpu-probe.c b/arch/mips/kernel/fpu-probe.c
new file mode 100644
index 000000000..e689d6a83
--- /dev/null
+++ b/arch/mips/kernel/fpu-probe.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Processor capabilities determination functions.
+ *
+ * Copyright (C) xxxx the Anonymous
+ * Copyright (C) 1994 - 2006 Ralf Baechle
+ * Copyright (C) 2003, 2004 Maciej W. Rozycki
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <asm/bugs.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
+#include <asm/elf.h>
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+
+#include "fpu-probe.h"
+
+/*
+ * Get the FPU Implementation/Revision.
+ */
+static inline unsigned long cpu_get_fpu_id(void)
+{
+ unsigned long tmp, fpu_id;
+
+ tmp = read_c0_status();
+ __enable_fpu(FPU_AS_IS);
+ fpu_id = read_32bit_cp1_register(CP1_REVISION);
+ write_c0_status(tmp);
+ return fpu_id;
+}
+
+/*
+ * Check if the CPU has an external FPU.
+ */
+int __cpu_has_fpu(void)
+{
+ return (cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE;
+}
+
+/*
+ * Determine the FCSR mask for FPU hardware.
+ */
+static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c)
+{
+ unsigned long sr, mask, fcsr, fcsr0, fcsr1;
+
+ fcsr = c->fpu_csr31;
+ mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM;
+
+ sr = read_c0_status();
+ __enable_fpu(FPU_AS_IS);
+
+ fcsr0 = fcsr & mask;
+ write_32bit_cp1_register(CP1_STATUS, fcsr0);
+ fcsr0 = read_32bit_cp1_register(CP1_STATUS);
+
+ fcsr1 = fcsr | ~mask;
+ write_32bit_cp1_register(CP1_STATUS, fcsr1);
+ fcsr1 = read_32bit_cp1_register(CP1_STATUS);
+
+ write_32bit_cp1_register(CP1_STATUS, fcsr);
+
+ write_c0_status(sr);
+
+ c->fpu_msk31 = ~(fcsr0 ^ fcsr1) & ~mask;
+}
+
+/*
+ * Determine the IEEE 754 NaN encodings and ABS.fmt/NEG.fmt execution modes
+ * supported by FPU hardware.
+ */
+static void cpu_set_fpu_2008(struct cpuinfo_mips *c)
+{
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
+ unsigned long sr, fir, fcsr, fcsr0, fcsr1;
+
+ sr = read_c0_status();
+ __enable_fpu(FPU_AS_IS);
+
+ fir = read_32bit_cp1_register(CP1_REVISION);
+ if (fir & MIPS_FPIR_HAS2008) {
+ fcsr = read_32bit_cp1_register(CP1_STATUS);
+
+ /*
+ * MAC2008 toolchain never landed in real world, so
+ * we're only testing whether it can be disabled and
+ * don't try to enabled it.
+ */
+ fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008 |
+ FPU_CSR_MAC2008);
+ write_32bit_cp1_register(CP1_STATUS, fcsr0);
+ fcsr0 = read_32bit_cp1_register(CP1_STATUS);
+
+ fcsr1 = fcsr | FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+ write_32bit_cp1_register(CP1_STATUS, fcsr1);
+ fcsr1 = read_32bit_cp1_register(CP1_STATUS);
+
+ write_32bit_cp1_register(CP1_STATUS, fcsr);
+
+ if (c->isa_level & (MIPS_CPU_ISA_M32R2 |
+ MIPS_CPU_ISA_M64R2)) {
+ /*
+ * The bit for MAC2008 might be reused by R6
+ * in future, so we only test for R2-R5.
+ */
+ if (fcsr0 & FPU_CSR_MAC2008)
+ c->options |= MIPS_CPU_MAC_2008_ONLY;
+ }
+
+ if (!(fcsr0 & FPU_CSR_NAN2008))
+ c->options |= MIPS_CPU_NAN_LEGACY;
+ if (fcsr1 & FPU_CSR_NAN2008)
+ c->options |= MIPS_CPU_NAN_2008;
+
+ if ((fcsr0 ^ fcsr1) & FPU_CSR_ABS2008)
+ c->fpu_msk31 &= ~FPU_CSR_ABS2008;
+ else
+ c->fpu_csr31 |= fcsr & FPU_CSR_ABS2008;
+
+ if ((fcsr0 ^ fcsr1) & FPU_CSR_NAN2008)
+ c->fpu_msk31 &= ~FPU_CSR_NAN2008;
+ else
+ c->fpu_csr31 |= fcsr & FPU_CSR_NAN2008;
+ } else {
+ c->options |= MIPS_CPU_NAN_LEGACY;
+ }
+
+ write_c0_status(sr);
+ } else {
+ c->options |= MIPS_CPU_NAN_LEGACY;
+ }
+}
+
+/*
+ * IEEE 754 conformance mode to use. Affects the NaN encoding and the
+ * ABS.fmt/NEG.fmt execution mode.
+ */
+static enum { STRICT, LEGACY, STD2008, RELAXED } ieee754 = STRICT;
+
+/*
+ * Set the IEEE 754 NaN encodings and the ABS.fmt/NEG.fmt execution modes
+ * to support by the FPU emulator according to the IEEE 754 conformance
+ * mode selected. Note that "relaxed" straps the emulator so that it
+ * allows 2008-NaN binaries even for legacy processors.
+ */
+static void cpu_set_nofpu_2008(struct cpuinfo_mips *c)
+{
+ c->options &= ~(MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY);
+ c->fpu_csr31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008);
+ c->fpu_msk31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008);
+
+ switch (ieee754) {
+ case STRICT:
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
+ c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY;
+ } else {
+ c->options |= MIPS_CPU_NAN_LEGACY;
+ c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+ }
+ break;
+ case LEGACY:
+ c->options |= MIPS_CPU_NAN_LEGACY;
+ c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+ break;
+ case STD2008:
+ c->options |= MIPS_CPU_NAN_2008;
+ c->fpu_csr31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+ c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+ break;
+ case RELAXED:
+ c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY;
+ break;
+ }
+}
+
+/*
+ * Override the IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode
+ * according to the "ieee754=" parameter.
+ */
+static void cpu_set_nan_2008(struct cpuinfo_mips *c)
+{
+ switch (ieee754) {
+ case STRICT:
+ mips_use_nan_legacy = !!cpu_has_nan_legacy;
+ mips_use_nan_2008 = !!cpu_has_nan_2008;
+ break;
+ case LEGACY:
+ mips_use_nan_legacy = !!cpu_has_nan_legacy;
+ mips_use_nan_2008 = !cpu_has_nan_legacy;
+ break;
+ case STD2008:
+ mips_use_nan_legacy = !cpu_has_nan_2008;
+ mips_use_nan_2008 = !!cpu_has_nan_2008;
+ break;
+ case RELAXED:
+ mips_use_nan_legacy = true;
+ mips_use_nan_2008 = true;
+ break;
+ }
+}
+
+/*
+ * IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode override
+ * settings:
+ *
+ * strict: accept binaries that request a NaN encoding supported by the FPU
+ * legacy: only accept legacy-NaN binaries
+ * 2008: only accept 2008-NaN binaries
+ * relaxed: accept any binaries regardless of whether supported by the FPU
+ */
+static int __init ieee754_setup(char *s)
+{
+ if (!s)
+ return -1;
+ else if (!strcmp(s, "strict"))
+ ieee754 = STRICT;
+ else if (!strcmp(s, "legacy"))
+ ieee754 = LEGACY;
+ else if (!strcmp(s, "2008"))
+ ieee754 = STD2008;
+ else if (!strcmp(s, "relaxed"))
+ ieee754 = RELAXED;
+ else
+ return -1;
+
+ if (!(boot_cpu_data.options & MIPS_CPU_FPU))
+ cpu_set_nofpu_2008(&boot_cpu_data);
+ cpu_set_nan_2008(&boot_cpu_data);
+
+ return 0;
+}
+
+early_param("ieee754", ieee754_setup);
+
+/*
+ * Set the FIR feature flags for the FPU emulator.
+ */
+static void cpu_set_nofpu_id(struct cpuinfo_mips *c)
+{
+ u32 value;
+
+ value = 0;
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6))
+ value |= MIPS_FPIR_D | MIPS_FPIR_S;
+ if (c->isa_level & (MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6))
+ value |= MIPS_FPIR_F64 | MIPS_FPIR_L | MIPS_FPIR_W;
+ if (c->options & MIPS_CPU_NAN_2008)
+ value |= MIPS_FPIR_HAS2008;
+ c->fpu_id = value;
+}
+
+/* Determined FPU emulator mask to use for the boot CPU with "nofpu". */
+static unsigned int mips_nofpu_msk31;
+
+/*
+ * Set options for FPU hardware.
+ */
+void cpu_set_fpu_opts(struct cpuinfo_mips *c)
+{
+ c->fpu_id = cpu_get_fpu_id();
+ mips_nofpu_msk31 = c->fpu_msk31;
+
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
+ if (c->fpu_id & MIPS_FPIR_3D)
+ c->ases |= MIPS_ASE_MIPS3D;
+ if (c->fpu_id & MIPS_FPIR_UFRP)
+ c->options |= MIPS_CPU_UFR;
+ if (c->fpu_id & MIPS_FPIR_FREP)
+ c->options |= MIPS_CPU_FRE;
+ }
+
+ cpu_set_fpu_fcsr_mask(c);
+ cpu_set_fpu_2008(c);
+ cpu_set_nan_2008(c);
+}
+
+/*
+ * Set options for the FPU emulator.
+ */
+void cpu_set_nofpu_opts(struct cpuinfo_mips *c)
+{
+ c->options &= ~MIPS_CPU_FPU;
+ c->fpu_msk31 = mips_nofpu_msk31;
+
+ cpu_set_nofpu_2008(c);
+ cpu_set_nan_2008(c);
+ cpu_set_nofpu_id(c);
+}
+
+int mips_fpu_disabled;
+
+static int __init fpu_disable(char *s)
+{
+ cpu_set_nofpu_opts(&boot_cpu_data);
+ mips_fpu_disabled = 1;
+
+ return 1;
+}
+
+__setup("nofpu", fpu_disable);
+
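cpu_set_fpu_fcsr_mask() discovers which FCSR bits are hard-wired by writing the non-exception/rounding bits once as zeroes and once as ones and comparing the readbacks: any such bit that reads back the same both times is fixed and gets set in fpu_msk31. A worked example with invented readback values (the mask below is a stand-in, not the real FPU_CSR_* constants):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0x0003ffff;	/* stand-in for the X|E|S|RM field mask */
	uint32_t fcsr0 = 0x00000000;	/* readback after writing the other bits as 0 (assumed) */
	uint32_t fcsr1 = 0xfff40000;	/* readback after writing them as 1 (assumed) */
	uint32_t msk31 = ~(fcsr0 ^ fcsr1) & ~mask;

	/* Only bit 19 failed to toggle between the two writes, so only it is
	 * reported as fixed. */
	printf("fpu_msk31 = 0x%08x\n", msk31);	/* 0x00080000 */
	return 0;
}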
diff --git a/arch/mips/kernel/fpu-probe.h b/arch/mips/kernel/fpu-probe.h
new file mode 100644
index 000000000..951ce5089
--- /dev/null
+++ b/arch/mips/kernel/fpu-probe.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <linux/kernel.h>
+
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+extern int mips_fpu_disabled;
+
+int __cpu_has_fpu(void);
+void cpu_set_fpu_opts(struct cpuinfo_mips *c);
+void cpu_set_nofpu_opts(struct cpuinfo_mips *c);
+
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+#define mips_fpu_disabled 1
+
+static inline unsigned long cpu_get_fpu_id(void)
+{
+ return FPIR_IMP_NONE;
+}
+
+static inline int __cpu_has_fpu(void)
+{
+ return 0;
+}
+
+static inline void cpu_set_fpu_opts(struct cpuinfo_mips *c)
+{
+ /* no-op */
+}
+
+static inline void cpu_set_nofpu_opts(struct cpuinfo_mips *c)
+{
+ /* no-op */
+}
+
+#endif /* CONFIG_MIPS_FP_SUPPORT */
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
new file mode 100644
index 000000000..f57e68f40
--- /dev/null
+++ b/arch/mips/kernel/ftrace.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ *
+ * Thanks goes to Steven Rostedt for writing the original x86 version.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/ftrace.h>
+#include <linux/syscalls.h>
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
+#include <asm/syscall.h>
+#include <asm/uasm.h>
+#include <asm/unistd.h>
+
+#include <asm-generic/sections.h>
+
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+/* Arch override because MIPS doesn't need to run this from stop_machine() */
+void arch_ftrace_update_code(int command)
+{
+ ftrace_modify_all_code(command);
+}
+
+#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
+#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
+#define JUMP_RANGE_MASK ((1UL << 28) - 1)
+
+#define INSN_NOP 0x00000000 /* nop */
+#define INSN_JAL(addr) \
+ ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
+
+static unsigned int insn_jal_ftrace_caller __read_mostly;
+static unsigned int insn_la_mcount[2] __read_mostly;
+static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
+
+static inline void ftrace_dyn_arch_init_insns(void)
+{
+ u32 *buf;
+ unsigned int v1;
+
+ /* la v1, _mcount */
+ v1 = 3;
+ buf = (u32 *)&insn_la_mcount[0];
+ UASM_i_LA(&buf, v1, MCOUNT_ADDR);
+
+ /* jal (ftrace_caller + 8), jump over the first two instruction */
+ buf = (u32 *)&insn_jal_ftrace_caller;
+ uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* j ftrace_graph_caller */
+ buf = (u32 *)&insn_j_ftrace_graph_caller;
+ uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
+#endif
+}
+
+static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
+{
+ int faulted;
+ mm_segment_t old_fs;
+
+ /* *(unsigned int *)ip = new_code; */
+ safe_store_code(new_code, ip, faulted);
+
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ flush_icache_range(ip, ip + 8);
+ set_fs(old_fs);
+
+ return 0;
+}
+
+#ifndef CONFIG_64BIT
+static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
+ unsigned int new_code2)
+{
+ int faulted;
+ mm_segment_t old_fs;
+
+ safe_store_code(new_code1, ip, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ ip += 4;
+ safe_store_code(new_code2, ip, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ ip -= 4;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ flush_icache_range(ip, ip + 8);
+ set_fs(old_fs);
+
+ return 0;
+}
+
+static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
+ unsigned int new_code2)
+{
+ int faulted;
+ mm_segment_t old_fs;
+
+ ip += 4;
+ safe_store_code(new_code2, ip, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ ip -= 4;
+ safe_store_code(new_code1, ip, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ flush_icache_range(ip, ip + 8);
+ set_fs(old_fs);
+
+ return 0;
+}
+#endif
+
+/*
+ * The details about the calling site of mcount on MIPS
+ *
+ * 1. For kernel:
+ *
+ * move at, ra
+ * jal _mcount --> nop
+ * sub sp, sp, 8 --> nop (CONFIG_32BIT)
+ *
+ * 2. For modules:
+ *
+ * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
+ *
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT)
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ * sub sp, sp, 8
+ * 1: offset = 5 instructions
+ * 2.2 For the Other situations
+ *
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT)
+ * move at, ra
+ * jalr v1
+ * nop | move $12, ra_address | sub sp, sp, 8
+ * 1: offset = 4 instructions
+ */
+
+#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
+
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int new;
+ unsigned long ip = rec->ip;
+
+ /*
+ * If ip is in kernel space, no long call, otherwise, long call is
+ * needed.
+ */
+ new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
+#ifdef CONFIG_64BIT
+ return ftrace_modify_code(ip, new);
+#else
+ /*
+ * On 32 bit MIPS platforms, gcc adds a stack adjust
+ * instruction in the delay slot after the branch to
+ * mcount and expects mcount to restore the sp on return.
+ * This is based on a legacy API and does nothing but
+ * waste instructions so it's being removed at runtime.
+ */
+ return ftrace_modify_code_2(ip, new, INSN_NOP);
+#endif
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int new;
+ unsigned long ip = rec->ip;
+
+ new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
+
+#ifdef CONFIG_64BIT
+ return ftrace_modify_code(ip, new);
+#else
+ return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
+ INSN_NOP : insn_la_mcount[1]);
+#endif
+}
+
+#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned int new;
+
+ new = INSN_JAL((unsigned long)func);
+
+ return ftrace_modify_code(FTRACE_CALL_IP, new);
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+ /* Encode the instructions when booting */
+ ftrace_dyn_arch_init_insns();
+
+ /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
+ ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
+
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+extern void ftrace_graph_call(void);
+#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
+ insn_j_ftrace_graph_caller);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifndef KBUILD_MCOUNT_RA_ADDRESS
+
+#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */
+#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
+#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
+
+unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+ old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
+{
+ unsigned long sp, ip, tmp;
+ unsigned int code;
+ int faulted;
+
+ /*
+ * For module, move the ip from the return address after the
+ * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
+ * kernel, move after the instruction "move ra, at"(offset is 16)
+ */
+ ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);
+
+ /*
+ * search the text until finding the non-store instruction or "s{d,w}
+ * ra, offset(sp)" instruction
+ */
+ do {
+ /* get the code at "ip": code = *(unsigned int *)ip; */
+ safe_load_code(code, ip, faulted);
+
+ if (unlikely(faulted))
+ return 0;
+ /*
+ * If we hit the non-store instruction before finding where the
+ * ra is stored, then this is a leaf function and it does not
+ * store the ra on the stack
+ */
+ if ((code & S_R_SP) != S_R_SP)
+ return parent_ra_addr;
+
+ /* Move to the next instruction */
+ ip -= 4;
+ } while ((code & S_RA_SP) != S_RA_SP);
+
+ sp = fp + (code & OFFSET_MASK);
+
+ /* tmp = *(unsigned long *)sp; */
+ safe_load_stack(tmp, sp, faulted);
+ if (unlikely(faulted))
+ return 0;
+
+ if (tmp == old_parent_ra)
+ return sp;
+ return 0;
+}
+
+#endif /* !KBUILD_MCOUNT_RA_ADDRESS */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
+ unsigned long fp)
+{
+ unsigned long old_parent_ra;
+ unsigned long return_hooker = (unsigned long)
+ &return_to_handler;
+ int faulted, insns;
+
+ if (unlikely(ftrace_graph_is_dead()))
+ return;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * "parent_ra_addr" is the stack address where the return address of
+ * the caller of _mcount is saved.
+ *
+ * If gcc < 4.5, a leaf function does not save the return address
+ * in the stack address, so we "emulate" one in _mcount's stack space,
+ * and hijack it directly.
+ * For a non-leaf function, it does save the return address to its own
+ * stack space, so we can not hijack it directly, but need to find the
+ * real stack address, which is done by ftrace_get_parent_ra_addr().
+ *
+ * If gcc >= 4.5, with the new -mmcount-ra-address option, for a
+ * non-leaf function, the location of the return address will be saved
+ * to $12 for us.
+ * For a leaf function, it just puts a zero into $12, so we handle
+ * it in ftrace_graph_caller() of mcount.S.
+ */
+
+ /* old_parent_ra = *parent_ra_addr; */
+ safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
+ if (unlikely(faulted))
+ goto out;
+#ifndef KBUILD_MCOUNT_RA_ADDRESS
+ parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
+ old_parent_ra, (unsigned long)parent_ra_addr, fp);
+ /*
+ * If we fail to get the stack address of the non-leaf function's
+ * ra, stop the function graph tracer and return
+ */
+ if (parent_ra_addr == NULL)
+ goto out;
+#endif
+ /* *parent_ra_addr = return_hooker; */
+ safe_store_stack(return_hooker, parent_ra_addr, faulted);
+ if (unlikely(faulted))
+ goto out;
+
+ /*
+ * Get the recorded ip of the current mcount calling site in the
+ * __mcount_loc section, which will be used to filter the function
+ * entries configured through the tracing/set_graph_function interface.
+ */
+
+ insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+ self_ra -= (MCOUNT_INSN_SIZE * insns);
+
+ if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
+ *parent_ra_addr = old_parent_ra;
+ return;
+out:
+ ftrace_graph_stop();
+ WARN_ON(1);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+#ifdef CONFIG_32BIT
+unsigned long __init arch_syscall_addr(int nr)
+{
+ return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
+}
+#endif
+
+#ifdef CONFIG_64BIT
+
+unsigned long __init arch_syscall_addr(int nr)
+{
+#ifdef CONFIG_MIPS32_N32
+ if (nr >= __NR_N32_Linux && nr < __NR_N32_Linux + __NR_N32_Linux_syscalls)
+ return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
+#endif
+ if (nr >= __NR_64_Linux && nr < __NR_64_Linux + __NR_64_Linux_syscalls)
+ return (unsigned long)sys_call_table[nr - __NR_64_Linux];
+#ifdef CONFIG_MIPS32_O32
+ if (nr >= __NR_O32_Linux && nr < __NR_O32_Linux + __NR_O32_Linux_syscalls)
+ return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
+#endif
+
+ return (unsigned long) &sys_ni_syscall;
+}
+#endif
+
+#endif /* CONFIG_FTRACE_SYSCALLS */
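INSN_JAL() packs bits 27..2 of the target address into the 26-bit jump field; the remaining upper bits are taken from the address of the delay slot, which is why addresses are first masked with JUMP_RANGE_MASK (a 256 MiB window). A standalone illustration with an arbitrary example address:

#include <stdio.h>

#define JAL		0x0c000000u
#define ADDR_MASK	0x03ffffffu	/* low 26 bits of the instruction word */

int main(void)
{
	unsigned long target = 0x80100000UL;	/* hypothetical _mcount address */
	unsigned int insn = JAL | (((unsigned int)target >> 2) & ADDR_MASK);

	printf("jal encoding: 0x%08x\n", insn);	/* 0x0c040000 */
	return 0;
}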
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
new file mode 100644
index 000000000..bcce32a3d
--- /dev/null
+++ b/arch/mips/kernel/genex.S
@@ -0,0 +1,682 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2002, 2007 Maciej W. Rozycki
+ * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/init.h>
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/irqflags.h>
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/sync.h>
+#include <asm/war.h>
+#include <asm/thread_info.h>
+
+ __INIT
+
+/*
+ * General exception vector for all other CPUs.
+ *
+ * Be careful when changing this, it has to be at most 128 bytes
+ * to fit into space reserved for the exception handler.
+ */
+NESTED(except_vec3_generic, 0, sp)
+ .set push
+ .set noat
+ mfc0 k1, CP0_CAUSE
+ andi k1, k1, 0x7c
+#ifdef CONFIG_64BIT
+ dsll k1, k1, 1
+#endif
+ PTR_L k0, exception_handlers(k1)
+ jr k0
+ .set pop
+ END(except_vec3_generic)
+
+/*
+ * General exception handler for CPUs with virtual coherency exception.
+ *
+ * Be careful when changing this, it has to be at most 256 (as a special
+ * exception) bytes to fit into space reserved for the exception handler.
+ */
+NESTED(except_vec3_r4000, 0, sp)
+ .set push
+ .set arch=r4000
+ .set noat
+ mfc0 k1, CP0_CAUSE
+ li k0, 31<<2
+ andi k1, k1, 0x7c
+ .set push
+ .set noreorder
+ .set nomacro
+ beq k1, k0, handle_vced
+ li k0, 14<<2
+ beq k1, k0, handle_vcei
+#ifdef CONFIG_64BIT
+ dsll k1, k1, 1
+#endif
+ .set pop
+ PTR_L k0, exception_handlers(k1)
+ jr k0
+
+ /*
+ * Big shit, we now may have two dirty primary cache lines for the same
+ * physical address. We can safely invalidate the line pointed to by
+ * c0_badvaddr because after return from this exception handler the
+ * load / store will be re-executed.
+ */
+handle_vced:
+ MFC0 k0, CP0_BADVADDR
+ li k1, -4 # Is this ...
+ and k0, k1 # ... really needed?
+ mtc0 zero, CP0_TAGLO
+ cache Index_Store_Tag_D, (k0)
+ cache Hit_Writeback_Inv_SD, (k0)
+#ifdef CONFIG_PROC_FS
+ PTR_LA k0, vced_count
+ lw k1, (k0)
+ addiu k1, 1
+ sw k1, (k0)
+#endif
+ eret
+
+handle_vcei:
+ MFC0 k0, CP0_BADVADDR
+ cache Hit_Writeback_Inv_SD, (k0) # also cleans pi
+#ifdef CONFIG_PROC_FS
+ PTR_LA k0, vcei_count
+ lw k1, (k0)
+ addiu k1, 1
+ sw k1, (k0)
+#endif
+ eret
+ .set pop
+ END(except_vec3_r4000)
+
+ __FINIT
+
+ .align 5 /* 32 byte rollback region */
+LEAF(__r4k_wait)
+ .set push
+ .set noreorder
+ /* start of rollback region */
+ LONG_L t0, TI_FLAGS($28)
+ nop
+ andi t0, _TIF_NEED_RESCHED
+ bnez t0, 1f
+ nop
+ nop
+ nop
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+ nop
+ nop
+ nop
+#endif
+ .set MIPS_ISA_ARCH_LEVEL_RAW
+ wait
+ /* end of rollback region (the region size must be power of two) */
+1:
+ jr ra
+ nop
+ .set pop
+ END(__r4k_wait)
+
+ .macro BUILD_ROLLBACK_PROLOGUE handler
+ FEXPORT(rollback_\handler)
+ .set push
+ .set noat
+ MFC0 k0, CP0_EPC
+ PTR_LA k1, __r4k_wait
+ ori k0, 0x1f /* 32 byte rollback region */
+ xori k0, 0x1f
+ bne k0, k1, \handler
+ MTC0 k0, CP0_EPC
+ .set pop
+ .endm
+
+ .align 5
+BUILD_ROLLBACK_PROLOGUE handle_int
+NESTED(handle_int, PT_SIZE, sp)
+ .cfi_signal_frame
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /*
+ * Check to see if the interrupted code has just disabled
+ * interrupts and ignore this interrupt for now if so.
+ *
+ * local_irq_disable() disables interrupts and then calls
+ * trace_hardirqs_off() to track the state. If an interrupt is taken
+ * after interrupts are disabled but before the state is updated
+ * it will appear to restore_all that it is incorrectly returning with
+ * interrupts disabled
+ */
+ .set push
+ .set noat
+ mfc0 k0, CP0_STATUS
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ and k0, ST0_IEP
+ bnez k0, 1f
+
+ mfc0 k0, CP0_EPC
+ .set noreorder
+ j k0
+ rfe
+#else
+ and k0, ST0_IE
+ bnez k0, 1f
+
+ eret
+#endif
+1:
+ .set pop
+#endif
+ SAVE_ALL docfi=1
+ CLI
+ TRACE_IRQS_OFF
+
+ LONG_L s0, TI_REGS($28)
+ LONG_S sp, TI_REGS($28)
+
+ /*
+ * SAVE_ALL ensures we are using a valid kernel stack for the thread.
+ * Check if we are already using the IRQ stack.
+ */
+ move s1, sp # Preserve the sp
+
+ /* Get IRQ stack for this CPU */
+ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+ lui k1, %hi(irq_stack)
+#else
+ lui k1, %highest(irq_stack)
+ daddiu k1, %higher(irq_stack)
+ dsll k1, 16
+ daddiu k1, %hi(irq_stack)
+ dsll k1, 16
+#endif
+ LONG_SRL k0, SMP_CPUID_PTRSHIFT
+ LONG_ADDU k1, k0
+ LONG_L t0, %lo(irq_stack)(k1)
+
+ # Check if already on IRQ stack
+ PTR_LI t1, ~(_THREAD_SIZE-1)
+ and t1, t1, sp
+ beq t0, t1, 2f
+
+ /* Switch to IRQ stack */
+ li t1, _IRQ_STACK_START
+ PTR_ADD sp, t0, t1
+
+ /* Save task's sp on IRQ stack so that unwinding can follow it */
+ LONG_S s1, 0(sp)
+2:
+ jal plat_irq_dispatch
+
+ /* Restore sp */
+ move sp, s1
+
+ j ret_from_irq
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+#endif
+ END(handle_int)
+
+ __INIT
+
+/*
+ * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
+ * This is a dedicated interrupt exception vector which reduces the
+ * interrupt processing overhead. The jump instruction will be replaced
+ * at the initialization time.
+ *
+ * Be careful when changing this, it has to be at most 128 bytes
+ * to fit into space reserved for the exception handler.
+ */
+NESTED(except_vec4, 0, sp)
+1: j 1b /* Dummy, will be replaced */
+ END(except_vec4)
+
+/*
+ * EJTAG debug exception handler.
+ * The EJTAG debug exception entry point is 0xbfc00480, which
+ * normally is in the boot PROM, so the boot PROM must do an
+ * unconditional jump to this vector.
+ */
+NESTED(except_vec_ejtag_debug, 0, sp)
+ j ejtag_debug_handler
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+#endif
+ END(except_vec_ejtag_debug)
+
+ __FINIT
+
+/*
+ * Vectored interrupt handler.
+ * This prototype is copied to ebase + n*IntCtl.VS and patched
+ * to invoke the handler
+ */
+BUILD_ROLLBACK_PROLOGUE except_vec_vi
+NESTED(except_vec_vi, 0, sp)
+ SAVE_SOME docfi=1
+ SAVE_AT docfi=1
+ .set push
+ .set noreorder
+ PTR_LA v1, except_vec_vi_handler
+FEXPORT(except_vec_vi_lui)
+ lui v0, 0 /* Patched */
+ jr v1
+FEXPORT(except_vec_vi_ori)
+ ori v0, 0 /* Patched */
+ .set pop
+ END(except_vec_vi)
+EXPORT(except_vec_vi_end)
+
+/*
+ * Common Vectored Interrupt code
+ * Complete the register saves and invoke the handler which is passed in $v0
+ */
+NESTED(except_vec_vi_handler, 0, sp)
+ SAVE_TEMP
+ SAVE_STATIC
+ CLI
+#ifdef CONFIG_TRACE_IRQFLAGS
+ move s0, v0
+ TRACE_IRQS_OFF
+ move v0, s0
+#endif
+
+ LONG_L s0, TI_REGS($28)
+ LONG_S sp, TI_REGS($28)
+
+ /*
+ * SAVE_ALL ensures we are using a valid kernel stack for the thread.
+ * Check if we are already using the IRQ stack.
+ */
+ move s1, sp # Preserve the sp
+
+ /* Get IRQ stack for this CPU */
+ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+ lui k1, %hi(irq_stack)
+#else
+ lui k1, %highest(irq_stack)
+ daddiu k1, %higher(irq_stack)
+ dsll k1, 16
+ daddiu k1, %hi(irq_stack)
+ dsll k1, 16
+#endif
+ LONG_SRL k0, SMP_CPUID_PTRSHIFT
+ LONG_ADDU k1, k0
+ LONG_L t0, %lo(irq_stack)(k1)
+
+ # Check if already on IRQ stack
+ PTR_LI t1, ~(_THREAD_SIZE-1)
+ and t1, t1, sp
+ beq t0, t1, 2f
+
+ /* Switch to IRQ stack */
+ li t1, _IRQ_STACK_START
+ PTR_ADD sp, t0, t1
+
+ /* Save task's sp on IRQ stack so that unwinding can follow it */
+ LONG_S s1, 0(sp)
+2:
+ jalr v0
+
+ /* Restore sp */
+ move sp, s1
+
+ j ret_from_irq
+ END(except_vec_vi_handler)
+
+/*
+ * EJTAG debug exception handler.
+ */
+NESTED(ejtag_debug_handler, PT_SIZE, sp)
+ .set push
+ .set noat
+ MTC0 k0, CP0_DESAVE
+ mfc0 k0, CP0_DEBUG
+
+ sll k0, k0, 30 # Check for SDBBP.
+ bgez k0, ejtag_return
+
+#ifdef CONFIG_SMP
+1: PTR_LA k0, ejtag_debug_buffer_spinlock
+ __SYNC(full, loongson3_war)
+2: ll k0, 0(k0)
+ bnez k0, 2b
+ PTR_LA k0, ejtag_debug_buffer_spinlock
+ sc k0, 0(k0)
+ beqz k0, 1b
+# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
+ sync
+# endif
+
+ PTR_LA k0, ejtag_debug_buffer
+ LONG_S k1, 0(k0)
+
+ ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
+ PTR_SRL k1, SMP_CPUID_PTRSHIFT
+ PTR_SLL k1, LONGLOG
+ PTR_LA k0, ejtag_debug_buffer_per_cpu
+ PTR_ADDU k0, k1
+
+ PTR_LA k1, ejtag_debug_buffer
+ LONG_L k1, 0(k1)
+ LONG_S k1, 0(k0)
+
+ PTR_LA k0, ejtag_debug_buffer_spinlock
+ sw zero, 0(k0)
+#else
+ PTR_LA k0, ejtag_debug_buffer
+ LONG_S k1, 0(k0)
+#endif
+
+ SAVE_ALL
+ move a0, sp
+ jal ejtag_exception_handler
+ RESTORE_ALL
+
+#ifdef CONFIG_SMP
+ ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
+ PTR_SRL k1, SMP_CPUID_PTRSHIFT
+ PTR_SLL k1, LONGLOG
+ PTR_LA k0, ejtag_debug_buffer_per_cpu
+ PTR_ADDU k0, k1
+ LONG_L k1, 0(k0)
+#else
+ PTR_LA k0, ejtag_debug_buffer
+ LONG_L k1, 0(k0)
+#endif
+
+ejtag_return:
+ back_to_back_c0_hazard
+ MFC0 k0, CP0_DESAVE
+ .set mips32
+ deret
+ .set pop
+ END(ejtag_debug_handler)
+
+/*
+ * This buffer is reserved for the use of the EJTAG debug
+ * handler.
+ */
+ .data
+EXPORT(ejtag_debug_buffer)
+ .fill LONGSIZE
+#ifdef CONFIG_SMP
+EXPORT(ejtag_debug_buffer_spinlock)
+ .fill LONGSIZE
+EXPORT(ejtag_debug_buffer_per_cpu)
+ .fill LONGSIZE * NR_CPUS
+#endif
+ .previous
+
+ __INIT
+
+/*
+ * NMI debug exception handler for MIPS reference boards.
+ * The NMI debug exception entry point is 0xbfc00000, which
+ * normally is in the boot PROM, so the boot PROM must do a
+ * unconditional jump to this vector.
+ */
+NESTED(except_vec_nmi, 0, sp)
+ j nmi_handler
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+#endif
+ END(except_vec_nmi)
+
+ __FINIT
+
+NESTED(nmi_handler, PT_SIZE, sp)
+ .cfi_signal_frame
+ .set push
+ .set noat
+ /*
+ * Clear ERL - restore segment mapping
+ * Clear BEV - required for page fault exception handler to work
+ */
+ mfc0 k0, CP0_STATUS
+ ori k0, k0, ST0_EXL
+ li k1, ~(ST0_BEV | ST0_ERL)
+ and k0, k0, k1
+ mtc0 k0, CP0_STATUS
+ _ehb
+ SAVE_ALL
+ move a0, sp
+ jal nmi_exception_handler
+ /* nmi_exception_handler never returns */
+ .set pop
+ END(nmi_handler)
+
+ .macro __build_clear_none
+ .endm
+
+ .macro __build_clear_sti
+ TRACE_IRQS_ON
+ STI
+ .endm
+
+ .macro __build_clear_cli
+ CLI
+ TRACE_IRQS_OFF
+ .endm
+
+ .macro __build_clear_fpe
+ CLI
+ TRACE_IRQS_OFF
+ .set push
+ /* gas fails to assemble cfc1 for some archs (octeon).*/ \
+ .set mips1
+ SET_HARDFLOAT
+ cfc1 a1, fcr31
+ .set pop
+ .endm
+
+ .macro __build_clear_msa_fpe
+ CLI
+ TRACE_IRQS_OFF
+ _cfcmsa a1, MSA_CSR
+ .endm
+
+ .macro __build_clear_ade
+ MFC0 t0, CP0_BADVADDR
+ PTR_S t0, PT_BVADDR(sp)
+ KMODE
+ .endm
+
+ .macro __build_clear_gsexc
+ .set push
+ /*
+ * We need to specify a selector to access the CP0.Diag1 (GSCause)
+ * register. All GSExc-equipped processors have MIPS32.
+ */
+ .set mips32
+ mfc0 a1, CP0_DIAGNOSTIC1
+ .set pop
+ TRACE_IRQS_ON
+ STI
+ .endm
+
+ .macro __BUILD_silent exception
+ .endm
+
+ /* Gas tries to parse the ASM_PRINT argument as a string containing
+ string escapes and emits bogus warnings if it thinks it has
+ recognized an unknown escape code. So make the arguments
+ start with an n and gas will believe \n is ok ... */
+ .macro __BUILD_verbose nexception
+ LONG_L a1, PT_EPC(sp)
+#ifdef CONFIG_32BIT
+ ASM_PRINT("Got \nexception at %08lx\012")
+#endif
+#ifdef CONFIG_64BIT
+ ASM_PRINT("Got \nexception at %016lx\012")
+#endif
+ .endm
+
+ .macro __BUILD_count exception
+ LONG_L t0,exception_count_\exception
+ LONG_ADDIU t0, 1
+ LONG_S t0,exception_count_\exception
+ .comm exception_count_\exception, 8, 8
+ .endm
+
+ .macro __BUILD_HANDLER exception handler clear verbose ext
+ .align 5
+ NESTED(handle_\exception, PT_SIZE, sp)
+ .cfi_signal_frame
+ .set noat
+ SAVE_ALL
+ FEXPORT(handle_\exception\ext)
+ __build_clear_\clear
+ .set at
+ __BUILD_\verbose \exception
+ move a0, sp
+ jal do_\handler
+ j ret_from_exception
+ END(handle_\exception)
+ .endm
+
+ .macro BUILD_HANDLER exception handler clear verbose
+ __BUILD_HANDLER \exception \handler \clear \verbose _int
+ .endm
+
+ BUILD_HANDLER adel ade ade silent /* #4 */
+ BUILD_HANDLER ades ade ade silent /* #5 */
+ BUILD_HANDLER ibe be cli silent /* #6 */
+ BUILD_HANDLER dbe be cli silent /* #7 */
+ BUILD_HANDLER bp bp sti silent /* #9 */
+ BUILD_HANDLER ri ri sti silent /* #10 */
+ BUILD_HANDLER cpu cpu sti silent /* #11 */
+ BUILD_HANDLER ov ov sti silent /* #12 */
+ BUILD_HANDLER tr tr sti silent /* #13 */
+ BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ BUILD_HANDLER fpe fpe fpe silent /* #15 */
+#endif
+ BUILD_HANDLER ftlb ftlb none silent /* #16 */
+ BUILD_HANDLER gsexc gsexc gsexc silent /* #16 */
+ BUILD_HANDLER msa msa sti silent /* #21 */
+ BUILD_HANDLER mdmx mdmx sti silent /* #22 */
+#ifdef CONFIG_HARDWARE_WATCHPOINTS
+ /*
+ * For watch, interrupts will be enabled after the watch
+ * registers are read.
+ */
+ BUILD_HANDLER watch watch cli silent /* #23 */
+#else
+ BUILD_HANDLER watch watch sti verbose /* #23 */
+#endif
+ BUILD_HANDLER mcheck mcheck cli verbose /* #24 */
+ BUILD_HANDLER mt mt sti silent /* #25 */
+ BUILD_HANDLER dsp dsp sti silent /* #26 */
+ BUILD_HANDLER reserved reserved sti verbose /* others */
+
+ .align 5
+ LEAF(handle_ri_rdhwr_tlbp)
+ .set push
+ .set noat
+ .set noreorder
+ /* check if TLB contains an entry for EPC */
+ MFC0 k1, CP0_ENTRYHI
+ andi k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
+ MFC0 k0, CP0_EPC
+ PTR_SRL k0, _PAGE_SHIFT + 1
+ PTR_SLL k0, _PAGE_SHIFT + 1
+ or k1, k0
+ MTC0 k1, CP0_ENTRYHI
+ mtc0_tlbw_hazard
+ tlbp
+ tlb_probe_hazard
+ mfc0 k1, CP0_INDEX
+ .set pop
+ bltz k1, handle_ri /* slow path */
+ /* fall thru */
+ END(handle_ri_rdhwr_tlbp)
+
+ LEAF(handle_ri_rdhwr)
+ .set push
+ .set noat
+ .set noreorder
+ /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */
+ /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
+ MFC0 k1, CP0_EPC
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
+ and k0, k1, 1
+ beqz k0, 1f
+ xor k1, k0
+ lhu k0, (k1)
+ lhu k1, 2(k1)
+ ins k1, k0, 16, 16
+ lui k0, 0x007d
+ b docheck
+ ori k0, 0x6b3c
+1:
+ lui k0, 0x7c03
+ lw k1, (k1)
+ ori k0, 0xe83b
+#else
+ andi k0, k1, 1
+ bnez k0, handle_ri
+ lui k0, 0x7c03
+ lw k1, (k1)
+ ori k0, 0xe83b
+#endif
+ .set reorder
+docheck:
+ bne k0, k1, handle_ri /* if not ours */
+
+isrdhwr:
+ /* The insn is rdhwr. No need to check CAUSE.BD here. */
+ get_saved_sp /* k1 := current_thread_info */
+ .set noreorder
+ MFC0 k0, CP0_EPC
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ ori k1, _THREAD_MASK
+ xori k1, _THREAD_MASK
+ LONG_L v1, TI_TP_VALUE(k1)
+ LONG_ADDIU k0, 4
+ jr k0
+ rfe
+#else
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+ LONG_ADDIU k0, 4 /* stall on $k0 */
+#else
+ .set at=v1
+ LONG_ADDIU k0, 4
+ .set noat
+#endif
+ MTC0 k0, CP0_EPC
+ /* I hope three instructions between MTC0 and ERET are enough... */
+ ori k1, _THREAD_MASK
+ xori k1, _THREAD_MASK
+ LONG_L v1, TI_TP_VALUE(k1)
+ .set push
+ .set arch=r4000
+ eret
+ .set pop
+#endif
+ .set pop
+ END(handle_ri_rdhwr)
+
+#ifdef CONFIG_CPU_R4X00_BUGS64
+/* A temporary overflow handler used by check_daddi(). */
+
+ __INIT
+
+ BUILD_HANDLER daddi_ov daddi_ov none silent /* #12 */
+#endif
diff --git a/arch/mips/kernel/gpio_txx9.c b/arch/mips/kernel/gpio_txx9.c
new file mode 100644
index 000000000..8c083612d
--- /dev/null
+++ b/arch/mips/kernel/gpio_txx9.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * A gpio chip driver for TXx9 SoCs
+ *
+ * Copyright (C) 2008 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ */
+
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/gpio/driver.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <asm/txx9pio.h>
+
+static DEFINE_SPINLOCK(txx9_gpio_lock);
+
+static struct txx9_pio_reg __iomem *txx9_pioptr;
+
+static int txx9_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ return !!(__raw_readl(&txx9_pioptr->din) & (1 << offset));
+}
+
+static void txx9_gpio_set_raw(unsigned int offset, int value)
+{
+ u32 val;
+ val = __raw_readl(&txx9_pioptr->dout);
+ if (value)
+ val |= 1 << offset;
+ else
+ val &= ~(1 << offset);
+ __raw_writel(val, &txx9_pioptr->dout);
+}
+
+static void txx9_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&txx9_gpio_lock, flags);
+ txx9_gpio_set_raw(offset, value);
+ mmiowb();
+ spin_unlock_irqrestore(&txx9_gpio_lock, flags);
+}
+
+static int txx9_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&txx9_gpio_lock, flags);
+ __raw_writel(__raw_readl(&txx9_pioptr->dir) & ~(1 << offset),
+ &txx9_pioptr->dir);
+ mmiowb();
+ spin_unlock_irqrestore(&txx9_gpio_lock, flags);
+ return 0;
+}
+
+static int txx9_gpio_dir_out(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&txx9_gpio_lock, flags);
+ txx9_gpio_set_raw(offset, value);
+ __raw_writel(__raw_readl(&txx9_pioptr->dir) | (1 << offset),
+ &txx9_pioptr->dir);
+ mmiowb();
+ spin_unlock_irqrestore(&txx9_gpio_lock, flags);
+ return 0;
+}
+
+static struct gpio_chip txx9_gpio_chip = {
+ .get = txx9_gpio_get,
+ .set = txx9_gpio_set,
+ .direction_input = txx9_gpio_dir_in,
+ .direction_output = txx9_gpio_dir_out,
+ .label = "TXx9",
+};
+
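+/*
+ * txx9_gpio_init - map the PIO register block and register the gpio_chip
+ * @baseaddr: physical base address of the PIO registers
+ * @base: first GPIO number assigned to this chip
+ * @num: number of GPIO lines the chip provides
+ */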
+int __init txx9_gpio_init(unsigned long baseaddr,
+ unsigned int base, unsigned int num)
+{
+ txx9_pioptr = ioremap(baseaddr, sizeof(struct txx9_pio_reg));
+ if (!txx9_pioptr)
+ return -ENODEV;
+ txx9_gpio_chip.base = base;
+ txx9_gpio_chip.ngpio = num;
+ return gpiochip_add_data(&txx9_gpio_chip, NULL);
+}
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
new file mode 100644
index 000000000..61b73580b
--- /dev/null
+++ b/arch/mips/kernel/head.S
@@ -0,0 +1,185 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995 Waldorf Electronics
+ * Written by Ralf Baechle and Andreas Busse
+ * Copyright (C) 1994 - 99, 2003, 06 Ralf Baechle
+ * Copyright (C) 1996 Paul M. Antoine
+ * Modified for DECStation and hence R3000 support by Paul M. Antoine
+ * Further modifications by David S. Miller and Harald Koerfgen
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/threads.h>
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/irqflags.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+
+#include <kernel-entry-init.h>
+
+ /*
+ * For the moment disable interrupts, mark the kernel mode and
+ * set ST0_KX so that the CPU does not spit fire when using
+ * 64-bit addresses. A full initialization of the CPU's status
+ * register is done later in per_cpu_trap_init().
+ */
+ .macro setup_c0_status set clr
+ .set push
+ mfc0 t0, CP0_STATUS
+ or t0, ST0_KERNEL_CUMASK|\set|0x1f|\clr
+ xor t0, 0x1f|\clr
+ mtc0 t0, CP0_STATUS
+ .set noreorder
+ sll zero,3 # ehb
+ .set pop
+ .endm
+
+ .macro setup_c0_status_pri
+#ifdef CONFIG_64BIT
+ setup_c0_status ST0_KX 0
+#else
+ setup_c0_status 0 0
+#endif
+ .endm
+
+ .macro setup_c0_status_sec
+#ifdef CONFIG_64BIT
+ setup_c0_status ST0_KX ST0_BEV
+#else
+ setup_c0_status 0 ST0_BEV
+#endif
+ .endm
+
+#ifndef CONFIG_NO_EXCEPT_FILL
+ /*
+ * Reserved space for exception handlers.
+ * Necessary for machines which link their kernels at KSEG0.
+ */
+ .fill 0x400
+#endif
+
+EXPORT(_stext)
+
+#ifdef CONFIG_BOOT_RAW
+ /*
+ * Give us a fighting chance of running if execution begins at the
+ * kernel load address. This is needed because this platform does
+ * not have an ELF loader yet.
+ */
+FEXPORT(__kernel_entry)
+ j kernel_entry
+#endif /* CONFIG_BOOT_RAW */
+
+ __REF
+
+NESTED(kernel_entry, 16, sp) # kernel entry point
+
+ kernel_entry_setup # cpu specific setup
+
+ setup_c0_status_pri
+
+ /* We might not get launched at the address the kernel is linked to,
+ so we jump there. */
+ PTR_LA t0, 0f
+ jr t0
+0:
+
+#ifdef CONFIG_USE_OF
+#if defined(CONFIG_MIPS_RAW_APPENDED_DTB) || \
+ defined(CONFIG_MIPS_ELF_APPENDED_DTB)
+
+ PTR_LA t2, __appended_dtb
+
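+	/* 0xd00dfeed is the FDT magic; compare it in this kernel's byte order */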
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ li t1, 0xd00dfeed
+#else /* !CONFIG_CPU_BIG_ENDIAN */
+ li t1, 0xedfe0dd0
+#endif /* !CONFIG_CPU_BIG_ENDIAN */
+ lw t0, (t2)
+ beq t0, t1, dtb_found
+#endif /* CONFIG_MIPS_RAW_APPENDED_DTB || CONFIG_MIPS_ELF_APPENDED_DTB */
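+	/* UHI boot protocol: a0 == -2 means a1 carries the DTB pointer */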
+ li t1, -2
+ move t2, a1
+ beq a0, t1, dtb_found
+
+#ifdef CONFIG_BUILTIN_DTB
+ PTR_LA t2, __dtb_start
+ PTR_LA t1, __dtb_end
+ bne t1, t2, dtb_found
+#endif /* CONFIG_BUILTIN_DTB */
+
+ li t2, 0
+dtb_found:
+#endif /* CONFIG_USE_OF */
+ PTR_LA t0, __bss_start # clear .bss
+ LONG_S zero, (t0)
+ PTR_LA t1, __bss_stop - LONGSIZE
+1:
+ PTR_ADDIU t0, LONGSIZE
+ LONG_S zero, (t0)
+ bne t0, t1, 1b
+
+ LONG_S a0, fw_arg0 # firmware arguments
+ LONG_S a1, fw_arg1
+ LONG_S a2, fw_arg2
+ LONG_S a3, fw_arg3
+
+#ifdef CONFIG_USE_OF
+ LONG_S t2, fw_passed_dtb
+#endif
+
+ MTC0 zero, CP0_CONTEXT # clear context register
+#ifdef CONFIG_64BIT
+ MTC0 zero, CP0_XCONTEXT
+#endif
+ PTR_LA $28, init_thread_union
+ /* Set the SP after an empty pt_regs. */
+ PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE
+ PTR_ADDU sp, $28
+ back_to_back_c0_hazard
+ set_saved_sp sp, t0, t1
+ PTR_SUBU sp, 4 * SZREG # init stack pointer
+
+#ifdef CONFIG_RELOCATABLE
+ /* Copy kernel and apply the relocations */
+ jal relocate_kernel
+
+ /* Repoint the sp into the new kernel image */
+ PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE
+ PTR_ADDU sp, $28
+ set_saved_sp sp, t0, t1
+ PTR_SUBU sp, 4 * SZREG # init stack pointer
+
+ /*
+ * relocate_kernel returns the entry point either
+ * in the relocated kernel or the original if for
+ * some reason relocation failed - jump there now
+ * with instruction hazard barrier because of the
+ * newly sync'd icache.
+ */
+ jr.hb v0
+#else /* !CONFIG_RELOCATABLE */
+ j start_kernel
+#endif /* !CONFIG_RELOCATABLE */
+ END(kernel_entry)
+
+#ifdef CONFIG_SMP
+/*
+ * SMP slave cpus entry point. Board specific code for bootstrap calls this
+ * function after setting up the stack and gp registers.
+ */
+NESTED(smp_bootstrap, 16, sp)
+ smp_slave_setup
+ setup_c0_status_sec
+ j start_secondary
+ END(smp_bootstrap)
+#endif /* CONFIG_SMP */
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
new file mode 100644
index 000000000..ca21210e0
--- /dev/null
+++ b/arch/mips/kernel/i8253.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * i8253.c 8253/PIT functions
+ *
+ */
+#include <linux/clockchips.h>
+#include <linux/i8253.h>
+#include <linux/export.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ i8253_clockevent.event_handler(&i8253_clockevent);
+
+ return IRQ_HANDLED;
+}
+
+void __init setup_pit_timer(void)
+{
+ unsigned long flags = IRQF_NOBALANCING | IRQF_TIMER;
+
+ clockevent_i8253_init(true);
+ if (request_irq(0, timer_interrupt, flags, "timer", NULL))
+ pr_err("Failed to request irq 0 (timer)\n");
+}
+
+static int __init init_pit_clocksource(void)
+{
+ if (num_possible_cpus() > 1 || /* PIT does not scale! */
+ !clockevent_state_periodic(&i8253_clockevent))
+ return 0;
+
+ return clocksource_i8253_init();
+}
+arch_initcall(init_pit_clocksource);
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
new file mode 100644
index 000000000..18e69ebf5
--- /dev/null
+++ b/arch/mips/kernel/idle.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MIPS idle loop and WAIT instruction support.
+ *
+ * Copyright (C) xxxx the Anonymous
+ * Copyright (C) 1994 - 2006 Ralf Baechle
+ * Copyright (C) 2003, 2004 Maciej W. Rozycki
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
+ */
+#include <linux/cpu.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/cpu-type.h>
+#include <asm/idle.h>
+#include <asm/mipsregs.h>
+
+/*
+ * Not all MIPS CPUs have the "wait" instruction available. Moreover, the
+ * implementation of the "wait" feature differs between CPU families.
+ * This pointer holds the function that implements the CPU-specific wait.
+ * The wait instruction stops the pipeline and significantly reduces the
+ * power consumption of the CPU.
+ */
+void (*cpu_wait)(void);
+EXPORT_SYMBOL(cpu_wait);
+
+static void __cpuidle r3081_wait(void)
+{
+ unsigned long cfg = read_c0_conf();
+ write_c0_conf(cfg | R30XX_CONF_HALT);
+ raw_local_irq_enable();
+}
+
+static void __cpuidle r39xx_wait(void)
+{
+ if (!need_resched())
+ write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
+ raw_local_irq_enable();
+}
+
+void __cpuidle r4k_wait(void)
+{
+ raw_local_irq_enable();
+ __r4k_wait();
+}
+
+/*
+ * This variant is preferable as it allows testing need_resched and going to
+ * sleep depending on the outcome atomically. Unfortunately the "It is
+ * implementation-dependent whether the pipeline restarts when a non-enabled
+ * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
+ * using this version a gamble.
+ */
+void __cpuidle r4k_wait_irqoff(void)
+{
+ if (!need_resched())
+ __asm__(
+ " .set push \n"
+ " .set arch=r4000 \n"
+ " wait \n"
+ " .set pop \n");
+ raw_local_irq_enable();
+}
+
+/*
+ * The RM7000 variant has to handle erratum 38. The workaround is to not
+ * have any pending stores when the WAIT instruction is executed.
+ */
+static void __cpuidle rm7k_wait_irqoff(void)
+{
+ if (!need_resched())
+ __asm__(
+ " .set push \n"
+ " .set arch=r4000 \n"
+ " .set noat \n"
+ " mfc0 $1, $12 \n"
+ " sync \n"
+ " mtc0 $1, $12 # stalls until W stage \n"
+ " wait \n"
+ " mtc0 $1, $12 # stalls until W stage \n"
+ " .set pop \n");
+ raw_local_irq_enable();
+}
+
+/*
+ * Au1 'wait' is only useful when the 32kHz counter is used as the timer,
+ * since the core clock (and the cp0 counter) stops upon executing it. Only
+ * an interrupt can wake it, so interrupts must be enabled before entering
+ * idle modes.
+ */
+static void __cpuidle au1k_wait(void)
+{
+ unsigned long c0status = read_c0_status() | 1; /* irqs on */
+
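+ /*
+ * The two cache ops below (0x14 is Fill_I) pull this wait sequence into
+ * the I-cache before the core clock is stopped.
+ */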
+ __asm__(
+ " .set push \n"
+ " .set arch=r4000 \n"
+ " cache 0x14, 0(%0) \n"
+ " cache 0x14, 32(%0) \n"
+ " sync \n"
+ " mtc0 %1, $12 \n" /* wr c0status */
+ " wait \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " .set pop \n"
+ : : "r" (au1k_wait), "r" (c0status));
+}
+
+static int __initdata nowait;
+
+static int __init wait_disable(char *s)
+{
+ nowait = 1;
+
+ return 1;
+}
+
+__setup("nowait", wait_disable);
+
+void __init check_wait(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ if (nowait) {
+ printk("Wait instruction disabled.\n");
+ return;
+ }
+
+ /*
+ * MIPSr6 specifies that masked interrupts should unblock an executing
+ * wait instruction, and thus that it is safe for us to use
+ * r4k_wait_irqoff. Yippee!
+ */
+ if (cpu_has_mips_r6) {
+ cpu_wait = r4k_wait_irqoff;
+ return;
+ }
+
+ switch (current_cpu_type()) {
+ case CPU_R3081:
+ case CPU_R3081E:
+ cpu_wait = r3081_wait;
+ break;
+ case CPU_TX3927:
+ cpu_wait = r39xx_wait;
+ break;
+ case CPU_R4200:
+ case CPU_R4600:
+ case CPU_R4640:
+ case CPU_R4650:
+ case CPU_R4700:
+ case CPU_R5000:
+ case CPU_R5500:
+ case CPU_NEVADA:
+ case CPU_4KC:
+ case CPU_4KEC:
+ case CPU_4KSC:
+ case CPU_5KC:
+ case CPU_5KE:
+ case CPU_25KF:
+ case CPU_PR4450:
+ case CPU_BMIPS3300:
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ case CPU_CAVIUM_OCTEON3:
+ case CPU_XBURST:
+ case CPU_LOONGSON32:
+ case CPU_XLR:
+ case CPU_XLP:
+ cpu_wait = r4k_wait;
+ break;
+ case CPU_LOONGSON64:
+ if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
+ (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
+ (c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
+ cpu_wait = r4k_wait;
+ break;
+
+ case CPU_BMIPS5000:
+ cpu_wait = r4k_wait_irqoff;
+ break;
+ case CPU_RM7000:
+ cpu_wait = rm7k_wait_irqoff;
+ break;
+
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ /*
+ * Incoming Fast Debug Channel (FDC) data during a wait
+ * instruction causes the wait never to resume, even if an
+ * interrupt is received. Avoid using wait at all if FDC data is
+ * likely to be received.
+ */
+ if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY))
+ break;
+ fallthrough;
+ case CPU_M14KC:
+ case CPU_M14KEC:
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_1004K:
+ case CPU_1074K:
+ case CPU_INTERAPTIV:
+ case CPU_M5150:
+ case CPU_QEMU_GENERIC:
+ cpu_wait = r4k_wait;
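+ /*
+ * Config7.WII indicates that "wait" resumes even when interrupts
+ * are masked, making the irqoff variant safe to use.
+ */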
+ if (read_c0_config7() & MIPS_CONF7_WII)
+ cpu_wait = r4k_wait_irqoff;
+ break;
+
+ case CPU_74K:
+ cpu_wait = r4k_wait;
+ if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
+ cpu_wait = r4k_wait_irqoff;
+ break;
+
+ case CPU_TX49XX:
+ cpu_wait = r4k_wait_irqoff;
+ break;
+ case CPU_ALCHEMY:
+ cpu_wait = au1k_wait;
+ break;
+ case CPU_20KC:
+ /*
+ * WAIT on Rev1.0 has E1, E2, E3 and E16.
+ * WAIT on Rev2.0 and Rev3.0 has E16.
+ * Rev3.1 WAIT is nop, why bother
+ */
+ if ((c->processor_id & 0xff) <= 0x64)
+ break;
+
+ /*
+ * Another rev increments c0_count at a reduced clock
+ * rate while in WAIT mode. So we basically have the choice
+ * between using the cp0 timer as clocksource or avoiding
+ * the WAIT instruction. Until more details are known,
+ * disable the use of WAIT for 20Kc entirely.
+ cpu_wait = r4k_wait;
+ */
+ break;
+ default:
+ break;
+ }
+}
+
+void arch_cpu_idle(void)
+{
+ if (cpu_wait)
+ cpu_wait();
+ else
+ raw_local_irq_enable();
+}
+
+#ifdef CONFIG_CPU_IDLE
+
+int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ arch_cpu_idle();
+ return index;
+}
+
+#endif
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
new file mode 100644
index 000000000..93bcf5736
--- /dev/null
+++ b/arch/mips/kernel/irq-gt641xx.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * GT641xx IRQ routines.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/gt64120.h>
+
+#define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE))
+
+static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock);
+
+static void ack_gt641xx_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ u32 cause;
+
+ raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ cause = GT_READ(GT_INTRCAUSE_OFS);
+ cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
+ GT_WRITE(GT_INTRCAUSE_OFS, cause);
+ raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void mask_gt641xx_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ u32 mask;
+
+ raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
+ GT_WRITE(GT_INTRMASK_OFS, mask);
+ raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void mask_ack_gt641xx_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ u32 cause, mask;
+
+ raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
+ GT_WRITE(GT_INTRMASK_OFS, mask);
+
+ cause = GT_READ(GT_INTRCAUSE_OFS);
+ cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
+ GT_WRITE(GT_INTRCAUSE_OFS, cause);
+ raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void unmask_gt641xx_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ u32 mask;
+
+ raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ mask |= GT641XX_IRQ_TO_BIT(d->irq);
+ GT_WRITE(GT_INTRMASK_OFS, mask);
+ raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static struct irq_chip gt641xx_irq_chip = {
+ .name = "GT641xx",
+ .irq_ack = ack_gt641xx_irq,
+ .irq_mask = mask_gt641xx_irq,
+ .irq_mask_ack = mask_ack_gt641xx_irq,
+ .irq_unmask = unmask_gt641xx_irq,
+};
+
+void gt641xx_irq_dispatch(void)
+{
+ u32 cause, mask;
+ int i;
+
+ cause = GT_READ(GT_INTRCAUSE_OFS);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ cause &= mask;
+
+ /*
+ * bit0 : logical or of all the interrupt bits.
+ * bit30: logical or of bits[29:26,20:1].
+ * bit31: logical or of bits[25:1].
+ */
+ for (i = 1; i < 30; i++) {
+ if (cause & (1U << i)) {
+ do_IRQ(GT641XX_IRQ_BASE + i);
+ return;
+ }
+ }
+
+ atomic_inc(&irq_err_count);
+}
+
+void __init gt641xx_irq_init(void)
+{
+ int i;
+
+ GT_WRITE(GT_INTRMASK_OFS, 0);
+ GT_WRITE(GT_INTRCAUSE_OFS, 0);
+
+ /*
+ * bit0 : logical or of all the interrupt bits.
+ * bit30: logical or of bits[29:26,20:1].
+ * bit31: logical or of bits[25:1].
+ */
+ for (i = 1; i < 30; i++)
+ irq_set_chip_and_handler(GT641XX_IRQ_BASE + i,
+ &gt641xx_irq_chip, handle_level_irq);
+}
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
new file mode 100644
index 000000000..ab511b64a
--- /dev/null
+++ b/arch/mips/kernel/irq-msc01.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *
+ * Copyright (c) 2004 MIPS Inc
+ * Author: chris@mips.com
+ *
+ * Copyright (C) 2004, 06 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/msc01_ic.h>
+#include <asm/traps.h>
+
+static unsigned long _icctrl_msc;
+#define MSC01_IC_REG_BASE _icctrl_msc
+
+#define MSCIC_WRITE(reg, data) do { *(volatile u32 *)(reg) = data; } while (0)
+#define MSCIC_READ(reg, data) do { data = *(volatile u32 *)(reg); } while (0)
+
+static unsigned int irq_base;
+
+/* mask off an interrupt */
+static inline void mask_msc_irq(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+
+ if (irq < (irq_base + 32))
+ MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base));
+ else
+ MSCIC_WRITE(MSC01_IC_DISH, 1<<(irq - irq_base - 32));
+}
+
+/* unmask an interrupt */
+static inline void unmask_msc_irq(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+
+ if (irq < (irq_base + 32))
+ MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base));
+ else
+ MSCIC_WRITE(MSC01_IC_ENAH, 1<<(irq - irq_base - 32));
+}
+
+/*
+ * Masks and ACKs an IRQ
+ */
+static void level_mask_and_ack_msc_irq(struct irq_data *d)
+{
+ mask_msc_irq(d);
+ if (!cpu_has_veic)
+ MSCIC_WRITE(MSC01_IC_EOI, 0);
+}
+
+/*
+ * Masks and ACKs an IRQ
+ */
+static void edge_mask_and_ack_msc_irq(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+
+ mask_msc_irq(d);
+ if (!cpu_has_veic)
+ MSCIC_WRITE(MSC01_IC_EOI, 0);
+ else {
+ u32 r;
+ MSCIC_READ(MSC01_IC_SUP+irq*8, r);
+ MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
+ MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
+ }
+}
+
+/*
+ * Interrupt handler for interrupts coming from SOC-it.
+ */
+void ll_msc_irq(void)
+{
+ unsigned int irq;
+
+ /* read the interrupt vector register */
+ MSCIC_READ(MSC01_IC_VEC, irq);
+ if (irq < 64)
+ do_IRQ(irq + irq_base);
+ else {
+ /* Ignore spurious interrupt */
+ }
+}
+
+static void msc_bind_eic_interrupt(int irq, int set)
+{
+ MSCIC_WRITE(MSC01_IC_RAMW,
+ (irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
+}
+
+static struct irq_chip msc_levelirq_type = {
+ .name = "SOC-it-Level",
+ .irq_ack = level_mask_and_ack_msc_irq,
+ .irq_mask = mask_msc_irq,
+ .irq_mask_ack = level_mask_and_ack_msc_irq,
+ .irq_unmask = unmask_msc_irq,
+ .irq_eoi = unmask_msc_irq,
+};
+
+static struct irq_chip msc_edgeirq_type = {
+ .name = "SOC-it-Edge",
+ .irq_ack = edge_mask_and_ack_msc_irq,
+ .irq_mask = mask_msc_irq,
+ .irq_mask_ack = edge_mask_and_ack_msc_irq,
+ .irq_unmask = unmask_msc_irq,
+ .irq_eoi = unmask_msc_irq,
+};
+
+
+void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqmap_t *imp, int nirq)
+{
+ _icctrl_msc = (unsigned long) ioremap(icubase, 0x40000);
+
+ /* Reset interrupt controller - initialises all registers to 0 */
+ MSCIC_WRITE(MSC01_IC_RST, MSC01_IC_RST_RST_BIT);
+
+ board_bind_eic_interrupt = &msc_bind_eic_interrupt;
+
+ for (; nirq > 0; nirq--, imp++) {
+ int n = imp->im_irq;
+
+ switch (imp->im_type) {
+ case MSC01_IRQ_EDGE:
+ irq_set_chip_and_handler_name(irqbase + n,
+ &msc_edgeirq_type,
+ handle_edge_irq,
+ "edge");
+ if (cpu_has_veic)
+ MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
+ else
+ MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
+ break;
+ case MSC01_IRQ_LEVEL:
+ irq_set_chip_and_handler_name(irqbase + n,
+ &msc_levelirq_type,
+ handle_level_irq,
+ "level");
+ if (cpu_has_veic)
+ MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
+ else
+ MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl);
+ }
+ }
+
+ irq_base = irqbase;
+
+ MSCIC_WRITE(MSC01_IC_GENA, MSC01_IC_GENA_GENA_BIT); /* Enable interrupt generation */
+
+}
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
new file mode 100644
index 000000000..e1a497f63
--- /dev/null
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2003 Ralf Baechle
+ *
+ * Handler for RM7000 extended interrupts. These are a non-standard
+ * feature so we handle them separately from standard interrupts.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+
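+/*
+ * The four extended interrupts are controlled by bits 11:8 of the
+ * RM7000 IntControl register, hence the "0x100 <<" shifts below.
+ */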
+static inline void unmask_rm7k_irq(struct irq_data *d)
+{
+ set_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
+}
+
+static inline void mask_rm7k_irq(struct irq_data *d)
+{
+ clear_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
+}
+
+static struct irq_chip rm7k_irq_controller = {
+ .name = "RM7000",
+ .irq_ack = mask_rm7k_irq,
+ .irq_mask = mask_rm7k_irq,
+ .irq_mask_ack = mask_rm7k_irq,
+ .irq_unmask = unmask_rm7k_irq,
+ .irq_eoi = unmask_rm7k_irq
+};
+
+void __init rm7k_cpu_irq_init(void)
+{
+ int base = RM7K_CPU_IRQ_BASE;
+ int i;
+
+ clear_c0_intcontrol(0x00000f00); /* Mask all */
+
+ for (i = base; i < base + 4; i++)
+ irq_set_chip_and_handler(i, &rm7k_irq_controller,
+ handle_percpu_irq);
+}
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
new file mode 100644
index 000000000..85b6c60f2
--- /dev/null
+++ b/arch/mips/kernel/irq.c
@@ -0,0 +1,109 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Code to handle x86 style IRQs plus some generic interrupt stuff.
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/kallsyms.h>
+#include <linux/kgdb.h>
+#include <linux/ftrace.h>
+
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+
+void *irq_stack[NR_CPUS];
+
+/*
+ * 'What should we do if we get a hw irq event on an illegal vector?'
+ * Each architecture has to answer this for itself.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+ printk("unexpected IRQ # %d\n", irq);
+}
+
+atomic_t irq_err_count;
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+ return 0;
+}
+
+asmlinkage void spurious_interrupt(void)
+{
+ atomic_inc(&irq_err_count);
+}
+
+void __init init_IRQ(void)
+{
+ int i;
+ unsigned int order = get_order(IRQ_STACK_SIZE);
+
+ for (i = 0; i < NR_IRQS; i++)
+ irq_set_noprobe(i);
+
+ if (cpu_has_veic)
+ clear_c0_status(ST0_IM);
+
+ arch_init_irq();
+
+ for_each_possible_cpu(i) {
+ void *s = (void *)__get_free_pages(GFP_KERNEL, order);
+
+ irq_stack[i] = s;
+ pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
+ irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
+ }
+}
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+static inline void check_stack_overflow(void)
+{
+ unsigned long sp;
+
+ __asm__ __volatile__("move %0, $sp" : "=r" (sp));
+ sp &= THREAD_MASK;
+
+ /*
+ * Check for stack overflow: is there less than STACK_WARN free?
+ * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
+ */
+ if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+ printk("do_IRQ: stack overflow: %ld\n",
+ sp - sizeof(struct thread_info));
+ dump_stack();
+ }
+}
+#else
+static inline void check_stack_overflow(void) {}
+#endif
+
+
+/*
+ * do_IRQ handles all normal device IRQs (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+void __irq_entry do_IRQ(unsigned int irq)
+{
+ irq_enter();
+ check_stack_overflow();
+ generic_handle_irq(irq);
+ irq_exit();
+}
+
diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c
new file mode 100644
index 000000000..ab00e4904
--- /dev/null
+++ b/arch/mips/kernel/irq_txx9.c
@@ -0,0 +1,191 @@
+/*
+ * Based on linux/arch/mips/jmr3927/rbhma3100/irq.c,
+ * linux/arch/mips/tx4927/common/tx4927_irq.c,
+ * linux/arch/mips/tx4938/common/irq.c
+ *
+ * Copyright 2001, 2003-2005 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ahennessy@mvista.com
+ * source@mvista.com
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/irq.h>
+#include <asm/txx9irq.h>
+
+struct txx9_irc_reg {
+ u32 cer;
+ u32 cr[2];
+ u32 unused0;
+ u32 ilr[8];
+ u32 unused1[4];
+ u32 imr;
+ u32 unused2[7];
+ u32 scr;
+ u32 unused3[7];
+ u32 ssr;
+ u32 unused4[7];
+ u32 csr;
+};
+
+/* IRCER : Int. Control Enable */
+#define TXx9_IRCER_ICE 0x00000001
+
+/* IRCR : Int. Control */
+#define TXx9_IRCR_LOW 0x00000000
+#define TXx9_IRCR_HIGH 0x00000001
+#define TXx9_IRCR_DOWN 0x00000002
+#define TXx9_IRCR_UP 0x00000003
+#define TXx9_IRCR_EDGE(cr) ((cr) & 0x00000002)
+
+/* IRSCR : Int. Status Control */
+#define TXx9_IRSCR_EIClrE 0x00000100
+#define TXx9_IRSCR_EIClr_MASK 0x0000000f
+
+/* IRCSR : Int. Current Status */
+#define TXx9_IRCSR_IF 0x00010000
+#define TXx9_IRCSR_ILV_MASK 0x00000700
+#define TXx9_IRCSR_IVL_MASK 0x0000001f
+
+#define irc_dlevel 0
+#define irc_elevel 1
+
+static struct txx9_irc_reg __iomem *txx9_ircptr __read_mostly;
+
+static struct {
+ unsigned char level;
+ unsigned char mode;
+} txx9irq[TXx9_MAX_IR] __read_mostly;
+
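+/*
+ * Each 32-bit ILR register holds the interrupt level of four sources,
+ * one per byte. Writing level 0 (irc_dlevel) masks a source; writing
+ * back its configured level unmasks it.
+ */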
+static void txx9_irq_unmask(struct irq_data *d)
+{
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
+ u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
+ int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
+
+ __raw_writel((__raw_readl(ilrp) & ~(0xff << ofs))
+ | (txx9irq[irq_nr].level << ofs),
+ ilrp);
+#ifdef CONFIG_CPU_TX39XX
+ /* update IRCSR */
+ __raw_writel(0, &txx9_ircptr->imr);
+ __raw_writel(irc_elevel, &txx9_ircptr->imr);
+#endif
+}
+
+static inline void txx9_irq_mask(struct irq_data *d)
+{
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
+ u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
+ int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
+
+ __raw_writel((__raw_readl(ilrp) & ~(0xff << ofs))
+ | (irc_dlevel << ofs),
+ ilrp);
+#ifdef CONFIG_CPU_TX39XX
+ /* update IRCSR */
+ __raw_writel(0, &txx9_ircptr->imr);
+ __raw_writel(irc_elevel, &txx9_ircptr->imr);
+ /* flush write buffer */
+ __raw_readl(&txx9_ircptr->ssr);
+#else
+ mmiowb();
+#endif
+}
+
+static void txx9_irq_mask_ack(struct irq_data *d)
+{
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
+
+ txx9_irq_mask(d);
+ /* clear edge detection */
+ if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode)))
+ __raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr);
+}
+
+static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
+ u32 cr;
+ u32 __iomem *crp;
+ int ofs;
+ int mode;
+
+ if (flow_type & IRQF_TRIGGER_PROBE)
+ return 0;
+ switch (flow_type & IRQF_TRIGGER_MASK) {
+ case IRQF_TRIGGER_RISING: mode = TXx9_IRCR_UP; break;
+ case IRQF_TRIGGER_FALLING: mode = TXx9_IRCR_DOWN; break;
+ case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break;
+ case IRQF_TRIGGER_LOW: mode = TXx9_IRCR_LOW; break;
+ default:
+ return -EINVAL;
+ }
+ crp = &txx9_ircptr->cr[(unsigned int)irq_nr / 8];
+ cr = __raw_readl(crp);
+ ofs = (irq_nr & (8 - 1)) * 2;
+ cr &= ~(0x3 << ofs);
+ cr |= (mode & 0x3) << ofs;
+ __raw_writel(cr, crp);
+ txx9irq[irq_nr].mode = mode;
+ return 0;
+}
+
+static struct irq_chip txx9_irq_chip = {
+ .name = "TXX9",
+ .irq_ack = txx9_irq_mask_ack,
+ .irq_mask = txx9_irq_mask,
+ .irq_mask_ack = txx9_irq_mask_ack,
+ .irq_unmask = txx9_irq_unmask,
+ .irq_set_type = txx9_irq_set_type,
+};
+
+void __init txx9_irq_init(unsigned long baseaddr)
+{
+ int i;
+
+ txx9_ircptr = ioremap(baseaddr, sizeof(struct txx9_irc_reg));
+ for (i = 0; i < TXx9_MAX_IR; i++) {
+ txx9irq[i].level = 4; /* middle level */
+ txx9irq[i].mode = TXx9_IRCR_LOW;
+ irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip,
+ handle_level_irq);
+ }
+
+ /* mask all IRC interrupts */
+ __raw_writel(0, &txx9_ircptr->imr);
+ for (i = 0; i < 8; i++)
+ __raw_writel(0, &txx9_ircptr->ilr[i]);
+ /* setup IRC interrupt mode (Low Active) */
+ for (i = 0; i < 2; i++)
+ __raw_writel(0, &txx9_ircptr->cr[i]);
+ /* enable interrupt control */
+ __raw_writel(TXx9_IRCER_ICE, &txx9_ircptr->cer);
+ __raw_writel(irc_elevel, &txx9_ircptr->imr);
+}
+
+int __init txx9_irq_set_pri(int irc_irq, int new_pri)
+{
+ int old_pri;
+
+ if ((unsigned int)irc_irq >= TXx9_MAX_IR)
+ return 0;
+ old_pri = txx9irq[irc_irq].level;
+ txx9irq[irc_irq].level = new_pri;
+ return old_pri;
+}
+
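+/*
+ * Translate the IRC current-status register into a Linux irq number;
+ * -1 means there is no interrupt to handle.
+ */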
+int txx9_irq(void)
+{
+ u32 csr = __raw_readl(&txx9_ircptr->csr);
+
+ if (likely(!(csr & TXx9_IRCSR_IF)))
+ return TXX9_IRQ_BASE + (csr & (TXx9_MAX_IR - 1));
+ return -1;
+}
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
new file mode 100644
index 000000000..9f5b1247b
--- /dev/null
+++ b/arch/mips/kernel/jump_label.c
@@ -0,0 +1,90 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2010 Cavium Networks, Inc.
+ */
+
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/cpu.h>
+
+#include <asm/cacheflush.h>
+#include <asm/inst.h>
+
+/*
+ * Define parameters for the standard MIPS and the microMIPS jump
+ * instruction encoding respectively:
+ *
+ * - the ISA bit of the target, either 0 or 1 respectively,
+ *
+ * - the amount the jump target address is shifted right to fit in the
+ * immediate field of the machine instruction, either 2 or 1,
+ *
+ * - the mask determining the size of the jump region relative to the
+ * delay-slot instruction, either 256MB or 128MB,
+ *
+ * - the jump target alignment, either 4 or 2 bytes.
+ */
+#define J_ISA_BIT IS_ENABLED(CONFIG_CPU_MICROMIPS)
+#define J_RANGE_SHIFT (2 - J_ISA_BIT)
+#define J_RANGE_MASK ((1ul << (26 + J_RANGE_SHIFT)) - 1)
+#define J_ALIGN_MASK ((1ul << J_RANGE_SHIFT) - 1)
+
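+/*
+ * Worked example for the standard encoding (J_ISA_BIT == 0): with
+ * J_RANGE_MASK == 0x0fffffff, a jump whose delay slot sits at
+ * 0x80001004 can reach any word-aligned target in the same 256MB
+ * region, i.e. 0x80000000 .. 0x8fffffff.
+ */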
+void arch_jump_label_transform(struct jump_entry *e,
+ enum jump_label_type type)
+{
+ union mips_instruction *insn_p;
+ union mips_instruction insn;
+ long offset;
+
+ insn_p = (union mips_instruction *)msk_isa16_mode(e->code);
+
+ /* Target must have the right alignment and ISA must be preserved. */
+ BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
+
+ if (type == JUMP_LABEL_JMP) {
+ if (!IS_ENABLED(CONFIG_CPU_MICROMIPS) && MIPS_ISA_REV >= 6) {
+ offset = e->target - ((unsigned long)insn_p + 4);
+ offset >>= 2;
+
+ /*
+ * The branch offset must fit in the instruction's 26
+ * bit field.
+ */
+ WARN_ON((offset >= (long)BIT(25)) ||
+ (offset < -(long)BIT(25)));
+
+ insn.j_format.opcode = bc6_op;
+ insn.j_format.target = offset;
+ } else {
+ /*
+ * A jump only reaches targets within the aligned region
+ * that its delay slot is in.
+ */
+ WARN_ON((e->target & ~J_RANGE_MASK) !=
+ ((e->code + 4) & ~J_RANGE_MASK));
+
+ insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
+ insn.j_format.target = e->target >> J_RANGE_SHIFT;
+ }
+ } else {
+ insn.word = 0; /* nop */
+ }
+
+ mutex_lock(&text_mutex);
+ if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
+ insn_p->halfword[0] = insn.word >> 16;
+ insn_p->halfword[1] = insn.word;
+ } else
+ *insn_p = insn;
+
+ flush_icache_range((unsigned long)insn_p,
+ (unsigned long)insn_p + sizeof(*insn_p));
+
+ mutex_unlock(&text_mutex);
+}
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
new file mode 100644
index 000000000..ea781b29f
--- /dev/null
+++ b/arch/mips/kernel/kgdb.c
@@ -0,0 +1,415 @@
+/*
+ * Originally written by Glenn Engel, Lake Stevens Instrument Division
+ *
+ * Contributed by HP Systems
+ *
+ * Modified for Linux/MIPS (and MIPS in general) by Andreas Busse
+ * Send complaints, suggestions etc. to <andy@waldorf-gmbh.de>
+ *
+ * Copyright (C) 1995 Andreas Busse
+ *
+ * Copyright (C) 2003 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * Copyright (C) 2004-2005 MontaVista Software Inc.
+ * Author: Manish Lachwani, mlachwani@mvista.com or manish@koffee-break.com
+ *
+ * Copyright (C) 2007-2008 Wind River Systems, Inc.
+ * Author/Maintainer: Jason Wessel, jason.wessel@windriver.com
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/ptrace.h> /* for linux pt_regs struct */
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <asm/inst.h>
+#include <asm/fpu.h>
+#include <asm/cacheflush.h>
+#include <asm/processor.h>
+#include <asm/sigcontext.h>
+#include <linux/uaccess.h>
+#include <asm/irq_regs.h>
+
+static struct hard_trap_info {
+ unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
+ unsigned char signo; /* Signal that we map this trap into */
+} hard_trap_info[] = {
+ { 6, SIGBUS }, /* instruction bus error */
+ { 7, SIGBUS }, /* data bus error */
+ { 9, SIGTRAP }, /* break */
+/* { 11, SIGILL }, */ /* CPU unusable */
+ { 12, SIGFPE }, /* overflow */
+ { 13, SIGTRAP }, /* trap */
+ { 14, SIGSEGV }, /* virtual instruction cache coherency */
+ { 15, SIGFPE }, /* floating point exception */
+ { 23, SIGSEGV }, /* watch */
+ { 31, SIGSEGV }, /* virtual data cache coherency */
+ { 0, 0} /* Must be last */
+};
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
+{
+ { "zero", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
+ { "at", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
+ { "v0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
+ { "v1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
+ { "a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
+ { "a1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
+ { "a2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
+ { "a3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
+ { "t0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
+ { "t1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
+ { "t2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
+ { "t3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
+ { "t4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
+ { "t5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
+ { "t6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
+ { "t7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
+ { "s0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
+ { "s1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
+ { "s2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
+ { "s3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
+ { "s4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
+ { "s5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
+ { "s6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
+ { "s7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
+ { "t8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
+ { "t9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
+ { "k0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
+ { "k1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
+ { "gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
+ { "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
+ { "s8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
+ { "ra", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
+ { "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_status) },
+ { "lo", GDB_SIZEOF_REG, offsetof(struct pt_regs, lo) },
+ { "hi", GDB_SIZEOF_REG, offsetof(struct pt_regs, hi) },
+ { "bad", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_badvaddr) },
+ { "cause", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_cause) },
+ { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_epc) },
+ { "f0", GDB_SIZEOF_REG, 0 },
+ { "f1", GDB_SIZEOF_REG, 1 },
+ { "f2", GDB_SIZEOF_REG, 2 },
+ { "f3", GDB_SIZEOF_REG, 3 },
+ { "f4", GDB_SIZEOF_REG, 4 },
+ { "f5", GDB_SIZEOF_REG, 5 },
+ { "f6", GDB_SIZEOF_REG, 6 },
+ { "f7", GDB_SIZEOF_REG, 7 },
+ { "f8", GDB_SIZEOF_REG, 8 },
+ { "f9", GDB_SIZEOF_REG, 9 },
+ { "f10", GDB_SIZEOF_REG, 10 },
+ { "f11", GDB_SIZEOF_REG, 11 },
+ { "f12", GDB_SIZEOF_REG, 12 },
+ { "f13", GDB_SIZEOF_REG, 13 },
+ { "f14", GDB_SIZEOF_REG, 14 },
+ { "f15", GDB_SIZEOF_REG, 15 },
+ { "f16", GDB_SIZEOF_REG, 16 },
+ { "f17", GDB_SIZEOF_REG, 17 },
+ { "f18", GDB_SIZEOF_REG, 18 },
+ { "f19", GDB_SIZEOF_REG, 19 },
+ { "f20", GDB_SIZEOF_REG, 20 },
+ { "f21", GDB_SIZEOF_REG, 21 },
+ { "f22", GDB_SIZEOF_REG, 22 },
+ { "f23", GDB_SIZEOF_REG, 23 },
+ { "f24", GDB_SIZEOF_REG, 24 },
+ { "f25", GDB_SIZEOF_REG, 25 },
+ { "f26", GDB_SIZEOF_REG, 26 },
+ { "f27", GDB_SIZEOF_REG, 27 },
+ { "f28", GDB_SIZEOF_REG, 28 },
+ { "f29", GDB_SIZEOF_REG, 29 },
+ { "f30", GDB_SIZEOF_REG, 30 },
+ { "f31", GDB_SIZEOF_REG, 31 },
+ { "fsr", GDB_SIZEOF_REG, 0 },
+ { "fir", GDB_SIZEOF_REG, 0 },
+};
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int fp_reg;
+
+ if (regno < 0 || regno >= DBG_MAX_REG_NUM)
+ return -EINVAL;
+
+ if (dbg_reg_def[regno].offset != -1 && regno < 38) {
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+ } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
+ /* FP registers 38 -> 69 */
+ if (!(regs->cp0_status & ST0_CU1))
+ return 0;
+ if (regno == 70) {
+ /* Process the fcr31/fsr (register 70) */
+ memcpy((void *)&current->thread.fpu.fcr31, mem,
+ dbg_reg_def[regno].size);
+ goto out_save;
+ } else if (regno == 71) {
+ /* Ignore the fir (register 71) */
+ goto out_save;
+ }
+ fp_reg = dbg_reg_def[regno].offset;
+ memcpy((void *)&current->thread.fpu.fpr[fp_reg], mem,
+ dbg_reg_def[regno].size);
+out_save:
+ restore_fp(current);
+ }
+
+ return 0;
+}
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int fp_reg;
+
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (dbg_reg_def[regno].offset != -1 && regno < 38) {
+ /* First 38 registers */
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+ } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
+ /* FP registers 38 -> 69 */
+ if (!(regs->cp0_status & ST0_CU1))
+ goto out;
+ save_fp(current);
+ if (regno == 70) {
+ /* Process the fcr31/fsr (register 70) */
+ memcpy(mem, (void *)&current->thread.fpu.fcr31,
+ dbg_reg_def[regno].size);
+ goto out;
+ } else if (regno == 71) {
+ /* Ignore the fir (register 71) */
+ memset(mem, 0, dbg_reg_def[regno].size);
+ goto out;
+ }
+ fp_reg = dbg_reg_def[regno].offset;
+ memcpy(mem, (void *)&current->thread.fpu.fpr[fp_reg],
+ dbg_reg_def[regno].size);
+ }
+
+out:
+ return dbg_reg_def[regno].name;
+
+}
+
+void arch_kgdb_breakpoint(void)
+{
+ __asm__ __volatile__(
+ ".globl breakinst\n\t"
+ ".set\tnoreorder\n\t"
+ "nop\n"
+ "breakinst:\tbreak\n\t"
+ "nop\n\t"
+ ".set\treorder");
+}
+
+void kgdb_call_nmi_hook(void *ignored)
+{
+ mm_segment_t old_fs;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+
+ set_fs(old_fs);
+}
+
+static int compute_signal(int tt)
+{
+ struct hard_trap_info *ht;
+
+ for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
+ if (ht->tt == tt)
+ return ht->signo;
+
+ return SIGHUP; /* default for things we don't know about */
+}
+
+/*
+ * Similar to regs_to_gdb_regs() except that the process is sleeping and so
+ * we may not be able to get all the info.
+ */
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+ int reg;
+#if (KGDB_GDB_REG_SIZE == 32)
+ u32 *ptr = (u32 *)gdb_regs;
+#else
+ u64 *ptr = (u64 *)gdb_regs;
+#endif
+
+ for (reg = 0; reg < 16; reg++)
+ *(ptr++) = 0;
+
+ /* S0 - S7 */
+ *(ptr++) = p->thread.reg16;
+ *(ptr++) = p->thread.reg17;
+ *(ptr++) = p->thread.reg18;
+ *(ptr++) = p->thread.reg19;
+ *(ptr++) = p->thread.reg20;
+ *(ptr++) = p->thread.reg21;
+ *(ptr++) = p->thread.reg22;
+ *(ptr++) = p->thread.reg23;
+
+ for (reg = 24; reg < 28; reg++)
+ *(ptr++) = 0;
+
+ /* GP, SP, FP, RA */
+ *(ptr++) = (long)p;
+ *(ptr++) = p->thread.reg29;
+ *(ptr++) = p->thread.reg30;
+ *(ptr++) = p->thread.reg31;
+
+ *(ptr++) = p->thread.cp0_status;
+
+ /* lo, hi */
+ *(ptr++) = 0;
+ *(ptr++) = 0;
+
+ /*
+ * BadVAddr, Cause
+ * Ideally these would come from the last exception frame up the stack
+ * but that requires unwinding, otherwise we can't know much for sure.
+ */
+ *(ptr++) = 0;
+ *(ptr++) = 0;
+
+ /*
+ * PC
+ * use return address (RA), i.e. the moment after return from resume()
+ */
+ *(ptr++) = p->thread.reg31;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->cp0_epc = pc;
+}
+
+/*
+ * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
+ * then try to fall into the debugger.
+ */
+static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
+ void *ptr)
+{
+ struct die_args *args = (struct die_args *)ptr;
+ struct pt_regs *regs = args->regs;
+ int trap = (regs->cp0_cause & 0x7c) >> 2;
+ mm_segment_t old_fs;
+
+#ifdef CONFIG_KPROBES
+ /*
+ * Return immediately if the kprobes fault notifier has set
+ * DIE_PAGE_FAULT.
+ */
+ if (cmd == DIE_PAGE_FAULT)
+ return NOTIFY_DONE;
+#endif /* CONFIG_KPROBES */
+
+ /* Userspace events, ignore. */
+ if (user_mode(regs))
+ return NOTIFY_DONE;
+
+ /* Kernel mode. Set correct address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ if (atomic_read(&kgdb_active) != -1)
+ kgdb_nmicallback(smp_processor_id(), regs);
+
+ if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) {
+ set_fs(old_fs);
+ return NOTIFY_DONE;
+ }
+
+ if (atomic_read(&kgdb_setting_breakpoint))
+ if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
+ regs->cp0_epc += 4;
+
+ /* In SMP mode, __flush_cache_all does IPI */
+ local_irq_enable();
+ __flush_cache_all();
+
+ set_fs(old_fs);
+ return NOTIFY_STOP;
+}
+
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig,
+
+ };
+
+ if (!kgdb_io_module_registered)
+ return NOTIFY_DONE;
+
+ return kgdb_mips_notify(NULL, cmd, &args);
+}
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_mips_notify,
+};
+
+/*
+ * Handle the 'c' command
+ */
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ char *remcom_in_buffer, char *remcom_out_buffer,
+ struct pt_regs *regs)
+{
+ char *ptr;
+ unsigned long address;
+
+ switch (remcom_in_buffer[0]) {
+ case 'c':
+ /* handle the optional parameter */
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &address))
+ regs->cp0_epc = address;
+
+ return 0;
+ }
+
+ return -1;
+}
+
+const struct kgdb_arch arch_kgdb_ops = {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ .gdb_bpt_instr = { spec_op << 2, 0x00, 0x00, break_op },
+#else
+ .gdb_bpt_instr = { break_op, 0x00, 0x00, spec_op << 2 },
+#endif
+};
+
+int kgdb_arch_init(void)
+{
+ register_die_notifier(&kgdb_notifier);
+
+ return 0;
+}
+
+/*
+ * kgdb_arch_exit - Perform any architecture-specific uninitialization.
+ *
+ * This function handles the uninitialization of any architecture-specific
+ * callbacks, for dynamic registration and unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+ unregister_die_notifier(&kgdb_notifier);
+}
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
new file mode 100644
index 000000000..54dfba8fa
--- /dev/null
+++ b/arch/mips/kernel/kprobes.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Kernel Probes (KProbes)
+ * arch/mips/kernel/kprobes.c
+ *
+ * Copyright 2006 Sony Corp.
+ * Copyright 2010 Cavium Networks
+ *
+ * Some portions copied from the powerpc version.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ */
+
+#include <linux/kprobes.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <linux/kdebug.h>
+#include <linux/slab.h>
+
+#include <asm/ptrace.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+
+#include "probes-common.h"
+
+static const union mips_instruction breakpoint_insn = {
+ .b_format = {
+ .opcode = spec_op,
+ .code = BRK_KPROBE_BP,
+ .func = break_op
+ }
+};
+
+static const union mips_instruction breakpoint2_insn = {
+ .b_format = {
+ .opcode = spec_op,
+ .code = BRK_KPROBE_SSTEPBP,
+ .func = break_op
+ }
+};
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe);
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static int __kprobes insn_has_delayslot(union mips_instruction insn)
+{
+ return __insn_has_delay_slot(insn);
+}
+
+/*
+ * insn_has_ll_or_sc checks whether the instruction is an ll or sc one.
+ * Putting a breakpoint on top of an atomic ll/sc pair is a bad idea, so
+ * we need to prevent it and refuse kprobe insertion for such instructions.
+ * We cannot do much about a breakpoint in the middle of an ll/sc pair;
+ * it is up to the user to avoid those places.
+ */
+static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
+{
+ int ret = 0;
+
+ switch (insn.i_format.opcode) {
+ case ll_op:
+ case lld_op:
+ case sc_op:
+ case scd_op:
+ ret = 1;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ union mips_instruction insn;
+ union mips_instruction prev_insn;
+ int ret = 0;
+
+ insn = p->addr[0];
+
+ if (insn_has_ll_or_sc(insn)) {
+ pr_notice("Kprobes for ll and sc instructions are not"
+ "supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (copy_from_kernel_nofault(&prev_insn, p->addr - 1,
+ sizeof(mips_instruction)) == 0 &&
+ insn_has_delayslot(prev_insn)) {
+ pr_notice("Kprobes for branch delayslot are not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (__insn_is_compact_branch(insn)) {
+ pr_notice("Kprobes for compact branches are not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* insn: must be on special executable page on mips. */
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * In the kprobe->ainsn.insn[] array we store the original
+ * instruction at index zero and a break trap instruction at
+ * index one.
+ *
+ * On MIPS arch if the instruction at probed address is a
+ * branch instruction, we need to execute the instruction at
+ * Branch Delayslot (BD) at the time of probe hit. As MIPS also
+ * doesn't have single stepping support, the BD instruction can
+ * not be executed in-line and it would be executed on SSOL slot
+ * using a normal breakpoint instruction in the next slot.
+ * So, read the instruction and save it for later execution.
+ */
+ if (insn_has_delayslot(insn))
+ memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
+ else
+ memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
+
+ p->ainsn.insn[1] = breakpoint2_insn;
+ p->opcode = *p->addr;
+
+out:
+ return ret;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = breakpoint_insn;
+ flush_insn_slot(p);
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+ flush_insn_slot(p);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ if (p->ainsn.insn) {
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+ }
+}
+
+static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+ kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
+ kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
+ kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
+}
+
+static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+ kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
+ kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
+ kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
+}
+
+static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, p);
+ kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
+ kcb->kprobe_saved_epc = regs->cp0_epc;
+}
+
+/**
+ * evaluate_branch_instruction -
+ *
+ * Evaluate the branch instruction at the probed address during a probe hit.
+ * The result of the evaluation is the updated epc. The instruction in the
+ * delay slot is then single stepped (using a normal breakpoint) on the SSOL
+ * slot.
+ *
+ * The result is also saved in the kprobe control block for later use, in
+ * case we need to execute the delay-slot instruction. The latter is not
+ * needed for a NOP in the delay slot, nor for branch-likely instructions
+ * when the branch is taken; for those cases we set the SKIP_DELAYSLOT
+ * flag in the kprobe control block.
+ */
+static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ union mips_instruction insn = p->opcode;
+ long epc;
+ int ret = 0;
+
+ epc = regs->cp0_epc;
+ if (epc & 3)
+ goto unaligned;
+
+ if (p->ainsn.insn->word == 0)
+ kcb->flags |= SKIP_DELAYSLOT;
+ else
+ kcb->flags &= ~SKIP_DELAYSLOT;
+
+ ret = __compute_return_epc_for_insn(regs, insn);
+ if (ret < 0)
+ return ret;
+
+ if (ret == BRANCH_LIKELY_TAKEN)
+ kcb->flags |= SKIP_DELAYSLOT;
+
+ kcb->target_epc = regs->cp0_epc;
+
+ return 0;
+
+unaligned:
+ pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm);
+ force_sig(SIGBUS);
+ return -EFAULT;
+
+}
+
+static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ int ret = 0;
+
+ regs->cp0_status &= ~ST0_IE;
+
+ /* single step inline if the instruction is a break */
+ if (p->opcode.word == breakpoint_insn.word ||
+ p->opcode.word == breakpoint2_insn.word)
+ regs->cp0_epc = (unsigned long)p->addr;
+ else if (insn_has_delayslot(p->opcode)) {
+ ret = evaluate_branch_instruction(p, regs, kcb);
+ if (ret < 0) {
+ pr_notice("Kprobes: Error in evaluating branch\n");
+ return;
+ }
+ }
+ regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
+}
+
+/*
+ * Called after single-stepping. p->addr is the address of the
+ * instruction that has been replaced by the "break 0"
+ * instruction. To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction. The address of this
+ * copy is p->ainsn.insn.
+ *
+ * This function prepares to return from the post-single-step
+ * breakpoint trap. In the case of branch instructions, the target
+ * epc is restored.
+ */
+static void __kprobes resume_execution(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ if (insn_has_delayslot(p->opcode))
+ regs->cp0_epc = kcb->target_epc;
+ else {
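+ /* Not a branch: resume at the instruction after the probed one. */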
+ unsigned long orig_epc = kcb->kprobe_saved_epc;
+ regs->cp0_epc = orig_epc + 4;
+ }
+}
+
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *p;
+ int ret = 0;
+ kprobe_opcode_t *addr;
+ struct kprobe_ctlblk *kcb;
+
+ addr = (kprobe_opcode_t *) regs->cp0_epc;
+
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing
+ */
+ preempt_disable();
+ kcb = get_kprobe_ctlblk();
+
+ /* Check we're not actually recursing */
+ if (kprobe_running()) {
+ p = get_kprobe(addr);
+ if (p) {
+ if (kcb->kprobe_status == KPROBE_HIT_SS &&
+ p->ainsn.insn->word == breakpoint_insn.word) {
+ regs->cp0_status &= ~ST0_IE;
+ regs->cp0_status |= kcb->kprobe_saved_SR;
+ goto no_kprobe;
+ }
+ /*
+ * We have reentered the kprobe_handler(), since
+ * another probe was hit while within the handler.
+ * We here save the original kprobes variables and
+ * just single step on the instruction of the new probe
+ * without calling any user handlers.
+ */
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kprobes_inc_nmissed_count(p);
+ prepare_singlestep(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_REENTER;
+ if (kcb->flags & SKIP_DELAYSLOT) {
+ resume_execution(p, regs, kcb);
+ restore_previous_kprobe(kcb);
+ preempt_enable_no_resched();
+ }
+ return 1;
+ } else if (addr->word != breakpoint_insn.word) {
+ /*
+ * The breakpoint instruction was removed by
+ * another cpu right after we hit, no further
+ * handling of this interrupt is appropriate
+ */
+ ret = 1;
+ }
+ goto no_kprobe;
+ }
+
+ p = get_kprobe(addr);
+ if (!p) {
+ if (addr->word != breakpoint_insn.word) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ */
+ ret = 1;
+ }
+ /* Not one of ours: let kernel handle it */
+ goto no_kprobe;
+ }
+
+ set_current_kprobe(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ /* handler has already set things up, so skip ss setup */
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ return 1;
+ }
+
+ prepare_singlestep(p, regs, kcb);
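+ /*
+ * If branch evaluation showed there is nothing to single-step
+ * (e.g. a NOP in the delay slot), finish the probe here instead
+ * of waiting for the single-step breakpoint.
+ */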
+ if (kcb->flags & SKIP_DELAYSLOT) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (p->post_handler)
+ p->post_handler(p, regs, 0);
+ resume_execution(p, regs, kcb);
+ preempt_enable_no_resched();
+ } else
+ kcb->kprobe_status = KPROBE_HIT_SS;
+
+ return 1;
+
+no_kprobe:
+ preempt_enable_no_resched();
+ return ret;
+
+}
+
+static inline int post_kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
+ return 0;
+
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+
+ resume_execution(cur, regs, kcb);
+
+ regs->cp0_status |= kcb->kprobe_saved_SR;
+
+ /* Restore back the original saved kprobes variables and continue. */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ goto out;
+ }
+ reset_current_kprobe();
+out:
+ preempt_enable_no_resched();
+
+ return 1;
+}
+
+int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ if (kcb->kprobe_status & KPROBE_HIT_SS) {
+ resume_execution(cur, regs, kcb);
+ regs->cp0_status |= kcb->kprobe_old_SR;
+
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ }
+ return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+
+ struct die_args *args = (struct die_args *)data;
+ int ret = NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_BREAK:
+ if (kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+ case DIE_SSTEPBP:
+ if (post_kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+
+ case DIE_PAGE_FAULT:
+ /* kprobe_running() needs smp_processor_id() */
+ preempt_disable();
+
+ if (kprobe_running()
+ && kprobe_fault_handler(args->regs, args->trapnr))
+ ret = NOTIFY_STOP;
+ preempt_enable();
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Function return probe trampoline:
+ * - init_kprobes() establishes a probepoint here
+ * - When the probed function returns, this probe causes the
+ * handlers to fire
+ */
+static void __used kretprobe_trampoline_holder(void)
+{
+ asm volatile(
+ ".set push\n\t"
+ /* Keep the assembler from reordering and placing JR here. */
+ ".set noreorder\n\t"
+ "nop\n\t"
+ ".global kretprobe_trampoline\n"
+ "kretprobe_trampoline:\n\t"
+ "nop\n\t"
+ ".set pop"
+ : : : "memory");
+}
+
+void kretprobe_trampoline(void);
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
+ ri->fp = NULL;
+
+ /* Replace the return addr with trampoline addr */
+ regs->regs[31] = (unsigned long)kretprobe_trampoline;
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
+{
+ instruction_pointer(regs) = __kretprobe_trampoline_handler(regs,
+ kretprobe_trampoline, NULL);
+ /*
+ * By returning a non-zero value, we are telling
+ * kprobe_handler() that we don't want the post_handler
+ * to run (and have re-enabled preemption)
+ */
+ return 1;
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
+ return 1;
+
+ return 0;
+}
+
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *)kretprobe_trampoline,
+ .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+ return register_kprobe(&trampoline_p);
+}
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
new file mode 100644
index 000000000..6b61be486
--- /dev/null
+++ b/arch/mips/kernel/linux32.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Conversion between 32-bit and 64-bit native system calls.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Written by Ulf Carlsson (ulfc@engr.sgi.com)
+ */
+#include <linux/compiler.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/highuid.h>
+#include <linux/resource.h>
+#include <linux/highmem.h>
+#include <linux/time.h>
+#include <linux/times.h>
+#include <linux/poll.h>
+#include <linux/skbuff.h>
+#include <linux/filter.h>
+#include <linux/shm.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/icmpv6.h>
+#include <linux/syscalls.h>
+#include <linux/sysctl.h>
+#include <linux/utime.h>
+#include <linux/utsname.h>
+#include <linux/personality.h>
+#include <linux/dnotify.h>
+#include <linux/binfmts.h>
+#include <linux/security.h>
+#include <linux/compat.h>
+#include <linux/vfs.h>
+#include <linux/ipc.h>
+#include <linux/slab.h>
+
+#include <net/sock.h>
+#include <net/scm.h>
+
+#include <asm/compat-signal.h>
+#include <asm/sim.h>
+#include <linux/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/mman.h>
+
+#ifdef __MIPSEB__
+#define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
+#endif
+#ifdef __MIPSEL__
+#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
+#endif
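+
+/*
+ * merge_64(r1, r2) reassembles a 64-bit value from the two 32-bit
+ * argument registers a 32-bit caller used to pass it, choosing which
+ * half is the high word according to endianness. For example, the
+ * 64-bit offset of truncate64() arrives split across (a2, a3) and is
+ * rebuilt below as merge_64(a2, a3).
+ */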
+
+SYSCALL_DEFINE4(32_truncate64, const char __user *, path,
+ unsigned long, __dummy, unsigned long, a2, unsigned long, a3)
+{
+ return ksys_truncate(path, merge_64(a2, a3));
+}
+
+SYSCALL_DEFINE4(32_ftruncate64, unsigned long, fd, unsigned long, __dummy,
+ unsigned long, a2, unsigned long, a3)
+{
+ return ksys_ftruncate(fd, merge_64(a2, a3));
+}
+
+SYSCALL_DEFINE5(32_llseek, unsigned int, fd, unsigned int, offset_high,
+ unsigned int, offset_low, loff_t __user *, result,
+ unsigned int, origin)
+{
+ return sys_llseek(fd, offset_high, offset_low, result, origin);
+}
+
+/* From the Single Unix Spec: pread & pwrite act like lseek to pos + op +
+ lseek back to original location. They fail just like lseek does on
+ non-seekable files. */
+
+SYSCALL_DEFINE6(32_pread, unsigned long, fd, char __user *, buf, size_t, count,
+ unsigned long, unused, unsigned long, a4, unsigned long, a5)
+{
+ return ksys_pread64(fd, buf, count, merge_64(a4, a5));
+}
+
+SYSCALL_DEFINE6(32_pwrite, unsigned int, fd, const char __user *, buf,
+ size_t, count, u32, unused, u64, a4, u64, a5)
+{
+ return ksys_pwrite64(fd, buf, count, merge_64(a4, a5));
+}
+
+SYSCALL_DEFINE1(32_personality, unsigned long, personality)
+{
+ unsigned int p = personality & 0xffffffff;
+ int ret;
+
+ if (personality(current->personality) == PER_LINUX32 &&
+ personality(p) == PER_LINUX)
+ p = (p & ~PER_MASK) | PER_LINUX32;
+ ret = sys_personality(p);
+ if (ret != -1 && personality(ret) == PER_LINUX32)
+ ret = (ret & ~PER_MASK) | PER_LINUX;
+ return ret;
+}
+
+asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3,
+ size_t count)
+{
+ return ksys_readahead(fd, merge_64(a2, a3), count);
+}
+
+asmlinkage long sys32_sync_file_range(int fd, int __pad,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ int flags)
+{
+ return ksys_sync_file_range(fd,
+ merge_64(a2, a3), merge_64(a4, a5),
+ flags);
+}
+
+asmlinkage long sys32_fadvise64_64(int fd, int __pad,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ int flags)
+{
+ return ksys_fadvise64_64(fd,
+ merge_64(a2, a3), merge_64(a4, a5),
+ flags);
+}
+
+asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
+ unsigned offset_a3, unsigned len_a4, unsigned len_a5)
+{
+ return ksys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
+ merge_64(len_a4, len_a5));
+}
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
new file mode 100644
index 000000000..432bfd3e7
--- /dev/null
+++ b/arch/mips/kernel/machine_kexec.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * machine_kexec.c for kexec
+ * Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006
+ */
+#include <linux/compiler.h>
+#include <linux/kexec.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/libfdt.h>
+
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+
+extern const unsigned char relocate_new_kernel[];
+extern const size_t relocate_new_kernel_size;
+
+extern unsigned long kexec_start_address;
+extern unsigned long kexec_indirection_page;
+
+static unsigned long reboot_code_buffer;
+
+#ifdef CONFIG_SMP
+static void (*relocated_kexec_smp_wait)(void *);
+
+atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+void (*_crash_smp_send_stop)(void) = NULL;
+#endif
+
+void (*_machine_kexec_shutdown)(void) = NULL;
+void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
+
+static void kexec_image_info(const struct kimage *kimage)
+{
+ unsigned long i;
+
+ pr_debug("kexec kimage info:\n");
+ pr_debug(" type: %d\n", kimage->type);
+ pr_debug(" start: %lx\n", kimage->start);
+ pr_debug(" head: %lx\n", kimage->head);
+ pr_debug(" nr_segments: %lu\n", kimage->nr_segments);
+
+ for (i = 0; i < kimage->nr_segments; i++) {
+ pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
+ i,
+ kimage->segment[i].mem,
+ kimage->segment[i].mem + kimage->segment[i].memsz,
+ (unsigned long)kimage->segment[i].memsz,
+ (unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
+ }
+}
+
+#ifdef CONFIG_UHI_BOOT
+
+static int uhi_machine_kexec_prepare(struct kimage *kimage)
+{
+ int i;
+
+ /*
+ * If no DTB file is passed to the new kernel, the kexec tool
+ * creates a flat device tree that holds the modified command
+ * line for the new kernel.
+ */
+ for (i = 0; i < kimage->nr_segments; i++) {
+ struct fdt_header fdt;
+
+ if (kimage->segment[i].memsz <= sizeof(fdt))
+ continue;
+
+ if (copy_from_user(&fdt, kimage->segment[i].buf, sizeof(fdt)))
+ continue;
+
+ if (fdt_check_header(&fdt))
+ continue;
+
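+ /*
+ * UHI boot interface: $a0 = -2 tells the new kernel that
+ * $a1 holds a pointer to the device tree blob.
+ */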
+ kexec_args[0] = -2;
+ kexec_args[1] = (unsigned long)
+ phys_to_virt((unsigned long)kimage->segment[i].mem);
+ break;
+ }
+
+ return 0;
+}
+
+int (*_machine_kexec_prepare)(struct kimage *) = uhi_machine_kexec_prepare;
+
+#else
+
+int (*_machine_kexec_prepare)(struct kimage *) = NULL;
+
+#endif /* CONFIG_UHI_BOOT */
+
+int
+machine_kexec_prepare(struct kimage *kimage)
+{
+#ifdef CONFIG_SMP
+ if (!kexec_nonboot_cpu_func())
+ return -EINVAL;
+#endif
+
+ kexec_image_info(kimage);
+
+ if (_machine_kexec_prepare)
+ return _machine_kexec_prepare(kimage);
+
+ return 0;
+}
+
+void
+machine_kexec_cleanup(struct kimage *kimage)
+{
+}
+
+#ifdef CONFIG_SMP
+static void kexec_shutdown_secondary(void *param)
+{
+ int cpu = smp_processor_id();
+
+ if (!cpu_online(cpu))
+ return;
+
+ /* We won't be sent IPIs any more. */
+ set_cpu_online(cpu, false);
+
+ local_irq_disable();
+ while (!atomic_read(&kexec_ready_to_reboot))
+ cpu_relax();
+
+ kexec_reboot();
+
+ /* NOTREACHED */
+}
+#endif
+
+void
+machine_shutdown(void)
+{
+ if (_machine_kexec_shutdown)
+ _machine_kexec_shutdown();
+
+#ifdef CONFIG_SMP
+ smp_call_function(kexec_shutdown_secondary, NULL, 0);
+
+ while (num_online_cpus() > 1) {
+ cpu_relax();
+ mdelay(1);
+ }
+#endif
+}
+
+void
+machine_crash_shutdown(struct pt_regs *regs)
+{
+ if (_machine_crash_shutdown)
+ _machine_crash_shutdown(regs);
+ else
+ default_machine_crash_shutdown(regs);
+}
+
+#ifdef CONFIG_SMP
+void kexec_nonboot_cpu_jump(void)
+{
+ local_flush_icache_range((unsigned long)relocated_kexec_smp_wait,
+ reboot_code_buffer + relocate_new_kernel_size);
+
+ relocated_kexec_smp_wait(NULL);
+}
+#endif
+
+void kexec_reboot(void)
+{
+ void (*do_kexec)(void) __noreturn;
+
+ /*
+ * We know we were online, and there will be no incoming IPIs at
+ * this point. Mark online again before rebooting so that the crash
+ * analysis tool will see us correctly.
+ */
+ set_cpu_online(smp_processor_id(), true);
+
+ /* Ensure remote CPUs observe that we're online before rebooting. */
+ smp_mb__after_atomic();
+
+#ifdef CONFIG_SMP
+ if (smp_processor_id() > 0) {
+ /*
+ * Instead of cpu_relax() or wait, this is needed for kexec
+ * SMP reboot. Kdump usually doesn't require an SMP-capable new
+ * kernel, but kexec may.
+ */
+ kexec_nonboot_cpu();
+
+ /* NOTREACHED */
+ }
+#endif
+
+ /*
+ * Make sure we get correct instructions written by the
+ * machine_kexec() CPU.
+ */
+ local_flush_icache_range(reboot_code_buffer,
+ reboot_code_buffer + relocate_new_kernel_size);
+
+ do_kexec = (void *)reboot_code_buffer;
+ do_kexec();
+}
+
+void
+machine_kexec(struct kimage *image)
+{
+ unsigned long entry;
+ unsigned long *ptr;
+
+ reboot_code_buffer =
+ (unsigned long)page_address(image->control_code_page);
+
+ kexec_start_address =
+ (unsigned long) phys_to_virt(image->start);
+
+ if (image->type == KEXEC_TYPE_DEFAULT) {
+ kexec_indirection_page =
+ (unsigned long) phys_to_virt(image->head & PAGE_MASK);
+ } else {
+ kexec_indirection_page = (unsigned long)&image->head;
+ }
+
+ memcpy((void*)reboot_code_buffer, relocate_new_kernel,
+ relocate_new_kernel_size);
+
+ /*
+ * The generic kexec code builds a page list with physical
+ * addresses. They are directly accessible through KSEG0 (or
+ * CKSEG0 or XKPHYS on a 64-bit system), hence the
+ * phys_to_virt() call.
+ */
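+ /*
+ * Each list entry carries a type flag in its low bits:
+ * IND_INDIRECTION points at the next page of entries, IND_SOURCE
+ * and IND_DESTINATION carry page addresses, and IND_DONE
+ * terminates the list.
+ */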
+ for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
+ ptr = (entry & IND_INDIRECTION) ?
+ phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
+ if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
+ *ptr & IND_DESTINATION)
+ *ptr = (unsigned long) phys_to_virt(*ptr);
+ }
+
+ /* Mark offline BEFORE disabling local irq. */
+ set_cpu_online(smp_processor_id(), false);
+
+ /*
+ * We do not want to be bothered by interrupts.
+ */
+ local_irq_disable();
+
+ printk("Will call new kernel at %08lx\n", image->start);
+ printk("Bye ...\n");
+ /* Make reboot code buffer available to the boot CPU. */
+ __flush_cache_all();
+#ifdef CONFIG_SMP
+ /* All secondary CPUs may now jump to the kexec_wait cycle */
+ relocated_kexec_smp_wait = reboot_code_buffer +
+ (void *)(kexec_smp_wait - relocate_new_kernel);
+ smp_wmb();
+ atomic_set(&kexec_ready_to_reboot, 1);
+#endif
+ kexec_reboot();
+}
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
new file mode 100644
index 000000000..cff52b283
--- /dev/null
+++ b/arch/mips/kernel/mcount.S
@@ -0,0 +1,220 @@
+/*
+ * MIPS specific _mcount support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive for
+ * more details.
+ *
+ * Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University, China
+ * Copyright (C) 2010 DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#include <asm/export.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/ftrace.h>
+
+ .text
+ .set noreorder
+ .set noat
+
+ .macro MCOUNT_SAVE_REGS
+ PTR_SUBU sp, PT_SIZE
+ PTR_S ra, PT_R31(sp)
+ PTR_S AT, PT_R1(sp)
+ PTR_S a0, PT_R4(sp)
+ PTR_S a1, PT_R5(sp)
+ PTR_S a2, PT_R6(sp)
+ PTR_S a3, PT_R7(sp)
+#ifdef CONFIG_64BIT
+ PTR_S a4, PT_R8(sp)
+ PTR_S a5, PT_R9(sp)
+ PTR_S a6, PT_R10(sp)
+ PTR_S a7, PT_R11(sp)
+#endif
+ .endm
+
+ .macro MCOUNT_RESTORE_REGS
+ PTR_L ra, PT_R31(sp)
+ PTR_L AT, PT_R1(sp)
+ PTR_L a0, PT_R4(sp)
+ PTR_L a1, PT_R5(sp)
+ PTR_L a2, PT_R6(sp)
+ PTR_L a3, PT_R7(sp)
+#ifdef CONFIG_64BIT
+ PTR_L a4, PT_R8(sp)
+ PTR_L a5, PT_R9(sp)
+ PTR_L a6, PT_R10(sp)
+ PTR_L a7, PT_R11(sp)
+#endif
+ PTR_ADDIU sp, PT_SIZE
+ .endm
+
+ .macro RETURN_BACK
+ jr ra
+ move ra, AT
+ .endm
+
+/*
+ * The -mmcount-ra-address option of gcc 4.5 uses register $12 to pass
+ * the location of the parent's return address.
+ */
+#define MCOUNT_RA_ADDRESS_REG $12
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+NESTED(ftrace_caller, PT_SIZE, ra)
+ .globl _mcount
+_mcount:
+EXPORT_SYMBOL(_mcount)
+ b ftrace_stub
+#ifdef CONFIG_32BIT
+ addiu sp, sp, 8
+#else
+ nop
+#endif
+
+ /* When tracing is activated, it calls ftrace_caller+8 (aka here) */
+ MCOUNT_SAVE_REGS
+#ifdef KBUILD_MCOUNT_RA_ADDRESS
+ PTR_S MCOUNT_RA_ADDRESS_REG, PT_R12(sp)
+#endif
+
+ PTR_SUBU a0, ra, 8 /* arg1: self address */
+ PTR_LA t1, _stext
+ sltu t2, a0, t1 /* t2 = (a0 < _stext) */
+ PTR_LA t1, _etext
+ sltu t3, t1, a0 /* t3 = (a0 > _etext) */
+ or t1, t2, t3
+ beqz t1, ftrace_call
+ nop
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+ PTR_SUBU a0, a0, 16 /* arg1: adjust to module's recorded callsite */
+#else
+ PTR_SUBU a0, a0, 12
+#endif
+
+ .globl ftrace_call
+ftrace_call:
+ nop /* a placeholder for the call to a real tracing function */
+ move a1, AT /* arg2: parent's return address */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .globl ftrace_graph_call
+ftrace_graph_call:
+ nop
+ nop
+#endif
+
+ MCOUNT_RESTORE_REGS
+ .globl ftrace_stub
+ftrace_stub:
+ RETURN_BACK
+ END(ftrace_caller)
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+NESTED(_mcount, PT_SIZE, ra)
+EXPORT_SYMBOL(_mcount)
+ PTR_LA t1, ftrace_stub
+ PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
+ beq t1, t2, fgraph_trace
+ nop
+
+ MCOUNT_SAVE_REGS
+
+ move a0, ra /* arg1: self return address */
+ jalr t2 /* (1) call *ftrace_trace_function */
+ move a1, AT /* arg2: parent's return address */
+
+ MCOUNT_RESTORE_REGS
+
+fgraph_trace:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ PTR_LA t1, ftrace_stub
+ PTR_L t3, ftrace_graph_return
+ bne t1, t3, ftrace_graph_caller
+ nop
+ PTR_LA t1, ftrace_graph_entry_stub
+ PTR_L t3, ftrace_graph_entry
+ bne t1, t3, ftrace_graph_caller
+ nop
+#endif
+
+#ifdef CONFIG_32BIT
+ addiu sp, sp, 8
+#endif
+
+ .globl ftrace_stub
+ftrace_stub:
+ RETURN_BACK
+ END(_mcount)
+
+#endif /* ! CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+NESTED(ftrace_graph_caller, PT_SIZE, ra)
+#ifndef CONFIG_DYNAMIC_FTRACE
+ MCOUNT_SAVE_REGS
+#endif
+
+ /* arg1: Get the location of the parent's return address */
+#ifdef KBUILD_MCOUNT_RA_ADDRESS
+#ifdef CONFIG_DYNAMIC_FTRACE
+ PTR_L a0, PT_R12(sp)
+#else
+ move a0, MCOUNT_RA_ADDRESS_REG
+#endif
+ bnez a0, 1f /* non-leaf func: stored in MCOUNT_RA_ADDRESS_REG */
+ nop
+#endif
+ PTR_LA a0, PT_R1(sp) /* leaf func: the location in current stack */
+1:
+
+ /* arg2: Get self return address */
+#ifdef CONFIG_DYNAMIC_FTRACE
+ PTR_L a1, PT_R31(sp)
+#else
+ move a1, ra
+#endif
+
+ /* arg3: Get frame pointer of current stack */
+#ifdef CONFIG_64BIT
+ PTR_LA a2, PT_SIZE(sp)
+#else
+ PTR_LA a2, (PT_SIZE+8)(sp)
+#endif
+
+ jal prepare_ftrace_return
+ nop
+ MCOUNT_RESTORE_REGS
+#ifndef CONFIG_DYNAMIC_FTRACE
+#ifdef CONFIG_32BIT
+ addiu sp, sp, 8
+#endif
+#endif
+ RETURN_BACK
+ END(ftrace_graph_caller)
+
+ .align 2
+ .globl return_to_handler
+return_to_handler:
+ PTR_SUBU sp, PT_SIZE
+ PTR_S v0, PT_R2(sp)
+
+ jal ftrace_return_to_handler
+ PTR_S v1, PT_R3(sp)
+
+ /* restore the real parent address: v0 -> ra */
+ move ra, v0
+
+ PTR_L v0, PT_R2(sp)
+ PTR_L v1, PT_R3(sp)
+ jr ra
+ PTR_ADDIU sp, PT_SIZE
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+ .set at
+ .set reorder
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
new file mode 100644
index 000000000..72c8374a3
--- /dev/null
+++ b/arch/mips/kernel/mips-cm.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+
+#include <asm/mips-cps.h>
+#include <asm/mipsregs.h>
+
+void __iomem *mips_gcr_base;
+void __iomem *mips_cm_l2sync_base;
+int mips_cm_is64;
+
+static char *cm2_tr[8] = {
+ "mem", "gcr", "gic", "mmio",
+ "0x04", "cpc", "0x06", "0x07"
+};
+
+/* CM3 Tag ECC transaction type */
+static char *cm3_tr[16] = {
+ [0x0] = "ReqNoData",
+ [0x1] = "0x1",
+ [0x2] = "ReqWData",
+ [0x3] = "0x3",
+ [0x4] = "IReqNoResp",
+ [0x5] = "IReqWResp",
+ [0x6] = "IReqNoRespDat",
+ [0x7] = "IReqWRespDat",
+ [0x8] = "RespNoData",
+ [0x9] = "RespDataFol",
+ [0xa] = "RespWData",
+ [0xb] = "RespDataOnly",
+ [0xc] = "IRespNoData",
+ [0xd] = "IRespDataFol",
+ [0xe] = "IRespWData",
+ [0xf] = "IRespDataOnly"
+};
+
+static char *cm2_cmd[32] = {
+ [0x00] = "0x00",
+ [0x01] = "Legacy Write",
+ [0x02] = "Legacy Read",
+ [0x03] = "0x03",
+ [0x04] = "0x04",
+ [0x05] = "0x05",
+ [0x06] = "0x06",
+ [0x07] = "0x07",
+ [0x08] = "Coherent Read Own",
+ [0x09] = "Coherent Read Share",
+ [0x0a] = "Coherent Read Discard",
+ [0x0b] = "Coherent Ready Share Always",
+ [0x0c] = "Coherent Upgrade",
+ [0x0d] = "Coherent Writeback",
+ [0x0e] = "0x0e",
+ [0x0f] = "0x0f",
+ [0x10] = "Coherent Copyback",
+ [0x11] = "Coherent Copyback Invalidate",
+ [0x12] = "Coherent Invalidate",
+ [0x13] = "Coherent Write Invalidate",
+ [0x14] = "Coherent Completion Sync",
+ [0x15] = "0x15",
+ [0x16] = "0x16",
+ [0x17] = "0x17",
+ [0x18] = "0x18",
+ [0x19] = "0x19",
+ [0x1a] = "0x1a",
+ [0x1b] = "0x1b",
+ [0x1c] = "0x1c",
+ [0x1d] = "0x1d",
+ [0x1e] = "0x1e",
+ [0x1f] = "0x1f"
+};
+
+/* CM3 Tag ECC command type */
+static char *cm3_cmd[16] = {
+ [0x0] = "Legacy Read",
+ [0x1] = "Legacy Write",
+ [0x2] = "Coherent Read Own",
+ [0x3] = "Coherent Read Share",
+ [0x4] = "Coherent Read Discard",
+ [0x5] = "Coherent Evicted",
+ [0x6] = "Coherent Upgrade",
+ [0x7] = "Coherent Upgrade for Store Conditional",
+ [0x8] = "Coherent Writeback",
+ [0x9] = "Coherent Write Invalidate",
+ [0xa] = "0xa",
+ [0xb] = "0xb",
+ [0xc] = "0xc",
+ [0xd] = "0xd",
+ [0xe] = "0xe",
+ [0xf] = "0xf"
+};
+
+/* CM3 Tag ECC command group */
+static char *cm3_cmd_group[8] = {
+ [0x0] = "Normal",
+ [0x1] = "Registers",
+ [0x2] = "TLB",
+ [0x3] = "0x3",
+ [0x4] = "L1I",
+ [0x5] = "L1D",
+ [0x6] = "L3",
+ [0x7] = "L2"
+};
+
+static char *cm2_core[8] = {
+ "Invalid/OK", "Invalid/Data",
+ "Shared/OK", "Shared/Data",
+ "Modified/OK", "Modified/Data",
+ "Exclusive/OK", "Exclusive/Data"
+};
+
+static char *cm2_l2_type[4] = {
+ [0x0] = "None",
+ [0x1] = "Tag RAM single/double ECC error",
+ [0x2] = "Data RAM single/double ECC error",
+ [0x3] = "WS RAM uncorrectable dirty parity"
+};
+
+static char *cm2_l2_instr[32] = {
+ [0x00] = "L2_NOP",
+ [0x01] = "L2_ERR_CORR",
+ [0x02] = "L2_TAG_INV",
+ [0x03] = "L2_WS_CLEAN",
+ [0x04] = "L2_RD_MDYFY_WR",
+ [0x05] = "L2_WS_MRU",
+ [0x06] = "L2_EVICT_LN2",
+ [0x07] = "0x07",
+ [0x08] = "L2_EVICT",
+ [0x09] = "L2_REFL",
+ [0x0a] = "L2_RD",
+ [0x0b] = "L2_WR",
+ [0x0c] = "L2_EVICT_MRU",
+ [0x0d] = "L2_SYNC",
+ [0x0e] = "L2_REFL_ERR",
+ [0x0f] = "0x0f",
+ [0x10] = "L2_INDX_WB_INV",
+ [0x11] = "L2_INDX_LD_TAG",
+ [0x12] = "L2_INDX_ST_TAG",
+ [0x13] = "L2_INDX_ST_DATA",
+ [0x14] = "L2_INDX_ST_ECC",
+ [0x15] = "0x15",
+ [0x16] = "0x16",
+ [0x17] = "0x17",
+ [0x18] = "L2_FTCH_AND_LCK",
+ [0x19] = "L2_HIT_INV",
+ [0x1a] = "L2_HIT_WB_INV",
+ [0x1b] = "L2_HIT_WB",
+ [0x1c] = "0x1c",
+ [0x1d] = "0x1d",
+ [0x1e] = "0x1e",
+ [0x1f] = "0x1f"
+};
+
+static char *cm2_causes[32] = {
+ "None", "GC_WR_ERR", "GC_RD_ERR", "COH_WR_ERR",
+ "COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
+ "0x08", "0x09", "0x0a", "0x0b",
+ "0x0c", "0x0d", "0x0e", "0x0f",
+ "0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13",
+ "0x14", "0x15", "0x16", "0x17",
+ "L2_RD_UNCORR", "L2_WR_UNCORR", "L2_CORR", "0x1b",
+ "0x1c", "0x1d", "0x1e", "0x1f"
+};
+
+static char *cm3_causes[32] = {
+ "0x0", "MP_CORRECTABLE_ECC_ERR", "MP_REQUEST_DECODE_ERR",
+ "MP_UNCORRECTABLE_ECC_ERR", "MP_PARITY_ERR", "MP_COHERENCE_ERR",
+ "CMBIU_REQUEST_DECODE_ERR", "CMBIU_PARITY_ERR", "CMBIU_AXI_RESP_ERR",
+ "0x9", "RBI_BUS_ERR", "0xb", "0xc", "0xd", "0xe", "0xf", "0x10",
+ "0x11", "0x12", "0x13", "0x14", "0x15", "0x16", "0x17", "0x18",
+ "0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f"
+};
+
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock);
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags);
+
+phys_addr_t __mips_cm_phys_base(void)
+{
+ u32 config3 = read_c0_config3();
+ unsigned long cmgcr;
+
+ /* Check the CMGCRBase register is implemented */
+ if (!(config3 & MIPS_CONF3_CMGCR))
+ return 0;
+
+ /* Read the address from CMGCRBase */
+ cmgcr = read_c0_cmgcrbase();
+ return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32);
+}
+
+phys_addr_t mips_cm_phys_base(void)
+ __attribute__((weak, alias("__mips_cm_phys_base")));
+
+phys_addr_t __mips_cm_l2sync_phys_base(void)
+{
+ u32 base_reg;
+
+ /*
+ * If the L2-only sync region is already enabled then leave it at its
+ * current location.
+ */
+ base_reg = read_gcr_l2_only_sync_base();
+ if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN)
+ return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE;
+
+ /* Default to following the CM */
+ return mips_cm_phys_base() + MIPS_CM_GCR_SIZE;
+}
+
+phys_addr_t mips_cm_l2sync_phys_base(void)
+ __attribute__((weak, alias("__mips_cm_l2sync_phys_base")));
+
+static void mips_cm_probe_l2sync(void)
+{
+ unsigned major_rev;
+ phys_addr_t addr;
+
+ /* L2-only sync was introduced with CM major revision 6 */
+ major_rev = FIELD_GET(CM_GCR_REV_MAJOR, read_gcr_rev());
+ if (major_rev < 6)
+ return;
+
+ /* Find a location for the L2 sync region */
+ addr = mips_cm_l2sync_phys_base();
+ BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE) != addr);
+ if (!addr)
+ return;
+
+ /* Set the region base address & enable it */
+ write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN);
+
+ /* Map the region */
+ mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE);
+}
+
+int mips_cm_probe(void)
+{
+ phys_addr_t addr;
+ u32 base_reg;
+ unsigned cpu;
+
+ /*
+ * No need to probe again if we have already been
+ * here before.
+ */
+ if (mips_gcr_base)
+ return 0;
+
+ addr = mips_cm_phys_base();
+ BUG_ON((addr & CM_GCR_BASE_GCRBASE) != addr);
+ if (!addr)
+ return -ENODEV;
+
+ mips_gcr_base = ioremap(addr, MIPS_CM_GCR_SIZE);
+ if (!mips_gcr_base)
+ return -ENXIO;
+
+ /* sanity check that we're looking at a CM */
+ base_reg = read_gcr_base();
+ if ((base_reg & CM_GCR_BASE_GCRBASE) != addr) {
+ pr_err("GCRs appear to have been moved (expected them at 0x%08lx)!\n",
+ (unsigned long)addr);
+ mips_gcr_base = NULL;
+ return -ENODEV;
+ }
+
+ /* set default target to memory */
+ change_gcr_base(CM_GCR_BASE_CMDEFTGT, CM_GCR_BASE_CMDEFTGT_MEM);
+
+ /* disable CM regions */
+ write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR);
+ write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK);
+ write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR);
+ write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK);
+ write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR);
+ write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK);
+ write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR);
+ write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK);
+
+ /* probe for an L2-only sync region */
+ mips_cm_probe_l2sync();
+
+ /* determine register width for this CM */
+ mips_cm_is64 = IS_ENABLED(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
+
+ for_each_possible_cpu(cpu)
+ spin_lock_init(&per_cpu(cm_core_lock, cpu));
+
+ return 0;
+}
+
+void mips_cm_lock_other(unsigned int cluster, unsigned int core,
+ unsigned int vp, unsigned int block)
+{
+ unsigned int curr_core, cm_rev;
+ u32 val;
+
+ cm_rev = mips_cm_revision();
+ preempt_disable();
+
+ if (cm_rev >= CM_REV_CM3) {
+ val = FIELD_PREP(CM3_GCR_Cx_OTHER_CORE, core) |
+ FIELD_PREP(CM3_GCR_Cx_OTHER_VP, vp);
+
+ if (cm_rev >= CM_REV_CM3_5) {
+ val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
+ val |= FIELD_PREP(CM_GCR_Cx_OTHER_CLUSTER, cluster);
+ val |= FIELD_PREP(CM_GCR_Cx_OTHER_BLOCK, block);
+ } else {
+ WARN_ON(cluster != 0);
+ WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ }
+
+ /*
+ * We need to disable interrupts in SMP systems in order to
+ * ensure that we don't interrupt the caller with code which
+ * may modify the redirect register. We do so here in a
+ * slightly obscure way by using a spin lock, since this has
+ * the neat property of also catching any nested uses of
+ * mips_cm_lock_other() leading to a deadlock or a nice warning
+ * with lockdep enabled.
+ */
+ spin_lock_irqsave(this_cpu_ptr(&cm_core_lock),
+ *this_cpu_ptr(&cm_core_lock_flags));
+ } else {
+ WARN_ON(cluster != 0);
+ WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+
+ /*
+ * We only have a GCR_CL_OTHER per core in systems with
+ * CM 2.5 & older, so have to ensure other VP(E)s don't
+ * race with us.
+ */
+ curr_core = cpu_core(&current_cpu_data);
+ spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
+ per_cpu(cm_core_lock_flags, curr_core));
+
+ val = FIELD_PREP(CM_GCR_Cx_OTHER_CORENUM, core);
+ }
+
+ write_gcr_cl_other(val);
+
+ /*
+ * Ensure the core-other region reflects the appropriate core &
+ * VP before any accesses to it occur.
+ */
+ mb();
+}
+
+void mips_cm_unlock_other(void)
+{
+ unsigned int curr_core;
+
+ if (mips_cm_revision() < CM_REV_CM3) {
+ curr_core = cpu_core(&current_cpu_data);
+ spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
+ per_cpu(cm_core_lock_flags, curr_core));
+ } else {
+ spin_unlock_irqrestore(this_cpu_ptr(&cm_core_lock),
+ *this_cpu_ptr(&cm_core_lock_flags));
+ }
+
+ preempt_enable();
+}
+
+void mips_cm_error_report(void)
+{
+ u64 cm_error, cm_addr, cm_other;
+ unsigned long revision;
+ int ocause, cause;
+ char buf[256];
+
+ if (!mips_cm_present())
+ return;
+
+ revision = mips_cm_revision();
+ cm_error = read_gcr_error_cause();
+ cm_addr = read_gcr_error_addr();
+ cm_other = read_gcr_error_mult();
+
+ if (revision < CM_REV_CM3) { /* CM2 */
+ cause = FIELD_GET(CM_GCR_ERROR_CAUSE_ERRTYPE, cm_error);
+ ocause = FIELD_GET(CM_GCR_ERROR_MULT_ERR2ND, cm_other);
+
+ if (!cause)
+ return;
+
+ if (cause < 16) {
+ unsigned long cca_bits = (cm_error >> 15) & 7;
+ unsigned long tr_bits = (cm_error >> 12) & 7;
+ unsigned long cmd_bits = (cm_error >> 7) & 0x1f;
+ unsigned long stag_bits = (cm_error >> 3) & 15;
+ unsigned long sport_bits = (cm_error >> 0) & 7;
+
+ snprintf(buf, sizeof(buf),
+ "CCA=%lu TR=%s MCmd=%s STag=%lu "
+ "SPort=%lu\n", cca_bits, cm2_tr[tr_bits],
+ cm2_cmd[cmd_bits], stag_bits, sport_bits);
+ } else if (cause < 24) {
+ /* glob state & sresp together */
+ unsigned long c3_bits = (cm_error >> 18) & 7;
+ unsigned long c2_bits = (cm_error >> 15) & 7;
+ unsigned long c1_bits = (cm_error >> 12) & 7;
+ unsigned long c0_bits = (cm_error >> 9) & 7;
+ unsigned long sc_bit = (cm_error >> 8) & 1;
+ unsigned long cmd_bits = (cm_error >> 3) & 0x1f;
+ unsigned long sport_bits = (cm_error >> 0) & 7;
+
+ snprintf(buf, sizeof(buf),
+ "C3=%s C2=%s C1=%s C0=%s SC=%s "
+ "MCmd=%s SPort=%lu\n",
+ cm2_core[c3_bits], cm2_core[c2_bits],
+ cm2_core[c1_bits], cm2_core[c0_bits],
+ sc_bit ? "True" : "False",
+ cm2_cmd[cmd_bits], sport_bits);
+ } else {
+ unsigned long muc_bit = (cm_error >> 23) & 1;
+ unsigned long ins_bits = (cm_error >> 18) & 0x1f;
+ unsigned long arr_bits = (cm_error >> 16) & 3;
+ unsigned long dw_bits = (cm_error >> 12) & 15;
+ unsigned long way_bits = (cm_error >> 9) & 7;
+ unsigned long mway_bit = (cm_error >> 8) & 1;
+ unsigned long syn_bits = (cm_error >> 0) & 0xFF;
+
+ snprintf(buf, sizeof(buf),
+ "Type=%s%s Instr=%s DW=%lu Way=%lu "
+ "MWay=%s Syndrome=0x%02lx",
+ muc_bit ? "Multi-UC " : "",
+ cm2_l2_type[arr_bits],
+ cm2_l2_instr[ins_bits], dw_bits, way_bits,
+ mway_bit ? "True" : "False", syn_bits);
+ }
+ pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error,
+ cm2_causes[cause], buf);
+ pr_err("CM_ADDR =%08llx\n", cm_addr);
+ pr_err("CM_OTHER=%08llx %s\n", cm_other, cm2_causes[ocause]);
+ } else { /* CM3 */
+ ulong core_id_bits, vp_id_bits, cmd_bits, cmd_group_bits;
+ ulong cm3_cca_bits, mcp_bits, cm3_tr_bits, sched_bit;
+
+ cause = FIELD_GET(CM3_GCR_ERROR_CAUSE_ERRTYPE, cm_error);
+ ocause = FIELD_GET(CM_GCR_ERROR_MULT_ERR2ND, cm_other);
+
+ if (!cause)
+ return;
+
+ /* Used by cause == {1,2,3} */
+ core_id_bits = (cm_error >> 22) & 0xf;
+ vp_id_bits = (cm_error >> 18) & 0xf;
+ cmd_bits = (cm_error >> 14) & 0xf;
+ cmd_group_bits = (cm_error >> 11) & 0xf;
+ cm3_cca_bits = (cm_error >> 8) & 7;
+ mcp_bits = (cm_error >> 5) & 0xf;
+ cm3_tr_bits = (cm_error >> 1) & 0xf;
+ sched_bit = cm_error & 0x1;
+
+ if (cause == 1 || cause == 3) { /* Tag ECC */
+ unsigned long tag_ecc = (cm_error >> 57) & 0x1;
+ unsigned long tag_way_bits = (cm_error >> 29) & 0xffff;
+ unsigned long dword_bits = (cm_error >> 49) & 0xff;
+ unsigned long data_way_bits = (cm_error >> 45) & 0xf;
+ unsigned long data_sets_bits = (cm_error >> 29) & 0xfff;
+ unsigned long bank_bit = (cm_error >> 28) & 0x1;
+ snprintf(buf, sizeof(buf),
+ "%s ECC Error: Way=%lu (DWORD=%lu, Sets=%lu) "
+ "Bank=%lu CoreID=%lu VPID=%lu Command=%s "
+ "Command Group=%s CCA=%lu MCP=%d "
+ "Transaction type=%s Scheduler=%lu\n",
+ tag_ecc ? "TAG" : "DATA",
+ tag_ecc ? (unsigned long)ffs(tag_way_bits) - 1 :
+ data_way_bits, dword_bits, data_sets_bits,
+ bank_bit,
+ core_id_bits, vp_id_bits,
+ cm3_cmd[cmd_bits],
+ cm3_cmd_group[cmd_group_bits],
+ cm3_cca_bits, 1 << mcp_bits,
+ cm3_tr[cm3_tr_bits], sched_bit);
+ } else if (cause == 2) {
+ unsigned long data_error_type = (cm_error >> 41) & 0xfff;
+ unsigned long data_decode_cmd = (cm_error >> 37) & 0xf;
+ unsigned long data_decode_group = (cm_error >> 34) & 0x7;
+ unsigned long data_decode_destination_id = (cm_error >> 28) & 0x3f;
+
+ snprintf(buf, sizeof(buf),
+ "Decode Request Error: Type=%lu, Command=%lu"
+ "Command Group=%lu Destination ID=%lu"
+ "CoreID=%lu VPID=%lu Command=%s"
+ "Command Group=%s CCA=%lu MCP=%d"
+ "Transaction type=%s Scheduler=%lu\n",
+ data_error_type, data_decode_cmd,
+ data_decode_group, data_decode_destination_id,
+ core_id_bits, vp_id_bits,
+ cm3_cmd[cmd_bits],
+ cm3_cmd_group[cmd_group_bits],
+ cm3_cca_bits, 1 << mcp_bits,
+ cm3_tr[cm3_tr_bits], sched_bit);
+ } else {
+ buf[0] = 0;
+ }
+
+ pr_err("CM_ERROR=%llx %s <%s>\n", cm_error,
+ cm3_causes[cause], buf);
+ pr_err("CM_ADDR =%llx\n", cm_addr);
+ pr_err("CM_OTHER=%llx %s\n", cm_other, cm3_causes[ocause]);
+ }
+
+ /* reprime cause register */
+ write_gcr_error_cause(cm_error);
+}
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
new file mode 100644
index 000000000..d005be84c
--- /dev/null
+++ b/arch/mips/kernel/mips-cpc.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+
+#include <asm/mips-cps.h>
+
+void __iomem *mips_cpc_base;
+
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
+
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
+
+phys_addr_t __weak mips_cpc_default_phys_base(void)
+{
+ struct device_node *cpc_node;
+ struct resource res;
+ int err;
+
+ cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
+ if (cpc_node) {
+ err = of_address_to_resource(cpc_node, 0, &res);
+ of_node_put(cpc_node);
+ if (!err)
+ return res.start;
+ }
+
+ return 0;
+}
+
+/**
+ * mips_cpc_phys_base - retrieve the physical base address of the CPC
+ *
+ * This function returns the physical base address of the Cluster Power
+ * Controller memory mapped registers, or 0 if no Cluster Power Controller
+ * is present.
+ */
+static phys_addr_t mips_cpc_phys_base(void)
+{
+ unsigned long cpc_base;
+
+ if (!mips_cm_present())
+ return 0;
+
+ if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX))
+ return 0;
+
+ /* If the CPC is already enabled, leave it so */
+ cpc_base = read_gcr_cpc_base();
+ if (cpc_base & CM_GCR_CPC_BASE_CPCEN)
+ return cpc_base & CM_GCR_CPC_BASE_CPCBASE;
+
+ /* Otherwise, use the default address */
+ cpc_base = mips_cpc_default_phys_base();
+ if (!cpc_base)
+ return cpc_base;
+
+ /* Enable the CPC, mapped at the default address */
+ write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN);
+ return cpc_base;
+}
+
+int mips_cpc_probe(void)
+{
+ phys_addr_t addr;
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ spin_lock_init(&per_cpu(cpc_core_lock, cpu));
+
+ addr = mips_cpc_phys_base();
+ if (!addr)
+ return -ENODEV;
+
+ mips_cpc_base = ioremap(addr, 0x8000);
+ if (!mips_cpc_base)
+ return -ENXIO;
+
+ return 0;
+}
+
+void mips_cpc_lock_other(unsigned int core)
+{
+ unsigned int curr_core;
+
+ if (mips_cm_revision() >= CM_REV_CM3)
+ /* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
+ return;
+
+ preempt_disable();
+ curr_core = cpu_core(&current_cpu_data);
+ spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
+ per_cpu(cpc_core_lock_flags, curr_core));
+ write_cpc_cl_other(core << __ffs(CPC_Cx_OTHER_CORENUM));
+
+ /*
+ * Ensure the core-other region reflects the appropriate core &
+ * VP before any accesses to it occur.
+ */
+ mb();
+}
+
+void mips_cpc_unlock_other(void)
+{
+ unsigned int curr_core;
+
+ if (mips_cm_revision() >= CM_REV_CM3)
+ /* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
+ return;
+
+ curr_core = cpu_core(&current_cpu_data);
+ spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
+ per_cpu(cpc_core_lock_flags, curr_core));
+ preempt_enable();
+}
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
new file mode 100644
index 000000000..6c590ef27
--- /dev/null
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * General MIPS MT support routines, usable in AP/SP and SMVP.
+ * Copyright (C) 2005 Mips Technologies, Inc
+ */
+#include <linux/cpu.h>
+#include <linux/cpuset.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sched/task.h>
+#include <linux/cred.h>
+#include <linux/security.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+/*
+ * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
+ */
+cpumask_t mt_fpu_cpumask;
+
+static int fpaff_threshold = -1;
+unsigned long mt_fpemul_threshold;
+
+/*
+ * Replacement functions for the sys_sched_setaffinity() and
+ * sys_sched_getaffinity() system calls, so that we can integrate
+ * FPU affinity with the user's requested processor affinity.
+ * This code is 98% identical to the sys_sched_setaffinity()
+ * and sys_sched_getaffinity() system calls, and should be
+ * updated when kernel/sched/core.c changes.
+ */
+
+/*
+ * find_process_by_pid - find a process with a matching PID value.
+ * Used in sys_sched_set/getaffinity() in kernel/sched/core.c, so
+ * cloned here.
+ */
+static inline struct task_struct *find_process_by_pid(pid_t pid)
+{
+ return pid ? find_task_by_vpid(pid) : current;
+}
+
+/*
+ * Check that the target process has a UID that matches the current process's.
+ */
+static bool check_same_owner(struct task_struct *p)
+{
+ const struct cred *cred = current_cred(), *pcred;
+ bool match;
+
+ rcu_read_lock();
+ pcred = __task_cred(p);
+ match = (uid_eq(cred->euid, pcred->euid) ||
+ uid_eq(cred->euid, pcred->uid));
+ rcu_read_unlock();
+ return match;
+}
+
+/*
+ * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
+ unsigned long __user *user_mask_ptr)
+{
+ cpumask_var_t cpus_allowed, new_mask, effective_mask;
+ struct thread_info *ti;
+ struct task_struct *p;
+ int retval;
+
+ if (len < sizeof(new_mask))
+ return -EINVAL;
+
+ if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
+ return -EFAULT;
+
+ get_online_cpus();
+ rcu_read_lock();
+
+ p = find_process_by_pid(pid);
+ if (!p) {
+ rcu_read_unlock();
+ put_online_cpus();
+ return -ESRCH;
+ }
+
+ /* Prevent p going away */
+ get_task_struct(p);
+ rcu_read_unlock();
+
+ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_put_task;
+ }
+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_free_cpus_allowed;
+ }
+ if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_free_new_mask;
+ }
+ if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) {
+ retval = -EPERM;
+ goto out_unlock;
+ }
+
+ retval = security_task_setscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+ /* Record new user-specified CPU set for future reference */
+ cpumask_copy(&p->thread.user_cpus_allowed, new_mask);
+
+ again:
+ /* Compute new global allowed CPU set if necessary */
+ ti = task_thread_info(p);
+ if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
+ cpumask_intersects(new_mask, &mt_fpu_cpumask)) {
+ cpumask_and(effective_mask, new_mask, &mt_fpu_cpumask);
+ retval = set_cpus_allowed_ptr(p, effective_mask);
+ } else {
+ cpumask_copy(effective_mask, new_mask);
+ clear_ti_thread_flag(ti, TIF_FPUBOUND);
+ retval = set_cpus_allowed_ptr(p, new_mask);
+ }
+
+ if (!retval) {
+ cpuset_cpus_allowed(p, cpus_allowed);
+ if (!cpumask_subset(effective_mask, cpus_allowed)) {
+ /*
+ * We must have raced with a concurrent cpuset
+ * update. Just reset the cpus_allowed to the
+ * cpuset's cpus_allowed
+ */
+ cpumask_copy(new_mask, cpus_allowed);
+ goto again;
+ }
+ }
+out_unlock:
+ free_cpumask_var(effective_mask);
+out_free_new_mask:
+ free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+ free_cpumask_var(cpus_allowed);
+out_put_task:
+ put_task_struct(p);
+ put_online_cpus();
+ return retval;
+}
+
+/*
+ * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+ unsigned long __user *user_mask_ptr)
+{
+ unsigned int real_len;
+ cpumask_t allowed, mask;
+ int retval;
+ struct task_struct *p;
+
+ real_len = sizeof(mask);
+ if (len < real_len)
+ return -EINVAL;
+
+ get_online_cpus();
+ rcu_read_lock();
+
+ retval = -ESRCH;
+ p = find_process_by_pid(pid);
+ if (!p)
+ goto out_unlock;
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+ cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr);
+ cpumask_and(&mask, &allowed, cpu_active_mask);
+
+out_unlock:
+ rcu_read_unlock();
+ put_online_cpus();
+ if (retval)
+ return retval;
+ if (copy_to_user(user_mask_ptr, &mask, real_len))
+ return -EFAULT;
+ return real_len;
+}
+
+
+static int __init fpaff_thresh(char *str)
+{
+ get_option(&str, &fpaff_threshold);
+ return 1;
+}
+__setup("fpaff=", fpaff_thresh);
+
+/*
+ * FPU Use Factor empirically derived from experiments on 34K
+ */
+#define FPUSEFACTOR 2000
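+
+/*
+ * The default threshold below scales with loops_per_jiffy (roughly the
+ * core's BogoMIPS), so a faster core is allowed proportionally more FPU
+ * emulations before FPU affinity is applied to the task.
+ */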
+
+static __init int mt_fp_affinity_init(void)
+{
+ if (fpaff_threshold >= 0) {
+ mt_fpemul_threshold = fpaff_threshold;
+ } else {
+ mt_fpemul_threshold =
+ (FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
+ }
+ printk(KERN_DEBUG "FPU Affinity set after %ld emulations\n",
+ mt_fpemul_threshold);
+
+ return 0;
+}
+arch_initcall(mt_fp_affinity_init);
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
new file mode 100644
index 000000000..d5f7362e8
--- /dev/null
+++ b/arch/mips/kernel/mips-mt.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * General MIPS MT support routines, usable in AP/SP and SMVP.
+ * Copyright (C) 2005 Mips Technologies, Inc
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/security.h>
+
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <linux/atomic.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/mipsmtregs.h>
+#include <asm/r4kcache.h>
+#include <asm/cacheflush.h>
+
+int vpelimit;
+
+static int __init maxvpes(char *str)
+{
+ get_option(&str, &vpelimit);
+
+ return 1;
+}
+
+__setup("maxvpes=", maxvpes);
+
+int tclimit;
+
+static int __init maxtcs(char *str)
+{
+ get_option(&str, &tclimit);
+
+ return 1;
+}
+
+__setup("maxtcs=", maxtcs);
+
+/*
+ * Dump new MIPS MT state for the core. Does not leave TCs halted.
+ * Takes an argument which is taken to be a pre-call MVPControl value.
+ */
+
+void mips_mt_regdump(unsigned long mvpctl)
+{
+ unsigned long flags;
+ unsigned long vpflags;
+ unsigned long mvpconf0;
+ int nvpe;
+ int ntc;
+ int i;
+ int tc;
+ unsigned long haltval;
+ unsigned long tcstatval;
+
+ local_irq_save(flags);
+ vpflags = dvpe();
+ printk("=== MIPS MT State Dump ===\n");
+ printk("-- Global State --\n");
+ printk(" MVPControl Passed: %08lx\n", mvpctl);
+ printk(" MVPControl Read: %08lx\n", vpflags);
+ printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
+ nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+ ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+ printk("-- per-VPE State --\n");
+ for (i = 0; i < nvpe; i++) {
+ for (tc = 0; tc < ntc; tc++) {
+ settc(tc);
+ if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
+ printk(" VPE %d\n", i);
+ printk(" VPEControl : %08lx\n",
+ read_vpe_c0_vpecontrol());
+ printk(" VPEConf0 : %08lx\n",
+ read_vpe_c0_vpeconf0());
+ printk(" VPE%d.Status : %08lx\n",
+ i, read_vpe_c0_status());
+ printk(" VPE%d.EPC : %08lx %pS\n",
+ i, read_vpe_c0_epc(),
+ (void *) read_vpe_c0_epc());
+ printk(" VPE%d.Cause : %08lx\n",
+ i, read_vpe_c0_cause());
+ printk(" VPE%d.Config7 : %08lx\n",
+ i, read_vpe_c0_config7());
+ break; /* Next VPE */
+ }
+ }
+ }
+ printk("-- per-TC State --\n");
+ for (tc = 0; tc < ntc; tc++) {
+ settc(tc);
+ if (read_tc_c0_tcbind() == read_c0_tcbind()) {
+ /* Are we dumping ourself? */
+ haltval = 0; /* Then we're not halted, and mustn't be */
+ tcstatval = flags; /* And pre-dump TCStatus is flags */
+ printk(" TC %d (current TC with VPE EPC above)\n", tc);
+ } else {
+ haltval = read_tc_c0_tchalt();
+ write_tc_c0_tchalt(1);
+ tcstatval = read_tc_c0_tcstatus();
+ printk(" TC %d\n", tc);
+ }
+ printk(" TCStatus : %08lx\n", tcstatval);
+ printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
+ printk(" TCRestart : %08lx %pS\n",
+ read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart());
+ printk(" TCHalt : %08lx\n", haltval);
+ printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
+ if (!haltval)
+ write_tc_c0_tchalt(0);
+ }
+ printk("===========================\n");
+ evpe(vpflags);
+ local_irq_restore(flags);
+}
+
+static int mt_opt_rpsctl = -1;
+static int mt_opt_nblsu = -1;
+static int mt_opt_forceconfig7;
+static int mt_opt_config7 = -1;
+
+static int __init rpsctl_set(char *str)
+{
+ get_option(&str, &mt_opt_rpsctl);
+ return 1;
+}
+__setup("rpsctl=", rpsctl_set);
+
+static int __init nblsu_set(char *str)
+{
+ get_option(&str, &mt_opt_nblsu);
+ return 1;
+}
+__setup("nblsu=", nblsu_set);
+
+static int __init config7_set(char *str)
+{
+ get_option(&str, &mt_opt_config7);
+ mt_opt_forceconfig7 = 1;
+ return 1;
+}
+__setup("config7=", config7_set);
+
+static unsigned int itc_base;
+
+static int __init set_itc_base(char *str)
+{
+ get_option(&str, &itc_base);
+ return 1;
+}
+
+__setup("itcbase=", set_itc_base);
+
+void mips_mt_set_cpuoptions(void)
+{
+ unsigned int oconfig7 = read_c0_config7();
+ unsigned int nconfig7 = oconfig7;
+
+ if (mt_opt_rpsctl >= 0) {
+ printk("34K return prediction stack override set to %d.\n",
+ mt_opt_rpsctl);
+ if (mt_opt_rpsctl)
+ nconfig7 |= (1 << 2);
+ else
+ nconfig7 &= ~(1 << 2);
+ }
+ if (mt_opt_nblsu >= 0) {
+ printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
+ if (mt_opt_nblsu)
+ nconfig7 |= (1 << 5);
+ else
+ nconfig7 &= ~(1 << 5);
+ }
+ if (mt_opt_forceconfig7) {
+ printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
+ nconfig7 = mt_opt_config7;
+ }
+ if (oconfig7 != nconfig7) {
+ __asm__ __volatile("sync");
+ write_c0_config7(nconfig7);
+ ehb();
+ printk("Config7: 0x%08x\n", read_c0_config7());
+ }
+
+ if (itc_base != 0) {
+ /*
+ * Configure ITC mapping. This code is very
+ * specific to the 34K core family, which uses
+ * a special mode bit ("ITC") in the ErrCtl
+ * register to enable access to ITC control
+ * registers via cache "tag" operations.
+ */
+ unsigned long ectlval;
+ unsigned long itcblkgrn;
+
+ /* ErrCtl register is known as "ecc" to Linux */
+ ectlval = read_c0_ecc();
+ write_c0_ecc(ectlval | (0x1 << 26));
+ ehb();
+#define INDEX_0 (0x80000000)
+#define INDEX_8 (0x80000008)
+ /* Read "cache tag" for Dcache pseudo-index 8 */
+ cache_op(Index_Load_Tag_D, INDEX_8);
+ ehb();
+ itcblkgrn = read_c0_dtaglo();
+ itcblkgrn &= 0xfffe0000;
+ /* Set for 128 byte pitch of ITC cells */
+ itcblkgrn |= 0x00000c00;
+ /* Stage in Tag register */
+ write_c0_dtaglo(itcblkgrn);
+ ehb();
+ /* Write out to ITU with CACHE op */
+ cache_op(Index_Store_Tag_D, INDEX_8);
+ /* Now set base address, and turn ITC on with 0x1 bit */
+ write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1);
+ ehb();
+ /* Write out to ITU with CACHE op */
+ cache_op(Index_Store_Tag_D, INDEX_0);
+ write_c0_ecc(ectlval);
+ ehb();
+ printk("Mapped %ld ITC cells starting at 0x%08x\n",
+ ((itcblkgrn & 0x7fe00000) >> 20), itc_base);
+ }
+}
+
+struct class *mt_class;
+
+static int __init mt_init(void)
+{
+ struct class *mtc;
+
+ mtc = class_create(THIS_MODULE, "mt");
+ if (IS_ERR(mtc))
+ return PTR_ERR(mtc);
+
+ mt_class = mtc;
+
+ return 0;
+}
+
+subsys_initcall(mt_init);
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
new file mode 100644
index 000000000..a39ec755e
--- /dev/null
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -0,0 +1,2363 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ * MIPS R2 user space instruction emulator for MIPS R6
+ *
+ */
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/seq_file.h>
+
+#include <asm/asm.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+#include <asm/debug.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
+#include <asm/inst.h>
+#include <asm/mips-r2-to-r6-emul.h>
+#include <asm/local.h>
+#include <asm/mipsregs.h>
+#include <asm/ptrace.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_64BIT
+#define ADDIU "daddiu "
+#define INS "dins "
+#define EXT "dext "
+#else
+#define ADDIU "addiu "
+#define INS "ins "
+#define EXT "ext "
+#endif /* CONFIG_64BIT */
+
+#define SB "sb "
+#define LB "lb "
+#define LL "ll "
+#define SC "sc "
+
+#ifdef CONFIG_DEBUG_FS
+static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
+static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
+static DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
+#endif
+
+extern const unsigned int fpucondbit[8];
+
+#define MIPS_R2_EMUL_TOTAL_PASS 10
+
+int mipsr2_emulation = 0;
+
+static int __init mipsr2emu_enable(char *s)
+{
+ mipsr2_emulation = 1;
+
+ pr_info("MIPS R2-to-R6 Emulator Enabled!");
+
+ return 1;
+}
+__setup("mipsr2emu", mipsr2emu_enable);
+
+/**
+ * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in a delay slot
+ * directly, for performance, instead of the traditional (and rather slow)
+ * approach of using a stack trampoline.
+ * @regs: Process register set
+ * @ir: Instruction
+ */
+static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
+{
+ switch (MIPSInst_OPCODE(ir)) {
+ case addiu_op:
+ if (MIPSInst_RT(ir))
+ regs->regs[MIPSInst_RT(ir)] =
+ (s32)regs->regs[MIPSInst_RS(ir)] +
+ (s32)MIPSInst_SIMM(ir);
+ return 0;
+ case daddiu_op:
+ if (IS_ENABLED(CONFIG_32BIT))
+ break;
+
+ if (MIPSInst_RT(ir))
+ regs->regs[MIPSInst_RT(ir)] =
+ (s64)regs->regs[MIPSInst_RS(ir)] +
+ (s64)MIPSInst_SIMM(ir);
+ return 0;
+ case lwc1_op:
+ case swc1_op:
+ case cop1_op:
+ case cop1x_op:
+ /* FPU instructions in delay slot */
+ return -SIGFPE;
+ case spec_op:
+ switch (MIPSInst_FUNC(ir)) {
+ case or_op:
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ regs->regs[MIPSInst_RS(ir)] |
+ regs->regs[MIPSInst_RT(ir)];
+ return 0;
+ case sll_op:
+ if (MIPSInst_RS(ir))
+ break;
+
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
+ MIPSInst_FD(ir));
+ return 0;
+ case srl_op:
+ if (MIPSInst_RS(ir))
+ break;
+
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
+ MIPSInst_FD(ir));
+ return 0;
+ case addu_op:
+ if (MIPSInst_FD(ir))
+ break;
+
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s32)((u32)regs->regs[MIPSInst_RS(ir)] +
+ (u32)regs->regs[MIPSInst_RT(ir)]);
+ return 0;
+ case subu_op:
+ if (MIPSInst_FD(ir))
+ break;
+
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s32)((u32)regs->regs[MIPSInst_RS(ir)] -
+ (u32)regs->regs[MIPSInst_RT(ir)]);
+ return 0;
+ case dsll_op:
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
+ break;
+
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
+ MIPSInst_FD(ir));
+ return 0;
+ case dsrl_op:
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
+ break;
+
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
+ MIPSInst_FD(ir));
+ return 0;
+ case daddu_op:
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
+ break;
+
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (u64)regs->regs[MIPSInst_RS(ir)] +
+ (u64)regs->regs[MIPSInst_RT(ir)];
+ return 0;
+ case dsubu_op:
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
+ break;
+
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s64)((u64)regs->regs[MIPSInst_RS(ir)] -
+ (u64)regs->regs[MIPSInst_RT(ir)]);
+ return 0;
+ }
+ break;
+ default:
+ pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
+ ir, MIPSInst_OPCODE(ir));
+ }
+
+ return SIGILL;
+}
+
+/**
+ * movf_func - Emulate a MOVF instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movf_func(struct pt_regs *regs, u32 ir)
+{
+ u32 csr;
+ u32 cond;
+
+ csr = current->thread.fpu.fcr31;
+ cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+
+ if (((csr & cond) == 0) && MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+
+ MIPS_R2_STATS(movs);
+
+ return 0;
+}
+
+/**
+ * movt_func - Emulate a MOVT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movt_func(struct pt_regs *regs, u32 ir)
+{
+ u32 csr;
+ u32 cond;
+
+ csr = current->thread.fpu.fcr31;
+ cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+
+ if (((csr & cond) != 0) && MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+
+ MIPS_R2_STATS(movs);
+
+ return 0;
+}
+
+/**
+ * jr_func - Emulate a JR instruction.
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns SIGILL if JR was in delay slot, SIGEMT if we
+ * can't compute the EPC, SIGSEGV if we can't access the
+ * userland instruction or 0 on success.
+ */
+static int jr_func(struct pt_regs *regs, u32 ir)
+{
+ int err;
+ unsigned long cepc, epc, nepc;
+ u32 nir;
+
+ if (delay_slot(regs))
+ return SIGILL;
+
+ /* EPC after the RI/JR instruction */
+ nepc = regs->cp0_epc;
+ /* Roll back to the reserved R2 JR instruction */
+ regs->cp0_epc -= 4;
+ epc = regs->cp0_epc;
+ err = __compute_return_epc(regs);
+
+ if (err < 0)
+ return SIGEMT;
+
+ /* Computed EPC */
+ cepc = regs->cp0_epc;
+
+ /* Get DS instruction */
+ err = __get_user(nir, (u32 __user *)nepc);
+ if (err)
+ return SIGSEGV;
+
+ MIPS_R2BR_STATS(jrs);
+
+ /* If nir == 0 (NOP), then there is nothing else to do */
+ if (nir) {
+ /*
+ * A negative err means an FPU instruction in the BD slot,
+ * zero means 'BD-slot emulation done'.
+ * For anything else we fall back to trampoline emulation.
+ */
+ err = mipsr6_emul(regs, nir);
+ if (err > 0) {
+ regs->cp0_epc = nepc;
+ err = mips_dsemul(regs, nir, epc, cepc);
+ if (err == SIGILL)
+ err = SIGEMT;
+ MIPS_R2_STATS(dsemul);
+ }
+ }
+
+ return err;
+}
+
+/**
+ * movz_func - Emulate a MOVZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movz_func(struct pt_regs *regs, u32 ir)
+{
+ if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+ MIPS_R2_STATS(movs);
+
+ return 0;
+}
+
+/**
+ * movn_func - Emulate a MOVN instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movn_func(struct pt_regs *regs, u32 ir)
+{
+ if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+ MIPS_R2_STATS(movs);
+
+ return 0;
+}
+
+/**
+ * mfhi_func - Emulate a MFHI instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mfhi_func(struct pt_regs *regs, u32 ir)
+{
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->hi;
+
+ MIPS_R2_STATS(hilo);
+
+ return 0;
+}
+
+/**
+ * mthi_func - Emulate a MTHI instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mthi_func(struct pt_regs *regs, u32 ir)
+{
+ regs->hi = regs->regs[MIPSInst_RS(ir)];
+
+ MIPS_R2_STATS(hilo);
+
+ return 0;
+}
+
+/**
+ * mflo_func - Emulate a MFLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mflo_func(struct pt_regs *regs, u32 ir)
+{
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->lo;
+
+ MIPS_R2_STATS(hilo);
+
+ return 0;
+}
+
+/**
+ * mtlo_func - Emulate a MTLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mtlo_func(struct pt_regs *regs, u32 ir)
+{
+ regs->lo = regs->regs[MIPSInst_RS(ir)];
+
+ MIPS_R2_STATS(hilo);
+
+ return 0;
+}
+
+/**
+ * mult_func - Emulate a MULT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mult_func(struct pt_regs *regs, u32 ir)
+{
+ s64 res;
+ s32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (s64)rt * (s64)rs;
+
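+ /* Store the low and high 32-bit halves, sign-extended, into LO and HI. */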
+ rs = res;
+ regs->lo = (s64)rs;
+ rt = res >> 32;
+ res = (s64)rt;
+ regs->hi = res;
+
+ MIPS_R2_STATS(muls);
+
+ return 0;
+}
+
+/**
+ * multu_func - Emulate a MULTU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int multu_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (u64)rt * (u64)rs;
+ rt = res;
+ regs->lo = (s64)(s32)rt;
+ regs->hi = (s64)(s32)(res >> 32);
+
+ MIPS_R2_STATS(muls);
+
+ return 0;
+}
+
+/**
+ * div_func - Emulate a DIV instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int div_func(struct pt_regs *regs, u32 ir)
+{
+ s32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+
+ regs->lo = (s64)(rs / rt);
+ regs->hi = (s64)(rs % rt);
+
+ MIPS_R2_STATS(divs);
+
+ return 0;
+}
+
+/**
+ * divu_func - Emulate a DIVU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int divu_func(struct pt_regs *regs, u32 ir)
+{
+ u32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+
+ regs->lo = (s64)(rs / rt);
+ regs->hi = (s64)(rs % rt);
+
+ MIPS_R2_STATS(divs);
+
+ return 0;
+}
+
+/**
+ * dmult_func - Emulate a DMULT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dmult_func(struct pt_regs *regs, u32 ir)
+{
+ s64 res;
+ s64 rt, rs;
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ return SIGILL;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = rt * rs;
+
+ regs->lo = res;
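+ /* DMUH (an R6 instruction) yields the high 64 bits of the product. */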
+ __asm__ __volatile__(
+ "dmuh %0, %1, %2\t\n"
+ : "=r"(res)
+ : "r"(rt), "r"(rs));
+
+ regs->hi = res;
+
+ MIPS_R2_STATS(muls);
+
+ return 0;
+}
+
+/**
+ * dmultu_func - Emulate a DMULTU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dmultu_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u64 rt, rs;
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ return SIGILL;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = rt * rs;
+
+ regs->lo = res;
+ __asm__ __volatile__(
+ "dmuhu %0, %1, %2\t\n"
+ : "=r"(res)
+ : "r"(rt), "r"(rs));
+
+ regs->hi = res;
+
+ MIPS_R2_STATS(muls);
+
+ return 0;
+}
+
+/**
+ * ddiv_func - Emulate a DDIV instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int ddiv_func(struct pt_regs *regs, u32 ir)
+{
+ s64 rt, rs;
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ return SIGILL;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+
+ regs->lo = rs / rt;
+ regs->hi = rs % rt;
+
+ MIPS_R2_STATS(divs);
+
+ return 0;
+}
+
+/**
+ * ddivu_func - Emulate a DDIVU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int ddivu_func(struct pt_regs *regs, u32 ir)
+{
+ u64 rt, rs;
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ return SIGILL;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+
+ regs->lo = rs / rt;
+ regs->hi = rs % rt;
+
+ MIPS_R2_STATS(divs);
+
+ return 0;
+}
+
+/* R6 removed instructions for the SPECIAL opcode */
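+/* Each entry is { mask, encoding, handler }; mipsr2_find_op_func() uses the first match. */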
+static const struct r2_decoder_table spec_op_table[] = {
+ { 0xfc1ff83f, 0x00000008, jr_func },
+ { 0xfc00ffff, 0x00000018, mult_func },
+ { 0xfc00ffff, 0x00000019, multu_func },
+ { 0xfc00ffff, 0x0000001c, dmult_func },
+ { 0xfc00ffff, 0x0000001d, dmultu_func },
+ { 0xffff07ff, 0x00000010, mfhi_func },
+ { 0xfc1fffff, 0x00000011, mthi_func },
+ { 0xffff07ff, 0x00000012, mflo_func },
+ { 0xfc1fffff, 0x00000013, mtlo_func },
+ { 0xfc0307ff, 0x00000001, movf_func },
+ { 0xfc0307ff, 0x00010001, movt_func },
+ { 0xfc0007ff, 0x0000000a, movz_func },
+ { 0xfc0007ff, 0x0000000b, movn_func },
+ { 0xfc00ffff, 0x0000001a, div_func },
+ { 0xfc00ffff, 0x0000001b, divu_func },
+ { 0xfc00ffff, 0x0000001e, ddiv_func },
+ { 0xfc00ffff, 0x0000001f, ddivu_func },
+ {}
+};
+
+/**
+ * madd_func - Emulate a MADD instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int madd_func(struct pt_regs *regs, u32 ir)
+{
+ s64 res;
+ s32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (s64)rt * (s64)rs;
+ rt = regs->hi;
+ rs = regs->lo;
+ res += ((((s64)rt) << 32) | (u32)rs);
+
+ rt = res;
+ regs->lo = (s64)rt;
+ rs = res >> 32;
+ regs->hi = (s64)rs;
+
+ MIPS_R2_STATS(dsps);
+
+ return 0;
+}
+
+/**
+ * maddu_func - Emulate a MADDU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int maddu_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (u64)rt * (u64)rs;
+ rt = regs->hi;
+ rs = regs->lo;
+ res += ((((s64)rt) << 32) | (u32)rs);
+
+ rt = res;
+ regs->lo = (s64)(s32)rt;
+ rs = res >> 32;
+ regs->hi = (s64)(s32)rs;
+
+ MIPS_R2_STATS(dsps);
+
+ return 0;
+}
+
+/**
+ * msub_func - Emulate a MSUB instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int msub_func(struct pt_regs *regs, u32 ir)
+{
+ s64 res;
+ s32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (s64)rt * (s64)rs;
+ rt = regs->hi;
+ rs = regs->lo;
+ res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+ rt = res;
+ regs->lo = (s64)rt;
+ rs = res >> 32;
+ regs->hi = (s64)rs;
+
+ MIPS_R2_STATS(dsps);
+
+ return 0;
+}
+
+/**
+ * msubu_func - Emulate a MSUBU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int msubu_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (u64)rt * (u64)rs;
+ rt = regs->hi;
+ rs = regs->lo;
+ res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+ rt = res;
+ regs->lo = (s64)(s32)rt;
+ rs = res >> 32;
+ regs->hi = (s64)(s32)rs;
+
+ MIPS_R2_STATS(dsps);
+
+ return 0;
+}
+
+/**
+ * mul_func - Emulate a MUL instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mul_func(struct pt_regs *regs, u32 ir)
+{
+ s64 res;
+ s32 rt, rs;
+
+ if (!MIPSInst_RD(ir))
+ return 0;
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (s64)rt * (s64)rs;
+
+ rs = res;
+ regs->regs[MIPSInst_RD(ir)] = (s64)rs;
+
+ MIPS_R2_STATS(muls);
+
+ return 0;
+}
+
+/**
+ * clz_func - Emulate a CLZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int clz_func(struct pt_regs *regs, u32 ir)
+{
+ u32 res;
+ u32 rs;
+
+ if (!MIPSInst_RD(ir))
+ return 0;
+
+ rs = regs->regs[MIPSInst_RS(ir)];
+ __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
+ regs->regs[MIPSInst_RD(ir)] = res;
+
+ MIPS_R2_STATS(bops);
+
+ return 0;
+}
+
+/**
+ * clo_func - Emulate a CLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int clo_func(struct pt_regs *regs, u32 ir)
+{
+ u32 res;
+ u32 rs;
+
+ if (!MIPSInst_RD(ir))
+ return 0;
+
+ rs = regs->regs[MIPSInst_RS(ir)];
+ __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
+ regs->regs[MIPSInst_RD(ir)] = res;
+
+ MIPS_R2_STATS(bops);
+
+ return 0;
+}
+
+/**
+ * dclz_func - Emulate a DCLZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int dclz_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u64 rs;
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ return SIGILL;
+
+ if (!MIPSInst_RD(ir))
+ return 0;
+
+ rs = regs->regs[MIPSInst_RS(ir)];
+ __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
+ regs->regs[MIPSInst_RD(ir)] = res;
+
+ MIPS_R2_STATS(bops);
+
+ return 0;
+}
+
+/**
+ * dclo_func - Emulate a DCLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int dclo_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u64 rs;
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ return SIGILL;
+
+ if (!MIPSInst_RD(ir))
+ return 0;
+
+ rs = regs->regs[MIPSInst_RS(ir)];
+ __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
+ regs->regs[MIPSInst_RD(ir)] = res;
+
+ MIPS_R2_STATS(bops);
+
+ return 0;
+}
+
+/* R6 removed instructions for the SPECIAL2 opcode */
+static const struct r2_decoder_table spec2_op_table[] = {
+ { 0xfc00ffff, 0x70000000, madd_func },
+ { 0xfc00ffff, 0x70000001, maddu_func },
+ { 0xfc0007ff, 0x70000002, mul_func },
+ { 0xfc00ffff, 0x70000004, msub_func },
+ { 0xfc00ffff, 0x70000005, msubu_func },
+ { 0xfc0007ff, 0x70000020, clz_func },
+ { 0xfc0007ff, 0x70000021, clo_func },
+ { 0xfc0007ff, 0x70000024, dclz_func },
+ { 0xfc0007ff, 0x70000025, dclo_func },
+ { }
+};
+
+static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
+ const struct r2_decoder_table *table)
+{
+ const struct r2_decoder_table *p;
+ int err;
+
+ for (p = table; p->func; p++) {
+ if ((inst & p->mask) == p->code) {
+ err = (p->func)(regs, inst);
+ return err;
+ }
+ }
+ return SIGILL;
+}
+
+/**
+ * mipsr2_decoder - Decode and emulate a MIPS R2 instruction
+ * @regs: Process register set
+ * @inst: Instruction to decode and emulate
+ * @fcr31: Floating Point Control and Status Register Cause bits returned
+ */
+int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
+{
+ int err = 0;
+ unsigned long vaddr;
+ u32 nir;
+ unsigned long cpc, epc, nepc, r31, res, rs, rt;
+
+ void __user *fault_addr = NULL;
+ int pass = 0;
+
+repeat:
+ r31 = regs->regs[31];
+ epc = regs->cp0_epc;
+ err = compute_return_epc(regs);
+ if (err < 0) {
+ BUG();
+ return SIGEMT;
+ }
+ pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d))\n",
+ inst, epc, pass);
+
+ switch (MIPSInst_OPCODE(inst)) {
+ case spec_op:
+ err = mipsr2_find_op_func(regs, inst, spec_op_table);
+ if (err < 0) {
+ /* FPU instruction under JR */
+ regs->cp0_cause |= CAUSEF_BD;
+ goto fpu_emul;
+ }
+ break;
+ case spec2_op:
+ err = mipsr2_find_op_func(regs, inst, spec2_op_table);
+ break;
+ case bcond_op:
+ rt = MIPSInst_RT(inst);
+ rs = MIPSInst_RS(inst);
+ switch (rt) {
+ case tgei_op:
+ if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
+ do_trap_or_bp(regs, 0, 0, "TGEI");
+
+ MIPS_R2_STATS(traps);
+
+ break;
+ case tgeiu_op:
+ if (regs->regs[rs] >= MIPSInst_UIMM(inst))
+ do_trap_or_bp(regs, 0, 0, "TGEIU");
+
+ MIPS_R2_STATS(traps);
+
+ break;
+ case tlti_op:
+ if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
+ do_trap_or_bp(regs, 0, 0, "TLTI");
+
+ MIPS_R2_STATS(traps);
+
+ break;
+ case tltiu_op:
+ if (regs->regs[rs] < MIPSInst_UIMM(inst))
+ do_trap_or_bp(regs, 0, 0, "TLTIU");
+
+ MIPS_R2_STATS(traps);
+
+ break;
+ case teqi_op:
+ if (regs->regs[rs] == MIPSInst_SIMM(inst))
+ do_trap_or_bp(regs, 0, 0, "TEQI");
+
+ MIPS_R2_STATS(traps);
+
+ break;
+ case tnei_op:
+ if (regs->regs[rs] != MIPSInst_SIMM(inst))
+ do_trap_or_bp(regs, 0, 0, "TNEI");
+
+ MIPS_R2_STATS(traps);
+
+ break;
+ case bltzl_op:
+ case bgezl_op:
+ case bltzall_op:
+ case bgezall_op:
+ if (delay_slot(regs)) {
+ err = SIGILL;
+ break;
+ }
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ err = __compute_return_epc(regs);
+ if (err < 0)
+ return SIGEMT;
+ if (err != BRANCH_LIKELY_TAKEN)
+ break;
+ cpc = regs->cp0_epc;
+ nepc = epc + 4;
+ err = __get_user(nir, (u32 __user *)nepc);
+ if (err) {
+ err = SIGSEGV;
+ break;
+ }
+ /*
+ * This will probably be optimized away when
+ * CONFIG_DEBUG_FS is not enabled
+ */
+ switch (rt) {
+ case bltzl_op:
+ MIPS_R2BR_STATS(bltzl);
+ break;
+ case bgezl_op:
+ MIPS_R2BR_STATS(bgezl);
+ break;
+ case bltzall_op:
+ MIPS_R2BR_STATS(bltzall);
+ break;
+ case bgezall_op:
+ MIPS_R2BR_STATS(bgezall);
+ break;
+ }
+
+ switch (MIPSInst_OPCODE(nir)) {
+ case cop1_op:
+ case cop1x_op:
+ case lwc1_op:
+ case swc1_op:
+ regs->cp0_cause |= CAUSEF_BD;
+ goto fpu_emul;
+ }
+ if (nir) {
+ err = mipsr6_emul(regs, nir);
+ if (err > 0) {
+ err = mips_dsemul(regs, nir, epc, cpc);
+ if (err == SIGILL)
+ err = SIGEMT;
+ MIPS_R2_STATS(dsemul);
+ }
+ }
+ break;
+ case bltzal_op:
+ case bgezal_op:
+ if (delay_slot(regs)) {
+ err = SIGILL;
+ break;
+ }
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ err = __compute_return_epc(regs);
+ if (err < 0)
+ return SIGEMT;
+ cpc = regs->cp0_epc;
+ nepc = epc + 4;
+ err = __get_user(nir, (u32 __user *)nepc);
+ if (err) {
+ err = SIGSEGV;
+ break;
+ }
+ /*
+ * This will probably be optimized away when
+ * CONFIG_DEBUG_FS is not enabled
+ */
+ switch (rt) {
+ case bltzal_op:
+ MIPS_R2BR_STATS(bltzal);
+ break;
+ case bgezal_op:
+ MIPS_R2BR_STATS(bgezal);
+ break;
+ }
+
+ switch (MIPSInst_OPCODE(nir)) {
+ case cop1_op:
+ case cop1x_op:
+ case lwc1_op:
+ case swc1_op:
+ regs->cp0_cause |= CAUSEF_BD;
+ goto fpu_emul;
+ }
+ if (nir) {
+ err = mipsr6_emul(regs, nir);
+ if (err > 0) {
+ err = mips_dsemul(regs, nir, epc, cpc);
+ if (err == SIGILL)
+ err = SIGEMT;
+ MIPS_R2_STATS(dsemul);
+ }
+ }
+ break;
+ default:
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ err = SIGILL;
+ break;
+ }
+ break;
+
+ case blezl_op:
+ case bgtzl_op:
+ /*
+ * For BLEZL and BGTZL, rt field must be set to 0. If this
+ * is not the case, this may be an encoding of a MIPS R6
+ * instruction, so return to CPU execution if this occurs
+ */
+ if (MIPSInst_RT(inst)) {
+ err = SIGILL;
+ break;
+ }
+ fallthrough;
+ case beql_op:
+ case bnel_op:
+ if (delay_slot(regs)) {
+ err = SIGILL;
+ break;
+ }
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ err = __compute_return_epc(regs);
+ if (err < 0)
+ return SIGEMT;
+ if (err != BRANCH_LIKELY_TAKEN)
+ break;
+ cpc = regs->cp0_epc;
+ nepc = epc + 4;
+ err = __get_user(nir, (u32 __user *)nepc);
+ if (err) {
+ err = SIGSEGV;
+ break;
+ }
+ /*
+ * This will probably be optimized away when
+ * CONFIG_DEBUG_FS is not enabled
+ */
+ switch (MIPSInst_OPCODE(inst)) {
+ case beql_op:
+ MIPS_R2BR_STATS(beql);
+ break;
+ case bnel_op:
+ MIPS_R2BR_STATS(bnel);
+ break;
+ case blezl_op:
+ MIPS_R2BR_STATS(blezl);
+ break;
+ case bgtzl_op:
+ MIPS_R2BR_STATS(bgtzl);
+ break;
+ }
+
+ switch (MIPSInst_OPCODE(nir)) {
+ case cop1_op:
+ case cop1x_op:
+ case lwc1_op:
+ case swc1_op:
+ regs->cp0_cause |= CAUSEF_BD;
+ goto fpu_emul;
+ }
+ if (nir) {
+ err = mipsr6_emul(regs, nir);
+ if (err > 0) {
+ err = mips_dsemul(regs, nir, epc, cpc);
+ if (err == SIGILL)
+ err = SIGEMT;
+ MIPS_R2_STATS(dsemul);
+ }
+ }
+ break;
+ case lwc1_op:
+ case swc1_op:
+ case cop1_op:
+ case cop1x_op:
+fpu_emul:
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+
+ err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
+ &fault_addr);
+
+ /*
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
+ */
+ *fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~res;
+
+ /*
+ * This is a tricky issue - lose_fpu() uses LL/SC atomics
+ * if the FPU is owned, which effectively cancels a user-level
+ * LL/SC sequence. So it could be argued that FPU ownership
+ * should not be restored here. But a run of several FPU
+ * instructions is far more common than an LL-FPU-SC sequence,
+ * so we prefer to loop here until the next scheduler cycle
+ * cancels FPU ownership.
+ */
+ own_fpu(1); /* Restore FPU state. */
+
+ if (err)
+ current->thread.cp0_baduaddr = (unsigned long)fault_addr;
+
+ MIPS_R2_STATS(fpus);
+
+ break;
+
+ case lwl_op:
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok((void __user *)vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
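+ /*
+ * Emulate LWL byte by byte; each load below has an exception table
+ * entry that sets err to SIGSEGV if the user access faults.
+ */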
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ "1:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 24, 8\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ "2:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 16, 8\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ "3:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 8, 8\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ "4:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 0, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ "1:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 24, 8\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ "2:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 16, 8\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ "3:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 8, 8\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ "4:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 0, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9: sll %0, %0, 0\n"
+ "10:\n"
+ " .insn\n"
+ " .section .fixup,\"ax\"\n"
+ "8: li %3,%4\n"
+ " j 10b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ STR(PTR) " 1b,8b\n"
+ STR(PTR) " 2b,8b\n"
+ STR(PTR) " 3b,8b\n"
+ STR(PTR) " 4b,8b\n"
+ " .previous\n"
+ " .set pop\n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV));
+
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = rt;
+
+ MIPS_R2_STATS(loads);
+
+ break;
+
+ case lwr_op:
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok((void __user *)vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ "1:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 0, 8\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ "2:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 8, 8\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ "3:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 16, 8\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ "4:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 24, 8\n"
+ " sll %0, %0, 0\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ "1:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 0, 8\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ "2:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 8, 8\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ "3:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 16, 8\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ "4:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 24, 8\n"
+ " sll %0, %0, 0\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9:\n"
+ "10:\n"
+ " .insn\n"
+ " .section .fixup,\"ax\"\n"
+ "8: li %3,%4\n"
+ " j 10b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ STR(PTR) " 1b,8b\n"
+ STR(PTR) " 2b,8b\n"
+ STR(PTR) " 3b,8b\n"
+ STR(PTR) " 4b,8b\n"
+ " .previous\n"
+ " .set pop\n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV));
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = rt;
+
+ MIPS_R2_STATS(loads);
+
+ break;
+
+ case swl_op:
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok((void __user *)vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ EXT "%1, %0, 24, 8\n"
+ "1:" SB "%1, 0(%2)\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ EXT "%1, %0, 16, 8\n"
+ "2:" SB "%1, 0(%2)\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ EXT "%1, %0, 8, 8\n"
+ "3:" SB "%1, 0(%2)\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ EXT "%1, %0, 0, 8\n"
+ "4:" SB "%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ EXT "%1, %0, 24, 8\n"
+ "1:" SB "%1, 0(%2)\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ EXT "%1, %0, 16, 8\n"
+ "2:" SB "%1, 0(%2)\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ EXT "%1, %0, 8, 8\n"
+ "3:" SB "%1, 0(%2)\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ EXT "%1, %0, 0, 8\n"
+ "4:" SB "%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9:\n"
+ " .insn\n"
+ " .section .fixup,\"ax\"\n"
+ "8: li %3,%4\n"
+ " j 9b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ STR(PTR) " 1b,8b\n"
+ STR(PTR) " 2b,8b\n"
+ STR(PTR) " 3b,8b\n"
+ STR(PTR) " 4b,8b\n"
+ " .previous\n"
+ " .set pop\n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV)
+ : "memory");
+
+ MIPS_R2_STATS(stores);
+
+ break;
+
+ case swr_op:
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok((void __user *)vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ EXT "%1, %0, 0, 8\n"
+ "1:" SB "%1, 0(%2)\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ EXT "%1, %0, 8, 8\n"
+ "2:" SB "%1, 0(%2)\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ EXT "%1, %0, 16, 8\n"
+ "3:" SB "%1, 0(%2)\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ EXT "%1, %0, 24, 8\n"
+ "4:" SB "%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ EXT "%1, %0, 0, 8\n"
+ "1:" SB "%1, 0(%2)\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ EXT "%1, %0, 8, 8\n"
+ "2:" SB "%1, 0(%2)\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ EXT "%1, %0, 16, 8\n"
+ "3:" SB "%1, 0(%2)\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ EXT "%1, %0, 24, 8\n"
+ "4:" SB "%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9:\n"
+ " .insn\n"
+ " .section .fixup,\"ax\"\n"
+ "8: li %3,%4\n"
+ " j 9b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ STR(PTR) " 1b,8b\n"
+ STR(PTR) " 2b,8b\n"
+ STR(PTR) " 3b,8b\n"
+ STR(PTR) " 4b,8b\n"
+ " .previous\n"
+ " .set pop\n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV)
+ : "memory");
+
+ MIPS_R2_STATS(stores);
+
+ break;
+
+ case ldl_op:
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ err = SIGILL;
+ break;
+ }
+
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok((void __user *)vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ "1: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 56, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "2: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 48, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "3: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 40, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "4: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 32, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "5: lb %1, 0(%2)\n"
+ " dins %0, %1, 24, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "6: lb %1, 0(%2)\n"
+ " dins %0, %1, 16, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "7: lb %1, 0(%2)\n"
+ " dins %0, %1, 8, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "0: lb %1, 0(%2)\n"
+ " dins %0, %1, 0, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ "1: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 56, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "2: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 48, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "3: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 40, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "4: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 32, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "5: lb %1, 0(%2)\n"
+ " dins %0, %1, 24, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "6: lb %1, 0(%2)\n"
+ " dins %0, %1, 16, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "7: lb %1, 0(%2)\n"
+ " dins %0, %1, 8, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "0: lb %1, 0(%2)\n"
+ " dins %0, %1, 0, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9:\n"
+ " .insn\n"
+ " .section .fixup,\"ax\"\n"
+ "8: li %3,%4\n"
+ " j 9b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ STR(PTR) " 1b,8b\n"
+ STR(PTR) " 2b,8b\n"
+ STR(PTR) " 3b,8b\n"
+ STR(PTR) " 4b,8b\n"
+ STR(PTR) " 5b,8b\n"
+ STR(PTR) " 6b,8b\n"
+ STR(PTR) " 7b,8b\n"
+ STR(PTR) " 0b,8b\n"
+ " .previous\n"
+ " .set pop\n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV));
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = rt;
+
+ MIPS_R2_STATS(loads);
+ break;
+
+ case ldr_op:
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ err = SIGILL;
+ break;
+ }
+
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok((void __user *)vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ "1: lb %1, 0(%2)\n"
+ " dins %0, %1, 0, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "2: lb %1, 0(%2)\n"
+ " dins %0, %1, 8, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "3: lb %1, 0(%2)\n"
+ " dins %0, %1, 16, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "4: lb %1, 0(%2)\n"
+ " dins %0, %1, 24, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "5: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 32, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "6: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 40, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "7: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 48, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "0: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 56, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ "1: lb %1, 0(%2)\n"
+ " dins %0, %1, 0, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "2: lb %1, 0(%2)\n"
+ " dins %0, %1, 8, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "3: lb %1, 0(%2)\n"
+ " dins %0, %1, 16, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "4: lb %1, 0(%2)\n"
+ " dins %0, %1, 24, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "5: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 32, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "6: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 40, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "7: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 48, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "0: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 56, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9:\n"
+ " .insn\n"
+ " .section .fixup,\"ax\"\n"
+ "8: li %3,%4\n"
+ " j 9b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ STR(PTR) " 1b,8b\n"
+ STR(PTR) " 2b,8b\n"
+ STR(PTR) " 3b,8b\n"
+ STR(PTR) " 4b,8b\n"
+ STR(PTR) " 5b,8b\n"
+ STR(PTR) " 6b,8b\n"
+ STR(PTR) " 7b,8b\n"
+ STR(PTR) " 0b,8b\n"
+ " .previous\n"
+ " .set pop\n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV));
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = rt;
+
+ MIPS_R2_STATS(loads);
+ break;
+
+ case sdl_op:
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ err = SIGILL;
+ break;
+ }
+
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok((void __user *)vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ " dextu %1, %0, 56, 8\n"
+ "1: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dextu %1, %0, 48, 8\n"
+ "2: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dextu %1, %0, 40, 8\n"
+ "3: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dextu %1, %0, 32, 8\n"
+ "4: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dext %1, %0, 24, 8\n"
+ "5: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dext %1, %0, 16, 8\n"
+ "6: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dext %1, %0, 8, 8\n"
+ "7: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dext %1, %0, 0, 8\n"
+ "0: sb %1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ " dextu %1, %0, 56, 8\n"
+ "1: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dextu %1, %0, 48, 8\n"
+ "2: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dextu %1, %0, 40, 8\n"
+ "3: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dextu %1, %0, 32, 8\n"
+ "4: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dext %1, %0, 24, 8\n"
+ "5: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dext %1, %0, 16, 8\n"
+ "6: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dext %1, %0, 8, 8\n"
+ "7: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dext %1, %0, 0, 8\n"
+ "0: sb %1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9:\n"
+ " .insn\n"
+ " .section .fixup,\"ax\"\n"
+ "8: li %3,%4\n"
+ " j 9b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ STR(PTR) " 1b,8b\n"
+ STR(PTR) " 2b,8b\n"
+ STR(PTR) " 3b,8b\n"
+ STR(PTR) " 4b,8b\n"
+ STR(PTR) " 5b,8b\n"
+ STR(PTR) " 6b,8b\n"
+ STR(PTR) " 7b,8b\n"
+ STR(PTR) " 0b,8b\n"
+ " .previous\n"
+ " .set pop\n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV)
+ : "memory");
+
+ MIPS_R2_STATS(stores);
+ break;
+
+ case sdr_op:
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ err = SIGILL;
+ break;
+ }
+
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok((void __user *)vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ " dext %1, %0, 0, 8\n"
+ "1: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dext %1, %0, 8, 8\n"
+ "2: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dext %1, %0, 16, 8\n"
+ "3: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dext %1, %0, 24, 8\n"
+ "4: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dextu %1, %0, 32, 8\n"
+ "5: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dextu %1, %0, 40, 8\n"
+ "6: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dextu %1, %0, 48, 8\n"
+ "7: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dextu %1, %0, 56, 8\n"
+ "0: sb %1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ " dext %1, %0, 0, 8\n"
+ "1: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dext %1, %0, 8, 8\n"
+ "2: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dext %1, %0, 16, 8\n"
+ "3: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dext %1, %0, 24, 8\n"
+ "4: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dextu %1, %0, 32, 8\n"
+ "5: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dextu %1, %0, 40, 8\n"
+ "6: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dextu %1, %0, 48, 8\n"
+ "7: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dextu %1, %0, 56, 8\n"
+ "0: sb %1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9:\n"
+ " .insn\n"
+ " .section .fixup,\"ax\"\n"
+ "8: li %3,%4\n"
+ " j 9b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ STR(PTR) " 1b,8b\n"
+ STR(PTR) " 2b,8b\n"
+ STR(PTR) " 3b,8b\n"
+ STR(PTR) " 4b,8b\n"
+ STR(PTR) " 5b,8b\n"
+ STR(PTR) " 6b,8b\n"
+ STR(PTR) " 7b,8b\n"
+ STR(PTR) " 0b,8b\n"
+ " .previous\n"
+ " .set pop\n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV)
+ : "memory");
+
+ MIPS_R2_STATS(stores);
+
+ break;
+ case ll_op:
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (vaddr & 0x3) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+ if (!access_ok((void __user *)vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+
+ if (!cpu_has_rw_llb) {
+ /*
+ * An LL/SC block can't be safely emulated without
+ * Config5/LLB availability. So it's probably time to
+ * kill our process before things get any worse. This is
+ * because Config5/LLB allows us to use ERETNC so that
+ * the LLAddr/LLB bit is not cleared when we return from
+ * an exception. MIPS R2 LL/SC instructions trap with an
+ * RI exception so once we emulate them here, we return
+ * back to userland with ERETNC. That preserves the
+ * LLAddr/LLB so the subsequent SC instruction will
+ * succeed preserving the atomic semantics of the LL/SC
+ * block. Without that, there is no safe way to emulate
+ * an LL/SC block in MIPSR2 userland.
+ */
+ pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+ err = SIGKILL;
+ break;
+ }
+
+ __asm__ __volatile__(
+ "1:\n"
+ "ll %0, 0(%2)\n"
+ "2:\n"
+ ".insn\n"
+ ".section .fixup,\"ax\"\n"
+ "3:\n"
+ "li %1, %3\n"
+ "j 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ STR(PTR) " 1b,3b\n"
+ ".previous\n"
+ : "=&r"(res), "+&r"(err)
+ : "r"(vaddr), "i"(SIGSEGV)
+ : "memory");
+
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = res;
+ MIPS_R2_STATS(llsc);
+
+ break;
+
+ case sc_op:
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (vaddr & 0x3) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+ if (!access_ok((void __user *)vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+
+ if (!cpu_has_rw_llb) {
+ /*
+ * An LL/SC block can't be safely emulated without
+ * Config5/LLB availability. So it's probably time to
+ * kill our process before things get any worse. This is
+ * because Config5/LLB allows us to use ERETNC so that
+ * the LLAddr/LLB bit is not cleared when we return from
+ * an exception. MIPS R2 LL/SC instructions trap with an
+ * RI exception so once we emulate them here, we return
+ * back to userland with ERETNC. That preserves the
+ * LLAddr/LLB so the subsequent SC instruction will
+ * succeed preserving the atomic semantics of the LL/SC
+ * block. Without that, there is no safe way to emulate
+ * an LL/SC block in MIPSR2 userland.
+ */
+ pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+ err = SIGKILL;
+ break;
+ }
+
+ res = regs->regs[MIPSInst_RT(inst)];
+
+ __asm__ __volatile__(
+ "1:\n"
+ "sc %0, 0(%2)\n"
+ "2:\n"
+ ".insn\n"
+ ".section .fixup,\"ax\"\n"
+ "3:\n"
+ "li %1, %3\n"
+ "j 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ STR(PTR) " 1b,3b\n"
+ ".previous\n"
+ : "+&r"(res), "+&r"(err)
+ : "r"(vaddr), "i"(SIGSEGV));
+
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = res;
+
+ MIPS_R2_STATS(llsc);
+
+ break;
+
+ case lld_op:
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ err = SIGILL;
+ break;
+ }
+
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (vaddr & 0x7) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+ if (!access_ok((void __user *)vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+
+ if (!cpu_has_rw_llb) {
+ /*
+ * An LL/SC block can't be safely emulated without
+ * Config5/LLB availability. So it's probably time to
+ * kill our process before things get any worse. This is
+ * because Config5/LLB allows us to use ERETNC so that
+ * the LLAddr/LLB bit is not cleared when we return from
+ * an exception. MIPS R2 LL/SC instructions trap with an
+ * RI exception so once we emulate them here, we return
+ * back to userland with ERETNC. That preserves the
+ * LLAddr/LLB so the subsequent SC instruction will
+ * succeed preserving the atomic semantics of the LL/SC
+ * block. Without that, there is no safe way to emulate
+ * an LL/SC block in MIPSR2 userland.
+ */
+ pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+ err = SIGKILL;
+ break;
+ }
+
+ __asm__ __volatile__(
+ "1:\n"
+ "lld %0, 0(%2)\n"
+ "2:\n"
+ ".insn\n"
+ ".section .fixup,\"ax\"\n"
+ "3:\n"
+ "li %1, %3\n"
+ "j 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ STR(PTR) " 1b,3b\n"
+ ".previous\n"
+ : "=&r"(res), "+&r"(err)
+ : "r"(vaddr), "i"(SIGSEGV)
+ : "memory");
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = res;
+
+ MIPS_R2_STATS(llsc);
+
+ break;
+
+ case scd_op:
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ err = SIGILL;
+ break;
+ }
+
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (vaddr & 0x7) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+ if (!access_ok((void __user *)vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+
+ if (!cpu_has_rw_llb) {
+ /*
+ * An LL/SC block can't be safely emulated without
+ * Config5/LLB availability. So it's probably time to
+ * kill our process before things get any worse. This is
+ * because Config5/LLB allows us to use ERETNC so that
+ * the LLAddr/LLB bit is not cleared when we return from
+ * an exception. MIPS R2 LL/SC instructions trap with an
+ * RI exception so once we emulate them here, we return
+ * back to userland with ERETNC. That preserves the
+ * LLAddr/LLB so the subsequent SC instruction will
+ * succeed preserving the atomic semantics of the LL/SC
+ * block. Without that, there is no safe way to emulate
+ * an LL/SC block in MIPSR2 userland.
+ */
+ pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+ err = SIGKILL;
+ break;
+ }
+
+ res = regs->regs[MIPSInst_RT(inst)];
+
+ __asm__ __volatile__(
+ "1:\n"
+ "scd %0, 0(%2)\n"
+ "2:\n"
+ ".insn\n"
+ ".section .fixup,\"ax\"\n"
+ "3:\n"
+ "li %1, %3\n"
+ "j 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ STR(PTR) " 1b,3b\n"
+ ".previous\n"
+ : "+&r"(res), "+&r"(err)
+ : "r"(vaddr), "i"(SIGSEGV));
+
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = res;
+
+ MIPS_R2_STATS(llsc);
+
+ break;
+ case pref_op:
+ /* skip it */
+ break;
+ default:
+ err = SIGILL;
+ }
+
+ /*
+ * Let's not return to userland just yet. It's costly and
+ * it's likely we have more R2 instructions to emulate
+ */
+ if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
+ regs->cp0_cause &= ~CAUSEF_BD;
+ err = get_user(inst, (u32 __user *)regs->cp0_epc);
+ if (!err)
+ goto repeat;
+
+ if (err < 0)
+ err = SIGSEGV;
+ }
+
+ if (err && (err != SIGEMT)) {
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ }
+
+ /* Likely a MIPS R6 compatible instruction */
+ if (pass && (err == SIGILL))
+ err = 0;
+
+ return err;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int mipsr2_emul_show(struct seq_file *s, void *unused)
+{
+ seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
+ seq_printf(s, "movs\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.movs),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
+ seq_printf(s, "hilo\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
+ seq_printf(s, "muls\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.muls),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
+ seq_printf(s, "divs\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.divs),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
+ seq_printf(s, "dsps\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
+ seq_printf(s, "bops\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.bops),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
+ seq_printf(s, "traps\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.traps),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
+ seq_printf(s, "fpus\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
+ seq_printf(s, "loads\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.loads),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
+ seq_printf(s, "stores\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.stores),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
+ seq_printf(s, "llsc\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
+ seq_printf(s, "dsemul\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
+ seq_printf(s, "jr\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
+ seq_printf(s, "bltzl\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
+ seq_printf(s, "bgezl\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
+ seq_printf(s, "bltzll\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
+ seq_printf(s, "bgezll\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
+ seq_printf(s, "bltzal\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
+ seq_printf(s, "bgezal\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
+ seq_printf(s, "beql\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
+ seq_printf(s, "bnel\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
+ seq_printf(s, "blezl\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
+ seq_printf(s, "bgtzl\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));
+
+ return 0;
+}
+
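+/* Reading r2_emul_stats_clear dumps the current counters and then resets them. */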
+static int mipsr2_clear_show(struct seq_file *s, void *unused)
+{
+ mipsr2_emul_show(s, unused);
+
+ __this_cpu_write((mipsr2emustats).movs, 0);
+ __this_cpu_write((mipsr2bdemustats).movs, 0);
+ __this_cpu_write((mipsr2emustats).hilo, 0);
+ __this_cpu_write((mipsr2bdemustats).hilo, 0);
+ __this_cpu_write((mipsr2emustats).muls, 0);
+ __this_cpu_write((mipsr2bdemustats).muls, 0);
+ __this_cpu_write((mipsr2emustats).divs, 0);
+ __this_cpu_write((mipsr2bdemustats).divs, 0);
+ __this_cpu_write((mipsr2emustats).dsps, 0);
+ __this_cpu_write((mipsr2bdemustats).dsps, 0);
+ __this_cpu_write((mipsr2emustats).bops, 0);
+ __this_cpu_write((mipsr2bdemustats).bops, 0);
+ __this_cpu_write((mipsr2emustats).traps, 0);
+ __this_cpu_write((mipsr2bdemustats).traps, 0);
+ __this_cpu_write((mipsr2emustats).fpus, 0);
+ __this_cpu_write((mipsr2bdemustats).fpus, 0);
+ __this_cpu_write((mipsr2emustats).loads, 0);
+ __this_cpu_write((mipsr2bdemustats).loads, 0);
+ __this_cpu_write((mipsr2emustats).stores, 0);
+ __this_cpu_write((mipsr2bdemustats).stores, 0);
+ __this_cpu_write((mipsr2emustats).llsc, 0);
+ __this_cpu_write((mipsr2bdemustats).llsc, 0);
+ __this_cpu_write((mipsr2emustats).dsemul, 0);
+ __this_cpu_write((mipsr2bdemustats).dsemul, 0);
+ __this_cpu_write((mipsr2bremustats).jrs, 0);
+ __this_cpu_write((mipsr2bremustats).bltzl, 0);
+ __this_cpu_write((mipsr2bremustats).bgezl, 0);
+ __this_cpu_write((mipsr2bremustats).bltzll, 0);
+ __this_cpu_write((mipsr2bremustats).bgezll, 0);
+ __this_cpu_write((mipsr2bremustats).bltzall, 0);
+ __this_cpu_write((mipsr2bremustats).bgezall, 0);
+ __this_cpu_write((mipsr2bremustats).bltzal, 0);
+ __this_cpu_write((mipsr2bremustats).bgezal, 0);
+ __this_cpu_write((mipsr2bremustats).beql, 0);
+ __this_cpu_write((mipsr2bremustats).bnel, 0);
+ __this_cpu_write((mipsr2bremustats).blezl, 0);
+ __this_cpu_write((mipsr2bremustats).bgtzl, 0);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mipsr2_emul);
+DEFINE_SHOW_ATTRIBUTE(mipsr2_clear);
+
+static int __init mipsr2_init_debugfs(void)
+{
+ debugfs_create_file("r2_emul_stats", S_IRUGO, mips_debugfs_dir, NULL,
+ &mipsr2_emul_fops);
+ debugfs_create_file("r2_emul_stats_clear", S_IRUGO, mips_debugfs_dir,
+ NULL, &mipsr2_clear_fops);
+ return 0;
+}
+
+device_initcall(mipsr2_init_debugfs);
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
new file mode 100644
index 000000000..3c0c3d126
--- /dev/null
+++ b/arch/mips/kernel/module.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *
+ * Copyright (C) 2001 Rusty Russell.
+ * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2005 Thiemo Seufer
+ */
+
+#undef DEBUG
+
+#include <linux/extable.h>
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/numa.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/jump_label.h>
+
+
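+/*
+ * Pending R_MIPS_HI16 relocation, kept on a per-module list until the
+ * matching R_MIPS_LO16 supplies the low half of the addend.
+ */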
+struct mips_hi16 {
+ struct mips_hi16 *next;
+ Elf_Addr *addr;
+ Elf_Addr value;
+};
+
+static LIST_HEAD(dbe_list);
+static DEFINE_SPINLOCK(dbe_lock);
+
+#ifdef MODULE_START
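+/*
+ * Platforms that define a dedicated module region get their module memory
+ * from MODULE_START..MODULE_END instead of the generic vmalloc area.
+ */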
+void *module_alloc(unsigned long size)
+{
+ return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
+ GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+}
+#endif
+
+static int apply_r_mips_none(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ return 0;
+}
+
+static int apply_r_mips_32(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ *location = base + v;
+
+ return 0;
+}
+
+static int apply_r_mips_26(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ if (v % 4) {
+ pr_err("module %s: dangerous R_MIPS_26 relocation\n",
+ me->name);
+ return -ENOEXEC;
+ }
+
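+ /*
+ * J/JAL only replace the low 28 bits of the PC, so the target must lie
+ * in the same 256 MB segment as the instruction following the jump.
+ */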
+ if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+ pr_err("module %s: relocation overflow\n",
+ me->name);
+ return -ENOEXEC;
+ }
+
+ *location = (*location & ~0x03ffffff) |
+ ((base + (v >> 2)) & 0x03ffffff);
+
+ return 0;
+}
+
+static int apply_r_mips_hi16(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ struct mips_hi16 *n;
+
+ if (rela) {
+ *location = (*location & 0xffff0000) |
+ ((((long long) v + 0x8000LL) >> 16) & 0xffff);
+ return 0;
+ }
+
+ /*
+ * We cannot relocate this one now because we don't know the value of
+ * the carry we need to add. Save the information, and let LO16 do the
+ * actual relocation.
+ */
+ n = kmalloc(sizeof *n, GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+
+ n->addr = (Elf_Addr *)location;
+ n->value = v;
+ n->next = me->arch.r_mips_hi16_list;
+ me->arch.r_mips_hi16_list = n;
+
+ return 0;
+}
+
+static void free_relocation_chain(struct mips_hi16 *l)
+{
+ struct mips_hi16 *next;
+
+ while (l) {
+ next = l->next;
+ kfree(l);
+ l = next;
+ }
+}
+
+static int apply_r_mips_lo16(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ unsigned long insnlo = base;
+ struct mips_hi16 *l;
+ Elf_Addr val, vallo;
+
+ if (rela) {
+ *location = (*location & 0xffff0000) | (v & 0xffff);
+ return 0;
+ }
+
+ /* Sign extend the addend we extract from the lo insn. */
+ vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
+
+ if (me->arch.r_mips_hi16_list != NULL) {
+ l = me->arch.r_mips_hi16_list;
+ while (l != NULL) {
+ struct mips_hi16 *next;
+ unsigned long insn;
+
+ /*
+ * The value for the HI16 had best be the same.
+ */
+ if (v != l->value)
+ goto out_danger;
+
+ /*
+ * Do the HI16 relocation. Note that we actually don't
+ * need to know anything about the LO16 itself, except
+ * where to find the low 16 bits of the addend needed
+ * by the LO16.
+ */
+ insn = *l->addr;
+ val = ((insn & 0xffff) << 16) + vallo;
+ val += v;
+
+ /*
+ * Account for the sign extension that will happen in
+ * the low bits.
+ */
+ val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
+
+ insn = (insn & ~0xffff) | val;
+ *l->addr = insn;
+
+ next = l->next;
+ kfree(l);
+ l = next;
+ }
+
+ me->arch.r_mips_hi16_list = NULL;
+ }
+
+ /*
+ * Ok, we're done with the HI16 relocs. Now deal with the LO16.
+ */
+ val = v + vallo;
+ insnlo = (insnlo & ~0xffff) | (val & 0xffff);
+ *location = insnlo;
+
+ return 0;
+
+out_danger:
+ free_relocation_chain(l);
+ me->arch.r_mips_hi16_list = NULL;
+
+ pr_err("module %s: dangerous R_MIPS_LO16 relocation\n", me->name);
+
+ return -ENOEXEC;
+}
+
+static int apply_r_mips_pc(struct module *me, u32 *location, u32 base,
+ Elf_Addr v, unsigned int bits)
+{
+ unsigned long mask = GENMASK(bits - 1, 0);
+ unsigned long se_bits;
+ long offset;
+
+ if (v % 4) {
+ pr_err("module %s: dangerous R_MIPS_PC%u relocation\n",
+ me->name, bits);
+ return -ENOEXEC;
+ }
+
+ /* retrieve & sign extend implicit addend if any */
+ offset = base & mask;
+ offset |= (offset & BIT(bits - 1)) ? ~mask : 0;
+
+ offset += ((long)v - (long)location) >> 2;
+
+ /* check the sign bit onwards are identical - ie. we didn't overflow */
+ se_bits = (offset & BIT(bits - 1)) ? ~0ul : 0;
+ if ((offset & ~mask) != (se_bits & ~mask)) {
+ pr_err("module %s: relocation overflow\n", me->name);
+ return -ENOEXEC;
+ }
+
+ *location = (*location & ~mask) | (offset & mask);
+
+ return 0;
+}
+
+static int apply_r_mips_pc16(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ return apply_r_mips_pc(me, location, base, v, 16);
+}
+
+static int apply_r_mips_pc21(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ return apply_r_mips_pc(me, location, base, v, 21);
+}
+
+static int apply_r_mips_pc26(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ return apply_r_mips_pc(me, location, base, v, 26);
+}
+
+static int apply_r_mips_64(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ if (WARN_ON(!rela))
+ return -EINVAL;
+
+ *(Elf_Addr *)location = v;
+
+ return 0;
+}
+
+static int apply_r_mips_higher(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ if (WARN_ON(!rela))
+ return -EINVAL;
+
+ *location = (*location & 0xffff0000) |
+ ((((long long)v + 0x80008000LL) >> 32) & 0xffff);
+
+ return 0;
+}
+
+static int apply_r_mips_highest(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ if (WARN_ON(!rela))
+ return -EINVAL;
+
+ *location = (*location & 0xffff0000) |
+ ((((long long)v + 0x800080008000LL) >> 48) & 0xffff);
+
+ return 0;
+}
+
+/**
+ * reloc_handler() - Apply a particular relocation to a module
+ * @me: the module to apply the reloc to
+ * @location: the address at which the reloc is to be applied
+ * @base: the existing value at location for REL-style; 0 for RELA-style
+ * @v: the value of the reloc, with addend for RELA-style
+ * @rela: true for a RELA-style relocation, false for REL-style
+ *
+ * Each implemented reloc_handler function applies a particular type of
+ * relocation to the module @me. Relocs that may be found in either REL or RELA
+ * variants can be handled by making use of the @base & @v parameters which are
+ * set to values which abstract the difference away from the particular reloc
+ * implementations.
+ *
+ * Return: 0 upon success, else -ERRNO
+ */
+typedef int (*reloc_handler)(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela);
+
+/* The handlers for known reloc types */
+static reloc_handler reloc_handlers[] = {
+ [R_MIPS_NONE] = apply_r_mips_none,
+ [R_MIPS_32] = apply_r_mips_32,
+ [R_MIPS_26] = apply_r_mips_26,
+ [R_MIPS_HI16] = apply_r_mips_hi16,
+ [R_MIPS_LO16] = apply_r_mips_lo16,
+ [R_MIPS_PC16] = apply_r_mips_pc16,
+ [R_MIPS_64] = apply_r_mips_64,
+ [R_MIPS_HIGHER] = apply_r_mips_higher,
+ [R_MIPS_HIGHEST] = apply_r_mips_highest,
+ [R_MIPS_PC21_S2] = apply_r_mips_pc21,
+ [R_MIPS_PC26_S2] = apply_r_mips_pc26,
+};
+
+static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me, bool rela)
+{
+ union {
+ Elf_Mips_Rel *rel;
+ Elf_Mips_Rela *rela;
+ } r;
+ reloc_handler handler;
+ Elf_Sym *sym;
+ u32 *location, base;
+ unsigned int i, type;
+ Elf_Addr v;
+ int err = 0;
+ size_t reloc_sz;
+
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+
+ r.rel = (void *)sechdrs[relsec].sh_addr;
+ reloc_sz = rela ? sizeof(*r.rela) : sizeof(*r.rel);
+ me->arch.r_mips_hi16_list = NULL;
+ for (i = 0; i < sechdrs[relsec].sh_size / reloc_sz; i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + r.rel->r_offset;
+ /* This is the symbol it is referring to */
+ sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ + ELF_MIPS_R_SYM(*r.rel);
+ if (sym->st_value >= -MAX_ERRNO) {
+ /* Ignore unresolved weak symbol */
+ if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
+ continue;
+ pr_warn("%s: Unknown symbol %s\n",
+ me->name, strtab + sym->st_name);
+ err = -ENOENT;
+ goto out;
+ }
+
+ type = ELF_MIPS_R_TYPE(*r.rel);
+ if (type < ARRAY_SIZE(reloc_handlers))
+ handler = reloc_handlers[type];
+ else
+ handler = NULL;
+
+ if (!handler) {
+ pr_err("%s: Unknown relocation type %u\n",
+ me->name, type);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rela) {
+ v = sym->st_value + r.rela->r_addend;
+ base = 0;
+ r.rela = &r.rela[1];
+ } else {
+ v = sym->st_value;
+ base = *location;
+ r.rel = &r.rel[1];
+ }
+
+ err = handler(me, location, base, v, rela);
+ if (err)
+ goto out;
+ }
+
+out:
+ /*
+	 * Normally the hi16 list should be deallocated at this point. A
+	 * malformed binary, however, could contain a series of R_MIPS_HI16
+	 * relocations not followed by an R_MIPS_LO16 relocation, or, if we hit
+	 * an error while processing a reloc, we might have gotten here before
+	 * reaching the R_MIPS_LO16. In either case, free up the list and
+	 * return an error.
+ */
+ if (me->arch.r_mips_hi16_list) {
+ free_relocation_chain(me->arch.r_mips_hi16_list);
+ me->arch.r_mips_hi16_list = NULL;
+ err = err ?: -ENOEXEC;
+ }
+
+ return err;
+}
+
+int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ return __apply_relocate(sechdrs, strtab, symindex, relsec, me, false);
+}
+
+#ifdef CONFIG_MODULES_USE_ELF_RELA
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ return __apply_relocate(sechdrs, strtab, symindex, relsec, me, true);
+}
+#endif /* CONFIG_MODULES_USE_ELF_RELA */
+
+/* Given an address, look for it in the module exception tables. */
+const struct exception_table_entry *search_module_dbetables(unsigned long addr)
+{
+ unsigned long flags;
+ const struct exception_table_entry *e = NULL;
+ struct mod_arch_specific *dbe;
+
+ spin_lock_irqsave(&dbe_lock, flags);
+ list_for_each_entry(dbe, &dbe_list, dbe_list) {
+ e = search_extable(dbe->dbe_start,
+ dbe->dbe_end - dbe->dbe_start, addr);
+ if (e)
+ break;
+ }
+ spin_unlock_irqrestore(&dbe_lock, flags);
+
+	/*
+	 * If we found one, we are currently running inside it, so we cannot
+	 * unload the module and no refcnt is needed.
+	 */
+ return e;
+}
+
+/* Put in dbe list if necessary. */
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ const Elf_Shdr *s;
+ char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ /* Make jump label nops. */
+ jump_label_apply_nops(me);
+
+ INIT_LIST_HEAD(&me->arch.dbe_list);
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+ if (strcmp("__dbe_table", secstrings + s->sh_name) != 0)
+ continue;
+ me->arch.dbe_start = (void *)s->sh_addr;
+ me->arch.dbe_end = (void *)s->sh_addr + s->sh_size;
+ spin_lock_irq(&dbe_lock);
+ list_add(&me->arch.dbe_list, &dbe_list);
+ spin_unlock_irq(&dbe_lock);
+ }
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+ spin_lock_irq(&dbe_lock);
+ list_del(&mod->arch.dbe_list);
+ spin_unlock_irq(&dbe_lock);
+}
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
new file mode 100644
index 000000000..896080b44
--- /dev/null
+++ b/arch/mips/kernel/octeon_switch.S
@@ -0,0 +1,554 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1994, 1995, 1996, by Andreas Busse
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ * written by Carsten Langgaard, carstenl@mips.com
+ */
+#include <asm/asm.h>
+#include <asm/export.h>
+#include <asm/asm-offsets.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+/*
+ * task_struct *resume(task_struct *prev, task_struct *next,
+ * struct thread_info *next_ti)
+ */
+ .align 7
+ LEAF(resume)
+ .set arch=octeon
+ mfc0 t1, CP0_STATUS
+ LONG_S t1, THREAD_STATUS(a0)
+ cpu_save_nonscratch a0
+ LONG_S ra, THREAD_REG31(a0)
+
+#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
+ /* Check if we need to store CVMSEG state */
+ dmfc0 t0, $11,7 /* CvmMemCtl */
+ bbit0 t0, 6, 3f /* Is user access enabled? */
+
+ /* Store the CVMSEG state */
+ /* Extract the size of CVMSEG */
+ andi t0, 0x3f
+	/* Multiply by (cache line size / sizeof(long) / 2) */
+ sll t0, 7-LONGLOG-1
+ li t1, -32768 /* Base address of CVMSEG */
+ LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */
+ synciobdma
+2:
+ .set noreorder
+ LONG_L t8, 0(t1) /* Load from CVMSEG */
+ subu t0, 1 /* Decrement loop var */
+ LONG_L t9, LONGSIZE(t1)/* Load from CVMSEG */
+ LONG_ADDU t1, LONGSIZE*2 /* Increment loc in CVMSEG */
+ LONG_S t8, 0(t2) /* Store CVMSEG to thread storage */
+ LONG_ADDU t2, LONGSIZE*2 /* Increment loc in thread storage */
+ bnez t0, 2b /* Loop until we've copied it all */
+ LONG_S t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */
+ .set reorder
+
+ /* Disable access to CVMSEG */
+ dmfc0 t0, $11,7 /* CvmMemCtl */
+ xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */
+ dmtc0 t0, $11,7 /* CvmMemCtl */
+#endif
+3:
+
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ PTR_LA t8, __stack_chk_guard
+ LONG_L t9, TASK_STACK_CANARY(a1)
+ LONG_S t9, 0(t8)
+#endif
+
+ /*
+ * The order of restoring the registers takes care of the race
+ * updating $28, $29 and kernelsp without disabling ints.
+ */
+ move $28, a2
+ cpu_restore_nonscratch a1
+
+ PTR_ADDU t0, $28, _THREAD_SIZE - 32
+ set_saved_sp t0, t1, t2
+
+ mfc0 t1, CP0_STATUS /* Do we really need this? */
+ li a3, 0xff01
+ and t1, a3
+ LONG_L a2, THREAD_STATUS(a1)
+ nor a3, $0, a3
+ and a2, a3
+ or a2, t1
+ mtc0 a2, CP0_STATUS
+ move v0, a0
+ jr ra
+ END(resume)
+
+/*
+ * void octeon_cop2_save(struct octeon_cop2_state *a0)
+ */
+ .align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_cop2_save)
+
+ dmfc0 t9, $9,7 /* CvmCtl register. */
+
+ /* Save the COP2 CRC state */
+ dmfc2 t0, 0x0201
+ dmfc2 t1, 0x0202
+ dmfc2 t2, 0x0200
+ sd t0, OCTEON_CP2_CRC_IV(a0)
+ sd t1, OCTEON_CP2_CRC_LENGTH(a0)
+ /* Skip next instructions if CvmCtl[NODFA_CP2] set */
+ bbit1 t9, 28, 1f
+ sd t2, OCTEON_CP2_CRC_POLY(a0)
+
+ /* Save the LLM state */
+ dmfc2 t0, 0x0402
+ dmfc2 t1, 0x040A
+ sd t0, OCTEON_CP2_LLM_DAT(a0)
+
+1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */
+ sd t1, OCTEON_CP2_LLM_DAT+8(a0)
+
+ /* Save the COP2 crypto state */
+ /* this part is mostly common to both pass 1 and later revisions */
+ dmfc2 t0, 0x0084
+ dmfc2 t1, 0x0080
+ dmfc2 t2, 0x0081
+ dmfc2 t3, 0x0082
+ sd t0, OCTEON_CP2_3DES_IV(a0)
+ dmfc2 t0, 0x0088
+ sd t1, OCTEON_CP2_3DES_KEY(a0)
+ dmfc2 t1, 0x0111 /* only necessary for pass 1 */
+ sd t2, OCTEON_CP2_3DES_KEY+8(a0)
+ dmfc2 t2, 0x0102
+ sd t3, OCTEON_CP2_3DES_KEY+16(a0)
+ dmfc2 t3, 0x0103
+ sd t0, OCTEON_CP2_3DES_RESULT(a0)
+ dmfc2 t0, 0x0104
+ sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */
+ dmfc2 t1, 0x0105
+ sd t2, OCTEON_CP2_AES_IV(a0)
+ dmfc2 t2, 0x0106
+ sd t3, OCTEON_CP2_AES_IV+8(a0)
+ dmfc2 t3, 0x0107
+ sd t0, OCTEON_CP2_AES_KEY(a0)
+ dmfc2 t0, 0x0110
+ sd t1, OCTEON_CP2_AES_KEY+8(a0)
+ dmfc2 t1, 0x0100
+ sd t2, OCTEON_CP2_AES_KEY+16(a0)
+ dmfc2 t2, 0x0101
+ sd t3, OCTEON_CP2_AES_KEY+24(a0)
+ mfc0 v0, $15,0 /* Get the processor ID register */
+ sd t0, OCTEON_CP2_AES_KEYLEN(a0)
+ li v1, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
+ sd t1, OCTEON_CP2_AES_RESULT(a0)
+ /* Skip to the Pass1 version of the remainder of the COP2 state */
+ beq v0, v1, 2f
+ sd t2, OCTEON_CP2_AES_RESULT+8(a0)
+
+ /* the non-pass1 state when !CvmCtl[NOCRYPTO] */
+ dmfc2 t1, 0x0240
+ dmfc2 t2, 0x0241
+ ori v1, v1, 0x9500 /* lowest OCTEON III PrId*/
+ dmfc2 t3, 0x0242
+ subu v1, v0, v1 /* prid - lowest OCTEON III PrId */
+ dmfc2 t0, 0x0243
+ sd t1, OCTEON_CP2_HSH_DATW(a0)
+ dmfc2 t1, 0x0244
+ sd t2, OCTEON_CP2_HSH_DATW+8(a0)
+ dmfc2 t2, 0x0245
+ sd t3, OCTEON_CP2_HSH_DATW+16(a0)
+ dmfc2 t3, 0x0246
+ sd t0, OCTEON_CP2_HSH_DATW+24(a0)
+ dmfc2 t0, 0x0247
+ sd t1, OCTEON_CP2_HSH_DATW+32(a0)
+ dmfc2 t1, 0x0248
+ sd t2, OCTEON_CP2_HSH_DATW+40(a0)
+ dmfc2 t2, 0x0249
+ sd t3, OCTEON_CP2_HSH_DATW+48(a0)
+ dmfc2 t3, 0x024A
+ sd t0, OCTEON_CP2_HSH_DATW+56(a0)
+ dmfc2 t0, 0x024B
+ sd t1, OCTEON_CP2_HSH_DATW+64(a0)
+ dmfc2 t1, 0x024C
+ sd t2, OCTEON_CP2_HSH_DATW+72(a0)
+ dmfc2 t2, 0x024D
+ sd t3, OCTEON_CP2_HSH_DATW+80(a0)
+ dmfc2 t3, 0x024E
+ sd t0, OCTEON_CP2_HSH_DATW+88(a0)
+ dmfc2 t0, 0x0250
+ sd t1, OCTEON_CP2_HSH_DATW+96(a0)
+ dmfc2 t1, 0x0251
+ sd t2, OCTEON_CP2_HSH_DATW+104(a0)
+ dmfc2 t2, 0x0252
+ sd t3, OCTEON_CP2_HSH_DATW+112(a0)
+ dmfc2 t3, 0x0253
+ sd t0, OCTEON_CP2_HSH_IVW(a0)
+ dmfc2 t0, 0x0254
+ sd t1, OCTEON_CP2_HSH_IVW+8(a0)
+ dmfc2 t1, 0x0255
+ sd t2, OCTEON_CP2_HSH_IVW+16(a0)
+ dmfc2 t2, 0x0256
+ sd t3, OCTEON_CP2_HSH_IVW+24(a0)
+ dmfc2 t3, 0x0257
+ sd t0, OCTEON_CP2_HSH_IVW+32(a0)
+ dmfc2 t0, 0x0258
+ sd t1, OCTEON_CP2_HSH_IVW+40(a0)
+ dmfc2 t1, 0x0259
+ sd t2, OCTEON_CP2_HSH_IVW+48(a0)
+ dmfc2 t2, 0x025E
+ sd t3, OCTEON_CP2_HSH_IVW+56(a0)
+ dmfc2 t3, 0x025A
+ sd t0, OCTEON_CP2_GFM_MULT(a0)
+ dmfc2 t0, 0x025B
+ sd t1, OCTEON_CP2_GFM_MULT+8(a0)
+ sd t2, OCTEON_CP2_GFM_POLY(a0)
+ sd t3, OCTEON_CP2_GFM_RESULT(a0)
+ bltz v1, 4f
+ sd t0, OCTEON_CP2_GFM_RESULT+8(a0)
+	/* OCTEON III things */
+ dmfc2 t0, 0x024F
+ dmfc2 t1, 0x0050
+ sd t0, OCTEON_CP2_SHA3(a0)
+ sd t1, OCTEON_CP2_SHA3+8(a0)
+4:
+ jr ra
+ nop
+
+2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
+ dmfc2 t3, 0x0040
+ dmfc2 t0, 0x0041
+ dmfc2 t1, 0x0042
+ dmfc2 t2, 0x0043
+ sd t3, OCTEON_CP2_HSH_DATW(a0)
+ dmfc2 t3, 0x0044
+ sd t0, OCTEON_CP2_HSH_DATW+8(a0)
+ dmfc2 t0, 0x0045
+ sd t1, OCTEON_CP2_HSH_DATW+16(a0)
+ dmfc2 t1, 0x0046
+ sd t2, OCTEON_CP2_HSH_DATW+24(a0)
+ dmfc2 t2, 0x0048
+ sd t3, OCTEON_CP2_HSH_DATW+32(a0)
+ dmfc2 t3, 0x0049
+ sd t0, OCTEON_CP2_HSH_DATW+40(a0)
+ dmfc2 t0, 0x004A
+ sd t1, OCTEON_CP2_HSH_DATW+48(a0)
+ sd t2, OCTEON_CP2_HSH_IVW(a0)
+ sd t3, OCTEON_CP2_HSH_IVW+8(a0)
+ sd t0, OCTEON_CP2_HSH_IVW+16(a0)
+
+3: /* pass 1 or CvmCtl[NOCRYPTO] set */
+ jr ra
+ nop
+ END(octeon_cop2_save)
+ .set pop
+
+/*
+ * void octeon_cop2_restore(struct octeon_cop2_state *a0)
+ */
+ .align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_cop2_restore)
+ /* First cache line was prefetched before the call */
+ pref 4, 128(a0)
+ dmfc0 t9, $9,7 /* CvmCtl register. */
+
+ pref 4, 256(a0)
+ ld t0, OCTEON_CP2_CRC_IV(a0)
+ pref 4, 384(a0)
+ ld t1, OCTEON_CP2_CRC_LENGTH(a0)
+ ld t2, OCTEON_CP2_CRC_POLY(a0)
+
+ /* Restore the COP2 CRC state */
+ dmtc2 t0, 0x0201
+ dmtc2 t1, 0x1202
+ bbit1 t9, 28, 2f /* Skip LLM if CvmCtl[NODFA_CP2] is set */
+ dmtc2 t2, 0x4200
+
+ /* Restore the LLM state */
+ ld t0, OCTEON_CP2_LLM_DAT(a0)
+ ld t1, OCTEON_CP2_LLM_DAT+8(a0)
+ dmtc2 t0, 0x0402
+ dmtc2 t1, 0x040A
+
+2:
+ bbit1 t9, 26, done_restore /* done if CvmCtl[NOCRYPTO] set */
+ nop
+
+ /* Restore the COP2 crypto state common to pass 1 and pass 2 */
+ ld t0, OCTEON_CP2_3DES_IV(a0)
+ ld t1, OCTEON_CP2_3DES_KEY(a0)
+ ld t2, OCTEON_CP2_3DES_KEY+8(a0)
+ dmtc2 t0, 0x0084
+ ld t0, OCTEON_CP2_3DES_KEY+16(a0)
+ dmtc2 t1, 0x0080
+ ld t1, OCTEON_CP2_3DES_RESULT(a0)
+ dmtc2 t2, 0x0081
+ ld t2, OCTEON_CP2_AES_INP0(a0) /* only really needed for pass 1 */
+ dmtc2 t0, 0x0082
+ ld t0, OCTEON_CP2_AES_IV(a0)
+ dmtc2 t1, 0x0098
+ ld t1, OCTEON_CP2_AES_IV+8(a0)
+ dmtc2 t2, 0x010A /* only really needed for pass 1 */
+ ld t2, OCTEON_CP2_AES_KEY(a0)
+ dmtc2 t0, 0x0102
+ ld t0, OCTEON_CP2_AES_KEY+8(a0)
+ dmtc2 t1, 0x0103
+ ld t1, OCTEON_CP2_AES_KEY+16(a0)
+ dmtc2 t2, 0x0104
+ ld t2, OCTEON_CP2_AES_KEY+24(a0)
+ dmtc2 t0, 0x0105
+ ld t0, OCTEON_CP2_AES_KEYLEN(a0)
+ dmtc2 t1, 0x0106
+ ld t1, OCTEON_CP2_AES_RESULT(a0)
+ dmtc2 t2, 0x0107
+ ld t2, OCTEON_CP2_AES_RESULT+8(a0)
+ mfc0 t3, $15,0 /* Get the processor ID register */
+ dmtc2 t0, 0x0110
+ li v0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
+ dmtc2 t1, 0x0100
+ bne v0, t3, 3f /* Skip the next stuff for non-pass1 */
+ dmtc2 t2, 0x0101
+
+ /* this code is specific for pass 1 */
+ ld t0, OCTEON_CP2_HSH_DATW(a0)
+ ld t1, OCTEON_CP2_HSH_DATW+8(a0)
+ ld t2, OCTEON_CP2_HSH_DATW+16(a0)
+ dmtc2 t0, 0x0040
+ ld t0, OCTEON_CP2_HSH_DATW+24(a0)
+ dmtc2 t1, 0x0041
+ ld t1, OCTEON_CP2_HSH_DATW+32(a0)
+ dmtc2 t2, 0x0042
+ ld t2, OCTEON_CP2_HSH_DATW+40(a0)
+ dmtc2 t0, 0x0043
+ ld t0, OCTEON_CP2_HSH_DATW+48(a0)
+ dmtc2 t1, 0x0044
+ ld t1, OCTEON_CP2_HSH_IVW(a0)
+ dmtc2 t2, 0x0045
+ ld t2, OCTEON_CP2_HSH_IVW+8(a0)
+ dmtc2 t0, 0x0046
+ ld t0, OCTEON_CP2_HSH_IVW+16(a0)
+ dmtc2 t1, 0x0048
+ dmtc2 t2, 0x0049
+ b done_restore /* unconditional branch */
+ dmtc2 t0, 0x004A
+
+3: /* this is post-pass1 code */
+ ld t2, OCTEON_CP2_HSH_DATW(a0)
+ ori v0, v0, 0x9500 /* lowest OCTEON III PrId*/
+ ld t0, OCTEON_CP2_HSH_DATW+8(a0)
+ ld t1, OCTEON_CP2_HSH_DATW+16(a0)
+ dmtc2 t2, 0x0240
+ ld t2, OCTEON_CP2_HSH_DATW+24(a0)
+ dmtc2 t0, 0x0241
+ ld t0, OCTEON_CP2_HSH_DATW+32(a0)
+ dmtc2 t1, 0x0242
+ ld t1, OCTEON_CP2_HSH_DATW+40(a0)
+ dmtc2 t2, 0x0243
+ ld t2, OCTEON_CP2_HSH_DATW+48(a0)
+ dmtc2 t0, 0x0244
+ ld t0, OCTEON_CP2_HSH_DATW+56(a0)
+ dmtc2 t1, 0x0245
+ ld t1, OCTEON_CP2_HSH_DATW+64(a0)
+ dmtc2 t2, 0x0246
+ ld t2, OCTEON_CP2_HSH_DATW+72(a0)
+ dmtc2 t0, 0x0247
+ ld t0, OCTEON_CP2_HSH_DATW+80(a0)
+ dmtc2 t1, 0x0248
+ ld t1, OCTEON_CP2_HSH_DATW+88(a0)
+ dmtc2 t2, 0x0249
+ ld t2, OCTEON_CP2_HSH_DATW+96(a0)
+ dmtc2 t0, 0x024A
+ ld t0, OCTEON_CP2_HSH_DATW+104(a0)
+ dmtc2 t1, 0x024B
+ ld t1, OCTEON_CP2_HSH_DATW+112(a0)
+ dmtc2 t2, 0x024C
+ ld t2, OCTEON_CP2_HSH_IVW(a0)
+ dmtc2 t0, 0x024D
+ ld t0, OCTEON_CP2_HSH_IVW+8(a0)
+ dmtc2 t1, 0x024E
+ ld t1, OCTEON_CP2_HSH_IVW+16(a0)
+ dmtc2 t2, 0x0250
+ ld t2, OCTEON_CP2_HSH_IVW+24(a0)
+ dmtc2 t0, 0x0251
+ ld t0, OCTEON_CP2_HSH_IVW+32(a0)
+ dmtc2 t1, 0x0252
+ ld t1, OCTEON_CP2_HSH_IVW+40(a0)
+ dmtc2 t2, 0x0253
+ ld t2, OCTEON_CP2_HSH_IVW+48(a0)
+ dmtc2 t0, 0x0254
+ ld t0, OCTEON_CP2_HSH_IVW+56(a0)
+ dmtc2 t1, 0x0255
+ ld t1, OCTEON_CP2_GFM_MULT(a0)
+ dmtc2 t2, 0x0256
+ ld t2, OCTEON_CP2_GFM_MULT+8(a0)
+ dmtc2 t0, 0x0257
+ ld t0, OCTEON_CP2_GFM_POLY(a0)
+ dmtc2 t1, 0x0258
+ ld t1, OCTEON_CP2_GFM_RESULT(a0)
+ dmtc2 t2, 0x0259
+ ld t2, OCTEON_CP2_GFM_RESULT+8(a0)
+ dmtc2 t0, 0x025E
+ subu v0, t3, v0 /* prid - lowest OCTEON III PrId */
+ dmtc2 t1, 0x025A
+ bltz v0, done_restore
+ dmtc2 t2, 0x025B
+	/* OCTEON III things */
+ ld t0, OCTEON_CP2_SHA3(a0)
+ ld t1, OCTEON_CP2_SHA3+8(a0)
+ dmtc2 t0, 0x0051
+ dmtc2 t1, 0x0050
+done_restore:
+ jr ra
+ nop
+ END(octeon_cop2_restore)
+ .set pop
+
+/*
+ * void octeon_mult_save()
+ * sp is assumed to point to a struct pt_regs
+ *
+ * NOTE: This is called in SAVE_TEMP in stackframe.h. It can
+ * safely modify v1, k0, k1, $10-$15, and $24. It will
+ * be overwritten with a processor-specific version of the code.
+ */
+ .p2align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_mult_save)
+ jr ra
+ nop
+ .space 30 * 4, 0
+octeon_mult_save_end:
+ EXPORT(octeon_mult_save_end)
+ END(octeon_mult_save)
+
+ LEAF(octeon_mult_save2)
+	/* Save the multiplier state (OCTEON II and earlier) */
+ v3mulu k0, $0, $0
+ v3mulu k1, $0, $0
+ sd k0, PT_MTP(sp) /* PT_MTP has P0 */
+ v3mulu k0, $0, $0
+ sd k1, PT_MTP+8(sp) /* PT_MTP+8 has P1 */
+ ori k1, $0, 1
+ v3mulu k1, k1, $0
+ sd k0, PT_MTP+16(sp) /* PT_MTP+16 has P2 */
+ v3mulu k0, $0, $0
+ sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */
+ v3mulu k1, $0, $0
+ sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */
+ jr ra
+ sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */
+octeon_mult_save2_end:
+ EXPORT(octeon_mult_save2_end)
+ END(octeon_mult_save2)
+
+ LEAF(octeon_mult_save3)
+	/* Save the multiplier state (OCTEON III) */
+ v3mulu $10, $0, $0 /* read P0 */
+ v3mulu $11, $0, $0 /* read P1 */
+ v3mulu $12, $0, $0 /* read P2 */
+ sd $10, PT_MTP+(0*8)(sp) /* store P0 */
+ v3mulu $10, $0, $0 /* read P3 */
+ sd $11, PT_MTP+(1*8)(sp) /* store P1 */
+ v3mulu $11, $0, $0 /* read P4 */
+ sd $12, PT_MTP+(2*8)(sp) /* store P2 */
+ ori $13, $0, 1
+ v3mulu $12, $0, $0 /* read P5 */
+ sd $10, PT_MTP+(3*8)(sp) /* store P3 */
+ v3mulu $13, $13, $0 /* P4-P0 = MPL5-MPL1, $13 = MPL0 */
+ sd $11, PT_MTP+(4*8)(sp) /* store P4 */
+ v3mulu $10, $0, $0 /* read MPL1 */
+ sd $12, PT_MTP+(5*8)(sp) /* store P5 */
+ v3mulu $11, $0, $0 /* read MPL2 */
+ sd $13, PT_MPL+(0*8)(sp) /* store MPL0 */
+ v3mulu $12, $0, $0 /* read MPL3 */
+ sd $10, PT_MPL+(1*8)(sp) /* store MPL1 */
+ v3mulu $10, $0, $0 /* read MPL4 */
+ sd $11, PT_MPL+(2*8)(sp) /* store MPL2 */
+ v3mulu $11, $0, $0 /* read MPL5 */
+ sd $12, PT_MPL+(3*8)(sp) /* store MPL3 */
+ sd $10, PT_MPL+(4*8)(sp) /* store MPL4 */
+ jr ra
+ sd $11, PT_MPL+(5*8)(sp) /* store MPL5 */
+octeon_mult_save3_end:
+ EXPORT(octeon_mult_save3_end)
+ END(octeon_mult_save3)
+ .set pop
+
+/*
+ * void octeon_mult_restore()
+ * sp is assumed to point to a struct pt_regs
+ *
+ * NOTE: This is called in RESTORE_TEMP in stackframe.h.
+ */
+ .p2align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_mult_restore)
+ jr ra
+ nop
+ .space 30 * 4, 0
+octeon_mult_restore_end:
+ EXPORT(octeon_mult_restore_end)
+ END(octeon_mult_restore)
+
+ LEAF(octeon_mult_restore2)
+ ld v0, PT_MPL(sp) /* MPL0 */
+ ld v1, PT_MPL+8(sp) /* MPL1 */
+ ld k0, PT_MPL+16(sp) /* MPL2 */
+ /* Restore the multiplier state */
+ ld k1, PT_MTP+16(sp) /* P2 */
+ mtm0 v0 /* MPL0 */
+ ld v0, PT_MTP+8(sp) /* P1 */
+ mtm1 v1 /* MPL1 */
+ ld v1, PT_MTP(sp) /* P0 */
+ mtm2 k0 /* MPL2 */
+ mtp2 k1 /* P2 */
+ mtp1 v0 /* P1 */
+ jr ra
+ mtp0 v1 /* P0 */
+octeon_mult_restore2_end:
+ EXPORT(octeon_mult_restore2_end)
+ END(octeon_mult_restore2)
+
+ LEAF(octeon_mult_restore3)
+ ld $12, PT_MPL+(0*8)(sp) /* read MPL0 */
+ ld $13, PT_MPL+(3*8)(sp) /* read MPL3 */
+ ld $10, PT_MPL+(1*8)(sp) /* read MPL1 */
+ ld $11, PT_MPL+(4*8)(sp) /* read MPL4 */
+ .word 0x718d0008
+ /* mtm0 $12, $13 restore MPL0 and MPL3 */
+ ld $12, PT_MPL+(2*8)(sp) /* read MPL2 */
+ .word 0x714b000c
+ /* mtm1 $10, $11 restore MPL1 and MPL4 */
+ ld $13, PT_MPL+(5*8)(sp) /* read MPL5 */
+ ld $10, PT_MTP+(0*8)(sp) /* read P0 */
+ ld $11, PT_MTP+(3*8)(sp) /* read P3 */
+ .word 0x718d000d
+ /* mtm2 $12, $13 restore MPL2 and MPL5 */
+ ld $12, PT_MTP+(1*8)(sp) /* read P1 */
+ .word 0x714b0009
+ /* mtp0 $10, $11 restore P0 and P3 */
+ ld $13, PT_MTP+(4*8)(sp) /* read P4 */
+ ld $10, PT_MTP+(2*8)(sp) /* read P2 */
+ ld $11, PT_MTP+(5*8)(sp) /* read P5 */
+ .word 0x718d000a
+ /* mtp1 $12, $13 restore P1 and P4 */
+ jr ra
+ .word 0x714b000b
+ /* mtp2 $10, $11 restore P2 and P5 */
+
+octeon_mult_restore3_end:
+ EXPORT(octeon_mult_restore3_end)
+ END(octeon_mult_restore3)
+ .set pop
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
new file mode 100644
index 000000000..5d7a9c039
--- /dev/null
+++ b/arch/mips/kernel/perf_event.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux performance counter support for MIPS.
+ *
+ * Copyright (C) 2010 MIPS Technologies, Inc.
+ * Author: Deng-Cheng Zhu
+ *
+ * This code is based on the implementation for ARM, which is in turn
+ * based on the sparc64 perf event code and the x86 code. Performance
+ * counter access is based on the MIPS Oprofile code. And the callchain
+ * support references the code of MIPS stacktrace.c.
+ */
+
+#include <linux/perf_event.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/stacktrace.h>
+
+/* Callchain handling code. */
+
+/*
+ * Leave userspace callchain empty for now. When we find a way to trace
+ * the user stack callchains, we will add it here.
+ */
+
+static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry,
+ unsigned long reg29)
+{
+ unsigned long *sp = (unsigned long *)reg29;
+ unsigned long addr;
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr)) {
+ perf_callchain_store(entry, addr);
+ if (entry->nr >= entry->max_stack)
+ break;
+ }
+ }
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ unsigned long sp = regs->regs[29];
+#ifdef CONFIG_KALLSYMS
+ unsigned long ra = regs->regs[31];
+ unsigned long pc = regs->cp0_epc;
+
+ if (raw_show_trace || !__kernel_text_address(pc)) {
+ unsigned long stack_page =
+ (unsigned long)task_stack_page(current);
+ if (stack_page && sp >= stack_page &&
+ sp <= stack_page + THREAD_SIZE - 32)
+ save_raw_perf_callchain(entry, sp);
+ return;
+ }
+ do {
+ perf_callchain_store(entry, pc);
+ if (entry->nr >= entry->max_stack)
+ break;
+ pc = unwind_stack(current, &sp, pc, &ra);
+ } while (pc);
+#else
+ save_raw_perf_callchain(entry, sp);
+#endif
+}
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
new file mode 100644
index 000000000..011eb6bbf
--- /dev/null
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -0,0 +1,2138 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux performance counter support for MIPS.
+ *
+ * Copyright (C) 2010 MIPS Technologies, Inc.
+ * Copyright (C) 2011 Cavium Networks, Inc.
+ * Author: Deng-Cheng Zhu
+ *
+ * This code is based on the implementation for ARM, which is in turn
+ * based on the sparc64 perf event code and the x86 code. Performance
+ * counter access is based on the MIPS Oprofile code. And the callchain
+ * support references the code of MIPS stacktrace.c.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/stacktrace.h>
+#include <asm/time.h> /* For perf_irq */
+
+#define MIPS_MAX_HWEVENTS 4
+#define MIPS_TCS_PER_COUNTER 2
+#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
+
+struct cpu_hw_events {
+ /* Array of events on this cpu. */
+ struct perf_event *events[MIPS_MAX_HWEVENTS];
+
+ /*
+ * Set the bit (indexed by the counter number) when the counter
+ * is used for an event.
+ */
+ unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
+
+ /*
+ * Software copy of the control register for each performance counter.
+	 * MIPS CPUs vary in their performance counters; implementations use
+	 * this field differently, and some may not use it at all.
+ */
+ unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
+};
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
+ .saved_ctrl = {0},
+};
+
+/* The description of MIPS performance events. */
+struct mips_perf_event {
+ unsigned int event_id;
+ /*
+ * MIPS performance counters are indexed starting from 0.
+	 * CNTR_EVEN indicates that the counters to be used have even
+	 * indexes.
+ */
+ unsigned int cntr_mask;
+ #define CNTR_EVEN 0x55555555
+ #define CNTR_ODD 0xaaaaaaaa
+ #define CNTR_ALL 0xffffffff
+ enum {
+ T = 0,
+ V = 1,
+ P = 2,
+ } range;
+};
+
+static struct mips_perf_event raw_event;
+static DEFINE_MUTEX(raw_event_mutex);
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+struct mips_pmu {
+ u64 max_period;
+ u64 valid_count;
+ u64 overflow;
+ const char *name;
+ int irq;
+ u64 (*read_counter)(unsigned int idx);
+ void (*write_counter)(unsigned int idx, u64 val);
+ const struct mips_perf_event *(*map_raw_event)(u64 config);
+ const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
+ const struct mips_perf_event (*cache_event_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+ unsigned int num_counters;
+};
+
+static int counter_bits;
+static struct mips_pmu mipspmu;
+
+#define M_PERFCTL_EVENT(event) (((event) << MIPS_PERFCTRL_EVENT_S) & \
+ MIPS_PERFCTRL_EVENT)
+#define M_PERFCTL_VPEID(vpe) ((vpe) << MIPS_PERFCTRL_VPEID_S)
+
+#ifdef CONFIG_CPU_BMIPS5000
+#define M_PERFCTL_MT_EN(filter) 0
+#else /* !CONFIG_CPU_BMIPS5000 */
+#define M_PERFCTL_MT_EN(filter) (filter)
+#endif /* CONFIG_CPU_BMIPS5000 */
+
+#define M_TC_EN_ALL M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_ALL)
+#define M_TC_EN_VPE M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_VPE)
+#define M_TC_EN_TC M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_TC)
+
+#define M_PERFCTL_COUNT_EVENT_WHENEVER (MIPS_PERFCTRL_EXL | \
+ MIPS_PERFCTRL_K | \
+ MIPS_PERFCTRL_U | \
+ MIPS_PERFCTRL_S | \
+ MIPS_PERFCTRL_IE)
+
+#ifdef CONFIG_MIPS_MT_SMP
+#define M_PERFCTL_CONFIG_MASK 0x3fff801f
+#else
+#define M_PERFCTL_CONFIG_MASK 0x1f
+#endif
+
+#define CNTR_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+static DEFINE_RWLOCK(pmuint_rwlock);
+
+#if defined(CONFIG_CPU_BMIPS5000)
+#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
+ 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
+#else
+#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
+ 0 : cpu_vpe_id(&current_cpu_data))
+#endif
+
+/* Copied from op_model_mipsxx.c */
+static unsigned int vpe_shift(void)
+{
+ if (num_possible_cpus() > 1)
+ return 1;
+
+ return 0;
+}
+
+static unsigned int counters_total_to_per_cpu(unsigned int counters)
+{
+ return counters >> vpe_shift();
+}
+
+#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
+#define vpe_id() 0
+
+#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
+
+static void resume_local_counters(void);
+static void pause_local_counters(void);
+static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
+static int mipsxx_pmu_handle_shared_irq(void);
+
+/*
+ * 0: Not Loongson-3
+ * 1: Loongson-3A1000/3B1000/3B1500
+ * 2: Loongson-3A2000/3A3000
+ * 3: Loongson-3A4000+
+ */
+
+#define LOONGSON_PMU_TYPE0 0
+#define LOONGSON_PMU_TYPE1 1
+#define LOONGSON_PMU_TYPE2 2
+#define LOONGSON_PMU_TYPE3 3
+
+static inline int get_loongson3_pmu_type(void)
+{
+ if (boot_cpu_type() != CPU_LOONGSON64)
+ return LOONGSON_PMU_TYPE0;
+ if ((boot_cpu_data.processor_id & PRID_COMP_MASK) == PRID_COMP_LEGACY)
+ return LOONGSON_PMU_TYPE1;
+ if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64C)
+ return LOONGSON_PMU_TYPE2;
+ if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G)
+ return LOONGSON_PMU_TYPE3;
+
+ return LOONGSON_PMU_TYPE0;
+}
+
+static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
+{
+ if (vpe_id() == 1)
+ idx = (idx + 2) & 3;
+ return idx;
+}
+
+static u64 mipsxx_pmu_read_counter(unsigned int idx)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ /*
+		 * The counters are unsigned; we must cast to truncate
+ * off the high bits.
+ */
+ return (u32)read_c0_perfcntr0();
+ case 1:
+ return (u32)read_c0_perfcntr1();
+ case 2:
+ return (u32)read_c0_perfcntr2();
+ case 3:
+ return (u32)read_c0_perfcntr3();
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
+{
+ u64 mask = CNTR_BIT_MASK(counter_bits);
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ return read_c0_perfcntr0_64() & mask;
+ case 1:
+ return read_c0_perfcntr1_64() & mask;
+ case 2:
+ return read_c0_perfcntr2_64() & mask;
+ case 3:
+ return read_c0_perfcntr3_64() & mask;
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ write_c0_perfcntr0(val);
+ return;
+ case 1:
+ write_c0_perfcntr1(val);
+ return;
+ case 2:
+ write_c0_perfcntr2(val);
+ return;
+ case 3:
+ write_c0_perfcntr3(val);
+ return;
+ }
+}
+
+static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
+{
+ val &= CNTR_BIT_MASK(counter_bits);
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ write_c0_perfcntr0_64(val);
+ return;
+ case 1:
+ write_c0_perfcntr1_64(val);
+ return;
+ case 2:
+ write_c0_perfcntr2_64(val);
+ return;
+ case 3:
+ write_c0_perfcntr3_64(val);
+ return;
+ }
+}
+
+static unsigned int mipsxx_pmu_read_control(unsigned int idx)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ return read_c0_perfctrl0();
+ case 1:
+ return read_c0_perfctrl1();
+ case 2:
+ return read_c0_perfctrl2();
+ case 3:
+ return read_c0_perfctrl3();
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ write_c0_perfctrl0(val);
+ return;
+ case 1:
+ write_c0_perfctrl1(val);
+ return;
+ case 2:
+ write_c0_perfctrl2(val);
+ return;
+ case 3:
+ write_c0_perfctrl3(val);
+ return;
+ }
+}
+
+static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *hwc)
+{
+ int i;
+ unsigned long cntr_mask;
+
+ /*
+	 * We only need to care about the counter mask. The range has
+	 * already been checked.
+ */
+ if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
+ cntr_mask = (hwc->event_base >> 10) & 0xffff;
+ else
+ cntr_mask = (hwc->event_base >> 8) & 0xffff;
+
+ for (i = mipspmu.num_counters - 1; i >= 0; i--) {
+ /*
+		 * Note that some MIPS perf events can be counted by both
+		 * even and odd counters, whereas many others can only use
+		 * even _or_ odd counters. This introduces an issue: when
+		 * the former kind of event takes a counter that the
+		 * latter kind of event needs, the "counter allocation"
+		 * for the latter event will fail. If the allocations
+		 * could be swapped dynamically both events could be
+		 * satisfied, but we leave this issue alone for now.
+ */
+ if (test_bit(i, &cntr_mask) &&
+ !test_and_set_bit(i, cpuc->used_mask))
+ return i;
+ }
+
+ return -EAGAIN;
+}
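+
+/*
+ * For example, assuming a core with four counters: an event whose
+ * cntr_mask decodes to CNTR_EVEN can only ever claim counter 2 or
+ * counter 0 (the loop above searches downwards), while a CNTR_ALL
+ * event takes whichever of counters 3..0 is still free.
+ */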
+
+static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+{
+ struct perf_event *event = container_of(evt, struct perf_event, hw);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ unsigned int range = evt->event_base >> 24;
+
+ WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
+
+ if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
+ cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0x3ff) |
+ (evt->config_base & M_PERFCTL_CONFIG_MASK) |
+ /* Make sure interrupt enabled. */
+ MIPS_PERFCTRL_IE;
+ else
+ cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
+ (evt->config_base & M_PERFCTL_CONFIG_MASK) |
+ /* Make sure interrupt enabled. */
+ MIPS_PERFCTRL_IE;
+
+ if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
+ /* enable the counter for the calling thread */
+ cpuc->saved_ctrl[idx] |=
+ (1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
+ } else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
+ /* The counter is processor wide. Set it up to count all TCs. */
+ pr_debug("Enabling perf counter for all TCs\n");
+ cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
+ } else {
+ unsigned int cpu, ctrl;
+
+ /*
+ * Set up the counter for a particular CPU when event->cpu is
+ * a valid CPU number. Otherwise set up the counter for the CPU
+ * scheduling this thread.
+ */
+ cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
+
+ ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu]));
+ ctrl |= M_TC_EN_VPE;
+ cpuc->saved_ctrl[idx] |= ctrl;
+ pr_debug("Enabling perf counter for CPU%d\n", cpu);
+ }
+ /*
+ * We do not actually let the counter run. Leave it until start().
+ */
+}
+
+static void mipsxx_pmu_disable_event(int idx)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ unsigned long flags;
+
+ WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
+
+ local_irq_save(flags);
+ cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER;
+ mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
+ local_irq_restore(flags);
+}
+
+static int mipspmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ u64 left = local64_read(&hwc->period_left);
+ u64 period = hwc->sample_period;
+ int ret = 0;
+
+ if (unlikely((left + period) & (1ULL << 63))) {
+ /* left underflowed by more than period. */
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ } else if (unlikely((left + period) <= period)) {
+ /* left underflowed by less than period. */
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (left > mipspmu.max_period) {
+ left = mipspmu.max_period;
+ local64_set(&hwc->period_left, left);
+ }
+
+ local64_set(&hwc->prev_count, mipspmu.overflow - left);
+
+ if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
+ mipsxx_pmu_write_control(idx,
+ M_PERFCTL_EVENT(hwc->event_base & 0x3ff));
+
+ mipspmu.write_counter(idx, mipspmu.overflow - left);
+
+ perf_event_update_userpage(event);
+
+ return ret;
+}
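+
+/*
+ * A minimal sketch of the arithmetic, assuming 32-bit counters where
+ * mipspmu.overflow is 1ULL << 31: with a sample_period of 0x1000 the
+ * counter is primed to 0x80000000 - 0x1000 = 0x7ffff000, so it crosses
+ * the overflow threshold after 0x1000 further events and raises the
+ * counter interrupt that drives the sampling path.
+ */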
+
+static void mipspmu_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ u64 prev_raw_count, new_raw_count;
+ u64 delta;
+
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = mipspmu.read_counter(idx);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ delta = new_raw_count - prev_raw_count;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+}
+
+static void mipspmu_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ /* Set the period for the event. */
+ mipspmu_event_set_period(event, hwc, hwc->idx);
+
+ /* Enable the event. */
+ mipsxx_pmu_enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ /* We are working on a local event. */
+ mipsxx_pmu_disable_event(hwc->idx);
+ barrier();
+ mipspmu_event_update(event, hwc, hwc->idx);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ }
+}
+
+static int mipspmu_add(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+ int err = 0;
+
+ perf_pmu_disable(event->pmu);
+
+	/* Look for a free counter for this event. */
+ idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
+ if (idx < 0) {
+ err = idx;
+ goto out;
+ }
+
+ /*
+ * If there is an event in the counter we are going to use then
+ * make sure it is disabled.
+ */
+ event->hw.idx = idx;
+ mipsxx_pmu_disable_event(idx);
+ cpuc->events[idx] = event;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ if (flags & PERF_EF_START)
+ mipspmu_start(event, PERF_EF_RELOAD);
+
+ /* Propagate our changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+out:
+ perf_pmu_enable(event->pmu);
+ return err;
+}
+
+static void mipspmu_del(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
+
+ mipspmu_stop(event, PERF_EF_UPDATE);
+ cpuc->events[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static void mipspmu_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Don't read disabled counters! */
+ if (hwc->idx < 0)
+ return;
+
+ mipspmu_event_update(event, hwc, hwc->idx);
+}
+
+static void mipspmu_enable(struct pmu *pmu)
+{
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ write_unlock(&pmuint_rwlock);
+#endif
+ resume_local_counters();
+}
+
+/*
+ * MIPS performance counters can be per-TC. The control registers cannot
+ * be accessed directly across CPUs, so global control requires cross-CPU
+ * calls. on_each_cpu() can help us, but we cannot make sure this function
+ * is called with interrupts enabled. So here we pause the local counters,
+ * then grab a rwlock and leave the counters on other CPUs alone. If a
+ * counter interrupt is raised while we own the write lock, the handler
+ * simply pauses the local counters on that CPU and spins. Also, we know
+ * we won't be switched to another CPU after pausing the local counters
+ * and before grabbing the lock.
+ */
+static void mipspmu_disable(struct pmu *pmu)
+{
+ pause_local_counters();
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ write_lock(&pmuint_rwlock);
+#endif
+}
+
+static atomic_t active_events = ATOMIC_INIT(0);
+static DEFINE_MUTEX(pmu_reserve_mutex);
+static int (*save_perf_irq)(void);
+
+static int mipspmu_get_irq(void)
+{
+ int err;
+
+ if (mipspmu.irq >= 0) {
+ /* Request my own irq handler. */
+ err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
+ IRQF_PERCPU | IRQF_NOBALANCING |
+ IRQF_NO_THREAD | IRQF_NO_SUSPEND |
+ IRQF_SHARED,
+ "mips_perf_pmu", &mipspmu);
+ if (err) {
+ pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
+ mipspmu.irq);
+ }
+ } else if (cp0_perfcount_irq < 0) {
+ /*
+ * We are sharing the irq number with the timer interrupt.
+ */
+ save_perf_irq = perf_irq;
+ perf_irq = mipsxx_pmu_handle_shared_irq;
+ err = 0;
+ } else {
+ pr_warn("The platform hasn't properly defined its interrupt controller\n");
+ err = -ENOENT;
+ }
+
+ return err;
+}
+
+static void mipspmu_free_irq(void)
+{
+ if (mipspmu.irq >= 0)
+ free_irq(mipspmu.irq, &mipspmu);
+ else if (cp0_perfcount_irq < 0)
+ perf_irq = save_perf_irq;
+}
+
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters; each has
+ * its own specific low-level init routines.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ if (atomic_dec_and_mutex_lock(&active_events,
+ &pmu_reserve_mutex)) {
+ /*
+ * We must not call the destroy function with interrupts
+ * disabled.
+ */
+ on_each_cpu(reset_counters,
+ (void *)(long)mipspmu.num_counters, 1);
+ mipspmu_free_irq();
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+}
+
+static int mipspmu_event_init(struct perf_event *event)
+{
+ int err = 0;
+
+ /* does not support taken branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_RAW:
+ case PERF_TYPE_HARDWARE:
+ case PERF_TYPE_HW_CACHE:
+ break;
+
+ default:
+ return -ENOENT;
+ }
+
+ if (event->cpu >= 0 && !cpu_online(event->cpu))
+ return -ENODEV;
+
+ if (!atomic_inc_not_zero(&active_events)) {
+ mutex_lock(&pmu_reserve_mutex);
+ if (atomic_read(&active_events) == 0)
+ err = mipspmu_get_irq();
+
+ if (!err)
+ atomic_inc(&active_events);
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+
+ if (err)
+ return err;
+
+ return __hw_perf_event_init(event);
+}
+
+static struct pmu pmu = {
+ .pmu_enable = mipspmu_enable,
+ .pmu_disable = mipspmu_disable,
+ .event_init = mipspmu_event_init,
+ .add = mipspmu_add,
+ .del = mipspmu_del,
+ .start = mipspmu_start,
+ .stop = mipspmu_stop,
+ .read = mipspmu_read,
+};
+
+static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
+{
+/*
+ * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
+ * event_id.
+ */
+#ifdef CONFIG_MIPS_MT_SMP
+ if (num_possible_cpus() > 1)
+ return ((unsigned int)pev->range << 24) |
+ (pev->cntr_mask & 0xffff00) |
+ (pev->event_id & 0xff);
+ else
+#endif /* CONFIG_MIPS_MT_SMP */
+ {
+ if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
+ return (pev->cntr_mask & 0xfffc00) |
+ (pev->event_id & 0x3ff);
+ else
+ return (pev->cntr_mask & 0xffff00) |
+ (pev->event_id & 0xff);
+ }
+}
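+
+/*
+ * A small worked example, assuming the default (non-MT, non-Loongson)
+ * path: an event with event_id 0x02 and cntr_mask CNTR_EVEN (0x55555555)
+ * encodes to (0x55555555 & 0xffff00) | 0x02 = 0x555502, and, assuming
+ * hwc->event_base holds this encoded value, mipsxx_pmu_alloc_counter()
+ * later recovers the counter mask from bits 8-23 of it.
+ */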
+
+static const struct mips_perf_event *mipspmu_map_general_event(int idx)
+{
+
+ if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
+ return ERR_PTR(-EOPNOTSUPP);
+ return &(*mipspmu.general_event_map)[idx];
+}
+
+static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
+{
+ unsigned int cache_type, cache_op, cache_result;
+ const struct mips_perf_event *pev;
+
+ cache_type = (config >> 0) & 0xff;
+ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_op = (config >> 8) & 0xff;
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_result = (config >> 16) & 0xff;
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ pev = &((*mipspmu.cache_event_map)
+ [cache_type]
+ [cache_op]
+ [cache_result]);
+
+ if (pev->cntr_mask == 0)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return pev;
+
+}
+
+static int validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct cpu_hw_events fake_cpuc;
+
+ memset(&fake_cpuc, 0, sizeof(fake_cpuc));
+
+ if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, leader) {
+ if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
+ return -EINVAL;
+ }
+
+ if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* This is needed by specific irq handlers in perf_event_*.c */
+static void handle_associated_event(struct cpu_hw_events *cpuc,
+ int idx, struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc = &event->hw;
+
+ mipspmu_event_update(event, hwc, idx);
+ data->period = event->hw.last_period;
+ if (!mipspmu_event_set_period(event, hwc, idx))
+ return;
+
+ if (perf_event_overflow(event, data, regs))
+ mipsxx_pmu_disable_event(idx);
+}
+
+
+static int __n_counters(void)
+{
+ if (!cpu_has_perf)
+ return 0;
+ if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
+ return 1;
+ if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
+ return 2;
+ if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
+ return 3;
+
+ return 4;
+}
+
+static int n_counters(void)
+{
+ int counters;
+
+ switch (current_cpu_type()) {
+ case CPU_R10000:
+ counters = 2;
+ break;
+
+ case CPU_R12000:
+ case CPU_R14000:
+ case CPU_R16000:
+ counters = 4;
+ break;
+
+ default:
+ counters = __n_counters();
+ }
+
+ return counters;
+}
+
+static void loongson3_reset_counters(void *arg)
+{
+ int counters = (int)(long)arg;
+
+ switch (counters) {
+ case 4:
+ mipsxx_pmu_write_control(3, 0);
+ mipspmu.write_counter(3, 0);
+ mipsxx_pmu_write_control(3, 127<<5);
+ mipspmu.write_counter(3, 0);
+ mipsxx_pmu_write_control(3, 191<<5);
+ mipspmu.write_counter(3, 0);
+ mipsxx_pmu_write_control(3, 255<<5);
+ mipspmu.write_counter(3, 0);
+ mipsxx_pmu_write_control(3, 319<<5);
+ mipspmu.write_counter(3, 0);
+ mipsxx_pmu_write_control(3, 383<<5);
+ mipspmu.write_counter(3, 0);
+ mipsxx_pmu_write_control(3, 575<<5);
+ mipspmu.write_counter(3, 0);
+ fallthrough;
+ case 3:
+ mipsxx_pmu_write_control(2, 0);
+ mipspmu.write_counter(2, 0);
+ mipsxx_pmu_write_control(2, 127<<5);
+ mipspmu.write_counter(2, 0);
+ mipsxx_pmu_write_control(2, 191<<5);
+ mipspmu.write_counter(2, 0);
+ mipsxx_pmu_write_control(2, 255<<5);
+ mipspmu.write_counter(2, 0);
+ mipsxx_pmu_write_control(2, 319<<5);
+ mipspmu.write_counter(2, 0);
+ mipsxx_pmu_write_control(2, 383<<5);
+ mipspmu.write_counter(2, 0);
+ mipsxx_pmu_write_control(2, 575<<5);
+ mipspmu.write_counter(2, 0);
+ fallthrough;
+ case 2:
+ mipsxx_pmu_write_control(1, 0);
+ mipspmu.write_counter(1, 0);
+ mipsxx_pmu_write_control(1, 127<<5);
+ mipspmu.write_counter(1, 0);
+ mipsxx_pmu_write_control(1, 191<<5);
+ mipspmu.write_counter(1, 0);
+ mipsxx_pmu_write_control(1, 255<<5);
+ mipspmu.write_counter(1, 0);
+ mipsxx_pmu_write_control(1, 319<<5);
+ mipspmu.write_counter(1, 0);
+ mipsxx_pmu_write_control(1, 383<<5);
+ mipspmu.write_counter(1, 0);
+ mipsxx_pmu_write_control(1, 575<<5);
+ mipspmu.write_counter(1, 0);
+ fallthrough;
+ case 1:
+ mipsxx_pmu_write_control(0, 0);
+ mipspmu.write_counter(0, 0);
+ mipsxx_pmu_write_control(0, 127<<5);
+ mipspmu.write_counter(0, 0);
+ mipsxx_pmu_write_control(0, 191<<5);
+ mipspmu.write_counter(0, 0);
+ mipsxx_pmu_write_control(0, 255<<5);
+ mipspmu.write_counter(0, 0);
+ mipsxx_pmu_write_control(0, 319<<5);
+ mipspmu.write_counter(0, 0);
+ mipsxx_pmu_write_control(0, 383<<5);
+ mipspmu.write_counter(0, 0);
+ mipsxx_pmu_write_control(0, 575<<5);
+ mipspmu.write_counter(0, 0);
+ break;
+ }
+}
+
+static void reset_counters(void *arg)
+{
+ int counters = (int)(long)arg;
+
+ if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) {
+ loongson3_reset_counters(arg);
+ return;
+ }
+
+ switch (counters) {
+ case 4:
+ mipsxx_pmu_write_control(3, 0);
+ mipspmu.write_counter(3, 0);
+ fallthrough;
+ case 3:
+ mipsxx_pmu_write_control(2, 0);
+ mipspmu.write_counter(2, 0);
+ fallthrough;
+ case 2:
+ mipsxx_pmu_write_control(1, 0);
+ mipspmu.write_counter(1, 0);
+ fallthrough;
+ case 1:
+ mipsxx_pmu_write_control(0, 0);
+ mipspmu.write_counter(0, 0);
+ break;
+ }
+}
+
+/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
+static const struct mips_perf_event mipsxxcore_event_map
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
+};
+
+/* 74K/proAptiv core has different branch event code. */
+static const struct mips_perf_event mipsxxcore_event_map2
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
+};
+
+static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD },
+ /* These only count dcache, not icache */
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x45, CNTR_EVEN | CNTR_ODD },
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x48, CNTR_EVEN | CNTR_ODD },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x16, CNTR_EVEN | CNTR_ODD },
+};
+
+static const struct mips_perf_event loongson3_event_map1[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
+};
+
+static const struct mips_perf_event loongson3_event_map2[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x80, CNTR_ALL },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x81, CNTR_ALL },
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x18, CNTR_ALL },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x94, CNTR_ALL },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x9c, CNTR_ALL },
+};
+
+static const struct mips_perf_event loongson3_event_map3[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_ALL },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_ALL },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x1c, CNTR_ALL },
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x1d, CNTR_ALL },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_ALL },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x08, CNTR_ALL },
+};
+
+static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
+ [PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
+};
+
+static const struct mips_perf_event bmips5000_event_map
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
+};
+
+static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
+};
+
+/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
+static const struct mips_perf_event mipsxxcore_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
+ /*
+ * Note that MIPS has only "hit" events countable for
+ * the prefetch operation.
+ */
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+},
+};
+
+/* 74K/proAptiv core has completely different cache event map. */
+static const struct mips_perf_event mipsxxcore_cache_map2
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
+ [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
+ [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
+ /*
+ * Note that MIPS has only "hit" events countable for
+ * the prefetch operation.
+ */
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
+ },
+},
+/*
+ * 74K core does not have specific DTLB events. proAptiv core has
+ * "speculative" DTLB events which are numbered 0x63 (even/odd) and
+ * not included here. One can use raw events if really needed.
+ */
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
+ },
+},
+};
+
+static const struct mips_perf_event i6x00_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x46, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x49, CNTR_EVEN | CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x47, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x4a, CNTR_EVEN | CNTR_ODD },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x84, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x85, CNTR_EVEN | CNTR_ODD },
+ },
+},
+[C(DTLB)] = {
+ /* Can't distinguish read & write */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x40, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x41, CNTR_EVEN | CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x40, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x41, CNTR_EVEN | CNTR_ODD },
+ },
+},
+[C(BPU)] = {
+ /* Conditional branches / mispredicted */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN | CNTR_ODD },
+ },
+},
+};
+
+static const struct mips_perf_event loongson3_cache_map1
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x04, CNTR_EVEN },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x04, CNTR_EVEN },
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x0c, CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x0c, CNTR_ODD },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x01, CNTR_EVEN },
+ [C(RESULT_MISS)] = { 0x01, CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x01, CNTR_EVEN },
+ [C(RESULT_MISS)] = { 0x01, CNTR_ODD },
+ },
+},
+};
+
+static const struct mips_perf_event loongson3_cache_map2
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x156, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x155, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x153, CNTR_ALL },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x18, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x18, CNTR_ALL },
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x1b6, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x1b7, CNTR_ALL },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x1bf, CNTR_ALL },
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x92, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x92, CNTR_ALL },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x1a, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x1a, CNTR_ALL },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x94, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x9c, CNTR_ALL },
+ },
+},
+};
+
+static const struct mips_perf_event loongson3_cache_map3
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x1e, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x1f, CNTR_ALL },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0xaa, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0xa9, CNTR_ALL },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x1c, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_ALL },
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x2e, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x2f, CNTR_ALL },
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x14, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x1b, CNTR_ALL },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x1a, CNTR_ALL },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x08, CNTR_ALL },
+ },
+},
+};
+
+/* BMIPS5000 */
+static const struct mips_perf_event bmips5000_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 12, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 12, CNTR_ODD, T },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 10, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 10, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 23, CNTR_EVEN, T },
+ /*
+ * Note that MIPS has only "hit" events countable for
+ * the prefetch operation.
+ */
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
+ [C(RESULT_MISS)] = { 28, CNTR_ODD, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
+ [C(RESULT_MISS)] = { 28, CNTR_ODD, P },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+},
+};
+
+static const struct mips_perf_event octeon_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x2b, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x2e, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x30, CNTR_ALL },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x18, CNTR_ALL },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x19, CNTR_ALL },
+ },
+},
+[C(DTLB)] = {
+ /*
+ * Only general DTLB misses are counted; use the same event for
+ * both read and write.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x35, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x35, CNTR_ALL },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x37, CNTR_ALL },
+ },
+},
+};
+
+static const struct mips_perf_event xlp_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
+ [C(RESULT_MISS)] = { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
+ [C(RESULT_MISS)] = { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
+ [C(RESULT_MISS)] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
+ [C(RESULT_MISS)] = { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
+ [C(RESULT_MISS)] = { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
+ },
+},
+[C(DTLB)] = {
+ /*
+ * Only general DTLB misses are counted; use the same event for
+ * both read and write.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
+ },
+},
+[C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x25, CNTR_ALL },
+ },
+},
+};
+
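+/*
+ * Translate a generic, cache or raw perf event into a MIPS event
+ * descriptor and initialise the hw_perf_event: privilege filtering from
+ * the exclude_* attributes, a default sample period, and no hardware
+ * counter assigned yet (idx = -1).
+ */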
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
+ const struct mips_perf_event *pev;
+ int err;
+
+ /* Return the MIPS event descriptor for a generic perf event. */
+ if (PERF_TYPE_HARDWARE == event->attr.type) {
+ if (event->attr.config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+ pev = mipspmu_map_general_event(event->attr.config);
+ } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
+ pev = mipspmu_map_cache_event(event->attr.config);
+ } else if (PERF_TYPE_RAW == event->attr.type) {
+ /* Raw events use the shared global raw_event, so serialise access to it. */
+ mutex_lock(&raw_event_mutex);
+ pev = mipspmu.map_raw_event(event->attr.config);
+ } else {
+ /* The event type is not (yet) supported. */
+ return -EOPNOTSUPP;
+ }
+
+ if (IS_ERR(pev)) {
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+ return PTR_ERR(pev);
+ }
+
+ /*
+ * Allow maximum flexibility in how each individual counter shared
+ * by a single CPU operates (mode exclusion and counting range).
+ */
+ hwc->config_base = MIPS_PERFCTRL_IE;
+
+ hwc->event_base = mipspmu_perf_event_encode(pev);
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+
+ if (!attr->exclude_user)
+ hwc->config_base |= MIPS_PERFCTRL_U;
+ if (!attr->exclude_kernel) {
+ hwc->config_base |= MIPS_PERFCTRL_K;
+ /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
+ hwc->config_base |= MIPS_PERFCTRL_EXL;
+ }
+ if (!attr->exclude_hv)
+ hwc->config_base |= MIPS_PERFCTRL_S;
+
+ hwc->config_base &= M_PERFCTL_CONFIG_MASK;
+ /*
+ * The event may belong to another CPU; do not assign it a local
+ * counter for now.
+ */
+ hwc->idx = -1;
+ hwc->config = 0;
+
+ if (!hwc->sample_period) {
+ hwc->sample_period = mipspmu.max_period;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+
+ err = 0;
+ if (event->group_leader != event)
+ err = validate_group(event);
+
+ event->destroy = hw_perf_event_destroy;
+
+ if (err)
+ event->destroy(event);
+
+ return err;
+}
+
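+/*
+ * Save each local counter's control register and clear its count-enable
+ * bits, with interrupts disabled so the saved state stays consistent.
+ * resume_local_counters() undoes this.
+ */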
+static void pause_local_counters(void)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int ctr = mipspmu.num_counters;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ do {
+ ctr--;
+ cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
+ mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ } while (ctr > 0);
+ local_irq_restore(flags);
+}
+
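+/* Restore the counter control registers saved by pause_local_counters(). */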
+static void resume_local_counters(void)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int ctr = mipspmu.num_counters;
+
+ do {
+ ctr--;
+ mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
+ } while (ctr > 0);
+}
+
+static int mipsxx_pmu_handle_shared_irq(void)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct perf_sample_data data;
+ unsigned int counters = mipspmu.num_counters;
+ u64 counter;
+ int n, handled = IRQ_NONE;
+ struct pt_regs *regs;
+
+ if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
+ return handled;
+ /*
+ * First we pause the local counters, so that when we are locked
+ * here, the counters are all paused. When it gets locked due to
+ * perf_disable(), the timer interrupt handler will be delayed.
+ *
+ * See also mipsxx_pmu_start().
+ */
+ pause_local_counters();
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ read_lock(&pmuint_rwlock);
+#endif
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0, 0);
+
+ for (n = counters - 1; n >= 0; n--) {
+ if (!test_bit(n, cpuc->used_mask))
+ continue;
+
+ counter = mipspmu.read_counter(n);
+ if (!(counter & mipspmu.overflow))
+ continue;
+
+ handle_associated_event(cpuc, n, &data, regs);
+ handled = IRQ_HANDLED;
+ }
+
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ read_unlock(&pmuint_rwlock);
+#endif
+ resume_local_counters();
+
+ /*
+ * Do all the work for the pending perf events. We can do this
+ * in here because the performance counter interrupt is a regular
+ * interrupt, not NMI.
+ */
+ if (handled == IRQ_HANDLED)
+ irq_work_run();
+
+ return handled;
+}
+
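+/* IRQ handler wrapper; the real work is done in the shared handler above. */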
+static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
+{
+ return mipsxx_pmu_handle_shared_irq();
+}
+
+/* 24K */
+#define IS_BOTH_COUNTERS_24K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+
+/* 34K */
+#define IS_BOTH_COUNTERS_34K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+#define IS_RANGE_P_34K_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
+ (r) == 176 || ((b) >= 50 && (b) <= 55) || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
+#endif
+
+/* 74K */
+#define IS_BOTH_COUNTERS_74K_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
+/* proAptiv */
+#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+/* P5600 */
+#define IS_BOTH_COUNTERS_P5600_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
+/* 1004K */
+#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+#define IS_RANGE_P_1004K_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \
+ (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \
+ (r) == 188 || (b) == 61 || (b) == 62 || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
+#endif
+
+/* interAptiv */
+#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
+#define IS_RANGE_P_INTERAPTIV_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 || \
+ (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 && \
+ (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_INTERAPTIV_EVENT(r) ((r) == 47 || (r) == 175)
+#endif
+
+/* BMIPS5000 */
+#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
+
+/*
+ * For most cores the user can use raw events 0-255, where 0-127 select events
+ * on the even counters and 128-255 select events on the odd counters; bit 7
+ * acts as the even/odd bank selector. For example, to use Event Num 15 (from
+ * the user manual) on the odd counters, add 128 to 15 and pass 143 (0x8f) as
+ * the event config.
+ *
+ * Some newer cores have even more events, in which case the user can use raw
+ * events 0-511: 0-255 select events on the even counters and 256-511 select
+ * events on the odd counters, with bit 8 as the even/odd bank selector.
+ */
+static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
+{
+ /* currently most cores have 7-bit event numbers */
+ int pmu_type;
+ unsigned int raw_id = config & 0xff;
+ unsigned int base_id = raw_id & 0x7f;
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ /*
+ * This actually does nothing: non-multithreading
+ * CPUs do not check or calculate the range.
+ */
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_34K:
+ if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ case CPU_74K:
+ case CPU_1074K:
+ if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_PROAPTIV:
+ if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_P5600:
+ case CPU_P6600:
+ /* 8-bit event numbers */
+ raw_id = config & 0x1ff;
+ base_id = raw_id & 0xff;
+ if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_I6400:
+ case CPU_I6500:
+ /* 8-bit event numbers */
+ base_id = config & 0xff;
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ break;
+ case CPU_1004K:
+ if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ case CPU_INTERAPTIV:
+ if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ case CPU_BMIPS5000:
+ if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+ break;
+ case CPU_LOONGSON64:
+ pmu_type = get_loongson3_pmu_type();
+
+ switch (pmu_type) {
+ case LOONGSON_PMU_TYPE1:
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+ break;
+ case LOONGSON_PMU_TYPE2:
+ base_id = config & 0x3ff;
+ raw_event.cntr_mask = CNTR_ALL;
+
+ if ((base_id >= 1 && base_id < 28) ||
+ (base_id >= 64 && base_id < 90) ||
+ (base_id >= 128 && base_id < 164) ||
+ (base_id >= 192 && base_id < 200) ||
+ (base_id >= 256 && base_id < 275) ||
+ (base_id >= 320 && base_id < 361) ||
+ (base_id >= 384 && base_id < 574))
+ break;
+
+ return ERR_PTR(-EOPNOTSUPP);
+ case LOONGSON_PMU_TYPE3:
+ base_id = raw_id;
+ raw_event.cntr_mask = CNTR_ALL;
+ break;
+ }
+ break;
+ }
+
+ raw_event.event_id = base_id;
+
+ return &raw_event;
+}
+
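+/*
+ * On Octeon any counter can count any event (CNTR_ALL); reject event
+ * codes beyond the range supported by the CPU model as well as the
+ * reserved encodings listed below.
+ */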
+static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
+{
+ unsigned int raw_id = config & 0xff;
+ unsigned int base_id = raw_id & 0x7f;
+
+ raw_event.cntr_mask = CNTR_ALL;
+ raw_event.event_id = base_id;
+
+ if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
+ if (base_id > 0x42)
+ return ERR_PTR(-EOPNOTSUPP);
+ } else {
+ if (base_id > 0x3a)
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ switch (base_id) {
+ case 0x00:
+ case 0x0f:
+ case 0x1e:
+ case 0x1f:
+ case 0x2f:
+ case 0x34:
+ case 0x3b ... 0x3f:
+ return ERR_PTR(-EOPNOTSUPP);
+ default:
+ break;
+ }
+
+ return &raw_event;
+}
+
+static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
+{
+ unsigned int raw_id = config & 0xff;
+
+ /* Only 1-63 are defined */
+ if ((raw_id < 0x01) || (raw_id > 0x3f))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ raw_event.cntr_mask = CNTR_ALL;
+ raw_event.event_id = raw_id;
+
+ return &raw_event;
+}
+
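+/*
+ * Boot-time probe: count the implemented counters, determine the perf
+ * counter interrupt, then select the event/cache maps and raw-event
+ * mapper for the current CPU type.
+ */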
+static int __init
+init_hw_perf_events(void)
+{
+ int counters, irq, pmu_type;
+
+ pr_info("Performance counters: ");
+
+ counters = n_counters();
+ if (counters == 0) {
+ pr_cont("No available PMU.\n");
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ if (!cpu_has_mipsmt_pertccounters)
+ counters = counters_total_to_per_cpu(counters);
+#endif
+
+ if (get_c0_perfcount_int)
+ irq = get_c0_perfcount_int();
+ else if (cp0_perfcount_irq >= 0)
+ irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ else
+ irq = -1;
+
+ mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ mipspmu.name = "mips/24K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_34K:
+ mipspmu.name = "mips/34K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_74K:
+ mipspmu.name = "mips/74K";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
+ case CPU_PROAPTIV:
+ mipspmu.name = "mips/proAptiv";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
+ case CPU_P5600:
+ mipspmu.name = "mips/P5600";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
+ case CPU_P6600:
+ mipspmu.name = "mips/P6600";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
+ case CPU_I6400:
+ mipspmu.name = "mips/I6400";
+ mipspmu.general_event_map = &i6x00_event_map;
+ mipspmu.cache_event_map = &i6x00_cache_map;
+ break;
+ case CPU_I6500:
+ mipspmu.name = "mips/I6500";
+ mipspmu.general_event_map = &i6x00_event_map;
+ mipspmu.cache_event_map = &i6x00_cache_map;
+ break;
+ case CPU_1004K:
+ mipspmu.name = "mips/1004K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_1074K:
+ mipspmu.name = "mips/1074K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_INTERAPTIV:
+ mipspmu.name = "mips/interAptiv";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_LOONGSON32:
+ mipspmu.name = "mips/loongson1";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_LOONGSON64:
+ mipspmu.name = "mips/loongson3";
+ pmu_type = get_loongson3_pmu_type();
+
+ switch (pmu_type) {
+ case LOONGSON_PMU_TYPE1:
+ counters = 2;
+ mipspmu.general_event_map = &loongson3_event_map1;
+ mipspmu.cache_event_map = &loongson3_cache_map1;
+ break;
+ case LOONGSON_PMU_TYPE2:
+ counters = 4;
+ mipspmu.general_event_map = &loongson3_event_map2;
+ mipspmu.cache_event_map = &loongson3_cache_map2;
+ break;
+ case LOONGSON_PMU_TYPE3:
+ counters = 4;
+ mipspmu.general_event_map = &loongson3_event_map3;
+ mipspmu.cache_event_map = &loongson3_cache_map3;
+ break;
+ }
+ break;
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ mipspmu.name = "octeon";
+ mipspmu.general_event_map = &octeon_event_map;
+ mipspmu.cache_event_map = &octeon_cache_map;
+ mipspmu.map_raw_event = octeon_pmu_map_raw_event;
+ break;
+ case CPU_BMIPS5000:
+ mipspmu.name = "BMIPS5000";
+ mipspmu.general_event_map = &bmips5000_event_map;
+ mipspmu.cache_event_map = &bmips5000_cache_map;
+ break;
+ case CPU_XLP:
+ mipspmu.name = "xlp";
+ mipspmu.general_event_map = &xlp_event_map;
+ mipspmu.cache_event_map = &xlp_cache_map;
+ mipspmu.map_raw_event = xlp_pmu_map_raw_event;
+ break;
+ default:
+ pr_cont("Either hardware does not support performance "
+ "counters, or not yet implemented.\n");
+ return -ENODEV;
+ }
+
+ mipspmu.num_counters = counters;
+ mipspmu.irq = irq;
+
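+ /*
+ * The W bit indicates wide (64-bit) counters; the Loongson-3 TYPE2
+ * PMU is limited to 48 bits. Without W the counters are 32-bit.
+ */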
+ if (read_c0_perfctrl0() & MIPS_PERFCTRL_W) {
+ if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) {
+ counter_bits = 48;
+ mipspmu.max_period = (1ULL << 47) - 1;
+ mipspmu.valid_count = (1ULL << 47) - 1;
+ mipspmu.overflow = 1ULL << 47;
+ } else {
+ counter_bits = 64;
+ mipspmu.max_period = (1ULL << 63) - 1;
+ mipspmu.valid_count = (1ULL << 63) - 1;
+ mipspmu.overflow = 1ULL << 63;
+ }
+ mipspmu.read_counter = mipsxx_pmu_read_counter_64;
+ mipspmu.write_counter = mipsxx_pmu_write_counter_64;
+ } else {
+ counter_bits = 32;
+ mipspmu.max_period = (1ULL << 31) - 1;
+ mipspmu.valid_count = (1ULL << 31) - 1;
+ mipspmu.overflow = 1ULL << 31;
+ mipspmu.read_counter = mipsxx_pmu_read_counter;
+ mipspmu.write_counter = mipsxx_pmu_write_counter;
+ }
+
+ on_each_cpu(reset_counters, (void *)(long)counters, 1);
+
+ pr_cont("%s PMU enabled, %d %d-bit counters available to each "
+ "CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
+ irq < 0 ? " (share with timer interrupt)" : "");
+
+ perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+
+ return 0;
+}
+early_initcall(init_hw_perf_events);
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
new file mode 100644
index 000000000..9bf60d7d4
--- /dev/null
+++ b/arch/mips/kernel/pm-cps.c
@@ -0,0 +1,738 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/cpuhotplug.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
+#include <asm/cacheops.h>
+#include <asm/idle.h>
+#include <asm/mips-cps.h>
+#include <asm/mipsmtregs.h>
+#include <asm/pm.h>
+#include <asm/pm-cps.h>
+#include <asm/smp-cps.h>
+#include <asm/uasm.h>
+
+/*
+ * cps_nc_entry_fn - type of a generated non-coherent state entry function
+ * @online: the count of online coupled VPEs
+ * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
+ *
+ * The code entering & exiting non-coherent states is generated at runtime
+ * using uasm, in order to ensure that the compiler cannot insert a stray
+ * memory access at an unfortunate time and to allow the generation of optimal
+ * core-specific code particularly for cache routines. If coupled_coherence
+ * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
+ * returns the number of VPEs that were in the wait state at the point this
+ * VPE left it. Returns garbage if coupled_coherence is zero or this is not
+ * the entry function for CPS_PM_NC_WAIT.
+ */
+typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);
+
+/*
+ * The entry point of the generated non-coherent idle state entry/exit
+ * functions. Actually per-core rather than per-CPU.
+ */
+static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
+ nc_asm_enter);
+
+/* Bitmap indicating which states are supported by the system */
+static DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
+
+/*
+ * Indicates the number of coupled VPEs ready to operate in a non-coherent
+ * state. Actually per-core rather than per-CPU.
+ */
+static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
+
+/* Indicates online CPUs coupled with the current CPU */
+static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
+
+/*
+ * Used to synchronize entry to deep idle states. Actually per-core rather
+ * than per-CPU.
+ */
+static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
+
+/* Saved CPU state across the CPS_PM_POWER_GATED state */
+DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);
+
+/* A somewhat arbitrary number of labels & relocs for uasm */
+static struct uasm_label labels[32];
+static struct uasm_reloc relocs[32];
+
+enum mips_reg {
+ zero, at, v0, v1, a0, a1, a2, a3,
+ t0, t1, t2, t3, t4, t5, t6, t7,
+ s0, s1, s2, s3, s4, s5, s6, s7,
+ t8, t9, k0, k1, gp, sp, fp, ra,
+};
+
+bool cps_pm_support_state(enum cps_pm_state state)
+{
+ return test_bit(state, state_support);
+}
+
+static void coupled_barrier(atomic_t *a, unsigned online)
+{
+ /*
+ * This function is effectively the same as
+ * cpuidle_coupled_parallel_barrier, which can't be used here since
+ * there's no cpuidle device.
+ */
+
+ if (!coupled_coherence)
+ return;
+
+ smp_mb__before_atomic();
+ atomic_inc(a);
+
+ while (atomic_read(a) < online)
+ cpu_relax();
+
+ if (atomic_inc_return(a) == online * 2) {
+ atomic_set(a, 0);
+ return;
+ }
+
+ while (atomic_read(a) > online)
+ cpu_relax();
+}
+
+int cps_pm_enter_state(enum cps_pm_state state)
+{
+ unsigned cpu = smp_processor_id();
+ unsigned core = cpu_core(&current_cpu_data);
+ unsigned online, left;
+ cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
+ u32 *core_ready_count, *nc_core_ready_count;
+ void *nc_addr;
+ cps_nc_entry_fn entry;
+ struct core_boot_config *core_cfg;
+ struct vpe_boot_config *vpe_cfg;
+
+ /* Check that there is an entry function for this state */
+ entry = per_cpu(nc_asm_enter, core)[state];
+ if (!entry)
+ return -EINVAL;
+
+ /* Calculate which coupled CPUs (VPEs) are online */
+#if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6)
+ if (cpu_online(cpu)) {
+ cpumask_and(coupled_mask, cpu_online_mask,
+ &cpu_sibling_map[cpu]);
+ online = cpumask_weight(coupled_mask);
+ cpumask_clear_cpu(cpu, coupled_mask);
+ } else
+#endif
+ {
+ cpumask_clear(coupled_mask);
+ online = 1;
+ }
+
+ /* Setup the VPE to run mips_cps_pm_restore when started again */
+ if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ /* Power gating relies upon CPS SMP */
+ if (!mips_cps_smp_in_use())
+ return -EINVAL;
+
+ core_cfg = &mips_cps_core_bootcfg[core];
+ vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
+ vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
+ vpe_cfg->gp = (unsigned long)current_thread_info();
+ vpe_cfg->sp = 0;
+ }
+
+ /* Indicate that this CPU might not be coherent */
+ cpumask_clear_cpu(cpu, &cpu_coherent_mask);
+ smp_mb__after_atomic();
+
+ /* Create a non-coherent mapping of the core ready_count */
+ core_ready_count = per_cpu(ready_count, core);
+ nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
+ (unsigned long)core_ready_count);
+ nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
+ nc_core_ready_count = nc_addr;
+
+ /* Ensure ready_count is zero-initialised before the assembly runs */
+ WRITE_ONCE(*nc_core_ready_count, 0);
+ coupled_barrier(&per_cpu(pm_barrier, core), online);
+
+ /* Run the generated entry code */
+ left = entry(online, nc_core_ready_count);
+
+ /* Remove the non-coherent mapping of ready_count */
+ kunmap_noncoherent();
+
+ /* Indicate that this CPU is definitely coherent */
+ cpumask_set_cpu(cpu, &cpu_coherent_mask);
+
+ /*
+ * If this VPE is the first to leave the non-coherent wait state then
+ * it needs to wake up any coupled VPEs still running their wait
+ * instruction so that they return to cpuidle, which can then complete
+ * coordination between the coupled VPEs & provide the governor with
+ * a chance to reflect on the length of time the VPEs were in the
+ * idle state.
+ */
+ if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
+ arch_send_call_function_ipi_mask(coupled_mask);
+
+ return 0;
+}
+
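+/*
+ * Generate a loop of indexed cache ops covering the whole cache: start
+ * at CKSEG0, apply 'op' to a batch of lines per iteration and branch
+ * back until the end address is reached.
+ */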
+static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
+ struct uasm_reloc **pr,
+ const struct cache_desc *cache,
+ unsigned op, int lbl)
+{
+ unsigned cache_size = cache->ways << cache->waybit;
+ unsigned i;
+ const unsigned unroll_lines = 32;
+
+ /* If the cache isn't present this function has it easy */
+ if (cache->flags & MIPS_CACHE_NOT_PRESENT)
+ return;
+
+ /* Load base address */
+ UASM_i_LA(pp, t0, (long)CKSEG0);
+
+ /* Calculate end address */
+ if (cache_size < 0x8000)
+ uasm_i_addiu(pp, t1, t0, cache_size);
+ else
+ UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));
+
+ /* Start of cache op loop */
+ uasm_build_label(pl, *pp, lbl);
+
+ /* Generate the cache ops */
+ for (i = 0; i < unroll_lines; i++) {
+ if (cpu_has_mips_r6) {
+ uasm_i_cache(pp, op, 0, t0);
+ uasm_i_addiu(pp, t0, t0, cache->linesz);
+ } else {
+ uasm_i_cache(pp, op, i * cache->linesz, t0);
+ }
+ }
+
+ if (!cpu_has_mips_r6)
+ /* Update the base address */
+ uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
+
+ /* Loop if we haven't reached the end address yet */
+ uasm_il_bne(pp, pr, t0, t1, lbl);
+ uasm_i_nop(pp);
+}
+
+static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
+ struct uasm_reloc **pr,
+ const struct cpuinfo_mips *cpu_info,
+ int lbl)
+{
+ unsigned i, fsb_size = 8;
+ unsigned num_loads = (fsb_size * 3) / 2;
+ unsigned line_stride = 2;
+ unsigned line_size = cpu_info->dcache.linesz;
+ unsigned perf_counter, perf_event;
+ unsigned revision = cpu_info->processor_id & PRID_REV_MASK;
+
+ /*
+ * Determine whether this CPU requires an FSB flush, and if so which
+ * performance counter/event reflects stalls due to a full FSB.
+ */
+ switch (__get_cpu_type(cpu_info->cputype)) {
+ case CPU_INTERAPTIV:
+ perf_counter = 1;
+ perf_event = 51;
+ break;
+
+ case CPU_PROAPTIV:
+ /* Newer proAptiv cores don't require this workaround */
+ if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
+ return 0;
+
+ /* On older ones it's unavailable */
+ return -1;
+
+ default:
+ /* Assume that the CPU does not need this workaround */
+ return 0;
+ }
+
+ /*
+ * Ensure that the fill/store buffer (FSB) is not holding the results
+ * of a prefetch, since if it is then the CPC sequencer may become
+ * stuck in the D3 (ClrBus) state whilst entering a low power state.
+ */
+
+ /* Preserve perf counter setup */
+ uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
+ uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
+
+ /* Setup perf counter to count FSB full pipeline stalls */
+ uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
+ uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
+ uasm_i_ehb(pp);
+ uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
+ uasm_i_ehb(pp);
+
+ /* Base address for loads */
+ UASM_i_LA(pp, t0, (long)CKSEG0);
+
+ /* Start of clear loop */
+ uasm_build_label(pl, *pp, lbl);
+
+ /* Perform some loads to fill the FSB */
+ for (i = 0; i < num_loads; i++)
+ uasm_i_lw(pp, zero, i * line_size * line_stride, t0);
+
+ /*
+ * Invalidate the new D-cache entries so that the cache will need
+ * refilling (via the FSB) if the loop is executed again.
+ */
+ for (i = 0; i < num_loads; i++) {
+ uasm_i_cache(pp, Hit_Invalidate_D,
+ i * line_size * line_stride, t0);
+ uasm_i_cache(pp, Hit_Writeback_Inv_SD,
+ i * line_size * line_stride, t0);
+ }
+
+ /* Barrier ensuring previous cache invalidates are complete */
+ uasm_i_sync(pp, __SYNC_full);
+ uasm_i_ehb(pp);
+
+ /* Check whether the pipeline stalled due to the FSB being full */
+ uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */
+
+ /* Loop if it didn't */
+ uasm_il_beqz(pp, pr, t1, lbl);
+ uasm_i_nop(pp);
+
+ /* Restore perf counter 1. The count may well now be wrong... */
+ uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
+ uasm_i_ehb(pp);
+ uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
+ uasm_i_ehb(pp);
+
+ return 0;
+}
+
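+/*
+ * Generate an LL/SC loop that sets the top bit of the word at r_addr,
+ * retrying until the store-conditional succeeds.
+ */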
+static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
+ struct uasm_reloc **pr,
+ unsigned r_addr, int lbl)
+{
+ uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
+ uasm_build_label(pl, *pp, lbl);
+ uasm_i_ll(pp, t1, 0, r_addr);
+ uasm_i_or(pp, t1, t1, t0);
+ uasm_i_sc(pp, t1, 0, r_addr);
+ uasm_il_beqz(pp, pr, t1, lbl);
+ uasm_i_nop(pp);
+}
+
+static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
+{
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+ u32 *buf, *p;
+ const unsigned r_online = a0;
+ const unsigned r_nc_count = a1;
+ const unsigned r_pcohctl = t7;
+ const unsigned max_instrs = 256;
+ unsigned cpc_cmd;
+ int err;
+ enum {
+ lbl_incready = 1,
+ lbl_poll_cont,
+ lbl_secondary_hang,
+ lbl_disable_coherence,
+ lbl_flush_fsb,
+ lbl_invicache,
+ lbl_flushdcache,
+ lbl_hang,
+ lbl_set_cont,
+ lbl_secondary_cont,
+ lbl_decready,
+ };
+
+ /* Allocate a buffer to hold the generated code */
+ p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ /* Clear labels & relocs ready for (re)use */
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ /* Power gating relies upon CPS SMP */
+ if (!mips_cps_smp_in_use())
+ goto out_err;
+
+ /*
+ * Save CPU state. Note the non-standard calling convention
+ * with the return address placed in v0 to avoid clobbering
+ * the ra register before it is saved.
+ */
+ UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
+ uasm_i_jalr(&p, v0, t0);
+ uasm_i_nop(&p);
+ }
+
+ /*
+ * Load addresses of required CM & CPC registers. This is done early
+ * because they're needed in both the enable & disable coherence steps
+ * but in the coupled case the enable step will only run on one VPE.
+ */
+ UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());
+
+ if (coupled_coherence) {
+ /* Increment ready_count */
+ uasm_i_sync(&p, __SYNC_mb);
+ uasm_build_label(&l, p, lbl_incready);
+ uasm_i_ll(&p, t1, 0, r_nc_count);
+ uasm_i_addiu(&p, t2, t1, 1);
+ uasm_i_sc(&p, t2, 0, r_nc_count);
+ uasm_il_beqz(&p, &r, t2, lbl_incready);
+ uasm_i_addiu(&p, t1, t1, 1);
+
+ /* Barrier ensuring all CPUs see the updated r_nc_count value */
+ uasm_i_sync(&p, __SYNC_mb);
+
+ /*
+ * If this is the last VPE to become ready for non-coherence
+ * then it should branch below.
+ */
+ uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
+ uasm_i_nop(&p);
+
+ if (state < CPS_PM_POWER_GATED) {
+ /*
+ * Otherwise this is not the last VPE to become ready
+ * for non-coherence. It needs to wait until coherence
+ * has been disabled before proceeding, which it will do
+ * by polling for the top bit of ready_count being set.
+ */
+ uasm_i_addiu(&p, t1, zero, -1);
+ uasm_build_label(&l, p, lbl_poll_cont);
+ uasm_i_lw(&p, t0, 0, r_nc_count);
+ uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
+ uasm_i_ehb(&p);
+ if (cpu_has_mipsmt)
+ uasm_i_yield(&p, zero, t1);
+ uasm_il_b(&p, &r, lbl_poll_cont);
+ uasm_i_nop(&p);
+ } else {
+ /*
+ * The core will lose power & this VPE will not continue
+ * so it can simply halt here.
+ */
+ if (cpu_has_mipsmt) {
+ /* Halt the VPE via C0 tchalt register */
+ uasm_i_addiu(&p, t0, zero, TCHALT_H);
+ uasm_i_mtc0(&p, t0, 2, 4);
+ } else if (cpu_has_vp) {
+ /* Halt the VP via the CPC VP_STOP register */
+ unsigned int vpe_id;
+
+ vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+ uasm_i_addiu(&p, t0, zero, 1 << vpe_id);
+ UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop());
+ uasm_i_sw(&p, t0, 0, t1);
+ } else {
+ BUG();
+ }
+ uasm_build_label(&l, p, lbl_secondary_hang);
+ uasm_il_b(&p, &r, lbl_secondary_hang);
+ uasm_i_nop(&p);
+ }
+ }
+
+ /*
+ * This is the point of no return - this VPE will now proceed to
+ * disable coherence. At this point we *must* be sure that no other
+ * VPE within the core will interfere with the L1 dcache.
+ */
+ uasm_build_label(&l, p, lbl_disable_coherence);
+
+ /* Invalidate the L1 icache */
+ cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
+ Index_Invalidate_I, lbl_invicache);
+
+ /* Writeback & invalidate the L1 dcache */
+ cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
+ Index_Writeback_Inv_D, lbl_flushdcache);
+
+ /* Barrier ensuring previous cache invalidates are complete */
+ uasm_i_sync(&p, __SYNC_full);
+ uasm_i_ehb(&p);
+
+ if (mips_cm_revision() < CM_REV_CM3) {
+ /*
+ * Disable all but self interventions. The load from COHCTL is
+ * defined by the interAptiv & proAptiv SUMs as ensuring that the
+ * operation resulting from the preceding store is complete.
+ */
+ uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu]));
+ uasm_i_sw(&p, t0, 0, r_pcohctl);
+ uasm_i_lw(&p, t0, 0, r_pcohctl);
+
+ /* Barrier to ensure write to coherence control is complete */
+ uasm_i_sync(&p, __SYNC_full);
+ uasm_i_ehb(&p);
+ }
+
+ /* Disable coherence */
+ uasm_i_sw(&p, zero, 0, r_pcohctl);
+ uasm_i_lw(&p, t0, 0, r_pcohctl);
+
+ if (state >= CPS_PM_CLOCK_GATED) {
+ err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
+ lbl_flush_fsb);
+ if (err)
+ goto out_err;
+
+ /* Determine the CPC command to issue */
+ switch (state) {
+ case CPS_PM_CLOCK_GATED:
+ cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
+ break;
+ case CPS_PM_POWER_GATED:
+ cpc_cmd = CPC_Cx_CMD_PWRDOWN;
+ break;
+ default:
+ BUG();
+ goto out_err;
+ }
+
+ /* Issue the CPC command */
+ UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
+ uasm_i_addiu(&p, t1, zero, cpc_cmd);
+ uasm_i_sw(&p, t1, 0, t0);
+
+ if (state == CPS_PM_POWER_GATED) {
+ /* If anything goes wrong just hang */
+ uasm_build_label(&l, p, lbl_hang);
+ uasm_il_b(&p, &r, lbl_hang);
+ uasm_i_nop(&p);
+
+ /*
+ * There's no point generating more code, the core is
+ * powered down & if powered back up will run from the
+ * reset vector not from here.
+ */
+ goto gen_done;
+ }
+
+ /* Barrier to ensure write to CPC command is complete */
+ uasm_i_sync(&p, __SYNC_full);
+ uasm_i_ehb(&p);
+ }
+
+ if (state == CPS_PM_NC_WAIT) {
+ /*
+ * At this point it is safe for all VPEs to proceed with
+ * execution. This VPE will set the top bit of ready_count
+ * to indicate to the other VPEs that they may continue.
+ */
+ if (coupled_coherence)
+ cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
+ lbl_set_cont);
+
+ /*
+ * VPEs which did not disable coherence will continue
+ * executing, after coherence has been disabled, from this
+ * point.
+ */
+ uasm_build_label(&l, p, lbl_secondary_cont);
+
+ /* Now perform our wait */
+ uasm_i_wait(&p, 0);
+ }
+
+ /*
+ * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
+ * will run this. The first will actually re-enable coherence & the
+ * rest will just be performing a rather unusual nop.
+ */
+ uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3
+ ? CM_GCR_Cx_COHERENCE_COHDOMAINEN
+ : CM3_GCR_Cx_COHERENCE_COHEN);
+
+ uasm_i_sw(&p, t0, 0, r_pcohctl);
+ uasm_i_lw(&p, t0, 0, r_pcohctl);
+
+ /* Barrier to ensure write to coherence control is complete */
+ uasm_i_sync(&p, __SYNC_full);
+ uasm_i_ehb(&p);
+
+ if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
+ /* Decrement ready_count */
+ uasm_build_label(&l, p, lbl_decready);
+ uasm_i_sync(&p, __SYNC_mb);
+ uasm_i_ll(&p, t1, 0, r_nc_count);
+ uasm_i_addiu(&p, t2, t1, -1);
+ uasm_i_sc(&p, t2, 0, r_nc_count);
+ uasm_il_beqz(&p, &r, t2, lbl_decready);
+ uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
+
+ /* Barrier ensuring all CPUs see the updated r_nc_count value */
+ uasm_i_sync(&p, __SYNC_mb);
+ }
+
+ if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
+ /*
+ * At this point it is safe for all VPEs to proceed with
+ * execution. This VPE will set the top bit of ready_count
+ * to indicate to the other VPEs that they may continue.
+ */
+ cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);
+
+ /*
+ * This core will be reliant upon another core sending a
+ * power-up command to the CPC in order to resume operation.
+ * Thus an arbitrary VPE can't trigger the core leaving the
+ * idle state and the one that disables coherence might as well
+ * be the one to re-enable it. The rest will continue from here
+ * after that has been done.
+ */
+ uasm_build_label(&l, p, lbl_secondary_cont);
+
+ /* Barrier ensuring all CPUs see the updated r_nc_count value */
+ uasm_i_sync(&p, __SYNC_mb);
+ }
+
+ /* The core is coherent, time to return to C code */
+ uasm_i_jr(&p, ra);
+ uasm_i_nop(&p);
+
+gen_done:
+ /* Ensure the code didn't exceed the resources allocated for it */
+ BUG_ON((p - buf) > max_instrs);
+ BUG_ON((l - labels) > ARRAY_SIZE(labels));
+ BUG_ON((r - relocs) > ARRAY_SIZE(relocs));
+
+ /* Patch branch offsets */
+ uasm_resolve_relocs(relocs, labels);
+
+ /* Flush the icache */
+ local_flush_icache_range((unsigned long)buf, (unsigned long)p);
+
+ return buf;
+out_err:
+ kfree(buf);
+ return NULL;
+}
+
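+/*
+ * CPU hotplug "online" callback: generate the entry code for each
+ * supported PM state on this CPU's core (disabling a state if code
+ * generation fails) and allocate the per-core ready_count if needed.
+ */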
+static int cps_pm_online_cpu(unsigned int cpu)
+{
+ enum cps_pm_state state;
+ unsigned core = cpu_core(&cpu_data[cpu]);
+ void *entry_fn, *core_rc;
+
+ for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
+ if (per_cpu(nc_asm_enter, core)[state])
+ continue;
+ if (!test_bit(state, state_support))
+ continue;
+
+ entry_fn = cps_gen_entry_code(cpu, state);
+ if (!entry_fn) {
+ pr_err("Failed to generate core %u state %u entry\n",
+ core, state);
+ clear_bit(state, state_support);
+ }
+
+ per_cpu(nc_asm_enter, core)[state] = entry_fn;
+ }
+
+ if (!per_cpu(ready_count, core)) {
+ core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
+ if (!core_rc) {
+ pr_err("Failed allocate core %u ready_count\n", core);
+ return -ENOMEM;
+ }
+ per_cpu(ready_count, core) = core_rc;
+ }
+
+ return 0;
+}
+
+static int cps_pm_power_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ unsigned int stat;
+
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ stat = read_cpc_cl_stat_conf();
+ /*
+ * If we're attempting to suspend the system and power down all
+ * of the cores, the JTAG detect bit indicates that the CPC will
+ * instead put the cores into clock-off state. In this state
+ * a connected debugger can cause the CPU to attempt
+ * interactions with the powered down system. At best this will
+ * fail. At worst, it can hang the NoC, requiring a hard reset.
+ * To avoid this, just block system suspend if a JTAG probe
+ * is detected.
+ */
+ if (stat & CPC_Cx_STAT_CONF_EJTAG_PROBE) {
+ pr_warn("JTAG probe is connected - abort suspend\n");
+ return NOTIFY_BAD;
+ }
+ return NOTIFY_DONE;
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static int __init cps_pm_init(void)
+{
+ /* A CM is required for all non-coherent states */
+ if (!mips_cm_present()) {
+ pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
+ return 0;
+ }
+
+ /*
+ * If interrupts were enabled whilst running a wait instruction on a
+ * non-coherent core then the VPE may end up processing interrupts
+ * whilst non-coherent. That would be bad.
+ */
+ if (cpu_wait == r4k_wait_irqoff)
+ set_bit(CPS_PM_NC_WAIT, state_support);
+ else
+ pr_warn("pm-cps: non-coherent wait unavailable\n");
+
+ /* Detect whether a CPC is present */
+ if (mips_cpc_present()) {
+ /* Detect whether clock gating is implemented */
+ if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL)
+ set_bit(CPS_PM_CLOCK_GATED, state_support);
+ else
+ pr_warn("pm-cps: CPC does not support clock gating\n");
+
+ /* Power gating is available with CPS SMP & any CPC */
+ if (mips_cps_smp_in_use())
+ set_bit(CPS_PM_POWER_GATED, state_support);
+ else
+ pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
+ } else {
+ pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
+ }
+
+ pm_notifier(cps_pm_power_notifier, 0);
+
+ return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mips/cps_pm:online",
+ cps_pm_online_cpu, NULL);
+}
+arch_initcall(cps_pm_init);
diff --git a/arch/mips/kernel/pm.c b/arch/mips/kernel/pm.c
new file mode 100644
index 000000000..486ed2bf2
--- /dev/null
+++ b/arch/mips/kernel/pm.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ *
+ * CPU PM notifiers for saving/restoring general CPU state.
+ */
+
+#include <linux/cpu_pm.h>
+#include <linux/init.h>
+
+#include <asm/dsp.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/pm.h>
+#include <asm/watch.h>
+
+/* Used by PM helper macros in asm/pm.h */
+struct mips_static_suspend_state mips_static_suspend_state;
+
+/**
+ * mips_cpu_save() - Save general CPU state.
+ * Ensures that general CPU context is saved, notably FPU and DSP.
+ */
+static int mips_cpu_save(void)
+{
+ /* Save FPU state */
+ lose_fpu(1);
+
+ /* Save DSP state */
+ save_dsp(current);
+
+ return 0;
+}
+
+/**
+ * mips_cpu_restore() - Restore general CPU state.
+ * Restores important CPU context.
+ */
+static void mips_cpu_restore(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /* Restore ASID */
+ if (current->mm)
+ write_c0_entryhi(cpu_asid(cpu, current->mm));
+
+ /* Restore DSP state */
+ restore_dsp(current);
+
+ /* Restore UserLocal */
+ if (cpu_has_userlocal)
+ write_c0_userlocal(current_thread_info()->tp_value);
+
+ /* Restore watch registers */
+ __restore_watch(current);
+}
+
+/**
+ * mips_pm_notifier() - Notifier for preserving general CPU context.
+ * @self: Notifier block.
+ * @cmd: CPU PM event.
+ * @v: Private data (unused).
+ *
+ * This is called when a CPU power management event occurs, and is used to
+ * ensure that important CPU context is preserved across a CPU power down.
+ */
+static int mips_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ int ret;
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ ret = mips_cpu_save();
+ if (ret)
+ return NOTIFY_STOP;
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ mips_cpu_restore();
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mips_pm_notifier_block = {
+ .notifier_call = mips_pm_notifier,
+};
+
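+/*
+ * Register the CPU PM notifier so CPU context is saved on CPU_PM_ENTER
+ * and restored on exit or failed entry.
+ */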
+static int __init mips_pm_init(void)
+{
+ return cpu_pm_register_notifier(&mips_pm_notifier_block);
+}
+arch_initcall(mips_pm_init);
diff --git a/arch/mips/kernel/probes-common.h b/arch/mips/kernel/probes-common.h
new file mode 100644
index 000000000..73e1d5e95
--- /dev/null
+++ b/arch/mips/kernel/probes-common.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Marcin Nowakowski <marcin.nowakowski@mips.com>
+ */
+
+#ifndef __PROBES_COMMON_H
+#define __PROBES_COMMON_H
+
+#include <asm/inst.h>
+
+int __insn_is_compact_branch(union mips_instruction insn);
+
+static inline int __insn_has_delay_slot(const union mips_instruction insn)
+{
+ switch (insn.i_format.opcode) {
+ /*
+ * jr and jalr are in r_format format.
+ */
+ case spec_op:
+ switch (insn.r_format.func) {
+ case jalr_op:
+ case jr_op:
+ return 1;
+ }
+ break;
+
+ /*
+ * This group contains:
+ * bltz_op, bgez_op, bltzl_op, bgezl_op,
+ * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+ */
+ case bcond_op:
+ switch (insn.i_format.rt) {
+ case bltz_op:
+ case bltzl_op:
+ case bgez_op:
+ case bgezl_op:
+ case bltzal_op:
+ case bltzall_op:
+ case bgezal_op:
+ case bgezall_op:
+ case bposge32_op:
+ return 1;
+ }
+ break;
+
+ /*
+ * These are unconditional and in j_format.
+ */
+ case jal_op:
+ case j_op:
+ case beq_op:
+ case beql_op:
+ case bne_op:
+ case bnel_op:
+ case blez_op: /* not really i_format */
+ case blezl_op:
+ case bgtz_op:
+ case bgtzl_op:
+ return 1;
+
+ /*
+ * And now the FPA/cp1 branch instructions.
+ */
+ case cop1_op:
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ case ldc2_op: /* This is bbit032 on Octeon */
+ case swc2_op: /* This is bbit1 on Octeon */
+ case sdc2_op: /* This is bbit132 on Octeon */
+#endif
+ return 1;
+ }
+
+ return 0;
+}
+
+#endif /* __PROBES_COMMON_H */
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
new file mode 100644
index 000000000..33a02f381
--- /dev/null
+++ b/arch/mips/kernel/proc.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 1995, 1996, 2001 Ralf Baechle
+ * Copyright (C) 2001, 2004 MIPS Technologies, Inc.
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/idle.h>
+#include <asm/mipsregs.h>
+#include <asm/processor.h>
+#include <asm/prom.h>
+
+unsigned int vced_count, vcei_count;
+
+/*
+ * No lock; only written during early bootup by CPU 0.
+ */
+static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
+
+int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
+}
+
+int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
+{
+ return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
+}
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
+ unsigned long n = (unsigned long) v - 1;
+ unsigned int version = cpu_data[n].processor_id;
+ unsigned int fp_vers = cpu_data[n].fpu_id;
+ char fmt [64];
+ int i;
+
+#ifdef CONFIG_SMP
+ if (!cpu_online(n))
+ return 0;
+#endif
+
+ /*
+ * For the first processor also print the system type
+ */
+ if (n == 0) {
+ seq_printf(m, "system type\t\t: %s\n", get_system_type());
+ if (mips_get_machine_name())
+ seq_printf(m, "machine\t\t\t: %s\n",
+ mips_get_machine_name());
+ }
+
+ seq_printf(m, "processor\t\t: %ld\n", n);
+ sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
+ cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : "");
+ seq_printf(m, fmt, __cpu_name[n],
+ (version >> 4) & 0x0f, version & 0x0f,
+ (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
+ seq_printf(m, "BogoMIPS\t\t: %u.%02u\n",
+ cpu_data[n].udelay_val / (500000/HZ),
+ (cpu_data[n].udelay_val / (5000/HZ)) % 100);
+ seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
+ seq_printf(m, "microsecond timers\t: %s\n",
+ cpu_has_counter ? "yes" : "no");
+ seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);
+ seq_printf(m, "extra interrupt vector\t: %s\n",
+ cpu_has_divec ? "yes" : "no");
+ seq_printf(m, "hardware watchpoint\t: %s",
+ cpu_has_watch ? "yes, " : "no\n");
+ if (cpu_has_watch) {
+ seq_printf(m, "count: %d, address/irw mask: [",
+ cpu_data[n].watch_reg_count);
+ for (i = 0; i < cpu_data[n].watch_reg_count; i++)
+ seq_printf(m, "%s0x%04x", i ? ", " : "" ,
+ cpu_data[n].watch_reg_masks[i]);
+ seq_printf(m, "]\n");
+ }
+
+ seq_printf(m, "isa\t\t\t:");
+ if (cpu_has_mips_1)
+ seq_printf(m, " mips1");
+ if (cpu_has_mips_2)
+ seq_printf(m, "%s", " mips2");
+ if (cpu_has_mips_3)
+ seq_printf(m, "%s", " mips3");
+ if (cpu_has_mips_4)
+ seq_printf(m, "%s", " mips4");
+ if (cpu_has_mips_5)
+ seq_printf(m, "%s", " mips5");
+ if (cpu_has_mips32r1)
+ seq_printf(m, "%s", " mips32r1");
+ if (cpu_has_mips32r2)
+ seq_printf(m, "%s", " mips32r2");
+ if (cpu_has_mips32r5)
+ seq_printf(m, "%s", " mips32r5");
+ if (cpu_has_mips32r6)
+ seq_printf(m, "%s", " mips32r6");
+ if (cpu_has_mips64r1)
+ seq_printf(m, "%s", " mips64r1");
+ if (cpu_has_mips64r2)
+ seq_printf(m, "%s", " mips64r2");
+ if (cpu_has_mips64r5)
+ seq_printf(m, "%s", " mips64r5");
+ if (cpu_has_mips64r6)
+ seq_printf(m, "%s", " mips64r6");
+ seq_printf(m, "\n");
+
+ seq_printf(m, "ASEs implemented\t:");
+ if (cpu_has_mips16) seq_printf(m, "%s", " mips16");
+ if (cpu_has_mips16e2) seq_printf(m, "%s", " mips16e2");
+ if (cpu_has_mdmx) seq_printf(m, "%s", " mdmx");
+ if (cpu_has_mips3d) seq_printf(m, "%s", " mips3d");
+ if (cpu_has_smartmips) seq_printf(m, "%s", " smartmips");
+ if (cpu_has_dsp) seq_printf(m, "%s", " dsp");
+ if (cpu_has_dsp2) seq_printf(m, "%s", " dsp2");
+ if (cpu_has_dsp3) seq_printf(m, "%s", " dsp3");
+ if (cpu_has_mipsmt) seq_printf(m, "%s", " mt");
+ if (cpu_has_mmips) seq_printf(m, "%s", " micromips");
+ if (cpu_has_vz) seq_printf(m, "%s", " vz");
+ if (cpu_has_msa) seq_printf(m, "%s", " msa");
+ if (cpu_has_eva) seq_printf(m, "%s", " eva");
+ if (cpu_has_htw) seq_printf(m, "%s", " htw");
+ if (cpu_has_xpa) seq_printf(m, "%s", " xpa");
+ if (cpu_has_loongson_mmi) seq_printf(m, "%s", " loongson-mmi");
+ if (cpu_has_loongson_cam) seq_printf(m, "%s", " loongson-cam");
+ if (cpu_has_loongson_ext) seq_printf(m, "%s", " loongson-ext");
+ if (cpu_has_loongson_ext2) seq_printf(m, "%s", " loongson-ext2");
+ seq_printf(m, "\n");
+
+ if (cpu_has_mmips) {
+ seq_printf(m, "micromips kernel\t: %s\n",
+ (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no");
+ }
+ seq_printf(m, "shadow register sets\t: %d\n",
+ cpu_data[n].srsets);
+ seq_printf(m, "kscratch registers\t: %d\n",
+ hweight8(cpu_data[n].kscratch_mask));
+ seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package);
+ seq_printf(m, "core\t\t\t: %d\n", cpu_core(&cpu_data[n]));
+
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
+ if (cpu_has_mipsmt)
+ seq_printf(m, "VPE\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n]));
+ else if (cpu_has_vp)
+ seq_printf(m, "VP\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n]));
+#endif
+
+ sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
+ cpu_has_vce ? "%u" : "not available");
+ seq_printf(m, fmt, 'D', vced_count);
+ seq_printf(m, fmt, 'I', vcei_count);
+
+ proc_cpuinfo_notifier_args.m = m;
+ proc_cpuinfo_notifier_args.n = n;
+
+ raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
+ &proc_cpuinfo_notifier_args);
+
+ seq_printf(m, "\n");
+
+ return 0;
+}
+
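+/*
+ * seq_file iterator over CPUs: positions are encoded as (index + 1) so
+ * that CPU 0 is not mistaken for the NULL end-of-sequence marker.
+ */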
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ unsigned long i = *pos;
+
+ return i < nr_cpu_ids ? (void *) (i + 1) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
new file mode 100644
index 000000000..98ecaf6f3
--- /dev/null
+++ b/arch/mips/kernel/process.c
@@ -0,0 +1,896 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
+ * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/tick.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/export.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/personality.h>
+#include <linux/sys.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/kallsyms.h>
+#include <linux/random.h>
+#include <linux/prctl.h>
+#include <linux/nmi.h>
+#include <linux/cpu.h>
+
+#include <asm/abi.h>
+#include <asm/asm.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/dsemul.h>
+#include <asm/dsp.h>
+#include <asm/fpu.h>
+#include <asm/irq.h>
+#include <asm/mips-cps.h>
+#include <asm/msa.h>
+#include <asm/mipsregs.h>
+#include <asm/processor.h>
+#include <asm/reg.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+#include <asm/elf.h>
+#include <asm/isadep.h>
+#include <asm/inst.h>
+#include <asm/stacktrace.h>
+#include <asm/irq_regs.h>
+#include <asm/exec.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+ play_dead();
+}
+#endif
+
+asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
+
+void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
+{
+ unsigned long status;
+
+ /* New thread loses kernel privileges. */
+ status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
+ status |= KU_USER;
+ regs->cp0_status = status;
+ lose_fpu(0);
+ clear_thread_flag(TIF_MSA_CTX_LIVE);
+ clear_used_math();
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+#endif
+ init_dsp();
+ regs->cp0_epc = pc;
+ regs->regs[29] = sp;
+}
+
+void exit_thread(struct task_struct *tsk)
+{
+ /*
+ * User threads may have allocated a delay slot emulation frame.
+ * If so, clean up that allocation.
+ */
+ if (!(current->flags & PF_KTHREAD))
+ dsemul_thread_cleanup(tsk);
+}
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+ /*
+ * Save any process state which is live in hardware registers to the
+ * parent context prior to duplication. This prevents the new child
+ * state becoming stale if the parent is preempted before copy_thread()
+ * gets a chance to save the parent's live hardware registers to the
+ * child context.
+ */
+ preempt_disable();
+
+ if (is_msa_enabled())
+ save_msa(current);
+ else if (is_fpu_owner())
+ _save_fp(current);
+
+ save_dsp(current);
+
+ preempt_enable();
+
+ *dst = *src;
+ return 0;
+}
+
+/*
+ * Copy architecture-specific thread state
+ */
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+ unsigned long kthread_arg, struct task_struct *p,
+ unsigned long tls)
+{
+ struct thread_info *ti = task_thread_info(p);
+ struct pt_regs *childregs, *regs = current_pt_regs();
+ unsigned long childksp;
+
+ childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
+
+ /* set up new TSS. */
+ childregs = (struct pt_regs *) childksp - 1;
+ /* Put the stack after the struct pt_regs. */
+ childksp = (unsigned long) childregs;
+ p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ /* kernel thread */
+ unsigned long status = p->thread.cp0_status;
+ memset(childregs, 0, sizeof(struct pt_regs));
+ ti->addr_limit = KERNEL_DS;
+ p->thread.reg16 = usp; /* fn */
+ p->thread.reg17 = kthread_arg;
+ p->thread.reg29 = childksp;
+ p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
+ ((status & (ST0_KUC | ST0_IEC)) << 2);
+#else
+ status |= ST0_EXL;
+#endif
+ childregs->cp0_status = status;
+ return 0;
+ }
+
+ /* user thread */
+ *childregs = *regs;
+ childregs->regs[7] = 0; /* Clear error flag */
+ childregs->regs[2] = 0; /* Child gets zero as return value */
+ if (usp)
+ childregs->regs[29] = usp;
+ ti->addr_limit = USER_DS;
+
+ p->thread.reg29 = (unsigned long) childregs;
+ p->thread.reg31 = (unsigned long) ret_from_fork;
+
+ /*
+ * New tasks lose permission to use the fpu. This accelerates context
+ * switching for most programs since they don't use the fpu.
+ */
+ childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
+
+ clear_tsk_thread_flag(p, TIF_USEDFPU);
+ clear_tsk_thread_flag(p, TIF_USEDMSA);
+ clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ clear_tsk_thread_flag(p, TIF_FPUBOUND);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+#endif
+
+ if (clone_flags & CLONE_SETTLS)
+ ti->tp_value = tls;
+
+ return 0;
+}
+
+#ifdef CONFIG_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
+struct mips_frame_info {
+ void *func;
+ unsigned long func_size;
+ int frame_size;
+ int pc_offset;
+};
+
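+ /*
+ * Reconstruct the absolute target of a j/jal instruction: the high-order
+ * address bits are taken from the PC and the low 28 bits from the
+ * instruction's 26-bit target field shifted left by two.
+ */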
+#define J_TARGET(pc,target) \
+ (((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
+
+static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
+{
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * swsp ra,offset
+ * swm16 reglist,offset(sp)
+ * swm32 reglist,offset(sp)
+ * sw32 ra,offset(sp)
+ * jraddiusp - NOT SUPPORTED
+ *
+ * microMIPS is way more fun...
+ */
+ if (mm_insn_16bit(ip->word >> 16)) {
+ switch (ip->mm16_r5_format.opcode) {
+ case mm_swsp16_op:
+ if (ip->mm16_r5_format.rt != 31)
+ return 0;
+
+ *poff = ip->mm16_r5_format.imm;
+ *poff = (*poff << 2) / sizeof(ulong);
+ return 1;
+
+ case mm_pool16c_op:
+ switch (ip->mm16_m_format.func) {
+ case mm_swm16_op:
+ *poff = ip->mm16_m_format.imm;
+ *poff += 1 + ip->mm16_m_format.rlist;
+ *poff = (*poff << 2) / sizeof(ulong);
+ return 1;
+
+ default:
+ return 0;
+ }
+
+ default:
+ return 0;
+ }
+ }
+
+ switch (ip->i_format.opcode) {
+ case mm_sw32_op:
+ if (ip->i_format.rs != 29)
+ return 0;
+ if (ip->i_format.rt != 31)
+ return 0;
+
+ *poff = ip->i_format.simmediate / sizeof(ulong);
+ return 1;
+
+ case mm_pool32b_op:
+ switch (ip->mm_m_format.func) {
+ case mm_swm32_func:
+ if (ip->mm_m_format.rd < 0x10)
+ return 0;
+ if (ip->mm_m_format.base != 29)
+ return 0;
+
+ *poff = ip->mm_m_format.simmediate;
+ *poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
+ *poff /= sizeof(ulong);
+ return 1;
+ default:
+ return 0;
+ }
+
+ default:
+ return 0;
+ }
+#else
+ /* sw / sd $ra, offset($sp) */
+ if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
+ ip->i_format.rs == 29 && ip->i_format.rt == 31) {
+ *poff = ip->i_format.simmediate / sizeof(ulong);
+ return 1;
+ }
+#ifdef CONFIG_CPU_LOONGSON64
+ if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
+ (ip->loongson3_lswc2_format.ls == 1) &&
+ (ip->loongson3_lswc2_format.fr == 0) &&
+ (ip->loongson3_lswc2_format.base == 29)) {
+ if (ip->loongson3_lswc2_format.rt == 31) {
+ *poff = ip->loongson3_lswc2_format.offset << 1;
+ return 1;
+ }
+ if (ip->loongson3_lswc2_format.rq == 31) {
+ *poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
+ return 1;
+ }
+ }
+#endif
+ return 0;
+#endif
+}
+
+static inline int is_jump_ins(union mips_instruction *ip)
+{
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * jr16,jrc,jalr16,jalr16
+ * jal
+ * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
+ * jraddiusp - NOT SUPPORTED
+ *
+ * microMIPS is kind of more fun...
+ */
+ if (mm_insn_16bit(ip->word >> 16)) {
+ if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
+ (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
+ return 1;
+ return 0;
+ }
+
+ if (ip->j_format.opcode == mm_j32_op)
+ return 1;
+ if (ip->j_format.opcode == mm_jal32_op)
+ return 1;
+ if (ip->r_format.opcode != mm_pool32a_op ||
+ ip->r_format.func != mm_pool32axf_op)
+ return 0;
+ return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
+#else
+ if (ip->j_format.opcode == j_op)
+ return 1;
+ if (ip->j_format.opcode == jal_op)
+ return 1;
+ if (ip->r_format.opcode != spec_op)
+ return 0;
+ return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
+#endif
+}
+
+static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
+{
+#ifdef CONFIG_CPU_MICROMIPS
+ unsigned short tmp;
+
+ /*
+ * addiusp -imm
+ * addius5 sp,-imm
+ * addiu32 sp,sp,-imm
+ * jraddiusp - NOT SUPPORTED
+ *
+ * microMIPS is not more fun...
+ */
+ if (mm_insn_16bit(ip->word >> 16)) {
+ if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
+ ip->mm16_r3_format.simmediate & mm_addiusp_func) {
+ tmp = ip->mm_b0_format.simmediate >> 1;
+ tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
+ if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
+ tmp ^= 0x100;
+ *frame_size = -(signed short)(tmp << 2);
+ return 1;
+ }
+ if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
+ ip->mm16_r5_format.rt == 29) {
+ tmp = ip->mm16_r5_format.imm >> 1;
+ *frame_size = -(signed short)(tmp & 0xf);
+ return 1;
+ }
+ return 0;
+ }
+
+ if (ip->mm_i_format.opcode == mm_addiu32_op &&
+ ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
+ *frame_size = -ip->i_format.simmediate;
+ return 1;
+ }
+#else
+ /* addiu/daddiu sp,sp,-imm */
+ if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
+ return 0;
+
+ if (ip->i_format.opcode == addiu_op ||
+ ip->i_format.opcode == daddiu_op) {
+ *frame_size = -ip->i_format.simmediate;
+ return 1;
+ }
+#endif
+ return 0;
+}
+
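+ /*
+ * Scan a function prologue for the stack-frame allocation and the
+ * return-address save, filling in frame_size and pc_offset. Returns
+ * 0 for a nested (non-leaf) function, 1 for a leaf function and -1
+ * if the prologue could not be analysed.
+ */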
+static int get_frame_info(struct mips_frame_info *info)
+{
+ bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
+ union mips_instruction insn, *ip;
+ const unsigned int max_insns = 128;
+ unsigned int last_insn_size = 0;
+ unsigned int i;
+ bool saw_jump = false;
+
+ info->pc_offset = -1;
+ info->frame_size = 0;
+
+ ip = (void *)msk_isa16_mode((ulong)info->func);
+ if (!ip)
+ goto err;
+
+ for (i = 0; i < max_insns; i++) {
+ ip = (void *)ip + last_insn_size;
+
+ if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
+ insn.word = ip->halfword[0] << 16;
+ last_insn_size = 2;
+ } else if (is_mmips) {
+ insn.word = ip->halfword[0] << 16 | ip->halfword[1];
+ last_insn_size = 4;
+ } else {
+ insn.word = ip->word;
+ last_insn_size = 4;
+ }
+
+ if (!info->frame_size) {
+ is_sp_move_ins(&insn, &info->frame_size);
+ continue;
+ } else if (!saw_jump && is_jump_ins(ip)) {
+ /*
+ * If we see a jump instruction, we are finished
+ * with the frame save.
+ *
+ * Some functions can have a shortcut return at
+ * the beginning of the function, so don't start
+ * looking for a jump instruction until we see the
+ * frame setup.
+ *
+ * The RA save instruction can get put into the
+ * delay slot of the jump instruction, so look
+ * at the next instruction, too.
+ */
+ saw_jump = true;
+ continue;
+ }
+ if (info->pc_offset == -1 &&
+ is_ra_save_ins(&insn, &info->pc_offset))
+ break;
+ if (saw_jump)
+ break;
+ }
+ if (info->frame_size && info->pc_offset >= 0) /* nested */
+ return 0;
+ if (info->pc_offset < 0) /* leaf */
+ return 1;
+ /* prologue seems bogus... */
+err:
+ return -1;
+}
+
+static struct mips_frame_info schedule_mfi __read_mostly;
+
+#ifdef CONFIG_KALLSYMS
+static unsigned long get___schedule_addr(void)
+{
+ return kallsyms_lookup_name("__schedule");
+}
+#else
+static unsigned long get___schedule_addr(void)
+{
+ union mips_instruction *ip = (void *)schedule;
+ int max_insns = 8;
+ int i;
+
+ for (i = 0; i < max_insns; i++, ip++) {
+ if (ip->j_format.opcode == j_op)
+ return J_TARGET(ip, ip->j_format.target);
+ }
+ return 0;
+}
+#endif
+
+static int __init frame_info_init(void)
+{
+ unsigned long size = 0;
+#ifdef CONFIG_KALLSYMS
+ unsigned long ofs;
+#endif
+ unsigned long addr;
+
+ addr = get___schedule_addr();
+ if (!addr)
+ addr = (unsigned long)schedule;
+
+#ifdef CONFIG_KALLSYMS
+ kallsyms_lookup_size_offset(addr, &size, &ofs);
+#endif
+ schedule_mfi.func = (void *)addr;
+ schedule_mfi.func_size = size;
+
+ get_frame_info(&schedule_mfi);
+
+ /*
+ * Without schedule() frame info, the results given by
+ * thread_saved_pc() and get_wchan() are not reliable.
+ */
+ if (schedule_mfi.pc_offset < 0)
+ printk("Can't analyze schedule() prologue at %p\n", schedule);
+
+ return 0;
+}
+
+arch_initcall(frame_info_init);
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+static unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+ struct thread_struct *t = &tsk->thread;
+
+ /* Newborn processes are a special case */
+ if (t->reg31 == (unsigned long) ret_from_fork)
+ return t->reg31;
+ if (schedule_mfi.pc_offset < 0)
+ return 0;
+ return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
+}
+
+
+#ifdef CONFIG_KALLSYMS
+/* generic stack unwinding function */
+unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
+ unsigned long *sp,
+ unsigned long pc,
+ unsigned long *ra)
+{
+ unsigned long low, high, irq_stack_high;
+ struct mips_frame_info info;
+ unsigned long size, ofs;
+ struct pt_regs *regs;
+ int leaf;
+
+ if (!stack_page)
+ return 0;
+
+ /*
+ * IRQ stacks start at IRQ_STACK_START
+ * task stacks at THREAD_SIZE - 32
+ */
+ low = stack_page;
+ if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
+ high = stack_page + IRQ_STACK_START;
+ irq_stack_high = high;
+ } else {
+ high = stack_page + THREAD_SIZE - 32;
+ irq_stack_high = 0;
+ }
+
+ /*
+ * If we reached the top of the interrupt stack, start unwinding
+ * the interrupted task stack.
+ */
+ if (unlikely(*sp == irq_stack_high)) {
+ unsigned long task_sp = *(unsigned long *)*sp;
+
+ /*
+ * Check that the pointer saved in the IRQ stack head points to
+ * something within the stack of the current task
+ */
+ if (!object_is_on_stack((void *)task_sp))
+ return 0;
+
+ /*
+ * Follow the pointer to the task's kernel stack frame where the
+ * interrupted state was saved.
+ */
+ regs = (struct pt_regs *)task_sp;
+ pc = regs->cp0_epc;
+ if (!user_mode(regs) && __kernel_text_address(pc)) {
+ *sp = regs->regs[29];
+ *ra = regs->regs[31];
+ return pc;
+ }
+ return 0;
+ }
+ if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
+ return 0;
+ /*
+ * Return ra if an exception occurred at the first instruction
+ */
+ if (unlikely(ofs == 0)) {
+ pc = *ra;
+ *ra = 0;
+ return pc;
+ }
+
+ info.func = (void *)(pc - ofs);
+ info.func_size = ofs; /* analyze from start to ofs */
+ leaf = get_frame_info(&info);
+ if (leaf < 0)
+ return 0;
+
+ if (*sp < low || *sp + info.frame_size > high)
+ return 0;
+
+ if (leaf)
+ /*
+ * In some extreme cases, get_frame_info() can
+ * wrongly consider a nested function to be a leaf
+ * one. In such cases, avoid always returning the
+ * same value.
+ */
+ pc = pc != *ra ? *ra : 0;
+ else
+ pc = ((unsigned long *)(*sp))[info.pc_offset];
+
+ *sp += info.frame_size;
+ *ra = 0;
+ return __kernel_text_address(pc) ? pc : 0;
+}
+EXPORT_SYMBOL(unwind_stack_by_address);
+
+/* used by show_backtrace() */
+unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
+ unsigned long pc, unsigned long *ra)
+{
+ unsigned long stack_page = 0;
+ int cpu;
+
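+ /*
+ * If the stack pointer lies within a CPU's IRQ stack, unwind against
+ * that IRQ stack; otherwise fall back to the task's own stack page.
+ */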
+ for_each_possible_cpu(cpu) {
+ if (on_irq_stack(cpu, *sp)) {
+ stack_page = (unsigned long)irq_stack[cpu];
+ break;
+ }
+ }
+
+ if (!stack_page)
+ stack_page = (unsigned long)task_stack_page(task);
+
+ return unwind_stack_by_address(stack_page, sp, pc, ra);
+}
+#endif
+
+/*
+ * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
+ */
+unsigned long get_wchan(struct task_struct *task)
+{
+ unsigned long pc = 0;
+#ifdef CONFIG_KALLSYMS
+ unsigned long sp;
+ unsigned long ra = 0;
+#endif
+
+ if (!task || task == current || task->state == TASK_RUNNING)
+ goto out;
+ if (!task_stack_page(task))
+ goto out;
+
+ pc = thread_saved_pc(task);
+
+#ifdef CONFIG_KALLSYMS
+ sp = task->thread.reg29 + schedule_mfi.frame_size;
+
+ while (in_sched_functions(pc))
+ pc = unwind_stack(task, &sp, pc, &ra);
+#endif
+
+out:
+ return pc;
+}
+
+unsigned long mips_stack_top(void)
+{
+ unsigned long top = TASK_SIZE & PAGE_MASK;
+
+ if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
+ /* One page for branch delay slot "emulation" */
+ top -= PAGE_SIZE;
+ }
+
+ /* Space for the VDSO, data page & GIC user page */
+ top -= PAGE_ALIGN(current->thread.abi->vdso->size);
+ top -= PAGE_SIZE;
+ top -= mips_gic_present() ? PAGE_SIZE : 0;
+
+ /* Space for cache colour alignment */
+ if (cpu_has_dc_aliases)
+ top -= shm_align_mask + 1;
+
+ /* Space to randomize the VDSO base */
+ if (current->flags & PF_RANDOMIZE)
+ top -= VDSO_RANDOMIZE_SIZE;
+
+ return top;
+}
+
+/*
+ * Don't forget that the stack pointer must be aligned on an 8-byte
+ * boundary for the 32-bit ABIs and on a 16-byte boundary for the
+ * 64-bit ABIs.
+ */
+unsigned long arch_align_stack(unsigned long sp)
+{
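+ /* If randomization is enabled, shift sp down by a random sub-page offset before aligning to ALMASK. */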
+ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ sp -= get_random_int() & ~PAGE_MASK;
+
+ return sp & ALMASK;
+}
+
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
+{
+ nmi_cpu_backtrace(get_irq_regs());
+ cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
+
+static void raise_backtrace(cpumask_t *mask)
+{
+ call_single_data_t *csd;
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ /*
+ * If we previously sent an IPI to the target CPU & it hasn't
+ * cleared its bit in the busy cpumask then it didn't handle
+ * our previous IPI & it's not safe for us to reuse the
+ * call_single_data_t.
+ */
+ if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+ pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+ cpu);
+ continue;
+ }
+
+ csd = &per_cpu(backtrace_csd, cpu);
+ csd->func = handle_backtrace;
+ smp_call_function_single_async(cpu, csd);
+ }
+}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+{
+ nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
+}
+
+int mips_get_process_fp_mode(struct task_struct *task)
+{
+ int value = 0;
+
+ if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
+ value |= PR_FP_MODE_FR;
+ if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
+ value |= PR_FP_MODE_FRE;
+
+ return value;
+}
+
+static long prepare_for_fp_mode_switch(void *unused)
+{
+ /*
+ * This is icky, but we use this to simply ensure that all CPUs have
+ * context switched, regardless of whether they were previously running
+ * kernel or user code. This ensures that no CPU that a mode-switching
+ * program may execute on keeps its FPU enabled (& in the old mode)
+ * throughout the mode switch.
+ */
+ return 0;
+}
+
+int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+{
+ const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
+ struct task_struct *t;
+ struct cpumask process_cpus;
+ int cpu;
+
+ /* If nothing to change, return right away, successfully. */
+ if (value == mips_get_process_fp_mode(task))
+ return 0;
+
+ /* Only accept a mode change if 64-bit FP enabled for o32. */
+ if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
+ return -EOPNOTSUPP;
+
+ /* And only for o32 tasks. */
+ if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
+ return -EOPNOTSUPP;
+
+ /* Check the value is valid */
+ if (value & ~known_bits)
+ return -EOPNOTSUPP;
+
+ /* Setting FRE without FR is not supported. */
+ if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
+ return -EOPNOTSUPP;
+
+ /* Avoid inadvertently triggering emulation */
+ if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
+ !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
+ return -EOPNOTSUPP;
+ if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
+ return -EOPNOTSUPP;
+
+ /* FR = 0 not supported in MIPS R6 */
+ if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
+ return -EOPNOTSUPP;
+
+ /* Indicate the new FP mode in each thread */
+ for_each_thread(task, t) {
+ /* Update desired FP register width */
+ if (value & PR_FP_MODE_FR) {
+ clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
+ } else {
+ set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
+ clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
+ }
+
+ /* Update desired FP single layout */
+ if (value & PR_FP_MODE_FRE)
+ set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
+ else
+ clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
+ }
+
+ /*
+ * We need to ensure that all threads in the process have switched mode
+ * before returning, in order to allow userland to not worry about
+ * races. We can do this by forcing all CPUs that any thread in the
+ * process may be running on to schedule something else - in this case
+ * prepare_for_fp_mode_switch().
+ *
+ * We begin by generating a mask of all CPUs that any thread in the
+ * process may be running on.
+ */
+ cpumask_clear(&process_cpus);
+ for_each_thread(task, t)
+ cpumask_set_cpu(task_cpu(t), &process_cpus);
+
+ /*
+ * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
+ *
+ * The CPUs may have rescheduled already since we switched mode or
+ * generated the cpumask, but that doesn't matter. If a task in this
+ * process has been scheduled out then our scheduling of
+ * prepare_for_fp_mode_switch() will simply be redundant. If it's
+ * scheduled in then it will already have picked up the new FP mode
+ * whilst doing so.
+ */
+ get_online_cpus();
+ for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
+ work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
+ put_online_cpus();
+
+ return 0;
+}
+
+#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
+void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
+{
+ unsigned int i;
+
+ for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
+ /* k0/k1 are copied as zero. */
+ if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
+ uregs[i] = 0;
+ else
+ uregs[i] = regs->regs[i - MIPS32_EF_R0];
+ }
+
+ uregs[MIPS32_EF_LO] = regs->lo;
+ uregs[MIPS32_EF_HI] = regs->hi;
+ uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
+ uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
+ uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
+ uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
+}
+#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
+
+#ifdef CONFIG_64BIT
+void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
+{
+ unsigned int i;
+
+ for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
+ /* k0/k1 are copied as zero. */
+ if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
+ uregs[i] = 0;
+ else
+ uregs[i] = regs->regs[i - MIPS64_EF_R0];
+ }
+
+ uregs[MIPS64_EF_LO] = regs->lo;
+ uregs[MIPS64_EF_HI] = regs->hi;
+ uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
+ uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
+ uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
+ uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
+}
+#endif /* CONFIG_64BIT */
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
new file mode 100644
index 000000000..6abebd57b
--- /dev/null
+++ b/arch/mips/kernel/prom.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MIPS support for CONFIG_OF device tree support
+ *
+ * Copyright (C) 2010 Cisco Systems Inc. <dediao@cisco.com>
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/memblock.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+
+#include <asm/bootinfo.h>
+#include <asm/page.h>
+#include <asm/prom.h>
+
+static char mips_machine_name[64] = "Unknown";
+
+__init void mips_set_machine_name(const char *name)
+{
+ if (name == NULL)
+ return;
+
+ strlcpy(mips_machine_name, name, sizeof(mips_machine_name));
+ pr_info("MIPS: machine is %s\n", mips_get_machine_name());
+}
+
+char *mips_get_machine_name(void)
+{
+ return mips_machine_name;
+}
+
+#ifdef CONFIG_USE_OF
+
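+ /*
+ * Scan the flattened device tree blob passed in and record the
+ * machine name it advertises.
+ */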
+void __init __dt_setup_arch(void *bph)
+{
+ if (!early_init_dt_scan(bph))
+ return;
+
+ mips_set_machine_name(of_flat_dt_get_machine_name());
+}
+
+int __init __dt_register_buses(const char *bus0, const char *bus1)
+{
+ static struct of_device_id of_ids[3];
+
+ if (!of_have_populated_dt())
+ panic("device tree not present");
+
+ strlcpy(of_ids[0].compatible, bus0, sizeof(of_ids[0].compatible));
+ if (bus1) {
+ strlcpy(of_ids[1].compatible, bus1,
+ sizeof(of_ids[1].compatible));
+ }
+
+ if (of_platform_populate(NULL, of_ids, NULL, NULL))
+ panic("failed to populate DT");
+
+ return 0;
+}
+
+#endif
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
new file mode 100644
index 000000000..db7c5be1d
--- /dev/null
+++ b/arch/mips/kernel/ptrace.c
@@ -0,0 +1,1382 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 Ross Biro
+ * Copyright (C) Linus Torvalds
+ * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999 MIPS Technologies, Inc.
+ * Copyright (C) 2000 Ulf Carlsson
+ *
+ * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
+ * binaries.
+ */
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/regset.h>
+#include <linux/smp.h>
+#include <linux/security.h>
+#include <linux/stddef.h>
+#include <linux/tracehook.h>
+#include <linux/audit.h>
+#include <linux/seccomp.h>
+#include <linux/ftrace.h>
+
+#include <asm/byteorder.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/dsp.h>
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/syscall.h>
+#include <linux/uaccess.h>
+#include <asm/bootinfo.h>
+#include <asm/reg.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+/*
+ * Called by kernel/ptrace.c when detaching.
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ /* Don't load the watchpoint registers for the ex-child. */
+ clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
+}
+
+/*
+ * Read a general register set. We always use the 64-bit format, even
+ * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
+ * Registers are sign extended to fill the available space.
+ */
+int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
+{
+ struct pt_regs *regs;
+ int i;
+
+ if (!access_ok(data, 38 * 8))
+ return -EIO;
+
+ regs = task_pt_regs(child);
+
+ for (i = 0; i < 32; i++)
+ __put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
+ __put_user((long)regs->lo, (__s64 __user *)&data->lo);
+ __put_user((long)regs->hi, (__s64 __user *)&data->hi);
+ __put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
+ __put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
+ __put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
+ __put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
+
+ return 0;
+}
+
+/*
+ * Write a general register set. As for PTRACE_GETREGS, we always use
+ * the 64-bit format. On a 32-bit kernel only the lower order half
+ * (according to endianness) will be used.
+ */
+int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
+{
+ struct pt_regs *regs;
+ int i;
+
+ if (!access_ok(data, 38 * 8))
+ return -EIO;
+
+ regs = task_pt_regs(child);
+
+ for (i = 0; i < 32; i++)
+ __get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
+ __get_user(regs->lo, (__s64 __user *)&data->lo);
+ __get_user(regs->hi, (__s64 __user *)&data->hi);
+ __get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
+
+ /* badvaddr, status, and cause may not be written. */
+
+ /* System call number may have been changed */
+ mips_syscall_update_nr(child, regs);
+
+ return 0;
+}
+
+int ptrace_get_watch_regs(struct task_struct *child,
+ struct pt_watch_regs __user *addr)
+{
+ enum pt_watch_style style;
+ int i;
+
+ if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
+ return -EIO;
+ if (!access_ok(addr, sizeof(struct pt_watch_regs)))
+ return -EIO;
+
+#ifdef CONFIG_32BIT
+ style = pt_watch_style_mips32;
+#define WATCH_STYLE mips32
+#else
+ style = pt_watch_style_mips64;
+#define WATCH_STYLE mips64
+#endif
+
+ __put_user(style, &addr->style);
+ __put_user(boot_cpu_data.watch_reg_use_cnt,
+ &addr->WATCH_STYLE.num_valid);
+ for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
+ __put_user(child->thread.watch.mips3264.watchlo[i],
+ &addr->WATCH_STYLE.watchlo[i]);
+ __put_user(child->thread.watch.mips3264.watchhi[i] &
+ (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
+ &addr->WATCH_STYLE.watchhi[i]);
+ __put_user(boot_cpu_data.watch_reg_masks[i],
+ &addr->WATCH_STYLE.watch_masks[i]);
+ }
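+ /* Zero-fill the remaining slots beyond the implemented watch registers. */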
+ for (; i < 8; i++) {
+ __put_user(0, &addr->WATCH_STYLE.watchlo[i]);
+ __put_user(0, &addr->WATCH_STYLE.watchhi[i]);
+ __put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
+ }
+
+ return 0;
+}
+
+int ptrace_set_watch_regs(struct task_struct *child,
+ struct pt_watch_regs __user *addr)
+{
+ int i;
+ int watch_active = 0;
+ unsigned long lt[NUM_WATCH_REGS];
+ u16 ht[NUM_WATCH_REGS];
+
+ if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
+ return -EIO;
+ if (!access_ok(addr, sizeof(struct pt_watch_regs)))
+ return -EIO;
+ /* Check the values. */
+ for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
+ __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
+#ifdef CONFIG_32BIT
+ if (lt[i] & __UA_LIMIT)
+ return -EINVAL;
+#else
+ if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
+ if (lt[i] & 0xffffffff80000000UL)
+ return -EINVAL;
+ } else {
+ if (lt[i] & __UA_LIMIT)
+ return -EINVAL;
+ }
+#endif
+ __get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
+ if (ht[i] & ~MIPS_WATCHHI_MASK)
+ return -EINVAL;
+ }
+ /* Install them. */
+ for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
+ if (lt[i] & MIPS_WATCHLO_IRW)
+ watch_active = 1;
+ child->thread.watch.mips3264.watchlo[i] = lt[i];
+ /* Set the G bit. */
+ child->thread.watch.mips3264.watchhi[i] = ht[i];
+ }
+
+ if (watch_active)
+ set_tsk_thread_flag(child, TIF_LOAD_WATCH);
+ else
+ clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
+
+ return 0;
+}
+
+/* regset get/set implementations */
+
+#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
+
+static int gpr32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ u32 uregs[ELF_NGREG] = {};
+
+ mips_dump_regs32(uregs, regs);
+ return membuf_write(&to, uregs, sizeof(uregs));
+}
+
+static int gpr32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ u32 uregs[ELF_NGREG];
+ unsigned start, num_regs, i;
+ int err;
+
+ start = pos / sizeof(u32);
+ num_regs = count / sizeof(u32);
+
+ if (start + num_regs > ELF_NGREG)
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
+ sizeof(uregs));
+ if (err)
+ return err;
+
+ for (i = start; i < num_regs; i++) {
+ /*
+ * Cast all values to signed here so that if this is a 64-bit
+ * kernel, the supplied 32-bit values will be sign extended.
+ */
+ switch (i) {
+ case MIPS32_EF_R1 ... MIPS32_EF_R25:
+ /* k0/k1 are ignored. */
+ case MIPS32_EF_R28 ... MIPS32_EF_R31:
+ regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
+ break;
+ case MIPS32_EF_LO:
+ regs->lo = (s32)uregs[i];
+ break;
+ case MIPS32_EF_HI:
+ regs->hi = (s32)uregs[i];
+ break;
+ case MIPS32_EF_CP0_EPC:
+ regs->cp0_epc = (s32)uregs[i];
+ break;
+ }
+ }
+
+ /* System call number may have been changed */
+ mips_syscall_update_nr(target, regs);
+
+ return 0;
+}
+
+#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
+
+#ifdef CONFIG_64BIT
+
+static int gpr64_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ u64 uregs[ELF_NGREG] = {};
+
+ mips_dump_regs64(uregs, regs);
+ return membuf_write(&to, uregs, sizeof(uregs));
+}
+
+static int gpr64_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ u64 uregs[ELF_NGREG];
+ unsigned start, num_regs, i;
+ int err;
+
+ start = pos / sizeof(u64);
+ num_regs = count / sizeof(u64);
+
+ if (start + num_regs > ELF_NGREG)
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
+ sizeof(uregs));
+ if (err)
+ return err;
+
+ for (i = start; i < num_regs; i++) {
+ switch (i) {
+ case MIPS64_EF_R1 ... MIPS64_EF_R25:
+ /* k0/k1 are ignored. */
+ case MIPS64_EF_R28 ... MIPS64_EF_R31:
+ regs->regs[i - MIPS64_EF_R0] = uregs[i];
+ break;
+ case MIPS64_EF_LO:
+ regs->lo = uregs[i];
+ break;
+ case MIPS64_EF_HI:
+ regs->hi = uregs[i];
+ break;
+ case MIPS64_EF_CP0_EPC:
+ regs->cp0_epc = uregs[i];
+ break;
+ }
+ }
+
+ /* System call number may have been changed */
+ mips_syscall_update_nr(target, regs);
+
+ return 0;
+}
+
+#endif /* CONFIG_64BIT */
+
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+/*
+ * Poke at FCSR according to its mask. Set the Cause bits even
+ * if a corresponding Enable bit is set. This will be noticed at
+ * the time the thread is switched to and SIGFPE thrown accordingly.
+ */
+static void ptrace_setfcr31(struct task_struct *child, u32 value)
+{
+ u32 fcr31;
+ u32 mask;
+
+ fcr31 = child->thread.fpu.fcr31;
+ mask = boot_cpu_data.fpu_msk31;
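+ /*
+ * Bits covered by fpu_msk31 keep their current value; all other
+ * bits are taken from the value supplied by the tracer.
+ */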
+ child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
+}
+
+int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
+{
+ int i;
+
+ if (!access_ok(data, 33 * 8))
+ return -EIO;
+
+ if (tsk_used_math(child)) {
+ union fpureg *fregs = get_fpu_regs(child);
+ for (i = 0; i < 32; i++)
+ __put_user(get_fpr64(&fregs[i], 0),
+ i + (__u64 __user *)data);
+ } else {
+ for (i = 0; i < 32; i++)
+ __put_user((__u64) -1, i + (__u64 __user *) data);
+ }
+
+ __put_user(child->thread.fpu.fcr31, data + 64);
+ __put_user(boot_cpu_data.fpu_id, data + 65);
+
+ return 0;
+}
+
+int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
+{
+ union fpureg *fregs;
+ u64 fpr_val;
+ u32 value;
+ int i;
+
+ if (!access_ok(data, 33 * 8))
+ return -EIO;
+
+ init_fp_ctx(child);
+ fregs = get_fpu_regs(child);
+
+ for (i = 0; i < 32; i++) {
+ __get_user(fpr_val, i + (__u64 __user *)data);
+ set_fpr64(&fregs[i], 0, fpr_val);
+ }
+
+ __get_user(value, data + 64);
+ ptrace_setfcr31(child, value);
+
+ /* FIR may not be written. */
+
+ return 0;
+}
+
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
+ * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots
+ * correspond 1:1 to buffer slots. Only general registers are copied.
+ */
+static void fpr_get_fpa(struct task_struct *target,
+ struct membuf *to)
+{
+ membuf_write(to, &target->thread.fpu,
+ NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
+ * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's
+ * general register slots are copied to buffer slots. Only general
+ * registers are copied.
+ */
+static void fpr_get_msa(struct task_struct *target, struct membuf *to)
+{
+ unsigned int i;
+
+ BUILD_BUG_ON(sizeof(u64) != sizeof(elf_fpreg_t));
+ for (i = 0; i < NUM_FPU_REGS; i++)
+ membuf_store(to, get_fpr64(&target->thread.fpu.fpr[i], 0));
+}
+
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR and FIR registers separately.
+ */
+static int fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+ fpr_get_fpa(target, &to);
+ else
+ fpr_get_msa(target, &to);
+
+ membuf_write(&to, &target->thread.fpu.fcr31, sizeof(u32));
+ membuf_write(&to, &boot_cpu_data.fpu_id, sizeof(u32));
+ return 0;
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP
+ * context's general register slots. Only general registers are copied.
+ */
+static int fpr_set_fpa(struct task_struct *target,
+ unsigned int *pos, unsigned int *count,
+ const void **kbuf, const void __user **ubuf)
+{
+ return user_regset_copyin(pos, count, kbuf, ubuf,
+ &target->thread.fpu,
+ 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64
+ * bits only of FP context's general register slots. Only general
+ * registers are copied.
+ */
+static int fpr_set_msa(struct task_struct *target,
+ unsigned int *pos, unsigned int *count,
+ const void **kbuf, const void __user **ubuf)
+{
+ unsigned int i;
+ u64 fpr_val;
+ int err;
+
+ BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+ for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
+ err = user_regset_copyin(pos, count, kbuf, ubuf,
+ &fpr_val, i * sizeof(elf_fpreg_t),
+ (i + 1) * sizeof(elf_fpreg_t));
+ if (err)
+ return err;
+ set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
+ }
+
+ return 0;
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR register separately. Ignore the incoming FIR register
+ * contents though, as the register is read-only.
+ *
+ * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
+ * which is supposed to have been guaranteed by the kernel before
+ * calling us, e.g. in `ptrace_regset'. We enforce that requirement,
+ * so that we can safely avoid preinitializing temporaries for
+ * partial register writes.
+ */
+static int fpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+ const int fir_pos = fcr31_pos + sizeof(u32);
+ u32 fcr31;
+ int err;
+
+ BUG_ON(count % sizeof(elf_fpreg_t));
+
+ if (pos + count > sizeof(elf_fpregset_t))
+ return -EIO;
+
+ init_fp_ctx(target);
+
+ if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+ err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
+ else
+ err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
+ if (err)
+ return err;
+
+ if (count > 0) {
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &fcr31,
+ fcr31_pos, fcr31_pos + sizeof(u32));
+ if (err)
+ return err;
+
+ ptrace_setfcr31(target, fcr31);
+ }
+
+ if (count > 0)
+ err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ fir_pos,
+ fir_pos + sizeof(u32));
+
+ return err;
+}
+
+/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */
+static int fp_mode_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ return membuf_store(&to, (int)mips_get_process_fp_mode(target));
+}
+
+/*
+ * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
+ *
+ * We optimize for the case where `count % sizeof(int) == 0', which
+ * is supposed to have been guaranteed by the kernel before calling
+ * us, e.g. in `ptrace_regset'. We enforce that requirement, so
+ * that we can safely avoid preinitializing temporaries for partial
+ * mode writes.
+ */
+static int fp_mode_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int fp_mode;
+ int err;
+
+ BUG_ON(count % sizeof(int));
+
+ if (pos + count > sizeof(fp_mode))
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
+ sizeof(fp_mode));
+ if (err)
+ return err;
+
+ if (count > 0)
+ err = mips_set_process_fp_mode(target, fp_mode);
+
+ return err;
+}
+
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+struct msa_control_regs {
+ unsigned int fir;
+ unsigned int fcsr;
+ unsigned int msair;
+ unsigned int msacsr;
+};
+
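+ /*
+ * Copy each FP register into the regset buffer, writing at most
+ * live_sz bytes of live context per register and padding the rest
+ * of each regset-sized slot with all-ones bits.
+ */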
+static void copy_pad_fprs(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf *to,
+ unsigned int live_sz)
+{
+ int i, j;
+ unsigned long long fill = ~0ull;
+ unsigned int cp_sz, pad_sz;
+
+ cp_sz = min(regset->size, live_sz);
+ pad_sz = regset->size - cp_sz;
+ WARN_ON(pad_sz % sizeof(fill));
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
+ for (j = 0; j < (pad_sz / sizeof(fill)); j++)
+ membuf_store(to, fill);
+ }
+}
+
+static int msa_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ const unsigned int wr_size = NUM_FPU_REGS * regset->size;
+ const struct msa_control_regs ctrl_regs = {
+ .fir = boot_cpu_data.fpu_id,
+ .fcsr = target->thread.fpu.fcr31,
+ .msair = boot_cpu_data.msa_id,
+ .msacsr = target->thread.fpu.msacsr,
+ };
+
+ if (!tsk_used_math(target)) {
+ /* The task hasn't used FP or MSA, fill with 0xff */
+ copy_pad_fprs(target, regset, &to, 0);
+ } else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
+ /* Copy scalar FP context, fill the rest with 0xff */
+ copy_pad_fprs(target, regset, &to, 8);
+ } else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
+ /* Trivially copy the vector registers */
+ membuf_write(&to, &target->thread.fpu.fpr, wr_size);
+ } else {
+ /* Copy as much context as possible, fill the rest with 0xff */
+ copy_pad_fprs(target, regset, &to,
+ sizeof(target->thread.fpu.fpr[0]));
+ }
+
+ return membuf_write(&to, &ctrl_regs, sizeof(ctrl_regs));
+}
+
+static int msa_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const unsigned int wr_size = NUM_FPU_REGS * regset->size;
+ struct msa_control_regs ctrl_regs;
+ unsigned int cp_sz;
+ int i, err, start;
+
+ init_fp_ctx(target);
+
+ if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
+ /* Trivially copy the vector registers */
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.fpr,
+ 0, wr_size);
+ } else {
+ /* Copy as much context as possible */
+ cp_sz = min_t(unsigned int, regset->size,
+ sizeof(target->thread.fpu.fpr[0]));
+
+ i = start = err = 0;
+ for (; i < NUM_FPU_REGS; i++, start += regset->size) {
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.fpr[i],
+ start, start + cp_sz);
+ }
+ }
+
+ if (!err)
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
+ wr_size, wr_size + sizeof(ctrl_regs));
+ if (!err) {
+ target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
+ target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
+ }
+
+ return err;
+}
+
+#endif /* CONFIG_CPU_HAS_MSA */
+
+#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
+
+/*
+ * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
+ */
+static int dsp32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ u32 dspregs[NUM_DSP_REGS + 1];
+ unsigned int i;
+
+ BUG_ON(to.left % sizeof(u32));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ for (i = 0; i < NUM_DSP_REGS; i++)
+ dspregs[i] = target->thread.dsp.dspr[i];
+ dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
+ return membuf_write(&to, dspregs, sizeof(dspregs));
+}
+
+/*
+ * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
+ */
+static int dsp32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned int start, num_regs, i;
+ u32 dspregs[NUM_DSP_REGS + 1];
+ int err;
+
+ BUG_ON(count % sizeof(u32));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ start = pos / sizeof(u32);
+ num_regs = count / sizeof(u32);
+
+ if (start + num_regs > NUM_DSP_REGS + 1)
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+ sizeof(dspregs));
+ if (err)
+ return err;
+
+ for (i = start; i < num_regs; i++)
+ switch (i) {
+ case 0 ... NUM_DSP_REGS - 1:
+ target->thread.dsp.dspr[i] = (s32)dspregs[i];
+ break;
+ case NUM_DSP_REGS:
+ target->thread.dsp.dspcontrol = (s32)dspregs[i];
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
+ */
+static int dsp64_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ u64 dspregs[NUM_DSP_REGS + 1];
+ unsigned int i;
+
+ BUG_ON(to.left % sizeof(u64));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ for (i = 0; i < NUM_DSP_REGS; i++)
+ dspregs[i] = target->thread.dsp.dspr[i];
+ dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
+ return membuf_write(&to, dspregs, sizeof(dspregs));
+}
+
+/*
+ * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
+ */
+static int dsp64_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned int start, num_regs, i;
+ u64 dspregs[NUM_DSP_REGS + 1];
+ int err;
+
+ BUG_ON(count % sizeof(u64));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ start = pos / sizeof(u64);
+ num_regs = count / sizeof(u64);
+
+ if (start + num_regs > NUM_DSP_REGS + 1)
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+ sizeof(dspregs));
+ if (err)
+ return err;
+
+ for (i = start; i < num_regs; i++)
+ switch (i) {
+ case 0 ... NUM_DSP_REGS - 1:
+ target->thread.dsp.dspr[i] = dspregs[i];
+ break;
+ case NUM_DSP_REGS:
+ target->thread.dsp.dspcontrol = dspregs[i];
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_64BIT */
+
+/*
+ * Determine whether the DSP context is present.
+ */
+static int dsp_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
+}
+
+enum mips_regset {
+ REGSET_GPR,
+ REGSET_DSP,
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ REGSET_FPR,
+ REGSET_FP_MODE,
+#endif
+#ifdef CONFIG_CPU_HAS_MSA
+ REGSET_MSA,
+#endif
+};
+
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(reg, r) { \
+ .name = #reg, \
+ .offset = offsetof(struct pt_regs, r) \
+}
+
+#define REG_OFFSET_END { \
+ .name = NULL, \
+ .offset = 0 \
+}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(r0, regs[0]),
+ REG_OFFSET_NAME(r1, regs[1]),
+ REG_OFFSET_NAME(r2, regs[2]),
+ REG_OFFSET_NAME(r3, regs[3]),
+ REG_OFFSET_NAME(r4, regs[4]),
+ REG_OFFSET_NAME(r5, regs[5]),
+ REG_OFFSET_NAME(r6, regs[6]),
+ REG_OFFSET_NAME(r7, regs[7]),
+ REG_OFFSET_NAME(r8, regs[8]),
+ REG_OFFSET_NAME(r9, regs[9]),
+ REG_OFFSET_NAME(r10, regs[10]),
+ REG_OFFSET_NAME(r11, regs[11]),
+ REG_OFFSET_NAME(r12, regs[12]),
+ REG_OFFSET_NAME(r13, regs[13]),
+ REG_OFFSET_NAME(r14, regs[14]),
+ REG_OFFSET_NAME(r15, regs[15]),
+ REG_OFFSET_NAME(r16, regs[16]),
+ REG_OFFSET_NAME(r17, regs[17]),
+ REG_OFFSET_NAME(r18, regs[18]),
+ REG_OFFSET_NAME(r19, regs[19]),
+ REG_OFFSET_NAME(r20, regs[20]),
+ REG_OFFSET_NAME(r21, regs[21]),
+ REG_OFFSET_NAME(r22, regs[22]),
+ REG_OFFSET_NAME(r23, regs[23]),
+ REG_OFFSET_NAME(r24, regs[24]),
+ REG_OFFSET_NAME(r25, regs[25]),
+ REG_OFFSET_NAME(r26, regs[26]),
+ REG_OFFSET_NAME(r27, regs[27]),
+ REG_OFFSET_NAME(r28, regs[28]),
+ REG_OFFSET_NAME(r29, regs[29]),
+ REG_OFFSET_NAME(r30, regs[30]),
+ REG_OFFSET_NAME(r31, regs[31]),
+ REG_OFFSET_NAME(c0_status, cp0_status),
+ REG_OFFSET_NAME(hi, hi),
+ REG_OFFSET_NAME(lo, lo),
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ REG_OFFSET_NAME(acx, acx),
+#endif
+ REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
+ REG_OFFSET_NAME(c0_cause, cp0_cause),
+ REG_OFFSET_NAME(c0_epc, cp0_epc),
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ REG_OFFSET_NAME(mpl0, mpl[0]),
+ REG_OFFSET_NAME(mpl1, mpl[1]),
+ REG_OFFSET_NAME(mpl2, mpl[2]),
+ REG_OFFSET_NAME(mtp0, mtp[0]),
+ REG_OFFSET_NAME(mtp1, mtp[1]),
+ REG_OFFSET_NAME(mtp2, mtp[2]),
+#endif
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
+
+static const struct user_regset mips_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned int),
+ .align = sizeof(unsigned int),
+ .regset_get = gpr32_get,
+ .set = gpr32_set,
+ },
+ [REGSET_DSP] = {
+ .core_note_type = NT_MIPS_DSP,
+ .n = NUM_DSP_REGS + 1,
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = dsp32_get,
+ .set = dsp32_set,
+ .active = dsp_active,
+ },
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .regset_get = fpr_get,
+ .set = fpr_set,
+ },
+ [REGSET_FP_MODE] = {
+ .core_note_type = NT_MIPS_FP_MODE,
+ .n = 1,
+ .size = sizeof(int),
+ .align = sizeof(int),
+ .regset_get = fp_mode_get,
+ .set = fp_mode_set,
+ },
+#endif
+#ifdef CONFIG_CPU_HAS_MSA
+ [REGSET_MSA] = {
+ .core_note_type = NT_MIPS_MSA,
+ .n = NUM_FPU_REGS + 1,
+ .size = 16,
+ .align = 16,
+ .regset_get = msa_get,
+ .set = msa_set,
+ },
+#endif
+};
+
+static const struct user_regset_view user_mips_view = {
+ .name = "mips",
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = mips_regsets,
+ .n = ARRAY_SIZE(mips_regsets),
+};
+
+#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
+
+#ifdef CONFIG_64BIT
+
+static const struct user_regset mips64_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+ .regset_get = gpr64_get,
+ .set = gpr64_set,
+ },
+ [REGSET_DSP] = {
+ .core_note_type = NT_MIPS_DSP,
+ .n = NUM_DSP_REGS + 1,
+ .size = sizeof(u64),
+ .align = sizeof(u64),
+ .regset_get = dsp64_get,
+ .set = dsp64_set,
+ .active = dsp_active,
+ },
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ [REGSET_FP_MODE] = {
+ .core_note_type = NT_MIPS_FP_MODE,
+ .n = 1,
+ .size = sizeof(int),
+ .align = sizeof(int),
+ .regset_get = fp_mode_get,
+ .set = fp_mode_set,
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .regset_get = fpr_get,
+ .set = fpr_set,
+ },
+#endif
+#ifdef CONFIG_CPU_HAS_MSA
+ [REGSET_MSA] = {
+ .core_note_type = NT_MIPS_MSA,
+ .n = NUM_FPU_REGS + 1,
+ .size = 16,
+ .align = 16,
+ .regset_get = msa_get,
+ .set = msa_set,
+ },
+#endif
+};
+
+static const struct user_regset_view user_mips64_view = {
+ .name = "mips64",
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = mips64_regsets,
+ .n = ARRAY_SIZE(mips64_regsets),
+};
+
+#ifdef CONFIG_MIPS32_N32
+
+static const struct user_regset_view user_mipsn32_view = {
+ .name = "mipsn32",
+ .e_flags = EF_MIPS_ABI2,
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = mips64_regsets,
+ .n = ARRAY_SIZE(mips64_regsets),
+};
+
+#endif /* CONFIG_MIPS32_N32 */
+
+#endif /* CONFIG_64BIT */
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+#ifdef CONFIG_32BIT
+ return &user_mips_view;
+#else
+#ifdef CONFIG_MIPS32_O32
+ if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
+ return &user_mips_view;
+#endif
+#ifdef CONFIG_MIPS32_N32
+ if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
+ return &user_mipsn32_view;
+#endif
+ return &user_mips64_view;
+#endif
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret;
+ void __user *addrp = (void __user *) addr;
+ void __user *datavp = (void __user *) data;
+ unsigned long __user *datalp = (void __user *) data;
+
+ switch (request) {
+ /* when I and D space are separate, these will need to be fixed. */
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
+ break;
+
+ /* Read the word at location addr in the USER area. */
+ case PTRACE_PEEKUSR: {
+ struct pt_regs *regs;
+ unsigned long tmp = 0;
+
+ regs = task_pt_regs(child);
+ ret = 0; /* Default return value. */
+
+ switch (addr) {
+ case 0 ... 31:
+ tmp = regs->regs[addr];
+ break;
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case FPR_BASE ... FPR_BASE + 31: {
+ union fpureg *fregs;
+
+ if (!tsk_used_math(child)) {
+ /* FP not yet used */
+ tmp = -1;
+ break;
+ }
+ fregs = get_fpu_regs(child);
+
+#ifdef CONFIG_32BIT
+ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
+ * registers.
+ */
+ tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1);
+ break;
+ }
+#endif
+ tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
+ break;
+ }
+ case FPC_CSR:
+ tmp = child->thread.fpu.fcr31;
+ break;
+ case FPC_EIR:
+ /* implementation / version register */
+ tmp = boot_cpu_data.fpu_id;
+ break;
+#endif
+ case PC:
+ tmp = regs->cp0_epc;
+ break;
+ case CAUSE:
+ tmp = regs->cp0_cause;
+ break;
+ case BADVADDR:
+ tmp = regs->cp0_badvaddr;
+ break;
+ case MMHI:
+ tmp = regs->hi;
+ break;
+ case MMLO:
+ tmp = regs->lo;
+ break;
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ case ACX:
+ tmp = regs->acx;
+ break;
+#endif
+ case DSP_BASE ... DSP_BASE + 5: {
+ dspreg_t *dregs;
+
+ if (!cpu_has_dsp) {
+ tmp = 0;
+ ret = -EIO;
+ goto out;
+ }
+ dregs = __get_dsp_regs(child);
+ tmp = dregs[addr - DSP_BASE];
+ break;
+ }
+ case DSP_CONTROL:
+ if (!cpu_has_dsp) {
+ tmp = 0;
+ ret = -EIO;
+ goto out;
+ }
+ tmp = child->thread.dsp.dspcontrol;
+ break;
+ default:
+ tmp = 0;
+ ret = -EIO;
+ goto out;
+ }
+ ret = put_user(tmp, datalp);
+ break;
+ }
+
+ /* when I and D space are separate, this will have to be fixed. */
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ ret = generic_ptrace_pokedata(child, addr, data);
+ break;
+
+ case PTRACE_POKEUSR: {
+ struct pt_regs *regs;
+ ret = 0;
+ regs = task_pt_regs(child);
+
+ switch (addr) {
+ case 0 ... 31:
+ regs->regs[addr] = data;
+ /* System call number may have been changed */
+ if (addr == 2)
+ mips_syscall_update_nr(child, regs);
+ else if (addr == 4 &&
+ mips_syscall_is_indirect(child, regs))
+ mips_syscall_update_nr(child, regs);
+ break;
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case FPR_BASE ... FPR_BASE + 31: {
+ union fpureg *fregs = get_fpu_regs(child);
+
+ init_fp_ctx(child);
+#ifdef CONFIG_32BIT
+ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
+ * registers.
+ */
+ set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1, data);
+ break;
+ }
+#endif
+ set_fpr64(&fregs[addr - FPR_BASE], 0, data);
+ break;
+ }
+ case FPC_CSR:
+ init_fp_ctx(child);
+ ptrace_setfcr31(child, data);
+ break;
+#endif
+ case PC:
+ regs->cp0_epc = data;
+ break;
+ case MMHI:
+ regs->hi = data;
+ break;
+ case MMLO:
+ regs->lo = data;
+ break;
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ case ACX:
+ regs->acx = data;
+ break;
+#endif
+ case DSP_BASE ... DSP_BASE + 5: {
+ dspreg_t *dregs;
+
+ if (!cpu_has_dsp) {
+ ret = -EIO;
+ break;
+ }
+
+ dregs = __get_dsp_regs(child);
+ dregs[addr - DSP_BASE] = data;
+ break;
+ }
+ case DSP_CONTROL:
+ if (!cpu_has_dsp) {
+ ret = -EIO;
+ break;
+ }
+ child->thread.dsp.dspcontrol = data;
+ break;
+ default:
+ /* The rest are not allowed. */
+ ret = -EIO;
+ break;
+ }
+ break;
+ }
+
+ case PTRACE_GETREGS:
+ ret = ptrace_getregs(child, datavp);
+ break;
+
+ case PTRACE_SETREGS:
+ ret = ptrace_setregs(child, datavp);
+ break;
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case PTRACE_GETFPREGS:
+ ret = ptrace_getfpregs(child, datavp);
+ break;
+
+ case PTRACE_SETFPREGS:
+ ret = ptrace_setfpregs(child, datavp);
+ break;
+#endif
+ case PTRACE_GET_THREAD_AREA:
+ ret = put_user(task_thread_info(child)->tp_value, datalp);
+ break;
+
+ case PTRACE_GET_WATCH_REGS:
+ ret = ptrace_get_watch_regs(child, addrp);
+ break;
+
+ case PTRACE_SET_WATCH_REGS:
+ ret = ptrace_set_watch_regs(child, addrp);
+ break;
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+ out:
+ return ret;
+}
+
+/*
+ * Notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+ */
+asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
+{
+ user_exit();
+
+ current_thread_info()->syscall = syscall;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+ if (tracehook_report_syscall_entry(regs))
+ return -1;
+ syscall = current_thread_info()->syscall;
+ }
+
+#ifdef CONFIG_SECCOMP
+ if (unlikely(test_thread_flag(TIF_SECCOMP))) {
+ int ret, i;
+ struct seccomp_data sd;
+ unsigned long args[6];
+
+ sd.nr = syscall;
+ sd.arch = syscall_get_arch(current);
+ syscall_get_arguments(current, regs, args);
+ for (i = 0; i < 6; i++)
+ sd.args[i] = args[i];
+ sd.instruction_pointer = KSTK_EIP(current);
+
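+ /* Let the attached seccomp filter inspect the call; -1 means skip it. */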
+ ret = __secure_computing(&sd);
+ if (ret == -1)
+ return ret;
+ syscall = current_thread_info()->syscall;
+ }
+#endif
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->regs[2]);
+
+ audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
+ regs->regs[6], regs->regs[7]);
+
+ /*
+ * Negative syscall numbers are mistaken for rejected syscalls, but
+ * won't have had the return value set appropriately, so we do so now.
+ */
+ if (syscall < 0)
+ syscall_set_return_value(current, regs, -ENOSYS, 0);
+ return syscall;
+}
+
+/*
+ * Notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+ */
+asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+{
+ /*
+ * We may come here right after calling schedule_user()
+ * or do_notify_resume(), in which case we can be in RCU
+ * user mode.
+ */
+ user_exit();
+
+ audit_syscall_exit(regs);
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs_return_value(regs));
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, 0);
+
+ user_enter();
+}
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
new file mode 100644
index 000000000..afcf27a87
--- /dev/null
+++ b/arch/mips/kernel/ptrace32.c
@@ -0,0 +1,317 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 Ross Biro
+ * Copyright (C) Linus Torvalds
+ * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999 MIPS Technologies, Inc.
+ * Copyright (C) 2000 Ulf Carlsson
+ *
+ * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
+ * binaries.
+ */
+#include <linux/compiler.h>
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/smp.h>
+#include <linux/security.h>
+
+#include <asm/cpu.h>
+#include <asm/dsp.h>
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/page.h>
+#include <asm/reg.h>
+#include <asm/syscall.h>
+#include <linux/uaccess.h>
+#include <asm/bootinfo.h>
+
+/*
+ * Tracing a 32-bit process with a 64-bit strace and vice versa will not
+ * work. I don't know how to fix this.
+ */
+long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ compat_ulong_t caddr, compat_ulong_t cdata)
+{
+ int addr = caddr;
+ int data = cdata;
+ int ret;
+
+ switch (request) {
+
+ /*
+ * Read 4 bytes of the other process' storage
+ * data is a pointer specifying where the user wants the
+ * 4 bytes copied into
+	 * addr is a pointer in the user's storage that contains an 8-byte
+	 * address in the other process of the 4 bytes to be read
+ * (this is run in a 32-bit process looking at a 64-bit process)
+ * when I and D space are separate, these will need to be fixed.
+ */
+ case PTRACE_PEEKTEXT_3264:
+ case PTRACE_PEEKDATA_3264: {
+ u32 tmp;
+ int copied;
+ u32 __user * addrOthers;
+
+ ret = -EIO;
+
+ /* Get the addr in the other process that we want to read */
+ if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0)
+ break;
+
+ copied = ptrace_access_vm(child, (u64)addrOthers, &tmp,
+ sizeof(tmp), FOLL_FORCE);
+ if (copied != sizeof(tmp))
+ break;
+ ret = put_user(tmp, (u32 __user *) (unsigned long) data);
+ break;
+ }
+
+ /* Read the word at location addr in the USER area. */
+ case PTRACE_PEEKUSR: {
+ struct pt_regs *regs;
+ unsigned int tmp;
+
+ regs = task_pt_regs(child);
+ ret = 0; /* Default return value. */
+
+ switch (addr) {
+ case 0 ... 31:
+ tmp = regs->regs[addr];
+ break;
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case FPR_BASE ... FPR_BASE + 31: {
+ union fpureg *fregs;
+
+ if (!tsk_used_math(child)) {
+ /* FP not yet used */
+ tmp = -1;
+ break;
+ }
+ fregs = get_fpu_regs(child);
+ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
+ * registers.
+ */
+ tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1);
+ break;
+ }
+ tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
+ break;
+ }
+ case FPC_CSR:
+ tmp = child->thread.fpu.fcr31;
+ break;
+ case FPC_EIR:
+ /* implementation / version register */
+ tmp = boot_cpu_data.fpu_id;
+ break;
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+ case PC:
+ tmp = regs->cp0_epc;
+ break;
+ case CAUSE:
+ tmp = regs->cp0_cause;
+ break;
+ case BADVADDR:
+ tmp = regs->cp0_badvaddr;
+ break;
+ case MMHI:
+ tmp = regs->hi;
+ break;
+ case MMLO:
+ tmp = regs->lo;
+ break;
+ case DSP_BASE ... DSP_BASE + 5: {
+ dspreg_t *dregs;
+
+ if (!cpu_has_dsp) {
+ tmp = 0;
+ ret = -EIO;
+ goto out;
+ }
+ dregs = __get_dsp_regs(child);
+ tmp = dregs[addr - DSP_BASE];
+ break;
+ }
+ case DSP_CONTROL:
+ if (!cpu_has_dsp) {
+ tmp = 0;
+ ret = -EIO;
+ goto out;
+ }
+ tmp = child->thread.dsp.dspcontrol;
+ break;
+ default:
+ tmp = 0;
+ ret = -EIO;
+ goto out;
+ }
+ ret = put_user(tmp, (unsigned __user *) (unsigned long) data);
+ break;
+ }
+
+ /*
+ * Write 4 bytes into the other process' storage
+ * data is the 4 bytes that the user wants written
+ * addr is a pointer in the user's storage that contains an
+	 *  8-byte address in the other process where the 4 bytes
+	 *  are to be written
+ * (this is run in a 32-bit process looking at a 64-bit process)
+ * when I and D space are separate, these will need to be fixed.
+ */
+ case PTRACE_POKETEXT_3264:
+ case PTRACE_POKEDATA_3264: {
+ u32 __user * addrOthers;
+
+ /* Get the addr in the other process that we want to write into */
+ ret = -EIO;
+ if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0)
+ break;
+ ret = 0;
+ if (ptrace_access_vm(child, (u64)addrOthers, &data,
+ sizeof(data),
+ FOLL_FORCE | FOLL_WRITE) == sizeof(data))
+ break;
+ ret = -EIO;
+ break;
+ }
+
+ case PTRACE_POKEUSR: {
+ struct pt_regs *regs;
+ ret = 0;
+ regs = task_pt_regs(child);
+
+ switch (addr) {
+ case 0 ... 31:
+ regs->regs[addr] = data;
+ /* System call number may have been changed */
+ if (addr == 2)
+ mips_syscall_update_nr(child, regs);
+ else if (addr == 4 &&
+ mips_syscall_is_indirect(child, regs))
+ mips_syscall_update_nr(child, regs);
+ break;
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case FPR_BASE ... FPR_BASE + 31: {
+ union fpureg *fregs = get_fpu_regs(child);
+
+ if (!tsk_used_math(child)) {
+ /* FP not yet used */
+ memset(&child->thread.fpu, ~0,
+ sizeof(child->thread.fpu));
+ child->thread.fpu.fcr31 = 0;
+ }
+ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
+ * registers.
+ */
+ set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1, data);
+ break;
+ }
+ set_fpr64(&fregs[addr - FPR_BASE], 0, data);
+ break;
+ }
+ case FPC_CSR:
+ child->thread.fpu.fcr31 = data;
+ break;
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+ case PC:
+ regs->cp0_epc = data;
+ break;
+ case MMHI:
+ regs->hi = data;
+ break;
+ case MMLO:
+ regs->lo = data;
+ break;
+ case DSP_BASE ... DSP_BASE + 5: {
+ dspreg_t *dregs;
+
+ if (!cpu_has_dsp) {
+ ret = -EIO;
+ break;
+ }
+
+ dregs = __get_dsp_regs(child);
+ dregs[addr - DSP_BASE] = data;
+ break;
+ }
+ case DSP_CONTROL:
+ if (!cpu_has_dsp) {
+ ret = -EIO;
+ break;
+ }
+ child->thread.dsp.dspcontrol = data;
+ break;
+ default:
+ /* The rest are not allowed. */
+ ret = -EIO;
+ break;
+ }
+ break;
+ }
+
+ case PTRACE_GETREGS:
+ ret = ptrace_getregs(child,
+ (struct user_pt_regs __user *) (__u64) data);
+ break;
+
+ case PTRACE_SETREGS:
+ ret = ptrace_setregs(child,
+ (struct user_pt_regs __user *) (__u64) data);
+ break;
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case PTRACE_GETFPREGS:
+ ret = ptrace_getfpregs(child, (__u32 __user *) (__u64) data);
+ break;
+
+ case PTRACE_SETFPREGS:
+ ret = ptrace_setfpregs(child, (__u32 __user *) (__u64) data);
+ break;
+#endif
+ case PTRACE_GET_THREAD_AREA:
+ ret = put_user(task_thread_info(child)->tp_value,
+ (unsigned int __user *) (unsigned long) data);
+ break;
+
+ case PTRACE_GET_THREAD_AREA_3264:
+ ret = put_user(task_thread_info(child)->tp_value,
+ (unsigned long __user *) (unsigned long) data);
+ break;
+
+ case PTRACE_GET_WATCH_REGS:
+ ret = ptrace_get_watch_regs(child,
+ (struct pt_watch_regs __user *) (unsigned long) addr);
+ break;
+
+ case PTRACE_SET_WATCH_REGS:
+ ret = ptrace_set_watch_regs(child,
+ (struct pt_watch_regs __user *) (unsigned long) addr);
+ break;
+
+ default:
+ ret = compat_ptrace_request(child, request, addr, data);
+ break;
+ }
+out:
+ return ret;
+}
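
As an aside, the TIF_32BIT_FPREGS branches above depend on the FR=0 register model, in which odd-numbered FP registers are views onto the upper halves of the even-numbered 64-bit registers. A rough illustration of that pairing follows; fpr32_view() is a hypothetical helper for exposition, not the kernel's get_fpr32().

#include <stdint.h>

/* With 32-bit FP registers (FR=0), register 2n+1 aliases the upper
 * 32 bits of the 64-bit storage backing register 2n. */
static uint32_t fpr32_view(const uint64_t *fpr64, unsigned int idx)
{
	uint64_t pair = fpr64[idx & ~1u];	/* backing even register */

	return (idx & 1) ? (uint32_t)(pair >> 32) : (uint32_t)pair;
}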
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
new file mode 100644
index 000000000..cbf6db98c
--- /dev/null
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -0,0 +1,130 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1998 by Ralf Baechle
+ *
+ * Multi-arch abstraction and asm macros for easier reading:
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ *
+ * Further modifications to make this work:
+ * Copyright (c) 1998 Harald Koerfgen
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/errno.h>
+#include <asm/export.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+
+#define EX(a,b) \
+9: a,##b; \
+ .section __ex_table,"a"; \
+ PTR 9b,fault; \
+ .previous
+
+#define EX2(a,b) \
+9: a,##b; \
+ .section __ex_table,"a"; \
+ PTR 9b,fault; \
+ PTR 9b+4,fault; \
+ .previous
+
+ .set mips1
+
+/*
+ * Save a thread's fp context.
+ */
+LEAF(_save_fp)
+EXPORT_SYMBOL(_save_fp)
+ fpu_save_single a0, t1 # clobbers t1
+ jr ra
+ END(_save_fp)
+
+/*
+ * Restore a thread's fp context.
+ */
+LEAF(_restore_fp)
+ fpu_restore_single a0, t1 # clobbers t1
+ jr ra
+ END(_restore_fp)
+
+ .set noreorder
+
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
+LEAF(_save_fp_context)
+ .set push
+ SET_HARDFLOAT
+ li v0, 0 # assume success
+ cfc1 t1, fcr31
+ EX2(s.d $f0, 0(a0))
+ EX2(s.d $f2, 16(a0))
+ EX2(s.d $f4, 32(a0))
+ EX2(s.d $f6, 48(a0))
+ EX2(s.d $f8, 64(a0))
+ EX2(s.d $f10, 80(a0))
+ EX2(s.d $f12, 96(a0))
+ EX2(s.d $f14, 112(a0))
+ EX2(s.d $f16, 128(a0))
+ EX2(s.d $f18, 144(a0))
+ EX2(s.d $f20, 160(a0))
+ EX2(s.d $f22, 176(a0))
+ EX2(s.d $f24, 192(a0))
+ EX2(s.d $f26, 208(a0))
+ EX2(s.d $f28, 224(a0))
+ EX2(s.d $f30, 240(a0))
+ jr ra
+ EX(sw t1, (a1))
+ .set pop
+ END(_save_fp_context)
+
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
+ */
+LEAF(_restore_fp_context)
+ .set push
+ SET_HARDFLOAT
+ li v0, 0 # assume success
+ EX(lw t0, (a1))
+ EX2(l.d $f0, 0(a0))
+ EX2(l.d $f2, 16(a0))
+ EX2(l.d $f4, 32(a0))
+ EX2(l.d $f6, 48(a0))
+ EX2(l.d $f8, 64(a0))
+ EX2(l.d $f10, 80(a0))
+ EX2(l.d $f12, 96(a0))
+ EX2(l.d $f14, 112(a0))
+ EX2(l.d $f16, 128(a0))
+ EX2(l.d $f18, 144(a0))
+ EX2(l.d $f20, 160(a0))
+ EX2(l.d $f22, 176(a0))
+ EX2(l.d $f24, 192(a0))
+ EX2(l.d $f26, 208(a0))
+ EX2(l.d $f28, 224(a0))
+ EX2(l.d $f30, 240(a0))
+ jr ra
+ ctc1 t0, fcr31
+ .set pop
+ END(_restore_fp_context)
+ .set reorder
+
+ .type fault, @function
+ .ent fault
+fault: li v0, -EFAULT
+ jr ra
+ .end fault
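
Each EX/EX2 use above emits an __ex_table entry pairing the address of a potentially faulting load/store with the fault: stub, so a bad sigcontext pointer makes the routine return -EFAULT instead of oopsing. Conceptually, the emitted PTR pairs have this shape (an illustrative layout, not a kernel header):

struct ex_table_entry {
	unsigned long insn;	/* address of the faulting s.d/l.d/sw/lw */
	unsigned long fixup;	/* where to resume: the fault: label */
};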
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
new file mode 100644
index 000000000..71b1aafae
--- /dev/null
+++ b/arch/mips/kernel/r2300_switch.S
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * r2300_switch.S: R2300 specific task switching code.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle
+ * Copyright (C) 1994, 1995, 1996 by Andreas Busse
+ *
+ * Multi-cpu abstraction and macros for easier reading:
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ *
+ * Further modifications to make this work:
+ * Copyright (c) 1998-2000 Harald Koerfgen
+ */
+#include <asm/asm.h>
+#include <asm/cachectl.h>
+#include <asm/export.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+#include <asm/asmmacro.h>
+
+ .set mips1
+ .align 5
+
+/*
+ * task_struct *resume(task_struct *prev, task_struct *next,
+ * struct thread_info *next_ti)
+ */
+LEAF(resume)
+ mfc0 t1, CP0_STATUS
+ sw t1, THREAD_STATUS(a0)
+ cpu_save_nonscratch a0
+ sw ra, THREAD_REG31(a0)
+
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ PTR_LA t8, __stack_chk_guard
+ LONG_L t9, TASK_STACK_CANARY(a1)
+ LONG_S t9, 0(t8)
+#endif
+
+ /*
+ * The order of restoring the registers takes care of the race
+ * updating $28, $29 and kernelsp without disabling ints.
+ */
+ move $28, a2
+ cpu_restore_nonscratch a1
+
+ addiu t1, $28, _THREAD_SIZE - 32
+ sw t1, kernelsp
+
+ mfc0 t1, CP0_STATUS /* Do we really need this? */
+ li a3, 0xff01
+ and t1, a3
+ lw a2, THREAD_STATUS(a1)
+ nor a3, $0, a3
+ and a2, a3
+ or a2, t1
+ mtc0 a2, CP0_STATUS
+ move v0, a0
+ jr ra
+ END(resume)
diff --git a/arch/mips/kernel/r4k-bugs64.c b/arch/mips/kernel/r4k-bugs64.c
new file mode 100644
index 000000000..1ff19f1ea
--- /dev/null
+++ b/arch/mips/kernel/r4k-bugs64.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2003, 2004, 2007 Maciej W. Rozycki
+ */
+#include <linux/context_tracking.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/stddef.h>
+
+#include <asm/bugs.h>
+#include <asm/compiler.h>
+#include <asm/cpu.h>
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+#include <asm/setup.h>
+
+static char bug64hit[] __initdata =
+ "reliable operation impossible!\n%s";
+static char nowar[] __initdata =
+ "Please report to <linux-mips@linux-mips.org>.";
+static char r4kwar[] __initdata =
+ "Enable CPU_R4000_WORKAROUNDS to rectify.";
+static char daddiwar[] __initdata =
+ "Enable CPU_DADDI_WORKAROUNDS to rectify.";
+
+static __always_inline __init
+void align_mod(const int align, const int mod)
+{
+ asm volatile(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ ".balign %0\n\t"
+ ".rept %1\n\t"
+ "nop\n\t"
+ ".endr\n\t"
+ ".set pop"
+ :
+ : "n"(align), "n"(mod));
+}
+
+static __always_inline __init
+void mult_sh_align_mod(long *v1, long *v2, long *w,
+ const int align, const int mod)
+{
+ unsigned long flags;
+ int m1, m2;
+ long p, s, lv1, lv2, lw;
+
+ /*
+ * We want the multiply and the shift to be isolated from the
+ * rest of the code to disable gcc optimizations. Hence the
+	 * asm statements that execute nothing, but keep gcc from knowing
+ * what the values of m1, m2 and s are and what lv2 and p are
+ * used for.
+ */
+
+ local_irq_save(flags);
+ /*
+ * The following code leads to a wrong result of the first
+ * dsll32 when executed on R4000 rev. 2.2 or 3.0 (PRId
+ * 00000422 or 00000430, respectively).
+ *
+ * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
+ * 3.0" by MIPS Technologies, Inc., errata #16 and #28 for
+ * details. I got no permission to duplicate them here,
+ * sigh... --macro
+ */
+ asm volatile(
+ ""
+ : "=r" (m1), "=r" (m2), "=r" (s)
+ : "0" (5), "1" (8), "2" (5));
+ align_mod(align, mod);
+ /*
+ * The trailing nop is needed to fulfill the two-instruction
+	 * requirement between reading hi/lo and starting a mult/div.
+	 * Leaving it out may cause gas to insert a nop itself, breaking
+ * the desired alignment of the next chunk.
+ */
+ asm volatile(
+ ".set push\n\t"
+ ".set noat\n\t"
+ ".set noreorder\n\t"
+ ".set nomacro\n\t"
+ "mult %2, %3\n\t"
+ "dsll32 %0, %4, %5\n\t"
+ "mflo $0\n\t"
+ "dsll32 %1, %4, %5\n\t"
+ "nop\n\t"
+ ".set pop"
+ : "=&r" (lv1), "=r" (lw)
+ : "r" (m1), "r" (m2), "r" (s), "I" (0)
+ : "hi", "lo", "$0");
+ /* We have to use single integers for m1 and m2 and a double
+	 * one for p to be sure gcc's mulsidi3 RTL multiplication
+ * instruction has the workaround applied. Older versions of
+ * gcc have correct umulsi3 and mulsi3, but other
+ * multiplication variants lack the workaround.
+ */
+ asm volatile(
+ ""
+ : "=r" (m1), "=r" (m2), "=r" (s)
+ : "0" (m1), "1" (m2), "2" (s));
+ align_mod(align, mod);
+ p = m1 * m2;
+ lv2 = s << 32;
+ asm volatile(
+ ""
+ : "=r" (lv2)
+ : "0" (lv2), "r" (p));
+ local_irq_restore(flags);
+
+ *v1 = lv1;
+ *v2 = lv2;
+ *w = lw;
+}
+
+static __always_inline __init void check_mult_sh(void)
+{
+ long v1[8], v2[8], w[8];
+ int bug, fix, i;
+
+ printk("Checking for the multiply/shift bug... ");
+
+ /*
+ * Testing discovered false negatives for certain code offsets
+ * into cache lines. Hence we test all possible offsets for
+ * the worst assumption of an R4000 I-cache line width of 32
+ * bytes.
+ *
+ * We can't use a loop as alignment directives need to be
+ * immediates.
+ */
+ mult_sh_align_mod(&v1[0], &v2[0], &w[0], 32, 0);
+ mult_sh_align_mod(&v1[1], &v2[1], &w[1], 32, 1);
+ mult_sh_align_mod(&v1[2], &v2[2], &w[2], 32, 2);
+ mult_sh_align_mod(&v1[3], &v2[3], &w[3], 32, 3);
+ mult_sh_align_mod(&v1[4], &v2[4], &w[4], 32, 4);
+ mult_sh_align_mod(&v1[5], &v2[5], &w[5], 32, 5);
+ mult_sh_align_mod(&v1[6], &v2[6], &w[6], 32, 6);
+ mult_sh_align_mod(&v1[7], &v2[7], &w[7], 32, 7);
+
+ bug = 0;
+ for (i = 0; i < 8; i++)
+ if (v1[i] != w[i])
+ bug = 1;
+
+ if (bug == 0) {
+ pr_cont("no.\n");
+ return;
+ }
+
+ pr_cont("yes, workaround... ");
+
+ fix = 1;
+ for (i = 0; i < 8; i++)
+ if (v2[i] != w[i])
+ fix = 0;
+
+ if (fix == 1) {
+ pr_cont("yes.\n");
+ return;
+ }
+
+ pr_cont("no.\n");
+ panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
+}
+
+static volatile int daddi_ov;
+
+asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ daddi_ov = 1;
+ regs->cp0_epc += 4;
+ exception_exit(prev_state);
+}
+
+static __init void check_daddi(void)
+{
+ extern asmlinkage void handle_daddi_ov(void);
+ unsigned long flags;
+ void *handler;
+ long v, tmp;
+
+ printk("Checking for the daddi bug... ");
+
+ local_irq_save(flags);
+ handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
+ /*
+ * The following code fails to trigger an overflow exception
+ * when executed on R4000 rev. 2.2 or 3.0 (PRId 00000422 or
+ * 00000430, respectively).
+ *
+ * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
+ * 3.0" by MIPS Technologies, Inc., erratum #23 for details.
+ * I got no permission to duplicate it here, sigh... --macro
+ */
+ asm volatile(
+ ".set push\n\t"
+ ".set noat\n\t"
+ ".set noreorder\n\t"
+ ".set nomacro\n\t"
+ "addiu %1, $0, %2\n\t"
+ "dsrl %1, %1, 1\n\t"
+#ifdef HAVE_AS_SET_DADDI
+ ".set daddi\n\t"
+#endif
+ "daddi %0, %1, %3\n\t"
+ ".set pop"
+ : "=r" (v), "=&r" (tmp)
+ : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
+ set_except_vector(EXCCODE_OV, handler);
+ local_irq_restore(flags);
+
+ if (daddi_ov) {
+ pr_cont("no.\n");
+ return;
+ }
+
+ pr_cont("yes, workaround... ");
+
+ local_irq_save(flags);
+ handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
+ asm volatile(
+ "addiu %1, $0, %2\n\t"
+ "dsrl %1, %1, 1\n\t"
+ "daddi %0, %1, %3"
+ : "=r" (v), "=&r" (tmp)
+ : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
+ set_except_vector(EXCCODE_OV, handler);
+ local_irq_restore(flags);
+
+ if (daddi_ov) {
+ pr_cont("yes.\n");
+ return;
+ }
+
+ pr_cont("no.\n");
+ panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
+}
+
+int daddiu_bug = -1;
+
+static __init void check_daddiu(void)
+{
+ long v, w, tmp;
+
+ printk("Checking for the daddiu bug... ");
+
+ /*
+ * The following code leads to a wrong result of daddiu when
+ * executed on R4400 rev. 1.0 (PRId 00000440).
+ *
+ * See "MIPS R4400PC/SC Errata, Processor Revision 1.0" by
+ * MIPS Technologies, Inc., erratum #7 for details.
+ *
+ * According to "MIPS R4000PC/SC Errata, Processor Revision
+ * 2.2 and 3.0" by MIPS Technologies, Inc., erratum #41 this
+ * problem affects R4000 rev. 2.2 and 3.0 (PRId 00000422 and
+ * 00000430, respectively), too. Testing failed to trigger it
+ * so far.
+ *
+ * I got no permission to duplicate the errata here, sigh...
+ * --macro
+ */
+ asm volatile(
+ ".set push\n\t"
+ ".set noat\n\t"
+ ".set noreorder\n\t"
+ ".set nomacro\n\t"
+ "addiu %2, $0, %3\n\t"
+ "dsrl %2, %2, 1\n\t"
+#ifdef HAVE_AS_SET_DADDI
+ ".set daddi\n\t"
+#endif
+ "daddiu %0, %2, %4\n\t"
+ "addiu %1, $0, %4\n\t"
+ "daddu %1, %2\n\t"
+ ".set pop"
+ : "=&r" (v), "=&r" (w), "=&r" (tmp)
+ : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
+
+ daddiu_bug = v != w;
+
+ if (!daddiu_bug) {
+ pr_cont("no.\n");
+ return;
+ }
+
+ pr_cont("yes, workaround... ");
+
+ asm volatile(
+ "addiu %2, $0, %3\n\t"
+ "dsrl %2, %2, 1\n\t"
+ "daddiu %0, %2, %4\n\t"
+ "addiu %1, $0, %4\n\t"
+ "daddu %1, %2"
+ : "=&r" (v), "=&r" (w), "=&r" (tmp)
+ : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
+
+ if (v == w) {
+ pr_cont("yes.\n");
+ return;
+ }
+
+ pr_cont("no.\n");
+ panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
+}
+
+void __init check_bugs64_early(void)
+{
+ check_mult_sh();
+ check_daddiu();
+}
+
+void __init check_bugs64(void)
+{
+ check_daddi();
+}
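
For reference, the arithmetic behind the daddi/daddiu probes above, as read from the constants (not part of the patch): the sign-extended immediate 0xdb9a gives 0xffffffffffffdb9a, the logical shift right by one yields 0x7fffffffffffedcd, and adding 0x1234 crosses the signed 64-bit maximum, so a correct daddi must raise an overflow exception while daddiu must silently wrap.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tmp = 0xffffffffffffdb9aULL >> 1;	/* 0x7fffffffffffedcd */
	uint64_t sum = tmp + 0x1234;			/* 0x8000000000000001 */

	printf("%#llx + 0x1234 = %#llx, signed overflow: %s\n",
	       (unsigned long long)tmp, (unsigned long long)sum,
	       sum > (uint64_t)INT64_MAX ? "yes" : "no");
	return 0;
}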
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
new file mode 100644
index 000000000..b91e91106
--- /dev/null
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -0,0 +1,417 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 98, 99, 2000, 01 Ralf Baechle
+ *
+ * Multi-arch abstraction and asm macros for easier reading:
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/errno.h>
+#include <asm/export.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+
+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
+
+ .macro EX insn, reg, src
+ .set push
+ SET_HARDFLOAT
+ .set nomacro
+.ex\@: \insn \reg, \src
+ .set pop
+ .section __ex_table,"a"
+ PTR .ex\@, fault
+ .previous
+ .endm
+
+/*
+ * Save a thread's fp context.
+ */
+LEAF(_save_fp)
+EXPORT_SYMBOL(_save_fp)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6)
+ mfc0 t0, CP0_STATUS
+#endif
+ fpu_save_double a0 t0 t1 # clobbers t1
+ jr ra
+ END(_save_fp)
+
+/*
+ * Restore a thread's fp context.
+ */
+LEAF(_restore_fp)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6)
+ mfc0 t0, CP0_STATUS
+#endif
+ fpu_restore_double a0 t0 t1 # clobbers t1
+ jr ra
+ END(_restore_fp)
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+/*
+ * Save a thread's MSA vector context.
+ */
+LEAF(_save_msa)
+EXPORT_SYMBOL(_save_msa)
+ msa_save_all a0
+ jr ra
+ END(_save_msa)
+
+/*
+ * Restore a thread's MSA vector context.
+ */
+LEAF(_restore_msa)
+ msa_restore_all a0
+ jr ra
+ END(_restore_msa)
+
+LEAF(_init_msa_upper)
+ msa_init_all_upper
+ jr ra
+ END(_init_msa_upper)
+
+#endif
+
+ .set noreorder
+
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
+LEAF(_save_fp_context)
+ .set push
+ SET_HARDFLOAT
+ cfc1 t1, fcr31
+ .set pop
+
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6)
+ .set push
+ SET_HARDFLOAT
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5)
+ .set mips32r2
+ .set fp=64
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 5
+ bgez t0, 1f # skip storing odd if FR=0
+ nop
+#endif
+ /* Store the 16 odd double precision registers */
+ EX sdc1 $f1, 8(a0)
+ EX sdc1 $f3, 24(a0)
+ EX sdc1 $f5, 40(a0)
+ EX sdc1 $f7, 56(a0)
+ EX sdc1 $f9, 72(a0)
+ EX sdc1 $f11, 88(a0)
+ EX sdc1 $f13, 104(a0)
+ EX sdc1 $f15, 120(a0)
+ EX sdc1 $f17, 136(a0)
+ EX sdc1 $f19, 152(a0)
+ EX sdc1 $f21, 168(a0)
+ EX sdc1 $f23, 184(a0)
+ EX sdc1 $f25, 200(a0)
+ EX sdc1 $f27, 216(a0)
+ EX sdc1 $f29, 232(a0)
+ EX sdc1 $f31, 248(a0)
+1: .set pop
+#endif
+
+ .set push
+ SET_HARDFLOAT
+ /* Store the 16 even double precision registers */
+ EX sdc1 $f0, 0(a0)
+ EX sdc1 $f2, 16(a0)
+ EX sdc1 $f4, 32(a0)
+ EX sdc1 $f6, 48(a0)
+ EX sdc1 $f8, 64(a0)
+ EX sdc1 $f10, 80(a0)
+ EX sdc1 $f12, 96(a0)
+ EX sdc1 $f14, 112(a0)
+ EX sdc1 $f16, 128(a0)
+ EX sdc1 $f18, 144(a0)
+ EX sdc1 $f20, 160(a0)
+ EX sdc1 $f22, 176(a0)
+ EX sdc1 $f24, 192(a0)
+ EX sdc1 $f26, 208(a0)
+ EX sdc1 $f28, 224(a0)
+ EX sdc1 $f30, 240(a0)
+ EX sw t1, 0(a1)
+ jr ra
+ li v0, 0 # success
+ .set pop
+ END(_save_fp_context)
+
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
+ */
+LEAF(_restore_fp_context)
+ EX lw t1, 0(a1)
+
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6)
+ .set push
+ SET_HARDFLOAT
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5)
+ .set mips32r2
+ .set fp=64
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 5
+ bgez t0, 1f # skip loading odd if FR=0
+ nop
+#endif
+ EX ldc1 $f1, 8(a0)
+ EX ldc1 $f3, 24(a0)
+ EX ldc1 $f5, 40(a0)
+ EX ldc1 $f7, 56(a0)
+ EX ldc1 $f9, 72(a0)
+ EX ldc1 $f11, 88(a0)
+ EX ldc1 $f13, 104(a0)
+ EX ldc1 $f15, 120(a0)
+ EX ldc1 $f17, 136(a0)
+ EX ldc1 $f19, 152(a0)
+ EX ldc1 $f21, 168(a0)
+ EX ldc1 $f23, 184(a0)
+ EX ldc1 $f25, 200(a0)
+ EX ldc1 $f27, 216(a0)
+ EX ldc1 $f29, 232(a0)
+ EX ldc1 $f31, 248(a0)
+1: .set pop
+#endif
+ .set push
+ SET_HARDFLOAT
+ EX ldc1 $f0, 0(a0)
+ EX ldc1 $f2, 16(a0)
+ EX ldc1 $f4, 32(a0)
+ EX ldc1 $f6, 48(a0)
+ EX ldc1 $f8, 64(a0)
+ EX ldc1 $f10, 80(a0)
+ EX ldc1 $f12, 96(a0)
+ EX ldc1 $f14, 112(a0)
+ EX ldc1 $f16, 128(a0)
+ EX ldc1 $f18, 144(a0)
+ EX ldc1 $f20, 160(a0)
+ EX ldc1 $f22, 176(a0)
+ EX ldc1 $f24, 192(a0)
+ EX ldc1 $f26, 208(a0)
+ EX ldc1 $f28, 224(a0)
+ EX ldc1 $f30, 240(a0)
+ ctc1 t1, fcr31
+ .set pop
+ jr ra
+ li v0, 0 # success
+ END(_restore_fp_context)
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+ .macro op_one_wr op, idx, base
+ .align 4
+\idx: \op \idx, 0, \base
+ jr ra
+ nop
+ .endm
+
+ .macro op_msa_wr name, op
+LEAF(\name)
+ .set push
+ .set noreorder
+ sll t0, a0, 4
+ PTR_LA t1, 0f
+ PTR_ADDU t0, t0, t1
+ jr t0
+ nop
+ op_one_wr \op, 0, a1
+ op_one_wr \op, 1, a1
+ op_one_wr \op, 2, a1
+ op_one_wr \op, 3, a1
+ op_one_wr \op, 4, a1
+ op_one_wr \op, 5, a1
+ op_one_wr \op, 6, a1
+ op_one_wr \op, 7, a1
+ op_one_wr \op, 8, a1
+ op_one_wr \op, 9, a1
+ op_one_wr \op, 10, a1
+ op_one_wr \op, 11, a1
+ op_one_wr \op, 12, a1
+ op_one_wr \op, 13, a1
+ op_one_wr \op, 14, a1
+ op_one_wr \op, 15, a1
+ op_one_wr \op, 16, a1
+ op_one_wr \op, 17, a1
+ op_one_wr \op, 18, a1
+ op_one_wr \op, 19, a1
+ op_one_wr \op, 20, a1
+ op_one_wr \op, 21, a1
+ op_one_wr \op, 22, a1
+ op_one_wr \op, 23, a1
+ op_one_wr \op, 24, a1
+ op_one_wr \op, 25, a1
+ op_one_wr \op, 26, a1
+ op_one_wr \op, 27, a1
+ op_one_wr \op, 28, a1
+ op_one_wr \op, 29, a1
+ op_one_wr \op, 30, a1
+ op_one_wr \op, 31, a1
+ .set pop
+ END(\name)
+ .endm
+
+ op_msa_wr read_msa_wr_b, st_b
+ op_msa_wr read_msa_wr_h, st_h
+ op_msa_wr read_msa_wr_w, st_w
+ op_msa_wr read_msa_wr_d, st_d
+
+ op_msa_wr write_msa_wr_b, ld_b
+ op_msa_wr write_msa_wr_h, ld_h
+ op_msa_wr write_msa_wr_w, ld_w
+ op_msa_wr write_msa_wr_d, ld_d
+
+#endif /* CONFIG_CPU_HAS_MSA */
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+ .macro save_msa_upper wr, off, base
+ .set push
+ .set noat
+#ifdef CONFIG_64BIT
+ copy_s_d \wr, 1
+ EX sd $1, \off(\base)
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ copy_s_w \wr, 2
+ EX sw $1, \off(\base)
+ copy_s_w \wr, 3
+ EX sw $1, (\off+4)(\base)
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ copy_s_w \wr, 2
+ EX sw $1, (\off+4)(\base)
+ copy_s_w \wr, 3
+ EX sw $1, \off(\base)
+#endif
+ .set pop
+ .endm
+
+LEAF(_save_msa_all_upper)
+ save_msa_upper 0, 0x00, a0
+ save_msa_upper 1, 0x08, a0
+ save_msa_upper 2, 0x10, a0
+ save_msa_upper 3, 0x18, a0
+ save_msa_upper 4, 0x20, a0
+ save_msa_upper 5, 0x28, a0
+ save_msa_upper 6, 0x30, a0
+ save_msa_upper 7, 0x38, a0
+ save_msa_upper 8, 0x40, a0
+ save_msa_upper 9, 0x48, a0
+ save_msa_upper 10, 0x50, a0
+ save_msa_upper 11, 0x58, a0
+ save_msa_upper 12, 0x60, a0
+ save_msa_upper 13, 0x68, a0
+ save_msa_upper 14, 0x70, a0
+ save_msa_upper 15, 0x78, a0
+ save_msa_upper 16, 0x80, a0
+ save_msa_upper 17, 0x88, a0
+ save_msa_upper 18, 0x90, a0
+ save_msa_upper 19, 0x98, a0
+ save_msa_upper 20, 0xa0, a0
+ save_msa_upper 21, 0xa8, a0
+ save_msa_upper 22, 0xb0, a0
+ save_msa_upper 23, 0xb8, a0
+ save_msa_upper 24, 0xc0, a0
+ save_msa_upper 25, 0xc8, a0
+ save_msa_upper 26, 0xd0, a0
+ save_msa_upper 27, 0xd8, a0
+ save_msa_upper 28, 0xe0, a0
+ save_msa_upper 29, 0xe8, a0
+ save_msa_upper 30, 0xf0, a0
+ save_msa_upper 31, 0xf8, a0
+ jr ra
+ li v0, 0
+ END(_save_msa_all_upper)
+
+ .macro restore_msa_upper wr, off, base
+ .set push
+ .set noat
+#ifdef CONFIG_64BIT
+ EX ld $1, \off(\base)
+ insert_d \wr, 1
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ EX lw $1, \off(\base)
+ insert_w \wr, 2
+ EX lw $1, (\off+4)(\base)
+ insert_w \wr, 3
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ EX lw $1, (\off+4)(\base)
+ insert_w \wr, 2
+ EX lw $1, \off(\base)
+ insert_w \wr, 3
+#endif
+ .set pop
+ .endm
+
+LEAF(_restore_msa_all_upper)
+ restore_msa_upper 0, 0x00, a0
+ restore_msa_upper 1, 0x08, a0
+ restore_msa_upper 2, 0x10, a0
+ restore_msa_upper 3, 0x18, a0
+ restore_msa_upper 4, 0x20, a0
+ restore_msa_upper 5, 0x28, a0
+ restore_msa_upper 6, 0x30, a0
+ restore_msa_upper 7, 0x38, a0
+ restore_msa_upper 8, 0x40, a0
+ restore_msa_upper 9, 0x48, a0
+ restore_msa_upper 10, 0x50, a0
+ restore_msa_upper 11, 0x58, a0
+ restore_msa_upper 12, 0x60, a0
+ restore_msa_upper 13, 0x68, a0
+ restore_msa_upper 14, 0x70, a0
+ restore_msa_upper 15, 0x78, a0
+ restore_msa_upper 16, 0x80, a0
+ restore_msa_upper 17, 0x88, a0
+ restore_msa_upper 18, 0x90, a0
+ restore_msa_upper 19, 0x98, a0
+ restore_msa_upper 20, 0xa0, a0
+ restore_msa_upper 21, 0xa8, a0
+ restore_msa_upper 22, 0xb0, a0
+ restore_msa_upper 23, 0xb8, a0
+ restore_msa_upper 24, 0xc0, a0
+ restore_msa_upper 25, 0xc8, a0
+ restore_msa_upper 26, 0xd0, a0
+ restore_msa_upper 27, 0xd8, a0
+ restore_msa_upper 28, 0xe0, a0
+ restore_msa_upper 29, 0xe8, a0
+ restore_msa_upper 30, 0xf0, a0
+ restore_msa_upper 31, 0xf8, a0
+ jr ra
+ li v0, 0
+ END(_restore_msa_all_upper)
+
+#endif /* CONFIG_CPU_HAS_MSA */
+
+ .set reorder
+
+ .type fault, @function
+ .ent fault
+fault: li v0, -EFAULT # failure
+ jr ra
+ .end fault
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
new file mode 100644
index 000000000..58232ae6c
--- /dev/null
+++ b/arch/mips/kernel/r4k_switch.S
@@ -0,0 +1,59 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1994, 1995, 1996, by Andreas Busse
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ * written by Carsten Langgaard, carstenl@mips.com
+ */
+#include <asm/asm.h>
+#include <asm/cachectl.h>
+#include <asm/mipsregs.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+#include <asm/asmmacro.h>
+
+/*
+ * task_struct *resume(task_struct *prev, task_struct *next,
+ * struct thread_info *next_ti)
+ */
+ .align 5
+ LEAF(resume)
+ mfc0 t1, CP0_STATUS
+ LONG_S t1, THREAD_STATUS(a0)
+ cpu_save_nonscratch a0
+ LONG_S ra, THREAD_REG31(a0)
+
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ PTR_LA t8, __stack_chk_guard
+ LONG_L t9, TASK_STACK_CANARY(a1)
+ LONG_S t9, 0(t8)
+#endif
+
+ /*
+ * The order of restoring the registers takes care of the race
+ * updating $28, $29 and kernelsp without disabling ints.
+ */
+ move $28, a2
+ cpu_restore_nonscratch a1
+
+ PTR_ADDU t0, $28, _THREAD_SIZE - 32
+ set_saved_sp t0, t1, t2
+ mfc0 t1, CP0_STATUS /* Do we really need this? */
+ li a3, 0xff01
+ and t1, a3
+ LONG_L a2, THREAD_STATUS(a1)
+ nor a3, $0, a3
+ and a2, a3
+ or a2, t1
+ mtc0 a2, CP0_STATUS
+ move v0, a0
+ jr ra
+ END(resume)
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
new file mode 100644
index 000000000..dab8febb5
--- /dev/null
+++ b/arch/mips/kernel/relocate.c
@@ -0,0 +1,446 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Support for Kernel relocation at boot time
+ *
+ * Copyright (C) 2015, Imagination Technologies Ltd.
+ * Authors: Matt Redfearn (matt.redfearn@mips.com)
+ */
+#include <asm/bootinfo.h>
+#include <asm/cacheflush.h>
+#include <asm/fw/fw.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/timex.h>
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+#include <linux/of_fdt.h>
+#include <linux/sched/task.h>
+#include <linux/start_kernel.h>
+#include <linux/string.h>
+#include <linux/printk.h>
+
+#define RELOCATED(x) ((void *)((long)x + offset))
+
+extern u32 _relocation_start[]; /* End kernel image / start relocation table */
+extern u32 _relocation_end[]; /* End relocation table */
+
+extern long __start___ex_table; /* Start exception table */
+extern long __stop___ex_table; /* End exception table */
+
+extern void __weak plat_fdt_relocated(void *new_location);
+
+/*
+ * This function may be defined for a platform to perform any post-relocation
+ * fixup necessary.
+ * Return non-zero to abort relocation
+ */
+int __weak plat_post_relocation(long offset)
+{
+ return 0;
+}
+
+static inline u32 __init get_synci_step(void)
+{
+ u32 res;
+
+ __asm__("rdhwr %0, $1" : "=r" (res));
+
+ return res;
+}
+
+static void __init sync_icache(void *kbase, unsigned long kernel_length)
+{
+ void *kend = kbase + kernel_length;
+ u32 step = get_synci_step();
+
+ do {
+ __asm__ __volatile__(
+ "synci 0(%0)"
+ : /* no output */
+ : "r" (kbase));
+
+ kbase += step;
+ } while (kbase < kend);
+
+ /* Completion barrier */
+ __sync();
+}
+
+static int __init apply_r_mips_64_rel(u32 *loc_orig, u32 *loc_new, long offset)
+{
+ *(u64 *)loc_new += offset;
+
+ return 0;
+}
+
+static int __init apply_r_mips_32_rel(u32 *loc_orig, u32 *loc_new, long offset)
+{
+ *loc_new += offset;
+
+ return 0;
+}
+
+static int __init apply_r_mips_26_rel(u32 *loc_orig, u32 *loc_new, long offset)
+{
+ unsigned long target_addr = (*loc_orig) & 0x03ffffff;
+
+ if (offset % 4) {
+ pr_err("Dangerous R_MIPS_26 REL relocation\n");
+ return -ENOEXEC;
+ }
+
+ /* Original target address */
+ target_addr <<= 2;
+ target_addr += (unsigned long)loc_orig & ~0x03ffffff;
+
+ /* Get the new target address */
+ target_addr += offset;
+
+ if ((target_addr & 0xf0000000) != ((unsigned long)loc_new & 0xf0000000)) {
+ pr_err("R_MIPS_26 REL relocation overflow\n");
+ return -ENOEXEC;
+ }
+
+ target_addr -= (unsigned long)loc_new & ~0x03ffffff;
+ target_addr >>= 2;
+
+ *loc_new = (*loc_new & ~0x03ffffff) | (target_addr & 0x03ffffff);
+
+ return 0;
+}
+
+
+static int __init apply_r_mips_hi16_rel(u32 *loc_orig, u32 *loc_new, long offset)
+{
+ unsigned long insn = *loc_orig;
+ unsigned long target = (insn & 0xffff) << 16; /* high 16bits of target */
+
+ target += offset;
+
+ *loc_new = (insn & ~0xffff) | ((target >> 16) & 0xffff);
+ return 0;
+}
+
+static int (*reloc_handlers_rel[]) (u32 *, u32 *, long) __initdata = {
+ [R_MIPS_64] = apply_r_mips_64_rel,
+ [R_MIPS_32] = apply_r_mips_32_rel,
+ [R_MIPS_26] = apply_r_mips_26_rel,
+ [R_MIPS_HI16] = apply_r_mips_hi16_rel,
+};
+
+int __init do_relocations(void *kbase_old, void *kbase_new, long offset)
+{
+ u32 *r;
+ u32 *loc_orig;
+ u32 *loc_new;
+ int type;
+ int res;
+
+ for (r = _relocation_start; r < _relocation_end; r++) {
+ /* Sentinel for last relocation */
+ if (*r == 0)
+ break;
+
+ type = (*r >> 24) & 0xff;
+ loc_orig = kbase_old + ((*r & 0x00ffffff) << 2);
+ loc_new = RELOCATED(loc_orig);
+
+ if (reloc_handlers_rel[type] == NULL) {
+ /* Unsupported relocation */
+ pr_err("Unhandled relocation type %d at 0x%pK\n",
+ type, loc_orig);
+ return -ENOEXEC;
+ }
+
+ res = reloc_handlers_rel[type](loc_orig, loc_new, offset);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+/*
+ * The exception table is filled in by the relocs tool after vmlinux is linked.
+ * It must be relocated separately since there will not be any relocation
+ * information for it filled in by the linker.
+ */
+static int __init relocate_exception_table(long offset)
+{
+ unsigned long *etable_start, *etable_end, *e;
+
+ etable_start = RELOCATED(&__start___ex_table);
+ etable_end = RELOCATED(&__stop___ex_table);
+
+ for (e = etable_start; e < etable_end; e++)
+ *e += offset;
+
+ return 0;
+}
+
+#ifdef CONFIG_RANDOMIZE_BASE
+
+static inline __init unsigned long rotate_xor(unsigned long hash,
+ const void *area, size_t size)
+{
+ const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
+ size_t diff, i;
+
+ diff = (void *)ptr - area;
+ if (unlikely(size < diff + sizeof(hash)))
+ return hash;
+
+ size = ALIGN_DOWN(size - diff, sizeof(hash));
+
+ for (i = 0; i < size / sizeof(hash); i++) {
+ /* Rotate by odd number of bits and XOR. */
+ hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
+ hash ^= ptr[i];
+ }
+
+ return hash;
+}
+
+static inline __init unsigned long get_random_boot(void)
+{
+ unsigned long entropy = random_get_entropy();
+ unsigned long hash = 0;
+
+ /* Attempt to create a simple but unpredictable starting entropy. */
+ hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
+
+ /* Add in any runtime entropy we can get */
+ hash = rotate_xor(hash, &entropy, sizeof(entropy));
+
+#if defined(CONFIG_USE_OF)
+ /* Get any additional entropy passed in device tree */
+ if (initial_boot_params) {
+ int node, len;
+ u64 *prop;
+
+ node = fdt_path_offset(initial_boot_params, "/chosen");
+ if (node >= 0) {
+ prop = fdt_getprop_w(initial_boot_params, node,
+ "kaslr-seed", &len);
+ if (prop && (len == sizeof(u64)))
+ hash = rotate_xor(hash, prop, sizeof(*prop));
+ }
+ }
+#endif /* CONFIG_USE_OF */
+
+ return hash;
+}
+
+static inline __init bool kaslr_disabled(void)
+{
+ char *str;
+
+#if defined(CONFIG_CMDLINE_BOOL)
+ const char *builtin_cmdline = CONFIG_CMDLINE;
+
+ str = strstr(builtin_cmdline, "nokaslr");
+ if (str == builtin_cmdline ||
+ (str > builtin_cmdline && *(str - 1) == ' '))
+ return true;
+#endif
+ str = strstr(arcs_cmdline, "nokaslr");
+ if (str == arcs_cmdline || (str > arcs_cmdline && *(str - 1) == ' '))
+ return true;
+
+ return false;
+}
+
+static inline void __init *determine_relocation_address(void)
+{
+ /* Choose a new address for the kernel */
+ unsigned long kernel_length;
+ void *dest = &_text;
+ unsigned long offset;
+
+ if (kaslr_disabled())
+ return dest;
+
+ kernel_length = (long)_end - (long)(&_text);
+
+ offset = get_random_boot() << 16;
+ offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
+ if (offset < kernel_length)
+ offset += ALIGN(kernel_length, 0xffff);
+
+ return RELOCATED(dest);
+}
+
+#else
+
+static inline void __init *determine_relocation_address(void)
+{
+ /*
+ * Choose a new address for the kernel
+ * For now we'll hard code the destination
+ */
+ return (void *)0xffffffff81000000;
+}
+
+#endif
+
+static inline int __init relocation_addr_valid(void *loc_new)
+{
+ if ((unsigned long)loc_new & 0x0000ffff) {
+ /* Inappropriately aligned new location */
+ return 0;
+ }
+ if ((unsigned long)loc_new < (unsigned long)&_end) {
+ /* New location overlaps original kernel */
+ return 0;
+ }
+ return 1;
+}
+
+void *__init relocate_kernel(void)
+{
+ void *loc_new;
+ unsigned long kernel_length;
+ unsigned long bss_length;
+ long offset = 0;
+ int res = 1;
+ /* Default to original kernel entry point */
+ void *kernel_entry = start_kernel;
+ void *fdt = NULL;
+
+ /* Get the command line */
+ fw_init_cmdline();
+#if defined(CONFIG_USE_OF)
+ /* Deal with the device tree */
+ fdt = plat_get_fdt();
+ early_init_dt_scan(fdt);
+ if (boot_command_line[0]) {
+ /* Boot command line was passed in device tree */
+ strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+ }
+#endif /* CONFIG_USE_OF */
+
+ kernel_length = (long)(&_relocation_start) - (long)(&_text);
+ bss_length = (long)&__bss_stop - (long)&__bss_start;
+
+ loc_new = determine_relocation_address();
+
+ /* Sanity check relocation address */
+ if (relocation_addr_valid(loc_new))
+ offset = (unsigned long)loc_new - (unsigned long)(&_text);
+
+ /* Reset the command line now so we don't end up with a duplicate */
+ arcs_cmdline[0] = '\0';
+
+ if (offset) {
+ void (*fdt_relocated_)(void *) = NULL;
+#if defined(CONFIG_USE_OF)
+ unsigned long fdt_phys = virt_to_phys(fdt);
+
+ /*
+		 * If a built-in dtb is used then it will have been relocated
+		 * during kernel _text relocation. If an appended DTB is used
+		 * then it will not be relocated, but it should remain
+		 * intact in the original location. If the dtb is loaded by
+		 * the bootloader then it may need to be moved if it crosses
+		 * the target memory area.
+ */
+
+ if (fdt_phys >= virt_to_phys(RELOCATED(&_text)) &&
+ fdt_phys <= virt_to_phys(RELOCATED(&_end))) {
+ void *fdt_relocated =
+ RELOCATED(ALIGN((long)&_end, PAGE_SIZE));
+ memcpy(fdt_relocated, fdt, fdt_totalsize(fdt));
+ fdt = fdt_relocated;
+ fdt_relocated_ = RELOCATED(&plat_fdt_relocated);
+ }
+#endif /* CONFIG_USE_OF */
+
+		/* Copy the kernel to its new location */
+ memcpy(loc_new, &_text, kernel_length);
+
+ /* Perform relocations on the new kernel */
+ res = do_relocations(&_text, loc_new, offset);
+ if (res < 0)
+ goto out;
+
+ /* Sync the caches ready for execution of new kernel */
+ sync_icache(loc_new, kernel_length);
+
+ res = relocate_exception_table(offset);
+ if (res < 0)
+ goto out;
+
+ /*
+ * The original .bss has already been cleared, and
+ * some variables such as command line parameters
+		 * have been stored to it, so make a copy in the new location.
+ */
+ memcpy(RELOCATED(&__bss_start), &__bss_start, bss_length);
+
+ /*
+ * If fdt was stored outside of the kernel image and
+		 * had to be moved then update the platform's state data
+ * with the new fdt location
+ */
+ if (fdt_relocated_)
+ fdt_relocated_(fdt);
+
+ /*
+ * Last chance for the platform to abort relocation.
+ * This may also be used by the platform to perform any
+ * initialisation required now that the new kernel is
+ * resident in memory and ready to be executed.
+ */
+ if (plat_post_relocation(offset))
+ goto out;
+
+ /* The current thread is now within the relocated image */
+ __current_thread_info = RELOCATED(&init_thread_union);
+
+ /* Return the new kernel's entry point */
+ kernel_entry = RELOCATED(start_kernel);
+ }
+out:
+ return kernel_entry;
+}
+
+/*
+ * Show relocation information on panic.
+ */
+void show_kernel_relocation(const char *level)
+{
+ unsigned long offset;
+
+ offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
+
+ if (IS_ENABLED(CONFIG_RELOCATABLE) && offset > 0) {
+ printk(level);
+ pr_cont("Kernel relocated by 0x%pK\n", (void *)offset);
+ pr_cont(" .text @ 0x%pK\n", _text);
+ pr_cont(" .data @ 0x%pK\n", _sdata);
+ pr_cont(" .bss @ 0x%pK\n", __bss_start);
+ }
+}
+
+static int kernel_location_notifier_fn(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ show_kernel_relocation(KERN_EMERG);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block kernel_location_notifier = {
+ .notifier_call = kernel_location_notifier_fn
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &kernel_location_notifier);
+ return 0;
+}
+__initcall(register_kernel_offset_dumper);
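
A condensed sketch of how the randomized load address is chosen in the CONFIG_RANDOMIZE_BASE path above: the boot-time hash is shifted into 64 KiB units, masked to the configured maximum offset, and pushed past the end of the image so the copy never overlaps the running kernel. The helper below is illustrative only; the names are invented and the rounding is approximate (the kernel code uses ALIGN(kernel_length, 0xffff)).

static unsigned long kaslr_offset(unsigned long hash,
				  unsigned long kernel_length,
				  unsigned long max_offset)
{
	/* Keep whole 64 KiB steps below the configured maximum;
	 * max_offset is assumed to be a power of two. */
	unsigned long offset = (hash << 16) & (max_offset - 1);

	/* Never land inside the currently running image. */
	if (offset < kernel_length)
		offset += (kernel_length + 0xffffUL) & ~0xffffUL;

	return offset;
}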
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
new file mode 100644
index 000000000..ac870893b
--- /dev/null
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * relocate_kernel.S for kexec
+ * Created by <nschichan@corp.free.fr> on Thu Oct 12 17:49:57 2006
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/addrspace.h>
+
+LEAF(relocate_new_kernel)
+ PTR_L a0, arg0
+ PTR_L a1, arg1
+ PTR_L a2, arg2
+ PTR_L a3, arg3
+
+ PTR_L s0, kexec_indirection_page
+ PTR_L s1, kexec_start_address
+
+process_entry:
+ PTR_L s2, (s0)
+ PTR_ADDIU s0, s0, SZREG
+
+ /*
+ * In case of a kdump/crash kernel, the indirection page is not
+ * populated as the kernel is directly copied to a reserved location
+ */
+ beqz s2, done
+
+ /* destination page */
+ and s3, s2, 0x1
+ beq s3, zero, 1f
+ and s4, s2, ~0x1 /* store destination addr in s4 */
+ b process_entry
+
+1:
+ /* indirection page, update s0 */
+ and s3, s2, 0x2
+ beq s3, zero, 1f
+ and s0, s2, ~0x2
+ b process_entry
+
+1:
+ /* done page */
+ and s3, s2, 0x4
+ beq s3, zero, 1f
+ b done
+1:
+ /* source page */
+ and s3, s2, 0x8
+ beq s3, zero, process_entry
+ and s2, s2, ~0x8
+ li s6, (1 << _PAGE_SHIFT) / SZREG
+
+copy_word:
+ /* copy page word by word */
+ REG_L s5, (s2)
+ REG_S s5, (s4)
+ PTR_ADDIU s4, s4, SZREG
+ PTR_ADDIU s2, s2, SZREG
+ LONG_ADDIU s6, s6, -1
+ beq s6, zero, process_entry
+ b copy_word
+ b process_entry
+
+done:
+#ifdef CONFIG_SMP
+	/* kexec_flag reset is a signal to other CPUs that the kernel
+	   was moved to its location. Note - we need the relocated address
+	   of kexec_flag. */
+
+ bal 1f
+ 1: move t1,ra;
+ PTR_LA t2,1b
+ PTR_LA t0,kexec_flag
+ PTR_SUB t0,t0,t2;
+ PTR_ADD t0,t1,t0;
+ LONG_S zero,(t0)
+#endif
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	/* We need to flush the I-cache before jumping to the new kernel.
+	 * Unfortunately, this code is CPU-specific.
+ */
+ .set push
+ .set noreorder
+ syncw
+ syncw
+ synci 0($0)
+ .set pop
+#else
+ sync
+#endif
+ /* jump to kexec_start_address */
+ j s1
+ END(relocate_new_kernel)
+
+#ifdef CONFIG_SMP
+/*
+ * Other CPUs should wait until code is relocated and
+ * then start at entry (?) point.
+ */
+LEAF(kexec_smp_wait)
+ PTR_L a0, s_arg0
+ PTR_L a1, s_arg1
+ PTR_L a2, s_arg2
+ PTR_L a3, s_arg3
+ PTR_L s1, kexec_start_address
+
+	/* Non-relocated addresses work for args and kexec_start_address (the old
+	 * kernel is not overwritten). But we need the relocated address of
+ * kexec_flag.
+ */
+
+ bal 1f
+1: move t1,ra;
+ PTR_LA t2,1b
+ PTR_LA t0,kexec_flag
+ PTR_SUB t0,t0,t2;
+ PTR_ADD t0,t1,t0;
+
+1: LONG_L s0, (t0)
+ bne s0, zero,1b
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ .set push
+ .set noreorder
+ synci 0($0)
+ .set pop
+#else
+ sync
+#endif
+ j s1
+ END(kexec_smp_wait)
+#endif
+
+#ifdef __mips64
+	/* all PTRs must be aligned to 8 bytes in 64-bit mode */
+ .align 3
+#endif
+
+/* All parameters to new kernel are passed in registers a0-a3.
+ * kexec_args[0..3] are used to prepare register values.
+ */
+
+kexec_args:
+ EXPORT(kexec_args)
+arg0: PTR 0x0
+arg1: PTR 0x0
+arg2: PTR 0x0
+arg3: PTR 0x0
+ .size kexec_args,PTRSIZE*4
+
+#ifdef CONFIG_SMP
+/*
+ * Secondary CPUs may have different kernel parameters in
+ * their registers a0-a3. secondary_kexec_args[0..3] are used
+ * to prepare register values.
+ */
+secondary_kexec_args:
+ EXPORT(secondary_kexec_args)
+s_arg0: PTR 0x0
+s_arg1: PTR 0x0
+s_arg2: PTR 0x0
+s_arg3: PTR 0x0
+ .size secondary_kexec_args,PTRSIZE*4
+kexec_flag:
+ LONG 0x1
+
+#endif
+
+kexec_start_address:
+ EXPORT(kexec_start_address)
+ PTR 0x0
+ .size kexec_start_address, PTRSIZE
+
+kexec_indirection_page:
+ EXPORT(kexec_indirection_page)
+ PTR 0
+ .size kexec_indirection_page, PTRSIZE
+
+relocate_new_kernel_end:
+
+relocate_new_kernel_size:
+ EXPORT(relocate_new_kernel_size)
+ PTR relocate_new_kernel_end - relocate_new_kernel
+ .size relocate_new_kernel_size, PTRSIZE
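
The entries walked by relocate_new_kernel above use the generic kexec indirection-page encoding: the low bits of each entry tag its meaning and the remaining bits carry a page address. A summary of the tags, as read from the masks tested against s2 in the assembly:

#define IND_DESTINATION	0x1	/* set the destination page pointer (s4) */
#define IND_INDIRECTION	0x2	/* continue from a new indirection page (s0) */
#define IND_DONE	0x4	/* end of the list, jump to the new kernel */
#define IND_SOURCE	0x8	/* copy this source page to the destination */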
diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
new file mode 100644
index 000000000..6288780b7
--- /dev/null
+++ b/arch/mips/kernel/reset.c
@@ -0,0 +1,125 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 06 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
+
+#include <asm/compiler.h>
+#include <asm/idle.h>
+#include <asm/mipsregs.h>
+#include <asm/reboot.h>
+
+/*
+ * Urgs ... Too many MIPS machines to handle this in a generic way.
+ * So handle all using function pointers to machine specific
+ * functions.
+ */
+void (*_machine_restart)(char *command);
+void (*_machine_halt)(void);
+void (*pm_power_off)(void);
+
+EXPORT_SYMBOL(pm_power_off);
+
+static void machine_hang(void)
+{
+ /*
+ * We're hanging the system so we don't want to be interrupted anymore.
+ * Any interrupt handlers that ran would at best be useless & at worst
+ * go awry because the system isn't in a functional state.
+ */
+ local_irq_disable();
+
+ /*
+ * Mask all interrupts, giving us a better chance of remaining in the
+ * low power wait state.
+ */
+ clear_c0_status(ST0_IM);
+
+ while (true) {
+ if (cpu_has_mips_r) {
+ /*
+ * We know that the wait instruction is supported so
+ * make use of it directly, leaving interrupts
+ * disabled.
+ */
+ asm volatile(
+ ".set push\n\t"
+ ".set " MIPS_ISA_ARCH_LEVEL "\n\t"
+ "wait\n\t"
+ ".set pop");
+ } else if (cpu_wait) {
+ /*
+ * Try the cpu_wait() callback. This isn't ideal since
+ * it'll re-enable interrupts, but that ought to be
+ * harmless given that they're all masked.
+ */
+ cpu_wait();
+ local_irq_disable();
+ } else {
+ /*
+ * We're going to burn some power running round the
+ * loop, but we don't really have a choice. This isn't
+ * a path we should expect to run for long during
+ * typical use anyway.
+ */
+ }
+
+ /*
+ * In most modern MIPS CPUs interrupts will cause the wait
+ * instruction to graduate even when disabled, and in some
+ * cases even when masked. In order to prevent a timer
+ * interrupt from continuously taking us out of the low power
+ * wait state, we clear any pending timer interrupt here.
+ */
+ if (cpu_has_counter)
+ write_c0_compare(0);
+ }
+}
+
+void machine_restart(char *command)
+{
+ if (_machine_restart)
+ _machine_restart(command);
+
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ do_kernel_restart(command);
+ mdelay(1000);
+ pr_emerg("Reboot failed -- System halted\n");
+ machine_hang();
+}
+
+void machine_halt(void)
+{
+ if (_machine_halt)
+ _machine_halt();
+
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ machine_hang();
+}
+
+void machine_power_off(void)
+{
+ if (pm_power_off)
+ pm_power_off();
+
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ machine_hang();
+}
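
Platform code hooks these paths by assigning the function pointers at setup time. A hedged sketch of what a board file typically does; the names below are hypothetical and the reset register poke is left as a placeholder.

static void my_board_restart(char *command)
{
	/* write the board-specific reset register here */
}

static int __init my_board_reset_init(void)
{
	_machine_restart = my_board_restart;
	return 0;
}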
diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c
new file mode 100644
index 000000000..d26dcc4b4
--- /dev/null
+++ b/arch/mips/kernel/rtlx-cmp.c
@@ -0,0 +1,122 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+
+#include <asm/mips_mt.h>
+#include <asm/vpe.h>
+#include <asm/rtlx.h>
+
+static int major;
+
+static void rtlx_interrupt(void)
+{
+ int i;
+ struct rtlx_info *info;
+ struct rtlx_info **p = vpe_get_shared(aprp_cpu_index());
+
+ if (p == NULL || *p == NULL)
+ return;
+
+ info = *p;
+
+ if (info->ap_int_pending == 1 && smp_processor_id() == 0) {
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ wake_up(&channel_wqs[i].lx_queue);
+ wake_up(&channel_wqs[i].rt_queue);
+ }
+ info->ap_int_pending = 0;
+ }
+}
+
+void _interrupt_sp(void)
+{
+ smp_send_reschedule(aprp_cpu_index());
+}
+
+int __init rtlx_module_init(void)
+{
+ struct device *dev;
+ int i, err;
+
+ if (!cpu_has_mipsmt) {
+ pr_warn("VPE loader: not a MIPS MT capable processor\n");
+ return -ENODEV;
+ }
+
+ if (num_possible_cpus() - aprp_cpu_index() < 1) {
+ pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n"
+ "Pass maxcpus=<n> argument as kernel argument\n");
+
+ return -ENODEV;
+ }
+
+ major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops);
+ if (major < 0) {
+ pr_err("rtlx_module_init: unable to register device\n");
+ return major;
+ }
+
+ /* initialise the wait queues */
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ init_waitqueue_head(&channel_wqs[i].rt_queue);
+ init_waitqueue_head(&channel_wqs[i].lx_queue);
+ atomic_set(&channel_wqs[i].in_open, 0);
+ mutex_init(&channel_wqs[i].mutex);
+
+ dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
+ "%s%d", RTLX_MODULE_NAME, i);
+ if (IS_ERR(dev)) {
+ while (i--)
+ device_destroy(mt_class, MKDEV(major, i));
+
+ err = PTR_ERR(dev);
+ goto out_chrdev;
+ }
+ }
+
+ /* set up notifiers */
+ rtlx_notify.start = rtlx_starting;
+ rtlx_notify.stop = rtlx_stopping;
+ vpe_notify(aprp_cpu_index(), &rtlx_notify);
+
+ if (cpu_has_vint) {
+ aprp_hook = rtlx_interrupt;
+ } else {
+ pr_err("APRP RTLX init on non-vectored-interrupt processor\n");
+ err = -ENODEV;
+ goto out_class;
+ }
+
+ return 0;
+
+out_class:
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ device_destroy(mt_class, MKDEV(major, i));
+out_chrdev:
+ unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ return err;
+}
+
+void __exit rtlx_module_exit(void)
+{
+ int i;
+
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ device_destroy(mt_class, MKDEV(major, i));
+
+ unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ aprp_hook = NULL;
+}
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c
new file mode 100644
index 000000000..38c6925a1
--- /dev/null
+++ b/arch/mips/kernel/rtlx-mt.c
@@ -0,0 +1,147 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include <asm/mips_mt.h>
+#include <asm/vpe.h>
+#include <asm/rtlx.h>
+
+static int major;
+
+static void rtlx_dispatch(void)
+{
+ if (read_c0_cause() & read_c0_status() & C_SW0)
+ do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ);
+}
+
+/*
+ * Interrupt handler may be called before rtlx_init has otherwise had
+ * a chance to run.
+ */
+static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
+{
+ unsigned int vpeflags;
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+ vpeflags = dvpe();
+ set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ);
+ irq_enable_hazard();
+ evpe(vpeflags);
+ local_irq_restore(flags);
+
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ wake_up(&channel_wqs[i].lx_queue);
+ wake_up(&channel_wqs[i].rt_queue);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
+
+void _interrupt_sp(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ dvpe();
+ settc(1);
+ write_vpe_c0_cause(read_vpe_c0_cause() | C_SW0);
+ evpe(EVPE_ENABLE);
+ local_irq_restore(flags);
+}
+
+int __init rtlx_module_init(void)
+{
+ struct device *dev;
+ int i, err;
+
+ if (!cpu_has_mipsmt) {
+ pr_warn("VPE loader: not a MIPS MT capable processor\n");
+ return -ENODEV;
+ }
+
+ if (aprp_cpu_index() == 0) {
+ pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n"
+ "Pass maxtcs=<n> argument as kernel argument\n");
+
+ return -ENODEV;
+ }
+
+ major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops);
+ if (major < 0) {
+ pr_err("rtlx_module_init: unable to register device\n");
+ return major;
+ }
+
+ /* initialise the wait queues */
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ init_waitqueue_head(&channel_wqs[i].rt_queue);
+ init_waitqueue_head(&channel_wqs[i].lx_queue);
+ atomic_set(&channel_wqs[i].in_open, 0);
+ mutex_init(&channel_wqs[i].mutex);
+
+ dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
+ "%s%d", RTLX_MODULE_NAME, i);
+ if (IS_ERR(dev)) {
+ while (i--)
+ device_destroy(mt_class, MKDEV(major, i));
+
+ err = PTR_ERR(dev);
+ goto out_chrdev;
+ }
+ }
+
+ /* set up notifiers */
+ rtlx_notify.start = rtlx_starting;
+ rtlx_notify.stop = rtlx_stopping;
+ vpe_notify(aprp_cpu_index(), &rtlx_notify);
+
+ if (cpu_has_vint) {
+ aprp_hook = rtlx_dispatch;
+ } else {
+ pr_err("APRP RTLX init on non-vectored-interrupt processor\n");
+ err = -ENODEV;
+ goto out_class;
+ }
+
+ err = request_irq(rtlx_irq_num, rtlx_interrupt, 0, "RTLX", rtlx);
+ if (err)
+ goto out_class;
+
+ return 0;
+
+out_class:
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ device_destroy(mt_class, MKDEV(major, i));
+out_chrdev:
+ unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ return err;
+}
+
+void __exit rtlx_module_exit(void)
+{
+ int i;
+
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ device_destroy(mt_class, MKDEV(major, i));
+
+ unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ aprp_hook = NULL;
+}
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
new file mode 100644
index 000000000..18c509c59
--- /dev/null
+++ b/arch/mips/kernel/rtlx.c
@@ -0,0 +1,409 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2005, 06 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/syscalls.h>
+#include <linux/moduleloader.h>
+#include <linux/atomic.h>
+#include <linux/sched/signal.h>
+
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+#include <asm/processor.h>
+#include <asm/rtlx.h>
+#include <asm/setup.h>
+#include <asm/vpe.h>
+
+static int sp_stopping;
+struct rtlx_info *rtlx;
+struct chan_waitqueues channel_wqs[RTLX_CHANNELS];
+struct vpe_notifications rtlx_notify;
+void (*aprp_hook)(void) = NULL;
+EXPORT_SYMBOL(aprp_hook);
+
+static void __used dump_rtlx(void)
+{
+ int i;
+
+ pr_info("id 0x%lx state %d\n", rtlx->id, rtlx->state);
+
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ struct rtlx_channel *chan = &rtlx->channel[i];
+
+ pr_info(" rt_state %d lx_state %d buffer_size %d\n",
+ chan->rt_state, chan->lx_state, chan->buffer_size);
+
+ pr_info(" rt_read %d rt_write %d\n",
+ chan->rt_read, chan->rt_write);
+
+ pr_info(" lx_read %d lx_write %d\n",
+ chan->lx_read, chan->lx_write);
+
+ pr_info(" rt_buffer <%s>\n", chan->rt_buffer);
+ pr_info(" lx_buffer <%s>\n", chan->lx_buffer);
+ }
+}
+
+/* call when we have the address of the shared structure from the SP side. */
+static int rtlx_init(struct rtlx_info *rtlxi)
+{
+ if (rtlxi->id != RTLX_ID) {
+ pr_err("no valid RTLX id at 0x%p 0x%lx\n", rtlxi, rtlxi->id);
+ return -ENOEXEC;
+ }
+
+ rtlx = rtlxi;
+
+ return 0;
+}
+
+/* notifications */
+void rtlx_starting(int vpe)
+{
+ int i;
+ sp_stopping = 0;
+
+ /* force a reload of rtlx */
+ rtlx = NULL;
+
+ /* wake up any sleeping rtlx_open's */
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ wake_up_interruptible(&channel_wqs[i].lx_queue);
+}
+
+void rtlx_stopping(int vpe)
+{
+ int i;
+
+ sp_stopping = 1;
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ wake_up_interruptible(&channel_wqs[i].lx_queue);
+}
+
+
+int rtlx_open(int index, int can_sleep)
+{
+ struct rtlx_info **p;
+ struct rtlx_channel *chan;
+ enum rtlx_state state;
+ int ret = 0;
+
+ if (index >= RTLX_CHANNELS) {
+ pr_debug("rtlx_open index out of range\n");
+ return -ENOSYS;
+ }
+
+ if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
+ pr_debug("rtlx_open channel %d already opened\n", index);
+ ret = -EBUSY;
+ goto out_fail;
+ }
+
+ if (rtlx == NULL) {
+ p = vpe_get_shared(aprp_cpu_index());
+ if (p == NULL) {
+ if (can_sleep) {
+ ret = __wait_event_interruptible(
+ channel_wqs[index].lx_queue,
+ (p = vpe_get_shared(aprp_cpu_index())));
+ if (ret)
+ goto out_fail;
+ } else {
+ pr_debug("No SP program loaded, and device opened with O_NONBLOCK\n");
+ ret = -ENOSYS;
+ goto out_fail;
+ }
+ }
+
+ smp_rmb();
+ if (*p == NULL) {
+ if (can_sleep) {
+ DEFINE_WAIT(wait);
+
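+				/*
+				 * Open-coded wait loop so that an smp_rmb()
+				 * can be issued before each check of *p.
+				 */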
+ for (;;) {
+ prepare_to_wait(
+ &channel_wqs[index].lx_queue,
+ &wait, TASK_INTERRUPTIBLE);
+ smp_rmb();
+ if (*p != NULL)
+ break;
+ if (!signal_pending(current)) {
+ schedule();
+ continue;
+ }
+ ret = -ERESTARTSYS;
+ goto out_fail;
+ }
+ finish_wait(&channel_wqs[index].lx_queue,
+ &wait);
+ } else {
+ pr_err(" *vpe_get_shared is NULL. Has an SP program been loaded?\n");
+ ret = -ENOSYS;
+ goto out_fail;
+ }
+ }
+
+ if ((unsigned int)*p < KSEG0) {
+ pr_warn("vpe_get_shared returned an invalid pointer maybe an error code %d\n",
+ (int)*p);
+ ret = -ENOSYS;
+ goto out_fail;
+ }
+
+ ret = rtlx_init(*p);
+ if (ret < 0)
+ goto out_ret;
+ }
+
+ chan = &rtlx->channel[index];
+
+ state = xchg(&chan->lx_state, RTLX_STATE_OPENED);
+ if (state == RTLX_STATE_OPENED) {
+ ret = -EBUSY;
+ goto out_fail;
+ }
+
+out_fail:
+ smp_mb();
+ atomic_dec(&channel_wqs[index].in_open);
+ smp_mb();
+
+out_ret:
+ return ret;
+}
+
+int rtlx_release(int index)
+{
+ if (rtlx == NULL) {
+ pr_err("rtlx_release() with null rtlx\n");
+ return 0;
+ }
+ rtlx->channel[index].lx_state = RTLX_STATE_UNUSED;
+ return 0;
+}
+
+unsigned int rtlx_read_poll(int index, int can_sleep)
+{
+ struct rtlx_channel *chan;
+
+ if (rtlx == NULL)
+ return 0;
+
+ chan = &rtlx->channel[index];
+
+ /* data available to read? */
+ if (chan->lx_read == chan->lx_write) {
+ if (can_sleep) {
+ int ret = __wait_event_interruptible(
+ channel_wqs[index].lx_queue,
+ (chan->lx_read != chan->lx_write) ||
+ sp_stopping);
+ if (ret)
+ return ret;
+
+ if (sp_stopping)
+ return 0;
+ } else
+ return 0;
+ }
+
+ return (chan->lx_write + chan->buffer_size - chan->lx_read)
+ % chan->buffer_size;
+}
+
+static inline int write_spacefree(int read, int write, int size)
+{
+ if (read == write) {
+ /*
+		 * Never fill the buffer completely, so the indexes are equal
+		 * if and only if the buffer is empty, and unequal if data is available
+ */
+ return size - 1;
+ }
+
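+	/*
+	 * Example: size = 8, read = 2, write = 6 gives
+	 * ((2 + 8 - 6) % 8) - 1 = 3 bytes free; one byte is always
+	 * kept unused so that read == write can only mean "empty".
+	 */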
+ return ((read + size - write) % size) - 1;
+}
+
+unsigned int rtlx_write_poll(int index)
+{
+ struct rtlx_channel *chan = &rtlx->channel[index];
+
+ return write_spacefree(chan->rt_read, chan->rt_write,
+ chan->buffer_size);
+}
+
+ssize_t rtlx_read(int index, void __user *buff, size_t count)
+{
+ size_t lx_write, fl = 0L;
+ struct rtlx_channel *lx;
+ unsigned long failed;
+
+ if (rtlx == NULL)
+ return -ENOSYS;
+
+ lx = &rtlx->channel[index];
+
+ mutex_lock(&channel_wqs[index].mutex);
+ smp_rmb();
+ lx_write = lx->lx_write;
+
+ /* find out how much in total */
+ count = min(count,
+ (size_t)(lx_write + lx->buffer_size - lx->lx_read)
+ % lx->buffer_size);
+
+ /* then how much from the read pointer onwards */
+ fl = min(count, (size_t)lx->buffer_size - lx->lx_read);
+
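+	/*
+	 * Example: buffer_size = 16, lx_read = 12, lx_write = 4:
+	 * 8 bytes are available in total, the first 4 of them at
+	 * offsets 12..15 and the remaining 4 at offsets 0..3.
+	 */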
+ failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl);
+ if (failed)
+ goto out;
+
+ /* and if there is anything left at the beginning of the buffer */
+ if (count - fl)
+ failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl);
+
+out:
+ count -= failed;
+
+ smp_wmb();
+ lx->lx_read = (lx->lx_read + count) % lx->buffer_size;
+ smp_wmb();
+ mutex_unlock(&channel_wqs[index].mutex);
+
+ return count;
+}
+
+ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
+{
+ struct rtlx_channel *rt;
+ unsigned long failed;
+ size_t rt_read;
+ size_t fl;
+
+ if (rtlx == NULL)
+ return -ENOSYS;
+
+ rt = &rtlx->channel[index];
+
+ mutex_lock(&channel_wqs[index].mutex);
+ smp_rmb();
+ rt_read = rt->rt_read;
+
+ /* total number of bytes to copy */
+ count = min_t(size_t, count, write_spacefree(rt_read, rt->rt_write,
+ rt->buffer_size));
+
+ /* first bit from write pointer to the end of the buffer, or count */
+ fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
+
+ failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl);
+ if (failed)
+ goto out;
+
+ /* if there's any left copy to the beginning of the buffer */
+ if (count - fl)
+ failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
+
+out:
+ count -= failed;
+
+ smp_wmb();
+ rt->rt_write = (rt->rt_write + count) % rt->buffer_size;
+ smp_wmb();
+ mutex_unlock(&channel_wqs[index].mutex);
+
+ _interrupt_sp();
+
+ return count;
+}
+
+
+static int file_open(struct inode *inode, struct file *filp)
+{
+ return rtlx_open(iminor(inode), (filp->f_flags & O_NONBLOCK) ? 0 : 1);
+}
+
+static int file_release(struct inode *inode, struct file *filp)
+{
+ return rtlx_release(iminor(inode));
+}
+
+static __poll_t file_poll(struct file *file, poll_table *wait)
+{
+ int minor = iminor(file_inode(file));
+ __poll_t mask = 0;
+
+ poll_wait(file, &channel_wqs[minor].rt_queue, wait);
+ poll_wait(file, &channel_wqs[minor].lx_queue, wait);
+
+ if (rtlx == NULL)
+ return 0;
+
+ /* data available to read? */
+ if (rtlx_read_poll(minor, 0))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ /* space to write */
+ if (rtlx_write_poll(minor))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+
+ return mask;
+}
+
+static ssize_t file_read(struct file *file, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ int minor = iminor(file_inode(file));
+
+ /* data available? */
+ if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1))
+ return 0; /* -EAGAIN makes 'cat' whine */
+
+ return rtlx_read(minor, buffer, count);
+}
+
+static ssize_t file_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int minor = iminor(file_inode(file));
+
+ /* any space left... */
+ if (!rtlx_write_poll(minor)) {
+ int ret;
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = __wait_event_interruptible(channel_wqs[minor].rt_queue,
+ rtlx_write_poll(minor));
+ if (ret)
+ return ret;
+ }
+
+ return rtlx_write(minor, buffer, count);
+}
+
+const struct file_operations rtlx_fops = {
+ .owner = THIS_MODULE,
+ .open = file_open,
+ .release = file_release,
+ .write = file_write,
+ .read = file_read,
+ .poll = file_poll,
+ .llseek = noop_llseek,
+};
+
+module_init(rtlx_module_init);
+module_exit(rtlx_module_exit);
+
+MODULE_DESCRIPTION("MIPS RTLX");
+MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
new file mode 100644
index 000000000..b449b6866
--- /dev/null
+++ b/arch/mips/kernel/scall32-o32.S
@@ -0,0 +1,225 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ * Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ */
+#include <linux/errno.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/irqflags.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/isadep.h>
+#include <asm/sysmips.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+#include <asm/war.h>
+#include <asm/asm-offsets.h>
+
+ .align 5
+NESTED(handle_sys, PT_SIZE, sp)
+ .set noat
+ SAVE_SOME
+ TRACE_IRQS_ON_RELOAD
+ STI
+ .set at
+
+ lw t1, PT_EPC(sp) # skip syscall on return
+
+ addiu t1, 4 # skip to next instruction
+ sw t1, PT_EPC(sp)
+
+ sw a3, PT_R26(sp) # save a3 for syscall restarting
+
+ /*
+ * More than four arguments. Try to deal with it by copying the
+ * stack arguments from the user stack to the kernel stack.
+ * This Sucks (TM).
+ */
+ lw t0, PT_R29(sp) # get old user stack pointer
+
+ /*
+ * We intentionally keep the kernel stack a little below the top of
+ * userspace so we don't have to do a slower byte accurate check here.
+ */
+ lw t5, TI_ADDR_LIMIT($28)
+ addu t4, t0, 32
+ and t5, t4
+ bltz t5, bad_stack # -> sp is bad
+
+ /*
+ * Ok, copy the args from the luser stack to the kernel stack.
+ */
+
+ .set push
+ .set noreorder
+ .set nomacro
+
+load_a4: user_lw(t5, 16(t0)) # argument #5 from usp
+load_a5: user_lw(t6, 20(t0)) # argument #6 from usp
+load_a6: user_lw(t7, 24(t0)) # argument #7 from usp
+load_a7: user_lw(t8, 28(t0)) # argument #8 from usp
+loads_done:
+
+ sw t5, 16(sp) # argument #5 to ksp
+ sw t6, 20(sp) # argument #6 to ksp
+ sw t7, 24(sp) # argument #7 to ksp
+ sw t8, 28(sp) # argument #8 to ksp
+ .set pop
+
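+	/*
+	 * Fixup table: a fault on any of the user loads above branches
+	 * to the matching bad_stack_aN label, which substitutes a zero
+	 * for the missing argument and resumes the copy.
+	 */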
+ .section __ex_table,"a"
+ PTR load_a4, bad_stack_a4
+ PTR load_a5, bad_stack_a5
+ PTR load_a6, bad_stack_a6
+ PTR load_a7, bad_stack_a7
+ .previous
+
+ lw t0, TI_FLAGS($28) # syscall tracing enabled?
+ li t1, _TIF_WORK_SYSCALL_ENTRY
+ and t0, t1
+ bnez t0, syscall_trace_entry # -> yes
+syscall_common:
+ subu v0, v0, __NR_O32_Linux # check syscall number
+ sltiu t0, v0, __NR_O32_Linux_syscalls
+ beqz t0, illegal_syscall
+
+ sll t0, v0, 2
+ la t1, sys_call_table
+ addu t1, t0
+ lw t2, (t1) # syscall routine
+
+ beqz t2, illegal_syscall
+
+ jalr t2 # Do The Real Thing (TM)
+
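+	/*
+	 * A return value in the range [-EMAXERRNO, -1] denotes an error;
+	 * the unsigned compare below sets t0 when v0 falls in that range.
+	 */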
+ li t0, -EMAXERRNO - 1 # error?
+ sltu t0, t0, v0
+ sw t0, PT_R7(sp) # set error flag
+ beqz t0, 1f
+
+ lw t1, PT_R2(sp) # syscall number
+ negu v0 # error
+ sw t1, PT_R0(sp) # save it for syscall restarting
+1: sw v0, PT_R2(sp) # result
+
+o32_syscall_exit:
+ j syscall_exit_partial
+
+/* ------------------------------------------------------------------------ */
+
+syscall_trace_entry:
+ SAVE_STATIC
+ move a0, sp
+
+ /*
+ * syscall number is in v0 unless we called syscall(__NR_###)
+ * where the real syscall number is in a0
+ */
+ move a1, v0
+ subu t2, v0, __NR_O32_Linux
+ bnez t2, 1f /* __NR_syscall at offset 0 */
+ lw a1, PT_R4(sp)
+
+1: jal syscall_trace_enter
+
+ bltz v0, 1f # seccomp failed? Skip syscall
+
+ RESTORE_STATIC
+ lw v0, PT_R2(sp) # Restore syscall (maybe modified)
+ lw a0, PT_R4(sp) # Restore argument registers
+ lw a1, PT_R5(sp)
+ lw a2, PT_R6(sp)
+ lw a3, PT_R7(sp)
+ j syscall_common
+
+1: j syscall_exit
+
+/* ------------------------------------------------------------------------ */
+
+ /*
+ * Our open-coded access area sanity test for the stack pointer
+ * failed. We probably should handle this case a bit more drastic.
+ */
+bad_stack:
+ li v0, EFAULT
+ sw v0, PT_R2(sp)
+ li t0, 1 # set error flag
+ sw t0, PT_R7(sp)
+ j o32_syscall_exit
+
+bad_stack_a4:
+ li t5, 0
+ b load_a5
+
+bad_stack_a5:
+ li t6, 0
+ b load_a6
+
+bad_stack_a6:
+ li t7, 0
+ b load_a7
+
+bad_stack_a7:
+ li t8, 0
+ b loads_done
+
+ /*
+ * The system call does not exist in this kernel
+ */
+illegal_syscall:
+ li v0, ENOSYS # error
+ sw v0, PT_R2(sp)
+ li t0, 1 # set error flag
+ sw t0, PT_R7(sp)
+ j o32_syscall_exit
+ END(handle_sys)
+
+ LEAF(sys_syscall)
+ subu t0, a0, __NR_O32_Linux # check syscall number
+ sltiu v0, t0, __NR_O32_Linux_syscalls
+ beqz t0, einval # do not recurse
+ sll t1, t0, 2
+ beqz v0, einval
+ lw t2, sys_call_table(t1) # syscall routine
+
+ move a0, a1 # shift argument registers
+ move a1, a2
+ move a2, a3
+ lw a3, 16(sp)
+ lw t4, 20(sp)
+ lw t5, 24(sp)
+ lw t6, 28(sp)
+ sw t4, 16(sp)
+ sw t5, 20(sp)
+ sw t6, 24(sp)
+ jr t2
+ /* Unreached */
+
+einval: li v0, -ENOSYS
+ jr ra
+ END(sys_syscall)
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /*
+ * For FPU affinity scheduling on MIPS MT processors, we need to
+ * intercept sys_sched_xxxaffinity() calls until we get a proper hook
+	 * in kernel/sched/core.c. As this is considered only temporary, we
+	 * only support these hooks for the 32-bit kernel - there is no
+	 * MIPS64 MT processor at the moment.
+ */
+#define sys_sched_setaffinity mipsmt_sys_sched_setaffinity
+#define sys_sched_getaffinity mipsmt_sys_sched_getaffinity
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+#define __SYSCALL(nr, entry) PTR entry
+ .align 2
+ .type sys_call_table, @object
+EXPORT(sys_call_table)
+#include <asm/syscall_table_32_o32.h>
+#undef __SYSCALL
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
new file mode 100644
index 000000000..35d8c86b1
--- /dev/null
+++ b/arch/mips/kernel/scall64-n32.S
@@ -0,0 +1,108 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+#include <linux/errno.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/irqflags.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+
+#ifndef CONFIG_MIPS32_O32
+/* No O32, so define handle_sys here */
+#define handle_sysn32 handle_sys
+#endif
+
+ .align 5
+NESTED(handle_sysn32, PT_SIZE, sp)
+#ifndef CONFIG_MIPS32_O32
+ .set noat
+ SAVE_SOME
+ TRACE_IRQS_ON_RELOAD
+ STI
+ .set at
+#endif
+
+ dsubu t0, v0, __NR_N32_Linux # check syscall number
+ sltiu t0, t0, __NR_N32_Linux_syscalls
+
+#ifndef CONFIG_MIPS32_O32
+ ld t1, PT_EPC(sp) # skip syscall on return
+ daddiu t1, 4 # skip to next instruction
+ sd t1, PT_EPC(sp)
+#endif
+ beqz t0, not_n32_scall
+
+ sd a3, PT_R26(sp) # save a3 for syscall restarting
+
+ li t1, _TIF_WORK_SYSCALL_ENTRY
+ LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
+ and t0, t1, t0
+ bnez t0, n32_syscall_trace_entry
+
+syscall_common:
+ dsll t0, v0, 3 # offset into table
+ ld t2, (sysn32_call_table - (__NR_N32_Linux * 8))(t0)
+
+ jalr t2 # Do The Real Thing (TM)
+
+ li t0, -EMAXERRNO - 1 # error?
+ sltu t0, t0, v0
+ sd t0, PT_R7(sp) # set error flag
+ beqz t0, 1f
+
+ ld t1, PT_R2(sp) # syscall number
+ dnegu v0 # error
+ sd t1, PT_R0(sp) # save it for syscall restarting
+1: sd v0, PT_R2(sp) # result
+
+ j syscall_exit_partial
+
+/* ------------------------------------------------------------------------ */
+
+n32_syscall_trace_entry:
+ SAVE_STATIC
+ move a0, sp
+ move a1, v0
+ jal syscall_trace_enter
+
+ bltz v0, 1f # seccomp failed? Skip syscall
+
+ RESTORE_STATIC
+ ld v0, PT_R2(sp) # Restore syscall (maybe modified)
+ ld a0, PT_R4(sp) # Restore argument registers
+ ld a1, PT_R5(sp)
+ ld a2, PT_R6(sp)
+ ld a3, PT_R7(sp)
+ ld a4, PT_R8(sp)
+ ld a5, PT_R9(sp)
+
+ dsubu t2, v0, __NR_N32_Linux # check (new) syscall number
+ sltiu t0, t2, __NR_N32_Linux_syscalls
+ beqz t0, not_n32_scall
+
+ j syscall_common
+
+1: j syscall_exit
+
+not_n32_scall:
+ /* This is not an n32 compatibility syscall, pass it on to
+ the n64 syscall handlers. */
+ j handle_sys64
+
+ END(handle_sysn32)
+
+#define __SYSCALL(nr, entry) PTR entry
+ .type sysn32_call_table, @object
+EXPORT(sysn32_call_table)
+#include <asm/syscall_table_64_n32.h>
+#undef __SYSCALL
diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S
new file mode 100644
index 000000000..23b2e2b16
--- /dev/null
+++ b/arch/mips/kernel/scall64-n64.S
@@ -0,0 +1,117 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+#include <linux/errno.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/irqflags.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/asm-offsets.h>
+#include <asm/sysmips.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+#include <asm/war.h>
+
+#ifndef CONFIG_BINFMT_ELF32
+/* Neither O32 nor N32, so define handle_sys here */
+#define handle_sys64 handle_sys
+#endif
+
+ .align 5
+NESTED(handle_sys64, PT_SIZE, sp)
+#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
+ /*
+	 * When 32-bit compatibility is configured, scall_o32.S
+	 * has already done this.
+ */
+ .set noat
+ SAVE_SOME
+ TRACE_IRQS_ON_RELOAD
+ STI
+ .set at
+#endif
+
+#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
+ ld t1, PT_EPC(sp) # skip syscall on return
+ daddiu t1, 4 # skip to next instruction
+ sd t1, PT_EPC(sp)
+#endif
+
+ sd a3, PT_R26(sp) # save a3 for syscall restarting
+
+ li t1, _TIF_WORK_SYSCALL_ENTRY
+ LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
+ and t0, t1, t0
+ bnez t0, syscall_trace_entry
+
+syscall_common:
+ dsubu t2, v0, __NR_64_Linux
+ sltiu t0, t2, __NR_64_Linux_syscalls
+ beqz t0, illegal_syscall
+
+ dsll t0, t2, 3 # offset into table
+ dla t2, sys_call_table
+ daddu t0, t2, t0
+ ld t2, (t0) # syscall routine
+ beqz t2, illegal_syscall
+
+ jalr t2 # Do The Real Thing (TM)
+
+ li t0, -EMAXERRNO - 1 # error?
+ sltu t0, t0, v0
+ sd t0, PT_R7(sp) # set error flag
+ beqz t0, 1f
+
+ ld t1, PT_R2(sp) # syscall number
+ dnegu v0 # error
+ sd t1, PT_R0(sp) # save it for syscall restarting
+1: sd v0, PT_R2(sp) # result
+
+n64_syscall_exit:
+ j syscall_exit_partial
+
+/* ------------------------------------------------------------------------ */
+
+syscall_trace_entry:
+ SAVE_STATIC
+ move a0, sp
+ move a1, v0
+ jal syscall_trace_enter
+
+ bltz v0, 1f # seccomp failed? Skip syscall
+
+ RESTORE_STATIC
+ ld v0, PT_R2(sp) # Restore syscall (maybe modified)
+ ld a0, PT_R4(sp) # Restore argument registers
+ ld a1, PT_R5(sp)
+ ld a2, PT_R6(sp)
+ ld a3, PT_R7(sp)
+ ld a4, PT_R8(sp)
+ ld a5, PT_R9(sp)
+ j syscall_common
+
+1: j syscall_exit
+
+illegal_syscall:
+ /* This also isn't a 64-bit syscall, throw an error. */
+ li v0, ENOSYS # error
+ sd v0, PT_R2(sp)
+ li t0, 1 # set error flag
+ sd t0, PT_R7(sp)
+ j n64_syscall_exit
+ END(handle_sys64)
+
+#define __SYSCALL(nr, entry) PTR entry
+ .align 3
+ .type sys_call_table, @object
+EXPORT(sys_call_table)
+#include <asm/syscall_table_64_n64.h>
+#undef __SYSCALL
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
new file mode 100644
index 000000000..50c9a57e0
--- /dev/null
+++ b/arch/mips/kernel/scall64-o32.S
@@ -0,0 +1,221 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 - 2000, 2001 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ * Copyright (C) 2004 Thiemo Seufer
+ *
+ * Hairy: the userspace application uses a different argument-passing
+ * convention than the kernel, so we have to translate things from o32
+ * to ABI64 calling convention. 64-bit syscalls are also processed
+ * here for now.
+ */
+#include <linux/errno.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/irqflags.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+#include <asm/sysmips.h>
+
+ .align 5
+NESTED(handle_sys, PT_SIZE, sp)
+ .set noat
+ SAVE_SOME
+ TRACE_IRQS_ON_RELOAD
+ STI
+ .set at
+ ld t1, PT_EPC(sp) # skip syscall on return
+
+ dsubu t0, v0, __NR_O32_Linux # check syscall number
+ sltiu t0, t0, __NR_O32_Linux_syscalls
+ daddiu t1, 4 # skip to next instruction
+ sd t1, PT_EPC(sp)
+ beqz t0, not_o32_scall
+#if 0
+ SAVE_ALL
+ move a1, v0
+ ASM_PRINT("Scall %ld\n")
+ RESTORE_ALL
+#endif
+
+ /* We don't want to stumble over broken sign extensions from
+	   userland. O32 never uses the upper half. */
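+	/* On MIPS64, sll with a shift of 0 sign-extends the low 32 bits. */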
+ sll a0, a0, 0
+ sll a1, a1, 0
+ sll a2, a2, 0
+ sll a3, a3, 0
+
+ sd a3, PT_R26(sp) # save a3 for syscall restarting
+
+ /*
+ * More than four arguments. Try to deal with it by copying the
+ * stack arguments from the user stack to the kernel stack.
+ * This Sucks (TM).
+ *
+ * We intentionally keep the kernel stack a little below the top of
+ * userspace so we don't have to do a slower byte accurate check here.
+ */
+ ld t0, PT_R29(sp) # get old user stack pointer
+ daddu t1, t0, 32
+ bltz t1, bad_stack
+
+load_a4: lw a4, 16(t0) # argument #5 from usp
+load_a5: lw a5, 20(t0) # argument #6 from usp
+load_a6: lw a6, 24(t0) # argument #7 from usp
+load_a7: lw a7, 28(t0) # argument #8 from usp
+loads_done:
+
+ .section __ex_table,"a"
+ PTR load_a4, bad_stack_a4
+ PTR load_a5, bad_stack_a5
+ PTR load_a6, bad_stack_a6
+ PTR load_a7, bad_stack_a7
+ .previous
+
+ li t1, _TIF_WORK_SYSCALL_ENTRY
+ LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
+ and t0, t1, t0
+ bnez t0, trace_a_syscall
+
+syscall_common:
+ dsll t0, v0, 3 # offset into table
+ ld t2, (sys32_call_table - (__NR_O32_Linux * 8))(t0)
+
+ jalr t2 # Do The Real Thing (TM)
+
+ li t0, -EMAXERRNO - 1 # error?
+ sltu t0, t0, v0
+ sd t0, PT_R7(sp) # set error flag
+ beqz t0, 1f
+
+ ld t1, PT_R2(sp) # syscall number
+ dnegu v0 # error
+ sd t1, PT_R0(sp) # save it for syscall restarting
+1: sd v0, PT_R2(sp) # result
+
+o32_syscall_exit:
+ j syscall_exit_partial
+
+/* ------------------------------------------------------------------------ */
+
+trace_a_syscall:
+ SAVE_STATIC
+ sd a4, PT_R8(sp) # Save argument registers
+ sd a5, PT_R9(sp)
+ sd a6, PT_R10(sp)
+ sd a7, PT_R11(sp) # For indirect syscalls
+
+ move a0, sp
+ /*
+ * absolute syscall number is in v0 unless we called syscall(__NR_###)
+	 * where the real syscall number is in a0.
+	 * Note: NR_syscall is the first O32 syscall, but the macro is
+	 * only defined when compiling with -mabi=32 (CONFIG_32BIT);
+	 * therefore __NR_O32_Linux (4000) is used.
+ */
+ .set push
+ .set reorder
+ subu t1, v0, __NR_O32_Linux
+ move a1, v0
+ bnez t1, 1f /* __NR_syscall at offset 0 */
+ ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
+ .set pop
+
+1: jal syscall_trace_enter
+
+ bltz v0, 1f # seccomp failed? Skip syscall
+
+ RESTORE_STATIC
+ ld v0, PT_R2(sp) # Restore syscall (maybe modified)
+ ld a0, PT_R4(sp) # Restore argument registers
+ ld a1, PT_R5(sp)
+ ld a2, PT_R6(sp)
+ ld a3, PT_R7(sp)
+ ld a4, PT_R8(sp)
+ ld a5, PT_R9(sp)
+ ld a6, PT_R10(sp)
+ ld a7, PT_R11(sp) # For indirect syscalls
+
+ dsubu t0, v0, __NR_O32_Linux # check (new) syscall number
+ sltiu t0, t0, __NR_O32_Linux_syscalls
+ beqz t0, not_o32_scall
+
+ j syscall_common
+
+1: j syscall_exit
+
+/* ------------------------------------------------------------------------ */
+
+ /*
+	 * The stack pointer for a call with more than 4 arguments is bad.
+ */
+bad_stack:
+ li v0, EFAULT
+ sd v0, PT_R2(sp)
+ li t0, 1 # set error flag
+ sd t0, PT_R7(sp)
+ j o32_syscall_exit
+
+bad_stack_a4:
+ li a4, 0
+ b load_a5
+
+bad_stack_a5:
+ li a5, 0
+ b load_a6
+
+bad_stack_a6:
+ li a6, 0
+ b load_a7
+
+bad_stack_a7:
+ li a7, 0
+ b loads_done
+
+not_o32_scall:
+ /*
+ * This is not an o32 compatibility syscall, pass it on
+ * to the 64-bit syscall handlers.
+ */
+#ifdef CONFIG_MIPS32_N32
+ j handle_sysn32
+#else
+ j handle_sys64
+#endif
+ END(handle_sys)
+
+LEAF(sys32_syscall)
+ subu t0, a0, __NR_O32_Linux # check syscall number
+ sltiu v0, t0, __NR_O32_Linux_syscalls
+ beqz t0, einval # do not recurse
+ dsll t1, t0, 3
+ beqz v0, einval
+ ld t2, sys32_call_table(t1) # syscall routine
+
+ move a0, a1 # shift argument registers
+ move a1, a2
+ move a2, a3
+ move a3, a4
+ move a4, a5
+ move a5, a6
+ move a6, a7
+ jr t2
+ /* Unreached */
+
+einval: li v0, -ENOSYS
+ jr ra
+ END(sys32_syscall)
+
+#define __SYSCALL(nr, entry) PTR entry
+ .align 3
+ .type sys32_call_table,@object
+EXPORT(sys32_call_table)
+#include <asm/syscall_table_64_o32.h>
+#undef __SYSCALL
diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c
new file mode 100644
index 000000000..0a9bd7b09
--- /dev/null
+++ b/arch/mips/kernel/segment.c
@@ -0,0 +1,104 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/cpu.h>
+#include <asm/debug.h>
+#include <asm/mipsregs.h>
+
+static void build_segment_config(char *str, unsigned int cfg)
+{
+ unsigned int am;
+ static const char * const am_str[] = {
+ "UK", "MK", "MSK", "MUSK", "MUSUK", "USK",
+ "RSRVD", "UUSK"};
+
+ /* Segment access mode. */
+ am = (cfg & MIPS_SEGCFG_AM) >> MIPS_SEGCFG_AM_SHIFT;
+ str += sprintf(str, "%-5s", am_str[am]);
+
+ /*
+ * Access modes MK, MSK and MUSK are mapped segments. Therefore
+	 * there is no direct physical address mapping, unless the segment
+	 * becomes unmapped uncached at error level due to EU.
+ */
+ if ((am == 0) || (am > 3) || (cfg & MIPS_SEGCFG_EU))
+ str += sprintf(str, " %03lx",
+ ((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT));
+ else
+ str += sprintf(str, " UND");
+
+ if ((am == 0) || (am > 3))
+ str += sprintf(str, " %01ld",
+ ((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT));
+ else
+ str += sprintf(str, " U");
+
+ /* Exception configuration. */
+ str += sprintf(str, " %01ld\n",
+ ((cfg & MIPS_SEGCFG_EU) >> MIPS_SEGCFG_EU_SHIFT));
+}
+
+static int show_segments(struct seq_file *m, void *v)
+{
+ unsigned int segcfg;
+ char str[42];
+
+ seq_puts(m, "Segment Virtual Size Access Mode Physical Caching EU\n");
+ seq_puts(m, "------- ------- ---- ----------- -------- ------- --\n");
+
+ segcfg = read_c0_segctl0();
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 0 e0000000 512M %s", str);
+
+ segcfg >>= 16;
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 1 c0000000 512M %s", str);
+
+ segcfg = read_c0_segctl1();
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 2 a0000000 512M %s", str);
+
+ segcfg >>= 16;
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 3 80000000 512M %s", str);
+
+ segcfg = read_c0_segctl2();
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 4 40000000 1G %s", str);
+
+ segcfg >>= 16;
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 5 00000000 1G %s\n", str);
+
+ return 0;
+}
+
+static int segments_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_segments, NULL);
+}
+
+static const struct file_operations segments_fops = {
+ .open = segments_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init segments_info(void)
+{
+ if (cpu_has_segments)
+ debugfs_create_file("segments", S_IRUGO, mips_debugfs_dir, NULL,
+ &segments_fops);
+ return 0;
+}
+
+device_initcall(segments_info);
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
new file mode 100644
index 000000000..66643cb65
--- /dev/null
+++ b/arch/mips/kernel/setup.c
@@ -0,0 +1,844 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 1995 Waldorf Electronics
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
+ * Copyright (C) 1996 Stoned Elipot
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/screen_info.h>
+#include <linux/memblock.h>
+#include <linux/initrd.h>
+#include <linux/root_dev.h>
+#include <linux/highmem.h>
+#include <linux/console.h>
+#include <linux/pfn.h>
+#include <linux/debugfs.h>
+#include <linux/kexec.h>
+#include <linux/sizes.h>
+#include <linux/device.h>
+#include <linux/dma-map-ops.h>
+#include <linux/decompress/generic.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/dmi.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/bugs.h>
+#include <asm/cache.h>
+#include <asm/cdmm.h>
+#include <asm/cpu.h>
+#include <asm/debug.h>
+#include <asm/dma-coherence.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/smp-ops.h>
+#include <asm/prom.h>
+
+#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
+const char __section(".appended_dtb") __appended_dtb[0x100000];
+#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
+
+struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
+
+EXPORT_SYMBOL(cpu_data);
+
+#ifdef CONFIG_VT
+struct screen_info screen_info;
+#endif
+
+/*
+ * Setup information
+ *
+ * These are initialized so they are in the .data section
+ */
+unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
+
+EXPORT_SYMBOL(mips_machtype);
+
+static char __initdata command_line[COMMAND_LINE_SIZE];
+char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
+
+#ifdef CONFIG_CMDLINE_BOOL
+static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE;
+#else
+static const char builtin_cmdline[] __initconst = "";
+#endif
+
+/*
+ * mips_io_port_base is the beginning of the address space to which x86-style
+ * I/O ports are mapped.
+ */
+unsigned long mips_io_port_base = -1;
+EXPORT_SYMBOL(mips_io_port_base);
+
+static struct resource code_resource = { .name = "Kernel code", };
+static struct resource data_resource = { .name = "Kernel data", };
+static struct resource bss_resource = { .name = "Kernel bss", };
+
+static void *detect_magic __initdata = detect_memory_region;
+
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+unsigned long ARCH_PFN_OFFSET;
+EXPORT_SYMBOL(ARCH_PFN_OFFSET);
+#endif
+
+void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
+{
+ void *dm = &detect_magic;
+ phys_addr_t size;
+
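+	/*
+	 * Probe by doubling the size until the detect_magic marker is
+	 * mirrored at dm + size, i.e. until the address wraps around
+	 * past the end of the installed RAM.
+	 */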
+ for (size = sz_min; size < sz_max; size <<= 1) {
+ if (!memcmp(dm, dm + size, sizeof(detect_magic)))
+ break;
+ }
+
+ pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
+ ((unsigned long long) size) / SZ_1M,
+ (unsigned long long) start,
+ ((unsigned long long) sz_min) / SZ_1M,
+ ((unsigned long long) sz_max) / SZ_1M);
+
+ memblock_add(start, size);
+}
+
+/*
+ * Manage initrd
+ */
+#ifdef CONFIG_BLK_DEV_INITRD
+
+static int __init rd_start_early(char *p)
+{
+ unsigned long start = memparse(p, &p);
+
+#ifdef CONFIG_64BIT
+ /* Guess if the sign extension was forgotten by bootloader */
+ if (start < XKPHYS)
+ start = (int)start;
+#endif
+ initrd_start = start;
+ initrd_end += start;
+ return 0;
+}
+early_param("rd_start", rd_start_early);
+
+static int __init rd_size_early(char *p)
+{
+ initrd_end += memparse(p, &p);
+ return 0;
+}
+early_param("rd_size", rd_size_early);
+
+/* Returns the next free pfn after the initrd. */
+static unsigned long __init init_initrd(void)
+{
+ unsigned long end;
+
+ /*
+	 * Board-specific code or the command line parser should have
+	 * already set up initrd_start and initrd_end. In that case,
+	 * perform sanity checks and use them if all looks good.
+ */
+ if (!initrd_start || initrd_end <= initrd_start)
+ goto disable;
+
+ if (initrd_start & ~PAGE_MASK) {
+ pr_err("initrd start must be page aligned\n");
+ goto disable;
+ }
+
+ /*
+	 * Sanitize initrd addresses. For example, firmware
+	 * can't know whether it needs to pass them as
+	 * 64-bit values if the kernel has been built purely
+	 * 32-bit. We also need to switch from KSEG0 to XKPHYS
+	 * addresses now, so the code can safely use __pa().
+ */
+ end = __pa(initrd_end);
+ initrd_end = (unsigned long)__va(end);
+ initrd_start = (unsigned long)__va(__pa(initrd_start));
+
+ if (initrd_start < PAGE_OFFSET) {
+ pr_err("initrd start < PAGE_OFFSET\n");
+ goto disable;
+ }
+
+ ROOT_DEV = Root_RAM0;
+ return PFN_UP(end);
+disable:
+ initrd_start = 0;
+ initrd_end = 0;
+ return 0;
+}
+
+/* In some conditions (e.g. big endian bootloader with a little endian
+ kernel), the initrd might appear byte swapped. Try to detect this and
+ byte swap it if needed. */
+static void __init maybe_bswap_initrd(void)
+{
+#if defined(CONFIG_CPU_CAVIUM_OCTEON)
+ u64 buf;
+
+ /* Check for CPIO signature */
+ if (!memcmp((void *)initrd_start, "070701", 6))
+ return;
+
+ /* Check for compressed initrd */
+ if (decompress_method((unsigned char *)initrd_start, 8, NULL))
+ return;
+
+ /* Try again with a byte swapped header */
+ buf = swab64p((u64 *)initrd_start);
+ if (!memcmp(&buf, "070701", 6) ||
+ decompress_method((unsigned char *)(&buf), 8, NULL)) {
+ unsigned long i;
+
+ pr_info("Byteswapped initrd detected\n");
+ for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
+ swab64s((u64 *)i);
+ }
+#endif
+}
+
+static void __init finalize_initrd(void)
+{
+ unsigned long size = initrd_end - initrd_start;
+
+ if (size == 0) {
+ printk(KERN_INFO "Initrd not found or empty");
+ goto disable;
+ }
+ if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
+ printk(KERN_ERR "Initrd extends beyond end of memory");
+ goto disable;
+ }
+
+ maybe_bswap_initrd();
+
+ memblock_reserve(__pa(initrd_start), size);
+ initrd_below_start_ok = 1;
+
+ pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
+ initrd_start, size);
+ return;
+disable:
+ printk(KERN_CONT " - disabling initrd\n");
+ initrd_start = 0;
+ initrd_end = 0;
+}
+
+#else /* !CONFIG_BLK_DEV_INITRD */
+
+static unsigned long __init init_initrd(void)
+{
+ return 0;
+}
+
+#define finalize_initrd() do {} while (0)
+
+#endif
+
+/*
+ * Initialize the bootmem allocator. It also sets up initrd-related data
+ * if needed.
+ */
+#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA))
+
+static void __init bootmem_init(void)
+{
+ init_initrd();
+ finalize_initrd();
+}
+
+#else /* !CONFIG_SGI_IP27 */
+
+static void __init bootmem_init(void)
+{
+ phys_addr_t ramstart, ramend;
+ unsigned long start, end;
+ int i;
+
+ ramstart = memblock_start_of_DRAM();
+ ramend = memblock_end_of_DRAM();
+
+ /*
+ * Sanity check any INITRD first. We don't take it into account
+	 * for bootmem setup initially; we rely on the end of the kernel
+	 * code as our memory range starting point. Once bootmem is initialized we
+ * will reserve the area used for the initrd.
+ */
+ init_initrd();
+
+ /* Reserve memory occupied by kernel. */
+ memblock_reserve(__pa_symbol(&_text),
+ __pa_symbol(&_end) - __pa_symbol(&_text));
+
+ /* max_low_pfn is not a number of pages but the end pfn of low mem */
+
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+ ARCH_PFN_OFFSET = PFN_UP(ramstart);
+#else
+ /*
+ * Reserve any memory between the start of RAM and PHYS_OFFSET
+ */
+ if (ramstart > PHYS_OFFSET)
+ memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
+
+ if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
+ pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
+ (unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
+ (unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
+ }
+#endif
+
+ min_low_pfn = ARCH_PFN_OFFSET;
+ max_pfn = PFN_DOWN(ramend);
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
+ /*
+ * Skip highmem here so we get an accurate max_low_pfn if low
+ * memory stops short of high memory.
+ * If the region overlaps HIGHMEM_START, end is clipped so
+		 * max_low_pfn excludes the highmem portion.
+ */
+ if (start >= PFN_DOWN(HIGHMEM_START))
+ continue;
+ if (end > PFN_DOWN(HIGHMEM_START))
+ end = PFN_DOWN(HIGHMEM_START);
+ if (end > max_low_pfn)
+ max_low_pfn = end;
+ }
+
+ if (min_low_pfn >= max_low_pfn)
+ panic("Incorrect memory mapping !!!");
+
+ if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
+ max_low_pfn = PFN_DOWN(HIGHMEM_START);
+#ifdef CONFIG_HIGHMEM
+ highstart_pfn = max_low_pfn;
+ highend_pfn = max_pfn;
+#else
+ max_pfn = max_low_pfn;
+#endif
+ }
+
+ /*
+ * Reserve initrd memory if needed.
+ */
+ finalize_initrd();
+}
+
+#endif /* CONFIG_SGI_IP27 */
+
+static int usermem __initdata;
+
+static int __init early_parse_mem(char *p)
+{
+ phys_addr_t start, size;
+
+ /*
+ * If a user specifies memory size, we
+ * blow away any automatically generated
+ * size.
+ */
+ if (usermem == 0) {
+ usermem = 1;
+ memblock_remove(memblock_start_of_DRAM(),
+ memblock_end_of_DRAM() - memblock_start_of_DRAM());
+ }
+ start = 0;
+ size = memparse(p, &p);
+ if (*p == '@')
+ start = memparse(p + 1, &p);
+
+ memblock_add(start, size);
+
+ return 0;
+}
+early_param("mem", early_parse_mem);
+
+static int __init early_parse_memmap(char *p)
+{
+ char *oldp;
+ u64 start_at, mem_size;
+
+ if (!p)
+ return -EINVAL;
+
+ if (!strncmp(p, "exactmap", 8)) {
+ pr_err("\"memmap=exactmap\" invalid on MIPS\n");
+ return 0;
+ }
+
+ oldp = p;
+ mem_size = memparse(p, &p);
+ if (p == oldp)
+ return -EINVAL;
+
+ if (*p == '@') {
+ start_at = memparse(p+1, &p);
+ memblock_add(start_at, mem_size);
+ } else if (*p == '#') {
+ pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
+ return -EINVAL;
+ } else if (*p == '$') {
+ start_at = memparse(p+1, &p);
+ memblock_add(start_at, mem_size);
+ memblock_reserve(start_at, mem_size);
+ } else {
+ pr_err("\"memmap\" invalid format!\n");
+ return -EINVAL;
+ }
+
+ if (*p == '\0') {
+ usermem = 1;
+ return 0;
+ } else
+ return -EINVAL;
+}
+early_param("memmap", early_parse_memmap);
+
+#ifdef CONFIG_PROC_VMCORE
+static unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
+static int __init early_parse_elfcorehdr(char *p)
+{
+ phys_addr_t start, end;
+ u64 i;
+
+ setup_elfcorehdr = memparse(p, &p);
+
+ for_each_mem_range(i, &start, &end) {
+ if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
+ /*
+ * Reserve from the elf core header to the end of
+ * the memory segment, that should all be kdump
+ * reserved memory.
+ */
+ setup_elfcorehdr_size = end - setup_elfcorehdr;
+ break;
+ }
+ }
+ /*
+ * If we don't find it in the memory map, then we shouldn't
+ * have to worry about it, as the new kernel won't use it.
+ */
+ return 0;
+}
+early_param("elfcorehdr", early_parse_elfcorehdr);
+#endif
+
+#ifdef CONFIG_KEXEC
+
+/* 64M alignment for crash kernel regions */
+#define CRASH_ALIGN SZ_64M
+#define CRASH_ADDR_MAX SZ_512M
+
+static void __init mips_parse_crashkernel(void)
+{
+ unsigned long long total_mem;
+ unsigned long long crash_size, crash_base;
+ int ret;
+
+ total_mem = memblock_phys_mem_size();
+ ret = parse_crashkernel(boot_command_line, total_mem,
+ &crash_size, &crash_base);
+ if (ret != 0 || crash_size <= 0)
+ return;
+
+ if (crash_base <= 0) {
+ crash_base = memblock_find_in_range(CRASH_ALIGN, CRASH_ADDR_MAX,
+ crash_size, CRASH_ALIGN);
+ if (!crash_base) {
+ pr_warn("crashkernel reservation failed - No suitable area found.\n");
+ return;
+ }
+ } else {
+ unsigned long long start;
+
+ start = memblock_find_in_range(crash_base, crash_base + crash_size,
+ crash_size, 1);
+ if (start != crash_base) {
+ pr_warn("Invalid memory region reserved for crash kernel\n");
+ return;
+ }
+ }
+
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+}
+
+static void __init request_crashkernel(struct resource *res)
+{
+ int ret;
+
+ if (crashk_res.start == crashk_res.end)
+ return;
+
+ ret = request_resource(res, &crashk_res);
+ if (!ret)
+ pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
+ (unsigned long)(resource_size(&crashk_res) >> 20),
+ (unsigned long)(crashk_res.start >> 20));
+}
+#else /* !defined(CONFIG_KEXEC) */
+static void __init mips_parse_crashkernel(void)
+{
+}
+
+static void __init request_crashkernel(struct resource *res)
+{
+}
+#endif /* !defined(CONFIG_KEXEC) */
+
+static void __init check_kernel_sections_mem(void)
+{
+ phys_addr_t start = __pa_symbol(&_text);
+ phys_addr_t size = __pa_symbol(&_end) - start;
+
+ if (!memblock_is_region_memory(start, size)) {
+ pr_info("Kernel sections are not in the memory maps\n");
+ memblock_add(start, size);
+ }
+}
+
+static void __init bootcmdline_append(const char *s, size_t max)
+{
+ if (!s[0] || !max)
+ return;
+
+ if (boot_command_line[0])
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+
+ strlcat(boot_command_line, s, max);
+}
+
+#ifdef CONFIG_OF_EARLY_FLATTREE
+
+static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ bool *dt_bootargs = data;
+ const char *p;
+ int l;
+
+ if (depth != 1 || !data ||
+ (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
+ return 0;
+
+ p = of_get_flat_dt_prop(node, "bootargs", &l);
+ if (p != NULL && l > 0) {
+ bootcmdline_append(p, min(l, COMMAND_LINE_SIZE));
+ *dt_bootargs = true;
+ }
+
+ return 1;
+}
+
+#endif /* CONFIG_OF_EARLY_FLATTREE */
+
+static void __init bootcmdline_init(void)
+{
+ bool dt_bootargs = false;
+
+ /*
+ * If CMDLINE_OVERRIDE is enabled then initializing the command line is
+ * trivial - we simply use the built-in command line unconditionally &
+ * unmodified.
+ */
+ if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
+ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+ return;
+ }
+
+ /*
+ * If the user specified a built-in command line &
+ * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is
+ * prepended to arguments from the bootloader or DT so we'll copy them
+ * to the start of boot_command_line here. Otherwise, empty
+ * boot_command_line to undo anything early_init_dt_scan_chosen() did.
+ */
+ if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
+ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+ else
+ boot_command_line[0] = 0;
+
+#ifdef CONFIG_OF_EARLY_FLATTREE
+ /*
+ * If we're configured to take boot arguments from DT, look for those
+ * now.
+ */
+ if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
+ IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
+ of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
+#endif
+
+ /*
+ * If we didn't get any arguments from DT (regardless of whether that's
+ * because we weren't configured to look for them, or because we looked
+ * & found none) then we'll take arguments from the bootloader.
+ * plat_mem_setup() should have filled arcs_cmdline with arguments from
+ * the bootloader.
+ */
+ if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs)
+ bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE);
+
+ /*
+ * If the user specified a built-in command line & we didn't already
+ * prepend it, we append it to boot_command_line here.
+ */
+ if (IS_ENABLED(CONFIG_CMDLINE_BOOL) &&
+ !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
+ bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE);
+}
+
+/*
+ * arch_mem_init - initialize memory management subsystem
+ *
+ * o plat_mem_setup() detects the memory configuration and will record detected
+ * memory areas using memblock_add.
+ *
+ * At this stage the memory configuration of the system is known to the
+ * kernel but generic memory management system is still entirely uninitialized.
+ *
+ * o bootmem_init()
+ * o sparse_init()
+ * o paging_init()
+ * o dma_contiguous_reserve()
+ *
+ * At this stage the bootmem allocator is ready to use.
+ *
+ * NOTE: historically plat_mem_setup did the entire platform initialization.
+ * This was rather impractical because it meant plat_mem_setup had to
+ * get by without any kind of memory allocator. To keep old code from
+ * breaking, plat_setup was just renamed to plat_mem_setup and a second platform
+ * initialization hook for anything else was introduced.
+ */
+static void __init arch_mem_init(char **cmdline_p)
+{
+ /* call board setup routine */
+ plat_mem_setup();
+ memblock_set_bottom_up(true);
+
+ bootcmdline_init();
+ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = command_line;
+
+ parse_early_param();
+
+ if (usermem)
+ pr_info("User-defined physical RAM map overwrite\n");
+
+ check_kernel_sections_mem();
+
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
+
+#ifndef CONFIG_NUMA
+ memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
+#endif
+ bootmem_init();
+
+ /*
+ * Prevent memblock from allocating high memory.
+	 * This cannot be done before max_low_pfn is detected, so up
+	 * to this point it is only possible to reserve physical memory
+	 * with memblock_reserve; memblock_alloc* can be used
+	 * only after this point.
+ */
+ memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+
+#ifdef CONFIG_PROC_VMCORE
+ if (setup_elfcorehdr && setup_elfcorehdr_size) {
+ printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
+ setup_elfcorehdr, setup_elfcorehdr_size);
+ memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size);
+ }
+#endif
+
+ mips_parse_crashkernel();
+#ifdef CONFIG_KEXEC
+ if (crashk_res.start != crashk_res.end)
+ memblock_reserve(crashk_res.start, resource_size(&crashk_res));
+#endif
+ device_tree_init();
+
+ /*
+	 * In order to reduce the possibility of a kernel panic when failing to
+	 * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
+	 * as little low memory as possible before plat_swiotlb_setup(), so
+	 * make sparse_init() use top-down allocation.
+ */
+ memblock_set_bottom_up(false);
+ sparse_init();
+ memblock_set_bottom_up(true);
+
+ plat_swiotlb_setup();
+
+ dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
+
+ /* Reserve for hibernation. */
+ memblock_reserve(__pa_symbol(&__nosave_begin),
+ __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));
+
+ fdt_init_reserved_mem();
+
+ memblock_dump_all();
+
+ early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
+}
+
+static void __init resource_init(void)
+{
+ phys_addr_t start, end;
+ u64 i;
+
+ if (UNCAC_BASE != IO_BASE)
+ return;
+
+ code_resource.start = __pa_symbol(&_text);
+ code_resource.end = __pa_symbol(&_etext) - 1;
+ data_resource.start = __pa_symbol(&_etext);
+ data_resource.end = __pa_symbol(&_edata) - 1;
+ bss_resource.start = __pa_symbol(&__bss_start);
+ bss_resource.end = __pa_symbol(&__bss_stop) - 1;
+
+ for_each_mem_range(i, &start, &end) {
+ struct resource *res;
+
+ res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct resource));
+
+ res->start = start;
+ /*
+ * In memblock, end points to the first byte after the
+		 * range while in resources, end points to the last byte in
+ * the range.
+ */
+ res->end = end - 1;
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ res->name = "System RAM";
+
+ request_resource(&iomem_resource, res);
+
+ /*
+ * We don't know which RAM region contains kernel data,
+ * so we try it repeatedly and let the resource manager
+ * test it.
+ */
+ request_resource(res, &code_resource);
+ request_resource(res, &data_resource);
+ request_resource(res, &bss_resource);
+ request_crashkernel(res);
+ }
+}
+
+#ifdef CONFIG_SMP
+static void __init prefill_possible_map(void)
+{
+ int i, possible = num_possible_cpus();
+
+ if (possible > nr_cpu_ids)
+ possible = nr_cpu_ids;
+
+ for (i = 0; i < possible; i++)
+ set_cpu_possible(i, true);
+ for (; i < NR_CPUS; i++)
+ set_cpu_possible(i, false);
+
+ nr_cpu_ids = possible;
+}
+#else
+static inline void prefill_possible_map(void) {}
+#endif
+
+void __init setup_arch(char **cmdline_p)
+{
+ cpu_probe();
+ mips_cm_probe();
+ prom_init();
+
+ setup_early_fdc_console();
+#ifdef CONFIG_EARLY_PRINTK
+ setup_early_printk();
+#endif
+ cpu_report();
+ check_bugs_early();
+
+#if defined(CONFIG_VT)
+#if defined(CONFIG_VGA_CONSOLE)
+ conswitchp = &vga_con;
+#endif
+#endif
+
+ arch_mem_init(cmdline_p);
+ dmi_setup();
+
+ resource_init();
+ plat_smp_setup();
+ prefill_possible_map();
+
+ cpu_cache_init();
+ paging_init();
+}
+
+unsigned long kernelsp[NR_CPUS];
+unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
+
+#ifdef CONFIG_USE_OF
+unsigned long fw_passed_dtb;
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+struct dentry *mips_debugfs_dir;
+static int __init debugfs_mips(void)
+{
+ mips_debugfs_dir = debugfs_create_dir("mips", NULL);
+ return 0;
+}
+arch_initcall(debugfs_mips);
+#endif
+
+#ifdef CONFIG_DMA_MAYBE_COHERENT
+/* User defined DMA coherency from command line. */
+enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
+EXPORT_SYMBOL_GPL(coherentio);
+int hw_coherentio; /* Actual hardware supported DMA coherency setting. */
+
+static int __init setcoherentio(char *str)
+{
+ coherentio = IO_COHERENCE_ENABLED;
+ pr_info("Hardware DMA cache coherency (command line)\n");
+ return 0;
+}
+early_param("coherentio", setcoherentio);
+
+static int __init setnocoherentio(char *str)
+{
+ coherentio = IO_COHERENCE_DISABLED;
+ pr_info("Software DMA cache coherency (command line)\n");
+ return 0;
+}
+early_param("nocoherentio", setnocoherentio);
+#endif
+
+void __init arch_cpu_finalize_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ cpu_data[cpu].udelay_val = loops_per_jiffy;
+ check_bugs32();
+
+ if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
+ check_bugs64();
+}
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
new file mode 100644
index 000000000..f50d48435
--- /dev/null
+++ b/arch/mips/kernel/signal-common.h
@@ -0,0 +1,43 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+
+#ifndef __SIGNAL_COMMON_H
+#define __SIGNAL_COMMON_H
+
+/* #define DEBUG_SIG */
+
+#ifdef DEBUG_SIG
+# define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
+#else
+# define DEBUGP(fmt, args...)
+#endif
+
+/*
+ * Determine which stack to use.
+ */
+extern void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+ size_t frame_size);
+/* Check and clear pending FPU exceptions in saved CSR */
+extern int fpcsr_pending(unsigned int __user *fpcsr);
+
+/* Make sure we will not lose FPU ownership */
+#define lock_fpu_owner() ({ preempt_disable(); pagefault_disable(); })
+#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
+
+/* Assembly functions to move context to/from the FPU */
+extern asmlinkage int
+_save_fp_context(void __user *fpregs, void __user *csr);
+extern asmlinkage int
+_restore_fp_context(void __user *fpregs, void __user *csr);
+
+extern asmlinkage int _save_msa_all_upper(void __user *buf);
+extern asmlinkage int _restore_msa_all_upper(void __user *buf);
+
+#endif /* __SIGNAL_COMMON_H */
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
new file mode 100644
index 000000000..f1e985109
--- /dev/null
+++ b/arch/mips/kernel/signal.c
@@ -0,0 +1,962 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ */
+#include <linux/cache.h>
+#include <linux/context_tracking.h>
+#include <linux/irqflags.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/personality.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/uprobes.h>
+#include <linux/compiler.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/tracehook.h>
+
+#include <asm/abi.h>
+#include <asm/asm.h>
+#include <linux/bitops.h>
+#include <asm/cacheflush.h>
+#include <asm/fpu.h>
+#include <asm/sim.h>
+#include <asm/ucontext.h>
+#include <asm/cpu-features.h>
+#include <asm/war.h>
+#include <asm/dsp.h>
+#include <asm/inst.h>
+#include <asm/msa.h>
+
+#include "signal-common.h"
+
+static int (*save_fp_context)(void __user *sc);
+static int (*restore_fp_context)(void __user *sc);
+
+struct sigframe {
+ u32 sf_ass[4]; /* argument save space for o32 */
+ u32 sf_pad[2]; /* Was: signal trampoline */
+
+ /* Matches struct ucontext from its uc_mcontext field onwards */
+ struct sigcontext sf_sc;
+ sigset_t sf_mask;
+ unsigned long long sf_extcontext[];
+};
+
+struct rt_sigframe {
+ u32 rs_ass[4]; /* argument save space for o32 */
+ u32 rs_pad[2]; /* Was: signal trampoline */
+ struct siginfo rs_info;
+ struct ucontext rs_uc;
+};
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+/*
+ * Copy thread-saved context to/from a signal context presumed to be on the
+ * user stack, and therefore accessed with the appropriate macros from uaccess.h.
+ */
+static int copy_fp_to_sigcontext(void __user *sc)
+{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+ int i;
+ int err = 0;
+ int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
+
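+	/*
+	 * With 32-bit FP registers the odd-numbered registers alias the
+	 * upper halves of the even-numbered 64-bit ones, so only every
+	 * second 64-bit slot needs to be copied.
+	 */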
+ for (i = 0; i < NUM_FPU_REGS; i += inc) {
+ err |=
+ __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
+ &fpregs[i]);
+ }
+ err |= __put_user(current->thread.fpu.fcr31, csr);
+
+ return err;
+}
+
+static int copy_fp_from_sigcontext(void __user *sc)
+{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+ int i;
+ int err = 0;
+ int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
+ u64 fpr_val;
+
+ for (i = 0; i < NUM_FPU_REGS; i += inc) {
+ err |= __get_user(fpr_val, &fpregs[i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
+ }
+ err |= __get_user(current->thread.fpu.fcr31, csr);
+
+ return err;
+}
+
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+static int copy_fp_to_sigcontext(void __user *sc)
+{
+ return 0;
+}
+
+static int copy_fp_from_sigcontext(void __user *sc)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_MIPS_FP_SUPPORT */
+
+/*
+ * Wrappers for the assembly _{save,restore}_fp_context functions.
+ */
+static int save_hw_fp_context(void __user *sc)
+{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+
+ return _save_fp_context(fpregs, csr);
+}
+
+static int restore_hw_fp_context(void __user *sc)
+{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+
+ return _restore_fp_context(fpregs, csr);
+}
+
+/*
+ * Extended context handling.
+ */
+
+static inline void __user *sc_to_extcontext(void __user *sc)
+{
+ struct ucontext __user *uc;
+
+ /*
+ * We can just pretend the sigcontext is always embedded in a struct
+ * ucontext here, because the offset from sigcontext to extended
+ * context is the same in the struct sigframe case.
+ */
+ uc = container_of(sc, struct ucontext, uc_mcontext);
+ return &uc->uc_extcontext;
+}
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+static int save_msa_extcontext(void __user *buf)
+{
+ struct msa_extcontext __user *msa = buf;
+ uint64_t val;
+ int i, err;
+
+ if (!thread_msa_context_live())
+ return 0;
+
+ /*
+ * Ensure that we can't lose the live MSA context between checking
+ * for it & writing it to memory.
+ */
+ preempt_disable();
+
+ if (is_msa_enabled()) {
+ /*
+ * There are no EVA versions of the vector register load/store
+ * instructions, so MSA context has to be saved to kernel memory
+ * and then copied to user memory. The save to kernel memory
+ * should already have been done when handling scalar FP
+ * context.
+ */
+ BUG_ON(IS_ENABLED(CONFIG_EVA));
+
+ err = __put_user(read_msa_csr(), &msa->csr);
+ err |= _save_msa_all_upper(&msa->wr);
+
+ preempt_enable();
+ } else {
+ preempt_enable();
+
+ err = __put_user(current->thread.fpu.msacsr, &msa->csr);
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ val = get_fpr64(&current->thread.fpu.fpr[i], 1);
+ err |= __put_user(val, &msa->wr[i]);
+ }
+ }
+
+ err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
+ err |= __put_user(sizeof(*msa), &msa->ext.size);
+
+ return err ? -EFAULT : sizeof(*msa);
+}
+
+static int restore_msa_extcontext(void __user *buf, unsigned int size)
+{
+ struct msa_extcontext __user *msa = buf;
+ unsigned long long val;
+ unsigned int csr;
+ int i, err;
+
+ if (size != sizeof(*msa))
+ return -EINVAL;
+
+ err = get_user(csr, &msa->csr);
+ if (err)
+ return err;
+
+ preempt_disable();
+
+ if (is_msa_enabled()) {
+ /*
+ * There are no EVA versions of the vector register load/store
+ * instructions, so MSA context has to be copied to kernel
+ * memory and later loaded to registers. The same is true of
+ * scalar FP context, so FPU & MSA should have already been
+ * disabled whilst handling scalar FP context.
+ */
+ BUG_ON(IS_ENABLED(CONFIG_EVA));
+
+ write_msa_csr(csr);
+ err |= _restore_msa_all_upper(&msa->wr);
+ preempt_enable();
+ } else {
+ preempt_enable();
+
+ current->thread.fpu.msacsr = csr;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |= __get_user(val, &msa->wr[i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 1, val);
+ }
+ }
+
+ return err;
+}
+
+#else /* !CONFIG_CPU_HAS_MSA */
+
+static int save_msa_extcontext(void __user *buf)
+{
+ return 0;
+}
+
+static int restore_msa_extcontext(void __user *buf, unsigned int size)
+{
+ return SIGSYS;
+}
+
+#endif /* !CONFIG_CPU_HAS_MSA */
+
+static int save_extcontext(void __user *buf)
+{
+ int sz;
+
+ sz = save_msa_extcontext(buf);
+ if (sz < 0)
+ return sz;
+ buf += sz;
+
+ /* If no context was saved then trivially return */
+ if (!sz)
+ return 0;
+
+ /* Write the end marker */
+ if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
+ return -EFAULT;
+
+ sz += sizeof(((struct extcontext *)NULL)->magic);
+ return sz;
+}
+
+static int restore_extcontext(void __user *buf)
+{
+ struct extcontext ext;
+ int err;
+
+ while (1) {
+ err = __get_user(ext.magic, (unsigned int *)buf);
+ if (err)
+ return err;
+
+ if (ext.magic == END_EXTCONTEXT_MAGIC)
+ return 0;
+
+ err = __get_user(ext.size, (unsigned int *)(buf
+ + offsetof(struct extcontext, size)));
+ if (err)
+ return err;
+
+ switch (ext.magic) {
+ case MSA_EXTCONTEXT_MAGIC:
+ err = restore_msa_extcontext(buf, ext.size);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ return err;
+
+ buf += ext.size;
+ }
+}
+
+/*
+ * Helper routines
+ */
+int protected_save_fp_context(void __user *sc)
+{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+ uint32_t __user *used_math = sc + abi->off_sc_used_math;
+ unsigned int used, ext_sz;
+ int err;
+
+ used = used_math() ? USED_FP : 0;
+ if (!used)
+ goto fp_done;
+
+ if (!test_thread_flag(TIF_32BIT_FPREGS))
+ used |= USED_FR1;
+ if (test_thread_flag(TIF_HYBRID_FPREGS))
+ used |= USED_HYBRID_FPRS;
+
+ /*
+ * EVA does not have userland equivalents of ldc1 or sdc1, so
+ * save to the kernel FP context & copy that to userland below.
+ */
+ if (IS_ENABLED(CONFIG_EVA))
+ lose_fpu(1);
+
+ while (1) {
+ lock_fpu_owner();
+ if (is_fpu_owner()) {
+ err = save_fp_context(sc);
+ unlock_fpu_owner();
+ } else {
+ unlock_fpu_owner();
+ err = copy_fp_to_sigcontext(sc);
+ }
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __put_user(0, &fpregs[0]) |
+ __put_user(0, &fpregs[31]) |
+ __put_user(0, csr);
+ if (err)
+ return err; /* really bad sigcontext */
+ }
+
+fp_done:
+ ext_sz = err = save_extcontext(sc_to_extcontext(sc));
+ if (err < 0)
+ return err;
+ used |= ext_sz ? USED_EXTCONTEXT : 0;
+
+ return __put_user(used, used_math);
+}
+
+int protected_restore_fp_context(void __user *sc)
+{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+ uint32_t __user *used_math = sc + abi->off_sc_used_math;
+ unsigned int used;
+ int err, sig = 0, tmp __maybe_unused;
+
+ err = __get_user(used, used_math);
+ conditional_used_math(used & USED_FP);
+
+ /*
+ * The signal handler may have used FPU; give it up if the program
+ * doesn't want it following sigreturn.
+ */
+ if (err || !(used & USED_FP))
+ lose_fpu(0);
+ if (err)
+ return err;
+ if (!(used & USED_FP))
+ goto fp_done;
+
+ err = sig = fpcsr_pending(csr);
+ if (err < 0)
+ return err;
+
+ /*
+ * EVA does not have userland equivalents of ldc1 or sdc1, so we
+ * disable the FPU here such that the code below simply copies to
+ * the kernel FP context.
+ */
+ if (IS_ENABLED(CONFIG_EVA))
+ lose_fpu(0);
+
+ while (1) {
+ lock_fpu_owner();
+ if (is_fpu_owner()) {
+ err = restore_fp_context(sc);
+ unlock_fpu_owner();
+ } else {
+ unlock_fpu_owner();
+ err = copy_fp_from_sigcontext(sc);
+ }
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __get_user(tmp, &fpregs[0]) |
+ __get_user(tmp, &fpregs[31]) |
+ __get_user(tmp, csr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+
+fp_done:
+ if (!err && (used & USED_EXTCONTEXT))
+ err = restore_extcontext(sc_to_extcontext(sc));
+
+ return err ?: sig;
+}
+
+int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ int err = 0;
+ int i;
+
+ err |= __put_user(regs->cp0_epc, &sc->sc_pc);
+
+ err |= __put_user(0, &sc->sc_regs[0]);
+ for (i = 1; i < 32; i++)
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ err |= __put_user(regs->acx, &sc->sc_acx);
+#endif
+ err |= __put_user(regs->hi, &sc->sc_mdhi);
+ err |= __put_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __put_user(mfhi1(), &sc->sc_hi1);
+ err |= __put_user(mflo1(), &sc->sc_lo1);
+ err |= __put_user(mfhi2(), &sc->sc_hi2);
+ err |= __put_user(mflo2(), &sc->sc_lo2);
+ err |= __put_user(mfhi3(), &sc->sc_hi3);
+ err |= __put_user(mflo3(), &sc->sc_lo3);
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
+ }
+
+
+ /*
+ * Save FPU state to signal context. Signal handler
+ * will "inherit" current FPU state.
+ */
+ err |= protected_save_fp_context(sc);
+
+ return err;
+}
+
+static size_t extcontext_max_size(void)
+{
+ size_t sz = 0;
+
+ /*
+ * The assumption here is that between this point & the point at which
+ * the extended context is saved the size of the context should only
+ * ever be able to shrink (if the task is preempted), but never grow.
+ * That is, what this function returns is an upper bound on the size of
+ * the extended context for the current task at the current time.
+ */
+
+ if (thread_msa_context_live())
+ sz += sizeof(struct msa_extcontext);
+
+ /* If any context is saved then we'll append the end marker */
+ if (sz)
+ sz += sizeof(((struct extcontext *)NULL)->magic);
+
+ return sz;
+}
+
+int fpcsr_pending(unsigned int __user *fpcsr)
+{
+ int err, sig = 0;
+ unsigned int csr, enabled;
+
+ err = __get_user(csr, fpcsr);
+ enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
+ /*
+ * If the signal handler set any FPU exceptions, clear them and
+ * send SIGFPE.
+ */
+ if (csr & enabled) {
+ csr &= ~enabled;
+ err |= __put_user(csr, fpcsr);
+ sig = SIGFPE;
+ }
+ return err ?: sig;
+}
+
+int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ unsigned long treg;
+ int err = 0;
+ int i;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ err |= __get_user(regs->cp0_epc, &sc->sc_pc);
+
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ err |= __get_user(regs->acx, &sc->sc_acx);
+#endif
+ err |= __get_user(regs->hi, &sc->sc_mdhi);
+ err |= __get_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
+ err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
+ err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
+ err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
+ err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
+ err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
+ err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
+ }
+
+ for (i = 1; i < 32; i++)
+ err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
+
+ return err ?: protected_restore_fp_context(sc);
+}
+
+#ifdef CONFIG_WAR_ICACHE_REFILLS
+#define SIGMASK ~(cpu_icache_line_size()-1)
+#else
+#define SIGMASK ALMASK
+#endif
+
+void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+ size_t frame_size)
+{
+ unsigned long sp;
+
+ /* Leave space for potential extended context */
+ frame_size += extcontext_max_size();
+
+ /* Default to using normal stack */
+ sp = regs->regs[29];
+
+ /*
+ * The FPU emulator may have its own trampoline active just
+ * above the user stack, 16 bytes before the next lowest
+ * 16-byte boundary. Try to avoid trashing it.
+ */
+ sp -= 32;
+
+ sp = sigsp(sp, ksig);
+
+ return (void __user *)((sp - frame_size) & SIGMASK);
+}
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+
+#ifdef CONFIG_TRAD_SIGNALS
+SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
+{
+ return sys_rt_sigsuspend(uset, sizeof(sigset_t));
+}
+#endif
+
+#ifdef CONFIG_TRAD_SIGNALS
+SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
+ struct sigaction __user *, oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+ int err = 0;
+
+ if (act) {
+ old_sigset_t mask;
+
+ if (!access_ok(act, sizeof(*act)))
+ return -EFAULT;
+ err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
+ err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ err |= __get_user(mask, &act->sa_mask.sig[0]);
+ if (err)
+ return -EFAULT;
+
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (!access_ok(oact, sizeof(*oact)))
+ return -EFAULT;
+ err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
+ err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
+ err |= __put_user(0, &oact->sa_mask.sig[1]);
+ err |= __put_user(0, &oact->sa_mask.sig[2]);
+ err |= __put_user(0, &oact->sa_mask.sig[3]);
+ if (err)
+ return -EFAULT;
+ }
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_TRAD_SIGNALS
+asmlinkage void sys_sigreturn(void)
+{
+ struct sigframe __user *frame;
+ struct pt_regs *regs;
+ sigset_t blocked;
+ int sig;
+
+ regs = current_pt_regs();
+ frame = (struct sigframe __user *)regs->regs[29];
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
+ goto badframe;
+
+ set_current_blocked(&blocked);
+
+ sig = restore_sigcontext(regs, &frame->sf_sc);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig);
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tsyscall_exit"
+ : /* no outputs */
+ : "r" (regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV);
+}
+#endif /* CONFIG_TRAD_SIGNALS */
+
+asmlinkage void sys_rt_sigreturn(void)
+{
+ struct rt_sigframe __user *frame;
+ struct pt_regs *regs;
+ sigset_t set;
+ int sig;
+
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe __user *)regs->regs[29];
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig);
+
+ if (restore_altstack(&frame->rs_uc.uc_stack))
+ goto badframe;
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tsyscall_exit"
+ : /* no outputs */
+ : "r" (regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV);
+}
+
+#ifdef CONFIG_TRAD_SIGNALS
+static int setup_frame(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct sigframe __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(frame, sizeof (*frame)))
+ return -EFAULT;
+
+ err |= setup_sigcontext(regs, &frame->sf_sc);
+ err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to struct sigcontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to the
+ * struct sigframe.
+ */
+ regs->regs[ 4] = ksig->sig;
+ regs->regs[ 5] = 0;
+ regs->regs[ 6] = (unsigned long) &frame->sf_sc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) sig_return;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, regs->regs[31]);
+ return 0;
+}
+#endif
+
+static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct rt_sigframe __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(frame, sizeof (*frame)))
+ return -EFAULT;
+
+ /* Create siginfo. */
+ err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->rs_uc.uc_flags);
+ err |= __put_user(NULL, &frame->rs_uc.uc_link);
+ err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
+ err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
+ err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to ucontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to
+ * the struct rt_sigframe.
+ */
+ regs->regs[ 4] = ksig->sig;
+ regs->regs[ 5] = (unsigned long) &frame->rs_info;
+ regs->regs[ 6] = (unsigned long) &frame->rs_uc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) sig_return;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, regs->regs[31]);
+
+ return 0;
+}
+
+struct mips_abi mips_abi = {
+#ifdef CONFIG_TRAD_SIGNALS
+ .setup_frame = setup_frame,
+#endif
+ .setup_rt_frame = setup_rt_frame,
+ .restart = __NR_restart_syscall,
+
+ .off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
+ .off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
+ .off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
+
+ .vdso = &vdso_image,
+};
+
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ sigset_t *oldset = sigmask_to_save();
+ int ret;
+ struct mips_abi *abi = current->thread.abi;
+ void *vdso = current->mm->context.vdso;
+
+ /*
+ * If we were emulating a delay slot instruction, exit that frame such
+ * that addresses in the sigframe are as expected for userland and we
+ * don't have a problem if we reuse the thread's frame for an
+ * instruction within the signal handler.
+ */
+ dsemul_thread_rollback(regs);
+
+ if (regs->regs[0]) {
+ switch(regs->regs[2]) {
+ case ERESTART_RESTARTBLOCK:
+ case ERESTARTNOHAND:
+ regs->regs[2] = EINTR;
+ break;
+ case ERESTARTSYS:
+ if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+ regs->regs[2] = EINTR;
+ break;
+ }
+ fallthrough;
+ case ERESTARTNOINTR:
+ regs->regs[7] = regs->regs[26];
+ regs->regs[2] = regs->regs[0];
+ regs->cp0_epc -= 4;
+ }
+
+ regs->regs[0] = 0; /* Don't deal with this again. */
+ }
+
+ rseq_signal_deliver(ksig, regs);
+
+ if (sig_uses_siginfo(&ksig->ka, abi))
+ ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
+ ksig, regs, oldset);
+ else
+ ret = abi->setup_frame(vdso + abi->vdso->off_sigreturn,
+ ksig, regs, oldset);
+
+ signal_setup_done(ret, ksig, 0);
+}
+
+static void do_signal(struct pt_regs *regs)
+{
+ struct ksignal ksig;
+
+ if (get_signal(&ksig)) {
+ /* Whee! Actually deliver the signal. */
+ handle_signal(&ksig, regs);
+ return;
+ }
+
+ if (regs->regs[0]) {
+ switch (regs->regs[2]) {
+ case ERESTARTNOHAND:
+ case ERESTARTSYS:
+ case ERESTARTNOINTR:
+ regs->regs[2] = regs->regs[0];
+ regs->regs[7] = regs->regs[26];
+ regs->cp0_epc -= 4;
+ break;
+
+ case ERESTART_RESTARTBLOCK:
+ regs->regs[2] = current->thread.abi->restart;
+ regs->regs[7] = regs->regs[26];
+ regs->cp0_epc -= 4;
+ break;
+ }
+ regs->regs[0] = 0; /* Don't deal with this again. */
+ }
+
+ /*
+ * If there's no signal to deliver, we just put the saved sigmask
+ * back
+ */
+ restore_saved_sigmask();
+}
+
+/*
+ * notification of userspace execution resumption
+ * - triggered by the TIF_WORK_MASK flags
+ */
+asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
+ __u32 thread_info_flags)
+{
+ local_irq_enable();
+
+ user_exit();
+
+ if (thread_info_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
+ /* deal with pending signal delivery */
+ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
+ do_signal(regs);
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ tracehook_notify_resume(regs);
+ rseq_handle_notify_resume(NULL, regs);
+ }
+
+ user_enter();
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT)
+static int smp_save_fp_context(void __user *sc)
+{
+ return raw_cpu_has_fpu
+ ? save_hw_fp_context(sc)
+ : copy_fp_to_sigcontext(sc);
+}
+
+static int smp_restore_fp_context(void __user *sc)
+{
+ return raw_cpu_has_fpu
+ ? restore_hw_fp_context(sc)
+ : copy_fp_from_sigcontext(sc);
+}
+#endif
+
+static int signal_setup(void)
+{
+ /*
+ * The offset from sigcontext to extended context should be the same
+ * regardless of the type of signal, such that userland can always know
+ * where to look if it wishes to find the extended context structures.
+ */
+ BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) -
+ offsetof(struct sigframe, sf_sc)) !=
+ (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
+ offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));
+
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT)
+ /* For now just do the cpu_has_fpu check when the functions are invoked */
+ save_fp_context = smp_save_fp_context;
+ restore_fp_context = smp_restore_fp_context;
+#else
+ if (cpu_has_fpu) {
+ save_fp_context = save_hw_fp_context;
+ restore_fp_context = restore_hw_fp_context;
+ } else {
+ save_fp_context = copy_fp_to_sigcontext;
+ restore_fp_context = copy_fp_from_sigcontext;
+ }
+#endif /* CONFIG_SMP */
+
+ return 0;
+}
+
+arch_initcall(signal_setup);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
new file mode 100644
index 000000000..59b896543
--- /dev/null
+++ b/arch/mips/kernel/signal32.c
@@ -0,0 +1,78 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000, 2006 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2016, Imagination Technologies Ltd.
+ */
+#include <linux/compat.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/syscalls.h>
+
+#include <asm/compat-signal.h>
+#include <linux/uaccess.h>
+#include <asm/unistd.h>
+
+#include "signal-common.h"
+
+/* 32-bit compatibility types */
+
+typedef unsigned int __sighandler32_t;
+typedef void (*vfptr_t)(void);
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+
+asmlinkage int sys32_sigsuspend(compat_sigset_t __user *uset)
+{
+ return compat_sys_rt_sigsuspend(uset, sizeof(compat_sigset_t));
+}
+
+SYSCALL_DEFINE3(32_sigaction, long, sig, const struct compat_sigaction __user *, act,
+ struct compat_sigaction __user *, oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+ int err = 0;
+
+ if (act) {
+ old_sigset_t mask;
+ s32 handler;
+
+ if (!access_ok(act, sizeof(*act)))
+ return -EFAULT;
+ err |= __get_user(handler, &act->sa_handler);
+ new_ka.sa.sa_handler = (void __user *)(s64)handler;
+ err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ err |= __get_user(mask, &act->sa_mask.sig[0]);
+ if (err)
+ return -EFAULT;
+
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (!access_ok(oact, sizeof(*oact)))
+ return -EFAULT;
+ err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ err |= __put_user((u32)(u64)old_ka.sa.sa_handler,
+ &oact->sa_handler);
+ err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
+ err |= __put_user(0, &oact->sa_mask.sig[1]);
+ err |= __put_user(0, &oact->sa_mask.sig[2]);
+ err |= __put_user(0, &oact->sa_mask.sig[3]);
+ if (err)
+ return -EFAULT;
+ }
+
+ return ret;
+}
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
new file mode 100644
index 000000000..7bd00fad6
--- /dev/null
+++ b/arch/mips/kernel/signal_n32.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2003 Broadcom Corporation
+ */
+#include <linux/cache.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/compat.h>
+#include <linux/bitops.h>
+
+#include <asm/abi.h>
+#include <asm/asm.h>
+#include <asm/cacheflush.h>
+#include <asm/compat-signal.h>
+#include <asm/sim.h>
+#include <linux/uaccess.h>
+#include <asm/ucontext.h>
+#include <asm/fpu.h>
+#include <asm/cpu-features.h>
+#include <asm/war.h>
+
+#include "signal-common.h"
+
+/*
+ * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
+ */
+#define __NR_N32_restart_syscall 6214
+
+extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *);
+extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
+
+struct ucontextn32 {
+ u32 uc_flags;
+ s32 uc_link;
+ compat_stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+struct rt_sigframe_n32 {
+ u32 rs_ass[4]; /* argument save space for o32 */
+ u32 rs_pad[2]; /* Was: signal trampoline */
+ struct compat_siginfo rs_info;
+ struct ucontextn32 rs_uc;
+};
+
+asmlinkage void sysn32_rt_sigreturn(void)
+{
+ struct rt_sigframe_n32 __user *frame;
+ struct pt_regs *regs;
+ sigset_t set;
+ int sig;
+
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe_n32 __user *)regs->regs[29];
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig);
+
+ if (compat_restore_altstack(&frame->rs_uc.uc_stack))
+ goto badframe;
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tsyscall_exit"
+ : /* no outputs */
+ : "r" (regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV);
+}
+
+static int setup_rt_frame_n32(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct rt_sigframe_n32 __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(frame, sizeof (*frame)))
+ return -EFAULT;
+
+ /* Create siginfo. */
+ err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->rs_uc.uc_flags);
+ err |= __put_user(0, &frame->rs_uc.uc_link);
+ err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
+ err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
+ err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to ucontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to
+ * the struct rt_sigframe.
+ */
+ regs->regs[ 4] = ksig->sig;
+ regs->regs[ 5] = (unsigned long) &frame->rs_info;
+ regs->regs[ 6] = (unsigned long) &frame->rs_uc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) sig_return;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, regs->regs[31]);
+
+ return 0;
+}
+
+struct mips_abi mips_abi_n32 = {
+ .setup_rt_frame = setup_rt_frame_n32,
+ .restart = __NR_N32_restart_syscall,
+
+ .off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
+ .off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
+ .off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
+
+ .vdso = &vdso_image_n32,
+};
diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c
new file mode 100644
index 000000000..299a7a28c
--- /dev/null
+++ b/arch/mips/kernel/signal_o32.c
@@ -0,0 +1,290 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000, 2006 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2016, Imagination Technologies Ltd.
+ */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+
+#include <asm/abi.h>
+#include <asm/compat-signal.h>
+#include <asm/dsp.h>
+#include <asm/sim.h>
+#include <asm/unistd.h>
+
+#include "signal-common.h"
+
+/*
+ * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
+ */
+#define __NR_O32_restart_syscall 4253
+
+struct sigframe32 {
+ u32 sf_ass[4]; /* argument save space for o32 */
+ u32 sf_pad[2]; /* Was: signal trampoline */
+ struct sigcontext32 sf_sc;
+ compat_sigset_t sf_mask;
+};
+
+struct ucontext32 {
+ u32 uc_flags;
+ s32 uc_link;
+ compat_stack_t uc_stack;
+ struct sigcontext32 uc_mcontext;
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+struct rt_sigframe32 {
+ u32 rs_ass[4]; /* argument save space for o32 */
+ u32 rs_pad[2]; /* Was: signal trampoline */
+ compat_siginfo_t rs_info;
+ struct ucontext32 rs_uc;
+};
+
+static int setup_sigcontext32(struct pt_regs *regs,
+ struct sigcontext32 __user *sc)
+{
+ int err = 0;
+ int i;
+
+ err |= __put_user(regs->cp0_epc, &sc->sc_pc);
+
+ err |= __put_user(0, &sc->sc_regs[0]);
+ for (i = 1; i < 32; i++)
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
+ err |= __put_user(regs->hi, &sc->sc_mdhi);
+ err |= __put_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
+ err |= __put_user(mfhi1(), &sc->sc_hi1);
+ err |= __put_user(mflo1(), &sc->sc_lo1);
+ err |= __put_user(mfhi2(), &sc->sc_hi2);
+ err |= __put_user(mflo2(), &sc->sc_lo2);
+ err |= __put_user(mfhi3(), &sc->sc_hi3);
+ err |= __put_user(mflo3(), &sc->sc_lo3);
+ }
+
+ /*
+ * Save FPU state to signal context. Signal handler
+ * will "inherit" current FPU state.
+ */
+ err |= protected_save_fp_context(sc);
+
+ return err;
+}
+
+static int restore_sigcontext32(struct pt_regs *regs,
+ struct sigcontext32 __user *sc)
+{
+ int err = 0;
+ s32 treg;
+ int i;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ err |= __get_user(regs->cp0_epc, &sc->sc_pc);
+ err |= __get_user(regs->hi, &sc->sc_mdhi);
+ err |= __get_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
+ err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
+ err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
+ err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
+ err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
+ err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
+ err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
+ }
+
+ for (i = 1; i < 32; i++)
+ err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
+
+ return err ?: protected_restore_fp_context(sc);
+}
+
+static int setup_frame_32(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct sigframe32 __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(frame, sizeof (*frame)))
+ return -EFAULT;
+
+ err |= setup_sigcontext32(regs, &frame->sf_sc);
+ err |= __copy_conv_sigset_to_user(&frame->sf_mask, set);
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to struct sigcontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to the
+ * struct sigframe.
+ */
+ regs->regs[ 4] = ksig->sig;
+ regs->regs[ 5] = 0;
+ regs->regs[ 6] = (unsigned long) &frame->sf_sc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) sig_return;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, regs->regs[31]);
+
+ return 0;
+}
+
+asmlinkage void sys32_rt_sigreturn(void)
+{
+ struct rt_sigframe32 __user *frame;
+ struct pt_regs *regs;
+ sigset_t set;
+ int sig;
+
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe32 __user *)regs->regs[29];
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ sig = restore_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig);
+
+ if (compat_restore_altstack(&frame->rs_uc.uc_stack))
+ goto badframe;
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tsyscall_exit"
+ : /* no outputs */
+ : "r" (regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV);
+}
+
+static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct rt_sigframe32 __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(frame, sizeof (*frame)))
+ return -EFAULT;
+
+ /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
+ err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->rs_uc.uc_flags);
+ err |= __put_user(0, &frame->rs_uc.uc_link);
+ err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
+ err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
+ err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to ucontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to
+ * the struct rt_sigframe32.
+ */
+ regs->regs[ 4] = ksig->sig;
+ regs->regs[ 5] = (unsigned long) &frame->rs_info;
+ regs->regs[ 6] = (unsigned long) &frame->rs_uc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) sig_return;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, regs->regs[31]);
+
+ return 0;
+}
+
+/*
+ * o32 compatibility on 64-bit kernels, without DSP ASE
+ */
+struct mips_abi mips_abi_32 = {
+ .setup_frame = setup_frame_32,
+ .setup_rt_frame = setup_rt_frame_32,
+ .restart = __NR_O32_restart_syscall,
+
+ .off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs),
+ .off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr),
+ .off_sc_used_math = offsetof(struct sigcontext32, sc_used_math),
+
+ .vdso = &vdso_image_o32,
+};
+
+
+asmlinkage void sys32_sigreturn(void)
+{
+ struct sigframe32 __user *frame;
+ struct pt_regs *regs;
+ sigset_t blocked;
+ int sig;
+
+ regs = current_pt_regs();
+ frame = (struct sigframe32 __user *)regs->regs[29];
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
+ goto badframe;
+
+ set_current_blocked(&blocked);
+
+ sig = restore_sigcontext32(regs, &frame->sf_sc);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig);
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tsyscall_exit"
+ : /* no outputs */
+ : "r" (regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV);
+}
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
new file mode 100644
index 000000000..1dbfb5aad
--- /dev/null
+++ b/arch/mips/kernel/smp-bmips.c
@@ -0,0 +1,667 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
+ *
+ * SMP support for BMIPS
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/reboot.h>
+#include <linux/io.h>
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+
+#include <asm/time.h>
+#include <asm/processor.h>
+#include <asm/bootinfo.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/mipsregs.h>
+#include <asm/bmips.h>
+#include <asm/traps.h>
+#include <asm/barrier.h>
+#include <asm/cpu-features.h>
+
+static int __maybe_unused max_cpus = 1;
+
+/* these may be configured by the platform code */
+int bmips_smp_enabled = 1;
+int bmips_cpu_offset;
+cpumask_t bmips_booted_mask;
+unsigned long bmips_tp1_irqs = IE_IRQ1;
+
+#define RESET_FROM_KSEG0 0x80080800
+#define RESET_FROM_KSEG1 0xa0080800
+
+static void bmips_set_reset_vec(int cpu, u32 val);
+
+#ifdef CONFIG_SMP
+
+/* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */
+unsigned long bmips_smp_boot_sp;
+unsigned long bmips_smp_boot_gp;
+
+static void bmips43xx_send_ipi_single(int cpu, unsigned int action);
+static void bmips5000_send_ipi_single(int cpu, unsigned int action);
+static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id);
+static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id);
+
+/* SW interrupts 0,1 are used for interprocessor signaling */
+#define IPI0_IRQ (MIPS_CPU_IRQ_BASE + 0)
+#define IPI1_IRQ (MIPS_CPU_IRQ_BASE + 1)
+
+#define CPUNUM(cpu, shift) (((cpu) + bmips_cpu_offset) << (shift))
+#define ACTION_CLR_IPI(cpu, ipi) (0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8))
+#define ACTION_SET_IPI(cpu, ipi) (0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8))
+#define ACTION_BOOT_THREAD(cpu) (0x08 | CPUNUM(cpu, 0))
+
+static void __init bmips_smp_setup(void)
+{
+ int i, cpu = 1, boot_cpu = 0;
+ int cpu_hw_intr;
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ /* arbitration priority */
+ clear_c0_brcm_cmt_ctrl(0x30);
+
+ /* NBK and weak order flags */
+ set_c0_brcm_config_0(0x30000);
+
+ /* Find out if we are running on TP0 or TP1 */
+ boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
+
+ /*
+ * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other
+ * thread
+ * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
+ * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
+ */
+ if (boot_cpu == 0)
+ cpu_hw_intr = 0x02;
+ else
+ cpu_hw_intr = 0x1d;
+
+ change_c0_brcm_cmt_intr(0xf8018000,
+ (cpu_hw_intr << 27) | (0x03 << 15));
+
+ /* single core, 2 threads (2 pipelines) */
+ max_cpus = 2;
+
+ break;
+ case CPU_BMIPS5000:
+ /* enable raceless SW interrupts */
+ set_c0_brcm_config(0x03 << 22);
+
+ /* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */
+ change_c0_brcm_mode(0x1f << 27, 0x02 << 27);
+
+ /* N cores, 2 threads per core */
+ max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1;
+
+ /* clear any pending SW interrupts */
+ for (i = 0; i < max_cpus; i++) {
+ write_c0_brcm_action(ACTION_CLR_IPI(i, 0));
+ write_c0_brcm_action(ACTION_CLR_IPI(i, 1));
+ }
+
+ break;
+ default:
+ max_cpus = 1;
+ }
+
+ if (!bmips_smp_enabled)
+ max_cpus = 1;
+
+ /* this can be overridden by the BSP */
+ if (!board_ebase_setup)
+ board_ebase_setup = &bmips_ebase_setup;
+
+ __cpu_number_map[boot_cpu] = 0;
+ __cpu_logical_map[0] = boot_cpu;
+
+ for (i = 0; i < max_cpus; i++) {
+ if (i != boot_cpu) {
+ __cpu_number_map[i] = cpu;
+ __cpu_logical_map[cpu] = i;
+ cpu++;
+ }
+ set_cpu_possible(i, 1);
+ set_cpu_present(i, 1);
+ }
+}
+
+/*
+ * IPI IRQ setup - runs on CPU0
+ */
+static void bmips_prepare_cpus(unsigned int max_cpus)
+{
+ irqreturn_t (*bmips_ipi_interrupt)(int irq, void *dev_id);
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ bmips_ipi_interrupt = bmips43xx_ipi_interrupt;
+ break;
+ case CPU_BMIPS5000:
+ bmips_ipi_interrupt = bmips5000_ipi_interrupt;
+ break;
+ default:
+ return;
+ }
+
+ if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
+ IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
+ panic("Can't request IPI0 interrupt");
+ if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
+ IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
+ panic("Can't request IPI1 interrupt");
+}
+
+/*
+ * Tell the hardware to boot CPUx - runs on CPU0
+ */
+static int bmips_boot_secondary(int cpu, struct task_struct *idle)
+{
+ bmips_smp_boot_sp = __KSTK_TOS(idle);
+ bmips_smp_boot_gp = (unsigned long)task_thread_info(idle);
+ mb();
+
+ /*
+ * Initial boot sequence for secondary CPU:
+ * bmips_reset_nmi_vec @ a000_0000 ->
+ * bmips_smp_entry ->
+ * plat_wired_tlb_setup (cached function call; optional) ->
+ * start_secondary (cached jump)
+ *
+ * Warm restart sequence:
+ * play_dead WAIT loop ->
+ * bmips_smp_int_vec @ BMIPS_WARM_RESTART_VEC ->
+ * eret to play_dead ->
+ * bmips_secondary_reentry ->
+ * start_secondary
+ */
+
+ pr_info("SMP: Booting CPU%d...\n", cpu);
+
+ if (cpumask_test_cpu(cpu, &bmips_booted_mask)) {
+ /* kseg1 might not exist if this CPU enabled XKS01 */
+ bmips_set_reset_vec(cpu, RESET_FROM_KSEG0);
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ bmips43xx_send_ipi_single(cpu, 0);
+ break;
+ case CPU_BMIPS5000:
+ bmips5000_send_ipi_single(cpu, 0);
+ break;
+ }
+ } else {
+ bmips_set_reset_vec(cpu, RESET_FROM_KSEG1);
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ /* Reset slave TP1 if booting from TP0 */
+ if (cpu_logical_map(cpu) == 1)
+ set_c0_brcm_cmt_ctrl(0x01);
+ break;
+ case CPU_BMIPS5000:
+ write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
+ break;
+ }
+ cpumask_set_cpu(cpu, &bmips_booted_mask);
+ }
+
+ return 0;
+}
+
+/*
+ * Early setup - runs on secondary CPU after cache probe
+ */
+static void bmips_init_secondary(void)
+{
+ bmips_cpu_setup();
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
+ break;
+ case CPU_BMIPS5000:
+ write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
+ cpu_set_core(&current_cpu_data, (read_c0_brcm_config() >> 25) & 3);
+ break;
+ }
+}
+
+/*
+ * Late setup - runs on secondary CPU before entering the idle loop
+ */
+static void bmips_smp_finish(void)
+{
+ pr_info("SMP: CPU%d is running\n", smp_processor_id());
+
+ /* make sure there won't be a timer interrupt for a little while */
+ write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+
+ irq_enable_hazard();
+ set_c0_status(IE_SW0 | IE_SW1 | bmips_tp1_irqs | IE_IRQ5 | ST0_IE);
+ irq_enable_hazard();
+}
+
+/*
+ * BMIPS5000 raceless IPIs
+ *
+ * Each CPU has two inbound SW IRQs which are independent of all other CPUs.
+ * IPI0 is used for SMP_RESCHEDULE_YOURSELF
+ * IPI1 is used for SMP_CALL_FUNCTION
+ */
+
+static void bmips5000_send_ipi_single(int cpu, unsigned int action)
+{
+ write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION));
+}
+
+static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
+{
+ int action = irq - IPI0_IRQ;
+
+ write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), action));
+
+ if (action == 0)
+ scheduler_ipi();
+ else
+ generic_smp_call_function_interrupt();
+
+ return IRQ_HANDLED;
+}
+
+static void bmips5000_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ bmips5000_send_ipi_single(i, action);
+}
+
+/*
+ * BMIPS43xx racy IPIs
+ *
+ * We use one inbound SW IRQ for each CPU.
+ *
+ * A spinlock must be held in order to keep CPUx from accidentally clearing
+ * an incoming IPI when it writes CP0 CAUSE to raise an IPI on CPUy. The
+ * same spinlock is used to protect the action masks.
+ */
+
+static DEFINE_SPINLOCK(ipi_lock);
+static DEFINE_PER_CPU(int, ipi_action_mask);
+
+static void bmips43xx_send_ipi_single(int cpu, unsigned int action)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipi_lock, flags);
+ set_c0_cause(cpu ? C_SW1 : C_SW0);
+ per_cpu(ipi_action_mask, cpu) |= action;
+ irq_enable_hazard();
+ spin_unlock_irqrestore(&ipi_lock, flags);
+}
+
+static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
+{
+ unsigned long flags;
+ int action, cpu = irq - IPI0_IRQ;
+
+ spin_lock_irqsave(&ipi_lock, flags);
+ action = __this_cpu_read(ipi_action_mask);
+ per_cpu(ipi_action_mask, cpu) = 0;
+ clear_c0_cause(cpu ? C_SW1 : C_SW0);
+ spin_unlock_irqrestore(&ipi_lock, flags);
+
+ if (action & SMP_RESCHEDULE_YOURSELF)
+ scheduler_ipi();
+ if (action & SMP_CALL_FUNCTION)
+ generic_smp_call_function_interrupt();
+
+ return IRQ_HANDLED;
+}
+
+static void bmips43xx_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ bmips43xx_send_ipi_single(i, action);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int bmips_cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (cpu == 0)
+ return -EBUSY;
+
+ pr_info("SMP: CPU%d is offline\n", cpu);
+
+ set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
+ irq_cpu_offline();
+ clear_c0_status(IE_IRQ5);
+
+ local_flush_tlb_all();
+ local_flush_icache_range(0, ~0);
+
+ return 0;
+}
+
+static void bmips_cpu_die(unsigned int cpu)
+{
+}
+
+void __ref play_dead(void)
+{
+ idle_task_exit();
+
+ /* flush data cache */
+ _dma_cache_wback_inv(0, ~0);
+
+ /*
+ * Wakeup is on SW0 or SW1; disable everything else
+ * Use BEV !IV (BMIPS_WARM_RESTART_VEC) to avoid the regular Linux
+ * IRQ handlers; this clears ST0_IE and returns immediately.
+ */
+ clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1);
+ change_c0_status(
+ IE_IRQ5 | bmips_tp1_irqs | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
+ IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV);
+ irq_disable_hazard();
+
+ /*
+ * wait for SW interrupt from bmips_boot_secondary(), then jump
+ * back to start_secondary()
+ */
+ __asm__ __volatile__(
+ " wait\n"
+ " j bmips_secondary_reentry\n"
+ : : : "memory");
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+const struct plat_smp_ops bmips43xx_smp_ops = {
+ .smp_setup = bmips_smp_setup,
+ .prepare_cpus = bmips_prepare_cpus,
+ .boot_secondary = bmips_boot_secondary,
+ .smp_finish = bmips_smp_finish,
+ .init_secondary = bmips_init_secondary,
+ .send_ipi_single = bmips43xx_send_ipi_single,
+ .send_ipi_mask = bmips43xx_send_ipi_mask,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = bmips_cpu_disable,
+ .cpu_die = bmips_cpu_die,
+#endif
+#ifdef CONFIG_KEXEC
+ .kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
+#endif
+};
+
+const struct plat_smp_ops bmips5000_smp_ops = {
+ .smp_setup = bmips_smp_setup,
+ .prepare_cpus = bmips_prepare_cpus,
+ .boot_secondary = bmips_boot_secondary,
+ .smp_finish = bmips_smp_finish,
+ .init_secondary = bmips_init_secondary,
+ .send_ipi_single = bmips5000_send_ipi_single,
+ .send_ipi_mask = bmips5000_send_ipi_mask,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = bmips_cpu_disable,
+ .cpu_die = bmips_cpu_die,
+#endif
+#ifdef CONFIG_KEXEC
+ .kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
+#endif
+};
+
+#endif /* CONFIG_SMP */
+
+/***********************************************************************
+ * BMIPS vector relocation
+ * This is primarily used for SMP boot, but it is applicable to some
+ * UP BMIPS systems as well.
+ ***********************************************************************/
+
+static void bmips_wr_vec(unsigned long dst, char *start, char *end)
+{
+ memcpy((void *)dst, start, end - start);
+ dma_cache_wback(dst, end - start);
+ local_flush_icache_range(dst, dst + (end - start));
+ instruction_hazard();
+}
+
+static inline void bmips_nmi_handler_setup(void)
+{
+ bmips_wr_vec(BMIPS_NMI_RESET_VEC, bmips_reset_nmi_vec,
+ bmips_reset_nmi_vec_end);
+ bmips_wr_vec(BMIPS_WARM_RESTART_VEC, bmips_smp_int_vec,
+ bmips_smp_int_vec_end);
+}
+
+struct reset_vec_info {
+ int cpu;
+ u32 val;
+};
+
+static void bmips_set_reset_vec_remote(void *vinfo)
+{
+ struct reset_vec_info *info = vinfo;
+ int shift = info->cpu & 0x01 ? 16 : 0;
+ u32 mask = ~(0xffff << shift), val = info->val >> 16;
+
+ preempt_disable();
+ if (smp_processor_id() > 0) {
+ smp_call_function_single(0, &bmips_set_reset_vec_remote,
+ info, 1);
+ } else {
+ if (info->cpu & 0x02) {
+ /* BMIPS5200 "should" use mask/shift, but it's buggy */
+ bmips_write_zscm_reg(0xa0, (val << 16) | val);
+ bmips_read_zscm_reg(0xa0);
+ } else {
+ write_c0_brcm_bootvec((read_c0_brcm_bootvec() & mask) |
+ (val << shift));
+ }
+ }
+ preempt_enable();
+}
+
+static void bmips_set_reset_vec(int cpu, u32 val)
+{
+ struct reset_vec_info info;
+
+ if (current_cpu_type() == CPU_BMIPS5000) {
+ /* this needs to run from CPU0 (which is always online) */
+ info.cpu = cpu;
+ info.val = val;
+ bmips_set_reset_vec_remote(&info);
+ } else {
+ void __iomem *cbr = BMIPS_GET_CBR();
+
+ if (cpu == 0)
+ __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
+ else {
+ if (current_cpu_type() != CPU_BMIPS4380)
+ return;
+ __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
+ }
+ }
+ __sync();
+ back_to_back_c0_hazard();
+}
+
+void bmips_ebase_setup(void)
+{
+ unsigned long new_ebase = ebase;
+
+ BUG_ON(ebase != CKSEG0);
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ /*
+ * BMIPS4350 cannot relocate the normal vectors, but it
+ * can relocate the BEV=1 vectors. So CPU1 starts up at
+ * the relocated BEV=1, IV=0 general exception vector @
+ * 0xa000_0380.
+ *
+ * set_uncached_handler() is used here because:
+ * - CPU1 will run this from uncached space
+ * - None of the cacheflush functions are set up yet
+ */
+ set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0,
+ &bmips_smp_int_vec, 0x80);
+ __sync();
+ return;
+ case CPU_BMIPS3300:
+ case CPU_BMIPS4380:
+ /*
+ * 0x8000_0000: reset/NMI (initially in kseg1)
+ * 0x8000_0400: normal vectors
+ */
+ new_ebase = 0x80000400;
+ bmips_set_reset_vec(0, RESET_FROM_KSEG0);
+ break;
+ case CPU_BMIPS5000:
+ /*
+ * 0x8000_0000: reset/NMI (initially in kseg1)
+ * 0x8000_1000: normal vectors
+ */
+ new_ebase = 0x80001000;
+ bmips_set_reset_vec(0, RESET_FROM_KSEG0);
+ write_c0_ebase(new_ebase);
+ break;
+ default:
+ return;
+ }
+
+ board_nmi_handler_setup = &bmips_nmi_handler_setup;
+ ebase = new_ebase;
+}
+
+asmlinkage void __weak plat_wired_tlb_setup(void)
+{
+ /*
+ * Called when starting/restarting a secondary CPU.
+ * Kernel stacks and other important data might only be accessible
+ * once the wired entries are present.
+ */
+}
+
+void bmips_cpu_setup(void)
+{
+ void __iomem __maybe_unused *cbr = BMIPS_GET_CBR();
+ u32 __maybe_unused cfg;
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS3300:
+ /* Set BIU to async mode */
+ set_c0_brcm_bus_pll(BIT(22));
+ __sync();
+
+ /* put the BIU back in sync mode */
+ clear_c0_brcm_bus_pll(BIT(22));
+
+ /* clear BHTD to enable branch history table */
+ clear_c0_brcm_reset(BIT(16));
+
+ /* Flush and enable RAC */
+ cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
+ __raw_readl(cbr + BMIPS_RAC_CONFIG);
+
+ cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0xf, cbr + BMIPS_RAC_CONFIG);
+ __raw_readl(cbr + BMIPS_RAC_CONFIG);
+
+ cfg = __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
+ __raw_writel(cfg | 0x0fff0000, cbr + BMIPS_RAC_ADDRESS_RANGE);
+ __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
+ break;
+
+ case CPU_BMIPS4380:
+ /* CBG workaround for early BMIPS4380 CPUs */
+ switch (read_c0_prid()) {
+ case 0x2a040:
+ case 0x2a042:
+ case 0x2a044:
+ case 0x2a060:
+ cfg = __raw_readl(cbr + BMIPS_L2_CONFIG);
+ __raw_writel(cfg & ~0x07000000, cbr + BMIPS_L2_CONFIG);
+ __raw_readl(cbr + BMIPS_L2_CONFIG);
+ }
+
+ /* clear BHTD to enable branch history table */
+ clear_c0_brcm_config_0(BIT(21));
+
+ /* XI/ROTR enable */
+ set_c0_brcm_config_0(BIT(23));
+ set_c0_brcm_cmt_ctrl(BIT(15));
+ break;
+
+ case CPU_BMIPS5000:
+ /* enable RDHWR, BRDHWR */
+ set_c0_brcm_config(BIT(17) | BIT(21));
+
+ /* Disable JTB */
+ __asm__ __volatile__(
+ " .set noreorder\n"
+ " li $8, 0x5a455048\n"
+ " .word 0x4088b00f\n" /* mtc0 t0, $22, 15 */
+ " .word 0x4008b008\n" /* mfc0 t0, $22, 8 */
+ " li $9, 0x00008000\n"
+ " or $8, $8, $9\n"
+ " .word 0x4088b008\n" /* mtc0 t0, $22, 8 */
+ " sync\n"
+ " li $8, 0x0\n"
+ " .word 0x4088b00f\n" /* mtc0 t0, $22, 15 */
+ " .set reorder\n"
+ : : : "$8", "$9");
+
+ /* XI enable */
+ set_c0_brcm_config(BIT(27));
+
+ /* enable MIPS32R2 ROR instruction for XI TLB handlers */
+ __asm__ __volatile__(
+ " li $8, 0x5a455048\n"
+ " .word 0x4088b00f\n" /* mtc0 $8, $22, 15 */
+ " nop; nop; nop\n"
+ " .word 0x4008b008\n" /* mfc0 $8, $22, 8 */
+ " lui $9, 0x0100\n"
+ " or $8, $9\n"
+ " .word 0x4088b008\n" /* mtc0 $8, $22, 8 */
+ : : : "$8", "$9");
+ break;
+ }
+}
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
new file mode 100644
index 000000000..76f5824cd
--- /dev/null
+++ b/arch/mips/kernel/smp-cmp.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Chris Dearman (chris@mips.com)
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/sched/task_stack.h>
+#include <linux/smp.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+
+#include <linux/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/time.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+#include <asm/amon.h>
+
+static void cmp_init_secondary(void)
+{
+ struct cpuinfo_mips *c __maybe_unused = &current_cpu_data;
+
+ /* Assume GIC is present */
+ change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 |
+ STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7);
+
+ /* Enable per-cpu interrupts: platform specific */
+
+#ifdef CONFIG_MIPS_MT_SMP
+ if (cpu_has_mipsmt)
+ cpu_set_vpe_id(c, (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
+ TCBIND_CURVPE);
+#endif
+}
+
+static void cmp_smp_finish(void)
+{
+ pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+
+ /* CDFIXME: remove this? */
+ write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ local_irq_enable();
+}
+
+/*
+ * Set up the PC, SP and GP of a secondary processor and start it running.
+ * smp_bootstrap is the place to resume from;
+ * __KSTK_TOS(idle) is the stack pointer and
+ * (unsigned long)idle->thread_info is the gp.
+ */
+static int cmp_boot_secondary(int cpu, struct task_struct *idle)
+{
+ struct thread_info *gp = task_thread_info(idle);
+ unsigned long sp = __KSTK_TOS(idle);
+ unsigned long pc = (unsigned long)&smp_bootstrap;
+ unsigned long a0 = 0;
+
+ pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(),
+ __func__, cpu);
+
+#if 0
+ /* Needed? */
+ flush_icache_range((unsigned long)gp,
+ (unsigned long)(gp + sizeof(struct thread_info)));
+#endif
+
+ amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0);
+ return 0;
+}
+
+/*
+ * Common setup before any secondaries are started
+ */
+void __init cmp_smp_setup(void)
+{
+ int i;
+ int ncpu = 0;
+
+ pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpumask_set_cpu(0, &mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ for (i = 1; i < NR_CPUS; i++) {
+ if (amon_cpu_avail(i)) {
+ set_cpu_possible(i, true);
+ __cpu_number_map[i] = ++ncpu;
+ __cpu_logical_map[ncpu] = i;
+ }
+ }
+
+ if (cpu_has_mipsmt) {
+ unsigned int nvpe = 1;
+#ifdef CONFIG_MIPS_MT_SMP
+ unsigned int mvpconf0 = read_c0_mvpconf0();
+
+ nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+#endif
+ smp_num_siblings = nvpe;
+ }
+ pr_info("Detected %i available secondary CPU(s)\n", ncpu);
+}
+
+void __init cmp_prepare_cpus(unsigned int max_cpus)
+{
+ pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n",
+ smp_processor_id(), __func__, max_cpus);
+
+#ifdef CONFIG_MIPS_MT
+ /*
+ * FIXME: some of these options are per-system, some per-core and
+ * some per-cpu
+ */
+ mips_mt_set_cpuoptions();
+#endif
+
+}
+
+const struct plat_smp_ops cmp_smp_ops = {
+ .send_ipi_single = mips_smp_send_ipi_single,
+ .send_ipi_mask = mips_smp_send_ipi_mask,
+ .init_secondary = cmp_init_secondary,
+ .smp_finish = cmp_smp_finish,
+ .boot_secondary = cmp_boot_secondary,
+ .smp_setup = cmp_smp_setup,
+ .prepare_cpus = cmp_prepare_cpus,
+};
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
new file mode 100644
index 000000000..f659adb68
--- /dev/null
+++ b/arch/mips/kernel/smp-cps.c
@@ -0,0 +1,647 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/sched/task_stack.h>
+#include <linux/sched/hotplug.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+
+#include <asm/bcache.h>
+#include <asm/mips-cps.h>
+#include <asm/mips_mt.h>
+#include <asm/mipsregs.h>
+#include <asm/pm-cps.h>
+#include <asm/r4kcache.h>
+#include <asm/smp-cps.h>
+#include <asm/time.h>
+#include <asm/uasm.h>
+
+static bool threads_disabled;
+static DECLARE_BITMAP(core_power, NR_CPUS);
+
+struct core_boot_config *mips_cps_core_bootcfg;
+
+static int __init setup_nothreads(char *s)
+{
+ threads_disabled = true;
+ return 0;
+}
+early_param("nothreads", setup_nothreads);
+
+static unsigned core_vpe_count(unsigned int cluster, unsigned core)
+{
+ if (threads_disabled)
+ return 1;
+
+ return mips_cps_numvps(cluster, core);
+}
+
+static void __init cps_smp_setup(void)
+{
+ unsigned int nclusters, ncores, nvpes, core_vpes;
+ unsigned long core_entry;
+ int cl, c, v;
+
+ /* Detect & record VPE topology */
+ nvpes = 0;
+ nclusters = mips_cps_numclusters();
+ pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
+ for (cl = 0; cl < nclusters; cl++) {
+ if (cl > 0)
+ pr_cont(",");
+ pr_cont("{");
+
+ ncores = mips_cps_numcores(cl);
+ for (c = 0; c < ncores; c++) {
+ core_vpes = core_vpe_count(cl, c);
+
+ if (c > 0)
+ pr_cont(",");
+ pr_cont("%u", core_vpes);
+
+ /* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
+ if (!cl && !c)
+ smp_num_siblings = core_vpes;
+
+ for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
+ cpu_set_cluster(&cpu_data[nvpes + v], cl);
+ cpu_set_core(&cpu_data[nvpes + v], c);
+ cpu_set_vpe_id(&cpu_data[nvpes + v], v);
+ }
+
+ nvpes += core_vpes;
+ }
+
+ pr_cont("}");
+ }
+ pr_cont(" total %u\n", nvpes);
+
+ /* Indicate present CPUs (CPU being synonymous with VPE) */
+ for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
+ set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
+ set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
+ __cpu_number_map[v] = v;
+ __cpu_logical_map[v] = v;
+ }
+
+ /* Set a coherent default CCA (CWB) */
+ change_c0_config(CONF_CM_CMASK, 0x5);
+
+ /* Core 0 is powered up (we're running on it) */
+ bitmap_set(core_power, 0, 1);
+
+ /* Initialise core 0 */
+ mips_cps_core_init();
+
+ /* Make core 0 coherent with everything */
+ write_gcr_cl_coherence(0xff);
+
+ if (mips_cm_revision() >= CM_REV_CM3) {
+ core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
+ write_gcr_bev_base(core_entry);
+ }
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpumask_set_cpu(0, &mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+}
+
+static void __init cps_prepare_cpus(unsigned int max_cpus)
+{
+ unsigned ncores, core_vpes, c, cca;
+ bool cca_unsuitable, cores_limited;
+ u32 *entry_code;
+
+ mips_mt_set_cpuoptions();
+
+ /* Detect whether the CCA is unsuited to multi-core SMP */
+ cca = read_c0_config() & CONF_CM_CMASK;
+ switch (cca) {
+ case 0x4: /* CWBE */
+ case 0x5: /* CWB */
+ /* The CCA is coherent, multi-core is fine */
+ cca_unsuitable = false;
+ break;
+
+ default:
+ /* CCA is not coherent, multi-core is not usable */
+ cca_unsuitable = true;
+ }
+
+ /* Warn the user if the CCA prevents multi-core */
+ cores_limited = false;
+ if (cca_unsuitable || cpu_has_dc_aliases) {
+ for_each_present_cpu(c) {
+ if (cpus_are_siblings(smp_processor_id(), c))
+ continue;
+
+ set_cpu_present(c, false);
+ cores_limited = true;
+ }
+ }
+ if (cores_limited)
+ pr_warn("Using only one core due to %s%s%s\n",
+ cca_unsuitable ? "unsuitable CCA" : "",
+ (cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
+ cpu_has_dc_aliases ? "dcache aliasing" : "");
+
+ /*
+ * Patch the start of mips_cps_core_entry to provide:
+ *
+ * s0 = kseg0 CCA
+ */
+ entry_code = (u32 *)&mips_cps_core_entry;
+ uasm_i_addiu(&entry_code, 16, 0, cca);
+ blast_dcache_range((unsigned long)&mips_cps_core_entry,
+ (unsigned long)entry_code);
+ bc_wback_inv((unsigned long)&mips_cps_core_entry,
+ (void *)entry_code - (void *)&mips_cps_core_entry);
+ __sync();
+
+ /* Allocate core boot configuration structs */
+ ncores = mips_cps_numcores(0);
+ mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
+ GFP_KERNEL);
+ if (!mips_cps_core_bootcfg) {
+ pr_err("Failed to allocate boot config for %u cores\n", ncores);
+ goto err_out;
+ }
+
+ /* Allocate VPE boot configuration structs */
+ for (c = 0; c < ncores; c++) {
+ core_vpes = core_vpe_count(0, c);
+ mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
+ sizeof(*mips_cps_core_bootcfg[c].vpe_config),
+ GFP_KERNEL);
+ if (!mips_cps_core_bootcfg[c].vpe_config) {
+ pr_err("Failed to allocate %u VPE boot configs\n",
+ core_vpes);
+ goto err_out;
+ }
+ }
+
+ /* Mark this CPU as booted */
+ atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
+ 1 << cpu_vpe_id(&current_cpu_data));
+
+ return;
+err_out:
+ /* Clean up allocations */
+ if (mips_cps_core_bootcfg) {
+ for (c = 0; c < ncores; c++)
+ kfree(mips_cps_core_bootcfg[c].vpe_config);
+ kfree(mips_cps_core_bootcfg);
+ mips_cps_core_bootcfg = NULL;
+ }
+
+ /* Effectively disable SMP by declaring CPUs not present */
+ for_each_possible_cpu(c) {
+ if (c == 0)
+ continue;
+ set_cpu_present(c, false);
+ }
+}
+
+static void boot_core(unsigned int core, unsigned int vpe_id)
+{
+ u32 stat, seq_state;
+ unsigned timeout;
+
+ /* Select the appropriate core */
+ mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+
+ /* Set its reset vector */
+ write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
+
+ /* Ensure its coherency is disabled */
+ write_gcr_co_coherence(0);
+
+ /* Start it with the legacy memory map and exception base */
+ write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);
+
+ /* Ensure the core can access the GCRs */
+ set_gcr_access(1 << core);
+
+ if (mips_cpc_present()) {
+ /* Reset the core */
+ mips_cpc_lock_other(core);
+
+ if (mips_cm_revision() >= CM_REV_CM3) {
+ /* Run only the requested VP following the reset */
+ write_cpc_co_vp_stop(0xf);
+ write_cpc_co_vp_run(1 << vpe_id);
+
+ /*
+ * Ensure that the VP_RUN register is written before the
+ * core leaves reset.
+ */
+ wmb();
+ }
+
+ write_cpc_co_cmd(CPC_Cx_CMD_RESET);
+
+ timeout = 100;
+ while (true) {
+ stat = read_cpc_co_stat_conf();
+ seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+ seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+
+ /* U6 == coherent execution, ie. the core is up */
+ if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
+ break;
+
+ /* Delay a little while before we start warning */
+ if (timeout) {
+ timeout--;
+ mdelay(10);
+ continue;
+ }
+
+ pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
+ core, stat);
+ mdelay(1000);
+ }
+
+ mips_cpc_unlock_other();
+ } else {
+ /* Take the core out of reset */
+ write_gcr_co_reset_release(0);
+ }
+
+ mips_cm_unlock_other();
+
+ /* The core is now powered up */
+ bitmap_set(core_power, core, 1);
+}
+
+static void remote_vpe_boot(void *dummy)
+{
+ unsigned core = cpu_core(&current_cpu_data);
+ struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+
+ mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
+}
+
+static int cps_boot_secondary(int cpu, struct task_struct *idle)
+{
+ unsigned core = cpu_core(&cpu_data[cpu]);
+ unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+ struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+ struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
+ unsigned long core_entry;
+ unsigned int remote;
+ int err;
+
+ /* We don't yet support booting CPUs in other clusters */
+ if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
+ return -ENOSYS;
+
+ vpe_cfg->pc = (unsigned long)&smp_bootstrap;
+ vpe_cfg->sp = __KSTK_TOS(idle);
+ vpe_cfg->gp = (unsigned long)task_thread_info(idle);
+
+ atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);
+
+ preempt_disable();
+
+ if (!test_bit(core, core_power)) {
+ /* Boot a VPE on a powered down core */
+ boot_core(core, vpe_id);
+ goto out;
+ }
+
+ if (cpu_has_vp) {
+ mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
+ write_gcr_co_reset_base(core_entry);
+ mips_cm_unlock_other();
+ }
+
+ if (!cpus_are_siblings(cpu, smp_processor_id())) {
+ /* Boot a VPE on another powered up core */
+ for (remote = 0; remote < NR_CPUS; remote++) {
+ if (!cpus_are_siblings(cpu, remote))
+ continue;
+ if (cpu_online(remote))
+ break;
+ }
+ if (remote >= NR_CPUS) {
+ pr_crit("No online CPU in core %u to start CPU%d\n",
+ core, cpu);
+ goto out;
+ }
+
+ err = smp_call_function_single(remote, remote_vpe_boot,
+ NULL, 1);
+ if (err)
+ panic("Failed to call remote CPU\n");
+ goto out;
+ }
+
+ BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);
+
+ /* Boot a VPE on this core */
+ mips_cps_boot_vpes(core_cfg, vpe_id);
+out:
+ preempt_enable();
+ return 0;
+}
+
+static void cps_init_secondary(void)
+{
+ /* Disable MT - we only want to run 1 TC per VPE */
+ if (cpu_has_mipsmt)
+ dmt();
+
+ if (mips_cm_revision() >= CM_REV_CM3) {
+ unsigned int ident = read_gic_vl_ident();
+
+ /*
+ * Ensure that our calculation of the VP ID matches up with
+ * what the GIC reports, otherwise we'll have configured
+ * interrupts incorrectly.
+ */
+ BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
+ }
+
+ if (cpu_has_veic)
+ clear_c0_status(ST0_IM);
+ else
+ change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
+ STATUSF_IP4 | STATUSF_IP5 |
+ STATUSF_IP6 | STATUSF_IP7);
+}
+
+static void cps_smp_finish(void)
+{
+ write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ local_irq_enable();
+}
+
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)
+
+enum cpu_death {
+ CPU_DEATH_HALT,
+ CPU_DEATH_POWER,
+};
+
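+/*
+ * Shut down the calling CPU: either halt just this TC/VP, leaving the rest
+ * of the core running, or power-gate the whole core via CPS power management.
+ */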
+static void cps_shutdown_this_cpu(enum cpu_death death)
+{
+ unsigned int cpu, core, vpe_id;
+
+ cpu = smp_processor_id();
+ core = cpu_core(&cpu_data[cpu]);
+
+ if (death == CPU_DEATH_HALT) {
+ vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+
+ pr_debug("Halting core %d VP%d\n", core, vpe_id);
+ if (cpu_has_mipsmt) {
+ /* Halt this TC */
+ write_c0_tchalt(TCHALT_H);
+ instruction_hazard();
+ } else if (cpu_has_vp) {
+ write_cpc_cl_vp_stop(1 << vpe_id);
+
+ /* Ensure that the VP_STOP register is written */
+ wmb();
+ }
+ } else {
+ if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
+ pr_debug("Gating power to core %d\n", core);
+ /* Power down the core */
+ cps_pm_enter_state(CPS_PM_POWER_GATED);
+ }
+ }
+}
+
+#ifdef CONFIG_KEXEC
+
+static void cps_kexec_nonboot_cpu(void)
+{
+ if (cpu_has_mipsmt || cpu_has_vp)
+ cps_shutdown_this_cpu(CPU_DEATH_HALT);
+ else
+ cps_shutdown_this_cpu(CPU_DEATH_POWER);
+}
+
+#endif /* CONFIG_KEXEC */
+
+#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */
+
+#ifdef CONFIG_HOTPLUG_CPU
+
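+/*
+ * Prepare this CPU for offlining: CPU0 cannot be offlined and power-gating
+ * must be supported. Clear this VPE from its core's boot mask and mark the
+ * CPU as no longer online.
+ */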
+static int cps_cpu_disable(void)
+{
+ unsigned cpu = smp_processor_id();
+ struct core_boot_config *core_cfg;
+
+ if (!cpu)
+ return -EBUSY;
+
+ if (!cps_pm_support_state(CPS_PM_POWER_GATED))
+ return -EINVAL;
+
+ core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
+ atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
+ smp_mb__after_atomic();
+ set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
+
+ return 0;
+}
+
+static unsigned cpu_death_sibling;
+static enum cpu_death cpu_death;
+
+void play_dead(void)
+{
+ unsigned int cpu;
+
+ local_irq_disable();
+ idle_task_exit();
+ cpu = smp_processor_id();
+ cpu_death = CPU_DEATH_POWER;
+
+ pr_debug("CPU%d going offline\n", cpu);
+
+ if (cpu_has_mipsmt || cpu_has_vp) {
+ /* Look for another online VPE within the core */
+ for_each_online_cpu(cpu_death_sibling) {
+ if (!cpus_are_siblings(cpu, cpu_death_sibling))
+ continue;
+
+ /*
+ * There is an online VPE within the core. Just halt
+ * this TC and leave the core alone.
+ */
+ cpu_death = CPU_DEATH_HALT;
+ break;
+ }
+ }
+
+ /* This CPU has chosen its way out */
+ (void)cpu_report_death();
+
+ cps_shutdown_this_cpu(cpu_death);
+
+ /* This should never be reached */
+ panic("Failed to offline CPU %u", cpu);
+}
+
+static void wait_for_sibling_halt(void *ptr_cpu)
+{
+ unsigned cpu = (unsigned long)ptr_cpu;
+ unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+ unsigned halted;
+ unsigned long flags;
+
+ do {
+ local_irq_save(flags);
+ settc(vpe_id);
+ halted = read_tc_c0_tchalt();
+ local_irq_restore(flags);
+ } while (!(halted & TCHALT_H));
+}
+
+static void cps_cpu_die(unsigned int cpu)
+{
+ unsigned core = cpu_core(&cpu_data[cpu]);
+ unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+ ktime_t fail_time;
+ unsigned stat;
+ int err;
+
+ /* Wait for the cpu to choose its way out */
+ if (!cpu_wait_death(cpu, 5)) {
+ pr_err("CPU%u: didn't offline\n", cpu);
+ return;
+ }
+
+ /*
+ * Now wait for the CPU to actually go offline. Without doing this, the
+ * offlining may race with one or more of:
+ *
+ * - Onlining the CPU again.
+ * - Powering down the core if another VPE within it is offlined.
+ * - A sibling VPE entering a non-coherent state.
+ *
+ * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
+ * with which we could race, so do nothing.
+ */
+ if (cpu_death == CPU_DEATH_POWER) {
+ /*
+ * Wait for the core to enter a powered down or clock gated
+ * state, the latter happening when a JTAG probe is connected
+ * in which case the CPC will refuse to power down the core.
+ */
+ fail_time = ktime_add_ms(ktime_get(), 2000);
+ do {
+ mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ mips_cpc_lock_other(core);
+ stat = read_cpc_co_stat_conf();
+ stat &= CPC_Cx_STAT_CONF_SEQSTATE;
+ stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+ mips_cpc_unlock_other();
+ mips_cm_unlock_other();
+
+ if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
+ stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
+ stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
+ break;
+
+ /*
+ * The core ought to have powered down, but didn't &
+ * now we don't really know what state it's in. It's
+ * likely that its _pwr_up pin has been wired to logic
+ * 1 & it powered back up as soon as we powered it
+ * down...
+ *
+ * The best we can do is warn the user & continue in
+ * the hope that the core is doing nothing harmful &
+ * might behave properly if we online it later.
+ */
+ if (WARN(ktime_after(ktime_get(), fail_time),
+ "CPU%u hasn't powered down, seq. state %u\n",
+ cpu, stat))
+ break;
+ } while (1);
+
+ /* Indicate the core is powered off */
+ bitmap_clear(core_power, core, 1);
+ } else if (cpu_has_mipsmt) {
+ /*
+ * Have a CPU with access to the offlined CPU's registers wait
+ * for its TC to halt.
+ */
+ err = smp_call_function_single(cpu_death_sibling,
+ wait_for_sibling_halt,
+ (void *)(unsigned long)cpu, 1);
+ if (err)
+ panic("Failed to call remote sibling CPU\n");
+ } else if (cpu_has_vp) {
+ do {
+ mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ stat = read_cpc_co_vp_running();
+ mips_cm_unlock_other();
+ } while (stat & (1 << vpe_id));
+ }
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static const struct plat_smp_ops cps_smp_ops = {
+ .smp_setup = cps_smp_setup,
+ .prepare_cpus = cps_prepare_cpus,
+ .boot_secondary = cps_boot_secondary,
+ .init_secondary = cps_init_secondary,
+ .smp_finish = cps_smp_finish,
+ .send_ipi_single = mips_smp_send_ipi_single,
+ .send_ipi_mask = mips_smp_send_ipi_mask,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = cps_cpu_disable,
+ .cpu_die = cps_cpu_die,
+#endif
+#ifdef CONFIG_KEXEC
+ .kexec_nonboot_cpu = cps_kexec_nonboot_cpu,
+#endif
+};
+
+bool mips_cps_smp_in_use(void)
+{
+ extern const struct plat_smp_ops *mp_ops;
+ return mp_ops == &cps_smp_ops;
+}
+
+int register_cps_smp_ops(void)
+{
+ if (!mips_cm_present()) {
+ pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
+ return -ENODEV;
+ }
+
+ /* check we have a GIC - we need one for IPIs */
+ if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
+ pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
+ return -ENODEV;
+ }
+
+ register_smp_ops(&cps_smp_ops);
+ return 0;
+}
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
new file mode 100644
index 000000000..5f04a0141
--- /dev/null
+++ b/arch/mips/kernel/smp-mt.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2004, 05, 06 MIPS Technologies, Inc.
+ * Elizabeth Clarke (beth@mips.com)
+ * Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+#include <linux/sched/task_stack.h>
+#include <linux/smp.h>
+
+#include <linux/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/time.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+#include <asm/mips-cps.h>
+
+static void __init smvp_copy_vpe_config(void)
+{
+ write_vpe_c0_status(
+ (read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
+
+ /* set config to be the same as vpe0, particularly kseg0 coherency alg */
+ write_vpe_c0_config(read_c0_config());
+
+ /* make sure there are no software interrupts pending */
+ write_vpe_c0_cause(0);
+
+ /* Propagate Config7 */
+ write_vpe_c0_config7(read_c0_config7());
+
+ write_vpe_c0_count(read_c0_count());
+}
+
+static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
+ unsigned int ncpu)
+{
+ if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
+ return ncpu;
+
+ /* Deactivate all but VPE 0 */
+ if (tc != 0) {
+ unsigned long tmp = read_vpe_c0_vpeconf0();
+
+ tmp &= ~VPECONF0_VPA;
+
+ /* master VPE */
+ tmp |= VPECONF0_MVP;
+ write_vpe_c0_vpeconf0(tmp);
+
+ /* Record this as available CPU */
+ set_cpu_possible(tc, true);
+ set_cpu_present(tc, true);
+ __cpu_number_map[tc] = ++ncpu;
+ __cpu_logical_map[ncpu] = tc;
+ }
+
+ /* Disable multi-threading with TCs */
+ write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
+
+ if (tc != 0)
+ smvp_copy_vpe_config();
+
+ cpu_set_vpe_id(&cpu_data[ncpu], tc);
+
+ return ncpu;
+}
+
+static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
+{
+ unsigned long tmp;
+
+ if (!tc)
+ return;
+
+ /* Bind a TC to each VPE; may as well put all excess TCs
+ on the last VPE */
+ if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1))
+ write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));
+ else {
+ write_tc_c0_tcbind(read_tc_c0_tcbind() | tc);
+
+ /* and set XTC */
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT));
+ }
+
+ tmp = read_tc_c0_tcstatus();
+
+ /* mark not allocated and not dynamically allocatable */
+ tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+ tmp |= TCSTATUS_IXMT; /* interrupt exempt */
+ write_tc_c0_tcstatus(tmp);
+
+ write_tc_c0_tchalt(TCHALT_H);
+}
+
+static void vsmp_init_secondary(void)
+{
+ /* This is Malta specific: IPI, performance and timer interrupts */
+ if (mips_gic_present())
+ change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
+ STATUSF_IP4 | STATUSF_IP5 |
+ STATUSF_IP6 | STATUSF_IP7);
+ else
+ change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
+ STATUSF_IP6 | STATUSF_IP7);
+}
+
+static void vsmp_smp_finish(void)
+{
+ /* CDFIXME: remove this? */
+ write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ local_irq_enable();
+}
+
+/*
+ * Set up the PC, SP, and GP of a secondary processor and start it
+ * running!
+ * smp_bootstrap is the place to resume from,
+ * __KSTK_TOS(idle) is apparently the stack pointer,
+ * (unsigned long)idle->thread_info is the gp.
+ * Assumes a 1:1 mapping of TC => VPE.
+ */
+static int vsmp_boot_secondary(int cpu, struct task_struct *idle)
+{
+ struct thread_info *gp = task_thread_info(idle);
+ dvpe();
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ settc(cpu);
+
+ /* restart */
+ write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
+
+ /* enable the tc this vpe/cpu will be running */
+ write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A);
+
+ write_tc_c0_tchalt(0);
+
+ /* enable the VPE */
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
+
+ /* stack pointer */
+ write_tc_gpr_sp(__KSTK_TOS(idle));
+
+ /* global pointer */
+ write_tc_gpr_gp((unsigned long)gp);
+
+ flush_icache_range((unsigned long)gp,
+ (unsigned long)(gp + sizeof(struct thread_info)));
+
+ /* finally out of configuration and into chaos */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ evpe(EVPE_ENABLE);
+
+ return 0;
+}
+
+/*
+ * Common setup before any secondaries are started.
+ * Make sure all CPUs are in a sensible state before we boot any of the
+ * secondaries.
+ */
+static void __init vsmp_smp_setup(void)
+{
+ unsigned int mvpconf0, ntc, tc, ncpu = 0;
+ unsigned int nvpe;
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpumask_set_cpu(0, &mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+ if (!cpu_has_mipsmt)
+ return;
+
+ /* disable MT so we can configure */
+ dvpe();
+ dmt();
+
+ /* Put MVPEs into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ mvpconf0 = read_c0_mvpconf0();
+ ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
+
+ nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+ smp_num_siblings = nvpe;
+
+ /* We'll always have more TCs than VPEs, so loop setting everything
+ to a sensible state */
+ for (tc = 0; tc <= ntc; tc++) {
+ settc(tc);
+
+ smvp_tc_init(tc, mvpconf0);
+ ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
+ }
+
+ /* Release config state */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ /* We'll wait until starting the secondaries before starting MVPE */
+
+ printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
+}
+
+static void __init vsmp_prepare_cpus(unsigned int max_cpus)
+{
+ mips_mt_set_cpuoptions();
+}
+
+const struct plat_smp_ops vsmp_smp_ops = {
+ .send_ipi_single = mips_smp_send_ipi_single,
+ .send_ipi_mask = mips_smp_send_ipi_mask,
+ .init_secondary = vsmp_init_secondary,
+ .smp_finish = vsmp_smp_finish,
+ .boot_secondary = vsmp_boot_secondary,
+ .smp_setup = vsmp_smp_setup,
+ .prepare_cpus = vsmp_prepare_cpus,
+};
+
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c
new file mode 100644
index 000000000..525d3196f
--- /dev/null
+++ b/arch/mips/kernel/smp-up.c
@@ -0,0 +1,79 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006, 07 by Ralf Baechle (ralf@linux-mips.org)
+ *
+ * Symmetric Uniprocessor (TM) Support
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+/*
+ * Send inter-processor interrupt
+ */
+static void up_send_ipi_single(int cpu, unsigned int action)
+{
+ panic("%s called", __func__);
+}
+
+static inline void up_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
+{
+ panic("%s called", __func__);
+}
+
+/*
+ * After we've done initial boot, this function is called to allow the
+ * board code to clean up state, if needed
+ */
+static void up_init_secondary(void)
+{
+}
+
+static void up_smp_finish(void)
+{
+}
+
+/*
+ * Firmware CPU startup hook
+ */
+static int up_boot_secondary(int cpu, struct task_struct *idle)
+{
+ return 0;
+}
+
+static void __init up_smp_setup(void)
+{
+}
+
+static void __init up_prepare_cpus(unsigned int max_cpus)
+{
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int up_cpu_disable(void)
+{
+ return -ENOSYS;
+}
+
+static void up_cpu_die(unsigned int cpu)
+{
+ BUG();
+}
+#endif
+
+const struct plat_smp_ops up_smp_ops = {
+ .send_ipi_single = up_send_ipi_single,
+ .send_ipi_mask = up_send_ipi_mask,
+ .init_secondary = up_init_secondary,
+ .smp_finish = up_smp_finish,
+ .boot_secondary = up_boot_secondary,
+ .smp_setup = up_smp_setup,
+ .prepare_cpus = up_prepare_cpus,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = up_cpu_disable,
+ .cpu_die = up_cpu_die,
+#endif
+};
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
new file mode 100644
index 000000000..14db66dbc
--- /dev/null
+++ b/arch/mips/kernel/smp.c
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *
+ * Copyright (C) 2000, 2001 Kanoj Sarcar
+ * Copyright (C) 2000, 2001 Ralf Baechle
+ * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
+ * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
+ */
+#include <linux/cache.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/export.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/sched/mm.h>
+#include <linux/cpumask.h>
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/ftrace.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+#include <linux/atomic.h>
+#include <asm/cpu.h>
+#include <asm/ginvt.h>
+#include <asm/processor.h>
+#include <asm/idle.h>
+#include <asm/r4k-timer.h>
+#include <asm/mips-cps.h>
+#include <asm/mmu_context.h>
+#include <asm/time.h>
+#include <asm/setup.h>
+#include <asm/maar.h>
+
+int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP]; /* Map physical to logical */
+EXPORT_SYMBOL(__cpu_number_map);
+
+int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
+EXPORT_SYMBOL(__cpu_logical_map);
+
+/* Number of TCs (or siblings in Intel speak) per CPU core */
+int smp_num_siblings = 1;
+EXPORT_SYMBOL(smp_num_siblings);
+
+/* representing the TCs (or siblings in Intel speak) of each logical CPU */
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_sibling_map);
+
+/* representing the core map of multi-core chips of each logical CPU */
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_core_map);
+
+static DECLARE_COMPLETION(cpu_starting);
+static DECLARE_COMPLETION(cpu_running);
+
+/*
+ * A logical CPU mask containing only one VPE per core to
+ * reduce the number of IPIs on large MT systems.
+ */
+cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_foreign_map);
+
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
+/* representing cpus for which core maps can be computed */
+static cpumask_t cpu_core_setup_map;
+
+cpumask_t cpu_coherent_mask;
+
+#ifdef CONFIG_GENERIC_IRQ_IPI
+static struct irq_desc *call_desc;
+static struct irq_desc *sched_desc;
+#endif
+
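+/*
+ * Mark 'cpu' and every already-set-up CPU in the same core as siblings of
+ * each other. With a single thread per core a CPU is only its own sibling.
+ */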
+static inline void set_cpu_sibling_map(int cpu)
+{
+ int i;
+
+ cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
+
+ if (smp_num_siblings > 1) {
+ for_each_cpu(i, &cpu_sibling_setup_map) {
+ if (cpus_are_siblings(cpu, i)) {
+ cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
+ }
+ }
+ } else
+ cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
+}
+
+static inline void set_cpu_core_map(int cpu)
+{
+ int i;
+
+ cpumask_set_cpu(cpu, &cpu_core_setup_map);
+
+ for_each_cpu(i, &cpu_core_setup_map) {
+ if (cpu_data[cpu].package == cpu_data[i].package) {
+ cpumask_set_cpu(i, &cpu_core_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_core_map[i]);
+ }
+ }
+}
+
+/*
+ * Calculate a new cpu_foreign_map mask whenever a
+ * new cpu appears or disappears.
+ */
+void calculate_cpu_foreign_map(void)
+{
+ int i, k, core_present;
+ cpumask_t temp_foreign_map;
+
+ /* Re-calculate the mask */
+ cpumask_clear(&temp_foreign_map);
+ for_each_online_cpu(i) {
+ core_present = 0;
+ for_each_cpu(k, &temp_foreign_map)
+ if (cpus_are_siblings(i, k))
+ core_present = 1;
+ if (!core_present)
+ cpumask_set_cpu(i, &temp_foreign_map);
+ }
+
+ for_each_online_cpu(i)
+ cpumask_andnot(&cpu_foreign_map[i],
+ &temp_foreign_map, &cpu_sibling_map[i]);
+}
+
+const struct plat_smp_ops *mp_ops;
+EXPORT_SYMBOL(mp_ops);
+
+void register_smp_ops(const struct plat_smp_ops *ops)
+{
+ if (mp_ops)
+ printk(KERN_WARNING "Overriding previously set SMP ops\n");
+
+ mp_ops = ops;
+}
+
+#ifdef CONFIG_GENERIC_IRQ_IPI
+void mips_smp_send_ipi_single(int cpu, unsigned int action)
+{
+ mips_smp_send_ipi_mask(cpumask_of(cpu), action);
+}
+
+void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+ unsigned long flags;
+ unsigned int core;
+ int cpu;
+
+ local_irq_save(flags);
+
+ switch (action) {
+ case SMP_CALL_FUNCTION:
+ __ipi_send_mask(call_desc, mask);
+ break;
+
+ case SMP_RESCHEDULE_YOURSELF:
+ __ipi_send_mask(sched_desc, mask);
+ break;
+
+ default:
+ BUG();
+ }
+
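+ /*
+ * Targets that have left the coherent domain (e.g. a core power-gated
+ * by CPS power management) will not see the IPI, so keep commanding a
+ * power-up via the CPC until the CPU re-enters the coherent mask.
+ */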
+ if (mips_cpc_present()) {
+ for_each_cpu(cpu, mask) {
+ if (cpus_are_siblings(cpu, smp_processor_id()))
+ continue;
+
+ core = cpu_core(&cpu_data[cpu]);
+
+ while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
+ mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ mips_cpc_lock_other(core);
+ write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
+ mips_cpc_unlock_other();
+ mips_cm_unlock_other();
+ }
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+
+static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
+{
+ scheduler_ipi();
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
+{
+ generic_smp_call_function_interrupt();
+
+ return IRQ_HANDLED;
+}
+
+static void smp_ipi_init_one(unsigned int virq, const char *name,
+ irq_handler_t handler)
+{
+ int ret;
+
+ irq_set_handler(virq, handle_percpu_irq);
+ ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
+ BUG_ON(ret);
+}
+
+static unsigned int call_virq, sched_virq;
+
+int mips_smp_ipi_allocate(const struct cpumask *mask)
+{
+ int virq;
+ struct irq_domain *ipidomain;
+ struct device_node *node;
+
+ node = of_irq_find_parent(of_root);
+ ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
+
+ /*
+ * Some platforms have an incomplete DT setup, so if we found an irq
+ * node but didn't find an ipidomain, try to search for one that is
+ * not in the DT.
+ */
+ if (node && !ipidomain)
+ ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
+
+ /*
+ * There are systems which use IPI IRQ domains, but only have one
+ * registered when some runtime condition is met. For example a Malta
+ * kernel may include support for GIC & CPU interrupt controller IPI
+ * IRQ domains, but if run on a system with no GIC & no MT ASE then
+ * neither will be supported or registered.
+ *
+ * We only have a problem if we're actually using multiple CPUs so fail
+ * loudly if that is the case. Otherwise simply return, skipping IPI
+ * setup, if we're running with only a single CPU.
+ */
+ if (!ipidomain) {
+ BUG_ON(num_present_cpus() > 1);
+ return 0;
+ }
+
+ virq = irq_reserve_ipi(ipidomain, mask);
+ BUG_ON(!virq);
+ if (!call_virq)
+ call_virq = virq;
+
+ virq = irq_reserve_ipi(ipidomain, mask);
+ BUG_ON(!virq);
+ if (!sched_virq)
+ sched_virq = virq;
+
+ if (irq_domain_is_ipi_per_cpu(ipidomain)) {
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ smp_ipi_init_one(call_virq + cpu, "IPI call",
+ ipi_call_interrupt);
+ smp_ipi_init_one(sched_virq + cpu, "IPI resched",
+ ipi_resched_interrupt);
+ }
+ } else {
+ smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
+ smp_ipi_init_one(sched_virq, "IPI resched",
+ ipi_resched_interrupt);
+ }
+
+ return 0;
+}
+
+int mips_smp_ipi_free(const struct cpumask *mask)
+{
+ struct irq_domain *ipidomain;
+ struct device_node *node;
+
+ node = of_irq_find_parent(of_root);
+ ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
+
+ /*
+ * Some platforms have an incomplete DT setup, so if we found an irq
+ * node but didn't find an ipidomain, try to search for one that is
+ * not in the DT.
+ */
+ if (node && !ipidomain)
+ ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
+
+ BUG_ON(!ipidomain);
+
+ if (irq_domain_is_ipi_per_cpu(ipidomain)) {
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ free_irq(call_virq + cpu, NULL);
+ free_irq(sched_virq + cpu, NULL);
+ }
+ }
+ irq_destroy_ipi(call_virq, mask);
+ irq_destroy_ipi(sched_virq, mask);
+ return 0;
+}
+
+
+static int __init mips_smp_ipi_init(void)
+{
+ if (num_possible_cpus() == 1)
+ return 0;
+
+ mips_smp_ipi_allocate(cpu_possible_mask);
+
+ call_desc = irq_to_desc(call_virq);
+ sched_desc = irq_to_desc(sched_virq);
+
+ return 0;
+}
+early_initcall(mips_smp_ipi_init);
+#endif
+
+/*
+ * First C code run on the secondary CPUs after being started up by
+ * the master.
+ */
+asmlinkage void start_secondary(void)
+{
+ unsigned int cpu;
+
+ cpu_probe();
+ per_cpu_trap_init(false);
+ mips_clockevent_init();
+ mp_ops->init_secondary();
+ cpu_report();
+ maar_init();
+
+ /*
+ * XXX parity protection should be folded in here when it's converted
+ * to an option instead of something based on .cputype
+ */
+
+ calibrate_delay();
+ cpu = smp_processor_id();
+ cpu_data[cpu].udelay_val = loops_per_jiffy;
+
+ set_cpu_sibling_map(cpu);
+ set_cpu_core_map(cpu);
+
+ cpumask_set_cpu(cpu, &cpu_coherent_mask);
+ notify_cpu_starting(cpu);
+
+ /* Notify boot CPU that we're starting & ready to sync counters */
+ complete(&cpu_starting);
+
+ synchronise_count_slave(cpu);
+
+ /* The CPU is running and counters synchronised, now mark it online */
+ set_cpu_online(cpu, true);
+
+ calculate_cpu_foreign_map();
+
+ /*
+ * Notify boot CPU that we're up & online and it can safely return
+ * from __cpu_up
+ */
+ complete(&cpu_running);
+
+ /*
+ * irq will be enabled in ->smp_finish(), enabling it too early
+ * is dangerous.
+ */
+ WARN_ON_ONCE(!irqs_disabled());
+ mp_ops->smp_finish();
+
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+static void stop_this_cpu(void *dummy)
+{
+ /*
+ * Remove this CPU:
+ */
+
+ set_cpu_online(smp_processor_id(), false);
+ calculate_cpu_foreign_map();
+ local_irq_disable();
+ while (1);
+}
+
+void smp_send_stop(void)
+{
+ smp_call_function(stop_this_cpu, NULL, 0);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+/* called from main before smp_init() */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ init_new_context(current, &init_mm);
+ current_thread_info()->cpu = 0;
+ mp_ops->prepare_cpus(max_cpus);
+ set_cpu_sibling_map(0);
+ set_cpu_core_map(0);
+ calculate_cpu_foreign_map();
+#ifndef CONFIG_HOTPLUG_CPU
+ init_cpu_present(cpu_possible_mask);
+#endif
+ cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
+}
+
+/* preload SMP state for boot cpu */
+void smp_prepare_boot_cpu(void)
+{
+ if (mp_ops->prepare_boot_cpu)
+ mp_ops->prepare_boot_cpu();
+ set_cpu_possible(0, true);
+ set_cpu_online(0, true);
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+{
+ int err;
+
+ err = mp_ops->boot_secondary(cpu, tidle);
+ if (err)
+ return err;
+
+ /* Wait for CPU to start and be ready to sync counters */
+ if (!wait_for_completion_timeout(&cpu_starting,
+ msecs_to_jiffies(1000))) {
+ pr_crit("CPU%u: failed to start\n", cpu);
+ return -EIO;
+ }
+
+ synchronise_count_master(cpu);
+
+ /* Wait for CPU to finish startup & mark itself online before return */
+ wait_for_completion(&cpu_running);
+ return 0;
+}
+
+/* Not really SMP stuff ... */
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return 0;
+}
+
+static void flush_tlb_all_ipi(void *info)
+{
+ local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+ if (cpu_has_mmid) {
+ htw_stop();
+ ginvt_full();
+ sync_ginv();
+ instruction_hazard();
+ htw_start();
+ return;
+ }
+
+ on_each_cpu(flush_tlb_all_ipi, NULL, 1);
+}
+
+static void flush_tlb_mm_ipi(void *mm)
+{
+ drop_mmu_context((struct mm_struct *)mm);
+}
+
+/*
+ * Special Variant of smp_call_function for use by TLB functions:
+ *
+ * o No return value
+ * o collapses to normal function call on UP kernels
+ * o collapses to normal function call on systems with a single shared
+ * primary cache.
+ */
+static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
+{
+ smp_call_function(func, info, 1);
+}
+
+static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
+{
+ preempt_disable();
+
+ smp_on_other_tlbs(func, info);
+ func(info);
+
+ preempt_enable();
+}
+
+/*
+ * The following tlb flush calls are invoked when old translations are
+ * being torn down, or pte attributes are changing. For single threaded
+ * address spaces, a new context is obtained on the current cpu, and tlb
+ * context on other cpus are invalidated to force a new context allocation
+ * at switch_mm time, should the mm ever be used on other cpus. For
+ * multithreaded address spaces, intercpu interrupts have to be sent.
+ * Another case where intercpu interrupts are required is when the target
+ * mm might be active on another cpu (e.g. debuggers doing the flushes on
+ * behalf of debuggees, kswapd stealing pages from another process, etc.).
+ * Kanoj 07/00.
+ */
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ preempt_disable();
+
+ if (cpu_has_mmid) {
+ /*
+ * No need to worry about other CPUs - the ginvt in
+ * drop_mmu_context() will be globalized.
+ */
+ } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
+ } else {
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, mm))
+ set_cpu_context(cpu, mm, 0);
+ }
+ }
+ drop_mmu_context(mm);
+
+ preempt_enable();
+}
+
+struct flush_tlb_data {
+ struct vm_area_struct *vma;
+ unsigned long addr1;
+ unsigned long addr2;
+};
+
+static void flush_tlb_range_ipi(void *info)
+{
+ struct flush_tlb_data *fd = info;
+
+ local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long addr;
+ u32 old_mmid;
+
+ preempt_disable();
+ if (cpu_has_mmid) {
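+ /*
+ * With MMIDs the ginvt invalidation is globalized across the coherent
+ * domain, so no IPIs are needed. TLB entries map even/odd page pairs,
+ * hence the 2 * PAGE_SIZE alignment and stride.
+ */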
+ htw_stop();
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(cpu_asid(0, mm));
+ mtc0_tlbw_hazard();
+ addr = round_down(start, PAGE_SIZE * 2);
+ end = round_up(end, PAGE_SIZE * 2);
+ do {
+ ginvt_va_mmid(addr);
+ sync_ginv();
+ addr += PAGE_SIZE * 2;
+ } while (addr < end);
+ write_c0_memorymapid(old_mmid);
+ instruction_hazard();
+ htw_start();
+ } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ struct flush_tlb_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
+
+ smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
+ local_flush_tlb_range(vma, start, end);
+ } else {
+ unsigned int cpu;
+ int exec = vma->vm_flags & VM_EXEC;
+
+ for_each_online_cpu(cpu) {
+ /*
+ * flush_cache_range() will only fully flush icache if
+ * the VMA is executable, otherwise we must invalidate
+ * ASID without it appearing to has_valid_asid() as if
+ * mm has been completely unused by that CPU.
+ */
+ if (cpu != smp_processor_id() && cpu_context(cpu, mm))
+ set_cpu_context(cpu, mm, !exec);
+ }
+ local_flush_tlb_range(vma, start, end);
+ }
+ preempt_enable();
+}
+
+static void flush_tlb_kernel_range_ipi(void *info)
+{
+ struct flush_tlb_data *fd = info;
+
+ local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ struct flush_tlb_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
+
+ on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
+}
+
+static void flush_tlb_page_ipi(void *info)
+{
+ struct flush_tlb_data *fd = info;
+
+ local_flush_tlb_page(fd->vma, fd->addr1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ u32 old_mmid;
+
+ preempt_disable();
+ if (cpu_has_mmid) {
+ htw_stop();
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
+ mtc0_tlbw_hazard();
+ ginvt_va_mmid(page);
+ sync_ginv();
+ write_c0_memorymapid(old_mmid);
+ instruction_hazard();
+ htw_start();
+ } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
+ (current->mm != vma->vm_mm)) {
+ struct flush_tlb_data fd = {
+ .vma = vma,
+ .addr1 = page,
+ };
+
+ smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
+ local_flush_tlb_page(vma, page);
+ } else {
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ /*
+ * flush_cache_page() only does partial flushes, so
+ * invalidate ASID without it appearing to
+ * has_valid_asid() as if mm has been completely unused
+ * by that CPU.
+ */
+ if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
+ set_cpu_context(cpu, vma->vm_mm, 1);
+ }
+ local_flush_tlb_page(vma, page);
+ }
+ preempt_enable();
+}
+
+static void flush_tlb_one_ipi(void *info)
+{
+ unsigned long vaddr = (unsigned long) info;
+
+ local_flush_tlb_one(vaddr);
+}
+
+void flush_tlb_one(unsigned long vaddr)
+{
+ smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
+}
+
+EXPORT_SYMBOL(flush_tlb_page);
+EXPORT_SYMBOL(flush_tlb_one);
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+
+static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);
+
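+/*
+ * Relay a broadcast tick to each CPU in the mask by asynchronously running
+ * tick_broadcast_callee() (which calls tick_receive_broadcast()) on it via
+ * its per-CPU call_single_data.
+ */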
+void tick_broadcast(const struct cpumask *mask)
+{
+ call_single_data_t *csd;
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ csd = &per_cpu(tick_broadcast_csd, cpu);
+ smp_call_function_single_async(cpu, csd);
+ }
+}
+
+static void tick_broadcast_callee(void *info)
+{
+ tick_receive_broadcast();
+}
+
+static int __init tick_broadcast_init(void)
+{
+ call_single_data_t *csd;
+ int cpu;
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ csd = &per_cpu(tick_broadcast_csd, cpu);
+ csd->func = tick_broadcast_callee;
+ }
+
+ return 0;
+}
+early_initcall(tick_broadcast_init);
+
+#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
diff --git a/arch/mips/kernel/spinlock_test.c b/arch/mips/kernel/spinlock_test.c
new file mode 100644
index 000000000..ab4e3e1b1
--- /dev/null
+++ b/arch/mips/kernel/spinlock_test.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/kthread.h>
+#include <linux/hrtimer.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <asm/debug.h>
+
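+/*
+ * debugfs "spin_single": time, in microseconds, how long one CPU takes to
+ * lock and unlock an uncontended raw spinlock one million times.
+ */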
+static int ss_get(void *data, u64 *val)
+{
+ ktime_t start, finish;
+ int loops;
+ int cont;
+ DEFINE_RAW_SPINLOCK(ss_spin);
+
+ loops = 1000000;
+ cont = 1;
+
+ start = ktime_get();
+
+ while (cont) {
+ raw_spin_lock(&ss_spin);
+ loops--;
+ if (loops == 0)
+ cont = 0;
+ raw_spin_unlock(&ss_spin);
+ }
+
+ finish = ktime_get();
+
+ *val = ktime_us_delta(finish, start);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n");
+
+
+
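+/*
+ * debugfs "spin_multi": two threads hammer the same raw spinlock one million
+ * times each; the reported value is the elapsed time in microseconds.
+ */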
+struct spin_multi_state {
+ raw_spinlock_t lock;
+ atomic_t start_wait;
+ atomic_t enter_wait;
+ atomic_t exit_wait;
+ int loops;
+};
+
+struct spin_multi_per_thread {
+ struct spin_multi_state *state;
+ ktime_t start;
+};
+
+static int multi_other(void *data)
+{
+ int loops;
+ int cont;
+ struct spin_multi_per_thread *pt = data;
+ struct spin_multi_state *s = pt->state;
+
+ loops = s->loops;
+ cont = 1;
+
+ atomic_dec(&s->enter_wait);
+
+ while (atomic_read(&s->enter_wait))
+ ; /* spin */
+
+ pt->start = ktime_get();
+
+ atomic_dec(&s->start_wait);
+
+ while (atomic_read(&s->start_wait))
+ ; /* spin */
+
+ while (cont) {
+ raw_spin_lock(&s->lock);
+ loops--;
+ if (loops == 0)
+ cont = 0;
+ raw_spin_unlock(&s->lock);
+ }
+
+ atomic_dec(&s->exit_wait);
+ while (atomic_read(&s->exit_wait))
+ ; /* spin */
+ return 0;
+}
+
+static int multi_get(void *data, u64 *val)
+{
+ ktime_t finish;
+ struct spin_multi_state ms;
+ struct spin_multi_per_thread t1, t2;
+
+ ms.lock = __RAW_SPIN_LOCK_UNLOCKED("multi_get");
+ ms.loops = 1000000;
+
+ atomic_set(&ms.start_wait, 2);
+ atomic_set(&ms.enter_wait, 2);
+ atomic_set(&ms.exit_wait, 2);
+ t1.state = &ms;
+ t2.state = &ms;
+
+ kthread_run(multi_other, &t2, "multi_get");
+
+ multi_other(&t1);
+
+ finish = ktime_get();
+
+ *val = ktime_us_delta(finish, t1.start);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n");
+
+static int __init spinlock_test(void)
+{
+ debugfs_create_file("spin_single", S_IRUGO, mips_debugfs_dir, NULL,
+ &fops_ss);
+ debugfs_create_file("spin_multi", S_IRUGO, mips_debugfs_dir, NULL,
+ &fops_multi);
+ return 0;
+}
+device_initcall(spinlock_test);
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
new file mode 100644
index 000000000..d5d96214c
--- /dev/null
+++ b/arch/mips/kernel/spram.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MIPS SPRAM support
+ *
+ * Copyright (C) 2007, 2008 MIPS Technologies, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/stddef.h>
+
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+#include <asm/r4kcache.h>
+#include <asm/hazards.h>
+
+/*
+ * These definitions are correct for the 24K/34K/74K SPRAM sample
+ * implementation. The 4KS interpreted the tags differently...
+ */
+#define SPRAM_TAG0_ENABLE 0x00000080
+#define SPRAM_TAG0_PA_MASK 0xfffff000
+#define SPRAM_TAG1_SIZE_MASK 0xfffff000
+
+#define SPRAM_TAG_STRIDE 8
+
+#define ERRCTL_SPRAM (1 << 28)
+
+/* errctl access */
+#define read_c0_errctl(x) read_c0_ecc(x)
+#define write_c0_errctl(x) write_c0_ecc(x)
+
+/*
+ * Different semantics to the set_c0_* function built by __BUILD_SET_C0
+ */
+static unsigned int bis_c0_errctl(unsigned int set)
+{
+ unsigned int res;
+ res = read_c0_errctl();
+ write_c0_errctl(res | set);
+ return res;
+}
+
+static void ispram_store_tag(unsigned int offset, unsigned int data)
+{
+ unsigned int errctl;
+
+ /* enable SPRAM tag access */
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+
+ write_c0_taglo(data);
+ ehb();
+
+ cache_op(Index_Store_Tag_I, CKSEG0|offset);
+ ehb();
+
+ write_c0_errctl(errctl);
+ ehb();
+}
+
+
+static unsigned int ispram_load_tag(unsigned int offset)
+{
+ unsigned int data;
+ unsigned int errctl;
+
+ /* enable SPRAM tag access */
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+ cache_op(Index_Load_Tag_I, CKSEG0 | offset);
+ ehb();
+ data = read_c0_taglo();
+ ehb();
+ write_c0_errctl(errctl);
+ ehb();
+
+ return data;
+}
+
+static void dspram_store_tag(unsigned int offset, unsigned int data)
+{
+ unsigned int errctl;
+
+ /* enable SPRAM tag access */
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+ write_c0_dtaglo(data);
+ ehb();
+ cache_op(Index_Store_Tag_D, CKSEG0 | offset);
+ ehb();
+ write_c0_errctl(errctl);
+ ehb();
+}
+
+
+static unsigned int dspram_load_tag(unsigned int offset)
+{
+ unsigned int data;
+ unsigned int errctl;
+
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+ cache_op(Index_Load_Tag_D, CKSEG0 | offset);
+ ehb();
+ data = read_c0_dtaglo();
+ ehb();
+ write_c0_errctl(errctl);
+ ehb();
+
+ return data;
+}
+
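+/*
+ * Walk the SPRAM tag pairs, map each block to consecutive physical addresses
+ * starting at 'base', enable it, and report what was found.
+ */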
+static void probe_spram(char *type,
+ unsigned int base,
+ unsigned int (*read)(unsigned int),
+ void (*write)(unsigned int, unsigned int))
+{
+ unsigned int firstsize = 0, lastsize = 0;
+ unsigned int firstpa = 0, lastpa = 0, pa = 0;
+ unsigned int offset = 0;
+ unsigned int size, tag0, tag1;
+ unsigned int enabled;
+ int i;
+
+ /*
+ * The limit is arbitrary but avoids the loop running away if
+ * the SPRAM tags are implemented differently
+ */
+
+ for (i = 0; i < 8; i++) {
+ tag0 = read(offset);
+ tag1 = read(offset+SPRAM_TAG_STRIDE);
+ pr_debug("DBG %s%d: tag0=%08x tag1=%08x\n",
+ type, i, tag0, tag1);
+
+ size = tag1 & SPRAM_TAG1_SIZE_MASK;
+
+ if (size == 0)
+ break;
+
+ if (i != 0) {
+ /* tags may repeat... */
+ if ((pa == firstpa && size == firstsize) ||
+ (pa == lastpa && size == lastsize))
+ break;
+ }
+
+ /* Align base with size */
+ base = (base + size - 1) & ~(size-1);
+
+ /* reprogram the base address and enable */
+ tag0 = (base & SPRAM_TAG0_PA_MASK) | SPRAM_TAG0_ENABLE;
+ write(offset, tag0);
+
+ base += size;
+
+ /* reread the tag */
+ tag0 = read(offset);
+ pa = tag0 & SPRAM_TAG0_PA_MASK;
+ enabled = tag0 & SPRAM_TAG0_ENABLE;
+
+ if (i == 0) {
+ firstpa = pa;
+ firstsize = size;
+ }
+
+ lastpa = pa;
+ lastsize = size;
+
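+ /* Sanity-check the newly enabled DSPRAM with a write/read-back test */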
+ if (strcmp(type, "DSPRAM") == 0) {
+ unsigned int *vp = (unsigned int *)(CKSEG1 | pa);
+ unsigned int v;
+#define TDAT 0x5a5aa5a5
+ vp[0] = TDAT;
+ vp[1] = ~TDAT;
+
+ mb();
+
+ v = vp[0];
+ if (v != TDAT)
+ printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
+ vp, TDAT, v);
+ v = vp[1];
+ if (v != ~TDAT)
+ printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
+ vp+1, ~TDAT, v);
+ }
+
+ pr_info("%s%d: PA=%08x,Size=%08x%s\n",
+ type, i, pa, size, enabled ? ",enabled" : "");
+ offset += 2 * SPRAM_TAG_STRIDE;
+ }
+}
+void spram_config(void)
+{
+ unsigned int config0;
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_74K:
+ case CPU_1004K:
+ case CPU_1074K:
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ case CPU_QEMU_GENERIC:
+ case CPU_I6400:
+ case CPU_P6600:
+ config0 = read_c0_config();
+ /* FIXME: addresses are Malta specific */
+ if (config0 & MIPS_CONF_ISP) {
+ probe_spram("ISPRAM", 0x1c000000,
+ &ispram_load_tag, &ispram_store_tag);
+ }
+ if (config0 & MIPS_CONF_DSP)
+ probe_spram("DSPRAM", 0x1c100000,
+ &dspram_load_tag, &dspram_store_tag);
+ }
+}
diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c
new file mode 100644
index 000000000..f2e720940
--- /dev/null
+++ b/arch/mips/kernel/stacktrace.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Stack trace management functions
+ *
+ * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ */
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/stacktrace.h>
+#include <linux/export.h>
+#include <asm/stacktrace.h>
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer:
+ */
+static void save_raw_context_stack(struct stack_trace *trace,
+ unsigned long reg29, int savesched)
+{
+ unsigned long *sp = (unsigned long *)reg29;
+ unsigned long addr;
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr) &&
+ (savesched || !in_sched_functions(addr))) {
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = addr;
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
+ }
+}
+
+static void save_context_stack(struct stack_trace *trace,
+ struct task_struct *tsk, struct pt_regs *regs, int savesched)
+{
+ unsigned long sp = regs->regs[29];
+#ifdef CONFIG_KALLSYMS
+ unsigned long ra = regs->regs[31];
+ unsigned long pc = regs->cp0_epc;
+
+ if (raw_show_trace || !__kernel_text_address(pc)) {
+ unsigned long stack_page =
+ (unsigned long)task_stack_page(tsk);
+ if (stack_page && sp >= stack_page &&
+ sp <= stack_page + THREAD_SIZE - 32)
+ save_raw_context_stack(trace, sp, savesched);
+ return;
+ }
+ do {
+ if (savesched || !in_sched_functions(pc)) {
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = pc;
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
+ pc = unwind_stack(tsk, &sp, pc, &ra);
+ } while (pc);
+#else
+ save_raw_context_stack(trace, sp, savesched);
+#endif
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace(struct stack_trace *trace)
+{
+ save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ struct pt_regs dummyregs;
+ struct pt_regs *regs = &dummyregs;
+
+ WARN_ON(trace->nr_entries || !trace->max_entries);
+
+ if (tsk != current) {
+ regs->regs[29] = tsk->thread.reg29;
+ regs->regs[31] = 0;
+ regs->cp0_epc = tsk->thread.reg31;
+ } else
+ prepare_frametrace(regs);
+ save_context_stack(trace, tsk, regs, tsk == current);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
new file mode 100644
index 000000000..abdd7aaa3
--- /dev/null
+++ b/arch/mips/kernel/sync-r4k.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Count register synchronisation.
+ *
+ * All CPUs will have their count registers synchronised to the CPU0 next time
+ * value. This can cause a small timewarp for CPU0. All other CPUs should
+ * not have done anything significant (but they may have had interrupts
+ * enabled briefly - prom_smp_finish() should not be responsible for enabling
+ * interrupts...)
+ */
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+#include <linux/cpumask.h>
+
+#include <asm/r4k-timer.h>
+#include <linux/atomic.h>
+#include <asm/barrier.h>
+#include <asm/mipsregs.h>
+
+static unsigned int initcount;
+static atomic_t count_count_start = ATOMIC_INIT(0);
+static atomic_t count_count_stop = ATOMIC_INIT(0);
+
+#define COUNTON 100
+#define NR_LOOPS 3
+
+void synchronise_count_master(int cpu)
+{
+ int i;
+ unsigned long flags;
+
+ pr_info("Synchronize counters for CPU %u: ", cpu);
+
+ local_irq_save(flags);
+
+ /*
+ * We loop a few times to get a primed instruction cache,
+ * then the last pass is more or less synchronised and
+ * the master and slaves each set their cycle counters to a known
+ * value all at once. This reduces the chance of having random offsets
+ * between the processors, and guarantees that the maximum
+ * delay between the cycle counters is never bigger than
+ * the latency of information-passing (cachelines) between
+ * two CPUs.
+ */
+
+ for (i = 0; i < NR_LOOPS; i++) {
+ /* slaves loop on '!= 2' */
+ while (atomic_read(&count_count_start) != 1)
+ mb();
+ atomic_set(&count_count_stop, 0);
+ smp_wmb();
+
+ /* Let the slave write its count register */
+ atomic_inc(&count_count_start);
+
+ /* Count will be initialised to current timer */
+ if (i == 1)
+ initcount = read_c0_count();
+
+ /*
+ * Everyone initialises count in the last loop:
+ */
+ if (i == NR_LOOPS-1)
+ write_c0_count(initcount);
+
+ /*
+ * Wait for slave to leave the synchronization point:
+ */
+ while (atomic_read(&count_count_stop) != 1)
+ mb();
+ atomic_set(&count_count_start, 0);
+ smp_wmb();
+ atomic_inc(&count_count_stop);
+ }
+ /* Arrange for an interrupt in a short while */
+ write_c0_compare(read_c0_count() + COUNTON);
+
+ local_irq_restore(flags);
+
+ /*
+ * i386 code reported the skew here, but the
+ * count registers were almost certainly out of sync
+ * so no point in alarming people
+ */
+ pr_cont("done.\n");
+}
+
+void synchronise_count_slave(int cpu)
+{
+ int i;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /*
+ * Not every cpu is online at the time this gets called,
+ * so we first wait for the master to say everyone is ready
+ */
+
+ for (i = 0; i < NR_LOOPS; i++) {
+ atomic_inc(&count_count_start);
+ while (atomic_read(&count_count_start) != 2)
+ mb();
+
+ /*
+ * Everyone initialises count in the last loop:
+ */
+ if (i == NR_LOOPS-1)
+ write_c0_count(initcount);
+
+ atomic_inc(&count_count_stop);
+ while (atomic_read(&count_count_stop) != 2)
+ mb();
+ }
+ /* Arrange for an interrupt in a short while */
+ write_c0_compare(read_c0_count() + COUNTON);
+
+ local_irq_restore(flags);
+}
+#undef NR_LOOPS
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
new file mode 100644
index 000000000..5512cd586
--- /dev/null
+++ b/arch/mips/kernel/syscall.c
@@ -0,0 +1,242 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/fs.h>
+#include <linux/smp.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/syscalls.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+#include <linux/unistd.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/compiler.h>
+#include <linux/ipc.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/elf.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/asm.h>
+#include <asm/asm-eva.h>
+#include <asm/branch.h>
+#include <asm/cachectl.h>
+#include <asm/cacheflush.h>
+#include <asm/asm-offsets.h>
+#include <asm/signal.h>
+#include <asm/sim.h>
+#include <asm/shmparam.h>
+#include <asm/sync.h>
+#include <asm/sysmips.h>
+#include <asm/switch_to.h>
+
+/*
+ * For historic reasons the pipe(2) syscall on MIPS has an unusual calling
+ * convention. It returns results in registers $v0 / $v1 which means there
+ * is no need for it to verify the validity of a userspace pointer
+ * argument. Historically that used to be expensive in Linux. These days
+ * the performance advantage is negligible.
+ */
+asmlinkage int sysm_pipe(void)
+{
+ int fd[2];
+ int error = do_pipe_flags(fd, 0);
+ if (error)
+ return error;
+ current_pt_regs()->regs[3] = fd[1];
+ return fd[0];
+}
+
+SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags, unsigned long,
+ fd, off_t, offset)
+{
+ if (offset & ~PAGE_MASK)
+ return -EINVAL;
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd,
+ offset >> PAGE_SHIFT);
+}
+
+SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags, unsigned long, fd,
+ unsigned long, pgoff)
+{
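+ /* mmap2 takes its offset in units of 4096 bytes, regardless of PAGE_SIZE */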
+ if (pgoff & (~PAGE_MASK >> 12))
+ return -EINVAL;
+
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd,
+ pgoff >> (PAGE_SHIFT - 12));
+}
+
+save_static_function(sys_fork);
+save_static_function(sys_clone);
+save_static_function(sys_clone3);
+
+SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
+{
+ struct thread_info *ti = task_thread_info(current);
+
+ ti->tp_value = addr;
+ if (cpu_has_userlocal)
+ write_c0_userlocal(addr);
+
+ return 0;
+}
+
+static inline int mips_atomic_set(unsigned long addr, unsigned long new)
+{
+ unsigned long old, tmp;
+ struct pt_regs *regs;
+ unsigned int err;
+
+ if (unlikely(addr & 3))
+ return -EINVAL;
+
+ if (unlikely(!access_ok((const void __user *)addr, 4)))
+ return -EINVAL;
+
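+ /*
+ * Atomically store 'new' at 'addr' and return the old value. Three
+ * variants: LL/SC with the R10000 branch-likely workaround, plain
+ * LL/SC, and a software emulation (ll_bit/ll_task) for CPUs without
+ * LL/SC support.
+ */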
+ if (cpu_has_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) {
+ __asm__ __volatile__ (
+ " .set push \n"
+ " .set arch=r4000 \n"
+ " li %[err], 0 \n"
+ "1: ll %[old], (%[addr]) \n"
+ " move %[tmp], %[new] \n"
+ "2: sc %[tmp], (%[addr]) \n"
+ " beqzl %[tmp], 1b \n"
+ "3: \n"
+ " .insn \n"
+ " .section .fixup,\"ax\" \n"
+ "4: li %[err], %[efault] \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " "STR(PTR)" 1b, 4b \n"
+ " "STR(PTR)" 2b, 4b \n"
+ " .previous \n"
+ " .set pop \n"
+ : [old] "=&r" (old),
+ [err] "=&r" (err),
+ [tmp] "=&r" (tmp)
+ : [addr] "r" (addr),
+ [new] "r" (new),
+ [efault] "i" (-EFAULT)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ __asm__ __volatile__ (
+ " .set push \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
+ " li %[err], 0 \n"
+ "1: \n"
+ " " __SYNC(full, loongson3_war) " \n"
+ user_ll("%[old]", "(%[addr])")
+ " move %[tmp], %[new] \n"
+ "2: \n"
+ user_sc("%[tmp]", "(%[addr])")
+ " beqz %[tmp], 1b \n"
+ "3: \n"
+ " .insn \n"
+ " .section .fixup,\"ax\" \n"
+ "5: li %[err], %[efault] \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " "STR(PTR)" 1b, 5b \n"
+ " "STR(PTR)" 2b, 5b \n"
+ " .previous \n"
+ " .set pop \n"
+ : [old] "=&r" (old),
+ [err] "=&r" (err),
+ [tmp] "=&r" (tmp)
+ : [addr] "r" (addr),
+ [new] "r" (new),
+ [efault] "i" (-EFAULT)
+ : "memory");
+ } else {
+ do {
+ preempt_disable();
+ ll_bit = 1;
+ ll_task = current;
+ preempt_enable();
+
+ err = __get_user(old, (unsigned int *) addr);
+ err |= __put_user(new, (unsigned int *) addr);
+ if (err)
+ break;
+ rmb();
+ } while (!ll_bit);
+ }
+
+ if (unlikely(err))
+ return err;
+
+ regs = current_pt_regs();
+ regs->regs[2] = old;
+ regs->regs[7] = 0; /* No error */
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ " move $29, %0 \n"
+ " j syscall_exit \n"
+ : /* no outputs */
+ : "r" (regs));
+
+ /* unreached. Honestly. */
+ unreachable();
+}
+
+/*
+ * mips_atomic_set() normally returns directly via syscall_exit potentially
+ * clobbering static registers, so be sure to preserve them.
+ */
+save_static_function(sys_sysmips);
+
+SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2)
+{
+ switch (cmd) {
+ case MIPS_ATOMIC_SET:
+ return mips_atomic_set(arg1, arg2);
+
+ case MIPS_FIXADE:
+ if (arg1 & ~3)
+ return -EINVAL;
+
+ if (arg1 & 1)
+ set_thread_flag(TIF_FIXADE);
+ else
+ clear_thread_flag(TIF_FIXADE);
+ if (arg1 & 2)
+ set_thread_flag(TIF_LOGADE);
+ else
+ clear_thread_flag(TIF_LOGADE);
+
+ return 0;
+
+ case FLUSH_CACHE:
+ __flush_cache_all();
+ return 0;
+ }
+
+ return -EINVAL;
+}
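
As background, MIPS_ATOMIC_SET is the operation old userspace on pre-LL/SC CPUs used to build locks: it atomically replaces a 32-bit word and returns the previous value. A hypothetical caller might look like the sketch below (the helper name is made up; MIPS_ATOMIC_SET comes from the kernel's asm/sysmips.h uapi header, and modern code would normally use compiler atomics instead):

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/sysmips.h>	/* MIPS_ATOMIC_SET */

	/*
	 * Atomically store @new into @word and return the previous value,
	 * or -1 with errno set on failure.
	 */
	static long atomic_swap_word(unsigned int *word, unsigned int new)
	{
		return syscall(SYS_sysmips, MIPS_ATOMIC_SET, (long)word, (long)new);
	}
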
+
+/*
+ * Not implemented yet ...
+ */
+SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
+{
+ return -ENOSYS;
+}
diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile
new file mode 100644
index 000000000..6efb2f688
--- /dev/null
+++ b/arch/mips/kernel/syscalls/Makefile
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: GPL-2.0
+kapi := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
+
+_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
+ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+
+syscalln32 := $(srctree)/$(src)/syscall_n32.tbl
+syscalln64 := $(srctree)/$(src)/syscall_n64.tbl
+syscallo32 := $(srctree)/$(src)/syscall_o32.tbl
+syshdr := $(srctree)/$(src)/syscallhdr.sh
+sysnr := $(srctree)/$(src)/syscallnr.sh
+systbl := $(srctree)/$(src)/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR $@
+ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
+ '$(syshdr_abis_$(basetarget))' \
+ '$(syshdr_pfx_$(basetarget))' \
+ '$(syshdr_offset_$(basetarget))'
+
+quiet_cmd_sysnr = SYSNR $@
+ cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \
+ '$(sysnr_abis_$(basetarget))' \
+ '$(sysnr_pfx_$(basetarget))' \
+ '$(sysnr_offset_$(basetarget))'
+
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
+ '$(systbl_abis_$(basetarget))' \
+ '$(systbl_abi_$(basetarget))' \
+ '$(systbl_offset_$(basetarget))'
+
+syshdr_offset_unistd_n32 := __NR_Linux
+$(uapi)/unistd_n32.h: $(syscalln32) $(syshdr)
+ $(call if_changed,syshdr)
+
+syshdr_offset_unistd_n64 := __NR_Linux
+$(uapi)/unistd_n64.h: $(syscalln64) $(syshdr)
+ $(call if_changed,syshdr)
+
+syshdr_offset_unistd_o32 := __NR_Linux
+$(uapi)/unistd_o32.h: $(syscallo32) $(syshdr)
+ $(call if_changed,syshdr)
+
+sysnr_pfx_unistd_nr_n32 := N32
+sysnr_offset_unistd_nr_n32 := 6000
+$(uapi)/unistd_nr_n32.h: $(syscalln32) $(sysnr)
+ $(call if_changed,sysnr)
+
+sysnr_pfx_unistd_nr_n64 := 64
+sysnr_offset_unistd_nr_n64 := 5000
+$(uapi)/unistd_nr_n64.h: $(syscalln64) $(sysnr)
+ $(call if_changed,sysnr)
+
+sysnr_pfx_unistd_nr_o32 := O32
+sysnr_offset_unistd_nr_o32 := 4000
+$(uapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr)
+ $(call if_changed,sysnr)
+
+systbl_abi_syscall_table_32_o32 := 32_o32
+systbl_offset_syscall_table_32_o32 := 4000
+$(kapi)/syscall_table_32_o32.h: $(syscallo32) $(systbl)
+ $(call if_changed,systbl)
+
+systbl_abi_syscall_table_64_n32 := 64_n32
+systbl_offset_syscall_table_64_n32 := 6000
+$(kapi)/syscall_table_64_n32.h: $(syscalln32) $(systbl)
+ $(call if_changed,systbl)
+
+systbl_abi_syscall_table_64_n64 := 64_n64
+systbl_offset_syscall_table_64_n64 := 5000
+$(kapi)/syscall_table_64_n64.h: $(syscalln64) $(systbl)
+ $(call if_changed,systbl)
+
+systbl_abi_syscall_table_64_o32 := 64_o32
+systbl_offset_syscall_table_64_o32 := 4000
+$(kapi)/syscall_table_64_o32.h: $(syscallo32) $(systbl)
+ $(call if_changed,systbl)
+
+uapisyshdr-y += unistd_n32.h \
+ unistd_n64.h \
+ unistd_o32.h \
+ unistd_nr_n32.h \
+ unistd_nr_n64.h \
+ unistd_nr_o32.h
+kapisyshdr-y += syscall_table_32_o32.h \
+ syscall_table_64_n32.h \
+ syscall_table_64_n64.h \
+ syscall_table_64_o32.h
+
+targets += $(uapisyshdr-y) $(kapisyshdr-y)
+
+PHONY += all
+all: $(addprefix $(uapi)/,$(uapisyshdr-y))
+all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+ @:
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
new file mode 100644
index 000000000..32817c954
--- /dev/null
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -0,0 +1,381 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for mips
+#
+# The format is:
+# <number> <abi> <name> <entry point> <compat entry point>
+#
+# The <abi> is always "n32" for this file.
+#
+0 n32 read sys_read
+1 n32 write sys_write
+2 n32 open sys_open
+3 n32 close sys_close
+4 n32 stat sys_newstat
+5 n32 fstat sys_newfstat
+6 n32 lstat sys_newlstat
+7 n32 poll sys_poll
+8 n32 lseek sys_lseek
+9 n32 mmap sys_mips_mmap
+10 n32 mprotect sys_mprotect
+11 n32 munmap sys_munmap
+12 n32 brk sys_brk
+13 n32 rt_sigaction compat_sys_rt_sigaction
+14 n32 rt_sigprocmask compat_sys_rt_sigprocmask
+15 n32 ioctl compat_sys_ioctl
+16 n32 pread64 sys_pread64
+17 n32 pwrite64 sys_pwrite64
+18 n32 readv sys_readv
+19 n32 writev sys_writev
+20 n32 access sys_access
+21 n32 pipe sysm_pipe
+22 n32 _newselect compat_sys_select
+23 n32 sched_yield sys_sched_yield
+24 n32 mremap sys_mremap
+25 n32 msync sys_msync
+26 n32 mincore sys_mincore
+27 n32 madvise sys_madvise
+28 n32 shmget sys_shmget
+29 n32 shmat sys_shmat
+30 n32 shmctl compat_sys_old_shmctl
+31 n32 dup sys_dup
+32 n32 dup2 sys_dup2
+33 n32 pause sys_pause
+34 n32 nanosleep sys_nanosleep_time32
+35 n32 getitimer compat_sys_getitimer
+36 n32 setitimer compat_sys_setitimer
+37 n32 alarm sys_alarm
+38 n32 getpid sys_getpid
+39 n32 sendfile compat_sys_sendfile
+40 n32 socket sys_socket
+41 n32 connect sys_connect
+42 n32 accept sys_accept
+43 n32 sendto sys_sendto
+44 n32 recvfrom compat_sys_recvfrom
+45 n32 sendmsg compat_sys_sendmsg
+46 n32 recvmsg compat_sys_recvmsg
+47 n32 shutdown sys_shutdown
+48 n32 bind sys_bind
+49 n32 listen sys_listen
+50 n32 getsockname sys_getsockname
+51 n32 getpeername sys_getpeername
+52 n32 socketpair sys_socketpair
+53 n32 setsockopt sys_setsockopt
+54 n32 getsockopt sys_getsockopt
+55 n32 clone __sys_clone
+56 n32 fork __sys_fork
+57 n32 execve compat_sys_execve
+58 n32 exit sys_exit
+59 n32 wait4 compat_sys_wait4
+60 n32 kill sys_kill
+61 n32 uname sys_newuname
+62 n32 semget sys_semget
+63 n32 semop sys_semop
+64 n32 semctl compat_sys_old_semctl
+65 n32 shmdt sys_shmdt
+66 n32 msgget sys_msgget
+67 n32 msgsnd compat_sys_msgsnd
+68 n32 msgrcv compat_sys_msgrcv
+69 n32 msgctl compat_sys_old_msgctl
+70 n32 fcntl compat_sys_fcntl
+71 n32 flock sys_flock
+72 n32 fsync sys_fsync
+73 n32 fdatasync sys_fdatasync
+74 n32 truncate sys_truncate
+75 n32 ftruncate sys_ftruncate
+76 n32 getdents compat_sys_getdents
+77 n32 getcwd sys_getcwd
+78 n32 chdir sys_chdir
+79 n32 fchdir sys_fchdir
+80 n32 rename sys_rename
+81 n32 mkdir sys_mkdir
+82 n32 rmdir sys_rmdir
+83 n32 creat sys_creat
+84 n32 link sys_link
+85 n32 unlink sys_unlink
+86 n32 symlink sys_symlink
+87 n32 readlink sys_readlink
+88 n32 chmod sys_chmod
+89 n32 fchmod sys_fchmod
+90 n32 chown sys_chown
+91 n32 fchown sys_fchown
+92 n32 lchown sys_lchown
+93 n32 umask sys_umask
+94 n32 gettimeofday compat_sys_gettimeofday
+95 n32 getrlimit compat_sys_getrlimit
+96 n32 getrusage compat_sys_getrusage
+97 n32 sysinfo compat_sys_sysinfo
+98 n32 times compat_sys_times
+99 n32 ptrace compat_sys_ptrace
+100 n32 getuid sys_getuid
+101 n32 syslog sys_syslog
+102 n32 getgid sys_getgid
+103 n32 setuid sys_setuid
+104 n32 setgid sys_setgid
+105 n32 geteuid sys_geteuid
+106 n32 getegid sys_getegid
+107 n32 setpgid sys_setpgid
+108 n32 getppid sys_getppid
+109 n32 getpgrp sys_getpgrp
+110 n32 setsid sys_setsid
+111 n32 setreuid sys_setreuid
+112 n32 setregid sys_setregid
+113 n32 getgroups sys_getgroups
+114 n32 setgroups sys_setgroups
+115 n32 setresuid sys_setresuid
+116 n32 getresuid sys_getresuid
+117 n32 setresgid sys_setresgid
+118 n32 getresgid sys_getresgid
+119 n32 getpgid sys_getpgid
+120 n32 setfsuid sys_setfsuid
+121 n32 setfsgid sys_setfsgid
+122 n32 getsid sys_getsid
+123 n32 capget sys_capget
+124 n32 capset sys_capset
+125 n32 rt_sigpending compat_sys_rt_sigpending
+126 n32 rt_sigtimedwait compat_sys_rt_sigtimedwait_time32
+127 n32 rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+128 n32 rt_sigsuspend compat_sys_rt_sigsuspend
+129 n32 sigaltstack compat_sys_sigaltstack
+130 n32 utime sys_utime32
+131 n32 mknod sys_mknod
+132 n32 personality sys_32_personality
+133 n32 ustat compat_sys_ustat
+134 n32 statfs compat_sys_statfs
+135 n32 fstatfs compat_sys_fstatfs
+136 n32 sysfs sys_sysfs
+137 n32 getpriority sys_getpriority
+138 n32 setpriority sys_setpriority
+139 n32 sched_setparam sys_sched_setparam
+140 n32 sched_getparam sys_sched_getparam
+141 n32 sched_setscheduler sys_sched_setscheduler
+142 n32 sched_getscheduler sys_sched_getscheduler
+143 n32 sched_get_priority_max sys_sched_get_priority_max
+144 n32 sched_get_priority_min sys_sched_get_priority_min
+145 n32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+146 n32 mlock sys_mlock
+147 n32 munlock sys_munlock
+148 n32 mlockall sys_mlockall
+149 n32 munlockall sys_munlockall
+150 n32 vhangup sys_vhangup
+151 n32 pivot_root sys_pivot_root
+152 n32 _sysctl sys_ni_syscall
+153 n32 prctl sys_prctl
+154 n32 adjtimex sys_adjtimex_time32
+155 n32 setrlimit compat_sys_setrlimit
+156 n32 chroot sys_chroot
+157 n32 sync sys_sync
+158 n32 acct sys_acct
+159 n32 settimeofday compat_sys_settimeofday
+160 n32 mount sys_mount
+161 n32 umount2 sys_umount
+162 n32 swapon sys_swapon
+163 n32 swapoff sys_swapoff
+164 n32 reboot sys_reboot
+165 n32 sethostname sys_sethostname
+166 n32 setdomainname sys_setdomainname
+167 n32 create_module sys_ni_syscall
+168 n32 init_module sys_init_module
+169 n32 delete_module sys_delete_module
+170 n32 get_kernel_syms sys_ni_syscall
+171 n32 query_module sys_ni_syscall
+172 n32 quotactl sys_quotactl
+173 n32 nfsservctl sys_ni_syscall
+174 n32 getpmsg sys_ni_syscall
+175 n32 putpmsg sys_ni_syscall
+176 n32 afs_syscall sys_ni_syscall
+# 177 reserved for security
+177 n32 reserved177 sys_ni_syscall
+178 n32 gettid sys_gettid
+179 n32 readahead sys_readahead
+180 n32 setxattr sys_setxattr
+181 n32 lsetxattr sys_lsetxattr
+182 n32 fsetxattr sys_fsetxattr
+183 n32 getxattr sys_getxattr
+184 n32 lgetxattr sys_lgetxattr
+185 n32 fgetxattr sys_fgetxattr
+186 n32 listxattr sys_listxattr
+187 n32 llistxattr sys_llistxattr
+188 n32 flistxattr sys_flistxattr
+189 n32 removexattr sys_removexattr
+190 n32 lremovexattr sys_lremovexattr
+191 n32 fremovexattr sys_fremovexattr
+192 n32 tkill sys_tkill
+193 n32 reserved193 sys_ni_syscall
+194 n32 futex sys_futex_time32
+195 n32 sched_setaffinity compat_sys_sched_setaffinity
+196 n32 sched_getaffinity compat_sys_sched_getaffinity
+197 n32 cacheflush sys_cacheflush
+198 n32 cachectl sys_cachectl
+199 n32 sysmips __sys_sysmips
+200 n32 io_setup compat_sys_io_setup
+201 n32 io_destroy sys_io_destroy
+202 n32 io_getevents sys_io_getevents_time32
+203 n32 io_submit compat_sys_io_submit
+204 n32 io_cancel sys_io_cancel
+205 n32 exit_group sys_exit_group
+206 n32 lookup_dcookie sys_lookup_dcookie
+207 n32 epoll_create sys_epoll_create
+208 n32 epoll_ctl sys_epoll_ctl
+209 n32 epoll_wait sys_epoll_wait
+210 n32 remap_file_pages sys_remap_file_pages
+211 n32 rt_sigreturn sysn32_rt_sigreturn
+212 n32 fcntl64 compat_sys_fcntl64
+213 n32 set_tid_address sys_set_tid_address
+214 n32 restart_syscall sys_restart_syscall
+215 n32 semtimedop sys_semtimedop_time32
+216 n32 fadvise64 sys_fadvise64_64
+217 n32 statfs64 compat_sys_statfs64
+218 n32 fstatfs64 compat_sys_fstatfs64
+219 n32 sendfile64 sys_sendfile64
+220 n32 timer_create compat_sys_timer_create
+221 n32 timer_settime sys_timer_settime32
+222 n32 timer_gettime sys_timer_gettime32
+223 n32 timer_getoverrun sys_timer_getoverrun
+224 n32 timer_delete sys_timer_delete
+225 n32 clock_settime sys_clock_settime32
+226 n32 clock_gettime sys_clock_gettime32
+227 n32 clock_getres sys_clock_getres_time32
+228 n32 clock_nanosleep sys_clock_nanosleep_time32
+229 n32 tgkill sys_tgkill
+230 n32 utimes sys_utimes_time32
+231 n32 mbind compat_sys_mbind
+232 n32 get_mempolicy compat_sys_get_mempolicy
+233 n32 set_mempolicy compat_sys_set_mempolicy
+234 n32 mq_open compat_sys_mq_open
+235 n32 mq_unlink sys_mq_unlink
+236 n32 mq_timedsend sys_mq_timedsend_time32
+237 n32 mq_timedreceive sys_mq_timedreceive_time32
+238 n32 mq_notify compat_sys_mq_notify
+239 n32 mq_getsetattr compat_sys_mq_getsetattr
+240 n32 vserver sys_ni_syscall
+241 n32 waitid compat_sys_waitid
+# 242 was sys_setaltroot
+243 n32 add_key sys_add_key
+244 n32 request_key sys_request_key
+245 n32 keyctl compat_sys_keyctl
+246 n32 set_thread_area sys_set_thread_area
+247 n32 inotify_init sys_inotify_init
+248 n32 inotify_add_watch sys_inotify_add_watch
+249 n32 inotify_rm_watch sys_inotify_rm_watch
+250 n32 migrate_pages compat_sys_migrate_pages
+251 n32 openat sys_openat
+252 n32 mkdirat sys_mkdirat
+253 n32 mknodat sys_mknodat
+254 n32 fchownat sys_fchownat
+255 n32 futimesat sys_futimesat_time32
+256 n32 newfstatat sys_newfstatat
+257 n32 unlinkat sys_unlinkat
+258 n32 renameat sys_renameat
+259 n32 linkat sys_linkat
+260 n32 symlinkat sys_symlinkat
+261 n32 readlinkat sys_readlinkat
+262 n32 fchmodat sys_fchmodat
+263 n32 faccessat sys_faccessat
+264 n32 pselect6 compat_sys_pselect6_time32
+265 n32 ppoll compat_sys_ppoll_time32
+266 n32 unshare sys_unshare
+267 n32 splice sys_splice
+268 n32 sync_file_range sys_sync_file_range
+269 n32 tee sys_tee
+270 n32 vmsplice sys_vmsplice
+271 n32 move_pages compat_sys_move_pages
+272 n32 set_robust_list compat_sys_set_robust_list
+273 n32 get_robust_list compat_sys_get_robust_list
+274 n32 kexec_load compat_sys_kexec_load
+275 n32 getcpu sys_getcpu
+276 n32 epoll_pwait compat_sys_epoll_pwait
+277 n32 ioprio_set sys_ioprio_set
+278 n32 ioprio_get sys_ioprio_get
+279 n32 utimensat sys_utimensat_time32
+280 n32 signalfd compat_sys_signalfd
+281 n32 timerfd sys_ni_syscall
+282 n32 eventfd sys_eventfd
+283 n32 fallocate sys_fallocate
+284 n32 timerfd_create sys_timerfd_create
+285 n32 timerfd_gettime sys_timerfd_gettime32
+286 n32 timerfd_settime sys_timerfd_settime32
+287 n32 signalfd4 compat_sys_signalfd4
+288 n32 eventfd2 sys_eventfd2
+289 n32 epoll_create1 sys_epoll_create1
+290 n32 dup3 sys_dup3
+291 n32 pipe2 sys_pipe2
+292 n32 inotify_init1 sys_inotify_init1
+293 n32 preadv compat_sys_preadv
+294 n32 pwritev compat_sys_pwritev
+295 n32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+296 n32 perf_event_open sys_perf_event_open
+297 n32 accept4 sys_accept4
+298 n32 recvmmsg compat_sys_recvmmsg_time32
+299 n32 getdents64 sys_getdents64
+300 n32 fanotify_init sys_fanotify_init
+301 n32 fanotify_mark sys_fanotify_mark
+302 n32 prlimit64 sys_prlimit64
+303 n32 name_to_handle_at sys_name_to_handle_at
+304 n32 open_by_handle_at sys_open_by_handle_at
+305 n32 clock_adjtime sys_clock_adjtime32
+306 n32 syncfs sys_syncfs
+307 n32 sendmmsg compat_sys_sendmmsg
+308 n32 setns sys_setns
+309 n32 process_vm_readv sys_process_vm_readv
+310 n32 process_vm_writev sys_process_vm_writev
+311 n32 kcmp sys_kcmp
+312 n32 finit_module sys_finit_module
+313 n32 sched_setattr sys_sched_setattr
+314 n32 sched_getattr sys_sched_getattr
+315 n32 renameat2 sys_renameat2
+316 n32 seccomp sys_seccomp
+317 n32 getrandom sys_getrandom
+318 n32 memfd_create sys_memfd_create
+319 n32 bpf sys_bpf
+320 n32 execveat compat_sys_execveat
+321 n32 userfaultfd sys_userfaultfd
+322 n32 membarrier sys_membarrier
+323 n32 mlock2 sys_mlock2
+324 n32 copy_file_range sys_copy_file_range
+325 n32 preadv2 compat_sys_preadv2
+326 n32 pwritev2 compat_sys_pwritev2
+327 n32 pkey_mprotect sys_pkey_mprotect
+328 n32 pkey_alloc sys_pkey_alloc
+329 n32 pkey_free sys_pkey_free
+330 n32 statx sys_statx
+331 n32 rseq sys_rseq
+332 n32 io_pgetevents compat_sys_io_pgetevents
+# 333 through 402 are unassigned to sync up with generic numbers
+403 n32 clock_gettime64 sys_clock_gettime
+404 n32 clock_settime64 sys_clock_settime
+405 n32 clock_adjtime64 sys_clock_adjtime
+406 n32 clock_getres_time64 sys_clock_getres
+407 n32 clock_nanosleep_time64 sys_clock_nanosleep
+408 n32 timer_gettime64 sys_timer_gettime
+409 n32 timer_settime64 sys_timer_settime
+410 n32 timerfd_gettime64 sys_timerfd_gettime
+411 n32 timerfd_settime64 sys_timerfd_settime
+412 n32 utimensat_time64 sys_utimensat
+413 n32 pselect6_time64 compat_sys_pselect6_time64
+414 n32 ppoll_time64 compat_sys_ppoll_time64
+416 n32 io_pgetevents_time64 sys_io_pgetevents
+417 n32 recvmmsg_time64 compat_sys_recvmmsg_time64
+418 n32 mq_timedsend_time64 sys_mq_timedsend
+419 n32 mq_timedreceive_time64 sys_mq_timedreceive
+420 n32 semtimedop_time64 sys_semtimedop
+421 n32 rt_sigtimedwait_time64 compat_sys_rt_sigtimedwait_time64
+422 n32 futex_time64 sys_futex
+423 n32 sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 n32 pidfd_send_signal sys_pidfd_send_signal
+425 n32 io_uring_setup sys_io_uring_setup
+426 n32 io_uring_enter sys_io_uring_enter
+427 n32 io_uring_register sys_io_uring_register
+428 n32 open_tree sys_open_tree
+429 n32 move_mount sys_move_mount
+430 n32 fsopen sys_fsopen
+431 n32 fsconfig sys_fsconfig
+432 n32 fsmount sys_fsmount
+433 n32 fspick sys_fspick
+434 n32 pidfd_open sys_pidfd_open
+435 n32 clone3 __sys_clone3
+436 n32 close_range sys_close_range
+437 n32 openat2 sys_openat2
+438 n32 pidfd_getfd sys_pidfd_getfd
+439 n32 faccessat2 sys_faccessat2
+440 n32 process_madvise sys_process_madvise
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
new file mode 100644
index 000000000..9e4ea3c31
--- /dev/null
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -0,0 +1,357 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for mips
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The <abi> is always "n64" for this file.
+#
+0 n64 read sys_read
+1 n64 write sys_write
+2 n64 open sys_open
+3 n64 close sys_close
+4 n64 stat sys_newstat
+5 n64 fstat sys_newfstat
+6 n64 lstat sys_newlstat
+7 n64 poll sys_poll
+8 n64 lseek sys_lseek
+9 n64 mmap sys_mips_mmap
+10 n64 mprotect sys_mprotect
+11 n64 munmap sys_munmap
+12 n64 brk sys_brk
+13 n64 rt_sigaction sys_rt_sigaction
+14 n64 rt_sigprocmask sys_rt_sigprocmask
+15 n64 ioctl sys_ioctl
+16 n64 pread64 sys_pread64
+17 n64 pwrite64 sys_pwrite64
+18 n64 readv sys_readv
+19 n64 writev sys_writev
+20 n64 access sys_access
+21 n64 pipe sysm_pipe
+22 n64 _newselect sys_select
+23 n64 sched_yield sys_sched_yield
+24 n64 mremap sys_mremap
+25 n64 msync sys_msync
+26 n64 mincore sys_mincore
+27 n64 madvise sys_madvise
+28 n64 shmget sys_shmget
+29 n64 shmat sys_shmat
+30 n64 shmctl sys_old_shmctl
+31 n64 dup sys_dup
+32 n64 dup2 sys_dup2
+33 n64 pause sys_pause
+34 n64 nanosleep sys_nanosleep
+35 n64 getitimer sys_getitimer
+36 n64 setitimer sys_setitimer
+37 n64 alarm sys_alarm
+38 n64 getpid sys_getpid
+39 n64 sendfile sys_sendfile64
+40 n64 socket sys_socket
+41 n64 connect sys_connect
+42 n64 accept sys_accept
+43 n64 sendto sys_sendto
+44 n64 recvfrom sys_recvfrom
+45 n64 sendmsg sys_sendmsg
+46 n64 recvmsg sys_recvmsg
+47 n64 shutdown sys_shutdown
+48 n64 bind sys_bind
+49 n64 listen sys_listen
+50 n64 getsockname sys_getsockname
+51 n64 getpeername sys_getpeername
+52 n64 socketpair sys_socketpair
+53 n64 setsockopt sys_setsockopt
+54 n64 getsockopt sys_getsockopt
+55 n64 clone __sys_clone
+56 n64 fork __sys_fork
+57 n64 execve sys_execve
+58 n64 exit sys_exit
+59 n64 wait4 sys_wait4
+60 n64 kill sys_kill
+61 n64 uname sys_newuname
+62 n64 semget sys_semget
+63 n64 semop sys_semop
+64 n64 semctl sys_old_semctl
+65 n64 shmdt sys_shmdt
+66 n64 msgget sys_msgget
+67 n64 msgsnd sys_msgsnd
+68 n64 msgrcv sys_msgrcv
+69 n64 msgctl sys_old_msgctl
+70 n64 fcntl sys_fcntl
+71 n64 flock sys_flock
+72 n64 fsync sys_fsync
+73 n64 fdatasync sys_fdatasync
+74 n64 truncate sys_truncate
+75 n64 ftruncate sys_ftruncate
+76 n64 getdents sys_getdents
+77 n64 getcwd sys_getcwd
+78 n64 chdir sys_chdir
+79 n64 fchdir sys_fchdir
+80 n64 rename sys_rename
+81 n64 mkdir sys_mkdir
+82 n64 rmdir sys_rmdir
+83 n64 creat sys_creat
+84 n64 link sys_link
+85 n64 unlink sys_unlink
+86 n64 symlink sys_symlink
+87 n64 readlink sys_readlink
+88 n64 chmod sys_chmod
+89 n64 fchmod sys_fchmod
+90 n64 chown sys_chown
+91 n64 fchown sys_fchown
+92 n64 lchown sys_lchown
+93 n64 umask sys_umask
+94 n64 gettimeofday sys_gettimeofday
+95 n64 getrlimit sys_getrlimit
+96 n64 getrusage sys_getrusage
+97 n64 sysinfo sys_sysinfo
+98 n64 times sys_times
+99 n64 ptrace sys_ptrace
+100 n64 getuid sys_getuid
+101 n64 syslog sys_syslog
+102 n64 getgid sys_getgid
+103 n64 setuid sys_setuid
+104 n64 setgid sys_setgid
+105 n64 geteuid sys_geteuid
+106 n64 getegid sys_getegid
+107 n64 setpgid sys_setpgid
+108 n64 getppid sys_getppid
+109 n64 getpgrp sys_getpgrp
+110 n64 setsid sys_setsid
+111 n64 setreuid sys_setreuid
+112 n64 setregid sys_setregid
+113 n64 getgroups sys_getgroups
+114 n64 setgroups sys_setgroups
+115 n64 setresuid sys_setresuid
+116 n64 getresuid sys_getresuid
+117 n64 setresgid sys_setresgid
+118 n64 getresgid sys_getresgid
+119 n64 getpgid sys_getpgid
+120 n64 setfsuid sys_setfsuid
+121 n64 setfsgid sys_setfsgid
+122 n64 getsid sys_getsid
+123 n64 capget sys_capget
+124 n64 capset sys_capset
+125 n64 rt_sigpending sys_rt_sigpending
+126 n64 rt_sigtimedwait sys_rt_sigtimedwait
+127 n64 rt_sigqueueinfo sys_rt_sigqueueinfo
+128 n64 rt_sigsuspend sys_rt_sigsuspend
+129 n64 sigaltstack sys_sigaltstack
+130 n64 utime sys_utime
+131 n64 mknod sys_mknod
+132 n64 personality sys_personality
+133 n64 ustat sys_ustat
+134 n64 statfs sys_statfs
+135 n64 fstatfs sys_fstatfs
+136 n64 sysfs sys_sysfs
+137 n64 getpriority sys_getpriority
+138 n64 setpriority sys_setpriority
+139 n64 sched_setparam sys_sched_setparam
+140 n64 sched_getparam sys_sched_getparam
+141 n64 sched_setscheduler sys_sched_setscheduler
+142 n64 sched_getscheduler sys_sched_getscheduler
+143 n64 sched_get_priority_max sys_sched_get_priority_max
+144 n64 sched_get_priority_min sys_sched_get_priority_min
+145 n64 sched_rr_get_interval sys_sched_rr_get_interval
+146 n64 mlock sys_mlock
+147 n64 munlock sys_munlock
+148 n64 mlockall sys_mlockall
+149 n64 munlockall sys_munlockall
+150 n64 vhangup sys_vhangup
+151 n64 pivot_root sys_pivot_root
+152 n64 _sysctl sys_ni_syscall
+153 n64 prctl sys_prctl
+154 n64 adjtimex sys_adjtimex
+155 n64 setrlimit sys_setrlimit
+156 n64 chroot sys_chroot
+157 n64 sync sys_sync
+158 n64 acct sys_acct
+159 n64 settimeofday sys_settimeofday
+160 n64 mount sys_mount
+161 n64 umount2 sys_umount
+162 n64 swapon sys_swapon
+163 n64 swapoff sys_swapoff
+164 n64 reboot sys_reboot
+165 n64 sethostname sys_sethostname
+166 n64 setdomainname sys_setdomainname
+167 n64 create_module sys_ni_syscall
+168 n64 init_module sys_init_module
+169 n64 delete_module sys_delete_module
+170 n64 get_kernel_syms sys_ni_syscall
+171 n64 query_module sys_ni_syscall
+172 n64 quotactl sys_quotactl
+173 n64 nfsservctl sys_ni_syscall
+174 n64 getpmsg sys_ni_syscall
+175 n64 putpmsg sys_ni_syscall
+176 n64 afs_syscall sys_ni_syscall
+# 177 reserved for security
+177 n64 reserved177 sys_ni_syscall
+178 n64 gettid sys_gettid
+179 n64 readahead sys_readahead
+180 n64 setxattr sys_setxattr
+181 n64 lsetxattr sys_lsetxattr
+182 n64 fsetxattr sys_fsetxattr
+183 n64 getxattr sys_getxattr
+184 n64 lgetxattr sys_lgetxattr
+185 n64 fgetxattr sys_fgetxattr
+186 n64 listxattr sys_listxattr
+187 n64 llistxattr sys_llistxattr
+188 n64 flistxattr sys_flistxattr
+189 n64 removexattr sys_removexattr
+190 n64 lremovexattr sys_lremovexattr
+191 n64 fremovexattr sys_fremovexattr
+192 n64 tkill sys_tkill
+193 n64 reserved193 sys_ni_syscall
+194 n64 futex sys_futex
+195 n64 sched_setaffinity sys_sched_setaffinity
+196 n64 sched_getaffinity sys_sched_getaffinity
+197 n64 cacheflush sys_cacheflush
+198 n64 cachectl sys_cachectl
+199 n64 sysmips __sys_sysmips
+200 n64 io_setup sys_io_setup
+201 n64 io_destroy sys_io_destroy
+202 n64 io_getevents sys_io_getevents
+203 n64 io_submit sys_io_submit
+204 n64 io_cancel sys_io_cancel
+205 n64 exit_group sys_exit_group
+206 n64 lookup_dcookie sys_lookup_dcookie
+207 n64 epoll_create sys_epoll_create
+208 n64 epoll_ctl sys_epoll_ctl
+209 n64 epoll_wait sys_epoll_wait
+210 n64 remap_file_pages sys_remap_file_pages
+211 n64 rt_sigreturn sys_rt_sigreturn
+212 n64 set_tid_address sys_set_tid_address
+213 n64 restart_syscall sys_restart_syscall
+214 n64 semtimedop sys_semtimedop
+215 n64 fadvise64 sys_fadvise64_64
+216 n64 timer_create sys_timer_create
+217 n64 timer_settime sys_timer_settime
+218 n64 timer_gettime sys_timer_gettime
+219 n64 timer_getoverrun sys_timer_getoverrun
+220 n64 timer_delete sys_timer_delete
+221 n64 clock_settime sys_clock_settime
+222 n64 clock_gettime sys_clock_gettime
+223 n64 clock_getres sys_clock_getres
+224 n64 clock_nanosleep sys_clock_nanosleep
+225 n64 tgkill sys_tgkill
+226 n64 utimes sys_utimes
+227 n64 mbind sys_mbind
+228 n64 get_mempolicy sys_get_mempolicy
+229 n64 set_mempolicy sys_set_mempolicy
+230 n64 mq_open sys_mq_open
+231 n64 mq_unlink sys_mq_unlink
+232 n64 mq_timedsend sys_mq_timedsend
+233 n64 mq_timedreceive sys_mq_timedreceive
+234 n64 mq_notify sys_mq_notify
+235 n64 mq_getsetattr sys_mq_getsetattr
+236 n64 vserver sys_ni_syscall
+237 n64 waitid sys_waitid
+# 238 was sys_setaltroot
+239 n64 add_key sys_add_key
+240 n64 request_key sys_request_key
+241 n64 keyctl sys_keyctl
+242 n64 set_thread_area sys_set_thread_area
+243 n64 inotify_init sys_inotify_init
+244 n64 inotify_add_watch sys_inotify_add_watch
+245 n64 inotify_rm_watch sys_inotify_rm_watch
+246 n64 migrate_pages sys_migrate_pages
+247 n64 openat sys_openat
+248 n64 mkdirat sys_mkdirat
+249 n64 mknodat sys_mknodat
+250 n64 fchownat sys_fchownat
+251 n64 futimesat sys_futimesat
+252 n64 newfstatat sys_newfstatat
+253 n64 unlinkat sys_unlinkat
+254 n64 renameat sys_renameat
+255 n64 linkat sys_linkat
+256 n64 symlinkat sys_symlinkat
+257 n64 readlinkat sys_readlinkat
+258 n64 fchmodat sys_fchmodat
+259 n64 faccessat sys_faccessat
+260 n64 pselect6 sys_pselect6
+261 n64 ppoll sys_ppoll
+262 n64 unshare sys_unshare
+263 n64 splice sys_splice
+264 n64 sync_file_range sys_sync_file_range
+265 n64 tee sys_tee
+266 n64 vmsplice sys_vmsplice
+267 n64 move_pages sys_move_pages
+268 n64 set_robust_list sys_set_robust_list
+269 n64 get_robust_list sys_get_robust_list
+270 n64 kexec_load sys_kexec_load
+271 n64 getcpu sys_getcpu
+272 n64 epoll_pwait sys_epoll_pwait
+273 n64 ioprio_set sys_ioprio_set
+274 n64 ioprio_get sys_ioprio_get
+275 n64 utimensat sys_utimensat
+276 n64 signalfd sys_signalfd
+277 n64 timerfd sys_ni_syscall
+278 n64 eventfd sys_eventfd
+279 n64 fallocate sys_fallocate
+280 n64 timerfd_create sys_timerfd_create
+281 n64 timerfd_gettime sys_timerfd_gettime
+282 n64 timerfd_settime sys_timerfd_settime
+283 n64 signalfd4 sys_signalfd4
+284 n64 eventfd2 sys_eventfd2
+285 n64 epoll_create1 sys_epoll_create1
+286 n64 dup3 sys_dup3
+287 n64 pipe2 sys_pipe2
+288 n64 inotify_init1 sys_inotify_init1
+289 n64 preadv sys_preadv
+290 n64 pwritev sys_pwritev
+291 n64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+292 n64 perf_event_open sys_perf_event_open
+293 n64 accept4 sys_accept4
+294 n64 recvmmsg sys_recvmmsg
+295 n64 fanotify_init sys_fanotify_init
+296 n64 fanotify_mark sys_fanotify_mark
+297 n64 prlimit64 sys_prlimit64
+298 n64 name_to_handle_at sys_name_to_handle_at
+299 n64 open_by_handle_at sys_open_by_handle_at
+300 n64 clock_adjtime sys_clock_adjtime
+301 n64 syncfs sys_syncfs
+302 n64 sendmmsg sys_sendmmsg
+303 n64 setns sys_setns
+304 n64 process_vm_readv sys_process_vm_readv
+305 n64 process_vm_writev sys_process_vm_writev
+306 n64 kcmp sys_kcmp
+307 n64 finit_module sys_finit_module
+308 n64 getdents64 sys_getdents64
+309 n64 sched_setattr sys_sched_setattr
+310 n64 sched_getattr sys_sched_getattr
+311 n64 renameat2 sys_renameat2
+312 n64 seccomp sys_seccomp
+313 n64 getrandom sys_getrandom
+314 n64 memfd_create sys_memfd_create
+315 n64 bpf sys_bpf
+316 n64 execveat sys_execveat
+317 n64 userfaultfd sys_userfaultfd
+318 n64 membarrier sys_membarrier
+319 n64 mlock2 sys_mlock2
+320 n64 copy_file_range sys_copy_file_range
+321 n64 preadv2 sys_preadv2
+322 n64 pwritev2 sys_pwritev2
+323 n64 pkey_mprotect sys_pkey_mprotect
+324 n64 pkey_alloc sys_pkey_alloc
+325 n64 pkey_free sys_pkey_free
+326 n64 statx sys_statx
+327 n64 rseq sys_rseq
+328 n64 io_pgetevents sys_io_pgetevents
+# 329 through 423 are reserved to sync up with other architectures
+424 n64 pidfd_send_signal sys_pidfd_send_signal
+425 n64 io_uring_setup sys_io_uring_setup
+426 n64 io_uring_enter sys_io_uring_enter
+427 n64 io_uring_register sys_io_uring_register
+428 n64 open_tree sys_open_tree
+429 n64 move_mount sys_move_mount
+430 n64 fsopen sys_fsopen
+431 n64 fsconfig sys_fsconfig
+432 n64 fsmount sys_fsmount
+433 n64 fspick sys_fspick
+434 n64 pidfd_open sys_pidfd_open
+435 n64 clone3 __sys_clone3
+436 n64 close_range sys_close_range
+437 n64 openat2 sys_openat2
+438 n64 pidfd_getfd sys_pidfd_getfd
+439 n64 faccessat2 sys_faccessat2
+440 n64 process_madvise sys_process_madvise
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
new file mode 100644
index 000000000..29f5f28cf
--- /dev/null
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -0,0 +1,430 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for mips
+#
+# The format is:
+# <number> <abi> <name> <entry point> <compat entry point>
+#
+# The <abi> is always "o32" for this file.
+#
+0 o32 syscall sys_syscall sys32_syscall
+1 o32 exit sys_exit
+2 o32 fork __sys_fork
+3 o32 read sys_read
+4 o32 write sys_write
+5 o32 open sys_open compat_sys_open
+6 o32 close sys_close
+7 o32 waitpid sys_waitpid
+8 o32 creat sys_creat
+9 o32 link sys_link
+10 o32 unlink sys_unlink
+11 o32 execve sys_execve compat_sys_execve
+12 o32 chdir sys_chdir
+13 o32 time sys_time32
+14 o32 mknod sys_mknod
+15 o32 chmod sys_chmod
+16 o32 lchown sys_lchown
+17 o32 break sys_ni_syscall
+# 18 was sys_stat
+18 o32 unused18 sys_ni_syscall
+19 o32 lseek sys_lseek
+20 o32 getpid sys_getpid
+21 o32 mount sys_mount
+22 o32 umount sys_oldumount
+23 o32 setuid sys_setuid
+24 o32 getuid sys_getuid
+25 o32 stime sys_stime32
+26 o32 ptrace sys_ptrace compat_sys_ptrace
+27 o32 alarm sys_alarm
+# 28 was sys_fstat
+28 o32 unused28 sys_ni_syscall
+29 o32 pause sys_pause
+30 o32 utime sys_utime32
+31 o32 stty sys_ni_syscall
+32 o32 gtty sys_ni_syscall
+33 o32 access sys_access
+34 o32 nice sys_nice
+35 o32 ftime sys_ni_syscall
+36 o32 sync sys_sync
+37 o32 kill sys_kill
+38 o32 rename sys_rename
+39 o32 mkdir sys_mkdir
+40 o32 rmdir sys_rmdir
+41 o32 dup sys_dup
+42 o32 pipe sysm_pipe
+43 o32 times sys_times compat_sys_times
+44 o32 prof sys_ni_syscall
+45 o32 brk sys_brk
+46 o32 setgid sys_setgid
+47 o32 getgid sys_getgid
+48 o32 signal sys_ni_syscall
+49 o32 geteuid sys_geteuid
+50 o32 getegid sys_getegid
+51 o32 acct sys_acct
+52 o32 umount2 sys_umount
+53 o32 lock sys_ni_syscall
+54 o32 ioctl sys_ioctl compat_sys_ioctl
+55 o32 fcntl sys_fcntl compat_sys_fcntl
+56 o32 mpx sys_ni_syscall
+57 o32 setpgid sys_setpgid
+58 o32 ulimit sys_ni_syscall
+59 o32 unused59 sys_olduname
+60 o32 umask sys_umask
+61 o32 chroot sys_chroot
+62 o32 ustat sys_ustat compat_sys_ustat
+63 o32 dup2 sys_dup2
+64 o32 getppid sys_getppid
+65 o32 getpgrp sys_getpgrp
+66 o32 setsid sys_setsid
+67 o32 sigaction sys_sigaction sys_32_sigaction
+68 o32 sgetmask sys_sgetmask
+69 o32 ssetmask sys_ssetmask
+70 o32 setreuid sys_setreuid
+71 o32 setregid sys_setregid
+72 o32 sigsuspend sys_sigsuspend sys32_sigsuspend
+73 o32 sigpending sys_sigpending compat_sys_sigpending
+74 o32 sethostname sys_sethostname
+75 o32 setrlimit sys_setrlimit compat_sys_setrlimit
+76 o32 getrlimit sys_getrlimit compat_sys_getrlimit
+77 o32 getrusage sys_getrusage compat_sys_getrusage
+78 o32 gettimeofday sys_gettimeofday compat_sys_gettimeofday
+79 o32 settimeofday sys_settimeofday compat_sys_settimeofday
+80 o32 getgroups sys_getgroups
+81 o32 setgroups sys_setgroups
+# 82 was old_select
+82 o32 reserved82 sys_ni_syscall
+83 o32 symlink sys_symlink
+# 84 was sys_lstat
+84 o32 unused84 sys_ni_syscall
+85 o32 readlink sys_readlink
+86 o32 uselib sys_uselib
+87 o32 swapon sys_swapon
+88 o32 reboot sys_reboot
+89 o32 readdir sys_old_readdir compat_sys_old_readdir
+90 o32 mmap sys_mips_mmap
+91 o32 munmap sys_munmap
+92 o32 truncate sys_truncate compat_sys_truncate
+93 o32 ftruncate sys_ftruncate compat_sys_ftruncate
+94 o32 fchmod sys_fchmod
+95 o32 fchown sys_fchown
+96 o32 getpriority sys_getpriority
+97 o32 setpriority sys_setpriority
+98 o32 profil sys_ni_syscall
+99 o32 statfs sys_statfs compat_sys_statfs
+100 o32 fstatfs sys_fstatfs compat_sys_fstatfs
+101 o32 ioperm sys_ni_syscall
+102 o32 socketcall sys_socketcall compat_sys_socketcall
+103 o32 syslog sys_syslog
+104 o32 setitimer sys_setitimer compat_sys_setitimer
+105 o32 getitimer sys_getitimer compat_sys_getitimer
+106 o32 stat sys_newstat compat_sys_newstat
+107 o32 lstat sys_newlstat compat_sys_newlstat
+108 o32 fstat sys_newfstat compat_sys_newfstat
+109 o32 unused109 sys_uname
+110 o32 iopl sys_ni_syscall
+111 o32 vhangup sys_vhangup
+112 o32 idle sys_ni_syscall
+113 o32 vm86 sys_ni_syscall
+114 o32 wait4 sys_wait4 compat_sys_wait4
+115 o32 swapoff sys_swapoff
+116 o32 sysinfo sys_sysinfo compat_sys_sysinfo
+117 o32 ipc sys_ipc compat_sys_ipc
+118 o32 fsync sys_fsync
+119 o32 sigreturn sys_sigreturn sys32_sigreturn
+120 o32 clone __sys_clone
+121 o32 setdomainname sys_setdomainname
+122 o32 uname sys_newuname
+123 o32 modify_ldt sys_ni_syscall
+124 o32 adjtimex sys_adjtimex_time32
+125 o32 mprotect sys_mprotect
+126 o32 sigprocmask sys_sigprocmask compat_sys_sigprocmask
+127 o32 create_module sys_ni_syscall
+128 o32 init_module sys_init_module
+129 o32 delete_module sys_delete_module
+130 o32 get_kernel_syms sys_ni_syscall
+131 o32 quotactl sys_quotactl
+132 o32 getpgid sys_getpgid
+133 o32 fchdir sys_fchdir
+134 o32 bdflush sys_bdflush
+135 o32 sysfs sys_sysfs
+136 o32 personality sys_personality sys_32_personality
+137 o32 afs_syscall sys_ni_syscall
+138 o32 setfsuid sys_setfsuid
+139 o32 setfsgid sys_setfsgid
+140 o32 _llseek sys_llseek sys_32_llseek
+141 o32 getdents sys_getdents compat_sys_getdents
+142 o32 _newselect sys_select compat_sys_select
+143 o32 flock sys_flock
+144 o32 msync sys_msync
+145 o32 readv sys_readv
+146 o32 writev sys_writev
+147 o32 cacheflush sys_cacheflush
+148 o32 cachectl sys_cachectl
+149 o32 sysmips __sys_sysmips
+150 o32 unused150 sys_ni_syscall
+151 o32 getsid sys_getsid
+152 o32 fdatasync sys_fdatasync
+153 o32 _sysctl sys_ni_syscall
+154 o32 mlock sys_mlock
+155 o32 munlock sys_munlock
+156 o32 mlockall sys_mlockall
+157 o32 munlockall sys_munlockall
+158 o32 sched_setparam sys_sched_setparam
+159 o32 sched_getparam sys_sched_getparam
+160 o32 sched_setscheduler sys_sched_setscheduler
+161 o32 sched_getscheduler sys_sched_getscheduler
+162 o32 sched_yield sys_sched_yield
+163 o32 sched_get_priority_max sys_sched_get_priority_max
+164 o32 sched_get_priority_min sys_sched_get_priority_min
+165 o32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+166 o32 nanosleep sys_nanosleep_time32
+167 o32 mremap sys_mremap
+168 o32 accept sys_accept
+169 o32 bind sys_bind
+170 o32 connect sys_connect
+171 o32 getpeername sys_getpeername
+172 o32 getsockname sys_getsockname
+173 o32 getsockopt sys_getsockopt sys_getsockopt
+174 o32 listen sys_listen
+175 o32 recv sys_recv compat_sys_recv
+176 o32 recvfrom sys_recvfrom compat_sys_recvfrom
+177 o32 recvmsg sys_recvmsg compat_sys_recvmsg
+178 o32 send sys_send
+179 o32 sendmsg sys_sendmsg compat_sys_sendmsg
+180 o32 sendto sys_sendto
+181 o32 setsockopt sys_setsockopt sys_setsockopt
+182 o32 shutdown sys_shutdown
+183 o32 socket sys_socket
+184 o32 socketpair sys_socketpair
+185 o32 setresuid sys_setresuid
+186 o32 getresuid sys_getresuid
+187 o32 query_module sys_ni_syscall
+188 o32 poll sys_poll
+189 o32 nfsservctl sys_ni_syscall
+190 o32 setresgid sys_setresgid
+191 o32 getresgid sys_getresgid
+192 o32 prctl sys_prctl
+193 o32 rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn
+194 o32 rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+195 o32 rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+196 o32 rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+197 o32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+198 o32 rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+199 o32 rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+200 o32 pread64 sys_pread64 sys_32_pread
+201 o32 pwrite64 sys_pwrite64 sys_32_pwrite
+202 o32 chown sys_chown
+203 o32 getcwd sys_getcwd
+204 o32 capget sys_capget
+205 o32 capset sys_capset
+206 o32 sigaltstack sys_sigaltstack compat_sys_sigaltstack
+207 o32 sendfile sys_sendfile compat_sys_sendfile
+208 o32 getpmsg sys_ni_syscall
+209 o32 putpmsg sys_ni_syscall
+210 o32 mmap2 sys_mips_mmap2
+211 o32 truncate64 sys_truncate64 sys_32_truncate64
+212 o32 ftruncate64 sys_ftruncate64 sys_32_ftruncate64
+213 o32 stat64 sys_stat64 sys_newstat
+214 o32 lstat64 sys_lstat64 sys_newlstat
+215 o32 fstat64 sys_fstat64 sys_newfstat
+216 o32 pivot_root sys_pivot_root
+217 o32 mincore sys_mincore
+218 o32 madvise sys_madvise
+219 o32 getdents64 sys_getdents64
+220 o32 fcntl64 sys_fcntl64 compat_sys_fcntl64
+221 o32 reserved221 sys_ni_syscall
+222 o32 gettid sys_gettid
+223 o32 readahead sys_readahead sys32_readahead
+224 o32 setxattr sys_setxattr
+225 o32 lsetxattr sys_lsetxattr
+226 o32 fsetxattr sys_fsetxattr
+227 o32 getxattr sys_getxattr
+228 o32 lgetxattr sys_lgetxattr
+229 o32 fgetxattr sys_fgetxattr
+230 o32 listxattr sys_listxattr
+231 o32 llistxattr sys_llistxattr
+232 o32 flistxattr sys_flistxattr
+233 o32 removexattr sys_removexattr
+234 o32 lremovexattr sys_lremovexattr
+235 o32 fremovexattr sys_fremovexattr
+236 o32 tkill sys_tkill
+237 o32 sendfile64 sys_sendfile64
+238 o32 futex sys_futex_time32
+239 o32 sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+240 o32 sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+241 o32 io_setup sys_io_setup compat_sys_io_setup
+242 o32 io_destroy sys_io_destroy
+243 o32 io_getevents sys_io_getevents_time32
+244 o32 io_submit sys_io_submit compat_sys_io_submit
+245 o32 io_cancel sys_io_cancel
+246 o32 exit_group sys_exit_group
+247 o32 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+248 o32 epoll_create sys_epoll_create
+249 o32 epoll_ctl sys_epoll_ctl
+250 o32 epoll_wait sys_epoll_wait
+251 o32 remap_file_pages sys_remap_file_pages
+252 o32 set_tid_address sys_set_tid_address
+253 o32 restart_syscall sys_restart_syscall
+254 o32 fadvise64 sys_fadvise64_64 sys32_fadvise64_64
+255 o32 statfs64 sys_statfs64 compat_sys_statfs64
+256 o32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+257 o32 timer_create sys_timer_create compat_sys_timer_create
+258 o32 timer_settime sys_timer_settime32
+259 o32 timer_gettime sys_timer_gettime32
+260 o32 timer_getoverrun sys_timer_getoverrun
+261 o32 timer_delete sys_timer_delete
+262 o32 clock_settime sys_clock_settime32
+263 o32 clock_gettime sys_clock_gettime32
+264 o32 clock_getres sys_clock_getres_time32
+265 o32 clock_nanosleep sys_clock_nanosleep_time32
+266 o32 tgkill sys_tgkill
+267 o32 utimes sys_utimes_time32
+268 o32 mbind sys_mbind compat_sys_mbind
+269 o32 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
+270 o32 set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+271 o32 mq_open sys_mq_open compat_sys_mq_open
+272 o32 mq_unlink sys_mq_unlink
+273 o32 mq_timedsend sys_mq_timedsend_time32
+274 o32 mq_timedreceive sys_mq_timedreceive_time32
+275 o32 mq_notify sys_mq_notify compat_sys_mq_notify
+276 o32 mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+277 o32 vserver sys_ni_syscall
+278 o32 waitid sys_waitid compat_sys_waitid
+# 279 was sys_setaltroot
+280 o32 add_key sys_add_key
+281 o32 request_key sys_request_key
+282 o32 keyctl sys_keyctl compat_sys_keyctl
+283 o32 set_thread_area sys_set_thread_area
+284 o32 inotify_init sys_inotify_init
+285 o32 inotify_add_watch sys_inotify_add_watch
+286 o32 inotify_rm_watch sys_inotify_rm_watch
+287 o32 migrate_pages sys_migrate_pages compat_sys_migrate_pages
+288 o32 openat sys_openat compat_sys_openat
+289 o32 mkdirat sys_mkdirat
+290 o32 mknodat sys_mknodat
+291 o32 fchownat sys_fchownat
+292 o32 futimesat sys_futimesat_time32
+293 o32 fstatat64 sys_fstatat64 sys_newfstatat
+294 o32 unlinkat sys_unlinkat
+295 o32 renameat sys_renameat
+296 o32 linkat sys_linkat
+297 o32 symlinkat sys_symlinkat
+298 o32 readlinkat sys_readlinkat
+299 o32 fchmodat sys_fchmodat
+300 o32 faccessat sys_faccessat
+301 o32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+302 o32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+303 o32 unshare sys_unshare
+304 o32 splice sys_splice
+305 o32 sync_file_range sys_sync_file_range sys32_sync_file_range
+306 o32 tee sys_tee
+307 o32 vmsplice sys_vmsplice
+308 o32 move_pages sys_move_pages compat_sys_move_pages
+309 o32 set_robust_list sys_set_robust_list compat_sys_set_robust_list
+310 o32 get_robust_list sys_get_robust_list compat_sys_get_robust_list
+311 o32 kexec_load sys_kexec_load compat_sys_kexec_load
+312 o32 getcpu sys_getcpu
+313 o32 epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+314 o32 ioprio_set sys_ioprio_set
+315 o32 ioprio_get sys_ioprio_get
+316 o32 utimensat sys_utimensat_time32
+317 o32 signalfd sys_signalfd compat_sys_signalfd
+318 o32 timerfd sys_ni_syscall
+319 o32 eventfd sys_eventfd
+320 o32 fallocate sys_fallocate sys32_fallocate
+321 o32 timerfd_create sys_timerfd_create
+322 o32 timerfd_gettime sys_timerfd_gettime32
+323 o32 timerfd_settime sys_timerfd_settime32
+324 o32 signalfd4 sys_signalfd4 compat_sys_signalfd4
+325 o32 eventfd2 sys_eventfd2
+326 o32 epoll_create1 sys_epoll_create1
+327 o32 dup3 sys_dup3
+328 o32 pipe2 sys_pipe2
+329 o32 inotify_init1 sys_inotify_init1
+330 o32 preadv sys_preadv compat_sys_preadv
+331 o32 pwritev sys_pwritev compat_sys_pwritev
+332 o32 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+333 o32 perf_event_open sys_perf_event_open
+334 o32 accept4 sys_accept4
+335 o32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+336 o32 fanotify_init sys_fanotify_init
+337 o32 fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
+338 o32 prlimit64 sys_prlimit64
+339 o32 name_to_handle_at sys_name_to_handle_at
+340 o32 open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
+341 o32 clock_adjtime sys_clock_adjtime32
+342 o32 syncfs sys_syncfs
+343 o32 sendmmsg sys_sendmmsg compat_sys_sendmmsg
+344 o32 setns sys_setns
+345 o32 process_vm_readv sys_process_vm_readv
+346 o32 process_vm_writev sys_process_vm_writev
+347 o32 kcmp sys_kcmp
+348 o32 finit_module sys_finit_module
+349 o32 sched_setattr sys_sched_setattr
+350 o32 sched_getattr sys_sched_getattr
+351 o32 renameat2 sys_renameat2
+352 o32 seccomp sys_seccomp
+353 o32 getrandom sys_getrandom
+354 o32 memfd_create sys_memfd_create
+355 o32 bpf sys_bpf
+356 o32 execveat sys_execveat compat_sys_execveat
+357 o32 userfaultfd sys_userfaultfd
+358 o32 membarrier sys_membarrier
+359 o32 mlock2 sys_mlock2
+360 o32 copy_file_range sys_copy_file_range
+361 o32 preadv2 sys_preadv2 compat_sys_preadv2
+362 o32 pwritev2 sys_pwritev2 compat_sys_pwritev2
+363 o32 pkey_mprotect sys_pkey_mprotect
+364 o32 pkey_alloc sys_pkey_alloc
+365 o32 pkey_free sys_pkey_free
+366 o32 statx sys_statx
+367 o32 rseq sys_rseq
+368 o32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+# room for arch specific calls
+393 o32 semget sys_semget
+394 o32 semctl sys_semctl compat_sys_semctl
+395 o32 shmget sys_shmget
+396 o32 shmctl sys_shmctl compat_sys_shmctl
+397 o32 shmat sys_shmat compat_sys_shmat
+398 o32 shmdt sys_shmdt
+399 o32 msgget sys_msgget
+400 o32 msgsnd sys_msgsnd compat_sys_msgsnd
+401 o32 msgrcv sys_msgrcv compat_sys_msgrcv
+402 o32 msgctl sys_msgctl compat_sys_msgctl
+403 o32 clock_gettime64 sys_clock_gettime sys_clock_gettime
+404 o32 clock_settime64 sys_clock_settime sys_clock_settime
+405 o32 clock_adjtime64 sys_clock_adjtime sys_clock_adjtime
+406 o32 clock_getres_time64 sys_clock_getres sys_clock_getres
+407 o32 clock_nanosleep_time64 sys_clock_nanosleep sys_clock_nanosleep
+408 o32 timer_gettime64 sys_timer_gettime sys_timer_gettime
+409 o32 timer_settime64 sys_timer_settime sys_timer_settime
+410 o32 timerfd_gettime64 sys_timerfd_gettime sys_timerfd_gettime
+411 o32 timerfd_settime64 sys_timerfd_settime sys_timerfd_settime
+412 o32 utimensat_time64 sys_utimensat sys_utimensat
+413 o32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 o32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 o32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+417 o32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 o32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
+419 o32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
+420 o32 semtimedop_time64 sys_semtimedop sys_semtimedop
+421 o32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 o32 futex_time64 sys_futex sys_futex
+423 o32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
+424 o32 pidfd_send_signal sys_pidfd_send_signal
+425 o32 io_uring_setup sys_io_uring_setup
+426 o32 io_uring_enter sys_io_uring_enter
+427 o32 io_uring_register sys_io_uring_register
+428 o32 open_tree sys_open_tree
+429 o32 move_mount sys_move_mount
+430 o32 fsopen sys_fsopen
+431 o32 fsconfig sys_fsconfig
+432 o32 fsmount sys_fsmount
+433 o32 fspick sys_fspick
+434 o32 pidfd_open sys_pidfd_open
+435 o32 clone3 __sys_clone3
+436 o32 close_range sys_close_range
+437 o32 openat2 sys_openat2
+438 o32 pidfd_getfd sys_pidfd_getfd
+439 o32 faccessat2 sys_faccessat2
+440 o32 process_madvise sys_process_madvise
diff --git a/arch/mips/kernel/syscalls/syscallhdr.sh b/arch/mips/kernel/syscalls/syscallhdr.sh
new file mode 100644
index 000000000..2e241e713
--- /dev/null
+++ b/arch/mips/kernel/syscalls/syscallhdr.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=_UAPI_ASM_MIPS_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ printf "#ifndef %s\n" "${fileguard}"
+ printf "#define %s\n" "${fileguard}"
+ printf "\n"
+
+ nxt=0
+ while read nr abi name entry compat ; do
+ if [ -z "$offset" ]; then
+ printf "#define __NR_%s%s\t%s\n" \
+ "${prefix}" "${name}" "${nr}"
+ else
+ printf "#define __NR_%s%s\t(%s + %s)\n" \
+ "${prefix}" "${name}" "${offset}" "${nr}"
+ fi
+ nxt=$((nr+1))
+ done
+
+ printf "\n"
+ printf "#ifdef __KERNEL__\n"
+ printf "#define __NR_syscalls\t%s\n" "${nxt}"
+ printf "#endif\n"
+ printf "\n"
+ printf "#endif /* %s */\n" "${fileguard}"
+) > "$out"
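
For illustration, with the n64 table above and the __NR_Linux offset set in the Makefile, the generated unistd_n64.h would look roughly like this (abridged sketch of the script's output):

	#ifndef _UAPI_ASM_MIPS_UNISTD_N64_H
	#define _UAPI_ASM_MIPS_UNISTD_N64_H

	#define __NR_read	(__NR_Linux + 0)
	#define __NR_write	(__NR_Linux + 1)
	#define __NR_open	(__NR_Linux + 2)
	/* ... */
	#define __NR_process_madvise	(__NR_Linux + 440)

	#ifdef __KERNEL__
	#define __NR_syscalls	441
	#endif

	#endif /* _UAPI_ASM_MIPS_UNISTD_N64_H */
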
diff --git a/arch/mips/kernel/syscalls/syscallnr.sh b/arch/mips/kernel/syscalls/syscallnr.sh
new file mode 100644
index 000000000..60bbdb3fe
--- /dev/null
+++ b/arch/mips/kernel/syscalls/syscallnr.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=_UAPI_ASM_MIPS_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ printf "#ifndef %s\n" "${fileguard}"
+ printf "#define %s\n" "${fileguard}"
+ printf "\n"
+
+ nxt=0
+ while read nr abi name entry compat ; do
+ nxt=$((nr+1))
+ done
+
+ printf "#define __NR_%s_Linux\t%s\n" "${prefix}" "${offset}"
+ printf "#define __NR_%s_Linux_syscalls\t%s\n" "${prefix}" "${nxt}"
+ printf "\n"
+ printf "#endif /* %s */" "${fileguard}"
+ printf "\n"
+) > "$out"
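
Run on the n64 table with prefix "64" and offset 5000, as configured in the Makefile above, the interesting part of the generated unistd_nr_n64.h is just two lines (sketch):

	#define __NR_64_Linux	5000
	#define __NR_64_Linux_syscalls	441
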
diff --git a/arch/mips/kernel/syscalls/syscalltbl.sh b/arch/mips/kernel/syscalls/syscalltbl.sh
new file mode 100644
index 000000000..1e2570740
--- /dev/null
+++ b/arch/mips/kernel/syscalls/syscalltbl.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+my_abi="$4"
+offset="$5"
+
+emit() {
+ t_nxt="$1"
+ t_nr="$2"
+ t_entry="$3"
+
+ while [ $t_nxt -lt $t_nr ]; do
+ printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}"
+ t_nxt=$((t_nxt+1))
+ done
+ printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}"
+}
+
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ nxt=0
+ if [ -z "$offset" ]; then
+ offset=0
+ fi
+
+ while read nr abi name entry compat ; do
+ if [ "$my_abi" = "64_o32" ] && [ ! -z "$compat" ]; then
+ emit $((nxt+offset)) $((nr+offset)) $compat
+ else
+ emit $((nxt+offset)) $((nr+offset)) $entry
+ fi
+ nxt=$((nr+1))
+ done
+) > "$out"
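
The generated tables are plain lists of __SYSCALL() invocations. For the 64_o32 table (offset 4000) the output would start roughly as below: the compat entry point is used whenever the table provides one, and unassigned numbers are padded with sys_ni_syscall (sketch):

	__SYSCALL(4000,sys32_syscall)	/* compat column present, so it is used */
	__SYSCALL(4001,sys_exit)	/* no compat column, native entry used */
	__SYSCALL(4002,__sys_fork)
	/* ... numbers 4369-4392 come out as sys_ni_syscall fillers ... */
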
diff --git a/arch/mips/kernel/sysrq.c b/arch/mips/kernel/sysrq.c
new file mode 100644
index 000000000..9c1a20191
--- /dev/null
+++ b/arch/mips/kernel/sysrq.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MIPS specific sysrq operations.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/sysrq.h>
+#include <linux/workqueue.h>
+
+#include <asm/cpu-features.h>
+#include <asm/mipsregs.h>
+#include <asm/tlbdebug.h>
+
+/*
+ * Dump TLB entries on all CPUs.
+ */
+
+static DEFINE_SPINLOCK(show_lock);
+
+static void sysrq_tlbdump_single(void *dummy)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&show_lock, flags);
+
+ pr_info("CPU%d:\n", smp_processor_id());
+ dump_tlb_regs();
+ pr_info("\n");
+ dump_tlb_all();
+ pr_info("\n");
+
+ spin_unlock_irqrestore(&show_lock, flags);
+}
+
+#ifdef CONFIG_SMP
+static void sysrq_tlbdump_othercpus(struct work_struct *dummy)
+{
+ smp_call_function(sysrq_tlbdump_single, NULL, 0);
+}
+
+static DECLARE_WORK(sysrq_tlbdump, sysrq_tlbdump_othercpus);
+#endif
+
+static void sysrq_handle_tlbdump(int key)
+{
+ sysrq_tlbdump_single(NULL);
+#ifdef CONFIG_SMP
+ schedule_work(&sysrq_tlbdump);
+#endif
+}
+
+static const struct sysrq_key_op sysrq_tlbdump_op = {
+ .handler = sysrq_handle_tlbdump,
+ .help_msg = "show-tlbs(x)",
+ .action_msg = "Show TLB entries",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+static int __init mips_sysrq_init(void)
+{
+ return register_sysrq_key('x', &sysrq_tlbdump_op);
+}
+arch_initcall(mips_sysrq_init);
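
Once registered, the handler can be exercised from userspace by writing the key character to /proc/sysrq-trigger; a minimal sketch (needs root, and the program name is illustrative):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/proc/sysrq-trigger", O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "x", 1);	/* 'x' is the key registered above */
		close(fd);
		return 0;
	}
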
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
new file mode 100644
index 000000000..ed339d797
--- /dev/null
+++ b/arch/mips/kernel/time.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ * Copyright (c) 2003, 2004 Maciej W. Rozycki
+ *
+ * Common time service routines for MIPS machines.
+ */
+#include <linux/bug.h>
+#include <linux/clockchips.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/param.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+
+#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
+#include <asm/div64.h>
+#include <asm/time.h>
+
+#ifdef CONFIG_CPU_FREQ
+
+static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref);
+static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq);
+static unsigned long glb_lpj_ref;
+static unsigned long glb_lpj_ref_freq;
+
+static int cpufreq_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ struct cpumask *cpus = freq->policy->cpus;
+ unsigned long lpj;
+ int cpu;
+
+ /*
+ * Skip adjusting the lpj values if the CPU-freq transition is safe for
+ * the loops delay. (Is this possible?)
+ */
+ if (freq->flags & CPUFREQ_CONST_LOOPS)
+ return NOTIFY_OK;
+
+ /* Save the initial lpj values for future scaling. */
+ if (!glb_lpj_ref) {
+ glb_lpj_ref = boot_cpu_data.udelay_val;
+ glb_lpj_ref_freq = freq->old;
+
+ for_each_online_cpu(cpu) {
+ per_cpu(pcp_lpj_ref, cpu) =
+ cpu_data[cpu].udelay_val;
+ per_cpu(pcp_lpj_ref_freq, cpu) = freq->old;
+ }
+ }
+
+ /*
+ * Adjust global lpj variable and per-CPU udelay_val number in
+ * accordance with the new CPU frequency.
+ */
+ if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
+ loops_per_jiffy = cpufreq_scale(glb_lpj_ref,
+ glb_lpj_ref_freq,
+ freq->new);
+
+ for_each_cpu(cpu, cpus) {
+ lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu),
+ per_cpu(pcp_lpj_ref_freq, cpu),
+ freq->new);
+ cpu_data[cpu].udelay_val = (unsigned int)lpj;
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_notifier = {
+ .notifier_call = cpufreq_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+ return cpufreq_register_notifier(&cpufreq_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+#endif /* CONFIG_CPU_FREQ */
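
The rescaling in cpufreq_callback() is simple proportional arithmetic; a worked example with assumed numbers (not taken from this patch):

	/*
	 * If glb_lpj_ref was captured with freq->old == 500000 (kHz) and the
	 * policy later switches to freq->new == 1000000, then
	 * cpufreq_scale(ref, 500000, 1000000) == ref * 1000000 / 500000,
	 * so loops_per_jiffy and each CPU's udelay_val double and udelay()
	 * keeps roughly the same wall-clock length at the faster clock.
	 */
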
+
+/*
+ * forward reference
+ */
+DEFINE_SPINLOCK(rtc_lock);
+EXPORT_SYMBOL(rtc_lock);
+
+static int null_perf_irq(void)
+{
+ return 0;
+}
+
+int (*perf_irq)(void) = null_perf_irq;
+
+EXPORT_SYMBOL(perf_irq);
+
+/*
+ * time_init() does the following things:
+ *
+ * 1) plat_time_init() -
+ * a) (optional) set up RTC routines,
+ * b) (optional) calibrate and set the mips_hpt_frequency
+ * (only needed if you intend to use the CPU counter as the timer
+ * interrupt source)
+ * 2) calculate a couple of cached variables for later usage
+ */
+
+unsigned int mips_hpt_frequency;
+EXPORT_SYMBOL_GPL(mips_hpt_frequency);
+
+static __init int cpu_has_mfc0_count_bug(void)
+{
+ switch (current_cpu_type()) {
+ case CPU_R4000PC:
+ case CPU_R4000SC:
+ case CPU_R4000MC:
+ /*
+ * V3.0 is documented as suffering from the mfc0 from count bug.
+ * As far as we know this is the last version of the R4000. Later versions
+ * were marketed as R4400.
+ */
+ return 1;
+
+ case CPU_R4400PC:
+ case CPU_R4400SC:
+ case CPU_R4400MC:
+ /*
+ * The published errata for the R4400 up to 3.0 say the CPU
+ * has the mfc0 from count bug. This seems to be the last version
+ * produced.
+ */
+ return 1;
+ }
+
+ return 0;
+}
+
+void __init time_init(void)
+{
+ plat_time_init();
+
+ /*
+ * The use of the R4k timer as a clock event takes precedence;
+ * if reading the Count register might interfere with the timer
+ * interrupt, then we don't use the timer as a clock source.
+ * We may still use the timer as a clock source though if the
+ * timer interrupt isn't reliable; the interference doesn't
+ * matter then, because we don't use the interrupt.
+ */
+ if (mips_clockevent_init() != 0 || !cpu_has_mfc0_count_bug())
+ init_mips_clocksource();
+}
diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c
new file mode 100644
index 000000000..08ad6371f
--- /dev/null
+++ b/arch/mips/kernel/topology.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/percpu.h>
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static int __init topology_init(void)
+{
+ int i, ret;
+
+#ifdef CONFIG_NUMA
+ for_each_online_node(i)
+ register_one_node(i);
+#endif /* CONFIG_NUMA */
+
+ for_each_present_cpu(i) {
+ struct cpu *c = &per_cpu(cpu_devices, i);
+
+ c->hotpluggable = !!i;
+ ret = register_cpu(c, i);
+ if (ret)
+ printk(KERN_WARNING "topology_init: register_cpu %d "
+ "failed (%d)\n", i, ret);
+ }
+
+ return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
new file mode 100644
index 000000000..ebd0101f0
--- /dev/null
+++ b/arch/mips/kernel/traps.c
@@ -0,0 +1,2571 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
+ * Copyright (C) 1995, 1996 Paul M. Antoine
+ * Copyright (C) 1998 Ulf Carlsson
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ */
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/cpu_pm.h>
+#include <linux/kexec.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/extable.h>
+#include <linux/mm.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/debug.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/memblock.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/kprobes.h>
+#include <linux/notifier.h>
+#include <linux/kdb.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+#include <asm/cop2.h>
+#include <asm/cpu.h>
+#include <asm/cpu-type.h>
+#include <asm/dsp.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
+#include <asm/idle.h>
+#include <asm/isa-rev.h>
+#include <asm/mips-cps.h>
+#include <asm/mips-r2-to-r6-emul.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/module.h>
+#include <asm/msa.h>
+#include <asm/ptrace.h>
+#include <asm/sections.h>
+#include <asm/siginfo.h>
+#include <asm/tlbdebug.h>
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+#include <asm/watch.h>
+#include <asm/mmu_context.h>
+#include <asm/types.h>
+#include <asm/stacktrace.h>
+#include <asm/tlbex.h>
+#include <asm/uasm.h>
+
+#include <asm/mach-loongson64/cpucfg-emul.h>
+
+extern void check_wait(void);
+extern asmlinkage void rollback_handle_int(void);
+extern asmlinkage void handle_int(void);
+extern asmlinkage void handle_adel(void);
+extern asmlinkage void handle_ades(void);
+extern asmlinkage void handle_ibe(void);
+extern asmlinkage void handle_dbe(void);
+extern asmlinkage void handle_sys(void);
+extern asmlinkage void handle_bp(void);
+extern asmlinkage void handle_ri(void);
+extern asmlinkage void handle_ri_rdhwr_tlbp(void);
+extern asmlinkage void handle_ri_rdhwr(void);
+extern asmlinkage void handle_cpu(void);
+extern asmlinkage void handle_ov(void);
+extern asmlinkage void handle_tr(void);
+extern asmlinkage void handle_msa_fpe(void);
+extern asmlinkage void handle_fpe(void);
+extern asmlinkage void handle_ftlb(void);
+extern asmlinkage void handle_gsexc(void);
+extern asmlinkage void handle_msa(void);
+extern asmlinkage void handle_mdmx(void);
+extern asmlinkage void handle_watch(void);
+extern asmlinkage void handle_mt(void);
+extern asmlinkage void handle_dsp(void);
+extern asmlinkage void handle_mcheck(void);
+extern asmlinkage void handle_reserved(void);
+extern void tlb_do_page_fault_0(void);
+
+void (*board_be_init)(void);
+int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
+void (*board_nmi_handler_setup)(void);
+void (*board_ejtag_handler_setup)(void);
+void (*board_bind_eic_interrupt)(int irq, int regset);
+void (*board_ebase_setup)(void);
+void(*board_cache_error_setup)(void);
+
+static void show_raw_backtrace(unsigned long reg29, const char *loglvl)
+{
+ unsigned long *sp = (unsigned long *)(reg29 & ~3);
+ unsigned long addr;
+
+ printk("%sCall Trace:", loglvl);
+#ifdef CONFIG_KALLSYMS
+ printk("%s\n", loglvl);
+#endif
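+	/*
+	 * Without frame information, just scan raw stack words and print
+	 * every value that looks like a kernel text address; this may
+	 * include stale data as well as genuine return addresses.
+	 */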
+ while (!kstack_end(sp)) {
+ unsigned long __user *p =
+ (unsigned long __user *)(unsigned long)sp++;
+ if (__get_user(addr, p)) {
+ printk("%s (Bad stack address)", loglvl);
+ break;
+ }
+ if (__kernel_text_address(addr))
+ print_ip_sym(loglvl, addr);
+ }
+ printk("%s\n", loglvl);
+}
+
+#ifdef CONFIG_KALLSYMS
+int raw_show_trace;
+static int __init set_raw_show_trace(char *str)
+{
+ raw_show_trace = 1;
+ return 1;
+}
+__setup("raw_show_trace", set_raw_show_trace);
+#endif
+
+static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
+ const char *loglvl)
+{
+ unsigned long sp = regs->regs[29];
+ unsigned long ra = regs->regs[31];
+ unsigned long pc = regs->cp0_epc;
+
+ if (!task)
+ task = current;
+
+ if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
+ show_raw_backtrace(sp, loglvl);
+ return;
+ }
+ printk("%sCall Trace:\n", loglvl);
+ do {
+ print_ip_sym(loglvl, pc);
+ pc = unwind_stack(task, &sp, pc, &ra);
+ } while (pc);
+ pr_cont("\n");
+}
+
+/*
+ * This routine abuses get_user()/put_user() to reference pointers
+ * with at least a bit of error checking ...
+ */
+static void show_stacktrace(struct task_struct *task,
+ const struct pt_regs *regs, const char *loglvl)
+{
+ const int field = 2 * sizeof(unsigned long);
+ long stackdata;
+ int i;
+ unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
+
+ printk("%sStack :", loglvl);
+ i = 0;
+ while ((unsigned long) sp & (PAGE_SIZE - 1)) {
+ if (i && ((i % (64 / field)) == 0)) {
+ pr_cont("\n");
+ printk("%s ", loglvl);
+ }
+ if (i > 39) {
+ pr_cont(" ...");
+ break;
+ }
+
+ if (__get_user(stackdata, sp++)) {
+ pr_cont(" (Bad stack address)");
+ break;
+ }
+
+ pr_cont(" %0*lx", field, stackdata);
+ i++;
+ }
+ pr_cont("\n");
+ show_backtrace(task, regs, loglvl);
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+{
+ struct pt_regs regs;
+ mm_segment_t old_fs = get_fs();
+
+ regs.cp0_status = KSU_KERNEL;
+ if (sp) {
+ regs.regs[29] = (unsigned long)sp;
+ regs.regs[31] = 0;
+ regs.cp0_epc = 0;
+ } else {
+ if (task && task != current) {
+ regs.regs[29] = task->thread.reg29;
+ regs.regs[31] = 0;
+ regs.cp0_epc = task->thread.reg31;
+ } else {
+ prepare_frametrace(&regs);
+ }
+ }
+ /*
+ * show_stack() deals exclusively with kernel mode, so be sure to access
+ * the stack in the kernel (not user) address space.
+ */
+ set_fs(KERNEL_DS);
+ show_stacktrace(task, &regs, loglvl);
+ set_fs(old_fs);
+}
+
+static void show_code(unsigned int __user *pc)
+{
+ long i;
+ unsigned short __user *pc16 = NULL;
+
+ printk("Code:");
+
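+	/*
+	 * Bit 0 of EPC set indicates MIPS16e/microMIPS mode, so dump the
+	 * code as 16-bit halfwords rather than 32-bit words.
+	 */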
+ if ((unsigned long)pc & 1)
+ pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
+ for(i = -3 ; i < 6 ; i++) {
+ unsigned int insn;
+ if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
+ pr_cont(" (Bad address in epc)\n");
+ break;
+ }
+ pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
+ }
+ pr_cont("\n");
+}
+
+static void __show_regs(const struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned int cause = regs->cp0_cause;
+ unsigned int exccode;
+ int i;
+
+ show_regs_print_info(KERN_DEFAULT);
+
+ /*
+ * Saved main processor registers
+ */
+ for (i = 0; i < 32; ) {
+ if ((i % 4) == 0)
+ printk("$%2d :", i);
+ if (i == 0)
+ pr_cont(" %0*lx", field, 0UL);
+ else if (i == 26 || i == 27)
+ pr_cont(" %*s", field, "");
+ else
+ pr_cont(" %0*lx", field, regs->regs[i]);
+
+ i++;
+ if ((i % 4) == 0)
+ pr_cont("\n");
+ }
+
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ printk("Acx : %0*lx\n", field, regs->acx);
+#endif
+ if (MIPS_ISA_REV < 6) {
+ printk("Hi : %0*lx\n", field, regs->hi);
+ printk("Lo : %0*lx\n", field, regs->lo);
+ }
+
+ /*
+ * Saved cp0 registers
+ */
+ printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
+ (void *) regs->cp0_epc);
+ printk("ra : %0*lx %pS\n", field, regs->regs[31],
+ (void *) regs->regs[31]);
+
+ printk("Status: %08x ", (uint32_t) regs->cp0_status);
+
+ if (cpu_has_3kex) {
+ if (regs->cp0_status & ST0_KUO)
+ pr_cont("KUo ");
+ if (regs->cp0_status & ST0_IEO)
+ pr_cont("IEo ");
+ if (regs->cp0_status & ST0_KUP)
+ pr_cont("KUp ");
+ if (regs->cp0_status & ST0_IEP)
+ pr_cont("IEp ");
+ if (regs->cp0_status & ST0_KUC)
+ pr_cont("KUc ");
+ if (regs->cp0_status & ST0_IEC)
+ pr_cont("IEc ");
+ } else if (cpu_has_4kex) {
+ if (regs->cp0_status & ST0_KX)
+ pr_cont("KX ");
+ if (regs->cp0_status & ST0_SX)
+ pr_cont("SX ");
+ if (regs->cp0_status & ST0_UX)
+ pr_cont("UX ");
+ switch (regs->cp0_status & ST0_KSU) {
+ case KSU_USER:
+ pr_cont("USER ");
+ break;
+ case KSU_SUPERVISOR:
+ pr_cont("SUPERVISOR ");
+ break;
+ case KSU_KERNEL:
+ pr_cont("KERNEL ");
+ break;
+ default:
+ pr_cont("BAD_MODE ");
+ break;
+ }
+ if (regs->cp0_status & ST0_ERL)
+ pr_cont("ERL ");
+ if (regs->cp0_status & ST0_EXL)
+ pr_cont("EXL ");
+ if (regs->cp0_status & ST0_IE)
+ pr_cont("IE ");
+ }
+ pr_cont("\n");
+
+ exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
+ printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
+
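+	/* ExcCodes 1-5 (TLB and address error exceptions) set BadVAddr. */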
+ if (1 <= exccode && exccode <= 5)
+ printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
+
+ printk("PrId : %08x (%s)\n", read_c0_prid(),
+ cpu_name_string());
+}
+
+/*
+ * FIXME: really the generic show_regs should take a const pointer argument.
+ */
+void show_regs(struct pt_regs *regs)
+{
+ __show_regs(regs);
+ dump_stack();
+}
+
+void show_registers(struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+ mm_segment_t old_fs = get_fs();
+
+ __show_regs(regs);
+ print_modules();
+ printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
+ current->comm, current->pid, current_thread_info(), current,
+ field, current_thread_info()->tp_value);
+ if (cpu_has_userlocal) {
+ unsigned long tls;
+
+ tls = read_c0_userlocal();
+ if (tls != current_thread_info()->tp_value)
+ printk("*HwTLS: %0*lx\n", field, tls);
+ }
+
+ if (!user_mode(regs))
+ /* Necessary for getting the correct stack content */
+ set_fs(KERNEL_DS);
+ show_stacktrace(current, regs, KERN_DEFAULT);
+ show_code((unsigned int __user *) regs->cp0_epc);
+ printk("\n");
+ set_fs(old_fs);
+}
+
+static DEFINE_RAW_SPINLOCK(die_lock);
+
+void __noreturn die(const char *str, struct pt_regs *regs)
+{
+ static int die_counter;
+ int sig = SIGSEGV;
+
+ oops_enter();
+
+ if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
+ SIGSEGV) == NOTIFY_STOP)
+ sig = 0;
+
+ console_verbose();
+ raw_spin_lock_irq(&die_lock);
+ bust_spinlocks(1);
+
+ printk("%s[#%d]:\n", str, ++die_counter);
+ show_registers(regs);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ raw_spin_unlock_irq(&die_lock);
+
+ oops_exit();
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops)
+ panic("Fatal exception");
+
+ if (regs && kexec_should_crash(current))
+ crash_kexec(regs);
+
+ make_task_dead(sig);
+}
+
+extern struct exception_table_entry __start___dbe_table[];
+extern struct exception_table_entry __stop___dbe_table[];
+
+__asm__(
+" .section __dbe_table, \"a\"\n"
+" .previous \n");
+
+/* Given an address, look for it in the exception tables. */
+static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
+{
+ const struct exception_table_entry *e;
+
+ e = search_extable(__start___dbe_table,
+ __stop___dbe_table - __start___dbe_table, addr);
+ if (!e)
+ e = search_module_dbetables(addr);
+ return e;
+}
+
+asmlinkage void do_be(struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+ const struct exception_table_entry *fixup = NULL;
+ int data = regs->cp0_cause & 4;
+ int action = MIPS_BE_FATAL;
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ /* XXX For now. Fixme, this searches the wrong table ... */
+ if (data && !user_mode(regs))
+ fixup = search_dbe_tables(exception_epc(regs));
+
+ if (fixup)
+ action = MIPS_BE_FIXUP;
+
+ if (board_be_handler)
+ action = board_be_handler(regs, fixup != NULL);
+ else
+ mips_cm_error_report();
+
+ switch (action) {
+ case MIPS_BE_DISCARD:
+ goto out;
+ case MIPS_BE_FIXUP:
+ if (fixup) {
+ regs->cp0_epc = fixup->nextinsn;
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Assume it would be too dangerous to continue ...
+ */
+ printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
+ data ? "Data" : "Instruction",
+ field, regs->cp0_epc, field, regs->regs[31]);
+ if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
+ SIGBUS) == NOTIFY_STOP)
+ goto out;
+
+ die_if_kernel("Oops", regs);
+ force_sig(SIGBUS);
+
+out:
+ exception_exit(prev_state);
+}
+
+/*
+ * ll/sc, rdhwr, sync emulation
+ */
+
+#define OPCODE 0xfc000000
+#define BASE 0x03e00000
+#define RT 0x001f0000
+#define OFFSET 0x0000ffff
+#define LL 0xc0000000
+#define SC 0xe0000000
+#define SPEC0 0x00000000
+#define SPEC3 0x7c000000
+#define RD 0x0000f800
+#define FUNC 0x0000003f
+#define SYNC 0x0000000f
+#define RDHWR 0x0000003b
+
+/* microMIPS definitions */
+#define MM_POOL32A_FUNC 0xfc00ffff
+#define MM_RDHWR 0x00006b3c
+#define MM_RS 0x001f0000
+#define MM_RT 0x03e00000
+
+/*
+ * The ll_bit is cleared by r*_switch.S
+ */
+
+unsigned int ll_bit;
+struct task_struct *ll_task;
+
+static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
+{
+ unsigned long value, __user *vaddr;
+ long offset;
+
+ /*
+	 * Analyse the ll instruction that just caused an RI exception
+	 * and compute the referenced address in vaddr.
+ */
+
+ /* sign extend offset */
+ offset = opcode & OFFSET;
+ offset <<= 16;
+ offset >>= 16;
+
+ vaddr = (unsigned long __user *)
+ ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
+
+ if ((unsigned long)vaddr & 3)
+ return SIGBUS;
+ if (get_user(value, vaddr))
+ return SIGSEGV;
+
+ preempt_disable();
+
+ if (ll_task == NULL || ll_task == current) {
+ ll_bit = 1;
+ } else {
+ ll_bit = 0;
+ }
+ ll_task = current;
+
+ preempt_enable();
+
+ regs->regs[(opcode & RT) >> 16] = value;
+
+ return 0;
+}
+
+static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
+{
+ unsigned long __user *vaddr;
+ unsigned long reg;
+ long offset;
+
+ /*
+	 * Analyse the sc instruction that just caused an RI exception
+	 * and compute the referenced address in vaddr.
+ */
+
+ /* sign extend offset */
+ offset = opcode & OFFSET;
+ offset <<= 16;
+ offset >>= 16;
+
+ vaddr = (unsigned long __user *)
+ ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
+ reg = (opcode & RT) >> 16;
+
+ if ((unsigned long)vaddr & 3)
+ return SIGBUS;
+
+ preempt_disable();
+
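+	/*
+	 * The store only "succeeds" (rt = 1) if the link set up by the last
+	 * emulated ll still belongs to this task; otherwise rt is cleared
+	 * and no memory write is performed, mirroring real ll/sc semantics.
+	 */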
+ if (ll_bit == 0 || ll_task != current) {
+ regs->regs[reg] = 0;
+ preempt_enable();
+ return 0;
+ }
+
+ preempt_enable();
+
+ if (put_user(regs->regs[reg], vaddr))
+ return SIGSEGV;
+
+ regs->regs[reg] = 1;
+
+ return 0;
+}
+
+/*
+ * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both
+ * opcodes are supposed to result in coprocessor unusable exceptions if
+ * executed on ll/sc-less processors. That's the theory. In practice a
+ * few processors such as NEC's VR4100 throw reserved instruction exceptions
+ * instead, so we're doing the emulation thing in both exception handlers.
+ */
+static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
+{
+ if ((opcode & OPCODE) == LL) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, regs, 0);
+ return simulate_ll(regs, opcode);
+ }
+ if ((opcode & OPCODE) == SC) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, regs, 0);
+ return simulate_sc(regs, opcode);
+ }
+
+ return -1; /* Must be something else ... */
+}
+
+/*
+ * Simulate trapping 'rdhwr' instructions to provide user accessible
+ * registers not implemented in hardware.
+ */
+static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
+{
+ struct thread_info *ti = task_thread_info(current);
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, regs, 0);
+ switch (rd) {
+ case MIPS_HWR_CPUNUM: /* CPU number */
+ regs->regs[rt] = smp_processor_id();
+ return 0;
+ case MIPS_HWR_SYNCISTEP: /* SYNCI length */
+ regs->regs[rt] = min(current_cpu_data.dcache.linesz,
+ current_cpu_data.icache.linesz);
+ return 0;
+ case MIPS_HWR_CC: /* Read count register */
+ regs->regs[rt] = read_c0_count();
+ return 0;
+ case MIPS_HWR_CCRES: /* Count register resolution */
+ switch (current_cpu_type()) {
+ case CPU_20KC:
+ case CPU_25KF:
+ regs->regs[rt] = 1;
+ break;
+ default:
+ regs->regs[rt] = 2;
+ }
+ return 0;
+ case MIPS_HWR_ULR: /* Read UserLocal register */
+ regs->regs[rt] = ti->tp_value;
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
+{
+ if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
+ int rd = (opcode & RD) >> 11;
+ int rt = (opcode & RT) >> 16;
+
+ simulate_rdhwr(regs, rd, rt);
+ return 0;
+ }
+
+ /* Not ours. */
+ return -1;
+}
+
+static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
+{
+ if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
+ int rd = (opcode & MM_RS) >> 16;
+ int rt = (opcode & MM_RT) >> 21;
+ simulate_rdhwr(regs, rd, rt);
+ return 0;
+ }
+
+ /* Not ours. */
+ return -1;
+}
+
+static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
+{
+ if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, regs, 0);
+ return 0;
+ }
+
+ return -1; /* Must be something else ... */
+}
+
+/*
+ * Loongson-3 CSR instructions emulation
+ */
+
+#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
+
+#define LWC2 0xc8000000
+#define RS BASE
+#define CSR_OPCODE2 0x00000118
+#define CSR_OPCODE2_MASK 0x000007ff
+#define CSR_FUNC_MASK RT
+#define CSR_FUNC_CPUCFG 0x8
+
+static int simulate_loongson3_cpucfg(struct pt_regs *regs,
+ unsigned int opcode)
+{
+ int op = opcode & OPCODE;
+ int op2 = opcode & CSR_OPCODE2_MASK;
+ int csr_func = (opcode & CSR_FUNC_MASK) >> 16;
+
+ if (op == LWC2 && op2 == CSR_OPCODE2 && csr_func == CSR_FUNC_CPUCFG) {
+ int rd = (opcode & RD) >> 11;
+ int rs = (opcode & RS) >> 21;
+ __u64 sel = regs->regs[rs];
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
+
+ /* Do not emulate on unsupported core models. */
+ preempt_disable();
+ if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) {
+ preempt_enable();
+ return -1;
+ }
+ regs->regs[rd] = loongson3_cpucfg_read_synthesized(
+ &current_cpu_data, sel);
+ preempt_enable();
+ return 0;
+ }
+
+ /* Not ours. */
+ return -1;
+}
+#endif /* CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION */
+
+asmlinkage void do_ov(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ die_if_kernel("Integer overflow", regs);
+
+ force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc);
+ exception_exit(prev_state);
+}
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+/*
+ * Send SIGFPE according to FCSR Cause bits, which must have already
+ * been masked against Enable bits. This is important as Inexact can
+ * happen together with Overflow or Underflow, and `ptrace' can set
+ * any bits.
+ */
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk)
+{
+ int si_code = FPE_FLTUNK;
+
+ if (fcr31 & FPU_CSR_INV_X)
+ si_code = FPE_FLTINV;
+ else if (fcr31 & FPU_CSR_DIV_X)
+ si_code = FPE_FLTDIV;
+ else if (fcr31 & FPU_CSR_OVF_X)
+ si_code = FPE_FLTOVF;
+ else if (fcr31 & FPU_CSR_UDF_X)
+ si_code = FPE_FLTUND;
+ else if (fcr31 & FPU_CSR_INE_X)
+ si_code = FPE_FLTRES;
+
+ force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
+}
+
+int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
+{
+ int si_code;
+ struct vm_area_struct *vma;
+
+ switch (sig) {
+ case 0:
+ return 0;
+
+ case SIGFPE:
+ force_fcr31_sig(fcr31, fault_addr, current);
+ return 1;
+
+ case SIGBUS:
+ force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
+ return 1;
+
+ case SIGSEGV:
+ mmap_read_lock(current->mm);
+ vma = find_vma(current->mm, (unsigned long)fault_addr);
+ if (vma && (vma->vm_start <= (unsigned long)fault_addr))
+ si_code = SEGV_ACCERR;
+ else
+ si_code = SEGV_MAPERR;
+ mmap_read_unlock(current->mm);
+ force_sig_fault(SIGSEGV, si_code, fault_addr);
+ return 1;
+
+ default:
+ force_sig(sig);
+ return 1;
+ }
+}
+
+static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
+ unsigned long old_epc, unsigned long old_ra)
+{
+ union mips_instruction inst = { .word = opcode };
+ void __user *fault_addr;
+ unsigned long fcr31;
+ int sig;
+
+ /* If it's obviously not an FP instruction, skip it */
+ switch (inst.i_format.opcode) {
+ case cop1_op:
+ case cop1x_op:
+ case lwc1_op:
+ case ldc1_op:
+ case swc1_op:
+ case sdc1_op:
+ break;
+
+ default:
+ return -1;
+ }
+
+ /*
+ * do_ri skipped over the instruction via compute_return_epc, undo
+ * that for the FPU emulator.
+ */
+ regs->cp0_epc = old_epc;
+ regs->regs[31] = old_ra;
+
+ /* Run the emulator */
+ sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+ &fault_addr);
+
+ /*
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
+ */
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
+
+ /* Restore the hardware register state */
+ own_fpu(1);
+
+ /* Send a signal if required. */
+ process_fpemu_return(sig, fault_addr, fcr31);
+
+ return 0;
+}
+
+/*
+ * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
+ */
+asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
+{
+ enum ctx_state prev_state;
+ void __user *fault_addr;
+ int sig;
+
+ prev_state = exception_enter();
+ if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
+ SIGFPE) == NOTIFY_STOP)
+ goto out;
+
+ /* Clear FCSR.Cause before enabling interrupts */
+ write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
+ local_irq_enable();
+
+ die_if_kernel("FP exception in kernel code", regs);
+
+ if (fcr31 & FPU_CSR_UNI_X) {
+ /*
+ * Unimplemented operation exception. If we've got the full
+ * software emulator on-board, let's use it...
+ *
+ * Force FPU to dump state into task/thread context. We're
+ * moving a lot of data here for what is probably a single
+ * instruction, but the alternative is to pre-decode the FP
+ * register operands before invoking the emulator, which seems
+ * a bit extreme for what should be an infrequent event.
+ */
+
+ /* Run the emulator */
+ sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+ &fault_addr);
+
+ /*
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
+ */
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
+
+ /* Restore the hardware register state */
+ own_fpu(1); /* Using the FPU again. */
+ } else {
+ sig = SIGFPE;
+ fault_addr = (void __user *) regs->cp0_epc;
+ }
+
+ /* Send a signal if required. */
+ process_fpemu_return(sig, fault_addr, fcr31);
+
+out:
+ exception_exit(prev_state);
+}
+
+/*
+ * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
+ * emulated more than some threshold number of instructions, force migration to
+ * a "CPU" that has FP support.
+ */
+static void mt_ase_fp_affinity(void)
+{
+#ifdef CONFIG_MIPS_MT_FPAFF
+ if (mt_fpemul_threshold > 0 &&
+ ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
+ /*
+ * If there's no FPU present, or if the application has already
+ * restricted the allowed set to exclude any CPUs with FPUs,
+ * we'll skip the procedure.
+ */
+ if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
+ cpumask_t tmask;
+
+ current->thread.user_cpus_allowed
+ = current->cpus_mask;
+ cpumask_and(&tmask, &current->cpus_mask,
+ &mt_fpu_cpumask);
+ set_cpus_allowed_ptr(current, &tmask);
+ set_thread_flag(TIF_FPUBOUND);
+ }
+ }
+#endif /* CONFIG_MIPS_MT_FPAFF */
+}
+
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
+ unsigned long old_epc, unsigned long old_ra)
+{
+ return -1;
+}
+
+#endif /* !CONFIG_MIPS_FP_SUPPORT */
+
+void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
+ const char *str)
+{
+ char b[40];
+
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+ if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
+ SIGTRAP) == NOTIFY_STOP)
+ return;
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
+ if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
+ SIGTRAP) == NOTIFY_STOP)
+ return;
+
+ /*
+ * A short test says that IRIX 5.3 sends SIGTRAP for all trap
+ * insns, even for trap and break codes that indicate arithmetic
+ * failures. Weird ...
+ * But should we continue the brokenness??? --macro
+ */
+ switch (code) {
+ case BRK_OVERFLOW:
+ case BRK_DIVZERO:
+ scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
+ die_if_kernel(b, regs);
+ force_sig_fault(SIGFPE,
+ code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
+ (void __user *) regs->cp0_epc);
+ break;
+ case BRK_BUG:
+ die_if_kernel("Kernel bug detected", regs);
+ force_sig(SIGTRAP);
+ break;
+ case BRK_MEMU:
+ /*
+ * This breakpoint code is used by the FPU emulator to retake
+ * control of the CPU after executing the instruction from the
+ * delay slot of an emulated branch.
+ *
+ * Terminate if exception was recognized as a delay slot return
+ * otherwise handle as normal.
+ */
+ if (do_dsemulret(regs))
+ return;
+
+ die_if_kernel("Math emu break/trap", regs);
+ force_sig(SIGTRAP);
+ break;
+ default:
+ scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
+ die_if_kernel(b, regs);
+ if (si_code) {
+ force_sig_fault(SIGTRAP, si_code, NULL);
+ } else {
+ force_sig(SIGTRAP);
+ }
+ }
+}
+
+asmlinkage void do_bp(struct pt_regs *regs)
+{
+ unsigned long epc = msk_isa16_mode(exception_epc(regs));
+ unsigned int opcode, bcode;
+ enum ctx_state prev_state;
+ mm_segment_t seg;
+
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+
+ prev_state = exception_enter();
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
+ if (get_isa16_mode(regs->cp0_epc)) {
+ u16 instr[2];
+
+ if (__get_user(instr[0], (u16 __user *)epc))
+ goto out_sigsegv;
+
+ if (!cpu_has_mmips) {
+ /* MIPS16e mode */
+ bcode = (instr[0] >> 5) & 0x3f;
+ } else if (mm_insn_16bit(instr[0])) {
+ /* 16-bit microMIPS BREAK */
+ bcode = instr[0] & 0xf;
+ } else {
+ /* 32-bit microMIPS BREAK */
+ if (__get_user(instr[1], (u16 __user *)(epc + 2)))
+ goto out_sigsegv;
+ opcode = (instr[0] << 16) | instr[1];
+ bcode = (opcode >> 6) & ((1 << 20) - 1);
+ }
+ } else {
+ if (__get_user(opcode, (unsigned int __user *)epc))
+ goto out_sigsegv;
+ bcode = (opcode >> 6) & ((1 << 20) - 1);
+ }
+
+ /*
+	 * There is an ancient bug in the MIPS assemblers: the break code
+	 * starts at bit 16 instead of bit 6 in the opcode.
+	 * Gas is bug-compatible, but not always, grrr...
+	 * We handle both cases with a simple heuristic: if the code sits in
+	 * the upper ten bits, swap the halves so the checks below always
+	 * see the canonical low-bits encoding. --macro
+ */
+ if (bcode >= (1 << 10))
+ bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
+
+ /*
+ * notify the kprobe handlers, if instruction is likely to
+ * pertain to them.
+ */
+ switch (bcode) {
+ case BRK_UPROBE:
+ if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ case BRK_UPROBE_XOL:
+ if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ case BRK_KPROBE_BP:
+ if (notify_die(DIE_BREAK, "debug", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ case BRK_KPROBE_SSTEPBP:
+ if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ default:
+ break;
+ }
+
+ do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
+
+out:
+ set_fs(seg);
+ exception_exit(prev_state);
+ return;
+
+out_sigsegv:
+ force_sig(SIGSEGV);
+ goto out;
+}
+
+asmlinkage void do_tr(struct pt_regs *regs)
+{
+ u32 opcode, tcode = 0;
+ enum ctx_state prev_state;
+ u16 instr[2];
+ mm_segment_t seg;
+ unsigned long epc = msk_isa16_mode(exception_epc(regs));
+
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+
+ prev_state = exception_enter();
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
+ if (get_isa16_mode(regs->cp0_epc)) {
+ if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
+ __get_user(instr[1], (u16 __user *)(epc + 2)))
+ goto out_sigsegv;
+ opcode = (instr[0] << 16) | instr[1];
+ /* Immediate versions don't provide a code. */
+ if (!(opcode & OPCODE))
+ tcode = (opcode >> 12) & ((1 << 4) - 1);
+ } else {
+ if (__get_user(opcode, (u32 __user *)epc))
+ goto out_sigsegv;
+ /* Immediate versions don't provide a code. */
+ if (!(opcode & OPCODE))
+ tcode = (opcode >> 6) & ((1 << 10) - 1);
+ }
+
+ do_trap_or_bp(regs, tcode, 0, "Trap");
+
+out:
+ set_fs(seg);
+ exception_exit(prev_state);
+ return;
+
+out_sigsegv:
+ force_sig(SIGSEGV);
+ goto out;
+}
+
+asmlinkage void do_ri(struct pt_regs *regs)
+{
+ unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
+ unsigned long old_epc = regs->cp0_epc;
+ unsigned long old31 = regs->regs[31];
+ enum ctx_state prev_state;
+ unsigned int opcode = 0;
+ int status = -1;
+
+ /*
+ * Avoid any kernel code. Just emulate the R2 instruction
+ * as quickly as possible.
+ */
+ if (mipsr2_emulation && cpu_has_mips_r6 &&
+ likely(user_mode(regs)) &&
+ likely(get_user(opcode, epc) >= 0)) {
+ unsigned long fcr31 = 0;
+
+ status = mipsr2_decoder(regs, opcode, &fcr31);
+ switch (status) {
+ case 0:
+ case SIGEMT:
+ return;
+ case SIGILL:
+ goto no_r2_instr;
+ default:
+ process_fpemu_return(status,
+ &current->thread.cp0_baduaddr,
+ fcr31);
+ return;
+ }
+ }
+
+no_r2_instr:
+
+ prev_state = exception_enter();
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
+
+ if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
+ SIGILL) == NOTIFY_STOP)
+ goto out;
+
+ die_if_kernel("Reserved instruction in kernel code", regs);
+
+ if (unlikely(compute_return_epc(regs) < 0))
+ goto out;
+
+ if (!get_isa16_mode(regs->cp0_epc)) {
+ if (unlikely(get_user(opcode, epc) < 0))
+ status = SIGSEGV;
+
+ if (!cpu_has_llsc && status < 0)
+ status = simulate_llsc(regs, opcode);
+
+ if (status < 0)
+ status = simulate_rdhwr_normal(regs, opcode);
+
+ if (status < 0)
+ status = simulate_sync(regs, opcode);
+
+ if (status < 0)
+ status = simulate_fp(regs, opcode, old_epc, old31);
+
+#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
+ if (status < 0)
+ status = simulate_loongson3_cpucfg(regs, opcode);
+#endif
+ } else if (cpu_has_mmips) {
+ unsigned short mmop[2] = { 0 };
+
+ if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
+ status = SIGSEGV;
+ if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
+ status = SIGSEGV;
+ opcode = mmop[0];
+ opcode = (opcode << 16) | mmop[1];
+
+ if (status < 0)
+ status = simulate_rdhwr_mm(regs, opcode);
+ }
+
+ if (status < 0)
+ status = SIGILL;
+
+ if (unlikely(status > 0)) {
+ regs->cp0_epc = old_epc; /* Undo skip-over. */
+ regs->regs[31] = old31;
+ force_sig(status);
+ }
+
+out:
+ exception_exit(prev_state);
+}
+
+/*
+ * No lock; only written during early bootup by CPU 0.
+ */
+static RAW_NOTIFIER_HEAD(cu2_chain);
+
+int __ref register_cu2_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&cu2_chain, nb);
+}
+
+int cu2_notifier_call_chain(unsigned long val, void *v)
+{
+ return raw_notifier_call_chain(&cu2_chain, val, v);
+}
+
+static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
+ void *data)
+{
+ struct pt_regs *regs = data;
+
+ die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
+ "instruction", regs);
+ force_sig(SIGILL);
+
+ return NOTIFY_OK;
+}
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+static int enable_restore_fp_context(int msa)
+{
+ int err, was_fpu_owner, prior_msa;
+ bool first_fp;
+
+ /* Initialize context if it hasn't been used already */
+ first_fp = init_fp_ctx(current);
+
+ if (first_fp) {
+ preempt_disable();
+ err = own_fpu_inatomic(1);
+ if (msa && !err) {
+ enable_msa();
+ /*
+			 * With MSA enabled, userspace can see MSACSR
+			 * and the MSA regs, but their values may still
+			 * belong to a previous task; restore them from
+			 * the saved FP/MSA context.
+ */
+ write_msa_csr(current->thread.fpu.msacsr);
+ /*
+			 * own_fpu_inatomic(1) only restores the low
+			 * 64 bits; fix up the high 64 bits here.
+ */
+ init_msa_upper();
+ set_thread_flag(TIF_USEDMSA);
+ set_thread_flag(TIF_MSA_CTX_LIVE);
+ }
+ preempt_enable();
+ return err;
+ }
+
+ /*
+ * This task has formerly used the FP context.
+ *
+ * If this thread has no live MSA vector context then we can simply
+ * restore the scalar FP context. If it has live MSA vector context
+ * (that is, it has or may have used MSA since last performing a
+ * function call) then we'll need to restore the vector context. This
+ * applies even if we're currently only executing a scalar FP
+ * instruction. This is because if we were to later execute an MSA
+ * instruction then we'd either have to:
+ *
+ * - Restore the vector context & clobber any registers modified by
+ * scalar FP instructions between now & then.
+ *
+ * or
+ *
+ * - Not restore the vector context & lose the most significant bits
+ * of all vector registers.
+ *
+ * Neither of those options is acceptable. We cannot restore the least
+ * significant bits of the registers now & only restore the most
+ * significant bits later because the most significant bits of any
+ * vector registers whose aliased FP register is modified now will have
+ * been zeroed. We'd have no way to know that when restoring the vector
+ * context & thus may load an outdated value for the most significant
+ * bits of a vector register.
+ */
+ if (!msa && !thread_msa_context_live())
+ return own_fpu(1);
+
+ /*
+ * This task is using or has previously used MSA. Thus we require
+ * that Status.FR == 1.
+ */
+ preempt_disable();
+ was_fpu_owner = is_fpu_owner();
+ err = own_fpu_inatomic(0);
+ if (err)
+ goto out;
+
+ enable_msa();
+ write_msa_csr(current->thread.fpu.msacsr);
+ set_thread_flag(TIF_USEDMSA);
+
+ /*
+ * If this is the first time that the task is using MSA and it has
+	 * previously used scalar FP in this time slice then we already have
+ * FP context which we shouldn't clobber. We do however need to clear
+ * the upper 64b of each vector register so that this task has no
+ * opportunity to see data left behind by another.
+ */
+ prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
+ if (!prior_msa && was_fpu_owner) {
+ init_msa_upper();
+
+ goto out;
+ }
+
+ if (!prior_msa) {
+ /*
+ * Restore the least significant 64b of each vector register
+ * from the existing scalar FP context.
+ */
+ _restore_fp(current);
+
+ /*
+ * The task has not formerly used MSA, so clear the upper 64b
+ * of each vector register such that it cannot see data left
+ * behind by another task.
+ */
+ init_msa_upper();
+ } else {
+ /* We need to restore the vector context. */
+ restore_msa(current);
+
+ /* Restore the scalar FP control & status register */
+ if (!was_fpu_owner)
+ write_32bit_cp1_register(CP1_STATUS,
+ current->thread.fpu.fcr31);
+ }
+
+out:
+ preempt_enable();
+
+ return 0;
+}
+
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+static int enable_restore_fp_context(int msa)
+{
+ return SIGILL;
+}
+
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+asmlinkage void do_cpu(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+ unsigned int __user *epc;
+ unsigned long old_epc, old31;
+ unsigned int opcode;
+ unsigned int cpid;
+ int status;
+
+ prev_state = exception_enter();
+ cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
+
+ if (cpid != 2)
+ die_if_kernel("do_cpu invoked from kernel context!", regs);
+
+ switch (cpid) {
+ case 0:
+ epc = (unsigned int __user *)exception_epc(regs);
+ old_epc = regs->cp0_epc;
+ old31 = regs->regs[31];
+ opcode = 0;
+ status = -1;
+
+ if (unlikely(compute_return_epc(regs) < 0))
+ break;
+
+ if (!get_isa16_mode(regs->cp0_epc)) {
+ if (unlikely(get_user(opcode, epc) < 0))
+ status = SIGSEGV;
+
+ if (!cpu_has_llsc && status < 0)
+ status = simulate_llsc(regs, opcode);
+ }
+
+ if (status < 0)
+ status = SIGILL;
+
+ if (unlikely(status > 0)) {
+ regs->cp0_epc = old_epc; /* Undo skip-over. */
+ regs->regs[31] = old31;
+ force_sig(status);
+ }
+
+ break;
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case 3:
+ /*
+ * The COP3 opcode space and consequently the CP0.Status.CU3
+ * bit and the CP0.Cause.CE=3 encoding have been removed as
+		 * of the MIPS III ISA. From the MIPS IV and MIPS32r2 ISAs
+		 * onwards the space has been reused for COP1X instructions,
+		 * which are enabled by the CP0.Status.CU1 bit and consequently
+ * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
+ * exceptions. Some FPU-less processors that implement one
+ * of these ISAs however use this code erroneously for COP1X
+ * instructions. Therefore we redirect this trap to the FP
+ * emulator too.
+ */
+ if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
+ force_sig(SIGILL);
+ break;
+ }
+ fallthrough;
+ case 1: {
+ void __user *fault_addr;
+ unsigned long fcr31;
+ int err, sig;
+
+ err = enable_restore_fp_context(0);
+
+ if (raw_cpu_has_fpu && !err)
+ break;
+
+ sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
+ &fault_addr);
+
+ /*
+ * We can't allow the emulated instruction to leave
+ * any enabled Cause bits set in $fcr31.
+ */
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
+
+ /* Send a signal if required. */
+ if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
+ mt_ase_fp_affinity();
+
+ break;
+ }
+#else /* CONFIG_MIPS_FP_SUPPORT */
+ case 1:
+ case 3:
+ force_sig(SIGILL);
+ break;
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+ case 2:
+ raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
+ break;
+ }
+
+ exception_exit(prev_state);
+}
+
+asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
+{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
+ if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
+ current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
+ goto out;
+
+ /* Clear MSACSR.Cause before enabling interrupts */
+ write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
+ local_irq_enable();
+
+ die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
+ force_sig(SIGFPE);
+out:
+ exception_exit(prev_state);
+}
+
+asmlinkage void do_msa(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+ int err;
+
+ prev_state = exception_enter();
+
+ if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
+ force_sig(SIGILL);
+ goto out;
+ }
+
+ die_if_kernel("do_msa invoked from kernel context!", regs);
+
+ err = enable_restore_fp_context(1);
+ if (err)
+ force_sig(SIGILL);
+out:
+ exception_exit(prev_state);
+}
+
+asmlinkage void do_mdmx(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ force_sig(SIGILL);
+ exception_exit(prev_state);
+}
+
+/*
+ * Called with interrupts disabled.
+ */
+asmlinkage void do_watch(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ /*
+	 * Clear the WP bit (bit 22) of the Cause register so we don't loop
+ * forever.
+ */
+ clear_c0_cause(CAUSEF_WP);
+
+ /*
+ * If the current thread has the watch registers loaded, save
+ * their values and send SIGTRAP. Otherwise another thread
+ * left the registers set, clear them and continue.
+ */
+ if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
+ mips_read_watch_registers();
+ local_irq_enable();
+ force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
+ } else {
+ mips_clear_watch_registers();
+ local_irq_enable();
+ }
+ exception_exit(prev_state);
+}
+
+asmlinkage void do_mcheck(struct pt_regs *regs)
+{
+ int multi_match = regs->cp0_status & ST0_TS;
+ enum ctx_state prev_state;
+ mm_segment_t old_fs = get_fs();
+
+ prev_state = exception_enter();
+ show_regs(regs);
+
+ if (multi_match) {
+ dump_tlb_regs();
+ pr_info("\n");
+ dump_tlb_all();
+ }
+
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+
+ show_code((unsigned int __user *) regs->cp0_epc);
+
+ set_fs(old_fs);
+
+ /*
+ * Some chips may have other causes of machine check (e.g. SB1
+ * graduation timer)
+ */
+ panic("Caught Machine Check exception - %scaused by multiple "
+ "matching entries in the TLB.",
+ (multi_match) ? "" : "not ");
+}
+
+asmlinkage void do_mt(struct pt_regs *regs)
+{
+ int subcode;
+
+ subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
+ >> VPECONTROL_EXCPT_SHIFT;
+ switch (subcode) {
+ case 0:
+ printk(KERN_DEBUG "Thread Underflow\n");
+ break;
+ case 1:
+ printk(KERN_DEBUG "Thread Overflow\n");
+ break;
+ case 2:
+ printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
+ break;
+ case 3:
+ printk(KERN_DEBUG "Gating Storage Exception\n");
+ break;
+ case 4:
+ printk(KERN_DEBUG "YIELD Scheduler Exception\n");
+ break;
+ case 5:
+ printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
+ break;
+ default:
+ printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
+ subcode);
+ break;
+ }
+ die_if_kernel("MIPS MT Thread exception in kernel", regs);
+
+ force_sig(SIGILL);
+}
+
+
+asmlinkage void do_dsp(struct pt_regs *regs)
+{
+ if (cpu_has_dsp)
+ panic("Unexpected DSP exception");
+
+ force_sig(SIGILL);
+}
+
+asmlinkage void do_reserved(struct pt_regs *regs)
+{
+ /*
+ * Game over - no way to handle this if it ever occurs. Most probably
+ * caused by a new unknown cpu type or after another deadly
+	 * hardware/software error.
+ */
+ show_regs(regs);
+ panic("Caught reserved exception %ld - should not happen.",
+ (regs->cp0_cause & 0x7f) >> 2);
+}
+
+static int __initdata l1parity = 1;
+static int __init nol1parity(char *s)
+{
+ l1parity = 0;
+ return 1;
+}
+__setup("nol1par", nol1parity);
+static int __initdata l2parity = 1;
+static int __init nol2parity(char *s)
+{
+ l2parity = 0;
+ return 1;
+}
+__setup("nol2par", nol2parity);
+
+/*
+ * Some MIPS CPUs can enable/disable cache parity detection, but they
+ * do it in different ways.
+ */
+static inline __init void parity_protection_init(void)
+{
+#define ERRCTL_PE 0x80000000
+#define ERRCTL_L2P 0x00800000
+
+ if (mips_cm_revision() >= CM_REV_CM3) {
+ ulong gcr_ectl, cp0_ectl;
+
+ /*
+ * With CM3 systems we need to ensure that the L1 & L2
+ * parity enables are set to the same value, since this
+ * is presumed by the hardware engineers.
+ *
+ * If the user disabled either of L1 or L2 ECC checking,
+ * disable both.
+ */
+ l1parity &= l2parity;
+ l2parity &= l1parity;
+
+ /* Probe L1 ECC support */
+ cp0_ectl = read_c0_ecc();
+ write_c0_ecc(cp0_ectl | ERRCTL_PE);
+ back_to_back_c0_hazard();
+ cp0_ectl = read_c0_ecc();
+
+ /* Probe L2 ECC support */
+ gcr_ectl = read_gcr_err_control();
+
+ if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
+ !(cp0_ectl & ERRCTL_PE)) {
+ /*
+ * One of L1 or L2 ECC checking isn't supported,
+ * so we cannot enable either.
+ */
+ l1parity = l2parity = 0;
+ }
+
+ /* Configure L1 ECC checking */
+ if (l1parity)
+ cp0_ectl |= ERRCTL_PE;
+ else
+ cp0_ectl &= ~ERRCTL_PE;
+ write_c0_ecc(cp0_ectl);
+ back_to_back_c0_hazard();
+ WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);
+
+ /* Configure L2 ECC checking */
+ if (l2parity)
+ gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
+ else
+ gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
+ write_gcr_err_control(gcr_ectl);
+ gcr_ectl = read_gcr_err_control();
+ gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
+ WARN_ON(!!gcr_ectl != l2parity);
+
+ pr_info("Cache parity protection %sabled\n",
+ l1parity ? "en" : "dis");
+ return;
+ }
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_74K:
+ case CPU_1004K:
+ case CPU_1074K:
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ case CPU_QEMU_GENERIC:
+ case CPU_P6600:
+ {
+ unsigned long errctl;
+ unsigned int l1parity_present, l2parity_present;
+
+ errctl = read_c0_ecc();
+ errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
+
+ /* probe L1 parity support */
+ write_c0_ecc(errctl | ERRCTL_PE);
+ back_to_back_c0_hazard();
+ l1parity_present = (read_c0_ecc() & ERRCTL_PE);
+
+ /* probe L2 parity support */
+ write_c0_ecc(errctl|ERRCTL_L2P);
+ back_to_back_c0_hazard();
+ l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
+
+ if (l1parity_present && l2parity_present) {
+ if (l1parity)
+ errctl |= ERRCTL_PE;
+ if (l1parity ^ l2parity)
+ errctl |= ERRCTL_L2P;
+ } else if (l1parity_present) {
+ if (l1parity)
+ errctl |= ERRCTL_PE;
+ } else if (l2parity_present) {
+ if (l2parity)
+ errctl |= ERRCTL_L2P;
+ } else {
+ /* No parity available */
+ }
+
+ printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
+
+ write_c0_ecc(errctl);
+ back_to_back_c0_hazard();
+ errctl = read_c0_ecc();
+ printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
+
+ if (l1parity_present)
+ printk(KERN_INFO "Cache parity protection %sabled\n",
+ (errctl & ERRCTL_PE) ? "en" : "dis");
+
+ if (l2parity_present) {
+ if (l1parity_present && l1parity)
+ errctl ^= ERRCTL_L2P;
+ printk(KERN_INFO "L2 cache parity protection %sabled\n",
+ (errctl & ERRCTL_L2P) ? "en" : "dis");
+ }
+ }
+ break;
+
+ case CPU_5KC:
+ case CPU_5KE:
+ case CPU_LOONGSON32:
+ write_c0_ecc(0x80000000);
+ back_to_back_c0_hazard();
+ /* Set the PE bit (bit 31) in the c0_errctl register. */
+ printk(KERN_INFO "Cache parity protection %sabled\n",
+ (read_c0_ecc() & 0x80000000) ? "en" : "dis");
+ break;
+ case CPU_20KC:
+ case CPU_25KF:
+ /* Clear the DE bit (bit 16) in the c0_status register. */
+ printk(KERN_INFO "Enable cache parity protection for "
+ "MIPS 20KC/25KF CPUs.\n");
+ clear_c0_status(ST0_DE);
+ break;
+ default:
+ break;
+ }
+}
+
+asmlinkage void cache_parity_error(void)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned int reg_val;
+
+ /* For the moment, report the problem and hang. */
+ printk("Cache error exception:\n");
+ printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
+ reg_val = read_c0_cacheerr();
+ printk("c0_cacheerr == %08x\n", reg_val);
+
+ printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
+ reg_val & (1<<30) ? "secondary" : "primary",
+ reg_val & (1<<31) ? "data" : "insn");
+ if ((cpu_has_mips_r2_r6) &&
+ ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
+ pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
+ reg_val & (1<<29) ? "ED " : "",
+ reg_val & (1<<28) ? "ET " : "",
+ reg_val & (1<<27) ? "ES " : "",
+ reg_val & (1<<26) ? "EE " : "",
+ reg_val & (1<<25) ? "EB " : "",
+ reg_val & (1<<24) ? "EI " : "",
+ reg_val & (1<<23) ? "E1 " : "",
+ reg_val & (1<<22) ? "E0 " : "");
+ } else {
+ pr_err("Error bits: %s%s%s%s%s%s%s\n",
+ reg_val & (1<<29) ? "ED " : "",
+ reg_val & (1<<28) ? "ET " : "",
+ reg_val & (1<<26) ? "EE " : "",
+ reg_val & (1<<25) ? "EB " : "",
+ reg_val & (1<<24) ? "EI " : "",
+ reg_val & (1<<23) ? "E1 " : "",
+ reg_val & (1<<22) ? "E0 " : "");
+ }
+ printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
+
+#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
+ if (reg_val & (1<<22))
+ printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
+
+ if (reg_val & (1<<23))
+ printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
+#endif
+
+ panic("Can't handle the cache error!");
+}
+
+asmlinkage void do_ftlb(void)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned int reg_val;
+
+ /* For the moment, report the problem and hang. */
+ if ((cpu_has_mips_r2_r6) &&
+ (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
+ ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
+ pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
+ read_c0_ecc());
+ pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
+ reg_val = read_c0_cacheerr();
+ pr_err("c0_cacheerr == %08x\n", reg_val);
+
+ if ((reg_val & 0xc0000000) == 0xc0000000) {
+ pr_err("Decoded c0_cacheerr: FTLB parity error\n");
+ } else {
+ pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
+ reg_val & (1<<30) ? "secondary" : "primary",
+ reg_val & (1<<31) ? "data" : "insn");
+ }
+ } else {
+ pr_err("FTLB error exception\n");
+ }
+ /* Just print the cacheerr bits for now */
+ cache_parity_error();
+}
+
+asmlinkage void do_gsexc(struct pt_regs *regs, u32 diag1)
+{
+ u32 exccode = (diag1 & LOONGSON_DIAG1_EXCCODE) >>
+ LOONGSON_DIAG1_EXCCODE_SHIFT;
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+
+ switch (exccode) {
+ case 0x08:
+ /* Undocumented exception, will trigger on certain
+ * also-undocumented instructions accessible from userspace.
+ * Processor state is not otherwise corrupted, but currently
+ * we don't know how to proceed. Maybe there is some
+ * undocumented control flag to enable the instructions?
+ */
+ force_sig(SIGILL);
+ break;
+
+ default:
+ /* None of the other exceptions, documented or not, have
+ * further details given; none are encountered in the wild
+ * either. Panic in case some of them turn out to be fatal.
+ */
+ show_regs(regs);
+ panic("Unhandled Loongson exception - GSCause = %08x", diag1);
+ }
+
+ exception_exit(prev_state);
+}
+
+/*
+ * SDBBP EJTAG debug exception handler.
+ * We skip the instruction and return to the next instruction.
+ */
+void ejtag_exception_handler(struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned long depc, old_epc, old_ra;
+ unsigned int debug;
+
+ printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
+ depc = read_c0_depc();
+ debug = read_c0_debug();
+ printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
+ if (debug & 0x80000000) {
+ /*
+ * In branch delay slot.
+ * We cheat a little bit here and use EPC to calculate the
+ * debug return address (DEPC). EPC is restored after the
+ * calculation.
+ */
+ old_epc = regs->cp0_epc;
+ old_ra = regs->regs[31];
+ regs->cp0_epc = depc;
+ compute_return_epc(regs);
+ depc = regs->cp0_epc;
+ regs->cp0_epc = old_epc;
+ regs->regs[31] = old_ra;
+ } else
+ depc += 4;
+ write_c0_depc(depc);
+
+#if 0
+ printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
+ write_c0_debug(debug | 0x100);
+#endif
+}
+
+/*
+ * NMI exception handler.
+ * No lock; only written during early bootup by CPU 0.
+ */
+static RAW_NOTIFIER_HEAD(nmi_chain);
+
+int register_nmi_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&nmi_chain, nb);
+}
+
+void __noreturn nmi_exception_handler(struct pt_regs *regs)
+{
+ char str[100];
+
+ nmi_enter();
+ raw_notifier_call_chain(&nmi_chain, 0, regs);
+ bust_spinlocks(1);
+ snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
+ smp_processor_id(), regs->cp0_epc);
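+	/* An NMI saves its return address in ErrorEPC; report that as EPC. */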
+ regs->cp0_epc = read_c0_errorepc();
+ die(str, regs);
+ nmi_exit();
+}
+
+#define VECTORSPACING 0x100 /* for EI/VI mode */
+
+unsigned long ebase;
+EXPORT_SYMBOL_GPL(ebase);
+unsigned long exception_handlers[32];
+unsigned long vi_handlers[64];
+
+void __init *set_except_vector(int n, void *addr)
+{
+ unsigned long handler = (unsigned long) addr;
+ unsigned long old_handler;
+
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * Only the TLB handlers are cache aligned with an even
+ * address. All other handlers are on an odd address and
+ * require no modification. Otherwise, MIPS32 mode will
+ * be entered when handling any TLB exceptions. That
+ * would be bad...since we must stay in microMIPS mode.
+ */
+ if (!(handler & 0x1))
+ handler |= 1;
+#endif
+ old_handler = xchg(&exception_handlers[n], handler);
+
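+	/*
+	 * For CPUs with a dedicated interrupt vector (Cause.IV), patch a
+	 * small dispatch stub at ebase + 0x200: a direct j when the handler
+	 * is within direct-jump range of the vector, otherwise load the
+	 * address into $k0 (register 26) and jr through it.
+	 */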
+ if (n == 0 && cpu_has_divec) {
+#ifdef CONFIG_CPU_MICROMIPS
+ unsigned long jump_mask = ~((1 << 27) - 1);
+#else
+ unsigned long jump_mask = ~((1 << 28) - 1);
+#endif
+ u32 *buf = (u32 *)(ebase + 0x200);
+ unsigned int k0 = 26;
+ if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
+ uasm_i_j(&buf, handler & ~jump_mask);
+ uasm_i_nop(&buf);
+ } else {
+ UASM_i_LA(&buf, k0, handler);
+ uasm_i_jr(&buf, k0);
+ uasm_i_nop(&buf);
+ }
+ local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
+ }
+ return (void *)old_handler;
+}
+
+static void do_default_vi(void)
+{
+ show_regs(get_irq_regs());
+ panic("Caught unexpected vectored interrupt.");
+}
+
+static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
+{
+ unsigned long handler;
+ unsigned long old_handler = vi_handlers[n];
+ int srssets = current_cpu_data.srsets;
+ u16 *h;
+ unsigned char *b;
+
+ BUG_ON(!cpu_has_veic && !cpu_has_vint);
+
+ if (addr == NULL) {
+ handler = (unsigned long) do_default_vi;
+ srs = 0;
+ } else
+ handler = (unsigned long) addr;
+ vi_handlers[n] = handler;
+
+ b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
+
+ if (srs >= srssets)
+ panic("Shadow register set %d not supported", srs);
+
+ if (cpu_has_veic) {
+ if (board_bind_eic_interrupt)
+ board_bind_eic_interrupt(n, srs);
+ } else if (cpu_has_vint) {
+ /* SRSMap is only defined if shadow sets are implemented */
+ if (srssets > 1)
+ change_c0_srsmap(0xf << n*4, srs << n*4);
+ }
+
+ if (srs == 0) {
+ /*
+ * If no shadow set is selected then use the default handler
+ * that does normal register saving and standard interrupt exit
+ */
+ extern const u8 except_vec_vi[], except_vec_vi_lui[];
+ extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
+ extern const u8 rollback_except_vec_vi[];
+ const u8 *vec_start = using_rollback_handler() ?
+ rollback_except_vec_vi : except_vec_vi;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+ const int lui_offset = except_vec_vi_lui - vec_start + 2;
+ const int ori_offset = except_vec_vi_ori - vec_start + 2;
+#else
+ const int lui_offset = except_vec_vi_lui - vec_start;
+ const int ori_offset = except_vec_vi_ori - vec_start;
+#endif
+ const int handler_len = except_vec_vi_end - vec_start;
+
+ if (handler_len > VECTORSPACING) {
+ /*
+			 * Sigh... panicking won't help as the console
+ * is probably not configured :(
+ */
+ panic("VECTORSPACING too small");
+ }
+
+ set_handler(((unsigned long)b - ebase), vec_start,
+#ifdef CONFIG_CPU_MICROMIPS
+ (handler_len - 1));
+#else
+ handler_len);
+#endif
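+		/*
+		 * Patch the handler address into the copied stub: the upper
+		 * halfword goes into the lui immediate, the lower halfword
+		 * into the ori immediate.
+		 */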
+ h = (u16 *)(b + lui_offset);
+ *h = (handler >> 16) & 0xffff;
+ h = (u16 *)(b + ori_offset);
+ *h = (handler & 0xffff);
+ local_flush_icache_range((unsigned long)b,
+ (unsigned long)(b+handler_len));
+ }
+ else {
+ /*
+ * In other cases jump directly to the interrupt handler. It
+ * is the handler's responsibility to save registers if required
+ * (eg hi/lo) and return from the exception using "eret".
+ */
+ u32 insn;
+
+ h = (u16 *)b;
+ /* j handler */
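+		/*
+		 * The 26-bit target field of a direct jump holds a word index
+		 * (address >> 2) on classic MIPS and a halfword index
+		 * (address >> 1) on microMIPS.
+		 */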
+#ifdef CONFIG_CPU_MICROMIPS
+ insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
+#else
+ insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
+#endif
+ h[0] = (insn >> 16) & 0xffff;
+ h[1] = insn & 0xffff;
+ h[2] = 0;
+ h[3] = 0;
+ local_flush_icache_range((unsigned long)b,
+ (unsigned long)(b+8));
+ }
+
+ return (void *)old_handler;
+}
+
+void *set_vi_handler(int n, vi_handler_t addr)
+{
+ return set_vi_srs_handler(n, addr, 0);
+}
+
+extern void tlb_init(void);
+
+/*
+ * Timer interrupt
+ */
+int cp0_compare_irq;
+EXPORT_SYMBOL_GPL(cp0_compare_irq);
+int cp0_compare_irq_shift;
+
+/*
+ * Performance counter IRQ or -1 if shared with timer
+ */
+int cp0_perfcount_irq;
+EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
+
+/*
+ * Fast debug channel IRQ or -1 if not present
+ */
+int cp0_fdc_irq;
+EXPORT_SYMBOL_GPL(cp0_fdc_irq);
+
+static int noulri;
+
+static int __init ulri_disable(char *s)
+{
+ pr_info("Disabling ulri\n");
+ noulri = 1;
+
+ return 1;
+}
+__setup("noulri", ulri_disable);
+
+/* configure STATUS register */
+static void configure_status(void)
+{
+ /*
+ * Disable coprocessors and select 32-bit or 64-bit addressing
+ * and the 16/32 or 32/32 FPR register model. Reset the BEV
+ * flag that some firmware may have left set and the TS bit (for
+ * IP27). Set XX for ISA IV code to work.
+ */
+ unsigned int status_set = ST0_KERNEL_CUMASK;
+#ifdef CONFIG_64BIT
+ status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
+#endif
+ if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
+ status_set |= ST0_XX;
+ if (cpu_has_dsp)
+ status_set |= ST0_MX;
+
+ change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
+ status_set);
+ back_to_back_c0_hazard();
+}
+
+unsigned int hwrena;
+EXPORT_SYMBOL_GPL(hwrena);
+
+/* configure HWRENA register */
+static void configure_hwrena(void)
+{
+ hwrena = cpu_hwrena_impl_bits;
+
+ if (cpu_has_mips_r2_r6)
+ hwrena |= MIPS_HWRENA_CPUNUM |
+ MIPS_HWRENA_SYNCISTEP |
+ MIPS_HWRENA_CC |
+ MIPS_HWRENA_CCRES;
+
+ if (!noulri && cpu_has_userlocal)
+ hwrena |= MIPS_HWRENA_ULR;
+
+ if (hwrena)
+ write_c0_hwrena(hwrena);
+}
+
+static void configure_exception_vector(void)
+{
+ if (cpu_has_mips_r2_r6) {
+ unsigned long sr = set_c0_status(ST0_BEV);
+ /* If available, use WG to set top bits of EBASE */
+ if (cpu_has_ebase_wg) {
+#ifdef CONFIG_64BIT
+ write_c0_ebase_64(ebase | MIPS_EBASE_WG);
+#else
+ write_c0_ebase(ebase | MIPS_EBASE_WG);
+#endif
+ }
+ write_c0_ebase(ebase);
+ write_c0_status(sr);
+ }
+ if (cpu_has_veic || cpu_has_vint) {
+ /* Setting vector spacing enables EI/VI mode */
+ change_c0_intctl(0x3e0, VECTORSPACING);
+ }
+ if (cpu_has_divec) {
+ if (cpu_has_mipsmt) {
+ unsigned int vpflags = dvpe();
+ set_c0_cause(CAUSEF_IV);
+ evpe(vpflags);
+ } else
+ set_c0_cause(CAUSEF_IV);
+ }
+}
+
+void per_cpu_trap_init(bool is_boot_cpu)
+{
+ unsigned int cpu = smp_processor_id();
+
+ configure_status();
+ configure_hwrena();
+
+ configure_exception_vector();
+
+ /*
+	 * Before R2 these interrupt numbers were fixed to 7, so only on R2 and later:
+ *
+ * o read IntCtl.IPTI to determine the timer interrupt
+ * o read IntCtl.IPPCI to determine the performance counter interrupt
+ * o read IntCtl.IPFDC to determine the fast debug channel interrupt
+ */
+ if (cpu_has_mips_r2_r6) {
+ cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
+ cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
+ cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
+ cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
+ if (!cp0_fdc_irq)
+ cp0_fdc_irq = -1;
+
+ } else {
+ cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
+ cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
+ cp0_perfcount_irq = -1;
+ cp0_fdc_irq = -1;
+ }
+
+ if (cpu_has_mmid)
+ cpu_data[cpu].asid_cache = 0;
+ else if (!cpu_data[cpu].asid_cache)
+ cpu_data[cpu].asid_cache = asid_first_version(cpu);
+
+ mmgrab(&init_mm);
+ current->active_mm = &init_mm;
+ BUG_ON(current->mm);
+ enter_lazy_tlb(&init_mm, current);
+
+ /* Boot CPU's cache setup in setup_arch(). */
+ if (!is_boot_cpu)
+ cpu_cache_init();
+ tlb_init();
+ TLBMISS_HANDLER_SETUP();
+}
+
+/* Install CPU exception handler */
+void set_handler(unsigned long offset, const void *addr, unsigned long size)
+{
+#ifdef CONFIG_CPU_MICROMIPS
+ memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
+#else
+ memcpy((void *)(ebase + offset), addr, size);
+#endif
+ local_flush_icache_range(ebase + offset, ebase + offset + size);
+}
+
+static const char panic_null_cerr[] =
+ "Trying to set NULL cache error exception handler\n";
+
+/*
+ * Install uncached CPU exception handler.
+ * This is suitable only for the cache error exception which is the only
+ * exception handler that is being run uncached.
+ */
+void set_uncached_handler(unsigned long offset, void *addr,
+ unsigned long size)
+{
+ unsigned long uncached_ebase = CKSEG1ADDR(ebase);
+
+ if (!addr)
+ panic(panic_null_cerr);
+
+ memcpy((void *)(uncached_ebase + offset), addr, size);
+}
+
+static int __initdata rdhwr_noopt;
+static int __init set_rdhwr_noopt(char *str)
+{
+ rdhwr_noopt = 1;
+ return 1;
+}
+
+__setup("rdhwr_noopt", set_rdhwr_noopt);
+
+void __init trap_init(void)
+{
+ extern char except_vec3_generic;
+ extern char except_vec4;
+ extern char except_vec3_r4000;
+ unsigned long i, vec_size;
+ phys_addr_t ebase_pa;
+
+ check_wait();
+
+ if (!cpu_has_mips_r2_r6) {
+ ebase = CAC_BASE;
+ ebase_pa = virt_to_phys((void *)ebase);
+ vec_size = 0x400;
+
+ memblock_reserve(ebase_pa, vec_size);
+ } else {
+ if (cpu_has_veic || cpu_has_vint)
+ vec_size = 0x200 + VECTORSPACING*64;
+ else
+ vec_size = PAGE_SIZE;
+
+ ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
+ if (!ebase_pa)
+ panic("%s: Failed to allocate %lu bytes align=0x%x\n",
+ __func__, vec_size, 1 << fls(vec_size));
+
+ /*
+ * Try to ensure ebase resides in KSeg0 if possible.
+ *
+ * It shouldn't generally be in XKPhys on MIPS64 to avoid
+ * hitting a poorly defined exception base for Cache Errors.
+ * The allocation is likely to be in the low 512MB of physical,
+ * in which case we should be able to convert to KSeg0.
+ *
+ * EVA is special though as it allows segments to be rearranged
+ * and to become uncached during cache error handling.
+ */
+ if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
+ ebase = CKSEG0ADDR(ebase_pa);
+ else
+ ebase = (unsigned long)phys_to_virt(ebase_pa);
+ }
+
+ if (cpu_has_mmips) {
+ unsigned int config3 = read_c0_config3();
+
+ if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
+ write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
+ else
+ write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
+ }
+
+ if (board_ebase_setup)
+ board_ebase_setup();
+ per_cpu_trap_init(true);
+ memblock_set_bottom_up(false);
+
+ /*
+ * Copy the generic exception handlers to their final destination.
+ * This will be overridden later as suitable for a particular
+ * configuration.
+ */
+ set_handler(0x180, &except_vec3_generic, 0x80);
+
+ /*
+ * Setup default vectors
+ */
+ for (i = 0; i <= 31; i++)
+ set_except_vector(i, handle_reserved);
+
+ /*
+	 * Copy the EJTAG debug exception vector handler code to its final
+ * destination.
+ */
+ if (cpu_has_ejtag && board_ejtag_handler_setup)
+ board_ejtag_handler_setup();
+
+ /*
+ * Only some CPUs have the watch exceptions.
+ */
+ if (cpu_has_watch)
+ set_except_vector(EXCCODE_WATCH, handle_watch);
+
+ /*
+ * Initialise interrupt handlers
+ */
+ if (cpu_has_veic || cpu_has_vint) {
+ int nvec = cpu_has_veic ? 64 : 8;
+ for (i = 0; i < nvec; i++)
+ set_vi_handler(i, NULL);
+ }
+ else if (cpu_has_divec)
+ set_handler(0x200, &except_vec4, 0x8);
+
+ /*
+	 * Some CPUs can enable/disable cache parity detection, but they do
+	 * it in different ways.
+ */
+ parity_protection_init();
+
+ /*
+ * The Data Bus Errors / Instruction Bus Errors are signaled
+ * by external hardware. Therefore these two exceptions
+ * may have board specific handlers.
+ */
+ if (board_be_init)
+ board_be_init();
+
+ set_except_vector(EXCCODE_INT, using_rollback_handler() ?
+ rollback_handle_int : handle_int);
+ set_except_vector(EXCCODE_MOD, handle_tlbm);
+ set_except_vector(EXCCODE_TLBL, handle_tlbl);
+ set_except_vector(EXCCODE_TLBS, handle_tlbs);
+
+ set_except_vector(EXCCODE_ADEL, handle_adel);
+ set_except_vector(EXCCODE_ADES, handle_ades);
+
+ set_except_vector(EXCCODE_IBE, handle_ibe);
+ set_except_vector(EXCCODE_DBE, handle_dbe);
+
+ set_except_vector(EXCCODE_SYS, handle_sys);
+ set_except_vector(EXCCODE_BP, handle_bp);
+
+ if (rdhwr_noopt)
+ set_except_vector(EXCCODE_RI, handle_ri);
+ else {
+ if (cpu_has_vtag_icache)
+ set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
+ else if (current_cpu_type() == CPU_LOONGSON64)
+ set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
+ else
+ set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
+ }
+
+ set_except_vector(EXCCODE_CPU, handle_cpu);
+ set_except_vector(EXCCODE_OV, handle_ov);
+ set_except_vector(EXCCODE_TR, handle_tr);
+ set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
+
+ if (board_nmi_handler_setup)
+ board_nmi_handler_setup();
+
+ if (cpu_has_fpu && !cpu_has_nofpuex)
+ set_except_vector(EXCCODE_FPE, handle_fpe);
+
+ if (cpu_has_ftlbparex)
+ set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
+
+ if (cpu_has_gsexcex)
+ set_except_vector(LOONGSON_EXCCODE_GSEXC, handle_gsexc);
+
+ if (cpu_has_rixiex) {
+ set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
+ set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
+ }
+
+ set_except_vector(EXCCODE_MSADIS, handle_msa);
+ set_except_vector(EXCCODE_MDMX, handle_mdmx);
+
+ if (cpu_has_mcheck)
+ set_except_vector(EXCCODE_MCHECK, handle_mcheck);
+
+ if (cpu_has_mipsmt)
+ set_except_vector(EXCCODE_THREAD, handle_mt);
+
+ set_except_vector(EXCCODE_DSPDIS, handle_dsp);
+
+ if (board_cache_error_setup)
+ board_cache_error_setup();
+
+ if (cpu_has_vce)
+		/* Special exception: R4[04]00 also uses the divec space. */
+ set_handler(0x180, &except_vec3_r4000, 0x100);
+ else if (cpu_has_4kex)
+ set_handler(0x180, &except_vec3_generic, 0x80);
+ else
+ set_handler(0x080, &except_vec3_generic, 0x80);
+
+ local_flush_icache_range(ebase, ebase + vec_size);
+
+ sort_extable(__start___dbe_table, __stop___dbe_table);
+
+ cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
+}
+
+static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ configure_status();
+ configure_hwrena();
+ configure_exception_vector();
+
+ /* Restore register with CPU number for TLB handlers */
+ TLBMISS_HANDLER_RESTORE();
+
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block trap_pm_notifier_block = {
+ .notifier_call = trap_pm_notifier,
+};
+
+static int __init trap_pm_init(void)
+{
+ return cpu_pm_register_notifier(&trap_pm_notifier_block);
+}
+arch_initcall(trap_pm_init);
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
new file mode 100644
index 000000000..126a5f3f4
--- /dev/null
+++ b/arch/mips/kernel/unaligned.c
@@ -0,0 +1,1610 @@
+/*
+ * Handle unaligned accesses by emulation.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ *
+ * This file contains exception handler for address error exception with the
+ * special capability to execute faulting instructions in software. The
+ * handler does not try to handle the case when the program counter points
+ * to an address not aligned to a word boundary.
+ *
+ * Putting data at unaligned addresses is bad practice even on Intel, where
+ * only performance is affected. Much worse is that such code is not
+ * portable. Because several programs died on MIPS due to alignment
+ * problems, I decided to implement this handler anyway, though I originally
+ * didn't intend to do this at all for user code.
+ *
+ * For now I enable fixing of address errors by default to make life easier.
+ * However, I intend to disable this at some point in the future once the alignment
+ * problems with user programs have been fixed. For programmers this is the
+ * right way to go.
+ *
+ * Fixing address errors is a per process option. The option is inherited
+ * across fork(2) and execve(2) calls. If you really want to use the
+ * option in your user programs - I discourage the use of the software
+ * emulation strongly - use the following code in your userland stuff:
+ *
+ * #include <sys/sysmips.h>
+ *
+ * ...
+ * sysmips(MIPS_FIXADE, x);
+ * ...
+ *
+ * The argument x is 0 to disable software emulation and non-zero to enable it.
+ *
+ * Below a little program to play around with this feature.
+ *
+ * #include <stdio.h>
+ * #include <sys/sysmips.h>
+ *
+ * struct foo {
+ * unsigned char bar[8];
+ * };
+ *
+ * main(int argc, char *argv[])
+ * {
+ * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
+ * unsigned int *p = (unsigned int *) (x.bar + 3);
+ * int i;
+ *
+ * if (argc > 1)
+ * sysmips(MIPS_FIXADE, atoi(argv[1]));
+ *
+ * printf("*p = %08lx\n", *p);
+ *
+ * *p = 0xdeadface;
+ *
+ * for(i = 0; i <= 7; i++)
+ * printf("%02x ", x.bar[i]);
+ * printf("\n");
+ * }
+ *
+ * Coprocessor loads are not supported; I think this case is unimportant
+ * in practice.
+ *
+ * TODO: Handle ndc (attempted store to doubleword in uncached memory)
+ * exception for the R6000.
+ * A store crossing a page boundary might be executed only partially.
+ * Undo the partial store in this case.
+ */
+#include <linux/context_tracking.h>
+#include <linux/mm.h>
+#include <linux/signal.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/debugfs.h>
+#include <linux/perf_event.h>
+
+#include <asm/asm.h>
+#include <asm/branch.h>
+#include <asm/byteorder.h>
+#include <asm/cop2.h>
+#include <asm/debug.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
+#include <asm/inst.h>
+#include <asm/unaligned-emul.h>
+#include <asm/mmu_context.h>
+#include <linux/uaccess.h>
+
+enum {
+ UNALIGNED_ACTION_QUIET,
+ UNALIGNED_ACTION_SIGNAL,
+ UNALIGNED_ACTION_SHOW,
+};
+#ifdef CONFIG_DEBUG_FS
+static u32 unaligned_instructions;
+static u32 unaligned_action;
+#else
+#define unaligned_action UNALIGNED_ACTION_QUIET
+#endif
+extern void show_registers(struct pt_regs *regs);
+
+static void emulate_load_store_insn(struct pt_regs *regs,
+ void __user *addr, unsigned int __user *pc)
+{
+ unsigned long origpc, orig31, value;
+ union mips_instruction insn;
+ unsigned int res;
+#ifdef CONFIG_EVA
+ mm_segment_t seg;
+#endif
+ origpc = (unsigned long)pc;
+ orig31 = regs->regs[31];
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
+
+ /*
+ * This load never faults.
+ */
+ __get_user(insn.word, pc);
+
+ switch (insn.i_format.opcode) {
+ /*
+ * These are instructions that a compiler doesn't generate. We
+ * can assume therefore that the code is MIPS-aware and
+ * really buggy. Emulating these instructions would break the
+ * semantics anyway.
+ */
+ case ll_op:
+ case lld_op:
+ case sc_op:
+ case scd_op:
+
+ /*
+ * For these instructions the only way to create an address
+ * error is an attempted access to kernel/supervisor address
+ * space.
+ */
+ case ldl_op:
+ case ldr_op:
+ case lwl_op:
+ case lwr_op:
+ case sdl_op:
+ case sdr_op:
+ case swl_op:
+ case swr_op:
+ case lb_op:
+ case lbu_op:
+ case sb_op:
+ goto sigbus;
+
+ /*
+ * The remaining opcodes are the ones that are really of
+ * interest.
+ */
+ case spec3_op:
+ if (insn.dsp_format.func == lx_op) {
+ switch (insn.dsp_format.op) {
+ case lwx_op:
+ if (!access_ok(addr, 4))
+ goto sigbus;
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.dsp_format.rd] = value;
+ break;
+ case lhx_op:
+ if (!access_ok(addr, 2))
+ goto sigbus;
+ LoadHW(addr, value, res);
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.dsp_format.rd] = value;
+ break;
+ default:
+ goto sigill;
+ }
+ }
+#ifdef CONFIG_EVA
+ else {
+ /*
+		 * We can land here only from the kernel accessing user
+		 * memory, so we need to "switch" the address limit to
+		 * user space so that the address check works properly.
+ */
+ seg = force_uaccess_begin();
+ switch (insn.spec3_format.func) {
+ case lhe_op:
+ if (!access_ok(addr, 2)) {
+ force_uaccess_end(seg);
+ goto sigbus;
+ }
+ LoadHWE(addr, value, res);
+ if (res) {
+ force_uaccess_end(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case lwe_op:
+ if (!access_ok(addr, 4)) {
+ force_uaccess_end(seg);
+ goto sigbus;
+ }
+ LoadWE(addr, value, res);
+ if (res) {
+ force_uaccess_end(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case lhue_op:
+ if (!access_ok(addr, 2)) {
+ force_uaccess_end(seg);
+ goto sigbus;
+ }
+ LoadHWUE(addr, value, res);
+ if (res) {
+ force_uaccess_end(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case she_op:
+ if (!access_ok(addr, 2)) {
+ force_uaccess_end(seg);
+ goto sigbus;
+ }
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+ StoreHWE(addr, value, res);
+ if (res) {
+ force_uaccess_end(seg);
+ goto fault;
+ }
+ break;
+ case swe_op:
+ if (!access_ok(addr, 4)) {
+ force_uaccess_end(seg);
+ goto sigbus;
+ }
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+ StoreWE(addr, value, res);
+ if (res) {
+ force_uaccess_end(seg);
+ goto fault;
+ }
+ break;
+ default:
+ force_uaccess_end(seg);
+ goto sigill;
+ }
+ force_uaccess_end(seg);
+ }
+#endif
+ break;
+ case lh_op:
+ if (!access_ok(addr, 2))
+ goto sigbus;
+
+ if (IS_ENABLED(CONFIG_EVA)) {
+ if (uaccess_kernel())
+ LoadHW(addr, value, res);
+ else
+ LoadHWE(addr, value, res);
+ } else {
+ LoadHW(addr, value, res);
+ }
+
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.i_format.rt] = value;
+ break;
+
+ case lw_op:
+ if (!access_ok(addr, 4))
+ goto sigbus;
+
+ if (IS_ENABLED(CONFIG_EVA)) {
+ if (uaccess_kernel())
+ LoadW(addr, value, res);
+ else
+ LoadWE(addr, value, res);
+ } else {
+ LoadW(addr, value, res);
+ }
+
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.i_format.rt] = value;
+ break;
+
+ case lhu_op:
+ if (!access_ok(addr, 2))
+ goto sigbus;
+
+ if (IS_ENABLED(CONFIG_EVA)) {
+ if (uaccess_kernel())
+ LoadHWU(addr, value, res);
+ else
+ LoadHWUE(addr, value, res);
+ } else {
+ LoadHWU(addr, value, res);
+ }
+
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.i_format.rt] = value;
+ break;
+
+ case lwu_op:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(addr, 4))
+ goto sigbus;
+
+ LoadWU(addr, value, res);
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.i_format.rt] = value;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+ case ld_op:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(addr, 8))
+ goto sigbus;
+
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.i_format.rt] = value;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+ case sh_op:
+ if (!access_ok(addr, 2))
+ goto sigbus;
+
+ compute_return_epc(regs);
+ value = regs->regs[insn.i_format.rt];
+
+ if (IS_ENABLED(CONFIG_EVA)) {
+ if (uaccess_kernel())
+ StoreHW(addr, value, res);
+ else
+ StoreHWE(addr, value, res);
+ } else {
+ StoreHW(addr, value, res);
+ }
+
+ if (res)
+ goto fault;
+ break;
+
+ case sw_op:
+ if (!access_ok(addr, 4))
+ goto sigbus;
+
+ compute_return_epc(regs);
+ value = regs->regs[insn.i_format.rt];
+
+ if (IS_ENABLED(CONFIG_EVA)) {
+ if (uaccess_kernel())
+ StoreW(addr, value, res);
+ else
+ StoreWE(addr, value, res);
+ } else {
+ StoreW(addr, value, res);
+ }
+
+ if (res)
+ goto fault;
+ break;
+
+ case sd_op:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(addr, 8))
+ goto sigbus;
+
+ compute_return_epc(regs);
+ value = regs->regs[insn.i_format.rt];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+ case lwc1_op:
+ case ldc1_op:
+ case swc1_op:
+ case sdc1_op:
+ case cop1x_op: {
+ void __user *fault_addr = NULL;
+
+ die_if_kernel("Unaligned FP access in kernel code", regs);
+ BUG_ON(!used_math());
+
+ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+ &fault_addr);
+ own_fpu(1); /* Restore FPU state. */
+
+ /* Signal if something went wrong. */
+ process_fpemu_return(res, fault_addr, 0);
+
+ if (res == 0)
+ break;
+ return;
+ }
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+ case msa_op: {
+ unsigned int wd, preempted;
+ enum msa_2b_fmt df;
+ union fpureg *fpr;
+
+ if (!cpu_has_msa)
+ goto sigill;
+
+ /*
+ * If we've reached this point then userland should have taken
+ * the MSA disabled exception & initialised vector context at
+ * some point in the past.
+ */
+ BUG_ON(!thread_msa_context_live());
+
+ df = insn.msa_mi10_format.df;
+ wd = insn.msa_mi10_format.wd;
+ fpr = &current->thread.fpu.fpr[wd];
+
+ switch (insn.msa_mi10_format.func) {
+ case msa_ld_op:
+ if (!access_ok(addr, sizeof(*fpr)))
+ goto sigbus;
+
+ do {
+ /*
+ * If we have live MSA context keep track of
+ * whether we get preempted in order to avoid
+ * the register context we load being clobbered
+ * by the live context as it's saved during
+ * preemption. If we don't have live context
+ * then it can't be saved to clobber the value
+ * we load.
+ */
+ preempted = test_thread_flag(TIF_USEDMSA);
+
+ res = __copy_from_user_inatomic(fpr, addr,
+ sizeof(*fpr));
+ if (res)
+ goto fault;
+
+ /*
+ * Update the hardware register if it is in use
+ * by the task in this quantum, in order to
+ * avoid having to save & restore the whole
+ * vector context.
+ */
+ preempt_disable();
+ if (test_thread_flag(TIF_USEDMSA)) {
+ write_msa_wr(wd, fpr, df);
+ preempted = 0;
+ }
+ preempt_enable();
+ } while (preempted);
+ break;
+
+ case msa_st_op:
+ if (!access_ok(addr, sizeof(*fpr)))
+ goto sigbus;
+
+ /*
+ * Update from the hardware register if it is in use by
+ * the task in this quantum, in order to avoid having to
+ * save & restore the whole vector context.
+ */
+ preempt_disable();
+ if (test_thread_flag(TIF_USEDMSA))
+ read_msa_wr(wd, fpr, df);
+ preempt_enable();
+
+ res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
+ if (res)
+ goto fault;
+ break;
+
+ default:
+ goto sigbus;
+ }
+
+ compute_return_epc(regs);
+ break;
+ }
+#endif /* CONFIG_CPU_HAS_MSA */
+
+#ifndef CONFIG_CPU_MIPSR6
+ /*
+	 * COP2 is available to the implementor for application-specific use.
+	 * It's up to applications to register a notifier chain and do
+	 * whatever they have to do, including possibly sending signals.
+	 *
+	 * These instructions have been reallocated in Release 6.
+ */
+ case lwc2_op:
+ cu2_notifier_call_chain(CU2_LWC2_OP, regs);
+ break;
+
+ case ldc2_op:
+ cu2_notifier_call_chain(CU2_LDC2_OP, regs);
+ break;
+
+ case swc2_op:
+ cu2_notifier_call_chain(CU2_SWC2_OP, regs);
+ break;
+
+ case sdc2_op:
+ cu2_notifier_call_chain(CU2_SDC2_OP, regs);
+ break;
+#endif
+ default:
+ /*
+		 * Pheeee...  We encountered a yet unknown instruction or
+ * cache coherence problem. Die sucker, die ...
+ */
+ goto sigill;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ unaligned_instructions++;
+#endif
+
+ return;
+
+fault:
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
+ /* Did we have an exception handler installed? */
+ if (fixup_exception(regs))
+ return;
+
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGSEGV);
+
+ return;
+
+sigbus:
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGBUS);
+
+ return;
+
+sigill:
+ die_if_kernel
+ ("Unhandled kernel unaligned access or invalid instruction", regs);
+ force_sig(SIGILL);
+}
+
+/* Recode table from 16-bit register notation to 32-bit GPR. */
+const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
+
+/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
+static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
+
+static void emulate_load_store_microMIPS(struct pt_regs *regs,
+ void __user *addr)
+{
+ unsigned long value;
+ unsigned int res;
+ int i;
+ unsigned int reg = 0, rvar;
+ unsigned long orig31;
+ u16 __user *pc16;
+ u16 halfword;
+ unsigned int word;
+ unsigned long origpc, contpc;
+ union mips_instruction insn;
+ struct mm_decoded_insn mminsn;
+
+ origpc = regs->cp0_epc;
+ orig31 = regs->regs[31];
+
+ mminsn.micro_mips_mode = 1;
+
+ /*
+ * This load never faults.
+ */
+ pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 2;
+ word = ((unsigned int)halfword << 16);
+ mminsn.pc_inc = 2;
+
+ if (!mm_insn_16bit(halfword)) {
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 4;
+ mminsn.pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.insn = word;
+
+ if (get_user(halfword, pc16))
+ goto fault;
+ mminsn.next_pc_inc = 2;
+ word = ((unsigned int)halfword << 16);
+
+ if (!mm_insn_16bit(halfword)) {
+ pc16++;
+ if (get_user(halfword, pc16))
+ goto fault;
+ mminsn.next_pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.next_insn = word;
+
+ insn = (union mips_instruction)(mminsn.insn);
+ if (mm_isBranchInstr(regs, mminsn, &contpc))
+ insn = (union mips_instruction)(mminsn.next_insn);
+
+ /* Parse instruction to find what to do */
+
+ switch (insn.mm_i_format.opcode) {
+
+ case mm_pool32a_op:
+ switch (insn.mm_x_format.func) {
+ case mm_lwxs_op:
+ reg = insn.mm_x_format.rd;
+ goto loadW;
+ }
+
+ goto sigbus;
+
+ case mm_pool32b_op:
+ switch (insn.mm_m_format.func) {
+ case mm_lwp_func:
+ reg = insn.mm_m_format.rd;
+ if (reg == 31)
+ goto sigbus;
+
+ if (!access_ok(addr, 8))
+ goto sigbus;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ addr += 4;
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg + 1] = value;
+ goto success;
+
+ case mm_swp_func:
+ reg = insn.mm_m_format.rd;
+ if (reg == 31)
+ goto sigbus;
+
+ if (!access_ok(addr, 8))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ value = regs->regs[reg + 1];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+
+ case mm_ldp_func:
+#ifdef CONFIG_64BIT
+ reg = insn.mm_m_format.rd;
+ if (reg == 31)
+ goto sigbus;
+
+ if (!access_ok(addr, 16))
+ goto sigbus;
+
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ addr += 8;
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg + 1] = value;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ goto sigill;
+
+ case mm_sdp_func:
+#ifdef CONFIG_64BIT
+ reg = insn.mm_m_format.rd;
+ if (reg == 31)
+ goto sigbus;
+
+ if (!access_ok(addr, 16))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 8;
+ value = regs->regs[reg + 1];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ goto sigill;
+
+ case mm_lwm32_func:
+ reg = insn.mm_m_format.rd;
+ rvar = reg & 0xf;
+ if ((rvar > 9) || !reg)
+ goto sigill;
+ if (reg & 0x10) {
+ if (!access_ok(addr, 4 * (rvar + 1)))
+ goto sigbus;
+ } else {
+ if (!access_ok(addr, 4 * rvar))
+ goto sigbus;
+ }
+ if (rvar == 9)
+ rvar = 8;
+ for (i = 16; rvar; rvar--, i++) {
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ regs->regs[i] = value;
+ }
+ if ((reg & 0xf) == 9) {
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ regs->regs[30] = value;
+ }
+ if (reg & 0x10) {
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[31] = value;
+ }
+ goto success;
+
+ case mm_swm32_func:
+ reg = insn.mm_m_format.rd;
+ rvar = reg & 0xf;
+ if ((rvar > 9) || !reg)
+ goto sigill;
+ if (reg & 0x10) {
+ if (!access_ok(addr, 4 * (rvar + 1)))
+ goto sigbus;
+ } else {
+ if (!access_ok(addr, 4 * rvar))
+ goto sigbus;
+ }
+ if (rvar == 9)
+ rvar = 8;
+ for (i = 16; rvar; rvar--, i++) {
+ value = regs->regs[i];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ }
+ if ((reg & 0xf) == 9) {
+ value = regs->regs[30];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ }
+ if (reg & 0x10) {
+ value = regs->regs[31];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ }
+ goto success;
+
+ case mm_ldm_func:
+#ifdef CONFIG_64BIT
+ reg = insn.mm_m_format.rd;
+ rvar = reg & 0xf;
+ if ((rvar > 9) || !reg)
+ goto sigill;
+ if (reg & 0x10) {
+ if (!access_ok(addr, 8 * (rvar + 1)))
+ goto sigbus;
+ } else {
+ if (!access_ok(addr, 8 * rvar))
+ goto sigbus;
+ }
+ if (rvar == 9)
+ rvar = 8;
+
+ for (i = 16; rvar; rvar--, i++) {
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ regs->regs[i] = value;
+ }
+ if ((reg & 0xf) == 9) {
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 8;
+ regs->regs[30] = value;
+ }
+ if (reg & 0x10) {
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[31] = value;
+ }
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ goto sigill;
+
+ case mm_sdm_func:
+#ifdef CONFIG_64BIT
+ reg = insn.mm_m_format.rd;
+ rvar = reg & 0xf;
+ if ((rvar > 9) || !reg)
+ goto sigill;
+ if (reg & 0x10) {
+ if (!access_ok(addr, 8 * (rvar + 1)))
+ goto sigbus;
+ } else {
+ if (!access_ok(addr, 8 * rvar))
+ goto sigbus;
+ }
+ if (rvar == 9)
+ rvar = 8;
+
+ for (i = 16; rvar; rvar--, i++) {
+ value = regs->regs[i];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 8;
+ }
+ if ((reg & 0xf) == 9) {
+ value = regs->regs[30];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 8;
+ }
+ if (reg & 0x10) {
+ value = regs->regs[31];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ }
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ goto sigill;
+
+ /* LWC2, SWC2, LDC2, SDC2 are not serviced */
+ }
+
+ goto sigbus;
+
+ case mm_pool32c_op:
+ switch (insn.mm_m_format.func) {
+ case mm_lwu_func:
+ reg = insn.mm_m_format.rd;
+ goto loadWU;
+ }
+
+ /* LL,SC,LLD,SCD are not serviced */
+ goto sigbus;
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case mm_pool32f_op:
+ switch (insn.mm_x_format.func) {
+ case mm_lwxc1_func:
+ case mm_swxc1_func:
+ case mm_ldxc1_func:
+ case mm_sdxc1_func:
+ goto fpu_emul;
+ }
+
+ goto sigbus;
+
+ case mm_ldc132_op:
+ case mm_sdc132_op:
+ case mm_lwc132_op:
+ case mm_swc132_op: {
+ void __user *fault_addr = NULL;
+
+fpu_emul:
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
+
+ die_if_kernel("Unaligned FP access in kernel code", regs);
+ BUG_ON(!used_math());
+ BUG_ON(!is_fpu_owner());
+
+ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+ &fault_addr);
+ own_fpu(1); /* restore FPU state */
+
+ /* If something went wrong, signal */
+ process_fpemu_return(res, fault_addr, 0);
+
+ if (res == 0)
+ goto success;
+ return;
+ }
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+ case mm_lh32_op:
+ reg = insn.mm_i_format.rt;
+ goto loadHW;
+
+ case mm_lhu32_op:
+ reg = insn.mm_i_format.rt;
+ goto loadHWU;
+
+ case mm_lw32_op:
+ reg = insn.mm_i_format.rt;
+ goto loadW;
+
+ case mm_sh32_op:
+ reg = insn.mm_i_format.rt;
+ goto storeHW;
+
+ case mm_sw32_op:
+ reg = insn.mm_i_format.rt;
+ goto storeW;
+
+ case mm_ld32_op:
+ reg = insn.mm_i_format.rt;
+ goto loadDW;
+
+ case mm_sd32_op:
+ reg = insn.mm_i_format.rt;
+ goto storeDW;
+
+ case mm_pool16c_op:
+ switch (insn.mm16_m_format.func) {
+ case mm_lwm16_op:
+ reg = insn.mm16_m_format.rlist;
+ rvar = reg + 1;
+ if (!access_ok(addr, 4 * rvar))
+ goto sigbus;
+
+ for (i = 16; rvar; rvar--, i++) {
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ regs->regs[i] = value;
+ }
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[31] = value;
+
+ goto success;
+
+ case mm_swm16_op:
+ reg = insn.mm16_m_format.rlist;
+ rvar = reg + 1;
+ if (!access_ok(addr, 4 * rvar))
+ goto sigbus;
+
+ for (i = 16; rvar; rvar--, i++) {
+ value = regs->regs[i];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ }
+ value = regs->regs[31];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+
+ goto success;
+
+ }
+
+ goto sigbus;
+
+ case mm_lhu16_op:
+ reg = reg16to32[insn.mm16_rb_format.rt];
+ goto loadHWU;
+
+ case mm_lw16_op:
+ reg = reg16to32[insn.mm16_rb_format.rt];
+ goto loadW;
+
+ case mm_sh16_op:
+ reg = reg16to32st[insn.mm16_rb_format.rt];
+ goto storeHW;
+
+ case mm_sw16_op:
+ reg = reg16to32st[insn.mm16_rb_format.rt];
+ goto storeW;
+
+ case mm_lwsp16_op:
+ reg = insn.mm16_r5_format.rt;
+ goto loadW;
+
+ case mm_swsp16_op:
+ reg = insn.mm16_r5_format.rt;
+ goto storeW;
+
+ case mm_lwgp16_op:
+ reg = reg16to32[insn.mm16_r3_format.rt];
+ goto loadW;
+
+ default:
+ goto sigill;
+ }
+
+loadHW:
+ if (!access_ok(addr, 2))
+ goto sigbus;
+
+ LoadHW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+
+loadHWU:
+ if (!access_ok(addr, 2))
+ goto sigbus;
+
+ LoadHWU(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+
+loadW:
+ if (!access_ok(addr, 4))
+ goto sigbus;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+
+loadWU:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(addr, 4))
+ goto sigbus;
+
+ LoadWU(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+loadDW:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(addr, 8))
+ goto sigbus;
+
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+storeHW:
+ if (!access_ok(addr, 2))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreHW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+
+storeW:
+ if (!access_ok(addr, 4))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+
+storeDW:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(addr, 8))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+success:
+ regs->cp0_epc = contpc; /* advance or branch */
+
+#ifdef CONFIG_DEBUG_FS
+ unaligned_instructions++;
+#endif
+ return;
+
+fault:
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
+ /* Did we have an exception handler installed? */
+ if (fixup_exception(regs))
+ return;
+
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGSEGV);
+
+ return;
+
+sigbus:
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGBUS);
+
+ return;
+
+sigill:
+ die_if_kernel
+ ("Unhandled kernel unaligned access or invalid instruction", regs);
+ force_sig(SIGILL);
+}
+
+static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
+{
+ unsigned long value;
+ unsigned int res;
+ int reg;
+ unsigned long orig31;
+ u16 __user *pc16;
+ unsigned long origpc;
+ union mips16e_instruction mips16inst, oldinst;
+ unsigned int opcode;
+ int extended = 0;
+
+ origpc = regs->cp0_epc;
+ orig31 = regs->regs[31];
+ pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
+ /*
+ * This load never faults.
+ */
+ __get_user(mips16inst.full, pc16);
+ oldinst = mips16inst;
+
+ /* skip EXTEND instruction */
+ if (mips16inst.ri.opcode == MIPS16e_extend_op) {
+ extended = 1;
+ pc16++;
+ __get_user(mips16inst.full, pc16);
+ } else if (delay_slot(regs)) {
+ /* skip jump instructions */
+ /* JAL/JALX are 32 bits but have OPCODE in first short int */
+ if (mips16inst.ri.opcode == MIPS16e_jal_op)
+ pc16++;
+ pc16++;
+ if (get_user(mips16inst.full, pc16))
+ goto sigbus;
+ }
+
+ opcode = mips16inst.ri.opcode;
+ switch (opcode) {
+ case MIPS16e_i64_op: /* I64 or RI64 instruction */
+ switch (mips16inst.i64.func) { /* I64/RI64 func field check */
+ case MIPS16e_ldpc_func:
+ case MIPS16e_ldsp_func:
+ reg = reg16to32[mips16inst.ri64.ry];
+ goto loadDW;
+
+ case MIPS16e_sdsp_func:
+ reg = reg16to32[mips16inst.ri64.ry];
+ goto writeDW;
+
+ case MIPS16e_sdrasp_func:
+ reg = 29; /* GPRSP */
+ goto writeDW;
+ }
+
+ goto sigbus;
+
+ case MIPS16e_swsp_op:
+ reg = reg16to32[mips16inst.ri.rx];
+ if (extended && cpu_has_mips16e2)
+ switch (mips16inst.ri.imm >> 5) {
+ case 0: /* SWSP */
+ case 1: /* SWGP */
+ break;
+ case 2: /* SHGP */
+ opcode = MIPS16e_sh_op;
+ break;
+ default:
+ goto sigbus;
+ }
+ break;
+
+ case MIPS16e_lwpc_op:
+ reg = reg16to32[mips16inst.ri.rx];
+ break;
+
+ case MIPS16e_lwsp_op:
+ reg = reg16to32[mips16inst.ri.rx];
+ if (extended && cpu_has_mips16e2)
+ switch (mips16inst.ri.imm >> 5) {
+ case 0: /* LWSP */
+ case 1: /* LWGP */
+ break;
+ case 2: /* LHGP */
+ opcode = MIPS16e_lh_op;
+ break;
+ case 4: /* LHUGP */
+ opcode = MIPS16e_lhu_op;
+ break;
+ default:
+ goto sigbus;
+ }
+ break;
+
+ case MIPS16e_i8_op:
+ if (mips16inst.i8.func != MIPS16e_swrasp_func)
+ goto sigbus;
+ reg = 29; /* GPRSP */
+ break;
+
+ default:
+ reg = reg16to32[mips16inst.rri.ry];
+ break;
+ }
+
+ switch (opcode) {
+
+ case MIPS16e_lb_op:
+ case MIPS16e_lbu_op:
+ case MIPS16e_sb_op:
+ goto sigbus;
+
+ case MIPS16e_lh_op:
+ if (!access_ok(addr, 2))
+ goto sigbus;
+
+ LoadHW(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+
+ case MIPS16e_lhu_op:
+ if (!access_ok(addr, 2))
+ goto sigbus;
+
+ LoadHWU(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+
+ case MIPS16e_lw_op:
+ case MIPS16e_lwpc_op:
+ case MIPS16e_lwsp_op:
+ if (!access_ok(addr, 4))
+ goto sigbus;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+
+ case MIPS16e_lwu_op:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(addr, 4))
+ goto sigbus;
+
+ LoadWU(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+ case MIPS16e_ld_op:
+loadDW:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(addr, 8))
+ goto sigbus;
+
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+ case MIPS16e_sh_op:
+ if (!access_ok(addr, 2))
+ goto sigbus;
+
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ value = regs->regs[reg];
+ StoreHW(addr, value, res);
+ if (res)
+ goto fault;
+ break;
+
+ case MIPS16e_sw_op:
+ case MIPS16e_swsp_op:
+ case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
+ if (!access_ok(addr, 4))
+ goto sigbus;
+
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ value = regs->regs[reg];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ break;
+
+ case MIPS16e_sd_op:
+writeDW:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(addr, 8))
+ goto sigbus;
+
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ value = regs->regs[reg];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+ default:
+ /*
+		 * Pheeee...  We encountered a yet unknown instruction or
+ * cache coherence problem. Die sucker, die ...
+ */
+ goto sigill;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ unaligned_instructions++;
+#endif
+
+ return;
+
+fault:
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
+ /* Did we have an exception handler installed? */
+ if (fixup_exception(regs))
+ return;
+
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGSEGV);
+
+ return;
+
+sigbus:
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGBUS);
+
+ return;
+
+sigill:
+ die_if_kernel
+ ("Unhandled kernel unaligned access or invalid instruction", regs);
+ force_sig(SIGILL);
+}
+
+asmlinkage void do_ade(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+ unsigned int __user *pc;
+ mm_segment_t seg;
+
+ prev_state = exception_enter();
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
+ 1, regs, regs->cp0_badvaddr);
+ /*
+ * Did we catch a fault trying to load an instruction?
+ */
+ if (regs->cp0_badvaddr == regs->cp0_epc)
+ goto sigbus;
+
+ if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
+ goto sigbus;
+ if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
+ goto sigbus;
+
+ /*
+ * Do branch emulation only if we didn't forward the exception.
+	 * This is all so ugly ...
+ */
+
+ /*
+ * Are we running in microMIPS mode?
+ */
+ if (get_isa16_mode(regs->cp0_epc)) {
+ /*
+ * Did we catch a fault trying to load an instruction in
+ * 16-bit mode?
+ */
+ if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
+ goto sigbus;
+ if (unaligned_action == UNALIGNED_ACTION_SHOW)
+ show_registers(regs);
+
+ if (cpu_has_mmips) {
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+ emulate_load_store_microMIPS(regs,
+ (void __user *)regs->cp0_badvaddr);
+ set_fs(seg);
+
+ return;
+ }
+
+ if (cpu_has_mips16) {
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+ emulate_load_store_MIPS16e(regs,
+ (void __user *)regs->cp0_badvaddr);
+ set_fs(seg);
+
+ return;
+ }
+
+ goto sigbus;
+ }
+
+ if (unaligned_action == UNALIGNED_ACTION_SHOW)
+ show_registers(regs);
+ pc = (unsigned int __user *)exception_epc(regs);
+
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+ emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
+ set_fs(seg);
+
+ return;
+
+sigbus:
+ die_if_kernel("Kernel unaligned instruction access", regs);
+ force_sig(SIGBUS);
+
+ /*
+ * XXX On return from the signal handler we should advance the epc
+ */
+ exception_exit(prev_state);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int __init debugfs_unaligned(void)
+{
+ debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
+ &unaligned_instructions);
+ debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
+ mips_debugfs_dir, &unaligned_action);
+ return 0;
+}
+arch_initcall(debugfs_unaligned);
+#endif
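
The two debugfs files created above let user space watch how many accesses were emulated and switch unaligned_action between quiet (0), signal (1) and show-registers (2). A small hedged example that reads the counter, assuming debugfs is mounted at the usual /sys/kernel/debug location:

	/* Illustrative userland reader; not part of this patch. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int count = 0;
		FILE *f = fopen("/sys/kernel/debug/mips/unaligned_instructions", "r");

		if (!f) {
			perror("unaligned_instructions");
			return 1;
		}
		if (fscanf(f, "%u", &count) == 1)
			printf("emulated unaligned accesses: %u\n", count);
		fclose(f);
		return 0;
	}
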
diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
new file mode 100644
index 000000000..6dbe4eab0
--- /dev/null
+++ b/arch/mips/kernel/uprobes.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/highmem.h>
+#include <linux/kdebug.h>
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/uprobes.h>
+
+#include <asm/branch.h>
+#include <asm/cpu-features.h>
+#include <asm/ptrace.h>
+
+#include "probes-common.h"
+
+static inline int insn_has_delay_slot(const union mips_instruction insn)
+{
+ return __insn_has_delay_slot(insn);
+}
+
+/**
+ * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
+ * @mm: the probed address space.
+ * @aup: the probepoint information.
+ * @mm: the probed address space.
+ * @addr: virtual address at which to install the probepoint.
+ * Return 0 on success or a negative errno on error.
+int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
+ struct mm_struct *mm, unsigned long addr)
+{
+ union mips_instruction inst;
+
+ /*
+ * For the time being this also blocks attempts to use uprobes with
+ * MIPS16 and microMIPS.
+ */
+ if (addr & 0x03)
+ return -EINVAL;
+
+ inst.word = aup->insn[0];
+
+ if (__insn_is_compact_branch(inst)) {
+ pr_notice("Uprobes for compact branches are not supported\n");
+ return -EINVAL;
+ }
+
+ aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)];
+ aup->ixol[1] = UPROBE_BRK_UPROBE_XOL; /* NOP */
+
+ return 0;
+}
+
+/**
+ * is_trap_insn - check if the instruction is a trap variant
+ * @insn: instruction to be checked.
+ * Returns true if @insn is a trap variant.
+ *
+ * This definition overrides the weak definition in kernel/events/uprobes.c
+ * and is needed for the case where an architecture has multiple trap
+ * instructions (like PowerPC or MIPS). We treat BREAK just like the more
+ * modern conditional trap instructions.
+ */
+bool is_trap_insn(uprobe_opcode_t *insn)
+{
+ union mips_instruction inst;
+
+ inst.word = *insn;
+
+ switch (inst.i_format.opcode) {
+ case spec_op:
+ switch (inst.r_format.func) {
+ case break_op:
+ case teq_op:
+ case tge_op:
+ case tgeu_op:
+ case tlt_op:
+ case tltu_op:
+ case tne_op:
+ return 1;
+ }
+ break;
+
+ case bcond_op: /* Yes, really ... */
+ switch (inst.u_format.rt) {
+ case teqi_op:
+ case tgei_op:
+ case tgeiu_op:
+ case tlti_op:
+ case tltiu_op:
+ case tnei_op:
+ return 1;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+#define UPROBE_TRAP_NR ULONG_MAX
+
+/*
+ * arch_uprobe_pre_xol - prepare to execute out of line.
+ * @aup: the probepoint information.
+ * @regs: reflects the saved user state of current task.
+ */
+int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ /*
+ * Now find the EPC where to resume after the breakpoint has been
+ * dealt with. This may require emulation of a branch.
+ */
+ aup->resume_epc = regs->cp0_epc + 4;
+ if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) {
+ __compute_return_epc_for_insn(regs,
+ (union mips_instruction) aup->insn[0]);
+ aup->resume_epc = regs->cp0_epc;
+ }
+ utask->autask.saved_trap_nr = current->thread.trap_nr;
+ current->thread.trap_nr = UPROBE_TRAP_NR;
+ regs->cp0_epc = current->utask->xol_vaddr;
+
+ return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ current->thread.trap_nr = utask->autask.saved_trap_nr;
+ regs->cp0_epc = aup->resume_epc;
+
+ return 0;
+}
+
+/*
+ * If the XOL insn itself traps and generates a signal (say,
+ * SIGILL/SIGSEGV/etc.), then detect the case where a single-stepped
+ * instruction jumps back to its own address. It is assumed that anything
+ * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
+ *
+ * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
+ * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
+ * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
+ */
+bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
+{
+ if (tsk->thread.trap_nr != UPROBE_TRAP_NR)
+ return true;
+
+ return false;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+ struct pt_regs *regs = args->regs;
+
+ /* regs == NULL is a kernel bug */
+ if (WARN_ON(!regs))
+ return NOTIFY_DONE;
+
+ /* We are only interested in userspace traps */
+ if (!user_mode(regs))
+ return NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_UPROBE:
+ if (uprobe_pre_sstep_notifier(regs))
+ return NOTIFY_STOP;
+ break;
+ case DIE_UPROBE_XOL:
+ if (uprobe_post_sstep_notifier(regs))
+ return NOTIFY_STOP;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * This function gets called when XOL instruction either gets trapped or
+ * the thread has a fatal signal. Reset the instruction pointer to its
+ * probed address for the potential restart or for post mortem analysis.
+ */
+void arch_uprobe_abort_xol(struct arch_uprobe *aup,
+ struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ instruction_pointer_set(regs, utask->vaddr);
+}
+
+unsigned long arch_uretprobe_hijack_return_addr(
+ unsigned long trampoline_vaddr, struct pt_regs *regs)
+{
+ unsigned long ra;
+
+ ra = regs->regs[31];
+
+ /* Replace the return address with the trampoline address */
+ regs->regs[31] = trampoline_vaddr;
+
+ return ra;
+}
+
+/**
+ * set_swbp - store breakpoint at a given address.
+ * @auprobe: arch specific probepoint information.
+ * @mm: the probed process address space.
+ * @vaddr: the virtual address to insert the opcode.
+ *
+ * For mm @mm, store the breakpoint instruction at @vaddr.
+ * Return 0 (success) or a negative errno.
+ *
+ * This version overrides the weak version in kernel/events/uprobes.c but
+ * currently behaves identically to it; MIPS16 and microMIPS probes are
+ * rejected earlier, in arch_uprobe_analyze_insn().
+ */
+int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ unsigned long vaddr)
+{
+ return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
+}
+
+void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ void *src, unsigned long len)
+{
+ unsigned long kaddr, kstart;
+
+ /* Initialize the slot */
+ kaddr = (unsigned long)kmap_atomic(page);
+ kstart = kaddr + (vaddr & ~PAGE_MASK);
+ memcpy((void *)kstart, src, len);
+ flush_icache_range(kstart, kstart + len);
+ kunmap_atomic((void *)kaddr);
+}
+
+/**
+ * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
+ * @regs: Reflects the saved state of the task after it has hit a breakpoint
+ * instruction.
+ * Return the address of the breakpoint instruction.
+ *
+ * This overrides the weak version in kernel/events/uprobes.c.
+ */
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+/*
+ * See if the instruction can be emulated.
+ * Returns true if instruction was emulated, false otherwise.
+ *
+ * For now we never emulate, so this function simply returns false.
+ */
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ return 0;
+}
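
These hooks are only the architecture half; they are driven by the generic uprobes core, which an in-kernel user reaches through uprobe_register(). A heavily hedged sketch of such a consumer (the handler name and the way the inode and file offset of the probed instruction are obtained are assumptions made for illustration):

	/* Hypothetical in-kernel consumer; not part of this patch. */
	static int my_uprobe_handler(struct uprobe_consumer *uc,
				     struct pt_regs *regs)
	{
		pr_info("uprobe hit, epc=%lx\n", instruction_pointer(regs));
		return 0;
	}

	static struct uprobe_consumer my_consumer = {
		.handler = my_uprobe_handler,
	};

	/* The caller must look up the inode of the probed ELF file and the
	 * file offset of the probed instruction. */
	static int install_my_probe(struct inode *inode, loff_t offset)
	{
		return uprobe_register(inode, offset, &my_consumer);
	}

When the probed task hits the breakpoint, arch_uprobe_pre_xol() above redirects it to the out-of-line slot and arch_uprobe_post_xol() then restores the saved resume EPC.
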
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
new file mode 100644
index 000000000..242dc5e83
--- /dev/null
+++ b/arch/mips/kernel/vdso.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timekeeper_internal.h>
+
+#include <asm/abi.h>
+#include <asm/mips-cps.h>
+#include <asm/page.h>
+#include <asm/vdso.h>
+#include <vdso/helpers.h>
+#include <vdso/vsyscall.h>
+
+/* Kernel-provided data used by the VDSO. */
+static union mips_vdso_data mips_vdso_data __page_aligned_data;
+struct vdso_data *vdso_data = mips_vdso_data.data;
+
+/*
+ * Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as
+ * what we map and where within the area they are mapped is determined at
+ * runtime.
+ */
+static struct page *no_pages[] = { NULL };
+static struct vm_special_mapping vdso_vvar_mapping = {
+ .name = "[vvar]",
+ .pages = no_pages,
+};
+
+static void __init init_vdso_image(struct mips_vdso_image *image)
+{
+ unsigned long num_pages, i;
+ unsigned long data_pfn;
+
+ BUG_ON(!PAGE_ALIGNED(image->data));
+ BUG_ON(!PAGE_ALIGNED(image->size));
+
+ num_pages = image->size / PAGE_SIZE;
+
+ data_pfn = __phys_to_pfn(__pa_symbol(image->data));
+ for (i = 0; i < num_pages; i++)
+ image->mapping.pages[i] = pfn_to_page(data_pfn + i);
+}
+
+static int __init init_vdso(void)
+{
+ init_vdso_image(&vdso_image);
+
+#ifdef CONFIG_MIPS32_O32
+ init_vdso_image(&vdso_image_o32);
+#endif
+
+#ifdef CONFIG_MIPS32_N32
+ init_vdso_image(&vdso_image_n32);
+#endif
+
+ return 0;
+}
+subsys_initcall(init_vdso);
+
+static unsigned long vdso_base(void)
+{
+ unsigned long base = STACK_TOP;
+
+ if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
+ /* Skip the delay slot emulation page */
+ base += PAGE_SIZE;
+ }
+
+ if (current->flags & PF_RANDOMIZE) {
+ base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
+ base = PAGE_ALIGN(base);
+ }
+
+ return base;
+}
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mips_vdso_image *image = current->thread.abi->vdso;
+ struct mm_struct *mm = current->mm;
+ unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn;
+ struct vm_area_struct *vma;
+ int ret;
+
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
+ if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
+ /* Map delay slot emulation page */
+ base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ 0, NULL);
+ if (IS_ERR_VALUE(base)) {
+ ret = base;
+ goto out;
+ }
+ }
+
+ /*
+	 * Determine total area size. This includes the VDSO image itself, the
+ * data page, and the GIC user page if present. Always create a mapping
+ * for the GIC user area if the GIC is present regardless of whether it
+ * is the current clocksource, in case it comes into use later on. We
+ * only map a page even though the total area is 64K, as we only need
+ * the counter registers at the start.
+ */
+ gic_size = mips_gic_present() ? PAGE_SIZE : 0;
+ vvar_size = gic_size + PAGE_SIZE;
+ size = vvar_size + image->size;
+
+ /*
+ * Find a region that's large enough for us to perform the
+ * colour-matching alignment below.
+ */
+ if (cpu_has_dc_aliases)
+ size += shm_align_mask + 1;
+
+ base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
+ if (IS_ERR_VALUE(base)) {
+ ret = base;
+ goto out;
+ }
+
+ /*
+ * If we suffer from dcache aliasing, ensure that the VDSO data page
+ * mapping is coloured the same as the kernel's mapping of that memory.
+ * This ensures that when the kernel updates the VDSO data userland
+ * will observe it without requiring cache invalidations.
+ */
+ if (cpu_has_dc_aliases) {
+ base = __ALIGN_MASK(base, shm_align_mask);
+ base += ((unsigned long)vdso_data - gic_size) & shm_align_mask;
+ }
+
+ data_addr = base + gic_size;
+ vdso_addr = data_addr + PAGE_SIZE;
+
+ vma = _install_special_mapping(mm, base, vvar_size,
+ VM_READ | VM_MAYREAD,
+ &vdso_vvar_mapping);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+ /* Map GIC user page. */
+ if (gic_size) {
+ gic_pfn = virt_to_phys(mips_gic_base + MIPS_GIC_USER_OFS) >> PAGE_SHIFT;
+
+ ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
+ pgprot_noncached(PAGE_READONLY));
+ if (ret)
+ goto out;
+ }
+
+ /* Map data page. */
+ ret = remap_pfn_range(vma, data_addr,
+ virt_to_phys(vdso_data) >> PAGE_SHIFT,
+ PAGE_SIZE, PAGE_READONLY);
+ if (ret)
+ goto out;
+
+ /* Map VDSO image. */
+ vma = _install_special_mapping(mm, vdso_addr, image->size,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ &image->mapping);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+ mm->context.vdso = (void *)vdso_addr;
+ ret = 0;
+
+out:
+ mmap_write_unlock(mm);
+ return ret;
+}
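
The practical effect of the mappings set up above is that common time queries need not enter the kernel: libc's clock_gettime() can land in the VDSO image, which reads vdso_data (and, when the GIC counter is the clocksource, the GIC user page) straight from the pages mapped here. A small hedged userland example; whether a given call is really satisfied by the VDSO depends on the libc and the selected clocksource:

	/* Illustrative only; not part of this patch. */
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;

		if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
			printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}
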
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
new file mode 100644
index 000000000..64afe075d
--- /dev/null
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+#define PAGE_SIZE _PAGE_SIZE
+
+/*
+ * Put .bss..swapper_pg_dir as the first thing in .bss. This will
+ * ensure that it has .bss alignment (64K).
+ */
+#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir)
+
+/* Cavium Octeon should not have a separate PT_NOTE Program Header. */
+#ifndef CONFIG_CAVIUM_OCTEON_SOC
+#define EMITS_PT_NOTE
+#endif
+
+#define RUNTIME_DISCARD_EXIT
+
+#include <asm-generic/vmlinux.lds.h>
+
+#undef mips
+#define mips mips
+OUTPUT_ARCH(mips)
+ENTRY(kernel_entry)
+PHDRS {
+ text PT_LOAD FLAGS(7); /* RWX */
+#ifndef CONFIG_CAVIUM_OCTEON_SOC
+ note PT_NOTE FLAGS(4); /* R__ */
+#endif /* CAVIUM_OCTEON_SOC */
+}
+
+#ifdef CONFIG_32BIT
+ #ifdef CONFIG_CPU_LITTLE_ENDIAN
+ jiffies = jiffies_64;
+ #else
+ jiffies = jiffies_64 + 4;
+ #endif
+#else
+ jiffies = jiffies_64;
+#endif
+
+SECTIONS
+{
+#ifdef CONFIG_BOOT_ELF64
+ /* Read-only sections, merged into text segment: */
+ /* . = 0xc000000000000000; */
+
+ /* This is the value for an Origin kernel, taken from an IRIX kernel. */
+ /* . = 0xc00000000001c000; */
+
+ /* Set the vaddr for the text segment to a value
+	 * >= 0xa800 0000 0001 9000 if no symmon is going to be configured
+ * >= 0xa800 0000 0030 0000 otherwise
+ */
+
+ /* . = 0xa800000000300000; */
+ . = 0xffffffff80300000;
+#endif
+ . = LINKER_LOAD_ADDRESS;
+ /* read-only */
+ _text = .; /* Text and read-only data */
+ .text : {
+ TEXT_TEXT
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ *(.text.*)
+ *(.fixup)
+ *(.gnu.warning)
+ } :text = 0
+ _etext = .; /* End of text section */
+
+ EXCEPTION_TABLE(16)
+
+ /* Exception table for data bus errors */
+ __dbe_table : {
+ __start___dbe_table = .;
+ KEEP(*(__dbe_table))
+ __stop___dbe_table = .;
+ }
+
+ _sdata = .; /* Start of data section */
+ RO_DATA(4096)
+
+ /* writeable */
+ .data : { /* Data */
+ . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
+
+ INIT_TASK_DATA(THREAD_SIZE)
+ NOSAVE_DATA
+ PAGE_ALIGNED_DATA(PAGE_SIZE)
+ CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+ READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+ DATA_DATA
+ CONSTRUCTORS
+ }
+ BUG_TABLE
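+ /* _gp points 0x8000 past the small-data base so signed 16-bit offsets span it. */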
+ _gp = . + 0x8000;
+ .lit8 : {
+ *(.lit8)
+ }
+ .lit4 : {
+ *(.lit4)
+ }
+ /* We want the small data sections together, so single-instruction offsets
+ can access them all, and initialized data all before uninitialized, so
+ we can shorten the on-disk segment size. */
+ .sdata : {
+ *(.sdata)
+ }
+ _edata = .; /* End of data section */
+
+ /* will be freed after init */
+ . = ALIGN(PAGE_SIZE); /* Init code and data */
+ __init_begin = .;
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
+
+ . = ALIGN(4);
+ .mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
+ __mips_machines_start = .;
+ KEEP(*(.mips.machines.init))
+ __mips_machines_end = .;
+ }
+
+ /* .exit.text is discarded at runtime, not link time, to deal with
+ * references from .rodata
+ */
+ .exit.text : {
+ EXIT_TEXT
+ }
+ .exit.data : {
+ EXIT_DATA
+ }
+#ifdef CONFIG_SMP
+ PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+#endif
+
+#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
+ .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
+ *(.appended_dtb)
+ KEEP(*(.appended_dtb))
+ }
+#endif
+
+#ifdef CONFIG_RELOCATABLE
+ . = ALIGN(4);
+
+ .data.reloc : {
+ _relocation_start = .;
+ /*
+ * Space for relocation table
+ * This needs to be filled so that the
+ * relocs tool can overwrite the content.
+ * An invalid value is left at the start of the
+ * section to abort relocation if the table
+ * has not been filled in.
+ */
+ LONG(0xFFFFFFFF);
+ FILL(0);
+ . += CONFIG_RELOCATION_TABLE_SIZE - 4;
+ _relocation_end = .;
+ }
+#endif
+
+#ifdef CONFIG_MIPS_RAW_APPENDED_DTB
+ __appended_dtb = .;
+ /* leave space for appended DTB */
+ . += 0x100000;
+#endif
+ /*
+ * Align to 64K in an attempt to eliminate holes before the
+ * .bss..swapper_pg_dir section at the start of .bss. This
+ * also satisfies PAGE_SIZE alignment as the largest page size
+ * allowed is 64K.
+ */
+ . = ALIGN(0x10000);
+ __init_end = .;
+ /* freed after init ends here */
+
+ /*
+ * Force .bss to 64K alignment so that .bss..swapper_pg_dir
+ * gets that alignment. .sbss should be empty, so there will be
+ * no holes after __init_end. */
+ BSS_SECTION(0, 0x10000, 8)
+
+ _end = . ;
+
+ /* These mark the ABI of the kernel for debuggers. */
+ .mdebug.abi32 : {
+ KEEP(*(.mdebug.abi32))
+ }
+ .mdebug.abi64 : {
+ KEEP(*(.mdebug.abi64))
+ }
+
+ /* This is the MIPS specific mdebug section. */
+ .mdebug : {
+ *(.mdebug)
+ }
+
+ STABS_DEBUG
+ DWARF_DEBUG
+ ELF_DETAILS
+
+ /* These must appear regardless of . */
+ .gptab.sdata : {
+ *(.gptab.data)
+ *(.gptab.sdata)
+ }
+ .gptab.sbss : {
+ *(.gptab.bss)
+ *(.gptab.sbss)
+ }
+
+ /* Sections to be discarded */
+ DISCARDS
+ /DISCARD/ : {
+ /* ABI crap starts here */
+ *(.MIPS.abiflags)
+ *(.MIPS.options)
+ *(.options)
+ *(.pdr)
+ *(.reginfo)
+ }
+}
diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c
new file mode 100644
index 000000000..903c07bdc
--- /dev/null
+++ b/arch/mips/kernel/vpe-cmp.c
@@ -0,0 +1,180 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <asm/vpe.h>
+
+static int major;
+
+void cleanup_tc(struct tc *tc)
+{
+
+}
+
+static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+ struct vpe_notifications *notifier;
+
+ list_for_each_entry(notifier, &vpe->notify, list)
+ notifier->stop(aprp_cpu_index());
+
+ release_progmem(vpe->load_addr);
+ vpe->state = VPE_STATE_UNUSED;
+
+ return len;
+}
+static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
+
+static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
+ char *buf)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+
+ return sprintf(buf, "%d\n", vpe->ntcs);
+}
+
+static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+ unsigned long new;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &new);
+ if (ret < 0)
+ return ret;
+
+ /* APRP can only reserve one TC in a VPE and no more. */
+ if (new != 1)
+ return -EINVAL;
+
+ vpe->ntcs = new;
+
+ return len;
+}
+static DEVICE_ATTR_RW(ntcs);
+
+static struct attribute *vpe_attrs[] = {
+ &dev_attr_kill.attr,
+ &dev_attr_ntcs.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vpe);
+
+static void vpe_device_release(struct device *cd)
+{
+}
+
+static struct class vpe_class = {
+ .name = "vpe",
+ .owner = THIS_MODULE,
+ .dev_release = vpe_device_release,
+ .dev_groups = vpe_groups,
+};
+
+static struct device vpe_device;
+
+int __init vpe_module_init(void)
+{
+ struct vpe *v = NULL;
+ struct tc *t;
+ int err;
+
+ if (!cpu_has_mipsmt) {
+ pr_warn("VPE loader: not a MIPS MT capable processor\n");
+ return -ENODEV;
+ }
+
+ if (num_possible_cpus() - aprp_cpu_index() < 1) {
+ pr_warn("No VPEs reserved for AP/SP, not initialize VPE loader\n"
+ "Pass maxcpus=<n> argument as kernel argument\n");
+ return -ENODEV;
+ }
+
+ major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
+ if (major < 0) {
+ pr_warn("VPE loader: unable to register character device\n");
+ return major;
+ }
+
+ err = class_register(&vpe_class);
+ if (err) {
+ pr_err("vpe_class registration failed\n");
+ goto out_chrdev;
+ }
+
+ device_initialize(&vpe_device);
+ vpe_device.class = &vpe_class,
+ vpe_device.parent = NULL,
+ dev_set_name(&vpe_device, "vpe_sp");
+ vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
+ err = device_add(&vpe_device);
+ if (err) {
+ pr_err("Adding vpe_device failed\n");
+ goto out_class;
+ }
+
+ t = alloc_tc(aprp_cpu_index());
+ if (!t) {
+ pr_warn("VPE: unable to allocate TC\n");
+ err = -ENOMEM;
+ goto out_dev;
+ }
+
+ /* VPE */
+ v = alloc_vpe(aprp_cpu_index());
+ if (v == NULL) {
+ pr_warn("VPE: unable to allocate VPE\n");
+ kfree(t);
+ err = -ENOMEM;
+ goto out_dev;
+ }
+
+ v->ntcs = 1;
+
+ /* add the tc to the list of this vpe's tc's. */
+ list_add(&t->tc, &v->tc);
+
+ /* TC */
+ t->pvpe = v; /* set the parent vpe */
+
+ return 0;
+
+out_dev:
+ device_del(&vpe_device);
+
+out_class:
+ put_device(&vpe_device);
+ class_unregister(&vpe_class);
+
+out_chrdev:
+ unregister_chrdev(major, VPE_MODULE_NAME);
+
+ return err;
+}
+
+void __exit vpe_module_exit(void)
+{
+ struct vpe *v, *n;
+
+ device_unregister(&vpe_device);
+ class_unregister(&vpe_class);
+ unregister_chrdev(major, VPE_MODULE_NAME);
+
+ /* No locking needed here */
+ list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list)
+ if (v->state != VPE_STATE_UNUSED)
+ release_vpe(v);
+}
diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c
new file mode 100644
index 000000000..496ed8f36
--- /dev/null
+++ b/arch/mips/kernel/vpe-mt.c
@@ -0,0 +1,520 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+#include <asm/vpe.h>
+
+static int major;
+
+/* The number of TCs and VPEs physically available on the core */
+static int hw_tcs, hw_vpes;
+
+/* We are prepared, so configure and start the VPE... */
+int vpe_run(struct vpe *v)
+{
+ unsigned long flags, val, dmt_flag;
+ struct vpe_notifications *notifier;
+ unsigned int vpeflags;
+ struct tc *t;
+
+ /* check we are the Master VPE */
+ local_irq_save(flags);
+ val = read_c0_vpeconf0();
+ if (!(val & VPECONF0_MVP)) {
+ pr_warn("VPE loader: only Master VPE's are able to config MT\n");
+ local_irq_restore(flags);
+
+ return -1;
+ }
+
+ dmt_flag = dmt();
+ vpeflags = dvpe();
+
+ if (list_empty(&v->tc)) {
+ evpe(vpeflags);
+ emt(dmt_flag);
+ local_irq_restore(flags);
+
+ pr_warn("VPE loader: No TC's associated with VPE %d\n",
+ v->minor);
+
+ return -ENOEXEC;
+ }
+
+ t = list_first_entry(&v->tc, struct tc, tc);
+
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ settc(t->index);
+
+ /* should check it is halted, and not activated */
+ if ((read_tc_c0_tcstatus() & TCSTATUS_A) ||
+ !(read_tc_c0_tchalt() & TCHALT_H)) {
+ evpe(vpeflags);
+ emt(dmt_flag);
+ local_irq_restore(flags);
+
+ pr_warn("VPE loader: TC %d is already active!\n",
+ t->index);
+
+ return -ENOEXEC;
+ }
+
+ /*
+ * Write the address we want it to start running from in the TCPC
+ * register.
+ */
+ write_tc_c0_tcrestart((unsigned long)v->__start);
+ write_tc_c0_tccontext((unsigned long)0);
+
+ /*
+ * Mark the TC as activated, not interrupt exempt and not dynamically
+ * allocatable
+ */
+ val = read_tc_c0_tcstatus();
+ val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
+ write_tc_c0_tcstatus(val);
+
+ write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
+
+ /*
+ * We don't pass the memsize here, so VPE programs need to be
+ * compiled with DFLT_STACK_SIZE and DFLT_HEAP_SIZE defined.
+ */
+ mttgpr(7, 0);
+ mttgpr(6, v->ntcs);
+
+ /* set up VPE1 */
+ /*
+ * bind the TC to VPE 1 as late as possible so we only have the final
+ * VPE registers to set up, and so an EJTAG probe can trigger on it
+ */
+ write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
+
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
+
+ back_to_back_c0_hazard();
+
+ /* Set up the XTC bit in vpeconf0 to point at our tc */
+ write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
+ | (t->index << VPECONF0_XTC_SHIFT));
+
+ back_to_back_c0_hazard();
+
+ /* enable this VPE */
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
+
+ /* clear out any leftovers from a previous program */
+ write_vpe_c0_status(0);
+ write_vpe_c0_cause(0);
+
+ /* take system out of configuration state */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ /*
+ * SMVP kernels manage VPE enable independently, but uniprocessor
+ * kernels need to turn it on, even if that wasn't the pre-dvpe() state.
+ */
+#ifdef CONFIG_SMP
+ evpe(vpeflags);
+#else
+ evpe(EVPE_ENABLE);
+#endif
+ emt(dmt_flag);
+ local_irq_restore(flags);
+
+ list_for_each_entry(notifier, &v->notify, list)
+ notifier->start(VPE_MODULE_MINOR);
+
+ return 0;
+}
+
+void cleanup_tc(struct tc *tc)
+{
+ unsigned long flags;
+ unsigned int mtflags, vpflags;
+ int tmp;
+
+ local_irq_save(flags);
+ mtflags = dmt();
+ vpflags = dvpe();
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ settc(tc->index);
+ tmp = read_tc_c0_tcstatus();
+
+ /* mark not allocated and not dynamically allocatable */
+ tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+ tmp |= TCSTATUS_IXMT; /* interrupt exempt */
+ write_tc_c0_tcstatus(tmp);
+
+ write_tc_c0_tchalt(TCHALT_H);
+ mips_ihb();
+
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+ evpe(vpflags);
+ emt(mtflags);
+ local_irq_restore(flags);
+}
+
+/* module wrapper entry points */
+/* give me a vpe */
+void *vpe_alloc(void)
+{
+ int i;
+ struct vpe *v;
+
+ /* find a vpe */
+ for (i = 1; i < MAX_VPES; i++) {
+ v = get_vpe(i);
+ if (v != NULL) {
+ v->state = VPE_STATE_INUSE;
+ return v;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(vpe_alloc);
+
+/* start running from here */
+int vpe_start(void *vpe, unsigned long start)
+{
+ struct vpe *v = vpe;
+
+ v->__start = start;
+ return vpe_run(v);
+}
+EXPORT_SYMBOL(vpe_start);
+
+/* halt it for now */
+int vpe_stop(void *vpe)
+{
+ struct vpe *v = vpe;
+ struct tc *t;
+ unsigned int evpe_flags;
+
+ evpe_flags = dvpe();
+
+ t = list_entry(v->tc.next, struct tc, tc);
+ if (t != NULL) {
+ settc(t->index);
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
+ }
+
+ evpe(evpe_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(vpe_stop);
+
+/* I'm done with it, thank you */
+int vpe_free(void *vpe)
+{
+ struct vpe *v = vpe;
+ struct tc *t;
+ unsigned int evpe_flags;
+
+ t = list_entry(v->tc.next, struct tc, tc);
+ if (t == NULL)
+ return -ENOEXEC;
+
+ evpe_flags = dvpe();
+
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ settc(t->index);
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
+
+ /* halt the TC */
+ write_tc_c0_tchalt(TCHALT_H);
+ mips_ihb();
+
+ /* mark the TC unallocated */
+ write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
+
+ v->state = VPE_STATE_UNUSED;
+
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+ evpe(evpe_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(vpe_free);
+
+static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+ struct vpe_notifications *notifier;
+
+ list_for_each_entry(notifier, &vpe->notify, list)
+ notifier->stop(aprp_cpu_index());
+
+ release_progmem(vpe->load_addr);
+ cleanup_tc(get_tc(aprp_cpu_index()));
+ vpe_stop(vpe);
+ vpe_free(vpe);
+
+ return len;
+}
+static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
+
+static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
+ char *buf)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+
+ return sprintf(buf, "%d\n", vpe->ntcs);
+}
+
+static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+ unsigned long new;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &new);
+ if (ret < 0)
+ return ret;
+
+ if (new == 0 || new > (hw_tcs - aprp_cpu_index()))
+ return -EINVAL;
+
+ vpe->ntcs = new;
+
+ return len;
+}
+static DEVICE_ATTR_RW(ntcs);
+
+static struct attribute *vpe_attrs[] = {
+ &dev_attr_kill.attr,
+ &dev_attr_ntcs.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vpe);
+
+static void vpe_device_release(struct device *cd)
+{
+}
+
+static struct class vpe_class = {
+ .name = "vpe",
+ .owner = THIS_MODULE,
+ .dev_release = vpe_device_release,
+ .dev_groups = vpe_groups,
+};
+
+static struct device vpe_device;
+
+int __init vpe_module_init(void)
+{
+ unsigned int mtflags, vpflags;
+ unsigned long flags, val;
+ struct vpe *v = NULL;
+ struct tc *t;
+ int tc, err;
+
+ if (!cpu_has_mipsmt) {
+ pr_warn("VPE loader: not a MIPS MT capable processor\n");
+ return -ENODEV;
+ }
+
+ if (vpelimit == 0) {
+ pr_warn("No VPEs reserved for AP/SP, not initialize VPE loader\n"
+ "Pass maxvpes=<n> argument as kernel argument\n");
+
+ return -ENODEV;
+ }
+
+ if (aprp_cpu_index() == 0) {
+ pr_warn("No TCs reserved for AP/SP, not initialize VPE loader\n"
+ "Pass maxtcs=<n> argument as kernel argument\n");
+
+ return -ENODEV;
+ }
+
+ major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
+ if (major < 0) {
+ pr_warn("VPE loader: unable to register character device\n");
+ return major;
+ }
+
+ err = class_register(&vpe_class);
+ if (err) {
+ pr_err("vpe_class registration failed\n");
+ goto out_chrdev;
+ }
+
+ device_initialize(&vpe_device);
+ vpe_device.class = &vpe_class,
+ vpe_device.parent = NULL,
+ dev_set_name(&vpe_device, "vpe1");
+ vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
+ err = device_add(&vpe_device);
+ if (err) {
+ pr_err("Adding vpe_device failed\n");
+ goto out_class;
+ }
+
+ local_irq_save(flags);
+ mtflags = dmt();
+ vpflags = dvpe();
+
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ val = read_c0_mvpconf0();
+ hw_tcs = (val & MVPCONF0_PTC) + 1;
+ hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+
+ for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) {
+ /*
+ * Multithreading must be re-enabled temporarily here, otherwise we
+ * might hang if we reschedule, send IPIs or similar.
+ */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+ evpe(vpflags);
+ emt(mtflags);
+ local_irq_restore(flags);
+ t = alloc_tc(tc);
+ if (!t) {
+ err = -ENOMEM;
+ goto out_dev;
+ }
+
+ local_irq_save(flags);
+ mtflags = dmt();
+ vpflags = dvpe();
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ /* VPE's */
+ if (tc < hw_tcs) {
+ settc(tc);
+
+ v = alloc_vpe(tc);
+ if (v == NULL) {
+ pr_warn("VPE: unable to allocate VPE\n");
+ goto out_reenable;
+ }
+
+ v->ntcs = hw_tcs - aprp_cpu_index();
+
+ /* add the tc to the list of this vpe's tc's. */
+ list_add(&t->tc, &v->tc);
+
+ /* deactivate all but vpe0 */
+ if (tc >= aprp_cpu_index()) {
+ unsigned long tmp = read_vpe_c0_vpeconf0();
+
+ tmp &= ~VPECONF0_VPA;
+
+ /* master VPE */
+ tmp |= VPECONF0_MVP;
+ write_vpe_c0_vpeconf0(tmp);
+ }
+
+ /* disable multi-threading with TC's */
+ write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() &
+ ~VPECONTROL_TE);
+
+ if (tc >= vpelimit) {
+ /*
+ * Set config to be the same as vpe0,
+ * particularly kseg0 coherency alg
+ */
+ write_vpe_c0_config(read_c0_config());
+ }
+ }
+
+ /* TC's */
+ t->pvpe = v; /* set the parent vpe */
+
+ if (tc >= aprp_cpu_index()) {
+ unsigned long tmp;
+
+ settc(tc);
+
+ /*
+ * A TC that is bound to any other VPE gets bound to
+ * VPE0, ideally I'd like to make it homeless but it
+ * doesn't appear to let me bind a TC to a non-existent
+ * VPE. Which is perfectly reasonable.
+ *
+ * The (un)bound state is visible to an EJTAG probe so
+ * may notify GDB...
+ */
+ tmp = read_tc_c0_tcbind();
+ if (tmp & TCBIND_CURVPE) {
+ /* tc is bound >vpe0 */
+ write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
+
+ t->pvpe = get_vpe(0); /* set the parent vpe */
+ }
+
+ /* halt the TC */
+ write_tc_c0_tchalt(TCHALT_H);
+ mips_ihb();
+
+ tmp = read_tc_c0_tcstatus();
+
+ /* mark not activated and not dynamically allocatable */
+ tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+ tmp |= TCSTATUS_IXMT; /* interrupt exempt */
+ write_tc_c0_tcstatus(tmp);
+ }
+ }
+
+out_reenable:
+ /* release config state */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ evpe(vpflags);
+ emt(mtflags);
+ local_irq_restore(flags);
+
+ return 0;
+
+out_dev:
+ device_del(&vpe_device);
+
+out_class:
+ put_device(&vpe_device);
+ class_unregister(&vpe_class);
+
+out_chrdev:
+ unregister_chrdev(major, VPE_MODULE_NAME);
+
+ return err;
+}
+
+void __exit vpe_module_exit(void)
+{
+ struct vpe *v, *n;
+
+ device_unregister(&vpe_device);
+ class_unregister(&vpe_class);
+ unregister_chrdev(major, VPE_MODULE_NAME);
+
+ /* No locking needed here */
+ list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
+ if (v->state != VPE_STATE_UNUSED)
+ release_vpe(v);
+ }
+}
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
new file mode 100644
index 000000000..d0d832ab3
--- /dev/null
+++ b/arch/mips/kernel/vpe.c
@@ -0,0 +1,933 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ *
+ * VPE support module for loading a MIPS SP program into VPE1. The SP
+ * environment is rather simple since there are no TLBs. It needs
+ * to be relocatable (or partially linked). Initialize your stack in
+ * the startup code. The loader looks for the symbol __start and sets
+ * up execution to resume from there. To load and run, simply cat the
+ * SP 'binary' to the /dev/vpe1 device.
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+#include <linux/elf.h>
+#include <linux/seq_file.h>
+#include <linux/syscalls.h>
+#include <linux/moduleloader.h>
+#include <linux/interrupt.h>
+#include <linux/poll.h>
+#include <linux/memblock.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/cacheflush.h>
+#include <linux/atomic.h>
+#include <asm/mips_mt.h>
+#include <asm/processor.h>
+#include <asm/vpe.h>
+
+#ifndef ARCH_SHF_SMALL
+#define ARCH_SHF_SMALL 0
+#endif
+
+/* If this is set, the section belongs in the init part of the module */
+#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
+
+struct vpe_control vpecontrol = {
+ .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
+ .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
+ .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock),
+ .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
+};
+
+/* get the vpe associated with this minor */
+struct vpe *get_vpe(int minor)
+{
+ struct vpe *res, *v;
+
+ if (!cpu_has_mipsmt)
+ return NULL;
+
+ res = NULL;
+ spin_lock(&vpecontrol.vpe_list_lock);
+ list_for_each_entry(v, &vpecontrol.vpe_list, list) {
+ if (v->minor == VPE_MODULE_MINOR) {
+ res = v;
+ break;
+ }
+ }
+ spin_unlock(&vpecontrol.vpe_list_lock);
+
+ return res;
+}
+
+/* get the tc associated with this index */
+struct tc *get_tc(int index)
+{
+ struct tc *res, *t;
+
+ res = NULL;
+ spin_lock(&vpecontrol.tc_list_lock);
+ list_for_each_entry(t, &vpecontrol.tc_list, list) {
+ if (t->index == index) {
+ res = t;
+ break;
+ }
+ }
+ spin_unlock(&vpecontrol.tc_list_lock);
+
+ return res;
+}
+
+/* allocate a vpe and associate it with this minor (or index) */
+struct vpe *alloc_vpe(int minor)
+{
+ struct vpe *v;
+
+ v = kzalloc(sizeof(struct vpe), GFP_KERNEL);
+ if (v == NULL)
+ goto out;
+
+ INIT_LIST_HEAD(&v->tc);
+ spin_lock(&vpecontrol.vpe_list_lock);
+ list_add_tail(&v->list, &vpecontrol.vpe_list);
+ spin_unlock(&vpecontrol.vpe_list_lock);
+
+ INIT_LIST_HEAD(&v->notify);
+ v->minor = VPE_MODULE_MINOR;
+
+out:
+ return v;
+}
+
+/* allocate a tc. At startup only tc0 is running, all others can be halted. */
+struct tc *alloc_tc(int index)
+{
+ struct tc *tc;
+
+ tc = kzalloc(sizeof(struct tc), GFP_KERNEL);
+ if (tc == NULL)
+ goto out;
+
+ INIT_LIST_HEAD(&tc->tc);
+ tc->index = index;
+
+ spin_lock(&vpecontrol.tc_list_lock);
+ list_add_tail(&tc->list, &vpecontrol.tc_list);
+ spin_unlock(&vpecontrol.tc_list_lock);
+
+out:
+ return tc;
+}
+
+/* clean up and free everything */
+void release_vpe(struct vpe *v)
+{
+ list_del(&v->list);
+ if (v->load_addr)
+ release_progmem(v->load_addr);
+ kfree(v);
+}
+
+/* Find some VPE program space */
+void *alloc_progmem(unsigned long len)
+{
+ void *addr;
+
+#ifdef CONFIG_MIPS_VPE_LOADER_TOM
+ /*
+ * This means you must tell Linux to use less memory than you
+ * physically have, for example by passing a mem= boot argument.
+ */
+ addr = pfn_to_kaddr(max_low_pfn);
+ memset(addr, 0, len);
+#else
+ /* simply grab some mem for now */
+ addr = kzalloc(len, GFP_KERNEL);
+#endif
+
+ return addr;
+}
+
+void release_progmem(void *ptr)
+{
+#ifndef CONFIG_MIPS_VPE_LOADER_TOM
+ kfree(ptr);
+#endif
+}
+
+/* Update size with this section: return offset. */
+static long get_offset(unsigned long *size, Elf_Shdr *sechdr)
+{
+ long ret;
+
+ ret = ALIGN(*size, sechdr->sh_addralign ? : 1);
+ *size = ret + sechdr->sh_size;
+ return ret;
+}
+
+/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
+ might -- code, read-only data, read-write data, small data. Tally
+ sizes, and place the offsets into sh_entsize fields: high bit means it
+ belongs in init. */
+static void layout_sections(struct module *mod, const Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs, const char *secstrings)
+{
+ static unsigned long const masks[][2] = {
+ /* NOTE: all executable code must be the first section
+ * in this array; otherwise modify the text_size
+ * finder in the two loops below */
+ {SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL},
+ {SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL},
+ {SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL},
+ {ARCH_SHF_SMALL | SHF_ALLOC, 0}
+ };
+ unsigned int m, i;
+
+ for (i = 0; i < hdr->e_shnum; i++)
+ sechdrs[i].sh_entsize = ~0UL;
+
+ for (m = 0; m < ARRAY_SIZE(masks); ++m) {
+ for (i = 0; i < hdr->e_shnum; ++i) {
+ Elf_Shdr *s = &sechdrs[i];
+
+ if ((s->sh_flags & masks[m][0]) != masks[m][0]
+ || (s->sh_flags & masks[m][1])
+ || s->sh_entsize != ~0UL)
+ continue;
+ s->sh_entsize =
+ get_offset((unsigned long *)&mod->core_layout.size, s);
+ }
+
+ if (m == 0)
+ mod->core_layout.text_size = mod->core_layout.size;
+
+ }
+}
+
+/* from module-elf32.c, but subverted a little */
+
+struct mips_hi16 {
+ struct mips_hi16 *next;
+ Elf32_Addr *addr;
+ Elf32_Addr value;
+};
+
+static struct mips_hi16 *mips_hi16_list;
+static unsigned int gp_offs, gp_addr;
+
+static int apply_r_mips_none(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ return 0;
+}
+
+static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ int rel;
+
+ if (!(*location & 0xffff)) {
+ rel = (int)v - gp_addr;
+ } else {
+ /* .sbss + gp(relative) + offset */
+ /* kludge! */
+ rel = (int)(short)((int)v + gp_offs +
+ (int)(short)(*location & 0xffff) - gp_addr);
+ }
+
+ if ((rel > 32768) || (rel < -32768)) {
+ pr_debug("VPE loader: apply_r_mips_gprel16: relative address 0x%x out of range of gp register\n",
+ rel);
+ return -ENOEXEC;
+ }
+
+ *location = (*location & 0xffff0000) | (rel & 0xffff);
+
+ return 0;
+}
+
+static int apply_r_mips_pc16(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ int rel;
+ rel = (((unsigned int)v - (unsigned int)location));
+ rel >>= 2; /* because the offset is in _instructions_ not bytes. */
+ rel -= 1; /* and one instruction less due to the branch delay slot. */
+
+ if ((rel > 32768) || (rel < -32768)) {
+ pr_debug("VPE loader: apply_r_mips_pc16: relative address out of range 0x%x\n",
+ rel);
+ return -ENOEXEC;
+ }
+
+ *location = (*location & 0xffff0000) | (rel & 0xffff);
+
+ return 0;
+}
+
+static int apply_r_mips_32(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ *location += v;
+
+ return 0;
+}
+
+static int apply_r_mips_26(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ if (v % 4) {
+ pr_debug("VPE loader: apply_r_mips_26: unaligned relocation\n");
+ return -ENOEXEC;
+ }
+
+/*
+ * Not desperately convinced this is a good check of an overflow condition
+ * anyway. But it gets in the way of handling undefined weak symbols which
+ * we want to set to zero.
+ * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+ * printk(KERN_ERR
+ * "module %s: relocation overflow\n",
+ * me->name);
+ * return -ENOEXEC;
+ * }
+ */
+
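+ /* Add the symbol value, converted to a word index, into the 26-bit jump field. */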
+ *location = (*location & ~0x03ffffff) |
+ ((*location + (v >> 2)) & 0x03ffffff);
+ return 0;
+}
+
+static int apply_r_mips_hi16(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ struct mips_hi16 *n;
+
+ /*
+ * We cannot relocate this one now because we don't know the value of
+ * the carry we need to add. Save the information, and let LO16 do the
+ * actual relocation.
+ */
+ n = kmalloc(sizeof(*n), GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+
+ n->addr = location;
+ n->value = v;
+ n->next = mips_hi16_list;
+ mips_hi16_list = n;
+
+ return 0;
+}
+
+static int apply_r_mips_lo16(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ unsigned long insnlo = *location;
+ Elf32_Addr val, vallo;
+ struct mips_hi16 *l, *next;
+
+ /* Sign extend the addend we extract from the lo insn. */
+ vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
+
+ if (mips_hi16_list != NULL) {
+
+ l = mips_hi16_list;
+ while (l != NULL) {
+ unsigned long insn;
+
+ /*
+ * The value for the HI16 had best be the same.
+ */
+ if (v != l->value) {
+ pr_debug("VPE loader: apply_r_mips_lo16/hi16: inconsistent value information\n");
+ goto out_free;
+ }
+
+ /*
+ * Do the HI16 relocation. Note that we actually don't
+ * need to know anything about the LO16 itself, except
+ * where to find the low 16 bits of the addend needed
+ * by the LO16.
+ */
+ insn = *l->addr;
+ val = ((insn & 0xffff) << 16) + vallo;
+ val += v;
+
+ /*
+ * Account for the sign extension that will happen in
+ * the low bits.
+ */
+ val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
+
+ insn = (insn & ~0xffff) | val;
+ *l->addr = insn;
+
+ next = l->next;
+ kfree(l);
+ l = next;
+ }
+
+ mips_hi16_list = NULL;
+ }
+
+ /*
+ * Ok, we're done with the HI16 relocs. Now deal with the LO16.
+ */
+ val = v + vallo;
+ insnlo = (insnlo & ~0xffff) | (val & 0xffff);
+ *location = insnlo;
+
+ return 0;
+
+out_free:
+ while (l != NULL) {
+ next = l->next;
+ kfree(l);
+ l = next;
+ }
+ mips_hi16_list = NULL;
+
+ return -ENOEXEC;
+}
+
+static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
+ Elf32_Addr v) = {
+ [R_MIPS_NONE] = apply_r_mips_none,
+ [R_MIPS_32] = apply_r_mips_32,
+ [R_MIPS_26] = apply_r_mips_26,
+ [R_MIPS_HI16] = apply_r_mips_hi16,
+ [R_MIPS_LO16] = apply_r_mips_lo16,
+ [R_MIPS_GPREL16] = apply_r_mips_gprel16,
+ [R_MIPS_PC16] = apply_r_mips_pc16
+};
+
+static char *rstrs[] = {
+ [R_MIPS_NONE] = "MIPS_NONE",
+ [R_MIPS_32] = "MIPS_32",
+ [R_MIPS_26] = "MIPS_26",
+ [R_MIPS_HI16] = "MIPS_HI16",
+ [R_MIPS_LO16] = "MIPS_LO16",
+ [R_MIPS_GPREL16] = "MIPS_GPREL16",
+ [R_MIPS_PC16] = "MIPS_PC16"
+};
+
+static int apply_relocations(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ uint32_t *location;
+ unsigned int i;
+ Elf32_Addr v;
+ int res;
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ Elf32_Word r_info = rel[i].r_info;
+
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ /* This is the symbol it is referring to */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(r_info);
+
+ if (!sym->st_value) {
+ pr_debug("%s: undefined weak symbol %s\n",
+ me->name, strtab + sym->st_name);
+ /* just print the warning, don't barf */
+ }
+
+ v = sym->st_value;
+
+ res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
+ if (res) {
+ char *r = rstrs[ELF32_R_TYPE(r_info)];
+ pr_warn("VPE loader: .text+0x%x relocation type %s for symbol \"%s\" failed\n",
+ rel[i].r_offset, r ? r : "UNKNOWN",
+ strtab + sym->st_name);
+ return res;
+ }
+ }
+
+ return 0;
+}
+
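+/* Record the _gp address and its offset from the 64K-aligned section base for GPREL16 relocations. */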
+static inline void save_gp_address(unsigned int secbase, unsigned int rel)
+{
+ gp_addr = secbase + rel;
+ gp_offs = gp_addr - (secbase & 0xffff0000);
+}
+/* end module-elf32.c */
+
+/* Change all symbols so that sh_value encodes the pointer directly. */
+static void simplify_symbols(Elf_Shdr *sechdrs,
+ unsigned int symindex,
+ const char *strtab,
+ const char *secstrings,
+ unsigned int nsecs, struct module *mod)
+{
+ Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
+ unsigned long secbase, bssbase = 0;
+ unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
+ int size;
+
+ /* find the .bss section for COMMON symbols */
+ for (i = 0; i < nsecs; i++) {
+ if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) {
+ bssbase = sechdrs[i].sh_addr;
+ break;
+ }
+ }
+
+ for (i = 1; i < n; i++) {
+ switch (sym[i].st_shndx) {
+ case SHN_COMMON:
+ /* Allocate space for the symbol in the .bss section.
+ st_value is currently size.
+ We want it to have the address of the symbol. */
+
+ size = sym[i].st_value;
+ sym[i].st_value = bssbase;
+
+ bssbase += size;
+ break;
+
+ case SHN_ABS:
+ /* Don't need to do anything */
+ break;
+
+ case SHN_UNDEF:
+ /* ret = -ENOENT; */
+ break;
+
+ case SHN_MIPS_SCOMMON:
+ pr_debug("simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n",
+ strtab + sym[i].st_name, sym[i].st_shndx);
+ /* .sbss section */
+ break;
+
+ default:
+ secbase = sechdrs[sym[i].st_shndx].sh_addr;
+
+ if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0)
+ save_gp_address(secbase, sym[i].st_value);
+
+ sym[i].st_value += secbase;
+ break;
+ }
+ }
+}
+
+#ifdef DEBUG_ELFLOADER
+static void dump_elfsymbols(Elf_Shdr *sechdrs, unsigned int symindex,
+ const char *strtab, struct module *mod)
+{
+ Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
+ unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
+
+ pr_debug("dump_elfsymbols: n %d\n", n);
+ for (i = 1; i < n; i++) {
+ pr_debug(" i %d name <%s> 0x%x\n", i, strtab + sym[i].st_name,
+ sym[i].st_value);
+ }
+}
+#endif
+
+static int find_vpe_symbols(struct vpe *v, Elf_Shdr *sechdrs,
+ unsigned int symindex, const char *strtab,
+ struct module *mod)
+{
+ Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
+ unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
+
+ for (i = 1; i < n; i++) {
+ if (strcmp(strtab + sym[i].st_name, "__start") == 0)
+ v->__start = sym[i].st_value;
+
+ if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0)
+ v->shared_ptr = (void *)sym[i].st_value;
+ }
+
+ if ((v->__start == 0) || (v->shared_ptr == NULL))
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Allocates a VPE with some program code space (the load address), copies the
+ * contents of the program (p)buffer performing relocations/etc, and frees it
+ * when finished.
+ */
+static int vpe_elfload(struct vpe *v)
+{
+ Elf_Ehdr *hdr;
+ Elf_Shdr *sechdrs;
+ long err = 0;
+ char *secstrings, *strtab = NULL;
+ unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
+ struct module mod; /* so we can re-use the relocations code */
+
+ memset(&mod, 0, sizeof(struct module));
+ strcpy(mod.name, "VPE loader");
+
+ hdr = (Elf_Ehdr *) v->pbuffer;
+ len = v->plen;
+
+ /* Sanity checks against insmoding binaries or wrong arch,
+ weird elf version */
+ if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
+ || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
+ || !elf_check_arch(hdr)
+ || hdr->e_shentsize != sizeof(*sechdrs)) {
+ pr_warn("VPE loader: program wrong arch or weird elf version\n");
+
+ return -ENOEXEC;
+ }
+
+ if (hdr->e_type == ET_REL)
+ relocate = 1;
+
+ if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
+ pr_err("VPE loader: program length %u truncated\n", len);
+
+ return -ENOEXEC;
+ }
+
+ /* Convenience variables */
+ sechdrs = (void *)hdr + hdr->e_shoff;
+ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ sechdrs[0].sh_addr = 0;
+
+ /* And these should exist, but gcc whinges if we don't init them */
+ symindex = strindex = 0;
+
+ if (relocate) {
+ for (i = 1; i < hdr->e_shnum; i++) {
+ if ((sechdrs[i].sh_type != SHT_NOBITS) &&
+ (len < sechdrs[i].sh_offset + sechdrs[i].sh_size)) {
+ pr_err("VPE program length %u truncated\n",
+ len);
+ return -ENOEXEC;
+ }
+
+ /* Mark all sections sh_addr with their address in the
+ temporary image. */
+ sechdrs[i].sh_addr = (size_t) hdr +
+ sechdrs[i].sh_offset;
+
+ /* Internal symbols and strings. */
+ if (sechdrs[i].sh_type == SHT_SYMTAB) {
+ symindex = i;
+ strindex = sechdrs[i].sh_link;
+ strtab = (char *)hdr +
+ sechdrs[strindex].sh_offset;
+ }
+ }
+ layout_sections(&mod, hdr, sechdrs, secstrings);
+ }
+
+ v->load_addr = alloc_progmem(mod.core_layout.size);
+ if (!v->load_addr)
+ return -ENOMEM;
+
+ pr_info("VPE loader: loading to %p\n", v->load_addr);
+
+ if (relocate) {
+ for (i = 0; i < hdr->e_shnum; i++) {
+ void *dest;
+
+ if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+ continue;
+
+ dest = v->load_addr + sechdrs[i].sh_entsize;
+
+ if (sechdrs[i].sh_type != SHT_NOBITS)
+ memcpy(dest, (void *)sechdrs[i].sh_addr,
+ sechdrs[i].sh_size);
+ /* Update sh_addr to point to copy in image. */
+ sechdrs[i].sh_addr = (unsigned long)dest;
+
+ pr_debug(" section sh_name %s sh_addr 0x%x\n",
+ secstrings + sechdrs[i].sh_name,
+ sechdrs[i].sh_addr);
+ }
+
+ /* Fix up syms, so that st_value is a pointer to location. */
+ simplify_symbols(sechdrs, symindex, strtab, secstrings,
+ hdr->e_shnum, &mod);
+
+ /* Now do relocations. */
+ for (i = 1; i < hdr->e_shnum; i++) {
+ const char *strtab = (char *)sechdrs[strindex].sh_addr;
+ unsigned int info = sechdrs[i].sh_info;
+
+ /* Not a valid relocation section? */
+ if (info >= hdr->e_shnum)
+ continue;
+
+ /* Don't bother with non-allocated sections */
+ if (!(sechdrs[info].sh_flags & SHF_ALLOC))
+ continue;
+
+ if (sechdrs[i].sh_type == SHT_REL)
+ err = apply_relocations(sechdrs, strtab,
+ symindex, i, &mod);
+ else if (sechdrs[i].sh_type == SHT_RELA)
+ err = apply_relocate_add(sechdrs, strtab,
+ symindex, i, &mod);
+ if (err < 0)
+ return err;
+
+ }
+ } else {
+ struct elf_phdr *phdr = (struct elf_phdr *)
+ ((char *)hdr + hdr->e_phoff);
+
+ for (i = 0; i < hdr->e_phnum; i++) {
+ if (phdr->p_type == PT_LOAD) {
+ memcpy((void *)phdr->p_paddr,
+ (char *)hdr + phdr->p_offset,
+ phdr->p_filesz);
+ memset((void *)phdr->p_paddr + phdr->p_filesz,
+ 0, phdr->p_memsz - phdr->p_filesz);
+ }
+ phdr++;
+ }
+
+ for (i = 0; i < hdr->e_shnum; i++) {
+ /* Internal symbols and strings. */
+ if (sechdrs[i].sh_type == SHT_SYMTAB) {
+ symindex = i;
+ strindex = sechdrs[i].sh_link;
+ strtab = (char *)hdr +
+ sechdrs[strindex].sh_offset;
+
+ /*
+ * mark symtab's address for when we try
+ * to find the magic symbols
+ */
+ sechdrs[i].sh_addr = (size_t) hdr +
+ sechdrs[i].sh_offset;
+ }
+ }
+ }
+
+ /* make sure it's physically written out */
+ flush_icache_range((unsigned long)v->load_addr,
+ (unsigned long)v->load_addr + v->len);
+
+ if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
+ if (v->__start == 0) {
+ pr_warn("VPE loader: program does not contain a __start symbol\n");
+ return -ENOEXEC;
+ }
+
+ if (v->shared_ptr == NULL)
+ pr_warn("VPE loader: program does not contain vpe_shared symbol.\n"
+ " Unable to use AMVP (AP/SP) facilities.\n");
+ }
+
+ pr_info(" elf loaded\n");
+ return 0;
+}
+
+static int getcwd(char *buff, int size)
+{
+ mm_segment_t old_fs;
+ int ret;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ ret = sys_getcwd(buff, size);
+
+ set_fs(old_fs);
+
+ return ret;
+}
+
+/* checks VPE is unused and gets ready to load program */
+static int vpe_open(struct inode *inode, struct file *filp)
+{
+ enum vpe_state state;
+ struct vpe_notifications *notifier;
+ struct vpe *v;
+ int ret;
+
+ if (VPE_MODULE_MINOR != iminor(inode)) {
+ /* assume only 1 device at the moment. */
+ pr_warn("VPE loader: only vpe1 is supported\n");
+
+ return -ENODEV;
+ }
+
+ v = get_vpe(aprp_cpu_index());
+ if (v == NULL) {
+ pr_warn("VPE loader: unable to get vpe\n");
+
+ return -ENODEV;
+ }
+
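+ /* Atomically claim the VPE; if a program was already loaded, tear it down below. */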
+ state = xchg(&v->state, VPE_STATE_INUSE);
+ if (state != VPE_STATE_UNUSED) {
+ pr_debug("VPE loader: tc in use dumping regs\n");
+
+ list_for_each_entry(notifier, &v->notify, list)
+ notifier->stop(aprp_cpu_index());
+
+ release_progmem(v->load_addr);
+ cleanup_tc(get_tc(aprp_cpu_index()));
+ }
+
+ /* this of course trashes what was there before... */
+ v->pbuffer = vmalloc(P_SIZE);
+ if (!v->pbuffer) {
+ pr_warn("VPE loader: unable to allocate memory\n");
+ return -ENOMEM;
+ }
+ v->plen = P_SIZE;
+ v->load_addr = NULL;
+ v->len = 0;
+
+ v->cwd[0] = 0;
+ ret = getcwd(v->cwd, VPE_PATH_MAX);
+ if (ret < 0)
+ pr_warn("VPE loader: open, getcwd returned %d\n", ret);
+
+ v->shared_ptr = NULL;
+ v->__start = 0;
+
+ return 0;
+}
+
+static int vpe_release(struct inode *inode, struct file *filp)
+{
+#if defined(CONFIG_MIPS_VPE_LOADER_MT) || defined(CONFIG_MIPS_VPE_LOADER_CMP)
+ struct vpe *v;
+ Elf_Ehdr *hdr;
+ int ret = 0;
+
+ v = get_vpe(aprp_cpu_index());
+ if (v == NULL)
+ return -ENODEV;
+
+ hdr = (Elf_Ehdr *) v->pbuffer;
+ if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
+ if (vpe_elfload(v) >= 0) {
+ vpe_run(v);
+ } else {
+ pr_warn("VPE loader: ELF load failed.\n");
+ ret = -ENOEXEC;
+ }
+ } else {
+ pr_warn("VPE loader: only elf files are supported\n");
+ ret = -ENOEXEC;
+ }
+
+ /* It's good to be able to run the SP and if it chokes have a look at
+ the /dev/rt?. But if we reset the pointer to the shared struct we
+ lose what has happened. So perhaps if garbage is sent to the vpe
+ device, use it as a trigger for the reset. Hopefully a nice
+ executable will be along shortly. */
+ if (ret < 0)
+ v->shared_ptr = NULL;
+
+ vfree(v->pbuffer);
+ v->plen = 0;
+
+ return ret;
+#else
+ pr_warn("VPE loader: ELF load failed.\n");
+ return -ENOEXEC;
+#endif
+}
+
+static ssize_t vpe_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ size_t ret = count;
+ struct vpe *v;
+
+ if (iminor(file_inode(file)) != VPE_MODULE_MINOR)
+ return -ENODEV;
+
+ v = get_vpe(aprp_cpu_index());
+
+ if (v == NULL)
+ return -ENODEV;
+
+ if ((count + v->len) > v->plen) {
+ pr_warn("VPE loader: elf size too big. Perhaps strip unneeded symbols\n");
+ return -ENOMEM;
+ }
+
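+ /* copy_from_user() returns the number of bytes left uncopied; bail out only if nothing was copied. */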
+ count -= copy_from_user(v->pbuffer + v->len, buffer, count);
+ if (!count)
+ return -EFAULT;
+
+ v->len += count;
+ return ret;
+}
+
+const struct file_operations vpe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpe_open,
+ .release = vpe_release,
+ .write = vpe_write,
+ .llseek = noop_llseek,
+};
+
+void *vpe_get_shared(int index)
+{
+ struct vpe *v = get_vpe(index);
+
+ if (v == NULL)
+ return NULL;
+
+ return v->shared_ptr;
+}
+EXPORT_SYMBOL(vpe_get_shared);
+
+int vpe_notify(int index, struct vpe_notifications *notify)
+{
+ struct vpe *v = get_vpe(index);
+
+ if (v == NULL)
+ return -1;
+
+ list_add(&notify->list, &v->notify);
+ return 0;
+}
+EXPORT_SYMBOL(vpe_notify);
+
+char *vpe_getcwd(int index)
+{
+ struct vpe *v = get_vpe(index);
+
+ if (v == NULL)
+ return NULL;
+
+ return v->cwd;
+}
+EXPORT_SYMBOL(vpe_getcwd);
+
+module_init(vpe_module_init);
+module_exit(vpe_module_exit);
+MODULE_DESCRIPTION("MIPS VPE Loader");
+MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c
new file mode 100644
index 000000000..c9263b95c
--- /dev/null
+++ b/arch/mips/kernel/watch.c
@@ -0,0 +1,211 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 David Daney
+ */
+
+#include <linux/sched.h>
+
+#include <asm/processor.h>
+#include <asm/watch.h>
+
+/*
+ * Install the watch registers for the current thread. A maximum of
+ * four registers are installed although the machine may have more.
+ */
+void mips_install_watch_registers(struct task_struct *t)
+{
+ struct mips3264_watch_reg_state *watches = &t->thread.watch.mips3264;
+ unsigned int watchhi = MIPS_WATCHHI_G | /* Trap all ASIDs */
+ MIPS_WATCHHI_IRW; /* Clear result bits */
+
+ switch (current_cpu_data.watch_reg_use_cnt) {
+ default:
+ BUG();
+ case 4:
+ write_c0_watchlo3(watches->watchlo[3]);
+ write_c0_watchhi3(watchhi | watches->watchhi[3]);
+ fallthrough;
+ case 3:
+ write_c0_watchlo2(watches->watchlo[2]);
+ write_c0_watchhi2(watchhi | watches->watchhi[2]);
+ fallthrough;
+ case 2:
+ write_c0_watchlo1(watches->watchlo[1]);
+ write_c0_watchhi1(watchhi | watches->watchhi[1]);
+ fallthrough;
+ case 1:
+ write_c0_watchlo0(watches->watchlo[0]);
+ write_c0_watchhi0(watchhi | watches->watchhi[0]);
+ }
+}
+
+/*
+ * Read back the watchhi registers so the user space debugger has
+ * access to the I, R, and W bits. A maximum of four registers are
+ * read although the machine may have more.
+ */
+void mips_read_watch_registers(void)
+{
+ struct mips3264_watch_reg_state *watches =
+ &current->thread.watch.mips3264;
+ unsigned int watchhi_mask = MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW;
+
+ switch (current_cpu_data.watch_reg_use_cnt) {
+ default:
+ BUG();
+ case 4:
+ watches->watchhi[3] = (read_c0_watchhi3() & watchhi_mask);
+ fallthrough;
+ case 3:
+ watches->watchhi[2] = (read_c0_watchhi2() & watchhi_mask);
+ fallthrough;
+ case 2:
+ watches->watchhi[1] = (read_c0_watchhi1() & watchhi_mask);
+ fallthrough;
+ case 1:
+ watches->watchhi[0] = (read_c0_watchhi0() & watchhi_mask);
+ }
+ if (current_cpu_data.watch_reg_use_cnt == 1 &&
+ (watches->watchhi[0] & MIPS_WATCHHI_IRW) == 0) {
+ /* Pathological case of release 1 architecture that
+ * doesn't set the condition bits. We assume that
+ * since we got here, the watch condition was met and
+ * signal that the conditions requested in watchlo
+ * were met. */
+ watches->watchhi[0] |= (watches->watchlo[0] & MIPS_WATCHHI_IRW);
+ }
+}
+
+/*
+ * Disable all watch registers. Although only four registers are
+ * installed, all are cleared to eliminate the possibility of endless
+ * looping in the watch handler.
+ */
+void mips_clear_watch_registers(void)
+{
+ switch (current_cpu_data.watch_reg_count) {
+ default:
+ BUG();
+ case 8:
+ write_c0_watchlo7(0);
+ fallthrough;
+ case 7:
+ write_c0_watchlo6(0);
+ fallthrough;
+ case 6:
+ write_c0_watchlo5(0);
+ fallthrough;
+ case 5:
+ write_c0_watchlo4(0);
+ fallthrough;
+ case 4:
+ write_c0_watchlo3(0);
+ fallthrough;
+ case 3:
+ write_c0_watchlo2(0);
+ fallthrough;
+ case 2:
+ write_c0_watchlo1(0);
+ fallthrough;
+ case 1:
+ write_c0_watchlo0(0);
+ }
+}
+
+void mips_probe_watch_registers(struct cpuinfo_mips *c)
+{
+ unsigned int t;
+
+ if ((c->options & MIPS_CPU_WATCH) == 0)
+ return;
+ /*
+ * Check which of the I,R and W bits are supported, then
+ * disable the register.
+ */
+ write_c0_watchlo0(MIPS_WATCHLO_IRW);
+ back_to_back_c0_hazard();
+ t = read_c0_watchlo0();
+ write_c0_watchlo0(0);
+ c->watch_reg_masks[0] = t & MIPS_WATCHLO_IRW;
+
+ /* Write the mask bits and read them back to determine which
+ * can be used. */
+ c->watch_reg_count = 1;
+ c->watch_reg_use_cnt = 1;
+ t = read_c0_watchhi0();
+ write_c0_watchhi0(t | MIPS_WATCHHI_MASK);
+ back_to_back_c0_hazard();
+ t = read_c0_watchhi0();
+ c->watch_reg_masks[0] |= (t & MIPS_WATCHHI_MASK);
+ if ((t & MIPS_WATCHHI_M) == 0)
+ return;
+
+ write_c0_watchlo1(MIPS_WATCHLO_IRW);
+ back_to_back_c0_hazard();
+ t = read_c0_watchlo1();
+ write_c0_watchlo1(0);
+ c->watch_reg_masks[1] = t & MIPS_WATCHLO_IRW;
+
+ c->watch_reg_count = 2;
+ c->watch_reg_use_cnt = 2;
+ t = read_c0_watchhi1();
+ write_c0_watchhi1(t | MIPS_WATCHHI_MASK);
+ back_to_back_c0_hazard();
+ t = read_c0_watchhi1();
+ c->watch_reg_masks[1] |= (t & MIPS_WATCHHI_MASK);
+ if ((t & MIPS_WATCHHI_M) == 0)
+ return;
+
+ write_c0_watchlo2(MIPS_WATCHLO_IRW);
+ back_to_back_c0_hazard();
+ t = read_c0_watchlo2();
+ write_c0_watchlo2(0);
+ c->watch_reg_masks[2] = t & MIPS_WATCHLO_IRW;
+
+ c->watch_reg_count = 3;
+ c->watch_reg_use_cnt = 3;
+ t = read_c0_watchhi2();
+ write_c0_watchhi2(t | MIPS_WATCHHI_MASK);
+ back_to_back_c0_hazard();
+ t = read_c0_watchhi2();
+ c->watch_reg_masks[2] |= (t & MIPS_WATCHHI_MASK);
+ if ((t & MIPS_WATCHHI_M) == 0)
+ return;
+
+ write_c0_watchlo3(MIPS_WATCHLO_IRW);
+ back_to_back_c0_hazard();
+ t = read_c0_watchlo3();
+ write_c0_watchlo3(0);
+ c->watch_reg_masks[3] = t & MIPS_WATCHLO_IRW;
+
+ c->watch_reg_count = 4;
+ c->watch_reg_use_cnt = 4;
+ t = read_c0_watchhi3();
+ write_c0_watchhi3(t | MIPS_WATCHHI_MASK);
+ back_to_back_c0_hazard();
+ t = read_c0_watchhi3();
+ c->watch_reg_masks[3] |= (t & MIPS_WATCHHI_MASK);
+ if ((t & MIPS_WATCHHI_M) == 0)
+ return;
+
+ /* We use at most 4, but probe and report up to 8. */
+ c->watch_reg_count = 5;
+ t = read_c0_watchhi4();
+ if ((t & MIPS_WATCHHI_M) == 0)
+ return;
+
+ c->watch_reg_count = 6;
+ t = read_c0_watchhi5();
+ if ((t & MIPS_WATCHHI_M) == 0)
+ return;
+
+ c->watch_reg_count = 7;
+ t = read_c0_watchhi6();
+ if ((t & MIPS_WATCHHI_M) == 0)
+ return;
+
+ c->watch_reg_count = 8;
+}
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
new file mode 100644
index 000000000..032b3fca6
--- /dev/null
+++ b/arch/mips/kvm/Kconfig
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# KVM configuration
+#
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
+ help
+ Say Y here to get to see options for using your Linux host to run
+ other operating systems inside virtual machines (guests).
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+config KVM
+ tristate "Kernel-based Virtual Machine (KVM) support"
+ depends on HAVE_KVM
+ depends on MIPS_FP_SUPPORT
+ select EXPORT_UASM
+ select PREEMPT_NOTIFIERS
+ select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ select HAVE_KVM_EVENTFD
+ select HAVE_KVM_VCPU_ASYNC_IOCTL
+ select KVM_MMIO
+ select MMU_NOTIFIER
+ select SRCU
+ help
+ Support for hosting Guest kernels.
+
+choice
+ prompt "Virtualization mode"
+ depends on KVM
+ default KVM_MIPS_TE
+
+config KVM_MIPS_TE
+ bool "Trap & Emulate"
+ depends on CPU_MIPS32_R2
+ help
+ Use trap and emulate to virtualize 32-bit guests in user mode. This
+ does not require any special hardware Virtualization support beyond
+ standard MIPS32 r2 or later, but it does require the guest kernel
+ to be configured with CONFIG_KVM_GUEST=y so that it resides in the
+ user address segment.
+
+config KVM_MIPS_VZ
+ bool "MIPS Virtualization (VZ) ASE"
+ help
+ Use the MIPS Virtualization (VZ) ASE to virtualize guests. This
+ supports running unmodified guest kernels (with CONFIG_KVM_GUEST=n),
+ but requires hardware support.
+
+endchoice
+
+config KVM_MIPS_DYN_TRANS
+ bool "KVM/MIPS: Dynamic binary translation to reduce traps"
+ depends on KVM_MIPS_TE
+ default y
+ help
+ When running in Trap & Emulate mode, patch privileged
+ instructions to reduce the number of traps.
+
+ If unsure, say Y.
+
+config KVM_MIPS_DEBUG_COP0_COUNTERS
+ bool "Maintain counters for COP0 accesses"
+ depends on KVM
+ help
+ Maintain statistics for Guest COP0 accesses.
+ A histogram of COP0 accesses is printed when the VM is
+ shutdown.
+
+ If unsure, say N.
+
+endif # VIRTUALIZATION
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
new file mode 100644
index 000000000..506c4ac0b
--- /dev/null
+++ b/arch/mips/kvm/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for KVM support for MIPS
+#
+
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o)
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
+
+common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
+
+kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
+ interrupt.o stats.o commpage.o \
+ fpu.o
+kvm-objs += hypcall.o
+kvm-objs += mmu.o
+ifdef CONFIG_CPU_LOONGSON64
+kvm-objs += loongson_ipi.o
+endif
+
+ifdef CONFIG_KVM_MIPS_VZ
+kvm-objs += vz.o
+else
+kvm-objs += dyntrans.o
+kvm-objs += trap_emul.o
+endif
+obj-$(CONFIG_KVM) += kvm.o
+obj-y += callback.o tlb.o
diff --git a/arch/mips/kvm/callback.c b/arch/mips/kvm/callback.c
new file mode 100644
index 000000000..d88aa2173
--- /dev/null
+++ b/arch/mips/kvm/callback.c
@@ -0,0 +1,14 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Yann Le Du <ledu@kymasys.com>
+ */
+
+#include <linux/export.h>
+#include <linux/kvm_host.h>
+
+struct kvm_mips_callbacks *kvm_mips_callbacks;
+EXPORT_SYMBOL_GPL(kvm_mips_callbacks);
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
new file mode 100644
index 000000000..5812e6145
--- /dev/null
+++ b/arch/mips/kvm/commpage.c
@@ -0,0 +1,32 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * commpage, currently used for Virtual COP0 registers.
+ * Mapped into the guest kernel @ KVM_GUEST_COMMPAGE_ADDR.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/memblock.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "commpage.h"
+
+void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+
+ /* Specific init values for fields */
+ vcpu->arch.cop0 = &page->cop0;
+}
diff --git a/arch/mips/kvm/commpage.h b/arch/mips/kvm/commpage.h
new file mode 100644
index 000000000..08c5fa2bb
--- /dev/null
+++ b/arch/mips/kvm/commpage.h
@@ -0,0 +1,24 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: commpage: mapped into guest kernel space
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#ifndef __KVM_MIPS_COMMPAGE_H__
+#define __KVM_MIPS_COMMPAGE_H__
+
+struct kvm_mips_commpage {
+ /* COP0 state is mapped into Guest kernel via commpage */
+ struct mips_coproc cop0;
+};
+
+#define KVM_MIPS_COMM_EIDI_OFFSET 0x0
+
+extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c
new file mode 100644
index 000000000..d77b61b3d
--- /dev/null
+++ b/arch/mips/kvm/dyntrans.c
@@ -0,0 +1,143 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/kvm_host.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/memblock.h>
+#include <asm/cacheflush.h>
+
+#include "commpage.h"
+
+/**
+ * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
+ * @vcpu: Virtual CPU.
+ * @opc: PC of instruction to replace.
+ * @replace: Instruction to write
+ */
+static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
+ union mips_instruction replace)
+{
+ unsigned long vaddr = (unsigned long)opc;
+ int err;
+
+retry:
+ /* The GVA page table is still active so use the Linux TLB handlers */
+ kvm_trap_emul_gva_lockless_begin(vcpu);
+ err = put_user(replace.word, opc);
+ kvm_trap_emul_gva_lockless_end(vcpu);
+
+ if (unlikely(err)) {
+ /*
+ * We write protect clean pages in GVA page table so normal
+ * Linux TLB mod handler doesn't silently dirty the page.
+ * It's also possible we raced with a GVA invalidation.
+ * Try to force the page to become dirty.
+ */
+ err = kvm_trap_emul_gva_fault(vcpu, vaddr, true);
+ if (unlikely(err)) {
+ kvm_info("%s: Address unwriteable: %p\n",
+ __func__, opc);
+ return -EFAULT;
+ }
+
+ /*
+ * Try again. This will likely trigger a TLB refill, which will
+ * fetch the new dirty entry from the GVA page table, which
+ * should then succeed.
+ */
+ goto retry;
+ }
+ __local_flush_icache_user_range(vaddr, vaddr + 4);
+
+ return 0;
+}
+
+int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ union mips_instruction nop_inst = { 0 };
+
+ /* Replace the CACHE instruction, with a NOP */
+ return kvm_mips_trans_replace(vcpu, opc, nop_inst);
+}
+
+/*
+ * Address based CACHE instructions are transformed into synci(s). A little
+ * heavy for just D-cache invalidates, but avoids an expensive trap
+ */
+int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ union mips_instruction synci_inst = { 0 };
+
+ synci_inst.i_format.opcode = bcond_op;
+ synci_inst.i_format.rs = inst.i_format.rs;
+ synci_inst.i_format.rt = synci_op;
+ if (cpu_has_mips_r6)
+ synci_inst.i_format.simmediate = inst.spec3_format.simmediate;
+ else
+ synci_inst.i_format.simmediate = inst.i_format.simmediate;
+
+ return kvm_mips_trans_replace(vcpu, opc, synci_inst);
+}
+
+int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ union mips_instruction mfc0_inst = { 0 };
+ u32 rd, sel;
+
+ rd = inst.c0r_format.rd;
+ sel = inst.c0r_format.sel;
+
+ if (rd == MIPS_CP0_ERRCTL && sel == 0) {
+ mfc0_inst.r_format.opcode = spec_op;
+ mfc0_inst.r_format.rd = inst.c0r_format.rt;
+ mfc0_inst.r_format.func = add_op;
+ } else {
+ mfc0_inst.i_format.opcode = lw_op;
+ mfc0_inst.i_format.rt = inst.c0r_format.rt;
+ mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
+ offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
+#ifdef CONFIG_CPU_BIG_ENDIAN
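+ /* 64-bit register storage on big-endian: the low 32-bit word sits 4 bytes in. */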
+ if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
+ mfc0_inst.i_format.simmediate |= 4;
+#endif
+ }
+
+ return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
+}
+
+int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ union mips_instruction mtc0_inst = { 0 };
+ u32 rd, sel;
+
+ rd = inst.c0r_format.rd;
+ sel = inst.c0r_format.sel;
+
+ mtc0_inst.i_format.opcode = sw_op;
+ mtc0_inst.i_format.rt = inst.c0r_format.rt;
+ mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
+ offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
+ mtc0_inst.i_format.simmediate |= 4;
+#endif
+
+ return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
+}
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
new file mode 100644
index 000000000..d70c4f8e1
--- /dev/null
+++ b/arch/mips/kvm/emulate.c
@@ -0,0 +1,3292 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Instruction/Exception emulation
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/ktime.h>
+#include <linux/kvm_host.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/memblock.h>
+#include <linux/random.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/cacheops.h>
+#include <asm/cpu-info.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/inst.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#include "interrupt.h"
+#include "commpage.h"
+
+#include "trace.h"
+
+/*
+ * Compute the return address and emulate the branch, if required. This
+ * function should only be called when the faulting instruction is in a
+ * branch delay slot (i.e. Cause.BD is set).
+ */
+static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
+ unsigned long *out)
+{
+ unsigned int dspcontrol;
+ union mips_instruction insn;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ long epc = instpc;
+ long nextpc;
+ int err;
+
+ if (epc & 3) {
+ kvm_err("%s: unaligned epc\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Read the instruction */
+ err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
+ if (err)
+ return err;
+
+ switch (insn.i_format.opcode) {
+ /* jr and jalr are in r_format. */
+ case spec_op:
+ switch (insn.r_format.func) {
+ case jalr_op:
+ arch->gprs[insn.r_format.rd] = epc + 8;
+ fallthrough;
+ case jr_op:
+ nextpc = arch->gprs[insn.r_format.rs];
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+
+ /*
+ * This group contains:
+ * bltz_op, bgez_op, bltzl_op, bgezl_op,
+ * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+ */
+ case bcond_op:
+ switch (insn.i_format.rt) {
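+ /* Taken: target = delay-slot PC + (offset << 2); not taken: skip branch and delay slot */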
+ case bltz_op:
+ case bltzl_op:
+ if ((long)arch->gprs[insn.i_format.rs] < 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case bgez_op:
+ case bgezl_op:
+ if ((long)arch->gprs[insn.i_format.rs] >= 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case bltzal_op:
+ case bltzall_op:
+ arch->gprs[31] = epc + 8;
+ if ((long)arch->gprs[insn.i_format.rs] < 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case bgezal_op:
+ case bgezall_op:
+ arch->gprs[31] = epc + 8;
+ if ((long)arch->gprs[insn.i_format.rs] >= 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+ case bposge32_op:
+ if (!cpu_has_dsp) {
+ kvm_err("%s: DSP branch but not DSP ASE\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ dspcontrol = rddsp(0x01);
+
+ if (dspcontrol >= 32)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+
+ /* These are unconditional and in j_format. */
+ case jal_op:
+ arch->gprs[31] = instpc + 8;
+ fallthrough;
+ case j_op:
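+ /* Target = top 4 bits of the delay-slot address | (26-bit index << 2) */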
+ epc += 4;
+ epc >>= 28;
+ epc <<= 28;
+ epc |= (insn.j_format.target << 2);
+ nextpc = epc;
+ break;
+
+ /* These are conditional and in i_format. */
+ case beq_op:
+ case beql_op:
+ if (arch->gprs[insn.i_format.rs] ==
+ arch->gprs[insn.i_format.rt])
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case bne_op:
+ case bnel_op:
+ if (arch->gprs[insn.i_format.rs] !=
+ arch->gprs[insn.i_format.rt])
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case blez_op: /* POP06 */
+#ifndef CONFIG_CPU_MIPSR6
+ case blezl_op: /* removed in R6 */
+#endif
+ if (insn.i_format.rt != 0)
+ goto compact_branch;
+ if ((long)arch->gprs[insn.i_format.rs] <= 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case bgtz_op: /* POP07 */
+#ifndef CONFIG_CPU_MIPSR6
+ case bgtzl_op: /* removed in R6 */
+#endif
+ if (insn.i_format.rt != 0)
+ goto compact_branch;
+ if ((long)arch->gprs[insn.i_format.rs] > 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ /* And now the FPA/cp1 branch instructions. */
+ case cop1_op:
+ kvm_err("%s: unsupported cop1_op\n", __func__);
+ return -EINVAL;
+
+#ifdef CONFIG_CPU_MIPSR6
+ /* R6 added the following compact branches with forbidden slots */
+ case blezl_op: /* POP26 */
+ case bgtzl_op: /* POP27 */
+ /* only rt == 0 isn't compact branch */
+ if (insn.i_format.rt != 0)
+ goto compact_branch;
+ return -EINVAL;
+ case pop10_op:
+ case pop30_op:
+ /* only rs == rt == 0 is reserved, rest are compact branches */
+ if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
+ goto compact_branch;
+ return -EINVAL;
+ case pop66_op:
+ case pop76_op:
+ /* only rs == 0 isn't compact branch */
+ if (insn.i_format.rs != 0)
+ goto compact_branch;
+ return -EINVAL;
+compact_branch:
+ /*
+ * If we've hit an exception on the forbidden slot, then
+ * the branch must not have been taken.
+ */
+ epc += 8;
+ nextpc = epc;
+ break;
+#else
+compact_branch:
+ /* Fall through - Compact branches not supported before R6 */
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ *out = nextpc;
+ return 0;
+}
+
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
+{
+ int err;
+
+ if (cause & CAUSEF_BD) {
+ err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
+ &vcpu->arch.pc);
+ if (err)
+ return EMULATE_FAIL;
+ } else {
+ vcpu->arch.pc += 4;
+ }
+
+ kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+
+ return EMULATE_DONE;
+}
+
+/**
+ * kvm_get_badinstr() - Get bad instruction encoding.
+ * @opc: Guest pointer to faulting instruction.
+ * @vcpu: KVM VCPU information.
+ *
+ * Gets the instruction encoding of the faulting instruction, using the saved
+ * BadInstr register value if it exists, otherwise falling back to reading guest
+ * memory at @opc.
+ *
+ * Returns:	0 on success, with the instruction encoding written to @out,
+ *		else an error code.
+ */
+int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
+{
+ if (cpu_has_badinstr) {
+ *out = vcpu->arch.host_cp0_badinstr;
+ return 0;
+ } else {
+ return kvm_get_inst(opc, vcpu, out);
+ }
+}
+
+/**
+ * kvm_get_badinstrp() - Get bad prior instruction encoding.
+ * @opc: Guest pointer to prior faulting instruction.
+ * @vcpu: KVM VCPU information.
+ *
+ * Gets the instruction encoding of the prior faulting instruction (the branch
+ * containing the delay slot which faulted), using the saved BadInstrP register
+ * value if it exists, otherwise falling back to reading guest memory at @opc.
+ *
+ * Returns:	0 on success, with the instruction encoding written to @out,
+ *		else an error code.
+ */
+int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
+{
+ if (cpu_has_badinstrp) {
+ *out = vcpu->arch.host_cp0_badinstrp;
+ return 0;
+ } else {
+ return kvm_get_inst(opc, vcpu, out);
+ }
+}
+
+/**
+ * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
+ * @vcpu: Virtual CPU.
+ *
+ * Returns: 1 if the CP0_Count timer is disabled by either the guest
+ * CP0_Cause.DC bit or the count_ctl.DC bit.
+ * 0 otherwise (in which case CP0_Count timer is running).
+ */
+int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+ return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
+ (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
+}
+
+/**
+ * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
+ *
+ * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ */
+static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
+{
+ s64 now_ns, periods;
+ u64 delta;
+
+ now_ns = ktime_to_ns(now);
+ delta = now_ns + vcpu->arch.count_dyn_bias;
+
+ if (delta >= vcpu->arch.count_period) {
+ /* If delta is out of safe range the bias needs adjusting */
+ periods = div64_s64(now_ns, vcpu->arch.count_period);
+ vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
+ /* Recalculate delta with new bias */
+ delta = now_ns + vcpu->arch.count_dyn_bias;
+ }
+
+ /*
+ * We've ensured that:
+ * delta < count_period
+ *
+ * Therefore the intermediate delta*count_hz will never overflow since
+ * at the boundary condition:
+ * delta = count_period
+ * delta = NSEC_PER_SEC * 2^32 / count_hz
+ * delta * count_hz = NSEC_PER_SEC * 2^32
+ */
+ return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
+}
+
+/**
+ * kvm_mips_count_time() - Get effective current time.
+ * @vcpu: Virtual CPU.
+ *
+ * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
+ * except when the master disable bit is set in count_ctl, in which case it is
+ * count_resume, i.e. the time that the count was disabled.
+ *
+ * Returns: Effective monotonic ktime for CP0_Count.
+ */
+static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
+{
+ if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
+ return vcpu->arch.count_resume;
+
+ return ktime_get();
+}
+
+/**
+ * kvm_mips_read_count_running() - Read the current count value as if running.
+ * @vcpu: Virtual CPU.
+ * @now: Kernel time to read CP0_Count at.
+ *
+ * Returns the current guest CP0_Count register value at time @now, and queues
+ * the timer interrupt if it has become pending and hasn't been handled yet.
+ *
+ * Returns: The current value of the guest CP0_Count register.
+ */
+static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ ktime_t expires, threshold;
+ u32 count, compare;
+ int running;
+
+ /* Calculate the biased and scaled guest CP0_Count */
+ count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+ compare = kvm_read_c0_guest_compare(cop0);
+
+ /*
+ * Find whether CP0_Count has reached the closest timer interrupt. If
+ * not, we shouldn't inject it.
+ */
+ if ((s32)(count - compare) < 0)
+ return count;
+
+ /*
+ * The CP0_Count we're going to return has already reached the closest
+ * timer interrupt. Quickly check if it really is a new interrupt by
+ * looking at whether the interval until the hrtimer expiry time is
+ * less than 1/4 of the timer period.
+ */
+ expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
+ threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
+ if (ktime_before(expires, threshold)) {
+ /*
+ * Cancel it while we handle it so there's no chance of
+ * interference with the timeout handler.
+ */
+ running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+ /* Nothing should be waiting on the timeout */
+ kvm_mips_callbacks->queue_timer_int(vcpu);
+
+ /*
+ * Restart the timer if it was running based on the expiry time
+ * we read, so that we don't push it back 2 periods.
+ */
+ if (running) {
+ expires = ktime_add_ns(expires,
+ vcpu->arch.count_period);
+ hrtimer_start(&vcpu->arch.comparecount_timer, expires,
+ HRTIMER_MODE_ABS);
+ }
+ }
+
+ return count;
+}
+
+/**
+ * kvm_mips_read_count() - Read the current count value.
+ * @vcpu: Virtual CPU.
+ *
+ * Read the current guest CP0_Count value, taking into account whether the timer
+ * is stopped.
+ *
+ * Returns: The current guest CP0_Count value.
+ */
+u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+ /* If count disabled just read static copy of count */
+ if (kvm_mips_count_disabled(vcpu))
+ return kvm_read_c0_guest_count(cop0);
+
+ return kvm_mips_read_count_running(vcpu, ktime_get());
+}
+
+/**
+ * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
+ * @vcpu: Virtual CPU.
+ * @count: Output pointer for CP0_Count value at point of freeze.
+ *
+ * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
+ * at the point it was frozen. It is guaranteed that any pending interrupts at
+ * the point it was frozen are handled, and none after that point.
+ *
+ * This is useful where the time/CP0_Count is needed in the calculation of the
+ * new parameters.
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ *
+ * Returns: The ktime at the point of freeze.
+ */
+ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
+{
+ ktime_t now;
+
+ /* stop hrtimer before finding time */
+ hrtimer_cancel(&vcpu->arch.comparecount_timer);
+ now = ktime_get();
+
+ /* find count at this point and handle pending hrtimer */
+ *count = kvm_mips_read_count_running(vcpu, now);
+
+ return now;
+}
+
+/**
+ * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
+ * @vcpu: Virtual CPU.
+ * @now: ktime at point of resume.
+ * @count: CP0_Count at point of resume.
+ *
+ * Resumes the timer and updates the timer expiry based on @now and @count.
+ * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
+ * parameters need to be changed.
+ *
+ * It is guaranteed that a timer interrupt immediately after resume will be
+ * handled, but not if CP0_Compare is exactly at @count. That case is already
+ * handled by kvm_mips_freeze_hrtimer().
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ */
+static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
+ ktime_t now, u32 count)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ u32 compare;
+ u64 delta;
+ ktime_t expire;
+
+ /* Calculate timeout (wrap 0 to 2^32) */
+ compare = kvm_read_c0_guest_compare(cop0);
+ delta = (u64)(u32)(compare - count - 1) + 1;
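+ /* compare == count gives a full 2^32 tick period, hence the -1 then +1 */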
+ delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
+ expire = ktime_add_ns(now, delta);
+
+ /* Update hrtimer to use new timeout */
+ hrtimer_cancel(&vcpu->arch.comparecount_timer);
+ hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
+}
+
+/**
+ * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
+ * @vcpu: Virtual CPU.
+ * @before: Time before Count was saved, lower bound of drift calculation.
+ * @count: CP0_Count at point of restore.
+ * @min_drift: Minimum amount of drift permitted before correction.
+ * Must be <= 0.
+ *
+ * Restores the timer from a particular @count, accounting for drift. This can
+ * be used in conjunction with kvm_mips_freeze_hrtimer() when a hardware timer is
+ * to be used for a period of time, but the exact ktime corresponding to the
+ * final Count that must be restored is not known.
+ *
+ * It is guaranteed that a timer interrupt immediately after restore will be
+ * handled, but not if CP0_Compare is exactly at @count. That case should
+ * already be handled when the hardware timer state is saved.
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
+ * stopped).
+ *
+ * Returns: Amount of correction to count_bias due to drift.
+ */
+int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
+ u32 count, int min_drift)
+{
+ ktime_t now, count_time;
+ u32 now_count, before_count;
+ u64 delta;
+ int drift, ret = 0;
+
+ /* Calculate expected count at before */
+ before_count = vcpu->arch.count_bias +
+ kvm_mips_ktime_to_count(vcpu, before);
+
+ /*
+ * Detect significantly negative drift, where count is lower than
+ * expected. Some negative drift is expected when hardware counter is
+ * set after kvm_mips_freeze_timer(), and it is harmless to allow the
+ * time to jump forwards a little, within reason. If the drift is too
+ * significant, adjust the bias to avoid a big Guest.CP0_Count jump.
+ */
+ drift = count - before_count;
+ if (drift < min_drift) {
+ count_time = before;
+ vcpu->arch.count_bias += drift;
+ ret = drift;
+ goto resume;
+ }
+
+ /* Calculate expected count right now */
+ now = ktime_get();
+ now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+
+ /*
+ * Detect positive drift, where count is higher than expected, and
+ * adjust the bias to avoid guest time going backwards.
+ */
+ drift = count - now_count;
+ if (drift > 0) {
+ count_time = now;
+ vcpu->arch.count_bias += drift;
+ ret = drift;
+ goto resume;
+ }
+
+ /* Subtract nanosecond delta to find ktime when count was read */
+ delta = (u64)(u32)(now_count - count);
+ delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
+ count_time = ktime_sub_ns(now, delta);
+
+resume:
+ /* Resume using the calculated ktime */
+ kvm_mips_resume_hrtimer(vcpu, count_time, count);
+ return ret;
+}
+
+/**
+ * kvm_mips_write_count() - Modify the count and update timer.
+ * @vcpu: Virtual CPU.
+ * @count: Guest CP0_Count value to set.
+ *
+ * Sets the CP0_Count value and updates the timer accordingly.
+ */
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ ktime_t now;
+
+ /* Calculate bias */
+ now = kvm_mips_count_time(vcpu);
+ vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
+
+ if (kvm_mips_count_disabled(vcpu))
+ /* The timer's disabled, adjust the static count */
+ kvm_write_c0_guest_count(cop0, count);
+ else
+ /* Update timeout */
+ kvm_mips_resume_hrtimer(vcpu, now, count);
+}
+
+/**
+ * kvm_mips_init_count() - Initialise timer.
+ * @vcpu: Virtual CPU.
+ * @count_hz: Frequency of timer.
+ *
+ * Initialise the timer to the specified frequency, zero it, and set it going if
+ * it's enabled.
+ */
+void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
+{
+ vcpu->arch.count_hz = count_hz;
+ vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
+ vcpu->arch.count_dyn_bias = 0;
+
+ /* Starting at 0 */
+ kvm_mips_write_count(vcpu, 0);
+}
+
+/**
+ * kvm_mips_set_count_hz() - Update the frequency of the timer.
+ * @vcpu: Virtual CPU.
+ * @count_hz: Frequency of CP0_Count timer in Hz.
+ *
+ * Change the frequency of the CP0_Count timer. This is done atomically so that
+ * CP0_Count is continuous and no timer interrupt is lost.
+ *
+ * Returns: -EINVAL if @count_hz is out of range.
+ * 0 on success.
+ */
+int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ int dc;
+ ktime_t now;
+ u32 count;
+
+ /* ensure the frequency is in a sensible range... */
+ if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
+ return -EINVAL;
+ /* ... and has actually changed */
+ if (vcpu->arch.count_hz == count_hz)
+ return 0;
+
+ /* Safely freeze timer so we can keep it continuous */
+ dc = kvm_mips_count_disabled(vcpu);
+ if (dc) {
+ now = kvm_mips_count_time(vcpu);
+ count = kvm_read_c0_guest_count(cop0);
+ } else {
+ now = kvm_mips_freeze_hrtimer(vcpu, &count);
+ }
+
+ /* Update the frequency */
+ vcpu->arch.count_hz = count_hz;
+ vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
+ vcpu->arch.count_dyn_bias = 0;
+
+ /* Calculate adjusted bias so dynamic count is unchanged */
+ vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
+
+ /* Update and resume hrtimer */
+ if (!dc)
+ kvm_mips_resume_hrtimer(vcpu, now, count);
+ return 0;
+}
+
+/**
+ * kvm_mips_write_compare() - Modify compare and update timer.
+ * @vcpu: Virtual CPU.
+ * @compare: New CP0_Compare value.
+ * @ack: Whether to acknowledge timer interrupt.
+ *
+ * Update CP0_Compare to a new value and update the timeout.
+ * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
+ * any pending timer interrupt is preserved.
+ */
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ int dc;
+ u32 old_compare = kvm_read_c0_guest_compare(cop0);
+ s32 delta = compare - old_compare;
+ u32 cause;
+ ktime_t now = ktime_set(0, 0); /* silence bogus GCC warning */
+ u32 count;
+
+ /* if unchanged, must just be an ack */
+ if (old_compare == compare) {
+ if (!ack)
+ return;
+ kvm_mips_callbacks->dequeue_timer_int(vcpu);
+ kvm_write_c0_guest_compare(cop0, compare);
+ return;
+ }
+
+ /*
+ * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted
+ * too to prevent guest CP0_Count hitting guest CP0_Compare.
+ *
+ * The new GTOffset corresponds to the new value of CP0_Compare, and is
+ * set prior to it being written into the guest context. We disable
+ * preemption until the new value is written to prevent restore of a
+ * GTOffset corresponding to the old CP0_Compare value.
+ */
+ if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0) {
+ preempt_disable();
+ write_c0_gtoffset(compare - read_c0_count());
+ back_to_back_c0_hazard();
+ }
+
+ /* freeze_hrtimer() takes care of timer interrupts <= count */
+ dc = kvm_mips_count_disabled(vcpu);
+ if (!dc)
+ now = kvm_mips_freeze_hrtimer(vcpu, &count);
+
+ if (ack)
+ kvm_mips_callbacks->dequeue_timer_int(vcpu);
+ else if (IS_ENABLED(CONFIG_KVM_MIPS_VZ))
+ /*
+ * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
+ * preserve guest CP0_Cause.TI if we don't want to ack it.
+ */
+ cause = kvm_read_c0_guest_cause(cop0);
+
+ kvm_write_c0_guest_compare(cop0, compare);
+
+ if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
+ if (delta > 0)
+ preempt_enable();
+
+ back_to_back_c0_hazard();
+
+ if (!ack && cause & CAUSEF_TI)
+ kvm_write_c0_guest_cause(cop0, cause);
+ }
+
+ /* resume_hrtimer() takes care of timer interrupts > count */
+ if (!dc)
+ kvm_mips_resume_hrtimer(vcpu, now, count);
+
+ /*
+ * If guest CP0_Compare is moving backward, we delay CP0_GTOffset change
+ * until after the new CP0_Compare is written, otherwise new guest
+ * CP0_Count could hit new guest CP0_Compare.
+ */
+ if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta <= 0)
+ write_c0_gtoffset(compare - read_c0_count());
+}
+
+/**
+ * kvm_mips_count_disable() - Disable count.
+ * @vcpu: Virtual CPU.
+ *
+ * Disable the CP0_Count timer. A timer interrupt on or before the final stop
+ * time will be handled but not after.
+ *
+ * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
+ * count_ctl.DC has been set (count disabled).
+ *
+ * Returns: The time that the timer was stopped.
+ */
+static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ u32 count;
+ ktime_t now;
+
+ /* Stop hrtimer */
+ hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+ /* Set the static count from the dynamic count, handling pending TI */
+ now = ktime_get();
+ count = kvm_mips_read_count_running(vcpu, now);
+ kvm_write_c0_guest_count(cop0, count);
+
+ return now;
+}
+
+/**
+ * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
+ * @vcpu: Virtual CPU.
+ *
+ * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
+ * before the final stop time will be handled if the timer isn't disabled by
+ * count_ctl.DC, but not after.
+ *
+ * Assumes CP0_Cause.DC is clear (count enabled).
+ */
+void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+ kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
+ if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
+ kvm_mips_count_disable(vcpu);
+}
+
+/**
+ * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
+ * @vcpu: Virtual CPU.
+ *
+ * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
+ * the start time will be handled if the timer isn't disabled by count_ctl.DC,
+ * potentially before even returning, so the caller should be careful with
+ * ordering of CP0_Cause modifications so as not to lose it.
+ *
+ * Assumes CP0_Cause.DC is set (count disabled).
+ */
+void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ u32 count;
+
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
+
+ /*
+ * Set the dynamic count to match the static count.
+ * This starts the hrtimer if count_ctl.DC allows it.
+ * Otherwise it conveniently updates the biases.
+ */
+ count = kvm_read_c0_guest_count(cop0);
+ kvm_mips_write_count(vcpu, count);
+}
+
+/**
+ * kvm_mips_set_count_ctl() - Update the count control KVM register.
+ * @vcpu: Virtual CPU.
+ * @count_ctl: Count control register new value.
+ *
+ * Set the count control KVM register. The timer is updated accordingly.
+ *
+ * Returns: -EINVAL if reserved bits are set.
+ * 0 on success.
+ */
+int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ s64 changed = count_ctl ^ vcpu->arch.count_ctl;
+ s64 delta;
+ ktime_t expire, now;
+ u32 count, compare;
+
+ /* Only allow defined bits to be changed */
+ if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
+ return -EINVAL;
+
+ /* Apply new value */
+ vcpu->arch.count_ctl = count_ctl;
+
+ /* Master CP0_Count disable */
+ if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
+ /* Is CP0_Cause.DC already disabling CP0_Count? */
+ if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
+ if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
+ /* Just record the current time */
+ vcpu->arch.count_resume = ktime_get();
+ } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
+ /* disable timer and record current time */
+ vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
+ } else {
+ /*
+ * Calculate timeout relative to static count at resume
+ * time (wrap 0 to 2^32).
+ */
+ count = kvm_read_c0_guest_count(cop0);
+ compare = kvm_read_c0_guest_compare(cop0);
+ delta = (u64)(u32)(compare - count - 1) + 1;
+ delta = div_u64(delta * NSEC_PER_SEC,
+ vcpu->arch.count_hz);
+ expire = ktime_add_ns(vcpu->arch.count_resume, delta);
+
+ /* Handle pending interrupt */
+ now = ktime_get();
+ if (ktime_compare(now, expire) >= 0)
+ /* Nothing should be waiting on the timeout */
+ kvm_mips_callbacks->queue_timer_int(vcpu);
+
+ /* Resume hrtimer without changing bias */
+ count = kvm_mips_read_count_running(vcpu, now);
+ kvm_mips_resume_hrtimer(vcpu, now, count);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * kvm_mips_set_count_resume() - Update the count resume KVM register.
+ * @vcpu: Virtual CPU.
+ * @count_resume: Count resume register new value.
+ *
+ * Set the count resume KVM register.
+ *
+ * Returns: -EINVAL if out of valid range (0..now).
+ * 0 on success.
+ */
+int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
+{
+ /*
+ * It doesn't make sense for the resume time to be in the future, as it
+ * would be possible for the next interrupt to be more than a full
+ * period in the future.
+ */
+ if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
+ return -EINVAL;
+
+ vcpu->arch.count_resume = ns_to_ktime(count_resume);
+ return 0;
+}
+
+/**
+ * kvm_mips_count_timeout() - Push timer forward on timeout.
+ * @vcpu: Virtual CPU.
+ *
+ * Handle an hrtimer event by pushing the hrtimer forward a period.
+ *
+ * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
+ */
+enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
+{
+ /* Add the Count period to the current expiry time */
+ hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
+ vcpu->arch.count_period);
+ return HRTIMER_RESTART;
+}
+
+enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ enum emulation_result er = EMULATE_DONE;
+
+ if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+ kvm_clear_c0_guest_status(cop0, ST0_ERL);
+ vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+ } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+ kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
+ kvm_read_c0_guest_epc(cop0));
+ kvm_clear_c0_guest_status(cop0, ST0_EXL);
+ vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
+
+ } else {
+ kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+ vcpu->arch.pc);
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
+{
+ kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
+ vcpu->arch.pending_exceptions);
+
+ ++vcpu->stat.wait_exits;
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
+ if (!vcpu->arch.pending_exceptions) {
+ kvm_vz_lose_htimer(vcpu);
+ vcpu->arch.wait = 1;
+ kvm_vcpu_block(vcpu);
+
+ /*
+ * If we are runnable, then definitely go off to user space to
+ * check if any I/O interrupts are pending.
+ */
+ if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+ }
+ }
+
+ return EMULATE_DONE;
+}
+
+static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
+ unsigned long entryhi)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
+ int cpu, i;
+ u32 nasid = entryhi & KVM_ENTRYHI_ASID;
+
+ if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) {
+ trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) &
+ KVM_ENTRYHI_ASID, nasid);
+
+ /*
+ * Flush entries from the GVA page tables.
+ * Guest user page table will get flushed lazily on re-entry to
+ * guest user if the guest ASID actually changes.
+ */
+ kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN);
+
+ /*
+ * Regenerate/invalidate kernel MMU context.
+ * The user MMU context will be regenerated lazily on re-entry
+ * to guest user if the guest ASID actually changes.
+ */
+ preempt_disable();
+ cpu = smp_processor_id();
+ get_new_mmu_context(kern_mm);
+ for_each_possible_cpu(i)
+ if (i != cpu)
+ set_cpu_context(i, kern_mm, 0);
+ preempt_enable();
+ }
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+}
+
+enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_mips_tlb *tlb;
+ unsigned long pc = vcpu->arch.pc;
+ int index;
+
+ index = kvm_read_c0_guest_index(cop0);
+ if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+ /* UNDEFINED */
+ kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index);
+ index &= KVM_MIPS_GUEST_TLB_SIZE - 1;
+ }
+
+ tlb = &vcpu->arch.guest_tlb[index];
+ kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask);
+ kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]);
+ kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]);
+ kvm_mips_change_entryhi(vcpu, tlb->tlb_hi);
+
+ return EMULATE_DONE;
+}
+
+/**
+ * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
+ * @vcpu: VCPU with changed mappings.
+ * @tlb: TLB entry being removed.
+ *
+ * This is called to indicate a single change in guest MMU mappings, so that we
+ * can arrange TLB flushes on this and other CPUs.
+ */
+static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_tlb *tlb)
+{
+ struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
+ struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
+ int cpu, i;
+ bool user;
+
+ /* No need to flush for entries which are already invalid */
+ if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
+ return;
+ /* Don't touch host kernel page tables or TLB mappings */
+ if ((unsigned long)tlb->tlb_hi > 0x7fffffff)
+ return;
+ /* User address space doesn't need flushing for KSeg2/3 changes */
+ user = tlb->tlb_hi < KVM_GUEST_KSEG0;
+
+ preempt_disable();
+
+ /* Invalidate page table entries */
+ kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user);
+
+ /*
+ * Probe the shadow host TLB for the entry being overwritten, if one
+ * matches, invalidate it
+ */
+ kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true);
+
+ /* Invalidate the whole ASID on other CPUs */
+ cpu = smp_processor_id();
+ for_each_possible_cpu(i) {
+ if (i == cpu)
+ continue;
+ if (user)
+ set_cpu_context(i, user_mm, 0);
+ set_cpu_context(i, kern_mm, 0);
+ }
+
+ preempt_enable();
+}
+
+/* Write Guest TLB Entry @ Index */
+enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ int index = kvm_read_c0_guest_index(cop0);
+ struct kvm_mips_tlb *tlb = NULL;
+ unsigned long pc = vcpu->arch.pc;
+
+ if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+ kvm_debug("%s: illegal index: %d\n", __func__, index);
+ kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+ pc, index, kvm_read_c0_guest_entryhi(cop0),
+ kvm_read_c0_guest_entrylo0(cop0),
+ kvm_read_c0_guest_entrylo1(cop0),
+ kvm_read_c0_guest_pagemask(cop0));
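+ /* Writes to an out-of-range index are UNDEFINED, so clamp it to a valid entry */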
+ index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
+ }
+
+ tlb = &vcpu->arch.guest_tlb[index];
+
+ kvm_mips_invalidate_guest_tlb(vcpu, tlb);
+
+ tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+ tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+ tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
+ tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
+
+ kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+ pc, index, kvm_read_c0_guest_entryhi(cop0),
+ kvm_read_c0_guest_entrylo0(cop0),
+ kvm_read_c0_guest_entrylo1(cop0),
+ kvm_read_c0_guest_pagemask(cop0));
+
+ return EMULATE_DONE;
+}
+
+/* Write Guest TLB Entry @ Random Index */
+enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_mips_tlb *tlb = NULL;
+ unsigned long pc = vcpu->arch.pc;
+ int index;
+
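+ /* Pick a pseudo-random victim entry in the guest TLB */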
+ index = prandom_u32_max(KVM_MIPS_GUEST_TLB_SIZE);
+ tlb = &vcpu->arch.guest_tlb[index];
+
+ kvm_mips_invalidate_guest_tlb(vcpu, tlb);
+
+ tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+ tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+ tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
+ tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
+
+ kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+ pc, index, kvm_read_c0_guest_entryhi(cop0),
+ kvm_read_c0_guest_entrylo0(cop0),
+ kvm_read_c0_guest_entrylo1(cop0));
+
+ return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ long entryhi = kvm_read_c0_guest_entryhi(cop0);
+ unsigned long pc = vcpu->arch.pc;
+ int index = -1;
+
+ index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+
+ kvm_write_c0_guest_index(cop0, index);
+
+ kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
+ index);
+
+ return EMULATE_DONE;
+}
+
+/**
+ * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
+ * @vcpu: Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config1 CP0
+ * register, by userland (currently read-only to the guest).
+ */
+unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
+{
+ unsigned int mask = 0;
+
+ /* Permit FPU to be present if FPU is supported */
+ if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
+ mask |= MIPS_CONF1_FP;
+
+ return mask;
+}
+
+/**
+ * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
+ * @vcpu: Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config3 CP0
+ * register, by userland (currently read-only to the guest).
+ */
+unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
+{
+ /* Config4 and ULRI are optional */
+ unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;
+
+ /* Permit MSA to be present if MSA is supported */
+ if (kvm_mips_guest_can_have_msa(&vcpu->arch))
+ mask |= MIPS_CONF3_MSA;
+
+ return mask;
+}
+
+/**
+ * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
+ * @vcpu: Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config4 CP0
+ * register, by userland (currently read-only to the guest).
+ */
+unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
+{
+ /* Config5 is optional */
+ unsigned int mask = MIPS_CONF_M;
+
+ /* KScrExist */
+ mask |= 0xfc << MIPS_CONF4_KSCREXIST_SHIFT;
+
+ return mask;
+}
+
+/**
+ * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
+ * @vcpu: Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config5 CP0
+ * register, by the guest itself.
+ */
+unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
+{
+ unsigned int mask = 0;
+
+ /* Permit MSAEn changes if MSA supported and enabled */
+ if (kvm_mips_guest_has_msa(&vcpu->arch))
+ mask |= MIPS_CONF5_MSAEN;
+
+ /*
+ * Permit guest FPU mode changes if FPU is enabled and the relevant
+ * feature exists according to FIR register.
+ */
+ if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
+ if (cpu_has_fre)
+ mask |= MIPS_CONF5_FRE;
+ /* We don't support UFR or UFE */
+ }
+
+ return mask;
+}
+
+enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
+ u32 *opc, u32 cause,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ enum emulation_result er = EMULATE_DONE;
+ u32 rt, rd, sel;
+ unsigned long curr_pc;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ if (inst.co_format.co) {
+ switch (inst.co_format.func) {
+ case tlbr_op: /* Read indexed TLB entry */
+ er = kvm_mips_emul_tlbr(vcpu);
+ break;
+ case tlbwi_op: /* Write indexed */
+ er = kvm_mips_emul_tlbwi(vcpu);
+ break;
+ case tlbwr_op: /* Write random */
+ er = kvm_mips_emul_tlbwr(vcpu);
+ break;
+ case tlbp_op: /* TLB Probe */
+ er = kvm_mips_emul_tlbp(vcpu);
+ break;
+ case rfe_op:
+ kvm_err("!!!COP0_RFE!!!\n");
+ break;
+ case eret_op:
+ er = kvm_mips_emul_eret(vcpu);
+ goto dont_update_pc;
+ case wait_op:
+ er = kvm_mips_emul_wait(vcpu);
+ break;
+ case hypcall_op:
+ er = kvm_mips_emul_hypcall(vcpu, inst);
+ break;
+ }
+ } else {
+ rt = inst.c0r_format.rt;
+ rd = inst.c0r_format.rd;
+ sel = inst.c0r_format.sel;
+
+ switch (inst.c0r_format.rs) {
+ case mfc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ cop0->stat[rd][sel]++;
+#endif
+ /* Get reg */
+ if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+ vcpu->arch.gprs[rt] =
+ (s32)kvm_mips_read_count(vcpu);
+ } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
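+ /* ErrCtl reads back as zero */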
+ vcpu->arch.gprs[rt] = 0x0;
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+ } else {
+ vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+ }
+
+ trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
+ KVM_TRACE_COP0(rd, sel),
+ vcpu->arch.gprs[rt]);
+ break;
+
+ case dmfc_op:
+ vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+
+ trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
+ KVM_TRACE_COP0(rd, sel),
+ vcpu->arch.gprs[rt]);
+ break;
+
+ case mtc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ cop0->stat[rd][sel]++;
+#endif
+ trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
+ KVM_TRACE_COP0(rd, sel),
+ vcpu->arch.gprs[rt]);
+
+ if ((rd == MIPS_CP0_TLB_INDEX)
+ && (vcpu->arch.gprs[rt] >=
+ KVM_MIPS_GUEST_TLB_SIZE)) {
+ kvm_err("Invalid TLB Index: %ld",
+ vcpu->arch.gprs[rt]);
+ er = EMULATE_FAIL;
+ break;
+ }
+ if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
+ /*
+ * Preserve core number, and keep the exception
+ * base in guest KSeg0.
+ */
+ kvm_change_c0_guest_ebase(cop0, 0x1ffff000,
+ vcpu->arch.gprs[rt]);
+ } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
+ kvm_mips_change_entryhi(vcpu,
+ vcpu->arch.gprs[rt]);
+ }
+ /* Are we writing to COUNT */
+ else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+ kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
+ goto done;
+ } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
+ /* If we are writing to COMPARE */
+ /* Clear pending timer interrupt, if any */
+ kvm_mips_write_compare(vcpu,
+ vcpu->arch.gprs[rt],
+ true);
+ } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
+ unsigned int old_val, val, change;
+
+ old_val = kvm_read_c0_guest_status(cop0);
+ val = vcpu->arch.gprs[rt];
+ change = val ^ old_val;
+
+ /* Make sure that the NMI bit is never set */
+ val &= ~ST0_NMI;
+
+ /*
+ * Don't allow CU1 or FR to be set unless FPU
+ * capability enabled and exists in guest
+ * configuration.
+ */
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ val &= ~(ST0_CU1 | ST0_FR);
+
+ /*
+ * Also don't allow FR to be set if host doesn't
+ * support it.
+ */
+ if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+ val &= ~ST0_FR;
+
+
+ /* Handle changes in FPU mode */
+ preempt_disable();
+
+ /*
+ * FPU and Vector register state is made
+ * UNPREDICTABLE by a change of FR, so don't
+ * even bother saving it.
+ */
+ if (change & ST0_FR)
+ kvm_drop_fpu(vcpu);
+
+ /*
+ * If MSA state is already live, it is undefined
+ * how it interacts with FR=0 FPU state, and we
+ * don't want to hit reserved instruction
+ * exceptions trying to save the MSA state later
+ * when CU=1 && FR=1, so play it safe and save
+ * it first.
+ */
+ if (change & ST0_CU1 && !(val & ST0_FR) &&
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
+ kvm_lose_fpu(vcpu);
+
+ /*
+ * Propagate CU1 (FPU enable) changes
+ * immediately if the FPU context is already
+ * loaded. When disabling we leave the context
+ * loaded so it can be quickly enabled again in
+ * the near future.
+ */
+ if (change & ST0_CU1 &&
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
+ change_c0_status(ST0_CU1, val);
+
+ preempt_enable();
+
+ kvm_write_c0_guest_status(cop0, val);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ /*
+ * If FPU present, we need CU1/FR bits to take
+ * effect fairly soon.
+ */
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+ } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
+ unsigned int old_val, val, change, wrmask;
+
+ old_val = kvm_read_c0_guest_config5(cop0);
+ val = vcpu->arch.gprs[rt];
+
+ /* Only a few bits are writable in Config5 */
+ wrmask = kvm_mips_config5_wrmask(vcpu);
+ change = (val ^ old_val) & wrmask;
+ val = old_val ^ change;
+
+
+ /* Handle changes in FPU/MSA modes */
+ preempt_disable();
+
+ /*
+ * Propagate FRE changes immediately if the FPU
+ * context is already loaded.
+ */
+ if (change & MIPS_CONF5_FRE &&
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
+ change_c0_config5(MIPS_CONF5_FRE, val);
+
+ /*
+ * Propagate MSAEn changes immediately if the
+ * MSA context is already loaded. When disabling
+ * we leave the context loaded so it can be
+ * quickly enabled again in the near future.
+ */
+ if (change & MIPS_CONF5_MSAEN &&
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
+ change_c0_config5(MIPS_CONF5_MSAEN,
+ val);
+
+ preempt_enable();
+
+ kvm_write_c0_guest_config5(cop0, val);
+ } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
+ u32 old_cause, new_cause;
+
+ old_cause = kvm_read_c0_guest_cause(cop0);
+ new_cause = vcpu->arch.gprs[rt];
+ /* Update R/W bits */
+ kvm_change_c0_guest_cause(cop0, 0x08800300,
+ new_cause);
+ /* DC bit enabling/disabling timer? */
+ if ((old_cause ^ new_cause) & CAUSEF_DC) {
+ if (new_cause & CAUSEF_DC)
+ kvm_mips_count_disable_cause(vcpu);
+ else
+ kvm_mips_count_enable_cause(vcpu);
+ }
+ } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
+ u32 mask = MIPS_HWRENA_CPUNUM |
+ MIPS_HWRENA_SYNCISTEP |
+ MIPS_HWRENA_CC |
+ MIPS_HWRENA_CCRES;
+
+ if (kvm_read_c0_guest_config3(cop0) &
+ MIPS_CONF3_ULRI)
+ mask |= MIPS_HWRENA_ULR;
+ cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
+ } else {
+ cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+ }
+ break;
+
+ case dmtc_op:
+ kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
+ vcpu->arch.pc, rt, rd, sel);
+ trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
+ KVM_TRACE_COP0(rd, sel),
+ vcpu->arch.gprs[rt]);
+ er = EMULATE_FAIL;
+ break;
+
+ case mfmc0_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ cop0->stat[MIPS_CP0_STATUS][0]++;
+#endif
+ if (rt != 0)
+ vcpu->arch.gprs[rt] =
+ kvm_read_c0_guest_status(cop0);
+ /* EI */
+ if (inst.mfmc0_format.sc) {
+ kvm_debug("[%#lx] mfmc0_op: EI\n",
+ vcpu->arch.pc);
+ kvm_set_c0_guest_status(cop0, ST0_IE);
+ } else {
+ kvm_debug("[%#lx] mfmc0_op: DI\n",
+ vcpu->arch.pc);
+ kvm_clear_c0_guest_status(cop0, ST0_IE);
+ }
+
+ break;
+
+ case wrpgpr_op:
+ {
+ u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
+ u32 pss =
+ (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
+ /*
+ * We don't support any shadow register sets, so
+ * SRSCtl[PSS] == SRSCtl[CSS] == 0
+ */
+ if (css || pss) {
+ er = EMULATE_FAIL;
+ break;
+ }
+ kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
+ vcpu->arch.gprs[rt]);
+ vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
+ }
+ break;
+ default:
+ kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
+ vcpu->arch.pc, inst.c0r_format.rs);
+ er = EMULATE_FAIL;
+ break;
+ }
+ }
+
+done:
+ /* Rollback PC only if emulation was unsuccessful */
+ if (er == EMULATE_FAIL)
+ vcpu->arch.pc = curr_pc;
+
+dont_update_pc:
+ /*
+ * This is for special instructions whose emulation
+ * updates the PC, so do not overwrite the PC under
+ * any circumstances
+ */
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
+ u32 cause,
+ struct kvm_vcpu *vcpu)
+{
+ int r;
+ enum emulation_result er;
+ u32 rt;
+ struct kvm_run *run = vcpu->run;
+ void *data = run->mmio.data;
+ unsigned int imme;
+ unsigned long curr_pc;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ rt = inst.i_format.rt;
+
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr);
+ if (run->mmio.phys_addr == KVM_INVALID_ADDR)
+ goto out_fail;
+
+ switch (inst.i_format.opcode) {
+#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
+ case sd_op:
+ run->mmio.len = 8;
+ *(u64 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
+ break;
+#endif
+
+ case sw_op:
+ run->mmio.len = 4;
+ *(u32 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u32 *)data);
+ break;
+
+ case sh_op:
+ run->mmio.len = 2;
+ *(u16 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u16 *)data);
+ break;
+
+ case sb_op:
+ run->mmio.len = 1;
+ *(u8 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u8 *)data);
+ break;
+
+ case swl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
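+ /* Merge the stored bytes into the aligned MMIO word according to the address offset */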
+ switch (imme) {
+ case 0:
+ *(u32 *)data = ((*(u32 *)data) & 0xffffff00) |
+ (vcpu->arch.gprs[rt] >> 24);
+ break;
+ case 1:
+ *(u32 *)data = ((*(u32 *)data) & 0xffff0000) |
+ (vcpu->arch.gprs[rt] >> 16);
+ break;
+ case 2:
+ *(u32 *)data = ((*(u32 *)data) & 0xff000000) |
+ (vcpu->arch.gprs[rt] >> 8);
+ break;
+ case 3:
+ *(u32 *)data = vcpu->arch.gprs[rt];
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u32 *)data);
+ break;
+
+ case swr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ *(u32 *)data = vcpu->arch.gprs[rt];
+ break;
+ case 1:
+ *(u32 *)data = ((*(u32 *)data) & 0xff) |
+ (vcpu->arch.gprs[rt] << 8);
+ break;
+ case 2:
+ *(u32 *)data = ((*(u32 *)data) & 0xffff) |
+ (vcpu->arch.gprs[rt] << 16);
+ break;
+ case 3:
+ *(u32 *)data = ((*(u32 *)data) & 0xffffff) |
+ (vcpu->arch.gprs[rt] << 24);
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u32 *)data);
+ break;
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
+ case sdl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) |
+ ((vcpu->arch.gprs[rt] >> 56) & 0xff);
+ break;
+ case 1:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) |
+ ((vcpu->arch.gprs[rt] >> 48) & 0xffff);
+ break;
+ case 2:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) |
+ ((vcpu->arch.gprs[rt] >> 40) & 0xffffff);
+ break;
+ case 3:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) |
+ ((vcpu->arch.gprs[rt] >> 32) & 0xffffffff);
+ break;
+ case 4:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) |
+ ((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff);
+ break;
+ case 5:
+ *(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) |
+ ((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff);
+ break;
+ case 6:
+ *(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) |
+ ((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff);
+ break;
+ case 7:
+ *(u64 *)data = vcpu->arch.gprs[rt];
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
+ break;
+
+ case sdr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ *(u64 *)data = vcpu->arch.gprs[rt];
+ break;
+ case 1:
+ *(u64 *)data = ((*(u64 *)data) & 0xff) |
+ (vcpu->arch.gprs[rt] << 8);
+ break;
+ case 2:
+ *(u64 *)data = ((*(u64 *)data) & 0xffff) |
+ (vcpu->arch.gprs[rt] << 16);
+ break;
+ case 3:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffff) |
+ (vcpu->arch.gprs[rt] << 24);
+ break;
+ case 4:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffff) |
+ (vcpu->arch.gprs[rt] << 32);
+ break;
+ case 5:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffff) |
+ (vcpu->arch.gprs[rt] << 40);
+ break;
+ case 6:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) |
+ (vcpu->arch.gprs[rt] << 48);
+ break;
+ case 7:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) |
+ (vcpu->arch.gprs[rt] << 56);
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
+ break;
+#endif
+
+#ifdef CONFIG_CPU_LOONGSON64
+ case sdc2_op:
+ rt = inst.loongson3_lsdc2_format.rt;
+ switch (inst.loongson3_lsdc2_format.opcode1) {
+ /*
+ * Loongson-3 overridden sdc2 instructions.
+ * opcode1 instruction
+ * 0x0 gssbx: store 1 byte from GPR
+ * 0x1 gsshx: store 2 bytes from GPR
+ * 0x2 gsswx: store 4 bytes from GPR
+ * 0x3 gssdx: store 8 bytes from GPR
+ */
+ case 0x0:
+ run->mmio.len = 1;
+ *(u8 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u8 *)data);
+ break;
+ case 0x1:
+ run->mmio.len = 2;
+ *(u16 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u16 *)data);
+ break;
+ case 0x2:
+ run->mmio.len = 4;
+ *(u32 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u32 *)data);
+ break;
+ case 0x3:
+ run->mmio.len = 8;
+ *(u64 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
+ break;
+ default:
+ kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n",
+ inst.word);
+ break;
+ }
+ break;
+#endif
+ default:
+ kvm_err("Store not yet supported (inst=0x%08x)\n",
+ inst.word);
+ goto out_fail;
+ }
+
+ vcpu->mmio_needed = 1;
+ run->mmio.is_write = 1;
+ vcpu->mmio_is_write = 1;
+
+ r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
+ run->mmio.phys_addr, run->mmio.len, data);
+
+ if (!r) {
+ vcpu->mmio_needed = 0;
+ return EMULATE_DONE;
+ }
+
+ return EMULATE_DO_MMIO;
+
+out_fail:
+ /* Rollback PC if emulation was unsuccessful */
+ vcpu->arch.pc = curr_pc;
+ return EMULATE_FAIL;
+}
+
+enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
+ u32 cause, struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ int r;
+ enum emulation_result er;
+ unsigned long curr_pc;
+ u32 op, rt;
+ unsigned int imme;
+
+ rt = inst.i_format.rt;
+ op = inst.i_format.opcode;
+
+ /*
+ * Find the resume PC now while we have safe and easy access to the
+ * prior branch instruction, and save it for
+ * kvm_mips_complete_mmio_load() to restore later.
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+ vcpu->arch.io_pc = vcpu->arch.pc;
+ vcpu->arch.pc = curr_pc;
+
+ vcpu->arch.io_gpr = rt;
+
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr);
+ if (run->mmio.phys_addr == KVM_INVALID_ADDR)
+ return EMULATE_FAIL;
+
+ vcpu->mmio_needed = 2; /* signed */
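+ /* mmio_needed tells kvm_mips_complete_mmio_load() how to sign extend and merge the data */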
+ switch (op) {
+#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
+ case ld_op:
+ run->mmio.len = 8;
+ break;
+
+ case lwu_op:
+ vcpu->mmio_needed = 1; /* unsigned */
+ fallthrough;
+#endif
+ case lw_op:
+ run->mmio.len = 4;
+ break;
+
+ case lhu_op:
+ vcpu->mmio_needed = 1; /* unsigned */
+ fallthrough;
+ case lh_op:
+ run->mmio.len = 2;
+ break;
+
+ case lbu_op:
+ vcpu->mmio_needed = 1; /* unsigned */
+ fallthrough;
+ case lb_op:
+ run->mmio.len = 1;
+ break;
+
+ case lwl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 3; /* 1 byte */
+ break;
+ case 1:
+ vcpu->mmio_needed = 4; /* 2 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 5; /* 3 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 6; /* 4 bytes */
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case lwr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 7; /* 4 bytes */
+ break;
+ case 1:
+ vcpu->mmio_needed = 8; /* 3 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 9; /* 2 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 10; /* 1 byte */
+ break;
+ default:
+ break;
+ }
+ break;
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
+ case ldl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 11; /* 1 byte */
+ break;
+ case 1:
+ vcpu->mmio_needed = 12; /* 2 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 13; /* 3 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 14; /* 4 bytes */
+ break;
+ case 4:
+ vcpu->mmio_needed = 15; /* 5 bytes */
+ break;
+ case 5:
+ vcpu->mmio_needed = 16; /* 6 bytes */
+ break;
+ case 6:
+ vcpu->mmio_needed = 17; /* 7 bytes */
+ break;
+ case 7:
+ vcpu->mmio_needed = 18; /* 8 bytes */
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case ldr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 19; /* 8 bytes */
+ break;
+ case 1:
+ vcpu->mmio_needed = 20; /* 7 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 21; /* 6 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 22; /* 5 bytes */
+ break;
+ case 4:
+ vcpu->mmio_needed = 23; /* 4 bytes */
+ break;
+ case 5:
+ vcpu->mmio_needed = 24; /* 3 bytes */
+ break;
+ case 6:
+ vcpu->mmio_needed = 25; /* 2 bytes */
+ break;
+ case 7:
+ vcpu->mmio_needed = 26; /* 1 byte */
+ break;
+ default:
+ break;
+ }
+ break;
+#endif
+
+#ifdef CONFIG_CPU_LOONGSON64
+ case ldc2_op:
+ rt = inst.loongson3_lsdc2_format.rt;
+ switch (inst.loongson3_lsdc2_format.opcode1) {
+ /*
+ * Loongson-3 overridden ldc2 instructions.
+ * opcode1 instruction
+ * 0x0 gslbx: load 1 byte to GPR
+ * 0x1 gslhx: load 2 bytes to GPR
+ * 0x2 gslwx: load 4 bytes to GPR
+ * 0x3 gsldx: load 8 bytes to GPR
+ */
+ case 0x0:
+ run->mmio.len = 1;
+ vcpu->mmio_needed = 27; /* signed */
+ break;
+ case 0x1:
+ run->mmio.len = 2;
+ vcpu->mmio_needed = 28; /* signed */
+ break;
+ case 0x2:
+ run->mmio.len = 4;
+ vcpu->mmio_needed = 29; /* signed */
+ break;
+ case 0x3:
+ run->mmio.len = 8;
+ vcpu->mmio_needed = 30; /* signed */
+ break;
+ default:
+ kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n",
+ inst.word);
+ break;
+ }
+ break;
+#endif
+
+ default:
+ kvm_err("Load not yet supported (inst=0x%08x)\n",
+ inst.word);
+ vcpu->mmio_needed = 0;
+ return EMULATE_FAIL;
+ }
+
+ run->mmio.is_write = 0;
+ vcpu->mmio_is_write = 0;
+
+ r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
+ run->mmio.phys_addr, run->mmio.len, run->mmio.data);
+
+ if (!r) {
+ kvm_mips_complete_mmio_load(vcpu);
+ vcpu->mmio_needed = 0;
+ return EMULATE_DONE;
+ }
+
+ return EMULATE_DO_MMIO;
+}
+
+#ifndef CONFIG_KVM_MIPS_VZ
+static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
+ unsigned long curr_pc,
+ unsigned long addr,
+ struct kvm_vcpu *vcpu,
+ u32 cause)
+{
+ int err;
+
+ for (;;) {
+ /* Carefully attempt the cache operation */
+ kvm_trap_emul_gva_lockless_begin(vcpu);
+ err = fn(addr);
+ kvm_trap_emul_gva_lockless_end(vcpu);
+
+ if (likely(!err))
+ return EMULATE_DONE;
+
+ /*
+ * Try to handle the fault and retry, maybe we just raced with a
+ * GVA invalidation.
+ */
+ switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) {
+ case KVM_MIPS_GVA:
+ case KVM_MIPS_GPA:
+ /* bad virtual or physical address */
+ return EMULATE_FAIL;
+ case KVM_MIPS_TLB:
+ /* no matching guest TLB */
+ vcpu->arch.host_cp0_badvaddr = addr;
+ vcpu->arch.pc = curr_pc;
+ kvm_mips_emulate_tlbmiss_ld(cause, NULL, vcpu);
+ return EMULATE_EXCEPT;
+ case KVM_MIPS_TLBINV:
+ /* invalid matching guest TLB */
+ vcpu->arch.host_cp0_badvaddr = addr;
+ vcpu->arch.pc = curr_pc;
+ kvm_mips_emulate_tlbinv_ld(cause, NULL, vcpu);
+ return EMULATE_EXCEPT;
+ default:
+ break;
+ }
+ }
+}
+
+enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
+ u32 *opc, u32 cause,
+ struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+ u32 cache, op_inst, op, base;
+ s16 offset;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ unsigned long va;
+ unsigned long curr_pc;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ base = inst.i_format.rs;
+ op_inst = inst.i_format.rt;
+ if (cpu_has_mips_r6)
+ offset = inst.spec3_format.simmediate;
+ else
+ offset = inst.i_format.simmediate;
+ cache = op_inst & CacheOp_Cache;
+ op = op_inst & CacheOp_Op;
+
+ va = arch->gprs[base] + offset;
+
+	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
+		  cache, op, base, arch->gprs[base], offset);
+
+ /*
+ * Treat INDEX_INV as a nop, basically issued by Linux on startup to
+ * invalidate the caches entirely by stepping through all the
+ * ways/indexes
+ */
+ if (op == Index_Writeback_Inv) {
+		kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
+			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
+			  arch->gprs[base], offset);
+
+ if (cache == Cache_D) {
+#ifdef CONFIG_CPU_R4K_CACHE_TLB
+ r4k_blast_dcache();
+#else
+ switch (boot_cpu_type()) {
+ case CPU_CAVIUM_OCTEON3:
+ /* locally flush icache */
+ local_flush_icache_range(0, 0);
+ break;
+ default:
+ __flush_cache_all();
+ break;
+ }
+#endif
+ } else if (cache == Cache_I) {
+#ifdef CONFIG_CPU_R4K_CACHE_TLB
+ r4k_blast_icache();
+#else
+ switch (boot_cpu_type()) {
+ case CPU_CAVIUM_OCTEON3:
+ /* locally flush icache */
+ local_flush_icache_range(0, 0);
+ break;
+ default:
+ flush_icache_all();
+ break;
+ }
+#endif
+ } else {
+ kvm_err("%s: unsupported CACHE INDEX operation\n",
+ __func__);
+ return EMULATE_FAIL;
+ }
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ kvm_mips_trans_cache_index(inst, opc, vcpu);
+#endif
+ goto done;
+ }
+
+ /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
+ if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
+ /*
+ * Perform the dcache part of icache synchronisation on the
+ * guest's behalf.
+ */
+ er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
+ curr_pc, va, vcpu, cause);
+ if (er != EMULATE_DONE)
+ goto done;
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ /*
+		 * Replace the CACHE instruction with a SYNCI. Not quite the
+		 * same, but it avoids a trap.
+ */
+ kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+ } else if (op_inst == Hit_Invalidate_I) {
+ /* Perform the icache synchronisation on the guest's behalf */
+ er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
+ curr_pc, va, vcpu, cause);
+ if (er != EMULATE_DONE)
+ goto done;
+ er = kvm_mips_guest_cache_op(protected_flush_icache_line,
+ curr_pc, va, vcpu, cause);
+ if (er != EMULATE_DONE)
+ goto done;
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ /* Replace the CACHE instruction, with a SYNCI */
+ kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+ } else {
+		kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
+			cache, op, base, arch->gprs[base], offset);
+ er = EMULATE_FAIL;
+ }
+
+done:
+ /* Rollback PC only if emulation was unsuccessful */
+ if (er == EMULATE_FAIL)
+ vcpu->arch.pc = curr_pc;
+ /* Guest exception needs guest to resume */
+ if (er == EMULATE_EXCEPT)
+ er = EMULATE_DONE;
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ union mips_instruction inst;
+ enum emulation_result er = EMULATE_DONE;
+ int err;
+
+ /* Fetch the instruction. */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ err = kvm_get_badinstr(opc, vcpu, &inst.word);
+ if (err)
+ return EMULATE_FAIL;
+
+ switch (inst.r_format.opcode) {
+ case cop0_op:
+ er = kvm_mips_emulate_CP0(inst, opc, cause, vcpu);
+ break;
+
+#ifndef CONFIG_CPU_MIPSR6
+ case cache_op:
+ ++vcpu->stat.cache_exits;
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
+ er = kvm_mips_emulate_cache(inst, opc, cause, vcpu);
+ break;
+#else
+ case spec3_op:
+ switch (inst.spec3_format.func) {
+ case cache6_op:
+ ++vcpu->stat.cache_exits;
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
+ er = kvm_mips_emulate_cache(inst, opc, cause,
+ vcpu);
+ break;
+ default:
+ goto unknown;
+ }
+ break;
+unknown:
+#endif
+
+ default:
+ kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
+ inst.word);
+ kvm_arch_vcpu_dump_regs(vcpu);
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ return er;
+}
+#endif /* CONFIG_KVM_MIPS_VZ */
+
+/**
+ * kvm_mips_guest_exception_base() - Find guest exception vector base address.
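+ * @vcpu:	Virtual CPU.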
+ *
+ * Returns: The base address of the current guest exception vector, taking
+ * both Guest.CP0_Status.BEV and Guest.CP0_EBase into account.
+ */
+long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+ if (kvm_read_c0_guest_status(cop0) & ST0_BEV)
+ return KVM_GUEST_CKSEG1ADDR(0x1fc00200);
+ else
+ return kvm_read_c0_guest_ebase(cop0) & MIPS_EBASE_BASE;
+}
+
+enum emulation_result kvm_mips_emulate_syscall(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (EXCCODE_SYS << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+
+ /* set pc to the exception entry point */
+ arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
+
+ } else {
+ kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+
+ arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
+ }
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (EXCCODE_TLBL << CAUSEB_EXCCODE));
+
+ /* setup badvaddr, context and entryhi registers for the guest */
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+ /* XXXKYMA: is the context register used by linux??? */
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+
+ return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ unsigned long entryhi =
+ (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
+ arch->pc);
+ } else {
+		kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
+ arch->pc);
+ }
+
+ /* set pc to the exception entry point */
+ arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (EXCCODE_TLBL << CAUSEB_EXCCODE));
+
+ /* setup badvaddr, context and entryhi registers for the guest */
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+ /* XXXKYMA: is the context register used by linux??? */
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+
+ return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+
+ /* Set PC to the exception entry point */
+ arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
+ } else {
+ kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+ arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
+ }
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (EXCCODE_TLBS << CAUSEB_EXCCODE));
+
+ /* setup badvaddr, context and entryhi registers for the guest */
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+ /* XXXKYMA: is the context register used by linux??? */
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+
+ return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
+ arch->pc);
+ } else {
+		kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
+ arch->pc);
+ }
+
+ /* Set PC to the exception entry point */
+ arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (EXCCODE_TLBS << CAUSEB_EXCCODE));
+
+ /* setup badvaddr, context and entryhi registers for the guest */
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+ /* XXXKYMA: is the context register used by linux??? */
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+
+ return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
+ arch->pc);
+ } else {
+ kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
+ arch->pc);
+ }
+
+ arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (EXCCODE_MOD << CAUSEB_EXCCODE));
+
+ /* setup badvaddr, context and entryhi registers for the guest */
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+ /* XXXKYMA: is the context register used by linux??? */
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+
+ return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ }
+
+ arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (EXCCODE_CPU << CAUSEB_EXCCODE));
+ kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
+
+ return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (EXCCODE_RI << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver RI when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (EXCCODE_BP << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver BP when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (EXCCODE_TR << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver TRAP when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (EXCCODE_FPE << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver FPE when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (EXCCODE_MSADIS << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver MSADIS when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+ unsigned long curr_pc;
+ union mips_instruction inst;
+ int err;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ /* Fetch the instruction. */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ err = kvm_get_badinstr(opc, vcpu, &inst.word);
+ if (err) {
+ kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err);
+ return EMULATE_FAIL;
+ }
+
+ if (inst.r_format.opcode == spec3_op &&
+ inst.r_format.func == rdhwr_op &&
+ inst.r_format.rs == 0 &&
+ (inst.r_format.re >> 3) == 0) {
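+		/* RDHWR rt, rd (with sel in the low 3 bits of the re field) */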
+ int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
+ int rd = inst.r_format.rd;
+ int rt = inst.r_format.rt;
+ int sel = inst.r_format.re & 0x7;
+
+ /* If usermode, check RDHWR rd is allowed by guest HWREna */
+ if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
+ kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
+ rd, opc);
+ goto emulate_ri;
+ }
+ switch (rd) {
+ case MIPS_HWR_CPUNUM: /* CPU number */
+ arch->gprs[rt] = vcpu->vcpu_id;
+ break;
+ case MIPS_HWR_SYNCISTEP: /* SYNCI length */
+ arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
+ current_cpu_data.icache.linesz);
+ break;
+ case MIPS_HWR_CC: /* Read count register */
+ arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
+ break;
+ case MIPS_HWR_CCRES: /* Count register resolution */
+ switch (current_cpu_data.cputype) {
+ case CPU_20KC:
+ case CPU_25KF:
+ arch->gprs[rt] = 1;
+ break;
+ default:
+ arch->gprs[rt] = 2;
+ }
+ break;
+ case MIPS_HWR_ULR: /* Read UserLocal register */
+ arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
+ break;
+
+ default:
+ kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
+ goto emulate_ri;
+ }
+
+ trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
+ vcpu->arch.gprs[rt]);
+ } else {
+ kvm_debug("Emulate RI not supported @ %p: %#x\n",
+ opc, inst.word);
+ goto emulate_ri;
+ }
+
+ return EMULATE_DONE;
+
+emulate_ri:
+ /*
+ * Rollback PC (if in branch delay slot then the PC already points to
+ * branch target), and pass the RI exception to the guest OS.
+ */
+ vcpu->arch.pc = curr_pc;
+ return kvm_mips_emulate_ri_exc(cause, opc, vcpu);
+}
+
+enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
+ enum emulation_result er = EMULATE_DONE;
+
+ if (run->mmio.len > sizeof(*gpr)) {
+		kvm_err("Bad MMIO length: %d\n", run->mmio.len);
+ er = EMULATE_FAIL;
+ goto done;
+ }
+
+ /* Restore saved resume PC */
+ vcpu->arch.pc = vcpu->arch.io_pc;
+
+ switch (run->mmio.len) {
+ case 8:
+ switch (vcpu->mmio_needed) {
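+		/*
+		 * mmio_needed 11-18 correspond to ldl (merge the loaded bytes
+		 * into the most-significant end of the register); 19-26
+		 * correspond to ldr (least-significant end).
+		 */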
+ case 11:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xff) << 56);
+ break;
+ case 12:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffff) << 48);
+ break;
+ case 13:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffff) << 40);
+ break;
+ case 14:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffff) << 32);
+ break;
+ case 15:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffffff) << 24);
+ break;
+ case 16:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16);
+ break;
+ case 17:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8);
+ break;
+ case 18:
+ case 19:
+ *gpr = *(s64 *)run->mmio.data;
+ break;
+ case 20:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) |
+ ((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff);
+ break;
+ case 21:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) |
+ ((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff);
+ break;
+ case 22:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) |
+ ((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff);
+ break;
+ case 23:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) |
+ ((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff);
+ break;
+ case 24:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) |
+ ((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff);
+ break;
+ case 25:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) |
+ ((((*(s64 *)run->mmio.data)) >> 48) & 0xffff);
+ break;
+ case 26:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) |
+ ((((*(s64 *)run->mmio.data)) >> 56) & 0xff);
+ break;
+ default:
+ *gpr = *(s64 *)run->mmio.data;
+ }
+ break;
+
+ case 4:
+ switch (vcpu->mmio_needed) {
+ case 1:
+ *gpr = *(u32 *)run->mmio.data;
+ break;
+ case 2:
+ *gpr = *(s32 *)run->mmio.data;
+ break;
+ case 3:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
+ (((*(s32 *)run->mmio.data) & 0xff) << 24);
+ break;
+ case 4:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
+ (((*(s32 *)run->mmio.data) & 0xffff) << 16);
+ break;
+ case 5:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
+ (((*(s32 *)run->mmio.data) & 0xffffff) << 8);
+ break;
+ case 6:
+ case 7:
+ *gpr = *(s32 *)run->mmio.data;
+ break;
+ case 8:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) |
+ ((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff);
+ break;
+ case 9:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) |
+ ((((*(s32 *)run->mmio.data)) >> 16) & 0xffff);
+ break;
+ case 10:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) |
+ ((((*(s32 *)run->mmio.data)) >> 24) & 0xff);
+ break;
+ default:
+ *gpr = *(s32 *)run->mmio.data;
+ }
+ break;
+
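+	/* For 1- and 2-byte loads, mmio_needed == 1 marks lbu/lhu (zero-extend) */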
+ case 2:
+ if (vcpu->mmio_needed == 1)
+ *gpr = *(u16 *)run->mmio.data;
+ else
+ *gpr = *(s16 *)run->mmio.data;
+
+ break;
+ case 1:
+ if (vcpu->mmio_needed == 1)
+ *gpr = *(u8 *)run->mmio.data;
+ else
+ *gpr = *(s8 *)run->mmio.data;
+ break;
+ }
+
+done:
+ return er;
+}
+
+static enum emulation_result kvm_mips_emulate_exc(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (exccode << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+
+ kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
+ exccode, kvm_read_c0_guest_epc(cop0),
+ kvm_read_c0_guest_badvaddr(cop0));
+ } else {
+ kvm_err("Trying to deliver EXC when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_check_privilege(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+ u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+
+ int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
+
+ if (usermode) {
+ switch (exccode) {
+ case EXCCODE_INT:
+ case EXCCODE_SYS:
+ case EXCCODE_BP:
+ case EXCCODE_RI:
+ case EXCCODE_TR:
+ case EXCCODE_MSAFPE:
+ case EXCCODE_FPE:
+ case EXCCODE_MSADIS:
+ break;
+
+ case EXCCODE_CPU:
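+			/* Only Coprocessor 0 unusable (CE == 0) is a privilege failure */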
+ if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
+ er = EMULATE_PRIV_FAIL;
+ break;
+
+ case EXCCODE_MOD:
+ break;
+
+ case EXCCODE_TLBL:
+ /*
+			 * If we are accessing guest kernel space, send an
+			 * address error exception to the guest.
+ */
+ if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+ kvm_debug("%s: LD MISS @ %#lx\n", __func__,
+ badvaddr);
+ cause &= ~0xff;
+ cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
+ er = EMULATE_PRIV_FAIL;
+ }
+ break;
+
+ case EXCCODE_TLBS:
+ /*
+			 * If we are accessing guest kernel space, send an
+			 * address error exception to the guest.
+ */
+ if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+ kvm_debug("%s: ST MISS @ %#lx\n", __func__,
+ badvaddr);
+ cause &= ~0xff;
+ cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
+ er = EMULATE_PRIV_FAIL;
+ }
+ break;
+
+ case EXCCODE_ADES:
+ kvm_debug("%s: address error ST @ %#lx\n", __func__,
+ badvaddr);
+ if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+ cause &= ~0xff;
+ cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
+ }
+ er = EMULATE_PRIV_FAIL;
+ break;
+ case EXCCODE_ADEL:
+ kvm_debug("%s: address error LD @ %#lx\n", __func__,
+ badvaddr);
+ if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+ cause &= ~0xff;
+ cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
+ }
+ er = EMULATE_PRIV_FAIL;
+ break;
+ default:
+ er = EMULATE_PRIV_FAIL;
+ break;
+ }
+ }
+
+ if (er == EMULATE_PRIV_FAIL)
+ kvm_mips_emulate_exc(cause, opc, vcpu);
+
+ return er;
+}
+
+/*
+ * User Address (UA) fault. This can happen if:
+ * (1) the TLB entry is not present/valid in either the Guest or the shadow
+ *     host TLB, in which case we pass the fault on to the guest kernel and
+ *     let it handle it;
+ * (2) the TLB entry is present in the Guest TLB but not in the shadow host
+ *     TLB, in which case we inject the entry from the Guest TLB into the
+ *     shadow host TLB.
+ */
+enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu,
+ bool write_fault)
+{
+ enum emulation_result er = EMULATE_DONE;
+ u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ unsigned long va = vcpu->arch.host_cp0_badvaddr;
+ int index;
+
+ kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
+ vcpu->arch.host_cp0_badvaddr);
+
+ /*
+ * KVM would not have got the exception if this entry was valid in the
+	 * shadow host TLB. Check the Guest TLB; if the entry is not there, send
+	 * the guest an exception. The guest exception handler should then
+	 * inject an entry into the guest TLB.
+ */
+ index = kvm_mips_guest_tlb_lookup(vcpu,
+ (va & VPN2_MASK) |
+ (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
+ KVM_ENTRYHI_ASID));
+ if (index < 0) {
+ if (exccode == EXCCODE_TLBL) {
+ er = kvm_mips_emulate_tlbmiss_ld(cause, opc, vcpu);
+ } else if (exccode == EXCCODE_TLBS) {
+ er = kvm_mips_emulate_tlbmiss_st(cause, opc, vcpu);
+ } else {
+ kvm_err("%s: invalid exc code: %d\n", __func__,
+ exccode);
+ er = EMULATE_FAIL;
+ }
+ } else {
+ struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+
+ /*
+ * Check if the entry is valid, if not then setup a TLB invalid
+ * exception to the guest
+ */
+ if (!TLB_IS_VALID(*tlb, va)) {
+ if (exccode == EXCCODE_TLBL) {
+ er = kvm_mips_emulate_tlbinv_ld(cause, opc,
+ vcpu);
+ } else if (exccode == EXCCODE_TLBS) {
+ er = kvm_mips_emulate_tlbinv_st(cause, opc,
+ vcpu);
+ } else {
+ kvm_err("%s: invalid exc code: %d\n", __func__,
+ exccode);
+ er = EMULATE_FAIL;
+ }
+ } else {
+ kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
+ tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
+ /*
+ * OK we have a Guest TLB entry, now inject it into the
+ * shadow host TLB
+ */
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va,
+ write_fault)) {
+ kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+ __func__, va, index, vcpu,
+ read_c0_entryhi());
+ er = EMULATE_FAIL;
+ }
+ }
+ }
+
+ return er;
+}
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
new file mode 100644
index 000000000..832475bf2
--- /dev/null
+++ b/arch/mips/kvm/entry.c
@@ -0,0 +1,955 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Generation of main entry point for the guest, exception handling.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ *
+ * Copyright (C) 2016 Imagination Technologies Ltd.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/log2.h>
+#include <asm/mmu_context.h>
+#include <asm/msa.h>
+#include <asm/setup.h>
+#include <asm/tlbex.h>
+#include <asm/uasm.h>
+
+/* Register names */
+#define ZERO 0
+#define AT 1
+#define V0 2
+#define V1 3
+#define A0 4
+#define A1 5
+
+#if _MIPS_SIM == _MIPS_SIM_ABI32
+#define T0 8
+#define T1 9
+#define T2 10
+#define T3 11
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
+
+#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
+#define T0 12
+#define T1 13
+#define T2 14
+#define T3 15
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
+
+#define S0 16
+#define S1 17
+#define T9 25
+#define K0 26
+#define K1 27
+#define GP 28
+#define SP 29
+#define RA 31
+
+/* Some CP0 registers */
+#define C0_PWBASE 5, 5
+#define C0_HWRENA 7, 0
+#define C0_BADVADDR 8, 0
+#define C0_BADINSTR 8, 1
+#define C0_BADINSTRP 8, 2
+#define C0_PGD 9, 7
+#define C0_ENTRYHI 10, 0
+#define C0_GUESTCTL1 10, 4
+#define C0_STATUS 12, 0
+#define C0_GUESTCTL0 12, 6
+#define C0_CAUSE 13, 0
+#define C0_EPC 14, 0
+#define C0_EBASE 15, 1
+#define C0_CONFIG5 16, 5
+#define C0_DDATA_LO 28, 3
+#define C0_ERROREPC 30, 0
+
+#define CALLFRAME_SIZ 32
+
+#ifdef CONFIG_64BIT
+#define ST0_KX_IF_64 ST0_KX
+#else
+#define ST0_KX_IF_64 0
+#endif
+
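+/*
+ * Scratch registers used to hold the VCPU pointer and a temporary value
+ * across guest entry/exit. These CP0 defaults may be replaced with KScratch
+ * registers by kvm_mips_entry_setup() where the core provides them.
+ */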
+static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
+static unsigned int scratch_tmp[2] = { C0_ERROREPC };
+
+enum label_id {
+ label_fpu_1 = 1,
+ label_msa_1,
+ label_return_to_host,
+ label_kernel_asid,
+ label_exit_common,
+};
+
+UASM_L_LA(_fpu_1)
+UASM_L_LA(_msa_1)
+UASM_L_LA(_return_to_host)
+UASM_L_LA(_kernel_asid)
+UASM_L_LA(_exit_common)
+
+static void *kvm_mips_build_enter_guest(void *addr);
+static void *kvm_mips_build_ret_from_exit(void *addr);
+static void *kvm_mips_build_ret_to_guest(void *addr);
+static void *kvm_mips_build_ret_to_host(void *addr);
+
+/*
+ * The version of this function in tlbex.c uses current_cpu_type(), but for KVM
+ * we assume symmetry.
+ */
+static int c0_kscratch(void)
+{
+ switch (boot_cpu_type()) {
+ case CPU_XLP:
+ case CPU_XLR:
+ return 22;
+ default:
+ return 31;
+ }
+}
+
+/**
+ * kvm_mips_entry_setup() - Perform global setup for entry code.
+ *
+ * Perform global setup for entry code, such as choosing a scratch register.
+ *
+ * Returns: 0 on success.
+ * -errno on failure.
+ */
+int kvm_mips_entry_setup(void)
+{
+ /*
+	 * We prefer to use KScratchN registers, if they are available, over
+	 * the defaults above, which may not work on all cores.
+ */
+ unsigned int kscratch_mask = cpu_data[0].kscratch_mask;
+
+ if (pgd_reg != -1)
+ kscratch_mask &= ~BIT(pgd_reg);
+
+ /* Pick a scratch register for storing VCPU */
+ if (kscratch_mask) {
+ scratch_vcpu[0] = c0_kscratch();
+ scratch_vcpu[1] = ffs(kscratch_mask) - 1;
+ kscratch_mask &= ~BIT(scratch_vcpu[1]);
+ }
+
+ /* Pick a scratch register to use as a temp for saving state */
+ if (kscratch_mask) {
+ scratch_tmp[0] = c0_kscratch();
+ scratch_tmp[1] = ffs(kscratch_mask) - 1;
+ kscratch_mask &= ~BIT(scratch_tmp[1]);
+ }
+
+ return 0;
+}
+
+static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
+ unsigned int frame)
+{
+ /* Save the VCPU scratch register value in cp0_epc of the stack frame */
+ UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
+ UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
+
+ /* Save the temp scratch register value in cp0_cause of stack frame */
+ if (scratch_tmp[0] == c0_kscratch()) {
+ UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
+ UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
+ }
+}
+
+static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
+ unsigned int frame)
+{
+ /*
+ * Restore host scratch register values saved by
+ * kvm_mips_build_save_scratch().
+ */
+ UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
+ UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
+
+ if (scratch_tmp[0] == c0_kscratch()) {
+ UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
+ UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
+ }
+}
+
+/**
+ * build_set_exc_base() - Assemble code to write exception base address.
+ * @p: Code buffer pointer.
+ * @reg: Source register (generated code may set WG bit in @reg).
+ *
+ * Assemble code to modify the exception base address in the EBase register,
+ * using the appropriately sized access and setting the WG bit if necessary.
+ */
+static inline void build_set_exc_base(u32 **p, unsigned int reg)
+{
+ if (cpu_has_ebase_wg) {
+ /* Set WG so that all the bits get written */
+ uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
+ UASM_i_MTC0(p, reg, C0_EBASE);
+ } else {
+ uasm_i_mtc0(p, reg, C0_EBASE);
+ }
+}
+
+/**
+ * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the start of the vcpu_run function to run a guest VCPU. The function
+ * conforms to the following prototype:
+ *
+ * int vcpu_run(struct kvm_vcpu *vcpu);
+ *
+ * The exit from the guest and return to the caller is handled by the code
+ * generated by kvm_mips_build_ret_to_host().
+ *
+ * Returns: Next address after end of written function.
+ */
+void *kvm_mips_build_vcpu_run(void *addr)
+{
+ u32 *p = addr;
+ unsigned int i;
+
+ /*
+ * A0: vcpu
+ */
+
+ /* k0/k1 not being used in host kernel context */
+ UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
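+	/*
+	 * Save the callee-saved registers s0-s7 ($16-$23) and gp/sp/fp/ra
+	 * ($28-$31); t8/t9/k0/k1 ($24-$27) are skipped by the i = 28 jump.
+	 */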
+ for (i = 16; i < 32; ++i) {
+ if (i == 24)
+ i = 28;
+ UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
+ }
+
+ /* Save host status */
+ uasm_i_mfc0(&p, V0, C0_STATUS);
+ UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);
+
+ /* Save scratch registers, will be used to store pointer to vcpu etc */
+ kvm_mips_build_save_scratch(&p, V1, K1);
+
+ /* VCPU scratch register has pointer to vcpu */
+ UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);
+
+ /* Offset into vcpu->arch */
+ UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));
+
+ /*
+ * Save the host stack to VCPU, used for exception processing
+ * when we exit from the Guest
+ */
+ UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
+
+ /* Save the kernel gp as well */
+ UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
+
+ /*
+ * Setup status register for running the guest in UM, interrupts
+ * are disabled
+ */
+ UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ /* load up the new EBASE */
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
+ build_set_exc_base(&p, K0);
+
+ /*
+ * Now that the new EBASE has been loaded, unset BEV, set
+ * interrupt mask as it was but make sure that timer interrupts
+ * are enabled
+ */
+ uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
+ uasm_i_andi(&p, V0, V0, ST0_IM);
+ uasm_i_or(&p, K0, K0, V0);
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ p = kvm_mips_build_enter_guest(p);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the code to resume guest execution. This code is common between the
+ * initial entry into the guest from the host, and returning from the exit
+ * handler back to the guest.
+ *
+ * Returns: Next address after end of written function.
+ */
+static void *kvm_mips_build_enter_guest(void *addr)
+{
+ u32 *p = addr;
+ unsigned int i;
+ struct uasm_label labels[2];
+ struct uasm_reloc relocs[2];
+ struct uasm_label __maybe_unused *l = labels;
+ struct uasm_reloc __maybe_unused *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ /* Set Guest EPC */
+ UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
+ UASM_i_MTC0(&p, T0, C0_EPC);
+
+#ifdef CONFIG_KVM_MIPS_VZ
+ /* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
+ if (cpu_has_ldpte)
+ UASM_i_MFC0(&p, K0, C0_PWBASE);
+ else
+ UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
+
+ /*
+ * Set up KVM GPA pgd.
+ * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
+ * - call tlbmiss_handler_setup_pgd(mm->pgd)
+ * - write mm->pgd into CP0_PWBase
+ *
+ * We keep S0 pointing at struct kvm so we can load the ASID below.
+ */
+ UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
+ (int)offsetof(struct kvm_vcpu, arch), K1);
+ UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
+ UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
+ uasm_i_jalr(&p, RA, T9);
+ /* delay slot */
+ if (cpu_has_htw)
+ UASM_i_MTC0(&p, A0, C0_PWBASE);
+ else
+ uasm_i_nop(&p);
+
+ /* Set GM bit to setup eret to VZ guest context */
+ uasm_i_addiu(&p, V1, ZERO, 1);
+ uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
+ uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
+ uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
+
+ if (cpu_has_guestid) {
+ /*
+ * Set root mode GuestID, so that root TLB refill handler can
+ * use the correct GuestID in the root TLB.
+ */
+
+ /* Get current GuestID */
+ uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
+ /* Set GuestCtl1.RID = GuestCtl1.ID */
+ uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
+ MIPS_GCTL1_ID_WIDTH);
+ uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
+ MIPS_GCTL1_RID_WIDTH);
+ uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
+
+ /* GuestID handles dealiasing so we don't need to touch ASID */
+ goto skip_asid_restore;
+ }
+
+ /* Root ASID Dealias (RAD) */
+
+ /* Save host ASID */
+ UASM_i_MFC0(&p, K0, C0_ENTRYHI);
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
+ K1);
+
+ /* Set the root ASID for the Guest */
+ UASM_i_ADDIU(&p, T1, S0,
+ offsetof(struct kvm, arch.gpa_mm.context.asid));
+#else
+ /* Set the ASID for the Guest Kernel or User */
+ UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
+ UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
+ T0);
+ uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
+ uasm_i_xori(&p, T0, T0, KSU_USER);
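+	/*
+	 * T0 is non-zero iff the guest was in kernel mode (KSU != user, or
+	 * ERL/EXL set); the branch delay slot preloads the kernel ASID
+	 * pointer, which the fall-through (user) path then overwrites.
+	 */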
+ uasm_il_bnez(&p, &r, T0, label_kernel_asid);
+ UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
+ guest_kernel_mm.context.asid));
+ /* else user */
+ UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
+ guest_user_mm.context.asid));
+ uasm_l_kernel_asid(&l, p);
+#endif
+
+ /* t1: contains the base of the ASID array, need to get the cpu id */
+ /* smp_processor_id */
+ uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
+ /* index the ASID array */
+ uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
+ UASM_i_ADDU(&p, T3, T1, T2);
+ UASM_i_LW(&p, K0, 0, T3);
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+ /*
+ * reuse ASID array offset
+ * cpuinfo_mips is a multiple of sizeof(long)
+ */
+ uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
+ uasm_i_mul(&p, T2, T2, T3);
+
+ UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
+ UASM_i_ADDU(&p, AT, AT, T2);
+ UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
+ uasm_i_and(&p, K0, K0, T2);
+#else
+ uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
+#endif
+
+#ifndef CONFIG_KVM_MIPS_VZ
+ /*
+ * Set up KVM T&E GVA pgd.
+ * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
+ * - call tlbmiss_handler_setup_pgd(mm->pgd)
+ * - but skips write into CP0_PWBase for now
+ */
+ UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) -
+ (int)offsetof(struct mm_struct, context.asid), T1);
+
+ UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
+ uasm_i_jalr(&p, RA, T9);
+ uasm_i_mtc0(&p, K0, C0_ENTRYHI);
+#else
+ /* Set up KVM VZ root ASID (!guestid) */
+ uasm_i_mtc0(&p, K0, C0_ENTRYHI);
+skip_asid_restore:
+#endif
+ uasm_i_ehb(&p);
+
+ /* Disable RDHWR access */
+ uasm_i_mtc0(&p, ZERO, C0_HWRENA);
+
+ /* load the guest context from VCPU and return */
+ for (i = 1; i < 32; ++i) {
+ /* Guest k0/k1 loaded later */
+ if (i == K0 || i == K1)
+ continue;
+ UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
+ }
+
+#ifndef CONFIG_CPU_MIPSR6
+ /* Restore hi/lo */
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
+ uasm_i_mthi(&p, K0);
+
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
+ uasm_i_mtlo(&p, K0);
+#endif
+
+ /* Restore the guest's k0/k1 registers */
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
+ UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
+
+ /* Jump to guest */
+ uasm_i_eret(&p);
+
+ uasm_resolve_relocs(relocs, labels);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler.
+ * @addr: Address to start writing code.
+ * @handler: Address of common handler (within range of @addr).
+ *
+ * Assemble TLB refill exception fast path handler for guest execution.
+ *
+ * Returns: Next address after end of written function.
+ */
+void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
+{
+ u32 *p = addr;
+ struct uasm_label labels[2];
+ struct uasm_reloc relocs[2];
+#ifndef CONFIG_CPU_LOONGSON64
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+#endif
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ /* Save guest k1 into scratch register */
+ UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
+
+ /* Get the VCPU pointer from the VCPU scratch register */
+ UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
+
+ /* Save guest k0 into VCPU structure */
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
+
+ /*
+ * Some of the common tlbex code uses current_cpu_type(). For KVM we
+ * assume symmetry and just disable preemption to silence the warning.
+ */
+ preempt_disable();
+
+#ifdef CONFIG_CPU_LOONGSON64
+ UASM_i_MFC0(&p, K1, C0_PGD);
+ uasm_i_lddir(&p, K0, K1, 3); /* global page dir */
+#ifndef __PAGETABLE_PMD_FOLDED
+ uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */
+#endif
+ uasm_i_ldpte(&p, K1, 0); /* even */
+ uasm_i_ldpte(&p, K1, 1); /* odd */
+ uasm_i_tlbwr(&p);
+#else
+ /*
+ * Now for the actual refill bit. A lot of this can be common with the
+ * Linux TLB refill handler, however we don't need to handle so many
+ * cases. We only need to handle user mode refills, and user mode runs
+ * with 32-bit addressing.
+ *
+ * Therefore the branch to label_vmalloc generated by build_get_pmde64()
+ * that isn't resolved should never actually get taken and is harmless
+ * to leave in place for now.
+ */
+
+#ifdef CONFIG_64BIT
+ build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
+#else
+ build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
+#endif
+
+ /* we don't support huge pages yet */
+
+ build_get_ptep(&p, K0, K1);
+ build_update_entries(&p, K0, K1);
+ build_tlb_write_entry(&p, &l, &r, tlb_random);
+#endif
+
+ preempt_enable();
+
+ /* Get the VCPU pointer from the VCPU scratch register again */
+ UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
+
+ /* Restore the guest's k0/k1 registers */
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
+ uasm_i_ehb(&p);
+ UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
+
+ /* Jump to guest */
+ uasm_i_eret(&p);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_exception() - Assemble first level guest exception handler.
+ * @addr: Address to start writing code.
+ * @handler: Address of common handler (within range of @addr).
+ *
+ * Assemble exception vector code for guest execution. The generated vector will
+ * branch to the common exception handler generated by kvm_mips_build_exit().
+ *
+ * Returns: Next address after end of written function.
+ */
+void *kvm_mips_build_exception(void *addr, void *handler)
+{
+ u32 *p = addr;
+ struct uasm_label labels[2];
+ struct uasm_reloc relocs[2];
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ /* Save guest k1 into scratch register */
+ UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
+
+ /* Get the VCPU pointer from the VCPU scratch register */
+ UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
+ UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
+
+ /* Save guest k0 into VCPU structure */
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
+
+ /* Branch to the common handler */
+ uasm_il_b(&p, &r, label_exit_common);
+ uasm_i_nop(&p);
+
+ uasm_l_exit_common(&l, handler);
+ uasm_resolve_relocs(relocs, labels);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_exit() - Assemble common guest exit handler.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the generic guest exit handling code. This is called by the
+ * exception vectors (generated by kvm_mips_build_exception()), and calls
+ * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
+ * depending on the return value.
+ *
+ * Returns: Next address after end of written function.
+ */
+void *kvm_mips_build_exit(void *addr)
+{
+ u32 *p = addr;
+ unsigned int i;
+ struct uasm_label labels[3];
+ struct uasm_reloc relocs[3];
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ /*
+ * Generic Guest exception handler. We end up here when the guest
+ * does something that causes a trap to kernel mode.
+ *
+ * Both k0/k1 registers will have already been saved (k0 into the vcpu
+ * structure, and k1 into the scratch_tmp register).
+ *
+ * The k1 register will already contain the kvm_vcpu_arch pointer.
+ */
+
+ /* Start saving Guest context to VCPU */
+ for (i = 0; i < 32; ++i) {
+ /* Guest k0/k1 saved later */
+ if (i == K0 || i == K1)
+ continue;
+ UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
+ }
+
+#ifndef CONFIG_CPU_MIPSR6
+ /* We need to save hi/lo and restore them on the way out */
+ uasm_i_mfhi(&p, T0);
+ UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);
+
+ uasm_i_mflo(&p, T0);
+ UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
+#endif
+
+ /* Finally save guest k1 to VCPU */
+ uasm_i_ehb(&p);
+ UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
+ UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
+
+ /* Now that context has been saved, we can use other registers */
+
+ /* Restore vcpu */
+ UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
+
+ /*
+ * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
+ * the exception
+ */
+ UASM_i_MFC0(&p, K0, C0_EPC);
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);
+
+ UASM_i_MFC0(&p, K0, C0_BADVADDR);
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
+ K1);
+
+ uasm_i_mfc0(&p, K0, C0_CAUSE);
+ uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);
+
+ if (cpu_has_badinstr) {
+ uasm_i_mfc0(&p, K0, C0_BADINSTR);
+ uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
+ host_cp0_badinstr), K1);
+ }
+
+ if (cpu_has_badinstrp) {
+ uasm_i_mfc0(&p, K0, C0_BADINSTRP);
+ uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
+ host_cp0_badinstrp), K1);
+ }
+
+ /* Now restore the host state just enough to run the handlers */
+
+ /* Switch EBASE to the one used by Linux */
+ /* load up the host EBASE */
+ uasm_i_mfc0(&p, V0, C0_STATUS);
+
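+	/* Set BEV so exceptions use the boot vectors while EBase is switched */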
+ uasm_i_lui(&p, AT, ST0_BEV >> 16);
+ uasm_i_or(&p, K0, V0, AT);
+
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ UASM_i_LA_mostly(&p, K0, (long)&ebase);
+ UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
+ build_set_exc_base(&p, K0);
+
+ if (raw_cpu_has_fpu) {
+ /*
+ * If FPU is enabled, save FCR31 and clear it so that later
+ * ctc1's don't trigger FPE for pending exceptions.
+ */
+ uasm_i_lui(&p, AT, ST0_CU1 >> 16);
+ uasm_i_and(&p, V1, V0, AT);
+ uasm_il_beqz(&p, &r, V1, label_fpu_1);
+ uasm_i_nop(&p);
+ uasm_i_cfc1(&p, T0, 31);
+ uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
+ K1);
+ uasm_i_ctc1(&p, ZERO, 31);
+ uasm_l_fpu_1(&l, p);
+ }
+
+ if (cpu_has_msa) {
+ /*
+ * If MSA is enabled, save MSACSR and clear it so that later
+ * instructions don't trigger MSAFPE for pending exceptions.
+ */
+ uasm_i_mfc0(&p, T0, C0_CONFIG5);
+ uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
+ uasm_il_beqz(&p, &r, T0, label_msa_1);
+ uasm_i_nop(&p);
+ uasm_i_cfcmsa(&p, T0, MSA_CSR);
+ uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
+ K1);
+ uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
+ uasm_l_msa_1(&l, p);
+ }
+
+#ifdef CONFIG_KVM_MIPS_VZ
+ /* Restore host ASID */
+ if (!cpu_has_guestid) {
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
+ K1);
+ UASM_i_MTC0(&p, K0, C0_ENTRYHI);
+ }
+
+ /*
+ * Set up normal Linux process pgd.
+ * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
+ * - call tlbmiss_handler_setup_pgd(mm->pgd)
+ * - write mm->pgd into CP0_PWBase
+ */
+ UASM_i_LW(&p, A0,
+ offsetof(struct kvm_vcpu_arch, host_pgd), K1);
+ UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
+ uasm_i_jalr(&p, RA, T9);
+ /* delay slot */
+ if (cpu_has_htw)
+ UASM_i_MTC0(&p, A0, C0_PWBASE);
+ else
+ uasm_i_nop(&p);
+
+ /* Clear GM bit so we don't enter guest mode when EXL is cleared */
+ uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
+ uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
+ uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
+
+ /* Save GuestCtl0 so we can access GExcCode after CPU migration */
+ uasm_i_sw(&p, K0,
+ offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);
+
+ if (cpu_has_guestid) {
+ /*
+ * Clear root mode GuestID, so that root TLB operations use the
+ * root GuestID in the root TLB.
+ */
+ uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
+ /* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
+ uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
+ MIPS_GCTL1_RID_WIDTH);
+ uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
+ }
+#endif
+
+ /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
+ uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
+ uasm_i_and(&p, V0, V0, AT);
+ uasm_i_lui(&p, AT, ST0_CU0 >> 16);
+ uasm_i_or(&p, V0, V0, AT);
+#ifdef CONFIG_64BIT
+ uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
+#endif
+ uasm_i_mtc0(&p, V0, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ /* Load up host GP */
+ UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
+
+ /* Need a stack before we can jump to "C" */
+ UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
+
+ /* Saved host state */
+ UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));
+
+ /*
+ * XXXKYMA do we need to load the host ASID, maybe not because the
+ * kernel entries are marked GLOBAL, need to verify
+ */
+
+ /* Restore host scratch registers, as we'll have clobbered them */
+ kvm_mips_build_restore_scratch(&p, K0, SP);
+
+ /* Restore RDHWR access */
+ UASM_i_LA_mostly(&p, K0, (long)&hwrena);
+ uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
+ uasm_i_mtc0(&p, K0, C0_HWRENA);
+
+ /* Jump to handler */
+ /*
+ * XXXKYMA: not sure if this is safe, how large is the stack??
+ * Now jump to the kvm_mips_handle_exit() to see if we can deal
+ * with this in the kernel
+ */
+ uasm_i_move(&p, A0, S0);
+ UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
+ uasm_i_jalr(&p, RA, T9);
+ UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
+
+ uasm_resolve_relocs(relocs, labels);
+
+ p = kvm_mips_build_ret_from_exit(p);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the code to handle the return from kvm_mips_handle_exit(), either
+ * resuming the guest or returning to the host depending on the return value.
+ *
+ * Returns: Next address after end of written function.
+ */
+static void *kvm_mips_build_ret_from_exit(void *addr)
+{
+ u32 *p = addr;
+ struct uasm_label labels[2];
+ struct uasm_reloc relocs[2];
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+	/* Return from handler; make sure interrupts are disabled */
+ uasm_i_di(&p, ZERO);
+ uasm_i_ehb(&p);
+
+ /*
+ * XXXKYMA: k0/k1 could have been blown away if we processed
+ * an exception while we were handling the exception from the
+ * guest, reload k1
+ */
+
+ uasm_i_move(&p, K1, S0);
+ UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
+
+ /*
+	 * Check the return value; it tells us whether we are returning to the
+	 * host (to handle I/O etc.) or resuming the guest.
+ */
+ uasm_i_andi(&p, T0, V0, RESUME_HOST);
+ uasm_il_bnez(&p, &r, T0, label_return_to_host);
+ uasm_i_nop(&p);
+
+ p = kvm_mips_build_ret_to_guest(p);
+
+ uasm_l_return_to_host(&l, p);
+ p = kvm_mips_build_ret_to_host(p);
+
+ uasm_resolve_relocs(relocs, labels);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the code to handle return from the guest exit handler
+ * (kvm_mips_handle_exit()) back to the guest.
+ *
+ * Returns: Next address after end of written function.
+ */
+static void *kvm_mips_build_ret_to_guest(void *addr)
+{
+ u32 *p = addr;
+
+ /* Put the saved pointer to vcpu (s0) back into the scratch register */
+ UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
+
+ /* Load up the Guest EBASE to minimize the window where BEV is set */
+ UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
+
+ /* Switch EBASE back to the one used by KVM */
+ uasm_i_mfc0(&p, V1, C0_STATUS);
+ uasm_i_lui(&p, AT, ST0_BEV >> 16);
+ uasm_i_or(&p, K0, V1, AT);
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_ehb(&p);
+ build_set_exc_base(&p, T0);
+
+ /* Setup status register for running guest in UM */
+ uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
+ UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
+ uasm_i_and(&p, V1, V1, AT);
+ uasm_i_mtc0(&p, V1, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ p = kvm_mips_build_enter_guest(p);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the code to handle return from the guest exit handler
+ * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
+ * function generated by kvm_mips_build_vcpu_run().
+ *
+ * Returns: Next address after end of written function.
+ */
+static void *kvm_mips_build_ret_to_host(void *addr)
+{
+ u32 *p = addr;
+ unsigned int i;
+
+ /* EBASE is already pointing to Linux */
+ UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
+ UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));
+
+ /*
+ * r2/v0 is the return code, shift it down by 2 (arithmetic)
+ * to recover the err code
+ */
+ uasm_i_sra(&p, K0, V0, 2);
+ uasm_i_move(&p, V0, K0);
+
+ /* Load context saved on the host stack */
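+ /*
+ * $16-$23 (s0-s7) and $28-$30 (gp, sp, fp) are reloaded here; $24-$27
+ * (t8, t9, k0, k1) are skipped and ra is restored separately below.
+ */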
+ for (i = 16; i < 31; ++i) {
+ if (i == 24)
+ i = 28;
+ UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
+ }
+
+ /* Restore RDHWR access */
+ UASM_i_LA_mostly(&p, K0, (long)&hwrena);
+ uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
+ uasm_i_mtc0(&p, K0, C0_HWRENA);
+
+ /* Restore RA, which is the address we will return to */
+ UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
+ uasm_i_jr(&p, RA);
+ uasm_i_nop(&p);
+
+ return p;
+}
+
diff --git a/arch/mips/kvm/fpu.S b/arch/mips/kvm/fpu.S
new file mode 100644
index 000000000..16f17c639
--- /dev/null
+++ b/arch/mips/kvm/fpu.S
@@ -0,0 +1,125 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * FPU context handling code for KVM.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+
+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
+
+ .set noreorder
+ .set noat
+
+LEAF(__kvm_save_fpu)
+ .set push
+ SET_HARDFLOAT
+ .set fp=64
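+ /*
+ * Status.FR (bit 26) indicates 32 full 64-bit FPRs; shifting it into
+ * the sign bit lets bgez skip the odd-numbered doubles when FR=0.
+ */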
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 5 # is Status.FR set?
+ bgez t0, 1f # no: skip odd doubles
+ nop
+ sdc1 $f1, VCPU_FPR1(a0)
+ sdc1 $f3, VCPU_FPR3(a0)
+ sdc1 $f5, VCPU_FPR5(a0)
+ sdc1 $f7, VCPU_FPR7(a0)
+ sdc1 $f9, VCPU_FPR9(a0)
+ sdc1 $f11, VCPU_FPR11(a0)
+ sdc1 $f13, VCPU_FPR13(a0)
+ sdc1 $f15, VCPU_FPR15(a0)
+ sdc1 $f17, VCPU_FPR17(a0)
+ sdc1 $f19, VCPU_FPR19(a0)
+ sdc1 $f21, VCPU_FPR21(a0)
+ sdc1 $f23, VCPU_FPR23(a0)
+ sdc1 $f25, VCPU_FPR25(a0)
+ sdc1 $f27, VCPU_FPR27(a0)
+ sdc1 $f29, VCPU_FPR29(a0)
+ sdc1 $f31, VCPU_FPR31(a0)
+1: sdc1 $f0, VCPU_FPR0(a0)
+ sdc1 $f2, VCPU_FPR2(a0)
+ sdc1 $f4, VCPU_FPR4(a0)
+ sdc1 $f6, VCPU_FPR6(a0)
+ sdc1 $f8, VCPU_FPR8(a0)
+ sdc1 $f10, VCPU_FPR10(a0)
+ sdc1 $f12, VCPU_FPR12(a0)
+ sdc1 $f14, VCPU_FPR14(a0)
+ sdc1 $f16, VCPU_FPR16(a0)
+ sdc1 $f18, VCPU_FPR18(a0)
+ sdc1 $f20, VCPU_FPR20(a0)
+ sdc1 $f22, VCPU_FPR22(a0)
+ sdc1 $f24, VCPU_FPR24(a0)
+ sdc1 $f26, VCPU_FPR26(a0)
+ sdc1 $f28, VCPU_FPR28(a0)
+ jr ra
+ sdc1 $f30, VCPU_FPR30(a0)
+ .set pop
+ END(__kvm_save_fpu)
+
+LEAF(__kvm_restore_fpu)
+ .set push
+ SET_HARDFLOAT
+ .set fp=64
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 5 # is Status.FR set?
+ bgez t0, 1f # no: skip odd doubles
+ nop
+ ldc1 $f1, VCPU_FPR1(a0)
+ ldc1 $f3, VCPU_FPR3(a0)
+ ldc1 $f5, VCPU_FPR5(a0)
+ ldc1 $f7, VCPU_FPR7(a0)
+ ldc1 $f9, VCPU_FPR9(a0)
+ ldc1 $f11, VCPU_FPR11(a0)
+ ldc1 $f13, VCPU_FPR13(a0)
+ ldc1 $f15, VCPU_FPR15(a0)
+ ldc1 $f17, VCPU_FPR17(a0)
+ ldc1 $f19, VCPU_FPR19(a0)
+ ldc1 $f21, VCPU_FPR21(a0)
+ ldc1 $f23, VCPU_FPR23(a0)
+ ldc1 $f25, VCPU_FPR25(a0)
+ ldc1 $f27, VCPU_FPR27(a0)
+ ldc1 $f29, VCPU_FPR29(a0)
+ ldc1 $f31, VCPU_FPR31(a0)
+1: ldc1 $f0, VCPU_FPR0(a0)
+ ldc1 $f2, VCPU_FPR2(a0)
+ ldc1 $f4, VCPU_FPR4(a0)
+ ldc1 $f6, VCPU_FPR6(a0)
+ ldc1 $f8, VCPU_FPR8(a0)
+ ldc1 $f10, VCPU_FPR10(a0)
+ ldc1 $f12, VCPU_FPR12(a0)
+ ldc1 $f14, VCPU_FPR14(a0)
+ ldc1 $f16, VCPU_FPR16(a0)
+ ldc1 $f18, VCPU_FPR18(a0)
+ ldc1 $f20, VCPU_FPR20(a0)
+ ldc1 $f22, VCPU_FPR22(a0)
+ ldc1 $f24, VCPU_FPR24(a0)
+ ldc1 $f26, VCPU_FPR26(a0)
+ ldc1 $f28, VCPU_FPR28(a0)
+ jr ra
+ ldc1 $f30, VCPU_FPR30(a0)
+ .set pop
+ END(__kvm_restore_fpu)
+
+LEAF(__kvm_restore_fcsr)
+ .set push
+ SET_HARDFLOAT
+ lw t0, VCPU_FCR31(a0)
+ /*
+ * The ctc1 must stay at this offset in __kvm_restore_fcsr.
+ * See kvm_mips_csr_die_notify() which handles t0 containing a value
+ * which triggers an FP Exception, which must be stepped over and
+ * ignored since the set cause bits must remain there for the guest.
+ */
+ ctc1 t0, fcr31
+ jr ra
+ nop
+ .set pop
+ END(__kvm_restore_fcsr)
diff --git a/arch/mips/kvm/hypcall.c b/arch/mips/kvm/hypcall.c
new file mode 100644
index 000000000..830634351
--- /dev/null
+++ b/arch/mips/kvm/hypcall.c
@@ -0,0 +1,53 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Hypercall handling.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/kvm_para.h>
+
+#define MAX_HYPCALL_ARGS 4
+
+enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
+ union mips_instruction inst)
+{
+ unsigned int code = (inst.co_format.code >> 5) & 0x3ff;
+
+ kvm_debug("[%#lx] HYPCALL %#03x\n", vcpu->arch.pc, code);
+
+ switch (code) {
+ case 0:
+ return EMULATE_HYPERCALL;
+ default:
+ return EMULATE_FAIL;
+ };
+}
+
+static int kvm_mips_hypercall(struct kvm_vcpu *vcpu, unsigned long num,
+ const unsigned long *args, unsigned long *hret)
+{
+ /* Report unimplemented hypercall to guest */
+ *hret = -KVM_ENOSYS;
+ return RESUME_GUEST;
+}
+
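+/*
+ * Hypercall ABI as read below: the call number arrives in v0 ($2) with up
+ * to four arguments in a0-a3 ($4-$7); the result is written back to v0.
+ * Unimplemented hypercalls currently report -KVM_ENOSYS to the guest.
+ */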
+int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu)
+{
+ unsigned long num, args[MAX_HYPCALL_ARGS];
+
+ /* read hypcall number and arguments */
+ num = vcpu->arch.gprs[2]; /* v0 */
+ args[0] = vcpu->arch.gprs[4]; /* a0 */
+ args[1] = vcpu->arch.gprs[5]; /* a1 */
+ args[2] = vcpu->arch.gprs[6]; /* a2 */
+ args[3] = vcpu->arch.gprs[7]; /* a3 */
+
+ return kvm_mips_hypercall(vcpu, num,
+ args, &vcpu->arch.gprs[2] /* v0 */);
+}
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
new file mode 100644
index 000000000..d28c2c9c3
--- /dev/null
+++ b/arch/mips/kvm/interrupt.c
@@ -0,0 +1,175 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Interrupt delivery
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/memblock.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+
+#include <linux/kvm_host.h>
+
+#include "interrupt.h"
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
+{
+ set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
+{
+ clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Set the Cause bits to reflect the pending timer interrupt;
+ * the EXC code will be set when we actually deliver the
+ * interrupt:
+ */
+ kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+
+ /* Queue up an INT exception for the core */
+ kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+
+}
+
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+ kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+ kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq)
+{
+ int intr = (int)irq->irq;
+
+ /*
+ * Set the Cause bits to reflect the pending IO interrupt;
+ * the EXC code will be set when we actually deliver the
+ * interrupt:
+ */
+ kvm_set_c0_guest_cause(vcpu->arch.cop0, 1 << (intr + 8));
+ kvm_mips_queue_irq(vcpu, kvm_irq_to_priority(intr));
+}
+
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq)
+{
+ int intr = (int)irq->irq;
+
+ kvm_clear_c0_guest_cause(vcpu->arch.cop0, 1 << (-intr + 8));
+ kvm_mips_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
+}
+
+/* Deliver the interrupt of the corresponding priority, if possible. */
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+ u32 cause)
+{
+ int allowed = 0;
+ u32 exccode, ie;
+
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+ if (priority == MIPS_EXC_MAX)
+ return 0;
+
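+ /*
+ * Status.IM lives in bits 15:8, so bit (irq + 8) is the interrupt mask
+ * bit for this source.
+ */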
+ ie = 1 << (kvm_priority_to_irq[priority] + 8);
+ if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+ && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+ && (kvm_read_c0_guest_status(cop0) & ie)) {
+ allowed = 1;
+ exccode = EXCCODE_INT;
+ }
+
+ /* Are we allowed to deliver the interrupt ??? */
+ if (allowed) {
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
+
+ } else
+ kvm_err("Trying to deliver interrupt when EXL is already set\n");
+
+ kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
+ (exccode << CAUSEB_EXCCODE));
+
+ /* XXXSL Set PC to the interrupt exception entry point */
+ arch->pc = kvm_mips_guest_exception_base(vcpu);
+ if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
+ arch->pc += 0x200;
+ else
+ arch->pc += 0x180;
+
+ clear_bit(priority, &vcpu->arch.pending_exceptions);
+ }
+
+ return allowed;
+}
+
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+ u32 cause)
+{
+ return 1;
+}
+
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
+{
+ unsigned long *pending = &vcpu->arch.pending_exceptions;
+ unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
+ unsigned int priority;
+
+ if (!(*pending) && !(*pending_clr))
+ return;
+
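+ /* Retire any interrupts queued for clearing before delivering new ones */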
+ priority = __ffs(*pending_clr);
+ while (priority <= MIPS_EXC_MAX) {
+ if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
+ if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
+ break;
+ }
+
+ priority = find_next_bit(pending_clr,
+ BITS_PER_BYTE * sizeof(*pending_clr),
+ priority + 1);
+ }
+
+ priority = __ffs(*pending);
+ while (priority <= MIPS_EXC_MAX) {
+ if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
+ if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
+ break;
+ }
+
+ priority = find_next_bit(pending,
+ BITS_PER_BYTE * sizeof(*pending),
+ priority + 1);
+ }
+
+}
+
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
+{
+ return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
+}
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
new file mode 100644
index 000000000..c3e878ca3
--- /dev/null
+++ b/arch/mips/kvm/interrupt.h
@@ -0,0 +1,59 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Interrupts
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+/*
+ * MIPS Exception Priorities: exceptions (including interrupts) are queued up
+ * for the guest in the order specified by their priorities
+ */
+
+#define MIPS_EXC_RESET 0
+#define MIPS_EXC_SRESET 1
+#define MIPS_EXC_DEBUG_ST 2
+#define MIPS_EXC_DEBUG 3
+#define MIPS_EXC_DDB 4
+#define MIPS_EXC_NMI 5
+#define MIPS_EXC_MCHK 6
+#define MIPS_EXC_INT_TIMER 7
+#define MIPS_EXC_INT_IO_1 8
+#define MIPS_EXC_INT_IO_2 9
+#define MIPS_EXC_EXECUTE 10
+#define MIPS_EXC_INT_IPI_1 11
+#define MIPS_EXC_INT_IPI_2 12
+#define MIPS_EXC_MAX 13
+/* XXXSL More to follow */
+
+#define C_TI (_ULCAST_(1) << 30)
+
+#ifdef CONFIG_KVM_MIPS_VZ
+#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (1)
+#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (1)
+#else
+#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
+#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
+#endif
+
+extern u32 *kvm_priority_to_irq;
+u32 kvm_irq_to_priority(u32 irq);
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq);
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq);
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+ u32 cause);
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+ u32 cause);
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause);
diff --git a/arch/mips/kvm/loongson_ipi.c b/arch/mips/kvm/loongson_ipi.c
new file mode 100644
index 000000000..3681fc8fb
--- /dev/null
+++ b/arch/mips/kvm/loongson_ipi.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Loongson-3 Virtual IPI interrupt support.
+ *
+ * Copyright (C) 2019 Loongson Technologies, Inc. All rights reserved.
+ *
+ * Authors: Chen Zhu <zhuchen@loongson.cn>
+ * Authors: Huacai Chen <chenhc@lemote.com>
+ */
+
+#include <linux/kvm_host.h>
+
+#define IPI_BASE 0x3ff01000ULL
+
+#define CORE0_STATUS_OFF 0x000
+#define CORE0_EN_OFF 0x004
+#define CORE0_SET_OFF 0x008
+#define CORE0_CLEAR_OFF 0x00c
+#define CORE0_BUF_20 0x020
+#define CORE0_BUF_28 0x028
+#define CORE0_BUF_30 0x030
+#define CORE0_BUF_38 0x038
+
+#define CORE1_STATUS_OFF 0x100
+#define CORE1_EN_OFF 0x104
+#define CORE1_SET_OFF 0x108
+#define CORE1_CLEAR_OFF 0x10c
+#define CORE1_BUF_20 0x120
+#define CORE1_BUF_28 0x128
+#define CORE1_BUF_30 0x130
+#define CORE1_BUF_38 0x138
+
+#define CORE2_STATUS_OFF 0x200
+#define CORE2_EN_OFF 0x204
+#define CORE2_SET_OFF 0x208
+#define CORE2_CLEAR_OFF 0x20c
+#define CORE2_BUF_20 0x220
+#define CORE2_BUF_28 0x228
+#define CORE2_BUF_30 0x230
+#define CORE2_BUF_38 0x238
+
+#define CORE3_STATUS_OFF 0x300
+#define CORE3_EN_OFF 0x304
+#define CORE3_SET_OFF 0x308
+#define CORE3_CLEAR_OFF 0x30c
+#define CORE3_BUF_20 0x320
+#define CORE3_BUF_28 0x328
+#define CORE3_BUF_30 0x330
+#define CORE3_BUF_38 0x338
+
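+/*
+ * Register offsets below repeat per core with a 0x100 stride; bits 45:44 of
+ * the guest physical address select the node, so the target vcpu id decoded
+ * below is node * 4 + core.
+ */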
+static int loongson_vipi_read(struct loongson_kvm_ipi *ipi,
+ gpa_t addr, int len, void *val)
+{
+ uint32_t core = (addr >> 8) & 3;
+ uint32_t node = (addr >> 44) & 3;
+ uint32_t id = core + node * 4;
+ uint64_t offset = addr & 0xff;
+ void *pbuf;
+ struct ipi_state *s = &(ipi->ipistate[id]);
+
+ BUG_ON(offset & (len - 1));
+
+ switch (offset) {
+ case CORE0_STATUS_OFF:
+ *(uint64_t *)val = s->status;
+ break;
+
+ case CORE0_EN_OFF:
+ *(uint64_t *)val = s->en;
+ break;
+
+ case CORE0_SET_OFF:
+ *(uint64_t *)val = 0;
+ break;
+
+ case CORE0_CLEAR_OFF:
+ *(uint64_t *)val = 0;
+ break;
+
+ case CORE0_BUF_20 ... CORE0_BUF_38:
+ pbuf = (void *)s->buf + (offset - 0x20);
+ if (len == 8)
+ *(uint64_t *)val = *(uint64_t *)pbuf;
+ else /* Assume len == 4 */
+ *(uint32_t *)val = *(uint32_t *)pbuf;
+ break;
+
+ default:
+ pr_notice("%s with unknown addr %llx\n", __func__, addr);
+ break;
+ }
+
+ return 0;
+}
+
+static int loongson_vipi_write(struct loongson_kvm_ipi *ipi,
+ gpa_t addr, int len, const void *val)
+{
+ uint32_t core = (addr >> 8) & 3;
+ uint32_t node = (addr >> 44) & 3;
+ uint32_t id = core + node * 4;
+ uint64_t data, offset = addr & 0xff;
+ void *pbuf;
+ struct kvm *kvm = ipi->kvm;
+ struct kvm_mips_interrupt irq;
+ struct ipi_state *s = &(ipi->ipistate[id]);
+
+ data = *(uint64_t *)val;
+ BUG_ON(offset & (len - 1));
+
+ switch (offset) {
+ case CORE0_STATUS_OFF:
+ break;
+
+ case CORE0_EN_OFF:
+ s->en = data;
+ break;
+
+ case CORE0_SET_OFF:
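+ /* Setting status bits raises guest interrupt 6 (IP6) on the target vcpu */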
+ s->status |= data;
+ irq.cpu = id;
+ irq.irq = 6;
+ kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq);
+ break;
+
+ case CORE0_CLEAR_OFF:
+ s->status &= ~data;
+ if (!s->status) {
+ irq.cpu = id;
+ irq.irq = -6;
+ kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq);
+ }
+ break;
+
+ case CORE0_BUF_20 ... CORE0_BUF_38:
+ pbuf = (void *)s->buf + (offset - 0x20);
+ if (len == 8)
+ *(uint64_t *)pbuf = (uint64_t)data;
+ else /* Assume len == 4 */
+ *(uint32_t *)pbuf = (uint32_t)data;
+ break;
+
+ default:
+ pr_notice("%s with unknown addr %llx\n", __func__, addr);
+ break;
+ }
+
+ return 0;
+}
+
+static int kvm_ipi_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+ gpa_t addr, int len, void *val)
+{
+ unsigned long flags;
+ struct loongson_kvm_ipi *ipi;
+ struct ipi_io_device *ipi_device;
+
+ ipi_device = container_of(dev, struct ipi_io_device, device);
+ ipi = ipi_device->ipi;
+
+ spin_lock_irqsave(&ipi->lock, flags);
+ loongson_vipi_read(ipi, addr, len, val);
+ spin_unlock_irqrestore(&ipi->lock, flags);
+
+ return 0;
+}
+
+static int kvm_ipi_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+ gpa_t addr, int len, const void *val)
+{
+ unsigned long flags;
+ struct loongson_kvm_ipi *ipi;
+ struct ipi_io_device *ipi_device;
+
+ ipi_device = container_of(dev, struct ipi_io_device, device);
+ ipi = ipi_device->ipi;
+
+ spin_lock_irqsave(&ipi->lock, flags);
+ loongson_vipi_write(ipi, addr, len, val);
+ spin_unlock_irqrestore(&ipi->lock, flags);
+
+ return 0;
+}
+
+static const struct kvm_io_device_ops kvm_ipi_ops = {
+ .read = kvm_ipi_read,
+ .write = kvm_ipi_write,
+};
+
+void kvm_init_loongson_ipi(struct kvm *kvm)
+{
+ int i;
+ unsigned long addr;
+ struct loongson_kvm_ipi *s;
+ struct kvm_io_device *device;
+
+ s = &kvm->arch.ipi;
+ s->kvm = kvm;
+ spin_lock_init(&s->lock);
+
+ /*
+ * Initialize IPI device
+ */
+ for (i = 0; i < 4; i++) {
+ device = &s->dev_ipi[i].device;
+ kvm_iodevice_init(device, &kvm_ipi_ops);
+ addr = (((unsigned long)i) << 44) + IPI_BASE;
+ mutex_lock(&kvm->slots_lock);
+ kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, 0x400, device);
+ mutex_unlock(&kvm->slots_lock);
+ s->dev_ipi[i].ipi = s;
+ s->dev_ipi[i].node_id = i;
+ }
+}
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
new file mode 100644
index 000000000..3d6a7f582
--- /dev/null
+++ b/arch/mips/kvm/mips.c
@@ -0,0 +1,1701 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: MIPS specific KVM APIs
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/sched/signal.h>
+#include <linux/fs.h>
+#include <linux/memblock.h>
+#include <linux/pgtable.h>
+
+#include <asm/fpu.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
+
+#include <linux/kvm_host.h>
+
+#include "interrupt.h"
+#include "commpage.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#ifndef VECTORSPACING
+#define VECTORSPACING 0x100 /* for EI/VI mode */
+#endif
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+ VCPU_STAT("wait", wait_exits),
+ VCPU_STAT("cache", cache_exits),
+ VCPU_STAT("signal", signal_exits),
+ VCPU_STAT("interrupt", int_exits),
+ VCPU_STAT("cop_unusable", cop_unusable_exits),
+ VCPU_STAT("tlbmod", tlbmod_exits),
+ VCPU_STAT("tlbmiss_ld", tlbmiss_ld_exits),
+ VCPU_STAT("tlbmiss_st", tlbmiss_st_exits),
+ VCPU_STAT("addrerr_st", addrerr_st_exits),
+ VCPU_STAT("addrerr_ld", addrerr_ld_exits),
+ VCPU_STAT("syscall", syscall_exits),
+ VCPU_STAT("resvd_inst", resvd_inst_exits),
+ VCPU_STAT("break_inst", break_inst_exits),
+ VCPU_STAT("trap_inst", trap_inst_exits),
+ VCPU_STAT("msa_fpe", msa_fpe_exits),
+ VCPU_STAT("fpe", fpe_exits),
+ VCPU_STAT("msa_disabled", msa_disabled_exits),
+ VCPU_STAT("flush_dcache", flush_dcache_exits),
+#ifdef CONFIG_KVM_MIPS_VZ
+ VCPU_STAT("vz_gpsi", vz_gpsi_exits),
+ VCPU_STAT("vz_gsfc", vz_gsfc_exits),
+ VCPU_STAT("vz_hc", vz_hc_exits),
+ VCPU_STAT("vz_grr", vz_grr_exits),
+ VCPU_STAT("vz_gva", vz_gva_exits),
+ VCPU_STAT("vz_ghfc", vz_ghfc_exits),
+ VCPU_STAT("vz_gpa", vz_gpa_exits),
+ VCPU_STAT("vz_resvd", vz_resvd_exits),
+#ifdef CONFIG_CPU_LOONGSON64
+ VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
+#endif
+#endif
+ VCPU_STAT("halt_successful_poll", halt_successful_poll),
+ VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
+ VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
+ VCPU_STAT("halt_wakeup", halt_wakeup),
+ VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
+ VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
+ {NULL}
+};
+
+bool kvm_trace_guest_mode_change;
+
+int kvm_guest_mode_change_trace_reg(void)
+{
+ kvm_trace_guest_mode_change = true;
+ return 0;
+}
+
+void kvm_guest_mode_change_trace_unreg(void)
+{
+ kvm_trace_guest_mode_change = false;
+}
+
+/*
+ * XXXKYMA: We are simulating a processor that has the WII bit set in
+ * Config7, so we are "runnable" if interrupts are pending
+ */
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+ return !!(vcpu->arch.pending_exceptions);
+}
+
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+ return 1;
+}
+
+int kvm_arch_hardware_enable(void)
+{
+ return kvm_mips_callbacks->hardware_enable();
+}
+
+void kvm_arch_hardware_disable(void)
+{
+ kvm_mips_callbacks->hardware_disable();
+}
+
+int kvm_arch_hardware_setup(void *opaque)
+{
+ return 0;
+}
+
+int kvm_arch_check_processor_compat(void *opaque)
+{
+ return 0;
+}
+
+extern void kvm_init_loongson_ipi(struct kvm *kvm);
+
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+ switch (type) {
+ case KVM_VM_MIPS_AUTO:
+ break;
+#ifdef CONFIG_KVM_MIPS_VZ
+ case KVM_VM_MIPS_VZ:
+#else
+ case KVM_VM_MIPS_TE:
+#endif
+ break;
+ default:
+ /* Unsupported KVM type */
+ return -EINVAL;
+ };
+
+ /* Allocate page table to map GPA -> RPA */
+ kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
+ if (!kvm->arch.gpa_mm.pgd)
+ return -ENOMEM;
+
+#ifdef CONFIG_CPU_LOONGSON64
+ kvm_init_loongson_ipi(kvm);
+#endif
+
+ return 0;
+}
+
+void kvm_mips_free_vcpus(struct kvm *kvm)
+{
+ unsigned int i;
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ kvm_vcpu_destroy(vcpu);
+ }
+
+ mutex_lock(&kvm->lock);
+
+ for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+ kvm->vcpus[i] = NULL;
+
+ atomic_set(&kvm->online_vcpus, 0);
+
+ mutex_unlock(&kvm->lock);
+}
+
+static void kvm_mips_free_gpa_pt(struct kvm *kvm)
+{
+ /* It should always be safe to remove after flushing the whole range */
+ WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
+ pgd_free(NULL, kvm->arch.gpa_mm.pgd);
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+ kvm_mips_free_vcpus(kvm);
+ kvm_mips_free_gpa_pt(kvm);
+}
+
+long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg)
+{
+ return -ENOIOCTLCMD;
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+ /* Flush whole GPA */
+ kvm_mips_flush_gpa_pt(kvm, 0, ~0);
+
+ /* Let implementation do the rest */
+ kvm_mips_callbacks->flush_shadow_all(kvm);
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot)
+{
+ /*
+ * The slot has been made invalid (ready for moving or deletion), so we
+ * need to ensure that it can no longer be accessed by any guest VCPUs.
+ */
+
+ spin_lock(&kvm->mmu_lock);
+ /* Flush slot from GPA */
+ kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
+ slot->base_gfn + slot->npages - 1);
+ /* Let implementation do the rest */
+ kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
+ spin_unlock(&kvm->mmu_lock);
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change)
+{
+ return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ const struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
+{
+ int needs_flush;
+
+ kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
+ __func__, kvm, mem->slot, mem->guest_phys_addr,
+ mem->memory_size, mem->userspace_addr);
+
+ /*
+ * If dirty page logging is enabled, write protect all pages in the slot
+ * ready for dirty logging.
+ *
+ * There is no need to do this in any of the following cases:
+ * CREATE: No dirty mappings will already exist.
+ * MOVE/DELETE: The old mappings will already have been cleaned up by
+ * kvm_arch_flush_shadow_memslot()
+ */
+ if (change == KVM_MR_FLAGS_ONLY &&
+ (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
+ new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
+ spin_lock(&kvm->mmu_lock);
+ /* Write protect GPA page table entries */
+ needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
+ new->base_gfn + new->npages - 1);
+ /* Let implementation do the rest */
+ if (needs_flush)
+ kvm_mips_callbacks->flush_shadow_memslot(kvm, new);
+ spin_unlock(&kvm->mmu_lock);
+ }
+}
+
+static inline void dump_handler(const char *symbol, void *start, void *end)
+{
+ u32 *p;
+
+ pr_debug("LEAF(%s)\n", symbol);
+
+ pr_debug("\t.set push\n");
+ pr_debug("\t.set noreorder\n");
+
+ for (p = start; p < (u32 *)end; ++p)
+ pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);
+
+ pr_debug("\t.set\tpop\n");
+
+ pr_debug("\tEND(%s)\n", symbol);
+}
+
+/* low level hrtimer wake routine */
+static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
+{
+ struct kvm_vcpu *vcpu;
+
+ vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
+
+ kvm_mips_callbacks->queue_timer_int(vcpu);
+
+ vcpu->arch.wait = 0;
+ rcuwait_wake_up(&vcpu->wait);
+
+ return kvm_mips_count_timeout(vcpu);
+}
+
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
+{
+ return 0;
+}
+
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+{
+ int err, size;
+ void *gebase, *p, *handler, *refill_start, *refill_end;
+ int i;
+
+ kvm_debug("kvm @ %p: create cpu %d at %p\n",
+ vcpu->kvm, vcpu->vcpu_id, vcpu);
+
+ err = kvm_mips_callbacks->vcpu_init(vcpu);
+ if (err)
+ return err;
+
+ hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
+
+ /*
+ * Allocate space for host mode exception handlers that handle
+ * guest mode exits
+ */
+ if (cpu_has_veic || cpu_has_vint)
+ size = 0x200 + VECTORSPACING * 64;
+ else
+ size = 0x4000;
+
+ gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
+
+ if (!gebase) {
+ err = -ENOMEM;
+ goto out_uninit_vcpu;
+ }
+ kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
+ ALIGN(size, PAGE_SIZE), gebase);
+
+ /*
+ * Check new ebase actually fits in CP0_EBase. The lack of a write gate
+ * limits us to the low 512MB of physical address space. If the memory
+ * we allocate is out of range, just give up now.
+ */
+ if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
+ kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
+ gebase);
+ err = -ENOMEM;
+ goto out_free_gebase;
+ }
+
+ /* Save new ebase */
+ vcpu->arch.guest_ebase = gebase;
+
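+ /*
+ * Rough layout of this area as built below: TLB/XTLB refill handler at
+ * gebase (or gebase + 0x80 for 64-bit VZ), general exception entry at
+ * +0x180, vectored interrupt entries from +0x200, and the common exit
+ * handler at +0x2000 followed by the generated vcpu_run routine.
+ */
+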
+ /* Build guest exception vectors dynamically in unmapped memory */
+ handler = gebase + 0x2000;
+
+ /* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
+ refill_start = gebase;
+ if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
+ refill_start += 0x080;
+ refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);
+
+ /* General Exception Entry point */
+ kvm_mips_build_exception(gebase + 0x180, handler);
+
+ /* For vectored interrupts poke the exception code @ all offsets 0-7 */
+ for (i = 0; i < 8; i++) {
+ kvm_debug("L1 Vectored handler @ %p\n",
+ gebase + 0x200 + (i * VECTORSPACING));
+ kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
+ handler);
+ }
+
+ /* General exit handler */
+ p = handler;
+ p = kvm_mips_build_exit(p);
+
+ /* Guest entry routine */
+ vcpu->arch.vcpu_run = p;
+ p = kvm_mips_build_vcpu_run(p);
+
+ /* Dump the generated code */
+ pr_debug("#include <asm/asm.h>\n");
+ pr_debug("#include <asm/regdef.h>\n");
+ pr_debug("\n");
+ dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
+ dump_handler("kvm_tlb_refill", refill_start, refill_end);
+ dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
+ dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
+
+ /* Invalidate the icache for these ranges */
+ flush_icache_range((unsigned long)gebase,
+ (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
+
+ /*
+ * Allocate comm page for guest kernel; a TLB will be reserved for
+ * mapping GVA @ 0xFFFF8000 to this page
+ */
+ vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
+
+ if (!vcpu->arch.kseg0_commpage) {
+ err = -ENOMEM;
+ goto out_free_gebase;
+ }
+
+ kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
+ kvm_mips_commpage_init(vcpu);
+
+ /* Init */
+ vcpu->arch.last_sched_cpu = -1;
+ vcpu->arch.last_exec_cpu = -1;
+
+ /* Initial guest state */
+ err = kvm_mips_callbacks->vcpu_setup(vcpu);
+ if (err)
+ goto out_free_commpage;
+
+ return 0;
+
+out_free_commpage:
+ kfree(vcpu->arch.kseg0_commpage);
+out_free_gebase:
+ kfree(gebase);
+out_uninit_vcpu:
+ kvm_mips_callbacks->vcpu_uninit(vcpu);
+ return err;
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+ kvm_mips_dump_stats(vcpu);
+
+ kvm_mmu_free_memory_caches(vcpu);
+ kfree(vcpu->arch.guest_ebase);
+ kfree(vcpu->arch.kseg0_commpage);
+
+ kvm_mips_callbacks->vcpu_uninit(vcpu);
+}
+
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
+{
+ return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+{
+ int r = -EINTR;
+
+ vcpu_load(vcpu);
+
+ kvm_sigset_activate(vcpu);
+
+ if (vcpu->mmio_needed) {
+ if (!vcpu->mmio_is_write)
+ kvm_mips_complete_mmio_load(vcpu);
+ vcpu->mmio_needed = 0;
+ }
+
+ if (vcpu->run->immediate_exit)
+ goto out;
+
+ lose_fpu(1);
+
+ local_irq_disable();
+ guest_enter_irqoff();
+ trace_kvm_enter(vcpu);
+
+ /*
+ * Make sure the read of VCPU requests in vcpu_run() callback is not
+ * reordered ahead of the write to vcpu->mode, or we could miss a TLB
+ * flush request while the requester sees the VCPU as outside of guest
+ * mode and not needing an IPI.
+ */
+ smp_store_mb(vcpu->mode, IN_GUEST_MODE);
+
+ r = kvm_mips_callbacks->vcpu_run(vcpu);
+
+ trace_kvm_out(vcpu);
+ guest_exit_irqoff();
+ local_irq_enable();
+
+out:
+ kvm_sigset_deactivate(vcpu);
+
+ vcpu_put(vcpu);
+ return r;
+}
+
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq)
+{
+ int intr = (int)irq->irq;
+ struct kvm_vcpu *dvcpu = NULL;
+
+ if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
+ intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
+ intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
+ intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
+ kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
+ (int)intr);
+
+ if (irq->cpu == -1)
+ dvcpu = vcpu;
+ else
+ dvcpu = vcpu->kvm->vcpus[irq->cpu];
+
+ if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
+ kvm_mips_callbacks->queue_io_int(dvcpu, irq);
+
+ } else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
+ kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
+ } else {
+ kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
+ irq->cpu, irq->irq);
+ return -EINVAL;
+ }
+
+ dvcpu->arch.wait = 0;
+
+ rcuwait_wake_up(&dvcpu->wait);
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -ENOIOCTLCMD;
+}
+
+static u64 kvm_mips_get_one_regs[] = {
+ KVM_REG_MIPS_R0,
+ KVM_REG_MIPS_R1,
+ KVM_REG_MIPS_R2,
+ KVM_REG_MIPS_R3,
+ KVM_REG_MIPS_R4,
+ KVM_REG_MIPS_R5,
+ KVM_REG_MIPS_R6,
+ KVM_REG_MIPS_R7,
+ KVM_REG_MIPS_R8,
+ KVM_REG_MIPS_R9,
+ KVM_REG_MIPS_R10,
+ KVM_REG_MIPS_R11,
+ KVM_REG_MIPS_R12,
+ KVM_REG_MIPS_R13,
+ KVM_REG_MIPS_R14,
+ KVM_REG_MIPS_R15,
+ KVM_REG_MIPS_R16,
+ KVM_REG_MIPS_R17,
+ KVM_REG_MIPS_R18,
+ KVM_REG_MIPS_R19,
+ KVM_REG_MIPS_R20,
+ KVM_REG_MIPS_R21,
+ KVM_REG_MIPS_R22,
+ KVM_REG_MIPS_R23,
+ KVM_REG_MIPS_R24,
+ KVM_REG_MIPS_R25,
+ KVM_REG_MIPS_R26,
+ KVM_REG_MIPS_R27,
+ KVM_REG_MIPS_R28,
+ KVM_REG_MIPS_R29,
+ KVM_REG_MIPS_R30,
+ KVM_REG_MIPS_R31,
+
+#ifndef CONFIG_CPU_MIPSR6
+ KVM_REG_MIPS_HI,
+ KVM_REG_MIPS_LO,
+#endif
+ KVM_REG_MIPS_PC,
+};
+
+static u64 kvm_mips_get_one_regs_fpu[] = {
+ KVM_REG_MIPS_FCR_IR,
+ KVM_REG_MIPS_FCR_CSR,
+};
+
+static u64 kvm_mips_get_one_regs_msa[] = {
+ KVM_REG_MIPS_MSA_IR,
+ KVM_REG_MIPS_MSA_CSR,
+};
+
+static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
+{
+ unsigned long ret;
+
+ ret = ARRAY_SIZE(kvm_mips_get_one_regs);
+ if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
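+ /* 32 single-precision FPRs plus the 16 even-numbered doubles */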
+ ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
+ /* odd doubles */
+ if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
+ ret += 16;
+ }
+ if (kvm_mips_guest_can_have_msa(&vcpu->arch))
+ ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
+ ret += kvm_mips_callbacks->num_regs(vcpu);
+
+ return ret;
+}
+
+static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
+{
+ u64 index;
+ unsigned int i;
+
+ if (copy_to_user(indices, kvm_mips_get_one_regs,
+ sizeof(kvm_mips_get_one_regs)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_mips_get_one_regs);
+
+ if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
+ if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
+ sizeof(kvm_mips_get_one_regs_fpu)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);
+
+ for (i = 0; i < 32; ++i) {
+ index = KVM_REG_MIPS_FPR_32(i);
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+
+ /* skip odd doubles if no F64 */
+ if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
+ continue;
+
+ index = KVM_REG_MIPS_FPR_64(i);
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+ }
+
+ if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
+ if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
+ sizeof(kvm_mips_get_one_regs_msa)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);
+
+ for (i = 0; i < 32; ++i) {
+ index = KVM_REG_MIPS_VEC_128(i);
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+ }
+
+ return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
+}
+
+static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
+ int ret;
+ s64 v;
+ s64 vs[2];
+ unsigned int idx;
+
+ switch (reg->id) {
+ /* General purpose registers */
+ case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
+ v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
+ break;
+#ifndef CONFIG_CPU_MIPSR6
+ case KVM_REG_MIPS_HI:
+ v = (long)vcpu->arch.hi;
+ break;
+ case KVM_REG_MIPS_LO:
+ v = (long)vcpu->arch.lo;
+ break;
+#endif
+ case KVM_REG_MIPS_PC:
+ v = (long)vcpu->arch.pc;
+ break;
+
+ /* Floating point registers */
+ case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_FPR_32(0);
+ /* Odd singles in top of even double when FR=0 */
+ if (kvm_read_c0_guest_status(cop0) & ST0_FR)
+ v = get_fpr32(&fpu->fpr[idx], 0);
+ else
+ v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
+ break;
+ case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_FPR_64(0);
+ /* Can't access odd doubles in FR=0 mode */
+ if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
+ return -EINVAL;
+ v = get_fpr64(&fpu->fpr[idx], 0);
+ break;
+ case KVM_REG_MIPS_FCR_IR:
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ v = boot_cpu_data.fpu_id;
+ break;
+ case KVM_REG_MIPS_FCR_CSR:
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ v = fpu->fcr31;
+ break;
+
+ /* MIPS SIMD Architecture (MSA) registers */
+ case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ /* Can't access MSA registers in FR=0 mode */
+ if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_VEC_128(0);
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ /* least significant byte first */
+ vs[0] = get_fpr64(&fpu->fpr[idx], 0);
+ vs[1] = get_fpr64(&fpu->fpr[idx], 1);
+#else
+ /* most significant byte first */
+ vs[0] = get_fpr64(&fpu->fpr[idx], 1);
+ vs[1] = get_fpr64(&fpu->fpr[idx], 0);
+#endif
+ break;
+ case KVM_REG_MIPS_MSA_IR:
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ v = boot_cpu_data.msa_id;
+ break;
+ case KVM_REG_MIPS_MSA_CSR:
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ v = fpu->msacsr;
+ break;
+
+ /* registers to be handled specially */
+ default:
+ ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
+ if (ret)
+ return ret;
+ break;
+ }
+ if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
+ u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
+ return put_user(v, uaddr64);
+ } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+ u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
+ u32 v32 = (u32)v;
+
+ return put_user(v32, uaddr32);
+ } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
+ void __user *uaddr = (void __user *)(long)reg->addr;
+
+ return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
+ s64 v;
+ s64 vs[2];
+ unsigned int idx;
+
+ if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
+ u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
+ if (get_user(v, uaddr64) != 0)
+ return -EFAULT;
+ } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+ u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
+ s32 v32;
+
+ if (get_user(v32, uaddr32) != 0)
+ return -EFAULT;
+ v = (s64)v32;
+ } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
+ void __user *uaddr = (void __user *)(long)reg->addr;
+
+ return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
+ } else {
+ return -EINVAL;
+ }
+
+ switch (reg->id) {
+ /* General purpose registers */
+ case KVM_REG_MIPS_R0:
+ /* Silently ignore requests to set $0 */
+ break;
+ case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
+ vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
+ break;
+#ifndef CONFIG_CPU_MIPSR6
+ case KVM_REG_MIPS_HI:
+ vcpu->arch.hi = v;
+ break;
+ case KVM_REG_MIPS_LO:
+ vcpu->arch.lo = v;
+ break;
+#endif
+ case KVM_REG_MIPS_PC:
+ vcpu->arch.pc = v;
+ break;
+
+ /* Floating point registers */
+ case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_FPR_32(0);
+ /* Odd singles in top of even double when FR=0 */
+ if (kvm_read_c0_guest_status(cop0) & ST0_FR)
+ set_fpr32(&fpu->fpr[idx], 0, v);
+ else
+ set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
+ break;
+ case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_FPR_64(0);
+ /* Can't access odd doubles in FR=0 mode */
+ if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
+ return -EINVAL;
+ set_fpr64(&fpu->fpr[idx], 0, v);
+ break;
+ case KVM_REG_MIPS_FCR_IR:
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ /* Read-only */
+ break;
+ case KVM_REG_MIPS_FCR_CSR:
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ fpu->fcr31 = v;
+ break;
+
+ /* MIPS SIMD Architecture (MSA) registers */
+ case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_VEC_128(0);
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ /* least significant byte first */
+ set_fpr64(&fpu->fpr[idx], 0, vs[0]);
+ set_fpr64(&fpu->fpr[idx], 1, vs[1]);
+#else
+ /* most significant byte first */
+ set_fpr64(&fpu->fpr[idx], 1, vs[0]);
+ set_fpr64(&fpu->fpr[idx], 0, vs[1]);
+#endif
+ break;
+ case KVM_REG_MIPS_MSA_IR:
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ /* Read-only */
+ break;
+ case KVM_REG_MIPS_MSA_CSR:
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ fpu->msacsr = v;
+ break;
+
+ /* registers to be handled specially */
+ default:
+ return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
+ }
+ return 0;
+}
+
+static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
+ struct kvm_enable_cap *cap)
+{
+ int r = 0;
+
+ if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
+ return -EINVAL;
+ if (cap->flags)
+ return -EINVAL;
+ if (cap->args[0])
+ return -EINVAL;
+
+ switch (cap->cap) {
+ case KVM_CAP_MIPS_FPU:
+ vcpu->arch.fpu_enabled = true;
+ break;
+ case KVM_CAP_MIPS_MSA:
+ vcpu->arch.msa_enabled = true;
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+
+ return r;
+}
+
+long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+
+ if (ioctl == KVM_INTERRUPT) {
+ struct kvm_mips_interrupt irq;
+
+ if (copy_from_user(&irq, argp, sizeof(irq)))
+ return -EFAULT;
+ kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
+ irq.irq);
+
+ return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ long r;
+
+ vcpu_load(vcpu);
+
+ switch (ioctl) {
+ case KVM_SET_ONE_REG:
+ case KVM_GET_ONE_REG: {
+ struct kvm_one_reg reg;
+
+ r = -EFAULT;
+ if (copy_from_user(&reg, argp, sizeof(reg)))
+ break;
+ if (ioctl == KVM_SET_ONE_REG)
+ r = kvm_mips_set_reg(vcpu, &reg);
+ else
+ r = kvm_mips_get_reg(vcpu, &reg);
+ break;
+ }
+ case KVM_GET_REG_LIST: {
+ struct kvm_reg_list __user *user_list = argp;
+ struct kvm_reg_list reg_list;
+ unsigned n;
+
+ r = -EFAULT;
+ if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+ break;
+ n = reg_list.n;
+ reg_list.n = kvm_mips_num_regs(vcpu);
+ if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+ break;
+ r = -E2BIG;
+ if (n < reg_list.n)
+ break;
+ r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
+ break;
+ }
+ case KVM_ENABLE_CAP: {
+ struct kvm_enable_cap cap;
+
+ r = -EFAULT;
+ if (copy_from_user(&cap, argp, sizeof(cap)))
+ break;
+ r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
+ break;
+ }
+ default:
+ r = -ENOIOCTLCMD;
+ }
+
+ vcpu_put(vcpu);
+ return r;
+}
+
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+
+}
+
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
+{
+ /* Let implementation handle TLB/GVA invalidation */
+ kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
+}
+
+long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ long r;
+
+ switch (ioctl) {
+ default:
+ r = -ENOIOCTLCMD;
+ }
+
+ return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+ if (kvm_mips_callbacks) {
+ kvm_err("kvm: module already exists\n");
+ return -EEXIST;
+ }
+
+ return kvm_mips_emulation_init(&kvm_mips_callbacks);
+}
+
+void kvm_arch_exit(void)
+{
+ kvm_mips_callbacks = NULL;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -ENOIOCTLCMD;
+}
+
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -ENOIOCTLCMD;
+}
+
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+ return VM_FAULT_SIGBUS;
+}
+
+int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+{
+ int r;
+
+ switch (ext) {
+ case KVM_CAP_ONE_REG:
+ case KVM_CAP_ENABLE_CAP:
+ case KVM_CAP_READONLY_MEM:
+ case KVM_CAP_SYNC_MMU:
+ case KVM_CAP_IMMEDIATE_EXIT:
+ r = 1;
+ break;
+ case KVM_CAP_NR_VCPUS:
+ r = num_online_cpus();
+ break;
+ case KVM_CAP_MAX_VCPUS:
+ r = KVM_MAX_VCPUS;
+ break;
+ case KVM_CAP_MAX_VCPU_ID:
+ r = KVM_MAX_VCPU_ID;
+ break;
+ case KVM_CAP_MIPS_FPU:
+ /* We don't handle systems with inconsistent cpu_has_fpu */
+ r = !!raw_cpu_has_fpu;
+ break;
+ case KVM_CAP_MIPS_MSA:
+ /*
+ * We don't support MSA vector partitioning yet:
+ * 1) It would require explicit support which can't be tested
+ * yet due to lack of support in current hardware.
+ * 2) It extends the state that would need to be saved/restored
+ * by e.g. QEMU for migration.
+ *
+ * When vector partitioning hardware becomes available, support
+ * could be added by requiring a flag when enabling
+ * KVM_CAP_MIPS_MSA capability to indicate that userland knows
+ * to save/restore the appropriate extra state.
+ */
+ r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
+ break;
+ default:
+ r = kvm_mips_callbacks->check_extension(kvm, ext);
+ break;
+ }
+ return r;
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ return kvm_mips_pending_timer(vcpu) ||
+ kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
+}
+
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct mips_coproc *cop0;
+
+ if (!vcpu)
+ return -1;
+
+ kvm_debug("VCPU Register Dump:\n");
+ kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
+ kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
+
+ for (i = 0; i < 32; i += 4) {
+ kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
+ vcpu->arch.gprs[i],
+ vcpu->arch.gprs[i + 1],
+ vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
+ }
+ kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
+ kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
+
+ cop0 = vcpu->arch.cop0;
+ kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
+ kvm_read_c0_guest_status(cop0),
+ kvm_read_c0_guest_cause(cop0));
+
+ kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+
+ vcpu_load(vcpu);
+
+ for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+ vcpu->arch.gprs[i] = regs->gpr[i];
+ vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
+ vcpu->arch.hi = regs->hi;
+ vcpu->arch.lo = regs->lo;
+ vcpu->arch.pc = regs->pc;
+
+ vcpu_put(vcpu);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+
+ vcpu_load(vcpu);
+
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+ regs->gpr[i] = vcpu->arch.gprs[i];
+
+ regs->hi = vcpu->arch.hi;
+ regs->lo = vcpu->arch.lo;
+ regs->pc = vcpu->arch.pc;
+
+ vcpu_put(vcpu);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
+{
+ return 0;
+}
+
+static void kvm_mips_set_c0_status(void)
+{
+ u32 status = read_c0_status();
+
+ if (cpu_has_dsp)
+ status |= (ST0_MX);
+
+ write_c0_status(status);
+ ehb();
+}
+
+/*
+ * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+ */
+int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ enum emulation_result er = EMULATE_DONE;
+ u32 inst;
+ int ret = RESUME_GUEST;
+
+ vcpu->mode = OUTSIDE_GUEST_MODE;
+
+ /* re-enable HTW before enabling interrupts */
+ if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
+ htw_start();
+
+ /* Set a default exit reason */
+ run->exit_reason = KVM_EXIT_UNKNOWN;
+ run->ready_for_interrupt_injection = 1;
+
+ /*
+ * Set the appropriate status bits based on host CPU features,
+ * before we hit the scheduler
+ */
+ kvm_mips_set_c0_status();
+
+ local_irq_enable();
+
+ kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
+ cause, opc, run, vcpu);
+ trace_kvm_exit(vcpu, exccode);
+
+ if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
+ /*
+ * Do a privilege check; if in UM, most of these exit conditions
+ * end up causing an exception to be delivered to the Guest
+ * Kernel
+ */
+ er = kvm_mips_check_privilege(cause, opc, vcpu);
+ if (er == EMULATE_PRIV_FAIL) {
+ goto skip_emul;
+ } else if (er == EMULATE_FAIL) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ goto skip_emul;
+ }
+ }
+
+ switch (exccode) {
+ case EXCCODE_INT:
+ kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
+
+ ++vcpu->stat.int_exits;
+
+ if (need_resched())
+ cond_resched();
+
+ ret = RESUME_GUEST;
+ break;
+
+ case EXCCODE_CPU:
+ kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);
+
+ ++vcpu->stat.cop_unusable_exits;
+ ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
+ /* XXXKYMA: Might need to return to user space */
+ if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
+ ret = RESUME_HOST;
+ break;
+
+ case EXCCODE_MOD:
+ ++vcpu->stat.tlbmod_exits;
+ ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
+ break;
+
+ case EXCCODE_TLBS:
+ kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
+ cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+ badvaddr);
+
+ ++vcpu->stat.tlbmiss_st_exits;
+ ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
+ break;
+
+ case EXCCODE_TLBL:
+ kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+
+ ++vcpu->stat.tlbmiss_ld_exits;
+ ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
+ break;
+
+ case EXCCODE_ADES:
+ ++vcpu->stat.addrerr_st_exits;
+ ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
+ break;
+
+ case EXCCODE_ADEL:
+ ++vcpu->stat.addrerr_ld_exits;
+ ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
+ break;
+
+ case EXCCODE_SYS:
+ ++vcpu->stat.syscall_exits;
+ ret = kvm_mips_callbacks->handle_syscall(vcpu);
+ break;
+
+ case EXCCODE_RI:
+ ++vcpu->stat.resvd_inst_exits;
+ ret = kvm_mips_callbacks->handle_res_inst(vcpu);
+ break;
+
+ case EXCCODE_BP:
+ ++vcpu->stat.break_inst_exits;
+ ret = kvm_mips_callbacks->handle_break(vcpu);
+ break;
+
+ case EXCCODE_TR:
+ ++vcpu->stat.trap_inst_exits;
+ ret = kvm_mips_callbacks->handle_trap(vcpu);
+ break;
+
+ case EXCCODE_MSAFPE:
+ ++vcpu->stat.msa_fpe_exits;
+ ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
+ break;
+
+ case EXCCODE_FPE:
+ ++vcpu->stat.fpe_exits;
+ ret = kvm_mips_callbacks->handle_fpe(vcpu);
+ break;
+
+ case EXCCODE_MSADIS:
+ ++vcpu->stat.msa_disabled_exits;
+ ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
+ break;
+
+ case EXCCODE_GE:
+ /* defer exit accounting to handler */
+ ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
+ break;
+
+ default:
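+ /* For a fault in a branch delay slot the faulting instruction is at EPC + 4 */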
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ inst = 0;
+ kvm_get_badinstr(opc, vcpu, &inst);
+ kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
+ exccode, opc, inst, badvaddr,
+ kvm_read_c0_guest_status(vcpu->arch.cop0));
+ kvm_arch_vcpu_dump_regs(vcpu);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ break;
+
+ }
+
+skip_emul:
+ local_irq_disable();
+
+ if (ret == RESUME_GUEST)
+ kvm_vz_acquire_htimer(vcpu);
+
+ if (er == EMULATE_DONE && !(ret & RESUME_HOST))
+ kvm_mips_deliver_interrupts(vcpu, cause);
+
+ if (!(ret & RESUME_HOST)) {
+ /* Only check for signals if not already exiting to userspace */
+ if (signal_pending(current)) {
+ run->exit_reason = KVM_EXIT_INTR;
+ ret = (-EINTR << 2) | RESUME_HOST;
+ ++vcpu->stat.signal_exits;
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
+ }
+ }
+
+ if (ret == RESUME_GUEST) {
+ trace_kvm_reenter(vcpu);
+
+ /*
+ * Make sure the read of VCPU requests in vcpu_reenter()
+ * callback is not reordered ahead of the write to vcpu->mode,
+ * or we could miss a TLB flush request while the requester sees
+ * the VCPU as outside of guest mode and not needing an IPI.
+ */
+ smp_store_mb(vcpu->mode, IN_GUEST_MODE);
+
+ kvm_mips_callbacks->vcpu_reenter(vcpu);
+
+ /*
+ * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
+ * is live), restore FCR31 / MSACSR.
+ *
+ * This should be before returning to the guest exception
+ * vector, as it may well cause an [MSA] FP exception if there
+ * are pending exception bits unmasked. (see
+ * kvm_mips_csr_die_notifier() for how that is handled).
+ */
+ if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
+ read_c0_status() & ST0_CU1)
+ __kvm_restore_fcsr(&vcpu->arch);
+
+ if (kvm_mips_guest_has_msa(&vcpu->arch) &&
+ read_c0_config5() & MIPS_CONF5_MSAEN)
+ __kvm_restore_msacsr(&vcpu->arch);
+ }
+
+ /* Disable HTW before returning to guest or host */
+ if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
+ htw_stop();
+
+ return ret;
+}
+
+/* Enable FPU for guest and restore context */
+void kvm_own_fpu(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned int sr, cfg5;
+
+ preempt_disable();
+
+ sr = kvm_read_c0_guest_status(cop0);
+
+ /*
+ * If MSA state is already live, it is undefined how it interacts with
+ * FR=0 FPU state, and we don't want to hit reserved instruction
+ * exceptions trying to save the MSA state later when CU=1 && FR=1, so
+ * play it safe and save it first.
+ *
+ * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
+ * get called when guest CU1 is set, however we can't trust the guest
+ * not to clobber the status register directly via the commpage.
+ */
+ if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
+ kvm_lose_fpu(vcpu);
+
+ /*
+ * Enable FPU for guest
+ * We set FR and FRE according to guest context
+ */
+ change_c0_status(ST0_CU1 | ST0_FR, sr);
+ if (cpu_has_fre) {
+ cfg5 = kvm_read_c0_guest_config5(cop0);
+ change_c0_config5(MIPS_CONF5_FRE, cfg5);
+ }
+ enable_fpu_hazard();
+
+ /* If guest FPU state not active, restore it now */
+ if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
+ __kvm_restore_fpu(&vcpu->arch);
+ vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
+ } else {
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
+ }
+
+ preempt_enable();
+}
+
+#ifdef CONFIG_CPU_HAS_MSA
+/* Enable MSA for guest and restore context */
+void kvm_own_msa(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned int sr, cfg5;
+
+ preempt_disable();
+
+ /*
+ * Enable FPU if enabled in guest, since we're restoring FPU context
+ * anyway. We set FR and FRE according to guest context.
+ */
+ if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
+ sr = kvm_read_c0_guest_status(cop0);
+
+ /*
+ * If FR=0 FPU state is already live, it is undefined how it
+ * interacts with MSA state, so play it safe and save it first.
+ */
+ if (!(sr & ST0_FR) &&
+ (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
+ KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
+ kvm_lose_fpu(vcpu);
+
+ change_c0_status(ST0_CU1 | ST0_FR, sr);
+ if (sr & ST0_CU1 && cpu_has_fre) {
+ cfg5 = kvm_read_c0_guest_config5(cop0);
+ change_c0_config5(MIPS_CONF5_FRE, cfg5);
+ }
+ }
+
+ /* Enable MSA for guest */
+ set_c0_config5(MIPS_CONF5_MSAEN);
+ enable_fpu_hazard();
+
+ switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
+ case KVM_MIPS_AUX_FPU:
+ /*
+ * Guest FPU state already loaded, only restore upper MSA state
+ */
+ __kvm_restore_msa_upper(&vcpu->arch);
+ vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
+ break;
+ case 0:
+		/* Neither FPU nor MSA already active, restore full MSA state */
+ __kvm_restore_msa(&vcpu->arch);
+ vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
+ if (kvm_mips_guest_has_fpu(&vcpu->arch))
+ vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
+ KVM_TRACE_AUX_FPU_MSA);
+ break;
+ default:
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
+ break;
+ }
+
+ preempt_enable();
+}
+#endif
+
+/* Drop FPU & MSA without saving it */
+void kvm_drop_fpu(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
+ disable_msa();
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
+ vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
+ }
+ if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
+ clear_c0_status(ST0_CU1 | ST0_FR);
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
+ vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
+ }
+ preempt_enable();
+}
+
+/* Save and disable FPU & MSA */
+void kvm_lose_fpu(struct kvm_vcpu *vcpu)
+{
+ /*
+ * With T&E, FPU & MSA get disabled in root context (hardware) when it
+ * is disabled in guest context (software), but the register state in
+ * the hardware may still be in use.
+ * This is why we explicitly re-enable the hardware before saving.
+ */
+
+ preempt_disable();
+ if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
+ if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
+ set_c0_config5(MIPS_CONF5_MSAEN);
+ enable_fpu_hazard();
+ }
+
+ __kvm_save_msa(&vcpu->arch);
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
+
+ /* Disable MSA & FPU */
+ disable_msa();
+ if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
+ clear_c0_status(ST0_CU1 | ST0_FR);
+ disable_fpu_hazard();
+ }
+ vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
+ } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
+ if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
+ set_c0_status(ST0_CU1);
+ enable_fpu_hazard();
+ }
+
+ __kvm_save_fpu(&vcpu->arch);
+ vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
+
+ /* Disable FPU */
+ clear_c0_status(ST0_CU1 | ST0_FR);
+ disable_fpu_hazard();
+ }
+ preempt_enable();
+}
+
+/*
+ * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
+ * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
+ * exception if cause bits are set in the value being written.
+ */
+static int kvm_mips_csr_die_notify(struct notifier_block *self,
+ unsigned long cmd, void *ptr)
+{
+ struct die_args *args = (struct die_args *)ptr;
+ struct pt_regs *regs = args->regs;
+ unsigned long pc;
+
+ /* Only interested in FPE and MSAFPE */
+ if (cmd != DIE_FP && cmd != DIE_MSAFP)
+ return NOTIFY_DONE;
+
+ /* Return immediately if guest context isn't active */
+ if (!(current->flags & PF_VCPU))
+ return NOTIFY_DONE;
+
+ /* Should never get here from user mode */
+ BUG_ON(user_mode(regs));
+
+ pc = instruction_pointer(regs);
+ switch (cmd) {
+ case DIE_FP:
+ /* match 2nd instruction in __kvm_restore_fcsr */
+ if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
+ return NOTIFY_DONE;
+ break;
+ case DIE_MSAFP:
+ /* match 2nd/3rd instruction in __kvm_restore_msacsr */
+ if (!cpu_has_msa ||
+ pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
+ pc > (unsigned long)&__kvm_restore_msacsr + 8)
+ return NOTIFY_DONE;
+ break;
+ }
+
+ /* Move PC forward a little and continue executing */
+ instruction_pointer(regs) += 4;
+
+ return NOTIFY_STOP;
+}
+
+static struct notifier_block kvm_mips_csr_die_notifier = {
+ .notifier_call = kvm_mips_csr_die_notify,
+};
+
+static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
+ [MIPS_EXC_INT_TIMER] = C_IRQ5,
+ [MIPS_EXC_INT_IO_1] = C_IRQ0,
+ [MIPS_EXC_INT_IPI_1] = C_IRQ1,
+ [MIPS_EXC_INT_IPI_2] = C_IRQ2,
+};
+
+static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
+ [MIPS_EXC_INT_TIMER] = C_IRQ5,
+ [MIPS_EXC_INT_IO_1] = C_IRQ0,
+ [MIPS_EXC_INT_IO_2] = C_IRQ1,
+ [MIPS_EXC_INT_IPI_1] = C_IRQ4,
+};
+
+u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;
+
+u32 kvm_irq_to_priority(u32 irq)
+{
+ int i;
+
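+	/* kvm_priority_to_irq[] holds Cause IP bits; IP<irq> is bit (irq + 8) */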
+ for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
+ if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
+ return i;
+ }
+
+ return MIPS_EXC_MAX;
+}
+
+static int __init kvm_mips_init(void)
+{
+ int ret;
+
+ if (cpu_has_mmid) {
+ pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = kvm_mips_entry_setup();
+ if (ret)
+ return ret;
+
+ ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+
+ if (ret)
+ return ret;
+
+ if (boot_cpu_type() == CPU_LOONGSON64)
+ kvm_priority_to_irq = kvm_loongson3_priority_to_irq;
+
+ register_die_notifier(&kvm_mips_csr_die_notifier);
+
+ return 0;
+}
+
+static void __exit kvm_mips_exit(void)
+{
+ kvm_exit();
+
+ unregister_die_notifier(&kvm_mips_csr_die_notifier);
+}
+
+module_init(kvm_mips_init);
+module_exit(kvm_mips_exit);
+
+EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
new file mode 100644
index 000000000..38a5be10a
--- /dev/null
+++ b/arch/mips/kvm/mmu.c
@@ -0,0 +1,1235 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS MMU handling in the KVM module.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/highmem.h>
+#include <linux/kvm_host.h>
+#include <linux/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
+
+/*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
+ * for which pages need to be cached.
+ */
+#if defined(__PAGETABLE_PMD_FOLDED)
+#define KVM_MMU_CACHE_MIN_PAGES 1
+#else
+#define KVM_MMU_CACHE_MIN_PAGES 2
+#endif
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+{
+ kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+}
+
+/**
+ * kvm_pgd_init() - Initialise KVM GPA page directory.
+ * @page: Pointer to page directory (PGD) for KVM GPA.
+ *
+ * Initialise a KVM GPA page directory with pointers to the invalid table, i.e.
+ * representing no mappings. This is similar to pgd_init(), however it
+ * initialises all the page directory pointers, not just the ones corresponding
+ * to the userland address space (since it is for the guest physical address
+ * space rather than a virtual address space).
+ */
+static void kvm_pgd_init(void *page)
+{
+ unsigned long *p, *end;
+ unsigned long entry;
+
+#ifdef __PAGETABLE_PMD_FOLDED
+ entry = (unsigned long)invalid_pte_table;
+#else
+ entry = (unsigned long)invalid_pmd_table;
+#endif
+
+ p = (unsigned long *)page;
+ end = p + PTRS_PER_PGD;
+
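+	/* Fill eight PGD entries per iteration (manually unrolled loop) */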
+ do {
+ p[0] = entry;
+ p[1] = entry;
+ p[2] = entry;
+ p[3] = entry;
+ p[4] = entry;
+ p += 8;
+ p[-3] = entry;
+ p[-2] = entry;
+ p[-1] = entry;
+ } while (p != end);
+}
+
+/**
+ * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
+ *
+ * Allocate a blank KVM GPA page directory (PGD) for representing guest physical
+ * to host physical page mappings.
+ *
+ * Returns: Pointer to new KVM GPA page directory.
+ * NULL on allocation failure.
+ */
+pgd_t *kvm_pgd_alloc(void)
+{
+ pgd_t *ret;
+
+ ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER);
+ if (ret)
+ kvm_pgd_init(ret);
+
+ return ret;
+}
+
+/**
+ * kvm_mips_walk_pgd() - Walk page table with optional allocation.
+ * @pgd: Page directory pointer.
+ * @addr: Address to index page table using.
+ * @cache: MMU page cache to allocate new page tables from, or NULL.
+ *
+ * Walk the page tables pointed to by @pgd to find the PTE corresponding to the
+ * address @addr. If page tables don't exist for @addr, they will be created
+ * from the MMU cache if @cache is not NULL.
+ *
+ * Returns: Pointer to pte_t corresponding to @addr.
+ * NULL if a page table doesn't exist for @addr and !@cache.
+ * NULL if a page table allocation failed.
+ */
+static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
+ unsigned long addr)
+{
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ pgd += pgd_index(addr);
+ if (pgd_none(*pgd)) {
+ /* Not used on MIPS yet */
+ BUG();
+ return NULL;
+ }
+ p4d = p4d_offset(pgd, addr);
+ pud = pud_offset(p4d, addr);
+ if (pud_none(*pud)) {
+ pmd_t *new_pmd;
+
+ if (!cache)
+ return NULL;
+ new_pmd = kvm_mmu_memory_cache_alloc(cache);
+ pmd_init((unsigned long)new_pmd,
+ (unsigned long)invalid_pte_table);
+ pud_populate(NULL, pud, new_pmd);
+ }
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd)) {
+ pte_t *new_pte;
+
+ if (!cache)
+ return NULL;
+ new_pte = kvm_mmu_memory_cache_alloc(cache);
+ clear_page(new_pte);
+ pmd_populate_kernel(NULL, pmd, new_pte);
+ }
+ return pte_offset_kernel(pmd, addr);
+}
+
+/* Caller must hold kvm->mm_lock */
+static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
+ struct kvm_mmu_memory_cache *cache,
+ unsigned long addr)
+{
+ return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
+}
+
+/*
+ * kvm_mips_flush_gpa_{pte,pmd,pud,pgd,pt}.
+ * Flush a range of guest physical address space from the VM's GPA page tables.
+ */
+
+static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
+ unsigned long end_gpa)
+{
+ int i_min = pte_index(start_gpa);
+ int i_max = pte_index(end_gpa);
+ bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
+ int i;
+
+ for (i = i_min; i <= i_max; ++i) {
+ if (!pte_present(pte[i]))
+ continue;
+
+ set_pte(pte + i, __pte(0));
+ }
+ return safe_to_remove;
+}
+
+static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
+ unsigned long end_gpa)
+{
+ pte_t *pte;
+ unsigned long end = ~0ul;
+ int i_min = pmd_index(start_gpa);
+ int i_max = pmd_index(end_gpa);
+ bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
+ int i;
+
+ for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
+ if (!pmd_present(pmd[i]))
+ continue;
+
+ pte = pte_offset_kernel(pmd + i, 0);
+ if (i == i_max)
+ end = end_gpa;
+
+ if (kvm_mips_flush_gpa_pte(pte, start_gpa, end)) {
+ pmd_clear(pmd + i);
+ pte_free_kernel(NULL, pte);
+ } else {
+ safe_to_remove = false;
+ }
+ }
+ return safe_to_remove;
+}
+
+static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa,
+ unsigned long end_gpa)
+{
+ pmd_t *pmd;
+ unsigned long end = ~0ul;
+ int i_min = pud_index(start_gpa);
+ int i_max = pud_index(end_gpa);
+ bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
+ int i;
+
+ for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
+ if (!pud_present(pud[i]))
+ continue;
+
+ pmd = pmd_offset(pud + i, 0);
+ if (i == i_max)
+ end = end_gpa;
+
+ if (kvm_mips_flush_gpa_pmd(pmd, start_gpa, end)) {
+ pud_clear(pud + i);
+ pmd_free(NULL, pmd);
+ } else {
+ safe_to_remove = false;
+ }
+ }
+ return safe_to_remove;
+}
+
+static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
+ unsigned long end_gpa)
+{
+ p4d_t *p4d;
+ pud_t *pud;
+ unsigned long end = ~0ul;
+ int i_min = pgd_index(start_gpa);
+ int i_max = pgd_index(end_gpa);
+ bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
+ int i;
+
+ for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
+ if (!pgd_present(pgd[i]))
+ continue;
+
+ p4d = p4d_offset(pgd, 0);
+ pud = pud_offset(p4d + i, 0);
+ if (i == i_max)
+ end = end_gpa;
+
+ if (kvm_mips_flush_gpa_pud(pud, start_gpa, end)) {
+ pgd_clear(pgd + i);
+ pud_free(NULL, pud);
+ } else {
+ safe_to_remove = false;
+ }
+ }
+ return safe_to_remove;
+}
+
+/**
+ * kvm_mips_flush_gpa_pt() - Flush a range of guest physical addresses.
+ * @kvm: KVM pointer.
+ * @start_gfn: Guest frame number of first page in GPA range to flush.
+ * @end_gfn: Guest frame number of last page in GPA range to flush.
+ *
+ * Flushes a range of GPA mappings from the GPA page tables.
+ *
+ * The caller must hold the @kvm->mmu_lock spinlock.
+ *
+ * Returns:	Whether it's safe to remove the top level page directory because
+ * all lower levels have been removed.
+ */
+bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
+{
+ return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd,
+ start_gfn << PAGE_SHIFT,
+ end_gfn << PAGE_SHIFT);
+}
+
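+/*
+ * BUILD_PTE_RANGE_OP() generates kvm_mips_<name>_{pte,pmd,pud,pgd}() helpers
+ * which walk a range of the GPA page tables and apply @op to each present PTE,
+ * returning non-zero if any PTE was actually changed.
+ */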
+#define BUILD_PTE_RANGE_OP(name, op) \
+static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start, \
+ unsigned long end) \
+{ \
+ int ret = 0; \
+ int i_min = pte_index(start); \
+ int i_max = pte_index(end); \
+ int i; \
+ pte_t old, new; \
+ \
+ for (i = i_min; i <= i_max; ++i) { \
+ if (!pte_present(pte[i])) \
+ continue; \
+ \
+ old = pte[i]; \
+ new = op(old); \
+ if (pte_val(new) == pte_val(old)) \
+ continue; \
+ set_pte(pte + i, new); \
+ ret = 1; \
+ } \
+ return ret; \
+} \
+ \
+/* returns true if anything was done */ \
+static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \
+ unsigned long end) \
+{ \
+ int ret = 0; \
+ pte_t *pte; \
+ unsigned long cur_end = ~0ul; \
+ int i_min = pmd_index(start); \
+ int i_max = pmd_index(end); \
+ int i; \
+ \
+ for (i = i_min; i <= i_max; ++i, start = 0) { \
+ if (!pmd_present(pmd[i])) \
+ continue; \
+ \
+ pte = pte_offset_kernel(pmd + i, 0); \
+ if (i == i_max) \
+ cur_end = end; \
+ \
+ ret |= kvm_mips_##name##_pte(pte, start, cur_end); \
+ } \
+ return ret; \
+} \
+ \
+static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start, \
+ unsigned long end) \
+{ \
+ int ret = 0; \
+ pmd_t *pmd; \
+ unsigned long cur_end = ~0ul; \
+ int i_min = pud_index(start); \
+ int i_max = pud_index(end); \
+ int i; \
+ \
+ for (i = i_min; i <= i_max; ++i, start = 0) { \
+ if (!pud_present(pud[i])) \
+ continue; \
+ \
+ pmd = pmd_offset(pud + i, 0); \
+ if (i == i_max) \
+ cur_end = end; \
+ \
+ ret |= kvm_mips_##name##_pmd(pmd, start, cur_end); \
+ } \
+ return ret; \
+} \
+ \
+static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start, \
+ unsigned long end) \
+{ \
+ int ret = 0; \
+ p4d_t *p4d; \
+ pud_t *pud; \
+ unsigned long cur_end = ~0ul; \
+ int i_min = pgd_index(start); \
+ int i_max = pgd_index(end); \
+ int i; \
+ \
+ for (i = i_min; i <= i_max; ++i, start = 0) { \
+ if (!pgd_present(pgd[i])) \
+ continue; \
+ \
+ p4d = p4d_offset(pgd, 0); \
+ pud = pud_offset(p4d + i, 0); \
+ if (i == i_max) \
+ cur_end = end; \
+ \
+ ret |= kvm_mips_##name##_pud(pud, start, cur_end); \
+ } \
+ return ret; \
+}
+
+/*
+ * kvm_mips_mkclean_gpa_pt.
+ * Mark a range of guest physical address space clean (writes fault) in the VM's
+ * GPA page table to allow dirty page tracking.
+ */
+
+BUILD_PTE_RANGE_OP(mkclean, pte_mkclean)
+
+/**
+ * kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
+ * @kvm: KVM pointer.
+ * @start_gfn: Guest frame number of first page in GPA range to flush.
+ * @end_gfn: Guest frame number of last page in GPA range to flush.
+ *
+ * Make a range of GPA mappings clean so that guest writes will fault and
+ * trigger dirty page logging.
+ *
+ * The caller must hold the @kvm->mmu_lock spinlock.
+ *
+ * Returns: Whether any GPA mappings were modified, which would require
+ *		derived mappings (GVA page tables & TLB entries) to be
+ * invalidated.
+ */
+int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
+{
+ return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
+ start_gfn << PAGE_SHIFT,
+ end_gfn << PAGE_SHIFT);
+}
+
+/**
+ * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
+ * @kvm: The KVM pointer
+ * @slot: The memory slot associated with mask
+ * @gfn_offset: The gfn offset in memory slot
+ * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
+ * slot to be write protected
+ *
+ * Walks the bits set in @mask and write protects the associated PTEs. The
+ * caller must acquire @kvm->mmu_lock.
+ */
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask)
+{
+ gfn_t base_gfn = slot->base_gfn + gfn_offset;
+ gfn_t start = base_gfn + __ffs(mask);
+ gfn_t end = base_gfn + __fls(mask);
+
+ kvm_mips_mkclean_gpa_pt(kvm, start, end);
+}
+
+/*
+ * kvm_mips_mkold_gpa_pt.
+ * Mark a range of guest physical address space old (all accesses fault) in the
+ * VM's GPA page table to allow detection of commonly used pages.
+ */
+
+BUILD_PTE_RANGE_OP(mkold, pte_mkold)
+
+static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
+ gfn_t end_gfn)
+{
+ return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd,
+ start_gfn << PAGE_SHIFT,
+ end_gfn << PAGE_SHIFT);
+}
+
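+/*
+ * Iterate over all memslots intersecting the HVA range [start, end) and call
+ * @handler on the corresponding GFN range of each slot, OR-ing the results.
+ */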
+static int handle_hva_to_gpa(struct kvm *kvm,
+ unsigned long start,
+ unsigned long end,
+ int (*handler)(struct kvm *kvm, gfn_t gfn,
+ gpa_t gfn_end,
+ struct kvm_memory_slot *memslot,
+ void *data),
+ void *data)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+ int ret = 0;
+
+ slots = kvm_memslots(kvm);
+
+ /* we only care about the pages that the guest sees */
+ kvm_for_each_memslot(memslot, slots) {
+ unsigned long hva_start, hva_end;
+ gfn_t gfn, gfn_end;
+
+ hva_start = max(start, memslot->userspace_addr);
+ hva_end = min(end, memslot->userspace_addr +
+ (memslot->npages << PAGE_SHIFT));
+ if (hva_start >= hva_end)
+ continue;
+
+ /*
+ * {gfn(page) | page intersects with [hva_start, hva_end)} =
+ * {gfn_start, gfn_start+1, ..., gfn_end-1}.
+ */
+ gfn = hva_to_gfn_memslot(hva_start, memslot);
+ gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+
+ ret |= handler(kvm, gfn, gfn_end, memslot, data);
+ }
+
+ return ret;
+}
+
+
+static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
+ struct kvm_memory_slot *memslot, void *data)
+{
+ kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
+ return 1;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags)
+{
+ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+
+ kvm_mips_callbacks->flush_shadow_all(kvm);
+ return 0;
+}
+
+static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
+ struct kvm_memory_slot *memslot, void *data)
+{
+ gpa_t gpa = gfn << PAGE_SHIFT;
+ pte_t hva_pte = *(pte_t *)data;
+ pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
+ pte_t old_pte;
+
+ if (!gpa_pte)
+ return 0;
+
+ /* Mapping may need adjusting depending on memslot flags */
+ old_pte = *gpa_pte;
+ if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
+ hva_pte = pte_mkclean(hva_pte);
+ else if (memslot->flags & KVM_MEM_READONLY)
+ hva_pte = pte_wrprotect(hva_pte);
+
+ set_pte(gpa_pte, hva_pte);
+
+ /* Replacing an absent or old page doesn't need flushes */
+ if (!pte_present(old_pte) || !pte_young(old_pte))
+ return 0;
+
+ /* Pages swapped, aged, moved, or cleaned require flushes */
+ return !pte_present(hva_pte) ||
+ !pte_young(hva_pte) ||
+ pte_pfn(old_pte) != pte_pfn(hva_pte) ||
+ (pte_dirty(old_pte) && !pte_dirty(hva_pte));
+}
+
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+ unsigned long end = hva + PAGE_SIZE;
+ int ret;
+
+ ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
+ if (ret)
+ kvm_mips_callbacks->flush_shadow_all(kvm);
+ return 0;
+}
+
+static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
+ struct kvm_memory_slot *memslot, void *data)
+{
+ return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
+}
+
+static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
+ struct kvm_memory_slot *memslot, void *data)
+{
+ gpa_t gpa = gfn << PAGE_SHIFT;
+ pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
+
+ if (!gpa_pte)
+ return 0;
+ return pte_young(*gpa_pte);
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+ return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
+}
+
+/**
+ * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
+ * @vcpu: VCPU pointer.
+ * @gpa: Guest physical address of fault.
+ * @write_fault: Whether the fault was due to a write.
+ * @out_entry: New PTE for @gpa (written on success unless NULL).
+ * @out_buddy: New PTE for @gpa's buddy (written on success unless
+ * NULL).
+ *
+ * Perform fast path GPA fault handling, doing all that can be done without
+ * calling into KVM. This handles marking old pages young (for idle page
+ * tracking), and dirtying of clean pages (for dirty page logging).
+ *
+ * Returns: 0 on success, in which case we can update derived mappings and
+ * resume guest execution.
+ * -EFAULT on failure due to absent GPA mapping or write to
+ * read-only page, in which case KVM must be consulted.
+ */
+static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
+ bool write_fault,
+ pte_t *out_entry, pte_t *out_buddy)
+{
+ struct kvm *kvm = vcpu->kvm;
+ gfn_t gfn = gpa >> PAGE_SHIFT;
+ pte_t *ptep;
+ kvm_pfn_t pfn = 0; /* silence bogus GCC warning */
+ bool pfn_valid = false;
+ int ret = 0;
+
+ spin_lock(&kvm->mmu_lock);
+
+ /* Fast path - just check GPA page table for an existing entry */
+ ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
+ if (!ptep || !pte_present(*ptep)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* Track access to pages marked old */
+ if (!pte_young(*ptep)) {
+ set_pte(ptep, pte_mkyoung(*ptep));
+ pfn = pte_pfn(*ptep);
+ pfn_valid = true;
+ /* call kvm_set_pfn_accessed() after unlock */
+ }
+ if (write_fault && !pte_dirty(*ptep)) {
+ if (!pte_write(*ptep)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* Track dirtying of writeable pages */
+ set_pte(ptep, pte_mkdirty(*ptep));
+ pfn = pte_pfn(*ptep);
+ mark_page_dirty(kvm, gfn);
+ kvm_set_pfn_dirty(pfn);
+ }
+
+ if (out_entry)
+ *out_entry = *ptep;
+ if (out_buddy)
+ *out_buddy = *ptep_buddy(ptep);
+
+out:
+ spin_unlock(&kvm->mmu_lock);
+ if (pfn_valid)
+ kvm_set_pfn_accessed(pfn);
+ return ret;
+}
+
+/**
+ * kvm_mips_map_page() - Map a guest physical page.
+ * @vcpu: VCPU pointer.
+ * @gpa: Guest physical address of fault.
+ * @write_fault: Whether the fault was due to a write.
+ * @out_entry: New PTE for @gpa (written on success unless NULL).
+ * @out_buddy: New PTE for @gpa's buddy (written on success unless
+ * NULL).
+ *
+ * Handle GPA faults by creating a new GPA mapping (or updating an existing
+ * one).
+ *
+ * This takes care of marking pages young or dirty (idle/dirty page tracking),
+ * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
+ * tables. Derived mappings (GVA page tables and TLBs) must be handled by the
+ * caller.
+ *
+ * Returns: 0 on success, in which case the caller may use the @out_entry
+ * and @out_buddy PTEs to update derived mappings and resume guest
+ * execution.
+ * -EFAULT if there is no memory region at @gpa or a write was
+ * attempted to a read-only memory region. This is usually handled
+ * as an MMIO access.
+ */
+static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
+ bool write_fault,
+ pte_t *out_entry, pte_t *out_buddy)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+ gfn_t gfn = gpa >> PAGE_SHIFT;
+ int srcu_idx, err;
+ kvm_pfn_t pfn;
+ pte_t *ptep, entry;
+ bool writeable;
+ unsigned long prot_bits;
+ unsigned long mmu_seq;
+
+ /* Try the fast path to handle old / clean pages */
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,
+ out_buddy);
+ if (!err)
+ goto out;
+
+ /* We need a minimum of cached pages ready for page table creation */
+ err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
+ if (err)
+ goto out;
+
+retry:
+ /*
+ * Used to check for invalidations in progress, of the pfn that is
+	 * returned by gfn_to_pfn_prot() below.
+ */
+ mmu_seq = kvm->mmu_notifier_seq;
+ /*
+ * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
+ * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+ * risk the page we get a reference to getting unmapped before we have a
+ * chance to grab the mmu_lock without mmu_notifier_retry() noticing.
+ *
+ * This smp_rmb() pairs with the effective smp_wmb() of the combination
+ * of the pte_unmap_unlock() after the PTE is zapped, and the
+ * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
+ * mmu_notifier_seq is incremented.
+ */
+ smp_rmb();
+
+ /* Slow path - ask KVM core whether we can access this GPA */
+ pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
+ if (is_error_noslot_pfn(pfn)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ spin_lock(&kvm->mmu_lock);
+ /* Check if an invalidation has taken place since we got pfn */
+ if (mmu_notifier_retry(kvm, mmu_seq)) {
+ /*
+ * This can happen when mappings are changed asynchronously, but
+ * also synchronously if a COW is triggered by
+ * gfn_to_pfn_prot().
+ */
+ spin_unlock(&kvm->mmu_lock);
+ kvm_release_pfn_clean(pfn);
+ goto retry;
+ }
+
+ /* Ensure page tables are allocated */
+ ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);
+
+ /* Set up the PTE */
+ prot_bits = _PAGE_PRESENT | __READABLE | _page_cachable_default;
+ if (writeable) {
+ prot_bits |= _PAGE_WRITE;
+ if (write_fault) {
+ prot_bits |= __WRITEABLE;
+ mark_page_dirty(kvm, gfn);
+ kvm_set_pfn_dirty(pfn);
+ }
+ }
+ entry = pfn_pte(pfn, __pgprot(prot_bits));
+
+ /* Write the PTE */
+ set_pte(ptep, entry);
+
+ err = 0;
+ if (out_entry)
+ *out_entry = *ptep;
+ if (out_buddy)
+ *out_buddy = *ptep_buddy(ptep);
+
+ spin_unlock(&kvm->mmu_lock);
+ kvm_release_pfn_clean(pfn);
+ kvm_set_pfn_accessed(pfn);
+out:
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+ return err;
+}
+
+static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
+ unsigned long addr)
+{
+ struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+ pgd_t *pgdp;
+ int ret;
+
+ /* We need a minimum of cached pages ready for page table creation */
+ ret = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
+ if (ret)
+ return NULL;
+
+ if (KVM_GUEST_KERNEL_MODE(vcpu))
+ pgdp = vcpu->arch.guest_kernel_mm.pgd;
+ else
+ pgdp = vcpu->arch.guest_user_mm.pgd;
+
+ return kvm_mips_walk_pgd(pgdp, memcache, addr);
+}
+
+void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
+ bool user)
+{
+ pgd_t *pgdp;
+ pte_t *ptep;
+
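+	/* Align to the even page of the GVA pair; both halves are invalidated */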
+ addr &= PAGE_MASK << 1;
+
+ pgdp = vcpu->arch.guest_kernel_mm.pgd;
+ ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
+ if (ptep) {
+ ptep[0] = pfn_pte(0, __pgprot(0));
+ ptep[1] = pfn_pte(0, __pgprot(0));
+ }
+
+ if (user) {
+ pgdp = vcpu->arch.guest_user_mm.pgd;
+ ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
+ if (ptep) {
+ ptep[0] = pfn_pte(0, __pgprot(0));
+ ptep[1] = pfn_pte(0, __pgprot(0));
+ }
+ }
+}
+
+/*
+ * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
+ * Flush a range of guest virtual address space from the given GVA page table.
+ */
+
+static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
+ unsigned long end_gva)
+{
+ int i_min = pte_index(start_gva);
+ int i_max = pte_index(end_gva);
+ bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
+ int i;
+
+ /*
+ * There's no freeing to do, so there's no point clearing individual
+ * entries unless only part of the last level page table needs flushing.
+ */
+ if (safe_to_remove)
+ return true;
+
+ for (i = i_min; i <= i_max; ++i) {
+ if (!pte_present(pte[i]))
+ continue;
+
+ set_pte(pte + i, __pte(0));
+ }
+ return false;
+}
+
+static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
+ unsigned long end_gva)
+{
+ pte_t *pte;
+ unsigned long end = ~0ul;
+ int i_min = pmd_index(start_gva);
+ int i_max = pmd_index(end_gva);
+ bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
+ int i;
+
+ for (i = i_min; i <= i_max; ++i, start_gva = 0) {
+ if (!pmd_present(pmd[i]))
+ continue;
+
+ pte = pte_offset_kernel(pmd + i, 0);
+ if (i == i_max)
+ end = end_gva;
+
+ if (kvm_mips_flush_gva_pte(pte, start_gva, end)) {
+ pmd_clear(pmd + i);
+ pte_free_kernel(NULL, pte);
+ } else {
+ safe_to_remove = false;
+ }
+ }
+ return safe_to_remove;
+}
+
+static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
+ unsigned long end_gva)
+{
+ pmd_t *pmd;
+ unsigned long end = ~0ul;
+ int i_min = pud_index(start_gva);
+ int i_max = pud_index(end_gva);
+ bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
+ int i;
+
+ for (i = i_min; i <= i_max; ++i, start_gva = 0) {
+ if (!pud_present(pud[i]))
+ continue;
+
+ pmd = pmd_offset(pud + i, 0);
+ if (i == i_max)
+ end = end_gva;
+
+ if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) {
+ pud_clear(pud + i);
+ pmd_free(NULL, pmd);
+ } else {
+ safe_to_remove = false;
+ }
+ }
+ return safe_to_remove;
+}
+
+static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
+ unsigned long end_gva)
+{
+ p4d_t *p4d;
+ pud_t *pud;
+ unsigned long end = ~0ul;
+ int i_min = pgd_index(start_gva);
+ int i_max = pgd_index(end_gva);
+ bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
+ int i;
+
+ for (i = i_min; i <= i_max; ++i, start_gva = 0) {
+ if (!pgd_present(pgd[i]))
+ continue;
+
+ p4d = p4d_offset(pgd, 0);
+ pud = pud_offset(p4d + i, 0);
+ if (i == i_max)
+ end = end_gva;
+
+ if (kvm_mips_flush_gva_pud(pud, start_gva, end)) {
+ pgd_clear(pgd + i);
+ pud_free(NULL, pud);
+ } else {
+ safe_to_remove = false;
+ }
+ }
+ return safe_to_remove;
+}
+
+void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags)
+{
+ if (flags & KMF_GPA) {
+ /* all of guest virtual address space could be affected */
+ if (flags & KMF_KERN)
+ /* useg, kseg0, seg2/3 */
+ kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff);
+ else
+ /* useg */
+ kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
+ } else {
+ /* useg */
+ kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
+
+ /* kseg2/3 */
+ if (flags & KMF_KERN)
+ kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff);
+ }
+}
+
+static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte)
+{
+ /*
+ * Don't leak writeable but clean entries from GPA page tables. We don't
+ * want the normal Linux tlbmod handler to handle dirtying when KVM
+ * accesses guest memory.
+ */
+ if (!pte_dirty(pte))
+ pte = pte_wrprotect(pte);
+
+ return pte;
+}
+
+static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
+{
+ /* Guest EntryLo overrides host EntryLo */
+ if (!(entrylo & ENTRYLO_D))
+ pte = pte_mkclean(pte);
+
+ return kvm_mips_gpa_pte_to_gva_unmapped(pte);
+}
+
+#ifdef CONFIG_KVM_MIPS_VZ
+int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu,
+ bool write_fault)
+{
+ int ret;
+
+ ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);
+ if (ret)
+ return ret;
+
+ /* Invalidate this entry in the TLB */
+ return kvm_vz_host_tlb_inv(vcpu, badvaddr);
+}
+#endif
+
+/* XXXKYMA: Must be called with interrupts disabled */
+int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu,
+ bool write_fault)
+{
+ unsigned long gpa;
+ pte_t pte_gpa[2], *ptep_gva;
+ int idx;
+
+ if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
+ kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ return -1;
+ }
+
+ /* Get the GPA page table entry */
+ gpa = KVM_GUEST_CPHYSADDR(badvaddr);
+ idx = (badvaddr >> PAGE_SHIFT) & 1;
+ if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx],
+ &pte_gpa[!idx]) < 0)
+ return -1;
+
+ /* Get the GVA page table entry */
+ ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE);
+ if (!ptep_gva) {
+ kvm_err("No ptep for gva %lx\n", badvaddr);
+ return -1;
+ }
+
+ /* Copy a pair of entries from GPA page table to GVA page table */
+ ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]);
+ ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]);
+
+ /* Invalidate this entry in the TLB, guest kernel ASID only */
+ kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
+ return 0;
+}
+
+int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+ struct kvm_mips_tlb *tlb,
+ unsigned long gva,
+ bool write_fault)
+{
+ struct kvm *kvm = vcpu->kvm;
+ long tlb_lo[2];
+ pte_t pte_gpa[2], *ptep_buddy, *ptep_gva;
+ unsigned int idx = TLB_LO_IDX(*tlb, gva);
+ bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);
+
+ tlb_lo[0] = tlb->tlb_lo[0];
+ tlb_lo[1] = tlb->tlb_lo[1];
+
+ /*
+ * The commpage address must not be mapped to anything else if the guest
+ * TLB contains entries nearby, or commpage accesses will break.
+ */
+ if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
+ tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0;
+
+ /* Get the GPA page table entry */
+ if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]),
+ write_fault, &pte_gpa[idx], NULL) < 0)
+ return -1;
+
+ /* And its GVA buddy's GPA page table entry if it also exists */
+ pte_gpa[!idx] = pfn_pte(0, __pgprot(0));
+ if (tlb_lo[!idx] & ENTRYLO_V) {
+ spin_lock(&kvm->mmu_lock);
+ ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL,
+ mips3_tlbpfn_to_paddr(tlb_lo[!idx]));
+ if (ptep_buddy)
+ pte_gpa[!idx] = *ptep_buddy;
+ spin_unlock(&kvm->mmu_lock);
+ }
+
+ /* Get the GVA page table entry pair */
+ ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);
+ if (!ptep_gva) {
+ kvm_err("No ptep for gva %lx\n", gva);
+ return -1;
+ }
+
+ /* Copy a pair of entries from GPA page table to GVA page table */
+ ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]);
+ ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]);
+
+ /* Invalidate this entry in the TLB, current guest mode ASID only */
+ kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);
+
+ kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+ tlb->tlb_lo[0], tlb->tlb_lo[1]);
+
+ return 0;
+}
+
+int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu)
+{
+ kvm_pfn_t pfn;
+ pte_t *ptep;
+
+ ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr);
+ if (!ptep) {
+ kvm_err("No ptep for commpage %lx\n", badvaddr);
+ return -1;
+ }
+
+ pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
+ /* Also set valid and dirty, so refill handler doesn't have to */
+ *ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, PAGE_SHARED)));
+
+ /* Invalidate this entry in the TLB, guest kernel ASID only */
+ kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
+ return 0;
+}
+
+/**
+ * kvm_mips_migrate_count() - Migrate timer.
+ * @vcpu: Virtual CPU.
+ *
+ * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
+ * if it was running prior to being cancelled.
+ *
+ * Must be called when the VCPU is migrated to a different CPU to ensure that
+ * timer expiry during guest execution interrupts the guest and causes the
+ * interrupt to be delivered in a timely manner.
+ */
+static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
+{
+ if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
+ hrtimer_restart(&vcpu->arch.comparecount_timer);
+}
+
+/* Restore ASID once we are scheduled back after preemption */
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ unsigned long flags;
+
+ kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
+
+ local_irq_save(flags);
+
+ vcpu->cpu = cpu;
+ if (vcpu->arch.last_sched_cpu != cpu) {
+ kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
+ vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+ /*
+ * Migrate the timer interrupt to the current CPU so that it
+ * always interrupts the guest and synchronously triggers a
+ * guest timer interrupt.
+ */
+ kvm_mips_migrate_count(vcpu);
+ }
+
+ /* restore guest state to registers */
+ kvm_mips_callbacks->vcpu_load(vcpu, cpu);
+
+ local_irq_restore(flags);
+}
+
+/* ASID can change if another task is scheduled during preemption */
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags;
+ int cpu;
+
+ local_irq_save(flags);
+
+ cpu = smp_processor_id();
+ vcpu->arch.last_sched_cpu = cpu;
+ vcpu->cpu = -1;
+
+ /* save guest state in registers */
+ kvm_mips_callbacks->vcpu_put(vcpu, cpu);
+
+ local_irq_restore(flags);
+}
+
+/**
+ * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault.
+ * @vcpu: Virtual CPU.
+ * @gva: Guest virtual address to be accessed.
+ * @write: True if write attempted (must be dirtied and made writable).
+ *
+ * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and
+ * dirtying the page if @write so that guest instructions can be modified.
+ *
+ * Returns: KVM_MIPS_MAPPED on success.
+ * KVM_MIPS_GVA if bad guest virtual address.
+ * KVM_MIPS_GPA if bad guest physical address.
+ * KVM_MIPS_TLB if guest TLB not present.
+ * KVM_MIPS_TLBINV if guest TLB present but not valid.
+ * KVM_MIPS_TLBMOD if guest TLB read only.
+ */
+enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
+ unsigned long gva,
+ bool write)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_mips_tlb *tlb;
+ int index;
+
+ if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
+ if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)
+ return KVM_MIPS_GPA;
+ } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
+ KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
+ /* Address should be in the guest TLB */
+ index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
+ if (index < 0)
+ return KVM_MIPS_TLB;
+ tlb = &vcpu->arch.guest_tlb[index];
+
+ /* Entry should be valid, and dirty for writes */
+ if (!TLB_IS_VALID(*tlb, gva))
+ return KVM_MIPS_TLBINV;
+ if (write && !TLB_IS_DIRTY(*tlb, gva))
+ return KVM_MIPS_TLBMOD;
+
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write))
+ return KVM_MIPS_GPA;
+ } else {
+ return KVM_MIPS_GVA;
+ }
+
+ return KVM_MIPS_MAPPED;
+}
+
+int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
+{
+ int err;
+
+ if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ),
+ "Expect BadInstr/BadInstrP registers to be used with VZ\n"))
+ return -EINVAL;
+
+retry:
+ kvm_trap_emul_gva_lockless_begin(vcpu);
+ err = get_user(*out, opc);
+ kvm_trap_emul_gva_lockless_end(vcpu);
+
+ if (unlikely(err)) {
+ /*
+ * Try to handle the fault, maybe we just raced with a GVA
+ * invalidation.
+ */
+ err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc,
+ false);
+ if (unlikely(err)) {
+ kvm_err("%s: illegal address: %p\n",
+ __func__, opc);
+ return -EFAULT;
+ }
+
+ /* Hopefully it'll work now */
+ goto retry;
+ }
+ return 0;
+}
diff --git a/arch/mips/kvm/msa.S b/arch/mips/kvm/msa.S
new file mode 100644
index 000000000..d02f0c6cc
--- /dev/null
+++ b/arch/mips/kvm/msa.S
@@ -0,0 +1,161 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * MIPS SIMD Architecture (MSA) context handling code for KVM.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+
+ .set noreorder
+ .set noat
+
+LEAF(__kvm_save_msa)
+ st_d 0, VCPU_FPR0, a0
+ st_d 1, VCPU_FPR1, a0
+ st_d 2, VCPU_FPR2, a0
+ st_d 3, VCPU_FPR3, a0
+ st_d 4, VCPU_FPR4, a0
+ st_d 5, VCPU_FPR5, a0
+ st_d 6, VCPU_FPR6, a0
+ st_d 7, VCPU_FPR7, a0
+ st_d 8, VCPU_FPR8, a0
+ st_d 9, VCPU_FPR9, a0
+ st_d 10, VCPU_FPR10, a0
+ st_d 11, VCPU_FPR11, a0
+ st_d 12, VCPU_FPR12, a0
+ st_d 13, VCPU_FPR13, a0
+ st_d 14, VCPU_FPR14, a0
+ st_d 15, VCPU_FPR15, a0
+ st_d 16, VCPU_FPR16, a0
+ st_d 17, VCPU_FPR17, a0
+ st_d 18, VCPU_FPR18, a0
+ st_d 19, VCPU_FPR19, a0
+ st_d 20, VCPU_FPR20, a0
+ st_d 21, VCPU_FPR21, a0
+ st_d 22, VCPU_FPR22, a0
+ st_d 23, VCPU_FPR23, a0
+ st_d 24, VCPU_FPR24, a0
+ st_d 25, VCPU_FPR25, a0
+ st_d 26, VCPU_FPR26, a0
+ st_d 27, VCPU_FPR27, a0
+ st_d 28, VCPU_FPR28, a0
+ st_d 29, VCPU_FPR29, a0
+ st_d 30, VCPU_FPR30, a0
+ st_d 31, VCPU_FPR31, a0
+ jr ra
+ nop
+ END(__kvm_save_msa)
+
+LEAF(__kvm_restore_msa)
+ ld_d 0, VCPU_FPR0, a0
+ ld_d 1, VCPU_FPR1, a0
+ ld_d 2, VCPU_FPR2, a0
+ ld_d 3, VCPU_FPR3, a0
+ ld_d 4, VCPU_FPR4, a0
+ ld_d 5, VCPU_FPR5, a0
+ ld_d 6, VCPU_FPR6, a0
+ ld_d 7, VCPU_FPR7, a0
+ ld_d 8, VCPU_FPR8, a0
+ ld_d 9, VCPU_FPR9, a0
+ ld_d 10, VCPU_FPR10, a0
+ ld_d 11, VCPU_FPR11, a0
+ ld_d 12, VCPU_FPR12, a0
+ ld_d 13, VCPU_FPR13, a0
+ ld_d 14, VCPU_FPR14, a0
+ ld_d 15, VCPU_FPR15, a0
+ ld_d 16, VCPU_FPR16, a0
+ ld_d 17, VCPU_FPR17, a0
+ ld_d 18, VCPU_FPR18, a0
+ ld_d 19, VCPU_FPR19, a0
+ ld_d 20, VCPU_FPR20, a0
+ ld_d 21, VCPU_FPR21, a0
+ ld_d 22, VCPU_FPR22, a0
+ ld_d 23, VCPU_FPR23, a0
+ ld_d 24, VCPU_FPR24, a0
+ ld_d 25, VCPU_FPR25, a0
+ ld_d 26, VCPU_FPR26, a0
+ ld_d 27, VCPU_FPR27, a0
+ ld_d 28, VCPU_FPR28, a0
+ ld_d 29, VCPU_FPR29, a0
+ ld_d 30, VCPU_FPR30, a0
+ ld_d 31, VCPU_FPR31, a0
+ jr ra
+ nop
+ END(__kvm_restore_msa)
+
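+/*
+ * Restore the upper 64 bits of MSA vector register \wr from \off(\base), using
+ * a doubleword insert on 64-bit kernels or two word inserts on 32-bit kernels.
+ */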
+ .macro kvm_restore_msa_upper wr, off, base
+ .set push
+ .set noat
+#ifdef CONFIG_64BIT
+ ld $1, \off(\base)
+ insert_d \wr, 1
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ lw $1, \off(\base)
+ insert_w \wr, 2
+ lw $1, (\off+4)(\base)
+ insert_w \wr, 3
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ lw $1, (\off+4)(\base)
+ insert_w \wr, 2
+ lw $1, \off(\base)
+ insert_w \wr, 3
+#endif
+ .set pop
+ .endm
+
+LEAF(__kvm_restore_msa_upper)
+ kvm_restore_msa_upper 0, VCPU_FPR0 +8, a0
+ kvm_restore_msa_upper 1, VCPU_FPR1 +8, a0
+ kvm_restore_msa_upper 2, VCPU_FPR2 +8, a0
+ kvm_restore_msa_upper 3, VCPU_FPR3 +8, a0
+ kvm_restore_msa_upper 4, VCPU_FPR4 +8, a0
+ kvm_restore_msa_upper 5, VCPU_FPR5 +8, a0
+ kvm_restore_msa_upper 6, VCPU_FPR6 +8, a0
+ kvm_restore_msa_upper 7, VCPU_FPR7 +8, a0
+ kvm_restore_msa_upper 8, VCPU_FPR8 +8, a0
+ kvm_restore_msa_upper 9, VCPU_FPR9 +8, a0
+ kvm_restore_msa_upper 10, VCPU_FPR10+8, a0
+ kvm_restore_msa_upper 11, VCPU_FPR11+8, a0
+ kvm_restore_msa_upper 12, VCPU_FPR12+8, a0
+ kvm_restore_msa_upper 13, VCPU_FPR13+8, a0
+ kvm_restore_msa_upper 14, VCPU_FPR14+8, a0
+ kvm_restore_msa_upper 15, VCPU_FPR15+8, a0
+ kvm_restore_msa_upper 16, VCPU_FPR16+8, a0
+ kvm_restore_msa_upper 17, VCPU_FPR17+8, a0
+ kvm_restore_msa_upper 18, VCPU_FPR18+8, a0
+ kvm_restore_msa_upper 19, VCPU_FPR19+8, a0
+ kvm_restore_msa_upper 20, VCPU_FPR20+8, a0
+ kvm_restore_msa_upper 21, VCPU_FPR21+8, a0
+ kvm_restore_msa_upper 22, VCPU_FPR22+8, a0
+ kvm_restore_msa_upper 23, VCPU_FPR23+8, a0
+ kvm_restore_msa_upper 24, VCPU_FPR24+8, a0
+ kvm_restore_msa_upper 25, VCPU_FPR25+8, a0
+ kvm_restore_msa_upper 26, VCPU_FPR26+8, a0
+ kvm_restore_msa_upper 27, VCPU_FPR27+8, a0
+ kvm_restore_msa_upper 28, VCPU_FPR28+8, a0
+ kvm_restore_msa_upper 29, VCPU_FPR29+8, a0
+ kvm_restore_msa_upper 30, VCPU_FPR30+8, a0
+ kvm_restore_msa_upper 31, VCPU_FPR31+8, a0
+ jr ra
+ nop
+ END(__kvm_restore_msa_upper)
+
+LEAF(__kvm_restore_msacsr)
+ lw t0, VCPU_MSA_CSR(a0)
+ /*
+ * The ctcmsa must stay at this offset in __kvm_restore_msacsr.
+ * See kvm_mips_csr_die_notify() which handles t0 containing a value
+ * which triggers an MSA FP Exception, which must be stepped over and
+ * ignored since the set cause bits must remain there for the guest.
+ */
+ _ctcmsa MSA_CSR, t0
+ jr ra
+ nop
+ END(__kvm_restore_msacsr)
diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c
new file mode 100644
index 000000000..53f851a61
--- /dev/null
+++ b/arch/mips/kvm/stats.c
@@ -0,0 +1,63 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: COP0 access histogram
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/kvm_host.h>
+
+char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
+ "Index",
+ "Random",
+ "EntryLo0",
+ "EntryLo1",
+ "Context",
+ "PG Mask",
+ "Wired",
+ "HWREna",
+ "BadVAddr",
+ "Count",
+ "EntryHI",
+ "Compare",
+ "Status",
+ "Cause",
+ "EXC PC",
+ "PRID",
+ "Config",
+ "LLAddr",
+ "Watch Lo",
+ "Watch Hi",
+ "X Context",
+ "Reserved",
+ "Impl Dep",
+ "Debug",
+ "DEPC",
+ "PerfCnt",
+ "ErrCtl",
+ "CacheErr",
+ "TagLo",
+ "TagHi",
+ "ErrorEPC",
+ "DESAVE"
+};
+
+void kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ int i, j;
+
+ kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
+ for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
+ for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
+ if (vcpu->arch.cop0->stat[i][j])
+ kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j,
+ vcpu->arch.cop0->stat[i][j]);
+ }
+ }
+#endif
+}
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
new file mode 100644
index 000000000..1c1fbce3f
--- /dev/null
+++ b/arch/mips/kvm/tlb.c
@@ -0,0 +1,700 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
+ * TLB handlers run from KSEG0
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/kvm_host.h>
+#include <linux/srcu.h>
+
+#include <asm/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+#include <asm/tlb.h>
+#include <asm/tlbdebug.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#define KVM_GUEST_PC_TLB 0
+#define KVM_GUEST_SP_TLB 1
+
+#ifdef CONFIG_KVM_MIPS_VZ
+unsigned long GUESTID_MASK;
+EXPORT_SYMBOL_GPL(GUESTID_MASK);
+unsigned long GUESTID_FIRST_VERSION;
+EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
+unsigned long GUESTID_VERSION_MASK;
+EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);
+
+static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
+{
+ struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;
+
+ if (cpu_has_guestid)
+ return 0;
+ else
+ return cpu_asid(smp_processor_id(), gpa_mm);
+}
+#endif
+
+static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+{
+ struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
+ int cpu = smp_processor_id();
+
+ return cpu_asid(cpu, kern_mm);
+}
+
+static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+{
+ struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
+ int cpu = smp_processor_id();
+
+ return cpu_asid(cpu, user_mm);
+}
+
+/* Structure defining a TLB entry data set. */
+
+void kvm_mips_dump_host_tlbs(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ kvm_info("HOST TLBs:\n");
+ dump_tlb_regs();
+ pr_info("\n");
+ dump_tlb_all();
+
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
+
+void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_mips_tlb tlb;
+ int i;
+
+ kvm_info("Guest TLBs:\n");
+ kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
+
+ for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+ tlb = vcpu->arch.guest_tlb[i];
+ kvm_info("TLB%c%3d Hi 0x%08lx ",
+ (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
+ ? ' ' : '*',
+ i, tlb.tlb_hi);
+ kvm_info("Lo0=0x%09llx %c%c attr %lx ",
+ (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
+ (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
+ (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
+ (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
+ kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
+ (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
+ (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
+ (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
+ (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
+ tlb.tlb_mask);
+ }
+}
+EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
+
+int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
+{
+ int i;
+ int index = -1;
+ struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
+
+ for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+ if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
+ TLB_HI_ASID_HIT(tlb[i], entryhi)) {
+ index = i;
+ break;
+ }
+ }
+
+ kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
+ __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]);
+
+ return index;
+}
+EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
+
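+/*
+ * Probe the host TLB for @entryhi and, if a matching entry is found, overwrite
+ * it with a unique (unused) EntryHi to invalidate it. Returns the probe index,
+ * which is negative if no matching entry was found.
+ */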
+static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
+{
+ int idx;
+
+ write_c0_entryhi(entryhi);
+ mtc0_tlbw_hazard();
+
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+
+ if (idx >= current_cpu_data.tlbsize)
+ BUG();
+
+ if (idx >= 0) {
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+ mtc0_tlbw_hazard();
+
+ tlb_write_indexed();
+ tlbw_use_hazard();
+ }
+
+ return idx;
+}
+
+int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
+ bool user, bool kernel)
+{
+ /*
+	 * Initialize idx_user and idx_kernel to work around a bogus
+ * maybe-initialized warning when using GCC 6.
+ */
+ int idx_user = 0, idx_kernel = 0;
+ unsigned long flags, old_entryhi;
+
+ local_irq_save(flags);
+
+ old_entryhi = read_c0_entryhi();
+
+ if (user)
+ idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
+ kvm_mips_get_user_asid(vcpu));
+ if (kernel)
+ idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
+ kvm_mips_get_kernel_asid(vcpu));
+
+ write_c0_entryhi(old_entryhi);
+ mtc0_tlbw_hazard();
+
+ local_irq_restore(flags);
+
+ /*
+ * We don't want to get reserved instruction exceptions for missing tlb
+ * entries.
+ */
+ if (cpu_has_vtag_icache)
+ flush_icache_all();
+
+ if (user && idx_user >= 0)
+ kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
+ __func__, (va & VPN2_MASK) |
+ kvm_mips_get_user_asid(vcpu), idx_user);
+ if (kernel && idx_kernel >= 0)
+ kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n",
+ __func__, (va & VPN2_MASK) |
+ kvm_mips_get_kernel_asid(vcpu), idx_kernel);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
+
+#ifdef CONFIG_KVM_MIPS_VZ
+
+/* GuestID management */
+
+/**
+ * clear_root_gid() - Set GuestCtl1.RID for normal root operation.
+ */
+static inline void clear_root_gid(void)
+{
+ if (cpu_has_guestid) {
+ clear_c0_guestctl1(MIPS_GCTL1_RID);
+ mtc0_tlbw_hazard();
+ }
+}
+
+/**
+ * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
+ *
+ * Sets the root GuestID to match the current guest GuestID, for TLB operation
+ * on the GPA->RPA mappings in the root TLB.
+ *
+ * The caller must be sure to disable HTW while the root GID is set, and
+ * possibly longer if TLB registers are modified.
+ */
+static inline void set_root_gid_to_guest_gid(void)
+{
+ unsigned int guestctl1;
+
+ if (cpu_has_guestid) {
+ back_to_back_c0_hazard();
+ guestctl1 = read_c0_guestctl1();
+ guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
+ ((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
+ << MIPS_GCTL1_RID_SHIFT;
+ write_c0_guestctl1(guestctl1);
+ mtc0_tlbw_hazard();
+ }
+}
+
+int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
+{
+ int idx;
+ unsigned long flags, old_entryhi;
+
+ local_irq_save(flags);
+ htw_stop();
+
+ /* Set root GuestID for root probe and write of guest TLB entry */
+ set_root_gid_to_guest_gid();
+
+ old_entryhi = read_c0_entryhi();
+
+ idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
+ kvm_mips_get_root_asid(vcpu));
+
+ write_c0_entryhi(old_entryhi);
+ clear_root_gid();
+ mtc0_tlbw_hazard();
+
+ htw_start();
+ local_irq_restore(flags);
+
+ /*
+ * We don't want to get reserved instruction exceptions for missing tlb
+ * entries.
+ */
+ if (cpu_has_vtag_icache)
+ flush_icache_all();
+
+ if (idx > 0)
+ kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
+ __func__, (va & VPN2_MASK) |
+ kvm_mips_get_root_asid(vcpu), idx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);
+
+/**
+ * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
+ * @vcpu: KVM VCPU pointer.
+ * @gva:	Guest virtual address in a TLB mapped guest segment.
+ * @gpa:	Pointer to output guest physical address it maps to.
+ *
+ * Converts a guest virtual address in a guest TLB mapped segment to a guest
+ * physical address, by probing the guest TLB.
+ *
+ * Returns: 0 if guest TLB mapping exists for @gva. *@gpa will have been
+ * written.
+ * -EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
+ * have been written.
+ */
+int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
+ unsigned long *gpa)
+{
+ unsigned long o_entryhi, o_entrylo[2], o_pagemask;
+ unsigned int o_index;
+ unsigned long entrylo[2], pagemask, pagemaskbit, pa;
+ unsigned long flags;
+ int index;
+
+ /* Probe the guest TLB for a mapping */
+ local_irq_save(flags);
+ /* Set root GuestID for root probe of guest TLB entry */
+ htw_stop();
+ set_root_gid_to_guest_gid();
+
+ o_entryhi = read_gc0_entryhi();
+ o_index = read_gc0_index();
+
+ write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
+ mtc0_tlbw_hazard();
+ guest_tlb_probe();
+ tlb_probe_hazard();
+
+ index = read_gc0_index();
+ if (index < 0) {
+ /* No match, fail */
+ write_gc0_entryhi(o_entryhi);
+ write_gc0_index(o_index);
+
+ clear_root_gid();
+ htw_start();
+ local_irq_restore(flags);
+ return -EFAULT;
+ }
+
+ /* Match! read the TLB entry */
+ o_entrylo[0] = read_gc0_entrylo0();
+ o_entrylo[1] = read_gc0_entrylo1();
+ o_pagemask = read_gc0_pagemask();
+
+ mtc0_tlbr_hazard();
+ guest_tlb_read();
+ tlb_read_hazard();
+
+ entrylo[0] = read_gc0_entrylo0();
+ entrylo[1] = read_gc0_entrylo1();
+ pagemask = ~read_gc0_pagemask() & ~0x1fffl;
+
+ write_gc0_entryhi(o_entryhi);
+ write_gc0_index(o_index);
+ write_gc0_entrylo0(o_entrylo[0]);
+ write_gc0_entrylo1(o_entrylo[1]);
+ write_gc0_pagemask(o_pagemask);
+
+ clear_root_gid();
+ htw_start();
+ local_irq_restore(flags);
+
+ /* Select one of the EntryLo values and interpret the GPA */
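+	/*
+	 * pagemask ^ (pagemask & (pagemask - 1)) isolates the lowest set bit of
+	 * pagemask; shifting it right by one gives the even/odd page select bit
+	 * for this page size.
+	 */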
+ pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
+ pa = entrylo[!!(gva & pagemaskbit)];
+
+ /*
+ * TLB entry may have become invalid since TLB probe if physical FTLB
+ * entries are shared between threads (e.g. I6400).
+ */
+ if (!(pa & ENTRYLO_V))
+ return -EFAULT;
+
+ /*
+ * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
+ * split with XI/RI in the middle.
+ */
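+	/*
+	 * The EntryLo PFN field starts at bit 6, so pa << 6 yields PFN << 12;
+	 * the mask drops the flag bits that were shifted up with it.
+	 */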
+ pa = (pa << 6) & ~0xfffl;
+ pa |= gva & ~(pagemask | pagemaskbit);
+
+ *gpa = pa;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);
+
+/**
+ * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
+ * guests.
+ *
+ * Invalidate all entries in root tlb which are GPA mappings.
+ */
+void kvm_vz_local_flush_roottlb_all_guests(void)
+{
+ unsigned long flags;
+ unsigned long old_entryhi, old_pagemask, old_guestctl1;
+ int entry;
+
+ if (WARN_ON(!cpu_has_guestid))
+ return;
+
+ local_irq_save(flags);
+ htw_stop();
+
+ /* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
+ old_entryhi = read_c0_entryhi();
+ old_pagemask = read_c0_pagemask();
+ old_guestctl1 = read_c0_guestctl1();
+
+ /*
+ * Invalidate guest entries in root TLB while leaving root entries
+ * intact when possible.
+ */
+ for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+ tlb_read();
+ tlb_read_hazard();
+
+ /* Don't invalidate non-guest (RVA) mappings in the root TLB */
+ if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
+ continue;
+
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+ write_c0_guestctl1(0);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ }
+
+ write_c0_entryhi(old_entryhi);
+ write_c0_pagemask(old_pagemask);
+ write_c0_guestctl1(old_guestctl1);
+ tlbw_use_hazard();
+
+ htw_start();
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);
+
+/**
+ * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
+ *
+ * Invalidate all entries in guest tlb irrespective of guestid.
+ */
+void kvm_vz_local_flush_guesttlb_all(void)
+{
+ unsigned long flags;
+ unsigned long old_index;
+ unsigned long old_entryhi;
+ unsigned long old_entrylo[2];
+ unsigned long old_pagemask;
+ int entry;
+ u64 cvmmemctl2 = 0;
+
+ local_irq_save(flags);
+
+ /* Preserve all clobbered guest registers */
+ old_index = read_gc0_index();
+ old_entryhi = read_gc0_entryhi();
+ old_entrylo[0] = read_gc0_entrylo0();
+ old_entrylo[1] = read_gc0_entrylo1();
+ old_pagemask = read_gc0_pagemask();
+
+ switch (current_cpu_type()) {
+ case CPU_CAVIUM_OCTEON3:
+ /* Inhibit machine check due to multiple matching TLB entries */
+ cvmmemctl2 = read_c0_cvmmemctl2();
+ cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
+ write_c0_cvmmemctl2(cvmmemctl2);
+ break;
+ }
+
+ /* Invalidate guest entries in guest TLB */
+ write_gc0_entrylo0(0);
+ write_gc0_entrylo1(0);
+ write_gc0_pagemask(0);
+ for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
+ /* Make sure all entries differ. */
+ write_gc0_index(entry);
+ write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
+ mtc0_tlbw_hazard();
+ guest_tlb_write_indexed();
+ }
+
+ if (cvmmemctl2) {
+ cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
+ write_c0_cvmmemctl2(cvmmemctl2);
+ }
+
+ write_gc0_index(old_index);
+ write_gc0_entryhi(old_entryhi);
+ write_gc0_entrylo0(old_entrylo[0]);
+ write_gc0_entrylo1(old_entrylo[1]);
+ write_gc0_pagemask(old_pagemask);
+ tlbw_use_hazard();
+
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);
+
+/**
+ * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
+ * @buf: Buffer to write TLB entries into.
+ * @index: Start index.
+ * @count: Number of entries to save.
+ *
+ * Save a range of guest TLB entries. The caller must ensure interrupts are
+ * disabled.
+ */
+void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
+ unsigned int count)
+{
+ unsigned int end = index + count;
+ unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
+ unsigned int guestctl1 = 0;
+ int old_index, i;
+
+ /* Save registers we're about to clobber */
+ old_index = read_gc0_index();
+ old_entryhi = read_gc0_entryhi();
+ old_entrylo0 = read_gc0_entrylo0();
+ old_entrylo1 = read_gc0_entrylo1();
+ old_pagemask = read_gc0_pagemask();
+
+ /* Set root GuestID for root probe */
+ htw_stop();
+ set_root_gid_to_guest_gid();
+ if (cpu_has_guestid)
+ guestctl1 = read_c0_guestctl1();
+
+ /* Read each entry from guest TLB */
+ for (i = index; i < end; ++i, ++buf) {
+ write_gc0_index(i);
+
+ mtc0_tlbr_hazard();
+ guest_tlb_read();
+ tlb_read_hazard();
+
+ if (cpu_has_guestid &&
+ (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
+ /* Entry invalid or belongs to another guest */
+ buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
+ buf->tlb_lo[0] = 0;
+ buf->tlb_lo[1] = 0;
+ buf->tlb_mask = 0;
+ } else {
+ /* Entry belongs to the right guest */
+ buf->tlb_hi = read_gc0_entryhi();
+ buf->tlb_lo[0] = read_gc0_entrylo0();
+ buf->tlb_lo[1] = read_gc0_entrylo1();
+ buf->tlb_mask = read_gc0_pagemask();
+ }
+ }
+
+ /* Clear root GuestID again */
+ clear_root_gid();
+ htw_start();
+
+ /* Restore clobbered registers */
+ write_gc0_index(old_index);
+ write_gc0_entryhi(old_entryhi);
+ write_gc0_entrylo0(old_entrylo0);
+ write_gc0_entrylo1(old_entrylo1);
+ write_gc0_pagemask(old_pagemask);
+
+ tlbw_use_hazard();
+}
+EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);
+
+/**
+ * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
+ * @buf: Buffer to read TLB entries from.
+ * @index: Start index.
+ * @count: Number of entries to load.
+ *
+ * Load a range of guest TLB entries. The caller must ensure interrupts are
+ * disabled.
+ */
+void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
+ unsigned int count)
+{
+ unsigned int end = index + count;
+ unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
+ int old_index, i;
+
+ /* Save registers we're about to clobber */
+ old_index = read_gc0_index();
+ old_entryhi = read_gc0_entryhi();
+ old_entrylo0 = read_gc0_entrylo0();
+ old_entrylo1 = read_gc0_entrylo1();
+ old_pagemask = read_gc0_pagemask();
+
+ /* Set root GuestID for root probe */
+ htw_stop();
+ set_root_gid_to_guest_gid();
+
+ /* Write each entry to guest TLB */
+ for (i = index; i < end; ++i, ++buf) {
+ write_gc0_index(i);
+ write_gc0_entryhi(buf->tlb_hi);
+ write_gc0_entrylo0(buf->tlb_lo[0]);
+ write_gc0_entrylo1(buf->tlb_lo[1]);
+ write_gc0_pagemask(buf->tlb_mask);
+
+ mtc0_tlbw_hazard();
+ guest_tlb_write_indexed();
+ }
+
+ /* Clear root GuestID again */
+ clear_root_gid();
+ htw_start();
+
+ /* Restore clobbered registers */
+ write_gc0_index(old_index);
+ write_gc0_entryhi(old_entryhi);
+ write_gc0_entrylo0(old_entrylo0);
+ write_gc0_entrylo1(old_entrylo1);
+ write_gc0_pagemask(old_pagemask);
+
+ tlbw_use_hazard();
+}
+EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);
+
+#ifdef CONFIG_CPU_LOONGSON64
+void kvm_loongson_clear_guest_vtlb(void)
+{
+ int idx = read_gc0_index();
+
+ /* Set root GuestID for root probe and write of guest TLB entry */
+ set_root_gid_to_guest_gid();
+
+ write_gc0_index(0);
+ guest_tlbinvf();
+ write_gc0_index(idx);
+
+ clear_root_gid();
+ set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
+}
+EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_vtlb);
+
+void kvm_loongson_clear_guest_ftlb(void)
+{
+ int i;
+ int idx = read_gc0_index();
+
+ /* Set root GuestID for root probe and write of guest TLB entry */
+ set_root_gid_to_guest_gid();
+
+ for (i = current_cpu_data.tlbsizevtlb;
+ i < (current_cpu_data.tlbsizevtlb +
+ current_cpu_data.tlbsizeftlbsets);
+ i++) {
+ write_gc0_index(i);
+ guest_tlbinvf();
+ }
+ write_gc0_index(idx);
+
+ clear_root_gid();
+ set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
+}
+EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb);
+#endif
+
+#endif
+
+/**
+ * kvm_mips_suspend_mm() - Suspend the active mm.
+ * @cpu: The CPU we're running on.
+ *
+ * Suspend the active_mm, ready for a switch to a KVM guest virtual address
+ * space. This is left active for the duration of guest context, including time
+ * with interrupts enabled, so we need to be careful not to confuse e.g. cache
+ * management IPIs.
+ *
+ * kvm_mips_resume_mm() should be called before context switching to a different
+ * process so we don't need to worry about reference counting.
+ *
+ * This needs to be in static kernel code to avoid exporting init_mm.
+ */
+void kvm_mips_suspend_mm(int cpu)
+{
+ cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
+ current->active_mm = &init_mm;
+}
+EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);
+
+/**
+ * kvm_mips_resume_mm() - Resume the current process mm.
+ * @cpu: The CPU we're running on.
+ *
+ * Resume the mm of the current process, after a switch back from a KVM guest
+ * virtual address space (see kvm_mips_suspend_mm()).
+ */
+void kvm_mips_resume_mm(int cpu)
+{
+ cpumask_set_cpu(cpu, mm_cpumask(current->mm));
+ current->active_mm = current->mm;
+}
+EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
new file mode 100644
index 000000000..a8c7fd7bf
--- /dev/null
+++ b/arch/mips/kvm/trace.h
@@ -0,0 +1,346 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
+/*
+ * arch/mips/kvm/mips.c
+ */
+extern bool kvm_trace_guest_mode_change;
+int kvm_guest_mode_change_trace_reg(void);
+void kvm_guest_mode_change_trace_unreg(void);
+
+/*
+ * Tracepoints for VM enters
+ */
+DECLARE_EVENT_CLASS(kvm_transition,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+ TP_STRUCT__entry(
+ __field(unsigned long, pc)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = vcpu->arch.pc;
+ ),
+
+ TP_printk("PC: 0x%08lx",
+ __entry->pc)
+);
+
+DEFINE_EVENT(kvm_transition, kvm_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu));
+
+DEFINE_EVENT(kvm_transition, kvm_reenter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu));
+
+DEFINE_EVENT(kvm_transition, kvm_out,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu));
+
+/* The first 32 exit reasons correspond to Cause.ExcCode */
+#define KVM_TRACE_EXIT_INT 0
+#define KVM_TRACE_EXIT_TLBMOD 1
+#define KVM_TRACE_EXIT_TLBMISS_LD 2
+#define KVM_TRACE_EXIT_TLBMISS_ST 3
+#define KVM_TRACE_EXIT_ADDRERR_LD 4
+#define KVM_TRACE_EXIT_ADDRERR_ST 5
+#define KVM_TRACE_EXIT_SYSCALL 8
+#define KVM_TRACE_EXIT_BREAK_INST 9
+#define KVM_TRACE_EXIT_RESVD_INST 10
+#define KVM_TRACE_EXIT_COP_UNUSABLE 11
+#define KVM_TRACE_EXIT_TRAP_INST 13
+#define KVM_TRACE_EXIT_MSA_FPE 14
+#define KVM_TRACE_EXIT_FPE 15
+#define KVM_TRACE_EXIT_MSA_DISABLED 21
+#define KVM_TRACE_EXIT_GUEST_EXIT 27
+/* Further exit reasons */
+#define KVM_TRACE_EXIT_WAIT 32
+#define KVM_TRACE_EXIT_CACHE 33
+#define KVM_TRACE_EXIT_SIGNAL 34
+/* 32 exit reasons correspond to GuestCtl0.GExcCode (VZ) */
+#define KVM_TRACE_EXIT_GEXCCODE_BASE 64
+#define KVM_TRACE_EXIT_GPSI 64 /* 0 */
+#define KVM_TRACE_EXIT_GSFC 65 /* 1 */
+#define KVM_TRACE_EXIT_HC 66 /* 2 */
+#define KVM_TRACE_EXIT_GRR 67 /* 3 */
+#define KVM_TRACE_EXIT_GVA 72 /* 8 */
+#define KVM_TRACE_EXIT_GHFC 73 /* 9 */
+#define KVM_TRACE_EXIT_GPA 74 /* 10 */
+
+/* Tracepoints for VM exits */
+#define kvm_trace_symbol_exit_types \
+ { KVM_TRACE_EXIT_INT, "Interrupt" }, \
+ { KVM_TRACE_EXIT_TLBMOD, "TLB Mod" }, \
+ { KVM_TRACE_EXIT_TLBMISS_LD, "TLB Miss (LD)" }, \
+ { KVM_TRACE_EXIT_TLBMISS_ST, "TLB Miss (ST)" }, \
+ { KVM_TRACE_EXIT_ADDRERR_LD, "Address Error (LD)" }, \
+ { KVM_TRACE_EXIT_ADDRERR_ST, "Address Err (ST)" }, \
+ { KVM_TRACE_EXIT_SYSCALL, "System Call" }, \
+ { KVM_TRACE_EXIT_BREAK_INST, "Break Inst" }, \
+ { KVM_TRACE_EXIT_RESVD_INST, "Reserved Inst" }, \
+ { KVM_TRACE_EXIT_COP_UNUSABLE, "COP0/1 Unusable" }, \
+ { KVM_TRACE_EXIT_TRAP_INST, "Trap Inst" }, \
+ { KVM_TRACE_EXIT_MSA_FPE, "MSA FPE" }, \
+ { KVM_TRACE_EXIT_FPE, "FPE" }, \
+ { KVM_TRACE_EXIT_MSA_DISABLED, "MSA Disabled" }, \
+ { KVM_TRACE_EXIT_GUEST_EXIT, "Guest Exit" }, \
+ { KVM_TRACE_EXIT_WAIT, "WAIT" }, \
+ { KVM_TRACE_EXIT_CACHE, "CACHE" }, \
+ { KVM_TRACE_EXIT_SIGNAL, "Signal" }, \
+ { KVM_TRACE_EXIT_GPSI, "GPSI" }, \
+ { KVM_TRACE_EXIT_GSFC, "GSFC" }, \
+ { KVM_TRACE_EXIT_HC, "HC" }, \
+ { KVM_TRACE_EXIT_GRR, "GRR" }, \
+ { KVM_TRACE_EXIT_GVA, "GVA" }, \
+ { KVM_TRACE_EXIT_GHFC, "GHFC" }, \
+ { KVM_TRACE_EXIT_GPA, "GPA" }
+
+TRACE_EVENT(kvm_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+ TP_ARGS(vcpu, reason),
+ TP_STRUCT__entry(
+ __field(unsigned long, pc)
+ __field(unsigned int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = vcpu->arch.pc;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("[%s]PC: 0x%08lx",
+ __print_symbolic(__entry->reason,
+ kvm_trace_symbol_exit_types),
+ __entry->pc)
+);
+
+#define KVM_TRACE_MFC0 0
+#define KVM_TRACE_MTC0 1
+#define KVM_TRACE_DMFC0 2
+#define KVM_TRACE_DMTC0 3
+#define KVM_TRACE_RDHWR 4
+
+#define KVM_TRACE_HWR_COP0 0
+#define KVM_TRACE_HWR_HWR 1
+
+#define KVM_TRACE_COP0(REG, SEL) ((KVM_TRACE_HWR_COP0 << 8) | \
+ ((REG) << 3) | (SEL))
+#define KVM_TRACE_HWR(REG, SEL) ((KVM_TRACE_HWR_HWR << 8) | \
+ ((REG) << 3) | (SEL))
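+
+/*
+ * For example, KVM_TRACE_COP0(12, 0) (Status) encodes to
+ * (0 << 8) | (12 << 3) | 0 = 0x60; TP_printk() in kvm_hwr below decodes
+ * the register number from bits [7:3] and the select from bits [2:0].
+ */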
+
+#define kvm_trace_symbol_hwr_ops \
+ { KVM_TRACE_MFC0, "MFC0" }, \
+ { KVM_TRACE_MTC0, "MTC0" }, \
+ { KVM_TRACE_DMFC0, "DMFC0" }, \
+ { KVM_TRACE_DMTC0, "DMTC0" }, \
+ { KVM_TRACE_RDHWR, "RDHWR" }
+
+#define kvm_trace_symbol_hwr_cop \
+ { KVM_TRACE_HWR_COP0, "COP0" }, \
+ { KVM_TRACE_HWR_HWR, "HWR" }
+
+#define kvm_trace_symbol_hwr_regs \
+ { KVM_TRACE_COP0( 0, 0), "Index" }, \
+ { KVM_TRACE_COP0( 2, 0), "EntryLo0" }, \
+ { KVM_TRACE_COP0( 3, 0), "EntryLo1" }, \
+ { KVM_TRACE_COP0( 4, 0), "Context" }, \
+ { KVM_TRACE_COP0( 4, 2), "UserLocal" }, \
+ { KVM_TRACE_COP0( 5, 0), "PageMask" }, \
+ { KVM_TRACE_COP0( 6, 0), "Wired" }, \
+ { KVM_TRACE_COP0( 7, 0), "HWREna" }, \
+ { KVM_TRACE_COP0( 8, 0), "BadVAddr" }, \
+ { KVM_TRACE_COP0( 9, 0), "Count" }, \
+ { KVM_TRACE_COP0(10, 0), "EntryHi" }, \
+ { KVM_TRACE_COP0(11, 0), "Compare" }, \
+ { KVM_TRACE_COP0(12, 0), "Status" }, \
+ { KVM_TRACE_COP0(12, 1), "IntCtl" }, \
+ { KVM_TRACE_COP0(12, 2), "SRSCtl" }, \
+ { KVM_TRACE_COP0(13, 0), "Cause" }, \
+ { KVM_TRACE_COP0(14, 0), "EPC" }, \
+ { KVM_TRACE_COP0(15, 0), "PRId" }, \
+ { KVM_TRACE_COP0(15, 1), "EBase" }, \
+ { KVM_TRACE_COP0(16, 0), "Config" }, \
+ { KVM_TRACE_COP0(16, 1), "Config1" }, \
+ { KVM_TRACE_COP0(16, 2), "Config2" }, \
+ { KVM_TRACE_COP0(16, 3), "Config3" }, \
+ { KVM_TRACE_COP0(16, 4), "Config4" }, \
+ { KVM_TRACE_COP0(16, 5), "Config5" }, \
+ { KVM_TRACE_COP0(16, 7), "Config7" }, \
+ { KVM_TRACE_COP0(17, 1), "MAAR" }, \
+ { KVM_TRACE_COP0(17, 2), "MAARI" }, \
+ { KVM_TRACE_COP0(26, 0), "ECC" }, \
+ { KVM_TRACE_COP0(30, 0), "ErrorEPC" }, \
+ { KVM_TRACE_COP0(31, 2), "KScratch1" }, \
+ { KVM_TRACE_COP0(31, 3), "KScratch2" }, \
+ { KVM_TRACE_COP0(31, 4), "KScratch3" }, \
+ { KVM_TRACE_COP0(31, 5), "KScratch4" }, \
+ { KVM_TRACE_COP0(31, 6), "KScratch5" }, \
+ { KVM_TRACE_COP0(31, 7), "KScratch6" }, \
+ { KVM_TRACE_HWR( 0, 0), "CPUNum" }, \
+ { KVM_TRACE_HWR( 1, 0), "SYNCI_Step" }, \
+ { KVM_TRACE_HWR( 2, 0), "CC" }, \
+ { KVM_TRACE_HWR( 3, 0), "CCRes" }, \
+ { KVM_TRACE_HWR(29, 0), "ULR" }
+
+TRACE_EVENT(kvm_hwr,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, unsigned int reg,
+ unsigned long val),
+ TP_ARGS(vcpu, op, reg, val),
+ TP_STRUCT__entry(
+ __field(unsigned long, val)
+ __field(u16, reg)
+ __field(u8, op)
+ ),
+
+ TP_fast_assign(
+ __entry->val = val;
+ __entry->reg = reg;
+ __entry->op = op;
+ ),
+
+ TP_printk("%s %s (%s:%u:%u) 0x%08lx",
+ __print_symbolic(__entry->op,
+ kvm_trace_symbol_hwr_ops),
+ __print_symbolic(__entry->reg,
+ kvm_trace_symbol_hwr_regs),
+ __print_symbolic(__entry->reg >> 8,
+ kvm_trace_symbol_hwr_cop),
+ (__entry->reg >> 3) & 0x1f,
+ __entry->reg & 0x7,
+ __entry->val)
+);
+
+#define KVM_TRACE_AUX_RESTORE 0
+#define KVM_TRACE_AUX_SAVE 1
+#define KVM_TRACE_AUX_ENABLE 2
+#define KVM_TRACE_AUX_DISABLE 3
+#define KVM_TRACE_AUX_DISCARD 4
+
+#define KVM_TRACE_AUX_FPU 1
+#define KVM_TRACE_AUX_MSA 2
+#define KVM_TRACE_AUX_FPU_MSA 3
+
+#define kvm_trace_symbol_aux_op \
+ { KVM_TRACE_AUX_RESTORE, "restore" }, \
+ { KVM_TRACE_AUX_SAVE, "save" }, \
+ { KVM_TRACE_AUX_ENABLE, "enable" }, \
+ { KVM_TRACE_AUX_DISABLE, "disable" }, \
+ { KVM_TRACE_AUX_DISCARD, "discard" }
+
+#define kvm_trace_symbol_aux_state \
+ { KVM_TRACE_AUX_FPU, "FPU" }, \
+ { KVM_TRACE_AUX_MSA, "MSA" }, \
+ { KVM_TRACE_AUX_FPU_MSA, "FPU & MSA" }
+
+TRACE_EVENT(kvm_aux,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
+ unsigned int state),
+ TP_ARGS(vcpu, op, state),
+ TP_STRUCT__entry(
+ __field(unsigned long, pc)
+ __field(u8, op)
+ __field(u8, state)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = vcpu->arch.pc;
+ __entry->op = op;
+ __entry->state = state;
+ ),
+
+ TP_printk("%s %s PC: 0x%08lx",
+ __print_symbolic(__entry->op,
+ kvm_trace_symbol_aux_op),
+ __print_symbolic(__entry->state,
+ kvm_trace_symbol_aux_state),
+ __entry->pc)
+);
+
+TRACE_EVENT(kvm_asid_change,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int old_asid,
+ unsigned int new_asid),
+ TP_ARGS(vcpu, old_asid, new_asid),
+ TP_STRUCT__entry(
+ __field(unsigned long, pc)
+ __field(u8, old_asid)
+ __field(u8, new_asid)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = vcpu->arch.pc;
+ __entry->old_asid = old_asid;
+ __entry->new_asid = new_asid;
+ ),
+
+ TP_printk("PC: 0x%08lx old: 0x%02x new: 0x%02x",
+ __entry->pc,
+ __entry->old_asid,
+ __entry->new_asid)
+);
+
+TRACE_EVENT(kvm_guestid_change,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int guestid),
+ TP_ARGS(vcpu, guestid),
+ TP_STRUCT__entry(
+ __field(unsigned int, guestid)
+ ),
+
+ TP_fast_assign(
+ __entry->guestid = guestid;
+ ),
+
+ TP_printk("GuestID: 0x%02x",
+ __entry->guestid)
+);
+
+TRACE_EVENT_FN(kvm_guest_mode_change,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+ TP_STRUCT__entry(
+ __field(unsigned long, epc)
+ __field(unsigned long, pc)
+ __field(unsigned long, badvaddr)
+ __field(unsigned int, status)
+ __field(unsigned int, cause)
+ ),
+
+ TP_fast_assign(
+ __entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0);
+ __entry->pc = vcpu->arch.pc;
+ __entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0);
+ __entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0);
+ __entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0);
+ ),
+
+ TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx",
+ __entry->epc,
+ __entry->pc,
+ __entry->status,
+ __entry->cause,
+ __entry->badvaddr),
+
+ kvm_guest_mode_change_trace_reg,
+ kvm_guest_mode_change_trace_unreg
+);
+
+#endif /* _TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
new file mode 100644
index 000000000..0788c00d7
--- /dev/null
+++ b/arch/mips/kvm/trap_emul.c
@@ -0,0 +1,1306 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/log2.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
+
+#include "interrupt.h"
+
+static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
+{
+ gpa_t gpa;
+ gva_t kseg = KSEGX(gva);
+ gva_t gkseg = KVM_GUEST_KSEGX(gva);
+
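+ /*
+ * Host-style CKSEG0/CKSEG1 addresses translate directly via
+ * CPHYSADDR(). Guest KSEG0 addresses (relocated by the trap & emulate
+ * ABI into what the host treats as user address space) use
+ * KVM_GUEST_CPHYSADDR() instead.
+ */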
+ if ((kseg == CKSEG0) || (kseg == CKSEG1))
+ gpa = CPHYSADDR(gva);
+ else if (gkseg == KVM_GUEST_KSEG0)
+ gpa = KVM_GUEST_CPHYSADDR(gva);
+ else {
+ kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
+ kvm_mips_dump_host_tlbs();
+ gpa = KVM_INVALID_ADDR;
+ }
+
+ kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
+
+ return gpa;
+}
+
+static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
+{
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ u32 inst = 0;
+
+ /*
+ * Fetch the instruction.
+ */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ kvm_get_badinstr(opc, vcpu, &inst);
+
+ kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
+ exccode, opc, inst, badvaddr,
+ kvm_read_c0_guest_status(vcpu->arch.cop0));
+ kvm_arch_vcpu_dump_regs(vcpu);
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+}
+
+static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
+ /* FPU Unusable */
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
+ (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
+ /*
+ * Unusable/no FPU in guest:
+ * deliver guest COP1 Unusable Exception
+ */
+ er = kvm_mips_emulate_fpu_exc(cause, opc, vcpu);
+ } else {
+ /* Restore FPU state */
+ kvm_own_fpu(vcpu);
+ er = EMULATE_DONE;
+ }
+ } else {
+ er = kvm_mips_emulate_inst(cause, opc, vcpu);
+ }
+
+ switch (er) {
+ case EMULATE_DONE:
+ ret = RESUME_GUEST;
+ break;
+
+ case EMULATE_FAIL:
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ break;
+
+ case EMULATE_WAIT:
+ vcpu->run->exit_reason = KVM_EXIT_INTR;
+ ret = RESUME_HOST;
+ break;
+
+ case EMULATE_HYPERCALL:
+ ret = kvm_mips_handle_hypcall(vcpu);
+ break;
+
+ default:
+ BUG();
+ }
+ return ret;
+}
+
+static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er;
+ union mips_instruction inst;
+ int err;
+
+ /* A code fetch fault doesn't count as an MMIO */
+ if (kvm_is_ifetch_fault(&vcpu->arch)) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+ }
+
+ /* Fetch the instruction. */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ err = kvm_get_badinstr(opc, vcpu, &inst.word);
+ if (err) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+ }
+
+ /* Emulate the load */
+ er = kvm_mips_emulate_load(inst, cause, vcpu);
+ if (er == EMULATE_FAIL) {
+ kvm_err("Emulate load from MMIO space failed\n");
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ } else {
+ vcpu->run->exit_reason = KVM_EXIT_MMIO;
+ }
+ return RESUME_HOST;
+}
+
+static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er;
+ union mips_instruction inst;
+ int err;
+
+ /* Fetch the instruction. */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ err = kvm_get_badinstr(opc, vcpu, &inst.word);
+ if (err) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+ }
+
+ /* Emulate the store */
+ er = kvm_mips_emulate_store(inst, cause, vcpu);
+ if (er == EMULATE_FAIL) {
+ kvm_err("Emulate store to MMIO space failed\n");
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ } else {
+ vcpu->run->exit_reason = KVM_EXIT_MMIO;
+ }
+ return RESUME_HOST;
+}
+
+static int kvm_mips_bad_access(u32 cause, u32 *opc,
+ struct kvm_vcpu *vcpu, bool store)
+{
+ if (store)
+ return kvm_mips_bad_store(cause, opc, vcpu);
+ else
+ return kvm_mips_bad_load(cause, opc, vcpu);
+}
+
+static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ struct kvm_mips_tlb *tlb;
+ unsigned long entryhi;
+ int index;
+
+ if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+ || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+ /*
+ * First find the mapping in the guest TLB. If the failure to
+ * write was due to the guest TLB, it should be up to the guest
+ * to handle it.
+ */
+ entryhi = (badvaddr & VPN2_MASK) |
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
+ index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+
+ /*
+ * These should never happen.
+ * They would indicate stale host TLB entries.
+ */
+ if (unlikely(index < 0)) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+ }
+ tlb = vcpu->arch.guest_tlb + index;
+ if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+ }
+
+ /*
+ * Guest entry not dirty? That would explain the TLB modified
+ * exception. Relay that on to the guest so it can handle it.
+ */
+ if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
+ kvm_mips_emulate_tlbmod(cause, opc, vcpu);
+ return RESUME_GUEST;
+ }
+
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
+ true))
+ /* Not writable, needs handling as MMIO */
+ return kvm_mips_bad_store(cause, opc, vcpu);
+ return RESUME_GUEST;
+ } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+ if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
+ /* Not writable, needs handling as MMIO */
+ return kvm_mips_bad_store(cause, opc, vcpu);
+ return RESUME_GUEST;
+ } else {
+ /* host kernel addresses are all handled as MMIO */
+ return kvm_mips_bad_store(cause, opc, vcpu);
+ }
+}
+
+static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
+{
+ struct kvm_run *run = vcpu->run;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+ && KVM_GUEST_KERNEL_MODE(vcpu)) {
+ if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+ || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+ kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
+ store ? "ST" : "LD", cause, opc, badvaddr);
+
+ /*
+ * User Address (UA) fault. This could happen if:
+ * (1) the TLB entry is not present/valid in both the guest and the
+ * shadow host TLBs; in this case we pass the fault on to the
+ * guest kernel and let it handle it.
+ * (2) the TLB entry is present in the guest TLB but not in the
+ * shadow host TLB; in this case we inject the entry from the
+ * guest TLB into the shadow host TLB.
+ */
+
+ er = kvm_mips_handle_tlbmiss(cause, opc, vcpu, store);
+ if (er == EMULATE_DONE)
+ ret = RESUME_GUEST;
+ else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+ /*
+ * All KSEG0 faults are handled by KVM, as the guest kernel does
+ * not expect to ever get them
+ */
+ if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
+ ret = kvm_mips_bad_access(cause, opc, vcpu, store);
+ } else if (KVM_GUEST_KERNEL_MODE(vcpu)
+ && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+ /*
+ * With EVA we may get a TLB exception instead of an address
+ * error when the guest performs MMIO to KSeg1 addresses.
+ */
+ ret = kvm_mips_bad_access(cause, opc, vcpu, store);
+ } else {
+ kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
+ store ? "ST" : "LD", cause, opc, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ kvm_arch_vcpu_dump_regs(vcpu);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
+{
+ return kvm_trap_emul_handle_tlb_miss(vcpu, true);
+}
+
+static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+{
+ return kvm_trap_emul_handle_tlb_miss(vcpu, false);
+}
+
+static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
+{
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ int ret = RESUME_GUEST;
+
+ if (KVM_GUEST_KERNEL_MODE(vcpu)
+ && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+ ret = kvm_mips_bad_store(cause, opc, vcpu);
+ } else {
+ kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
+{
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ int ret = RESUME_GUEST;
+
+ if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
+ ret = kvm_mips_bad_load(cause, opc, vcpu);
+ } else {
+ kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
+{
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_syscall(cause, opc, vcpu);
+ if (er == EMULATE_DONE)
+ ret = RESUME_GUEST;
+ else {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
+{
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_handle_ri(cause, opc, vcpu);
+ if (er == EMULATE_DONE)
+ ret = RESUME_GUEST;
+ else {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
+{
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_bp_exc(cause, opc, vcpu);
+ if (er == EMULATE_DONE)
+ ret = RESUME_GUEST;
+ else {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
+{
+ u32 __user *opc = (u32 __user *)vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_trap_exc(cause, opc, vcpu);
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
+{
+ u32 __user *opc = (u32 __user *)vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_msafpe_exc(cause, opc, vcpu);
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
+{
+ u32 __user *opc = (u32 __user *)vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_fpe_exc(cause, opc, vcpu);
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+/**
+ * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
+ * @vcpu: Virtual CPU context.
+ *
+ * Handle when the guest attempts to use MSA when it is disabled.
+ */
+static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
+ (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
+ /*
+ * No MSA in guest, or FPU enabled and not in FR=1 mode:
+ * deliver a guest Reserved Instruction exception
+ */
+ er = kvm_mips_emulate_ri_exc(cause, opc, vcpu);
+ } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
+ /* MSA disabled by guest, guest MSA disabled exception */
+ er = kvm_mips_emulate_msadis_exc(cause, opc, vcpu);
+ } else {
+ /* Restore MSA/FPU state */
+ kvm_own_msa(vcpu);
+ er = EMULATE_DONE;
+ }
+
+ switch (er) {
+ case EMULATE_DONE:
+ ret = RESUME_GUEST;
+ break;
+
+ case EMULATE_FAIL:
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ break;
+
+ default:
+ BUG();
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_hardware_enable(void)
+{
+ return 0;
+}
+
+static void kvm_trap_emul_hardware_disable(void)
+{
+}
+
+static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
+{
+ int r;
+
+ switch (ext) {
+ case KVM_CAP_MIPS_TE:
+ r = 1;
+ break;
+ case KVM_CAP_IOEVENTFD:
+ r = 1;
+ break;
+ default:
+ r = 0;
+ break;
+ }
+
+ return r;
+}
+
+static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
+ struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
+
+ /*
+ * Allocate GVA -> HPA page tables.
+ * MIPS doesn't use the mm_struct pointer argument.
+ */
+ kern_mm->pgd = pgd_alloc(kern_mm);
+ if (!kern_mm->pgd)
+ return -ENOMEM;
+
+ user_mm->pgd = pgd_alloc(user_mm);
+ if (!user_mm->pgd) {
+ pgd_free(kern_mm, kern_mm->pgd);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
+{
+ /* Don't free host kernel page tables copied from init_mm.pgd */
+ const unsigned long end = 0x80000000;
+ unsigned long pgd_va, pud_va, pmd_va;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ int i, j, k;
+
+ for (i = 0; i < USER_PTRS_PER_PGD; i++) {
+ if (pgd_none(pgd[i]))
+ continue;
+
+ pgd_va = (unsigned long)i << PGDIR_SHIFT;
+ if (pgd_va >= end)
+ break;
+ p4d = p4d_offset(pgd, 0);
+ pud = pud_offset(p4d + i, 0);
+ for (j = 0; j < PTRS_PER_PUD; j++) {
+ if (pud_none(pud[j]))
+ continue;
+
+ pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
+ if (pud_va >= end)
+ break;
+ pmd = pmd_offset(pud + j, 0);
+ for (k = 0; k < PTRS_PER_PMD; k++) {
+ if (pmd_none(pmd[k]))
+ continue;
+
+ pmd_va = pud_va | (k << PMD_SHIFT);
+ if (pmd_va >= end)
+ break;
+ pte = pte_offset_kernel(pmd + k, 0);
+ pte_free_kernel(NULL, pte);
+ }
+ pmd_free(NULL, pmd);
+ }
+ pud_free(NULL, pud);
+ }
+ pgd_free(NULL, pgd);
+}
+
+static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+ kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
+ kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
+}
+
+static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ u32 config, config1;
+ int vcpu_id = vcpu->vcpu_id;
+
+ /* Start off the timer at 100 MHz */
+ kvm_mips_init_count(vcpu, 100*1000*1000);
+
+ /*
+ * Arch specific stuff, set up config registers properly so that the
+ * guest will come up as expected
+ */
+#ifndef CONFIG_CPU_MIPSR6
+ /* r2-r5, simulate a MIPS 24kc */
+ kvm_write_c0_guest_prid(cop0, 0x00019300);
+#else
+ /* r6+, simulate a generic QEMU machine */
+ kvm_write_c0_guest_prid(cop0, 0x00010000);
+#endif
+ /*
+ * Have config1, Cacheable, noncoherent, write-back, write allocate.
+ * Endianness, arch revision & virtually tagged icache should match
+ * host.
+ */
+ config = read_c0_config() & MIPS_CONF_AR;
+ config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ config |= CONF_BE;
+#endif
+ if (cpu_has_vtag_icache)
+ config |= MIPS_CONF_VI;
+ kvm_write_c0_guest_config(cop0, config);
+
+ /* Read the cache characteristics from the host Config1 Register */
+ config1 = (read_c0_config1() & ~0x7f);
+
+ /* DCache line size not correctly reported in Config1 on Octeon CPUs */
+ if (cpu_dcache_line_size()) {
+ config1 &= ~MIPS_CONF1_DL;
+ config1 |= ((ilog2(cpu_dcache_line_size()) - 1) <<
+ MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL;
+ }
+
+ /* Set up MMU size */
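+ /* (Config1 bits 30:25 hold the TLB size minus one) */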
+ config1 &= ~(0x3f << 25);
+ config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
+
+ /* We unset some bits that we aren't emulating */
+ config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
+ MIPS_CONF1_WR | MIPS_CONF1_CA);
+ kvm_write_c0_guest_config1(cop0, config1);
+
+ /* Have config3, no tertiary/secondary caches implemented */
+ kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
+ /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */
+
+ /* Have config4, UserLocal */
+ kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);
+
+ /* Have config5 */
+ kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);
+
+ /* No config6 */
+ kvm_write_c0_guest_config5(cop0, 0);
+
+ /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
+ kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
+
+ /* Status */
+ kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL);
+
+ /*
+ * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
+ */
+ kvm_write_c0_guest_intctl(cop0, 0xFC000000);
+
+ /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
+ kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
+ (vcpu_id & MIPS_EBASE_CPUNUM));
+
+ /* Put PC at guest reset vector */
+ vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000);
+
+ return 0;
+}
+
+static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
+{
+ /* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
+ kvm_flush_remote_tlbs(kvm);
+}
+
+static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *slot)
+{
+ kvm_trap_emul_flush_shadow_all(kvm);
+}
+
+static u64 kvm_trap_emul_get_one_regs[] = {
+ KVM_REG_MIPS_CP0_INDEX,
+ KVM_REG_MIPS_CP0_ENTRYLO0,
+ KVM_REG_MIPS_CP0_ENTRYLO1,
+ KVM_REG_MIPS_CP0_CONTEXT,
+ KVM_REG_MIPS_CP0_USERLOCAL,
+ KVM_REG_MIPS_CP0_PAGEMASK,
+ KVM_REG_MIPS_CP0_WIRED,
+ KVM_REG_MIPS_CP0_HWRENA,
+ KVM_REG_MIPS_CP0_BADVADDR,
+ KVM_REG_MIPS_CP0_COUNT,
+ KVM_REG_MIPS_CP0_ENTRYHI,
+ KVM_REG_MIPS_CP0_COMPARE,
+ KVM_REG_MIPS_CP0_STATUS,
+ KVM_REG_MIPS_CP0_INTCTL,
+ KVM_REG_MIPS_CP0_CAUSE,
+ KVM_REG_MIPS_CP0_EPC,
+ KVM_REG_MIPS_CP0_PRID,
+ KVM_REG_MIPS_CP0_EBASE,
+ KVM_REG_MIPS_CP0_CONFIG,
+ KVM_REG_MIPS_CP0_CONFIG1,
+ KVM_REG_MIPS_CP0_CONFIG2,
+ KVM_REG_MIPS_CP0_CONFIG3,
+ KVM_REG_MIPS_CP0_CONFIG4,
+ KVM_REG_MIPS_CP0_CONFIG5,
+ KVM_REG_MIPS_CP0_CONFIG7,
+ KVM_REG_MIPS_CP0_ERROREPC,
+ KVM_REG_MIPS_CP0_KSCRATCH1,
+ KVM_REG_MIPS_CP0_KSCRATCH2,
+ KVM_REG_MIPS_CP0_KSCRATCH3,
+ KVM_REG_MIPS_CP0_KSCRATCH4,
+ KVM_REG_MIPS_CP0_KSCRATCH5,
+ KVM_REG_MIPS_CP0_KSCRATCH6,
+
+ KVM_REG_MIPS_COUNT_CTL,
+ KVM_REG_MIPS_COUNT_RESUME,
+ KVM_REG_MIPS_COUNT_HZ,
+};
+
+static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
+{
+ return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
+}
+
+static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
+ u64 __user *indices)
+{
+ if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
+ sizeof(kvm_trap_emul_get_one_regs)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);
+
+ return 0;
+}
+
+static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg,
+ s64 *v)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+ switch (reg->id) {
+ case KVM_REG_MIPS_CP0_INDEX:
+ *v = (long)kvm_read_c0_guest_index(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYLO0:
+ *v = kvm_read_c0_guest_entrylo0(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYLO1:
+ *v = kvm_read_c0_guest_entrylo1(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONTEXT:
+ *v = (long)kvm_read_c0_guest_context(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_USERLOCAL:
+ *v = (long)kvm_read_c0_guest_userlocal(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_PAGEMASK:
+ *v = (long)kvm_read_c0_guest_pagemask(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_WIRED:
+ *v = (long)kvm_read_c0_guest_wired(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_HWRENA:
+ *v = (long)kvm_read_c0_guest_hwrena(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_BADVADDR:
+ *v = (long)kvm_read_c0_guest_badvaddr(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYHI:
+ *v = (long)kvm_read_c0_guest_entryhi(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_COMPARE:
+ *v = (long)kvm_read_c0_guest_compare(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_STATUS:
+ *v = (long)kvm_read_c0_guest_status(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_INTCTL:
+ *v = (long)kvm_read_c0_guest_intctl(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CAUSE:
+ *v = (long)kvm_read_c0_guest_cause(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_EPC:
+ *v = (long)kvm_read_c0_guest_epc(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_PRID:
+ *v = (long)kvm_read_c0_guest_prid(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_EBASE:
+ *v = (long)kvm_read_c0_guest_ebase(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG:
+ *v = (long)kvm_read_c0_guest_config(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG1:
+ *v = (long)kvm_read_c0_guest_config1(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG2:
+ *v = (long)kvm_read_c0_guest_config2(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG3:
+ *v = (long)kvm_read_c0_guest_config3(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG4:
+ *v = (long)kvm_read_c0_guest_config4(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG5:
+ *v = (long)kvm_read_c0_guest_config5(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG7:
+ *v = (long)kvm_read_c0_guest_config7(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_COUNT:
+ *v = kvm_mips_read_count(vcpu);
+ break;
+ case KVM_REG_MIPS_COUNT_CTL:
+ *v = vcpu->arch.count_ctl;
+ break;
+ case KVM_REG_MIPS_COUNT_RESUME:
+ *v = ktime_to_ns(vcpu->arch.count_resume);
+ break;
+ case KVM_REG_MIPS_COUNT_HZ:
+ *v = vcpu->arch.count_hz;
+ break;
+ case KVM_REG_MIPS_CP0_ERROREPC:
+ *v = (long)kvm_read_c0_guest_errorepc(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_KSCRATCH1:
+ *v = (long)kvm_read_c0_guest_kscratch1(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_KSCRATCH2:
+ *v = (long)kvm_read_c0_guest_kscratch2(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_KSCRATCH3:
+ *v = (long)kvm_read_c0_guest_kscratch3(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_KSCRATCH4:
+ *v = (long)kvm_read_c0_guest_kscratch4(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_KSCRATCH5:
+ *v = (long)kvm_read_c0_guest_kscratch5(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_KSCRATCH6:
+ *v = (long)kvm_read_c0_guest_kscratch6(cop0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg,
+ s64 v)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ int ret = 0;
+ unsigned int cur, change;
+
+ switch (reg->id) {
+ case KVM_REG_MIPS_CP0_INDEX:
+ kvm_write_c0_guest_index(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYLO0:
+ kvm_write_c0_guest_entrylo0(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYLO1:
+ kvm_write_c0_guest_entrylo1(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_CONTEXT:
+ kvm_write_c0_guest_context(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_USERLOCAL:
+ kvm_write_c0_guest_userlocal(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_PAGEMASK:
+ kvm_write_c0_guest_pagemask(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_WIRED:
+ kvm_write_c0_guest_wired(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_HWRENA:
+ kvm_write_c0_guest_hwrena(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_BADVADDR:
+ kvm_write_c0_guest_badvaddr(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYHI:
+ kvm_write_c0_guest_entryhi(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_STATUS:
+ kvm_write_c0_guest_status(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_INTCTL:
+ /* No VInt, so no VS, read-only for now */
+ break;
+ case KVM_REG_MIPS_CP0_EPC:
+ kvm_write_c0_guest_epc(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_PRID:
+ kvm_write_c0_guest_prid(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_EBASE:
+ /*
+ * Allow core number to be written, but the exception base must
+ * remain in guest KSeg0.
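+ * (0x1ffff000 covers the writable exception base bits 28:12, and
+ * MIPS_EBASE_CPUNUM the CPUNum field in bits 9:0.)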
+ */
+ kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM,
+ v);
+ break;
+ case KVM_REG_MIPS_CP0_COUNT:
+ kvm_mips_write_count(vcpu, v);
+ break;
+ case KVM_REG_MIPS_CP0_COMPARE:
+ kvm_mips_write_compare(vcpu, v, false);
+ break;
+ case KVM_REG_MIPS_CP0_CAUSE:
+ /*
+ * If the timer is stopped or started (DC bit) it must look
+ * atomic with changes to the interrupt pending bits (TI, IRQ5).
+ * A timer interrupt should not happen in between.
+ */
+ if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
+ if (v & CAUSEF_DC) {
+ /* disable timer first */
+ kvm_mips_count_disable_cause(vcpu);
+ kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
+ v);
+ } else {
+ /* enable timer last */
+ kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
+ v);
+ kvm_mips_count_enable_cause(vcpu);
+ }
+ } else {
+ kvm_write_c0_guest_cause(cop0, v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG:
+ /* read-only for now */
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG1:
+ cur = kvm_read_c0_guest_config1(cop0);
+ change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_c0_guest_config1(cop0, v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG2:
+ /* read-only for now */
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG3:
+ cur = kvm_read_c0_guest_config3(cop0);
+ change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_c0_guest_config3(cop0, v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG4:
+ cur = kvm_read_c0_guest_config4(cop0);
+ change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_c0_guest_config4(cop0, v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG5:
+ cur = kvm_read_c0_guest_config5(cop0);
+ change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_c0_guest_config5(cop0, v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG7:
+ /* writes ignored */
+ break;
+ case KVM_REG_MIPS_COUNT_CTL:
+ ret = kvm_mips_set_count_ctl(vcpu, v);
+ break;
+ case KVM_REG_MIPS_COUNT_RESUME:
+ ret = kvm_mips_set_count_resume(vcpu, v);
+ break;
+ case KVM_REG_MIPS_COUNT_HZ:
+ ret = kvm_mips_set_count_hz(vcpu, v);
+ break;
+ case KVM_REG_MIPS_CP0_ERROREPC:
+ kvm_write_c0_guest_errorepc(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_KSCRATCH1:
+ kvm_write_c0_guest_kscratch1(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_KSCRATCH2:
+ kvm_write_c0_guest_kscratch2(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_KSCRATCH3:
+ kvm_write_c0_guest_kscratch3(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_KSCRATCH4:
+ kvm_write_c0_guest_kscratch4(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_KSCRATCH5:
+ kvm_write_c0_guest_kscratch5(cop0, v);
+ break;
+ case KVM_REG_MIPS_CP0_KSCRATCH6:
+ kvm_write_c0_guest_kscratch6(cop0, v);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
+ struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
+ struct mm_struct *mm;
+
+ /*
+ * Were we in guest context? If so, restore the appropriate ASID based
+ * on the mode of the Guest (Kernel/User).
+ */
+ if (current->flags & PF_VCPU) {
+ mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
+ check_switch_mmu_context(mm);
+ kvm_mips_suspend_mm(cpu);
+ ehb();
+ }
+
+ return 0;
+}
+
+static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
+{
+ kvm_lose_fpu(vcpu);
+
+ if (current->flags & PF_VCPU) {
+ /* Restore normal Linux process memory map */
+ check_switch_mmu_context(current->mm);
+ kvm_mips_resume_mm(cpu);
+ ehb();
+ }
+
+ return 0;
+}
+
+static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
+ bool reload_asid)
+{
+ struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
+ struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
+ struct mm_struct *mm;
+ int i;
+
+ if (likely(!kvm_request_pending(vcpu)))
+ return;
+
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
+ /*
+ * Both kernel & user GVA mappings must be invalidated. The
+ * caller is just about to check whether the ASID is stale
+ * anyway so no need to reload it here.
+ */
+ kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
+ kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
+ for_each_possible_cpu(i) {
+ set_cpu_context(i, kern_mm, 0);
+ set_cpu_context(i, user_mm, 0);
+ }
+
+ /* Generate new ASID for current mode */
+ if (reload_asid) {
+ mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
+ get_new_mmu_context(mm);
+ htw_stop();
+ write_c0_entryhi(cpu_asid(cpu, mm));
+ TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
+ htw_start();
+ }
+ }
+}
+
+/**
+ * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
+ * @vcpu: VCPU pointer.
+ *
+ * Call before a GVA space access outside of guest mode, to ensure that
+ * asynchronous TLB flush requests are handled or delayed until completion of
+ * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
+ *
+ * Should be called with IRQs already enabled.
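+ *
+ * Illustrative usage sketch (not a verbatim caller):
+ *
+ *   kvm_trap_emul_gva_lockless_begin(vcpu);
+ *   err = get_user(val, (u32 __user *)gva);
+ *   kvm_trap_emul_gva_lockless_end(vcpu);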
+ */
+void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
+{
+ /* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
+ WARN_ON_ONCE(irqs_disabled());
+
+ /*
+ * The caller is about to access the GVA space, so we set the mode to
+ * force TLB flush requests to send an IPI, and also disable IRQs to
+ * delay IPI handling until kvm_trap_emul_gva_lockless_end().
+ */
+ local_irq_disable();
+
+ /*
+ * Make sure the read of VCPU requests is not reordered ahead of the
+ * write to vcpu->mode, or we could miss a TLB flush request while
+ * the requester sees the VCPU as outside of guest mode and not needing
+ * an IPI.
+ */
+ smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
+
+ /*
+ * If a TLB flush has been requested (potentially while
+ * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
+ * before accessing the GVA space, and be sure to reload the ASID if
+ * necessary as it'll be immediately used.
+ *
+ * TLB flush requests after this check will trigger an IPI due to the
+ * mode change above, which will be delayed due to IRQs disabled.
+ */
+ kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
+}
+
+/**
+ * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
+ * @vcpu: VCPU pointer.
+ *
+ * Called after a GVA space access outside of guest mode. Should have a matching
+ * call to kvm_trap_emul_gva_lockless_begin().
+ */
+void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Make sure the write to vcpu->mode is not reordered in front of GVA
+ * accesses, or a TLB flush requester may not think it necessary to send
+ * an IPI.
+ */
+ smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
+
+ /*
+ * Now that the access to GVA space is complete, it's safe for pending
+ * TLB flush request IPIs to be handled (which indicates completion).
+ */
+ local_irq_enable();
+}
+
+static void kvm_trap_emul_vcpu_reenter(struct kvm_vcpu *vcpu)
+{
+ struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
+ struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
+ struct mm_struct *mm;
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ int i, cpu = smp_processor_id();
+ unsigned int gasid;
+
+ /*
+ * No need to reload ASID, IRQs are disabled already so there's no rush,
+ * and we'll check if we need to regenerate below anyway before
+ * re-entering the guest.
+ */
+ kvm_trap_emul_check_requests(vcpu, cpu, false);
+
+ if (KVM_GUEST_KERNEL_MODE(vcpu)) {
+ mm = kern_mm;
+ } else {
+ mm = user_mm;
+
+ /*
+ * Lazy host ASID regeneration / PT flush for guest user mode.
+ * If the guest ASID has changed since the last guest usermode
+ * execution, invalidate the stale TLB entries and flush GVA PT
+ * entries too.
+ */
+ gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
+ if (gasid != vcpu->arch.last_user_gasid) {
+ kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
+ for_each_possible_cpu(i)
+ set_cpu_context(i, user_mm, 0);
+ vcpu->arch.last_user_gasid = gasid;
+ }
+ }
+
+ /*
+ * Check if ASID is stale. This may happen due to a TLB flush request or
+ * a lazy user MM invalidation.
+ */
+ check_mmu_context(mm);
+}
+
+static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu)
+{
+ int cpu = smp_processor_id();
+ int r;
+
+ /* Check if we have any exceptions/interrupts pending */
+ kvm_mips_deliver_interrupts(vcpu,
+ kvm_read_c0_guest_cause(vcpu->arch.cop0));
+
+ kvm_trap_emul_vcpu_reenter(vcpu);
+
+ /*
+ * We use user accessors to access guest memory, but we don't want to
+ * invoke Linux page faulting.
+ */
+ pagefault_disable();
+
+ /* Disable hardware page table walking while in guest */
+ htw_stop();
+
+ /*
+ * While in guest context we're in the guest's address space, not the
+ * host process address space, so we need to be careful not to confuse
+ * e.g. cache management IPIs.
+ */
+ kvm_mips_suspend_mm(cpu);
+
+ r = vcpu->arch.vcpu_run(vcpu);
+
+ /* We may have migrated while handling guest exits */
+ cpu = smp_processor_id();
+
+ /* Restore normal Linux process memory map */
+ check_switch_mmu_context(current->mm);
+ kvm_mips_resume_mm(cpu);
+
+ htw_start();
+
+ pagefault_enable();
+
+ return r;
+}
+
+static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
+ /* exit handlers */
+ .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
+ .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
+ .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
+ .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
+ .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
+ .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
+ .handle_syscall = kvm_trap_emul_handle_syscall,
+ .handle_res_inst = kvm_trap_emul_handle_res_inst,
+ .handle_break = kvm_trap_emul_handle_break,
+ .handle_trap = kvm_trap_emul_handle_trap,
+ .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
+ .handle_fpe = kvm_trap_emul_handle_fpe,
+ .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
+ .handle_guest_exit = kvm_trap_emul_no_handler,
+
+ .hardware_enable = kvm_trap_emul_hardware_enable,
+ .hardware_disable = kvm_trap_emul_hardware_disable,
+ .check_extension = kvm_trap_emul_check_extension,
+ .vcpu_init = kvm_trap_emul_vcpu_init,
+ .vcpu_uninit = kvm_trap_emul_vcpu_uninit,
+ .vcpu_setup = kvm_trap_emul_vcpu_setup,
+ .flush_shadow_all = kvm_trap_emul_flush_shadow_all,
+ .flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
+ .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
+ .queue_timer_int = kvm_mips_queue_timer_int_cb,
+ .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
+ .queue_io_int = kvm_mips_queue_io_int_cb,
+ .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
+ .irq_deliver = kvm_mips_irq_deliver_cb,
+ .irq_clear = kvm_mips_irq_clear_cb,
+ .num_regs = kvm_trap_emul_num_regs,
+ .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
+ .get_one_reg = kvm_trap_emul_get_one_reg,
+ .set_one_reg = kvm_trap_emul_set_one_reg,
+ .vcpu_load = kvm_trap_emul_vcpu_load,
+ .vcpu_put = kvm_trap_emul_vcpu_put,
+ .vcpu_run = kvm_trap_emul_vcpu_run,
+ .vcpu_reenter = kvm_trap_emul_vcpu_reenter,
+};
+
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
+{
+ *install_callbacks = &kvm_trap_emul_callbacks;
+ return 0;
+}
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
new file mode 100644
index 000000000..2ffbe9264
--- /dev/null
+++ b/arch/mips/kvm/vz.c
@@ -0,0 +1,3331 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Support for hardware virtualization extensions
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Yann Le Du <ledu@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/cacheops.h>
+#include <asm/cmpxchg.h>
+#include <asm/fpu.h>
+#include <asm/hazards.h>
+#include <asm/inst.h>
+#include <asm/mmu_context.h>
+#include <asm/r4kcache.h>
+#include <asm/time.h>
+#include <asm/tlb.h>
+#include <asm/tlbex.h>
+
+#include <linux/kvm_host.h>
+
+#include "interrupt.h"
+#ifdef CONFIG_CPU_LOONGSON64
+#include "loongson_regs.h"
+#endif
+
+#include "trace.h"
+
+/* Pointers to last VCPU loaded on each physical CPU */
+static struct kvm_vcpu *last_vcpu[NR_CPUS];
+/* Pointers to last VCPU executed on each physical CPU */
+static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];
+
+/*
+ * Number of guest VTLB entries to use, so we can catch inconsistency between
+ * CPUs.
+ */
+static unsigned int kvm_vz_guest_vtlb_size;
+
+static inline long kvm_vz_read_gc0_ebase(void)
+{
+ if (sizeof(long) == 8 && cpu_has_ebase_wg)
+ return read_gc0_ebase_64();
+ else
+ return read_gc0_ebase();
+}
+
+static inline void kvm_vz_write_gc0_ebase(long v)
+{
+ /*
+ * First write with WG=1 to write upper bits, then write again in case
+ * WG should be left at 0.
+ * write_gc0_ebase_64() is no longer UNDEFINED since R6.
+ */
+ if (sizeof(long) == 8 &&
+ (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
+ write_gc0_ebase_64(v | MIPS_EBASE_WG);
+ write_gc0_ebase_64(v);
+ } else {
+ write_gc0_ebase(v | MIPS_EBASE_WG);
+ write_gc0_ebase(v);
+ }
+}
+
+/*
+ * These Config bits may be writable by the guest:
+ * Config: [K23, KU] (!TLB), K0
+ * Config1: (none)
+ * Config2: [TU, SU] (impl)
+ * Config3: ISAOnExc
+ * Config4: FTLBPageSize
+ * Config5: K, CV, MSAEn, UFE, FRE, SBRI, UFR
+ */
+
+static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
+{
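+ /* Only the Config.K0 cacheability field (bits 2:0) is guest writable */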
+ return CONF_CM_CMASK;
+}
+
+static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+ return MIPS_CONF3_ISA_OE;
+}
+
+static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+ /* no need to be exact */
+ return MIPS_CONF4_VFTLBPAGESIZE;
+}
+
+static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+ unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;
+
+ /* Permit MSAEn changes if MSA supported and enabled */
+ if (kvm_mips_guest_has_msa(&vcpu->arch))
+ mask |= MIPS_CONF5_MSAEN;
+
+ /*
+ * Permit guest FPU mode changes if FPU is enabled and the relevant
+ * feature exists according to FIR register.
+ */
+ if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
+ if (cpu_has_ufr)
+ mask |= MIPS_CONF5_UFR;
+ if (cpu_has_fre)
+ mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
+ }
+
+ return mask;
+}
+
+static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+ return LOONGSON_CONF6_INTIMER | LOONGSON_CONF6_EXTIMER;
+}
+
+/*
+ * VZ optionally allows these additional Config bits to be written by root:
+ * Config: M, [MT]
+ * Config1: M, [MMUSize-1, C2, MD, PC, WR, CA], FP
+ * Config2: M
+ * Config3: M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
+ * VInt, SP, CDMM, MT, SM, TL]
+ * Config4: M, [VTLBSizeExt, MMUSizeExt]
+ * Config5: MRP
+ */
+
+static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
+}
+
+static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;
+
+ /* Permit FPU to be present if FPU is supported */
+ if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
+ mask |= MIPS_CONF1_FP;
+
+ return mask;
+}
+
+static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
+}
+
+static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
+ MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;
+
+ /* Permit MSA to be present if MSA is supported */
+ if (kvm_mips_guest_can_have_msa(&vcpu->arch))
+ mask |= MIPS_CONF3_MSA;
+
+ return mask;
+}
+
+static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
+}
+
+static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
+}
+
+static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ return kvm_vz_config6_guest_wrmask(vcpu) |
+ LOONGSON_CONF6_SFBEN | LOONGSON_CONF6_FTLBDIS;
+}
+
+static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
+{
+ /* VZ guest has already converted gva to gpa */
+ return gva;
+}
+
+static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
+{
+ set_bit(priority, &vcpu->arch.pending_exceptions);
+ clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
+}
+
+static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
+{
+ clear_bit(priority, &vcpu->arch.pending_exceptions);
+ set_bit(priority, &vcpu->arch.pending_exceptions_clr);
+}
+
+static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+ /*
+ * timer expiry is asynchronous to vcpu execution therefore defer guest
+ * cp0 accesses
+ */
+ kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+ /*
+ * timer expiry is asynchronous to vcpu execution therefore defer guest
+ * cp0 accesses
+ */
+ kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq)
+{
+ int intr = (int)irq->irq;
+
+ /*
+ * interrupts are asynchronous to vcpu execution therefore defer guest
+ * cp0 accesses
+ */
+ kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr));
+}
+
+static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq)
+{
+ int intr = (int)irq->irq;
+
+ /*
+ * interrupts are asynchronous to vcpu execution therefore defer guest
+ * cp0 accesses
+ */
+ kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
+}
+
+static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+ u32 cause)
+{
+ u32 irq = (priority < MIPS_EXC_MAX) ?
+ kvm_priority_to_irq[priority] : 0;
+
+ switch (priority) {
+ case MIPS_EXC_INT_TIMER:
+ set_gc0_cause(C_TI);
+ break;
+
+ case MIPS_EXC_INT_IO_1:
+ case MIPS_EXC_INT_IO_2:
+ case MIPS_EXC_INT_IPI_1:
+ case MIPS_EXC_INT_IPI_2:
+ if (cpu_has_guestctl2)
+ set_c0_guestctl2(irq);
+ else
+ set_gc0_cause(irq);
+ break;
+
+ default:
+ break;
+ }
+
+ clear_bit(priority, &vcpu->arch.pending_exceptions);
+ return 1;
+}
+
+static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+ u32 cause)
+{
+ u32 irq = (priority < MIPS_EXC_MAX) ?
+ kvm_priority_to_irq[priority] : 0;
+
+ switch (priority) {
+ case MIPS_EXC_INT_TIMER:
+ /*
+ * Call to kvm_write_c0_guest_compare() clears Cause.TI in
+ * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
+ * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
+ * supported or if not using GuestCtl2 Hardware Clear.
+ */
+ if (cpu_has_guestctl2) {
+ if (!(read_c0_guestctl2() & (irq << 14)))
+ clear_c0_guestctl2(irq);
+ } else {
+ clear_gc0_cause(irq);
+ }
+ break;
+
+ case MIPS_EXC_INT_IO_1:
+ case MIPS_EXC_INT_IO_2:
+ case MIPS_EXC_INT_IPI_1:
+ case MIPS_EXC_INT_IPI_2:
+ /* Clear GuestCtl2.VIP irq if not using Hardware Clear */
+ if (cpu_has_guestctl2) {
+ if (!(read_c0_guestctl2() & (irq << 14)))
+ clear_c0_guestctl2(irq);
+ } else {
+ clear_gc0_cause(irq);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
+ return 1;
+}
+
+/*
+ * VZ guest timer handling.
+ */
+
+/**
+ * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
+ * @vcpu: Virtual CPU.
+ *
+ * Returns: true if the VZ GTOffset & real guest CP0_Count should be used
+ * instead of software emulation of guest timer.
+ * false otherwise.
+ */
+static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
+{
+ if (kvm_mips_count_disabled(vcpu))
+ return false;
+
+ /* Chosen frequency must match real frequency */
+ if (mips_hpt_frequency != vcpu->arch.count_hz)
+ return false;
+
+ /* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
+ if (current_cpu_data.gtoffset_mask != 0xffffffff)
+ return false;
+
+ return true;
+}
+
+/**
+ * _kvm_vz_restore_stimer() - Restore soft timer state.
+ * @vcpu: Virtual CPU.
+ * @compare: CP0_Compare register value, restored by caller.
+ * @cause: CP0_Cause register to restore.
+ *
+ * Restore VZ state relating to the soft timer. The hard timer can be enabled
+ * later.
+ */
+static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
+ u32 cause)
+{
+ /*
+ * Avoid spurious counter interrupts by setting Guest CP0_Count to just
+ * after Guest CP0_Compare.
+ */
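+ /* Guest.CP0_Count reads as Root.CP0_Count + CP0_GTOffset */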
+ write_c0_gtoffset(compare - read_c0_count());
+
+ back_to_back_c0_hazard();
+ write_gc0_cause(cause);
+}
+
+/**
+ * _kvm_vz_restore_htimer() - Restore hard timer state.
+ * @vcpu: Virtual CPU.
+ * @compare: CP0_Compare register value, restored by caller.
+ * @cause: CP0_Cause register to restore.
+ *
+ * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
+ * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
+ */
+static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
+ u32 compare, u32 cause)
+{
+ u32 start_count, after_count;
+ ktime_t freeze_time;
+ unsigned long flags;
+
+ /*
+ * Freeze the soft-timer and sync the guest CP0_Count with it. We do
+ * this with interrupts disabled to avoid latency.
+ */
+ local_irq_save(flags);
+ freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count);
+ write_c0_gtoffset(start_count - read_c0_count());
+ local_irq_restore(flags);
+
+ /* restore guest CP0_Cause, as TI may already be set */
+ back_to_back_c0_hazard();
+ write_gc0_cause(cause);
+
+ /*
+ * The above sequence isn't atomic and would result in lost timer
+ * interrupts if we're not careful. Detect if a timer interrupt is due
+ * and assert it.
+ */
+ back_to_back_c0_hazard();
+ after_count = read_gc0_count();
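+ /* unsigned check: true iff Compare lies in (start_count, after_count] */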
+ if (after_count - start_count > compare - start_count - 1)
+ kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+/**
+ * kvm_vz_restore_timer() - Restore timer state.
+ * @vcpu: Virtual CPU.
+ *
+ * Restore soft timer state from saved context.
+ */
+static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ u32 cause, compare;
+
+ compare = kvm_read_sw_gc0_compare(cop0);
+ cause = kvm_read_sw_gc0_cause(cop0);
+
+ write_gc0_compare(compare);
+ _kvm_vz_restore_stimer(vcpu, compare, cause);
+}
+
+/**
+ * kvm_vz_acquire_htimer() - Switch to hard timer state.
+ * @vcpu: Virtual CPU.
+ *
+ * Restore hard timer state on top of existing soft timer state if possible.
+ *
+ * Since hard timer won't remain active over preemption, preemption should be
+ * disabled by the caller.
+ */
+void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
+{
+ u32 gctl0;
+
+ gctl0 = read_c0_guestctl0();
+ if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
+ /* enable guest access to hard timer */
+ write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);
+
+ _kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
+ read_gc0_cause());
+ }
+}
+
+/**
+ * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
+ * @vcpu: Virtual CPU.
+ * @out_compare: Pointer to write compare value to.
+ * @out_cause: Pointer to write cause value to.
+ *
+ * Save VZ guest timer state and switch to software emulation of guest CP0
+ * timer. The hard timer must already be in use, so preemption should be
+ * disabled.
+ */
+static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
+ u32 *out_compare, u32 *out_cause)
+{
+ u32 cause, compare, before_count, end_count;
+ ktime_t before_time;
+
+ compare = read_gc0_compare();
+ *out_compare = compare;
+
+ before_time = ktime_get();
+
+ /*
+ * Record the CP0_Count *prior* to saving CP0_Cause, so we have a time
+ * at which no pending timer interrupt is missing.
+ */
+ before_count = read_gc0_count();
+ back_to_back_c0_hazard();
+ cause = read_gc0_cause();
+ *out_cause = cause;
+
+ /*
+ * Record a final CP0_Count which we will transfer to the soft-timer.
+ * This is recorded *after* saving CP0_Cause, so we don't get any timer
+ * interrupts from just after the final CP0_Count point.
+ */
+ back_to_back_c0_hazard();
+ end_count = read_gc0_count();
+
+ /*
+ * The above sequence isn't atomic, so we could miss a timer interrupt
+ * between reading CP0_Cause and end_count. Detect and record any timer
+ * interrupt due between before_count and end_count.
+ */
+ if (end_count - before_count > compare - before_count - 1)
+ kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+
+ /*
+ * Restore soft-timer, ignoring a small amount of negative drift due to
+ * delay between freeze_hrtimer and setting CP0_GTOffset.
+ */
+ kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
+}
+
+/**
+ * kvm_vz_save_timer() - Save guest timer state.
+ * @vcpu: Virtual CPU.
+ *
+ * Save VZ guest timer state and switch to soft guest timer if hard timer was in
+ * use.
+ */
+static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ u32 gctl0, compare, cause;
+
+ gctl0 = read_c0_guestctl0();
+ if (gctl0 & MIPS_GCTL0_GT) {
+ /* disable guest use of hard timer */
+ write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);
+
+ /* save hard timer state */
+ _kvm_vz_save_htimer(vcpu, &compare, &cause);
+ } else {
+ compare = read_gc0_compare();
+ cause = read_gc0_cause();
+ }
+
+ /* save timer-related state to VCPU context */
+ kvm_write_sw_gc0_cause(cop0, cause);
+ kvm_write_sw_gc0_compare(cop0, compare);
+}
+
+/**
+ * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
+ * @vcpu: Virtual CPU.
+ *
+ * Transfers the state of the hard guest timer to the soft guest timer, leaving
+ * guest state intact so it can continue to be used with the soft timer.
+ */
+void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
+{
+ u32 gctl0, compare, cause;
+
+ preempt_disable();
+ gctl0 = read_c0_guestctl0();
+ if (gctl0 & MIPS_GCTL0_GT) {
+ /* disable guest use of timer */
+ write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);
+
+ /* switch to soft timer */
+ _kvm_vz_save_htimer(vcpu, &compare, &cause);
+
+ /* leave soft timer in usable state */
+ _kvm_vz_restore_stimer(vcpu, compare, cause);
+ }
+ preempt_enable();
+}
+
+/**
+ * is_eva_access() - Find whether an instruction is an EVA memory accessor.
+ * @inst: 32-bit instruction encoding.
+ *
+ * Finds whether @inst encodes an EVA memory access instruction, which would
+ * indicate that emulation of it should access the user mode address space
+ * instead of the kernel mode address space. This matters for MUSUK segments
+ * which are TLB mapped for user mode but unmapped for kernel mode.
+ *
+ * Returns: Whether @inst encodes an EVA accessor instruction.
+ */
+static bool is_eva_access(union mips_instruction inst)
+{
+ if (inst.spec3_format.opcode != spec3_op)
+ return false;
+
+ switch (inst.spec3_format.func) {
+ case lwle_op:
+ case lwre_op:
+ case cachee_op:
+ case sbe_op:
+ case she_op:
+ case sce_op:
+ case swe_op:
+ case swle_op:
+ case swre_op:
+ case prefe_op:
+ case lbue_op:
+ case lhue_op:
+ case lbe_op:
+ case lhe_op:
+ case lle_op:
+ case lwe_op:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_eva_am_mapped() - Find whether an access mode is mapped.
+ * @vcpu: KVM VCPU state.
+ * @am: 3-bit encoded access mode.
+ * @eu: Segment becomes unmapped and uncached when Status.ERL=1.
+ *
+ * Decode @am to find whether it encodes a mapped segment for the current VCPU
+ * state. Where necessary @eu and the actual instruction causing the fault are
+ * taken into account to make the decision.
+ *
+ * Returns: Whether the VCPU faulted on a TLB mapped address.
+ */
+static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
+{
+ u32 am_lookup;
+ int err;
+
+ /*
+ * Interpret access control mode. We assume address errors will already
+ * have been caught by the guest, leaving us with:
+ *       AM    UM  SM  KM   31..24 23..16
+ * UK    0 000          Unm    0      0
+ * MK    1 001          TLB    1
+ * MSK   2 010      TLB TLB    1
+ * MUSK  3 011  TLB TLB TLB    1
+ * MUSUK 4 100  TLB TLB Unm    0      1
+ * USK   5 101      Unm Unm    0      0
+ * -     6 110                 0      0
+ * UUSK  7 111  Unm Unm Unm    0      0
+ *
+ * We shift a magic value by AM across the sign bit to find if always
+ * TLB mapped, and if not shift by 8 again to find if it depends on KM.
+ */
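+ /*
+ * For example, MUSK (AM=3): 0x70080000 << 3 = 0x80400000, sign bit set,
+ * so always TLB mapped. MUSUK (AM=4): 0x70080000 << 4 = 0x00800000
+ * (positive), and a further << 8 gives 0x80000000, so it depends on KM.
+ */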
+ am_lookup = 0x70080000 << am;
+ if ((s32)am_lookup < 0) {
+ /*
+ * MK, MSK, MUSK
+ * Always TLB mapped, unless SegCtl.EU && ERL
+ */
+ if (!eu || !(read_gc0_status() & ST0_ERL))
+ return true;
+ } else {
+ am_lookup <<= 8;
+ if ((s32)am_lookup < 0) {
+ union mips_instruction inst;
+ unsigned int status;
+ u32 *opc;
+
+ /*
+ * MUSUK
+ * TLB mapped if not in kernel mode
+ */
+ status = read_gc0_status();
+ if (!(status & (ST0_EXL | ST0_ERL)) &&
+ (status & ST0_KSU))
+ return true;
+ /*
+ * EVA access instructions in kernel
+ * mode access user address space.
+ */
+ opc = (u32 *)vcpu->arch.pc;
+ if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
+ opc += 1;
+ err = kvm_get_badinstr(opc, vcpu, &inst.word);
+ if (!err && is_eva_access(inst))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
+ * @vcpu: KVM VCPU state.
+ * @gva: Guest virtual address to convert.
+ * @gpa: Output guest physical address.
+ *
+ * Convert a guest virtual address (GVA) which is valid according to the guest
+ * context, to a guest physical address (GPA).
+ *
+ * Returns: 0 on success.
+ * -errno on failure.
+ */
+static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
+ unsigned long *gpa)
+{
+ u32 gva32 = gva;
+ unsigned long segctl;
+
+ if ((long)gva == (s32)gva32) {
+ /* Handle canonical 32-bit virtual address */
+ if (cpu_guest_has_segments) {
+ unsigned long mask, pa;
+
+ switch (gva32 >> 29) {
+ case 0:
+ case 1: /* CFG5 (1GB) */
+ segctl = read_gc0_segctl2() >> 16;
+ mask = (unsigned long)0xfc0000000ull;
+ break;
+ case 2:
+ case 3: /* CFG4 (1GB) */
+ segctl = read_gc0_segctl2();
+ mask = (unsigned long)0xfc0000000ull;
+ break;
+ case 4: /* CFG3 (512MB) */
+ segctl = read_gc0_segctl1() >> 16;
+ mask = (unsigned long)0xfe0000000ull;
+ break;
+ case 5: /* CFG2 (512MB) */
+ segctl = read_gc0_segctl1();
+ mask = (unsigned long)0xfe0000000ull;
+ break;
+ case 6: /* CFG1 (512MB) */
+ segctl = read_gc0_segctl0() >> 16;
+ mask = (unsigned long)0xfe0000000ull;
+ break;
+ case 7: /* CFG0 (512MB) */
+ segctl = read_gc0_segctl0();
+ mask = (unsigned long)0xfe0000000ull;
+ break;
+ default:
+ /*
+ * GCC 4.9 isn't smart enough to figure out that
+ * segctl and mask are always initialised.
+ */
+ unreachable();
+ }
+
+ if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
+ segctl & 0x0008))
+ goto tlb_mapped;
+
+ /* Unmapped, find guest physical address */
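+ /* the CFG PA field (bits 15:9) << 20 lands at physical address bits 35:29 */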
+ pa = (segctl << 20) & mask;
+ pa |= gva32 & ~mask;
+ *gpa = pa;
+ return 0;
+ } else if ((s32)gva32 < (s32)0xc0000000) {
+ /* legacy unmapped KSeg0 or KSeg1 */
+ *gpa = gva32 & 0x1fffffff;
+ return 0;
+ }
+#ifdef CONFIG_64BIT
+ } else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
+ /* XKPHYS */
+ if (cpu_guest_has_segments) {
+ /*
+ * Each of the 8 regions can be overridden by SegCtl2.XR
+ * to use SegCtl1.XAM.
+ */
+ segctl = read_gc0_segctl2();
+ if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
+ segctl = read_gc0_segctl1();
+ if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
+ 0))
+ goto tlb_mapped;
+ }
+
+ }
+ /*
+ * Traditionally fully unmapped.
+ * Bits 61:59 specify the CCA, which we can just mask off here.
+ * Bits 58:PABITS should be zero, but we shouldn't have got here
+ * if they weren't.
+ */
+ *gpa = gva & 0x07ffffffffffffff;
+ return 0;
+#endif
+ }
+
+tlb_mapped:
+ return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
+}
+
+/**
+ * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
+ * @vcpu: KVM VCPU state.
+ * @badvaddr: Root BadVAddr.
+ * @gpa: Output guest physical address.
+ *
+ * VZ implementations are permitted to report guest virtual addresses (GVA) in
+ * BadVAddr on a root exception during guest execution, instead of the more
+ * convenient guest physical addresses (GPA). When we get a GVA, this function
+ * converts it to a GPA, taking into account guest segmentation and guest TLB
+ * state.
+ *
+ * Returns: 0 on success.
+ * -errno on failure.
+ */
+static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
+ unsigned long *gpa)
+{
+ unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
+ MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
+
+ /* If BadVAddr is GPA, then all is well in the world */
+ if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
+ *gpa = badvaddr;
+ return 0;
+ }
+
+ /* Otherwise we'd expect it to be GVA ... */
+ if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
+ "Unexpected gexccode %#x\n", gexccode))
+ return -EINVAL;
+
+ /* ... and we need to perform the GVA->GPA translation in software */
+ return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
+}
+
+static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
+{
+ u32 *opc = (u32 *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ u32 inst = 0;
+
+ /*
+ * Fetch the instruction.
+ */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ kvm_get_badinstr(opc, vcpu, &inst);
+
+ kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
+ exccode, opc, inst, badvaddr,
+ read_gc0_status());
+ kvm_arch_vcpu_dump_regs(vcpu);
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+}
+
+static unsigned long mips_process_maar(unsigned int op, unsigned long val)
+{
+ /* Mask off unused bits */
+ unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;
+
+ if (read_gc0_pagegrain() & PG_ELPA)
+ mask |= 0x00ffffff00000000ull;
+ if (cpu_guest_has_mvh)
+ mask |= MIPS_MAAR_VH;
+
+ /* Set or clear VH */
+ if (op == mtc_op) {
+ /* clear VH */
+ val &= ~MIPS_MAAR_VH;
+ } else if (op == dmtc_op) {
+ /* set VH to match VL */
+ val &= ~MIPS_MAAR_VH;
+ if (val & MIPS_MAAR_VL)
+ val |= MIPS_MAAR_VH;
+ }
+
+ return val & mask;
+}
+
+static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+
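+ /*
+ * Writing all ones selects the highest implemented MAAR index; any
+ * other out-of-range value is ignored.
+ */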
+ val &= MIPS_MAARI_INDEX;
+ if (val == MIPS_MAARI_INDEX)
+ kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
+ else if (val < ARRAY_SIZE(vcpu->arch.maar))
+ kvm_write_sw_gc0_maari(cop0, val);
+}
+
+static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
+ u32 *opc, u32 cause,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ enum emulation_result er = EMULATE_DONE;
+ u32 rt, rd, sel;
+ unsigned long curr_pc;
+ unsigned long val;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ if (inst.co_format.co) {
+ switch (inst.co_format.func) {
+ case wait_op:
+ er = kvm_mips_emul_wait(vcpu);
+ break;
+ default:
+ er = EMULATE_FAIL;
+ }
+ } else {
+ rt = inst.c0r_format.rt;
+ rd = inst.c0r_format.rd;
+ sel = inst.c0r_format.sel;
+
+ switch (inst.c0r_format.rs) {
+ case dmfc_op:
+ case mfc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ cop0->stat[rd][sel]++;
+#endif
+ if (rd == MIPS_CP0_COUNT &&
+ sel == 0) { /* Count */
+ val = kvm_mips_read_count(vcpu);
+ } else if (rd == MIPS_CP0_COMPARE &&
+ sel == 0) { /* Compare */
+ val = read_gc0_compare();
+ } else if (rd == MIPS_CP0_LLADDR &&
+ sel == 0) { /* LLAddr */
+ if (cpu_guest_has_rw_llb)
+ val = read_gc0_lladdr() &
+ MIPS_LLADDR_LLB;
+ else
+ val = 0;
+ } else if (rd == MIPS_CP0_LLADDR &&
+ sel == 1 && /* MAAR */
+ cpu_guest_has_maar &&
+ !cpu_guest_has_dyn_maar) {
+ /* MAARI must be in range */
+ BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
+ ARRAY_SIZE(vcpu->arch.maar));
+ val = vcpu->arch.maar[
+ kvm_read_sw_gc0_maari(cop0)];
+ } else if ((rd == MIPS_CP0_PRID &&
+ (sel == 0 || /* PRid */
+ sel == 2 || /* CDMMBase */
+ sel == 3)) || /* CMGCRBase */
+ (rd == MIPS_CP0_STATUS &&
+ (sel == 2 || /* SRSCtl */
+ sel == 3)) || /* SRSMap */
+ (rd == MIPS_CP0_CONFIG &&
+ (sel == 6 || /* Config6 */
+ sel == 7)) || /* Config7 */
+ (rd == MIPS_CP0_LLADDR &&
+ (sel == 2) && /* MAARI */
+ cpu_guest_has_maar &&
+ !cpu_guest_has_dyn_maar) ||
+ (rd == MIPS_CP0_ERRCTL &&
+ (sel == 0))) { /* ErrCtl */
+ val = cop0->reg[rd][sel];
+#ifdef CONFIG_CPU_LOONGSON64
+ } else if (rd == MIPS_CP0_DIAG &&
+ (sel == 0)) { /* Diag */
+ val = cop0->reg[rd][sel];
+#endif
+ } else {
+ val = 0;
+ er = EMULATE_FAIL;
+ }
+
+ if (er != EMULATE_FAIL) {
+ /* Sign extend */
+ if (inst.c0r_format.rs == mfc_op)
+ val = (int)val;
+ vcpu->arch.gprs[rt] = val;
+ }
+
+ trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
+ KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
+ KVM_TRACE_COP0(rd, sel), val);
+ break;
+
+ case dmtc_op:
+ case mtc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ cop0->stat[rd][sel]++;
+#endif
+ val = vcpu->arch.gprs[rt];
+ trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
+ KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
+ KVM_TRACE_COP0(rd, sel), val);
+
+ if (rd == MIPS_CP0_COUNT &&
+ sel == 0) { /* Count */
+ kvm_vz_lose_htimer(vcpu);
+ kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
+ } else if (rd == MIPS_CP0_COMPARE &&
+ sel == 0) { /* Compare */
+ kvm_mips_write_compare(vcpu,
+ vcpu->arch.gprs[rt],
+ true);
+ } else if (rd == MIPS_CP0_LLADDR &&
+ sel == 0) { /* LLAddr */
+ /*
+ * P5600 generates GPSI on guest MTC0 LLAddr.
+ * Only allow the guest to clear LLB.
+ */
+ if (cpu_guest_has_rw_llb &&
+ !(val & MIPS_LLADDR_LLB))
+ write_gc0_lladdr(0);
+ } else if (rd == MIPS_CP0_LLADDR &&
+ sel == 1 && /* MAAR */
+ cpu_guest_has_maar &&
+ !cpu_guest_has_dyn_maar) {
+ val = mips_process_maar(inst.c0r_format.rs,
+ val);
+
+ /* MAARI must be in range */
+ BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
+ ARRAY_SIZE(vcpu->arch.maar));
+ vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
+ val;
+ } else if (rd == MIPS_CP0_LLADDR &&
+ (sel == 2) && /* MAARI */
+ cpu_guest_has_maar &&
+ !cpu_guest_has_dyn_maar) {
+ kvm_write_maari(vcpu, val);
+ } else if (rd == MIPS_CP0_CONFIG &&
+ (sel == 6)) {
+ cop0->reg[rd][sel] = (int)val;
+ } else if (rd == MIPS_CP0_ERRCTL &&
+ (sel == 0)) { /* ErrCtl */
+ /* ignore the written value */
+#ifdef CONFIG_CPU_LOONGSON64
+ } else if (rd == MIPS_CP0_DIAG &&
+ (sel == 0)) { /* Diag */
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (val & LOONGSON_DIAG_BTB) {
+ /* Flush BTB */
+ set_c0_diag(LOONGSON_DIAG_BTB);
+ }
+ if (val & LOONGSON_DIAG_ITLB) {
+ /* Flush ITLB */
+ set_c0_diag(LOONGSON_DIAG_ITLB);
+ }
+ if (val & LOONGSON_DIAG_DTLB) {
+ /* Flush DTLB */
+ set_c0_diag(LOONGSON_DIAG_DTLB);
+ }
+ if (val & LOONGSON_DIAG_VTLB) {
+ /* Flush VTLB */
+ kvm_loongson_clear_guest_vtlb();
+ }
+ if (val & LOONGSON_DIAG_FTLB) {
+ /* Flush FTLB */
+ kvm_loongson_clear_guest_ftlb();
+ }
+ local_irq_restore(flags);
+#endif
+ } else {
+ er = EMULATE_FAIL;
+ }
+ break;
+
+ default:
+ er = EMULATE_FAIL;
+ break;
+ }
+ }
+ /* Rollback PC only if emulation was unsuccessful */
+ if (er == EMULATE_FAIL) {
+ kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
+ curr_pc, __func__, inst.word);
+
+ vcpu->arch.pc = curr_pc;
+ }
+
+ return er;
+}
+
+static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
+ u32 *opc, u32 cause,
+ struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+ u32 cache, op_inst, op, base;
+ s16 offset;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ unsigned long va, curr_pc;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ base = inst.i_format.rs;
+ op_inst = inst.i_format.rt;
+ if (cpu_has_mips_r6)
+ offset = inst.spec3_format.simmediate;
+ else
+ offset = inst.i_format.simmediate;
+ cache = op_inst & CacheOp_Cache;
+ op = op_inst & CacheOp_Op;
+
+ va = arch->gprs[base] + offset;
+
+ kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+ cache, op, base, arch->gprs[base], offset);
+
+ /* Secondary or tertiary cache ops ignored */
+ if (cache != Cache_I && cache != Cache_D)
+ return EMULATE_DONE;
+
+ switch (op_inst) {
+ case Index_Invalidate_I:
+ flush_icache_line_indexed(va);
+ return EMULATE_DONE;
+ case Index_Writeback_Inv_D:
+ flush_dcache_line_indexed(va);
+ return EMULATE_DONE;
+ case Hit_Invalidate_I:
+ case Hit_Invalidate_D:
+ case Hit_Writeback_Inv_D:
+ if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
+ /* We can just flush entire icache */
+ local_flush_icache_range(0, 0);
+ return EMULATE_DONE;
+ }
+
+ /* So far, other platforms support guest hit cache ops */
+ break;
+ default:
+ break;
+ }
+
+ kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+ curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
+ offset);
+ /* Rollback PC */
+ vcpu->arch.pc = curr_pc;
+
+ return EMULATE_FAIL;
+}
+
+#ifdef CONFIG_CPU_LOONGSON64
+static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
+ u32 *opc, u32 cause,
+ struct kvm_vcpu *vcpu)
+{
+ unsigned int rs, rd;
+ unsigned int hostcfg;
+ unsigned long curr_pc;
+ enum emulation_result er = EMULATE_DONE;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ rs = inst.loongson3_lscsr_format.rs;
+ rd = inst.loongson3_lscsr_format.rd;
+ switch (inst.loongson3_lscsr_format.fr) {
+ case 0x8: /* Read CPUCFG */
+ ++vcpu->stat.vz_cpucfg_exits;
+ hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);
+
+ switch (vcpu->arch.gprs[rs]) {
+ case LOONGSON_CFG0:
+ vcpu->arch.gprs[rd] = 0x14c000;
+ break;
+ case LOONGSON_CFG1:
+ hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
+ LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
+ LOONGSON_CFG1_SFBP);
+ vcpu->arch.gprs[rd] = hostcfg;
+ break;
+ case LOONGSON_CFG2:
+ hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
+ LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
+ vcpu->arch.gprs[rd] = hostcfg;
+ break;
+ case LOONGSON_CFG3:
+ vcpu->arch.gprs[rd] = hostcfg;
+ break;
+ default:
+ /* Don't export any other advanced features to guest */
+ vcpu->arch.gprs[rd] = 0;
+ break;
+ }
+ break;
+
+ default:
+ kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
+ inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ /* Rollback PC only if emulation was unsuccessful */
+ if (er == EMULATE_FAIL) {
+ kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
+ curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr);
+
+ vcpu->arch.pc = curr_pc;
+ }
+
+ return er;
+}
+#endif
+
+static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ union mips_instruction inst;
+ int rd, rt, sel;
+ int err;
+
+ /*
+ * Fetch the instruction.
+ */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ err = kvm_get_badinstr(opc, vcpu, &inst.word);
+ if (err)
+ return EMULATE_FAIL;
+
+ switch (inst.r_format.opcode) {
+ case cop0_op:
+ er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu);
+ break;
+#ifndef CONFIG_CPU_MIPSR6
+ case cache_op:
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
+ er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
+ break;
+#endif
+#ifdef CONFIG_CPU_LOONGSON64
+ case lwc2_op:
+ er = kvm_vz_gpsi_lwc2(inst, opc, cause, vcpu);
+ break;
+#endif
+ case spec3_op:
+ switch (inst.spec3_format.func) {
+#ifdef CONFIG_CPU_MIPSR6
+ case cache6_op:
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
+ er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
+ break;
+#endif
+ case rdhwr_op:
+ if (inst.r_format.rs || (inst.r_format.re >> 3))
+ goto unknown;
+
+ rd = inst.r_format.rd;
+ rt = inst.r_format.rt;
+ sel = inst.r_format.re & 0x7;
+
+ switch (rd) {
+ case MIPS_HWR_CC: /* Read count register */
+ arch->gprs[rt] =
+ (long)(int)kvm_mips_read_count(vcpu);
+ break;
+ default:
+ trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
+ KVM_TRACE_HWR(rd, sel), 0);
+ goto unknown;
+ }
+
+ trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
+ KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
+
+ er = update_pc(vcpu, cause);
+ break;
+ default:
+ goto unknown;
+ }
+ break;
+unknown:
+
+ default:
+ kvm_err("GPSI exception not supported (%p/%#x)\n",
+ opc, inst.word);
+ kvm_arch_vcpu_dump_regs(vcpu);
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ return er;
+}
+
+static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ union mips_instruction inst;
+ int err;
+
+ /*
+ * Fetch the instruction.
+ */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ err = kvm_get_badinstr(opc, vcpu, &inst.word);
+ if (err)
+ return EMULATE_FAIL;
+
+ /* complete MTC0 on behalf of guest and advance EPC */
+ if (inst.c0r_format.opcode == cop0_op &&
+ inst.c0r_format.rs == mtc_op &&
+ inst.c0r_format.z == 0) {
+ int rt = inst.c0r_format.rt;
+ int rd = inst.c0r_format.rd;
+ int sel = inst.c0r_format.sel;
+ unsigned int val = arch->gprs[rt];
+ unsigned int old_val, change;
+
+ trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
+ val);
+
+ if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
+ /* FR bit should read as zero if no FPU */
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ val &= ~(ST0_CU1 | ST0_FR);
+
+ /*
+ * Also don't allow FR to be set if host doesn't support
+ * it.
+ */
+ if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
+ val &= ~ST0_FR;
+
+ old_val = read_gc0_status();
+ change = val ^ old_val;
+
+ if (change & ST0_FR) {
+ /*
+ * FPU and Vector register state is made
+ * UNPREDICTABLE by a change of FR, so don't
+ * even bother saving it.
+ */
+ kvm_drop_fpu(vcpu);
+ }
+
+ /*
+ * If MSA state is already live, it is undefined how it
+ * interacts with FR=0 FPU state, and we don't want to
+ * hit reserved instruction exceptions trying to save
+ * the MSA state later when CU=1 && FR=1, so play it
+ * safe and save it first.
+ */
+ if (change & ST0_CU1 && !(val & ST0_FR) &&
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
+ kvm_lose_fpu(vcpu);
+
+ write_gc0_status(val);
+ } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
+ u32 old_cause = read_gc0_cause();
+ u32 change = old_cause ^ val;
+
+ /* DC bit enabling/disabling timer? */
+ if (change & CAUSEF_DC) {
+ if (val & CAUSEF_DC) {
+ kvm_vz_lose_htimer(vcpu);
+ kvm_mips_count_disable_cause(vcpu);
+ } else {
+ kvm_mips_count_enable_cause(vcpu);
+ }
+ }
+
+ /* Only certain bits are RW to the guest */
+ change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
+ CAUSEF_IP0 | CAUSEF_IP1);
+
+ /*
+ * WP can only be cleared: a WP change is kept only if WP was
+ * already set, so the guest may clear it but never set it.
+ */
+ change &= ~CAUSEF_WP | old_cause;
+
+ write_gc0_cause(old_cause ^ change);
+ } else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
+ write_gc0_intctl(val);
+ } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
+ old_val = read_gc0_config5();
+ change = val ^ old_val;
+ /* Handle changes in FPU/MSA modes */
+ preempt_disable();
+
+ /*
+ * Propagate FRE changes immediately if the FPU
+ * context is already loaded.
+ */
+ if (change & MIPS_CONF5_FRE &&
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
+ change_c0_config5(MIPS_CONF5_FRE, val);
+
+ preempt_enable();
+
+ val = old_val ^
+ (change & kvm_vz_config5_guest_wrmask(vcpu));
+ write_gc0_config5(val);
+ } else {
+ kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
+ opc, inst.word);
+ er = EMULATE_FAIL;
+ }
+
+ if (er != EMULATE_FAIL)
+ er = update_pc(vcpu, cause);
+ } else {
+ kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
+ opc, inst.word);
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ /*
+ * Presumably this is due to MC (guest mode change), so let's trace some
+ * relevant info.
+ */
+ trace_kvm_guest_mode_change(vcpu);
+
+ return EMULATE_DONE;
+}
+
+static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er;
+ union mips_instruction inst;
+ unsigned long curr_pc;
+ int err;
+
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ err = kvm_get_badinstr(opc, vcpu, &inst.word);
+ if (err)
+ return EMULATE_FAIL;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ er = kvm_mips_emul_hypcall(vcpu, inst);
+ if (er == EMULATE_FAIL)
+ vcpu->arch.pc = curr_pc;
+
+ return er;
+}
+
+static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
+ u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ u32 inst;
+
+ /*
+ * Fetch the instruction.
+ */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ kvm_get_badinstr(opc, vcpu, &inst);
+
+ kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
+ gexccode, opc, inst, read_gc0_status());
+
+ return EMULATE_FAIL;
+}
+
+static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
+{
+ u32 *opc = (u32 *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
+ MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
+ int ret = RESUME_GUEST;
+
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
+ switch (gexccode) {
+ case MIPS_GCTL0_GEXC_GPSI:
+ ++vcpu->stat.vz_gpsi_exits;
+ er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
+ break;
+ case MIPS_GCTL0_GEXC_GSFC:
+ ++vcpu->stat.vz_gsfc_exits;
+ er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
+ break;
+ case MIPS_GCTL0_GEXC_HC:
+ ++vcpu->stat.vz_hc_exits;
+ er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
+ break;
+ case MIPS_GCTL0_GEXC_GRR:
+ ++vcpu->stat.vz_grr_exits;
+ er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
+ vcpu);
+ break;
+ case MIPS_GCTL0_GEXC_GVA:
+ ++vcpu->stat.vz_gva_exits;
+ er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
+ vcpu);
+ break;
+ case MIPS_GCTL0_GEXC_GHFC:
+ ++vcpu->stat.vz_ghfc_exits;
+ er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
+ break;
+ case MIPS_GCTL0_GEXC_GPA:
+ ++vcpu->stat.vz_gpa_exits;
+ er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
+ vcpu);
+ break;
+ default:
+ ++vcpu->stat.vz_resvd_exits;
+ er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
+ vcpu);
+ break;
+
+ }
+
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else if (er == EMULATE_HYPERCALL) {
+ ret = kvm_mips_handle_hypcall(vcpu);
+ } else {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+/**
+ * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
+ * @vcpu: Virtual CPU context.
+ *
+ * Handle when the guest attempts to use a coprocessor which hasn't been allowed
+ * by the root context.
+ */
+static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
+{
+ u32 cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_FAIL;
+ int ret = RESUME_GUEST;
+
+ if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
+ /*
+ * If guest FPU not present, the FPU operation should have been
+ * treated as a reserved instruction!
+ * If FPU already in use, we shouldn't get this at all.
+ */
+ if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
+ preempt_enable();
+ return EMULATE_FAIL;
+ }
+
+ kvm_own_fpu(vcpu);
+ er = EMULATE_DONE;
+ }
+ /* other coprocessors not handled */
+
+ switch (er) {
+ case EMULATE_DONE:
+ ret = RESUME_GUEST;
+ break;
+
+ case EMULATE_FAIL:
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ break;
+
+ default:
+ BUG();
+ }
+ return ret;
+}
+
+/**
+ * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
+ * @vcpu: Virtual CPU context.
+ *
+ * Handle when the guest attempts to use MSA when it is disabled in the root
+ * context.
+ */
+static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
+{
+ /*
+ * If MSA not present or not exposed to guest or FR=0, the MSA operation
+ * should have been treated as a reserved instruction!
+ * Same if CU1=1, FR=0.
+ * If MSA already in use, we shouldn't get this at all.
+ */
+ if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
+ (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
+ !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+ }
+
+ kvm_own_msa(vcpu);
+
+ return RESUME_GUEST;
+}
+
+static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ u32 *opc = (u32 *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
+ union mips_instruction inst;
+ enum emulation_result er = EMULATE_DONE;
+ int err, ret = RESUME_GUEST;
+
+ if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
+ /* A code fetch fault doesn't count as an MMIO */
+ if (kvm_is_ifetch_fault(&vcpu->arch)) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+ }
+
+ /* Fetch the instruction */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ err = kvm_get_badinstr(opc, vcpu, &inst.word);
+ if (err) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+ }
+
+ /* Treat as MMIO */
+ er = kvm_mips_emulate_load(inst, cause, vcpu);
+ if (er == EMULATE_FAIL) {
+ kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
+ opc, badvaddr);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ }
+ }
+
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else if (er == EMULATE_DO_MMIO) {
+ run->exit_reason = KVM_EXIT_MMIO;
+ ret = RESUME_HOST;
+ } else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ u32 *opc = (u32 *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
+ union mips_instruction inst;
+ enum emulation_result er = EMULATE_DONE;
+ int err;
+ int ret = RESUME_GUEST;
+
+ /* Just try the access again if we couldn't do the translation */
+ if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
+ return RESUME_GUEST;
+ vcpu->arch.host_cp0_badvaddr = badvaddr;
+
+ if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
+ /* Fetch the instruction */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ err = kvm_get_badinstr(opc, vcpu, &inst.word);
+ if (err) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+ }
+
+ /* Treat as MMIO */
+ er = kvm_mips_emulate_store(inst, cause, vcpu);
+ if (er == EMULATE_FAIL) {
+ kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
+ opc, badvaddr);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ }
+ }
+
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else if (er == EMULATE_DO_MMIO) {
+ run->exit_reason = KVM_EXIT_MMIO;
+ ret = RESUME_HOST;
+ } else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static u64 kvm_vz_get_one_regs[] = {
+ KVM_REG_MIPS_CP0_INDEX,
+ KVM_REG_MIPS_CP0_ENTRYLO0,
+ KVM_REG_MIPS_CP0_ENTRYLO1,
+ KVM_REG_MIPS_CP0_CONTEXT,
+ KVM_REG_MIPS_CP0_PAGEMASK,
+ KVM_REG_MIPS_CP0_PAGEGRAIN,
+ KVM_REG_MIPS_CP0_WIRED,
+ KVM_REG_MIPS_CP0_HWRENA,
+ KVM_REG_MIPS_CP0_BADVADDR,
+ KVM_REG_MIPS_CP0_COUNT,
+ KVM_REG_MIPS_CP0_ENTRYHI,
+ KVM_REG_MIPS_CP0_COMPARE,
+ KVM_REG_MIPS_CP0_STATUS,
+ KVM_REG_MIPS_CP0_INTCTL,
+ KVM_REG_MIPS_CP0_CAUSE,
+ KVM_REG_MIPS_CP0_EPC,
+ KVM_REG_MIPS_CP0_PRID,
+ KVM_REG_MIPS_CP0_EBASE,
+ KVM_REG_MIPS_CP0_CONFIG,
+ KVM_REG_MIPS_CP0_CONFIG1,
+ KVM_REG_MIPS_CP0_CONFIG2,
+ KVM_REG_MIPS_CP0_CONFIG3,
+ KVM_REG_MIPS_CP0_CONFIG4,
+ KVM_REG_MIPS_CP0_CONFIG5,
+ KVM_REG_MIPS_CP0_CONFIG6,
+#ifdef CONFIG_64BIT
+ KVM_REG_MIPS_CP0_XCONTEXT,
+#endif
+ KVM_REG_MIPS_CP0_ERROREPC,
+
+ KVM_REG_MIPS_COUNT_CTL,
+ KVM_REG_MIPS_COUNT_RESUME,
+ KVM_REG_MIPS_COUNT_HZ,
+};
+
+static u64 kvm_vz_get_one_regs_contextconfig[] = {
+ KVM_REG_MIPS_CP0_CONTEXTCONFIG,
+#ifdef CONFIG_64BIT
+ KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
+#endif
+};
+
+static u64 kvm_vz_get_one_regs_segments[] = {
+ KVM_REG_MIPS_CP0_SEGCTL0,
+ KVM_REG_MIPS_CP0_SEGCTL1,
+ KVM_REG_MIPS_CP0_SEGCTL2,
+};
+
+static u64 kvm_vz_get_one_regs_htw[] = {
+ KVM_REG_MIPS_CP0_PWBASE,
+ KVM_REG_MIPS_CP0_PWFIELD,
+ KVM_REG_MIPS_CP0_PWSIZE,
+ KVM_REG_MIPS_CP0_PWCTL,
+};
+
+static u64 kvm_vz_get_one_regs_kscratch[] = {
+ KVM_REG_MIPS_CP0_KSCRATCH1,
+ KVM_REG_MIPS_CP0_KSCRATCH2,
+ KVM_REG_MIPS_CP0_KSCRATCH3,
+ KVM_REG_MIPS_CP0_KSCRATCH4,
+ KVM_REG_MIPS_CP0_KSCRATCH5,
+ KVM_REG_MIPS_CP0_KSCRATCH6,
+};
+
+static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
+{
+ unsigned long ret;
+
+ ret = ARRAY_SIZE(kvm_vz_get_one_regs);
+ if (cpu_guest_has_userlocal)
+ ++ret;
+ if (cpu_guest_has_badinstr)
+ ++ret;
+ if (cpu_guest_has_badinstrp)
+ ++ret;
+ if (cpu_guest_has_contextconfig)
+ ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
+ if (cpu_guest_has_segments)
+ ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
+ if (cpu_guest_has_htw || cpu_guest_has_ldpte)
+ ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
+ if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
+ ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
+ ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
+
+ return ret;
+}
+
+static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
+{
+ u64 index;
+ unsigned int i;
+
+ if (copy_to_user(indices, kvm_vz_get_one_regs,
+ sizeof(kvm_vz_get_one_regs)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_vz_get_one_regs);
+
+ if (cpu_guest_has_userlocal) {
+ index = KVM_REG_MIPS_CP0_USERLOCAL;
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+ if (cpu_guest_has_badinstr) {
+ index = KVM_REG_MIPS_CP0_BADINSTR;
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+ if (cpu_guest_has_badinstrp) {
+ index = KVM_REG_MIPS_CP0_BADINSTRP;
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+ if (cpu_guest_has_contextconfig) {
+ if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
+ sizeof(kvm_vz_get_one_regs_contextconfig)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
+ }
+ if (cpu_guest_has_segments) {
+ if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
+ sizeof(kvm_vz_get_one_regs_segments)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
+ }
+ if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
+ if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
+ sizeof(kvm_vz_get_one_regs_htw)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
+ }
+ if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
+ index = KVM_REG_MIPS_CP0_MAAR(i);
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+
+ index = KVM_REG_MIPS_CP0_MAARI;
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+ for (i = 0; i < 6; ++i) {
+ if (!cpu_guest_has_kscr(i + 2))
+ continue;
+
+ if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
+ sizeof(kvm_vz_get_one_regs_kscratch[i])))
+ return -EFAULT;
+ ++indices;
+ }
+
+ return 0;
+}
+
+static inline s64 entrylo_kvm_to_user(unsigned long v)
+{
+ s64 mask, ret = v;
+
+ if (BITS_PER_LONG == 32) {
+ /*
+ * KVM API exposes 64-bit version of the register, so move the
+ * RI/XI bits up into place.
+ */
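+ /* on a 32-bit kernel RI/XI are bits 31:30; the API keeps them at 63:62 */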
+ mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
+ ret &= ~mask;
+ ret |= ((s64)v & mask) << 32;
+ }
+ return ret;
+}
+
+static inline unsigned long entrylo_user_to_kvm(s64 v)
+{
+ unsigned long mask, ret = v;
+
+ if (BITS_PER_LONG == 32) {
+ /*
+ * KVM API exposes 64-bit version of the register, so move the
+ * RI/XI bits down into place.
+ */
+ mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
+ ret &= ~mask;
+ ret |= (v >> 32) & mask;
+ }
+ return ret;
+}
+
+static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg,
+ s64 *v)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned int idx;
+
+ switch (reg->id) {
+ case KVM_REG_MIPS_CP0_INDEX:
+ *v = (long)read_gc0_index();
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYLO0:
+ *v = entrylo_kvm_to_user(read_gc0_entrylo0());
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYLO1:
+ *v = entrylo_kvm_to_user(read_gc0_entrylo1());
+ break;
+ case KVM_REG_MIPS_CP0_CONTEXT:
+ *v = (long)read_gc0_context();
+ break;
+ case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
+ if (!cpu_guest_has_contextconfig)
+ return -EINVAL;
+ *v = read_gc0_contextconfig();
+ break;
+ case KVM_REG_MIPS_CP0_USERLOCAL:
+ if (!cpu_guest_has_userlocal)
+ return -EINVAL;
+ *v = read_gc0_userlocal();
+ break;
+#ifdef CONFIG_64BIT
+ case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
+ if (!cpu_guest_has_contextconfig)
+ return -EINVAL;
+ *v = read_gc0_xcontextconfig();
+ break;
+#endif
+ case KVM_REG_MIPS_CP0_PAGEMASK:
+ *v = (long)read_gc0_pagemask();
+ break;
+ case KVM_REG_MIPS_CP0_PAGEGRAIN:
+ *v = (long)read_gc0_pagegrain();
+ break;
+ case KVM_REG_MIPS_CP0_SEGCTL0:
+ if (!cpu_guest_has_segments)
+ return -EINVAL;
+ *v = read_gc0_segctl0();
+ break;
+ case KVM_REG_MIPS_CP0_SEGCTL1:
+ if (!cpu_guest_has_segments)
+ return -EINVAL;
+ *v = read_gc0_segctl1();
+ break;
+ case KVM_REG_MIPS_CP0_SEGCTL2:
+ if (!cpu_guest_has_segments)
+ return -EINVAL;
+ *v = read_gc0_segctl2();
+ break;
+ case KVM_REG_MIPS_CP0_PWBASE:
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
+ return -EINVAL;
+ *v = read_gc0_pwbase();
+ break;
+ case KVM_REG_MIPS_CP0_PWFIELD:
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
+ return -EINVAL;
+ *v = read_gc0_pwfield();
+ break;
+ case KVM_REG_MIPS_CP0_PWSIZE:
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
+ return -EINVAL;
+ *v = read_gc0_pwsize();
+ break;
+ case KVM_REG_MIPS_CP0_WIRED:
+ *v = (long)read_gc0_wired();
+ break;
+ case KVM_REG_MIPS_CP0_PWCTL:
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
+ return -EINVAL;
+ *v = read_gc0_pwctl();
+ break;
+ case KVM_REG_MIPS_CP0_HWRENA:
+ *v = (long)read_gc0_hwrena();
+ break;
+ case KVM_REG_MIPS_CP0_BADVADDR:
+ *v = (long)read_gc0_badvaddr();
+ break;
+ case KVM_REG_MIPS_CP0_BADINSTR:
+ if (!cpu_guest_has_badinstr)
+ return -EINVAL;
+ *v = read_gc0_badinstr();
+ break;
+ case KVM_REG_MIPS_CP0_BADINSTRP:
+ if (!cpu_guest_has_badinstrp)
+ return -EINVAL;
+ *v = read_gc0_badinstrp();
+ break;
+ case KVM_REG_MIPS_CP0_COUNT:
+ *v = kvm_mips_read_count(vcpu);
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYHI:
+ *v = (long)read_gc0_entryhi();
+ break;
+ case KVM_REG_MIPS_CP0_COMPARE:
+ *v = (long)read_gc0_compare();
+ break;
+ case KVM_REG_MIPS_CP0_STATUS:
+ *v = (long)read_gc0_status();
+ break;
+ case KVM_REG_MIPS_CP0_INTCTL:
+ *v = read_gc0_intctl();
+ break;
+ case KVM_REG_MIPS_CP0_CAUSE:
+ *v = (long)read_gc0_cause();
+ break;
+ case KVM_REG_MIPS_CP0_EPC:
+ *v = (long)read_gc0_epc();
+ break;
+ case KVM_REG_MIPS_CP0_PRID:
+ switch (boot_cpu_type()) {
+ case CPU_CAVIUM_OCTEON3:
+ /* Octeon III has a read-only guest.PRid */
+ *v = read_gc0_prid();
+ break;
+ default:
+ *v = (long)kvm_read_c0_guest_prid(cop0);
+ break;
+ }
+ break;
+ case KVM_REG_MIPS_CP0_EBASE:
+ *v = kvm_vz_read_gc0_ebase();
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG:
+ *v = read_gc0_config();
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG1:
+ if (!cpu_guest_has_conf1)
+ return -EINVAL;
+ *v = read_gc0_config1();
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG2:
+ if (!cpu_guest_has_conf2)
+ return -EINVAL;
+ *v = read_gc0_config2();
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG3:
+ if (!cpu_guest_has_conf3)
+ return -EINVAL;
+ *v = read_gc0_config3();
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG4:
+ if (!cpu_guest_has_conf4)
+ return -EINVAL;
+ *v = read_gc0_config4();
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG5:
+ if (!cpu_guest_has_conf5)
+ return -EINVAL;
+ *v = read_gc0_config5();
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG6:
+ *v = kvm_read_sw_gc0_config6(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
+ if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
+ if (idx >= ARRAY_SIZE(vcpu->arch.maar))
+ return -EINVAL;
+ *v = vcpu->arch.maar[idx];
+ break;
+ case KVM_REG_MIPS_CP0_MAARI:
+ if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
+ return -EINVAL;
+ *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
+ break;
+#ifdef CONFIG_64BIT
+ case KVM_REG_MIPS_CP0_XCONTEXT:
+ *v = read_gc0_xcontext();
+ break;
+#endif
+ case KVM_REG_MIPS_CP0_ERROREPC:
+ *v = (long)read_gc0_errorepc();
+ break;
+ case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
+ idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
+ if (!cpu_guest_has_kscr(idx))
+ return -EINVAL;
+ switch (idx) {
+ case 2:
+ *v = (long)read_gc0_kscratch1();
+ break;
+ case 3:
+ *v = (long)read_gc0_kscratch2();
+ break;
+ case 4:
+ *v = (long)read_gc0_kscratch3();
+ break;
+ case 5:
+ *v = (long)read_gc0_kscratch4();
+ break;
+ case 6:
+ *v = (long)read_gc0_kscratch5();
+ break;
+ case 7:
+ *v = (long)read_gc0_kscratch6();
+ break;
+ }
+ break;
+ case KVM_REG_MIPS_COUNT_CTL:
+ *v = vcpu->arch.count_ctl;
+ break;
+ case KVM_REG_MIPS_COUNT_RESUME:
+ *v = ktime_to_ns(vcpu->arch.count_resume);
+ break;
+ case KVM_REG_MIPS_COUNT_HZ:
+ *v = vcpu->arch.count_hz;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg,
+ s64 v)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned int idx;
+ int ret = 0;
+ unsigned int cur, change;
+
+ switch (reg->id) {
+ case KVM_REG_MIPS_CP0_INDEX:
+ write_gc0_index(v);
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYLO0:
+ write_gc0_entrylo0(entrylo_user_to_kvm(v));
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYLO1:
+ write_gc0_entrylo1(entrylo_user_to_kvm(v));
+ break;
+ case KVM_REG_MIPS_CP0_CONTEXT:
+ write_gc0_context(v);
+ break;
+ case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
+ if (!cpu_guest_has_contextconfig)
+ return -EINVAL;
+ write_gc0_contextconfig(v);
+ break;
+ case KVM_REG_MIPS_CP0_USERLOCAL:
+ if (!cpu_guest_has_userlocal)
+ return -EINVAL;
+ write_gc0_userlocal(v);
+ break;
+#ifdef CONFIG_64BIT
+ case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
+ if (!cpu_guest_has_contextconfig)
+ return -EINVAL;
+ write_gc0_xcontextconfig(v);
+ break;
+#endif
+ case KVM_REG_MIPS_CP0_PAGEMASK:
+ write_gc0_pagemask(v);
+ break;
+ case KVM_REG_MIPS_CP0_PAGEGRAIN:
+ write_gc0_pagegrain(v);
+ break;
+ case KVM_REG_MIPS_CP0_SEGCTL0:
+ if (!cpu_guest_has_segments)
+ return -EINVAL;
+ write_gc0_segctl0(v);
+ break;
+ case KVM_REG_MIPS_CP0_SEGCTL1:
+ if (!cpu_guest_has_segments)
+ return -EINVAL;
+ write_gc0_segctl1(v);
+ break;
+ case KVM_REG_MIPS_CP0_SEGCTL2:
+ if (!cpu_guest_has_segments)
+ return -EINVAL;
+ write_gc0_segctl2(v);
+ break;
+ case KVM_REG_MIPS_CP0_PWBASE:
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
+ return -EINVAL;
+ write_gc0_pwbase(v);
+ break;
+ case KVM_REG_MIPS_CP0_PWFIELD:
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
+ return -EINVAL;
+ write_gc0_pwfield(v);
+ break;
+ case KVM_REG_MIPS_CP0_PWSIZE:
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
+ return -EINVAL;
+ write_gc0_pwsize(v);
+ break;
+ case KVM_REG_MIPS_CP0_WIRED:
+ change_gc0_wired(MIPSR6_WIRED_WIRED, v);
+ break;
+ case KVM_REG_MIPS_CP0_PWCTL:
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
+ return -EINVAL;
+ write_gc0_pwctl(v);
+ break;
+ case KVM_REG_MIPS_CP0_HWRENA:
+ write_gc0_hwrena(v);
+ break;
+ case KVM_REG_MIPS_CP0_BADVADDR:
+ write_gc0_badvaddr(v);
+ break;
+ case KVM_REG_MIPS_CP0_BADINSTR:
+ if (!cpu_guest_has_badinstr)
+ return -EINVAL;
+ write_gc0_badinstr(v);
+ break;
+ case KVM_REG_MIPS_CP0_BADINSTRP:
+ if (!cpu_guest_has_badinstrp)
+ return -EINVAL;
+ write_gc0_badinstrp(v);
+ break;
+ case KVM_REG_MIPS_CP0_COUNT:
+ kvm_mips_write_count(vcpu, v);
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYHI:
+ write_gc0_entryhi(v);
+ break;
+ case KVM_REG_MIPS_CP0_COMPARE:
+ kvm_mips_write_compare(vcpu, v, false);
+ break;
+ case KVM_REG_MIPS_CP0_STATUS:
+ write_gc0_status(v);
+ break;
+ case KVM_REG_MIPS_CP0_INTCTL:
+ write_gc0_intctl(v);
+ break;
+ case KVM_REG_MIPS_CP0_CAUSE:
+ /*
+ * If the timer is stopped or started (DC bit) it must look
+ * atomic with changes to the timer interrupt pending bit (TI).
+ * A timer interrupt should not happen in between.
+ */
+ if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
+ if (v & CAUSEF_DC) {
+ /* disable timer first */
+ kvm_mips_count_disable_cause(vcpu);
+ change_gc0_cause((u32)~CAUSEF_DC, v);
+ } else {
+ /* enable timer last */
+ change_gc0_cause((u32)~CAUSEF_DC, v);
+ kvm_mips_count_enable_cause(vcpu);
+ }
+ } else {
+ write_gc0_cause(v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_EPC:
+ write_gc0_epc(v);
+ break;
+ case KVM_REG_MIPS_CP0_PRID:
+ switch (boot_cpu_type()) {
+ case CPU_CAVIUM_OCTEON3:
+ /* Octeon III has a guest.PRid, but it's read-only */
+ break;
+ default:
+ kvm_write_c0_guest_prid(cop0, v);
+ break;
+ }
+ break;
+ case KVM_REG_MIPS_CP0_EBASE:
+ kvm_vz_write_gc0_ebase(v);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG:
+ cur = read_gc0_config();
+ change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ write_gc0_config(v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG1:
+ if (!cpu_guest_has_conf1)
+ break;
+ cur = read_gc0_config1();
+ change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ write_gc0_config1(v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG2:
+ if (!cpu_guest_has_conf2)
+ break;
+ cur = read_gc0_config2();
+ change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ write_gc0_config2(v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG3:
+ if (!cpu_guest_has_conf3)
+ break;
+ cur = read_gc0_config3();
+ change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ write_gc0_config3(v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG4:
+ if (!cpu_guest_has_conf4)
+ break;
+ cur = read_gc0_config4();
+ change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ write_gc0_config4(v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG5:
+ if (!cpu_guest_has_conf5)
+ break;
+ cur = read_gc0_config5();
+ change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ write_gc0_config5(v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG6:
+ cur = kvm_read_sw_gc0_config6(cop0);
+ change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_sw_gc0_config6(cop0, (int)v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
+ if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
+ if (idx >= ARRAY_SIZE(vcpu->arch.maar))
+ return -EINVAL;
+ vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
+ break;
+ case KVM_REG_MIPS_CP0_MAARI:
+ if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
+ return -EINVAL;
+ kvm_write_maari(vcpu, v);
+ break;
+#ifdef CONFIG_64BIT
+ case KVM_REG_MIPS_CP0_XCONTEXT:
+ write_gc0_xcontext(v);
+ break;
+#endif
+ case KVM_REG_MIPS_CP0_ERROREPC:
+ write_gc0_errorepc(v);
+ break;
+ case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
+ idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
+ if (!cpu_guest_has_kscr(idx))
+ return -EINVAL;
+ switch (idx) {
+ case 2:
+ write_gc0_kscratch1(v);
+ break;
+ case 3:
+ write_gc0_kscratch2(v);
+ break;
+ case 4:
+ write_gc0_kscratch3(v);
+ break;
+ case 5:
+ write_gc0_kscratch4(v);
+ break;
+ case 6:
+ write_gc0_kscratch5(v);
+ break;
+ case 7:
+ write_gc0_kscratch6(v);
+ break;
+ }
+ break;
+ case KVM_REG_MIPS_COUNT_CTL:
+ ret = kvm_mips_set_count_ctl(vcpu, v);
+ break;
+ case KVM_REG_MIPS_COUNT_RESUME:
+ ret = kvm_mips_set_count_resume(vcpu, v);
+ break;
+ case KVM_REG_MIPS_COUNT_HZ:
+ ret = kvm_mips_set_count_hz(vcpu, v);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ret;
+}
+
+#define guestid_cache(cpu) (cpu_data[cpu].guestid_cache)
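+/*
+ * GuestIDs are allocated much like ASIDs: the low GUESTID_MASK bits hold the
+ * ID itself and the upper bits hold a version number, so the guest/root TLBs
+ * only need a full flush when the ID space wraps and a new version begins.
+ */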
+static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
+{
+ unsigned long guestid = guestid_cache(cpu);
+
+ if (!(++guestid & GUESTID_MASK)) {
+ if (cpu_has_vtag_icache)
+ flush_icache_all();
+
+ if (!guestid) /* fix version if needed */
+ guestid = GUESTID_FIRST_VERSION;
+
+ ++guestid; /* guestid 0 reserved for root */
+
+ /* start new guestid cycle */
+ kvm_vz_local_flush_roottlb_all_guests();
+ kvm_vz_local_flush_guesttlb_all();
+ }
+
+ guestid_cache(cpu) = guestid;
+}
+
+/* Returns 1 if the guest TLB may be clobbered */
+static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
+{
+ int ret = 0;
+ int i;
+
+ if (!kvm_request_pending(vcpu))
+ return 0;
+
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
+ if (cpu_has_guestid) {
+ /* Drop all GuestIDs for this VCPU */
+ for_each_possible_cpu(i)
+ vcpu->arch.vzguestid[i] = 0;
+ /* This will clobber guest TLB contents too */
+ ret = 1;
+ }
+ /*
+ * For Root ASID Dealias (RAD) we don't do anything here, but we
+ * still need the request to ensure we recheck asid_flush_mask.
+ * We can still return 0 as only the root TLB will be affected
+ * by a root ASID flush.
+ */
+ }
+
+ return ret;
+}
+
+static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
+{
+ unsigned int wired = read_gc0_wired();
+ struct kvm_mips_tlb *tlbs;
+ int i;
+
+ /* Expand the wired TLB array if necessary */
+ wired &= MIPSR6_WIRED_WIRED;
+ if (wired > vcpu->arch.wired_tlb_limit) {
+ tlbs = krealloc(vcpu->arch.wired_tlb, wired *
+ sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
+ if (WARN_ON(!tlbs)) {
+ /* Save whatever we can */
+ wired = vcpu->arch.wired_tlb_limit;
+ } else {
+ vcpu->arch.wired_tlb = tlbs;
+ vcpu->arch.wired_tlb_limit = wired;
+ }
+ }
+
+ if (wired)
+ /* Save wired entries from the guest TLB */
+ kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
+ /* Invalidate any dropped entries since last time */
+ for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
+ vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
+ vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
+ vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
+ vcpu->arch.wired_tlb[i].tlb_mask = 0;
+ }
+ vcpu->arch.wired_tlb_used = wired;
+}
+
+static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
+{
+ /* Load wired entries into the guest TLB */
+ if (vcpu->arch.wired_tlb)
+ kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
+ vcpu->arch.wired_tlb_used);
+}
+
+static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
+ bool migrated;
+
+ /*
+ * Are we entering guest context on a different CPU to last time?
+ * If so, the VCPU's guest TLB state on this CPU may be stale.
+ */
+ migrated = (vcpu->arch.last_exec_cpu != cpu);
+ vcpu->arch.last_exec_cpu = cpu;
+
+ /*
+ * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
+ * remains set until another vcpu is loaded in. As a rule GuestRID
+ * remains zeroed when in root context unless the kernel is busy
+ * manipulating guest tlb entries.
+ */
+ if (cpu_has_guestid) {
+ /*
+ * Check if our GuestID is of an older version and thus invalid.
+ *
+ * We also discard the stored GuestID if we've executed on
+ * another CPU, as the guest mappings may have changed without
+ * hypervisor knowledge.
+ */
+ if (migrated ||
+ (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
+ GUESTID_VERSION_MASK) {
+ kvm_vz_get_new_guestid(cpu, vcpu);
+ vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
+ trace_kvm_guestid_change(vcpu,
+ vcpu->arch.vzguestid[cpu]);
+ }
+
+ /* Restore GuestID */
+ change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
+ } else {
+ /*
+ * The Guest TLB only stores a single guest's TLB state, so
+ * flush it if another VCPU has executed on this CPU.
+ *
+ * We also flush if we've executed on another CPU, as the guest
+ * mappings may have changed without hypervisor knowledge.
+ */
+ if (migrated || last_exec_vcpu[cpu] != vcpu)
+ kvm_vz_local_flush_guesttlb_all();
+ last_exec_vcpu[cpu] = vcpu;
+
+ /*
+ * Root ASID dealiases guest GPA mappings in the root TLB.
+ * Allocate new root ASID if needed.
+ */
+ if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
+ get_new_mmu_context(gpa_mm);
+ else
+ check_mmu_context(gpa_mm);
+ }
+}
+
+static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ bool migrated, all;
+
+ /*
+ * Have we migrated to a different CPU?
+ * If so, any old guest TLB state may be stale.
+ */
+ migrated = (vcpu->arch.last_sched_cpu != cpu);
+
+ /*
+ * Was this the last VCPU to run on this CPU?
+ * If not, any old guest state from this VCPU will have been clobbered.
+ */
+ all = migrated || (last_vcpu[cpu] != vcpu);
+ last_vcpu[cpu] = vcpu;
+
+ /*
+ * Restore CP0_Wired unconditionally as we clear it after use, and
+ * restore wired guest TLB entries (while in guest context).
+ */
+ kvm_restore_gc0_wired(cop0);
+ if (current->flags & PF_VCPU) {
+ tlbw_use_hazard();
+ kvm_vz_vcpu_load_tlb(vcpu, cpu);
+ kvm_vz_vcpu_load_wired(vcpu);
+ }
+
+ /*
+ * Restore timer state regardless, as e.g. Cause.TI can change over time
+ * if left unmaintained.
+ */
+ kvm_vz_restore_timer(vcpu);
+
+ /* Set MC bit if we want to trace guest mode changes */
+ if (kvm_trace_guest_mode_change)
+ set_c0_guestctl0(MIPS_GCTL0_MC);
+ else
+ clear_c0_guestctl0(MIPS_GCTL0_MC);
+
+ /* Don't bother restoring registers multiple times unless necessary */
+ if (!all)
+ return 0;
+
+ /*
+ * Restore config registers first, as some implementations restrict
+ * writes to other registers when the corresponding feature bits aren't
+ * set. For example Status.CU1 cannot be set unless Config1.FP is set.
+ */
+ kvm_restore_gc0_config(cop0);
+ if (cpu_guest_has_conf1)
+ kvm_restore_gc0_config1(cop0);
+ if (cpu_guest_has_conf2)
+ kvm_restore_gc0_config2(cop0);
+ if (cpu_guest_has_conf3)
+ kvm_restore_gc0_config3(cop0);
+ if (cpu_guest_has_conf4)
+ kvm_restore_gc0_config4(cop0);
+ if (cpu_guest_has_conf5)
+ kvm_restore_gc0_config5(cop0);
+ if (cpu_guest_has_conf6)
+ kvm_restore_gc0_config6(cop0);
+ if (cpu_guest_has_conf7)
+ kvm_restore_gc0_config7(cop0);
+
+ kvm_restore_gc0_index(cop0);
+ kvm_restore_gc0_entrylo0(cop0);
+ kvm_restore_gc0_entrylo1(cop0);
+ kvm_restore_gc0_context(cop0);
+ if (cpu_guest_has_contextconfig)
+ kvm_restore_gc0_contextconfig(cop0);
+#ifdef CONFIG_64BIT
+ kvm_restore_gc0_xcontext(cop0);
+ if (cpu_guest_has_contextconfig)
+ kvm_restore_gc0_xcontextconfig(cop0);
+#endif
+ kvm_restore_gc0_pagemask(cop0);
+ kvm_restore_gc0_pagegrain(cop0);
+ kvm_restore_gc0_hwrena(cop0);
+ kvm_restore_gc0_badvaddr(cop0);
+ kvm_restore_gc0_entryhi(cop0);
+ kvm_restore_gc0_status(cop0);
+ kvm_restore_gc0_intctl(cop0);
+ kvm_restore_gc0_epc(cop0);
+ kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
+ if (cpu_guest_has_userlocal)
+ kvm_restore_gc0_userlocal(cop0);
+
+ kvm_restore_gc0_errorepc(cop0);
+
+ /* restore KScratch registers if enabled in guest */
+ if (cpu_guest_has_conf4) {
+ if (cpu_guest_has_kscr(2))
+ kvm_restore_gc0_kscratch1(cop0);
+ if (cpu_guest_has_kscr(3))
+ kvm_restore_gc0_kscratch2(cop0);
+ if (cpu_guest_has_kscr(4))
+ kvm_restore_gc0_kscratch3(cop0);
+ if (cpu_guest_has_kscr(5))
+ kvm_restore_gc0_kscratch4(cop0);
+ if (cpu_guest_has_kscr(6))
+ kvm_restore_gc0_kscratch5(cop0);
+ if (cpu_guest_has_kscr(7))
+ kvm_restore_gc0_kscratch6(cop0);
+ }
+
+ if (cpu_guest_has_badinstr)
+ kvm_restore_gc0_badinstr(cop0);
+ if (cpu_guest_has_badinstrp)
+ kvm_restore_gc0_badinstrp(cop0);
+
+ if (cpu_guest_has_segments) {
+ kvm_restore_gc0_segctl0(cop0);
+ kvm_restore_gc0_segctl1(cop0);
+ kvm_restore_gc0_segctl2(cop0);
+ }
+
+ /* restore HTW registers */
+ if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
+ kvm_restore_gc0_pwbase(cop0);
+ kvm_restore_gc0_pwfield(cop0);
+ kvm_restore_gc0_pwsize(cop0);
+ kvm_restore_gc0_pwctl(cop0);
+ }
+
+ /* restore Root.GuestCtl2 from unused Guest guestctl2 register */
+ if (cpu_has_guestctl2)
+ write_c0_guestctl2(
+ cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);
+
+	/*
+	 * We should clear the linked load bit to break interrupted atomics.
+	 * This prevents an SC on the next VCPU from succeeding by matching
+	 * an LL on the previous VCPU.
+	 */
+ */
+ if (vcpu->kvm->created_vcpus > 1)
+ write_gc0_lladdr(0);
+
+ return 0;
+}
+
+static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+ if (current->flags & PF_VCPU)
+ kvm_vz_vcpu_save_wired(vcpu);
+
+ kvm_lose_fpu(vcpu);
+
+ kvm_save_gc0_index(cop0);
+ kvm_save_gc0_entrylo0(cop0);
+ kvm_save_gc0_entrylo1(cop0);
+ kvm_save_gc0_context(cop0);
+ if (cpu_guest_has_contextconfig)
+ kvm_save_gc0_contextconfig(cop0);
+#ifdef CONFIG_64BIT
+ kvm_save_gc0_xcontext(cop0);
+ if (cpu_guest_has_contextconfig)
+ kvm_save_gc0_xcontextconfig(cop0);
+#endif
+ kvm_save_gc0_pagemask(cop0);
+ kvm_save_gc0_pagegrain(cop0);
+ kvm_save_gc0_wired(cop0);
+ /* allow wired TLB entries to be overwritten */
+ clear_gc0_wired(MIPSR6_WIRED_WIRED);
+ kvm_save_gc0_hwrena(cop0);
+ kvm_save_gc0_badvaddr(cop0);
+ kvm_save_gc0_entryhi(cop0);
+ kvm_save_gc0_status(cop0);
+ kvm_save_gc0_intctl(cop0);
+ kvm_save_gc0_epc(cop0);
+ kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
+ if (cpu_guest_has_userlocal)
+ kvm_save_gc0_userlocal(cop0);
+
+ /* only save implemented config registers */
+ kvm_save_gc0_config(cop0);
+ if (cpu_guest_has_conf1)
+ kvm_save_gc0_config1(cop0);
+ if (cpu_guest_has_conf2)
+ kvm_save_gc0_config2(cop0);
+ if (cpu_guest_has_conf3)
+ kvm_save_gc0_config3(cop0);
+ if (cpu_guest_has_conf4)
+ kvm_save_gc0_config4(cop0);
+ if (cpu_guest_has_conf5)
+ kvm_save_gc0_config5(cop0);
+ if (cpu_guest_has_conf6)
+ kvm_save_gc0_config6(cop0);
+ if (cpu_guest_has_conf7)
+ kvm_save_gc0_config7(cop0);
+
+ kvm_save_gc0_errorepc(cop0);
+
+ /* save KScratch registers if enabled in guest */
+ if (cpu_guest_has_conf4) {
+ if (cpu_guest_has_kscr(2))
+ kvm_save_gc0_kscratch1(cop0);
+ if (cpu_guest_has_kscr(3))
+ kvm_save_gc0_kscratch2(cop0);
+ if (cpu_guest_has_kscr(4))
+ kvm_save_gc0_kscratch3(cop0);
+ if (cpu_guest_has_kscr(5))
+ kvm_save_gc0_kscratch4(cop0);
+ if (cpu_guest_has_kscr(6))
+ kvm_save_gc0_kscratch5(cop0);
+ if (cpu_guest_has_kscr(7))
+ kvm_save_gc0_kscratch6(cop0);
+ }
+
+ if (cpu_guest_has_badinstr)
+ kvm_save_gc0_badinstr(cop0);
+ if (cpu_guest_has_badinstrp)
+ kvm_save_gc0_badinstrp(cop0);
+
+ if (cpu_guest_has_segments) {
+ kvm_save_gc0_segctl0(cop0);
+ kvm_save_gc0_segctl1(cop0);
+ kvm_save_gc0_segctl2(cop0);
+ }
+
+ /* save HTW registers if enabled in guest */
+ if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
+ kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
+ kvm_save_gc0_pwbase(cop0);
+ kvm_save_gc0_pwfield(cop0);
+ kvm_save_gc0_pwsize(cop0);
+ kvm_save_gc0_pwctl(cop0);
+ }
+
+ kvm_vz_save_timer(vcpu);
+
+ /* save Root.GuestCtl2 in unused Guest guestctl2 register */
+ if (cpu_has_guestctl2)
+ cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
+ read_c0_guestctl2();
+
+ return 0;
+}
+
+/**
+ * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
+ * @size: Number of guest VTLB entries (0 < @size <= root VTLB entries).
+ *
+ * Attempt to resize the guest VTLB by writing guest Config registers. This is
+ * necessary for cores with a shared root/guest TLB to avoid overlap with wired
+ * entries in the root VTLB.
+ *
+ * Returns: The resulting guest VTLB size.
+ */
+static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
+{
+ unsigned int config4 = 0, ret = 0, limit;
+
+ /* Write MMUSize - 1 into guest Config registers */
+ if (cpu_guest_has_conf1)
+ change_gc0_config1(MIPS_CONF1_TLBS,
+ (size - 1) << MIPS_CONF1_TLBS_SHIFT);
+ if (cpu_guest_has_conf4) {
+ config4 = read_gc0_config4();
+ if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
+ MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
+ config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
+ config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
+ MIPS_CONF4_VTLBSIZEEXT_SHIFT;
+ } else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
+ MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
+ config4 &= ~MIPS_CONF4_MMUSIZEEXT;
+ config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
+ MIPS_CONF4_MMUSIZEEXT_SHIFT;
+ }
+ write_gc0_config4(config4);
+ }
+
+ /*
+	 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless
+	 * that would exceed Root.Wired.Limit (Guest.Wired.Wired is cleared so
+	 * the write is not dropped).
+ */
+ if (cpu_has_mips_r6) {
+ limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
+ MIPSR6_WIRED_LIMIT_SHIFT;
+ if (size - 1 <= limit)
+ limit = 0;
+ write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
+ }
+
+ /* Read back MMUSize - 1 */
+ back_to_back_c0_hazard();
+ if (cpu_guest_has_conf1)
+ ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
+ MIPS_CONF1_TLBS_SHIFT;
+ if (config4) {
+ if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
+ MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
+ ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
+ MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
+ MIPS_CONF1_TLBS_SIZE;
+ else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
+ MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
+ ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
+ MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
+ MIPS_CONF1_TLBS_SIZE;
+ }
+ return ret + 1;
+}
+
+static int kvm_vz_hardware_enable(void)
+{
+ unsigned int mmu_size, guest_mmu_size, ftlb_size;
+ u64 guest_cvmctl, cvmvmconfig;
+
+ switch (current_cpu_type()) {
+ case CPU_CAVIUM_OCTEON3:
+ /* Set up guest timer/perfcount IRQ lines */
+ guest_cvmctl = read_gc0_cvmctl();
+ guest_cvmctl &= ~CVMCTL_IPTI;
+ guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT;
+ guest_cvmctl &= ~CVMCTL_IPPCI;
+ guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT;
+ write_gc0_cvmctl(guest_cvmctl);
+
+ cvmvmconfig = read_c0_cvmvmconfig();
+ /* No I/O hole translation. */
+ cvmvmconfig |= CVMVMCONF_DGHT;
+ /* Halve the root MMU size */
+ mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
+ >> CVMVMCONF_MMUSIZEM1_S) + 1;
+ guest_mmu_size = mmu_size / 2;
+ mmu_size -= guest_mmu_size;
+ cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
+ cvmvmconfig |= mmu_size - 1;
+ write_c0_cvmvmconfig(cvmvmconfig);
+
+ /* Update our records */
+ current_cpu_data.tlbsize = mmu_size;
+ current_cpu_data.tlbsizevtlb = mmu_size;
+ current_cpu_data.guest.tlbsize = guest_mmu_size;
+
+ /* Flush moved entries in new (guest) context */
+ kvm_vz_local_flush_guesttlb_all();
+ break;
+ default:
+ /*
+ * ImgTec cores tend to use a shared root/guest TLB. To avoid
+ * overlap of root wired and guest entries, the guest TLB may
+ * need resizing.
+ */
+ mmu_size = current_cpu_data.tlbsizevtlb;
+ ftlb_size = current_cpu_data.tlbsize - mmu_size;
+
+ /* Try switching to maximum guest VTLB size for flush */
+ guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
+ current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
+ kvm_vz_local_flush_guesttlb_all();
+
+ /*
+ * Reduce to make space for root wired entries and at least 2
+ * root non-wired entries. This does assume that long-term wired
+ * entries won't be added later.
+ */
+ guest_mmu_size = mmu_size - num_wired_entries() - 2;
+ guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
+ current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
+
+ /*
+ * Write the VTLB size, but if another CPU has already written,
+ * check it matches or we won't provide a consistent view to the
+ * guest. If this ever happens it suggests an asymmetric number
+ * of wired entries.
+ */
+ if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
+ WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
+ "Available guest VTLB size mismatch"))
+ return -EINVAL;
+ break;
+ }
+
+ /*
+ * Enable virtualization features granting guest direct control of
+ * certain features:
+ * CP0=1: Guest coprocessor 0 context.
+ * AT=Guest: Guest MMU.
+ * CG=1: Hit (virtual address) CACHE operations (optional).
+ * CF=1: Guest Config registers.
+ * CGI=1: Indexed flush CACHE operations (optional).
+ */
+ write_c0_guestctl0(MIPS_GCTL0_CP0 |
+ (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
+ MIPS_GCTL0_CG | MIPS_GCTL0_CF);
+ if (cpu_has_guestctl0ext) {
+ if (current_cpu_type() != CPU_LOONGSON64)
+ set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+ else
+ clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+ }
+
+ if (cpu_has_guestid) {
+ write_c0_guestctl1(0);
+ kvm_vz_local_flush_roottlb_all_guests();
+
+ GUESTID_MASK = current_cpu_data.guestid_mask;
+ GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
+ GUESTID_VERSION_MASK = ~GUESTID_MASK;
+
+ current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
+ }
+
+ /* clear any pending injected virtual guest interrupts */
+ if (cpu_has_guestctl2)
+ clear_c0_guestctl2(0x3f << 10);
+
+#ifdef CONFIG_CPU_LOONGSON64
+ /* Control guest CCA attribute */
+ if (cpu_has_csr())
+ csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
+#endif
+
+ return 0;
+}
+
+static void kvm_vz_hardware_disable(void)
+{
+ u64 cvmvmconfig;
+ unsigned int mmu_size;
+
+ /* Flush any remaining guest TLB entries */
+ kvm_vz_local_flush_guesttlb_all();
+
+ switch (current_cpu_type()) {
+ case CPU_CAVIUM_OCTEON3:
+ /*
+ * Allocate whole TLB for root. Existing guest TLB entries will
+ * change ownership to the root TLB. We should be safe though as
+ * they've already been flushed above while in guest TLB.
+ */
+ cvmvmconfig = read_c0_cvmvmconfig();
+ mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
+ >> CVMVMCONF_MMUSIZEM1_S) + 1;
+ cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
+ cvmvmconfig |= mmu_size - 1;
+ write_c0_cvmvmconfig(cvmvmconfig);
+
+ /* Update our records */
+ current_cpu_data.tlbsize = mmu_size;
+ current_cpu_data.tlbsizevtlb = mmu_size;
+ current_cpu_data.guest.tlbsize = 0;
+
+ /* Flush moved entries in new (root) context */
+ local_flush_tlb_all();
+ break;
+ }
+
+ if (cpu_has_guestid) {
+ write_c0_guestctl1(0);
+ kvm_vz_local_flush_roottlb_all_guests();
+ }
+}
+
+static int kvm_vz_check_extension(struct kvm *kvm, long ext)
+{
+ int r;
+
+ switch (ext) {
+ case KVM_CAP_MIPS_VZ:
+ /* we wouldn't be here unless cpu_has_vz */
+ r = 1;
+ break;
+#ifdef CONFIG_64BIT
+ case KVM_CAP_MIPS_64BIT:
+ /* We support 64-bit registers/operations and addresses */
+ r = 2;
+ break;
+#endif
+ case KVM_CAP_IOEVENTFD:
+ r = 1;
+ break;
+ default:
+ r = 0;
+ break;
+ }
+
+ return r;
+}
+
+static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ vcpu->arch.vzguestid[i] = 0;
+
+ return 0;
+}
+
+static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+ int cpu;
+
+ /*
+ * If the VCPU is freed and reused as another VCPU, we don't want the
+ * matching pointer wrongly hanging around in last_vcpu[] or
+ * last_exec_vcpu[].
+ */
+ for_each_possible_cpu(cpu) {
+ if (last_vcpu[cpu] == vcpu)
+ last_vcpu[cpu] = NULL;
+ if (last_exec_vcpu[cpu] == vcpu)
+ last_exec_vcpu[cpu] = NULL;
+ }
+}
+
+static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */
+
+ /*
+ * Start off the timer at the same frequency as the host timer, but the
+ * soft timer doesn't handle frequencies greater than 1GHz yet.
+ */
+ if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
+ count_hz = mips_hpt_frequency;
+ kvm_mips_init_count(vcpu, count_hz);
+
+ /*
+ * Initialize guest register state to valid architectural reset state.
+ */
+
+ /* PageGrain */
+ if (cpu_has_mips_r5 || cpu_has_mips_r6)
+ kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
+ /* Wired */
+ if (cpu_has_mips_r6)
+ kvm_write_sw_gc0_wired(cop0,
+ read_gc0_wired() & MIPSR6_WIRED_LIMIT);
+ /* Status */
+ kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
+ if (cpu_has_mips_r5 || cpu_has_mips_r6)
+ kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
+ /* IntCtl */
+ kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
+ (INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
+ /* PRId */
+ kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
+ /* EBase */
+ kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
+ /* Config */
+ kvm_save_gc0_config(cop0);
+ /* architecturally writable (e.g. from guest) */
+ kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
+ _page_cachable_default >> _CACHE_SHIFT);
+ /* architecturally read only, but maybe writable from root */
+ kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
+ if (cpu_guest_has_conf1) {
+ kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
+ /* Config1 */
+ kvm_save_gc0_config1(cop0);
+ /* architecturally read only, but maybe writable from root */
+ kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 |
+ MIPS_CONF1_MD |
+ MIPS_CONF1_PC |
+ MIPS_CONF1_WR |
+ MIPS_CONF1_CA |
+ MIPS_CONF1_FP);
+ }
+ if (cpu_guest_has_conf2) {
+ kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
+ /* Config2 */
+ kvm_save_gc0_config2(cop0);
+ }
+ if (cpu_guest_has_conf3) {
+ kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
+ /* Config3 */
+ kvm_save_gc0_config3(cop0);
+ /* architecturally writable (e.g. from guest) */
+ kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
+ /* architecturally read only, but maybe writable from root */
+ kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA |
+ MIPS_CONF3_BPG |
+ MIPS_CONF3_ULRI |
+ MIPS_CONF3_DSP |
+ MIPS_CONF3_CTXTC |
+ MIPS_CONF3_ITL |
+ MIPS_CONF3_LPA |
+ MIPS_CONF3_VEIC |
+ MIPS_CONF3_VINT |
+ MIPS_CONF3_SP |
+ MIPS_CONF3_CDMM |
+ MIPS_CONF3_MT |
+ MIPS_CONF3_SM |
+ MIPS_CONF3_TL);
+ }
+ if (cpu_guest_has_conf4) {
+ kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
+ /* Config4 */
+ kvm_save_gc0_config4(cop0);
+ }
+ if (cpu_guest_has_conf5) {
+ kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
+ /* Config5 */
+ kvm_save_gc0_config5(cop0);
+ /* architecturally writable (e.g. from guest) */
+ kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K |
+ MIPS_CONF5_CV |
+ MIPS_CONF5_MSAEN |
+ MIPS_CONF5_UFE |
+ MIPS_CONF5_FRE |
+ MIPS_CONF5_SBRI |
+ MIPS_CONF5_UFR);
+ /* architecturally read only, but maybe writable from root */
+ kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
+ }
+
+ if (cpu_guest_has_contextconfig) {
+ /* ContextConfig */
+ kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
+#ifdef CONFIG_64BIT
+ /* XContextConfig */
+ /* bits SEGBITS-13+3:4 set */
+ kvm_write_sw_gc0_xcontextconfig(cop0,
+ ((1ull << (cpu_vmbits - 13)) - 1) << 4);
+#endif
+ }
+
+ /* Implementation dependent, use the legacy layout */
+ if (cpu_guest_has_segments) {
+ /* SegCtl0, SegCtl1, SegCtl2 */
+ kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
+ kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
+ (_page_cachable_default >> _CACHE_SHIFT) <<
+ (16 + MIPS_SEGCFG_C_SHIFT));
+ kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
+ }
+
+ /* reset HTW registers */
+ if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) {
+ /* PWField */
+ kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
+ /* PWSize */
+ kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
+ }
+
+ /* start with no pending virtual guest interrupts */
+ if (cpu_has_guestctl2)
+ cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;
+
+ /* Put PC at reset vector */
+ vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);
+
+ return 0;
+}
+
+static void kvm_vz_flush_shadow_all(struct kvm *kvm)
+{
+ if (cpu_has_guestid) {
+ /* Flush GuestID for each VCPU individually */
+ kvm_flush_remote_tlbs(kvm);
+ } else {
+ /*
+ * For each CPU there is a single GPA ASID used by all VCPUs in
+ * the VM, so it doesn't make sense for the VCPUs to handle
+ * invalidation of these ASIDs individually.
+ *
+ * Instead mark all CPUs as needing ASID invalidation in
+ * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
+ * kick any running VCPUs so they check asid_flush_mask.
+ */
+ cpumask_setall(&kvm->arch.asid_flush_mask);
+ kvm_flush_remote_tlbs(kvm);
+ }
+}
+
+static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *slot)
+{
+ kvm_vz_flush_shadow_all(kvm);
+}
+
+static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
+{
+ int cpu = smp_processor_id();
+ int preserve_guest_tlb;
+
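+	/*
+	 * If pending requests may clobber the guest TLB, save the wired guest
+	 * entries now and reload them after the guest TLB state is reloaded.
+	 */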
+ preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);
+
+ if (preserve_guest_tlb)
+ kvm_vz_vcpu_save_wired(vcpu);
+
+ kvm_vz_vcpu_load_tlb(vcpu, cpu);
+
+ if (preserve_guest_tlb)
+ kvm_vz_vcpu_load_wired(vcpu);
+}
+
+static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
+{
+ int cpu = smp_processor_id();
+ int r;
+
+ kvm_vz_acquire_htimer(vcpu);
+ /* Check if we have any exceptions/interrupts pending */
+ kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());
+
+ kvm_vz_check_requests(vcpu, cpu);
+ kvm_vz_vcpu_load_tlb(vcpu, cpu);
+ kvm_vz_vcpu_load_wired(vcpu);
+
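+	/* enter the guest and run until we exit back to root context */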
+ r = vcpu->arch.vcpu_run(vcpu);
+
+ kvm_vz_vcpu_save_wired(vcpu);
+
+ return r;
+}
+
+static struct kvm_mips_callbacks kvm_vz_callbacks = {
+ .handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
+ .handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
+ .handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
+ .handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
+ .handle_addr_err_st = kvm_trap_vz_no_handler,
+ .handle_addr_err_ld = kvm_trap_vz_no_handler,
+ .handle_syscall = kvm_trap_vz_no_handler,
+ .handle_res_inst = kvm_trap_vz_no_handler,
+ .handle_break = kvm_trap_vz_no_handler,
+ .handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
+ .handle_guest_exit = kvm_trap_vz_handle_guest_exit,
+
+ .hardware_enable = kvm_vz_hardware_enable,
+ .hardware_disable = kvm_vz_hardware_disable,
+ .check_extension = kvm_vz_check_extension,
+ .vcpu_init = kvm_vz_vcpu_init,
+ .vcpu_uninit = kvm_vz_vcpu_uninit,
+ .vcpu_setup = kvm_vz_vcpu_setup,
+ .flush_shadow_all = kvm_vz_flush_shadow_all,
+ .flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
+ .gva_to_gpa = kvm_vz_gva_to_gpa_cb,
+ .queue_timer_int = kvm_vz_queue_timer_int_cb,
+ .dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
+ .queue_io_int = kvm_vz_queue_io_int_cb,
+ .dequeue_io_int = kvm_vz_dequeue_io_int_cb,
+ .irq_deliver = kvm_vz_irq_deliver_cb,
+ .irq_clear = kvm_vz_irq_clear_cb,
+ .num_regs = kvm_vz_num_regs,
+ .copy_reg_indices = kvm_vz_copy_reg_indices,
+ .get_one_reg = kvm_vz_get_one_reg,
+ .set_one_reg = kvm_vz_set_one_reg,
+ .vcpu_load = kvm_vz_vcpu_load,
+ .vcpu_put = kvm_vz_vcpu_put,
+ .vcpu_run = kvm_vz_vcpu_run,
+ .vcpu_reenter = kvm_vz_vcpu_reenter,
+};
+
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
+{
+ if (!cpu_has_vz)
+ return -ENODEV;
+
+ /*
+ * VZ requires at least 2 KScratch registers, so it should have been
+ * possible to allocate pgd_reg.
+ */
+ if (WARN(pgd_reg == -1,
+ "pgd_reg not allocated even though cpu_has_vz\n"))
+ return -ENODEV;
+
+ pr_info("Starting KVM with MIPS VZ extensions\n");
+
+ *install_callbacks = &kvm_vz_callbacks;
+ return 0;
+}
diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig
new file mode 100644
index 000000000..6c6802e48
--- /dev/null
+++ b/arch/mips/lantiq/Kconfig
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0
+if LANTIQ
+
+config SOC_TYPE_XWAY
+ bool
+ select PINCTRL_XWAY
+ default n
+
+choice
+ prompt "SoC Type"
+ default SOC_XWAY
+
+config SOC_AMAZON_SE
+ bool "Amazon SE"
+ select SOC_TYPE_XWAY
+ select MFD_SYSCON
+ select MFD_CORE
+
+config SOC_XWAY
+ bool "XWAY"
+ select SOC_TYPE_XWAY
+ select HAVE_PCI
+ select MFD_SYSCON
+ select MFD_CORE
+
+config SOC_FALCON
+ bool "FALCON"
+ select PINCTRL_FALCON
+
+endchoice
+
+choice
+ prompt "Built-in device tree"
+ help
+ Legacy bootloaders do not pass a DTB pointer to the kernel, so
+ if a "wrapper" is not being used, the kernel will need to include
+ a device tree that matches the target board.
+
+ The builtin DTB will only be used if the firmware does not supply
+ a valid DTB.
+
+config LANTIQ_DT_NONE
+ bool "None"
+
+config DT_EASY50712
+ bool "Easy50712"
+ depends on SOC_XWAY
+ select BUILTIN_DTB
+endchoice
+
+config PCI_LANTIQ
+ bool "PCI Support"
+ depends on SOC_XWAY && PCI
+
+endif
diff --git a/arch/mips/lantiq/Makefile b/arch/mips/lantiq/Makefile
new file mode 100644
index 000000000..e7234ca09
--- /dev/null
+++ b/arch/mips/lantiq/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2010 John Crispin <john@phrozen.org>
+#
+
+obj-y := irq.o clk.o prom.o
+
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+obj-$(CONFIG_SOC_TYPE_XWAY) += xway/
+obj-$(CONFIG_SOC_FALCON) += falcon/
diff --git a/arch/mips/lantiq/Platform b/arch/mips/lantiq/Platform
new file mode 100644
index 000000000..0bc9c0fbd
--- /dev/null
+++ b/arch/mips/lantiq/Platform
@@ -0,0 +1,8 @@
+#
+# Lantiq
+#
+
+cflags-$(CONFIG_LANTIQ) += -I$(srctree)/arch/mips/include/asm/mach-lantiq
+load-$(CONFIG_LANTIQ) = 0xffffffff80002000
+cflags-$(CONFIG_SOC_TYPE_XWAY) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/xway
+cflags-$(CONFIG_SOC_FALCON) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/falcon
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
new file mode 100644
index 000000000..2d5a0bcb0
--- /dev/null
+++ b/arch/mips/lantiq/clk.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/list.h>
+
+#include <asm/time.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+
+#include <lantiq_soc.h>
+
+#include "clk.h"
+#include "prom.h"
+
+/* lantiq socs have 4 static clocks (cpu, fpi, io and ppe) */
+static struct clk cpu_clk_generic[4];
+
+void clkdev_add_static(unsigned long cpu, unsigned long fpi,
+ unsigned long io, unsigned long ppe)
+{
+ cpu_clk_generic[0].rate = cpu;
+ cpu_clk_generic[1].rate = fpi;
+ cpu_clk_generic[2].rate = io;
+ cpu_clk_generic[3].rate = ppe;
+}
+
+struct clk *clk_get_cpu(void)
+{
+ return &cpu_clk_generic[0];
+}
+
+struct clk *clk_get_fpi(void)
+{
+ return &cpu_clk_generic[1];
+}
+EXPORT_SYMBOL_GPL(clk_get_fpi);
+
+struct clk *clk_get_io(void)
+{
+ return &cpu_clk_generic[2];
+}
+EXPORT_SYMBOL_GPL(clk_get_io);
+
+struct clk *clk_get_ppe(void)
+{
+ return &cpu_clk_generic[3];
+}
+EXPORT_SYMBOL_GPL(clk_get_ppe);
+
+static inline int clk_good(struct clk *clk)
+{
+ return clk && !IS_ERR(clk);
+}
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ if (unlikely(!clk_good(clk)))
+ return 0;
+
+ if (clk->rate != 0)
+ return clk->rate;
+
+ if (clk->get_rate != NULL)
+ return clk->get_rate();
+
+ return 0;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ if (unlikely(!clk_good(clk)))
+ return 0;
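+	/* if the clock exposes a list of valid rates, reject anything else */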
+ if (clk->rates && *clk->rates) {
+ unsigned long *r = clk->rates;
+
+ while (*r && (*r != rate))
+ r++;
+ if (!*r) {
+ pr_err("clk %s.%s: trying to set invalid rate %ld\n",
+ clk->cl.dev_id, clk->cl.con_id, rate);
+ return -1;
+ }
+ }
+ clk->rate = rate;
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ if (unlikely(!clk_good(clk)))
+ return 0;
+ if (clk->rates && *clk->rates) {
+ unsigned long *r = clk->rates;
+
+ while (*r && (*r != rate))
+ r++;
+ if (!*r) {
+ return clk->rate;
+ }
+ }
+ return rate;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_enable(struct clk *clk)
+{
+ if (unlikely(!clk_good(clk)))
+ return -1;
+
+ if (clk->enable)
+ return clk->enable(clk);
+
+ return -1;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+ if (unlikely(!clk_good(clk)))
+ return;
+
+ if (clk->disable)
+ clk->disable(clk);
+}
+EXPORT_SYMBOL(clk_disable);
+
+int clk_activate(struct clk *clk)
+{
+ if (unlikely(!clk_good(clk)))
+ return -1;
+
+ if (clk->activate)
+ return clk->activate(clk);
+
+ return -1;
+}
+EXPORT_SYMBOL(clk_activate);
+
+void clk_deactivate(struct clk *clk)
+{
+ if (unlikely(!clk_good(clk)))
+ return;
+
+ if (clk->deactivate)
+ clk->deactivate(clk);
+}
+EXPORT_SYMBOL(clk_deactivate);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+ return NULL;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+static inline u32 get_counter_resolution(void)
+{
+ u32 res;
+
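+	/* rdhwr $3 reads CCRes: the number of cycles per Count increment */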
+ __asm__ __volatile__(
+ ".set push\n"
+ ".set mips32r2\n"
+ "rdhwr %0, $3\n"
+ ".set pop\n"
+ : "=&r" (res)
+ : /* no input */
+ : "memory");
+
+ return res;
+}
+
+void __init plat_time_init(void)
+{
+ struct clk *clk;
+
+ ltq_soc_init();
+
+ clk = clk_get_cpu();
+ mips_hpt_frequency = clk_get_rate(clk) / get_counter_resolution();
+ write_c0_compare(read_c0_count());
+ pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
+ clk_put(clk);
+}
diff --git a/arch/mips/lantiq/clk.h b/arch/mips/lantiq/clk.h
new file mode 100644
index 000000000..f135e3035
--- /dev/null
+++ b/arch/mips/lantiq/clk.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#ifndef _LTQ_CLK_H__
+#define _LTQ_CLK_H__
+
+#include <linux/clkdev.h>
+
+/* clock speeds */
+#define CLOCK_33M 33333333
+#define CLOCK_60M 60000000
+#define CLOCK_62_5M 62500000
+#define CLOCK_83M 83333333
+#define CLOCK_83_5M 83500000
+#define CLOCK_98_304M 98304000
+#define CLOCK_100M 100000000
+#define CLOCK_111M 111111111
+#define CLOCK_125M 125000000
+#define CLOCK_133M 133333333
+#define CLOCK_150M 150000000
+#define CLOCK_166M 166666666
+#define CLOCK_167M 166666667
+#define CLOCK_196_608M 196608000
+#define CLOCK_200M 200000000
+#define CLOCK_222M 222000000
+#define CLOCK_240M 240000000
+#define CLOCK_250M 250000000
+#define CLOCK_266M 266666666
+#define CLOCK_288M 288888888
+#define CLOCK_300M 300000000
+#define CLOCK_333M 333333333
+#define CLOCK_360M 360000000
+#define CLOCK_393M 393215332
+#define CLOCK_400M 400000000
+#define CLOCK_432M 432000000
+#define CLOCK_450M 450000000
+#define CLOCK_500M 500000000
+#define CLOCK_600M 600000000
+#define CLOCK_666M 666666666
+#define CLOCK_720M 720000000
+
+/* clock out speeds */
+#define CLOCK_32_768K 32768
+#define CLOCK_1_536M 1536000
+#define CLOCK_2_5M 2500000
+#define CLOCK_12M 12000000
+#define CLOCK_24M 24000000
+#define CLOCK_25M 25000000
+#define CLOCK_30M 30000000
+#define CLOCK_40M 40000000
+#define CLOCK_48M 48000000
+#define CLOCK_50M 50000000
+#define CLOCK_60M 60000000
+
+struct clk {
+ struct clk_lookup cl;
+ unsigned long rate;
+ unsigned long *rates;
+ unsigned int module;
+ unsigned int bits;
+ unsigned long (*get_rate) (void);
+ int (*enable) (struct clk *clk);
+ void (*disable) (struct clk *clk);
+ int (*activate) (struct clk *clk);
+ void (*deactivate) (struct clk *clk);
+ void (*reboot) (struct clk *clk);
+};
+
+extern void clkdev_add_static(unsigned long cpu, unsigned long fpi,
+ unsigned long io, unsigned long ppe);
+
+extern unsigned long ltq_danube_cpu_hz(void);
+extern unsigned long ltq_danube_fpi_hz(void);
+extern unsigned long ltq_danube_pp32_hz(void);
+
+extern unsigned long ltq_ar9_cpu_hz(void);
+extern unsigned long ltq_ar9_fpi_hz(void);
+
+extern unsigned long ltq_vr9_cpu_hz(void);
+extern unsigned long ltq_vr9_fpi_hz(void);
+extern unsigned long ltq_vr9_pp32_hz(void);
+
+extern unsigned long ltq_ar10_cpu_hz(void);
+extern unsigned long ltq_ar10_fpi_hz(void);
+extern unsigned long ltq_ar10_pp32_hz(void);
+
+extern unsigned long ltq_grx390_cpu_hz(void);
+extern unsigned long ltq_grx390_fpi_hz(void);
+extern unsigned long ltq_grx390_pp32_hz(void);
+
+#endif
diff --git a/arch/mips/lantiq/early_printk.c b/arch/mips/lantiq/early_printk.c
new file mode 100644
index 000000000..4e4a28be1
--- /dev/null
+++ b/arch/mips/lantiq/early_printk.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/cpu.h>
+#include <lantiq_soc.h>
+#include <asm/setup.h>
+
+#define ASC_BUF 1024
+#define LTQ_ASC_FSTAT ((u32 *)(LTQ_EARLY_ASC + 0x0048))
+#ifdef __BIG_ENDIAN
+#define LTQ_ASC_TBUF ((u32 *)(LTQ_EARLY_ASC + 0x0020 + 3))
+#else
+#define LTQ_ASC_TBUF ((u32 *)(LTQ_EARLY_ASC + 0x0020))
+#endif
+#define TXMASK 0x3F00
+#define TXOFFSET 8
+
+void prom_putchar(char c)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
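+	/* wait for the ASC TX FIFO to drain before writing the character */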
+ do { } while ((ltq_r32(LTQ_ASC_FSTAT) & TXMASK) >> TXOFFSET);
+ if (c == '\n')
+ ltq_w8('\r', LTQ_ASC_TBUF);
+ ltq_w8(c, LTQ_ASC_TBUF);
+ local_irq_restore(flags);
+}
diff --git a/arch/mips/lantiq/falcon/Makefile b/arch/mips/lantiq/falcon/Makefile
new file mode 100644
index 000000000..98da1e031
--- /dev/null
+++ b/arch/mips/lantiq/falcon/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y := prom.o reset.o sysctrl.o
diff --git a/arch/mips/lantiq/falcon/prom.c b/arch/mips/lantiq/falcon/prom.c
new file mode 100644
index 000000000..7b98def10
--- /dev/null
+++ b/arch/mips/lantiq/falcon/prom.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/kernel.h>
+#include <asm/cacheflush.h>
+#include <asm/traps.h>
+#include <asm/io.h>
+
+#include <lantiq_soc.h>
+
+#include "../prom.h"
+
+#define SOC_FALCON "Falcon"
+#define SOC_FALCON_D "Falcon-D"
+#define SOC_FALCON_V "Falcon-V"
+#define SOC_FALCON_M "Falcon-M"
+
+#define COMP_FALCON "lantiq,falcon"
+
+#define PART_SHIFT 12
+#define PART_MASK 0x0FFFF000
+#define REV_SHIFT 28
+#define REV_MASK 0xF0000000
+#define SREV_SHIFT 22
+#define SREV_MASK 0x03C00000
+#define TYPE_SHIFT 26
+#define TYPE_MASK 0x3C000000
+
+/* reset, nmi and ejtag exception vectors */
+#define BOOT_REG_BASE (KSEG1 | 0x1F200000)
+#define BOOT_RVEC (BOOT_REG_BASE | 0x00)
+#define BOOT_NVEC (BOOT_REG_BASE | 0x04)
+#define BOOT_EVEC (BOOT_REG_BASE | 0x08)
+
+void __init ltq_soc_nmi_setup(void)
+{
+ extern void (*nmi_handler)(void);
+
+ ltq_w32((unsigned long)&nmi_handler, (void *)BOOT_NVEC);
+}
+
+void __init ltq_soc_ejtag_setup(void)
+{
+ extern void (*ejtag_debug_handler)(void);
+
+ ltq_w32((unsigned long)&ejtag_debug_handler, (void *)BOOT_EVEC);
+}
+
+void __init ltq_soc_detect(struct ltq_soc_info *i)
+{
+ u32 type;
+ i->partnum = (ltq_r32(FALCON_CHIPID) & PART_MASK) >> PART_SHIFT;
+ i->rev = (ltq_r32(FALCON_CHIPID) & REV_MASK) >> REV_SHIFT;
+ i->srev = ((ltq_r32(FALCON_CHIPCONF) & SREV_MASK) >> SREV_SHIFT);
+ i->compatible = COMP_FALCON;
+ i->type = SOC_TYPE_FALCON;
+ sprintf(i->rev_type, "%c%d%d", (i->srev & 0x4) ? ('B') : ('A'),
+ i->rev & 0x7, (i->srev & 0x3) + 1);
+
+ switch (i->partnum) {
+ case SOC_ID_FALCON:
+ type = (ltq_r32(FALCON_CHIPTYPE) & TYPE_MASK) >> TYPE_SHIFT;
+ switch (type) {
+ case 0:
+ i->name = SOC_FALCON_D;
+ break;
+ case 1:
+ i->name = SOC_FALCON_V;
+ break;
+ case 2:
+ i->name = SOC_FALCON_M;
+ break;
+ default:
+ i->name = SOC_FALCON;
+ break;
+ }
+ break;
+
+ default:
+ unreachable();
+ break;
+ }
+
+ board_nmi_handler_setup = ltq_soc_nmi_setup;
+ board_ejtag_handler_setup = ltq_soc_ejtag_setup;
+}
diff --git a/arch/mips/lantiq/falcon/reset.c b/arch/mips/lantiq/falcon/reset.c
new file mode 100644
index 000000000..261996c23
--- /dev/null
+++ b/arch/mips/lantiq/falcon/reset.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <asm/reboot.h>
+#include <linux/export.h>
+
+#include <lantiq_soc.h>
+
+/*
+ * Dummy implementation. Used to allow platform code to find out which
+ * boot source was used.
+ */
+unsigned char ltq_boot_select(void)
+{
+ return BS_SPI;
+}
+
+#define BOOT_REG_BASE (KSEG1 | 0x1F200000)
+#define BOOT_PW1_REG (BOOT_REG_BASE | 0x20)
+#define BOOT_PW2_REG (BOOT_REG_BASE | 0x24)
+#define BOOT_PW1 0x4C545100
+#define BOOT_PW2 0x0051544C
+
+#define WDT_REG_BASE (KSEG1 | 0x1F8803F0)
+#define WDT_PW1 0x00BE0000
+#define WDT_PW2 0x00DC0000
+
+static void machine_restart(char *command)
+{
+ local_irq_disable();
+
+ /* reboot magic */
+ ltq_w32(BOOT_PW1, (void *)BOOT_PW1_REG); /* 'LTQ\0' */
+ ltq_w32(BOOT_PW2, (void *)BOOT_PW2_REG); /* '\0QTL' */
+ ltq_w32(0, (void *)BOOT_REG_BASE); /* reset Bootreg RVEC */
+
+ /* watchdog magic */
+ ltq_w32(WDT_PW1, (void *)WDT_REG_BASE);
+ ltq_w32(WDT_PW2 |
+ (0x3 << 26) | /* PWL */
+ (0x2 << 24) | /* CLKDIV */
+ (0x1 << 31) | /* enable */
+ (1), /* reload */
+ (void *)WDT_REG_BASE);
+ unreachable();
+}
+
+static void machine_halt(void)
+{
+ local_irq_disable();
+ unreachable();
+}
+
+static void machine_power_off(void)
+{
+ local_irq_disable();
+ unreachable();
+}
+
+static int __init mips_reboot_setup(void)
+{
+ _machine_restart = machine_restart;
+ _machine_halt = machine_halt;
+ pm_power_off = machine_power_off;
+ return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
new file mode 100644
index 000000000..446a25369
--- /dev/null
+++ b/arch/mips/lantiq/falcon/sysctrl.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2011 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/clkdev.h>
+#include <linux/of_address.h>
+#include <asm/delay.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+
+/* infrastructure control register */
+#define SYS1_INFRAC 0x00bc
+/* Configuration fuses for drivers and pll */
+#define STATUS_CONFIG 0x0040
+
+/* GPE frequency selection */
+#define GPPC_OFFSET 24
+#define GPEFREQ_MASK 0x0000C00
+#define GPEFREQ_OFFSET 10
+/* Clock status register */
+#define SYSCTL_CLKS 0x0000
+/* Clock enable register */
+#define SYSCTL_CLKEN 0x0004
+/* Clock clear register */
+#define SYSCTL_CLKCLR 0x0008
+/* Activation Status Register */
+#define SYSCTL_ACTS 0x0020
+/* Activation Register */
+#define SYSCTL_ACT 0x0024
+/* Deactivation Register */
+#define SYSCTL_DEACT 0x0028
+/* reboot Register */
+#define SYSCTL_RBT 0x002c
+/* CPU0 Clock Control Register */
+#define SYS1_CPU0CC 0x0040
+/* HRST_OUT_N Control Register */
+#define SYS1_HRSTOUTC 0x00c0
+/* clock divider bit */
+#define CPU0CC_CPUDIV 0x0001
+
+/* Activation Status Register */
+#define ACTS_ASC0_ACT 0x00001000
+#define ACTS_SSC0 0x00002000
+#define ACTS_ASC1_ACT 0x00000800
+#define ACTS_I2C_ACT 0x00004000
+#define ACTS_P0 0x00010000
+#define ACTS_P1 0x00010000
+#define ACTS_P2 0x00020000
+#define ACTS_P3 0x00020000
+#define ACTS_P4 0x00040000
+#define ACTS_PADCTRL0 0x00100000
+#define ACTS_PADCTRL1 0x00100000
+#define ACTS_PADCTRL2 0x00200000
+#define ACTS_PADCTRL3 0x00200000
+#define ACTS_PADCTRL4 0x00400000
+
+#define sysctl_w32(m, x, y) ltq_w32((x), sysctl_membase[m] + (y))
+#define sysctl_r32(m, x) ltq_r32(sysctl_membase[m] + (x))
+#define sysctl_w32_mask(m, clear, set, reg) \
+ sysctl_w32(m, (sysctl_r32(m, reg) & ~(clear)) | (set), reg)
+
+#define status_w32(x, y) ltq_w32((x), status_membase + (y))
+#define status_r32(x) ltq_r32(status_membase + (x))
+
+static void __iomem *sysctl_membase[3], *status_membase;
+void __iomem *ltq_sys1_membase, *ltq_ebu_membase;
+
+void falcon_trigger_hrst(int level)
+{
+ sysctl_w32(SYSCTL_SYS1, level & 1, SYS1_HRSTOUTC);
+}
+
+static inline void sysctl_wait(struct clk *clk,
+ unsigned int test, unsigned int reg)
+{
+ int err = 1000000;
+
+ do {} while (--err && ((sysctl_r32(clk->module, reg)
+ & clk->bits) != test));
+ if (!err)
+ pr_err("module de/activation failed %d %08X %08X %08X\n",
+ clk->module, clk->bits, test,
+ sysctl_r32(clk->module, reg) & clk->bits);
+}
+
+static int sysctl_activate(struct clk *clk)
+{
+ sysctl_w32(clk->module, clk->bits, SYSCTL_CLKEN);
+ sysctl_w32(clk->module, clk->bits, SYSCTL_ACT);
+ sysctl_wait(clk, clk->bits, SYSCTL_ACTS);
+ return 0;
+}
+
+static void sysctl_deactivate(struct clk *clk)
+{
+ sysctl_w32(clk->module, clk->bits, SYSCTL_CLKCLR);
+ sysctl_w32(clk->module, clk->bits, SYSCTL_DEACT);
+ sysctl_wait(clk, 0, SYSCTL_ACTS);
+}
+
+static int sysctl_clken(struct clk *clk)
+{
+ sysctl_w32(clk->module, clk->bits, SYSCTL_CLKEN);
+ sysctl_w32(clk->module, clk->bits, SYSCTL_ACT);
+ sysctl_wait(clk, clk->bits, SYSCTL_CLKS);
+ return 0;
+}
+
+static void sysctl_clkdis(struct clk *clk)
+{
+ sysctl_w32(clk->module, clk->bits, SYSCTL_CLKCLR);
+ sysctl_wait(clk, 0, SYSCTL_CLKS);
+}
+
+static void sysctl_reboot(struct clk *clk)
+{
+ unsigned int act;
+ unsigned int bits;
+
+ act = sysctl_r32(clk->module, SYSCTL_ACT);
+ bits = ~act & clk->bits;
+ if (bits != 0) {
+ sysctl_w32(clk->module, bits, SYSCTL_CLKEN);
+ sysctl_w32(clk->module, bits, SYSCTL_ACT);
+ sysctl_wait(clk, bits, SYSCTL_ACTS);
+ }
+ sysctl_w32(clk->module, act & clk->bits, SYSCTL_RBT);
+ sysctl_wait(clk, clk->bits, SYSCTL_ACTS);
+}
+
+/* enable the ONU core */
+static void falcon_gpe_enable(void)
+{
+ unsigned int freq;
+ unsigned int status;
+
+	/* do nothing if the clock is already enabled */
+ status = sysctl_r32(SYSCTL_SYS1, SYS1_INFRAC);
+ if (status & (1 << (GPPC_OFFSET + 1)))
+ return;
+
+ freq = (status_r32(STATUS_CONFIG) &
+ GPEFREQ_MASK) >>
+ GPEFREQ_OFFSET;
+ if (freq == 0)
+ freq = 1; /* use 625MHz on unfused chip */
+
+ /* apply new frequency */
+ sysctl_w32_mask(SYSCTL_SYS1, 7 << (GPPC_OFFSET + 1),
+			freq << (GPPC_OFFSET + 2), SYS1_INFRAC);
+ udelay(1);
+
+ /* enable new frequency */
+ sysctl_w32_mask(SYSCTL_SYS1, 0, 1 << (GPPC_OFFSET + 1), SYS1_INFRAC);
+ udelay(1);
+}
+
+static inline void clkdev_add_sys(const char *dev, unsigned int module,
+ unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ if (!clk)
+ return;
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = NULL;
+ clk->cl.clk = clk;
+ clk->module = module;
+ clk->bits = bits;
+ clk->activate = sysctl_activate;
+ clk->deactivate = sysctl_deactivate;
+ clk->enable = sysctl_clken;
+ clk->disable = sysctl_clkdis;
+ clk->reboot = sysctl_reboot;
+ clkdev_add(&clk->cl);
+}
+
+void __init ltq_soc_init(void)
+{
+ struct device_node *np_status =
+ of_find_compatible_node(NULL, NULL, "lantiq,status-falcon");
+ struct device_node *np_ebu =
+ of_find_compatible_node(NULL, NULL, "lantiq,ebu-falcon");
+ struct device_node *np_sys1 =
+ of_find_compatible_node(NULL, NULL, "lantiq,sys1-falcon");
+ struct device_node *np_syseth =
+ of_find_compatible_node(NULL, NULL, "lantiq,syseth-falcon");
+ struct device_node *np_sysgpe =
+ of_find_compatible_node(NULL, NULL, "lantiq,sysgpe-falcon");
+ struct resource res_status, res_ebu, res_sys[3];
+ int i;
+
+ /* check if all the core register ranges are available */
+ if (!np_status || !np_ebu || !np_sys1 || !np_syseth || !np_sysgpe)
+ panic("Failed to load core nodes from devicetree");
+
+ if (of_address_to_resource(np_status, 0, &res_status) ||
+ of_address_to_resource(np_ebu, 0, &res_ebu) ||
+ of_address_to_resource(np_sys1, 0, &res_sys[0]) ||
+ of_address_to_resource(np_syseth, 0, &res_sys[1]) ||
+ of_address_to_resource(np_sysgpe, 0, &res_sys[2]))
+ panic("Failed to get core resources");
+
+ if ((request_mem_region(res_status.start, resource_size(&res_status),
+ res_status.name) < 0) ||
+ (request_mem_region(res_ebu.start, resource_size(&res_ebu),
+ res_ebu.name) < 0) ||
+ (request_mem_region(res_sys[0].start,
+ resource_size(&res_sys[0]),
+ res_sys[0].name) < 0) ||
+ (request_mem_region(res_sys[1].start,
+ resource_size(&res_sys[1]),
+ res_sys[1].name) < 0) ||
+ (request_mem_region(res_sys[2].start,
+ resource_size(&res_sys[2]),
+ res_sys[2].name) < 0))
+ pr_err("Failed to request core resources");
+
+ status_membase = ioremap(res_status.start,
+ resource_size(&res_status));
+ ltq_ebu_membase = ioremap(res_ebu.start,
+ resource_size(&res_ebu));
+
+ if (!status_membase || !ltq_ebu_membase)
+ panic("Failed to remap core resources");
+
+ for (i = 0; i < 3; i++) {
+ sysctl_membase[i] = ioremap(res_sys[i].start,
+ resource_size(&res_sys[i]));
+ if (!sysctl_membase[i])
+ panic("Failed to remap sysctrl resources");
+ }
+ ltq_sys1_membase = sysctl_membase[0];
+
+ falcon_gpe_enable();
+
+ /* get our 3 static rates for cpu, fpi and io clocks */
+ if (ltq_sys1_r32(SYS1_CPU0CC) & CPU0CC_CPUDIV)
+ clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M, 0);
+ else
+ clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M, 0);
+
+ /* add our clock domains */
+ clkdev_add_sys("1d810000.gpio", SYSCTL_SYSETH, ACTS_P0);
+ clkdev_add_sys("1d810100.gpio", SYSCTL_SYSETH, ACTS_P2);
+ clkdev_add_sys("1e800100.gpio", SYSCTL_SYS1, ACTS_P1);
+ clkdev_add_sys("1e800200.gpio", SYSCTL_SYS1, ACTS_P3);
+ clkdev_add_sys("1e800300.gpio", SYSCTL_SYS1, ACTS_P4);
+ clkdev_add_sys("1db01000.pad", SYSCTL_SYSETH, ACTS_PADCTRL0);
+ clkdev_add_sys("1db02000.pad", SYSCTL_SYSETH, ACTS_PADCTRL2);
+ clkdev_add_sys("1e800400.pad", SYSCTL_SYS1, ACTS_PADCTRL1);
+ clkdev_add_sys("1e800500.pad", SYSCTL_SYS1, ACTS_PADCTRL3);
+ clkdev_add_sys("1e800600.pad", SYSCTL_SYS1, ACTS_PADCTRL4);
+ clkdev_add_sys("1e100b00.serial", SYSCTL_SYS1, ACTS_ASC1_ACT);
+ clkdev_add_sys("1e100c00.serial", SYSCTL_SYS1, ACTS_ASC0_ACT);
+ clkdev_add_sys("1e100d00.spi", SYSCTL_SYS1, ACTS_SSC0);
+ clkdev_add_sys("1e200000.i2c", SYSCTL_SYS1, ACTS_I2C_ACT);
+}
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
new file mode 100644
index 000000000..43c2f271e
--- /dev/null
+++ b/arch/mips/lantiq/irq.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/irqdomain.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/bootinfo.h>
+#include <asm/irq_cpu.h>
+
+#include <lantiq_soc.h>
+#include <irq.h>
+
+/* register definitions - internal irqs */
+#define LTQ_ICU_ISR 0x0000
+#define LTQ_ICU_IER 0x0008
+#define LTQ_ICU_IOSR 0x0010
+#define LTQ_ICU_IRSR 0x0018
+#define LTQ_ICU_IMR 0x0020
+
+#define LTQ_ICU_IM_SIZE 0x28
+
+/* register definitions - external irqs */
+#define LTQ_EIU_EXIN_C 0x0000
+#define LTQ_EIU_EXIN_INIC 0x0004
+#define LTQ_EIU_EXIN_INC 0x0008
+#define LTQ_EIU_EXIN_INEN 0x000C
+
+/* number of external interrupts */
+#define MAX_EIU 6
+
+/* the performance counter */
+#define LTQ_PERF_IRQ (INT_NUM_IM4_IRL0 + 31)
+
+/*
+ * irqs generated by devices attached to the EBU need to be acked in
+ * a special manner
+ */
+#define LTQ_ICU_EBU_IRQ 22
+
+#define ltq_icu_w32(vpe, m, x, y) \
+ ltq_w32((x), ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (y))
+
+#define ltq_icu_r32(vpe, m, x) \
+ ltq_r32(ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (x))
+
+#define ltq_eiu_w32(x, y) ltq_w32((x), ltq_eiu_membase + (y))
+#define ltq_eiu_r32(x) ltq_r32(ltq_eiu_membase + (x))
+
+/* we have a cascade of 8 irqs */
+#define MIPS_CPU_IRQ_CASCADE 8
+
+static int exin_avail;
+static u32 ltq_eiu_irq[MAX_EIU];
+static void __iomem *ltq_icu_membase[NR_CPUS];
+static void __iomem *ltq_eiu_membase;
+static struct irq_domain *ltq_domain;
+static DEFINE_SPINLOCK(ltq_eiu_lock);
+static DEFINE_RAW_SPINLOCK(ltq_icu_lock);
+static int ltq_perfcount_irq;
+
+int ltq_eiu_get_irq(int exin)
+{
+ if (exin < exin_avail)
+ return ltq_eiu_irq[exin];
+ return -1;
+}
+
+void ltq_disable_irq(struct irq_data *d)
+{
+ unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+ unsigned long im = offset / INT_NUM_IM_OFFSET;
+ unsigned long flags;
+ int vpe;
+
+ offset %= INT_NUM_IM_OFFSET;
+
+ raw_spin_lock_irqsave(&ltq_icu_lock, flags);
+ for_each_present_cpu(vpe) {
+ ltq_icu_w32(vpe, im,
+ ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
+ LTQ_ICU_IER);
+ }
+ raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
+}
+
+void ltq_mask_and_ack_irq(struct irq_data *d)
+{
+ unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+ unsigned long im = offset / INT_NUM_IM_OFFSET;
+ unsigned long flags;
+ int vpe;
+
+ offset %= INT_NUM_IM_OFFSET;
+
+ raw_spin_lock_irqsave(&ltq_icu_lock, flags);
+ for_each_present_cpu(vpe) {
+ ltq_icu_w32(vpe, im,
+ ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
+ LTQ_ICU_IER);
+ ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
+ }
+ raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
+}
+
+static void ltq_ack_irq(struct irq_data *d)
+{
+ unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+ unsigned long im = offset / INT_NUM_IM_OFFSET;
+ unsigned long flags;
+ int vpe;
+
+ offset %= INT_NUM_IM_OFFSET;
+
+ raw_spin_lock_irqsave(&ltq_icu_lock, flags);
+ for_each_present_cpu(vpe) {
+ ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
+ }
+ raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
+}
+
+void ltq_enable_irq(struct irq_data *d)
+{
+ unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+ unsigned long im = offset / INT_NUM_IM_OFFSET;
+ unsigned long flags;
+ int vpe;
+
+ offset %= INT_NUM_IM_OFFSET;
+
+ vpe = cpumask_first(irq_data_get_effective_affinity_mask(d));
+
+	/* This shouldn't even be possible, except during CPU hotplug churn */
+ if (unlikely(vpe >= nr_cpu_ids))
+ vpe = smp_processor_id();
+
+ raw_spin_lock_irqsave(&ltq_icu_lock, flags);
+
+ ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, LTQ_ICU_IER) | BIT(offset),
+ LTQ_ICU_IER);
+
+ raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
+}
+
+static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
+{
+ int i;
+ unsigned long flags;
+
+ for (i = 0; i < exin_avail; i++) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
+ int val = 0;
+ int edge = 0;
+
+ switch (type) {
+ case IRQF_TRIGGER_NONE:
+ break;
+ case IRQF_TRIGGER_RISING:
+ val = 1;
+ edge = 1;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ val = 2;
+ edge = 1;
+ break;
+ case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
+ val = 3;
+ edge = 1;
+ break;
+ case IRQF_TRIGGER_HIGH:
+ val = 5;
+ break;
+ case IRQF_TRIGGER_LOW:
+ val = 6;
+ break;
+ default:
+ pr_err("invalid type %d for irq %ld\n",
+ type, d->hwirq);
+ return -EINVAL;
+ }
+
+ if (edge)
+ irq_set_handler(d->hwirq, handle_edge_irq);
+
+ spin_lock_irqsave(&ltq_eiu_lock, flags);
+ ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
+ (~(7 << (i * 4)))) | (val << (i * 4)),
+ LTQ_EIU_EXIN_C);
+ spin_unlock_irqrestore(&ltq_eiu_lock, flags);
+ }
+ }
+
+ return 0;
+}
+
+static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
+{
+ int i;
+
+ ltq_enable_irq(d);
+ for (i = 0; i < exin_avail; i++) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
+ /* by default we are low level triggered */
+ ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
+ /* clear all pending */
+ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
+ LTQ_EIU_EXIN_INC);
+ /* enable */
+ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
+ LTQ_EIU_EXIN_INEN);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void ltq_shutdown_eiu_irq(struct irq_data *d)
+{
+ int i;
+
+ ltq_disable_irq(d);
+ for (i = 0; i < exin_avail; i++) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
+ /* disable */
+ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
+ LTQ_EIU_EXIN_INEN);
+ break;
+ }
+ }
+}
+
+#if defined(CONFIG_SMP)
+static int ltq_icu_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *cpumask, bool force)
+{
+ struct cpumask tmask;
+
+ if (!cpumask_and(&tmask, cpumask, cpu_online_mask))
+ return -EINVAL;
+
+ irq_data_update_effective_affinity(d, &tmask);
+
+ return IRQ_SET_MASK_OK;
+}
+#endif
+
+static struct irq_chip ltq_irq_type = {
+ .name = "icu",
+ .irq_enable = ltq_enable_irq,
+ .irq_disable = ltq_disable_irq,
+ .irq_unmask = ltq_enable_irq,
+ .irq_ack = ltq_ack_irq,
+ .irq_mask = ltq_disable_irq,
+ .irq_mask_ack = ltq_mask_and_ack_irq,
+#if defined(CONFIG_SMP)
+ .irq_set_affinity = ltq_icu_irq_set_affinity,
+#endif
+};
+
+static struct irq_chip ltq_eiu_type = {
+ .name = "eiu",
+ .irq_startup = ltq_startup_eiu_irq,
+ .irq_shutdown = ltq_shutdown_eiu_irq,
+ .irq_enable = ltq_enable_irq,
+ .irq_disable = ltq_disable_irq,
+ .irq_unmask = ltq_enable_irq,
+ .irq_ack = ltq_ack_irq,
+ .irq_mask = ltq_disable_irq,
+ .irq_mask_ack = ltq_mask_and_ack_irq,
+ .irq_set_type = ltq_eiu_settype,
+#if defined(CONFIG_SMP)
+ .irq_set_affinity = ltq_icu_irq_set_affinity,
+#endif
+};
+
+static void ltq_hw_irq_handler(struct irq_desc *desc)
+{
+ unsigned int module = irq_desc_get_irq(desc) - 2;
+ u32 irq;
+ irq_hw_number_t hwirq;
+ int vpe = smp_processor_id();
+
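+
+	/* find out which interrupts of this module are pending */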
+ irq = ltq_icu_r32(vpe, module, LTQ_ICU_IOSR);
+ if (irq == 0)
+ return;
+
+ /*
+	 * A silicon bug causes only the most significant set bit to be
+	 * valid; all other bits might be bogus.
+ */
+ irq = __fls(irq);
+ hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
+ generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
+
+	/* if this is an EBU irq, we need to ack it or we get a deadlock */
+ if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
+ ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
+ LTQ_EBU_PCC_ISTAT);
+}
+
+static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ struct irq_chip *chip = &ltq_irq_type;
+ struct irq_data *data;
+ int i;
+
+ if (hw < MIPS_CPU_IRQ_CASCADE)
+ return 0;
+
+ for (i = 0; i < exin_avail; i++)
+ if (hw == ltq_eiu_irq[i])
+ chip = &ltq_eiu_type;
+
+ data = irq_get_irq_data(irq);
+
+ irq_data_update_effective_affinity(data, cpumask_of(0));
+
+ irq_set_chip_and_handler(irq, chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops irq_domain_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = icu_map,
+};
+
+int __init icu_of_init(struct device_node *node, struct device_node *parent)
+{
+ struct device_node *eiu_node;
+ struct resource res;
+ int i, ret, vpe;
+
+ /* load register regions of available ICUs */
+ for_each_possible_cpu(vpe) {
+ if (of_address_to_resource(node, vpe, &res))
+ panic("Failed to get icu%i memory range", vpe);
+
+ if (!request_mem_region(res.start, resource_size(&res),
+ res.name))
+ pr_err("Failed to request icu%i memory\n", vpe);
+
+ ltq_icu_membase[vpe] = ioremap(res.start,
+ resource_size(&res));
+
+ if (!ltq_icu_membase[vpe])
+ panic("Failed to remap icu%i memory", vpe);
+ }
+
+ /* turn off all irqs by default */
+ for_each_possible_cpu(vpe) {
+ for (i = 0; i < MAX_IM; i++) {
+ /* make sure all irqs are turned off by default */
+ ltq_icu_w32(vpe, i, 0, LTQ_ICU_IER);
+
+ /* clear all possibly pending interrupts */
+ ltq_icu_w32(vpe, i, ~0, LTQ_ICU_ISR);
+ ltq_icu_w32(vpe, i, ~0, LTQ_ICU_IMR);
+
+ /* clear resend */
+ ltq_icu_w32(vpe, i, 0, LTQ_ICU_IRSR);
+ }
+ }
+
+ mips_cpu_irq_init();
+
+ for (i = 0; i < MAX_IM; i++)
+ irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
+
+ ltq_domain = irq_domain_add_linear(node,
+ (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
+ &irq_domain_ops, 0);
+
+ /* tell oprofile which irq to use */
+ ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
+
+ /* the external interrupts are optional and xway only */
+ eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
+ if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
+ /* find out how many external irq sources we have */
+ exin_avail = of_property_count_u32_elems(eiu_node,
+ "lantiq,eiu-irqs");
+
+ if (exin_avail > MAX_EIU)
+ exin_avail = MAX_EIU;
+
+ ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs",
+ ltq_eiu_irq, exin_avail);
+ if (ret)
+ panic("failed to load external irq resources");
+
+ if (!request_mem_region(res.start, resource_size(&res),
+ res.name))
+ pr_err("Failed to request eiu memory");
+
+ ltq_eiu_membase = ioremap(res.start,
+ resource_size(&res));
+ if (!ltq_eiu_membase)
+ panic("Failed to remap eiu memory");
+ }
+
+ return 0;
+}
+
+int get_c0_perfcount_int(void)
+{
+ return ltq_perfcount_irq;
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+
+unsigned int get_c0_compare_int(void)
+{
+ return CP0_LEGACY_COMPARE_IRQ;
+}
+
+static const struct of_device_id of_irq_ids[] __initconst = {
+ { .compatible = "lantiq,icu", .data = icu_of_init },
+ {},
+};
+
+void __init arch_init_irq(void)
+{
+ of_irq_init(of_irq_ids);
+}
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
new file mode 100644
index 000000000..2729a4b63
--- /dev/null
+++ b/arch/mips/lantiq/prom.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/export.h>
+#include <linux/clk.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+#include <asm/prom.h>
+
+#include <lantiq.h>
+
+#include "prom.h"
+#include "clk.h"
+
+/* access to the ebu needs to be locked between different drivers */
+DEFINE_SPINLOCK(ebu_lock);
+EXPORT_SYMBOL_GPL(ebu_lock);
+
+/*
+ * this struct is filled by the soc specific detection code and holds
+ * information about the specific soc type, revision and name
+ */
+static struct ltq_soc_info soc_info;
+
+const char *get_system_type(void)
+{
+ return soc_info.sys_type;
+}
+
+int ltq_soc_type(void)
+{
+ return soc_info.type;
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
+
+static void __init prom_init_cmdline(void)
+{
+ int argc = fw_arg0;
+ char **argv = (char **) KSEG1ADDR(fw_arg1);
+ int i;
+
+ arcs_cmdline[0] = '\0';
+
+ for (i = 0; i < argc; i++) {
+ char *p = (char *) KSEG1ADDR(argv[i]);
+
+ if (CPHYSADDR(p) && *p) {
+ strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
+ strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
+ }
+ }
+}
+
+void __init plat_mem_setup(void)
+{
+ void *dtb;
+
+ ioport_resource.start = IOPORT_RESOURCE_START;
+ ioport_resource.end = IOPORT_RESOURCE_END;
+ iomem_resource.start = IOMEM_RESOURCE_START;
+ iomem_resource.end = IOMEM_RESOURCE_END;
+
+ set_io_port_base((unsigned long) KSEG1);
+
+ if (fw_passed_dtb) /* UHI interface */
+ dtb = (void *)fw_passed_dtb;
+ else if (&__dtb_start != &__dtb_end)
+ dtb = (void *)__dtb_start;
+ else
+ panic("no dtb found");
+
+ /*
+	 * Load the devicetree. This causes the chosen node to be
+	 * parsed, resulting in our memory regions appearing.
+ */
+ __dt_setup_arch(dtb);
+}
+
+void __init device_tree_init(void)
+{
+ unflatten_and_copy_device_tree();
+}
+
+void __init prom_init(void)
+{
+	/* call the soc specific detection code and get it to fill soc_info */
+ ltq_soc_detect(&soc_info);
+ snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev %s",
+ soc_info.name, soc_info.rev_type);
+ soc_info.sys_type[LTQ_SYS_TYPE_LEN - 1] = '\0';
+ pr_info("SoC: %s\n", soc_info.sys_type);
+ prom_init_cmdline();
+
+#if defined(CONFIG_MIPS_MT_SMP)
+ if (register_vsmp_smp_ops())
+ panic("failed to register_vsmp_smp_ops()");
+#endif
+}
diff --git a/arch/mips/lantiq/prom.h b/arch/mips/lantiq/prom.h
new file mode 100644
index 000000000..5cd29c6b3
--- /dev/null
+++ b/arch/mips/lantiq/prom.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#ifndef _LTQ_PROM_H__
+#define _LTQ_PROM_H__
+
+#define LTQ_SYS_TYPE_LEN 0x100
+#define LTQ_SYS_REV_LEN 0x10
+
+struct ltq_soc_info {
+ unsigned char *name;
+ unsigned int rev;
+ unsigned char rev_type[LTQ_SYS_REV_LEN];
+ unsigned int srev;
+ unsigned int partnum;
+ unsigned int type;
+ unsigned char sys_type[LTQ_SYS_TYPE_LEN];
+ unsigned char *compatible;
+};
+
+extern void ltq_soc_detect(struct ltq_soc_info *i);
+extern void ltq_soc_init(void);
+
+#endif
diff --git a/arch/mips/lantiq/xway/Makefile b/arch/mips/lantiq/xway/Makefile
new file mode 100644
index 000000000..c0f02dab7
--- /dev/null
+++ b/arch/mips/lantiq/xway/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y := prom.o sysctrl.o clk.o dma.o gptu.o dcdc.o
+
+obj-y += vmmc.o
diff --git a/arch/mips/lantiq/xway/clk.c b/arch/mips/lantiq/xway/clk.c
new file mode 100644
index 000000000..47ad21430
--- /dev/null
+++ b/arch/mips/lantiq/xway/clk.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ * Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
+ */
+
+#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/clk.h>
+
+#include <asm/time.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+
+static unsigned int ram_clocks[] = {
+ CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
+#define DDR_HZ ram_clocks[ltq_cgu_r32(CGU_SYS) & 0x3]
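+/* bits [1:0] of CGU_SYS select the current RAM/DDR clock from ram_clocks[] */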
+
+/* legacy xway clock */
+#define CGU_SYS 0x10
+
+/* vr9, ar10/grx390 clock */
+#define CGU_SYS_XRX 0x0c
+#define CGU_IF_CLK_AR10 0x24
+
+unsigned long ltq_danube_fpi_hz(void)
+{
+ unsigned long ddr_clock = DDR_HZ;
+
+ if (ltq_cgu_r32(CGU_SYS) & 0x40)
+ return ddr_clock >> 1;
+ return ddr_clock;
+}
+
+unsigned long ltq_danube_cpu_hz(void)
+{
+ switch (ltq_cgu_r32(CGU_SYS) & 0xc) {
+ case 0:
+ return CLOCK_333M;
+ case 4:
+ return DDR_HZ;
+ case 8:
+ return DDR_HZ << 1;
+ default:
+ return DDR_HZ >> 1;
+ }
+}
+
+unsigned long ltq_danube_pp32_hz(void)
+{
+ unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 7) & 3;
+ unsigned long clk;
+
+ switch (clksys) {
+ case 1:
+ clk = CLOCK_240M;
+ break;
+ case 2:
+ clk = CLOCK_222M;
+ break;
+ case 3:
+ clk = CLOCK_133M;
+ break;
+ default:
+ clk = CLOCK_266M;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_ar9_sys_hz(void)
+{
+ if (((ltq_cgu_r32(CGU_SYS) >> 3) & 0x3) == 0x2)
+ return CLOCK_393M;
+ return CLOCK_333M;
+}
+
+unsigned long ltq_ar9_fpi_hz(void)
+{
+ unsigned long sys = ltq_ar9_sys_hz();
+
+ if (ltq_cgu_r32(CGU_SYS) & BIT(0))
+ return sys / 3;
+ else
+ return sys / 2;
+}
+
+unsigned long ltq_ar9_cpu_hz(void)
+{
+ if (ltq_cgu_r32(CGU_SYS) & BIT(2))
+ return ltq_ar9_fpi_hz();
+ else
+ return ltq_ar9_sys_hz();
+}
+
+unsigned long ltq_vr9_cpu_hz(void)
+{
+ unsigned int cpu_sel;
+ unsigned long clk;
+
+ cpu_sel = (ltq_cgu_r32(CGU_SYS_XRX) >> 4) & 0xf;
+
+ switch (cpu_sel) {
+ case 0:
+ clk = CLOCK_600M;
+ break;
+ case 1:
+ clk = CLOCK_500M;
+ break;
+ case 2:
+ clk = CLOCK_393M;
+ break;
+ case 3:
+ clk = CLOCK_333M;
+ break;
+ case 5:
+ case 6:
+ clk = CLOCK_196_608M;
+ break;
+ case 7:
+ clk = CLOCK_167M;
+ break;
+ case 4:
+ case 8:
+ case 9:
+ clk = CLOCK_125M;
+ break;
+ default:
+ clk = 0;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_vr9_fpi_hz(void)
+{
+ unsigned int ocp_sel, cpu_clk;
+ unsigned long clk;
+
+ cpu_clk = ltq_vr9_cpu_hz();
+ ocp_sel = ltq_cgu_r32(CGU_SYS_XRX) & 0x3;
+
+ switch (ocp_sel) {
+ case 0:
+ /* OCP ratio 1 */
+ clk = cpu_clk;
+ break;
+ case 2:
+ /* OCP ratio 2 */
+ clk = cpu_clk / 2;
+ break;
+ case 3:
+ /* OCP ratio 2.5 */
+ clk = (cpu_clk * 2) / 5;
+ break;
+ case 4:
+ /* OCP ratio 3 */
+ clk = cpu_clk / 3;
+ break;
+ default:
+ clk = 0;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_vr9_pp32_hz(void)
+{
+ unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 16) & 0x7;
+ unsigned long clk;
+
+ switch (clksys) {
+ case 0:
+ clk = CLOCK_500M;
+ break;
+ case 1:
+ clk = CLOCK_432M;
+ break;
+ case 2:
+ clk = CLOCK_288M;
+ break;
+ default:
+ clk = CLOCK_500M;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_ar10_cpu_hz(void)
+{
+ unsigned int clksys;
+ int cpu_fs = (ltq_cgu_r32(CGU_SYS_XRX) >> 8) & 0x1;
+ int freq_div = (ltq_cgu_r32(CGU_SYS_XRX) >> 4) & 0x7;
+
+ switch (cpu_fs) {
+ case 0:
+ clksys = CLOCK_500M;
+ break;
+ case 1:
+ clksys = CLOCK_600M;
+ break;
+ default:
+ clksys = CLOCK_500M;
+ break;
+ }
+
+ switch (freq_div) {
+ case 0:
+ return clksys;
+ case 1:
+ return clksys >> 1;
+ case 2:
+ return clksys >> 2;
+ default:
+ return clksys;
+ }
+}
+
+unsigned long ltq_ar10_fpi_hz(void)
+{
+ int freq_fpi = (ltq_cgu_r32(CGU_IF_CLK_AR10) >> 25) & 0xf;
+
+ switch (freq_fpi) {
+ case 1:
+ return CLOCK_300M;
+ case 5:
+ return CLOCK_250M;
+ case 2:
+ return CLOCK_150M;
+ case 6:
+ return CLOCK_125M;
+
+ default:
+ return CLOCK_125M;
+ }
+}
+
+unsigned long ltq_ar10_pp32_hz(void)
+{
+ unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 16) & 0x7;
+ unsigned long clk;
+
+ switch (clksys) {
+ case 1:
+ clk = CLOCK_250M;
+ break;
+ case 4:
+ clk = CLOCK_400M;
+ break;
+ default:
+ clk = CLOCK_250M;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_grx390_cpu_hz(void)
+{
+ unsigned int clksys;
+ int cpu_fs = ((ltq_cgu_r32(CGU_SYS_XRX) >> 9) & 0x3);
+ int freq_div = ((ltq_cgu_r32(CGU_SYS_XRX) >> 4) & 0x7);
+
+ switch (cpu_fs) {
+ case 0:
+ clksys = CLOCK_600M;
+ break;
+ case 1:
+ clksys = CLOCK_666M;
+ break;
+ case 2:
+ clksys = CLOCK_720M;
+ break;
+ default:
+ clksys = CLOCK_600M;
+ break;
+ }
+
+ switch (freq_div) {
+ case 0:
+ return clksys;
+ case 1:
+ return clksys >> 1;
+ case 2:
+ return clksys >> 2;
+ default:
+ return clksys;
+ }
+}
+
+unsigned long ltq_grx390_fpi_hz(void)
+{
+ /* fpi clock is derived from ddr_clk */
+ unsigned int clksys;
+ int cpu_fs = ((ltq_cgu_r32(CGU_SYS_XRX) >> 9) & 0x3);
+ int freq_div = ((ltq_cgu_r32(CGU_SYS_XRX)) & 0x7);
+ switch (cpu_fs) {
+ case 0:
+ clksys = CLOCK_600M;
+ break;
+ case 1:
+ clksys = CLOCK_666M;
+ break;
+ case 2:
+ clksys = CLOCK_720M;
+ break;
+ default:
+ clksys = CLOCK_600M;
+ break;
+ }
+
+ switch (freq_div) {
+ case 1:
+ return clksys >> 1;
+ case 2:
+ return clksys >> 2;
+ default:
+ return clksys >> 1;
+ }
+}
+
+unsigned long ltq_grx390_pp32_hz(void)
+{
+ unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 16) & 0x7;
+ unsigned long clk;
+
+ switch (clksys) {
+ case 1:
+ clk = CLOCK_250M;
+ break;
+ case 2:
+ clk = CLOCK_432M;
+ break;
+ case 4:
+ clk = CLOCK_400M;
+ break;
+ default:
+ clk = CLOCK_250M;
+ break;
+ }
+ return clk;
+}
diff --git a/arch/mips/lantiq/xway/dcdc.c b/arch/mips/lantiq/xway/dcdc.c
new file mode 100644
index 000000000..4960bee0a
--- /dev/null
+++ b/arch/mips/lantiq/xway/dcdc.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Sameer Ahmad, Lantiq GmbH
+ */
+
+#include <linux/ioport.h>
+#include <linux/of_platform.h>
+
+#include <lantiq_soc.h>
+
+/* Bias and regulator Setup Register */
+#define DCDC_BIAS_VREG0 0xa
+/* Bias and regulator Setup Register */
+#define DCDC_BIAS_VREG1 0xb
+
+#define dcdc_w8(x, y) ltq_w8((x), dcdc_membase + (y))
+#define dcdc_r8(x) ltq_r8(dcdc_membase + (x))
+
+static void __iomem *dcdc_membase;
+
+static int dcdc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dcdc_membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dcdc_membase))
+ return PTR_ERR(dcdc_membase);
+
+ dev_info(&pdev->dev, "Core Voltage : %d mV\n",
+ dcdc_r8(DCDC_BIAS_VREG1) * 8);
+
+ return 0;
+}
+
+static const struct of_device_id dcdc_match[] = {
+ { .compatible = "lantiq,dcdc-xrx200" },
+ {},
+};
+
+static struct platform_driver dcdc_driver = {
+ .probe = dcdc_probe,
+ .driver = {
+ .name = "dcdc-xrx200",
+ .of_match_table = dcdc_match,
+ },
+};
+
+int __init dcdc_init(void)
+{
+ int ret = platform_driver_register(&dcdc_driver);
+
+ if (ret)
+ pr_info("dcdc: Error registering platform driver\n");
+ return ret;
+}
+
+arch_initcall(dcdc_init);
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
new file mode 100644
index 000000000..ab13e2571
--- /dev/null
+++ b/arch/mips/lantiq/xway/dma.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2011 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#include <lantiq_soc.h>
+#include <xway_dma.h>
+
+#define LTQ_DMA_ID 0x08
+#define LTQ_DMA_CTRL 0x10
+#define LTQ_DMA_CPOLL 0x14
+#define LTQ_DMA_CS 0x18
+#define LTQ_DMA_CCTRL 0x1C
+#define LTQ_DMA_CDBA 0x20
+#define LTQ_DMA_CDLEN 0x24
+#define LTQ_DMA_CIS 0x28
+#define LTQ_DMA_CIE 0x2C
+#define LTQ_DMA_PS 0x40
+#define LTQ_DMA_PCTRL 0x44
+#define LTQ_DMA_IRNEN 0xf4
+
+#define DMA_ID_CHNR GENMASK(26, 20) /* channel number */
+#define DMA_DESCPT BIT(3) /* descriptor complete irq */
+#define DMA_TX BIT(8) /* TX channel direction */
+#define DMA_CHAN_ON BIT(0) /* channel on / off bit */
+#define DMA_PDEN BIT(6) /* enable packet drop */
+#define DMA_CHAN_RST		BIT(1)		/* channel reset bit */
+#define DMA_RESET		BIT(0)		/* global dma engine reset */
+#define DMA_IRQ_ACK		0x7e		/* acknowledge all channel irqs */
+#define DMA_POLL BIT(31) /* turn on channel polling */
+#define DMA_CLK_DIV4 BIT(6) /* polling clock divider */
+#define DMA_PCTRL_2W_BURST 0x1 /* 2 word burst length */
+#define DMA_PCTRL_4W_BURST 0x2 /* 4 word burst length */
+#define DMA_PCTRL_8W_BURST 0x3 /* 8 word burst length */
+#define DMA_TX_BURST_SHIFT 4 /* tx burst shift */
+#define DMA_RX_BURST_SHIFT 2 /* rx burst shift */
+#define DMA_ETOP_ENDIANNESS (0xf << 8) /* endianness swap etop channels */
+#define DMA_WEIGHT	(BIT(17) | BIT(16))	/* default channel weight */
+
+#define ltq_dma_r32(x) ltq_r32(ltq_dma_membase + (x))
+#define ltq_dma_w32(x, y) ltq_w32(x, ltq_dma_membase + (y))
+#define ltq_dma_w32_mask(x, y, z) ltq_w32_mask(x, y, \
+ ltq_dma_membase + (z))
+
+static void __iomem *ltq_dma_membase;
+static DEFINE_SPINLOCK(ltq_dma_lock);
+
+void
+ltq_dma_enable_irq(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);
+
+void
+ltq_dma_disable_irq(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);
+
+void
+ltq_dma_ack_irq(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);
+
+void
+ltq_dma_open(struct ltq_dma_channel *ch)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&ltq_dma_lock, flag);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
+ spin_unlock_irqrestore(&ltq_dma_lock, flag);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_open);
+
+void
+ltq_dma_close(struct ltq_dma_channel *ch)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&ltq_dma_lock, flag);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
+ ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
+ spin_unlock_irqrestore(&ltq_dma_lock, flag);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_close);
+
+static void
+ltq_dma_alloc(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ ch->desc = 0;
+ ch->desc_base = dma_alloc_coherent(ch->dev,
+ LTQ_DESC_NUM * LTQ_DESC_SIZE,
+ &ch->phys, GFP_ATOMIC);
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
+ ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
+ ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
+ wmb();
+ ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
+ while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
+ ;
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+
+void
+ltq_dma_alloc_tx(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ ltq_dma_alloc(ch);
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
+ ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+ ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);
+
+void
+ltq_dma_alloc_rx(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ ltq_dma_alloc(ch);
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
+ ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+ ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);
+
+void
+ltq_dma_free(struct ltq_dma_channel *ch)
+{
+ if (!ch->desc_base)
+ return;
+ ltq_dma_close(ch);
+ dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE,
+ ch->desc_base, ch->phys);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_free);
+
+void
+ltq_dma_init_port(int p)
+{
+ ltq_dma_w32(p, LTQ_DMA_PS);
+ switch (p) {
+ case DMA_PORT_ETOP:
+ /*
+ * Tell the DMA engine to swap the endianness of data frames and
+ * drop packets if the channel arbitration fails.
+ */
+ ltq_dma_w32_mask(0, DMA_ETOP_ENDIANNESS | DMA_PDEN,
+ LTQ_DMA_PCTRL);
+ break;
+
+ case DMA_PORT_DEU:
+ ltq_dma_w32((DMA_PCTRL_2W_BURST << DMA_TX_BURST_SHIFT) |
+ (DMA_PCTRL_2W_BURST << DMA_RX_BURST_SHIFT),
+ LTQ_DMA_PCTRL);
+ break;
+
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ltq_dma_init_port);
+
+static int
+ltq_dma_init(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct resource *res;
+ unsigned int id, nchannels;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ltq_dma_membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ltq_dma_membase))
+ panic("Failed to remap dma resource");
+
+ /* power up and reset the dma engine */
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ panic("Failed to get dma clock");
+
+ clk_enable(clk);
+ ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL);
+
+ usleep_range(1, 10);
+
+ /* disable all interrupts */
+ ltq_dma_w32(0, LTQ_DMA_IRNEN);
+
+ /* reset/configure each channel */
+ id = ltq_dma_r32(LTQ_DMA_ID);
+ nchannels = ((id & DMA_ID_CHNR) >> 20);
+ for (i = 0; i < nchannels; i++) {
+ ltq_dma_w32(i, LTQ_DMA_CS);
+ ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL);
+ ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
+ ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
+ }
+
+ dev_info(&pdev->dev,
+ "Init done - hw rev: %X, ports: %d, channels: %d\n",
+ id & 0x1f, (id >> 16) & 0xf, nchannels);
+
+ return 0;
+}
+
+static const struct of_device_id dma_match[] = {
+ { .compatible = "lantiq,dma-xway" },
+ {},
+};
+
+static struct platform_driver dma_driver = {
+ .probe = ltq_dma_init,
+ .driver = {
+ .name = "dma-xway",
+ .of_match_table = dma_match,
+ },
+};
+
+int __init
+dma_init(void)
+{
+ return platform_driver_register(&dma_driver);
+}
+
+postcore_initcall(dma_init);
diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c
new file mode 100644
index 000000000..200fe9ff6
--- /dev/null
+++ b/arch/mips/lantiq/xway/gptu.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2012 Lantiq GmbH
+ */
+
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+
+#include <lantiq_soc.h>
+#include "../clk.h"
+
+/* the magic ID byte of the core */
+#define GPTU_MAGIC 0x59
+/* clock control register */
+#define GPTU_CLC 0x00
+/* id register */
+#define GPTU_ID 0x08
+/* interrupt node enable */
+#define GPTU_IRNEN 0xf4
+/* interrupt control register */
+#define GPTU_IRCR 0xf8
+/* interrupt capture register */
+#define GPTU_IRNCR 0xfc
+/* there are 3 identical blocks of 2 timers. calculate register offsets */
+#define GPTU_SHIFT(x) (x % 2 ? 4 : 0)
+#define GPTU_BASE(x) (((x >> 1) * 0x20) + 0x10)
+/* timer control register */
+#define GPTU_CON(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x00)
+/* timer auto reload register */
+#define GPTU_RUN(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x08)
+/* timer manual reload register */
+#define GPTU_RLD(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x10)
+/* timer count register */
+#define GPTU_CNT(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x18)
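+/*
+ * Example: timer index 3 (block 2, timer B) yields base 0x30 and shift 4,
+ * so GPTU_CON(3) is at offset 0x34 and GPTU_CNT(3) at offset 0x4c.
+ */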
+
+/* GPTU_CON(x) */
+#define CON_CNT BIT(2)
+#define CON_EDGE_ANY (BIT(7) | BIT(6))
+#define CON_SYNC BIT(8)
+#define CON_CLK_INT BIT(10)
+
+/* GPTU_RUN(x) */
+#define RUN_SEN BIT(0)
+#define RUN_RL BIT(2)
+
+/* set clock to runmode */
+#define CLC_RMC BIT(8)
+/* bring core out of suspend */
+#define CLC_SUSPEND BIT(4)
+/* the disable bit */
+#define CLC_DISABLE BIT(0)
+
+#define gptu_w32(x, y) ltq_w32((x), gptu_membase + (y))
+#define gptu_r32(x) ltq_r32(gptu_membase + (x))
+
+enum gptu_timer {
+ TIMER1A = 0,
+ TIMER1B,
+ TIMER2A,
+ TIMER2B,
+ TIMER3A,
+ TIMER3B
+};
+
+static void __iomem *gptu_membase;
+static struct resource irqres[6];
+
+static irqreturn_t timer_irq_handler(int irq, void *priv)
+{
+ int timer = irq - irqres[0].start;
+ gptu_w32(1 << timer, GPTU_IRNCR);
+ return IRQ_HANDLED;
+}
+
+static void gptu_hwinit(void)
+{
+ gptu_w32(0x00, GPTU_IRNEN);
+ gptu_w32(0xff, GPTU_IRNCR);
+ gptu_w32(CLC_RMC | CLC_SUSPEND, GPTU_CLC);
+}
+
+static void gptu_hwexit(void)
+{
+ gptu_w32(0x00, GPTU_IRNEN);
+ gptu_w32(0xff, GPTU_IRNCR);
+ gptu_w32(CLC_DISABLE, GPTU_CLC);
+}
+
+static int gptu_enable(struct clk *clk)
+{
+ int ret = request_irq(irqres[clk->bits].start, timer_irq_handler,
+ IRQF_TIMER, "gtpu", NULL);
+ if (ret) {
+ pr_err("gptu: failed to request irq\n");
+ return ret;
+ }
+
+ gptu_w32(CON_CNT | CON_EDGE_ANY | CON_SYNC | CON_CLK_INT,
+ GPTU_CON(clk->bits));
+ gptu_w32(1, GPTU_RLD(clk->bits));
+ gptu_w32(gptu_r32(GPTU_IRNEN) | BIT(clk->bits), GPTU_IRNEN);
+ gptu_w32(RUN_SEN | RUN_RL, GPTU_RUN(clk->bits));
+ return 0;
+}
+
+static void gptu_disable(struct clk *clk)
+{
+ gptu_w32(0, GPTU_RUN(clk->bits));
+ gptu_w32(0, GPTU_CON(clk->bits));
+ gptu_w32(0, GPTU_RLD(clk->bits));
+ gptu_w32(gptu_r32(GPTU_IRNEN) & ~BIT(clk->bits), GPTU_IRNEN);
+ free_irq(irqres[clk->bits].start, NULL);
+}
+
+static inline void clkdev_add_gptu(struct device *dev, const char *con,
+ unsigned int timer)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ if (!clk)
+ return;
+ clk->cl.dev_id = dev_name(dev);
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+ clk->enable = gptu_enable;
+ clk->disable = gptu_disable;
+ clk->bits = timer;
+ clkdev_add(&clk->cl);
+}
+
+static int gptu_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct resource *res;
+
+ if (of_irq_to_resource_table(pdev->dev.of_node, irqres, 6) != 6) {
+ dev_err(&pdev->dev, "Failed to get IRQ list\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ /* remap gptu register range */
+ gptu_membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gptu_membase))
+ return PTR_ERR(gptu_membase);
+
+ /* enable our clock */
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ return -ENOENT;
+ }
+ clk_enable(clk);
+
+ /* power up the core */
+ gptu_hwinit();
+
+	/* the gptu has an ID register */
+ if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) {
+ dev_err(&pdev->dev, "Failed to find magic\n");
+ gptu_hwexit();
+ clk_disable(clk);
+ clk_put(clk);
+ return -ENAVAIL;
+ }
+
+ /* register the clocks */
+ clkdev_add_gptu(&pdev->dev, "timer1a", TIMER1A);
+ clkdev_add_gptu(&pdev->dev, "timer1b", TIMER1B);
+ clkdev_add_gptu(&pdev->dev, "timer2a", TIMER2A);
+ clkdev_add_gptu(&pdev->dev, "timer2b", TIMER2B);
+ clkdev_add_gptu(&pdev->dev, "timer3a", TIMER3A);
+ clkdev_add_gptu(&pdev->dev, "timer3b", TIMER3B);
+
+ dev_info(&pdev->dev, "gptu: 6 timers loaded\n");
+
+ return 0;
+}
+
+static const struct of_device_id gptu_match[] = {
+ { .compatible = "lantiq,gptu-xway" },
+ {},
+};
+
+static struct platform_driver dma_driver = {
+ .probe = gptu_probe,
+ .driver = {
+ .name = "gptu-xway",
+ .of_match_table = gptu_match,
+ },
+};
+
+int __init gptu_init(void)
+{
+ int ret = platform_driver_register(&dma_driver);
+
+ if (ret)
+ pr_info("gptu: Error registering platform driver\n");
+ return ret;
+}
+
+arch_initcall(gptu_init);
diff --git a/arch/mips/lantiq/xway/prom.c b/arch/mips/lantiq/xway/prom.c
new file mode 100644
index 000000000..544619754
--- /dev/null
+++ b/arch/mips/lantiq/xway/prom.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ * Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
+ */
+
+#include <linux/export.h>
+#include <linux/clk.h>
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+
+#include <lantiq_soc.h>
+
+#include "../prom.h"
+
+#define SOC_DANUBE "Danube"
+#define SOC_TWINPASS "Twinpass"
+#define SOC_AMAZON_SE "Amazon_SE"
+#define SOC_AR9 "AR9"
+#define SOC_GR9 "GRX200"
+#define SOC_VR9 "xRX200"
+#define SOC_VRX220 "xRX220"
+#define SOC_AR10 "xRX300"
+#define SOC_GRX390 "xRX330"
+
+#define COMP_DANUBE "lantiq,danube"
+#define COMP_TWINPASS "lantiq,twinpass"
+#define COMP_AMAZON_SE "lantiq,ase"
+#define COMP_AR9 "lantiq,ar9"
+#define COMP_GR9 "lantiq,gr9"
+#define COMP_VR9 "lantiq,vr9"
+#define COMP_AR10 "lantiq,ar10"
+#define COMP_GRX390 "lantiq,grx390"
+
+#define PART_SHIFT 12
+#define PART_MASK 0x0FFFFFFF
+#define REV_SHIFT 28
+#define REV_MASK 0xF0000000
+
+void __init ltq_soc_detect(struct ltq_soc_info *i)
+{
+ i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
+ i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
+ sprintf(i->rev_type, "1.%d", i->rev);
+ switch (i->partnum) {
+ case SOC_ID_DANUBE1:
+ case SOC_ID_DANUBE2:
+ i->name = SOC_DANUBE;
+ i->type = SOC_TYPE_DANUBE;
+ i->compatible = COMP_DANUBE;
+ break;
+
+ case SOC_ID_TWINPASS:
+ i->name = SOC_TWINPASS;
+ i->type = SOC_TYPE_DANUBE;
+ i->compatible = COMP_TWINPASS;
+ break;
+
+ case SOC_ID_ARX188:
+ case SOC_ID_ARX168_1:
+ case SOC_ID_ARX168_2:
+ case SOC_ID_ARX182:
+ i->name = SOC_AR9;
+ i->type = SOC_TYPE_AR9;
+ i->compatible = COMP_AR9;
+ break;
+
+ case SOC_ID_GRX188:
+ case SOC_ID_GRX168:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_AR9;
+ i->compatible = COMP_GR9;
+ break;
+
+ case SOC_ID_AMAZON_SE_1:
+ case SOC_ID_AMAZON_SE_2:
+#ifdef CONFIG_PCI
+ panic("ase is only supported for non pci kernels");
+#endif
+ i->name = SOC_AMAZON_SE;
+ i->type = SOC_TYPE_AMAZON_SE;
+ i->compatible = COMP_AMAZON_SE;
+ break;
+
+ case SOC_ID_VRX282:
+ case SOC_ID_VRX268:
+ case SOC_ID_VRX288:
+ i->name = SOC_VR9;
+ i->type = SOC_TYPE_VR9;
+ i->compatible = COMP_VR9;
+ break;
+
+ case SOC_ID_GRX268:
+ case SOC_ID_GRX288:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_VR9;
+ i->compatible = COMP_GR9;
+ break;
+
+ case SOC_ID_VRX268_2:
+ case SOC_ID_VRX288_2:
+ i->name = SOC_VR9;
+ i->type = SOC_TYPE_VR9_2;
+ i->compatible = COMP_VR9;
+ break;
+
+ case SOC_ID_VRX220:
+ i->name = SOC_VRX220;
+ i->type = SOC_TYPE_VRX220;
+ i->compatible = COMP_VR9;
+ break;
+
+ case SOC_ID_GRX282_2:
+ case SOC_ID_GRX288_2:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_VR9_2;
+ i->compatible = COMP_GR9;
+ break;
+
+ case SOC_ID_ARX362:
+ case SOC_ID_ARX368:
+ case SOC_ID_ARX382:
+ case SOC_ID_ARX388:
+ case SOC_ID_URX388:
+ i->name = SOC_AR10;
+ i->type = SOC_TYPE_AR10;
+ i->compatible = COMP_AR10;
+ break;
+
+ case SOC_ID_GRX383:
+ case SOC_ID_GRX369:
+ case SOC_ID_GRX387:
+ case SOC_ID_GRX389:
+ i->name = SOC_GRX390;
+ i->type = SOC_TYPE_GRX390;
+ i->compatible = COMP_GRX390;
+ break;
+
+ default:
+ unreachable();
+ break;
+ }
+}
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
new file mode 100644
index 000000000..084f6caba
--- /dev/null
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2011-2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
+ */
+
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/clkdev.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+#include "../prom.h"
+
+/* clock control register for legacy */
+#define CGU_IFCCR 0x0018
+#define CGU_IFCCR_VR9 0x0024
+/* system clock register for legacy */
+#define CGU_SYS 0x0010
+/* pci control register */
+#define CGU_PCICR 0x0034
+#define CGU_PCICR_VR9 0x0038
+/* ephy configuration register */
+#define CGU_EPHY 0x10
+
+/* Legacy PMU register for ar9, ase, danube */
+/* power control register */
+#define PMU_PWDCR 0x1C
+/* power status register */
+#define PMU_PWDSR 0x20
+/* power control register */
+#define PMU_PWDCR1 0x24
+/* power status register */
+#define PMU_PWDSR1 0x28
+/* power control register */
+#define PWDCR(x) ((x) ? (PMU_PWDCR1) : (PMU_PWDCR))
+/* power status register */
+#define PWDSR(x) ((x) ? (PMU_PWDSR1) : (PMU_PWDSR))
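+/* a zero module selects PMU_PWDCR/PMU_PWDSR, a non-zero one PMU_PWDCR1/PMU_PWDSR1 */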
+
+
+/* PMU register for ar10 and grx390 */
+
+/* First register set */
+#define PMU_CLK_SR 0x20 /* status */
+#define PMU_CLK_CR_A 0x24 /* Enable */
+#define PMU_CLK_CR_B 0x28 /* Disable */
+/* Second register set */
+#define PMU_CLK_SR1 0x30 /* status */
+#define PMU_CLK_CR1_A 0x34 /* Enable */
+#define PMU_CLK_CR1_B 0x38 /* Disable */
+/* Third register set */
+#define PMU_ANA_SR 0x40 /* status */
+#define PMU_ANA_CR_A 0x44 /* Enable */
+#define PMU_ANA_CR_B 0x48 /* Disable */
+
+/* Status */
+static u32 pmu_clk_sr[] = {
+ PMU_CLK_SR,
+ PMU_CLK_SR1,
+ PMU_ANA_SR,
+};
+
+/* Enable */
+static u32 pmu_clk_cr_a[] = {
+ PMU_CLK_CR_A,
+ PMU_CLK_CR1_A,
+ PMU_ANA_CR_A,
+};
+
+/* Disable */
+static u32 pmu_clk_cr_b[] = {
+ PMU_CLK_CR_B,
+ PMU_CLK_CR1_B,
+ PMU_ANA_CR_B,
+};
+
+#define PWDCR_EN_XRX(x) (pmu_clk_cr_a[(x)])
+#define PWDCR_DIS_XRX(x) (pmu_clk_cr_b[(x)])
+#define PWDSR_XRX(x) (pmu_clk_sr[(x)])
+
+/* clock gates that we can en/disable */
+#define PMU_USB0_P BIT(0)
+#define PMU_ASE_SDIO BIT(2) /* ASE special */
+#define PMU_PCI BIT(4)
+#define PMU_DMA BIT(5)
+#define PMU_USB0 BIT(6)
+#define PMU_ASC0 BIT(7)
+#define PMU_EPHY BIT(7) /* ase */
+#define PMU_USIF BIT(7) /* from vr9 until grx390 */
+#define PMU_SPI BIT(8)
+#define PMU_DFE BIT(9)
+#define PMU_EBU BIT(10)
+#define PMU_STP BIT(11)
+#define PMU_GPT BIT(12)
+#define PMU_AHBS BIT(13) /* vr9 */
+#define PMU_FPI BIT(14)
+#define PMU_AHBM BIT(15)
+#define PMU_SDIO BIT(16) /* danube, ar9, vr9 */
+#define PMU_ASC1 BIT(17)
+#define PMU_PPE_QSB BIT(18)
+#define PMU_PPE_SLL01 BIT(19)
+#define PMU_DEU BIT(20)
+#define PMU_PPE_TC BIT(21)
+#define PMU_PPE_EMA BIT(22)
+#define PMU_PPE_DPLUM BIT(23)
+#define PMU_PPE_DP BIT(23)
+#define PMU_PPE_DPLUS BIT(24)
+#define PMU_USB1_P BIT(26)
+#define PMU_GPHY3 BIT(26) /* grx390 */
+#define PMU_USB1 BIT(27)
+#define PMU_SWITCH BIT(28)
+#define PMU_PPE_TOP BIT(29)
+#define PMU_GPHY0 BIT(29) /* ar10, xrx390 */
+#define PMU_GPHY BIT(30)
+#define PMU_GPHY1 BIT(30) /* ar10, xrx390 */
+#define PMU_PCIE_CLK BIT(31)
+#define PMU_GPHY2 BIT(31) /* ar10, xrx390 */
+
+#define PMU1_PCIE_PHY		BIT(0)	/* vr9-specific, moved in ar10/grx390 */
+#define PMU1_PCIE_CTL BIT(1)
+#define PMU1_PCIE_PDI BIT(4)
+#define PMU1_PCIE_MSI BIT(5)
+#define PMU1_CKE BIT(6)
+#define PMU1_PCIE1_CTL BIT(17)
+#define PMU1_PCIE1_PDI BIT(20)
+#define PMU1_PCIE1_MSI BIT(21)
+#define PMU1_PCIE2_CTL BIT(25)
+#define PMU1_PCIE2_PDI BIT(26)
+#define PMU1_PCIE2_MSI BIT(27)
+
+#define PMU_ANALOG_USB0_P BIT(0)
+#define PMU_ANALOG_USB1_P BIT(1)
+#define PMU_ANALOG_PCIE0_P BIT(8)
+#define PMU_ANALOG_PCIE1_P BIT(9)
+#define PMU_ANALOG_PCIE2_P BIT(10)
+#define PMU_ANALOG_DSL_AFE BIT(16)
+#define PMU_ANALOG_DCDC_2V5 BIT(17)
+#define PMU_ANALOG_DCDC_1VX BIT(18)
+#define PMU_ANALOG_DCDC_1V0 BIT(19)
+
+#define pmu_w32(x, y) ltq_w32((x), pmu_membase + (y))
+#define pmu_r32(x) ltq_r32(pmu_membase + (x))
+
+static void __iomem *pmu_membase;
+void __iomem *ltq_cgu_membase;
+void __iomem *ltq_ebu_membase;
+
+static u32 ifccr = CGU_IFCCR;
+static u32 pcicr = CGU_PCICR;
+
+static DEFINE_SPINLOCK(g_pmu_lock);
+
+/* legacy function kept alive to ease clkdev transition */
+void ltq_pmu_enable(unsigned int module)
+{
+ int retry = 1000000;
+
+ spin_lock(&g_pmu_lock);
+ pmu_w32(pmu_r32(PMU_PWDCR) & ~module, PMU_PWDCR);
+ do {} while (--retry && (pmu_r32(PMU_PWDSR) & module));
+ spin_unlock(&g_pmu_lock);
+
+ if (!retry)
+ panic("activating PMU module failed!");
+}
+EXPORT_SYMBOL(ltq_pmu_enable);
+
+/* legacy function kept alive to ease clkdev transition */
+void ltq_pmu_disable(unsigned int module)
+{
+ int retry = 1000000;
+
+ spin_lock(&g_pmu_lock);
+ pmu_w32(pmu_r32(PMU_PWDCR) | module, PMU_PWDCR);
+ do {} while (--retry && (!(pmu_r32(PMU_PWDSR) & module)));
+ spin_unlock(&g_pmu_lock);
+
+ if (!retry)
+ pr_warn("deactivating PMU module failed!");
+}
+EXPORT_SYMBOL(ltq_pmu_disable);
+
+/* enable a hw clock */
+static int cgu_enable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(ifccr) | clk->bits, ifccr);
+ return 0;
+}
+
+/* disable a hw clock */
+static void cgu_disable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(ifccr) & ~clk->bits, ifccr);
+}
+
+/* enable a clock gate */
+static int pmu_enable(struct clk *clk)
+{
+ int retry = 1000000;
+
+ if (of_machine_is_compatible("lantiq,ar10")
+ || of_machine_is_compatible("lantiq,grx390")) {
+ pmu_w32(clk->bits, PWDCR_EN_XRX(clk->module));
+ do {} while (--retry &&
+ (!(pmu_r32(PWDSR_XRX(clk->module)) & clk->bits)));
+
+ } else {
+ spin_lock(&g_pmu_lock);
+ pmu_w32(pmu_r32(PWDCR(clk->module)) & ~clk->bits,
+ PWDCR(clk->module));
+ do {} while (--retry &&
+ (pmu_r32(PWDSR(clk->module)) & clk->bits));
+ spin_unlock(&g_pmu_lock);
+ }
+
+ if (!retry)
+ panic("activating PMU module failed!");
+
+ return 0;
+}
+
+/* disable a clock gate */
+static void pmu_disable(struct clk *clk)
+{
+ int retry = 1000000;
+
+ if (of_machine_is_compatible("lantiq,ar10")
+ || of_machine_is_compatible("lantiq,grx390")) {
+ pmu_w32(clk->bits, PWDCR_DIS_XRX(clk->module));
+ do {} while (--retry &&
+ (pmu_r32(PWDSR_XRX(clk->module)) & clk->bits));
+ } else {
+ spin_lock(&g_pmu_lock);
+ pmu_w32(pmu_r32(PWDCR(clk->module)) | clk->bits,
+ PWDCR(clk->module));
+ do {} while (--retry &&
+ (!(pmu_r32(PWDSR(clk->module)) & clk->bits)));
+ spin_unlock(&g_pmu_lock);
+ }
+
+ if (!retry)
+ pr_warn("deactivating PMU module failed!");
+}
+
+/* the pci enable helper */
+static int pci_enable(struct clk *clk)
+{
+ unsigned int val = ltq_cgu_r32(ifccr);
+ /* set bus clock speed */
+ if (of_machine_is_compatible("lantiq,ar9") ||
+ of_machine_is_compatible("lantiq,vr9")) {
+ val &= ~0x1f00000;
+ if (clk->rate == CLOCK_33M)
+ val |= 0xe00000;
+ else
+ val |= 0x700000; /* 62.5M */
+ } else {
+ val &= ~0xf00000;
+ if (clk->rate == CLOCK_33M)
+ val |= 0x800000;
+ else
+ val |= 0x400000; /* 62.5M */
+ }
+ ltq_cgu_w32(val, ifccr);
+ pmu_enable(clk);
+ return 0;
+}
+
+/* enable the external clock as a source */
+static int pci_ext_enable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(ifccr) & ~(1 << 16), ifccr);
+ ltq_cgu_w32((1 << 30), pcicr);
+ return 0;
+}
+
+/* disable the external clock as a source */
+static void pci_ext_disable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(ifccr) | (1 << 16), ifccr);
+ ltq_cgu_w32((1 << 31) | (1 << 30), pcicr);
+}
+
+/* enable a clockout source */
+static int clkout_enable(struct clk *clk)
+{
+ int i;
+
+ /* get the correct rate */
+ for (i = 0; i < 4; i++) {
+ if (clk->rates[i] == clk->rate) {
+ int shift = 14 - (2 * clk->module);
+ int enable = 7 - clk->module;
+ unsigned int val = ltq_cgu_r32(ifccr);
+
+ val &= ~(3 << shift);
+ val |= i << shift;
+ val |= enable;
+ ltq_cgu_w32(val, ifccr);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+/* manage the clock gates via PMU */
+static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate,
+ unsigned int module, unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ if (!clk)
+ return;
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+ clk->enable = pmu_enable;
+ clk->disable = pmu_disable;
+ clk->module = module;
+ clk->bits = bits;
+ if (deactivate) {
+ /*
+		 * Disable it during initialization; the module should
+		 * enable it when used.
+ */
+ pmu_disable(clk);
+ }
+ clkdev_add(&clk->cl);
+}
+
+/* manage the clock generator */
+static void clkdev_add_cgu(const char *dev, const char *con,
+ unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ if (!clk)
+ return;
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+ clk->enable = cgu_enable;
+ clk->disable = cgu_disable;
+ clk->bits = bits;
+ clkdev_add(&clk->cl);
+}
+
+/* pci needs its own enable function as the setup is a bit more complex */
+static unsigned long valid_pci_rates[] = {CLOCK_33M, CLOCK_62_5M, 0};
+
+static void clkdev_add_pci(void)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ /* main pci clock */
+ if (clk) {
+ clk->cl.dev_id = "17000000.pci";
+ clk->cl.con_id = NULL;
+ clk->cl.clk = clk;
+ clk->rate = CLOCK_33M;
+ clk->rates = valid_pci_rates;
+ clk->enable = pci_enable;
+ clk->disable = pmu_disable;
+ clk->module = 0;
+ clk->bits = PMU_PCI;
+ clkdev_add(&clk->cl);
+ }
+
+ /* use internal/external bus clock */
+ if (clk_ext) {
+ clk_ext->cl.dev_id = "17000000.pci";
+ clk_ext->cl.con_id = "external";
+ clk_ext->cl.clk = clk_ext;
+ clk_ext->enable = pci_ext_enable;
+ clk_ext->disable = pci_ext_disable;
+ clkdev_add(&clk_ext->cl);
+ }
+}
+
+/* xway socs can generate clocks on gpio pins */
+static unsigned long valid_clkout_rates[4][5] = {
+ {CLOCK_32_768K, CLOCK_1_536M, CLOCK_2_5M, CLOCK_12M, 0},
+ {CLOCK_40M, CLOCK_12M, CLOCK_24M, CLOCK_48M, 0},
+ {CLOCK_25M, CLOCK_40M, CLOCK_30M, CLOCK_60M, 0},
+ {CLOCK_12M, CLOCK_50M, CLOCK_32_768K, CLOCK_25M, 0},
+};
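+/*
+ * Row i lists the rates selectable on clkout<i>, terminated by a 0;
+ * clkout_enable() writes the index of the matching rate into ifccr
+ * at bit position (14 - 2 * module).
+ */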
+
+static void clkdev_add_clkout(void)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ struct clk *clk;
+ char *name;
+
+ name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
+ if (!name)
+ continue;
+ sprintf(name, "clkout%d", i);
+
+ clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ if (!clk) {
+ kfree(name);
+ continue;
+ }
+ clk->cl.dev_id = "1f103000.cgu";
+ clk->cl.con_id = name;
+ clk->cl.clk = clk;
+ clk->rate = 0;
+ clk->rates = valid_clkout_rates[i];
+ clk->enable = clkout_enable;
+ clk->module = i;
+ clkdev_add(&clk->cl);
+ }
+}
+
+/* bring up all register ranges that we need for basic system control */
+void __init ltq_soc_init(void)
+{
+ struct resource res_pmu, res_cgu, res_ebu;
+ struct device_node *np_pmu =
+ of_find_compatible_node(NULL, NULL, "lantiq,pmu-xway");
+ struct device_node *np_cgu =
+ of_find_compatible_node(NULL, NULL, "lantiq,cgu-xway");
+ struct device_node *np_ebu =
+ of_find_compatible_node(NULL, NULL, "lantiq,ebu-xway");
+
+ /* check if all the core register ranges are available */
+ if (!np_pmu || !np_cgu || !np_ebu)
+ panic("Failed to load core nodes from devicetree");
+
+ if (of_address_to_resource(np_pmu, 0, &res_pmu) ||
+ of_address_to_resource(np_cgu, 0, &res_cgu) ||
+ of_address_to_resource(np_ebu, 0, &res_ebu))
+ panic("Failed to get core resources");
+
+ if (!request_mem_region(res_pmu.start, resource_size(&res_pmu),
+ res_pmu.name) ||
+ !request_mem_region(res_cgu.start, resource_size(&res_cgu),
+ res_cgu.name) ||
+ !request_mem_region(res_ebu.start, resource_size(&res_ebu),
+ res_ebu.name))
+ pr_err("Failed to request core resources");
+
+ pmu_membase = ioremap(res_pmu.start, resource_size(&res_pmu));
+ ltq_cgu_membase = ioremap(res_cgu.start,
+ resource_size(&res_cgu));
+ ltq_ebu_membase = ioremap(res_ebu.start,
+ resource_size(&res_ebu));
+ if (!pmu_membase || !ltq_cgu_membase || !ltq_ebu_membase)
+ panic("Failed to remap core resources");
+
+ /* make sure to unprotect the memory region where flash is located */
+ ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
+
+ /* add our generic xway clocks */
+ clkdev_add_pmu("10000000.fpi", NULL, 0, 0, PMU_FPI);
+ clkdev_add_pmu("1e100a00.gptu", NULL, 1, 0, PMU_GPT);
+ clkdev_add_pmu("1e100bb0.stp", NULL, 1, 0, PMU_STP);
+ clkdev_add_pmu("1e100c00.serial", NULL, 0, 0, PMU_ASC1);
+ clkdev_add_pmu("1e104100.dma", NULL, 1, 0, PMU_DMA);
+ clkdev_add_pmu("1e100800.spi", NULL, 1, 0, PMU_SPI);
+ clkdev_add_pmu("1e105300.ebu", NULL, 0, 0, PMU_EBU);
+ clkdev_add_clkout();
+
+ /* add the soc dependent clocks */
+ if (of_machine_is_compatible("lantiq,vr9")) {
+ ifccr = CGU_IFCCR_VR9;
+ pcicr = CGU_PCICR_VR9;
+ } else {
+ clkdev_add_pmu("1e180000.etop", NULL, 1, 0, PMU_PPE);
+ }
+
+ if (!of_machine_is_compatible("lantiq,ase"))
+ clkdev_add_pci();
+
+ if (of_machine_is_compatible("lantiq,grx390") ||
+ of_machine_is_compatible("lantiq,ar10")) {
+ clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY0);
+ clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY1);
+ clkdev_add_pmu("1e108000.switch", "gphy2", 0, 0, PMU_GPHY2);
+ clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 2, PMU_ANALOG_USB0_P);
+ clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 2, PMU_ANALOG_USB1_P);
+ /* rc 0 */
+ clkdev_add_pmu("1f106800.phy", "phy", 1, 2, PMU_ANALOG_PCIE0_P);
+ clkdev_add_pmu("1d900000.pcie", "msi", 1, 1, PMU1_PCIE_MSI);
+ clkdev_add_pmu("1f106800.phy", "pdi", 1, 1, PMU1_PCIE_PDI);
+ clkdev_add_pmu("1d900000.pcie", "ctl", 1, 1, PMU1_PCIE_CTL);
+ /* rc 1 */
+ clkdev_add_pmu("1f700400.phy", "phy", 1, 2, PMU_ANALOG_PCIE1_P);
+ clkdev_add_pmu("19000000.pcie", "msi", 1, 1, PMU1_PCIE1_MSI);
+ clkdev_add_pmu("1f700400.phy", "pdi", 1, 1, PMU1_PCIE1_PDI);
+ clkdev_add_pmu("19000000.pcie", "ctl", 1, 1, PMU1_PCIE1_CTL);
+ }
+
+ if (of_machine_is_compatible("lantiq,ase")) {
+ if (ltq_cgu_r32(CGU_SYS) & (1 << 5))
+ clkdev_add_static(CLOCK_266M, CLOCK_133M,
+ CLOCK_133M, CLOCK_266M);
+ else
+ clkdev_add_static(CLOCK_133M, CLOCK_133M,
+ CLOCK_133M, CLOCK_133M);
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
+ clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
+ clkdev_add_pmu("1e180000.etop", "ppe", 1, 0, PMU_PPE);
+ clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY);
+ clkdev_add_pmu("1e180000.etop", "ephy", 1, 0, PMU_EPHY);
+ clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_ASE_SDIO);
+ clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+ } else if (of_machine_is_compatible("lantiq,grx390")) {
+ clkdev_add_static(ltq_grx390_cpu_hz(), ltq_grx390_fpi_hz(),
+ ltq_grx390_fpi_hz(), ltq_grx390_pp32_hz());
+ clkdev_add_pmu("1e108000.switch", "gphy3", 0, 0, PMU_GPHY3);
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
+ clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1);
+ /* rc 2 */
+ clkdev_add_pmu("1f106a00.pcie", "phy", 1, 2, PMU_ANALOG_PCIE2_P);
+ clkdev_add_pmu("1a800000.pcie", "msi", 1, 1, PMU1_PCIE2_MSI);
+ clkdev_add_pmu("1f106a00.pcie", "pdi", 1, 1, PMU1_PCIE2_PDI);
+ clkdev_add_pmu("1a800000.pcie", "ctl", 1, 1, PMU1_PCIE2_CTL);
+ clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP);
+ clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
+ clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ } else if (of_machine_is_compatible("lantiq,ar10")) {
+ clkdev_add_static(ltq_ar10_cpu_hz(), ltq_ar10_fpi_hz(),
+ ltq_ar10_fpi_hz(), ltq_ar10_pp32_hz());
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
+ clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1);
+ clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH |
+ PMU_PPE_DP | PMU_PPE_TC);
+ clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
+ clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE);
+ clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+ } else if (of_machine_is_compatible("lantiq,vr9")) {
+ clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(),
+ ltq_vr9_fpi_hz(), ltq_vr9_pp32_hz());
+ clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
+ clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P);
+ clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM);
+ clkdev_add_pmu("1f106800.phy", "phy", 1, 1, PMU1_PCIE_PHY);
+ clkdev_add_pmu("1d900000.pcie", "bus", 1, 0, PMU_PCIE_CLK);
+ clkdev_add_pmu("1d900000.pcie", "msi", 1, 1, PMU1_PCIE_MSI);
+ clkdev_add_pmu("1f106800.phy", "pdi", 1, 1, PMU1_PCIE_PDI);
+ clkdev_add_pmu("1d900000.pcie", "ctl", 1, 1, PMU1_PCIE_CTL);
+ clkdev_add_pmu(NULL, "ahb", 1, 0, PMU_AHBM | PMU_AHBS);
+
+ clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
+ clkdev_add_pmu("1e10b308.eth", NULL, 0, 0,
+ PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
+ PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
+ PMU_PPE_QSB | PMU_PPE_TOP);
+ clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
+ clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+ } else if (of_machine_is_compatible("lantiq,ar9")) {
+ clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
+ ltq_ar9_fpi_hz(), CLOCK_250M);
+ clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
+ clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P);
+ clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM);
+ clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH);
+ clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
+ clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+ clkdev_add_pmu("1e100400.serial", NULL, 1, 0, PMU_ASC0);
+ } else {
+ clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
+ ltq_danube_fpi_hz(), ltq_danube_pp32_hz());
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
+ clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
+ clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
+ clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+ clkdev_add_pmu("1e100400.serial", NULL, 1, 0, PMU_ASC0);
+ }
+}
diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c
new file mode 100644
index 000000000..7a14da8d9
--- /dev/null
+++ b/arch/mips/lantiq/xway/vmmc.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/export.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/dma-mapping.h>
+
+#include <lantiq_soc.h>
+
+static unsigned int *cp1_base;
+
+unsigned int *ltq_get_cp1_base(void)
+{
+ if (!cp1_base)
+ panic("no cp1 base was set\n");
+
+ return cp1_base;
+}
+EXPORT_SYMBOL(ltq_get_cp1_base);
+
+static int vmmc_probe(struct platform_device *pdev)
+{
+#define CP1_SIZE (1 << 20)
+ int gpio_count;
+ dma_addr_t dma;
+
+ cp1_base =
+ (void *) CPHYSADDR(dma_alloc_coherent(&pdev->dev, CP1_SIZE,
+ &dma, GFP_KERNEL));
+
+ gpio_count = of_gpio_count(pdev->dev.of_node);
+ while (gpio_count > 0) {
+ enum of_gpio_flags flags;
+ int gpio = of_get_gpio_flags(pdev->dev.of_node,
+ --gpio_count, &flags);
+ if (gpio_request(gpio, "vmmc-relay"))
+ continue;
+ dev_info(&pdev->dev, "requested GPIO %d\n", gpio);
+ gpio_direction_output(gpio,
+ (flags & OF_GPIO_ACTIVE_LOW) ? (0) : (1));
+ }
+
+ dev_info(&pdev->dev, "reserved %dMB at 0x%p", CP1_SIZE >> 20, cp1_base);
+
+ return 0;
+}
+
+static const struct of_device_id vmmc_match[] = {
+ { .compatible = "lantiq,vmmc-xway" },
+ {},
+};
+
+static struct platform_driver vmmc_driver = {
+ .probe = vmmc_probe,
+ .driver = {
+ .name = "lantiq,vmmc",
+ .of_match_table = vmmc_match,
+ },
+};
+builtin_platform_driver(vmmc_driver);
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
new file mode 100644
index 000000000..479f50559
--- /dev/null
+++ b/arch/mips/lib/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for MIPS-specific library files.
+#
+
+lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
+ mips-atomic.o strncpy_user.o \
+ strnlen_user.o uncached.o
+
+obj-y += iomap_copy.o
+obj-$(CONFIG_PCI) += iomap-pci.o
+lib-$(CONFIG_GENERIC_CSUM) := $(filter-out csum_partial.o, $(lib-y))
+
+obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o
+obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o
+obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o
+
+# libgcc-style stuff needed in the kernel
+obj-y += bswapsi.o bswapdi.o multi3.o
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
new file mode 100644
index 000000000..116d0bd8b
--- /dev/null
+++ b/arch/mips/lib/bitops.c
@@ -0,0 +1,148 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/irqflags.h>
+#include <linux/export.h>
+
+
+/**
+ * __mips_set_bit - Atomically set a bit in memory. This is called by
+ * set_bit() if it cannot find a faster solution.
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
+ unsigned long mask;
+ unsigned long flags;
+
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ *a |= mask;
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_set_bit);
+
+
+/**
+ * __mips_clear_bit - Clears a bit in memory. This is called by clear_bit() if
+ * it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
+ unsigned long mask;
+ unsigned long flags;
+
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ *a &= ~mask;
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_clear_bit);
+
+
+/**
+ * __mips_change_bit - Toggle a bit in memory. This is called by change_bit()
+ * if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ */
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
+ unsigned long mask;
+ unsigned long flags;
+
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ *a ^= mask;
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_change_bit);
+
+
+/**
+ * __mips_test_and_set_bit_lock - Set a bit and return its old value. This is
+ * called by test_and_set_bit_lock() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
+ unsigned long mask;
+ unsigned long flags;
+ int res;
+
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ res = (mask & *a) != 0;
+ *a |= mask;
+ raw_local_irq_restore(flags);
+ return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
+
+
+/**
+ * __mips_test_and_clear_bit - Clear a bit and return its old value. This is
+ * called by test_and_clear_bit() if it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ */
+int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
+ unsigned long mask;
+ unsigned long flags;
+ int res;
+
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ res = (mask & *a) != 0;
+ *a &= ~mask;
+ raw_local_irq_restore(flags);
+ return res;
+}
+EXPORT_SYMBOL(__mips_test_and_clear_bit);
+
+
+/**
+ * __mips_test_and_change_bit - Change a bit and return its old value. This is
+ * called by test_and_change_bit() if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to count from
+ */
+int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = &addr[BIT_WORD(nr)];
+ unsigned int bit = nr % BITS_PER_LONG;
+ unsigned long mask;
+ unsigned long flags;
+ int res;
+
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ res = (mask & *a) != 0;
+ *a ^= mask;
+ raw_local_irq_restore(flags);
+ return res;
+}
+EXPORT_SYMBOL(__mips_test_and_change_bit);
diff --git a/arch/mips/lib/bswapdi.c b/arch/mips/lib/bswapdi.c
new file mode 100644
index 000000000..fcef74084
--- /dev/null
+++ b/arch/mips/lib/bswapdi.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include <linux/compiler.h>
+
+unsigned long long notrace __bswapdi2(unsigned long long u)
+{
+ return (((u) & 0xff00000000000000ull) >> 56) |
+ (((u) & 0x00ff000000000000ull) >> 40) |
+ (((u) & 0x0000ff0000000000ull) >> 24) |
+ (((u) & 0x000000ff00000000ull) >> 8) |
+ (((u) & 0x00000000ff000000ull) << 8) |
+ (((u) & 0x0000000000ff0000ull) << 24) |
+ (((u) & 0x000000000000ff00ull) << 40) |
+ (((u) & 0x00000000000000ffull) << 56);
+}
+
+EXPORT_SYMBOL(__bswapdi2);
diff --git a/arch/mips/lib/bswapsi.c b/arch/mips/lib/bswapsi.c
new file mode 100644
index 000000000..22d8e4f6d
--- /dev/null
+++ b/arch/mips/lib/bswapsi.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include <linux/compiler.h>
+
+unsigned int notrace __bswapsi2(unsigned int u)
+{
+ return (((u) & 0xff000000) >> 24) |
+ (((u) & 0x00ff0000) >> 8) |
+ (((u) & 0x0000ff00) << 8) |
+ (((u) & 0x000000ff) << 24);
+}
+
+EXPORT_SYMBOL(__bswapsi2);
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
new file mode 100644
index 000000000..a46db0807
--- /dev/null
+++ b/arch/mips/lib/csum_partial.S
@@ -0,0 +1,754 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Quick'n'dirty IP checksum ...
+ *
+ * Copyright (C) 1998, 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2007 Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ */
+#include <linux/errno.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/export.h>
+#include <asm/regdef.h>
+
+#ifdef CONFIG_64BIT
+/*
+ * As we are sharing the code base with the mips32 tree (which uses the o32
+ * ABI register definitions), we need to redefine the register definitions
+ * from the n64 ABI register naming to the o32 ABI register naming.
+ */
+#undef t0
+#undef t1
+#undef t2
+#undef t3
+#define t0 $8
+#define t1 $9
+#define t2 $10
+#define t3 $11
+#define t4 $12
+#define t5 $13
+#define t6 $14
+#define t7 $15
+
+#define USE_DOUBLE
+#endif
+
+#ifdef USE_DOUBLE
+
+#define LOAD ld
+#define LOAD32 lwu
+#define ADD daddu
+#define NBYTES 8
+
+#else
+
+#define LOAD lw
+#define LOAD32 lw
+#define ADD addu
+#define NBYTES 4
+
+#endif /* USE_DOUBLE */
+
+#define UNIT(unit) ((unit)*NBYTES)
+
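+/*
+ * ADDC/ADDC32 implement a one's-complement add: sltu captures the carry
+ * out of the addition and folds it back into the running checksum.
+ */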
+#define ADDC(sum,reg) \
+ .set push; \
+ .set noat; \
+ ADD sum, reg; \
+ sltu v1, sum, reg; \
+ ADD sum, v1; \
+ .set pop
+
+#define ADDC32(sum,reg) \
+ .set push; \
+ .set noat; \
+ addu sum, reg; \
+ sltu v1, sum, reg; \
+ addu sum, v1; \
+ .set pop
+
+#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \
+ LOAD _t0, (offset + UNIT(0))(src); \
+ LOAD _t1, (offset + UNIT(1))(src); \
+ LOAD _t2, (offset + UNIT(2))(src); \
+ LOAD _t3, (offset + UNIT(3))(src); \
+ ADDC(_t0, _t1); \
+ ADDC(_t2, _t3); \
+ ADDC(sum, _t0); \
+ ADDC(sum, _t2)
+
+#ifdef USE_DOUBLE
+#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \
+ CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)
+#else
+#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \
+ CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3); \
+ CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3)
+#endif
+
+/*
+ * a0: source address
+ * a1: length of the area to checksum
+ * a2: partial checksum
+ */
+
+#define src a0
+#define sum v0
+
+ .text
+ .set noreorder
+ .align 5
+LEAF(csum_partial)
+EXPORT_SYMBOL(csum_partial)
+ move sum, zero
+ move t7, zero
+
+ sltiu t8, a1, 0x8
+ bnez t8, .Lsmall_csumcpy /* < 8 bytes to copy */
+ move t2, a1
+
+ andi t7, src, 0x1 /* odd buffer? */
+
+.Lhword_align:
+ beqz t7, .Lword_align
+ andi t8, src, 0x2
+
+ lbu t0, (src)
+ LONG_SUBU a1, a1, 0x1
+#ifdef __MIPSEL__
+ sll t0, t0, 8
+#endif
+ ADDC(sum, t0)
+ PTR_ADDU src, src, 0x1
+ andi t8, src, 0x2
+
+.Lword_align:
+ beqz t8, .Ldword_align
+ sltiu t8, a1, 56
+
+ lhu t0, (src)
+ LONG_SUBU a1, a1, 0x2
+ ADDC(sum, t0)
+ sltiu t8, a1, 56
+ PTR_ADDU src, src, 0x2
+
+.Ldword_align:
+ bnez t8, .Ldo_end_words
+ move t8, a1
+
+ andi t8, src, 0x4
+ beqz t8, .Lqword_align
+ andi t8, src, 0x8
+
+ LOAD32 t0, 0x00(src)
+ LONG_SUBU a1, a1, 0x4
+ ADDC(sum, t0)
+ PTR_ADDU src, src, 0x4
+ andi t8, src, 0x8
+
+.Lqword_align:
+ beqz t8, .Loword_align
+ andi t8, src, 0x10
+
+#ifdef USE_DOUBLE
+ ld t0, 0x00(src)
+ LONG_SUBU a1, a1, 0x8
+ ADDC(sum, t0)
+#else
+ lw t0, 0x00(src)
+ lw t1, 0x04(src)
+ LONG_SUBU a1, a1, 0x8
+ ADDC(sum, t0)
+ ADDC(sum, t1)
+#endif
+ PTR_ADDU src, src, 0x8
+ andi t8, src, 0x10
+
+.Loword_align:
+ beqz t8, .Lbegin_movement
+ LONG_SRL t8, a1, 0x7
+
+#ifdef USE_DOUBLE
+ ld t0, 0x00(src)
+ ld t1, 0x08(src)
+ ADDC(sum, t0)
+ ADDC(sum, t1)
+#else
+ CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
+#endif
+ LONG_SUBU a1, a1, 0x10
+ PTR_ADDU src, src, 0x10
+ LONG_SRL t8, a1, 0x7
+
+.Lbegin_movement:
+ beqz t8, 1f
+ andi t2, a1, 0x40
+
+.Lmove_128bytes:
+ CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
+ LONG_SUBU t8, t8, 0x01
+ .set reorder /* DADDI_WAR */
+ PTR_ADDU src, src, 0x80
+ bnez t8, .Lmove_128bytes
+ .set noreorder
+
+1:
+ beqz t2, 1f
+ andi t2, a1, 0x20
+
+.Lmove_64bytes:
+ CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
+ PTR_ADDU src, src, 0x40
+
+1:
+ beqz t2, .Ldo_end_words
+ andi t8, a1, 0x1c
+
+.Lmove_32bytes:
+ CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
+ andi t8, a1, 0x1c
+ PTR_ADDU src, src, 0x20
+
+.Ldo_end_words:
+ beqz t8, .Lsmall_csumcpy
+ andi t2, a1, 0x3
+ LONG_SRL t8, t8, 0x2
+
+.Lend_words:
+ LOAD32 t0, (src)
+ LONG_SUBU t8, t8, 0x1
+ ADDC(sum, t0)
+ .set reorder /* DADDI_WAR */
+ PTR_ADDU src, src, 0x4
+ bnez t8, .Lend_words
+ .set noreorder
+
+/* unknown src alignment and < 8 bytes to go */
+.Lsmall_csumcpy:
+ move a1, t2
+
+ andi t0, a1, 4
+ beqz t0, 1f
+ andi t0, a1, 2
+
+ /* Still a full word to go */
+ ulw t1, (src)
+ PTR_ADDIU src, 4
+#ifdef USE_DOUBLE
+ dsll t1, t1, 32 /* clear lower 32bit */
+#endif
+ ADDC(sum, t1)
+
+1: move t1, zero
+ beqz t0, 1f
+ andi t0, a1, 1
+
+ /* Still a halfword to go */
+ ulhu t1, (src)
+ PTR_ADDIU src, 2
+
+1: beqz t0, 1f
+ sll t1, t1, 16
+
+ lbu t2, (src)
+ nop
+
+#ifdef __MIPSEB__
+ sll t2, t2, 8
+#endif
+ or t1, t2
+
+1: ADDC(sum, t1)
+
+ /* fold checksum */
+#ifdef USE_DOUBLE
+ dsll32 v1, sum, 0
+ daddu sum, v1
+ sltu v1, sum, v1
+ dsra32 sum, sum, 0
+ addu sum, v1
+#endif
+
+ /* odd buffer alignment? */
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
+ defined(CONFIG_CPU_LOONGSON64)
+ .set push
+ .set arch=mips32r2
+ wsbh v1, sum
+ movn sum, v1, t7
+ .set pop
+#else
+ beqz t7, 1f /* odd buffer alignment? */
+ lui v1, 0x00ff
+ addu v1, 0x00ff
+ and t0, sum, v1
+ sll t0, t0, 8
+ srl sum, sum, 8
+ and sum, sum, v1
+ or sum, sum, t0
+1:
+#endif
+ .set reorder
+ /* Add the passed partial csum. */
+ ADDC32(sum, a2)
+ jr ra
+ .set noreorder
+ END(csum_partial)
+
+
+/*
+ * checksum and copy routines based on memcpy.S
+ *
+ * csum_partial_copy_nocheck(src, dst, len)
+ * __csum_partial_copy_kernel(src, dst, len)
+ *
+ * See "Spec" in memcpy.S for details. Unlike __copy_user, all
+ * functions in this file use the standard calling convention.
+ */
+
+#define src a0
+#define dst a1
+#define len a2
+#define sum v0
+#define odd t8
+
+/*
+ * All exception handlers simply return 0.
+ */
+
+/* Instruction type */
+#define LD_INSN 1
+#define ST_INSN 2
+#define LEGACY_MODE 1
+#define EVA_MODE 2
+#define USEROP 1
+#define KERNELOP 2
+
+/*
+ * Wrapper to add an entry in the exception table
+ * in case the insn causes a memory exception.
+ * Arguments:
+ * insn : Load/store instruction
+ * type : Instruction type
+ * reg : Register
+ * addr : Address
+ * handler : Exception handler
+ */
+#define EXC(insn, type, reg, addr) \
+ .if \mode == LEGACY_MODE; \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, .L_exc; \
+ .previous; \
+ /* This is enabled in EVA mode */ \
+ .else; \
+ /* If loading from user or storing to user */ \
+ .if ((\from == USEROP) && (type == LD_INSN)) || \
+ ((\to == USEROP) && (type == ST_INSN)); \
+9: __BUILD_EVA_INSN(insn##e, reg, addr); \
+ .section __ex_table,"a"; \
+ PTR 9b, .L_exc; \
+ .previous; \
+ .else; \
+ /* EVA without exception */ \
+ insn reg, addr; \
+ .endif; \
+ .endif
+
+#undef LOAD
+
+#ifdef USE_DOUBLE
+
+#define LOADK ld /* No exception */
+#define LOAD(reg, addr) EXC(ld, LD_INSN, reg, addr)
+#define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr) EXC(ldl, LD_INSN, reg, addr)
+#define LOADR(reg, addr) EXC(ldr, LD_INSN, reg, addr)
+#define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr) EXC(sdl, ST_INSN, reg, addr)
+#define STORER(reg, addr) EXC(sdr, ST_INSN, reg, addr)
+#define STORE(reg, addr) EXC(sd, ST_INSN, reg, addr)
+#define ADD daddu
+#define SUB dsubu
+#define SRL dsrl
+#define SLL dsll
+#define SLLV dsllv
+#define SRLV dsrlv
+#define NBYTES 8
+#define LOG_NBYTES 3
+
+#else
+
+#define LOADK lw /* No exception */
+#define LOAD(reg, addr) EXC(lw, LD_INSN, reg, addr)
+#define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr) EXC(lwl, LD_INSN, reg, addr)
+#define LOADR(reg, addr) EXC(lwr, LD_INSN, reg, addr)
+#define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr) EXC(swl, ST_INSN, reg, addr)
+#define STORER(reg, addr) EXC(swr, ST_INSN, reg, addr)
+#define STORE(reg, addr) EXC(sw, ST_INSN, reg, addr)
+#define ADD addu
+#define SUB subu
+#define SRL srl
+#define SLL sll
+#define SLLV sllv
+#define SRLV srlv
+#define NBYTES 4
+#define LOG_NBYTES 2
+
+#endif /* USE_DOUBLE */
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST LOADR
+#define LDREST LOADL
+#define STFIRST STORER
+#define STREST STOREL
+#define SHIFT_DISCARD SLLV
+#define SHIFT_DISCARD_REVERT SRLV
+#else
+#define LDFIRST LOADL
+#define LDREST LOADR
+#define STFIRST STOREL
+#define STREST STORER
+#define SHIFT_DISCARD SRLV
+#define SHIFT_DISCARD_REVERT SLLV
+#endif
+
+#define FIRST(unit) ((unit)*NBYTES)
+#define REST(unit) (FIRST(unit)+NBYTES-1)
+
+#define ADDRMASK (NBYTES-1)
+
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+ .set noat
+#else
+ .set at=v1
+#endif
+
+ .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to
+
+ li sum, -1
+ move odd, zero
+ /*
+ * Note: dst & src may be unaligned, len may be 0
+ * Temps
+ */
+ /*
+ * The "issue break"s below are very approximate.
+ * Issue delays for dcache fills will perturb the schedule, as will
+ * load queue full replay traps, etc.
+ *
+ * If len < NBYTES use byte operations.
+ */
+ sltu t2, len, NBYTES
+ and t1, dst, ADDRMASK
+ bnez t2, .Lcopy_bytes_checklen\@
+ and t0, src, ADDRMASK
+ andi odd, dst, 0x1 /* odd buffer? */
+ bnez t1, .Ldst_unaligned\@
+ nop
+ bnez t0, .Lsrc_unaligned_dst_aligned\@
+ /*
+ * use delay slot for fall-through
+ * src and dst are aligned; need to compute rem
+ */
+.Lboth_aligned\@:
+ SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
+ beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
+ nop
+ SUB len, 8*NBYTES # subtract here for bgez loop
+ .align 4
+1:
+ LOAD(t0, UNIT(0)(src))
+ LOAD(t1, UNIT(1)(src))
+ LOAD(t2, UNIT(2)(src))
+ LOAD(t3, UNIT(3)(src))
+ LOAD(t4, UNIT(4)(src))
+ LOAD(t5, UNIT(5)(src))
+ LOAD(t6, UNIT(6)(src))
+ LOAD(t7, UNIT(7)(src))
+ SUB len, len, 8*NBYTES
+ ADD src, src, 8*NBYTES
+ STORE(t0, UNIT(0)(dst))
+ ADDC(t0, t1)
+ STORE(t1, UNIT(1)(dst))
+ ADDC(sum, t0)
+ STORE(t2, UNIT(2)(dst))
+ ADDC(t2, t3)
+ STORE(t3, UNIT(3)(dst))
+ ADDC(sum, t2)
+ STORE(t4, UNIT(4)(dst))
+ ADDC(t4, t5)
+ STORE(t5, UNIT(5)(dst))
+ ADDC(sum, t4)
+ STORE(t6, UNIT(6)(dst))
+ ADDC(t6, t7)
+ STORE(t7, UNIT(7)(dst))
+ ADDC(sum, t6)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 8*NBYTES
+ bgez len, 1b
+ .set noreorder
+ ADD len, 8*NBYTES # revert len (see above)
+
+ /*
+ * len == the number of bytes left to copy < 8*NBYTES
+ */
+.Lcleanup_both_aligned\@:
+#define rem t7
+ beqz len, .Ldone\@
+ sltu t0, len, 4*NBYTES
+ bnez t0, .Lless_than_4units\@
+ and rem, len, (NBYTES-1) # rem = len % NBYTES
+ /*
+ * len >= 4*NBYTES
+ */
+ LOAD(t0, UNIT(0)(src))
+ LOAD(t1, UNIT(1)(src))
+ LOAD(t2, UNIT(2)(src))
+ LOAD(t3, UNIT(3)(src))
+ SUB len, len, 4*NBYTES
+ ADD src, src, 4*NBYTES
+ STORE(t0, UNIT(0)(dst))
+ ADDC(t0, t1)
+ STORE(t1, UNIT(1)(dst))
+ ADDC(sum, t0)
+ STORE(t2, UNIT(2)(dst))
+ ADDC(t2, t3)
+ STORE(t3, UNIT(3)(dst))
+ ADDC(sum, t2)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 4*NBYTES
+ beqz len, .Ldone\@
+ .set noreorder
+.Lless_than_4units\@:
+ /*
+ * rem = len % NBYTES
+ */
+ beq rem, len, .Lcopy_bytes\@
+ nop
+1:
+ LOAD(t0, 0(src))
+ ADD src, src, NBYTES
+ SUB len, len, NBYTES
+ STORE(t0, 0(dst))
+ ADDC(sum, t0)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, NBYTES
+ bne rem, len, 1b
+ .set noreorder
+
+ /*
+ * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
+ * A loop would do only a byte at a time with possible branch
+ * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
+ * because can't assume read-access to dst. Instead, use
+ * STREST dst, which doesn't require read access to dst.
+ *
+ * This code should perform better than a simple loop on modern,
+ * wide-issue mips processors because the code has fewer branches and
+ * more instruction-level parallelism.
+ */
+#define bits t2
+ beqz len, .Ldone\@
+ ADD t1, dst, len # t1 is just past last byte of dst
+ li bits, 8*NBYTES
+ SLL rem, len, 3 # rem = number of bits to keep
+ LOAD(t0, 0(src))
+ SUB bits, bits, rem # bits = number of bits to discard
+ SHIFT_DISCARD t0, t0, bits
+ STREST(t0, -1(t1))
+ SHIFT_DISCARD_REVERT t0, t0, bits
+ .set reorder
+ ADDC(sum, t0)
+ b .Ldone\@
+ .set noreorder
+.Ldst_unaligned\@:
+ /*
+ * dst is unaligned
+ * t0 = src & ADDRMASK
+ * t1 = dst & ADDRMASK; T1 > 0
+ * len >= NBYTES
+ *
+ * Copy enough bytes to align dst
+ * Set match = (src and dst have same alignment)
+ */
+#define match rem
+ LDFIRST(t3, FIRST(0)(src))
+ ADD t2, zero, NBYTES
+ LDREST(t3, REST(0)(src))
+ SUB t2, t2, t1 # t2 = number of bytes copied
+ xor match, t0, t1
+ STFIRST(t3, FIRST(0)(dst))
+ SLL t4, t1, 3 # t4 = number of bits to discard
+ SHIFT_DISCARD t3, t3, t4
+ /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
+ ADDC(sum, t3)
+ beq len, t2, .Ldone\@
+ SUB len, len, t2
+ ADD dst, dst, t2
+ beqz match, .Lboth_aligned\@
+ ADD src, src, t2
+
+.Lsrc_unaligned_dst_aligned\@:
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ beqz t0, .Lcleanup_src_unaligned\@
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+ LDFIRST(t0, FIRST(0)(src))
+ LDFIRST(t1, FIRST(1)(src))
+ SUB len, len, 4*NBYTES
+ LDREST(t0, REST(0)(src))
+ LDREST(t1, REST(1)(src))
+ LDFIRST(t2, FIRST(2)(src))
+ LDFIRST(t3, FIRST(3)(src))
+ LDREST(t2, REST(2)(src))
+ LDREST(t3, REST(3)(src))
+ ADD src, src, 4*NBYTES
+#ifdef CONFIG_CPU_SB1
+ nop # improves slotting
+#endif
+ STORE(t0, UNIT(0)(dst))
+ ADDC(t0, t1)
+ STORE(t1, UNIT(1)(dst))
+ ADDC(sum, t0)
+ STORE(t2, UNIT(2)(dst))
+ ADDC(t2, t3)
+ STORE(t3, UNIT(3)(dst))
+ ADDC(sum, t2)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 4*NBYTES
+ bne len, rem, 1b
+ .set noreorder
+
+.Lcleanup_src_unaligned\@:
+ beqz len, .Ldone\@
+ and rem, len, NBYTES-1 # rem = len % NBYTES
+ beq rem, len, .Lcopy_bytes\@
+ nop
+1:
+ LDFIRST(t0, FIRST(0)(src))
+ LDREST(t0, REST(0)(src))
+ ADD src, src, NBYTES
+ SUB len, len, NBYTES
+ STORE(t0, 0(dst))
+ ADDC(sum, t0)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, NBYTES
+ bne len, rem, 1b
+ .set noreorder
+
+.Lcopy_bytes_checklen\@:
+ beqz len, .Ldone\@
+ nop
+.Lcopy_bytes\@:
+ /* 0 < len < NBYTES */
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define SHIFT_START 0
+#define SHIFT_INC 8
+#else
+#define SHIFT_START 8*(NBYTES-1)
+#define SHIFT_INC -8
+#endif
+ move t2, zero # partial word
+ li t3, SHIFT_START # shift
+#define COPY_BYTE(N) \
+ LOADBU(t0, N(src)); \
+ SUB len, len, 1; \
+ STOREB(t0, N(dst)); \
+ SLLV t0, t0, t3; \
+ addu t3, SHIFT_INC; \
+ beqz len, .Lcopy_bytes_done\@; \
+ or t2, t0
+
+ COPY_BYTE(0)
+ COPY_BYTE(1)
+#ifdef USE_DOUBLE
+ COPY_BYTE(2)
+ COPY_BYTE(3)
+ COPY_BYTE(4)
+ COPY_BYTE(5)
+#endif
+ LOADBU(t0, NBYTES-2(src))
+ SUB len, len, 1
+ STOREB(t0, NBYTES-2(dst))
+ SLLV t0, t0, t3
+ or t2, t0
+.Lcopy_bytes_done\@:
+ ADDC(sum, t2)
+.Ldone\@:
+ /* fold checksum */
+ .set push
+ .set noat
+#ifdef USE_DOUBLE
+ dsll32 v1, sum, 0
+ daddu sum, v1
+ sltu v1, sum, v1
+ dsra32 sum, sum, 0
+ addu sum, v1
+#endif
+
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
+ defined(CONFIG_CPU_LOONGSON64)
+ .set push
+ .set arch=mips32r2
+ wsbh v1, sum
+ movn sum, v1, odd
+ .set pop
+#else
+ beqz odd, 1f /* odd buffer alignment? */
+ lui v1, 0x00ff
+ addu v1, 0x00ff
+ and t0, sum, v1
+ sll t0, t0, 8
+ srl sum, sum, 8
+ and sum, sum, v1
+ or sum, sum, t0
+1:
+#endif
+ .set pop
+ .set reorder
+ jr ra
+ .set noreorder
+ .endm
+
+ .set noreorder
+.L_exc:
+ jr ra
+ li v0, 0
+
+FEXPORT(__csum_partial_copy_nocheck)
+EXPORT_SYMBOL(__csum_partial_copy_nocheck)
+#ifndef CONFIG_EVA
+FEXPORT(__csum_partial_copy_to_user)
+EXPORT_SYMBOL(__csum_partial_copy_to_user)
+FEXPORT(__csum_partial_copy_from_user)
+EXPORT_SYMBOL(__csum_partial_copy_from_user)
+#endif
+__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP
+
+#ifdef CONFIG_EVA
+LEAF(__csum_partial_copy_to_user)
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP
+END(__csum_partial_copy_to_user)
+
+LEAF(__csum_partial_copy_from_user)
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP
+END(__csum_partial_copy_from_user)
+#endif
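
csum_partial computes the usual Internet (ones'-complement) checksum; a rough, unoptimized C reference of the same computation is sketched below with a hypothetical helper name. The assembly above additionally handles alignment, wide loads and the odd-start byte swap, and returns the unfolded 32-bit sum for csum_fold() to finish.

/* Reference sketch of the checksum csum_partial accelerates (RFC 1071 style). */
static unsigned short ip_checksum_ref(const unsigned char *buf, int len)
{
	unsigned long sum = 0;
	int i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (buf[i] << 8) | buf[i + 1];	/* 16-bit big-endian words */
	if (len & 1)
		sum += buf[len - 1] << 8;		/* pad a trailing odd byte */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries back in */
	return (unsigned short)~sum;			/* ones' complement */
}
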
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
new file mode 100644
index 000000000..2e8dfc1d5
--- /dev/null
+++ b/arch/mips/lib/delay.c
@@ -0,0 +1,69 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 by Waldorf Electronics
+ * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2007, 2014 Maciej W. Rozycki
+ */
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/param.h>
+#include <linux/smp.h>
+#include <linux/stringify.h>
+
+#include <asm/asm.h>
+#include <asm/compiler.h>
+#include <asm/war.h>
+
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+#define GCC_DADDI_IMM_ASM() "I"
+#else
+#define GCC_DADDI_IMM_ASM() "r"
+#endif
+
+#ifndef CONFIG_HAVE_PLAT_DELAY
+
+void __delay(unsigned long loops)
+{
+ __asm__ __volatile__ (
+ " .set noreorder \n"
+ " .align 3 \n"
+ "1: bnez %0, 1b \n"
+ " " __stringify(LONG_SUBU) " %0, %1 \n"
+ " .set reorder \n"
+ : "=r" (loops)
+ : GCC_DADDI_IMM_ASM() (1), "0" (loops));
+}
+EXPORT_SYMBOL(__delay);
+
+/*
+ * Division by multiplication: you don't have to worry about
+ * loss of precision.
+ *
+ * Use only for very small delays ( < 1 msec). Should probably use a
+ * lookup table, really, as the multiplications take much too long with
+ * short delays. This is a "reasonable" implementation, though (and the
+ * first constant multiplication gets optimized away if the delay is
+ * a constant)
+ */
+
+void __udelay(unsigned long us)
+{
+ unsigned int lpj = raw_current_cpu_data.udelay_val;
+
+ __delay((us * 0x000010c7ull * HZ * lpj) >> 32);
+}
+EXPORT_SYMBOL(__udelay);
+
+void __ndelay(unsigned long ns)
+{
+ unsigned int lpj = raw_current_cpu_data.udelay_val;
+
+ __delay((ns * 0x00000005ull * HZ * lpj) >> 32);
+}
+EXPORT_SYMBOL(__ndelay);
+
+#endif
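
The constant 0x000010c7 is 4295 = ceil(2^32 / 10^6), so the multiply-and-shift in __udelay() is a fixed-point substitute for dividing by 10^6 (likewise 0x5 = ceil(2^32 / 10^9) in __ndelay()); a sketch of the same arithmetic, with a hypothetical helper name:

/* Sketch: loops needed for a delay of 'us' microseconds, given lpj
 * (loops per jiffy) and HZ (jiffies per second, from <linux/param.h>).
 * Multiplying by ceil(2^32 / 10^6) and shifting right by 32 approximates
 * the division by 10^6; the constant is rounded up so the error tends
 * toward a slightly longer, never a dangerously shorter, delay.
 */
#include <linux/param.h>	/* HZ */

static unsigned long udelay_loops(unsigned long us, unsigned int lpj)
{
	return (us * 0x000010c7ull * HZ * lpj) >> 32;	/* ~ us * HZ * lpj / 1000000 */
}
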
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
new file mode 100644
index 000000000..425642363
--- /dev/null
+++ b/arch/mips/lib/dump_tlb.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Dump R4x00 TLB for debugging purposes.
+ *
+ * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
+ * Copyright (C) 1999 by Silicon Graphics, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+
+#include <asm/hazards.h>
+#include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+#include <asm/tlbdebug.h>
+
+void dump_tlb_regs(void)
+{
+ const int field = 2 * sizeof(unsigned long);
+
+ pr_info("Index : %0x\n", read_c0_index());
+ pr_info("PageMask : %0x\n", read_c0_pagemask());
+ if (cpu_has_guestid)
+ pr_info("GuestCtl1: %0x\n", read_c0_guestctl1());
+ pr_info("EntryHi : %0*lx\n", field, read_c0_entryhi());
+ pr_info("EntryLo0 : %0*lx\n", field, read_c0_entrylo0());
+ pr_info("EntryLo1 : %0*lx\n", field, read_c0_entrylo1());
+ pr_info("Wired : %0x\n", read_c0_wired());
+ switch (current_cpu_type()) {
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
+ case CPU_R16000:
+ pr_info("FrameMask: %0x\n", read_c0_framemask());
+ break;
+ }
+ if (cpu_has_small_pages || cpu_has_rixi || cpu_has_xpa)
+ pr_info("PageGrain: %0x\n", read_c0_pagegrain());
+ if (cpu_has_htw) {
+ pr_info("PWField : %0*lx\n", field, read_c0_pwfield());
+ pr_info("PWSize : %0*lx\n", field, read_c0_pwsize());
+ pr_info("PWCtl : %0x\n", read_c0_pwctl());
+ }
+}
+
+static inline const char *msk2str(unsigned int mask)
+{
+ switch (mask) {
+ case PM_4K: return "4kb";
+ case PM_16K: return "16kb";
+ case PM_64K: return "64kb";
+ case PM_256K: return "256kb";
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case PM_8K: return "8kb";
+ case PM_32K: return "32kb";
+ case PM_128K: return "128kb";
+ case PM_512K: return "512kb";
+ case PM_2M: return "2Mb";
+ case PM_8M: return "8Mb";
+ case PM_32M: return "32Mb";
+#endif
+#ifndef CONFIG_CPU_VR41XX
+ case PM_1M: return "1Mb";
+ case PM_4M: return "4Mb";
+ case PM_16M: return "16Mb";
+ case PM_64M: return "64Mb";
+ case PM_256M: return "256Mb";
+ case PM_1G: return "1Gb";
+#endif
+ }
+ return "";
+}
+
+static void dump_tlb(int first, int last)
+{
+ unsigned long s_entryhi, entryhi, asid, mmid;
+ unsigned long long entrylo0, entrylo1, pa;
+ unsigned int s_index, s_pagemask, s_guestctl1 = 0;
+ unsigned int pagemask, guestctl1 = 0, c0, c1, i;
+ unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
+ int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
+ unsigned long s_mmid;
+#ifdef CONFIG_32BIT
+ bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
+ int pwidth = xpa ? 11 : 8;
+ int vwidth = 8;
+#else
+ bool xpa = false;
+ int pwidth = 11;
+ int vwidth = 11;
+#endif
+
+ s_pagemask = read_c0_pagemask();
+ s_entryhi = read_c0_entryhi();
+ s_index = read_c0_index();
+
+ if (cpu_has_mmid)
+ asid = s_mmid = read_c0_memorymapid();
+ else
+ asid = s_entryhi & asidmask;
+
+ if (cpu_has_guestid)
+ s_guestctl1 = read_c0_guestctl1();
+
+ for (i = first; i <= last; i++) {
+ write_c0_index(i);
+ mtc0_tlbr_hazard();
+ tlb_read();
+ tlb_read_hazard();
+ pagemask = read_c0_pagemask();
+ entryhi = read_c0_entryhi();
+ entrylo0 = read_c0_entrylo0();
+ entrylo1 = read_c0_entrylo1();
+
+ if (cpu_has_mmid)
+ mmid = read_c0_memorymapid();
+ else
+ mmid = entryhi & asidmask;
+
+ if (cpu_has_guestid)
+ guestctl1 = read_c0_guestctl1();
+
+ /* EHINV bit marks entire entry as invalid */
+ if (cpu_has_tlbinv && entryhi & MIPS_ENTRYHI_EHINV)
+ continue;
+ /*
+ * Prior to tlbinv, unused entries have a virtual address of
+ * CKSEG0.
+ */
+ if ((entryhi & ~0x1ffffUL) == CKSEG0)
+ continue;
+ /*
+ * ASID takes effect in absence of G (global) bit.
+ * We check both G bits, even though architecturally they should
+ * match one another, because some revisions of the SB1 core may
+ * leave only a single G bit set after a machine check exception
+ * due to a duplicate TLB entry.
+ */
+ if (!((entrylo0 | entrylo1) & ENTRYLO_G) && (mmid != asid))
+ continue;
+
+ /*
+ * Only print entries in use
+ */
+ printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));
+
+ c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
+ c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
+
+ pr_cont("va=%0*lx asid=%0*lx",
+ vwidth, (entryhi & ~0x1fffUL),
+ asidwidth, mmid);
+ if (cpu_has_guestid)
+ pr_cont(" gid=%02lx",
+ (guestctl1 & MIPS_GCTL1_RID)
+ >> MIPS_GCTL1_RID_SHIFT);
+ /* RI/XI are in awkward places, so mask them off separately */
+ pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
+ if (xpa)
+ pa |= (unsigned long long)readx_c0_entrylo0() << 30;
+ pa = (pa << 6) & PAGE_MASK;
+ pr_cont("\n\t[");
+ if (cpu_has_rixi)
+ pr_cont("ri=%d xi=%d ",
+ (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
+ (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
+ pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [",
+ pwidth, pa, c0,
+ (entrylo0 & ENTRYLO_D) ? 1 : 0,
+ (entrylo0 & ENTRYLO_V) ? 1 : 0,
+ (entrylo0 & ENTRYLO_G) ? 1 : 0);
+ /* RI/XI are in awkward places, so mask them off separately */
+ pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
+ if (xpa)
+ pa |= (unsigned long long)readx_c0_entrylo1() << 30;
+ pa = (pa << 6) & PAGE_MASK;
+ if (cpu_has_rixi)
+ pr_cont("ri=%d xi=%d ",
+ (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
+ (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
+ pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
+ pwidth, pa, c1,
+ (entrylo1 & ENTRYLO_D) ? 1 : 0,
+ (entrylo1 & ENTRYLO_V) ? 1 : 0,
+ (entrylo1 & ENTRYLO_G) ? 1 : 0);
+ }
+ printk("\n");
+
+ write_c0_entryhi(s_entryhi);
+ write_c0_index(s_index);
+ write_c0_pagemask(s_pagemask);
+ if (cpu_has_guestid)
+ write_c0_guestctl1(s_guestctl1);
+}
+
+void dump_tlb_all(void)
+{
+ dump_tlb(0, current_cpu_data.tlbsize - 1);
+}
diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c
new file mode 100644
index 000000000..210f5a95e
--- /dev/null
+++ b/arch/mips/lib/iomap-pci.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implement the default iomap interfaces
+ *
+ * (C) Copyright 2004 Linus Torvalds
+ * (C) Copyright 2006 Ralf Baechle <ralf@linux-mips.org>
+ * (C) Copyright 2007 MIPS Technologies, Inc.
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/pci.h>
+#include <linux/export.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_PCI_DRIVERS_LEGACY
+
+void __iomem *__pci_ioport_map(struct pci_dev *dev,
+ unsigned long port, unsigned int nr)
+{
+ struct pci_controller *ctrl = dev->bus->sysdata;
+ unsigned long base = ctrl->io_map_base;
+
+ /* This will eventually become a BUG_ON but for now be gentle */
+ if (unlikely(!ctrl->io_map_base)) {
+ struct pci_bus *bus = dev->bus;
+ char name[8];
+
+ while (bus->parent)
+ bus = bus->parent;
+
+ ctrl->io_map_base = base = mips_io_port_base;
+
+ sprintf(name, "%04x:%02x", pci_domain_nr(bus), bus->number);
+ printk(KERN_WARNING "io_map_base of root PCI bus %s unset. "
+ "Trying to continue but you better\nfix this issue or "
+ "report it to linux-mips@linux-mips.org or your "
+ "vendor.\n", name);
+#ifdef CONFIG_PCI_DOMAINS
+ panic("To avoid data corruption io_map_base MUST be set with "
+ "multiple PCI domains.");
+#endif
+ }
+
+ return (void __iomem *) (ctrl->io_map_base + port);
+}
+
+#endif /* CONFIG_PCI_DRIVERS_LEGACY */
diff --git a/arch/mips/lib/iomap_copy.c b/arch/mips/lib/iomap_copy.c
new file mode 100644
index 000000000..157500a09
--- /dev/null
+++ b/arch/mips/lib/iomap_copy.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/export.h>
+#include <linux/io.h>
+
+/**
+ * __ioread64_copy - copy data from MMIO space, in 64-bit units
+ * @to: destination (must be 64-bit aligned)
+ * @from: source, in MMIO space (must be 64-bit aligned)
+ * @count: number of 64-bit quantities to copy
+ *
+ * Copy data from MMIO space to kernel space, in units of 32 or 64 bits at a
+ * time. Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ */
+void __ioread64_copy(void *to, const void __iomem *from, size_t count)
+{
+#ifdef CONFIG_64BIT
+ u64 *dst = to;
+ const u64 __iomem *src = from;
+ const u64 __iomem *end = src + count;
+
+ while (src < end)
+ *dst++ = __raw_readq(src++);
+#else
+ __ioread32_copy(to, from, count * 2);
+#endif
+}
+EXPORT_SYMBOL_GPL(__ioread64_copy);
diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h
new file mode 100644
index 000000000..199a7f962
--- /dev/null
+++ b/arch/mips/lib/libgcc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_LIBGCC_H
+#define __ASM_LIBGCC_H
+
+#include <asm/byteorder.h>
+
+typedef int word_type __attribute__ ((mode (__word__)));
+
+#ifdef __BIG_ENDIAN
+struct DWstruct {
+ int high, low;
+};
+
+struct TWstruct {
+ long long high, low;
+};
+#elif defined(__LITTLE_ENDIAN)
+struct DWstruct {
+ int low, high;
+};
+
+struct TWstruct {
+ long long low, high;
+};
+#else
+#error I feel sick.
+#endif
+
+typedef union {
+ struct DWstruct s;
+ long long ll;
+} DWunion;
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6)
+typedef int ti_type __attribute__((mode(TI)));
+
+typedef union {
+ struct TWstruct s;
+ ti_type ti;
+} TWunion;
+#endif
+
+#endif /* __ASM_LIBGCC_H */
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
new file mode 100644
index 000000000..88065ee43
--- /dev/null
+++ b/arch/mips/lib/memcpy.S
@@ -0,0 +1,709 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Unified implementation of memcpy, memmove and the __copy_user backend.
+ *
+ * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
+ * Copyright (C) 2002 Broadcom, Inc.
+ * memcpy/copy_user author: Mark Vandevoorde
+ * Copyright (C) 2007 Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ *
+ * Mnemonic names for arguments to memcpy/__copy_user
+ */
+
+/*
+ * Hack to resolve longstanding prefetch issue
+ *
+ * Prefetching may be fatal on some systems if we're prefetching beyond the
+ * end of memory. It's also a seriously bad idea on non-dma-coherent
+ * systems.
+ */
+#ifdef CONFIG_DMA_NONCOHERENT
+#undef CONFIG_CPU_HAS_PREFETCH
+#endif
+#ifdef CONFIG_MIPS_MALTA
+#undef CONFIG_CPU_HAS_PREFETCH
+#endif
+#ifdef CONFIG_CPU_MIPSR6
+#undef CONFIG_CPU_HAS_PREFETCH
+#endif
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/export.h>
+#include <asm/regdef.h>
+
+#define dst a0
+#define src a1
+#define len a2
+
+/*
+ * Spec
+ *
+ * memcpy copies len bytes from src to dst and sets v0 to dst.
+ * It assumes that
+ * - src and dst don't overlap
+ * - src is readable
+ * - dst is writable
+ * memcpy uses the standard calling convention
+ *
+ * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
+ * the number of uncopied bytes due to an exception caused by a read or write.
+ * __copy_user assumes that src and dst don't overlap, and that the call is
+ * implementing one of the following:
+ * copy_to_user
+ * - src is readable (no exceptions when reading src)
+ * copy_from_user
+ * - dst is writable (no exceptions when writing dst)
+ * __copy_user uses a non-standard calling convention; see
+ * include/asm-mips/uaccess.h
+ *
+ * When an exception happens on a load, the handler must
+ * ensure that all of the destination buffer is overwritten to prevent
+ * leaking information to user mode programs.
+ */
+
+/*
+ * Implementation
+ */
+
+/*
+ * The exception handler for loads requires that:
+ * 1- AT contain the address of the byte just past the end of the source
+ * of the copy,
+ * 2- src_entry <= src < AT, and
+ * 3- (dst - src) == (dst_entry - src_entry),
+ * The _entry suffix denotes values when __copy_user was called.
+ *
+ * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
+ * (2) is met by incrementing src by the number of bytes copied
+ * (3) is met by not doing loads between a pair of increments of dst and src
+ *
+ * The exception handlers for stores adjust len (if necessary) and return.
+ * These handlers do not need to overwrite any data.
+ *
+ * For __rmemcpy and memmove an exception is always a kernel bug, therefore
+ * they're not protected.
+ */
+
+/* Instruction type */
+#define LD_INSN 1
+#define ST_INSN 2
+/* Prefetch type */
+#define SRC_PREFETCH 1
+#define DST_PREFETCH 2
+#define LEGACY_MODE 1
+#define EVA_MODE 2
+#define USEROP 1
+#define KERNELOP 2
+
+/*
+ * Wrapper to add an entry in the exception table
+ * in case the insn causes a memory exception.
+ * Arguments:
+ * insn : Load/store instruction
+ * type : Instruction type
+ * reg : Register
+ * addr : Address
+ * handler : Exception handler
+ */
+
+#define EXC(insn, type, reg, addr, handler) \
+ .if \mode == LEGACY_MODE; \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous; \
+ /* This is assembled in EVA mode */ \
+ .else; \
+ /* If loading from user or storing to user */ \
+ .if ((\from == USEROP) && (type == LD_INSN)) || \
+ ((\to == USEROP) && (type == ST_INSN)); \
+9: __BUILD_EVA_INSN(insn##e, reg, addr); \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous; \
+ .else; \
+ /* \
+ * Still in EVA, but no need for \
+ * exception handler or EVA insn \
+ */ \
+ insn reg, addr; \
+ .endif; \
+ .endif
+
+/*
+ * Only on the 64-bit kernel can we make use of 64-bit registers.
+ */
+#ifdef CONFIG_64BIT
+#define USE_DOUBLE
+#endif
+
+#ifdef USE_DOUBLE
+
+#define LOADK ld /* No exception */
+#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler)
+#define ADD daddu
+#define SUB dsubu
+#define SRL dsrl
+#define SRA dsra
+#define SLL dsll
+#define SLLV dsllv
+#define SRLV dsrlv
+#define NBYTES 8
+#define LOG_NBYTES 3
+
+/*
+ * As we share a code base with the mips32 tree (which uses the o32 ABI
+ * register definitions), we need to redefine the register definitions from
+ * the n64 ABI register naming to the o32 ABI register naming.
+ */
+#undef t0
+#undef t1
+#undef t2
+#undef t3
+#define t0 $8
+#define t1 $9
+#define t2 $10
+#define t3 $11
+#define t4 $12
+#define t5 $13
+#define t6 $14
+#define t7 $15
+
+#else
+
+#define LOADK lw /* No exception */
+#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler)
+#define ADD addu
+#define SUB subu
+#define SRL srl
+#define SLL sll
+#define SRA sra
+#define SLLV sllv
+#define SRLV srlv
+#define NBYTES 4
+#define LOG_NBYTES 2
+
+#endif /* USE_DOUBLE */
+
+#define LOADB(reg, addr, handler) EXC(lb, LD_INSN, reg, addr, handler)
+#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
+
+#ifdef CONFIG_CPU_HAS_PREFETCH
+# define _PREF(hint, addr, type) \
+ .if \mode == LEGACY_MODE; \
+ kernel_pref(hint, addr); \
+ .else; \
+ .if ((\from == USEROP) && (type == SRC_PREFETCH)) || \
+ ((\to == USEROP) && (type == DST_PREFETCH)); \
+ /* \
+ * PREFE has only 9 bits for the offset \
+ * compared to PREF which has 16, so it may \
+ * need to use the $at register but this \
+ * register should remain intact because it's \
+ * used later on. Therefore use $v1. \
+ */ \
+ .set at=v1; \
+ user_pref(hint, addr); \
+ .set noat; \
+ .else; \
+ kernel_pref(hint, addr); \
+ .endif; \
+ .endif
+#else
+# define _PREF(hint, addr, type)
+#endif
+
+#define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH)
+#define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH)
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST LOADR
+#define LDREST LOADL
+#define STFIRST STORER
+#define STREST STOREL
+#define SHIFT_DISCARD SLLV
+#else
+#define LDFIRST LOADL
+#define LDREST LOADR
+#define STFIRST STOREL
+#define STREST STORER
+#define SHIFT_DISCARD SRLV
+#endif
+
+#define FIRST(unit) ((unit)*NBYTES)
+#define REST(unit) (FIRST(unit)+NBYTES-1)
+#define UNIT(unit) FIRST(unit)
+
+#define ADDRMASK (NBYTES-1)
+
+ .text
+ .set noreorder
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+ .set noat
+#else
+ .set at=v1
+#endif
+
+ .align 5
+
+ /*
+ * Macro to build the __copy_user common code
+ * Arguments:
+ * mode : LEGACY_MODE or EVA_MODE
+ * from : Source operand. USEROP or KERNELOP
+ * to : Destination operand. USEROP or KERNELOP
+ */
+ .macro __BUILD_COPY_USER mode, from, to
+
+ /* initialize __memcpy if this is the first time we execute this macro */
+ .ifnotdef __memcpy
+ .set __memcpy, 1
+ .hidden __memcpy /* make sure it does not leak */
+ .endif
+
+ /*
+ * Note: dst & src may be unaligned, len may be 0
+ * Temps
+ */
+#define rem t8
+
+ R10KCBARRIER(0(ra))
+ /*
+ * The "issue break"s below are very approximate.
+ * Issue delays for dcache fills will perturb the schedule, as will
+ * load queue full replay traps, etc.
+ *
+ * If len < NBYTES use byte operations.
+ */
+ PREFS( 0, 0(src) )
+ PREFD( 1, 0(dst) )
+ sltu t2, len, NBYTES
+ and t1, dst, ADDRMASK
+ PREFS( 0, 1*32(src) )
+ PREFD( 1, 1*32(dst) )
+ bnez t2, .Lcopy_bytes_checklen\@
+ and t0, src, ADDRMASK
+ PREFS( 0, 2*32(src) )
+ PREFD( 1, 2*32(dst) )
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
+ bnez t1, .Ldst_unaligned\@
+ nop
+ bnez t0, .Lsrc_unaligned_dst_aligned\@
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
+ or t0, t0, t1
+ bnez t0, .Lcopy_unaligned_bytes\@
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
+ /*
+ * use delay slot for fall-through
+ * src and dst are aligned; need to compute rem
+ */
+.Lboth_aligned\@:
+ SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
+ beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
+ and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES)
+ PREFS( 0, 3*32(src) )
+ PREFD( 1, 3*32(dst) )
+ .align 4
+1:
+ R10KCBARRIER(0(ra))
+ LOAD(t0, UNIT(0)(src), .Ll_exc\@)
+ LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
+ LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
+ LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
+ SUB len, len, 8*NBYTES
+ LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
+ LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@)
+ STORE(t0, UNIT(0)(dst), .Ls_exc_p8u\@)
+ STORE(t1, UNIT(1)(dst), .Ls_exc_p7u\@)
+ LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@)
+ LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@)
+ ADD src, src, 8*NBYTES
+ ADD dst, dst, 8*NBYTES
+ STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@)
+ STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@)
+ STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@)
+ STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@)
+ STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@)
+ STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@)
+ PREFS( 0, 8*32(src) )
+ PREFD( 1, 8*32(dst) )
+ bne len, rem, 1b
+ nop
+
+ /*
+ * len == rem == the number of bytes left to copy < 8*NBYTES
+ */
+.Lcleanup_both_aligned\@:
+ beqz len, .Ldone\@
+ sltu t0, len, 4*NBYTES
+ bnez t0, .Lless_than_4units\@
+ and rem, len, (NBYTES-1) # rem = len % NBYTES
+ /*
+ * len >= 4*NBYTES
+ */
+ LOAD( t0, UNIT(0)(src), .Ll_exc\@)
+ LOAD( t1, UNIT(1)(src), .Ll_exc_copy\@)
+ LOAD( t2, UNIT(2)(src), .Ll_exc_copy\@)
+ LOAD( t3, UNIT(3)(src), .Ll_exc_copy\@)
+ SUB len, len, 4*NBYTES
+ ADD src, src, 4*NBYTES
+ R10KCBARRIER(0(ra))
+ STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
+ STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
+ STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
+ STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 4*NBYTES
+ beqz len, .Ldone\@
+ .set noreorder
+.Lless_than_4units\@:
+ /*
+ * rem = len % NBYTES
+ */
+ beq rem, len, .Lcopy_bytes\@
+ nop
+1:
+ R10KCBARRIER(0(ra))
+ LOAD(t0, 0(src), .Ll_exc\@)
+ ADD src, src, NBYTES
+ SUB len, len, NBYTES
+ STORE(t0, 0(dst), .Ls_exc_p1u\@)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, NBYTES
+ bne rem, len, 1b
+ .set noreorder
+
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
+ /*
+ * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
+ * A loop would do only a byte at a time with possible branch
+ * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
+ * because can't assume read-access to dst. Instead, use
+ * STREST dst, which doesn't require read access to dst.
+ *
+ * This code should perform better than a simple loop on modern,
+ * wide-issue mips processors because the code has fewer branches and
+ * more instruction-level parallelism.
+ */
+#define bits t2
+ beqz len, .Ldone\@
+ ADD t1, dst, len # t1 is just past last byte of dst
+ li bits, 8*NBYTES
+ SLL rem, len, 3 # rem = number of bits to keep
+ LOAD(t0, 0(src), .Ll_exc\@)
+ SUB bits, bits, rem # bits = number of bits to discard
+ SHIFT_DISCARD t0, t0, bits
+ STREST(t0, -1(t1), .Ls_exc\@)
+ jr ra
+ move len, zero
+.Ldst_unaligned\@:
+ /*
+ * dst is unaligned
+ * t0 = src & ADDRMASK
+ * t1 = dst & ADDRMASK; T1 > 0
+ * len >= NBYTES
+ *
+ * Copy enough bytes to align dst
+ * Set match = (src and dst have same alignment)
+ */
+#define match rem
+ LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
+ ADD t2, zero, NBYTES
+ LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
+ SUB t2, t2, t1 # t2 = number of bytes copied
+ xor match, t0, t1
+ R10KCBARRIER(0(ra))
+ STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
+ beq len, t2, .Ldone\@
+ SUB len, len, t2
+ ADD dst, dst, t2
+ beqz match, .Lboth_aligned\@
+ ADD src, src, t2
+
+.Lsrc_unaligned_dst_aligned\@:
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ PREFS( 0, 3*32(src) )
+ beqz t0, .Lcleanup_src_unaligned\@
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ PREFD( 1, 3*32(dst) )
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+ R10KCBARRIER(0(ra))
+ LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+ LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
+ SUB len, len, 4*NBYTES
+ LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+ LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
+ LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
+ LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
+ LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
+ LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
+ PREFS( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed)
+ ADD src, src, 4*NBYTES
+#ifdef CONFIG_CPU_SB1
+ nop # improves slotting
+#endif
+ STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
+ STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
+ STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
+ STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
+ PREFD( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 4*NBYTES
+ bne len, rem, 1b
+ .set noreorder
+
+.Lcleanup_src_unaligned\@:
+ beqz len, .Ldone\@
+ and rem, len, NBYTES-1 # rem = len % NBYTES
+ beq rem, len, .Lcopy_bytes\@
+ nop
+1:
+ R10KCBARRIER(0(ra))
+ LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+ LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+ ADD src, src, NBYTES
+ SUB len, len, NBYTES
+ STORE(t0, 0(dst), .Ls_exc_p1u\@)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, NBYTES
+ bne len, rem, 1b
+ .set noreorder
+
+#endif /* !CONFIG_CPU_NO_LOAD_STORE_LR */
+.Lcopy_bytes_checklen\@:
+ beqz len, .Ldone\@
+ nop
+.Lcopy_bytes\@:
+ /* 0 < len < NBYTES */
+ R10KCBARRIER(0(ra))
+#define COPY_BYTE(N) \
+ LOADB(t0, N(src), .Ll_exc\@); \
+ SUB len, len, 1; \
+ beqz len, .Ldone\@; \
+ STOREB(t0, N(dst), .Ls_exc_p1\@)
+
+ COPY_BYTE(0)
+ COPY_BYTE(1)
+#ifdef USE_DOUBLE
+ COPY_BYTE(2)
+ COPY_BYTE(3)
+ COPY_BYTE(4)
+ COPY_BYTE(5)
+#endif
+ LOADB(t0, NBYTES-2(src), .Ll_exc\@)
+ SUB len, len, 1
+ jr ra
+ STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
+.Ldone\@:
+ jr ra
+ nop
+
+#ifdef CONFIG_CPU_NO_LOAD_STORE_LR
+.Lcopy_unaligned_bytes\@:
+1:
+ COPY_BYTE(0)
+ COPY_BYTE(1)
+ COPY_BYTE(2)
+ COPY_BYTE(3)
+ COPY_BYTE(4)
+ COPY_BYTE(5)
+ COPY_BYTE(6)
+ COPY_BYTE(7)
+ ADD src, src, 8
+ b 1b
+ ADD dst, dst, 8
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
+ .if __memcpy == 1
+ END(memcpy)
+ .set __memcpy, 0
+ .hidden __memcpy
+ .endif
+
+.Ll_exc_copy\@:
+ /*
+ * Copy bytes from src until faulting load address (or until a
+ * lb faults)
+ *
+ * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
+ * may be more than a byte beyond the last address.
+ * Hence, the lb below may get an exception.
+ *
+ * Assumes src < THREAD_BUADDR($28)
+ */
+ LOADK t0, TI_TASK($28)
+ nop
+ LOADK t0, THREAD_BUADDR(t0)
+1:
+ LOADB(t1, 0(src), .Ll_exc\@)
+ ADD src, src, 1
+ sb t1, 0(dst) # can't fault -- we're copy_from_user
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 1
+ bne src, t0, 1b
+ .set noreorder
+.Ll_exc\@:
+ LOADK t0, TI_TASK($28)
+ nop
+ LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
+ nop
+ SUB len, AT, t0 # len number of uncopied bytes
+ jr ra
+ nop
+
+#define SEXC(n) \
+ .set reorder; /* DADDI_WAR */ \
+.Ls_exc_p ## n ## u\@: \
+ ADD len, len, n*NBYTES; \
+ jr ra; \
+ .set noreorder
+
+SEXC(8)
+SEXC(7)
+SEXC(6)
+SEXC(5)
+SEXC(4)
+SEXC(3)
+SEXC(2)
+SEXC(1)
+
+.Ls_exc_p1\@:
+ .set reorder /* DADDI_WAR */
+ ADD len, len, 1
+ jr ra
+ .set noreorder
+.Ls_exc\@:
+ jr ra
+ nop
+ .endm
+
+#ifndef CONFIG_HAVE_PLAT_MEMCPY
+ .align 5
+LEAF(memmove)
+EXPORT_SYMBOL(memmove)
+ ADD t0, a0, a2
+ ADD t1, a1, a2
+ sltu t0, a1, t0 # dst + len <= src -> memcpy
+ sltu t1, a0, t1 # dst >= src + len -> memcpy
+ and t0, t1
+ beqz t0, .L__memcpy
+ move v0, a0 /* return value */
+ beqz a2, .Lr_out
+ END(memmove)
+
+ /* fall through to __rmemcpy */
+LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
+ sltu t0, a1, a0
+ beqz t0, .Lr_end_bytes_up # src >= dst
+ nop
+ ADD a0, a2 # dst = dst + len
+ ADD a1, a2 # src = src + len
+
+.Lr_end_bytes:
+ R10KCBARRIER(0(ra))
+ lb t0, -1(a1)
+ SUB a2, a2, 0x1
+ sb t0, -1(a0)
+ SUB a1, a1, 0x1
+ .set reorder /* DADDI_WAR */
+ SUB a0, a0, 0x1
+ bnez a2, .Lr_end_bytes
+ .set noreorder
+
+.Lr_out:
+ jr ra
+ move a2, zero
+
+.Lr_end_bytes_up:
+ R10KCBARRIER(0(ra))
+ lb t0, (a1)
+ SUB a2, a2, 0x1
+ sb t0, (a0)
+ ADD a1, a1, 0x1
+ .set reorder /* DADDI_WAR */
+ ADD a0, a0, 0x1
+ bnez a2, .Lr_end_bytes_up
+ .set noreorder
+
+ jr ra
+ move a2, zero
+ END(__rmemcpy)
+
+/*
+ * A combined memcpy/__copy_user
+ * __copy_user sets len to 0 for success; else to an upper bound of
+ * the number of uncopied bytes.
+ * memcpy sets v0 to dst.
+ */
+ .align 5
+LEAF(memcpy) /* a0=dst a1=src a2=len */
+EXPORT_SYMBOL(memcpy)
+ move v0, dst /* return value */
+.L__memcpy:
+FEXPORT(__copy_user)
+EXPORT_SYMBOL(__copy_user)
+ /* Legacy Mode, user <-> user */
+ __BUILD_COPY_USER LEGACY_MODE USEROP USEROP
+
+#endif
+
+#ifdef CONFIG_EVA
+
+/*
+ * For EVA we need distinct symbols for reading and writing to user space.
+ * This is because we need to use specific EVA instructions to perform the
+ * virtual <-> physical translation when a virtual address is actually in user
+ * space
+ */
+
+/*
+ * __copy_from_user (EVA)
+ */
+
+LEAF(__copy_from_user_eva)
+EXPORT_SYMBOL(__copy_from_user_eva)
+ __BUILD_COPY_USER EVA_MODE USEROP KERNELOP
+END(__copy_from_user_eva)
+
+
+
+/*
+ * __copy_to_user (EVA)
+ */
+
+LEAF(__copy_to_user_eva)
+EXPORT_SYMBOL(__copy_to_user_eva)
+__BUILD_COPY_USER EVA_MODE KERNELOP USEROP
+END(__copy_to_user_eva)
+
+/*
+ * __copy_in_user (EVA)
+ */
+
+LEAF(__copy_in_user_eva)
+EXPORT_SYMBOL(__copy_in_user_eva)
+__BUILD_COPY_USER EVA_MODE USEROP USEROP
+END(__copy_in_user_eva)
+
+#endif
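
memmove above first tests whether the two regions overlap at all and, if not, falls through to the optimized memcpy; otherwise __rmemcpy picks the copy direction from the relative position of src and dst. A plain C sketch of that decision (hypothetical function name, byte-wise for clarity only):

/* Sketch of the overlap test and direction choice made by memmove/__rmemcpy
 * above.  The real code uses the optimized word-at-a-time paths.
 */
#include <string.h>

static void *memmove_ref(void *dst, const void *src, unsigned long len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	/* regions overlap only if src < dst + len && dst < src + len */
	if (!(s < d + len && d < s + len))
		return memcpy(dst, src, len);	/* disjoint: plain memcpy */

	if (s < d)				/* copy backwards, high to low */
		while (len--)
			d[len] = s[len];
	else					/* copy forwards, low to high */
		while (len--)
			*d++ = *s++;
	return dst;
}
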
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
new file mode 100644
index 000000000..d5449e8a3
--- /dev/null
+++ b/arch/mips/lib/memset.S
@@ -0,0 +1,328 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2007 by Maciej W. Rozycki
+ * Copyright (C) 2011, 2012 MIPS Technologies, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/export.h>
+#include <asm/regdef.h>
+
+#if LONGSIZE == 4
+#define LONG_S_L swl
+#define LONG_S_R swr
+#else
+#define LONG_S_L sdl
+#define LONG_S_R sdr
+#endif
+
+#ifdef CONFIG_CPU_MICROMIPS
+#define STORSIZE (LONGSIZE * 2)
+#define STORMASK (STORSIZE - 1)
+#define FILL64RG t8
+#define FILLPTRG t7
+#undef LONG_S
+#define LONG_S LONG_SP
+#else
+#define STORSIZE LONGSIZE
+#define STORMASK LONGMASK
+#define FILL64RG a1
+#define FILLPTRG t0
+#endif
+
+#define LEGACY_MODE 1
+#define EVA_MODE 2
+
+/*
+ * No need to protect it with EVA #ifdefery. The generated block of code
+ * will never be assembled if EVA is not enabled.
+ */
+#define __EVAFY(insn, reg, addr) __BUILD_EVA_INSN(insn##e, reg, addr)
+#define ___BUILD_EVA_INSN(insn, reg, addr) __EVAFY(insn, reg, addr)
+
+#define EX(insn,reg,addr,handler) \
+ .if \mode == LEGACY_MODE; \
+9: insn reg, addr; \
+ .else; \
+9: ___BUILD_EVA_INSN(insn, reg, addr); \
+ .endif; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
+ .macro f_fill64 dst, offset, val, fixup, mode
+ EX(LONG_S, \val, (\offset + 0 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 1 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 2 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 3 * STORSIZE)(\dst), \fixup)
+#if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS))
+ EX(LONG_S, \val, (\offset + 4 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 5 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 6 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 7 * STORSIZE)(\dst), \fixup)
+#endif
+#if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4))
+ EX(LONG_S, \val, (\offset + 8 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 9 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 11 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 12 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 13 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 14 * STORSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 15 * STORSIZE)(\dst), \fixup)
+#endif
+ .endm
+
+ .align 5
+
+ /*
+ * Macro to generate the __bzero{,_user} symbol
+ * Arguments:
+ * mode: LEGACY_MODE or EVA_MODE
+ */
+ .macro __BUILD_BZERO mode
+ /* Initialize __memset if this is the first time we call this macro */
+ .ifnotdef __memset
+ .set __memset, 1
+ .hidden __memset /* Make sure it does not leak */
+ .endif
+
+ sltiu t0, a2, STORSIZE /* very small region? */
+ .set noreorder
+ bnez t0, .Lsmall_memset\@
+ andi t0, a0, STORMASK /* aligned? */
+ .set reorder
+
+#ifdef CONFIG_CPU_MICROMIPS
+ move t8, a1 /* used by 'swp' instruction */
+ move t9, a1
+#endif
+ .set noreorder
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+ beqz t0, 1f
+ PTR_SUBU t0, STORSIZE /* alignment in bytes */
+#else
+ .set noat
+ li AT, STORSIZE
+ beqz t0, 1f
+ PTR_SUBU t0, AT /* alignment in bytes */
+ .set at
+#endif
+ .set reorder
+
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
+ R10KCBARRIER(0(ra))
+#ifdef __MIPSEB__
+ EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
+#else
+ EX(LONG_S_R, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
+#endif
+ PTR_SUBU a0, t0 /* long align ptr */
+ PTR_ADDU a2, t0 /* correct size */
+
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
+#define STORE_BYTE(N) \
+ EX(sb, a1, N(a0), .Lbyte_fixup\@); \
+ .set noreorder; \
+ beqz t0, 0f; \
+ PTR_ADDU t0, 1; \
+ .set reorder;
+
+ PTR_ADDU a2, t0 /* correct size */
+ PTR_ADDU t0, 1
+ STORE_BYTE(0)
+ STORE_BYTE(1)
+#if LONGSIZE == 4
+ EX(sb, a1, 2(a0), .Lbyte_fixup\@)
+#else
+ STORE_BYTE(2)
+ STORE_BYTE(3)
+ STORE_BYTE(4)
+ STORE_BYTE(5)
+ EX(sb, a1, 6(a0), .Lbyte_fixup\@)
+#endif
+0:
+ ori a0, STORMASK
+ xori a0, STORMASK
+ PTR_ADDIU a0, STORSIZE
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
+1: ori t1, a2, 0x3f /* # of full blocks */
+ xori t1, 0x3f
+ andi t0, a2, 0x40-STORSIZE
+ beqz t1, .Lmemset_partial\@ /* no block to fill */
+
+ PTR_ADDU t1, a0 /* end address */
+1: PTR_ADDIU a0, 64
+ R10KCBARRIER(0(ra))
+ f_fill64 a0, -64, FILL64RG, .Lfwd_fixup\@, \mode
+ bne t1, a0, 1b
+
+.Lmemset_partial\@:
+ R10KCBARRIER(0(ra))
+ PTR_LA t1, 2f /* where to start */
+#ifdef CONFIG_CPU_MICROMIPS
+ LONG_SRL t7, t0, 1
+#endif
+#if LONGSIZE == 4
+ PTR_SUBU t1, FILLPTRG
+#else
+ .set noat
+ LONG_SRL AT, FILLPTRG, 1
+ PTR_SUBU t1, AT
+ .set at
+#endif
+ PTR_ADDU a0, t0 /* dest ptr */
+ jr t1
+
+ /* ... but first do longs ... */
+ f_fill64 a0, -64, FILL64RG, .Lpartial_fixup\@, \mode
+2: andi a2, STORMASK /* At most one long to go */
+
+ .set noreorder
+ beqz a2, 1f
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
+ PTR_ADDU a0, a2 /* What's left */
+ .set reorder
+ R10KCBARRIER(0(ra))
+#ifdef __MIPSEB__
+ EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@)
+#else
+ EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
+#endif
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
+ PTR_SUBU t0, $0, a2
+ .set reorder
+ move a2, zero /* No remaining longs */
+ PTR_ADDIU t0, 1
+ STORE_BYTE(0)
+ STORE_BYTE(1)
+#if LONGSIZE == 4
+ EX(sb, a1, 2(a0), .Lbyte_fixup\@)
+#else
+ STORE_BYTE(2)
+ STORE_BYTE(3)
+ STORE_BYTE(4)
+ STORE_BYTE(5)
+ EX(sb, a1, 6(a0), .Lbyte_fixup\@)
+#endif
+0:
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
+1: move a2, zero
+ jr ra
+
+.Lsmall_memset\@:
+ PTR_ADDU t1, a0, a2
+ beqz a2, 2f
+
+1: PTR_ADDIU a0, 1 /* fill bytewise */
+ R10KCBARRIER(0(ra))
+ .set noreorder
+ bne t1, a0, 1b
+ EX(sb, a1, -1(a0), .Lsmall_fixup\@)
+ .set reorder
+
+2: move a2, zero
+ jr ra /* done */
+ .if __memset == 1
+ END(memset)
+ .set __memset, 0
+ .hidden __memset
+ .endif
+
+#ifdef CONFIG_CPU_NO_LOAD_STORE_LR
+.Lbyte_fixup\@:
+ /*
+ * unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1
+ * a2 = a2 - t0 + 1
+ */
+ PTR_SUBU a2, t0
+ PTR_ADDIU a2, 1
+ jr ra
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
+
+.Lfirst_fixup\@:
+ /* unset_bytes already in a2 */
+ jr ra
+
+.Lfwd_fixup\@:
+ /*
+ * unset_bytes = partial_start_addr + #bytes - fault_addr
+ * a2 = t1 + (a2 & 3f) - $28->task->BUADDR
+ */
+ PTR_L t0, TI_TASK($28)
+ andi a2, 0x3f
+ LONG_L t0, THREAD_BUADDR(t0)
+ LONG_ADDU a2, t1
+ LONG_SUBU a2, t0
+ jr ra
+
+.Lpartial_fixup\@:
+ /*
+ * unset_bytes = partial_end_addr + #bytes - fault_addr
+ * a2 = a0 + (a2 & STORMASK) - $28->task->BUADDR
+ */
+ PTR_L t0, TI_TASK($28)
+ andi a2, STORMASK
+ LONG_L t0, THREAD_BUADDR(t0)
+ LONG_ADDU a2, a0
+ LONG_SUBU a2, t0
+ jr ra
+
+.Llast_fixup\@:
+ /* unset_bytes already in a2 */
+ jr ra
+
+.Lsmall_fixup\@:
+ /*
+ * unset_bytes = end_addr - current_addr + 1
+ * a2 = t1 - a0 + 1
+ */
+ PTR_SUBU a2, t1, a0
+ PTR_ADDIU a2, 1
+ jr ra
+
+ .endm
+
+/*
+ * memset(void *s, int c, size_t n)
+ *
+ * a0: start of area to clear
+ * a1: char to fill with
+ * a2: size of area to clear
+ */
+
+LEAF(memset)
+EXPORT_SYMBOL(memset)
+ move v0, a0 /* result */
+ beqz a1, 1f
+
+ andi a1, 0xff /* spread fillword */
+ LONG_SLL t1, a1, 8
+ or a1, t1
+ LONG_SLL t1, a1, 16
+#if LONGSIZE == 8
+ or a1, t1
+ LONG_SLL t1, a1, 32
+#endif
+ or a1, t1
+1:
+#ifndef CONFIG_EVA
+FEXPORT(__bzero)
+EXPORT_SYMBOL(__bzero)
+#else
+FEXPORT(__bzero_kernel)
+EXPORT_SYMBOL(__bzero_kernel)
+#endif
+ __BUILD_BZERO LEGACY_MODE
+
+#ifdef CONFIG_EVA
+LEAF(__bzero)
+EXPORT_SYMBOL(__bzero)
+ __BUILD_BZERO EVA_MODE
+END(__bzero)
+#endif
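
Before handing off to __bzero, memset first replicates the fill byte across a full machine word ("spread fillword") so the bulk loops can use word and 64-byte block stores; the equivalent in C, sketched with a hypothetical helper (BITS_PER_LONG is the kernel macro from <asm/bitsperlong.h>):

/* Sketch of the "spread fillword" step at the top of memset above. */
static unsigned long spread_fill_byte(int c)
{
	unsigned long fill = (unsigned char)c;	/* andi a1, 0xff */

	fill |= fill << 8;	/* 0x000000cc -> 0x0000cccc */
	fill |= fill << 16;	/* -> 0xcccccccc */
#if BITS_PER_LONG == 64
	fill |= fill << 32;	/* -> 0xcccccccccccccccc */
#endif
	return fill;
}
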
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
new file mode 100644
index 000000000..a9b72eacf
--- /dev/null
+++ b/arch/mips/lib/mips-atomic.c
@@ -0,0 +1,113 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <asm/irqflags.h>
+#include <asm/hazards.h>
+#include <linux/compiler.h>
+#include <linux/preempt.h>
+#include <linux/export.h>
+#include <linux/stringify.h>
+
+#if !defined(CONFIG_CPU_HAS_DIEI)
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, operating on the IE bit alone is not enough.
+ *
+ * If mfc0 $12 follows a store and the mfc0 is the last instruction of a
+ * page and fetching the next instruction causes a TLB miss, the result
+ * of the mfc0 might wrongly contain the EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask EXL bit of the result or place a nop before mfc0.
+ */
+notrace void arch_local_irq_disable(void)
+{
+ preempt_disable_notrace();
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noat \n"
+ " mfc0 $1,$12 \n"
+ " ori $1,0x1f \n"
+ " xori $1,0x1f \n"
+ " .set noreorder \n"
+ " mtc0 $1,$12 \n"
+ " " __stringify(__irq_disable_hazard) " \n"
+ " .set pop \n"
+ : /* no outputs */
+ : /* no inputs */
+ : "memory");
+
+ preempt_enable_notrace();
+}
+EXPORT_SYMBOL(arch_local_irq_disable);
+
+notrace unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+
+ preempt_disable_notrace();
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set reorder \n"
+ " .set noat \n"
+ " mfc0 %[flags], $12 \n"
+ " ori $1, %[flags], 0x1f \n"
+ " xori $1, 0x1f \n"
+ " .set noreorder \n"
+ " mtc0 $1, $12 \n"
+ " " __stringify(__irq_disable_hazard) " \n"
+ " .set pop \n"
+ : [flags] "=r" (flags)
+ : /* no inputs */
+ : "memory");
+
+ preempt_enable_notrace();
+
+ return flags;
+}
+EXPORT_SYMBOL(arch_local_irq_save);
+
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long __tmp1;
+
+ preempt_disable_notrace();
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder \n"
+ " .set noat \n"
+ " mfc0 $1, $12 \n"
+ " andi %[flags], 1 \n"
+ " ori $1, 0x1f \n"
+ " xori $1, 0x1f \n"
+ " or %[flags], $1 \n"
+ " mtc0 %[flags], $12 \n"
+ " " __stringify(__irq_disable_hazard) " \n"
+ " .set pop \n"
+ : [flags] "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
+
+ preempt_enable_notrace();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+#endif /* !CONFIG_CPU_HAS_DIEI */
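
These out-of-line helpers back the generic local_irq_save()/local_irq_restore() wrappers on cores without the di/ei instructions; a minimal usage sketch (hypothetical function, for illustration only):

/* Sketch: a short critical section using the helpers defined above. */
static int protected_increment(int *counter)
{
	unsigned long flags;
	int old;

	flags = arch_local_irq_save();	/* mask interrupts, remember old state */
	old = (*counter)++;		/* work that must not be interrupted */
	arch_local_irq_restore(flags);	/* put the IE state back as it was */

	return old;
}
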
diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
new file mode 100644
index 000000000..4c2483f41
--- /dev/null
+++ b/arch/mips/lib/multi3.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+
+#include "libgcc.h"
+
+/*
+ * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
+ * that specific case only we implement that intrinsic here.
+ *
+ * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
+ */
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
+
+/* multiply 64-bit values, low 64-bits returned */
+static inline long long notrace dmulu(long long a, long long b)
+{
+ long long res;
+
+ asm ("dmulu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
+ return res;
+}
+
+/* multiply 64-bit unsigned values, high 64-bits of 128-bit result returned */
+static inline long long notrace dmuhu(long long a, long long b)
+{
+ long long res;
+
+ asm ("dmuhu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
+ return res;
+}
+
+/* multiply 128-bit values, low 128-bits returned */
+ti_type notrace __multi3(ti_type a, ti_type b)
+{
+ TWunion res, aa, bb;
+
+ aa.ti = a;
+ bb.ti = b;
+
+ /*
+ * a * b = (a.lo * b.lo)
+ * + 2^64 * (a.hi * b.lo + a.lo * b.hi)
+ * [+ 2^128 * (a.hi * b.hi)]
+ */
+ res.s.low = dmulu(aa.s.low, bb.s.low);
+ res.s.high = dmuhu(aa.s.low, bb.s.low);
+ res.s.high += dmulu(aa.s.high, bb.s.low);
+ res.s.high += dmulu(aa.s.low, bb.s.high);
+
+ return res.ti;
+}
+EXPORT_SYMBOL(__multi3);
+
+#endif /* 64BIT && CPU_MIPSR6 && GCC7 */
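The decomposition in the comment above (a * b = a.lo*b.lo + 2^64 * (a.hi*b.lo + a.lo*b.hi), with the 2^128 term dropped because only 128 result bits are kept) can be sanity-checked with a small host-side C sketch. Illustrative only: mul_64x64_hi() below is a made-up portable stand-in for the dmuhu instruction, and the test values are arbitrary.

#include <stdint.h>
#include <stdio.h>

struct u128 { uint64_t hi, lo; };

/* high 64 bits of a 64x64 -> 128 unsigned multiply, via 32-bit limbs */
static uint64_t mul_64x64_hi(uint64_t a, uint64_t b)
{
	uint64_t a_lo = (uint32_t)a, a_hi = a >> 32;
	uint64_t b_lo = (uint32_t)b, b_hi = b >> 32;
	uint64_t p0 = a_lo * b_lo;
	uint64_t p1 = a_lo * b_hi;
	uint64_t p2 = a_hi * b_lo;
	uint64_t p3 = a_hi * b_hi;
	uint64_t mid = (p0 >> 32) + (uint32_t)p1 + (uint32_t)p2;

	return p3 + (p1 >> 32) + (p2 >> 32) + (mid >> 32);
}

/* same structure as __multi3 above: dmulu -> plain *, dmuhu -> helper */
static struct u128 mul_u128(struct u128 a, struct u128 b)
{
	struct u128 r;

	r.lo  = a.lo * b.lo;
	r.hi  = mul_64x64_hi(a.lo, b.lo);
	r.hi += a.hi * b.lo;
	r.hi += a.lo * b.hi;
	return r;
}

int main(void)
{
	struct u128 a = { 0, 0xffffffffffffffffULL };
	struct u128 b = { 0, 2 };
	struct u128 r = mul_u128(a, b);

	/* expect: 0000000000000001 fffffffffffffffe */
	printf("%016llx %016llx\n", (unsigned long long)r.hi,
	       (unsigned long long)r.lo);
	return 0;
}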
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
new file mode 100644
index 000000000..10b4bf7f7
--- /dev/null
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Dump R3000 TLB for debugging purposes.
+ *
+ * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
+ * Copyright (C) 1999 by Silicon Graphics, Inc.
+ * Copyright (C) 1999 by Harald Koerfgen
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+#include <asm/tlbdebug.h>
+
+extern int r3k_have_wired_reg;
+
+void dump_tlb_regs(void)
+{
+ pr_info("Index : %0x\n", read_c0_index());
+ pr_info("EntryHi : %0lx\n", read_c0_entryhi());
+ pr_info("EntryLo : %0lx\n", read_c0_entrylo0());
+ if (r3k_have_wired_reg)
+ pr_info("Wired : %0x\n", read_c0_wired());
+}
+
+static void dump_tlb(int first, int last)
+{
+ int i;
+ unsigned int asid;
+ unsigned long entryhi, entrylo0, asid_mask;
+
+ asid_mask = cpu_asid_mask(&current_cpu_data);
+ asid = read_c0_entryhi() & asid_mask;
+
+ for (i = first; i <= last; i++) {
+ write_c0_index(i<<8);
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ "tlbr\n\t"
+ "nop\n\t"
+ ".set\treorder");
+ entryhi = read_c0_entryhi();
+ entrylo0 = read_c0_entrylo0();
+
+ /* Unused entries have a virtual address of KSEG0. */
+ if ((entryhi & PAGE_MASK) != KSEG0 &&
+ (entrylo0 & R3K_ENTRYLO_G ||
+ (entryhi & asid_mask) == asid)) {
+ /*
+ * Only print entries in use
+ */
+ printk("Index: %2d ", i);
+
+ pr_cont("va=%08lx asid=%08lx"
+ " [pa=%06lx n=%d d=%d v=%d g=%d]",
+ entryhi & PAGE_MASK,
+ entryhi & asid_mask,
+ entrylo0 & PAGE_MASK,
+ (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
+ (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
+ (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
+ (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
+ }
+ }
+ printk("\n");
+
+ write_c0_entryhi(asid);
+}
+
+void dump_tlb_all(void)
+{
+ dump_tlb(0, current_cpu_data.tlbsize - 1);
+}
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
new file mode 100644
index 000000000..acdff66bd
--- /dev/null
+++ b/arch/mips/lib/strncpy_user.S
@@ -0,0 +1,85 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1999 by Ralf Baechle
+ * Copyright (C) 2011 MIPS Technologies, Inc.
+ */
+#include <linux/errno.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/export.h>
+#include <asm/regdef.h>
+
+#define EX(insn,reg,addr,handler) \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
+/*
+ * Returns: -EFAULT if exception before terminator, N if the entire
+ * buffer filled, else strlen.
+ */
+
+/*
+ * Ugly special case we have to check for: we might get passed a user space
+ * pointer which wraps into the kernel space. We don't deal with that. If
+ * it happens, at most a few bytes of the exception handlers will be copied.
+ */
+
+ .macro __BUILD_STRNCPY_ASM func
+LEAF(__strncpy_from_\func\()_asm)
+ LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
+ and v0, a1
+ bnez v0, .Lfault\@
+
+ move t0, zero
+ move v1, a1
+.ifeqs "\func","kernel"
+1: EX(lbu, v0, (v1), .Lfault\@)
+.else
+1: EX(lbue, v0, (v1), .Lfault\@)
+.endif
+ PTR_ADDIU v1, 1
+ R10KCBARRIER(0(ra))
+ sb v0, (a0)
+ beqz v0, 2f
+ PTR_ADDIU t0, 1
+ PTR_ADDIU a0, 1
+ bne t0, a2, 1b
+2: PTR_ADDU v0, a1, t0
+ xor v0, a1
+ bltz v0, .Lfault\@
+ move v0, t0
+ jr ra # return n
+ END(__strncpy_from_\func\()_asm)
+
+.Lfault\@:
+ li v0, -EFAULT
+ jr ra
+
+ .section __ex_table,"a"
+ PTR 1b, .Lfault\@
+ .previous
+
+ .endm
+
+#ifndef CONFIG_EVA
+ /* Set aliases */
+ .global __strncpy_from_user_asm
+ .set __strncpy_from_user_asm, __strncpy_from_kernel_asm
+EXPORT_SYMBOL(__strncpy_from_user_asm)
+#endif
+
+__BUILD_STRNCPY_ASM kernel
+EXPORT_SYMBOL(__strncpy_from_kernel_asm)
+
+#ifdef CONFIG_EVA
+ .set push
+ .set eva
+__BUILD_STRNCPY_ASM user
+ .set pop
+EXPORT_SYMBOL(__strncpy_from_user_asm)
+#endif
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
new file mode 100644
index 000000000..e1bacf5a3
--- /dev/null
+++ b/arch/mips/lib/strnlen_user.S
@@ -0,0 +1,84 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle
+ * Copyright (c) 1999 Silicon Graphics, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/export.h>
+#include <asm/regdef.h>
+
+#define EX(insn,reg,addr,handler) \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
+/*
+ * Return the size of a string including the ending NUL character up to a
+ * maximum of a1 or 0 in case of error.
+ *
+ * Note: for performance reasons we deliberately accept that a user may
+ * make strlen_user and strnlen_user access the first few KSEG0
+ * bytes. There's nothing secret there. On 64-bit accessing beyond
+ * the maximum is a tad hairier ...
+ */
+ .macro __BUILD_STRNLEN_ASM func
+LEAF(__strnlen_\func\()_asm)
+ LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
+ and v0, a0
+ bnez v0, .Lfault\@
+
+ move v0, a0
+ PTR_ADDU a1, a0 # stop pointer
+1:
+#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
+ .set noat
+ li AT, 1
+#endif
+ beq v0, a1, 1f # limit reached?
+.ifeqs "\func", "kernel"
+ EX(lb, t0, (v0), .Lfault\@)
+.else
+ EX(lbe, t0, (v0), .Lfault\@)
+.endif
+ .set noreorder
+ bnez t0, 1b
+1:
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+ PTR_ADDIU v0, 1
+#else
+ PTR_ADDU v0, AT
+ .set at
+#endif
+ .set reorder
+ PTR_SUBU v0, a0
+ jr ra
+ END(__strnlen_\func\()_asm)
+
+.Lfault\@:
+ move v0, zero
+ jr ra
+ .endm
+
+#ifndef CONFIG_EVA
+ /* Set aliases */
+ .global __strnlen_user_asm
+ .set __strnlen_user_asm, __strnlen_kernel_asm
+EXPORT_SYMBOL(__strnlen_user_asm)
+#endif
+
+__BUILD_STRNLEN_ASM kernel
+EXPORT_SYMBOL(__strnlen_kernel_asm)
+
+#ifdef CONFIG_EVA
+
+ .set push
+ .set eva
+__BUILD_STRNLEN_ASM user
+ .set pop
+EXPORT_SYMBOL(__strnlen_user_asm)
+#endif
diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
new file mode 100644
index 000000000..f80a67c09
--- /dev/null
+++ b/arch/mips/lib/uncached.c
@@ -0,0 +1,82 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Thiemo Seufer
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Author: Maciej W. Rozycki <macro@mips.com>
+ */
+
+
+#include <asm/addrspace.h>
+#include <asm/bug.h>
+#include <asm/cacheflush.h>
+
+#ifndef CKSEG2
+#define CKSEG2 CKSSEG
+#endif
+#ifndef TO_PHYS_MASK
+#define TO_PHYS_MASK -1
+#endif
+
+/*
+ * FUNC is executed in one of the uncached segments, depending on its
+ * original address as follows:
+ *
+ * 1. If the original address is in CKSEG0 or CKSEG1, then the uncached
+ * segment used is CKSEG1.
+ * 2. If the original address is in XKPHYS, then the uncached segment
+ * used is XKPHYS(2).
+ * 3. Otherwise it's a bug.
+ *
+ * The same remapping is done with the stack pointer. Stack handling
+ * works because we don't handle stack arguments or more complex return
+ * values, so we can avoid sharing the same stack area between a cached
+ * and the uncached mode.
+ */
+unsigned long run_uncached(void *func)
+{
+ register long ret __asm__("$2");
+ long lfunc = (long)func, ufunc;
+ long usp;
+ long sp;
+
+ __asm__("move %0, $sp" : "=r" (sp));
+
+ if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
+ usp = CKSEG1ADDR(sp);
+#ifdef CONFIG_64BIT
+ else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0, 0) &&
+ (long long)sp < (long long)PHYS_TO_XKPHYS(8, 0))
+ usp = PHYS_TO_XKPHYS(K_CALG_UNCACHED,
+ XKPHYS_TO_PHYS((long long)sp));
+#endif
+ else {
+ BUG();
+ usp = sp;
+ }
+ if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2)
+ ufunc = CKSEG1ADDR(lfunc);
+#ifdef CONFIG_64BIT
+ else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0, 0) &&
+ (long long)lfunc < (long long)PHYS_TO_XKPHYS(8, 0))
+ ufunc = PHYS_TO_XKPHYS(K_CALG_UNCACHED,
+ XKPHYS_TO_PHYS((long long)lfunc));
+#endif
+ else {
+ BUG();
+ ufunc = lfunc;
+ }
+
+ __asm__ __volatile__ (
+ " move $16, $sp\n"
+ " move $sp, %1\n"
+ " jalr %2\n"
+ " move $sp, $16"
+ : "=r" (ret)
+ : "r" (usp), "r" (ufunc)
+ : "$16", "$31");
+
+ return ret;
+}
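The CKSEG0/CKSEG1 remapping described in the comment above boils down to keeping the low 29 address bits (the 512MB physical offset) and swapping in the uncached segment base. A minimal user-space illustration, with the architectural 32-bit segment base hard-coded rather than taken from asm/addrspace.h:

#include <stdint.h>
#include <stdio.h>

#define KSEG1_BASE 0xa0000000u	/* uncached, unmapped segment */

/* illustrative equivalent of CKSEG1ADDR() for 32-bit kernel addresses */
static uint32_t kseg1addr(uint32_t va)
{
	/* KSEG0 and KSEG1 both map physical 0..512MB; only the top bits differ */
	return (va & 0x1fffffffu) | KSEG1_BASE;
}

int main(void)
{
	uint32_t cached = 0x80123450u;	/* arbitrary KSEG0 (cached) address */

	/* prints: cached 80123450 -> uncached a0123450 */
	printf("cached %08x -> uncached %08x\n", cached, kseg1addr(cached));
	return 0;
}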
diff --git a/arch/mips/loongson2ef/Kconfig b/arch/mips/loongson2ef/Kconfig
new file mode 100644
index 000000000..96dc6eba4
--- /dev/null
+++ b/arch/mips/loongson2ef/Kconfig
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: GPL-2.0
+if MACH_LOONGSON2EF
+
+choice
+ prompt "Machine Type"
+
+config LEMOTE_FULOONG2E
+ bool "Lemote Fuloong(2e) mini-PC"
+ select ARCH_SPARSEMEM_ENABLE
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select CEVT_R4K
+ select CSRC_R4K
+ select SYS_HAS_CPU_LOONGSON2E
+ select DMA_NONCOHERENT
+ select BOOT_ELF32
+ select BOARD_SCACHE
+ select FORCE_PCI
+ select I8259
+ select ISA
+ select IRQ_MIPS_CPU
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_HAS_EARLY_PRINTK
+ select USE_GENERIC_EARLY_PRINTK_8250
+ select GENERIC_ISA_DMA_SUPPORT_BROKEN
+ select CPU_HAS_WB
+ select LOONGSON_MC146818
+ help
+ Lemote Fuloong(2e) mini-PC board based on the Chinese Loongson-2E CPU and
+ an FPGA northbridge.
+
+ The Lemote Fuloong(2e) mini PC has a VIA686B south bridge.
+
+config LEMOTE_MACH2F
+ bool "Lemote Loongson 2F family machines"
+ select ARCH_SPARSEMEM_ENABLE
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select BOARD_SCACHE
+ select BOOT_ELF32
+ select CEVT_R4K if ! MIPS_EXTERNAL_TIMER
+ select CPU_HAS_WB
+ select CS5536
+ select CSRC_R4K if ! MIPS_EXTERNAL_TIMER
+ select DMA_NONCOHERENT
+ select GENERIC_ISA_DMA_SUPPORT_BROKEN
+ select FORCE_PCI
+ select I8259
+ select IRQ_MIPS_CPU
+ select ISA
+ select SYS_HAS_CPU_LOONGSON2F
+ select SYS_HAS_EARLY_PRINTK
+ select USE_GENERIC_EARLY_PRINTK_8250
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select LOONGSON_MC146818
+ help
+ Lemote Loongson 2F family machines use the 2F revision of the
+ Loongson processor and the AMD CS5536 south bridge.
+
+ This family includes the fuloong2f mini PC, the yeeloong2f notebook,
+ the LingLoong all-in-one PC and so forth.
+
+endchoice
+
+config CS5536
+ bool
+
+config CS5536_MFGPT
+ bool "CS5536 MFGPT Timer"
+ depends on CS5536 && !HIGH_RES_TIMERS
+ select MIPS_EXTERNAL_TIMER
+ help
+ This option enables the mfgpt0 timer of the AMD CS5536. With this timer
+ switched on you cannot use high resolution timers.
+
+ If you want to enable the Loongson2 CPUFreq driver, please enable
+ this option first; otherwise you will get a wrong system time.
+
+ If unsure, say Yes.
+
+config LOONGSON_UART_BASE
+ bool
+ default y
+ depends on EARLY_PRINTK || SERIAL_8250
+
+config LOONGSON_MC146818
+ bool
+ default n
+
+endif # MACH_LOONGSON2EF
diff --git a/arch/mips/loongson2ef/Makefile b/arch/mips/loongson2ef/Makefile
new file mode 100644
index 000000000..d4af1605c
--- /dev/null
+++ b/arch/mips/loongson2ef/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Common code for all Loongson based systems
+#
+
+obj-$(CONFIG_MACH_LOONGSON2EF) += common/
+
+#
+# Lemote Fuloong mini-PC (Loongson 2E-based)
+#
+
+obj-$(CONFIG_LEMOTE_FULOONG2E) += fuloong-2e/
+
+#
+# Lemote loongson2f family machines
+#
+
+obj-$(CONFIG_LEMOTE_MACH2F) += lemote-2f/
diff --git a/arch/mips/loongson2ef/Platform b/arch/mips/loongson2ef/Platform
new file mode 100644
index 000000000..ae023b9a1
--- /dev/null
+++ b/arch/mips/loongson2ef/Platform
@@ -0,0 +1,57 @@
+#
+# Loongson Processors' Support
+#
+
+# Only gcc >= 4.4 has Loongson-specific support
+cflags-$(CONFIG_CPU_LOONGSON2EF) += -Wa,--trap
+cflags-$(CONFIG_CPU_LOONGSON2E) += \
+ $(call cc-option,-march=loongson2e,-march=r4600)
+cflags-$(CONFIG_CPU_LOONGSON2F) += \
+ $(call cc-option,-march=loongson2f,-march=r4600)
+#
+# Some versions of binutils, not currently mainline as of 2019/02/04, support
+# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
+# to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h for a
+# description).
+#
+# We disable this in order to prevent the assembler meddling with the
+# instruction that labels refer to, i.e. if we label an ll instruction:
+#
+# 1: ll v0, 0(a0)
+#
+# ...then with the assembler fix applied the label may actually point at a sync
+# instruction inserted by the assembler, and if we were using the label in an
+# exception table the table would no longer contain the address of the ll
+# instruction.
+#
+# Avoid this by explicitly disabling that assembler behaviour. If upstream
+# binutils does not merge support for the flag then we can revisit & remove
+# this later - for now it ensures vendor toolchains don't cause problems.
+#
+cflags-$(CONFIG_CPU_LOONGSON2EF) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
+
+# Enable the workarounds for Loongson2f
+ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
+ ifeq ($(call as-option,-Wa$(comma)-mfix-loongson2f-nop,),)
+ $(error only binutils >= 2.20.2 have needed option -mfix-loongson2f-nop)
+ else
+ cflags-$(CONFIG_CPU_NOP_WORKAROUNDS) += -Wa$(comma)-mfix-loongson2f-nop
+ endif
+ ifeq ($(call as-option,-Wa$(comma)-mfix-loongson2f-jump,),)
+ $(error only binutils >= 2.20.2 have needed option -mfix-loongson2f-jump)
+ else
+ cflags-$(CONFIG_CPU_JUMP_WORKAROUNDS) += -Wa$(comma)-mfix-loongson2f-jump
+ endif
+endif
+
+# Some -march= flags enable MMI instructions, and GCC complains about that
+# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+cflags-y += $(call cc-option,-mno-loongson-mmi)
+
+#
+# Loongson Machines' Support
+#
+
+cflags-$(CONFIG_MACH_LOONGSON2EF) += -I$(srctree)/arch/mips/include/asm/mach-loongson2ef -mno-branch-likely
+load-$(CONFIG_LEMOTE_FULOONG2E) += 0xffffffff80100000
+load-$(CONFIG_LEMOTE_MACH2F) += 0xffffffff80200000
diff --git a/arch/mips/loongson2ef/common/Makefile b/arch/mips/loongson2ef/common/Makefile
new file mode 100644
index 000000000..d5ab3e543
--- /dev/null
+++ b/arch/mips/loongson2ef/common/Makefile
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for loongson based machines.
+#
+
+obj-y += setup.o init.o env.o time.o reset.o irq.o \
+ bonito-irq.o mem.o machtype.o platform.o serial.o
+obj-$(CONFIG_PCI) += pci.o
+
+#
+# Serial port support
+#
+obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o
+obj-$(CONFIG_LOONGSON_MC146818) += rtc.o
+
+#
+# Enable the CS5536 Virtual Support Module (VSM) to virtualize the PCI
+# configuration space
+#
+obj-$(CONFIG_CS5536) += cs5536/
+
+#
+# Suspend Support
+#
+
+obj-$(CONFIG_SUSPEND) += pm.o
diff --git a/arch/mips/loongson2ef/common/bonito-irq.c b/arch/mips/loongson2ef/common/bonito-irq.c
new file mode 100644
index 000000000..c06ad412e
--- /dev/null
+++ b/arch/mips/loongson2ef/common/bonito-irq.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ * Copyright (C) 2000, 2001 Ralf Baechle (ralf@gnu.org)
+ *
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ */
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+
+#include <loongson.h>
+
+static inline void bonito_irq_enable(struct irq_data *d)
+{
+ LOONGSON_INTENSET = (1 << (d->irq - LOONGSON_IRQ_BASE));
+ mmiowb();
+}
+
+static inline void bonito_irq_disable(struct irq_data *d)
+{
+ LOONGSON_INTENCLR = (1 << (d->irq - LOONGSON_IRQ_BASE));
+ mmiowb();
+}
+
+static struct irq_chip bonito_irq_type = {
+ .name = "bonito_irq",
+ .irq_mask = bonito_irq_disable,
+ .irq_unmask = bonito_irq_enable,
+};
+
+void bonito_irq_init(void)
+{
+ u32 i;
+
+ for (i = LOONGSON_IRQ_BASE; i < LOONGSON_IRQ_BASE + 32; i++)
+ irq_set_chip_and_handler(i, &bonito_irq_type,
+ handle_level_irq);
+
+#ifdef CONFIG_CPU_LOONGSON2E
+ i = LOONGSON_IRQ_BASE + 10;
+ if (request_irq(i, no_action, 0, "dma_timeout", NULL))
+ pr_err("Failed to request irq %d (dma_timeout)\n", i);
+#endif
+}
diff --git a/arch/mips/loongson2ef/common/cs5536/Makefile b/arch/mips/loongson2ef/common/cs5536/Makefile
new file mode 100644
index 000000000..b32b29661
--- /dev/null
+++ b/arch/mips/loongson2ef/common/cs5536/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for CS5536 support.
+#
+
+obj-$(CONFIG_CS5536) += cs5536_pci.o cs5536_ide.o cs5536_acc.o cs5536_ohci.o \
+ cs5536_isa.o cs5536_ehci.o
+
+#
+# Enable cs5536 mfgpt Timer
+#
+obj-$(CONFIG_CS5536_MFGPT) += cs5536_mfgpt.o
diff --git a/arch/mips/loongson2ef/common/cs5536/cs5536_acc.c b/arch/mips/loongson2ef/common/cs5536/cs5536_acc.c
new file mode 100644
index 000000000..ff50aae72
--- /dev/null
+++ b/arch/mips/loongson2ef/common/cs5536/cs5536_acc.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * the ACC Virtual Support Module of AMD CS5536
+ *
+ * Copyright (C) 2007 Lemote, Inc.
+ * Author : jlliu, liujl@lemote.com
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+
+#include <cs5536/cs5536.h>
+#include <cs5536/cs5536_pci.h>
+
+void pci_acc_write_reg(int reg, u32 value)
+{
+ u32 hi = 0, lo = value;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ _rdmsr(GLIU_MSR_REG(GLIU_PAE), &hi, &lo);
+ if (value & PCI_COMMAND_MASTER)
+ lo |= (0x03 << 8);
+ else
+ lo &= ~(0x03 << 8);
+ _wrmsr(GLIU_MSR_REG(GLIU_PAE), hi, lo);
+ break;
+ case PCI_STATUS:
+ if (value & PCI_STATUS_PARITY) {
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_PARE_ERR_FLAG) {
+ lo = (lo & 0x0000ffff) | SB_PARE_ERR_FLAG;
+ _wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
+ }
+ }
+ break;
+ case PCI_BAR0_REG:
+ if (value == PCI_BAR_RANGE_MASK) {
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ lo |= SOFT_BAR_ACC_FLAG;
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else if (value & 0x01) {
+ value &= 0xfffffffc;
+ hi = 0xA0000000 | ((value & 0x000ff000) >> 12);
+ lo = 0x000fff80 | ((value & 0x00000fff) << 20);
+ _wrmsr(GLIU_MSR_REG(GLIU_IOD_BM1), hi, lo);
+ }
+ break;
+ case PCI_ACC_INT_REG:
+ _rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo);
+ /* disable all the usb interrupt in PIC */
+ lo &= ~(0xf << PIC_YSEL_LOW_ACC_SHIFT);
+ if (value) /* enable all the acc interrupt in PIC */
+ lo |= (CS5536_ACC_INTR << PIC_YSEL_LOW_ACC_SHIFT);
+ _wrmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), hi, lo);
+ break;
+ default:
+ break;
+ }
+}
+
+u32 pci_acc_read_reg(int reg)
+{
+ u32 hi, lo;
+ u32 conf_data = 0;
+
+ switch (reg) {
+ case PCI_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_ACC_DEVICE_ID, CS5536_VENDOR_ID);
+ break;
+ case PCI_COMMAND:
+ _rdmsr(GLIU_MSR_REG(GLIU_IOD_BM1), &hi, &lo);
+ if (((lo & 0xfff00000) || (hi & 0x000000ff))
+ && ((hi & 0xf0000000) == 0xa0000000))
+ conf_data |= PCI_COMMAND_IO;
+ _rdmsr(GLIU_MSR_REG(GLIU_PAE), &hi, &lo);
+ if ((lo & 0x300) == 0x300)
+ conf_data |= PCI_COMMAND_MASTER;
+ break;
+ case PCI_STATUS:
+ conf_data |= PCI_STATUS_66MHZ;
+ conf_data |= PCI_STATUS_FAST_BACK;
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_PARE_ERR_FLAG)
+ conf_data |= PCI_STATUS_PARITY;
+ conf_data |= PCI_STATUS_DEVSEL_MEDIUM;
+ break;
+ case PCI_CLASS_REVISION:
+ _rdmsr(ACC_MSR_REG(ACC_CAP), &hi, &lo);
+ conf_data = lo & 0x000000ff;
+ conf_data |= (CS5536_ACC_CLASS_CODE << 8);
+ break;
+ case PCI_CACHE_LINE_SIZE:
+ conf_data =
+ CFG_PCI_CACHE_LINE_SIZE(PCI_NORMAL_HEADER_TYPE,
+ PCI_NORMAL_LATENCY_TIMER);
+ break;
+ case PCI_BAR0_REG:
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ if (lo & SOFT_BAR_ACC_FLAG) {
+ conf_data = CS5536_ACC_RANGE |
+ PCI_BASE_ADDRESS_SPACE_IO;
+ lo &= ~SOFT_BAR_ACC_FLAG;
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else {
+ _rdmsr(GLIU_MSR_REG(GLIU_IOD_BM1), &hi, &lo);
+ conf_data = (hi & 0x000000ff) << 12;
+ conf_data |= (lo & 0xfff00000) >> 20;
+ conf_data |= 0x01;
+ conf_data &= ~0x02;
+ }
+ break;
+ case PCI_CARDBUS_CIS:
+ conf_data = PCI_CARDBUS_CIS_POINTER;
+ break;
+ case PCI_SUBSYSTEM_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_ACC_SUB_ID, CS5536_SUB_VENDOR_ID);
+ break;
+ case PCI_ROM_ADDRESS:
+ conf_data = PCI_EXPANSION_ROM_BAR;
+ break;
+ case PCI_CAPABILITY_LIST:
+ conf_data = PCI_CAPLIST_USB_POINTER;
+ break;
+ case PCI_INTERRUPT_LINE:
+ conf_data =
+ CFG_PCI_INTERRUPT_LINE(PCI_DEFAULT_PIN, CS5536_ACC_INTR);
+ break;
+ default:
+ break;
+ }
+
+ return conf_data;
+}
diff --git a/arch/mips/loongson2ef/common/cs5536/cs5536_ehci.c b/arch/mips/loongson2ef/common/cs5536/cs5536_ehci.c
new file mode 100644
index 000000000..bd4c39fe6
--- /dev/null
+++ b/arch/mips/loongson2ef/common/cs5536/cs5536_ehci.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * the EHCI Virtual Support Module of AMD CS5536
+ *
+ * Copyright (C) 2007 Lemote, Inc.
+ * Author : jlliu, liujl@lemote.com
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+
+#include <cs5536/cs5536.h>
+#include <cs5536/cs5536_pci.h>
+
+void pci_ehci_write_reg(int reg, u32 value)
+{
+ u32 hi = 0, lo = value;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ if (value & PCI_COMMAND_MASTER)
+ hi |= PCI_COMMAND_MASTER;
+ else
+ hi &= ~PCI_COMMAND_MASTER;
+
+ if (value & PCI_COMMAND_MEMORY)
+ hi |= PCI_COMMAND_MEMORY;
+ else
+ hi &= ~PCI_COMMAND_MEMORY;
+ _wrmsr(USB_MSR_REG(USB_EHCI), hi, lo);
+ break;
+ case PCI_STATUS:
+ if (value & PCI_STATUS_PARITY) {
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_PARE_ERR_FLAG) {
+ lo = (lo & 0x0000ffff) | SB_PARE_ERR_FLAG;
+ _wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
+ }
+ }
+ break;
+ case PCI_BAR0_REG:
+ if (value == PCI_BAR_RANGE_MASK) {
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ lo |= SOFT_BAR_EHCI_FLAG;
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else if ((value & 0x01) == 0x00) {
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ lo = value;
+ _wrmsr(USB_MSR_REG(USB_EHCI), hi, lo);
+
+ value &= 0xfffffff0;
+ hi = 0x40000000 | ((value & 0xff000000) >> 24);
+ lo = 0x000fffff | ((value & 0x00fff000) << 8);
+ _wrmsr(GLIU_MSR_REG(GLIU_P2D_BM4), hi, lo);
+ }
+ break;
+ case PCI_EHCI_LEGSMIEN_REG:
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ hi &= 0x003f0000;
+ hi |= (value & 0x3f) << 16;
+ _wrmsr(USB_MSR_REG(USB_EHCI), hi, lo);
+ break;
+ case PCI_EHCI_FLADJ_REG:
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ hi &= ~0x00003f00;
+ hi |= value & 0x00003f00;
+ _wrmsr(USB_MSR_REG(USB_EHCI), hi, lo);
+ break;
+ default:
+ break;
+ }
+}
+
+u32 pci_ehci_read_reg(int reg)
+{
+ u32 conf_data = 0;
+ u32 hi, lo;
+
+ switch (reg) {
+ case PCI_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_EHCI_DEVICE_ID, CS5536_VENDOR_ID);
+ break;
+ case PCI_COMMAND:
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ if (hi & PCI_COMMAND_MASTER)
+ conf_data |= PCI_COMMAND_MASTER;
+ if (hi & PCI_COMMAND_MEMORY)
+ conf_data |= PCI_COMMAND_MEMORY;
+ break;
+ case PCI_STATUS:
+ conf_data |= PCI_STATUS_66MHZ;
+ conf_data |= PCI_STATUS_FAST_BACK;
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_PARE_ERR_FLAG)
+ conf_data |= PCI_STATUS_PARITY;
+ conf_data |= PCI_STATUS_DEVSEL_MEDIUM;
+ break;
+ case PCI_CLASS_REVISION:
+ _rdmsr(USB_MSR_REG(USB_CAP), &hi, &lo);
+ conf_data = lo & 0x000000ff;
+ conf_data |= (CS5536_EHCI_CLASS_CODE << 8);
+ break;
+ case PCI_CACHE_LINE_SIZE:
+ conf_data =
+ CFG_PCI_CACHE_LINE_SIZE(PCI_NORMAL_HEADER_TYPE,
+ PCI_NORMAL_LATENCY_TIMER);
+ break;
+ case PCI_BAR0_REG:
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ if (lo & SOFT_BAR_EHCI_FLAG) {
+ conf_data = CS5536_EHCI_RANGE |
+ PCI_BASE_ADDRESS_SPACE_MEMORY;
+ lo &= ~SOFT_BAR_EHCI_FLAG;
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else {
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ conf_data = lo & 0xfffff000;
+ }
+ break;
+ case PCI_CARDBUS_CIS:
+ conf_data = PCI_CARDBUS_CIS_POINTER;
+ break;
+ case PCI_SUBSYSTEM_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_EHCI_SUB_ID, CS5536_SUB_VENDOR_ID);
+ break;
+ case PCI_ROM_ADDRESS:
+ conf_data = PCI_EXPANSION_ROM_BAR;
+ break;
+ case PCI_CAPABILITY_LIST:
+ conf_data = PCI_CAPLIST_USB_POINTER;
+ break;
+ case PCI_INTERRUPT_LINE:
+ conf_data =
+ CFG_PCI_INTERRUPT_LINE(PCI_DEFAULT_PIN, CS5536_USB_INTR);
+ break;
+ case PCI_EHCI_LEGSMIEN_REG:
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ conf_data = (hi & 0x003f0000) >> 16;
+ break;
+ case PCI_EHCI_LEGSMISTS_REG:
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ conf_data = (hi & 0x3f000000) >> 24;
+ break;
+ case PCI_EHCI_FLADJ_REG:
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ conf_data = hi & 0x00003f00;
+ break;
+ default:
+ break;
+ }
+
+ return conf_data;
+}
diff --git a/arch/mips/loongson2ef/common/cs5536/cs5536_ide.c b/arch/mips/loongson2ef/common/cs5536/cs5536_ide.c
new file mode 100644
index 000000000..bb933294b
--- /dev/null
+++ b/arch/mips/loongson2ef/common/cs5536/cs5536_ide.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * the IDE Virtual Support Module of AMD CS5536
+ *
+ * Copyright (C) 2007 Lemote, Inc.
+ * Author : jlliu, liujl@lemote.com
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+
+#include <cs5536/cs5536.h>
+#include <cs5536/cs5536_pci.h>
+
+void pci_ide_write_reg(int reg, u32 value)
+{
+ u32 hi = 0, lo = value;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ _rdmsr(GLIU_MSR_REG(GLIU_PAE), &hi, &lo);
+ if (value & PCI_COMMAND_MASTER)
+ lo |= (0x03 << 4);
+ else
+ lo &= ~(0x03 << 4);
+ _wrmsr(GLIU_MSR_REG(GLIU_PAE), hi, lo);
+ break;
+ case PCI_STATUS:
+ if (value & PCI_STATUS_PARITY) {
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_PARE_ERR_FLAG) {
+ lo = (lo & 0x0000ffff) | SB_PARE_ERR_FLAG;
+ _wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
+ }
+ }
+ break;
+ case PCI_CACHE_LINE_SIZE:
+ value &= 0x0000ff00;
+ _rdmsr(SB_MSR_REG(SB_CTRL), &hi, &lo);
+ hi &= 0xffffff00;
+ hi |= (value >> 8);
+ _wrmsr(SB_MSR_REG(SB_CTRL), hi, lo);
+ break;
+ case PCI_BAR4_REG:
+ if (value == PCI_BAR_RANGE_MASK) {
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ lo |= SOFT_BAR_IDE_FLAG;
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else if (value & 0x01) {
+ _rdmsr(IDE_MSR_REG(IDE_IO_BAR), &hi, &lo);
+ lo = (value & 0xfffffff0) | 0x1;
+ _wrmsr(IDE_MSR_REG(IDE_IO_BAR), hi, lo);
+
+ value &= 0xfffffffc;
+ hi = 0x60000000 | ((value & 0x000ff000) >> 12);
+ lo = 0x000ffff0 | ((value & 0x00000fff) << 20);
+ _wrmsr(GLIU_MSR_REG(GLIU_IOD_BM2), hi, lo);
+ }
+ break;
+ case PCI_IDE_CFG_REG:
+ if (value == CS5536_IDE_FLASH_SIGNATURE) {
+ _rdmsr(DIVIL_MSR_REG(DIVIL_BALL_OPTS), &hi, &lo);
+ lo |= 0x01;
+ _wrmsr(DIVIL_MSR_REG(DIVIL_BALL_OPTS), hi, lo);
+ } else {
+ _rdmsr(IDE_MSR_REG(IDE_CFG), &hi, &lo);
+ lo = value;
+ _wrmsr(IDE_MSR_REG(IDE_CFG), hi, lo);
+ }
+ break;
+ case PCI_IDE_DTC_REG:
+ _rdmsr(IDE_MSR_REG(IDE_DTC), &hi, &lo);
+ lo = value;
+ _wrmsr(IDE_MSR_REG(IDE_DTC), hi, lo);
+ break;
+ case PCI_IDE_CAST_REG:
+ _rdmsr(IDE_MSR_REG(IDE_CAST), &hi, &lo);
+ lo = value;
+ _wrmsr(IDE_MSR_REG(IDE_CAST), hi, lo);
+ break;
+ case PCI_IDE_ETC_REG:
+ _rdmsr(IDE_MSR_REG(IDE_ETC), &hi, &lo);
+ lo = value;
+ _wrmsr(IDE_MSR_REG(IDE_ETC), hi, lo);
+ break;
+ case PCI_IDE_PM_REG:
+ _rdmsr(IDE_MSR_REG(IDE_INTERNAL_PM), &hi, &lo);
+ lo = value;
+ _wrmsr(IDE_MSR_REG(IDE_INTERNAL_PM), hi, lo);
+ break;
+ default:
+ break;
+ }
+}
+
+u32 pci_ide_read_reg(int reg)
+{
+ u32 conf_data = 0;
+ u32 hi, lo;
+
+ switch (reg) {
+ case PCI_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_IDE_DEVICE_ID, CS5536_VENDOR_ID);
+ break;
+ case PCI_COMMAND:
+ _rdmsr(IDE_MSR_REG(IDE_IO_BAR), &hi, &lo);
+ if (lo & 0xfffffff0)
+ conf_data |= PCI_COMMAND_IO;
+ _rdmsr(GLIU_MSR_REG(GLIU_PAE), &hi, &lo);
+ if ((lo & 0x30) == 0x30)
+ conf_data |= PCI_COMMAND_MASTER;
+ break;
+ case PCI_STATUS:
+ conf_data |= PCI_STATUS_66MHZ;
+ conf_data |= PCI_STATUS_FAST_BACK;
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_PARE_ERR_FLAG)
+ conf_data |= PCI_STATUS_PARITY;
+ conf_data |= PCI_STATUS_DEVSEL_MEDIUM;
+ break;
+ case PCI_CLASS_REVISION:
+ _rdmsr(IDE_MSR_REG(IDE_CAP), &hi, &lo);
+ conf_data = lo & 0x000000ff;
+ conf_data |= (CS5536_IDE_CLASS_CODE << 8);
+ break;
+ case PCI_CACHE_LINE_SIZE:
+ _rdmsr(SB_MSR_REG(SB_CTRL), &hi, &lo);
+ hi &= 0x000000f8;
+ conf_data = CFG_PCI_CACHE_LINE_SIZE(PCI_NORMAL_HEADER_TYPE, hi);
+ break;
+ case PCI_BAR4_REG:
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ if (lo & SOFT_BAR_IDE_FLAG) {
+ conf_data = CS5536_IDE_RANGE |
+ PCI_BASE_ADDRESS_SPACE_IO;
+ lo &= ~SOFT_BAR_IDE_FLAG;
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else {
+ _rdmsr(IDE_MSR_REG(IDE_IO_BAR), &hi, &lo);
+ conf_data = lo & 0xfffffff0;
+ conf_data |= 0x01;
+ conf_data &= ~0x02;
+ }
+ break;
+ case PCI_CARDBUS_CIS:
+ conf_data = PCI_CARDBUS_CIS_POINTER;
+ break;
+ case PCI_SUBSYSTEM_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_IDE_SUB_ID, CS5536_SUB_VENDOR_ID);
+ break;
+ case PCI_ROM_ADDRESS:
+ conf_data = PCI_EXPANSION_ROM_BAR;
+ break;
+ case PCI_CAPABILITY_LIST:
+ conf_data = PCI_CAPLIST_POINTER;
+ break;
+ case PCI_INTERRUPT_LINE:
+ conf_data =
+ CFG_PCI_INTERRUPT_LINE(PCI_DEFAULT_PIN, CS5536_IDE_INTR);
+ break;
+ case PCI_IDE_CFG_REG:
+ _rdmsr(IDE_MSR_REG(IDE_CFG), &hi, &lo);
+ conf_data = lo;
+ break;
+ case PCI_IDE_DTC_REG:
+ _rdmsr(IDE_MSR_REG(IDE_DTC), &hi, &lo);
+ conf_data = lo;
+ break;
+ case PCI_IDE_CAST_REG:
+ _rdmsr(IDE_MSR_REG(IDE_CAST), &hi, &lo);
+ conf_data = lo;
+ break;
+ case PCI_IDE_ETC_REG:
+ _rdmsr(IDE_MSR_REG(IDE_ETC), &hi, &lo);
+ conf_data = lo;
+ break;
+ case PCI_IDE_PM_REG:
+ _rdmsr(IDE_MSR_REG(IDE_INTERNAL_PM), &hi, &lo);
+ conf_data = lo;
+ break;
+ default:
+ break;
+ }
+
+ return conf_data;
+}
diff --git a/arch/mips/loongson2ef/common/cs5536/cs5536_isa.c b/arch/mips/loongson2ef/common/cs5536/cs5536_isa.c
new file mode 100644
index 000000000..5ad38f86e
--- /dev/null
+++ b/arch/mips/loongson2ef/common/cs5536/cs5536_isa.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * the ISA Virtual Support Module of AMD CS5536
+ *
+ * Copyright (C) 2007 Lemote, Inc.
+ * Author : jlliu, liujl@lemote.com
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+
+#include <linux/pci.h>
+#include <cs5536/cs5536.h>
+#include <cs5536/cs5536_pci.h>
+
+/* common variables for PCI_ISA_READ/WRITE_BAR */
+static const u32 divil_msr_reg[6] = {
+ DIVIL_MSR_REG(DIVIL_LBAR_SMB), DIVIL_MSR_REG(DIVIL_LBAR_GPIO),
+ DIVIL_MSR_REG(DIVIL_LBAR_MFGPT), DIVIL_MSR_REG(DIVIL_LBAR_IRQ),
+ DIVIL_MSR_REG(DIVIL_LBAR_PMS), DIVIL_MSR_REG(DIVIL_LBAR_ACPI),
+};
+
+static const u32 soft_bar_flag[6] = {
+ SOFT_BAR_SMB_FLAG, SOFT_BAR_GPIO_FLAG, SOFT_BAR_MFGPT_FLAG,
+ SOFT_BAR_IRQ_FLAG, SOFT_BAR_PMS_FLAG, SOFT_BAR_ACPI_FLAG,
+};
+
+static const u32 sb_msr_reg[6] = {
+ SB_MSR_REG(SB_R0), SB_MSR_REG(SB_R1), SB_MSR_REG(SB_R2),
+ SB_MSR_REG(SB_R3), SB_MSR_REG(SB_R4), SB_MSR_REG(SB_R5),
+};
+
+static const u32 bar_space_range[6] = {
+ CS5536_SMB_RANGE, CS5536_GPIO_RANGE, CS5536_MFGPT_RANGE,
+ CS5536_IRQ_RANGE, CS5536_PMS_RANGE, CS5536_ACPI_RANGE,
+};
+
+static const int bar_space_len[6] = {
+ CS5536_SMB_LENGTH, CS5536_GPIO_LENGTH, CS5536_MFGPT_LENGTH,
+ CS5536_IRQ_LENGTH, CS5536_PMS_LENGTH, CS5536_ACPI_LENGTH,
+};
+
+/*
+ * enable the divil module bar space.
+ *
+ * For all the DIVIL module LBARs, you must program both the DIVIL LBAR
+ * register and the RCONFx (0~5) registers to use the modules.
+ */
+static void divil_lbar_enable(void)
+{
+ u32 hi, lo;
+ int offset;
+
+ /*
+ * The DIVIL IRQ is not used yet, so keep RCONF0 reserved.
+ */
+
+ for (offset = DIVIL_LBAR_SMB; offset <= DIVIL_LBAR_PMS; offset++) {
+ _rdmsr(DIVIL_MSR_REG(offset), &hi, &lo);
+ hi |= 0x01;
+ _wrmsr(DIVIL_MSR_REG(offset), hi, lo);
+ }
+}
+
+/*
+ * disable the divil module bar space.
+ */
+static void divil_lbar_disable(void)
+{
+ u32 hi, lo;
+ int offset;
+
+ for (offset = DIVIL_LBAR_SMB; offset <= DIVIL_LBAR_PMS; offset++) {
+ _rdmsr(DIVIL_MSR_REG(offset), &hi, &lo);
+ hi &= ~0x01;
+ _wrmsr(DIVIL_MSR_REG(offset), hi, lo);
+ }
+}
+
+/*
+ * BAR write: write value to the n BAR
+ */
+
+void pci_isa_write_bar(int n, u32 value)
+{
+ u32 hi = 0, lo = value;
+
+ if (value == PCI_BAR_RANGE_MASK) {
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ lo |= soft_bar_flag[n];
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else if (value & 0x01) {
+ /* NATIVE reg */
+ hi = 0x0000f001;
+ lo &= bar_space_range[n];
+ _wrmsr(divil_msr_reg[n], hi, lo);
+
+ /* RCONFx is 4bytes in units for I/O space */
+ hi = ((value & 0x000ffffc) << 12) |
+ ((bar_space_len[n] - 4) << 12) | 0x01;
+ lo = ((value & 0x000ffffc) << 12) | 0x01;
+ _wrmsr(sb_msr_reg[n], hi, lo);
+ }
+}
+
+/*
+ * BAR read: read the n BAR
+ */
+
+u32 pci_isa_read_bar(int n)
+{
+ u32 conf_data = 0;
+ u32 hi, lo;
+
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ if (lo & soft_bar_flag[n]) {
+ conf_data = bar_space_range[n] | PCI_BASE_ADDRESS_SPACE_IO;
+ lo &= ~soft_bar_flag[n];
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else {
+ _rdmsr(divil_msr_reg[n], &hi, &lo);
+ conf_data = lo & bar_space_range[n];
+ conf_data |= 0x01;
+ conf_data &= ~0x02;
+ }
+ return conf_data;
+}
+
+/*
+ * isa_write: ISA write transfer
+ *
+ * We assume that this is not a bus master transfer.
+ */
+void pci_isa_write_reg(int reg, u32 value)
+{
+ u32 hi = 0, lo = value;
+ u32 temp;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ if (value & PCI_COMMAND_IO)
+ divil_lbar_enable();
+ else
+ divil_lbar_disable();
+ break;
+ case PCI_STATUS:
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ temp = lo & 0x0000ffff;
+ if ((value & PCI_STATUS_SIG_TARGET_ABORT) &&
+ (lo & SB_TAS_ERR_EN))
+ temp |= SB_TAS_ERR_FLAG;
+
+ if ((value & PCI_STATUS_REC_TARGET_ABORT) &&
+ (lo & SB_TAR_ERR_EN))
+ temp |= SB_TAR_ERR_FLAG;
+
+ if ((value & PCI_STATUS_REC_MASTER_ABORT)
+ && (lo & SB_MAR_ERR_EN))
+ temp |= SB_MAR_ERR_FLAG;
+
+ if ((value & PCI_STATUS_DETECTED_PARITY)
+ && (lo & SB_PARE_ERR_EN))
+ temp |= SB_PARE_ERR_FLAG;
+
+ lo = temp;
+ _wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
+ break;
+ case PCI_CACHE_LINE_SIZE:
+ value &= 0x0000ff00;
+ _rdmsr(SB_MSR_REG(SB_CTRL), &hi, &lo);
+ hi &= 0xffffff00;
+ hi |= (value >> 8);
+ _wrmsr(SB_MSR_REG(SB_CTRL), hi, lo);
+ break;
+ case PCI_BAR0_REG:
+ pci_isa_write_bar(0, value);
+ break;
+ case PCI_BAR1_REG:
+ pci_isa_write_bar(1, value);
+ break;
+ case PCI_BAR2_REG:
+ pci_isa_write_bar(2, value);
+ break;
+ case PCI_BAR3_REG:
+ pci_isa_write_bar(3, value);
+ break;
+ case PCI_BAR4_REG:
+ pci_isa_write_bar(4, value);
+ break;
+ case PCI_BAR5_REG:
+ pci_isa_write_bar(5, value);
+ break;
+ case PCI_UART1_INT_REG:
+ _rdmsr(DIVIL_MSR_REG(PIC_YSEL_HIGH), &hi, &lo);
+ /* disable uart1 interrupt in PIC */
+ lo &= ~(0xf << 24);
+ if (value) /* enable uart1 interrupt in PIC */
+ lo |= (CS5536_UART1_INTR << 24);
+ _wrmsr(DIVIL_MSR_REG(PIC_YSEL_HIGH), hi, lo);
+ break;
+ case PCI_UART2_INT_REG:
+ _rdmsr(DIVIL_MSR_REG(PIC_YSEL_HIGH), &hi, &lo);
+ /* disable uart2 interrupt in PIC */
+ lo &= ~(0xf << 28);
+ if (value) /* enable uart2 interrupt in PIC */
+ lo |= (CS5536_UART2_INTR << 28);
+ _wrmsr(DIVIL_MSR_REG(PIC_YSEL_HIGH), hi, lo);
+ break;
+ case PCI_ISA_FIXUP_REG:
+ if (value) {
+ /* enable the TARGET ABORT/MASTER ABORT etc. */
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ lo |= 0x00000063;
+ _wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
+ }
+
+ default:
+ /* ALL OTHER PCI CONFIG SPACE HEADER IS NOT IMPLEMENTED. */
+ break;
+ }
+}
+
+/*
+ * isa_read: ISA read transfers
+ *
+ * We assume that this is not a bus master transfer.
+ */
+u32 pci_isa_read_reg(int reg)
+{
+ u32 conf_data = 0;
+ u32 hi, lo;
+
+ switch (reg) {
+ case PCI_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_ISA_DEVICE_ID, CS5536_VENDOR_ID);
+ break;
+ case PCI_COMMAND:
+ /* we just check the first LBAR for the IO enable bit; */
+ /* maybe this should be changed later. */
+ _rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_SMB), &hi, &lo);
+ if (hi & 0x01)
+ conf_data |= PCI_COMMAND_IO;
+ break;
+ case PCI_STATUS:
+ conf_data |= PCI_STATUS_66MHZ;
+ conf_data |= PCI_STATUS_DEVSEL_MEDIUM;
+ conf_data |= PCI_STATUS_FAST_BACK;
+
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_TAS_ERR_FLAG)
+ conf_data |= PCI_STATUS_SIG_TARGET_ABORT;
+ if (lo & SB_TAR_ERR_FLAG)
+ conf_data |= PCI_STATUS_REC_TARGET_ABORT;
+ if (lo & SB_MAR_ERR_FLAG)
+ conf_data |= PCI_STATUS_REC_MASTER_ABORT;
+ if (lo & SB_PARE_ERR_FLAG)
+ conf_data |= PCI_STATUS_DETECTED_PARITY;
+ break;
+ case PCI_CLASS_REVISION:
+ _rdmsr(GLCP_MSR_REG(GLCP_CHIP_REV_ID), &hi, &lo);
+ conf_data = lo & 0x000000ff;
+ conf_data |= (CS5536_ISA_CLASS_CODE << 8);
+ break;
+ case PCI_CACHE_LINE_SIZE:
+ _rdmsr(SB_MSR_REG(SB_CTRL), &hi, &lo);
+ hi &= 0x000000f8;
+ conf_data = CFG_PCI_CACHE_LINE_SIZE(PCI_BRIDGE_HEADER_TYPE, hi);
+ break;
+ /*
+ * we only use the LBAR of DIVIL, no RCONF used.
+ * all of them are IO space.
+ */
+ case PCI_BAR0_REG:
+ return pci_isa_read_bar(0);
+ break;
+ case PCI_BAR1_REG:
+ return pci_isa_read_bar(1);
+ break;
+ case PCI_BAR2_REG:
+ return pci_isa_read_bar(2);
+ break;
+ case PCI_BAR3_REG:
+ break;
+ case PCI_BAR4_REG:
+ return pci_isa_read_bar(4);
+ break;
+ case PCI_BAR5_REG:
+ return pci_isa_read_bar(5);
+ break;
+ case PCI_CARDBUS_CIS:
+ conf_data = PCI_CARDBUS_CIS_POINTER;
+ break;
+ case PCI_SUBSYSTEM_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_ISA_SUB_ID, CS5536_SUB_VENDOR_ID);
+ break;
+ case PCI_ROM_ADDRESS:
+ conf_data = PCI_EXPANSION_ROM_BAR;
+ break;
+ case PCI_CAPABILITY_LIST:
+ conf_data = PCI_CAPLIST_POINTER;
+ break;
+ case PCI_INTERRUPT_LINE:
+ /* no interrupt used here */
+ conf_data = CFG_PCI_INTERRUPT_LINE(0x00, 0x00);
+ break;
+ default:
+ break;
+ }
+
+ return conf_data;
+}
+
+/*
+ * The mfgpt timer interrupt is running early, so we must keep the south bridge
+ * mmio always enabled. Otherwise we may race with the PCI configuration which
+ * may temporarily disable it. When that happens and the timer interrupt fires,
+ * we are not able to clear it and the system will hang.
+ */
+static void cs5536_isa_mmio_always_on(struct pci_dev *dev)
+{
+ dev->mmio_always_on = 1;
+}
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA,
+ PCI_CLASS_BRIDGE_ISA, 8, cs5536_isa_mmio_always_on);
diff --git a/arch/mips/loongson2ef/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson2ef/common/cs5536/cs5536_mfgpt.c
new file mode 100644
index 000000000..f21a540a1
--- /dev/null
+++ b/arch/mips/loongson2ef/common/cs5536/cs5536_mfgpt.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * CS5536 General timer functions
+ *
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
+ * Author: Yanhua, yanh@lemote.com
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu zhangjin, wuzhangjin@gmail.com
+ *
+ * Reference: AMD Geode(TM) CS5536 Companion Device Data Book
+ */
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+
+#include <asm/time.h>
+
+#include <cs5536/cs5536_mfgpt.h>
+
+static DEFINE_RAW_SPINLOCK(mfgpt_lock);
+
+static u32 mfgpt_base;
+
+/*
+ * Initialize the MFGPT timer.
+ *
+ * This is also called after resume to bring the MFGPT into operation again.
+ */
+
+/* disable counter */
+void disable_mfgpt0_counter(void)
+{
+ outw(inw(MFGPT0_SETUP) & 0x7fff, MFGPT0_SETUP);
+}
+EXPORT_SYMBOL(disable_mfgpt0_counter);
+
+/* enable counter, comparator2 to event mode, 14.318MHz clock */
+void enable_mfgpt0_counter(void)
+{
+ outw(0xe310, MFGPT0_SETUP);
+}
+EXPORT_SYMBOL(enable_mfgpt0_counter);
+
+static int mfgpt_timer_set_periodic(struct clock_event_device *evt)
+{
+ raw_spin_lock(&mfgpt_lock);
+
+ outw(COMPARE, MFGPT0_CMP2); /* set comparator2 */
+ outw(0, MFGPT0_CNT); /* set counter to 0 */
+ enable_mfgpt0_counter();
+
+ raw_spin_unlock(&mfgpt_lock);
+ return 0;
+}
+
+static int mfgpt_timer_shutdown(struct clock_event_device *evt)
+{
+ if (clockevent_state_periodic(evt) || clockevent_state_oneshot(evt)) {
+ raw_spin_lock(&mfgpt_lock);
+ disable_mfgpt0_counter();
+ raw_spin_unlock(&mfgpt_lock);
+ }
+
+ return 0;
+}
+
+static struct clock_event_device mfgpt_clockevent = {
+ .name = "mfgpt",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+
+ /* The oneshot mode has very high deviation, don't use it! */
+ .set_state_shutdown = mfgpt_timer_shutdown,
+ .set_state_periodic = mfgpt_timer_set_periodic,
+ .irq = CS5536_MFGPT_INTR,
+};
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ u32 basehi;
+
+ /*
+ * get MFGPT base address
+ *
+ * NOTE: do not remove this; it's needed because the value of mfgpt_base
+ * is variable
+ */
+ _rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_MFGPT), &basehi, &mfgpt_base);
+
+ /* ack */
+ outw(inw(MFGPT0_SETUP) | 0x4000, MFGPT0_SETUP);
+
+ mfgpt_clockevent.event_handler(&mfgpt_clockevent);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Initialize the conversion factor and the min/max deltas of the clock event
+ * structure and register the clock event source with the framework.
+ */
+void __init setup_mfgpt0_timer(void)
+{
+ u32 basehi;
+ struct clock_event_device *cd = &mfgpt_clockevent;
+ unsigned int cpu = smp_processor_id();
+
+ cd->cpumask = cpumask_of(cpu);
+ clockevent_set_clock(cd, MFGPT_TICK_RATE);
+ cd->max_delta_ns = clockevent_delta2ns(0xffff, cd);
+ cd->max_delta_ticks = 0xffff;
+ cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
+ cd->min_delta_ticks = 0xf;
+
+ /* Enable MFGPT0 Comparator 2 Output to the Interrupt Mapper */
+ _wrmsr(DIVIL_MSR_REG(MFGPT_IRQ), 0, 0x100);
+
+ /* Enable Interrupt Gate 5 */
+ _wrmsr(DIVIL_MSR_REG(PIC_ZSEL_LOW), 0, 0x50000);
+
+ /* get MFGPT base address */
+ _rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_MFGPT), &basehi, &mfgpt_base);
+
+ clockevents_register_device(cd);
+
+ if (request_irq(CS5536_MFGPT_INTR, timer_interrupt,
+ IRQF_NOBALANCING | IRQF_TIMER, "timer", NULL))
+ pr_err("Failed to register timer interrupt\n");
+}
+
+/*
+ * Since the MFGPT overflows every tick, its not very useful
+ * to just read by itself. So use jiffies to emulate a free
+ * running counter:
+ */
+static u64 mfgpt_read(struct clocksource *cs)
+{
+ unsigned long flags;
+ int count;
+ u32 jifs;
+ static int old_count;
+ static u32 old_jifs;
+
+ raw_spin_lock_irqsave(&mfgpt_lock, flags);
+ /*
+ * Although our caller may have the read side of xtime_lock,
+ * this is now a seqlock, and we are cheating in this routine
+ * by having side effects on state that we cannot undo if
+ * there is a collision on the seqlock and our caller has to
+ * retry. (Namely, old_jifs and old_count.) So we must treat
+ * jiffies as volatile despite the lock. We read jiffies
+ * before latching the timer count to guarantee that although
+ * the jiffies value might be older than the count (that is,
+ * the counter may underflow between the last point where
+ * jiffies was incremented and the point where we latch the
+ * count), it cannot be newer.
+ */
+ jifs = jiffies;
+ /* read the count */
+ count = inw(MFGPT0_CNT);
+
+ /*
+ * It's possible for count to appear to go the wrong way for this
+ * reason:
+ *
+ * The timer counter underflows, but we haven't handled the resulting
+ * interrupt and incremented jiffies yet.
+ *
+ * Previous attempts to handle these cases intelligently were buggy, so
+ * we just do the simple thing now.
+ */
+ if (count < old_count && jifs == old_jifs)
+ count = old_count;
+
+ old_count = count;
+ old_jifs = jifs;
+
+ raw_spin_unlock_irqrestore(&mfgpt_lock, flags);
+
+ return (u64) (jifs * COMPARE) + count;
+}
+
+static struct clocksource clocksource_mfgpt = {
+ .name = "mfgpt",
+ .rating = 120, /* Functional for real use, but not desired */
+ .read = mfgpt_read,
+ .mask = CLOCKSOURCE_MASK(32),
+};
+
+int __init init_mfgpt_clocksource(void)
+{
+ if (num_possible_cpus() > 1) /* MFGPT does not scale! */
+ return 0;
+
+ return clocksource_register_hz(&clocksource_mfgpt, MFGPT_TICK_RATE);
+}
+
+arch_initcall(init_mfgpt_clocksource);
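The free-running value returned by mfgpt_read() above is simply jiffies * COMPARE + hardware count, with a clamp so the value never steps backwards when the counter wraps before the tick interrupt has bumped jiffies. A small host-side model of that composition, with a made-up COMPARE value and inputs chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define COMPARE 10000	/* assumed ticks per jiffy, illustration only */

static uint64_t compose(uint32_t jifs, int count,
			uint32_t *old_jifs, int *old_count)
{
	/*
	 * If the counter wrapped but the tick interrupt has not yet
	 * incremented jiffies, the raw value would go backwards; reuse
	 * the previous count instead (same guard as mfgpt_read()).
	 */
	if (count < *old_count && jifs == *old_jifs)
		count = *old_count;

	*old_count = count;
	*old_jifs = jifs;

	return (uint64_t)jifs * COMPARE + count;
}

int main(void)
{
	uint32_t old_jifs = 0;
	int old_count = 0;

	/* normal read: 100 * 10000 + 5000 = 1005000 */
	printf("%llu\n",
	       (unsigned long long)compose(100, 5000, &old_jifs, &old_count));

	/* counter wrapped, jiffies unchanged: still 1005000, not 1000010 */
	printf("%llu\n",
	       (unsigned long long)compose(100, 10, &old_jifs, &old_count));
	return 0;
}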
diff --git a/arch/mips/loongson2ef/common/cs5536/cs5536_ohci.c b/arch/mips/loongson2ef/common/cs5536/cs5536_ohci.c
new file mode 100644
index 000000000..71a52b120
--- /dev/null
+++ b/arch/mips/loongson2ef/common/cs5536/cs5536_ohci.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * the OHCI Virtual Support Module of AMD CS5536
+ *
+ * Copyright (C) 2007 Lemote, Inc.
+ * Author : jlliu, liujl@lemote.com
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+
+#include <cs5536/cs5536.h>
+#include <cs5536/cs5536_pci.h>
+
+void pci_ohci_write_reg(int reg, u32 value)
+{
+ u32 hi = 0, lo = value;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ _rdmsr(USB_MSR_REG(USB_OHCI), &hi, &lo);
+ if (value & PCI_COMMAND_MASTER)
+ hi |= PCI_COMMAND_MASTER;
+ else
+ hi &= ~PCI_COMMAND_MASTER;
+
+ if (value & PCI_COMMAND_MEMORY)
+ hi |= PCI_COMMAND_MEMORY;
+ else
+ hi &= ~PCI_COMMAND_MEMORY;
+ _wrmsr(USB_MSR_REG(USB_OHCI), hi, lo);
+ break;
+ case PCI_STATUS:
+ if (value & PCI_STATUS_PARITY) {
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_PARE_ERR_FLAG) {
+ lo = (lo & 0x0000ffff) | SB_PARE_ERR_FLAG;
+ _wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
+ }
+ }
+ break;
+ case PCI_BAR0_REG:
+ if (value == PCI_BAR_RANGE_MASK) {
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ lo |= SOFT_BAR_OHCI_FLAG;
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else if ((value & 0x01) == 0x00) {
+ _rdmsr(USB_MSR_REG(USB_OHCI), &hi, &lo);
+ lo = value;
+ _wrmsr(USB_MSR_REG(USB_OHCI), hi, lo);
+
+ value &= 0xfffffff0;
+ hi = 0x40000000 | ((value & 0xff000000) >> 24);
+ lo = 0x000fffff | ((value & 0x00fff000) << 8);
+ _wrmsr(GLIU_MSR_REG(GLIU_P2D_BM3), hi, lo);
+ }
+ break;
+ case PCI_OHCI_INT_REG:
+ _rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo);
+ lo &= ~(0xf << PIC_YSEL_LOW_USB_SHIFT);
+ if (value) /* enable all the usb interrupt in PIC */
+ lo |= (CS5536_USB_INTR << PIC_YSEL_LOW_USB_SHIFT);
+ _wrmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), hi, lo);
+ break;
+ default:
+ break;
+ }
+}
+
+u32 pci_ohci_read_reg(int reg)
+{
+ u32 conf_data = 0;
+ u32 hi, lo;
+
+ switch (reg) {
+ case PCI_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_OHCI_DEVICE_ID, CS5536_VENDOR_ID);
+ break;
+ case PCI_COMMAND:
+ _rdmsr(USB_MSR_REG(USB_OHCI), &hi, &lo);
+ if (hi & PCI_COMMAND_MASTER)
+ conf_data |= PCI_COMMAND_MASTER;
+ if (hi & PCI_COMMAND_MEMORY)
+ conf_data |= PCI_COMMAND_MEMORY;
+ break;
+ case PCI_STATUS:
+ conf_data |= PCI_STATUS_66MHZ;
+ conf_data |= PCI_STATUS_FAST_BACK;
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_PARE_ERR_FLAG)
+ conf_data |= PCI_STATUS_PARITY;
+ conf_data |= PCI_STATUS_DEVSEL_MEDIUM;
+ break;
+ case PCI_CLASS_REVISION:
+ _rdmsr(USB_MSR_REG(USB_CAP), &hi, &lo);
+ conf_data = lo & 0x000000ff;
+ conf_data |= (CS5536_OHCI_CLASS_CODE << 8);
+ break;
+ case PCI_CACHE_LINE_SIZE:
+ conf_data =
+ CFG_PCI_CACHE_LINE_SIZE(PCI_NORMAL_HEADER_TYPE,
+ PCI_NORMAL_LATENCY_TIMER);
+ break;
+ case PCI_BAR0_REG:
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ if (lo & SOFT_BAR_OHCI_FLAG) {
+ conf_data = CS5536_OHCI_RANGE |
+ PCI_BASE_ADDRESS_SPACE_MEMORY;
+ lo &= ~SOFT_BAR_OHCI_FLAG;
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else {
+ _rdmsr(USB_MSR_REG(USB_OHCI), &hi, &lo);
+ conf_data = lo & 0xffffff00;
+ conf_data &= ~0x0000000f; /* 32bit mem */
+ }
+ break;
+ case PCI_CARDBUS_CIS:
+ conf_data = PCI_CARDBUS_CIS_POINTER;
+ break;
+ case PCI_SUBSYSTEM_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_OHCI_SUB_ID, CS5536_SUB_VENDOR_ID);
+ break;
+ case PCI_ROM_ADDRESS:
+ conf_data = PCI_EXPANSION_ROM_BAR;
+ break;
+ case PCI_CAPABILITY_LIST:
+ conf_data = PCI_CAPLIST_USB_POINTER;
+ break;
+ case PCI_INTERRUPT_LINE:
+ conf_data =
+ CFG_PCI_INTERRUPT_LINE(PCI_DEFAULT_PIN, CS5536_USB_INTR);
+ break;
+ case PCI_OHCI_INT_REG:
+ _rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo);
+ if (((lo >> PIC_YSEL_LOW_USB_SHIFT) & 0xf) == CS5536_USB_INTR)
+ conf_data = 1;
+ break;
+ default:
+ break;
+ }
+
+ return conf_data;
+}
diff --git a/arch/mips/loongson2ef/common/cs5536/cs5536_pci.c b/arch/mips/loongson2ef/common/cs5536/cs5536_pci.c
new file mode 100644
index 000000000..202c89b56
--- /dev/null
+++ b/arch/mips/loongson2ef/common/cs5536/cs5536_pci.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * read/write operation to the PCI config space of CS5536
+ *
+ * Copyright (C) 2007 Lemote, Inc.
+ * Author : jlliu, liujl@lemote.com
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ *
+ * The Virtual Support Modules (VSM) for virtualizing the PCI
+ * configuration space are defined in the respective cs5536_<module>.c files.
+ *
+ * After this virtualization, users can access the PCI configuration space
+ * directly as a normal multi-function PCI device which follows
+ * the PCI 2.2 spec.
+ */
+
+#include <linux/types.h>
+#include <cs5536/cs5536_pci.h>
+#include <cs5536/cs5536_vsm.h>
+
+enum {
+ CS5536_FUNC_START = -1,
+ CS5536_ISA_FUNC,
+ reserved_func,
+ CS5536_IDE_FUNC,
+ CS5536_ACC_FUNC,
+ CS5536_OHCI_FUNC,
+ CS5536_EHCI_FUNC,
+ CS5536_FUNC_END,
+};
+
+static const cs5536_pci_vsm_write vsm_conf_write[] = {
+ [CS5536_ISA_FUNC] = pci_isa_write_reg,
+ [reserved_func] = NULL,
+ [CS5536_IDE_FUNC] = pci_ide_write_reg,
+ [CS5536_ACC_FUNC] = pci_acc_write_reg,
+ [CS5536_OHCI_FUNC] = pci_ohci_write_reg,
+ [CS5536_EHCI_FUNC] = pci_ehci_write_reg,
+};
+
+static const cs5536_pci_vsm_read vsm_conf_read[] = {
+ [CS5536_ISA_FUNC] = pci_isa_read_reg,
+ [reserved_func] = NULL,
+ [CS5536_IDE_FUNC] = pci_ide_read_reg,
+ [CS5536_ACC_FUNC] = pci_acc_read_reg,
+ [CS5536_OHCI_FUNC] = pci_ohci_read_reg,
+ [CS5536_EHCI_FUNC] = pci_ehci_read_reg,
+};
+
+/*
+ * write to PCI config space and transfer it to MSR write.
+ */
+void cs5536_pci_conf_write4(int function, int reg, u32 value)
+{
+ if ((function <= CS5536_FUNC_START) || (function >= CS5536_FUNC_END))
+ return;
+ if ((reg < 0) || (reg > 0x100) || ((reg & 0x03) != 0))
+ return;
+
+ if (vsm_conf_write[function] != NULL)
+ vsm_conf_write[function](reg, value);
+}
+
+/*
+ * read PCI config space and transfer it to MSR access.
+ */
+u32 cs5536_pci_conf_read4(int function, int reg)
+{
+ u32 data = 0;
+
+ if ((function <= CS5536_FUNC_START) || (function >= CS5536_FUNC_END))
+ return 0;
+ if ((reg < 0) || ((reg & 0x03) != 0))
+ return 0;
+ if (reg > 0x100)
+ return 0xffffffff;
+
+ if (vsm_conf_read[function] != NULL)
+ data = vsm_conf_read[function](reg);
+
+ return data;
+}
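The dispatch above is a plain table of per-function handlers indexed by the enum, guarded by the same bounds and alignment checks. A stripped-down host-side model of the pattern; the handlers, IDs and register values here are invented for illustration and do not come from the CS5536 data book:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t (*vsm_read)(int reg);

/* fake handlers standing in for pci_isa_read_reg() / pci_ide_read_reg() */
static uint32_t isa_read(int reg) { return reg == 0x00 ? 0x11112222u : 0; }
static uint32_t ide_read(int reg) { return reg == 0x00 ? 0x33334444u : 0; }

enum { ISA_FUNC, RESERVED_FUNC, IDE_FUNC, FUNC_END };

static const vsm_read conf_read[] = {
	[ISA_FUNC]      = isa_read,
	[RESERVED_FUNC] = NULL,
	[IDE_FUNC]      = ide_read,
};

static uint32_t conf_read4(int function, int reg)
{
	if (function < 0 || function >= FUNC_END)
		return 0;
	if (reg < 0 || (reg & 0x03))	/* dword-aligned registers only */
		return 0;
	if (reg > 0x100)
		return 0xffffffff;

	return conf_read[function] ? conf_read[function](reg) : 0;
}

int main(void)
{
	printf("%08x\n", conf_read4(ISA_FUNC, 0x00));	/* 11112222 */
	printf("%08x\n", conf_read4(IDE_FUNC, 0x02));	/* 00000000: unaligned */
	return 0;
}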
diff --git a/arch/mips/loongson2ef/common/env.c b/arch/mips/loongson2ef/common/env.c
new file mode 100644
index 000000000..6f20bdf9b
--- /dev/null
+++ b/arch/mips/loongson2ef/common/env.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Based on Ocelot Linux port, which is
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: jsun@mvista.com or jsun@junsun.net
+ *
+ * Copyright 2003 ICT CAS
+ * Author: Michael Guo <guoyi@ict.ac.cn>
+ *
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+#include <linux/export.h>
+#include <asm/bootinfo.h>
+#include <asm/fw/fw.h>
+#include <loongson.h>
+
+u32 cpu_clock_freq;
+EXPORT_SYMBOL(cpu_clock_freq);
+
+void __init prom_init_env(void)
+{
+ /* pmon passes arguments in 32bit pointers */
+ unsigned int processor_id;
+
+ cpu_clock_freq = fw_getenvl("cpuclock");
+ memsize = fw_getenvl("memsize");
+ highmemsize = fw_getenvl("highmemsize");
+
+ if (memsize == 0)
+ memsize = 256;
+
+ pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize);
+
+ if (cpu_clock_freq == 0) {
+ processor_id = (&current_cpu_data)->processor_id;
+ switch (processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON2E:
+ cpu_clock_freq = 533080000;
+ break;
+ case PRID_REV_LOONGSON2F:
+ cpu_clock_freq = 797000000;
+ break;
+ default:
+ cpu_clock_freq = 100000000;
+ break;
+ }
+ }
+ pr_info("CpuClock = %u\n", cpu_clock_freq);
+}
diff --git a/arch/mips/loongson2ef/common/init.c b/arch/mips/loongson2ef/common/init.c
new file mode 100644
index 000000000..ce3f02f75
--- /dev/null
+++ b/arch/mips/loongson2ef/common/init.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+
+#include <linux/memblock.h>
+#include <asm/bootinfo.h>
+#include <asm/traps.h>
+#include <asm/smp-ops.h>
+#include <asm/cacheflush.h>
+#include <asm/fw/fw.h>
+
+#include <loongson.h>
+
+/* Loongson CPU address windows config space base address */
+unsigned long __maybe_unused _loongson_addrwincfg_base;
+
+static void __init mips_nmi_setup(void)
+{
+ void *base;
+ extern char except_vec_nmi[];
+
+ base = (void *)(CAC_BASE + 0x380);
+ memcpy(base, except_vec_nmi, 0x80);
+ flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
+}
+
+void __init prom_init(void)
+{
+#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
+ _loongson_addrwincfg_base = (unsigned long)
+ ioremap(LOONGSON_ADDRWINCFG_BASE, LOONGSON_ADDRWINCFG_SIZE);
+#endif
+
+ fw_init_cmdline();
+ prom_init_machtype();
+ prom_init_env();
+
+ /* init base address of io space */
+ set_io_port_base((unsigned long)
+ ioremap(LOONGSON_PCIIO_BASE, LOONGSON_PCIIO_SIZE));
+ prom_init_memory();
+
+ /* init the uart base address */
+ prom_init_uart_base();
+ board_nmi_handler_setup = mips_nmi_setup;
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/loongson2ef/common/irq.c b/arch/mips/loongson2ef/common/irq.c
new file mode 100644
index 000000000..0ea93c1c0
--- /dev/null
+++ b/arch/mips/loongson2ef/common/irq.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <loongson.h>
+/*
+ * the first level int-handler will jump here if it is a bonito irq
+ */
+void bonito_irqdispatch(void)
+{
+ u32 int_status;
+ int i;
+
+ /* work around the IO DMA problem: let the cpu loop to allow DMA to finish */
+ int_status = LOONGSON_INTISR;
+ while (int_status & (1 << 10)) {
+ udelay(1);
+ int_status = LOONGSON_INTISR;
+ }
+
+ /* Get pending sources, masked by current enables */
+ int_status = LOONGSON_INTISR & LOONGSON_INTEN;
+
+ if (int_status) {
+ i = __ffs(int_status);
+ do_IRQ(LOONGSON_IRQ_BASE + i);
+ }
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned int pending;
+
+ pending = read_c0_cause() & read_c0_status() & ST0_IM;
+
+ /* machine-specific plat_irq_dispatch */
+ mach_irq_dispatch(pending);
+}
+
+void __init arch_init_irq(void)
+{
+ /*
+ * Clear all of the interrupts while we change the table around a bit.
+ * int-handler is not on bootstrap
+ */
+ clear_c0_status(ST0_IM | ST0_BEV);
+
+ /* no steer */
+ LOONGSON_INTSTEER = 0;
+
+ /*
+ * Mask out all interrupts by writing "1" to all bit positions in
+ * the interrupt reset reg.
+ */
+ LOONGSON_INTENCLR = ~0;
+
+ /* machine specific irq init */
+ mach_init_irq();
+}
diff --git a/arch/mips/loongson2ef/common/machtype.c b/arch/mips/loongson2ef/common/machtype.c
new file mode 100644
index 000000000..82f6de49f
--- /dev/null
+++ b/arch/mips/loongson2ef/common/machtype.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ *
+ * Copyright (c) 2009 Zhang Le <r0bertz@gentoo.org>
+ */
+#include <linux/errno.h>
+#include <asm/bootinfo.h>
+
+#include <loongson.h>
+#include <machine.h>
+
+/* please ensure the length of the machtype string is less than 50 */
+#define MACHTYPE_LEN 50
+
+static const char *system_types[] = {
+ [MACH_LOONGSON_UNKNOWN] = "unknown loongson machine",
+ [MACH_LEMOTE_FL2E] = "lemote-fuloong-2e-box",
+ [MACH_LEMOTE_FL2F] = "lemote-fuloong-2f-box",
+ [MACH_LEMOTE_ML2F7] = "lemote-mengloong-2f-7inches",
+ [MACH_LEMOTE_YL2F89] = "lemote-yeeloong-2f-8.9inches",
+ [MACH_DEXXON_GDIUM2F10] = "dexxon-gdium-2f",
+ [MACH_LEMOTE_NAS] = "lemote-nas-2f",
+ [MACH_LEMOTE_LL2F] = "lemote-lynloong-2f",
+ [MACH_LOONGSON_END] = NULL,
+};
+
+const char *get_system_type(void)
+{
+ return system_types[mips_machtype];
+}
+
+void __weak __init mach_prom_init_machtype(void)
+{
+}
+
+void __init prom_init_machtype(void)
+{
+ char *p, str[MACHTYPE_LEN + 1];
+ int machtype = MACH_LEMOTE_FL2E;
+
+ mips_machtype = LOONGSON_MACHTYPE;
+
+ p = strstr(arcs_cmdline, "machtype=");
+ if (!p) {
+ mach_prom_init_machtype();
+ return;
+ }
+ p += strlen("machtype=");
+ strncpy(str, p, MACHTYPE_LEN);
+ str[MACHTYPE_LEN] = '\0';
+ p = strstr(str, " ");
+ if (p)
+ *p = '\0';
+
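+ /* Match the machtype= argument against the known system type names. */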
+ for (; system_types[machtype]; machtype++)
+ if (strstr(system_types[machtype], str)) {
+ mips_machtype = machtype;
+ break;
+ }
+}
diff --git a/arch/mips/loongson2ef/common/mem.c b/arch/mips/loongson2ef/common/mem.c
new file mode 100644
index 000000000..057d58bb4
--- /dev/null
+++ b/arch/mips/loongson2ef/common/mem.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ */
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/memblock.h>
+#include <linux/mm.h>
+
+#include <asm/bootinfo.h>
+
+#include <loongson.h>
+#include <mem.h>
+#include <pci.h>
+
+
+u32 memsize, highmemsize;
+
+void __init prom_init_memory(void)
+{
+ memblock_add(0x0, (memsize << 20));
+
+#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
+ {
+ int bit;
+
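+ /*
+ * Round the total memory size (in MB) up to a power of two: keep it
+ * if it already is one, otherwise take the next larger power of two;
+ * the +20 shift converts megabytes to a byte-sized window.
+ */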
+ bit = fls(memsize + highmemsize);
+ if (bit != ffs(memsize + highmemsize))
+ bit += 20;
+ else
+ bit = bit + 20 - 1;
+
+ /* set cpu window3 to map CPU to DDR: 2G -> 2G */
+ LOONGSON_ADDRWIN_CPUTODDR(ADDRWIN_WIN3, 0x80000000ul,
+ 0x80000000ul, (1 << bit));
+ mmiowb();
+ }
+#endif /* !CONFIG_CPU_SUPPORTS_ADDRWINCFG */
+
+#ifdef CONFIG_64BIT
+ if (highmemsize > 0)
+ memblock_add(LOONGSON_HIGHMEM_START, highmemsize << 20);
+#endif /* !CONFIG_64BIT */
+}
+
+/* override of arch/mips/mm/cache.c: __uncached_access */
+int __uncached_access(struct file *file, unsigned long addr)
+{
+ if (file->f_flags & O_DSYNC)
+ return 1;
+
+ return addr >= __pa(high_memory) ||
+ ((addr >= LOONGSON_MMIO_MEM_START) &&
+ (addr < LOONGSON_MMIO_MEM_END));
+}
diff --git a/arch/mips/loongson2ef/common/pci.c b/arch/mips/loongson2ef/common/pci.c
new file mode 100644
index 000000000..200916925
--- /dev/null
+++ b/arch/mips/loongson2ef/common/pci.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ */
+#include <linux/pci.h>
+
+#include <pci.h>
+#include <loongson.h>
+
+static struct resource loongson_pci_mem_resource = {
+ .name = "pci memory space",
+ .start = LOONGSON_PCI_MEM_START,
+ .end = LOONGSON_PCI_MEM_END,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource loongson_pci_io_resource = {
+ .name = "pci io space",
+ .start = LOONGSON_PCI_IO_START,
+ .end = IO_SPACE_LIMIT,
+ .flags = IORESOURCE_IO,
+};
+
+static struct pci_controller loongson_pci_controller = {
+ .pci_ops = &loongson_pci_ops,
+ .io_resource = &loongson_pci_io_resource,
+ .mem_resource = &loongson_pci_mem_resource,
+ .mem_offset = 0x00000000UL,
+ .io_offset = 0x00000000UL,
+};
+
+static void __init setup_pcimap(void)
+{
+ /*
+ * Local-to-PCI mapping for the CPU to access PCI space: the CPU
+ * address range [256M,448M] is the window for accessing PCI space;
+ * pcimap_lo[0,1,2] map it to PCI space [0M,64M], [320M,384M] and
+ * [384M,448M] respectively.
+ *
+ * pcimap: PCI_MAP2 PCI_Mem_Lo2 PCI_Mem_Lo1 PCI_Mem_Lo0
+ * [<2G] [384M,448M] [320M,384M] [0M,64M]
+ */
+ LOONGSON_PCIMAP = LOONGSON_PCIMAP_PCIMAP_2 |
+ LOONGSON_PCIMAP_WIN(2, LOONGSON_PCILO2_BASE) |
+ LOONGSON_PCIMAP_WIN(1, LOONGSON_PCILO1_BASE) |
+ LOONGSON_PCIMAP_WIN(0, 0);
+
+ /*
+ * PCI-DMA to local mapping: [2G,2G+256M] -> [0M,256M]
+ */
+ LOONGSON_PCIBASE0 = 0x80000000ul; /* base: 2G -> mmap: 0M */
+ /* size: 256M, burst transmission, pre-fetch enable, 64bit */
+ LOONGSON_PCI_HIT0_SEL_L = 0xc000000cul;
+ LOONGSON_PCI_HIT0_SEL_H = 0xfffffffful;
+ LOONGSON_PCI_HIT1_SEL_L = 0x00000006ul; /* set this BAR as invalid */
+ LOONGSON_PCI_HIT1_SEL_H = 0x00000000ul;
+ LOONGSON_PCI_HIT2_SEL_L = 0x00000006ul; /* set this BAR as invalid */
+ LOONGSON_PCI_HIT2_SEL_H = 0x00000000ul;
+
+ /* avoid deadlocks in PCI locked read/write operations */
+ LOONGSON_PCI_ISR4C = 0xd2000001ul;
+
+ /* do not change GNT to break a PCI transfer while a (broken) device
+ has not yet deasserted its GNT */
+ LOONGSON_PXARB_CFG = 0x00fe0105ul;
+
+#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
+ /*
+ * set cpu addr window2 to map CPU address space to PCI address space
+ */
+ LOONGSON_ADDRWIN_CPUTOPCI(ADDRWIN_WIN2, LOONGSON_CPU_MEM_SRC,
+ LOONGSON_PCI_MEM_DST, MMAP_CPUTOPCI_SIZE);
+#endif
+}
+
+extern int sbx00_acpi_init(void);
+
+static int __init pcibios_init(void)
+{
+ setup_pcimap();
+
+ loongson_pci_controller.io_map_base = mips_io_port_base;
+ register_pci_controller(&loongson_pci_controller);
+
+ return 0;
+}
+
+arch_initcall(pcibios_init);
diff --git a/arch/mips/loongson2ef/common/platform.c b/arch/mips/loongson2ef/common/platform.c
new file mode 100644
index 000000000..0084820cf
--- /dev/null
+++ b/arch/mips/loongson2ef/common/platform.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+
+#include <linux/err.h>
+#include <linux/smp.h>
+#include <linux/platform_device.h>
+
+static struct platform_device loongson2_cpufreq_device = {
+ .name = "loongson2_cpufreq",
+ .id = -1,
+};
+
+static int __init loongson2_cpufreq_init(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ /* Only the 2F revision and its successors support CPUFreq */
+ if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON2F)
+ return platform_device_register(&loongson2_cpufreq_device);
+
+ return -ENODEV;
+}
+
+arch_initcall(loongson2_cpufreq_init);
diff --git a/arch/mips/loongson2ef/common/pm.c b/arch/mips/loongson2ef/common/pm.c
new file mode 100644
index 000000000..bcb7ae977
--- /dev/null
+++ b/arch/mips/loongson2ef/common/pm.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * loongson-specific suspend support
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+#include <linux/suspend.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+
+#include <asm/i8259.h>
+#include <asm/mipsregs.h>
+
+#include <loongson.h>
+
+static unsigned int __maybe_unused cached_master_mask; /* i8259A */
+static unsigned int __maybe_unused cached_slave_mask;
+static unsigned int __maybe_unused cached_bonito_irq_mask; /* bonito */
+
+void arch_suspend_disable_irqs(void)
+{
+ /* disable all mips events */
+ local_irq_disable();
+
+#ifdef CONFIG_I8259
+ /* disable all events of i8259A */
+ cached_slave_mask = inb(PIC_SLAVE_IMR);
+ cached_master_mask = inb(PIC_MASTER_IMR);
+
+ outb(0xff, PIC_SLAVE_IMR);
+ inb(PIC_SLAVE_IMR);
+ outb(0xff, PIC_MASTER_IMR);
+ inb(PIC_MASTER_IMR);
+#endif
+ /* disable all events of bonito */
+ cached_bonito_irq_mask = LOONGSON_INTEN;
+ LOONGSON_INTENCLR = 0xffff;
+ (void)LOONGSON_INTENCLR;
+}
+
+void arch_suspend_enable_irqs(void)
+{
+ /* enable all mips events */
+ local_irq_enable();
+#ifdef CONFIG_I8259
+ /* only enable the cached events of i8259A */
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
+ outb(cached_master_mask, PIC_MASTER_IMR);
+#endif
+ /* enable all cached events of bonito */
+ LOONGSON_INTENSET = cached_bonito_irq_mask;
+ (void)LOONGSON_INTENSET;
+}
+
+/*
+ * Setup the board-specific events for waking up loongson from wait mode
+ */
+void __weak setup_wakeup_events(void)
+{
+}
+
+/*
+ * Check wakeup events
+ */
+int __weak wakeup_loongson(void)
+{
+ return 1;
+}
+
+/*
+ * If the events are really what we want to wake the CPU up for, wake it
+ * up; otherwise put the CPU back to sleep.
+ */
+static void wait_for_wakeup_events(void)
+{
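+ /*
+ * Clearing the low 3 bits of LOONGSON_CHIPCFG (the clock divider
+ * field) puts the CPU back into wait mode until the next event.
+ */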
+ while (!wakeup_loongson())
+ writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG);
+}
+
+/*
+ * Stop all perf counters
+ *
+ * $24 is the control register of Loongson perf counter
+ */
+static inline void stop_perf_counters(void)
+{
+ __write_64bit_c0_register($24, 0, 0);
+}
+
+static void loongson_suspend_enter(void)
+{
+ unsigned int cached_cpu_freq;
+
+ /* setup wakeup events via enabling the IRQs */
+ setup_wakeup_events();
+
+ stop_perf_counters();
+
+ cached_cpu_freq = readl(LOONGSON_CHIPCFG);
+
+ /* Put CPU into wait mode */
+ writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG);
+
+ /* wait for the given events to wakeup cpu from wait mode */
+ wait_for_wakeup_events();
+
+ writel(cached_cpu_freq, LOONGSON_CHIPCFG);
+
+ mmiowb();
+}
+
+void __weak mach_suspend(void)
+{
+}
+
+void __weak mach_resume(void)
+{
+}
+
+static int loongson_pm_enter(suspend_state_t state)
+{
+ mach_suspend();
+
+ /* processor specific suspend */
+ loongson_suspend_enter();
+
+ mach_resume();
+
+ return 0;
+}
+
+static int loongson_pm_valid_state(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_ON:
+ case PM_SUSPEND_STANDBY:
+ case PM_SUSPEND_MEM:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static const struct platform_suspend_ops loongson_pm_ops = {
+ .valid = loongson_pm_valid_state,
+ .enter = loongson_pm_enter,
+};
+
+static int __init loongson_pm_init(void)
+{
+ suspend_set_ops(&loongson_pm_ops);
+
+ return 0;
+}
+arch_initcall(loongson_pm_init);
diff --git a/arch/mips/loongson2ef/common/reset.c b/arch/mips/loongson2ef/common/reset.c
new file mode 100644
index 000000000..e49c40646
--- /dev/null
+++ b/arch/mips/loongson2ef/common/reset.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *
+ * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Zhangjin Wu, wuzhangjin@gmail.com
+ */
+#include <linux/init.h>
+#include <linux/pm.h>
+
+#include <asm/idle.h>
+#include <asm/reboot.h>
+
+#include <loongson.h>
+
+static inline void loongson_reboot(void)
+{
+#ifndef CONFIG_CPU_JUMP_WORKAROUNDS
+ ((void (*)(void))ioremap(LOONGSON_BOOT_BASE, 4)) ();
+#else
+ void (*func)(void);
+
+ func = (void *)ioremap(LOONGSON_BOOT_BASE, 4);
+
+ __asm__ __volatile__(
+ " .set noat \n"
+ " jr %[func] \n"
+ " .set at \n"
+ : /* No outputs */
+ : [func] "r" (func));
+#endif
+}
+
+static void loongson_restart(char *command)
+{
+ /* do preparation for reboot */
+ mach_prepare_reboot();
+
+ /* reboot via jumping to boot base address */
+ loongson_reboot();
+}
+
+static void loongson_poweroff(void)
+{
+ mach_prepare_shutdown();
+
+ /*
+ * It needs a wait loop here, but mips/kernel/reset.c already calls
+ * a generic delay loop, machine_hang(), so simply return.
+ */
+ return;
+}
+
+static void loongson_halt(void)
+{
+ pr_notice("\n\n** You can safely turn off the power now **\n\n");
+ while (1) {
+ if (cpu_wait)
+ cpu_wait();
+ }
+}
+
+static int __init mips_reboot_setup(void)
+{
+ _machine_restart = loongson_restart;
+ _machine_halt = loongson_halt;
+ pm_power_off = loongson_poweroff;
+
+ return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/loongson2ef/common/rtc.c b/arch/mips/loongson2ef/common/rtc.c
new file mode 100644
index 000000000..8d7628c0f
--- /dev/null
+++ b/arch/mips/loongson2ef/common/rtc.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Lemote Fuloong platform support
+ *
+ * Copyright(c) 2010 Arnaud Patard <apatard@mandriva.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mc146818rtc.h>
+
+static struct resource loongson_rtc_resources[] = {
+ {
+ .start = RTC_PORT(0),
+ .end = RTC_PORT(1),
+ .flags = IORESOURCE_IO,
+ }, {
+ .start = RTC_IRQ,
+ .end = RTC_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device loongson_rtc_device = {
+ .name = "rtc_cmos",
+ .id = -1,
+ .resource = loongson_rtc_resources,
+ .num_resources = ARRAY_SIZE(loongson_rtc_resources),
+};
+
+static int __init loongson_rtc_platform_init(void)
+{
+ platform_device_register(&loongson_rtc_device);
+ return 0;
+}
+
+device_initcall(loongson_rtc_platform_init);
diff --git a/arch/mips/loongson2ef/common/serial.c b/arch/mips/loongson2ef/common/serial.c
new file mode 100644
index 000000000..ac4f6e3eb
--- /dev/null
+++ b/arch/mips/loongson2ef/common/serial.c
@@ -0,0 +1,86 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Yan hua (yanhua@lemote.com)
+ * Author: Wu Zhangjin (wuzhangjin@gmail.com)
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/serial_8250.h>
+
+#include <asm/bootinfo.h>
+
+#include <loongson.h>
+#include <machine.h>
+
+#define PORT(int, clk) \
+{ \
+ .irq = int, \
+ .uartclk = clk, \
+ .iotype = UPIO_PORT, \
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
+ .regshift = 0, \
+}
+
+#define PORT_M(int, clk) \
+{ \
+ .irq = MIPS_CPU_IRQ_BASE + (int), \
+ .uartclk = clk, \
+ .iotype = UPIO_MEM, \
+ .membase = (void __iomem *)NULL, \
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
+ .regshift = 0, \
+}
+
+static struct plat_serial8250_port uart8250_data[MACH_LOONGSON_END + 1] = {
+ [MACH_LOONGSON_UNKNOWN] = {},
+ [MACH_LEMOTE_FL2E] = PORT(4, 1843200),
+ [MACH_LEMOTE_FL2F] = PORT(3, 1843200),
+ [MACH_LEMOTE_ML2F7] = PORT_M(3, 3686400),
+ [MACH_LEMOTE_YL2F89] = PORT_M(3, 3686400),
+ [MACH_DEXXON_GDIUM2F10] = PORT_M(3, 3686400),
+ [MACH_LEMOTE_NAS] = PORT_M(3, 3686400),
+ [MACH_LEMOTE_LL2F] = PORT(3, 1843200),
+ [MACH_LOONGSON_END] = {},
+};
+
+static struct platform_device uart8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+};
+
+static int __init serial_init(void)
+{
+ unsigned char iotype;
+
+ iotype = uart8250_data[mips_machtype].iotype;
+
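+ /*
+ * Memory-mapped UARTs use the (ioremapped) loongson_uart_base
+ * directly; port I/O UARTs use the port offset within the PCI I/O
+ * window.
+ */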
+ if (iotype == UPIO_MEM) {
+ uart8250_data[mips_machtype].mapbase = loongson_uart_base;
+ uart8250_data[mips_machtype].membase =
+ (void __iomem *)_loongson_uart_base;
+ } else if (iotype == UPIO_PORT) {
+ uart8250_data[mips_machtype].iobase =
+ loongson_uart_base - LOONGSON_PCIIO_BASE;
+ }
+
+ memset(&uart8250_data[mips_machtype + 1], 0,
+ sizeof(struct plat_serial8250_port));
+ uart8250_device.dev.platform_data = &uart8250_data[mips_machtype];
+
+ return platform_device_register(&uart8250_device);
+}
+module_init(serial_init);
+
+static void __exit serial_exit(void)
+{
+ platform_device_unregister(&uart8250_device);
+}
+module_exit(serial_exit);
diff --git a/arch/mips/loongson2ef/common/setup.c b/arch/mips/loongson2ef/common/setup.c
new file mode 100644
index 000000000..4fd27f4f9
--- /dev/null
+++ b/arch/mips/loongson2ef/common/setup.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ */
+#include <linux/export.h>
+#include <linux/init.h>
+
+#include <asm/wbflush.h>
+#include <asm/bootinfo.h>
+
+#include <loongson.h>
+
+static void wbflush_loongson(void)
+{
+ asm(".set\tpush\n\t"
+ ".set\tnoreorder\n\t"
+ ".set mips3\n\t"
+ "sync\n\t"
+ "nop\n\t"
+ ".set\tpop\n\t"
+ ".set mips0\n\t");
+}
+
+void (*__wbflush)(void) = wbflush_loongson;
+EXPORT_SYMBOL(__wbflush);
+
+void __init plat_mem_setup(void)
+{
+}
diff --git a/arch/mips/loongson2ef/common/time.c b/arch/mips/loongson2ef/common/time.c
new file mode 100644
index 000000000..585741af4
--- /dev/null
+++ b/arch/mips/loongson2ef/common/time.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+#include <asm/mc146818-time.h>
+#include <asm/time.h>
+#include <asm/hpet.h>
+
+#include <loongson.h>
+#include <cs5536/cs5536_mfgpt.h>
+
+void __init plat_time_init(void)
+{
+ /* setup mips r4k timer */
+ mips_hpt_frequency = cpu_clock_freq / 2;
+
+ setup_mfgpt0_timer();
+}
+
+void read_persistent_clock64(struct timespec64 *ts)
+{
+ ts->tv_sec = mc146818_get_cmos_time();
+ ts->tv_nsec = 0;
+}
diff --git a/arch/mips/loongson2ef/common/uart_base.c b/arch/mips/loongson2ef/common/uart_base.c
new file mode 100644
index 000000000..522bea6ad
--- /dev/null
+++ b/arch/mips/loongson2ef/common/uart_base.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+
+#include <linux/export.h>
+#include <asm/bootinfo.h>
+#include <asm/setup.h>
+
+#include <loongson.h>
+
+/* raw */
+unsigned long loongson_uart_base;
+/* ioremapped */
+unsigned long _loongson_uart_base;
+
+EXPORT_SYMBOL(loongson_uart_base);
+EXPORT_SYMBOL(_loongson_uart_base);
+
+void prom_init_loongson_uart_base(void)
+{
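+ /*
+ * The console UART is either a south-bridge UART at the legacy
+ * COM1/COM2 offsets (0x3f8/0x2f8) within PCI I/O space, or the
+ * CPU-provided LPC serial port in the local I/O window.
+ */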
+ switch (mips_machtype) {
+ case MACH_LEMOTE_FL2E:
+ loongson_uart_base = LOONGSON_PCIIO_BASE + 0x3f8;
+ break;
+ case MACH_LEMOTE_FL2F:
+ case MACH_LEMOTE_LL2F:
+ loongson_uart_base = LOONGSON_PCIIO_BASE + 0x2f8;
+ break;
+ case MACH_LEMOTE_ML2F7:
+ case MACH_LEMOTE_YL2F89:
+ case MACH_DEXXON_GDIUM2F10:
+ case MACH_LEMOTE_NAS:
+ default:
+ /* The CPU provided serial port (LPC) */
+ loongson_uart_base = LOONGSON_LIO1_BASE + 0x3f8;
+ break;
+ }
+
+ _loongson_uart_base = TO_UNCAC(loongson_uart_base);
+ setup_8250_early_printk_port(_loongson_uart_base, 0, 1024);
+}
diff --git a/arch/mips/loongson2ef/fuloong-2e/Makefile b/arch/mips/loongson2ef/fuloong-2e/Makefile
new file mode 100644
index 000000000..bb58edb3b
--- /dev/null
+++ b/arch/mips/loongson2ef/fuloong-2e/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for Lemote Fuloong2e mini-PC board.
+#
+
+obj-y += irq.o reset.o dma.o
diff --git a/arch/mips/loongson2ef/fuloong-2e/dma.c b/arch/mips/loongson2ef/fuloong-2e/dma.c
new file mode 100644
index 000000000..cea167d8a
--- /dev/null
+++ b/arch/mips/loongson2ef/fuloong-2e/dma.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/dma-direct.h>
+
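+/*
+ * The Fuloong 2E maps system RAM into PCI/DMA space at 2 GB (see
+ * setup_pcimap(): PCIBASE0 maps PCI [2G,2G+256M] to local [0M,256M]),
+ * so the translation is just setting or clearing bit 31.
+ */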
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr | 0x80000000;
+}
+
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr & 0x7fffffff;
+}
diff --git a/arch/mips/loongson2ef/fuloong-2e/irq.c b/arch/mips/loongson2ef/fuloong-2e/irq.c
new file mode 100644
index 000000000..305aa2eb7
--- /dev/null
+++ b/arch/mips/loongson2ef/fuloong-2e/irq.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ */
+#include <linux/interrupt.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/i8259.h>
+
+#include <loongson.h>
+
+static void i8259_irqdispatch(void)
+{
+ int irq;
+
+ irq = i8259_irq();
+ if (irq >= 0)
+ do_IRQ(irq);
+ else
+ spurious_interrupt();
+}
+
+asmlinkage void mach_irq_dispatch(unsigned int pending)
+{
+ if (pending & CAUSEF_IP7)
+ do_IRQ(MIPS_CPU_IRQ_BASE + 7);
+ else if (pending & CAUSEF_IP6) /* perf counter overflow */
+ do_perfcnt_IRQ();
+ else if (pending & CAUSEF_IP5)
+ i8259_irqdispatch();
+ else if (pending & CAUSEF_IP2)
+ bonito_irqdispatch();
+ else
+ spurious_interrupt();
+}
+
+void __init mach_init_irq(void)
+{
+ int irq;
+
+ /* Initialize all interrupt controllers:
+ * 0-15 ------> i8259 interrupts
+ * 16-23 ------> MIPS CPU interrupts
+ * 32-63 ------> bonito irqs
+ */
+
+ /* most bonito irqs should be level triggered */
+ LOONGSON_INTEDGE = LOONGSON_ICU_SYSTEMERR | LOONGSON_ICU_MASTERERR |
+ LOONGSON_ICU_RETRYERR | LOONGSON_ICU_MBOXES;
+
+ /* Sets the first-level interrupt dispatcher. */
+ mips_cpu_irq_init();
+ init_i8259_irqs();
+ bonito_irq_init();
+
+ /* bonito irq at IP2 */
+ irq = MIPS_CPU_IRQ_BASE + 2;
+ if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL))
+ pr_err("Failed to request irq %d (cascade)\n", irq);
+ /* 8259 irq at IP5 */
+ irq = MIPS_CPU_IRQ_BASE + 5;
+ if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL))
+ pr_err("Failed to request irq %d (cascade)\n", irq);
+}
diff --git a/arch/mips/loongson2ef/fuloong-2e/reset.c b/arch/mips/loongson2ef/fuloong-2e/reset.c
new file mode 100644
index 000000000..8273de1cf
--- /dev/null
+++ b/arch/mips/loongson2ef/fuloong-2e/reset.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Board-specific reboot/shutdown routines
+ * Copyright (c) 2009 Philippe Vachon <philippe@cowpig.ca>
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+
+#include <loongson.h>
+
+void mach_prepare_reboot(void)
+{
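+ /* Pulse bit 2 of GENCFG (clear, then set) to trigger the reset. */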
+ LOONGSON_GENCFG &= ~(1 << 2);
+ LOONGSON_GENCFG |= (1 << 2);
+}
+
+void mach_prepare_shutdown(void)
+{
+}
diff --git a/arch/mips/loongson2ef/lemote-2f/Makefile b/arch/mips/loongson2ef/lemote-2f/Makefile
new file mode 100644
index 000000000..881a0ec06
--- /dev/null
+++ b/arch/mips/loongson2ef/lemote-2f/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for lemote loongson2f family machines
+#
+
+obj-y += clock.o machtype.o irq.o reset.o dma.o ec_kb3310b.o
+
+#
+# Suspend Support
+#
+
+obj-$(CONFIG_SUSPEND) += pm.o
diff --git a/arch/mips/loongson2ef/lemote-2f/clock.c b/arch/mips/loongson2ef/lemote-2f/clock.c
new file mode 100644
index 000000000..850b6b9f8
--- /dev/null
+++ b/arch/mips/loongson2ef/lemote-2f/clock.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
+ * Author: Yanhua, yanh@lemote.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/cpufreq.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+
+#include <asm/mach-loongson2ef/loongson.h>
+
+enum {
+ DC_ZERO, DC_25PT = 2, DC_37PT, DC_50PT, DC_62PT, DC_75PT,
+ DC_87PT, DC_DISABLE, DC_RESV
+};
+
+struct cpufreq_frequency_table loongson2_clockmod_table[] = {
+ {0, DC_RESV, CPUFREQ_ENTRY_INVALID},
+ {0, DC_ZERO, CPUFREQ_ENTRY_INVALID},
+ {0, DC_25PT, 0},
+ {0, DC_37PT, 0},
+ {0, DC_50PT, 0},
+ {0, DC_62PT, 0},
+ {0, DC_75PT, 0},
+ {0, DC_87PT, 0},
+ {0, DC_DISABLE, 0},
+ {0, DC_RESV, CPUFREQ_TABLE_END},
+};
+EXPORT_SYMBOL_GPL(loongson2_clockmod_table);
+
+int loongson2_cpu_set_rate(unsigned long rate_khz)
+{
+ struct cpufreq_frequency_table *pos;
+ int regval;
+
+ cpufreq_for_each_valid_entry(pos, loongson2_clockmod_table)
+ if (rate_khz == pos->frequency)
+ break;
+ if (rate_khz != pos->frequency)
+ return -ENOTSUPP;
+
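+ /* The low 3 bits of CHIPCFG select the clock duty cycle (DC_*). */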
+ regval = readl(LOONGSON_CHIPCFG);
+ regval = (regval & ~0x7) | (pos->driver_data - 1);
+ writel(regval, LOONGSON_CHIPCFG);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(loongson2_cpu_set_rate);
diff --git a/arch/mips/loongson2ef/lemote-2f/dma.c b/arch/mips/loongson2ef/lemote-2f/dma.c
new file mode 100644
index 000000000..3c9e99456
--- /dev/null
+++ b/arch/mips/loongson2ef/lemote-2f/dma.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/dma-direct.h>
+
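+/*
+ * DMA addresses above the 2G+256M window are passed through unchanged;
+ * addresses within the 256 MB window at 2 GB are translated back to
+ * RAM addresses by masking off the window base.
+ */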
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr | 0x80000000;
+}
+
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ if (dma_addr > 0x8fffffff)
+ return dma_addr;
+ return dma_addr & 0x0fffffff;
+}
diff --git a/arch/mips/loongson2ef/lemote-2f/ec_kb3310b.c b/arch/mips/loongson2ef/lemote-2f/ec_kb3310b.c
new file mode 100644
index 000000000..d138220e9
--- /dev/null
+++ b/arch/mips/loongson2ef/lemote-2f/ec_kb3310b.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Basic KB3310B Embedded Controller support for the YeeLoong 2F netbook
+ *
+ * Copyright (C) 2008 Lemote Inc.
+ * Author: liujl <liujl@lemote.com>, 2008-04-20
+ */
+
+#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+#include "ec_kb3310b.h"
+
+static DEFINE_SPINLOCK(index_access_lock);
+static DEFINE_SPINLOCK(port_access_lock);
+
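+/*
+ * Indexed register access: write the 16-bit EC register address to the
+ * HIGH/LOW index ports, then transfer the value through the DATA port.
+ * index_access_lock serializes this three-port sequence.
+ */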
+unsigned char ec_read(unsigned short addr)
+{
+ unsigned char value;
+ unsigned long flags;
+
+ spin_lock_irqsave(&index_access_lock, flags);
+ outb((addr & 0xff00) >> 8, EC_IO_PORT_HIGH);
+ outb((addr & 0x00ff), EC_IO_PORT_LOW);
+ value = inb(EC_IO_PORT_DATA);
+ spin_unlock_irqrestore(&index_access_lock, flags);
+
+ return value;
+}
+EXPORT_SYMBOL_GPL(ec_read);
+
+void ec_write(unsigned short addr, unsigned char val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&index_access_lock, flags);
+ outb((addr & 0xff00) >> 8, EC_IO_PORT_HIGH);
+ outb((addr & 0x00ff), EC_IO_PORT_LOW);
+ outb(val, EC_IO_PORT_DATA);
+ /* flush the write action */
+ inb(EC_IO_PORT_DATA);
+ spin_unlock_irqrestore(&index_access_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ec_write);
+
+/*
+ * This function is used for EC command writes and corresponding status queries.
+ */
+int ec_query_seq(unsigned char cmd)
+{
+ int timeout;
+ unsigned char status;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&port_access_lock, flags);
+
+ /* make the chip go into reset mode */
+ udelay(EC_REG_DELAY);
+ outb(cmd, EC_CMD_PORT);
+ udelay(EC_REG_DELAY);
+
+ /* check if the command is received by ec */
+ timeout = EC_CMD_TIMEOUT;
+ status = inb(EC_STS_PORT);
+ while (timeout-- && (status & (1 << 1))) {
+ status = inb(EC_STS_PORT);
+ udelay(EC_REG_DELAY);
+ }
+
+ spin_unlock_irqrestore(&port_access_lock, flags);
+
+ if (timeout <= 0) {
+ printk(KERN_ERR "%s: deadable error : timeout...\n", __func__);
+ ret = -EINVAL;
+ } else
+ printk(KERN_INFO
+ "(%x/%d)ec issued command %d status : 0x%x\n",
+ timeout, EC_CMD_TIMEOUT - timeout, cmd, status);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ec_query_seq);
+
+/*
+ * Send query command to EC to get the proper event number
+ */
+int ec_query_event_num(void)
+{
+ return ec_query_seq(CMD_GET_EVENT_NUM);
+}
+EXPORT_SYMBOL(ec_query_event_num);
+
+/*
+ * Get event number from EC
+ *
+ * NOTE: This routine must be called after ec_query_event_num() in the
+ * interrupt handler.
+ */
+int ec_get_event_num(void)
+{
+ int timeout = 100;
+ unsigned char value;
+ unsigned char status;
+
+ udelay(EC_REG_DELAY);
+ status = inb(EC_STS_PORT);
+ udelay(EC_REG_DELAY);
+ while (timeout-- && !(status & (1 << 0))) {
+ status = inb(EC_STS_PORT);
+ udelay(EC_REG_DELAY);
+ }
+ if (timeout <= 0) {
+ pr_info("%s: get event number timeout.\n", __func__);
+
+ return -EINVAL;
+ }
+ value = inb(EC_DAT_PORT);
+ udelay(EC_REG_DELAY);
+
+ return value;
+}
+EXPORT_SYMBOL(ec_get_event_num);
diff --git a/arch/mips/loongson2ef/lemote-2f/ec_kb3310b.h b/arch/mips/loongson2ef/lemote-2f/ec_kb3310b.h
new file mode 100644
index 000000000..aecdbc9c8
--- /dev/null
+++ b/arch/mips/loongson2ef/lemote-2f/ec_kb3310b.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * KB3310B Embedded Controller
+ *
+ * Copyright (C) 2008 Lemote Inc.
+ * Author: liujl <liujl@lemote.com>, 2008-03-14
+ */
+
+#ifndef _EC_KB3310B_H
+#define _EC_KB3310B_H
+
+extern unsigned char ec_read(unsigned short addr);
+extern void ec_write(unsigned short addr, unsigned char val);
+extern int ec_query_seq(unsigned char cmd);
+extern int ec_query_event_num(void);
+extern int ec_get_event_num(void);
+
+typedef int (*sci_handler) (int status);
+extern sci_handler yeeloong_report_lid_status;
+
+#define SCI_IRQ_NUM 0x0A
+
+/*
+ * The following registers are accessed through the EC index ports:
+ * 1. write the high byte of the EC register address to PORT_HIGH,
+ * 2. write the low byte to PORT_LOW,
+ * 3. write data to, or read data from, PORT_DATA.
+ */
+#define EC_IO_PORT_HIGH 0x0381
+#define EC_IO_PORT_LOW 0x0382
+#define EC_IO_PORT_DATA 0x0383
+
+/*
+ * EC delay time is 500us for register and status access
+ */
+#define EC_REG_DELAY 500 /* unit : us */
+#define EC_CMD_TIMEOUT 0x1000
+
+/*
+ * EC access port for SCI communication
+ */
+#define EC_CMD_PORT 0x66
+#define EC_STS_PORT 0x66
+#define EC_DAT_PORT 0x62
+#define CMD_INIT_IDLE_MODE 0xdd
+#define CMD_EXIT_IDLE_MODE 0xdf
+#define CMD_INIT_RESET_MODE 0xd8
+#define CMD_REBOOT_SYSTEM 0x8c
+#define CMD_GET_EVENT_NUM 0x84
+#define CMD_PROGRAM_PIECE 0xda
+
+/* temperature & fan registers */
+#define REG_TEMPERATURE_VALUE 0xF458
+#define REG_FAN_AUTO_MAN_SWITCH 0xF459
+#define BIT_FAN_AUTO 0
+#define BIT_FAN_MANUAL 1
+#define REG_FAN_CONTROL 0xF4D2
+#define BIT_FAN_CONTROL_ON (1 << 0)
+#define BIT_FAN_CONTROL_OFF (0 << 0)
+#define REG_FAN_STATUS 0xF4DA
+#define BIT_FAN_STATUS_ON (1 << 0)
+#define BIT_FAN_STATUS_OFF (0 << 0)
+#define REG_FAN_SPEED_HIGH 0xFE22
+#define REG_FAN_SPEED_LOW 0xFE23
+#define REG_FAN_SPEED_LEVEL 0xF4CC
+/* fan speed divider */
+#define FAN_SPEED_DIVIDER 480000 /* (60*1000*1000/62.5/2) */
+
+/* battery registers */
+#define REG_BAT_DESIGN_CAP_HIGH 0xF77D
+#define REG_BAT_DESIGN_CAP_LOW 0xF77E
+#define REG_BAT_FULLCHG_CAP_HIGH 0xF780
+#define REG_BAT_FULLCHG_CAP_LOW 0xF781
+#define REG_BAT_DESIGN_VOL_HIGH 0xF782
+#define REG_BAT_DESIGN_VOL_LOW 0xF783
+#define REG_BAT_CURRENT_HIGH 0xF784
+#define REG_BAT_CURRENT_LOW 0xF785
+#define REG_BAT_VOLTAGE_HIGH 0xF786
+#define REG_BAT_VOLTAGE_LOW 0xF787
+#define REG_BAT_TEMPERATURE_HIGH 0xF788
+#define REG_BAT_TEMPERATURE_LOW 0xF789
+#define REG_BAT_RELATIVE_CAP_HIGH 0xF492
+#define REG_BAT_RELATIVE_CAP_LOW 0xF493
+#define REG_BAT_VENDOR 0xF4C4
+#define FLAG_BAT_VENDOR_SANYO 0x01
+#define FLAG_BAT_VENDOR_SIMPLO 0x02
+#define REG_BAT_CELL_COUNT 0xF4C6
+#define FLAG_BAT_CELL_3S1P 0x03
+#define FLAG_BAT_CELL_3S2P 0x06
+#define REG_BAT_CHARGE 0xF4A2
+#define FLAG_BAT_CHARGE_DISCHARGE 0x01
+#define FLAG_BAT_CHARGE_CHARGE 0x02
+#define FLAG_BAT_CHARGE_ACPOWER 0x00
+#define REG_BAT_STATUS 0xF4B0
+#define BIT_BAT_STATUS_LOW (1 << 5)
+#define BIT_BAT_STATUS_DESTROY (1 << 2)
+#define BIT_BAT_STATUS_FULL (1 << 1)
+#define BIT_BAT_STATUS_IN (1 << 0)
+#define REG_BAT_CHARGE_STATUS 0xF4B1
+#define BIT_BAT_CHARGE_STATUS_OVERTEMP (1 << 2)
+#define BIT_BAT_CHARGE_STATUS_PRECHG (1 << 1)
+#define REG_BAT_STATE 0xF482
+#define BIT_BAT_STATE_CHARGING (1 << 1)
+#define BIT_BAT_STATE_DISCHARGING (1 << 0)
+#define REG_BAT_POWER 0xF440
+#define BIT_BAT_POWER_S3 (1 << 2)
+#define BIT_BAT_POWER_ON (1 << 1)
+#define BIT_BAT_POWER_ACIN (1 << 0)
+
+/* other registers */
+/* Audio: rd/wr */
+#define REG_AUDIO_VOLUME 0xF46C
+#define REG_AUDIO_MUTE 0xF4E7
+#define REG_AUDIO_BEEP 0xF4D0
+/* USB port power or not: rd/wr */
+#define REG_USB0_FLAG 0xF461
+#define REG_USB1_FLAG 0xF462
+#define REG_USB2_FLAG 0xF463
+#define BIT_USB_FLAG_ON 1
+#define BIT_USB_FLAG_OFF 0
+/* LID */
+#define REG_LID_DETECT 0xF4BD
+#define BIT_LID_DETECT_ON 1
+#define BIT_LID_DETECT_OFF 0
+/* CRT */
+#define REG_CRT_DETECT 0xF4AD
+#define BIT_CRT_DETECT_PLUG 1
+#define BIT_CRT_DETECT_UNPLUG 0
+/* LCD backlight brightness adjust: 9 levels */
+#define REG_DISPLAY_BRIGHTNESS 0xF4F5
+/* Black screen Status */
+#define BIT_DISPLAY_LCD_ON 1
+#define BIT_DISPLAY_LCD_OFF 0
+/* LCD backlight control: off/restore */
+#define REG_BACKLIGHT_CTRL 0xF7BD
+#define BIT_BACKLIGHT_ON 1
+#define BIT_BACKLIGHT_OFF 0
+/* Reset the machine auto-clear: rd/wr */
+#define REG_RESET 0xF4EC
+#define BIT_RESET_ON 1
+/* Light the led: rd/wr */
+#define REG_LED 0xF4C8
+#define BIT_LED_RED_POWER (1 << 0)
+#define BIT_LED_ORANGE_POWER (1 << 1)
+#define BIT_LED_GREEN_CHARGE (1 << 2)
+#define BIT_LED_RED_CHARGE (1 << 3)
+#define BIT_LED_NUMLOCK (1 << 4)
+/* Test led mode, all led on/off */
+#define REG_LED_TEST 0xF4C2
+#define BIT_LED_TEST_IN 1
+#define BIT_LED_TEST_OUT 0
+/* Camera on/off */
+#define REG_CAMERA_STATUS 0xF46A
+#define BIT_CAMERA_STATUS_ON 1
+#define BIT_CAMERA_STATUS_OFF 0
+#define REG_CAMERA_CONTROL 0xF7B7
+#define BIT_CAMERA_CONTROL_OFF 0
+#define BIT_CAMERA_CONTROL_ON 1
+/* Wlan Status */
+#define REG_WLAN 0xF4FA
+#define BIT_WLAN_ON 1
+#define BIT_WLAN_OFF 0
+#define REG_DISPLAY_LCD 0xF79F
+
+/* SCI Event Number from EC */
+enum {
+ EVENT_LID = 0x23, /* LID open/close */
+ EVENT_DISPLAY_TOGGLE, /* Fn+F3 for display switch */
+ EVENT_SLEEP, /* Fn+F1 for entering sleep mode */
+ EVENT_OVERTEMP, /* Over-temperature happened */
+ EVENT_CRT_DETECT, /* CRT is connected */
+ EVENT_CAMERA, /* Camera on/off */
+ EVENT_USB_OC2, /* USB2 Over Current occurred */
+ EVENT_USB_OC0, /* USB0 Over Current occurred */
+ EVENT_BLACK_SCREEN, /* Turn on/off backlight */
+ EVENT_AUDIO_MUTE, /* Mute on/off */
+ EVENT_DISPLAY_BRIGHTNESS,/* LCD backlight brightness adjust */
+ EVENT_AC_BAT, /* AC & Battery relative issue */
+ EVENT_AUDIO_VOLUME, /* Volume adjust */
+ EVENT_WLAN, /* Wlan on/off */
+ EVENT_END
+};
+
+#endif /* !_EC_KB3310B_H */
diff --git a/arch/mips/loongson2ef/lemote-2f/irq.c b/arch/mips/loongson2ef/lemote-2f/irq.c
new file mode 100644
index 000000000..6f0057997
--- /dev/null
+++ b/arch/mips/loongson2ef/lemote-2f/irq.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2007 Lemote Inc.
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/i8259.h>
+#include <asm/mipsregs.h>
+
+#include <loongson.h>
+#include <machine.h>
+
+#define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* cpu timer */
+#define LOONGSON_NORTH_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 6) /* bonito */
+#define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 3) /* cpu serial port */
+#define LOONGSON_SOUTH_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 2) /* i8259 */
+
+#define LOONGSON_INT_BIT_INT0 (1 << 11)
+#define LOONGSON_INT_BIT_INT1 (1 << 12)
+
+/*
+ * The generic i8259_irq() makes the kernel hang on boot. Since we cannot
+ * get the irq via the IRR directly, we access the ISR instead.
+ */
+int mach_i8259_irq(void)
+{
+ int irq, isr;
+
+ irq = -1;
+
+ if ((LOONGSON_INTISR & LOONGSON_INTEN) & LOONGSON_INT_BIT_INT0) {
+ raw_spin_lock(&i8259A_lock);
+ isr = inb(PIC_MASTER_CMD) &
+ ~inb(PIC_MASTER_IMR) & ~(1 << PIC_CASCADE_IR);
+ if (!isr)
+ isr = (inb(PIC_SLAVE_CMD) & ~inb(PIC_SLAVE_IMR)) << 8;
+ irq = ffs(isr) - 1;
+ if (unlikely(irq == 7)) {
+ /*
+ * This may be a spurious interrupt.
+ *
+ * Read the interrupt status register (ISR). If the most
+ * significant bit is not set then there is no valid
+ * interrupt.
+ */
+ outb(0x0B, PIC_MASTER_ISR); /* ISR register */
+ if (~inb(PIC_MASTER_ISR) & 0x80)
+ irq = -1;
+ }
+ raw_spin_unlock(&i8259A_lock);
+ }
+
+ return irq;
+}
+EXPORT_SYMBOL(mach_i8259_irq);
+
+static void i8259_irqdispatch(void)
+{
+ int irq;
+
+ irq = mach_i8259_irq();
+ if (irq >= 0)
+ do_IRQ(irq);
+ else
+ spurious_interrupt();
+}
+
+void mach_irq_dispatch(unsigned int pending)
+{
+ if (pending & CAUSEF_IP7)
+ do_IRQ(LOONGSON_TIMER_IRQ);
+ else if (pending & CAUSEF_IP6) { /* North Bridge, Perf counter */
+ do_perfcnt_IRQ();
+ bonito_irqdispatch();
+ } else if (pending & CAUSEF_IP3) /* CPU UART */
+ do_IRQ(LOONGSON_UART_IRQ);
+ else if (pending & CAUSEF_IP2) /* South Bridge */
+ i8259_irqdispatch();
+ else
+ spurious_interrupt();
+}
+
+static irqreturn_t ip6_action(int cpl, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
+void __init mach_init_irq(void)
+{
+ /* Initialize all interrupt controllers:
+ * 0-15 ------> i8259 interrupts
+ * 16-23 ------> MIPS CPU interrupts
+ * 32-63 ------> bonito irqs
+ */
+
+ /* setup cs5536 as high level trigger */
+ LOONGSON_INTPOL = LOONGSON_INT_BIT_INT0 | LOONGSON_INT_BIT_INT1;
+ LOONGSON_INTEDGE &= ~(LOONGSON_INT_BIT_INT0 | LOONGSON_INT_BIT_INT1);
+
+ /* Sets the first-level interrupt dispatcher. */
+ mips_cpu_irq_init();
+ init_i8259_irqs();
+ bonito_irq_init();
+
+ /* setup north bridge irq (bonito) */
+ if (request_irq(LOONGSON_NORTH_BRIDGE_IRQ, ip6_action,
+ IRQF_SHARED | IRQF_NO_THREAD, "cascade", ip6_action))
+ pr_err("Failed to register north bridge cascade interrupt\n");
+ /* setup south bridge irq (i8259) */
+ if (request_irq(LOONGSON_SOUTH_BRIDGE_IRQ, no_action,
+ IRQF_NO_THREAD | IRQF_NO_SUSPEND, "cascade", NULL))
+ pr_err("Failed to register south bridge cascade interrupt\n");
+}
diff --git a/arch/mips/loongson2ef/lemote-2f/machtype.c b/arch/mips/loongson2ef/lemote-2f/machtype.c
new file mode 100644
index 000000000..9462a3ab5
--- /dev/null
+++ b/arch/mips/loongson2ef/lemote-2f/machtype.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+#include <asm/bootinfo.h>
+
+#include <loongson.h>
+
+void __init mach_prom_init_machtype(void)
+{
+ /* The same kernel image is shared among the Lemote 2F family of
+ * machines; the machtype= kernel command line argument indicates
+ * which machine is in use and is passed automatically by recent
+ * PMON versions. Fortunately, the machine type can also be derived
+ * directly from the PMON_VER= command line (except on the NAS
+ * machine), which helps users with older firmware.
+ *
+ * If no "machtype=" passed, get machine type from "PMON_VER=".
+ * PMON_VER=LM8089 Lemote 8.9'' netbook
+ * LM8101 Lemote 10.1'' netbook
+ * (The above two netbooks have the same kernel support)
+ * LM6XXX Lemote FuLoong(2F) box series
+ * LM9XXX Lemote LynLoong PC series
+ */
+ if (strstr(arcs_cmdline, "PMON_VER=LM")) {
+ if (strstr(arcs_cmdline, "PMON_VER=LM8"))
+ mips_machtype = MACH_LEMOTE_YL2F89;
+ else if (strstr(arcs_cmdline, "PMON_VER=LM6"))
+ mips_machtype = MACH_LEMOTE_FL2F;
+ else if (strstr(arcs_cmdline, "PMON_VER=LM9"))
+ mips_machtype = MACH_LEMOTE_LL2F;
+ else
+ mips_machtype = MACH_LEMOTE_NAS;
+
+ strcat(arcs_cmdline, " machtype=");
+ strcat(arcs_cmdline, get_system_type());
+ strcat(arcs_cmdline, " ");
+ }
+}
diff --git a/arch/mips/loongson2ef/lemote-2f/pm.c b/arch/mips/loongson2ef/lemote-2f/pm.c
new file mode 100644
index 000000000..3d0027229
--- /dev/null
+++ b/arch/mips/loongson2ef/lemote-2f/pm.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Lemote loongson2f family machines' specific suspend support
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#include <linux/suspend.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+#include <linux/i8042.h>
+#include <linux/export.h>
+
+#include <asm/i8259.h>
+#include <asm/mipsregs.h>
+#include <asm/bootinfo.h>
+
+#include <loongson.h>
+
+#include <cs5536/cs5536_mfgpt.h>
+#include "ec_kb3310b.h"
+
+#define I8042_KBD_IRQ 1
+#define I8042_CTR_KBDINT 0x01
+#define I8042_CTR_KBDDIS 0x10
+
+static unsigned char i8042_ctr;
+
+static int i8042_enable_kbd_port(void)
+{
+ if (i8042_command(&i8042_ctr, I8042_CMD_CTL_RCTR)) {
+ pr_err("i8042.c: Can't read CTR while enabling i8042 kbd port."
+ "\n");
+ return -EIO;
+ }
+
+ i8042_ctr &= ~I8042_CTR_KBDDIS;
+ i8042_ctr |= I8042_CTR_KBDINT;
+
+ if (i8042_command(&i8042_ctr, I8042_CMD_CTL_WCTR)) {
+ i8042_ctr &= ~I8042_CTR_KBDINT;
+ i8042_ctr |= I8042_CTR_KBDDIS;
+ pr_err("i8042.c: Failed to enable KBD port.\n");
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void setup_wakeup_events(void)
+{
+ int irq_mask;
+
+ switch (mips_machtype) {
+ case MACH_LEMOTE_ML2F7:
+ case MACH_LEMOTE_YL2F89:
+ /* open the keyboard irq in i8259A */
+ outb((0xff & ~(1 << I8042_KBD_IRQ)), PIC_MASTER_IMR);
+ irq_mask = inb(PIC_MASTER_IMR);
+
+ /* enable keyboard port */
+ i8042_enable_kbd_port();
+
+ /* Wakeup CPU via SCI lid open event */
+ outb(irq_mask & ~(1 << PIC_CASCADE_IR), PIC_MASTER_IMR);
+ inb(PIC_MASTER_IMR);
+ outb(0xff & ~(1 << (SCI_IRQ_NUM - 8)), PIC_SLAVE_IMR);
+ inb(PIC_SLAVE_IMR);
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+static struct delayed_work lid_task;
+static int initialized;
+/* yeeloong_report_lid_status will be implemented in yeeloong_laptop.c */
+sci_handler yeeloong_report_lid_status;
+EXPORT_SYMBOL(yeeloong_report_lid_status);
+static void yeeloong_lid_update_task(struct work_struct *work)
+{
+ if (yeeloong_report_lid_status)
+ yeeloong_report_lid_status(BIT_LID_DETECT_ON);
+}
+
+int wakeup_loongson(void)
+{
+ int irq;
+
+ /* query the interrupt number */
+ irq = mach_i8259_irq();
+ if (irq < 0)
+ return 0;
+
+ printk(KERN_INFO "%s: irq = %d\n", __func__, irq);
+
+ if (irq == I8042_KBD_IRQ)
+ return 1;
+ else if (irq == SCI_IRQ_NUM) {
+ int ret, sci_event;
+ /* query the event number */
+ ret = ec_query_seq(CMD_GET_EVENT_NUM);
+ if (ret < 0)
+ return 0;
+ sci_event = ec_get_event_num();
+ if (sci_event < 0)
+ return 0;
+ if (sci_event == EVENT_LID) {
+ int lid_status;
+ /* check the LID status */
+ lid_status = ec_read(REG_LID_DETECT);
+ /* wakeup cpu when people open the LID */
+ if (lid_status == BIT_LID_DETECT_ON) {
+ /* If we called it directly here, getnstimeofday
+ * would emit a WARNING via
+ * "WARN_ON(timekeeping_suspended);" because we
+ * cannot schedule while in suspend mode.
+ */
+ if (initialized == 0) {
+ INIT_DELAYED_WORK(&lid_task,
+ yeeloong_lid_update_task);
+ initialized = 1;
+ }
+ schedule_delayed_work(&lid_task, 1);
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void __weak mach_suspend(void)
+{
+ disable_mfgpt0_counter();
+}
+
+void __weak mach_resume(void)
+{
+ enable_mfgpt0_counter();
+}
diff --git a/arch/mips/loongson2ef/lemote-2f/reset.c b/arch/mips/loongson2ef/lemote-2f/reset.c
new file mode 100644
index 000000000..197dae4ff
--- /dev/null
+++ b/arch/mips/loongson2ef/lemote-2f/reset.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Board-specific reboot/shutdown routines
+ *
+ * Copyright (c) 2009 Philippe Vachon <philippe@cowpig.ca>
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+
+#include <asm/bootinfo.h>
+
+#include <loongson.h>
+
+#include <cs5536/cs5536.h>
+#include "ec_kb3310b.h"
+
+static void reset_cpu(void)
+{
+ /*
+ * Reset the CPU to full speed; this is needed when CPU frequency
+ * scaling is enabled.
+ */
+ writel(readl(LOONGSON_CHIPCFG) | 0x7, LOONGSON_CHIPCFG);
+}
+
+/* reset support for fuloong2f */
+
+static void fl2f_reboot(void)
+{
+ reset_cpu();
+
+ /* Send a reset signal to the south bridge.
+ *
+ * NOTE: if "Power Management" is enabled in the kernel, the rtl8169
+ * does not reset properly with this operation and will not work in
+ * PMON afterwards; typing the halt command and then rebooting works
+ * around it. The hardware reset logic seems not to work correctly.
+ */
+ {
+ u32 hi, lo;
+ _rdmsr(DIVIL_MSR_REG(DIVIL_SOFT_RESET), &hi, &lo);
+ lo |= 0x00000001;
+ _wrmsr(DIVIL_MSR_REG(DIVIL_SOFT_RESET), hi, lo);
+ }
+}
+
+static void fl2f_shutdown(void)
+{
+ u32 hi, lo, val;
+ int gpio_base;
+
+ /* get gpio base */
+ _rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_GPIO), &hi, &lo);
+ gpio_base = lo & 0xff00;
+
+ /* make cs5536 gpio13 output enable */
+ val = inl(gpio_base + GPIOL_OUT_EN);
+ val &= ~(1 << (16 + 13));
+ val |= (1 << 13);
+ outl(val, gpio_base + GPIOL_OUT_EN);
+ mmiowb();
+ /* make cs5536 gpio13 output low level voltage. */
+ val = inl(gpio_base + GPIOL_OUT_VAL) & ~(1 << (13));
+ val |= (1 << (16 + 13));
+ outl(val, gpio_base + GPIOL_OUT_VAL);
+ mmiowb();
+}
+
+/* reset support for yeeloong2f and mengloong2f notebook */
+
+static void ml2f_reboot(void)
+{
+ reset_cpu();
+
+ /* send a reset signal to the EC (embedded controller) */
+ ec_write(REG_RESET, BIT_RESET_ON);
+}
+
+#define yl2f89_reboot ml2f_reboot
+
+/* the menglong (7 inch) laptop has different shutdown logic from the 8.9 inch one */
+#define EC_SHUTDOWN_IO_PORT_HIGH 0xff2d
+#define EC_SHUTDOWN_IO_PORT_LOW 0xff2e
+#define EC_SHUTDOWN_IO_PORT_DATA 0xff2f
+#define REG_SHUTDOWN_HIGH 0xFC
+#define REG_SHUTDOWN_LOW 0x29
+#define BIT_SHUTDOWN_ON (1 << 1)
+
+static void ml2f_shutdown(void)
+{
+ u8 val;
+ u64 i;
+
+ outb(REG_SHUTDOWN_HIGH, EC_SHUTDOWN_IO_PORT_HIGH);
+ outb(REG_SHUTDOWN_LOW, EC_SHUTDOWN_IO_PORT_LOW);
+ mmiowb();
+ val = inb(EC_SHUTDOWN_IO_PORT_DATA);
+ outb(val & (~BIT_SHUTDOWN_ON), EC_SHUTDOWN_IO_PORT_DATA);
+ mmiowb();
+ /* a sufficiently long wait is needed here; exactly how many microseconds is unclear */
+ for (i = 0; i < 0x10000; i++)
+ delay();
+ outb(val | BIT_SHUTDOWN_ON, EC_SHUTDOWN_IO_PORT_DATA);
+ mmiowb();
+}
+
+static void yl2f89_shutdown(void)
+{
+ /* cpu-gpio0 output low */
+ LOONGSON_GPIODATA &= ~0x00000001;
+ /* cpu-gpio0 as output */
+ LOONGSON_GPIOIE &= ~0x00000001;
+}
+
+void mach_prepare_reboot(void)
+{
+ switch (mips_machtype) {
+ case MACH_LEMOTE_FL2F:
+ case MACH_LEMOTE_NAS:
+ case MACH_LEMOTE_LL2F:
+ fl2f_reboot();
+ break;
+ case MACH_LEMOTE_ML2F7:
+ ml2f_reboot();
+ break;
+ case MACH_LEMOTE_YL2F89:
+ yl2f89_reboot();
+ break;
+ default:
+ break;
+ }
+}
+
+void mach_prepare_shutdown(void)
+{
+ switch (mips_machtype) {
+ case MACH_LEMOTE_FL2F:
+ case MACH_LEMOTE_NAS:
+ case MACH_LEMOTE_LL2F:
+ fl2f_shutdown();
+ break;
+ case MACH_LEMOTE_ML2F7:
+ ml2f_shutdown();
+ break;
+ case MACH_LEMOTE_YL2F89:
+ yl2f89_shutdown();
+ break;
+ default:
+ break;
+ }
+}
diff --git a/arch/mips/loongson32/Kconfig b/arch/mips/loongson32/Kconfig
new file mode 100644
index 000000000..e27879b48
--- /dev/null
+++ b/arch/mips/loongson32/Kconfig
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0
+if MACH_LOONGSON32
+
+choice
+ prompt "Machine Type"
+
+config LOONGSON1_LS1B
+ bool "Loongson LS1B board"
+ select CEVT_R4K if !MIPS_EXTERNAL_TIMER
+ select CSRC_R4K if !MIPS_EXTERNAL_TIMER
+ select SYS_HAS_CPU_LOONGSON1B
+ select DMA_NONCOHERENT
+ select BOOT_ELF32
+ select IRQ_MIPS_CPU
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_HAS_EARLY_PRINTK
+ select USE_GENERIC_EARLY_PRINTK_8250
+ select COMMON_CLK
+
+config LOONGSON1_LS1C
+ bool "Loongson LS1C board"
+ select CEVT_R4K if !MIPS_EXTERNAL_TIMER
+ select CSRC_R4K if !MIPS_EXTERNAL_TIMER
+ select SYS_HAS_CPU_LOONGSON1C
+ select DMA_NONCOHERENT
+ select BOOT_ELF32
+ select IRQ_MIPS_CPU
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_HAS_EARLY_PRINTK
+ select USE_GENERIC_EARLY_PRINTK_8250
+ select COMMON_CLK
+endchoice
+
+menuconfig CEVT_CSRC_LS1X
+ bool "Use PWM Timer for clockevent/clocksource"
+ select MIPS_EXTERNAL_TIMER
+ depends on CPU_LOONGSON32
+ help
+ This option changes the default clockevent/clocksource to PWM Timer,
+ and is required by Loongson1 CPUFreq support.
+
+ If unsure, say N.
+
+choice
+ prompt "Select clockevent/clocksource"
+ depends on CEVT_CSRC_LS1X
+ default TIMER_USE_PWM0
+
+config TIMER_USE_PWM0
+ bool "Use PWM Timer 0"
+ help
+ Use PWM Timer 0 as the default clockevent/clocksource.
+
+config TIMER_USE_PWM1
+ bool "Use PWM Timer 1"
+ help
+ Use PWM Timer 1 as the default clockevent/clocksource.
+
+config TIMER_USE_PWM2
+ bool "Use PWM Timer 2"
+ help
+ Use PWM Timer 2 as the default clockevent/clocksource.
+
+config TIMER_USE_PWM3
+ bool "Use PWM Timer 3"
+ help
+ Use PWM Timer 3 as the default clockevent/clocksource.
+
+endchoice
+
+endif # MACH_LOONGSON32
diff --git a/arch/mips/loongson32/Makefile b/arch/mips/loongson32/Makefile
new file mode 100644
index 000000000..ba10954b4
--- /dev/null
+++ b/arch/mips/loongson32/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Common code for all Loongson 1 based systems
+#
+
+obj-$(CONFIG_MACH_LOONGSON32) += common/
+
+#
+# Loongson LS1B board
+#
+
+obj-$(CONFIG_LOONGSON1_LS1B) += ls1b/
+
+#
+# Loongson LS1C board
+#
+
+obj-$(CONFIG_LOONGSON1_LS1C) += ls1c/
diff --git a/arch/mips/loongson32/Platform b/arch/mips/loongson32/Platform
new file mode 100644
index 000000000..3b9673e7a
--- /dev/null
+++ b/arch/mips/loongson32/Platform
@@ -0,0 +1,3 @@
+cflags-$(CONFIG_CPU_LOONGSON32) += -march=mips32r2 -Wa,--trap
+cflags-$(CONFIG_MACH_LOONGSON32) += -I$(srctree)/arch/mips/include/asm/mach-loongson32
+load-$(CONFIG_CPU_LOONGSON32) += 0xffffffff80200000
diff --git a/arch/mips/loongson32/common/Makefile b/arch/mips/loongson32/common/Makefile
new file mode 100644
index 000000000..7b49c8260
--- /dev/null
+++ b/arch/mips/loongson32/common/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for common code of loongson1 based machines.
+#
+
+obj-y += time.o irq.o platform.o prom.o reset.o setup.o
diff --git a/arch/mips/loongson32/common/irq.c b/arch/mips/loongson32/common/irq.c
new file mode 100644
index 000000000..9a50070f7
--- /dev/null
+++ b/arch/mips/loongson32/common/irq.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/irq_cpu.h>
+
+#include <loongson1.h>
+#include <irq.h>
+
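+/*
+ * Each LS1X interrupt controller bank is a block of registers with a
+ * 0x18-byte stride: ISR, IEN, SET, CLR, POL and EDGE.
+ */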
+#define LS1X_INTC_REG(n, x) \
+ ((void __iomem *)KSEG1ADDR(LS1X_INTC_BASE + (n * 0x18) + (x)))
+
+#define LS1X_INTC_INTISR(n) LS1X_INTC_REG(n, 0x0)
+#define LS1X_INTC_INTIEN(n) LS1X_INTC_REG(n, 0x4)
+#define LS1X_INTC_INTSET(n) LS1X_INTC_REG(n, 0x8)
+#define LS1X_INTC_INTCLR(n) LS1X_INTC_REG(n, 0xc)
+#define LS1X_INTC_INTPOL(n) LS1X_INTC_REG(n, 0x10)
+#define LS1X_INTC_INTEDGE(n) LS1X_INTC_REG(n, 0x14)
+
+static void ls1x_irq_ack(struct irq_data *d)
+{
+ unsigned int bit = (d->irq - LS1X_IRQ_BASE) & 0x1f;
+ unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5;
+
+ __raw_writel(__raw_readl(LS1X_INTC_INTCLR(n))
+ | (1 << bit), LS1X_INTC_INTCLR(n));
+}
+
+static void ls1x_irq_mask(struct irq_data *d)
+{
+ unsigned int bit = (d->irq - LS1X_IRQ_BASE) & 0x1f;
+ unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5;
+
+ __raw_writel(__raw_readl(LS1X_INTC_INTIEN(n))
+ & ~(1 << bit), LS1X_INTC_INTIEN(n));
+}
+
+static void ls1x_irq_mask_ack(struct irq_data *d)
+{
+ unsigned int bit = (d->irq - LS1X_IRQ_BASE) & 0x1f;
+ unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5;
+
+ __raw_writel(__raw_readl(LS1X_INTC_INTIEN(n))
+ & ~(1 << bit), LS1X_INTC_INTIEN(n));
+ __raw_writel(__raw_readl(LS1X_INTC_INTCLR(n))
+ | (1 << bit), LS1X_INTC_INTCLR(n));
+}
+
+static void ls1x_irq_unmask(struct irq_data *d)
+{
+ unsigned int bit = (d->irq - LS1X_IRQ_BASE) & 0x1f;
+ unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5;
+
+ __raw_writel(__raw_readl(LS1X_INTC_INTIEN(n))
+ | (1 << bit), LS1X_INTC_INTIEN(n));
+}
+
+static int ls1x_irq_settype(struct irq_data *d, unsigned int type)
+{
+ unsigned int bit = (d->irq - LS1X_IRQ_BASE) & 0x1f;
+ unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ __raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
+ | (1 << bit), LS1X_INTC_INTPOL(n));
+ __raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
+ & ~(1 << bit), LS1X_INTC_INTEDGE(n));
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ __raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
+ & ~(1 << bit), LS1X_INTC_INTPOL(n));
+ __raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
+ & ~(1 << bit), LS1X_INTC_INTEDGE(n));
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ __raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
+ | (1 << bit), LS1X_INTC_INTPOL(n));
+ __raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
+ | (1 << bit), LS1X_INTC_INTEDGE(n));
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ __raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
+ & ~(1 << bit), LS1X_INTC_INTPOL(n));
+ __raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
+ | (1 << bit), LS1X_INTC_INTEDGE(n));
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ __raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
+ & ~(1 << bit), LS1X_INTC_INTPOL(n));
+ __raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
+ | (1 << bit), LS1X_INTC_INTEDGE(n));
+ break;
+ case IRQ_TYPE_NONE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct irq_chip ls1x_irq_chip = {
+ .name = "LS1X-INTC",
+ .irq_ack = ls1x_irq_ack,
+ .irq_mask = ls1x_irq_mask,
+ .irq_mask_ack = ls1x_irq_mask_ack,
+ .irq_unmask = ls1x_irq_unmask,
+ .irq_set_type = ls1x_irq_settype,
+};
+
+static void ls1x_irq_dispatch(int n)
+{
+ u32 int_status, irq;
+
+ /* Get pending sources, masked by current enables */
+ int_status = __raw_readl(LS1X_INTC_INTISR(n)) &
+ __raw_readl(LS1X_INTC_INTIEN(n));
+
+ if (int_status) {
+ irq = LS1X_IRQ(n, __ffs(int_status));
+ do_IRQ(irq);
+ }
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned int pending;
+
+ pending = read_c0_cause() & read_c0_status() & ST0_IM;
+
+ if (pending & CAUSEF_IP7)
+ do_IRQ(TIMER_IRQ);
+ else if (pending & CAUSEF_IP2)
+ ls1x_irq_dispatch(0); /* INT0 */
+ else if (pending & CAUSEF_IP3)
+ ls1x_irq_dispatch(1); /* INT1 */
+ else if (pending & CAUSEF_IP4)
+ ls1x_irq_dispatch(2); /* INT2 */
+ else if (pending & CAUSEF_IP5)
+ ls1x_irq_dispatch(3); /* INT3 */
+ else if (pending & CAUSEF_IP6)
+ ls1x_irq_dispatch(4); /* INT4 */
+ else
+ spurious_interrupt();
+
+}
+
+static void __init ls1x_irq_init(int base)
+{
+ int n;
+
+ /* Disable and clear all pending interrupts,
+ * and set up all IRQs as high-level triggered.
+ */
+ for (n = 0; n < INTN; n++) {
+ __raw_writel(0x0, LS1X_INTC_INTIEN(n));
+ __raw_writel(0xffffffff, LS1X_INTC_INTCLR(n));
+ __raw_writel(0xffffffff, LS1X_INTC_INTPOL(n));
+ /* set DMA0, DMA1 and DMA2 to edge trigger */
+ __raw_writel(n ? 0x0 : 0xe000, LS1X_INTC_INTEDGE(n));
+ }
+
+ for (n = base; n < NR_IRQS; n++) {
+ irq_set_chip_and_handler(n, &ls1x_irq_chip,
+ handle_level_irq);
+ }
+
+ if (request_irq(INT0_IRQ, no_action, IRQF_NO_THREAD, "cascade", NULL))
+ pr_err("Failed to request irq %d (cascade)\n", INT0_IRQ);
+ if (request_irq(INT1_IRQ, no_action, IRQF_NO_THREAD, "cascade", NULL))
+ pr_err("Failed to request irq %d (cascade)\n", INT1_IRQ);
+ if (request_irq(INT2_IRQ, no_action, IRQF_NO_THREAD, "cascade", NULL))
+ pr_err("Failed to request irq %d (cascade)\n", INT2_IRQ);
+ if (request_irq(INT3_IRQ, no_action, IRQF_NO_THREAD, "cascade", NULL))
+ pr_err("Failed to request irq %d (cascade)\n", INT3_IRQ);
+#if defined(CONFIG_LOONGSON1_LS1C)
+ if (request_irq(INT4_IRQ, no_action, IRQF_NO_THREAD, "cascade", NULL))
+ pr_err("Failed to request irq %d (cascade)\n", INT4_IRQ);
+#endif
+}
+
+void __init arch_init_irq(void)
+{
+ mips_cpu_irq_init();
+ ls1x_irq_init(LS1X_IRQ_BASE);
+}
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
new file mode 100644
index 000000000..311dc1580
--- /dev/null
+++ b/arch/mips/loongson32/common/platform.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2011-2016 Zhang, Keguang <keguang.zhang@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/mtd/partitions.h>
+#include <linux/sizes.h>
+#include <linux/phy.h>
+#include <linux/serial_8250.h>
+#include <linux/stmmac.h>
+#include <linux/usb/ehci_pdriver.h>
+
+#include <platform.h>
+#include <loongson1.h>
+#include <cpufreq.h>
+#include <dma.h>
+#include <nand.h>
+
+/* 8250/16550 compatible UART */
+#define LS1X_UART(_id) \
+ { \
+ .mapbase = LS1X_UART ## _id ## _BASE, \
+ .irq = LS1X_UART ## _id ## _IRQ, \
+ .iotype = UPIO_MEM, \
+ .flags = UPF_IOREMAP | UPF_FIXED_TYPE, \
+ .type = PORT_16550A, \
+ }
+
+static struct plat_serial8250_port ls1x_serial8250_pdata[] = {
+ LS1X_UART(0),
+ LS1X_UART(1),
+ LS1X_UART(2),
+ LS1X_UART(3),
+ {},
+};
+
+struct platform_device ls1x_uart_pdev = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = ls1x_serial8250_pdata,
+ },
+};
+
+void __init ls1x_serial_set_uartclk(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct plat_serial8250_port *p;
+
+ clk = clk_get(&pdev->dev, pdev->name);
+ if (IS_ERR(clk)) {
+ pr_err("unable to get %s clock, err=%ld",
+ pdev->name, PTR_ERR(clk));
+ return;
+ }
+ clk_prepare_enable(clk);
+
+ for (p = pdev->dev.platform_data; p->flags != 0; ++p)
+ p->uartclk = clk_get_rate(clk);
+}
+
+/* CPUFreq */
+static struct plat_ls1x_cpufreq ls1x_cpufreq_pdata = {
+ .clk_name = "cpu_clk",
+ .osc_clk_name = "osc_clk",
+ .max_freq = 266 * 1000,
+ .min_freq = 33 * 1000,
+};
+
+struct platform_device ls1x_cpufreq_pdev = {
+ .name = "ls1x-cpufreq",
+ .dev = {
+ .platform_data = &ls1x_cpufreq_pdata,
+ },
+};
+
+/* Synopsys Ethernet GMAC */
+static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = {
+ .phy_mask = 0,
+};
+
+static struct stmmac_dma_cfg ls1x_eth_dma_cfg = {
+ .pbl = 1,
+};
+
+int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
+{
+ struct plat_stmmacenet_data *plat_dat = NULL;
+ u32 val;
+
+ val = __raw_readl(LS1X_MUX_CTRL1);
+
+#if defined(CONFIG_LOONGSON1_LS1B)
+ plat_dat = dev_get_platdata(&pdev->dev);
+ if (plat_dat->bus_id) {
+ __raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 |
+ GMAC1_USE_UART0, LS1X_MUX_CTRL0);
+ switch (plat_dat->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ val |= (GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
+ break;
+ default:
+ pr_err("unsupported mii mode %d\n",
+ plat_dat->phy_interface);
+ return -ENOTSUPP;
+ }
+ val &= ~GMAC1_SHUT;
+ } else {
+ switch (plat_dat->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ val |= (GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
+ break;
+ default:
+ pr_err("unsupported mii mode %d\n",
+ plat_dat->phy_interface);
+ return -ENOTSUPP;
+ }
+ val &= ~GMAC0_SHUT;
+ }
+ __raw_writel(val, LS1X_MUX_CTRL1);
+#elif defined(CONFIG_LOONGSON1_LS1C)
+ plat_dat = dev_get_platdata(&pdev->dev);
+
+ val &= ~PHY_INTF_SELI;
+ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII)
+ val |= 0x4 << PHY_INTF_SELI_SHIFT;
+ __raw_writel(val, LS1X_MUX_CTRL1);
+
+ val = __raw_readl(LS1X_MUX_CTRL0);
+ __raw_writel(val & (~GMAC_SHUT), LS1X_MUX_CTRL0);
+#endif
+
+ return 0;
+}
+
+static struct plat_stmmacenet_data ls1x_eth0_pdata = {
+ .bus_id = 0,
+ .phy_addr = -1,
+#if defined(CONFIG_LOONGSON1_LS1B)
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+#elif defined(CONFIG_LOONGSON1_LS1C)
+ .phy_interface = PHY_INTERFACE_MODE_RMII,
+#endif
+ .mdio_bus_data = &ls1x_mdio_bus_data,
+ .dma_cfg = &ls1x_eth_dma_cfg,
+ .has_gmac = 1,
+ .tx_coe = 1,
+ .rx_queues_to_use = 1,
+ .tx_queues_to_use = 1,
+ .init = ls1x_eth_mux_init,
+};
+
+static struct resource ls1x_eth0_resources[] = {
+ [0] = {
+ .start = LS1X_GMAC0_BASE,
+ .end = LS1X_GMAC0_BASE + SZ_64K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = "macirq",
+ .start = LS1X_GMAC0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device ls1x_eth0_pdev = {
+ .name = "stmmaceth",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(ls1x_eth0_resources),
+ .resource = ls1x_eth0_resources,
+ .dev = {
+ .platform_data = &ls1x_eth0_pdata,
+ },
+};
+
+#ifdef CONFIG_LOONGSON1_LS1B
+static struct plat_stmmacenet_data ls1x_eth1_pdata = {
+ .bus_id = 1,
+ .phy_addr = -1,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+ .mdio_bus_data = &ls1x_mdio_bus_data,
+ .dma_cfg = &ls1x_eth_dma_cfg,
+ .has_gmac = 1,
+ .tx_coe = 1,
+ .rx_queues_to_use = 1,
+ .tx_queues_to_use = 1,
+ .init = ls1x_eth_mux_init,
+};
+
+static struct resource ls1x_eth1_resources[] = {
+ [0] = {
+ .start = LS1X_GMAC1_BASE,
+ .end = LS1X_GMAC1_BASE + SZ_64K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = "macirq",
+ .start = LS1X_GMAC1_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device ls1x_eth1_pdev = {
+ .name = "stmmaceth",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(ls1x_eth1_resources),
+ .resource = ls1x_eth1_resources,
+ .dev = {
+ .platform_data = &ls1x_eth1_pdata,
+ },
+};
+#endif /* CONFIG_LOONGSON1_LS1B */
+
+/* GPIO */
+static struct resource ls1x_gpio0_resources[] = {
+ [0] = {
+ .start = LS1X_GPIO0_BASE,
+ .end = LS1X_GPIO0_BASE + SZ_4 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device ls1x_gpio0_pdev = {
+ .name = "ls1x-gpio",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(ls1x_gpio0_resources),
+ .resource = ls1x_gpio0_resources,
+};
+
+static struct resource ls1x_gpio1_resources[] = {
+ [0] = {
+ .start = LS1X_GPIO1_BASE,
+ .end = LS1X_GPIO1_BASE + SZ_4 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device ls1x_gpio1_pdev = {
+ .name = "ls1x-gpio",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(ls1x_gpio1_resources),
+ .resource = ls1x_gpio1_resources,
+};
+
+/* USB EHCI */
+static u64 ls1x_ehci_dmamask = DMA_BIT_MASK(32);
+
+static struct resource ls1x_ehci_resources[] = {
+ [0] = {
+ .start = LS1X_EHCI_BASE,
+ .end = LS1X_EHCI_BASE + SZ_32K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = LS1X_EHCI_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct usb_ehci_pdata ls1x_ehci_pdata = {
+};
+
+struct platform_device ls1x_ehci_pdev = {
+ .name = "ehci-platform",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ls1x_ehci_resources),
+ .resource = ls1x_ehci_resources,
+ .dev = {
+ .dma_mask = &ls1x_ehci_dmamask,
+ .platform_data = &ls1x_ehci_pdata,
+ },
+};
+
+/* Real Time Clock */
+void __init ls1x_rtc_set_extclk(struct platform_device *pdev)
+{
+ u32 val = __raw_readl(LS1X_RTC_CTRL);
+
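+	/* Enable the external RTC clock if the controller does not already report it as usable. */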
+ if (!(val & RTC_EXTCLK_OK))
+ __raw_writel(val | RTC_EXTCLK_EN, LS1X_RTC_CTRL);
+}
+
+struct platform_device ls1x_rtc_pdev = {
+ .name = "ls1x-rtc",
+ .id = -1,
+};
+
+/* Watchdog */
+static struct resource ls1x_wdt_resources[] = {
+ {
+ .start = LS1X_WDT_BASE,
+ .end = LS1X_WDT_BASE + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device ls1x_wdt_pdev = {
+ .name = "ls1x-wdt",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ls1x_wdt_resources),
+ .resource = ls1x_wdt_resources,
+};
diff --git a/arch/mips/loongson32/common/prom.c b/arch/mips/loongson32/common/prom.c
new file mode 100644
index 000000000..c133b5adf
--- /dev/null
+++ b/arch/mips/loongson32/common/prom.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Modified from arch/mips/pnx833x/common/prom.c.
+ */
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/serial_reg.h>
+#include <asm/fw/fw.h>
+
+#include <loongson1.h>
+
+unsigned long memsize;
+
+void __init prom_init(void)
+{
+ void __iomem *uart_base;
+
+ fw_init_cmdline();
+
+ memsize = fw_getenvl("memsize");
+	if (!memsize)
+ memsize = DEFAULT_MEMSIZE;
+
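+	/* Pick the early-printk UART that matches the console= argument, defaulting to UART0. */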
+ if (strstr(arcs_cmdline, "console=ttyS3"))
+ uart_base = ioremap(LS1X_UART3_BASE, 0x0f);
+ else if (strstr(arcs_cmdline, "console=ttyS2"))
+ uart_base = ioremap(LS1X_UART2_BASE, 0x0f);
+ else if (strstr(arcs_cmdline, "console=ttyS1"))
+ uart_base = ioremap(LS1X_UART1_BASE, 0x0f);
+ else
+ uart_base = ioremap(LS1X_UART0_BASE, 0x0f);
+ setup_8250_early_printk_port((unsigned long)uart_base, 0, 0);
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
+
+void __init plat_mem_setup(void)
+{
+ memblock_add(0x0, (memsize << 20));
+}
diff --git a/arch/mips/loongson32/common/reset.c b/arch/mips/loongson32/common/reset.c
new file mode 100644
index 000000000..0c7399b30
--- /dev/null
+++ b/arch/mips/loongson32/common/reset.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ */
+
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/sizes.h>
+#include <asm/idle.h>
+#include <asm/reboot.h>
+
+#include <loongson1.h>
+
+static void __iomem *wdt_reg_base;
+
+static void ls1x_halt(void)
+{
+ while (1) {
+ if (cpu_wait)
+ cpu_wait();
+ }
+}
+
+static void ls1x_restart(char *command)
+{
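+	/* Arm the watchdog with the shortest timeout to force an immediate reset. */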
+ __raw_writel(0x1, wdt_reg_base + WDT_EN);
+ __raw_writel(0x1, wdt_reg_base + WDT_TIMER);
+ __raw_writel(0x1, wdt_reg_base + WDT_SET);
+
+ ls1x_halt();
+}
+
+static void ls1x_power_off(void)
+{
+ ls1x_halt();
+}
+
+static int __init ls1x_reboot_setup(void)
+{
+ wdt_reg_base = ioremap(LS1X_WDT_BASE, (SZ_4 + SZ_8));
+ if (!wdt_reg_base)
+ panic("Failed to remap watchdog registers");
+
+ _machine_restart = ls1x_restart;
+ _machine_halt = ls1x_halt;
+ pm_power_off = ls1x_power_off;
+
+ return 0;
+}
+
+arch_initcall(ls1x_reboot_setup);
diff --git a/arch/mips/loongson32/common/setup.c b/arch/mips/loongson32/common/setup.c
new file mode 100644
index 000000000..4733fe037
--- /dev/null
+++ b/arch/mips/loongson32/common/setup.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ */
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <asm/cpu-info.h>
+#include <asm/bootinfo.h>
+
+const char *get_system_type(void)
+{
+	unsigned int processor_id = current_cpu_data.processor_id;
+
+ switch (processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON1B:
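+		/* LS1C reports the same PRID revision as LS1B, so the board type is taken from Kconfig. */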
+#if defined(CONFIG_LOONGSON1_LS1B)
+ return "LOONGSON LS1B";
+#elif defined(CONFIG_LOONGSON1_LS1C)
+ return "LOONGSON LS1C";
+#endif
+ default:
+ return "LOONGSON (unknown)";
+ }
+}
diff --git a/arch/mips/loongson32/common/time.c b/arch/mips/loongson32/common/time.c
new file mode 100644
index 000000000..459b15c96
--- /dev/null
+++ b/arch/mips/loongson32/common/time.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/sizes.h>
+#include <asm/time.h>
+
+#include <loongson1.h>
+#include <platform.h>
+
+#ifdef CONFIG_CEVT_CSRC_LS1X
+
+#if defined(CONFIG_TIMER_USE_PWM1)
+#define LS1X_TIMER_BASE LS1X_PWM1_BASE
+#define LS1X_TIMER_IRQ LS1X_PWM1_IRQ
+
+#elif defined(CONFIG_TIMER_USE_PWM2)
+#define LS1X_TIMER_BASE LS1X_PWM2_BASE
+#define LS1X_TIMER_IRQ LS1X_PWM2_IRQ
+
+#elif defined(CONFIG_TIMER_USE_PWM3)
+#define LS1X_TIMER_BASE LS1X_PWM3_BASE
+#define LS1X_TIMER_IRQ LS1X_PWM3_IRQ
+
+#else
+#define LS1X_TIMER_BASE LS1X_PWM0_BASE
+#define LS1X_TIMER_IRQ LS1X_PWM0_IRQ
+#endif
+
+DEFINE_RAW_SPINLOCK(ls1x_timer_lock);
+
+static void __iomem *timer_reg_base;
+static uint32_t ls1x_jiffies_per_tick;
+
+static inline void ls1x_pwmtimer_set_period(uint32_t period)
+{
+ __raw_writel(period, timer_reg_base + PWM_HRC);
+ __raw_writel(period, timer_reg_base + PWM_LRC);
+}
+
+static inline void ls1x_pwmtimer_restart(void)
+{
+ __raw_writel(0x0, timer_reg_base + PWM_CNT);
+ __raw_writel(INT_EN | CNT_EN, timer_reg_base + PWM_CTRL);
+}
+
+void __init ls1x_pwmtimer_init(void)
+{
+ timer_reg_base = ioremap(LS1X_TIMER_BASE, SZ_16);
+ if (!timer_reg_base)
+ panic("Failed to remap timer registers");
+
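+	/* The PWM timer is clocked at mips_hpt_frequency, so this is the count for one kernel tick. */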
+ ls1x_jiffies_per_tick = DIV_ROUND_CLOSEST(mips_hpt_frequency, HZ);
+
+ ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
+ ls1x_pwmtimer_restart();
+}
+
+static u64 ls1x_clocksource_read(struct clocksource *cs)
+{
+ unsigned long flags;
+ int count;
+ u32 jifs;
+ static int old_count;
+ static u32 old_jifs;
+
+ raw_spin_lock_irqsave(&ls1x_timer_lock, flags);
+ /*
+ * Although our caller may have the read side of xtime_lock,
+ * this is now a seqlock, and we are cheating in this routine
+ * by having side effects on state that we cannot undo if
+ * there is a collision on the seqlock and our caller has to
+ * retry. (Namely, old_jifs and old_count.) So we must treat
+ * jiffies as volatile despite the lock. We read jiffies
+ * before latching the timer count to guarantee that although
+ * the jiffies value might be older than the count (that is,
+ * the counter may underflow between the last point where
+ * jiffies was incremented and the point where we latch the
+ * count), it cannot be newer.
+ */
+ jifs = jiffies;
+ /* read the count */
+ count = __raw_readl(timer_reg_base + PWM_CNT);
+
+ /*
+ * It's possible for count to appear to go the wrong way for this
+ * reason:
+ *
+ * The timer counter underflows, but we haven't handled the resulting
+ * interrupt and incremented jiffies yet.
+ *
+ * Previous attempts to handle these cases intelligently were buggy, so
+ * we just do the simple thing now.
+ */
+ if (count < old_count && jifs == old_jifs)
+ count = old_count;
+
+ old_count = count;
+ old_jifs = jifs;
+
+ raw_spin_unlock_irqrestore(&ls1x_timer_lock, flags);
+
+ return (u64) (jifs * ls1x_jiffies_per_tick) + count;
+}
+
+static struct clocksource ls1x_clocksource = {
+ .name = "ls1x-pwmtimer",
+ .read = ls1x_clocksource_read,
+ .mask = CLOCKSOURCE_MASK(24),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static irqreturn_t ls1x_clockevent_isr(int irq, void *devid)
+{
+ struct clock_event_device *cd = devid;
+
+ ls1x_pwmtimer_restart();
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static int ls1x_clockevent_set_state_periodic(struct clock_event_device *cd)
+{
+ raw_spin_lock(&ls1x_timer_lock);
+ ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
+ ls1x_pwmtimer_restart();
+ __raw_writel(INT_EN | CNT_EN, timer_reg_base + PWM_CTRL);
+ raw_spin_unlock(&ls1x_timer_lock);
+
+ return 0;
+}
+
+static int ls1x_clockevent_tick_resume(struct clock_event_device *cd)
+{
+ raw_spin_lock(&ls1x_timer_lock);
+ __raw_writel(INT_EN | CNT_EN, timer_reg_base + PWM_CTRL);
+ raw_spin_unlock(&ls1x_timer_lock);
+
+ return 0;
+}
+
+static int ls1x_clockevent_set_state_shutdown(struct clock_event_device *cd)
+{
+ raw_spin_lock(&ls1x_timer_lock);
+ __raw_writel(__raw_readl(timer_reg_base + PWM_CTRL) & ~CNT_EN,
+ timer_reg_base + PWM_CTRL);
+ raw_spin_unlock(&ls1x_timer_lock);
+
+ return 0;
+}
+
+static int ls1x_clockevent_set_next(unsigned long evt,
+ struct clock_event_device *cd)
+{
+ raw_spin_lock(&ls1x_timer_lock);
+ ls1x_pwmtimer_set_period(evt);
+ ls1x_pwmtimer_restart();
+ raw_spin_unlock(&ls1x_timer_lock);
+
+ return 0;
+}
+
+static struct clock_event_device ls1x_clockevent = {
+ .name = "ls1x-pwmtimer",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .rating = 300,
+ .irq = LS1X_TIMER_IRQ,
+ .set_next_event = ls1x_clockevent_set_next,
+ .set_state_shutdown = ls1x_clockevent_set_state_shutdown,
+ .set_state_periodic = ls1x_clockevent_set_state_periodic,
+ .set_state_oneshot = ls1x_clockevent_set_state_shutdown,
+ .tick_resume = ls1x_clockevent_tick_resume,
+};
+
+static void __init ls1x_time_init(void)
+{
+ struct clock_event_device *cd = &ls1x_clockevent;
+ int ret;
+
+ if (!mips_hpt_frequency)
+ panic("Invalid timer clock rate");
+
+ ls1x_pwmtimer_init();
+
+ clockevent_set_clock(cd, mips_hpt_frequency);
+ cd->max_delta_ns = clockevent_delta2ns(0xffffff, cd);
+ cd->max_delta_ticks = 0xffffff;
+ cd->min_delta_ns = clockevent_delta2ns(0x000300, cd);
+ cd->min_delta_ticks = 0x000300;
+ cd->cpumask = cpumask_of(smp_processor_id());
+ clockevents_register_device(cd);
+
+ ls1x_clocksource.rating = 200 + mips_hpt_frequency / 10000000;
+ ret = clocksource_register_hz(&ls1x_clocksource, mips_hpt_frequency);
+ if (ret)
+		panic("Failed to register clocksource: %d\n", ret);
+
+ if (request_irq(LS1X_TIMER_IRQ, ls1x_clockevent_isr,
+ IRQF_PERCPU | IRQF_TIMER, "ls1x-pwmtimer",
+ &ls1x_clockevent))
+ pr_err("Failed to register ls1x-pwmtimer interrupt\n");
+}
+#endif /* CONFIG_CEVT_CSRC_LS1X */
+
+void __init plat_time_init(void)
+{
+ struct clk *clk = NULL;
+
+ /* initialize LS1X clocks */
+ ls1x_clk_init();
+
+#ifdef CONFIG_CEVT_CSRC_LS1X
+ /* setup LS1X PWM timer */
+ clk = clk_get(NULL, "ls1x-pwmtimer");
+ if (IS_ERR(clk))
+ panic("unable to get timer clock, err=%ld", PTR_ERR(clk));
+
+ mips_hpt_frequency = clk_get_rate(clk);
+ ls1x_time_init();
+#else
+ /* setup mips r4k timer */
+ clk = clk_get(NULL, "cpu_clk");
+ if (IS_ERR(clk))
+ panic("unable to get cpu clock, err=%ld", PTR_ERR(clk));
+
+ mips_hpt_frequency = clk_get_rate(clk) / 2;
+#endif /* CONFIG_CEVT_CSRC_LS1X */
+}
diff --git a/arch/mips/loongson32/ls1b/Makefile b/arch/mips/loongson32/ls1b/Makefile
new file mode 100644
index 000000000..33c574dc0
--- /dev/null
+++ b/arch/mips/loongson32/ls1b/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for loongson1B based machines.
+#
+
+obj-y += board.o
diff --git a/arch/mips/loongson32/ls1b/board.c b/arch/mips/loongson32/ls1b/board.c
new file mode 100644
index 000000000..727e06718
--- /dev/null
+++ b/arch/mips/loongson32/ls1b/board.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2011-2016 Zhang, Keguang <keguang.zhang@gmail.com>
+ */
+
+#include <linux/leds.h>
+#include <linux/mtd/partitions.h>
+#include <linux/sizes.h>
+
+#include <loongson1.h>
+#include <dma.h>
+#include <nand.h>
+#include <platform.h>
+
+static const struct gpio_led ls1x_gpio_leds[] __initconst = {
+ {
+ .name = "LED9",
+ .default_trigger = "heartbeat",
+ .gpio = 38,
+ .active_low = 1,
+ .default_state = LEDS_GPIO_DEFSTATE_OFF,
+ }, {
+ .name = "LED6",
+ .default_trigger = "nand-disk",
+ .gpio = 39,
+ .active_low = 1,
+ .default_state = LEDS_GPIO_DEFSTATE_OFF,
+ },
+};
+
+static const struct gpio_led_platform_data ls1x_led_pdata __initconst = {
+ .num_leds = ARRAY_SIZE(ls1x_gpio_leds),
+ .leds = ls1x_gpio_leds,
+};
+
+static struct platform_device *ls1b_platform_devices[] __initdata = {
+ &ls1x_uart_pdev,
+ &ls1x_cpufreq_pdev,
+ &ls1x_eth0_pdev,
+ &ls1x_eth1_pdev,
+ &ls1x_ehci_pdev,
+ &ls1x_gpio0_pdev,
+ &ls1x_gpio1_pdev,
+ &ls1x_rtc_pdev,
+ &ls1x_wdt_pdev,
+};
+
+static int __init ls1b_platform_init(void)
+{
+ ls1x_serial_set_uartclk(&ls1x_uart_pdev);
+
+ gpio_led_register_device(-1, &ls1x_led_pdata);
+
+ return platform_add_devices(ls1b_platform_devices,
+ ARRAY_SIZE(ls1b_platform_devices));
+}
+
+arch_initcall(ls1b_platform_init);
diff --git a/arch/mips/loongson32/ls1c/Makefile b/arch/mips/loongson32/ls1c/Makefile
new file mode 100644
index 000000000..1cf3aa264
--- /dev/null
+++ b/arch/mips/loongson32/ls1c/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for loongson1C based machines.
+#
+
+obj-y += board.o
diff --git a/arch/mips/loongson32/ls1c/board.c b/arch/mips/loongson32/ls1c/board.c
new file mode 100644
index 000000000..9dcfe9de5
--- /dev/null
+++ b/arch/mips/loongson32/ls1c/board.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2016 Yang Ling <gnaygnil@gmail.com>
+ */
+
+#include <platform.h>
+
+static struct platform_device *ls1c_platform_devices[] __initdata = {
+ &ls1x_uart_pdev,
+ &ls1x_eth0_pdev,
+ &ls1x_rtc_pdev,
+ &ls1x_wdt_pdev,
+};
+
+static int __init ls1c_platform_init(void)
+{
+ ls1x_serial_set_uartclk(&ls1x_uart_pdev);
+
+ return platform_add_devices(ls1c_platform_devices,
+ ARRAY_SIZE(ls1c_platform_devices));
+}
+
+arch_initcall(ls1c_platform_init);
diff --git a/arch/mips/loongson64/Kconfig b/arch/mips/loongson64/Kconfig
new file mode 100644
index 000000000..517f1f8e8
--- /dev/null
+++ b/arch/mips/loongson64/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+if MACH_LOONGSON64
+
+config RS780_HPET
+ bool "RS780/SBX00 HPET Timer"
+ depends on MACH_LOONGSON64
+ depends on BROKEN
+ select MIPS_EXTERNAL_TIMER
+ help
+	  This option enables the HPET timer of the AMD RS780/SBX00 chipset.
+
+	  Note: This driver performs some dangerous hacks. Please only enable
+	  it on RS780E systems.
+
+endif # MACH_LOONGSON64
diff --git a/arch/mips/loongson64/Makefile b/arch/mips/loongson64/Makefile
new file mode 100644
index 000000000..39c06f52b
--- /dev/null
+++ b/arch/mips/loongson64/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for Loongson-3 family machines
+#
+obj-$(CONFIG_MACH_LOONGSON64) += cop2-ex.o platform.o dma.o \
+			setup.o init.o env.o time.o reset.o
+
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_NUMA) += numa.o
+obj-$(CONFIG_RS780_HPET) += hpet.o
+obj-$(CONFIG_SUSPEND) += pm.o
+obj-$(CONFIG_PCI_QUIRKS) += vbios_quirk.o
+obj-$(CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION) += cpucfg-emul.o
diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform
new file mode 100644
index 000000000..e2354e128
--- /dev/null
+++ b/arch/mips/loongson64/Platform
@@ -0,0 +1,37 @@
+#
+# Loongson Processors' Support
+#
+
+
+cflags-$(CONFIG_CPU_LOONGSON64) += -Wa,--trap
+
+#
+# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
+# as MIPS64 R2; older versions as just R1. This leaves the possibility open
+# that GCC might generate R2 code for -march=loongson3a which is then rejected
+# by GAS. The cc-option test can't probe for this behaviour, so -march=loongson3a
+# can't easily be used safely within the kbuild framework.
+#
+ifeq ($(call cc-ifversion, -ge, 0409, y), y)
+ ifeq ($(call ld-ifversion, -ge, 225000000, y), y)
+ cflags-$(CONFIG_CPU_LOONGSON64) += \
+ $(call cc-option,-march=loongson3a -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
+ else
+ cflags-$(CONFIG_CPU_LOONGSON64) += \
+ $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
+ endif
+else
+ cflags-$(CONFIG_CPU_LOONGSON64) += \
+ $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
+endif
+
+# Some -march= flags enable MMI instructions, and GCC complains about that
+# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+cflags-y += $(call cc-option,-mno-loongson-mmi)
+
+#
+# Loongson Machines' Support
+#
+
+cflags-$(CONFIG_MACH_LOONGSON64) += -I$(srctree)/arch/mips/include/asm/mach-loongson64 -mno-branch-likely
+load-$(CONFIG_CPU_LOONGSON64) += 0xffffffff80200000
diff --git a/arch/mips/loongson64/cop2-ex.c b/arch/mips/loongson64/cop2-ex.c
new file mode 100644
index 000000000..00055d4b6
--- /dev/null
+++ b/arch/mips/loongson64/cop2-ex.c
@@ -0,0 +1,341 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Lemote Corporation.
+ * written by Huacai Chen <chenhc@lemote.com>
+ *
+ * based on arch/mips/cavium-octeon/cpu.c
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/notifier.h>
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+#include <linux/sched/signal.h>
+
+#include <asm/fpu.h>
+#include <asm/cop2.h>
+#include <asm/inst.h>
+#include <asm/branch.h>
+#include <asm/current.h>
+#include <asm/mipsregs.h>
+#include <asm/unaligned-emul.h>
+
+static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
+ void *data)
+{
+ unsigned int res, fpu_owned;
+ unsigned long ra, value, value_next;
+ union mips_instruction insn;
+ int fr = !test_thread_flag(TIF_32BIT_FPREGS);
+ struct pt_regs *regs = (struct pt_regs *)data;
+ void __user *addr = (void __user *)regs->cp0_badvaddr;
+ unsigned int __user *pc = (unsigned int __user *)exception_epc(regs);
+
+ ra = regs->regs[31];
+ __get_user(insn.word, pc);
+
+ switch (action) {
+ case CU2_EXCEPTION:
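+		/* A COP2 (Loongson EXT) instruction trapped: grant CU1/CU2 and make sure the FPU context is live. */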
+ preempt_disable();
+ fpu_owned = __is_fpu_owner();
+ if (!fr)
+ set_c0_status(ST0_CU1 | ST0_CU2);
+ else
+ set_c0_status(ST0_CU1 | ST0_CU2 | ST0_FR);
+ enable_fpu_hazard();
+ KSTK_STATUS(current) |= (ST0_CU1 | ST0_CU2);
+ if (fr)
+ KSTK_STATUS(current) |= ST0_FR;
+ else
+ KSTK_STATUS(current) &= ~ST0_FR;
+ /* If FPU is owned, we needn't init or restore fp */
+ if (!fpu_owned) {
+ set_thread_flag(TIF_USEDFPU);
+ init_fp_ctx(current);
+ _restore_fp(current);
+ }
+ preempt_enable();
+
+ return NOTIFY_STOP; /* Don't call default notifier */
+
+ case CU2_LWC2_OP:
+ if (insn.loongson3_lswc2_format.ls == 0)
+ goto sigbus;
+
+ if (insn.loongson3_lswc2_format.fr == 0) { /* gslq */
+ if (!access_ok(addr, 16))
+ goto sigbus;
+
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+
+ LoadDW(addr + 8, value_next, res);
+ if (res)
+ goto fault;
+
+ regs->regs[insn.loongson3_lswc2_format.rt] = value;
+ regs->regs[insn.loongson3_lswc2_format.rq] = value_next;
+ compute_return_epc(regs);
+ } else { /* gslqc1 */
+ if (!access_ok(addr, 16))
+ goto sigbus;
+
+ lose_fpu(1);
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+
+ LoadDW(addr + 8, value_next, res);
+ if (res)
+ goto fault;
+
+ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0, value);
+ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0, value_next);
+ compute_return_epc(regs);
+ own_fpu(1);
+ }
+ return NOTIFY_STOP; /* Don't call default notifier */
+
+ case CU2_SWC2_OP:
+ if (insn.loongson3_lswc2_format.ls == 0)
+ goto sigbus;
+
+ if (insn.loongson3_lswc2_format.fr == 0) { /* gssq */
+ if (!access_ok(addr, 16))
+ goto sigbus;
+
+ /* write upper 8 bytes first */
+ value_next = regs->regs[insn.loongson3_lswc2_format.rq];
+
+ StoreDW(addr + 8, value_next, res);
+ if (res)
+ goto fault;
+ value = regs->regs[insn.loongson3_lswc2_format.rt];
+
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+
+ compute_return_epc(regs);
+ } else { /* gssqc1 */
+ if (!access_ok(addr, 16))
+ goto sigbus;
+
+ lose_fpu(1);
+ value_next = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0);
+
+ StoreDW(addr + 8, value_next, res);
+ if (res)
+ goto fault;
+
+ value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0);
+
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+
+ compute_return_epc(regs);
+ own_fpu(1);
+ }
+ return NOTIFY_STOP; /* Don't call default notifier */
+
+ case CU2_LDC2_OP:
+ switch (insn.loongson3_lsdc2_format.opcode1) {
+ /*
+ * Loongson-3 overridden ldc2 instructions.
+ * opcode1 instruction
+ * 0x1 gslhx: load 2 bytes to GPR
+ * 0x2 gslwx: load 4 bytes to GPR
+ * 0x3 gsldx: load 8 bytes to GPR
+ * 0x6 gslwxc1: load 4 bytes to FPR
+ * 0x7 gsldxc1: load 8 bytes to FPR
+ */
+ case 0x1:
+ if (!access_ok(addr, 2))
+ goto sigbus;
+
+ LoadHW(addr, value, res);
+ if (res)
+ goto fault;
+
+ compute_return_epc(regs);
+ regs->regs[insn.loongson3_lsdc2_format.rt] = value;
+ break;
+ case 0x2:
+ if (!access_ok(addr, 4))
+ goto sigbus;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+
+ compute_return_epc(regs);
+ regs->regs[insn.loongson3_lsdc2_format.rt] = value;
+ break;
+ case 0x3:
+ if (!access_ok(addr, 8))
+ goto sigbus;
+
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+
+ compute_return_epc(regs);
+ regs->regs[insn.loongson3_lsdc2_format.rt] = value;
+ break;
+ case 0x6:
+ die_if_kernel("Unaligned FP access in kernel code", regs);
+ BUG_ON(!used_math());
+ if (!access_ok(addr, 4))
+ goto sigbus;
+
+ lose_fpu(1);
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+
+ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
+ compute_return_epc(regs);
+ own_fpu(1);
+
+ break;
+ case 0x7:
+ die_if_kernel("Unaligned FP access in kernel code", regs);
+ BUG_ON(!used_math());
+ if (!access_ok(addr, 8))
+ goto sigbus;
+
+ lose_fpu(1);
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+
+ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
+ compute_return_epc(regs);
+ own_fpu(1);
+ break;
+
+ }
+ return NOTIFY_STOP; /* Don't call default notifier */
+
+ case CU2_SDC2_OP:
+ switch (insn.loongson3_lsdc2_format.opcode1) {
+ /*
+ * Loongson-3 overridden sdc2 instructions.
+ * opcode1 instruction
+ * 0x1 gsshx: store 2 bytes from GPR
+ * 0x2 gsswx: store 4 bytes from GPR
+ * 0x3 gssdx: store 8 bytes from GPR
+ * 0x6 gsswxc1: store 4 bytes from FPR
+ * 0x7 gssdxc1: store 8 bytes from FPR
+ */
+ case 0x1:
+ if (!access_ok(addr, 2))
+ goto sigbus;
+
+ compute_return_epc(regs);
+ value = regs->regs[insn.loongson3_lsdc2_format.rt];
+
+ StoreHW(addr, value, res);
+ if (res)
+ goto fault;
+
+ break;
+ case 0x2:
+ if (!access_ok(addr, 4))
+ goto sigbus;
+
+ compute_return_epc(regs);
+ value = regs->regs[insn.loongson3_lsdc2_format.rt];
+
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+
+ break;
+ case 0x3:
+ if (!access_ok(addr, 8))
+ goto sigbus;
+
+ compute_return_epc(regs);
+ value = regs->regs[insn.loongson3_lsdc2_format.rt];
+
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+
+ break;
+
+ case 0x6:
+ die_if_kernel("Unaligned FP access in kernel code", regs);
+ BUG_ON(!used_math());
+
+ if (!access_ok(addr, 4))
+ goto sigbus;
+
+ lose_fpu(1);
+ value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);
+
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+
+ compute_return_epc(regs);
+ own_fpu(1);
+
+ break;
+ case 0x7:
+ die_if_kernel("Unaligned FP access in kernel code", regs);
+ BUG_ON(!used_math());
+
+ if (!access_ok(addr, 8))
+ goto sigbus;
+
+ lose_fpu(1);
+ value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);
+
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+
+ compute_return_epc(regs);
+ own_fpu(1);
+
+ break;
+ }
+ return NOTIFY_STOP; /* Don't call default notifier */
+ }
+
+ return NOTIFY_OK; /* Let default notifier send signals */
+
+fault:
+ /* roll back jump/branch */
+ regs->regs[31] = ra;
+ regs->cp0_epc = (unsigned long)pc;
+ /* Did we have an exception handler installed? */
+ if (fixup_exception(regs))
+ return NOTIFY_STOP; /* Don't call default notifier */
+
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGSEGV);
+
+ return NOTIFY_STOP; /* Don't call default notifier */
+
+sigbus:
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGBUS);
+
+ return NOTIFY_STOP; /* Don't call default notifier */
+}
+
+static int __init loongson_cu2_setup(void)
+{
+ return cu2_notifier(loongson_cu2_call, 0);
+}
+early_initcall(loongson_cu2_setup);
diff --git a/arch/mips/loongson64/cpucfg-emul.c b/arch/mips/loongson64/cpucfg-emul.c
new file mode 100644
index 000000000..630927e46
--- /dev/null
+++ b/arch/mips/loongson64/cpucfg-emul.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/elf.h>
+
+#include <loongson_regs.h>
+#include <cpucfg-emul.h>
+
+static bool is_loongson(struct cpuinfo_mips *c)
+{
+ switch (c->processor_id & PRID_COMP_MASK) {
+ case PRID_COMP_LEGACY:
+ return ((c->processor_id & PRID_IMP_MASK) ==
+ PRID_IMP_LOONGSON_64C);
+
+ case PRID_COMP_LOONGSON:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static u32 get_loongson_fprev(struct cpuinfo_mips *c)
+{
+ return c->fpu_id & LOONGSON_FPREV_MASK;
+}
+
+static bool cpu_has_uca(void)
+{
+ u32 diag = read_c0_diag();
+ u32 new_diag;
+
+ if (diag & LOONGSON_DIAG_UCAC)
+ /* UCA is already enabled. */
+ return true;
+
+ /* See if UCAC bit can be flipped on. This should be safe. */
+ new_diag = diag | LOONGSON_DIAG_UCAC;
+ write_c0_diag(new_diag);
+ new_diag = read_c0_diag();
+ write_c0_diag(diag);
+
+ return (new_diag & LOONGSON_DIAG_UCAC) != 0;
+}
+
+static void probe_uca(struct cpuinfo_mips *c)
+{
+ if (cpu_has_uca())
+ c->loongson3_cpucfg_data[0] |= LOONGSON_CFG1_LSUCA;
+}
+
+static void decode_loongson_config6(struct cpuinfo_mips *c)
+{
+ u32 config6 = read_c0_config6();
+
+ if (config6 & LOONGSON_CONF6_SFBEN)
+ c->loongson3_cpucfg_data[0] |= LOONGSON_CFG1_SFBP;
+ if (config6 & LOONGSON_CONF6_LLEXC)
+ c->loongson3_cpucfg_data[0] |= LOONGSON_CFG1_LLEXC;
+ if (config6 & LOONGSON_CONF6_SCRAND)
+ c->loongson3_cpucfg_data[0] |= LOONGSON_CFG1_SCRAND;
+}
+
+static void patch_cpucfg_sel1(struct cpuinfo_mips *c)
+{
+ u64 ases = c->ases;
+ u64 options = c->options;
+ u32 data = c->loongson3_cpucfg_data[0];
+
+ if (options & MIPS_CPU_FPU) {
+ data |= LOONGSON_CFG1_FP;
+ data |= get_loongson_fprev(c) << LOONGSON_CFG1_FPREV_OFFSET;
+ }
+ if (ases & MIPS_ASE_LOONGSON_MMI)
+ data |= LOONGSON_CFG1_MMI;
+ if (ases & MIPS_ASE_MSA)
+ data |= LOONGSON_CFG1_MSA1;
+
+ c->loongson3_cpucfg_data[0] = data;
+}
+
+static void patch_cpucfg_sel2(struct cpuinfo_mips *c)
+{
+ u64 ases = c->ases;
+ u64 options = c->options;
+ u32 data = c->loongson3_cpucfg_data[1];
+
+ if (ases & MIPS_ASE_LOONGSON_EXT)
+ data |= LOONGSON_CFG2_LEXT1;
+ if (ases & MIPS_ASE_LOONGSON_EXT2)
+ data |= LOONGSON_CFG2_LEXT2;
+ if (options & MIPS_CPU_LDPTE)
+ data |= LOONGSON_CFG2_LSPW;
+
+ if (ases & MIPS_ASE_VZ)
+ data |= LOONGSON_CFG2_LVZP;
+ else
+ data &= ~LOONGSON_CFG2_LVZREV;
+
+ c->loongson3_cpucfg_data[1] = data;
+}
+
+static void patch_cpucfg_sel3(struct cpuinfo_mips *c)
+{
+ u64 ases = c->ases;
+ u32 data = c->loongson3_cpucfg_data[2];
+
+ if (ases & MIPS_ASE_LOONGSON_CAM) {
+ data |= LOONGSON_CFG3_LCAMP;
+ } else {
+ data &= ~LOONGSON_CFG3_LCAMREV;
+ data &= ~LOONGSON_CFG3_LCAMNUM;
+ data &= ~LOONGSON_CFG3_LCAMKW;
+ data &= ~LOONGSON_CFG3_LCAMVW;
+ }
+
+ c->loongson3_cpucfg_data[2] = data;
+}
+
+void loongson3_cpucfg_synthesize_data(struct cpuinfo_mips *c)
+{
+ /* Only engage the logic on Loongson processors. */
+ if (!is_loongson(c))
+ return;
+
+ /* CPUs with CPUCFG support don't need to synthesize anything. */
+ if (cpu_has_cfg())
+ goto have_cpucfg_now;
+
+ c->loongson3_cpucfg_data[0] = 0;
+ c->loongson3_cpucfg_data[1] = 0;
+ c->loongson3_cpucfg_data[2] = 0;
+
+ /* Add CPUCFG features non-discoverable otherwise. */
+ switch (c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) {
+ case PRID_IMP_LOONGSON_64R | PRID_REV_LOONGSON2K_R1_0:
+ case PRID_IMP_LOONGSON_64R | PRID_REV_LOONGSON2K_R1_1:
+ case PRID_IMP_LOONGSON_64R | PRID_REV_LOONGSON2K_R1_2:
+ case PRID_IMP_LOONGSON_64R | PRID_REV_LOONGSON2K_R1_3:
+ decode_loongson_config6(c);
+ probe_uca(c);
+
+ c->loongson3_cpucfg_data[0] |= (LOONGSON_CFG1_LSLDR0 |
+ LOONGSON_CFG1_LSSYNCI | LOONGSON_CFG1_LLSYNC |
+ LOONGSON_CFG1_TGTSYNC);
+ c->loongson3_cpucfg_data[1] |= (LOONGSON_CFG2_LBT1 |
+ LOONGSON_CFG2_LBT2 | LOONGSON_CFG2_LPMP |
+ LOONGSON_CFG2_LPM_REV2);
+ c->loongson3_cpucfg_data[2] = 0;
+ break;
+
+ case PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R1:
+ c->loongson3_cpucfg_data[0] |= (LOONGSON_CFG1_LSLDR0 |
+ LOONGSON_CFG1_LSSYNCI | LOONGSON_CFG1_LSUCA |
+ LOONGSON_CFG1_LLSYNC | LOONGSON_CFG1_TGTSYNC);
+ c->loongson3_cpucfg_data[1] |= (LOONGSON_CFG2_LBT1 |
+ LOONGSON_CFG2_LPMP | LOONGSON_CFG2_LPM_REV1);
+ c->loongson3_cpucfg_data[2] |= (
+ LOONGSON_CFG3_LCAM_REV1 |
+ LOONGSON_CFG3_LCAMNUM_REV1 |
+ LOONGSON_CFG3_LCAMKW_REV1 |
+ LOONGSON_CFG3_LCAMVW_REV1);
+ break;
+
+ case PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3B_R1:
+ case PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3B_R2:
+ c->loongson3_cpucfg_data[0] |= (LOONGSON_CFG1_LSLDR0 |
+ LOONGSON_CFG1_LSSYNCI | LOONGSON_CFG1_LSUCA |
+ LOONGSON_CFG1_LLSYNC | LOONGSON_CFG1_TGTSYNC);
+ c->loongson3_cpucfg_data[1] |= (LOONGSON_CFG2_LBT1 |
+ LOONGSON_CFG2_LPMP | LOONGSON_CFG2_LPM_REV1);
+ c->loongson3_cpucfg_data[2] |= (
+ LOONGSON_CFG3_LCAM_REV1 |
+ LOONGSON_CFG3_LCAMNUM_REV1 |
+ LOONGSON_CFG3_LCAMKW_REV1 |
+ LOONGSON_CFG3_LCAMVW_REV1);
+ break;
+
+ case PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0:
+ case PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_1:
+ case PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R3_0:
+ case PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R3_1:
+ decode_loongson_config6(c);
+ probe_uca(c);
+
+ c->loongson3_cpucfg_data[0] |= (LOONGSON_CFG1_CNT64 |
+ LOONGSON_CFG1_LSLDR0 | LOONGSON_CFG1_LSPREF |
+ LOONGSON_CFG1_LSPREFX | LOONGSON_CFG1_LSSYNCI |
+ LOONGSON_CFG1_LLSYNC | LOONGSON_CFG1_TGTSYNC);
+ c->loongson3_cpucfg_data[1] |= (LOONGSON_CFG2_LBT1 |
+ LOONGSON_CFG2_LBT2 | LOONGSON_CFG2_LBTMMU |
+ LOONGSON_CFG2_LPMP | LOONGSON_CFG2_LPM_REV1 |
+ LOONGSON_CFG2_LVZ_REV1);
+ c->loongson3_cpucfg_data[2] |= (LOONGSON_CFG3_LCAM_REV1 |
+ LOONGSON_CFG3_LCAMNUM_REV1 |
+ LOONGSON_CFG3_LCAMKW_REV1 |
+ LOONGSON_CFG3_LCAMVW_REV1);
+ break;
+
+ default:
+ /* It is possible that some future Loongson cores still do
+ * not have CPUCFG, so do not emulate anything for these
+ * cores.
+ */
+ return;
+ }
+
+ /* This feature is set by firmware, but all known Loongson-64 systems
+ * are configured this way.
+ */
+ c->loongson3_cpucfg_data[0] |= LOONGSON_CFG1_CDMAP;
+
+ /* Patch in dynamically probed bits. */
+ patch_cpucfg_sel1(c);
+ patch_cpucfg_sel2(c);
+ patch_cpucfg_sel3(c);
+
+have_cpucfg_now:
+ /* We have usable CPUCFG now, emulated or not.
+ * Announce CPUCFG availability to userspace via hwcap.
+ */
+ elf_hwcap |= HWCAP_LOONGSON_CPUCFG;
+}
diff --git a/arch/mips/loongson64/dma.c b/arch/mips/loongson64/dma.c
new file mode 100644
index 000000000..364f2f27c
--- /dev/null
+++ b/arch/mips/loongson64/dma.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/dma-direct.h>
+#include <linux/init.h>
+#include <linux/swiotlb.h>
+#include <boot_param.h>
+
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	/* We extract the 2-bit node id (bits 44~47; only bits 44~45 are used now)
+	 * from Loongson-3's 48-bit address space and embed it into the 40-bit
+	 * DMA address at node_id_offset. */
+ long nid = (paddr >> 44) & 0x3;
+
+ return ((nid << 44) ^ paddr) | (nid << node_id_offset);
+}
+
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	/* Recover the 2-bit node id from node_id_offset in the DMA address and
+	 * move it back to bits 44~45 of the 48-bit physical address. */
+ long nid = (daddr >> node_id_offset) & 0x3;
+
+ return ((nid << node_id_offset) ^ daddr) | (nid << 44);
+}
+
+void __init plat_swiotlb_setup(void)
+{
+ swiotlb_init(1);
+}
diff --git a/arch/mips/loongson64/env.c b/arch/mips/loongson64/env.c
new file mode 100644
index 000000000..134cb8e9e
--- /dev/null
+++ b/arch/mips/loongson64/env.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Based on Ocelot Linux port, which is
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: jsun@mvista.com or jsun@junsun.net
+ *
+ * Copyright 2003 ICT CAS
+ * Author: Michael Guo <guoyi@ict.ac.cn>
+ *
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+#include <linux/export.h>
+#include <linux/pci_ids.h>
+#include <asm/bootinfo.h>
+#include <loongson.h>
+#include <boot_param.h>
+#include <builtin_dtbs.h>
+#include <workarounds.h>
+
+#define HOST_BRIDGE_CONFIG_ADDR ((void __iomem *)TO_UNCAC(0x1a000000))
+
+u32 cpu_clock_freq;
+EXPORT_SYMBOL(cpu_clock_freq);
+struct efi_memory_map_loongson *loongson_memmap;
+struct loongson_system_configuration loongson_sysconf;
+
+u64 loongson_chipcfg[MAX_PACKAGES] = {0xffffffffbfc00180};
+u64 loongson_chiptemp[MAX_PACKAGES];
+u64 loongson_freqctrl[MAX_PACKAGES];
+
+unsigned long long smp_group[4];
+
+const char *get_system_type(void)
+{
+ return "Generic Loongson64 System";
+}
+
+void __init prom_init_env(void)
+{
+ struct boot_params *boot_p;
+ struct loongson_params *loongson_p;
+ struct system_loongson *esys;
+ struct efi_cpuinfo_loongson *ecpu;
+ struct irq_source_routing_table *eirq_source;
+ u32 id;
+ u16 vendor, device;
+
+ /* firmware arguments are initialized in head.S */
+ boot_p = (struct boot_params *)fw_arg2;
+ loongson_p = &(boot_p->efi.smbios.lp);
+
+ esys = (struct system_loongson *)
+ ((u64)loongson_p + loongson_p->system_offset);
+ ecpu = (struct efi_cpuinfo_loongson *)
+ ((u64)loongson_p + loongson_p->cpu_offset);
+ eirq_source = (struct irq_source_routing_table *)
+ ((u64)loongson_p + loongson_p->irq_offset);
+ loongson_memmap = (struct efi_memory_map_loongson *)
+ ((u64)loongson_p + loongson_p->memory_offset);
+
+ cpu_clock_freq = ecpu->cpu_clock_freq;
+ loongson_sysconf.cputype = ecpu->cputype;
+ switch (ecpu->cputype) {
+ case Legacy_3A:
+ case Loongson_3A:
+ loongson_sysconf.cores_per_node = 4;
+ loongson_sysconf.cores_per_package = 4;
+ smp_group[0] = 0x900000003ff01000;
+ smp_group[1] = 0x900010003ff01000;
+ smp_group[2] = 0x900020003ff01000;
+ smp_group[3] = 0x900030003ff01000;
+ loongson_chipcfg[0] = 0x900000001fe00180;
+ loongson_chipcfg[1] = 0x900010001fe00180;
+ loongson_chipcfg[2] = 0x900020001fe00180;
+ loongson_chipcfg[3] = 0x900030001fe00180;
+ loongson_chiptemp[0] = 0x900000001fe0019c;
+ loongson_chiptemp[1] = 0x900010001fe0019c;
+ loongson_chiptemp[2] = 0x900020001fe0019c;
+ loongson_chiptemp[3] = 0x900030001fe0019c;
+ loongson_freqctrl[0] = 0x900000001fe001d0;
+ loongson_freqctrl[1] = 0x900010001fe001d0;
+ loongson_freqctrl[2] = 0x900020001fe001d0;
+ loongson_freqctrl[3] = 0x900030001fe001d0;
+ loongson_sysconf.ht_control_base = 0x90000EFDFB000000;
+ loongson_sysconf.workarounds = WORKAROUND_CPUFREQ;
+ break;
+ case Legacy_3B:
+ case Loongson_3B:
+ loongson_sysconf.cores_per_node = 4; /* One chip has 2 nodes */
+ loongson_sysconf.cores_per_package = 8;
+ smp_group[0] = 0x900000003ff01000;
+ smp_group[1] = 0x900010003ff05000;
+ smp_group[2] = 0x900020003ff09000;
+ smp_group[3] = 0x900030003ff0d000;
+ loongson_chipcfg[0] = 0x900000001fe00180;
+ loongson_chipcfg[1] = 0x900020001fe00180;
+ loongson_chipcfg[2] = 0x900040001fe00180;
+ loongson_chipcfg[3] = 0x900060001fe00180;
+ loongson_chiptemp[0] = 0x900000001fe0019c;
+ loongson_chiptemp[1] = 0x900020001fe0019c;
+ loongson_chiptemp[2] = 0x900040001fe0019c;
+ loongson_chiptemp[3] = 0x900060001fe0019c;
+ loongson_freqctrl[0] = 0x900000001fe001d0;
+ loongson_freqctrl[1] = 0x900020001fe001d0;
+ loongson_freqctrl[2] = 0x900040001fe001d0;
+ loongson_freqctrl[3] = 0x900060001fe001d0;
+ loongson_sysconf.ht_control_base = 0x90001EFDFB000000;
+ loongson_sysconf.workarounds = WORKAROUND_CPUHOTPLUG;
+ break;
+ default:
+ loongson_sysconf.cores_per_node = 1;
+ loongson_sysconf.cores_per_package = 1;
+ loongson_chipcfg[0] = 0x900000001fe00180;
+ }
+
+ loongson_sysconf.nr_cpus = ecpu->nr_cpus;
+ loongson_sysconf.boot_cpu_id = ecpu->cpu_startup_core_id;
+ loongson_sysconf.reserved_cpus_mask = ecpu->reserved_cores_mask;
+ if (ecpu->nr_cpus > NR_CPUS || ecpu->nr_cpus == 0)
+ loongson_sysconf.nr_cpus = NR_CPUS;
+ loongson_sysconf.nr_nodes = (loongson_sysconf.nr_cpus +
+ loongson_sysconf.cores_per_node - 1) /
+ loongson_sysconf.cores_per_node;
+
+ loongson_sysconf.pci_mem_start_addr = eirq_source->pci_mem_start_addr;
+ loongson_sysconf.pci_mem_end_addr = eirq_source->pci_mem_end_addr;
+ loongson_sysconf.pci_io_base = eirq_source->pci_io_start_addr;
+ loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits;
+ if (loongson_sysconf.dma_mask_bits < 32 ||
+ loongson_sysconf.dma_mask_bits > 64)
+ loongson_sysconf.dma_mask_bits = 32;
+
+ loongson_sysconf.restart_addr = boot_p->reset_system.ResetWarm;
+ loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown;
+ loongson_sysconf.suspend_addr = boot_p->reset_system.DoSuspend;
+
+ loongson_sysconf.vgabios_addr = boot_p->efi.smbios.vga_bios;
+ pr_debug("Shutdown Addr: %llx, Restart Addr: %llx, VBIOS Addr: %llx\n",
+ loongson_sysconf.poweroff_addr, loongson_sysconf.restart_addr,
+ loongson_sysconf.vgabios_addr);
+
+ memset(loongson_sysconf.ecname, 0, 32);
+ if (esys->has_ec)
+ memcpy(loongson_sysconf.ecname, esys->ec_name, 32);
+ loongson_sysconf.workarounds |= esys->workarounds;
+
+ loongson_sysconf.nr_uarts = esys->nr_uarts;
+ if (esys->nr_uarts < 1 || esys->nr_uarts > MAX_UARTS)
+ loongson_sysconf.nr_uarts = 1;
+ memcpy(loongson_sysconf.uarts, esys->uarts,
+ sizeof(struct uart_device) * loongson_sysconf.nr_uarts);
+
+ loongson_sysconf.nr_sensors = esys->nr_sensors;
+ if (loongson_sysconf.nr_sensors > MAX_SENSORS)
+ loongson_sysconf.nr_sensors = 0;
+ if (loongson_sysconf.nr_sensors)
+ memcpy(loongson_sysconf.sensors, esys->sensors,
+ sizeof(struct sensor_device) * loongson_sysconf.nr_sensors);
+ pr_info("CpuClock = %u\n", cpu_clock_freq);
+
+ /* Read the ID of PCI host bridge to detect bridge type */
+ id = readl(HOST_BRIDGE_CONFIG_ADDR);
+ vendor = id & 0xffff;
+ device = (id >> 16) & 0xffff;
+
+ switch (vendor) {
+ case PCI_VENDOR_ID_LOONGSON:
+ pr_info("The bridge chip is LS7A\n");
+ loongson_sysconf.bridgetype = LS7A;
+ loongson_sysconf.early_config = ls7a_early_config;
+ break;
+ case PCI_VENDOR_ID_AMD:
+ case PCI_VENDOR_ID_ATI:
+ pr_info("The bridge chip is RS780E or SR5690\n");
+ loongson_sysconf.bridgetype = RS780E;
+ loongson_sysconf.early_config = rs780e_early_config;
+ break;
+ default:
+ pr_info("The bridge chip is VIRTUAL\n");
+ loongson_sysconf.bridgetype = VIRTUAL;
+ loongson_sysconf.early_config = virtual_early_config;
+ loongson_fdt_blob = __dtb_loongson64v_4core_virtio_begin;
+ break;
+ }
+
+ if ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64C) {
+ switch (read_c0_prid() & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON3A_R1:
+ case PRID_REV_LOONGSON3A_R2_0:
+ case PRID_REV_LOONGSON3A_R2_1:
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
+ switch (loongson_sysconf.bridgetype) {
+ case LS7A:
+ loongson_fdt_blob = __dtb_loongson64c_4core_ls7a_begin;
+ break;
+ case RS780E:
+ loongson_fdt_blob = __dtb_loongson64c_4core_rs780e_begin;
+ break;
+ default:
+ break;
+ }
+ break;
+ case PRID_REV_LOONGSON3B_R1:
+ case PRID_REV_LOONGSON3B_R2:
+ if (loongson_sysconf.bridgetype == RS780E)
+ loongson_fdt_blob = __dtb_loongson64c_8core_rs780e_begin;
+ break;
+ default:
+ break;
+ }
+ } else if ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G) {
+ if (loongson_sysconf.bridgetype == LS7A)
+ loongson_fdt_blob = __dtb_loongson64g_4core_ls7a_begin;
+ }
+
+ if (!loongson_fdt_blob)
+ pr_err("Failed to determine built-in Loongson64 dtb\n");
+}
diff --git a/arch/mips/loongson64/hpet.c b/arch/mips/loongson64/hpet.c
new file mode 100644
index 000000000..e42825925
--- /dev/null
+++ b/arch/mips/loongson64/hpet.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/percpu.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+#include <asm/hpet.h>
+#include <asm/time.h>
+
+#define SMBUS_CFG_BASE (loongson_sysconf.ht_control_base + 0x0300a000)
+#define SMBUS_PCI_REG40 0x40
+#define SMBUS_PCI_REG64 0x64
+#define SMBUS_PCI_REGB4 0xb4
+
+#define HPET_MIN_CYCLES 16
+#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES * 12)
+
+static DEFINE_SPINLOCK(hpet_lock);
+DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
+
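+/*
+ * The RS780/SBX00 SMBus device's PCI config space is memory-mapped at
+ * SMBUS_CFG_BASE through the HT window, so it is accessed with plain MMIO.
+ */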
+static unsigned int smbus_read(int offset)
+{
+ return *(volatile unsigned int *)(SMBUS_CFG_BASE + offset);
+}
+
+static void smbus_write(int offset, int data)
+{
+ *(volatile unsigned int *)(SMBUS_CFG_BASE + offset) = data;
+}
+
+static void smbus_enable(int offset, int bit)
+{
+ unsigned int cfg = smbus_read(offset);
+
+ cfg |= bit;
+ smbus_write(offset, cfg);
+}
+
+static int hpet_read(int offset)
+{
+ return *(volatile unsigned int *)(HPET_MMIO_ADDR + offset);
+}
+
+static void hpet_write(int offset, int data)
+{
+ *(volatile unsigned int *)(HPET_MMIO_ADDR + offset) = data;
+}
+
+static void hpet_start_counter(void)
+{
+ unsigned int cfg = hpet_read(HPET_CFG);
+
+ cfg |= HPET_CFG_ENABLE;
+ hpet_write(HPET_CFG, cfg);
+}
+
+static void hpet_stop_counter(void)
+{
+ unsigned int cfg = hpet_read(HPET_CFG);
+
+ cfg &= ~HPET_CFG_ENABLE;
+ hpet_write(HPET_CFG, cfg);
+}
+
+static void hpet_reset_counter(void)
+{
+ hpet_write(HPET_COUNTER, 0);
+ hpet_write(HPET_COUNTER + 4, 0);
+}
+
+static void hpet_restart_counter(void)
+{
+ hpet_stop_counter();
+ hpet_reset_counter();
+ hpet_start_counter();
+}
+
+static void hpet_enable_legacy_int(void)
+{
+ /* Do nothing on Loongson-3 */
+}
+
+static int hpet_set_state_periodic(struct clock_event_device *evt)
+{
+ int cfg;
+
+ spin_lock(&hpet_lock);
+
+ pr_info("set clock event to periodic mode!\n");
+ /* stop counter */
+ hpet_stop_counter();
+
+	/* enable timer 0 to generate a periodic interrupt */
+ cfg = hpet_read(HPET_T0_CFG);
+ cfg &= ~HPET_TN_LEVEL;
+ cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
+ HPET_TN_32BIT;
+ hpet_write(HPET_T0_CFG, cfg);
+
+ /* set the comparator */
+ hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
+ udelay(1);
+ hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
+
+ /* start counter */
+ hpet_start_counter();
+
+ spin_unlock(&hpet_lock);
+ return 0;
+}
+
+static int hpet_set_state_shutdown(struct clock_event_device *evt)
+{
+ int cfg;
+
+ spin_lock(&hpet_lock);
+
+ cfg = hpet_read(HPET_T0_CFG);
+ cfg &= ~HPET_TN_ENABLE;
+ hpet_write(HPET_T0_CFG, cfg);
+
+ spin_unlock(&hpet_lock);
+ return 0;
+}
+
+static int hpet_set_state_oneshot(struct clock_event_device *evt)
+{
+ int cfg;
+
+ spin_lock(&hpet_lock);
+
+ pr_info("set clock event to one shot mode!\n");
+ cfg = hpet_read(HPET_T0_CFG);
+ /*
+ * set timer0 type
+ * 1 : periodic interrupt
+ * 0 : non-periodic(oneshot) interrupt
+ */
+ cfg &= ~HPET_TN_PERIODIC;
+ cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
+ hpet_write(HPET_T0_CFG, cfg);
+
+ spin_unlock(&hpet_lock);
+ return 0;
+}
+
+static int hpet_tick_resume(struct clock_event_device *evt)
+{
+ spin_lock(&hpet_lock);
+ hpet_enable_legacy_int();
+ spin_unlock(&hpet_lock);
+
+ return 0;
+}
+
+static int hpet_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ u32 cnt;
+ s32 res;
+
+ cnt = hpet_read(HPET_COUNTER);
+ cnt += (u32) delta;
+ hpet_write(HPET_T0_CMP, cnt);
+
+ res = (s32)(cnt - hpet_read(HPET_COUNTER));
+
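+	/* If the comparator is already too close to (or behind) the counter, report -ETIME so the caller retries. */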
+ return res < HPET_MIN_CYCLES ? -ETIME : 0;
+}
+
+static irqreturn_t hpet_irq_handler(int irq, void *data)
+{
+ int is_irq;
+ struct clock_event_device *cd;
+ unsigned int cpu = smp_processor_id();
+
+ is_irq = hpet_read(HPET_STATUS);
+ if (is_irq & HPET_T0_IRS) {
+ /* clear the TIMER0 irq status register */
+ hpet_write(HPET_STATUS, HPET_T0_IRS);
+ cd = &per_cpu(hpet_clockevent_device, cpu);
+ cd->event_handler(cd);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+/*
+ * HPET address assignment and IRQ routing should be done by the BIOS, but
+ * PMON does not do this, so we set them up here directly. The code below
+ * would be the normal way to do it; unfortunately, hpet_setup() runs
+ * before PCI is initialized.
+ *
+ * {
+ * struct pci_dev *pdev;
+ *
+ * pdev = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
+ * pci_write_config_word(pdev, SMBUS_PCI_REGB4, HPET_ADDR);
+ *
+ * ...
+ * }
+ */
+static void hpet_setup(void)
+{
+ /* set hpet base address */
+ smbus_write(SMBUS_PCI_REGB4, HPET_ADDR);
+
+ /* enable decoding of access to HPET MMIO*/
+ smbus_enable(SMBUS_PCI_REG40, (1 << 28));
+
+ /* HPET irq enable */
+ smbus_enable(SMBUS_PCI_REG64, (1 << 10));
+
+ hpet_enable_legacy_int();
+}
+
+void __init setup_hpet_timer(void)
+{
+ unsigned long flags = IRQF_NOBALANCING | IRQF_TIMER;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+
+ hpet_setup();
+
+ cd = &per_cpu(hpet_clockevent_device, cpu);
+ cd->name = "hpet";
+ cd->rating = 100;
+ cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ cd->set_state_shutdown = hpet_set_state_shutdown;
+ cd->set_state_periodic = hpet_set_state_periodic;
+ cd->set_state_oneshot = hpet_set_state_oneshot;
+ cd->tick_resume = hpet_tick_resume;
+ cd->set_next_event = hpet_next_event;
+ cd->irq = HPET_T0_IRQ;
+ cd->cpumask = cpumask_of(cpu);
+ clockevent_set_clock(cd, HPET_FREQ);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->max_delta_ticks = 0x7fffffff;
+ cd->min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, cd);
+ cd->min_delta_ticks = HPET_MIN_PROG_DELTA;
+
+ clockevents_register_device(cd);
+ if (request_irq(HPET_T0_IRQ, hpet_irq_handler, flags, "hpet", NULL))
+ pr_err("Failed to request irq %d (hpet)\n", HPET_T0_IRQ);
+ pr_info("hpet clock event device register\n");
+}
+
+static u64 hpet_read_counter(struct clocksource *cs)
+{
+ return (u64)hpet_read(HPET_COUNTER);
+}
+
+static void hpet_suspend(struct clocksource *cs)
+{
+}
+
+static void hpet_resume(struct clocksource *cs)
+{
+ hpet_setup();
+ hpet_restart_counter();
+}
+
+static struct clocksource csrc_hpet = {
+ .name = "hpet",
+	/* The MIPS clocksource rating is below 300, so the HPET is preferred. */
+ .rating = 300,
+ .read = hpet_read_counter,
+ .mask = CLOCKSOURCE_MASK(32),
+	/* oneshot mode works normally with this flag */
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .suspend = hpet_suspend,
+ .resume = hpet_resume,
+ .mult = 0,
+ .shift = 10,
+};
+
+int __init init_hpet_clocksource(void)
+{
+ csrc_hpet.mult = clocksource_hz2mult(HPET_FREQ, csrc_hpet.shift);
+ return clocksource_register_hz(&csrc_hpet, HPET_FREQ);
+}
+
+arch_initcall(init_hpet_clocksource);
diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
new file mode 100644
index 000000000..4071cc884
--- /dev/null
+++ b/arch/mips/loongson64/init.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+
+#include <linux/irqchip.h>
+#include <linux/logic_pio.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <asm/bootinfo.h>
+#include <asm/traps.h>
+#include <asm/smp-ops.h>
+#include <asm/cacheflush.h>
+#include <asm/fw/fw.h>
+
+#include <loongson.h>
+#include <boot_param.h>
+
+#define NODE_ID_OFFSET_ADDR ((void __iomem *)TO_UNCAC(0x1001041c))
+
+u32 node_id_offset;
+
+static void __init mips_nmi_setup(void)
+{
+ void *base;
+ extern char except_vec_nmi[];
+
+ base = (void *)(CAC_BASE + 0x380);
+ memcpy(base, except_vec_nmi, 0x80);
+ flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
+}
+
+void ls7a_early_config(void)
+{
+ node_id_offset = ((readl(NODE_ID_OFFSET_ADDR) >> 8) & 0x1f) + 36;
+}
+
+void rs780e_early_config(void)
+{
+ node_id_offset = 37;
+}
+
+void virtual_early_config(void)
+{
+ node_id_offset = 44;
+}
+
+void __init prom_init(void)
+{
+ fw_init_cmdline();
+ prom_init_env();
+
+ /* init base address of io space */
+ set_io_port_base(PCI_IOBASE);
+
+ loongson_sysconf.early_config();
+
+ prom_init_numa_memory();
+
+ /* Hardcode to CPU UART 0 */
+ setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE + 0x1e0), 0, 1024);
+
+ register_smp_ops(&loongson3_smp_ops);
+ board_nmi_handler_setup = mips_nmi_setup;
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
+
+static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_t hw_start,
+ resource_size_t size)
+{
+ int ret = 0;
+ struct logic_pio_hwaddr *range;
+ unsigned long vaddr;
+
+ range = kzalloc(sizeof(*range), GFP_ATOMIC);
+ if (!range)
+ return -ENOMEM;
+
+ range->fwnode = fwnode;
+ range->size = size = round_up(size, PAGE_SIZE);
+ range->hw_start = hw_start;
+ range->flags = LOGIC_PIO_CPU_MMIO;
+
+ ret = logic_pio_register_range(range);
+ if (ret) {
+ kfree(range);
+ return ret;
+ }
+
+	/* Legacy ISA I/O must be placed at the start of PCI_IOBASE */
+ if (range->io_start != 0) {
+ logic_pio_unregister_range(range);
+ kfree(range);
+ return -EINVAL;
+ }
+
+ vaddr = PCI_IOBASE + range->io_start;
+
+ ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
+
+ return 0;
+}
+
+static __init void reserve_pio_range(void)
+{
+ struct device_node *np;
+
+ for_each_node_by_name(np, "isa") {
+ struct of_range range;
+ struct of_range_parser parser;
+
+ pr_info("ISA Bridge: %pOF\n", np);
+
+ if (of_range_parser_init(&parser, np)) {
+ pr_info("Failed to parse resources.\n");
+ break;
+ }
+
+ for_each_of_range(&parser, &range) {
+ switch (range.flags & IORESOURCE_TYPE_BITS) {
+ case IORESOURCE_IO:
+ pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n",
+ range.cpu_addr,
+ range.cpu_addr + range.size - 1,
+ range.bus_addr);
+ if (add_legacy_isa_io(&np->fwnode, range.cpu_addr, range.size))
+ pr_warn("Failed to reserve legacy IO in Logic PIO\n");
+ break;
+ case IORESOURCE_MEM:
+ pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx\n",
+ range.cpu_addr,
+ range.cpu_addr + range.size - 1,
+ range.bus_addr);
+ break;
+ }
+ }
+ }
+
+ /* Reserve vgabios if it comes from firmware */
+ if (loongson_sysconf.vgabios_addr)
+ memblock_reserve(virt_to_phys((void *)loongson_sysconf.vgabios_addr),
+ SZ_256K);
+}
+
+void __init arch_init_irq(void)
+{
+ reserve_pio_range();
+ irqchip_init();
+}
diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c
new file mode 100644
index 000000000..e4c461df3
--- /dev/null
+++ b/arch/mips/loongson64/numa.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
+ * Institute of Computing Technology
+ * Author: Xiang Gao, gaoxiang@ict.ac.cn
+ * Huacai Chen, chenhc@lemote.com
+ * Xiaofu Meng, Shuangshuang Zhang
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/export.h>
+#include <linux/nodemask.h>
+#include <linux/swap.h>
+#include <linux/memblock.h>
+#include <linux/pfn.h>
+#include <linux/highmem.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
+#include <linux/irq.h>
+#include <asm/bootinfo.h>
+#include <asm/mc146818-time.h>
+#include <asm/time.h>
+#include <asm/wbflush.h>
+#include <boot_param.h>
+
+static struct pglist_data prealloc__node_data[MAX_NUMNODES];
+unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
+EXPORT_SYMBOL(__node_distances);
+struct pglist_data *__node_data[MAX_NUMNODES];
+EXPORT_SYMBOL(__node_data);
+
+cpumask_t __node_cpumask[MAX_NUMNODES];
+EXPORT_SYMBOL(__node_cpumask);
+
+static void enable_lpa(void)
+{
+ unsigned long value;
+
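+	/* Enable large physical addressing: set Config3.LPA and PageGrain.ELPA. */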
+ value = __read_32bit_c0_register($16, 3);
+ value |= 0x00000080;
+ __write_32bit_c0_register($16, 3, value);
+ value = __read_32bit_c0_register($16, 3);
+ pr_info("CP0_Config3: CP0 16.3 (0x%lx)\n", value);
+
+ value = __read_32bit_c0_register($5, 1);
+ value |= 0x20000000;
+ __write_32bit_c0_register($5, 1, value);
+ value = __read_32bit_c0_register($5, 1);
+ pr_info("CP0_PageGrain: CP0 5.1 (0x%lx)\n", value);
+}
+
+static void cpu_node_probe(void)
+{
+ int i;
+
+ nodes_clear(node_possible_map);
+ nodes_clear(node_online_map);
+ for (i = 0; i < loongson_sysconf.nr_nodes; i++) {
+ node_set_state(num_online_nodes(), N_POSSIBLE);
+ node_set_online(num_online_nodes());
+ }
+
+ pr_info("NUMA: Discovered %d cpus on %d nodes\n",
+ loongson_sysconf.nr_cpus, num_online_nodes());
+}
+
+static int __init compute_node_distance(int row, int col)
+{
+ int package_row = row * loongson_sysconf.cores_per_node /
+ loongson_sysconf.cores_per_package;
+ int package_col = col * loongson_sysconf.cores_per_node /
+ loongson_sysconf.cores_per_package;
+
+ if (col == row)
+ return LOCAL_DISTANCE;
+ else if (package_row == package_col)
+ return 40;
+ else
+ return 100;
+}
+
+static void __init init_topology_matrix(void)
+{
+ int row, col;
+
+ for (row = 0; row < MAX_NUMNODES; row++)
+ for (col = 0; col < MAX_NUMNODES; col++)
+ __node_distances[row][col] = -1;
+
+ for_each_online_node(row) {
+ for_each_online_node(col) {
+ __node_distances[row][col] =
+ compute_node_distance(row, col);
+ }
+ }
+}
+
+static void __init szmem(unsigned int node)
+{
+ u32 i, mem_type;
+ static unsigned long num_physpages;
+ u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size;
+
+ /* Parse memory information and activate */
+ for (i = 0; i < loongson_memmap->nr_map; i++) {
+ node_id = loongson_memmap->map[i].node_id;
+ if (node_id != node)
+ continue;
+
+ mem_type = loongson_memmap->map[i].mem_type;
+ mem_size = loongson_memmap->map[i].mem_size;
+ mem_start = loongson_memmap->map[i].mem_start;
+
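+		/* Memory on node N is addressed at (N << 44) in the global physical address space. */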
+ switch (mem_type) {
+ case SYSTEM_RAM_LOW:
+ start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
+ node_psize = (mem_size << 20) >> PAGE_SHIFT;
+ end_pfn = start_pfn + node_psize;
+ num_physpages += node_psize;
+ pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
+ (u32)node_id, mem_type, mem_start, mem_size);
+ pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
+ start_pfn, end_pfn, num_physpages);
+ memblock_add_node(PFN_PHYS(start_pfn),
+ PFN_PHYS(node_psize), node);
+ break;
+ case SYSTEM_RAM_HIGH:
+ start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
+ node_psize = (mem_size << 20) >> PAGE_SHIFT;
+ end_pfn = start_pfn + node_psize;
+ num_physpages += node_psize;
+ pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
+ (u32)node_id, mem_type, mem_start, mem_size);
+ pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
+ start_pfn, end_pfn, num_physpages);
+ memblock_add_node(PFN_PHYS(start_pfn),
+ PFN_PHYS(node_psize), node);
+ break;
+ case SYSTEM_RAM_RESERVED:
+ pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
+ (u32)node_id, mem_type, mem_start, mem_size);
+ memblock_reserve(((node_id << 44) + mem_start),
+ mem_size << 20);
+ break;
+ }
+ }
+}
+
+static void __init node_mem_init(unsigned int node)
+{
+ unsigned long node_addrspace_offset;
+ unsigned long start_pfn, end_pfn;
+
+ node_addrspace_offset = nid_to_addrbase(node);
+ pr_info("Node%d's addrspace_offset is 0x%lx\n",
+ node, node_addrspace_offset);
+
+ get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
+ pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n",
+ node, start_pfn, end_pfn);
+
+ __node_data[node] = prealloc__node_data + node;
+
+ NODE_DATA(node)->node_start_pfn = start_pfn;
+ NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
+
+ if (node == 0) {
+ /* kernel end address */
+ unsigned long kernel_end_pfn = PFN_UP(__pa_symbol(&_end));
+
+ /* used by finalize_initrd() */
+ max_low_pfn = end_pfn;
+
+ /* Reserve the kernel text/data/bss */
+ memblock_reserve(start_pfn << PAGE_SHIFT,
+ ((kernel_end_pfn - start_pfn) << PAGE_SHIFT));
+
+ /* Reserve 0xfe000000~0xffffffff for RS780E integrated GPU */
+ if (node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT))
+ memblock_reserve((node_addrspace_offset | 0xfe000000),
+ 32 << 20);
+
+ /* Reserve pfn range 0~node[0]->node_start_pfn */
+ memblock_reserve(0, PAGE_SIZE * start_pfn);
+ }
+}
+
+static __init void prom_meminit(void)
+{
+ unsigned int node, cpu, active_cpu = 0;
+
+ cpu_node_probe();
+ init_topology_matrix();
+
+ for (node = 0; node < loongson_sysconf.nr_nodes; node++) {
+ if (node_online(node)) {
+ szmem(node);
+ node_mem_init(node);
+ cpumask_clear(&__node_cpumask[node]);
+ }
+ }
+ max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+
+ for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
+ node = cpu / loongson_sysconf.cores_per_node;
+ if (node >= num_online_nodes())
+ node = 0;
+
+ if (loongson_sysconf.reserved_cpus_mask & (1<<cpu))
+ continue;
+
+ cpumask_set_cpu(active_cpu, &__node_cpumask[node]);
+ pr_info("NUMA: set cpumask cpu %d on node %d\n", active_cpu, node);
+
+ active_cpu++;
+ }
+}
+
+void __init paging_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES] = {0, };
+
+ pagetable_init();
+ zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
+ zones_size[ZONE_NORMAL] = max_low_pfn;
+ free_area_init(zones_size);
+}
+
+void __init mem_init(void)
+{
+ high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
+ memblock_free_all();
+ setup_zero_pages(); /* This comes from node 0 */
+ mem_init_print_info(NULL);
+}
+
+/* All PCI devices belong to logical Node-0 */
+int pcibus_to_node(struct pci_bus *bus)
+{
+ return 0;
+}
+EXPORT_SYMBOL(pcibus_to_node);
+
+void __init prom_init_numa_memory(void)
+{
+ enable_lpa();
+ prom_meminit();
+}
+EXPORT_SYMBOL(prom_init_numa_memory);
diff --git a/arch/mips/loongson64/platform.c b/arch/mips/loongson64/platform.c
new file mode 100644
index 000000000..9674ae136
--- /dev/null
+++ b/arch/mips/loongson64/platform.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ * Xiang Yu, xiangy@lemote.com
+ * Chen Huacai, chenhc@lemote.com
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <asm/bootinfo.h>
+#include <boot_param.h>
+#include <loongson_hwmon.h>
+#include <workarounds.h>
+
+static int __init loongson3_platform_init(void)
+{
+ int i;
+ struct platform_device *pdev;
+
+ if (loongson_sysconf.ecname[0] != '\0')
+ platform_device_register_simple(loongson_sysconf.ecname, -1, NULL, 0);
+
+ for (i = 0; i < loongson_sysconf.nr_sensors; i++) {
+ if (loongson_sysconf.sensors[i].type > SENSOR_FAN)
+ continue;
+
+ pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
+ if (!pdev)
+ return -ENOMEM;
+
+ pdev->name = loongson_sysconf.sensors[i].name;
+ pdev->id = loongson_sysconf.sensors[i].id;
+ pdev->dev.platform_data = &loongson_sysconf.sensors[i];
+ platform_device_register(pdev);
+ }
+
+ return 0;
+}
+
+arch_initcall(loongson3_platform_init);
diff --git a/arch/mips/loongson64/pm.c b/arch/mips/loongson64/pm.c
new file mode 100644
index 000000000..7c8556f09
--- /dev/null
+++ b/arch/mips/loongson64/pm.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * loongson-specific suspend support
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+#include <linux/suspend.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+
+#include <asm/i8259.h>
+#include <asm/mipsregs.h>
+
+#include <loongson.h>
+
+static unsigned int __maybe_unused cached_master_mask; /* i8259A */
+static unsigned int __maybe_unused cached_slave_mask;
+static unsigned int __maybe_unused cached_bonito_irq_mask; /* bonito */
+
+void arch_suspend_disable_irqs(void)
+{
+ /* disable all mips events */
+ local_irq_disable();
+
+#ifdef CONFIG_I8259
+ /* disable all events of i8259A */
+ cached_slave_mask = inb(PIC_SLAVE_IMR);
+ cached_master_mask = inb(PIC_MASTER_IMR);
+
+ outb(0xff, PIC_SLAVE_IMR);
+ inb(PIC_SLAVE_IMR);
+ outb(0xff, PIC_MASTER_IMR);
+ inb(PIC_MASTER_IMR);
+#endif
+ /* disable all events of bonito */
+ cached_bonito_irq_mask = LOONGSON_INTEN;
+ LOONGSON_INTENCLR = 0xffff;
+ (void)LOONGSON_INTENCLR;
+}
+
+void arch_suspend_enable_irqs(void)
+{
+ /* enable all mips events */
+ local_irq_enable();
+#ifdef CONFIG_I8259
+ /* only enable the cached events of i8259A */
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
+ outb(cached_master_mask, PIC_MASTER_IMR);
+#endif
+ /* enable all cached events of bonito */
+ LOONGSON_INTENSET = cached_bonito_irq_mask;
+ (void)LOONGSON_INTENSET;
+}
+
+/*
+ * Set up the board-specific events for waking up Loongson from wait mode
+ */
+void __weak setup_wakeup_events(void)
+{
+}
+
+void __weak mach_suspend(void)
+{
+}
+
+void __weak mach_resume(void)
+{
+}
+
+static int loongson_pm_enter(suspend_state_t state)
+{
+ mach_suspend();
+
+ mach_resume();
+
+ return 0;
+}
+
+static int loongson_pm_valid_state(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_ON:
+ case PM_SUSPEND_STANDBY:
+ case PM_SUSPEND_MEM:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static const struct platform_suspend_ops loongson_pm_ops = {
+ .valid = loongson_pm_valid_state,
+ .enter = loongson_pm_enter,
+};
+
+static int __init loongson_pm_init(void)
+{
+ suspend_set_ops(&loongson_pm_ops);
+
+ return 0;
+}
+arch_initcall(loongson_pm_init);
diff --git a/arch/mips/loongson64/reset.c b/arch/mips/loongson64/reset.c
new file mode 100644
index 000000000..3bb8a1ed9
--- /dev/null
+++ b/arch/mips/loongson64/reset.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Zhangjin Wu, wuzhangjin@gmail.com
+ */
+#include <linux/init.h>
+#include <linux/pm.h>
+
+#include <asm/idle.h>
+#include <asm/reboot.h>
+
+#include <loongson.h>
+#include <boot_param.h>
+
+static void loongson_restart(char *command)
+{
+ void (*fw_restart)(void) = (void *)loongson_sysconf.restart_addr;
+
+ fw_restart();
+ while (1) {
+ if (cpu_wait)
+ cpu_wait();
+ }
+}
+
+static void loongson_poweroff(void)
+{
+ void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
+
+ fw_poweroff();
+ while (1) {
+ if (cpu_wait)
+ cpu_wait();
+ }
+}
+
+static void loongson_halt(void)
+{
+ pr_notice("\n\n** You can safely turn off the power now **\n\n");
+ while (1) {
+ if (cpu_wait)
+ cpu_wait();
+ }
+}
+
+static int __init mips_reboot_setup(void)
+{
+ _machine_restart = loongson_restart;
+ _machine_halt = loongson_halt;
+ pm_power_off = loongson_poweroff;
+
+ return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/loongson64/setup.c b/arch/mips/loongson64/setup.c
new file mode 100644
index 000000000..6fe3ffffc
--- /dev/null
+++ b/arch/mips/loongson64/setup.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ */
+#include <linux/export.h>
+#include <linux/init.h>
+
+#include <asm/wbflush.h>
+#include <asm/bootinfo.h>
+#include <linux/libfdt.h>
+#include <linux/of_fdt.h>
+
+#include <asm/prom.h>
+
+#include <loongson.h>
+
+void *loongson_fdt_blob;
+
+static void wbflush_loongson(void)
+{
+ asm(".set\tpush\n\t"
+ ".set\tnoreorder\n\t"
+ ".set mips3\n\t"
+ "sync\n\t"
+ "nop\n\t"
+ ".set\tpop\n\t"
+ ".set mips0\n\t");
+}
+
+void (*__wbflush)(void) = wbflush_loongson;
+EXPORT_SYMBOL(__wbflush);
+
+void __init plat_mem_setup(void)
+{
+ if (loongson_fdt_blob)
+ __dt_setup_arch(loongson_fdt_blob);
+}
+
+void __init device_tree_init(void)
+{
+ if (!initial_boot_params)
+ return;
+
+ unflatten_and_copy_device_tree();
+}
diff --git a/arch/mips/loongson64/smp.c b/arch/mips/loongson64/smp.c
new file mode 100644
index 000000000..e744e1bee
--- /dev/null
+++ b/arch/mips/loongson64/smp.c
@@ -0,0 +1,806 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2010, 2011, 2012, Lemote, Inc.
+ * Author: Chen Huacai, chenhc@lemote.com
+ */
+
+#include <irq.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/smp.h>
+#include <linux/cpufreq.h>
+#include <linux/kexec.h>
+#include <asm/processor.h>
+#include <asm/time.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <loongson.h>
+#include <loongson_regs.h>
+#include <workarounds.h>
+
+#include "smp.h"
+
+DEFINE_PER_CPU(int, cpu_state);
+
+#define LS_IPI_IRQ (MIPS_CPU_IRQ_BASE + 6)
+
+static void *ipi_set0_regs[16];
+static void *ipi_clear0_regs[16];
+static void *ipi_status0_regs[16];
+static void *ipi_en0_regs[16];
+static void *ipi_mailbox_buf[16];
+static uint32_t core0_c0count[NR_CPUS];
+
+/* read a 32bit value from ipi register */
+#define loongson3_ipi_read32(addr) readl(addr)
+/* read a 64bit value from ipi register */
+#define loongson3_ipi_read64(addr) readq(addr)
+/* write a 32bit value to ipi register */
+#define loongson3_ipi_write32(action, addr) \
+ do { \
+ writel(action, addr); \
+ __wbflush(); \
+ } while (0)
+/* write a 64bit value to ipi register */
+#define loongson3_ipi_write64(action, addr) \
+ do { \
+ writeq(action, addr); \
+ __wbflush(); \
+ } while (0)
+
+u32 (*ipi_read_clear)(int cpu);
+void (*ipi_write_action)(int cpu, u32 action);
+
+static u32 csr_ipi_read_clear(int cpu)
+{
+ u32 action;
+
+ /* Load the ipi register to figure out what we're supposed to do */
+ action = csr_readl(LOONGSON_CSR_IPI_STATUS);
+ /* Clear the ipi register to clear the interrupt */
+ csr_writel(action, LOONGSON_CSR_IPI_CLEAR);
+
+ return action;
+}
+
+static void csr_ipi_write_action(int cpu, u32 action)
+{
+ unsigned int irq = 0;
+
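+ /* Issue one IPI send per set bit in action, lowest bit first */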
+ while ((irq = ffs(action))) {
+ uint32_t val = CSR_IPI_SEND_BLOCK;
+ val |= (irq - 1);
+ val |= (cpu << CSR_IPI_SEND_CPU_SHIFT);
+ csr_writel(val, LOONGSON_CSR_IPI_SEND);
+ action &= ~BIT(irq - 1);
+ }
+}
+
+static u32 legacy_ipi_read_clear(int cpu)
+{
+ u32 action;
+
+ /* Load the ipi register to figure out what we're supposed to do */
+ action = loongson3_ipi_read32(ipi_status0_regs[cpu_logical_map(cpu)]);
+ /* Clear the ipi register to clear the interrupt */
+ loongson3_ipi_write32(action, ipi_clear0_regs[cpu_logical_map(cpu)]);
+
+ return action;
+}
+
+static void legacy_ipi_write_action(int cpu, u32 action)
+{
+ loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu]);
+}
+
+static void csr_ipi_probe(void)
+{
+ if (cpu_has_csr() && csr_readl(LOONGSON_CSR_FEATURES) & LOONGSON_CSRF_IPI) {
+ ipi_read_clear = csr_ipi_read_clear;
+ ipi_write_action = csr_ipi_write_action;
+ } else {
+ ipi_read_clear = legacy_ipi_read_clear;
+ ipi_write_action = legacy_ipi_write_action;
+ }
+}
+
+static void ipi_set0_regs_init(void)
+{
+ ipi_set0_regs[0] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + SET0);
+ ipi_set0_regs[1] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + SET0);
+ ipi_set0_regs[2] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + SET0);
+ ipi_set0_regs[3] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + SET0);
+ ipi_set0_regs[4] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + SET0);
+ ipi_set0_regs[5] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + SET0);
+ ipi_set0_regs[6] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + SET0);
+ ipi_set0_regs[7] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + SET0);
+ ipi_set0_regs[8] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + SET0);
+ ipi_set0_regs[9] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + SET0);
+ ipi_set0_regs[10] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + SET0);
+ ipi_set0_regs[11] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + SET0);
+ ipi_set0_regs[12] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + SET0);
+ ipi_set0_regs[13] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + SET0);
+ ipi_set0_regs[14] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + SET0);
+ ipi_set0_regs[15] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + SET0);
+}
+
+static void ipi_clear0_regs_init(void)
+{
+ ipi_clear0_regs[0] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + CLEAR0);
+ ipi_clear0_regs[1] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + CLEAR0);
+ ipi_clear0_regs[2] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + CLEAR0);
+ ipi_clear0_regs[3] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + CLEAR0);
+ ipi_clear0_regs[4] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + CLEAR0);
+ ipi_clear0_regs[5] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + CLEAR0);
+ ipi_clear0_regs[6] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + CLEAR0);
+ ipi_clear0_regs[7] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + CLEAR0);
+ ipi_clear0_regs[8] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + CLEAR0);
+ ipi_clear0_regs[9] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + CLEAR0);
+ ipi_clear0_regs[10] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + CLEAR0);
+ ipi_clear0_regs[11] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + CLEAR0);
+ ipi_clear0_regs[12] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + CLEAR0);
+ ipi_clear0_regs[13] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + CLEAR0);
+ ipi_clear0_regs[14] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + CLEAR0);
+ ipi_clear0_regs[15] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + CLEAR0);
+}
+
+static void ipi_status0_regs_init(void)
+{
+ ipi_status0_regs[0] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + STATUS0);
+ ipi_status0_regs[1] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + STATUS0);
+ ipi_status0_regs[2] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + STATUS0);
+ ipi_status0_regs[3] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + STATUS0);
+ ipi_status0_regs[4] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + STATUS0);
+ ipi_status0_regs[5] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + STATUS0);
+ ipi_status0_regs[6] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + STATUS0);
+ ipi_status0_regs[7] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + STATUS0);
+ ipi_status0_regs[8] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + STATUS0);
+ ipi_status0_regs[9] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + STATUS0);
+ ipi_status0_regs[10] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + STATUS0);
+ ipi_status0_regs[11] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + STATUS0);
+ ipi_status0_regs[12] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + STATUS0);
+ ipi_status0_regs[13] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + STATUS0);
+ ipi_status0_regs[14] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + STATUS0);
+ ipi_status0_regs[15] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + STATUS0);
+}
+
+static void ipi_en0_regs_init(void)
+{
+ ipi_en0_regs[0] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + EN0);
+ ipi_en0_regs[1] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + EN0);
+ ipi_en0_regs[2] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + EN0);
+ ipi_en0_regs[3] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + EN0);
+ ipi_en0_regs[4] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + EN0);
+ ipi_en0_regs[5] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + EN0);
+ ipi_en0_regs[6] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + EN0);
+ ipi_en0_regs[7] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + EN0);
+ ipi_en0_regs[8] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + EN0);
+ ipi_en0_regs[9] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + EN0);
+ ipi_en0_regs[10] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + EN0);
+ ipi_en0_regs[11] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + EN0);
+ ipi_en0_regs[12] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + EN0);
+ ipi_en0_regs[13] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + EN0);
+ ipi_en0_regs[14] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + EN0);
+ ipi_en0_regs[15] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + EN0);
+}
+
+static void ipi_mailbox_buf_init(void)
+{
+ ipi_mailbox_buf[0] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + BUF);
+ ipi_mailbox_buf[1] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + BUF);
+ ipi_mailbox_buf[2] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + BUF);
+ ipi_mailbox_buf[3] = (void *)
+ (SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + BUF);
+ ipi_mailbox_buf[4] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + BUF);
+ ipi_mailbox_buf[5] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + BUF);
+ ipi_mailbox_buf[6] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + BUF);
+ ipi_mailbox_buf[7] = (void *)
+ (SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + BUF);
+ ipi_mailbox_buf[8] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + BUF);
+ ipi_mailbox_buf[9] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + BUF);
+ ipi_mailbox_buf[10] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + BUF);
+ ipi_mailbox_buf[11] = (void *)
+ (SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + BUF);
+ ipi_mailbox_buf[12] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + BUF);
+ ipi_mailbox_buf[13] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + BUF);
+ ipi_mailbox_buf[14] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + BUF);
+ ipi_mailbox_buf[15] = (void *)
+ (SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + BUF);
+}
+
+/*
+ * Simple enough, just poke the appropriate ipi register
+ */
+static void loongson3_send_ipi_single(int cpu, unsigned int action)
+{
+ ipi_write_action(cpu_logical_map(cpu), (u32)action);
+}
+
+static void
+loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ ipi_write_action(cpu_logical_map(i), (u32)action);
+}
+
+
+static irqreturn_t loongson3_ipi_interrupt(int irq, void *dev_id)
+{
+ int i, cpu = smp_processor_id();
+ unsigned int action, c0count;
+
+ action = ipi_read_clear(cpu);
+
+ if (action & SMP_RESCHEDULE_YOURSELF)
+ scheduler_ipi();
+
+ if (action & SMP_CALL_FUNCTION) {
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
+ }
+
+ if (action & SMP_ASK_C0COUNT) {
+ BUG_ON(cpu != 0);
+ c0count = read_c0_count();
+ c0count = c0count ? c0count : 1;
+ for (i = 1; i < nr_cpu_ids; i++)
+ core0_c0count[i] = c0count;
+ __wbflush(); /* Let others see the result ASAP */
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define MAX_LOOPS 800
+/*
+ * SMP init and finish on secondary CPUs
+ */
+static void loongson3_init_secondary(void)
+{
+ int i;
+ uint32_t initcount;
+ unsigned int cpu = smp_processor_id();
+ unsigned int imask = STATUSF_IP7 | STATUSF_IP6 |
+ STATUSF_IP3 | STATUSF_IP2;
+
+ /* Set interrupt mask, but don't enable */
+ change_c0_status(ST0_IM, imask);
+
+ for (i = 0; i < num_possible_cpus(); i++)
+ loongson3_ipi_write32(0xffffffff, ipi_en0_regs[cpu_logical_map(i)]);
+
+ per_cpu(cpu_state, cpu) = CPU_ONLINE;
+ cpu_set_core(&cpu_data[cpu],
+ cpu_logical_map(cpu) % loongson_sysconf.cores_per_package);
+ cpu_data[cpu].package =
+ cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
+
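+ /* Synchronize this CPU's Count register with core 0's via an SMP_ASK_C0COUNT IPI */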
+ i = 0;
+ core0_c0count[cpu] = 0;
+ loongson3_send_ipi_single(0, SMP_ASK_C0COUNT);
+ while (!core0_c0count[cpu]) {
+ i++;
+ cpu_relax();
+ }
+
+ if (i > MAX_LOOPS)
+ i = MAX_LOOPS;
+ if (cpu_data[cpu].package)
+ initcount = core0_c0count[cpu] + i;
+ else /* Local access is faster for loops */
+ initcount = core0_c0count[cpu] + i/2;
+
+ write_c0_count(initcount);
+}
+
+static void loongson3_smp_finish(void)
+{
+ int cpu = smp_processor_id();
+
+ write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
+ local_irq_enable();
+ loongson3_ipi_write64(0,
+ ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x0);
+ pr_info("CPU#%d finished, CP0_ST=%x\n",
+ smp_processor_id(), read_c0_status());
+}
+
+static void __init loongson3_smp_setup(void)
+{
+ int i = 0, num = 0; /* i: physical id, num: logical id */
+
+ init_cpu_possible(cpu_none_mask);
+
+ /* For the unified kernel, NR_CPUS is the maximum possible value;
+ * loongson_sysconf.nr_cpus is the number actually present */
+ while (i < loongson_sysconf.nr_cpus) {
+ if (loongson_sysconf.reserved_cpus_mask & (1<<i)) {
+ /* Reserved physical CPU cores */
+ __cpu_number_map[i] = -1;
+ } else {
+ __cpu_number_map[i] = num;
+ __cpu_logical_map[num] = i;
+ set_cpu_possible(num, true);
+ num++;
+ }
+ i++;
+ }
+ pr_info("Detected %i available CPU(s)\n", num);
+
+ while (num < loongson_sysconf.nr_cpus) {
+ __cpu_logical_map[num] = -1;
+ num++;
+ }
+
+ csr_ipi_probe();
+ ipi_set0_regs_init();
+ ipi_clear0_regs_init();
+ ipi_status0_regs_init();
+ ipi_en0_regs_init();
+ ipi_mailbox_buf_init();
+ cpu_set_core(&cpu_data[0],
+ cpu_logical_map(0) % loongson_sysconf.cores_per_package);
+ cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
+}
+
+static void __init loongson3_prepare_cpus(unsigned int max_cpus)
+{
+ if (request_irq(LS_IPI_IRQ, loongson3_ipi_interrupt,
+ IRQF_PERCPU | IRQF_NO_SUSPEND, "SMP_IPI", NULL))
+ pr_err("Failed to request IPI IRQ\n");
+ init_cpu_present(cpu_possible_mask);
+ per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+}
+
+/*
+ * Set up the PC, SP, and GP of a secondary processor and start it running!
+ */
+static int loongson3_boot_secondary(int cpu, struct task_struct *idle)
+{
+ unsigned long startargs[4];
+
+ pr_info("Booting CPU#%d...\n", cpu);
+
+ /* startargs[] are initial PC, SP and GP for secondary CPU */
+ startargs[0] = (unsigned long)&smp_bootstrap;
+ startargs[1] = (unsigned long)__KSTK_TOS(idle);
+ startargs[2] = (unsigned long)task_thread_info(idle);
+ startargs[3] = 0;
+
+ pr_debug("CPU#%d, func_pc=%lx, sp=%lx, gp=%lx\n",
+ cpu, startargs[0], startargs[1], startargs[2]);
+
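+ /* Mailbox words read by the waiting core: +0x0 PC, +0x8 SP, +0x10 GP, +0x18 A1 */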
+ loongson3_ipi_write64(startargs[3],
+ ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x18);
+ loongson3_ipi_write64(startargs[2],
+ ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x10);
+ loongson3_ipi_write64(startargs[1],
+ ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x8);
+ loongson3_ipi_write64(startargs[0],
+ ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x0);
+ return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int loongson3_cpu_disable(void)
+{
+ unsigned long flags;
+ unsigned int cpu = smp_processor_id();
+
+ if (cpu == 0)
+ return -EBUSY;
+
+ set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
+ local_irq_save(flags);
+ irq_cpu_offline();
+ clear_c0_status(ST0_IM);
+ local_irq_restore(flags);
+ local_flush_tlb_all();
+
+ return 0;
+}
+
+
+static void loongson3_cpu_die(unsigned int cpu)
+{
+ while (per_cpu(cpu_state, cpu) != CPU_DEAD)
+ cpu_relax();
+
+ mb();
+}
+
+/* To shut down a core in Loongson 3, the target core should first go to CKSEG1
+ * and flush all of its L1 cache entries. Then another core (usually Core 0) can
+ * safely disable the clock of the target core. loongson3_play_dead() is
+ * called via CKSEG1 (uncached and unmapped). */
+static void loongson3_type1_play_dead(int *state_addr)
+{
+ register int val;
+ register long cpuid, core, node, count;
+ register void *addr, *base, *initfunc;
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder \n"
+ " li %[addr], 0x80000000 \n" /* KSEG0 */
+ "1: cache 0, 0(%[addr]) \n" /* flush L1 ICache */
+ " cache 0, 1(%[addr]) \n"
+ " cache 0, 2(%[addr]) \n"
+ " cache 0, 3(%[addr]) \n"
+ " cache 1, 0(%[addr]) \n" /* flush L1 DCache */
+ " cache 1, 1(%[addr]) \n"
+ " cache 1, 2(%[addr]) \n"
+ " cache 1, 3(%[addr]) \n"
+ " addiu %[sets], %[sets], -1 \n"
+ " bnez %[sets], 1b \n"
+ " addiu %[addr], %[addr], 0x20 \n"
+ " li %[val], 0x7 \n" /* *state_addr = CPU_DEAD; */
+ " sw %[val], (%[state_addr]) \n"
+ " sync \n"
+ " cache 21, (%[state_addr]) \n" /* flush entry of *state_addr */
+ " .set pop \n"
+ : [addr] "=&r" (addr), [val] "=&r" (val)
+ : [state_addr] "r" (state_addr),
+ [sets] "r" (cpu_data[smp_processor_id()].dcache.sets));
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder \n"
+ " .set mips64 \n"
+ " mfc0 %[cpuid], $15, 1 \n"
+ " andi %[cpuid], 0x3ff \n"
+ " dli %[base], 0x900000003ff01000 \n"
+ " andi %[core], %[cpuid], 0x3 \n"
+ " sll %[core], 8 \n" /* get core id */
+ " or %[base], %[base], %[core] \n"
+ " andi %[node], %[cpuid], 0xc \n"
+ " dsll %[node], 42 \n" /* get node id */
+ " or %[base], %[base], %[node] \n"
+ "1: li %[count], 0x100 \n" /* wait for init loop */
+ "2: bnez %[count], 2b \n" /* limit mailbox access */
+ " addiu %[count], -1 \n"
+ " ld %[initfunc], 0x20(%[base]) \n" /* get PC via mailbox */
+ " beqz %[initfunc], 1b \n"
+ " nop \n"
+ " ld $sp, 0x28(%[base]) \n" /* get SP via mailbox */
+ " ld $gp, 0x30(%[base]) \n" /* get GP via mailbox */
+ " ld $a1, 0x38(%[base]) \n"
+ " jr %[initfunc] \n" /* jump to initial PC */
+ " nop \n"
+ " .set pop \n"
+ : [core] "=&r" (core), [node] "=&r" (node),
+ [base] "=&r" (base), [cpuid] "=&r" (cpuid),
+ [count] "=&r" (count), [initfunc] "=&r" (initfunc)
+ : /* No Input */
+ : "a1");
+}
+
+static void loongson3_type2_play_dead(int *state_addr)
+{
+ register int val;
+ register long cpuid, core, node, count;
+ register void *addr, *base, *initfunc;
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder \n"
+ " li %[addr], 0x80000000 \n" /* KSEG0 */
+ "1: cache 0, 0(%[addr]) \n" /* flush L1 ICache */
+ " cache 0, 1(%[addr]) \n"
+ " cache 0, 2(%[addr]) \n"
+ " cache 0, 3(%[addr]) \n"
+ " cache 1, 0(%[addr]) \n" /* flush L1 DCache */
+ " cache 1, 1(%[addr]) \n"
+ " cache 1, 2(%[addr]) \n"
+ " cache 1, 3(%[addr]) \n"
+ " addiu %[sets], %[sets], -1 \n"
+ " bnez %[sets], 1b \n"
+ " addiu %[addr], %[addr], 0x20 \n"
+ " li %[val], 0x7 \n" /* *state_addr = CPU_DEAD; */
+ " sw %[val], (%[state_addr]) \n"
+ " sync \n"
+ " cache 21, (%[state_addr]) \n" /* flush entry of *state_addr */
+ " .set pop \n"
+ : [addr] "=&r" (addr), [val] "=&r" (val)
+ : [state_addr] "r" (state_addr),
+ [sets] "r" (cpu_data[smp_processor_id()].dcache.sets));
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder \n"
+ " .set mips64 \n"
+ " mfc0 %[cpuid], $15, 1 \n"
+ " andi %[cpuid], 0x3ff \n"
+ " dli %[base], 0x900000003ff01000 \n"
+ " andi %[core], %[cpuid], 0x3 \n"
+ " sll %[core], 8 \n" /* get core id */
+ " or %[base], %[base], %[core] \n"
+ " andi %[node], %[cpuid], 0xc \n"
+ " dsll %[node], 42 \n" /* get node id */
+ " or %[base], %[base], %[node] \n"
+ " dsrl %[node], 30 \n" /* 15:14 */
+ " or %[base], %[base], %[node] \n"
+ "1: li %[count], 0x100 \n" /* wait for init loop */
+ "2: bnez %[count], 2b \n" /* limit mailbox access */
+ " addiu %[count], -1 \n"
+ " ld %[initfunc], 0x20(%[base]) \n" /* get PC via mailbox */
+ " beqz %[initfunc], 1b \n"
+ " nop \n"
+ " ld $sp, 0x28(%[base]) \n" /* get SP via mailbox */
+ " ld $gp, 0x30(%[base]) \n" /* get GP via mailbox */
+ " ld $a1, 0x38(%[base]) \n"
+ " jr %[initfunc] \n" /* jump to initial PC */
+ " nop \n"
+ " .set pop \n"
+ : [core] "=&r" (core), [node] "=&r" (node),
+ [base] "=&r" (base), [cpuid] "=&r" (cpuid),
+ [count] "=&r" (count), [initfunc] "=&r" (initfunc)
+ : /* No Input */
+ : "a1");
+}
+
+static void loongson3_type3_play_dead(int *state_addr)
+{
+ register int val;
+ register long cpuid, core, node, count;
+ register void *addr, *base, *initfunc;
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder \n"
+ " li %[addr], 0x80000000 \n" /* KSEG0 */
+ "1: cache 0, 0(%[addr]) \n" /* flush L1 ICache */
+ " cache 0, 1(%[addr]) \n"
+ " cache 0, 2(%[addr]) \n"
+ " cache 0, 3(%[addr]) \n"
+ " cache 1, 0(%[addr]) \n" /* flush L1 DCache */
+ " cache 1, 1(%[addr]) \n"
+ " cache 1, 2(%[addr]) \n"
+ " cache 1, 3(%[addr]) \n"
+ " addiu %[sets], %[sets], -1 \n"
+ " bnez %[sets], 1b \n"
+ " addiu %[addr], %[addr], 0x40 \n"
+ " li %[addr], 0x80000000 \n" /* KSEG0 */
+ "2: cache 2, 0(%[addr]) \n" /* flush L1 VCache */
+ " cache 2, 1(%[addr]) \n"
+ " cache 2, 2(%[addr]) \n"
+ " cache 2, 3(%[addr]) \n"
+ " cache 2, 4(%[addr]) \n"
+ " cache 2, 5(%[addr]) \n"
+ " cache 2, 6(%[addr]) \n"
+ " cache 2, 7(%[addr]) \n"
+ " cache 2, 8(%[addr]) \n"
+ " cache 2, 9(%[addr]) \n"
+ " cache 2, 10(%[addr]) \n"
+ " cache 2, 11(%[addr]) \n"
+ " cache 2, 12(%[addr]) \n"
+ " cache 2, 13(%[addr]) \n"
+ " cache 2, 14(%[addr]) \n"
+ " cache 2, 15(%[addr]) \n"
+ " addiu %[vsets], %[vsets], -1 \n"
+ " bnez %[vsets], 2b \n"
+ " addiu %[addr], %[addr], 0x40 \n"
+ " li %[val], 0x7 \n" /* *state_addr = CPU_DEAD; */
+ " sw %[val], (%[state_addr]) \n"
+ " sync \n"
+ " cache 21, (%[state_addr]) \n" /* flush entry of *state_addr */
+ " .set pop \n"
+ : [addr] "=&r" (addr), [val] "=&r" (val)
+ : [state_addr] "r" (state_addr),
+ [sets] "r" (cpu_data[smp_processor_id()].dcache.sets),
+ [vsets] "r" (cpu_data[smp_processor_id()].vcache.sets));
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder \n"
+ " .set mips64 \n"
+ " mfc0 %[cpuid], $15, 1 \n"
+ " andi %[cpuid], 0x3ff \n"
+ " dli %[base], 0x900000003ff01000 \n"
+ " andi %[core], %[cpuid], 0x3 \n"
+ " sll %[core], 8 \n" /* get core id */
+ " or %[base], %[base], %[core] \n"
+ " andi %[node], %[cpuid], 0xc \n"
+ " dsll %[node], 42 \n" /* get node id */
+ " or %[base], %[base], %[node] \n"
+ "1: li %[count], 0x100 \n" /* wait for init loop */
+ "2: bnez %[count], 2b \n" /* limit mailbox access */
+ " addiu %[count], -1 \n"
+ " ld %[initfunc], 0x20(%[base]) \n" /* get PC via mailbox */
+ " beqz %[initfunc], 1b \n"
+ " nop \n"
+ " ld $sp, 0x28(%[base]) \n" /* get SP via mailbox */
+ " ld $gp, 0x30(%[base]) \n" /* get GP via mailbox */
+ " ld $a1, 0x38(%[base]) \n"
+ " jr %[initfunc] \n" /* jump to initial PC */
+ " nop \n"
+ " .set pop \n"
+ : [core] "=&r" (core), [node] "=&r" (node),
+ [base] "=&r" (base), [cpuid] "=&r" (cpuid),
+ [count] "=&r" (count), [initfunc] "=&r" (initfunc)
+ : /* No Input */
+ : "a1");
+}
+
+void play_dead(void)
+{
+ int prid_imp, prid_rev, *state_addr;
+ unsigned int cpu = smp_processor_id();
+ void (*play_dead_at_ckseg1)(int *);
+
+ idle_task_exit();
+
+ prid_imp = read_c0_prid() & PRID_IMP_MASK;
+ prid_rev = read_c0_prid() & PRID_REV_MASK;
+
+ if (prid_imp == PRID_IMP_LOONGSON_64G) {
+ play_dead_at_ckseg1 =
+ (void *)CKSEG1ADDR((unsigned long)loongson3_type3_play_dead);
+ goto out;
+ }
+
+ switch (prid_rev) {
+ case PRID_REV_LOONGSON3A_R1:
+ default:
+ play_dead_at_ckseg1 =
+ (void *)CKSEG1ADDR((unsigned long)loongson3_type1_play_dead);
+ break;
+ case PRID_REV_LOONGSON3B_R1:
+ case PRID_REV_LOONGSON3B_R2:
+ play_dead_at_ckseg1 =
+ (void *)CKSEG1ADDR((unsigned long)loongson3_type2_play_dead);
+ break;
+ case PRID_REV_LOONGSON3A_R2_0:
+ case PRID_REV_LOONGSON3A_R2_1:
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
+ play_dead_at_ckseg1 =
+ (void *)CKSEG1ADDR((unsigned long)loongson3_type3_play_dead);
+ break;
+ }
+
+out:
+ state_addr = &per_cpu(cpu_state, cpu);
+ mb();
+ play_dead_at_ckseg1(state_addr);
+}
+
+static int loongson3_disable_clock(unsigned int cpu)
+{
+ uint64_t core_id = cpu_core(&cpu_data[cpu]);
+ uint64_t package_id = cpu_data[cpu].package;
+
+ if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
+ LOONGSON_CHIPCFG(package_id) &= ~(1 << (12 + core_id));
+ } else {
+ if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
+ LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3));
+ }
+ return 0;
+}
+
+static int loongson3_enable_clock(unsigned int cpu)
+{
+ uint64_t core_id = cpu_core(&cpu_data[cpu]);
+ uint64_t package_id = cpu_data[cpu].package;
+
+ if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
+ LOONGSON_CHIPCFG(package_id) |= 1 << (12 + core_id);
+ } else {
+ if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
+ LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3);
+ }
+ return 0;
+}
+
+static int register_loongson3_notifier(void)
+{
+ return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
+ "mips/loongson:prepare",
+ loongson3_enable_clock,
+ loongson3_disable_clock);
+}
+early_initcall(register_loongson3_notifier);
+
+#endif
+
+const struct plat_smp_ops loongson3_smp_ops = {
+ .send_ipi_single = loongson3_send_ipi_single,
+ .send_ipi_mask = loongson3_send_ipi_mask,
+ .init_secondary = loongson3_init_secondary,
+ .smp_finish = loongson3_smp_finish,
+ .boot_secondary = loongson3_boot_secondary,
+ .smp_setup = loongson3_smp_setup,
+ .prepare_cpus = loongson3_prepare_cpus,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = loongson3_cpu_disable,
+ .cpu_die = loongson3_cpu_die,
+#endif
+#ifdef CONFIG_KEXEC
+ .kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
+#endif
+};
diff --git a/arch/mips/loongson64/smp.h b/arch/mips/loongson64/smp.h
new file mode 100644
index 000000000..957bde81e
--- /dev/null
+++ b/arch/mips/loongson64/smp.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LOONGSON_SMP_H_
+#define __LOONGSON_SMP_H_
+
+/* for Loongson-3 smp support */
+extern unsigned long long smp_group[4];
+
+/* At most 4 groups (nodes) in the NUMA case */
+#define SMP_CORE_GROUP0_BASE (smp_group[0])
+#define SMP_CORE_GROUP1_BASE (smp_group[1])
+#define SMP_CORE_GROUP2_BASE (smp_group[2])
+#define SMP_CORE_GROUP3_BASE (smp_group[3])
+
+/* 4 cores in each group (node) */
+#define SMP_CORE0_OFFSET 0x000
+#define SMP_CORE1_OFFSET 0x100
+#define SMP_CORE2_OFFSET 0x200
+#define SMP_CORE3_OFFSET 0x300
+
+/* ipi registers offsets */
+#define STATUS0 0x00
+#define EN0 0x04
+#define SET0 0x08
+#define CLEAR0 0x0c
+#define STATUS1 0x10
+#define MASK1 0x14
+#define SET1 0x18
+#define CLEAR1 0x1c
+#define BUF 0x20
+
+#endif
diff --git a/arch/mips/loongson64/time.c b/arch/mips/loongson64/time.c
new file mode 100644
index 000000000..91e842b58
--- /dev/null
+++ b/arch/mips/loongson64/time.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
+
+#include <asm/time.h>
+#include <asm/hpet.h>
+
+#include <loongson.h>
+
+void __init plat_time_init(void)
+{
+ /* setup mips r4k timer */
+ mips_hpt_frequency = cpu_clock_freq / 2;
+
+#ifdef CONFIG_RS780_HPET
+ setup_hpet_timer();
+#endif
+}
diff --git a/arch/mips/loongson64/vbios_quirk.c b/arch/mips/loongson64/vbios_quirk.c
new file mode 100644
index 000000000..9a29e94d3
--- /dev/null
+++ b/arch/mips/loongson64/vbios_quirk.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/pci.h>
+#include <loongson.h>
+
+static void pci_fixup_radeon(struct pci_dev *pdev)
+{
+ struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
+
+ if (res->start)
+ return;
+
+ if (!loongson_sysconf.vgabios_addr)
+ return;
+
+ pci_disable_rom(pdev);
+ if (res->parent)
+ release_resource(res);
+
+ res->start = virt_to_phys((void *) loongson_sysconf.vgabios_addr);
+ res->end = res->start + 256*1024 - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW |
+ IORESOURCE_PCI_FIXED;
+
+ dev_info(&pdev->dev, "BAR %d: assigned %pR for Radeon ROM\n",
+ PCI_ROM_RESOURCE, res);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, 0x9615,
+ PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_radeon);
diff --git a/arch/mips/math-emu/Makefile b/arch/mips/math-emu/Makefile
new file mode 100644
index 000000000..81d25ff32
--- /dev/null
+++ b/arch/mips/math-emu/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux/MIPS kernel FPU emulation.
+#
+
+obj-y += cp1emu.o ieee754dp.o ieee754sp.o ieee754.o \
+ dp_div.o dp_mul.o dp_sub.o dp_add.o dp_fsp.o dp_cmp.o dp_simple.o \
+ dp_tint.o dp_fint.o dp_rint.o dp_maddf.o dp_2008class.o dp_fmin.o \
+ dp_fmax.o \
+ sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_simple.o \
+ sp_tint.o sp_fint.o sp_rint.o sp_maddf.o sp_2008class.o sp_fmin.o \
+ sp_fmax.o \
+ dsemul.o
+
+lib-y += ieee754d.o \
+ dp_tlong.o dp_flong.o dp_sqrt.o \
+ sp_tlong.o sp_flong.o sp_sqrt.o
+
+obj-$(CONFIG_DEBUG_FS) += me-debugfs.o
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
new file mode 100644
index 000000000..587cf1d11
--- /dev/null
+++ b/arch/mips/math-emu/cp1emu.c
@@ -0,0 +1,2948 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * cp1emu.c: a MIPS coprocessor 1 (FPU) instruction emulator
+ *
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ *
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ *
+ * A complete emulator for MIPS coprocessor 1 instructions. This is
+ * required for #float(switch) or #float(trap), where it catches all
+ * COP1 instructions via the "CoProcessor Unusable" exception.
+ *
+ * More surprisingly, it is also required for #float(ieee), to help out
+ * the hardware FPU at the boundaries of the IEEE-754 representation
+ * (denormalised values, infinities, underflow, etc). It is made
+ * quite nasty because emulation of some non-COP1 instructions is
+ * required, e.g. in branch delay slots.
+ *
+ * Note that if you know you won't have an FPU, you'll get much
+ * better performance by compiling with -msoft-float!
+ */
+#include <linux/sched.h>
+#include <linux/debugfs.h>
+#include <linux/percpu-defs.h>
+#include <linux/perf_event.h>
+
+#include <asm/branch.h>
+#include <asm/inst.h>
+#include <asm/ptrace.h>
+#include <asm/signal.h>
+#include <linux/uaccess.h>
+
+#include <asm/cpu-info.h>
+#include <asm/processor.h>
+#include <asm/fpu_emulator.h>
+#include <asm/fpu.h>
+#include <asm/mips-r2-to-r6-emul.h>
+
+#include "ieee754.h"
+
+/* Functions which emulate a floating point instruction. */
+
+static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *,
+ mips_instruction);
+
+static int fpux_emu(struct pt_regs *,
+ struct mips_fpu_struct *, mips_instruction, void __user **);
+
+/* Control registers */
+
+#define FPCREG_RID 0 /* $0 = revision id */
+#define FPCREG_FCCR 25 /* $25 = fccr */
+#define FPCREG_FEXR 26 /* $26 = fexr */
+#define FPCREG_FENR 28 /* $28 = fenr */
+#define FPCREG_CSR 31 /* $31 = csr */
+
+/* convert condition code register number to csr bit */
+const unsigned int fpucondbit[8] = {
+ FPU_CSR_COND,
+ FPU_CSR_COND1,
+ FPU_CSR_COND2,
+ FPU_CSR_COND3,
+ FPU_CSR_COND4,
+ FPU_CSR_COND5,
+ FPU_CSR_COND6,
+ FPU_CSR_COND7
+};
+
+/* (microMIPS) Convert certain microMIPS instructions to MIPS32 format. */
+static const int sd_format[] = {16, 17, 0, 0, 0, 0, 0, 0};
+static const int sdps_format[] = {16, 17, 22, 0, 0, 0, 0, 0};
+static const int dwl_format[] = {17, 20, 21, 0, 0, 0, 0, 0};
+static const int swl_format[] = {16, 20, 21, 0, 0, 0, 0, 0};
+
+/*
+ * This function translates a 32-bit microMIPS instruction
+ * into a 32-bit MIPS32 instruction. Returns 0 on success
+ * and SIGILL otherwise.
+ */
+static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr)
+{
+ union mips_instruction insn = *insn_ptr;
+ union mips_instruction mips32_insn = insn;
+ int func, fmt, op;
+
+ switch (insn.mm_i_format.opcode) {
+ case mm_ldc132_op:
+ mips32_insn.mm_i_format.opcode = ldc1_op;
+ mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+ mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+ break;
+ case mm_lwc132_op:
+ mips32_insn.mm_i_format.opcode = lwc1_op;
+ mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+ mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+ break;
+ case mm_sdc132_op:
+ mips32_insn.mm_i_format.opcode = sdc1_op;
+ mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+ mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+ break;
+ case mm_swc132_op:
+ mips32_insn.mm_i_format.opcode = swc1_op;
+ mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+ mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+ break;
+ case mm_pool32i_op:
+ /* NOTE: the offset is shifted left by 1 in microMIPS mode. */
+ if ((insn.mm_i_format.rt == mm_bc1f_op) ||
+ (insn.mm_i_format.rt == mm_bc1t_op)) {
+ mips32_insn.fb_format.opcode = cop1_op;
+ mips32_insn.fb_format.bc = bc_op;
+ mips32_insn.fb_format.flag =
+ (insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0;
+ } else
+ return SIGILL;
+ break;
+ case mm_pool32f_op:
+ switch (insn.mm_fp0_format.func) {
+ case mm_32f_01_op:
+ case mm_32f_11_op:
+ case mm_32f_02_op:
+ case mm_32f_12_op:
+ case mm_32f_41_op:
+ case mm_32f_51_op:
+ case mm_32f_42_op:
+ case mm_32f_52_op:
+ op = insn.mm_fp0_format.func;
+ if (op == mm_32f_01_op)
+ func = madd_s_op;
+ else if (op == mm_32f_11_op)
+ func = madd_d_op;
+ else if (op == mm_32f_02_op)
+ func = nmadd_s_op;
+ else if (op == mm_32f_12_op)
+ func = nmadd_d_op;
+ else if (op == mm_32f_41_op)
+ func = msub_s_op;
+ else if (op == mm_32f_51_op)
+ func = msub_d_op;
+ else if (op == mm_32f_42_op)
+ func = nmsub_s_op;
+ else
+ func = nmsub_d_op;
+ mips32_insn.fp6_format.opcode = cop1x_op;
+ mips32_insn.fp6_format.fr = insn.mm_fp6_format.fr;
+ mips32_insn.fp6_format.ft = insn.mm_fp6_format.ft;
+ mips32_insn.fp6_format.fs = insn.mm_fp6_format.fs;
+ mips32_insn.fp6_format.fd = insn.mm_fp6_format.fd;
+ mips32_insn.fp6_format.func = func;
+ break;
+ case mm_32f_10_op:
+ func = -1; /* Invalid */
+ op = insn.mm_fp5_format.op & 0x7;
+ if (op == mm_ldxc1_op)
+ func = ldxc1_op;
+ else if (op == mm_sdxc1_op)
+ func = sdxc1_op;
+ else if (op == mm_lwxc1_op)
+ func = lwxc1_op;
+ else if (op == mm_swxc1_op)
+ func = swxc1_op;
+
+ if (func != -1) {
+ mips32_insn.r_format.opcode = cop1x_op;
+ mips32_insn.r_format.rs =
+ insn.mm_fp5_format.base;
+ mips32_insn.r_format.rt =
+ insn.mm_fp5_format.index;
+ mips32_insn.r_format.rd = 0;
+ mips32_insn.r_format.re = insn.mm_fp5_format.fd;
+ mips32_insn.r_format.func = func;
+ } else
+ return SIGILL;
+ break;
+ case mm_32f_40_op:
+ op = -1; /* Invalid */
+ if (insn.mm_fp2_format.op == mm_fmovt_op)
+ op = 1;
+ else if (insn.mm_fp2_format.op == mm_fmovf_op)
+ op = 0;
+ if (op != -1) {
+ mips32_insn.fp0_format.opcode = cop1_op;
+ mips32_insn.fp0_format.fmt =
+ sdps_format[insn.mm_fp2_format.fmt];
+ mips32_insn.fp0_format.ft =
+ (insn.mm_fp2_format.cc<<2) + op;
+ mips32_insn.fp0_format.fs =
+ insn.mm_fp2_format.fs;
+ mips32_insn.fp0_format.fd =
+ insn.mm_fp2_format.fd;
+ mips32_insn.fp0_format.func = fmovc_op;
+ } else
+ return SIGILL;
+ break;
+ case mm_32f_60_op:
+ func = -1; /* Invalid */
+ if (insn.mm_fp0_format.op == mm_fadd_op)
+ func = fadd_op;
+ else if (insn.mm_fp0_format.op == mm_fsub_op)
+ func = fsub_op;
+ else if (insn.mm_fp0_format.op == mm_fmul_op)
+ func = fmul_op;
+ else if (insn.mm_fp0_format.op == mm_fdiv_op)
+ func = fdiv_op;
+ if (func != -1) {
+ mips32_insn.fp0_format.opcode = cop1_op;
+ mips32_insn.fp0_format.fmt =
+ sdps_format[insn.mm_fp0_format.fmt];
+ mips32_insn.fp0_format.ft =
+ insn.mm_fp0_format.ft;
+ mips32_insn.fp0_format.fs =
+ insn.mm_fp0_format.fs;
+ mips32_insn.fp0_format.fd =
+ insn.mm_fp0_format.fd;
+ mips32_insn.fp0_format.func = func;
+ } else
+ return SIGILL;
+ break;
+ case mm_32f_70_op:
+ func = -1; /* Invalid */
+ if (insn.mm_fp0_format.op == mm_fmovn_op)
+ func = fmovn_op;
+ else if (insn.mm_fp0_format.op == mm_fmovz_op)
+ func = fmovz_op;
+ if (func != -1) {
+ mips32_insn.fp0_format.opcode = cop1_op;
+ mips32_insn.fp0_format.fmt =
+ sdps_format[insn.mm_fp0_format.fmt];
+ mips32_insn.fp0_format.ft =
+ insn.mm_fp0_format.ft;
+ mips32_insn.fp0_format.fs =
+ insn.mm_fp0_format.fs;
+ mips32_insn.fp0_format.fd =
+ insn.mm_fp0_format.fd;
+ mips32_insn.fp0_format.func = func;
+ } else
+ return SIGILL;
+ break;
+ case mm_32f_73_op: /* POOL32FXF */
+ switch (insn.mm_fp1_format.op) {
+ case mm_movf0_op:
+ case mm_movf1_op:
+ case mm_movt0_op:
+ case mm_movt1_op:
+ if ((insn.mm_fp1_format.op & 0x7f) ==
+ mm_movf0_op)
+ op = 0;
+ else
+ op = 1;
+ mips32_insn.r_format.opcode = spec_op;
+ mips32_insn.r_format.rs = insn.mm_fp4_format.fs;
+ mips32_insn.r_format.rt =
+ (insn.mm_fp4_format.cc << 2) + op;
+ mips32_insn.r_format.rd = insn.mm_fp4_format.rt;
+ mips32_insn.r_format.re = 0;
+ mips32_insn.r_format.func = movc_op;
+ break;
+ case mm_fcvtd0_op:
+ case mm_fcvtd1_op:
+ case mm_fcvts0_op:
+ case mm_fcvts1_op:
+ if ((insn.mm_fp1_format.op & 0x7f) ==
+ mm_fcvtd0_op) {
+ func = fcvtd_op;
+ fmt = swl_format[insn.mm_fp3_format.fmt];
+ } else {
+ func = fcvts_op;
+ fmt = dwl_format[insn.mm_fp3_format.fmt];
+ }
+ mips32_insn.fp0_format.opcode = cop1_op;
+ mips32_insn.fp0_format.fmt = fmt;
+ mips32_insn.fp0_format.ft = 0;
+ mips32_insn.fp0_format.fs =
+ insn.mm_fp3_format.fs;
+ mips32_insn.fp0_format.fd =
+ insn.mm_fp3_format.rt;
+ mips32_insn.fp0_format.func = func;
+ break;
+ case mm_fmov0_op:
+ case mm_fmov1_op:
+ case mm_fabs0_op:
+ case mm_fabs1_op:
+ case mm_fneg0_op:
+ case mm_fneg1_op:
+ if ((insn.mm_fp1_format.op & 0x7f) ==
+ mm_fmov0_op)
+ func = fmov_op;
+ else if ((insn.mm_fp1_format.op & 0x7f) ==
+ mm_fabs0_op)
+ func = fabs_op;
+ else
+ func = fneg_op;
+ mips32_insn.fp0_format.opcode = cop1_op;
+ mips32_insn.fp0_format.fmt =
+ sdps_format[insn.mm_fp3_format.fmt];
+ mips32_insn.fp0_format.ft = 0;
+ mips32_insn.fp0_format.fs =
+ insn.mm_fp3_format.fs;
+ mips32_insn.fp0_format.fd =
+ insn.mm_fp3_format.rt;
+ mips32_insn.fp0_format.func = func;
+ break;
+ case mm_ffloorl_op:
+ case mm_ffloorw_op:
+ case mm_fceill_op:
+ case mm_fceilw_op:
+ case mm_ftruncl_op:
+ case mm_ftruncw_op:
+ case mm_froundl_op:
+ case mm_froundw_op:
+ case mm_fcvtl_op:
+ case mm_fcvtw_op:
+ if (insn.mm_fp1_format.op == mm_ffloorl_op)
+ func = ffloorl_op;
+ else if (insn.mm_fp1_format.op == mm_ffloorw_op)
+ func = ffloor_op;
+ else if (insn.mm_fp1_format.op == mm_fceill_op)
+ func = fceill_op;
+ else if (insn.mm_fp1_format.op == mm_fceilw_op)
+ func = fceil_op;
+ else if (insn.mm_fp1_format.op == mm_ftruncl_op)
+ func = ftruncl_op;
+ else if (insn.mm_fp1_format.op == mm_ftruncw_op)
+ func = ftrunc_op;
+ else if (insn.mm_fp1_format.op == mm_froundl_op)
+ func = froundl_op;
+ else if (insn.mm_fp1_format.op == mm_froundw_op)
+ func = fround_op;
+ else if (insn.mm_fp1_format.op == mm_fcvtl_op)
+ func = fcvtl_op;
+ else
+ func = fcvtw_op;
+ mips32_insn.fp0_format.opcode = cop1_op;
+ mips32_insn.fp0_format.fmt =
+ sd_format[insn.mm_fp1_format.fmt];
+ mips32_insn.fp0_format.ft = 0;
+ mips32_insn.fp0_format.fs =
+ insn.mm_fp1_format.fs;
+ mips32_insn.fp0_format.fd =
+ insn.mm_fp1_format.rt;
+ mips32_insn.fp0_format.func = func;
+ break;
+ case mm_frsqrt_op:
+ case mm_fsqrt_op:
+ case mm_frecip_op:
+ if (insn.mm_fp1_format.op == mm_frsqrt_op)
+ func = frsqrt_op;
+ else if (insn.mm_fp1_format.op == mm_fsqrt_op)
+ func = fsqrt_op;
+ else
+ func = frecip_op;
+ mips32_insn.fp0_format.opcode = cop1_op;
+ mips32_insn.fp0_format.fmt =
+ sdps_format[insn.mm_fp1_format.fmt];
+ mips32_insn.fp0_format.ft = 0;
+ mips32_insn.fp0_format.fs =
+ insn.mm_fp1_format.fs;
+ mips32_insn.fp0_format.fd =
+ insn.mm_fp1_format.rt;
+ mips32_insn.fp0_format.func = func;
+ break;
+ case mm_mfc1_op:
+ case mm_mtc1_op:
+ case mm_cfc1_op:
+ case mm_ctc1_op:
+ case mm_mfhc1_op:
+ case mm_mthc1_op:
+ if (insn.mm_fp1_format.op == mm_mfc1_op)
+ op = mfc_op;
+ else if (insn.mm_fp1_format.op == mm_mtc1_op)
+ op = mtc_op;
+ else if (insn.mm_fp1_format.op == mm_cfc1_op)
+ op = cfc_op;
+ else if (insn.mm_fp1_format.op == mm_ctc1_op)
+ op = ctc_op;
+ else if (insn.mm_fp1_format.op == mm_mfhc1_op)
+ op = mfhc_op;
+ else
+ op = mthc_op;
+ mips32_insn.fp1_format.opcode = cop1_op;
+ mips32_insn.fp1_format.op = op;
+ mips32_insn.fp1_format.rt =
+ insn.mm_fp1_format.rt;
+ mips32_insn.fp1_format.fs =
+ insn.mm_fp1_format.fs;
+ mips32_insn.fp1_format.fd = 0;
+ mips32_insn.fp1_format.func = 0;
+ break;
+ default:
+ return SIGILL;
+ }
+ break;
+ case mm_32f_74_op: /* c.cond.fmt */
+ mips32_insn.fp0_format.opcode = cop1_op;
+ mips32_insn.fp0_format.fmt =
+ sdps_format[insn.mm_fp4_format.fmt];
+ mips32_insn.fp0_format.ft = insn.mm_fp4_format.rt;
+ mips32_insn.fp0_format.fs = insn.mm_fp4_format.fs;
+ mips32_insn.fp0_format.fd = insn.mm_fp4_format.cc << 2;
+ mips32_insn.fp0_format.func =
+ insn.mm_fp4_format.cond | MM_MIPS32_COND_FC;
+ break;
+ default:
+ return SIGILL;
+ }
+ break;
+ default:
+ return SIGILL;
+ }
+
+ *insn_ptr = mips32_insn;
+ return 0;
+}
+
+/*
+ * Redundant with logic already in kernel/branch.c,
+ * embedded in compute_return_epc. At some point,
+ * a single subroutine should be used across both
+ * modules.
+ */
+int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ unsigned long *contpc)
+{
+ union mips_instruction insn = (union mips_instruction)dec_insn.insn;
+ unsigned int fcr31;
+ unsigned int bit = 0;
+ unsigned int bit0;
+ union fpureg *fpr;
+
+ switch (insn.i_format.opcode) {
+ case spec_op:
+ switch (insn.r_format.func) {
+ case jalr_op:
+ if (insn.r_format.rd != 0) {
+ regs->regs[insn.r_format.rd] =
+ regs->cp0_epc + dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ }
+ fallthrough;
+ case jr_op:
+ /* For R6, JR already emulated in jalr_op */
+ if (NO_R6EMU && insn.r_format.func == jr_op)
+ break;
+ *contpc = regs->regs[insn.r_format.rs];
+ return 1;
+ }
+ break;
+ case bcond_op:
+ switch (insn.i_format.rt) {
+ case bltzal_op:
+ case bltzall_op:
+ if (NO_R6EMU && (insn.i_format.rs ||
+ insn.i_format.rt == bltzall_op))
+ break;
+
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ fallthrough;
+ case bltzl_op:
+ if (NO_R6EMU)
+ break;
+ fallthrough;
+ case bltz_op:
+ if ((long)regs->regs[insn.i_format.rs] < 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case bgezal_op:
+ case bgezall_op:
+ if (NO_R6EMU && (insn.i_format.rs ||
+ insn.i_format.rt == bgezall_op))
+ break;
+
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ fallthrough;
+ case bgezl_op:
+ if (NO_R6EMU)
+ break;
+ fallthrough;
+ case bgez_op:
+ if ((long)regs->regs[insn.i_format.rs] >= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ }
+ break;
+ case jalx_op:
+ set_isa16_mode(bit);
+ fallthrough;
+ case jal_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ fallthrough;
+ case j_op:
+ *contpc = regs->cp0_epc + dec_insn.pc_inc;
+ *contpc >>= 28;
+ *contpc <<= 28;
+ *contpc |= (insn.j_format.target << 2);
+ /* Set microMIPS mode bit: XOR for jalx. */
+ *contpc ^= bit;
+ return 1;
+ case beql_op:
+ if (NO_R6EMU)
+ break;
+ fallthrough;
+ case beq_op:
+ if (regs->regs[insn.i_format.rs] ==
+ regs->regs[insn.i_format.rt])
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case bnel_op:
+ if (NO_R6EMU)
+ break;
+ fallthrough;
+ case bne_op:
+ if (regs->regs[insn.i_format.rs] !=
+ regs->regs[insn.i_format.rt])
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case blezl_op:
+ if (!insn.i_format.rt && NO_R6EMU)
+ break;
+ fallthrough;
+ case blez_op:
+
+ /*
+ * Compact branches for R6 for the
+ * blez and blezl opcodes.
+ * BLEZ | rs = 0 | rt != 0 == BLEZALC
+ * BLEZ | rs = rt != 0 == BGEZALC
+ * BLEZ | rs != 0 | rt != 0 == BGEUC
+ * BLEZL | rs = 0 | rt != 0 == BLEZC
+ * BLEZL | rs = rt != 0 == BGEZC
+ * BLEZL | rs != 0 | rt != 0 == BGEC
+ *
+ * For real BLEZ{,L}, rt is always 0.
+ */
+ if (cpu_has_mips_r6 && insn.i_format.rt) {
+ if ((insn.i_format.opcode == blez_op) &&
+ ((!insn.i_format.rs && insn.i_format.rt) ||
+ (insn.i_format.rs == insn.i_format.rt)))
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc;
+ *contpc = regs->cp0_epc + dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+
+ return 1;
+ }
+ if ((long)regs->regs[insn.i_format.rs] <= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case bgtzl_op:
+ if (!insn.i_format.rt && NO_R6EMU)
+ break;
+ fallthrough;
+ case bgtz_op:
+ /*
+ * Compact branches for R6 for the
+ * bgtz and bgtzl opcodes.
+ * BGTZ | rs = 0 | rt != 0 == BGTZALC
+ * BGTZ | rs = rt != 0 == BLTZALC
+ * BGTZ | rs != 0 | rt != 0 == BLTUC
+ * BGTZL | rs = 0 | rt != 0 == BGTZC
+ * BGTZL | rs = rt != 0 == BLTZC
+ * BGTZL | rs != 0 | rt != 0 == BLTC
+ *
+ * *ZALC variant for BGTZ && rt != 0
+ * For real GTZ{,L}, rt is always 0.
+ */
+ if (cpu_has_mips_r6 && insn.i_format.rt) {
+ if ((insn.i_format.opcode == blez_op) &&
+ ((!insn.i_format.rs && insn.i_format.rt) ||
+ (insn.i_format.rs == insn.i_format.rt)))
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc;
+ *contpc = regs->cp0_epc + dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+
+ return 1;
+ }
+
+ if ((long)regs->regs[insn.i_format.rs] > 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case pop10_op:
+ case pop30_op:
+ if (!cpu_has_mips_r6)
+ break;
+ if (insn.i_format.rt && !insn.i_format.rs)
+ regs->regs[31] = regs->cp0_epc + 4;
+ *contpc = regs->cp0_epc + dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+
+ return 1;
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+ case ldc2_op: /* This is bbit032 on Octeon */
+ if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0)
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+ case swc2_op: /* This is bbit1 on Octeon */
+ if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+ case sdc2_op: /* This is bbit132 on Octeon */
+ if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32)))
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+#else
+ case bc6_op:
+ /*
+ * Only valid for MIPS R6 but we can still end up
+ * here from a broken userland, so just tell the emulator
+ * that this is not a branch and let it break later on.
+ */
+ if (!cpu_has_mips_r6)
+ break;
+ *contpc = regs->cp0_epc + dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+
+ return 1;
+ case balc6_op:
+ if (!cpu_has_mips_r6)
+ break;
+ regs->regs[31] = regs->cp0_epc + 4;
+ *contpc = regs->cp0_epc + dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+
+ return 1;
+ case pop66_op:
+ if (!cpu_has_mips_r6)
+ break;
+ *contpc = regs->cp0_epc + dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+
+ return 1;
+ case pop76_op:
+ if (!cpu_has_mips_r6)
+ break;
+ if (!insn.i_format.rs)
+ regs->regs[31] = regs->cp0_epc + 4;
+ *contpc = regs->cp0_epc + dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+
+ return 1;
+#endif
+ case cop0_op:
+ case cop1_op:
+ /* Need to check for R6 bc1nez and bc1eqz branches */
+ if (cpu_has_mips_r6 &&
+ ((insn.i_format.rs == bc1eqz_op) ||
+ (insn.i_format.rs == bc1nez_op))) {
+ bit = 0;
+ fpr = &current->thread.fpu.fpr[insn.i_format.rt];
+ bit0 = get_fpr32(fpr, 0) & 0x1;
+ switch (insn.i_format.rs) {
+ case bc1eqz_op:
+ bit = bit0 == 0;
+ break;
+ case bc1nez_op:
+ bit = bit0 != 0;
+ break;
+ }
+ if (bit)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+
+ return 1;
+ }
+ /* R2/R6 compatible cop1 instruction */
+ fallthrough;
+ case cop2_op:
+ case cop1x_op:
+ if (insn.i_format.rs == bc_op) {
+ preempt_disable();
+ if (is_fpu_owner())
+ fcr31 = read_32bit_cp1_register(CP1_STATUS);
+ else
+ fcr31 = current->thread.fpu.fcr31;
+ preempt_enable();
+
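+ /*
+ * Locate the FCSR condition bit selected by the cc field:
+ * cc0 lives at bit 23, cc1..cc7 at bits 25..31, hence the
+ * extra +1 step for a non-zero cc number.
+ */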
+ bit = (insn.i_format.rt >> 2);
+ bit += (bit != 0);
+ bit += 23;
+ switch (insn.i_format.rt & 3) {
+ case 0: /* bc1f */
+ case 2: /* bc1fl */
+ if (~fcr31 & (1 << bit))
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case 1: /* bc1t */
+ case 3: /* bc1tl */
+ if (fcr31 & (1 << bit))
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ }
+ }
+ break;
+ }
+ return 0;
+}
+
+/*
+ * In the Linux kernel, we support selection of FPR format on the
+ * basis of the Status.FR bit. If an FPU is not present, the FR bit
+ * is hardwired to zero, which would imply a 32-bit FPU even for
+ * 64-bit CPUs, so we look at TIF_32BIT_FPREGS instead.
+ * FPU emulation is slow and bulky, and optimizing this function offers
+ * fairly sizeable benefits, so we try to be clever and make it return a
+ * constant whenever possible, that is on 64-bit kernels without O32
+ * compatibility enabled and on 32-bit kernels without 64-bit FPU support.
+ */
+static inline int cop1_64bit(struct pt_regs *xcp)
+{
+ if (IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_MIPS32_O32))
+ return 1;
+ else if (IS_ENABLED(CONFIG_32BIT) &&
+ !IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
+ return 0;
+
+ return !test_thread_flag(TIF_32BIT_FPREGS);
+}
+
+static inline bool hybrid_fprs(void)
+{
+ return test_thread_flag(TIF_HYBRID_FPREGS);
+}
+
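+/*
+ * FPR accessors. With a 64-bit FPU context (FR=1) every FPR is a full
+ * 64-bit register; in 32-bit mode (FR=0) odd-numbered registers are the
+ * upper halves of even/odd pairs, which is what the (x) & ~1 / (x) & 1
+ * indexing below implements.
+ */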
+#define SIFROMREG(si, x) \
+do { \
+ if (cop1_64bit(xcp) && !hybrid_fprs()) \
+ (si) = (int)get_fpr32(&ctx->fpr[x], 0); \
+ else \
+ (si) = (int)get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \
+} while (0)
+
+#define SITOREG(si, x) \
+do { \
+ if (cop1_64bit(xcp) && !hybrid_fprs()) { \
+ unsigned int i; \
+ set_fpr32(&ctx->fpr[x], 0, si); \
+ for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \
+ set_fpr32(&ctx->fpr[x], i, 0); \
+ } else { \
+ set_fpr32(&ctx->fpr[(x) & ~1], (x) & 1, si); \
+ } \
+} while (0)
+
+#define SIFROMHREG(si, x) ((si) = (int)get_fpr32(&ctx->fpr[x], 1))
+
+#define SITOHREG(si, x) \
+do { \
+ unsigned int i; \
+ set_fpr32(&ctx->fpr[x], 1, si); \
+ for (i = 2; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \
+ set_fpr32(&ctx->fpr[x], i, 0); \
+} while (0)
+
+#define DIFROMREG(di, x) \
+ ((di) = get_fpr64(&ctx->fpr[(x) & ~(cop1_64bit(xcp) ^ 1)], 0))
+
+#define DITOREG(di, x) \
+do { \
+ unsigned int fpr, i; \
+ fpr = (x) & ~(cop1_64bit(xcp) ^ 1); \
+ set_fpr64(&ctx->fpr[fpr], 0, di); \
+ for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val64); i++) \
+ set_fpr64(&ctx->fpr[fpr], i, 0); \
+} while (0)
+
+#define SPFROMREG(sp, x) SIFROMREG((sp).bits, x)
+#define SPTOREG(sp, x) SITOREG((sp).bits, x)
+#define DPFROMREG(dp, x) DIFROMREG((dp).bits, x)
+#define DPTOREG(dp, x) DITOREG((dp).bits, x)
+
+/*
+ * Emulate a CFC1 instruction.
+ */
+static inline void cop1_cfc(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ mips_instruction ir)
+{
+ u32 fcr31 = ctx->fcr31;
+ u32 value = 0;
+
+ switch (MIPSInst_RD(ir)) {
+ case FPCREG_CSR:
+ value = fcr31;
+ pr_debug("%p gpr[%d]<-csr=%08x\n",
+ (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
+ break;
+
+ case FPCREG_FENR:
+ if (!cpu_has_mips_r)
+ break;
+ value = (fcr31 >> (FPU_CSR_FS_S - MIPS_FENR_FS_S)) &
+ MIPS_FENR_FS;
+ value |= fcr31 & (FPU_CSR_ALL_E | FPU_CSR_RM);
+ pr_debug("%p gpr[%d]<-enr=%08x\n",
+ (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
+ break;
+
+ case FPCREG_FEXR:
+ if (!cpu_has_mips_r)
+ break;
+ value = fcr31 & (FPU_CSR_ALL_X | FPU_CSR_ALL_S);
+ pr_debug("%p gpr[%d]<-exr=%08x\n",
+ (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
+ break;
+
+ case FPCREG_FCCR:
+ if (!cpu_has_mips_r)
+ break;
+ value = (fcr31 >> (FPU_CSR_COND_S - MIPS_FCCR_COND0_S)) &
+ MIPS_FCCR_COND0;
+ value |= (fcr31 >> (FPU_CSR_COND1_S - MIPS_FCCR_COND1_S)) &
+ (MIPS_FCCR_CONDX & ~MIPS_FCCR_COND0);
+ pr_debug("%p gpr[%d]<-ccr=%08x\n",
+ (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
+ break;
+
+ case FPCREG_RID:
+ value = boot_cpu_data.fpu_id;
+ break;
+
+ default:
+ break;
+ }
+
+ if (MIPSInst_RT(ir))
+ xcp->regs[MIPSInst_RT(ir)] = value;
+}
+
+/*
+ * Emulate a CTC1 instruction.
+ */
+static inline void cop1_ctc(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ mips_instruction ir)
+{
+ u32 fcr31 = ctx->fcr31;
+ u32 value;
+ u32 mask;
+
+ if (MIPSInst_RT(ir) == 0)
+ value = 0;
+ else
+ value = xcp->regs[MIPSInst_RT(ir)];
+
+ switch (MIPSInst_RD(ir)) {
+ case FPCREG_CSR:
+ pr_debug("%p gpr[%d]->csr=%08x\n",
+ (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
+
+ /* Preserve read-only bits. */
+ mask = boot_cpu_data.fpu_msk31;
+ fcr31 = (value & ~mask) | (fcr31 & mask);
+ break;
+
+ case FPCREG_FENR:
+ if (!cpu_has_mips_r)
+ break;
+ pr_debug("%p gpr[%d]->enr=%08x\n",
+ (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
+ fcr31 &= ~(FPU_CSR_FS | FPU_CSR_ALL_E | FPU_CSR_RM);
+ fcr31 |= (value << (FPU_CSR_FS_S - MIPS_FENR_FS_S)) &
+ FPU_CSR_FS;
+ fcr31 |= value & (FPU_CSR_ALL_E | FPU_CSR_RM);
+ break;
+
+ case FPCREG_FEXR:
+ if (!cpu_has_mips_r)
+ break;
+ pr_debug("%p gpr[%d]->exr=%08x\n",
+ (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
+ fcr31 &= ~(FPU_CSR_ALL_X | FPU_CSR_ALL_S);
+ fcr31 |= value & (FPU_CSR_ALL_X | FPU_CSR_ALL_S);
+ break;
+
+ case FPCREG_FCCR:
+ if (!cpu_has_mips_r)
+ break;
+ pr_debug("%p gpr[%d]->ccr=%08x\n",
+ (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
+ fcr31 &= ~(FPU_CSR_CONDX | FPU_CSR_COND);
+ fcr31 |= (value << (FPU_CSR_COND_S - MIPS_FCCR_COND0_S)) &
+ FPU_CSR_COND;
+ fcr31 |= (value << (FPU_CSR_COND1_S - MIPS_FCCR_COND1_S)) &
+ FPU_CSR_CONDX;
+ break;
+
+ default:
+ break;
+ }
+
+ ctx->fcr31 = fcr31;
+}
+
+/*
+ * Emulate the single floating point instruction pointed at by EPC.
+ * Two instructions if the instruction is in a branch delay slot.
+ */
+
+static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ struct mm_decoded_insn dec_insn, void __user **fault_addr)
+{
+ unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc;
+ unsigned int cond, cbit, bit0;
+ mips_instruction ir;
+ int likely, pc_inc;
+ union fpureg *fpr;
+ u32 __user *wva;
+ u64 __user *dva;
+ u32 wval;
+ u64 dval;
+ int sig;
+
+ /*
+ * This gives gcc a gentle hint about what to expect in
+ * dec_insn so it can optimize better.
+ */
+ if (!cpu_has_mmips && dec_insn.micro_mips_mode)
+ unreachable();
+
+ /* XXX NEC Vr54xx bug workaround */
+ if (delay_slot(xcp)) {
+ if (dec_insn.micro_mips_mode) {
+ if (!mm_isBranchInstr(xcp, dec_insn, &contpc))
+ clear_delay_slot(xcp);
+ } else {
+ if (!isBranchInstr(xcp, dec_insn, &contpc))
+ clear_delay_slot(xcp);
+ }
+ }
+
+ if (delay_slot(xcp)) {
+ /*
+ * The instruction to be emulated is in a branch delay slot
+ * which means that we have to emulate the branch instruction
+ * BEFORE we do the cop1 instruction.
+ *
+ * This branch could be a COP1 branch, but in that case we
+ * would have had a trap for that instruction, and would not
+ * come through this route.
+ *
+ * Linux MIPS branch emulator operates on context, updating the
+ * cp0_epc.
+ */
+ ir = dec_insn.next_insn; /* process delay slot instr */
+ pc_inc = dec_insn.next_pc_inc;
+ } else {
+ ir = dec_insn.insn; /* process current instr */
+ pc_inc = dec_insn.pc_inc;
+ }
+
+ /*
+ * Since microMIPS FPU instructions are a subset of MIPS32 FPU
+ * instructions, we want to convert microMIPS FPU instructions
+ * into MIPS32 instructions so that we can reuse all of the
+ * FPU emulation code.
+ *
+ * NOTE: We cannot do this for branch instructions since they
+ * are not a subset. Example: Cannot emulate a 16-bit
+ * aligned target address with a MIPS32 instruction.
+ */
+ if (dec_insn.micro_mips_mode) {
+ /*
+ * If the next instruction is a 16-bit instruction, then
+ * it cannot be an FPU instruction. This could happen
+ * since we can be called for non-FPU instructions.
+ */
+ if ((pc_inc == 2) ||
+ (microMIPS32_to_MIPS32((union mips_instruction *)&ir)
+ == SIGILL))
+ return SIGILL;
+ }
+
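+ /*
+ * Emulation (re)entry point; a taken FP branch jumps back here to
+ * emulate the instruction sitting in its delay slot.
+ */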
+emul:
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0);
+ MIPS_FPU_EMU_INC_STATS(emulated);
+ switch (MIPSInst_OPCODE(ir)) {
+ case ldc1_op:
+ dva = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
+ MIPSInst_SIMM(ir));
+ MIPS_FPU_EMU_INC_STATS(loads);
+
+ if (!access_ok(dva, sizeof(u64))) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ *fault_addr = dva;
+ return SIGBUS;
+ }
+ if (__get_user(dval, dva)) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ *fault_addr = dva;
+ return SIGSEGV;
+ }
+ DITOREG(dval, MIPSInst_RT(ir));
+ break;
+
+ case sdc1_op:
+ dva = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
+ MIPSInst_SIMM(ir));
+ MIPS_FPU_EMU_INC_STATS(stores);
+ DIFROMREG(dval, MIPSInst_RT(ir));
+ if (!access_ok(dva, sizeof(u64))) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ *fault_addr = dva;
+ return SIGBUS;
+ }
+ if (__put_user(dval, dva)) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ *fault_addr = dva;
+ return SIGSEGV;
+ }
+ break;
+
+ case lwc1_op:
+ wva = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
+ MIPSInst_SIMM(ir));
+ MIPS_FPU_EMU_INC_STATS(loads);
+ if (!access_ok(wva, sizeof(u32))) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ *fault_addr = wva;
+ return SIGBUS;
+ }
+ if (__get_user(wval, wva)) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ *fault_addr = wva;
+ return SIGSEGV;
+ }
+ SITOREG(wval, MIPSInst_RT(ir));
+ break;
+
+ case swc1_op:
+ wva = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
+ MIPSInst_SIMM(ir));
+ MIPS_FPU_EMU_INC_STATS(stores);
+ SIFROMREG(wval, MIPSInst_RT(ir));
+ if (!access_ok(wva, sizeof(u32))) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ *fault_addr = wva;
+ return SIGBUS;
+ }
+ if (__put_user(wval, wva)) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ *fault_addr = wva;
+ return SIGSEGV;
+ }
+ break;
+
+ case cop1_op:
+ switch (MIPSInst_RS(ir)) {
+ case dmfc_op:
+ if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
+ return SIGILL;
+
+ /* copregister fs -> gpr[rt] */
+ if (MIPSInst_RT(ir) != 0) {
+ DIFROMREG(xcp->regs[MIPSInst_RT(ir)],
+ MIPSInst_RD(ir));
+ }
+ break;
+
+ case dmtc_op:
+ if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
+ return SIGILL;
+
+ /* copregister fs <- rt */
+ DITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
+ break;
+
+ case mfhc_op:
+ if (!cpu_has_mips_r2_r6)
+ return SIGILL;
+
+ /* copregister rd -> gpr[rt] */
+ if (MIPSInst_RT(ir) != 0) {
+ SIFROMHREG(xcp->regs[MIPSInst_RT(ir)],
+ MIPSInst_RD(ir));
+ }
+ break;
+
+ case mthc_op:
+ if (!cpu_has_mips_r2_r6)
+ return SIGILL;
+
+ /* copregister rd <- gpr[rt] */
+ SITOHREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
+ break;
+
+ case mfc_op:
+ /* copregister rd -> gpr[rt] */
+ if (MIPSInst_RT(ir) != 0) {
+ SIFROMREG(xcp->regs[MIPSInst_RT(ir)],
+ MIPSInst_RD(ir));
+ }
+ break;
+
+ case mtc_op:
+ /* copregister rd <- rt */
+ SITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
+ break;
+
+ case cfc_op:
+ /* cop control register rd -> gpr[rt] */
+ cop1_cfc(xcp, ctx, ir);
+ break;
+
+ case ctc_op:
+ /* copregister rd <- rt */
+ cop1_ctc(xcp, ctx, ir);
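+ /*
+ * FCSR Cause bits sit 5 bits above the corresponding
+ * Enable bits, so this checks whether the CTC1 just
+ * raised a cause whose exception is enabled.
+ */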
+ if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
+ return SIGFPE;
+ }
+ break;
+
+ case bc1eqz_op:
+ case bc1nez_op:
+ if (!cpu_has_mips_r6 || delay_slot(xcp))
+ return SIGILL;
+
+ likely = 0;
+ cond = 0;
+ fpr = &current->thread.fpu.fpr[MIPSInst_RT(ir)];
+ bit0 = get_fpr32(fpr, 0) & 0x1;
+ switch (MIPSInst_RS(ir)) {
+ case bc1eqz_op:
+ MIPS_FPU_EMU_INC_STATS(bc1eqz);
+ cond = bit0 == 0;
+ break;
+ case bc1nez_op:
+ MIPS_FPU_EMU_INC_STATS(bc1nez);
+ cond = bit0 != 0;
+ break;
+ }
+ goto branch_common;
+
+ case bc_op:
+ if (delay_slot(xcp))
+ return SIGILL;
+
+ if (cpu_has_mips_4_5_r)
+ cbit = fpucondbit[MIPSInst_RT(ir) >> 2];
+ else
+ cbit = FPU_CSR_COND;
+ cond = ctx->fcr31 & cbit;
+
+ likely = 0;
+ switch (MIPSInst_RT(ir) & 3) {
+ case bcfl_op:
+ if (cpu_has_mips_2_3_4_5_r)
+ likely = 1;
+ fallthrough;
+ case bcf_op:
+ cond = !cond;
+ break;
+ case bctl_op:
+ if (cpu_has_mips_2_3_4_5_r)
+ likely = 1;
+ fallthrough;
+ case bct_op:
+ break;
+ }
+branch_common:
+ MIPS_FPU_EMU_INC_STATS(branches);
+ set_delay_slot(xcp);
+ if (cond) {
+ /*
+ * Branch taken: emulate dslot instruction
+ */
+ unsigned long bcpc;
+
+ /*
+ * Remember EPC at the branch to point back
+ * at so that any delay-slot instruction
+ * signal is not silently ignored.
+ */
+ bcpc = xcp->cp0_epc;
+ xcp->cp0_epc += dec_insn.pc_inc;
+
+ contpc = MIPSInst_SIMM(ir);
+ ir = dec_insn.next_insn;
+ if (dec_insn.micro_mips_mode) {
+ contpc = (xcp->cp0_epc + (contpc << 1));
+
+ /* If 16-bit instruction, not FPU. */
+ if ((dec_insn.next_pc_inc == 2) ||
+ (microMIPS32_to_MIPS32((union mips_instruction *)&ir) == SIGILL)) {
+
+ /*
+ * Since this instruction will
+ * be put on the stack with
+ * 32-bit words, get around
+ * this problem by putting a
+ * NOP16 as the second one.
+ */
+ if (dec_insn.next_pc_inc == 2)
+ ir = (ir & (~0xffff)) | MM_NOP16;
+
+ /*
+ * Single step the non-CP1
+ * instruction in the dslot.
+ */
+ sig = mips_dsemul(xcp, ir,
+ bcpc, contpc);
+ if (sig < 0)
+ break;
+ if (sig)
+ xcp->cp0_epc = bcpc;
+ /*
+ * SIGILL forces out of
+ * the emulation loop.
+ */
+ return sig ? sig : SIGILL;
+ }
+ } else
+ contpc = (xcp->cp0_epc + (contpc << 2));
+
+ switch (MIPSInst_OPCODE(ir)) {
+ case lwc1_op:
+ case swc1_op:
+ goto emul;
+
+ case ldc1_op:
+ case sdc1_op:
+ if (cpu_has_mips_2_3_4_5_r)
+ goto emul;
+
+ goto bc_sigill;
+
+ case cop1_op:
+ goto emul;
+
+ case cop1x_op:
+ if (cpu_has_mips_4_5_64_r2_r6)
+ /* it's one of ours */
+ goto emul;
+
+ goto bc_sigill;
+
+ case spec_op:
+ switch (MIPSInst_FUNC(ir)) {
+ case movc_op:
+ if (cpu_has_mips_4_5_r)
+ goto emul;
+
+ goto bc_sigill;
+ }
+ break;
+
+ bc_sigill:
+ xcp->cp0_epc = bcpc;
+ return SIGILL;
+ }
+
+ /*
+ * Single step the non-cp1
+ * instruction in the dslot
+ */
+ sig = mips_dsemul(xcp, ir, bcpc, contpc);
+ if (sig < 0)
+ break;
+ if (sig)
+ xcp->cp0_epc = bcpc;
+ /* SIGILL forces out of the emulation loop. */
+ return sig ? sig : SIGILL;
+ } else if (likely) { /* branch not taken */
+ /*
+ * branch likely nullifies
+ * dslot if not taken
+ */
+ xcp->cp0_epc += dec_insn.pc_inc;
+ contpc += dec_insn.pc_inc;
+ /*
+ * else continue & execute
+ * dslot as normal insn
+ */
+ }
+ break;
+
+ default:
+ if (!(MIPSInst_RS(ir) & 0x10))
+ return SIGILL;
+
+ /* a real fpu computation instruction */
+ sig = fpu_emu(xcp, ctx, ir);
+ if (sig)
+ return sig;
+ }
+ break;
+
+ case cop1x_op:
+ if (!cpu_has_mips_4_5_64_r2_r6)
+ return SIGILL;
+
+ sig = fpux_emu(xcp, ctx, ir, fault_addr);
+ if (sig)
+ return sig;
+ break;
+
+ case spec_op:
+ if (!cpu_has_mips_4_5_r)
+ return SIGILL;
+
+ if (MIPSInst_FUNC(ir) != movc_op)
+ return SIGILL;
+ cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+ if (((ctx->fcr31 & cond) != 0) == ((MIPSInst_RT(ir) & 1) != 0))
+ xcp->regs[MIPSInst_RD(ir)] =
+ xcp->regs[MIPSInst_RS(ir)];
+ break;
+ default:
+ return SIGILL;
+ }
+
+ /* we did it !! */
+ xcp->cp0_epc = contpc;
+ clear_delay_slot(xcp);
+
+ return 0;
+}
+
+/*
+ * Conversion table from MIPS compare ops 48-63
+ * cond = ieee754dp_cmp(x,y,IEEE754_UN,sig);
+ */
+static const unsigned char cmptab[8] = {
+ 0, /* cmp_0 (sig) cmp_sf */
+ IEEE754_CUN, /* cmp_un (sig) cmp_ngle */
+ IEEE754_CEQ, /* cmp_eq (sig) cmp_seq */
+ IEEE754_CEQ | IEEE754_CUN, /* cmp_ueq (sig) cmp_ngl */
+ IEEE754_CLT, /* cmp_olt (sig) cmp_lt */
+ IEEE754_CLT | IEEE754_CUN, /* cmp_ult (sig) cmp_nge */
+ IEEE754_CLT | IEEE754_CEQ, /* cmp_ole (sig) cmp_le */
+ IEEE754_CLT | IEEE754_CEQ | IEEE754_CUN, /* cmp_ule (sig) cmp_ngt */
+};
+
+static const unsigned char negative_cmptab[8] = {
+ 0, /* Reserved */
+ IEEE754_CLT | IEEE754_CGT | IEEE754_CEQ,
+ IEEE754_CLT | IEEE754_CGT | IEEE754_CUN,
+ IEEE754_CLT | IEEE754_CGT,
+ /* Reserved */
+};
+
+
+/*
+ * Additional MIPS4 instructions
+ */
+
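+/*
+ * DEF3OP() expands to fpemu_<p>_<name>(r, s, t) = f3(f2(f1(s, t), r)),
+ * i.e. an unfused multiply-accumulate that rounds after each step while
+ * accumulating the IEEE exception cause/sticky bits raised along the way.
+ */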
+#define DEF3OP(name, p, f1, f2, f3) \
+static union ieee754##p fpemu_##p##_##name(union ieee754##p r, \
+ union ieee754##p s, union ieee754##p t) \
+{ \
+ struct _ieee754_csr ieee754_csr_save; \
+ s = f1(s, t); \
+ ieee754_csr_save = ieee754_csr; \
+ s = f2(s, r); \
+ ieee754_csr_save.cx |= ieee754_csr.cx; \
+ ieee754_csr_save.sx |= ieee754_csr.sx; \
+ s = f3(s); \
+ ieee754_csr.cx |= ieee754_csr_save.cx; \
+ ieee754_csr.sx |= ieee754_csr_save.sx; \
+ return s; \
+}
+
+static union ieee754dp fpemu_dp_recip(union ieee754dp d)
+{
+ return ieee754dp_div(ieee754dp_one(0), d);
+}
+
+static union ieee754dp fpemu_dp_rsqrt(union ieee754dp d)
+{
+ return ieee754dp_div(ieee754dp_one(0), ieee754dp_sqrt(d));
+}
+
+static union ieee754sp fpemu_sp_recip(union ieee754sp s)
+{
+ return ieee754sp_div(ieee754sp_one(0), s);
+}
+
+static union ieee754sp fpemu_sp_rsqrt(union ieee754sp s)
+{
+ return ieee754sp_div(ieee754sp_one(0), ieee754sp_sqrt(s));
+}
+
+DEF3OP(madd, sp, ieee754sp_mul, ieee754sp_add, );
+DEF3OP(msub, sp, ieee754sp_mul, ieee754sp_sub, );
+DEF3OP(nmadd, sp, ieee754sp_mul, ieee754sp_add, ieee754sp_neg);
+DEF3OP(nmsub, sp, ieee754sp_mul, ieee754sp_sub, ieee754sp_neg);
+DEF3OP(madd, dp, ieee754dp_mul, ieee754dp_add, );
+DEF3OP(msub, dp, ieee754dp_mul, ieee754dp_sub, );
+DEF3OP(nmadd, dp, ieee754dp_mul, ieee754dp_add, ieee754dp_neg);
+DEF3OP(nmsub, dp, ieee754dp_mul, ieee754dp_sub, ieee754dp_neg);
+
+static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ mips_instruction ir, void __user **fault_addr)
+{
+ unsigned int rcsr = 0; /* resulting csr */
+
+ MIPS_FPU_EMU_INC_STATS(cp1xops);
+
+ switch (MIPSInst_FMA_FFMT(ir)) {
+ case s_fmt:{ /* 0 */
+
+ union ieee754sp(*handler) (union ieee754sp, union ieee754sp, union ieee754sp);
+ union ieee754sp fd, fr, fs, ft;
+ u32 __user *va;
+ u32 val;
+
+ switch (MIPSInst_FUNC(ir)) {
+ case lwxc1_op:
+ va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
+ xcp->regs[MIPSInst_FT(ir)]);
+
+ MIPS_FPU_EMU_INC_STATS(loads);
+ if (!access_ok(va, sizeof(u32))) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ *fault_addr = va;
+ return SIGBUS;
+ }
+ if (__get_user(val, va)) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ *fault_addr = va;
+ return SIGSEGV;
+ }
+ SITOREG(val, MIPSInst_FD(ir));
+ break;
+
+ case swxc1_op:
+ va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
+ xcp->regs[MIPSInst_FT(ir)]);
+
+ MIPS_FPU_EMU_INC_STATS(stores);
+
+ SIFROMREG(val, MIPSInst_FS(ir));
+ if (!access_ok(va, sizeof(u32))) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ *fault_addr = va;
+ return SIGBUS;
+ }
+ if (put_user(val, va)) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ *fault_addr = va;
+ return SIGSEGV;
+ }
+ break;
+
+ case madd_s_op:
+ if (cpu_has_mac2008_only)
+ handler = ieee754sp_madd;
+ else
+ handler = fpemu_sp_madd;
+ goto scoptop;
+ case msub_s_op:
+ if (cpu_has_mac2008_only)
+ handler = ieee754sp_msub;
+ else
+ handler = fpemu_sp_msub;
+ goto scoptop;
+ case nmadd_s_op:
+ if (cpu_has_mac2008_only)
+ handler = ieee754sp_nmadd;
+ else
+ handler = fpemu_sp_nmadd;
+ goto scoptop;
+ case nmsub_s_op:
+ if (cpu_has_mac2008_only)
+ handler = ieee754sp_nmsub;
+ else
+ handler = fpemu_sp_nmsub;
+ goto scoptop;
+
+ scoptop:
+ SPFROMREG(fr, MIPSInst_FR(ir));
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ fd = (*handler) (fr, fs, ft);
+ SPTOREG(fd, MIPSInst_FD(ir));
+
+ copcsr:
+ if (ieee754_cxtest(IEEE754_INEXACT)) {
+ MIPS_FPU_EMU_INC_STATS(ieee754_inexact);
+ rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S;
+ }
+ if (ieee754_cxtest(IEEE754_UNDERFLOW)) {
+ MIPS_FPU_EMU_INC_STATS(ieee754_underflow);
+ rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S;
+ }
+ if (ieee754_cxtest(IEEE754_OVERFLOW)) {
+ MIPS_FPU_EMU_INC_STATS(ieee754_overflow);
+ rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S;
+ }
+ if (ieee754_cxtest(IEEE754_INVALID_OPERATION)) {
+ MIPS_FPU_EMU_INC_STATS(ieee754_invalidop);
+ rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S;
+ }
+
+ ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr;
+ if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
+ /*printk ("SIGFPE: FPU csr = %08x\n",
+ ctx->fcr31); */
+ return SIGFPE;
+ }
+
+ break;
+
+ default:
+ return SIGILL;
+ }
+ break;
+ }
+
+ case d_fmt:{ /* 1 */
+ union ieee754dp(*handler) (union ieee754dp, union ieee754dp, union ieee754dp);
+ union ieee754dp fd, fr, fs, ft;
+ u64 __user *va;
+ u64 val;
+
+ switch (MIPSInst_FUNC(ir)) {
+ case ldxc1_op:
+ va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
+ xcp->regs[MIPSInst_FT(ir)]);
+
+ MIPS_FPU_EMU_INC_STATS(loads);
+ if (!access_ok(va, sizeof(u64))) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ *fault_addr = va;
+ return SIGBUS;
+ }
+ if (__get_user(val, va)) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ *fault_addr = va;
+ return SIGSEGV;
+ }
+ DITOREG(val, MIPSInst_FD(ir));
+ break;
+
+ case sdxc1_op:
+ va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
+ xcp->regs[MIPSInst_FT(ir)]);
+
+ MIPS_FPU_EMU_INC_STATS(stores);
+ DIFROMREG(val, MIPSInst_FS(ir));
+ if (!access_ok(va, sizeof(u64))) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ *fault_addr = va;
+ return SIGBUS;
+ }
+ if (__put_user(val, va)) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ *fault_addr = va;
+ return SIGSEGV;
+ }
+ break;
+
+ case madd_d_op:
+ if (cpu_has_mac2008_only)
+ handler = ieee754dp_madd;
+ else
+ handler = fpemu_dp_madd;
+ goto dcoptop;
+ case msub_d_op:
+ if (cpu_has_mac2008_only)
+ handler = ieee754dp_msub;
+ else
+ handler = fpemu_dp_msub;
+ goto dcoptop;
+ case nmadd_d_op:
+ if (cpu_has_mac2008_only)
+ handler = ieee754dp_nmadd;
+ else
+ handler = fpemu_dp_nmadd;
+ goto dcoptop;
+ case nmsub_d_op:
+ if (cpu_has_mac2008_only)
+ handler = ieee754dp_nmsub;
+ else
+ handler = fpemu_dp_nmsub;
+ goto dcoptop;
+
+ dcoptop:
+ DPFROMREG(fr, MIPSInst_FR(ir));
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ fd = (*handler) (fr, fs, ft);
+ DPTOREG(fd, MIPSInst_FD(ir));
+ goto copcsr;
+
+ default:
+ return SIGILL;
+ }
+ break;
+ }
+
+ case 0x3:
+ if (MIPSInst_FUNC(ir) != pfetch_op)
+ return SIGILL;
+
+ /* ignore prefx operation */
+ break;
+
+ default:
+ return SIGILL;
+ }
+
+ return 0;
+}
+
+
+
+/*
+ * Emulate a single COP1 arithmetic instruction.
+ */
+static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ mips_instruction ir)
+{
+ int rfmt; /* resulting format */
+ unsigned int rcsr = 0; /* resulting csr */
+ unsigned int oldrm;
+ unsigned int cbit;
+ unsigned int cond;
+ union {
+ union ieee754dp d;
+ union ieee754sp s;
+ int w;
+ s64 l;
+ } rv; /* resulting value */
+ u64 bits;
+
+ MIPS_FPU_EMU_INC_STATS(cp1ops);
+ switch (rfmt = (MIPSInst_FFMT(ir) & 0xf)) {
+ case s_fmt: { /* 0 */
+ union {
+ union ieee754sp(*b) (union ieee754sp, union ieee754sp);
+ union ieee754sp(*u) (union ieee754sp);
+ } handler;
+ union ieee754sp fd, fs, ft;
+
+ switch (MIPSInst_FUNC(ir)) {
+ /* binary ops */
+ case fadd_op:
+ MIPS_FPU_EMU_INC_STATS(add_s);
+ handler.b = ieee754sp_add;
+ goto scopbop;
+ case fsub_op:
+ MIPS_FPU_EMU_INC_STATS(sub_s);
+ handler.b = ieee754sp_sub;
+ goto scopbop;
+ case fmul_op:
+ MIPS_FPU_EMU_INC_STATS(mul_s);
+ handler.b = ieee754sp_mul;
+ goto scopbop;
+ case fdiv_op:
+ MIPS_FPU_EMU_INC_STATS(div_s);
+ handler.b = ieee754sp_div;
+ goto scopbop;
+
+ /* unary ops */
+ case fsqrt_op:
+ if (!cpu_has_mips_2_3_4_5_r)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(sqrt_s);
+ handler.u = ieee754sp_sqrt;
+ goto scopuop;
+
+ /*
+ * Note that on some MIPS IV implementations such as the
+ * R5000 and R8000 the FSQRT and FRECIP instructions do not
+ * achieve full IEEE-754 accuracy - however this emulator does.
+ */
+ case frsqrt_op:
+ if (!cpu_has_mips_4_5_64_r2_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(rsqrt_s);
+ handler.u = fpemu_sp_rsqrt;
+ goto scopuop;
+
+ case frecip_op:
+ if (!cpu_has_mips_4_5_64_r2_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(recip_s);
+ handler.u = fpemu_sp_recip;
+ goto scopuop;
+
+ case fmovc_op:
+ if (!cpu_has_mips_4_5_r)
+ return SIGILL;
+
+ cond = fpucondbit[MIPSInst_FT(ir) >> 2];
+ if (((ctx->fcr31 & cond) != 0) !=
+ ((MIPSInst_FT(ir) & 1) != 0))
+ return 0;
+ SPFROMREG(rv.s, MIPSInst_FS(ir));
+ break;
+
+ case fmovz_op:
+ if (!cpu_has_mips_4_5_r)
+ return SIGILL;
+
+ if (xcp->regs[MIPSInst_FT(ir)] != 0)
+ return 0;
+ SPFROMREG(rv.s, MIPSInst_FS(ir));
+ break;
+
+ case fmovn_op:
+ if (!cpu_has_mips_4_5_r)
+ return SIGILL;
+
+ if (xcp->regs[MIPSInst_FT(ir)] == 0)
+ return 0;
+ SPFROMREG(rv.s, MIPSInst_FS(ir));
+ break;
+
+ case fseleqz_op:
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(seleqz_s);
+ SPFROMREG(rv.s, MIPSInst_FT(ir));
+ if (rv.w & 0x1)
+ rv.w = 0;
+ else
+ SPFROMREG(rv.s, MIPSInst_FS(ir));
+ break;
+
+ case fselnez_op:
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(selnez_s);
+ SPFROMREG(rv.s, MIPSInst_FT(ir));
+ if (rv.w & 0x1)
+ SPFROMREG(rv.s, MIPSInst_FS(ir));
+ else
+ rv.w = 0;
+ break;
+
+ case fmaddf_op: {
+ union ieee754sp ft, fs, fd;
+
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(maddf_s);
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(fd, MIPSInst_FD(ir));
+ rv.s = ieee754sp_maddf(fd, fs, ft);
+ goto copcsr;
+ }
+
+ case fmsubf_op: {
+ union ieee754sp ft, fs, fd;
+
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(msubf_s);
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(fd, MIPSInst_FD(ir));
+ rv.s = ieee754sp_msubf(fd, fs, ft);
+ goto copcsr;
+ }
+
+ case frint_op: {
+ union ieee754sp fs;
+
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(rint_s);
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.s = ieee754sp_rint(fs);
+ goto copcsr;
+ }
+
+ case fclass_op: {
+ union ieee754sp fs;
+
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(class_s);
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.w = ieee754sp_2008class(fs);
+ rfmt = w_fmt;
+ goto copcsr;
+ }
+
+ case fmin_op: {
+ union ieee754sp fs, ft;
+
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(min_s);
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.s = ieee754sp_fmin(fs, ft);
+ goto copcsr;
+ }
+
+ case fmina_op: {
+ union ieee754sp fs, ft;
+
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(mina_s);
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.s = ieee754sp_fmina(fs, ft);
+ goto copcsr;
+ }
+
+ case fmax_op: {
+ union ieee754sp fs, ft;
+
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(max_s);
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.s = ieee754sp_fmax(fs, ft);
+ goto copcsr;
+ }
+
+ case fmaxa_op: {
+ union ieee754sp fs, ft;
+
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(maxa_s);
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.s = ieee754sp_fmaxa(fs, ft);
+ goto copcsr;
+ }
+
+ case fabs_op:
+ MIPS_FPU_EMU_INC_STATS(abs_s);
+ handler.u = ieee754sp_abs;
+ goto scopuop;
+
+ case fneg_op:
+ MIPS_FPU_EMU_INC_STATS(neg_s);
+ handler.u = ieee754sp_neg;
+ goto scopuop;
+
+ case fmov_op:
+ /* an easy one */
+ MIPS_FPU_EMU_INC_STATS(mov_s);
+ SPFROMREG(rv.s, MIPSInst_FS(ir));
+ goto copcsr;
+
+ /* binary op on handler */
+scopbop:
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(ft, MIPSInst_FT(ir));
+
+ rv.s = (*handler.b) (fs, ft);
+ goto copcsr;
+scopuop:
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.s = (*handler.u) (fs);
+ goto copcsr;
+copcsr:
+ if (ieee754_cxtest(IEEE754_INEXACT)) {
+ MIPS_FPU_EMU_INC_STATS(ieee754_inexact);
+ rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S;
+ }
+ if (ieee754_cxtest(IEEE754_UNDERFLOW)) {
+ MIPS_FPU_EMU_INC_STATS(ieee754_underflow);
+ rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S;
+ }
+ if (ieee754_cxtest(IEEE754_OVERFLOW)) {
+ MIPS_FPU_EMU_INC_STATS(ieee754_overflow);
+ rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S;
+ }
+ if (ieee754_cxtest(IEEE754_ZERO_DIVIDE)) {
+ MIPS_FPU_EMU_INC_STATS(ieee754_zerodiv);
+ rcsr |= FPU_CSR_DIV_X | FPU_CSR_DIV_S;
+ }
+ if (ieee754_cxtest(IEEE754_INVALID_OPERATION)) {
+ MIPS_FPU_EMU_INC_STATS(ieee754_invalidop);
+ rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S;
+ }
+ break;
+
+ /* unary conv ops */
+ case fcvts_op:
+ return SIGILL; /* not defined */
+
+ case fcvtd_op:
+ MIPS_FPU_EMU_INC_STATS(cvt_d_s);
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.d = ieee754dp_fsp(fs);
+ rfmt = d_fmt;
+ goto copcsr;
+
+ case fcvtw_op:
+ MIPS_FPU_EMU_INC_STATS(cvt_w_s);
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.w = ieee754sp_tint(fs);
+ rfmt = w_fmt;
+ goto copcsr;
+
+ case fround_op:
+ case ftrunc_op:
+ case fceil_op:
+ case ffloor_op:
+ if (!cpu_has_mips_2_3_4_5_r)
+ return SIGILL;
+
+ if (MIPSInst_FUNC(ir) == fceil_op)
+ MIPS_FPU_EMU_INC_STATS(ceil_w_s);
+ if (MIPSInst_FUNC(ir) == ffloor_op)
+ MIPS_FPU_EMU_INC_STATS(floor_w_s);
+ if (MIPSInst_FUNC(ir) == fround_op)
+ MIPS_FPU_EMU_INC_STATS(round_w_s);
+ if (MIPSInst_FUNC(ir) == ftrunc_op)
+ MIPS_FPU_EMU_INC_STATS(trunc_w_s);
+
+ oldrm = ieee754_csr.rm;
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ ieee754_csr.rm = MIPSInst_FUNC(ir);
+ rv.w = ieee754sp_tint(fs);
+ ieee754_csr.rm = oldrm;
+ rfmt = w_fmt;
+ goto copcsr;
+
+ case fsel_op:
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(sel_s);
+ SPFROMREG(fd, MIPSInst_FD(ir));
+ if (fd.bits & 0x1)
+ SPFROMREG(rv.s, MIPSInst_FT(ir));
+ else
+ SPFROMREG(rv.s, MIPSInst_FS(ir));
+ break;
+
+ case fcvtl_op:
+ if (!cpu_has_mips_3_4_5_64_r2_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(cvt_l_s);
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.l = ieee754sp_tlong(fs);
+ rfmt = l_fmt;
+ goto copcsr;
+
+ case froundl_op:
+ case ftruncl_op:
+ case fceill_op:
+ case ffloorl_op:
+ if (!cpu_has_mips_3_4_5_64_r2_r6)
+ return SIGILL;
+
+ if (MIPSInst_FUNC(ir) == fceill_op)
+ MIPS_FPU_EMU_INC_STATS(ceil_l_s);
+ if (MIPSInst_FUNC(ir) == ffloorl_op)
+ MIPS_FPU_EMU_INC_STATS(floor_l_s);
+ if (MIPSInst_FUNC(ir) == froundl_op)
+ MIPS_FPU_EMU_INC_STATS(round_l_s);
+ if (MIPSInst_FUNC(ir) == ftruncl_op)
+ MIPS_FPU_EMU_INC_STATS(trunc_l_s);
+
+ oldrm = ieee754_csr.rm;
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ ieee754_csr.rm = MIPSInst_FUNC(ir);
+ rv.l = ieee754sp_tlong(fs);
+ ieee754_csr.rm = oldrm;
+ rfmt = l_fmt;
+ goto copcsr;
+
+ default:
+ if (!NO_R6EMU && MIPSInst_FUNC(ir) >= fcmp_op) {
+ unsigned int cmpop;
+ union ieee754sp fs, ft;
+
+ cmpop = MIPSInst_FUNC(ir) - fcmp_op;
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ rv.w = ieee754sp_cmp(fs, ft,
+ cmptab[cmpop & 0x7], cmpop & 0x8);
+ rfmt = -1;
+ if ((cmpop & 0x8) &&
+ ieee754_cxtest(IEEE754_INVALID_OPERATION))
+ rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+ else
+ goto copcsr;
+
+ } else
+ return SIGILL;
+ break;
+ }
+ break;
+ }
+
+ case d_fmt: {
+ union ieee754dp fd, fs, ft;
+ union {
+ union ieee754dp(*b) (union ieee754dp, union ieee754dp);
+ union ieee754dp(*u) (union ieee754dp);
+ } handler;
+
+ switch (MIPSInst_FUNC(ir)) {
+ /* binary ops */
+ case fadd_op:
+ MIPS_FPU_EMU_INC_STATS(add_d);
+ handler.b = ieee754dp_add;
+ goto dcopbop;
+ case fsub_op:
+ MIPS_FPU_EMU_INC_STATS(sub_d);
+ handler.b = ieee754dp_sub;
+ goto dcopbop;
+ case fmul_op:
+ MIPS_FPU_EMU_INC_STATS(mul_d);
+ handler.b = ieee754dp_mul;
+ goto dcopbop;
+ case fdiv_op:
+ MIPS_FPU_EMU_INC_STATS(div_d);
+ handler.b = ieee754dp_div;
+ goto dcopbop;
+
+ /* unary ops */
+ case fsqrt_op:
+ if (!cpu_has_mips_2_3_4_5_r)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(sqrt_d);
+ handler.u = ieee754dp_sqrt;
+ goto dcopuop;
+ /*
+ * Note that on some MIPS IV implementations such as the
+ * R5000 and R8000 the FSQRT and FRECIP instructions do not
+ * achieve full IEEE-754 accuracy - however this emulator does.
+ */
+ case frsqrt_op:
+ if (!cpu_has_mips_4_5_64_r2_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(rsqrt_d);
+ handler.u = fpemu_dp_rsqrt;
+ goto dcopuop;
+ case frecip_op:
+ if (!cpu_has_mips_4_5_64_r2_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(recip_d);
+ handler.u = fpemu_dp_recip;
+ goto dcopuop;
+ case fmovc_op:
+ if (!cpu_has_mips_4_5_r)
+ return SIGILL;
+
+ cond = fpucondbit[MIPSInst_FT(ir) >> 2];
+ if (((ctx->fcr31 & cond) != 0) !=
+ ((MIPSInst_FT(ir) & 1) != 0))
+ return 0;
+ DPFROMREG(rv.d, MIPSInst_FS(ir));
+ break;
+ case fmovz_op:
+ if (!cpu_has_mips_4_5_r)
+ return SIGILL;
+
+ if (xcp->regs[MIPSInst_FT(ir)] != 0)
+ return 0;
+ DPFROMREG(rv.d, MIPSInst_FS(ir));
+ break;
+ case fmovn_op:
+ if (!cpu_has_mips_4_5_r)
+ return SIGILL;
+
+ if (xcp->regs[MIPSInst_FT(ir)] == 0)
+ return 0;
+ DPFROMREG(rv.d, MIPSInst_FS(ir));
+ break;
+
+ case fseleqz_op:
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(seleqz_d);
+ DPFROMREG(rv.d, MIPSInst_FT(ir));
+ if (rv.l & 0x1)
+ rv.l = 0;
+ else
+ DPFROMREG(rv.d, MIPSInst_FS(ir));
+ break;
+
+ case fselnez_op:
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(selnez_d);
+ DPFROMREG(rv.d, MIPSInst_FT(ir));
+ if (rv.l & 0x1)
+ DPFROMREG(rv.d, MIPSInst_FS(ir));
+ else
+ rv.l = 0;
+ break;
+
+ case fmaddf_op: {
+ union ieee754dp ft, fs, fd;
+
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(maddf_d);
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(fd, MIPSInst_FD(ir));
+ rv.d = ieee754dp_maddf(fd, fs, ft);
+ goto copcsr;
+ }
+
+ case fmsubf_op: {
+ union ieee754dp ft, fs, fd;
+
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(msubf_d);
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(fd, MIPSInst_FD(ir));
+ rv.d = ieee754dp_msubf(fd, fs, ft);
+ goto copcsr;
+ }
+
+ case frint_op: {
+ union ieee754dp fs;
+
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(rint_d);
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.d = ieee754dp_rint(fs);
+ goto copcsr;
+ }
+
+ case fclass_op: {
+ union ieee754dp fs;
+
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(class_d);
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.l = ieee754dp_2008class(fs);
+ rfmt = l_fmt;
+ goto copcsr;
+ }
+
+ case fmin_op: {
+ union ieee754dp fs, ft;
+
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(min_d);
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.d = ieee754dp_fmin(fs, ft);
+ goto copcsr;
+ }
+
+ case fmina_op: {
+ union ieee754dp fs, ft;
+
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(mina_d);
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.d = ieee754dp_fmina(fs, ft);
+ goto copcsr;
+ }
+
+ case fmax_op: {
+ union ieee754dp fs, ft;
+
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(max_d);
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.d = ieee754dp_fmax(fs, ft);
+ goto copcsr;
+ }
+
+ case fmaxa_op: {
+ union ieee754dp fs, ft;
+
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(maxa_d);
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.d = ieee754dp_fmaxa(fs, ft);
+ goto copcsr;
+ }
+
+ case fabs_op:
+ MIPS_FPU_EMU_INC_STATS(abs_d);
+ handler.u = ieee754dp_abs;
+ goto dcopuop;
+
+ case fneg_op:
+ MIPS_FPU_EMU_INC_STATS(neg_d);
+ handler.u = ieee754dp_neg;
+ goto dcopuop;
+
+ case fmov_op:
+ /* an easy one */
+ MIPS_FPU_EMU_INC_STATS(mov_d);
+ DPFROMREG(rv.d, MIPSInst_FS(ir));
+ goto copcsr;
+
+ /* binary op on handler */
+dcopbop:
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(ft, MIPSInst_FT(ir));
+
+ rv.d = (*handler.b) (fs, ft);
+ goto copcsr;
+dcopuop:
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.d = (*handler.u) (fs);
+ goto copcsr;
+
+ /*
+ * unary conv ops
+ */
+ case fcvts_op:
+ MIPS_FPU_EMU_INC_STATS(cvt_s_d);
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.s = ieee754sp_fdp(fs);
+ rfmt = s_fmt;
+ goto copcsr;
+
+ case fcvtd_op:
+ return SIGILL; /* not defined */
+
+ case fcvtw_op:
+ MIPS_FPU_EMU_INC_STATS(cvt_w_d);
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.w = ieee754dp_tint(fs); /* wrong */
+ rfmt = w_fmt;
+ goto copcsr;
+
+ case fround_op:
+ case ftrunc_op:
+ case fceil_op:
+ case ffloor_op:
+ if (!cpu_has_mips_2_3_4_5_r)
+ return SIGILL;
+
+ if (MIPSInst_FUNC(ir) == fceil_op)
+ MIPS_FPU_EMU_INC_STATS(ceil_w_d);
+ if (MIPSInst_FUNC(ir) == ffloor_op)
+ MIPS_FPU_EMU_INC_STATS(floor_w_d);
+ if (MIPSInst_FUNC(ir) == fround_op)
+ MIPS_FPU_EMU_INC_STATS(round_w_d);
+ if (MIPSInst_FUNC(ir) == ftrunc_op)
+ MIPS_FPU_EMU_INC_STATS(trunc_w_d);
+
+ oldrm = ieee754_csr.rm;
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ ieee754_csr.rm = MIPSInst_FUNC(ir);
+ rv.w = ieee754dp_tint(fs);
+ ieee754_csr.rm = oldrm;
+ rfmt = w_fmt;
+ goto copcsr;
+
+ case fsel_op:
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(sel_d);
+ DPFROMREG(fd, MIPSInst_FD(ir));
+ if (fd.bits & 0x1)
+ DPFROMREG(rv.d, MIPSInst_FT(ir));
+ else
+ DPFROMREG(rv.d, MIPSInst_FS(ir));
+ break;
+
+ case fcvtl_op:
+ if (!cpu_has_mips_3_4_5_64_r2_r6)
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(cvt_l_d);
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.l = ieee754dp_tlong(fs);
+ rfmt = l_fmt;
+ goto copcsr;
+
+ case froundl_op:
+ case ftruncl_op:
+ case fceill_op:
+ case ffloorl_op:
+ if (!cpu_has_mips_3_4_5_64_r2_r6)
+ return SIGILL;
+
+ if (MIPSInst_FUNC(ir) == fceill_op)
+ MIPS_FPU_EMU_INC_STATS(ceil_l_d);
+ if (MIPSInst_FUNC(ir) == ffloorl_op)
+ MIPS_FPU_EMU_INC_STATS(floor_l_d);
+ if (MIPSInst_FUNC(ir) == froundl_op)
+ MIPS_FPU_EMU_INC_STATS(round_l_d);
+ if (MIPSInst_FUNC(ir) == ftruncl_op)
+ MIPS_FPU_EMU_INC_STATS(trunc_l_d);
+
+ oldrm = ieee754_csr.rm;
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ ieee754_csr.rm = MIPSInst_FUNC(ir);
+ rv.l = ieee754dp_tlong(fs);
+ ieee754_csr.rm = oldrm;
+ rfmt = l_fmt;
+ goto copcsr;
+
+ default:
+ if (!NO_R6EMU && MIPSInst_FUNC(ir) >= fcmp_op) {
+ unsigned int cmpop;
+ union ieee754dp fs, ft;
+
+ cmpop = MIPSInst_FUNC(ir) - fcmp_op;
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ rv.w = ieee754dp_cmp(fs, ft,
+ cmptab[cmpop & 0x7], cmpop & 0x8);
+ rfmt = -1;
+ if ((cmpop & 0x8) &&
+ ieee754_cxtest(IEEE754_INVALID_OPERATION))
+ rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+ else
+ goto copcsr;
+
+ } else
+ return SIGILL;
+ break;
+ }
+ break;
+ }
+
+ case w_fmt: {
+ union ieee754dp fs;
+
+ switch (MIPSInst_FUNC(ir)) {
+ case fcvts_op:
+ /* convert word to single precision real */
+ MIPS_FPU_EMU_INC_STATS(cvt_s_w);
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.s = ieee754sp_fint(fs.bits);
+ rfmt = s_fmt;
+ goto copcsr;
+ case fcvtd_op:
+ /* convert word to double precision real */
+ MIPS_FPU_EMU_INC_STATS(cvt_d_w);
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.d = ieee754dp_fint(fs.bits);
+ rfmt = d_fmt;
+ goto copcsr;
+ default: {
+ /* Emulating the new CMP.condn.fmt R6 instruction */
+#define CMPOP_MASK 0x7
+#define SIGN_BIT (0x1 << 3)
+#define PREDICATE_BIT (0x1 << 4)
+
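+ /*
+ * condn encoding: bits 0-2 select the comparison, bit 3 the
+ * signalling variant, bit 4 the OR/UNE/NE group; bit 5 must
+ * be clear.
+ */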
+ int cmpop = MIPSInst_FUNC(ir) & CMPOP_MASK;
+ int sig = MIPSInst_FUNC(ir) & SIGN_BIT;
+ union ieee754sp fs, ft;
+
+ /* This is an R6 only instruction */
+ if (!cpu_has_mips_r6 ||
+ (MIPSInst_FUNC(ir) & 0x20))
+ return SIGILL;
+
+ if (!sig) {
+ if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
+ switch (cmpop) {
+ case 0:
+ MIPS_FPU_EMU_INC_STATS(cmp_af_s);
+ break;
+ case 1:
+ MIPS_FPU_EMU_INC_STATS(cmp_un_s);
+ break;
+ case 2:
+ MIPS_FPU_EMU_INC_STATS(cmp_eq_s);
+ break;
+ case 3:
+ MIPS_FPU_EMU_INC_STATS(cmp_ueq_s);
+ break;
+ case 4:
+ MIPS_FPU_EMU_INC_STATS(cmp_lt_s);
+ break;
+ case 5:
+ MIPS_FPU_EMU_INC_STATS(cmp_ult_s);
+ break;
+ case 6:
+ MIPS_FPU_EMU_INC_STATS(cmp_le_s);
+ break;
+ case 7:
+ MIPS_FPU_EMU_INC_STATS(cmp_ule_s);
+ break;
+ }
+ } else {
+ switch (cmpop) {
+ case 1:
+ MIPS_FPU_EMU_INC_STATS(cmp_or_s);
+ break;
+ case 2:
+ MIPS_FPU_EMU_INC_STATS(cmp_une_s);
+ break;
+ case 3:
+ MIPS_FPU_EMU_INC_STATS(cmp_ne_s);
+ break;
+ }
+ }
+ } else {
+ if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
+ switch (cmpop) {
+ case 0:
+ MIPS_FPU_EMU_INC_STATS(cmp_saf_s);
+ break;
+ case 1:
+ MIPS_FPU_EMU_INC_STATS(cmp_sun_s);
+ break;
+ case 2:
+ MIPS_FPU_EMU_INC_STATS(cmp_seq_s);
+ break;
+ case 3:
+ MIPS_FPU_EMU_INC_STATS(cmp_sueq_s);
+ break;
+ case 4:
+ MIPS_FPU_EMU_INC_STATS(cmp_slt_s);
+ break;
+ case 5:
+ MIPS_FPU_EMU_INC_STATS(cmp_sult_s);
+ break;
+ case 6:
+ MIPS_FPU_EMU_INC_STATS(cmp_sle_s);
+ break;
+ case 7:
+ MIPS_FPU_EMU_INC_STATS(cmp_sule_s);
+ break;
+ }
+ } else {
+ switch (cmpop) {
+ case 1:
+ MIPS_FPU_EMU_INC_STATS(cmp_sor_s);
+ break;
+ case 2:
+ MIPS_FPU_EMU_INC_STATS(cmp_sune_s);
+ break;
+ case 3:
+ MIPS_FPU_EMU_INC_STATS(cmp_sne_s);
+ break;
+ }
+ }
+ }
+
+ /* fmt is w_fmt for single precision so fix it */
+ rfmt = s_fmt;
+ /* default to false */
+ rv.w = 0;
+
+ /* CMP.condn.S */
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(ft, MIPSInst_FT(ir));
+
+ /* positive predicates */
+ if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
+ if (ieee754sp_cmp(fs, ft, cmptab[cmpop],
+ sig))
+ rv.w = -1; /* true, all 1s */
+ if ((sig) &&
+ ieee754_cxtest(IEEE754_INVALID_OPERATION))
+ rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+ else
+ goto copcsr;
+ } else {
+ /* negative predicates */
+ switch (cmpop) {
+ case 1:
+ case 2:
+ case 3:
+ if (ieee754sp_cmp(fs, ft,
+ negative_cmptab[cmpop],
+ sig))
+ rv.w = -1; /* true, all 1s */
+ if (sig &&
+ ieee754_cxtest(IEEE754_INVALID_OPERATION))
+ rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+ else
+ goto copcsr;
+ break;
+ default:
+ /* Reserved R6 ops */
+ return SIGILL;
+ }
+ }
+ break;
+ }
+ }
+ break;
+ }
+
+ case l_fmt:
+
+ if (!cpu_has_mips_3_4_5_64_r2_r6)
+ return SIGILL;
+
+ DIFROMREG(bits, MIPSInst_FS(ir));
+
+ switch (MIPSInst_FUNC(ir)) {
+ case fcvts_op:
+ /* convert long to single precision real */
+ MIPS_FPU_EMU_INC_STATS(cvt_s_l);
+ rv.s = ieee754sp_flong(bits);
+ rfmt = s_fmt;
+ goto copcsr;
+ case fcvtd_op:
+ /* convert long to double precision real */
+ MIPS_FPU_EMU_INC_STATS(cvt_d_l);
+ rv.d = ieee754dp_flong(bits);
+ rfmt = d_fmt;
+ goto copcsr;
+ default: {
+ /* Emulating the new CMP.condn.fmt R6 instruction */
+ int cmpop = MIPSInst_FUNC(ir) & CMPOP_MASK;
+ int sig = MIPSInst_FUNC(ir) & SIGN_BIT;
+ union ieee754dp fs, ft;
+
+ if (!cpu_has_mips_r6 ||
+ (MIPSInst_FUNC(ir) & 0x20))
+ return SIGILL;
+
+ if (!sig) {
+ if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
+ switch (cmpop) {
+ case 0:
+ MIPS_FPU_EMU_INC_STATS(cmp_af_d);
+ break;
+ case 1:
+ MIPS_FPU_EMU_INC_STATS(cmp_un_d);
+ break;
+ case 2:
+ MIPS_FPU_EMU_INC_STATS(cmp_eq_d);
+ break;
+ case 3:
+ MIPS_FPU_EMU_INC_STATS(cmp_ueq_d);
+ break;
+ case 4:
+ MIPS_FPU_EMU_INC_STATS(cmp_lt_d);
+ break;
+ case 5:
+ MIPS_FPU_EMU_INC_STATS(cmp_ult_d);
+ break;
+ case 6:
+ MIPS_FPU_EMU_INC_STATS(cmp_le_d);
+ break;
+ case 7:
+ MIPS_FPU_EMU_INC_STATS(cmp_ule_d);
+ break;
+ }
+ } else {
+ switch (cmpop) {
+ case 1:
+ MIPS_FPU_EMU_INC_STATS(cmp_or_d);
+ break;
+ case 2:
+ MIPS_FPU_EMU_INC_STATS(cmp_une_d);
+ break;
+ case 3:
+ MIPS_FPU_EMU_INC_STATS(cmp_ne_d);
+ break;
+ }
+ }
+ } else {
+ if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
+ switch (cmpop) {
+ case 0:
+ MIPS_FPU_EMU_INC_STATS(cmp_saf_d);
+ break;
+ case 1:
+ MIPS_FPU_EMU_INC_STATS(cmp_sun_d);
+ break;
+ case 2:
+ MIPS_FPU_EMU_INC_STATS(cmp_seq_d);
+ break;
+ case 3:
+ MIPS_FPU_EMU_INC_STATS(cmp_sueq_d);
+ break;
+ case 4:
+ MIPS_FPU_EMU_INC_STATS(cmp_slt_d);
+ break;
+ case 5:
+ MIPS_FPU_EMU_INC_STATS(cmp_sult_d);
+ break;
+ case 6:
+ MIPS_FPU_EMU_INC_STATS(cmp_sle_d);
+ break;
+ case 7:
+ MIPS_FPU_EMU_INC_STATS(cmp_sule_d);
+ break;
+ }
+ } else {
+ switch (cmpop) {
+ case 1:
+ MIPS_FPU_EMU_INC_STATS(cmp_sor_d);
+ break;
+ case 2:
+ MIPS_FPU_EMU_INC_STATS(cmp_sune_d);
+ break;
+ case 3:
+ MIPS_FPU_EMU_INC_STATS(cmp_sne_d);
+ break;
+ }
+ }
+ }
+
+ /* fmt is l_fmt for double precision so fix it */
+ rfmt = d_fmt;
+ /* default to false */
+ rv.l = 0;
+
+ /* CMP.condn.D */
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(ft, MIPSInst_FT(ir));
+
+ /* positive predicates */
+ if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
+ if (ieee754dp_cmp(fs, ft,
+ cmptab[cmpop], sig))
+ rv.l = -1LL; /* true, all 1s */
+ if (sig &&
+ ieee754_cxtest(IEEE754_INVALID_OPERATION))
+ rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+ else
+ goto copcsr;
+ } else {
+ /* negative predicates */
+ switch (cmpop) {
+ case 1:
+ case 2:
+ case 3:
+ if (ieee754dp_cmp(fs, ft,
+ negative_cmptab[cmpop],
+ sig))
+ rv.l = -1LL; /* true, all 1s */
+ if (sig &&
+ ieee754_cxtest(IEEE754_INVALID_OPERATION))
+ rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+ else
+ goto copcsr;
+ break;
+ default:
+ /* Reserved R6 ops */
+ return SIGILL;
+ }
+ }
+ break;
+ }
+ }
+ break;
+
+ default:
+ return SIGILL;
+ }
+
+ /*
+ * Update the fpu CSR register for this operation.
+ * If an exception is required, generate a tidy SIGFPE exception,
+ * without updating the result register.
+ * Note: cause exception bits do not accumulate, they are rewritten
+ * for each op; only the flag/sticky bits accumulate.
+ */
+ ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr;
+ if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
+ /*printk ("SIGFPE: FPU csr = %08x\n",ctx->fcr31); */
+ return SIGFPE;
+ }
+
+ /*
+ * Now we can safely write the result back to the register file.
+ */
+ switch (rfmt) {
+ case -1:
+
+ if (cpu_has_mips_4_5_r)
+ cbit = fpucondbit[MIPSInst_FD(ir) >> 2];
+ else
+ cbit = FPU_CSR_COND;
+ if (rv.w)
+ ctx->fcr31 |= cbit;
+ else
+ ctx->fcr31 &= ~cbit;
+ break;
+
+ case d_fmt:
+ DPTOREG(rv.d, MIPSInst_FD(ir));
+ break;
+ case s_fmt:
+ SPTOREG(rv.s, MIPSInst_FD(ir));
+ break;
+ case w_fmt:
+ SITOREG(rv.w, MIPSInst_FD(ir));
+ break;
+ case l_fmt:
+ if (!cpu_has_mips_3_4_5_64_r2_r6)
+ return SIGILL;
+
+ DITOREG(rv.l, MIPSInst_FD(ir));
+ break;
+ default:
+ return SIGILL;
+ }
+
+ return 0;
+}
+
+/*
+ * Emulate FPU instructions.
+ *
+ * If we use FPU hardware, then we have typically been called to handle
+ * an unimplemented operation, such as where an operand is a NaN or
+ * denormalized. In that case exit the emulation loop after a single
+ * iteration so as to let hardware execute any subsequent instructions.
+ *
+ * If we have no FPU hardware or it has been disabled, then continue
+ * emulating floating-point instructions until one of these conditions
+ * has occurred:
+ *
+ * - a non-FPU instruction has been encountered,
+ *
+ * - an attempt to emulate has ended with a signal,
+ *
+ * - the ISA mode has been switched.
+ *
+ * We need to terminate the emulation loop if we got switched to the
+ * MIPS16 mode, whether supported or not, so that we do not attempt
+ * to emulate a MIPS16 instruction as a regular MIPS FPU instruction.
+ * Similarly if we got switched to the microMIPS mode and only the
+ * regular MIPS mode is supported, so that we do not attempt to emulate
+ * a microMIPS instruction as a regular MIPS FPU instruction. Or if
+ * we got switched to the regular MIPS mode and only the microMIPS mode
+ * is supported, so that we do not attempt to emulate a regular MIPS
+ * instruction that should cause an Address Error exception instead.
+ * For simplicity we always terminate upon an ISA mode switch.
+ */
+int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ int has_fpu, void __user **fault_addr)
+{
+ unsigned long oldepc, prevepc;
+ struct mm_decoded_insn dec_insn;
+ u16 instr[4];
+ u16 *instr_ptr;
+ int sig = 0;
+
+ /*
+ * Initialize context if it hasn't been used already, otherwise ensure
+ * it has been saved to struct thread_struct.
+ */
+ if (!init_fp_ctx(current))
+ lose_fpu(1);
+
+ oldepc = xcp->cp0_epc;
+ do {
+ prevepc = xcp->cp0_epc;
+
+ if (get_isa16_mode(prevepc) && cpu_has_mmips) {
+ /*
+ * Get next 2 microMIPS instructions and convert them
+ * into 32-bit instructions.
+ */
+ if ((get_user(instr[0], (u16 __user *)msk_isa16_mode(xcp->cp0_epc))) ||
+ (get_user(instr[1], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 2))) ||
+ (get_user(instr[2], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 4))) ||
+ (get_user(instr[3], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 6)))) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ return SIGBUS;
+ }
+ instr_ptr = instr;
+
+ /* Get first instruction. */
+ if (mm_insn_16bit(*instr_ptr)) {
+ /* Duplicate the half-word. */
+ dec_insn.insn = (*instr_ptr << 16) |
+ (*instr_ptr);
+ /* 16-bit instruction. */
+ dec_insn.pc_inc = 2;
+ instr_ptr += 1;
+ } else {
+ dec_insn.insn = (*instr_ptr << 16) |
+ *(instr_ptr+1);
+ /* 32-bit instruction. */
+ dec_insn.pc_inc = 4;
+ instr_ptr += 2;
+ }
+ /* Get second instruction. */
+ if (mm_insn_16bit(*instr_ptr)) {
+ /* Duplicate the half-word. */
+ dec_insn.next_insn = (*instr_ptr << 16) |
+ (*instr_ptr);
+ /* 16-bit instruction. */
+ dec_insn.next_pc_inc = 2;
+ } else {
+ dec_insn.next_insn = (*instr_ptr << 16) |
+ *(instr_ptr+1);
+ /* 32-bit instruction. */
+ dec_insn.next_pc_inc = 4;
+ }
+ dec_insn.micro_mips_mode = 1;
+ } else {
+ if ((get_user(dec_insn.insn,
+ (mips_instruction __user *) xcp->cp0_epc)) ||
+ (get_user(dec_insn.next_insn,
+ (mips_instruction __user *)(xcp->cp0_epc+4)))) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ return SIGBUS;
+ }
+ dec_insn.pc_inc = 4;
+ dec_insn.next_pc_inc = 4;
+ dec_insn.micro_mips_mode = 0;
+ }
+
+ if ((dec_insn.insn == 0) ||
+ ((dec_insn.pc_inc == 2) &&
+ ((dec_insn.insn & 0xffff) == MM_NOP16)))
+ xcp->cp0_epc += dec_insn.pc_inc; /* Skip NOPs */
+ else {
+ /*
+ * The 'ieee754_csr' is an alias of ctx->fcr31.
+ * No need to copy ctx->fcr31 to ieee754_csr.
+ */
+ sig = cop1Emulate(xcp, ctx, dec_insn, fault_addr);
+ }
+
+ if (has_fpu)
+ break;
+ if (sig)
+ break;
+ /*
+ * We have to check for the ISA bit explicitly here,
+ * because `get_isa16_mode' may return 0 if support
+ * for code compression has been globally disabled,
+ * or otherwise we may produce the wrong signal or
+ * even proceed successfully where we must not.
+ */
+ if ((xcp->cp0_epc ^ prevepc) & 0x1)
+ break;
+
+ cond_resched();
+ } while (xcp->cp0_epc > prevepc);
+
+ /* SIGILL indicates a non-fpu instruction */
+ if (sig == SIGILL && xcp->cp0_epc != oldepc)
+ /* but if EPC has advanced, then ignore it */
+ sig = 0;
+
+ return sig;
+}
diff --git a/arch/mips/math-emu/dp_2008class.c b/arch/mips/math-emu/dp_2008class.c
new file mode 100644
index 000000000..81a0a63b1
--- /dev/null
+++ b/arch/mips/math-emu/dp_2008class.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: CLASS.f
+ * FPR[fd] = class(FPR[fs])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#include "ieee754dp.h"
+
+int ieee754dp_2008class(union ieee754dp x)
+{
+ COMPXDP;
+
+ EXPLODEXDP;
+
+ /*
+ * 10 bit mask as follows:
+ *
+ * bit0 = SNAN
+ * bit1 = QNAN
+ * bit2 = -INF
+ * bit3 = -NORM
+ * bit4 = -DNORM
+ * bit5 = -ZERO
+ * bit6 = INF
+ * bit7 = NORM
+ * bit8 = DNORM
+ * bit9 = ZERO
+ */
+
+ switch (xc) {
+ case IEEE754_CLASS_SNAN:
+ return 0x01;
+ case IEEE754_CLASS_QNAN:
+ return 0x02;
+ case IEEE754_CLASS_INF:
+ return 0x04 << (xs ? 0 : 4);
+ case IEEE754_CLASS_NORM:
+ return 0x08 << (xs ? 0 : 4);
+ case IEEE754_CLASS_DNORM:
+ return 0x10 << (xs ? 0 : 4);
+ case IEEE754_CLASS_ZERO:
+ return 0x20 << (xs ? 0 : 4);
+ default:
+ pr_err("Unknown class: %d\n", xc);
+ return 0;
+ }
+}
diff --git a/arch/mips/math-emu/dp_add.c b/arch/mips/math-emu/dp_add.c
new file mode 100644
index 000000000..78504736b
--- /dev/null
+++ b/arch/mips/math-emu/dp_add.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * double precision: common utilities
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_add(union ieee754dp x, union ieee754dp y)
+{
+ int s;
+
+ COMPXDP;
+ COMPYDP;
+
+ EXPLODEXDP;
+ EXPLODEYDP;
+
+ ieee754_clearcx();
+
+ FLUSHXDP;
+ FLUSHYDP;
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754dp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754dp_nanxcpt(x);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return x;
+
+
+ /*
+ * Infinity handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ if (xs == ys)
+ return x;
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754dp_indef();
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ return x;
+
+ /*
+ * Zero handling
+ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ if (xs == ys)
+ return x;
+ else
+ return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ DPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ DPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ break;
+ }
+ assert(xm & DP_HIDDEN_BIT);
+ assert(ym & DP_HIDDEN_BIT);
+
+ /*
+ * Provide guard, round and sticky bit space.
+ */
+ xm <<= 3;
+ ym <<= 3;
+
+ if (xe > ye) {
+ /*
+ * Have to shift y fraction right to align.
+ */
+ s = xe - ye;
+ ym = XDPSRS(ym, s);
+ ye += s;
+ } else if (ye > xe) {
+ /*
+ * Have to shift x fraction right to align.
+ */
+ s = ye - xe;
+ xm = XDPSRS(xm, s);
+ xe += s;
+ }
+ assert(xe == ye);
+ assert(xe <= DP_EMAX);
+
+ if (xs == ys) {
+ /*
+ * Generate 28 bit result of adding two 27 bit numbers
+ * leaving result in xm, xs and xe.
+ */
+ xm = xm + ym;
+
+ if (xm >> (DP_FBITS + 1 + 3)) { /* carry out */
+ xm = XDPSRS1(xm);
+ xe++;
+ }
+ } else {
+ if (xm >= ym) {
+ xm = xm - ym;
+ } else {
+ xm = ym - xm;
+ xs = ys;
+ }
+ if (xm == 0)
+ return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+ /*
+ * Normalize to rounding precision.
+ */
+ while ((xm >> (DP_FBITS + 3)) == 0) {
+ xm <<= 1;
+ xe--;
+ }
+ }
+
+ return ieee754dp_format(xs, xe, xm);
+}
diff --git a/arch/mips/math-emu/dp_cmp.c b/arch/mips/math-emu/dp_cmp.c
new file mode 100644
index 000000000..a59680b03
--- /dev/null
+++ b/arch/mips/math-emu/dp_cmp.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * double precision: common utilities
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754dp.h"
+
+int ieee754dp_cmp(union ieee754dp x, union ieee754dp y, int cmp, int sig)
+{
+ s64 vx;
+ s64 vy;
+
+ COMPXDP;
+ COMPYDP;
+
+ EXPLODEXDP;
+ EXPLODEYDP;
+ FLUSHXDP;
+ FLUSHYDP;
+ ieee754_clearcx(); /* Even clear inexact flag here */
+
+ if (ieee754_class_nan(xc) || ieee754_class_nan(yc)) {
+ if (sig ||
+ xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN)
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return (cmp & IEEE754_CUN) != 0;
+ } else {
+ vx = x.bits;
+ vy = y.bits;
+
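+ /*
+ * Fold the sign so that comparing the raw bit patterns as signed
+ * integers matches the IEEE ordering (and -0 compares equal to +0).
+ */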
+ if (vx < 0)
+ vx = -vx ^ DP_SIGN_BIT;
+ if (vy < 0)
+ vy = -vy ^ DP_SIGN_BIT;
+
+ if (vx < vy)
+ return (cmp & IEEE754_CLT) != 0;
+ else if (vx == vy)
+ return (cmp & IEEE754_CEQ) != 0;
+ else
+ return (cmp & IEEE754_CGT) != 0;
+ }
+}
diff --git a/arch/mips/math-emu/dp_div.c b/arch/mips/math-emu/dp_div.c
new file mode 100644
index 000000000..ac1ecc462
--- /dev/null
+++ b/arch/mips/math-emu/dp_div.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * double precision: common utilities
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_div(union ieee754dp x, union ieee754dp y)
+{
+ u64 rm;
+ int re;
+ u64 bm;
+
+ COMPXDP;
+ COMPYDP;
+
+ EXPLODEXDP;
+ EXPLODEYDP;
+
+ ieee754_clearcx();
+
+ FLUSHXDP;
+ FLUSHYDP;
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754dp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754dp_nanxcpt(x);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return x;
+
+
+ /*
+ * Infinity handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754dp_indef();
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ return ieee754dp_zero(xs ^ ys);
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ return ieee754dp_inf(xs ^ ys);
+
+ /*
+ * Zero handling
+ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754dp_indef();
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ ieee754_setcx(IEEE754_ZERO_DIVIDE);
+ return ieee754dp_inf(xs ^ ys);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ return ieee754dp_zero(xs == ys ? 0 : 1);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ DPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ DPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ break;
+ }
+ assert(xm & DP_HIDDEN_BIT);
+ assert(ym & DP_HIDDEN_BIT);
+
+ /* provide rounding space */
+ xm <<= 3;
+ ym <<= 3;
+
+ /* now the dirty work */
+
+ rm = 0;
+ re = xe - ye;
+
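+	/*
+	 * Restoring long division: each pass produces one quotient bit in
+	 * rm, subtracting ym whenever it fits and shifting the remainder
+	 * left afterwards.
+	 */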
+ for (bm = DP_MBIT(DP_FBITS + 2); bm; bm >>= 1) {
+ if (xm >= ym) {
+ xm -= ym;
+ rm |= bm;
+ if (xm == 0)
+ break;
+ }
+ xm <<= 1;
+ }
+
+ rm <<= 1;
+ if (xm)
+ rm |= 1; /* have remainder, set sticky */
+
+ assert(rm);
+
+ /*
+	 * Normalise rm to rounding precision.
+ */
+ while ((rm >> (DP_FBITS + 3)) == 0) {
+ rm <<= 1;
+ re--;
+ }
+
+ return ieee754dp_format(xs == ys ? 0 : 1, re, rm);
+}
diff --git a/arch/mips/math-emu/dp_fint.c b/arch/mips/math-emu/dp_fint.c
new file mode 100644
index 000000000..996b15ba0
--- /dev/null
+++ b/arch/mips/math-emu/dp_fint.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * double precision: common utilities
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_fint(int x)
+{
+ u64 xm;
+ int xe;
+ int xs;
+
+ ieee754_clearcx();
+
+ if (x == 0)
+ return ieee754dp_zero(0);
+ if (x == 1 || x == -1)
+ return ieee754dp_one(x < 0);
+ if (x == 10 || x == -10)
+ return ieee754dp_ten(x < 0);
+
+ xs = (x < 0);
+ if (xs) {
+ if (x == (1 << 31))
+ xm = ((unsigned) 1 << 31); /* max neg can't be safely negated */
+ else
+ xm = -x;
+ } else {
+ xm = x;
+ }
+
+ /* normalize - result can never be inexact or overflow */
+ xe = DP_FBITS;
+ while ((xm >> DP_FBITS) == 0) {
+ xm <<= 1;
+ xe--;
+ }
+ return builddp(xs, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
+}
diff --git a/arch/mips/math-emu/dp_flong.c b/arch/mips/math-emu/dp_flong.c
new file mode 100644
index 000000000..681ee00c9
--- /dev/null
+++ b/arch/mips/math-emu/dp_flong.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * double precision: common utilities
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_flong(s64 x)
+{
+ u64 xm;
+ int xe;
+ int xs;
+
+ ieee754_clearcx();
+
+ if (x == 0)
+ return ieee754dp_zero(0);
+ if (x == 1 || x == -1)
+ return ieee754dp_one(x < 0);
+ if (x == 10 || x == -10)
+ return ieee754dp_ten(x < 0);
+
+ xs = (x < 0);
+ if (xs) {
+ if (x == (1ULL << 63))
+ xm = (1ULL << 63); /* max neg can't be safely negated */
+ else
+ xm = -x;
+ } else {
+ xm = x;
+ }
+
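+	/*
+	 * Unlike the 32-bit case, a 64-bit integer can hold more than the
+	 * 53 significant bits of a double, so work in guard/round/sticky
+	 * extended precision and let ieee754dp_format() round the result.
+	 */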
+ /* normalize */
+ xe = DP_FBITS + 3;
+ if (xm >> (DP_FBITS + 1 + 3)) {
+ /* shunt out overflow bits */
+ while (xm >> (DP_FBITS + 1 + 3)) {
+ XDPSRSX1();
+ }
+ } else {
+ /* normalize in grs extended double precision */
+ while ((xm >> (DP_FBITS + 3)) == 0) {
+ xm <<= 1;
+ xe--;
+ }
+ }
+
+ return ieee754dp_format(xs, xe, xm);
+}
diff --git a/arch/mips/math-emu/dp_fmax.c b/arch/mips/math-emu/dp_fmax.c
new file mode 100644
index 000000000..126ec90bb
--- /dev/null
+++ b/arch/mips/math-emu/dp_fmax.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: MAX{,A}.f
+ * MAX : Scalar Floating-Point Maximum
+ * MAXA: Scalar Floating-Point argument with Maximum Absolute Value
+ *
+ * MAX.D : FPR[fd] = maxNum(FPR[fs],FPR[ft])
+ * MAXA.D: FPR[fd] = maxNumMag(FPR[fs],FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
+{
+ COMPXDP;
+ COMPYDP;
+
+ EXPLODEXDP;
+ EXPLODEYDP;
+
+ FLUSHXDP;
+ FLUSHYDP;
+
+ ieee754_clearcx();
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754dp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754dp_nanxcpt(x);
+
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+	 * The cases of exactly one input quiet NaN (numbers are
+	 * preferred to NaNs as return values)
+ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return y;
+
+ /*
+ * Infinity and zero handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return xs ? y : x;
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ return ys ? x : y;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ return ieee754dp_zero(xs & ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ DPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ DPDNORMX;
+ }
+
+ /* Finally get to do some computation */
+
+ assert(xm & DP_HIDDEN_BIT);
+ assert(ym & DP_HIDDEN_BIT);
+
+ /* Compare signs */
+ if (xs > ys)
+ return y;
+ else if (xs < ys)
+ return x;
+
+ /* Signs of inputs are equal, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ }
+
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return y;
+ return x;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
+ if (xm <= ym)
+ return x;
+ return y;
+}
+
+union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+{
+ COMPXDP;
+ COMPYDP;
+
+ EXPLODEXDP;
+ EXPLODEYDP;
+
+ FLUSHXDP;
+ FLUSHYDP;
+
+ ieee754_clearcx();
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754dp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754dp_nanxcpt(x);
+
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+	 * The cases of exactly one input quiet NaN (numbers are
+	 * preferred to NaNs as return values)
+ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return y;
+
+ /*
+ * Infinity and zero handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754dp_inf(xs & ys);
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ return ieee754dp_zero(xs & ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ DPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ DPDNORMX;
+ }
+
+ /* Finally get to do some computation */
+
+ assert(xm & DP_HIDDEN_BIT);
+ assert(ym & DP_HIDDEN_BIT);
+
+ /* Compare exponent */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+
+ /* Compare mantissa */
+ if (xm < ym)
+ return y;
+ else if (xm > ym)
+ return x;
+ else if (xs == 0)
+ return x;
+ return y;
+}
diff --git a/arch/mips/math-emu/dp_fmin.c b/arch/mips/math-emu/dp_fmin.c
new file mode 100644
index 000000000..35ded4c45
--- /dev/null
+++ b/arch/mips/math-emu/dp_fmin.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: MIN{,A}.f
+ * MIN : Scalar Floating-Point Minimum
+ * MINA: Scalar Floating-Point argument with Minimum Absolute Value
+ *
+ * MIN.D : FPR[fd] = minNum(FPR[fs],FPR[ft])
+ * MINA.D: FPR[fd] = minNumMag(FPR[fs],FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
+{
+ COMPXDP;
+ COMPYDP;
+
+ EXPLODEXDP;
+ EXPLODEYDP;
+
+ FLUSHXDP;
+ FLUSHYDP;
+
+ ieee754_clearcx();
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754dp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754dp_nanxcpt(x);
+
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+	 * The cases of exactly one input quiet NaN (numbers are
+	 * preferred to NaNs as return values)
+ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return y;
+
+ /*
+ * Infinity and zero handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return xs ? x : y;
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ return ys ? y : x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ return ieee754dp_zero(xs | ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ DPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ DPDNORMX;
+ }
+
+ /* Finally get to do some computation */
+
+ assert(xm & DP_HIDDEN_BIT);
+ assert(ym & DP_HIDDEN_BIT);
+
+ /* Compare signs */
+ if (xs > ys)
+ return x;
+ else if (xs < ys)
+ return y;
+
+ /* Signs of inputs are the same, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ }
+
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return x;
+ return y;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
+ if (xm <= ym)
+ return y;
+ return x;
+}
+
+union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
+{
+ COMPXDP;
+ COMPYDP;
+
+ EXPLODEXDP;
+ EXPLODEYDP;
+
+ FLUSHXDP;
+ FLUSHYDP;
+
+ ieee754_clearcx();
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754dp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754dp_nanxcpt(x);
+
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+	 * The cases of exactly one input quiet NaN (numbers are
+	 * preferred to NaNs as return values)
+ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return y;
+
+ /*
+ * Infinity and zero handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754dp_inf(xs | ys);
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ return ieee754dp_zero(xs | ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ DPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ DPDNORMX;
+ }
+
+ /* Finally get to do some computation */
+
+ assert(xm & DP_HIDDEN_BIT);
+ assert(ym & DP_HIDDEN_BIT);
+
+ /* Compare exponent */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+
+ /* Compare mantissa */
+ if (xm < ym)
+ return x;
+ else if (xm > ym)
+ return y;
+ else if (xs == 1)
+ return x;
+ return y;
+}
diff --git a/arch/mips/math-emu/dp_fsp.c b/arch/mips/math-emu/dp_fsp.c
new file mode 100644
index 000000000..be8a929c4
--- /dev/null
+++ b/arch/mips/math-emu/dp_fsp.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * double precision: common utilities
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754sp.h"
+#include "ieee754dp.h"
+
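+/*
+ * Widen a single-precision NaN: keep the sign and move the payload into
+ * the top bits of the double-precision mantissa field.
+ */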
+static inline union ieee754dp ieee754dp_nan_fsp(int xs, u64 xm)
+{
+ return builddp(xs, DP_EMAX + 1 + DP_EBIAS,
+ xm << (DP_FBITS - SP_FBITS));
+}
+
+union ieee754dp ieee754dp_fsp(union ieee754sp x)
+{
+ COMPXSP;
+
+ EXPLODEXSP;
+
+ ieee754_clearcx();
+
+ FLUSHXSP;
+
+ switch (xc) {
+ case IEEE754_CLASS_SNAN:
+ return ieee754dp_nanxcpt(ieee754dp_nan_fsp(xs, xm));
+
+ case IEEE754_CLASS_QNAN:
+ return ieee754dp_nan_fsp(xs, xm);
+
+ case IEEE754_CLASS_INF:
+ return ieee754dp_inf(xs);
+
+ case IEEE754_CLASS_ZERO:
+ return ieee754dp_zero(xs);
+
+ case IEEE754_CLASS_DNORM:
+ /* normalize */
+ while ((xm >> SP_FBITS) == 0) {
+ xm <<= 1;
+ xe--;
+ }
+ break;
+
+ case IEEE754_CLASS_NORM:
+ break;
+ }
+
+ /*
+	 * Can't possibly overflow, underflow, or need rounding
+ */
+
+ /* drop the hidden bit */
+ xm &= ~SP_HIDDEN_BIT;
+
+ return builddp(xs, xe + DP_EBIAS,
+ (u64) xm << (DP_FBITS - SP_FBITS));
+}
diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
new file mode 100644
index 000000000..931e66f68
--- /dev/null
+++ b/arch/mips/math-emu/dp_maddf.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: MADDF.f (Fused Multiply Add)
+ * MADDF.fmt: FPR[fd] = FPR[fd] + (FPR[fs] x FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#include "ieee754dp.h"
+
+
+/*
+ * 128-bit logical shift right; any bits shifted out are ORed into the
+ * result's LSB so they survive as a sticky indication.
+ */
+static void srl128(u64 *hptr, u64 *lptr, int count)
+{
+ u64 low;
+
+ if (count >= 128) {
+ *lptr = *hptr != 0 || *lptr != 0;
+ *hptr = 0;
+ } else if (count >= 64) {
+ if (count == 64) {
+ *lptr = *hptr | (*lptr != 0);
+ } else {
+ low = *lptr;
+ *lptr = *hptr >> (count - 64);
+ *lptr |= (*hptr << (128 - count)) != 0 || low != 0;
+ }
+ *hptr = 0;
+ } else {
+ low = *lptr;
+ *lptr = low >> count | *hptr << (64 - count);
+ *lptr |= (low << (64 - count)) != 0;
+ *hptr = *hptr >> count;
+ }
+}
+
+static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y, enum maddf_flags flags)
+{
+ int re;
+ int rs;
+ unsigned int lxm;
+ unsigned int hxm;
+ unsigned int lym;
+ unsigned int hym;
+ u64 lrm;
+ u64 hrm;
+ u64 lzm;
+ u64 hzm;
+ u64 t;
+ u64 at;
+ int s;
+
+ COMPXDP;
+ COMPYDP;
+ COMPZDP;
+
+ EXPLODEXDP;
+ EXPLODEYDP;
+ EXPLODEZDP;
+
+ FLUSHXDP;
+ FLUSHYDP;
+ FLUSHZDP;
+
+ ieee754_clearcx();
+
+ rs = xs ^ ys;
+ if (flags & MADDF_NEGATE_PRODUCT)
+ rs ^= 1;
+ if (flags & MADDF_NEGATE_ADDITION)
+ zs ^= 1;
+
+ /*
+ * Handle the cases when at least one of x, y or z is a NaN.
+ * Order of precedence is sNaN, qNaN and z, x, y.
+ */
+ if (zc == IEEE754_CLASS_SNAN)
+ return ieee754dp_nanxcpt(z);
+ if (xc == IEEE754_CLASS_SNAN)
+ return ieee754dp_nanxcpt(x);
+ if (yc == IEEE754_CLASS_SNAN)
+ return ieee754dp_nanxcpt(y);
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (xc == IEEE754_CLASS_QNAN)
+ return x;
+ if (yc == IEEE754_CLASS_QNAN)
+ return y;
+
+ if (zc == IEEE754_CLASS_DNORM)
+ DPDNORMZ;
+ /* ZERO z cases are handled separately below */
+
+ switch (CLPAIR(xc, yc)) {
+
+ /*
+ * Infinity handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754dp_indef();
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ if ((zc == IEEE754_CLASS_INF) && (zs != rs)) {
+ /*
+ * Cases of addition of infinities with opposite signs
+ * or subtraction of infinities with same signs.
+ */
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754dp_indef();
+ }
+ /*
+ * z is here either not an infinity, or an infinity having the
+ * same sign as product (x*y). The result must be an infinity,
+ * and its sign is determined only by the sign of product (x*y).
+ */
+ return ieee754dp_inf(rs);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754dp_inf(zs);
+ if (zc == IEEE754_CLASS_ZERO) {
+ /* Handle cases +0 + (-0) and similar ones. */
+ if (zs == rs)
+ /*
+ * Cases of addition of zeros of equal signs
+ * or subtraction of zeroes of opposite signs.
+ * The sign of the resulting zero is in any
+ * such case determined only by the sign of z.
+ */
+ return z;
+
+ return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
+ }
+ /* x*y is here 0, and z is not 0, so just return z */
+ return z;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754dp_inf(zs);
+ DPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754dp_inf(zs);
+ DPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754dp_inf(zs);
+ /* continue to real computations */
+ }
+
+ /* Finally get to do some computation */
+
+ /*
+ * Do the multiplication bit first
+ *
+ * rm = xm * ym, re = xe + ye basically
+ *
+ * At this point xm and ym should have been normalized.
+ */
+ assert(xm & DP_HIDDEN_BIT);
+ assert(ym & DP_HIDDEN_BIT);
+
+ re = xe + ye;
+
+ /* shunt to top of word */
+ xm <<= 64 - (DP_FBITS + 1);
+ ym <<= 64 - (DP_FBITS + 1);
+
+ /*
+ * Multiply 64 bits xm and ym to give 128 bits result in hrm:lrm.
+ */
+
+ lxm = xm;
+ hxm = xm >> 32;
+ lym = ym;
+ hym = ym >> 32;
+
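+	/*
+	 * Schoolbook multiplication from four 32x32->64 partial products;
+	 * the (at < lrm) comparisons detect carries out of the low word.
+	 */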
+ lrm = DPXMULT(lxm, lym);
+ hrm = DPXMULT(hxm, hym);
+
+ t = DPXMULT(lxm, hym);
+
+ at = lrm + (t << 32);
+ hrm += at < lrm;
+ lrm = at;
+
+ hrm = hrm + (t >> 32);
+
+ t = DPXMULT(hxm, lym);
+
+ at = lrm + (t << 32);
+ hrm += at < lrm;
+ lrm = at;
+
+ hrm = hrm + (t >> 32);
+
+ /* Put explicit bit at bit 126 if necessary */
+ if ((int64_t)hrm < 0) {
+ lrm = (hrm << 63) | (lrm >> 1);
+ hrm = hrm >> 1;
+ re++;
+ }
+
+	assert(hrm & (1ULL << 62));
+
+ if (zc == IEEE754_CLASS_ZERO) {
+ /*
+ * Move explicit bit from bit 126 to bit 55 since the
+ * ieee754dp_format code expects the mantissa to be
+ * 56 bits wide (53 + 3 rounding bits).
+ */
+ srl128(&hrm, &lrm, (126 - 55));
+ return ieee754dp_format(rs, re, lrm);
+ }
+
+ /* Move explicit bit from bit 52 to bit 126 */
+ lzm = 0;
+ hzm = zm << 10;
+	assert(hzm & (1ULL << 62));
+
+ /* Make the exponents the same */
+ if (ze > re) {
+ /*
+		 * Have to shift the product fraction right to align.
+ */
+ s = ze - re;
+ srl128(&hrm, &lrm, s);
+ re += s;
+ } else if (re > ze) {
+ /*
+		 * Have to shift the z fraction right to align.
+ */
+ s = re - ze;
+ srl128(&hzm, &lzm, s);
+ ze += s;
+ }
+ assert(ze == re);
+ assert(ze <= DP_EMAX);
+
+ /* Do the addition */
+ if (zs == rs) {
+ /*
+ * Generate 128 bit result by adding two 127 bit numbers
+ * leaving result in hzm:lzm, zs and ze.
+ */
+ hzm = hzm + hrm + (lzm > (lzm + lrm));
+ lzm = lzm + lrm;
+ if ((int64_t)hzm < 0) { /* carry out */
+ srl128(&hzm, &lzm, 1);
+ ze++;
+ }
+ } else {
+ if (hzm > hrm || (hzm == hrm && lzm >= lrm)) {
+ hzm = hzm - hrm - (lzm < lrm);
+ lzm = lzm - lrm;
+ } else {
+ hzm = hrm - hzm - (lrm < lzm);
+ lzm = lrm - lzm;
+ zs = rs;
+ }
+ if (lzm == 0 && hzm == 0)
+ return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+ /*
+ * Put explicit bit at bit 126 if necessary.
+ */
+ if (hzm == 0) {
+ /* left shift by 63 or 64 bits */
+ if ((int64_t)lzm < 0) {
+ /* MSB of lzm is the explicit bit */
+ hzm = lzm >> 1;
+ lzm = lzm << 63;
+ ze -= 63;
+ } else {
+ hzm = lzm;
+ lzm = 0;
+ ze -= 64;
+ }
+ }
+
+ t = 0;
+ while ((hzm >> (62 - t)) == 0)
+ t++;
+
+ assert(t <= 62);
+ if (t) {
+ hzm = hzm << t | lzm >> (64 - t);
+ lzm = lzm << t;
+ ze -= t;
+ }
+ }
+
+ /*
+ * Move explicit bit from bit 126 to bit 55 since the
+ * ieee754dp_format code expects the mantissa to be
+ * 56 bits wide (53 + 3 rounding bits).
+ */
+ srl128(&hzm, &lzm, (126 - 55));
+
+ return ieee754dp_format(zs, ze, lzm);
+}
+
+union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y)
+{
+ return _dp_maddf(z, x, y, 0);
+}
+
+union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y)
+{
+ return _dp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
+}
+
+union ieee754dp ieee754dp_madd(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y)
+{
+ return _dp_maddf(z, x, y, 0);
+}
+
+union ieee754dp ieee754dp_msub(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y)
+{
+ return _dp_maddf(z, x, y, MADDF_NEGATE_ADDITION);
+}
+
+union ieee754dp ieee754dp_nmadd(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y)
+{
+ return _dp_maddf(z, x, y, MADDF_NEGATE_PRODUCT|MADDF_NEGATE_ADDITION);
+}
+
+union ieee754dp ieee754dp_nmsub(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y)
+{
+ return _dp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
+}
diff --git a/arch/mips/math-emu/dp_mul.c b/arch/mips/math-emu/dp_mul.c
new file mode 100644
index 000000000..8a671bb7a
--- /dev/null
+++ b/arch/mips/math-emu/dp_mul.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * double precision: common utilities
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_mul(union ieee754dp x, union ieee754dp y)
+{
+ int re;
+ int rs;
+ u64 rm;
+ unsigned int lxm;
+ unsigned int hxm;
+ unsigned int lym;
+ unsigned int hym;
+ u64 lrm;
+ u64 hrm;
+ u64 t;
+ u64 at;
+
+ COMPXDP;
+ COMPYDP;
+
+ EXPLODEXDP;
+ EXPLODEYDP;
+
+ ieee754_clearcx();
+
+ FLUSHXDP;
+ FLUSHYDP;
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754dp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754dp_nanxcpt(x);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return x;
+
+
+ /*
+ * Infinity handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754dp_indef();
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754dp_inf(xs ^ ys);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return ieee754dp_zero(xs ^ ys);
+
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ DPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ DPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ break;
+ }
+ /* rm = xm * ym, re = xe+ye basically */
+ assert(xm & DP_HIDDEN_BIT);
+ assert(ym & DP_HIDDEN_BIT);
+
+ re = xe + ye;
+ rs = xs ^ ys;
+
+ /* shunt to top of word */
+ xm <<= 64 - (DP_FBITS + 1);
+ ym <<= 64 - (DP_FBITS + 1);
+
+ /*
+	 * Multiply 64 bits xm, ym to give high 64 bits rm with stickiness.
+ */
+
+ lxm = xm;
+ hxm = xm >> 32;
+ lym = ym;
+ hym = ym >> 32;
+
+ lrm = DPXMULT(lxm, lym);
+ hrm = DPXMULT(hxm, hym);
+
+ t = DPXMULT(lxm, hym);
+
+ at = lrm + (t << 32);
+ hrm += at < lrm;
+ lrm = at;
+
+ hrm = hrm + (t >> 32);
+
+ t = DPXMULT(hxm, lym);
+
+ at = lrm + (t << 32);
+ hrm += at < lrm;
+ lrm = at;
+
+ hrm = hrm + (t >> 32);
+
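+	/*
+	 * Only a sticky indication is needed from the low 64 bits of the
+	 * product, so fold any nonzero bits into the LSB of the high half.
+	 */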
+ rm = hrm | (lrm != 0);
+
+ /*
+ * Sticky shift down to normal rounding precision.
+ */
+ if ((s64) rm < 0) {
+ rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
+ ((rm << (DP_FBITS + 1 + 3)) != 0);
+ re++;
+ } else {
+ rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
+ ((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
+ }
+ assert(rm & (DP_HIDDEN_BIT << 3));
+
+ return ieee754dp_format(rs, re, rm);
+}
diff --git a/arch/mips/math-emu/dp_rint.c b/arch/mips/math-emu/dp_rint.c
new file mode 100644
index 000000000..7f30b7a30
--- /dev/null
+++ b/arch/mips/math-emu/dp_rint.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * double precision: common utilities
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ * Copyright (C) 2017 Imagination Technologies, Ltd.
+ * Author: Aleksandar Markovic <aleksandar.markovic@imgtec.com>
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_rint(union ieee754dp x)
+{
+ union ieee754dp ret;
+ u64 residue;
+ int sticky;
+ int round;
+ int odd;
+
+ COMPXDP;
+
+ ieee754_clearcx();
+
+ EXPLODEXDP;
+ FLUSHXDP;
+
+ if (xc == IEEE754_CLASS_SNAN)
+ return ieee754dp_nanxcpt(x);
+
+ if ((xc == IEEE754_CLASS_QNAN) ||
+ (xc == IEEE754_CLASS_INF) ||
+ (xc == IEEE754_CLASS_ZERO))
+ return x;
+
+ if (xe >= DP_FBITS)
+ return x;
+
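+	/*
+	 * Split the discarded fraction into a round bit (first bit below
+	 * the integer part) and a sticky bit (OR of the rest); for
+	 * xe < -1 the magnitude is below 0.5, so everything is sticky.
+	 */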
+ if (xe < -1) {
+ residue = xm;
+ round = 0;
+ sticky = residue != 0;
+ xm = 0;
+ } else {
+ residue = xm << (64 - DP_FBITS + xe);
+ round = (residue >> 63) != 0;
+ sticky = (residue << 1) != 0;
+ xm >>= DP_FBITS - xe;
+ }
+
+ odd = (xm & 0x1) != 0x0;
+
+ switch (ieee754_csr.rm) {
+ case FPU_CSR_RN: /* toward nearest */
+ if (round && (sticky || odd))
+ xm++;
+ break;
+ case FPU_CSR_RZ: /* toward zero */
+ break;
+ case FPU_CSR_RU: /* toward +infinity */
+ if ((round || sticky) && !xs)
+ xm++;
+ break;
+ case FPU_CSR_RD: /* toward -infinity */
+ if ((round || sticky) && xs)
+ xm++;
+ break;
+ }
+
+ if (round || sticky)
+ ieee754_setcx(IEEE754_INEXACT);
+
+ ret = ieee754dp_flong(xm);
+ DPSIGN(ret) = xs;
+
+ return ret;
+}
diff --git a/arch/mips/math-emu/dp_simple.c b/arch/mips/math-emu/dp_simple.c
new file mode 100644
index 000000000..b063aad7d
--- /dev/null
+++ b/arch/mips/math-emu/dp_simple.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * double precision: common utilities
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_neg(union ieee754dp x)
+{
+ union ieee754dp y;
+
+ if (ieee754_csr.abs2008) {
+ y = x;
+ DPSIGN(y) = !DPSIGN(x);
+ } else {
+ unsigned int oldrm;
+
+ oldrm = ieee754_csr.rm;
+ ieee754_csr.rm = FPU_CSR_RD;
+ y = ieee754dp_sub(ieee754dp_zero(0), x);
+ ieee754_csr.rm = oldrm;
+ }
+ return y;
+}
+
+union ieee754dp ieee754dp_abs(union ieee754dp x)
+{
+ union ieee754dp y;
+
+ if (ieee754_csr.abs2008) {
+ y = x;
+ DPSIGN(y) = 0;
+ } else {
+ unsigned int oldrm;
+
+ oldrm = ieee754_csr.rm;
+ ieee754_csr.rm = FPU_CSR_RD;
+ if (DPSIGN(x))
+ y = ieee754dp_sub(ieee754dp_zero(0), x);
+ else
+ y = ieee754dp_add(ieee754dp_zero(0), x);
+ ieee754_csr.rm = oldrm;
+ }
+ return y;
+}
diff --git a/arch/mips/math-emu/dp_sqrt.c b/arch/mips/math-emu/dp_sqrt.c
new file mode 100644
index 000000000..1ee38f824
--- /dev/null
+++ b/arch/mips/math-emu/dp_sqrt.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * double precision square root
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754dp.h"
+
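+/*
+ * Correction table for the initial estimate below, indexed by bits
+ * 15..19 of the high word of the rough guess.
+ */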
+static const unsigned int table[] = {
+ 0, 1204, 3062, 5746, 9193, 13348, 18162, 23592,
+ 29598, 36145, 43202, 50740, 58733, 67158, 75992,
+ 85215, 83599, 71378, 60428, 50647, 41945, 34246,
+ 27478, 21581, 16499, 12183, 8588, 5674, 3403,
+ 1742, 661, 130
+};
+
+union ieee754dp ieee754dp_sqrt(union ieee754dp x)
+{
+ struct _ieee754_csr oldcsr;
+ union ieee754dp y, z, t;
+ unsigned int scalx, yh;
+ COMPXDP;
+
+ EXPLODEXDP;
+ ieee754_clearcx();
+ FLUSHXDP;
+
+ /* x == INF or NAN? */
+ switch (xc) {
+ case IEEE754_CLASS_SNAN:
+ return ieee754dp_nanxcpt(x);
+
+ case IEEE754_CLASS_QNAN:
+ /* sqrt(Nan) = Nan */
+ return x;
+
+ case IEEE754_CLASS_ZERO:
+ /* sqrt(0) = 0 */
+ return x;
+
+ case IEEE754_CLASS_INF:
+ if (xs) {
+ /* sqrt(-Inf) = Nan */
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754dp_indef();
+ }
+ /* sqrt(+Inf) = Inf */
+ return x;
+
+ case IEEE754_CLASS_DNORM:
+ DPDNORMX;
+ fallthrough;
+ case IEEE754_CLASS_NORM:
+ if (xs) {
+ /* sqrt(-x) = Nan */
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754dp_indef();
+ }
+ break;
+ }
+
+ /* save old csr; switch off INX enable & flag; set RN rounding */
+ oldcsr = ieee754_csr;
+ ieee754_csr.mx &= ~IEEE754_INEXACT;
+ ieee754_csr.sx &= ~IEEE754_INEXACT;
+ ieee754_csr.rm = FPU_CSR_RN;
+
+ /* adjust exponent to prevent overflow */
+ scalx = 0;
+	if (xe > 512) {		/* x > 2**512? */
+ xe -= 512; /* x = x / 2**512 */
+ scalx += 256;
+ } else if (xe < -512) { /* x < 2**-512? */
+ xe += 512; /* x = x * 2**512 */
+ scalx -= 256;
+ }
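+	/*
+	 * sqrt(x * 2^(+/-512)) = sqrt(x) * 2^(+/-256), so the deferred
+	 * 2^scalx factor is applied to the result exponent at the end.
+	 */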
+
+ x = builddp(0, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
+ y = x;
+
+ /* magic initial approximation to almost 8 sig. bits */
+ yh = y.bits >> 32;
+ yh = (yh >> 1) + 0x1ff80000;
+ yh = yh - table[(yh >> 15) & 31];
+ y.bits = ((u64) yh << 32) | (y.bits & 0xffffffff);
+
+ /* Heron's rule once with correction to improve to ~18 sig. bits */
+ /* t=x/y; y=y+t; py[n0]=py[n0]-0x00100006; py[n1]=0; */
+ t = ieee754dp_div(x, y);
+ y = ieee754dp_add(y, t);
+ y.bits -= 0x0010000600000000LL;
+ y.bits &= 0xffffffff00000000LL;
+
+ /* triple to almost 56 sig. bits: y ~= sqrt(x) to within 1 ulp */
+ /* t=y*y; z=t; pt[n0]+=0x00100000; t+=z; z=(x-z)*y; */
+ t = ieee754dp_mul(y, y);
+ z = t;
+ t.bexp += 0x001;
+ t = ieee754dp_add(t, z);
+ z = ieee754dp_mul(ieee754dp_sub(x, z), y);
+
+ /* t=z/(t+x) ; pt[n0]+=0x00100000; y+=t; */
+ t = ieee754dp_div(z, ieee754dp_add(t, x));
+ t.bexp += 0x001;
+ y = ieee754dp_add(y, t);
+
+ /* twiddle last bit to force y correctly rounded */
+
+ /* set RZ, clear INEX flag */
+ ieee754_csr.rm = FPU_CSR_RZ;
+ ieee754_csr.sx &= ~IEEE754_INEXACT;
+
+ /* t=x/y; ...chopped quotient, possibly inexact */
+ t = ieee754dp_div(x, y);
+
+ if (ieee754_csr.sx & IEEE754_INEXACT || t.bits != y.bits) {
+
+ if (!(ieee754_csr.sx & IEEE754_INEXACT))
+ /* t = t-ulp */
+ t.bits -= 1;
+
+ /* add inexact to result status */
+ oldcsr.cx |= IEEE754_INEXACT;
+ oldcsr.sx |= IEEE754_INEXACT;
+
+ switch (oldcsr.rm) {
+ case FPU_CSR_RU:
+ y.bits += 1;
+ fallthrough;
+ case FPU_CSR_RN:
+ t.bits += 1;
+ break;
+ }
+
+ /* y=y+t; ...chopped sum */
+ y = ieee754dp_add(y, t);
+
+ /* adjust scalx for correctly rounded sqrt(x) */
+ scalx -= 1;
+ }
+
+ /* py[n0]=py[n0]+scalx; ...scale back y */
+ y.bexp += scalx;
+
+ /* restore rounding mode, possibly set inexact */
+ ieee754_csr = oldcsr;
+
+ return y;
+}
diff --git a/arch/mips/math-emu/dp_sub.c b/arch/mips/math-emu/dp_sub.c
new file mode 100644
index 000000000..08474ad2a
--- /dev/null
+++ b/arch/mips/math-emu/dp_sub.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * double precision: common utilities
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_sub(union ieee754dp x, union ieee754dp y)
+{
+ int s;
+
+ COMPXDP;
+ COMPYDP;
+
+ EXPLODEXDP;
+ EXPLODEYDP;
+
+ ieee754_clearcx();
+
+ FLUSHXDP;
+ FLUSHYDP;
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754dp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754dp_nanxcpt(x);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return x;
+
+
+ /*
+ * Infinity handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ if (xs != ys)
+ return x;
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754dp_indef();
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ return ieee754dp_inf(ys ^ 1);
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ return x;
+
+ /*
+ * Zero handling
+ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ if (xs != ys)
+ return x;
+ else
+ return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ /* quick fix up */
+ DPSIGN(y) ^= 1;
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ /* normalize ym,ye */
+ DPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ /* normalize xm,xe */
+ DPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ break;
+ }
+ /* flip sign of y and handle as add */
+ ys ^= 1;
+
+ assert(xm & DP_HIDDEN_BIT);
+ assert(ym & DP_HIDDEN_BIT);
+
+
+	/* provide guard, round and sticky bit space */
+ xm <<= 3;
+ ym <<= 3;
+
+ if (xe > ye) {
+ /*
+ * Have to shift y fraction right to align
+ */
+ s = xe - ye;
+ ym = XDPSRS(ym, s);
+ ye += s;
+ } else if (ye > xe) {
+ /*
+ * Have to shift x fraction right to align
+ */
+ s = ye - xe;
+ xm = XDPSRS(xm, s);
+ xe += s;
+ }
+ assert(xe == ye);
+ assert(xe <= DP_EMAX);
+
+ if (xs == ys) {
+		/* generate 57 bit result of adding two 56 bit numbers
+ */
+ xm = xm + ym;
+
+ if (xm >> (DP_FBITS + 1 + 3)) { /* carry out */
+ xm = XDPSRS1(xm); /* shift preserving sticky */
+ xe++;
+ }
+ } else {
+ if (xm >= ym) {
+ xm = xm - ym;
+ } else {
+ xm = ym - xm;
+ xs = ys;
+ }
+ if (xm == 0) {
+			if (ieee754_csr.rm == FPU_CSR_RD)
+				return ieee754dp_zero(1);	/* rounding toward -inf => -0 */
+			else
+				return ieee754dp_zero(0);	/* other rounding modes => +0 */
+ }
+
+ /* normalize to rounding precision
+ */
+ while ((xm >> (DP_FBITS + 3)) == 0) {
+ xm <<= 1;
+ xe--;
+ }
+ }
+
+ return ieee754dp_format(xs, xe, xm);
+}
diff --git a/arch/mips/math-emu/dp_tint.c b/arch/mips/math-emu/dp_tint.c
new file mode 100644
index 000000000..0e6ad35e7
--- /dev/null
+++ b/arch/mips/math-emu/dp_tint.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * double precision: common utilities
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754dp.h"
+
+int ieee754dp_tint(union ieee754dp x)
+{
+ u64 residue;
+ int round;
+ int sticky;
+ int odd;
+
+ COMPXDP;
+
+ ieee754_clearcx();
+
+ EXPLODEXDP;
+ FLUSHXDP;
+
+ switch (xc) {
+ case IEEE754_CLASS_SNAN:
+ case IEEE754_CLASS_QNAN:
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754si_indef();
+
+ case IEEE754_CLASS_INF:
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754si_overflow(xs);
+
+ case IEEE754_CLASS_ZERO:
+ return 0;
+
+ case IEEE754_CLASS_DNORM:
+ case IEEE754_CLASS_NORM:
+ break;
+ }
+ if (xe > 31) {
+ /* Set invalid. We will only use overflow for floating
+ point overflow */
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754si_overflow(xs);
+ }
+ /* oh gawd */
+ if (xe > DP_FBITS) {
+ xm <<= xe - DP_FBITS;
+ } else if (xe < DP_FBITS) {
+ if (xe < -1) {
+ residue = xm;
+ round = 0;
+ sticky = residue != 0;
+ xm = 0;
+ } else {
+ residue = xm << (64 - DP_FBITS + xe);
+ round = (residue >> 63) != 0;
+ sticky = (residue << 1) != 0;
+ xm >>= DP_FBITS - xe;
+ }
+ /* Note: At this point upper 32 bits of xm are guaranteed
+ to be zero */
+ odd = (xm & 0x1) != 0x0;
+ switch (ieee754_csr.rm) {
+ case FPU_CSR_RN:
+ if (round && (sticky || odd))
+ xm++;
+ break;
+ case FPU_CSR_RZ:
+ break;
+ case FPU_CSR_RU: /* toward +Infinity */
+ if ((round || sticky) && !xs)
+ xm++;
+ break;
+ case FPU_CSR_RD: /* toward -Infinity */
+ if ((round || sticky) && xs)
+ xm++;
+ break;
+ }
+ /* look for valid corner case 0x80000000 */
+ if ((xm >> 31) != 0 && (xs == 0 || xm != 0x80000000)) {
+ /* This can happen after rounding */
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754si_overflow(xs);
+ }
+ if (round || sticky)
+ ieee754_setcx(IEEE754_INEXACT);
+ }
+ if (xs)
+ return -xm;
+ else
+ return xm;
+}
diff --git a/arch/mips/math-emu/dp_tlong.c b/arch/mips/math-emu/dp_tlong.c
new file mode 100644
index 000000000..c61ef02d4
--- /dev/null
+++ b/arch/mips/math-emu/dp_tlong.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * double precision: common utilities
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754dp.h"
+
+s64 ieee754dp_tlong(union ieee754dp x)
+{
+ u64 residue;
+ int round;
+ int sticky;
+ int odd;
+
+ COMPXDP;
+
+ ieee754_clearcx();
+
+ EXPLODEXDP;
+ FLUSHXDP;
+
+ switch (xc) {
+ case IEEE754_CLASS_SNAN:
+ case IEEE754_CLASS_QNAN:
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754di_indef();
+
+ case IEEE754_CLASS_INF:
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754di_overflow(xs);
+
+ case IEEE754_CLASS_ZERO:
+ return 0;
+
+ case IEEE754_CLASS_DNORM:
+ case IEEE754_CLASS_NORM:
+ break;
+ }
+ if (xe >= 63) {
+ /* look for valid corner case */
+ if (xe == 63 && xs && xm == DP_HIDDEN_BIT)
+ return -0x8000000000000000LL;
+ /* Set invalid. We will only use overflow for floating
+ point overflow */
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754di_overflow(xs);
+ }
+ /* oh gawd */
+ if (xe > DP_FBITS) {
+ xm <<= xe - DP_FBITS;
+ } else if (xe < DP_FBITS) {
+ if (xe < -1) {
+ residue = xm;
+ round = 0;
+ sticky = residue != 0;
+ xm = 0;
+ } else {
+			/* Shifting a u64 by 64 bits is undefined, so
+			 * do it in two steps. Be aware that xe
+			 * may be -1 */
+ residue = xm << (xe + 1);
+ residue <<= 63 - DP_FBITS;
+ round = (residue >> 63) != 0;
+ sticky = (residue << 1) != 0;
+ xm >>= DP_FBITS - xe;
+ }
+ odd = (xm & 0x1) != 0x0;
+ switch (ieee754_csr.rm) {
+ case FPU_CSR_RN:
+ if (round && (sticky || odd))
+ xm++;
+ break;
+ case FPU_CSR_RZ:
+ break;
+ case FPU_CSR_RU: /* toward +Infinity */
+ if ((round || sticky) && !xs)
+ xm++;
+ break;
+ case FPU_CSR_RD: /* toward -Infinity */
+ if ((round || sticky) && xs)
+ xm++;
+ break;
+ }
+ if ((xm >> 63) != 0) {
+ /* This can happen after rounding */
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754di_overflow(xs);
+ }
+ if (round || sticky)
+ ieee754_setcx(IEEE754_INEXACT);
+ }
+ if (xs)
+ return -xm;
+ else
+ return xm;
+}
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
new file mode 100644
index 000000000..e2d46cb93
--- /dev/null
+++ b/arch/mips/math-emu/dsemul.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mm_types.h>
+#include <linux/sched/task.h>
+
+#include <asm/branch.h>
+#include <asm/cacheflush.h>
+#include <asm/fpu_emulator.h>
+#include <asm/inst.h>
+#include <asm/mipsregs.h>
+#include <linux/uaccess.h>
+
+/**
+ * struct emuframe - The 'emulation' frame structure
+ * @emul: The instruction to 'emulate'.
+ * @badinst: A break instruction to cause a return to the kernel.
+ *
+ * This structure defines the frames placed within the delay slot emulation
+ * page in response to a call to mips_dsemul(). Each thread may be allocated
+ * only one frame at any given time. The kernel stores within it the
+ * instruction to be 'emulated' followed by a break instruction, then
+ * executes the frame in user mode. The break causes a trap to the kernel
+ * which leads to do_dsemulret() being called unless the instruction in
+ * @emul causes a trap itself, is a branch, or a signal is delivered to
+ * the thread. In these cases the allocated frame will either be reused by
+ * a subsequent delay slot 'emulation', or be freed during signal delivery or
+ * upon thread exit.
+ *
+ * This approach is used because:
+ *
+ * - Actually emulating all instructions isn't feasible. We would need to
+ * be able to handle instructions from all revisions of the MIPS ISA,
+ * all ASEs & all vendor instruction set extensions. This would be a
+ * whole lot of work & continual maintenance burden as new instructions
+ * are introduced, and in the case of some vendor extensions may not
+ * even be possible. Thus we need to take the approach of actually
+ * executing the instruction.
+ *
+ * - We must execute the instruction within user context. If we were to
+ * execute the instruction in kernel mode then it would have access to
+ * kernel resources without very careful checks, leaving us with a
+ * high potential for security or stability issues to arise.
+ *
+ * - We used to place the frame on the user's stack, but this requires
+ * that the stack be executable. This is bad for security so the
+ * per-process page is now used instead.
+ *
+ * - The instruction in @emul may be something entirely invalid for a
+ * delay slot. The user may (intentionally or otherwise) place a branch
+ * in a delay slot, or a kernel mode instruction, or something else
+ * which generates an exception. Thus we can't rely upon the break in
+ * @badinst always being hit. For this reason we track the index of the
+ * frame allocated to each thread, allowing us to clean it up at later
+ * points such as signal delivery or thread exit.
+ *
+ * - The user may generate a fake struct emuframe if they wish, invoking
+ * the BRK_MEMU break instruction themselves. We must therefore not
+ * trust that BRK_MEMU means there's actually a valid frame allocated
+ * to the thread, and must not allow the user to do anything they
+ * couldn't already.
+ */
+struct emuframe {
+ mips_instruction emul;
+ mips_instruction badinst;
+};
+
+static const int emupage_frame_count = PAGE_SIZE / sizeof(struct emuframe);
+
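+/* All frames live in a single page located at STACK_TOP in the process. */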
+static inline __user struct emuframe *dsemul_page(void)
+{
+ return (__user struct emuframe *)STACK_TOP;
+}
+
+static int alloc_emuframe(void)
+{
+ mm_context_t *mm_ctx = &current->mm->context;
+ int idx;
+
+retry:
+ spin_lock(&mm_ctx->bd_emupage_lock);
+
+ /* Ensure we have an allocation bitmap */
+ if (!mm_ctx->bd_emupage_allocmap) {
+ mm_ctx->bd_emupage_allocmap =
+ kcalloc(BITS_TO_LONGS(emupage_frame_count),
+ sizeof(unsigned long),
+ GFP_ATOMIC);
+
+ if (!mm_ctx->bd_emupage_allocmap) {
+ idx = BD_EMUFRAME_NONE;
+ goto out_unlock;
+ }
+ }
+
+ /* Attempt to allocate a single bit/frame */
+ idx = bitmap_find_free_region(mm_ctx->bd_emupage_allocmap,
+ emupage_frame_count, 0);
+ if (idx < 0) {
+ /*
+ * Failed to allocate a frame. We'll wait until one becomes
+ * available. We unlock the page so that other threads actually
+ * get the opportunity to free their frames, which means
+ * technically the result of bitmap_full may be incorrect.
+ * However the worst case is that we repeat all this and end up
+ * back here again.
+ */
+ spin_unlock(&mm_ctx->bd_emupage_lock);
+ if (!wait_event_killable(mm_ctx->bd_emupage_queue,
+ !bitmap_full(mm_ctx->bd_emupage_allocmap,
+ emupage_frame_count)))
+ goto retry;
+
+ /* Received a fatal signal - just give in */
+ return BD_EMUFRAME_NONE;
+ }
+
+ /* Success! */
+ pr_debug("allocate emuframe %d to %d\n", idx, current->pid);
+out_unlock:
+ spin_unlock(&mm_ctx->bd_emupage_lock);
+ return idx;
+}
+
+static void free_emuframe(int idx, struct mm_struct *mm)
+{
+ mm_context_t *mm_ctx = &mm->context;
+
+ spin_lock(&mm_ctx->bd_emupage_lock);
+
+ pr_debug("free emuframe %d from %d\n", idx, current->pid);
+ bitmap_clear(mm_ctx->bd_emupage_allocmap, idx, 1);
+
+ /* If some thread is waiting for a frame, now's its chance */
+ wake_up(&mm_ctx->bd_emupage_queue);
+
+ spin_unlock(&mm_ctx->bd_emupage_lock);
+}
+
+static bool within_emuframe(struct pt_regs *regs)
+{
+ unsigned long base = (unsigned long)dsemul_page();
+
+ if (regs->cp0_epc < base)
+ return false;
+ if (regs->cp0_epc >= (base + PAGE_SIZE))
+ return false;
+
+ return true;
+}
+
+bool dsemul_thread_cleanup(struct task_struct *tsk)
+{
+ int fr_idx;
+
+ /* Clear any allocated frame, retrieving its index */
+ fr_idx = atomic_xchg(&tsk->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+
+ /* If no frame was allocated, we're done */
+ if (fr_idx == BD_EMUFRAME_NONE)
+ return false;
+
+ task_lock(tsk);
+
+ /* Free the frame that this thread had allocated */
+ if (tsk->mm)
+ free_emuframe(fr_idx, tsk->mm);
+
+ task_unlock(tsk);
+ return true;
+}
+
+bool dsemul_thread_rollback(struct pt_regs *regs)
+{
+ struct emuframe __user *fr;
+ int fr_idx;
+
+ /* Do nothing if we're not executing from a frame */
+ if (!within_emuframe(regs))
+ return false;
+
+ /* Find the frame being executed */
+ fr_idx = atomic_read(&current->thread.bd_emu_frame);
+ if (fr_idx == BD_EMUFRAME_NONE)
+ return false;
+ fr = &dsemul_page()[fr_idx];
+
+ /*
+ * If the PC is at the emul instruction, roll back to the branch. If
+ * PC is at the badinst (break) instruction, we've already emulated the
+ * instruction so progress to the continue PC. If it's anything else
+ * then something is amiss & the user has branched into some other area
+ * of the emupage - we'll free the allocated frame anyway.
+ */
+ if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->emul)
+ regs->cp0_epc = current->thread.bd_emu_branch_pc;
+ else if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->badinst)
+ regs->cp0_epc = current->thread.bd_emu_cont_pc;
+
+ atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+ free_emuframe(fr_idx, current->mm);
+ return true;
+}
+
+void dsemul_mm_cleanup(struct mm_struct *mm)
+{
+ mm_context_t *mm_ctx = &mm->context;
+
+ kfree(mm_ctx->bd_emupage_allocmap);
+}
+
+int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
+ unsigned long branch_pc, unsigned long cont_pc)
+{
+ int isa16 = get_isa16_mode(regs->cp0_epc);
+ mips_instruction break_math;
+ unsigned long fr_uaddr;
+ struct emuframe fr;
+ int fr_idx, ret;
+
+ /* NOP is easy */
+ if (ir == 0)
+ return -1;
+
+ /* microMIPS instructions */
+ if (isa16) {
+ union mips_instruction insn = { .word = ir };
+
+ /* NOP16 aka MOVE16 $0, $0 */
+ if ((ir >> 16) == MM_NOP16)
+ return -1;
+
+ /* ADDIUPC */
+ if (insn.mm_a_format.opcode == mm_addiupc_op) {
+ unsigned int rs;
+ s32 v;
+
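+			/*
+			 * Decode the 3-bit register field: values 0 and 1
+			 * select $16 and $17, values 2-7 select $2-$7.
+			 */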
+ rs = (((insn.mm_a_format.rs + 0xe) & 0xf) + 2);
+ v = regs->cp0_epc & ~3;
+ v += insn.mm_a_format.simmediate << 2;
+ regs->regs[rs] = (long)v;
+ return -1;
+ }
+ }
+
+ pr_debug("dsemul 0x%08lx cont at 0x%08lx\n", regs->cp0_epc, cont_pc);
+
+ /* Allocate a frame if we don't already have one */
+ fr_idx = atomic_read(&current->thread.bd_emu_frame);
+ if (fr_idx == BD_EMUFRAME_NONE)
+ fr_idx = alloc_emuframe();
+ if (fr_idx == BD_EMUFRAME_NONE)
+ return SIGBUS;
+
+ /* Retrieve the appropriately encoded break instruction */
+ break_math = BREAK_MATH(isa16);
+
+ /* Write the instructions to the frame */
+ if (isa16) {
+ union mips_instruction _emul = {
+ .halfword = { ir >> 16, ir }
+ };
+ union mips_instruction _badinst = {
+ .halfword = { break_math >> 16, break_math }
+ };
+
+ fr.emul = _emul.word;
+ fr.badinst = _badinst.word;
+ } else {
+ fr.emul = ir;
+ fr.badinst = break_math;
+ }
+
+ /* Write the frame to user memory */
+ fr_uaddr = (unsigned long)&dsemul_page()[fr_idx];
+ ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr),
+ FOLL_FORCE | FOLL_WRITE);
+ if (unlikely(ret != sizeof(fr))) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ free_emuframe(fr_idx, current->mm);
+ return SIGBUS;
+ }
+
+ /* Record the PC of the branch, PC to continue from & frame index */
+ current->thread.bd_emu_branch_pc = branch_pc;
+ current->thread.bd_emu_cont_pc = cont_pc;
+ atomic_set(&current->thread.bd_emu_frame, fr_idx);
+
+ /* Change user register context to execute the frame */
+ regs->cp0_epc = fr_uaddr | isa16;
+
+ return 0;
+}
+
+bool do_dsemulret(struct pt_regs *xcp)
+{
+ /* Clean up the allocated frame; return false if there wasn't one */
+ if (!dsemul_thread_cleanup(current)) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ return false;
+ }
+
+ /* Set EPC to return to post-branch instruction */
+ xcp->cp0_epc = current->thread.bd_emu_cont_pc;
+ pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
+ MIPS_FPU_EMU_INC_STATS(ds_emul);
+ return true;
+}
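The mips_dsemul() path above emulates a branch-delay-slot instruction by writing a two-word frame into a per-mm user page: the instruction itself followed by a BREAK, so the CPU traps straight back into the kernel (do_dsemulret) once the instruction has executed. A minimal standalone sketch of that frame layout, with illustrative values only (the real frame is struct emuframe and uses a dedicated break code):

#include <stdint.h>
#include <stdio.h>

/* Illustrative mirror of the two-word frame written by mips_dsemul(). */
struct emuframe_sketch {
	uint32_t emul;		/* instruction taken from the branch delay slot */
	uint32_t badinst;	/* BREAK: traps back into the kernel afterwards */
};

int main(void)
{
	struct emuframe_sketch fr = {
		.emul    = 0x24020001,	/* e.g. addiu $v0, $zero, 1 */
		.badinst = 0x0000000d,	/* plain MIPS BREAK; the kernel encodes a dedicated break code */
	};

	printf("frame: emul=%08x badinst=%08x\n", fr.emul, fr.badinst);
	return 0;
}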
diff --git a/arch/mips/math-emu/ieee754.c b/arch/mips/math-emu/ieee754.c
new file mode 100644
index 000000000..0ba5dfbd4
--- /dev/null
+++ b/arch/mips/math-emu/ieee754.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* ieee754 floating point arithmetic
+ * single and double precision
+ *
+ * BUGS
+ * not much dp done
+ * doesn't generate IEEE754_INEXACT
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include <linux/compiler.h>
+
+#include "ieee754.h"
+#include "ieee754sp.h"
+#include "ieee754dp.h"
+
+/*
+ * Special constants
+ */
+
+/*
+ * Older GCC requires the inner braces for initialization of union ieee754dp's
+ * anonymous struct member. Without them, an error will result.
+ */
+#define xPCNST(s, b, m, ebias) \
+{ \
+ { \
+ .sign = (s), \
+ .bexp = (b) + ebias, \
+ .mant = (m) \
+ } \
+}
+
+#define DPCNST(s, b, m) \
+ xPCNST(s, b, m, DP_EBIAS)
+
+const union ieee754dp __ieee754dp_spcvals[] = {
+ DPCNST(0, DP_EMIN - 1, 0x0000000000000ULL), /* + zero */
+ DPCNST(1, DP_EMIN - 1, 0x0000000000000ULL), /* - zero */
+ DPCNST(0, 0, 0x0000000000000ULL), /* + 1.0 */
+ DPCNST(1, 0, 0x0000000000000ULL), /* - 1.0 */
+ DPCNST(0, 3, 0x4000000000000ULL), /* + 10.0 */
+ DPCNST(1, 3, 0x4000000000000ULL), /* - 10.0 */
+ DPCNST(0, DP_EMAX + 1, 0x0000000000000ULL), /* + infinity */
+ DPCNST(1, DP_EMAX + 1, 0x0000000000000ULL), /* - infinity */
+ DPCNST(0, DP_EMAX + 1, 0x7FFFFFFFFFFFFULL), /* + indef legacy qNaN */
+ DPCNST(0, DP_EMAX + 1, 0x8000000000000ULL), /* + indef 2008 qNaN */
+ DPCNST(0, DP_EMAX, 0xFFFFFFFFFFFFFULL), /* + max */
+ DPCNST(1, DP_EMAX, 0xFFFFFFFFFFFFFULL), /* - max */
+ DPCNST(0, DP_EMIN, 0x0000000000000ULL), /* + min normal */
+ DPCNST(1, DP_EMIN, 0x0000000000000ULL), /* - min normal */
+ DPCNST(0, DP_EMIN - 1, 0x0000000000001ULL), /* + min denormal */
+ DPCNST(1, DP_EMIN - 1, 0x0000000000001ULL), /* - min denormal */
+ DPCNST(0, 31, 0x0000000000000ULL), /* + 1.0e31 */
+ DPCNST(0, 63, 0x0000000000000ULL), /* + 1.0e63 */
+};
+
+#define SPCNST(s, b, m) \
+ xPCNST(s, b, m, SP_EBIAS)
+
+const union ieee754sp __ieee754sp_spcvals[] = {
+ SPCNST(0, SP_EMIN - 1, 0x000000), /* + zero */
+ SPCNST(1, SP_EMIN - 1, 0x000000), /* - zero */
+ SPCNST(0, 0, 0x000000), /* + 1.0 */
+ SPCNST(1, 0, 0x000000), /* - 1.0 */
+ SPCNST(0, 3, 0x200000), /* + 10.0 */
+ SPCNST(1, 3, 0x200000), /* - 10.0 */
+ SPCNST(0, SP_EMAX + 1, 0x000000), /* + infinity */
+ SPCNST(1, SP_EMAX + 1, 0x000000), /* - infinity */
+ SPCNST(0, SP_EMAX + 1, 0x3FFFFF), /* + indef legacy quiet NaN */
+ SPCNST(0, SP_EMAX + 1, 0x400000), /* + indef 2008 quiet NaN */
+ SPCNST(0, SP_EMAX, 0x7FFFFF), /* + max normal */
+ SPCNST(1, SP_EMAX, 0x7FFFFF), /* - max normal */
+ SPCNST(0, SP_EMIN, 0x000000), /* + min normal */
+ SPCNST(1, SP_EMIN, 0x000000), /* - min normal */
+ SPCNST(0, SP_EMIN - 1, 0x000001), /* + min denormal */
+ SPCNST(1, SP_EMIN - 1, 0x000001), /* - min denormal */
+ SPCNST(0, 31, 0x000000), /* + 1.0e31 */
+ SPCNST(0, 63, 0x000000), /* + 1.0e63 */
+};
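As a quick sanity check on the tables above: for a normal number the encoded value is (-1)^sign x (1 + mant/2^fbits) x 2^bexp, so DPCNST(0, 3, 0x4000000000000ULL) is 1.25 x 2^3 = 10.0, as the comment claims. A small userspace sketch (illustrative only) decoding such an entry:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Decode a DPCNST()-style (sign, unbiased exponent, mantissa) triple. */
static double decode_dp(int sign, int bexp, uint64_t mant)
{
	double frac = 1.0 + (double)mant / (double)(1ULL << 52);

	return ldexp(sign ? -frac : frac, bexp);
}

int main(void)
{
	/* DPCNST(0, 3, 0x4000000000000ULL): mant = 2^50, so frac = 1.25 */
	printf("%g\n", decode_dp(0, 3, 0x4000000000000ULL));	/* prints 10 */
	return 0;
}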
diff --git a/arch/mips/math-emu/ieee754.h b/arch/mips/math-emu/ieee754.h
new file mode 100644
index 000000000..090caa740
--- /dev/null
+++ b/arch/mips/math-emu/ieee754.h
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ *
+ * Nov 7, 2000
+ * Modification to allow integration with Linux kernel
+ *
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ */
+#ifndef __ARCH_MIPS_MATH_EMU_IEEE754_H
+#define __ARCH_MIPS_MATH_EMU_IEEE754_H
+
+#include <linux/compiler.h>
+#include <asm/byteorder.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <asm/bitfield.h>
+
+union ieee754dp {
+ struct {
+ __BITFIELD_FIELD(unsigned int sign:1,
+ __BITFIELD_FIELD(unsigned int bexp:11,
+ __BITFIELD_FIELD(u64 mant:52,
+ ;)))
+ };
+ u64 bits;
+};
+
+union ieee754sp {
+ struct {
+ __BITFIELD_FIELD(unsigned sign:1,
+ __BITFIELD_FIELD(unsigned bexp:8,
+ __BITFIELD_FIELD(unsigned mant:23,
+ ;)))
+ };
+ u32 bits;
+};
+
+/*
+ * single precision (often aka float)
+ */
+int ieee754sp_class(union ieee754sp x);
+
+union ieee754sp ieee754sp_abs(union ieee754sp x);
+union ieee754sp ieee754sp_neg(union ieee754sp x);
+
+union ieee754sp ieee754sp_add(union ieee754sp x, union ieee754sp y);
+union ieee754sp ieee754sp_sub(union ieee754sp x, union ieee754sp y);
+union ieee754sp ieee754sp_mul(union ieee754sp x, union ieee754sp y);
+union ieee754sp ieee754sp_div(union ieee754sp x, union ieee754sp y);
+
+union ieee754sp ieee754sp_fint(int x);
+union ieee754sp ieee754sp_flong(s64 x);
+union ieee754sp ieee754sp_fdp(union ieee754dp x);
+union ieee754sp ieee754sp_rint(union ieee754sp x);
+
+int ieee754sp_tint(union ieee754sp x);
+s64 ieee754sp_tlong(union ieee754sp x);
+
+int ieee754sp_cmp(union ieee754sp x, union ieee754sp y, int cop, int sig);
+
+union ieee754sp ieee754sp_sqrt(union ieee754sp x);
+
+union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y);
+union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y);
+union ieee754sp ieee754sp_madd(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y);
+union ieee754sp ieee754sp_msub(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y);
+union ieee754sp ieee754sp_nmadd(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y);
+union ieee754sp ieee754sp_nmsub(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y);
+int ieee754sp_2008class(union ieee754sp x);
+union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y);
+union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y);
+union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y);
+union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y);
+
+/*
+ * double precision (often aka double)
+ */
+int ieee754dp_class(union ieee754dp x);
+
+union ieee754dp ieee754dp_add(union ieee754dp x, union ieee754dp y);
+union ieee754dp ieee754dp_sub(union ieee754dp x, union ieee754dp y);
+union ieee754dp ieee754dp_mul(union ieee754dp x, union ieee754dp y);
+union ieee754dp ieee754dp_div(union ieee754dp x, union ieee754dp y);
+
+union ieee754dp ieee754dp_abs(union ieee754dp x);
+union ieee754dp ieee754dp_neg(union ieee754dp x);
+
+union ieee754dp ieee754dp_fint(int x);
+union ieee754dp ieee754dp_flong(s64 x);
+union ieee754dp ieee754dp_fsp(union ieee754sp x);
+union ieee754dp ieee754dp_rint(union ieee754dp x);
+
+int ieee754dp_tint(union ieee754dp x);
+s64 ieee754dp_tlong(union ieee754dp x);
+
+int ieee754dp_cmp(union ieee754dp x, union ieee754dp y, int cop, int sig);
+
+union ieee754dp ieee754dp_sqrt(union ieee754dp x);
+
+union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y);
+union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y);
+union ieee754dp ieee754dp_madd(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y);
+union ieee754dp ieee754dp_msub(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y);
+union ieee754dp ieee754dp_nmadd(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y);
+union ieee754dp ieee754dp_nmsub(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y);
+int ieee754dp_2008class(union ieee754dp x);
+union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y);
+union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y);
+union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y);
+union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y);
+
+
+/* Classes of floating point number (NaN is split into signaling and quiet)
+ */
+enum {
+ IEEE754_CLASS_NORM = 0x00,
+ IEEE754_CLASS_ZERO = 0x01,
+ IEEE754_CLASS_DNORM = 0x02,
+ IEEE754_CLASS_INF = 0x03,
+ IEEE754_CLASS_SNAN = 0x04,
+ IEEE754_CLASS_QNAN = 0x05,
+};
+
+/* exception numbers */
+#define IEEE754_INEXACT 0x01
+#define IEEE754_UNDERFLOW 0x02
+#define IEEE754_OVERFLOW 0x04
+#define IEEE754_ZERO_DIVIDE 0x08
+#define IEEE754_INVALID_OPERATION 0x10
+
+/* cmp operators
+ */
+#define IEEE754_CLT 0x01
+#define IEEE754_CEQ 0x02
+#define IEEE754_CGT 0x04
+#define IEEE754_CUN 0x08
+
+/*
+ * The control status register
+ */
+struct _ieee754_csr {
+ __BITFIELD_FIELD(unsigned fcc:7, /* condition[7:1] */
+ __BITFIELD_FIELD(unsigned nod:1, /* set 1 for no denormals */
+ __BITFIELD_FIELD(unsigned c:1, /* condition[0] */
+ __BITFIELD_FIELD(unsigned pad0:3,
+ __BITFIELD_FIELD(unsigned abs2008:1, /* IEEE 754-2008 ABS/NEG.fmt */
+ __BITFIELD_FIELD(unsigned nan2008:1, /* IEEE 754-2008 NaN mode */
+ __BITFIELD_FIELD(unsigned cx:6, /* exceptions this operation */
+ __BITFIELD_FIELD(unsigned mx:5, /* exception enable mask */
+ __BITFIELD_FIELD(unsigned sx:5, /* exceptions total */
+ __BITFIELD_FIELD(unsigned rm:2, /* current rounding mode */
+ ;))))))))))
+};
+#define ieee754_csr (*(struct _ieee754_csr *)(&current->thread.fpu.fcr31))
+
+static inline unsigned int ieee754_getrm(void)
+{
+ return (ieee754_csr.rm);
+}
+
+static inline unsigned int ieee754_setrm(unsigned int rm)
+{
+ return (ieee754_csr.rm = rm);
+}
+
+/*
+ * get current exceptions
+ */
+static inline unsigned int ieee754_getcx(void)
+{
+ return (ieee754_csr.cx);
+}
+
+/* test for current exception condition
+ */
+static inline int ieee754_cxtest(unsigned int n)
+{
+ return (ieee754_csr.cx & n);
+}
+
+/*
+ * get sticky exceptions
+ */
+static inline unsigned int ieee754_getsx(void)
+{
+ return (ieee754_csr.sx);
+}
+
+/* clear sticky conditions
+ */
+static inline unsigned int ieee754_clrsx(void)
+{
+ return (ieee754_csr.sx = 0);
+}
+
+/* test for sticky exception condition
+ */
+static inline int ieee754_sxtest(unsigned int n)
+{
+ return (ieee754_csr.sx & n);
+}
+
+/* debugging */
+union ieee754sp ieee754sp_dump(char *s, union ieee754sp x);
+union ieee754dp ieee754dp_dump(char *s, union ieee754dp x);
+
+#define IEEE754_SPCVAL_PZERO 0 /* +0.0 */
+#define IEEE754_SPCVAL_NZERO 1 /* -0.0 */
+#define IEEE754_SPCVAL_PONE 2 /* +1.0 */
+#define IEEE754_SPCVAL_NONE 3 /* -1.0 */
+#define IEEE754_SPCVAL_PTEN 4 /* +10.0 */
+#define IEEE754_SPCVAL_NTEN 5 /* -10.0 */
+#define IEEE754_SPCVAL_PINFINITY 6 /* +inf */
+#define IEEE754_SPCVAL_NINFINITY 7 /* -inf */
+#define IEEE754_SPCVAL_INDEF_LEG 8 /* legacy quiet NaN */
+#define IEEE754_SPCVAL_INDEF_2008 9 /* IEEE 754-2008 quiet NaN */
+#define IEEE754_SPCVAL_PMAX 10 /* +max norm */
+#define IEEE754_SPCVAL_NMAX 11 /* -max norm */
+#define IEEE754_SPCVAL_PMIN 12 /* +min norm */
+#define IEEE754_SPCVAL_NMIN 13 /* -min norm */
+#define IEEE754_SPCVAL_PMIND 14 /* +min denorm */
+#define IEEE754_SPCVAL_NMIND 15 /* -min denorm */
+#define IEEE754_SPCVAL_P1E31 16 /* + 1.0e31 */
+#define IEEE754_SPCVAL_P1E63 17 /* + 1.0e63 */
+
+extern const union ieee754dp __ieee754dp_spcvals[];
+extern const union ieee754sp __ieee754sp_spcvals[];
+#define ieee754dp_spcvals ((const union ieee754dp *)__ieee754dp_spcvals)
+#define ieee754sp_spcvals ((const union ieee754sp *)__ieee754sp_spcvals)
+
+/*
+ * Return infinity with given sign
+ */
+#define ieee754dp_inf(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PINFINITY+(sn)])
+#define ieee754dp_zero(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PZERO+(sn)])
+#define ieee754dp_one(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PONE+(sn)])
+#define ieee754dp_ten(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PTEN+(sn)])
+#define ieee754dp_indef() (ieee754dp_spcvals[IEEE754_SPCVAL_INDEF_LEG + \
+ ieee754_csr.nan2008])
+#define ieee754dp_max(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PMAX+(sn)])
+#define ieee754dp_min(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PMIN+(sn)])
+#define ieee754dp_mind(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PMIND+(sn)])
+#define ieee754dp_1e31() (ieee754dp_spcvals[IEEE754_SPCVAL_P1E31])
+#define ieee754dp_1e63() (ieee754dp_spcvals[IEEE754_SPCVAL_P1E63])
+
+#define ieee754sp_inf(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PINFINITY+(sn)])
+#define ieee754sp_zero(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PZERO+(sn)])
+#define ieee754sp_one(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PONE+(sn)])
+#define ieee754sp_ten(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PTEN+(sn)])
+#define ieee754sp_indef() (ieee754sp_spcvals[IEEE754_SPCVAL_INDEF_LEG + \
+ ieee754_csr.nan2008])
+#define ieee754sp_max(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PMAX+(sn)])
+#define ieee754sp_min(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PMIN+(sn)])
+#define ieee754sp_mind(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PMIND+(sn)])
+#define ieee754sp_1e31() (ieee754sp_spcvals[IEEE754_SPCVAL_P1E31])
+#define ieee754sp_1e63() (ieee754sp_spcvals[IEEE754_SPCVAL_P1E63])
+
+/*
+ * Indefinite integer value
+ */
+static inline int ieee754si_indef(void)
+{
+ return ieee754_csr.nan2008 ? 0 : INT_MAX;
+}
+
+static inline s64 ieee754di_indef(void)
+{
+ return ieee754_csr.nan2008 ? 0 : S64_MAX;
+}
+
+/*
+ * Overflow integer value
+ */
+static inline int ieee754si_overflow(int xs)
+{
+ return ieee754_csr.nan2008 && xs ? INT_MIN : INT_MAX;
+}
+
+static inline s64 ieee754di_overflow(int xs)
+{
+ return ieee754_csr.nan2008 && xs ? S64_MIN : S64_MAX;
+}
+
+/* result types for xctx.rt */
+#define IEEE754_RT_SP 0
+#define IEEE754_RT_DP 1
+#define IEEE754_RT_XP 2
+#define IEEE754_RT_SI 3
+#define IEEE754_RT_DI 4
+
+/* compat */
+#define ieee754dp_fix(x) ieee754dp_tint(x)
+#define ieee754sp_fix(x) ieee754sp_tint(x)
+
+#endif /* __ARCH_MIPS_MATH_EMU_IEEE754_H */
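Note how the ieee754dp_inf(sn)/ieee754sp_zero(sn) style macros above work: the special-value tables interleave each positive constant with its negative counterpart, so adding the sign (0 or 1) to the positive index selects the right entry. A trimmed-down userspace analogue of that indexing trick (names are illustrative):

#include <stdio.h>

/* Positive/negative constants interleaved, as in __ieee754dp_spcvals[]. */
static const double spcvals_sketch[] = {
	+0.0, -0.0,	/* PZERO, NZERO */
	+1.0, -1.0,	/* PONE,  NONE  */
	+10.0, -10.0,	/* PTEN,  NTEN  */
};

#define SKETCH_PONE 2
#define sketch_one(sn) (spcvals_sketch[SKETCH_PONE + (sn)])

int main(void)
{
	printf("%g %g\n", sketch_one(0), sketch_one(1));	/* 1 -1 */
	return 0;
}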
diff --git a/arch/mips/math-emu/ieee754d.c b/arch/mips/math-emu/ieee754d.c
new file mode 100644
index 000000000..586c4db2d
--- /dev/null
+++ b/arch/mips/math-emu/ieee754d.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Some debug functions
+ *
+ * MIPS floating point support
+ *
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ *
+ * Nov 7, 2000
+ * Modified to build and operate in Linux kernel environment.
+ *
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/printk.h>
+#include "ieee754.h"
+#include "ieee754sp.h"
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_dump(char *m, union ieee754dp x)
+{
+ int i;
+
+ printk("%s", m);
+ printk("<%08x,%08x>\n", (unsigned) (x.bits >> 32),
+ (unsigned) x.bits);
+ printk("\t=");
+ switch (ieee754dp_class(x)) {
+ case IEEE754_CLASS_QNAN:
+ case IEEE754_CLASS_SNAN:
+ printk("Nan %c", DPSIGN(x) ? '-' : '+');
+ for (i = DP_FBITS - 1; i >= 0; i--)
+ printk("%c", DPMANT(x) & DP_MBIT(i) ? '1' : '0');
+ break;
+ case IEEE754_CLASS_INF:
+ printk("%cInfinity", DPSIGN(x) ? '-' : '+');
+ break;
+ case IEEE754_CLASS_ZERO:
+ printk("%cZero", DPSIGN(x) ? '-' : '+');
+ break;
+ case IEEE754_CLASS_DNORM:
+ printk("%c0.", DPSIGN(x) ? '-' : '+');
+ for (i = DP_FBITS - 1; i >= 0; i--)
+ printk("%c", DPMANT(x) & DP_MBIT(i) ? '1' : '0');
+ printk("e%d", DPBEXP(x) - DP_EBIAS);
+ break;
+ case IEEE754_CLASS_NORM:
+ printk("%c1.", DPSIGN(x) ? '-' : '+');
+ for (i = DP_FBITS - 1; i >= 0; i--)
+ printk("%c", DPMANT(x) & DP_MBIT(i) ? '1' : '0');
+ printk("e%d", DPBEXP(x) - DP_EBIAS);
+ break;
+ default:
+ printk("Illegal/Unknown IEEE754 value class");
+ }
+ printk("\n");
+ return x;
+}
+
+union ieee754sp ieee754sp_dump(char *m, union ieee754sp x)
+{
+ int i;
+
+ printk("%s=", m);
+ printk("<%08x>\n", (unsigned) x.bits);
+ printk("\t=");
+ switch (ieee754sp_class(x)) {
+ case IEEE754_CLASS_QNAN:
+ case IEEE754_CLASS_SNAN:
+ printk("Nan %c", SPSIGN(x) ? '-' : '+');
+ for (i = SP_FBITS - 1; i >= 0; i--)
+ printk("%c", SPMANT(x) & SP_MBIT(i) ? '1' : '0');
+ break;
+ case IEEE754_CLASS_INF:
+ printk("%cInfinity", SPSIGN(x) ? '-' : '+');
+ break;
+ case IEEE754_CLASS_ZERO:
+ printk("%cZero", SPSIGN(x) ? '-' : '+');
+ break;
+ case IEEE754_CLASS_DNORM:
+ printk("%c0.", SPSIGN(x) ? '-' : '+');
+ for (i = SP_FBITS - 1; i >= 0; i--)
+ printk("%c", SPMANT(x) & SP_MBIT(i) ? '1' : '0');
+ printk("e%d", SPBEXP(x) - SP_EBIAS);
+ break;
+ case IEEE754_CLASS_NORM:
+ printk("%c1.", SPSIGN(x) ? '-' : '+');
+ for (i = SP_FBITS - 1; i >= 0; i--)
+ printk("%c", SPMANT(x) & SP_MBIT(i) ? '1' : '0');
+ printk("e%d", SPBEXP(x) - SP_EBIAS);
+ break;
+ default:
+ printk("Illegal/Unknown IEEE754 value class");
+ }
+ printk("\n");
+ return x;
+}
diff --git a/arch/mips/math-emu/ieee754dp.c b/arch/mips/math-emu/ieee754dp.c
new file mode 100644
index 000000000..07ef146e2
--- /dev/null
+++ b/arch/mips/math-emu/ieee754dp.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * double precision: common utilities
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include <linux/compiler.h>
+
+#include "ieee754dp.h"
+
+int ieee754dp_class(union ieee754dp x)
+{
+ COMPXDP;
+ EXPLODEXDP;
+ return xc;
+}
+
+static inline int ieee754dp_isnan(union ieee754dp x)
+{
+ return ieee754_class_nan(ieee754dp_class(x));
+}
+
+static inline int ieee754dp_issnan(union ieee754dp x)
+{
+ int qbit;
+
+ assert(ieee754dp_isnan(x));
+ qbit = (DPMANT(x) & DP_MBIT(DP_FBITS - 1)) == DP_MBIT(DP_FBITS - 1);
+ return ieee754_csr.nan2008 ^ qbit;
+}
+
+
+/*
+ * Raise the Invalid Operation IEEE 754 exception
+ * and convert the signaling NaN supplied to a quiet NaN.
+ */
+union ieee754dp __cold ieee754dp_nanxcpt(union ieee754dp r)
+{
+ assert(ieee754dp_issnan(r));
+
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ if (ieee754_csr.nan2008) {
+ DPMANT(r) |= DP_MBIT(DP_FBITS - 1);
+ } else {
+ DPMANT(r) &= ~DP_MBIT(DP_FBITS - 1);
+ if (!ieee754dp_isnan(r))
+ DPMANT(r) |= DP_MBIT(DP_FBITS - 2);
+ }
+
+ return r;
+}
+
+static u64 ieee754dp_get_rounding(int sn, u64 xm)
+{
+ /* inexact must round off 3 bits
+ */
+ if (xm & (DP_MBIT(3) - 1)) {
+ switch (ieee754_csr.rm) {
+ case FPU_CSR_RZ:
+ break;
+ case FPU_CSR_RN:
+ xm += 0x3 + ((xm >> 3) & 1);
+ /* xm += (xm&0x8)?0x4:0x3 */
+ break;
+ case FPU_CSR_RU: /* toward +Infinity */
+ if (!sn) /* ?? */
+ xm += 0x8;
+ break;
+ case FPU_CSR_RD: /* toward -Infinity */
+ if (sn) /* ?? */
+ xm += 0x8;
+ break;
+ }
+ }
+ return xm;
+}
+
+
+/* Generate a normal/denormal number with overflow/underflow handling.
+ * sn is the sign
+ * xe is an unbiased exponent
+ * xm is a 3-bit extended precision value.
+ */
+union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
+{
+ assert(xm); /* we don't gen exact zeros (probably should) */
+
+ assert((xm >> (DP_FBITS + 1 + 3)) == 0); /* no excess */
+ assert(xm & (DP_HIDDEN_BIT << 3));
+
+ if (xe < DP_EMIN) {
+ /* strip lower bits */
+ int es = DP_EMIN - xe;
+
+ if (ieee754_csr.nod) {
+ ieee754_setcx(IEEE754_UNDERFLOW);
+ ieee754_setcx(IEEE754_INEXACT);
+
+ switch (ieee754_csr.rm) {
+ case FPU_CSR_RN:
+ case FPU_CSR_RZ:
+ return ieee754dp_zero(sn);
+ case FPU_CSR_RU: /* toward +Infinity */
+ if (sn == 0)
+ return ieee754dp_min(0);
+ else
+ return ieee754dp_zero(1);
+ case FPU_CSR_RD: /* toward -Infinity */
+ if (sn == 0)
+ return ieee754dp_zero(0);
+ else
+ return ieee754dp_min(1);
+ }
+ }
+
+ if (xe == DP_EMIN - 1 &&
+ ieee754dp_get_rounding(sn, xm) >> (DP_FBITS + 1 + 3))
+ {
+ /* Not tiny after rounding */
+ ieee754_setcx(IEEE754_INEXACT);
+ xm = ieee754dp_get_rounding(sn, xm);
+ xm >>= 1;
+ /* Clear grs bits */
+ xm &= ~(DP_MBIT(3) - 1);
+ xe++;
+ }
+ else {
+ /* sticky right shift es bits
+ */
+ xm = XDPSRS(xm, es);
+ xe += es;
+ assert((xm & (DP_HIDDEN_BIT << 3)) == 0);
+ assert(xe == DP_EMIN);
+ }
+ }
+ if (xm & (DP_MBIT(3) - 1)) {
+ ieee754_setcx(IEEE754_INEXACT);
+ if ((xm & (DP_HIDDEN_BIT << 3)) == 0) {
+ ieee754_setcx(IEEE754_UNDERFLOW);
+ }
+
+ /* inexact must round off 3 bits
+ */
+ xm = ieee754dp_get_rounding(sn, xm);
+ /* adjust exponent for rounding add overflowing
+ */
+ if (xm >> (DP_FBITS + 3 + 1)) {
+ /* add causes mantissa overflow */
+ xm >>= 1;
+ xe++;
+ }
+ }
+ /* strip grs bits */
+ xm >>= 3;
+
+ assert((xm >> (DP_FBITS + 1)) == 0); /* no excess */
+ assert(xe >= DP_EMIN);
+
+ if (xe > DP_EMAX) {
+ ieee754_setcx(IEEE754_OVERFLOW);
+ ieee754_setcx(IEEE754_INEXACT);
+ /* -O can be table indexed by (rm,sn) */
+ switch (ieee754_csr.rm) {
+ case FPU_CSR_RN:
+ return ieee754dp_inf(sn);
+ case FPU_CSR_RZ:
+ return ieee754dp_max(sn);
+ case FPU_CSR_RU: /* toward +Infinity */
+ if (sn == 0)
+ return ieee754dp_inf(0);
+ else
+ return ieee754dp_max(1);
+ case FPU_CSR_RD: /* toward -Infinity */
+ if (sn == 0)
+ return ieee754dp_max(0);
+ else
+ return ieee754dp_inf(1);
+ }
+ }
+ /* gen norm/denorm/zero */
+
+ if ((xm & DP_HIDDEN_BIT) == 0) {
+ /* we underflow (tiny/zero) */
+ assert(xe == DP_EMIN);
+ if (ieee754_csr.mx & IEEE754_UNDERFLOW)
+ ieee754_setcx(IEEE754_UNDERFLOW);
+ return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm);
+ } else {
+ assert((xm >> (DP_FBITS + 1)) == 0); /* no excess */
+ assert(xm & DP_HIDDEN_BIT);
+
+ return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
+ }
+}
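The rounding helper above keeps three extra bits (guard, round, sticky) below the mantissa; in round-to-nearest mode, adding 0x3 plus the bit just above them implements round-to-nearest-even. A standalone check of that identity (illustrative, userspace only):

#include <stdint.h>
#include <stdio.h>

/* Round a value carrying 3 guard/round/sticky bits to nearest, ties to even. */
static uint64_t rne_3bit(uint64_t xm)
{
	if (xm & 0x7)				/* inexact: low 3 bits non-zero */
		xm += 0x3 + ((xm >> 3) & 1);	/* +4 if the kept LSB is set, else +3 */
	return xm >> 3;				/* drop the extra bits */
}

int main(void)
{
	/* with 3 fraction bits, 0xc is 1.5 and 0x14 is 2.5: both ties round to 2 */
	printf("%llu %llu\n",
	       (unsigned long long)rne_3bit(0xc),
	       (unsigned long long)rne_3bit(0x14));
	return 0;
}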
diff --git a/arch/mips/math-emu/ieee754dp.h b/arch/mips/math-emu/ieee754dp.h
new file mode 100644
index 000000000..b7c43a99a
--- /dev/null
+++ b/arch/mips/math-emu/ieee754dp.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE754 floating point
+ * double precision internal header file
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include <linux/compiler.h>
+
+#include "ieee754int.h"
+
+#define assert(expr) ((void)0)
+
+#define DP_EBIAS 1023
+#define DP_EMIN (-1022)
+#define DP_EMAX 1023
+#define DP_FBITS 52
+#define DP_MBITS 52
+
+#define DP_MBIT(x) ((u64)1 << (x))
+#define DP_HIDDEN_BIT DP_MBIT(DP_FBITS)
+#define DP_SIGN_BIT DP_MBIT(63)
+
+#define DPSIGN(dp) (dp.sign)
+#define DPBEXP(dp) (dp.bexp)
+#define DPMANT(dp) (dp.mant)
+
+static inline int ieee754dp_finite(union ieee754dp x)
+{
+ return DPBEXP(x) != DP_EMAX + 1 + DP_EBIAS;
+}
+
+/* 3bit extended double precision sticky right shift */
+#define XDPSRS(v,rs) \
+ ((rs > (DP_FBITS+3))?1:((v) >> (rs)) | ((v) << (64-(rs)) != 0))
+
+#define XDPSRSX1() \
+ (xe++, (xm = (xm >> 1) | (xm & 1)))
+
+#define XDPSRS1(v) \
+ (((v) >> 1) | ((v) & 1))
+
+/* 32bit * 32bit => 64bit unsigned integer multiplication */
+#define DPXMULT(x, y) ((u64)(x) * (u64)y)
+
+/* convert denormal to normalized with extended exponent */
+#define DPDNORMx(m,e) \
+ while ((m >> DP_FBITS) == 0) { m <<= 1; e--; }
+#define DPDNORMX DPDNORMx(xm, xe)
+#define DPDNORMY DPDNORMx(ym, ye)
+#define DPDNORMZ DPDNORMx(zm, ze)
+
+static inline union ieee754dp builddp(int s, int bx, u64 m)
+{
+ union ieee754dp r;
+
+ assert((s) == 0 || (s) == 1);
+ assert((bx) >= DP_EMIN - 1 + DP_EBIAS
+ && (bx) <= DP_EMAX + 1 + DP_EBIAS);
+ assert(((m) >> DP_FBITS) == 0);
+
+ r.sign = s;
+ r.bexp = bx;
+ r.mant = m;
+
+ return r;
+}
+
+extern union ieee754dp __cold ieee754dp_nanxcpt(union ieee754dp);
+extern union ieee754dp ieee754dp_format(int, int, u64);
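XDPSRS() above is a sticky right shift: every bit shifted out is OR-ed into the result's least significant bit, so a later rounding step can still tell that precision was lost. A minimal standalone version (illustrative; the shift-count guards mirror the macro's intent):

#include <stdint.h>
#include <stdio.h>

/* Sticky right shift: shifted-out bits collapse into the LSB. */
static uint64_t sticky_shr(uint64_t v, unsigned int rs)
{
	if (rs == 0)
		return v;
	if (rs >= 64)
		return v != 0;
	return (v >> rs) | ((v << (64 - rs)) != 0);
}

int main(void)
{
	/* a plain shift loses the set bit entirely; the sticky shift keeps a trace */
	printf("%llu %llu\n",
	       (unsigned long long)(0x1ULL >> 3),	/* 0 */
	       (unsigned long long)sticky_shr(0x1, 3));	/* 1 */
	return 0;
}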
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
new file mode 100644
index 000000000..2c3b13546
--- /dev/null
+++ b/arch/mips/math-emu/ieee754int.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE754 floating point
+ * common internal header file
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+#ifndef __IEEE754INT_H
+#define __IEEE754INT_H
+
+#include "ieee754.h"
+
+#define CLPAIR(x, y) ((x)*6+(y))
+
+enum maddf_flags {
+ MADDF_NEGATE_PRODUCT = 1 << 0,
+ MADDF_NEGATE_ADDITION = 1 << 1,
+};
+
+static inline void ieee754_clearcx(void)
+{
+ ieee754_csr.cx = 0;
+}
+
+static inline void ieee754_setcx(const unsigned int flags)
+{
+ ieee754_csr.cx |= flags;
+ ieee754_csr.sx |= flags;
+}
+
+static inline int ieee754_setandtestcx(const unsigned int x)
+{
+ ieee754_setcx(x);
+
+ return ieee754_csr.mx & x;
+}
+
+static inline int ieee754_class_nan(int xc)
+{
+ return xc >= IEEE754_CLASS_SNAN;
+}
+
+#define COMPXSP \
+ unsigned int xm; int xe; int xs __maybe_unused; int xc
+
+#define COMPYSP \
+ unsigned int ym; int ye; int ys; int yc
+
+#define COMPZSP \
+ unsigned int zm; int ze; int zs; int zc
+
+#define EXPLODESP(v, vc, vs, ve, vm) \
+{ \
+ vs = SPSIGN(v); \
+ ve = SPBEXP(v); \
+ vm = SPMANT(v); \
+ if (ve == SP_EMAX+1+SP_EBIAS) { \
+ if (vm == 0) \
+ vc = IEEE754_CLASS_INF; \
+ else if (ieee754_csr.nan2008 ^ !(vm & SP_MBIT(SP_FBITS - 1))) \
+ vc = IEEE754_CLASS_QNAN; \
+ else \
+ vc = IEEE754_CLASS_SNAN; \
+ } else if (ve == SP_EMIN-1+SP_EBIAS) { \
+ if (vm) { \
+ ve = SP_EMIN; \
+ vc = IEEE754_CLASS_DNORM; \
+ } else \
+ vc = IEEE754_CLASS_ZERO; \
+ } else { \
+ ve -= SP_EBIAS; \
+ vm |= SP_HIDDEN_BIT; \
+ vc = IEEE754_CLASS_NORM; \
+ } \
+}
+#define EXPLODEXSP EXPLODESP(x, xc, xs, xe, xm)
+#define EXPLODEYSP EXPLODESP(y, yc, ys, ye, ym)
+#define EXPLODEZSP EXPLODESP(z, zc, zs, ze, zm)
+
+
+#define COMPXDP \
+ u64 xm; int xe; int xs __maybe_unused; int xc
+
+#define COMPYDP \
+ u64 ym; int ye; int ys; int yc
+
+#define COMPZDP \
+ u64 zm; int ze; int zs; int zc
+
+#define EXPLODEDP(v, vc, vs, ve, vm) \
+{ \
+ vm = DPMANT(v); \
+ vs = DPSIGN(v); \
+ ve = DPBEXP(v); \
+ if (ve == DP_EMAX+1+DP_EBIAS) { \
+ if (vm == 0) \
+ vc = IEEE754_CLASS_INF; \
+ else if (ieee754_csr.nan2008 ^ !(vm & DP_MBIT(DP_FBITS - 1))) \
+ vc = IEEE754_CLASS_QNAN; \
+ else \
+ vc = IEEE754_CLASS_SNAN; \
+ } else if (ve == DP_EMIN-1+DP_EBIAS) { \
+ if (vm) { \
+ ve = DP_EMIN; \
+ vc = IEEE754_CLASS_DNORM; \
+ } else \
+ vc = IEEE754_CLASS_ZERO; \
+ } else { \
+ ve -= DP_EBIAS; \
+ vm |= DP_HIDDEN_BIT; \
+ vc = IEEE754_CLASS_NORM; \
+ } \
+}
+#define EXPLODEXDP EXPLODEDP(x, xc, xs, xe, xm)
+#define EXPLODEYDP EXPLODEDP(y, yc, ys, ye, ym)
+#define EXPLODEZDP EXPLODEDP(z, zc, zs, ze, zm)
+
+#define FLUSHDP(v, vc, vs, ve, vm) \
+ if (vc==IEEE754_CLASS_DNORM) { \
+ if (ieee754_csr.nod) { \
+ ieee754_setcx(IEEE754_INEXACT); \
+ vc = IEEE754_CLASS_ZERO; \
+ ve = DP_EMIN-1+DP_EBIAS; \
+ vm = 0; \
+ v = ieee754dp_zero(vs); \
+ } \
+ }
+
+#define FLUSHSP(v, vc, vs, ve, vm) \
+ if (vc==IEEE754_CLASS_DNORM) { \
+ if (ieee754_csr.nod) { \
+ ieee754_setcx(IEEE754_INEXACT); \
+ vc = IEEE754_CLASS_ZERO; \
+ ve = SP_EMIN-1+SP_EBIAS; \
+ vm = 0; \
+ v = ieee754sp_zero(vs); \
+ } \
+ }
+
+#define FLUSHXDP FLUSHDP(x, xc, xs, xe, xm)
+#define FLUSHYDP FLUSHDP(y, yc, ys, ye, ym)
+#define FLUSHZDP FLUSHDP(z, zc, zs, ze, zm)
+#define FLUSHXSP FLUSHSP(x, xc, xs, xe, xm)
+#define FLUSHYSP FLUSHSP(y, yc, ys, ye, ym)
+#define FLUSHZSP FLUSHSP(z, zc, zs, ze, zm)
+
+#endif /* __IEEE754INT_H */
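The EXPLODE*() macros above classify a value purely from its exponent field: all-ones means infinity or NaN, all-zeros means zero or denormal, everything else is a normal with the hidden bit restored (the kernel additionally splits NaN into signaling/quiet according to the nan2008 mode). A userspace sketch of the same decision tree for single precision (illustrative only):

#include <stdint.h>
#include <stdio.h>

enum cls { CLS_NORM, CLS_ZERO, CLS_DNORM, CLS_INF, CLS_NAN };

/* Classify an IEEE754 single by its raw bits, mirroring EXPLODESP(). */
static enum cls classify_sp(uint32_t bits)
{
	uint32_t bexp = (bits >> 23) & 0xff;
	uint32_t mant = bits & 0x7fffff;

	if (bexp == 0xff)
		return mant ? CLS_NAN : CLS_INF;
	if (bexp == 0)
		return mant ? CLS_DNORM : CLS_ZERO;
	return CLS_NORM;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_sp(0x7f800000),		/* +inf       -> CLS_INF   */
	       classify_sp(0x00000001),		/* min denorm -> CLS_DNORM */
	       classify_sp(0x3f800000));	/* 1.0f       -> CLS_NORM  */
	return 0;
}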
diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c
new file mode 100644
index 000000000..0b6267bc8
--- /dev/null
+++ b/arch/mips/math-emu/ieee754sp.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * single precision
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include <linux/compiler.h>
+
+#include "ieee754sp.h"
+
+int ieee754sp_class(union ieee754sp x)
+{
+ COMPXSP;
+ EXPLODEXSP;
+ return xc;
+}
+
+static inline int ieee754sp_isnan(union ieee754sp x)
+{
+ return ieee754_class_nan(ieee754sp_class(x));
+}
+
+static inline int ieee754sp_issnan(union ieee754sp x)
+{
+ int qbit;
+
+ assert(ieee754sp_isnan(x));
+ qbit = (SPMANT(x) & SP_MBIT(SP_FBITS - 1)) == SP_MBIT(SP_FBITS - 1);
+ return ieee754_csr.nan2008 ^ qbit;
+}
+
+
+/*
+ * Raise the Invalid Operation IEEE 754 exception
+ * and convert the signaling NaN supplied to a quiet NaN.
+ */
+union ieee754sp __cold ieee754sp_nanxcpt(union ieee754sp r)
+{
+ assert(ieee754sp_issnan(r));
+
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ if (ieee754_csr.nan2008) {
+ SPMANT(r) |= SP_MBIT(SP_FBITS - 1);
+ } else {
+ SPMANT(r) &= ~SP_MBIT(SP_FBITS - 1);
+ if (!ieee754sp_isnan(r))
+ SPMANT(r) |= SP_MBIT(SP_FBITS - 2);
+ }
+
+ return r;
+}
+
+static unsigned int ieee754sp_get_rounding(int sn, unsigned int xm)
+{
+ /* inexact must round off 3 bits
+ */
+ if (xm & (SP_MBIT(3) - 1)) {
+ switch (ieee754_csr.rm) {
+ case FPU_CSR_RZ:
+ break;
+ case FPU_CSR_RN:
+ xm += 0x3 + ((xm >> 3) & 1);
+ /* xm += (xm&0x8)?0x4:0x3 */
+ break;
+ case FPU_CSR_RU: /* toward +Infinity */
+ if (!sn) /* ?? */
+ xm += 0x8;
+ break;
+ case FPU_CSR_RD: /* toward -Infinity */
+ if (sn) /* ?? */
+ xm += 0x8;
+ break;
+ }
+ }
+ return xm;
+}
+
+
+/* Generate a normal/denormal number with overflow/underflow handling.
+ * sn is the sign
+ * xe is an unbiased exponent
+ * xm is a 3-bit extended precision value.
+ */
+union ieee754sp ieee754sp_format(int sn, int xe, unsigned int xm)
+{
+ assert(xm); /* we don't gen exact zeros (probably should) */
+
+ assert((xm >> (SP_FBITS + 1 + 3)) == 0); /* no excess */
+ assert(xm & (SP_HIDDEN_BIT << 3));
+
+ if (xe < SP_EMIN) {
+ /* strip lower bits */
+ int es = SP_EMIN - xe;
+
+ if (ieee754_csr.nod) {
+ ieee754_setcx(IEEE754_UNDERFLOW);
+ ieee754_setcx(IEEE754_INEXACT);
+
+ switch (ieee754_csr.rm) {
+ case FPU_CSR_RN:
+ case FPU_CSR_RZ:
+ return ieee754sp_zero(sn);
+ case FPU_CSR_RU: /* toward +Infinity */
+ if (sn == 0)
+ return ieee754sp_min(0);
+ else
+ return ieee754sp_zero(1);
+ case FPU_CSR_RD: /* toward -Infinity */
+ if (sn == 0)
+ return ieee754sp_zero(0);
+ else
+ return ieee754sp_min(1);
+ }
+ }
+
+ if (xe == SP_EMIN - 1 &&
+ ieee754sp_get_rounding(sn, xm) >> (SP_FBITS + 1 + 3))
+ {
+ /* Not tiny after rounding */
+ ieee754_setcx(IEEE754_INEXACT);
+ xm = ieee754sp_get_rounding(sn, xm);
+ xm >>= 1;
+ /* Clear grs bits */
+ xm &= ~(SP_MBIT(3) - 1);
+ xe++;
+ } else {
+ /* sticky right shift es bits
+ */
+ xm = XSPSRS(xm, es);
+ xe += es;
+ assert((xm & (SP_HIDDEN_BIT << 3)) == 0);
+ assert(xe == SP_EMIN);
+ }
+ }
+ if (xm & (SP_MBIT(3) - 1)) {
+ ieee754_setcx(IEEE754_INEXACT);
+ if ((xm & (SP_HIDDEN_BIT << 3)) == 0) {
+ ieee754_setcx(IEEE754_UNDERFLOW);
+ }
+
+ /* inexact must round off 3 bits
+ */
+ xm = ieee754sp_get_rounding(sn, xm);
+ /* adjust exponent for rounding add overflowing
+ */
+ if (xm >> (SP_FBITS + 1 + 3)) {
+ /* add causes mantissa overflow */
+ xm >>= 1;
+ xe++;
+ }
+ }
+ /* strip grs bits */
+ xm >>= 3;
+
+ assert((xm >> (SP_FBITS + 1)) == 0); /* no excess */
+ assert(xe >= SP_EMIN);
+
+ if (xe > SP_EMAX) {
+ ieee754_setcx(IEEE754_OVERFLOW);
+ ieee754_setcx(IEEE754_INEXACT);
+ /* -O can be table indexed by (rm,sn) */
+ switch (ieee754_csr.rm) {
+ case FPU_CSR_RN:
+ return ieee754sp_inf(sn);
+ case FPU_CSR_RZ:
+ return ieee754sp_max(sn);
+ case FPU_CSR_RU: /* toward +Infinity */
+ if (sn == 0)
+ return ieee754sp_inf(0);
+ else
+ return ieee754sp_max(1);
+ case FPU_CSR_RD: /* toward -Infinity */
+ if (sn == 0)
+ return ieee754sp_max(0);
+ else
+ return ieee754sp_inf(1);
+ }
+ }
+ /* gen norm/denorm/zero */
+
+ if ((xm & SP_HIDDEN_BIT) == 0) {
+ /* we underflow (tiny/zero) */
+ assert(xe == SP_EMIN);
+ if (ieee754_csr.mx & IEEE754_UNDERFLOW)
+ ieee754_setcx(IEEE754_UNDERFLOW);
+ return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm);
+ } else {
+ assert((xm >> (SP_FBITS + 1)) == 0); /* no excess */
+ assert(xm & SP_HIDDEN_BIT);
+
+ return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT);
+ }
+}
diff --git a/arch/mips/math-emu/ieee754sp.h b/arch/mips/math-emu/ieee754sp.h
new file mode 100644
index 000000000..79040f890
--- /dev/null
+++ b/arch/mips/math-emu/ieee754sp.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE754 floating point
+ * single precision internal header file
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include <linux/compiler.h>
+
+#include "ieee754int.h"
+
+#define assert(expr) ((void)0)
+
+#define SP_EBIAS 127
+#define SP_EMIN (-126)
+#define SP_EMAX 127
+#define SP_FBITS 23
+#define SP_MBITS 23
+
+#define SP_MBIT(x) ((u32)1 << (x))
+#define SP_HIDDEN_BIT SP_MBIT(SP_FBITS)
+#define SP_SIGN_BIT SP_MBIT(31)
+
+#define SPSIGN(sp) (sp.sign)
+#define SPBEXP(sp) (sp.bexp)
+#define SPMANT(sp) (sp.mant)
+
+static inline int ieee754sp_finite(union ieee754sp x)
+{
+ return SPBEXP(x) != SP_EMAX + 1 + SP_EBIAS;
+}
+
+/* 64 bit right shift with rounding */
+#define XSPSRS64(v, rs) \
+ (((rs) >= 64) ? ((v) != 0) : ((v) >> (rs)) | ((v) << (64-(rs)) != 0))
+
+/* 3bit extended single precision sticky right shift */
+#define XSPSRS(v, rs) \
+ ((rs > (SP_FBITS+3))?1:((v) >> (rs)) | ((v) << (32-(rs)) != 0))
+
+#define XSPSRS1(m) \
+ ((m >> 1) | (m & 1))
+
+#define SPXSRSX1() \
+ (xe++, (xm = XSPSRS1(xm)))
+
+#define SPXSRSY1() \
+ (ye++, (ym = XSPSRS1(ym)))
+
+/* convert denormal to normalized with extended exponent */
+#define SPDNORMx(m,e) \
+ while ((m >> SP_FBITS) == 0) { m <<= 1; e--; }
+#define SPDNORMX SPDNORMx(xm, xe)
+#define SPDNORMY SPDNORMx(ym, ye)
+#define SPDNORMZ SPDNORMx(zm, ze)
+
+static inline union ieee754sp buildsp(int s, int bx, unsigned int m)
+{
+ union ieee754sp r;
+
+ assert((s) == 0 || (s) == 1);
+ assert((bx) >= SP_EMIN - 1 + SP_EBIAS
+ && (bx) <= SP_EMAX + 1 + SP_EBIAS);
+ assert(((m) >> SP_FBITS) == 0);
+
+ r.sign = s;
+ r.bexp = bx;
+ r.mant = m;
+
+ return r;
+}
+
+extern union ieee754sp __cold ieee754sp_nanxcpt(union ieee754sp);
+extern union ieee754sp ieee754sp_format(int, int, unsigned);
diff --git a/arch/mips/math-emu/me-debugfs.c b/arch/mips/math-emu/me-debugfs.c
new file mode 100644
index 000000000..d5ad76b2b
--- /dev/null
+++ b/arch/mips/math-emu/me-debugfs.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/cpumask.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/types.h>
+#include <asm/debug.h>
+#include <asm/fpu_emulator.h>
+#include <asm/local.h>
+
+DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
+
+static int fpuemu_stat_get(void *data, u64 *val)
+{
+ int cpu;
+ unsigned long sum = 0;
+
+ for_each_online_cpu(cpu) {
+ struct mips_fpu_emulator_stats *ps;
+ local_t *pv;
+
+ ps = &per_cpu(fpuemustats, cpu);
+ pv = (void *)ps + (unsigned long)data;
+ sum += local_read(pv);
+ }
+ *val = sum;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_fpuemu_stat, fpuemu_stat_get, NULL, "%llu\n");
+
+/*
+ * Derive the debugfs counter name from a field name in the fpuemustats
+ * structure. For example, input "cmp_sueq_d" yields "cmp.sueq.d". This is
+ * needed since dots are not allowed in structure field names, but are
+ * desired in debugfs item names so that they clearly match the
+ * corresponding MIPS FPU instructions.
+ */
+static void adjust_instruction_counter_name(char *out_name, char *in_name)
+{
+ int i = 0;
+
+ strcpy(out_name, in_name);
+ while (in_name[i] != '\0') {
+ if (out_name[i] == '_')
+ out_name[i] = '.';
+ i++;
+ }
+}
+
+static int fpuemustats_clear_show(struct seq_file *s, void *unused)
+{
+ __this_cpu_write((fpuemustats).emulated, 0);
+ __this_cpu_write((fpuemustats).loads, 0);
+ __this_cpu_write((fpuemustats).stores, 0);
+ __this_cpu_write((fpuemustats).branches, 0);
+ __this_cpu_write((fpuemustats).cp1ops, 0);
+ __this_cpu_write((fpuemustats).cp1xops, 0);
+ __this_cpu_write((fpuemustats).errors, 0);
+ __this_cpu_write((fpuemustats).ieee754_inexact, 0);
+ __this_cpu_write((fpuemustats).ieee754_underflow, 0);
+ __this_cpu_write((fpuemustats).ieee754_overflow, 0);
+ __this_cpu_write((fpuemustats).ieee754_zerodiv, 0);
+ __this_cpu_write((fpuemustats).ieee754_invalidop, 0);
+ __this_cpu_write((fpuemustats).ds_emul, 0);
+
+ __this_cpu_write((fpuemustats).abs_s, 0);
+ __this_cpu_write((fpuemustats).abs_d, 0);
+ __this_cpu_write((fpuemustats).add_s, 0);
+ __this_cpu_write((fpuemustats).add_d, 0);
+ __this_cpu_write((fpuemustats).bc1eqz, 0);
+ __this_cpu_write((fpuemustats).bc1nez, 0);
+ __this_cpu_write((fpuemustats).ceil_w_s, 0);
+ __this_cpu_write((fpuemustats).ceil_w_d, 0);
+ __this_cpu_write((fpuemustats).ceil_l_s, 0);
+ __this_cpu_write((fpuemustats).ceil_l_d, 0);
+ __this_cpu_write((fpuemustats).class_s, 0);
+ __this_cpu_write((fpuemustats).class_d, 0);
+ __this_cpu_write((fpuemustats).cmp_af_s, 0);
+ __this_cpu_write((fpuemustats).cmp_af_d, 0);
+ __this_cpu_write((fpuemustats).cmp_eq_s, 0);
+ __this_cpu_write((fpuemustats).cmp_eq_d, 0);
+ __this_cpu_write((fpuemustats).cmp_le_s, 0);
+ __this_cpu_write((fpuemustats).cmp_le_d, 0);
+ __this_cpu_write((fpuemustats).cmp_lt_s, 0);
+ __this_cpu_write((fpuemustats).cmp_lt_d, 0);
+ __this_cpu_write((fpuemustats).cmp_ne_s, 0);
+ __this_cpu_write((fpuemustats).cmp_ne_d, 0);
+ __this_cpu_write((fpuemustats).cmp_or_s, 0);
+ __this_cpu_write((fpuemustats).cmp_or_d, 0);
+ __this_cpu_write((fpuemustats).cmp_ueq_s, 0);
+ __this_cpu_write((fpuemustats).cmp_ueq_d, 0);
+ __this_cpu_write((fpuemustats).cmp_ule_s, 0);
+ __this_cpu_write((fpuemustats).cmp_ule_d, 0);
+ __this_cpu_write((fpuemustats).cmp_ult_s, 0);
+ __this_cpu_write((fpuemustats).cmp_ult_d, 0);
+ __this_cpu_write((fpuemustats).cmp_un_s, 0);
+ __this_cpu_write((fpuemustats).cmp_un_d, 0);
+ __this_cpu_write((fpuemustats).cmp_une_s, 0);
+ __this_cpu_write((fpuemustats).cmp_une_d, 0);
+ __this_cpu_write((fpuemustats).cmp_saf_s, 0);
+ __this_cpu_write((fpuemustats).cmp_saf_d, 0);
+ __this_cpu_write((fpuemustats).cmp_seq_s, 0);
+ __this_cpu_write((fpuemustats).cmp_seq_d, 0);
+ __this_cpu_write((fpuemustats).cmp_sle_s, 0);
+ __this_cpu_write((fpuemustats).cmp_sle_d, 0);
+ __this_cpu_write((fpuemustats).cmp_slt_s, 0);
+ __this_cpu_write((fpuemustats).cmp_slt_d, 0);
+ __this_cpu_write((fpuemustats).cmp_sne_s, 0);
+ __this_cpu_write((fpuemustats).cmp_sne_d, 0);
+ __this_cpu_write((fpuemustats).cmp_sor_s, 0);
+ __this_cpu_write((fpuemustats).cmp_sor_d, 0);
+ __this_cpu_write((fpuemustats).cmp_sueq_s, 0);
+ __this_cpu_write((fpuemustats).cmp_sueq_d, 0);
+ __this_cpu_write((fpuemustats).cmp_sule_s, 0);
+ __this_cpu_write((fpuemustats).cmp_sule_d, 0);
+ __this_cpu_write((fpuemustats).cmp_sult_s, 0);
+ __this_cpu_write((fpuemustats).cmp_sult_d, 0);
+ __this_cpu_write((fpuemustats).cmp_sun_s, 0);
+ __this_cpu_write((fpuemustats).cmp_sun_d, 0);
+ __this_cpu_write((fpuemustats).cmp_sune_s, 0);
+ __this_cpu_write((fpuemustats).cmp_sune_d, 0);
+ __this_cpu_write((fpuemustats).cvt_d_l, 0);
+ __this_cpu_write((fpuemustats).cvt_d_s, 0);
+ __this_cpu_write((fpuemustats).cvt_d_w, 0);
+ __this_cpu_write((fpuemustats).cvt_l_s, 0);
+ __this_cpu_write((fpuemustats).cvt_l_d, 0);
+ __this_cpu_write((fpuemustats).cvt_s_d, 0);
+ __this_cpu_write((fpuemustats).cvt_s_l, 0);
+ __this_cpu_write((fpuemustats).cvt_s_w, 0);
+ __this_cpu_write((fpuemustats).cvt_w_s, 0);
+ __this_cpu_write((fpuemustats).cvt_w_d, 0);
+ __this_cpu_write((fpuemustats).div_s, 0);
+ __this_cpu_write((fpuemustats).div_d, 0);
+ __this_cpu_write((fpuemustats).floor_w_s, 0);
+ __this_cpu_write((fpuemustats).floor_w_d, 0);
+ __this_cpu_write((fpuemustats).floor_l_s, 0);
+ __this_cpu_write((fpuemustats).floor_l_d, 0);
+ __this_cpu_write((fpuemustats).maddf_s, 0);
+ __this_cpu_write((fpuemustats).maddf_d, 0);
+ __this_cpu_write((fpuemustats).max_s, 0);
+ __this_cpu_write((fpuemustats).max_d, 0);
+ __this_cpu_write((fpuemustats).maxa_s, 0);
+ __this_cpu_write((fpuemustats).maxa_d, 0);
+ __this_cpu_write((fpuemustats).min_s, 0);
+ __this_cpu_write((fpuemustats).min_d, 0);
+ __this_cpu_write((fpuemustats).mina_s, 0);
+ __this_cpu_write((fpuemustats).mina_d, 0);
+ __this_cpu_write((fpuemustats).mov_s, 0);
+ __this_cpu_write((fpuemustats).mov_d, 0);
+ __this_cpu_write((fpuemustats).msubf_s, 0);
+ __this_cpu_write((fpuemustats).msubf_d, 0);
+ __this_cpu_write((fpuemustats).mul_s, 0);
+ __this_cpu_write((fpuemustats).mul_d, 0);
+ __this_cpu_write((fpuemustats).neg_s, 0);
+ __this_cpu_write((fpuemustats).neg_d, 0);
+ __this_cpu_write((fpuemustats).recip_s, 0);
+ __this_cpu_write((fpuemustats).recip_d, 0);
+ __this_cpu_write((fpuemustats).rint_s, 0);
+ __this_cpu_write((fpuemustats).rint_d, 0);
+ __this_cpu_write((fpuemustats).round_w_s, 0);
+ __this_cpu_write((fpuemustats).round_w_d, 0);
+ __this_cpu_write((fpuemustats).round_l_s, 0);
+ __this_cpu_write((fpuemustats).round_l_d, 0);
+ __this_cpu_write((fpuemustats).rsqrt_s, 0);
+ __this_cpu_write((fpuemustats).rsqrt_d, 0);
+ __this_cpu_write((fpuemustats).sel_s, 0);
+ __this_cpu_write((fpuemustats).sel_d, 0);
+ __this_cpu_write((fpuemustats).seleqz_s, 0);
+ __this_cpu_write((fpuemustats).seleqz_d, 0);
+ __this_cpu_write((fpuemustats).selnez_s, 0);
+ __this_cpu_write((fpuemustats).selnez_d, 0);
+ __this_cpu_write((fpuemustats).sqrt_s, 0);
+ __this_cpu_write((fpuemustats).sqrt_d, 0);
+ __this_cpu_write((fpuemustats).sub_s, 0);
+ __this_cpu_write((fpuemustats).sub_d, 0);
+ __this_cpu_write((fpuemustats).trunc_w_s, 0);
+ __this_cpu_write((fpuemustats).trunc_w_d, 0);
+ __this_cpu_write((fpuemustats).trunc_l_s, 0);
+ __this_cpu_write((fpuemustats).trunc_l_d, 0);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(fpuemustats_clear);
+
+static int __init debugfs_fpuemu(void)
+{
+ struct dentry *fpuemu_debugfs_base_dir;
+ struct dentry *fpuemu_debugfs_inst_dir;
+ char name[32];
+
+ fpuemu_debugfs_base_dir = debugfs_create_dir("fpuemustats",
+ mips_debugfs_dir);
+
+ debugfs_create_file("fpuemustats_clear", 0444, mips_debugfs_dir, NULL,
+ &fpuemustats_clear_fops);
+
+#define FPU_EMU_STAT_OFFSET(m) \
+ offsetof(struct mips_fpu_emulator_stats, m)
+
+#define FPU_STAT_CREATE(m) \
+do { \
+ debugfs_create_file(#m, 0444, fpuemu_debugfs_base_dir, \
+ (void *)FPU_EMU_STAT_OFFSET(m), \
+ &fops_fpuemu_stat); \
+} while (0)
+
+ FPU_STAT_CREATE(emulated);
+ FPU_STAT_CREATE(loads);
+ FPU_STAT_CREATE(stores);
+ FPU_STAT_CREATE(branches);
+ FPU_STAT_CREATE(cp1ops);
+ FPU_STAT_CREATE(cp1xops);
+ FPU_STAT_CREATE(errors);
+ FPU_STAT_CREATE(ieee754_inexact);
+ FPU_STAT_CREATE(ieee754_underflow);
+ FPU_STAT_CREATE(ieee754_overflow);
+ FPU_STAT_CREATE(ieee754_zerodiv);
+ FPU_STAT_CREATE(ieee754_invalidop);
+ FPU_STAT_CREATE(ds_emul);
+
+ fpuemu_debugfs_inst_dir = debugfs_create_dir("instructions",
+ fpuemu_debugfs_base_dir);
+
+#define FPU_STAT_CREATE_EX(m) \
+do { \
+ adjust_instruction_counter_name(name, #m); \
+ \
+ debugfs_create_file(name, 0444, fpuemu_debugfs_inst_dir, \
+ (void *)FPU_EMU_STAT_OFFSET(m), \
+ &fops_fpuemu_stat); \
+} while (0)
+
+ FPU_STAT_CREATE_EX(abs_s);
+ FPU_STAT_CREATE_EX(abs_d);
+ FPU_STAT_CREATE_EX(add_s);
+ FPU_STAT_CREATE_EX(add_d);
+ FPU_STAT_CREATE_EX(bc1eqz);
+ FPU_STAT_CREATE_EX(bc1nez);
+ FPU_STAT_CREATE_EX(ceil_w_s);
+ FPU_STAT_CREATE_EX(ceil_w_d);
+ FPU_STAT_CREATE_EX(ceil_l_s);
+ FPU_STAT_CREATE_EX(ceil_l_d);
+ FPU_STAT_CREATE_EX(class_s);
+ FPU_STAT_CREATE_EX(class_d);
+ FPU_STAT_CREATE_EX(cmp_af_s);
+ FPU_STAT_CREATE_EX(cmp_af_d);
+ FPU_STAT_CREATE_EX(cmp_eq_s);
+ FPU_STAT_CREATE_EX(cmp_eq_d);
+ FPU_STAT_CREATE_EX(cmp_le_s);
+ FPU_STAT_CREATE_EX(cmp_le_d);
+ FPU_STAT_CREATE_EX(cmp_lt_s);
+ FPU_STAT_CREATE_EX(cmp_lt_d);
+ FPU_STAT_CREATE_EX(cmp_ne_s);
+ FPU_STAT_CREATE_EX(cmp_ne_d);
+ FPU_STAT_CREATE_EX(cmp_or_s);
+ FPU_STAT_CREATE_EX(cmp_or_d);
+ FPU_STAT_CREATE_EX(cmp_ueq_s);
+ FPU_STAT_CREATE_EX(cmp_ueq_d);
+ FPU_STAT_CREATE_EX(cmp_ule_s);
+ FPU_STAT_CREATE_EX(cmp_ule_d);
+ FPU_STAT_CREATE_EX(cmp_ult_s);
+ FPU_STAT_CREATE_EX(cmp_ult_d);
+ FPU_STAT_CREATE_EX(cmp_un_s);
+ FPU_STAT_CREATE_EX(cmp_un_d);
+ FPU_STAT_CREATE_EX(cmp_une_s);
+ FPU_STAT_CREATE_EX(cmp_une_d);
+ FPU_STAT_CREATE_EX(cmp_saf_s);
+ FPU_STAT_CREATE_EX(cmp_saf_d);
+ FPU_STAT_CREATE_EX(cmp_seq_s);
+ FPU_STAT_CREATE_EX(cmp_seq_d);
+ FPU_STAT_CREATE_EX(cmp_sle_s);
+ FPU_STAT_CREATE_EX(cmp_sle_d);
+ FPU_STAT_CREATE_EX(cmp_slt_s);
+ FPU_STAT_CREATE_EX(cmp_slt_d);
+ FPU_STAT_CREATE_EX(cmp_sne_s);
+ FPU_STAT_CREATE_EX(cmp_sne_d);
+ FPU_STAT_CREATE_EX(cmp_sor_s);
+ FPU_STAT_CREATE_EX(cmp_sor_d);
+ FPU_STAT_CREATE_EX(cmp_sueq_s);
+ FPU_STAT_CREATE_EX(cmp_sueq_d);
+ FPU_STAT_CREATE_EX(cmp_sule_s);
+ FPU_STAT_CREATE_EX(cmp_sule_d);
+ FPU_STAT_CREATE_EX(cmp_sult_s);
+ FPU_STAT_CREATE_EX(cmp_sult_d);
+ FPU_STAT_CREATE_EX(cmp_sun_s);
+ FPU_STAT_CREATE_EX(cmp_sun_d);
+ FPU_STAT_CREATE_EX(cmp_sune_s);
+ FPU_STAT_CREATE_EX(cmp_sune_d);
+ FPU_STAT_CREATE_EX(cvt_d_l);
+ FPU_STAT_CREATE_EX(cvt_d_s);
+ FPU_STAT_CREATE_EX(cvt_d_w);
+ FPU_STAT_CREATE_EX(cvt_l_s);
+ FPU_STAT_CREATE_EX(cvt_l_d);
+ FPU_STAT_CREATE_EX(cvt_s_d);
+ FPU_STAT_CREATE_EX(cvt_s_l);
+ FPU_STAT_CREATE_EX(cvt_s_w);
+ FPU_STAT_CREATE_EX(cvt_w_s);
+ FPU_STAT_CREATE_EX(cvt_w_d);
+ FPU_STAT_CREATE_EX(div_s);
+ FPU_STAT_CREATE_EX(div_d);
+ FPU_STAT_CREATE_EX(floor_w_s);
+ FPU_STAT_CREATE_EX(floor_w_d);
+ FPU_STAT_CREATE_EX(floor_l_s);
+ FPU_STAT_CREATE_EX(floor_l_d);
+ FPU_STAT_CREATE_EX(maddf_s);
+ FPU_STAT_CREATE_EX(maddf_d);
+ FPU_STAT_CREATE_EX(max_s);
+ FPU_STAT_CREATE_EX(max_d);
+ FPU_STAT_CREATE_EX(maxa_s);
+ FPU_STAT_CREATE_EX(maxa_d);
+ FPU_STAT_CREATE_EX(min_s);
+ FPU_STAT_CREATE_EX(min_d);
+ FPU_STAT_CREATE_EX(mina_s);
+ FPU_STAT_CREATE_EX(mina_d);
+ FPU_STAT_CREATE_EX(mov_s);
+ FPU_STAT_CREATE_EX(mov_d);
+ FPU_STAT_CREATE_EX(msubf_s);
+ FPU_STAT_CREATE_EX(msubf_d);
+ FPU_STAT_CREATE_EX(mul_s);
+ FPU_STAT_CREATE_EX(mul_d);
+ FPU_STAT_CREATE_EX(neg_s);
+ FPU_STAT_CREATE_EX(neg_d);
+ FPU_STAT_CREATE_EX(recip_s);
+ FPU_STAT_CREATE_EX(recip_d);
+ FPU_STAT_CREATE_EX(rint_s);
+ FPU_STAT_CREATE_EX(rint_d);
+ FPU_STAT_CREATE_EX(round_w_s);
+ FPU_STAT_CREATE_EX(round_w_d);
+ FPU_STAT_CREATE_EX(round_l_s);
+ FPU_STAT_CREATE_EX(round_l_d);
+ FPU_STAT_CREATE_EX(rsqrt_s);
+ FPU_STAT_CREATE_EX(rsqrt_d);
+ FPU_STAT_CREATE_EX(sel_s);
+ FPU_STAT_CREATE_EX(sel_d);
+ FPU_STAT_CREATE_EX(seleqz_s);
+ FPU_STAT_CREATE_EX(seleqz_d);
+ FPU_STAT_CREATE_EX(selnez_s);
+ FPU_STAT_CREATE_EX(selnez_d);
+ FPU_STAT_CREATE_EX(sqrt_s);
+ FPU_STAT_CREATE_EX(sqrt_d);
+ FPU_STAT_CREATE_EX(sub_s);
+ FPU_STAT_CREATE_EX(sub_d);
+ FPU_STAT_CREATE_EX(trunc_w_s);
+ FPU_STAT_CREATE_EX(trunc_w_d);
+ FPU_STAT_CREATE_EX(trunc_l_s);
+ FPU_STAT_CREATE_EX(trunc_l_d);
+
+ return 0;
+}
+arch_initcall(debugfs_fpuemu);
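The renaming helper above only rewrites underscores to dots, so each debugfs entry reads like the FPU mnemonic it counts. A quick standalone check (illustrative):

#include <stdio.h>
#include <string.h>

/* Rewrite '_' to '.' so a counter name matches the instruction mnemonic. */
static void rename_counter(char *out, const char *in)
{
	strcpy(out, in);
	for (size_t i = 0; out[i] != '\0'; i++)
		if (out[i] == '_')
			out[i] = '.';
}

int main(void)
{
	char name[32];

	rename_counter(name, "cmp_sueq_d");
	puts(name);	/* prints "cmp.sueq.d" */
	return 0;
}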
diff --git a/arch/mips/math-emu/sp_2008class.c b/arch/mips/math-emu/sp_2008class.c
new file mode 100644
index 000000000..b9adab6c2
--- /dev/null
+++ b/arch/mips/math-emu/sp_2008class.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: CLASS.f
+ * FPR[fd] = class(FPR[fs])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#include "ieee754sp.h"
+
+int ieee754sp_2008class(union ieee754sp x)
+{
+ COMPXSP;
+
+ EXPLODEXSP;
+
+ /*
+ * 10 bit mask as follows:
+ *
+ * bit0 = SNAN
+ * bit1 = QNAN
+ * bit2 = -INF
+ * bit3 = -NORM
+ * bit4 = -DNORM
+ * bit5 = -ZERO
+ * bit6 = +INF
+ * bit7 = +NORM
+ * bit8 = +DNORM
+ * bit9 = +ZERO
+ */
+
+ switch (xc) {
+ case IEEE754_CLASS_SNAN:
+ return 0x01;
+ case IEEE754_CLASS_QNAN:
+ return 0x02;
+ case IEEE754_CLASS_INF:
+ return 0x04 << (xs ? 0 : 4);
+ case IEEE754_CLASS_NORM:
+ return 0x08 << (xs ? 0 : 4);
+ case IEEE754_CLASS_DNORM:
+ return 0x10 << (xs ? 0 : 4);
+ case IEEE754_CLASS_ZERO:
+ return 0x20 << (xs ? 0 : 4);
+ default:
+ pr_err("Unknown class: %d\n", xc);
+ return 0;
+ }
+}
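To make the bit layout above concrete: negative classes land in bits 2-5 and positive ones in bits 6-9, so CLASS.S of -0.0 yields 0x20 and CLASS.S of +infinity yields 0x40. A small sketch of the same bit placement (illustrative; it takes the class and sign as plain flags rather than exploding the encoding):

#include <stdio.h>

enum cls { CLS_INF, CLS_NORM, CLS_DNORM, CLS_ZERO };

/* Place a (class, sign) pair into the 10-bit CLASS.fmt result mask. */
static int class2008_bit(enum cls c, int sign)
{
	static const int base[] = { 0x04, 0x08, 0x10, 0x20 };

	return base[c] << (sign ? 0 : 4);
}

int main(void)
{
	printf("0x%02x 0x%02x\n",
	       class2008_bit(CLS_ZERO, 1),	/* -0.0 -> 0x20 */
	       class2008_bit(CLS_INF, 0));	/* +inf -> 0x40 */
	return 0;
}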
diff --git a/arch/mips/math-emu/sp_add.c b/arch/mips/math-emu/sp_add.c
new file mode 100644
index 000000000..715cd0534
--- /dev/null
+++ b/arch/mips/math-emu/sp_add.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * single precision
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_add(union ieee754sp x, union ieee754sp y)
+{
+ int s;
+
+ COMPXSP;
+ COMPYSP;
+
+ EXPLODEXSP;
+ EXPLODEYSP;
+
+ ieee754_clearcx();
+
+ FLUSHXSP;
+ FLUSHYSP;
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754sp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754sp_nanxcpt(x);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return x;
+
+
+ /*
+ * Infinity handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ if (xs == ys)
+ return x;
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754sp_indef();
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ return x;
+
+ /*
+ * Zero handling
+ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ if (xs == ys)
+ return x;
+ else
+ return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ SPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ SPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ break;
+ }
+ assert(xm & SP_HIDDEN_BIT);
+ assert(ym & SP_HIDDEN_BIT);
+
+ /*
+ * Provide guard, round and sticky bit space.
+ */
+ xm <<= 3;
+ ym <<= 3;
+
+ if (xe > ye) {
+ /*
+ * Have to shift y fraction right to align.
+ */
+ s = xe - ye;
+ ym = XSPSRS(ym, s);
+ ye += s;
+ } else if (ye > xe) {
+ /*
+ * Have to shift x fraction right to align.
+ */
+ s = ye - xe;
+ xm = XSPSRS(xm, s);
+ xe += s;
+ }
+ assert(xe == ye);
+ assert(xe <= SP_EMAX);
+
+ if (xs == ys) {
+ /*
+ * Generate 28 bit result of adding two 27 bit numbers
+ * leaving result in xm, xs and xe.
+ */
+ xm = xm + ym;
+
+ if (xm >> (SP_FBITS + 1 + 3)) { /* carry out */
+ SPXSRSX1();
+ }
+ } else {
+ if (xm >= ym) {
+ xm = xm - ym;
+ } else {
+ xm = ym - xm;
+ xs = ys;
+ }
+ if (xm == 0)
+ return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+ /*
+ * Normalize in extended single precision
+ */
+ while ((xm >> (SP_FBITS + 3)) == 0) {
+ xm <<= 1;
+ xe--;
+ }
+ }
+
+ return ieee754sp_format(xs, xe, xm);
+}
diff --git a/arch/mips/math-emu/sp_cmp.c b/arch/mips/math-emu/sp_cmp.c
new file mode 100644
index 000000000..64a37362a
--- /dev/null
+++ b/arch/mips/math-emu/sp_cmp.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * single precision
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754sp.h"
+
+int ieee754sp_cmp(union ieee754sp x, union ieee754sp y, int cmp, int sig)
+{
+ int vx;
+ int vy;
+
+ COMPXSP;
+ COMPYSP;
+
+ EXPLODEXSP;
+ EXPLODEYSP;
+ FLUSHXSP;
+ FLUSHYSP;
+ ieee754_clearcx(); /* Even clear inexact flag here */
+
+ if (ieee754_class_nan(xc) || ieee754_class_nan(yc)) {
+ if (sig ||
+ xc == IEEE754_CLASS_SNAN || yc == IEEE754_CLASS_SNAN)
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return (cmp & IEEE754_CUN) != 0;
+ } else {
+ vx = x.bits;
+ vy = y.bits;
+
+ if (vx < 0)
+ vx = -vx ^ SP_SIGN_BIT;
+ if (vy < 0)
+ vy = -vy ^ SP_SIGN_BIT;
+
+ if (vx < vy)
+ return (cmp & IEEE754_CLT) != 0;
+ else if (vx == vy)
+ return (cmp & IEEE754_CEQ) != 0;
+ else
+ return (cmp & IEEE754_CGT) != 0;
+ }
+}
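The integer compare above works because, once NaNs are filtered out, sign-magnitude float bits can be remapped onto a single ordered integer scale: for a negative encoding, -vx ^ SP_SIGN_BIT mirrors it below all positive encodings while keeping magnitudes in order. A standalone demonstration (illustrative; the -0.0 encoding additionally relies on wraparound and is left out here):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Map IEEE754 single bits to an integer key with the same ordering. */
static int32_t ordered_key(float f)
{
	int32_t v;

	memcpy(&v, &f, sizeof(v));	/* raw encoding */
	if (v < 0)			/* negative: mirror below the positives */
		v = -v ^ INT32_MIN;
	return v;
}

int main(void)
{
	/* keys compare exactly like the floats themselves */
	printf("%d %d %d\n",
	       ordered_key(-2.0f) < ordered_key(-1.0f),
	       ordered_key(-1.0f) < ordered_key(1.0f),
	       ordered_key(1.0f) < ordered_key(2.0f));	/* 1 1 1 */
	return 0;
}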
diff --git a/arch/mips/math-emu/sp_div.c b/arch/mips/math-emu/sp_div.c
new file mode 100644
index 000000000..2bfa266fd
--- /dev/null
+++ b/arch/mips/math-emu/sp_div.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * single precision
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_div(union ieee754sp x, union ieee754sp y)
+{
+ unsigned int rm;
+ int re;
+ unsigned int bm;
+
+ COMPXSP;
+ COMPYSP;
+
+ EXPLODEXSP;
+ EXPLODEYSP;
+
+ ieee754_clearcx();
+
+ FLUSHXSP;
+ FLUSHYSP;
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754sp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754sp_nanxcpt(x);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return x;
+
+
+ /*
+ * Infinity handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754sp_indef();
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ return ieee754sp_zero(xs ^ ys);
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ return ieee754sp_inf(xs ^ ys);
+
+ /*
+ * Zero handling
+ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754sp_indef();
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ ieee754_setcx(IEEE754_ZERO_DIVIDE);
+ return ieee754sp_inf(xs ^ ys);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ return ieee754sp_zero(xs == ys ? 0 : 1);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ SPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ SPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ break;
+ }
+ assert(xm & SP_HIDDEN_BIT);
+ assert(ym & SP_HIDDEN_BIT);
+
+ /* provide rounding space */
+ xm <<= 3;
+ ym <<= 3;
+
+ /* now the dirty work */
+
+ rm = 0;
+ re = xe - ye;
+
+ for (bm = SP_MBIT(SP_FBITS + 2); bm; bm >>= 1) {
+ if (xm >= ym) {
+ xm -= ym;
+ rm |= bm;
+ if (xm == 0)
+ break;
+ }
+ xm <<= 1;
+ }
+
+ rm <<= 1;
+ if (xm)
+ rm |= 1; /* have remainder, set sticky */
+
+ assert(rm);
+
+ /* normalise rm to rounding precision
+ */
+ while ((rm >> (SP_FBITS + 3)) == 0) {
+ rm <<= 1;
+ re--;
+ }
+
+ return ieee754sp_format(xs == ys ? 0 : 1, re, rm);
+}
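
For reference, here is a standalone sketch of the restoring-division loop above, operating on bare 24-bit mantissas with the hidden bit already set. It is illustrative only: the function name div_mant() and the plain constants standing in for the SP_* macros are editorial, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Divide two 24-bit mantissas (hidden bit at bit 23), producing a 27-bit
 * quotient (24 significant bits plus guard/round/sticky), the same shape
 * ieee754sp_div() hands to ieee754sp_format(). */
static uint32_t div_mant(uint32_t xm, uint32_t ym, int *exp_adj)
{
	uint32_t rm = 0;
	uint32_t bm;

	xm <<= 3;			/* provide rounding space */
	ym <<= 3;

	for (bm = 1u << (23 + 2); bm; bm >>= 1) {
		if (xm >= ym) {
			xm -= ym;
			rm |= bm;
			if (xm == 0)
				break;
		}
		xm <<= 1;
	}

	rm <<= 1;
	if (xm)
		rm |= 1;		/* leftover remainder -> sticky bit */

	*exp_adj = 0;
	while ((rm >> (23 + 3)) == 0) {	/* renormalise to 27 bits */
		rm <<= 1;
		(*exp_adj)--;
	}
	return rm;
}

int main(void)
{
	int adj;
	uint32_t q = div_mant(0xC00000, 0xA00000, &adj);	/* 1.5 / 1.25 */

	printf("quotient mantissa %#x, exponent adjustment %d\n", q, adj);
	return 0;
}
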
diff --git a/arch/mips/math-emu/sp_fdp.c b/arch/mips/math-emu/sp_fdp.c
new file mode 100644
index 000000000..56417497c
--- /dev/null
+++ b/arch/mips/math-emu/sp_fdp.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * single precision
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754sp.h"
+#include "ieee754dp.h"
+
+static inline union ieee754sp ieee754sp_nan_fdp(int xs, u64 xm)
+{
+ return buildsp(xs, SP_EMAX + 1 + SP_EBIAS,
+ xm >> (DP_FBITS - SP_FBITS));
+}
+
+union ieee754sp ieee754sp_fdp(union ieee754dp x)
+{
+ union ieee754sp y;
+ u32 rm;
+
+ COMPXDP;
+ COMPYSP;
+
+ EXPLODEXDP;
+
+ ieee754_clearcx();
+
+ FLUSHXDP;
+
+ switch (xc) {
+ case IEEE754_CLASS_SNAN:
+ x = ieee754dp_nanxcpt(x);
+ EXPLODEXDP;
+ fallthrough;
+ case IEEE754_CLASS_QNAN:
+ y = ieee754sp_nan_fdp(xs, xm);
+ if (!ieee754_csr.nan2008) {
+ EXPLODEYSP;
+ if (!ieee754_class_nan(yc))
+ y = ieee754sp_indef();
+ }
+ return y;
+
+ case IEEE754_CLASS_INF:
+ return ieee754sp_inf(xs);
+
+ case IEEE754_CLASS_ZERO:
+ return ieee754sp_zero(xs);
+
+ case IEEE754_CLASS_DNORM:
+ /* can't possibly be sp representable */
+ ieee754_setcx(IEEE754_UNDERFLOW);
+ ieee754_setcx(IEEE754_INEXACT);
+ if ((ieee754_csr.rm == FPU_CSR_RU && !xs) ||
+ (ieee754_csr.rm == FPU_CSR_RD && xs))
+ return ieee754sp_mind(xs);
+ return ieee754sp_zero(xs);
+
+ case IEEE754_CLASS_NORM:
+ break;
+ }
+
+ /*
+ * Convert from DP_FBITS to SP_FBITS+3 with sticky right shift.
+ */
+ rm = (xm >> (DP_FBITS - (SP_FBITS + 3))) |
+ ((xm << (64 - (DP_FBITS - (SP_FBITS + 3)))) != 0);
+
+ return ieee754sp_format(xs, xe, rm);
+}
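
The essential step above is the sticky right shift that narrows the 52 fraction bits of the double down to single precision's 24 + 3 working bits without losing the inexactness information. A minimal, self-contained illustration of that shift (constants spelled out rather than taken from ieee754sp.h):

#include <stdint.h>
#include <stdio.h>

#define DP_FBITS 52
#define SP_FBITS 23

/* Shift a double-precision fraction down to single-precision working width,
 * ORing every bit shifted out into the lowest (sticky) position. */
static uint32_t sticky_narrow(uint64_t xm)
{
	int s = DP_FBITS - (SP_FBITS + 3);	/* 26-place shift */

	return (uint32_t)(xm >> s) | ((xm << (64 - s)) != 0);
}

int main(void)
{
	uint64_t frac = (1ULL << DP_FBITS) | 1;	/* 1.0...01: low bit will be lost */

	printf("%#x\n", sticky_narrow(frac));	/* sticky bit ends up set */
	return 0;
}
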
diff --git a/arch/mips/math-emu/sp_fint.c b/arch/mips/math-emu/sp_fint.c
new file mode 100644
index 000000000..6068e3caa
--- /dev/null
+++ b/arch/mips/math-emu/sp_fint.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * single precision
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_fint(int x)
+{
+ unsigned int xm;
+ int xe;
+ int xs;
+
+ ieee754_clearcx();
+
+ if (x == 0)
+ return ieee754sp_zero(0);
+ if (x == 1 || x == -1)
+ return ieee754sp_one(x < 0);
+ if (x == 10 || x == -10)
+ return ieee754sp_ten(x < 0);
+
+ xs = (x < 0);
+ if (xs) {
+ if (x == (1 << 31))
+ xm = ((unsigned) 1 << 31); /* max neg can't be safely negated */
+ else
+ xm = -x;
+ } else {
+ xm = x;
+ }
+ xe = SP_FBITS + 3;
+
+ if (xm >> (SP_FBITS + 1 + 3)) {
+ /* shunt out overflow bits
+ */
+ while (xm >> (SP_FBITS + 1 + 3)) {
+ SPXSRSX1();
+ }
+ } else {
+ /* normalize in grs extended single precision
+ */
+ while ((xm >> (SP_FBITS + 3)) == 0) {
+ xm <<= 1;
+ xe--;
+ }
+ }
+ return ieee754sp_format(xs, xe, xm);
+}
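
The conversion peels off the sign, then slides the magnitude until its most significant bit sits just above the three guard/round/sticky positions that ieee754sp_format() expects. A rough standalone equivalent, using an explicit sticky shift in place of the SPXSRSX1() macro (function and variable names here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define SP_FBITS 23

/* Decompose an int into sign, exponent and a 27-bit working mantissa
 * (24 significant bits plus guard/round/sticky). */
static void int_to_sp_parts(int x, int *sign, int *exp, uint32_t *mant)
{
	uint32_t xm;
	int xe = SP_FBITS + 3;

	if (x == 0) {				/* mirrors the early return for zero */
		*sign = *exp = 0;
		*mant = 0;
		return;
	}

	*sign = x < 0;
	xm = *sign ? 0u - (uint32_t)x : (uint32_t)x;	/* safe even for INT_MIN */

	if (xm >> (SP_FBITS + 1 + 3)) {
		/* too wide: shift right, folding lost bits into sticky */
		while (xm >> (SP_FBITS + 1 + 3)) {
			xm = (xm >> 1) | (xm & 1);
			xe++;
		}
	} else {
		while ((xm >> (SP_FBITS + 3)) == 0) {	/* normalise upwards */
			xm <<= 1;
			xe--;
		}
	}
	*exp = xe;
	*mant = xm;
}

int main(void)
{
	int s, e;
	uint32_t m;

	int_to_sp_parts(-2147483647 - 1, &s, &e, &m);	/* INT_MIN */
	printf("sign %d exp %d mant %#x\n", s, e, m);	/* sign 1 exp 31 mant 0x4000000 */
	return 0;
}
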
diff --git a/arch/mips/math-emu/sp_flong.c b/arch/mips/math-emu/sp_flong.c
new file mode 100644
index 000000000..1b223fb5a
--- /dev/null
+++ b/arch/mips/math-emu/sp_flong.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * single precision
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_flong(s64 x)
+{
+ u64 xm; /* <--- need 64-bit mantissa temp */
+ int xe;
+ int xs;
+
+ ieee754_clearcx();
+
+ if (x == 0)
+ return ieee754sp_zero(0);
+ if (x == 1 || x == -1)
+ return ieee754sp_one(x < 0);
+ if (x == 10 || x == -10)
+ return ieee754sp_ten(x < 0);
+
+ xs = (x < 0);
+ if (xs) {
+ if (x == (1ULL << 63))
+ xm = (1ULL << 63); /* max neg can't be safely negated */
+ else
+ xm = -x;
+ } else {
+ xm = x;
+ }
+ xe = SP_FBITS + 3;
+
+ if (xm >> (SP_FBITS + 1 + 3)) {
+ /* shunt out overflow bits
+ */
+ while (xm >> (SP_FBITS + 1 + 3)) {
+ SPXSRSX1();
+ }
+ } else {
+ /* normalize in grs extended single precision */
+ while ((xm >> (SP_FBITS + 3)) == 0) {
+ xm <<= 1;
+ xe--;
+ }
+ }
+ return ieee754sp_format(xs, xe, xm);
+}
diff --git a/arch/mips/math-emu/sp_fmax.c b/arch/mips/math-emu/sp_fmax.c
new file mode 100644
index 000000000..3fb16a1df
--- /dev/null
+++ b/arch/mips/math-emu/sp_fmax.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: MAX{,A}.f
+ * MAX : Scalar Floating-Point Maximum
+ * MAXA: Scalar Floating-Point argument with Maximum Absolute Value
+ *
+ * MAX.S : FPR[fd] = maxNum(FPR[fs],FPR[ft])
+ * MAXA.S: FPR[fd] = maxNumMag(FPR[fs],FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
+{
+ COMPXSP;
+ COMPYSP;
+
+ EXPLODEXSP;
+ EXPLODEYSP;
+
+ FLUSHXSP;
+ FLUSHYSP;
+
+ ieee754_clearcx();
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754sp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754sp_nanxcpt(x);
+
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return y;
+
+ /*
+ * Infinity and zero handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return xs ? y : x;
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ return ys ? x : y;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ return ieee754sp_zero(xs & ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ SPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ SPDNORMX;
+ }
+
+ /* Finally get to do some computation */
+
+ assert(xm & SP_HIDDEN_BIT);
+ assert(ym & SP_HIDDEN_BIT);
+
+ /* Compare signs */
+ if (xs > ys)
+ return y;
+ else if (xs < ys)
+ return x;
+
+ /* Signs of inputs are equal, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ }
+
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return y;
+ return x;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
+ if (xm <= ym)
+ return x;
+ return y;
+}
+
+union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+{
+ COMPXSP;
+ COMPYSP;
+
+ EXPLODEXSP;
+ EXPLODEYSP;
+
+ FLUSHXSP;
+ FLUSHYSP;
+
+ ieee754_clearcx();
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754sp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754sp_nanxcpt(x);
+
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return y;
+
+ /*
+ * Infinity and zero handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754sp_inf(xs & ys);
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ return ieee754sp_zero(xs & ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ SPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ SPDNORMX;
+ }
+
+ /* Finally get to do some computation */
+
+ assert(xm & SP_HIDDEN_BIT);
+ assert(ym & SP_HIDDEN_BIT);
+
+ /* Compare exponent */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+
+ /* Compare mantissa */
+ if (xm < ym)
+ return y;
+ else if (xm > ym)
+ return x;
+ else if (xs == 0)
+ return x;
+ return y;
+}
diff --git a/arch/mips/math-emu/sp_fmin.c b/arch/mips/math-emu/sp_fmin.c
new file mode 100644
index 000000000..ad2599d4a
--- /dev/null
+++ b/arch/mips/math-emu/sp_fmin.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: MIN{,A}.f
+ * MIN : Scalar Floating-Point Minimum
+ * MINA: Scalar Floating-Point argument with Minimum Absolute Value
+ *
+ * MIN.S : FPR[fd] = minNum(FPR[fs],FPR[ft])
+ * MINA.S: FPR[fd] = minNumMag(FPR[fs],FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
+{
+ COMPXSP;
+ COMPYSP;
+
+ EXPLODEXSP;
+ EXPLODEYSP;
+
+ FLUSHXSP;
+ FLUSHYSP;
+
+ ieee754_clearcx();
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754sp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754sp_nanxcpt(x);
+
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return y;
+
+ /*
+ * Infinity and zero handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return xs ? x : y;
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ return ys ? y : x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ return ieee754sp_zero(xs | ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ SPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ SPDNORMX;
+ }
+
+ /* Finally get to do some computation */
+
+ assert(xm & SP_HIDDEN_BIT);
+ assert(ym & SP_HIDDEN_BIT);
+
+ /* Compare signs */
+ if (xs > ys)
+ return x;
+ else if (xs < ys)
+ return y;
+
+ /* Signs of inputs are the same, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ }
+
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return x;
+ return y;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
+ if (xm <= ym)
+ return y;
+ return x;
+}
+
+union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
+{
+ COMPXSP;
+ COMPYSP;
+
+ EXPLODEXSP;
+ EXPLODEYSP;
+
+ FLUSHXSP;
+ FLUSHYSP;
+
+ ieee754_clearcx();
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754sp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754sp_nanxcpt(x);
+
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return y;
+
+ /*
+ * Infinity and zero handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754sp_inf(xs | ys);
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ return ieee754sp_zero(xs | ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ SPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ SPDNORMX;
+ }
+
+ /* Finally get to do some computation */
+
+ assert(xm & SP_HIDDEN_BIT);
+ assert(ym & SP_HIDDEN_BIT);
+
+ /* Compare exponent */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+
+ /* Compare mantissa */
+ if (xm < ym)
+ return x;
+ else if (xm > ym)
+ return y;
+ else if (xs == 1)
+ return x;
+ return y;
+}
diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
new file mode 100644
index 000000000..473ee222d
--- /dev/null
+++ b/arch/mips/math-emu/sp_maddf.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: MADDF.f (Fused Multiply Add)
+ * MADDF.fmt: FPR[fd] = FPR[fd] + (FPR[fs] x FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#include "ieee754sp.h"
+
+
+static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y, enum maddf_flags flags)
+{
+ int re;
+ int rs;
+ unsigned int rm;
+ u64 rm64;
+ u64 zm64;
+ int s;
+
+ COMPXSP;
+ COMPYSP;
+ COMPZSP;
+
+ EXPLODEXSP;
+ EXPLODEYSP;
+ EXPLODEZSP;
+
+ FLUSHXSP;
+ FLUSHYSP;
+ FLUSHZSP;
+
+ ieee754_clearcx();
+
+ rs = xs ^ ys;
+ if (flags & MADDF_NEGATE_PRODUCT)
+ rs ^= 1;
+ if (flags & MADDF_NEGATE_ADDITION)
+ zs ^= 1;
+
+ /*
+ * Handle the cases when at least one of x, y or z is a NaN.
+ * Order of precedence is sNaN, qNaN and z, x, y.
+ */
+ if (zc == IEEE754_CLASS_SNAN)
+ return ieee754sp_nanxcpt(z);
+ if (xc == IEEE754_CLASS_SNAN)
+ return ieee754sp_nanxcpt(x);
+ if (yc == IEEE754_CLASS_SNAN)
+ return ieee754sp_nanxcpt(y);
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (xc == IEEE754_CLASS_QNAN)
+ return x;
+ if (yc == IEEE754_CLASS_QNAN)
+ return y;
+
+ if (zc == IEEE754_CLASS_DNORM)
+ SPDNORMZ;
+ /* ZERO z cases are handled separately below */
+
+ switch (CLPAIR(xc, yc)) {
+
+
+ /*
+ * Infinity handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754sp_indef();
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ if ((zc == IEEE754_CLASS_INF) && (zs != rs)) {
+ /*
+ * Cases of addition of infinities with opposite signs
+ * or subtraction of infinities with same signs.
+ */
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754sp_indef();
+ }
+ /*
+ * z is here either not an infinity, or an infinity having the
+ * same sign as product (x*y). The result must be an infinity,
+ * and its sign is determined only by the sign of product (x*y).
+ */
+ return ieee754sp_inf(rs);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754sp_inf(zs);
+ if (zc == IEEE754_CLASS_ZERO) {
+ /* Handle cases +0 + (-0) and similar ones. */
+ if (zs == rs)
+ /*
+ * Cases of addition of zeros of equal signs
+ * or subtraction of zeroes of opposite signs.
+ * The sign of the resulting zero is in any
+ * such case determined only by the sign of z.
+ */
+ return z;
+
+ return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
+ }
+ /* x*y is here 0, and z is not 0, so just return z */
+ return z;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754sp_inf(zs);
+ SPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754sp_inf(zs);
+ SPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754sp_inf(zs);
+ /* continue to real computations */
+ }
+
+ /* Finally get to do some computation */
+
+ /*
+ * Do the multiplication bit first
+ *
+ * rm = xm * ym, re = xe + ye basically
+ *
+ * At this point xm and ym should have been normalized.
+ */
+
+ /* rm = xm * ym, re = xe+ye basically */
+ assert(xm & SP_HIDDEN_BIT);
+ assert(ym & SP_HIDDEN_BIT);
+
+ re = xe + ye;
+
+ /* Multiply 24 bit xm and ym to give 48 bit results */
+ rm64 = (uint64_t)xm * ym;
+
+ /* Shunt to top of word */
+ rm64 = rm64 << 16;
+
+ /* Put explicit bit at bit 62 if necessary */
+ if ((int64_t) rm64 < 0) {
+ rm64 = rm64 >> 1;
+ re++;
+ }
+
+ assert(rm64 & (1ULL << 62));
+
+ if (zc == IEEE754_CLASS_ZERO) {
+ /*
+ * Move explicit bit from bit 62 to bit 26 since the
+ * ieee754sp_format code expects the mantissa to be
+ * 27 bits wide (24 + 3 rounding bits).
+ */
+ rm = XSPSRS64(rm64, (62 - 26));
+ return ieee754sp_format(rs, re, rm);
+ }
+
+ /* Move explicit bit from bit 23 to bit 62 */
+ zm64 = (uint64_t)zm << (62 - 23);
+ assert(zm64 & (1ULL << 62));
+
+ /* Make the exponents the same */
+ if (ze > re) {
+ /*
+ * Have to shift r fraction right to align.
+ */
+ s = ze - re;
+ rm64 = XSPSRS64(rm64, s);
+ re += s;
+ } else if (re > ze) {
+ /*
+ * Have to shift z fraction right to align.
+ */
+ s = re - ze;
+ zm64 = XSPSRS64(zm64, s);
+ ze += s;
+ }
+ assert(ze == re);
+ assert(ze <= SP_EMAX);
+
+ /* Do the addition */
+ if (zs == rs) {
+ /*
+ * Generate 64 bit result by adding two 63 bit numbers
+ * leaving result in zm64, zs and ze.
+ */
+ zm64 = zm64 + rm64;
+ if ((int64_t)zm64 < 0) { /* carry out */
+ zm64 = XSPSRS1(zm64);
+ ze++;
+ }
+ } else {
+ if (zm64 >= rm64) {
+ zm64 = zm64 - rm64;
+ } else {
+ zm64 = rm64 - zm64;
+ zs = rs;
+ }
+ if (zm64 == 0)
+ return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+ /*
+ * Put explicit bit at bit 62 if necessary.
+ */
+ while ((zm64 >> 62) == 0) {
+ zm64 <<= 1;
+ ze--;
+ }
+ }
+
+ /*
+ * Move explicit bit from bit 62 to bit 26 since the
+ * ieee754sp_format code expects the mantissa to be
+ * 27 bits wide (24 + 3 rounding bits).
+ */
+ zm = XSPSRS64(zm64, (62 - 26));
+
+ return ieee754sp_format(zs, ze, zm);
+}
+
+union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y)
+{
+ return _sp_maddf(z, x, y, 0);
+}
+
+union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y)
+{
+ return _sp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
+}
+
+union ieee754sp ieee754sp_madd(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y)
+{
+ return _sp_maddf(z, x, y, 0);
+}
+
+union ieee754sp ieee754sp_msub(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y)
+{
+ return _sp_maddf(z, x, y, MADDF_NEGATE_ADDITION);
+}
+
+union ieee754sp ieee754sp_nmadd(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y)
+{
+ return _sp_maddf(z, x, y, MADDF_NEGATE_PRODUCT|MADDF_NEGATE_ADDITION);
+}
+
+union ieee754sp ieee754sp_nmsub(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y)
+{
+ return _sp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
+}
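
Aligning the 64-bit product with the addend relies on a sticky right shift (XSPSRS64() in the kernel's macros): whatever falls off the bottom must survive as a single sticky bit so the final rounding still sees that the value was inexact. A minimal version of that primitive, with an extra guard for shift counts the macro never actually receives:

#include <stdint.h>
#include <stdio.h>

/* Right-shift a 64-bit mantissa by s places, ORing everything shifted out
 * into bit 0. */
static uint64_t sticky_shift_right64(uint64_t m, int s)
{
	if (s == 0)
		return m;
	if (s >= 64)
		return m != 0;
	return (m >> s) | ((m << (64 - s)) != 0);
}

int main(void)
{
	uint64_t m = (1ULL << 62) | 1;	/* explicit bit plus one stray low bit */

	/* prints 0x4000001: the stray bit survives as sticky */
	printf("%#llx\n", (unsigned long long)sticky_shift_right64(m, 36));
	return 0;
}
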
diff --git a/arch/mips/math-emu/sp_mul.c b/arch/mips/math-emu/sp_mul.c
new file mode 100644
index 000000000..26cfd6302
--- /dev/null
+++ b/arch/mips/math-emu/sp_mul.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * single precision
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_mul(union ieee754sp x, union ieee754sp y)
+{
+ int re;
+ int rs;
+ unsigned int rm;
+ unsigned short lxm;
+ unsigned short hxm;
+ unsigned short lym;
+ unsigned short hym;
+ unsigned int lrm;
+ unsigned int hrm;
+ unsigned int t;
+ unsigned int at;
+
+ COMPXSP;
+ COMPYSP;
+
+ EXPLODEXSP;
+ EXPLODEYSP;
+
+ ieee754_clearcx();
+
+ FLUSHXSP;
+ FLUSHYSP;
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754sp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754sp_nanxcpt(x);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return x;
+
+
+ /*
+ * Infinity handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754sp_indef();
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754sp_inf(xs ^ ys);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return ieee754sp_zero(xs ^ ys);
+
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ SPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ SPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ break;
+ }
+ /* rm = xm * ym, re = xe+ye basically */
+ assert(xm & SP_HIDDEN_BIT);
+ assert(ym & SP_HIDDEN_BIT);
+
+ re = xe + ye;
+ rs = xs ^ ys;
+
+ /* shunt to top of word */
+ xm <<= 32 - (SP_FBITS + 1);
+ ym <<= 32 - (SP_FBITS + 1);
+
+ /*
+ * Multiply 32 bits xm, ym to give high 32 bits rm with stickiness.
+ */
+ lxm = xm & 0xffff;
+ hxm = xm >> 16;
+ lym = ym & 0xffff;
+ hym = ym >> 16;
+
+ lrm = lxm * lym; /* 16 * 16 => 32 */
+ hrm = hxm * hym; /* 16 * 16 => 32 */
+
+ t = lxm * hym; /* 16 * 16 => 32 */
+ at = lrm + (t << 16);
+ hrm += at < lrm;
+ lrm = at;
+ hrm = hrm + (t >> 16);
+
+ t = hxm * lym; /* 16 * 16 => 32 */
+ at = lrm + (t << 16);
+ hrm += at < lrm;
+ lrm = at;
+ hrm = hrm + (t >> 16);
+
+ rm = hrm | (lrm != 0);
+
+ /*
+ * Sticky shift down to normal rounding precision.
+ */
+ if ((int) rm < 0) {
+ rm = (rm >> (32 - (SP_FBITS + 1 + 3))) |
+ ((rm << (SP_FBITS + 1 + 3)) != 0);
+ re++;
+ } else {
+ rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) |
+ ((rm << (SP_FBITS + 1 + 3 + 1)) != 0);
+ }
+ assert(rm & (SP_HIDDEN_BIT << 3));
+
+ return ieee754sp_format(rs, re, rm);
+}
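
The block of 16-bit partial products above is just a portable 32x32->64 multiply that also records whether any of the low 32 result bits were non-zero. The same idea in compact form, checked against a native 64-bit multiply (standalone sketch, not part of the patch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Return the high 32 bits of x*y, with bit 0 additionally ORed with
 * "any low bit was set" -- the sticky form ieee754sp_mul() rounds from. */
static uint32_t mul_hi_sticky(uint32_t x, uint32_t y)
{
	uint32_t lx = x & 0xffff, hx = x >> 16;
	uint32_t ly = y & 0xffff, hy = y >> 16;
	uint32_t lo = lx * ly;
	uint32_t hi = hx * hy;
	uint32_t t, at;

	t = lx * hy;
	at = lo + (t << 16);
	hi += at < lo;			/* carry out of the low word */
	lo = at;
	hi += t >> 16;

	t = hx * ly;
	at = lo + (t << 16);
	hi += at < lo;
	lo = at;
	hi += t >> 16;

	return hi | (lo != 0);
}

int main(void)
{
	uint32_t x = 0xC0000000, y = 0xA0000001;
	uint64_t full = (uint64_t)x * y;
	uint32_t want = (uint32_t)(full >> 32) | ((uint32_t)full != 0);

	assert(mul_hi_sticky(x, y) == want);
	printf("%#x\n", mul_hi_sticky(x, y));
	return 0;
}
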
diff --git a/arch/mips/math-emu/sp_rint.c b/arch/mips/math-emu/sp_rint.c
new file mode 100644
index 000000000..d5f75fe21
--- /dev/null
+++ b/arch/mips/math-emu/sp_rint.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * single precision
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ * Copyright (C) 2017 Imagination Technologies, Ltd.
+ * Author: Aleksandar Markovic <aleksandar.markovic@imgtec.com>
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_rint(union ieee754sp x)
+{
+ union ieee754sp ret;
+ u32 residue;
+ int sticky;
+ int round;
+ int odd;
+
+ COMPXDP; /* <-- DP needed for 64-bit mantissa tmp */
+
+ ieee754_clearcx();
+
+ EXPLODEXSP;
+ FLUSHXSP;
+
+ if (xc == IEEE754_CLASS_SNAN)
+ return ieee754sp_nanxcpt(x);
+
+ if ((xc == IEEE754_CLASS_QNAN) ||
+ (xc == IEEE754_CLASS_INF) ||
+ (xc == IEEE754_CLASS_ZERO))
+ return x;
+
+ if (xe >= SP_FBITS)
+ return x;
+
+ if (xe < -1) {
+ residue = xm;
+ round = 0;
+ sticky = residue != 0;
+ xm = 0;
+ } else {
+ residue = xm << (xe + 1);
+ residue <<= 31 - SP_FBITS;
+ round = (residue >> 31) != 0;
+ sticky = (residue << 1) != 0;
+ xm >>= SP_FBITS - xe;
+ }
+
+ odd = (xm & 0x1) != 0x0;
+
+ switch (ieee754_csr.rm) {
+ case FPU_CSR_RN: /* toward nearest */
+ if (round && (sticky || odd))
+ xm++;
+ break;
+ case FPU_CSR_RZ: /* toward zero */
+ break;
+ case FPU_CSR_RU: /* toward +infinity */
+ if ((round || sticky) && !xs)
+ xm++;
+ break;
+ case FPU_CSR_RD: /* toward -infinity */
+ if ((round || sticky) && xs)
+ xm++;
+ break;
+ }
+
+ if (round || sticky)
+ ieee754_setcx(IEEE754_INEXACT);
+
+ ret = ieee754sp_flong(xm);
+ SPSIGN(ret) = xs;
+
+ return ret;
+}
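
Rounding to an integer reduces to three flags: round (the first discarded bit), sticky (any later discarded bit) and odd (the lowest kept bit), combined according to the rounding mode. A small sketch of just that decision, detached from the FP unpacking (the enum is a local stand-in for the FPU_CSR_* values):

#include <stdio.h>

enum rm { RN, RZ, RU, RD };	/* nearest-even, toward zero, up, down */

/* Should the truncated magnitude be bumped by one? */
static int round_up(enum rm mode, int sign, int round, int sticky, int odd)
{
	switch (mode) {
	case RN: return round && (sticky || odd);
	case RZ: return 0;
	case RU: return (round || sticky) && !sign;
	case RD: return (round || sticky) && sign;
	}
	return 0;
}

int main(void)
{
	/* 2.5: round=1, sticky=0, odd=0 -> stays 2 (ties to even) */
	printf("2.5   -> %d\n", 2 + round_up(RN, 0, 1, 0, 0));
	/* 3.5: round=1, sticky=0, odd=1 -> becomes 4 */
	printf("3.5   -> %d\n", 3 + round_up(RN, 0, 1, 0, 1));
	/* -2.25 rounded toward -inf: magnitude bumps to 3, i.e. -3 */
	printf("-2.25 -> -%d\n", 2 + round_up(RD, 1, 0, 1, 0));
	return 0;
}
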
diff --git a/arch/mips/math-emu/sp_simple.c b/arch/mips/math-emu/sp_simple.c
new file mode 100644
index 000000000..b9e91da7d
--- /dev/null
+++ b/arch/mips/math-emu/sp_simple.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * single precision
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_neg(union ieee754sp x)
+{
+ union ieee754sp y;
+
+ if (ieee754_csr.abs2008) {
+ y = x;
+ SPSIGN(y) = !SPSIGN(x);
+ } else {
+ unsigned int oldrm;
+
+ oldrm = ieee754_csr.rm;
+ ieee754_csr.rm = FPU_CSR_RD;
+ y = ieee754sp_sub(ieee754sp_zero(0), x);
+ ieee754_csr.rm = oldrm;
+ }
+ return y;
+}
+
+union ieee754sp ieee754sp_abs(union ieee754sp x)
+{
+ union ieee754sp y;
+
+ if (ieee754_csr.abs2008) {
+ y = x;
+ SPSIGN(y) = 0;
+ } else {
+ unsigned int oldrm;
+
+ oldrm = ieee754_csr.rm;
+ ieee754_csr.rm = FPU_CSR_RD;
+ if (SPSIGN(x))
+ y = ieee754sp_sub(ieee754sp_zero(0), x);
+ else
+ y = ieee754sp_add(ieee754sp_zero(0), x);
+ ieee754_csr.rm = oldrm;
+ }
+ return y;
+}
diff --git a/arch/mips/math-emu/sp_sqrt.c b/arch/mips/math-emu/sp_sqrt.c
new file mode 100644
index 000000000..e9bb60121
--- /dev/null
+++ b/arch/mips/math-emu/sp_sqrt.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * single precision square root
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_sqrt(union ieee754sp x)
+{
+ int ix, s, q, m, t, i;
+ unsigned int r;
+ COMPXSP;
+
+ /* take care of Inf and NaN */
+
+ EXPLODEXSP;
+ ieee754_clearcx();
+ FLUSHXSP;
+
+ /* x == INF or NAN? */
+ switch (xc) {
+ case IEEE754_CLASS_SNAN:
+ return ieee754sp_nanxcpt(x);
+
+ case IEEE754_CLASS_QNAN:
+ /* sqrt(Nan) = Nan */
+ return x;
+
+ case IEEE754_CLASS_ZERO:
+ /* sqrt(0) = 0 */
+ return x;
+
+ case IEEE754_CLASS_INF:
+ if (xs) {
+ /* sqrt(-Inf) = Nan */
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754sp_indef();
+ }
+ /* sqrt(+Inf) = Inf */
+ return x;
+
+ case IEEE754_CLASS_DNORM:
+ case IEEE754_CLASS_NORM:
+ if (xs) {
+ /* sqrt(-x) = Nan */
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754sp_indef();
+ }
+ break;
+ }
+
+ ix = x.bits;
+
+ /* normalize x */
+ m = (ix >> 23);
+ if (m == 0) { /* subnormal x */
+ for (i = 0; (ix & 0x00800000) == 0; i++)
+ ix <<= 1;
+ m -= i - 1;
+ }
+ m -= 127; /* unbias exponent */
+ ix = (ix & 0x007fffff) | 0x00800000;
+ if (m & 1) /* odd m, double x to make it even */
+ ix += ix;
+ m >>= 1; /* m = [m/2] */
+
+ /* generate sqrt(x) bit by bit */
+ ix += ix;
+ s = 0;
+ q = 0; /* q = sqrt(x) */
+ r = 0x01000000; /* r = moving bit from right to left */
+
+ while (r != 0) {
+ t = s + r;
+ if (t <= ix) {
+ s = t + r;
+ ix -= t;
+ q += r;
+ }
+ ix += ix;
+ r >>= 1;
+ }
+
+ if (ix != 0) {
+ ieee754_setcx(IEEE754_INEXACT);
+ switch (ieee754_csr.rm) {
+ case FPU_CSR_RU:
+ q += 2;
+ break;
+ case FPU_CSR_RN:
+ q += (q & 1);
+ break;
+ }
+ }
+ ix = (q >> 1) + 0x3f000000;
+ ix += (m << 23);
+ x.bits = ix;
+ return x;
+}
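
The loop above is the classic restoring (bit-by-bit) square root, producing one result bit per iteration on the mantissa. Its plain integer cousin, without the extra fractional bit the kernel keeps for rounding, looks like this (standalone sketch, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Restoring integer square root: returns floor(sqrt(n)). */
static uint32_t isqrt(uint32_t n)
{
	uint32_t res = 0;
	uint32_t bit = 1u << 30;	/* highest power of four <= 2^31 */

	while (bit > n)
		bit >>= 2;

	while (bit != 0) {
		if (n >= res + bit) {
			n -= res + bit;
			res = (res >> 1) + bit;
		} else {
			res >>= 1;
		}
		bit >>= 2;
	}
	return res;
}

int main(void)
{
	printf("%u %u %u\n", isqrt(144), isqrt(145), isqrt(0xffffffffu));
	return 0;
}
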
diff --git a/arch/mips/math-emu/sp_sub.c b/arch/mips/math-emu/sp_sub.c
new file mode 100644
index 000000000..16c8e9ae6
--- /dev/null
+++ b/arch/mips/math-emu/sp_sub.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * single precision
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_sub(union ieee754sp x, union ieee754sp y)
+{
+ int s;
+
+ COMPXSP;
+ COMPYSP;
+
+ EXPLODEXSP;
+ EXPLODEYSP;
+
+ ieee754_clearcx();
+
+ FLUSHXSP;
+ FLUSHYSP;
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ return ieee754sp_nanxcpt(y);
+
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754sp_nanxcpt(x);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return x;
+
+
+ /*
+ * Infinity handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ if (xs != ys)
+ return x;
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754sp_indef();
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ return ieee754sp_inf(ys ^ 1);
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ return x;
+
+ /*
+ * Zero handling
+ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ if (xs != ys)
+ return x;
+ else
+ return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ /* quick fix up */
+ SPSIGN(y) ^= 1;
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+ fallthrough;
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ SPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ SPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ break;
+ }
+ /* flip sign of y and handle as add */
+ ys ^= 1;
+
+ assert(xm & SP_HIDDEN_BIT);
+ assert(ym & SP_HIDDEN_BIT);
+
+
+ /* provide guard,round and stick bit space */
+ xm <<= 3;
+ ym <<= 3;
+
+ if (xe > ye) {
+ /*
+ * have to shift y fraction right to align
+ */
+ s = xe - ye;
+ ym = XSPSRS(ym, s);
+ ye += s;
+ } else if (ye > xe) {
+ /*
+ * have to shift x fraction right to align
+ */
+ s = ye - xe;
+ xm = XSPSRS(xm, s);
+ xe += s;
+ }
+ assert(xe == ye);
+ assert(xe <= SP_EMAX);
+
+ if (xs == ys) {
+ /* generate 28 bit result of adding two 27 bit numbers
+ */
+ xm = xm + ym;
+
+ if (xm >> (SP_FBITS + 1 + 3)) { /* carry out */
+ SPXSRSX1(); /* shift preserving sticky */
+ }
+ } else {
+ if (xm >= ym) {
+ xm = xm - ym;
+ } else {
+ xm = ym - xm;
+ xs = ys;
+ }
+ if (xm == 0) {
+ if (ieee754_csr.rm == FPU_CSR_RD)
+ return ieee754sp_zero(1); /* round negative inf. => sign = -1 */
+ else
+ return ieee754sp_zero(0); /* other round modes => sign = 1 */
+ }
+ /* normalize to rounding precision
+ */
+ while ((xm >> (SP_FBITS + 3)) == 0) {
+ xm <<= 1;
+ xe--;
+ }
+ }
+
+ return ieee754sp_format(xs, xe, xm);
+}
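
One subtle rule above: when the subtraction cancels exactly, the sign of the resulting zero depends on the rounding mode, +0 everywhere except round-toward-negative, which yields -0. The same behaviour can be observed on any IEEE host (assumes C99 <fenv.h>; compile without fast-math options so the subtraction really happens at run time):

#include <fenv.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
	volatile float x = 1.5f;	/* volatile: keep the compiler from folding x - x */

	fesetround(FE_DOWNWARD);
	printf("FE_DOWNWARD:  signbit(x - x) = %d\n", signbit(x - x) != 0);

	fesetround(FE_TONEAREST);
	printf("FE_TONEAREST: signbit(x - x) = %d\n", signbit(x - x) != 0);
	return 0;
}
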
diff --git a/arch/mips/math-emu/sp_tint.c b/arch/mips/math-emu/sp_tint.c
new file mode 100644
index 000000000..f7a5cf5e1
--- /dev/null
+++ b/arch/mips/math-emu/sp_tint.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * single precision
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754sp.h"
+
+int ieee754sp_tint(union ieee754sp x)
+{
+ u32 residue;
+ int round;
+ int sticky;
+ int odd;
+
+ COMPXSP;
+
+ ieee754_clearcx();
+
+ EXPLODEXSP;
+ FLUSHXSP;
+
+ switch (xc) {
+ case IEEE754_CLASS_SNAN:
+ case IEEE754_CLASS_QNAN:
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754si_indef();
+
+ case IEEE754_CLASS_INF:
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754si_overflow(xs);
+
+ case IEEE754_CLASS_ZERO:
+ return 0;
+
+ case IEEE754_CLASS_DNORM:
+ case IEEE754_CLASS_NORM:
+ break;
+ }
+ if (xe >= 31) {
+ /* look for valid corner case */
+ if (xe == 31 && xs && xm == SP_HIDDEN_BIT)
+ return -0x80000000;
+ /* Set invalid. We will only use overflow for floating
+ point overflow */
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754si_overflow(xs);
+ }
+ /* oh gawd */
+ if (xe > SP_FBITS) {
+ xm <<= xe - SP_FBITS;
+ } else {
+ if (xe < -1) {
+ residue = xm;
+ round = 0;
+ sticky = residue != 0;
+ xm = 0;
+ } else {
+ /* Shifting a u32 32 times does not work,
+ * so we do it in two steps. Be aware that xe
+ * may be -1 */
+ residue = xm << (xe + 1);
+ residue <<= 31 - SP_FBITS;
+ round = (residue >> 31) != 0;
+ sticky = (residue << 1) != 0;
+ xm >>= SP_FBITS - xe;
+ }
+ odd = (xm & 0x1) != 0x0;
+ switch (ieee754_csr.rm) {
+ case FPU_CSR_RN:
+ if (round && (sticky || odd))
+ xm++;
+ break;
+ case FPU_CSR_RZ:
+ break;
+ case FPU_CSR_RU: /* toward +Infinity */
+ if ((round || sticky) && !xs)
+ xm++;
+ break;
+ case FPU_CSR_RD: /* toward -Infinity */
+ if ((round || sticky) && xs)
+ xm++;
+ break;
+ }
+ if ((xm >> 31) != 0) {
+ /* This can happen after rounding */
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754si_overflow(xs);
+ }
+ if (round || sticky)
+ ieee754_setcx(IEEE754_INEXACT);
+ }
+ if (xs)
+ return -xm;
+ else
+ return xm;
+}
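
The two-step shift mentioned in the comment above exists because shifting a 32-bit value by 32 places is undefined in C, and the combined shift count reaches 32 when xe equals SP_FBITS. A toy version of that round/sticky extraction (names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define SP_FBITS 23

/* For a mantissa xm with unbiased exponent xe (-1 <= xe <= SP_FBITS),
 * left-align the bits that truncation will discard and report them as
 * round/sticky.  The shift is split so neither step reaches 32. */
static void frac_round_sticky(uint32_t xm, int xe, int *round, int *sticky)
{
	uint32_t residue = xm << (xe + 1);	/* first step: 0..24 places */

	residue <<= 31 - SP_FBITS;		/* second step: 8 places */
	*round = (residue >> 31) != 0;
	*sticky = (residue << 1) != 0;
}

int main(void)
{
	int r, s;

	/* 1.75: mantissa 0xE00000, xe = 0; discarded .75 -> round=1, sticky=1 */
	frac_round_sticky(0xE00000, 0, &r, &s);
	printf("round=%d sticky=%d\n", r, s);
	return 0;
}
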
diff --git a/arch/mips/math-emu/sp_tlong.c b/arch/mips/math-emu/sp_tlong.c
new file mode 100644
index 000000000..adc191304
--- /dev/null
+++ b/arch/mips/math-emu/sp_tlong.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* IEEE754 floating point arithmetic
+ * single precision
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ */
+
+#include "ieee754sp.h"
+
+s64 ieee754sp_tlong(union ieee754sp x)
+{
+ u32 residue;
+ int round;
+ int sticky;
+ int odd;
+
+ COMPXDP; /* <-- need 64-bit mantissa tmp */
+
+ ieee754_clearcx();
+
+ EXPLODEXSP;
+ FLUSHXSP;
+
+ switch (xc) {
+ case IEEE754_CLASS_SNAN:
+ case IEEE754_CLASS_QNAN:
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754di_indef();
+
+ case IEEE754_CLASS_INF:
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754di_overflow(xs);
+
+ case IEEE754_CLASS_ZERO:
+ return 0;
+
+ case IEEE754_CLASS_DNORM:
+ case IEEE754_CLASS_NORM:
+ break;
+ }
+ if (xe >= 63) {
+ /* look for valid corner case */
+ if (xe == 63 && xs && xm == SP_HIDDEN_BIT)
+ return -0x8000000000000000LL;
+ /* Set invalid. We will only use overflow for floating
+ point overflow */
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754di_overflow(xs);
+ }
+ /* oh gawd */
+ if (xe > SP_FBITS) {
+ xm <<= xe - SP_FBITS;
+ } else if (xe < SP_FBITS) {
+ if (xe < -1) {
+ residue = xm;
+ round = 0;
+ sticky = residue != 0;
+ xm = 0;
+ } else {
+ residue = xm << (32 - SP_FBITS + xe);
+ round = (residue >> 31) != 0;
+ sticky = (residue << 1) != 0;
+ xm >>= SP_FBITS - xe;
+ }
+ odd = (xm & 0x1) != 0x0;
+ switch (ieee754_csr.rm) {
+ case FPU_CSR_RN:
+ if (round && (sticky || odd))
+ xm++;
+ break;
+ case FPU_CSR_RZ:
+ break;
+ case FPU_CSR_RU: /* toward +Infinity */
+ if ((round || sticky) && !xs)
+ xm++;
+ break;
+ case FPU_CSR_RD: /* toward -Infinity */
+ if ((round || sticky) && xs)
+ xm++;
+ break;
+ }
+ if ((xm >> 63) != 0) {
+ /* This can happen after rounding */
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754di_overflow(xs);
+ }
+ if (round || sticky)
+ ieee754_setcx(IEEE754_INEXACT);
+ }
+ if (xs)
+ return -xm;
+ else
+ return xm;
+}
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
new file mode 100644
index 000000000..865926a37
--- /dev/null
+++ b/arch/mips/mm/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux/MIPS-specific parts of the memory manager.
+#
+
+obj-y += cache.o
+obj-y += context.o
+obj-y += extable.o
+obj-y += fault.o
+obj-y += init.o
+obj-y += mmap.o
+obj-y += page.o
+obj-y += page-funcs.o
+obj-y += pgtable.o
+obj-y += tlbex.o
+obj-y += tlbex-fault.o
+obj-y += tlb-funcs.o
+
+ifdef CONFIG_CPU_MICROMIPS
+obj-y += uasm-micromips.o
+else
+obj-y += uasm-mips.o
+endif
+
+obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
+obj-$(CONFIG_64BIT) += ioremap64.o pgtable-64.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_DMA_NONCOHERENT) += dma-noncoherent.o
+
+obj-$(CONFIG_CPU_R3K_TLB) += tlb-r3k.o
+obj-$(CONFIG_CPU_R4K_CACHE_TLB) += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_R3000) += c-r3k.o
+obj-$(CONFIG_CPU_SB1) += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o
+obj-$(CONFIG_CPU_TX39XX) += c-tx39.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
+
+obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
+obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
+obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
+obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
+obj-$(CONFIG_SCACHE_DEBUGFS) += sc-debugfs.o
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
new file mode 100644
index 000000000..8ae181e08
--- /dev/null
+++ b/arch/mips/mm/c-octeon.c
@@ -0,0 +1,352 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2007 Cavium Networks
+ */
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/io.h>
+
+#include <asm/bcache.h>
+#include <asm/bootinfo.h>
+#include <asm/cacheops.h>
+#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
+#include <asm/page.h>
+#include <asm/r4kcache.h>
+#include <asm/traps.h>
+#include <asm/mmu_context.h>
+#include <asm/war.h>
+
+#include <asm/octeon/octeon.h>
+
+unsigned long long cache_err_dcache[NR_CPUS];
+EXPORT_SYMBOL_GPL(cache_err_dcache);
+
+/**
+ * Octeon automatically flushes the dcache on tlb changes, so
+ * from Linux's viewpoint it acts much like a physically
+ * tagged cache. No flushing is needed.
+ *
+ */
+static void octeon_flush_data_cache_page(unsigned long addr)
+{
+ /* Nothing to do */
+}
+
+static inline void octeon_local_flush_icache(void)
+{
+ asm volatile ("synci 0($0)");
+}
+
+/*
+ * Flush local I-cache for the specified range.
+ */
+static void local_octeon_flush_icache_range(unsigned long start,
+ unsigned long end)
+{
+ octeon_local_flush_icache();
+}
+
+/**
+ * Flush caches as necessary for all cores affected by a
+ * vma. If no vma is supplied, all cores are flushed.
+ *
+ * @vma: VMA to flush or NULL to flush all icaches.
+ */
+static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
+{
+ extern void octeon_send_ipi_single(int cpu, unsigned int action);
+#ifdef CONFIG_SMP
+ int cpu;
+ cpumask_t mask;
+#endif
+
+ mb();
+ octeon_local_flush_icache();
+#ifdef CONFIG_SMP
+ preempt_disable();
+ cpu = smp_processor_id();
+
+ /*
+ * If we have a vma structure, we only need to worry about
+ * cores it has been used on
+ */
+ if (vma)
+ mask = *mm_cpumask(vma->vm_mm);
+ else
+ mask = *cpu_online_mask;
+ cpumask_clear_cpu(cpu, &mask);
+ for_each_cpu(cpu, &mask)
+ octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
+
+ preempt_enable();
+#endif
+}
+
+
+/**
+ * Called to flush the icache on all cores
+ */
+static void octeon_flush_icache_all(void)
+{
+ octeon_flush_icache_all_cores(NULL);
+}
+
+
+/**
+ * Called to flush all memory associated with a memory
+ * context.
+ *
+ * @mm: Memory context to flush
+ */
+static void octeon_flush_cache_mm(struct mm_struct *mm)
+{
+ /*
+ * According to the R4K version of this file, CPUs without
+ * dcache aliases don't need to do anything here
+ */
+}
+
+
+/**
+ * Flush a range of kernel addresses out of the icache
+ *
+ */
+static void octeon_flush_icache_range(unsigned long start, unsigned long end)
+{
+ octeon_flush_icache_all_cores(NULL);
+}
+
+
+/**
+ * Flush a range out of a vma
+ *
+ * @vma: VMA to flush
+ * @start:
+ * @end:
+ */
+static void octeon_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (vma->vm_flags & VM_EXEC)
+ octeon_flush_icache_all_cores(vma);
+}
+
+
+/**
+ * Flush a specific page of a vma
+ *
+ * @vma: VMA to flush page for
+ * @page: Page to flush
+ * @pfn:
+ */
+static void octeon_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long page, unsigned long pfn)
+{
+ if (vma->vm_flags & VM_EXEC)
+ octeon_flush_icache_all_cores(vma);
+}
+
+static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
+{
+ BUG();
+}
+
+/**
+ * Probe Octeon's caches
+ *
+ */
+static void probe_octeon(void)
+{
+ unsigned long icache_size;
+ unsigned long dcache_size;
+ unsigned int config1;
+ struct cpuinfo_mips *c = &current_cpu_data;
+ int cputype = current_cpu_type();
+
+ config1 = read_c0_config1();
+ switch (cputype) {
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ c->icache.linesz = 2 << ((config1 >> 19) & 7);
+ c->icache.sets = 64 << ((config1 >> 22) & 7);
+ c->icache.ways = 1 + ((config1 >> 16) & 7);
+ c->icache.flags |= MIPS_CACHE_VTAG;
+ icache_size =
+ c->icache.sets * c->icache.ways * c->icache.linesz;
+ c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
+ c->dcache.linesz = 128;
+ if (cputype == CPU_CAVIUM_OCTEON_PLUS)
+ c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
+ else
+ c->dcache.sets = 1; /* CN3XXX has one Dcache set */
+ c->dcache.ways = 64;
+ dcache_size =
+ c->dcache.sets * c->dcache.ways * c->dcache.linesz;
+ c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+
+ case CPU_CAVIUM_OCTEON2:
+ c->icache.linesz = 2 << ((config1 >> 19) & 7);
+ c->icache.sets = 8;
+ c->icache.ways = 37;
+ c->icache.flags |= MIPS_CACHE_VTAG;
+ icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
+
+ c->dcache.linesz = 128;
+ c->dcache.ways = 32;
+ c->dcache.sets = 8;
+ dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+
+ case CPU_CAVIUM_OCTEON3:
+ c->icache.linesz = 128;
+ c->icache.sets = 16;
+ c->icache.ways = 39;
+ c->icache.flags |= MIPS_CACHE_VTAG;
+ icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
+
+ c->dcache.linesz = 128;
+ c->dcache.ways = 32;
+ c->dcache.sets = 8;
+ dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+
+ default:
+ panic("Unsupported Cavium Networks CPU type");
+ break;
+ }
+
+ /* compute a couple of other cache variables */
+ c->icache.waysize = icache_size / c->icache.ways;
+ c->dcache.waysize = dcache_size / c->dcache.ways;
+
+ c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
+ c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);
+
+ if (smp_processor_id() == 0) {
+ pr_info("Primary instruction cache %ldkB, %s, %d way, "
+ "%d sets, linesize %d bytes.\n",
+ icache_size >> 10,
+ cpu_has_vtag_icache ?
+ "virtually tagged" : "physically tagged",
+ c->icache.ways, c->icache.sets, c->icache.linesz);
+
+ pr_info("Primary data cache %ldkB, %d-way, %d sets, "
+ "linesize %d bytes.\n",
+ dcache_size >> 10, c->dcache.ways,
+ c->dcache.sets, c->dcache.linesz);
+ }
+}
+
+static void octeon_cache_error_setup(void)
+{
+ extern char except_vec2_octeon;
+ set_handler(0x100, &except_vec2_octeon, 0x80);
+}
+
+/**
+ * Setup the Octeon cache flush routines
+ *
+ */
+void octeon_cache_init(void)
+{
+ probe_octeon();
+
+ shm_align_mask = PAGE_SIZE - 1;
+
+ flush_cache_all = octeon_flush_icache_all;
+ __flush_cache_all = octeon_flush_icache_all;
+ flush_cache_mm = octeon_flush_cache_mm;
+ flush_cache_page = octeon_flush_cache_page;
+ flush_cache_range = octeon_flush_cache_range;
+ flush_icache_all = octeon_flush_icache_all;
+ flush_data_cache_page = octeon_flush_data_cache_page;
+ flush_icache_range = octeon_flush_icache_range;
+ local_flush_icache_range = local_octeon_flush_icache_range;
+ __flush_icache_user_range = octeon_flush_icache_range;
+ __local_flush_icache_user_range = local_octeon_flush_icache_range;
+
+ __flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;
+
+ build_clear_page();
+ build_copy_page();
+
+ board_cache_error_setup = octeon_cache_error_setup;
+}
+
+/*
+ * Handle a cache error exception
+ */
+static RAW_NOTIFIER_HEAD(co_cache_error_chain);
+
+int register_co_cache_error_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&co_cache_error_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);
+
+int unregister_co_cache_error_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);
+
+static void co_cache_error_call_notifiers(unsigned long val)
+{
+ int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);
+ if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
+ u64 dcache_err;
+ unsigned long coreid = cvmx_get_core_num();
+ u64 icache_err = read_octeon_c0_icacheerr();
+
+ if (val) {
+ dcache_err = cache_err_dcache[coreid];
+ cache_err_dcache[coreid] = 0;
+ } else {
+ dcache_err = read_octeon_c0_dcacheerr();
+ }
+
+ pr_err("Core%lu: Cache error exception:\n", coreid);
+ pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
+ if (icache_err & 1) {
+ pr_err("CacheErr (Icache) == %llx\n",
+ (unsigned long long)icache_err);
+ write_octeon_c0_icacheerr(0);
+ }
+ if (dcache_err & 1) {
+ pr_err("CacheErr (Dcache) == %llx\n",
+ (unsigned long long)dcache_err);
+ }
+ }
+}
+
+/*
+ * Called when the exception is recoverable
+ */
+
+asmlinkage void cache_parity_error_octeon_recoverable(void)
+{
+ co_cache_error_call_notifiers(0);
+}
+
+/**
+ * Called when the exception is not recoverable
+ */
+
+asmlinkage void cache_parity_error_octeon_non_recoverable(void)
+{
+ co_cache_error_call_notifiers(1);
+ panic("Can't handle cache error: nested exception");
+}
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
new file mode 100644
index 000000000..df6755ca1
--- /dev/null
+++ b/arch/mips/mm/c-r3k.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r2300.c: R2000 and R3000 specific mmu/cache code.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ *
+ * with a lot of changes to make this thing work for R3000s
+ * Tx39XX R4k style caches added. HK
+ * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
+ * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
+ * Copyright (C) 2001, 2004, 2007 Maciej W. Rozycki
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/mmu_context.h>
+#include <asm/isadep.h>
+#include <asm/io.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+
+static unsigned long icache_size, dcache_size; /* Size in bytes */
+static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */
+
+unsigned long r3k_cache_size(unsigned long ca_flags)
+{
+ unsigned long flags, status, dummy, size;
+ volatile unsigned long *p;
+
+ p = (volatile unsigned long *) KSEG0;
+
+ flags = read_c0_status();
+
+ /* isolate cache space */
+ write_c0_status((ca_flags|flags)&~ST0_IEC);
+
+ *p = 0xa5a55a5a;
+ dummy = *p;
+ status = read_c0_status();
+
+ if (dummy != 0xa5a55a5a || (status & ST0_CM)) {
+ size = 0;
+ } else {
+ for (size = 128; size <= 0x40000; size <<= 1)
+ *(p + size) = 0;
+ *p = -1;
+ for (size = 128;
+ (size <= 0x40000) && (*(p + size) == 0);
+ size <<= 1)
+ ;
+ if (size > 0x40000)
+ size = 0;
+ }
+
+ write_c0_status(flags);
+
+ return size * sizeof(*p);
+}
+
+unsigned long r3k_cache_lsize(unsigned long ca_flags)
+{
+ unsigned long flags, status, lsize, i;
+ volatile unsigned long *p;
+
+ p = (volatile unsigned long *) KSEG0;
+
+ flags = read_c0_status();
+
+ /* isolate cache space */
+ write_c0_status((ca_flags|flags)&~ST0_IEC);
+
+ for (i = 0; i < 128; i++)
+ *(p + i) = 0;
+ *(volatile unsigned char *)p = 0;
+ for (lsize = 1; lsize < 128; lsize <<= 1) {
+ *(p + lsize);
+ status = read_c0_status();
+ if (!(status & ST0_CM))
+ break;
+ }
+ for (i = 0; i < 128; i += lsize)
+ *(volatile unsigned char *)(p + i) = 0;
+
+ write_c0_status(flags);
+
+ return lsize * sizeof(*p);
+}
+
+static void r3k_probe_cache(void)
+{
+ dcache_size = r3k_cache_size(ST0_ISC);
+ if (dcache_size)
+ dcache_lsize = r3k_cache_lsize(ST0_ISC);
+
+ icache_size = r3k_cache_size(ST0_ISC|ST0_SWC);
+ if (icache_size)
+ icache_lsize = r3k_cache_lsize(ST0_ISC|ST0_SWC);
+}
+
+static void r3k_flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long size, i, flags;
+ volatile unsigned char *p;
+
+ size = end - start;
+ if (size > icache_size || KSEGX(start) != KSEG0) {
+ start = KSEG0;
+ size = icache_size;
+ }
+ p = (char *)start;
+
+ flags = read_c0_status();
+
+ /* isolate cache space */
+ write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
+
+ for (i = 0; i < size; i += 0x080) {
+ asm( "sb\t$0, 0x000(%0)\n\t"
+ "sb\t$0, 0x004(%0)\n\t"
+ "sb\t$0, 0x008(%0)\n\t"
+ "sb\t$0, 0x00c(%0)\n\t"
+ "sb\t$0, 0x010(%0)\n\t"
+ "sb\t$0, 0x014(%0)\n\t"
+ "sb\t$0, 0x018(%0)\n\t"
+ "sb\t$0, 0x01c(%0)\n\t"
+ "sb\t$0, 0x020(%0)\n\t"
+ "sb\t$0, 0x024(%0)\n\t"
+ "sb\t$0, 0x028(%0)\n\t"
+ "sb\t$0, 0x02c(%0)\n\t"
+ "sb\t$0, 0x030(%0)\n\t"
+ "sb\t$0, 0x034(%0)\n\t"
+ "sb\t$0, 0x038(%0)\n\t"
+ "sb\t$0, 0x03c(%0)\n\t"
+ "sb\t$0, 0x040(%0)\n\t"
+ "sb\t$0, 0x044(%0)\n\t"
+ "sb\t$0, 0x048(%0)\n\t"
+ "sb\t$0, 0x04c(%0)\n\t"
+ "sb\t$0, 0x050(%0)\n\t"
+ "sb\t$0, 0x054(%0)\n\t"
+ "sb\t$0, 0x058(%0)\n\t"
+ "sb\t$0, 0x05c(%0)\n\t"
+ "sb\t$0, 0x060(%0)\n\t"
+ "sb\t$0, 0x064(%0)\n\t"
+ "sb\t$0, 0x068(%0)\n\t"
+ "sb\t$0, 0x06c(%0)\n\t"
+ "sb\t$0, 0x070(%0)\n\t"
+ "sb\t$0, 0x074(%0)\n\t"
+ "sb\t$0, 0x078(%0)\n\t"
+ "sb\t$0, 0x07c(%0)\n\t"
+ : : "r" (p) );
+ p += 0x080;
+ }
+
+ write_c0_status(flags);
+}
+
+static void r3k_flush_dcache_range(unsigned long start, unsigned long end)
+{
+ unsigned long size, i, flags;
+ volatile unsigned char *p;
+
+ size = end - start;
+ if (size > dcache_size || KSEGX(start) != KSEG0) {
+ start = KSEG0;
+ size = dcache_size;
+ }
+ p = (char *)start;
+
+ flags = read_c0_status();
+
+ /* isolate cache space */
+ write_c0_status((ST0_ISC|flags)&~ST0_IEC);
+
+ for (i = 0; i < size; i += 0x080) {
+ asm( "sb\t$0, 0x000(%0)\n\t"
+ "sb\t$0, 0x004(%0)\n\t"
+ "sb\t$0, 0x008(%0)\n\t"
+ "sb\t$0, 0x00c(%0)\n\t"
+ "sb\t$0, 0x010(%0)\n\t"
+ "sb\t$0, 0x014(%0)\n\t"
+ "sb\t$0, 0x018(%0)\n\t"
+ "sb\t$0, 0x01c(%0)\n\t"
+ "sb\t$0, 0x020(%0)\n\t"
+ "sb\t$0, 0x024(%0)\n\t"
+ "sb\t$0, 0x028(%0)\n\t"
+ "sb\t$0, 0x02c(%0)\n\t"
+ "sb\t$0, 0x030(%0)\n\t"
+ "sb\t$0, 0x034(%0)\n\t"
+ "sb\t$0, 0x038(%0)\n\t"
+ "sb\t$0, 0x03c(%0)\n\t"
+ "sb\t$0, 0x040(%0)\n\t"
+ "sb\t$0, 0x044(%0)\n\t"
+ "sb\t$0, 0x048(%0)\n\t"
+ "sb\t$0, 0x04c(%0)\n\t"
+ "sb\t$0, 0x050(%0)\n\t"
+ "sb\t$0, 0x054(%0)\n\t"
+ "sb\t$0, 0x058(%0)\n\t"
+ "sb\t$0, 0x05c(%0)\n\t"
+ "sb\t$0, 0x060(%0)\n\t"
+ "sb\t$0, 0x064(%0)\n\t"
+ "sb\t$0, 0x068(%0)\n\t"
+ "sb\t$0, 0x06c(%0)\n\t"
+ "sb\t$0, 0x070(%0)\n\t"
+ "sb\t$0, 0x074(%0)\n\t"
+ "sb\t$0, 0x078(%0)\n\t"
+ "sb\t$0, 0x07c(%0)\n\t"
+ : : "r" (p) );
+ p += 0x080;
+ }
+
+ write_c0_status(flags);
+}
+
+static inline void r3k_flush_cache_all(void)
+{
+}
+
+static inline void r3k___flush_cache_all(void)
+{
+ r3k_flush_dcache_range(KSEG0, KSEG0 + dcache_size);
+ r3k_flush_icache_range(KSEG0, KSEG0 + icache_size);
+}
+
+static void r3k_flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static void r3k_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
+
+static void r3k_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
+{
+ unsigned long kaddr = KSEG0ADDR(pfn << PAGE_SHIFT);
+ int exec = vma->vm_flags & VM_EXEC;
+ struct mm_struct *mm = vma->vm_mm;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ pr_debug("cpage[%08llx,%08lx]\n",
+ cpu_context(smp_processor_id(), mm), addr);
+
+ /* No ASID => no such page in the cache. */
+ if (cpu_context(smp_processor_id(), mm) == 0)
+ return;
+
+ pmdp = pmd_off(mm, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
+
+ /* Invalid => no such page in the cache. */
+ if (!(pte_val(*ptep) & _PAGE_PRESENT))
+ return;
+
+ r3k_flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
+ if (exec)
+ r3k_flush_icache_range(kaddr, kaddr + PAGE_SIZE);
+}
+
+static void local_r3k_flush_data_cache_page(void *addr)
+{
+}
+
+static void r3k_flush_data_cache_page(unsigned long addr)
+{
+}
+
+static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size)
+{
+ BUG();
+}
+
+static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
+{
+ /* Catch bad driver code */
+ BUG_ON(size == 0);
+
+ iob();
+ r3k_flush_dcache_range(start, start + size);
+}
+
+void r3k_cache_init(void)
+{
+ extern void build_clear_page(void);
+ extern void build_copy_page(void);
+
+ r3k_probe_cache();
+
+ flush_cache_all = r3k_flush_cache_all;
+ __flush_cache_all = r3k___flush_cache_all;
+ flush_cache_mm = r3k_flush_cache_mm;
+ flush_cache_range = r3k_flush_cache_range;
+ flush_cache_page = r3k_flush_cache_page;
+ flush_icache_range = r3k_flush_icache_range;
+ local_flush_icache_range = r3k_flush_icache_range;
+ __flush_icache_user_range = r3k_flush_icache_range;
+ __local_flush_icache_user_range = r3k_flush_icache_range;
+
+ __flush_kernel_vmap_range = r3k_flush_kernel_vmap_range;
+
+ local_flush_data_cache_page = local_r3k_flush_data_cache_page;
+ flush_data_cache_page = r3k_flush_data_cache_page;
+
+ _dma_cache_wback_inv = r3k_dma_cache_wback_inv;
+ _dma_cache_wback = r3k_dma_cache_wback_inv;
+ _dma_cache_inv = r3k_dma_cache_wback_inv;
+
+ pr_info("Primary instruction cache %ldkB, linesize %ld bytes.\n",
+ icache_size >> 10, icache_lsize);
+ pr_info("Primary data cache %ldkB, linesize %ld bytes.\n",
+ dcache_size >> 10, dcache_lsize);
+
+ build_clear_page();
+ build_copy_page();
+}
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
new file mode 100644
index 000000000..96adc3d23
--- /dev/null
+++ b/arch/mips/mm/c-r4k.c
@@ -0,0 +1,2009 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/cpu_pm.h>
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/preempt.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/export.h>
+#include <linux/bitops.h>
+
+#include <asm/bcache.h>
+#include <asm/bootinfo.h>
+#include <asm/cache.h>
+#include <asm/cacheops.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/r4kcache.h>
+#include <asm/sections.h>
+#include <asm/mmu_context.h>
+#include <asm/war.h>
+#include <asm/cacheflush.h> /* for run_uncached() */
+#include <asm/traps.h>
+#include <asm/dma-coherence.h>
+#include <asm/mips-cps.h>
+
+/*
+ * Bits describing what cache ops an SMP callback function may perform.
+ *
+ * R4K_HIT - Virtual user or kernel address based cache operations. The
+ * active_mm must be checked before using user addresses, falling
+ * back to kmap.
+ * R4K_INDEX - Index based cache operations.
+ */
+
+#define R4K_HIT BIT(0)
+#define R4K_INDEX BIT(1)
+
+/**
+ * r4k_op_needs_ipi() - Decide if a cache op needs to be done on every core.
+ * @type: Type of cache operations (R4K_HIT or R4K_INDEX).
+ *
+ * Decides whether a cache op needs to be performed on every core in the system.
+ * This may change depending on the @type of cache operation, as well as the set
+ * of online CPUs, so preemption should be disabled by the caller to prevent CPU
+ * hotplug from changing the result.
+ *
+ * Returns: 1 if the cache operation @type should be done on every core in
+ * the system.
+ * 0 if the cache operation @type is globalized and only needs to
+ * be performed on a single CPU.
+ */
+static inline bool r4k_op_needs_ipi(unsigned int type)
+{
+ /* The MIPS Coherence Manager (CM) globalizes address-based cache ops */
+ if (type == R4K_HIT && mips_cm_present())
+ return false;
+
+ /*
+ * Hardware doesn't globalize the required cache ops, so SMP calls may
+ * be needed, but only if there are foreign CPUs (non-siblings with
+ * separate caches).
+ */
+ /* cpu_foreign_map[] undeclared when !CONFIG_SMP */
+#ifdef CONFIG_SMP
+ return !cpumask_empty(&cpu_foreign_map[0]);
+#else
+ return false;
+#endif
+}
+
+/*
+ * Special variant of smp_call_function for use by cache functions:
+ *
+ * o No return value
+ * o collapses to normal function call on UP kernels
+ * o collapses to normal function call on systems with a single shared
+ * primary cache.
+ * o doesn't disable interrupts on the local CPU
+ */
+static inline void r4k_on_each_cpu(unsigned int type,
+ void (*func)(void *info), void *info)
+{
+ preempt_disable();
+ if (r4k_op_needs_ipi(type))
+ smp_call_function_many(&cpu_foreign_map[smp_processor_id()],
+ func, info, 1);
+ func(info);
+ preempt_enable();
+}
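+
+/*
+ * Usage sketch (foo_flush/local_foo_flush are made-up names, not part
+ * of this file): an index-type flush helper simply wraps its per-CPU
+ * worker,
+ *
+ *	static void local_foo_flush(void *info)
+ *	{
+ *		r4k_blast_dcache();
+ *	}
+ *
+ *	static void foo_flush(void)
+ *	{
+ *		r4k_on_each_cpu(R4K_INDEX, local_foo_flush, NULL);
+ *	}
+ *
+ * r4k___flush_cache_all() below follows this pattern with a real worker.
+ */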
+
+/*
+ * Must die.
+ */
+static unsigned long icache_size __read_mostly;
+static unsigned long dcache_size __read_mostly;
+static unsigned long vcache_size __read_mostly;
+static unsigned long scache_size __read_mostly;
+
+/*
+ * Dummy cache handling routines for machines without board caches
+ */
+static void cache_noop(void) {}
+
+static struct bcache_ops no_sc_ops = {
+ .bc_enable = (void *)cache_noop,
+ .bc_disable = (void *)cache_noop,
+ .bc_wback_inv = (void *)cache_noop,
+ .bc_inv = (void *)cache_noop
+};
+
+struct bcache_ops *bcops = &no_sc_ops;
+
+#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
+#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
+
+#define R4600_HIT_CACHEOP_WAR_IMPL \
+do { \
+ if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && \
+ cpu_is_r4600_v2_x()) \
+ *(volatile unsigned long *)CKSEG1; \
+ if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP)) \
+ __asm__ __volatile__("nop;nop;nop;nop"); \
+} while (0)
+
+static void (*r4k_blast_dcache_page)(unsigned long addr);
+
+static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
+{
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_dcache32_page(addr);
+}
+
+static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
+{
+ blast_dcache64_page(addr);
+}
+
+static inline void r4k_blast_dcache_page_dc128(unsigned long addr)
+{
+ blast_dcache128_page(addr);
+}
+
+static void r4k_blast_dcache_page_setup(void)
+{
+ unsigned long dc_lsize = cpu_dcache_line_size();
+
+ switch (dc_lsize) {
+ case 0:
+ r4k_blast_dcache_page = (void *)cache_noop;
+ break;
+ case 16:
+ r4k_blast_dcache_page = blast_dcache16_page;
+ break;
+ case 32:
+ r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
+ break;
+ case 64:
+ r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
+ break;
+ case 128:
+ r4k_blast_dcache_page = r4k_blast_dcache_page_dc128;
+ break;
+ default:
+ break;
+ }
+}
+
+#ifndef CONFIG_EVA
+#define r4k_blast_dcache_user_page r4k_blast_dcache_page
+#else
+
+static void (*r4k_blast_dcache_user_page)(unsigned long addr);
+
+static void r4k_blast_dcache_user_page_setup(void)
+{
+ unsigned long dc_lsize = cpu_dcache_line_size();
+
+ if (dc_lsize == 0)
+ r4k_blast_dcache_user_page = (void *)cache_noop;
+ else if (dc_lsize == 16)
+ r4k_blast_dcache_user_page = blast_dcache16_user_page;
+ else if (dc_lsize == 32)
+ r4k_blast_dcache_user_page = blast_dcache32_user_page;
+ else if (dc_lsize == 64)
+ r4k_blast_dcache_user_page = blast_dcache64_user_page;
+}
+
+#endif
+
+static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
+
+static void r4k_blast_dcache_page_indexed_setup(void)
+{
+ unsigned long dc_lsize = cpu_dcache_line_size();
+
+ if (dc_lsize == 0)
+ r4k_blast_dcache_page_indexed = (void *)cache_noop;
+ else if (dc_lsize == 16)
+ r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
+ else if (dc_lsize == 32)
+ r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
+ else if (dc_lsize == 64)
+ r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
+ else if (dc_lsize == 128)
+ r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed;
+}
+
+void (* r4k_blast_dcache)(void);
+EXPORT_SYMBOL(r4k_blast_dcache);
+
+static void r4k_blast_dcache_setup(void)
+{
+ unsigned long dc_lsize = cpu_dcache_line_size();
+
+ if (dc_lsize == 0)
+ r4k_blast_dcache = (void *)cache_noop;
+ else if (dc_lsize == 16)
+ r4k_blast_dcache = blast_dcache16;
+ else if (dc_lsize == 32)
+ r4k_blast_dcache = blast_dcache32;
+ else if (dc_lsize == 64)
+ r4k_blast_dcache = blast_dcache64;
+ else if (dc_lsize == 128)
+ r4k_blast_dcache = blast_dcache128;
+}
+
+/* force code alignment (used for CONFIG_WAR_TX49XX_ICACHE_INDEX_INV) */
+#define JUMP_TO_ALIGN(order) \
+ __asm__ __volatile__( \
+ "b\t1f\n\t" \
+ ".align\t" #order "\n\t" \
+ "1:\n\t" \
+ )
+#define CACHE32_UNROLL32_ALIGN JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
+#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
+
+static inline void blast_r4600_v1_icache32(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ blast_icache32();
+ local_irq_restore(flags);
+}
+
+static inline void tx49_blast_icache32(void)
+{
+ unsigned long start = INDEX_BASE;
+ unsigned long end = start + current_cpu_data.icache.waysize;
+ unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
+ unsigned long ws_end = current_cpu_data.icache.ways <<
+ current_cpu_data.icache.waybit;
+ unsigned long ws, addr;
+
+ CACHE32_UNROLL32_ALIGN2;
+ /* I'm in even chunk. blast odd chunks */
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
+ CACHE32_UNROLL32_ALIGN;
+ /* I'm in odd chunk. blast even chunks */
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start; addr < end; addr += 0x400 * 2)
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
+}
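+
+/*
+ * The interleaved passes above appear to be the point of the TX49
+ * index-invalidate workaround: the routine is aligned with
+ * JUMP_TO_ALIGN and only invalidates every other 1 KB chunk per pass,
+ * so it never index-invalidates the I-cache lines it is currently
+ * executing from.
+ */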
+
+static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ blast_icache32_page_indexed(page);
+ local_irq_restore(flags);
+}
+
+static inline void tx49_blast_icache32_page_indexed(unsigned long page)
+{
+ unsigned long indexmask = current_cpu_data.icache.waysize - 1;
+ unsigned long start = INDEX_BASE + (page & indexmask);
+ unsigned long end = start + PAGE_SIZE;
+ unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
+ unsigned long ws_end = current_cpu_data.icache.ways <<
+ current_cpu_data.icache.waybit;
+ unsigned long ws, addr;
+
+ CACHE32_UNROLL32_ALIGN2;
+ /* I'm in even chunk. blast odd chunks */
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
+ CACHE32_UNROLL32_ALIGN;
+ /* I'm in odd chunk. blast even chunks */
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start; addr < end; addr += 0x400 * 2)
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
+}
+
+static void (* r4k_blast_icache_page)(unsigned long addr);
+
+static void r4k_blast_icache_page_setup(void)
+{
+ unsigned long ic_lsize = cpu_icache_line_size();
+
+ if (ic_lsize == 0)
+ r4k_blast_icache_page = (void *)cache_noop;
+ else if (ic_lsize == 16)
+ r4k_blast_icache_page = blast_icache16_page;
+ else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2EF)
+ r4k_blast_icache_page = loongson2_blast_icache32_page;
+ else if (ic_lsize == 32)
+ r4k_blast_icache_page = blast_icache32_page;
+ else if (ic_lsize == 64)
+ r4k_blast_icache_page = blast_icache64_page;
+ else if (ic_lsize == 128)
+ r4k_blast_icache_page = blast_icache128_page;
+}
+
+#ifndef CONFIG_EVA
+#define r4k_blast_icache_user_page r4k_blast_icache_page
+#else
+
+static void (*r4k_blast_icache_user_page)(unsigned long addr);
+
+static void r4k_blast_icache_user_page_setup(void)
+{
+ unsigned long ic_lsize = cpu_icache_line_size();
+
+ if (ic_lsize == 0)
+ r4k_blast_icache_user_page = (void *)cache_noop;
+ else if (ic_lsize == 16)
+ r4k_blast_icache_user_page = blast_icache16_user_page;
+ else if (ic_lsize == 32)
+ r4k_blast_icache_user_page = blast_icache32_user_page;
+ else if (ic_lsize == 64)
+ r4k_blast_icache_user_page = blast_icache64_user_page;
+}
+
+#endif
+
+static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
+
+static void r4k_blast_icache_page_indexed_setup(void)
+{
+ unsigned long ic_lsize = cpu_icache_line_size();
+
+ if (ic_lsize == 0)
+ r4k_blast_icache_page_indexed = (void *)cache_noop;
+ else if (ic_lsize == 16)
+ r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
+ else if (ic_lsize == 32) {
+ if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) &&
+ cpu_is_r4600_v1_x())
+ r4k_blast_icache_page_indexed =
+ blast_icache32_r4600_v1_page_indexed;
+ else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV))
+ r4k_blast_icache_page_indexed =
+ tx49_blast_icache32_page_indexed;
+ else if (current_cpu_type() == CPU_LOONGSON2EF)
+ r4k_blast_icache_page_indexed =
+ loongson2_blast_icache32_page_indexed;
+ else
+ r4k_blast_icache_page_indexed =
+ blast_icache32_page_indexed;
+ } else if (ic_lsize == 64)
+ r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
+}
+
+void (* r4k_blast_icache)(void);
+EXPORT_SYMBOL(r4k_blast_icache);
+
+static void r4k_blast_icache_setup(void)
+{
+ unsigned long ic_lsize = cpu_icache_line_size();
+
+ if (ic_lsize == 0)
+ r4k_blast_icache = (void *)cache_noop;
+ else if (ic_lsize == 16)
+ r4k_blast_icache = blast_icache16;
+ else if (ic_lsize == 32) {
+ if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) &&
+ cpu_is_r4600_v1_x())
+ r4k_blast_icache = blast_r4600_v1_icache32;
+ else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV))
+ r4k_blast_icache = tx49_blast_icache32;
+ else if (current_cpu_type() == CPU_LOONGSON2EF)
+ r4k_blast_icache = loongson2_blast_icache32;
+ else
+ r4k_blast_icache = blast_icache32;
+ } else if (ic_lsize == 64)
+ r4k_blast_icache = blast_icache64;
+ else if (ic_lsize == 128)
+ r4k_blast_icache = blast_icache128;
+}
+
+static void (* r4k_blast_scache_page)(unsigned long addr);
+
+static void r4k_blast_scache_page_setup(void)
+{
+ unsigned long sc_lsize = cpu_scache_line_size();
+
+ if (scache_size == 0)
+ r4k_blast_scache_page = (void *)cache_noop;
+ else if (sc_lsize == 16)
+ r4k_blast_scache_page = blast_scache16_page;
+ else if (sc_lsize == 32)
+ r4k_blast_scache_page = blast_scache32_page;
+ else if (sc_lsize == 64)
+ r4k_blast_scache_page = blast_scache64_page;
+ else if (sc_lsize == 128)
+ r4k_blast_scache_page = blast_scache128_page;
+}
+
+static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
+
+static void r4k_blast_scache_page_indexed_setup(void)
+{
+ unsigned long sc_lsize = cpu_scache_line_size();
+
+ if (scache_size == 0)
+ r4k_blast_scache_page_indexed = (void *)cache_noop;
+ else if (sc_lsize == 16)
+ r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
+ else if (sc_lsize == 32)
+ r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
+ else if (sc_lsize == 64)
+ r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
+ else if (sc_lsize == 128)
+ r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
+}
+
+static void (* r4k_blast_scache)(void);
+
+static void r4k_blast_scache_setup(void)
+{
+ unsigned long sc_lsize = cpu_scache_line_size();
+
+ if (scache_size == 0)
+ r4k_blast_scache = (void *)cache_noop;
+ else if (sc_lsize == 16)
+ r4k_blast_scache = blast_scache16;
+ else if (sc_lsize == 32)
+ r4k_blast_scache = blast_scache32;
+ else if (sc_lsize == 64)
+ r4k_blast_scache = blast_scache64;
+ else if (sc_lsize == 128)
+ r4k_blast_scache = blast_scache128;
+}
+
+static void (*r4k_blast_scache_node)(long node);
+
+static void r4k_blast_scache_node_setup(void)
+{
+ unsigned long sc_lsize = cpu_scache_line_size();
+
+ if (current_cpu_type() != CPU_LOONGSON64)
+ r4k_blast_scache_node = (void *)cache_noop;
+ else if (sc_lsize == 16)
+ r4k_blast_scache_node = blast_scache16_node;
+ else if (sc_lsize == 32)
+ r4k_blast_scache_node = blast_scache32_node;
+ else if (sc_lsize == 64)
+ r4k_blast_scache_node = blast_scache64_node;
+ else if (sc_lsize == 128)
+ r4k_blast_scache_node = blast_scache128_node;
+}
+
+static inline void local_r4k___flush_cache_all(void * args)
+{
+ switch (current_cpu_type()) {
+ case CPU_LOONGSON2EF:
+ case CPU_R4000SC:
+ case CPU_R4000MC:
+ case CPU_R4400SC:
+ case CPU_R4400MC:
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
+ case CPU_R16000:
+ /*
+ * These caches are inclusive caches, that is, if something
+ * is not cached in the S-cache, we know it also won't be
+ * in one of the primary caches.
+ */
+ r4k_blast_scache();
+ break;
+
+ case CPU_LOONGSON64:
+ /* Use get_ebase_cpunum() for both NUMA=y/n */
+ r4k_blast_scache_node(get_ebase_cpunum() >> 2);
+ break;
+
+ case CPU_BMIPS5000:
+ r4k_blast_scache();
+ __sync();
+ break;
+
+ default:
+ r4k_blast_dcache();
+ r4k_blast_icache();
+ break;
+ }
+}
+
+static void r4k___flush_cache_all(void)
+{
+ r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL);
+}
+
+/**
+ * has_valid_asid() - Determine if an mm already has an ASID.
+ * @mm: Memory map.
+ * @type: R4K_HIT or R4K_INDEX, type of cache op.
+ *
+ * Determines whether @mm already has an ASID on any of the CPUs which cache ops
+ * of type @type within an r4k_on_each_cpu() call will affect. If
+ * r4k_on_each_cpu() does an SMP call to a single VPE in each core, then the
+ * scope of the operation is confined to sibling CPUs, otherwise all online CPUs
+ * will need to be checked.
+ *
+ * Must be called in non-preemptive context.
+ *
+ * Returns: 1 if the CPUs affected by @type cache ops have an ASID for @mm.
+ * 0 otherwise.
+ */
+static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
+{
+ unsigned int i;
+ const cpumask_t *mask = cpu_present_mask;
+
+ if (cpu_has_mmid)
+ return cpu_context(0, mm) != 0;
+
+ /* cpu_sibling_map[] undeclared when !CONFIG_SMP */
+#ifdef CONFIG_SMP
+ /*
+ * If r4k_on_each_cpu does SMP calls, it does them to a single VPE in
+ * each foreign core, so we only need to worry about siblings.
+ * Otherwise we need to worry about all present CPUs.
+ */
+ if (r4k_op_needs_ipi(type))
+ mask = &cpu_sibling_map[smp_processor_id()];
+#endif
+ for_each_cpu(i, mask)
+ if (cpu_context(i, mm))
+ return 1;
+ return 0;
+}
+
+static void r4k__flush_cache_vmap(void)
+{
+ r4k_blast_dcache();
+}
+
+static void r4k__flush_cache_vunmap(void)
+{
+ r4k_blast_dcache();
+}
+
+/*
+ * Note: flush_tlb_range() assumes flush_cache_range() sufficiently flushes
+ * whole caches when vma is executable.
+ */
+static inline void local_r4k_flush_cache_range(void * args)
+{
+ struct vm_area_struct *vma = args;
+ int exec = vma->vm_flags & VM_EXEC;
+
+ if (!has_valid_asid(vma->vm_mm, R4K_INDEX))
+ return;
+
+ /*
+ * If dcache can alias, we must blast it since mapping is changing.
+ * If executable, we must ensure any dirty lines are written back far
+ * enough to be visible to icache.
+ */
+ if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
+ r4k_blast_dcache();
+ /* If executable, blast stale lines from icache */
+ if (exec)
+ r4k_blast_icache();
+}
+
+static void r4k_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ int exec = vma->vm_flags & VM_EXEC;
+
+ if (cpu_has_dc_aliases || exec)
+ r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma);
+}
+
+static inline void local_r4k_flush_cache_mm(void * args)
+{
+ struct mm_struct *mm = args;
+
+ if (!has_valid_asid(mm, R4K_INDEX))
+ return;
+
+ /*
+ * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
+ * only flush the primary caches but R1x000 behave sanely ...
+ * R4000SC and R4400SC indexed S-cache ops also invalidate primary
+ * caches, so we can bail out early.
+ */
+ if (current_cpu_type() == CPU_R4000SC ||
+ current_cpu_type() == CPU_R4000MC ||
+ current_cpu_type() == CPU_R4400SC ||
+ current_cpu_type() == CPU_R4400MC) {
+ r4k_blast_scache();
+ return;
+ }
+
+ r4k_blast_dcache();
+}
+
+static void r4k_flush_cache_mm(struct mm_struct *mm)
+{
+ if (!cpu_has_dc_aliases)
+ return;
+
+ r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm);
+}
+
+struct flush_cache_page_args {
+ struct vm_area_struct *vma;
+ unsigned long addr;
+ unsigned long pfn;
+};
+
+static inline void local_r4k_flush_cache_page(void *args)
+{
+ struct flush_cache_page_args *fcp_args = args;
+ struct vm_area_struct *vma = fcp_args->vma;
+ unsigned long addr = fcp_args->addr;
+ struct page *page = pfn_to_page(fcp_args->pfn);
+ int exec = vma->vm_flags & VM_EXEC;
+ struct mm_struct *mm = vma->vm_mm;
+ int map_coherent = 0;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ void *vaddr;
+
+ /*
+ * If the mm owns no valid ASID yet, it cannot possibly have
+ * gotten this page into the cache.
+ */
+ if (!has_valid_asid(mm, R4K_HIT))
+ return;
+
+ addr &= PAGE_MASK;
+ pmdp = pmd_off(mm, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
+
+ /*
+ * If the page isn't marked valid, the page cannot possibly be
+ * in the cache.
+ */
+ if (!(pte_present(*ptep)))
+ return;
+
+ if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
+ vaddr = NULL;
+ else {
+ /*
+ * Use kmap_coherent or kmap_atomic to do flushes for
+ * another ASID than the current one.
+ */
+ map_coherent = (cpu_has_dc_aliases &&
+ page_mapcount(page) &&
+ !Page_dcache_dirty(page));
+ if (map_coherent)
+ vaddr = kmap_coherent(page, addr);
+ else
+ vaddr = kmap_atomic(page);
+ addr = (unsigned long)vaddr;
+ }
+
+ if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
+ vaddr ? r4k_blast_dcache_page(addr) :
+ r4k_blast_dcache_user_page(addr);
+ if (exec && !cpu_icache_snoops_remote_store)
+ r4k_blast_scache_page(addr);
+ }
+ if (exec) {
+ if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
+ drop_mmu_context(mm);
+ } else
+ vaddr ? r4k_blast_icache_page(addr) :
+ r4k_blast_icache_user_page(addr);
+ }
+
+ if (vaddr) {
+ if (map_coherent)
+ kunmap_coherent();
+ else
+ kunmap_atomic(vaddr);
+ }
+}
+
+static void r4k_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
+{
+ struct flush_cache_page_args args;
+
+ args.vma = vma;
+ args.addr = addr;
+ args.pfn = pfn;
+
+ r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_page, &args);
+}
+
+static inline void local_r4k_flush_data_cache_page(void * addr)
+{
+ r4k_blast_dcache_page((unsigned long) addr);
+}
+
+static void r4k_flush_data_cache_page(unsigned long addr)
+{
+ if (in_atomic())
+ local_r4k_flush_data_cache_page((void *)addr);
+ else
+ r4k_on_each_cpu(R4K_HIT, local_r4k_flush_data_cache_page,
+ (void *) addr);
+}
+
+struct flush_icache_range_args {
+ unsigned long start;
+ unsigned long end;
+ unsigned int type;
+ bool user;
+};
+
+static inline void __local_r4k_flush_icache_range(unsigned long start,
+ unsigned long end,
+ unsigned int type,
+ bool user)
+{
+ if (!cpu_has_ic_fills_f_dc) {
+ if (type == R4K_INDEX ||
+ (type & R4K_INDEX && end - start >= dcache_size)) {
+ r4k_blast_dcache();
+ } else {
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ if (user)
+ protected_blast_dcache_range(start, end);
+ else
+ blast_dcache_range(start, end);
+ }
+ }
+
+ if (type == R4K_INDEX ||
+ (type & R4K_INDEX && end - start > icache_size))
+ r4k_blast_icache();
+ else {
+ switch (boot_cpu_type()) {
+ case CPU_LOONGSON2EF:
+ protected_loongson2_blast_icache_range(start, end);
+ break;
+
+ default:
+ if (user)
+ protected_blast_icache_range(start, end);
+ else
+ blast_icache_range(start, end);
+ break;
+ }
+ }
+}
+
+static inline void local_r4k_flush_icache_range(unsigned long start,
+ unsigned long end)
+{
+ __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, false);
+}
+
+static inline void local_r4k_flush_icache_user_range(unsigned long start,
+ unsigned long end)
+{
+ __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, true);
+}
+
+static inline void local_r4k_flush_icache_range_ipi(void *args)
+{
+ struct flush_icache_range_args *fir_args = args;
+ unsigned long start = fir_args->start;
+ unsigned long end = fir_args->end;
+ unsigned int type = fir_args->type;
+ bool user = fir_args->user;
+
+ __local_r4k_flush_icache_range(start, end, type, user);
+}
+
+static void __r4k_flush_icache_range(unsigned long start, unsigned long end,
+ bool user)
+{
+ struct flush_icache_range_args args;
+ unsigned long size, cache_size;
+
+ args.start = start;
+ args.end = end;
+ args.type = R4K_HIT | R4K_INDEX;
+ args.user = user;
+
+ /*
+ * Indexed cache ops require an SMP call.
+ * Consider if that can or should be avoided.
+ */
+ preempt_disable();
+ if (r4k_op_needs_ipi(R4K_INDEX) && !r4k_op_needs_ipi(R4K_HIT)) {
+ /*
+ * If address-based cache ops don't require an SMP call, then
+ * use them exclusively for small flushes.
+ */
+ size = end - start;
+ cache_size = icache_size;
+ if (!cpu_has_ic_fills_f_dc) {
+ size *= 2;
+ cache_size += dcache_size;
+ }
+ if (size <= cache_size)
+ args.type &= ~R4K_INDEX;
+ }
+ r4k_on_each_cpu(args.type, local_r4k_flush_icache_range_ipi, &args);
+ preempt_enable();
+ instruction_hazard();
+}
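+
+/*
+ * Worked example of the size heuristic above (cache sizes are
+ * illustrative): with a 32 KB I-cache, a 32 KB D-cache and no
+ * cpu_has_ic_fills_f_dc, a 16 KB flush costs 2 * 16 KB = 32 KB of
+ * hit-type ops, which is within the 64 KB of index-type work, so
+ * R4K_INDEX is dropped and no IPIs are sent; a 48 KB flush (96 KB >
+ * 64 KB) keeps R4K_INDEX and goes through the SMP call.
+ */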
+
+static void r4k_flush_icache_range(unsigned long start, unsigned long end)
+{
+ return __r4k_flush_icache_range(start, end, false);
+}
+
+static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
+{
+ return __r4k_flush_icache_range(start, end, true);
+}
+
+#ifdef CONFIG_DMA_NONCOHERENT
+
+static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
+{
+ /* Catch bad driver code */
+ if (WARN_ON(size == 0))
+ return;
+
+ preempt_disable();
+ if (cpu_has_inclusive_pcaches) {
+ if (size >= scache_size) {
+ if (current_cpu_type() != CPU_LOONGSON64)
+ r4k_blast_scache();
+ else
+ r4k_blast_scache_node(pa_to_nid(addr));
+ } else {
+ blast_scache_range(addr, addr + size);
+ }
+ preempt_enable();
+ __sync();
+ return;
+ }
+
+ /*
+ * Either no secondary cache or the available caches don't have the
+ * subset property so we have to flush the primary caches
+ * explicitly.
+ * If we would need IPI to perform an INDEX-type operation, then
+ * we have to use the HIT-type alternative as IPI cannot be used
+ * here due to interrupts possibly being disabled.
+ */
+ if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
+ r4k_blast_dcache();
+ } else {
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_dcache_range(addr, addr + size);
+ }
+ preempt_enable();
+
+ bc_wback_inv(addr, size);
+ __sync();
+}
+
+static void prefetch_cache_inv(unsigned long addr, unsigned long size)
+{
+ unsigned int linesz = cpu_scache_line_size();
+ unsigned long addr0 = addr, addr1;
+
+ addr0 &= ~(linesz - 1);
+ addr1 = (addr0 + size - 1) & ~(linesz - 1);
+
+ protected_writeback_scache_line(addr0);
+ if (likely(addr1 != addr0))
+ protected_writeback_scache_line(addr1);
+ else
+ return;
+
+ addr0 += linesz;
+ if (likely(addr1 != addr0))
+ protected_writeback_scache_line(addr0);
+ else
+ return;
+
+ addr1 -= linesz;
+ if (likely(addr1 > addr0))
+ protected_writeback_scache_line(addr0);
+}
+
+static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
+{
+ /* Catch bad driver code */
+ if (WARN_ON(size == 0))
+ return;
+
+ preempt_disable();
+
+ if (current_cpu_type() == CPU_BMIPS5000)
+ prefetch_cache_inv(addr, size);
+
+ if (cpu_has_inclusive_pcaches) {
+ if (size >= scache_size) {
+ if (current_cpu_type() != CPU_LOONGSON64)
+ r4k_blast_scache();
+ else
+ r4k_blast_scache_node(pa_to_nid(addr));
+ } else {
+ /*
+ * There is no clearly documented alignment requirement
+ * for the cache instruction on MIPS processors, and
+ * some processors, among them the QED RM5200 and
+ * RM7000, will throw an address error for cache hit
+ * ops with insufficient alignment.  Solved by
+ * aligning the address to the cache line size.
+ */
+ blast_inv_scache_range(addr, addr + size);
+ }
+ preempt_enable();
+ __sync();
+ return;
+ }
+
+ if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
+ r4k_blast_dcache();
+ } else {
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_inv_dcache_range(addr, addr + size);
+ }
+ preempt_enable();
+
+ bc_inv(addr, size);
+ __sync();
+}
+#endif /* CONFIG_DMA_NONCOHERENT */
+
+static void r4k_flush_icache_all(void)
+{
+ if (cpu_has_vtag_icache)
+ r4k_blast_icache();
+}
+
+struct flush_kernel_vmap_range_args {
+ unsigned long vaddr;
+ int size;
+};
+
+static inline void local_r4k_flush_kernel_vmap_range_index(void *args)
+{
+ /*
+ * Aliases only affect the primary caches so don't bother with
+ * S-caches or T-caches.
+ */
+ r4k_blast_dcache();
+}
+
+static inline void local_r4k_flush_kernel_vmap_range(void *args)
+{
+ struct flush_kernel_vmap_range_args *vmra = args;
+ unsigned long vaddr = vmra->vaddr;
+ int size = vmra->size;
+
+ /*
+ * Aliases only affect the primary caches so don't bother with
+ * S-caches or T-caches.
+ */
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_dcache_range(vaddr, vaddr + size);
+}
+
+static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
+{
+ struct flush_kernel_vmap_range_args args;
+
+ args.vaddr = (unsigned long) vaddr;
+ args.size = size;
+
+ if (size >= dcache_size)
+ r4k_on_each_cpu(R4K_INDEX,
+ local_r4k_flush_kernel_vmap_range_index, NULL);
+ else
+ r4k_on_each_cpu(R4K_HIT, local_r4k_flush_kernel_vmap_range,
+ &args);
+}
+
+static inline void rm7k_erratum31(void)
+{
+ const unsigned long ic_lsize = 32;
+ unsigned long addr;
+
+ /* RM7000 erratum #31. The icache is screwed at startup. */
+ write_c0_taglo(0);
+ write_c0_taghi(0);
+
+ for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
+ __asm__ __volatile__ (
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ ".set mips3\n\t"
+ "cache\t%1, 0(%0)\n\t"
+ "cache\t%1, 0x1000(%0)\n\t"
+ "cache\t%1, 0x2000(%0)\n\t"
+ "cache\t%1, 0x3000(%0)\n\t"
+ "cache\t%2, 0(%0)\n\t"
+ "cache\t%2, 0x1000(%0)\n\t"
+ "cache\t%2, 0x2000(%0)\n\t"
+ "cache\t%2, 0x3000(%0)\n\t"
+ "cache\t%1, 0(%0)\n\t"
+ "cache\t%1, 0x1000(%0)\n\t"
+ "cache\t%1, 0x2000(%0)\n\t"
+ "cache\t%1, 0x3000(%0)\n\t"
+ ".set pop\n"
+ :
+ : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill_I));
+ }
+}
+
+static inline int alias_74k_erratum(struct cpuinfo_mips *c)
+{
+ unsigned int imp = c->processor_id & PRID_IMP_MASK;
+ unsigned int rev = c->processor_id & PRID_REV_MASK;
+ int present = 0;
+
+ /*
+ * Early versions of the 74K do not update the cache tags on a
+ * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
+ * aliases. In this case it is better to treat the cache as always
+ * having aliases. Also disable the synonym tag update feature
+ * where available. In this case no opportunistic tag update will
+ * happen where a load causes a virtual address miss but a physical
+ * address hit during a D-cache look-up.
+ */
+ switch (imp) {
+ case PRID_IMP_74K:
+ if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
+ present = 1;
+ if (rev == PRID_REV_ENCODE_332(2, 4, 0))
+ write_c0_config6(read_c0_config6() | MTI_CONF6_SYND);
+ break;
+ case PRID_IMP_1074K:
+ if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
+ present = 1;
+ write_c0_config6(read_c0_config6() | MTI_CONF6_SYND);
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ return present;
+}
+
+static void b5k_instruction_hazard(void)
+{
+ __sync();
+ __sync();
+ __asm__ __volatile__(
+ " nop; nop; nop; nop; nop; nop; nop; nop\n"
+ " nop; nop; nop; nop; nop; nop; nop; nop\n"
+ " nop; nop; nop; nop; nop; nop; nop; nop\n"
+ " nop; nop; nop; nop; nop; nop; nop; nop\n"
+ : : : "memory");
+}
+
+static char *way_string[] = { NULL, "direct mapped", "2-way",
+ "3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
+ "9-way", "10-way", "11-way", "12-way",
+ "13-way", "14-way", "15-way", "16-way",
+};
+
+static void probe_pcache(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int config = read_c0_config();
+ unsigned int prid = read_c0_prid();
+ int has_74k_erratum = 0;
+ unsigned long config1;
+ unsigned int lsize;
+
+ switch (current_cpu_type()) {
+ case CPU_R4600: /* QED style two way caches? */
+ case CPU_R4700:
+ case CPU_R5000:
+ case CPU_NEVADA:
+ icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
+ c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
+ c->icache.ways = 2;
+ c->icache.waybit = __ffs(icache_size/2);
+
+ dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
+ c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
+ c->dcache.ways = 2;
+ c->dcache.waybit= __ffs(dcache_size/2);
+
+ c->options |= MIPS_CPU_CACHE_CDEX_P;
+ break;
+
+ case CPU_R5500:
+ icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
+ c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
+ c->icache.ways = 2;
+ c->icache.waybit= 0;
+
+ dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
+ c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
+ c->dcache.ways = 2;
+ c->dcache.waybit = 0;
+
+ c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
+ break;
+
+ case CPU_TX49XX:
+ icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
+ c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
+ c->icache.ways = 4;
+ c->icache.waybit= 0;
+
+ dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
+ c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
+ c->dcache.ways = 4;
+ c->dcache.waybit = 0;
+
+ c->options |= MIPS_CPU_CACHE_CDEX_P;
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+
+ case CPU_R4000PC:
+ case CPU_R4000SC:
+ case CPU_R4000MC:
+ case CPU_R4400PC:
+ case CPU_R4400SC:
+ case CPU_R4400MC:
+ icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
+ c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
+ c->icache.ways = 1;
+ c->icache.waybit = 0; /* doesn't matter */
+
+ dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
+ c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
+ c->dcache.ways = 1;
+ c->dcache.waybit = 0; /* does not matter */
+
+ c->options |= MIPS_CPU_CACHE_CDEX_P;
+ break;
+
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
+ case CPU_R16000:
+ icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
+ c->icache.linesz = 64;
+ c->icache.ways = 2;
+ c->icache.waybit = 0;
+
+ dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
+ c->dcache.linesz = 32;
+ c->dcache.ways = 2;
+ c->dcache.waybit = 0;
+
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+
+ case CPU_VR4133:
+ write_c0_config(config & ~VR41_CONF_P4K);
+ fallthrough;
+ case CPU_VR4131:
+ /* Workaround for cache instruction bug of VR4131 */
+ if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
+ c->processor_id == 0x0c82U) {
+ config |= 0x00400000U;
+ if (c->processor_id == 0x0c80U)
+ config |= VR41_CONF_BP;
+ write_c0_config(config);
+ } else
+ c->options |= MIPS_CPU_CACHE_CDEX_P;
+
+ icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
+ c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
+ c->icache.ways = 2;
+ c->icache.waybit = __ffs(icache_size/2);
+
+ dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
+ c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
+ c->dcache.ways = 2;
+ c->dcache.waybit = __ffs(dcache_size/2);
+ break;
+
+ case CPU_VR41XX:
+ case CPU_VR4111:
+ case CPU_VR4121:
+ case CPU_VR4122:
+ case CPU_VR4181:
+ case CPU_VR4181A:
+ icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
+ c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
+ c->icache.ways = 1;
+ c->icache.waybit = 0; /* doesn't matter */
+
+ dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
+ c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
+ c->dcache.ways = 1;
+ c->dcache.waybit = 0; /* does not matter */
+
+ c->options |= MIPS_CPU_CACHE_CDEX_P;
+ break;
+
+ case CPU_RM7000:
+ rm7k_erratum31();
+
+ icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
+ c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
+ c->icache.ways = 4;
+ c->icache.waybit = __ffs(icache_size / c->icache.ways);
+
+ dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
+ c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
+ c->dcache.ways = 4;
+ c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
+
+ c->options |= MIPS_CPU_CACHE_CDEX_P;
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+
+ case CPU_LOONGSON2EF:
+ icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
+ c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
+ if (prid & 0x3)
+ c->icache.ways = 4;
+ else
+ c->icache.ways = 2;
+ c->icache.waybit = 0;
+
+ dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
+ c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
+ if (prid & 0x3)
+ c->dcache.ways = 4;
+ else
+ c->dcache.ways = 2;
+ c->dcache.waybit = 0;
+ break;
+
+ case CPU_LOONGSON64:
+ config1 = read_c0_config1();
+ lsize = (config1 >> 19) & 7;
+ if (lsize)
+ c->icache.linesz = 2 << lsize;
+ else
+ c->icache.linesz = 0;
+ c->icache.sets = 64 << ((config1 >> 22) & 7);
+ c->icache.ways = 1 + ((config1 >> 16) & 7);
+ icache_size = c->icache.sets *
+ c->icache.ways *
+ c->icache.linesz;
+ c->icache.waybit = 0;
+
+ lsize = (config1 >> 10) & 7;
+ if (lsize)
+ c->dcache.linesz = 2 << lsize;
+ else
+ c->dcache.linesz = 0;
+ c->dcache.sets = 64 << ((config1 >> 13) & 7);
+ c->dcache.ways = 1 + ((config1 >> 7) & 7);
+ dcache_size = c->dcache.sets *
+ c->dcache.ways *
+ c->dcache.linesz;
+ c->dcache.waybit = 0;
+ if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
+ (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
+ (c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+
+ case CPU_CAVIUM_OCTEON3:
+ /* For now lie about the number of ways. */
+ c->icache.linesz = 128;
+ c->icache.sets = 16;
+ c->icache.ways = 8;
+ c->icache.flags |= MIPS_CACHE_VTAG;
+ icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
+
+ c->dcache.linesz = 128;
+ c->dcache.ways = 8;
+ c->dcache.sets = 8;
+ dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+
+ default:
+ if (!(config & MIPS_CONF_M))
+ panic("Don't know how to probe P-caches on this cpu.");
+
+ /*
+ * We seem to be a MIPS32 or MIPS64 CPU,
+ * so let's probe the I-cache ...
+ */
+ config1 = read_c0_config1();
+
+ lsize = (config1 >> 19) & 7;
+
+ /* IL == 7 is reserved */
+ if (lsize == 7)
+ panic("Invalid icache line size");
+
+ c->icache.linesz = lsize ? 2 << lsize : 0;
+
+ c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
+ c->icache.ways = 1 + ((config1 >> 16) & 7);
+
+ icache_size = c->icache.sets *
+ c->icache.ways *
+ c->icache.linesz;
+ c->icache.waybit = __ffs(icache_size/c->icache.ways);
+
+ if (config & MIPS_CONF_VI)
+ c->icache.flags |= MIPS_CACHE_VTAG;
+
+ /*
+ * Now probe the MIPS32 / MIPS64 data cache.
+ */
+ c->dcache.flags = 0;
+
+ lsize = (config1 >> 10) & 7;
+
+ /* DL == 7 is reserved */
+ if (lsize == 7)
+ panic("Invalid dcache line size");
+
+ c->dcache.linesz = lsize ? 2 << lsize : 0;
+
+ c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
+ c->dcache.ways = 1 + ((config1 >> 7) & 7);
+
+ dcache_size = c->dcache.sets *
+ c->dcache.ways *
+ c->dcache.linesz;
+ c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
+
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+ }
+
+ /*
+ * Processor configuration sanity check for the R4000SC erratum
+ * #5. With page sizes larger than 32kB there is no possibility
+ * of getting a VCE exception anymore, so we don't care about this
+ * misconfiguration.  The case is rather theoretical anyway;
+ * presumably no vendor is shipping such hardware in the "bad"
+ * configuration.
+ */
+ if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
+ (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
+ !(config & CONF_SC) && c->icache.linesz != 16 &&
+ PAGE_SIZE <= 0x8000)
+ panic("Improper R4000SC processor configuration detected");
+
+ /* compute a couple of other cache variables */
+ c->icache.waysize = icache_size / c->icache.ways;
+ c->dcache.waysize = dcache_size / c->dcache.ways;
+
+ c->icache.sets = c->icache.linesz ?
+ icache_size / (c->icache.linesz * c->icache.ways) : 0;
+ c->dcache.sets = c->dcache.linesz ?
+ dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
+
+ /*
+ * R1x000 P-caches are odd in a positive way. They're 32kB 2-way
+ * virtually indexed, so they would normally suffer from aliases,
+ * but magic in the hardware deals with that for us so we don't
+ * need to take care of it ourselves.
+ */
+ switch (current_cpu_type()) {
+ case CPU_20KC:
+ case CPU_25KF:
+ case CPU_I6400:
+ case CPU_I6500:
+ case CPU_SB1:
+ case CPU_SB1A:
+ case CPU_XLR:
+ c->dcache.flags |= MIPS_CACHE_PINDEX;
+ break;
+
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
+ case CPU_R16000:
+ break;
+
+ case CPU_74K:
+ case CPU_1074K:
+ has_74k_erratum = alias_74k_erratum(c);
+ fallthrough;
+ case CPU_M14KC:
+ case CPU_M14KEC:
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_1004K:
+ case CPU_INTERAPTIV:
+ case CPU_P5600:
+ case CPU_PROAPTIV:
+ case CPU_M5150:
+ case CPU_QEMU_GENERIC:
+ case CPU_P6600:
+ case CPU_M6250:
+ if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
+ (c->icache.waysize > PAGE_SIZE))
+ c->icache.flags |= MIPS_CACHE_ALIASES;
+ if (!has_74k_erratum && (read_c0_config7() & MIPS_CONF7_AR)) {
+ /*
+ * Effectively physically indexed dcache,
+ * thus no virtual aliases.
+ */
+ c->dcache.flags |= MIPS_CACHE_PINDEX;
+ break;
+ }
+ fallthrough;
+ default:
+ if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
+ c->dcache.flags |= MIPS_CACHE_ALIASES;
+ }
+
+ /* Physically indexed caches don't suffer from virtual aliasing */
+ if (c->dcache.flags & MIPS_CACHE_PINDEX)
+ c->dcache.flags &= ~MIPS_CACHE_ALIASES;
+
+ /*
+ * In systems with CM the icache fills from L2 or closer caches, and
+ * thus sees remote stores without needing to write them back any
+ * further than that.
+ */
+ if (mips_cm_present())
+ c->icache.flags |= MIPS_IC_SNOOPS_REMOTE;
+
+ switch (current_cpu_type()) {
+ case CPU_20KC:
+ /*
+ * Some older 20Kc chips don't have the 'VI' bit in
+ * the config register.
+ */
+ c->icache.flags |= MIPS_CACHE_VTAG;
+ break;
+
+ case CPU_ALCHEMY:
+ case CPU_I6400:
+ case CPU_I6500:
+ c->icache.flags |= MIPS_CACHE_IC_F_DC;
+ break;
+
+ case CPU_BMIPS5000:
+ c->icache.flags |= MIPS_CACHE_IC_F_DC;
+ /* Cache aliases are handled in hardware; allow HIGHMEM */
+ c->dcache.flags &= ~MIPS_CACHE_ALIASES;
+ break;
+
+ case CPU_LOONGSON2EF:
+ /*
+ * LOONGSON2 has a 4-way icache, but when using an indexed cache op,
+ * one op will act on all 4 ways.
+ */
+ c->icache.ways = 1;
+ }
+
+ pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
+ icache_size >> 10,
+ c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
+ way_string[c->icache.ways], c->icache.linesz);
+
+ pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
+ dcache_size >> 10, way_string[c->dcache.ways],
+ (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
+ (c->dcache.flags & MIPS_CACHE_ALIASES) ?
+ "cache aliases" : "no aliases",
+ c->dcache.linesz);
+}
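+
+/*
+ * Decode example for the MIPS32/MIPS64 path above (field values are
+ * illustrative): a Config1 with IS=2, IL=4 and IA=3 gives
+ * 32 << ((2 + 1) & 7) = 256 sets, a 2 << 4 = 32 byte line and
+ * 1 + 3 = 4 ways, i.e. a 32 KB 4-way I-cache, and waybit becomes
+ * __ffs(32768 / 4) = 13.
+ */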
+
+static void probe_vcache(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int config2, lsize;
+
+ if (current_cpu_type() != CPU_LOONGSON64)
+ return;
+
+ config2 = read_c0_config2();
+ if ((lsize = ((config2 >> 20) & 15)))
+ c->vcache.linesz = 2 << lsize;
+ else
+ c->vcache.linesz = lsize;
+
+ c->vcache.sets = 64 << ((config2 >> 24) & 15);
+ c->vcache.ways = 1 + ((config2 >> 16) & 15);
+
+ vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
+
+ c->vcache.waybit = 0;
+ c->vcache.waysize = vcache_size / c->vcache.ways;
+
+ pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
+ vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
+}
+
+/*
+ * If you even _breathe_ on this function, look at the gcc output and make sure
+ * it does not pop things on and off the stack for the cache sizing loop that
+ * executes in KSEG1 space or else you will crash and burn badly. You have
+ * been warned.
+ */
+static int probe_scache(void)
+{
+ unsigned long flags, addr, begin, end, pow2;
+ unsigned int config = read_c0_config();
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ if (config & CONF_SC)
+ return 0;
+
+ begin = (unsigned long) &_stext;
+ begin &= ~((4 * 1024 * 1024) - 1);
+ end = begin + (4 * 1024 * 1024);
+
+ /*
+ * This is such a bitch, you'd think they would make it easy to do
+ * this. Away you daemons of stupidity!
+ */
+ local_irq_save(flags);
+
+ /* Fill each size-multiple cache line with a valid tag. */
+ pow2 = (64 * 1024);
+ for (addr = begin; addr < end; addr = (begin + pow2)) {
+ unsigned long *p = (unsigned long *) addr;
+ __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
+ pow2 <<= 1;
+ }
+
+ /* Load first line with zero (therefore invalid) tag. */
+ write_c0_taglo(0);
+ write_c0_taghi(0);
+ __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
+ cache_op(Index_Store_Tag_I, begin);
+ cache_op(Index_Store_Tag_D, begin);
+ cache_op(Index_Store_Tag_SD, begin);
+
+ /* Now search for the wrap around point. */
+ pow2 = (128 * 1024);
+ for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
+ cache_op(Index_Load_Tag_SD, addr);
+ __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
+ if (!read_c0_taglo())
+ break;
+ pow2 <<= 1;
+ }
+ local_irq_restore(flags);
+ addr -= begin;
+
+ scache_size = addr;
+ c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
+ c->scache.ways = 1;
+ c->scache.waybit = 0; /* does not matter */
+
+ return 1;
+}
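+
+/*
+ * What the wrap-around search above detects, on an illustrative 512 KB
+ * S-cache: the fill loop plants valid tags at begin, begin + 64 KB,
+ * begin + 128 KB, ..., then a zero tag is stored at the index of
+ * "begin".  Index_Load_Tag_SD at begin + 128 KB and begin + 256 KB
+ * still read back non-zero tags, but begin + 512 KB indexes the same
+ * line as "begin" and returns the zero tag, so the search stops and
+ * scache_size ends up as 512 KB.
+ */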
+
+static void loongson2_sc_init(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ scache_size = 512*1024;
+ c->scache.linesz = 32;
+ c->scache.ways = 4;
+ c->scache.waybit = 0;
+ c->scache.waysize = scache_size / (c->scache.ways);
+ c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
+ pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
+ scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
+
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
+}
+
+static void loongson3_sc_init(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int config2, lsize;
+
+ config2 = read_c0_config2();
+ lsize = (config2 >> 4) & 15;
+ if (lsize)
+ c->scache.linesz = 2 << lsize;
+ else
+ c->scache.linesz = 0;
+ c->scache.sets = 64 << ((config2 >> 8) & 15);
+ c->scache.ways = 1 + (config2 & 15);
+
+ scache_size = c->scache.sets *
+ c->scache.ways *
+ c->scache.linesz;
+
+ /* Loongson-3 has 4 S-cache banks, while Loongson-2K has only 2 banks */
+ if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
+ scache_size *= 2;
+ else
+ scache_size *= 4;
+
+ c->scache.waybit = 0;
+ c->scache.waysize = scache_size / c->scache.ways;
+ pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
+ scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
+ if (scache_size)
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
+ return;
+}
+
+extern int r5k_sc_init(void);
+extern int rm7k_sc_init(void);
+extern int mips_sc_init(void);
+
+static void setup_scache(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int config = read_c0_config();
+ int sc_present = 0;
+
+ /*
+ * Do the probing thing on R4000SC and R4400SC processors. Other
+ * processors don't have an S-cache that would be relevant to
+ * Linux memory management.
+ */
+ switch (current_cpu_type()) {
+ case CPU_R4000SC:
+ case CPU_R4000MC:
+ case CPU_R4400SC:
+ case CPU_R4400MC:
+ sc_present = run_uncached(probe_scache);
+ if (sc_present)
+ c->options |= MIPS_CPU_CACHE_CDEX_S;
+ break;
+
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
+ case CPU_R16000:
+ scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
+ c->scache.linesz = 64 << ((config >> 13) & 1);
+ c->scache.ways = 2;
+ c->scache.waybit= 0;
+ sc_present = 1;
+ break;
+
+ case CPU_R5000:
+ case CPU_NEVADA:
+#ifdef CONFIG_R5000_CPU_SCACHE
+ r5k_sc_init();
+#endif
+ return;
+
+ case CPU_RM7000:
+#ifdef CONFIG_RM7000_CPU_SCACHE
+ rm7k_sc_init();
+#endif
+ return;
+
+ case CPU_LOONGSON2EF:
+ loongson2_sc_init();
+ return;
+
+ case CPU_LOONGSON64:
+ loongson3_sc_init();
+ return;
+
+ case CPU_CAVIUM_OCTEON3:
+ case CPU_XLP:
+ /* don't need to worry about L2, fully coherent */
+ return;
+
+ default:
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
+#ifdef CONFIG_MIPS_CPU_SCACHE
+ if (mips_sc_init ()) {
+ scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
+ printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
+ scache_size >> 10,
+ way_string[c->scache.ways], c->scache.linesz);
+
+ if (current_cpu_type() == CPU_BMIPS5000)
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
+ }
+
+#else
+ if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
+ panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
+#endif
+ return;
+ }
+ sc_present = 0;
+ }
+
+ if (!sc_present)
+ return;
+
+ /* compute a couple of other cache variables */
+ c->scache.waysize = scache_size / c->scache.ways;
+
+ c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
+
+ printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
+ scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
+
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
+}
+
+void au1x00_fixup_config_od(void)
+{
+ /*
+ * c0_config.od (bit 19) was write only (and read as 0)
+ * on early revisions of Alchemy SOCs.  It disables bus
+ * transaction overlapping and needs to be set to fix various errata.
+ */
+ switch (read_c0_prid()) {
+ case 0x00030100: /* Au1000 DA */
+ case 0x00030201: /* Au1000 HA */
+ case 0x00030202: /* Au1000 HB */
+ case 0x01030200: /* Au1500 AB */
+ /*
+ * The Au1100 errata are actually silent about this bit, so we set it
+ * just in case for those revisions that required it to be set
+ * according to the (now gone) cpu table.
+ */
+ case 0x02030200: /* Au1100 AB */
+ case 0x02030201: /* Au1100 BA */
+ case 0x02030202: /* Au1100 BC */
+ set_c0_config(1 << 19);
+ break;
+ }
+}
+
+/* CP0 hazard avoidance. */
+#define NXP_BARRIER() \
+ __asm__ __volatile__( \
+ ".set noreorder\n\t" \
+ "nop; nop; nop; nop; nop; nop;\n\t" \
+ ".set reorder\n\t")
+
+static void nxp_pr4450_fixup_config(void)
+{
+ unsigned long config0;
+
+ config0 = read_c0_config();
+
+ /* clear all three cache coherency fields */
+ config0 &= ~(0x7 | (7 << 25) | (7 << 28));
+ config0 |= (((_page_cachable_default >> _CACHE_SHIFT) << 0) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << 28));
+ write_c0_config(config0);
+ NXP_BARRIER();
+}
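+
+/*
+ * Illustrative effect: if _page_cachable_default encodes CCA 3, the
+ * write above sets K0 (Config bits 2:0) and the two coherency fields
+ * at bits 27:25 and 30:28 all to 3, so every address segment ends up
+ * using the same cacheability attribute.
+ */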
+
+static int cca = -1;
+
+static int __init cca_setup(char *str)
+{
+ get_option(&str, &cca);
+
+ return 0;
+}
+
+early_param("cca", cca_setup);
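+
+/*
+ * Example (the value is illustrative): booting with "cca=3" on the
+ * kernel command line overrides the attribute probed from c0_config,
+ * and coherency_setup() below then installs CCA 3 both into
+ * _page_cachable_default and into the Config K0 field; what each CCA
+ * value means is implementation dependent.
+ */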
+
+static void coherency_setup(void)
+{
+ if (cca < 0 || cca > 7)
+ cca = read_c0_config() & CONF_CM_CMASK;
+ _page_cachable_default = cca << _CACHE_SHIFT;
+
+ pr_debug("Using cache attribute %d\n", cca);
+ change_c0_config(CONF_CM_CMASK, cca);
+
+ /*
+ * c0_status.cu=0 specifies that updates by the sc instruction use
+ * the coherency mode specified by the TLB; 1 means cachable
+ * coherent update on write will be used. Not all processors have
+ * this bit; some wire it to zero, others, like Toshiba, had the
+ * silly idea of putting something else there ...
+ */
+ switch (current_cpu_type()) {
+ case CPU_R4000PC:
+ case CPU_R4000SC:
+ case CPU_R4000MC:
+ case CPU_R4400PC:
+ case CPU_R4400SC:
+ case CPU_R4400MC:
+ clear_c0_config(CONF_CU);
+ break;
+ /*
+ * We need to catch the early Alchemy SOCs with
+ * the write-only c0_config.od bit and set it back to one on:
+ * Au1000 rev DA, HA, HB; Au1100 AB, BA, BC; Au1500 AB
+ */
+ case CPU_ALCHEMY:
+ au1x00_fixup_config_od();
+ break;
+
+ case PRID_IMP_PR4450:
+ nxp_pr4450_fixup_config();
+ break;
+ }
+}
+
+static void r4k_cache_error_setup(void)
+{
+ extern char __weak except_vec2_generic;
+ extern char __weak except_vec2_sb1;
+
+ switch (current_cpu_type()) {
+ case CPU_SB1:
+ case CPU_SB1A:
+ set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
+ break;
+
+ default:
+ set_uncached_handler(0x100, &except_vec2_generic, 0x80);
+ break;
+ }
+}
+
+void r4k_cache_init(void)
+{
+ extern void build_clear_page(void);
+ extern void build_copy_page(void);
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ probe_pcache();
+ probe_vcache();
+ setup_scache();
+
+ r4k_blast_dcache_page_setup();
+ r4k_blast_dcache_page_indexed_setup();
+ r4k_blast_dcache_setup();
+ r4k_blast_icache_page_setup();
+ r4k_blast_icache_page_indexed_setup();
+ r4k_blast_icache_setup();
+ r4k_blast_scache_page_setup();
+ r4k_blast_scache_page_indexed_setup();
+ r4k_blast_scache_setup();
+ r4k_blast_scache_node_setup();
+#ifdef CONFIG_EVA
+ r4k_blast_dcache_user_page_setup();
+ r4k_blast_icache_user_page_setup();
+#endif
+
+ /*
+ * Some MIPS32 and MIPS64 processors have physically indexed caches.
+ * This code supports virtually indexed processors and will be
+ * unnecessarily inefficient on physically indexed processors.
+ */
+ if (c->dcache.linesz && cpu_has_dc_aliases)
+ shm_align_mask = max_t( unsigned long,
+ c->dcache.sets * c->dcache.linesz - 1,
+ PAGE_SIZE - 1);
+ else
+ shm_align_mask = PAGE_SIZE-1;
+
+ __flush_cache_vmap = r4k__flush_cache_vmap;
+ __flush_cache_vunmap = r4k__flush_cache_vunmap;
+
+ flush_cache_all = cache_noop;
+ __flush_cache_all = r4k___flush_cache_all;
+ flush_cache_mm = r4k_flush_cache_mm;
+ flush_cache_page = r4k_flush_cache_page;
+ flush_cache_range = r4k_flush_cache_range;
+
+ __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
+
+ flush_icache_all = r4k_flush_icache_all;
+ local_flush_data_cache_page = local_r4k_flush_data_cache_page;
+ flush_data_cache_page = r4k_flush_data_cache_page;
+ flush_icache_range = r4k_flush_icache_range;
+ local_flush_icache_range = local_r4k_flush_icache_range;
+ __flush_icache_user_range = r4k_flush_icache_user_range;
+ __local_flush_icache_user_range = local_r4k_flush_icache_user_range;
+
+#ifdef CONFIG_DMA_NONCOHERENT
+#ifdef CONFIG_DMA_MAYBE_COHERENT
+ if (coherentio == IO_COHERENCE_ENABLED ||
+ (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio)) {
+ _dma_cache_wback_inv = (void *)cache_noop;
+ _dma_cache_wback = (void *)cache_noop;
+ _dma_cache_inv = (void *)cache_noop;
+ } else
+#endif /* CONFIG_DMA_MAYBE_COHERENT */
+ {
+ _dma_cache_wback_inv = r4k_dma_cache_wback_inv;
+ _dma_cache_wback = r4k_dma_cache_wback_inv;
+ _dma_cache_inv = r4k_dma_cache_inv;
+ }
+#endif /* CONFIG_DMA_NONCOHERENT */
+
+ build_clear_page();
+ build_copy_page();
+
+ /*
+ * We want to run CMP kernels on cores with and without coherent
+ * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
+ * or not to flush caches.
+ */
+ local_r4k___flush_cache_all(NULL);
+
+ coherency_setup();
+ board_cache_error_setup = r4k_cache_error_setup;
+
+ /*
+ * Per-CPU overrides
+ */
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ /* No IPI is needed because all CPUs share the same D$ */
+ flush_data_cache_page = r4k_blast_dcache_page;
+ break;
+ case CPU_BMIPS5000:
+ /* We lose our superpowers if L2 is disabled */
+ if (c->scache.flags & MIPS_CACHE_NOT_PRESENT)
+ break;
+
+ /* I$ fills from D$ just by emptying the write buffers */
+ flush_cache_page = (void *)b5k_instruction_hazard;
+ flush_cache_range = (void *)b5k_instruction_hazard;
+ local_flush_data_cache_page = (void *)b5k_instruction_hazard;
+ flush_data_cache_page = (void *)b5k_instruction_hazard;
+ flush_icache_range = (void *)b5k_instruction_hazard;
+ local_flush_icache_range = (void *)b5k_instruction_hazard;
+
+
+ /* Optimization: an L2 flush implicitly flushes the L1 */
+ current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
+ break;
+ case CPU_LOONGSON64:
+ /* Loongson-3 maintains cache coherency by hardware */
+ __flush_cache_all = cache_noop;
+ __flush_cache_vmap = cache_noop;
+ __flush_cache_vunmap = cache_noop;
+ __flush_kernel_vmap_range = (void *)cache_noop;
+ flush_cache_mm = (void *)cache_noop;
+ flush_cache_page = (void *)cache_noop;
+ flush_cache_range = (void *)cache_noop;
+ flush_icache_all = (void *)cache_noop;
+ flush_data_cache_page = (void *)cache_noop;
+ local_flush_data_cache_page = (void *)cache_noop;
+ break;
+ }
+}
+
+static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ coherency_setup();
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block r4k_cache_pm_notifier_block = {
+ .notifier_call = r4k_cache_pm_notifier,
+};
+
+int __init r4k_cache_init_pm(void)
+{
+ return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
+}
+arch_initcall(r4k_cache_init_pm);
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
new file mode 100644
index 000000000..03dfbb40e
--- /dev/null
+++ b/arch/mips/mm/c-tx39.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r2300.c: R2000 and R3000 specific mmu/cache code.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ *
+ * with a lot of changes to make this thing work for R3000s
+ * Tx39XX R4k style caches added. HK
+ * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
+ * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+
+#include <asm/cacheops.h>
+#include <asm/page.h>
+#include <asm/mmu_context.h>
+#include <asm/isadep.h>
+#include <asm/io.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+
+/* For R3000 cores with R4000 style caches */
+static unsigned long icache_size, dcache_size; /* Size in bytes */
+
+#include <asm/r4kcache.h>
+
+/* This sequence is required to ensure icache is disabled immediately */
+#define TX39_STOP_STREAMING() \
+__asm__ __volatile__( \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ "b 1f\n\t" \
+ "nop\n\t" \
+ "1:\n\t" \
+ ".set pop" \
+ )
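+
+/*
+ * Several routines below disable the icache (clear TX39_CONF_ICE), stop
+ * instruction streaming, perform the cache ops and then restore the saved
+ * config value, all with interrupts disabled.
+ */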
+
+/* TX39H-style cache flush routines. */
+static void tx39h_flush_icache_all(void)
+{
+ unsigned long flags, config;
+
+ /* disable icache (set ICE#) */
+ local_irq_save(flags);
+ config = read_c0_conf();
+ write_c0_conf(config & ~TX39_CONF_ICE);
+ TX39_STOP_STREAMING();
+ blast_icache16();
+ write_c0_conf(config);
+ local_irq_restore(flags);
+}
+
+static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
+{
+ /* Catch bad driver code */
+ BUG_ON(size == 0);
+
+ iob();
+ blast_inv_dcache_range(addr, addr + size);
+}
+
+
+/* TX39H2,TX39H3 */
+static inline void tx39_blast_dcache_page(unsigned long addr)
+{
+ if (current_cpu_type() != CPU_TX3912)
+ blast_dcache16_page(addr);
+}
+
+static inline void tx39_blast_dcache_page_indexed(unsigned long addr)
+{
+ blast_dcache16_page_indexed(addr);
+}
+
+static inline void tx39_blast_dcache(void)
+{
+ blast_dcache16();
+}
+
+static inline void tx39_blast_icache_page(unsigned long addr)
+{
+ unsigned long flags, config;
+ /* disable icache (set ICE#) */
+ local_irq_save(flags);
+ config = read_c0_conf();
+ write_c0_conf(config & ~TX39_CONF_ICE);
+ TX39_STOP_STREAMING();
+ blast_icache16_page(addr);
+ write_c0_conf(config);
+ local_irq_restore(flags);
+}
+
+static inline void tx39_blast_icache_page_indexed(unsigned long addr)
+{
+ unsigned long flags, config;
+ /* disable icache (set ICE#) */
+ local_irq_save(flags);
+ config = read_c0_conf();
+ write_c0_conf(config & ~TX39_CONF_ICE);
+ TX39_STOP_STREAMING();
+ blast_icache16_page_indexed(addr);
+ write_c0_conf(config);
+ local_irq_restore(flags);
+}
+
+static inline void tx39_blast_icache(void)
+{
+ unsigned long flags, config;
+ /* disable icache (set ICE#) */
+ local_irq_save(flags);
+ config = read_c0_conf();
+ write_c0_conf(config & ~TX39_CONF_ICE);
+ TX39_STOP_STREAMING();
+ blast_icache16();
+ write_c0_conf(config);
+ local_irq_restore(flags);
+}
+
+static void tx39__flush_cache_vmap(void)
+{
+ tx39_blast_dcache();
+}
+
+static void tx39__flush_cache_vunmap(void)
+{
+ tx39_blast_dcache();
+}
+
+static inline void tx39_flush_cache_all(void)
+{
+ if (!cpu_has_dc_aliases)
+ return;
+
+ tx39_blast_dcache();
+}
+
+static inline void tx39___flush_cache_all(void)
+{
+ tx39_blast_dcache();
+ tx39_blast_icache();
+}
+
+static void tx39_flush_cache_mm(struct mm_struct *mm)
+{
+ if (!cpu_has_dc_aliases)
+ return;
+
+ if (cpu_context(smp_processor_id(), mm) != 0)
+ tx39_blast_dcache();
+}
+
+static void tx39_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (!cpu_has_dc_aliases)
+ return;
+ if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
+ return;
+
+ tx39_blast_dcache();
+}
+
+static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
+{
+ int exec = vma->vm_flags & VM_EXEC;
+ struct mm_struct *mm = vma->vm_mm;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ /*
+ * If the mm owns no valid ASID yet, it cannot possibly have gotten
+ * this page into the cache.
+ */
+ if (cpu_context(smp_processor_id(), mm) == 0)
+ return;
+
+ page &= PAGE_MASK;
+ pmdp = pmd_off(mm, page);
+ ptep = pte_offset_kernel(pmdp, page);
+
+ /*
+ * If the page isn't marked valid, the page cannot possibly be
+ * in the cache.
+ */
+ if (!(pte_val(*ptep) & _PAGE_PRESENT))
+ return;
+
+ /*
+ * Doing flushes for an ASID other than the current one is
+ * too difficult, since stupid R4k caches do a TLB translation
+ * for every cache flush operation. So we do indexed flushes
+ * in that case, which doesn't over-flush the cache too much.
+ */
+ if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
+ if (cpu_has_dc_aliases || exec)
+ tx39_blast_dcache_page(page);
+ if (exec)
+ tx39_blast_icache_page(page);
+
+ return;
+ }
+
+ /*
+ * Do indexed flush, too much work to get the (possible) TLB refills
+ * to work correctly.
+ */
+ if (cpu_has_dc_aliases || exec)
+ tx39_blast_dcache_page_indexed(page);
+ if (exec)
+ tx39_blast_icache_page_indexed(page);
+}
+
+static void local_tx39_flush_data_cache_page(void * addr)
+{
+ tx39_blast_dcache_page((unsigned long)addr);
+}
+
+static void tx39_flush_data_cache_page(unsigned long addr)
+{
+ tx39_blast_dcache_page(addr);
+}
+
+static void tx39_flush_icache_range(unsigned long start, unsigned long end)
+{
+ if (end - start > dcache_size)
+ tx39_blast_dcache();
+ else
+ protected_blast_dcache_range(start, end);
+
+ if (end - start > icache_size)
+ tx39_blast_icache();
+ else {
+ unsigned long flags, config;
+ /* disable icache (set ICE#) */
+ local_irq_save(flags);
+ config = read_c0_conf();
+ write_c0_conf(config & ~TX39_CONF_ICE);
+ TX39_STOP_STREAMING();
+ protected_blast_icache_range(start, end);
+ write_c0_conf(config);
+ local_irq_restore(flags);
+ }
+}
+
+static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size)
+{
+ BUG();
+}
+
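+/*
+ * DMA maintenance: page-aligned, page-multiple buffers are flushed page by
+ * page, buffers larger than the whole D-cache are handled by blasting the
+ * entire cache, and everything else uses a ranged flush.
+ */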
+static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
+{
+ unsigned long end;
+
+ if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
+ end = addr + size;
+ do {
+ tx39_blast_dcache_page(addr);
+ addr += PAGE_SIZE;
+ } while(addr != end);
+ } else if (size > dcache_size) {
+ tx39_blast_dcache();
+ } else {
+ blast_dcache_range(addr, addr + size);
+ }
+}
+
+static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
+{
+ unsigned long end;
+
+ if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
+ end = addr + size;
+ do {
+ tx39_blast_dcache_page(addr);
+ addr += PAGE_SIZE;
+ } while(addr != end);
+ } else if (size > dcache_size) {
+ tx39_blast_dcache();
+ } else {
+ blast_inv_dcache_range(addr, addr + size);
+ }
+}
+
+static __init void tx39_probe_cache(void)
+{
+ unsigned long config;
+
+ config = read_c0_conf();
+
+ icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >>
+ TX39_CONF_ICS_SHIFT));
+ dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >>
+ TX39_CONF_DCS_SHIFT));
+
+ current_cpu_data.icache.linesz = 16;
+ switch (current_cpu_type()) {
+ case CPU_TX3912:
+ current_cpu_data.icache.ways = 1;
+ current_cpu_data.dcache.ways = 1;
+ current_cpu_data.dcache.linesz = 4;
+ break;
+
+ case CPU_TX3927:
+ current_cpu_data.icache.ways = 2;
+ current_cpu_data.dcache.ways = 2;
+ current_cpu_data.dcache.linesz = 16;
+ break;
+
+ case CPU_TX3922:
+ default:
+ current_cpu_data.icache.ways = 1;
+ current_cpu_data.dcache.ways = 1;
+ current_cpu_data.dcache.linesz = 16;
+ break;
+ }
+}
+
+void tx39_cache_init(void)
+{
+ extern void build_clear_page(void);
+ extern void build_copy_page(void);
+ unsigned long config;
+
+ config = read_c0_conf();
+ config &= ~TX39_CONF_WBON;
+ write_c0_conf(config);
+
+ tx39_probe_cache();
+
+ switch (current_cpu_type()) {
+ case CPU_TX3912:
+ /* TX39/H core (writethru direct-map cache) */
+ __flush_cache_vmap = tx39__flush_cache_vmap;
+ __flush_cache_vunmap = tx39__flush_cache_vunmap;
+ flush_cache_all = tx39h_flush_icache_all;
+ __flush_cache_all = tx39h_flush_icache_all;
+ flush_cache_mm = (void *) tx39h_flush_icache_all;
+ flush_cache_range = (void *) tx39h_flush_icache_all;
+ flush_cache_page = (void *) tx39h_flush_icache_all;
+ flush_icache_range = (void *) tx39h_flush_icache_all;
+ local_flush_icache_range = (void *) tx39h_flush_icache_all;
+
+ local_flush_data_cache_page = (void *) tx39h_flush_icache_all;
+ flush_data_cache_page = (void *) tx39h_flush_icache_all;
+
+ _dma_cache_wback_inv = tx39h_dma_cache_wback_inv;
+
+ shm_align_mask = PAGE_SIZE - 1;
+
+ break;
+
+ case CPU_TX3922:
+ case CPU_TX3927:
+ default:
+ /* TX39/H2,H3 core (writeback 2way-set-associative cache) */
+ /* board-dependent init code may set WBON */
+
+ __flush_cache_vmap = tx39__flush_cache_vmap;
+ __flush_cache_vunmap = tx39__flush_cache_vunmap;
+
+ flush_cache_all = tx39_flush_cache_all;
+ __flush_cache_all = tx39___flush_cache_all;
+ flush_cache_mm = tx39_flush_cache_mm;
+ flush_cache_range = tx39_flush_cache_range;
+ flush_cache_page = tx39_flush_cache_page;
+ flush_icache_range = tx39_flush_icache_range;
+ local_flush_icache_range = tx39_flush_icache_range;
+
+ __flush_kernel_vmap_range = tx39_flush_kernel_vmap_range;
+
+ local_flush_data_cache_page = local_tx39_flush_data_cache_page;
+ flush_data_cache_page = tx39_flush_data_cache_page;
+
+ _dma_cache_wback_inv = tx39_dma_cache_wback_inv;
+ _dma_cache_wback = tx39_dma_cache_wback_inv;
+ _dma_cache_inv = tx39_dma_cache_inv;
+
+ shm_align_mask = max_t(unsigned long,
+ (dcache_size / current_cpu_data.dcache.ways) - 1,
+ PAGE_SIZE - 1);
+
+ break;
+ }
+
+ __flush_icache_user_range = flush_icache_range;
+ __local_flush_icache_user_range = local_flush_icache_range;
+
+ current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
+ current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;
+
+ current_cpu_data.icache.sets =
+ current_cpu_data.icache.waysize / current_cpu_data.icache.linesz;
+ current_cpu_data.dcache.sets =
+ current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz;
+
+ if (current_cpu_data.dcache.waysize > PAGE_SIZE)
+ current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES;
+
+ current_cpu_data.icache.waybit = 0;
+ current_cpu_data.dcache.waybit = 0;
+
+ pr_info("Primary instruction cache %ldkB, linesize %d bytes\n",
+ icache_size >> 10, current_cpu_data.icache.linesz);
+ pr_info("Primary data cache %ldkB, linesize %d bytes\n",
+ dcache_size >> 10, current_cpu_data.dcache.linesz);
+
+ build_clear_page();
+ build_copy_page();
+ tx39h_flush_icache_all();
+}
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
new file mode 100644
index 000000000..3e81ba000
--- /dev/null
+++ b/arch/mips/mm/cache.c
@@ -0,0 +1,242 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ */
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+
+#include <asm/cacheflush.h>
+#include <asm/processor.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/setup.h>
+
+/* Cache operations. */
+void (*flush_cache_all)(void);
+void (*__flush_cache_all)(void);
+EXPORT_SYMBOL_GPL(__flush_cache_all);
+void (*flush_cache_mm)(struct mm_struct *mm);
+void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
+ unsigned long pfn);
+void (*flush_icache_range)(unsigned long start, unsigned long end);
+EXPORT_SYMBOL_GPL(flush_icache_range);
+void (*local_flush_icache_range)(unsigned long start, unsigned long end);
+EXPORT_SYMBOL_GPL(local_flush_icache_range);
+void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
+void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
+EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);
+
+void (*__flush_cache_vmap)(void);
+void (*__flush_cache_vunmap)(void);
+
+void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
+EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
+
+/* MIPS specific cache operations */
+void (*local_flush_data_cache_page)(void * addr);
+void (*flush_data_cache_page)(unsigned long addr);
+void (*flush_icache_all)(void);
+
+EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
+EXPORT_SYMBOL(flush_data_cache_page);
+EXPORT_SYMBOL(flush_icache_all);
+
+#ifdef CONFIG_DMA_NONCOHERENT
+
+/* DMA cache operations. */
+void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
+void (*_dma_cache_wback)(unsigned long start, unsigned long size);
+void (*_dma_cache_inv)(unsigned long start, unsigned long size);
+
+#endif /* CONFIG_DMA_NONCOHERENT */
+
+/*
+ * We could optimize the case where the cache argument is not BCACHE, but
+ * that seems a very atypical use; for now the cache argument is ignored
+ * and the whole range is pushed through __flush_icache_user_range() ...
+ */
+SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
+ unsigned int, cache)
+{
+ if (bytes == 0)
+ return 0;
+ if (!access_ok((void __user *) addr, bytes))
+ return -EFAULT;
+
+ __flush_icache_user_range(addr, addr + bytes);
+
+ return 0;
+}
+
+void __flush_dcache_page(struct page *page)
+{
+ struct address_space *mapping = page_mapping_file(page);
+ unsigned long addr;
+
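+ /*
+ * If the page belongs to a file mapping that is not currently mapped
+ * into any user address space, defer the flush: mark the page
+ * dcache-dirty and let __update_cache() flush it when it is mapped.
+ */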
+ if (mapping && !mapping_mapped(mapping)) {
+ SetPageDcacheDirty(page);
+ return;
+ }
+
+ /*
+ * We could delay the flush for the !page_mapping case too. But that
+ * case is for exec env/arg pages and those are 99% certain to
+ * get faulted into the tlb (and thus flushed) anyway.
+ */
+ if (PageHighMem(page))
+ addr = (unsigned long)kmap_atomic(page);
+ else
+ addr = (unsigned long)page_address(page);
+
+ flush_data_cache_page(addr);
+
+ if (PageHighMem(page))
+ kunmap_atomic((void *)addr);
+}
+
+EXPORT_SYMBOL(__flush_dcache_page);
+
+void __flush_anon_page(struct page *page, unsigned long vmaddr)
+{
+ unsigned long addr = (unsigned long) page_address(page);
+
+ if (pages_do_alias(addr, vmaddr)) {
+ if (page_mapcount(page) && !Page_dcache_dirty(page)) {
+ void *kaddr;
+
+ kaddr = kmap_coherent(page, vmaddr);
+ flush_data_cache_page((unsigned long)kaddr);
+ kunmap_coherent();
+ } else
+ flush_data_cache_page(addr);
+ }
+}
+
+EXPORT_SYMBOL(__flush_anon_page);
+
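+/*
+ * Called when a PTE is installed: perform any D-cache flush that
+ * __flush_dcache_page() deferred, but only if the page is executable or the
+ * kernel and user virtual addresses alias in the cache.
+ */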
+void __update_cache(unsigned long address, pte_t pte)
+{
+ struct page *page;
+ unsigned long pfn, addr;
+ int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;
+
+ pfn = pte_pfn(pte);
+ if (unlikely(!pfn_valid(pfn)))
+ return;
+ page = pfn_to_page(pfn);
+ if (Page_dcache_dirty(page)) {
+ if (PageHighMem(page))
+ addr = (unsigned long)kmap_atomic(page);
+ else
+ addr = (unsigned long)page_address(page);
+
+ if (exec || pages_do_alias(addr, address & PAGE_MASK))
+ flush_data_cache_page(addr);
+
+ if (PageHighMem(page))
+ kunmap_atomic((void *)addr);
+
+ ClearPageDcacheDirty(page);
+ }
+}
+
+unsigned long _page_cachable_default;
+EXPORT_SYMBOL(_page_cachable_default);
+
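+/*
+ * protection_map[] is indexed by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits
+ * of vm_flags: entries 0-7 describe private (copy-on-write) mappings and
+ * entries 8-15 their shared counterparts.
+ */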
+static inline void setup_protection_map(void)
+{
+ if (cpu_has_rixi) {
+ protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+ protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+ protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+ protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+
+ protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
+ protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+ protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+ protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+ protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
+ protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
+
+ } else {
+ protection_map[0] = PAGE_NONE;
+ protection_map[1] = PAGE_READONLY;
+ protection_map[2] = PAGE_COPY;
+ protection_map[3] = PAGE_COPY;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+ protection_map[9] = PAGE_READONLY;
+ protection_map[10] = PAGE_SHARED;
+ protection_map[11] = PAGE_SHARED;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
+ protection_map[15] = PAGE_SHARED;
+ }
+}
+
+void cpu_cache_init(void)
+{
+ if (cpu_has_3k_cache) {
+ extern void __weak r3k_cache_init(void);
+
+ r3k_cache_init();
+ }
+ if (cpu_has_6k_cache) {
+ extern void __weak r6k_cache_init(void);
+
+ r6k_cache_init();
+ }
+ if (cpu_has_4k_cache) {
+ extern void __weak r4k_cache_init(void);
+
+ r4k_cache_init();
+ }
+ if (cpu_has_8k_cache) {
+ extern void __weak r8k_cache_init(void);
+
+ r8k_cache_init();
+ }
+ if (cpu_has_tx39_cache) {
+ extern void __weak tx39_cache_init(void);
+
+ tx39_cache_init();
+ }
+
+ if (cpu_has_octeon_cache) {
+ extern void __weak octeon_cache_init(void);
+
+ octeon_cache_init();
+ }
+
+ setup_protection_map();
+}
+
+int __weak __uncached_access(struct file *file, unsigned long addr)
+{
+ if (file->f_flags & O_DSYNC)
+ return 1;
+
+ return addr >= __pa(high_memory);
+}
diff --git a/arch/mips/mm/cerr-sb1.c b/arch/mips/mm/cerr-sb1.c
new file mode 100644
index 000000000..a3c02df19
--- /dev/null
+++ b/arch/mips/mm/cerr-sb1.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2001,2002,2003 Broadcom Corporation
+ */
+#include <linux/sched.h>
+#include <asm/mipsregs.h>
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+
+#if !defined(CONFIG_SIBYTE_BUS_WATCHER) || defined(CONFIG_SIBYTE_BW_TRACE)
+#include <asm/io.h>
+#include <asm/sibyte/sb1250_scd.h>
+#endif
+
+/*
+ * We'd like to dump the L2_ECC_TAG register on errors, but errata make
+ * that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
+ */
+#undef DUMP_L2_ECC_TAG_ON_ERROR
+
+/* SB1 definitions */
+
+/* XXX should come from config1 XXX */
+#define SB1_CACHE_INDEX_MASK 0x1fe0
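+/* Index bits 5-12: 256 sets of 32-byte lines per way (the way stride is 8K) */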
+
+#define CP0_ERRCTL_RECOVERABLE (1 << 31)
+#define CP0_ERRCTL_DCACHE (1 << 30)
+#define CP0_ERRCTL_ICACHE (1 << 29)
+#define CP0_ERRCTL_MULTIBUS (1 << 23)
+#define CP0_ERRCTL_MC_TLB (1 << 15)
+#define CP0_ERRCTL_MC_TIMEOUT (1 << 14)
+
+#define CP0_CERRI_TAG_PARITY (1 << 29)
+#define CP0_CERRI_DATA_PARITY (1 << 28)
+#define CP0_CERRI_EXTERNAL (1 << 26)
+
+#define CP0_CERRI_IDX_VALID(c) (!((c) & CP0_CERRI_EXTERNAL))
+#define CP0_CERRI_DATA (CP0_CERRI_DATA_PARITY)
+
+#define CP0_CERRD_MULTIPLE (1 << 31)
+#define CP0_CERRD_TAG_STATE (1 << 30)
+#define CP0_CERRD_TAG_ADDRESS (1 << 29)
+#define CP0_CERRD_DATA_SBE (1 << 28)
+#define CP0_CERRD_DATA_DBE (1 << 27)
+#define CP0_CERRD_EXTERNAL (1 << 26)
+#define CP0_CERRD_LOAD (1 << 25)
+#define CP0_CERRD_STORE (1 << 24)
+#define CP0_CERRD_FILLWB (1 << 23)
+#define CP0_CERRD_COHERENCY (1 << 22)
+#define CP0_CERRD_DUPTAG (1 << 21)
+
+#define CP0_CERRD_DPA_VALID(c) (!((c) & CP0_CERRD_EXTERNAL))
+#define CP0_CERRD_IDX_VALID(c) \
+ (((c) & (CP0_CERRD_LOAD | CP0_CERRD_STORE)) ? (!((c) & CP0_CERRD_EXTERNAL)) : 0)
+#define CP0_CERRD_CAUSES \
+ (CP0_CERRD_LOAD | CP0_CERRD_STORE | CP0_CERRD_FILLWB | CP0_CERRD_COHERENCY | CP0_CERRD_DUPTAG)
+#define CP0_CERRD_TYPES \
+ (CP0_CERRD_TAG_STATE | CP0_CERRD_TAG_ADDRESS | CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE | CP0_CERRD_EXTERNAL)
+#define CP0_CERRD_DATA (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
+
+static uint32_t extract_ic(unsigned short addr, int data);
+static uint32_t extract_dc(unsigned short addr, int data);
+
+static inline void breakout_errctl(unsigned int val)
+{
+ if (val & CP0_ERRCTL_RECOVERABLE)
+ printk(" recoverable");
+ if (val & CP0_ERRCTL_DCACHE)
+ printk(" dcache");
+ if (val & CP0_ERRCTL_ICACHE)
+ printk(" icache");
+ if (val & CP0_ERRCTL_MULTIBUS)
+ printk(" multiple-buserr");
+ printk("\n");
+}
+
+static inline void breakout_cerri(unsigned int val)
+{
+ if (val & CP0_CERRI_TAG_PARITY)
+ printk(" tag-parity");
+ if (val & CP0_CERRI_DATA_PARITY)
+ printk(" data-parity");
+ if (val & CP0_CERRI_EXTERNAL)
+ printk(" external");
+ printk("\n");
+}
+
+static inline void breakout_cerrd(unsigned int val)
+{
+ switch (val & CP0_CERRD_CAUSES) {
+ case CP0_CERRD_LOAD:
+ printk(" load,");
+ break;
+ case CP0_CERRD_STORE:
+ printk(" store,");
+ break;
+ case CP0_CERRD_FILLWB:
+ printk(" fill/wb,");
+ break;
+ case CP0_CERRD_COHERENCY:
+ printk(" coherency,");
+ break;
+ case CP0_CERRD_DUPTAG:
+ printk(" duptags,");
+ break;
+ default:
+ printk(" NO CAUSE,");
+ break;
+ }
+ if (!(val & CP0_CERRD_TYPES))
+ printk(" NO TYPE");
+ else {
+ if (val & CP0_CERRD_MULTIPLE)
+ printk(" multi-err");
+ if (val & CP0_CERRD_TAG_STATE)
+ printk(" tag-state");
+ if (val & CP0_CERRD_TAG_ADDRESS)
+ printk(" tag-address");
+ if (val & CP0_CERRD_DATA_SBE)
+ printk(" data-SBE");
+ if (val & CP0_CERRD_DATA_DBE)
+ printk(" data-DBE");
+ if (val & CP0_CERRD_EXTERNAL)
+ printk(" external");
+ }
+ printk("\n");
+}
+
+#ifndef CONFIG_SIBYTE_BUS_WATCHER
+
+static void check_bus_watcher(void)
+{
+ uint32_t status, l2_err, memio_err;
+#ifdef DUMP_L2_ECC_TAG_ON_ERROR
+ uint64_t l2_tag;
+#endif
+
+ /* Destructive read, clears register and interrupt */
+ status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS));
+ /* Bit 31 is always on, but there's no #define for that */
+ if (status & ~(1UL << 31)) {
+ l2_err = csr_in32(IOADDR(A_BUS_L2_ERRORS));
+#ifdef DUMP_L2_ECC_TAG_ON_ERROR
+ l2_tag = in64(IOADDR(A_L2_ECC_TAG));
+#endif
+ memio_err = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS));
+ printk("Bus watcher error counters: %08x %08x\n", l2_err, memio_err);
+ printk("\nLast recorded signature:\n");
+ printk("Request %02x from %d, answered by %d with Dcode %d\n",
+ (unsigned int)(G_SCD_BERR_TID(status) & 0x3f),
+ (int)(G_SCD_BERR_TID(status) >> 6),
+ (int)G_SCD_BERR_RID(status),
+ (int)G_SCD_BERR_DCODE(status));
+#ifdef DUMP_L2_ECC_TAG_ON_ERROR
+ printk("Last L2 tag w/ bad ECC: %016llx\n", l2_tag);
+#endif
+ } else {
+ printk("Bus watcher indicates no error\n");
+ }
+}
+#else
+extern void check_bus_watcher(void);
+#endif
+
+asmlinkage void sb1_cache_error(void)
+{
+ uint32_t errctl, cerr_i, cerr_d, dpalo, dpahi, eepc, res;
+ unsigned long long cerr_dpa;
+
+#ifdef CONFIG_SIBYTE_BW_TRACE
+ /* Freeze the trace buffer now */
+ csr_out32(M_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG));
+ printk("Trace buffer frozen\n");
+#endif
+
+ printk("Cache error exception on CPU %x:\n",
+ (read_c0_prid() >> 25) & 0x7);
+
+ __asm__ __volatile__ (
+ " .set push\n\t"
+ " .set mips64\n\t"
+ " .set noat\n\t"
+ " mfc0 %0, $26\n\t"
+ " mfc0 %1, $27\n\t"
+ " mfc0 %2, $27, 1\n\t"
+ " dmfc0 $1, $27, 3\n\t"
+ " dsrl32 %3, $1, 0 \n\t"
+ " sll %4, $1, 0 \n\t"
+ " mfc0 %5, $30\n\t"
+ " .set pop"
+ : "=r" (errctl), "=r" (cerr_i), "=r" (cerr_d),
+ "=r" (dpahi), "=r" (dpalo), "=r" (eepc));
+
+ cerr_dpa = (((uint64_t)dpahi) << 32) | dpalo;
+ printk(" c0_errorepc == %08x\n", eepc);
+ printk(" c0_errctl == %08x", errctl);
+ breakout_errctl(errctl);
+ if (errctl & CP0_ERRCTL_ICACHE) {
+ printk(" c0_cerr_i == %08x", cerr_i);
+ breakout_cerri(cerr_i);
+ if (CP0_CERRI_IDX_VALID(cerr_i)) {
+ /* Check index of EPC, allowing for delay slot */
+ if (((eepc & SB1_CACHE_INDEX_MASK) != (cerr_i & SB1_CACHE_INDEX_MASK)) &&
+ ((eepc & SB1_CACHE_INDEX_MASK) != ((cerr_i & SB1_CACHE_INDEX_MASK) - 4)))
+ printk(" cerr_i idx doesn't match eepc\n");
+ else {
+ res = extract_ic(cerr_i & SB1_CACHE_INDEX_MASK,
+ (cerr_i & CP0_CERRI_DATA) != 0);
+ if (!(res & cerr_i))
+ printk("...didn't see indicated icache problem\n");
+ }
+ }
+ }
+ if (errctl & CP0_ERRCTL_DCACHE) {
+ printk(" c0_cerr_d == %08x", cerr_d);
+ breakout_cerrd(cerr_d);
+ if (CP0_CERRD_DPA_VALID(cerr_d)) {
+ printk(" c0_cerr_dpa == %010llx\n", cerr_dpa);
+ if (!CP0_CERRD_IDX_VALID(cerr_d)) {
+ res = extract_dc(cerr_dpa & SB1_CACHE_INDEX_MASK,
+ (cerr_d & CP0_CERRD_DATA) != 0);
+ if (!(res & cerr_d))
+ printk("...didn't see indicated dcache problem\n");
+ } else {
+ if ((cerr_dpa & SB1_CACHE_INDEX_MASK) != (cerr_d & SB1_CACHE_INDEX_MASK))
+ printk(" cerr_d idx doesn't match cerr_dpa\n");
+ else {
+ res = extract_dc(cerr_d & SB1_CACHE_INDEX_MASK,
+ (cerr_d & CP0_CERRD_DATA) != 0);
+ if (!(res & cerr_d))
+ printk("...didn't see indicated problem\n");
+ }
+ }
+ }
+ }
+
+ check_bus_watcher();
+
+ /*
+ * Calling panic() when a fatal cache error occurs scrambles the
+ * state of the system (and the cache), making it difficult to
+ * investigate after the fact. However, if you just stall the CPU,
+ * the other CPU may keep on running, which is typically very
+ * undesirable.
+ */
+#ifdef CONFIG_SB1_CERR_STALL
+ while (1)
+ ;
+#else
+ panic("unhandled cache error");
+#endif
+}
+
+
+/* Parity lookup table: parity[b] is the parity (popcount mod 2) of byte value b. */
+static const uint8_t parity[256] = {
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0
+};
+
+/* Masks to select bits for Hamming parity, mask_72_64[i] for bit[i] */
+static const uint64_t mask_72_64[8] = {
+ 0x0738C808099264FFULL,
+ 0x38C808099264FF07ULL,
+ 0xC808099264FF0738ULL,
+ 0x08099264FF0738C8ULL,
+ 0x099264FF0738C808ULL,
+ 0x9264FF0738C80809ULL,
+ 0x64FF0738C8080992ULL,
+ 0xFF0738C808099264ULL
+};
+
+/* Calculate the parity on a range of bits */
+static char range_parity(uint64_t dword, int max, int min)
+{
+ char parity = 0;
+ int i;
+ dword >>= min;
+ for (i=max-min; i>=0; i--) {
+ if (dword & 0x1)
+ parity = !parity;
+ dword >>= 1;
+ }
+ return parity;
+}
+
+/* Calculate the 4-bit even byte-parity for an instruction */
+static unsigned char inst_parity(uint32_t word)
+{
+ int i, j;
+ char parity = 0;
+ for (j=0; j<4; j++) {
+ char byte_parity = 0;
+ for (i=0; i<8; i++) {
+ if (word & 0x80000000)
+ byte_parity = !byte_parity;
+ word <<= 1;
+ }
+ parity <<= 1;
+ parity |= byte_parity;
+ }
+ return parity;
+}
+
+static uint32_t extract_ic(unsigned short addr, int data)
+{
+ unsigned short way;
+ int valid;
+ uint32_t taghi, taglolo, taglohi;
+ unsigned long long taglo, va;
+ uint64_t tlo_tmp;
+ uint8_t lru;
+ int res = 0;
+
+ printk("Icache index 0x%04x ", addr);
+ for (way = 0; way < 4; way++) {
+ /* Index-load-tag-I */
+ __asm__ __volatile__ (
+ " .set push \n\t"
+ " .set noreorder \n\t"
+ " .set mips64 \n\t"
+ " .set noat \n\t"
+ " cache 4, 0(%3) \n\t"
+ " mfc0 %0, $29 \n\t"
+ " dmfc0 $1, $28 \n\t"
+ " dsrl32 %1, $1, 0 \n\t"
+ " sll %2, $1, 0 \n\t"
+ " .set pop"
+ : "=r" (taghi), "=r" (taglohi), "=r" (taglolo)
+ : "r" ((way << 13) | addr));
+
+ taglo = ((unsigned long long)taglohi << 32) | taglolo;
+ if (way == 0) {
+ lru = (taghi >> 14) & 0xff;
+ printk("[Bank %d Set 0x%02x] LRU > %d %d %d %d > MRU\n",
+ ((addr >> 5) & 0x3), /* bank */
+ ((addr >> 7) & 0x3f), /* index */
+ (lru & 0x3),
+ ((lru >> 2) & 0x3),
+ ((lru >> 4) & 0x3),
+ ((lru >> 6) & 0x3));
+ }
+ va = (taglo & 0xC0000FFFFFFFE000ULL) | addr;
+ if ((taglo & (1 << 31)) && (((taglo >> 62) & 0x3) == 3))
+ va |= 0x3FFFF00000000000ULL;
+ valid = ((taghi >> 29) & 1);
+ if (valid) {
+ tlo_tmp = taglo & 0xfff3ff;
+ if (((taglo >> 10) & 1) ^ range_parity(tlo_tmp, 23, 0)) {
+ printk(" ** bad parity in VTag0/G/ASID\n");
+ res |= CP0_CERRI_TAG_PARITY;
+ }
+ if (((taglo >> 11) & 1) ^ range_parity(taglo, 63, 24)) {
+ printk(" ** bad parity in R/VTag1\n");
+ res |= CP0_CERRI_TAG_PARITY;
+ }
+ }
+ if (valid ^ ((taghi >> 27) & 1)) {
+ printk(" ** bad parity for valid bit\n");
+ res |= CP0_CERRI_TAG_PARITY;
+ }
+ printk(" %d [VA %016llx] [Vld? %d] raw tags: %08X-%016llX\n",
+ way, va, valid, taghi, taglo);
+
+ if (data) {
+ uint32_t datahi, insta, instb;
+ uint8_t predecode;
+ int offset;
+
+ /* (hit all banks and ways) */
+ for (offset = 0; offset < 4; offset++) {
+ /* Index-load-data-I */
+ __asm__ __volatile__ (
+ " .set push\n\t"
+ " .set noreorder\n\t"
+ " .set mips64\n\t"
+ " .set noat\n\t"
+ " cache 6, 0(%3) \n\t"
+ " mfc0 %0, $29, 1\n\t"
+ " dmfc0 $1, $28, 1\n\t"
+ " dsrl32 %1, $1, 0 \n\t"
+ " sll %2, $1, 0 \n\t"
+ " .set pop \n"
+ : "=r" (datahi), "=r" (insta), "=r" (instb)
+ : "r" ((way << 13) | addr | (offset << 3)));
+ predecode = (datahi >> 8) & 0xff;
+ if (((datahi >> 16) & 1) != (uint32_t)range_parity(predecode, 7, 0)) {
+ printk(" ** bad parity in predecode\n");
+ res |= CP0_CERRI_DATA_PARITY;
+ }
+ /* XXXKW should/could check predecode bits themselves */
+ if (((datahi >> 4) & 0xf) ^ inst_parity(insta)) {
+ printk(" ** bad parity in instruction a\n");
+ res |= CP0_CERRI_DATA_PARITY;
+ }
+ if ((datahi & 0xf) ^ inst_parity(instb)) {
+ printk(" ** bad parity in instruction b\n");
+ res |= CP0_CERRI_DATA_PARITY;
+ }
+ printk(" %05X-%08X%08X", datahi, insta, instb);
+ }
+ printk("\n");
+ }
+ }
+ return res;
+}
+
+/* Compute the ECC for a data doubleword */
+static uint8_t dc_ecc(uint64_t dword)
+{
+ uint64_t t;
+ uint32_t w;
+ uint8_t p;
+ int i;
+
+ p = 0;
+ for (i = 7; i >= 0; i--)
+ {
+ p <<= 1;
+ t = dword & mask_72_64[i];
+ w = (uint32_t)(t >> 32);
+ p ^= (parity[w>>24] ^ parity[(w>>16) & 0xFF]
+ ^ parity[(w>>8) & 0xFF] ^ parity[w & 0xFF]);
+ w = (uint32_t)(t & 0xFFFFFFFF);
+ p ^= (parity[w>>24] ^ parity[(w>>16) & 0xFF]
+ ^ parity[(w>>8) & 0xFF] ^ parity[w & 0xFF]);
+ }
+ return p;
+}
+
+struct dc_state {
+ unsigned char val;
+ char *name;
+};
+
+static struct dc_state dc_states[] = {
+ { 0x00, "INVALID" },
+ { 0x0f, "COH-SHD" },
+ { 0x13, "NCO-E-C" },
+ { 0x19, "NCO-E-D" },
+ { 0x16, "COH-E-C" },
+ { 0x1c, "COH-E-D" },
+ { 0xff, "*ERROR*" }
+};
+
+#define DC_TAG_VALID(state) \
+ (((state) == 0x0) || ((state) == 0xf) || ((state) == 0x13) || \
+ ((state) == 0x19) || ((state) == 0x16) || ((state) == 0x1c))
+
+static char *dc_state_str(unsigned char state)
+{
+ struct dc_state *dsc = dc_states;
+ while (dsc->val != 0xff) {
+ if (dsc->val == state)
+ break;
+ dsc++;
+ }
+ return dsc->name;
+}
+
+static uint32_t extract_dc(unsigned short addr, int data)
+{
+ int valid, way;
+ unsigned char state;
+ uint32_t taghi, taglolo, taglohi;
+ unsigned long long taglo, pa;
+ uint8_t ecc, lru;
+ int res = 0;
+
+ printk("Dcache index 0x%04x ", addr);
+ for (way = 0; way < 4; way++) {
+ __asm__ __volatile__ (
+ " .set push\n\t"
+ " .set noreorder\n\t"
+ " .set mips64\n\t"
+ " .set noat\n\t"
+ " cache 5, 0(%3)\n\t" /* Index-load-tag-D */
+ " mfc0 %0, $29, 2\n\t"
+ " dmfc0 $1, $28, 2\n\t"
+ " dsrl32 %1, $1, 0\n\t"
+ " sll %2, $1, 0\n\t"
+ " .set pop"
+ : "=r" (taghi), "=r" (taglohi), "=r" (taglolo)
+ : "r" ((way << 13) | addr));
+
+ taglo = ((unsigned long long)taglohi << 32) | taglolo;
+ pa = (taglo & 0xFFFFFFE000ULL) | addr;
+ if (way == 0) {
+ lru = (taghi >> 14) & 0xff;
+ printk("[Bank %d Set 0x%02x] LRU > %d %d %d %d > MRU\n",
+ ((addr >> 11) & 0x2) | ((addr >> 5) & 1), /* bank */
+ ((addr >> 6) & 0x3f), /* index */
+ (lru & 0x3),
+ ((lru >> 2) & 0x3),
+ ((lru >> 4) & 0x3),
+ ((lru >> 6) & 0x3));
+ }
+ state = (taghi >> 25) & 0x1f;
+ valid = DC_TAG_VALID(state);
+ printk(" %d [PA %010llx] [state %s (%02x)] raw tags: %08X-%016llX\n",
+ way, pa, dc_state_str(state), state, taghi, taglo);
+ if (valid) {
+ if (((taglo >> 11) & 1) ^ range_parity(taglo, 39, 26)) {
+ printk(" ** bad parity in PTag1\n");
+ res |= CP0_CERRD_TAG_ADDRESS;
+ }
+ if (((taglo >> 10) & 1) ^ range_parity(taglo, 25, 13)) {
+ printk(" ** bad parity in PTag0\n");
+ res |= CP0_CERRD_TAG_ADDRESS;
+ }
+ } else {
+ res |= CP0_CERRD_TAG_STATE;
+ }
+
+ if (data) {
+ uint32_t datalohi, datalolo, datahi;
+ unsigned long long datalo;
+ int offset;
+ char bad_ecc = 0;
+
+ for (offset = 0; offset < 4; offset++) {
+ /* Index-load-data-D */
+ __asm__ __volatile__ (
+ " .set push\n\t"
+ " .set noreorder\n\t"
+ " .set mips64\n\t"
+ " .set noat\n\t"
+ " cache 7, 0(%3)\n\t" /* Index-load-data-D */
+ " mfc0 %0, $29, 3\n\t"
+ " dmfc0 $1, $28, 3\n\t"
+ " dsrl32 %1, $1, 0 \n\t"
+ " sll %2, $1, 0 \n\t"
+ " .set pop"
+ : "=r" (datahi), "=r" (datalohi), "=r" (datalolo)
+ : "r" ((way << 13) | addr | (offset << 3)));
+ datalo = ((unsigned long long)datalohi << 32) | datalolo;
+ ecc = dc_ecc(datalo);
+ if (ecc != datahi) {
+ int bits;
+ bad_ecc |= 1 << (3-offset);
+ ecc ^= datahi;
+ bits = hweight8(ecc);
+ res |= (bits == 1) ? CP0_CERRD_DATA_SBE : CP0_CERRD_DATA_DBE;
+ }
+ printk(" %02X-%016llX", datahi, datalo);
+ }
+ printk("\n");
+ if (bad_ecc)
+ printk(" dwords w/ bad ECC: %d %d %d %d\n",
+ !!(bad_ecc & 8), !!(bad_ecc & 4),
+ !!(bad_ecc & 2), !!(bad_ecc & 1));
+ }
+ }
+ return res;
+}
diff --git a/arch/mips/mm/cex-gen.S b/arch/mips/mm/cex-gen.S
new file mode 100644
index 000000000..45dff5cd4
--- /dev/null
+++ b/arch/mips/mm/cex-gen.S
@@ -0,0 +1,42 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 - 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ *
+ * Cache error handler
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+
+/*
+ * Game over. Go to the button. Press gently. Swear where allowed by
+ * legislation.
+ */
+ LEAF(except_vec2_generic)
+ .set noreorder
+ .set noat
+ .set mips0
+ /*
+ * This is a very bad place to be. Our cache error
+ * detection has triggered. If we have write-back data
+ * in the cache, we may not be able to recover. As a
+ * first-order desperate measure, turn off KSEG0 caching.
+ */
+ mfc0 k0,CP0_CONFIG
+ li k1,~CONF_CM_CMASK
+ and k0,k0,k1
+ ori k0,k0,CONF_CM_UNCACHED
+ mtc0 k0,CP0_CONFIG
+ /* Give it a few cycles to sink in... */
+ nop
+ nop
+ nop
+
+ j cache_parity_error
+ nop
+ END(except_vec2_generic)
diff --git a/arch/mips/mm/cex-oct.S b/arch/mips/mm/cex-oct.S
new file mode 100644
index 000000000..9029092aa
--- /dev/null
+++ b/arch/mips/mm/cex-oct.S
@@ -0,0 +1,70 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Cavium Networks
+ * Cache error handler
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+
+/*
+ * Handle cache error. Indicate to the second level handler whether
+ * the exception is recoverable.
+ */
+ LEAF(except_vec2_octeon)
+
+ .set push
+ .set mips64r2
+ .set noreorder
+ .set noat
+
+
+ /* due to an erratum we need to read the COP0 CacheErr (Dcache)
+ * before any cache/DRAM access */
+
+ rdhwr k0, $0 /* get core_id */
+ PTR_LA k1, cache_err_dcache
+ sll k0, k0, 3
+ PTR_ADDU k1, k0, k1 /* k1 = &cache_err_dcache[core_id] */
+
+ dmfc0 k0, CP0_CACHEERR, 1
+ sd k0, (k1)
+ dmtc0 $0, CP0_CACHEERR, 1
+
+ /* check whether this is a nested exception */
+ mfc0 k1, CP0_STATUS
+ andi k1, k1, ST0_EXL
+ beqz k1, 1f
+ nop
+ j cache_parity_error_octeon_non_recoverable
+ nop
+
+ /* exception is recoverable */
+1: j handle_cache_err
+ nop
+
+ .set pop
+ END(except_vec2_octeon)
+
+ /* We need to jump to handle_cache_err so that the previous handler
+ * can fit within 0x80 bytes. We also move from 0xFFFFFFFFAXXXXXXX
+ * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */
+ LEAF(handle_cache_err)
+ .set push
+ .set noreorder
+ .set noat
+
+ SAVE_ALL
+ KMODE
+ jal cache_parity_error_octeon_recoverable
+ nop
+ j ret_from_exception
+ nop
+
+ .set pop
+ END(handle_cache_err)
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
new file mode 100644
index 000000000..85c6e6a40
--- /dev/null
+++ b/arch/mips/mm/cex-sb1.S
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2001,2002,2003 Broadcom Corporation
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/cacheops.h>
+#include <asm/sibyte/board.h>
+
+#define C0_ERRCTL $26 /* CP0: Error info */
+#define C0_CERR_I $27 /* CP0: Icache error */
+#define C0_CERR_D $27,1 /* CP0: Dcache error */
+
+ /*
+ * Based on SiByte sample software cache-err/cerr.S
+ * CVS revision 1.8. Only the 'unrecoverable' case
+ * is changed.
+ */
+
+ .set mips64
+ .set noreorder
+ .set noat
+
+ /*
+ * sb1_cerr_vec: code to be copied to the Cache Error
+ * Exception vector. The code must be pushed out to memory
+ * (either by copying to Kseg0 and Kseg1 both, or by flushing
+ * the L1 and L2) since it is fetched as 0xa0000100.
+ *
+ * NOTE: Be sure this handler is at most 28 instructions long
+ * since the final 16 bytes of the exception vector memory
+ * (0x170-0x17f) are used to preserve k0, k1, and ra.
+ */
+
+LEAF(except_vec2_sb1)
+ /*
+ * If this error is recoverable, we need to exit the handler
+ * without having dirtied any registers. To do this,
+ * save/restore k0 and k1 from low memory (Useg is direct
+ * mapped while ERL=1). Note that we can't save to a
+ * CPU-specific location without ruining a register in the
+ * process. This means we are vulnerable to data corruption
+ * whenever the handler is reentered by a second CPU.
+ */
+ sd k0,0x170($0)
+ sd k1,0x178($0)
+
+#ifdef CONFIG_SB1_CEX_ALWAYS_FATAL
+ j handle_vec2_sb1
+ nop
+#else
+ /*
+ * M_ERRCTL_RECOVERABLE is bit 31, which makes it easy to tell
+ * if we can fast-path out of here for a h/w-recovered error.
+ */
+ mfc0 k1,C0_ERRCTL
+ bgtz k1,attempt_recovery
+ sll k0,k1,1
+
+recovered_dcache:
+ /*
+ * Unlock CacheErr-D (which in turn unlocks CacheErr-DPA).
+ * Ought to log the occurrence of this recovered dcache error.
+ */
+ b recovered
+ mtc0 $0,C0_CERR_D
+
+attempt_recovery:
+ /*
+ * k0 has C0_ERRCTL << 1, which puts 'DC' at bit 31. Any
+ * Dcache errors we can recover from will take more extensive
+ * processing. For now, they are considered "unrecoverable".
+ * Note that 'DC' becoming set (outside of ERL mode) will
+ * cause 'IC' to clear; so if there's an Icache error, we'll
+ * only find out about it if we recover from this error and
+ * continue executing.
+ */
+ bltz k0,unrecoverable
+ sll k0,1
+
+ /*
+ * k0 has C0_ERRCTL << 2, which puts 'IC' at bit 31. If an
+ * Icache error isn't indicated, I'm not sure why we got here.
+ * Consider that case "unrecoverable" for now.
+ */
+ bgez k0,unrecoverable
+
+attempt_icache_recovery:
+ /*
+ * External icache errors are due to uncorrectable ECC errors
+ * in the L2 cache or Memory Controller and cannot be
+ * recovered here.
+ */
+ mfc0 k0,C0_CERR_I /* delay slot */
+ li k1,1 << 26 /* ICACHE_EXTERNAL */
+ and k1,k0
+ bnez k1,unrecoverable
+ andi k0,0x1fe0
+
+ /*
+ * Since the error is internal, the 'IDX' field from
+ * CacheErr-I is valid and we can just invalidate all blocks
+ * in that set.
+ */
+ cache Index_Invalidate_I,(0<<13)(k0)
+ cache Index_Invalidate_I,(1<<13)(k0)
+ cache Index_Invalidate_I,(2<<13)(k0)
+ cache Index_Invalidate_I,(3<<13)(k0)
+
+ /* Ought to log this recovered icache error */
+
+recovered:
+ /* Restore the saved registers */
+ ld k0,0x170($0)
+ ld k1,0x178($0)
+ eret
+
+unrecoverable:
+ /* Unrecoverable Icache or Dcache error; log it and/or fail */
+ j handle_vec2_sb1
+ nop
+#endif
+
+END(except_vec2_sb1)
+
+ LEAF(handle_vec2_sb1)
+ mfc0 k0,CP0_CONFIG
+ li k1,~CONF_CM_CMASK
+ and k0,k0,k1
+ ori k0,k0,CONF_CM_UNCACHED
+ mtc0 k0,CP0_CONFIG
+
+ SSNOP
+ SSNOP
+ SSNOP
+ SSNOP
+ bnezl $0, 1f
+1:
+ mfc0 k0, CP0_STATUS
+ sll k0, k0, 3 # check CU0 (kernel?)
+ bltz k0, 2f
+ nop
+
+ /* Get a valid Kseg0 stack pointer. Any task's stack pointer
+ * will do, although if we ever want to resume execution we
+ * better not have corrupted any state. */
+ get_saved_sp
+ move sp, k1
+
+2:
+ j sb1_cache_error
+ nop
+
+ END(handle_vec2_sb1)
diff --git a/arch/mips/mm/context.c b/arch/mips/mm/context.c
new file mode 100644
index 000000000..b25564090
--- /dev/null
+++ b/arch/mips/mm/context.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/atomic.h>
+#include <linux/mmu_context.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+
+static DEFINE_RAW_SPINLOCK(cpu_mmid_lock);
+
+static atomic64_t mmid_version;
+static unsigned int num_mmids;
+static unsigned long *mmid_map;
+
+static DEFINE_PER_CPU(u64, reserved_mmids);
+static cpumask_t tlb_flush_pending;
+
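+/* Two ASID/MMID values are in the same generation if their version bits match. */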
+static bool asid_versions_eq(int cpu, u64 a, u64 b)
+{
+ return ((a ^ b) & asid_version_mask(cpu)) == 0;
+}
+
+void get_new_mmu_context(struct mm_struct *mm)
+{
+ unsigned int cpu;
+ u64 asid;
+
+ /*
+ * This function is specific to ASIDs, and should not be called when
+ * MMIDs are in use.
+ */
+ if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
+ return;
+
+ cpu = smp_processor_id();
+ asid = asid_cache(cpu);
+
+ if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
+ if (cpu_has_vtag_icache)
+ flush_icache_all();
+ local_flush_tlb_all(); /* start new asid cycle */
+ }
+
+ set_cpu_context(cpu, mm, asid);
+ asid_cache(cpu) = asid;
+}
+EXPORT_SYMBOL_GPL(get_new_mmu_context);
+
+void check_mmu_context(struct mm_struct *mm)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /*
+ * This function is specific to ASIDs, and should not be called when
+ * MMIDs are in use.
+ */
+ if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
+ return;
+
+ /* Check if our ASID is of an older version and thus invalid */
+ if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
+ get_new_mmu_context(mm);
+}
+EXPORT_SYMBOL_GPL(check_mmu_context);
+
+static void flush_context(void)
+{
+ u64 mmid;
+ int cpu;
+
+ /* Update the list of reserved MMIDs and the MMID bitmap */
+ bitmap_clear(mmid_map, 0, num_mmids);
+
+ /* Reserve an MMID for kmap/wired entries */
+ __set_bit(MMID_KERNEL_WIRED, mmid_map);
+
+ for_each_possible_cpu(cpu) {
+ mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);
+
+ /*
+ * If this CPU has already been through a
+ * rollover, but hasn't run another task in
+ * the meantime, we must preserve its reserved
+ * MMID, as this is the only trace we have of
+ * the process it is still running.
+ */
+ if (mmid == 0)
+ mmid = per_cpu(reserved_mmids, cpu);
+
+ __set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
+ per_cpu(reserved_mmids, cpu) = mmid;
+ }
+
+ /*
+ * Queue a TLB invalidation for each CPU to perform on next
+ * context-switch
+ */
+ cpumask_setall(&tlb_flush_pending);
+}
+
+static bool check_update_reserved_mmid(u64 mmid, u64 newmmid)
+{
+ bool hit;
+ int cpu;
+
+ /*
+ * Iterate over the set of reserved MMIDs looking for a match.
+ * If we find one, then we can update our mm to use newmmid
+ * (i.e. the same MMID in the current generation) but we can't
+ * exit the loop early, since we need to ensure that all copies
+ * of the old MMID are updated to reflect the mm. Failure to do
+ * so could result in us missing the reserved MMID in a future
+ * generation.
+ */
+ hit = false;
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(reserved_mmids, cpu) == mmid) {
+ hit = true;
+ per_cpu(reserved_mmids, cpu) = newmmid;
+ }
+ }
+
+ return hit;
+}
+
+static u64 get_new_mmid(struct mm_struct *mm)
+{
+ static u32 cur_idx = MMID_KERNEL_WIRED + 1;
+ u64 mmid, version, mmid_mask;
+
+ mmid = cpu_context(0, mm);
+ version = atomic64_read(&mmid_version);
+ mmid_mask = cpu_asid_mask(&boot_cpu_data);
+
+ if (!asid_versions_eq(0, mmid, 0)) {
+ u64 newmmid = version | (mmid & mmid_mask);
+
+ /*
+ * If our current MMID was active during a rollover, we
+ * can continue to use it and this was just a false alarm.
+ */
+ if (check_update_reserved_mmid(mmid, newmmid)) {
+ mmid = newmmid;
+ goto set_context;
+ }
+
+ /*
+ * We had a valid MMID in a previous life, so try to re-use
+ * it if possible.
+ */
+ if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) {
+ mmid = newmmid;
+ goto set_context;
+ }
+ }
+
+ /* Allocate a free MMID */
+ mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx);
+ if (mmid != num_mmids)
+ goto reserve_mmid;
+
+ /* We're out of MMIDs, so increment the global version */
+ version = atomic64_add_return_relaxed(asid_first_version(0),
+ &mmid_version);
+
+ /* Note currently active MMIDs & mark TLBs as requiring flushes */
+ flush_context();
+
+ /* We have more MMIDs than CPUs, so this will always succeed */
+ mmid = find_first_zero_bit(mmid_map, num_mmids);
+
+reserve_mmid:
+ __set_bit(mmid, mmid_map);
+ cur_idx = mmid;
+ mmid |= version;
+set_context:
+ set_cpu_context(0, mm, mmid);
+ return mmid;
+}
+
+void check_switch_mmu_context(struct mm_struct *mm)
+{
+ unsigned int cpu = smp_processor_id();
+ u64 ctx, old_active_mmid;
+ unsigned long flags;
+
+ if (!cpu_has_mmid) {
+ check_mmu_context(mm);
+ write_c0_entryhi(cpu_asid(cpu, mm));
+ goto setup_pgd;
+ }
+
+ /*
+ * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's
+ * unnecessary.
+ *
+ * The memory ordering here is subtle. If our active_mmids is non-zero
+ * and the MMID matches the current version, then we update the CPU's
+ * asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover
+ * means that either:
+ *
+ * - We get a zero back from the cmpxchg and end up waiting on
+ * cpu_mmid_lock in check_mmu_context(). Taking the lock synchronises
+ * with the rollover and so we are forced to see the updated
+ * generation.
+ *
+ * - We get a valid MMID back from the cmpxchg, which means the
+ * relaxed xchg in flush_context will treat us as reserved
+ * because atomic RmWs are totally ordered for a given location.
+ */
+ ctx = cpu_context(cpu, mm);
+ old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
+ if (!old_active_mmid ||
+ !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
+ !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
+ raw_spin_lock_irqsave(&cpu_mmid_lock, flags);
+
+ ctx = cpu_context(cpu, mm);
+ if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
+ ctx = get_new_mmid(mm);
+
+ WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
+ raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags);
+ }
+
+ /*
+ * Invalidate the local TLB if needed. Note that we must only clear our
+ * bit in tlb_flush_pending after this is complete, so that the
+ * cpu_has_shared_ftlb_entries case below isn't misled.
+ */
+ if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
+ if (cpu_has_vtag_icache)
+ flush_icache_all();
+ local_flush_tlb_all();
+ cpumask_clear_cpu(cpu, &tlb_flush_pending);
+ }
+
+ write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data));
+
+ /*
+ * If this CPU shares FTLB entries with its siblings and one or more of
+ * those siblings hasn't yet invalidated its TLB following a version
+ * increase then we need to invalidate any TLB entries for our MMID
+ * that we might otherwise pick up from a sibling.
+ *
+ * We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in
+ * CONFIG_SMP=n kernels.
+ */
+#ifdef CONFIG_SMP
+ if (cpu_has_shared_ftlb_entries &&
+ cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
+ /* Ensure we operate on the new MMID */
+ mtc0_tlbw_hazard();
+
+ /*
+ * Invalidate all TLB entries associated with the new
+ * MMID, and wait for the invalidation to complete.
+ */
+ ginvt_mmid();
+ sync_ginv();
+ }
+#endif
+
+setup_pgd:
+ TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
+}
+EXPORT_SYMBOL_GPL(check_switch_mmu_context);
+
+static int mmid_init(void)
+{
+ if (!cpu_has_mmid)
+ return 0;
+
+ /*
+ * Expect allocation after rollover to fail if we don't have at least
+ * one more MMID than CPUs.
+ */
+ num_mmids = asid_first_version(0);
+ WARN_ON(num_mmids <= num_possible_cpus());
+
+ atomic64_set(&mmid_version, asid_first_version(0));
+ mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map),
+ GFP_KERNEL);
+ if (!mmid_map)
+ panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);
+
+ /* Reserve an MMID for kmap/wired entries */
+ __set_bit(MMID_KERNEL_WIRED, mmid_map);
+
+ pr_info("MMID allocator initialised with %u entries\n", num_mmids);
+ return 0;
+}
+early_initcall(mmid_init);
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
new file mode 100644
index 000000000..38d3d9143
--- /dev/null
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
+ * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
+ * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
+ */
+#include <linux/dma-direct.h>
+#include <linux/dma-map-ops.h>
+#include <linux/highmem.h>
+
+#include <asm/cache.h>
+#include <asm/cpu-type.h>
+#include <asm/dma-coherence.h>
+#include <asm/io.h>
+
+/*
+ * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
+ * fill random cachelines with stale data at any time, requiring an extra
+ * flush post-DMA.
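+ /*
+ * Read the error state in one go: ErrCtl ($26), the I- and D-cache
+ * CacheErr registers ($27 and $27 sel 1), the 64-bit CacheErr-DPA
+ * ($27 sel 3, split into two 32-bit halves) and ErrorEPC ($30).
+ */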
+ *
+ * Warning on the terminology - Linux calls an uncached area coherent; MIPS
+ * terminology calls memory areas with hardware maintained coherency coherent.
+ *
+ * Note that the R14000 and R16000 should also be checked for in this condition.
+ * However this function is only called on non-I/O-coherent systems and only the
+ * R10000 and R12000 are used in such systems: the SGI IP28 Indigo² and the
+ * SGI IP32 aka O2, respectively.
+ */
+static inline bool cpu_needs_post_dma_flush(void)
+{
+ switch (boot_cpu_type()) {
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_BMIPS5000:
+ case CPU_LOONGSON2EF:
+ return true;
+ default:
+ /*
+ * Presence of MAARs suggests that the CPU supports
+ * speculatively prefetching data, and therefore requires
+ * the post-DMA flush/invalidate.
+ */
+ return cpu_has_maar;
+ }
+}
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+ dma_cache_wback_inv((unsigned long)page_address(page), size);
+}
+
+void *arch_dma_set_uncached(void *addr, size_t size)
+{
+ return (void *)(__pa(addr) + UNCAC_BASE);
+}
+
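+/*
+ * Cache maintenance before a transfer: write back dirty lines before the
+ * device reads (DMA_TO_DEVICE), invalidate before the device writes
+ * (DMA_FROM_DEVICE), and do both for bidirectional transfers.
+ */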
+static inline void dma_sync_virt_for_device(void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ dma_cache_wback((unsigned long)addr, size);
+ break;
+ case DMA_FROM_DEVICE:
+ dma_cache_inv((unsigned long)addr, size);
+ break;
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv((unsigned long)addr, size);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ break;
+ case DMA_FROM_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ dma_cache_inv((unsigned long)addr, size);
+ break;
+ default:
+ BUG();
+ }
+}
+
+/*
+ * A single sg entry may refer to multiple physically contiguous pages. But
+ * we still need to process highmem pages individually. If highmem is not
+ * configured then the bulk of this loop gets optimized out.
+ */
+static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir, bool for_device)
+{
+ struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
+ unsigned long offset = paddr & ~PAGE_MASK;
+ size_t left = size;
+
+ do {
+ size_t len = left;
+ void *addr;
+
+ if (PageHighMem(page)) {
+ if (offset + len > PAGE_SIZE)
+ len = PAGE_SIZE - offset;
+ }
+
+ addr = kmap_atomic(page);
+ if (for_device)
+ dma_sync_virt_for_device(addr + offset, len, dir);
+ else
+ dma_sync_virt_for_cpu(addr + offset, len, dir);
+ kunmap_atomic(addr);
+
+ offset = 0;
+ page++;
+ left -= len;
+ } while (left);
+}
+
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_phys(paddr, size, dir, true);
+}
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (cpu_needs_post_dma_flush())
+ dma_sync_phys(paddr, size, dir, false);
+}
+#endif
+
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *iommu, bool coherent)
+{
+ dev->dma_coherent = coherent;
+}
+#endif
diff --git a/arch/mips/mm/extable.c b/arch/mips/mm/extable.c
new file mode 100644
index 000000000..81bc8a34a
--- /dev/null
+++ b/arch/mips/mm/extable.c
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1997, 99, 2001 - 2004 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/extable.h>
+#include <linux/spinlock.h>
+#include <asm/branch.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(exception_epc(regs));
+ if (fixup) {
+ regs->cp0_epc = fixup->nextinsn;
+
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
new file mode 100644
index 000000000..7c871b14e
--- /dev/null
+++ b/arch/mips/mm/fault.c
@@ -0,0 +1,332 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 - 2000 by Ralf Baechle
+ */
+#include <linux/context_tracking.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/ratelimit.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kprobes.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/branch.h>
+#include <asm/mmu_context.h>
+#include <asm/ptrace.h>
+#include <asm/highmem.h> /* For VMALLOC_END */
+#include <linux/kdebug.h>
+
+int show_unhandled_signals = 1;
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ */
+static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
+ unsigned long address)
+{
+ struct vm_area_struct * vma = NULL;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ const int field = sizeof(unsigned long) * 2;
+ int si_code;
+ vm_fault_t fault;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
+
+ static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
+
+#if 0
+ printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
+ current->comm, current->pid, field, address, write,
+ field, regs->cp0_epc);
+#endif
+
+#ifdef CONFIG_KPROBES
+ /*
+	 * Give the kprobes fault handler a chance to handle the fault first.
+ */
+ if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
+ current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
+ return;
+#endif
+
+ si_code = SEGV_MAPERR;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+#ifdef CONFIG_64BIT
+# define VMALLOC_FAULT_TARGET no_context
+#else
+# define VMALLOC_FAULT_TARGET vmalloc_fault
+#endif
+
+ if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
+ goto VMALLOC_FAULT_TARGET;
+#ifdef MODULE_START
+ if (unlikely(address >= MODULE_START && address < MODULE_END))
+ goto VMALLOC_FAULT_TARGET;
+#endif
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (faulthandler_disabled() || !mm)
+ goto bad_area_nosemaphore;
+
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+retry:
+ mmap_read_lock(mm);
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, address))
+ goto bad_area;
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ si_code = SEGV_ACCERR;
+
+ if (write) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ flags |= FAULT_FLAG_WRITE;
+ } else {
+ if (cpu_has_rixi) {
+ if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
+#if 0
+ pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
+ raw_smp_processor_id(),
+ current->comm, current->pid,
+ field, address, write,
+ field, regs->cp0_epc);
+#endif
+ goto bad_area;
+ }
+ if (!(vma->vm_flags & VM_READ) &&
+ exception_epc(regs) != address) {
+#if 0
+ pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
+ raw_smp_processor_id(),
+ current->comm, current->pid,
+ field, address, write,
+ field, regs->cp0_epc);
+#endif
+ goto bad_area;
+ }
+ } else {
+ if (unlikely(!vma_is_accessible(vma)))
+ goto bad_area;
+ }
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(vma, address, flags, regs);
+
+ if (fault_signal_pending(fault, regs))
+ return;
+
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+ }
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
+
+ /*
+ * No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+ }
+
+ mmap_read_unlock(mm);
+ return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ mmap_read_unlock(mm);
+
+bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
+ tsk->thread.cp0_badvaddr = address;
+ tsk->thread.error_code = write;
+ if (show_unhandled_signals &&
+ unhandled_signal(tsk, SIGSEGV) &&
+ __ratelimit(&ratelimit_state)) {
+ pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n",
+ tsk->comm,
+ write ? "write access to" : "read access from",
+ field, address);
+ pr_info("epc = %0*lx in", field,
+ (unsigned long) regs->cp0_epc);
+ print_vma_addr(KERN_CONT " ", regs->cp0_epc);
+ pr_cont("\n");
+ pr_info("ra = %0*lx in", field,
+ (unsigned long) regs->regs[31]);
+ print_vma_addr(KERN_CONT " ", regs->regs[31]);
+ pr_cont("\n");
+ }
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+ return;
+ }
+
+no_context:
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs)) {
+ current->thread.cp0_baduaddr = address;
+ return;
+ }
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+ bust_spinlocks(1);
+
+ printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
+ "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
+ raw_smp_processor_id(), field, address, field, regs->cp0_epc,
+ field, regs->regs[31]);
+ die("Oops", regs);
+
+out_of_memory:
+ /*
+	 * We ran out of memory, call the OOM killer, and return to userspace
+ * (which will retry the fault, or kill us if we got oom-killed).
+ */
+ mmap_read_unlock(mm);
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
+ return;
+
+do_sigbus:
+ mmap_read_unlock(mm);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ goto no_context;
+
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+#if 0
+ printk("do_page_fault() #3: sending SIGBUS to %s for "
+ "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
+ tsk->comm,
+ write ? "write access to" : "read access from",
+ field, address,
+ field, (unsigned long) regs->cp0_epc,
+ field, (unsigned long) regs->regs[31]);
+#endif
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
+ tsk->thread.cp0_badvaddr = address;
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
+
+ return;
+#ifndef CONFIG_64BIT
+vmalloc_fault:
+ {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "tsk" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ int offset = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
+ pgd_k = init_mm.pgd + offset;
+
+ if (!pgd_present(*pgd_k))
+ goto no_context;
+ set_pgd(pgd, *pgd_k);
+
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d_k))
+ goto no_context;
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
+ if (!pud_present(*pud_k))
+ goto no_context;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd_k))
+ goto no_context;
+ set_pmd(pmd, *pmd_k);
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ goto no_context;
+ return;
+ }
+#endif
+}
+
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+ unsigned long write, unsigned long address)
+{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ __do_page_fault(regs, write, address);
+ exception_exit(prev_state);
+}
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
new file mode 100644
index 000000000..5fec7f45d
--- /dev/null
+++ b/arch/mips/mm/highmem.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+
+static pte_t *kmap_pte;
+
+unsigned long highstart_pfn, highend_pfn;
+
+void kmap_flush_tlb(unsigned long addr)
+{
+ flush_tlb_one(addr);
+}
+EXPORT_SYMBOL(kmap_flush_tlb);
+
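+/*
+ * Map a highmem page into a per-CPU fixmap slot: push a kmap index, install
+ * the PTE for that slot and flush the single local TLB entry covering it.
+ */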
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+{
+ unsigned long vaddr;
+ int idx, type;
+
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(*(kmap_pte - idx)));
+#endif
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
+ local_flush_tlb_one((unsigned long)vaddr);
+
+ return (void*) vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic_high_prot);
+
+void kunmap_atomic_high(void *kvaddr)
+{
+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+ int type __maybe_unused;
+
+ if (vaddr < FIXADDR_START)
+ return;
+
+ type = kmap_atomic_idx();
+#ifdef CONFIG_DEBUG_HIGHMEM
+ {
+ int idx = type + KM_TYPE_NR * smp_processor_id();
+
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+ /*
+		 * force other mappings to Oops if they try to access
+		 * this pte without first remapping it
+ */
+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
+ local_flush_tlb_one(vaddr);
+ }
+#endif
+ kmap_atomic_idx_pop();
+}
+EXPORT_SYMBOL(kunmap_atomic_high);
+
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn)
+{
+ unsigned long vaddr;
+ int idx, type;
+
+ preempt_disable();
+ pagefault_disable();
+
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
+ flush_tlb_one(vaddr);
+
+ return (void*) vaddr;
+}
+
+void __init kmap_init(void)
+{
+ unsigned long kmap_vstart;
+
+ /* cache the first kmap pte */
+ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+ kmap_pte = virt_to_kpte(kmap_vstart);
+}
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
new file mode 100644
index 000000000..77ffece9c
--- /dev/null
+++ b/arch/mips/mm/hugetlbpage.c
@@ -0,0 +1,81 @@
+/*
+ * MIPS Huge TLB Page Support for Kernel.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
+ * Copyright 2005, Embedded Alley Solutions, Inc.
+ * Matt Porter <mporter@embeddedalley.com>
+ * Copyright (C) 2008, 2009 Cavium Networks, Inc.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/err.h>
+#include <linux/sysctl.h>
+#include <asm/mman.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+
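+/*
+ * MIPS maps huge pages at the PMD level, so the "huge PTE" returned here is
+ * really a PMD entry allocated below the PUD for the given address.
+ */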
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
+ unsigned long sz)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pte_t *pte = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ p4d = p4d_alloc(mm, pgd, addr);
+ pud = pud_alloc(mm, p4d, addr);
+ if (pud)
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+
+ return pte;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
+ unsigned long sz)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ if (pgd_present(*pgd)) {
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_present(*p4d)) {
+ pud = pud_offset(p4d, addr);
+ if (pud_present(*pud))
+ pmd = pmd_offset(pud, addr);
+ }
+ }
+ return (pte_t *) pmd;
+}
+
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+ if (len & ~HPAGE_MASK)
+ return -EINVAL;
+ if (addr & ~HPAGE_MASK)
+ return -EINVAL;
+ return 0;
+}
+
+int pmd_huge(pmd_t pmd)
+{
+ return (pmd_val(pmd) & _PAGE_HUGE) != 0;
+}
+
+int pud_huge(pud_t pud)
+{
+ return (pud_val(pud) & _PAGE_HUGE) != 0;
+}
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
new file mode 100644
index 000000000..07e84a774
--- /dev/null
+++ b/arch/mips/mm/init.c
@@ -0,0 +1,581 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/pagemap.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
+#include <linux/proc_fs.h>
+#include <linux/pfn.h>
+#include <linux/hardirq.h>
+#include <linux/gfp.h>
+#include <linux/kcore.h>
+#include <linux/initrd.h>
+
+#include <asm/bootinfo.h>
+#include <asm/cachectl.h>
+#include <asm/cpu.h>
+#include <asm/dma.h>
+#include <asm/kmap_types.h>
+#include <asm/maar.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+#include <asm/fixmap.h>
+
+/*
+ * We have up to 8 empty zeroed pages so we can map one of the right colour
+ * when needed. This is necessary only on R4000 / R4400 SC and MC versions
+ * where we have to avoid VCED / VECI exceptions for good performance at
+ * any price.  Since the page is never written to after initialization we
+ * don't have to care about aliases on other CPUs.
+ */
+unsigned long empty_zero_page, zero_page_mask;
+EXPORT_SYMBOL_GPL(empty_zero_page);
+EXPORT_SYMBOL(zero_page_mask);
+
+/*
+ * Not static inline because it is used by IP27 special magic initialization code
+ */
+void setup_zero_pages(void)
+{
+ unsigned int order, i;
+ struct page *page;
+
+ if (cpu_has_vce)
+ order = 3;
+ else
+ order = 0;
+
+ empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!empty_zero_page)
+ panic("Oh boy, that early out of memory?");
+
+ page = virt_to_page((void *)empty_zero_page);
+ split_page(page, order);
+ for (i = 0; i < (1 << order); i++, page++)
+ mark_page_reserved(page);
+
+ zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
+}
+
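+/*
+ * Map the page at a fixmap address whose cache colour matches the user
+ * address, using a hand-programmed wired TLB entry.  This avoids dcache
+ * aliasing when the kernel touches a page that is also mapped in userspace.
+ */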
+static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
+{
+ enum fixed_addresses idx;
+ unsigned int old_mmid;
+ unsigned long vaddr, flags, entrylo;
+ unsigned long old_ctx;
+ pte_t pte;
+ int tlbidx;
+
+ BUG_ON(Page_dcache_dirty(page));
+
+ preempt_disable();
+ pagefault_disable();
+ idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
+ idx += in_interrupt() ? FIX_N_COLOURS : 0;
+ vaddr = __fix_to_virt(FIX_CMAP_END - idx);
+ pte = mk_pte(page, prot);
+#if defined(CONFIG_XPA)
+ entrylo = pte_to_entrylo(pte.pte_high);
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+ entrylo = pte.pte_high;
+#else
+ entrylo = pte_to_entrylo(pte_val(pte));
+#endif
+
+ local_irq_save(flags);
+ old_ctx = read_c0_entryhi();
+ write_c0_entryhi(vaddr & (PAGE_MASK << 1));
+ write_c0_entrylo0(entrylo);
+ write_c0_entrylo1(entrylo);
+ if (cpu_has_mmid) {
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(MMID_KERNEL_WIRED);
+ }
+#ifdef CONFIG_XPA
+ if (cpu_has_xpa) {
+ entrylo = (pte.pte_low & _PFNX_MASK);
+ writex_c0_entrylo0(entrylo);
+ writex_c0_entrylo1(entrylo);
+ }
+#endif
+ tlbidx = num_wired_entries();
+ write_c0_wired(tlbidx + 1);
+ write_c0_index(tlbidx);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+ write_c0_entryhi(old_ctx);
+ if (cpu_has_mmid)
+ write_c0_memorymapid(old_mmid);
+ local_irq_restore(flags);
+
+ return (void*) vaddr;
+}
+
+void *kmap_coherent(struct page *page, unsigned long addr)
+{
+ return __kmap_pgprot(page, addr, PAGE_KERNEL);
+}
+
+void *kmap_noncoherent(struct page *page, unsigned long addr)
+{
+ return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
+}
+
+void kunmap_coherent(void)
+{
+ unsigned int wired;
+ unsigned long flags, old_ctx;
+
+ local_irq_save(flags);
+ old_ctx = read_c0_entryhi();
+ wired = num_wired_entries() - 1;
+ write_c0_wired(wired);
+ write_c0_index(wired);
+ write_c0_entryhi(UNIQUE_ENTRYHI(wired));
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+ write_c0_entryhi(old_ctx);
+ local_irq_restore(flags);
+ pagefault_enable();
+ preempt_enable();
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ void *vfrom, *vto;
+
+ vto = kmap_atomic(to);
+ if (cpu_has_dc_aliases &&
+ page_mapcount(from) && !Page_dcache_dirty(from)) {
+ vfrom = kmap_coherent(from, vaddr);
+ copy_page(vto, vfrom);
+ kunmap_coherent();
+ } else {
+ vfrom = kmap_atomic(from);
+ copy_page(vto, vfrom);
+ kunmap_atomic(vfrom);
+ }
+ if ((!cpu_has_ic_fills_f_dc) ||
+ pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+ flush_data_cache_page((unsigned long)vto);
+ kunmap_atomic(vto);
+ /* Make sure this page is cleared on other CPU's too before using it */
+ smp_wmb();
+}
+
+void copy_to_user_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ if (cpu_has_dc_aliases &&
+ page_mapcount(page) && !Page_dcache_dirty(page)) {
+ void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+ memcpy(vto, src, len);
+ kunmap_coherent();
+ } else {
+ memcpy(dst, src, len);
+ if (cpu_has_dc_aliases)
+ SetPageDcacheDirty(page);
+ }
+ if (vma->vm_flags & VM_EXEC)
+ flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
+
+void copy_from_user_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ if (cpu_has_dc_aliases &&
+ page_mapcount(page) && !Page_dcache_dirty(page)) {
+ void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+ memcpy(dst, vfrom, len);
+ kunmap_coherent();
+ } else {
+ memcpy(dst, src, len);
+ if (cpu_has_dc_aliases)
+ SetPageDcacheDirty(page);
+ }
+}
+EXPORT_SYMBOL_GPL(copy_from_user_page);
+
+void __init fixrange_init(unsigned long start, unsigned long end,
+ pgd_t *pgd_base)
+{
+#ifdef CONFIG_HIGHMEM
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ int i, j, k;
+ unsigned long vaddr;
+
+ vaddr = start;
+ i = pgd_index(vaddr);
+ j = pud_index(vaddr);
+ k = pmd_index(vaddr);
+ pgd = pgd_base + i;
+
+ for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
+ pud = (pud_t *)pgd;
+ for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
+ pmd = (pmd_t *)pud;
+ for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
+ if (pmd_none(*pmd)) {
+ pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
+ PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE,
+ PAGE_SIZE);
+
+ set_pmd(pmd, __pmd((unsigned long)pte));
+ BUG_ON(pte != pte_offset_kernel(pmd, 0));
+ }
+ vaddr += PMD_SIZE;
+ }
+ k = 0;
+ }
+ j = 0;
+ }
+#endif
+}
+
+struct maar_walk_info {
+ struct maar_config cfg[16];
+ unsigned int num_cfg;
+};
+
+static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
+ void *data)
+{
+ struct maar_walk_info *wi = data;
+ struct maar_config *cfg = &wi->cfg[wi->num_cfg];
+ unsigned int maar_align;
+
+ /* MAAR registers hold physical addresses right shifted by 4 bits */
+ maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);
+
+ /* Fill in the MAAR config entry */
+ cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
+ cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
+ cfg->attrs = MIPS_MAAR_S;
+
+ /* Ensure we don't overflow the cfg array */
+ if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
+ wi->num_cfg++;
+
+ return 0;
+}
+
+
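+/*
+ * Default MAAR setup: walk every System RAM range and mark it as eligible
+ * for speculative accesses, subject to MAAR alignment constraints.
+ * Platforms may override this with their own policy.
+ */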
+unsigned __weak platform_maar_init(unsigned num_pairs)
+{
+ unsigned int num_configured;
+ struct maar_walk_info wi;
+
+ wi.num_cfg = 0;
+ walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);
+
+ num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
+ if (num_configured < wi.num_cfg)
+ pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
+ num_pairs, wi.num_cfg);
+
+ return num_configured;
+}
+
+void maar_init(void)
+{
+ unsigned num_maars, used, i;
+ phys_addr_t lower, upper, attr;
+ static struct {
+ struct maar_config cfgs[3];
+ unsigned used;
+ } recorded = { { { 0 } }, 0 };
+
+ if (!cpu_has_maar)
+ return;
+
+ /* Detect the number of MAARs */
+ write_c0_maari(~0);
+ back_to_back_c0_hazard();
+ num_maars = read_c0_maari() + 1;
+
+ /* MAARs should be in pairs */
+ WARN_ON(num_maars % 2);
+
+ /* Set MAARs using values we recorded already */
+ if (recorded.used) {
+ used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
+ BUG_ON(used != recorded.used);
+ } else {
+ /* Configure the required MAARs */
+ used = platform_maar_init(num_maars / 2);
+ }
+
+ /* Disable any further MAARs */
+ for (i = (used * 2); i < num_maars; i++) {
+ write_c0_maari(i);
+ back_to_back_c0_hazard();
+ write_c0_maar(0);
+ back_to_back_c0_hazard();
+ }
+
+ if (recorded.used)
+ return;
+
+ pr_info("MAAR configuration:\n");
+ for (i = 0; i < num_maars; i += 2) {
+ write_c0_maari(i);
+ back_to_back_c0_hazard();
+ upper = read_c0_maar();
+#ifdef CONFIG_XPA
+ upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
+#endif
+
+ write_c0_maari(i + 1);
+ back_to_back_c0_hazard();
+ lower = read_c0_maar();
+#ifdef CONFIG_XPA
+ lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
+#endif
+
+ attr = lower & upper;
+ lower = (lower & MIPS_MAAR_ADDR) << 4;
+ upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;
+
+ pr_info(" [%d]: ", i / 2);
+ if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) {
+ pr_cont("disabled\n");
+ continue;
+ }
+
+ pr_cont("%pa-%pa", &lower, &upper);
+
+ if (attr & MIPS_MAAR_S)
+ pr_cont(" speculate");
+
+ pr_cont("\n");
+
+ /* Record the setup for use on secondary CPUs */
+ if (used <= ARRAY_SIZE(recorded.cfgs)) {
+ recorded.cfgs[recorded.used].lower = lower;
+ recorded.cfgs[recorded.used].upper = upper;
+ recorded.cfgs[recorded.used].attrs = attr;
+ recorded.used++;
+ }
+ }
+}
+
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+void __init paging_init(void)
+{
+ unsigned long max_zone_pfns[MAX_NR_ZONES];
+
+ pagetable_init();
+
+#ifdef CONFIG_HIGHMEM
+ kmap_init();
+#endif
+#ifdef CONFIG_ZONE_DMA
+ max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+#endif
+#ifdef CONFIG_ZONE_DMA32
+ max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+#ifdef CONFIG_HIGHMEM
+ max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
+
+ if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
+ printk(KERN_WARNING "This processor doesn't support highmem."
+ " %ldk highmem ignored\n",
+ (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
+ max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+ }
+#endif
+
+ free_area_init(max_zone_pfns);
+}
+
+#ifdef CONFIG_64BIT
+static struct kcore_list kcore_kseg0;
+#endif
+
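+/*
+ * Release highmem pages to the buddy allocator, skipping pfns not backed by
+ * memory.  CPUs with dcache aliasing cannot use highmem, so nothing is freed
+ * for them.
+ */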
+static inline void __init mem_init_free_highmem(void)
+{
+#ifdef CONFIG_HIGHMEM
+ unsigned long tmp;
+
+ if (cpu_has_dc_aliases)
+ return;
+
+ for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
+ struct page *page = pfn_to_page(tmp);
+
+ if (!memblock_is_memory(PFN_PHYS(tmp)))
+ SetPageReserved(page);
+ else
+ free_highmem_page(page);
+ }
+#endif
+}
+
+void __init mem_init(void)
+{
+ /*
+ * When _PFN_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
+ * bits to hold a full 32b physical address on MIPS32 systems.
+ */
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));
+
+#ifdef CONFIG_HIGHMEM
+#ifdef CONFIG_DISCONTIGMEM
+#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
+#endif
+ max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
+#else
+ max_mapnr = max_low_pfn;
+#endif
+ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+
+ maar_init();
+ memblock_free_all();
+ setup_zero_pages(); /* Setup zeroed pages. */
+ mem_init_free_highmem();
+ mem_init_print_info(NULL);
+
+#ifdef CONFIG_64BIT
+ if ((unsigned long) &_text > (unsigned long) CKSEG0)
+ /* The -4 is a hack so that user tools don't have to handle
+ the overflow. */
+ kclist_add(&kcore_kseg0, (void *) CKSEG0,
+ 0x80000000 - 4, KCORE_TEXT);
+#endif
+}
+#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+
+void free_init_pages(const char *what, unsigned long begin, unsigned long end)
+{
+ unsigned long pfn;
+
+ for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
+ struct page *page = pfn_to_page(pfn);
+ void *addr = phys_to_virt(PFN_PHYS(pfn));
+
+ memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
+ free_reserved_page(page);
+ }
+ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+}
+
+void (*free_init_pages_eva)(void *begin, void *end) = NULL;
+
+void __ref free_initmem(void)
+{
+ prom_free_prom_memory();
+ /*
+ * Let the platform define a specific function to free the
+ * init section since EVA may have used any possible mapping
+ * between virtual and physical addresses.
+ */
+ if (free_init_pages_eva)
+ free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
+ else
+ free_initmem_default(POISON_FREE_INITMEM);
+}
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+ return node_distance(cpu_to_node(from), cpu_to_node(to));
+}
+
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
+ size_t align)
+{
+ return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ cpu_to_node(cpu));
+}
+
+static void __init pcpu_fc_free(void *ptr, size_t size)
+{
+ memblock_free_early(__pa(ptr), size);
+}
+
+void __init setup_per_cpu_areas(void)
+{
+ unsigned long delta;
+ unsigned int cpu;
+ int rc;
+
+ /*
+ * Always reserve area for module percpu variables. That's
+ * what the legacy allocator did.
+ */
+ rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+ PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+ pcpu_cpu_distance,
+ pcpu_fc_alloc, pcpu_fc_free);
+ if (rc < 0)
+ panic("Failed to initialize percpu areas.");
+
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu)
+ __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+unsigned long pgd_current[NR_CPUS];
+#endif
+
+/*
+ * Align swapper_pg_dir to 64K so that its address can be loaded
+ * with a single LUI instruction in the TLB handlers. If we used
+ * __aligned(64K), its size would get rounded up to the alignment
+ * size, and waste space. So we place it in its own section and align
+ * it in the linker script.
+ */
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
+#ifndef __PAGETABLE_PUD_FOLDED
+pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
+#endif
+#ifndef __PAGETABLE_PMD_FOLDED
+pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+EXPORT_SYMBOL_GPL(invalid_pmd_table);
+#endif
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
+EXPORT_SYMBOL(invalid_pte_table);
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
new file mode 100644
index 000000000..b6dad2fd5
--- /dev/null
+++ b/arch/mips/mm/ioremap.c
@@ -0,0 +1,119 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ * (C) Copyright 2001, 2002 Ralf Baechle
+ */
+#include <linux/export.h>
+#include <asm/addrspace.h>
+#include <asm/byteorder.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm_types.h>
+#include <linux/io.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <ioremap.h>
+
+#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
+#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
+
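+/*
+ * walk_system_ram_range() callback: report whether any page in the range is
+ * RAM that the page allocator may hand out (valid and not reserved).
+ */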
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+ void *arg)
+{
+ unsigned long i;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (pfn_valid(start_pfn + i) &&
+ !PageReserved(pfn_to_page(start_pfn + i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * ioremap_prot - map bus memory into CPU space
+ * @phys_addr: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_prot gives the caller control over cache coherency attributes (CCA)
+ */
+void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
+ unsigned long prot_val)
+{
+ unsigned long flags = prot_val & _CACHE_MASK;
+ unsigned long offset, pfn, last_pfn;
+ struct vm_struct *area;
+ phys_addr_t last_addr;
+ unsigned long vaddr;
+ void __iomem *cpu_addr;
+
+ cpu_addr = plat_ioremap(phys_addr, size, flags);
+ if (cpu_addr)
+ return cpu_addr;
+
+ phys_addr = fixup_bigphys_addr(phys_addr, size);
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+ /*
+ * Map uncached objects in the low 512mb of address space using KSEG1,
+ * otherwise map using page tables.
+ */
+ if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
+ flags == _CACHE_UNCACHED)
+ return (void __iomem *) CKSEG1ADDR(phys_addr);
+
+ /*
+ * Don't allow anybody to remap RAM that may be allocated by the page
+ * allocator, since that could lead to races & data clobbering.
+ */
+ pfn = PFN_DOWN(phys_addr);
+ last_pfn = PFN_DOWN(last_addr);
+ if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+ __ioremap_check_ram) == 1) {
+ WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+ &phys_addr, &last_addr);
+ return NULL;
+ }
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+
+ /*
+ * Ok, go for it..
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ vaddr = (unsigned long)area->addr;
+
+ flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
+ if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
+ __pgprot(flags))) {
+ free_vm_area(area);
+ return NULL;
+ }
+
+ return (void __iomem *)(vaddr + offset);
+}
+EXPORT_SYMBOL(ioremap_prot);
+
+void iounmap(const volatile void __iomem *addr)
+{
+ if (!plat_iounmap(addr) && !IS_KSEG1(addr))
+ vunmap((void *)((unsigned long)addr & PAGE_MASK));
+}
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/mips/mm/ioremap64.c b/arch/mips/mm/ioremap64.c
new file mode 100644
index 000000000..15e7820d6
--- /dev/null
+++ b/arch/mips/mm/ioremap64.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/io.h>
+#include <ioremap.h>
+
+void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+ unsigned long prot_val)
+{
+ unsigned long flags = prot_val & _CACHE_MASK;
+ u64 base = (flags == _CACHE_UNCACHED ? IO_BASE : UNCAC_BASE);
+ void __iomem *addr;
+
+ addr = plat_ioremap(offset, size, flags);
+ if (!addr)
+ addr = (void __iomem *)(unsigned long)(base + offset);
+ return addr;
+}
+EXPORT_SYMBOL(ioremap_prot);
+
+void iounmap(const volatile void __iomem *addr)
+{
+ plat_iounmap(addr);
+}
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
new file mode 100644
index 000000000..00fe90c6d
--- /dev/null
+++ b/arch/mips/mm/mmap.c
@@ -0,0 +1,129 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/compiler.h>
+#include <linux/elf-randomize.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/export.h>
+#include <linux/personality.h>
+#include <linux/random.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
+
+unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
+EXPORT_SYMBOL(shm_align_mask);
+
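+/*
+ * Round an address up to the shared-mapping alignment and add the cache
+ * colour implied by the file offset, so that shared mappings of the same
+ * data never alias in a virtually indexed cache.
+ */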
+#define COLOUR_ALIGN(addr, pgoff) \
+ ((((addr) + shm_align_mask) & ~shm_align_mask) + \
+ (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+
+enum mmap_allocation_direction {UP, DOWN};
+
+static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ unsigned long addr0, unsigned long len, unsigned long pgoff,
+ unsigned long flags, enum mmap_allocation_direction dir)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long addr = addr0;
+ int do_color_align;
+ struct vm_unmapped_area_info info;
+
+ if (unlikely(len > TASK_SIZE))
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED) {
+ /* Even MAP_FIXED mappings must reside within TASK_SIZE */
+ if (TASK_SIZE - len < addr)
+ return -EINVAL;
+
+ /*
+ * We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+ if ((flags & MAP_SHARED) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+ return -EINVAL;
+ return addr;
+ }
+
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
+ /* requesting a specific address */
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+ info.length = len;
+ info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+
+ if (dir == DOWN) {
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.low_limit = PAGE_SIZE;
+ info.high_limit = mm->mmap_base;
+ addr = vm_unmapped_area(&info);
+
+ if (!(addr & ~PAGE_MASK))
+ return addr;
+
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ }
+
+ info.flags = 0;
+ info.low_limit = mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ return vm_unmapped_area(&info);
+}
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ return arch_get_unmapped_area_common(filp,
+ addr0, len, pgoff, flags, UP);
+}
+
+/*
+ * There is no need to export this but sched.h declares the function as
+ * extern so making it static here results in an error.
+ */
+unsigned long arch_get_unmapped_area_topdown(struct file *filp,
+ unsigned long addr0, unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ return arch_get_unmapped_area_common(filp,
+ addr0, len, pgoff, flags, DOWN);
+}
+
+bool __virt_addr_valid(const volatile void *kaddr)
+{
+ unsigned long vaddr = (unsigned long)kaddr;
+
+ if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
+ return false;
+
+ return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
+}
+EXPORT_SYMBOL_GPL(__virt_addr_valid);
diff --git a/arch/mips/mm/page-funcs.S b/arch/mips/mm/page-funcs.S
new file mode 100644
index 000000000..43181ac0a
--- /dev/null
+++ b/arch/mips/mm/page-funcs.S
@@ -0,0 +1,53 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Micro-assembler generated clear_page/copy_page functions.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <asm/asm.h>
+#include <asm/export.h>
+#include <asm/regdef.h>
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+#define cpu_clear_page_function_name clear_page_cpu
+#define cpu_copy_page_function_name copy_page_cpu
+#else
+#define cpu_clear_page_function_name clear_page
+#define cpu_copy_page_function_name copy_page
+#endif
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache: 0x058 bytes
+ * R4600 v1.7: 0x05c bytes
+ * R4600 v2.0: 0x060 bytes
+ * With prefetching, 16 word strides 0x120 bytes
+ */
+EXPORT(__clear_page_start)
+LEAF(cpu_clear_page_function_name)
+EXPORT_SYMBOL(cpu_clear_page_function_name)
+1: j 1b /* Dummy, will be replaced. */
+ .space 288
+END(cpu_clear_page_function_name)
+EXPORT(__clear_page_end)
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache: 0x11c bytes
+ * R4600 v1.7: 0x080 bytes
+ * R4600 v2.0: 0x07c bytes
+ * With prefetching, 16 word strides 0x540 bytes
+ */
+EXPORT(__copy_page_start)
+LEAF(cpu_copy_page_function_name)
+EXPORT_SYMBOL(cpu_copy_page_function_name)
+1: j 1b /* Dummy, will be replaced. */
+ .space 1344
+END(cpu_copy_page_function_name)
+EXPORT(__copy_page_end)
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
new file mode 100644
index 000000000..504bc4047
--- /dev/null
+++ b/arch/mips/mm/page.c
@@ -0,0 +1,681 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2007 Maciej W. Rozycki
+ * Copyright (C) 2008 Thiemo Seufer
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+
+#include <asm/bugs.h>
+#include <asm/cacheops.h>
+#include <asm/cpu-type.h>
+#include <asm/inst.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/prefetch.h>
+#include <asm/bootinfo.h>
+#include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
+#include <asm/cpu.h>
+#include <asm/war.h>
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_dma.h>
+#endif
+
+#include <asm/uasm.h>
+
+/* Registers used in the assembled routines. */
+#define ZERO 0
+#define AT 2
+#define A0 4
+#define A1 5
+#define A2 6
+#define T0 8
+#define T1 9
+#define T2 10
+#define T3 11
+#define T9 25
+#define RA 31
+
+/* Handle labels (which must be positive integers). */
+enum label_id {
+ label_clear_nopref = 1,
+ label_clear_pref,
+ label_copy_nopref,
+ label_copy_pref_both,
+ label_copy_pref_store,
+};
+
+UASM_L_LA(_clear_nopref)
+UASM_L_LA(_clear_pref)
+UASM_L_LA(_copy_nopref)
+UASM_L_LA(_copy_pref_both)
+UASM_L_LA(_copy_pref_store)
+
+/* We need one branch and therefore one relocation per target label. */
+static struct uasm_label labels[5];
+static struct uasm_reloc relocs[5];
+
+#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
+#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
+
+/*
+ * MIPS R6 limits the pref instruction to a 9-bit signed offset.
+ * Skip the prefetch if the offset does not fit.
+ */
+#define _uasm_i_pref(a, b, c, d) \
+do { \
+ if (cpu_has_mips_r6) { \
+ if (c <= 0xff && c >= -0x100) \
+ uasm_i_pref(a, b, c, d);\
+ } else { \
+ uasm_i_pref(a, b, c, d); \
+ } \
+} while(0)
+
+static int pref_bias_clear_store;
+static int pref_bias_copy_load;
+static int pref_bias_copy_store;
+
+static u32 pref_src_mode;
+static u32 pref_dst_mode;
+
+static int clear_word_size;
+static int copy_word_size;
+
+static int half_clear_loop_size;
+static int half_copy_loop_size;
+
+static int cache_line_size;
+#define cache_line_mask() (cache_line_size - 1)
+
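+/*
+ * Emit an address increment.  Offsets that do not fit in a 16-bit immediate
+ * are first built in register T9; the DADDI_WAR path avoids daddiu on CPUs
+ * affected by the R4000 DADDI erratum.
+ */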
+static inline void
+pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
+{
+ if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
+ if (off > 0x7fff) {
+ uasm_i_lui(buf, T9, uasm_rel_hi(off));
+ uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
+ } else
+ uasm_i_addiu(buf, T9, ZERO, off);
+ uasm_i_daddu(buf, reg1, reg2, T9);
+ } else {
+ if (off > 0x7fff) {
+ uasm_i_lui(buf, T9, uasm_rel_hi(off));
+ uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
+ UASM_i_ADDU(buf, reg1, reg2, T9);
+ } else
+ UASM_i_ADDIU(buf, reg1, reg2, off);
+ }
+}
+
+static void set_prefetch_parameters(void)
+{
+ if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
+ clear_word_size = 8;
+ else
+ clear_word_size = 4;
+
+ if (cpu_has_64bit_gp_regs)
+ copy_word_size = 8;
+ else
+ copy_word_size = 4;
+
+ /*
+	 * The prefetches used here use "streaming" hints, which cause the
+ * copied data to be kicked out of the cache sooner. A page copy often
+ * ends up copying a lot more data than is commonly used, so this seems
+ * to make sense in terms of reducing cache pollution, but I've no real
+ * performance data to back this up.
+ */
+ if (cpu_has_prefetch) {
+ /*
+ * XXX: Most prefetch bias values in here are based on
+ * guesswork.
+ */
+ cache_line_size = cpu_dcache_line_size();
+ switch (current_cpu_type()) {
+ case CPU_R5500:
+ case CPU_TX49XX:
+			/* These processors only support the Pref_Load hint. */
+ pref_bias_copy_load = 256;
+ break;
+
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
+ case CPU_R16000:
+ /*
+ * Those values have been experimentally tuned for an
+ * Origin 200.
+ */
+ pref_bias_clear_store = 512;
+ pref_bias_copy_load = 256;
+ pref_bias_copy_store = 256;
+ pref_src_mode = Pref_LoadStreamed;
+ pref_dst_mode = Pref_StoreStreamed;
+ break;
+
+ case CPU_SB1:
+ case CPU_SB1A:
+ pref_bias_clear_store = 128;
+ pref_bias_copy_load = 128;
+ pref_bias_copy_store = 128;
+ /*
+ * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
+ * hints are broken.
+ */
+ if (current_cpu_type() == CPU_SB1 &&
+ (current_cpu_data.processor_id & 0xff) < 0x02) {
+ pref_src_mode = Pref_Load;
+ pref_dst_mode = Pref_Store;
+ } else {
+ pref_src_mode = Pref_LoadStreamed;
+ pref_dst_mode = Pref_StoreStreamed;
+ }
+ break;
+
+ case CPU_LOONGSON64:
+			/* Loongson-3 only supports the Pref_Load/Pref_Store hints. */
+ pref_bias_clear_store = 128;
+ pref_bias_copy_load = 128;
+ pref_bias_copy_store = 128;
+ pref_src_mode = Pref_Load;
+ pref_dst_mode = Pref_Store;
+ break;
+
+ default:
+ pref_bias_clear_store = 128;
+ pref_bias_copy_load = 256;
+ pref_bias_copy_store = 128;
+ pref_src_mode = Pref_LoadStreamed;
+ if (cpu_has_mips_r6)
+ /*
+ * Bit 30 (Pref_PrepareForStore) has been
+ * removed from MIPS R6. Use bit 5
+ * (Pref_StoreStreamed).
+ */
+ pref_dst_mode = Pref_StoreStreamed;
+ else
+ pref_dst_mode = Pref_PrepareForStore;
+ break;
+ }
+ } else {
+ if (cpu_has_cache_cdex_s)
+ cache_line_size = cpu_scache_line_size();
+ else if (cpu_has_cache_cdex_p)
+ cache_line_size = cpu_dcache_line_size();
+ }
+ /*
+	 * Too much unrolling will overflow the space reserved between
+	 * __clear_page_start/__clear_page_end and __copy_page_start/__copy_page_end.
+ */
+ half_clear_loop_size = min(16 * clear_word_size,
+ max(cache_line_size >> 1,
+ 4 * clear_word_size));
+ half_copy_loop_size = min(16 * copy_word_size,
+ max(cache_line_size >> 1,
+ 4 * copy_word_size));
+}
+
+static void build_clear_store(u32 **buf, int off)
+{
+ if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
+ uasm_i_sd(buf, ZERO, off, A0);
+ } else {
+ uasm_i_sw(buf, ZERO, off, A0);
+ }
+}
+
+static inline void build_clear_pref(u32 **buf, int off)
+{
+ if (off & cache_line_mask())
+ return;
+
+ if (pref_bias_clear_store) {
+ _uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
+ A0);
+ } else if (cache_line_size == (half_clear_loop_size << 1)) {
+ if (cpu_has_cache_cdex_s) {
+ uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
+ } else if (cpu_has_cache_cdex_p) {
+ if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) &&
+ cpu_is_r4600_v1_x()) {
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ }
+
+ if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&
+ cpu_is_r4600_v2_x())
+ uasm_i_lw(buf, ZERO, ZERO, AT);
+
+ uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+ }
+ }
+}
+
+extern u32 __clear_page_start;
+extern u32 __clear_page_end;
+extern u32 __copy_page_start;
+extern u32 __copy_page_end;
+
+void build_clear_page(void)
+{
+ int off;
+ u32 *buf = &__clear_page_start;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+ int i;
+ static atomic_t run_once = ATOMIC_INIT(0);
+
+ if (atomic_xchg(&run_once, 1)) {
+ return;
+ }
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ set_prefetch_parameters();
+
+ /*
+ * This algorithm makes the following assumptions:
+ * - The prefetch bias is a multiple of 2 words.
+ * - The prefetch bias is less than one page.
+ */
+ BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
+ BUG_ON(PAGE_SIZE < pref_bias_clear_store);
+
+ off = PAGE_SIZE - pref_bias_clear_store;
+ if (off > 0xffff || !pref_bias_clear_store)
+ pg_addiu(&buf, A2, A0, off);
+ else
+ uasm_i_ori(&buf, A2, A0, off);
+
+ if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x())
+ uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
+
+ off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
+ * cache_line_size : 0;
+ while (off) {
+ build_clear_pref(&buf, -off);
+ off -= cache_line_size;
+ }
+ uasm_l_clear_pref(&l, buf);
+ do {
+ build_clear_pref(&buf, off);
+ build_clear_store(&buf, off);
+ off += clear_word_size;
+ } while (off < half_clear_loop_size);
+ pg_addiu(&buf, A0, A0, 2 * off);
+ off = -off;
+ do {
+ build_clear_pref(&buf, off);
+ if (off == -clear_word_size)
+ uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
+ build_clear_store(&buf, off);
+ off += clear_word_size;
+ } while (off < 0);
+
+ if (pref_bias_clear_store) {
+ pg_addiu(&buf, A2, A0, pref_bias_clear_store);
+ uasm_l_clear_nopref(&l, buf);
+ off = 0;
+ do {
+ build_clear_store(&buf, off);
+ off += clear_word_size;
+ } while (off < half_clear_loop_size);
+ pg_addiu(&buf, A0, A0, 2 * off);
+ off = -off;
+ do {
+ if (off == -clear_word_size)
+ uasm_il_bne(&buf, &r, A0, A2,
+ label_clear_nopref);
+ build_clear_store(&buf, off);
+ off += clear_word_size;
+ } while (off < 0);
+ }
+
+ uasm_i_jr(&buf, RA);
+ uasm_i_nop(&buf);
+
+ BUG_ON(buf > &__clear_page_end);
+
+ uasm_resolve_relocs(relocs, labels);
+
+ pr_debug("Synthesized clear page handler (%u instructions).\n",
+ (u32)(buf - &__clear_page_start));
+
+ pr_debug("\t.set push\n");
+ pr_debug("\t.set noreorder\n");
+ for (i = 0; i < (buf - &__clear_page_start); i++)
+ pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
+ pr_debug("\t.set pop\n");
+}
+
+static void build_copy_load(u32 **buf, int reg, int off)
+{
+ if (cpu_has_64bit_gp_regs) {
+ uasm_i_ld(buf, reg, off, A1);
+ } else {
+ uasm_i_lw(buf, reg, off, A1);
+ }
+}
+
+static void build_copy_store(u32 **buf, int reg, int off)
+{
+ if (cpu_has_64bit_gp_regs) {
+ uasm_i_sd(buf, reg, off, A0);
+ } else {
+ uasm_i_sw(buf, reg, off, A0);
+ }
+}
+
+static inline void build_copy_load_pref(u32 **buf, int off)
+{
+ if (off & cache_line_mask())
+ return;
+
+ if (pref_bias_copy_load)
+ _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
+}
+
+static inline void build_copy_store_pref(u32 **buf, int off)
+{
+ if (off & cache_line_mask())
+ return;
+
+ if (pref_bias_copy_store) {
+ _uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
+ A0);
+ } else if (cache_line_size == (half_copy_loop_size << 1)) {
+ if (cpu_has_cache_cdex_s) {
+ uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
+ } else if (cpu_has_cache_cdex_p) {
+ if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) &&
+ cpu_is_r4600_v1_x()) {
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ uasm_i_nop(buf);
+ }
+
+ if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&
+ cpu_is_r4600_v2_x())
+ uasm_i_lw(buf, ZERO, ZERO, AT);
+
+ uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+ }
+ }
+}
+
+void build_copy_page(void)
+{
+ int off;
+ u32 *buf = &__copy_page_start;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+ int i;
+ static atomic_t run_once = ATOMIC_INIT(0);
+
+ if (atomic_xchg(&run_once, 1)) {
+ return;
+ }
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ set_prefetch_parameters();
+
+ /*
+ * This algorithm makes the following assumptions:
+ * - All prefetch biases are multiples of 8 words.
+ * - The prefetch biases are less than one page.
+ * - The store prefetch bias isn't greater than the load
+ * prefetch bias.
+ */
+ BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
+ BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
+ BUG_ON(PAGE_SIZE < pref_bias_copy_load);
+ BUG_ON(pref_bias_copy_store > pref_bias_copy_load);
+
+ off = PAGE_SIZE - pref_bias_copy_load;
+ if (off > 0xffff || !pref_bias_copy_load)
+ pg_addiu(&buf, A2, A0, off);
+ else
+ uasm_i_ori(&buf, A2, A0, off);
+
+ if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x())
+ uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
+
+ off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
+ cache_line_size : 0;
+ while (off) {
+ build_copy_load_pref(&buf, -off);
+ off -= cache_line_size;
+ }
+ off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
+ cache_line_size : 0;
+ while (off) {
+ build_copy_store_pref(&buf, -off);
+ off -= cache_line_size;
+ }
+ uasm_l_copy_pref_both(&l, buf);
+ do {
+ build_copy_load_pref(&buf, off);
+ build_copy_load(&buf, T0, off);
+ build_copy_load_pref(&buf, off + copy_word_size);
+ build_copy_load(&buf, T1, off + copy_word_size);
+ build_copy_load_pref(&buf, off + 2 * copy_word_size);
+ build_copy_load(&buf, T2, off + 2 * copy_word_size);
+ build_copy_load_pref(&buf, off + 3 * copy_word_size);
+ build_copy_load(&buf, T3, off + 3 * copy_word_size);
+ build_copy_store_pref(&buf, off);
+ build_copy_store(&buf, T0, off);
+ build_copy_store_pref(&buf, off + copy_word_size);
+ build_copy_store(&buf, T1, off + copy_word_size);
+ build_copy_store_pref(&buf, off + 2 * copy_word_size);
+ build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ build_copy_store_pref(&buf, off + 3 * copy_word_size);
+ build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ off += 4 * copy_word_size;
+ } while (off < half_copy_loop_size);
+ pg_addiu(&buf, A1, A1, 2 * off);
+ pg_addiu(&buf, A0, A0, 2 * off);
+ off = -off;
+ do {
+ build_copy_load_pref(&buf, off);
+ build_copy_load(&buf, T0, off);
+ build_copy_load_pref(&buf, off + copy_word_size);
+ build_copy_load(&buf, T1, off + copy_word_size);
+ build_copy_load_pref(&buf, off + 2 * copy_word_size);
+ build_copy_load(&buf, T2, off + 2 * copy_word_size);
+ build_copy_load_pref(&buf, off + 3 * copy_word_size);
+ build_copy_load(&buf, T3, off + 3 * copy_word_size);
+ build_copy_store_pref(&buf, off);
+ build_copy_store(&buf, T0, off);
+ build_copy_store_pref(&buf, off + copy_word_size);
+ build_copy_store(&buf, T1, off + copy_word_size);
+ build_copy_store_pref(&buf, off + 2 * copy_word_size);
+ build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ build_copy_store_pref(&buf, off + 3 * copy_word_size);
+ if (off == -(4 * copy_word_size))
+ uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
+ build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ off += 4 * copy_word_size;
+ } while (off < 0);
+
+ if (pref_bias_copy_load - pref_bias_copy_store) {
+ pg_addiu(&buf, A2, A0,
+ pref_bias_copy_load - pref_bias_copy_store);
+ uasm_l_copy_pref_store(&l, buf);
+ off = 0;
+ do {
+ build_copy_load(&buf, T0, off);
+ build_copy_load(&buf, T1, off + copy_word_size);
+ build_copy_load(&buf, T2, off + 2 * copy_word_size);
+ build_copy_load(&buf, T3, off + 3 * copy_word_size);
+ build_copy_store_pref(&buf, off);
+ build_copy_store(&buf, T0, off);
+ build_copy_store_pref(&buf, off + copy_word_size);
+ build_copy_store(&buf, T1, off + copy_word_size);
+ build_copy_store_pref(&buf, off + 2 * copy_word_size);
+ build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ build_copy_store_pref(&buf, off + 3 * copy_word_size);
+ build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ off += 4 * copy_word_size;
+ } while (off < half_copy_loop_size);
+ pg_addiu(&buf, A1, A1, 2 * off);
+ pg_addiu(&buf, A0, A0, 2 * off);
+ off = -off;
+ do {
+ build_copy_load(&buf, T0, off);
+ build_copy_load(&buf, T1, off + copy_word_size);
+ build_copy_load(&buf, T2, off + 2 * copy_word_size);
+ build_copy_load(&buf, T3, off + 3 * copy_word_size);
+ build_copy_store_pref(&buf, off);
+ build_copy_store(&buf, T0, off);
+ build_copy_store_pref(&buf, off + copy_word_size);
+ build_copy_store(&buf, T1, off + copy_word_size);
+ build_copy_store_pref(&buf, off + 2 * copy_word_size);
+ build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ build_copy_store_pref(&buf, off + 3 * copy_word_size);
+ if (off == -(4 * copy_word_size))
+ uasm_il_bne(&buf, &r, A2, A0,
+ label_copy_pref_store);
+ build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ off += 4 * copy_word_size;
+ } while (off < 0);
+ }
+
+ if (pref_bias_copy_store) {
+ pg_addiu(&buf, A2, A0, pref_bias_copy_store);
+ uasm_l_copy_nopref(&l, buf);
+ off = 0;
+ do {
+ build_copy_load(&buf, T0, off);
+ build_copy_load(&buf, T1, off + copy_word_size);
+ build_copy_load(&buf, T2, off + 2 * copy_word_size);
+ build_copy_load(&buf, T3, off + 3 * copy_word_size);
+ build_copy_store(&buf, T0, off);
+ build_copy_store(&buf, T1, off + copy_word_size);
+ build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ off += 4 * copy_word_size;
+ } while (off < half_copy_loop_size);
+ pg_addiu(&buf, A1, A1, 2 * off);
+ pg_addiu(&buf, A0, A0, 2 * off);
+ off = -off;
+ do {
+ build_copy_load(&buf, T0, off);
+ build_copy_load(&buf, T1, off + copy_word_size);
+ build_copy_load(&buf, T2, off + 2 * copy_word_size);
+ build_copy_load(&buf, T3, off + 3 * copy_word_size);
+ build_copy_store(&buf, T0, off);
+ build_copy_store(&buf, T1, off + copy_word_size);
+ build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ if (off == -(4 * copy_word_size))
+ uasm_il_bne(&buf, &r, A2, A0,
+ label_copy_nopref);
+ build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ off += 4 * copy_word_size;
+ } while (off < 0);
+ }
+
+ uasm_i_jr(&buf, RA);
+ uasm_i_nop(&buf);
+
+ BUG_ON(buf > &__copy_page_end);
+
+ uasm_resolve_relocs(relocs, labels);
+
+ pr_debug("Synthesized copy page handler (%u instructions).\n",
+ (u32)(buf - &__copy_page_start));
+
+ pr_debug("\t.set push\n");
+ pr_debug("\t.set noreorder\n");
+ for (i = 0; i < (buf - &__copy_page_start); i++)
+ pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
+ pr_debug("\t.set pop\n");
+}
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+extern void clear_page_cpu(void *page);
+extern void copy_page_cpu(void *to, void *from);
+
+/*
+ * Pad descriptors to a full cacheline, since each is exclusively owned by a
+ * particular CPU.
+ */
+struct dmadscr {
+ u64 dscr_a;
+ u64 dscr_b;
+ u64 pad_a;
+ u64 pad_b;
+} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];
+
+void clear_page(void *page)
+{
+ u64 to_phys = CPHYSADDR((unsigned long)page);
+ unsigned int cpu = smp_processor_id();
+
+	/* if the page is not in KSEG0, fall back to the CPU-based clear */
+ if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
+ return clear_page_cpu(page);
+
+ page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
+ M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
+ page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
+ __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
+
+ /*
+ * Don't really want to do it this way, but there's no
+ * reliable way to delay completion detection.
+ */
+ while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
+ & M_DM_DSCR_BASE_INTERRUPT))
+ ;
+ __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
+}
+EXPORT_SYMBOL(clear_page);
+
+void copy_page(void *to, void *from)
+{
+ u64 from_phys = CPHYSADDR((unsigned long)from);
+ u64 to_phys = CPHYSADDR((unsigned long)to);
+ unsigned int cpu = smp_processor_id();
+
+	/* if either page is not in KSEG0, fall back to the CPU-based copy */
+ if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
+ || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
+ return copy_page_cpu(to, from);
+
+ page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
+ M_DM_DSCRA_INTERRUPT;
+ page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
+ __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
+
+ /*
+ * Don't really want to do it this way, but there's no
+ * reliable way to delay completion detection.
+ */
+ while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
+ & M_DM_DSCR_BASE_INTERRUPT))
+ ;
+ __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
+}
+EXPORT_SYMBOL(copy_page);
+
+#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
new file mode 100644
index 000000000..bd4b0656a
--- /dev/null
+++ b/arch/mips/mm/pgtable-32.c
@@ -0,0 +1,91 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 by Ralf Baechle
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/highmem.h>
+#include <asm/fixmap.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+void pgd_init(unsigned long page)
+{
+ unsigned long *p = (unsigned long *) page;
+ int i;
+
+ for (i = 0; i < USER_PTRS_PER_PGD; i+=8) {
+ p[i + 0] = (unsigned long) invalid_pte_table;
+ p[i + 1] = (unsigned long) invalid_pte_table;
+ p[i + 2] = (unsigned long) invalid_pte_table;
+ p[i + 3] = (unsigned long) invalid_pte_table;
+ p[i + 4] = (unsigned long) invalid_pte_table;
+ p[i + 5] = (unsigned long) invalid_pte_table;
+ p[i + 6] = (unsigned long) invalid_pte_table;
+ p[i + 7] = (unsigned long) invalid_pte_table;
+ }
+}
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
+pmd_t mk_pmd(struct page *page, pgprot_t prot)
+{
+ pmd_t pmd;
+
+ pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);
+
+ return pmd;
+}
+
+
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+ flush_tlb_all();
+}
+#endif /* defined(CONFIG_TRANSPARENT_HUGEPAGE) */
+
+void __init pagetable_init(void)
+{
+ unsigned long vaddr;
+ pgd_t *pgd_base;
+#ifdef CONFIG_HIGHMEM
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+#endif
+
+ /* Initialize the entire pgd. */
+ pgd_init((unsigned long)swapper_pg_dir);
+ pgd_init((unsigned long)swapper_pg_dir
+ + sizeof(pgd_t) * USER_PTRS_PER_PGD);
+
+ pgd_base = swapper_pg_dir;
+
+ /*
+ * Fixed mappings:
+ */
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+ fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
+
+#ifdef CONFIG_HIGHMEM
+ /*
+ * Permanent kmaps:
+ */
+ vaddr = PKMAP_BASE;
+ fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+
+ pgd = swapper_pg_dir + pgd_index(vaddr);
+ p4d = p4d_offset(pgd, vaddr);
+ pud = pud_offset(p4d, vaddr);
+ pmd = pmd_offset(pud, vaddr);
+ pte = pte_offset_kernel(pmd, vaddr);
+ pkmap_page_table = pte;
+#endif
+}
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
new file mode 100644
index 000000000..183ff9f9c
--- /dev/null
+++ b/arch/mips/mm/pgtable-64.c
@@ -0,0 +1,125 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999, 2000 by Silicon Graphics
+ * Copyright (C) 2003 by Ralf Baechle
+ */
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <asm/fixmap.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+void pgd_init(unsigned long page)
+{
+ unsigned long *p, *end;
+ unsigned long entry;
+
+#if !defined(__PAGETABLE_PUD_FOLDED)
+ entry = (unsigned long)invalid_pud_table;
+#elif !defined(__PAGETABLE_PMD_FOLDED)
+ entry = (unsigned long)invalid_pmd_table;
+#else
+ entry = (unsigned long)invalid_pte_table;
+#endif
+
+ p = (unsigned long *) page;
+ end = p + PTRS_PER_PGD;
+
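+ /*
+ * Each iteration below fills eight pgd entries; the pointer is advanced
+ * in the middle of the body so the final three stores go through small
+ * negative offsets from the already-bumped pointer.
+ */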
+ do {
+ p[0] = entry;
+ p[1] = entry;
+ p[2] = entry;
+ p[3] = entry;
+ p[4] = entry;
+ p += 8;
+ p[-3] = entry;
+ p[-2] = entry;
+ p[-1] = entry;
+ } while (p != end);
+}
+
+#ifndef __PAGETABLE_PMD_FOLDED
+void pmd_init(unsigned long addr, unsigned long pagetable)
+{
+ unsigned long *p, *end;
+
+ p = (unsigned long *) addr;
+ end = p + PTRS_PER_PMD;
+
+ do {
+ p[0] = pagetable;
+ p[1] = pagetable;
+ p[2] = pagetable;
+ p[3] = pagetable;
+ p[4] = pagetable;
+ p += 8;
+ p[-3] = pagetable;
+ p[-2] = pagetable;
+ p[-1] = pagetable;
+ } while (p != end);
+}
+EXPORT_SYMBOL_GPL(pmd_init);
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+void pud_init(unsigned long addr, unsigned long pagetable)
+{
+ unsigned long *p, *end;
+
+ p = (unsigned long *)addr;
+ end = p + PTRS_PER_PUD;
+
+ do {
+ p[0] = pagetable;
+ p[1] = pagetable;
+ p[2] = pagetable;
+ p[3] = pagetable;
+ p[4] = pagetable;
+ p += 8;
+ p[-3] = pagetable;
+ p[-2] = pagetable;
+ p[-1] = pagetable;
+ } while (p != end);
+}
+#endif
+
+pmd_t mk_pmd(struct page *page, pgprot_t prot)
+{
+ pmd_t pmd;
+
+ pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);
+
+ return pmd;
+}
+
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+ flush_tlb_all();
+}
+
+void __init pagetable_init(void)
+{
+ unsigned long vaddr;
+ pgd_t *pgd_base;
+
+ /* Initialize the entire pgd. */
+ pgd_init((unsigned long)swapper_pg_dir);
+#ifndef __PAGETABLE_PUD_FOLDED
+ pud_init((unsigned long)invalid_pud_table, (unsigned long)invalid_pmd_table);
+#endif
+#ifndef __PAGETABLE_PMD_FOLDED
+ pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
+#endif
+ pgd_base = swapper_pg_dir;
+ /*
+ * Fixed mappings:
+ */
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+ fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
+}
diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c
new file mode 100644
index 000000000..05560b042
--- /dev/null
+++ b/arch/mips/mm/pgtable.c
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <asm/pgalloc.h>
+
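+/*
+ * Allocate a fresh pgd: pgd_init() points the user entries at the invalid
+ * lower-level tables, then the kernel part (from USER_PTRS_PER_PGD up) is
+ * copied from init_mm so kernel mappings are shared by every process.
+ */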
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *ret, *init;
+
+ ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ if (ret) {
+ init = pgd_offset(&init_mm, 0UL);
+ pgd_init((unsigned long)ret);
+ memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pgd_alloc);
diff --git a/arch/mips/mm/sc-debugfs.c b/arch/mips/mm/sc-debugfs.c
new file mode 100644
index 000000000..80ff39471
--- /dev/null
+++ b/arch/mips/mm/sc-debugfs.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <asm/bcache.h>
+#include <asm/debug.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+
+static ssize_t sc_prefetch_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ bool enabled = bc_prefetch_is_enabled();
+ char buf[3];
+
+ buf[0] = enabled ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = 0;
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t sc_prefetch_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ bool enabled;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &enabled);
+ if (err)
+ return err;
+
+ if (enabled)
+ bc_prefetch_enable();
+ else
+ bc_prefetch_disable();
+
+ return count;
+}
+
+static const struct file_operations sc_prefetch_fops = {
+ .open = simple_open,
+ .llseek = default_llseek,
+ .read = sc_prefetch_read,
+ .write = sc_prefetch_write,
+};
+
+static int __init sc_debugfs_init(void)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("l2cache", mips_debugfs_dir);
+ debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir, NULL,
+ &sc_prefetch_fops);
+ return 0;
+}
+late_initcall(sc_debugfs_init);
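+
+/*
+ * With debugfs mounted at /sys/kernel/debug this is expected to appear as
+ * .../mips/l2cache/prefetch (assuming mips_debugfs_dir is the arch-wide
+ * "mips" directory): reads return Y or N, and writing any value that
+ * kstrtobool understands toggles L2 prefetching at run time.
+ */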
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
new file mode 100644
index 000000000..d7238687d
--- /dev/null
+++ b/arch/mips/mm/sc-ip22.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sc-ip22.c: Indy cache management functions.
+ *
+ * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org),
+ * derived from r4xx0.c by David S. Miller (davem@davemloft.net).
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/bcache.h>
+#include <asm/page.h>
+#include <asm/bootinfo.h>
+#include <asm/sgi/ip22.h>
+#include <asm/sgi/mc.h>
+
+/* Secondary cache size in bytes, if present. */
+static unsigned long scache_size;
+
+#undef DEBUG_CACHE
+
+#define SC_SIZE 0x00080000
+#define SC_LINE 32
+#define CI_MASK (SC_SIZE - SC_LINE)
+#define SC_INDEX(n) ((n) & CI_MASK)
+
+static inline void indy_sc_wipe(unsigned long first, unsigned long last)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " .set push # indy_sc_wipe \n"
+ " .set noreorder \n"
+ " .set mips3 \n"
+ " .set noat \n"
+ " mfc0 %2, $12 \n"
+ " li $1, 0x80 # Go 64 bit \n"
+ " mtc0 $1, $12 \n"
+ " \n"
+ " # \n"
+ " # Open code a dli $1, 0x9000000080000000 \n"
+ " # \n"
+ " # Required because binutils 2.25 will happily accept \n"
+ " # 64 bit instructions in .set mips3 mode but puke on \n"
+ " # 64 bit constants when generating 32 bit ELF \n"
+ " # \n"
+ " lui $1,0x9000 \n"
+ " dsll $1,$1,0x10 \n"
+ " ori $1,$1,0x8000 \n"
+ " dsll $1,$1,0x10 \n"
+ " \n"
+ " or %0, $1 # first line to flush \n"
+ " or %1, $1 # last line to flush \n"
+ " .set at \n"
+ " \n"
+ "1: sw $0, 0(%0) \n"
+ " bne %0, %1, 1b \n"
+ " daddu %0, 32 \n"
+ " \n"
+ " mtc0 %2, $12 # Back to 32 bit \n"
+ " nop # pipeline hazard \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " .set pop \n"
+ : "=r" (first), "=r" (last), "=&r" (tmp)
+ : "0" (first), "1" (last));
+}
+
+static void indy_sc_wback_invalidate(unsigned long addr, unsigned long size)
+{
+ unsigned long first_line, last_line;
+ unsigned long flags;
+
+#ifdef DEBUG_CACHE
+ printk("indy_sc_wback_invalidate[%08lx,%08lx]", addr, size);
+#endif
+
+ /* Catch bad driver code */
+ BUG_ON(size == 0);
+
+ /* Which lines to flush? */
+ first_line = SC_INDEX(addr);
+ last_line = SC_INDEX(addr + size - 1);
+
+ local_irq_save(flags);
+ if (first_line <= last_line) {
+ indy_sc_wipe(first_line, last_line);
+ goto out;
+ }
+
+ indy_sc_wipe(first_line, SC_SIZE - SC_LINE);
+ indy_sc_wipe(0, last_line);
+out:
+ local_irq_restore(flags);
+}
+
+static void indy_sc_enable(void)
+{
+ unsigned long addr, tmp1, tmp2;
+
+ /* This is really cool... */
+#ifdef DEBUG_CACHE
+ printk("Enabling R4600 SCACHE\n");
+#endif
+ __asm__ __volatile__(
+ ".set\tpush\n\t"
+ ".set\tnoreorder\n\t"
+ ".set\tmips3\n\t"
+ "mfc0\t%2, $12\n\t"
+ "nop; nop; nop; nop;\n\t"
+ "li\t%1, 0x80\n\t"
+ "mtc0\t%1, $12\n\t"
+ "nop; nop; nop; nop;\n\t"
+ "li\t%0, 0x1\n\t"
+ "dsll\t%0, 31\n\t"
+ "lui\t%1, 0x9000\n\t"
+ "dsll32\t%1, 0\n\t"
+ "or\t%0, %1, %0\n\t"
+ "sb\t$0, 0(%0)\n\t"
+ "mtc0\t$0, $12\n\t"
+ "nop; nop; nop; nop;\n\t"
+ "mtc0\t%2, $12\n\t"
+ "nop; nop; nop; nop;\n\t"
+ ".set\tpop"
+ : "=r" (tmp1), "=r" (tmp2), "=r" (addr));
+}
+
+static void indy_sc_disable(void)
+{
+ unsigned long tmp1, tmp2, tmp3;
+
+#ifdef DEBUG_CACHE
+ printk("Disabling R4600 SCACHE\n");
+#endif
+ __asm__ __volatile__(
+ ".set\tpush\n\t"
+ ".set\tnoreorder\n\t"
+ ".set\tmips3\n\t"
+ "li\t%0, 0x1\n\t"
+ "dsll\t%0, 31\n\t"
+ "lui\t%1, 0x9000\n\t"
+ "dsll32\t%1, 0\n\t"
+ "or\t%0, %1, %0\n\t"
+ "mfc0\t%2, $12\n\t"
+ "nop; nop; nop; nop\n\t"
+ "li\t%1, 0x80\n\t"
+ "mtc0\t%1, $12\n\t"
+ "nop; nop; nop; nop\n\t"
+ "sh\t$0, 0(%0)\n\t"
+ "mtc0\t$0, $12\n\t"
+ "nop; nop; nop; nop\n\t"
+ "mtc0\t%2, $12\n\t"
+ "nop; nop; nop; nop\n\t"
+ ".set\tpop"
+ : "=r" (tmp1), "=r" (tmp2), "=r" (tmp3));
+}
+
+static inline int __init indy_sc_probe(void)
+{
+ unsigned int size = ip22_eeprom_read(&sgimc->eeprom, 17);
+ if (size == 0)
+ return 0;
+
+ size <<= PAGE_SHIFT;
+ printk(KERN_INFO "R4600/R5000 SCACHE size %dK, linesize 32 bytes.\n",
+ size >> 10);
+ scache_size = size;
+
+ return 1;
+}
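+
+/*
+ * The EEPROM value is shifted by PAGE_SHIFT, so with 4 KiB pages a reading
+ * of 128 would, for example, report a 512 KiB secondary cache (numbers
+ * purely illustrative).
+ */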
+
+/* XXX Check with wje if the Indy caches can differentiate between
+ writeback + invalidate and just invalidate. */
+static struct bcache_ops indy_sc_ops = {
+ .bc_enable = indy_sc_enable,
+ .bc_disable = indy_sc_disable,
+ .bc_wback_inv = indy_sc_wback_invalidate,
+ .bc_inv = indy_sc_wback_invalidate
+};
+
+void indy_sc_init(void)
+{
+ if (indy_sc_probe()) {
+ indy_sc_enable();
+ bcops = &indy_sc_ops;
+ }
+}
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
new file mode 100644
index 000000000..06ec304ad
--- /dev/null
+++ b/arch/mips/mm/sc-mips.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2006 Chris Dearman (chris@mips.com),
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/cpu-type.h>
+#include <asm/mipsregs.h>
+#include <asm/bcache.h>
+#include <asm/cacheops.h>
+#include <asm/page.h>
+#include <asm/mmu_context.h>
+#include <asm/r4kcache.h>
+#include <asm/mips-cps.h>
+#include <asm/bootinfo.h>
+
+/*
+ * MIPS32/MIPS64 L2 cache handling
+ */
+
+/*
+ * Writeback and invalidate the secondary cache before DMA.
+ */
+static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
+{
+ blast_scache_range(addr, addr + size);
+}
+
+/*
+ * Invalidate the secondary cache before DMA.
+ */
+static void mips_sc_inv(unsigned long addr, unsigned long size)
+{
+ unsigned long lsize = cpu_scache_line_size();
+ unsigned long almask = ~(lsize - 1);
+
+ cache_op(Hit_Writeback_Inv_SD, addr & almask);
+ cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask);
+ blast_inv_scache_range(addr, addr + size);
+}
+
+static void mips_sc_enable(void)
+{
+ /* L2 cache is permanently enabled */
+}
+
+static void mips_sc_disable(void)
+{
+ /* L2 cache is permanently enabled */
+}
+
+static void mips_sc_prefetch_enable(void)
+{
+ unsigned long pftctl;
+
+ if (mips_cm_revision() < CM_REV_CM2_5)
+ return;
+
+ /*
+ * If one or more L2 prefetch units are present then enable
+ * prefetching for both code & data, for all ports.
+ */
+ pftctl = read_gcr_l2_pft_control();
+ if (pftctl & CM_GCR_L2_PFT_CONTROL_NPFT) {
+ pftctl &= ~CM_GCR_L2_PFT_CONTROL_PAGEMASK;
+ pftctl |= PAGE_MASK & CM_GCR_L2_PFT_CONTROL_PAGEMASK;
+ pftctl |= CM_GCR_L2_PFT_CONTROL_PFTEN;
+ write_gcr_l2_pft_control(pftctl);
+
+ set_gcr_l2_pft_control_b(CM_GCR_L2_PFT_CONTROL_B_PORTID |
+ CM_GCR_L2_PFT_CONTROL_B_CEN);
+ }
+}
+
+static void mips_sc_prefetch_disable(void)
+{
+ if (mips_cm_revision() < CM_REV_CM2_5)
+ return;
+
+ clear_gcr_l2_pft_control(CM_GCR_L2_PFT_CONTROL_PFTEN);
+ clear_gcr_l2_pft_control_b(CM_GCR_L2_PFT_CONTROL_B_PORTID |
+ CM_GCR_L2_PFT_CONTROL_B_CEN);
+}
+
+static bool mips_sc_prefetch_is_enabled(void)
+{
+ unsigned long pftctl;
+
+ if (mips_cm_revision() < CM_REV_CM2_5)
+ return false;
+
+ pftctl = read_gcr_l2_pft_control();
+ if (!(pftctl & CM_GCR_L2_PFT_CONTROL_NPFT))
+ return false;
+ return !!(pftctl & CM_GCR_L2_PFT_CONTROL_PFTEN);
+}
+
+static struct bcache_ops mips_sc_ops = {
+ .bc_enable = mips_sc_enable,
+ .bc_disable = mips_sc_disable,
+ .bc_wback_inv = mips_sc_wback_inv,
+ .bc_inv = mips_sc_inv,
+ .bc_prefetch_enable = mips_sc_prefetch_enable,
+ .bc_prefetch_disable = mips_sc_prefetch_disable,
+ .bc_prefetch_is_enabled = mips_sc_prefetch_is_enabled,
+};
+
+/*
+ * Check if the L2 cache controller is activated on a particular platform.
+ * MTI's L2 controller and the L2 cache controller of Broadcom's BMIPS
+ * cores both use bit 12 of c0_config2 as an "L2 Bypass" bit: when it is
+ * set, the cache is disabled. However, there is no guarantee that this
+ * holds on all platforms. Unfortunately the spec defines bits 12..15 as
+ * implementation defined, so the function below will eventually have to
+ * be replaced by a platform-specific probe.
+ */
+static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
+{
+ unsigned int config2 = read_c0_config2();
+ unsigned int tmp;
+
+ /* Check the bypass bit (L2B) */
+ switch (current_cpu_type()) {
+ case CPU_34K:
+ case CPU_74K:
+ case CPU_1004K:
+ case CPU_1074K:
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ case CPU_BMIPS5000:
+ case CPU_QEMU_GENERIC:
+ case CPU_P6600:
+ if (config2 & (1 << 12))
+ return 0;
+ }
+
+ tmp = (config2 >> 4) & 0x0f;
+ if (0 < tmp && tmp <= 7)
+ c->scache.linesz = 2 << tmp;
+ else
+ return 0;
+ return 1;
+}
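+
+/*
+ * For example, an SL field (config2 bits 7..4) of 4 would decode to
+ * c->scache.linesz = 2 << 4 = 32, i.e. 32-byte L2 lines (value chosen
+ * purely for illustration).
+ */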
+
+static int mips_sc_probe_cm3(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned long cfg = read_gcr_l2_config();
+ unsigned long sets, line_sz, assoc;
+
+ if (cfg & CM_GCR_L2_CONFIG_BYPASS)
+ return 0;
+
+ sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE;
+ sets >>= __ffs(CM_GCR_L2_CONFIG_SET_SIZE);
+ if (sets)
+ c->scache.sets = 64 << sets;
+
+ line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE;
+ line_sz >>= __ffs(CM_GCR_L2_CONFIG_LINE_SIZE);
+ if (line_sz)
+ c->scache.linesz = 2 << line_sz;
+
+ assoc = cfg & CM_GCR_L2_CONFIG_ASSOC;
+ assoc >>= __ffs(CM_GCR_L2_CONFIG_ASSOC);
+ c->scache.ways = assoc + 1;
+ c->scache.waysize = c->scache.sets * c->scache.linesz;
+ c->scache.waybit = __ffs(c->scache.waysize);
+
+ if (c->scache.linesz) {
+ c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
+ return 1;
+ }
+
+ return 0;
+}
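+
+/*
+ * As an illustration, a GCR L2 config with a set-size field of 4, a line
+ * field of 4 and an associativity field of 7 would decode to 1024 sets,
+ * 32-byte lines and 8 ways, i.e. a 256 KiB L2 (hypothetical values).
+ */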
+
+static inline int mips_sc_probe(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int config1, config2;
+ unsigned int tmp;
+
+ /* Mark as not present until probe completed */
+ c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
+
+ if (mips_cm_revision() >= CM_REV_CM3)
+ return mips_sc_probe_cm3();
+
+ /* Ignore anything but MIPSxx processors */
+ if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)))
+ return 0;
+
+ /* Does this MIPS32/MIPS64 CPU have a config2 register? */
+ config1 = read_c0_config1();
+ if (!(config1 & MIPS_CONF_M))
+ return 0;
+
+ config2 = read_c0_config2();
+
+ if (!mips_sc_is_activated(c))
+ return 0;
+
+ tmp = (config2 >> 8) & 0x0f;
+ if (tmp <= 7)
+ c->scache.sets = 64 << tmp;
+ else
+ return 0;
+
+ tmp = (config2 >> 0) & 0x0f;
+ if (tmp <= 7)
+ c->scache.ways = tmp + 1;
+ else
+ return 0;
+
+ if (current_cpu_type() == CPU_XBURST) {
+ switch (mips_machtype) {
+ /*
+ * According to config2 it would be 5-ways, but that is
+ * contradicted by all documentation.
+ */
+ case MACH_INGENIC_JZ4770:
+ case MACH_INGENIC_JZ4775:
+ c->scache.ways = 4;
+ break;
+
+ /*
+ * According to config2 it would be 5-ways and 512-sets,
+ * but that is contradicted by all documentation.
+ */
+ case MACH_INGENIC_X1000:
+ case MACH_INGENIC_X1000E:
+ c->scache.sets = 256;
+ c->scache.ways = 4;
+ break;
+ }
+ }
+
+ c->scache.waysize = c->scache.sets * c->scache.linesz;
+ c->scache.waybit = __ffs(c->scache.waysize);
+
+ c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+
+ return 1;
+}
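+
+/*
+ * A worked example: config2 fields of SS=3, SL=4 and SA=7 would decode to
+ * 512 sets, 32-byte lines and 8 ways, giving a 128 KiB secondary cache
+ * (illustrative values only).
+ */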
+
+int mips_sc_init(void)
+{
+ int found = mips_sc_probe();
+ if (found) {
+ mips_sc_enable();
+ mips_sc_prefetch_enable();
+ bcops = &mips_sc_ops;
+ }
+ return found;
+}
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
new file mode 100644
index 000000000..736615d68
--- /dev/null
+++ b/arch/mips/mm/sc-r5k.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org),
+ * derived from r4xx0.c by David S. Miller (davem@davemloft.net).
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/mipsregs.h>
+#include <asm/bcache.h>
+#include <asm/cacheops.h>
+#include <asm/page.h>
+#include <asm/mmu_context.h>
+#include <asm/r4kcache.h>
+
+/* Secondary cache size in bytes, if present. */
+static unsigned long scache_size;
+
+#define SC_LINE 32
+#define SC_PAGE (128*SC_LINE)
+
+static inline void blast_r5000_scache(void)
+{
+ unsigned long start = INDEX_BASE;
+ unsigned long end = start + scache_size;
+
+ while(start < end) {
+ cache_op(R5K_Page_Invalidate_S, start);
+ start += SC_PAGE;
+ }
+}
+
+static void r5k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
+{
+ unsigned long end, a;
+
+ /* Catch bad driver code */
+ BUG_ON(size == 0);
+
+ if (size >= scache_size) {
+ blast_r5000_scache();
+ return;
+ }
+
+ /* On the R5000 secondary cache we cannot
+ * invalidate less than a page at a time.
+ * The secondary cache is physically indexed, write-through.
+ */
+ a = addr & ~(SC_PAGE - 1);
+ end = (addr + size - 1) & ~(SC_PAGE - 1);
+ while (a <= end) {
+ cache_op(R5K_Page_Invalidate_S, a);
+ a += SC_PAGE;
+ }
+}
+
+static void r5k_sc_enable(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ set_c0_config(R5K_CONF_SE);
+ blast_r5000_scache();
+ local_irq_restore(flags);
+}
+
+static void r5k_sc_disable(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ blast_r5000_scache();
+ clear_c0_config(R5K_CONF_SE);
+ local_irq_restore(flags);
+}
+
+static inline int __init r5k_sc_probe(void)
+{
+ unsigned long config = read_c0_config();
+
+ if (config & CONF_SC)
+ return 0;
+
+ scache_size = (512 * 1024) << ((config & R5K_CONF_SS) >> 20);
+
+ printk("R5000 SCACHE size %ldkB, linesize 32 bytes.\n",
+ scache_size >> 10);
+
+ return 1;
+}
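+
+/*
+ * The SS field scales a 512 KiB base size by powers of two, so an SS value
+ * of 1 would, for instance, report a 1 MiB secondary cache.
+ */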
+
+static struct bcache_ops r5k_sc_ops = {
+ .bc_enable = r5k_sc_enable,
+ .bc_disable = r5k_sc_disable,
+ .bc_wback_inv = r5k_dma_cache_inv_sc,
+ .bc_inv = r5k_dma_cache_inv_sc
+};
+
+void r5k_sc_init(void)
+{
+ if (r5k_sc_probe()) {
+ r5k_sc_enable();
+ bcops = &r5k_sc_ops;
+ }
+}
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
new file mode 100644
index 000000000..e9e3777a7
--- /dev/null
+++ b/arch/mips/mm/sc-rm7k.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sc-rm7k.c: RM7000 cache management functions.
+ *
+ * Copyright (C) 1997, 2001, 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+
+#include <asm/addrspace.h>
+#include <asm/bcache.h>
+#include <asm/cacheops.h>
+#include <asm/mipsregs.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/cacheflush.h> /* for run_uncached() */
+
+/* Primary cache parameters. */
+#define sc_lsize 32
+#define tc_pagesize (32*128)
+
+/* Secondary cache parameters. */
+#define scache_size (256*1024) /* Fixed to 256KiB on RM7000 */
+
+/* Tertiary cache parameters */
+#define tc_lsize 32
+
+extern unsigned long icache_way_size, dcache_way_size;
+static unsigned long tcache_size;
+
+#include <asm/r4kcache.h>
+
+static int rm7k_tcache_init;
+
+/*
+ * Writeback and invalidate the secondary cache before DMA.
+ * (XXX These need to be fixed ...)
+ */
+static void rm7k_sc_wback_inv(unsigned long addr, unsigned long size)
+{
+ unsigned long end, a;
+
+ pr_debug("rm7k_sc_wback_inv[%08lx,%08lx]", addr, size);
+
+ /* Catch bad driver code */
+ BUG_ON(size == 0);
+
+ blast_scache_range(addr, addr + size);
+
+ if (!rm7k_tcache_init)
+ return;
+
+ a = addr & ~(tc_pagesize - 1);
+ end = (addr + size - 1) & ~(tc_pagesize - 1);
+ while(1) {
+ invalidate_tcache_page(a); /* Page_Invalidate_T */
+ if (a == end)
+ break;
+ a += tc_pagesize;
+ }
+}
+
+static void rm7k_sc_inv(unsigned long addr, unsigned long size)
+{
+ unsigned long end, a;
+
+ pr_debug("rm7k_sc_inv[%08lx,%08lx]", addr, size);
+
+ /* Catch bad driver code */
+ BUG_ON(size == 0);
+
+ blast_inv_scache_range(addr, addr + size);
+
+ if (!rm7k_tcache_init)
+ return;
+
+ a = addr & ~(tc_pagesize - 1);
+ end = (addr + size - 1) & ~(tc_pagesize - 1);
+ while(1) {
+ invalidate_tcache_page(a); /* Page_Invalidate_T */
+ if (a == end)
+ break;
+ a += tc_pagesize;
+ }
+}
+
+static void blast_rm7k_tcache(void)
+{
+ unsigned long start = CKSEG0ADDR(0);
+ unsigned long end = start + tcache_size;
+
+ write_c0_taglo(0);
+
+ while (start < end) {
+ cache_op(Page_Invalidate_T, start);
+ start += tc_pagesize;
+ }
+}
+
+/*
+ * This function is executed in uncached address space.
+ */
+static void __rm7k_tc_enable(void)
+{
+ int i;
+
+ set_c0_config(RM7K_CONF_TE);
+
+ write_c0_taglo(0);
+ write_c0_taghi(0);
+
+ for (i = 0; i < tcache_size; i += tc_lsize)
+ cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
+}
+
+static void rm7k_tc_enable(void)
+{
+ if (read_c0_config() & RM7K_CONF_TE)
+ return;
+
+ BUG_ON(tcache_size == 0);
+
+ run_uncached(__rm7k_tc_enable);
+}
+
+/*
+ * This function is executed in uncached address space.
+ */
+static void __rm7k_sc_enable(void)
+{
+ int i;
+
+ set_c0_config(RM7K_CONF_SE);
+
+ write_c0_taglo(0);
+ write_c0_taghi(0);
+
+ for (i = 0; i < scache_size; i += sc_lsize)
+ cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
+}
+
+static void rm7k_sc_enable(void)
+{
+ if (read_c0_config() & RM7K_CONF_SE)
+ return;
+
+ pr_info("Enabling secondary cache...\n");
+ run_uncached(__rm7k_sc_enable);
+
+ if (rm7k_tcache_init)
+ rm7k_tc_enable();
+}
+
+static void rm7k_tc_disable(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ blast_rm7k_tcache();
+ clear_c0_config(RM7K_CONF_TE);
+ local_irq_restore(flags);
+}
+
+static void rm7k_sc_disable(void)
+{
+ clear_c0_config(RM7K_CONF_SE);
+
+ if (rm7k_tcache_init)
+ rm7k_tc_disable();
+}
+
+static struct bcache_ops rm7k_sc_ops = {
+ .bc_enable = rm7k_sc_enable,
+ .bc_disable = rm7k_sc_disable,
+ .bc_wback_inv = rm7k_sc_wback_inv,
+ .bc_inv = rm7k_sc_inv
+};
+
+/*
+ * This is a probing function like the one found in c-r4k.c: we look for
+ * the wrap-around point with different addresses.
+ */
+static void __probe_tcache(void)
+{
+ unsigned long flags, addr, begin, end, pow2;
+
+ begin = (unsigned long) &_stext;
+ begin &= ~((8 * 1024 * 1024) - 1);
+ end = begin + (8 * 1024 * 1024);
+
+ local_irq_save(flags);
+
+ set_c0_config(RM7K_CONF_TE);
+
+ /* Fill size-multiple lines with a valid tag */
+ pow2 = (256 * 1024);
+ for (addr = begin; addr <= end; addr = (begin + pow2)) {
+ unsigned long *p = (unsigned long *) addr;
+ __asm__ __volatile__("nop" : : "r" (*p));
+ pow2 <<= 1;
+ }
+
+ /* Load first line with a 0 tag, to check after */
+ write_c0_taglo(0);
+ write_c0_taghi(0);
+ cache_op(Index_Store_Tag_T, begin);
+
+ /* Look for the wrap-around */
+ pow2 = (512 * 1024);
+ for (addr = begin + (512 * 1024); addr <= end; addr = begin + pow2) {
+ cache_op(Index_Load_Tag_T, addr);
+ if (!read_c0_taglo())
+ break;
+ pow2 <<= 1;
+ }
+
+ addr -= begin;
+ tcache_size = addr;
+
+ clear_c0_config(RM7K_CONF_TE);
+
+ local_irq_restore(flags);
+}
+
+void rm7k_sc_init(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int config = read_c0_config();
+
+ if ((config & RM7K_CONF_SC))
+ return;
+
+ c->scache.linesz = sc_lsize;
+ c->scache.ways = 4;
+ c->scache.waybit= __ffs(scache_size / c->scache.ways);
+ c->scache.waysize = scache_size / c->scache.ways;
+ c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
+ printk(KERN_INFO "Secondary cache size %dK, linesize %d bytes.\n",
+ (scache_size >> 10), sc_lsize);
+
+ if (!(config & RM7K_CONF_SE))
+ rm7k_sc_enable();
+
+ bcops = &rm7k_sc_ops;
+
+ /*
+ * While we're at it let's deal with the tertiary cache.
+ */
+
+ rm7k_tcache_init = 0;
+ tcache_size = 0;
+
+ if (config & RM7K_CONF_TC)
+ return;
+
+ /*
+ * No efficient way to ask the hardware for the size of the tcache,
+ * so we must probe for it.
+ */
+ run_uncached(__probe_tcache);
+ rm7k_tc_enable();
+ rm7k_tcache_init = 1;
+ c->tcache.linesz = tc_lsize;
+ c->tcache.ways = 1;
+ pr_info("Tertiary cache size %ldK.\n", (tcache_size >> 10));
+}
diff --git a/arch/mips/mm/tlb-funcs.S b/arch/mips/mm/tlb-funcs.S
new file mode 100644
index 000000000..00fef578c
--- /dev/null
+++ b/arch/mips/mm/tlb-funcs.S
@@ -0,0 +1,40 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Micro-assembler generated tlb handler functions.
+ *
+ * Copyright (C) 2013 Broadcom Corporation.
+ *
+ * Based on mm/page-funcs.c
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <asm/asm.h>
+#include <asm/export.h>
+#include <asm/regdef.h>
+
+#define FASTPATH_SIZE 128
+
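+/*
+ * The LEAF blocks below only reserve space; the actual instructions are
+ * synthesized at boot time by tlbex.c and written into these regions,
+ * which is why each body is essentially a .space reservation (the first
+ * one starts with a dummy branch that gets replaced).
+ */
+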
+LEAF(tlbmiss_handler_setup_pgd)
+1: j 1b /* Dummy, will be replaced. */
+ .space 64
+END(tlbmiss_handler_setup_pgd)
+EXPORT(tlbmiss_handler_setup_pgd_end)
+EXPORT_SYMBOL_GPL(tlbmiss_handler_setup_pgd)
+
+LEAF(handle_tlbm)
+ .space FASTPATH_SIZE * 4
+END(handle_tlbm)
+EXPORT(handle_tlbm_end)
+
+LEAF(handle_tlbs)
+ .space FASTPATH_SIZE * 4
+END(handle_tlbs)
+EXPORT(handle_tlbs_end)
+
+LEAF(handle_tlbl)
+ .space FASTPATH_SIZE * 4
+END(handle_tlbl)
+EXPORT(handle_tlbl_end)
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
new file mode 100644
index 000000000..a36622ebe
--- /dev/null
+++ b/arch/mips/mm/tlb-r3k.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r2300.c: R2000 and R3000 specific mmu/cache code.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ *
+ * with a lot of changes to make this thing work for R3000s
+ * Tx39XX R4k style caches added. HK
+ * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
+ * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
+ * Copyright (C) 2002 Ralf Baechle
+ * Copyright (C) 2002 Maciej W. Rozycki
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbmisc.h>
+#include <asm/isadep.h>
+#include <asm/io.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+
+#undef DEBUG_TLB
+
+extern void build_tlb_refill_handler(void);
+
+/* CP0 hazard avoidance. */
+#define BARRIER \
+ __asm__ __volatile__( \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ "nop\n\t" \
+ ".set pop\n\t")
+
+int r3k_have_wired_reg; /* Should be in cpu_data? */
+
+/* TLB operations. */
+static void local_flush_tlb_from(int entry)
+{
+ unsigned long old_ctx;
+
+ old_ctx = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
+ write_c0_entrylo0(0);
+ while (entry < current_cpu_data.tlbsize) {
+ write_c0_index(entry << 8);
+ write_c0_entryhi((entry | 0x80000) << 12);
+ entry++; /* BARRIER */
+ tlb_write_indexed();
+ }
+ write_c0_entryhi(old_ctx);
+}
+
+void local_flush_tlb_all(void)
+{
+ unsigned long flags;
+
+#ifdef DEBUG_TLB
+ printk("[tlball]");
+#endif
+ local_irq_save(flags);
+ local_flush_tlb_from(r3k_have_wired_reg ? read_c0_wired() : 8);
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
+ struct mm_struct *mm = vma->vm_mm;
+ int cpu = smp_processor_id();
+
+ if (cpu_context(cpu, mm) != 0) {
+ unsigned long size, flags;
+
+#ifdef DEBUG_TLB
+ printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
+ cpu_context(cpu, mm) & asid_mask, start, end);
+#endif
+ local_irq_save(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ if (size <= current_cpu_data.tlbsize) {
+ int oldpid = read_c0_entryhi() & asid_mask;
+ int newpid = cpu_context(cpu, mm) & asid_mask;
+
+ start &= PAGE_MASK;
+ end += PAGE_SIZE - 1;
+ end &= PAGE_MASK;
+ while (start < end) {
+ int idx;
+
+ write_c0_entryhi(start | newpid);
+ start += PAGE_SIZE; /* BARRIER */
+ tlb_probe();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entryhi(KSEG0);
+ if (idx < 0) /* BARRIER */
+ continue;
+ tlb_write_indexed();
+ }
+ write_c0_entryhi(oldpid);
+ } else {
+ drop_mmu_context(mm);
+ }
+ local_irq_restore(flags);
+ }
+}
+
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long size, flags;
+
+#ifdef DEBUG_TLB
+ printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", start, end);
+#endif
+ local_irq_save(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ if (size <= current_cpu_data.tlbsize) {
+ int pid = read_c0_entryhi();
+
+ start &= PAGE_MASK;
+ end += PAGE_SIZE - 1;
+ end &= PAGE_MASK;
+
+ while (start < end) {
+ int idx;
+
+ write_c0_entryhi(start);
+ start += PAGE_SIZE; /* BARRIER */
+ tlb_probe();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entryhi(KSEG0);
+ if (idx < 0) /* BARRIER */
+ continue;
+ tlb_write_indexed();
+ }
+ write_c0_entryhi(pid);
+ } else {
+ local_flush_tlb_all();
+ }
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
+ int cpu = smp_processor_id();
+
+ if (cpu_context(cpu, vma->vm_mm) != 0) {
+ unsigned long flags;
+ int oldpid, newpid, idx;
+
+#ifdef DEBUG_TLB
+ printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
+#endif
+ newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;
+ page &= PAGE_MASK;
+ local_irq_save(flags);
+ oldpid = read_c0_entryhi() & asid_mask;
+ write_c0_entryhi(page | newpid);
+ BARRIER;
+ tlb_probe();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entryhi(KSEG0);
+ if (idx < 0) /* BARRIER */
+ goto finish;
+ tlb_write_indexed();
+
+finish:
+ write_c0_entryhi(oldpid);
+ local_irq_restore(flags);
+ }
+}
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+ unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
+ unsigned long flags;
+ int idx, pid;
+
+ /*
+ * Handle the debugger faulting in for the debuggee.
+ */
+ if (current->active_mm != vma->vm_mm)
+ return;
+
+ pid = read_c0_entryhi() & asid_mask;
+
+#ifdef DEBUG_TLB
+ if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+ printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
+ (cpu_context(cpu, vma->vm_mm)), pid);
+ }
+#endif
+
+ local_irq_save(flags);
+ address &= PAGE_MASK;
+ write_c0_entryhi(address | pid);
+ BARRIER;
+ tlb_probe();
+ idx = read_c0_index();
+ write_c0_entrylo0(pte_val(pte));
+ write_c0_entryhi(address | pid);
+ if (idx < 0) { /* BARRIER */
+ tlb_write_random();
+ } else {
+ tlb_write_indexed();
+ }
+ write_c0_entryhi(pid);
+ local_irq_restore(flags);
+}
+
+void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
+ unsigned long entryhi, unsigned long pagemask)
+{
+ unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
+ unsigned long flags;
+ unsigned long old_ctx;
+ static unsigned long wired = 0;
+
+ if (r3k_have_wired_reg) { /* TX39XX */
+ unsigned long old_pagemask;
+ unsigned long w;
+
+#ifdef DEBUG_TLB
+ printk("[tlbwired<entry lo0 %8x, hi %8x\n, pagemask %8x>]\n",
+ entrylo0, entryhi, pagemask);
+#endif
+
+ local_irq_save(flags);
+ /* Save old context and create impossible VPN2 value */
+ old_ctx = read_c0_entryhi() & asid_mask;
+ old_pagemask = read_c0_pagemask();
+ w = read_c0_wired();
+ write_c0_wired(w + 1);
+ write_c0_index(w << 8);
+ write_c0_pagemask(pagemask);
+ write_c0_entryhi(entryhi);
+ write_c0_entrylo0(entrylo0);
+ BARRIER;
+ tlb_write_indexed();
+
+ write_c0_entryhi(old_ctx);
+ write_c0_pagemask(old_pagemask);
+ local_flush_tlb_all();
+ local_irq_restore(flags);
+
+ } else if (wired < 8) {
+#ifdef DEBUG_TLB
+ printk("[tlbwired<entry lo0 %8x, hi %8x\n>]\n",
+ entrylo0, entryhi);
+#endif
+
+ local_irq_save(flags);
+ old_ctx = read_c0_entryhi() & asid_mask;
+ write_c0_entrylo0(entrylo0);
+ write_c0_entryhi(entryhi);
+ write_c0_index(wired);
+ wired++; /* BARRIER */
+ tlb_write_indexed();
+ write_c0_entryhi(old_ctx);
+ local_flush_tlb_all();
+ local_irq_restore(flags);
+ }
+}
+
+void tlb_init(void)
+{
+ switch (current_cpu_type()) {
+ case CPU_TX3922:
+ case CPU_TX3927:
+ r3k_have_wired_reg = 1;
+ write_c0_wired(0); /* Set to 8 on reset... */
+ break;
+ }
+ local_flush_tlb_from(0);
+ build_tlb_refill_handler();
+}
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
new file mode 100644
index 000000000..1b939abbe
--- /dev/null
+++ b/arch/mips/mm/tlb-r4k.c
@@ -0,0 +1,583 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/cpu_pm.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/export.h>
+
+#include <asm/cpu.h>
+#include <asm/cpu-type.h>
+#include <asm/bootinfo.h>
+#include <asm/hazards.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+#include <asm/tlbmisc.h>
+
+extern void build_tlb_refill_handler(void);
+
+/*
+ * LOONGSON-2 has a 4-entry itlb which is a subset of the jtlb; LOONGSON-3 has
+ * a 4-entry itlb and a 4-entry dtlb which are subsets of the jtlb. Unfortunately,
+ * itlb/dtlb are not totally transparent to software.
+ */
+static inline void flush_micro_tlb(void)
+{
+ switch (current_cpu_type()) {
+ case CPU_LOONGSON2EF:
+ write_c0_diag(LOONGSON_DIAG_ITLB);
+ break;
+ case CPU_LOONGSON64:
+ write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
+ break;
+ default:
+ break;
+ }
+}
+
+static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_EXEC)
+ flush_micro_tlb();
+}
+
+void local_flush_tlb_all(void)
+{
+ unsigned long flags;
+ unsigned long old_ctx;
+ int entry, ftlbhighset;
+
+ local_irq_save(flags);
+ /* Save old context and create impossible VPN2 value */
+ old_ctx = read_c0_entryhi();
+ htw_stop();
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+
+ entry = num_wired_entries();
+
+ /*
+ * Blast 'em all away.
+ * If there are any wired entries, fall back to iterating
+ */
+ if (cpu_has_tlbinv && !entry) {
+ if (current_cpu_data.tlbsizevtlb) {
+ write_c0_index(0);
+ mtc0_tlbw_hazard();
+ tlbinvf(); /* invalidate VTLB */
+ }
+ ftlbhighset = current_cpu_data.tlbsizevtlb +
+ current_cpu_data.tlbsizeftlbsets;
+ for (entry = current_cpu_data.tlbsizevtlb;
+ entry < ftlbhighset;
+ entry++) {
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+ tlbinvf(); /* invalidate one FTLB set */
+ }
+ } else {
+ while (entry < current_cpu_data.tlbsize) {
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ entry++;
+ }
+ }
+ tlbw_use_hazard();
+ write_c0_entryhi(old_ctx);
+ htw_start();
+ flush_micro_tlb();
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(local_flush_tlb_all);
+
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int cpu = smp_processor_id();
+
+ if (cpu_context(cpu, mm) != 0) {
+ unsigned long size, flags;
+
+ local_irq_save(flags);
+ start = round_down(start, PAGE_SIZE << 1);
+ end = round_up(end, PAGE_SIZE << 1);
+ size = (end - start) >> (PAGE_SHIFT + 1);
+ if (size <= (current_cpu_data.tlbsizeftlbsets ?
+ current_cpu_data.tlbsize / 8 :
+ current_cpu_data.tlbsize / 2)) {
+ unsigned long old_entryhi, old_mmid;
+ int newpid = cpu_asid(cpu, mm);
+
+ old_entryhi = read_c0_entryhi();
+ if (cpu_has_mmid) {
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(newpid);
+ }
+
+ htw_stop();
+ while (start < end) {
+ int idx;
+
+ if (cpu_has_mmid)
+ write_c0_entryhi(start);
+ else
+ write_c0_entryhi(start | newpid);
+ start += (PAGE_SIZE << 1);
+ mtc0_tlbw_hazard();
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+ if (idx < 0)
+ continue;
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ }
+ tlbw_use_hazard();
+ write_c0_entryhi(old_entryhi);
+ if (cpu_has_mmid)
+ write_c0_memorymapid(old_mmid);
+ htw_start();
+ } else {
+ drop_mmu_context(mm);
+ }
+ flush_micro_tlb();
+ local_irq_restore(flags);
+ }
+}
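+
+/*
+ * The size check above is a heuristic: once the range covers more than
+ * half of the TLB entries (or an eighth when FTLB sets are present) it is
+ * cheaper to drop the whole MMU context than to probe page by page.
+ */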
+
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long size, flags;
+
+ local_irq_save(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ size = (size + 1) >> 1;
+ if (size <= (current_cpu_data.tlbsizeftlbsets ?
+ current_cpu_data.tlbsize / 8 :
+ current_cpu_data.tlbsize / 2)) {
+ int pid = read_c0_entryhi();
+
+ start &= (PAGE_MASK << 1);
+ end += ((PAGE_SIZE << 1) - 1);
+ end &= (PAGE_MASK << 1);
+ htw_stop();
+
+ while (start < end) {
+ int idx;
+
+ write_c0_entryhi(start);
+ start += (PAGE_SIZE << 1);
+ mtc0_tlbw_hazard();
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+ if (idx < 0)
+ continue;
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ }
+ tlbw_use_hazard();
+ write_c0_entryhi(pid);
+ htw_start();
+ } else {
+ local_flush_tlb_all();
+ }
+ flush_micro_tlb();
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ int cpu = smp_processor_id();
+
+ if (cpu_context(cpu, vma->vm_mm) != 0) {
+ unsigned long old_mmid;
+ unsigned long flags, old_entryhi;
+ int idx;
+
+ page &= (PAGE_MASK << 1);
+ local_irq_save(flags);
+ old_entryhi = read_c0_entryhi();
+ htw_stop();
+ if (cpu_has_mmid) {
+ old_mmid = read_c0_memorymapid();
+ write_c0_entryhi(page);
+ write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
+ } else {
+ write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
+ }
+ mtc0_tlbw_hazard();
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+ if (idx < 0)
+ goto finish;
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+
+ finish:
+ write_c0_entryhi(old_entryhi);
+ if (cpu_has_mmid)
+ write_c0_memorymapid(old_mmid);
+ htw_start();
+ flush_micro_tlb_vm(vma);
+ local_irq_restore(flags);
+ }
+}
+
+/*
+ * This one is only used for pages with the global bit set, so we don't care
+ * much about the ASID.
+ */
+void local_flush_tlb_one(unsigned long page)
+{
+ unsigned long flags;
+ int oldpid, idx;
+
+ local_irq_save(flags);
+ oldpid = read_c0_entryhi();
+ htw_stop();
+ page &= (PAGE_MASK << 1);
+ write_c0_entryhi(page);
+ mtc0_tlbw_hazard();
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+ if (idx >= 0) {
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+ }
+ write_c0_entryhi(oldpid);
+ htw_start();
+ flush_micro_tlb();
+ local_irq_restore(flags);
+}
+
+/*
+ * We will need multiple versions of update_mmu_cache(), one that just
+ * updates the TLB with the new pte(s), and another which also checks
+ * for the R4k "end of page" hardware bug and applies the needed workaround.
+ */
+void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
+{
+ unsigned long flags;
+ pgd_t *pgdp;
+ p4d_t *p4dp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ int idx, pid;
+
+ /*
+ * Handle the debugger faulting in for the debuggee.
+ */
+ if (current->active_mm != vma->vm_mm)
+ return;
+
+ local_irq_save(flags);
+
+ htw_stop();
+ address &= (PAGE_MASK << 1);
+ if (cpu_has_mmid) {
+ write_c0_entryhi(address);
+ } else {
+ pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
+ write_c0_entryhi(address | pid);
+ }
+ pgdp = pgd_offset(vma->vm_mm, address);
+ mtc0_tlbw_hazard();
+ tlb_probe();
+ tlb_probe_hazard();
+ p4dp = p4d_offset(pgdp, address);
+ pudp = pud_offset(p4dp, address);
+ pmdp = pmd_offset(pudp, address);
+ idx = read_c0_index();
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ /* this could be a huge page */
+ if (pmd_huge(*pmdp)) {
+ unsigned long lo;
+ write_c0_pagemask(PM_HUGE_MASK);
+ ptep = (pte_t *)pmdp;
+ lo = pte_to_entrylo(pte_val(*ptep));
+ write_c0_entrylo0(lo);
+ write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
+
+ mtc0_tlbw_hazard();
+ if (idx < 0)
+ tlb_write_random();
+ else
+ tlb_write_indexed();
+ tlbw_use_hazard();
+ write_c0_pagemask(PM_DEFAULT_MASK);
+ } else
+#endif
+ {
+ ptep = pte_offset_map(pmdp, address);
+
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#ifdef CONFIG_XPA
+ write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
+ if (cpu_has_xpa)
+ writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+ ptep++;
+ write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
+ if (cpu_has_xpa)
+ writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+#else
+ write_c0_entrylo0(ptep->pte_high);
+ ptep++;
+ write_c0_entrylo1(ptep->pte_high);
+#endif
+#else
+ write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
+ write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
+#endif
+ mtc0_tlbw_hazard();
+ if (idx < 0)
+ tlb_write_random();
+ else
+ tlb_write_indexed();
+ }
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb_vm(vma);
+ local_irq_restore(flags);
+}
+
+void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
+ unsigned long entryhi, unsigned long pagemask)
+{
+#ifdef CONFIG_XPA
+ panic("Broken for XPA kernels");
+#else
+ unsigned int old_mmid;
+ unsigned long flags;
+ unsigned long wired;
+ unsigned long old_pagemask;
+ unsigned long old_ctx;
+
+ local_irq_save(flags);
+ if (cpu_has_mmid) {
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(MMID_KERNEL_WIRED);
+ }
+ /* Save old context and create impossible VPN2 value */
+ old_ctx = read_c0_entryhi();
+ htw_stop();
+ old_pagemask = read_c0_pagemask();
+ wired = num_wired_entries();
+ write_c0_wired(wired + 1);
+ write_c0_index(wired);
+ tlbw_use_hazard(); /* What is the hazard here? */
+ write_c0_pagemask(pagemask);
+ write_c0_entryhi(entryhi);
+ write_c0_entrylo0(entrylo0);
+ write_c0_entrylo1(entrylo1);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+
+ write_c0_entryhi(old_ctx);
+ if (cpu_has_mmid)
+ write_c0_memorymapid(old_mmid);
+ tlbw_use_hazard(); /* What is the hazard here? */
+ htw_start();
+ write_c0_pagemask(old_pagemask);
+ local_flush_tlb_all();
+ local_irq_restore(flags);
+#endif
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+int has_transparent_hugepage(void)
+{
+ static unsigned int mask = -1;
+
+ if (mask == -1) { /* first call comes during __init */
+ unsigned long flags;
+
+ local_irq_save(flags);
+ write_c0_pagemask(PM_HUGE_MASK);
+ back_to_back_c0_hazard();
+ mask = read_c0_pagemask();
+ write_c0_pagemask(PM_DEFAULT_MASK);
+ local_irq_restore(flags);
+ }
+ return mask == PM_HUGE_MASK;
+}
+EXPORT_SYMBOL(has_transparent_hugepage);
+
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/*
+ * Used for loading TLB entries before trap_init() has started, when we
+ * don't actually want to add a wired entry which remains throughout the
+ * lifetime of the system.
+ */
+
+int temp_tlb_entry;
+
+__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
+ unsigned long entryhi, unsigned long pagemask)
+{
+ int ret = 0;
+ unsigned long flags;
+ unsigned long wired;
+ unsigned long old_pagemask;
+ unsigned long old_ctx;
+
+ local_irq_save(flags);
+ /* Save old context and create impossible VPN2 value */
+ htw_stop();
+ old_ctx = read_c0_entryhi();
+ old_pagemask = read_c0_pagemask();
+ wired = num_wired_entries();
+ if (--temp_tlb_entry < wired) {
+ printk(KERN_WARNING
+ "No TLB space left for add_temporary_entry\n");
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ write_c0_index(temp_tlb_entry);
+ write_c0_pagemask(pagemask);
+ write_c0_entryhi(entryhi);
+ write_c0_entrylo0(entrylo0);
+ write_c0_entrylo1(entrylo1);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+
+ write_c0_entryhi(old_ctx);
+ write_c0_pagemask(old_pagemask);
+ htw_start();
+out:
+ local_irq_restore(flags);
+ return ret;
+}
+
+static int ntlb;
+static int __init set_ntlb(char *str)
+{
+ get_option(&str, &ntlb);
+ return 1;
+}
+
+__setup("ntlb=", set_ntlb);
+
+/*
+ * Configure TLB (for init or after a CPU has been powered off).
+ */
+static void r4k_tlb_configure(void)
+{
+ /*
+ * You should never change this register:
+ * - On R4600 1.7 the tlbp never hits for pages smaller than
+ * the value in the c0_pagemask register.
+ * - The entire mm handling assumes the c0_pagemask register to
+ * be set to fixed-size pages.
+ */
+ write_c0_pagemask(PM_DEFAULT_MASK);
+ back_to_back_c0_hazard();
+ if (read_c0_pagemask() != PM_DEFAULT_MASK)
+ panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
+
+ write_c0_wired(0);
+ if (current_cpu_type() == CPU_R10000 ||
+ current_cpu_type() == CPU_R12000 ||
+ current_cpu_type() == CPU_R14000 ||
+ current_cpu_type() == CPU_R16000)
+ write_c0_framemask(0);
+
+ if (cpu_has_rixi) {
+ /*
+ * Enable the no read, no exec bits, and enable large physical
+ * address.
+ */
+#ifdef CONFIG_64BIT
+ set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
+#else
+ set_c0_pagegrain(PG_RIE | PG_XIE);
+#endif
+ }
+
+ temp_tlb_entry = current_cpu_data.tlbsize - 1;
+
+ /* From this point on the ARC firmware is dead. */
+ local_flush_tlb_all();
+
+ /* Did I tell you that ARC SUCKS? */
+}
+
+void tlb_init(void)
+{
+ r4k_tlb_configure();
+
+ if (ntlb) {
+ if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
+ int wired = current_cpu_data.tlbsize - ntlb;
+ write_c0_wired(wired);
+ write_c0_index(wired-1);
+ printk("Restricting TLB to %d entries\n", ntlb);
+ } else
+ printk("Ignoring invalid argument ntlb=%d\n", ntlb);
+ }
+
+ build_tlb_refill_handler();
+}
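+
+/*
+ * For example, booting a CPU with a 64-entry TLB and ntlb=16 would wire the
+ * first 48 entries, leaving only 16 for random replacement; useful mainly
+ * for exercising TLB-pressure behaviour (numbers purely illustrative).
+ */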
+
+static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ r4k_tlb_configure();
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block r4k_tlb_pm_notifier_block = {
+ .notifier_call = r4k_tlb_pm_notifier,
+};
+
+static int __init r4k_tlb_init_pm(void)
+{
+ return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
+}
+arch_initcall(r4k_tlb_init_pm);
diff --git a/arch/mips/mm/tlbex-fault.S b/arch/mips/mm/tlbex-fault.S
new file mode 100644
index 000000000..77db401fc
--- /dev/null
+++ b/arch/mips/mm/tlbex-fault.S
@@ -0,0 +1,28 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+ .macro tlb_do_page_fault, write
+ NESTED(tlb_do_page_fault_\write, PT_SIZE, sp)
+ .cfi_signal_frame
+ SAVE_ALL docfi=1
+ MFC0 a2, CP0_BADVADDR
+ KMODE
+ move a0, sp
+ REG_S a2, PT_BVADDR(sp)
+ li a1, \write
+ jal do_page_fault
+ j ret_from_exception
+ END(tlb_do_page_fault_\write)
+ .endm
+
+ tlb_do_page_fault 0
+ tlb_do_page_fault 1
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
new file mode 100644
index 000000000..e8e3635dd
--- /dev/null
+++ b/arch/mips/mm/tlbex.c
@@ -0,0 +1,2661 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Synthesize TLB refill handlers at runtime.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2008, 2009 Cavium Networks, Inc.
+ * Copyright (C) 2011 MIPS Technologies, Inc.
+ *
+ * ... and the days got worse and worse and now you see
+ * I've gone completely out of my mind.
+ *
+ * They're coming to take me a away haha
+ * they're coming to take me a away hoho hihi haha
+ * to the funny farm where code is beautiful all the time ...
+ *
+ * (Condolences to Napoleon XIV)
+ */
+
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+#include <linux/cache.h>
+#include <linux/pgtable.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpu-type.h>
+#include <asm/mmu_context.h>
+#include <asm/war.h>
+#include <asm/uasm.h>
+#include <asm/setup.h>
+#include <asm/tlbex.h>
+
+static int mips_xpa_disabled;
+
+static int __init xpa_disable(char *s)
+{
+ mips_xpa_disabled = 1;
+
+ return 1;
+}
+
+__setup("noxpa", xpa_disable);
+
+/*
+ * TLB load/store/modify handlers.
+ *
+ * Only the fastpath gets synthesized at runtime, the slowpath for
+ * do_page_fault remains normal asm.
+ */
+extern void tlb_do_page_fault_0(void);
+extern void tlb_do_page_fault_1(void);
+
+struct work_registers {
+ int r1;
+ int r2;
+ int r3;
+};
+
+struct tlb_reg_save {
+ unsigned long a;
+ unsigned long b;
+} ____cacheline_aligned_in_smp;
+
+static struct tlb_reg_save handler_reg_save[NR_CPUS];
+
+static inline int r45k_bvahwbug(void)
+{
+ /* XXX: We should probe for the presence of this bug, but we don't. */
+ return 0;
+}
+
+static inline int r4k_250MHZhwbug(void)
+{
+ /* XXX: We should probe for the presence of this bug, but we don't. */
+ return 0;
+}
+
+extern int sb1250_m3_workaround_needed(void);
+
+static inline int __maybe_unused bcm1250_m3_war(void)
+{
+ if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
+ return sb1250_m3_workaround_needed();
+ return 0;
+}
+
+static inline int __maybe_unused r10000_llsc_war(void)
+{
+ return IS_ENABLED(CONFIG_WAR_R10000_LLSC);
+}
+
+static int use_bbit_insns(void)
+{
+ switch (current_cpu_type()) {
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ case CPU_CAVIUM_OCTEON3:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int use_lwx_insns(void)
+{
+ switch (current_cpu_type()) {
+ case CPU_CAVIUM_OCTEON2:
+ case CPU_CAVIUM_OCTEON3:
+ return 1;
+ default:
+ return 0;
+ }
+}
+#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
+ CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
+static bool scratchpad_available(void)
+{
+ return true;
+}
+static int scratchpad_offset(int i)
+{
+ /*
+ * CVMSEG starts at address -32768 and extends for
+ * CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 128-byte cache lines.
+ */
+ i += 1; /* Kernel use starts at the top and works down. */
+ return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
+}
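+
+/*
+ * For instance, with CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE = 2 the region spans
+ * -32768 .. -32513, and scratchpad_offset(0) evaluates to
+ * 2 * 128 - 8 - 32768 = -32520, the topmost 8-byte slot; larger i values
+ * hand out slots working downwards (illustrative configuration).
+ */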
+#else
+static bool scratchpad_available(void)
+{
+ return false;
+}
+static int scratchpad_offset(int i)
+{
+ BUG();
+ /* Really unreachable, but evidently some GCC versions want this. */
+ return 0;
+}
+#endif
+/*
+ * Found by experiment: at least some revisions of the 4kc throw a
+ * machine check exception under some circumstances, triggered by invalid
+ * values in the index register. Delaying the tlbp instruction until
+ * after the next branch, plus adding an additional nop in front of
+ * tlbwi/tlbwr avoids the invalid index register values. Nobody knows
+ * why; it's not an issue caused by the core RTL.
+ */
+static int m4kc_tlbp_war(void)
+{
+ return current_cpu_type() == CPU_4KC;
+}
+
+/* Handle labels (which must be positive integers). */
+enum label_id {
+ label_second_part = 1,
+ label_leave,
+ label_vmalloc,
+ label_vmalloc_done,
+ label_tlbw_hazard_0,
+ label_split = label_tlbw_hazard_0 + 8,
+ label_tlbl_goaround1,
+ label_tlbl_goaround2,
+ label_nopage_tlbl,
+ label_nopage_tlbs,
+ label_nopage_tlbm,
+ label_smp_pgtable_change,
+ label_r3000_write_probe_fail,
+ label_large_segbits_fault,
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ label_tlb_huge_update,
+#endif
+};
+
+UASM_L_LA(_second_part)
+UASM_L_LA(_leave)
+UASM_L_LA(_vmalloc)
+UASM_L_LA(_vmalloc_done)
+/* _tlbw_hazard_x is handled differently. */
+UASM_L_LA(_split)
+UASM_L_LA(_tlbl_goaround1)
+UASM_L_LA(_tlbl_goaround2)
+UASM_L_LA(_nopage_tlbl)
+UASM_L_LA(_nopage_tlbs)
+UASM_L_LA(_nopage_tlbm)
+UASM_L_LA(_smp_pgtable_change)
+UASM_L_LA(_r3000_write_probe_fail)
+UASM_L_LA(_large_segbits_fault)
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+UASM_L_LA(_tlb_huge_update)
+#endif
+
+static int hazard_instance;
+
+static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
+{
+ switch (instance) {
+ case 0 ... 7:
+ uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
+ return;
+ default:
+ BUG();
+ }
+}
+
+static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
+{
+ switch (instance) {
+ case 0 ... 7:
+ uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
+ break;
+ default:
+ BUG();
+ }
+}
+
+/*
+ * pgtable bits are assigned dynamically depending on processor features
+ * and statically based on kernel configuration. This spits out the actual
+ * values the kernel is using. Required to make sense of disassembled
+ * TLB exception handlers.
+ */
+static void output_pgtable_bits_defines(void)
+{
+#define pr_define(fmt, ...) \
+ pr_debug("#define " fmt, ##__VA_ARGS__)
+
+ pr_debug("#include <asm/asm.h>\n");
+ pr_debug("#include <asm/regdef.h>\n");
+ pr_debug("\n");
+
+ pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
+ pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
+ pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
+ pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
+ pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
+#endif
+#ifdef _PAGE_NO_EXEC_SHIFT
+ if (cpu_has_rixi)
+ pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
+#endif
+ pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
+ pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
+ pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
+ pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
+ pr_debug("\n");
+}
+
+static inline void dump_handler(const char *symbol, const void *start, const void *end)
+{
+ unsigned int count = (end - start) / sizeof(u32);
+ const u32 *handler = start;
+ int i;
+
+ pr_debug("LEAF(%s)\n", symbol);
+
+ pr_debug("\t.set push\n");
+ pr_debug("\t.set noreorder\n");
+
+ for (i = 0; i < count; i++)
+ pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);
+
+ pr_debug("\t.set\tpop\n");
+
+ pr_debug("\tEND(%s)\n", symbol);
+}
+
+/* The only general purpose registers allowed in TLB handlers. */
+#define K0 26
+#define K1 27
+
+/* Some CP0 registers */
+#define C0_INDEX 0, 0
+#define C0_ENTRYLO0 2, 0
+#define C0_TCBIND 2, 2
+#define C0_ENTRYLO1 3, 0
+#define C0_CONTEXT 4, 0
+#define C0_PAGEMASK 5, 0
+#define C0_PWBASE 5, 5
+#define C0_PWFIELD 5, 6
+#define C0_PWSIZE 5, 7
+#define C0_PWCTL 6, 6
+#define C0_BADVADDR 8, 0
+#define C0_PGD 9, 7
+#define C0_ENTRYHI 10, 0
+#define C0_EPC 14, 0
+#define C0_XCONTEXT 20, 0
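+
+/*
+ * Each define above is a (CP0 register number, select) pair; the uasm
+ * MFC0/MTC0 helpers take the two values as separate arguments.
+ */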
+
+#ifdef CONFIG_64BIT
+# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
+#else
+# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
+#endif
+
+/* The worst case length of the handler is around 18 instructions for
+ * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
+ * Maximum space available is 32 instructions for R3000 and 64
+ * instructions for R4000.
+ *
+ * We deliberately chose a buffer size of 128, so we won't scribble
+ * over anything important on overflow before we panic.
+ */
+static u32 tlb_handler[128];
+
+/* simply assume worst case size for labels and relocs */
+static struct uasm_label labels[128];
+static struct uasm_reloc relocs[128];
+
+static int check_for_high_segbits;
+static bool fill_includes_sw_bits;
+
+static unsigned int kscratch_used_mask;
+
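+/*
+ * CP0 register holding the kernel scratch registers: the architectural
+ * KScratch set lives in register $31, while Netlogic XLP/XLR parts use
+ * register $22 instead.
+ */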
+static inline int __maybe_unused c0_kscratch(void)
+{
+ switch (current_cpu_type()) {
+ case CPU_XLP:
+ case CPU_XLR:
+ return 22;
+ default:
+ return 31;
+ }
+}
+
+static int allocate_kscratch(void)
+{
+ int r;
+ unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
+
+ r = ffs(a);
+
+ if (r == 0)
+ return -1;
+
+ r--; /* make it zero based */
+
+ kscratch_used_mask |= (1 << r);
+
+ return r;
+}
+
+static int scratch_reg;
+int pgd_reg;
+EXPORT_SYMBOL_GPL(pgd_reg);
+enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
+
+static struct work_registers build_get_work_registers(u32 **p)
+{
+ struct work_registers r;
+
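+ /*
+ * If a KScratch register is available, $1 is stashed there and
+ * K0/K1/$1 are handed back as work registers; otherwise $1 and $2
+ * are spilled to the per-CPU handler_reg_save area and K1/$1/$2 are
+ * used instead.
+ */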
+ if (scratch_reg >= 0) {
+ /* Save in CPU local C0_KScratch? */
+ UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
+ r.r1 = K0;
+ r.r2 = K1;
+ r.r3 = 1;
+ return r;
+ }
+
+ if (num_possible_cpus() > 1) {
+ /* Get smp_processor_id */
+ UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
+ UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);
+
+ /* handler_reg_save index in K0 */
+ UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
+
+ UASM_i_LA(p, K1, (long)&handler_reg_save);
+ UASM_i_ADDU(p, K0, K0, K1);
+ } else {
+ UASM_i_LA(p, K0, (long)&handler_reg_save);
+ }
+ /* K0 now points to save area, save $1 and $2 */
+ UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
+ UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
+
+ r.r1 = K1;
+ r.r2 = 1;
+ r.r3 = 2;
+ return r;
+}
+
+static void build_restore_work_registers(u32 **p)
+{
+ if (scratch_reg >= 0) {
+ uasm_i_ehb(p);
+ UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
+ return;
+ }
+ /* K0 already points to save area, restore $1 and $2 */
+ UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
+ UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
+}
+
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+
+/*
+ * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and the lack of pgd_current,
+ * so we cannot do the R3000 variant under these circumstances.
+ *
+ * The R3000 TLB handler is simple.
+ */
+static void build_r3000_tlb_refill_handler(void)
+{
+ long pgdc = (long)pgd_current;
+ u32 *p;
+
+ memset(tlb_handler, 0, sizeof(tlb_handler));
+ p = tlb_handler;
+
+ uasm_i_mfc0(&p, K0, C0_BADVADDR);
+ uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
+ uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
+ uasm_i_srl(&p, K0, K0, 22); /* load delay */
+ uasm_i_sll(&p, K0, K0, 2);
+ uasm_i_addu(&p, K1, K1, K0);
+ uasm_i_mfc0(&p, K0, C0_CONTEXT);
+ uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
+ uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
+ uasm_i_addu(&p, K1, K1, K0);
+ uasm_i_lw(&p, K0, 0, K1);
+ uasm_i_nop(&p); /* load delay */
+ uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
+ uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
+ uasm_i_tlbwr(&p); /* cp0 delay */
+ uasm_i_jr(&p, K1);
+ uasm_i_rfe(&p); /* branch delay */
+
+ if (p > tlb_handler + 32)
+ panic("TLB refill handler space exceeded");
+
+ pr_debug("Wrote TLB refill handler (%u instructions).\n",
+ (unsigned int)(p - tlb_handler));
+
+ memcpy((void *)ebase, tlb_handler, 0x80);
+ local_flush_icache_range(ebase, ebase + 0x80);
+ dump_handler("r3000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x80));
+}
+#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
+
+/*
+ * The R4000 TLB handler is much more complicated. We have two
+ * consecutive handler areas with 32 instruction slots each.
+ * Since they aren't used at the same time, we can overflow into the
+ * other one. To keep things simple, we first assume linear space,
+ * then we relocate it to the final handler layout as needed.
+ */
+static u32 final_handler[64];
+
+/*
+ * Hazards
+ *
+ * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
+ * 2. A timing hazard exists for the TLBP instruction.
+ *
+ * stalling_instruction
+ * TLBP
+ *
+ * The JTLB is being read for the TLBP throughout the stall generated by the
+ * previous instruction. This is not really correct as the stalling instruction
+ * can modify the address used to access the JTLB. The failure symptom is that
+ * the TLBP instruction will use an address created for the stalling instruction
+ * and not the address held in C0_ENHI and thus report the wrong results.
+ *
+ * The software work-around is to not allow the instruction preceding the TLBP
+ * to stall - make it an NOP or some other instruction guaranteed not to stall.
+ *
+ * Erratum 2 will not be fixed. This erratum is also present on the R5000.
+ *
+ * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
+ */
+static void __maybe_unused build_tlb_probe_entry(u32 **p)
+{
+ switch (current_cpu_type()) {
+ /* Found by experiment: R4600 v2.0/R4700 needs this, too. */
+ case CPU_R4600:
+ case CPU_R4700:
+ case CPU_R5000:
+ case CPU_NEVADA:
+ uasm_i_nop(p);
+ uasm_i_tlbp(p);
+ break;
+
+ default:
+ uasm_i_tlbp(p);
+ break;
+ }
+}
+
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+ struct uasm_reloc **r,
+ enum tlb_write_entry wmode)
+{
+ void(*tlbw)(u32 **) = NULL;
+
+ switch (wmode) {
+ case tlb_random: tlbw = uasm_i_tlbwr; break;
+ case tlb_indexed: tlbw = uasm_i_tlbwi; break;
+ }
+
+ if (cpu_has_mips_r2_r6) {
+ if (cpu_has_mips_r2_exec_hazard)
+ uasm_i_ehb(p);
+ tlbw(p);
+ return;
+ }
+
+ switch (current_cpu_type()) {
+ case CPU_R4000PC:
+ case CPU_R4000SC:
+ case CPU_R4000MC:
+ case CPU_R4400PC:
+ case CPU_R4400SC:
+ case CPU_R4400MC:
+ /*
+ * This branch uses up a mtc0 hazard nop slot and saves
+ * two nops after the tlbw instruction.
+ */
+ uasm_bgezl_hazard(p, r, hazard_instance);
+ tlbw(p);
+ uasm_bgezl_label(l, p, hazard_instance);
+ hazard_instance++;
+ uasm_i_nop(p);
+ break;
+
+ case CPU_R4600:
+ case CPU_R4700:
+ uasm_i_nop(p);
+ tlbw(p);
+ uasm_i_nop(p);
+ break;
+
+ case CPU_R5000:
+ case CPU_NEVADA:
+ uasm_i_nop(p); /* QED specifies 2 nops hazard */
+ uasm_i_nop(p); /* QED specifies 2 nops hazard */
+ tlbw(p);
+ break;
+
+ case CPU_5KC:
+ case CPU_TX49XX:
+ case CPU_PR4450:
+ case CPU_XLR:
+ uasm_i_nop(p);
+ tlbw(p);
+ break;
+
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
+ case CPU_R16000:
+ case CPU_4KC:
+ case CPU_4KEC:
+ case CPU_M14KC:
+ case CPU_M14KEC:
+ case CPU_SB1:
+ case CPU_SB1A:
+ case CPU_4KSC:
+ case CPU_20KC:
+ case CPU_25KF:
+ case CPU_BMIPS32:
+ case CPU_BMIPS3300:
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ case CPU_BMIPS5000:
+ case CPU_LOONGSON2EF:
+ case CPU_LOONGSON64:
+ case CPU_R5500:
+ if (m4kc_tlbp_war())
+ uasm_i_nop(p);
+ fallthrough;
+ case CPU_ALCHEMY:
+ tlbw(p);
+ break;
+
+ case CPU_RM7000:
+ uasm_i_nop(p);
+ uasm_i_nop(p);
+ uasm_i_nop(p);
+ uasm_i_nop(p);
+ tlbw(p);
+ break;
+
+ case CPU_VR4111:
+ case CPU_VR4121:
+ case CPU_VR4122:
+ case CPU_VR4181:
+ case CPU_VR4181A:
+ uasm_i_nop(p);
+ uasm_i_nop(p);
+ tlbw(p);
+ uasm_i_nop(p);
+ uasm_i_nop(p);
+ break;
+
+ case CPU_VR4131:
+ case CPU_VR4133:
+ uasm_i_nop(p);
+ uasm_i_nop(p);
+ tlbw(p);
+ break;
+
+ case CPU_XBURST:
+ tlbw(p);
+ uasm_i_nop(p);
+ break;
+
+ default:
+ panic("No TLB refill handler yet (CPU type: %d)",
+ current_cpu_type());
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(build_tlb_write_entry);
+
+static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+ unsigned int reg)
+{
+ if (_PAGE_GLOBAL_SHIFT == 0) {
+ /* pte_t is already in EntryLo format */
+ return;
+ }
+
+ if (cpu_has_rixi && _PAGE_NO_EXEC != 0) {
+ if (fill_includes_sw_bits) {
+ UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
+ } else {
+ UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
+ UASM_i_ROTR(p, reg, reg,
+ ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ }
+ } else {
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#else
+ UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#endif
+ }
+}
+
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+
+static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
+ unsigned int tmp, enum label_id lid,
+ int restore_scratch)
+{
+ if (restore_scratch) {
+ /*
+ * Ensure the MFC0 below observes the value written to the
+ * KScratch register by the prior MTC0.
+ */
+ if (scratch_reg >= 0)
+ uasm_i_ehb(p);
+
+ /* Reset default page size */
+ if (PM_DEFAULT_MASK >> 16) {
+ uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
+ uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
+ uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+ uasm_il_b(p, r, lid);
+ } else if (PM_DEFAULT_MASK) {
+ uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
+ uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+ uasm_il_b(p, r, lid);
+ } else {
+ uasm_i_mtc0(p, 0, C0_PAGEMASK);
+ uasm_il_b(p, r, lid);
+ }
+ if (scratch_reg >= 0)
+ UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
+ else
+ UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+ } else {
+ /* Reset default page size */
+ if (PM_DEFAULT_MASK >> 16) {
+ uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
+ uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
+ uasm_il_b(p, r, lid);
+ uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+ } else if (PM_DEFAULT_MASK) {
+ uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
+ uasm_il_b(p, r, lid);
+ uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+ } else {
+ uasm_il_b(p, r, lid);
+ uasm_i_mtc0(p, 0, C0_PAGEMASK);
+ }
+ }
+}
+
+static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
+ struct uasm_reloc **r,
+ unsigned int tmp,
+ enum tlb_write_entry wmode,
+ int restore_scratch)
+{
+ /* Set huge page tlb entry size */
+ uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
+ uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
+ uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+
+ build_tlb_write_entry(p, l, r, wmode);
+
+ build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
+}
+
+/*
+ * Check if a huge PTE is present; if so, jump to LABEL.
+ */
+static void
+build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
+ unsigned int pmd, int lid)
+{
+ UASM_i_LW(p, tmp, 0, pmd);
+ if (use_bbit_insns()) {
+ uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
+ } else {
+ uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
+ uasm_il_bnez(p, r, tmp, lid);
+ }
+}
+
+static void build_huge_update_entries(u32 **p, unsigned int pte,
+ unsigned int tmp)
+{
+ int small_sequence;
+
+ /*
+ * A huge PTE describes an area the size of the
+ * configured huge page size. This is twice the
+ * size of the large TLB entry we intend to use.
+ * A TLB entry half the size of the configured
+ * huge page size is configured into entrylo0
+ * and entrylo1 to cover the contiguous huge PTE
+ * address space.
+ */
+ small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
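+
+ /*
+ * Example (assuming 2 MB huge pages): HPAGE_SIZE >> 7 == 0x4000 fits
+ * in a 16-bit immediate, so the single-ADDIU path below derives
+ * entrylo1 from entrylo0 without the extra LUI into tmp.
+ */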
+
+ /* We can clobber tmp. It isn't used after this. */
+ if (!small_sequence)
+ uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
+
+ build_convert_pte_to_entrylo(p, pte);
+ UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
+ /* convert to entrylo1 */
+ if (small_sequence)
+ UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
+ else
+ UASM_i_ADDU(p, pte, pte, tmp);
+
+ UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
+}
+
+static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
+ struct uasm_label **l,
+ unsigned int pte,
+ unsigned int ptr,
+ unsigned int flush)
+{
+#ifdef CONFIG_SMP
+ UASM_i_SC(p, pte, 0, ptr);
+ uasm_il_beqz(p, r, pte, label_tlb_huge_update);
+ UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
+#else
+ UASM_i_SW(p, pte, 0, ptr);
+#endif
+ if (cpu_has_ftlb && flush) {
+ BUG_ON(!cpu_has_tlbinv);
+
+ UASM_i_MFC0(p, ptr, C0_ENTRYHI);
+ uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+ build_tlb_write_entry(p, l, r, tlb_indexed);
+
+ uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+ build_huge_update_entries(p, pte, ptr);
+ build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
+
+ return;
+ }
+
+ build_huge_update_entries(p, pte, ptr);
+ build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
+}
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
+
+#ifdef CONFIG_64BIT
+/*
+ * TMP and PTR are scratch.
+ * TMP will be clobbered, PTR will hold the pmd entry.
+ */
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+ unsigned int tmp, unsigned int ptr)
+{
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+ long pgdc = (long)pgd_current;
+#endif
+ /*
+ * The vmalloc handling is not in the hotpath.
+ */
+ uasm_i_dmfc0(p, tmp, C0_BADVADDR);
+
+ if (check_for_high_segbits) {
+ /*
+ * The kernel currently implicitly assumes that the
+ * MIPS SEGBITS parameter for the processor is
+ * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
+ * allocate virtual addresses outside the maximum
+ * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
+ * that doesn't prevent user code from accessing the
+ * higher xuseg addresses. Here, we make sure that
+ * everything but the lower xuseg addresses goes down
+ * the module_alloc/vmalloc path.
+ */
+ uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ uasm_il_bnez(p, r, ptr, label_vmalloc);
+ } else {
+ uasm_il_bltz(p, r, tmp, label_vmalloc);
+ }
+ /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
+
+ if (pgd_reg != -1) {
+ /* pgd is in pgd_reg */
+ if (cpu_has_ldpte)
+ UASM_i_MFC0(p, ptr, C0_PWBASE);
+ else
+ UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
+ } else {
+#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
+ /*
+ * &pgd << 11 stored in CONTEXT [23..63].
+ */
+ UASM_i_MFC0(p, ptr, C0_CONTEXT);
+
+ /* Clear lower 23 bits of context. */
+ uasm_i_dins(p, ptr, 0, 0, 23);
+
+ /* 1 0 1 0 1 << 6 xkphys cached */
+ uasm_i_ori(p, ptr, ptr, 0x540);
+ uasm_i_drotr(p, ptr, ptr, 11);
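+ /*
+ * The ORI/DROTR pair above undoes the << 11 applied by
+ * tlbmiss_handler_setup_pgd and at the same time rotates the 0x540
+ * bits into the top of the address, yielding an xkphys cached
+ * pointer to the pgd.
+ */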
+#elif defined(CONFIG_SMP)
+ UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
+ uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
+ UASM_i_LA_mostly(p, tmp, pgdc);
+ uasm_i_daddu(p, ptr, ptr, tmp);
+ uasm_i_dmfc0(p, tmp, C0_BADVADDR);
+ uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
+#else
+ UASM_i_LA_mostly(p, ptr, pgdc);
+ uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
+#endif
+ }
+
+ uasm_l_vmalloc_done(l, *p);
+
+ /* get pgd offset in bytes */
+ uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
+
+ uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
+ uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
+#ifndef __PAGETABLE_PUD_FOLDED
+ uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
+ uasm_i_ld(p, ptr, 0, ptr); /* get pud pointer */
+ uasm_i_dsrl_safe(p, tmp, tmp, PUD_SHIFT - 3); /* get pud offset in bytes */
+ uasm_i_andi(p, tmp, tmp, (PTRS_PER_PUD - 1) << 3);
+ uasm_i_daddu(p, ptr, ptr, tmp); /* add in pud offset */
+#endif
+#ifndef __PAGETABLE_PMD_FOLDED
+ uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
+ uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
+ uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
+ uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
+ uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
+#endif
+}
+EXPORT_SYMBOL_GPL(build_get_pmde64);
+
+/*
+ * BVADDR is the faulting address, PTR is scratch.
+ * PTR will hold the pgd for vmalloc.
+ */
+static void
+build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+ unsigned int bvaddr, unsigned int ptr,
+ enum vmalloc64_mode mode)
+{
+ long swpd = (long)swapper_pg_dir;
+ int single_insn_swpd;
+ int did_vmalloc_branch = 0;
+
+ single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
+
+ uasm_l_vmalloc(l, *p);
+
+ if (mode != not_refill && check_for_high_segbits) {
+ if (single_insn_swpd) {
+ uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
+ uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+ did_vmalloc_branch = 1;
+ /* fall through */
+ } else {
+ uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
+ }
+ }
+ if (!did_vmalloc_branch) {
+ if (single_insn_swpd) {
+ uasm_il_b(p, r, label_vmalloc_done);
+ uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+ } else {
+ UASM_i_LA_mostly(p, ptr, swpd);
+ uasm_il_b(p, r, label_vmalloc_done);
+ if (uasm_in_compat_space_p(swpd))
+ uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
+ else
+ uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+ }
+ }
+ if (mode != not_refill && check_for_high_segbits) {
+ uasm_l_large_segbits_fault(l, *p);
+
+ if (mode == refill_scratch && scratch_reg >= 0)
+ uasm_i_ehb(p);
+
+ /*
+ * We get here if we are an xsseg address, or if we are
+ * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
+ *
+ * Ignoring xsseg (assumed disabled, so it would generate
+ * address errors), the only remaining possibility
+ * is the upper xuseg addresses. On processors with
+ * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
+ * addresses would have taken an address error. We try
+ * to mimic that here by taking a load/istream page
+ * fault.
+ */
+ if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+ uasm_i_sync(p, 0);
+ UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
+ uasm_i_jr(p, ptr);
+
+ if (mode == refill_scratch) {
+ if (scratch_reg >= 0)
+ UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
+ else
+ UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+ } else {
+ uasm_i_nop(p);
+ }
+ }
+}
+
+#else /* !CONFIG_64BIT */
+
+/*
+ * TMP and PTR are scratch.
+ * TMP will be clobbered, PTR will hold the pgd entry.
+ */
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
+{
+ if (pgd_reg != -1) {
+ /* pgd is in pgd_reg */
+ uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg);
+ uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
+ } else {
+ long pgdc = (long)pgd_current;
+
+ /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
+#ifdef CONFIG_SMP
+ uasm_i_mfc0(p, ptr, SMP_CPUID_REG);
+ UASM_i_LA_mostly(p, tmp, pgdc);
+ uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
+ uasm_i_addu(p, ptr, tmp, ptr);
+#else
+ UASM_i_LA_mostly(p, ptr, pgdc);
+#endif
+ uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
+ uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
+ }
+ uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
+ uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
+ uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
+}
+EXPORT_SYMBOL_GPL(build_get_pgde32);
+
+#endif /* !CONFIG_64BIT */
+
+static void build_adjust_context(u32 **p, unsigned int ctx)
+{
+ unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
+ unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
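+
+ /*
+ * Example (assuming 4 KB pages and 4-byte PTEs, i.e. PTE_T_LOG2 == 2):
+ * the initial shift evaluates to 4 - 3 + 12 - 12 = 1, turning the
+ * BadVPN2 field of c0_context into the byte offset of an even/odd
+ * PTE pair.
+ */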
+
+ switch (current_cpu_type()) {
+ case CPU_VR41XX:
+ case CPU_VR4111:
+ case CPU_VR4121:
+ case CPU_VR4122:
+ case CPU_VR4131:
+ case CPU_VR4181:
+ case CPU_VR4181A:
+ case CPU_VR4133:
+ shift += 2;
+ break;
+
+ default:
+ break;
+ }
+
+ if (shift)
+ UASM_i_SRL(p, ctx, ctx, shift);
+ uasm_i_andi(p, ctx, ctx, mask);
+}
+
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+{
+ /*
+ * Bug workaround for the Nevada. It seems as if under certain
+ * circumstances the move from cp0_context might produce a
+ * bogus result when the mfc0 instruction and its consumer are
+ * in different cachelines, or when a load instruction (probably
+ * any memory reference) is between them.
+ */
+ switch (current_cpu_type()) {
+ case CPU_NEVADA:
+ UASM_i_LW(p, ptr, 0, ptr);
+ GET_CONTEXT(p, tmp); /* get context reg */
+ break;
+
+ default:
+ GET_CONTEXT(p, tmp); /* get context reg */
+ UASM_i_LW(p, ptr, 0, ptr);
+ break;
+ }
+
+ build_adjust_context(p, tmp);
+ UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
+}
+EXPORT_SYMBOL_GPL(build_get_ptep);
+
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
+{
+ int pte_off_even = 0;
+ int pte_off_odd = sizeof(pte_t);
+
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_PHYS_ADDR_T_64BIT)
+ /* The low 32 bits of EntryLo is stored in pte_high */
+ pte_off_even += offsetof(pte_t, pte_high);
+ pte_off_odd += offsetof(pte_t, pte_high);
+#endif
+
+ if (IS_ENABLED(CONFIG_XPA)) {
+ uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
+ UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+ UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
+
+ if (cpu_has_xpa && !mips_xpa_disabled) {
+ uasm_i_lw(p, tmp, 0, ptep);
+ uasm_i_ext(p, tmp, tmp, 0, 24);
+ uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+ }
+
+ uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
+ UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+ UASM_i_MTC0(p, tmp, C0_ENTRYLO1);
+
+ if (cpu_has_xpa && !mips_xpa_disabled) {
+ uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
+ uasm_i_ext(p, tmp, tmp, 0, 24);
+ uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
+ }
+ return;
+ }
+
+ UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */
+ UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */
+ if (r45k_bvahwbug())
+ build_tlb_probe_entry(p);
+ build_convert_pte_to_entrylo(p, tmp);
+ if (r4k_250MHZhwbug())
+ UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+ UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+ build_convert_pte_to_entrylo(p, ptep);
+ if (r45k_bvahwbug())
+ uasm_i_mfc0(p, tmp, C0_INDEX);
+ if (r4k_250MHZhwbug())
+ UASM_i_MTC0(p, 0, C0_ENTRYLO1);
+ UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
+}
+EXPORT_SYMBOL_GPL(build_update_entries);
+
+struct mips_huge_tlb_info {
+ int huge_pte;
+ int restore_scratch;
+ bool need_reload_pte;
+};
+
+static struct mips_huge_tlb_info
+build_fast_tlb_refill_handler(u32 **p, struct uasm_label **l,
+ struct uasm_reloc **r, unsigned int tmp,
+ unsigned int ptr, int c0_scratch_reg)
+{
+ struct mips_huge_tlb_info rv;
+ unsigned int even, odd;
+ int vmalloc_branch_delay_filled = 0;
+ const int scratch = 1; /* Our extra working register */
+
+ rv.huge_pte = scratch;
+ rv.restore_scratch = 0;
+ rv.need_reload_pte = false;
+
+ if (check_for_high_segbits) {
+ UASM_i_MFC0(p, tmp, C0_BADVADDR);
+
+ if (pgd_reg != -1)
+ UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
+ else
+ UASM_i_MFC0(p, ptr, C0_CONTEXT);
+
+ if (c0_scratch_reg >= 0)
+ UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
+ else
+ UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
+
+ uasm_i_dsrl_safe(p, scratch, tmp,
+ PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ uasm_il_bnez(p, r, scratch, label_vmalloc);
+
+ if (pgd_reg == -1) {
+ vmalloc_branch_delay_filled = 1;
+ /* Clear lower 23 bits of context. */
+ uasm_i_dins(p, ptr, 0, 0, 23);
+ }
+ } else {
+ if (pgd_reg != -1)
+ UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
+ else
+ UASM_i_MFC0(p, ptr, C0_CONTEXT);
+
+ UASM_i_MFC0(p, tmp, C0_BADVADDR);
+
+ if (c0_scratch_reg >= 0)
+ UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
+ else
+ UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
+
+ if (pgd_reg == -1)
+ /* Clear lower 23 bits of context. */
+ uasm_i_dins(p, ptr, 0, 0, 23);
+
+ uasm_il_bltz(p, r, tmp, label_vmalloc);
+ }
+
+ if (pgd_reg == -1) {
+ vmalloc_branch_delay_filled = 1;
+ /* 1 0 1 0 1 << 6 xkphys cached */
+ uasm_i_ori(p, ptr, ptr, 0x540);
+ uasm_i_drotr(p, ptr, ptr, 11);
+ }
+
+#ifdef __PAGETABLE_PMD_FOLDED
+#define LOC_PTEP scratch
+#else
+#define LOC_PTEP ptr
+#endif
+
+ if (!vmalloc_branch_delay_filled)
+ /* get pgd offset in bytes */
+ uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
+
+ uasm_l_vmalloc_done(l, *p);
+
+ /*
+ *                        tmp       ptr
+ * fall-through case   =  badvaddr  *pgd_current
+ * vmalloc case        =  badvaddr  swapper_pg_dir
+ */
+
+ if (vmalloc_branch_delay_filled)
+ /* get pgd offset in bytes */
+ uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
+
+#ifdef __PAGETABLE_PMD_FOLDED
+ GET_CONTEXT(p, tmp); /* get context reg */
+#endif
+ uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
+
+ if (use_lwx_insns()) {
+ UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
+ } else {
+ uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
+ uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
+ }
+
+#ifndef __PAGETABLE_PUD_FOLDED
+ /* get pud offset in bytes */
+ uasm_i_dsrl_safe(p, scratch, tmp, PUD_SHIFT - 3);
+ uasm_i_andi(p, scratch, scratch, (PTRS_PER_PUD - 1) << 3);
+
+ if (use_lwx_insns()) {
+ UASM_i_LWX(p, ptr, scratch, ptr);
+ } else {
+ uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
+ UASM_i_LW(p, ptr, 0, ptr);
+ }
+ /* ptr contains a pointer to PMD entry */
+ /* tmp contains the address */
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+ /* get pmd offset in bytes */
+ uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
+ uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
+ GET_CONTEXT(p, tmp); /* get context reg */
+
+ if (use_lwx_insns()) {
+ UASM_i_LWX(p, scratch, scratch, ptr);
+ } else {
+ uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
+ UASM_i_LW(p, scratch, 0, ptr);
+ }
+#endif
+ /* Adjust the context during the load latency. */
+ build_adjust_context(p, tmp);
+
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
+ /*
+ * In the LWX case we don't want to do the load in the
+ * delay slot. It cannot issue in the same cycle and may be
+ * speculative and unneeded.
+ */
+ if (use_lwx_insns())
+ uasm_i_nop(p);
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
+
+ /* build_update_entries */
+ if (use_lwx_insns()) {
+ even = ptr;
+ odd = tmp;
+ UASM_i_LWX(p, even, scratch, tmp);
+ UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
+ UASM_i_LWX(p, odd, scratch, tmp);
+ } else {
+ UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
+ even = tmp;
+ odd = ptr;
+ UASM_i_LW(p, even, 0, ptr); /* get even pte */
+ UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
+ }
+ if (cpu_has_rixi) {
+ uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
+ UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
+ uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
+ } else {
+ uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
+ UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
+ uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
+ }
+ UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
+
+ if (c0_scratch_reg >= 0) {
+ uasm_i_ehb(p);
+ UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
+ build_tlb_write_entry(p, l, r, tlb_random);
+ uasm_l_leave(l, *p);
+ rv.restore_scratch = 1;
+ } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) {
+ build_tlb_write_entry(p, l, r, tlb_random);
+ uasm_l_leave(l, *p);
+ UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
+ } else {
+ UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
+ build_tlb_write_entry(p, l, r, tlb_random);
+ uasm_l_leave(l, *p);
+ rv.restore_scratch = 1;
+ }
+
+ uasm_i_eret(p); /* return from trap */
+
+ return rv;
+}
+
+/*
+ * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
+ * because EXL == 0. If we wrap, we can also use the 32 instruction
+ * slots before the XTLB refill exception handler which belong to the
+ * unused TLB refill exception.
+ */
+#define MIPS64_REFILL_INSNS 32
+
+static void build_r4000_tlb_refill_handler(void)
+{
+ u32 *p = tlb_handler;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+ u32 *f;
+ unsigned int final_len;
+ struct mips_huge_tlb_info htlb_info __maybe_unused;
+ enum vmalloc64_mode vmalloc_mode __maybe_unused;
+
+ memset(tlb_handler, 0, sizeof(tlb_handler));
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+ memset(final_handler, 0, sizeof(final_handler));
+
+ if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
+ htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
+ scratch_reg);
+ vmalloc_mode = refill_scratch;
+ } else {
+ htlb_info.huge_pte = K0;
+ htlb_info.restore_scratch = 0;
+ htlb_info.need_reload_pte = true;
+ vmalloc_mode = refill_noscratch;
+ /*
+ * create the plain linear handler
+ */
+ if (bcm1250_m3_war()) {
+ unsigned int segbits = 44;
+
+ uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+ uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
+ uasm_i_xor(&p, K0, K0, K1);
+ uasm_i_dsrl_safe(&p, K1, K0, 62);
+ uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+ uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
+ uasm_i_or(&p, K0, K0, K1);
+ uasm_il_bnez(&p, &r, K0, label_leave);
+ /* No need for uasm_i_nop */
+ }
+
+#ifdef CONFIG_64BIT
+ build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
+#else
+ build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
+#endif
+
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
+#endif
+
+ build_get_ptep(&p, K0, K1);
+ build_update_entries(&p, K0, K1);
+ build_tlb_write_entry(&p, &l, &r, tlb_random);
+ uasm_l_leave(&l, p);
+ uasm_i_eret(&p); /* return from trap */
+ }
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ uasm_l_tlb_huge_update(&l, p);
+ if (htlb_info.need_reload_pte)
+ UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
+ build_huge_update_entries(&p, htlb_info.huge_pte, K1);
+ build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
+ htlb_info.restore_scratch);
+#endif
+
+#ifdef CONFIG_64BIT
+ build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
+#endif
+
+ /*
+ * Overflow check: For the 64bit handler, we need at least one
+ * free instruction slot for the wrap-around branch. In worst
+ * case, if the intended insertion point is a delay slot, we
+ * need three, with the second nop'ed and the third being
+ * unused.
+ */
+ switch (boot_cpu_type()) {
+ default:
+ if (sizeof(long) == 4) {
+ case CPU_LOONGSON2EF:
+ /* Loongson2 ebase is different from r4k, so we have more space */
+ if ((p - tlb_handler) > 64)
+ panic("TLB refill handler space exceeded");
+ /*
+ * Now fold the handler in the TLB refill handler space.
+ */
+ f = final_handler;
+ /* Simplest case, just copy the handler. */
+ uasm_copy_handler(relocs, labels, tlb_handler, p, f);
+ final_len = p - tlb_handler;
+ break;
+ } else {
+ if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
+ || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
+ && uasm_insn_has_bdelay(relocs,
+ tlb_handler + MIPS64_REFILL_INSNS - 3)))
+ panic("TLB refill handler space exceeded");
+ /*
+ * Now fold the handler in the TLB refill handler space.
+ */
+ f = final_handler + MIPS64_REFILL_INSNS;
+ if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
+ /* Just copy the handler. */
+ uasm_copy_handler(relocs, labels, tlb_handler, p, f);
+ final_len = p - tlb_handler;
+ } else {
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ const enum label_id ls = label_tlb_huge_update;
+#else
+ const enum label_id ls = label_vmalloc;
+#endif
+ u32 *split;
+ int ov = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
+ ;
+ BUG_ON(i == ARRAY_SIZE(labels));
+ split = labels[i].addr;
+
+ /*
+ * See if we have overflown one way or the other.
+ */
+ if (split > tlb_handler + MIPS64_REFILL_INSNS ||
+ split < p - MIPS64_REFILL_INSNS)
+ ov = 1;
+
+ if (ov) {
+ /*
+ * Split two instructions before the end. One
+ * for the branch and one for the instruction
+ * in the delay slot.
+ */
+ split = tlb_handler + MIPS64_REFILL_INSNS - 2;
+
+ /*
+ * If the branch would fall in a delay slot,
+ * we must back up an additional instruction
+ * so that it is no longer in a delay slot.
+ */
+ if (uasm_insn_has_bdelay(relocs, split - 1))
+ split--;
+ }
+ /* Copy first part of the handler. */
+ uasm_copy_handler(relocs, labels, tlb_handler, split, f);
+ f += split - tlb_handler;
+
+ if (ov) {
+ /* Insert branch. */
+ uasm_l_split(&l, final_handler);
+ uasm_il_b(&f, &r, label_split);
+ if (uasm_insn_has_bdelay(relocs, split))
+ uasm_i_nop(&f);
+ else {
+ uasm_copy_handler(relocs, labels,
+ split, split + 1, f);
+ uasm_move_labels(labels, f, f + 1, -1);
+ f++;
+ split++;
+ }
+ }
+
+ /* Copy the rest of the handler. */
+ uasm_copy_handler(relocs, labels, split, p, final_handler);
+ final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
+ (p - split);
+ }
+ }
+ break;
+ }
+
+ uasm_resolve_relocs(relocs, labels);
+ pr_debug("Wrote TLB refill handler (%u instructions).\n",
+ final_len);
+
+ memcpy((void *)ebase, final_handler, 0x100);
+ local_flush_icache_range(ebase, ebase + 0x100);
+ dump_handler("r4000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x100));
+}
+
+static void setup_pw(void)
+{
+ unsigned int pwctl;
+ unsigned long pgd_i, pgd_w;
+#ifndef __PAGETABLE_PMD_FOLDED
+ unsigned long pmd_i, pmd_w;
+#endif
+ unsigned long pt_i, pt_w;
+ unsigned long pte_i, pte_w;
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ unsigned long psn;
+
+ psn = ilog2(_PAGE_HUGE); /* bit used to indicate huge page */
+#endif
+ pgd_i = PGDIR_SHIFT; /* 1st level PGD */
+#ifndef __PAGETABLE_PMD_FOLDED
+ pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER;
+
+ pmd_i = PMD_SHIFT; /* 2nd level PMD */
+ pmd_w = PMD_SHIFT - PAGE_SHIFT;
+#else
+ pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER;
+#endif
+
+ pt_i = PAGE_SHIFT; /* 3rd level PTE */
+ pt_w = PAGE_SHIFT - 3;
+
+ pte_i = ilog2(_PAGE_GLOBAL);
+ pte_w = 0;
+ pwctl = 1 << 30; /* Set PWDirExt */
+
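+ /*
+ * PWField packs the shift for each page table level (plus the
+ * position of the PTE flag bits), and PWSize packs the width of
+ * each index field, so the hardware walker can mirror the software
+ * page table layout.
+ */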
+#ifndef __PAGETABLE_PMD_FOLDED
+ write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i);
+ write_c0_pwsize(1 << 30 | pgd_w << 24 | pmd_w << 12 | pt_w << 6 | pte_w);
+#else
+ write_c0_pwfield(pgd_i << 24 | pt_i << 6 | pte_i);
+ write_c0_pwsize(1 << 30 | pgd_w << 24 | pt_w << 6 | pte_w);
+#endif
+
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ pwctl |= (1 << 6 | psn);
+#endif
+ write_c0_pwctl(pwctl);
+ write_c0_kpgd((long)swapper_pg_dir);
+ kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
+}
+
+static void build_loongson3_tlb_refill_handler(void)
+{
+ u32 *p = tlb_handler;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+ memset(tlb_handler, 0, sizeof(tlb_handler));
+
+ if (check_for_high_segbits) {
+ uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+ uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ uasm_il_beqz(&p, &r, K1, label_vmalloc);
+ uasm_i_nop(&p);
+
+ uasm_il_bgez(&p, &r, K0, label_large_segbits_fault);
+ uasm_i_nop(&p);
+ uasm_l_vmalloc(&l, p);
+ }
+
+ uasm_i_dmfc0(&p, K1, C0_PGD);
+
+ uasm_i_lddir(&p, K0, K1, 3); /* global page dir */
+#ifndef __PAGETABLE_PMD_FOLDED
+ uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */
+#endif
+ uasm_i_ldpte(&p, K1, 0); /* even */
+ uasm_i_ldpte(&p, K1, 1); /* odd */
+ uasm_i_tlbwr(&p);
+
+ /* restore page mask */
+ if (PM_DEFAULT_MASK >> 16) {
+ uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16);
+ uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff);
+ uasm_i_mtc0(&p, K0, C0_PAGEMASK);
+ } else if (PM_DEFAULT_MASK) {
+ uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK);
+ uasm_i_mtc0(&p, K0, C0_PAGEMASK);
+ } else {
+ uasm_i_mtc0(&p, 0, C0_PAGEMASK);
+ }
+
+ uasm_i_eret(&p);
+
+ if (check_for_high_segbits) {
+ uasm_l_large_segbits_fault(&l, p);
+ UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0);
+ uasm_i_jr(&p, K1);
+ uasm_i_nop(&p);
+ }
+
+ uasm_resolve_relocs(relocs, labels);
+ memcpy((void *)(ebase + 0x80), tlb_handler, 0x80);
+ local_flush_icache_range(ebase + 0x80, ebase + 0x100);
+ dump_handler("loongson3_tlb_refill",
+ (u32 *)(ebase + 0x80), (u32 *)(ebase + 0x100));
+}
+
+static void build_setup_pgd(void)
+{
+ const int a0 = 4;
+ const int __maybe_unused a1 = 5;
+ const int __maybe_unused a2 = 6;
+ u32 *p = (u32 *)msk_isa16_mode((ulong)tlbmiss_handler_setup_pgd);
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+ long pgdc = (long)pgd_current;
+#endif
+
+ memset(p, 0, tlbmiss_handler_setup_pgd_end - (char *)p);
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+ pgd_reg = allocate_kscratch();
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+ if (pgd_reg == -1) {
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ /* PGD << 11 in c0_Context */
+ /*
+ * If it is a ckseg0 address, convert to a physical
+ * address. Shifting right by 29 and adding 4 will
+ * result in zero for these addresses.
+ */
+ UASM_i_SRA(&p, a1, a0, 29);
+ UASM_i_ADDIU(&p, a1, a1, 4);
+ uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
+ uasm_i_nop(&p);
+ uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
+ uasm_l_tlbl_goaround1(&l, p);
+ UASM_i_SLL(&p, a0, a0, 11);
+ UASM_i_MTC0(&p, a0, C0_CONTEXT);
+ uasm_i_jr(&p, 31);
+ uasm_i_ehb(&p);
+ } else {
+ /* PGD in c0_KScratch */
+ if (cpu_has_ldpte)
+ UASM_i_MTC0(&p, a0, C0_PWBASE);
+ else
+ UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
+ uasm_i_jr(&p, 31);
+ uasm_i_ehb(&p);
+ }
+#else
+#ifdef CONFIG_SMP
+ /* Save PGD to pgd_current[smp_processor_id()] */
+ UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG);
+ UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT);
+ UASM_i_LA_mostly(&p, a2, pgdc);
+ UASM_i_ADDU(&p, a2, a2, a1);
+ UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
+#else
+ UASM_i_LA_mostly(&p, a2, pgdc);
+ UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
+#endif /* SMP */
+
+ /* if pgd_reg is allocated, save PGD also to scratch register */
+ if (pgd_reg != -1) {
+ UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
+ uasm_i_jr(&p, 31);
+ uasm_i_ehb(&p);
+ } else {
+ uasm_i_jr(&p, 31);
+ uasm_i_nop(&p);
+ }
+#endif
+ if (p >= (u32 *)tlbmiss_handler_setup_pgd_end)
+ panic("tlbmiss_handler_setup_pgd space exceeded");
+
+ uasm_resolve_relocs(relocs, labels);
+ pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
+ (unsigned int)(p - (u32 *)tlbmiss_handler_setup_pgd));
+
+ dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
+ tlbmiss_handler_setup_pgd_end);
+}
+
+static void
+iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
+{
+#ifdef CONFIG_SMP
+ if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+ uasm_i_sync(p, 0);
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (cpu_has_64bits)
+ uasm_i_lld(p, pte, 0, ptr);
+ else
+# endif
+ UASM_i_LL(p, pte, 0, ptr);
+#else
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (cpu_has_64bits)
+ uasm_i_ld(p, pte, 0, ptr);
+ else
+# endif
+ UASM_i_LW(p, pte, 0, ptr);
+#endif
+}
+
+static void
+iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
+ unsigned int mode, unsigned int scratch)
+{
+ unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
+ unsigned int swmode = mode & ~hwmode;
+
+ if (IS_ENABLED(CONFIG_XPA) && !cpu_has_64bits) {
+ uasm_i_lui(p, scratch, swmode >> 16);
+ uasm_i_or(p, pte, pte, scratch);
+ BUG_ON(swmode & 0xffff);
+ } else {
+ uasm_i_ori(p, pte, pte, mode);
+ }
+
+#ifdef CONFIG_SMP
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (cpu_has_64bits)
+ uasm_i_scd(p, pte, 0, ptr);
+ else
+# endif
+ UASM_i_SC(p, pte, 0, ptr);
+
+ if (r10000_llsc_war())
+ uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
+ else
+ uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
+
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (!cpu_has_64bits) {
+ /* no uasm_i_nop needed */
+ uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
+ uasm_i_ori(p, pte, pte, hwmode);
+ BUG_ON(hwmode & ~0xffff);
+ uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
+ uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
+ /* no uasm_i_nop needed */
+ uasm_i_lw(p, pte, 0, ptr);
+ } else
+ uasm_i_nop(p);
+# else
+ uasm_i_nop(p);
+# endif
+#else
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (cpu_has_64bits)
+ uasm_i_sd(p, pte, 0, ptr);
+ else
+# endif
+ UASM_i_SW(p, pte, 0, ptr);
+
+# ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (!cpu_has_64bits) {
+ uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
+ uasm_i_ori(p, pte, pte, hwmode);
+ BUG_ON(hwmode & ~0xffff);
+ uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
+ uasm_i_lw(p, pte, 0, ptr);
+ }
+# endif
+#endif
+}
+
+/*
+ * Check if PTE is present, if not then jump to LABEL. PTR points to
+ * the page table where this PTE is located. PTE will be re-loaded
+ * with its original value.
+ */
+static void
+build_pte_present(u32 **p, struct uasm_reloc **r,
+ int pte, int ptr, int scratch, enum label_id lid)
+{
+ int t = scratch >= 0 ? scratch : pte;
+ int cur = pte;
+
+ if (cpu_has_rixi) {
+ if (use_bbit_insns()) {
+ uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
+ uasm_i_nop(p);
+ } else {
+ if (_PAGE_PRESENT_SHIFT) {
+ uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
+ cur = t;
+ }
+ uasm_i_andi(p, t, cur, 1);
+ uasm_il_beqz(p, r, t, lid);
+ if (pte == t)
+ /* You lose the SMP race :-( */
+ iPTE_LW(p, pte, ptr);
+ }
+ } else {
+ if (_PAGE_PRESENT_SHIFT) {
+ uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
+ cur = t;
+ }
+ uasm_i_andi(p, t, cur,
+ (_PAGE_PRESENT | _PAGE_NO_READ) >> _PAGE_PRESENT_SHIFT);
+ uasm_i_xori(p, t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT);
+ uasm_il_bnez(p, r, t, lid);
+ if (pte == t)
+ /* You lose the SMP race :-( */
+ iPTE_LW(p, pte, ptr);
+ }
+}
+
+/* Make PTE valid, store result in PTR. */
+static void
+build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
+ unsigned int ptr, unsigned int scratch)
+{
+ unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
+
+ iPTE_SW(p, r, pte, ptr, mode, scratch);
+}
+
+/*
+ * Check if PTE can be written to; if not, branch to LABEL. Regardless,
+ * restore PTE with the value from PTR when done.
+ */
+static void
+build_pte_writable(u32 **p, struct uasm_reloc **r,
+ unsigned int pte, unsigned int ptr, int scratch,
+ enum label_id lid)
+{
+ int t = scratch >= 0 ? scratch : pte;
+ int cur = pte;
+
+ if (_PAGE_PRESENT_SHIFT) {
+ uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
+ cur = t;
+ }
+ uasm_i_andi(p, t, cur,
+ (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
+ uasm_i_xori(p, t, t,
+ (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
+ uasm_il_bnez(p, r, t, lid);
+ if (pte == t)
+ /* You lose the SMP race :-( */
+ iPTE_LW(p, pte, ptr);
+ else
+ uasm_i_nop(p);
+}
+
+/* Make PTE writable, update software status bits as well, then store
+ * at PTR.
+ */
+static void
+build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
+ unsigned int ptr, unsigned int scratch)
+{
+ unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
+ | _PAGE_DIRTY);
+
+ iPTE_SW(p, r, pte, ptr, mode, scratch);
+}
+
+/*
+ * Check if PTE can be modified; if not, branch to LABEL. Regardless,
+ * restore PTE with the value from PTR when done.
+ */
+static void
+build_pte_modifiable(u32 **p, struct uasm_reloc **r,
+ unsigned int pte, unsigned int ptr, int scratch,
+ enum label_id lid)
+{
+ if (use_bbit_insns()) {
+ uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
+ uasm_i_nop(p);
+ } else {
+ int t = scratch >= 0 ? scratch : pte;
+ uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
+ uasm_i_andi(p, t, t, 1);
+ uasm_il_beqz(p, r, t, lid);
+ if (pte == t)
+ /* You lose the SMP race :-( */
+ iPTE_LW(p, pte, ptr);
+ }
+}
+
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+
+/*
+ * R3000 style TLB load/store/modify handlers.
+ */
+
+/*
+ * This places the pte into ENTRYLO0 and writes it with tlbwi.
+ * Then it returns.
+ */
+static void
+build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
+{
+ uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
+ uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
+ uasm_i_tlbwi(p);
+ uasm_i_jr(p, tmp);
+ uasm_i_rfe(p); /* branch delay */
+}
+
+/*
+ * This places the pte into ENTRYLO0 and writes it with tlbwi
+ * or tlbwr as appropriate. This is because the index register
+ * may have the probe fail bit set as a result of a trap on a
+ * kseg2 access, i.e. without refill. Then it returns.
+ */
+static void
+build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
+ struct uasm_reloc **r, unsigned int pte,
+ unsigned int tmp)
+{
+ uasm_i_mfc0(p, tmp, C0_INDEX);
+ uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
+ uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
+ uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
+ uasm_i_tlbwi(p); /* cp0 delay */
+ uasm_i_jr(p, tmp);
+ uasm_i_rfe(p); /* branch delay */
+ uasm_l_r3000_write_probe_fail(l, *p);
+ uasm_i_tlbwr(p); /* cp0 delay */
+ uasm_i_jr(p, tmp);
+ uasm_i_rfe(p); /* branch delay */
+}
+
+static void
+build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
+ unsigned int ptr)
+{
+ long pgdc = (long)pgd_current;
+
+ uasm_i_mfc0(p, pte, C0_BADVADDR);
+ uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
+ uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
+ uasm_i_srl(p, pte, pte, 22); /* load delay */
+ uasm_i_sll(p, pte, pte, 2);
+ uasm_i_addu(p, ptr, ptr, pte);
+ uasm_i_mfc0(p, pte, C0_CONTEXT);
+ uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
+ uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
+ uasm_i_addu(p, ptr, ptr, pte);
+ uasm_i_lw(p, pte, 0, ptr);
+ uasm_i_tlbp(p); /* load delay */
+}
+
+static void build_r3000_tlb_load_handler(void)
+{
+ u32 *p = (u32 *)handle_tlbl;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(p, 0, handle_tlbl_end - (char *)p);
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ build_r3000_tlbchange_handler_head(&p, K0, K1);
+ build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
+ uasm_i_nop(&p); /* load delay */
+ build_make_valid(&p, &r, K0, K1, -1);
+ build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
+
+ uasm_l_nopage_tlbl(&l, p);
+ uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
+ uasm_i_nop(&p);
+
+ if (p >= (u32 *)handle_tlbl_end)
+ panic("TLB load handler fastpath space exceeded");
+
+ uasm_resolve_relocs(relocs, labels);
+ pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
+ (unsigned int)(p - (u32 *)handle_tlbl));
+
+ dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_end);
+}
+
+static void build_r3000_tlb_store_handler(void)
+{
+ u32 *p = (u32 *)handle_tlbs;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(p, 0, handle_tlbs_end - (char *)p);
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ build_r3000_tlbchange_handler_head(&p, K0, K1);
+ build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
+ uasm_i_nop(&p); /* load delay */
+ build_make_write(&p, &r, K0, K1, -1);
+ build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
+
+ uasm_l_nopage_tlbs(&l, p);
+ uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
+ uasm_i_nop(&p);
+
+ if (p >= (u32 *)handle_tlbs_end)
+ panic("TLB store handler fastpath space exceeded");
+
+ uasm_resolve_relocs(relocs, labels);
+ pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
+ (unsigned int)(p - (u32 *)handle_tlbs));
+
+ dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_end);
+}
+
+static void build_r3000_tlb_modify_handler(void)
+{
+ u32 *p = (u32 *)handle_tlbm;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(p, 0, handle_tlbm_end - (char *)p);
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ build_r3000_tlbchange_handler_head(&p, K0, K1);
+ build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
+ uasm_i_nop(&p); /* load delay */
+ build_make_write(&p, &r, K0, K1, -1);
+ build_r3000_pte_reload_tlbwi(&p, K0, K1);
+
+ uasm_l_nopage_tlbm(&l, p);
+ uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
+ uasm_i_nop(&p);
+
+ if (p >= (u32 *)handle_tlbm_end)
+ panic("TLB modify handler fastpath space exceeded");
+
+ uasm_resolve_relocs(relocs, labels);
+ pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
+ (unsigned int)(p - (u32 *)handle_tlbm));
+
+ dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_end);
+}
+#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
+
+static bool cpu_has_tlbex_tlbp_race(void)
+{
+ /*
+ * When a Hardware Table Walker is running it can replace TLB entries
+ * at any time, leading to a race between it & the CPU.
+ */
+ if (cpu_has_htw)
+ return true;
+
+ /*
+ * If the CPU shares FTLB RAM with its siblings then our entry may be
+ * replaced at any time by a sibling performing a write to the FTLB.
+ */
+ if (cpu_has_shared_ftlb_ram)
+ return true;
+
+ /* In all other cases there ought to be no race condition to handle */
+ return false;
+}
+
+/*
+ * R4000 style TLB load/store/modify handlers.
+ */
+static struct work_registers
+build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
+ struct uasm_reloc **r)
+{
+ struct work_registers wr = build_get_work_registers(p);
+
+#ifdef CONFIG_64BIT
+ build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
+#else
+ build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
+#endif
+
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ /*
+ * For huge tlb entries, pmd doesn't contain an address but
+ * instead contains the tlb pte. Check the PAGE_HUGE bit and
+ * see if we need to jump to huge tlb processing.
+ */
+ build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
+#endif
+
+ UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
+ UASM_i_LW(p, wr.r2, 0, wr.r2);
+ UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
+ uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
+ UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
+
+#ifdef CONFIG_SMP
+ uasm_l_smp_pgtable_change(l, *p);
+#endif
+ iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
+ if (!m4kc_tlbp_war()) {
+ build_tlb_probe_entry(p);
+ if (cpu_has_tlbex_tlbp_race()) {
+ /* If the probe found nothing, the entry was replaced under us; leave and retry. */
+ uasm_i_ehb(p);
+ uasm_i_mfc0(p, wr.r3, C0_INDEX);
+ uasm_il_bltz(p, r, wr.r3, label_leave);
+ uasm_i_nop(p);
+ }
+ }
+ return wr;
+}
+
+static void
+build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
+ struct uasm_reloc **r, unsigned int tmp,
+ unsigned int ptr)
+{
+ uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
+ uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
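+ /*
+ * The ORI/XORI pair above forces the sizeof(pte_t) bit to zero,
+ * pointing ptr back at the even PTE of the pair.
+ */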
+ build_update_entries(p, tmp, ptr);
+ build_tlb_write_entry(p, l, r, tlb_indexed);
+ uasm_l_leave(l, *p);
+ build_restore_work_registers(p);
+ uasm_i_eret(p); /* return from trap */
+
+#ifdef CONFIG_64BIT
+ build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
+#endif
+}
+
+static void build_r4000_tlb_load_handler(void)
+{
+ u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbl);
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+ struct work_registers wr;
+
+ memset(p, 0, handle_tlbl_end - (char *)p);
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ if (bcm1250_m3_war()) {
+ unsigned int segbits = 44;
+
+ uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+ uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
+ uasm_i_xor(&p, K0, K0, K1);
+ uasm_i_dsrl_safe(&p, K1, K0, 62);
+ uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+ uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
+ uasm_i_or(&p, K0, K0, K1);
+ uasm_il_bnez(&p, &r, K0, label_leave);
+ /* No need for uasm_i_nop */
+ }
+
+ wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
+ build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
+ if (m4kc_tlbp_war())
+ build_tlb_probe_entry(&p);
+
+ if (cpu_has_rixi && !cpu_has_rixiex) {
+ /*
+ * If the page is not _PAGE_VALID, RI or XI could not
+ * have triggered it. Skip the expensive test.
+ */
+ if (use_bbit_insns()) {
+ uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
+ label_tlbl_goaround1);
+ } else {
+ uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
+ uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
+ }
+ uasm_i_nop(&p);
+
+ /*
+ * Warn if something may race with us & replace the TLB entry
+ * before we read it here. Everything with such races should
+ * also have dedicated RiXi exception handlers, so this
+ * shouldn't be hit.
+ */
+ WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");
+
+ uasm_i_tlbr(&p);
+
+ switch (current_cpu_type()) {
+ default:
+ if (cpu_has_mips_r2_exec_hazard) {
+ uasm_i_ehb(&p);
+
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ break;
+ }
+ }
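+ /*
+ * The Octeon case labels above sit inside the if on purpose: those
+ * cores branch straight to the break and never execute the ehb.
+ */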
+
+ /* Examine entrylo 0 or 1 based on ptr. */
+ if (use_bbit_insns()) {
+ uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
+ } else {
+ uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
+ uasm_i_beqz(&p, wr.r3, 8);
+ }
+ /* load it in the delay slot */
+ UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
+ /* load it if ptr is odd */
+ UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
+ /*
+ * If the entryLo (now in wr.r3) is valid (bit 1), RI or
+ * XI must have triggered it.
+ */
+ if (use_bbit_insns()) {
+ uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
+ uasm_i_nop(&p);
+ uasm_l_tlbl_goaround1(&l, p);
+ } else {
+ uasm_i_andi(&p, wr.r3, wr.r3, 2);
+ uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
+ uasm_i_nop(&p);
+ }
+ uasm_l_tlbl_goaround1(&l, p);
+ }
+ build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3);
+ build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
+
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ /*
+ * This is the entry point when build_r4000_tlbchange_handler_head
+ * spots a huge page.
+ */
+ uasm_l_tlb_huge_update(&l, p);
+ iPTE_LW(&p, wr.r1, wr.r2);
+ build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
+ build_tlb_probe_entry(&p);
+
+ if (cpu_has_rixi && !cpu_has_rixiex) {
+ /*
+ * If the page is not _PAGE_VALID, RI or XI could not
+ * have triggered it. Skip the expensive test.
+ */
+ if (use_bbit_insns()) {
+ uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
+ label_tlbl_goaround2);
+ } else {
+ uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
+ uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
+ }
+ uasm_i_nop(&p);
+
+ /*
+ * Warn if something may race with us & replace the TLB entry
+ * before we read it here. Everything with such races should
+ * also have dedicated RiXi exception handlers, so this
+ * shouldn't be hit.
+ */
+ WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");
+
+ uasm_i_tlbr(&p);
+
+ switch (current_cpu_type()) {
+ default:
+ if (cpu_has_mips_r2_exec_hazard) {
+ uasm_i_ehb(&p);
+
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ break;
+ }
+ }
+
+ /* Examine entrylo 0 or 1 based on ptr. */
+ if (use_bbit_insns()) {
+ uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
+ } else {
+ uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
+ uasm_i_beqz(&p, wr.r3, 8);
+ }
+ /* load it in the delay slot */
+ UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
+ /* load it if ptr is odd */
+ UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
+ /*
+ * If the entryLo (now in wr.r3) is valid (bit 1), RI or
+ * XI must have triggered it.
+ */
+ if (use_bbit_insns()) {
+ uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
+ } else {
+ uasm_i_andi(&p, wr.r3, wr.r3, 2);
+ uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
+ }
+ if (PM_DEFAULT_MASK == 0)
+ uasm_i_nop(&p);
+ /*
+ * We clobbered C0_PAGEMASK, restore it. On the other branch
+ * it is restored in build_huge_tlb_write_entry.
+ */
+ build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
+
+ uasm_l_tlbl_goaround2(&l, p);
+ }
+ uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
+#endif
+
+ uasm_l_nopage_tlbl(&l, p);
+ if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+ uasm_i_sync(&p, 0);
+ build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+ if ((unsigned long)tlb_do_page_fault_0 & 1) {
+ uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
+ uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
+ uasm_i_jr(&p, K0);
+ } else
+#endif
+ uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
+ uasm_i_nop(&p);
+
+ if (p >= (u32 *)handle_tlbl_end)
+ panic("TLB load handler fastpath space exceeded");
+
+ uasm_resolve_relocs(relocs, labels);
+ pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
+ (unsigned int)(p - (u32 *)handle_tlbl));
+
+ dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_end);
+}
+
+static void build_r4000_tlb_store_handler(void)
+{
+ u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbs);
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+ struct work_registers wr;
+
+ memset(p, 0, handle_tlbs_end - (char *)p);
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
+ build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
+ if (m4kc_tlbp_war())
+ build_tlb_probe_entry(&p);
+ build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
+ build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
+
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ /*
+ * This is the entry point when
+ * build_r4000_tlbchange_handler_head spots a huge page.
+ */
+ uasm_l_tlb_huge_update(&l, p);
+ iPTE_LW(&p, wr.r1, wr.r2);
+ build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
+ build_tlb_probe_entry(&p);
+ uasm_i_ori(&p, wr.r1, wr.r1,
+ _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
+#endif
+
+ uasm_l_nopage_tlbs(&l, p);
+ if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+ uasm_i_sync(&p, 0);
+ build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+ if ((unsigned long)tlb_do_page_fault_1 & 1) {
+ uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+ uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+ uasm_i_jr(&p, K0);
+ } else
+#endif
+ uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
+ uasm_i_nop(&p);
+
+ if (p >= (u32 *)handle_tlbs_end)
+ panic("TLB store handler fastpath space exceeded");
+
+ uasm_resolve_relocs(relocs, labels);
+ pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
+ (unsigned int)(p - (u32 *)handle_tlbs));
+
+ dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_end);
+}
+
+static void build_r4000_tlb_modify_handler(void)
+{
+ u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbm);
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+ struct work_registers wr;
+
+ memset(p, 0, handle_tlbm_end - (char *)p);
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
+ build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
+ if (m4kc_tlbp_war())
+ build_tlb_probe_entry(&p);
+ /* Present and writable bits set, set accessed and dirty bits. */
+ build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
+ build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
+
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ /*
+ * This is the entry point when
+ * build_r4000_tlbchange_handler_head spots a huge page.
+ */
+ uasm_l_tlb_huge_update(&l, p);
+ iPTE_LW(&p, wr.r1, wr.r2);
+ build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
+ build_tlb_probe_entry(&p);
+ uasm_i_ori(&p, wr.r1, wr.r1,
+ _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
+#endif
+
+ uasm_l_nopage_tlbm(&l, p);
+ if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+ uasm_i_sync(&p, 0);
+ build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+ if ((unsigned long)tlb_do_page_fault_1 & 1) {
+ uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+ uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+ uasm_i_jr(&p, K0);
+ } else
+#endif
+ uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
+ uasm_i_nop(&p);
+
+ if (p >= (u32 *)handle_tlbm_end)
+ panic("TLB modify handler fastpath space exceeded");
+
+ uasm_resolve_relocs(relocs, labels);
+ pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
+ (unsigned int)(p - (u32 *)handle_tlbm));
+
+ dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_end);
+}
+
+static void flush_tlb_handlers(void)
+{
+ local_flush_icache_range((unsigned long)handle_tlbl,
+ (unsigned long)handle_tlbl_end);
+ local_flush_icache_range((unsigned long)handle_tlbs,
+ (unsigned long)handle_tlbs_end);
+ local_flush_icache_range((unsigned long)handle_tlbm,
+ (unsigned long)handle_tlbm_end);
+ local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
+ (unsigned long)tlbmiss_handler_setup_pgd_end);
+}
+
+static void print_htw_config(void)
+{
+ unsigned long config;
+ unsigned int pwctl;
+ const int field = 2 * sizeof(unsigned long);
+
+ config = read_c0_pwfield();
+ pr_debug("PWField (0x%0*lx): GDI: 0x%02lx UDI: 0x%02lx MDI: 0x%02lx PTI: 0x%02lx PTEI: 0x%02lx\n",
+ field, config,
+ (config & MIPS_PWFIELD_GDI_MASK) >> MIPS_PWFIELD_GDI_SHIFT,
+ (config & MIPS_PWFIELD_UDI_MASK) >> MIPS_PWFIELD_UDI_SHIFT,
+ (config & MIPS_PWFIELD_MDI_MASK) >> MIPS_PWFIELD_MDI_SHIFT,
+ (config & MIPS_PWFIELD_PTI_MASK) >> MIPS_PWFIELD_PTI_SHIFT,
+ (config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);
+
+ config = read_c0_pwsize();
+ pr_debug("PWSize (0x%0*lx): PS: 0x%lx GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n",
+ field, config,
+ (config & MIPS_PWSIZE_PS_MASK) >> MIPS_PWSIZE_PS_SHIFT,
+ (config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
+ (config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
+ (config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
+ (config & MIPS_PWSIZE_PTW_MASK) >> MIPS_PWSIZE_PTW_SHIFT,
+ (config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);
+
+ pwctl = read_c0_pwctl();
+ pr_debug("PWCtl (0x%x): PWEn: 0x%x XK: 0x%x XS: 0x%x XU: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n",
+ pwctl,
+ (pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
+ (pwctl & MIPS_PWCTL_XK_MASK) >> MIPS_PWCTL_XK_SHIFT,
+ (pwctl & MIPS_PWCTL_XS_MASK) >> MIPS_PWCTL_XS_SHIFT,
+ (pwctl & MIPS_PWCTL_XU_MASK) >> MIPS_PWCTL_XU_SHIFT,
+ (pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
+ (pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
+ (pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
+}
+
+static void config_htw_params(void)
+{
+ unsigned long pwfield, pwsize, ptei;
+ unsigned int config;
+
+ /*
+ * We are using 2-level page tables, so we only need to
+ * set up GDW and PTW appropriately. UDW and MDW will remain 0.
+ * The default value of GDI/UDI/MDI/PTI is 0xc. It is illegal to
+ * write values less than 0xc in these fields because the entire
+ * write will be dropped. As a result, we must preserve the
+ * original reset values and overwrite only what we really want.
+ */
+
+ pwfield = read_c0_pwfield();
+ /* re-initialize the GDI field */
+ pwfield &= ~MIPS_PWFIELD_GDI_MASK;
+ pwfield |= PGDIR_SHIFT << MIPS_PWFIELD_GDI_SHIFT;
+ /* re-initialize the PTI field including the even/odd bit */
+ pwfield &= ~MIPS_PWFIELD_PTI_MASK;
+ pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT;
+ if (CONFIG_PGTABLE_LEVELS >= 3) {
+ pwfield &= ~MIPS_PWFIELD_MDI_MASK;
+ pwfield |= PMD_SHIFT << MIPS_PWFIELD_MDI_SHIFT;
+ }
+ /* Set the PTEI right shift */
+ ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT;
+ pwfield |= ptei;
+ write_c0_pwfield(pwfield);
+ /* Check whether the PTEI value is supported */
+ back_to_back_c0_hazard();
+ pwfield = read_c0_pwfield();
+ if (((pwfield & MIPS_PWFIELD_PTEI_MASK) << MIPS_PWFIELD_PTEI_SHIFT)
+ != ptei) {
+ pr_warn("Unsupported PTEI field value: 0x%lx. HTW will not be enabled",
+ ptei);
+ /*
+ * Drop option to avoid HTW being enabled via another path
+ * (eg htw_reset())
+ */
+ current_cpu_data.options &= ~MIPS_CPU_HTW;
+ return;
+ }
+
+ pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
+ pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
+ if (CONFIG_PGTABLE_LEVELS >= 3)
+ pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
+
+ /* Set pointer size to size of directory pointers */
+ if (IS_ENABLED(CONFIG_64BIT))
+ pwsize |= MIPS_PWSIZE_PS_MASK;
+ /* PTEs may be multiple pointers long (e.g. with XPA) */
+ pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
+ & MIPS_PWSIZE_PTEW_MASK;
+
+ write_c0_pwsize(pwsize);
+
+ /* Make sure everything is set before we enable the HTW */
+ back_to_back_c0_hazard();
+
+ /*
+ * Enable HTW (and only for XUSeg on 64-bit), and disable the rest of
+ * the pwctl fields.
+ */
+ config = 1 << MIPS_PWCTL_PWEN_SHIFT;
+ if (IS_ENABLED(CONFIG_64BIT))
+ config |= MIPS_PWCTL_XU_MASK;
+ write_c0_pwctl(config);
+ pr_info("Hardware Page Table Walker enabled\n");
+
+ print_htw_config();
+}
+
+static void config_xpa_params(void)
+{
+#ifdef CONFIG_XPA
+ unsigned int pagegrain;
+
+ if (mips_xpa_disabled) {
+ pr_info("Extended Physical Addressing (XPA) disabled\n");
+ return;
+ }
+
+ pagegrain = read_c0_pagegrain();
+ write_c0_pagegrain(pagegrain | PG_ELPA);
+ back_to_back_c0_hazard();
+ pagegrain = read_c0_pagegrain();
+
+ if (pagegrain & PG_ELPA)
+ pr_info("Extended Physical Addressing (XPA) enabled\n");
+ else
+ panic("Extended Physical Addressing (XPA) disabled");
+#endif
+}
+
+static void check_pabits(void)
+{
+ unsigned long entry;
+ unsigned pabits, fillbits;
+
+ if (!cpu_has_rixi || _PAGE_NO_EXEC == 0) {
+ /*
+ * We'll only be making use of the fact that we can rotate bits
+ * into the fill if the CPU supports RIXI, so don't bother
+ * probing this for CPUs which don't.
+ */
+ return;
+ }
+
+ write_c0_entrylo0(~0ul);
+ back_to_back_c0_hazard();
+ entry = read_c0_entrylo0();
+
+ /* clear all non-PFN bits */
+ entry &= ~((1 << MIPS_ENTRYLO_PFN_SHIFT) - 1);
+ entry &= ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
+
+ /* find a lower bound on PABITS, and upper bound on fill bits */
+ pabits = fls_long(entry) + 6;
+ fillbits = max_t(int, (int)BITS_PER_LONG - pabits, 0);
+
+ /* minus the RI & XI bits */
+ fillbits -= min_t(unsigned, fillbits, 2);
+
+ if (fillbits >= ilog2(_PAGE_NO_EXEC))
+ fill_includes_sw_bits = true;
+
+ pr_debug("Entry* registers contain %u fill bits\n", fillbits);
+}
+
+void build_tlb_refill_handler(void)
+{
+ /*
+ * The refill handler is generated per-CPU; multi-node systems
+ * may have local storage for it. The other handlers are only
+ * needed once.
+ */
+ static int run_once = 0;
+
+ if (IS_ENABLED(CONFIG_XPA) && !cpu_has_rixi)
+ panic("Kernels supporting XPA currently require CPUs with RIXI");
+
+ output_pgtable_bits_defines();
+ check_pabits();
+
+#ifdef CONFIG_64BIT
+ check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+#endif
+
+ if (cpu_has_3kex) {
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+ if (!run_once) {
+ build_setup_pgd();
+ build_r3000_tlb_refill_handler();
+ build_r3000_tlb_load_handler();
+ build_r3000_tlb_store_handler();
+ build_r3000_tlb_modify_handler();
+ flush_tlb_handlers();
+ run_once++;
+ }
+#else
+ panic("No R3000 TLB refill handler");
+#endif
+ return;
+ }
+
+ if (cpu_has_ldpte)
+ setup_pw();
+
+ if (!run_once) {
+ scratch_reg = allocate_kscratch();
+ build_setup_pgd();
+ build_r4000_tlb_load_handler();
+ build_r4000_tlb_store_handler();
+ build_r4000_tlb_modify_handler();
+ if (cpu_has_ldpte)
+ build_loongson3_tlb_refill_handler();
+ else
+ build_r4000_tlb_refill_handler();
+ flush_tlb_handlers();
+ run_once++;
+ }
+ if (cpu_has_xpa)
+ config_xpa_params();
+ if (cpu_has_htw)
+ config_htw_params();
+}
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
new file mode 100644
index 000000000..75ef90486
--- /dev/null
+++ b/arch/mips/mm/uasm-micromips.c
@@ -0,0 +1,232 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, supports
+ * only a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <asm/inst.h>
+#include <asm/elf.h>
+#include <asm/bugs.h>
+#include <asm/uasm.h>
+
+#define RS_MASK 0x1f
+#define RS_SH 16
+#define RT_MASK 0x1f
+#define RT_SH 21
+#define SCIMM_MASK 0x3ff
+#define SCIMM_SH 16
+
+/* This macro sets the non-variable bits of an instruction. */
+#define M(a, b, c, d, e, f) \
+ ((a) << OP_SH \
+ | (b) << RT_SH \
+ | (c) << RS_SH \
+ | (d) << RD_SH \
+ | (e) << RE_SH \
+ | (f) << FUNC_SH)
+
+#include "uasm.c"
+
+static const struct insn insn_table_MM[insn_invalid] = {
+ [insn_addu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD},
+ [insn_addiu] = {M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+ [insn_and] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD},
+ [insn_andi] = {M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM},
+ [insn_beq] = {M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
+ [insn_beql] = {0, 0},
+ [insn_bgez] = {M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM},
+ [insn_bgezl] = {0, 0},
+ [insn_bltz] = {M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM},
+ [insn_bltzl] = {0, 0},
+ [insn_bne] = {M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM},
+ [insn_cache] = {M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM},
+ [insn_cfc1] = {M(mm_pool32f_op, 0, 0, 0, mm_cfc1_op, mm_32f_73_op), RT | RS},
+ [insn_cfcmsa] = {M(mm_pool32s_op, 0, msa_cfc_op, 0, 0, mm_32s_elm_op), RD | RE},
+ [insn_ctc1] = {M(mm_pool32f_op, 0, 0, 0, mm_ctc1_op, mm_32f_73_op), RT | RS},
+ [insn_ctcmsa] = {M(mm_pool32s_op, 0, msa_ctc_op, 0, 0, mm_32s_elm_op), RD | RE},
+ [insn_daddu] = {0, 0},
+ [insn_daddiu] = {0, 0},
+ [insn_di] = {M(mm_pool32a_op, 0, 0, 0, mm_di_op, mm_pool32axf_op), RS},
+ [insn_divu] = {M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS},
+ [insn_dmfc0] = {0, 0},
+ [insn_dmtc0] = {0, 0},
+ [insn_dsll] = {0, 0},
+ [insn_dsll32] = {0, 0},
+ [insn_dsra] = {0, 0},
+ [insn_dsrl] = {0, 0},
+ [insn_dsrl32] = {0, 0},
+ [insn_drotr] = {0, 0},
+ [insn_drotr32] = {0, 0},
+ [insn_dsubu] = {0, 0},
+ [insn_eret] = {M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0},
+ [insn_ins] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE},
+ [insn_ext] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE},
+ [insn_j] = {M(mm_j32_op, 0, 0, 0, 0, 0), JIMM},
+ [insn_jal] = {M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM},
+ [insn_jalr] = {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RT | RS},
+ [insn_jr] = {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS},
+ [insn_lb] = {M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+ [insn_ld] = {0, 0},
+ [insn_lh] = {M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+ [insn_ll] = {M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM},
+ [insn_lld] = {0, 0},
+ [insn_lui] = {M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM},
+ [insn_lw] = {M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+ [insn_mfc0] = {M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD},
+ [insn_mfhi] = {M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS},
+ [insn_mflo] = {M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS},
+ [insn_mtc0] = {M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD},
+ [insn_mthi] = {M(mm_pool32a_op, 0, 0, 0, mm_mthi32_op, mm_pool32axf_op), RS},
+ [insn_mtlo] = {M(mm_pool32a_op, 0, 0, 0, mm_mtlo32_op, mm_pool32axf_op), RS},
+ [insn_mul] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD},
+ [insn_or] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD},
+ [insn_ori] = {M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM},
+ [insn_pref] = {M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM},
+ [insn_rfe] = {0, 0},
+ [insn_sc] = {M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM},
+ [insn_scd] = {0, 0},
+ [insn_sd] = {0, 0},
+ [insn_sll] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD},
+ [insn_sllv] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD},
+ [insn_slt] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD},
+ [insn_sltiu] = {M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+ [insn_sltu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD},
+ [insn_sra] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD},
+ [insn_srav] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srav_op), RT | RS | RD},
+ [insn_srl] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD},
+ [insn_srlv] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srlv32_op), RT | RS | RD},
+ [insn_rotr] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD},
+ [insn_subu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD},
+ [insn_sw] = {M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+ [insn_sync] = {M(mm_pool32a_op, 0, 0, 0, mm_sync_op, mm_pool32axf_op), RS},
+ [insn_tlbp] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0},
+ [insn_tlbr] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0},
+ [insn_tlbwi] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0},
+ [insn_tlbwr] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0},
+ [insn_wait] = {M(mm_pool32a_op, 0, 0, 0, mm_wait_op, mm_pool32axf_op), SCIMM},
+ [insn_wsbh] = {M(mm_pool32a_op, 0, 0, 0, mm_wsbh_op, mm_pool32axf_op), RT | RS},
+ [insn_xor] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD},
+ [insn_xori] = {M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM},
+ [insn_dins] = {0, 0},
+ [insn_dinsm] = {0, 0},
+ [insn_syscall] = {M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM},
+ [insn_bbit0] = {0, 0},
+ [insn_bbit1] = {0, 0},
+ [insn_lwx] = {0, 0},
+ [insn_ldx] = {0, 0},
+};
+
+#undef M
+
+static inline u32 build_bimm(s32 arg)
+{
+ WARN(arg > 0xffff || arg < -0x10000,
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
+
+ return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
+}
+
+static inline u32 build_jimm(u32 arg)
+{
+ WARN(arg & ~((JIMM_MASK << 2) | 1),
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg >> 1) & JIMM_MASK;
+}
+
+/*
+ * The order of opcode arguments is implicitly left to right,
+ * starting with RS and ending with FUNC or IMM.
+ */
+static void build_insn(u32 **buf, enum opcode opc, ...)
+{
+ const struct insn *ip;
+ va_list ap;
+ u32 op;
+
+ if (opc < 0 || opc >= insn_invalid ||
+ (opc == insn_daddiu && r4k_daddiu_bug()) ||
+ (insn_table_MM[opc].match == 0 && insn_table_MM[opc].fields == 0))
+ panic("Unsupported Micro-assembler instruction %d", opc);
+
+ ip = &insn_table_MM[opc];
+
+ op = ip->match;
+ va_start(ap, opc);
+ if (ip->fields & RS) {
+ if (opc == insn_mfc0 || opc == insn_mtc0 ||
+ opc == insn_cfc1 || opc == insn_ctc1)
+ op |= build_rt(va_arg(ap, u32));
+ else
+ op |= build_rs(va_arg(ap, u32));
+ }
+ if (ip->fields & RT) {
+ if (opc == insn_mfc0 || opc == insn_mtc0 ||
+ opc == insn_cfc1 || opc == insn_ctc1)
+ op |= build_rs(va_arg(ap, u32));
+ else
+ op |= build_rt(va_arg(ap, u32));
+ }
+ if (ip->fields & RD)
+ op |= build_rd(va_arg(ap, u32));
+ if (ip->fields & RE)
+ op |= build_re(va_arg(ap, u32));
+ if (ip->fields & SIMM)
+ op |= build_simm(va_arg(ap, s32));
+ if (ip->fields & UIMM)
+ op |= build_uimm(va_arg(ap, u32));
+ if (ip->fields & BIMM)
+ op |= build_bimm(va_arg(ap, s32));
+ if (ip->fields & JIMM)
+ op |= build_jimm(va_arg(ap, u32));
+ if (ip->fields & FUNC)
+ op |= build_func(va_arg(ap, u32));
+ if (ip->fields & SET)
+ op |= build_set(va_arg(ap, u32));
+ if (ip->fields & SCIMM)
+ op |= build_scimm(va_arg(ap, u32));
+ va_end(ap);
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ **buf = ((op & 0xffff) << 16) | (op >> 16);
+#else
+ **buf = op;
+#endif
+ (*buf)++;
+}
+
+static inline void
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+ long laddr = (long)lab->addr;
+ long raddr = (long)rel->addr;
+
+ switch (rel->type) {
+ case R_MIPS_PC16:
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ *rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16);
+#else
+ *rel->addr |= build_bimm(laddr - (raddr + 4));
+#endif
+ break;
+
+ default:
+ panic("Unsupported Micro-assembler relocation %d",
+ rel->type);
+ }
+}
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
new file mode 100644
index 000000000..7154a1d99
--- /dev/null
+++ b/arch/mips/mm/uasm-mips.c
@@ -0,0 +1,290 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, supports
+ * only a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <asm/inst.h>
+#include <asm/elf.h>
+#include <asm/bugs.h>
+#include <asm/uasm.h>
+
+#define RS_MASK 0x1f
+#define RS_SH 21
+#define RT_MASK 0x1f
+#define RT_SH 16
+#define SCIMM_MASK 0xfffff
+#define SCIMM_SH 6
+
+/* This macro sets the non-variable bits of an instruction. */
+#define M(a, b, c, d, e, f) \
+ ((a) << OP_SH \
+ | (b) << RS_SH \
+ | (c) << RT_SH \
+ | (d) << RD_SH \
+ | (e) << RE_SH \
+ | (f) << FUNC_SH)
+
+/* This macro sets the non-variable bits of an R6 instruction. */
+#define M6(a, b, c, d, e) \
+ ((a) << OP_SH \
+ | (b) << RS_SH \
+ | (c) << RT_SH \
+ | (d) << SIMM9_SH \
+ | (e) << FUNC_SH)
+
+#include "uasm.c"
+
+static const struct insn insn_table[insn_invalid] = {
+ [insn_addiu] = {M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_addu] = {M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD},
+ [insn_and] = {M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD},
+ [insn_andi] = {M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM},
+ [insn_bbit0] = {M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
+ [insn_bbit1] = {M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
+ [insn_beq] = {M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
+ [insn_beql] = {M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
+ [insn_bgez] = {M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM},
+ [insn_bgezl] = {M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM},
+ [insn_bgtz] = {M(bgtz_op, 0, 0, 0, 0, 0), RS | BIMM},
+ [insn_blez] = {M(blez_op, 0, 0, 0, 0, 0), RS | BIMM},
+ [insn_bltz] = {M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM},
+ [insn_bltzl] = {M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM},
+ [insn_bne] = {M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
+ [insn_break] = {M(spec_op, 0, 0, 0, 0, break_op), SCIMM},
+#ifndef CONFIG_CPU_MIPSR6
+ [insn_cache] = {M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+#else
+ [insn_cache] = {M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9},
+#endif
+ [insn_cfc1] = {M(cop1_op, cfc_op, 0, 0, 0, 0), RT | RD},
+ [insn_cfcmsa] = {M(msa_op, 0, msa_cfc_op, 0, 0, msa_elm_op), RD | RE},
+ [insn_ctc1] = {M(cop1_op, ctc_op, 0, 0, 0, 0), RT | RD},
+ [insn_ctcmsa] = {M(msa_op, 0, msa_ctc_op, 0, 0, msa_elm_op), RD | RE},
+ [insn_daddiu] = {M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_daddu] = {M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD},
+ [insn_ddivu] = {M(spec_op, 0, 0, 0, 0, ddivu_op), RS | RT},
+ [insn_ddivu_r6] = {M(spec_op, 0, 0, 0, ddivu_ddivu6_op, ddivu_op),
+ RS | RT | RD},
+ [insn_di] = {M(cop0_op, mfmc0_op, 0, 12, 0, 0), RT},
+ [insn_dins] = {M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE},
+ [insn_dinsm] = {M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE},
+ [insn_dinsu] = {M(spec3_op, 0, 0, 0, 0, dinsu_op), RS | RT | RD | RE},
+ [insn_divu] = {M(spec_op, 0, 0, 0, 0, divu_op), RS | RT},
+ [insn_divu_r6] = {M(spec_op, 0, 0, 0, divu_divu6_op, divu_op),
+ RS | RT | RD},
+ [insn_dmfc0] = {M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
+ [insn_dmodu] = {M(spec_op, 0, 0, 0, ddivu_dmodu_op, ddivu_op),
+ RS | RT | RD},
+ [insn_dmtc0] = {M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
+ [insn_dmultu] = {M(spec_op, 0, 0, 0, 0, dmultu_op), RS | RT},
+ [insn_dmulu] = {M(spec_op, 0, 0, 0, dmult_dmul_op, dmultu_op),
+ RS | RT | RD},
+ [insn_drotr] = {M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE},
+ [insn_drotr32] = {M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE},
+ [insn_dsbh] = {M(spec3_op, 0, 0, 0, dsbh_op, dbshfl_op), RT | RD},
+ [insn_dshd] = {M(spec3_op, 0, 0, 0, dshd_op, dbshfl_op), RT | RD},
+ [insn_dsll] = {M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE},
+ [insn_dsll32] = {M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE},
+ [insn_dsllv] = {M(spec_op, 0, 0, 0, 0, dsllv_op), RS | RT | RD},
+ [insn_dsra] = {M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE},
+ [insn_dsra32] = {M(spec_op, 0, 0, 0, 0, dsra32_op), RT | RD | RE},
+ [insn_dsrav] = {M(spec_op, 0, 0, 0, 0, dsrav_op), RS | RT | RD},
+ [insn_dsrl] = {M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE},
+ [insn_dsrl32] = {M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE},
+ [insn_dsrlv] = {M(spec_op, 0, 0, 0, 0, dsrlv_op), RS | RT | RD},
+ [insn_dsubu] = {M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD},
+ [insn_eret] = {M(cop0_op, cop_op, 0, 0, 0, eret_op), 0},
+ [insn_ext] = {M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE},
+ [insn_ins] = {M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE},
+ [insn_j] = {M(j_op, 0, 0, 0, 0, 0), JIMM},
+ [insn_jal] = {M(jal_op, 0, 0, 0, 0, 0), JIMM},
+ [insn_jalr] = {M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD},
+#ifndef CONFIG_CPU_MIPSR6
+ [insn_jr] = {M(spec_op, 0, 0, 0, 0, jr_op), RS},
+#else
+ [insn_jr] = {M(spec_op, 0, 0, 0, 0, jalr_op), RS},
+#endif
+ [insn_lb] = {M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_lbu] = {M(lbu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_ld] = {M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_lddir] = {M(lwc2_op, 0, 0, 0, lddir_op, mult_op), RS | RT | RD},
+ [insn_ldpte] = {M(lwc2_op, 0, 0, 0, ldpte_op, mult_op), RS | RD},
+ [insn_ldx] = {M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD},
+ [insn_lh] = {M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_lhu] = {M(lhu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+#ifndef CONFIG_CPU_MIPSR6
+ [insn_ll] = {M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_lld] = {M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+#else
+ [insn_ll] = {M6(spec3_op, 0, 0, 0, ll6_op), RS | RT | SIMM9},
+ [insn_lld] = {M6(spec3_op, 0, 0, 0, lld6_op), RS | RT | SIMM9},
+#endif
+ [insn_lui] = {M(lui_op, 0, 0, 0, 0, 0), RT | SIMM},
+ [insn_lw] = {M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_lwu] = {M(lwu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_lwx] = {M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD},
+ [insn_mfc0] = {M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
+ [insn_mfhc0] = {M(cop0_op, mfhc0_op, 0, 0, 0, 0), RT | RD | SET},
+ [insn_mfhi] = {M(spec_op, 0, 0, 0, 0, mfhi_op), RD},
+ [insn_mflo] = {M(spec_op, 0, 0, 0, 0, mflo_op), RD},
+ [insn_modu] = {M(spec_op, 0, 0, 0, divu_modu_op, divu_op),
+ RS | RT | RD},
+ [insn_movn] = {M(spec_op, 0, 0, 0, 0, movn_op), RS | RT | RD},
+ [insn_movz] = {M(spec_op, 0, 0, 0, 0, movz_op), RS | RT | RD},
+ [insn_mtc0] = {M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
+ [insn_mthc0] = {M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET},
+ [insn_mthi] = {M(spec_op, 0, 0, 0, 0, mthi_op), RS},
+ [insn_mtlo] = {M(spec_op, 0, 0, 0, 0, mtlo_op), RS},
+ [insn_mulu] = {M(spec_op, 0, 0, 0, multu_mulu_op, multu_op),
+ RS | RT | RD},
+#ifndef CONFIG_CPU_MIPSR6
+ [insn_mul] = {M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
+#else
+ [insn_mul] = {M(spec_op, 0, 0, 0, mult_mul_op, mult_op), RS | RT | RD},
+#endif
+ [insn_multu] = {M(spec_op, 0, 0, 0, 0, multu_op), RS | RT},
+ [insn_nor] = {M(spec_op, 0, 0, 0, 0, nor_op), RS | RT | RD},
+ [insn_or] = {M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD},
+ [insn_ori] = {M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM},
+#ifndef CONFIG_CPU_MIPSR6
+ [insn_pref] = {M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+#else
+ [insn_pref] = {M6(spec3_op, 0, 0, 0, pref6_op), RS | RT | SIMM9},
+#endif
+ [insn_rfe] = {M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0},
+ [insn_rotr] = {M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE},
+ [insn_sb] = {M(sb_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+#ifndef CONFIG_CPU_MIPSR6
+ [insn_sc] = {M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_scd] = {M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+#else
+ [insn_sc] = {M6(spec3_op, 0, 0, 0, sc6_op), RS | RT | SIMM9},
+ [insn_scd] = {M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9},
+#endif
+ [insn_sd] = {M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_seleqz] = {M(spec_op, 0, 0, 0, 0, seleqz_op), RS | RT | RD},
+ [insn_selnez] = {M(spec_op, 0, 0, 0, 0, selnez_op), RS | RT | RD},
+ [insn_sh] = {M(sh_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_sll] = {M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE},
+ [insn_sllv] = {M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD},
+ [insn_slt] = {M(spec_op, 0, 0, 0, 0, slt_op), RS | RT | RD},
+ [insn_slti] = {M(slti_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_sltiu] = {M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_sltu] = {M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD},
+ [insn_sra] = {M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE},
+ [insn_srav] = {M(spec_op, 0, 0, 0, 0, srav_op), RS | RT | RD},
+ [insn_srl] = {M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE},
+ [insn_srlv] = {M(spec_op, 0, 0, 0, 0, srlv_op), RS | RT | RD},
+ [insn_subu] = {M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD},
+ [insn_sw] = {M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_sync] = {M(spec_op, 0, 0, 0, 0, sync_op), RE},
+ [insn_syscall] = {M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
+ [insn_tlbp] = {M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0},
+ [insn_tlbr] = {M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0},
+ [insn_tlbwi] = {M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0},
+ [insn_tlbwr] = {M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0},
+ [insn_wait] = {M(cop0_op, cop_op, 0, 0, 0, wait_op), SCIMM},
+ [insn_wsbh] = {M(spec3_op, 0, 0, 0, wsbh_op, bshfl_op), RT | RD},
+ [insn_xor] = {M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD},
+ [insn_xori] = {M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM},
+ [insn_yield] = {M(spec3_op, 0, 0, 0, 0, yield_op), RS | RD},
+};
+
+#undef M
+
+static inline u32 build_bimm(s32 arg)
+{
+ WARN(arg > 0x1ffff || arg < -0x20000,
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
+
+ return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
+}
+
+static inline u32 build_jimm(u32 arg)
+{
+ WARN(arg & ~(JIMM_MASK << 2),
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg >> 2) & JIMM_MASK;
+}
+
+/*
+ * The order of opcode arguments is implicitly left to right,
+ * starting with RS and ending with FUNC or IMM.
+ */
+static void build_insn(u32 **buf, enum opcode opc, ...)
+{
+ const struct insn *ip;
+ va_list ap;
+ u32 op;
+
+ if (opc < 0 || opc >= insn_invalid ||
+ (opc == insn_daddiu && r4k_daddiu_bug()) ||
+ (insn_table[opc].match == 0 && insn_table[opc].fields == 0))
+ panic("Unsupported Micro-assembler instruction %d", opc);
+
+ ip = &insn_table[opc];
+
+ op = ip->match;
+ va_start(ap, opc);
+ if (ip->fields & RS)
+ op |= build_rs(va_arg(ap, u32));
+ if (ip->fields & RT)
+ op |= build_rt(va_arg(ap, u32));
+ if (ip->fields & RD)
+ op |= build_rd(va_arg(ap, u32));
+ if (ip->fields & RE)
+ op |= build_re(va_arg(ap, u32));
+ if (ip->fields & SIMM)
+ op |= build_simm(va_arg(ap, s32));
+ if (ip->fields & UIMM)
+ op |= build_uimm(va_arg(ap, u32));
+ if (ip->fields & BIMM)
+ op |= build_bimm(va_arg(ap, s32));
+ if (ip->fields & JIMM)
+ op |= build_jimm(va_arg(ap, u32));
+ if (ip->fields & FUNC)
+ op |= build_func(va_arg(ap, u32));
+ if (ip->fields & SET)
+ op |= build_set(va_arg(ap, u32));
+ if (ip->fields & SCIMM)
+ op |= build_scimm(va_arg(ap, u32));
+ if (ip->fields & SIMM9)
+ op |= build_scimm9(va_arg(ap, u32));
+ va_end(ap);
+
+ **buf = op;
+ (*buf)++;
+}
+
+static inline void
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+ long laddr = (long)lab->addr;
+ long raddr = (long)rel->addr;
+
+ switch (rel->type) {
+ case R_MIPS_PC16:
+ *rel->addr |= build_bimm(laddr - (raddr + 4));
+ break;
+
+ default:
+ panic("Unsupported Micro-assembler relocation %d",
+ rel->type);
+ }
+}
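Both build_bimm() variants above encode a PC-relative branch field from the displacement between the label and the instruction following the branch (laddr - (raddr + 4)); microMIPS packs a halfword offset (arg >> 1) and classic MIPS a word offset (arg >> 2), each alongside an explicit sign bit. A minimal host-side sketch of that arithmetic, using hypothetical addresses and helper names (not taken from the patch):

#include <stdio.h>
#include <stdint.h>

/* Displacement from the branch to the label, measured from the
 * instruction after the branch (raddr + 4), as in __resolve_relocs(). */
static int32_t branch_disp(long laddr, long raddr)
{
	return (int32_t)(laddr - (raddr + 4));
}

/* Field layout used by the classic-MIPS build_bimm() above. */
static uint32_t bimm_mips(int32_t arg)
{
	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}

/* Field layout used by the microMIPS build_bimm() above. */
static uint32_t bimm_micromips(int32_t arg)
{
	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
}

int main(void)
{
	long raddr = 0x80100000L;	/* hypothetical branch address */
	long laddr = 0x80100020L;	/* hypothetical label address */
	int32_t d = branch_disp(laddr, raddr);

	/* Prints: disp 28, MIPS field 0x0007, microMIPS field 0x000e */
	printf("disp %d, MIPS field 0x%04x, microMIPS field 0x%04x\n",
	       d, bimm_mips(d), bimm_micromips(d));
	return 0;
}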
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
new file mode 100644
index 000000000..81dd226d6
--- /dev/null
+++ b/arch/mips/mm/uasm.c
@@ -0,0 +1,643 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, supports
+ * only a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
+ */
+
+enum fields {
+ RS = 0x001,
+ RT = 0x002,
+ RD = 0x004,
+ RE = 0x008,
+ SIMM = 0x010,
+ UIMM = 0x020,
+ BIMM = 0x040,
+ JIMM = 0x080,
+ FUNC = 0x100,
+ SET = 0x200,
+ SCIMM = 0x400,
+ SIMM9 = 0x800,
+};
+
+#define OP_MASK 0x3f
+#define OP_SH 26
+#define RD_MASK 0x1f
+#define RD_SH 11
+#define RE_MASK 0x1f
+#define RE_SH 6
+#define IMM_MASK 0xffff
+#define IMM_SH 0
+#define JIMM_MASK 0x3ffffff
+#define JIMM_SH 0
+#define FUNC_MASK 0x3f
+#define FUNC_SH 0
+#define SET_MASK 0x7
+#define SET_SH 0
+#define SIMM9_SH 7
+#define SIMM9_MASK 0x1ff
+
+enum opcode {
+ insn_addiu, insn_addu, insn_and, insn_andi, insn_bbit0, insn_bbit1,
+ insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bgtz, insn_blez,
+ insn_bltz, insn_bltzl, insn_bne, insn_break, insn_cache, insn_cfc1,
+ insn_cfcmsa, insn_ctc1, insn_ctcmsa, insn_daddiu, insn_daddu, insn_ddivu,
+ insn_ddivu_r6, insn_di, insn_dins, insn_dinsm, insn_dinsu, insn_divu,
+ insn_divu_r6, insn_dmfc0, insn_dmodu, insn_dmtc0, insn_dmultu,
+ insn_dmulu, insn_drotr, insn_drotr32, insn_dsbh, insn_dshd, insn_dsll,
+ insn_dsll32, insn_dsllv, insn_dsra, insn_dsra32, insn_dsrav, insn_dsrl,
+ insn_dsrl32, insn_dsrlv, insn_dsubu, insn_eret, insn_ext, insn_ins,
+ insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, insn_lbu, insn_ld,
+ insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu, insn_ll, insn_lld,
+ insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi,
+ insn_mflo, insn_modu, insn_movn, insn_movz, insn_mtc0, insn_mthc0,
+ insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_nor,
+ insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, insn_sc,
+ insn_scd, insn_seleqz, insn_selnez, insn_sd, insn_sh, insn_sll,
+ insn_sllv, insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra,
+ insn_srav, insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync,
+ insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait,
+ insn_wsbh, insn_xor, insn_xori, insn_yield,
+ insn_invalid /* insn_invalid must be last */
+};
+
+struct insn {
+ u32 match;
+ enum fields fields;
+};
+
+static inline u32 build_rs(u32 arg)
+{
+ WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & RS_MASK) << RS_SH;
+}
+
+static inline u32 build_rt(u32 arg)
+{
+ WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & RT_MASK) << RT_SH;
+}
+
+static inline u32 build_rd(u32 arg)
+{
+ WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & RD_MASK) << RD_SH;
+}
+
+static inline u32 build_re(u32 arg)
+{
+ WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & RE_MASK) << RE_SH;
+}
+
+static inline u32 build_simm(s32 arg)
+{
+ WARN(arg > 0x7fff || arg < -0x8000,
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ return arg & 0xffff;
+}
+
+static inline u32 build_uimm(u32 arg)
+{
+ WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n");
+
+ return arg & IMM_MASK;
+}
+
+static inline u32 build_scimm(u32 arg)
+{
+ WARN(arg & ~SCIMM_MASK,
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & SCIMM_MASK) << SCIMM_SH;
+}
+
+static inline u32 build_scimm9(s32 arg)
+{
+ WARN((arg > 0xff || arg < -0x100),
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & SIMM9_MASK) << SIMM9_SH;
+}
+
+static inline u32 build_func(u32 arg)
+{
+ WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
+
+ return arg & FUNC_MASK;
+}
+
+static inline u32 build_set(u32 arg)
+{
+ WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n");
+
+ return arg & SET_MASK;
+}
+
+static void build_insn(u32 **buf, enum opcode opc, ...);
+
+#define I_u1u2u3(op) \
+Ip_u1u2u3(op) \
+{ \
+ build_insn(buf, insn##op, a, b, c); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+#define I_s3s1s2(op) \
+Ip_s3s1s2(op) \
+{ \
+ build_insn(buf, insn##op, b, c, a); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+#define I_u2u1u3(op) \
+Ip_u2u1u3(op) \
+{ \
+ build_insn(buf, insn##op, b, a, c); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+#define I_u3u2u1(op) \
+Ip_u3u2u1(op) \
+{ \
+ build_insn(buf, insn##op, c, b, a); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+#define I_u3u1u2(op) \
+Ip_u3u1u2(op) \
+{ \
+ build_insn(buf, insn##op, b, c, a); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+#define I_u1u2s3(op) \
+Ip_u1u2s3(op) \
+{ \
+ build_insn(buf, insn##op, a, b, c); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+#define I_u2s3u1(op) \
+Ip_u2s3u1(op) \
+{ \
+ build_insn(buf, insn##op, c, a, b); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+#define I_u2u1s3(op) \
+Ip_u2u1s3(op) \
+{ \
+ build_insn(buf, insn##op, b, a, c); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+#define I_u2u1msbu3(op) \
+Ip_u2u1msbu3(op) \
+{ \
+ build_insn(buf, insn##op, b, a, c+d-1, c); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+#define I_u2u1msb32u3(op) \
+Ip_u2u1msbu3(op) \
+{ \
+ build_insn(buf, insn##op, b, a, c+d-33, c); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+#define I_u2u1msb32msb3(op) \
+Ip_u2u1msbu3(op) \
+{ \
+ build_insn(buf, insn##op, b, a, c+d-33, c-32); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+#define I_u2u1msbdu3(op) \
+Ip_u2u1msbu3(op) \
+{ \
+ build_insn(buf, insn##op, b, a, d-1, c); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+#define I_u1u2(op) \
+Ip_u1u2(op) \
+{ \
+ build_insn(buf, insn##op, a, b); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+#define I_u2u1(op) \
+Ip_u1u2(op) \
+{ \
+ build_insn(buf, insn##op, b, a); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+#define I_u1s2(op) \
+Ip_u1s2(op) \
+{ \
+ build_insn(buf, insn##op, a, b); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+#define I_u1(op) \
+Ip_u1(op) \
+{ \
+ build_insn(buf, insn##op, a); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+#define I_0(op) \
+Ip_0(op) \
+{ \
+ build_insn(buf, insn##op); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
+I_u2u1s3(_addiu)
+I_u3u1u2(_addu)
+I_u2u1u3(_andi)
+I_u3u1u2(_and)
+I_u1u2s3(_beq)
+I_u1u2s3(_beql)
+I_u1s2(_bgez)
+I_u1s2(_bgezl)
+I_u1s2(_bgtz)
+I_u1s2(_blez)
+I_u1s2(_bltz)
+I_u1s2(_bltzl)
+I_u1u2s3(_bne)
+I_u1(_break)
+I_u2s3u1(_cache)
+I_u1u2(_cfc1)
+I_u2u1(_cfcmsa)
+I_u1u2(_ctc1)
+I_u2u1(_ctcmsa)
+I_u1u2(_ddivu)
+I_u3u1u2(_ddivu_r6)
+I_u1u2u3(_dmfc0)
+I_u3u1u2(_dmodu)
+I_u1u2u3(_dmtc0)
+I_u1u2(_dmultu)
+I_u3u1u2(_dmulu)
+I_u2u1s3(_daddiu)
+I_u3u1u2(_daddu)
+I_u1(_di);
+I_u1u2(_divu)
+I_u3u1u2(_divu_r6)
+I_u2u1(_dsbh);
+I_u2u1(_dshd);
+I_u2u1u3(_dsll)
+I_u2u1u3(_dsll32)
+I_u3u2u1(_dsllv)
+I_u2u1u3(_dsra)
+I_u2u1u3(_dsra32)
+I_u3u2u1(_dsrav)
+I_u2u1u3(_dsrl)
+I_u2u1u3(_dsrl32)
+I_u3u2u1(_dsrlv)
+I_u2u1u3(_drotr)
+I_u2u1u3(_drotr32)
+I_u3u1u2(_dsubu)
+I_0(_eret)
+I_u2u1msbdu3(_ext)
+I_u2u1msbu3(_ins)
+I_u1(_j)
+I_u1(_jal)
+I_u2u1(_jalr)
+I_u1(_jr)
+I_u2s3u1(_lb)
+I_u2s3u1(_lbu)
+I_u2s3u1(_ld)
+I_u2s3u1(_lh)
+I_u2s3u1(_lhu)
+I_u2s3u1(_ll)
+I_u2s3u1(_lld)
+I_u1s2(_lui)
+I_u2s3u1(_lw)
+I_u2s3u1(_lwu)
+I_u1u2u3(_mfc0)
+I_u1u2u3(_mfhc0)
+I_u3u1u2(_modu)
+I_u3u1u2(_movn)
+I_u3u1u2(_movz)
+I_u1(_mfhi)
+I_u1(_mflo)
+I_u1u2u3(_mtc0)
+I_u1u2u3(_mthc0)
+I_u1(_mthi)
+I_u1(_mtlo)
+I_u3u1u2(_mul)
+I_u1u2(_multu)
+I_u3u1u2(_mulu)
+I_u3u1u2(_nor)
+I_u3u1u2(_or)
+I_u2u1u3(_ori)
+I_0(_rfe)
+I_u2s3u1(_sb)
+I_u2s3u1(_sc)
+I_u2s3u1(_scd)
+I_u2s3u1(_sd)
+I_u3u1u2(_seleqz)
+I_u3u1u2(_selnez)
+I_u2s3u1(_sh)
+I_u2u1u3(_sll)
+I_u3u2u1(_sllv)
+I_s3s1s2(_slt)
+I_u2u1s3(_slti)
+I_u2u1s3(_sltiu)
+I_u3u1u2(_sltu)
+I_u2u1u3(_sra)
+I_u3u2u1(_srav)
+I_u2u1u3(_srl)
+I_u3u2u1(_srlv)
+I_u2u1u3(_rotr)
+I_u3u1u2(_subu)
+I_u2s3u1(_sw)
+I_u1(_sync)
+I_0(_tlbp)
+I_0(_tlbr)
+I_0(_tlbwi)
+I_0(_tlbwr)
+I_u1(_wait);
+I_u2u1(_wsbh)
+I_u3u1u2(_xor)
+I_u2u1u3(_xori)
+I_u2u1(_yield)
+I_u2u1msbu3(_dins);
+I_u2u1msb32u3(_dinsm);
+I_u2u1msb32msb3(_dinsu);
+I_u1(_syscall);
+I_u1u2s3(_bbit0);
+I_u1u2s3(_bbit1);
+I_u3u1u2(_lwx)
+I_u3u1u2(_ldx)
+I_u1u2(_ldpte)
+I_u2u1u3(_lddir)
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#include <asm/octeon/octeon.h>
+void uasm_i_pref(u32 **buf, unsigned int a, signed int b,
+ unsigned int c)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && a <= 24 && a != 5)
+ /*
+ * As per erratum Core-14449, replace prefetches 0-4,
+ * 6-24 with 'pref 28'.
+ */
+ build_insn(buf, insn_pref, c, 28, b);
+ else
+ build_insn(buf, insn_pref, c, a, b);
+}
+UASM_EXPORT_SYMBOL(uasm_i_pref);
+#else
+I_u2s3u1(_pref)
+#endif
+
+/* Handle labels. */
+void uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
+{
+ (*lab)->addr = addr;
+ (*lab)->lab = lid;
+ (*lab)++;
+}
+UASM_EXPORT_SYMBOL(uasm_build_label);
+
+int uasm_in_compat_space_p(long addr)
+{
+ /* Is this address in 32bit compat space? */
+ return addr == (int)addr;
+}
+UASM_EXPORT_SYMBOL(uasm_in_compat_space_p);
+
+static int uasm_rel_highest(long val)
+{
+#ifdef CONFIG_64BIT
+ return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
+#else
+ return 0;
+#endif
+}
+
+static int uasm_rel_higher(long val)
+{
+#ifdef CONFIG_64BIT
+ return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
+#else
+ return 0;
+#endif
+}
+
+int uasm_rel_hi(long val)
+{
+ return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
+}
+UASM_EXPORT_SYMBOL(uasm_rel_hi);
+
+int uasm_rel_lo(long val)
+{
+ return ((val & 0xffff) ^ 0x8000) - 0x8000;
+}
+UASM_EXPORT_SYMBOL(uasm_rel_lo);
+
+void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+{
+ if (!uasm_in_compat_space_p(addr)) {
+ uasm_i_lui(buf, rs, uasm_rel_highest(addr));
+ if (uasm_rel_higher(addr))
+ uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr));
+ if (uasm_rel_hi(addr)) {
+ uasm_i_dsll(buf, rs, rs, 16);
+ uasm_i_daddiu(buf, rs, rs,
+ uasm_rel_hi(addr));
+ uasm_i_dsll(buf, rs, rs, 16);
+ } else
+ uasm_i_dsll32(buf, rs, rs, 0);
+ } else
+ uasm_i_lui(buf, rs, uasm_rel_hi(addr));
+}
+UASM_EXPORT_SYMBOL(UASM_i_LA_mostly);
+
+void UASM_i_LA(u32 **buf, unsigned int rs, long addr)
+{
+ UASM_i_LA_mostly(buf, rs, addr);
+ if (uasm_rel_lo(addr)) {
+ if (!uasm_in_compat_space_p(addr))
+ uasm_i_daddiu(buf, rs, rs,
+ uasm_rel_lo(addr));
+ else
+ uasm_i_addiu(buf, rs, rs,
+ uasm_rel_lo(addr));
+ }
+}
+UASM_EXPORT_SYMBOL(UASM_i_LA);
+
+/* Handle relocations. */
+void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
+{
+ (*rel)->addr = addr;
+ (*rel)->type = R_MIPS_PC16;
+ (*rel)->lab = lid;
+ (*rel)++;
+}
+UASM_EXPORT_SYMBOL(uasm_r_mips_pc16);
+
+static inline void __resolve_relocs(struct uasm_reloc *rel,
+ struct uasm_label *lab);
+
+void uasm_resolve_relocs(struct uasm_reloc *rel,
+ struct uasm_label *lab)
+{
+ struct uasm_label *l;
+
+ for (; rel->lab != UASM_LABEL_INVALID; rel++)
+ for (l = lab; l->lab != UASM_LABEL_INVALID; l++)
+ if (rel->lab == l->lab)
+ __resolve_relocs(rel, l);
+}
+UASM_EXPORT_SYMBOL(uasm_resolve_relocs);
+
+void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end,
+ long off)
+{
+ for (; rel->lab != UASM_LABEL_INVALID; rel++)
+ if (rel->addr >= first && rel->addr < end)
+ rel->addr += off;
+}
+UASM_EXPORT_SYMBOL(uasm_move_relocs);
+
+void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end,
+ long off)
+{
+ for (; lab->lab != UASM_LABEL_INVALID; lab++)
+ if (lab->addr >= first && lab->addr < end)
+ lab->addr += off;
+}
+UASM_EXPORT_SYMBOL(uasm_move_labels);
+
+void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
+ u32 *first, u32 *end, u32 *target)
+{
+ long off = (long)(target - first);
+
+ memcpy(target, first, (end - first) * sizeof(u32));
+
+ uasm_move_relocs(rel, first, end, off);
+ uasm_move_labels(lab, first, end, off);
+}
+UASM_EXPORT_SYMBOL(uasm_copy_handler);
+
+int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
+{
+ for (; rel->lab != UASM_LABEL_INVALID; rel++) {
+ if (rel->addr == addr
+ && (rel->type == R_MIPS_PC16
+ || rel->type == R_MIPS_26))
+ return 1;
+ }
+
+ return 0;
+}
+UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay);
+
+/* Convenience functions for labeled branches. */
+void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bltz(p, reg, 0);
+}
+UASM_EXPORT_SYMBOL(uasm_il_bltz);
+
+void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_b(p, 0);
+}
+UASM_EXPORT_SYMBOL(uasm_il_b);
+
+void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1,
+ unsigned int r2, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_beq(p, r1, r2, 0);
+}
+UASM_EXPORT_SYMBOL(uasm_il_beq);
+
+void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_beqz(p, reg, 0);
+}
+UASM_EXPORT_SYMBOL(uasm_il_beqz);
+
+void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_beqzl(p, reg, 0);
+}
+UASM_EXPORT_SYMBOL(uasm_il_beqzl);
+
+void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+ unsigned int reg2, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bne(p, reg1, reg2, 0);
+}
+UASM_EXPORT_SYMBOL(uasm_il_bne);
+
+void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bnez(p, reg, 0);
+}
+UASM_EXPORT_SYMBOL(uasm_il_bnez);
+
+void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bgezl(p, reg, 0);
+}
+UASM_EXPORT_SYMBOL(uasm_il_bgezl);
+
+void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bgez(p, reg, 0);
+}
+UASM_EXPORT_SYMBOL(uasm_il_bgez);
+
+void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ unsigned int bit, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bbit0(p, reg, bit, 0);
+}
+UASM_EXPORT_SYMBOL(uasm_il_bbit0);
+
+void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ unsigned int bit, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bbit1(p, reg, bit, 0);
+}
+UASM_EXPORT_SYMBOL(uasm_il_bbit1);
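uasm_rel_hi() and uasm_rel_lo() above split an address so that a lui/addiu (or daddiu) pair reconstructs it exactly: because addiu sign-extends its 16-bit immediate, the high half is rounded up by 0x8000 whenever the low half would be negative. A small standalone sketch of the round trip, with hypothetical example addresses:

#include <stdio.h>

/* Same arithmetic as uasm_rel_hi()/uasm_rel_lo() above. */
static int rel_hi(long val)
{
	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}

static int rel_lo(long val)
{
	return ((val & 0xffff) ^ 0x8000) - 0x8000;
}

int main(void)
{
	/* hypothetical addresses: low half below and above 0x8000 */
	long addrs[] = { 0x12345678L, 0x12348765L };
	int i;

	for (i = 0; i < 2; i++) {
		long addr = addrs[i];
		int hi = rel_hi(addr);
		int lo = rel_lo(addr);

		/* "lui reg, hi; addiu reg, reg, lo" yields (hi << 16) + lo */
		printf("0x%08lx -> hi 0x%04x lo %6d -> 0x%08lx\n",
		       addr, hi & 0xffff, lo, ((long)hi << 16) + lo);
	}
	return 0;
}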
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
new file mode 100644
index 000000000..94c11f5ea
--- /dev/null
+++ b/arch/mips/mti-malta/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Carsten Langgaard, carstenl@mips.com
+# Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+#
+# Copyright (C) 2008 Wind River Systems, Inc.
+# written by Ralf Baechle <ralf@linux-mips.org>
+#
+obj-y += malta-dt.o
+obj-y += malta-dtshim.o
+obj-y += malta-init.o
+obj-y += malta-int.o
+obj-y += malta-memory.o
+obj-y += malta-platform.o
+obj-y += malta-setup.o
+obj-y += malta-time.o
+
+obj-$(CONFIG_MIPS_CMP) += malta-amon.o
+
+CFLAGS_malta-dtshim.o = -I$(src)/../../../scripts/dtc/libfdt
diff --git a/arch/mips/mti-malta/Platform b/arch/mips/mti-malta/Platform
new file mode 100644
index 000000000..41e0d2a2d
--- /dev/null
+++ b/arch/mips/mti-malta/Platform
@@ -0,0 +1,10 @@
+#
+# MIPS Malta board
+#
+cflags-$(CONFIG_MIPS_MALTA) += -I$(srctree)/arch/mips/include/asm/mach-malta
+ifdef CONFIG_KVM_GUEST
+ load-$(CONFIG_MIPS_MALTA) += 0x0000000040100000
+else
+ load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000
+endif
+all-$(CONFIG_MIPS_MALTA) := $(COMPRESSION_FNAME).bin
diff --git a/arch/mips/mti-malta/malta-amon.c b/arch/mips/mti-malta/malta-amon.c
new file mode 100644
index 000000000..84ac523b0
--- /dev/null
+++ b/arch/mips/mti-malta/malta-amon.c
@@ -0,0 +1,88 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ *
+ * Arbitrary Monitor Interface
+ */
+#include <linux/kernel.h>
+#include <linux/smp.h>
+
+#include <asm/addrspace.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips-boards/launch.h>
+#include <asm/vpe.h>
+
+int amon_cpu_avail(int cpu)
+{
+ struct cpulaunch *launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH);
+
+ if (cpu < 0 || cpu >= NCPULAUNCH) {
+ pr_debug("avail: cpu%d is out of range\n", cpu);
+ return 0;
+ }
+
+ launch += cpu;
+ if (!(launch->flags & LAUNCH_FREADY)) {
+ pr_debug("avail: cpu%d is not ready\n", cpu);
+ return 0;
+ }
+ if (launch->flags & (LAUNCH_FGO|LAUNCH_FGONE)) {
+ pr_debug("avail: too late.. cpu%d is already gone\n", cpu);
+ return 0;
+ }
+
+ return 1;
+}
+
+int amon_cpu_start(int cpu,
+ unsigned long pc, unsigned long sp,
+ unsigned long gp, unsigned long a0)
+{
+ volatile struct cpulaunch *launch =
+ (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH);
+
+ if (!amon_cpu_avail(cpu))
+ return -1;
+ if (cpu == smp_processor_id()) {
+ pr_debug("launch: I am cpu%d!\n", cpu);
+ return -1;
+ }
+ launch += cpu;
+
+ pr_debug("launch: starting cpu%d\n", cpu);
+
+ launch->pc = pc;
+ launch->gp = gp;
+ launch->sp = sp;
+ launch->a0 = a0;
+
+ smp_wmb(); /* Target must see parameters before go */
+ launch->flags |= LAUNCH_FGO;
+ smp_wmb(); /* Target must see go before we poll */
+
+ while ((launch->flags & LAUNCH_FGONE) == 0)
+ ;
+ smp_rmb(); /* Target will be updating flags soon */
+ pr_debug("launch: cpu%d gone!\n", cpu);
+
+ return 0;
+}
+
+#ifdef CONFIG_MIPS_VPE_LOADER_CMP
+int vpe_run(struct vpe *v)
+{
+ struct vpe_notifications *n;
+
+ if (amon_cpu_start(aprp_cpu_index(), v->__start, 0, 0, 0) < 0)
+ return -1;
+
+ list_for_each_entry(n, &v->notify, list)
+ n->start(VPE_MODULE_MINOR);
+
+ return 0;
+}
+#endif
diff --git a/arch/mips/mti-malta/malta-dt.c b/arch/mips/mti-malta/malta-dt.c
new file mode 100644
index 000000000..d045c9149
--- /dev/null
+++ b/arch/mips/mti-malta/malta-dt.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+
+void __init device_tree_init(void)
+{
+ unflatten_and_copy_device_tree();
+}
diff --git a/arch/mips/mti-malta/malta-dtshim.c b/arch/mips/mti-malta/malta-dtshim.c
new file mode 100644
index 000000000..f451268f6
--- /dev/null
+++ b/arch/mips/mti-malta/malta-dtshim.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+#include <linux/of_fdt.h>
+#include <linux/sizes.h>
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/fw/fw.h>
+#include <asm/mips-boards/generic.h>
+#include <asm/mips-boards/malta.h>
+#include <asm/mips-cps.h>
+#include <asm/page.h>
+
+#define ROCIT_REG_BASE 0x1f403000
+#define ROCIT_CONFIG_GEN1 (ROCIT_REG_BASE + 0x04)
+#define ROCIT_CONFIG_GEN1_MEMMAP_SHIFT 8
+#define ROCIT_CONFIG_GEN1_MEMMAP_MASK (0xf << 8)
+
+static unsigned char fdt_buf[16 << 10] __initdata __aligned(8);
+
+/* determined physical memory size, not overridden by command line args */
+extern unsigned long physical_memsize;
+
+enum mem_map {
+ MEM_MAP_V1 = 0,
+ MEM_MAP_V2,
+};
+
+#define MAX_MEM_ARRAY_ENTRIES 2
+
+static __init int malta_scon(void)
+{
+ int scon = MIPS_REVISION_SCONID;
+
+ if (scon != MIPS_REVISION_SCON_OTHER)
+ return scon;
+
+ switch (MIPS_REVISION_CORID) {
+ case MIPS_REVISION_CORID_QED_RM5261:
+ case MIPS_REVISION_CORID_CORE_LV:
+ case MIPS_REVISION_CORID_CORE_FPGA:
+ case MIPS_REVISION_CORID_CORE_FPGAR2:
+ return MIPS_REVISION_SCON_GT64120;
+
+ case MIPS_REVISION_CORID_CORE_EMUL_BON:
+ case MIPS_REVISION_CORID_BONITO64:
+ case MIPS_REVISION_CORID_CORE_20K:
+ return MIPS_REVISION_SCON_BONITO;
+
+ case MIPS_REVISION_CORID_CORE_MSC:
+ case MIPS_REVISION_CORID_CORE_FPGA2:
+ case MIPS_REVISION_CORID_CORE_24K:
+ return MIPS_REVISION_SCON_SOCIT;
+
+ case MIPS_REVISION_CORID_CORE_FPGA3:
+ case MIPS_REVISION_CORID_CORE_FPGA4:
+ case MIPS_REVISION_CORID_CORE_FPGA5:
+ case MIPS_REVISION_CORID_CORE_EMUL_MSC:
+ default:
+ return MIPS_REVISION_SCON_ROCIT;
+ }
+}
+
+static unsigned __init gen_fdt_mem_array(__be32 *mem_array, unsigned long size,
+ enum mem_map map)
+{
+ unsigned long size_preio;
+ unsigned entries;
+
+ entries = 1;
+ mem_array[0] = cpu_to_be32(PHYS_OFFSET);
+ if (IS_ENABLED(CONFIG_EVA)) {
+ /*
+ * The current Malta EVA configuration is "special" in that it
+ * always makes use of addresses in the upper half of the 32 bit
+ * physical address map, which gives it a contiguous region of
+ * DDR but limits it to 2GB.
+ */
+ mem_array[1] = cpu_to_be32(size);
+ goto done;
+ }
+
+ size_preio = min_t(unsigned long, size, SZ_256M);
+ mem_array[1] = cpu_to_be32(size_preio);
+ size -= size_preio;
+ if (!size)
+ goto done;
+
+ if (map == MEM_MAP_V2) {
+ /*
+ * We have a flat 32 bit physical memory map with DDR filling
+ * all 4GB of the memory map, apart from the I/O region which
+ * obscures 256MB from 0x10000000-0x1fffffff.
+ *
+ * Therefore we discard the 256MB behind the I/O region.
+ */
+ if (size <= SZ_256M)
+ goto done;
+ size -= SZ_256M;
+
+ /* Make use of the memory following the I/O region */
+ entries++;
+ mem_array[2] = cpu_to_be32(PHYS_OFFSET + SZ_512M);
+ mem_array[3] = cpu_to_be32(size);
+ } else {
+ /*
+ * We have a 32 bit physical memory map with a 2GB DDR region
+ * aliased in the upper & lower halves of it. The I/O region
+ * obscures 256MB from 0x10000000-0x1fffffff in the low alias
+ * but the DDR it obscures is accessible via the high alias.
+ *
+ * Simply access everything beyond the lowest 256MB of DDR using
+ * the high alias.
+ */
+ entries++;
+ mem_array[2] = cpu_to_be32(PHYS_OFFSET + SZ_2G + SZ_256M);
+ mem_array[3] = cpu_to_be32(size);
+ }
+
+done:
+ BUG_ON(entries > MAX_MEM_ARRAY_ENTRIES);
+ return entries;
+}
+
+static void __init append_memory(void *fdt, int root_off)
+{
+ __be32 mem_array[2 * MAX_MEM_ARRAY_ENTRIES];
+ unsigned long memsize;
+ unsigned mem_entries;
+ int i, err, mem_off;
+ enum mem_map mem_map;
+ u32 config;
+ char *var, param_name[10], *var_names[] = {
+ "ememsize", "memsize",
+ };
+
+ /* if a memory node already exists, leave it alone */
+ mem_off = fdt_path_offset(fdt, "/memory");
+ if (mem_off >= 0)
+ return;
+
+ /* find memory size from the bootloader environment */
+ for (i = 0; i < ARRAY_SIZE(var_names); i++) {
+ var = fw_getenv(var_names[i]);
+ if (!var)
+ continue;
+
+ err = kstrtoul(var, 0, &physical_memsize);
+ if (!err)
+ break;
+
+ pr_warn("Failed to read the '%s' env variable '%s'\n",
+ var_names[i], var);
+ }
+
+ if (!physical_memsize) {
+ pr_warn("The bootloader didn't provide memsize: defaulting to 32MB\n");
+ physical_memsize = 32 << 20;
+ }
+
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+ /*
+ * SOC-it swaps, or perhaps doesn't swap, when DMA'ing
+ * the last word of physical memory.
+ */
+ physical_memsize -= PAGE_SIZE;
+ }
+
+ /* default to using all available RAM */
+ memsize = physical_memsize;
+
+ /* allow the user to override the usable memory */
+ for (i = 0; i < ARRAY_SIZE(var_names); i++) {
+ snprintf(param_name, sizeof(param_name), "%s=", var_names[i]);
+ var = strstr(arcs_cmdline, param_name);
+ if (!var)
+ continue;
+
+ memsize = memparse(var + strlen(param_name), NULL);
+ }
+
+ /* if the user says there's more RAM than we thought, believe them */
+ physical_memsize = max_t(unsigned long, physical_memsize, memsize);
+
+ /* detect the memory map in use */
+ if (malta_scon() == MIPS_REVISION_SCON_ROCIT) {
+ /* ROCit has a register indicating the memory map in use */
+ config = readl((void __iomem *)CKSEG1ADDR(ROCIT_CONFIG_GEN1));
+ mem_map = config & ROCIT_CONFIG_GEN1_MEMMAP_MASK;
+ mem_map >>= ROCIT_CONFIG_GEN1_MEMMAP_SHIFT;
+ } else {
+ /* if not using ROCit, presume the v1 memory map */
+ mem_map = MEM_MAP_V1;
+ }
+ if (mem_map > MEM_MAP_V2)
+ panic("Unsupported physical memory map v%u detected",
+ (unsigned int)mem_map);
+
+ /* append memory to the DT */
+ mem_off = fdt_add_subnode(fdt, root_off, "memory");
+ if (mem_off < 0)
+ panic("Unable to add memory node to DT: %d", mem_off);
+
+ err = fdt_setprop_string(fdt, mem_off, "device_type", "memory");
+ if (err)
+ panic("Unable to set memory node device_type: %d", err);
+
+ mem_entries = gen_fdt_mem_array(mem_array, physical_memsize, mem_map);
+ err = fdt_setprop(fdt, mem_off, "reg", mem_array,
+ mem_entries * 2 * sizeof(mem_array[0]));
+ if (err)
+ panic("Unable to set memory regs property: %d", err);
+
+ mem_entries = gen_fdt_mem_array(mem_array, memsize, mem_map);
+ err = fdt_setprop(fdt, mem_off, "linux,usable-memory", mem_array,
+ mem_entries * 2 * sizeof(mem_array[0]));
+ if (err)
+ panic("Unable to set linux,usable-memory property: %d", err);
+}
+
+static void __init remove_gic(void *fdt)
+{
+ int err, gic_off, i8259_off, cpu_off;
+ void __iomem *biu_base;
+ uint32_t cpu_phandle, sc_cfg;
+
+ /* if we have a CM which reports a GIC is present, leave the DT alone */
+ err = mips_cm_probe();
+ if (!err && (read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX))
+ return;
+
+ if (malta_scon() == MIPS_REVISION_SCON_ROCIT) {
+ /*
+ * On systems using the RocIT system controller a GIC may be
+ * present without a CM. Detect whether that is the case.
+ */
+ biu_base = ioremap(MSC01_BIU_REG_BASE,
+ MSC01_BIU_ADDRSPACE_SZ);
+ sc_cfg = __raw_readl(biu_base + MSC01_SC_CFG_OFS);
+ if (sc_cfg & MSC01_SC_CFG_GICPRES_MSK) {
+ /* enable the GIC at the system controller level */
+ sc_cfg |= BIT(MSC01_SC_CFG_GICENA_SHF);
+ __raw_writel(sc_cfg, biu_base + MSC01_SC_CFG_OFS);
+ return;
+ }
+ }
+
+ gic_off = fdt_node_offset_by_compatible(fdt, -1, "mti,gic");
+ if (gic_off < 0) {
+ pr_warn("malta-dtshim: unable to find DT GIC node: %d\n",
+ gic_off);
+ return;
+ }
+
+ err = fdt_nop_node(fdt, gic_off);
+ if (err)
+ pr_warn("malta-dtshim: unable to nop GIC node\n");
+
+ i8259_off = fdt_node_offset_by_compatible(fdt, -1, "intel,i8259");
+ if (i8259_off < 0) {
+ pr_warn("malta-dtshim: unable to find DT i8259 node: %d\n",
+ i8259_off);
+ return;
+ }
+
+ cpu_off = fdt_node_offset_by_compatible(fdt, -1,
+ "mti,cpu-interrupt-controller");
+ if (cpu_off < 0) {
+ pr_warn("malta-dtshim: unable to find CPU intc node: %d\n",
+ cpu_off);
+ return;
+ }
+
+ cpu_phandle = fdt_get_phandle(fdt, cpu_off);
+ if (!cpu_phandle) {
+ pr_warn("malta-dtshim: unable to get CPU intc phandle\n");
+ return;
+ }
+
+ err = fdt_setprop_u32(fdt, i8259_off, "interrupt-parent", cpu_phandle);
+ if (err) {
+ pr_warn("malta-dtshim: unable to set i8259 interrupt-parent: %d\n",
+ err);
+ return;
+ }
+
+ err = fdt_setprop_u32(fdt, i8259_off, "interrupts", 2);
+ if (err) {
+ pr_warn("malta-dtshim: unable to set i8259 interrupts: %d\n",
+ err);
+ return;
+ }
+}
+
+void __init *malta_dt_shim(void *fdt)
+{
+ int root_off, len, err;
+ const char *compat;
+
+ if (fdt_check_header(fdt))
+ panic("Corrupt DT");
+
+ err = fdt_open_into(fdt, fdt_buf, sizeof(fdt_buf));
+ if (err)
+ panic("Unable to open FDT: %d", err);
+
+ root_off = fdt_path_offset(fdt_buf, "/");
+ if (root_off < 0)
+ panic("No / node in DT");
+
+ compat = fdt_getprop(fdt_buf, root_off, "compatible", &len);
+ if (!compat)
+ panic("No root compatible property in DT: %d", len);
+
+ /* if this isn't Malta, leave the DT alone */
+ if (strncmp(compat, "mti,malta", len))
+ return fdt;
+
+ append_memory(fdt_buf, root_off);
+ remove_gic(fdt_buf);
+
+ err = fdt_pack(fdt_buf);
+ if (err)
+ panic("Unable to pack FDT: %d\n", err);
+
+ return fdt_buf;
+}
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
new file mode 100644
index 000000000..893af377a
--- /dev/null
+++ b/arch/mips/mti-malta/malta-init.c
@@ -0,0 +1,298 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * PROM library initialisation code.
+ *
+ * Copyright (C) 1999,2000,2004,2005,2012 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Maciej W. Rozycki <macro@mips.com>
+ * Steven J. Hill <sjhill@mips.com>
+ */
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/pci_regs.h>
+#include <linux/serial_core.h>
+
+#include <asm/cacheflush.h>
+#include <asm/smp-ops.h>
+#include <asm/traps.h>
+#include <asm/fw/fw.h>
+#include <asm/mips-cps.h>
+#include <asm/mips-boards/generic.h>
+#include <asm/mips-boards/malta.h>
+
+static int mips_revision_corid;
+int mips_revision_sconid;
+
+/* Bonito64 system controller register base. */
+unsigned long _pcictrl_bonito;
+unsigned long _pcictrl_bonito_pcicfg;
+
+/* GT64120 system controller register base */
+unsigned long _pcictrl_gt64120;
+
+/* MIPS System controller register base */
+unsigned long _pcictrl_msc;
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+static void __init console_config(void)
+{
+ char console_string[40];
+ int baud = 0;
+ char parity = '\0', bits = '\0', flow = '\0';
+ char *s;
+
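+	/* modetty0 has the form "<baud>,<parity>,<bits>,<flow>", e.g. "38400,n,8,hw" */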
+ s = fw_getenv("modetty0");
+ if (s) {
+ while (*s >= '0' && *s <= '9')
+ baud = baud*10 + *s++ - '0';
+ if (*s == ',')
+ s++;
+ if (*s)
+ parity = *s++;
+ if (*s == ',')
+ s++;
+ if (*s)
+ bits = *s++;
+ if (*s == ',')
+ s++;
+ if (*s == 'h')
+ flow = 'r';
+ }
+ if (baud == 0)
+ baud = 38400;
+ if (parity != 'n' && parity != 'o' && parity != 'e')
+ parity = 'n';
+ if (bits != '7' && bits != '8')
+ bits = '8';
+ if (flow == '\0')
+ flow = 'r';
+
+ if ((strstr(fw_getcmdline(), "earlycon=")) == NULL) {
+ sprintf(console_string, "uart8250,io,0x3f8,%d%c%c", baud,
+ parity, bits);
+ setup_earlycon(console_string);
+ }
+
+ if ((strstr(fw_getcmdline(), "console=")) == NULL) {
+ sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
+ parity, bits, flow);
+ strcat(fw_getcmdline(), console_string);
+ pr_info("Config serial console:%s\n", console_string);
+ }
+}
+#endif
+
+static void __init mips_nmi_setup(void)
+{
+ void *base;
+ extern char except_vec_nmi[];
+
+ base = cpu_has_veic ?
+ (void *)(CAC_BASE + 0xa80) :
+ (void *)(CAC_BASE + 0x380);
+ memcpy(base, except_vec_nmi, 0x80);
+ flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
+}
+
+static void __init mips_ejtag_setup(void)
+{
+ void *base;
+ extern char except_vec_ejtag_debug[];
+
+ base = cpu_has_veic ?
+ (void *)(CAC_BASE + 0xa00) :
+ (void *)(CAC_BASE + 0x300);
+ memcpy(base, except_vec_ejtag_debug, 0x80);
+ flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
+}
+
+phys_addr_t mips_cpc_default_phys_base(void)
+{
+ return CPC_BASE_ADDR;
+}
+
+void __init prom_init(void)
+{
+ /*
+ * early setup of _pcictrl_bonito so that we can determine
+ * the system controller on a CORE_EMUL board
+ */
+ _pcictrl_bonito = (unsigned long)ioremap(BONITO_REG_BASE, BONITO_REG_SIZE);
+
+ mips_revision_corid = MIPS_REVISION_CORID;
+
+ if (mips_revision_corid == MIPS_REVISION_CORID_CORE_EMUL) {
+ if (BONITO_PCIDID == 0x0001df53 ||
+ BONITO_PCIDID == 0x0003df53)
+ mips_revision_corid = MIPS_REVISION_CORID_CORE_EMUL_BON;
+ else
+ mips_revision_corid = MIPS_REVISION_CORID_CORE_EMUL_MSC;
+ }
+
+ mips_revision_sconid = MIPS_REVISION_SCONID;
+ if (mips_revision_sconid == MIPS_REVISION_SCON_OTHER) {
+ switch (mips_revision_corid) {
+ case MIPS_REVISION_CORID_QED_RM5261:
+ case MIPS_REVISION_CORID_CORE_LV:
+ case MIPS_REVISION_CORID_CORE_FPGA:
+ case MIPS_REVISION_CORID_CORE_FPGAR2:
+ mips_revision_sconid = MIPS_REVISION_SCON_GT64120;
+ break;
+ case MIPS_REVISION_CORID_CORE_EMUL_BON:
+ case MIPS_REVISION_CORID_BONITO64:
+ case MIPS_REVISION_CORID_CORE_20K:
+ mips_revision_sconid = MIPS_REVISION_SCON_BONITO;
+ break;
+ case MIPS_REVISION_CORID_CORE_MSC:
+ case MIPS_REVISION_CORID_CORE_FPGA2:
+ case MIPS_REVISION_CORID_CORE_24K:
+ /*
+			 * SOCit/ROCit support is essentially identical,
+			 * but we make an attempt to distinguish them
+ */
+ mips_revision_sconid = MIPS_REVISION_SCON_SOCIT;
+ break;
+ case MIPS_REVISION_CORID_CORE_FPGA3:
+ case MIPS_REVISION_CORID_CORE_FPGA4:
+ case MIPS_REVISION_CORID_CORE_FPGA5:
+ case MIPS_REVISION_CORID_CORE_EMUL_MSC:
+ default:
+ /* See above */
+ mips_revision_sconid = MIPS_REVISION_SCON_ROCIT;
+ break;
+ }
+ }
+
+ switch (mips_revision_sconid) {
+ u32 start, map, mask, data;
+
+ case MIPS_REVISION_SCON_GT64120:
+ /*
+ * Setup the North bridge to do Master byte-lane swapping
+ * when running in bigendian.
+ */
+ _pcictrl_gt64120 = (unsigned long)ioremap(MIPS_GT_BASE, 0x2000);
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ GT_WRITE(GT_PCI0_CMD_OFS, GT_PCI0_CMD_MBYTESWAP_BIT |
+ GT_PCI0_CMD_SBYTESWAP_BIT);
+#else
+ GT_WRITE(GT_PCI0_CMD_OFS, 0);
+#endif
+ /* Fix up PCI I/O mapping if necessary (for Atlas). */
+ start = GT_READ(GT_PCI0IOLD_OFS);
+ map = GT_READ(GT_PCI0IOREMAP_OFS);
+ if ((start & map) != 0) {
+ map &= ~start;
+ GT_WRITE(GT_PCI0IOREMAP_OFS, map);
+ }
+
+ set_io_port_base(MALTA_GT_PORT_BASE);
+ break;
+
+ case MIPS_REVISION_SCON_BONITO:
+ _pcictrl_bonito_pcicfg = (unsigned long)ioremap(BONITO_PCICFG_BASE, BONITO_PCICFG_SIZE);
+
+ /*
+ * Disable Bonito IOBC.
+ */
+ BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
+ ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
+ BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
+
+ /*
+ * Setup the North bridge to do Master byte-lane swapping
+ * when running in bigendian.
+ */
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ BONITO_BONGENCFG = BONITO_BONGENCFG &
+ ~(BONITO_BONGENCFG_MSTRBYTESWAP |
+ BONITO_BONGENCFG_BYTESWAP);
+#else
+ BONITO_BONGENCFG = BONITO_BONGENCFG |
+ BONITO_BONGENCFG_MSTRBYTESWAP |
+ BONITO_BONGENCFG_BYTESWAP;
+#endif
+
+ set_io_port_base(MALTA_BONITO_PORT_BASE);
+ break;
+
+ case MIPS_REVISION_SCON_SOCIT:
+ case MIPS_REVISION_SCON_ROCIT:
+ _pcictrl_msc = (unsigned long)ioremap(MIPS_MSC01_PCI_REG_BASE, 0x2000);
+mips_pci_controller:
+ mb();
+ MSC_READ(MSC01_PCI_CFG, data);
+ MSC_WRITE(MSC01_PCI_CFG, data & ~MSC01_PCI_CFG_EN_BIT);
+ wmb();
+
+ /* Fix up lane swapping. */
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ MSC_WRITE(MSC01_PCI_SWAP, MSC01_PCI_SWAP_NOSWAP);
+#else
+ MSC_WRITE(MSC01_PCI_SWAP,
+ MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_IO_SHF |
+ MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_MEM_SHF |
+ MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_BAR0_SHF);
+#endif
+
+ /*
+ * Setup the Malta max (2GB) memory for PCI DMA in host bridge
+ * in transparent addressing mode.
+ */
+ mask = PHYS_OFFSET | PCI_BASE_ADDRESS_MEM_PREFETCH;
+ MSC_WRITE(MSC01_PCI_BAR0, mask);
+ MSC_WRITE(MSC01_PCI_HEAD4, mask);
+
+ mask &= MSC01_PCI_BAR0_SIZE_MSK;
+ MSC_WRITE(MSC01_PCI_P2SCMSKL, mask);
+ MSC_WRITE(MSC01_PCI_P2SCMAPL, mask);
+
+ /* Don't handle target retries indefinitely. */
+ if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) ==
+ MSC01_PCI_CFG_MAXRTRY_MSK)
+ data = (data & ~(MSC01_PCI_CFG_MAXRTRY_MSK <<
+ MSC01_PCI_CFG_MAXRTRY_SHF)) |
+ ((MSC01_PCI_CFG_MAXRTRY_MSK - 1) <<
+ MSC01_PCI_CFG_MAXRTRY_SHF);
+
+ wmb();
+ MSC_WRITE(MSC01_PCI_CFG, data);
+ mb();
+
+ set_io_port_base(MALTA_MSC_PORT_BASE);
+ break;
+
+ case MIPS_REVISION_SCON_SOCITSC:
+ case MIPS_REVISION_SCON_SOCITSCP:
+ _pcictrl_msc = (unsigned long)ioremap(MIPS_SOCITSC_PCI_REG_BASE, 0x2000);
+ goto mips_pci_controller;
+
+ default:
+ /* Unknown system controller */
+ while (1); /* We die here... */
+ }
+ board_nmi_handler_setup = mips_nmi_setup;
+ board_ejtag_handler_setup = mips_ejtag_setup;
+
+ fw_init_cmdline();
+ fw_meminit();
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ console_config();
+#endif
+ /* Early detection of CMP support */
+ mips_cpc_probe();
+
+ if (!register_cps_smp_ops())
+ return;
+ if (!register_cmp_smp_ops())
+ return;
+ if (!register_vsmp_smp_ops())
+ return;
+ register_up_smp_ops();
+}
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
new file mode 100644
index 000000000..03d85b2b3
--- /dev/null
+++ b/arch/mips/mti-malta/malta-int.c
@@ -0,0 +1,223 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000, 2001, 2004 MIPS Technologies, Inc.
+ * Copyright (C) 2001 Ralf Baechle
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ *
+ * Routines for generic manipulation of the interrupts found on the MIPS
+ * Malta board. The interrupt controller is located in the South Bridge,
+ * a PIIX4 device with two internal 82C59 interrupt controllers.
+ */
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/kernel_stat.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+
+#include <asm/traps.h>
+#include <asm/i8259.h>
+#include <asm/irq_cpu.h>
+#include <asm/irq_regs.h>
+#include <asm/mips-boards/malta.h>
+#include <asm/mips-boards/maltaint.h>
+#include <asm/mips-cps.h>
+#include <asm/gt64120.h>
+#include <asm/mips-boards/generic.h>
+#include <asm/mips-boards/msc01_pci.h>
+#include <asm/msc01_ic.h>
+#include <asm/setup.h>
+#include <asm/rtlx.h>
+
+static inline int mips_pcibios_iack(void)
+{
+ int irq;
+
+ /*
+ * Determine highest priority pending interrupt by performing
+ * a PCI Interrupt Acknowledge cycle.
+ */
+ switch (mips_revision_sconid) {
+ case MIPS_REVISION_SCON_SOCIT:
+ case MIPS_REVISION_SCON_ROCIT:
+ case MIPS_REVISION_SCON_SOCITSC:
+ case MIPS_REVISION_SCON_SOCITSCP:
+ MSC_READ(MSC01_PCI_IACK, irq);
+ irq &= 0xff;
+ break;
+ case MIPS_REVISION_SCON_GT64120:
+ irq = GT_READ(GT_PCI0_IACK_OFS);
+ irq &= 0xff;
+ break;
+ case MIPS_REVISION_SCON_BONITO:
+ /* The following will generate a PCI IACK cycle on the
+ * Bonito controller. It's a little bit kludgy, but it
+ * was the easiest way to implement it in hardware at
+ * the given time.
+ */
+ BONITO_PCIMAP_CFG = 0x20000;
+
+ /* Flush Bonito register block */
+ (void) BONITO_PCIMAP_CFG;
+ iob(); /* sync */
+
+ irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg);
+ iob(); /* sync */
+ irq &= 0xff;
+ BONITO_PCIMAP_CFG = 0;
+ break;
+ default:
+ pr_emerg("Unknown system controller.\n");
+ return -1;
+ }
+ return irq;
+}
+
+static void corehi_irqdispatch(void)
+{
+ unsigned int intedge, intsteer, pcicmd, pcibadaddr;
+ unsigned int pcimstat, intisr, inten, intpol;
+ unsigned int intrcause, datalo, datahi;
+ struct pt_regs *regs = get_irq_regs();
+
+ pr_emerg("CoreHI interrupt, shouldn't happen, we die here!\n");
+ pr_emerg("epc : %08lx\nStatus: %08lx\n"
+ "Cause : %08lx\nbadVaddr : %08lx\n",
+ regs->cp0_epc, regs->cp0_status,
+ regs->cp0_cause, regs->cp0_badvaddr);
+
+	/*
+	 * Read all the registers and then print them as there is a
+	 * problem with interspersed printk's upsetting the Bonito
+	 * controller. Do it for the others too.
+	 */
+
+ switch (mips_revision_sconid) {
+ case MIPS_REVISION_SCON_SOCIT:
+ case MIPS_REVISION_SCON_ROCIT:
+ case MIPS_REVISION_SCON_SOCITSC:
+ case MIPS_REVISION_SCON_SOCITSCP:
+ ll_msc_irq();
+ break;
+ case MIPS_REVISION_SCON_GT64120:
+ intrcause = GT_READ(GT_INTRCAUSE_OFS);
+ datalo = GT_READ(GT_CPUERR_ADDRLO_OFS);
+ datahi = GT_READ(GT_CPUERR_ADDRHI_OFS);
+ pr_emerg("GT_INTRCAUSE = %08x\n", intrcause);
+ pr_emerg("GT_CPUERR_ADDR = %02x%08x\n",
+ datahi, datalo);
+ break;
+ case MIPS_REVISION_SCON_BONITO:
+ pcibadaddr = BONITO_PCIBADADDR;
+ pcimstat = BONITO_PCIMSTAT;
+ intisr = BONITO_INTISR;
+ inten = BONITO_INTEN;
+ intpol = BONITO_INTPOL;
+ intedge = BONITO_INTEDGE;
+ intsteer = BONITO_INTSTEER;
+ pcicmd = BONITO_PCICMD;
+ pr_emerg("BONITO_INTISR = %08x\n", intisr);
+ pr_emerg("BONITO_INTEN = %08x\n", inten);
+ pr_emerg("BONITO_INTPOL = %08x\n", intpol);
+ pr_emerg("BONITO_INTEDGE = %08x\n", intedge);
+ pr_emerg("BONITO_INTSTEER = %08x\n", intsteer);
+ pr_emerg("BONITO_PCICMD = %08x\n", pcicmd);
+ pr_emerg("BONITO_PCIBADADDR = %08x\n", pcibadaddr);
+ pr_emerg("BONITO_PCIMSTAT = %08x\n", pcimstat);
+ break;
+ }
+
+ die("CoreHi interrupt", regs);
+}
+
+static irqreturn_t corehi_handler(int irq, void *dev_id)
+{
+ corehi_irqdispatch();
+ return IRQ_HANDLED;
+}
+
+static msc_irqmap_t msc_irqmap[] __initdata = {
+ {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0},
+ {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0},
+};
+static int msc_nr_irqs __initdata = ARRAY_SIZE(msc_irqmap);
+
+static msc_irqmap_t msc_eicirqmap[] __initdata = {
+ {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0},
+ {MSC01E_INT_SW1, MSC01_IRQ_LEVEL, 0},
+ {MSC01E_INT_I8259A, MSC01_IRQ_LEVEL, 0},
+ {MSC01E_INT_SMI, MSC01_IRQ_LEVEL, 0},
+ {MSC01E_INT_COREHI, MSC01_IRQ_LEVEL, 0},
+ {MSC01E_INT_CORELO, MSC01_IRQ_LEVEL, 0},
+ {MSC01E_INT_TMR, MSC01_IRQ_EDGE, 0},
+ {MSC01E_INT_PCI, MSC01_IRQ_LEVEL, 0},
+ {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0},
+ {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0}
+};
+
+static int msc_nr_eicirqs __initdata = ARRAY_SIZE(msc_eicirqmap);
+
+void __init arch_init_irq(void)
+{
+ int corehi_irq;
+
+ /*
+ * Preallocate the i8259's expected virq's here. Since irqchip_init()
+	 * will probe the irqchips in hierarchical order, i8259 is probed last.
+ * If anything allocates a virq before the i8259 is probed, it will
+ * be given one of the i8259's expected range and consequently setup
+ * of the i8259 will fail.
+ */
+ WARN(irq_alloc_descs(I8259A_IRQ_BASE, I8259A_IRQ_BASE,
+ 16, numa_node_id()) < 0,
+ "Cannot reserve i8259 virqs at IRQ%d\n", I8259A_IRQ_BASE);
+
+ i8259_set_poll(mips_pcibios_iack);
+ irqchip_init();
+
+ switch (mips_revision_sconid) {
+ case MIPS_REVISION_SCON_SOCIT:
+ case MIPS_REVISION_SCON_ROCIT:
+ if (cpu_has_veic)
+ init_msc_irqs(MIPS_MSC01_IC_REG_BASE,
+ MSC01E_INT_BASE, msc_eicirqmap,
+ msc_nr_eicirqs);
+ else
+ init_msc_irqs(MIPS_MSC01_IC_REG_BASE,
+ MSC01C_INT_BASE, msc_irqmap,
+ msc_nr_irqs);
+ break;
+
+ case MIPS_REVISION_SCON_SOCITSC:
+ case MIPS_REVISION_SCON_SOCITSCP:
+ if (cpu_has_veic)
+ init_msc_irqs(MIPS_SOCITSC_IC_REG_BASE,
+ MSC01E_INT_BASE, msc_eicirqmap,
+ msc_nr_eicirqs);
+ else
+ init_msc_irqs(MIPS_SOCITSC_IC_REG_BASE,
+ MSC01C_INT_BASE, msc_irqmap,
+ msc_nr_irqs);
+ }
+
+ if (mips_gic_present()) {
+ corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
+ } else if (cpu_has_veic) {
+ set_vi_handler(MSC01E_INT_COREHI, corehi_irqdispatch);
+ corehi_irq = MSC01E_INT_BASE + MSC01E_INT_COREHI;
+ } else {
+ corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
+ }
+
+ if (request_irq(corehi_irq, corehi_handler, IRQF_NO_THREAD, "CoreHi",
+ NULL))
+ pr_err("Failed to request irq %d (CoreHi)\n", corehi_irq);
+}
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
new file mode 100644
index 000000000..7c25a0a23
--- /dev/null
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -0,0 +1,48 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * PROM library functions for acquiring/using memory descriptors given to
+ * us from the YAMON.
+ *
+ * Copyright (C) 1999,2000,2012 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Steven J. Hill <sjhill@mips.com>
+ */
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/string.h>
+
+#include <asm/bootinfo.h>
+#include <asm/cdmm.h>
+#include <asm/maar.h>
+#include <asm/sections.h>
+#include <asm/fw/fw.h>
+
+/* determined physical memory size, not overridden by command line args */
+unsigned long physical_memsize = 0L;
+
+static void free_init_pages_eva_malta(void *begin, void *end)
+{
+ free_init_pages("unused kernel", __pa_symbol((unsigned long *)begin),
+ __pa_symbol((unsigned long *)end));
+}
+
+void __init fw_meminit(void)
+{
+ bool eva = IS_ENABLED(CONFIG_EVA);
+
+ free_init_pages_eva = eva ? free_init_pages_eva_malta : NULL;
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
+
+phys_addr_t mips_cdmm_phys_base(void)
+{
+ /* This address is "typically unused" */
+ return 0x1fc10000;
+}
diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c
new file mode 100644
index 000000000..62ffac500
--- /dev/null
+++ b/arch/mips/mti-malta/malta-platform.c
@@ -0,0 +1,76 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006, 07 MIPS Technologies, Inc.
+ * written by Ralf Baechle (ralf@linux-mips.org)
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ *
+ * Copyright (C) 2008 Wind River Systems, Inc.
+ * updated by Tiejun Chen <tiejun.chen@windriver.com>
+ *
+ * 1. Probe driver for the Malta's UART ports:
+ *
+ * o 2 ports in the SMC SuperIO
+ * o 1 port in the CBUS UART, a discrete 16550 which normally is only used
+ * for bringups.
+ *
+ * We don't use 8250_platform.c on Malta as it would result in the CBUS
+ * UART becoming ttyS0.
+ *
+ * 2. Register RTC-CMOS platform device on Malta.
+ */
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <asm/mips-boards/maltaint.h>
+
+#define SMC_PORT(base, int) \
+{ \
+ .iobase = base, \
+ .irq = int, \
+ .uartclk = 1843200, \
+ .iotype = UPIO_PORT, \
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
+ .regshift = 0, \
+}
+
+#define CBUS_UART_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP)
+
+static struct plat_serial8250_port uart8250_data[] = {
+ SMC_PORT(0x3F8, 4),
+ SMC_PORT(0x2F8, 3),
+#ifndef CONFIG_MIPS_CMP
+ {
+ .mapbase = 0x1f000900, /* The CBUS UART */
+ .irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
+ .uartclk = 3686400, /* Twice the usual clk! */
+ .iotype = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ?
+ UPIO_MEM32BE : UPIO_MEM32,
+ .flags = CBUS_UART_FLAGS,
+ .regshift = 3,
+ },
+#endif
+ { },
+};
+
+static struct platform_device malta_uart8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = uart8250_data,
+ },
+};
+
+static struct platform_device *malta_devices[] __initdata = {
+ &malta_uart8250_device,
+};
+
+static int __init malta_add_devices(void)
+{
+ return platform_add_devices(malta_devices, ARRAY_SIZE(malta_devices));
+}
+
+device_initcall(malta_add_devices);
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
new file mode 100644
index 000000000..e1fb8b534
--- /dev/null
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2008 Dmitri Vorobiev
+ */
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/of_fdt.h>
+#include <linux/pci.h>
+#include <linux/screen_info.h>
+#include <linux/time.h>
+
+#include <asm/dma-coherence.h>
+#include <asm/fw/fw.h>
+#include <asm/mips-cps.h>
+#include <asm/mips-boards/generic.h>
+#include <asm/mips-boards/malta.h>
+#include <asm/mips-boards/maltaint.h>
+#include <asm/dma.h>
+#include <asm/prom.h>
+#include <asm/traps.h>
+#ifdef CONFIG_VT
+#include <linux/console.h>
+#endif
+
+#define ROCIT_CONFIG_GEN0 0x1f403000
+#define ROCIT_CONFIG_GEN0_PCI_IOCU BIT(7)
+
+static struct resource standard_io_resources[] = {
+ {
+ .name = "dma1",
+ .start = 0x00,
+ .end = 0x1f,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ },
+ {
+ .name = "timer",
+ .start = 0x40,
+ .end = 0x5f,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ },
+ {
+ .name = "keyboard",
+ .start = 0x60,
+ .end = 0x6f,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ },
+ {
+ .name = "dma page reg",
+ .start = 0x80,
+ .end = 0x8f,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ },
+ {
+ .name = "dma2",
+ .start = 0xc0,
+ .end = 0xdf,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ },
+};
+
+const char *get_system_type(void)
+{
+ return "MIPS Malta";
+}
+
+#ifdef CONFIG_BLK_DEV_FD
+static void __init fd_activate(void)
+{
+ /*
+ * Activate Floppy Controller in the SMSC FDC37M817 Super I/O
+ * Controller.
+ * Done by YAMON 2.00 onwards
+ */
+ /* Entering config state. */
+ SMSC_WRITE(SMSC_CONFIG_ENTER, SMSC_CONFIG_REG);
+
+ /* Activate floppy controller. */
+ SMSC_WRITE(SMSC_CONFIG_DEVNUM, SMSC_CONFIG_REG);
+ SMSC_WRITE(SMSC_CONFIG_DEVNUM_FLOPPY, SMSC_DATA_REG);
+ SMSC_WRITE(SMSC_CONFIG_ACTIVATE, SMSC_CONFIG_REG);
+ SMSC_WRITE(SMSC_CONFIG_ACTIVATE_ENABLE, SMSC_DATA_REG);
+
+ /* Exit config state. */
+ SMSC_WRITE(SMSC_CONFIG_EXIT, SMSC_CONFIG_REG);
+}
+#endif
+
+static int __init plat_enable_iocoherency(void)
+{
+ int supported = 0;
+ u32 cfg;
+
+ if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
+ if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
+ BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
+ pr_info("Enabled Bonito CPU coherency\n");
+ supported = 1;
+ }
+ if (strstr(fw_getcmdline(), "iobcuncached")) {
+ BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
+ BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
+ ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
+ BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
+ pr_info("Disabled Bonito IOBC coherency\n");
+ } else {
+ BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
+ BONITO_PCIMEMBASECFG |=
+ (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
+ BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
+ pr_info("Enabled Bonito IOBC coherency\n");
+ }
+ } else if (mips_cps_numiocu(0) != 0) {
+ /* Nothing special needs to be done to enable coherency */
+ pr_info("CMP IOCU detected\n");
+ cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0));
+ if (!(cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)) {
+ pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
+ return 0;
+ }
+ supported = 1;
+ }
+ hw_coherentio = supported;
+ return supported;
+}
+
+static void __init plat_setup_iocoherency(void)
+{
+ if (plat_enable_iocoherency()) {
+ if (coherentio == IO_COHERENCE_DISABLED)
+ pr_info("Hardware DMA cache coherency disabled\n");
+ else
+ pr_info("Hardware DMA cache coherency enabled\n");
+ } else {
+ if (coherentio == IO_COHERENCE_ENABLED)
+ pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
+ else
+ pr_info("Software DMA cache coherency enabled\n");
+ }
+}
+
+static void __init pci_clock_check(void)
+{
+ unsigned int __iomem *jmpr_p =
+ (unsigned int *) ioremap(MALTA_JMPRS_REG, sizeof(unsigned int));
+ int jmpr = (__raw_readl(jmpr_p) >> 2) & 0x07;
+ static const int pciclocks[] __initconst = {
+ 33, 20, 25, 30, 12, 16, 37, 10
+ };
+ int pciclock = pciclocks[jmpr];
+ char *optptr, *argptr = fw_getcmdline();
+
+ /*
+ * If user passed a pci_clock= option, don't tack on another one
+ */
+ optptr = strstr(argptr, "pci_clock=");
+ if (optptr && (optptr == argptr || optptr[-1] == ' '))
+ return;
+
+ if (pciclock != 33) {
+ pr_warn("WARNING: PCI clock is %dMHz, setting pci_clock\n",
+ pciclock);
+ argptr += strlen(argptr);
+ sprintf(argptr, " pci_clock=%d", pciclock);
+ if (pciclock < 20 || pciclock > 66)
+ pr_warn("WARNING: IDE timing calculations will be "
+ "incorrect\n");
+ }
+}
+
+#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
+static void __init screen_info_setup(void)
+{
+ screen_info = (struct screen_info) {
+ .orig_x = 0,
+ .orig_y = 25,
+ .ext_mem_k = 0,
+ .orig_video_page = 0,
+ .orig_video_mode = 0,
+ .orig_video_cols = 80,
+ .unused2 = 0,
+ .orig_video_ega_bx = 0,
+ .unused3 = 0,
+ .orig_video_lines = 25,
+ .orig_video_isVGA = VIDEO_TYPE_VGAC,
+ .orig_video_points = 16
+ };
+}
+#endif
+
+static void __init bonito_quirks_setup(void)
+{
+ char *argptr;
+
+ argptr = fw_getcmdline();
+ if (strstr(argptr, "debug")) {
+ BONITO_BONGENCFG |= BONITO_BONGENCFG_DEBUGMODE;
+ pr_info("Enabled Bonito debug mode\n");
+ } else
+ BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE;
+}
+
+void __init *plat_get_fdt(void)
+{
+ return (void *)__dtb_start;
+}
+
+void __init plat_mem_setup(void)
+{
+ unsigned int i;
+ void *fdt = plat_get_fdt();
+
+ fdt = malta_dt_shim(fdt);
+ __dt_setup_arch(fdt);
+
+ if (IS_ENABLED(CONFIG_EVA))
+ /* EVA has already been configured in mach-malta/kernel-init.h */
+ pr_info("Enhanced Virtual Addressing (EVA) activated\n");
+
+ mips_pcibios_init();
+
+ /* Request I/O space for devices used on the Malta board. */
+ for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
+ request_resource(&ioport_resource, standard_io_resources+i);
+
+ /*
+ * Enable DMA channel 4 (cascade channel) in the PIIX4 south bridge.
+ */
+ enable_dma(4);
+
+ if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO)
+ bonito_quirks_setup();
+
+ plat_setup_iocoherency();
+
+ pci_clock_check();
+
+#ifdef CONFIG_BLK_DEV_FD
+ fd_activate();
+#endif
+
+#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
+ screen_info_setup();
+#endif
+}
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
new file mode 100644
index 000000000..7efcfe0c9
--- /dev/null
+++ b/arch/mips/mti-malta/malta-time.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Setting up the clock on the MIPS boards.
+ */
+#include <linux/types.h>
+#include <linux/i8253.h>
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/libfdt.h>
+#include <linux/math64.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/mc146818rtc.h>
+
+#include <asm/cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/hardirq.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+#include <asm/setup.h>
+#include <asm/time.h>
+#include <asm/mc146818-time.h>
+#include <asm/msc01_ic.h>
+#include <asm/mips-cps.h>
+
+#include <asm/mips-boards/generic.h>
+#include <asm/mips-boards/maltaint.h>
+
+static int mips_cpu_timer_irq;
+static int mips_cpu_perf_irq;
+extern int cp0_perfcount_irq;
+
+static unsigned int gic_frequency;
+
+static void mips_timer_dispatch(void)
+{
+ do_IRQ(mips_cpu_timer_irq);
+}
+
+static void mips_perf_dispatch(void)
+{
+ do_IRQ(mips_cpu_perf_irq);
+}
+
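+/*
+ * Rounds to the nearest multiple of twice 'amount',
+ * e.g. freqround(1048576, 5000) == 1050000.
+ */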
+static unsigned int freqround(unsigned int freq, unsigned int amount)
+{
+ freq += amount;
+ freq -= freq % (amount*2);
+ return freq;
+}
+
+/*
+ * Estimate CPU and GIC frequencies.
+ */
+static void __init estimate_frequencies(void)
+{
+ unsigned long flags;
+ unsigned int count, start;
+ unsigned char secs1, secs2, ctrl;
+ int secs;
+ u64 giccount = 0, gicstart = 0;
+
+#if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ
+ mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000;
+ return;
+#endif
+
+ local_irq_save(flags);
+
+ if (mips_gic_present())
+ clear_gic_config(GIC_CONFIG_COUNTSTOP);
+
+ /*
+ * Read counters exactly on rising edge of update flag.
+ * This helps get an accurate reading under virtualisation.
+ */
+ while (CMOS_READ(RTC_REG_A) & RTC_UIP);
+ while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
+ start = read_c0_count();
+ if (mips_gic_present())
+ gicstart = read_gic_counter();
+
+ /* Wait for falling edge before reading RTC. */
+ while (CMOS_READ(RTC_REG_A) & RTC_UIP);
+ secs1 = CMOS_READ(RTC_SECONDS);
+
+ /* Read counters again exactly on rising edge of update flag. */
+ while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
+ count = read_c0_count();
+ if (mips_gic_present())
+ giccount = read_gic_counter();
+
+ /* Wait for falling edge before reading RTC again. */
+ while (CMOS_READ(RTC_REG_A) & RTC_UIP);
+ secs2 = CMOS_READ(RTC_SECONDS);
+
+ ctrl = CMOS_READ(RTC_CONTROL);
+
+ local_irq_restore(flags);
+
+ if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ secs1 = bcd2bin(secs1);
+ secs2 = bcd2bin(secs2);
+ }
+ secs = secs2 - secs1;
+ if (secs < 1)
+ secs += 60;
+
+ count -= start;
+ count /= secs;
+ mips_hpt_frequency = count;
+
+ if (mips_gic_present()) {
+ giccount = div_u64(giccount - gicstart, secs);
+ gic_frequency = giccount;
+ }
+}
+
+void read_persistent_clock64(struct timespec64 *ts)
+{
+ ts->tv_sec = mc146818_get_cmos_time();
+ ts->tv_nsec = 0;
+}
+
+int get_c0_fdc_int(void)
+{
+ /*
+ * Some cores claim the FDC is routable through the GIC, but it doesn't
+ * actually seem to be connected for those Malta bitstreams.
+ */
+ switch (current_cpu_type()) {
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ return -1;
+	}
+
+ if (cpu_has_veic)
+ return -1;
+ else if (mips_gic_present())
+ return gic_get_c0_fdc_int();
+ else if (cp0_fdc_irq >= 0)
+ return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
+ else
+ return -1;
+}
+
+int get_c0_perfcount_int(void)
+{
+ if (cpu_has_veic) {
+ set_vi_handler(MSC01E_INT_PERFCTR, mips_perf_dispatch);
+ mips_cpu_perf_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
+ } else if (mips_gic_present()) {
+ mips_cpu_perf_irq = gic_get_c0_perfcount_int();
+ } else if (cp0_perfcount_irq >= 0) {
+ mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ } else {
+ mips_cpu_perf_irq = -1;
+ }
+
+ return mips_cpu_perf_irq;
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+
+unsigned int get_c0_compare_int(void)
+{
+ if (cpu_has_veic) {
+ set_vi_handler(MSC01E_INT_CPUCTR, mips_timer_dispatch);
+ mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
+ } else if (mips_gic_present()) {
+ mips_cpu_timer_irq = gic_get_c0_compare_int();
+ } else {
+ mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+ }
+
+ return mips_cpu_timer_irq;
+}
+
+static void __init init_rtc(void)
+{
+ unsigned char freq, ctrl;
+
+ /* Set 32KHz time base if not already set */
+ freq = CMOS_READ(RTC_FREQ_SELECT);
+ if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
+ CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
+
+ /* Ensure SET bit is clear so RTC can run */
+ ctrl = CMOS_READ(RTC_CONTROL);
+ if (ctrl & RTC_SET)
+ CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
+}
+
+#ifdef CONFIG_CLKSRC_MIPS_GIC
+static u32 gic_frequency_dt;
+
+static struct property gic_frequency_prop = {
+ .name = "clock-frequency",
+ .length = sizeof(u32),
+ .value = &gic_frequency_dt,
+};
+
+static void update_gic_frequency_dt(void)
+{
+ struct device_node *node;
+
+ gic_frequency_dt = cpu_to_be32(gic_frequency);
+
+ node = of_find_compatible_node(NULL, NULL, "mti,gic-timer");
+ if (!node) {
+ pr_err("mti,gic-timer device node not found\n");
+ return;
+ }
+
+ if (of_update_property(node, &gic_frequency_prop) < 0)
+ pr_err("error updating gic frequency property\n");
+}
+
+#endif
+
+void __init plat_time_init(void)
+{
+ unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK);
+ unsigned int freq;
+
+ init_rtc();
+ estimate_frequencies();
+
+ freq = mips_hpt_frequency;
+ if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
+ (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
+ freq *= 2;
+ freq = freqround(freq, 5000);
+ printk("CPU frequency %d.%02d MHz\n", freq/1000000,
+ (freq%1000000)*100/1000000);
+
+#ifdef CONFIG_I8253
+ /* Only Malta has a PIT. */
+ setup_pit_timer();
+#endif
+
+ if (mips_gic_present()) {
+ freq = freqround(gic_frequency, 5000);
+ printk("GIC frequency %d.%02d MHz\n", freq/1000000,
+ (freq%1000000)*100/1000000);
+#ifdef CONFIG_CLKSRC_MIPS_GIC
+ update_gic_frequency_dt();
+ timer_probe();
+#endif
+ }
+}
diff --git a/arch/mips/net/Makefile b/arch/mips/net/Makefile
new file mode 100644
index 000000000..d55912349
--- /dev/null
+++ b/arch/mips/net/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# MIPS networking code
+
+obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o
+obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
new file mode 100644
index 000000000..cb6d22439
--- /dev/null
+++ b/arch/mips/net/bpf_jit.c
@@ -0,0 +1,1299 @@
+/*
+ * Just-In-Time compiler for BPF filters on MIPS
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/filter.h>
+#include <linux/if_vlan.h>
+#include <linux/moduleloader.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <asm/asm.h>
+#include <asm/bitops.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-features.h>
+#include <asm/uasm.h>
+
+#include "bpf_jit.h"
+
+/* ABI
+ * r_skb_hl SKB header length
+ * r_data SKB data pointer
+ * r_off Offset
+ * r_A BPF register A
+ * r_X BPF register X
+ * r_skb *skb
+ * r_M *scratch memory
+ * r_skb_len SKB length
+ *
+ * On entry (*bpf_func)(*skb, *filter)
+ * a0 = MIPS_R_A0 = skb;
+ * a1 = MIPS_R_A1 = filter;
+ *
+ * Stack
+ * ...
+ * M[15]
+ * M[14]
+ * M[13]
+ * ...
+ * M[0] <-- r_M
+ * saved reg k-1
+ * saved reg k-2
+ * ...
+ * saved reg 0 <-- r_sp
+ * <no argument area>
+ *
+ * Packet layout
+ *
+ * <--------------------- len ------------------------>
+ * <--skb-len(r_skb_hl)-->< ----- skb->data_len ------>
+ * ----------------------------------------------------
+ * | skb->data |
+ * ----------------------------------------------------
+ */
+
+#define ptr typeof(unsigned long)
+
+#define SCRATCH_OFF(k) (4 * (k))
+
+/* JIT flags */
+#define SEEN_CALL (1 << BPF_MEMWORDS)
+#define SEEN_SREG_SFT (BPF_MEMWORDS + 1)
+#define SEEN_SREG_BASE (1 << SEEN_SREG_SFT)
+#define SEEN_SREG(x) (SEEN_SREG_BASE << (x))
+#define SEEN_OFF SEEN_SREG(2)
+#define SEEN_A SEEN_SREG(3)
+#define SEEN_X SEEN_SREG(4)
+#define SEEN_SKB SEEN_SREG(5)
+#define SEEN_MEM SEEN_SREG(6)
+/* SEEN_SKB_DATA also implies skb_hl and skb_len */
+#define SEEN_SKB_DATA (SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0))
+
+/* Arguments used by JIT */
+#define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */
+
+#define SBIT(x) (1 << (x)) /* Signed version of BIT() */
+
+/**
+ * struct jit_ctx - JIT context
+ * @skf: The sk_filter
+ * @prologue_bytes: Number of bytes for prologue
+ * @idx: Instruction index
+ * @flags: JIT flags
+ * @offsets: Instruction offsets
+ * @target: Memory location for the compiled filter
+ */
+struct jit_ctx {
+ const struct bpf_prog *skf;
+ unsigned int prologue_bytes;
+ u32 idx;
+ u32 flags;
+ u32 *offsets;
+ u32 *target;
+};
+
+
+static inline int optimize_div(u32 *k)
+{
+ /* power of 2 divides can be implemented with right shift */
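+	/* e.g. *k == 8 becomes a right shift by ilog2(8) = 3 */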
+ if (!(*k & (*k-1))) {
+ *k = ilog2(*k);
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
+
+/* Simply emit the instruction if the JIT memory space has been allocated */
+#define emit_instr(ctx, func, ...) \
+do { \
+ if ((ctx)->target != NULL) { \
+ u32 *p = &(ctx)->target[ctx->idx]; \
+ uasm_i_##func(&p, ##__VA_ARGS__); \
+ } \
+ (ctx)->idx++; \
+} while (0)
+
+/*
+ * Similar to emit_instr but it must be used when we need to emit
+ * 32-bit or 64-bit instructions
+ */
+#define emit_long_instr(ctx, func, ...) \
+do { \
+ if ((ctx)->target != NULL) { \
+ u32 *p = &(ctx)->target[ctx->idx]; \
+ UASM_i_##func(&p, ##__VA_ARGS__); \
+ } \
+ (ctx)->idx++; \
+} while (0)
+
+/* Determine if immediate is within the 16-bit signed range */
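+/* e.g. 0x7fff and -0x8000 are in range, 0x8000 is not */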
+static inline bool is_range16(s32 imm)
+{
+ return !(imm >= SBIT(15) || imm < -SBIT(15));
+}
+
+static inline void emit_addu(unsigned int dst, unsigned int src1,
+ unsigned int src2, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, addu, dst, src1, src2);
+}
+
+static inline void emit_nop(struct jit_ctx *ctx)
+{
+ emit_instr(ctx, nop);
+}
+
+/* Load a u32 immediate to a register */
+static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
+{
+ if (ctx->target != NULL) {
+ /* addiu can only handle s16 */
+ if (!is_range16(imm)) {
+ u32 *p = &ctx->target[ctx->idx];
+ uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
+ p = &ctx->target[ctx->idx + 1];
+ uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
+ } else {
+ u32 *p = &ctx->target[ctx->idx];
+ uasm_i_addiu(&p, dst, r_zero, imm);
+ }
+ }
+ ctx->idx++;
+
+ if (!is_range16(imm))
+ ctx->idx++;
+}
+
+static inline void emit_or(unsigned int dst, unsigned int src1,
+ unsigned int src2, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, or, dst, src1, src2);
+}
+
+static inline void emit_ori(unsigned int dst, unsigned src, u32 imm,
+ struct jit_ctx *ctx)
+{
+ if (imm >= BIT(16)) {
+ emit_load_imm(r_tmp, imm, ctx);
+ emit_or(dst, src, r_tmp, ctx);
+ } else {
+ emit_instr(ctx, ori, dst, src, imm);
+ }
+}
+
+static inline void emit_daddiu(unsigned int dst, unsigned int src,
+ int imm, struct jit_ctx *ctx)
+{
+ /*
+ * Only used for stack, so the imm is relatively small
+ * and it fits in 15-bits
+ */
+ emit_instr(ctx, daddiu, dst, src, imm);
+}
+
+static inline void emit_addiu(unsigned int dst, unsigned int src,
+ u32 imm, struct jit_ctx *ctx)
+{
+ if (!is_range16(imm)) {
+ emit_load_imm(r_tmp, imm, ctx);
+ emit_addu(dst, r_tmp, src, ctx);
+ } else {
+ emit_instr(ctx, addiu, dst, src, imm);
+ }
+}
+
+static inline void emit_and(unsigned int dst, unsigned int src1,
+ unsigned int src2, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, and, dst, src1, src2);
+}
+
+static inline void emit_andi(unsigned int dst, unsigned int src,
+ u32 imm, struct jit_ctx *ctx)
+{
+ /* If imm does not fit in u16 then load it to register */
+ if (imm >= BIT(16)) {
+ emit_load_imm(r_tmp, imm, ctx);
+ emit_and(dst, src, r_tmp, ctx);
+ } else {
+ emit_instr(ctx, andi, dst, src, imm);
+ }
+}
+
+static inline void emit_xor(unsigned int dst, unsigned int src1,
+ unsigned int src2, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, xor, dst, src1, src2);
+}
+
+static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
+{
+ /* If imm does not fit in u16 then load it to register */
+ if (imm >= BIT(16)) {
+ emit_load_imm(r_tmp, imm, ctx);
+ emit_xor(dst, src, r_tmp, ctx);
+ } else {
+ emit_instr(ctx, xori, dst, src, imm);
+ }
+}
+
+static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
+{
+ emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset);
+}
+
+static inline void emit_subu(unsigned int dst, unsigned int src1,
+ unsigned int src2, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, subu, dst, src1, src2);
+}
+
+static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
+{
+ emit_subu(reg, r_zero, reg, ctx);
+}
+
+static inline void emit_sllv(unsigned int dst, unsigned int src,
+ unsigned int sa, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, sllv, dst, src, sa);
+}
+
+static inline void emit_sll(unsigned int dst, unsigned int src,
+ unsigned int sa, struct jit_ctx *ctx)
+{
+ /* sa is 5-bits long */
+ if (sa >= BIT(5))
+ /* Shifting >= 32 results in zero */
+ emit_jit_reg_move(dst, r_zero, ctx);
+ else
+ emit_instr(ctx, sll, dst, src, sa);
+}
+
+static inline void emit_srlv(unsigned int dst, unsigned int src,
+ unsigned int sa, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, srlv, dst, src, sa);
+}
+
+static inline void emit_srl(unsigned int dst, unsigned int src,
+ unsigned int sa, struct jit_ctx *ctx)
+{
+ /* sa is 5-bits long */
+ if (sa >= BIT(5))
+ /* Shifting >= 32 results in zero */
+ emit_jit_reg_move(dst, r_zero, ctx);
+ else
+ emit_instr(ctx, srl, dst, src, sa);
+}
+
+static inline void emit_slt(unsigned int dst, unsigned int src1,
+ unsigned int src2, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, slt, dst, src1, src2);
+}
+
+static inline void emit_sltu(unsigned int dst, unsigned int src1,
+ unsigned int src2, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, sltu, dst, src1, src2);
+}
+
+static inline void emit_sltiu(unsigned dst, unsigned int src,
+ unsigned int imm, struct jit_ctx *ctx)
+{
+ /* 16 bit immediate */
+ if (!is_range16((s32)imm)) {
+ emit_load_imm(r_tmp, imm, ctx);
+ emit_sltu(dst, src, r_tmp, ctx);
+ } else {
+ emit_instr(ctx, sltiu, dst, src, imm);
+ }
+
+}
+
+/* Store register on the stack */
+static inline void emit_store_stack_reg(ptr reg, ptr base,
+ unsigned int offset,
+ struct jit_ctx *ctx)
+{
+ emit_long_instr(ctx, SW, reg, offset, base);
+}
+
+static inline void emit_store(ptr reg, ptr base, unsigned int offset,
+ struct jit_ctx *ctx)
+{
+ emit_instr(ctx, sw, reg, offset, base);
+}
+
+static inline void emit_load_stack_reg(ptr reg, ptr base,
+ unsigned int offset,
+ struct jit_ctx *ctx)
+{
+ emit_long_instr(ctx, LW, reg, offset, base);
+}
+
+static inline void emit_load(unsigned int reg, unsigned int base,
+ unsigned int offset, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, lw, reg, offset, base);
+}
+
+static inline void emit_load_byte(unsigned int reg, unsigned int base,
+ unsigned int offset, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, lb, reg, offset, base);
+}
+
+static inline void emit_half_load(unsigned int reg, unsigned int base,
+ unsigned int offset, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, lh, reg, offset, base);
+}
+
+static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base,
+ unsigned int offset, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, lhu, reg, offset, base);
+}
+
+static inline void emit_mul(unsigned int dst, unsigned int src1,
+ unsigned int src2, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, mul, dst, src1, src2);
+}
+
+static inline void emit_div(unsigned int dst, unsigned int src,
+ struct jit_ctx *ctx)
+{
+ if (ctx->target != NULL) {
+ u32 *p = &ctx->target[ctx->idx];
+ uasm_i_divu(&p, dst, src);
+ p = &ctx->target[ctx->idx + 1];
+ uasm_i_mflo(&p, dst);
+ }
+ ctx->idx += 2; /* 2 insts */
+}
+
+static inline void emit_mod(unsigned int dst, unsigned int src,
+ struct jit_ctx *ctx)
+{
+ if (ctx->target != NULL) {
+ u32 *p = &ctx->target[ctx->idx];
+ uasm_i_divu(&p, dst, src);
+ p = &ctx->target[ctx->idx + 1];
+ uasm_i_mfhi(&p, dst);
+ }
+ ctx->idx += 2; /* 2 insts */
+}
+
+static inline void emit_dsll(unsigned int dst, unsigned int src,
+ unsigned int sa, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, dsll, dst, src, sa);
+}
+
+static inline void emit_dsrl32(unsigned int dst, unsigned int src,
+ unsigned int sa, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, dsrl32, dst, src, sa);
+}
+
+static inline void emit_wsbh(unsigned int dst, unsigned int src,
+ struct jit_ctx *ctx)
+{
+ emit_instr(ctx, wsbh, dst, src);
+}
+
+/* load pointer to register */
+static inline void emit_load_ptr(unsigned int dst, unsigned int src,
+ int imm, struct jit_ctx *ctx)
+{
+ /* src contains the base addr of the 32/64-pointer */
+ emit_long_instr(ctx, LW, dst, imm, src);
+}
+
+/* load a function pointer to register */
+static inline void emit_load_func(unsigned int reg, ptr imm,
+ struct jit_ctx *ctx)
+{
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ /* At this point imm is always 64-bit */
+ emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
+ emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
+ emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
+ emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
+ emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
+ } else {
+ emit_load_imm(reg, imm, ctx);
+ }
+}
+
+/* Move to real MIPS register */
+static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
+{
+ emit_long_instr(ctx, ADDU, dst, src, r_zero);
+}
+
+/* Move to JIT (32-bit) register */
+static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
+{
+ emit_addu(dst, src, r_zero, ctx);
+}
+
+/* Compute the immediate value for PC-relative branches. */
+static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
+{
+ if (ctx->target == NULL)
+ return 0;
+
+ /*
+ * We want a pc-relative branch. We only do forward branches
+ * so tgt is always after pc. tgt is the instruction offset
+ * we want to jump to.
+	 *
+ * Branch on MIPS:
+ * I: target_offset <- sign_extend(offset)
+ * I+1: PC += target_offset (delay slot)
+ *
+ * ctx->idx currently points to the branch instruction
+ * but the offset is added to the delay slot so we need
+ * to subtract 4.
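+	 *
+	 * For example, if the target's recorded offset is 60 and this
+	 * branch sits at instruction index 10 with no prologue bytes,
+	 * the result is 60 - (10 * 4 - 0) - 4 = 16.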
+ */
+ return ctx->offsets[tgt] -
+ (ctx->idx * 4 - ctx->prologue_bytes) - 4;
+}
+
+static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
+ unsigned int imm, struct jit_ctx *ctx)
+{
+ if (ctx->target != NULL) {
+ u32 *p = &ctx->target[ctx->idx];
+
+ switch (cond) {
+ case MIPS_COND_EQ:
+ uasm_i_beq(&p, reg1, reg2, imm);
+ break;
+ case MIPS_COND_NE:
+ uasm_i_bne(&p, reg1, reg2, imm);
+ break;
+ case MIPS_COND_ALL:
+ uasm_i_b(&p, imm);
+ break;
+ default:
+ pr_warn("%s: Unhandled branch conditional: %d\n",
+ __func__, cond);
+ }
+ }
+ ctx->idx++;
+}
+
+static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
+{
+ emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
+}
+
+static inline void emit_jalr(unsigned int link, unsigned int reg,
+ struct jit_ctx *ctx)
+{
+ emit_instr(ctx, jalr, link, reg);
+}
+
+static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, jr, reg);
+}
+
+static inline u16 align_sp(unsigned int num)
+{
+ /* Double word alignment for 32-bit, quadword for 64-bit */
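+	/* e.g. num == 20 is rounded up to 24 on 32-bit, to 32 on 64-bit */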
+ unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;
+ num = (num + (align - 1)) & -align;
+ return num;
+}
+
+static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
+{
+ int i = 0, real_off = 0;
+ u32 sflags, tmp_flags;
+
+ /* Adjust the stack pointer */
+ if (offset)
+ emit_stack_offset(-align_sp(offset), ctx);
+
+ tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
+ /* sflags is essentially a bitmap */
+ while (tmp_flags) {
+ if ((sflags >> i) & 0x1) {
+ emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
+ ctx);
+ real_off += SZREG;
+ }
+ i++;
+ tmp_flags >>= 1;
+ }
+
+ /* save return address */
+ if (ctx->flags & SEEN_CALL) {
+ emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
+ real_off += SZREG;
+ }
+
+ /* Setup r_M leaving the alignment gap if necessary */
+ if (ctx->flags & SEEN_MEM) {
+ if (real_off % (SZREG * 2))
+ real_off += SZREG;
+ emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off);
+ }
+}
+
+static void restore_bpf_jit_regs(struct jit_ctx *ctx,
+ unsigned int offset)
+{
+ int i, real_off = 0;
+ u32 sflags, tmp_flags;
+
+ tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
+ /* sflags is a bitmap */
+ i = 0;
+ while (tmp_flags) {
+ if ((sflags >> i) & 0x1) {
+ emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
+ ctx);
+ real_off += SZREG;
+ }
+ i++;
+ tmp_flags >>= 1;
+ }
+
+ /* restore return address */
+ if (ctx->flags & SEEN_CALL)
+ emit_load_stack_reg(r_ra, r_sp, real_off, ctx);
+
+	/* Restore the sp and discard the scratch memory */
+ if (offset)
+ emit_stack_offset(align_sp(offset), ctx);
+}
+
+static unsigned int get_stack_depth(struct jit_ctx *ctx)
+{
+ int sp_off = 0;
+
+	/* How many s* regs do we need to preserve? */
+ sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG;
+
+ if (ctx->flags & SEEN_MEM)
+ sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */
+
+ if (ctx->flags & SEEN_CALL)
+ sp_off += SZREG; /* Space for our ra register */
+
+ return sp_off;
+}
+
+static void build_prologue(struct jit_ctx *ctx)
+{
+ int sp_off;
+
+ /* Calculate the total offset for the stack pointer */
+ sp_off = get_stack_depth(ctx);
+ save_bpf_jit_regs(ctx, sp_off);
+
+ if (ctx->flags & SEEN_SKB)
+ emit_reg_move(r_skb, MIPS_R_A0, ctx);
+
+ if (ctx->flags & SEEN_SKB_DATA) {
+ /* Load packet length */
+ emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len),
+ ctx);
+ emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len),
+ ctx);
+ /* Load the data pointer */
+ emit_load_ptr(r_skb_data, r_skb,
+ offsetof(struct sk_buff, data), ctx);
+ /* Load the header length */
+ emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx);
+ }
+
+ if (ctx->flags & SEEN_X)
+ emit_jit_reg_move(r_X, r_zero, ctx);
+
+ /*
+ * Do not leak kernel data to userspace, we only need to clear
+ * r_A if it is ever used. In fact if it is never used, we
+ * will not save/restore it, so clearing it in this case would
+ * corrupt the state of the caller.
+ */
+ if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
+ (ctx->flags & SEEN_A))
+ emit_jit_reg_move(r_A, r_zero, ctx);
+}
+
+static void build_epilogue(struct jit_ctx *ctx)
+{
+ unsigned int sp_off;
+
+ /* Calculate the total offset for the stack pointer */
+
+ sp_off = get_stack_depth(ctx);
+ restore_bpf_jit_regs(ctx, sp_off);
+
+ /* Return */
+ emit_jr(r_ra, ctx);
+ emit_nop(ctx);
+}
+
+#define CHOOSE_LOAD_FUNC(K, func) \
+ ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
+ func##_positive)
+
+static bool is_bad_offset(int b_off)
+{
+ return b_off > 0x1ffff || b_off < -0x20000;
+}
+
+static int build_body(struct jit_ctx *ctx)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ const struct sock_filter *inst;
+ unsigned int i, off, condt;
+ u32 k, b_off __maybe_unused;
+ u8 (*sk_load_func)(unsigned long *skb, int offset);
+
+ for (i = 0; i < prog->len; i++) {
+ u16 code;
+
+ inst = &(prog->insns[i]);
+ pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
+ __func__, inst->code, inst->jt, inst->jf, inst->k);
+ k = inst->k;
+ code = bpf_anc_helper(inst);
+
+ if (ctx->target == NULL)
+ ctx->offsets[i] = ctx->idx * 4;
+
+ switch (code) {
+ case BPF_LD | BPF_IMM:
+ /* A <- k ==> li r_A, k */
+ ctx->flags |= SEEN_A;
+ emit_load_imm(r_A, k, ctx);
+ break;
+ case BPF_LD | BPF_W | BPF_LEN:
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, len) != 4);
+ /* A <- len ==> lw r_A, offset(skb) */
+ ctx->flags |= SEEN_SKB | SEEN_A;
+ off = offsetof(struct sk_buff, len);
+ emit_load(r_A, r_skb, off, ctx);
+ break;
+ case BPF_LD | BPF_MEM:
+ /* A <- M[k] ==> lw r_A, offset(M) */
+ ctx->flags |= SEEN_MEM | SEEN_A;
+ emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
+ break;
+ case BPF_LD | BPF_W | BPF_ABS:
+ /* A <- P[k:4] */
+ sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word);
+ goto load;
+ case BPF_LD | BPF_H | BPF_ABS:
+ /* A <- P[k:2] */
+ sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half);
+ goto load;
+ case BPF_LD | BPF_B | BPF_ABS:
+ /* A <- P[k:1] */
+ sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte);
+load:
+ emit_load_imm(r_off, k, ctx);
+load_common:
+ ctx->flags |= SEEN_CALL | SEEN_OFF |
+ SEEN_SKB | SEEN_A | SEEN_SKB_DATA;
+
+ emit_load_func(r_s0, (ptr)sk_load_func, ctx);
+ emit_reg_move(MIPS_R_A0, r_skb, ctx);
+ emit_jalr(MIPS_R_RA, r_s0, ctx);
+ /* Load second argument to delay slot */
+ emit_reg_move(MIPS_R_A1, r_off, ctx);
+ /* Check the error value */
+ emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx),
+ ctx);
+ /* Load return register on DS for failures */
+ emit_reg_move(r_ret, r_zero, ctx);
+ /* Return with error */
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ break;
+ case BPF_LD | BPF_W | BPF_IND:
+ /* A <- P[X + k:4] */
+ sk_load_func = sk_load_word;
+ goto load_ind;
+ case BPF_LD | BPF_H | BPF_IND:
+ /* A <- P[X + k:2] */
+ sk_load_func = sk_load_half;
+ goto load_ind;
+ case BPF_LD | BPF_B | BPF_IND:
+ /* A <- P[X + k:1] */
+ sk_load_func = sk_load_byte;
+load_ind:
+ ctx->flags |= SEEN_OFF | SEEN_X;
+ emit_addiu(r_off, r_X, k, ctx);
+ goto load_common;
+ case BPF_LDX | BPF_IMM:
+ /* X <- k */
+ ctx->flags |= SEEN_X;
+ emit_load_imm(r_X, k, ctx);
+ break;
+ case BPF_LDX | BPF_MEM:
+ /* X <- M[k] */
+ ctx->flags |= SEEN_X | SEEN_MEM;
+ emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
+ break;
+ case BPF_LDX | BPF_W | BPF_LEN:
+ /* X <- len */
+ ctx->flags |= SEEN_X | SEEN_SKB;
+ off = offsetof(struct sk_buff, len);
+ emit_load(r_X, r_skb, off, ctx);
+ break;
+ case BPF_LDX | BPF_B | BPF_MSH:
+ /* X <- 4 * (P[k:1] & 0xf) */
+ ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB;
+ /* Load offset to a1 */
+ emit_load_func(r_s0, (ptr)sk_load_byte, ctx);
+ /*
+ * This may emit two instructions so it may not fit
+ * in the delay slot. So use a0 in the delay slot.
+ */
+ emit_load_imm(MIPS_R_A1, k, ctx);
+ emit_jalr(MIPS_R_RA, r_s0, ctx);
+ emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
+ /* Check the error value */
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
+ emit_reg_move(r_ret, r_zero, ctx);
+ /* We are good */
+			/* X <- P[k:1] & 0xf */
+ emit_andi(r_X, r_A, 0xf, ctx);
+ /* X << 2 */
+ emit_b(b_imm(i + 1, ctx), ctx);
+ emit_sll(r_X, r_X, 2, ctx); /* delay slot */
+ break;
+ case BPF_ST:
+ /* M[k] <- A */
+ ctx->flags |= SEEN_MEM | SEEN_A;
+ emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
+ break;
+ case BPF_STX:
+ /* M[k] <- X */
+ ctx->flags |= SEEN_MEM | SEEN_X;
+ emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
+ break;
+ case BPF_ALU | BPF_ADD | BPF_K:
+ /* A += K */
+ ctx->flags |= SEEN_A;
+ emit_addiu(r_A, r_A, k, ctx);
+ break;
+ case BPF_ALU | BPF_ADD | BPF_X:
+ /* A += X */
+ ctx->flags |= SEEN_A | SEEN_X;
+ emit_addu(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_SUB | BPF_K:
+ /* A -= K */
+ ctx->flags |= SEEN_A;
+ emit_addiu(r_A, r_A, -k, ctx);
+ break;
+ case BPF_ALU | BPF_SUB | BPF_X:
+ /* A -= X */
+ ctx->flags |= SEEN_A | SEEN_X;
+ emit_subu(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_K:
+ /* A *= K */
+ /* Load K to scratch register before MUL */
+ ctx->flags |= SEEN_A;
+ emit_load_imm(r_s0, k, ctx);
+ emit_mul(r_A, r_A, r_s0, ctx);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_X:
+ /* A *= X */
+ ctx->flags |= SEEN_A | SEEN_X;
+ emit_mul(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_DIV | BPF_K:
+ /* A /= k */
+ if (k == 1)
+ break;
+ if (optimize_div(&k)) {
+ ctx->flags |= SEEN_A;
+ emit_srl(r_A, r_A, k, ctx);
+ break;
+ }
+ ctx->flags |= SEEN_A;
+ emit_load_imm(r_s0, k, ctx);
+ emit_div(r_A, r_s0, ctx);
+ break;
+ case BPF_ALU | BPF_MOD | BPF_K:
+ /* A %= k */
+ if (k == 1) {
+ ctx->flags |= SEEN_A;
+ emit_jit_reg_move(r_A, r_zero, ctx);
+ } else {
+ ctx->flags |= SEEN_A;
+ emit_load_imm(r_s0, k, ctx);
+ emit_mod(r_A, r_s0, ctx);
+ }
+ break;
+ case BPF_ALU | BPF_DIV | BPF_X:
+ /* A /= X */
+ ctx->flags |= SEEN_X | SEEN_A;
+ /* Check if r_X is zero */
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
+ emit_load_imm(r_ret, 0, ctx); /* delay slot */
+ emit_div(r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_MOD | BPF_X:
+ /* A %= X */
+ ctx->flags |= SEEN_X | SEEN_A;
+ /* Check if r_X is zero */
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
+ emit_load_imm(r_ret, 0, ctx); /* delay slot */
+ emit_mod(r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_OR | BPF_K:
+ /* A |= K */
+ ctx->flags |= SEEN_A;
+ emit_ori(r_A, r_A, k, ctx);
+ break;
+ case BPF_ALU | BPF_OR | BPF_X:
+ /* A |= X */
+		ctx->flags |= SEEN_A | SEEN_X;
+		emit_or(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_XOR | BPF_K:
+ /* A ^= k */
+ ctx->flags |= SEEN_A;
+ emit_xori(r_A, r_A, k, ctx);
+ break;
+ case BPF_ANC | SKF_AD_ALU_XOR_X:
+ case BPF_ALU | BPF_XOR | BPF_X:
+ /* A ^= X */
+		ctx->flags |= SEEN_A | SEEN_X;
+ emit_xor(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_AND | BPF_K:
+ /* A &= K */
+ ctx->flags |= SEEN_A;
+ emit_andi(r_A, r_A, k, ctx);
+ break;
+ case BPF_ALU | BPF_AND | BPF_X:
+ /* A &= X */
+ ctx->flags |= SEEN_A | SEEN_X;
+ emit_and(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_LSH | BPF_K:
+ /* A <<= K */
+ ctx->flags |= SEEN_A;
+ emit_sll(r_A, r_A, k, ctx);
+ break;
+ case BPF_ALU | BPF_LSH | BPF_X:
+ /* A <<= X */
+ ctx->flags |= SEEN_A | SEEN_X;
+ emit_sllv(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_K:
+ /* A >>= K */
+ ctx->flags |= SEEN_A;
+ emit_srl(r_A, r_A, k, ctx);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_X:
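+		/* A >>= X */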
+ ctx->flags |= SEEN_A | SEEN_X;
+ emit_srlv(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_NEG:
+ /* A = -A */
+ ctx->flags |= SEEN_A;
+ emit_neg(r_A, ctx);
+ break;
+ case BPF_JMP | BPF_JA:
+ /* pc += K */
+ b_off = b_imm(i + k + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ break;
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ /* pc += ( A == K ) ? pc->jt : pc->jf */
+ condt = MIPS_COND_EQ | MIPS_COND_K;
+ goto jmp_cmp;
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ ctx->flags |= SEEN_X;
+ /* pc += ( A == X ) ? pc->jt : pc->jf */
+ condt = MIPS_COND_EQ | MIPS_COND_X;
+ goto jmp_cmp;
+ case BPF_JMP | BPF_JGE | BPF_K:
+ /* pc += ( A >= K ) ? pc->jt : pc->jf */
+ condt = MIPS_COND_GE | MIPS_COND_K;
+ goto jmp_cmp;
+ case BPF_JMP | BPF_JGE | BPF_X:
+ ctx->flags |= SEEN_X;
+ /* pc += ( A >= X ) ? pc->jt : pc->jf */
+ condt = MIPS_COND_GE | MIPS_COND_X;
+ goto jmp_cmp;
+ case BPF_JMP | BPF_JGT | BPF_K:
+ /* pc += ( A > K ) ? pc->jt : pc->jf */
+ condt = MIPS_COND_GT | MIPS_COND_K;
+ goto jmp_cmp;
+ case BPF_JMP | BPF_JGT | BPF_X:
+ ctx->flags |= SEEN_X;
+ /* pc += ( A > X ) ? pc->jt : pc->jf */
+ condt = MIPS_COND_GT | MIPS_COND_X;
+jmp_cmp:
+ /* Greater or Equal */
+ if ((condt & MIPS_COND_GE) ||
+ (condt & MIPS_COND_GT)) {
+ if (condt & MIPS_COND_K) { /* K */
+ ctx->flags |= SEEN_A;
+ emit_sltiu(r_s0, r_A, k, ctx);
+ } else { /* X */
+ ctx->flags |= SEEN_A |
+ SEEN_X;
+ emit_sltu(r_s0, r_A, r_X, ctx);
+ }
+			/* A < (K|X) ? r_scratch = 1 */
+ b_off = b_imm(i + inst->jf + 1, ctx);
+ emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
+ ctx);
+ emit_nop(ctx);
+ /* A > (K|X) ? scratch = 0 */
+ if (condt & MIPS_COND_GT) {
+ /* Checking for equality */
+ ctx->flags |= SEEN_A | SEEN_X;
+ if (condt & MIPS_COND_K)
+ emit_load_imm(r_s0, k, ctx);
+ else
+ emit_jit_reg_move(r_s0, r_X,
+ ctx);
+ b_off = b_imm(i + inst->jf + 1, ctx);
+ emit_bcond(MIPS_COND_EQ, r_A, r_s0,
+ b_off, ctx);
+ emit_nop(ctx);
+ /* Finally, A > K|X */
+ b_off = b_imm(i + inst->jt + 1, ctx);
+ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ } else {
+ /* A >= (K|X) so jump */
+ b_off = b_imm(i + inst->jt + 1, ctx);
+ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ }
+ } else {
+ /* A == K|X */
+ if (condt & MIPS_COND_K) { /* K */
+ ctx->flags |= SEEN_A;
+ emit_load_imm(r_s0, k, ctx);
+ /* jump true */
+ b_off = b_imm(i + inst->jt + 1, ctx);
+ emit_bcond(MIPS_COND_EQ, r_A, r_s0,
+ b_off, ctx);
+ emit_nop(ctx);
+ /* jump false */
+ b_off = b_imm(i + inst->jf + 1,
+ ctx);
+ emit_bcond(MIPS_COND_NE, r_A, r_s0,
+ b_off, ctx);
+ emit_nop(ctx);
+ } else { /* X */
+ /* jump true */
+ ctx->flags |= SEEN_A | SEEN_X;
+ b_off = b_imm(i + inst->jt + 1,
+ ctx);
+ emit_bcond(MIPS_COND_EQ, r_A, r_X,
+ b_off, ctx);
+ emit_nop(ctx);
+ /* jump false */
+ b_off = b_imm(i + inst->jf + 1, ctx);
+ emit_bcond(MIPS_COND_NE, r_A, r_X,
+ b_off, ctx);
+ emit_nop(ctx);
+ }
+ }
+ break;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ ctx->flags |= SEEN_A;
+ /* pc += (A & K) ? pc -> jt : pc -> jf */
+ emit_load_imm(r_s1, k, ctx);
+ emit_and(r_s0, r_A, r_s1, ctx);
+ /* jump true */
+ b_off = b_imm(i + inst->jt + 1, ctx);
+ emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
+ emit_nop(ctx);
+ /* jump false */
+ b_off = b_imm(i + inst->jf + 1, ctx);
+ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ break;
+ case BPF_JMP | BPF_JSET | BPF_X:
+ ctx->flags |= SEEN_X | SEEN_A;
+ /* pc += (A & X) ? pc -> jt : pc -> jf */
+ emit_and(r_s0, r_A, r_X, ctx);
+ /* jump true */
+ b_off = b_imm(i + inst->jt + 1, ctx);
+ emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
+ emit_nop(ctx);
+ /* jump false */
+ b_off = b_imm(i + inst->jf + 1, ctx);
+ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ break;
+ case BPF_RET | BPF_A:
+ ctx->flags |= SEEN_A;
+ if (i != prog->len - 1) {
+ /*
+ * If this is not the last instruction
+ * then jump to the epilogue
+ */
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_b(b_off, ctx);
+ }
+ emit_reg_move(r_ret, r_A, ctx); /* delay slot */
+ break;
+ case BPF_RET | BPF_K:
+ /*
+		 * The immediate load can emit two instructions, so it does
+		 * not fit in the delay slot.
+ */
+ emit_load_imm(r_ret, k, ctx);
+ if (i != prog->len - 1) {
+ /*
+ * If this is not the last instruction
+ * then jump to the epilogue
+ */
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ }
+ break;
+ case BPF_MISC | BPF_TAX:
+ /* X = A */
+ ctx->flags |= SEEN_X | SEEN_A;
+ emit_jit_reg_move(r_X, r_A, ctx);
+ break;
+ case BPF_MISC | BPF_TXA:
+ /* A = X */
+ ctx->flags |= SEEN_A | SEEN_X;
+ emit_jit_reg_move(r_A, r_X, ctx);
+ break;
+ /* AUX */
+ case BPF_ANC | SKF_AD_PROTOCOL:
+		/* A = ntohs(skb->protocol) */
+ ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
+ BUILD_BUG_ON(sizeof_field(struct sk_buff,
+ protocol) != 2);
+ off = offsetof(struct sk_buff, protocol);
+ emit_half_load(r_A, r_skb, off, ctx);
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ /* This needs little endian fixup */
+ if (cpu_has_wsbh) {
+ /* R2 and later have the wsbh instruction */
+ emit_wsbh(r_A, r_A, ctx);
+ } else {
+ /* Get first byte */
+ emit_andi(r_tmp_imm, r_A, 0xff, ctx);
+ /* Shift it */
+ emit_sll(r_tmp, r_tmp_imm, 8, ctx);
+ /* Get second byte */
+ emit_srl(r_tmp_imm, r_A, 8, ctx);
+ emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
+			/* Put everything together in r_A */
+ emit_or(r_A, r_tmp, r_tmp_imm, ctx);
+ }
+#endif
+ break;
+ case BPF_ANC | SKF_AD_CPU:
+ ctx->flags |= SEEN_A | SEEN_OFF;
+ /* A = current_thread_info()->cpu */
+ BUILD_BUG_ON(sizeof_field(struct thread_info,
+ cpu) != 4);
+ off = offsetof(struct thread_info, cpu);
+ /* $28/gp points to the thread_info struct */
+ emit_load(r_A, 28, off, ctx);
+ break;
+ case BPF_ANC | SKF_AD_IFINDEX:
+ /* A = skb->dev->ifindex */
+ case BPF_ANC | SKF_AD_HATYPE:
+ /* A = skb->dev->type */
+ ctx->flags |= SEEN_SKB | SEEN_A;
+ off = offsetof(struct sk_buff, dev);
+ /* Load *dev pointer */
+ emit_load_ptr(r_s0, r_skb, off, ctx);
+ /* error (0) in the delay slot */
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
+ emit_reg_move(r_ret, r_zero, ctx);
+ if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
+ BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
+ off = offsetof(struct net_device, ifindex);
+ emit_load(r_A, r_s0, off, ctx);
+		} else { /* code == (BPF_ANC | SKF_AD_HATYPE) */
+ BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2);
+ off = offsetof(struct net_device, type);
+ emit_half_load_unsigned(r_A, r_s0, off, ctx);
+ }
+ break;
+ case BPF_ANC | SKF_AD_MARK:
+ ctx->flags |= SEEN_SKB | SEEN_A;
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);
+ off = offsetof(struct sk_buff, mark);
+ emit_load(r_A, r_skb, off, ctx);
+ break;
+ case BPF_ANC | SKF_AD_RXHASH:
+ ctx->flags |= SEEN_SKB | SEEN_A;
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);
+ off = offsetof(struct sk_buff, hash);
+ emit_load(r_A, r_skb, off, ctx);
+ break;
+ case BPF_ANC | SKF_AD_VLAN_TAG:
+ ctx->flags |= SEEN_SKB | SEEN_A;
+ BUILD_BUG_ON(sizeof_field(struct sk_buff,
+ vlan_tci) != 2);
+ off = offsetof(struct sk_buff, vlan_tci);
+ emit_half_load_unsigned(r_A, r_skb, off, ctx);
+ break;
+ case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
+ ctx->flags |= SEEN_SKB | SEEN_A;
+ emit_load_byte(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET(), ctx);
+ if (PKT_VLAN_PRESENT_BIT)
+ emit_srl(r_A, r_A, PKT_VLAN_PRESENT_BIT, ctx);
+ if (PKT_VLAN_PRESENT_BIT < 7)
+ emit_andi(r_A, r_A, 1, ctx);
+ break;
+ case BPF_ANC | SKF_AD_PKTTYPE:
+ ctx->flags |= SEEN_SKB;
+
+ emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx);
+ /* Keep only the last 3 bits */
+ emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* Get the actual packet type to the lower 3 bits */
+ emit_srl(r_A, r_A, 5, ctx);
+#endif
+ break;
+ case BPF_ANC | SKF_AD_QUEUE:
+ ctx->flags |= SEEN_SKB | SEEN_A;
+ BUILD_BUG_ON(sizeof_field(struct sk_buff,
+ queue_mapping) != 2);
+ BUILD_BUG_ON(offsetof(struct sk_buff,
+ queue_mapping) > 0xff);
+ off = offsetof(struct sk_buff, queue_mapping);
+ emit_half_load_unsigned(r_A, r_skb, off, ctx);
+ break;
+ default:
+ pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
+ inst->code);
+ return -1;
+ }
+ }
+
+ /* compute offsets only during the first pass */
+ if (ctx->target == NULL)
+ ctx->offsets[i] = ctx->idx * 4;
+
+ return 0;
+}
+
+void bpf_jit_compile(struct bpf_prog *fp)
+{
+ struct jit_ctx ctx;
+ unsigned int alloc_size, tmp_idx;
+
+ if (!bpf_jit_enable)
+ return;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
+ if (ctx.offsets == NULL)
+ return;
+
+ ctx.skf = fp;
+
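+	/*
+	 * First pass: ctx.target is still NULL, so build_body() only
+	 * computes the per-instruction offsets and the image size.
+	 */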
+ if (build_body(&ctx))
+ goto out;
+
+ tmp_idx = ctx.idx;
+ build_prologue(&ctx);
+ ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
+ /* just to complete the ctx.idx count */
+ build_epilogue(&ctx);
+
+ alloc_size = 4 * ctx.idx;
+ ctx.target = module_alloc(alloc_size);
+ if (ctx.target == NULL)
+ goto out;
+
+ /* Clean it */
+ memset(ctx.target, 0, alloc_size);
+
+ ctx.idx = 0;
+
+ /* Generate the actual JIT code */
+ build_prologue(&ctx);
+ if (build_body(&ctx)) {
+ module_memfree(ctx.target);
+ goto out;
+ }
+ build_epilogue(&ctx);
+
+ /* Update the icache */
+ flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));
+
+ if (bpf_jit_enable > 1)
+ /* Dump JIT code */
+ bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
+
+ fp->bpf_func = (void *)ctx.target;
+ fp->jited = 1;
+
+out:
+ kfree(ctx.offsets);
+}
+
+void bpf_jit_free(struct bpf_prog *fp)
+{
+ if (fp->jited)
+ module_memfree(fp->bpf_func);
+
+ bpf_prog_unlock_free(fp);
+}
diff --git a/arch/mips/net/bpf_jit.h b/arch/mips/net/bpf_jit.h
new file mode 100644
index 000000000..166ca06c9
--- /dev/null
+++ b/arch/mips/net/bpf_jit.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Just-In-Time compiler for BPF filters on MIPS
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#ifndef BPF_JIT_MIPS_OP_H
+#define BPF_JIT_MIPS_OP_H
+
+/* Registers used by JIT */
+#define MIPS_R_ZERO 0
+#define MIPS_R_V0 2
+#define MIPS_R_A0 4
+#define MIPS_R_A1 5
+#define MIPS_R_T4 12
+#define MIPS_R_T5 13
+#define MIPS_R_T6 14
+#define MIPS_R_T7 15
+#define MIPS_R_S0 16
+#define MIPS_R_S1 17
+#define MIPS_R_S2 18
+#define MIPS_R_S3 19
+#define MIPS_R_S4 20
+#define MIPS_R_S5 21
+#define MIPS_R_S6 22
+#define MIPS_R_S7 23
+#define MIPS_R_SP 29
+#define MIPS_R_RA 31
+
+/* Conditional codes */
+#define MIPS_COND_EQ 0x1
+#define MIPS_COND_GE (0x1 << 1)
+#define MIPS_COND_GT (0x1 << 2)
+#define MIPS_COND_NE (0x1 << 3)
+#define MIPS_COND_ALL (0x1 << 4)
+/* Conditionals on X register or K immediate */
+#define MIPS_COND_X (0x1 << 5)
+#define MIPS_COND_K (0x1 << 6)
+
+#define r_ret MIPS_R_V0
+
+/*
+ * Use 2 scratch registers to avoid pipeline interlocks.
+ * There is no overhead during epilogue and prologue since
+ * any of the $s0-$s6 registers will only be preserved if
+ * they are going to actually be used.
+ */
+#define r_skb_hl MIPS_R_S0 /* skb header length */
+#define r_skb_data MIPS_R_S1 /* skb actual data */
+#define r_off MIPS_R_S2
+#define r_A MIPS_R_S3
+#define r_X MIPS_R_S4
+#define r_skb MIPS_R_S5
+#define r_M MIPS_R_S6
+#define r_skb_len MIPS_R_S7
+#define r_s0 MIPS_R_T4 /* scratch reg 1 */
+#define r_s1 MIPS_R_T5 /* scratch reg 2 */
+#define r_tmp_imm MIPS_R_T6 /* No need to preserve this */
+#define r_tmp MIPS_R_T7 /* No need to preserve this */
+#define r_zero MIPS_R_ZERO
+#define r_sp MIPS_R_SP
+#define r_ra MIPS_R_RA
+
+#ifndef __ASSEMBLY__
+
+/* Declare ASM helpers */
+
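+/*
+ * Note: these helpers do not follow the normal C calling convention.
+ * The JIT calls them with the skb pointer in a0 and the offset in a1;
+ * the loaded value comes back in r_A and an error status in r_ret, so
+ * the prototypes below exist mainly so their addresses can be taken.
+ */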
+#define DECLARE_LOAD_FUNC(func) \
+ extern u8 func(unsigned long *skb, int offset); \
+ extern u8 func##_negative(unsigned long *skb, int offset); \
+ extern u8 func##_positive(unsigned long *skb, int offset)
+
+DECLARE_LOAD_FUNC(sk_load_word);
+DECLARE_LOAD_FUNC(sk_load_half);
+DECLARE_LOAD_FUNC(sk_load_byte);
+
+#endif
+
+#endif /* BPF_JIT_MIPS_OP_H */
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
new file mode 100644
index 000000000..57154c588
--- /dev/null
+++ b/arch/mips/net/bpf_jit_asm.S
@@ -0,0 +1,285 @@
+/*
+ * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
+ * compiler.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <asm/asm.h>
+#include <asm/isa-rev.h>
+#include <asm/regdef.h>
+#include "bpf_jit.h"
+
+/* ABI
+ *
+ * r_skb_hl skb header length
+ * r_skb_data skb data
+ * r_off(a1) offset register
+ * r_A BPF register A
+ * r_X		BPF register X
+ * r_skb(a0) *skb
+ * r_M *scratch memory
+ * r_skb_len	skb length
+ * r_s0 Scratch register 0
+ * r_s1 Scratch register 1
+ *
+ * On entry:
+ * a0: *skb
+ * a1: offset (imm or imm + X)
+ *
+ * All non-BPF-ABI registers are free for use. On return, we only
+ * care about r_ret. The BPF-ABI registers are assumed to remain
+ * unmodified during the entire filter operation.
+ */
+
+#define skb a0
+#define offset a1
+#define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */
+
+ /* We know better :) so prevent assembler reordering etc */
+ .set noreorder
+
+#define is_offset_negative(TYPE) \
+ /* If offset is negative we have more work to do */ \
+ slti t0, offset, 0; \
+ bgtz t0, bpf_slow_path_##TYPE##_neg; \
+ /* Be careful what follows in DS. */
+
+#define is_offset_in_header(SIZE, TYPE) \
+ /* Reading from header? */ \
+ addiu $r_s0, $r_skb_hl, -SIZE; \
+ slt t0, $r_s0, offset; \
+ bgtz t0, bpf_slow_path_##TYPE; \
+
+LEAF(sk_load_word)
+ is_offset_negative(word)
+FEXPORT(sk_load_word_positive)
+ is_offset_in_header(4, word)
+ /* Offset within header boundaries */
+ PTR_ADDU t1, $r_skb_data, offset
+ .set reorder
+ lw $r_A, 0(t1)
+ .set noreorder
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if MIPS_ISA_REV >= 2
+ wsbh t0, $r_A
+ rotr $r_A, t0, 16
+# else
+ sll t0, $r_A, 24
+ srl t1, $r_A, 24
+ srl t2, $r_A, 8
+ or t0, t0, t1
+ andi t2, t2, 0xff00
+ andi t1, $r_A, 0xff00
+ or t0, t0, t2
+ sll t1, t1, 8
+ or $r_A, t0, t1
+# endif
+#endif
+ jr $r_ra
+ move $r_ret, zero
+ END(sk_load_word)
+
+LEAF(sk_load_half)
+ is_offset_negative(half)
+FEXPORT(sk_load_half_positive)
+ is_offset_in_header(2, half)
+ /* Offset within header boundaries */
+ PTR_ADDU t1, $r_skb_data, offset
+ lhu $r_A, 0(t1)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if MIPS_ISA_REV >= 2
+ wsbh $r_A, $r_A
+# else
+ sll t0, $r_A, 8
+ srl t1, $r_A, 8
+ andi t0, t0, 0xff00
+ or $r_A, t0, t1
+# endif
+#endif
+ jr $r_ra
+ move $r_ret, zero
+ END(sk_load_half)
+
+LEAF(sk_load_byte)
+ is_offset_negative(byte)
+FEXPORT(sk_load_byte_positive)
+ is_offset_in_header(1, byte)
+ /* Offset within header boundaries */
+ PTR_ADDU t1, $r_skb_data, offset
+ lbu $r_A, 0(t1)
+ jr $r_ra
+ move $r_ret, zero
+ END(sk_load_byte)
+
+/*
+ * call skb_copy_bits:
+ * (prototype in linux/skbuff.h)
+ *
+ * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
+ *
+ * The o32 ABI mandates that we reserve 4 argument slots on the stack.
+ * Even though we do not use them ourselves, we must allocate that
+ * space to stay ABI compliant in case the callee wants to use it.
+ * We also allocate 2 more spaces for $r_ra and our return register (*to).
+ *
+ * n64 is a bit different. The *caller* will allocate the space to preserve
+ * the arguments. So in 64-bit kernels the 4-arg space is not strictly
+ * needed, but allocating it anyway is harmless.
+ *
+ * (void *to) is returned in r_s0
+ *
+ */
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define DS_OFFSET(SIZE) (4 * SZREG)
+#else
+#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
+#endif
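+/*
+ * On big endian the (4 - SIZE) adjustment makes skb_copy_bits() place
+ * its SIZE bytes at the least-significant end of the previously zeroed
+ * destination word, so the later INT_L reload sees the value in the
+ * low-order bytes on both endiannesses.
+ */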
+#define bpf_slow_path_common(SIZE) \
+ /* Quick check. Are we within reasonable boundaries? */ \
+ LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \
+ sltu $r_s0, offset, $r_s1; \
+ beqz $r_s0, fault; \
+ /* Load 4th argument in DS */ \
+ LONG_ADDIU a3, zero, SIZE; \
+ PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
+ PTR_LA t0, skb_copy_bits; \
+ PTR_S $r_ra, (5 * SZREG)($r_sp); \
+ /* Assign low slot to a2 */ \
+ PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \
+ jalr t0; \
+	/* Zero our destination slot (this is the jalr delay slot, which is fine) */ \
+ INT_S zero, (4 * SZREG)($r_sp); \
+ /* \
+ * skb_copy_bits returns 0 on success and -EFAULT \
+ * on error. Our data live in a2. Do not bother with \
+ * our data if an error has been returned. \
+ */ \
+ /* Restore our frame */ \
+ PTR_L $r_ra, (5 * SZREG)($r_sp); \
+ INT_L $r_s0, (4 * SZREG)($r_sp); \
+ bltz v0, fault; \
+ PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
+ move $r_ret, zero; \
+
+NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
+ bpf_slow_path_common(4)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if MIPS_ISA_REV >= 2
+ wsbh t0, $r_s0
+ jr $r_ra
+ rotr $r_A, t0, 16
+# else
+ sll t0, $r_s0, 24
+ srl t1, $r_s0, 24
+ srl t2, $r_s0, 8
+ or t0, t0, t1
+ andi t2, t2, 0xff00
+ andi t1, $r_s0, 0xff00
+ or t0, t0, t2
+ sll t1, t1, 8
+ jr $r_ra
+ or $r_A, t0, t1
+# endif
+#else
+ jr $r_ra
+ move $r_A, $r_s0
+#endif
+
+ END(bpf_slow_path_word)
+
+NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
+ bpf_slow_path_common(2)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if MIPS_ISA_REV >= 2
+ jr $r_ra
+ wsbh $r_A, $r_s0
+# else
+ sll t0, $r_s0, 8
+ andi t1, $r_s0, 0xff00
+ andi t0, t0, 0xff00
+ srl t1, t1, 8
+ jr $r_ra
+ or $r_A, t0, t1
+# endif
+#else
+ jr $r_ra
+ move $r_A, $r_s0
+#endif
+
+ END(bpf_slow_path_half)
+
+NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
+ bpf_slow_path_common(1)
+ jr $r_ra
+ move $r_A, $r_s0
+
+ END(bpf_slow_path_byte)
+
+/*
+ * Negative entry points
+ */
+ .macro bpf_is_end_of_data
+ li t0, SKF_LL_OFF
+ /* Reading link layer data? */
+ slt t1, offset, t0
+ bgtz t1, fault
+ /* Be careful what follows in DS. */
+ .endm
+/*
+ * call bpf_internal_load_pointer_neg_helper:
+ * (prototype in linux/filter.h)
+ *
+ * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
+ * int k, unsigned int size)
+ *
+ * see above (bpf_slow_path_common) for ABI restrictions
+ */
+#define bpf_negative_common(SIZE) \
+ PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
+ PTR_LA t0, bpf_internal_load_pointer_neg_helper; \
+ PTR_S $r_ra, (5 * SZREG)($r_sp); \
+ jalr t0; \
+ li a2, SIZE; \
+ PTR_L $r_ra, (5 * SZREG)($r_sp); \
+ /* Check return pointer */ \
+ beqz v0, fault; \
+ PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
+ /* Preserve our pointer */ \
+ move $r_s0, v0; \
+ /* Set return value */ \
+ move $r_ret, zero; \
+
+bpf_slow_path_word_neg:
+ bpf_is_end_of_data
+NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
+ bpf_negative_common(4)
+ jr $r_ra
+ lw $r_A, 0($r_s0)
+ END(sk_load_word_negative)
+
+bpf_slow_path_half_neg:
+ bpf_is_end_of_data
+NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
+ bpf_negative_common(2)
+ jr $r_ra
+ lhu $r_A, 0($r_s0)
+ END(sk_load_half_negative)
+
+bpf_slow_path_byte_neg:
+ bpf_is_end_of_data
+NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
+ bpf_negative_common(1)
+ jr $r_ra
+ lbu $r_A, 0($r_s0)
+ END(sk_load_byte_negative)
+
+fault:
+ jr $r_ra
+ addiu $r_ret, zero, 1
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
new file mode 100644
index 000000000..b31b91e57
--- /dev/null
+++ b/arch/mips/net/ebpf_jit.c
@@ -0,0 +1,1933 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Just-In-Time compiler for eBPF filters on MIPS
+ *
+ * Copyright (c) 2017 Cavium, Inc.
+ *
+ * Based on code from:
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/slab.h>
+#include <asm/bitops.h>
+#include <asm/byteorder.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-features.h>
+#include <asm/isa-rev.h>
+#include <asm/uasm.h>
+
+/* Registers used by JIT */
+#define MIPS_R_ZERO 0
+#define MIPS_R_AT 1
+#define MIPS_R_V0 2 /* BPF_R0 */
+#define MIPS_R_V1 3
+#define MIPS_R_A0 4 /* BPF_R1 */
+#define MIPS_R_A1 5 /* BPF_R2 */
+#define MIPS_R_A2 6 /* BPF_R3 */
+#define MIPS_R_A3 7 /* BPF_R4 */
+#define MIPS_R_A4 8 /* BPF_R5 */
+#define MIPS_R_T4 12 /* BPF_AX */
+#define MIPS_R_T5 13
+#define MIPS_R_T6 14
+#define MIPS_R_T7 15
+#define MIPS_R_S0 16 /* BPF_R6 */
+#define MIPS_R_S1 17 /* BPF_R7 */
+#define MIPS_R_S2 18 /* BPF_R8 */
+#define MIPS_R_S3 19 /* BPF_R9 */
+#define MIPS_R_S4 20 /* BPF_TCC */
+#define MIPS_R_S5 21
+#define MIPS_R_S6 22
+#define MIPS_R_S7 23
+#define MIPS_R_T8 24
+#define MIPS_R_T9 25
+#define MIPS_R_SP 29
+#define MIPS_R_RA 31
+
+/* eBPF flags */
+#define EBPF_SAVE_S0 BIT(0)
+#define EBPF_SAVE_S1 BIT(1)
+#define EBPF_SAVE_S2 BIT(2)
+#define EBPF_SAVE_S3 BIT(3)
+#define EBPF_SAVE_S4 BIT(4)
+#define EBPF_SAVE_RA BIT(5)
+#define EBPF_SEEN_FP BIT(6)
+#define EBPF_SEEN_TC BIT(7)
+#define EBPF_TCC_IN_V1 BIT(8)
+
+/*
+ * For the mips64 ISA, we need to track the value range or type for
+ * each JIT register. The BPF machine requires zero extended 32-bit
+ * values, but the mips64 ISA requires sign extended 32-bit values.
+ * At each point in the BPF program we track the state of every
+ * register so that we can zero extend or sign extend as the BPF
+ * semantics require.
+ */
+enum reg_val_type {
+ /* uninitialized */
+ REG_UNKNOWN,
+ /* not known to be 32-bit compatible. */
+ REG_64BIT,
+ /* 32-bit compatible, no truncation needed for 64-bit ops. */
+ REG_64BIT_32BIT,
+ /* 32-bit compatible, need truncation for 64-bit ops. */
+ REG_32BIT,
+ /* 32-bit no sign/zero extension needed. */
+ REG_32BIT_POS
+};
+
+/*
+ * high bit of offsets indicates if long branch conversion done at
+ * this insn.
+ */
+#define OFFSETS_B_CONV BIT(31)
+
+/**
+ * struct jit_ctx - JIT context
+ * @skf: The sk_filter
+ * @stack_size: eBPF stack size
+ * @idx: Instruction index
+ * @flags: JIT flags
+ * @offsets: Instruction offsets
+ * @target: Memory location for the compiled filter
+ * @reg_val_types: Packed enum reg_val_type for each register.
+ */
+struct jit_ctx {
+ const struct bpf_prog *skf;
+ int stack_size;
+ u32 idx;
+ u32 flags;
+ u32 *offsets;
+ u32 *target;
+ u64 *reg_val_types;
+ unsigned int long_b_conversion:1;
+ unsigned int gen_b_offsets:1;
+ unsigned int use_bbit_insns:1;
+};
+
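+/*
+ * reg_val_types packs one 3-bit reg_val_type per eBPF register into a
+ * u64, with one such u64 recorded per instruction index.
+ */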
+static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
+{
+ *rvt &= ~(7ull << (reg * 3));
+ *rvt |= ((u64)type << (reg * 3));
+}
+
+static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
+ int index, int reg)
+{
+ return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
+}
+
+/* Simply emit the instruction if the JIT memory space has been allocated */
+#define emit_instr_long(ctx, func64, func32, ...) \
+do { \
+ if ((ctx)->target != NULL) { \
+ u32 *p = &(ctx)->target[ctx->idx]; \
+ if (IS_ENABLED(CONFIG_64BIT)) \
+ uasm_i_##func64(&p, ##__VA_ARGS__); \
+ else \
+ uasm_i_##func32(&p, ##__VA_ARGS__); \
+ } \
+ (ctx)->idx++; \
+} while (0)
+
+#define emit_instr(ctx, func, ...) \
+ emit_instr_long(ctx, func, func, ##__VA_ARGS__)
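+
+/*
+ * For example, emit_instr_long(ctx, daddiu, addiu, ...) emits daddiu on
+ * 64-bit kernels and addiu on 32-bit ones, while emit_instr() uses the
+ * same mnemonic on both.
+ */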
+
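+/*
+ * Compute the absolute target for a MIPS "j" instruction, or return
+ * (unsigned int)-1 if the target does not lie in the same 256MB
+ * region as the JIT image.
+ */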
+static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
+{
+ unsigned long target_va, base_va;
+ unsigned int r;
+
+ if (!ctx->target)
+ return 0;
+
+ base_va = (unsigned long)ctx->target;
+ target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);
+
+ if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
+ return (unsigned int)-1;
+ r = target_va & 0x0ffffffful;
+ return r;
+}
+
+/* Compute the immediate value for PC-relative branches. */
+static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
+{
+ if (!ctx->gen_b_offsets)
+ return 0;
+
+ /*
+	 * We want a pc-relative branch. tgt is the instruction index
+	 * we want to jump to.
+	 *
+ * Branch on MIPS:
+ * I: target_offset <- sign_extend(offset)
+ * I+1: PC += target_offset (delay slot)
+ *
+ * ctx->idx currently points to the branch instruction
+ * but the offset is added to the delay slot so we need
+ * to subtract 4.
+ */
+ return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
+ (ctx->idx * 4) - 4;
+}
+
+enum which_ebpf_reg {
+ src_reg,
+ src_reg_no_fp,
+ dst_reg,
+ dst_reg_fp_ok
+};
+
+/*
+ * For eBPF, the register mapping naturally falls out of the
+ * requirements of eBPF and the MIPS n64 ABI. We don't maintain a
+ * separate frame pointer, so BPF_REG_10 relative accesses are
+ * adjusted to be $sp relative.
+ */
+static int ebpf_to_mips_reg(struct jit_ctx *ctx,
+ const struct bpf_insn *insn,
+ enum which_ebpf_reg w)
+{
+ int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
+ insn->src_reg : insn->dst_reg;
+
+ switch (ebpf_reg) {
+ case BPF_REG_0:
+ return MIPS_R_V0;
+ case BPF_REG_1:
+ return MIPS_R_A0;
+ case BPF_REG_2:
+ return MIPS_R_A1;
+ case BPF_REG_3:
+ return MIPS_R_A2;
+ case BPF_REG_4:
+ return MIPS_R_A3;
+ case BPF_REG_5:
+ return MIPS_R_A4;
+ case BPF_REG_6:
+ ctx->flags |= EBPF_SAVE_S0;
+ return MIPS_R_S0;
+ case BPF_REG_7:
+ ctx->flags |= EBPF_SAVE_S1;
+ return MIPS_R_S1;
+ case BPF_REG_8:
+ ctx->flags |= EBPF_SAVE_S2;
+ return MIPS_R_S2;
+ case BPF_REG_9:
+ ctx->flags |= EBPF_SAVE_S3;
+ return MIPS_R_S3;
+ case BPF_REG_10:
+ if (w == dst_reg || w == src_reg_no_fp)
+ goto bad_reg;
+ ctx->flags |= EBPF_SEEN_FP;
+ /*
+ * Needs special handling, return something that
+ * cannot be clobbered just in case.
+ */
+ return MIPS_R_ZERO;
+ case BPF_REG_AX:
+ return MIPS_R_T4;
+ default:
+bad_reg:
+ WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
+ return -EINVAL;
+ }
+}
+/*
+ * eBPF stack frame will be something like:
+ *
+ * Entry $sp ------> +--------------------------------+
+ * | $ra (optional) |
+ * +--------------------------------+
+ * | $s0 (optional) |
+ * +--------------------------------+
+ * | $s1 (optional) |
+ * +--------------------------------+
+ * | $s2 (optional) |
+ * +--------------------------------+
+ * | $s3 (optional) |
+ * +--------------------------------+
+ * | $s4 (optional) |
+ * +--------------------------------+
+ * | tmp-storage (if $ra saved) |
+ * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10
+ * | BPF_REG_10 relative storage |
+ * | MAX_BPF_STACK (optional) |
+ * | . |
+ * | . |
+ * | . |
+ * $sp --------> +--------------------------------+
+ *
+ * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
+ * area is not allocated.
+ */
+static int gen_int_prologue(struct jit_ctx *ctx)
+{
+ int stack_adjust = 0;
+ int store_offset;
+ int locals_size;
+
+ if (ctx->flags & EBPF_SAVE_RA)
+ /*
+		 * If $ra needs to be saved, we are making a function call
+		 * and may need an extra 8-byte tmp area.
+ */
+ stack_adjust += 2 * sizeof(long);
+ if (ctx->flags & EBPF_SAVE_S0)
+ stack_adjust += sizeof(long);
+ if (ctx->flags & EBPF_SAVE_S1)
+ stack_adjust += sizeof(long);
+ if (ctx->flags & EBPF_SAVE_S2)
+ stack_adjust += sizeof(long);
+ if (ctx->flags & EBPF_SAVE_S3)
+ stack_adjust += sizeof(long);
+ if (ctx->flags & EBPF_SAVE_S4)
+ stack_adjust += sizeof(long);
+
+ BUILD_BUG_ON(MAX_BPF_STACK & 7);
+ locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;
+
+ stack_adjust += locals_size;
+
+ ctx->stack_size = stack_adjust;
+
+ /*
+ * First instruction initializes the tail call count (TCC).
+ * On tail call we skip this instruction, and the TCC is
+ * passed in $v1 from the caller.
+ */
+ emit_instr(ctx, addiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
+ if (stack_adjust)
+ emit_instr_long(ctx, daddiu, addiu,
+ MIPS_R_SP, MIPS_R_SP, -stack_adjust);
+ else
+ return 0;
+
+ store_offset = stack_adjust - sizeof(long);
+
+ if (ctx->flags & EBPF_SAVE_RA) {
+ emit_instr_long(ctx, sd, sw,
+ MIPS_R_RA, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
+ }
+ if (ctx->flags & EBPF_SAVE_S0) {
+ emit_instr_long(ctx, sd, sw,
+ MIPS_R_S0, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
+ }
+ if (ctx->flags & EBPF_SAVE_S1) {
+ emit_instr_long(ctx, sd, sw,
+ MIPS_R_S1, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
+ }
+ if (ctx->flags & EBPF_SAVE_S2) {
+ emit_instr_long(ctx, sd, sw,
+ MIPS_R_S2, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
+ }
+ if (ctx->flags & EBPF_SAVE_S3) {
+ emit_instr_long(ctx, sd, sw,
+ MIPS_R_S3, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
+ }
+ if (ctx->flags & EBPF_SAVE_S4) {
+ emit_instr_long(ctx, sd, sw,
+ MIPS_R_S4, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
+ }
+
+ if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
+ emit_instr_long(ctx, daddu, addu,
+ MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);
+
+ return 0;
+}
+
+static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ int stack_adjust = ctx->stack_size;
+ int store_offset = stack_adjust - sizeof(long);
+ enum reg_val_type td;
+ int r0 = MIPS_R_V0;
+
+ if (dest_reg == MIPS_R_RA) {
+ /* Don't let zero extended value escape. */
+ td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
+ if (td == REG_64BIT)
+ emit_instr(ctx, sll, r0, r0, 0);
+ }
+
+ if (ctx->flags & EBPF_SAVE_RA) {
+ emit_instr_long(ctx, ld, lw,
+ MIPS_R_RA, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
+ }
+ if (ctx->flags & EBPF_SAVE_S0) {
+ emit_instr_long(ctx, ld, lw,
+ MIPS_R_S0, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
+ }
+ if (ctx->flags & EBPF_SAVE_S1) {
+ emit_instr_long(ctx, ld, lw,
+ MIPS_R_S1, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
+ }
+ if (ctx->flags & EBPF_SAVE_S2) {
+ emit_instr_long(ctx, ld, lw,
+ MIPS_R_S2, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
+ }
+ if (ctx->flags & EBPF_SAVE_S3) {
+ emit_instr_long(ctx, ld, lw,
+ MIPS_R_S3, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
+ }
+ if (ctx->flags & EBPF_SAVE_S4) {
+ emit_instr_long(ctx, ld, lw,
+ MIPS_R_S4, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
+ }
+ emit_instr(ctx, jr, dest_reg);
+
+ if (stack_adjust)
+ emit_instr_long(ctx, daddiu, addiu,
+ MIPS_R_SP, MIPS_R_SP, stack_adjust);
+ else
+ emit_instr(ctx, nop);
+
+ return 0;
+}
+
+static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
+ struct jit_ctx *ctx)
+{
+ if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
+ emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
+ } else {
+ int lower = (s16)(insn->imm & 0xffff);
+ int upper = insn->imm - lower;
+
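+		/*
+		 * Illustrative example: for imm = 0x12348765 the lower half
+		 * sign-extends to -0x789b, so upper = 0x12350000 and the
+		 * pair emitted is "lui reg, 0x1235; addiu reg, reg, -0x789b".
+		 */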
+ emit_instr(ctx, lui, reg, upper >> 16);
+ emit_instr(ctx, addiu, reg, reg, lower);
+ }
+}
+
+static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+ int idx)
+{
+ int upper_bound, lower_bound;
+ int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+
+ if (dst < 0)
+ return dst;
+
+ switch (BPF_OP(insn->code)) {
+ case BPF_MOV:
+ case BPF_ADD:
+ upper_bound = S16_MAX;
+ lower_bound = S16_MIN;
+ break;
+ case BPF_SUB:
+ upper_bound = -(int)S16_MIN;
+ lower_bound = -(int)S16_MAX;
+ break;
+ case BPF_AND:
+ case BPF_OR:
+ case BPF_XOR:
+ upper_bound = 0xffff;
+ lower_bound = 0;
+ break;
+ case BPF_RSH:
+ case BPF_LSH:
+ case BPF_ARSH:
+ /* Shift amounts are truncated, no need for bounds */
+ upper_bound = S32_MAX;
+ lower_bound = S32_MIN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Immediate move clobbers the register, so no sign/zero
+ * extension needed.
+ */
+ if (BPF_CLASS(insn->code) == BPF_ALU64 &&
+ BPF_OP(insn->code) != BPF_MOV &&
+ get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ /* BPF_ALU | BPF_LSH doesn't need separate sign extension */
+ if (BPF_CLASS(insn->code) == BPF_ALU &&
+ BPF_OP(insn->code) != BPF_LSH &&
+ BPF_OP(insn->code) != BPF_MOV &&
+ get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
+ emit_instr(ctx, sll, dst, dst, 0);
+
+ if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
+ /* single insn immediate case */
+ switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
+ case BPF_ALU64 | BPF_MOV:
+ emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_AND:
+ case BPF_ALU | BPF_AND:
+ emit_instr(ctx, andi, dst, dst, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_OR:
+ case BPF_ALU | BPF_OR:
+ emit_instr(ctx, ori, dst, dst, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_XOR:
+ case BPF_ALU | BPF_XOR:
+ emit_instr(ctx, xori, dst, dst, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_ADD:
+ emit_instr(ctx, daddiu, dst, dst, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_SUB:
+ emit_instr(ctx, daddiu, dst, dst, -insn->imm);
+ break;
+ case BPF_ALU64 | BPF_RSH:
+ emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
+ break;
+ case BPF_ALU | BPF_RSH:
+ emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
+ break;
+ case BPF_ALU64 | BPF_LSH:
+ emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
+ break;
+ case BPF_ALU | BPF_LSH:
+ emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
+ break;
+ case BPF_ALU64 | BPF_ARSH:
+ emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
+ break;
+ case BPF_ALU | BPF_ARSH:
+ emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
+ break;
+ case BPF_ALU | BPF_MOV:
+ emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
+ break;
+ case BPF_ALU | BPF_ADD:
+ emit_instr(ctx, addiu, dst, dst, insn->imm);
+ break;
+ case BPF_ALU | BPF_SUB:
+ emit_instr(ctx, addiu, dst, dst, -insn->imm);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ /* multi insn immediate case */
+ if (BPF_OP(insn->code) == BPF_MOV) {
+ gen_imm_to_reg(insn, dst, ctx);
+ } else {
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
+ case BPF_ALU64 | BPF_AND:
+ case BPF_ALU | BPF_AND:
+ emit_instr(ctx, and, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU64 | BPF_OR:
+ case BPF_ALU | BPF_OR:
+ emit_instr(ctx, or, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU64 | BPF_XOR:
+ case BPF_ALU | BPF_XOR:
+ emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU64 | BPF_ADD:
+ emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU64 | BPF_SUB:
+ emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU | BPF_ADD:
+ emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU | BPF_SUB:
+ emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
+{
+ if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
+ emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
+ } else if (value >= 0xffffffff80000000ull ||
+ (value < 0x80000000 && value > 0xffff)) {
+ emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
+ emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
+ } else {
+ int i;
+ bool seen_part = false;
+ int needed_shift = 0;
+
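+		/*
+		 * Build the constant 16 bits at a time, most significant
+		 * halfword first, shifting left between emitted parts.
+		 * For example (illustrative), 0x0001000200030004 is emitted
+		 * as "lui 0x1; ori 0x2; dsll 16; ori 0x3; dsll 16; ori 0x4".
+		 */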
+ for (i = 0; i < 4; i++) {
+ u64 part = (value >> (16 * (3 - i))) & 0xffff;
+
+ if (seen_part && needed_shift > 0 && (part || i == 3)) {
+ emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
+ needed_shift = 0;
+ }
+ if (part) {
+ if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
+ emit_instr(ctx, lui, dst, (s32)(s16)part);
+ needed_shift = -16;
+ } else {
+ emit_instr(ctx, ori, dst,
+ seen_part ? dst : MIPS_R_ZERO,
+ (unsigned int)part);
+ }
+ seen_part = true;
+ }
+ if (seen_part)
+ needed_shift += 16;
+ }
+ }
+}
+
+static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
+{
+ int off, b_off;
+ int tcc_reg;
+
+ ctx->flags |= EBPF_SEEN_TC;
+ /*
+ * if (index >= array->map.max_entries)
+ * goto out;
+ */
+ off = offsetof(struct bpf_array, map.max_entries);
+ emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
+ emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
+ b_off = b_imm(this_idx + 1, ctx);
+ emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
+ /*
+ * if (TCC-- < 0)
+ * goto out;
+ */
+ /* Delay slot */
+ tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
+ emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
+ b_off = b_imm(this_idx + 1, ctx);
+ emit_instr(ctx, bltz, tcc_reg, b_off);
+ /*
+ * prog = array->ptrs[index];
+ * if (prog == NULL)
+ * goto out;
+ */
+ /* Delay slot */
+ emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
+ emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
+ off = offsetof(struct bpf_array, ptrs);
+ emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
+ b_off = b_imm(this_idx + 1, ctx);
+ emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
+ /* Delay slot */
+ emit_instr(ctx, nop);
+
+ /* goto *(prog->bpf_func + 4); */
+ off = offsetof(struct bpf_prog, bpf_func);
+ emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
+ /* All systems are go... propagate TCC */
+ emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
+ /* Skip first instruction (TCC initialization) */
+ emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
+ return build_int_epilogue(ctx, MIPS_R_T9);
+}
+
+static bool is_bad_offset(int b_off)
+{
+ return b_off > 0x1ffff || b_off < -0x20000;
+}
+
+/* Returns the number of insn slots consumed. */
+static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+ int this_idx, int exit_idx)
+{
+ int src, dst, r, td, ts, mem_off, b_off;
+ bool need_swap, did_move, cmp_eq;
+ unsigned int target = 0;
+ u64 t64;
+ s64 t64s;
+ int bpf_op = BPF_OP(insn->code);
+
+ if (IS_ENABLED(CONFIG_32BIT) && ((BPF_CLASS(insn->code) == BPF_ALU64)
+ || (bpf_op == BPF_DW)))
+ return -EINVAL;
+
+ switch (insn->code) {
+ case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
+ case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
+ case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
+ case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
+	case BPF_ALU | BPF_OR | BPF_K: /* ALU32_IMM */
+	case BPF_ALU | BPF_AND | BPF_K: /* ALU32_IMM */
+	case BPF_ALU | BPF_LSH | BPF_K: /* ALU32_IMM */
+	case BPF_ALU | BPF_RSH | BPF_K: /* ALU32_IMM */
+	case BPF_ALU | BPF_XOR | BPF_K: /* ALU32_IMM */
+	case BPF_ALU | BPF_ARSH | BPF_K: /* ALU32_IMM */
+ r = gen_imm_insn(insn, ctx, this_idx);
+ if (r < 0)
+ return r;
+ break;
+ case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ if (insn->imm == 1) /* Mult by 1 is a nop */
+ break;
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ if (MIPS_ISA_REV >= 6) {
+ emit_instr(ctx, dmulu, dst, dst, MIPS_R_AT);
+ } else {
+ emit_instr(ctx, dmultu, MIPS_R_AT, dst);
+ emit_instr(ctx, mflo, dst);
+ }
+ break;
+ case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (td == REG_64BIT) {
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ }
+ if (insn->imm == 1) /* Mult by 1 is a nop */
+ break;
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ if (MIPS_ISA_REV >= 6) {
+ emit_instr(ctx, mulu, dst, dst, MIPS_R_AT);
+ } else {
+ emit_instr(ctx, multu, dst, MIPS_R_AT);
+ emit_instr(ctx, mflo, dst);
+ }
+ break;
+ case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (td == REG_64BIT) {
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ }
+ emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
+ break;
+ case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
+ case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
+ if (insn->imm == 0)
+ return -EINVAL;
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (td == REG_64BIT)
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ if (insn->imm == 1) {
+ /* div by 1 is a nop, mod by 1 is zero */
+ if (bpf_op == BPF_MOD)
+ emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
+ break;
+ }
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ if (MIPS_ISA_REV >= 6) {
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, divu_r6, dst, dst, MIPS_R_AT);
+ else
+ emit_instr(ctx, modu, dst, dst, MIPS_R_AT);
+ break;
+ }
+ emit_instr(ctx, divu, dst, MIPS_R_AT);
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, mflo, dst);
+ else
+ emit_instr(ctx, mfhi, dst);
+ break;
+	case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU64_IMM */
+	case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU64_IMM */
+ if (insn->imm == 0)
+ return -EINVAL;
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ if (insn->imm == 1) {
+ /* div by 1 is a nop, mod by 1 is zero */
+ if (bpf_op == BPF_MOD)
+ emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
+ break;
+ }
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ if (MIPS_ISA_REV >= 6) {
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, ddivu_r6, dst, dst, MIPS_R_AT);
+ else
+ emit_instr(ctx, modu, dst, dst, MIPS_R_AT);
+ break;
+ }
+ emit_instr(ctx, ddivu, dst, MIPS_R_AT);
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, mflo, dst);
+ else
+ emit_instr(ctx, mfhi, dst);
+ break;
+ case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
+ src = ebpf_to_mips_reg(ctx, insn, src_reg);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (src < 0 || dst < 0)
+ return -EINVAL;
+ if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ did_move = false;
+ if (insn->src_reg == BPF_REG_10) {
+ if (bpf_op == BPF_MOV) {
+ emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
+ did_move = true;
+ } else {
+ emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
+ src = MIPS_R_AT;
+ }
+ } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ int tmp_reg = MIPS_R_AT;
+
+ if (bpf_op == BPF_MOV) {
+ tmp_reg = dst;
+ did_move = true;
+ }
+ emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
+ emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
+ src = MIPS_R_AT;
+ }
+ switch (bpf_op) {
+ case BPF_MOV:
+ if (!did_move)
+ emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
+ break;
+ case BPF_ADD:
+ emit_instr(ctx, daddu, dst, dst, src);
+ break;
+ case BPF_SUB:
+ emit_instr(ctx, dsubu, dst, dst, src);
+ break;
+ case BPF_XOR:
+ emit_instr(ctx, xor, dst, dst, src);
+ break;
+ case BPF_OR:
+ emit_instr(ctx, or, dst, dst, src);
+ break;
+ case BPF_AND:
+ emit_instr(ctx, and, dst, dst, src);
+ break;
+ case BPF_MUL:
+ if (MIPS_ISA_REV >= 6) {
+ emit_instr(ctx, dmulu, dst, dst, src);
+ } else {
+ emit_instr(ctx, dmultu, dst, src);
+ emit_instr(ctx, mflo, dst);
+ }
+ break;
+ case BPF_DIV:
+ case BPF_MOD:
+ if (MIPS_ISA_REV >= 6) {
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, ddivu_r6,
+ dst, dst, src);
+ else
+ emit_instr(ctx, modu, dst, dst, src);
+ break;
+ }
+ emit_instr(ctx, ddivu, dst, src);
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, mflo, dst);
+ else
+ emit_instr(ctx, mfhi, dst);
+ break;
+ case BPF_LSH:
+ emit_instr(ctx, dsllv, dst, dst, src);
+ break;
+ case BPF_RSH:
+ emit_instr(ctx, dsrlv, dst, dst, src);
+ break;
+ case BPF_ARSH:
+ emit_instr(ctx, dsrav, dst, dst, src);
+ break;
+ default:
+ pr_err("ALU64_REG NOT HANDLED\n");
+ return -EINVAL;
+ }
+ break;
+ case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_ARSH | BPF_X: /* ALU_REG */
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (src < 0 || dst < 0)
+ return -EINVAL;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (td == REG_64BIT) {
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ }
+ did_move = false;
+ ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+ if (ts == REG_64BIT) {
+ int tmp_reg = MIPS_R_AT;
+
+ if (bpf_op == BPF_MOV) {
+ tmp_reg = dst;
+ did_move = true;
+ }
+ /* sign extend */
+ emit_instr(ctx, sll, tmp_reg, src, 0);
+ src = MIPS_R_AT;
+ }
+ switch (bpf_op) {
+ case BPF_MOV:
+ if (!did_move)
+ emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
+ break;
+ case BPF_ADD:
+ emit_instr(ctx, addu, dst, dst, src);
+ break;
+ case BPF_SUB:
+ emit_instr(ctx, subu, dst, dst, src);
+ break;
+ case BPF_XOR:
+ emit_instr(ctx, xor, dst, dst, src);
+ break;
+ case BPF_OR:
+ emit_instr(ctx, or, dst, dst, src);
+ break;
+ case BPF_AND:
+ emit_instr(ctx, and, dst, dst, src);
+ break;
+ case BPF_MUL:
+ emit_instr(ctx, mul, dst, dst, src);
+ break;
+ case BPF_DIV:
+ case BPF_MOD:
+ if (MIPS_ISA_REV >= 6) {
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, divu_r6, dst, dst, src);
+ else
+ emit_instr(ctx, modu, dst, dst, src);
+ break;
+ }
+ emit_instr(ctx, divu, dst, src);
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, mflo, dst);
+ else
+ emit_instr(ctx, mfhi, dst);
+ break;
+ case BPF_LSH:
+ emit_instr(ctx, sllv, dst, dst, src);
+ break;
+ case BPF_RSH:
+ emit_instr(ctx, srlv, dst, dst, src);
+ break;
+ case BPF_ARSH:
+ emit_instr(ctx, srav, dst, dst, src);
+ break;
+ default:
+ pr_err("ALU_REG NOT HANDLED\n");
+ return -EINVAL;
+ }
+ break;
+ case BPF_JMP | BPF_EXIT:
+ if (this_idx + 1 < exit_idx) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
+ emit_instr(ctx, nop);
+ }
+ break;
+ case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
+ case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
+ cmp_eq = (bpf_op == BPF_JEQ);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+ if (dst < 0)
+ return dst;
+ if (insn->imm == 0) {
+ src = MIPS_R_ZERO;
+ } else {
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ src = MIPS_R_AT;
+ }
+ goto jeq_common;
+ case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_X:
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JSET | BPF_X:
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (src < 0 || dst < 0)
+ return -EINVAL;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+ if (td == REG_32BIT && ts != REG_32BIT) {
+ emit_instr(ctx, sll, MIPS_R_AT, src, 0);
+ src = MIPS_R_AT;
+ } else if (ts == REG_32BIT && td != REG_32BIT) {
+ emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
+ dst = MIPS_R_AT;
+ }
+ if (bpf_op == BPF_JSET) {
+ emit_instr(ctx, and, MIPS_R_AT, dst, src);
+ cmp_eq = false;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) {
+ emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ if (bpf_op == BPF_JSGT)
+ emit_instr(ctx, blez, MIPS_R_AT, b_off);
+ else
+ emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
+ emit_instr(ctx, nop);
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ if (bpf_op == BPF_JSGT)
+ emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
+ else
+ emit_instr(ctx, blez, MIPS_R_AT, b_off);
+ emit_instr(ctx, nop);
+ break;
+ } else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) {
+ emit_instr(ctx, slt, MIPS_R_AT, dst, src);
+ cmp_eq = bpf_op == BPF_JSGE;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) {
+ /* dst or src could be AT */
+ emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
+ emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
+ /* SP known to be non-zero, movz becomes boolean not */
+ if (MIPS_ISA_REV >= 6) {
+ emit_instr(ctx, seleqz, MIPS_R_T9,
+ MIPS_R_SP, MIPS_R_T8);
+ } else {
+ emit_instr(ctx, movz, MIPS_R_T9,
+ MIPS_R_SP, MIPS_R_T8);
+ emit_instr(ctx, movn, MIPS_R_T9,
+ MIPS_R_ZERO, MIPS_R_T8);
+ }
+ emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
+ cmp_eq = bpf_op == BPF_JGT;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) {
+ emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
+ cmp_eq = bpf_op == BPF_JGE;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else { /* JNE/JEQ case */
+ cmp_eq = (bpf_op == BPF_JEQ);
+ }
+jeq_common:
+ /*
+		 * If the next insn is EXIT and we are jumping around
+ * only it, invert the sense of the compare and
+ * conditionally jump to the exit. Poor man's branch
+ * chaining.
+ */
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off)) {
+ target = j_target(ctx, exit_idx);
+ if (target == (unsigned int)-1)
+ return -E2BIG;
+ cmp_eq = !cmp_eq;
+ b_off = 4 * 3;
+ if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+ ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+ ctx->long_b_conversion = 1;
+ }
+ }
+
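+			/*
+			 * If converted to a long branch, the (inverted)
+			 * conditional branch below skips the "j target; nop"
+			 * pair; otherwise it branches directly to the
+			 * resolved offset.
+			 */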
+ if (cmp_eq)
+ emit_instr(ctx, bne, dst, src, b_off);
+ else
+ emit_instr(ctx, beq, dst, src, b_off);
+ emit_instr(ctx, nop);
+ if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
+ emit_instr(ctx, j, target);
+ emit_instr(ctx, nop);
+ }
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off)) {
+ target = j_target(ctx, this_idx + insn->off + 1);
+ if (target == (unsigned int)-1)
+ return -E2BIG;
+ cmp_eq = !cmp_eq;
+ b_off = 4 * 3;
+ if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+ ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+ ctx->long_b_conversion = 1;
+ }
+ }
+
+ if (cmp_eq)
+ emit_instr(ctx, beq, dst, src, b_off);
+ else
+ emit_instr(ctx, bne, dst, src, b_off);
+ emit_instr(ctx, nop);
+ if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
+ emit_instr(ctx, j, target);
+ emit_instr(ctx, nop);
+ }
+ break;
+ case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
+ case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
+ case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */
+ case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */
+ cmp_eq = (bpf_op == BPF_JSGE);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+ if (dst < 0)
+ return dst;
+
+ if (insn->imm == 0) {
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ switch (bpf_op) {
+ case BPF_JSGT:
+ emit_instr(ctx, blez, dst, b_off);
+ break;
+ case BPF_JSGE:
+ emit_instr(ctx, bltz, dst, b_off);
+ break;
+ case BPF_JSLT:
+ emit_instr(ctx, bgez, dst, b_off);
+ break;
+ case BPF_JSLE:
+ emit_instr(ctx, bgtz, dst, b_off);
+ break;
+ }
+ emit_instr(ctx, nop);
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ switch (bpf_op) {
+ case BPF_JSGT:
+ emit_instr(ctx, bgtz, dst, b_off);
+ break;
+ case BPF_JSGE:
+ emit_instr(ctx, bgez, dst, b_off);
+ break;
+ case BPF_JSLT:
+ emit_instr(ctx, bltz, dst, b_off);
+ break;
+ case BPF_JSLE:
+ emit_instr(ctx, blez, dst, b_off);
+ break;
+ }
+ emit_instr(ctx, nop);
+ break;
+ }
+ /*
+		 * Only a "less than" compare is available, so use imm + 1
+		 * to generate "GT" and "LE"; "GE" and "LT" use imm unchanged.
+ */
+ if (bpf_op == BPF_JSGT)
+ t64s = insn->imm + 1;
+ else if (bpf_op == BPF_JSLE)
+ t64s = insn->imm + 1;
+ else
+ t64s = insn->imm;
+
+ cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE;
+ if (t64s >= S16_MIN && t64s <= S16_MAX) {
+ emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ goto jeq_common;
+ }
+ emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
+ emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ goto jeq_common;
+
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_K:
+ cmp_eq = (bpf_op == BPF_JGE);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+ if (dst < 0)
+ return dst;
+ /*
+		 * Only a "less than" compare is available, so use imm + 1
+		 * to generate "GT" and "LE"; "GE" and "LT" use imm unchanged.
+ */
+ if (bpf_op == BPF_JGT)
+ t64s = (u64)(u32)(insn->imm) + 1;
+ else if (bpf_op == BPF_JLE)
+ t64s = (u64)(u32)(insn->imm) + 1;
+ else
+ t64s = (u64)(u32)(insn->imm);
+
+ cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE;
+
+ emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
+ emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ goto jeq_common;
+
+ case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+ if (dst < 0)
+ return dst;
+
+ if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) {
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
+ emit_instr(ctx, nop);
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
+ emit_instr(ctx, nop);
+ break;
+ }
+ t64 = (u32)insn->imm;
+ emit_const_to_reg(ctx, MIPS_R_AT, t64);
+ emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ cmp_eq = false;
+ goto jeq_common;
+
+ case BPF_JMP | BPF_JA:
+ /*
+ * Prefer relative branch for easier debugging, but
+ * fall back if needed.
+ */
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off)) {
+ target = j_target(ctx, this_idx + insn->off + 1);
+ if (target == (unsigned int)-1)
+ return -E2BIG;
+ emit_instr(ctx, j, target);
+ } else {
+ emit_instr(ctx, b, b_off);
+ }
+ emit_instr(ctx, nop);
+ break;
+ case BPF_LD | BPF_DW | BPF_IMM:
+ if (insn->src_reg != 0)
+ return -EINVAL;
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
+ emit_const_to_reg(ctx, dst, t64);
+ return 2; /* Double slot insn */
+
+ case BPF_JMP | BPF_CALL:
+ ctx->flags |= EBPF_SAVE_RA;
+ t64s = (s64)insn->imm + (long)__bpf_call_base;
+ emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
+ emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
+ /* delay slot */
+ emit_instr(ctx, nop);
+ break;
+
+ case BPF_JMP | BPF_TAIL_CALL:
+ if (emit_bpf_tail_call(ctx, this_idx))
+ return -EINVAL;
+ break;
+
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (insn->imm == 64 && td == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+
+ if (insn->imm != 64 && td == REG_64BIT) {
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ }
+
+#ifdef __BIG_ENDIAN
+ need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
+#else
+ need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
+#endif
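+ /*
+ * wsbh swaps the bytes within each halfword; combined with a
+ * 16-bit rotate it byte-swaps a word, and dsbh + dshd together
+ * byte-swap a doubleword.
+ */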
+ if (insn->imm == 16) {
+ if (need_swap)
+ emit_instr(ctx, wsbh, dst, dst);
+ emit_instr(ctx, andi, dst, dst, 0xffff);
+ } else if (insn->imm == 32) {
+ if (need_swap) {
+ emit_instr(ctx, wsbh, dst, dst);
+ emit_instr(ctx, rotr, dst, dst, 16);
+ }
+ } else { /* 64-bit*/
+ if (need_swap) {
+ emit_instr(ctx, dsbh, dst, dst);
+ emit_instr(ctx, dshd, dst, dst);
+ }
+ }
+ break;
+
+ case BPF_ST | BPF_NOSPEC: /* speculation barrier */
+ break;
+
+ case BPF_ST | BPF_B | BPF_MEM:
+ case BPF_ST | BPF_H | BPF_MEM:
+ case BPF_ST | BPF_W | BPF_MEM:
+ case BPF_ST | BPF_DW | BPF_MEM:
+ if (insn->dst_reg == BPF_REG_10) {
+ ctx->flags |= EBPF_SEEN_FP;
+ dst = MIPS_R_SP;
+ mem_off = insn->off + MAX_BPF_STACK;
+ } else {
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ mem_off = insn->off;
+ }
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
+ break;
+ case BPF_H:
+ emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
+ break;
+ case BPF_W:
+ emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
+ break;
+ case BPF_DW:
+ emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
+ break;
+ }
+ break;
+
+ case BPF_LDX | BPF_B | BPF_MEM:
+ case BPF_LDX | BPF_H | BPF_MEM:
+ case BPF_LDX | BPF_W | BPF_MEM:
+ case BPF_LDX | BPF_DW | BPF_MEM:
+ if (insn->src_reg == BPF_REG_10) {
+ ctx->flags |= EBPF_SEEN_FP;
+ src = MIPS_R_SP;
+ mem_off = insn->off + MAX_BPF_STACK;
+ } else {
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ if (src < 0)
+ return src;
+ mem_off = insn->off;
+ }
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ emit_instr(ctx, lbu, dst, mem_off, src);
+ break;
+ case BPF_H:
+ emit_instr(ctx, lhu, dst, mem_off, src);
+ break;
+ case BPF_W:
+ emit_instr(ctx, lw, dst, mem_off, src);
+ break;
+ case BPF_DW:
+ emit_instr(ctx, ld, dst, mem_off, src);
+ break;
+ }
+ break;
+
+ case BPF_STX | BPF_B | BPF_MEM:
+ case BPF_STX | BPF_H | BPF_MEM:
+ case BPF_STX | BPF_W | BPF_MEM:
+ case BPF_STX | BPF_DW | BPF_MEM:
+ case BPF_STX | BPF_W | BPF_XADD:
+ case BPF_STX | BPF_DW | BPF_XADD:
+ if (insn->dst_reg == BPF_REG_10) {
+ ctx->flags |= EBPF_SEEN_FP;
+ dst = MIPS_R_SP;
+ mem_off = insn->off + MAX_BPF_STACK;
+ } else {
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ mem_off = insn->off;
+ }
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ if (src < 0)
+ return src;
+ if (BPF_MODE(insn->code) == BPF_XADD) {
+ /*
+ * If mem_off does not fit within the 9 bit ll/sc
+ * instruction immediate field, use a temp reg.
+ */
+ if (MIPS_ISA_REV >= 6 &&
+ (mem_off >= BIT(8) || mem_off < -BIT(8))) {
+ emit_instr(ctx, daddiu, MIPS_R_T6,
+ dst, mem_off);
+ mem_off = 0;
+ dst = MIPS_R_T6;
+ }
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_W:
+ if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ emit_instr(ctx, sll, MIPS_R_AT, src, 0);
+ src = MIPS_R_AT;
+ }
+ emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
+ emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
+ emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
+ /*
+ * On failure, branch back up to the LL (-4
+ * instructions of 4 bytes each).
+ */
+ emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
+ emit_instr(ctx, nop);
+ break;
+ case BPF_DW:
+ if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
+ emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
+ src = MIPS_R_AT;
+ }
+ emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
+ emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
+ emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
+ emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
+ emit_instr(ctx, nop);
+ break;
+ }
+ } else { /* BPF_MEM */
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ emit_instr(ctx, sb, src, mem_off, dst);
+ break;
+ case BPF_H:
+ emit_instr(ctx, sh, src, mem_off, dst);
+ break;
+ case BPF_W:
+ emit_instr(ctx, sw, src, mem_off, dst);
+ break;
+ case BPF_DW:
+ if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
+ emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
+ src = MIPS_R_AT;
+ }
+ emit_instr(ctx, sd, src, mem_off, dst);
+ break;
+ }
+ }
+ break;
+
+ default:
+ pr_err("NOT HANDLED %d - (%02x)\n",
+ this_idx, (unsigned int)insn->code);
+ return -EINVAL;
+ }
+ return 1;
+}
+
+#define RVT_VISITED_MASK 0xc000000000000000ull
+#define RVT_FALL_THROUGH 0x4000000000000000ull
+#define RVT_BRANCH_TAKEN 0x8000000000000000ull
+#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
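+
+/*
+ * The top two bits of each reg_val_types entry record which control
+ * flow edges out of the insn have been followed by the propagation
+ * pass: fall-through only, branch-taken only, or both (RVT_DONE).
+ */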
+
+static int build_int_body(struct jit_ctx *ctx)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ const struct bpf_insn *insn;
+ int i, r;
+
+ for (i = 0; i < prog->len; ) {
+ insn = prog->insnsi + i;
+ if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
+ /* dead instruction, don't emit it. */
+ i++;
+ continue;
+ }
+
+ if (ctx->target == NULL)
+ ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);
+
+ r = build_one_insn(insn, ctx, i, prog->len);
+ if (r < 0)
+ return r;
+ i += r;
+ }
+ /* epilogue offset */
+ if (ctx->target == NULL)
+ ctx->offsets[i] = ctx->idx * 4;
+
+ /*
+ * All exits have the offset of the epilogue; some offsets may
+ * not have been set due to branch-around threading, so set
+ * them now.
+ */
+ if (ctx->target == NULL)
+ for (i = 0; i < prog->len; i++) {
+ insn = prog->insnsi + i;
+ if (insn->code == (BPF_JMP | BPF_EXIT))
+ ctx->offsets[i] = ctx->idx * 4;
+ }
+ return 0;
+}
+
+/* return the last idx processed, or negative for error */
+static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
+ int start_idx, bool follow_taken)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ const struct bpf_insn *insn;
+ u64 exit_rvt = initial_rvt;
+ u64 *rvt = ctx->reg_val_types;
+ int idx;
+ int reg;
+
+ for (idx = start_idx; idx < prog->len; idx++) {
+ rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
+ insn = prog->insnsi + idx;
+ switch (BPF_CLASS(insn->code)) {
+ case BPF_ALU:
+ switch (BPF_OP(insn->code)) {
+ case BPF_ADD:
+ case BPF_SUB:
+ case BPF_MUL:
+ case BPF_DIV:
+ case BPF_OR:
+ case BPF_AND:
+ case BPF_LSH:
+ case BPF_RSH:
+ case BPF_NEG:
+ case BPF_MOD:
+ case BPF_XOR:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ break;
+ case BPF_MOV:
+ if (BPF_SRC(insn->code)) {
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ } else {
+ /* IMM to REG move*/
+ if (insn->imm >= 0)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ else
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ }
+ break;
+ case BPF_END:
+ if (insn->imm == 64)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ else if (insn->imm == 32)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ else /* insn->imm == 16 */
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ break;
+ }
+ rvt[idx] |= RVT_DONE;
+ break;
+ case BPF_ALU64:
+ switch (BPF_OP(insn->code)) {
+ case BPF_MOV:
+ if (BPF_SRC(insn->code)) {
+ /* REG to REG move*/
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ } else {
+ /* IMM to REG move*/
+ if (insn->imm >= 0)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ else
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
+ }
+ break;
+ default:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ }
+ rvt[idx] |= RVT_DONE;
+ break;
+ case BPF_LD:
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_DW:
+ if (BPF_MODE(insn->code) == BPF_IMM) {
+ s64 val;
+
+ val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
+ if (val > 0 && val <= S32_MAX)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ else if (val >= S32_MIN && val <= S32_MAX)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
+ else
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ rvt[idx] |= RVT_DONE;
+ idx++;
+ } else {
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ }
+ break;
+ case BPF_B:
+ case BPF_H:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ break;
+ case BPF_W:
+ if (BPF_MODE(insn->code) == BPF_IMM)
+ set_reg_val_type(&exit_rvt, insn->dst_reg,
+ insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
+ else
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ break;
+ }
+ rvt[idx] |= RVT_DONE;
+ break;
+ case BPF_LDX:
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_DW:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ break;
+ case BPF_B:
+ case BPF_H:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ break;
+ case BPF_W:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ break;
+ }
+ rvt[idx] |= RVT_DONE;
+ break;
+ case BPF_JMP:
+ switch (BPF_OP(insn->code)) {
+ case BPF_EXIT:
+ rvt[idx] = RVT_DONE | exit_rvt;
+ rvt[prog->len] = exit_rvt;
+ return idx;
+ case BPF_JA:
+ rvt[idx] |= RVT_DONE;
+ idx += insn->off;
+ break;
+ case BPF_JEQ:
+ case BPF_JGT:
+ case BPF_JGE:
+ case BPF_JLT:
+ case BPF_JLE:
+ case BPF_JSET:
+ case BPF_JNE:
+ case BPF_JSGT:
+ case BPF_JSGE:
+ case BPF_JSLT:
+ case BPF_JSLE:
+ if (follow_taken) {
+ rvt[idx] |= RVT_BRANCH_TAKEN;
+ idx += insn->off;
+ follow_taken = false;
+ } else {
+ rvt[idx] |= RVT_FALL_THROUGH;
+ }
+ break;
+ case BPF_CALL:
+ set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
+ /* Upon call return, argument registers are clobbered. */
+ for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
+ set_reg_val_type(&exit_rvt, reg, REG_64BIT);
+
+ rvt[idx] |= RVT_DONE;
+ break;
+ default:
+ WARN(1, "Unhandled BPF_JMP case.\n");
+ rvt[idx] |= RVT_DONE;
+ break;
+ }
+ break;
+ default:
+ rvt[idx] |= RVT_DONE;
+ break;
+ }
+ }
+ return idx;
+}
+
+/*
+ * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
+ * each eBPF insn. This allows unneeded sign and zero extension
+ * operations to be omitted.
+ *
+ * Doesn't yet handle confluence of control paths with conflicting
+ * ranges, but it is good enough for most sane code.
+ */
+static int reg_val_propagate(struct jit_ctx *ctx)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ u64 exit_rvt;
+ int reg;
+ int i;
+
+ /*
+ * 11 registers * 3 bits/reg leaves top bits free for other
+ * uses. Bits 62..63 are used to see if we have visited an insn.
+ */
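+ /*
+ * (With 3 bits per register and BPF_REG_0 in the low bits, e.g.
+ * BPF_REG_2's type would occupy bits 6..8 -- an illustration of
+ * the layout, assuming set_reg_val_type() packs registers from
+ * bit 0 upward.)
+ */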
+ exit_rvt = 0;
+
+ /* Upon entry, argument registers are 64-bit. */
+ for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
+ set_reg_val_type(&exit_rvt, reg, REG_64BIT);
+
+ /*
+ * First follow all conditional branches on the fall-through
+ * edge of control flow.
+ */
+ reg_val_propagate_range(ctx, exit_rvt, 0, false);
+restart_search:
+ /*
+ * Then repeatedly find the first conditional branch where
+ * both edges of control flow have not been taken, and follow
+ * the branch taken edge. We will end up restarting the
+ * search once per conditional branch insn.
+ */
+ for (i = 0; i < prog->len; i++) {
+ u64 rvt = ctx->reg_val_types[i];
+
+ if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
+ (rvt & RVT_VISITED_MASK) == 0)
+ continue;
+ if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
+ reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
+ } else { /* RVT_BRANCH_TAKEN */
+ WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
+ reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
+ }
+ goto restart_search;
+ }
+ /*
+ * Eventually all conditional branches have been followed on
+ * both branches and we are done. Any insn that has not been
+ * visited at this point is dead.
+ */
+
+ return 0;
+}
+
+static void jit_fill_hole(void *area, unsigned int size)
+{
+ u32 *p;
+
+ /* We are guaranteed to have aligned memory. */
+ for (p = area; size >= sizeof(u32); size -= sizeof(u32))
+ uasm_i_break(&p, BRK_BUG); /* Increments p */
+}
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+{
+ struct bpf_prog *orig_prog = prog;
+ bool tmp_blinded = false;
+ struct bpf_prog *tmp;
+ struct bpf_binary_header *header = NULL;
+ struct jit_ctx ctx;
+ unsigned int image_size;
+ u8 *image_ptr;
+
+ if (!prog->jit_requested)
+ return prog;
+
+ tmp = bpf_jit_blind_constants(prog);
+ /* If blinding was requested and we failed during blinding,
+ * we must fall back to the interpreter.
+ */
+ if (IS_ERR(tmp))
+ return orig_prog;
+ if (tmp != prog) {
+ tmp_blinded = true;
+ prog = tmp;
+ }
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ preempt_disable();
+ switch (current_cpu_type()) {
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ case CPU_CAVIUM_OCTEON3:
+ ctx.use_bbit_insns = 1;
+ break;
+ default:
+ ctx.use_bbit_insns = 0;
+ }
+ preempt_enable();
+
+ ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
+ if (ctx.offsets == NULL)
+ goto out_err;
+
+ ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
+ if (ctx.reg_val_types == NULL)
+ goto out_err;
+
+ ctx.skf = prog;
+
+ if (reg_val_propagate(&ctx))
+ goto out_err;
+
+ /*
+ * First pass discovers used resources and instruction offsets
+ * assuming short branches are used.
+ */
+ if (build_int_body(&ctx))
+ goto out_err;
+
+ /*
+ * If no calls are made (EBPF_SAVE_RA is not set), the tail call
+ * count can stay in $v1; otherwise we must save it in $s4.
+ */
+ if (ctx.flags & EBPF_SEEN_TC) {
+ if (ctx.flags & EBPF_SAVE_RA)
+ ctx.flags |= EBPF_SAVE_S4;
+ else
+ ctx.flags |= EBPF_TCC_IN_V1;
+ }
+
+ /*
+ * The second pass generates offsets; if any branch is out of
+ * range, a long jump-around sequence is generated and we have
+ * to try again from the beginning to generate the new
+ * offsets. This is repeated until no additional conversions are
+ * necessary.
+ */
+ do {
+ ctx.idx = 0;
+ ctx.gen_b_offsets = 1;
+ ctx.long_b_conversion = 0;
+ if (gen_int_prologue(&ctx))
+ goto out_err;
+ if (build_int_body(&ctx))
+ goto out_err;
+ if (build_int_epilogue(&ctx, MIPS_R_RA))
+ goto out_err;
+ } while (ctx.long_b_conversion);
+
+ image_size = 4 * ctx.idx;
+
+ header = bpf_jit_binary_alloc(image_size, &image_ptr,
+ sizeof(u32), jit_fill_hole);
+ if (header == NULL)
+ goto out_err;
+
+ ctx.target = (u32 *)image_ptr;
+
+ /* Third pass generates the code */
+ ctx.idx = 0;
+ if (gen_int_prologue(&ctx))
+ goto out_err;
+ if (build_int_body(&ctx))
+ goto out_err;
+ if (build_int_epilogue(&ctx, MIPS_R_RA))
+ goto out_err;
+
+ /* Update the icache */
+ flush_icache_range((unsigned long)ctx.target,
+ (unsigned long)&ctx.target[ctx.idx]);
+
+ if (bpf_jit_enable > 1)
+ /* Dump JIT code */
+ bpf_jit_dump(prog->len, image_size, 2, ctx.target);
+
+ bpf_jit_binary_lock_ro(header);
+ prog->bpf_func = (void *)ctx.target;
+ prog->jited = 1;
+ prog->jited_len = image_size;
+out_normal:
+ if (tmp_blinded)
+ bpf_jit_prog_release_other(prog, prog == orig_prog ?
+ tmp : orig_prog);
+ kfree(ctx.offsets);
+ kfree(ctx.reg_val_types);
+
+ return prog;
+
+out_err:
+ prog = orig_prog;
+ if (header)
+ bpf_jit_binary_free(header);
+ goto out_normal;
+}
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig
new file mode 100644
index 000000000..412351c5a
--- /dev/null
+++ b/arch/mips/netlogic/Kconfig
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0
+if NLM_XLP_BOARD || NLM_XLR_BOARD
+
+if NLM_XLP_BOARD
+config DT_XLP_EVP
+ bool "Built-in device tree for XLP EVP boards"
+ default y
+ select BUILTIN_DTB
+ help
+ Add an FDT blob for XLP EVP boards into the kernel.
+ This DTB will be used if the firmware does not pass in a DTB
+ pointer to the kernel. The corresponding DTS file is at
+ arch/mips/netlogic/dts/xlp_evp.dts
+
+config DT_XLP_SVP
+ bool "Built-in device tree for XLP SVP boards"
+ default y
+ select BUILTIN_DTB
+ help
+ Add an FDT blob for XLP SVP boards into the kernel.
+ This DTB will be used if the firmware does not pass in a DTB
+ pointer to the kernel. The corresponding DTS file is at
+ arch/mips/netlogic/dts/xlp_svp.dts
+
+config DT_XLP_FVP
+ bool "Built-in device tree for XLP FVP boards"
+ default y
+ select BUILTIN_DTB
+ help
+ Add an FDT blob for XLP FVP board into the kernel.
+ This DTB will be used if the firmware does not pass in a DTB
+ pointer to the kernel. The corresponding DTS file is at
+ arch/mips/netlogic/dts/xlp_fvp.dts
+
+config DT_XLP_GVP
+ bool "Built-in device tree for XLP GVP boards"
+ default y
+ select BUILTIN_DTB
+ help
+ Add an FDT blob for XLP GVP board into the kernel.
+ This DTB will be used if the firmware does not pass in a DTB
+ pointer to the kernel. The corresponding DTS file is at
+ arch/mips/netlogic/dts/xlp_gvp.dts
+
+config DT_XLP_RVP
+ bool "Built-in device tree for XLP RVP boards"
+ default y
+ help
+ Add an FDT blob for XLP RVP board into the kernel.
+ This DTB will be used if the firmware does not pass in a DTB
+ pointer to the kernel. The corresponding DTS file is at
+ arch/mips/netlogic/dts/xlp_rvp.dts
+
+config NLM_MULTINODE
+ bool "Support for multi-chip boards"
+ depends on NLM_XLP_BOARD
+ default n
+ help
+ Add support for boards with 2 or 4 XLPs connected over ICI.
+
+if NLM_MULTINODE
+choice
+ prompt "Number of XLPs on the board"
+ default NLM_MULTINODE_2
+ help
+ In the multi-node case, specify the number of SoCs on the board.
+
+config NLM_MULTINODE_2
+ bool "Dual-XLP board"
+ help
+ Support boards with up to two XLPs connected over ICI.
+
+config NLM_MULTINODE_4
+ bool "Quad-XLP board"
+ help
+ Support boards with up to four XLPs connected over ICI.
+
+endchoice
+
+endif
+endif
+
+config NLM_COMMON
+ bool
+
+endif
diff --git a/arch/mips/netlogic/Makefile b/arch/mips/netlogic/Makefile
new file mode 100644
index 000000000..c53561589
--- /dev/null
+++ b/arch/mips/netlogic/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NLM_COMMON) += common/
+obj-$(CONFIG_CPU_XLR) += xlr/
+obj-$(CONFIG_CPU_XLP) += xlp/
diff --git a/arch/mips/netlogic/Platform b/arch/mips/netlogic/Platform
new file mode 100644
index 000000000..4195a097f
--- /dev/null
+++ b/arch/mips/netlogic/Platform
@@ -0,0 +1,16 @@
+#
+# NETLOGIC includes
+#
+cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/mach-netlogic
+cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/netlogic
+
+#
+# fall back to mips64 if the compiler does not support -march=xlr
+#
+cflags-$(CONFIG_CPU_XLR) += $(call cc-option,-march=xlr,-march=mips64)
+cflags-$(CONFIG_CPU_XLP) += $(call cc-option,-march=xlp,-march=mips64r2)
+
+#
+# NETLOGIC processor support
+#
+load-$(CONFIG_NLM_COMMON) += 0xffffffff80100000
diff --git a/arch/mips/netlogic/common/Makefile b/arch/mips/netlogic/common/Makefile
new file mode 100644
index 000000000..89f6e3f39
--- /dev/null
+++ b/arch/mips/netlogic/common/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += irq.o time.o
+obj-y += reset.o
+obj-$(CONFIG_SMP) += smp.o smpboot.o
+obj-$(CONFIG_EARLY_PRINTK) += earlycons.o
diff --git a/arch/mips/netlogic/common/earlycons.c b/arch/mips/netlogic/common/earlycons.c
new file mode 100644
index 000000000..8f5bc1597
--- /dev/null
+++ b/arch/mips/netlogic/common/earlycons.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/types.h>
+#include <linux/serial_reg.h>
+
+#include <asm/mipsregs.h>
+#include <asm/setup.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#if defined(CONFIG_CPU_XLP)
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/uart.h>
+#elif defined(CONFIG_CPU_XLR)
+#include <asm/netlogic/xlr/iomap.h>
+#endif
+
+void prom_putchar(char c)
+{
+ uint64_t uartbase;
+
+#if defined(CONFIG_CPU_XLP)
+ uartbase = nlm_get_uart_regbase(0, 0);
+#elif defined(CONFIG_CPU_XLR)
+ uartbase = nlm_mmio_base(NETLOGIC_IO_UART_0_OFFSET);
+#endif
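+ /* poll LSR until the transmit holding register is empty, then send */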
+ while ((nlm_read_reg(uartbase, UART_LSR) & UART_LSR_THRE) == 0)
+ ;
+ nlm_write_reg(uartbase, UART_TX, c);
+}
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
new file mode 100644
index 000000000..cf33dd8a4
--- /dev/null
+++ b/arch/mips/netlogic/common/irq.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/errno.h>
+#include <asm/signal.h>
+#include <asm/ptrace.h>
+#include <asm/mipsregs.h>
+#include <asm/thread_info.h>
+
+#include <asm/netlogic/mips-extns.h>
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#if defined(CONFIG_CPU_XLP)
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#elif defined(CONFIG_CPU_XLR)
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/fmn.h>
+#else
+#error "Unknown CPU"
+#endif
+
+#ifdef CONFIG_SMP
+#define SMP_IRQ_MASK ((1ULL << IRQ_IPI_SMP_FUNCTION) | \
+ (1ULL << IRQ_IPI_SMP_RESCHEDULE))
+#else
+#define SMP_IRQ_MASK 0
+#endif
+#define PERCPU_IRQ_MASK (SMP_IRQ_MASK | (1ull << IRQ_TIMER) | \
+ (1ull << IRQ_FMN))
+
+struct nlm_pic_irq {
+ void (*extra_ack)(struct irq_data *);
+ struct nlm_soc_info *node;
+ int picirq;
+ int irt;
+ int flags;
+};
+
+static void xlp_pic_enable(struct irq_data *d)
+{
+ unsigned long flags;
+ struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
+
+ BUG_ON(!pd);
+ spin_lock_irqsave(&pd->node->piclock, flags);
+ nlm_pic_enable_irt(pd->node->picbase, pd->irt);
+ spin_unlock_irqrestore(&pd->node->piclock, flags);
+}
+
+static void xlp_pic_disable(struct irq_data *d)
+{
+ struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ BUG_ON(!pd);
+ spin_lock_irqsave(&pd->node->piclock, flags);
+ nlm_pic_disable_irt(pd->node->picbase, pd->irt);
+ spin_unlock_irqrestore(&pd->node->piclock, flags);
+}
+
+static void xlp_pic_mask_ack(struct irq_data *d)
+{
+ struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
+
+ clear_c0_eimr(pd->picirq);
+ ack_c0_eirr(pd->picirq);
+}
+
+static void xlp_pic_unmask(struct irq_data *d)
+{
+ struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
+
+ BUG_ON(!pd);
+
+ if (pd->extra_ack)
+ pd->extra_ack(d);
+
+ /* re-enable the intr on this cpu */
+ set_c0_eimr(pd->picirq);
+
+ /* Ack is a single write, no need to lock */
+ nlm_pic_ack(pd->node->picbase, pd->irt);
+}
+
+static struct irq_chip xlp_pic = {
+ .name = "XLP-PIC",
+ .irq_enable = xlp_pic_enable,
+ .irq_disable = xlp_pic_disable,
+ .irq_mask_ack = xlp_pic_mask_ack,
+ .irq_unmask = xlp_pic_unmask,
+};
+
+static void cpuintr_disable(struct irq_data *d)
+{
+ clear_c0_eimr(d->irq);
+}
+
+static void cpuintr_enable(struct irq_data *d)
+{
+ set_c0_eimr(d->irq);
+}
+
+static void cpuintr_ack(struct irq_data *d)
+{
+ ack_c0_eirr(d->irq);
+}
+
+/*
+ * Chip definition for CPU-originated interrupts (timer, msg) and
+ * IPIs
+ */
+struct irq_chip nlm_cpu_intr = {
+ .name = "XLP-CPU-INTR",
+ .irq_enable = cpuintr_enable,
+ .irq_disable = cpuintr_disable,
+ .irq_mask = cpuintr_disable,
+ .irq_ack = cpuintr_ack,
+ .irq_eoi = cpuintr_enable,
+};
+
+static void __init nlm_init_percpu_irqs(void)
+{
+ int i;
+
+ for (i = 0; i < PIC_IRT_FIRST_IRQ; i++)
+ irq_set_chip_and_handler(i, &nlm_cpu_intr, handle_percpu_irq);
+#ifdef CONFIG_SMP
+ irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr,
+ nlm_smp_function_ipi_handler);
+ irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr,
+ nlm_smp_resched_ipi_handler);
+#endif
+}
+
+
+void nlm_setup_pic_irq(int node, int picirq, int irq, int irt)
+{
+ struct nlm_pic_irq *pic_data;
+ int xirq;
+
+ xirq = nlm_irq_to_xirq(node, irq);
+ pic_data = kzalloc(sizeof(*pic_data), GFP_KERNEL);
+ BUG_ON(pic_data == NULL);
+ pic_data->irt = irt;
+ pic_data->picirq = picirq;
+ pic_data->node = nlm_get_node(node);
+ irq_set_chip_and_handler(xirq, &xlp_pic, handle_level_irq);
+ irq_set_chip_data(xirq, pic_data);
+}
+
+void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *))
+{
+ struct nlm_pic_irq *pic_data;
+ int xirq;
+
+ xirq = nlm_irq_to_xirq(node, irq);
+ pic_data = irq_get_chip_data(xirq);
+ if (WARN_ON(!pic_data))
+ return;
+ pic_data->extra_ack = xack;
+}
+
+static void nlm_init_node_irqs(int node)
+{
+ struct nlm_soc_info *nodep;
+ int i, irt;
+
+ pr_info("Init IRQ for node %d\n", node);
+ nodep = nlm_get_node(node);
+ nodep->irqmask = PERCPU_IRQ_MASK;
+ for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ; i++) {
+ irt = nlm_irq_to_irt(i);
+ if (irt == -1) /* unused irq */
+ continue;
+ nodep->irqmask |= 1ull << i;
+ if (irt == -2) /* not a direct PIC irq */
+ continue;
+
+ nlm_pic_init_irt(nodep->picbase, irt, i,
+ node * nlm_threads_per_node(), 0);
+ nlm_setup_pic_irq(node, i, i, irt);
+ }
+}
+
+void nlm_smp_irq_init(int hwtid)
+{
+ int cpu, node;
+
+ cpu = hwtid % nlm_threads_per_node();
+ node = hwtid / nlm_threads_per_node();
+
+ if (cpu == 0 && node != 0)
+ nlm_init_node_irqs(node);
+ write_c0_eimr(nlm_get_node(node)->irqmask);
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ uint64_t eirr;
+ int i, node;
+
+ node = nlm_nodeid();
+ eirr = read_c0_eirr_and_eimr();
+ if (eirr == 0)
+ return;
+
+ i = __ffs64(eirr);
+ /* per-CPU IRQs don't need translation */
+ if (i < PIC_IRQ_BASE) {
+ do_IRQ(i);
+ return;
+ }
+
+#if defined(CONFIG_PCI_MSI) && defined(CONFIG_CPU_XLP)
+ /* PCI interrupts need a second level dispatch for MSI bits */
+ if (i >= PIC_PCIE_LINK_MSI_IRQ(0) && i <= PIC_PCIE_LINK_MSI_IRQ(3)) {
+ nlm_dispatch_msi(node, i);
+ return;
+ }
+ if (i >= PIC_PCIE_MSIX_IRQ(0) && i <= PIC_PCIE_MSIX_IRQ(3)) {
+ nlm_dispatch_msix(node, i);
+ return;
+ }
+
+#endif
+ /* top level irq handling */
+ do_IRQ(nlm_irq_to_xirq(node, i));
+}
+
+#ifdef CONFIG_CPU_XLP
+static const struct irq_domain_ops xlp_pic_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static int __init xlp_of_pic_init(struct device_node *node,
+ struct device_node *parent)
+{
+ const int n_picirqs = PIC_IRT_LAST_IRQ - PIC_IRQ_BASE + 1;
+ struct irq_domain *xlp_pic_domain;
+ struct resource res;
+ int socid, ret, bus;
+
+ /* we need a hack to get the PIC's SoC chip id */
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret < 0) {
+ pr_err("PIC %pOFn: reg property not found!\n", node);
+ return -EINVAL;
+ }
+
+ if (cpu_is_xlp9xx()) {
+ bus = (res.start >> 20) & 0xf;
+ for (socid = 0; socid < NLM_NR_NODES; socid++) {
+ if (!nlm_node_present(socid))
+ continue;
+ if (nlm_get_node(socid)->socbus == bus)
+ break;
+ }
+ if (socid == NLM_NR_NODES) {
+ pr_err("PIC %pOFn: Node mapping for bus %d not found!\n",
+ node, bus);
+ return -EINVAL;
+ }
+ } else {
+ socid = (res.start >> 18) & 0x3;
+ if (!nlm_node_present(socid)) {
+ pr_err("PIC %pOFn: node %d does not exist!\n",
+ node, socid);
+ return -EINVAL;
+ }
+ }
+
+ xlp_pic_domain = irq_domain_add_legacy(node, n_picirqs,
+ nlm_irq_to_xirq(socid, PIC_IRQ_BASE), PIC_IRQ_BASE,
+ &xlp_pic_irq_domain_ops, NULL);
+ if (xlp_pic_domain == NULL) {
+ pr_err("PIC %pOFn: Creating legacy domain failed!\n", node);
+ return -EINVAL;
+ }
+ pr_info("Node %d: IRQ domain created for PIC@%pR\n", socid, &res);
+ return 0;
+}
+
+static struct of_device_id __initdata xlp_pic_irq_ids[] = {
+ { .compatible = "netlogic,xlp-pic", .data = xlp_of_pic_init },
+ {},
+};
+#endif
+
+void __init arch_init_irq(void)
+{
+ /* Initialize the irq descriptors */
+ nlm_init_percpu_irqs();
+ nlm_init_node_irqs(0);
+ write_c0_eimr(nlm_current_node()->irqmask);
+#if defined(CONFIG_CPU_XLR)
+ nlm_setup_fmn_irq();
+#endif
+#ifdef CONFIG_CPU_XLP
+ of_irq_init(xlp_pic_irq_ids);
+#endif
+}
diff --git a/arch/mips/netlogic/common/reset.S b/arch/mips/netlogic/common/reset.S
new file mode 100644
index 000000000..c474981a6
--- /dev/null
+++ b/arch/mips/netlogic/common/reset.S
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2003-2013 Broadcom Corporation.
+ * All Rights Reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cpu.h>
+#include <asm/cacheops.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asmmacro.h>
+#include <asm/addrspace.h>
+
+#include <asm/netlogic/common.h>
+
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+#include <asm/netlogic/xlp-hal/cpucontrol.h>
+
+#define SYS_CPU_COHERENT_BASE CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
+ XLP_IO_SYS_OFFSET(0) + XLP_IO_PCI_HDRSZ + \
+ SYS_CPU_NONCOHERENT_MODE * 4
+
+/* Enable XLP features and workarounds in the LSU */
+.macro xlp_config_lsu
+ li t0, LSU_DEFEATURE
+ mfcr t1, t0
+
+ lui t2, 0x4080 /* Enable Unaligned Access, L2HPE */
+ or t1, t1, t2
+ mtcr t1, t0
+
+ li t0, ICU_DEFEATURE
+ mfcr t1, t0
+ ori t1, 0x1000 /* Enable Icache partitioning */
+ mtcr t1, t0
+
+ li t0, SCHED_DEFEATURE
+ lui t1, 0x0100 /* Disable BRU accepting ALU ops */
+ mtcr t1, t0
+.endm
+
+/*
+ * Allow access to physical mem >64G by enabling ELPA in PAGEGRAIN
+ * register. This is needed before going to C code since the SP can
+ * be in this region. Called from all HW threads.
+ */
+.macro xlp_early_mmu_init
+ mfc0 t0, CP0_PAGEMASK, 1
+ li t1, (1 << 29) /* ELPA bit */
+ or t0, t1
+ mtc0 t0, CP0_PAGEMASK, 1
+.endm
+
+/*
+ * L1D cache has to be flushed before enabling threads in XLP.
+ * On XLP8xx/XLP3xx, we do a low level flush using processor control
+ * registers. On XLPII CPUs, usual cache instructions work.
+ */
+.macro xlp_flush_l1_dcache
+ mfc0 t0, CP0_PRID
+ andi t0, t0, PRID_IMP_MASK
+ slt t1, t0, 0x1200
+ beqz t1, 15f
+ nop
+
+ /* XLP8xx low level cache flush */
+ li t0, LSU_DEBUG_DATA0
+ li t1, LSU_DEBUG_ADDR
+ li t2, 0 /* index */
+ li t3, 0x1000 /* loop count */
+11:
+ sll v0, t2, 5
+ mtcr zero, t0
+ ori v1, v0, 0x3 /* way0 | write_enable | write_active */
+ mtcr v1, t1
+12:
+ mfcr v1, t1
+ andi v1, 0x1 /* wait for write_active == 0 */
+ bnez v1, 12b
+ nop
+ mtcr zero, t0
+ ori v1, v0, 0x7 /* way1 | write_enable | write_active */
+ mtcr v1, t1
+13:
+ mfcr v1, t1
+ andi v1, 0x1 /* wait for write_active == 0 */
+ bnez v1, 13b
+ nop
+ addi t2, 1
+ bne t3, t2, 11b
+ nop
+ b 17f
+ nop
+
+ /* XLPII CPUs, Invalidate all 64k of L1 D-cache */
+15:
+ li t0, 0x80000000
+ li t1, 0x80010000
+16: cache Index_Writeback_Inv_D, 0(t0)
+ addiu t0, t0, 32
+ bne t0, t1, 16b
+ nop
+17:
+.endm
+
+/*
+ * nlm_reset_entry will be copied to the reset entry point for
+ * XLR and XLP. The XLP cores start here when they are woken up. This
+ * is also the NMI entry point.
+ *
+ * We use scratch reg 6/7 to save k0/k1 and check for NMI first.
+ *
+ * The data corresponding to reset/NMI is stored at RESET_DATA_PHYS
+ * location; this will have the thread mask (used when the core is woken up)
+ * and the current NMI handler in case we reached here for an NMI.
+ *
+ * When a core or thread is newly woken up, it marks itself ready and
+ * loops in a 'wait'. When the CPU really needs waking up, we send an NMI
+ * IPI to it, with the NMI handler set to prom_boot_secondary_cpus
+ */
+ .set noreorder
+ .set noat
+ .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */
+
+FEXPORT(nlm_reset_entry)
+ dmtc0 k0, $22, 6
+ dmtc0 k1, $22, 7
+ mfc0 k0, CP0_STATUS
+ li k1, 0x80000
+ and k1, k0, k1
+ beqz k1, 1f /* go to real reset entry */
+ nop
+ li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
+ ld k0, BOOT_NMI_HANDLER(k1)
+ jr k0
+ nop
+
+1: /* Entry point on core wakeup */
+ mfc0 t0, CP0_PRID /* processor ID */
+ andi t0, PRID_IMP_MASK
+ li t1, 0x1500 /* XLP 9xx */
+ beq t0, t1, 2f /* does not need to set coherent */
+ nop
+
+ li t1, 0x1300 /* XLP 5xx */
+ beq t0, t1, 2f /* does not need to set coherent */
+ nop
+
+ /* set bit in SYS coherent register for the core */
+ mfc0 t0, CP0_EBASE
+ mfc0 t1, CP0_EBASE
+ srl t1, 5
+ andi t1, 0x3 /* t1 <- node */
+ li t2, 0x40000
+ mul t3, t2, t1 /* t3 = node * 0x40000 */
+ srl t0, t0, 2
+ and t0, t0, 0x7 /* t0 <- core */
+ li t1, 0x1
+ sll t0, t1, t0
+ nor t0, t0, zero /* t0 <- ~(1 << core) */
+ li t2, SYS_CPU_COHERENT_BASE
+ add t2, t2, t3 /* t2 <- SYS offset for node */
+ lw t1, 0(t2)
+ and t1, t1, t0
+ sw t1, 0(t2)
+
+ /* read back to ensure complete */
+ lw t1, 0(t2)
+ sync
+
+2:
+ /* Configure LSU on Non-0 Cores. */
+ xlp_config_lsu
+ /* FALL THROUGH */
+
+/*
+ * Wake up sibling threads from the initial thread in a core.
+ */
+EXPORT(nlm_boot_siblings)
+ /* core L1D flush before enable threads */
+ xlp_flush_l1_dcache
+ /* save ra and sp, will be used later (only for boot cpu) */
+ dmtc0 ra, $22, 6
+ dmtc0 sp, $22, 7
+ /* Enable hw threads by writing to MAP_THREADMODE of the core */
+ li t0, CKSEG1ADDR(RESET_DATA_PHYS)
+ lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */
+ li t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE)
+ mfcr t2, t0
+ or t2, t2, t1
+ mtcr t2, t0
+
+ /*
+ * The new hardware thread starts at the next instruction.
+ * For all the cases other than core 0 thread 0, we will
+ * jump to the secondary wait function.
+ *
+ * NOTE: All GPR contents are lost after the mtcr above!
+ */
+ mfc0 v0, CP0_EBASE
+ andi v0, 0x3ff /* v0 <- node/core */
+
+ /*
+ * Errata: to avoid potential live lock, setup IFU_BRUB_RESERVE
+ * when running 4 threads per core
+ */
+ andi v1, v0, 0x3 /* v1 <- thread id */
+ bnez v1, 2f
+ nop
+
+ /* thread 0 of each core. */
+ li t0, CKSEG1ADDR(RESET_DATA_PHYS)
+ lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */
+ subu t1, 0x3 /* 4-thread per core mode? */
+ bnez t1, 2f
+ nop
+
+ li t0, IFU_BRUB_RESERVE
+ li t1, 0x55
+ mtcr t1, t0
+ _ehb
+2:
+ beqz v0, 4f /* boot cpu (cpuid == 0)? */
+ nop
+
+ /* setup status reg */
+ move t1, zero
+#ifdef CONFIG_64BIT
+ ori t1, ST0_KX
+#endif
+ mtc0 t1, CP0_STATUS
+
+ xlp_early_mmu_init
+
+ /* mark CPU ready */
+ li t3, CKSEG1ADDR(RESET_DATA_PHYS)
+ ADDIU t1, t3, BOOT_CPU_READY
+ sll v1, v0, 2
+ PTR_ADDU t1, v1
+ li t2, 1
+ sw t2, 0(t1)
+ /* Wait until NMI hits */
+3: wait
+ b 3b
+ nop
+
+ /*
+ * For the boot CPU, we have to restore ra and sp and return, rest
+ * of the registers will be restored by the caller
+ */
+4:
+ dmfc0 ra, $22, 6
+ dmfc0 sp, $22, 7
+ jr ra
+ nop
+EXPORT(nlm_reset_entry_end)
+
+LEAF(nlm_init_boot_cpu)
+#ifdef CONFIG_CPU_XLP
+ xlp_config_lsu
+ xlp_early_mmu_init
+#endif
+ jr ra
+ nop
+END(nlm_init_boot_cpu)
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
new file mode 100644
index 000000000..39a300bd6
--- /dev/null
+++ b/arch/mips/netlogic/common/smp.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/sched/task_stack.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/mmu_context.h>
+
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/mips-extns.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#if defined(CONFIG_CPU_XLP)
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#elif defined(CONFIG_CPU_XLR)
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/xlr.h>
+#else
+#error "Unknown CPU"
+#endif
+
+void nlm_send_ipi_single(int logical_cpu, unsigned int action)
+{
+ unsigned int hwtid;
+ uint64_t picbase;
+
+ /* node id is part of hwtid, and needed for send_ipi */
+ hwtid = cpu_logical_map(logical_cpu);
+ picbase = nlm_get_node(nlm_hwtid_to_node(hwtid))->picbase;
+
+ if (action & SMP_CALL_FUNCTION)
+ nlm_pic_send_ipi(picbase, hwtid, IRQ_IPI_SMP_FUNCTION, 0);
+ if (action & SMP_RESCHEDULE_YOURSELF)
+ nlm_pic_send_ipi(picbase, hwtid, IRQ_IPI_SMP_RESCHEDULE, 0);
+}
+
+void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ nlm_send_ipi_single(cpu, action);
+ }
+}
+
+/* IRQ_IPI_SMP_FUNCTION Handler */
+void nlm_smp_function_ipi_handler(struct irq_desc *desc)
+{
+ unsigned int irq = irq_desc_get_irq(desc);
+ clear_c0_eimr(irq);
+ ack_c0_eirr(irq);
+ generic_smp_call_function_interrupt();
+ set_c0_eimr(irq);
+}
+
+/* IRQ_IPI_SMP_RESCHEDULE handler */
+void nlm_smp_resched_ipi_handler(struct irq_desc *desc)
+{
+ unsigned int irq = irq_desc_get_irq(desc);
+ clear_c0_eimr(irq);
+ ack_c0_eirr(irq);
+ scheduler_ipi();
+ set_c0_eimr(irq);
+}
+
+/*
+ * Called before going into mips code, early cpu init
+ */
+void nlm_early_init_secondary(int cpu)
+{
+ change_c0_config(CONF_CM_CMASK, 0x3);
+#ifdef CONFIG_CPU_XLP
+ xlp_mmu_init();
+#endif
+ write_c0_ebase(nlm_current_node()->ebase);
+}
+
+/*
+ * Code to run on secondary just after probing the CPU
+ */
+static void nlm_init_secondary(void)
+{
+ int hwtid;
+
+ hwtid = hard_smp_processor_id();
+ cpu_set_core(&current_cpu_data, hwtid / NLM_THREADS_PER_CORE);
+ current_cpu_data.package = nlm_nodeid();
+ nlm_percpu_init(hwtid);
+ nlm_smp_irq_init(hwtid);
+}
+
+void nlm_prepare_cpus(unsigned int max_cpus)
+{
+ /* declare we are SMT capable */
+ smp_num_siblings = nlm_threads_per_core;
+}
+
+void nlm_smp_finish(void)
+{
+ local_irq_enable();
+}
+
+/*
+ * Boot all other cpus in the system, initialize them, and bring them into
+ * the boot function
+ */
+unsigned long nlm_next_gp;
+unsigned long nlm_next_sp;
+static cpumask_t phys_cpu_present_mask;
+
+int nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
+{
+ uint64_t picbase;
+ int hwtid;
+
+ hwtid = cpu_logical_map(logical_cpu);
+ picbase = nlm_get_node(nlm_hwtid_to_node(hwtid))->picbase;
+
+ nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
+ nlm_next_gp = (unsigned long)task_thread_info(idle);
+
+ /* barrier for sp/gp store above */
+ __sync();
+ nlm_pic_send_ipi(picbase, hwtid, 1, 1); /* NMI */
+
+ return 0;
+}
+
+void __init nlm_smp_setup(void)
+{
+ unsigned int boot_cpu;
+ int num_cpus, i, ncore, node;
+ volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);
+
+ boot_cpu = hard_smp_processor_id();
+ cpumask_clear(&phys_cpu_present_mask);
+
+ cpumask_set_cpu(boot_cpu, &phys_cpu_present_mask);
+ __cpu_number_map[boot_cpu] = 0;
+ __cpu_logical_map[0] = boot_cpu;
+ set_cpu_possible(0, true);
+
+ num_cpus = 1;
+ for (i = 0; i < NR_CPUS; i++) {
+ /*
+ * cpu_ready array is not set for the boot_cpu,
+ * it is only set for ASPs (see smpboot.S)
+ */
+ if (cpu_ready[i]) {
+ cpumask_set_cpu(i, &phys_cpu_present_mask);
+ __cpu_number_map[i] = num_cpus;
+ __cpu_logical_map[num_cpus] = i;
+ set_cpu_possible(num_cpus, true);
+ node = nlm_hwtid_to_node(i);
+ cpumask_set_cpu(num_cpus, &nlm_get_node(node)->cpumask);
+ ++num_cpus;
+ }
+ }
+
+ pr_info("Physical CPU mask: %*pb\n",
+ cpumask_pr_args(&phys_cpu_present_mask));
+ pr_info("Possible CPU mask: %*pb\n",
+ cpumask_pr_args(cpu_possible_mask));
+
+ /* check with the cores we have woken up */
+ for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
+ ncore += hweight32(nlm_get_node(i)->coremask);
+
+ pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
+ nlm_threads_per_core, num_cpus);
+
+ /* switch NMI handler to boot CPUs */
+ nlm_set_nmi_handler(nlm_boot_secondary_cpus);
+}
+
+static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
+{
+ uint32_t core0_thr_mask, core_thr_mask;
+ int threadmode, i, j;
+
+ core0_thr_mask = 0;
+ for (i = 0; i < NLM_THREADS_PER_CORE; i++)
+ if (cpumask_test_cpu(i, wakeup_mask))
+ core0_thr_mask |= (1 << i);
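+ /*
+ * Thread masks 0x1, 0x3 and 0xf correspond to 1, 2 and 4 threads
+ * per core; the resulting threadmode value is stored at
+ * BOOT_THREAD_MODE and later programmed into the core's
+ * MAP_THREADMODE register (see nlm_boot_siblings in reset.S).
+ */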
+ switch (core0_thr_mask) {
+ case 1:
+ nlm_threads_per_core = 1;
+ threadmode = 0;
+ break;
+ case 3:
+ nlm_threads_per_core = 2;
+ threadmode = 2;
+ break;
+ case 0xf:
+ nlm_threads_per_core = 4;
+ threadmode = 3;
+ break;
+ default:
+ goto unsupp;
+ }
+
+ /* Verify other cores CPU masks */
+ for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) {
+ core_thr_mask = 0;
+ for (j = 0; j < NLM_THREADS_PER_CORE; j++)
+ if (cpumask_test_cpu(i + j, wakeup_mask))
+ core_thr_mask |= (1 << j);
+ if (core_thr_mask != 0 && core_thr_mask != core0_thr_mask)
+ goto unsupp;
+ }
+ return threadmode;
+
+unsupp:
+ panic("Unsupported CPU mask %*pb", cpumask_pr_args(wakeup_mask));
+ return 0;
+}
+
+int nlm_wakeup_secondary_cpus(void)
+{
+ u32 *reset_data;
+ int threadmode;
+
+ /* verify the mask and setup core config variables */
+ threadmode = nlm_parse_cpumask(&nlm_cpumask);
+
+ /* Setup CPU init parameters */
+ reset_data = nlm_get_boot_data(BOOT_THREAD_MODE);
+ *reset_data = threadmode;
+
+#ifdef CONFIG_CPU_XLP
+ xlp_wakeup_secondary_cpus();
+#else
+ xlr_wakeup_secondary_cpus();
+#endif
+ return 0;
+}
+
+const struct plat_smp_ops nlm_smp_ops = {
+ .send_ipi_single = nlm_send_ipi_single,
+ .send_ipi_mask = nlm_send_ipi_mask,
+ .init_secondary = nlm_init_secondary,
+ .smp_finish = nlm_smp_finish,
+ .boot_secondary = nlm_boot_secondary,
+ .smp_setup = nlm_smp_setup,
+ .prepare_cpus = nlm_prepare_cpus,
+};
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
new file mode 100644
index 000000000..509c1a7e7
--- /dev/null
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asmmacro.h>
+#include <asm/addrspace.h>
+
+#include <asm/netlogic/common.h>
+
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+#include <asm/netlogic/xlp-hal/cpucontrol.h>
+
+ .set noreorder
+ .set noat
+ .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */
+
+/* Called by the boot cpu to wake up its sibling threads */
+NESTED(xlp_boot_core0_siblings, PT_SIZE, sp)
+ /* CPU register contents lost when enabling threads, save them first */
+ SAVE_ALL
+ sync
+ /* find the location to which nlm_boot_siblings was relocated */
+ li t0, CKSEG1ADDR(RESET_VEC_PHYS)
+ PTR_LA t1, nlm_reset_entry
+ PTR_LA t2, nlm_boot_siblings
+ dsubu t2, t1
+ daddu t2, t0
+ /* call it */
+ jalr t2
+ nop
+ RESTORE_ALL
+ jr ra
+ nop
+END(xlp_boot_core0_siblings)
+
+NESTED(nlm_boot_secondary_cpus, 16, sp)
+ /* Initialize CP0 Status */
+ move t1, zero
+#ifdef CONFIG_64BIT
+ ori t1, ST0_KX
+#endif
+ mtc0 t1, CP0_STATUS
+ PTR_LA t1, nlm_next_sp
+ PTR_L sp, 0(t1)
+ PTR_LA t1, nlm_next_gp
+ PTR_L gp, 0(t1)
+
+ /* a0 has the processor id */
+ mfc0 a0, CP0_EBASE
+ andi a0, 0x3ff /* a0 <- node/core */
+ PTR_LA t0, nlm_early_init_secondary
+ jalr t0
+ nop
+
+ PTR_LA t0, smp_bootstrap
+ jr t0
+ nop
+END(nlm_boot_secondary_cpus)
+
+/*
+ * In case of the RMIboot bootloader, which is used on XLR boards, the
+ * CPUs will already be woken up and waiting in bootloader code.
+ * This will get them out of the bootloader code and into Linux. Needed
+ * because the bootloader area will be taken over and initialized by Linux.
+ */
+NESTED(nlm_rmiboot_preboot, 16, sp)
+ mfc0 t0, $15, 1 /* read ebase */
+ andi t0, 0x1f /* t0 has the processor_id() */
+ andi t2, t0, 0x3 /* thread num */
+ sll t0, 2 /* offset in cpu array */
+
+ li t3, CKSEG1ADDR(RESET_DATA_PHYS)
+ ADDIU t1, t3, BOOT_CPU_READY
+ ADDU t1, t0
+ li t3, 1
+ sw t3, 0(t1)
+
+ bnez t2, 1f /* skip thread programming */
+ nop /* for thread id != 0 */
+
+ /*
+ * XLR MMU setup only for first thread in core
+ */
+ li t0, 0x400
+ mfcr t1, t0
+ li t2, 6 /* XLR thread mode mask */
+ nor t3, t2, zero
+ and t2, t1, t2 /* t2 - current thread mode */
+ li v0, CKSEG1ADDR(RESET_DATA_PHYS)
+ lw v1, BOOT_THREAD_MODE(v0) /* v1 - new thread mode */
+ sll v1, 1
+ beq v1, t2, 1f /* same as request value */
+ nop /* nothing to do */
+
+ and t2, t1, t3 /* mask out old thread mode */
+ or t1, t2, v1 /* put in new value */
+ mtcr t1, t0 /* update core control */
+
+ /* wait for NMI to hit */
+1: wait
+ b 1b
+ nop
+END(nlm_rmiboot_preboot)
diff --git a/arch/mips/netlogic/common/time.c b/arch/mips/netlogic/common/time.c
new file mode 100644
index 000000000..cbbf0d482
--- /dev/null
+++ b/arch/mips/netlogic/common/time.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+
+#include <asm/time.h>
+#include <asm/cpu-features.h>
+
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
+
+#if defined(CONFIG_CPU_XLP)
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#elif defined(CONFIG_CPU_XLR)
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/xlr.h>
+#else
+#error "Unknown CPU"
+#endif
+
+unsigned int get_c0_compare_int(void)
+{
+ return IRQ_TIMER;
+}
+
+static u64 nlm_get_pic_timer(struct clocksource *cs)
+{
+ uint64_t picbase = nlm_get_node(0)->picbase;
+
+ return ~nlm_pic_read_timer(picbase, PIC_CLOCK_TIMER);
+}
+
+static u64 nlm_get_pic_timer32(struct clocksource *cs)
+{
+ uint64_t picbase = nlm_get_node(0)->picbase;
+
+ return ~nlm_pic_read_timer32(picbase, PIC_CLOCK_TIMER);
+}
+
+static struct clocksource csrc_pic = {
+ .name = "PIC",
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void nlm_init_pic_timer(void)
+{
+ uint64_t picbase = nlm_get_node(0)->picbase;
+ u32 picfreq;
+
+ nlm_pic_set_timer(picbase, PIC_CLOCK_TIMER, ~0ULL, 0, 0);
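+ /* XLR's PIC timer is read as a 32-bit counter; other chips use the full 64 bits */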
+ if (current_cpu_data.cputype == CPU_XLR) {
+ csrc_pic.mask = CLOCKSOURCE_MASK(32);
+ csrc_pic.read = nlm_get_pic_timer32;
+ } else {
+ csrc_pic.mask = CLOCKSOURCE_MASK(64);
+ csrc_pic.read = nlm_get_pic_timer;
+ }
+ csrc_pic.rating = 1000;
+ picfreq = pic_timer_freq();
+ clocksource_register_hz(&csrc_pic, picfreq);
+ pr_info("PIC clock source added, frequency %d\n", picfreq);
+}
+
+void __init plat_time_init(void)
+{
+ nlm_init_pic_timer();
+ mips_hpt_frequency = nlm_get_cpu_frequency();
+ if (current_cpu_type() == CPU_XLR)
+ preset_lpj = mips_hpt_frequency / (3 * HZ);
+ else
+ preset_lpj = mips_hpt_frequency / (2 * HZ);
+ pr_info("MIPS counter frequency [%ld]\n",
+ (unsigned long)mips_hpt_frequency);
+}
diff --git a/arch/mips/netlogic/xlp/Makefile b/arch/mips/netlogic/xlp/Makefile
new file mode 100644
index 000000000..d62465717
--- /dev/null
+++ b/arch/mips/netlogic/xlp/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += setup.o nlm_hal.o cop2-ex.o dt.o
+obj-$(CONFIG_SMP) += wakeup.o
+ifdef CONFIG_USB
+obj-y += usb-init.o
+obj-y += usb-init-xlp2.o
+endif
+ifdef CONFIG_SATA_AHCI
+obj-y += ahci-init.o
+obj-y += ahci-init-xlp2.o
+endif
diff --git a/arch/mips/netlogic/xlp/ahci-init-xlp2.c b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
new file mode 100644
index 000000000..c11b9c7dc
--- /dev/null
+++ b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2003-2014 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/bitops.h>
+#include <linux/pci_ids.h>
+#include <linux/nodemask.h>
+
+#include <asm/cpu.h>
+#include <asm/mipsregs.h>
+
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/mips-extns.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/iomap.h>
+
+#define SATA_CTL 0x0
+#define SATA_STATUS 0x1 /* Status Reg */
+#define SATA_INT 0x2 /* Interrupt Reg */
+#define SATA_INT_MASK 0x3 /* Interrupt Mask Reg */
+#define SATA_BIU_TIMEOUT 0x4
+#define AXIWRSPERRLOG 0x5
+#define AXIRDSPERRLOG 0x6
+#define BiuTimeoutLow 0x7
+#define BiuTimeoutHi 0x8
+#define BiuSlvErLow 0x9
+#define BiuSlvErHi 0xa
+#define IO_CONFIG_SWAP_DIS 0xb
+#define CR_REG_TIMER 0xc
+#define CORE_ID 0xd
+#define AXI_SLAVE_OPT1 0xe
+#define PHY_MEM_ACCESS 0xf
+#define PHY0_CNTRL 0x10
+#define PHY0_STAT 0x11
+#define PHY0_RX_ALIGN 0x12
+#define PHY0_RX_EQ_LO 0x13
+#define PHY0_RX_EQ_HI 0x14
+#define PHY0_BIST_LOOP 0x15
+#define PHY1_CNTRL 0x16
+#define PHY1_STAT 0x17
+#define PHY1_RX_ALIGN 0x18
+#define PHY1_RX_EQ_LO 0x19
+#define PHY1_RX_EQ_HI 0x1a
+#define PHY1_BIST_LOOP 0x1b
+#define RdExBase 0x1c
+#define RdExLimit 0x1d
+#define CacheAllocBase 0x1e
+#define CacheAllocLimit 0x1f
+#define BiuSlaveCmdGstNum 0x20
+
+/* SATA_CTL Bits */
+#define SATA_RST_N BIT(0) /* Active low reset sata_core phy */
+#define SataCtlReserve0 BIT(1)
+#define M_CSYSREQ BIT(2) /* AXI master low power, not used */
+#define S_CSYSREQ BIT(3) /* AXI slave low power, not used */
+#define P0_CP_DET BIT(8) /* Reserved, bring in from pad */
+#define P0_MP_SW BIT(9) /* Mech Switch */
+#define P0_DISABLE BIT(10) /* disable p0 */
+#define P0_ACT_LED_EN BIT(11) /* Active LED enable */
+#define P0_IRST_HARD_SYNTH BIT(12) /* PHY hard synth reset */
+#define P0_IRST_HARD_TXRX BIT(13) /* PHY lane hard reset */
+#define P0_IRST_POR BIT(14) /* PHY power on reset*/
+#define P0_IPDTXL BIT(15) /* PHY Tx lane dis/power down */
+#define P0_IPDRXL BIT(16) /* PHY Rx lane dis/power down */
+#define P0_IPDIPDMSYNTH		BIT(17)	/* PHY synthesizer dis/power down */
+#define P0_CP_POD_EN BIT(18) /* CP_POD enable */
+#define P0_AT_BYPASS		BIT(19)	/* P0 address translation bypass */
+#define P1_CP_DET		BIT(20)	/* Reserved, Cold Detect */
+#define P1_MP_SW BIT(21) /* Mech Switch */
+#define P1_DISABLE BIT(22) /* disable p1 */
+#define P1_ACT_LED_EN BIT(23) /* Active LED enable */
+#define P1_IRST_HARD_SYNTH BIT(24) /* PHY hard synth reset */
+#define P1_IRST_HARD_TXRX BIT(25) /* PHY lane hard reset */
+#define P1_IRST_POR BIT(26) /* PHY power on reset*/
+#define P1_IPDTXL		BIT(27)	/* PHY Tx lane dis/power down */
+#define P1_IPDRXL		BIT(28)	/* PHY Rx lane dis/power down */
+#define P1_IPDIPDMSYNTH		BIT(29)	/* PHY synthesizer dis/power down */
+#define P1_CP_POD_EN BIT(30)
+#define P1_AT_BYPASS		BIT(31)	/* P1 address translation bypass */
+
+/* Status register */
+#define M_CACTIVE BIT(0) /* m_cactive, not used */
+#define S_CACTIVE BIT(1) /* s_cactive, not used */
+#define P0_PHY_READY BIT(8) /* phy is ready */
+#define P0_CP_POD BIT(9) /* Cold PowerOn */
+#define P0_SLUMBER BIT(10) /* power mode slumber */
+#define P0_PATIAL		BIT(11)	/* power mode partial */
+#define P0_PHY_SIG_DET		BIT(12)	/* phy signal detect */
+#define P0_PHY_CALI BIT(13) /* phy calibration done */
+#define P1_PHY_READY BIT(16) /* phy is ready */
+#define P1_CP_POD BIT(17) /* Cold PowerOn */
+#define P1_SLUMBER BIT(18) /* power mode slumber */
+#define P1_PATIAL		BIT(19)	/* power mode partial */
+#define P1_PHY_SIG_DET		BIT(20)	/* phy signal detect */
+#define P1_PHY_CALI BIT(21) /* phy calibration done */
+
+/* SATA CR_REG_TIMER bits */
+#define CR_TIME_SCALE (0x1000 << 0)
+
+/* SATA PHY specific registers start and end address */
+#define RXCDRCALFOSC0 0x0065
+#define CALDUTY 0x006e
+#define RXDPIF 0x8065
+#define PPMDRIFTMAX_HI 0x80A4
+
+#define nlm_read_sata_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_sata_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_sata_pcibase(node) \
+ nlm_pcicfg_base(XLP9XX_IO_SATA_OFFSET(node))
+#define nlm_get_sata_regbase(node) \
+ (nlm_get_sata_pcibase(node) + 0x100)
+
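+/*
+ * The register offsets above are 32-bit word indices into the SATA
+ * glue register block, which starts 0x100 bytes into the controller's
+ * PCI configuration space.
+ */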
+/* SATA PHY config for register block 1 0x0065 .. 0x006e */
+static const u8 sata_phy_config1[] = {
+ 0xC9, 0xC9, 0x07, 0x07, 0x18, 0x18, 0x01, 0x01, 0x22, 0x00
+};
+
+/* SATA PHY config for register block 2 0x8065 .. 0x80A4 */
+static const u8 sata_phy_config2[] = {
+ 0xAA, 0x00, 0x4C, 0xC9, 0xC9, 0x07, 0x07, 0x18,
+ 0x18, 0x05, 0x0C, 0x10, 0x00, 0x10, 0x00, 0xFF,
+ 0xCF, 0xF7, 0xE1, 0xF5, 0xFD, 0xFD, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xE3, 0xE7, 0xDB, 0xF5, 0xFD, 0xFD,
+ 0xF5, 0xF5, 0xFF, 0xFF, 0xE3, 0xE7, 0xDB, 0xF5,
+ 0xFD, 0xFD, 0xF5, 0xF5, 0xFF, 0xFF, 0xFF, 0xF5,
+ 0x3F, 0x00, 0x32, 0x00, 0x03, 0x01, 0x05, 0x05,
+ 0x04, 0x00, 0x00, 0x08, 0x04, 0x00, 0x00, 0x04,
+};
+
+const int sata_phy_debug = 0; /* set to verify PHY writes */
+
+static void sata_clear_glue_reg(u64 regbase, u32 off, u32 bit)
+{
+ u32 reg_val;
+
+ reg_val = nlm_read_sata_reg(regbase, off);
+ nlm_write_sata_reg(regbase, off, (reg_val & ~bit));
+}
+
+static void sata_set_glue_reg(u64 regbase, u32 off, u32 bit)
+{
+ u32 reg_val;
+
+ reg_val = nlm_read_sata_reg(regbase, off);
+ nlm_write_sata_reg(regbase, off, (reg_val | bit));
+}
+
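+/*
+ * PHY registers are accessed indirectly through PHY_MEM_ACCESS: bit 31
+ * starts a write, the PHY select goes at bit 24, write data in bits
+ * [23:16] and the register address in the low bits.  Read data comes
+ * back in bits [23:16] after the access delay.
+ */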
+static void write_phy_reg(u64 regbase, u32 addr, u32 physel, u8 data)
+{
+ nlm_write_sata_reg(regbase, PHY_MEM_ACCESS,
+ (1u << 31) | (physel << 24) | (data << 16) | addr);
+ udelay(850);
+}
+
+static u8 read_phy_reg(u64 regbase, u32 addr, u32 physel)
+{
+ u32 val;
+
+ nlm_write_sata_reg(regbase, PHY_MEM_ACCESS,
+ (0 << 31) | (physel << 24) | (0 << 16) | addr);
+ udelay(850);
+ val = nlm_read_sata_reg(regbase, PHY_MEM_ACCESS);
+ return (val >> 16) & 0xff;
+}
+
+static void config_sata_phy(u64 regbase)
+{
+ u32 port, i, reg;
+ u8 val;
+
+ for (port = 0; port < 2; port++) {
+ for (i = 0, reg = RXCDRCALFOSC0; reg <= CALDUTY; reg++, i++)
+ write_phy_reg(regbase, reg, port, sata_phy_config1[i]);
+
+ for (i = 0, reg = RXDPIF; reg <= PPMDRIFTMAX_HI; reg++, i++)
+ write_phy_reg(regbase, reg, port, sata_phy_config2[i]);
+
+ /* Fix for PHY link up failures at lower temperatures */
+ write_phy_reg(regbase, 0x800F, port, 0x1f);
+
+ val = read_phy_reg(regbase, 0x0029, port);
+ write_phy_reg(regbase, 0x0029, port, val | (0x7 << 1));
+
+ val = read_phy_reg(regbase, 0x0056, port);
+ write_phy_reg(regbase, 0x0056, port, val & ~(1 << 3));
+
+ val = read_phy_reg(regbase, 0x0018, port);
+ write_phy_reg(regbase, 0x0018, port, val & ~(0x7 << 0));
+ }
+}
+
+static void check_phy_register(u64 regbase, u32 addr, u32 physel, u8 xdata)
+{
+ u8 data;
+
+ data = read_phy_reg(regbase, addr, physel);
+ pr_info("PHY read addr = 0x%x physel = %d data = 0x%x %s\n",
+ addr, physel, data, data == xdata ? "TRUE" : "FALSE");
+}
+
+static void verify_sata_phy_config(u64 regbase)
+{
+ u32 port, i, reg;
+
+ for (port = 0; port < 2; port++) {
+ for (i = 0, reg = RXCDRCALFOSC0; reg <= CALDUTY; reg++, i++)
+ check_phy_register(regbase, reg, port,
+ sata_phy_config1[i]);
+
+ for (i = 0, reg = RXDPIF; reg <= PPMDRIFTMAX_HI; reg++, i++)
+ check_phy_register(regbase, reg, port,
+ sata_phy_config2[i]);
+ }
+}
+
+static void nlm_sata_firmware_init(int node)
+{
+ u32 reg_val;
+ u64 regbase;
+ int n;
+
+ pr_info("Initializing XLP9XX On-chip AHCI...\n");
+ regbase = nlm_get_sata_regbase(node);
+
+ /* Reset port0 */
+ sata_clear_glue_reg(regbase, SATA_CTL, P0_IRST_POR);
+ sata_clear_glue_reg(regbase, SATA_CTL, P0_IRST_HARD_TXRX);
+ sata_clear_glue_reg(regbase, SATA_CTL, P0_IRST_HARD_SYNTH);
+ sata_clear_glue_reg(regbase, SATA_CTL, P0_IPDTXL);
+ sata_clear_glue_reg(regbase, SATA_CTL, P0_IPDRXL);
+ sata_clear_glue_reg(regbase, SATA_CTL, P0_IPDIPDMSYNTH);
+
+ /* port1 */
+ sata_clear_glue_reg(regbase, SATA_CTL, P1_IRST_POR);
+ sata_clear_glue_reg(regbase, SATA_CTL, P1_IRST_HARD_TXRX);
+ sata_clear_glue_reg(regbase, SATA_CTL, P1_IRST_HARD_SYNTH);
+ sata_clear_glue_reg(regbase, SATA_CTL, P1_IPDTXL);
+ sata_clear_glue_reg(regbase, SATA_CTL, P1_IPDRXL);
+ sata_clear_glue_reg(regbase, SATA_CTL, P1_IPDIPDMSYNTH);
+ udelay(300);
+
+ /* Set PHY */
+ sata_set_glue_reg(regbase, SATA_CTL, P0_IPDTXL);
+ sata_set_glue_reg(regbase, SATA_CTL, P0_IPDRXL);
+ sata_set_glue_reg(regbase, SATA_CTL, P0_IPDIPDMSYNTH);
+ sata_set_glue_reg(regbase, SATA_CTL, P1_IPDTXL);
+ sata_set_glue_reg(regbase, SATA_CTL, P1_IPDRXL);
+ sata_set_glue_reg(regbase, SATA_CTL, P1_IPDIPDMSYNTH);
+
+ udelay(1000);
+ sata_set_glue_reg(regbase, SATA_CTL, P0_IRST_POR);
+ udelay(1000);
+ sata_set_glue_reg(regbase, SATA_CTL, P1_IRST_POR);
+ udelay(1000);
+
+ /* setup PHY */
+ config_sata_phy(regbase);
+ if (sata_phy_debug)
+ verify_sata_phy_config(regbase);
+
+ udelay(1000);
+ sata_set_glue_reg(regbase, SATA_CTL, P0_IRST_HARD_TXRX);
+ sata_set_glue_reg(regbase, SATA_CTL, P0_IRST_HARD_SYNTH);
+ sata_set_glue_reg(regbase, SATA_CTL, P1_IRST_HARD_TXRX);
+ sata_set_glue_reg(regbase, SATA_CTL, P1_IRST_HARD_SYNTH);
+ udelay(300);
+
+ /* Override reset in serial PHY mode */
+ sata_set_glue_reg(regbase, CR_REG_TIMER, CR_TIME_SCALE);
+ /* Set reset SATA */
+ sata_set_glue_reg(regbase, SATA_CTL, SATA_RST_N);
+ sata_set_glue_reg(regbase, SATA_CTL, M_CSYSREQ);
+ sata_set_glue_reg(regbase, SATA_CTL, S_CSYSREQ);
+
+ pr_debug("Waiting for PHYs to come up.\n");
+ n = 10000;
+ do {
+ reg_val = nlm_read_sata_reg(regbase, SATA_STATUS);
+ if ((reg_val & P1_PHY_READY) && (reg_val & P0_PHY_READY))
+ break;
+ udelay(10);
+ } while (--n > 0);
+
+ if (reg_val & P0_PHY_READY)
+ pr_info("PHY0 is up.\n");
+ else
+ pr_info("PHY0 is down.\n");
+ if (reg_val & P1_PHY_READY)
+ pr_info("PHY1 is up.\n");
+ else
+ pr_info("PHY1 is down.\n");
+
+ pr_info("XLP AHCI Init Done.\n");
+}
+
+static int __init nlm_ahci_init(void)
+{
+ int node;
+
+ if (!cpu_is_xlp9xx())
+ return 0;
+ for (node = 0; node < NLM_NR_NODES; node++)
+ if (nlm_node_present(node))
+ nlm_sata_firmware_init(node);
+ return 0;
+}
+
+static void nlm_sata_intr_ack(struct irq_data *data)
+{
+ u64 regbase;
+ u32 val;
+ int node;
+
+ node = data->irq / NLM_IRQS_PER_NODE;
+ regbase = nlm_get_sata_regbase(node);
+ val = nlm_read_sata_reg(regbase, SATA_INT);
+ sata_set_glue_reg(regbase, SATA_INT, val);
+}
+
+static void nlm_sata_fixup_bar(struct pci_dev *dev)
+{
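+	/*
+	 * The AHCI resource is in BAR 0, move it to
+	 * BAR 5, where it is expected
+	 */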
+ dev->resource[5] = dev->resource[0];
+ memset(&dev->resource[0], 0, sizeof(dev->resource[0]));
+}
+
+static void nlm_sata_fixup_final(struct pci_dev *dev)
+{
+ u32 val;
+ u64 regbase;
+ int node;
+
+ /* Find end bridge function to find node */
+ node = xlp_socdev_to_node(dev);
+ regbase = nlm_get_sata_regbase(node);
+
+ /* clear pending interrupts and then enable them */
+ val = nlm_read_sata_reg(regbase, SATA_INT);
+ sata_set_glue_reg(regbase, SATA_INT, val);
+
+ /* Enable only the core interrupt */
+ sata_set_glue_reg(regbase, SATA_INT_MASK, 0x1);
+
+ dev->irq = nlm_irq_to_xirq(node, PIC_SATA_IRQ);
+ nlm_set_pic_extra_ack(node, PIC_SATA_IRQ, nlm_sata_intr_ack);
+}
+
+arch_initcall(nlm_ahci_init);
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_XLP9XX_SATA,
+ nlm_sata_fixup_bar);
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_XLP9XX_SATA,
+ nlm_sata_fixup_final);
diff --git a/arch/mips/netlogic/xlp/ahci-init.c b/arch/mips/netlogic/xlp/ahci-init.c
new file mode 100644
index 000000000..92be1a325
--- /dev/null
+++ b/arch/mips/netlogic/xlp/ahci-init.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2003-2014 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/bitops.h>
+
+#include <asm/cpu.h>
+#include <asm/mipsregs.h>
+
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/mips-extns.h>
+
+#define SATA_CTL 0x0
+#define SATA_STATUS 0x1 /* Status Reg */
+#define SATA_INT 0x2 /* Interrupt Reg */
+#define SATA_INT_MASK 0x3 /* Interrupt Mask Reg */
+#define SATA_CR_REG_TIMER	0x4	/* PHY Control Timer Reg */
+#define SATA_CORE_ID 0x5 /* Core ID Reg */
+#define SATA_AXI_SLAVE_OPT1 0x6 /* AXI Slave Options Reg */
+#define SATA_PHY_LOS_LEV 0x7 /* PHY LOS Level Reg */
+#define SATA_PHY_MULTI 0x8 /* PHY Multiplier Reg */
+#define SATA_PHY_CLK_SEL 0x9 /* Clock Select Reg */
+#define SATA_PHY_AMP1_GEN1 0xa /* PHY Transmit Amplitude Reg 1 */
+#define SATA_PHY_AMP1_GEN2 0xb /* PHY Transmit Amplitude Reg 2 */
+#define SATA_PHY_AMP1_GEN3 0xc /* PHY Transmit Amplitude Reg 3 */
+#define SATA_PHY_PRE1 0xd /* PHY Transmit Preemphasis Reg 1 */
+#define SATA_PHY_PRE2 0xe /* PHY Transmit Preemphasis Reg 2 */
+#define SATA_PHY_PRE3 0xf /* PHY Transmit Preemphasis Reg 3 */
+#define SATA_SPDMODE 0x10 /* Speed Mode Reg */
+#define SATA_REFCLK 0x11 /* Reference Clock Control Reg */
+#define SATA_BYTE_SWAP_DIS 0x12 /* byte swap disable */
+
+/* SATA_CTL Bits */
+#define SATA_RST_N BIT(0)
+#define PHY0_RESET_N BIT(16)
+#define PHY1_RESET_N BIT(17)
+#define PHY2_RESET_N BIT(18)
+#define PHY3_RESET_N BIT(19)
+#define M_CSYSREQ BIT(2)
+#define S_CSYSREQ BIT(3)
+
+/* SATA_STATUS Bits */
+#define P0_PHY_READY BIT(4)
+#define P1_PHY_READY BIT(5)
+#define P2_PHY_READY BIT(6)
+#define P3_PHY_READY BIT(7)
+
+#define nlm_read_sata_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_sata_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_sata_pcibase(node) \
+ nlm_pcicfg_base(XLP_IO_SATA_OFFSET(node))
+/* SATA device specific configuration registers start at offset 0x900 */
+#define nlm_get_sata_regbase(node) \
+ (nlm_get_sata_pcibase(node) + 0x900)
+
+static void sata_clear_glue_reg(uint64_t regbase, uint32_t off, uint32_t bit)
+{
+ uint32_t reg_val;
+
+ reg_val = nlm_read_sata_reg(regbase, off);
+ nlm_write_sata_reg(regbase, off, (reg_val & ~bit));
+}
+
+static void sata_set_glue_reg(uint64_t regbase, uint32_t off, uint32_t bit)
+{
+ uint32_t reg_val;
+
+ reg_val = nlm_read_sata_reg(regbase, off);
+ nlm_write_sata_reg(regbase, off, (reg_val | bit));
+}
+
+static void nlm_sata_firmware_init(int node)
+{
+ uint32_t reg_val;
+ uint64_t regbase;
+ int i;
+
+ pr_info("XLP AHCI Initialization started.\n");
+ regbase = nlm_get_sata_regbase(node);
+
+ /* Reset SATA */
+ sata_clear_glue_reg(regbase, SATA_CTL, SATA_RST_N);
+ /* Reset PHY */
+ sata_clear_glue_reg(regbase, SATA_CTL,
+ (PHY3_RESET_N | PHY2_RESET_N
+ | PHY1_RESET_N | PHY0_RESET_N));
+
+ /* Set SATA */
+ sata_set_glue_reg(regbase, SATA_CTL, SATA_RST_N);
+ /* Set PHY */
+ sata_set_glue_reg(regbase, SATA_CTL,
+ (PHY3_RESET_N | PHY2_RESET_N
+ | PHY1_RESET_N | PHY0_RESET_N));
+
+ pr_debug("Waiting for PHYs to come up.\n");
+ i = 0;
+ do {
+ reg_val = nlm_read_sata_reg(regbase, SATA_STATUS);
+ i++;
+ } while (((reg_val & 0xF0) != 0xF0) && (i < 10000));
+
+ for (i = 0; i < 4; i++) {
+ if (reg_val & (P0_PHY_READY << i))
+ pr_info("PHY%d is up.\n", i);
+ else
+ pr_info("PHY%d is down.\n", i);
+ }
+
+ pr_info("XLP AHCI init done.\n");
+}
+
+static int __init nlm_ahci_init(void)
+{
+ int node = 0;
+ int chip = read_c0_prid() & PRID_IMP_MASK;
+
+ if (chip == PRID_IMP_NETLOGIC_XLP3XX)
+ nlm_sata_firmware_init(node);
+ return 0;
+}
+
+static void nlm_sata_intr_ack(struct irq_data *data)
+{
+ uint32_t val = 0;
+ uint64_t regbase;
+
+ regbase = nlm_get_sata_regbase(nlm_nodeid());
+ val = nlm_read_sata_reg(regbase, SATA_INT);
+ sata_set_glue_reg(regbase, SATA_INT, val);
+}
+
+static void nlm_sata_fixup_bar(struct pci_dev *dev)
+{
+ /*
+ * The AHCI resource is in BAR 0, move it to
+ * BAR 5, where it is expected
+ */
+ dev->resource[5] = dev->resource[0];
+ memset(&dev->resource[0], 0, sizeof(dev->resource[0]));
+}
+
+static void nlm_sata_fixup_final(struct pci_dev *dev)
+{
+ uint32_t val;
+ uint64_t regbase;
+ int node = 0; /* XLP3XX does not support multi-node */
+
+ regbase = nlm_get_sata_regbase(node);
+
+ /* clear pending interrupts and then enable them */
+ val = nlm_read_sata_reg(regbase, SATA_INT);
+ sata_set_glue_reg(regbase, SATA_INT, val);
+
+	/*
+	 * Enable only the core interrupt. Enabling all of the
+	 * interrupts leads to spurious interrupts, so everything
+	 * else is left masked.
+	 */
+ sata_set_glue_reg(regbase, SATA_INT_MASK, 0x1);
+
+ dev->irq = PIC_SATA_IRQ;
+ nlm_set_pic_extra_ack(node, PIC_SATA_IRQ, nlm_sata_intr_ack);
+}
+
+arch_initcall(nlm_ahci_init);
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_SATA,
+ nlm_sata_fixup_bar);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_SATA,
+ nlm_sata_fixup_final);
diff --git a/arch/mips/netlogic/xlp/cop2-ex.c b/arch/mips/netlogic/xlp/cop2-ex.c
new file mode 100644
index 000000000..21e439b3d
--- /dev/null
+++ b/arch/mips/netlogic/xlp/cop2-ex.c
@@ -0,0 +1,121 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Broadcom Corporation.
+ *
+ * based on arch/mips/cavium-octeon/cpu.c
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/capability.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/notifier.h>
+#include <linux/prefetch.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/cop2.h>
+#include <asm/current.h>
+#include <asm/mipsregs.h>
+#include <asm/page.h>
+
+#include <asm/netlogic/mips-extns.h>
+
+/*
+ * 64-bit ops are done in inline assembly to support 32-bit
+ * compilation
+ */
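+/*
+ * COP2 on XLR/XLP fronts the fast messaging network (FMN); the
+ * registers saved and restored below are the per-thread FMN transmit
+ * and receive message buffers and their status words.
+ */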
+void nlm_cop2_save(struct nlm_cop2_state *r)
+{
+ asm volatile(
+ ".set push\n"
+ ".set noat\n"
+ "dmfc2 $1, $0, 0\n"
+ "sd $1, 0(%1)\n"
+ "dmfc2 $1, $0, 1\n"
+ "sd $1, 8(%1)\n"
+ "dmfc2 $1, $0, 2\n"
+ "sd $1, 16(%1)\n"
+ "dmfc2 $1, $0, 3\n"
+ "sd $1, 24(%1)\n"
+ "dmfc2 $1, $1, 0\n"
+ "sd $1, 0(%2)\n"
+ "dmfc2 $1, $1, 1\n"
+ "sd $1, 8(%2)\n"
+ "dmfc2 $1, $1, 2\n"
+ "sd $1, 16(%2)\n"
+ "dmfc2 $1, $1, 3\n"
+ "sd $1, 24(%2)\n"
+ ".set pop\n"
+ : "=m"(*r)
+ : "r"(r->tx), "r"(r->rx));
+
+ r->tx_msg_status = __read_32bit_c2_register($2, 0);
+ r->rx_msg_status = __read_32bit_c2_register($3, 0) & 0x0fffffff;
+}
+
+void nlm_cop2_restore(struct nlm_cop2_state *r)
+{
+ u32 rstat;
+
+ asm volatile(
+ ".set push\n"
+ ".set noat\n"
+ "ld $1, 0(%1)\n"
+ "dmtc2 $1, $0, 0\n"
+ "ld $1, 8(%1)\n"
+ "dmtc2 $1, $0, 1\n"
+ "ld $1, 16(%1)\n"
+ "dmtc2 $1, $0, 2\n"
+ "ld $1, 24(%1)\n"
+ "dmtc2 $1, $0, 3\n"
+ "ld $1, 0(%2)\n"
+ "dmtc2 $1, $1, 0\n"
+ "ld $1, 8(%2)\n"
+ "dmtc2 $1, $1, 1\n"
+ "ld $1, 16(%2)\n"
+ "dmtc2 $1, $1, 2\n"
+ "ld $1, 24(%2)\n"
+ "dmtc2 $1, $1, 3\n"
+ ".set pop\n"
+ : : "m"(*r), "r"(r->tx), "r"(r->rx));
+
+ __write_32bit_c2_register($2, 0, r->tx_msg_status);
+ rstat = __read_32bit_c2_register($3, 0) & 0xf0000000u;
+ __write_32bit_c2_register($3, 0, r->rx_msg_status | rstat);
+}
+
+static int nlm_cu2_call(struct notifier_block *nfb, unsigned long action,
+ void *data)
+{
+ unsigned long flags;
+ unsigned int status;
+
+ switch (action) {
+ case CU2_EXCEPTION:
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ break;
+ local_irq_save(flags);
+ KSTK_STATUS(current) |= ST0_CU2;
+ status = read_c0_status();
+ write_c0_status(status | ST0_CU2);
+ nlm_cop2_restore(&(current->thread.cp2));
+ write_c0_status(status & ~ST0_CU2);
+ local_irq_restore(flags);
+ pr_info("COP2 access enabled for pid %d (%s)\n",
+ current->pid, current->comm);
+ return NOTIFY_BAD; /* Don't call default notifier */
+ }
+
+ return NOTIFY_OK; /* Let default notifier send signals */
+}
+
+static int __init nlm_cu2_setup(void)
+{
+ return cu2_notifier(nlm_cu2_call, 0);
+}
+early_initcall(nlm_cu2_setup);
diff --git a/arch/mips/netlogic/xlp/dt.c b/arch/mips/netlogic/xlp/dt.c
new file mode 100644
index 000000000..c856f2a3e
--- /dev/null
+++ b/arch/mips/netlogic/xlp/dt.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2003-2013 Broadcom Corporation.
+ * All Rights Reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+
+#include <asm/prom.h>
+
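+/*
+ * Device tree blobs built into the kernel image; xlp_dt_init() picks
+ * one by processor ID when the bootloader does not pass in an FDT.
+ */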
+extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_xlp_fvp_begin[],
+ __dtb_xlp_gvp_begin[], __dtb_xlp_rvp_begin[];
+static void *xlp_fdt_blob;
+
+void __init *xlp_dt_init(void *fdtp)
+{
+ if (!fdtp) {
+ switch (current_cpu_data.processor_id & PRID_IMP_MASK) {
+#ifdef CONFIG_DT_XLP_RVP
+ case PRID_IMP_NETLOGIC_XLP5XX:
+ fdtp = __dtb_xlp_rvp_begin;
+ break;
+#endif
+#ifdef CONFIG_DT_XLP_GVP
+ case PRID_IMP_NETLOGIC_XLP9XX:
+ fdtp = __dtb_xlp_gvp_begin;
+ break;
+#endif
+#ifdef CONFIG_DT_XLP_FVP
+ case PRID_IMP_NETLOGIC_XLP2XX:
+ fdtp = __dtb_xlp_fvp_begin;
+ break;
+#endif
+#ifdef CONFIG_DT_XLP_SVP
+ case PRID_IMP_NETLOGIC_XLP3XX:
+ fdtp = __dtb_xlp_svp_begin;
+ break;
+#endif
+#ifdef CONFIG_DT_XLP_EVP
+ case PRID_IMP_NETLOGIC_XLP8XX:
+ fdtp = __dtb_xlp_evp_begin;
+ break;
+#endif
+ default:
+ /* Pick a built-in if any, and hope for the best */
+ fdtp = __dtb_start;
+ break;
+ }
+ }
+ xlp_fdt_blob = fdtp;
+ return fdtp;
+}
+
+void __init xlp_early_init_devtree(void)
+{
+ __dt_setup_arch(xlp_fdt_blob);
+}
+
+void __init device_tree_init(void)
+{
+ unflatten_and_copy_device_tree();
+}
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
new file mode 100644
index 000000000..25ee69489
--- /dev/null
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+
+#include <asm/mipsregs.h>
+#include <asm/time.h>
+
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/bridge.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+
+/* Main initialization */
+void nlm_node_init(int node)
+{
+ struct nlm_soc_info *nodep;
+
+ nodep = nlm_get_node(node);
+ if (node == 0)
+ nodep->coremask = 1; /* node 0, boot cpu */
+ nodep->sysbase = nlm_get_sys_regbase(node);
+ nodep->picbase = nlm_get_pic_regbase(node);
+ nodep->ebase = read_c0_ebase() & MIPS_EBASE_BASE;
+ if (cpu_is_xlp9xx())
+ nodep->socbus = xlp9xx_get_socbus(node);
+ else
+ nodep->socbus = 0;
+ spin_lock_init(&nodep->piclock);
+}
+
+static int xlp9xx_irq_to_irt(int irq)
+{
+ switch (irq) {
+ case PIC_GPIO_IRQ:
+ return 12;
+ case PIC_I2C_0_IRQ:
+ return 125;
+ case PIC_I2C_1_IRQ:
+ return 126;
+ case PIC_I2C_2_IRQ:
+ return 127;
+ case PIC_I2C_3_IRQ:
+ return 128;
+ case PIC_9XX_XHCI_0_IRQ:
+ return 114;
+ case PIC_9XX_XHCI_1_IRQ:
+ return 115;
+ case PIC_9XX_XHCI_2_IRQ:
+ return 116;
+ case PIC_UART_0_IRQ:
+ return 133;
+ case PIC_UART_1_IRQ:
+ return 134;
+ case PIC_SATA_IRQ:
+ return 143;
+ case PIC_NAND_IRQ:
+ return 151;
+ case PIC_SPI_IRQ:
+ return 152;
+ case PIC_MMC_IRQ:
+ return 153;
+ case PIC_PCIE_LINK_LEGACY_IRQ(0):
+ case PIC_PCIE_LINK_LEGACY_IRQ(1):
+ case PIC_PCIE_LINK_LEGACY_IRQ(2):
+ case PIC_PCIE_LINK_LEGACY_IRQ(3):
+ return 191 + irq - PIC_PCIE_LINK_LEGACY_IRQ_BASE;
+ }
+ return -1;
+}
+
+static int xlp_irq_to_irt(int irq)
+{
+ uint64_t pcibase;
+ int devoff, irt;
+
+ devoff = 0;
+ switch (irq) {
+ case PIC_UART_0_IRQ:
+ devoff = XLP_IO_UART0_OFFSET(0);
+ break;
+ case PIC_UART_1_IRQ:
+ devoff = XLP_IO_UART1_OFFSET(0);
+ break;
+ case PIC_MMC_IRQ:
+ devoff = XLP_IO_MMC_OFFSET(0);
+ break;
+ case PIC_I2C_0_IRQ: /* I2C will be fixed up */
+ case PIC_I2C_1_IRQ:
+ case PIC_I2C_2_IRQ:
+ case PIC_I2C_3_IRQ:
+ if (cpu_is_xlpii())
+ devoff = XLP2XX_IO_I2C_OFFSET(0);
+ else
+ devoff = XLP_IO_I2C0_OFFSET(0);
+ break;
+ case PIC_SATA_IRQ:
+ devoff = XLP_IO_SATA_OFFSET(0);
+ break;
+ case PIC_GPIO_IRQ:
+ devoff = XLP_IO_GPIO_OFFSET(0);
+ break;
+ case PIC_NAND_IRQ:
+ devoff = XLP_IO_NAND_OFFSET(0);
+ break;
+ case PIC_SPI_IRQ:
+ devoff = XLP_IO_SPI_OFFSET(0);
+ break;
+ default:
+ if (cpu_is_xlpii()) {
+ switch (irq) {
+			/* XLP2XX has three XHCI USB controllers */
+ case PIC_2XX_XHCI_0_IRQ:
+ devoff = XLP2XX_IO_USB_XHCI0_OFFSET(0);
+ break;
+ case PIC_2XX_XHCI_1_IRQ:
+ devoff = XLP2XX_IO_USB_XHCI1_OFFSET(0);
+ break;
+ case PIC_2XX_XHCI_2_IRQ:
+ devoff = XLP2XX_IO_USB_XHCI2_OFFSET(0);
+ break;
+ }
+ } else {
+ switch (irq) {
+ case PIC_EHCI_0_IRQ:
+ devoff = XLP_IO_USB_EHCI0_OFFSET(0);
+ break;
+ case PIC_EHCI_1_IRQ:
+ devoff = XLP_IO_USB_EHCI1_OFFSET(0);
+ break;
+ case PIC_OHCI_0_IRQ:
+ devoff = XLP_IO_USB_OHCI0_OFFSET(0);
+ break;
+ case PIC_OHCI_1_IRQ:
+ devoff = XLP_IO_USB_OHCI1_OFFSET(0);
+ break;
+ case PIC_OHCI_2_IRQ:
+ devoff = XLP_IO_USB_OHCI2_OFFSET(0);
+ break;
+ case PIC_OHCI_3_IRQ:
+ devoff = XLP_IO_USB_OHCI3_OFFSET(0);
+ break;
+ }
+ }
+ }
+
+ if (devoff != 0) {
+ uint32_t val;
+
+ pcibase = nlm_pcicfg_base(devoff);
+ val = nlm_read_reg(pcibase, XLP_PCI_IRTINFO_REG);
+ if (val == 0xffffffff) {
+ irt = -1;
+ } else {
+ irt = val & 0xffff;
+ /* HW weirdness, I2C IRT entry has to be fixed up */
+ switch (irq) {
+ case PIC_I2C_1_IRQ:
+ irt = irt + 1; break;
+ case PIC_I2C_2_IRQ:
+ irt = irt + 2; break;
+ case PIC_I2C_3_IRQ:
+ irt = irt + 3; break;
+ }
+ }
+ } else if (irq >= PIC_PCIE_LINK_LEGACY_IRQ(0) &&
+ irq <= PIC_PCIE_LINK_LEGACY_IRQ(3)) {
+ /* HW bug, PCI IRT entries are bad on early silicon, fix */
+ irt = PIC_IRT_PCIE_LINK_INDEX(irq -
+ PIC_PCIE_LINK_LEGACY_IRQ_BASE);
+ } else {
+ irt = -1;
+ }
+ return irt;
+}
+
+int nlm_irq_to_irt(int irq)
+{
+ /* return -2 for irqs without 1-1 mapping */
+ if (irq >= PIC_PCIE_LINK_MSI_IRQ(0) && irq <= PIC_PCIE_LINK_MSI_IRQ(3))
+ return -2;
+ if (irq >= PIC_PCIE_MSIX_IRQ(0) && irq <= PIC_PCIE_MSIX_IRQ(3))
+ return -2;
+
+ if (cpu_is_xlp9xx())
+ return xlp9xx_irq_to_irt(irq);
+ else
+ return xlp_irq_to_irt(irq);
+}
+
+static unsigned int nlm_xlp2_get_core_frequency(int node, int core)
+{
+ unsigned int pll_post_div, ctrl_val0, ctrl_val1, denom;
+ uint64_t num, sysbase, clockbase;
+
+ if (cpu_is_xlp9xx()) {
+ clockbase = nlm_get_clock_regbase(node);
+ ctrl_val0 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_CPU_PLL_CTRL0(core));
+ ctrl_val1 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_CPU_PLL_CTRL1(core));
+ } else {
+ sysbase = nlm_get_node(node)->sysbase;
+ ctrl_val0 = nlm_read_sys_reg(sysbase,
+ SYS_CPU_PLL_CTRL0(core));
+ ctrl_val1 = nlm_read_sys_reg(sysbase,
+ SYS_CPU_PLL_CTRL1(core));
+ }
+
+ /* Find PLL post divider value */
+ switch ((ctrl_val0 >> 24) & 0x7) {
+ case 1:
+ pll_post_div = 2;
+ break;
+ case 3:
+ pll_post_div = 4;
+ break;
+ case 7:
+ pll_post_div = 8;
+ break;
+ case 6:
+ pll_post_div = 16;
+ break;
+ case 0:
+ default:
+ pll_post_div = 1;
+ break;
+ }
+
+ num = 1000000ULL * (400 * 3 + 100 * (ctrl_val1 & 0x3f));
+ denom = 3 * pll_post_div;
+ do_div(num, denom);
+
+ return (unsigned int)num;
+}
+
+static unsigned int nlm_xlp_get_core_frequency(int node, int core)
+{
+ unsigned int pll_divf, pll_divr, dfs_div, ext_div;
+ unsigned int rstval, dfsval, denom;
+ uint64_t num, sysbase;
+
+ sysbase = nlm_get_node(node)->sysbase;
+ rstval = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG);
+ dfsval = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIV_VALUE);
+ pll_divf = ((rstval >> 10) & 0x7f) + 1;
+ pll_divr = ((rstval >> 8) & 0x3) + 1;
+ ext_div = ((rstval >> 30) & 0x3) + 1;
+ dfs_div = ((dfsval >> (core * 4)) & 0xf) + 1;
+
+ num = 800000000ULL * pll_divf;
+ denom = 3 * pll_divr * ext_div * dfs_div;
+ do_div(num, denom);
+
+ return (unsigned int)num;
+}
+
+unsigned int nlm_get_core_frequency(int node, int core)
+{
+ if (cpu_is_xlpii())
+ return nlm_xlp2_get_core_frequency(node, core);
+ else
+ return nlm_xlp_get_core_frequency(node, core);
+}
+
+/*
+ * Calculate PIC frequency from PLL registers.
+ * freq_out = (ref_freq/2 * (6 + ctrl2[7:0]) + ctrl2[20:8]/2^13) /
+ * ((2^ctrl0[7:5]) * Table(ctrl0[26:24]))
+ */
+static unsigned int nlm_xlp2_get_pic_frequency(int node)
+{
+ u32 ctrl_val0, ctrl_val2, vco_post_div, pll_post_div, cpu_xlp9xx;
+ u32 mdiv, fdiv, pll_out_freq_den, reg_select, ref_div, pic_div;
+ u64 sysbase, pll_out_freq_num, ref_clk_select, clockbase, ref_clk;
+
+ sysbase = nlm_get_node(node)->sysbase;
+ clockbase = nlm_get_clock_regbase(node);
+ cpu_xlp9xx = cpu_is_xlp9xx();
+
+ /* Find ref_clk_base */
+ if (cpu_xlp9xx)
+ ref_clk_select = (nlm_read_sys_reg(sysbase,
+ SYS_9XX_POWER_ON_RESET_CFG) >> 18) & 0x3;
+ else
+ ref_clk_select = (nlm_read_sys_reg(sysbase,
+ SYS_POWER_ON_RESET_CFG) >> 18) & 0x3;
+ switch (ref_clk_select) {
+ case 0:
+ ref_clk = 200000000ULL;
+ ref_div = 3;
+ break;
+ case 1:
+ ref_clk = 100000000ULL;
+ ref_div = 1;
+ break;
+ case 2:
+ ref_clk = 125000000ULL;
+ ref_div = 1;
+ break;
+ case 3:
+ ref_clk = 400000000ULL;
+ ref_div = 3;
+ break;
+ }
+
+ /* Find the clock source PLL device for PIC */
+ if (cpu_xlp9xx) {
+ reg_select = nlm_read_sys_reg(clockbase,
+ SYS_9XX_CLK_DEV_SEL_REG) & 0x3;
+ switch (reg_select) {
+ case 0:
+ ctrl_val0 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_PLL_CTRL0);
+ ctrl_val2 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_PLL_CTRL2);
+ break;
+ case 1:
+ ctrl_val0 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_PLL_CTRL0_DEVX(0));
+ ctrl_val2 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_PLL_CTRL2_DEVX(0));
+ break;
+ case 2:
+ ctrl_val0 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_PLL_CTRL0_DEVX(1));
+ ctrl_val2 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_PLL_CTRL2_DEVX(1));
+ break;
+ case 3:
+ ctrl_val0 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_PLL_CTRL0_DEVX(2));
+ ctrl_val2 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_PLL_CTRL2_DEVX(2));
+ break;
+ }
+ } else {
+ reg_select = (nlm_read_sys_reg(sysbase,
+ SYS_CLK_DEV_SEL_REG) >> 22) & 0x3;
+ switch (reg_select) {
+ case 0:
+ ctrl_val0 = nlm_read_sys_reg(sysbase,
+ SYS_PLL_CTRL0);
+ ctrl_val2 = nlm_read_sys_reg(sysbase,
+ SYS_PLL_CTRL2);
+ break;
+ case 1:
+ ctrl_val0 = nlm_read_sys_reg(sysbase,
+ SYS_PLL_CTRL0_DEVX(0));
+ ctrl_val2 = nlm_read_sys_reg(sysbase,
+ SYS_PLL_CTRL2_DEVX(0));
+ break;
+ case 2:
+ ctrl_val0 = nlm_read_sys_reg(sysbase,
+ SYS_PLL_CTRL0_DEVX(1));
+ ctrl_val2 = nlm_read_sys_reg(sysbase,
+ SYS_PLL_CTRL2_DEVX(1));
+ break;
+ case 3:
+ ctrl_val0 = nlm_read_sys_reg(sysbase,
+ SYS_PLL_CTRL0_DEVX(2));
+ ctrl_val2 = nlm_read_sys_reg(sysbase,
+ SYS_PLL_CTRL2_DEVX(2));
+ break;
+ }
+ }
+
+ vco_post_div = (ctrl_val0 >> 5) & 0x7;
+ pll_post_div = (ctrl_val0 >> 24) & 0x7;
+ mdiv = ctrl_val2 & 0xff;
+ fdiv = (ctrl_val2 >> 8) & 0x1fff;
+
+ /* Find PLL post divider value */
+ switch (pll_post_div) {
+ case 1:
+ pll_post_div = 2;
+ break;
+ case 3:
+ pll_post_div = 4;
+ break;
+ case 7:
+ pll_post_div = 8;
+ break;
+ case 6:
+ pll_post_div = 16;
+ break;
+ case 0:
+ default:
+ pll_post_div = 1;
+ break;
+ }
+
+ fdiv = fdiv/(1 << 13);
+ pll_out_freq_num = ((ref_clk >> 1) * (6 + mdiv)) + fdiv;
+ pll_out_freq_den = (1 << vco_post_div) * pll_post_div * ref_div;
+
+ if (pll_out_freq_den > 0)
+ do_div(pll_out_freq_num, pll_out_freq_den);
+
+ /* PIC post divider, which happens after PLL */
+ if (cpu_xlp9xx)
+ pic_div = nlm_read_sys_reg(clockbase,
+ SYS_9XX_CLK_DEV_DIV_REG) & 0x3;
+ else
+ pic_div = (nlm_read_sys_reg(sysbase,
+ SYS_CLK_DEV_DIV_REG) >> 22) & 0x3;
+ do_div(pll_out_freq_num, 1 << pic_div);
+
+ return pll_out_freq_num;
+}
+
+unsigned int nlm_get_pic_frequency(int node)
+{
+ if (cpu_is_xlpii())
+ return nlm_xlp2_get_pic_frequency(node);
+ else
+ return 133333333;
+}
+
+unsigned int nlm_get_cpu_frequency(void)
+{
+ return nlm_get_core_frequency(0, 0);
+}
+
+/*
+ * Fills up to 8 pairs of entries with the DRAM map of a node and
+ * returns the number of entries written (a base and a limit per BAR).
+ * If node < 0, the DRAM map for all nodes is returned.
+ */
+int nlm_get_dram_map(int node, uint64_t *dram_map, int nentries)
+{
+ uint64_t bridgebase, base, lim;
+ uint32_t val;
+ unsigned int barreg, limreg, xlatreg;
+ int i, n, rv;
+
+ /* Look only at mapping on Node 0, we don't handle crazy configs */
+ bridgebase = nlm_get_bridge_regbase(0);
+ rv = 0;
+ for (i = 0; i < 8; i++) {
+ if (rv + 1 >= nentries)
+ break;
+ if (cpu_is_xlp9xx()) {
+ barreg = BRIDGE_9XX_DRAM_BAR(i);
+ limreg = BRIDGE_9XX_DRAM_LIMIT(i);
+ xlatreg = BRIDGE_9XX_DRAM_NODE_TRANSLN(i);
+ } else {
+ barreg = BRIDGE_DRAM_BAR(i);
+ limreg = BRIDGE_DRAM_LIMIT(i);
+ xlatreg = BRIDGE_DRAM_NODE_TRANSLN(i);
+ }
+ if (node >= 0) {
+ /* node specified, get node mapping of BAR */
+ val = nlm_read_bridge_reg(bridgebase, xlatreg);
+ n = (val >> 1) & 0x3;
+ if (n != node)
+ continue;
+ }
+ val = nlm_read_bridge_reg(bridgebase, barreg);
+ val = (val >> 12) & 0xfffff;
+ base = (uint64_t) val << 20;
+ val = nlm_read_bridge_reg(bridgebase, limreg);
+ val = (val >> 12) & 0xfffff;
+ if (val == 0) /* BAR not used */
+ continue;
+ lim = ((uint64_t)val + 1) << 20;
+ dram_map[rv] = base;
+ dram_map[rv + 1] = lim;
+ rv += 2;
+ }
+ return rv;
+}
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
new file mode 100644
index 000000000..9adc0c1b4
--- /dev/null
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of_fdt.h>
+#include <linux/memblock.h>
+
+#include <asm/idle.h>
+#include <asm/reboot.h>
+#include <asm/time.h>
+#include <asm/bootinfo.h>
+
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+
+uint64_t nlm_io_base;
+struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
+cpumask_t nlm_cpumask = CPU_MASK_CPU0;
+unsigned int nlm_threads_per_core;
+
+static void nlm_linux_exit(void)
+{
+ uint64_t sysbase = nlm_get_node(0)->sysbase;
+
+ if (cpu_is_xlp9xx())
+ nlm_write_sys_reg(sysbase, SYS_9XX_CHIP_RESET, 1);
+ else
+ nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1);
+ for ( ; ; )
+ cpu_wait();
+}
+
+static void nlm_fixup_mem(void)
+{
+ const int pref_backup = 512;
+ struct memblock_region *mem;
+
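+	/*
+	 * Trim the last pref_backup bytes from every region, presumably so
+	 * that prefetches do not run past the end of available memory.
+	 */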
+ for_each_mem_region(mem) {
+ memblock_remove(mem->base + mem->size - pref_backup,
+ pref_backup);
+ }
+}
+
+static void __init xlp_init_mem_from_bars(void)
+{
+ uint64_t map[16];
+ int i, n;
+
+ n = nlm_get_dram_map(-1, map, ARRAY_SIZE(map)); /* -1 : all nodes */
+ for (i = 0; i < n; i += 2) {
+ /* exclude 0x1000_0000-0x2000_0000, u-boot device */
+ if (map[i] <= 0x10000000 && map[i+1] > 0x10000000)
+ map[i+1] = 0x10000000;
+ if (map[i] > 0x10000000 && map[i] < 0x20000000)
+ map[i] = 0x20000000;
+
+ memblock_add(map[i], map[i+1] - map[i]);
+ }
+}
+
+void __init plat_mem_setup(void)
+{
+#ifdef CONFIG_SMP
+ nlm_wakeup_secondary_cpus();
+
+ /* update TLB size after waking up threads */
+ current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+
+ register_smp_ops(&nlm_smp_ops);
+#endif
+ _machine_restart = (void (*)(char *))nlm_linux_exit;
+ _machine_halt = nlm_linux_exit;
+ pm_power_off = nlm_linux_exit;
+
+ /* memory and bootargs from DT */
+ xlp_early_init_devtree();
+
+ if (memblock_end_of_DRAM() == 0) {
+ pr_info("Using DRAM BARs for memory map.\n");
+ xlp_init_mem_from_bars();
+ }
+ /* Calculate and setup wired entries for mapped kernel */
+ nlm_fixup_mem();
+}
+
+const char *get_system_type(void)
+{
+ switch (read_c0_prid() & PRID_IMP_MASK) {
+ case PRID_IMP_NETLOGIC_XLP9XX:
+ case PRID_IMP_NETLOGIC_XLP5XX:
+ case PRID_IMP_NETLOGIC_XLP2XX:
+ return "Broadcom XLPII Series";
+ default:
+ return "Netlogic XLP Series";
+ }
+}
+
+void __init prom_free_prom_memory(void)
+{
+ /* Nothing yet */
+}
+
+void xlp_mmu_init(void)
+{
+ u32 conf4;
+
+ if (cpu_is_xlpii()) {
+ /* XLPII series has extended pagesize in config 4 */
+ conf4 = read_c0_config4() & ~0x1f00u;
+ write_c0_config4(conf4 | ((PAGE_SHIFT - 10) / 2 << 8));
+ } else {
+ /* enable extended TLB and Large Fixed TLB */
+ write_c0_config6(read_c0_config6() | 0x24);
+
+ /* set page mask of extended Fixed TLB in config7 */
+ write_c0_config7(PM_DEFAULT_MASK >>
+ (13 + (ffz(PM_DEFAULT_MASK >> 13) / 2)));
+ }
+}
+
+void nlm_percpu_init(int hwcpuid)
+{
+}
+
+void __init prom_init(void)
+{
+ void *reset_vec;
+
+ nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
+ nlm_init_boot_cpu();
+ xlp_mmu_init();
+ nlm_node_init(0);
+ xlp_dt_init((void *)(long)fw_arg0);
+
+ /* Update reset entry point with CPU init code */
+ reset_vec = (void *)CKSEG1ADDR(RESET_VEC_PHYS);
+ memset(reset_vec, 0, RESET_VEC_SIZE);
+ memcpy(reset_vec, (void *)nlm_reset_entry,
+ (nlm_reset_entry_end - nlm_reset_entry));
+
+#ifdef CONFIG_SMP
+ cpumask_setall(&nlm_cpumask);
+#endif
+}
diff --git a/arch/mips/netlogic/xlp/usb-init-xlp2.c b/arch/mips/netlogic/xlp/usb-init-xlp2.c
new file mode 100644
index 000000000..2524939a5
--- /dev/null
+++ b/arch/mips/netlogic/xlp/usb-init-xlp2.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2003-2013 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+
+#define XLPII_USB3_CTL_0 0xc0
+#define XLPII_VAUXRST BIT(0)
+#define XLPII_VCCRST BIT(1)
+#define XLPII_NUM2PORT 9
+#define XLPII_NUM3PORT 13
+#define XLPII_RTUNEREQ BIT(20)
+#define XLPII_MS_CSYSREQ BIT(21)
+#define XLPII_XS_CSYSREQ BIT(22)
+#define XLPII_RETENABLEN BIT(23)
+#define XLPII_TX2RX BIT(24)
+#define XLPII_XHCIREV BIT(25)
+#define XLPII_ECCDIS BIT(26)
+
+#define XLPII_USB3_INT_REG 0xc2
+#define XLPII_USB3_INT_MASK 0xc3
+
+#define XLPII_USB_PHY_TEST 0xc6
+#define XLPII_PRESET BIT(0)
+#define XLPII_ATERESET BIT(1)
+#define XLPII_LOOPEN BIT(2)
+#define XLPII_TESTPDHSP BIT(3)
+#define XLPII_TESTPDSSP BIT(4)
+#define XLPII_TESTBURNIN BIT(5)
+
+#define XLPII_USB_PHY_LOS_LV 0xc9
+#define XLPII_LOSLEV 0
+#define XLPII_LOSBIAS 5
+#define XLPII_SQRXTX 8
+#define XLPII_TXBOOST 11
+#define XLPII_RSLKSEL 16
+#define XLPII_FSEL 20
+
+#define XLPII_USB_RFCLK_REG 0xcc
+#define XLPII_VVLD 30
+
+#define nlm_read_usb_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b, r, v)
+
+#define nlm_xlpii_get_usb_pcibase(node, inst) \
+ nlm_pcicfg_base(cpu_is_xlp9xx() ? \
+ XLP9XX_IO_USB_OFFSET(node, inst) : \
+ XLP2XX_IO_USB_OFFSET(node, inst))
+#define nlm_xlpii_get_usb_regbase(node, inst) \
+ (nlm_xlpii_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
+
+static void xlp2xx_usb_ack(struct irq_data *data)
+{
+ u64 port_addr;
+
+ switch (data->irq) {
+ case PIC_2XX_XHCI_0_IRQ:
+ port_addr = nlm_xlpii_get_usb_regbase(0, 1);
+ break;
+ case PIC_2XX_XHCI_1_IRQ:
+ port_addr = nlm_xlpii_get_usb_regbase(0, 2);
+ break;
+ case PIC_2XX_XHCI_2_IRQ:
+ port_addr = nlm_xlpii_get_usb_regbase(0, 3);
+ break;
+ default:
+ pr_err("No matching USB irq!\n");
+ return;
+ }
+ nlm_write_usb_reg(port_addr, XLPII_USB3_INT_REG, 0xffffffff);
+}
+
+static void xlp9xx_usb_ack(struct irq_data *data)
+{
+ u64 port_addr;
+ int node, irq;
+
+ /* Find the node and irq on the node */
+ irq = data->irq % NLM_IRQS_PER_NODE;
+ node = data->irq / NLM_IRQS_PER_NODE;
+
+ switch (irq) {
+ case PIC_9XX_XHCI_0_IRQ:
+ port_addr = nlm_xlpii_get_usb_regbase(node, 1);
+ break;
+ case PIC_9XX_XHCI_1_IRQ:
+ port_addr = nlm_xlpii_get_usb_regbase(node, 2);
+ break;
+ case PIC_9XX_XHCI_2_IRQ:
+ port_addr = nlm_xlpii_get_usb_regbase(node, 3);
+ break;
+ default:
+ pr_err("No matching USB irq %d node %d!\n", irq, node);
+ return;
+ }
+ nlm_write_usb_reg(port_addr, XLPII_USB3_INT_REG, 0xffffffff);
+}
+
+static void nlm_xlpii_usb_hw_reset(int node, int port)
+{
+ u64 port_addr, xhci_base, pci_base;
+ void __iomem *corebase;
+ u32 val;
+
+ port_addr = nlm_xlpii_get_usb_regbase(node, port);
+
+ /* Set frequency */
+ val = nlm_read_usb_reg(port_addr, XLPII_USB_PHY_LOS_LV);
+ val &= ~(0x3f << XLPII_FSEL);
+ val |= (0x27 << XLPII_FSEL);
+ nlm_write_usb_reg(port_addr, XLPII_USB_PHY_LOS_LV, val);
+
+ val = nlm_read_usb_reg(port_addr, XLPII_USB_RFCLK_REG);
+ val |= (1 << XLPII_VVLD);
+ nlm_write_usb_reg(port_addr, XLPII_USB_RFCLK_REG, val);
+
+ /* PHY reset */
+ val = nlm_read_usb_reg(port_addr, XLPII_USB_PHY_TEST);
+ val &= (XLPII_ATERESET | XLPII_LOOPEN | XLPII_TESTPDHSP
+ | XLPII_TESTPDSSP | XLPII_TESTBURNIN);
+ nlm_write_usb_reg(port_addr, XLPII_USB_PHY_TEST, val);
+
+ /* Setup control register */
+ val = XLPII_VAUXRST | XLPII_VCCRST | (1 << XLPII_NUM2PORT)
+ | (1 << XLPII_NUM3PORT) | XLPII_MS_CSYSREQ | XLPII_XS_CSYSREQ
+ | XLPII_RETENABLEN | XLPII_XHCIREV;
+ nlm_write_usb_reg(port_addr, XLPII_USB3_CTL_0, val);
+
+ /* Enable interrupts */
+ nlm_write_usb_reg(port_addr, XLPII_USB3_INT_MASK, 0x00000001);
+
+ /* Clear all interrupts */
+ nlm_write_usb_reg(port_addr, XLPII_USB3_INT_REG, 0xffffffff);
+
+ udelay(2000);
+
+ /* XHCI configuration at PCI mem */
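+	/*
+	 * The controller base comes from BAR 0; the offsets below match
+	 * the Synopsys DWC3 global registers (GCTL 0xc110, GUSB2PHYCFG
+	 * 0xc200, GUSB3PIPECTL 0xc2c0).
+	 */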
+ pci_base = nlm_xlpii_get_usb_pcibase(node, port);
+ xhci_base = nlm_read_usb_reg(pci_base, 0x4) & ~0xf;
+ corebase = ioremap(xhci_base, 0x10000);
+ if (!corebase)
+ return;
+
+ writel(0x240002, corebase + 0xc2c0);
+ /* GCTL 0xc110 */
+ val = readl(corebase + 0xc110);
+ val &= ~(0x3 << 12);
+ val |= (1 << 12);
+ writel(val, corebase + 0xc110);
+ udelay(100);
+
+ /* PHYCFG 0xc200 */
+ val = readl(corebase + 0xc200);
+ val &= ~(1 << 6);
+ writel(val, corebase + 0xc200);
+ udelay(100);
+
+ /* PIPECTL 0xc2c0 */
+ val = readl(corebase + 0xc2c0);
+ val &= ~(1 << 17);
+ writel(val, corebase + 0xc2c0);
+
+ iounmap(corebase);
+}
+
+static int __init nlm_platform_xlpii_usb_init(void)
+{
+ int node;
+
+ if (!cpu_is_xlpii())
+ return 0;
+
+ if (!cpu_is_xlp9xx()) {
+ /* XLP 2XX single node */
+ pr_info("Initializing 2XX USB Interface\n");
+ nlm_xlpii_usb_hw_reset(0, 1);
+ nlm_xlpii_usb_hw_reset(0, 2);
+ nlm_xlpii_usb_hw_reset(0, 3);
+ nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_0_IRQ, xlp2xx_usb_ack);
+ nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_1_IRQ, xlp2xx_usb_ack);
+ nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_2_IRQ, xlp2xx_usb_ack);
+ return 0;
+ }
+
+ /* XLP 9XX, multi-node */
+ pr_info("Initializing 9XX/5XX USB Interface\n");
+ for (node = 0; node < NLM_NR_NODES; node++) {
+ if (!nlm_node_present(node))
+ continue;
+ nlm_xlpii_usb_hw_reset(node, 1);
+ nlm_xlpii_usb_hw_reset(node, 2);
+ nlm_xlpii_usb_hw_reset(node, 3);
+ nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_0_IRQ, xlp9xx_usb_ack);
+ nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_1_IRQ, xlp9xx_usb_ack);
+ nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_2_IRQ, xlp9xx_usb_ack);
+ }
+ return 0;
+}
+
+arch_initcall(nlm_platform_xlpii_usb_init);
+
+static u64 xlp_usb_dmamask = ~(u32)0;
+
+/* Fix up the IRQs for the USB devices on the XLP9XX SoC PCIe bus */
+static void nlm_xlp9xx_usb_fixup_final(struct pci_dev *dev)
+{
+ int node;
+
+ node = xlp_socdev_to_node(dev);
+ dev->dev.dma_mask = &xlp_usb_dmamask;
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ switch (dev->devfn) {
+ case 0x21:
+ dev->irq = nlm_irq_to_xirq(node, PIC_9XX_XHCI_0_IRQ);
+ break;
+ case 0x22:
+ dev->irq = nlm_irq_to_xirq(node, PIC_9XX_XHCI_1_IRQ);
+ break;
+ case 0x23:
+ dev->irq = nlm_irq_to_xirq(node, PIC_9XX_XHCI_2_IRQ);
+ break;
+ }
+}
+
+/* Fix up the IRQs for the USB devices on the XLP2XX SoC PCIe bus */
+static void nlm_xlp2xx_usb_fixup_final(struct pci_dev *dev)
+{
+ dev->dev.dma_mask = &xlp_usb_dmamask;
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ switch (dev->devfn) {
+ case 0x21:
+ dev->irq = PIC_2XX_XHCI_0_IRQ;
+ break;
+ case 0x22:
+ dev->irq = PIC_2XX_XHCI_1_IRQ;
+ break;
+ case 0x23:
+ dev->irq = PIC_2XX_XHCI_2_IRQ;
+ break;
+ }
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_XLP9XX_XHCI,
+ nlm_xlp9xx_usb_fixup_final);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_XHCI,
+ nlm_xlp2xx_usb_fixup_final);
diff --git a/arch/mips/netlogic/xlp/usb-init.c b/arch/mips/netlogic/xlp/usb-init.c
new file mode 100644
index 000000000..f8117985f
--- /dev/null
+++ b/arch/mips/netlogic/xlp/usb-init.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+
+/*
+ * USB glue logic registers, used only during initialization
+ */
+#define USB_CTL_0 0x01
+#define USB_PHY_0 0x0A
+#define USB_PHY_RESET 0x01
+#define USB_PHY_PORT_RESET_0 0x10
+#define USB_PHY_PORT_RESET_1 0x20
+#define USB_CONTROLLER_RESET 0x01
+#define USB_INT_STATUS 0x0E
+#define USB_INT_EN 0x0F
+#define USB_PHY_INTERRUPT_EN 0x01
+#define USB_OHCI_INTERRUPT_EN 0x02
+#define USB_OHCI_INTERRUPT1_EN 0x04
+#define USB_OHCI_INTERRUPT2_EN 0x08
+#define USB_CTRL_INTERRUPT_EN 0x10
+
+#define nlm_read_usb_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_usb_pcibase(node, inst) \
+ nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
+#define nlm_get_usb_regbase(node, inst) \
+ (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
+
+static void nlm_usb_intr_en(int node, int port)
+{
+ uint32_t val;
+ uint64_t port_addr;
+
+ port_addr = nlm_get_usb_regbase(node, port);
+ val = nlm_read_usb_reg(port_addr, USB_INT_EN);
+ val = USB_CTRL_INTERRUPT_EN | USB_OHCI_INTERRUPT_EN |
+ USB_OHCI_INTERRUPT1_EN | USB_OHCI_INTERRUPT2_EN;
+ nlm_write_usb_reg(port_addr, USB_INT_EN, val);
+}
+
+static void nlm_usb_hw_reset(int node, int port)
+{
+ uint64_t port_addr;
+ uint32_t val;
+
+ /* reset USB phy */
+ port_addr = nlm_get_usb_regbase(node, port);
+ val = nlm_read_usb_reg(port_addr, USB_PHY_0);
+ val &= ~(USB_PHY_RESET | USB_PHY_PORT_RESET_0 | USB_PHY_PORT_RESET_1);
+ nlm_write_usb_reg(port_addr, USB_PHY_0, val);
+
+ mdelay(100);
+ val = nlm_read_usb_reg(port_addr, USB_CTL_0);
+ val &= ~(USB_CONTROLLER_RESET);
+ val |= 0x4;
+ nlm_write_usb_reg(port_addr, USB_CTL_0, val);
+}
+
+static int __init nlm_platform_usb_init(void)
+{
+ if (cpu_is_xlpii())
+ return 0;
+
+ pr_info("Initializing USB Interface\n");
+ nlm_usb_hw_reset(0, 0);
+ nlm_usb_hw_reset(0, 3);
+
+	/* Enable USB interrupts */
+ nlm_usb_intr_en(0, 0);
+ nlm_usb_intr_en(0, 3);
+
+ return 0;
+}
+
+arch_initcall(nlm_platform_usb_init);
+
+static u64 xlp_usb_dmamask = ~(u32)0;
+
+/* Fix up the IRQ for USB devices that exist on the XLP SoC PCIe bus */
+static void nlm_usb_fixup_final(struct pci_dev *dev)
+{
+ dev->dev.dma_mask = &xlp_usb_dmamask;
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
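+	/* The USB block is PCI device 2, functions 0-5 (devfn 0x10-0x15) */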
+ switch (dev->devfn) {
+ case 0x10:
+ dev->irq = PIC_EHCI_0_IRQ;
+ break;
+ case 0x11:
+ dev->irq = PIC_OHCI_0_IRQ;
+ break;
+ case 0x12:
+ dev->irq = PIC_OHCI_1_IRQ;
+ break;
+ case 0x13:
+ dev->irq = PIC_EHCI_1_IRQ;
+ break;
+ case 0x14:
+ dev->irq = PIC_OHCI_2_IRQ;
+ break;
+ case 0x15:
+ dev->irq = PIC_OHCI_3_IRQ;
+ break;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_EHCI,
+ nlm_usb_fixup_final);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_OHCI,
+ nlm_usb_fixup_final);
diff --git a/arch/mips/netlogic/xlp/wakeup.c b/arch/mips/netlogic/xlp/wakeup.c
new file mode 100644
index 000000000..d61004dd7
--- /dev/null
+++ b/arch/mips/netlogic/xlp/wakeup.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/threads.h>
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/mipsregs.h>
+#include <asm/addrspace.h>
+#include <asm/string.h>
+
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/mips-extns.h>
+
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+
+static int xlp_wakeup_core(uint64_t sysbase, int node, int core)
+{
+ uint32_t coremask, value;
+ int count, resetreg;
+
+ coremask = (1 << core);
+
+ /* Enable CPU clock in case of 8xx/3xx */
+ if (!cpu_is_xlpii()) {
+ value = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL);
+ value &= ~coremask;
+ nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, value);
+ }
+
+ /* On 9XX, mark coherent first */
+ if (cpu_is_xlp9xx()) {
+ value = nlm_read_sys_reg(sysbase, SYS_9XX_CPU_NONCOHERENT_MODE);
+ value &= ~coremask;
+ nlm_write_sys_reg(sysbase, SYS_9XX_CPU_NONCOHERENT_MODE, value);
+ }
+
+ /* Remove CPU Reset */
+ resetreg = cpu_is_xlp9xx() ? SYS_9XX_CPU_RESET : SYS_CPU_RESET;
+ value = nlm_read_sys_reg(sysbase, resetreg);
+ value &= ~coremask;
+ nlm_write_sys_reg(sysbase, resetreg, value);
+
+ /* We are done on 9XX */
+ if (cpu_is_xlp9xx())
+ return 1;
+
+	/* Poll for the CPU to mark itself coherent on other types of XLP */
+ count = 100000;
+ do {
+ value = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE);
+ } while ((value & coremask) != 0 && --count > 0);
+
+ return count != 0;
+}
+
+static int wait_for_cpus(int cpu, int bootcpu)
+{
+ volatile uint32_t *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);
+ int i, count, notready;
+
+ count = 0x800000;
+ do {
+ notready = nlm_threads_per_core;
+ for (i = 0; i < nlm_threads_per_core; i++)
+ if (cpu_ready[cpu + i] || (cpu + i) == bootcpu)
+ --notready;
+ } while (notready != 0 && --count > 0);
+
+ return count != 0;
+}
+
+static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
+{
+ struct nlm_soc_info *nodep;
+ uint64_t syspcibase, fusebase;
+ uint32_t syscoremask, mask, fusemask;
+ int core, n, cpu, ncores;
+
+ for (n = 0; n < NLM_NR_NODES; n++) {
+ if (n != 0) {
+ /* check if node exists and is online */
+ if (cpu_is_xlp9xx()) {
+ int b = xlp9xx_get_socbus(n);
+ pr_info("Node %d SoC PCI bus %d.\n", n, b);
+ if (b == 0)
+ break;
+ } else {
+ syspcibase = nlm_get_sys_pcibase(n);
+ if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
+ break;
+ }
+ nlm_node_init(n);
+ }
+
+ /* read cores in reset from SYS */
+ nodep = nlm_get_node(n);
+
+ if (cpu_is_xlp9xx()) {
+ fusebase = nlm_get_fuse_regbase(n);
+ fusemask = nlm_read_reg(fusebase, FUSE_9XX_DEVCFG6);
+ switch (read_c0_prid() & PRID_IMP_MASK) {
+ case PRID_IMP_NETLOGIC_XLP5XX:
+ mask = 0xff;
+ break;
+ case PRID_IMP_NETLOGIC_XLP9XX:
+ default:
+ mask = 0xfffff;
+ break;
+ }
+ } else {
+ fusemask = nlm_read_sys_reg(nodep->sysbase,
+ SYS_EFUSE_DEVICE_CFG_STATUS0);
+ switch (read_c0_prid() & PRID_IMP_MASK) {
+ case PRID_IMP_NETLOGIC_XLP3XX:
+ mask = 0xf;
+ break;
+ case PRID_IMP_NETLOGIC_XLP2XX:
+ mask = 0x3;
+ break;
+ case PRID_IMP_NETLOGIC_XLP8XX:
+ default:
+ mask = 0xff;
+ break;
+ }
+ }
+
+ /*
+ * Fused out cores are set in the fusemask, and the remaining
+ * cores are renumbered to range 0 .. nactive-1
+ */
+ syscoremask = (1 << hweight32(~fusemask & mask)) - 1;
+
+ pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask);
+ ncores = nlm_cores_per_node();
+ for (core = 0; core < ncores; core++) {
+ /* we will be on node 0 core 0 */
+ if (n == 0 && core == 0)
+ continue;
+
+ /* see if the core exists */
+ if ((syscoremask & (1 << core)) == 0)
+ continue;
+
+ /* see if at least the first hw thread is enabled */
+ cpu = (n * ncores + core) * NLM_THREADS_PER_CORE;
+ if (!cpumask_test_cpu(cpu, wakeup_mask))
+ continue;
+
+ /* wake up the core */
+ if (!xlp_wakeup_core(nodep->sysbase, n, core))
+ continue;
+
+ /* core is up */
+ nodep->coremask |= 1u << core;
+
+			/* spin until the hw threads set their ready flag */
+ if (!wait_for_cpus(cpu, 0))
+ pr_err("Node %d : timeout core %d\n", n, core);
+ }
+ }
+}
+
+void xlp_wakeup_secondary_cpus(void)
+{
+ /*
+	 * In case of u-boot, the secondary CPUs are held in reset;
+	 * first wake up the core 0 threads
+ */
+ xlp_boot_core0_siblings();
+ if (!wait_for_cpus(0, 0))
+ pr_err("Node 0 : timeout core 0\n");
+
+ /* now get other cores out of reset */
+ xlp_enable_secondary_cores(&nlm_cpumask);
+}
diff --git a/arch/mips/netlogic/xlr/Makefile b/arch/mips/netlogic/xlr/Makefile
new file mode 100644
index 000000000..7c83100e5
--- /dev/null
+++ b/arch/mips/netlogic/xlr/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += fmn.o fmn-config.o setup.o platform.o platform-flash.o
+obj-$(CONFIG_SMP) += wakeup.o
diff --git a/arch/mips/netlogic/xlr/fmn-config.c b/arch/mips/netlogic/xlr/fmn-config.c
new file mode 100644
index 000000000..c7622c6e5
--- /dev/null
+++ b/arch/mips/netlogic/xlr/fmn-config.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/cpu-info.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/netlogic/xlr/fmn.h>
+#include <asm/netlogic/xlr/xlr.h>
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
+
+struct xlr_board_fmn_config xlr_board_fmn_config;
+
+static void __maybe_unused print_credit_config(struct xlr_fmn_info *fmn_info)
+{
+ int bkt;
+
+ pr_info("Bucket size :\n");
+ pr_info("Station\t: Size\n");
+ for (bkt = 0; bkt < 16; bkt++)
+ pr_info(" %d %d %d %d %d %d %d %d\n",
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 0],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 1],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 2],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 3],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 4],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 5],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 6],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 7]);
+ pr_info("\n");
+
+ pr_info("Credits distribution :\n");
+ pr_info("Station\t: Size\n");
+ for (bkt = 0; bkt < 16; bkt++)
+ pr_info(" %d %d %d %d %d %d %d %d\n",
+ fmn_info->credit_config[(bkt * 8) + 0],
+ fmn_info->credit_config[(bkt * 8) + 1],
+ fmn_info->credit_config[(bkt * 8) + 2],
+ fmn_info->credit_config[(bkt * 8) + 3],
+ fmn_info->credit_config[(bkt * 8) + 4],
+ fmn_info->credit_config[(bkt * 8) + 5],
+ fmn_info->credit_config[(bkt * 8) + 6],
+ fmn_info->credit_config[(bkt * 8) + 7]);
+ pr_info("\n");
+}
+
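+/*
+ * Sanity check: for every bucket, the total credits handed out to all
+ * senders (CPUs and devices) must not exceed the bucket size.
+ */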
+static void check_credit_distribution(void)
+{
+ struct xlr_board_fmn_config *cfg = &xlr_board_fmn_config;
+ int bkt, n, total_credits, ncores;
+
+ ncores = hweight32(nlm_current_node()->coremask);
+ for (bkt = 0; bkt < 128; bkt++) {
+ total_credits = 0;
+ for (n = 0; n < ncores; n++)
+ total_credits += cfg->cpu[n].credit_config[bkt];
+ total_credits += cfg->gmac[0].credit_config[bkt];
+ total_credits += cfg->gmac[1].credit_config[bkt];
+ total_credits += cfg->dma.credit_config[bkt];
+ total_credits += cfg->cmp.credit_config[bkt];
+ total_credits += cfg->sae.credit_config[bkt];
+ total_credits += cfg->xgmac[0].credit_config[bkt];
+ total_credits += cfg->xgmac[1].credit_config[bkt];
+ if (total_credits > cfg->bucket_size[bkt])
+ pr_err("ERROR: Bucket %d: credits (%d) > size (%d)\n",
+ bkt, total_credits, cfg->bucket_size[bkt]);
+ }
+ pr_info("Credit distribution complete.\n");
+}
+
+/**
+ * Configure bucket size and credits for a device. 'size' is the size of
+ * the buckets for the device. This size is distributed among all the CPUs
+ * so that all of them can send messages to the device.
+ *
+ * The device is also given 'cpu_credits' to send messages to the CPUs
+ *
+ * @dev_info: FMN information structure for the device
+ * @start_stn_id: Starting station id of dev_info
+ * @end_stn_id: End station id of dev_info
+ * @num_buckets: Total number of buckets for dev_info
+ * @cpu_credits: Credits given to the device for sending messages to each CPU
+ * @size: Size of each bucket in the device station
+ */
+static void setup_fmn_cc(struct xlr_fmn_info *dev_info, int start_stn_id,
+ int end_stn_id, int num_buckets, int cpu_credits, int size)
+{
+ int i, j, num_core, n, credits_per_cpu;
+ struct xlr_fmn_info *cpu = xlr_board_fmn_config.cpu;
+
+ num_core = hweight32(nlm_current_node()->coremask);
+ dev_info->num_buckets = num_buckets;
+ dev_info->start_stn_id = start_stn_id;
+ dev_info->end_stn_id = end_stn_id;
+
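+	/*
+	 * For 3-core parts, divide as if there were 4 cores; the
+	 * leftover credits are redistributed below.
+	 */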
+ n = num_core;
+ if (num_core == 3)
+ n = 4;
+
+ for (i = start_stn_id; i <= end_stn_id; i++) {
+ xlr_board_fmn_config.bucket_size[i] = size;
+
+		/* Divide the device credits equally among the CPUs */
+ credits_per_cpu = size / n;
+ for (j = 0; j < num_core; j++)
+ cpu[j].credit_config[i] = credits_per_cpu;
+
+ /* credits left to distribute */
+ credits_per_cpu = size - (credits_per_cpu * num_core);
+
+ /* distribute the remaining credits (if any), among cores */
+ for (j = 0; (j < num_core) && (credits_per_cpu >= 4); j++) {
+ cpu[j].credit_config[i] += 4;
+ credits_per_cpu -= 4;
+ }
+ }
+
+ /* Distributing cpu per bucket credits to devices */
+ for (i = 0; i < num_core; i++) {
+ for (j = 0; j < FMN_CORE_NBUCKETS; j++)
+ dev_info->credit_config[(i * 8) + j] = cpu_credits;
+ }
+}
+
+/*
+ * Each core has 256 slots and 8 buckets;
+ * configure the 8 buckets with 32 slots each.
+ */
+static void setup_cpu_fmninfo(struct xlr_fmn_info *cpu, int num_core)
+{
+ int i, j;
+
+ for (i = 0; i < num_core; i++) {
+ cpu[i].start_stn_id = (8 * i);
+ cpu[i].end_stn_id = (8 * i + 8);
+
+ for (j = cpu[i].start_stn_id; j < cpu[i].end_stn_id; j++)
+ xlr_board_fmn_config.bucket_size[j] = 32;
+ }
+}
+
+/**
+ * Set up the FMN details for each device according to the devices available
+ * in each variant of the XLR/XLS processor
+ */
+void xlr_board_info_setup(void)
+{
+ struct xlr_fmn_info *cpu = xlr_board_fmn_config.cpu;
+ struct xlr_fmn_info *gmac = xlr_board_fmn_config.gmac;
+ struct xlr_fmn_info *xgmac = xlr_board_fmn_config.xgmac;
+ struct xlr_fmn_info *dma = &xlr_board_fmn_config.dma;
+ struct xlr_fmn_info *cmp = &xlr_board_fmn_config.cmp;
+ struct xlr_fmn_info *sae = &xlr_board_fmn_config.sae;
+ int processor_id, num_core;
+
+ num_core = hweight32(nlm_current_node()->coremask);
+ processor_id = read_c0_prid() & PRID_IMP_MASK;
+
+ setup_cpu_fmninfo(cpu, num_core);
+ switch (processor_id) {
+ case PRID_IMP_NETLOGIC_XLS104:
+ case PRID_IMP_NETLOGIC_XLS108:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 16, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 8, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 8, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLS204:
+ case PRID_IMP_NETLOGIC_XLS208:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 16, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 8, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 8, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLS404:
+ case PRID_IMP_NETLOGIC_XLS408:
+ case PRID_IMP_NETLOGIC_XLS404B:
+ case PRID_IMP_NETLOGIC_XLS408B:
+ case PRID_IMP_NETLOGIC_XLS416B:
+ case PRID_IMP_NETLOGIC_XLS608B:
+ case PRID_IMP_NETLOGIC_XLS616B:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 8, 32);
+ setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0,
+ FMN_STNID_GMAC1_TX3, 8, 8, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 4, 64);
+ setup_fmn_cc(cmp, FMN_STNID_CMP_0,
+ FMN_STNID_CMP_3, 4, 4, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 8, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLS412B:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 8, 32);
+ setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0,
+ FMN_STNID_GMAC1_TX3, 8, 8, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 4, 64);
+ setup_fmn_cc(cmp, FMN_STNID_CMP_0,
+ FMN_STNID_CMP_3, 4, 4, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 8, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLR308:
+ case PRID_IMP_NETLOGIC_XLR308C:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 16, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 8, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 4, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLR532:
+ case PRID_IMP_NETLOGIC_XLR532C:
+ case PRID_IMP_NETLOGIC_XLR516C:
+ case PRID_IMP_NETLOGIC_XLR508C:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 16, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 8, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 4, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLR732:
+ case PRID_IMP_NETLOGIC_XLR716:
+ setup_fmn_cc(&xgmac[0], FMN_STNID_XMAC0_00_TX,
+ FMN_STNID_XMAC0_15_TX, 8, 0, 32);
+ setup_fmn_cc(&xgmac[1], FMN_STNID_XMAC1_00_TX,
+ FMN_STNID_XMAC1_15_TX, 8, 0, 32);
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 24, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 4, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 4, 128);
+ break;
+ default:
+ pr_err("Unknown CPU with processor ID [%d]\n", processor_id);
+ pr_err("Error: Cannot initialize FMN credits.\n");
+ }
+
+ check_credit_distribution();
+
+#if 0 /* debug */
+ print_credit_config(&cpu[0]);
+ print_credit_config(&gmac[0]);
+#endif
+}
diff --git a/arch/mips/netlogic/xlr/fmn.c b/arch/mips/netlogic/xlr/fmn.c
new file mode 100644
index 000000000..f90303f31
--- /dev/null
+++ b/arch/mips/netlogic/xlr/fmn.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irqreturn.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/mipsregs.h>
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/xlr/fmn.h>
+#include <asm/netlogic/common.h>
+
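+/*
+ * Program the 8 credit-config registers for destination bucket group
+ * 'dest' (buckets dest*8 .. dest*8+7) from the given credit configuration.
+ */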
+#define COP2_CC_INIT_CPU_DEST(dest, conf) \
+do { \
+ nlm_write_c2_cc##dest(0, conf[(dest * 8) + 0]); \
+ nlm_write_c2_cc##dest(1, conf[(dest * 8) + 1]); \
+ nlm_write_c2_cc##dest(2, conf[(dest * 8) + 2]); \
+ nlm_write_c2_cc##dest(3, conf[(dest * 8) + 3]); \
+ nlm_write_c2_cc##dest(4, conf[(dest * 8) + 4]); \
+ nlm_write_c2_cc##dest(5, conf[(dest * 8) + 5]); \
+ nlm_write_c2_cc##dest(6, conf[(dest * 8) + 6]); \
+ nlm_write_c2_cc##dest(7, conf[(dest * 8) + 7]); \
+} while (0)
+
+struct fmn_message_handler {
+ void (*action)(int, int, int, int, struct nlm_fmn_msg *, void *);
+ void *arg;
+} msg_handlers[128];
+
+/*
+ * FMN interrupt handler. We configure the FMN so that any messages in
+ * any of the CPU buckets will trigger an interrupt on the CPU.
+ * The message can be from any device on the FMN (like NAE/SAE/DMA).
+ * The source station id is used to figure out which of the registered
+ * handlers have to be called.
+ */
+static irqreturn_t fmn_message_handler(int irq, void *data)
+{
+ struct fmn_message_handler *hndlr;
+ int bucket, rv;
+ int size = 0, code = 0, src_stnid = 0;
+ struct nlm_fmn_msg msg;
+ uint32_t mflags, bkt_status;
+
+ mflags = nlm_cop2_enable_irqsave();
+ /* Disable message ring interrupt */
+ nlm_fmn_setup_intr(irq, 0);
+ while (1) {
+		/*
+		 * 8 buckets per core, bits [24:31]: each bit represents one
+		 * bucket, and a bit is zero if the bucket is not empty.
+		 */
+ bkt_status = (nlm_read_c2_status0() >> 24) & 0xff;
+ if (bkt_status == 0xff)
+ break;
+ for (bucket = 0; bucket < 8; bucket++) {
+ /* Continue on empty bucket */
+ if (bkt_status & (1 << bucket))
+ continue;
+ rv = nlm_fmn_receive(bucket, &size, &code, &src_stnid,
+ &msg);
+ if (rv != 0)
+ continue;
+
+ hndlr = &msg_handlers[src_stnid];
+ if (hndlr->action == NULL)
+ pr_warn("No msgring handler for stnid %d\n",
+ src_stnid);
+ else {
+ nlm_cop2_disable_irqrestore(mflags);
+ hndlr->action(bucket, src_stnid, size, code,
+ &msg, hndlr->arg);
+ mflags = nlm_cop2_enable_irqsave();
+ }
+ }
+ }
+ /* Enable message ring intr, to any thread in core */
+ nlm_fmn_setup_intr(irq, (1 << nlm_threads_per_core) - 1);
+ nlm_cop2_disable_irqrestore(mflags);
+ return IRQ_HANDLED;
+}
+
+void xlr_percpu_fmn_init(void)
+{
+ struct xlr_fmn_info *cpu_fmn_info;
+ int *bucket_sizes;
+ uint32_t flags;
+ int id;
+
+ BUG_ON(nlm_thread_id() != 0);
+ id = nlm_core_id();
+
+ bucket_sizes = xlr_board_fmn_config.bucket_size;
+ cpu_fmn_info = &xlr_board_fmn_config.cpu[id];
+ flags = nlm_cop2_enable_irqsave();
+
+ /* Setup bucket sizes for the core. */
+ nlm_write_c2_bucksize(0, bucket_sizes[id * 8 + 0]);
+ nlm_write_c2_bucksize(1, bucket_sizes[id * 8 + 1]);
+ nlm_write_c2_bucksize(2, bucket_sizes[id * 8 + 2]);
+ nlm_write_c2_bucksize(3, bucket_sizes[id * 8 + 3]);
+ nlm_write_c2_bucksize(4, bucket_sizes[id * 8 + 4]);
+ nlm_write_c2_bucksize(5, bucket_sizes[id * 8 + 5]);
+ nlm_write_c2_bucksize(6, bucket_sizes[id * 8 + 6]);
+ nlm_write_c2_bucksize(7, bucket_sizes[id * 8 + 7]);
+
+ /*
+ * For sending FMN messages, we need credits on the destination
+ * bucket. Program the credits this core has on the 128 possible
+ * destination buckets.
+	 * We cannot use a loop here, because the first argument has
+ * to be a constant integer value.
+ */
+ COP2_CC_INIT_CPU_DEST(0, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(1, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(2, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(3, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(4, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(5, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(6, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(7, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(8, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(9, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(10, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(11, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(12, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(13, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(14, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(15, cpu_fmn_info->credit_config);
+
+ /* enable FMN interrupts on this CPU */
+ nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1);
+ nlm_cop2_disable_irqrestore(flags);
+}
+
+
+/*
+ * Register an FMN message handler for a range of source station ids
+ * @start_stnid: first source station id
+ * @end_stnid: last source station id
+ * @action: handler function pointer
+ * @arg: argument passed to the handler
+ */
+int nlm_register_fmn_handler(int start_stnid, int end_stnid,
+ void (*action)(int, int, int, int, struct nlm_fmn_msg *, void *),
+ void *arg)
+{
+ int sstnid;
+
+ for (sstnid = start_stnid; sstnid <= end_stnid; sstnid++) {
+ msg_handlers[sstnid].arg = arg;
+ smp_wmb();
+ msg_handlers[sstnid].action = action;
+ }
+ pr_debug("Registered FMN msg handler for stnid %d-%d\n",
+ start_stnid, end_stnid);
+ return 0;
+}
+
+void nlm_setup_fmn_irq(void)
+{
+ uint32_t flags;
+
+ /* request irq only once */
+ if (request_irq(IRQ_FMN, fmn_message_handler, IRQF_PERCPU, "fmn", NULL))
+ pr_err("Failed to request irq %d (fmn)\n", IRQ_FMN);
+
+ flags = nlm_cop2_enable_irqsave();
+ nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1);
+ nlm_cop2_disable_irqrestore(flags);
+}
diff --git a/arch/mips/netlogic/xlr/platform-flash.c b/arch/mips/netlogic/xlr/platform-flash.c
new file mode 100644
index 000000000..cf9162284
--- /dev/null
+++ b/arch/mips/netlogic/xlr/platform-flash.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2011, Netlogic Microsystems.
+ * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/resource.h>
+#include <linux/spi/flash.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/physmap.h>
+#include <linux/mtd/platnand.h>
+
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/flash.h>
+#include <asm/netlogic/xlr/bridge.h>
+#include <asm/netlogic/xlr/gpio.h>
+#include <asm/netlogic/xlr/xlr.h>
+
+/*
+ * Default NOR partition layout
+ */
+static struct mtd_partition xlr_nor_parts[] = {
+ {
+ .name = "User FS",
+ .offset = 0x800000,
+ .size = MTDPART_SIZ_FULL,
+ }
+};
+
+/*
+ * Default NAND partition layout
+ */
+static struct mtd_partition xlr_nand_parts[] = {
+ {
+ .name = "Root Filesystem",
+ .offset = 64 * 64 * 2048,
+ .size = 432 * 64 * 2048,
+ },
+ {
+ .name = "Home Filesystem",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+/* Use PHYSMAP flash for NOR */
+struct physmap_flash_data xlr_nor_data = {
+ .width = 2,
+ .parts = xlr_nor_parts,
+ .nr_parts = ARRAY_SIZE(xlr_nor_parts),
+};
+
+static struct resource xlr_nor_res[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device xlr_nor_dev = {
+ .name = "physmap-flash",
+ .dev = {
+ .platform_data = &xlr_nor_data,
+ },
+ .num_resources = ARRAY_SIZE(xlr_nor_res),
+ .resource = xlr_nor_res,
+};
+
+/*
+ * Use "gen_nand" driver for NAND flash
+ *
+ * There seems to be no way to store a private pointer containing
+ * platform-specific info in the gen_nand driver. We will use a global
+ * struct for now, since we currently have only one NAND chip per board.
+ */
+struct xlr_nand_flash_priv {
+ int cs;
+ uint64_t flash_mmio;
+};
+
+static struct xlr_nand_flash_priv nand_priv;
+
+static void xlr_nand_ctrl(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
+{
+ if (ctrl & NAND_CLE)
+ nlm_write_reg(nand_priv.flash_mmio,
+ FLASH_NAND_CLE(nand_priv.cs), cmd);
+ else if (ctrl & NAND_ALE)
+ nlm_write_reg(nand_priv.flash_mmio,
+ FLASH_NAND_ALE(nand_priv.cs), cmd);
+}
+
+struct platform_nand_data xlr_nand_data = {
+ .chip = {
+ .nr_chips = 1,
+ .nr_partitions = ARRAY_SIZE(xlr_nand_parts),
+ .chip_delay = 50,
+ .partitions = xlr_nand_parts,
+ },
+ .ctrl = {
+ .cmd_ctrl = xlr_nand_ctrl,
+ },
+};
+
+static struct resource xlr_nand_res[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device xlr_nand_dev = {
+ .name = "gen_nand",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(xlr_nand_res),
+ .resource = xlr_nand_res,
+ .dev = {
+ .platform_data = &xlr_nand_data,
+ }
+};
+
+/*
+ * XLR/XLS supports up to 8 devices on its FLASH interface. The value in
+ * FLASH_BAR (on the MEM/IO bridge) gives the base for mapping all the
+ * flash devices.
+ * Under this, each flash device has an offset and size given by the
+ * CSBASE_ADDR and CSBASE_MASK registers for the device.
+ *
+ * The CSBASE_ registers are expected to be set up by the bootloader.
+ */
+static void setup_flash_resource(uint64_t flash_mmio,
+ uint64_t flash_map_base, int cs, struct resource *res)
+{
+ u32 base, mask;
+
+ base = nlm_read_reg(flash_mmio, FLASH_CSBASE_ADDR(cs));
+ mask = nlm_read_reg(flash_mmio, FLASH_CSADDR_MASK(cs));
+
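+	/* CSBASE_ADDR and CSBASE_MASK are in units of 64 KB */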
+ res->start = flash_map_base + ((unsigned long)base << 16);
+ res->end = res->start + (mask + 1) * 64 * 1024;
+}
+
+static int __init xlr_flash_init(void)
+{
+ uint64_t gpio_mmio, flash_mmio, flash_map_base;
+ u32 gpio_resetcfg, flash_bar;
+ int cs, boot_nand, boot_nor;
+
+	/* Flash address bits 39:24 are in the bridge flash BAR */
+ flash_bar = nlm_read_reg(nlm_io_base, BRIDGE_FLASH_BAR);
+ flash_map_base = (flash_bar & 0xffff0000) << 8;
+
+ gpio_mmio = nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET);
+ flash_mmio = nlm_mmio_base(NETLOGIC_IO_FLASH_OFFSET);
+
+ /* Get the chip reset config */
+ gpio_resetcfg = nlm_read_reg(gpio_mmio, GPIO_PWRON_RESET_CFG_REG);
+
+ /* Check for boot flash type */
+ boot_nor = boot_nand = 0;
+ if (nlm_chip_is_xls()) {
+ /* On XLS, check boot from NAND bit (GPIO reset reg bit 16) */
+ if (gpio_resetcfg & (1 << 16))
+ boot_nand = 1;
+
+		/* check boot from PCMCIA (GPIO reset reg bit 15) */
+ if ((gpio_resetcfg & (1 << 15)) == 0)
+ boot_nor = 1; /* not set, booted from NOR */
+ } else { /* XLR */
+ /* check boot from PCMCIA (bit 16 in GPIO reset on XLR) */
+ if ((gpio_resetcfg & (1 << 16)) == 0)
+ boot_nor = 1; /* not set, booted from NOR */
+ }
+
+ /* boot flash at chip select 0 */
+ cs = 0;
+
+ if (boot_nand) {
+ nand_priv.cs = cs;
+ nand_priv.flash_mmio = flash_mmio;
+ setup_flash_resource(flash_mmio, flash_map_base, cs,
+ xlr_nand_res);
+
+ /* Initialize NAND flash at CS 0 */
+ nlm_write_reg(flash_mmio, FLASH_CSDEV_PARM(cs),
+ FLASH_NAND_CSDEV_PARAM);
+ nlm_write_reg(flash_mmio, FLASH_CSTIME_PARMA(cs),
+ FLASH_NAND_CSTIME_PARAMA);
+ nlm_write_reg(flash_mmio, FLASH_CSTIME_PARMB(cs),
+ FLASH_NAND_CSTIME_PARAMB);
+
+ pr_info("ChipSelect %d: NAND Flash %pR\n", cs, xlr_nand_res);
+ return platform_device_register(&xlr_nand_dev);
+ }
+
+ if (boot_nor) {
+ setup_flash_resource(flash_mmio, flash_map_base, cs,
+ xlr_nor_res);
+ pr_info("ChipSelect %d: NOR Flash %pR\n", cs, xlr_nor_res);
+ return platform_device_register(&xlr_nor_dev);
+ }
+ return 0;
+}
+
+arch_initcall(xlr_flash_init);
diff --git a/arch/mips/netlogic/xlr/platform.c b/arch/mips/netlogic/xlr/platform.c
new file mode 100644
index 000000000..4785932af
--- /dev/null
+++ b/arch/mips/netlogic/xlr/platform.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2011, Netlogic Microsystems.
+ * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/resource.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_reg.h>
+#include <linux/i2c.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/ohci_pdriver.h>
+
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/xlr.h>
+
+static unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset)
+{
+ uint64_t uartbase;
+ unsigned int value;
+
+ /* sign extend to 64 bits, if needed */
+ uartbase = (uint64_t)(long)p->membase;
+ value = nlm_read_reg(uartbase, offset);
+
+ /* See XLR/XLS errata */
+ if (offset == UART_MSR)
+ value ^= 0xF0;
+ else if (offset == UART_MCR)
+ value ^= 0x3;
+
+ return value;
+}
+
+static void nlm_xlr_uart_out(struct uart_port *p, int offset, int value)
+{
+ uint64_t uartbase;
+
+ /* sign extend to 64 bits, if needed */
+ uartbase = (uint64_t)(long)p->membase;
+
+ /* See XLR/XLS errata */
+ if (offset == UART_MSR)
+ value ^= 0xF0;
+ else if (offset == UART_MCR)
+ value ^= 0x3;
+
+ nlm_write_reg(uartbase, offset, value);
+}
+
+#define PORT(_irq) \
+ { \
+ .irq = _irq, \
+ .regshift = 2, \
+ .iotype = UPIO_MEM32, \
+ .flags = (UPF_SKIP_TEST | \
+ UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF),\
+ .uartclk = PIC_CLK_HZ, \
+ .type = PORT_16550A, \
+ .serial_in = nlm_xlr_uart_in, \
+ .serial_out = nlm_xlr_uart_out, \
+ }
+
+static struct plat_serial8250_port xlr_uart_data[] = {
+ PORT(PIC_UART_0_IRQ),
+ PORT(PIC_UART_1_IRQ),
+ {},
+};
+
+static struct platform_device uart_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = xlr_uart_data,
+ },
+};
+
+static int __init nlm_uart_init(void)
+{
+ unsigned long uartbase;
+
+ uartbase = (unsigned long)nlm_mmio_base(NETLOGIC_IO_UART_0_OFFSET);
+ xlr_uart_data[0].membase = (void __iomem *)uartbase;
+ xlr_uart_data[0].mapbase = CPHYSADDR(uartbase);
+
+ uartbase = (unsigned long)nlm_mmio_base(NETLOGIC_IO_UART_1_OFFSET);
+ xlr_uart_data[1].membase = (void __iomem *)uartbase;
+ xlr_uart_data[1].mapbase = CPHYSADDR(uartbase);
+
+ return platform_device_register(&uart_device);
+}
+
+arch_initcall(nlm_uart_init);
+
+#ifdef CONFIG_USB
+/* Platform USB devices, only on XLS chips */
+static u64 xls_usb_dmamask = ~(u32)0;
+#define USB_PLATFORM_DEV(n, i, irq) \
+ { \
+ .name = n, \
+ .id = i, \
+ .num_resources = 2, \
+ .dev = { \
+ .dma_mask = &xls_usb_dmamask, \
+ .coherent_dma_mask = 0xffffffff, \
+ }, \
+ .resource = (struct resource[]) { \
+ { \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = irq, \
+ .end = irq, \
+ .flags = IORESOURCE_IRQ, \
+ }, \
+ }, \
+ }
+
+static struct usb_ehci_pdata xls_usb_ehci_pdata = {
+ .caps_offset = 0,
+};
+
+static struct usb_ohci_pdata xls_usb_ohci_pdata;
+
+static struct platform_device xls_usb_ehci_device =
+ USB_PLATFORM_DEV("ehci-platform", 0, PIC_USB_IRQ);
+static struct platform_device xls_usb_ohci_device_0 =
+ USB_PLATFORM_DEV("ohci-platform", 1, PIC_USB_IRQ);
+static struct platform_device xls_usb_ohci_device_1 =
+ USB_PLATFORM_DEV("ohci-platform", 2, PIC_USB_IRQ);
+
+static struct platform_device *xls_platform_devices[] = {
+ &xls_usb_ehci_device,
+ &xls_usb_ohci_device_0,
+ &xls_usb_ohci_device_1,
+};
+
+int xls_platform_usb_init(void)
+{
+ uint64_t usb_mmio, gpio_mmio;
+ unsigned long memres;
+ uint32_t val;
+
+ if (!nlm_chip_is_xls())
+ return 0;
+
+ gpio_mmio = nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET);
+ usb_mmio = nlm_mmio_base(NETLOGIC_IO_USB_1_OFFSET);
+
+ /* Clear Rogue Phy INTs */
+ nlm_write_reg(usb_mmio, 49, 0x10000000);
+ /* Enable all interrupts */
+ nlm_write_reg(usb_mmio, 50, 0x1f000000);
+
+ /* Enable ports */
+ nlm_write_reg(usb_mmio, 1, 0x07000500);
+
+ val = nlm_read_reg(gpio_mmio, 21);
+ if (((val >> 22) & 0x01) == 0) {
+ pr_info("Detected USB Device mode - Not supported!\n");
+ nlm_write_reg(usb_mmio, 0, 0x01000000);
+ return 0;
+ }
+
+ pr_info("Detected USB Host mode - Adding XLS USB devices.\n");
+ /* Clear reset, host mode */
+ nlm_write_reg(usb_mmio, 0, 0x02000000);
+
+ /* Memory resource for various XLS usb ports */
+ usb_mmio = nlm_mmio_base(NETLOGIC_IO_USB_0_OFFSET);
+ memres = CPHYSADDR((unsigned long)usb_mmio);
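+	/* EHCI, OHCI0 and OHCI1 register windows are 0x400 bytes each */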
+ xls_usb_ehci_device.resource[0].start = memres;
+ xls_usb_ehci_device.resource[0].end = memres + 0x400 - 1;
+ xls_usb_ehci_device.dev.platform_data = &xls_usb_ehci_pdata;
+
+ memres += 0x400;
+ xls_usb_ohci_device_0.resource[0].start = memres;
+ xls_usb_ohci_device_0.resource[0].end = memres + 0x400 - 1;
+ xls_usb_ohci_device_0.dev.platform_data = &xls_usb_ohci_pdata;
+
+ memres += 0x400;
+ xls_usb_ohci_device_1.resource[0].start = memres;
+ xls_usb_ohci_device_1.resource[0].end = memres + 0x400 - 1;
+ xls_usb_ohci_device_1.dev.platform_data = &xls_usb_ohci_pdata;
+
+ return platform_add_devices(xls_platform_devices,
+ ARRAY_SIZE(xls_platform_devices));
+}
+
+arch_initcall(xls_platform_usb_init);
+#endif
+
+#ifdef CONFIG_I2C
+static struct i2c_board_info nlm_i2c_board_info1[] __initdata = {
+ /* All XLR boards have this RTC and Max6657 Temp Chip */
+ [0] = {
+ .type = "ds1374",
+ .addr = 0x68
+ },
+ [1] = {
+ .type = "lm90",
+ .addr = 0x4c
+ },
+};
+
+static struct resource i2c_resources[] = {
+ [0] = {
+ .start = 0, /* filled at init */
+ .end = 0,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device nlm_xlr_i2c_1 = {
+ .name = "xlr-i2cbus",
+ .id = 1,
+ .num_resources = 1,
+ .resource = i2c_resources,
+};
+
+static int __init nlm_i2c_init(void)
+{
+ int err = 0;
+ unsigned int offset;
+
+ /* I2C bus 0 does not have any useful devices, configure only bus 1 */
+ offset = NETLOGIC_IO_I2C_1_OFFSET;
+ nlm_xlr_i2c_1.resource[0].start = CPHYSADDR(nlm_mmio_base(offset));
+ nlm_xlr_i2c_1.resource[0].end = nlm_xlr_i2c_1.resource[0].start + 0xfff;
+
+ platform_device_register(&nlm_xlr_i2c_1);
+
+ err = i2c_register_board_info(1, nlm_i2c_board_info1,
+ ARRAY_SIZE(nlm_i2c_board_info1));
+ if (err < 0)
+ pr_err("nlm-i2c: cannot register board I2C devices\n");
+ return err;
+}
+
+arch_initcall(nlm_i2c_init);
+#endif
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
new file mode 100644
index 000000000..627e88101
--- /dev/null
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/serial_8250.h>
+#include <linux/memblock.h>
+#include <linux/pm.h>
+
+#include <asm/idle.h>
+#include <asm/reboot.h>
+#include <asm/time.h>
+#include <asm/bootinfo.h>
+
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/psb-bootinfo.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#include <asm/netlogic/xlr/xlr.h>
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/gpio.h>
+#include <asm/netlogic/xlr/fmn.h>
+
+uint64_t nlm_io_base = DEFAULT_NETLOGIC_IO_BASE;
+struct psb_info nlm_prom_info;
+
+/* default to uniprocessor */
+unsigned int nlm_threads_per_core = 1;
+struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
+cpumask_t nlm_cpumask = CPU_MASK_CPU0;
+
+static void nlm_linux_exit(void)
+{
+ uint64_t gpiobase;
+
+ gpiobase = nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET);
+ /* trigger a chip reset by writing 1 to GPIO_SWRESET_REG */
+ nlm_write_reg(gpiobase, GPIO_SWRESET_REG, 1);
+ for ( ; ; )
+ cpu_wait();
+}
+
+void __init plat_mem_setup(void)
+{
+ _machine_restart = (void (*)(char *))nlm_linux_exit;
+ _machine_halt = nlm_linux_exit;
+ pm_power_off = nlm_linux_exit;
+}
+
+const char *get_system_type(void)
+{
+ return "Netlogic XLR/XLS Series";
+}
+
+unsigned int nlm_get_cpu_frequency(void)
+{
+ return (unsigned int)nlm_prom_info.cpu_frequency;
+}
+
+void __init prom_free_prom_memory(void)
+{
+ /* Nothing yet */
+}
+
+void nlm_percpu_init(int hwcpuid)
+{
+ if (hwcpuid % 4 == 0)
+ xlr_percpu_fmn_init();
+}
+
+static void __init build_arcs_cmdline(int *argv)
+{
+ int i, remain, len;
+ char *arg;
+
+ remain = sizeof(arcs_cmdline) - 1;
+ arcs_cmdline[0] = '\0';
+ for (i = 0; argv[i] != 0; i++) {
+ arg = (char *)(long)argv[i];
+ len = strlen(arg);
+ if (len + 1 > remain)
+ break;
+ strcat(arcs_cmdline, arg);
+ strcat(arcs_cmdline, " ");
+ remain -= len + 1;
+ }
+
+ /* Add the default options here */
+ if ((strstr(arcs_cmdline, "console=")) == NULL) {
+ arg = "console=ttyS0,38400 ";
+ len = strlen(arg);
+ if (len > remain)
+ goto fail;
+ strcat(arcs_cmdline, arg);
+ remain -= len;
+ }
+#ifdef CONFIG_BLK_DEV_INITRD
+ if ((strstr(arcs_cmdline, "rdinit=")) == NULL) {
+ arg = "rdinit=/sbin/init ";
+ len = strlen(arg);
+ if (len > remain)
+ goto fail;
+ strcat(arcs_cmdline, arg);
+ remain -= len;
+ }
+#endif
+ return;
+fail:
+ panic("Cannot add %s, command line too big!", arg);
+}
+
+static void prom_add_memory(void)
+{
+ struct nlm_boot_mem_map *bootm;
+ u64 start, size;
+	u64 pref_backup = 512;	/* avoid prefetch walking beyond the end */
+ int i;
+
+ bootm = (void *)(long)nlm_prom_info.psb_mem_map;
+ for (i = 0; i < bootm->nr_map; i++) {
+ if (bootm->map[i].type != NLM_BOOT_MEM_RAM)
+ continue;
+ start = bootm->map[i].addr;
+ size = bootm->map[i].size;
+
+		/* Workaround for using bootloader memory */
+ if (i == 0 && start == 0 && size == 0x0c000000)
+ size = 0x0ff00000;
+
+ memblock_add(start, size - pref_backup);
+ }
+}
+
+static void nlm_init_node(void)
+{
+ struct nlm_soc_info *nodep;
+
+ nodep = nlm_current_node();
+ nodep->picbase = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET);
+ nodep->ebase = read_c0_ebase() & MIPS_EBASE_BASE;
+ spin_lock_init(&nodep->piclock);
+}
+
+void __init prom_init(void)
+{
+ int *argv, *envp; /* passed as 32 bit ptrs */
+ struct psb_info *prom_infop;
+ void *reset_vec;
+#ifdef CONFIG_SMP
+ int i;
+#endif
+
+ /* truncate to 32 bit and sign extend all args */
+ argv = (int *)(long)(int)fw_arg1;
+ envp = (int *)(long)(int)fw_arg2;
+ prom_infop = (struct psb_info *)(long)(int)fw_arg3;
+
+ nlm_prom_info = *prom_infop;
+ nlm_init_node();
+
+ /* Update reset entry point with CPU init code */
+ reset_vec = (void *)CKSEG1ADDR(RESET_VEC_PHYS);
+ memset(reset_vec, 0, RESET_VEC_SIZE);
+ memcpy(reset_vec, (void *)nlm_reset_entry,
+ (nlm_reset_entry_end - nlm_reset_entry));
+
+ build_arcs_cmdline(argv);
+ prom_add_memory();
+
+#ifdef CONFIG_SMP
+ for (i = 0; i < 32; i++)
+ if (nlm_prom_info.online_cpu_map & (1 << i))
+ cpumask_set_cpu(i, &nlm_cpumask);
+ nlm_wakeup_secondary_cpus();
+ register_smp_ops(&nlm_smp_ops);
+#endif
+ xlr_board_info_setup();
+ xlr_percpu_fmn_init();
+}
diff --git a/arch/mips/netlogic/xlr/wakeup.c b/arch/mips/netlogic/xlr/wakeup.c
new file mode 100644
index 000000000..d61cba1e9
--- /dev/null
+++ b/arch/mips/netlogic/xlr/wakeup.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/delay.h>
+#include <linux/threads.h>
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/mipsregs.h>
+#include <asm/addrspace.h>
+#include <asm/string.h>
+
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/mips-extns.h>
+
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+
+int xlr_wakeup_secondary_cpus(void)
+{
+ struct nlm_soc_info *nodep;
+ unsigned int i, j, boot_cpu;
+ volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);
+
+ /*
+	 * In case of RMI boot, hit the cores with an NMI to bring them
+	 * from the bootloader into Linux code.
+ */
+ nodep = nlm_get_node(0);
+ boot_cpu = hard_smp_processor_id();
+ nlm_set_nmi_handler(nlm_rmiboot_preboot);
+ for (i = 0; i < NR_CPUS; i++) {
+ if (i == boot_cpu || !cpumask_test_cpu(i, &nlm_cpumask))
+ continue;
+ nlm_pic_send_ipi(nodep->picbase, i, 1, 1); /* send NMI */
+ }
+
+ /* Fill up the coremask early */
+ nodep->coremask = 1;
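+	/*
+	 * Poll thread 0 of each remaining core (for up to ~10 seconds)
+	 * before marking the core as present.
+	 */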
+ for (i = 1; i < nlm_cores_per_node(); i++) {
+ for (j = 1000000; j > 0; j--) {
+ if (cpu_ready[i * NLM_THREADS_PER_CORE])
+ break;
+ udelay(10);
+ }
+ if (j != 0)
+ nodep->coremask |= (1u << i);
+ else
+ pr_err("Failed to wakeup core %d\n", i);
+ }
+
+ return 0;
+}
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
new file mode 100644
index 000000000..e10f216d0
--- /dev/null
+++ b/arch/mips/oprofile/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+ oprof.o cpu_buffer.o buffer_sync.o \
+ event_buffer.o oprofile_files.o \
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
+oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
+
+oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o
+oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o
+oprofile-$(CONFIG_CPU_R10000) += op_model_mipsxx.o
+oprofile-$(CONFIG_CPU_SB1) += op_model_mipsxx.o
+oprofile-$(CONFIG_CPU_XLR) += op_model_mipsxx.o
+oprofile-$(CONFIG_CPU_LOONGSON2EF) += op_model_loongson2.o
+oprofile-$(CONFIG_CPU_LOONGSON64) += op_model_loongson3.o
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
new file mode 100644
index 000000000..07d98ba7f
--- /dev/null
+++ b/arch/mips/oprofile/backtrace.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/oprofile.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/stacktrace.h>
+#include <linux/stacktrace.h>
+#include <linux/kernel.h>
+#include <asm/sections.h>
+#include <asm/inst.h>
+
+struct stackframe {
+ unsigned long sp;
+ unsigned long pc;
+ unsigned long ra;
+};
+
+static inline int get_mem(unsigned long addr, unsigned long *result)
+{
+ unsigned long *address = (unsigned long *) addr;
+ if (!access_ok(address, sizeof(unsigned long)))
+ return -1;
+ if (__copy_from_user_inatomic(result, address, sizeof(unsigned long)))
+ return -3;
+ return 0;
+}
+
+/*
+ * These two instruction helpers were taken from process.c
+ */
+static inline int is_ra_save_ins(union mips_instruction *ip)
+{
+ /* sw / sd $ra, offset($sp) */
+ return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op)
+ && ip->i_format.rs == 29 && ip->i_format.rt == 31;
+}
+
+static inline int is_sp_move_ins(union mips_instruction *ip)
+{
+ /* addiu/daddiu sp,sp,-imm */
+ if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
+ return 0;
+ if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
+ return 1;
+ return 0;
+}
+
+/*
+ * Looks for specific instructions that mark the end of a function.
+ * This usually means we ran into the code area of the previous function.
+ */
+static inline int is_end_of_function_marker(union mips_instruction *ip)
+{
+ /* jr ra */
+ if (ip->r_format.func == jr_op && ip->r_format.rs == 31)
+ return 1;
+ /* lui gp */
+ if (ip->i_format.opcode == lui_op && ip->i_format.rt == 28)
+ return 1;
+ return 0;
+}
+
+/*
+ * TODO for userspace stack unwinding:
+ * - handle cases where the stack is adjusted inside a function
+ * (generally doesn't happen)
+ * - find optimal value for max_instr_check
+ * - try to find a better way to handle leaf functions
+ */
+
+static inline int unwind_user_frame(struct stackframe *old_frame,
+ const unsigned int max_instr_check)
+{
+ struct stackframe new_frame = *old_frame;
+ off_t ra_offset = 0;
+ size_t stack_size = 0;
+ unsigned long addr;
+
+ if (old_frame->pc == 0 || old_frame->sp == 0 || old_frame->ra == 0)
+ return -9;
+
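+	/*
+	 * Scan backwards from the PC for the stack-adjust and ra-save
+	 * instructions in the function prologue.
+	 */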
+ for (addr = new_frame.pc; (addr + max_instr_check > new_frame.pc)
+ && (!ra_offset || !stack_size); --addr) {
+ union mips_instruction ip;
+
+ if (get_mem(addr, (unsigned long *) &ip))
+ return -11;
+
+ if (is_sp_move_ins(&ip)) {
+ int stack_adjustment = ip.i_format.simmediate;
+ if (stack_adjustment > 0)
+ /* This marks the end of the previous function,
+ which means we overran. */
+ break;
+ stack_size = (unsigned long) stack_adjustment;
+ } else if (is_ra_save_ins(&ip)) {
+ int ra_slot = ip.i_format.simmediate;
+ if (ra_slot < 0)
+ /* This shouldn't happen. */
+ break;
+ ra_offset = ra_slot;
+ } else if (is_end_of_function_marker(&ip))
+ break;
+ }
+
+ if (!ra_offset || !stack_size)
+ goto done;
+
+ if (ra_offset) {
+ new_frame.ra = old_frame->sp + ra_offset;
+ if (get_mem(new_frame.ra, &(new_frame.ra)))
+ return -13;
+ }
+
+ if (stack_size) {
+ new_frame.sp = old_frame->sp + stack_size;
+ if (get_mem(new_frame.sp, &(new_frame.sp)))
+ return -14;
+ }
+
+ if (new_frame.sp > old_frame->sp)
+ return -2;
+
+done:
+ new_frame.pc = old_frame->ra;
+ *old_frame = new_frame;
+
+ return 0;
+}
+
+static inline void do_user_backtrace(unsigned long low_addr,
+ struct stackframe *frame,
+ unsigned int depth)
+{
+ const unsigned int max_instr_check = 512;
+ const unsigned long high_addr = low_addr + THREAD_SIZE;
+
+ while (depth-- && !unwind_user_frame(frame, max_instr_check)) {
+ oprofile_add_trace(frame->ra);
+ if (frame->sp < low_addr || frame->sp > high_addr)
+ break;
+ }
+}
+
+#ifndef CONFIG_KALLSYMS
+static inline void do_kernel_backtrace(unsigned long low_addr,
+ struct stackframe *frame,
+ unsigned int depth) { }
+#else
+static inline void do_kernel_backtrace(unsigned long low_addr,
+ struct stackframe *frame,
+ unsigned int depth)
+{
+ while (depth-- && frame->pc) {
+ frame->pc = unwind_stack_by_address(low_addr,
+ &(frame->sp),
+ frame->pc,
+ &(frame->ra));
+ oprofile_add_trace(frame->ra);
+ }
+}
+#endif
+
+void notrace op_mips_backtrace(struct pt_regs *const regs, unsigned int depth)
+{
+ struct stackframe frame = { .sp = regs->regs[29],
+ .pc = regs->cp0_epc,
+ .ra = regs->regs[31] };
+ const int userspace = user_mode(regs);
+ const unsigned long low_addr = ALIGN(frame.sp, THREAD_SIZE);
+
+ if (userspace)
+ do_user_backtrace(low_addr, &frame, depth);
+ else
+ do_kernel_backtrace(low_addr, &frame, depth);
+}
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
new file mode 100644
index 000000000..d3996c4c6
--- /dev/null
+++ b/arch/mips/oprofile/common.c
@@ -0,0 +1,147 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005 Ralf Baechle
+ * Copyright (C) 2005 MIPS Technologies, Inc.
+ */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/oprofile.h>
+#include <linux/smp.h>
+#include <asm/cpu-info.h>
+#include <asm/cpu-type.h>
+
+#include "op_impl.h"
+
+extern struct op_mips_model op_model_mipsxx_ops __weak;
+extern struct op_mips_model op_model_loongson2_ops __weak;
+extern struct op_mips_model op_model_loongson3_ops __weak;
+
+static struct op_mips_model *model;
+
+static struct op_counter_config ctr[20];
+
+static int op_mips_setup(void)
+{
+ /* Pre-compute the values to stuff in the hardware registers. */
+ model->reg_setup(ctr);
+
+ /* Configure the registers on all cpus. */
+ on_each_cpu(model->cpu_setup, NULL, 1);
+
+ return 0;
+}
+
+static int op_mips_create_files(struct dentry *root)
+{
+ int i;
+
+ for (i = 0; i < model->num_counters; ++i) {
+ struct dentry *dir;
+ char buf[4];
+
+ snprintf(buf, sizeof buf, "%d", i);
+ dir = oprofilefs_mkdir(root, buf);
+
+ oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
+ oprofilefs_create_ulong(dir, "event", &ctr[i].event);
+ oprofilefs_create_ulong(dir, "count", &ctr[i].count);
+ oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
+ oprofilefs_create_ulong(dir, "user", &ctr[i].user);
+ oprofilefs_create_ulong(dir, "exl", &ctr[i].exl);
+ /* Dummy. */
+ oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
+ }
+
+ return 0;
+}
+
+static int op_mips_start(void)
+{
+ on_each_cpu(model->cpu_start, NULL, 1);
+
+ return 0;
+}
+
+static void op_mips_stop(void)
+{
+ /* Disable performance monitoring for all counters. */
+ on_each_cpu(model->cpu_stop, NULL, 1);
+}
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ struct op_mips_model *lmodel = NULL;
+ int res;
+
+ switch (boot_cpu_type()) {
+ case CPU_5KC:
+ case CPU_M14KC:
+ case CPU_M14KEC:
+ case CPU_20KC:
+ case CPU_24K:
+ case CPU_25KF:
+ case CPU_34K:
+ case CPU_1004K:
+ case CPU_74K:
+ case CPU_1074K:
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ case CPU_I6400:
+ case CPU_M5150:
+ case CPU_LOONGSON32:
+ case CPU_SB1:
+ case CPU_SB1A:
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
+ case CPU_R16000:
+ case CPU_XLR:
+ lmodel = &op_model_mipsxx_ops;
+ break;
+
+ case CPU_LOONGSON2EF:
+ lmodel = &op_model_loongson2_ops;
+ break;
+ case CPU_LOONGSON64:
+ lmodel = &op_model_loongson3_ops;
+ break;
+ }
+
+ /*
+ * Always set the backtrace. This allows unsupported CPU types to still
+ * use timer-based oprofile.
+ */
+ ops->backtrace = op_mips_backtrace;
+
+ if (!lmodel)
+ return -ENODEV;
+
+ res = lmodel->init();
+ if (res)
+ return res;
+
+ model = lmodel;
+
+ ops->create_files = op_mips_create_files;
+ ops->setup = op_mips_setup;
+ //ops->shutdown = op_mips_shutdown;
+ ops->start = op_mips_start;
+ ops->stop = op_mips_stop;
+ ops->cpu_type = lmodel->cpu_type;
+
+ printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
+ lmodel->cpu_type);
+
+ return 0;
+}
+
+void oprofile_arch_exit(void)
+{
+ if (model)
+ model->exit();
+}
diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h
new file mode 100644
index 000000000..a4e758a39
--- /dev/null
+++ b/arch/mips/oprofile/op_impl.h
@@ -0,0 +1,41 @@
+/**
+ * @file arch/mips/oprofile/op_impl.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author Richard Henderson <rth@twiddle.net>
+ */
+
+#ifndef OP_IMPL_H
+#define OP_IMPL_H 1
+
+extern int (*perf_irq)(void);
+
+/* Per-counter configuration as set via oprofilefs. */
+struct op_counter_config {
+ unsigned long enabled;
+ unsigned long event;
+ unsigned long count;
+ /* Dummies because I am too lazy to hack the userspace tools. */
+ unsigned long kernel;
+ unsigned long user;
+ unsigned long exl;
+ unsigned long unit_mask;
+};
+
+/* Per-architecture configuration and hooks. */
+struct op_mips_model {
+ void (*reg_setup) (struct op_counter_config *);
+ void (*cpu_setup) (void *dummy);
+ int (*init)(void);
+ void (*exit)(void);
+ void (*cpu_start)(void *args);
+ void (*cpu_stop)(void *args);
+ char *cpu_type;
+ unsigned char num_counters;
+};
+
+void op_mips_backtrace(struct pt_regs * const regs, unsigned int depth);
+
+#endif
diff --git a/arch/mips/oprofile/op_model_loongson2.c b/arch/mips/oprofile/op_model_loongson2.c
new file mode 100644
index 000000000..b249ec0be
--- /dev/null
+++ b/arch/mips/oprofile/op_model_loongson2.c
@@ -0,0 +1,161 @@
+/*
+ * Loongson2 performance counter driver for oprofile
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Yanhua <yanh@lemote.com>
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/oprofile.h>
+#include <linux/interrupt.h>
+
+#include <loongson.h> /* LOONGSON2_PERFCNT_IRQ */
+#include "op_impl.h"
+
+#define LOONGSON2_CPU_TYPE "mips/loongson2"
+
+#define LOONGSON2_PERFCNT_OVERFLOW (1ULL << 31)
+
+#define LOONGSON2_PERFCTRL_EXL (1UL << 0)
+#define LOONGSON2_PERFCTRL_KERNEL (1UL << 1)
+#define LOONGSON2_PERFCTRL_SUPERVISOR (1UL << 2)
+#define LOONGSON2_PERFCTRL_USER (1UL << 3)
+#define LOONGSON2_PERFCTRL_ENABLE (1UL << 4)
+#define LOONGSON2_PERFCTRL_EVENT(idx, event) \
+ (((event) & 0x0f) << ((idx) ? 9 : 5))
+
+#define read_c0_perfctrl() __read_64bit_c0_register($24, 0)
+#define write_c0_perfctrl(val) __write_64bit_c0_register($24, 0, val)
+#define read_c0_perfcnt() __read_64bit_c0_register($25, 0)
+#define write_c0_perfcnt(val) __write_64bit_c0_register($25, 0, val)
+
+static struct loongson2_register_config {
+ unsigned int ctrl;
+ unsigned long long reset_counter1;
+ unsigned long long reset_counter2;
+ int cnt1_enabled, cnt2_enabled;
+} reg;
+
+static char *oprofid = "LoongsonPerf";
+static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id);
+
+static void reset_counters(void *arg)
+{
+ write_c0_perfctrl(0);
+ write_c0_perfcnt(0);
+}
+
+static void loongson2_reg_setup(struct op_counter_config *cfg)
+{
+ unsigned int ctrl = 0;
+
+ reg.reset_counter1 = 0;
+ reg.reset_counter2 = 0;
+
+ /*
+ * Compute the performance counter ctrl word.
+ * For now, count kernel and user mode.
+ */
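+ /* Seed each counter so that it overflows (bit 31) after "count" events. */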
+ if (cfg[0].enabled) {
+ ctrl |= LOONGSON2_PERFCTRL_EVENT(0, cfg[0].event);
+ reg.reset_counter1 = 0x80000000ULL - cfg[0].count;
+ }
+
+ if (cfg[1].enabled) {
+ ctrl |= LOONGSON2_PERFCTRL_EVENT(1, cfg[1].event);
+ reg.reset_counter2 = 0x80000000ULL - cfg[1].count;
+ }
+
+ if (cfg[0].enabled || cfg[1].enabled) {
+ ctrl |= LOONGSON2_PERFCTRL_EXL | LOONGSON2_PERFCTRL_ENABLE;
+ if (cfg[0].kernel || cfg[1].kernel)
+ ctrl |= LOONGSON2_PERFCTRL_KERNEL;
+ if (cfg[0].user || cfg[1].user)
+ ctrl |= LOONGSON2_PERFCTRL_USER;
+ }
+
+ reg.ctrl = ctrl;
+
+ reg.cnt1_enabled = cfg[0].enabled;
+ reg.cnt2_enabled = cfg[1].enabled;
+}
+
+static void loongson2_cpu_setup(void *args)
+{
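+ /* Counter 1 occupies the low 32 bits of the 64-bit perfcnt register, counter 2 the high 32 bits. */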
+ write_c0_perfcnt((reg.reset_counter2 << 32) | reg.reset_counter1);
+}
+
+static void loongson2_cpu_start(void *args)
+{
+ /* Start all counters on current CPU */
+ if (reg.cnt1_enabled || reg.cnt2_enabled)
+ write_c0_perfctrl(reg.ctrl);
+}
+
+static void loongson2_cpu_stop(void *args)
+{
+ /* Stop all counters on current CPU */
+ write_c0_perfctrl(0);
+ memset(&reg, 0, sizeof(reg));
+}
+
+static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
+{
+ uint64_t counter, counter1, counter2;
+ struct pt_regs *regs = get_irq_regs();
+ int enabled;
+
+ /* Check whether the irq belongs to me */
+ enabled = read_c0_perfctrl() & LOONGSON2_PERFCTRL_ENABLE;
+ if (!enabled)
+ return IRQ_NONE;
+ enabled = reg.cnt1_enabled | reg.cnt2_enabled;
+ if (!enabled)
+ return IRQ_NONE;
+
+ counter = read_c0_perfcnt();
+ counter1 = counter & 0xffffffff;
+ counter2 = counter >> 32;
+
+ if (counter1 & LOONGSON2_PERFCNT_OVERFLOW) {
+ if (reg.cnt1_enabled)
+ oprofile_add_sample(regs, 0);
+ counter1 = reg.reset_counter1;
+ }
+ if (counter2 & LOONGSON2_PERFCNT_OVERFLOW) {
+ if (reg.cnt2_enabled)
+ oprofile_add_sample(regs, 1);
+ counter2 = reg.reset_counter2;
+ }
+
+ write_c0_perfcnt((counter2 << 32) | counter1);
+
+ return IRQ_HANDLED;
+}
+
+static int __init loongson2_init(void)
+{
+ return request_irq(LOONGSON2_PERFCNT_IRQ, loongson2_perfcount_handler,
+ IRQF_SHARED, "Perfcounter", oprofid);
+}
+
+static void loongson2_exit(void)
+{
+ reset_counters(NULL);
+ free_irq(LOONGSON2_PERFCNT_IRQ, oprofid);
+}
+
+struct op_mips_model op_model_loongson2_ops = {
+ .reg_setup = loongson2_reg_setup,
+ .cpu_setup = loongson2_cpu_setup,
+ .init = loongson2_init,
+ .exit = loongson2_exit,
+ .cpu_start = loongson2_cpu_start,
+ .cpu_stop = loongson2_cpu_stop,
+ .cpu_type = LOONGSON2_CPU_TYPE,
+ .num_counters = 2
+};
diff --git a/arch/mips/oprofile/op_model_loongson3.c b/arch/mips/oprofile/op_model_loongson3.c
new file mode 100644
index 000000000..436b1fc99
--- /dev/null
+++ b/arch/mips/oprofile/op_model_loongson3.c
@@ -0,0 +1,213 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/proc_fs.h>
+#include <linux/oprofile.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <irq.h>
+#include <loongson.h>
+#include "op_impl.h"
+
+#define LOONGSON3_PERFCNT_OVERFLOW (1ULL << 63)
+
+#define LOONGSON3_PERFCTRL_EXL (1UL << 0)
+#define LOONGSON3_PERFCTRL_KERNEL (1UL << 1)
+#define LOONGSON3_PERFCTRL_SUPERVISOR (1UL << 2)
+#define LOONGSON3_PERFCTRL_USER (1UL << 3)
+#define LOONGSON3_PERFCTRL_ENABLE (1UL << 4)
+#define LOONGSON3_PERFCTRL_W (1UL << 30)
+#define LOONGSON3_PERFCTRL_M (1UL << 31)
+#define LOONGSON3_PERFCTRL_EVENT(idx, event) \
+ (((event) & (idx ? 0x0f : 0x3f)) << 5)
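+/* Counter 0 has a 6-bit event field, counter 1 a 4-bit one; both start at bit 5. */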
+
+/* Loongson-3 PerfCount performance counter1 register */
+#define read_c0_perflo1() __read_64bit_c0_register($25, 0)
+#define write_c0_perflo1(val) __write_64bit_c0_register($25, 0, val)
+#define read_c0_perfhi1() __read_64bit_c0_register($25, 1)
+#define write_c0_perfhi1(val) __write_64bit_c0_register($25, 1, val)
+
+/* Loongson-3 PerfCount performance counter2 register */
+#define read_c0_perflo2() __read_64bit_c0_register($25, 2)
+#define write_c0_perflo2(val) __write_64bit_c0_register($25, 2, val)
+#define read_c0_perfhi2() __read_64bit_c0_register($25, 3)
+#define write_c0_perfhi2(val) __write_64bit_c0_register($25, 3, val)
+
+static int (*save_perf_irq)(void);
+
+static struct loongson3_register_config {
+ unsigned int control1;
+ unsigned int control2;
+ unsigned long long reset_counter1;
+ unsigned long long reset_counter2;
+ int ctr1_enable, ctr2_enable;
+} reg;
+
+static void reset_counters(void *arg)
+{
+ write_c0_perfhi1(0);
+ write_c0_perfhi2(0);
+ write_c0_perflo1(0xc0000000);
+ write_c0_perflo2(0x40000000);
+}
+
+/* Compute all of the registers in preparation for enabling profiling. */
+static void loongson3_reg_setup(struct op_counter_config *ctr)
+{
+ unsigned int control1 = 0;
+ unsigned int control2 = 0;
+
+ reg.reset_counter1 = 0;
+ reg.reset_counter2 = 0;
+ /* Compute the performance counter control word. */
+ /* For now count kernel and user mode */
+ if (ctr[0].enabled) {
+ control1 |= LOONGSON3_PERFCTRL_EVENT(0, ctr[0].event) |
+ LOONGSON3_PERFCTRL_ENABLE;
+ if (ctr[0].kernel)
+ control1 |= LOONGSON3_PERFCTRL_KERNEL;
+ if (ctr[0].user)
+ control1 |= LOONGSON3_PERFCTRL_USER;
+ reg.reset_counter1 = 0x8000000000000000ULL - ctr[0].count;
+ }
+
+ if (ctr[1].enabled) {
+ control2 |= LOONGSON3_PERFCTRL_EVENT(1, ctr[1].event) |
+ LOONGSON3_PERFCTRL_ENABLE;
+ if (ctr[1].kernel)
+ control2 |= LOONGSON3_PERFCTRL_KERNEL;
+ if (ctr[1].user)
+ control2 |= LOONGSON3_PERFCTRL_USER;
+ reg.reset_counter2 = 0x8000000000000000ULL - ctr[1].count;
+ }
+
+ if (ctr[0].enabled)
+ control1 |= LOONGSON3_PERFCTRL_EXL;
+ if (ctr[1].enabled)
+ control2 |= LOONGSON3_PERFCTRL_EXL;
+
+ reg.control1 = control1;
+ reg.control2 = control2;
+ reg.ctr1_enable = ctr[0].enabled;
+ reg.ctr2_enable = ctr[1].enabled;
+}
+
+/* Program all of the registers in preparation for enabling profiling. */
+static void loongson3_cpu_setup(void *args)
+{
+ uint64_t perfcount1, perfcount2;
+
+ perfcount1 = reg.reset_counter1;
+ perfcount2 = reg.reset_counter2;
+ write_c0_perfhi1(perfcount1);
+ write_c0_perfhi2(perfcount2);
+}
+
+static void loongson3_cpu_start(void *args)
+{
+ /* Start all counters on current CPU */
+ reg.control1 |= (LOONGSON3_PERFCTRL_W|LOONGSON3_PERFCTRL_M);
+ reg.control2 |= (LOONGSON3_PERFCTRL_W|LOONGSON3_PERFCTRL_M);
+
+ if (reg.ctr1_enable)
+ write_c0_perflo1(reg.control1);
+ if (reg.ctr2_enable)
+ write_c0_perflo2(reg.control2);
+}
+
+static void loongson3_cpu_stop(void *args)
+{
+ /* Stop all counters on current CPU */
+ write_c0_perflo1(0xc0000000);
+ write_c0_perflo2(0x40000000);
+ memset(&reg, 0, sizeof(reg));
+}
+
+static int loongson3_perfcount_handler(void)
+{
+ unsigned long flags;
+ uint64_t counter1, counter2;
+ uint32_t cause, handled = IRQ_NONE;
+ struct pt_regs *regs = get_irq_regs();
+
+ cause = read_c0_cause();
+ if (!(cause & CAUSEF_PCI))
+ return handled;
+
+ counter1 = read_c0_perfhi1();
+ counter2 = read_c0_perfhi2();
+
+ local_irq_save(flags);
+
+ if (counter1 & LOONGSON3_PERFCNT_OVERFLOW) {
+ if (reg.ctr1_enable)
+ oprofile_add_sample(regs, 0);
+ counter1 = reg.reset_counter1;
+ }
+ if (counter2 & LOONGSON3_PERFCNT_OVERFLOW) {
+ if (reg.ctr2_enable)
+ oprofile_add_sample(regs, 1);
+ counter2 = reg.reset_counter2;
+ }
+
+ local_irq_restore(flags);
+
+ write_c0_perfhi1(counter1);
+ write_c0_perfhi2(counter2);
+
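+ /* Do not claim the interrupt if a timer interrupt is also pending on the shared line. */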
+ if (!(cause & CAUSEF_TI))
+ handled = IRQ_HANDLED;
+
+ return handled;
+}
+
+static int loongson3_starting_cpu(unsigned int cpu)
+{
+ write_c0_perflo1(reg.control1);
+ write_c0_perflo2(reg.control2);
+ return 0;
+}
+
+static int loongson3_dying_cpu(unsigned int cpu)
+{
+ write_c0_perflo1(0xc0000000);
+ write_c0_perflo2(0x40000000);
+ return 0;
+}
+
+static int __init loongson3_init(void)
+{
+ on_each_cpu(reset_counters, NULL, 1);
+ cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
+ "mips/oprofile/loongson3:starting",
+ loongson3_starting_cpu, loongson3_dying_cpu);
+ save_perf_irq = perf_irq;
+ perf_irq = loongson3_perfcount_handler;
+
+ return 0;
+}
+
+static void loongson3_exit(void)
+{
+ on_each_cpu(reset_counters, NULL, 1);
+ cpuhp_remove_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING);
+ perf_irq = save_perf_irq;
+}
+
+struct op_mips_model op_model_loongson3_ops = {
+ .reg_setup = loongson3_reg_setup,
+ .cpu_setup = loongson3_cpu_setup,
+ .init = loongson3_init,
+ .exit = loongson3_exit,
+ .cpu_start = loongson3_cpu_start,
+ .cpu_stop = loongson3_cpu_stop,
+ .cpu_type = "mips/loongson3",
+ .num_counters = 2
+};
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
new file mode 100644
index 000000000..55d7b7fd1
--- /dev/null
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -0,0 +1,479 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 05, 06 by Ralf Baechle
+ * Copyright (C) 2005 by MIPS Technologies, Inc.
+ */
+#include <linux/cpumask.h>
+#include <linux/oprofile.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <asm/irq_regs.h>
+#include <asm/time.h>
+
+#include "op_impl.h"
+
+#define M_PERFCTL_EVENT(event) (((event) << MIPS_PERFCTRL_EVENT_S) & \
+ MIPS_PERFCTRL_EVENT)
+#define M_PERFCTL_VPEID(vpe) ((vpe) << MIPS_PERFCTRL_VPEID_S)
+
+#define M_COUNTER_OVERFLOW (1UL << 31)
+
+static int (*save_perf_irq)(void);
+static int perfcount_irq;
+
+/*
+ * XLR has only one set of counters per core. Designate the
+ * first hardware thread in the core for setup and init.
+ * Skip CPUs with non-zero hardware thread id (4 hwt per core)
+ */
+#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
+#define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0)
+#else
+#define oprofile_skip_cpu(c) 0
+#endif
+
+#ifdef CONFIG_MIPS_MT_SMP
+#define WHAT (MIPS_PERFCTRL_MT_EN_VPE | \
+ M_PERFCTL_VPEID(cpu_vpe_id(&current_cpu_data)))
+#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
+ 0 : cpu_vpe_id(&current_cpu_data))
+
+/*
+ * The number of bits to shift to convert between counters per core and
+ * counters per VPE. There is no reasonable interface atm to obtain the
+ * number of VPEs used by Linux and in the 34K this number is fixed to two
+ * anyway, so we hardcode a few things here for the moment. The way it's
+ * done here ensures that an oprofile VSMP kernel also runs correctly on a
+ * lesser core like a 24K or with maxcpus=1.
+ */
+static inline unsigned int vpe_shift(void)
+{
+ if (num_possible_cpus() > 1)
+ return 1;
+
+ return 0;
+}
+
+#else
+
+#define WHAT 0
+#define vpe_id() 0
+
+static inline unsigned int vpe_shift(void)
+{
+ return 0;
+}
+
+#endif
+
+static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
+{
+ return counters >> vpe_shift();
+}
+
+static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
+{
+ return counters << vpe_shift();
+}
+
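+/*
+ * Generate accessors that pick the physical counter register for the
+ * calling VPE: VPE 0 uses register n, VPE 1 its sibling register np.
+ */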
+#define __define_perf_accessors(r, n, np) \
+ \
+static inline unsigned int r_c0_ ## r ## n(void) \
+{ \
+ unsigned int cpu = vpe_id(); \
+ \
+ switch (cpu) { \
+ case 0: \
+ return read_c0_ ## r ## n(); \
+ case 1: \
+ return read_c0_ ## r ## np(); \
+ default: \
+ BUG(); \
+ } \
+ return 0; \
+} \
+ \
+static inline void w_c0_ ## r ## n(unsigned int value) \
+{ \
+ unsigned int cpu = vpe_id(); \
+ \
+ switch (cpu) { \
+ case 0: \
+ write_c0_ ## r ## n(value); \
+ return; \
+ case 1: \
+ write_c0_ ## r ## np(value); \
+ return; \
+ default: \
+ BUG(); \
+ } \
+ return; \
+} \
+
+__define_perf_accessors(perfcntr, 0, 2)
+__define_perf_accessors(perfcntr, 1, 3)
+__define_perf_accessors(perfcntr, 2, 0)
+__define_perf_accessors(perfcntr, 3, 1)
+
+__define_perf_accessors(perfctrl, 0, 2)
+__define_perf_accessors(perfctrl, 1, 3)
+__define_perf_accessors(perfctrl, 2, 0)
+__define_perf_accessors(perfctrl, 3, 1)
+
+struct op_mips_model op_model_mipsxx_ops;
+
+static struct mipsxx_register_config {
+ unsigned int control[4];
+ unsigned int counter[4];
+} reg;
+
+/* Compute all of the registers in preparation for enabling profiling. */
+
+static void mipsxx_reg_setup(struct op_counter_config *ctr)
+{
+ unsigned int counters = op_model_mipsxx_ops.num_counters;
+ int i;
+
+ /* Compute the performance counter control word. */
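+ /* Counters are seeded to overflow (bit 31) after "count" events. */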
+ for (i = 0; i < counters; i++) {
+ reg.control[i] = 0;
+ reg.counter[i] = 0;
+
+ if (!ctr[i].enabled)
+ continue;
+
+ reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
+ MIPS_PERFCTRL_IE;
+ if (ctr[i].kernel)
+ reg.control[i] |= MIPS_PERFCTRL_K;
+ if (ctr[i].user)
+ reg.control[i] |= MIPS_PERFCTRL_U;
+ if (ctr[i].exl)
+ reg.control[i] |= MIPS_PERFCTRL_EXL;
+ if (boot_cpu_type() == CPU_XLR)
+ reg.control[i] |= XLR_PERFCTRL_ALLTHREADS;
+ reg.counter[i] = 0x80000000 - ctr[i].count;
+ }
+}
+
+/* Program all of the registers in preparation for enabling profiling. */
+
+static void mipsxx_cpu_setup(void *args)
+{
+ unsigned int counters = op_model_mipsxx_ops.num_counters;
+
+ if (oprofile_skip_cpu(smp_processor_id()))
+ return;
+
+ switch (counters) {
+ case 4:
+ w_c0_perfctrl3(0);
+ w_c0_perfcntr3(reg.counter[3]);
+ fallthrough;
+ case 3:
+ w_c0_perfctrl2(0);
+ w_c0_perfcntr2(reg.counter[2]);
+ fallthrough;
+ case 2:
+ w_c0_perfctrl1(0);
+ w_c0_perfcntr1(reg.counter[1]);
+ fallthrough;
+ case 1:
+ w_c0_perfctrl0(0);
+ w_c0_perfcntr0(reg.counter[0]);
+ }
+}
+
+/* Start all counters on current CPU */
+static void mipsxx_cpu_start(void *args)
+{
+ unsigned int counters = op_model_mipsxx_ops.num_counters;
+
+ if (oprofile_skip_cpu(smp_processor_id()))
+ return;
+
+ switch (counters) {
+ case 4:
+ w_c0_perfctrl3(WHAT | reg.control[3]);
+ fallthrough;
+ case 3:
+ w_c0_perfctrl2(WHAT | reg.control[2]);
+ fallthrough;
+ case 2:
+ w_c0_perfctrl1(WHAT | reg.control[1]);
+ fallthrough;
+ case 1:
+ w_c0_perfctrl0(WHAT | reg.control[0]);
+ }
+}
+
+/* Stop all counters on current CPU */
+static void mipsxx_cpu_stop(void *args)
+{
+ unsigned int counters = op_model_mipsxx_ops.num_counters;
+
+ if (oprofile_skip_cpu(smp_processor_id()))
+ return;
+
+ switch (counters) {
+ case 4:
+ w_c0_perfctrl3(0);
+ fallthrough;
+ case 3:
+ w_c0_perfctrl2(0);
+ fallthrough;
+ case 2:
+ w_c0_perfctrl1(0);
+ fallthrough;
+ case 1:
+ w_c0_perfctrl0(0);
+ }
+}
+
+static int mipsxx_perfcount_handler(void)
+{
+ unsigned int counters = op_model_mipsxx_ops.num_counters;
+ unsigned int control;
+ unsigned int counter;
+ int handled = IRQ_NONE;
+
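+ /* On R2 and newer cores the Cause.PCI bit flags a pending counter overflow. */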
+ if (cpu_has_mips_r2 && !(read_c0_cause() & CAUSEF_PCI))
+ return handled;
+
+ switch (counters) {
+#define HANDLE_COUNTER(n) \
+ case n + 1: \
+ control = r_c0_perfctrl ## n(); \
+ counter = r_c0_perfcntr ## n(); \
+ if ((control & MIPS_PERFCTRL_IE) && \
+ (counter & M_COUNTER_OVERFLOW)) { \
+ oprofile_add_sample(get_irq_regs(), n); \
+ w_c0_perfcntr ## n(reg.counter[n]); \
+ handled = IRQ_HANDLED; \
+ }
+ HANDLE_COUNTER(3)
+ fallthrough;
+ HANDLE_COUNTER(2)
+ fallthrough;
+ HANDLE_COUNTER(1)
+ fallthrough;
+ HANDLE_COUNTER(0)
+ }
+
+ return handled;
+}
+
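+/*
+ * Probe the number of counters: the M bit in each control register is set
+ * when at least one more counter follows.
+ */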
+static inline int __n_counters(void)
+{
+ if (!cpu_has_perf)
+ return 0;
+ if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
+ return 1;
+ if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
+ return 2;
+ if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
+ return 3;
+
+ return 4;
+}
+
+static inline int n_counters(void)
+{
+ int counters;
+
+ switch (current_cpu_type()) {
+ case CPU_R10000:
+ counters = 2;
+ break;
+
+ case CPU_R12000:
+ case CPU_R14000:
+ case CPU_R16000:
+ counters = 4;
+ break;
+
+ default:
+ counters = __n_counters();
+ }
+
+ return counters;
+}
+
+static void reset_counters(void *arg)
+{
+ int counters = (int)(long)arg;
+ switch (counters) {
+ case 4:
+ w_c0_perfctrl3(0);
+ w_c0_perfcntr3(0);
+ fallthrough;
+ case 3:
+ w_c0_perfctrl2(0);
+ w_c0_perfcntr2(0);
+ fallthrough;
+ case 2:
+ w_c0_perfctrl1(0);
+ w_c0_perfcntr1(0);
+ fallthrough;
+ case 1:
+ w_c0_perfctrl0(0);
+ w_c0_perfcntr0(0);
+ }
+}
+
+static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
+{
+ return mipsxx_perfcount_handler();
+}
+
+static int __init mipsxx_init(void)
+{
+ int counters;
+
+ counters = n_counters();
+ if (counters == 0) {
+ printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_MIPS_MT_SMP
+ if (!cpu_has_mipsmt_pertccounters)
+ counters = counters_total_to_per_cpu(counters);
+#endif
+ on_each_cpu(reset_counters, (void *)(long)counters, 1);
+
+ op_model_mipsxx_ops.num_counters = counters;
+ switch (current_cpu_type()) {
+ case CPU_M14KC:
+ op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
+ break;
+
+ case CPU_M14KEC:
+ op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
+ break;
+
+ case CPU_20KC:
+ op_model_mipsxx_ops.cpu_type = "mips/20K";
+ break;
+
+ case CPU_24K:
+ op_model_mipsxx_ops.cpu_type = "mips/24K";
+ break;
+
+ case CPU_25KF:
+ op_model_mipsxx_ops.cpu_type = "mips/25K";
+ break;
+
+ case CPU_1004K:
+ case CPU_34K:
+ op_model_mipsxx_ops.cpu_type = "mips/34K";
+ break;
+
+ case CPU_1074K:
+ case CPU_74K:
+ op_model_mipsxx_ops.cpu_type = "mips/74K";
+ break;
+
+ case CPU_INTERAPTIV:
+ op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
+ break;
+
+ case CPU_PROAPTIV:
+ op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
+ break;
+
+ case CPU_P5600:
+ op_model_mipsxx_ops.cpu_type = "mips/P5600";
+ break;
+
+ case CPU_I6400:
+ op_model_mipsxx_ops.cpu_type = "mips/I6400";
+ break;
+
+ case CPU_M5150:
+ op_model_mipsxx_ops.cpu_type = "mips/M5150";
+ break;
+
+ case CPU_5KC:
+ op_model_mipsxx_ops.cpu_type = "mips/5K";
+ break;
+
+ case CPU_R10000:
+ if ((current_cpu_data.processor_id & 0xff) == 0x20)
+ op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
+ else
+ op_model_mipsxx_ops.cpu_type = "mips/r10000";
+ break;
+
+ case CPU_R12000:
+ case CPU_R14000:
+ op_model_mipsxx_ops.cpu_type = "mips/r12000";
+ break;
+
+ case CPU_R16000:
+ op_model_mipsxx_ops.cpu_type = "mips/r16000";
+ break;
+
+ case CPU_SB1:
+ case CPU_SB1A:
+ op_model_mipsxx_ops.cpu_type = "mips/sb1";
+ break;
+
+ case CPU_LOONGSON32:
+ op_model_mipsxx_ops.cpu_type = "mips/loongson1";
+ break;
+
+ case CPU_XLR:
+ op_model_mipsxx_ops.cpu_type = "mips/xlr";
+ break;
+
+ default:
+ printk(KERN_ERR "Profiling unsupported for this CPU\n");
+
+ return -ENODEV;
+ }
+
+ save_perf_irq = perf_irq;
+ perf_irq = mipsxx_perfcount_handler;
+
+ if (get_c0_perfcount_int)
+ perfcount_irq = get_c0_perfcount_int();
+ else if (cp0_perfcount_irq >= 0)
+ perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ else
+ perfcount_irq = -1;
+
+ if (perfcount_irq >= 0)
+ return request_irq(perfcount_irq, mipsxx_perfcount_int,
+ IRQF_PERCPU | IRQF_NOBALANCING |
+ IRQF_NO_THREAD | IRQF_NO_SUSPEND |
+ IRQF_SHARED,
+ "Perfcounter", save_perf_irq);
+
+ return 0;
+}
+
+static void mipsxx_exit(void)
+{
+ int counters = op_model_mipsxx_ops.num_counters;
+
+ if (perfcount_irq >= 0)
+ free_irq(perfcount_irq, save_perf_irq);
+
+ counters = counters_per_cpu_to_total(counters);
+ on_each_cpu(reset_counters, (void *)(long)counters, 1);
+
+ perf_irq = save_perf_irq;
+}
+
+struct op_mips_model op_model_mipsxx_ops = {
+ .reg_setup = mipsxx_reg_setup,
+ .cpu_setup = mipsxx_cpu_setup,
+ .init = mipsxx_init,
+ .exit = mipsxx_exit,
+ .cpu_start = mipsxx_cpu_start,
+ .cpu_stop = mipsxx_cpu_stop,
+};
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
new file mode 100644
index 000000000..f3eecc065
--- /dev/null
+++ b/arch/mips/pci/Makefile
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the PCI specific kernel interface routines under Linux.
+#
+
+obj-y += pci.o
+obj-$(CONFIG_PCI_DRIVERS_LEGACY) += pci-legacy.o
+obj-$(CONFIG_PCI_DRIVERS_GENERIC) += pci-generic.o
+
+#
+# PCI bus host bridge specific code
+#
+obj-$(CONFIG_MIPS_BONITO64) += ops-bonito64.o
+obj-$(CONFIG_PCI_GT64XXX_PCI0) += ops-gt64xxx_pci0.o
+obj-$(CONFIG_MIPS_MSC) += ops-msc.o
+obj-$(CONFIG_SOC_TX3927) += ops-tx3927.o
+obj-$(CONFIG_PCI_VR41XX) += ops-vr41xx.o pci-vr41xx.o
+obj-$(CONFIG_PCI_TX4927) += ops-tx4927.o
+obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o
+obj-$(CONFIG_BCM63XX) += pci-bcm63xx.o fixup-bcm63xx.o \
+ ops-bcm63xx.o
+obj-$(CONFIG_MIPS_ALCHEMY) += pci-alchemy.o
+obj-$(CONFIG_PCI_AR2315) += pci-ar2315.o
+obj-$(CONFIG_SOC_AR71XX) += pci-ar71xx.o
+obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o
+obj-$(CONFIG_PCI_XTALK_BRIDGE) += pci-xtalk-bridge.o
+#
+# These are still pretty much in the old state, watch, go blind.
+#
+obj-$(CONFIG_ATH79) += fixup-ath79.o
+obj-$(CONFIG_MIPS_COBALT) += fixup-cobalt.o
+obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o
+obj-$(CONFIG_LEMOTE_MACH2F) += fixup-lemote2f.o ops-loongson2.o
+obj-$(CONFIG_MIPS_MALTA) += fixup-malta.o pci-malta.o
+obj-$(CONFIG_SGI_IP27) += pci-ip27.o
+obj-$(CONFIG_SGI_IP32) += fixup-ip32.o ops-mace.o pci-ip32.o
+obj-$(CONFIG_SIBYTE_SB1250) += fixup-sb1250.o pci-sb1250.o
+obj-$(CONFIG_SIBYTE_BCM112X) += fixup-sb1250.o pci-sb1250.o
+obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o
+obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o
+obj-$(CONFIG_LANTIQ) += fixup-lantiq.o
+obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o
+obj-$(CONFIG_SOC_MT7620) += pci-mt7620.o
+obj-$(CONFIG_SOC_RT288X) += pci-rt2880.o
+obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o
+obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o
+obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o
+obj-$(CONFIG_TANBAC_TB0287) += fixup-tb0287.o
+obj-$(CONFIG_TOSHIBA_JMR3927) += fixup-jmr3927.o
+obj-$(CONFIG_SOC_TX4927) += pci-tx4927.o
+obj-$(CONFIG_SOC_TX4938) += pci-tx4938.o
+obj-$(CONFIG_SOC_TX4939) += pci-tx4939.o
+obj-$(CONFIG_TOSHIBA_RBTX4927) += fixup-rbtx4927.o
+obj-$(CONFIG_TOSHIBA_RBTX4938) += fixup-rbtx4938.o
+obj-$(CONFIG_VICTOR_MPC30X) += fixup-mpc30x.o
+obj-$(CONFIG_ZAO_CAPCELLA) += fixup-capcella.o
+obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o
+obj-$(CONFIG_CAVIUM_OCTEON_SOC) += pci-octeon.o pcie-octeon.o
+obj-$(CONFIG_CPU_XLR) += pci-xlr.o
+obj-$(CONFIG_CPU_XLP) += pci-xlp.o
+
+ifdef CONFIG_PCI_MSI
+obj-$(CONFIG_CAVIUM_OCTEON_SOC) += msi-octeon.o
+obj-$(CONFIG_CPU_XLP) += msi-xlp.o
+endif
diff --git a/arch/mips/pci/fixup-ath79.c b/arch/mips/pci/fixup-ath79.c
new file mode 100644
index 000000000..09a4ce534
--- /dev/null
+++ b/arch/mips/pci/fixup-ath79.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2018 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/pci.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return PCIBIOS_SUCCESSFUL;
+}
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return of_irq_parse_and_map_pci(dev, slot, pin);
+}
diff --git a/arch/mips/pci/fixup-bcm63xx.c b/arch/mips/pci/fixup-bcm63xx.c
new file mode 100644
index 000000000..340863009
--- /dev/null
+++ b/arch/mips/pci/fixup-bcm63xx.c
@@ -0,0 +1,21 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <bcm63xx_cpu.h>
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return bcm63xx_get_irq_number(IRQ_PCI);
+}
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
diff --git a/arch/mips/pci/fixup-capcella.c b/arch/mips/pci/fixup-capcella.c
new file mode 100644
index 000000000..dc8cd98a1
--- /dev/null
+++ b/arch/mips/pci/fixup-capcella.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * fixup-capcella.c, the ZAO Networks Capcella specific PCI fixups.
+ *
+ * Copyright (C) 2002,2004 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/vr41xx/capcella.h>
+
+/*
+ * Shortcuts
+ */
+#define INT1 RTL8139_1_IRQ
+#define INT2 RTL8139_2_IRQ
+#define INTA PC104PLUS_INTA_IRQ
+#define INTB PC104PLUS_INTB_IRQ
+#define INTC PC104PLUS_INTC_IRQ
+#define INTD PC104PLUS_INTD_IRQ
+
+static char irq_tab_capcella[][5] = {
+ [11] = { -1, INT1, INT1, INT1, INT1 },
+ [12] = { -1, INT2, INT2, INT2, INT2 },
+ [14] = { -1, INTA, INTB, INTC, INTD }
+};
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return irq_tab_capcella[slot][pin];
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
diff --git a/arch/mips/pci/fixup-cobalt.c b/arch/mips/pci/fixup-cobalt.c
new file mode 100644
index 000000000..44be65c3e
--- /dev/null
+++ b/arch/mips/pci/fixup-cobalt.c
@@ -0,0 +1,192 @@
+/*
+ * Cobalt Qube/Raq PCI support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1996, 1997, 2002, 2003 by Ralf Baechle
+ * Copyright (C) 2001, 2002, 2003 by Liam Davies (ldavies@agile.tv)
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/gt64120.h>
+
+#include <cobalt.h>
+#include <irq.h>
+
+/*
+ * PCI slot numbers
+ */
+#define COBALT_PCICONF_CPU 0x06
+#define COBALT_PCICONF_ETH0 0x07
+#define COBALT_PCICONF_RAQSCSI 0x08
+#define COBALT_PCICONF_VIA 0x09
+#define COBALT_PCICONF_PCISLOT 0x0A
+#define COBALT_PCICONF_ETH1 0x0C
+
+/*
+ * The Cobalt board ID information. The boards have an ID number wired
+ * into the VIA that is available in the high nibble of register 94.
+ */
+#define VIA_COBALT_BRD_ID_REG 0x94
+#define VIA_COBALT_BRD_REG_to_ID(reg) ((unsigned char)(reg) >> 4)
+
+static void qube_raq_galileo_early_fixup(struct pci_dev *dev)
+{
+ if (dev->devfn == PCI_DEVFN(0, 0) &&
+ (dev->class >> 8) == PCI_CLASS_MEMORY_OTHER) {
+
+ dev->class = (PCI_CLASS_BRIDGE_HOST << 8) | (dev->class & 0xff);
+
+ printk(KERN_INFO "Galileo: fixed bridge class\n");
+ }
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_GT64111,
+ qube_raq_galileo_early_fixup);
+
+static void qube_raq_via_bmIDE_fixup(struct pci_dev *dev)
+{
+ unsigned short cfgword;
+ unsigned char lt;
+
+ /* Enable Bus Mastering and fast back to back. */
+ pci_read_config_word(dev, PCI_COMMAND, &cfgword);
+ cfgword |= (PCI_COMMAND_FAST_BACK | PCI_COMMAND_MASTER);
+ pci_write_config_word(dev, PCI_COMMAND, cfgword);
+
+ /* Enable both ide interfaces. ROM only enables primary one. */
+ pci_write_config_byte(dev, 0x40, 0xb);
+
+ /* Set latency timer to reasonable value. */
+ pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lt);
+ if (lt < 64)
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 8);
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1,
+ qube_raq_via_bmIDE_fixup);
+
+static void qube_raq_galileo_fixup(struct pci_dev *dev)
+{
+ if (dev->devfn != PCI_DEVFN(0, 0))
+ return;
+
+ /* Fix PCI latency-timer and cache-line-size values in Galileo
+ * host bridge.
+ */
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 8);
+
+ /*
+ * The code described by the comment below has been removed
+ * as it causes bus mastering by the Ethernet controllers
+ * to break under any kind of network load. We always set
+ * the retry timeouts to their maximum.
+ *
+ * --x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--
+ *
+ * On all machines prior to Q2, we had the STOP line disconnected
+ * from Galileo to VIA on PCI. The new Galileo does not function
+ * correctly unless we have it connected.
+ *
+ * Therefore we must set the disconnect/retry cycle values to
+ * something sensible when using the new Galileo.
+ */
+
+ printk(KERN_INFO "Galileo: revision %u\n", dev->revision);
+
+#if 0
+ if (dev->revision >= 0x10) {
+ /* New Galileo, assumes PCI stop line to VIA is connected. */
+ GT_WRITE(GT_PCI0_TOR_OFS, 0x4020);
+ } else if (dev->revision == 0x1 || dev->revision == 0x2)
+#endif
+ {
+ signed int timeo;
+ /* XXX WE MUST DO THIS ELSE GALILEO LOCKS UP! -DaveM */
+ timeo = GT_READ(GT_PCI0_TOR_OFS);
+ /* Old Galileo, assumes PCI STOP line to VIA is disconnected. */
+ GT_WRITE(GT_PCI0_TOR_OFS,
+ (0xff << 16) | /* retry count */
+ (0xff << 8) | /* timeout 1 */
+ 0xff); /* timeout 0 */
+
+ /* enable PCI retry exceeded interrupt */
+ GT_WRITE(GT_INTRMASK_OFS, GT_INTR_RETRYCTR0_MSK | GT_READ(GT_INTRMASK_OFS));
+ }
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_GT64111,
+ qube_raq_galileo_fixup);
+
+int cobalt_board_id;
+
+static void qube_raq_via_board_id_fixup(struct pci_dev *dev)
+{
+ u8 id;
+ int retval;
+
+ retval = pci_read_config_byte(dev, VIA_COBALT_BRD_ID_REG, &id);
+ if (retval) {
+ panic("Cannot read board ID");
+ return;
+ }
+
+ cobalt_board_id = VIA_COBALT_BRD_REG_to_ID(id);
+
+ printk(KERN_INFO "Cobalt board ID: %d\n", cobalt_board_id);
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0,
+ qube_raq_via_board_id_fixup);
+
+static char irq_tab_qube1[] = {
+ [COBALT_PCICONF_CPU] = 0,
+ [COBALT_PCICONF_ETH0] = QUBE1_ETH0_IRQ,
+ [COBALT_PCICONF_RAQSCSI] = SCSI_IRQ,
+ [COBALT_PCICONF_VIA] = 0,
+ [COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
+ [COBALT_PCICONF_ETH1] = 0
+};
+
+static char irq_tab_cobalt[] = {
+ [COBALT_PCICONF_CPU] = 0,
+ [COBALT_PCICONF_ETH0] = ETH0_IRQ,
+ [COBALT_PCICONF_RAQSCSI] = SCSI_IRQ,
+ [COBALT_PCICONF_VIA] = 0,
+ [COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
+ [COBALT_PCICONF_ETH1] = ETH1_IRQ
+};
+
+static char irq_tab_raq2[] = {
+ [COBALT_PCICONF_CPU] = 0,
+ [COBALT_PCICONF_ETH0] = ETH0_IRQ,
+ [COBALT_PCICONF_RAQSCSI] = RAQ2_SCSI_IRQ,
+ [COBALT_PCICONF_VIA] = 0,
+ [COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
+ [COBALT_PCICONF_ETH1] = ETH1_IRQ
+};
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ if (cobalt_board_id <= COBALT_BRD_ID_QUBE1)
+ return irq_tab_qube1[slot];
+
+ if (cobalt_board_id == COBALT_BRD_ID_RAQ2)
+ return irq_tab_raq2[slot];
+
+ return irq_tab_cobalt[slot];
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
diff --git a/arch/mips/pci/fixup-fuloong2e.c b/arch/mips/pci/fixup-fuloong2e.c
new file mode 100644
index 000000000..91aa92323
--- /dev/null
+++ b/arch/mips/pci/fixup-fuloong2e.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2004 ICT CAS
+ * Author: Li xiaoyu, ICT CAS
+ * lixy@ict.ac.cn
+ *
+ * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ */
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <loongson.h>
+
+/* South bridge slot number is set by the pci probe process */
+static u8 sb_slot = 5;
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ int irq = 0;
+
+ if (slot == sb_slot) {
+ switch (PCI_FUNC(dev->devfn)) {
+ case 2:
+ irq = 10;
+ break;
+ case 3:
+ irq = 11;
+ break;
+ case 5:
+ irq = 9;
+ break;
+ }
+ } else {
+ irq = LOONGSON_IRQ_BASE + 25 + pin;
+ }
+
+ return irq;
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
+
+static void loongson2e_nec_fixup(struct pci_dev *pdev)
+{
+ unsigned int val;
+
+ /* Configure ports 1, 2, 3 and 4 to be valid */
+ pci_read_config_dword(pdev, 0xe0, &val);
+ pci_write_config_dword(pdev, 0xe0, (val & ~7) | 0x4);
+
+ /* System clock is 48-MHz Oscillator. */
+ pci_write_config_dword(pdev, 0xe4, 1 << 5);
+}
+
+static void loongson2e_686b_func0_fixup(struct pci_dev *pdev)
+{
+ unsigned char c;
+
+ sb_slot = PCI_SLOT(pdev->devfn);
+
+ printk(KERN_INFO "via686b fix: ISA bridge\n");
+
+ /* Enable I/O Recovery time */
+ pci_write_config_byte(pdev, 0x40, 0x08);
+
+ /* Enable ISA refresh */
+ pci_write_config_byte(pdev, 0x41, 0x01);
+
+ /* disable ISA line buffer */
+ pci_write_config_byte(pdev, 0x45, 0x00);
+
+ /* Gate INTR, and flush line buffer */
+ pci_write_config_byte(pdev, 0x46, 0xe0);
+
+ /* Disable PCI Delay Transaction, Enable EISA ports 4D0/4D1. */
+ /* pci_write_config_byte(pdev, 0x47, 0x20); */
+
+ /*
+ * enable PCI Delay Transaction, Enable EISA ports 4D0/4D1.
+ * enable time-out timer
+ */
+ pci_write_config_byte(pdev, 0x47, 0xe6);
+
+ /*
+ * enable level trigger on pci irqs: 9,10,11,13
+ * important! without this PCI interrupts won't work
+ */
+ outb(0x2e, 0x4d1);
+
+ /* 512 K PCI Decode */
+ pci_write_config_byte(pdev, 0x48, 0x01);
+
+ /* Wait for PGNT before grant to ISA Master/DMA */
+ pci_write_config_byte(pdev, 0x4a, 0x84);
+
+ /*
+ * Plug'n'Play
+ *
+ * Parallel DRQ 3, Floppy DRQ 2 (default)
+ */
+ pci_write_config_byte(pdev, 0x50, 0x0e);
+
+ /*
+ * IRQ Routing for Floppy and Parallel port
+ *
+ * IRQ 6 for floppy, IRQ 7 for parallel port
+ */
+ pci_write_config_byte(pdev, 0x51, 0x76);
+
+ /* IRQ Routing for serial ports (take IRQ 3 and 4) */
+ pci_write_config_byte(pdev, 0x52, 0x34);
+
+ /* All IRQ's level triggered. */
+ pci_write_config_byte(pdev, 0x54, 0x00);
+
+ /* route PIRQA-D irq */
+ pci_write_config_byte(pdev, 0x55, 0x90); /* bit 7-4, PIRQA */
+ pci_write_config_byte(pdev, 0x56, 0xba); /* bit 7-4, PIRQC; */
+ /* 3-0, PIRQB */
+ pci_write_config_byte(pdev, 0x57, 0xd0); /* bit 7-4, PIRQD */
+
+ /* enable function 5/6, audio/modem */
+ pci_read_config_byte(pdev, 0x85, &c);
+ c &= ~(0x3 << 2);
+ pci_write_config_byte(pdev, 0x85, c);
+
+ printk(KERN_INFO "via686b fix: ISA bridge done\n");
+}
+
+static void loongson2e_686b_func1_fixup(struct pci_dev *pdev)
+{
+ printk(KERN_INFO "via686b fix: IDE\n");
+
+ /* Modify IDE controller setup */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 48);
+ pci_write_config_byte(pdev, PCI_COMMAND,
+ PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER);
+ pci_write_config_byte(pdev, 0x40, 0x0b);
+ /* legacy mode */
+ pci_write_config_byte(pdev, 0x42, 0x09);
+
+#if 1/* play safe, otherwise we may see notebook's usb keyboard lockup */
+ /* disable read prefetch/write post buffers */
+ pci_write_config_byte(pdev, 0x41, 0x02);
+
+ /* use 3/4 as FIFO threshold */
+ pci_write_config_byte(pdev, 0x43, 0x0a);
+ pci_write_config_byte(pdev, 0x44, 0x00);
+
+ pci_write_config_byte(pdev, 0x45, 0x00);
+#else
+ pci_write_config_byte(pdev, 0x41, 0xc2);
+ pci_write_config_byte(pdev, 0x43, 0x35);
+ pci_write_config_byte(pdev, 0x44, 0x1c);
+
+ pci_write_config_byte(pdev, 0x45, 0x10);
+#endif
+
+ printk(KERN_INFO "via686b fix: IDE done\n");
+}
+
+static void loongson2e_686b_func2_fixup(struct pci_dev *pdev)
+{
+ /* irq routing */
+ pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 10);
+}
+
+static void loongson2e_686b_func3_fixup(struct pci_dev *pdev)
+{
+ /* irq routing */
+ pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 11);
+}
+
+static void loongson2e_686b_func5_fixup(struct pci_dev *pdev)
+{
+ unsigned int val;
+ unsigned char c;
+
+ /* enable IO */
+ pci_write_config_byte(pdev, PCI_COMMAND,
+ PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER);
+ pci_read_config_dword(pdev, 0x4, &val);
+ pci_write_config_dword(pdev, 0x4, val | 1);
+
+ /* route ac97 IRQ */
+ pci_write_config_byte(pdev, 0x3c, 9);
+
+ pci_read_config_byte(pdev, 0x8, &c);
+
+ /* link control: enable link & SGD PCM output */
+ pci_write_config_byte(pdev, 0x41, 0xcc);
+
+ /* disable game port, FM, midi, sb, enable write to reg2c-2f */
+ pci_write_config_byte(pdev, 0x42, 0x20);
+
+ /* we are using Avance logic codec */
+ pci_write_config_word(pdev, 0x2c, 0x1005);
+ pci_write_config_word(pdev, 0x2e, 0x4710);
+ pci_read_config_dword(pdev, 0x2c, &val);
+
+ pci_write_config_byte(pdev, 0x42, 0x0);
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686,
+ loongson2e_686b_func0_fixup);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1,
+ loongson2e_686b_func1_fixup);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_2,
+ loongson2e_686b_func2_fixup);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3,
+ loongson2e_686b_func3_fixup);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5,
+ loongson2e_686b_func5_fixup);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
+ loongson2e_nec_fixup);
diff --git a/arch/mips/pci/fixup-ip32.c b/arch/mips/pci/fixup-ip32.c
new file mode 100644
index 000000000..d091ffc53
--- /dev/null
+++ b/arch/mips/pci/fixup-ip32.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <asm/ip32/ip32_ints.h>
+/*
+ * O2 has up to 5 PCI devices connected into the MACE bridge. The device
+ * map looks like this:
+ *
+ * 0 aic7xxx 0
+ * 1 aic7xxx 1
+ * 2 expansion slot
+ * 3 N/C
+ * 4 N/C
+ */
+
+#define SCSI0 MACEPCI_SCSI0_IRQ
+#define SCSI1 MACEPCI_SCSI1_IRQ
+#define INTA0 MACEPCI_SLOT0_IRQ
+#define INTA1 MACEPCI_SLOT1_IRQ
+#define INTA2 MACEPCI_SLOT2_IRQ
+#define INTB MACEPCI_SHARED0_IRQ
+#define INTC MACEPCI_SHARED1_IRQ
+#define INTD MACEPCI_SHARED2_IRQ
+static char irq_tab_mace[][5] = {
+ /* Dummy INT#A INT#B INT#C INT#D */
+ {0, 0, 0, 0, 0}, /* This is a placeholder row - never used */
+ {0, SCSI0, SCSI0, SCSI0, SCSI0},
+ {0, SCSI1, SCSI1, SCSI1, SCSI1},
+ {0, INTA0, INTB, INTC, INTD},
+ {0, INTA1, INTC, INTD, INTB},
+ {0, INTA2, INTD, INTB, INTC},
+};
+
+
+/*
+ * Given a PCI slot number (a la PCI_SLOT(...)) and the interrupt pin of
+ * the device (1-4 => A-D), tell what irq to use. Note that we don't
+ * in theory have slots 4 and 5, and we never normally use the shared
+ * irqs. I suppose a device without a pin A will thank us for doing it
+ * right if there exists such a broken piece of crap.
+ */
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return irq_tab_mace[slot][pin];
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
diff --git a/arch/mips/pci/fixup-jmr3927.c b/arch/mips/pci/fixup-jmr3927.c
new file mode 100644
index 000000000..d3102eeea
--- /dev/null
+++ b/arch/mips/pci/fixup-jmr3927.c
@@ -0,0 +1,79 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ * Board specific pci fixups.
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ppopov@mvista.com or source@mvista.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/types.h>
+#include <asm/txx9/pci.h>
+#include <asm/txx9/jmr3927.h>
+
+int jmr3927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ unsigned char irq = pin;
+
+ /* IRQ rotation (PICMG) */
+ irq--; /* 0-3 */
+ if (slot == TX3927_PCIC_IDSEL_AD_TO_SLOT(23)) {
+ /* PCI CardSlot (IDSEL=A23, DevNu=12) */
+ /* PCIA => PCIC (IDSEL=A23) */
+ /* NOTE: JMR3927 JP1 must be set to OPEN */
+ irq = (irq + 2) % 4;
+ } else if (slot == TX3927_PCIC_IDSEL_AD_TO_SLOT(22)) {
+ /* PCI CardSlot (IDSEL=A22, DevNu=11) */
+ /* PCIA => PCIA (IDSEL=A22) */
+ /* NOTE: JMR3927 JP1 must be set to OPEN */
+ irq = (irq + 0) % 4;
+ } else {
+ /* PCI Backplane */
+ if (txx9_pci_option & TXX9_PCI_OPT_PICMG)
+ irq = (irq + 33 - slot) % 4;
+ else
+ irq = (irq + 3 + slot) % 4;
+ }
+ irq++; /* 1-4 */
+
+ switch (irq) {
+ case 1:
+ irq = JMR3927_IRQ_IOC_PCIA;
+ break;
+ case 2:
+ irq = JMR3927_IRQ_IOC_PCIB;
+ break;
+ case 3:
+ irq = JMR3927_IRQ_IOC_PCIC;
+ break;
+ case 4:
+ irq = JMR3927_IRQ_IOC_PCID;
+ break;
+ }
+
+ /* Check OnBoard Ethernet (IDSEL=A24, DevNu=13) */
+ if (dev->bus->parent == NULL &&
+ slot == TX3927_PCIC_IDSEL_AD_TO_SLOT(24))
+ irq = JMR3927_IRQ_ETHER0;
+ return irq;
+}
diff --git a/arch/mips/pci/fixup-lantiq.c b/arch/mips/pci/fixup-lantiq.c
new file mode 100644
index 000000000..105569c1b
--- /dev/null
+++ b/arch/mips/pci/fixup-lantiq.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+
+int (*ltq_pci_plat_arch_init)(struct pci_dev *dev) = NULL;
+int (*ltq_pci_plat_dev_init)(struct pci_dev *dev) = NULL;
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ if (ltq_pci_plat_arch_init)
+ return ltq_pci_plat_arch_init(dev);
+
+ if (ltq_pci_plat_dev_init)
+ return ltq_pci_plat_dev_init(dev);
+
+ return 0;
+}
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return of_irq_parse_and_map_pci(dev, slot, pin);
+}
diff --git a/arch/mips/pci/fixup-lemote2f.c b/arch/mips/pci/fixup-lemote2f.c
new file mode 100644
index 000000000..632ff2daa
--- /dev/null
+++ b/arch/mips/pci/fixup-lemote2f.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2008 Lemote Technology
+ * Copyright (C) 2004 ICT CAS
+ * Author: Li xiaoyu, lixy@ict.ac.cn
+ *
+ * Copyright (C) 2007 Lemote, Inc.
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ */
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <loongson.h>
+#include <cs5536/cs5536.h>
+#include <cs5536/cs5536_pci.h>
+
+/* PCI interrupt pins
+ *
+ * These should not be changed, or you should consider loongson2f interrupt
+ * register and your pci card dispatch
+ */
+
+#define PCIA 4
+#define PCIB 5
+#define PCIC 6
+#define PCID 7
+
+/* all the pci device has the PCIA pin, check the datasheet. */
+static char irq_tab[][5] = {
+ /* INTA INTB INTC INTD */
+ {0, 0, 0, 0, 0}, /* 11: Unused */
+ {0, 0, 0, 0, 0}, /* 12: Unused */
+ {0, 0, 0, 0, 0}, /* 13: Unused */
+ {0, 0, 0, 0, 0}, /* 14: Unused */
+ {0, 0, 0, 0, 0}, /* 15: Unused */
+ {0, 0, 0, 0, 0}, /* 16: Unused */
+ {0, PCIA, 0, 0, 0}, /* 17: RTL8110-0 */
+ {0, PCIB, 0, 0, 0}, /* 18: RTL8110-1 */
+ {0, PCIC, 0, 0, 0}, /* 19: SiI3114 */
+ {0, PCID, 0, 0, 0}, /* 20: 3-ports nec usb */
+ {0, PCIA, PCIB, PCIC, PCID}, /* 21: PCI-SLOT */
+ {0, 0, 0, 0, 0}, /* 22: Unused */
+ {0, 0, 0, 0, 0}, /* 23: Unused */
+ {0, 0, 0, 0, 0}, /* 24: Unused */
+ {0, 0, 0, 0, 0}, /* 25: Unused */
+ {0, 0, 0, 0, 0}, /* 26: Unused */
+ {0, 0, 0, 0, 0}, /* 27: Unused */
+};
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ int virq;
+
+ if ((PCI_SLOT(dev->devfn) != PCI_IDSEL_CS5536)
+ && (PCI_SLOT(dev->devfn) < 32)) {
+ virq = irq_tab[slot][pin];
+ printk(KERN_INFO "slot: %d, pin: %d, irq: %d\n", slot, pin,
+ virq + LOONGSON_IRQ_BASE);
+ if (virq != 0)
+ return LOONGSON_IRQ_BASE + virq;
+ else
+ return 0;
+ } else if (PCI_SLOT(dev->devfn) == PCI_IDSEL_CS5536) { /* cs5536 */
+ switch (PCI_FUNC(dev->devfn)) {
+ case 2:
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
+ CS5536_IDE_INTR);
+ return CS5536_IDE_INTR; /* for IDE */
+ case 3:
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
+ CS5536_ACC_INTR);
+ return CS5536_ACC_INTR; /* for AUDIO */
+ case 4: /* for OHCI */
+ case 5: /* for EHCI */
+ case 6: /* for UDC */
+ case 7: /* for OTG */
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
+ CS5536_USB_INTR);
+ return CS5536_USB_INTR;
+ }
+ return dev->irq;
+ } else {
+ printk(KERN_INFO "strange PCI slot number.\n");
+ return 0;
+ }
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
+
+/* CS5536 SPEC. fixup */
+static void loongson_cs5536_isa_fixup(struct pci_dev *pdev)
+{
+ /* the uart1 and uart2 interrupts in the PIC are enabled by default */
+ pci_write_config_dword(pdev, PCI_UART1_INT_REG, 1);
+ pci_write_config_dword(pdev, PCI_UART2_INT_REG, 1);
+}
+
+static void loongson_cs5536_ide_fixup(struct pci_dev *pdev)
+{
+ /* set the muxed pins to their IDE function */
+ pci_write_config_dword(pdev, PCI_IDE_CFG_REG,
+ CS5536_IDE_FLASH_SIGNATURE);
+}
+
+static void loongson_cs5536_acc_fixup(struct pci_dev *pdev)
+{
+ /* enable the AUDIO interrupt in PIC */
+ pci_write_config_dword(pdev, PCI_ACC_INT_REG, 1);
+
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xc0);
+}
+
+static void loongson_cs5536_ohci_fixup(struct pci_dev *pdev)
+{
+ /* enable the OHCI interrupt in PIC */
+ /* THE OHCI, EHCI, UDC, OTG are shared with interrupt in PIC */
+ pci_write_config_dword(pdev, PCI_OHCI_INT_REG, 1);
+}
+
+static void loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
+{
+ u32 hi, lo;
+
+ /* Serial short detect enable */
+ _rdmsr(USB_MSR_REG(USB_CONFIG), &hi, &lo);
+ _wrmsr(USB_MSR_REG(USB_CONFIG), (1 << 1) | (1 << 3), lo);
+
+ /* setting the USB2.0 micro frame length */
+ pci_write_config_dword(pdev, PCI_EHCI_FLADJ_REG, 0x2000);
+}
+
+static void loongson_nec_fixup(struct pci_dev *pdev)
+{
+ unsigned int val;
+
+ pci_read_config_dword(pdev, 0xe0, &val);
+ /* Only 2 ports are used */
+ pci_write_config_dword(pdev, 0xe0, (val & ~3) | 0x2);
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA,
+ loongson_cs5536_isa_fixup);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_OHC,
+ loongson_cs5536_ohci_fixup);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_EHC,
+ loongson_cs5536_ehci_fixup);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_AUDIO,
+ loongson_cs5536_acc_fixup);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE,
+ loongson_cs5536_ide_fixup);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
+ loongson_nec_fixup);
diff --git a/arch/mips/pci/fixup-malta.c b/arch/mips/pci/fixup-malta.c
new file mode 100644
index 000000000..8131e0ffe
--- /dev/null
+++ b/arch/mips/pci/fixup-malta.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <asm/mips-boards/piix4.h>
+
+/* PCI interrupt pins */
+#define PCIA 1
+#define PCIB 2
+#define PCIC 3
+#define PCID 4
+
+/* This table is filled in by interrogating the PIIX4 chip */
+static char pci_irq[5] = {
+};
+
+static char irq_tab[][5] = {
+ /* INTA INTB INTC INTD */
+ {0, 0, 0, 0, 0 }, /* 0: GT64120 PCI bridge */
+ {0, 0, 0, 0, 0 }, /* 1: Unused */
+ {0, 0, 0, 0, 0 }, /* 2: Unused */
+ {0, 0, 0, 0, 0 }, /* 3: Unused */
+ {0, 0, 0, 0, 0 }, /* 4: Unused */
+ {0, 0, 0, 0, 0 }, /* 5: Unused */
+ {0, 0, 0, 0, 0 }, /* 6: Unused */
+ {0, 0, 0, 0, 0 }, /* 7: Unused */
+ {0, 0, 0, 0, 0 }, /* 8: Unused */
+ {0, 0, 0, 0, 0 }, /* 9: Unused */
+ {0, 0, 0, 0, PCID }, /* 10: PIIX4 USB */
+ {0, PCIB, 0, 0, 0 }, /* 11: AMD 79C973 Ethernet */
+ {0, PCIC, 0, 0, 0 }, /* 12: Crystal 4281 Sound */
+ {0, 0, 0, 0, 0 }, /* 13: Unused */
+ {0, 0, 0, 0, 0 }, /* 14: Unused */
+ {0, 0, 0, 0, 0 }, /* 15: Unused */
+ {0, 0, 0, 0, 0 }, /* 16: Unused */
+ {0, 0, 0, 0, 0 }, /* 17: Bonito/SOC-it PCI Bridge*/
+ {0, PCIA, PCIB, PCIC, PCID }, /* 18: PCI Slot 1 */
+ {0, PCIB, PCIC, PCID, PCIA }, /* 19: PCI Slot 2 */
+ {0, PCIC, PCID, PCIA, PCIB }, /* 20: PCI Slot 3 */
+ {0, PCID, PCIA, PCIB, PCIC } /* 21: PCI Slot 4 */
+};
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ int virq;
+ virq = irq_tab[slot][pin];
+ return pci_irq[virq];
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
+
+static void malta_piix_func3_base_fixup(struct pci_dev *dev)
+{
+ /* Set a sane PM I/O base address */
+ pci_write_config_word(dev, PIIX4_FUNC3_PMBA, 0x1000);
+
+ /* Enable access to the PM I/O region */
+ pci_write_config_byte(dev, PIIX4_FUNC3_PMREGMISC,
+ PIIX4_FUNC3_PMREGMISC_EN);
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3,
+ malta_piix_func3_base_fixup);
+
+static void malta_piix_func0_fixup(struct pci_dev *pdev)
+{
+ unsigned char reg_val;
+ u32 reg_val32;
+ u16 reg_val16;
+ /* PIIX PIRQC[A:D] irq mappings */
+ static int piixirqmap[PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX] = {
+ 0, 0, 0, 3,
+ 4, 5, 6, 7,
+ 0, 9, 10, 11,
+ 12, 0, 14, 15
+ };
+ int i;
+
+ /* Interrogate PIIX4 to get PCI IRQ mapping */
+ for (i = 0; i <= 3; i++) {
+ pci_read_config_byte(pdev, PIIX4_FUNC0_PIRQRC+i, &reg_val);
+ if (reg_val & PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE)
+ pci_irq[PCIA+i] = 0; /* Disabled */
+ else
+ pci_irq[PCIA+i] = piixirqmap[reg_val &
+ PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK];
+ }
+
+ /* Done by YAMON 2.00 onwards */
+ if (PCI_SLOT(pdev->devfn) == 10) {
+ /*
+ * Set top of main memory accessible by ISA or DMA
+ * devices to 16 Mb.
+ */
+ pci_read_config_byte(pdev, PIIX4_FUNC0_TOM, &reg_val);
+ pci_write_config_byte(pdev, PIIX4_FUNC0_TOM, reg_val |
+ PIIX4_FUNC0_TOM_TOP_OF_MEMORY_MASK);
+ }
+
+ /* Mux SERIRQ to its pin */
+ pci_read_config_dword(pdev, PIIX4_FUNC0_GENCFG, &reg_val32);
+ pci_write_config_dword(pdev, PIIX4_FUNC0_GENCFG,
+ reg_val32 | PIIX4_FUNC0_GENCFG_SERIRQ);
+
+ /* Enable SERIRQ */
+ pci_read_config_byte(pdev, PIIX4_FUNC0_SERIRQC, &reg_val);
+ reg_val |= PIIX4_FUNC0_SERIRQC_EN | PIIX4_FUNC0_SERIRQC_CONT;
+ pci_write_config_byte(pdev, PIIX4_FUNC0_SERIRQC, reg_val);
+
+ /* Enable response to special cycles */
+ pci_read_config_word(pdev, PCI_COMMAND, &reg_val16);
+ pci_write_config_word(pdev, PCI_COMMAND,
+ reg_val16 | PCI_COMMAND_SPECIAL);
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
+ malta_piix_func0_fixup);
+
+static void malta_piix_func1_fixup(struct pci_dev *pdev)
+{
+ unsigned char reg_val;
+
+ /* Done by YAMON 2.02 onwards */
+ if (PCI_SLOT(pdev->devfn) == 10) {
+ /*
+ * IDE Decode enable.
+ */
+ pci_read_config_byte(pdev, PIIX4_FUNC1_IDETIM_PRIMARY_HI,
+ &reg_val);
+ pci_write_config_byte(pdev, PIIX4_FUNC1_IDETIM_PRIMARY_HI,
+ reg_val|PIIX4_FUNC1_IDETIM_PRIMARY_HI_IDE_DECODE_EN);
+ pci_read_config_byte(pdev, PIIX4_FUNC1_IDETIM_SECONDARY_HI,
+ &reg_val);
+ pci_write_config_byte(pdev, PIIX4_FUNC1_IDETIM_SECONDARY_HI,
+ reg_val|PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN);
+ }
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB,
+ malta_piix_func1_fixup);
+
+/* Enable PCI 2.1 compatibility in PIIX4 */
+static void quirk_dlcsetup(struct pci_dev *dev)
+{
+ u8 odlc, ndlc;
+
+ (void) pci_read_config_byte(dev, PIIX4_FUNC0_DLC, &odlc);
+ /* Enable passive releases and delayed transaction */
+ ndlc = odlc | PIIX4_FUNC0_DLC_USBPR_EN |
+ PIIX4_FUNC0_DLC_PASSIVE_RELEASE_EN |
+ PIIX4_FUNC0_DLC_DELAYED_TRANSACTION_EN;
+ (void) pci_write_config_byte(dev, PIIX4_FUNC0_DLC, ndlc);
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
+ quirk_dlcsetup);
diff --git a/arch/mips/pci/fixup-mpc30x.c b/arch/mips/pci/fixup-mpc30x.c
new file mode 100644
index 000000000..27c75f268
--- /dev/null
+++ b/arch/mips/pci/fixup-mpc30x.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * fixup-mpc30x.c, The Victor MP-C303/304 specific PCI fixups.
+ *
+ * Copyright (C) 2002,2004 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/vr41xx/mpc30x.h>
+
+static const int internal_func_irqs[] = {
+ VRC4173_CASCADE_IRQ,
+ VRC4173_AC97_IRQ,
+ VRC4173_USB_IRQ,
+};
+
+static const int irq_tab_mpc30x[] = {
+ [12] = VRC4173_PCMCIA1_IRQ,
+ [13] = VRC4173_PCMCIA2_IRQ,
+ [29] = MQ200_IRQ,
+};
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ if (slot == 30)
+ return internal_func_irqs[PCI_FUNC(dev->devfn)];
+
+ return irq_tab_mpc30x[slot];
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
diff --git a/arch/mips/pci/fixup-rbtx4927.c b/arch/mips/pci/fixup-rbtx4927.c
new file mode 100644
index 000000000..d6aaed1d6
--- /dev/null
+++ b/arch/mips/pci/fixup-rbtx4927.c
@@ -0,0 +1,73 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ * Board specific pci fixups for the Toshiba rbtx4927
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ppopov@mvista.com or source@mvista.com
+ *
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * Copyright (C) 2004 MontaVista Software Inc.
+ * Author: Manish Lachwani (mlachwani@mvista.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/types.h>
+#include <asm/txx9/pci.h>
+#include <asm/txx9/rbtx4927.h>
+
+int rbtx4927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ unsigned char irq = pin;
+
+ /* IRQ rotation */
+ irq--; /* 0-3 */
+ if (slot == TX4927_PCIC_IDSEL_AD_TO_SLOT(23)) {
+ /* PCI CardSlot (IDSEL=A23) */
+ /* PCIA => PCIA */
+ irq = (irq + 0 + slot) % 4;
+ } else {
+ /* PCI Backplane */
+ if (txx9_pci_option & TXX9_PCI_OPT_PICMG)
+ irq = (irq + 33 - slot) % 4;
+ else
+ irq = (irq + 3 + slot) % 4;
+ }
+ irq++; /* 1-4 */
+
+ switch (irq) {
+ case 1:
+ irq = RBTX4927_IRQ_IOC_PCIA;
+ break;
+ case 2:
+ irq = RBTX4927_IRQ_IOC_PCIB;
+ break;
+ case 3:
+ irq = RBTX4927_IRQ_IOC_PCIC;
+ break;
+ case 4:
+ irq = RBTX4927_IRQ_IOC_PCID;
+ break;
+ }
+ return irq;
+}
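
The rotation above is easiest to see in isolation. Below is a minimal stand-alone sketch (plain C, not part of this patch; the base IRQ number and the slot values are made-up placeholders) of the same pin/slot arithmetic, so the PICMG and standard backplane cases can be compared directly:

/*
 * Stand-alone illustration of the interrupt-pin rotation used by
 * rbtx4927_pci_map_irq(): pin 1-4 is rotated by the slot number so that
 * INTA-INTD of adjacent slots land on different host interrupt lines.
 */
#include <stdio.h>

#define FAKE_IRQ_PCIA 20	/* placeholder, not the real RBTX4927 value */

static int rotate_pin(int slot, int pin, int picmg)
{
	int irq = pin - 1;			/* 0-3 */

	if (picmg)
		irq = (irq + 33 - slot) % 4;	/* PICMG backplane rotation */
	else
		irq = (irq + 3 + slot) % 4;	/* standard backplane rotation */

	return FAKE_IRQ_PCIA + irq;		/* PCIA..PCID */
}

int main(void)
{
	int slot, pin;

	for (slot = 17; slot <= 20; slot++)
		for (pin = 1; pin <= 4; pin++)
			printf("slot %d INT%c -> irq %d\n",
			       slot, 'A' + pin - 1, rotate_pin(slot, pin, 0));
	return 0;
}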
diff --git a/arch/mips/pci/fixup-rbtx4938.c b/arch/mips/pci/fixup-rbtx4938.c
new file mode 100644
index 000000000..ff22a22db
--- /dev/null
+++ b/arch/mips/pci/fixup-rbtx4938.c
@@ -0,0 +1,53 @@
+/*
+ * Toshiba rbtx4938 pci routines
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
+ */
+#include <linux/types.h>
+#include <asm/txx9/pci.h>
+#include <asm/txx9/rbtx4938.h>
+
+int rbtx4938_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ int irq = tx4938_pcic1_map_irq(dev, slot);
+
+ if (irq >= 0)
+ return irq;
+ irq = pin;
+ /* IRQ rotation */
+ irq--; /* 0-3 */
+ if (slot == TX4927_PCIC_IDSEL_AD_TO_SLOT(23)) {
+ /* PCI CardSlot (IDSEL=A23) */
+ /* PCIA => PCIA (IDSEL=A23) */
+ irq = (irq + 0 + slot) % 4;
+ } else {
+ /* PCI Backplane */
+ if (txx9_pci_option & TXX9_PCI_OPT_PICMG)
+ irq = (irq + 33 - slot) % 4;
+ else
+ irq = (irq + 3 + slot) % 4;
+ }
+ irq++; /* 1-4 */
+
+ switch (irq) {
+ case 1:
+ irq = RBTX4938_IRQ_IOC_PCIA;
+ break;
+ case 2:
+ irq = RBTX4938_IRQ_IOC_PCIB;
+ break;
+ case 3:
+ irq = RBTX4938_IRQ_IOC_PCIC;
+ break;
+ case 4:
+ irq = RBTX4938_IRQ_IOC_PCID;
+ break;
+ }
+ return irq;
+}
diff --git a/arch/mips/pci/fixup-rc32434.c b/arch/mips/pci/fixup-rc32434.c
new file mode 100644
index 000000000..7fcafd5da
--- /dev/null
+++ b/arch/mips/pci/fixup-rc32434.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * stevel@mvista.com or source@mvista.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+
+#include <asm/mach-rc32434/rc32434.h>
+#include <asm/mach-rc32434/irq.h>
+
+static int irq_map[2][12] = {
+ {0, 0, 2, 3, 2, 3, 0, 0, 0, 0, 0, 1},
+ {0, 0, 1, 3, 0, 2, 1, 3, 0, 2, 1, 3}
+};
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ int irq = 0;
+
+ if (dev->bus->number < 2 && PCI_SLOT(dev->devfn) < 12)
+ irq = irq_map[dev->bus->number][PCI_SLOT(dev->devfn)];
+
+ return irq + GROUP4_IRQ_BASE + 4;
+}
+
+static void rc32434_pci_early_fixup(struct pci_dev *dev)
+{
+ if (PCI_SLOT(dev->devfn) == 6 && dev->bus->number == 0) {
+ /* disable prefetched memory range */
+ pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, 0);
+ pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, 0x10);
+
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 4);
+ }
+}
+
+/*
+ * The fixup applies to both the IDT and VIA devices present on the board
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, rc32434_pci_early_fixup);
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
diff --git a/arch/mips/pci/fixup-sb1250.c b/arch/mips/pci/fixup-sb1250.c
new file mode 100644
index 000000000..40efc990c
--- /dev/null
+++ b/arch/mips/pci/fixup-sb1250.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2004, 2006 MIPS Technologies, Inc. All rights reserved.
+ * Author: Maciej W. Rozycki <macro@mips.com>
+ * Copyright (C) 2018 Maciej W. Rozycki
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+
+/*
+ * Set the BCM1250, etc. PCI host bridge's TRDY timeout
+ * to the finite max.
+ */
+static void quirk_sb1250_pci(struct pci_dev *dev)
+{
+ pci_write_config_byte(dev, 0x40, 0xff);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
+ quirk_sb1250_pci);
+
+/*
+ * The BCM1250, etc. PCI host bridge does not support DAC on its 32-bit
+ * bus, so we set the bus's DMA limit accordingly. However the HT link
+ * down the artificial PCI-HT bridge supports 40-bit addressing and the
+ * SP1011 HT-PCI bridge downstream supports both DAC and a 64-bit bus
+ * width, so we record the PCI-HT bridge's secondary and subordinate bus
+ * numbers and do not set the limit for devices present in the inclusive
+ * range of those.
+ */
+struct sb1250_bus_dma_limit_exclude {
+ bool set;
+ unsigned char start;
+ unsigned char end;
+};
+
+static int sb1250_bus_dma_limit(struct pci_dev *dev, void *data)
+{
+ struct sb1250_bus_dma_limit_exclude *exclude = data;
+ bool exclude_this;
+ bool ht_bridge;
+
+ exclude_this = exclude->set && (dev->bus->number >= exclude->start &&
+ dev->bus->number <= exclude->end);
+ ht_bridge = !exclude->set && (dev->vendor == PCI_VENDOR_ID_SIBYTE &&
+ dev->device == PCI_DEVICE_ID_BCM1250_HT);
+
+ if (exclude_this) {
+ dev_dbg(&dev->dev, "not disabling DAC for device");
+ } else if (ht_bridge) {
+ exclude->start = dev->subordinate->number;
+ exclude->end = pci_bus_max_busnr(dev->subordinate);
+ exclude->set = true;
+ dev_dbg(&dev->dev, "not disabling DAC for [bus %02x-%02x]",
+ exclude->start, exclude->end);
+ } else {
+ dev_dbg(&dev->dev, "disabling DAC for device");
+ dev->dev.bus_dma_limit = DMA_BIT_MASK(32);
+ }
+
+ return 0;
+}
+
+static void quirk_sb1250_pci_dac(struct pci_dev *dev)
+{
+ struct sb1250_bus_dma_limit_exclude exclude = { .set = false };
+
+ pci_walk_bus(dev->bus, sb1250_bus_dma_limit, &exclude);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
+ quirk_sb1250_pci_dac);
+
+/*
+ * The BCM1250, etc. PCI/HT bridge reports as a host bridge.
+ */
+static void quirk_sb1250_ht(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_HT,
+ quirk_sb1250_ht);
+
+/*
+ * Set the SP1011 HT/PCI bridge's TRDY timeout to the finite max.
+ */
+static void quirk_sp1011(struct pci_dev *dev)
+{
+ pci_write_config_byte(dev, 0x64, 0xff);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIPACKETS, PCI_DEVICE_ID_SP1011,
+ quirk_sp1011);
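
To make the bus-range bookkeeping in quirk_sb1250_pci_dac() concrete, here is a minimal user-space sketch (not kernel code; the bus numbers and bridge layout are invented) of the same exclude-range logic replayed over three visited devices:

/*
 * Once the HT bridge is seen, remember its secondary..subordinate bus
 * range and skip the 32-bit DMA limit for any device whose bus number
 * falls inside that range.
 */
#include <stdbool.h>
#include <stdio.h>

struct exclude_range {
	bool set;
	unsigned char start, end;
};

static void visit(struct exclude_range *ex, unsigned char bus,
		  bool is_ht_bridge, unsigned char sec, unsigned char sub)
{
	if (ex->set && bus >= ex->start && bus <= ex->end)
		printf("bus %02x: keep 64-bit DAC\n", bus);
	else if (!ex->set && is_ht_bridge) {
		ex->start = sec;
		ex->end = sub;
		ex->set = true;
		printf("bus %02x: HT bridge, exclude [%02x-%02x]\n", bus, sec, sub);
	} else
		printf("bus %02x: limit DMA to 32 bits\n", bus);
}

int main(void)
{
	struct exclude_range ex = { .set = false };

	visit(&ex, 0x00, false, 0, 0);		/* device on the SiByte PCI bus */
	visit(&ex, 0x00, true, 0x01, 0x04);	/* the PCI-HT bridge itself */
	visit(&ex, 0x02, false, 0, 0);		/* device behind the HT bridge */
	return 0;
}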
diff --git a/arch/mips/pci/fixup-sni.c b/arch/mips/pci/fixup-sni.c
new file mode 100644
index 000000000..de012f8bd
--- /dev/null
+++ b/arch/mips/pci/fixup-sni.c
@@ -0,0 +1,169 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SNI specific PCI support for RM200/RM300.
+ *
+ * Copyright (C) 1997 - 2000, 2003, 04 Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/mipsregs.h>
+#include <asm/sni.h>
+
+#include <irq.h>
+
+/*
+ * PCIMT Shortcuts ...
+ */
+#define SCSI PCIMT_IRQ_SCSI
+#define ETH PCIMT_IRQ_ETHERNET
+#define INTA PCIMT_IRQ_INTA
+#define INTB PCIMT_IRQ_INTB
+#define INTC PCIMT_IRQ_INTC
+#define INTD PCIMT_IRQ_INTD
+
+/*
+ * Device 0: PCI EISA Bridge (directly routed)
+ * Device 1: NCR53c810 SCSI (directly routed)
+ * Device 2: PCnet32 Ethernet (directly routed)
+ * Device 3: VGA (routed to INTB)
+ * Device 4: Unused
+ * Device 5: Slot 2
+ * Device 6: Slot 3
+ * Device 7: Slot 4
+ *
+ * Documentation says the VGA is device 5 and device 3 is unused but that
+ * seems to be a documentation error. At least on my RM200C the Cirrus
+ * Logic CL-GD5434 VGA is device 3.
+ */
+static char irq_tab_rm200[8][5] = {
+ /* INTA INTB INTC INTD */
+ { 0, 0, 0, 0, 0 }, /* EISA bridge */
+ { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */
+ { ETH, ETH, ETH, ETH, ETH }, /* Ethernet */
+ { INTB, INTB, INTB, INTB, INTB }, /* VGA */
+ { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, INTB, INTC, INTD, INTA }, /* Slot 2 */
+ { 0, INTC, INTD, INTA, INTB }, /* Slot 3 */
+ { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */
+};
+
+/*
+ * In Revision D of the RM300, Device 2 has become a general-purpose Slot 1.
+ *
+ * The VGA card is optional for RM300 systems.
+ */
+static char irq_tab_rm300d[8][5] = {
+ /* INTA INTB INTC INTD */
+ { 0, 0, 0, 0, 0 }, /* EISA bridge */
+ { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */
+ { 0, INTC, INTD, INTA, INTB }, /* Slot 1 */
+ { INTB, INTB, INTB, INTB, INTB }, /* VGA */
+ { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, INTB, INTC, INTD, INTA }, /* Slot 2 */
+ { 0, INTC, INTD, INTA, INTB }, /* Slot 3 */
+ { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */
+};
+
+static char irq_tab_rm300e[5][5] = {
+ /* INTA INTB INTC INTD */
+ { 0, 0, 0, 0, 0 }, /* HOST bridge */
+ { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */
+ { 0, INTC, INTD, INTA, INTB }, /* Bridge/i960 */
+ { 0, INTD, INTA, INTB, INTC }, /* Slot 1 */
+ { 0, INTA, INTB, INTC, INTD }, /* Slot 2 */
+};
+#undef SCSI
+#undef ETH
+#undef INTA
+#undef INTB
+#undef INTC
+#undef INTD
+
+
+/*
+ * PCIT Shortcuts ...
+ */
+#define SCSI0 PCIT_IRQ_SCSI0
+#define SCSI1 PCIT_IRQ_SCSI1
+#define ETH PCIT_IRQ_ETHERNET
+#define INTA PCIT_IRQ_INTA
+#define INTB PCIT_IRQ_INTB
+#define INTC PCIT_IRQ_INTC
+#define INTD PCIT_IRQ_INTD
+
+static char irq_tab_pcit[13][5] = {
+ /* INTA INTB INTC INTD */
+ { 0, 0, 0, 0, 0 }, /* HOST bridge */
+ { SCSI0, SCSI0, SCSI0, SCSI0, SCSI0 }, /* SCSI */
+ { SCSI1, SCSI1, SCSI1, SCSI1, SCSI1 }, /* SCSI */
+ { ETH, ETH, ETH, ETH, ETH }, /* Ethernet */
+ { 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */
+ { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, INTA, INTB, INTC, INTD }, /* Slot 1 */
+ { 0, INTB, INTC, INTD, INTA }, /* Slot 2 */
+ { 0, INTC, INTD, INTA, INTB }, /* Slot 3 */
+ { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */
+ { 0, INTA, INTB, INTC, INTD }, /* Slot 5 */
+};
+
+static char irq_tab_pcit_cplus[13][5] = {
+ /* INTA INTB INTC INTD */
+ { 0, 0, 0, 0, 0 }, /* HOST bridge */
+ { 0, INTB, INTC, INTD, INTA }, /* PCI Slot 9 */
+ { 0, 0, 0, 0, 0 }, /* PCI-EISA */
+ { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */
+ { 0, INTB, INTC, INTD, INTA }, /* fixup */
+};
+
+static inline int is_rm300_revd(void)
+{
+ unsigned char csmsr = *(volatile unsigned char *)PCIMT_CSMSR;
+
+ return (csmsr & 0xa0) == 0x20;
+}
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ switch (sni_brd_type) {
+ case SNI_BRD_PCI_TOWER_CPLUS:
+ if (slot == 4) {
+ /*
+ * SNI messed up interrupt wiring for onboard
+ * PCI bus 1; we need to fix this up here
+ */
+ while (dev && dev->bus->number != 1)
+ dev = dev->bus->self;
+ if (dev && dev->devfn >= PCI_DEVFN(4, 0))
+ slot = 5;
+ }
+ return irq_tab_pcit_cplus[slot][pin];
+ case SNI_BRD_PCI_TOWER:
+ return irq_tab_pcit[slot][pin];
+
+ case SNI_BRD_PCI_MTOWER:
+ if (is_rm300_revd())
+ return irq_tab_rm300d[slot][pin];
+ fallthrough;
+ case SNI_BRD_PCI_DESKTOP:
+ return irq_tab_rm200[slot][pin];
+
+ case SNI_BRD_PCI_MTOWER_CPLUS:
+ return irq_tab_rm300e[slot][pin];
+ }
+
+ return 0;
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
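
The bus walk in the SNI_BRD_PCI_TOWER_CPLUS case above climbs from the device up to the bridge that sits on bus 1 before picking the slot used for the lookup. A minimal stand-alone sketch (plain C, not kernel code; the bridge/device layout and numbers are invented) of that walk:

#include <stddef.h>
#include <stdio.h>

struct fake_dev {
	int bus_number;			/* bus the device sits on */
	int devfn;
	struct fake_dev *bridge;	/* bridge leading to this device's bus */
};

/* Follow parent bridges until the device sitting on bus 1 is reached. */
static int effective_slot(const struct fake_dev *dev, int slot)
{
	while (dev && dev->bus_number != 1)
		dev = dev->bridge;
	if (dev && dev->devfn >= (4 << 3))	/* PCI_DEVFN(4, 0) */
		return 5;
	return slot;
}

int main(void)
{
	struct fake_dev bridge = { .bus_number = 1, .devfn = 5 << 3, .bridge = NULL };
	struct fake_dev dev = { .bus_number = 2, .devfn = 0, .bridge = &bridge };

	printf("slot used for lookup: %d\n", effective_slot(&dev, 4));
	return 0;
}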
diff --git a/arch/mips/pci/fixup-tb0219.c b/arch/mips/pci/fixup-tb0219.c
new file mode 100644
index 000000000..439429985
--- /dev/null
+++ b/arch/mips/pci/fixup-tb0219.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * fixup-tb0219.c, The TANBAC TB0219 specific PCI fixups.
+ *
+ * Copyright (C) 2003 Megasolution Inc. <matsu@megasolution.jp>
+ * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/vr41xx/tb0219.h>
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ int irq = -1;
+
+ switch (slot) {
+ case 12:
+ irq = TB0219_PCI_SLOT1_IRQ;
+ break;
+ case 13:
+ irq = TB0219_PCI_SLOT2_IRQ;
+ break;
+ case 14:
+ irq = TB0219_PCI_SLOT3_IRQ;
+ break;
+ default:
+ break;
+ }
+
+ return irq;
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
diff --git a/arch/mips/pci/fixup-tb0226.c b/arch/mips/pci/fixup-tb0226.c
new file mode 100644
index 000000000..a4d1efadf
--- /dev/null
+++ b/arch/mips/pci/fixup-tb0226.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * fixup-tb0226.c, The TANBAC TB0226 specific PCI fixups.
+ *
+ * Copyright (C) 2002-2005 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/vr41xx/giu.h>
+#include <asm/vr41xx/tb0226.h>
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ int irq = -1;
+
+ switch (slot) {
+ case 12:
+ vr41xx_set_irq_trigger(GD82559_1_PIN,
+ IRQ_TRIGGER_LEVEL,
+ IRQ_SIGNAL_THROUGH);
+ vr41xx_set_irq_level(GD82559_1_PIN, IRQ_LEVEL_LOW);
+ irq = GD82559_1_IRQ;
+ break;
+ case 13:
+ vr41xx_set_irq_trigger(GD82559_2_PIN,
+ IRQ_TRIGGER_LEVEL,
+ IRQ_SIGNAL_THROUGH);
+ vr41xx_set_irq_level(GD82559_2_PIN, IRQ_LEVEL_LOW);
+ irq = GD82559_2_IRQ;
+ break;
+ case 14:
+ switch (pin) {
+ case 1:
+ vr41xx_set_irq_trigger(UPD720100_INTA_PIN,
+ IRQ_TRIGGER_LEVEL,
+ IRQ_SIGNAL_THROUGH);
+ vr41xx_set_irq_level(UPD720100_INTA_PIN,
+ IRQ_LEVEL_LOW);
+ irq = UPD720100_INTA_IRQ;
+ break;
+ case 2:
+ vr41xx_set_irq_trigger(UPD720100_INTB_PIN,
+ IRQ_TRIGGER_LEVEL,
+ IRQ_SIGNAL_THROUGH);
+ vr41xx_set_irq_level(UPD720100_INTB_PIN,
+ IRQ_LEVEL_LOW);
+ irq = UPD720100_INTB_IRQ;
+ break;
+ case 3:
+ vr41xx_set_irq_trigger(UPD720100_INTC_PIN,
+ IRQ_TRIGGER_LEVEL,
+ IRQ_SIGNAL_THROUGH);
+ vr41xx_set_irq_level(UPD720100_INTC_PIN,
+ IRQ_LEVEL_LOW);
+ irq = UPD720100_INTC_IRQ;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return irq;
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
diff --git a/arch/mips/pci/fixup-tb0287.c b/arch/mips/pci/fixup-tb0287.c
new file mode 100644
index 000000000..721ec9ac1
--- /dev/null
+++ b/arch/mips/pci/fixup-tb0287.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * fixup-tb0287.c, The TANBAC TB0287 specific PCI fixups.
+ *
+ * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/vr41xx/tb0287.h>
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ unsigned char bus;
+ int irq = -1;
+
+ bus = dev->bus->number;
+ if (bus == 0) {
+ switch (slot) {
+ case 16:
+ irq = TB0287_SM501_IRQ;
+ break;
+ case 17:
+ irq = TB0287_SIL680A_IRQ;
+ break;
+ default:
+ break;
+ }
+ } else if (bus == 1) {
+ switch (PCI_SLOT(dev->devfn)) {
+ case 0:
+ irq = TB0287_PCI_SLOT_IRQ;
+ break;
+ case 2:
+ case 3:
+ irq = TB0287_RTL8110_IRQ;
+ break;
+ default:
+ break;
+ }
+ } else if (bus > 1) {
+ irq = TB0287_PCI_SLOT_IRQ;
+ }
+
+ return irq;
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
new file mode 100644
index 000000000..288b58b00
--- /dev/null
+++ b/arch/mips/pci/msi-octeon.c
@@ -0,0 +1,438 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2009, 2010 Cavium Networks
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-pci-defs.h>
+#include <asm/octeon/cvmx-npei-defs.h>
+#include <asm/octeon/cvmx-sli-defs.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include <asm/octeon/pci-octeon.h>
+
+/*
+ * Each bit in msi_free_irq_bitmask represents an MSI interrupt that is
+ * in use.
+ */
+static u64 msi_free_irq_bitmask[4];
+
+/*
+ * Each bit in msi_multiple_irq_bitmask indicates that the device using
+ * this bit in msi_free_irq_bitmask is also using the next bit. This
+ * is used so we can disable all of the MSI interrupts when a device
+ * uses multiple.
+ */
+static u64 msi_multiple_irq_bitmask[4];
+
+/*
+ * This lock controls updates to msi_free_irq_bitmask and
+ * msi_multiple_irq_bitmask.
+ */
+static DEFINE_SPINLOCK(msi_free_irq_bitmask_lock);
+
+/*
+ * Number of MSI IRQs used. This variable is set up at
+ * module init time.
+ */
+static int msi_irq_size;
+
+/**
+ * Called when a driver requests MSI interrupts instead of the
+ * legacy INT A-D. This routine will allocate multiple interrupts
+ * for MSI devices that support them. A device can override this by
+ * programming the MSI control bits [6:4] before calling
+ * pci_enable_msi().
+ *
+ * @dev: Device requesting MSI interrupts
+ * @desc: MSI descriptor
+ *
+ * Returns 0 on success.
+ */
+int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
+{
+ struct msi_msg msg;
+ u16 control;
+ int configured_private_bits;
+ int request_private_bits;
+ int irq = 0;
+ int irq_step;
+ u64 search_mask;
+ int index;
+
+ /*
+ * Read the MSI config to figure out how many IRQs this device
+ * wants. Most devices only want 1, which will leave
+ * configured_private_bits and request_private_bits equal to 0.
+ */
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+
+ /*
+ * If the number of private bits has been configured then use
+ * that value instead of the requested number. This gives the
+ * driver the chance to override the number of interrupts
+ * before calling pci_enable_msi().
+ */
+ configured_private_bits = (control & PCI_MSI_FLAGS_QSIZE) >> 4;
+ if (configured_private_bits == 0) {
+ /* Nothing is configured, so use the hardware requested size */
+ request_private_bits = (control & PCI_MSI_FLAGS_QMASK) >> 1;
+ } else {
+ /*
+ * Use the number of configured bits, assuming the
+ * driver wanted to override the hardware request
+ * value.
+ */
+ request_private_bits = configured_private_bits;
+ }
+
+ /*
+ * The PCI 2.3 spec mandates that there are at most 32
+ * interrupts. If this device asks for more, only give it one.
+ */
+ if (request_private_bits > 5)
+ request_private_bits = 0;
+
+try_only_one:
+ /*
+ * The IRQs have to be aligned on a power of two based on the
+ * number being requested.
+ */
+ irq_step = 1 << request_private_bits;
+
+ /* Mask with one bit for each IRQ */
+ search_mask = (1 << irq_step) - 1;
+
+ /*
+ * We're going to search msi_free_irq_bitmask for zero bits;
+ * a zero bit represents an MSI interrupt number that isn't
+ * in use.
+ */
+ spin_lock(&msi_free_irq_bitmask_lock);
+ for (index = 0; index < msi_irq_size/64; index++) {
+ for (irq = 0; irq < 64; irq += irq_step) {
+ if ((msi_free_irq_bitmask[index] & (search_mask << irq)) == 0) {
+ msi_free_irq_bitmask[index] |= search_mask << irq;
+ msi_multiple_irq_bitmask[index] |= (search_mask >> 1) << irq;
+ goto msi_irq_allocated;
+ }
+ }
+ }
+msi_irq_allocated:
+ spin_unlock(&msi_free_irq_bitmask_lock);
+
+ /* Make sure the search for available interrupts didn't fail */
+ if (irq >= 64) {
+ if (request_private_bits) {
+ pr_err("arch_setup_msi_irq: Unable to find %d free interrupts, trying just one",
+ 1 << request_private_bits);
+ request_private_bits = 0;
+ goto try_only_one;
+ } else
+ panic("arch_setup_msi_irq: Unable to find a free MSI interrupt");
+ }
+
+ /* MSI interrupts start at logical IRQ OCTEON_IRQ_MSI_BIT0 */
+ irq += index*64;
+ irq += OCTEON_IRQ_MSI_BIT0;
+
+ switch (octeon_dma_bar_type) {
+ case OCTEON_DMA_BAR_TYPE_SMALL:
+ /* When not using big bar, Bar 0 is based at 128MB */
+ msg.address_lo =
+ ((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff;
+ msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32;
+ break;
+ case OCTEON_DMA_BAR_TYPE_BIG:
+ /* When using big bar, Bar 0 is based at 0 */
+ msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff;
+ msg.address_hi = (0 + CVMX_PCI_MSI_RCV) >> 32;
+ break;
+ case OCTEON_DMA_BAR_TYPE_PCIE:
+ /* When using PCIe, Bar 0 is based at 0 */
+ /* FIXME CVMX_NPEI_MSI_RCV* other than 0? */
+ msg.address_lo = (0 + CVMX_NPEI_PCIE_MSI_RCV) & 0xffffffff;
+ msg.address_hi = (0 + CVMX_NPEI_PCIE_MSI_RCV) >> 32;
+ break;
+ case OCTEON_DMA_BAR_TYPE_PCIE2:
+ /* When using PCIe2, Bar 0 is based at 0 */
+ msg.address_lo = (0 + CVMX_SLI_PCIE_MSI_RCV) & 0xffffffff;
+ msg.address_hi = (0 + CVMX_SLI_PCIE_MSI_RCV) >> 32;
+ break;
+ default:
+ panic("arch_setup_msi_irq: Invalid octeon_dma_bar_type");
+ }
+ msg.data = irq - OCTEON_IRQ_MSI_BIT0;
+
+ /* Update the number of IRQs the device has available to it */
+ control &= ~PCI_MSI_FLAGS_QSIZE;
+ control |= request_private_bits << 4;
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
+
+ irq_set_msi_desc(irq, desc);
+ pci_write_msi_msg(irq, &msg);
+ return 0;
+}
+
+int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ struct msi_desc *entry;
+ int ret;
+
+ /*
+ * MSI-X is not supported.
+ */
+ if (type == PCI_CAP_ID_MSIX)
+ return -EINVAL;
+
+ /*
+ * If an architecture wants to support multiple MSI, it needs to
+ * override arch_setup_msi_irqs()
+ */
+ if (type == PCI_CAP_ID_MSI && nvec > 1)
+ return 1;
+
+ for_each_pci_msi_entry(entry, dev) {
+ ret = arch_setup_msi_irq(dev, entry);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+/**
+ * Called when a device no longer needs its MSI interrupts. All
+ * MSI interrupts for the device are freed.
+ *
+ * @irq: The device's first irq number. There may be multiple in sequence.
+ */
+void arch_teardown_msi_irq(unsigned int irq)
+{
+ int number_irqs;
+ u64 bitmask;
+ int index = 0;
+ int irq0;
+
+ if ((irq < OCTEON_IRQ_MSI_BIT0)
+ || (irq > msi_irq_size + OCTEON_IRQ_MSI_BIT0))
+ panic("arch_teardown_msi_irq: Attempted to teardown illegal "
+ "MSI interrupt (%d)", irq);
+
+ irq -= OCTEON_IRQ_MSI_BIT0;
+ index = irq / 64;
+ irq0 = irq % 64;
+
+ /*
+ * Count the number of IRQs we need to free by looking at the
+ * msi_multiple_irq_bitmask. Each bit set means that the next
+ * IRQ is also owned by this device.
+ */
+ number_irqs = 0;
+ while ((irq0 + number_irqs < 64) &&
+ (msi_multiple_irq_bitmask[index]
+ & (1ull << (irq0 + number_irqs))))
+ number_irqs++;
+ number_irqs++;
+ /* Mask with one bit for each IRQ */
+ bitmask = (1 << number_irqs) - 1;
+ /* Shift the mask to the correct bit location */
+ bitmask <<= irq0;
+ if ((msi_free_irq_bitmask[index] & bitmask) != bitmask)
+ panic("arch_teardown_msi_irq: Attempted to teardown MSI "
+ "interrupt (%d) not in use", irq);
+
+ /* Checks are done, update the in use bitmask */
+ spin_lock(&msi_free_irq_bitmask_lock);
+ msi_free_irq_bitmask[index] &= ~bitmask;
+ msi_multiple_irq_bitmask[index] &= ~bitmask;
+ spin_unlock(&msi_free_irq_bitmask_lock);
+}
+
+static DEFINE_RAW_SPINLOCK(octeon_irq_msi_lock);
+
+static u64 msi_rcv_reg[4];
+static u64 mis_ena_reg[4];
+
+static void octeon_irq_msi_enable_pcie(struct irq_data *data)
+{
+ u64 en;
+ unsigned long flags;
+ int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0;
+ int irq_index = msi_number >> 6;
+ int irq_bit = msi_number & 0x3f;
+
+ raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
+ en = cvmx_read_csr(mis_ena_reg[irq_index]);
+ en |= 1ull << irq_bit;
+ cvmx_write_csr(mis_ena_reg[irq_index], en);
+ cvmx_read_csr(mis_ena_reg[irq_index]);
+ raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
+}
+
+static void octeon_irq_msi_disable_pcie(struct irq_data *data)
+{
+ u64 en;
+ unsigned long flags;
+ int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0;
+ int irq_index = msi_number >> 6;
+ int irq_bit = msi_number & 0x3f;
+
+ raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
+ en = cvmx_read_csr(mis_ena_reg[irq_index]);
+ en &= ~(1ull << irq_bit);
+ cvmx_write_csr(mis_ena_reg[irq_index], en);
+ cvmx_read_csr(mis_ena_reg[irq_index]);
+ raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
+}
+
+static struct irq_chip octeon_irq_chip_msi_pcie = {
+ .name = "MSI",
+ .irq_enable = octeon_irq_msi_enable_pcie,
+ .irq_disable = octeon_irq_msi_disable_pcie,
+};
+
+static void octeon_irq_msi_enable_pci(struct irq_data *data)
+{
+ /*
+ * Octeon PCI doesn't have the ability to mask/unmask MSI
+ * interrupts individually. Instead of masking/unmasking them
+ * in groups of 16, we simply assume MSI devices are well
+ * behaved. MSI interrupts are always enabled and the ACK is
+ * assumed to be enough.
+ */
+}
+
+static void octeon_irq_msi_disable_pci(struct irq_data *data)
+{
+ /* See comment in enable */
+}
+
+static struct irq_chip octeon_irq_chip_msi_pci = {
+ .name = "MSI",
+ .irq_enable = octeon_irq_msi_enable_pci,
+ .irq_disable = octeon_irq_msi_disable_pci,
+};
+
+/*
+ * Called by the interrupt handling code when an MSI interrupt
+ * occurs.
+ */
+static irqreturn_t __octeon_msi_do_interrupt(int index, u64 msi_bits)
+{
+ int irq;
+ int bit;
+
+ bit = fls64(msi_bits);
+ if (bit) {
+ bit--;
+ /* Acknowledge it first. */
+ cvmx_write_csr(msi_rcv_reg[index], 1ull << bit);
+
+ irq = bit + OCTEON_IRQ_MSI_BIT0 + 64 * index;
+ do_IRQ(irq);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+#define OCTEON_MSI_INT_HANDLER_X(x) \
+static irqreturn_t octeon_msi_interrupt##x(int cpl, void *dev_id) \
+{ \
+ u64 msi_bits = cvmx_read_csr(msi_rcv_reg[(x)]); \
+ return __octeon_msi_do_interrupt((x), msi_bits); \
+}
+
+/*
+ * Create octeon_msi_interrupt{0-3} function body
+ */
+OCTEON_MSI_INT_HANDLER_X(0);
+OCTEON_MSI_INT_HANDLER_X(1);
+OCTEON_MSI_INT_HANDLER_X(2);
+OCTEON_MSI_INT_HANDLER_X(3);
+
+/*
+ * Initializes the MSI interrupt handling code
+ */
+int __init octeon_msi_initialize(void)
+{
+ int irq;
+ struct irq_chip *msi;
+
+ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
+ return 0;
+ } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
+ msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
+ msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
+ msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
+ msi_rcv_reg[3] = CVMX_PEXP_NPEI_MSI_RCV3;
+ mis_ena_reg[0] = CVMX_PEXP_NPEI_MSI_ENB0;
+ mis_ena_reg[1] = CVMX_PEXP_NPEI_MSI_ENB1;
+ mis_ena_reg[2] = CVMX_PEXP_NPEI_MSI_ENB2;
+ mis_ena_reg[3] = CVMX_PEXP_NPEI_MSI_ENB3;
+ msi = &octeon_irq_chip_msi_pcie;
+ } else {
+ msi_rcv_reg[0] = CVMX_NPI_NPI_MSI_RCV;
+#define INVALID_GENERATE_ADE 0x8700000000000000ULL;
+ msi_rcv_reg[1] = INVALID_GENERATE_ADE;
+ msi_rcv_reg[2] = INVALID_GENERATE_ADE;
+ msi_rcv_reg[3] = INVALID_GENERATE_ADE;
+ mis_ena_reg[0] = INVALID_GENERATE_ADE;
+ mis_ena_reg[1] = INVALID_GENERATE_ADE;
+ mis_ena_reg[2] = INVALID_GENERATE_ADE;
+ mis_ena_reg[3] = INVALID_GENERATE_ADE;
+ msi = &octeon_irq_chip_msi_pci;
+ }
+
+ for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_LAST; irq++)
+ irq_set_chip_and_handler(irq, msi, handle_simple_irq);
+
+ if (octeon_has_feature(OCTEON_FEATURE_PCIE)) {
+ if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0,
+ 0, "MSI[0:63]", octeon_msi_interrupt0))
+ panic("request_irq(OCTEON_IRQ_PCI_MSI0) failed");
+
+ if (request_irq(OCTEON_IRQ_PCI_MSI1, octeon_msi_interrupt1,
+ 0, "MSI[64:127]", octeon_msi_interrupt1))
+ panic("request_irq(OCTEON_IRQ_PCI_MSI1) failed");
+
+ if (request_irq(OCTEON_IRQ_PCI_MSI2, octeon_msi_interrupt2,
+ 0, "MSI[127:191]", octeon_msi_interrupt2))
+ panic("request_irq(OCTEON_IRQ_PCI_MSI2) failed");
+
+ if (request_irq(OCTEON_IRQ_PCI_MSI3, octeon_msi_interrupt3,
+ 0, "MSI[192:255]", octeon_msi_interrupt3))
+ panic("request_irq(OCTEON_IRQ_PCI_MSI3) failed");
+
+ msi_irq_size = 256;
+ } else if (octeon_is_pci_host()) {
+ if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0,
+ 0, "MSI[0:15]", octeon_msi_interrupt0))
+ panic("request_irq(OCTEON_IRQ_PCI_MSI0) failed");
+
+ if (request_irq(OCTEON_IRQ_PCI_MSI1, octeon_msi_interrupt0,
+ 0, "MSI[16:31]", octeon_msi_interrupt0))
+ panic("request_irq(OCTEON_IRQ_PCI_MSI1) failed");
+
+ if (request_irq(OCTEON_IRQ_PCI_MSI2, octeon_msi_interrupt0,
+ 0, "MSI[32:47]", octeon_msi_interrupt0))
+ panic("request_irq(OCTEON_IRQ_PCI_MSI2) failed");
+
+ if (request_irq(OCTEON_IRQ_PCI_MSI3, octeon_msi_interrupt0,
+ 0, "MSI[48:63]", octeon_msi_interrupt0))
+ panic("request_irq(OCTEON_IRQ_PCI_MSI3) failed");
+ msi_irq_size = 64;
+ }
+ return 0;
+}
+subsys_initcall(octeon_msi_initialize);
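
The vector allocator in arch_setup_msi_irq() above packs a request for 2^n vectors into a naturally aligned run of bits in a 64-bit free bitmap, and marks every vector except the first in the "multiple" bitmap so teardown can tell how many consecutive vectors belong to one device. A stand-alone sketch of that search (plain C, not kernel code; the 64-bit cast is an added assumption so a 32-vector request stays well defined):

#include <stdint.h>
#include <stdio.h>

static uint64_t free_mask, multiple_mask;

/* Returns the first vector, or -1 if no aligned run of 2^bits vectors is free. */
static int alloc_vectors(int bits)
{
	int step = 1 << bits;
	uint64_t search = ((uint64_t)1 << step) - 1;
	int vec;

	for (vec = 0; vec < 64; vec += step) {
		if ((free_mask & (search << vec)) == 0) {
			free_mask |= search << vec;
			multiple_mask |= (search >> 1) << vec;
			return vec;
		}
	}
	return -1;
}

int main(void)
{
	printf("1 vector  -> %d\n", alloc_vectors(0));
	printf("4 vectors -> %d\n", alloc_vectors(2));
	printf("1 vector  -> %d\n", alloc_vectors(0));
	printf("free=%#llx multiple=%#llx\n",
	       (unsigned long long)free_mask, (unsigned long long)multiple_mask);
	return 0;
}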
diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c
new file mode 100644
index 000000000..bb14335f8
--- /dev/null
+++ b/arch/mips/pci/msi-xlp.c
@@ -0,0 +1,571 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/mm.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/console.h>
+
+#include <asm/io.h>
+
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/mips-extns.h>
+
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#include <asm/netlogic/xlp-hal/pcibus.h>
+#include <asm/netlogic/xlp-hal/bridge.h>
+
+#define XLP_MSIVEC_PER_LINK 32
+#define XLP_MSIXVEC_TOTAL (cpu_is_xlp9xx() ? 128 : 32)
+#define XLP_MSIXVEC_PER_LINK (cpu_is_xlp9xx() ? 32 : 8)
+
+/* 128 MSI irqs per node, mapped starting at NLM_MSI_VEC_BASE */
+static inline int nlm_link_msiirq(int link, int msivec)
+{
+ return NLM_MSI_VEC_BASE + link * XLP_MSIVEC_PER_LINK + msivec;
+}
+
+/* get the link MSI vector from irq number */
+static inline int nlm_irq_msivec(int irq)
+{
+ return (irq - NLM_MSI_VEC_BASE) % XLP_MSIVEC_PER_LINK;
+}
+
+/* get the link from the irq number */
+static inline int nlm_irq_msilink(int irq)
+{
+ int total_msivec = XLP_MSIVEC_PER_LINK * PCIE_NLINKS;
+
+ return ((irq - NLM_MSI_VEC_BASE) % total_msivec) /
+ XLP_MSIVEC_PER_LINK;
+}
+
+/*
+ * For XLP 8xx/4xx/3xx/2xx, only 32 MSI-X vectors are possible because
+ * there are only 32 PIC interrupts for MSI. We split them statically
+ * and use 8 MSI-X vectors per link - this keeps the allocation and
+ * lookup simple.
+ * On XLP 9xx, there are 32 vectors per link, and the interrupts are
+ * not routed thru PIC, so we can use all 128 MSI-X vectors.
+ */
+static inline int nlm_link_msixirq(int link, int bit)
+{
+ return NLM_MSIX_VEC_BASE + link * XLP_MSIXVEC_PER_LINK + bit;
+}
+
+/* get the link MSI vector from irq number */
+static inline int nlm_irq_msixvec(int irq)
+{
+ return (irq - NLM_MSIX_VEC_BASE) % XLP_MSIXVEC_TOTAL;
+}
+
+/* get the link from MSIX vec */
+static inline int nlm_irq_msixlink(int msixvec)
+{
+ return msixvec / XLP_MSIXVEC_PER_LINK;
+}
+
+/*
+ * Per link MSI and MSI-X information, set as IRQ handler data for
+ * MSI and MSI-X interrupts.
+ */
+struct xlp_msi_data {
+ struct nlm_soc_info *node;
+ uint64_t lnkbase;
+ uint32_t msi_enabled_mask;
+ uint32_t msi_alloc_mask;
+ uint32_t msix_alloc_mask;
+ spinlock_t msi_lock;
+};
+
+/*
+ * MSI Chip definitions
+ *
+ * On XLP, there is a PIC interrupt associated with each PCIe link on the
+ * chip (which appears as a PCI bridge to us). This gives us 32 MSI irqs
+ * per link and 128 overall.
+ *
+ * When a device connected to the link raises an MSI interrupt, we get a
+ * link interrupt and we then have to look at the PCIE_MSI_STATUS register
+ * at the bridge to map it to the IRQ.
+ */
+static void xlp_msi_enable(struct irq_data *d)
+{
+ struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+ int vec;
+
+ vec = nlm_irq_msivec(d->irq);
+ spin_lock_irqsave(&md->msi_lock, flags);
+ md->msi_enabled_mask |= 1u << vec;
+ if (cpu_is_xlp9xx())
+ nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN,
+ md->msi_enabled_mask);
+ else
+ nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask);
+ spin_unlock_irqrestore(&md->msi_lock, flags);
+}
+
+static void xlp_msi_disable(struct irq_data *d)
+{
+ struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+ int vec;
+
+ vec = nlm_irq_msivec(d->irq);
+ spin_lock_irqsave(&md->msi_lock, flags);
+ md->msi_enabled_mask &= ~(1u << vec);
+ if (cpu_is_xlp9xx())
+ nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN,
+ md->msi_enabled_mask);
+ else
+ nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask);
+ spin_unlock_irqrestore(&md->msi_lock, flags);
+}
+
+static void xlp_msi_mask_ack(struct irq_data *d)
+{
+ struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
+ int link, vec;
+
+ link = nlm_irq_msilink(d->irq);
+ vec = nlm_irq_msivec(d->irq);
+ xlp_msi_disable(d);
+
+ /* Ack MSI on bridge */
+ if (cpu_is_xlp9xx())
+ nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_STATUS, 1u << vec);
+ else
+ nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec);
+
+}
+
+static struct irq_chip xlp_msi_chip = {
+ .name = "XLP-MSI",
+ .irq_enable = xlp_msi_enable,
+ .irq_disable = xlp_msi_disable,
+ .irq_mask_ack = xlp_msi_mask_ack,
+ .irq_unmask = xlp_msi_enable,
+};
+
+/*
+ * XLP8XX/4XX/3XX/2XX:
+ * The MSI-X interrupt handling is different from MSI: there are 32 MSI-X
+ * interrupts generated by the PIC, and each of these corresponds to an
+ * MSI-X vector (0-31) that can be assigned.
+ *
+ * We divide the MSI-X vectors into 8 per link and do a per-link allocation.
+ *
+ * XLP9XX:
+ * 32 MSI-X vectors are available per link, and the interrupts are not routed
+ * thru the PIC. PIC ack not needed.
+ *
+ * Enable and disable done using standard MSI functions.
+ */
+static void xlp_msix_mask_ack(struct irq_data *d)
+{
+ struct xlp_msi_data *md;
+ int link, msixvec;
+ uint32_t status_reg, bit;
+
+ msixvec = nlm_irq_msixvec(d->irq);
+ link = nlm_irq_msixlink(msixvec);
+ pci_msi_mask_irq(d);
+ md = irq_data_get_irq_chip_data(d);
+
+ /* Ack MSI on bridge */
+ if (cpu_is_xlp9xx()) {
+ status_reg = PCIE_9XX_MSIX_STATUSX(link);
+ bit = msixvec % XLP_MSIXVEC_PER_LINK;
+ } else {
+ status_reg = PCIE_MSIX_STATUS;
+ bit = msixvec;
+ }
+ nlm_write_reg(md->lnkbase, status_reg, 1u << bit);
+
+ if (!cpu_is_xlp9xx())
+ nlm_pic_ack(md->node->picbase,
+ PIC_IRT_PCIE_MSIX_INDEX(msixvec));
+}
+
+static struct irq_chip xlp_msix_chip = {
+ .name = "XLP-MSIX",
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask_ack = xlp_msix_mask_ack,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+void arch_teardown_msi_irq(unsigned int irq)
+{
+}
+
+/*
+ * Setup a PCIe link for MSI. By default, the links are in
+ * legacy interrupt mode. We will switch them to MSI mode
+ * at the first MSI request.
+ */
+static void xlp_config_link_msi(uint64_t lnkbase, int lirq, uint64_t msiaddr)
+{
+ u32 val;
+
+ if (cpu_is_xlp9xx()) {
+ val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0);
+ if ((val & 0x200) == 0) {
+ val |= 0x200; /* MSI Interrupt enable */
+ nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val);
+ }
+ } else {
+ val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
+ if ((val & 0x200) == 0) {
+ val |= 0x200;
+ nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
+ }
+ }
+
+ val = nlm_read_reg(lnkbase, 0x1); /* CMD */
+ if ((val & 0x0400) == 0) {
+ val |= 0x0400;
+ nlm_write_reg(lnkbase, 0x1, val);
+ }
+
+ /* Update IRQ in the PCI irq reg */
+ val = nlm_read_pci_reg(lnkbase, 0xf);
+ val &= ~0x1fu;
+ val |= (1 << 8) | lirq;
+ nlm_write_pci_reg(lnkbase, 0xf, val);
+
+ /* MSI addr */
+ nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_ADDRH, msiaddr >> 32);
+ nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_ADDRL, msiaddr & 0xffffffff);
+
+ /* MSI cap for bridge */
+ val = nlm_read_reg(lnkbase, PCIE_BRIDGE_MSI_CAP);
+ if ((val & (1 << 16)) == 0) {
+ val |= 0xb << 16; /* mmc32, msi enable */
+ nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_CAP, val);
+ }
+}
+
+/*
+ * Allocate an MSI vector on a link
+ */
+static int xlp_setup_msi(uint64_t lnkbase, int node, int link,
+ struct msi_desc *desc)
+{
+ struct xlp_msi_data *md;
+ struct msi_msg msg;
+ unsigned long flags;
+ int msivec, irt, lirq, xirq, ret;
+ uint64_t msiaddr;
+
+ /* Get MSI data for the link */
+ lirq = PIC_PCIE_LINK_MSI_IRQ(link);
+ xirq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
+ md = irq_get_chip_data(xirq);
+ msiaddr = MSI_LINK_ADDR(node, link);
+
+ spin_lock_irqsave(&md->msi_lock, flags);
+ if (md->msi_alloc_mask == 0) {
+ xlp_config_link_msi(lnkbase, lirq, msiaddr);
+ /* switch the link IRQ to MSI range */
+ if (cpu_is_xlp9xx())
+ irt = PIC_9XX_IRT_PCIE_LINK_INDEX(link);
+ else
+ irt = PIC_IRT_PCIE_LINK_INDEX(link);
+ nlm_setup_pic_irq(node, lirq, lirq, irt);
+ nlm_pic_init_irt(nlm_get_node(node)->picbase, irt, lirq,
+ node * nlm_threads_per_node(), 1 /*en */);
+ }
+
+ /* allocate a MSI vec, and tell the bridge about it */
+ msivec = fls(md->msi_alloc_mask);
+ if (msivec == XLP_MSIVEC_PER_LINK) {
+ spin_unlock_irqrestore(&md->msi_lock, flags);
+ return -ENOMEM;
+ }
+ md->msi_alloc_mask |= (1u << msivec);
+ spin_unlock_irqrestore(&md->msi_lock, flags);
+
+ msg.address_hi = msiaddr >> 32;
+ msg.address_lo = msiaddr & 0xffffffff;
+ msg.data = 0xc00 | msivec;
+
+ xirq = xirq + msivec; /* msi mapped to global irq space */
+ ret = irq_set_msi_desc(xirq, desc);
+ if (ret < 0)
+ return ret;
+
+ pci_write_msi_msg(xirq, &msg);
+ return 0;
+}
+
+/*
+ * Switch a link to MSI-X mode
+ */
+static void xlp_config_link_msix(uint64_t lnkbase, int lirq, uint64_t msixaddr)
+{
+ u32 val;
+
+ val = nlm_read_reg(lnkbase, 0x2C);
+ if ((val & 0x80000000U) == 0) {
+ val |= 0x80000000U;
+ nlm_write_reg(lnkbase, 0x2C, val);
+ }
+
+ if (cpu_is_xlp9xx()) {
+ val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0);
+ if ((val & 0x200) == 0) {
+ val |= 0x200; /* MSI Interrupt enable */
+ nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val);
+ }
+ } else {
+ val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
+ if ((val & 0x200) == 0) {
+ val |= 0x200; /* MSI Interrupt enable */
+ nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
+ }
+ }
+
+ val = nlm_read_reg(lnkbase, 0x1); /* CMD */
+ if ((val & 0x0400) == 0) {
+ val |= 0x0400;
+ nlm_write_reg(lnkbase, 0x1, val);
+ }
+
+ /* Update IRQ in the PCI irq reg */
+ val = nlm_read_pci_reg(lnkbase, 0xf);
+ val &= ~0x1fu;
+ val |= (1 << 8) | lirq;
+ nlm_write_pci_reg(lnkbase, 0xf, val);
+
+ if (cpu_is_xlp9xx()) {
+ /* MSI-X addresses */
+ nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_BASE,
+ msixaddr >> 8);
+ nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT,
+ (msixaddr + MSI_ADDR_SZ) >> 8);
+ } else {
+ /* MSI-X addresses */
+ nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_BASE,
+ msixaddr >> 8);
+ nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_LIMIT,
+ (msixaddr + MSI_ADDR_SZ) >> 8);
+ }
+}
+
+/*
+ * Allocate an MSI-X vector
+ */
+static int xlp_setup_msix(uint64_t lnkbase, int node, int link,
+ struct msi_desc *desc)
+{
+ struct xlp_msi_data *md;
+ struct msi_msg msg;
+ unsigned long flags;
+ int t, msixvec, lirq, xirq, ret;
+ uint64_t msixaddr;
+
+ /* Get MSI data for the link */
+ lirq = PIC_PCIE_MSIX_IRQ(link);
+ xirq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0));
+ md = irq_get_chip_data(xirq);
+ msixaddr = MSIX_LINK_ADDR(node, link);
+
+ spin_lock_irqsave(&md->msi_lock, flags);
+ /* switch the PCIe link to MSI-X mode at the first alloc */
+ if (md->msix_alloc_mask == 0)
+ xlp_config_link_msix(lnkbase, lirq, msixaddr);
+
+ /* allocate a MSI-X vec, and tell the bridge about it */
+ t = fls(md->msix_alloc_mask);
+ if (t == XLP_MSIXVEC_PER_LINK) {
+ spin_unlock_irqrestore(&md->msi_lock, flags);
+ return -ENOMEM;
+ }
+ md->msix_alloc_mask |= (1u << t);
+ spin_unlock_irqrestore(&md->msi_lock, flags);
+
+ xirq += t;
+ msixvec = nlm_irq_msixvec(xirq);
+
+ msg.address_hi = msixaddr >> 32;
+ msg.address_lo = msixaddr & 0xffffffff;
+ msg.data = 0xc00 | msixvec;
+
+ ret = irq_set_msi_desc(xirq, desc);
+ if (ret < 0)
+ return ret;
+
+ pci_write_msi_msg(xirq, &msg);
+ return 0;
+}
+
+int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
+{
+ struct pci_dev *lnkdev;
+ uint64_t lnkbase;
+ int node, link, slot;
+
+ lnkdev = xlp_get_pcie_link(dev);
+ if (lnkdev == NULL) {
+ dev_err(&dev->dev, "Could not find bridge\n");
+ return 1;
+ }
+ slot = PCI_SLOT(lnkdev->devfn);
+ link = PCI_FUNC(lnkdev->devfn);
+ node = slot / 8;
+ lnkbase = nlm_get_pcie_base(node, link);
+
+ if (desc->msi_attrib.is_msix)
+ return xlp_setup_msix(lnkbase, node, link, desc);
+ else
+ return xlp_setup_msi(lnkbase, node, link, desc);
+}
+
+void __init xlp_init_node_msi_irqs(int node, int link)
+{
+ struct nlm_soc_info *nodep;
+ struct xlp_msi_data *md;
+ int irq, i, irt, msixvec, val;
+
+ pr_info("[%d %d] Init node PCI IRT\n", node, link);
+ nodep = nlm_get_node(node);
+
+ /* Alloc an MSI block for the link */
+ md = kzalloc(sizeof(*md), GFP_KERNEL);
+ spin_lock_init(&md->msi_lock);
+ md->msi_enabled_mask = 0;
+ md->msi_alloc_mask = 0;
+ md->msix_alloc_mask = 0;
+ md->node = nodep;
+ md->lnkbase = nlm_get_pcie_base(node, link);
+
+ /* extended space for MSI interrupts */
+ irq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
+ for (i = irq; i < irq + XLP_MSIVEC_PER_LINK; i++) {
+ irq_set_chip_and_handler(i, &xlp_msi_chip, handle_level_irq);
+ irq_set_chip_data(i, md);
+ }
+
+ for (i = 0; i < XLP_MSIXVEC_PER_LINK ; i++) {
+ if (cpu_is_xlp9xx()) {
+ val = ((node * nlm_threads_per_node()) << 7 |
+ PIC_PCIE_MSIX_IRQ(link) << 1 | 0 << 0);
+ nlm_write_pcie_reg(md->lnkbase, PCIE_9XX_MSIX_VECX(i +
+ (link * XLP_MSIXVEC_PER_LINK)), val);
+ } else {
+ /* Initialize MSI-X irts to generate one interrupt
+ * per link
+ */
+ msixvec = link * XLP_MSIXVEC_PER_LINK + i;
+ irt = PIC_IRT_PCIE_MSIX_INDEX(msixvec);
+ nlm_pic_init_irt(nodep->picbase, irt,
+ PIC_PCIE_MSIX_IRQ(link),
+ node * nlm_threads_per_node(), 1);
+ }
+
+ /* Initialize MSI-X extended irq space for the link */
+ irq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, i));
+ irq_set_chip_and_handler(irq, &xlp_msix_chip, handle_level_irq);
+ irq_set_chip_data(irq, md);
+ }
+}
+
+void nlm_dispatch_msi(int node, int lirq)
+{
+ struct xlp_msi_data *md;
+ int link, i, irqbase;
+ u32 status;
+
+ link = lirq - PIC_PCIE_LINK_MSI_IRQ_BASE;
+ irqbase = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
+ md = irq_get_chip_data(irqbase);
+ if (cpu_is_xlp9xx())
+ status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSI_STATUS) &
+ md->msi_enabled_mask;
+ else
+ status = nlm_read_reg(md->lnkbase, PCIE_MSI_STATUS) &
+ md->msi_enabled_mask;
+ while (status) {
+ i = __ffs(status);
+ do_IRQ(irqbase + i);
+ status &= status - 1;
+ }
+
+ /* Ack at eirr and PIC */
+ ack_c0_eirr(PIC_PCIE_LINK_MSI_IRQ(link));
+ if (cpu_is_xlp9xx())
+ nlm_pic_ack(md->node->picbase,
+ PIC_9XX_IRT_PCIE_LINK_INDEX(link));
+ else
+ nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link));
+}
+
+void nlm_dispatch_msix(int node, int lirq)
+{
+ struct xlp_msi_data *md;
+ int link, i, irqbase;
+ u32 status;
+
+ link = lirq - PIC_PCIE_MSIX_IRQ_BASE;
+ irqbase = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0));
+ md = irq_get_chip_data(irqbase);
+ if (cpu_is_xlp9xx())
+ status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSIX_STATUSX(link));
+ else
+ status = nlm_read_reg(md->lnkbase, PCIE_MSIX_STATUS);
+
+ /* narrow it down to the MSI-x vectors for our link */
+ if (!cpu_is_xlp9xx())
+ status = (status >> (link * XLP_MSIXVEC_PER_LINK)) &
+ ((1 << XLP_MSIXVEC_PER_LINK) - 1);
+
+ while (status) {
+ i = __ffs(status);
+ do_IRQ(irqbase + i);
+ status &= status - 1;
+ }
+ /* Ack at eirr and PIC */
+ ack_c0_eirr(PIC_PCIE_MSIX_IRQ(link));
+}
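
The helpers at the top of msi-xlp.c define a simple linear mapping between (link, vector) pairs and irq numbers. A stand-alone sketch of that mapping (plain C, not kernel code; NLM_MSI_VEC_BASE, the per-link vector count and the link count are placeholder values standing in for the NetLogic header constants):

#include <assert.h>
#include <stdio.h>

#define MSI_VEC_BASE		64	/* placeholder for NLM_MSI_VEC_BASE */
#define MSIVEC_PER_LINK		32	/* XLP_MSIVEC_PER_LINK */
#define NLINKS			4	/* placeholder for PCIE_NLINKS */

static int link_msiirq(int link, int vec)
{
	return MSI_VEC_BASE + link * MSIVEC_PER_LINK + vec;
}

static int irq_msivec(int irq)
{
	return (irq - MSI_VEC_BASE) % MSIVEC_PER_LINK;
}

static int irq_msilink(int irq)
{
	return ((irq - MSI_VEC_BASE) % (MSIVEC_PER_LINK * NLINKS)) /
		MSIVEC_PER_LINK;
}

int main(void)
{
	int link, vec;

	/* The mapping round-trips for every (link, vector) pair. */
	for (link = 0; link < NLINKS; link++)
		for (vec = 0; vec < MSIVEC_PER_LINK; vec++) {
			int irq = link_msiirq(link, vec);

			assert(irq_msivec(irq) == vec);
			assert(irq_msilink(irq) == link);
		}
	printf("irq %d -> link %d, vector %d\n", link_msiirq(2, 5),
	       irq_msilink(link_msiirq(2, 5)), irq_msivec(link_msiirq(2, 5)));
	return 0;
}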
diff --git a/arch/mips/pci/ops-bcm63xx.c b/arch/mips/pci/ops-bcm63xx.c
new file mode 100644
index 000000000..dc6dc2741
--- /dev/null
+++ b/arch/mips/pci/ops-bcm63xx.c
@@ -0,0 +1,528 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include "pci-bcm63xx.h"
+
+/*
+ * swizzle 32-bit data to return only the needed part
+ */
+static int postprocess_read(u32 data, int where, unsigned int size)
+{
+ u32 ret;
+
+ ret = 0;
+ switch (size) {
+ case 1:
+ ret = (data >> ((where & 3) << 3)) & 0xff;
+ break;
+ case 2:
+ ret = (data >> ((where & 3) << 3)) & 0xffff;
+ break;
+ case 4:
+ ret = data;
+ break;
+ }
+ return ret;
+}
+
+static int preprocess_write(u32 orig_data, u32 val, int where,
+ unsigned int size)
+{
+ u32 ret;
+
+ ret = 0;
+ switch (size) {
+ case 1:
+ ret = (orig_data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ break;
+ case 2:
+ ret = (orig_data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ break;
+ case 4:
+ ret = val;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * setup hardware for a configuration cycle with given parameters
+ */
+static int bcm63xx_setup_cfg_access(int type, unsigned int busn,
+ unsigned int devfn, int where)
+{
+ unsigned int slot, func, reg;
+ u32 val;
+
+ slot = PCI_SLOT(devfn);
+ func = PCI_FUNC(devfn);
+ reg = where >> 2;
+
+ /* sanity check */
+ if (slot > (MPI_L2PCFG_DEVNUM_MASK >> MPI_L2PCFG_DEVNUM_SHIFT))
+ return 1;
+
+ if (func > (MPI_L2PCFG_FUNC_MASK >> MPI_L2PCFG_FUNC_SHIFT))
+ return 1;
+
+ if (reg > (MPI_L2PCFG_REG_MASK >> MPI_L2PCFG_REG_SHIFT))
+ return 1;
+
+ /* ok, setup config access */
+ val = (reg << MPI_L2PCFG_REG_SHIFT);
+ val |= (func << MPI_L2PCFG_FUNC_SHIFT);
+ val |= (slot << MPI_L2PCFG_DEVNUM_SHIFT);
+ val |= MPI_L2PCFG_CFG_USEREG_MASK;
+ val |= MPI_L2PCFG_CFG_SEL_MASK;
+ /* type 0 cycle for local bus, type 1 cycle for anything else */
+ if (type != 0) {
+ /* FIXME: how to specify bus ??? */
+ val |= (1 << MPI_L2PCFG_CFG_TYPE_SHIFT);
+ }
+ bcm_mpi_writel(val, MPI_L2PCFG_REG);
+
+ return 0;
+}
+
+static int bcm63xx_do_cfg_read(int type, unsigned int busn,
+ unsigned int devfn, int where, int size,
+ u32 *val)
+{
+ u32 data;
+
+ /* two phase cycle, first we write address, then read data at
+ * another location, caller already has a spinlock so no need
+ * to add one here */
+ if (bcm63xx_setup_cfg_access(type, busn, devfn, where))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ iob();
+ data = le32_to_cpu(__raw_readl(pci_iospace_start));
+ /* restore IO space normal behaviour */
+ bcm_mpi_writel(0, MPI_L2PCFG_REG);
+
+ *val = postprocess_read(data, where, size);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int bcm63xx_do_cfg_write(int type, unsigned int busn,
+ unsigned int devfn, int where, int size,
+ u32 val)
+{
+ u32 data;
+
+ /* two phase cycle, first we write address, then write data to
+ * another location, caller already has a spinlock so no need
+ * to add one here */
+ if (bcm63xx_setup_cfg_access(type, busn, devfn, where))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ iob();
+
+ data = le32_to_cpu(__raw_readl(pci_iospace_start));
+ data = preprocess_write(data, val, where, size);
+
+ __raw_writel(cpu_to_le32(data), pci_iospace_start);
+ wmb();
+ /* no way to know the access is done, we have to wait */
+ udelay(500);
+ /* restore IO space normal behaviour */
+ bcm_mpi_writel(0, MPI_L2PCFG_REG);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int bcm63xx_pci_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ int type;
+
+ type = bus->parent ? 1 : 0;
+
+ if (type == 0 && PCI_SLOT(devfn) == CARDBUS_PCI_IDSEL)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return bcm63xx_do_cfg_read(type, bus->number, devfn,
+ where, size, val);
+}
+
+static int bcm63xx_pci_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ int type;
+
+ type = bus->parent ? 1 : 0;
+
+ if (type == 0 && PCI_SLOT(devfn) == CARDBUS_PCI_IDSEL)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return bcm63xx_do_cfg_write(type, bus->number, devfn,
+ where, size, val);
+}
+
+struct pci_ops bcm63xx_pci_ops = {
+ .read = bcm63xx_pci_read,
+ .write = bcm63xx_pci_write
+};
+
+#ifdef CONFIG_CARDBUS
+/*
+ * emulate configuration read access on a cardbus bridge
+ */
+#define FAKE_CB_BRIDGE_SLOT 0x1e
+
+static int fake_cb_bridge_bus_number = -1;
+
+static struct {
+ u16 pci_command;
+ u8 cb_latency;
+ u8 subordinate_busn;
+ u8 cardbus_busn;
+ u8 pci_busn;
+ int bus_assigned;
+ u16 bridge_control;
+
+ u32 mem_base0;
+ u32 mem_limit0;
+ u32 mem_base1;
+ u32 mem_limit1;
+
+ u32 io_base0;
+ u32 io_limit0;
+ u32 io_base1;
+ u32 io_limit1;
+} fake_cb_bridge_regs;
+
+static int fake_cb_bridge_read(int where, int size, u32 *val)
+{
+ unsigned int reg;
+ u32 data;
+
+ data = 0;
+ reg = where >> 2;
+ switch (reg) {
+ case (PCI_VENDOR_ID >> 2):
+ case (PCI_CB_SUBSYSTEM_VENDOR_ID >> 2):
+ /* create dummy vendor/device id from our cpu id */
+ data = (bcm63xx_get_cpu_id() << 16) | PCI_VENDOR_ID_BROADCOM;
+ break;
+
+ case (PCI_COMMAND >> 2):
+ data = (PCI_STATUS_DEVSEL_SLOW << 16);
+ data |= fake_cb_bridge_regs.pci_command;
+ break;
+
+ case (PCI_CLASS_REVISION >> 2):
+ data = (PCI_CLASS_BRIDGE_CARDBUS << 16);
+ break;
+
+ case (PCI_CACHE_LINE_SIZE >> 2):
+ data = (PCI_HEADER_TYPE_CARDBUS << 16);
+ break;
+
+ case (PCI_INTERRUPT_LINE >> 2):
+ /* bridge control */
+ data = (fake_cb_bridge_regs.bridge_control << 16);
+ /* pin:intA line:0xff */
+ data |= (0x1 << 8) | 0xff;
+ break;
+
+ case (PCI_CB_PRIMARY_BUS >> 2):
+ data = (fake_cb_bridge_regs.cb_latency << 24);
+ data |= (fake_cb_bridge_regs.subordinate_busn << 16);
+ data |= (fake_cb_bridge_regs.cardbus_busn << 8);
+ data |= fake_cb_bridge_regs.pci_busn;
+ break;
+
+ case (PCI_CB_MEMORY_BASE_0 >> 2):
+ data = fake_cb_bridge_regs.mem_base0;
+ break;
+
+ case (PCI_CB_MEMORY_LIMIT_0 >> 2):
+ data = fake_cb_bridge_regs.mem_limit0;
+ break;
+
+ case (PCI_CB_MEMORY_BASE_1 >> 2):
+ data = fake_cb_bridge_regs.mem_base1;
+ break;
+
+ case (PCI_CB_MEMORY_LIMIT_1 >> 2):
+ data = fake_cb_bridge_regs.mem_limit1;
+ break;
+
+ case (PCI_CB_IO_BASE_0 >> 2):
+ /* | 1 for 32bits io support */
+ data = fake_cb_bridge_regs.io_base0 | 0x1;
+ break;
+
+ case (PCI_CB_IO_LIMIT_0 >> 2):
+ data = fake_cb_bridge_regs.io_limit0;
+ break;
+
+ case (PCI_CB_IO_BASE_1 >> 2):
+ /* | 1 for 32bits io support */
+ data = fake_cb_bridge_regs.io_base1 | 0x1;
+ break;
+
+ case (PCI_CB_IO_LIMIT_1 >> 2):
+ data = fake_cb_bridge_regs.io_limit1;
+ break;
+ }
+
+ *val = postprocess_read(data, where, size);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * emulate configuration write access on a cardbus bridge
+ */
+static int fake_cb_bridge_write(int where, int size, u32 val)
+{
+ unsigned int reg;
+ u32 data, tmp;
+ int ret;
+
+ ret = fake_cb_bridge_read((where & ~0x3), 4, &data);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ data = preprocess_write(data, val, where, size);
+
+ reg = where >> 2;
+ switch (reg) {
+ case (PCI_COMMAND >> 2):
+ fake_cb_bridge_regs.pci_command = (data & 0xffff);
+ break;
+
+ case (PCI_CB_PRIMARY_BUS >> 2):
+ fake_cb_bridge_regs.cb_latency = (data >> 24) & 0xff;
+ fake_cb_bridge_regs.subordinate_busn = (data >> 16) & 0xff;
+ fake_cb_bridge_regs.cardbus_busn = (data >> 8) & 0xff;
+ fake_cb_bridge_regs.pci_busn = data & 0xff;
+ if (fake_cb_bridge_regs.cardbus_busn)
+ fake_cb_bridge_regs.bus_assigned = 1;
+ break;
+
+ case (PCI_INTERRUPT_LINE >> 2):
+ tmp = (data >> 16) & 0xffff;
+ /* disable memory prefetch support */
+ tmp &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
+ tmp &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1;
+ fake_cb_bridge_regs.bridge_control = tmp;
+ break;
+
+ case (PCI_CB_MEMORY_BASE_0 >> 2):
+ fake_cb_bridge_regs.mem_base0 = data;
+ break;
+
+ case (PCI_CB_MEMORY_LIMIT_0 >> 2):
+ fake_cb_bridge_regs.mem_limit0 = data;
+ break;
+
+ case (PCI_CB_MEMORY_BASE_1 >> 2):
+ fake_cb_bridge_regs.mem_base1 = data;
+ break;
+
+ case (PCI_CB_MEMORY_LIMIT_1 >> 2):
+ fake_cb_bridge_regs.mem_limit1 = data;
+ break;
+
+ case (PCI_CB_IO_BASE_0 >> 2):
+ fake_cb_bridge_regs.io_base0 = data;
+ break;
+
+ case (PCI_CB_IO_LIMIT_0 >> 2):
+ fake_cb_bridge_regs.io_limit0 = data;
+ break;
+
+ case (PCI_CB_IO_BASE_1 >> 2):
+ fake_cb_bridge_regs.io_base1 = data;
+ break;
+
+ case (PCI_CB_IO_LIMIT_1 >> 2):
+ fake_cb_bridge_regs.io_limit1 = data;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int bcm63xx_cb_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ /* snoop access to slot 0x1e on root bus, we fake a cardbus
+ * bridge at this location */
+ if (!bus->parent && PCI_SLOT(devfn) == FAKE_CB_BRIDGE_SLOT) {
+ fake_cb_bridge_bus_number = bus->number;
+ return fake_cb_bridge_read(where, size, val);
+ }
+
+ /* a configuration cycle for the device behind the cardbus
+ * bridge is actually done as a type 0 cycle on the primary
+ * bus. This means that only one device can be on the cardbus
+ * bus */
+ if (fake_cb_bridge_regs.bus_assigned &&
+ bus->number == fake_cb_bridge_regs.cardbus_busn &&
+ PCI_SLOT(devfn) == 0)
+ return bcm63xx_do_cfg_read(0, 0,
+ PCI_DEVFN(CARDBUS_PCI_IDSEL, 0),
+ where, size, val);
+
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static int bcm63xx_cb_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ if (!bus->parent && PCI_SLOT(devfn) == FAKE_CB_BRIDGE_SLOT) {
+ fake_cb_bridge_bus_number = bus->number;
+ return fake_cb_bridge_write(where, size, val);
+ }
+
+ if (fake_cb_bridge_regs.bus_assigned &&
+ bus->number == fake_cb_bridge_regs.cardbus_busn &&
+ PCI_SLOT(devfn) == 0)
+ return bcm63xx_do_cfg_write(0, 0,
+ PCI_DEVFN(CARDBUS_PCI_IDSEL, 0),
+ where, size, val);
+
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+struct pci_ops bcm63xx_cb_ops = {
+ .read = bcm63xx_cb_read,
+ .write = bcm63xx_cb_write,
+};
+
+/*
+ * There is only one IO window, so it cannot be shared by PCI and cardbus;
+ * use a fixup to choose which gets it and to detect unhandled configurations.
+ */
+static void bcm63xx_fixup(struct pci_dev *dev)
+{
+ static int io_window = -1;
+ int i, found, new_io_window;
+ u32 val;
+
+ /* look for any io resource */
+ found = 0;
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return;
+
+ /* skip our fake bus with only cardbus bridge on it */
+ if (dev->bus->number == fake_cb_bridge_bus_number)
+ return;
+
+ /* find on which bus the device is */
+ if (fake_cb_bridge_regs.bus_assigned &&
+ dev->bus->number == fake_cb_bridge_regs.cardbus_busn &&
+ PCI_SLOT(dev->devfn) == 0)
+ new_io_window = 1;
+ else
+ new_io_window = 0;
+
+ if (new_io_window == io_window)
+ return;
+
+ if (io_window != -1) {
+ printk(KERN_ERR "bcm63xx: both PCI and cardbus devices "
+ "need IO, which hardware cannot do\n");
+ return;
+ }
+
+ printk(KERN_INFO "bcm63xx: PCI IO window assigned to %s\n",
+ (new_io_window == 0) ? "PCI" : "cardbus");
+
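+ /* steer the single IO window to either the PCI or the cardbus side */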
+ val = bcm_mpi_readl(MPI_L2PIOREMAP_REG);
+ if (new_io_window)
+ val |= MPI_L2PREMAP_IS_CARDBUS_MASK;
+ else
+ val &= ~MPI_L2PREMAP_IS_CARDBUS_MASK;
+ bcm_mpi_writel(val, MPI_L2PIOREMAP_REG);
+
+ io_window = new_io_window;
+}
+
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, bcm63xx_fixup);
+#endif
+
+static int bcm63xx_pcie_can_access(struct pci_bus *bus, int devfn)
+{
+ switch (bus->number) {
+ case PCIE_BUS_BRIDGE:
+ return PCI_SLOT(devfn) == 0;
+ case PCIE_BUS_DEVICE:
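+ /* the device behind the bridge is only reachable once the PCIe link is up */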
+ if (PCI_SLOT(devfn) == 0)
+ return bcm_pcie_readl(PCIE_DLSTATUS_REG)
+ & DLSTATUS_PHYLINKUP;
+ fallthrough;
+ default:
+ return false;
+ }
+}
+
+static int bcm63xx_pcie_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ u32 data;
+ u32 reg = where & ~3;
+
+ if (!bcm63xx_pcie_can_access(bus, devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
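+ /* the device's config registers sit at a fixed offset in the PCIe core's register space */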
+ if (bus->number == PCIE_BUS_DEVICE)
+ reg += PCIE_DEVICE_OFFSET;
+
+ data = bcm_pcie_readl(reg);
+
+ *val = postprocess_read(data, where, size);
+
+ return PCIBIOS_SUCCESSFUL;
+
+}
+
+static int bcm63xx_pcie_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ u32 data;
+ u32 reg = where & ~3;
+
+ if (!bcm63xx_pcie_can_access(bus, devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (bus->number == PCIE_BUS_DEVICE)
+ reg += PCIE_DEVICE_OFFSET;
+
+ data = bcm_pcie_readl(reg);
+
+ data = preprocess_write(data, val, where, size);
+ bcm_pcie_writel(data, reg);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops bcm63xx_pcie_ops = {
+ .read = bcm63xx_pcie_read,
+ .write = bcm63xx_pcie_write
+};
diff --git a/arch/mips/pci/ops-bonito64.c b/arch/mips/pci/ops-bonito64.c
new file mode 100644
index 000000000..4d5fe614f
--- /dev/null
+++ b/arch/mips/pci/ops-bonito64.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 1999, 2000, 2004 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Maciej W. Rozycki <macro@mips.com>
+ *
+ * MIPS boards specific PCI support.
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+
+#include <asm/mips-boards/bonito64.h>
+
+#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_WRITE 1
+
+#define CFG_SPACE_REG(offset) (void *)CKSEG1ADDR(_pcictrl_bonito_pcicfg + (offset))
+#define ID_SEL_BEGIN 10
+#define MAX_DEV_NUM (31 - ID_SEL_BEGIN)
+
+
+static int bonito64_pcibios_config_access(unsigned char access_type,
+ struct pci_bus *bus,
+ unsigned int devfn, int where,
+ u32 * data)
+{
+ u32 busnum = bus->number;
+ u32 addr, type;
+ u32 dummy;
+ void *addrp;
+ int device = PCI_SLOT(devfn);
+ int function = PCI_FUNC(devfn);
+ int reg = where & ~3;
+
+ if (busnum == 0) {
+ /* Type 0 configuration for onboard PCI bus */
+ if (device > MAX_DEV_NUM)
+ return -1;
+
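+ /* type 0: the device number selects a one-hot IDSEL bit on the AD bus */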
+ addr = (1 << (device + ID_SEL_BEGIN)) | (function << 8) | reg;
+ type = 0;
+ } else {
+ /* Type 1 configuration for offboard PCI bus */
+ addr = (busnum << 16) | (device << 11) | (function << 8) | reg;
+ type = 0x10000;
+ }
+
+ /* Clear aborts */
+ BONITO_PCICMD |= BONITO_PCICMD_MABORT_CLR | BONITO_PCICMD_MTABORT_CLR;
+
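+ /* program the high address bits and cycle type into the config window */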
+ BONITO_PCIMAP_CFG = (addr >> 16) | type;
+
+ /* Flush Bonito register block */
+ dummy = BONITO_PCIMAP_CFG;
+ mmiowb();
+
+ addrp = CFG_SPACE_REG(addr & 0xffff);
+ if (access_type == PCI_ACCESS_WRITE) {
+ writel(cpu_to_le32(*data), addrp);
+ /* Wait till done */
+ while (BONITO_PCIMSTAT & 0xF);
+ } else {
+ *data = le32_to_cpu(readl(addrp));
+ }
+
+ /* Detect Master/Target abort */
+ if (BONITO_PCICMD & (BONITO_PCICMD_MABORT_CLR |
+ BONITO_PCICMD_MTABORT_CLR)) {
+ /* Error occurred */
+
+ /* Clear bits */
+ BONITO_PCICMD |= (BONITO_PCICMD_MABORT_CLR |
+ BONITO_PCICMD_MTABORT_CLR);
+
+ return -1;
+ }
+
+ return 0;
+
+}
+
+
+/*
+ * We can't address 8 and 16 bit words directly. Instead we have to
+ * read/write a 32bit word and mask/modify the data we actually want.
+ */
+static int bonito64_pcibios_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 * val)
+{
+ u32 data = 0;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (bonito64_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
+ &data))
+ return -1;
+
+ if (size == 1)
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ else if (size == 2)
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ else
+ *val = data;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int bonito64_pcibios_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ u32 data = 0;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (size == 4)
+ data = val;
+ else {
+ if (bonito64_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
+ where, &data))
+ return -1;
+
+ if (size == 1)
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else if (size == 2)
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ }
+
+ if (bonito64_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, where,
+ &data))
+ return -1;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops bonito64_pci_ops = {
+ .read = bonito64_pcibios_read,
+ .write = bonito64_pcibios_write
+};
diff --git a/arch/mips/pci/ops-gt64xxx_pci0.c b/arch/mips/pci/ops-gt64xxx_pci0.c
new file mode 100644
index 000000000..501dcdf5a
--- /dev/null
+++ b/arch/mips/pci/ops-gt64xxx_pci0.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 1999, 2000, 2004 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Maciej W. Rozycki <macro@mips.com>
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+
+#include <asm/gt64120.h>
+
+#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_WRITE 1
+
+/*
+ * PCI configuration cycle AD bus definition
+ */
+/* Type 0 */
+#define PCI_CFG_TYPE0_REG_SHF 0
+#define PCI_CFG_TYPE0_FUNC_SHF 8
+
+/* Type 1 */
+#define PCI_CFG_TYPE1_REG_SHF 0
+#define PCI_CFG_TYPE1_FUNC_SHF 8
+#define PCI_CFG_TYPE1_DEV_SHF 11
+#define PCI_CFG_TYPE1_BUS_SHF 16
+
+static int gt64xxx_pci0_pcibios_config_access(unsigned char access_type,
+ struct pci_bus *bus, unsigned int devfn, int where, u32 * data)
+{
+ unsigned char busnum = bus->number;
+ u32 intr;
+
+ if ((busnum == 0) && (devfn >= PCI_DEVFN(31, 0)))
+ return -1; /* Because of a bug in the galileo (for slot 31). */
+
+ /* Clear cause register bits */
+ GT_WRITE(GT_INTRCAUSE_OFS, ~(GT_INTRCAUSE_MASABORT0_BIT |
+ GT_INTRCAUSE_TARABORT0_BIT));
+
+ /* Setup address */
+ GT_WRITE(GT_PCI0_CFGADDR_OFS,
+ (busnum << GT_PCI0_CFGADDR_BUSNUM_SHF) |
+ (devfn << GT_PCI0_CFGADDR_FUNCTNUM_SHF) |
+ ((where / 4) << GT_PCI0_CFGADDR_REGNUM_SHF) |
+ GT_PCI0_CFGADDR_CONFIGEN_BIT);
+
+ if (access_type == PCI_ACCESS_WRITE) {
+ if (busnum == 0 && PCI_SLOT(devfn) == 0) {
+ /*
+ * The Galileo system controller behaves
+ * differently from other devices.
+ */
+ GT_WRITE(GT_PCI0_CFGDATA_OFS, *data);
+ } else
+ __GT_WRITE(GT_PCI0_CFGDATA_OFS, *data);
+ } else {
+ if (busnum == 0 && PCI_SLOT(devfn) == 0) {
+ /*
+ * The Galileo system controller behaves
+ * differently from other devices.
+ */
+ *data = GT_READ(GT_PCI0_CFGDATA_OFS);
+ } else
+ *data = __GT_READ(GT_PCI0_CFGDATA_OFS);
+ }
+
+ /* Check for master or target abort */
+ intr = GT_READ(GT_INTRCAUSE_OFS);
+
+ if (intr & (GT_INTRCAUSE_MASABORT0_BIT | GT_INTRCAUSE_TARABORT0_BIT)) {
+ /* Error occurred */
+
+ /* Clear bits */
+ GT_WRITE(GT_INTRCAUSE_OFS, ~(GT_INTRCAUSE_MASABORT0_BIT |
+ GT_INTRCAUSE_TARABORT0_BIT));
+
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/*
+ * We can't address 8 and 16 bit words directly. Instead we have to
+ * read/write a 32bit word and mask/modify the data we actually want.
+ */
+static int gt64xxx_pci0_pcibios_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 * val)
+{
+ u32 data = 0;
+
+ if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
+ where, &data))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (size == 1)
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ else if (size == 2)
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ else
+ *val = data;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int gt64xxx_pci0_pcibios_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ u32 data = 0;
+
+ if (size == 4)
+ data = val;
+ else {
+ if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_READ, bus,
+ devfn, where, &data))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (size == 1)
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else if (size == 2)
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ }
+
+ if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn,
+ where, &data))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops gt64xxx_pci0_ops = {
+ .read = gt64xxx_pci0_pcibios_read,
+ .write = gt64xxx_pci0_pcibios_write
+};
diff --git a/arch/mips/pci/ops-lantiq.c b/arch/mips/pci/ops-lantiq.c
new file mode 100644
index 000000000..7d7135539
--- /dev/null
+++ b/arch/mips/pci/ops-lantiq.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <asm/addrspace.h>
+#include <linux/vmalloc.h>
+
+#include <lantiq_soc.h>
+
+#include "pci-lantiq.h"
+
+#define LTQ_PCI_CFG_BUSNUM_SHF 16
+#define LTQ_PCI_CFG_DEVNUM_SHF 11
+#define LTQ_PCI_CFG_FUNNUM_SHF 8
+
+#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_WRITE 1
+
+static int ltq_pci_config_access(unsigned char access_type, struct pci_bus *bus,
+ unsigned int devfn, unsigned int where, u32 *data)
+{
+ unsigned long cfg_base;
+ unsigned long flags;
+ u32 temp;
+
+ /* we support slots 0 to 15; devfn 0x68 (AD29) is the
+ SoC itself */
+ if ((bus->number != 0) || ((devfn & 0xf8) > 0x78)
+ || ((devfn & 0xf8) == 0) || ((devfn & 0xf8) == 0x68))
+ return 1;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+
+ cfg_base = (unsigned long) ltq_pci_mapped_cfg;
+ cfg_base |= (bus->number << LTQ_PCI_CFG_BUSNUM_SHF) | (devfn <<
+ LTQ_PCI_CFG_FUNNUM_SHF) | (where & ~0x3);
+
+ /* Perform access */
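+ /* config data goes through swab32: the window is accessed byte-swapped */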
+ if (access_type == PCI_ACCESS_WRITE) {
+ ltq_w32(swab32(*data), ((u32 *)cfg_base));
+ } else {
+ *data = ltq_r32(((u32 *)(cfg_base)));
+ *data = swab32(*data);
+ }
+ wmb();
+
+ /* clean possible Master abort */
+ cfg_base = (unsigned long) ltq_pci_mapped_cfg;
+ cfg_base |= (0x0 << LTQ_PCI_CFG_FUNNUM_SHF) + 4;
+ temp = ltq_r32(((u32 *)(cfg_base)));
+ temp = swab32(temp);
+ cfg_base = (unsigned long) ltq_pci_mapped_cfg;
+ cfg_base |= (0x68 << LTQ_PCI_CFG_FUNNUM_SHF) + 4;
+ ltq_w32(temp, ((u32 *)cfg_base));
+
+ spin_unlock_irqrestore(&ebu_lock, flags);
+
+ if (((*data) == 0xffffffff) && (access_type == PCI_ACCESS_READ))
+ return 1;
+
+ return 0;
+}
+
+int ltq_pci_read_config_dword(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ u32 data = 0;
+
+ if (ltq_pci_config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (size == 1)
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ else if (size == 2)
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ else
+ *val = data;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+int ltq_pci_write_config_dword(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ u32 data = 0;
+
+ if (size == 4) {
+ data = val;
+ } else {
+ if (ltq_pci_config_access(PCI_ACCESS_READ, bus,
+ devfn, where, &data))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (size == 1)
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else if (size == 2)
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ }
+
+ if (ltq_pci_config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+}
diff --git a/arch/mips/pci/ops-loongson2.c b/arch/mips/pci/ops-loongson2.c
new file mode 100644
index 000000000..0d1b36ba1
--- /dev/null
+++ b/arch/mips/pci/ops-loongson2.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 1999, 2000, 2004 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Maciej W. Rozycki <macro@mips.com>
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+
+#include <loongson.h>
+
+#ifdef CONFIG_CS5536
+#include <cs5536/cs5536_pci.h>
+#include <cs5536/cs5536.h>
+#endif
+
+#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_WRITE 1
+
+#define CFG_SPACE_REG(offset) \
+ (void *)CKSEG1ADDR(LOONGSON_PCICFG_BASE | (offset))
+#define ID_SEL_BEGIN 11
+#define MAX_DEV_NUM (31 - ID_SEL_BEGIN)
+
+
+static int loongson_pcibios_config_access(unsigned char access_type,
+ struct pci_bus *bus,
+ unsigned int devfn, int where,
+ u32 *data)
+{
+ u32 busnum = bus->number;
+ u32 addr, type;
+ u32 dummy;
+ void *addrp;
+ int device = PCI_SLOT(devfn);
+ int function = PCI_FUNC(devfn);
+ int reg = where & ~3;
+
+ if (busnum == 0) {
+ /* board-specific part: currently only fuloong2f and yeeloong2f
+ * use the CS5536, fuloong2e uses a VIA686B, and gdium has no
+ * south bridge
+ */
+#ifdef CONFIG_CS5536
+ /* cs5536_pci_conf_read4/write4() call _rdmsr/_wrmsr() to
+ * access the registers PCI_MSR_ADDR, PCI_MSR_DATA_LO and
+ * PCI_MSR_DATA_HI, which all lie above PCI_MSR_CTRL, so those
+ * accesses take the other branch and cannot recurse into an
+ * endless loop here.
+ */
+ if ((PCI_IDSEL_CS5536 == device) && (reg < PCI_MSR_CTRL)) {
+ switch (access_type) {
+ case PCI_ACCESS_READ:
+ *data = cs5536_pci_conf_read4(function, reg);
+ break;
+ case PCI_ACCESS_WRITE:
+ cs5536_pci_conf_write4(function, reg, *data);
+ break;
+ }
+ return 0;
+ }
+#endif
+ /* Type 0 configuration for onboard PCI bus */
+ if (device > MAX_DEV_NUM)
+ return -1;
+
+ addr = (1 << (device + ID_SEL_BEGIN)) | (function << 8) | reg;
+ type = 0;
+ } else {
+ /* Type 1 configuration for offboard PCI bus */
+ addr = (busnum << 16) | (device << 11) | (function << 8) | reg;
+ type = 0x10000;
+ }
+
+ /* Clear aborts */
+ LOONGSON_PCICMD |= LOONGSON_PCICMD_MABORT_CLR | \
+ LOONGSON_PCICMD_MTABORT_CLR;
+
+ LOONGSON_PCIMAP_CFG = (addr >> 16) | type;
+
+ /* Flush Bonito register block */
+ dummy = LOONGSON_PCIMAP_CFG;
+ mmiowb();
+
+ addrp = CFG_SPACE_REG(addr & 0xffff);
+ if (access_type == PCI_ACCESS_WRITE)
+ writel(cpu_to_le32(*data), addrp);
+ else
+ *data = le32_to_cpu(readl(addrp));
+
+ /* Detect Master/Target abort */
+ if (LOONGSON_PCICMD & (LOONGSON_PCICMD_MABORT_CLR |
+ LOONGSON_PCICMD_MTABORT_CLR)) {
+ /* Error occurred */
+
+ /* Clear bits */
+ LOONGSON_PCICMD |= (LOONGSON_PCICMD_MABORT_CLR |
+ LOONGSON_PCICMD_MTABORT_CLR);
+
+ return -1;
+ }
+
+ return 0;
+
+}
+
+
+/*
+ * We can't address 8 and 16 bit words directly. Instead we have to
+ * read/write a 32bit word and mask/modify the data we actually want.
+ */
+static int loongson_pcibios_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ u32 data = 0;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (loongson_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
+ &data))
+ return -1;
+
+ if (size == 1)
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ else if (size == 2)
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ else
+ *val = data;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int loongson_pcibios_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ u32 data = 0;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (size == 4)
+ data = val;
+ else {
+ if (loongson_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
+ where, &data))
+ return -1;
+
+ if (size == 1)
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else if (size == 2)
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ }
+
+ if (loongson_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, where,
+ &data))
+ return -1;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops loongson_pci_ops = {
+ .read = loongson_pcibios_read,
+ .write = loongson_pcibios_write
+};
+
+#ifdef CONFIG_CS5536
+DEFINE_RAW_SPINLOCK(msr_lock);
+
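+/*
+ * CS5536 MSRs are accessed indirectly through its PCI config space:
+ * write the MSR number to PCI_MSR_ADDR, then read or write the data
+ * through PCI_MSR_DATA_LO/PCI_MSR_DATA_HI.
+ */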
+void _rdmsr(u32 msr, u32 *hi, u32 *lo)
+{
+ struct pci_bus bus = {
+ .number = PCI_BUS_CS5536
+ };
+ u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&msr_lock, flags);
+ loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr);
+ loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_LO, 4, lo);
+ loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_HI, 4, hi);
+ raw_spin_unlock_irqrestore(&msr_lock, flags);
+}
+EXPORT_SYMBOL(_rdmsr);
+
+void _wrmsr(u32 msr, u32 hi, u32 lo)
+{
+ struct pci_bus bus = {
+ .number = PCI_BUS_CS5536
+ };
+ u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&msr_lock, flags);
+ loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr);
+ loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_LO, 4, lo);
+ loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_HI, 4, hi);
+ raw_spin_unlock_irqrestore(&msr_lock, flags);
+}
+EXPORT_SYMBOL(_wrmsr);
+#endif
diff --git a/arch/mips/pci/ops-mace.c b/arch/mips/pci/ops-mace.c
new file mode 100644
index 000000000..951d8070f
--- /dev/null
+++ b/arch/mips/pci/ops-mace.c
@@ -0,0 +1,100 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000, 2001 Keith M Wesolowski
+ */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <asm/ip32/mace.h>
+
+#if 0
+# define DPRINTK(args...) printk(args);
+#else
+# define DPRINTK(args...)
+#endif
+
+/*
+ * O2 has up to 5 PCI devices connected into the MACE bridge. The device
+ * map looks like this:
+ *
+ * 0 aic7xxx 0
+ * 1 aic7xxx 1
+ * 2 expansion slot
+ * 3 N/C
+ * 4 N/C
+ */
+
+static inline int mkaddr(struct pci_bus *bus, unsigned int devfn,
+ unsigned int reg)
+{
+ return ((bus->number & 0xff) << 16) |
+ ((devfn & 0xff) << 8) |
+ (reg & 0xfc);
+}
+
+
+static int
+mace_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 *val)
+{
+ u32 control = mace->pci.control;
+
+ /* disable master abort interrupts during config read */
+ mace->pci.control = control & ~MACEPCI_CONTROL_MAR_INT;
+ mace->pci.config_addr = mkaddr(bus, devfn, reg);
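+ /* the ^3 / ^1 indexing swaps byte and halfword lanes (big-endian layout) */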
+ switch (size) {
+ case 1:
+ *val = mace->pci.config_data.b[(reg & 3) ^ 3];
+ break;
+ case 2:
+ *val = mace->pci.config_data.w[((reg >> 1) & 1) ^ 1];
+ break;
+ case 4:
+ *val = mace->pci.config_data.l;
+ break;
+ }
+ /* ack possible master abort */
+ mace->pci.error &= ~MACEPCI_ERROR_MASTER_ABORT;
+ mace->pci.control = control;
+ /*
+ * someone forgot to set the ultra bit for the onboard
+ * scsi chips; we fake it here
+ */
+ if (bus->number == 0 && reg == 0x40 && size == 4 &&
+ (devfn == (1 << 3) || devfn == (2 << 3)))
+ *val |= 0x1000;
+
+ DPRINTK("read%d: reg=%08x,val=%02x\n", size * 8, reg, *val);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+mace_pci_write_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 val)
+{
+ mace->pci.config_addr = mkaddr(bus, devfn, reg);
+ switch (size) {
+ case 1:
+ mace->pci.config_data.b[(reg & 3) ^ 3] = val;
+ break;
+ case 2:
+ mace->pci.config_data.w[((reg >> 1) & 1) ^ 1] = val;
+ break;
+ case 4:
+ mace->pci.config_data.l = val;
+ break;
+ }
+
+ DPRINTK("write%d: reg=%08x,val=%02x\n", size * 8, reg, val);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops mace_pci_ops = {
+ .read = mace_pci_read_config,
+ .write = mace_pci_write_config,
+};
diff --git a/arch/mips/pci/ops-msc.c b/arch/mips/pci/ops-msc.c
new file mode 100644
index 000000000..1f438baaf
--- /dev/null
+++ b/arch/mips/pci/ops-msc.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Maciej W. Rozycki <macro@mips.com>
+ * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * MIPS boards specific PCI support.
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+
+#include <asm/mips-boards/msc01_pci.h>
+
+#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_WRITE 1
+
+/*
+ * PCI configuration cycle AD bus definition
+ */
+/* Type 0 */
+#define PCI_CFG_TYPE0_REG_SHF 0
+#define PCI_CFG_TYPE0_FUNC_SHF 8
+
+/* Type 1 */
+#define PCI_CFG_TYPE1_REG_SHF 0
+#define PCI_CFG_TYPE1_FUNC_SHF 8
+#define PCI_CFG_TYPE1_DEV_SHF 11
+#define PCI_CFG_TYPE1_BUS_SHF 16
+
+static int msc_pcibios_config_access(unsigned char access_type,
+ struct pci_bus *bus, unsigned int devfn, int where, u32 * data)
+{
+ unsigned char busnum = bus->number;
+ u32 intr;
+
+ /* Clear status register bits. */
+ MSC_WRITE(MSC01_PCI_INTSTAT,
+ (MSC01_PCI_INTCFG_MA_BIT | MSC01_PCI_INTCFG_TA_BIT));
+
+ MSC_WRITE(MSC01_PCI_CFGADDR,
+ ((busnum << MSC01_PCI_CFGADDR_BNUM_SHF) |
+ (PCI_SLOT(devfn) << MSC01_PCI_CFGADDR_DNUM_SHF) |
+ (PCI_FUNC(devfn) << MSC01_PCI_CFGADDR_FNUM_SHF) |
+ ((where / 4) << MSC01_PCI_CFGADDR_RNUM_SHF)));
+
+ /* Perform access */
+ if (access_type == PCI_ACCESS_WRITE)
+ MSC_WRITE(MSC01_PCI_CFGDATA, *data);
+ else
+ MSC_READ(MSC01_PCI_CFGDATA, *data);
+
+ /* Detect Master/Target abort */
+ MSC_READ(MSC01_PCI_INTSTAT, intr);
+ if (intr & (MSC01_PCI_INTCFG_MA_BIT | MSC01_PCI_INTCFG_TA_BIT)) {
+ /* Error occurred */
+
+ /* Clear bits */
+ MSC_WRITE(MSC01_PCI_INTSTAT,
+ (MSC01_PCI_INTCFG_MA_BIT | MSC01_PCI_INTCFG_TA_BIT));
+
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/*
+ * We can't address 8 and 16 bit words directly. Instead we have to
+ * read/write a 32bit word and mask/modify the data we actually want.
+ */
+static int msc_pcibios_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 * val)
+{
+ u32 data = 0;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (msc_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
+ &data))
+ return -1;
+
+ if (size == 1)
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ else if (size == 2)
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ else
+ *val = data;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int msc_pcibios_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ u32 data = 0;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (size == 4)
+ data = val;
+ else {
+ if (msc_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
+ where, &data))
+ return -1;
+
+ if (size == 1)
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else if (size == 2)
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ }
+
+ if (msc_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, where,
+ &data))
+ return -1;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops msc_pci_ops = {
+ .read = msc_pcibios_read,
+ .write = msc_pcibios_write
+};
diff --git a/arch/mips/pci/ops-rc32434.c b/arch/mips/pci/ops-rc32434.c
new file mode 100644
index 000000000..874ed6df9
--- /dev/null
+++ b/arch/mips/pci/ops-rc32434.c
@@ -0,0 +1,206 @@
+/*
+ * BRIEF MODULE DESCRIPTION
+ * pci_ops for IDT EB434 board
+ *
+ * Copyright 2004 IDT Inc. (rischelp@idt.com)
+ * Copyright 2006 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include <asm/cpu.h>
+#include <asm/mach-rc32434/rc32434.h>
+#include <asm/mach-rc32434/pci.h>
+
+#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_WRITE 1
+
+
+#define PCI_CFG_SET(bus, slot, func, off) \
+ (rc32434_pci->pcicfga = (0x80000000 | \
+ ((bus) << 16) | ((slot)<<11) | \
+ ((func)<<8) | (off)))
+
+static inline int config_access(unsigned char access_type,
+ struct pci_bus *bus, unsigned int devfn,
+ unsigned char where, u32 *data)
+{
+ unsigned int slot = PCI_SLOT(devfn);
+ u8 func = PCI_FUNC(devfn);
+
+ /* Setup address */
+ PCI_CFG_SET(bus->number, slot, func, where);
+ rc32434_sync();
+
+ if (access_type == PCI_ACCESS_WRITE)
+ rc32434_pci->pcicfgd = *data;
+ else
+ *data = rc32434_pci->pcicfgd;
+
+ rc32434_sync();
+
+ return 0;
+}
+
+
+/*
+ * We can't address 8 and 16 bit words directly. Instead we have to
+ * read/write a 32bit word and mask/modify the data we actually want.
+ */
+static int read_config_byte(struct pci_bus *bus, unsigned int devfn,
+ int where, u8 *val)
+{
+ u32 data;
+ int ret;
+
+ ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data);
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ return ret;
+}
+
+static int read_config_word(struct pci_bus *bus, unsigned int devfn,
+ int where, u16 *val)
+{
+ u32 data;
+ int ret;
+
+ ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data);
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ return ret;
+}
+
+static int read_config_dword(struct pci_bus *bus, unsigned int devfn,
+ int where, u32 *val)
+{
+ int ret;
+ int delay = 1;
+
+ /*
+ * Don't scan too far, else there will be errors with plugged in
+ * daughterboard (rb564).
+ */
+ if (bus->number == 0 && PCI_SLOT(devfn) > 21)
+ return 0;
+
+retry:
+ ret = config_access(PCI_ACCESS_READ, bus, devfn, where, val);
+
+ /*
+ * Certain devices respond slowly at device scan time; this
+ * gives them time to settle
+ */
+ if (where == PCI_VENDOR_ID) {
+ if (ret == 0xffffffff || ret == 0x00000000 ||
+ ret == 0x0000ffff || ret == 0xffff0000) {
+ if (delay > 4)
+ return 0;
+ delay *= 2;
+ msleep(delay);
+ goto retry;
+ }
+ }
+
+ return ret;
+}
+
+static int
+write_config_byte(struct pci_bus *bus, unsigned int devfn, int where,
+ u8 val)
+{
+ u32 data = 0;
+
+ if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
+ return -1;
+
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+
+ if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
+ return -1;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+
+static int
+write_config_word(struct pci_bus *bus, unsigned int devfn, int where,
+ u16 val)
+{
+ u32 data = 0;
+
+ if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
+ return -1;
+
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+
+ if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
+ return -1;
+
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+
+static int
+write_config_dword(struct pci_bus *bus, unsigned int devfn, int where,
+ u32 val)
+{
+ if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &val))
+ return -1;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ switch (size) {
+ case 1:
+ return read_config_byte(bus, devfn, where, (u8 *) val);
+ case 2:
+ return read_config_word(bus, devfn, where, (u16 *) val);
+ default:
+ return read_config_dword(bus, devfn, where, val);
+ }
+}
+
+static int pci_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ switch (size) {
+ case 1:
+ return write_config_byte(bus, devfn, where, (u8) val);
+ case 2:
+ return write_config_word(bus, devfn, where, (u16) val);
+ default:
+ return write_config_dword(bus, devfn, where, val);
+ }
+}
+
+struct pci_ops rc32434_pci_ops = {
+ .read = pci_config_read,
+ .write = pci_config_write,
+};
diff --git a/arch/mips/pci/ops-sni.c b/arch/mips/pci/ops-sni.c
new file mode 100644
index 000000000..35daa7fe6
--- /dev/null
+++ b/arch/mips/pci/ops-sni.c
@@ -0,0 +1,164 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SNI specific PCI support for RM200/RM300.
+ *
+ * Copyright (C) 1997 - 2000, 2003 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <asm/sni.h>
+
+/*
+ * It seems that on the RM200 only the lower 3 bits of the 5-bit PCI device
+ * address are decoded. We therefore have to manually reject attempts at
+ * reading outside this range. Being on the paranoid side we only do this
+ * test for bus 0 and hope forwarding and decoding work properly for any
+ * subordinate buses.
+ *
+ * ASIC PCI only supports type 1 config cycles.
+ */
+static int set_config_address(unsigned int busno, unsigned int devfn, int reg)
+{
+ if ((devfn > 255) || (reg > 255))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (busno == 0 && devfn >= PCI_DEVFN(8, 0))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ *(volatile u32 *)PCIMT_CONFIG_ADDRESS =
+ ((busno & 0xff) << 16) |
+ ((devfn & 0xff) << 8) |
+ (reg & 0xfc);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pcimt_read(struct pci_bus *bus, unsigned int devfn, int reg,
+ int size, u32 * val)
+{
+ int res;
+
+ if ((res = set_config_address(bus->number, devfn, reg)))
+ return res;
+
+ switch (size) {
+ case 1:
+ *val = inb(PCIMT_CONFIG_DATA + (reg & 3));
+ break;
+ case 2:
+ *val = inw(PCIMT_CONFIG_DATA + (reg & 2));
+ break;
+ case 4:
+ *val = inl(PCIMT_CONFIG_DATA);
+ break;
+ }
+
+ return 0;
+}
+
+static int pcimt_write(struct pci_bus *bus, unsigned int devfn, int reg,
+ int size, u32 val)
+{
+ int res;
+
+ if ((res = set_config_address(bus->number, devfn, reg)))
+ return res;
+
+ switch (size) {
+ case 1:
+ outb(val, PCIMT_CONFIG_DATA + (reg & 3));
+ break;
+ case 2:
+ outw(val, PCIMT_CONFIG_DATA + (reg & 2));
+ break;
+ case 4:
+ outl(val, PCIMT_CONFIG_DATA);
+ break;
+ }
+
+ return 0;
+}
+
+struct pci_ops sni_pcimt_ops = {
+ .read = pcimt_read,
+ .write = pcimt_write,
+};
+
+static int pcit_set_config_address(unsigned int busno, unsigned int devfn, int reg)
+{
+ if ((devfn > 255) || (reg > 255) || (busno > 255))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ outl((1 << 31) | ((busno & 0xff) << 16) | ((devfn & 0xff) << 8) | (reg & 0xfc), 0xcf8);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pcit_read(struct pci_bus *bus, unsigned int devfn, int reg,
+ int size, u32 * val)
+{
+ int res;
+
+ /*
+ * on bus 0 we need to check whether a device answers for the devfn
+ * by doing a config write and checking the result; if we don't,
+ * we will get a data bus error
+ */
+ if (bus->number == 0) {
+ pcit_set_config_address(0, 0, 0x68);
+ outl(inl(0xcfc) | 0xc0000000, 0xcfc);
+ if ((res = pcit_set_config_address(0, devfn, 0)))
+ return res;
+ outl(0xffffffff, 0xcfc);
+ pcit_set_config_address(0, 0, 0x68);
+ if (inl(0xcfc) & 0x100000)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ if ((res = pcit_set_config_address(bus->number, devfn, reg)))
+ return res;
+
+ switch (size) {
+ case 1:
+ *val = inb(PCIMT_CONFIG_DATA + (reg & 3));
+ break;
+ case 2:
+ *val = inw(PCIMT_CONFIG_DATA + (reg & 2));
+ break;
+ case 4:
+ *val = inl(PCIMT_CONFIG_DATA);
+ break;
+ }
+ return 0;
+}
+
+static int pcit_write(struct pci_bus *bus, unsigned int devfn, int reg,
+ int size, u32 val)
+{
+ int res;
+
+ if ((res = pcit_set_config_address(bus->number, devfn, reg)))
+ return res;
+
+ switch (size) {
+ case 1:
+ outb(val, PCIMT_CONFIG_DATA + (reg & 3));
+ break;
+ case 2:
+ outw(val, PCIMT_CONFIG_DATA + (reg & 2));
+ break;
+ case 4:
+ outl(val, PCIMT_CONFIG_DATA);
+ break;
+ }
+
+ return 0;
+}
+
+
+struct pci_ops sni_pcit_ops = {
+ .read = pcit_read,
+ .write = pcit_write,
+};
diff --git a/arch/mips/pci/ops-tx3927.c b/arch/mips/pci/ops-tx3927.c
new file mode 100644
index 000000000..d35dc9c9a
--- /dev/null
+++ b/arch/mips/pci/ops-tx3927.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ahennessy@mvista.com
+ *
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
+ *
+ * Based on arch/mips/ddb5xxx/ddb5477/pci_ops.c
+ *
+ * Define the pci_ops for TX3927.
+ *
+ * Much of the code is derived from the original DDB5074 port by
+ * Geert Uytterhoeven <geert@linux-m68k.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include <asm/addrspace.h>
+#include <asm/txx9irq.h>
+#include <asm/txx9/pci.h>
+#include <asm/txx9/tx3927.h>
+
+static int mkaddr(struct pci_bus *bus, unsigned char devfn, unsigned char where)
+{
+ if (bus->parent == NULL &&
+ devfn >= PCI_DEVFN(TX3927_PCIC_MAX_DEVNU, 0))
+ return -1;
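+ /* bit 0 of ica selects a type 1 cycle for buses behind a bridge */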
+ tx3927_pcicptr->ica =
+ ((bus->number & 0xff) << 0x10) |
+ ((devfn & 0xff) << 0x08) |
+ (where & 0xfc) | (bus->parent ? 1 : 0);
+
+ /* clear M_ABORT and Disable M_ABORT Int. */
+ tx3927_pcicptr->pcistat |= PCI_STATUS_REC_MASTER_ABORT;
+ tx3927_pcicptr->pcistatim &= ~PCI_STATUS_REC_MASTER_ABORT;
+ return 0;
+}
+
+static inline int check_abort(void)
+{
+ if (tx3927_pcicptr->pcistat & PCI_STATUS_REC_MASTER_ABORT) {
+ tx3927_pcicptr->pcistat |= PCI_STATUS_REC_MASTER_ABORT;
+ tx3927_pcicptr->pcistatim |= PCI_STATUS_REC_MASTER_ABORT;
+ /* flush write buffer */
+ iob();
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int tx3927_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 * val)
+{
+ if (mkaddr(bus, devfn, where)) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ switch (size) {
+ case 1:
+ *val = *(volatile u8 *) ((unsigned long) & tx3927_pcicptr->icd | (where & 3));
+ break;
+
+ case 2:
+ *val = le16_to_cpu(*(volatile u16 *) ((unsigned long) & tx3927_pcicptr->icd | (where & 3)));
+ break;
+
+ case 4:
+ *val = le32_to_cpu(tx3927_pcicptr->icd);
+ break;
+ }
+
+ return check_abort();
+}
+
+static int tx3927_pci_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ if (mkaddr(bus, devfn, where))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ *(volatile u8 *) ((unsigned long) & tx3927_pcicptr->icd | (where & 3)) = val;
+ break;
+
+ case 2:
+ *(volatile u16 *) ((unsigned long) & tx3927_pcicptr->icd | (where & 2)) =
+ cpu_to_le16(val);
+ break;
+
+ case 4:
+ tx3927_pcicptr->icd = cpu_to_le32(val);
+ }
+
+ return check_abort();
+}
+
+static struct pci_ops tx3927_pci_ops = {
+ .read = tx3927_pci_read_config,
+ .write = tx3927_pci_write_config,
+};
+
+void __init tx3927_pcic_setup(struct pci_controller *channel,
+ unsigned long sdram_size, int extarb)
+{
+ unsigned long flags;
+ unsigned long io_base =
+ channel->io_resource->start + mips_io_port_base - IO_BASE;
+ unsigned long io_size =
+ channel->io_resource->end - channel->io_resource->start;
+ unsigned long io_pciaddr =
+ channel->io_resource->start - channel->io_offset;
+ unsigned long mem_base =
+ channel->mem_resource->start;
+ unsigned long mem_size =
+ channel->mem_resource->end - channel->mem_resource->start;
+ unsigned long mem_pciaddr =
+ channel->mem_resource->start - channel->mem_offset;
+
+ printk(KERN_INFO "TX3927 PCIC -- DID:%04x VID:%04x RID:%02x Arbiter:%s",
+ tx3927_pcicptr->did, tx3927_pcicptr->vid,
+ tx3927_pcicptr->rid,
+ extarb ? "External" : "Internal");
+ channel->pci_ops = &tx3927_pci_ops;
+
+ local_irq_save(flags);
+ /* Disable External PCI Config. Access */
+ tx3927_pcicptr->lbc = TX3927_PCIC_LBC_EPCAD;
+#ifdef __BIG_ENDIAN
+ tx3927_pcicptr->lbc |= TX3927_PCIC_LBC_IBSE |
+ TX3927_PCIC_LBC_TIBSE |
+ TX3927_PCIC_LBC_TMFBSE | TX3927_PCIC_LBC_MSDSE;
+#endif
+ /* LB->PCI mappings */
+ tx3927_pcicptr->iomas = ~(io_size - 1);
+ tx3927_pcicptr->ilbioma = io_base;
+ tx3927_pcicptr->ipbioma = io_pciaddr;
+ tx3927_pcicptr->mmas = ~(mem_size - 1);
+ tx3927_pcicptr->ilbmma = mem_base;
+ tx3927_pcicptr->ipbmma = mem_pciaddr;
+ /* PCI->LB mappings */
+ tx3927_pcicptr->iobas = 0xffffffff;
+ tx3927_pcicptr->ioba = 0;
+ tx3927_pcicptr->tlbioma = 0;
+ tx3927_pcicptr->mbas = ~(sdram_size - 1);
+ tx3927_pcicptr->mba = 0;
+ tx3927_pcicptr->tlbmma = 0;
+ /* Enable Direct mapping Address Space Decoder */
+ tx3927_pcicptr->lbc |= TX3927_PCIC_LBC_ILMDE | TX3927_PCIC_LBC_ILIDE;
+
+ /* Clear All Local Bus Status */
+ tx3927_pcicptr->lbstat = TX3927_PCIC_LBIM_ALL;
+ /* Enable All Local Bus Interrupts */
+ tx3927_pcicptr->lbim = TX3927_PCIC_LBIM_ALL;
+ /* Clear All PCI Status Error */
+ tx3927_pcicptr->pcistat = TX3927_PCIC_PCISTATIM_ALL;
+ /* Enable All PCI Status Error Interrupts */
+ tx3927_pcicptr->pcistatim = TX3927_PCIC_PCISTATIM_ALL;
+
+ /* PCIC Int => IRC IRQ10 */
+ tx3927_pcicptr->il = TX3927_IR_PCI;
+ /* Target Control (per errata) */
+ tx3927_pcicptr->tc = TX3927_PCIC_TC_OF8E | TX3927_PCIC_TC_IF8E;
+
+ /* Enable Bus Arbiter */
+ if (!extarb)
+ tx3927_pcicptr->pbapmc = TX3927_PCIC_PBAPMC_PBAEN;
+
+ tx3927_pcicptr->pcicmd = PCI_COMMAND_MASTER |
+ PCI_COMMAND_MEMORY |
+ PCI_COMMAND_IO |
+ PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
+ local_irq_restore(flags);
+}
+
+static irqreturn_t tx3927_pcierr_interrupt(int irq, void *dev_id)
+{
+ struct pt_regs *regs = get_irq_regs();
+
+ if (txx9_pci_err_action != TXX9_PCI_ERR_IGNORE) {
+ printk(KERN_WARNING "PCI error interrupt at 0x%08lx.\n",
+ regs->cp0_epc);
+ printk(KERN_WARNING "pcistat:%02x, lbstat:%04lx\n",
+ tx3927_pcicptr->pcistat, tx3927_pcicptr->lbstat);
+ }
+ if (txx9_pci_err_action != TXX9_PCI_ERR_PANIC) {
+ /* clear all pci errors */
+ tx3927_pcicptr->pcistat |= TX3927_PCIC_PCISTATIM_ALL;
+ tx3927_pcicptr->istat = TX3927_PCIC_IIM_ALL;
+ tx3927_pcicptr->tstat = TX3927_PCIC_TIM_ALL;
+ tx3927_pcicptr->lbstat = TX3927_PCIC_LBIM_ALL;
+ return IRQ_HANDLED;
+ }
+ console_verbose();
+ panic("PCI error.");
+}
+
+void __init tx3927_setup_pcierr_irq(void)
+{
+ if (request_irq(TXX9_IRQ_BASE + TX3927_IR_PCI,
+ tx3927_pcierr_interrupt,
+ 0, "PCI error",
+ (void *)TX3927_PCIC_REG))
+ printk(KERN_WARNING "Failed to request irq for PCIERR\n");
+}
diff --git a/arch/mips/pci/ops-tx4927.c b/arch/mips/pci/ops-tx4927.c
new file mode 100644
index 000000000..f7802f100
--- /dev/null
+++ b/arch/mips/pci/ops-tx4927.c
@@ -0,0 +1,524 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Define the pci_ops for the PCIC on Toshiba TX4927, TX4938, etc.
+ *
+ * Based on linux/arch/mips/pci/ops-tx4938.c,
+ * linux/arch/mips/pci/fixup-rbtx4938.c,
+ * linux/arch/mips/txx9/rbtx4938/setup.c,
+ * and RBTX49xx patch from CELF patch archive.
+ *
+ * 2003-2005 (c) MontaVista Software, Inc.
+ * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
+ * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/txx9/pci.h>
+#include <asm/txx9/tx4927pcic.h>
+
+static struct {
+ struct pci_controller *channel;
+ struct tx4927_pcic_reg __iomem *pcicptr;
+} pcicptrs[2]; /* TX4938 has 2 pcic */
+
+static void __init set_tx4927_pcicptr(struct pci_controller *channel,
+ struct tx4927_pcic_reg __iomem *pcicptr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) {
+ if (pcicptrs[i].channel == channel) {
+ pcicptrs[i].pcicptr = pcicptr;
+ return;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) {
+ if (!pcicptrs[i].channel) {
+ pcicptrs[i].channel = channel;
+ pcicptrs[i].pcicptr = pcicptr;
+ return;
+ }
+ }
+ BUG();
+}
+
+struct tx4927_pcic_reg __iomem *get_tx4927_pcicptr(
+ struct pci_controller *channel)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) {
+ if (pcicptrs[i].channel == channel)
+ return pcicptrs[i].pcicptr;
+ }
+ return NULL;
+}
+
+static int mkaddr(struct pci_bus *bus, unsigned int devfn, int where,
+ struct tx4927_pcic_reg __iomem *pcicptr)
+{
+ if (bus->parent == NULL &&
+ devfn >= PCI_DEVFN(TX4927_PCIC_MAX_DEVNU, 0))
+ return -1;
+ __raw_writel(((bus->number & 0xff) << 0x10)
+ | ((devfn & 0xff) << 0x08) | (where & 0xfc)
+ | (bus->parent ? 1 : 0),
+ &pcicptr->g2pcfgadrs);
+ /* clear M_ABORT and Disable M_ABORT Int. */
+ __raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff)
+ | (PCI_STATUS_REC_MASTER_ABORT << 16),
+ &pcicptr->pcistatus);
+ return 0;
+}
+
+static int check_abort(struct tx4927_pcic_reg __iomem *pcicptr)
+{
+ int code = PCIBIOS_SUCCESSFUL;
+
+ /* wait write cycle completion before checking error status */
+ while (__raw_readl(&pcicptr->pcicstatus) & TX4927_PCIC_PCICSTATUS_IWB)
+ ;
+ if (__raw_readl(&pcicptr->pcistatus)
+ & (PCI_STATUS_REC_MASTER_ABORT << 16)) {
+ __raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff)
+ | (PCI_STATUS_REC_MASTER_ABORT << 16),
+ &pcicptr->pcistatus);
+ /* flush write buffer */
+ iob();
+ code = PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ return code;
+}
+
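+/*
+ * g2pcfgdata is a 32-bit register; on big-endian the byte and halfword
+ * lanes are swapped, hence the offset XOR in the helpers below.
+ */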
+static u8 icd_readb(int offset, struct tx4927_pcic_reg __iomem *pcicptr)
+{
+#ifdef __BIG_ENDIAN
+ offset ^= 3;
+#endif
+ return __raw_readb((void __iomem *)&pcicptr->g2pcfgdata + offset);
+}
+static u16 icd_readw(int offset, struct tx4927_pcic_reg __iomem *pcicptr)
+{
+#ifdef __BIG_ENDIAN
+ offset ^= 2;
+#endif
+ return __raw_readw((void __iomem *)&pcicptr->g2pcfgdata + offset);
+}
+static u32 icd_readl(struct tx4927_pcic_reg __iomem *pcicptr)
+{
+ return __raw_readl(&pcicptr->g2pcfgdata);
+}
+static void icd_writeb(u8 val, int offset,
+ struct tx4927_pcic_reg __iomem *pcicptr)
+{
+#ifdef __BIG_ENDIAN
+ offset ^= 3;
+#endif
+ __raw_writeb(val, (void __iomem *)&pcicptr->g2pcfgdata + offset);
+}
+static void icd_writew(u16 val, int offset,
+ struct tx4927_pcic_reg __iomem *pcicptr)
+{
+#ifdef __BIG_ENDIAN
+ offset ^= 2;
+#endif
+ __raw_writew(val, (void __iomem *)&pcicptr->g2pcfgdata + offset);
+}
+static void icd_writel(u32 val, struct tx4927_pcic_reg __iomem *pcicptr)
+{
+ __raw_writel(val, &pcicptr->g2pcfgdata);
+}
+
+static struct tx4927_pcic_reg __iomem *pci_bus_to_pcicptr(struct pci_bus *bus)
+{
+ struct pci_controller *channel = bus->sysdata;
+ return get_tx4927_pcicptr(channel);
+}
+
+static int tx4927_pci_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(bus);
+
+ if (mkaddr(bus, devfn, where, pcicptr)) {
+ *val = 0xffffffff;
+ return -1;
+ }
+ switch (size) {
+ case 1:
+ *val = icd_readb(where & 3, pcicptr);
+ break;
+ case 2:
+ *val = icd_readw(where & 3, pcicptr);
+ break;
+ default:
+ *val = icd_readl(pcicptr);
+ }
+ return check_abort(pcicptr);
+}
+
+static int tx4927_pci_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(bus);
+
+ if (mkaddr(bus, devfn, where, pcicptr))
+ return -1;
+ switch (size) {
+ case 1:
+ icd_writeb(val, where & 3, pcicptr);
+ break;
+ case 2:
+ icd_writew(val, where & 3, pcicptr);
+ break;
+ default:
+ icd_writel(val, pcicptr);
+ }
+ return check_abort(pcicptr);
+}
+
+static struct pci_ops tx4927_pci_ops = {
+ .read = tx4927_pci_config_read,
+ .write = tx4927_pci_config_write,
+};
+
+static struct {
+ u8 trdyto;
+ u8 retryto;
+ u16 gbwc;
+} tx4927_pci_opts = {
+ .trdyto = 0,
+ .retryto = 0,
+ .gbwc = 0xfe0, /* 4064 GBUSCLK for CCFG.GTOT=0b11 */
+};
+
+char *tx4927_pcibios_setup(char *str)
+{
+ if (!strncmp(str, "trdyto=", 7)) {
+ u8 val = 0;
+ if (kstrtou8(str + 7, 0, &val) == 0)
+ tx4927_pci_opts.trdyto = val;
+ return NULL;
+ }
+ if (!strncmp(str, "retryto=", 8)) {
+ u8 val = 0;
+ if (kstrtou8(str + 8, 0, &val) == 0)
+ tx4927_pci_opts.retryto = val;
+ return NULL;
+ }
+ if (!strncmp(str, "gbwc=", 5)) {
+ u16 val;
+ if (kstrtou16(str + 5, 0, &val) == 0)
+ tx4927_pci_opts.gbwc = val;
+ return NULL;
+ }
+ return str;
+}
+
+void __init tx4927_pcic_setup(struct tx4927_pcic_reg __iomem *pcicptr,
+ struct pci_controller *channel, int extarb)
+{
+ int i;
+ unsigned long flags;
+
+ set_tx4927_pcicptr(channel, pcicptr);
+
+ if (!channel->pci_ops)
+ printk(KERN_INFO
+ "PCIC -- DID:%04x VID:%04x RID:%02x Arbiter:%s\n",
+ __raw_readl(&pcicptr->pciid) >> 16,
+ __raw_readl(&pcicptr->pciid) & 0xffff,
+ __raw_readl(&pcicptr->pciccrev) & 0xff,
+ extarb ? "External" : "Internal");
+ channel->pci_ops = &tx4927_pci_ops;
+
+ local_irq_save(flags);
+
+ /* Disable All Initiator Space */
+ __raw_writel(__raw_readl(&pcicptr->pciccfg)
+ & ~(TX4927_PCIC_PCICCFG_G2PMEN(0)
+ | TX4927_PCIC_PCICCFG_G2PMEN(1)
+ | TX4927_PCIC_PCICCFG_G2PMEN(2)
+ | TX4927_PCIC_PCICCFG_G2PIOEN),
+ &pcicptr->pciccfg);
+
+ /* GB->PCI mappings */
+ __raw_writel((channel->io_resource->end - channel->io_resource->start)
+ >> 4,
+ &pcicptr->g2piomask);
+ ____raw_writeq((channel->io_resource->start +
+ channel->io_map_base - IO_BASE) |
+#ifdef __BIG_ENDIAN
+ TX4927_PCIC_G2PIOGBASE_ECHG
+#else
+ TX4927_PCIC_G2PIOGBASE_BSDIS
+#endif
+ , &pcicptr->g2piogbase);
+ ____raw_writeq(channel->io_resource->start - channel->io_offset,
+ &pcicptr->g2piopbase);
+ for (i = 0; i < 3; i++) {
+ __raw_writel(0, &pcicptr->g2pmmask[i]);
+ ____raw_writeq(0, &pcicptr->g2pmgbase[i]);
+ ____raw_writeq(0, &pcicptr->g2pmpbase[i]);
+ }
+ if (channel->mem_resource->end) {
+ __raw_writel((channel->mem_resource->end
+ - channel->mem_resource->start) >> 4,
+ &pcicptr->g2pmmask[0]);
+ ____raw_writeq(channel->mem_resource->start |
+#ifdef __BIG_ENDIAN
+ TX4927_PCIC_G2PMnGBASE_ECHG
+#else
+ TX4927_PCIC_G2PMnGBASE_BSDIS
+#endif
+ , &pcicptr->g2pmgbase[0]);
+ ____raw_writeq(channel->mem_resource->start -
+ channel->mem_offset,
+ &pcicptr->g2pmpbase[0]);
+ }
+ /* PCI->GB mappings (I/O 256B) */
+ __raw_writel(0, &pcicptr->p2giopbase); /* 256B */
+ ____raw_writeq(0, &pcicptr->p2giogbase);
+ /* PCI->GB mappings (MEM 512MB (64MB on R1.x)) */
+ __raw_writel(0, &pcicptr->p2gm0plbase);
+ __raw_writel(0, &pcicptr->p2gm0pubase);
+ ____raw_writeq(TX4927_PCIC_P2GMnGBASE_TMEMEN |
+#ifdef __BIG_ENDIAN
+ TX4927_PCIC_P2GMnGBASE_TECHG
+#else
+ TX4927_PCIC_P2GMnGBASE_TBSDIS
+#endif
+ , &pcicptr->p2gmgbase[0]);
+ /* PCI->GB mappings (MEM 16MB) */
+ __raw_writel(0xffffffff, &pcicptr->p2gm1plbase);
+ __raw_writel(0xffffffff, &pcicptr->p2gm1pubase);
+ ____raw_writeq(0, &pcicptr->p2gmgbase[1]);
+ /* PCI->GB mappings (MEM 1MB) */
+ __raw_writel(0xffffffff, &pcicptr->p2gm2pbase); /* 1MB */
+ ____raw_writeq(0, &pcicptr->p2gmgbase[2]);
+
+ /* Clear all (including IRBER) except for GBWC */
+ __raw_writel((tx4927_pci_opts.gbwc << 16)
+ & TX4927_PCIC_PCICCFG_GBWC_MASK,
+ &pcicptr->pciccfg);
+ /* Enable Initiator Memory Space */
+ if (channel->mem_resource->end)
+ __raw_writel(__raw_readl(&pcicptr->pciccfg)
+ | TX4927_PCIC_PCICCFG_G2PMEN(0),
+ &pcicptr->pciccfg);
+ /* Enable Initiator I/O Space */
+ if (channel->io_resource->end)
+ __raw_writel(__raw_readl(&pcicptr->pciccfg)
+ | TX4927_PCIC_PCICCFG_G2PIOEN,
+ &pcicptr->pciccfg);
+ /* Enable Initiator Config */
+ __raw_writel(__raw_readl(&pcicptr->pciccfg)
+ | TX4927_PCIC_PCICCFG_ICAEN | TX4927_PCIC_PCICCFG_TCAR,
+ &pcicptr->pciccfg);
+
+ /* Do not use MEMMUL, MEMINF: YMFPCI card causes M_ABORT. */
+ __raw_writel(0, &pcicptr->pcicfg1);
+
+ __raw_writel((__raw_readl(&pcicptr->g2ptocnt) & ~0xffff)
+ | (tx4927_pci_opts.trdyto & 0xff)
+ | ((tx4927_pci_opts.retryto & 0xff) << 8),
+ &pcicptr->g2ptocnt);
+
+ /* Clear All Local Bus Status */
+ __raw_writel(TX4927_PCIC_PCICSTATUS_ALL, &pcicptr->pcicstatus);
+ /* Enable All Local Bus Interrupts */
+ __raw_writel(TX4927_PCIC_PCICSTATUS_ALL, &pcicptr->pcicmask);
+ /* Clear All Initiator Status */
+ __raw_writel(TX4927_PCIC_G2PSTATUS_ALL, &pcicptr->g2pstatus);
+ /* Enable All Initiator Interrupts */
+ __raw_writel(TX4927_PCIC_G2PSTATUS_ALL, &pcicptr->g2pmask);
+ /* Clear All PCI Status Error */
+ __raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff)
+ | (TX4927_PCIC_PCISTATUS_ALL << 16),
+ &pcicptr->pcistatus);
+ /* Enable All PCI Status Error Interrupts */
+ __raw_writel(TX4927_PCIC_PCISTATUS_ALL, &pcicptr->pcimask);
+
+ if (!extarb) {
+ /* Reset Bus Arbiter */
+ __raw_writel(TX4927_PCIC_PBACFG_RPBA, &pcicptr->pbacfg);
+ __raw_writel(0, &pcicptr->pbabm);
+ /* Enable Bus Arbiter */
+ __raw_writel(TX4927_PCIC_PBACFG_PBAEN, &pcicptr->pbacfg);
+ }
+
+ __raw_writel(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
+ | PCI_COMMAND_PARITY | PCI_COMMAND_SERR,
+ &pcicptr->pcistatus);
+ local_irq_restore(flags);
+
+ printk(KERN_DEBUG
+ "PCI: COMMAND=%04x,PCIMASK=%04x,"
+ "TRDYTO=%02x,RETRYTO=%02x,GBWC=%03x\n",
+ __raw_readl(&pcicptr->pcistatus) & 0xffff,
+ __raw_readl(&pcicptr->pcimask) & 0xffff,
+ __raw_readl(&pcicptr->g2ptocnt) & 0xff,
+ (__raw_readl(&pcicptr->g2ptocnt) & 0xff00) >> 8,
+ (__raw_readl(&pcicptr->pciccfg) >> 16) & 0xfff);
+}
+
+static void tx4927_report_pcic_status1(struct tx4927_pcic_reg __iomem *pcicptr)
+{
+ __u16 pcistatus = (__u16)(__raw_readl(&pcicptr->pcistatus) >> 16);
+ __u32 g2pstatus = __raw_readl(&pcicptr->g2pstatus);
+ __u32 pcicstatus = __raw_readl(&pcicptr->pcicstatus);
+ static struct {
+ __u32 flag;
+ const char *str;
+ } pcistat_tbl[] = {
+ { PCI_STATUS_DETECTED_PARITY, "DetectedParityError" },
+ { PCI_STATUS_SIG_SYSTEM_ERROR, "SignaledSystemError" },
+ { PCI_STATUS_REC_MASTER_ABORT, "ReceivedMasterAbort" },
+ { PCI_STATUS_REC_TARGET_ABORT, "ReceivedTargetAbort" },
+ { PCI_STATUS_SIG_TARGET_ABORT, "SignaledTargetAbort" },
+ { PCI_STATUS_PARITY, "MasterParityError" },
+ }, g2pstat_tbl[] = {
+ { TX4927_PCIC_G2PSTATUS_TTOE, "TIOE" },
+ { TX4927_PCIC_G2PSTATUS_RTOE, "RTOE" },
+ }, pcicstat_tbl[] = {
+ { TX4927_PCIC_PCICSTATUS_PME, "PME" },
+ { TX4927_PCIC_PCICSTATUS_TLB, "TLB" },
+ { TX4927_PCIC_PCICSTATUS_NIB, "NIB" },
+ { TX4927_PCIC_PCICSTATUS_ZIB, "ZIB" },
+ { TX4927_PCIC_PCICSTATUS_PERR, "PERR" },
+ { TX4927_PCIC_PCICSTATUS_SERR, "SERR" },
+ { TX4927_PCIC_PCICSTATUS_GBE, "GBE" },
+ { TX4927_PCIC_PCICSTATUS_IWB, "IWB" },
+ };
+ int i, cont;
+
+ printk(KERN_ERR "");
+ if (pcistatus & TX4927_PCIC_PCISTATUS_ALL) {
+ printk(KERN_CONT "pcistat:%04x(", pcistatus);
+ for (i = 0, cont = 0; i < ARRAY_SIZE(pcistat_tbl); i++)
+ if (pcistatus & pcistat_tbl[i].flag)
+ printk(KERN_CONT "%s%s",
+ cont++ ? " " : "", pcistat_tbl[i].str);
+ printk(KERN_CONT ") ");
+ }
+ if (g2pstatus & TX4927_PCIC_G2PSTATUS_ALL) {
+ printk(KERN_CONT "g2pstatus:%08x(", g2pstatus);
+ for (i = 0, cont = 0; i < ARRAY_SIZE(g2pstat_tbl); i++)
+ if (g2pstatus & g2pstat_tbl[i].flag)
+ printk(KERN_CONT "%s%s",
+ cont++ ? " " : "", g2pstat_tbl[i].str);
+ printk(KERN_CONT ") ");
+ }
+ if (pcicstatus & TX4927_PCIC_PCICSTATUS_ALL) {
+ printk(KERN_CONT "pcicstatus:%08x(", pcicstatus);
+ for (i = 0, cont = 0; i < ARRAY_SIZE(pcicstat_tbl); i++)
+ if (pcicstatus & pcicstat_tbl[i].flag)
+ printk(KERN_CONT "%s%s",
+ cont++ ? " " : "", pcicstat_tbl[i].str);
+ printk(KERN_CONT ")");
+ }
+ printk(KERN_CONT "\n");
+}
+
+void tx4927_report_pcic_status(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) {
+ if (pcicptrs[i].pcicptr)
+ tx4927_report_pcic_status1(pcicptrs[i].pcicptr);
+ }
+}
+
+static void tx4927_dump_pcic_settings1(struct tx4927_pcic_reg __iomem *pcicptr)
+{
+ int i;
+ __u32 __iomem *preg = (__u32 __iomem *)pcicptr;
+
+ printk(KERN_INFO "tx4927 pcic (0x%p) settings:", pcicptr);
+ for (i = 0; i < sizeof(struct tx4927_pcic_reg); i += 4, preg++) {
+ if (i % 32 == 0) {
+ printk(KERN_CONT "\n");
+ printk(KERN_INFO "%04x:", i);
+ }
+ /* skip registers with side-effects */
+ if (i == offsetof(struct tx4927_pcic_reg, g2pintack)
+ || i == offsetof(struct tx4927_pcic_reg, g2pspc)
+ || i == offsetof(struct tx4927_pcic_reg, g2pcfgadrs)
+ || i == offsetof(struct tx4927_pcic_reg, g2pcfgdata)) {
+ printk(KERN_CONT " XXXXXXXX");
+ continue;
+ }
+ printk(KERN_CONT " %08x", __raw_readl(preg));
+ }
+ printk(KERN_CONT "\n");
+}
+
+void tx4927_dump_pcic_settings(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) {
+ if (pcicptrs[i].pcicptr)
+ tx4927_dump_pcic_settings1(pcicptrs[i].pcicptr);
+ }
+}
+
+irqreturn_t tx4927_pcierr_interrupt(int irq, void *dev_id)
+{
+ struct pt_regs *regs = get_irq_regs();
+ struct tx4927_pcic_reg __iomem *pcicptr =
+ (struct tx4927_pcic_reg __iomem *)(unsigned long)dev_id;
+
+ if (txx9_pci_err_action != TXX9_PCI_ERR_IGNORE) {
+ printk(KERN_WARNING "PCIERR interrupt at 0x%0*lx\n",
+ (int)(2 * sizeof(unsigned long)), regs->cp0_epc);
+ tx4927_report_pcic_status1(pcicptr);
+ }
+ if (txx9_pci_err_action != TXX9_PCI_ERR_PANIC) {
+ /* clear all pci errors */
+ __raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff)
+ | (TX4927_PCIC_PCISTATUS_ALL << 16),
+ &pcicptr->pcistatus);
+ __raw_writel(TX4927_PCIC_G2PSTATUS_ALL, &pcicptr->g2pstatus);
+ __raw_writel(TX4927_PCIC_PBASTATUS_ALL, &pcicptr->pbastatus);
+ __raw_writel(TX4927_PCIC_PCICSTATUS_ALL, &pcicptr->pcicstatus);
+ return IRQ_HANDLED;
+ }
+ console_verbose();
+ tx4927_dump_pcic_settings1(pcicptr);
+ panic("PCI error.");
+}
+
+#ifdef CONFIG_TOSHIBA_FPCIB0
+static void tx4927_quirk_slc90e66_bridge(struct pci_dev *dev)
+{
+ struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(dev->bus);
+
+ if (!pcicptr)
+ return;
+ if (__raw_readl(&pcicptr->pbacfg) & TX4927_PCIC_PBACFG_PBAEN) {
+ /* Reset Bus Arbiter */
+ __raw_writel(TX4927_PCIC_PBACFG_RPBA, &pcicptr->pbacfg);
+ /*
+ * swap reqBP and reqXP (raise priority of SLC90E66).
+ * SLC90E66(PCI-ISA bridge) is connected to REQ2 on
+ * PCI Backplane board.
+ */
+ __raw_writel(0x72543610, &pcicptr->pbareqport);
+ __raw_writel(0, &pcicptr->pbabm);
+ /* Use Fixed ParkMaster (required by SLC90E66) */
+ __raw_writel(TX4927_PCIC_PBACFG_FIXPA, &pcicptr->pbacfg);
+ /* Enable Bus Arbiter */
+ __raw_writel(TX4927_PCIC_PBACFG_FIXPA |
+ TX4927_PCIC_PBACFG_PBAEN,
+ &pcicptr->pbacfg);
+ printk(KERN_INFO "PCI: Use Fixed Park Master (REQPORT %08x)\n",
+ __raw_readl(&pcicptr->pbareqport));
+ }
+}
+#define PCI_DEVICE_ID_EFAR_SLC90E66_0 0x9460
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_0,
+ tx4927_quirk_slc90e66_bridge);
+#endif
diff --git a/arch/mips/pci/ops-vr41xx.c b/arch/mips/pci/ops-vr41xx.c
new file mode 100644
index 000000000..7b7709aa1
--- /dev/null
+++ b/arch/mips/pci/ops-vr41xx.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ops-vr41xx.c, PCI configuration routines for the PCIU of NEC VR4100 series.
+ *
+ * Copyright (C) 2001-2003 MontaVista Software Inc.
+ * Author: Yoichi Yuasa <source@mvista.com>
+ * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+/*
+ * Changes:
+ * MontaVista Software Inc. <source@mvista.com>
+ * - New creation, NEC VR4122 and VR4131 are supported.
+ */
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+
+#define PCICONFDREG (void __iomem *)KSEG1ADDR(0x0f000c14)
+#define PCICONFAREG (void __iomem *)KSEG1ADDR(0x0f000c18)
+
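+/*
+ * Configuration cycles are run by writing an address to PCICONFAREG and then
+ * accessing PCICONFDREG.  Bus 0 uses a type 0 address with one AD line per
+ * slot (presumably AD[11] upward, hence the rejection of slots below 11);
+ * other buses use a type 1 address with bit 0 set.
+ */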
+static inline int set_pci_configuration_address(unsigned char number,
+ unsigned int devfn, int where)
+{
+ if (number == 0) {
+ /*
+ * Type 0 configuration
+ */
+ if (PCI_SLOT(devfn) < 11 || where > 0xff)
+ return -EINVAL;
+
+ writel((1U << PCI_SLOT(devfn)) | (PCI_FUNC(devfn) << 8) |
+ (where & 0xfc), PCICONFAREG);
+ } else {
+ /*
+ * Type 1 configuration
+ */
+ if (where > 0xff)
+ return -EINVAL;
+
+ writel(((uint32_t)number << 16) | ((devfn & 0xff) << 8) |
+ (where & 0xfc) | 1U, PCICONFAREG);
+ }
+
+ return 0;
+}
+
+static int pci_config_read(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, uint32_t *val)
+{
+ uint32_t data;
+
+ *val = 0xffffffffU;
+ if (set_pci_configuration_address(bus->number, devfn, where) < 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ data = readl(PCICONFDREG);
+
+ switch (size) {
+ case 1:
+ *val = (data >> ((where & 3) << 3)) & 0xffU;
+ break;
+ case 2:
+ *val = (data >> ((where & 2) << 3)) & 0xffffU;
+ break;
+ case 4:
+ *val = data;
+ break;
+ default:
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_config_write(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, uint32_t val)
+{
+ uint32_t data;
+ int shift;
+
+ if (set_pci_configuration_address(bus->number, devfn, where) < 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ data = readl(PCICONFDREG);
+
+ switch (size) {
+ case 1:
+ shift = (where & 3) << 3;
+ data &= ~(0xffU << shift);
+ data |= ((val & 0xffU) << shift);
+ break;
+ case 2:
+ shift = (where & 2) << 3;
+ data &= ~(0xffffU << shift);
+ data |= ((val & 0xffffU) << shift);
+ break;
+ case 4:
+ data = val;
+ break;
+ default:
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ }
+
+ writel(data, PCICONFDREG);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops vr41xx_pci_ops = {
+ .read = pci_config_read,
+ .write = pci_config_write,
+};
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c
new file mode 100644
index 000000000..7285b5667
--- /dev/null
+++ b/arch/mips/pci/pci-alchemy.c
@@ -0,0 +1,537 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Alchemy PCI host mode support.
+ *
+ * Copyright 2001-2003, 2007-2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * Support for all devices (greater than 16) added by David Gathright.
+ */
+
+#include <linux/clk.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/syscore_ops.h>
+#include <linux/vmalloc.h>
+
+#include <asm/dma-coherence.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/tlbmisc.h>
+
+#ifdef CONFIG_PCI_DEBUG
+#define DBG(x...) printk(KERN_DEBUG x)
+#else
+#define DBG(x...) do {} while (0)
+#endif
+
+#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_WRITE 1
+
+struct alchemy_pci_context {
+ struct pci_controller alchemy_pci_ctrl; /* leave as first member! */
+ void __iomem *regs; /* ctrl base */
+ /* tools for wired entry for config space access */
+ unsigned long last_elo0;
+ unsigned long last_elo1;
+ int wired_entry;
+ struct vm_struct *pci_cfg_vm;
+
+ unsigned long pm[12];
+
+ int (*board_map_irq)(const struct pci_dev *d, u8 slot, u8 pin);
+ int (*board_pci_idsel)(unsigned int devsel, int assert);
+};
+
+/* for syscore_ops. There's only one PCI controller on Alchemy chips, so this
+ * should suffice for now.
+ */
+static struct alchemy_pci_context *__alchemy_pci_ctx;
+
+
+/* IO/MEM resources for PCI. Keep the memres in sync with fixup_bigphys_addr
+ * in arch/mips/alchemy/common/setup.c
+ */
+static struct resource alchemy_pci_def_memres = {
+ .start = ALCHEMY_PCI_MEMWIN_START,
+ .end = ALCHEMY_PCI_MEMWIN_END,
+ .name = "PCI memory space",
+ .flags = IORESOURCE_MEM
+};
+
+static struct resource alchemy_pci_def_iores = {
+ .start = ALCHEMY_PCI_IOWIN_START,
+ .end = ALCHEMY_PCI_IOWIN_END,
+ .name = "PCI IO space",
+ .flags = IORESOURCE_IO
+};
+
+static void mod_wired_entry(int entry, unsigned long entrylo0,
+ unsigned long entrylo1, unsigned long entryhi,
+ unsigned long pagemask)
+{
+ unsigned long old_pagemask;
+ unsigned long old_ctx;
+
+ /* Save old context and create impossible VPN2 value */
+ old_ctx = read_c0_entryhi() & MIPS_ENTRYHI_ASID;
+ old_pagemask = read_c0_pagemask();
+ write_c0_index(entry);
+ write_c0_pagemask(pagemask);
+ write_c0_entryhi(entryhi);
+ write_c0_entrylo0(entrylo0);
+ write_c0_entrylo1(entrylo1);
+ tlb_write_indexed();
+ write_c0_entryhi(old_ctx);
+ write_c0_pagemask(old_pagemask);
+}
+
+static void alchemy_pci_wired_entry(struct alchemy_pci_context *ctx)
+{
+ ctx->wired_entry = read_c0_wired();
+ add_wired_entry(0, 0, (unsigned long)ctx->pci_cfg_vm->addr, PM_4K);
+ ctx->last_elo0 = ctx->last_elo1 = ~0;
+}
+
+static int config_access(unsigned char access_type, struct pci_bus *bus,
+ unsigned int dev_fn, unsigned char where, u32 *data)
+{
+ struct alchemy_pci_context *ctx = bus->sysdata;
+ unsigned int device = PCI_SLOT(dev_fn);
+ unsigned int function = PCI_FUNC(dev_fn);
+ unsigned long offset, status, cfg_base, flags, entryLo0, entryLo1, r;
+ int error = PCIBIOS_SUCCESSFUL;
+
+ if (device > 19) {
+ *data = 0xffffffff;
+ return -1;
+ }
+
+ local_irq_save(flags);
+ r = __raw_readl(ctx->regs + PCI_REG_STATCMD) & 0x0000ffff;
+ r |= PCI_STATCMD_STATUS(0x2000);
+ __raw_writel(r, ctx->regs + PCI_REG_STATCMD);
+ wmb();
+
+ /* Allow board vendors to implement their own off-chip IDSEL.
+ * If it doesn't succeed, may as well bail out at this point.
+ */
+ if (ctx->board_pci_idsel(device, 1) == 0) {
+ *data = 0xffffffff;
+ local_irq_restore(flags);
+ return -1;
+ }
+
+ /* Setup the config window */
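+	/* On bus 0 this is a type 0 cycle, with one AD line per device
+	 * (AD[11 + device]) presumably acting as IDSEL; other buses get a
+	 * type 1 style address carrying the bus number.
+	 */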
+ if (bus->number == 0)
+ cfg_base = (1 << device) << 11;
+ else
+ cfg_base = 0x80000000 | (bus->number << 16) | (device << 11);
+
+ /* Setup the lower bits of the 36-bit address */
+ offset = (function << 8) | (where & ~0x3);
+ /* Pick up any address that falls below the page mask */
+ offset |= cfg_base & ~PAGE_MASK;
+
+ /* Page boundary */
+ cfg_base = cfg_base & PAGE_MASK;
+
+ /* To improve performance, if the current device is the same as
+ * the last device accessed, we don't touch the TLB.
+ */
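+	/* A note on the EntryLo encoding used below: the PFN occupies bits
+	 * 29:6, the cache attribute bits 5:3 and D/V/G bits 2:0, so
+	 * (2 << 3) | 7 maps the page uncached, dirty, valid and global, while
+	 * (6 << 26) | (cfg_base >> 6) presumably selects the 36-bit PCI
+	 * configuration window above 4GB.
+	 */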
+ entryLo0 = (6 << 26) | (cfg_base >> 6) | (2 << 3) | 7;
+ entryLo1 = (6 << 26) | (cfg_base >> 6) | (0x1000 >> 6) | (2 << 3) | 7;
+ if ((entryLo0 != ctx->last_elo0) || (entryLo1 != ctx->last_elo1)) {
+ mod_wired_entry(ctx->wired_entry, entryLo0, entryLo1,
+ (unsigned long)ctx->pci_cfg_vm->addr, PM_4K);
+ ctx->last_elo0 = entryLo0;
+ ctx->last_elo1 = entryLo1;
+ }
+
+ if (access_type == PCI_ACCESS_WRITE)
+ __raw_writel(*data, ctx->pci_cfg_vm->addr + offset);
+ else
+ *data = __raw_readl(ctx->pci_cfg_vm->addr + offset);
+ wmb();
+
+ DBG("alchemy-pci: cfg access %d bus %u dev %u at %x dat %x conf %lx\n",
+ access_type, bus->number, device, where, *data, offset);
+
+ /* check for errors, master abort */
+ status = __raw_readl(ctx->regs + PCI_REG_STATCMD);
+ if (status & (1 << 29)) {
+ *data = 0xffffffff;
+ error = -1;
+ DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n",
+ access_type, bus->number, device);
+ } else if ((status >> 28) & 0xf) {
+ DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n",
+ device, (status >> 28) & 0xf);
+
+ /* clear errors */
+ __raw_writel(status & 0xf000ffff, ctx->regs + PCI_REG_STATCMD);
+
+ *data = 0xffffffff;
+ error = -1;
+ }
+
+ /* Take away the IDSEL. */
+ (void)ctx->board_pci_idsel(device, 0);
+
+ local_irq_restore(flags);
+ return error;
+}
+
+static int read_config_byte(struct pci_bus *bus, unsigned int devfn,
+ int where, u8 *val)
+{
+ u32 data;
+ int ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data);
+
+ if (where & 1)
+ data >>= 8;
+ if (where & 2)
+ data >>= 16;
+ *val = data & 0xff;
+ return ret;
+}
+
+static int read_config_word(struct pci_bus *bus, unsigned int devfn,
+ int where, u16 *val)
+{
+ u32 data;
+ int ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data);
+
+ if (where & 2)
+ data >>= 16;
+ *val = data & 0xffff;
+ return ret;
+}
+
+static int read_config_dword(struct pci_bus *bus, unsigned int devfn,
+ int where, u32 *val)
+{
+ return config_access(PCI_ACCESS_READ, bus, devfn, where, val);
+}
+
+static int write_config_byte(struct pci_bus *bus, unsigned int devfn,
+ int where, u8 val)
+{
+ u32 data = 0;
+
+ if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
+ return -1;
+
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+
+ if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
+ return -1;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int write_config_word(struct pci_bus *bus, unsigned int devfn,
+ int where, u16 val)
+{
+ u32 data = 0;
+
+ if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
+ return -1;
+
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+
+ if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
+ return -1;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int write_config_dword(struct pci_bus *bus, unsigned int devfn,
+ int where, u32 val)
+{
+ return config_access(PCI_ACCESS_WRITE, bus, devfn, where, &val);
+}
+
+static int alchemy_pci_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ switch (size) {
+ case 1: {
+ u8 _val;
+ int rc = read_config_byte(bus, devfn, where, &_val);
+
+ *val = _val;
+ return rc;
+ }
+ case 2: {
+ u16 _val;
+ int rc = read_config_word(bus, devfn, where, &_val);
+
+ *val = _val;
+ return rc;
+ }
+ default:
+ return read_config_dword(bus, devfn, where, val);
+ }
+}
+
+static int alchemy_pci_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ switch (size) {
+ case 1:
+ return write_config_byte(bus, devfn, where, (u8) val);
+ case 2:
+ return write_config_word(bus, devfn, where, (u16) val);
+ default:
+ return write_config_dword(bus, devfn, where, val);
+ }
+}
+
+static struct pci_ops alchemy_pci_ops = {
+ .read = alchemy_pci_read,
+ .write = alchemy_pci_write,
+};
+
+static int alchemy_pci_def_idsel(unsigned int devsel, int assert)
+{
+ return 1; /* success */
+}
+
+/* save PCI controller register contents. */
+static int alchemy_pci_suspend(void)
+{
+ struct alchemy_pci_context *ctx = __alchemy_pci_ctx;
+ if (!ctx)
+ return 0;
+
+ ctx->pm[0] = __raw_readl(ctx->regs + PCI_REG_CMEM);
+ ctx->pm[1] = __raw_readl(ctx->regs + PCI_REG_CONFIG) & 0x0009ffff;
+ ctx->pm[2] = __raw_readl(ctx->regs + PCI_REG_B2BMASK_CCH);
+ ctx->pm[3] = __raw_readl(ctx->regs + PCI_REG_B2BBASE0_VID);
+ ctx->pm[4] = __raw_readl(ctx->regs + PCI_REG_B2BBASE1_SID);
+ ctx->pm[5] = __raw_readl(ctx->regs + PCI_REG_MWMASK_DEV);
+ ctx->pm[6] = __raw_readl(ctx->regs + PCI_REG_MWBASE_REV_CCL);
+ ctx->pm[7] = __raw_readl(ctx->regs + PCI_REG_ID);
+ ctx->pm[8] = __raw_readl(ctx->regs + PCI_REG_CLASSREV);
+ ctx->pm[9] = __raw_readl(ctx->regs + PCI_REG_PARAM);
+ ctx->pm[10] = __raw_readl(ctx->regs + PCI_REG_MBAR);
+ ctx->pm[11] = __raw_readl(ctx->regs + PCI_REG_TIMEOUT);
+
+ return 0;
+}
+
+static void alchemy_pci_resume(void)
+{
+ struct alchemy_pci_context *ctx = __alchemy_pci_ctx;
+ if (!ctx)
+ return;
+
+ __raw_writel(ctx->pm[0], ctx->regs + PCI_REG_CMEM);
+ __raw_writel(ctx->pm[2], ctx->regs + PCI_REG_B2BMASK_CCH);
+ __raw_writel(ctx->pm[3], ctx->regs + PCI_REG_B2BBASE0_VID);
+ __raw_writel(ctx->pm[4], ctx->regs + PCI_REG_B2BBASE1_SID);
+ __raw_writel(ctx->pm[5], ctx->regs + PCI_REG_MWMASK_DEV);
+ __raw_writel(ctx->pm[6], ctx->regs + PCI_REG_MWBASE_REV_CCL);
+ __raw_writel(ctx->pm[7], ctx->regs + PCI_REG_ID);
+ __raw_writel(ctx->pm[8], ctx->regs + PCI_REG_CLASSREV);
+ __raw_writel(ctx->pm[9], ctx->regs + PCI_REG_PARAM);
+ __raw_writel(ctx->pm[10], ctx->regs + PCI_REG_MBAR);
+ __raw_writel(ctx->pm[11], ctx->regs + PCI_REG_TIMEOUT);
+ wmb();
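+	/* CONFIG is presumably restored last so the controller only comes
+	 * back up once the window and ID registers above are in place;
+	 * pm[1] holds just the low control bits saved on suspend.
+	 */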
+ __raw_writel(ctx->pm[1], ctx->regs + PCI_REG_CONFIG);
+ wmb();
+
+ /* YAMON on all db1xxx boards wipes the TLB and writes zero to C0_wired
+ * on resume, making it necessary to recreate it as soon as possible.
+ */
+ ctx->wired_entry = 8191; /* impossibly high value */
+ alchemy_pci_wired_entry(ctx); /* install it */
+}
+
+static struct syscore_ops alchemy_pci_pmops = {
+ .suspend = alchemy_pci_suspend,
+ .resume = alchemy_pci_resume,
+};
+
+static int alchemy_pci_probe(struct platform_device *pdev)
+{
+ struct alchemy_pci_platdata *pd = pdev->dev.platform_data;
+ struct alchemy_pci_context *ctx;
+ void __iomem *virt_io;
+ unsigned long val;
+ struct resource *r;
+ struct clk *c;
+ int ret;
+
+ /* need at least PCI IRQ mapping table */
+ if (!pd) {
+ dev_err(&pdev->dev, "need platform data for PCI setup\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ dev_err(&pdev->dev, "no memory for pcictl context\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no pcictl ctrl regs resource\n");
+ ret = -ENODEV;
+ goto out1;
+ }
+
+ if (!request_mem_region(r->start, resource_size(r), pdev->name)) {
+ dev_err(&pdev->dev, "cannot claim pci regs\n");
+ ret = -ENODEV;
+ goto out1;
+ }
+
+ c = clk_get(&pdev->dev, "pci_clko");
+ if (IS_ERR(c)) {
+ dev_err(&pdev->dev, "unable to find PCI clock\n");
+ ret = PTR_ERR(c);
+ goto out2;
+ }
+
+ ret = clk_prepare_enable(c);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot enable PCI clock\n");
+ goto out6;
+ }
+
+ ctx->regs = ioremap(r->start, resource_size(r));
+ if (!ctx->regs) {
+ dev_err(&pdev->dev, "cannot map pci regs\n");
+ ret = -ENODEV;
+ goto out5;
+ }
+
+ /* map parts of the PCI IO area */
+ /* REVISIT: if this changes with a newer variant (doubt it) make this
+ * a platform resource.
+ */
+ virt_io = ioremap(AU1500_PCI_IO_PHYS_ADDR, 0x00100000);
+ if (!virt_io) {
+ dev_err(&pdev->dev, "cannot remap pci io space\n");
+ ret = -ENODEV;
+ goto out3;
+ }
+ ctx->alchemy_pci_ctrl.io_map_base = (unsigned long)virt_io;
+
+ /* Au1500 revisions older than AD have borked coherent PCI */
+ if ((alchemy_get_cputype() == ALCHEMY_CPU_AU1500) &&
+ (read_c0_prid() < 0x01030202) &&
+ (coherentio == IO_COHERENCE_DISABLED)) {
+ val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
+ val |= PCI_CONFIG_NC;
+ __raw_writel(val, ctx->regs + PCI_REG_CONFIG);
+ wmb();
+ dev_info(&pdev->dev, "non-coherent PCI on Au1500 AA/AB/AC\n");
+ }
+
+ if (pd->board_map_irq)
+ ctx->board_map_irq = pd->board_map_irq;
+
+ if (pd->board_pci_idsel)
+ ctx->board_pci_idsel = pd->board_pci_idsel;
+ else
+ ctx->board_pci_idsel = alchemy_pci_def_idsel;
+
+ /* fill in relevant pci_controller members */
+ ctx->alchemy_pci_ctrl.pci_ops = &alchemy_pci_ops;
+ ctx->alchemy_pci_ctrl.mem_resource = &alchemy_pci_def_memres;
+ ctx->alchemy_pci_ctrl.io_resource = &alchemy_pci_def_iores;
+
+ /* we can't ioremap the entire pci config space because it's too large,
+ * nor can we dynamically ioremap it because some drivers use the
+	 * PCI config routines from within atomic context and that becomes a
+ * problem in get_vm_area(). Instead we use one wired TLB entry to
+ * handle all config accesses for all busses.
+ */
+ ctx->pci_cfg_vm = get_vm_area(0x2000, VM_IOREMAP);
+ if (!ctx->pci_cfg_vm) {
+ dev_err(&pdev->dev, "unable to get vm area\n");
+ ret = -ENOMEM;
+ goto out4;
+ }
+ ctx->wired_entry = 8191; /* impossibly high value */
+ alchemy_pci_wired_entry(ctx); /* install it */
+
+ set_io_port_base((unsigned long)ctx->alchemy_pci_ctrl.io_map_base);
+
+ /* board may want to modify bits in the config register, do it now */
+ val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
+ val &= ~pd->pci_cfg_clr;
+ val |= pd->pci_cfg_set;
+ val &= ~PCI_CONFIG_PD; /* clear disable bit */
+ __raw_writel(val, ctx->regs + PCI_REG_CONFIG);
+ wmb();
+
+ __alchemy_pci_ctx = ctx;
+ platform_set_drvdata(pdev, ctx);
+ register_syscore_ops(&alchemy_pci_pmops);
+ register_pci_controller(&ctx->alchemy_pci_ctrl);
+
+ dev_info(&pdev->dev, "PCI controller at %ld MHz\n",
+ clk_get_rate(c) / 1000000);
+
+ return 0;
+
+out4:
+ iounmap(virt_io);
+out3:
+ iounmap(ctx->regs);
+out5:
+ clk_disable_unprepare(c);
+out6:
+ clk_put(c);
+out2:
+ release_mem_region(r->start, resource_size(r));
+out1:
+ kfree(ctx);
+out:
+ return ret;
+}
+
+static struct platform_driver alchemy_pcictl_driver = {
+ .probe = alchemy_pci_probe,
+ .driver = {
+ .name = "alchemy-pci",
+ },
+};
+
+static int __init alchemy_pci_init(void)
+{
+ /* Au1500/Au1550 have PCI */
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1550:
+ return platform_driver_register(&alchemy_pcictl_driver);
+ }
+ return 0;
+}
+arch_initcall(alchemy_pci_init);
+
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct alchemy_pci_context *ctx = dev->sysdata;
+ if (ctx && ctx->board_map_irq)
+ return ctx->board_map_irq(dev, slot, pin);
+ return -1;
+}
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c
new file mode 100644
index 000000000..0b15730ce
--- /dev/null
+++ b/arch/mips/pci/pci-ar2315.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ */
+
+/*
+ * Both the AR2315 and AR2316 chips have a PCI interface unit which supports
+ * DMA and interrupts. The PCI interface supports MMIO accesses, but does not
+ * appear to support I/O ports.
+ *
+ * A read/write operation in the region 0x80000000-0xBFFFFFFF causes
+ * a memory read/write command on the PCI bus. The 30 LSBs of the address on
+ * the bus are taken from the memory read/write request and the 2 MSBs are
+ * determined by the PCI unit configuration.
+ *
+ * To access the configuration space instead of memory, it is necessary to
+ * set the CFG_SEL bit in the PCI_MISC_CONFIG register.
+ *
+ * Devices on the bus can perform DMA requests via the chip's BAR1. The PCI
+ * host controller's BARs are programmed as if an external device were being
+ * programmed, which means that during configuration the IDSEL pin of the
+ * chip must be asserted.
+ *
+ * We know (and support) only one board that uses the PCI interface -
+ * the Fonera 2.0g (FON2202). It has a USB EHCI controller connected to the
+ * AR2315 PCI bus. The IDSEL pin of the USB controller is connected to the
+ * AD[13] line and the IDSEL pin of the AR2315 is connected to the AD[16]
+ * line.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <asm/paccess.h>
+
+/*
+ * PCI Bus Interface Registers
+ */
+#define AR2315_PCI_1MS_REG 0x0008
+
+#define AR2315_PCI_1MS_MASK 0x3FFFF /* # of AHB clk cycles in 1ms */
+
+#define AR2315_PCI_MISC_CONFIG 0x000c
+
+#define AR2315_PCIMISC_TXD_EN 0x00000001 /* Enable TXD for fragments */
+#define AR2315_PCIMISC_CFG_SEL 0x00000002 /* Mem or Config cycles */
+#define AR2315_PCIMISC_GIG_MASK 0x0000000C /* bits 31-30 for pci req */
+#define AR2315_PCIMISC_RST_MODE 0x00000030
+#define AR2315_PCIRST_INPUT 0x00000000 /* 4:5=0 rst is input */
+#define AR2315_PCIRST_LOW 0x00000010 /* 4:5=1 rst to GND */
+#define AR2315_PCIRST_HIGH 0x00000020 /* 4:5=2 rst to VDD */
+#define AR2315_PCIGRANT_EN 0x00000000 /* 6:7=0 early grant en */
+#define AR2315_PCIGRANT_FRAME 0x00000040 /* 6:7=1 grant waits 4 frame */
+#define AR2315_PCIGRANT_IDLE 0x00000080 /* 6:7=2 grant waits 4 idle */
+#define AR2315_PCIGRANT_GAP 0x00000000 /* 6:7=2 grant waits 4 idle */
+#define AR2315_PCICACHE_DIS 0x00001000 /* PCI external access cache
+ * disable */
+
+#define AR2315_PCI_OUT_TSTAMP 0x0010
+
+#define AR2315_PCI_UNCACHE_CFG 0x0014
+
+#define AR2315_PCI_IN_EN 0x0100
+
+#define AR2315_PCI_IN_EN0 0x01 /* Enable chain 0 */
+#define AR2315_PCI_IN_EN1 0x02 /* Enable chain 1 */
+#define AR2315_PCI_IN_EN2 0x04 /* Enable chain 2 */
+#define AR2315_PCI_IN_EN3 0x08 /* Enable chain 3 */
+
+#define AR2315_PCI_IN_DIS 0x0104
+
+#define AR2315_PCI_IN_DIS0 0x01 /* Disable chain 0 */
+#define AR2315_PCI_IN_DIS1 0x02 /* Disable chain 1 */
+#define AR2315_PCI_IN_DIS2 0x04 /* Disable chain 2 */
+#define AR2315_PCI_IN_DIS3 0x08 /* Disable chain 3 */
+
+#define AR2315_PCI_IN_PTR 0x0200
+
+#define AR2315_PCI_OUT_EN 0x0400
+
+#define AR2315_PCI_OUT_EN0 0x01 /* Enable chain 0 */
+
+#define AR2315_PCI_OUT_DIS 0x0404
+
+#define AR2315_PCI_OUT_DIS0 0x01 /* Disable chain 0 */
+
+#define AR2315_PCI_OUT_PTR 0x0408
+
+/* PCI interrupt status (write one to clear) */
+#define AR2315_PCI_ISR 0x0500
+
+#define AR2315_PCI_INT_TX 0x00000001 /* Desc In Completed */
+#define AR2315_PCI_INT_TXOK 0x00000002 /* Desc In OK */
+#define AR2315_PCI_INT_TXERR 0x00000004 /* Desc In ERR */
+#define AR2315_PCI_INT_TXEOL 0x00000008 /* Desc In End-of-List */
+#define AR2315_PCI_INT_RX 0x00000010 /* Desc Out Completed */
+#define AR2315_PCI_INT_RXOK 0x00000020 /* Desc Out OK */
+#define AR2315_PCI_INT_RXERR 0x00000040 /* Desc Out ERR */
+#define AR2315_PCI_INT_RXEOL 0x00000080 /* Desc Out EOL */
+#define AR2315_PCI_INT_TXOOD 0x00000200 /* Desc In Out-of-Desc */
+#define AR2315_PCI_INT_DESCMASK 0x0000FFFF /* Desc Mask */
+#define AR2315_PCI_INT_EXT 0x02000000 /* Extern PCI INTA */
+#define AR2315_PCI_INT_ABORT 0x04000000 /* PCI bus abort event */
+
+/* PCI interrupt mask */
+#define AR2315_PCI_IMR 0x0504
+
+/* Global PCI interrupt enable */
+#define AR2315_PCI_IER 0x0508
+
+#define AR2315_PCI_IER_DISABLE 0x00 /* disable pci interrupts */
+#define AR2315_PCI_IER_ENABLE 0x01 /* enable pci interrupts */
+
+#define AR2315_PCI_HOST_IN_EN 0x0800
+#define AR2315_PCI_HOST_IN_DIS 0x0804
+#define AR2315_PCI_HOST_IN_PTR 0x0810
+#define AR2315_PCI_HOST_OUT_EN 0x0900
+#define AR2315_PCI_HOST_OUT_DIS 0x0904
+#define AR2315_PCI_HOST_OUT_PTR 0x0908
+
+/*
+ * PCI interrupts, which share IP5
+ * Keep ordered according to AR2315_PCI_INT_XXX bits
+ */
+#define AR2315_PCI_IRQ_EXT 25
+#define AR2315_PCI_IRQ_ABORT 26
+#define AR2315_PCI_IRQ_COUNT 27
+
+/* Arbitrary size of memory region to access the configuration space */
+#define AR2315_PCI_CFG_SIZE 0x00100000
+
+#define AR2315_PCI_HOST_SLOT 3
+#define AR2315_PCI_HOST_DEVID ((0xff18 << 16) | PCI_VENDOR_ID_ATHEROS)
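+
+/*
+ * Slot numbers map onto AD lines via the (1 << (13 + dev)) term in
+ * ar2315_pci_cfg_access() below: slot 3 drives AD[16], the AR2315's own
+ * IDSEL, while slot 0 drives AD[13], where the FON2202's EHCI IDSEL is
+ * wired (see the comment at the top of this file).
+ */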
+
+/*
+ * We need some arbitrary non-zero value to be programmed to the BAR1 register
+ * of PCI host controller to enable DMA. The same value should be used as the
+ * offset to calculate the physical address of DMA buffer for PCI devices.
+ */
+#define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000
+
+/* ??? access BAR */
+#define AR2315_PCI_HOST_MBAR0 0x10000000
+/* RAM access BAR */
+#define AR2315_PCI_HOST_MBAR1 AR2315_PCI_HOST_SDRAM_BASEADDR
+/* ??? access BAR */
+#define AR2315_PCI_HOST_MBAR2 0x30000000
+
+struct ar2315_pci_ctrl {
+ void __iomem *cfg_mem;
+ void __iomem *mmr_mem;
+ unsigned irq;
+ unsigned irq_ext;
+ struct irq_domain *domain;
+ struct pci_controller pci_ctrl;
+ struct resource mem_res;
+ struct resource io_res;
+};
+
+static inline dma_addr_t ar2315_dev_offset(struct device *dev)
+{
+ if (dev && dev_is_pci(dev))
+ return AR2315_PCI_HOST_SDRAM_BASEADDR;
+ return 0;
+}
+
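+/* PCI bus masters see system RAM through BAR1, i.e. shifted up by
+ * AR2315_PCI_HOST_SDRAM_BASEADDR, so DMA addresses handed to PCI devices
+ * carry that offset while every other device keeps a 1:1 mapping.
+ */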
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr + ar2315_dev_offset(dev);
+}
+
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr - ar2315_dev_offset(dev);
+}
+
+static inline struct ar2315_pci_ctrl *ar2315_pci_bus_to_apc(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+
+ return container_of(hose, struct ar2315_pci_ctrl, pci_ctrl);
+}
+
+static inline u32 ar2315_pci_reg_read(struct ar2315_pci_ctrl *apc, u32 reg)
+{
+ return __raw_readl(apc->mmr_mem + reg);
+}
+
+static inline void ar2315_pci_reg_write(struct ar2315_pci_ctrl *apc, u32 reg,
+ u32 val)
+{
+ __raw_writel(val, apc->mmr_mem + reg);
+}
+
+static inline void ar2315_pci_reg_mask(struct ar2315_pci_ctrl *apc, u32 reg,
+ u32 mask, u32 val)
+{
+ u32 ret = ar2315_pci_reg_read(apc, reg);
+
+ ret &= ~mask;
+ ret |= val;
+ ar2315_pci_reg_write(apc, reg, ret);
+}
+
+static int ar2315_pci_cfg_access(struct ar2315_pci_ctrl *apc, unsigned devfn,
+ int where, int size, u32 *ptr, bool write)
+{
+ int func = PCI_FUNC(devfn);
+ int dev = PCI_SLOT(devfn);
+ u32 addr = (1 << (13 + dev)) | (func << 8) | (where & ~3);
+ u32 mask = 0xffffffff >> 8 * (4 - size);
+ u32 sh = (where & 3) * 8;
+ u32 value, isr;
+
+ /* Prevent access past the remapped area */
+ if (addr >= AR2315_PCI_CFG_SIZE || dev > 18)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Clear pending errors */
+ ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT);
+ /* Select Configuration access */
+ ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG, 0,
+ AR2315_PCIMISC_CFG_SEL);
+
+ mb(); /* PCI must see space change before we begin */
+
+ value = __raw_readl(apc->cfg_mem + addr);
+
+ isr = ar2315_pci_reg_read(apc, AR2315_PCI_ISR);
+
+ if (isr & AR2315_PCI_INT_ABORT)
+ goto exit_err;
+
+ if (write) {
+ value = (value & ~(mask << sh)) | *ptr << sh;
+ __raw_writel(value, apc->cfg_mem + addr);
+ isr = ar2315_pci_reg_read(apc, AR2315_PCI_ISR);
+ if (isr & AR2315_PCI_INT_ABORT)
+ goto exit_err;
+ } else {
+ *ptr = (value >> sh) & mask;
+ }
+
+ goto exit;
+
+exit_err:
+ ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT);
+ if (!write)
+ *ptr = 0xffffffff;
+
+exit:
+ /* Select Memory access */
+ ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG, AR2315_PCIMISC_CFG_SEL,
+ 0);
+
+ return isr & AR2315_PCI_INT_ABORT ? PCIBIOS_DEVICE_NOT_FOUND :
+ PCIBIOS_SUCCESSFUL;
+}
+
+static inline int ar2315_pci_local_cfg_rd(struct ar2315_pci_ctrl *apc,
+ unsigned devfn, int where, u32 *val)
+{
+ return ar2315_pci_cfg_access(apc, devfn, where, sizeof(u32), val,
+ false);
+}
+
+static inline int ar2315_pci_local_cfg_wr(struct ar2315_pci_ctrl *apc,
+ unsigned devfn, int where, u32 val)
+{
+ return ar2315_pci_cfg_access(apc, devfn, where, sizeof(u32), &val,
+ true);
+}
+
+static int ar2315_pci_cfg_read(struct pci_bus *bus, unsigned devfn, int where,
+ int size, u32 *value)
+{
+ struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(bus);
+
+ if (PCI_SLOT(devfn) == AR2315_PCI_HOST_SLOT)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return ar2315_pci_cfg_access(apc, devfn, where, size, value, false);
+}
+
+static int ar2315_pci_cfg_write(struct pci_bus *bus, unsigned devfn, int where,
+ int size, u32 value)
+{
+ struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(bus);
+
+ if (PCI_SLOT(devfn) == AR2315_PCI_HOST_SLOT)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return ar2315_pci_cfg_access(apc, devfn, where, size, &value, true);
+}
+
+static struct pci_ops ar2315_pci_ops = {
+ .read = ar2315_pci_cfg_read,
+ .write = ar2315_pci_cfg_write,
+};
+
+static int ar2315_pci_host_setup(struct ar2315_pci_ctrl *apc)
+{
+ unsigned devfn = PCI_DEVFN(AR2315_PCI_HOST_SLOT, 0);
+ int res;
+ u32 id;
+
+ res = ar2315_pci_local_cfg_rd(apc, devfn, PCI_VENDOR_ID, &id);
+ if (res != PCIBIOS_SUCCESSFUL || id != AR2315_PCI_HOST_DEVID)
+ return -ENODEV;
+
+ /* Program MBARs */
+ ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_0,
+ AR2315_PCI_HOST_MBAR0);
+ ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_1,
+ AR2315_PCI_HOST_MBAR1);
+ ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_2,
+ AR2315_PCI_HOST_MBAR2);
+
+ /* Run */
+ ar2315_pci_local_cfg_wr(apc, devfn, PCI_COMMAND, PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER | PCI_COMMAND_SPECIAL |
+ PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY |
+ PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK);
+
+ return 0;
+}
+
+static void ar2315_pci_irq_handler(struct irq_desc *desc)
+{
+ struct ar2315_pci_ctrl *apc = irq_desc_get_handler_data(desc);
+ u32 pending = ar2315_pci_reg_read(apc, AR2315_PCI_ISR) &
+ ar2315_pci_reg_read(apc, AR2315_PCI_IMR);
+ unsigned pci_irq = 0;
+
+ if (pending)
+ pci_irq = irq_find_mapping(apc->domain, __ffs(pending));
+
+ if (pci_irq)
+ generic_handle_irq(pci_irq);
+ else
+ spurious_interrupt();
+}
+
+static void ar2315_pci_irq_mask(struct irq_data *d)
+{
+ struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d);
+
+ ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, BIT(d->hwirq), 0);
+}
+
+static void ar2315_pci_irq_mask_ack(struct irq_data *d)
+{
+ struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d);
+ u32 m = BIT(d->hwirq);
+
+ ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, m, 0);
+ ar2315_pci_reg_write(apc, AR2315_PCI_ISR, m);
+}
+
+static void ar2315_pci_irq_unmask(struct irq_data *d)
+{
+ struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d);
+
+ ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, 0, BIT(d->hwirq));
+}
+
+static struct irq_chip ar2315_pci_irq_chip = {
+ .name = "AR2315-PCI",
+ .irq_mask = ar2315_pci_irq_mask,
+ .irq_mask_ack = ar2315_pci_irq_mask_ack,
+ .irq_unmask = ar2315_pci_irq_unmask,
+};
+
+static int ar2315_pci_irq_map(struct irq_domain *d, unsigned irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &ar2315_pci_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, d->host_data);
+ return 0;
+}
+
+static struct irq_domain_ops ar2315_pci_irq_domain_ops = {
+ .map = ar2315_pci_irq_map,
+};
+
+static void ar2315_pci_irq_init(struct ar2315_pci_ctrl *apc)
+{
+ ar2315_pci_reg_mask(apc, AR2315_PCI_IER, AR2315_PCI_IER_ENABLE, 0);
+ ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, (AR2315_PCI_INT_ABORT |
+ AR2315_PCI_INT_EXT), 0);
+
+ apc->irq_ext = irq_create_mapping(apc->domain, AR2315_PCI_IRQ_EXT);
+
+ irq_set_chained_handler_and_data(apc->irq, ar2315_pci_irq_handler,
+ apc);
+
+ /* Clear any pending Abort or external Interrupts
+ * and enable interrupt processing */
+ ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT |
+ AR2315_PCI_INT_EXT);
+ ar2315_pci_reg_mask(apc, AR2315_PCI_IER, 0, AR2315_PCI_IER_ENABLE);
+}
+
+static int ar2315_pci_probe(struct platform_device *pdev)
+{
+ struct ar2315_pci_ctrl *apc;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq, err;
+
+ apc = devm_kzalloc(dev, sizeof(*apc), GFP_KERNEL);
+ if (!apc)
+ return -ENOMEM;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+ apc->irq = irq;
+
+ apc->mmr_mem = devm_platform_ioremap_resource_byname(pdev,
+ "ar2315-pci-ctrl");
+ if (IS_ERR(apc->mmr_mem))
+ return PTR_ERR(apc->mmr_mem);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ar2315-pci-ext");
+ if (!res)
+ return -EINVAL;
+
+ apc->mem_res.name = "AR2315 PCI mem space";
+ apc->mem_res.parent = res;
+ apc->mem_res.start = res->start;
+ apc->mem_res.end = res->end;
+ apc->mem_res.flags = IORESOURCE_MEM;
+
+ /* Remap PCI config space */
+ apc->cfg_mem = devm_ioremap(dev, res->start,
+ AR2315_PCI_CFG_SIZE);
+ if (!apc->cfg_mem) {
+ dev_err(dev, "failed to remap PCI config space\n");
+ return -ENOMEM;
+ }
+
+ /* Reset the PCI bus by setting bits 5-4 in PCI_MCFG */
+ ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG,
+ AR2315_PCIMISC_RST_MODE,
+ AR2315_PCIRST_LOW);
+ msleep(100);
+
+ /* Bring the PCI out of reset */
+ ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG,
+ AR2315_PCIMISC_RST_MODE,
+ AR2315_PCIRST_HIGH | AR2315_PCICACHE_DIS | 0x8);
+
+ ar2315_pci_reg_write(apc, AR2315_PCI_UNCACHE_CFG,
+ 0x1E | /* 1GB uncached */
+ (1 << 5) | /* Enable uncached */
+ (0x2 << 30) /* Base: 0x80000000 */);
+ ar2315_pci_reg_read(apc, AR2315_PCI_UNCACHE_CFG);
+
+ msleep(500);
+
+ err = ar2315_pci_host_setup(apc);
+ if (err)
+ return err;
+
+ apc->domain = irq_domain_add_linear(NULL, AR2315_PCI_IRQ_COUNT,
+ &ar2315_pci_irq_domain_ops, apc);
+ if (!apc->domain) {
+ dev_err(dev, "failed to add IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ ar2315_pci_irq_init(apc);
+
+ /* PCI controller does not support I/O ports */
+ apc->io_res.name = "AR2315 IO space";
+ apc->io_res.start = 0;
+ apc->io_res.end = 0;
+	apc->io_res.flags = IORESOURCE_IO;
+
+ apc->pci_ctrl.pci_ops = &ar2315_pci_ops;
+	apc->pci_ctrl.mem_resource = &apc->mem_res;
+	apc->pci_ctrl.io_resource = &apc->io_res;
+
+ register_pci_controller(&apc->pci_ctrl);
+
+ dev_info(dev, "register PCI controller\n");
+
+ return 0;
+}
+
+static struct platform_driver ar2315_pci_driver = {
+ .probe = ar2315_pci_probe,
+ .driver = {
+ .name = "ar2315-pci",
+ },
+};
+
+static int __init ar2315_pci_init(void)
+{
+ return platform_driver_register(&ar2315_pci_driver);
+}
+arch_initcall(ar2315_pci_init);
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(dev->bus);
+
+ return slot ? 0 : apc->irq_ext;
+}
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
new file mode 100644
index 000000000..118760b3f
--- /dev/null
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Atheros AR71xx PCI host controller driver
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15 BSP
+ */
+
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/ath79.h>
+
+#define AR71XX_PCI_REG_CRP_AD_CBE 0x00
+#define AR71XX_PCI_REG_CRP_WRDATA 0x04
+#define AR71XX_PCI_REG_CRP_RDDATA 0x08
+#define AR71XX_PCI_REG_CFG_AD 0x0c
+#define AR71XX_PCI_REG_CFG_CBE 0x10
+#define AR71XX_PCI_REG_CFG_WRDATA 0x14
+#define AR71XX_PCI_REG_CFG_RDDATA 0x18
+#define AR71XX_PCI_REG_PCI_ERR 0x1c
+#define AR71XX_PCI_REG_PCI_ERR_ADDR 0x20
+#define AR71XX_PCI_REG_AHB_ERR 0x24
+#define AR71XX_PCI_REG_AHB_ERR_ADDR 0x28
+
+#define AR71XX_PCI_CRP_CMD_WRITE 0x00010000
+#define AR71XX_PCI_CRP_CMD_READ 0x00000000
+#define AR71XX_PCI_CFG_CMD_READ 0x0000000a
+#define AR71XX_PCI_CFG_CMD_WRITE 0x0000000b
+
+#define AR71XX_PCI_INT_CORE BIT(4)
+#define AR71XX_PCI_INT_DEV2 BIT(2)
+#define AR71XX_PCI_INT_DEV1 BIT(1)
+#define AR71XX_PCI_INT_DEV0 BIT(0)
+
+#define AR71XX_PCI_IRQ_COUNT 5
+
+struct ar71xx_pci_controller {
+ void __iomem *cfg_base;
+ int irq;
+ int irq_base;
+ struct pci_controller pci_ctrl;
+ struct resource io_res;
+ struct resource mem_res;
+};
+
+/* Byte lane enable bits */
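+/* Indexed as [size & 3][where & 3]; PCI byte enables are active low, so
+ * e.g. 0xe enables byte lane 0 only, and 0xf marks size/offset combinations
+ * that cannot be expressed and that trip the BUG_ON() below.
+ */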
+static const u8 ar71xx_pci_ble_table[4][4] = {
+ {0x0, 0xf, 0xf, 0xf},
+ {0xe, 0xd, 0xb, 0x7},
+ {0xc, 0xf, 0x3, 0xf},
+ {0xf, 0xf, 0xf, 0xf},
+};
+
+static const u32 ar71xx_pci_read_mask[8] = {
+ 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0
+};
+
+static inline u32 ar71xx_pci_get_ble(int where, int size, int local)
+{
+ u32 t;
+
+ t = ar71xx_pci_ble_table[size & 3][where & 3];
+ BUG_ON(t == 0xf);
+ t <<= (local) ? 20 : 4;
+
+ return t;
+}
+
+static inline u32 ar71xx_pci_bus_addr(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ u32 ret;
+
+ if (!bus->number) {
+ /* type 0 */
+ ret = (1 << PCI_SLOT(devfn)) | (PCI_FUNC(devfn) << 8) |
+ (where & ~3);
+ } else {
+ /* type 1 */
+ ret = (bus->number << 16) | (PCI_SLOT(devfn) << 11) |
+ (PCI_FUNC(devfn) << 8) | (where & ~3) | 1;
+ }
+
+ return ret;
+}
+
+static inline struct ar71xx_pci_controller *
+pci_bus_to_ar71xx_controller(struct pci_bus *bus)
+{
+ struct pci_controller *hose;
+
+ hose = (struct pci_controller *) bus->sysdata;
+ return container_of(hose, struct ar71xx_pci_controller, pci_ctrl);
+}
+
+static int ar71xx_pci_check_error(struct ar71xx_pci_controller *apc, int quiet)
+{
+ void __iomem *base = apc->cfg_base;
+ u32 pci_err;
+ u32 ahb_err;
+
+ pci_err = __raw_readl(base + AR71XX_PCI_REG_PCI_ERR) & 3;
+ if (pci_err) {
+ if (!quiet) {
+ u32 addr;
+
+ addr = __raw_readl(base + AR71XX_PCI_REG_PCI_ERR_ADDR);
+ pr_crit("ar71xx: %s bus error %d at addr 0x%x\n",
+ "PCI", pci_err, addr);
+ }
+
+ /* clear PCI error status */
+ __raw_writel(pci_err, base + AR71XX_PCI_REG_PCI_ERR);
+ }
+
+ ahb_err = __raw_readl(base + AR71XX_PCI_REG_AHB_ERR) & 1;
+ if (ahb_err) {
+ if (!quiet) {
+ u32 addr;
+
+ addr = __raw_readl(base + AR71XX_PCI_REG_AHB_ERR_ADDR);
+ pr_crit("ar71xx: %s bus error %d at addr 0x%x\n",
+ "AHB", ahb_err, addr);
+ }
+
+ /* clear AHB error status */
+ __raw_writel(ahb_err, base + AR71XX_PCI_REG_AHB_ERR);
+ }
+
+ return !!(ahb_err | pci_err);
+}
+
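+/* "Local" writes go through the CRP registers and target the host
+ * controller's own configuration space (e.g. its COMMAND register in the
+ * probe routine); ordinary device config cycles go through the
+ * CFG_AD/CFG_CBE registers via ar71xx_pci_set_cfgaddr() below.
+ */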
+static inline void ar71xx_pci_local_write(struct ar71xx_pci_controller *apc,
+ int where, int size, u32 value)
+{
+ void __iomem *base = apc->cfg_base;
+ u32 ad_cbe;
+
+ value = value << (8 * (where & 3));
+
+ ad_cbe = AR71XX_PCI_CRP_CMD_WRITE | (where & ~3);
+ ad_cbe |= ar71xx_pci_get_ble(where, size, 1);
+
+ __raw_writel(ad_cbe, base + AR71XX_PCI_REG_CRP_AD_CBE);
+ __raw_writel(value, base + AR71XX_PCI_REG_CRP_WRDATA);
+}
+
+static inline int ar71xx_pci_set_cfgaddr(struct pci_bus *bus,
+ unsigned int devfn,
+ int where, int size, u32 cmd)
+{
+ struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
+ void __iomem *base = apc->cfg_base;
+ u32 addr;
+
+ addr = ar71xx_pci_bus_addr(bus, devfn, where);
+
+ __raw_writel(addr, base + AR71XX_PCI_REG_CFG_AD);
+ __raw_writel(cmd | ar71xx_pci_get_ble(where, size, 0),
+ base + AR71XX_PCI_REG_CFG_CBE);
+
+ return ar71xx_pci_check_error(apc, 1);
+}
+
+static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
+ void __iomem *base = apc->cfg_base;
+ u32 data;
+ int err;
+ int ret;
+
+ ret = PCIBIOS_SUCCESSFUL;
+ data = ~0;
+
+ err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
+ AR71XX_PCI_CFG_CMD_READ);
+ if (err)
+ ret = PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ data = __raw_readl(base + AR71XX_PCI_REG_CFG_RDDATA);
+
+ *value = (data >> (8 * (where & 3))) & ar71xx_pci_read_mask[size & 7];
+
+ return ret;
+}
+
+static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
+ void __iomem *base = apc->cfg_base;
+ int err;
+ int ret;
+
+ value = value << (8 * (where & 3));
+ ret = PCIBIOS_SUCCESSFUL;
+
+ err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
+ AR71XX_PCI_CFG_CMD_WRITE);
+ if (err)
+ ret = PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ __raw_writel(value, base + AR71XX_PCI_REG_CFG_WRDATA);
+
+ return ret;
+}
+
+static struct pci_ops ar71xx_pci_ops = {
+ .read = ar71xx_pci_read_config,
+ .write = ar71xx_pci_write_config,
+};
+
+static void ar71xx_pci_irq_handler(struct irq_desc *desc)
+{
+ struct ar71xx_pci_controller *apc;
+ void __iomem *base = ath79_reset_base;
+ u32 pending;
+
+ apc = irq_desc_get_handler_data(desc);
+
+ pending = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_STATUS) &
+ __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+
+ if (pending & AR71XX_PCI_INT_DEV0)
+ generic_handle_irq(apc->irq_base + 0);
+
+ else if (pending & AR71XX_PCI_INT_DEV1)
+ generic_handle_irq(apc->irq_base + 1);
+
+ else if (pending & AR71XX_PCI_INT_DEV2)
+ generic_handle_irq(apc->irq_base + 2);
+
+ else if (pending & AR71XX_PCI_INT_CORE)
+ generic_handle_irq(apc->irq_base + 4);
+
+ else
+ spurious_interrupt();
+}
+
+static void ar71xx_pci_irq_unmask(struct irq_data *d)
+{
+ struct ar71xx_pci_controller *apc;
+ unsigned int irq;
+ void __iomem *base = ath79_reset_base;
+ u32 t;
+
+ apc = irq_data_get_irq_chip_data(d);
+ irq = d->irq - apc->irq_base;
+
+ t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+ __raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+
+ /* flush write */
+ __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+}
+
+static void ar71xx_pci_irq_mask(struct irq_data *d)
+{
+ struct ar71xx_pci_controller *apc;
+ unsigned int irq;
+ void __iomem *base = ath79_reset_base;
+ u32 t;
+
+ apc = irq_data_get_irq_chip_data(d);
+ irq = d->irq - apc->irq_base;
+
+ t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+ __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+
+ /* flush write */
+ __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+}
+
+static struct irq_chip ar71xx_pci_irq_chip = {
+ .name = "AR71XX PCI",
+ .irq_mask = ar71xx_pci_irq_mask,
+ .irq_unmask = ar71xx_pci_irq_unmask,
+ .irq_mask_ack = ar71xx_pci_irq_mask,
+};
+
+static void ar71xx_pci_irq_init(struct ar71xx_pci_controller *apc)
+{
+ void __iomem *base = ath79_reset_base;
+ int i;
+
+ __raw_writel(0, base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+ __raw_writel(0, base + AR71XX_RESET_REG_PCI_INT_STATUS);
+
+ BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR71XX_PCI_IRQ_COUNT);
+
+ apc->irq_base = ATH79_PCI_IRQ_BASE;
+ for (i = apc->irq_base;
+ i < apc->irq_base + AR71XX_PCI_IRQ_COUNT; i++) {
+ irq_set_chip_and_handler(i, &ar71xx_pci_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(i, apc);
+ }
+
+ irq_set_chained_handler_and_data(apc->irq, ar71xx_pci_irq_handler,
+ apc);
+}
+
+static void ar71xx_pci_reset(void)
+{
+ ath79_device_reset_set(AR71XX_RESET_PCI_BUS | AR71XX_RESET_PCI_CORE);
+ mdelay(100);
+
+ ath79_device_reset_clear(AR71XX_RESET_PCI_BUS | AR71XX_RESET_PCI_CORE);
+ mdelay(100);
+
+ ath79_ddr_set_pci_windows();
+ mdelay(100);
+}
+
+static int ar71xx_pci_probe(struct platform_device *pdev)
+{
+ struct ar71xx_pci_controller *apc;
+ struct resource *res;
+ u32 t;
+
+ apc = devm_kzalloc(&pdev->dev, sizeof(struct ar71xx_pci_controller),
+ GFP_KERNEL);
+ if (!apc)
+ return -ENOMEM;
+
+ apc->cfg_base = devm_platform_ioremap_resource_byname(pdev,
+ "cfg_base");
+ if (IS_ERR(apc->cfg_base))
+ return PTR_ERR(apc->cfg_base);
+
+ apc->irq = platform_get_irq(pdev, 0);
+ if (apc->irq < 0)
+ return -EINVAL;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base");
+ if (!res)
+ return -EINVAL;
+
+ apc->io_res.parent = res;
+ apc->io_res.name = "PCI IO space";
+ apc->io_res.start = res->start;
+ apc->io_res.end = res->end;
+ apc->io_res.flags = IORESOURCE_IO;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem_base");
+ if (!res)
+ return -EINVAL;
+
+ apc->mem_res.parent = res;
+ apc->mem_res.name = "PCI memory space";
+ apc->mem_res.start = res->start;
+ apc->mem_res.end = res->end;
+ apc->mem_res.flags = IORESOURCE_MEM;
+
+ ar71xx_pci_reset();
+
+ /* setup COMMAND register */
+ t = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE
+ | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
+ ar71xx_pci_local_write(apc, PCI_COMMAND, 4, t);
+
+ /* clear bus errors */
+ ar71xx_pci_check_error(apc, 1);
+
+ ar71xx_pci_irq_init(apc);
+
+ apc->pci_ctrl.pci_ops = &ar71xx_pci_ops;
+ apc->pci_ctrl.mem_resource = &apc->mem_res;
+ apc->pci_ctrl.io_resource = &apc->io_res;
+
+ register_pci_controller(&apc->pci_ctrl);
+
+ return 0;
+}
+
+static struct platform_driver ar71xx_pci_driver = {
+ .probe = ar71xx_pci_probe,
+ .driver = {
+ .name = "ar71xx-pci",
+ },
+};
+
+static int __init ar71xx_pci_init(void)
+{
+ return platform_driver_register(&ar71xx_pci_driver);
+}
+
+postcore_initcall(ar71xx_pci_init);
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
new file mode 100644
index 000000000..807558b25
--- /dev/null
+++ b/arch/mips/pci/pci-ar724x.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Atheros AR724X PCI host controller driver
+ *
+ * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+
+#define AR724X_PCI_REG_APP 0x00
+#define AR724X_PCI_REG_RESET 0x18
+#define AR724X_PCI_REG_INT_STATUS 0x4c
+#define AR724X_PCI_REG_INT_MASK 0x50
+
+#define AR724X_PCI_APP_LTSSM_ENABLE BIT(0)
+
+#define AR724X_PCI_RESET_LINK_UP BIT(0)
+
+#define AR724X_PCI_INT_DEV0 BIT(14)
+
+#define AR724X_PCI_IRQ_COUNT 1
+
+#define AR7240_BAR0_WAR_VALUE 0xffff
+
+#define AR724X_PCI_CMD_INIT (PCI_COMMAND_MEMORY | \
+ PCI_COMMAND_MASTER | \
+ PCI_COMMAND_INVALIDATE | \
+ PCI_COMMAND_PARITY | \
+ PCI_COMMAND_SERR | \
+ PCI_COMMAND_FAST_BACK)
+
+struct ar724x_pci_controller {
+ void __iomem *devcfg_base;
+ void __iomem *ctrl_base;
+ void __iomem *crp_base;
+
+ int irq;
+ int irq_base;
+
+ bool link_up;
+ bool bar0_is_cached;
+ u32 bar0_value;
+
+ struct pci_controller pci_controller;
+ struct resource io_res;
+ struct resource mem_res;
+};
+
+static inline bool ar724x_pci_check_link(struct ar724x_pci_controller *apc)
+{
+ u32 reset;
+
+ reset = __raw_readl(apc->ctrl_base + AR724X_PCI_REG_RESET);
+ return reset & AR724X_PCI_RESET_LINK_UP;
+}
+
+static inline struct ar724x_pci_controller *
+pci_bus_to_ar724x_controller(struct pci_bus *bus)
+{
+ struct pci_controller *hose;
+
+ hose = (struct pci_controller *) bus->sysdata;
+ return container_of(hose, struct ar724x_pci_controller, pci_controller);
+}
+
+static int ar724x_pci_local_write(struct ar724x_pci_controller *apc,
+ int where, int size, u32 value)
+{
+ void __iomem *base;
+ u32 data;
+ int s;
+
+ WARN_ON(where & (size - 1));
+
+ if (!apc->link_up)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ base = apc->crp_base;
+ data = __raw_readl(base + (where & ~3));
+
+ switch (size) {
+ case 1:
+ s = ((where & 3) * 8);
+ data &= ~(0xff << s);
+ data |= ((value & 0xff) << s);
+ break;
+ case 2:
+ s = ((where & 2) * 8);
+ data &= ~(0xffff << s);
+ data |= ((value & 0xffff) << s);
+ break;
+ case 4:
+ data = value;
+ break;
+ default:
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ __raw_writel(data, base + (where & ~3));
+ /* flush write */
+ __raw_readl(base + (where & ~3));
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, uint32_t *value)
+{
+ struct ar724x_pci_controller *apc;
+ void __iomem *base;
+ u32 data;
+
+ apc = pci_bus_to_ar724x_controller(bus);
+ if (!apc->link_up)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (devfn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ base = apc->devcfg_base;
+ data = __raw_readl(base + (where & ~3));
+
+ switch (size) {
+ case 1:
+ if (where & 1)
+ data >>= 8;
+ if (where & 2)
+ data >>= 16;
+ data &= 0xff;
+ break;
+ case 2:
+ if (where & 2)
+ data >>= 16;
+ data &= 0xffff;
+ break;
+ case 4:
+ break;
+ default:
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (where == PCI_BASE_ADDRESS_0 && size == 4 &&
+ apc->bar0_is_cached) {
+ /* use the cached value */
+ *value = apc->bar0_value;
+ } else {
+ *value = data;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, uint32_t value)
+{
+ struct ar724x_pci_controller *apc;
+ void __iomem *base;
+ u32 data;
+ int s;
+
+ apc = pci_bus_to_ar724x_controller(bus);
+ if (!apc->link_up)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (devfn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (soc_is_ar7240() && where == PCI_BASE_ADDRESS_0 && size == 4) {
+ if (value != 0xffffffff) {
+ /*
+ * WAR for a hw issue. If the BAR0 register of the
+ * device is set to the proper base address, the
+ * memory space of the device is not accessible.
+ *
+ * Cache the intended value so it can be read back,
+ * and write a SoC specific constant value to the
+ * BAR0 register in order to make the device memory
+ * accessible.
+ */
+ apc->bar0_is_cached = true;
+ apc->bar0_value = value;
+
+ value = AR7240_BAR0_WAR_VALUE;
+ } else {
+ apc->bar0_is_cached = false;
+ }
+ }
+
+ base = apc->devcfg_base;
+ data = __raw_readl(base + (where & ~3));
+
+ switch (size) {
+ case 1:
+ s = ((where & 3) * 8);
+ data &= ~(0xff << s);
+ data |= ((value & 0xff) << s);
+ break;
+ case 2:
+ s = ((where & 2) * 8);
+ data &= ~(0xffff << s);
+ data |= ((value & 0xffff) << s);
+ break;
+ case 4:
+ data = value;
+ break;
+ default:
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ __raw_writel(data, base + (where & ~3));
+ /* flush write */
+ __raw_readl(base + (where & ~3));
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops ar724x_pci_ops = {
+ .read = ar724x_pci_read,
+ .write = ar724x_pci_write,
+};
+
+static void ar724x_pci_irq_handler(struct irq_desc *desc)
+{
+ struct ar724x_pci_controller *apc;
+ void __iomem *base;
+ u32 pending;
+
+ apc = irq_desc_get_handler_data(desc);
+ base = apc->ctrl_base;
+
+ pending = __raw_readl(base + AR724X_PCI_REG_INT_STATUS) &
+ __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+
+ if (pending & AR724X_PCI_INT_DEV0)
+ generic_handle_irq(apc->irq_base + 0);
+
+ else
+ spurious_interrupt();
+}
+
+static void ar724x_pci_irq_unmask(struct irq_data *d)
+{
+ struct ar724x_pci_controller *apc;
+ void __iomem *base;
+ int offset;
+ u32 t;
+
+ apc = irq_data_get_irq_chip_data(d);
+ base = apc->ctrl_base;
+ offset = apc->irq_base - d->irq;
+
+ switch (offset) {
+ case 0:
+ t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+ __raw_writel(t | AR724X_PCI_INT_DEV0,
+ base + AR724X_PCI_REG_INT_MASK);
+ /* flush write */
+ __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+ }
+}
+
+static void ar724x_pci_irq_mask(struct irq_data *d)
+{
+ struct ar724x_pci_controller *apc;
+ void __iomem *base;
+ int offset;
+ u32 t;
+
+ apc = irq_data_get_irq_chip_data(d);
+ base = apc->ctrl_base;
+ offset = apc->irq_base - d->irq;
+
+ switch (offset) {
+ case 0:
+ t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+ __raw_writel(t & ~AR724X_PCI_INT_DEV0,
+ base + AR724X_PCI_REG_INT_MASK);
+
+ /* flush write */
+ __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+
+ t = __raw_readl(base + AR724X_PCI_REG_INT_STATUS);
+ __raw_writel(t | AR724X_PCI_INT_DEV0,
+ base + AR724X_PCI_REG_INT_STATUS);
+
+ /* flush write */
+ __raw_readl(base + AR724X_PCI_REG_INT_STATUS);
+ }
+}
+
+static struct irq_chip ar724x_pci_irq_chip = {
+ .name = "AR724X PCI ",
+ .irq_mask = ar724x_pci_irq_mask,
+ .irq_unmask = ar724x_pci_irq_unmask,
+ .irq_mask_ack = ar724x_pci_irq_mask,
+};
+
+static void ar724x_pci_irq_init(struct ar724x_pci_controller *apc,
+ int id)
+{
+ void __iomem *base;
+ int i;
+
+ base = apc->ctrl_base;
+
+ __raw_writel(0, base + AR724X_PCI_REG_INT_MASK);
+ __raw_writel(0, base + AR724X_PCI_REG_INT_STATUS);
+
+ apc->irq_base = ATH79_PCI_IRQ_BASE + (id * AR724X_PCI_IRQ_COUNT);
+
+ for (i = apc->irq_base;
+ i < apc->irq_base + AR724X_PCI_IRQ_COUNT; i++) {
+ irq_set_chip_and_handler(i, &ar724x_pci_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(i, apc);
+ }
+
+ irq_set_chained_handler_and_data(apc->irq, ar724x_pci_irq_handler,
+ apc);
+}
+
+static void ar724x_pci_hw_init(struct ar724x_pci_controller *apc)
+{
+ u32 ppl, app;
+ int wait = 0;
+
+ /* deassert PCIe host controller and PCIe PHY reset */
+ ath79_device_reset_clear(AR724X_RESET_PCIE);
+ ath79_device_reset_clear(AR724X_RESET_PCIE_PHY);
+
+ /* remove the reset of the PCIE PLL */
+ ppl = ath79_pll_rr(AR724X_PLL_REG_PCIE_CONFIG);
+ ppl &= ~AR724X_PLL_REG_PCIE_CONFIG_PPL_RESET;
+ ath79_pll_wr(AR724X_PLL_REG_PCIE_CONFIG, ppl);
+
+ /* deassert bypass for the PCIE PLL */
+ ppl = ath79_pll_rr(AR724X_PLL_REG_PCIE_CONFIG);
+ ppl &= ~AR724X_PLL_REG_PCIE_CONFIG_PPL_BYPASS;
+ ath79_pll_wr(AR724X_PLL_REG_PCIE_CONFIG, ppl);
+
+ /* set PCIE Application Control to ready */
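+	/* LTSSM_ENABLE presumably lets the link training state machine run;
+	 * the loop below then polls RESET_LINK_UP for up to 100ms.
+	 */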
+ app = __raw_readl(apc->ctrl_base + AR724X_PCI_REG_APP);
+ app |= AR724X_PCI_APP_LTSSM_ENABLE;
+ __raw_writel(app, apc->ctrl_base + AR724X_PCI_REG_APP);
+
+ /* wait up to 100ms for PHY link up */
+ do {
+ mdelay(10);
+ wait++;
+ } while (wait < 10 && !ar724x_pci_check_link(apc));
+}
+
+static int ar724x_pci_probe(struct platform_device *pdev)
+{
+ struct ar724x_pci_controller *apc;
+ struct resource *res;
+ int id;
+
+ id = pdev->id;
+ if (id == -1)
+ id = 0;
+
+ apc = devm_kzalloc(&pdev->dev, sizeof(struct ar724x_pci_controller),
+ GFP_KERNEL);
+ if (!apc)
+ return -ENOMEM;
+
+ apc->ctrl_base = devm_platform_ioremap_resource_byname(pdev, "ctrl_base");
+ if (IS_ERR(apc->ctrl_base))
+ return PTR_ERR(apc->ctrl_base);
+
+ apc->devcfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg_base");
+ if (IS_ERR(apc->devcfg_base))
+ return PTR_ERR(apc->devcfg_base);
+
+ apc->crp_base = devm_platform_ioremap_resource_byname(pdev, "crp_base");
+ if (IS_ERR(apc->crp_base))
+ return PTR_ERR(apc->crp_base);
+
+ apc->irq = platform_get_irq(pdev, 0);
+ if (apc->irq < 0)
+ return -EINVAL;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base");
+ if (!res)
+ return -EINVAL;
+
+ apc->io_res.parent = res;
+ apc->io_res.name = "PCI IO space";
+ apc->io_res.start = res->start;
+ apc->io_res.end = res->end;
+ apc->io_res.flags = IORESOURCE_IO;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem_base");
+ if (!res)
+ return -EINVAL;
+
+ apc->mem_res.parent = res;
+ apc->mem_res.name = "PCI memory space";
+ apc->mem_res.start = res->start;
+ apc->mem_res.end = res->end;
+ apc->mem_res.flags = IORESOURCE_MEM;
+
+ apc->pci_controller.pci_ops = &ar724x_pci_ops;
+ apc->pci_controller.io_resource = &apc->io_res;
+ apc->pci_controller.mem_resource = &apc->mem_res;
+
+ /*
+ * Do the full PCIE Root Complex Initialization Sequence if the PCIe
+ * host controller is in reset.
+ */
+ if (ath79_reset_rr(AR724X_RESET_REG_RESET_MODULE) & AR724X_RESET_PCIE)
+ ar724x_pci_hw_init(apc);
+
+ apc->link_up = ar724x_pci_check_link(apc);
+ if (!apc->link_up)
+ dev_warn(&pdev->dev, "PCIe link is down\n");
+
+ ar724x_pci_irq_init(apc, id);
+
+ ar724x_pci_local_write(apc, PCI_COMMAND, 4, AR724X_PCI_CMD_INIT);
+
+ register_pci_controller(&apc->pci_controller);
+
+ return 0;
+}
+
+static struct platform_driver ar724x_pci_driver = {
+ .probe = ar724x_pci_probe,
+ .driver = {
+ .name = "ar724x-pci",
+ },
+};
+
+static int __init ar724x_pci_init(void)
+{
+ return platform_driver_register(&ar724x_pci_driver);
+}
+
+postcore_initcall(ar724x_pci_init);
diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c
new file mode 100644
index 000000000..db0d4d22d
--- /dev/null
+++ b/arch/mips/pci/pci-bcm1480.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2001,2002,2005 Broadcom Corporation
+ * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
+ */
+
+/*
+ * BCM1x80/1x55-specific PCI support
+ *
+ * This module provides the glue between Linux's PCI subsystem
+ * and the hardware. We basically provide glue for accessing
+ * configuration space, and set up the translation for I/O
+ * space accesses.
+ *
+ * To access configuration space, we use ioremap. In the 32-bit
+ * kernel, this consumes either 4 or 8 page table pages, and 16MB of
+ * kernel mapped memory. Hopefully neither of these should be a huge
+ * problem.
+ *
+ * XXX: AT THIS TIME, ONLY the NATIVE PCI-X INTERFACE IS SUPPORTED.
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/vt.h>
+
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/bcm1480_scd.h>
+#include <asm/sibyte/board.h>
+#include <asm/io.h>
+
+/*
+ * Macros for calculating offsets into config space given a device
+ * structure or dev/fun/reg
+ */
+#define CFGOFFSET(bus, devfn, where) (((bus)<<16)+((devfn)<<8)+(where))
+#define CFGADDR(bus, devfn, where) CFGOFFSET((bus)->number, (devfn), where)
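+/* bus<<16 | devfn<<8 | reg indexes straight into the 16MB match-bits
+ * window that bcm1480_pcibios_init() ioremap()s below.
+ */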
+
+static void *cfg_space;
+
+#define PCI_BUS_ENABLED 1
+#define PCI_DEVICE_MODE 2
+
+static int bcm1480_bus_status;
+
+#define PCI_BRIDGE_DEVICE 0
+
+/*
+ * Read/write 32-bit values in config space.
+ */
+static inline u32 READCFG32(u32 addr)
+{
+ return *(u32 *)(cfg_space + (addr&~3));
+}
+
+static inline void WRITECFG32(u32 addr, u32 data)
+{
+ *(u32 *)(cfg_space + (addr & ~3)) = data;
+}
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ if (pin == 0)
+ return -1;
+
+ return K_BCM1480_INT_PCI_INTA - 1 + pin;
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
+
+/*
+ * Some checks before doing config cycles:
+ * In PCI Device Mode, hide everything on bus 0 except the LDT host
+ * bridge. Otherwise, access is controlled by bridge MasterEn bits.
+ */
+static int bcm1480_pci_can_access(struct pci_bus *bus, int devfn)
+{
+ u32 devno;
+
+ if (!(bcm1480_bus_status & (PCI_BUS_ENABLED | PCI_DEVICE_MODE)))
+ return 0;
+
+ if (bus->number == 0) {
+ devno = PCI_SLOT(devfn);
+ if (bcm1480_bus_status & PCI_DEVICE_MODE)
+ return 0;
+ else
+ return 1;
+ } else
+ return 1;
+}
+
+/*
+ * Read/write access functions for various sizes of values
+ * in config space. Return all 1's for disallowed accesses
+ * for a kludgy but adequate simulation of master aborts.
+ */
+
+static int bcm1480_pcibios_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 * val)
+{
+ u32 data = 0;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (bcm1480_pci_can_access(bus, devfn))
+ data = READCFG32(CFGADDR(bus, devfn, where));
+ else
+ data = 0xFFFFFFFF;
+
+ if (size == 1)
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ else if (size == 2)
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ else
+ *val = data;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int bcm1480_pcibios_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ u32 cfgaddr = CFGADDR(bus, devfn, where);
+ u32 data = 0;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!bcm1480_pci_can_access(bus, devfn))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ data = READCFG32(cfgaddr);
+
+ if (size == 1)
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else if (size == 2)
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else
+ data = val;
+
+ WRITECFG32(cfgaddr, data);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops bcm1480_pci_ops = {
+ .read = bcm1480_pcibios_read,
+ .write = bcm1480_pcibios_write,
+};
+
+static struct resource bcm1480_mem_resource = {
+ .name = "BCM1480 PCI MEM",
+ .start = A_BCM1480_PHYS_PCI_MEM_MATCH_BYTES,
+ .end = A_BCM1480_PHYS_PCI_MEM_MATCH_BYTES + 0xfffffffUL,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource bcm1480_io_resource = {
+ .name = "BCM1480 PCI I/O",
+ .start = A_BCM1480_PHYS_PCI_IO_MATCH_BYTES,
+ .end = A_BCM1480_PHYS_PCI_IO_MATCH_BYTES + 0x1ffffffUL,
+ .flags = IORESOURCE_IO,
+};
+
+struct pci_controller bcm1480_controller = {
+ .pci_ops = &bcm1480_pci_ops,
+ .mem_resource = &bcm1480_mem_resource,
+ .io_resource = &bcm1480_io_resource,
+ .io_offset = A_BCM1480_PHYS_PCI_IO_MATCH_BYTES,
+};
+
+
+static int __init bcm1480_pcibios_init(void)
+{
+ uint32_t cmdreg;
+ uint64_t reg;
+
+ /* CFE will assign PCI resources */
+ pci_set_flags(PCI_PROBE_ONLY);
+
+ /* Avoid ISA compat ranges. */
+ PCIBIOS_MIN_IO = 0x00008000UL;
+ PCIBIOS_MIN_MEM = 0x01000000UL;
+
+ /* Set I/O resource limits. - unlimited for now to accommodate HT */
+ ioport_resource.end = 0xffffffffUL;
+ iomem_resource.end = 0xffffffffUL;
+
+ cfg_space = ioremap(A_BCM1480_PHYS_PCI_CFG_MATCH_BITS, 16*1024*1024);
+
+ /*
+ * See if the PCI bus has been configured by the firmware.
+ */
+ reg = __raw_readq(IOADDR(A_SCD_SYSTEM_CFG));
+ if (!(reg & M_BCM1480_SYS_PCI_HOST)) {
+ bcm1480_bus_status |= PCI_DEVICE_MODE;
+ } else {
+ cmdreg = READCFG32(CFGOFFSET(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0),
+ PCI_COMMAND));
+ if (!(cmdreg & PCI_COMMAND_MASTER)) {
+ printk
+ ("PCI: Skipping PCI probe. Bus is not initialized.\n");
+ iounmap(cfg_space);
+ return 1; /* XXX */
+ }
+ bcm1480_bus_status |= PCI_BUS_ENABLED;
+ }
+
+ /* turn on ExpMemEn */
+ cmdreg = READCFG32(CFGOFFSET(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), 0x40));
+ WRITECFG32(CFGOFFSET(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), 0x40),
+ cmdreg | 0x10);
+ cmdreg = READCFG32(CFGOFFSET(0, PCI_DEVFN(PCI_BRIDGE_DEVICE, 0), 0x40));
+
+ /*
+ * Establish mappings in KSEG2 (kernel virtual) to PCI I/O
+ * space. Use "match bytes" policy to make everything look
+ * little-endian. So, you need to also set
+ * CONFIG_SWAP_IO_SPACE, but this is the combination that
+ * works correctly with most of Linux's drivers.
+ * XXX ehs: Should this happen in PCI Device mode?
+ */
+
+ bcm1480_controller.io_map_base = (unsigned long)
+ ioremap(A_BCM1480_PHYS_PCI_IO_MATCH_BYTES, 65536);
+ bcm1480_controller.io_map_base -= bcm1480_controller.io_offset;
+ set_io_port_base(bcm1480_controller.io_map_base);
+
+ register_pci_controller(&bcm1480_controller);
+
+#ifdef CONFIG_VGA_CONSOLE
+ console_lock();
+ do_take_over_console(&vga_con, 0, MAX_NR_CONSOLES-1, 1);
+ console_unlock();
+#endif
+ return 0;
+}
+
+arch_initcall(bcm1480_pcibios_init);
diff --git a/arch/mips/pci/pci-bcm1480ht.c b/arch/mips/pci/pci-bcm1480ht.c
new file mode 100644
index 000000000..3d996acd2
--- /dev/null
+++ b/arch/mips/pci/pci-bcm1480ht.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2001,2002,2005 Broadcom Corporation
+ * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
+ */
+
+/*
+ * BCM1480/1455-specific HT support (looking like PCI)
+ *
+ * This module provides the glue between Linux's PCI subsystem
+ * and the hardware. We basically provide glue for accessing
+ * configuration space, and set up the translation for I/O
+ * space accesses.
+ *
+ * To access configuration space, we use ioremap. In the 32-bit
+ * kernel, this consumes either 4 or 8 page table pages, and 16MB of
+ * kernel mapped memory. Hopefully neither of these should be a huge
+ * problem.
+ *
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/bcm1480_scd.h>
+#include <asm/sibyte/board.h>
+#include <asm/io.h>
+
+/*
+ * Macros for calculating offsets into config space given a device
+ * structure or dev/fun/reg
+ */
+#define CFGOFFSET(bus, devfn, where) (((bus)<<16)+((devfn)<<8)+(where))
+#define CFGADDR(bus, devfn, where) CFGOFFSET((bus)->number, (devfn), where)
+
+static void *ht_cfg_space;
+
+#define PCI_BUS_ENABLED 1
+#define PCI_DEVICE_MODE 2
+
+static int bcm1480ht_bus_status;
+
+#define PCI_BRIDGE_DEVICE 0
+#define HT_BRIDGE_DEVICE 1
+
+/*
+ * HT's level-sensitive interrupts require EOI, which is generated
+ * through a 4MB memory-mapped region
+ */
+unsigned long ht_eoi_space;
+
+/*
+ * Read/write 32-bit values in config space.
+ */
+static inline u32 READCFG32(u32 addr)
+{
+ return *(u32 *)(ht_cfg_space + (addr&~3));
+}
+
+static inline void WRITECFG32(u32 addr, u32 data)
+{
+ *(u32 *)(ht_cfg_space + (addr & ~3)) = data;
+}
+
+/*
+ * Some checks before doing config cycles:
+ * In PCI Device Mode, hide everything on bus 0 except the LDT host
+ * bridge. Otherwise, access is controlled by bridge MasterEn bits.
+ */
+static int bcm1480ht_can_access(struct pci_bus *bus, int devfn)
+{
+ u32 devno;
+
+ if (!(bcm1480ht_bus_status & (PCI_BUS_ENABLED | PCI_DEVICE_MODE)))
+ return 0;
+
+ if (bus->number == 0) {
+ devno = PCI_SLOT(devfn);
+ if (bcm1480ht_bus_status & PCI_DEVICE_MODE)
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Read/write access functions for various sizes of values
+ * in config space. Return all 1's for disallowed accesses
+ * for a kludgy but adequate simulation of master aborts.
+ */
+
+static int bcm1480ht_pcibios_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 * val)
+{
+ u32 data = 0;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (bcm1480ht_can_access(bus, devfn))
+ data = READCFG32(CFGADDR(bus, devfn, where));
+ else
+ data = 0xFFFFFFFF;
+
+ if (size == 1)
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ else if (size == 2)
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ else
+ *val = data;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int bcm1480ht_pcibios_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ u32 cfgaddr = CFGADDR(bus, devfn, where);
+ u32 data = 0;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!bcm1480ht_can_access(bus, devfn))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ data = READCFG32(cfgaddr);
+
+ if (size == 1)
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else if (size == 2)
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else
+ data = val;
+
+ WRITECFG32(cfgaddr, data);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int bcm1480ht_pcibios_get_busno(void)
+{
+ return 0;
+}
+
+struct pci_ops bcm1480ht_pci_ops = {
+ .read = bcm1480ht_pcibios_read,
+ .write = bcm1480ht_pcibios_write,
+};
+
+static struct resource bcm1480ht_mem_resource = {
+ .name = "BCM1480 HT MEM",
+ .start = A_BCM1480_PHYS_HT_MEM_MATCH_BYTES,
+ .end = A_BCM1480_PHYS_HT_MEM_MATCH_BYTES + 0x1fffffffUL,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource bcm1480ht_io_resource = {
+ .name = "BCM1480 HT I/O",
+ .start = A_BCM1480_PHYS_HT_IO_MATCH_BYTES,
+ .end = A_BCM1480_PHYS_HT_IO_MATCH_BYTES + 0x01ffffffUL,
+ .flags = IORESOURCE_IO,
+};
+
+struct pci_controller bcm1480ht_controller = {
+ .pci_ops = &bcm1480ht_pci_ops,
+ .mem_resource = &bcm1480ht_mem_resource,
+ .io_resource = &bcm1480ht_io_resource,
+ .index = 1,
+ .get_busno = bcm1480ht_pcibios_get_busno,
+ .io_offset = A_BCM1480_PHYS_HT_IO_MATCH_BYTES,
+};
+
+static int __init bcm1480ht_pcibios_init(void)
+{
+ ht_cfg_space = ioremap(A_BCM1480_PHYS_HT_CFG_MATCH_BITS, 16*1024*1024);
+
+ /* CFE doesn't always init all HT paths, so we always scan */
+ bcm1480ht_bus_status |= PCI_BUS_ENABLED;
+
+ ht_eoi_space = (unsigned long)
+ ioremap(A_BCM1480_PHYS_HT_SPECIAL_MATCH_BYTES,
+ 4 * 1024 * 1024);
+ bcm1480ht_controller.io_map_base = (unsigned long)
+ ioremap(A_BCM1480_PHYS_HT_IO_MATCH_BYTES, 65536);
+ bcm1480ht_controller.io_map_base -= bcm1480ht_controller.io_offset;
+
+ register_pci_controller(&bcm1480ht_controller);
+
+ return 0;
+}
+
+arch_initcall(bcm1480ht_pcibios_init);
diff --git a/arch/mips/pci/pci-bcm47xx.c b/arch/mips/pci/pci-bcm47xx.c
new file mode 100644
index 000000000..230d7dd27
--- /dev/null
+++ b/arch/mips/pci/pci-bcm47xx.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2008 Aurelien Jarno <aurelien@aurel32.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/ssb/ssb.h>
+#include <linux/bcma/bcma.h>
+#include <bcm47xx.h>
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return 0;
+}
+
+#ifdef CONFIG_BCM47XX_SSB
+static int bcm47xx_pcibios_plat_dev_init_ssb(struct pci_dev *dev)
+{
+ int res;
+ u8 slot, pin;
+
+ res = ssb_pcibios_plat_dev_init(dev);
+ if (res < 0) {
+ printk(KERN_ALERT "PCI: Failed to init device %s\n",
+ pci_name(dev));
+ return res;
+ }
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ slot = PCI_SLOT(dev->devfn);
+ res = ssb_pcibios_map_irq(dev, slot, pin);
+
+ /* IRQ-0 and IRQ-1 are software interrupts. */
+ if (res < 2) {
+ printk(KERN_ALERT "PCI: Failed to map IRQ of device %s\n",
+ pci_name(dev));
+ return res;
+ }
+
+ dev->irq = res;
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_BCM47XX_BCMA
+static int bcm47xx_pcibios_plat_dev_init_bcma(struct pci_dev *dev)
+{
+ int res;
+
+ res = bcma_core_pci_plat_dev_init(dev);
+ if (res < 0) {
+ printk(KERN_ALERT "PCI: Failed to init device %s\n",
+ pci_name(dev));
+ return res;
+ }
+
+ res = bcma_core_pci_pcibios_map_irq(dev);
+
+ /* IRQ-0 and IRQ-1 are software interrupts. */
+ if (res < 2) {
+ printk(KERN_ALERT "PCI: Failed to map IRQ of device %s\n",
+ pci_name(dev));
+ return res;
+ }
+
+ dev->irq = res;
+ return 0;
+}
+#endif
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+#ifdef CONFIG_BCM47XX_SSB
+ if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_SSB)
+ return bcm47xx_pcibios_plat_dev_init_ssb(dev);
+ else
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA)
+ return bcm47xx_pcibios_plat_dev_init_bcma(dev);
+ else
+#endif
+ return 0;
+}
diff --git a/arch/mips/pci/pci-bcm63xx.c b/arch/mips/pci/pci-bcm63xx.c
new file mode 100644
index 000000000..554836560
--- /dev/null
+++ b/arch/mips/pci/pci-bcm63xx.c
@@ -0,0 +1,351 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <asm/bootinfo.h>
+
+#include <bcm63xx_reset.h>
+
+#include "pci-bcm63xx.h"
+
+/*
+ * Allow PCI to be disabled at runtime depending on board nvram
+ * configuration
+ */
+int bcm63xx_pci_enabled;
+
+static struct resource bcm_pci_mem_resource = {
+ .name = "bcm63xx PCI memory space",
+ .start = BCM_PCI_MEM_BASE_PA,
+ .end = BCM_PCI_MEM_END_PA,
+ .flags = IORESOURCE_MEM
+};
+
+static struct resource bcm_pci_io_resource = {
+ .name = "bcm63xx PCI IO space",
+ .start = BCM_PCI_IO_BASE_PA,
+#ifdef CONFIG_CARDBUS
+ .end = BCM_PCI_IO_HALF_PA,
+#else
+ .end = BCM_PCI_IO_END_PA,
+#endif
+ .flags = IORESOURCE_IO
+};
+
+struct pci_controller bcm63xx_controller = {
+ .pci_ops = &bcm63xx_pci_ops,
+ .io_resource = &bcm_pci_io_resource,
+ .mem_resource = &bcm_pci_mem_resource,
+};
+
+/*
+ * We handle Cardbus via a fake Cardbus bridge; its memory and IO spaces
+ * have to be clearly separated from the PCI ones since we have a
+ * different memory decoder.
+ */
+#ifdef CONFIG_CARDBUS
+static struct resource bcm_cb_mem_resource = {
+ .name = "bcm63xx Cardbus memory space",
+ .start = BCM_CB_MEM_BASE_PA,
+ .end = BCM_CB_MEM_END_PA,
+ .flags = IORESOURCE_MEM
+};
+
+static struct resource bcm_cb_io_resource = {
+ .name = "bcm63xx Cardbus IO space",
+ .start = BCM_PCI_IO_HALF_PA + 1,
+ .end = BCM_PCI_IO_END_PA,
+ .flags = IORESOURCE_IO
+};
+
+struct pci_controller bcm63xx_cb_controller = {
+ .pci_ops = &bcm63xx_cb_ops,
+ .io_resource = &bcm_cb_io_resource,
+ .mem_resource = &bcm_cb_mem_resource,
+};
+#endif
+
+static struct resource bcm_pcie_mem_resource = {
+ .name = "bcm63xx PCIe memory space",
+ .start = BCM_PCIE_MEM_BASE_PA,
+ .end = BCM_PCIE_MEM_END_PA,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource bcm_pcie_io_resource = {
+ .name = "bcm63xx PCIe IO space",
+ .start = 0,
+ .end = 0,
+ .flags = 0,
+};
+
+struct pci_controller bcm63xx_pcie_controller = {
+ .pci_ops = &bcm63xx_pcie_ops,
+ .io_resource = &bcm_pcie_io_resource,
+ .mem_resource = &bcm_pcie_mem_resource,
+};
+
+static u32 bcm63xx_int_cfg_readl(u32 reg)
+{
+ u32 tmp;
+
+ tmp = reg & MPI_PCICFGCTL_CFGADDR_MASK;
+ tmp |= MPI_PCICFGCTL_WRITEEN_MASK;
+ bcm_mpi_writel(tmp, MPI_PCICFGCTL_REG);
+ iob();
+ return bcm_mpi_readl(MPI_PCICFGDATA_REG);
+}
+
+static void bcm63xx_int_cfg_writel(u32 val, u32 reg)
+{
+ u32 tmp;
+
+ tmp = reg & MPI_PCICFGCTL_CFGADDR_MASK;
+ tmp |= MPI_PCICFGCTL_WRITEEN_MASK;
+ bcm_mpi_writel(tmp, MPI_PCICFGCTL_REG);
+ bcm_mpi_writel(val, MPI_PCICFGDATA_REG);
+}
+
+void __iomem *pci_iospace_start;
+
+static void __init bcm63xx_reset_pcie(void)
+{
+ u32 val;
+ u32 reg;
+
+ /* enable SERDES */
+ if (BCMCPU_IS_6328())
+ reg = MISC_SERDES_CTRL_6328_REG;
+ else
+ reg = MISC_SERDES_CTRL_6362_REG;
+
+ val = bcm_misc_readl(reg);
+ val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN;
+ bcm_misc_writel(val, reg);
+
+ /* reset the PCIe core */
+ bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1);
+ bcm63xx_core_set_reset(BCM63XX_RESET_PCIE_EXT, 1);
+ mdelay(10);
+
+ bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 0);
+ mdelay(10);
+
+ bcm63xx_core_set_reset(BCM63XX_RESET_PCIE_EXT, 0);
+ mdelay(200);
+}
+
+static struct clk *pcie_clk;
+
+static int __init bcm63xx_register_pcie(void)
+{
+ u32 val;
+
+ /* enable clock */
+ pcie_clk = clk_get(NULL, "pcie");
+ if (IS_ERR_OR_NULL(pcie_clk))
+ return -ENODEV;
+
+ clk_prepare_enable(pcie_clk);
+
+ bcm63xx_reset_pcie();
+
+ /* configure the PCIe bridge */
+ val = bcm_pcie_readl(PCIE_BRIDGE_OPT1_REG);
+ val |= OPT1_RD_BE_OPT_EN;
+ val |= OPT1_RD_REPLY_BE_FIX_EN;
+ val |= OPT1_PCIE_BRIDGE_HOLE_DET_EN;
+ val |= OPT1_L1_INT_STATUS_MASK_POL;
+ bcm_pcie_writel(val, PCIE_BRIDGE_OPT1_REG);
+
+ /* setup the interrupts */
+ val = bcm_pcie_readl(PCIE_BRIDGE_RC_INT_MASK_REG);
+ val |= PCIE_RC_INT_A | PCIE_RC_INT_B | PCIE_RC_INT_C | PCIE_RC_INT_D;
+ bcm_pcie_writel(val, PCIE_BRIDGE_RC_INT_MASK_REG);
+
+ val = bcm_pcie_readl(PCIE_BRIDGE_OPT2_REG);
+ /* enable credit checking and error checking */
+ val |= OPT2_TX_CREDIT_CHK_EN;
+ val |= OPT2_UBUS_UR_DECODE_DIS;
+
+ /* set device bus/func for the pcie device */
+ val |= (PCIE_BUS_DEVICE << OPT2_CFG_TYPE1_BUS_NO_SHIFT);
+ val |= OPT2_CFG_TYPE1_BD_SEL;
+ bcm_pcie_writel(val, PCIE_BRIDGE_OPT2_REG);
+
+ /* setup class code as bridge */
+ val = bcm_pcie_readl(PCIE_IDVAL3_REG);
+ val &= ~IDVAL3_CLASS_CODE_MASK;
+ val |= (PCI_CLASS_BRIDGE_PCI << IDVAL3_SUBCLASS_SHIFT);
+ bcm_pcie_writel(val, PCIE_IDVAL3_REG);
+
+ /* disable bar1 size */
+ val = bcm_pcie_readl(PCIE_CONFIG2_REG);
+ val &= ~CONFIG2_BAR1_SIZE_MASK;
+ bcm_pcie_writel(val, PCIE_CONFIG2_REG);
+
+ /* set bar0 to little endian */
+ val = (BCM_PCIE_MEM_BASE_PA >> 20) << BASEMASK_BASE_SHIFT;
+ val |= (BCM_PCIE_MEM_BASE_PA >> 20) << BASEMASK_MASK_SHIFT;
+ val |= BASEMASK_REMAP_EN;
+ bcm_pcie_writel(val, PCIE_BRIDGE_BAR0_BASEMASK_REG);
+
+ val = (BCM_PCIE_MEM_BASE_PA >> 20) << REBASE_ADDR_BASE_SHIFT;
+ bcm_pcie_writel(val, PCIE_BRIDGE_BAR0_REBASE_ADDR_REG);
+
+ register_pci_controller(&bcm63xx_pcie_controller);
+
+ return 0;
+}
+
+static int __init bcm63xx_register_pci(void)
+{
+ unsigned int mem_size;
+ u32 val;
+ /*
+ * Configuration accesses are done through IO space; remap the
+ * first 4 bytes so they can be accessed from the CPU.
+ *
+ * This means that no IO access from the CPU should happen while
+ * we do a configuration cycle, but there's no way we can add
+ * a spinlock for each IO access, so this is currently kind of
+ * broken on SMP.
+ */
+ pci_iospace_start = ioremap(BCM_PCI_IO_BASE_PA, 4);
+ if (!pci_iospace_start)
+ return -ENOMEM;
+
+ /* setup local bus to PCI access (PCI memory) */
+ val = BCM_PCI_MEM_BASE_PA & MPI_L2P_BASE_MASK;
+ bcm_mpi_writel(val, MPI_L2PMEMBASE1_REG);
+ bcm_mpi_writel(~(BCM_PCI_MEM_SIZE - 1), MPI_L2PMEMRANGE1_REG);
+ bcm_mpi_writel(val | MPI_L2PREMAP_ENABLED_MASK, MPI_L2PMEMREMAP1_REG);
+
+ /* set Cardbus IDSEL (type 0 cfg access on primary bus for
+ * this IDSEL will be done on Cardbus instead) */
+ val = bcm_pcmcia_readl(PCMCIA_C1_REG);
+ val &= ~PCMCIA_C1_CBIDSEL_MASK;
+ val |= (CARDBUS_PCI_IDSEL << PCMCIA_C1_CBIDSEL_SHIFT);
+ bcm_pcmcia_writel(val, PCMCIA_C1_REG);
+
+#ifdef CONFIG_CARDBUS
+ /* setup local bus to PCI access (Cardbus memory) */
+ val = BCM_CB_MEM_BASE_PA & MPI_L2P_BASE_MASK;
+ bcm_mpi_writel(val, MPI_L2PMEMBASE2_REG);
+ bcm_mpi_writel(~(BCM_CB_MEM_SIZE - 1), MPI_L2PMEMRANGE2_REG);
+ val |= MPI_L2PREMAP_ENABLED_MASK | MPI_L2PREMAP_IS_CARDBUS_MASK;
+ bcm_mpi_writel(val, MPI_L2PMEMREMAP2_REG);
+#else
+ /* disable the second access window */
+ bcm_mpi_writel(0, MPI_L2PMEMREMAP2_REG);
+#endif
+
+ /* setup local bus to PCI access (IO memory); we have only one
+ * IO window for both PCI and Cardbus, and it cannot handle
+ * both at the same time, so assume standard PCI for now; if a
+ * Cardbus card has an IO zone, the PCI fixup will switch the
+ * window to Cardbus */
+ val = BCM_PCI_IO_BASE_PA & MPI_L2P_BASE_MASK;
+ bcm_mpi_writel(val, MPI_L2PIOBASE_REG);
+ bcm_mpi_writel(~(BCM_PCI_IO_SIZE - 1), MPI_L2PIORANGE_REG);
+ bcm_mpi_writel(val | MPI_L2PREMAP_ENABLED_MASK, MPI_L2PIOREMAP_REG);
+
+ /* enable PCI related GPIO pins */
+ bcm_mpi_writel(MPI_LOCBUSCTL_EN_PCI_GPIO_MASK, MPI_LOCBUSCTL_REG);
+
+ /* setup PCI to local bus access, used by PCI device to target
+ * local RAM while bus mastering */
+ bcm63xx_int_cfg_writel(0, PCI_BASE_ADDRESS_3);
+ if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || BCMCPU_IS_6368())
+ val = MPI_SP0_REMAP_ENABLE_MASK;
+ else
+ val = 0;
+ bcm_mpi_writel(val, MPI_SP0_REMAP_REG);
+
+ bcm63xx_int_cfg_writel(0x0, PCI_BASE_ADDRESS_4);
+ bcm_mpi_writel(0, MPI_SP1_REMAP_REG);
+
+ mem_size = bcm63xx_get_memory_size();
+
+ /* 6348 before rev b0 exposes only 16 MB of RAM through
+ * PCI, so warn if we have more memory */
+ if (BCMCPU_IS_6348() && (bcm63xx_get_cpu_rev() & 0xf0) == 0xa0) {
+ if (mem_size > (16 * 1024 * 1024))
+ printk(KERN_WARNING "bcm63xx: this CPU "
+ "revision cannot handle more than 16MB "
+ "of RAM for PCI bus mastering\n");
+ } else {
+ /* setup sp0 range to local RAM size */
+ bcm_mpi_writel(~(mem_size - 1), MPI_SP0_RANGE_REG);
+ bcm_mpi_writel(0, MPI_SP1_RANGE_REG);
+ }
+
+ /* change the host bridge retry counter to an infinite number of
+ * retries, needed for some Broadcom wifi cards with a Silicon
+ * Backplane bus where access to the SROM seems very slow */
+ val = bcm63xx_int_cfg_readl(BCMPCI_REG_TIMERS);
+ val &= ~REG_TIMER_RETRY_MASK;
+ bcm63xx_int_cfg_writel(val, BCMPCI_REG_TIMERS);
+
+ /* enable memory decoder and bus mastering */
+ val = bcm63xx_int_cfg_readl(PCI_COMMAND);
+ val |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ bcm63xx_int_cfg_writel(val, PCI_COMMAND);
+
+ /* enable read prefetching & disable byte swapping for bus
+ * mastering transfers */
+ val = bcm_mpi_readl(MPI_PCIMODESEL_REG);
+ val &= ~MPI_PCIMODESEL_BAR1_NOSWAP_MASK;
+ val &= ~MPI_PCIMODESEL_BAR2_NOSWAP_MASK;
+ val &= ~MPI_PCIMODESEL_PREFETCH_MASK;
+ val |= (8 << MPI_PCIMODESEL_PREFETCH_SHIFT);
+ bcm_mpi_writel(val, MPI_PCIMODESEL_REG);
+
+ /* enable pci interrupt */
+ val = bcm_mpi_readl(MPI_LOCINT_REG);
+ val |= MPI_LOCINT_MASK(MPI_LOCINT_EXT_PCI_INT);
+ bcm_mpi_writel(val, MPI_LOCINT_REG);
+
+ register_pci_controller(&bcm63xx_controller);
+
+#ifdef CONFIG_CARDBUS
+ register_pci_controller(&bcm63xx_cb_controller);
+#endif
+
+ /* mark memory space used for IO mapping as reserved */
+ request_mem_region(BCM_PCI_IO_BASE_PA, BCM_PCI_IO_SIZE,
+ "bcm63xx PCI IO space");
+ return 0;
+}
+
+
+static int __init bcm63xx_pci_init(void)
+{
+ if (!bcm63xx_pci_enabled)
+ return -ENODEV;
+
+ switch (bcm63xx_get_cpu_id()) {
+ case BCM6328_CPU_ID:
+ case BCM6362_CPU_ID:
+ return bcm63xx_register_pcie();
+ case BCM3368_CPU_ID:
+ case BCM6348_CPU_ID:
+ case BCM6358_CPU_ID:
+ case BCM6368_CPU_ID:
+ return bcm63xx_register_pci();
+ default:
+ return -ENODEV;
+ }
+}
+
+arch_initcall(bcm63xx_pci_init);
diff --git a/arch/mips/pci/pci-bcm63xx.h b/arch/mips/pci/pci-bcm63xx.h
new file mode 100644
index 000000000..214def1e4
--- /dev/null
+++ b/arch/mips/pci/pci-bcm63xx.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef PCI_BCM63XX_H_
+#define PCI_BCM63XX_H_
+
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_regs.h>
+#include <bcm63xx_dev_pci.h>
+
+/*
+ * Cardbus shares the PCI bus, but has no IDSEL, so a special id is
+ * reserved for it. If you have a standard PCI device at this id, you
+ * need to change the following definition.
+ */
+#define CARDBUS_PCI_IDSEL 0x8
+
+
+#define PCIE_BUS_BRIDGE 0
+#define PCIE_BUS_DEVICE 1
+
+/*
+ * defined in ops-bcm63xx.c
+ */
+extern struct pci_ops bcm63xx_pci_ops;
+extern struct pci_ops bcm63xx_cb_ops;
+extern struct pci_ops bcm63xx_pcie_ops;
+
+/*
+ * defined in pci-bcm63xx.c
+ */
+extern void __iomem *pci_iospace_start;
+
+#endif /* ! PCI_BCM63XX_H_ */
diff --git a/arch/mips/pci/pci-generic.c b/arch/mips/pci/pci-generic.c
new file mode 100644
index 000000000..95b000178
--- /dev/null
+++ b/arch/mips/pci/pci-generic.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ *
+ * pcibios_align_resource taken from arch/arm/kernel/bios32.c.
+ */
+
+#include <linux/pci.h>
+
+/*
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might be mirrored at 0x0100-0x03ff..
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ struct pci_dev *dev = data;
+ resource_size_t start = res->start;
+ struct pci_host_bridge *host_bridge;
+
+ if (res->flags & IORESOURCE_IO && start & 0x300)
+ start = (start + 0x3ff) & ~0x3ff;
+
+ start = (start + align - 1) & ~(align - 1);
+
+ host_bridge = pci_find_host_bridge(dev->bus);
+
+ if (host_bridge->align_resource)
+ return host_bridge->align_resource(dev, res,
+ start, size, align);
+
+ return start;
+}
+
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+ pci_read_bridge_bases(bus);
+}
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
new file mode 100644
index 000000000..d85cbf84e
--- /dev/null
+++ b/arch/mips/pci/pci-ip27.c
@@ -0,0 +1,42 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Christoph Hellwig (hch@lst.de)
+ * Copyright (C) 1999, 2000, 04 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <asm/sn/addrs.h>
+#include <asm/sn/types.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/ioc3.h>
+#include <asm/pci/bridge.h>
+
+#ifdef CONFIG_NUMA
+int pcibus_to_node(struct pci_bus *bus)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+
+ return bc->nasid;
+}
+EXPORT_SYMBOL(pcibus_to_node);
+#endif /* CONFIG_NUMA */
+
+static void ip29_fixup_phy(struct pci_dev *dev)
+{
+ int nasid = pcibus_to_node(dev->bus);
+ u32 sid;
+
+ if (nasid != 1)
+ return; /* only needed on second module */
+
+ /* enable ethernet PHY on IP29 systemboard */
+ pci_read_config_dword(dev, PCI_SUBSYSTEM_VENDOR_ID, &sid);
+ if (sid == (PCI_VENDOR_ID_SGI | (IOC3_SUBSYS_IP29_SYSBOARD) << 16))
+ REMOTE_HUB_S(nasid, MD_LED0, 0x09);
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
+ ip29_fixup_phy);
diff --git a/arch/mips/pci/pci-ip32.c b/arch/mips/pci/pci-ip32.c
new file mode 100644
index 000000000..7ae89d0c7
--- /dev/null
+++ b/arch/mips/pci/pci-ip32.c
@@ -0,0 +1,147 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000, 2001 Keith M Wesolowski
+ * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <asm/ip32/mace.h>
+#include <asm/ip32/ip32_ints.h>
+
+#undef DEBUG_MACE_PCI
+
+/*
+ * Handle errors from the bridge. This includes master and target aborts,
+ * various command and address errors, and the interrupt test. This gets
+ * registered on the bridge error irq. It's conceivable that some of these
+ * conditions warrant a panic. Anybody care to say which ones?
+ */
+static irqreturn_t macepci_error(int irq, void *dev)
+{
+ char s;
+ unsigned int flags = mace->pci.error;
+ unsigned int addr = mace->pci.error_addr;
+
+ if (flags & MACEPCI_ERROR_MEMORY_ADDR)
+ s = 'M';
+ else if (flags & MACEPCI_ERROR_CONFIG_ADDR)
+ s = 'C';
+ else
+ s = 'X';
+
+ if (flags & MACEPCI_ERROR_MASTER_ABORT) {
+ printk("MACEPCI: Master abort at 0x%08x (%c)\n", addr, s);
+ flags &= ~MACEPCI_ERROR_MASTER_ABORT;
+ }
+ if (flags & MACEPCI_ERROR_TARGET_ABORT) {
+ printk("MACEPCI: Target abort at 0x%08x (%c)\n", addr, s);
+ flags &= ~MACEPCI_ERROR_TARGET_ABORT;
+ }
+ if (flags & MACEPCI_ERROR_DATA_PARITY_ERR) {
+ printk("MACEPCI: Data parity error at 0x%08x (%c)\n", addr, s);
+ flags &= ~MACEPCI_ERROR_DATA_PARITY_ERR;
+ }
+ if (flags & MACEPCI_ERROR_RETRY_ERR) {
+ printk("MACEPCI: Retry error at 0x%08x (%c)\n", addr, s);
+ flags &= ~MACEPCI_ERROR_RETRY_ERR;
+ }
+ if (flags & MACEPCI_ERROR_ILLEGAL_CMD) {
+ printk("MACEPCI: Illegal command at 0x%08x (%c)\n", addr, s);
+ flags &= ~MACEPCI_ERROR_ILLEGAL_CMD;
+ }
+ if (flags & MACEPCI_ERROR_SYSTEM_ERR) {
+ printk("MACEPCI: System error at 0x%08x (%c)\n", addr, s);
+ flags &= ~MACEPCI_ERROR_SYSTEM_ERR;
+ }
+ if (flags & MACEPCI_ERROR_PARITY_ERR) {
+ printk("MACEPCI: Parity error at 0x%08x (%c)\n", addr, s);
+ flags &= ~MACEPCI_ERROR_PARITY_ERR;
+ }
+ if (flags & MACEPCI_ERROR_OVERRUN) {
+ printk("MACEPCI: Overrun error at 0x%08x (%c)\n", addr, s);
+ flags &= ~MACEPCI_ERROR_OVERRUN;
+ }
+ if (flags & MACEPCI_ERROR_SIG_TABORT) {
+ printk("MACEPCI: Signaled target abort (clearing)\n");
+ flags &= ~MACEPCI_ERROR_SIG_TABORT;
+ }
+ if (flags & MACEPCI_ERROR_INTERRUPT_TEST) {
+ printk("MACEPCI: Interrupt test triggered (clearing)\n");
+ flags &= ~MACEPCI_ERROR_INTERRUPT_TEST;
+ }
+
+ mace->pci.error = flags;
+
+ return IRQ_HANDLED;
+}
+
+
+extern struct pci_ops mace_pci_ops;
+#ifdef CONFIG_64BIT
+static struct resource mace_pci_mem_resource = {
+ .name = "SGI O2 PCI MEM",
+ .start = MACEPCI_HI_MEMORY,
+ .end = 0x2FFFFFFFFUL,
+ .flags = IORESOURCE_MEM,
+};
+static struct resource mace_pci_io_resource = {
+ .name = "SGI O2 PCI IO",
+ .start = 0x00000000UL,
+ .end = 0xffffffffUL,
+ .flags = IORESOURCE_IO,
+};
+#define MACE_PCI_MEM_OFFSET 0x200000000
+#else
+static struct resource mace_pci_mem_resource = {
+ .name = "SGI O2 PCI MEM",
+ .start = MACEPCI_LOW_MEMORY,
+ .end = MACEPCI_LOW_MEMORY + 0x2000000 - 1,
+ .flags = IORESOURCE_MEM,
+};
+static struct resource mace_pci_io_resource = {
+ .name = "SGI O2 PCI IO",
+ .start = 0x00000000,
+ .end = 0xFFFFFFFF,
+ .flags = IORESOURCE_IO,
+};
+#define MACE_PCI_MEM_OFFSET (MACEPCI_LOW_MEMORY - 0x80000000)
+#endif
+static struct pci_controller mace_pci_controller = {
+ .pci_ops = &mace_pci_ops,
+ .mem_resource = &mace_pci_mem_resource,
+ .io_resource = &mace_pci_io_resource,
+ .mem_offset = MACE_PCI_MEM_OFFSET,
+ .io_offset = 0,
+ .io_map_base = CKSEG1ADDR(MACEPCI_LOW_IO),
+};
+
+static int __init mace_init(void)
+{
+ PCIBIOS_MIN_IO = 0x1000;
+
+ /* Clear any outstanding errors and enable interrupts */
+ mace->pci.error_addr = 0;
+ mace->pci.error = 0;
+ mace->pci.control = 0xff008500;
+
+ printk("MACE PCI rev %d\n", mace->pci.rev);
+
+ BUG_ON(request_irq(MACE_PCI_BRIDGE_IRQ, macepci_error, 0,
+ "MACE PCI error", NULL));
+
+ /* extend memory resources */
+ iomem_resource.end = mace_pci_mem_resource.end;
+ ioport_resource = mace_pci_io_resource;
+
+ register_pci_controller(&mace_pci_controller);
+
+ return 0;
+}
+
+arch_initcall(mace_init);
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
new file mode 100644
index 000000000..1ca42f482
--- /dev/null
+++ b/arch/mips/pci/pci-lantiq.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/clk.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+
+#include <asm/addrspace.h>
+
+#include <lantiq_soc.h>
+#include <lantiq_irq.h>
+
+#include "pci-lantiq.h"
+
+#define PCI_CR_FCI_ADDR_MAP0 0x00C0
+#define PCI_CR_FCI_ADDR_MAP1 0x00C4
+#define PCI_CR_FCI_ADDR_MAP2 0x00C8
+#define PCI_CR_FCI_ADDR_MAP3 0x00CC
+#define PCI_CR_FCI_ADDR_MAP4 0x00D0
+#define PCI_CR_FCI_ADDR_MAP5 0x00D4
+#define PCI_CR_FCI_ADDR_MAP6 0x00D8
+#define PCI_CR_FCI_ADDR_MAP7 0x00DC
+#define PCI_CR_CLK_CTRL 0x0000
+#define PCI_CR_PCI_MOD 0x0030
+#define PCI_CR_PC_ARB 0x0080
+#define PCI_CR_FCI_ADDR_MAP11hg 0x00E4
+#define PCI_CR_BAR11MASK 0x0044
+#define PCI_CR_BAR12MASK 0x0048
+#define PCI_CR_BAR13MASK 0x004C
+#define PCI_CS_BASE_ADDR1 0x0010
+#define PCI_CR_PCI_ADDR_MAP11 0x0064
+#define PCI_CR_FCI_BURST_LENGTH 0x00E8
+#define PCI_CR_PCI_EOI 0x002C
+#define PCI_CS_STS_CMD 0x0004
+
+#define PCI_MASTER0_REQ_MASK_2BITS 8
+#define PCI_MASTER1_REQ_MASK_2BITS 10
+#define PCI_MASTER2_REQ_MASK_2BITS 12
+#define INTERNAL_ARB_ENABLE_BIT 0
+
+#define LTQ_CGU_IFCCR 0x0018
+#define LTQ_CGU_PCICR 0x0034
+
+#define ltq_pci_w32(x, y) ltq_w32((x), ltq_pci_membase + (y))
+#define ltq_pci_r32(x) ltq_r32(ltq_pci_membase + (x))
+
+#define ltq_pci_cfg_w32(x, y) ltq_w32((x), ltq_pci_mapped_cfg + (y))
+#define ltq_pci_cfg_r32(x) ltq_r32(ltq_pci_mapped_cfg + (x))
+
+__iomem void *ltq_pci_mapped_cfg;
+static __iomem void *ltq_pci_membase;
+
+static int reset_gpio;
+static struct clk *clk_pci, *clk_external;
+static struct resource pci_io_resource;
+static struct resource pci_mem_resource;
+static struct pci_ops pci_ops = {
+ .read = ltq_pci_read_config_dword,
+ .write = ltq_pci_write_config_dword
+};
+
+static struct pci_controller pci_controller = {
+ .pci_ops = &pci_ops,
+ .mem_resource = &pci_mem_resource,
+ .mem_offset = 0x00000000UL,
+ .io_resource = &pci_io_resource,
+ .io_offset = 0x00000000UL,
+};
+
+static inline u32 ltq_calc_bar11mask(void)
+{
+ u32 mem, bar11mask;
+
+ /* BAR11MASK value depends on available memory on system. */
+ mem = get_num_physpages() * PAGE_SIZE;
+ bar11mask = (0x0ffffff0 & ~((1 << (fls(mem) - 1)) - 1)) | 8;
+
+ return bar11mask;
+}
+
+static int ltq_pci_startup(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ const __be32 *req_mask, *bus_clk;
+ u32 temp_buffer;
+
+ /* get our clocks */
+ clk_pci = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk_pci)) {
+ dev_err(&pdev->dev, "failed to get pci clock\n");
+ return PTR_ERR(clk_pci);
+ }
+
+ clk_external = clk_get(&pdev->dev, "external");
+ if (IS_ERR(clk_external)) {
+ clk_put(clk_pci);
+ dev_err(&pdev->dev, "failed to get external pci clock\n");
+ return PTR_ERR(clk_external);
+ }
+
+ /* read the bus speed that we want */
+ bus_clk = of_get_property(node, "lantiq,bus-clock", NULL);
+ if (bus_clk)
+ clk_set_rate(clk_pci, *bus_clk);
+
+ /* and enable the clocks */
+ clk_enable(clk_pci);
+ if (of_find_property(node, "lantiq,external-clock", NULL))
+ clk_enable(clk_external);
+ else
+ clk_disable(clk_external);
+
+ /* setup reset gpio used by pci */
+ reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
+ if (gpio_is_valid(reset_gpio)) {
+ int ret = devm_gpio_request(&pdev->dev,
+ reset_gpio, "pci-reset");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to request gpio %d\n", reset_gpio);
+ return ret;
+ }
+ gpio_direction_output(reset_gpio, 1);
+ }
+
+ /* enable auto-switching between PCI and EBU */
+ ltq_pci_w32(0xa, PCI_CR_CLK_CTRL);
+
+ /* busy, i.e. configuration is not done, PCI access has to be retried */
+ ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) & ~(1 << 24), PCI_CR_PCI_MOD);
+ wmb();
+ /* BUS Master/IO/MEM access */
+ ltq_pci_cfg_w32(ltq_pci_cfg_r32(PCI_CS_STS_CMD) | 7, PCI_CS_STS_CMD);
+
+ /* enable the 2 external PCI masters */
+ temp_buffer = ltq_pci_r32(PCI_CR_PC_ARB);
+ /* setup the request mask */
+ req_mask = of_get_property(node, "req-mask", NULL);
+ if (req_mask)
+ temp_buffer &= ~((*req_mask & 0xf) << 16);
+ else
+ temp_buffer &= ~0xf0000;
+ /* enable internal arbiter */
+ temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT);
+ /* enable internal PCI master request */
+ temp_buffer &= (~(3 << PCI_MASTER0_REQ_MASK_2BITS));
+
+ /* enable EBU request */
+ temp_buffer &= (~(3 << PCI_MASTER1_REQ_MASK_2BITS));
+
+ /* enable all external master requests */
+ temp_buffer &= (~(3 << PCI_MASTER2_REQ_MASK_2BITS));
+ ltq_pci_w32(temp_buffer, PCI_CR_PC_ARB);
+ wmb();
+
+ /* setup BAR memory regions */
+ ltq_pci_w32(0x18000000, PCI_CR_FCI_ADDR_MAP0);
+ ltq_pci_w32(0x18400000, PCI_CR_FCI_ADDR_MAP1);
+ ltq_pci_w32(0x18800000, PCI_CR_FCI_ADDR_MAP2);
+ ltq_pci_w32(0x18c00000, PCI_CR_FCI_ADDR_MAP3);
+ ltq_pci_w32(0x19000000, PCI_CR_FCI_ADDR_MAP4);
+ ltq_pci_w32(0x19400000, PCI_CR_FCI_ADDR_MAP5);
+ ltq_pci_w32(0x19800000, PCI_CR_FCI_ADDR_MAP6);
+ ltq_pci_w32(0x19c00000, PCI_CR_FCI_ADDR_MAP7);
+ ltq_pci_w32(0x1ae00000, PCI_CR_FCI_ADDR_MAP11hg);
+ ltq_pci_w32(ltq_calc_bar11mask(), PCI_CR_BAR11MASK);
+ ltq_pci_w32(0, PCI_CR_PCI_ADDR_MAP11);
+ ltq_pci_w32(0, PCI_CS_BASE_ADDR1);
+ /* both TX and RX endian swap are enabled */
+ ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_EOI) | 3, PCI_CR_PCI_EOI);
+ wmb();
+ ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR12MASK) | 0x80000000,
+ PCI_CR_BAR12MASK);
+ ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR13MASK) | 0x80000000,
+ PCI_CR_BAR13MASK);
+ /* use 8 dw burst length */
+ ltq_pci_w32(0x303, PCI_CR_FCI_BURST_LENGTH);
+ ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) | (1 << 24), PCI_CR_PCI_MOD);
+ wmb();
+
+ /* setup irq line */
+ ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_CON) | 0xc, LTQ_EBU_PCC_CON);
+ ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);
+
+ /* toggle reset pin */
+ if (gpio_is_valid(reset_gpio)) {
+ __gpio_set_value(reset_gpio, 0);
+ wmb();
+ mdelay(1);
+ __gpio_set_value(reset_gpio, 1);
+ }
+ return 0;
+}
+
+static int ltq_pci_probe(struct platform_device *pdev)
+{
+ struct resource *res_cfg, *res_bridge;
+
+ pci_clear_flags(PCI_PROBE_ONLY);
+
+ res_bridge = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ltq_pci_membase = devm_ioremap_resource(&pdev->dev, res_bridge);
+ if (IS_ERR(ltq_pci_membase))
+ return PTR_ERR(ltq_pci_membase);
+
+ res_cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ltq_pci_mapped_cfg = devm_ioremap_resource(&pdev->dev, res_cfg);
+ if (IS_ERR(ltq_pci_mapped_cfg))
+ return PTR_ERR(ltq_pci_mapped_cfg);
+
+ ltq_pci_startup(pdev);
+
+ pci_load_of_ranges(&pci_controller, pdev->dev.of_node);
+ register_pci_controller(&pci_controller);
+ return 0;
+}
+
+static const struct of_device_id ltq_pci_match[] = {
+ { .compatible = "lantiq,pci-xway" },
+ {},
+};
+
+static struct platform_driver ltq_pci_driver = {
+ .probe = ltq_pci_probe,
+ .driver = {
+ .name = "pci-xway",
+ .of_match_table = ltq_pci_match,
+ },
+};
+
+int __init pcibios_init(void)
+{
+ int ret = platform_driver_register(&ltq_pci_driver);
+ if (ret)
+ pr_info("pci-xway: Error registering platform driver!\n");
+ return ret;
+}
+
+arch_initcall(pcibios_init);
diff --git a/arch/mips/pci/pci-lantiq.h b/arch/mips/pci/pci-lantiq.h
new file mode 100644
index 000000000..fdbb0e89b
--- /dev/null
+++ b/arch/mips/pci/pci-lantiq.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#ifndef _LTQ_PCI_H__
+#define _LTQ_PCI_H__
+
+extern __iomem void *ltq_pci_mapped_cfg;
+extern int ltq_pci_read_config_dword(struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 *val);
+extern int ltq_pci_write_config_dword(struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 val);
+
+#endif
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
new file mode 100644
index 000000000..3a9091942
--- /dev/null
+++ b/arch/mips/pci/pci-legacy.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *
+ * Copyright (C) 2003, 04, 11 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2011 Wind River Systems,
+ * written by Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/of_address.h>
+
+#include <asm/cpu-info.h>
+
+/*
+ * If PCI_PROBE_ONLY in pci_flags is set, we don't change any PCI resource
+ * assignments.
+ */
+
+/*
+ * The PCI controller list.
+ */
+static LIST_HEAD(controllers);
+
+static int pci_initialized;
+
+/*
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might be mirrored at 0x0100-0x03ff..
+ */
+resource_size_t
+pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ struct pci_dev *dev = data;
+ struct pci_controller *hose = dev->sysdata;
+ resource_size_t start = res->start;
+
+ if (res->flags & IORESOURCE_IO) {
+ /* Make sure we start at our min on all hoses */
+ if (start < PCIBIOS_MIN_IO + hose->io_resource->start)
+ start = PCIBIOS_MIN_IO + hose->io_resource->start;
+
+ /*
+ * Put everything into 0x00-0xff region modulo 0x400
+ */
+ if (start & 0x300)
+ start = (start + 0x3ff) & ~0x3ff;
+ } else if (res->flags & IORESOURCE_MEM) {
+ /* Make sure we start at our min on all hoses */
+ if (start < PCIBIOS_MIN_MEM + hose->mem_resource->start)
+ start = PCIBIOS_MIN_MEM + hose->mem_resource->start;
+ }
+
+ return start;
+}
+
+static void pcibios_scanbus(struct pci_controller *hose)
+{
+ static int next_busno;
+ static int need_domain_info;
+ LIST_HEAD(resources);
+ struct pci_bus *bus;
+ struct pci_host_bridge *bridge;
+ int ret;
+
+ bridge = pci_alloc_host_bridge(0);
+ if (!bridge)
+ return;
+
+ if (hose->get_busno && pci_has_flag(PCI_PROBE_ONLY))
+ next_busno = (*hose->get_busno)();
+
+ pci_add_resource_offset(&resources,
+ hose->mem_resource, hose->mem_offset);
+ pci_add_resource_offset(&resources,
+ hose->io_resource, hose->io_offset);
+ pci_add_resource(&resources, hose->busn_resource);
+ list_splice_init(&resources, &bridge->windows);
+ bridge->dev.parent = NULL;
+ bridge->sysdata = hose;
+ bridge->busnr = next_busno;
+ bridge->ops = hose->pci_ops;
+ bridge->swizzle_irq = pci_common_swizzle;
+ bridge->map_irq = pcibios_map_irq;
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret) {
+ pci_free_host_bridge(bridge);
+ return;
+ }
+
+ hose->bus = bus = bridge->bus;
+
+ need_domain_info = need_domain_info || pci_domain_nr(bus);
+ set_pci_need_domain_info(hose, need_domain_info);
+
+ next_busno = bus->busn_res.end + 1;
+ /* Don't allow 8-bit bus number overflow inside the hose -
+ reserve some space for bridges. */
+ if (next_busno > 224) {
+ next_busno = 0;
+ need_domain_info = 1;
+ }
+
+ /*
+ * We insert PCI resources into the iomem_resource and
+ * ioport_resource trees in either pci_bus_claim_resources()
+ * or pci_bus_assign_resources().
+ */
+ if (pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_claim_resources(bus);
+ } else {
+ struct pci_bus *child;
+
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ }
+ pci_bus_add_devices(bus);
+}
+
+#ifdef CONFIG_OF
+void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
+{
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+
+ pr_info("PCI host bridge %pOF ranges:\n", node);
+ hose->of_node = node;
+
+ if (of_pci_range_parser_init(&parser, node))
+ return;
+
+ for_each_of_pci_range(&parser, &range) {
+ struct resource *res = NULL;
+
+ switch (range.flags & IORESOURCE_TYPE_BITS) {
+ case IORESOURCE_IO:
+ pr_info(" IO 0x%016llx..0x%016llx\n",
+ range.cpu_addr,
+ range.cpu_addr + range.size - 1);
+ hose->io_map_base =
+ (unsigned long)ioremap(range.cpu_addr,
+ range.size);
+ res = hose->io_resource;
+ break;
+ case IORESOURCE_MEM:
+ pr_info(" MEM 0x%016llx..0x%016llx\n",
+ range.cpu_addr,
+ range.cpu_addr + range.size - 1);
+ res = hose->mem_resource;
+ break;
+ }
+ if (res != NULL) {
+ res->name = node->full_name;
+ res->flags = range.flags;
+ res->start = range.cpu_addr;
+ res->end = range.cpu_addr + range.size - 1;
+ res->parent = res->child = res->sibling = NULL;
+ }
+ }
+}
+
+struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+
+ return of_node_get(hose->of_node);
+}
+#endif
+
+static DEFINE_MUTEX(pci_scan_mutex);
+
+void register_pci_controller(struct pci_controller *hose)
+{
+ struct resource *parent;
+
+ parent = hose->mem_resource->parent;
+ if (!parent)
+ parent = &iomem_resource;
+
+ if (request_resource(parent, hose->mem_resource) < 0)
+ goto out;
+
+ parent = hose->io_resource->parent;
+ if (!parent)
+ parent = &ioport_resource;
+
+ if (request_resource(parent, hose->io_resource) < 0) {
+ release_resource(hose->mem_resource);
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&hose->list);
+ list_add_tail(&hose->list, &controllers);
+
+ /*
+ * Do not panic here but later - this might happen before console init.
+ */
+ if (!hose->io_map_base) {
+ printk(KERN_WARNING
+ "registering PCI controller with io_map_base unset\n");
+ }
+
+ /*
+ * Scan the bus if it is registered after the PCI subsystem
+ * initialization.
+ */
+ if (pci_initialized) {
+ mutex_lock(&pci_scan_mutex);
+ pcibios_scanbus(hose);
+ mutex_unlock(&pci_scan_mutex);
+ }
+
+ return;
+
+out:
+ printk(KERN_WARNING
+ "Skipping PCI bus scan due to resource conflict\n");
+}
+
+static int __init pcibios_init(void)
+{
+ struct pci_controller *hose;
+
+ /* Scan all of the recorded PCI controllers. */
+ list_for_each_entry(hose, &controllers, list)
+ pcibios_scanbus(hose);
+
+ pci_initialized = 1;
+
+ return 0;
+}
+
+subsys_initcall(pcibios_init);
+
+static int pcibios_enable_resources(struct pci_dev *dev, int mask)
+{
+ u16 cmd, old_cmd;
+ int idx;
+ struct resource *r;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+ for (idx=0; idx < PCI_NUM_RESOURCES; idx++) {
+ /* Only set up the requested stuff */
+ if (!(mask & (1<<idx)))
+ continue;
+
+ r = &dev->resource[idx];
+ if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
+ continue;
+ if ((idx == PCI_ROM_RESOURCE) &&
+ (!(r->flags & IORESOURCE_ROM_ENABLE)))
+ continue;
+ if (!r->start && r->end) {
+ pci_err(dev,
+ "can't enable device: resource collisions\n");
+ return -EINVAL;
+ }
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+ if (cmd != old_cmd) {
+ pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
+}
+
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ int err;
+
+ if ((err = pcibios_enable_resources(dev, mask)) < 0)
+ return err;
+
+ return pcibios_plat_dev_init(dev);
+}
+
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+ struct pci_dev *dev = bus->self;
+
+ if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
+ (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ pci_read_bridge_bases(bus);
+ }
+}
+
+char * (*pcibios_plat_setup)(char *str) __initdata;
+
+char *__init pcibios_setup(char *str)
+{
+ if (pcibios_plat_setup)
+ return pcibios_plat_setup(str);
+ return str;
+}
diff --git a/arch/mips/pci/pci-malta.c b/arch/mips/pci/pci-malta.c
new file mode 100644
index 000000000..6aefdf20c
--- /dev/null
+++ b/arch/mips/pci/pci-malta.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Maciej W. Rozycki <macro@mips.com>
+ *
+ * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
+ *
+ * MIPS boards specific PCI support.
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/gt64120.h>
+#include <asm/mips-cps.h>
+#include <asm/mips-boards/generic.h>
+#include <asm/mips-boards/bonito64.h>
+#include <asm/mips-boards/msc01_pci.h>
+
+static struct resource bonito64_mem_resource = {
+ .name = "Bonito PCI MEM",
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource bonito64_io_resource = {
+ .name = "Bonito PCI I/O",
+ .start = 0x00000000UL,
+ .end = 0x000fffffUL,
+ .flags = IORESOURCE_IO,
+};
+
+static struct resource gt64120_mem_resource = {
+ .name = "GT-64120 PCI MEM",
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource gt64120_io_resource = {
+ .name = "GT-64120 PCI I/O",
+ .flags = IORESOURCE_IO,
+};
+
+static struct resource msc_mem_resource = {
+ .name = "MSC PCI MEM",
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource msc_io_resource = {
+ .name = "MSC PCI I/O",
+ .flags = IORESOURCE_IO,
+};
+
+extern struct pci_ops bonito64_pci_ops;
+extern struct pci_ops gt64xxx_pci0_ops;
+extern struct pci_ops msc_pci_ops;
+
+static struct pci_controller bonito64_controller = {
+ .pci_ops = &bonito64_pci_ops,
+ .io_resource = &bonito64_io_resource,
+ .mem_resource = &bonito64_mem_resource,
+ .io_offset = 0x00000000UL,
+};
+
+static struct pci_controller gt64120_controller = {
+ .pci_ops = &gt64xxx_pci0_ops,
+ .io_resource = &gt64120_io_resource,
+ .mem_resource = &gt64120_mem_resource,
+};
+
+static struct pci_controller msc_controller = {
+ .pci_ops = &msc_pci_ops,
+ .io_resource = &msc_io_resource,
+ .mem_resource = &msc_mem_resource,
+};
+
+void __init mips_pcibios_init(void)
+{
+ struct pci_controller *controller;
+ resource_size_t start, end, map, start1, end1, map1, map2, map3, mask;
+
+ switch (mips_revision_sconid) {
+ case MIPS_REVISION_SCON_GT64120:
+ /*
+ * Due to a bug in the Galileo system controller, we need
+ * to setup the PCI BAR for the Galileo internal registers.
+ * This should be done in the bios/bootprom and will be
+ * fixed in a later revision of YAMON (the MIPS boards
+ * boot prom).
+ */
+ GT_WRITE(GT_PCI0_CFGADDR_OFS,
+ (0 << GT_PCI0_CFGADDR_BUSNUM_SHF) | /* Local bus */
+ (0 << GT_PCI0_CFGADDR_DEVNUM_SHF) | /* GT64120 dev */
+ (0 << GT_PCI0_CFGADDR_FUNCTNUM_SHF) | /* Function 0*/
+ ((0x20/4) << GT_PCI0_CFGADDR_REGNUM_SHF) | /* BAR 4*/
+ GT_PCI0_CFGADDR_CONFIGEN_BIT);
+
+ /* Perform the write */
+ GT_WRITE(GT_PCI0_CFGDATA_OFS, CPHYSADDR(MIPS_GT_BASE));
+
+ /* Set up resource ranges from the controller's registers. */
+ start = GT_READ(GT_PCI0M0LD_OFS);
+ end = GT_READ(GT_PCI0M0HD_OFS);
+ map = GT_READ(GT_PCI0M0REMAP_OFS);
+ end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK);
+ start1 = GT_READ(GT_PCI0M1LD_OFS);
+ end1 = GT_READ(GT_PCI0M1HD_OFS);
+ map1 = GT_READ(GT_PCI0M1REMAP_OFS);
+ end1 = (end1 & GT_PCI_HD_MSK) | (start1 & ~GT_PCI_HD_MSK);
+ /* Cannot support multiple windows, use the wider. */
+ if (end1 - start1 > end - start) {
+ start = start1;
+ end = end1;
+ map = map1;
+ }
+ mask = ~(start ^ end);
+ /* We don't support remapping with a discontiguous mask. */
+ BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) &&
+ mask != ~((mask & -mask) - 1));
+ gt64120_mem_resource.start = start;
+ gt64120_mem_resource.end = end;
+ gt64120_controller.mem_offset = (start & mask) - (map & mask);
+ /* Addresses are 36-bit, so do shifts in the destinations. */
+ gt64120_mem_resource.start <<= GT_PCI_DCRM_SHF;
+ gt64120_mem_resource.end <<= GT_PCI_DCRM_SHF;
+ gt64120_mem_resource.end |= (1 << GT_PCI_DCRM_SHF) - 1;
+ gt64120_controller.mem_offset <<= GT_PCI_DCRM_SHF;
+
+ start = GT_READ(GT_PCI0IOLD_OFS);
+ end = GT_READ(GT_PCI0IOHD_OFS);
+ map = GT_READ(GT_PCI0IOREMAP_OFS);
+ end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK);
+ mask = ~(start ^ end);
+ /* We don't support remapping with a discontiguous mask. */
+ BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) &&
+ mask != ~((mask & -mask) - 1));
+ gt64120_io_resource.start = map & mask;
+ gt64120_io_resource.end = (map & mask) | ~mask;
+ gt64120_controller.io_offset = 0;
+ /* Addresses are 36-bit, so do shifts in the destinations. */
+ gt64120_io_resource.start <<= GT_PCI_DCRM_SHF;
+ gt64120_io_resource.end <<= GT_PCI_DCRM_SHF;
+ gt64120_io_resource.end |= (1 << GT_PCI_DCRM_SHF) - 1;
+
+ controller = &gt64120_controller;
+ break;
+
+ case MIPS_REVISION_SCON_BONITO:
+ /* Set up resource ranges from the controller's registers. */
+ map = BONITO_PCIMAP;
+ map1 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO0) >>
+ BONITO_PCIMAP_PCIMAP_LO0_SHIFT;
+ map2 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO1) >>
+ BONITO_PCIMAP_PCIMAP_LO1_SHIFT;
+ map3 = (BONITO_PCIMAP & BONITO_PCIMAP_PCIMAP_LO2) >>
+ BONITO_PCIMAP_PCIMAP_LO2_SHIFT;
+ /* Combine as many adjacent windows as possible. */
+ map = map1;
+ start = BONITO_PCILO0_BASE;
+ end = 1;
+ if (map3 == map2 + 1) {
+ map = map2;
+ start = BONITO_PCILO1_BASE;
+ end++;
+ }
+ if (map2 == map1 + 1) {
+ map = map1;
+ start = BONITO_PCILO0_BASE;
+ end++;
+ }
+ bonito64_mem_resource.start = start;
+ bonito64_mem_resource.end = start +
+ BONITO_PCIMAP_WINBASE(end) - 1;
+ bonito64_controller.mem_offset = start -
+ BONITO_PCIMAP_WINBASE(map);
+
+ controller = &bonito64_controller;
+ break;
+
+ case MIPS_REVISION_SCON_SOCIT:
+ case MIPS_REVISION_SCON_ROCIT:
+ case MIPS_REVISION_SCON_SOCITSC:
+ case MIPS_REVISION_SCON_SOCITSCP:
+ /* Set up resource ranges from the controller's registers. */
+ MSC_READ(MSC01_PCI_SC2PMBASL, start);
+ MSC_READ(MSC01_PCI_SC2PMMSKL, mask);
+ MSC_READ(MSC01_PCI_SC2PMMAPL, map);
+ msc_mem_resource.start = start & mask;
+ msc_mem_resource.end = (start & mask) | ~mask;
+ msc_controller.mem_offset = (start & mask) - (map & mask);
+ if (mips_cps_numiocu(0)) {
+ write_gcr_reg0_base(start);
+ write_gcr_reg0_mask(mask |
+ CM_GCR_REGn_MASK_CMTGT_IOCU0);
+ }
+ MSC_READ(MSC01_PCI_SC2PIOBASL, start);
+ MSC_READ(MSC01_PCI_SC2PIOMSKL, mask);
+ MSC_READ(MSC01_PCI_SC2PIOMAPL, map);
+ msc_io_resource.start = map & mask;
+ msc_io_resource.end = (map & mask) | ~mask;
+ msc_controller.io_offset = 0;
+ ioport_resource.end = ~mask;
+ if (mips_cps_numiocu(0)) {
+ write_gcr_reg1_base(start);
+ write_gcr_reg1_mask(mask |
+ CM_GCR_REGn_MASK_CMTGT_IOCU0);
+ }
+ /* If ranges overlap I/O takes precedence. */
+ start = start & mask;
+ end = start | ~mask;
+ if ((start >= msc_mem_resource.start &&
+ start <= msc_mem_resource.end) ||
+ (end >= msc_mem_resource.start &&
+ end <= msc_mem_resource.end)) {
+ /* Use the larger space. */
+ start = max(start, msc_mem_resource.start);
+ end = min(end, msc_mem_resource.end);
+ if (start - msc_mem_resource.start >=
+ msc_mem_resource.end - end)
+ msc_mem_resource.end = start - 1;
+ else
+ msc_mem_resource.start = end + 1;
+ }
+
+ controller = &msc_controller;
+ break;
+ default:
+ return;
+ }
+
+ /* PIIX4 ACPI starts at 0x1000 */
+ if (controller->io_resource->start < 0x00001000UL)
+ controller->io_resource->start = 0x00001000UL;
+
+ iomem_resource.end &= 0xfffffffffULL; /* 64 GB */
+ ioport_resource.end = controller->io_resource->end;
+
+ controller->io_map_base = mips_io_port_base;
+
+ register_pci_controller(controller);
+}
diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
new file mode 100644
index 000000000..e03293234
--- /dev/null
+++ b/arch/mips/pci/pci-mt7620.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Ralink MT7620A SoC PCI support
+ *
+ * Copyright (C) 2007-2013 Bruce Chang (Mediatek)
+ * Copyright (C) 2013-2016 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/reset.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/mt7620.h>
+
+#define RALINK_PCI_IO_MAP_BASE 0x10160000
+#define RALINK_PCI_MEMORY_BASE 0x0
+
+#define RALINK_INT_PCIE0 4
+
+#define RALINK_CLKCFG1 0x30
+#define RALINK_GPIOMODE 0x60
+
+#define PPLL_CFG1 0x9c
+#define PPLL_LD BIT(23)
+
+#define PPLL_DRV 0xa0
+#define PDRV_SW_SET BIT(31)
+#define LC_CKDRVPD BIT(19)
+#define LC_CKDRVOHZ BIT(18)
+#define LC_CKDRVHZ BIT(17)
+#define LC_CKTEST BIT(16)
+
+/* PCI Bridge registers */
+#define RALINK_PCI_PCICFG_ADDR 0x00
+#define PCIRST BIT(1)
+
+#define RALINK_PCI_PCIENA 0x0C
+#define PCIINT2 BIT(20)
+
+#define RALINK_PCI_CONFIG_ADDR 0x20
+#define RALINK_PCI_CONFIG_DATA_VIRT_REG 0x24
+#define RALINK_PCI_MEMBASE 0x28
+#define RALINK_PCI_IOBASE 0x2C
+
+/* PCI RC registers */
+#define RALINK_PCI0_BAR0SETUP_ADDR 0x10
+#define RALINK_PCI0_IMBASEBAR0_ADDR 0x18
+#define RALINK_PCI0_ID 0x30
+#define RALINK_PCI0_CLASS 0x34
+#define RALINK_PCI0_SUBID 0x38
+#define RALINK_PCI0_STATUS 0x50
+#define PCIE_LINK_UP_ST BIT(0)
+
+#define PCIEPHY0_CFG 0x90
+
+#define RALINK_PCIEPHY_P0_CTL_OFFSET 0x7498
+#define RALINK_PCIE0_CLK_EN BIT(26)
+
+#define BUSY 0x80000000
+#define WAITRETRY_MAX 10
+#define WRITE_MODE (1UL << 23)
+#define DATA_SHIFT 0
+#define ADDR_SHIFT 8
+
+
+static void __iomem *bridge_base;
+static void __iomem *pcie_base;
+
+static struct reset_control *rstpcie0;
+
+static inline void bridge_w32(u32 val, unsigned reg)
+{
+ iowrite32(val, bridge_base + reg);
+}
+
+static inline u32 bridge_r32(unsigned reg)
+{
+ return ioread32(bridge_base + reg);
+}
+
+static inline void pcie_w32(u32 val, unsigned reg)
+{
+ iowrite32(val, pcie_base + reg);
+}
+
+static inline u32 pcie_r32(unsigned reg)
+{
+ return ioread32(pcie_base + reg);
+}
+
+static inline void pcie_m32(u32 clr, u32 set, unsigned reg)
+{
+ u32 val = pcie_r32(reg);
+
+ val &= ~clr;
+ val |= set;
+ pcie_w32(val, reg);
+}
+
+static int wait_pciephy_busy(void)
+{
+ unsigned long reg_value = 0x0, retry = 0;
+
+ while (1) {
+ reg_value = pcie_r32(PCIEPHY0_CFG);
+
+ if (reg_value & BUSY)
+ mdelay(100);
+ else
+ break;
+ if (retry++ > WAITRETRY_MAX) {
+ pr_warn("PCIE-PHY retry failed.\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static void pcie_phy(unsigned long addr, unsigned long val)
+{
+ wait_pciephy_busy();
+ pcie_w32(WRITE_MODE | (val << DATA_SHIFT) | (addr << ADDR_SHIFT),
+ PCIEPHY0_CFG);
+ mdelay(1);
+ wait_pciephy_busy();
+}
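+
+/*
+ * Editor's note: an illustrative sketch, not part of the original driver.
+ * pcie_phy() packs the PHY register address and value into one PCIEPHY0_CFG
+ * write: WRITE_MODE is bit 23, the address goes at ADDR_SHIFT (bit 8) and
+ * the data at DATA_SHIFT (bit 0).  The call pcie_phy(0x68, 0xB4) made below
+ * therefore writes 0x008068b4.
+ */
+#if 0
+static u32 example_phy_word(unsigned long addr, unsigned long val)
+{
+	/* e.g. example_phy_word(0x68, 0xB4) == 0x008068b4 */
+	return WRITE_MODE | (val << DATA_SHIFT) | (addr << ADDR_SHIFT);
+}
+#endif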
+
+static int pci_config_read(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ unsigned int slot = PCI_SLOT(devfn);
+ u8 func = PCI_FUNC(devfn);
+ u32 address;
+ u32 data;
+ u32 num = 0;
+
+ if (bus)
+ num = bus->number;
+
+ address = (((where & 0xF00) >> 8) << 24) | (num << 16) | (slot << 11) |
+ (func << 8) | (where & 0xfc) | 0x80000000;
+ bridge_w32(address, RALINK_PCI_CONFIG_ADDR);
+ data = bridge_r32(RALINK_PCI_CONFIG_DATA_VIRT_REG);
+
+ switch (size) {
+ case 1:
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ break;
+ case 2:
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ break;
+ case 4:
+ *val = data;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
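+
+/*
+ * Editor's note: an illustrative sketch, not part of the original driver.
+ * The word written to RALINK_PCI_CONFIG_ADDR above follows the familiar
+ * CF8-style layout: enable bit 31, extended register bits in 27:24, bus in
+ * 23:16, device in 15:11, function in 10:8 and the dword-aligned register
+ * offset in 7:2.  For bus 1, device 0, function 0, offset 0x10 this gives
+ * 0x80010010.
+ */
+#if 0
+static u32 example_cfg_address(u32 bus, u32 slot, u32 func, u32 where)
+{
+	/* e.g. example_cfg_address(1, 0, 0, 0x10) == 0x80010010 */
+	return (((where & 0xF00) >> 8) << 24) | (bus << 16) | (slot << 11) |
+	       (func << 8) | (where & 0xfc) | 0x80000000;
+}
+#endif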
+
+static int pci_config_write(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 val)
+{
+ unsigned int slot = PCI_SLOT(devfn);
+ u8 func = PCI_FUNC(devfn);
+ u32 address;
+ u32 data;
+ u32 num = 0;
+
+ if (bus)
+ num = bus->number;
+
+ address = (((where & 0xF00) >> 8) << 24) | (num << 16) | (slot << 11) |
+ (func << 8) | (where & 0xfc) | 0x80000000;
+ bridge_w32(address, RALINK_PCI_CONFIG_ADDR);
+ data = bridge_r32(RALINK_PCI_CONFIG_DATA_VIRT_REG);
+
+ switch (size) {
+ case 1:
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ break;
+ case 2:
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ break;
+ case 4:
+ data = val;
+ break;
+ }
+
+ bridge_w32(data, RALINK_PCI_CONFIG_DATA_VIRT_REG);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops mt7620_pci_ops = {
+ .read = pci_config_read,
+ .write = pci_config_write,
+};
+
+static struct resource mt7620_res_pci_mem1;
+static struct resource mt7620_res_pci_io1;
+struct pci_controller mt7620_controller = {
+ .pci_ops = &mt7620_pci_ops,
+ .mem_resource = &mt7620_res_pci_mem1,
+ .mem_offset = 0x00000000UL,
+ .io_resource = &mt7620_res_pci_io1,
+ .io_offset = 0x00000000UL,
+ .io_map_base = 0xa0000000,
+};
+
+static int mt7620_pci_hw_init(struct platform_device *pdev)
+{
+ /* bypass PCIe DLL */
+ pcie_phy(0x0, 0x80);
+ pcie_phy(0x1, 0x04);
+
+ /* Elastic buffer control */
+ pcie_phy(0x68, 0xB4);
+
+ /* put core into reset */
+ pcie_m32(0, PCIRST, RALINK_PCI_PCICFG_ADDR);
+ reset_control_assert(rstpcie0);
+
+ /* disable power and all clocks */
+ rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
+ rt_sysc_m32(LC_CKDRVPD, PDRV_SW_SET, PPLL_DRV);
+
+ /* bring core out of reset */
+ reset_control_deassert(rstpcie0);
+ rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
+ mdelay(100);
+
+ if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) {
+ dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n");
+ reset_control_assert(rstpcie0);
+ rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
+ return -1;
+ }
+
+ /* power up the bus */
+ rt_sysc_m32(LC_CKDRVHZ | LC_CKDRVOHZ, LC_CKDRVPD | PDRV_SW_SET,
+ PPLL_DRV);
+
+ return 0;
+}
+
+static int mt7628_pci_hw_init(struct platform_device *pdev)
+{
+ u32 val = 0;
+
+ /* bring the core out of reset */
+ rt_sysc_m32(BIT(16), 0, RALINK_GPIOMODE);
+ reset_control_deassert(rstpcie0);
+
+ /* enable the pci clk */
+ rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
+ mdelay(100);
+
+ /* voodoo from the SDK driver */
+ pcie_m32(~0xff, 0x5, RALINK_PCIEPHY_P0_CTL_OFFSET);
+
+ pci_config_read(NULL, 0, 0x70c, 4, &val);
+	val &= ~(0xff << 8);
+ val |= 0x50 << 8;
+ pci_config_write(NULL, 0, 0x70c, 4, val);
+
+ pci_config_read(NULL, 0, 0x70c, 4, &val);
+ dev_err(&pdev->dev, "Port 0 N_FTS = %x\n", (unsigned int) val);
+
+ return 0;
+}
+
+static int mt7620_pci_probe(struct platform_device *pdev)
+{
+ struct resource *bridge_res = platform_get_resource(pdev,
+ IORESOURCE_MEM, 0);
+ struct resource *pcie_res = platform_get_resource(pdev,
+ IORESOURCE_MEM, 1);
+ u32 val = 0;
+
+ rstpcie0 = devm_reset_control_get_exclusive(&pdev->dev, "pcie0");
+ if (IS_ERR(rstpcie0))
+ return PTR_ERR(rstpcie0);
+
+ bridge_base = devm_ioremap_resource(&pdev->dev, bridge_res);
+ if (IS_ERR(bridge_base))
+ return PTR_ERR(bridge_base);
+
+ pcie_base = devm_ioremap_resource(&pdev->dev, pcie_res);
+ if (IS_ERR(pcie_base))
+ return PTR_ERR(pcie_base);
+
+ iomem_resource.start = 0;
+ iomem_resource.end = ~0;
+ ioport_resource.start = 0;
+ ioport_resource.end = ~0;
+
+ /* bring up the pci core */
+ switch (ralink_soc) {
+ case MT762X_SOC_MT7620A:
+ if (mt7620_pci_hw_init(pdev))
+ return -1;
+ break;
+
+ case MT762X_SOC_MT7628AN:
+ case MT762X_SOC_MT7688:
+ if (mt7628_pci_hw_init(pdev))
+ return -1;
+ break;
+
+ default:
+ dev_err(&pdev->dev, "pcie is not supported on this hardware\n");
+ return -1;
+ }
+ mdelay(50);
+
+ /* enable write access */
+ pcie_m32(PCIRST, 0, RALINK_PCI_PCICFG_ADDR);
+ mdelay(100);
+
+ /* check if there is a card present */
+ if ((pcie_r32(RALINK_PCI0_STATUS) & PCIE_LINK_UP_ST) == 0) {
+ reset_control_assert(rstpcie0);
+ rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
+ if (ralink_soc == MT762X_SOC_MT7620A)
+ rt_sysc_m32(LC_CKDRVPD, PDRV_SW_SET, PPLL_DRV);
+		dev_err(&pdev->dev, "PCIE0: no card present, disabling it (RST & CLK)\n");
+ return -1;
+ }
+
+ /* setup ranges */
+ bridge_w32(0xffffffff, RALINK_PCI_MEMBASE);
+ bridge_w32(RALINK_PCI_IO_MAP_BASE, RALINK_PCI_IOBASE);
+
+ pcie_w32(0x7FFF0001, RALINK_PCI0_BAR0SETUP_ADDR);
+ pcie_w32(RALINK_PCI_MEMORY_BASE, RALINK_PCI0_IMBASEBAR0_ADDR);
+ pcie_w32(0x06040001, RALINK_PCI0_CLASS);
+
+ /* enable interrupts */
+ pcie_m32(0, PCIINT2, RALINK_PCI_PCIENA);
+
+ /* voodoo from the SDK driver */
+ pci_config_read(NULL, 0, 4, 4, &val);
+ pci_config_write(NULL, 0, 4, 4, val | 0x7);
+
+ pci_load_of_ranges(&mt7620_controller, pdev->dev.of_node);
+ register_pci_controller(&mt7620_controller);
+
+ return 0;
+}
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ u16 cmd;
+ u32 val;
+ int irq = 0;
+
+ if ((dev->bus->number == 0) && (slot == 0)) {
+ pcie_w32(0x7FFF0001, RALINK_PCI0_BAR0SETUP_ADDR);
+ pci_config_write(dev->bus, 0, PCI_BASE_ADDRESS_0, 4,
+ RALINK_PCI_MEMORY_BASE);
+ pci_config_read(dev->bus, 0, PCI_BASE_ADDRESS_0, 4, &val);
+ } else if ((dev->bus->number == 1) && (slot == 0x0)) {
+ irq = RALINK_INT_PCIE0;
+ } else {
+ dev_err(&dev->dev, "no irq found - bus=0x%x, slot = 0x%x\n",
+ dev->bus->number, slot);
+ return 0;
+ }
+ dev_err(&dev->dev, "card - bus=0x%x, slot = 0x%x irq=%d\n",
+ dev->bus->number, slot, irq);
+
+ /* configure the cache line size to 0x14 */
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 0x14);
+
+ /* configure latency timer to 0xff */
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xff);
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+
+ /* setup the slot */
+ cmd = cmd | PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+
+ return irq;
+}
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
+
+static const struct of_device_id mt7620_pci_ids[] = {
+ { .compatible = "mediatek,mt7620-pci" },
+ {},
+};
+
+static struct platform_driver mt7620_pci_driver = {
+ .probe = mt7620_pci_probe,
+ .driver = {
+ .name = "mt7620-pci",
+ .of_match_table = of_match_ptr(mt7620_pci_ids),
+ },
+};
+
+static int __init mt7620_pci_init(void)
+{
+ return platform_driver_register(&mt7620_pci_driver);
+}
+
+arch_initcall(mt7620_pci_init);
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
new file mode 100644
index 000000000..fc29b85cf
--- /dev/null
+++ b/arch/mips/pci/pci-octeon.c
@@ -0,0 +1,711 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2009 Cavium Networks
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/swiotlb.h>
+
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-pci-defs.h>
+#include <asm/octeon/pci-octeon.h>
+
+#define USE_OCTEON_INTERNAL_ARBITER
+
+/*
+ * Octeon's PCI controller uses did=3, subdid=2 for PCI IO
+ * addresses. Use PCI endian swapping 1 so no address swapping is
+ * necessary. The Linux io routines will endian swap the data.
+ */
+#define OCTEON_PCI_IOSPACE_BASE 0x80011a0400000000ull
+#define OCTEON_PCI_IOSPACE_SIZE (1ull<<32)
+
+/* Octeon's PCI controller uses did=3, subdid=3 for PCI memory. */
+#define OCTEON_PCI_MEMSPACE_OFFSET (0x00011b0000000000ull)
+
+u64 octeon_bar1_pci_phys;
+
+/**
+ * This is the bit decoding used for the Octeon PCI controller addresses
+ */
+union octeon_pci_address {
+ uint64_t u64;
+ struct {
+ uint64_t upper:2;
+ uint64_t reserved:13;
+ uint64_t io:1;
+ uint64_t did:5;
+ uint64_t subdid:3;
+ uint64_t reserved2:4;
+ uint64_t endian_swap:2;
+ uint64_t reserved3:10;
+ uint64_t bus:8;
+ uint64_t dev:5;
+ uint64_t func:3;
+ uint64_t reg:8;
+ } s;
+};
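+
+/*
+ * Editor's note: an illustrative sketch, not part of the original file.  On
+ * the big-endian Octeon the first field declared above occupies the most
+ * significant bits, so a config-space access to bus 0, device 1, function 0,
+ * register 0x04 (with upper=2, io=1, did=3, subdid=1, endian_swap=1 as set
+ * up in octeon_read_config() below) encodes to 0x8001190400000804.
+ */
+#if 0
+static uint64_t example_config_address(void)
+{
+	union octeon_pci_address a = { .u64 = 0 };
+
+	a.s.upper = 2;
+	a.s.io = 1;
+	a.s.did = 3;
+	a.s.subdid = 1;
+	a.s.endian_swap = 1;
+	a.s.bus = 0;
+	a.s.dev = 1;
+	a.s.func = 0;
+	a.s.reg = 0x04;
+
+	return a.u64;	/* 0x8001190400000804 on big-endian Octeon */
+}
+#endif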
+
+int (*octeon_pcibios_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
+enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID;
+
+/**
+ * Map a PCI device to the appropriate interrupt line
+ *
+ * @dev: The Linux PCI device structure for the device to map
+ * @slot: The slot number for this device on __BUS 0__. Linux
+ * enumerates through all the bridges and figures out the
+ * slot on Bus 0 where this device eventually hooks to.
+ * @pin: The PCI interrupt pin read from the device, then swizzled
+ * as it goes through each bridge.
+ * Returns Interrupt number for the device
+ */
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ if (octeon_pcibios_map_irq)
+ return octeon_pcibios_map_irq(dev, slot, pin);
+ else
+ panic("octeon_pcibios_map_irq not set.");
+}
+
+
+/*
+ * Called to perform platform specific PCI setup
+ */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ uint16_t config;
+ uint32_t dconfig;
+ int pos;
+ /*
+ * Force the Cache line setting to 64 bytes. The standard
+ * Linux bus scan doesn't seem to set it. Octeon really has
+ * 128 byte lines, but Intel bridges get really upset if you
+ * try and set values above 64 bytes. Value is specified in
+ * 32bit words.
+ */
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 64 / 4);
+ /* Set latency timers for all devices */
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
+
+ /* Enable reporting System errors and parity errors on all devices */
+ /* Enable parity checking and error reporting */
+ pci_read_config_word(dev, PCI_COMMAND, &config);
+ config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
+ pci_write_config_word(dev, PCI_COMMAND, config);
+
+ if (dev->subordinate) {
+ /* Set latency timers on sub bridges */
+ pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 64);
+ /* More bridge error detection */
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config);
+ config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config);
+ }
+
+ /* Enable the PCIe normal error reporting */
+ config = PCI_EXP_DEVCTL_CERE; /* Correctable Error Reporting */
+ config |= PCI_EXP_DEVCTL_NFERE; /* Non-Fatal Error Reporting */
+ config |= PCI_EXP_DEVCTL_FERE; /* Fatal Error Reporting */
+ config |= PCI_EXP_DEVCTL_URRE; /* Unsupported Request */
+ pcie_capability_set_word(dev, PCI_EXP_DEVCTL, config);
+
+ /* Find the Advanced Error Reporting capability */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (pos) {
+ /* Clear Uncorrectable Error Status */
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
+ &dconfig);
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
+ dconfig);
+ /* Enable reporting of all uncorrectable errors */
+ /* Uncorrectable Error Mask - turned on bits disable errors */
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0);
+ /*
+ * Leave severity at HW default. This only controls if
+ * errors are reported as uncorrectable or
+ * correctable, not if the error is reported.
+ */
+ /* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
+ /* Clear Correctable Error Status */
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig);
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig);
+ /* Enable reporting of all correctable errors */
+ /* Correctable Error Mask - turned on bits disable errors */
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0);
+ /* Advanced Error Capabilities */
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig);
+		/* ECRC Generation Enable */
+		if (dconfig & PCI_ERR_CAP_ECRC_GENC)
+			dconfig |= PCI_ERR_CAP_ECRC_GENE;
+		/* ECRC Check Enable */
+		if (dconfig & PCI_ERR_CAP_ECRC_CHKC)
+			dconfig |= PCI_ERR_CAP_ECRC_CHKE;
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig);
+ /* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */
+ /* Report all errors to the root complex */
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND,
+ PCI_ERR_ROOT_CMD_COR_EN |
+ PCI_ERR_ROOT_CMD_NONFATAL_EN |
+ PCI_ERR_ROOT_CMD_FATAL_EN);
+ /* Clear the Root status register */
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig);
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
+ }
+
+ return 0;
+}
+
+/**
+ * Return the mapping of PCI device number to IRQ line. Each
+ * character in the return string represents the interrupt
+ * line for the device at that position. Device 1 maps to the
+ * first character, etc. The characters A-D are used for PCI
+ * interrupts.
+ *
+ * Returns PCI interrupt mapping
+ */
+const char *octeon_get_pci_interrupts(void)
+{
+ /*
+ * Returning an empty string causes the interrupts to be
+ * routed based on the PCI specification. From the PCI spec:
+ *
+ * INTA# of Device Number 0 is connected to IRQW on the system
+ * board. (Device Number has no significance regarding being
+ * located on the system board or in a connector.) INTA# of
+ * Device Number 1 is connected to IRQX on the system
+ * board. INTA# of Device Number 2 is connected to IRQY on the
+ * system board. INTA# of Device Number 3 is connected to IRQZ
+ * on the system board. The table below describes how each
+ * agent's INTx# lines are connected to the system board
+ * interrupt lines. The following equation can be used to
+ * determine to which INTx# signal on the system board a given
+ * device's INTx# line(s) is connected.
+ *
+ * MB = (D + I) MOD 4 MB = System board Interrupt (IRQW = 0,
+ * IRQX = 1, IRQY = 2, and IRQZ = 3) D = Device Number I =
+ * Interrupt Number (INTA# = 0, INTB# = 1, INTC# = 2, and
+ * INTD# = 3)
+ */
+ if (of_machine_is_compatible("dlink,dsr-500n"))
+ return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
+ switch (octeon_bootinfo->board_type) {
+ case CVMX_BOARD_TYPE_NAO38:
+ /* This is really the NAC38 */
+ return "AAAAADABAAAAAAAAAAAAAAAAAAAAAAAA";
+ case CVMX_BOARD_TYPE_EBH3100:
+ case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
+ case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+ return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ return "AABCD";
+ case CVMX_BOARD_TYPE_CUST_DSR1000N:
+ return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
+ case CVMX_BOARD_TYPE_THUNDER:
+ case CVMX_BOARD_TYPE_EBH3000:
+ default:
+ return "";
+ }
+}
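+
+/*
+ * Editor's note: an illustrative sketch, not part of the original file.
+ * With the equation quoted above, MB = (D + I) MOD 4, device 2 asserting
+ * INTB# (D = 2, I = 1) lands on system board interrupt 3, i.e. IRQZ.
+ */
+#if 0
+static int example_spec_routing(int device, int intx /* INTA#=0 .. INTD#=3 */)
+{
+	/* e.g. example_spec_routing(2, 1) == 3 (IRQZ) */
+	return (device + intx) % 4;	/* 0=IRQW, 1=IRQX, 2=IRQY, 3=IRQZ */
+}
+#endif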
+
+/**
+ * Map a PCI device to the appropriate interrupt line
+ *
+ * @dev: The Linux PCI device structure for the device to map
+ * @slot: The slot number for this device on __BUS 0__. Linux
+ * enumerates through all the bridges and figures out the
+ * slot on Bus 0 where this device eventually hooks to.
+ * @pin: The PCI interrupt pin read from the device, then swizzled
+ * as it goes through each bridge.
+ * Returns Interrupt number for the device
+ */
+int __init octeon_pci_pcibios_map_irq(const struct pci_dev *dev,
+ u8 slot, u8 pin)
+{
+ int irq_num;
+ const char *interrupts;
+ int dev_num;
+
+ /* Get the board specific interrupt mapping */
+ interrupts = octeon_get_pci_interrupts();
+
+ dev_num = dev->devfn >> 3;
+ if (dev_num < strlen(interrupts))
+ irq_num = ((interrupts[dev_num] - 'A' + pin - 1) & 3) +
+ OCTEON_IRQ_PCI_INT0;
+ else
+ irq_num = ((slot + pin - 3) & 3) + OCTEON_IRQ_PCI_INT0;
+ return irq_num;
+}
+
+
+/*
+ * Read a value from configuration space
+ */
+static int octeon_read_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 *val)
+{
+ union octeon_pci_address pci_addr;
+
+ pci_addr.u64 = 0;
+ pci_addr.s.upper = 2;
+ pci_addr.s.io = 1;
+ pci_addr.s.did = 3;
+ pci_addr.s.subdid = 1;
+ pci_addr.s.endian_swap = 1;
+ pci_addr.s.bus = bus->number;
+ pci_addr.s.dev = devfn >> 3;
+ pci_addr.s.func = devfn & 0x7;
+ pci_addr.s.reg = reg;
+
+ switch (size) {
+ case 4:
+ *val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64));
+ return PCIBIOS_SUCCESSFUL;
+ case 2:
+ *val = le16_to_cpu(cvmx_read64_uint16(pci_addr.u64));
+ return PCIBIOS_SUCCESSFUL;
+ case 1:
+ *val = cvmx_read64_uint8(pci_addr.u64);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+
+/*
+ * Write a value to PCI configuration space
+ */
+static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 val)
+{
+ union octeon_pci_address pci_addr;
+
+ pci_addr.u64 = 0;
+ pci_addr.s.upper = 2;
+ pci_addr.s.io = 1;
+ pci_addr.s.did = 3;
+ pci_addr.s.subdid = 1;
+ pci_addr.s.endian_swap = 1;
+ pci_addr.s.bus = bus->number;
+ pci_addr.s.dev = devfn >> 3;
+ pci_addr.s.func = devfn & 0x7;
+ pci_addr.s.reg = reg;
+
+ switch (size) {
+ case 4:
+ cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val));
+ return PCIBIOS_SUCCESSFUL;
+ case 2:
+ cvmx_write64_uint16(pci_addr.u64, cpu_to_le16(val));
+ return PCIBIOS_SUCCESSFUL;
+ case 1:
+ cvmx_write64_uint8(pci_addr.u64, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+
+static struct pci_ops octeon_pci_ops = {
+ .read = octeon_read_config,
+ .write = octeon_write_config,
+};
+
+static struct resource octeon_pci_mem_resource = {
+ .start = 0,
+ .end = 0,
+ .name = "Octeon PCI MEM",
+ .flags = IORESOURCE_MEM,
+};
+
+/*
+ * PCI ports must be above 16KB so that the ISA bus filtering in the
+ * PCI-X to PCI bridge does not claim them.
+ */
+static struct resource octeon_pci_io_resource = {
+ .start = 0x4000,
+ .end = OCTEON_PCI_IOSPACE_SIZE - 1,
+ .name = "Octeon PCI IO",
+ .flags = IORESOURCE_IO,
+};
+
+static struct pci_controller octeon_pci_controller = {
+ .pci_ops = &octeon_pci_ops,
+ .mem_resource = &octeon_pci_mem_resource,
+ .mem_offset = OCTEON_PCI_MEMSPACE_OFFSET,
+ .io_resource = &octeon_pci_io_resource,
+ .io_offset = 0,
+ .io_map_base = OCTEON_PCI_IOSPACE_BASE,
+};
+
+
+/*
+ * Low level initialize the Octeon PCI controller
+ */
+static void octeon_pci_initialize(void)
+{
+ union cvmx_pci_cfg01 cfg01;
+ union cvmx_npi_ctl_status ctl_status;
+ union cvmx_pci_ctl_status_2 ctl_status_2;
+ union cvmx_pci_cfg19 cfg19;
+ union cvmx_pci_cfg16 cfg16;
+ union cvmx_pci_cfg22 cfg22;
+ union cvmx_pci_cfg56 cfg56;
+
+ /* Reset the PCI Bus */
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x1);
+ cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+
+ udelay(2000); /* Hold PCI reset for 2 ms */
+
+ ctl_status.u64 = 0; /* cvmx_read_csr(CVMX_NPI_CTL_STATUS); */
+ ctl_status.s.max_word = 1;
+ ctl_status.s.timer = 1;
+ cvmx_write_csr(CVMX_NPI_CTL_STATUS, ctl_status.u64);
+
+	/* Deassert PCI reset and advertise PCI-X Host Mode Device Capability
+	   (64b) */
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x4);
+ cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+
+ udelay(2000); /* Wait 2 ms after deasserting PCI reset */
+
+ ctl_status_2.u32 = 0;
+ ctl_status_2.s.tsr_hwm = 1; /* Initializes to 0. Must be set
+ before any PCI reads. */
+ ctl_status_2.s.bar2pres = 1; /* Enable BAR2 */
+ ctl_status_2.s.bar2_enb = 1;
+ ctl_status_2.s.bar2_cax = 1; /* Don't use L2 */
+ ctl_status_2.s.bar2_esx = 1;
+ ctl_status_2.s.pmo_amod = 1; /* Round robin priority */
+ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
+ /* BAR1 hole */
+ ctl_status_2.s.bb1_hole = OCTEON_PCI_BAR1_HOLE_BITS;
+ ctl_status_2.s.bb1_siz = 1; /* BAR1 is 2GB */
+ ctl_status_2.s.bb_ca = 1; /* Don't use L2 with big bars */
+ ctl_status_2.s.bb_es = 1; /* Big bar in byte swap mode */
+ ctl_status_2.s.bb1 = 1; /* BAR1 is big */
+ ctl_status_2.s.bb0 = 1; /* BAR0 is big */
+ }
+
+ octeon_npi_write32(CVMX_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32);
+ udelay(2000); /* Wait 2 ms before doing PCI reads */
+
+ ctl_status_2.u32 = octeon_npi_read32(CVMX_NPI_PCI_CTL_STATUS_2);
+ pr_notice("PCI Status: %s %s-bit\n",
+ ctl_status_2.s.ap_pcix ? "PCI-X" : "PCI",
+ ctl_status_2.s.ap_64ad ? "64" : "32");
+
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ union cvmx_pci_cnt_reg cnt_reg_start;
+ union cvmx_pci_cnt_reg cnt_reg_end;
+ unsigned long cycles, pci_clock;
+
+ cnt_reg_start.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
+ cycles = read_c0_cvmcount();
+ udelay(1000);
+ cnt_reg_end.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
+ cycles = read_c0_cvmcount() - cycles;
+ pci_clock = (cnt_reg_end.s.pcicnt - cnt_reg_start.s.pcicnt) /
+ (cycles / (mips_hpt_frequency / 1000000));
+ pr_notice("PCI Clock: %lu MHz\n", pci_clock);
+ }
+
+ /*
+ * TDOMC must be set to one in PCI mode. TDOMC should be set to 4
+ * in PCI-X mode to allow four outstanding splits. Otherwise,
+ * should not change from its reset value. Don't write PCI_CFG19
+ * in PCI mode (0x82000001 reset value), write it to 0x82000004
+ * after PCI-X mode is known. MRBCI,MDWE,MDRE -> must be zero.
+ * MRBCM -> must be one.
+ */
+ if (ctl_status_2.s.ap_pcix) {
+ cfg19.u32 = 0;
+ /*
+ * Target Delayed/Split request outstanding maximum
+ * count. [1..31] and 0=32. NOTE: If the user
+ * programs these bits beyond the Designed Maximum
+ * outstanding count, then the designed maximum table
+ * depth will be used instead. No additional
+ * Deferred/Split transactions will be accepted if
+ * this outstanding maximum count is
+ * reached. Furthermore, no additional deferred/split
+ * transactions will be accepted if the I/O delay/ I/O
+ * Split Request outstanding maximum is reached.
+ */
+ cfg19.s.tdomc = 4;
+		/*
+		 * Master Deferred Read Request Outstanding Max Count
+		 * (PCI only).
+		 *
+		 *   CR4C[26:24]   Max SAC cycles   Max DAC cycles
+		 *       000              8                4
+		 *       001              1                0
+		 *       010              2                1
+		 *       011              3                1
+		 *       100              4                2
+		 *       101              5                2
+		 *       110              6                3
+		 *       111              7                3
+		 *
+		 * For example, if these bits are programmed to 100, the
+		 * core can support 2 DAC cycles, 4 SAC cycles or a
+		 * combination of 1 DAC and 2 SAC cycles.  NOTE: For the
+		 * PCI-X maximum outstanding split transactions, refer to
+		 * CRE0[22:20].
+		 */
+ cfg19.s.mdrrmc = 2;
+ /*
+ * Master Request (Memory Read) Byte Count/Byte Enable
+ * select. 0 = Byte Enables valid. In PCI mode, a
+ * burst transaction cannot be performed using Memory
+		 * Read command=4'h6. 1 = DWORD Byte Count valid
+ * (default). In PCI Mode, the memory read byte
+ * enables are automatically generated by the
+ * core. Note: N3 Master Request transaction sizes are
+ * always determined through the
+ * am_attr[<35:32>|<7:0>] field.
+ */
+ cfg19.s.mrbcm = 1;
+ octeon_npi_write32(CVMX_NPI_PCI_CFG19, cfg19.u32);
+ }
+
+
+ cfg01.u32 = 0;
+ cfg01.s.msae = 1; /* Memory Space Access Enable */
+ cfg01.s.me = 1; /* Master Enable */
+ cfg01.s.pee = 1; /* PERR# Enable */
+ cfg01.s.see = 1; /* System Error Enable */
+ cfg01.s.fbbe = 1; /* Fast Back to Back Transaction Enable */
+
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+
+#ifdef USE_OCTEON_INTERNAL_ARBITER
+ /*
+ * When OCTEON is a PCI host, most systems will use OCTEON's
+ * internal arbiter, so must enable it before any PCI/PCI-X
+ * traffic can occur.
+ */
+ {
+ union cvmx_npi_pci_int_arb_cfg pci_int_arb_cfg;
+
+ pci_int_arb_cfg.u64 = 0;
+ pci_int_arb_cfg.s.en = 1; /* Internal arbiter enable */
+ cvmx_write_csr(CVMX_NPI_PCI_INT_ARB_CFG, pci_int_arb_cfg.u64);
+ }
+#endif /* USE_OCTEON_INTERNAL_ARBITER */
+
+ /*
+ * Preferably written to 1 to set MLTD. [RDSATI,TRTAE,
+ * TWTAE,TMAE,DPPMR -> must be zero. TILT -> must not be set to
+ * 1..7.
+ */
+ cfg16.u32 = 0;
+ cfg16.s.mltd = 1; /* Master Latency Timer Disable */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG16, cfg16.u32);
+
+ /*
+ * Should be written to 0x4ff00. MTTV -> must be zero.
+ * FLUSH -> must be 1. MRV -> should be 0xFF.
+ */
+ cfg22.u32 = 0;
+ /* Master Retry Value [1..255] and 0=infinite */
+ cfg22.s.mrv = 0xff;
+ /*
+ * AM_DO_FLUSH_I control NOTE: This bit MUST BE ONE for proper
+ * N3K operation.
+ */
+ cfg22.s.flush = 1;
+ octeon_npi_write32(CVMX_NPI_PCI_CFG22, cfg22.u32);
+
+ /*
+ * MOST Indicates the maximum number of outstanding splits (in -1
+ * notation) when OCTEON is in PCI-X mode. PCI-X performance is
+ * affected by the MOST selection. Should generally be written
+ * with one of 0x3be807, 0x2be807, 0x1be807, or 0x0be807,
+ * depending on the desired MOST of 3, 2, 1, or 0, respectively.
+ */
+ cfg56.u32 = 0;
+ cfg56.s.pxcid = 7; /* RO - PCI-X Capability ID */
+ cfg56.s.ncp = 0xe8; /* RO - Next Capability Pointer */
+ cfg56.s.dpere = 1; /* Data Parity Error Recovery Enable */
+ cfg56.s.roe = 1; /* Relaxed Ordering Enable */
+ cfg56.s.mmbc = 1; /* Maximum Memory Byte Count
+ [0=512B,1=1024B,2=2048B,3=4096B] */
+ cfg56.s.most = 3; /* Maximum outstanding Split transactions [0=1
+ .. 7=32] */
+
+ octeon_npi_write32(CVMX_NPI_PCI_CFG56, cfg56.u32);
+
+ /*
+ * Affects PCI performance when OCTEON services reads to its
+ * BAR1/BAR2. Refer to Section 10.6.1. The recommended values are
+ * 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and
+ * PCI_READ_CMD_E, respectively. Unfortunately due to errata DDR-700,
+ * these values need to be changed so they won't possibly prefetch off
+ * of the end of memory if PCI is DMAing a buffer at the end of
+ * memory. Note that these values differ from their reset values.
+ */
+ octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_6, 0x21);
+ octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_C, 0x31);
+ octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_E, 0x31);
+}
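+
+/*
+ * Editor's note: an illustrative sketch, not part of the original file.
+ * The clock measurement above counts PCI clocks (PCI_CNT_REG) over roughly
+ * 1 ms and divides by the elapsed microseconds derived from the c0 count.
+ * Assuming a 500 MHz timebase, 500000 elapsed cycles correspond to 1000 us,
+ * so 33000 PCI counts in that window report a 33 MHz PCI clock.
+ */
+#if 0
+static unsigned long example_pci_clock_mhz(unsigned long pci_counts,
+					   unsigned long cycles,
+					   unsigned long timebase_hz)
+{
+	/* e.g. example_pci_clock_mhz(33000, 500000, 500000000) == 33 */
+	return pci_counts / (cycles / (timebase_hz / 1000000));
+}
+#endif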
+
+
+/*
+ * Initialize the Octeon PCI controller
+ */
+static int __init octeon_pci_setup(void)
+{
+ union cvmx_npi_mem_access_subidx mem_access;
+ int index;
+
+	/* Only chips without PCIe have the legacy PCI bus handled here */
+ if (octeon_has_feature(OCTEON_FEATURE_PCIE))
+ return 0;
+
+ if (!octeon_is_pci_host()) {
+ pr_notice("Not in host mode, PCI Controller not initialized\n");
+ return 0;
+ }
+
+ /* Point pcibios_map_irq() to the PCI version of it */
+ octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
+
+ /* Only use the big bars on chips that support it */
+ if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
+ OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
+ octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_SMALL;
+ else
+ octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
+
+ /* PCI I/O and PCI MEM values */
+ set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
+ ioport_resource.start = 0;
+ ioport_resource.end = OCTEON_PCI_IOSPACE_SIZE - 1;
+
+ pr_notice("%s Octeon big bar support\n",
+ (octeon_dma_bar_type ==
+ OCTEON_DMA_BAR_TYPE_BIG) ? "Enabling" : "Disabling");
+
+ octeon_pci_initialize();
+
+ mem_access.u64 = 0;
+ mem_access.s.esr = 1; /* Endian-Swap on read. */
+ mem_access.s.esw = 1; /* Endian-Swap on write. */
+ mem_access.s.nsr = 0; /* No-Snoop on read. */
+ mem_access.s.nsw = 0; /* No-Snoop on write. */
+ mem_access.s.ror = 0; /* Relax Read on read. */
+ mem_access.s.row = 0; /* Relax Order on write. */
+ mem_access.s.ba = 0; /* PCI Address bits [63:36]. */
+ cvmx_write_csr(CVMX_NPI_MEM_ACCESS_SUBID3, mem_access.u64);
+
+ /*
+ * Remap the Octeon BAR 2 above all 32 bit devices
+ * (0x8000000000ul). This is done here so it is remapped
+ * before the readl()'s below. We don't want BAR2 overlapping
+ * with BAR0/BAR1 during these reads.
+ */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG08,
+ (u32)(OCTEON_BAR2_PCI_ADDRESS & 0xffffffffull));
+ octeon_npi_write32(CVMX_NPI_PCI_CFG09,
+ (u32)(OCTEON_BAR2_PCI_ADDRESS >> 32));
+
+ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
+ /* Remap the Octeon BAR 0 to 0-2GB */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG04, 0);
+ octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);
+
+ /*
+ * Remap the Octeon BAR 1 to map 2GB-4GB (minus the
+ * BAR 1 hole).
+ */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG06, 2ul << 30);
+ octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
+
+ /* BAR1 movable mappings set for identity mapping */
+ octeon_bar1_pci_phys = 0x80000000ull;
+ for (index = 0; index < 32; index++) {
+ union cvmx_pci_bar1_indexx bar1_index;
+
+ bar1_index.u32 = 0;
+ /* Address bits[35:22] sent to L2C */
+ bar1_index.s.addr_idx =
+ (octeon_bar1_pci_phys >> 22) + index;
+ /* Don't put PCI accesses in L2. */
+ bar1_index.s.ca = 1;
+ /* Endian Swap Mode */
+ bar1_index.s.end_swp = 1;
+ /* Set '1' when the selected address range is valid. */
+ bar1_index.s.addr_v = 1;
+ octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
+ bar1_index.u32);
+ }
+
+ /* Devices go after BAR1 */
+ octeon_pci_mem_resource.start =
+ OCTEON_PCI_MEMSPACE_OFFSET + (4ul << 30) -
+ (OCTEON_PCI_BAR1_HOLE_SIZE << 20);
+ octeon_pci_mem_resource.end =
+ octeon_pci_mem_resource.start + (1ul << 30);
+ } else {
+ /* Remap the Octeon BAR 0 to map 128MB-(128MB+4KB) */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG04, 128ul << 20);
+ octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);
+
+ /* Remap the Octeon BAR 1 to map 0-128MB */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG06, 0);
+ octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
+
+ /* BAR1 movable regions contiguous to cover the swiotlb */
+ octeon_bar1_pci_phys =
+ virt_to_phys(octeon_swiotlb) & ~((1ull << 22) - 1);
+
+ for (index = 0; index < 32; index++) {
+ union cvmx_pci_bar1_indexx bar1_index;
+
+ bar1_index.u32 = 0;
+ /* Address bits[35:22] sent to L2C */
+ bar1_index.s.addr_idx =
+ (octeon_bar1_pci_phys >> 22) + index;
+ /* Don't put PCI accesses in L2. */
+ bar1_index.s.ca = 1;
+ /* Endian Swap Mode */
+ bar1_index.s.end_swp = 1;
+ /* Set '1' when the selected address range is valid. */
+ bar1_index.s.addr_v = 1;
+ octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
+ bar1_index.u32);
+ }
+
+ /* Devices go after BAR0 */
+ octeon_pci_mem_resource.start =
+ OCTEON_PCI_MEMSPACE_OFFSET + (128ul << 20) +
+ (4ul << 10);
+ octeon_pci_mem_resource.end =
+ octeon_pci_mem_resource.start + (1ul << 30);
+ }
+
+ register_pci_controller(&octeon_pci_controller);
+
+ /*
+ * Clear any errors that might be pending from before the bus
+ * was setup properly.
+ */
+ cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1);
+
+ if (IS_ERR(platform_device_register_simple("octeon_pci_edac",
+ -1, NULL, 0)))
+		pr_err("Registration of octeon_pci_edac failed!\n");
+
+ octeon_pci_dma_init();
+
+ return 0;
+}
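+
+/*
+ * Editor's note: an illustrative sketch, not part of the original file.
+ * Each BAR1 index entry programmed above covers a 4 MB window (address
+ * bits [35:22]), so the 32 entries map a contiguous 128 MB region starting
+ * at octeon_bar1_pci_phys.  In the big-bar layout octeon_bar1_pci_phys is
+ * 0x80000000, so index 5 targets physical address 0x81400000.
+ */
+#if 0
+static u64 example_bar1_target(u64 bar1_phys_base, int index)
+{
+	/* e.g. example_bar1_target(0x80000000ull, 5) == 0x81400000 */
+	return bar1_phys_base + ((u64)index << 22);
+}
+#endif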
+
+arch_initcall(octeon_pci_setup);
diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c
new file mode 100644
index 000000000..7f6ce6d73
--- /dev/null
+++ b/arch/mips/pci/pci-rc32434.c
@@ -0,0 +1,231 @@
+/*
+ * BRIEF MODULE DESCRIPTION
+ * PCI initialization for IDT EB434 board
+ *
+ * Copyright 2004 IDT Inc. (rischelp@idt.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/mach-rc32434/rc32434.h>
+#include <asm/mach-rc32434/pci.h>
+
+#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_WRITE 1
+
+/* define an unsigned array for the PCI registers */
+static unsigned int korina_cnfg_regs[25] = {
+ KORINA_CNFG1, KORINA_CNFG2, KORINA_CNFG3, KORINA_CNFG4,
+ KORINA_CNFG5, KORINA_CNFG6, KORINA_CNFG7, KORINA_CNFG8,
+ KORINA_CNFG9, KORINA_CNFG10, KORINA_CNFG11, KORINA_CNFG12,
+ KORINA_CNFG13, KORINA_CNFG14, KORINA_CNFG15, KORINA_CNFG16,
+ KORINA_CNFG17, KORINA_CNFG18, KORINA_CNFG19, KORINA_CNFG20,
+ KORINA_CNFG21, KORINA_CNFG22, KORINA_CNFG23, KORINA_CNFG24
+};
+static struct resource rc32434_res_pci_mem1;
+static struct resource rc32434_res_pci_mem2;
+
+static struct resource rc32434_res_pci_mem1 = {
+ .name = "PCI MEM1",
+ .start = 0x50000000,
+ .end = 0x5FFFFFFF,
+ .flags = IORESOURCE_MEM,
+ .sibling = NULL,
+ .child = &rc32434_res_pci_mem2
+};
+
+static struct resource rc32434_res_pci_mem2 = {
+ .name = "PCI Mem2",
+ .start = 0x60000000,
+ .end = 0x6FFFFFFF,
+ .flags = IORESOURCE_MEM,
+ .parent = &rc32434_res_pci_mem1,
+ .sibling = NULL,
+ .child = NULL
+};
+
+static struct resource rc32434_res_pci_io1 = {
+ .name = "PCI I/O1",
+ .start = 0x18800000,
+ .end = 0x188FFFFF,
+ .flags = IORESOURCE_IO,
+};
+
+extern struct pci_ops rc32434_pci_ops;
+
+#define PCI_MEM1_START PCI_ADDR_START
+#define PCI_MEM1_END (PCI_ADDR_START + CPUTOPCI_MEM_WIN - 1)
+#define PCI_MEM2_START (PCI_ADDR_START + CPUTOPCI_MEM_WIN)
+#define PCI_MEM2_END (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) - 1)
+#define PCI_IO1_START (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN))
+#define PCI_IO1_END \
+ (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + CPUTOPCI_IO_WIN - 1)
+#define PCI_IO2_START \
+ (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + CPUTOPCI_IO_WIN)
+#define PCI_IO2_END \
+ (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + (2 * CPUTOPCI_IO_WIN) - 1)
+
+struct pci_controller rc32434_controller2;
+
+struct pci_controller rc32434_controller = {
+ .pci_ops = &rc32434_pci_ops,
+ .mem_resource = &rc32434_res_pci_mem1,
+ .io_resource = &rc32434_res_pci_io1,
+ .mem_offset = 0,
+ .io_offset = 0,
+
+};
+
+#ifdef __MIPSEB__
+#define PCI_ENDIAN_FLAG PCILBAC_sb_m
+#else
+#define PCI_ENDIAN_FLAG 0
+#endif
+
+static int __init rc32434_pcibridge_init(void)
+{
+ unsigned int pcicvalue, pcicdata = 0;
+ unsigned int dummyread, pcicntlval;
+ int loopCount;
+ unsigned int pci_config_addr;
+
+ pcicvalue = rc32434_pci->pcic;
+ pcicvalue = (pcicvalue >> PCIM_SHFT) & PCIM_BIT_LEN;
+ if (!((pcicvalue == PCIM_H_EA) ||
+ (pcicvalue == PCIM_H_IA_FIX) ||
+ (pcicvalue == PCIM_H_IA_RR))) {
+ pr_err("PCI init error!!!\n");
+ /* Not in Host Mode, return ERROR */
+ return -1;
+ }
+ /* Enables the Idle Grant mode, Arbiter Parking */
+ pcicdata |= (PCI_CTL_IGM | PCI_CTL_EAP | PCI_CTL_EN);
+ rc32434_pci->pcic = pcicdata; /* Enable the PCI bus Interface */
+ /* Zero out the PCI status & PCI Status Mask */
+ for (;;) {
+ pcicdata = rc32434_pci->pcis;
+ if (!(pcicdata & PCI_STAT_RIP))
+ break;
+ }
+
+ rc32434_pci->pcis = 0;
+ rc32434_pci->pcism = 0xFFFFFFFF;
+ /* Zero out the PCI decoupled registers */
+ rc32434_pci->pcidac = 0; /*
+ * disable PCI decoupled accesses at
+ * initialization
+ */
+ rc32434_pci->pcidas = 0; /* clear the status */
+ rc32434_pci->pcidasm = 0x0000007F; /* Mask all the interrupts */
+ /* Mask PCI Messaging Interrupts */
+ rc32434_pci_msg->pciiic = 0;
+ rc32434_pci_msg->pciiim = 0xFFFFFFFF;
+ rc32434_pci_msg->pciioic = 0;
+ rc32434_pci_msg->pciioim = 0;
+
+
+ /* Setup PCILB0 as Memory Window */
+ rc32434_pci->pcilba[0].address = (unsigned int) (PCI_ADDR_START);
+
+ /* setup the PCI map address as same as the local address */
+
+ rc32434_pci->pcilba[0].mapping = (unsigned int) (PCI_ADDR_START);
+
+
+ /* Setup PCILBA1 as MEM */
+ rc32434_pci->pcilba[0].control =
+ (((SIZE_256MB & 0x1f) << PCI_LBAC_SIZE_BIT) | PCI_ENDIAN_FLAG);
+ dummyread = rc32434_pci->pcilba[0].control; /* flush the CPU write Buffers */
+ rc32434_pci->pcilba[1].address = 0x60000000;
+ rc32434_pci->pcilba[1].mapping = 0x60000000;
+
+ /* setup PCILBA2 as IO Window */
+ rc32434_pci->pcilba[1].control =
+ (((SIZE_256MB & 0x1f) << PCI_LBAC_SIZE_BIT) | PCI_ENDIAN_FLAG);
+ dummyread = rc32434_pci->pcilba[1].control; /* flush the CPU write Buffers */
+ rc32434_pci->pcilba[2].address = 0x18C00000;
+ rc32434_pci->pcilba[2].mapping = 0x18FFFFFF;
+
+ /* setup PCILBA2 as IO Window */
+ rc32434_pci->pcilba[2].control =
+ (((SIZE_4MB & 0x1f) << PCI_LBAC_SIZE_BIT) | PCI_ENDIAN_FLAG);
+ dummyread = rc32434_pci->pcilba[2].control; /* flush the CPU write Buffers */
+
+ /* Setup PCILBA3 as IO Window */
+ rc32434_pci->pcilba[3].address = 0x18800000;
+ rc32434_pci->pcilba[3].mapping = 0x18800000;
+ rc32434_pci->pcilba[3].control =
+ ((((SIZE_1MB & 0x1ff) << PCI_LBAC_SIZE_BIT) | PCI_LBAC_MSI) |
+ PCI_ENDIAN_FLAG);
+ dummyread = rc32434_pci->pcilba[3].control; /* flush the CPU write Buffers */
+
+ pci_config_addr = (unsigned int) (0x80000004);
+ for (loopCount = 0; loopCount < 24; loopCount++) {
+ rc32434_pci->pcicfga = pci_config_addr;
+ dummyread = rc32434_pci->pcicfga;
+ rc32434_pci->pcicfgd = korina_cnfg_regs[loopCount];
+ dummyread = rc32434_pci->pcicfgd;
+ pci_config_addr += 4;
+ }
+ rc32434_pci->pcitc =
+ (unsigned int) ((PCITC_RTIMER_VAL & 0xff) << PCI_TC_RTIMER_BIT) |
+ ((PCITC_DTIMER_VAL & 0xff) << PCI_TC_DTIMER_BIT);
+
+ pcicntlval = rc32434_pci->pcic;
+ pcicntlval &= ~PCI_CTL_TNR;
+ rc32434_pci->pcic = pcicntlval;
+ pcicntlval = rc32434_pci->pcic;
+
+ return 0;
+}
+
+static int __init rc32434_pci_init(void)
+{
+ void __iomem *io_map_base;
+
+ pr_info("PCI: Initializing PCI\n");
+
+ ioport_resource.start = rc32434_res_pci_io1.start;
+ ioport_resource.end = rc32434_res_pci_io1.end;
+
+ rc32434_pcibridge_init();
+
+ io_map_base = ioremap(rc32434_res_pci_io1.start,
+ resource_size(&rc32434_res_pci_io1));
+
+ if (!io_map_base)
+ return -ENOMEM;
+
+ rc32434_controller.io_map_base =
+ (unsigned long)io_map_base - rc32434_res_pci_io1.start;
+
+ register_pci_controller(&rc32434_controller);
+ rc32434_sync();
+
+ return 0;
+}
+
+arch_initcall(rc32434_pci_init);
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
new file mode 100644
index 000000000..f1538d2be
--- /dev/null
+++ b/arch/mips/pci/pci-rt2880.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Ralink RT288x SoC PCI register definitions
+ *
+ * Copyright (C) 2009 John Crispin <john@phrozen.org>
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ */
+
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+
+#include <asm/mach-ralink/rt288x.h>
+
+#define RT2880_PCI_BASE 0x00440000
+#define RT288X_CPU_IRQ_PCI 4
+
+#define RT2880_PCI_MEM_BASE 0x20000000
+#define RT2880_PCI_MEM_SIZE 0x10000000
+#define RT2880_PCI_IO_BASE 0x00460000
+#define RT2880_PCI_IO_SIZE 0x00010000
+
+#define RT2880_PCI_REG_PCICFG_ADDR 0x00
+#define RT2880_PCI_REG_PCIMSK_ADDR 0x0c
+#define RT2880_PCI_REG_BAR0SETUP_ADDR 0x10
+#define RT2880_PCI_REG_IMBASEBAR0_ADDR 0x18
+#define RT2880_PCI_REG_CONFIG_ADDR 0x20
+#define RT2880_PCI_REG_CONFIG_DATA 0x24
+#define RT2880_PCI_REG_MEMBASE 0x28
+#define RT2880_PCI_REG_IOBASE 0x2c
+#define RT2880_PCI_REG_ID 0x30
+#define RT2880_PCI_REG_CLASS 0x34
+#define RT2880_PCI_REG_SUBID 0x38
+#define RT2880_PCI_REG_ARBCTL 0x80
+
+static void __iomem *rt2880_pci_base;
+static DEFINE_SPINLOCK(rt2880_pci_lock);
+
+static u32 rt2880_pci_reg_read(u32 reg)
+{
+ return readl(rt2880_pci_base + reg);
+}
+
+static void rt2880_pci_reg_write(u32 val, u32 reg)
+{
+ writel(val, rt2880_pci_base + reg);
+}
+
+static inline u32 rt2880_pci_get_cfgaddr(unsigned int bus, unsigned int slot,
+ unsigned int func, unsigned int where)
+{
+ return ((bus << 16) | (slot << 11) | (func << 8) | (where & 0xfc) |
+ 0x80000000);
+}
+
+static int rt2880_pci_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ unsigned long flags;
+ u32 address;
+ u32 data;
+
+ address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+
+ spin_lock_irqsave(&rt2880_pci_lock, flags);
+ rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
+ data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA);
+ spin_unlock_irqrestore(&rt2880_pci_lock, flags);
+
+ switch (size) {
+ case 1:
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ break;
+ case 2:
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ break;
+ case 4:
+ *val = data;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rt2880_pci_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ unsigned long flags;
+ u32 address;
+ u32 data;
+
+ address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+
+ spin_lock_irqsave(&rt2880_pci_lock, flags);
+ rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
+ data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA);
+
+ switch (size) {
+ case 1:
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ break;
+ case 2:
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ break;
+ case 4:
+ data = val;
+ break;
+ }
+
+ rt2880_pci_reg_write(data, RT2880_PCI_REG_CONFIG_DATA);
+ spin_unlock_irqrestore(&rt2880_pci_lock, flags);
+
+ return PCIBIOS_SUCCESSFUL;
+}
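+
+/*
+ * Editor's note: an illustrative sketch, not part of the original driver.
+ * Sub-dword config writes above are read-modify-write on the containing
+ * dword, with the byte lane selected by (where & 3) << 3.  Writing the
+ * byte 0x40 at offset 0x0e into a dword that currently reads 0x12345678
+ * leaves the other lanes untouched and yields 0x12405678.
+ */
+#if 0
+static u32 example_byte_merge(u32 dword, int where, u8 val)
+{
+	int shift = (where & 3) << 3;
+
+	/* e.g. example_byte_merge(0x12345678, 0x0e, 0x40) == 0x12405678 */
+	return (dword & ~(0xff << shift)) | ((u32)val << shift);
+}
+#endif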
+
+static struct pci_ops rt2880_pci_ops = {
+ .read = rt2880_pci_config_read,
+ .write = rt2880_pci_config_write,
+};
+
+static struct resource rt2880_pci_mem_resource = {
+ .name = "PCI MEM space",
+ .start = RT2880_PCI_MEM_BASE,
+ .end = RT2880_PCI_MEM_BASE + RT2880_PCI_MEM_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource rt2880_pci_io_resource = {
+ .name = "PCI IO space",
+ .start = RT2880_PCI_IO_BASE,
+ .end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1,
+ .flags = IORESOURCE_IO,
+};
+
+static struct pci_controller rt2880_pci_controller = {
+ .pci_ops = &rt2880_pci_ops,
+ .mem_resource = &rt2880_pci_mem_resource,
+ .io_resource = &rt2880_pci_io_resource,
+};
+
+static inline u32 rt2880_pci_read_u32(unsigned long reg)
+{
+ unsigned long flags;
+ u32 address;
+ u32 ret;
+
+ address = rt2880_pci_get_cfgaddr(0, 0, 0, reg);
+
+ spin_lock_irqsave(&rt2880_pci_lock, flags);
+ rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
+ ret = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA);
+ spin_unlock_irqrestore(&rt2880_pci_lock, flags);
+
+ return ret;
+}
+
+static inline void rt2880_pci_write_u32(unsigned long reg, u32 val)
+{
+ unsigned long flags;
+ u32 address;
+
+ address = rt2880_pci_get_cfgaddr(0, 0, 0, reg);
+
+ spin_lock_irqsave(&rt2880_pci_lock, flags);
+ rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
+ rt2880_pci_reg_write(val, RT2880_PCI_REG_CONFIG_DATA);
+ spin_unlock_irqrestore(&rt2880_pci_lock, flags);
+}
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ int irq = -1;
+
+ if (dev->bus->number != 0)
+ return irq;
+
+ switch (PCI_SLOT(dev->devfn)) {
+ case 0x00:
+ break;
+ case 0x11:
+ irq = RT288X_CPU_IRQ_PCI;
+ break;
+ default:
+ pr_err("%s:%s[%d] trying to alloc unknown pci irq\n",
+ __FILE__, __func__, __LINE__);
+ BUG();
+ break;
+ }
+
+ return irq;
+}
+
+static int rt288x_pci_probe(struct platform_device *pdev)
+{
+ void __iomem *io_map_base;
+
+ rt2880_pci_base = ioremap(RT2880_PCI_BASE, PAGE_SIZE);
+
+ io_map_base = ioremap(RT2880_PCI_IO_BASE, RT2880_PCI_IO_SIZE);
+ rt2880_pci_controller.io_map_base = (unsigned long) io_map_base;
+ set_io_port_base((unsigned long) io_map_base);
+
+ ioport_resource.start = RT2880_PCI_IO_BASE;
+ ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1;
+
+ rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR);
+ udelay(1);
+
+ rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL);
+ rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR);
+ rt2880_pci_reg_write(RT2880_PCI_MEM_BASE, RT2880_PCI_REG_MEMBASE);
+ rt2880_pci_reg_write(RT2880_PCI_IO_BASE, RT2880_PCI_REG_IOBASE);
+ rt2880_pci_reg_write(0x08000000, RT2880_PCI_REG_IMBASEBAR0_ADDR);
+ rt2880_pci_reg_write(0x08021814, RT2880_PCI_REG_ID);
+ rt2880_pci_reg_write(0x00800001, RT2880_PCI_REG_CLASS);
+ rt2880_pci_reg_write(0x28801814, RT2880_PCI_REG_SUBID);
+ rt2880_pci_reg_write(0x000c0000, RT2880_PCI_REG_PCIMSK_ADDR);
+
+ rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
+ (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
+
+ rt2880_pci_controller.of_node = pdev->dev.of_node;
+
+ register_pci_controller(&rt2880_pci_controller);
+ return 0;
+}
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ static bool slot0_init;
+
+ /*
+ * Nobody seems to initialize slot 0, but this platform requires it, so
+ * do it once when some other slot is being enabled. The PCI subsystem
+ * should configure other slots properly, so no need to do anything
+ * special for those.
+ */
+ if (!slot0_init && dev->bus->number == 0) {
+ u16 cmd;
+ u32 bar0;
+
+ slot0_init = true;
+
+ pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
+ 0x08000000);
+ pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
+ &bar0);
+
+ pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd);
+ cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
+ pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id rt288x_pci_match[] = {
+ { .compatible = "ralink,rt288x-pci" },
+ {},
+};
+
+static struct platform_driver rt288x_pci_driver = {
+ .probe = rt288x_pci_probe,
+ .driver = {
+ .name = "rt288x-pci",
+ .of_match_table = rt288x_pci_match,
+ },
+};
+
+int __init pcibios_init(void)
+{
+ int ret = platform_driver_register(&rt288x_pci_driver);
+
+ if (ret)
+		pr_err("rt288x-pci: Error registering platform driver!\n");
+
+ return ret;
+}
+
+arch_initcall(pcibios_init);
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
new file mode 100644
index 000000000..0ac634602
--- /dev/null
+++ b/arch/mips/pci/pci-rt3883.c
@@ -0,0 +1,590 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Ralink RT3662/RT3883 SoC PCI support
+ *
+ * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-ralink/rt3883.h>
+#include <asm/mach-ralink/ralink_regs.h>
+
+#define RT3883_MEMORY_BASE 0x00000000
+#define RT3883_MEMORY_SIZE 0x02000000
+
+#define RT3883_PCI_REG_PCICFG 0x00
+#define RT3883_PCICFG_P2P_BR_DEVNUM_M 0xf
+#define RT3883_PCICFG_P2P_BR_DEVNUM_S 16
+#define RT3883_PCICFG_PCIRST BIT(1)
+#define RT3883_PCI_REG_PCIRAW 0x04
+#define RT3883_PCI_REG_PCIINT 0x08
+#define RT3883_PCI_REG_PCIENA 0x0c
+
+#define RT3883_PCI_REG_CFGADDR 0x20
+#define RT3883_PCI_REG_CFGDATA 0x24
+#define RT3883_PCI_REG_MEMBASE 0x28
+#define RT3883_PCI_REG_IOBASE 0x2c
+#define RT3883_PCI_REG_ARBCTL 0x80
+
+#define RT3883_PCI_REG_BASE(_x) (0x1000 + (_x) * 0x1000)
+#define RT3883_PCI_REG_BAR0SETUP(_x) (RT3883_PCI_REG_BASE((_x)) + 0x10)
+#define RT3883_PCI_REG_IMBASEBAR0(_x) (RT3883_PCI_REG_BASE((_x)) + 0x18)
+#define RT3883_PCI_REG_ID(_x) (RT3883_PCI_REG_BASE((_x)) + 0x30)
+#define RT3883_PCI_REG_CLASS(_x) (RT3883_PCI_REG_BASE((_x)) + 0x34)
+#define RT3883_PCI_REG_SUBID(_x) (RT3883_PCI_REG_BASE((_x)) + 0x38)
+#define RT3883_PCI_REG_STATUS(_x) (RT3883_PCI_REG_BASE((_x)) + 0x50)
+
+#define RT3883_PCI_MODE_NONE 0
+#define RT3883_PCI_MODE_PCI BIT(0)
+#define RT3883_PCI_MODE_PCIE BIT(1)
+#define RT3883_PCI_MODE_BOTH (RT3883_PCI_MODE_PCI | RT3883_PCI_MODE_PCIE)
+
+#define RT3883_PCI_IRQ_COUNT 32
+
+#define RT3883_P2P_BR_DEVNUM 1
+
+struct rt3883_pci_controller {
+ void __iomem *base;
+
+ struct device_node *intc_of_node;
+ struct irq_domain *irq_domain;
+
+ struct pci_controller pci_controller;
+ struct resource io_res;
+ struct resource mem_res;
+
+ bool pcie_ready;
+};
+
+static inline struct rt3883_pci_controller *
+pci_bus_to_rt3883_controller(struct pci_bus *bus)
+{
+ struct pci_controller *hose;
+
+ hose = (struct pci_controller *) bus->sysdata;
+ return container_of(hose, struct rt3883_pci_controller, pci_controller);
+}
+
+static inline u32 rt3883_pci_r32(struct rt3883_pci_controller *rpc,
+ unsigned reg)
+{
+ return ioread32(rpc->base + reg);
+}
+
+static inline void rt3883_pci_w32(struct rt3883_pci_controller *rpc,
+ u32 val, unsigned reg)
+{
+ iowrite32(val, rpc->base + reg);
+}
+
+static inline u32 rt3883_pci_get_cfgaddr(unsigned int bus, unsigned int slot,
+ unsigned int func, unsigned int where)
+{
+ return (bus << 16) | (slot << 11) | (func << 8) | (where & 0xfc) |
+ 0x80000000;
+}
+
+static u32 rt3883_pci_read_cfg32(struct rt3883_pci_controller *rpc,
+ unsigned bus, unsigned slot,
+ unsigned func, unsigned reg)
+{
+ u32 address;
+ u32 ret;
+
+ address = rt3883_pci_get_cfgaddr(bus, slot, func, reg);
+
+ rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR);
+ ret = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA);
+
+ return ret;
+}
+
+static void rt3883_pci_write_cfg32(struct rt3883_pci_controller *rpc,
+ unsigned bus, unsigned slot,
+ unsigned func, unsigned reg, u32 val)
+{
+ u32 address;
+
+ address = rt3883_pci_get_cfgaddr(bus, slot, func, reg);
+
+ rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR);
+ rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA);
+}
+
+static void rt3883_pci_irq_handler(struct irq_desc *desc)
+{
+ struct rt3883_pci_controller *rpc;
+ u32 pending;
+
+ rpc = irq_desc_get_handler_data(desc);
+
+ pending = rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIINT) &
+ rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIENA);
+
+ if (!pending) {
+ spurious_interrupt();
+ return;
+ }
+
+ while (pending) {
+ unsigned irq, bit = __ffs(pending);
+
+ irq = irq_find_mapping(rpc->irq_domain, bit);
+ generic_handle_irq(irq);
+
+ pending &= ~BIT(bit);
+ }
+}
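+
+/*
+ * Editor's note: an illustrative sketch, not part of the original driver.
+ * The handler above dispatches pending bits lowest-first: with
+ * PCIINT & PCIENA equal to 0x12 (bits 1 and 4 set), hwirq 1 is handled
+ * before hwirq 4.
+ */
+#if 0
+static void example_walk_pending(u32 pending)
+{
+	while (pending) {
+		unsigned int bit = __ffs(pending);
+
+		pr_info("dispatching hwirq %u\n", bit);
+		pending &= ~BIT(bit);
+	}
+}
+#endif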
+
+static void rt3883_pci_irq_unmask(struct irq_data *d)
+{
+ struct rt3883_pci_controller *rpc;
+ u32 t;
+
+ rpc = irq_data_get_irq_chip_data(d);
+
+ t = rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIENA);
+ rt3883_pci_w32(rpc, t | BIT(d->hwirq), RT3883_PCI_REG_PCIENA);
+ /* flush write */
+ rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIENA);
+}
+
+static void rt3883_pci_irq_mask(struct irq_data *d)
+{
+ struct rt3883_pci_controller *rpc;
+ u32 t;
+
+ rpc = irq_data_get_irq_chip_data(d);
+
+ t = rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIENA);
+ rt3883_pci_w32(rpc, t & ~BIT(d->hwirq), RT3883_PCI_REG_PCIENA);
+ /* flush write */
+ rt3883_pci_r32(rpc, RT3883_PCI_REG_PCIENA);
+}
+
+static struct irq_chip rt3883_pci_irq_chip = {
+ .name = "RT3883 PCI",
+ .irq_mask = rt3883_pci_irq_mask,
+ .irq_unmask = rt3883_pci_irq_unmask,
+ .irq_mask_ack = rt3883_pci_irq_mask,
+};
+
+static int rt3883_pci_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &rt3883_pci_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, d->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops rt3883_pci_irq_domain_ops = {
+ .map = rt3883_pci_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int rt3883_pci_irq_init(struct device *dev,
+ struct rt3883_pci_controller *rpc)
+{
+ int irq;
+
+ irq = irq_of_parse_and_map(rpc->intc_of_node, 0);
+ if (irq == 0) {
+ dev_err(dev, "%pOF has no IRQ", rpc->intc_of_node);
+ return -EINVAL;
+ }
+
+ /* disable all interrupts */
+ rt3883_pci_w32(rpc, 0, RT3883_PCI_REG_PCIENA);
+
+ rpc->irq_domain =
+ irq_domain_add_linear(rpc->intc_of_node, RT3883_PCI_IRQ_COUNT,
+ &rt3883_pci_irq_domain_ops,
+ rpc);
+ if (!rpc->irq_domain) {
+ dev_err(dev, "unable to add IRQ domain\n");
+ return -ENODEV;
+ }
+
+ irq_set_chained_handler_and_data(irq, rt3883_pci_irq_handler, rpc);
+
+ return 0;
+}
+
+static int rt3883_pci_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct rt3883_pci_controller *rpc;
+ u32 address;
+ u32 data;
+
+ rpc = pci_bus_to_rt3883_controller(bus);
+
+ if (!rpc->pcie_ready && bus->number == 1)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ address = rt3883_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+
+ rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR);
+ data = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA);
+
+ switch (size) {
+ case 1:
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ break;
+ case 2:
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ break;
+ case 4:
+ *val = data;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rt3883_pci_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct rt3883_pci_controller *rpc;
+ u32 address;
+ u32 data;
+
+ rpc = pci_bus_to_rt3883_controller(bus);
+
+ if (!rpc->pcie_ready && bus->number == 1)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ address = rt3883_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+
+ rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR);
+ data = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA);
+
+ switch (size) {
+ case 1:
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ break;
+ case 2:
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ break;
+ case 4:
+ data = val;
+ break;
+ }
+
+ rt3883_pci_w32(rpc, data, RT3883_PCI_REG_CFGDATA);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops rt3883_pci_ops = {
+ .read = rt3883_pci_config_read,
+ .write = rt3883_pci_config_write,
+};
+
+static void rt3883_pci_preinit(struct rt3883_pci_controller *rpc, unsigned mode)
+{
+ u32 syscfg1;
+ u32 rstctrl;
+ u32 clkcfg1;
+ u32 t;
+
+ rstctrl = rt_sysc_r32(RT3883_SYSC_REG_RSTCTRL);
+ syscfg1 = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1);
+ clkcfg1 = rt_sysc_r32(RT3883_SYSC_REG_CLKCFG1);
+
+ if (mode & RT3883_PCI_MODE_PCIE) {
+ rstctrl |= RT3883_RSTCTRL_PCIE;
+ rt_sysc_w32(rstctrl, RT3883_SYSC_REG_RSTCTRL);
+
+ /* setup PCI PAD drive mode */
+ syscfg1 &= ~(0x30);
+ syscfg1 |= (2 << 4);
+ rt_sysc_w32(syscfg1, RT3883_SYSC_REG_SYSCFG1);
+
+ t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN0);
+ t &= ~BIT(31);
+ rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN0);
+
+ t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN1);
+ t &= 0x80ffffff;
+ rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN1);
+
+ t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN1);
+ t |= 0xa << 24;
+ rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN1);
+
+ t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN0);
+ t |= BIT(31);
+ rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN0);
+
+ msleep(50);
+
+ rstctrl &= ~RT3883_RSTCTRL_PCIE;
+ rt_sysc_w32(rstctrl, RT3883_SYSC_REG_RSTCTRL);
+ }
+
+ syscfg1 |= (RT3883_SYSCFG1_PCIE_RC_MODE | RT3883_SYSCFG1_PCI_HOST_MODE);
+
+ clkcfg1 &= ~(RT3883_CLKCFG1_PCI_CLK_EN | RT3883_CLKCFG1_PCIE_CLK_EN);
+
+ if (mode & RT3883_PCI_MODE_PCI) {
+ clkcfg1 |= RT3883_CLKCFG1_PCI_CLK_EN;
+ rstctrl &= ~RT3883_RSTCTRL_PCI;
+ }
+
+ if (mode & RT3883_PCI_MODE_PCIE) {
+ clkcfg1 |= RT3883_CLKCFG1_PCIE_CLK_EN;
+ rstctrl &= ~RT3883_RSTCTRL_PCIE;
+ }
+
+ rt_sysc_w32(syscfg1, RT3883_SYSC_REG_SYSCFG1);
+ rt_sysc_w32(rstctrl, RT3883_SYSC_REG_RSTCTRL);
+ rt_sysc_w32(clkcfg1, RT3883_SYSC_REG_CLKCFG1);
+
+ msleep(500);
+
+ /*
+ * setup the device number of the P2P bridge
+ * and de-assert the reset line
+ */
+ t = (RT3883_P2P_BR_DEVNUM << RT3883_PCICFG_P2P_BR_DEVNUM_S);
+ rt3883_pci_w32(rpc, t, RT3883_PCI_REG_PCICFG);
+
+ /* flush write */
+ rt3883_pci_r32(rpc, RT3883_PCI_REG_PCICFG);
+ msleep(500);
+
+ if (mode & RT3883_PCI_MODE_PCIE) {
+ msleep(500);
+
+ t = rt3883_pci_r32(rpc, RT3883_PCI_REG_STATUS(1));
+
+ rpc->pcie_ready = t & BIT(0);
+
+ if (!rpc->pcie_ready) {
+ /* reset the PCIe block */
+ t = rt_sysc_r32(RT3883_SYSC_REG_RSTCTRL);
+ t |= RT3883_RSTCTRL_PCIE;
+ rt_sysc_w32(t, RT3883_SYSC_REG_RSTCTRL);
+ t &= ~RT3883_RSTCTRL_PCIE;
+ rt_sysc_w32(t, RT3883_SYSC_REG_RSTCTRL);
+
+ /* turn off PCIe clock */
+ t = rt_sysc_r32(RT3883_SYSC_REG_CLKCFG1);
+ t &= ~RT3883_CLKCFG1_PCIE_CLK_EN;
+ rt_sysc_w32(t, RT3883_SYSC_REG_CLKCFG1);
+
+ t = rt_sysc_r32(RT3883_SYSC_REG_PCIE_CLK_GEN0);
+ t &= ~0xf000c080;
+ rt_sysc_w32(t, RT3883_SYSC_REG_PCIE_CLK_GEN0);
+ }
+ }
+
+ /* enable PCI arbiter */
+ rt3883_pci_w32(rpc, 0x79, RT3883_PCI_REG_ARBCTL);
+}
+
+static int rt3883_pci_probe(struct platform_device *pdev)
+{
+ struct rt3883_pci_controller *rpc;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ struct device_node *child;
+ u32 val;
+ int err;
+ int mode;
+
+ rpc = devm_kzalloc(dev, sizeof(*rpc), GFP_KERNEL);
+ if (!rpc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rpc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(rpc->base))
+ return PTR_ERR(rpc->base);
+
+ /* find the interrupt controller child node */
+ for_each_child_of_node(np, child) {
+ if (of_get_property(child, "interrupt-controller", NULL)) {
+ rpc->intc_of_node = child;
+ break;
+ }
+ }
+
+ if (!rpc->intc_of_node) {
+ dev_err(dev, "%pOF has no %s child node\n",
+ np, "interrupt controller");
+ return -EINVAL;
+ }
+
+ /* find the PCI host bridge child node */
+ for_each_child_of_node(np, child) {
+ if (of_node_is_type(child, "pci")) {
+ rpc->pci_controller.of_node = child;
+ break;
+ }
+ }
+
+ if (!rpc->pci_controller.of_node) {
+ dev_err(dev, "%pOF has no %s child node\n",
+ np, "PCI host bridge");
+ err = -EINVAL;
+ goto err_put_intc_node;
+ }
+
+ mode = RT3883_PCI_MODE_NONE;
+ for_each_available_child_of_node(rpc->pci_controller.of_node, child) {
+ int devfn;
+
+ if (!of_node_is_type(child, "pci"))
+ continue;
+
+ devfn = of_pci_get_devfn(child);
+ if (devfn < 0)
+ continue;
+
+ switch (PCI_SLOT(devfn)) {
+ case 1:
+ mode |= RT3883_PCI_MODE_PCIE;
+ break;
+
+ case 17:
+ case 18:
+ mode |= RT3883_PCI_MODE_PCI;
+ break;
+ }
+ }
+
+ if (mode == RT3883_PCI_MODE_NONE) {
+ dev_err(dev, "unable to determine PCI mode\n");
+ err = -EINVAL;
+ goto err_put_hb_node;
+ }
+
+ dev_info(dev, "mode:%s%s\n",
+ (mode & RT3883_PCI_MODE_PCI) ? " PCI" : "",
+ (mode & RT3883_PCI_MODE_PCIE) ? " PCIe" : "");
+
+ rt3883_pci_preinit(rpc, mode);
+
+ rpc->pci_controller.pci_ops = &rt3883_pci_ops;
+ rpc->pci_controller.io_resource = &rpc->io_res;
+ rpc->pci_controller.mem_resource = &rpc->mem_res;
+
+ /* Load PCI I/O and memory resources from DT */
+ pci_load_of_ranges(&rpc->pci_controller,
+ rpc->pci_controller.of_node);
+
+ rt3883_pci_w32(rpc, rpc->mem_res.start, RT3883_PCI_REG_MEMBASE);
+ rt3883_pci_w32(rpc, rpc->io_res.start, RT3883_PCI_REG_IOBASE);
+
+ ioport_resource.start = rpc->io_res.start;
+ ioport_resource.end = rpc->io_res.end;
+
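+ /*
+ * Set up the host controller's own config registers for the
+ * PCI (0) and PCIe (1) ports: BAR0 window, internal memory
+ * base, device/vendor IDs, class codes and subsystem IDs.
+ */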
+ /* PCI */
+ rt3883_pci_w32(rpc, 0x03ff0000, RT3883_PCI_REG_BAR0SETUP(0));
+ rt3883_pci_w32(rpc, RT3883_MEMORY_BASE, RT3883_PCI_REG_IMBASEBAR0(0));
+ rt3883_pci_w32(rpc, 0x08021814, RT3883_PCI_REG_ID(0));
+ rt3883_pci_w32(rpc, 0x00800001, RT3883_PCI_REG_CLASS(0));
+ rt3883_pci_w32(rpc, 0x28801814, RT3883_PCI_REG_SUBID(0));
+
+ /* PCIe */
+ rt3883_pci_w32(rpc, 0x03ff0000, RT3883_PCI_REG_BAR0SETUP(1));
+ rt3883_pci_w32(rpc, RT3883_MEMORY_BASE, RT3883_PCI_REG_IMBASEBAR0(1));
+ rt3883_pci_w32(rpc, 0x08021814, RT3883_PCI_REG_ID(1));
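+ /* class 0x060400 = PCI-to-PCI bridge, revision ID 0x01 */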
+ rt3883_pci_w32(rpc, 0x06040001, RT3883_PCI_REG_CLASS(1));
+ rt3883_pci_w32(rpc, 0x28801814, RT3883_PCI_REG_SUBID(1));
+
+ err = rt3883_pci_irq_init(dev, rpc);
+ if (err)
+ goto err_put_hb_node;
+
+ /* PCIe */
+ val = rt3883_pci_read_cfg32(rpc, 0, 0x01, 0, PCI_COMMAND);
+ val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ rt3883_pci_write_cfg32(rpc, 0, 0x01, 0, PCI_COMMAND, val);
+
+ /* PCI */
+ val = rt3883_pci_read_cfg32(rpc, 0, 0x00, 0, PCI_COMMAND);
+ val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ rt3883_pci_write_cfg32(rpc, 0, 0x00, 0, PCI_COMMAND, val);
+
+ if (mode == RT3883_PCI_MODE_PCIE) {
+ rt3883_pci_w32(rpc, 0x03ff0001, RT3883_PCI_REG_BAR0SETUP(0));
+ rt3883_pci_w32(rpc, 0x03ff0001, RT3883_PCI_REG_BAR0SETUP(1));
+
+ rt3883_pci_write_cfg32(rpc, 0, RT3883_P2P_BR_DEVNUM, 0,
+ PCI_BASE_ADDRESS_0,
+ RT3883_MEMORY_BASE);
+ /* flush write */
+ rt3883_pci_read_cfg32(rpc, 0, RT3883_P2P_BR_DEVNUM, 0,
+ PCI_BASE_ADDRESS_0);
+ } else {
+ rt3883_pci_write_cfg32(rpc, 0, RT3883_P2P_BR_DEVNUM, 0,
+ PCI_IO_BASE, 0x00000101);
+ }
+
+ register_pci_controller(&rpc->pci_controller);
+
+ return 0;
+
+err_put_hb_node:
+ of_node_put(rpc->pci_controller.of_node);
+err_put_intc_node:
+ of_node_put(rpc->intc_of_node);
+ return err;
+}
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return of_irq_parse_and_map_pci(dev, slot, pin);
+}
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
+
+static const struct of_device_id rt3883_pci_ids[] = {
+ { .compatible = "ralink,rt3883-pci" },
+ {},
+};
+
+static struct platform_driver rt3883_pci_driver = {
+ .probe = rt3883_pci_probe,
+ .driver = {
+ .name = "rt3883-pci",
+ .of_match_table = of_match_ptr(rt3883_pci_ids),
+ },
+};
+
+static int __init rt3883_pci_init(void)
+{
+ return platform_driver_register(&rt3883_pci_driver);
+}
+
+postcore_initcall(rt3883_pci_init);
diff --git a/arch/mips/pci/pci-sb1250.c b/arch/mips/pci/pci-sb1250.c
new file mode 100644
index 000000000..c3f82b280
--- /dev/null
+++ b/arch/mips/pci/pci-sb1250.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2001,2002,2003 Broadcom Corporation
+ * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
+ */
+
+/*
+ * BCM1250-specific PCI support
+ *
+ * This module provides the glue between Linux's PCI subsystem
+ * and the hardware: it handles configuration space accesses and
+ * sets up the translation for I/O space accesses.
+ *
+ * To access configuration space, we use ioremap. In the 32-bit
+ * kernel, this consumes either 4 or 8 page table pages, and 16MB of
+ * kernel mapped memory. Hopefully neither of these should be a huge
+ * problem.
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/vt.h>
+
+#include <asm/io.h>
+
+#include <asm/sibyte/sb1250_defs.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_scd.h>
+#include <asm/sibyte/board.h>
+
+/*
+ * Macros for calculating offsets into config space given a device
+ * structure or dev/fun/reg
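+ * (bus in bits 23:16, devfn in bits 15:8, register in bits 7:0)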
+ */
+#define CFGOFFSET(bus, devfn, where) (((bus)<<16) + ((devfn)<<8) + (where))
+#define CFGADDR(bus, devfn, where) CFGOFFSET((bus)->number, (devfn), where)
+
+static void *cfg_space;
+
+#define PCI_BUS_ENABLED 1
+#define LDT_BUS_ENABLED 2
+#define PCI_DEVICE_MODE 4
+
+static int sb1250_bus_status;
+
+#define PCI_BRIDGE_DEVICE 0
+#define LDT_BRIDGE_DEVICE 1
+
+#ifdef CONFIG_SIBYTE_HAS_LDT
+/*
+ * HT's level-sensitive interrupts require EOI, which is generated
+ * through a 4MB memory-mapped region
+ */
+unsigned long ldt_eoi_space;
+#endif
+
+/*
+ * Read/write 32-bit values in config space.
+ */
+static inline u32 READCFG32(u32 addr)
+{
+ return *(u32 *) (cfg_space + (addr & ~3));
+}
+
+static inline void WRITECFG32(u32 addr, u32 data)
+{
+ *(u32 *) (cfg_space + (addr & ~3)) = data;
+}
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return dev->irq;
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
+
+/*
+ * Some checks before doing config cycles:
+ * In PCI Device Mode, hide everything on bus 0 except the LDT host
+ * bridge. Otherwise, access is controlled by bridge MasterEn bits.
+ */
+static int sb1250_pci_can_access(struct pci_bus *bus, int devfn)
+{
+ u32 devno;
+
+ if (!(sb1250_bus_status & (PCI_BUS_ENABLED | PCI_DEVICE_MODE)))
+ return 0;
+
+ if (bus->number == 0) {
+ devno = PCI_SLOT(devfn);
+ if (devno == LDT_BRIDGE_DEVICE)
+ return (sb1250_bus_status & LDT_BUS_ENABLED) != 0;
+ else if (sb1250_bus_status & PCI_DEVICE_MODE)
+ return 0;
+ else
+ return 1;
+ } else
+ return 1;
+}
+
+/*
+ * Read/write access functions for various sizes of values
+ * in config space. Return all 1's for disallowed accesses
+ * for a kludgy but adequate simulation of master aborts.
+ */
+
+static int sb1250_pcibios_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ u32 data = 0;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (sb1250_pci_can_access(bus, devfn))
+ data = READCFG32(CFGADDR(bus, devfn, where));
+ else
+ data = 0xFFFFFFFF;
+
+ if (size == 1)
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ else if (size == 2)
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ else
+ *val = data;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int sb1250_pcibios_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ u32 cfgaddr = CFGADDR(bus, devfn, where);
+ u32 data = 0;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!sb1250_pci_can_access(bus, devfn))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ data = READCFG32(cfgaddr);
+
+ if (size == 1)
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else if (size == 2)
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else
+ data = val;
+
+ WRITECFG32(cfgaddr, data);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops sb1250_pci_ops = {
+ .read = sb1250_pcibios_read,
+ .write = sb1250_pcibios_write,
+};
+
+static struct resource sb1250_mem_resource = {
+ .name = "SB1250 PCI MEM",
+ .start = 0x40000000UL,
+ .end = 0x5fffffffUL,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource sb1250_io_resource = {
+ .name = "SB1250 PCI I/O",
+ .start = 0x00000000UL,
+ .end = 0x01ffffffUL,
+ .flags = IORESOURCE_IO,
+};
+
+struct pci_controller sb1250_controller = {
+ .pci_ops = &sb1250_pci_ops,
+ .mem_resource = &sb1250_mem_resource,
+ .io_resource = &sb1250_io_resource,
+};
+
+static int __init sb1250_pcibios_init(void)
+{
+ void __iomem *io_map_base;
+ uint32_t cmdreg;
+ uint64_t reg;
+
+ /* CFE will assign PCI resources */
+ pci_set_flags(PCI_PROBE_ONLY);
+
+ /* Avoid ISA compat ranges. */
+ PCIBIOS_MIN_IO = 0x00008000UL;
+ PCIBIOS_MIN_MEM = 0x01000000UL;
+
+ /* Set I/O resource limits. */
+ ioport_resource.end = 0x01ffffffUL; /* 32MB accessible by sb1250 */
+ iomem_resource.end = 0xffffffffUL; /* no HT support yet */
+
+ cfg_space =
+ ioremap(A_PHYS_LDTPCI_CFG_MATCH_BITS, 16 * 1024 * 1024);
+
+ /*
+ * See if the PCI bus has been configured by the firmware.
+ */
+ reg = __raw_readq(IOADDR(A_SCD_SYSTEM_CFG));
+ if (!(reg & M_SYS_PCI_HOST)) {
+ sb1250_bus_status |= PCI_DEVICE_MODE;
+ } else {
+ cmdreg = READCFG32(CFGOFFSET(0,
+ PCI_DEVFN(PCI_BRIDGE_DEVICE, 0),
+ PCI_COMMAND));
+ if (!(cmdreg & PCI_COMMAND_MASTER)) {
+ printk("PCI: Skipping PCI probe. Bus is not initialized.\n");
+ iounmap(cfg_space);
+ return 0;
+ }
+ sb1250_bus_status |= PCI_BUS_ENABLED;
+ }
+
+ /*
+ * Establish mappings in KSEG2 (kernel virtual) to PCI I/O
+ * space. Use "match bytes" policy to make everything look
+ * little-endian. So, you need to also set
+ * CONFIG_SWAP_IO_SPACE, but this is the combination that
+ * works correctly with most of Linux's drivers.
+ * XXX ehs: Should this happen in PCI Device mode?
+ */
+ io_map_base = ioremap(A_PHYS_LDTPCI_IO_MATCH_BYTES, 1024 * 1024);
+ sb1250_controller.io_map_base = (unsigned long)io_map_base;
+ set_io_port_base((unsigned long)io_map_base);
+
+#ifdef CONFIG_SIBYTE_HAS_LDT
+ /*
+ * Also check the LDT bridge's enable, just in case we didn't
+ * initialize that one.
+ */
+
+ cmdreg = READCFG32(CFGOFFSET(0, PCI_DEVFN(LDT_BRIDGE_DEVICE, 0),
+ PCI_COMMAND));
+ if (cmdreg & PCI_COMMAND_MASTER) {
+ sb1250_bus_status |= LDT_BUS_ENABLED;
+
+ /*
+ * Need bits 23:16 to convey vector number. Note that
+ * this consumes 4MB of kernel-mapped memory
+ * (Kseg2/Kseg3) for 32-bit kernel.
+ */
+ ldt_eoi_space = (unsigned long)
+ ioremap(A_PHYS_LDT_SPECIAL_MATCH_BYTES,
+ 4 * 1024 * 1024);
+ }
+#endif
+
+ register_pci_controller(&sb1250_controller);
+
+#ifdef CONFIG_VGA_CONSOLE
+ console_lock();
+ do_take_over_console(&vga_con, 0, MAX_NR_CONSOLES - 1, 1);
+ console_unlock();
+#endif
+ return 0;
+}
+arch_initcall(sb1250_pcibios_init);
diff --git a/arch/mips/pci/pci-tx4927.c b/arch/mips/pci/pci-tx4927.c
new file mode 100644
index 000000000..9b3301d19
--- /dev/null
+++ b/arch/mips/pci/pci-tx4927.c
@@ -0,0 +1,91 @@
+/*
+ * Based on linux/arch/mips/txx9/rbtx4938/setup.c,
+ * and RBTX49xx patch from CELF patch archive.
+ *
+ * Copyright 2001, 2003-2005 MontaVista Software Inc.
+ * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
+ * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/tx4927.h>
+
+int __init tx4927_report_pciclk(void)
+{
+ int pciclk = 0;
+
+ pr_info("PCIC --%s PCICLK:",
+ (__raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCI66) ?
+ " PCI66" : "");
+ if (__raw_readq(&tx4927_ccfgptr->pcfg) & TX4927_PCFG_PCICLKEN_ALL) {
+ u64 ccfg = __raw_readq(&tx4927_ccfgptr->ccfg);
+ switch ((unsigned long)ccfg &
+ TX4927_CCFG_PCIDIVMODE_MASK) {
+ case TX4927_CCFG_PCIDIVMODE_2_5:
+ pciclk = txx9_cpu_clock * 2 / 5; break;
+ case TX4927_CCFG_PCIDIVMODE_3:
+ pciclk = txx9_cpu_clock / 3; break;
+ case TX4927_CCFG_PCIDIVMODE_5:
+ pciclk = txx9_cpu_clock / 5; break;
+ case TX4927_CCFG_PCIDIVMODE_6:
+ pciclk = txx9_cpu_clock / 6; break;
+ }
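+ /* report the clock rounded to the nearest 0.1 MHz */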
+ pr_cont("Internal(%u.%uMHz)",
+ (pciclk + 50000) / 1000000,
+ ((pciclk + 50000) / 100000) % 10);
+ } else {
+ pr_cont("External");
+ pciclk = -1;
+ }
+ pr_cont("\n");
+ return pciclk;
+}
+
+int __init tx4927_pciclk66_setup(void)
+{
+ int pciclk;
+
+ /* Assert M66EN */
+ tx4927_ccfg_set(TX4927_CCFG_PCI66);
+ /* Double PCICLK (if possible) */
+ if (__raw_readq(&tx4927_ccfgptr->pcfg) & TX4927_PCFG_PCICLKEN_ALL) {
+ unsigned int pcidivmode = 0;
+ u64 ccfg = __raw_readq(&tx4927_ccfgptr->ccfg);
+ pcidivmode = (unsigned long)ccfg &
+ TX4927_CCFG_PCIDIVMODE_MASK;
+ switch (pcidivmode) {
+ case TX4927_CCFG_PCIDIVMODE_5:
+ case TX4927_CCFG_PCIDIVMODE_2_5:
+ pcidivmode = TX4927_CCFG_PCIDIVMODE_2_5;
+ pciclk = txx9_cpu_clock * 2 / 5;
+ break;
+ case TX4927_CCFG_PCIDIVMODE_6:
+ case TX4927_CCFG_PCIDIVMODE_3:
+ default:
+ pcidivmode = TX4927_CCFG_PCIDIVMODE_3;
+ pciclk = txx9_cpu_clock / 3;
+ }
+ tx4927_ccfg_change(TX4927_CCFG_PCIDIVMODE_MASK,
+ pcidivmode);
+ pr_debug("PCICLK: ccfg:%08lx\n",
+ (unsigned long)__raw_readq(&tx4927_ccfgptr->ccfg));
+ } else
+ pciclk = -1;
+ return pciclk;
+}
+
+void __init tx4927_setup_pcierr_irq(void)
+{
+ if (request_irq(TXX9_IRQ_BASE + TX4927_IR_PCIERR,
+ tx4927_pcierr_interrupt,
+ 0, "PCI error",
+ (void *)TX4927_PCIC_REG))
+ pr_warn("Failed to request irq for PCIERR\n");
+}
diff --git a/arch/mips/pci/pci-tx4938.c b/arch/mips/pci/pci-tx4938.c
new file mode 100644
index 000000000..a6418460e
--- /dev/null
+++ b/arch/mips/pci/pci-tx4938.c
@@ -0,0 +1,142 @@
+/*
+ * Based on linux/arch/mips/txx9/rbtx4938/setup.c,
+ * and RBTX49xx patch from CELF patch archive.
+ *
+ * Copyright 2001, 2003-2005 MontaVista Software Inc.
+ * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
+ * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/tx4938.h>
+
+int __init tx4938_report_pciclk(void)
+{
+ int pciclk = 0;
+
+ pr_info("PCIC --%s PCICLK:",
+ (__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCI66) ?
+ " PCI66" : "");
+ if (__raw_readq(&tx4938_ccfgptr->pcfg) & TX4938_PCFG_PCICLKEN_ALL) {
+ u64 ccfg = __raw_readq(&tx4938_ccfgptr->ccfg);
+ switch ((unsigned long)ccfg &
+ TX4938_CCFG_PCIDIVMODE_MASK) {
+ case TX4938_CCFG_PCIDIVMODE_4:
+ pciclk = txx9_cpu_clock / 4; break;
+ case TX4938_CCFG_PCIDIVMODE_4_5:
+ pciclk = txx9_cpu_clock * 2 / 9; break;
+ case TX4938_CCFG_PCIDIVMODE_5:
+ pciclk = txx9_cpu_clock / 5; break;
+ case TX4938_CCFG_PCIDIVMODE_5_5:
+ pciclk = txx9_cpu_clock * 2 / 11; break;
+ case TX4938_CCFG_PCIDIVMODE_8:
+ pciclk = txx9_cpu_clock / 8; break;
+ case TX4938_CCFG_PCIDIVMODE_9:
+ pciclk = txx9_cpu_clock / 9; break;
+ case TX4938_CCFG_PCIDIVMODE_10:
+ pciclk = txx9_cpu_clock / 10; break;
+ case TX4938_CCFG_PCIDIVMODE_11:
+ pciclk = txx9_cpu_clock / 11; break;
+ }
+ pr_cont("Internal(%u.%uMHz)",
+ (pciclk + 50000) / 1000000,
+ ((pciclk + 50000) / 100000) % 10);
+ } else {
+ pr_cont("External");
+ pciclk = -1;
+ }
+ pr_cont("\n");
+ return pciclk;
+}
+
+void __init tx4938_report_pci1clk(void)
+{
+ __u64 ccfg = __raw_readq(&tx4938_ccfgptr->ccfg);
+ unsigned int pciclk =
+ txx9_gbus_clock / ((ccfg & TX4938_CCFG_PCI1DMD) ? 4 : 2);
+
+ pr_info("PCIC1 -- %sPCICLK:%u.%uMHz\n",
+ (ccfg & TX4938_CCFG_PCI1_66) ? "PCI66 " : "",
+ (pciclk + 50000) / 1000000,
+ ((pciclk + 50000) / 100000) % 10);
+}
+
+int __init tx4938_pciclk66_setup(void)
+{
+ int pciclk;
+
+ /* Assert M66EN */
+ tx4938_ccfg_set(TX4938_CCFG_PCI66);
+ /* Double PCICLK (if possible) */
+ if (__raw_readq(&tx4938_ccfgptr->pcfg) & TX4938_PCFG_PCICLKEN_ALL) {
+ unsigned int pcidivmode = 0;
+ u64 ccfg = __raw_readq(&tx4938_ccfgptr->ccfg);
+ pcidivmode = (unsigned long)ccfg &
+ TX4938_CCFG_PCIDIVMODE_MASK;
+ switch (pcidivmode) {
+ case TX4938_CCFG_PCIDIVMODE_8:
+ case TX4938_CCFG_PCIDIVMODE_4:
+ pcidivmode = TX4938_CCFG_PCIDIVMODE_4;
+ pciclk = txx9_cpu_clock / 4;
+ break;
+ case TX4938_CCFG_PCIDIVMODE_9:
+ case TX4938_CCFG_PCIDIVMODE_4_5:
+ pcidivmode = TX4938_CCFG_PCIDIVMODE_4_5;
+ pciclk = txx9_cpu_clock * 2 / 9;
+ break;
+ case TX4938_CCFG_PCIDIVMODE_10:
+ case TX4938_CCFG_PCIDIVMODE_5:
+ pcidivmode = TX4938_CCFG_PCIDIVMODE_5;
+ pciclk = txx9_cpu_clock / 5;
+ break;
+ case TX4938_CCFG_PCIDIVMODE_11:
+ case TX4938_CCFG_PCIDIVMODE_5_5:
+ default:
+ pcidivmode = TX4938_CCFG_PCIDIVMODE_5_5;
+ pciclk = txx9_cpu_clock * 2 / 11;
+ break;
+ }
+ tx4938_ccfg_change(TX4938_CCFG_PCIDIVMODE_MASK,
+ pcidivmode);
+ pr_debug("PCICLK: ccfg:%08lx\n",
+ (unsigned long)__raw_readq(&tx4938_ccfgptr->ccfg));
+ } else
+ pciclk = -1;
+ return pciclk;
+}
+
+int tx4938_pcic1_map_irq(const struct pci_dev *dev, u8 slot)
+{
+ if (get_tx4927_pcicptr(dev->bus->sysdata) == tx4938_pcic1ptr) {
+ switch (slot) {
+ case TX4927_PCIC_IDSEL_AD_TO_SLOT(31):
+ if (__raw_readq(&tx4938_ccfgptr->pcfg) &
+ TX4938_PCFG_ETH0_SEL)
+ return TXX9_IRQ_BASE + TX4938_IR_ETH0;
+ break;
+ case TX4927_PCIC_IDSEL_AD_TO_SLOT(30):
+ if (__raw_readq(&tx4938_ccfgptr->pcfg) &
+ TX4938_PCFG_ETH1_SEL)
+ return TXX9_IRQ_BASE + TX4938_IR_ETH1;
+ break;
+ }
+ return 0;
+ }
+ return -1;
+}
+
+void __init tx4938_setup_pcierr_irq(void)
+{
+ if (request_irq(TXX9_IRQ_BASE + TX4938_IR_PCIERR,
+ tx4927_pcierr_interrupt,
+ 0, "PCI error",
+ (void *)TX4927_PCIC_REG))
+ pr_warn("Failed to request irq for PCIERR\n");
+}
diff --git a/arch/mips/pci/pci-tx4939.c b/arch/mips/pci/pci-tx4939.c
new file mode 100644
index 000000000..09a65f7db
--- /dev/null
+++ b/arch/mips/pci/pci-tx4939.c
@@ -0,0 +1,107 @@
+/*
+ * Based on linux/arch/mips/txx9/rbtx4939/setup.c,
+ * and RBTX49xx patch from CELF patch archive.
+ *
+ * Copyright 2001, 2003-2005 MontaVista Software Inc.
+ * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
+ * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/tx4939.h>
+
+int __init tx4939_report_pciclk(void)
+{
+ int pciclk = 0;
+
+ pr_info("PCIC --%s PCICLK:",
+ (__raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_PCI66) ?
+ " PCI66" : "");
+ if (__raw_readq(&tx4939_ccfgptr->pcfg) & TX4939_PCFG_PCICLKEN_ALL) {
+ pciclk = txx9_master_clock * 20 / 6;
+ if (!(__raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_PCI66))
+ pciclk /= 2;
+ pr_cont("Internal(%u.%uMHz)",
+ (pciclk + 50000) / 1000000,
+ ((pciclk + 50000) / 100000) % 10);
+ } else {
+ pr_cont("External");
+ pciclk = -1;
+ }
+ pr_cont("\n");
+ return pciclk;
+}
+
+void __init tx4939_report_pci1clk(void)
+{
+ unsigned int pciclk = txx9_master_clock * 20 / 6;
+
+ pr_info("PCIC1 -- PCICLK:%u.%uMHz\n",
+ (pciclk + 50000) / 1000000,
+ ((pciclk + 50000) / 100000) % 10);
+}
+
+int tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot)
+{
+ if (get_tx4927_pcicptr(dev->bus->sysdata) == tx4939_pcic1ptr) {
+ switch (slot) {
+ case TX4927_PCIC_IDSEL_AD_TO_SLOT(31):
+ if (__raw_readq(&tx4939_ccfgptr->pcfg) &
+ TX4939_PCFG_ET0MODE)
+ return TXX9_IRQ_BASE + TX4939_IR_ETH(0);
+ break;
+ case TX4927_PCIC_IDSEL_AD_TO_SLOT(30):
+ if (__raw_readq(&tx4939_ccfgptr->pcfg) &
+ TX4939_PCFG_ET1MODE)
+ return TXX9_IRQ_BASE + TX4939_IR_ETH(1);
+ break;
+ }
+ return 0;
+ }
+ return -1;
+}
+
+int tx4939_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ int irq = tx4939_pcic1_map_irq(dev, slot);
+
+ if (irq >= 0)
+ return irq;
+ irq = pin;
+ /* IRQ rotation */
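+ /*
+ * i.e. irq = ((pin - 1) + 33 - slot) % 4 + 1, which rotates
+ * INTA..INTD by slot number so that neighbouring slots land
+ * on different interrupt lines.
+ */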
+ irq--; /* 0-3 */
+ irq = (irq + 33 - slot) % 4;
+ irq++; /* 1-4 */
+
+ switch (irq) {
+ case 1:
+ irq = TXX9_IRQ_BASE + TX4939_IR_INTA;
+ break;
+ case 2:
+ irq = TXX9_IRQ_BASE + TX4939_IR_INTB;
+ break;
+ case 3:
+ irq = TXX9_IRQ_BASE + TX4939_IR_INTC;
+ break;
+ case 4:
+ irq = TXX9_IRQ_BASE + TX4939_IR_INTD;
+ break;
+ }
+ return irq;
+}
+
+void __init tx4939_setup_pcierr_irq(void)
+{
+ if (request_irq(TXX9_IRQ_BASE + TX4939_IR_PCIERR,
+ tx4927_pcierr_interrupt,
+ 0, "PCI error",
+ (void *)TX4939_PCIC_REG))
+ pr_warn("Failed to request irq for PCIERR\n");
+}
diff --git a/arch/mips/pci/pci-vr41xx.c b/arch/mips/pci/pci-vr41xx.c
new file mode 100644
index 000000000..1fa4e1014
--- /dev/null
+++ b/arch/mips/pci/pci-vr41xx.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * pci-vr41xx.c, PCI Control Unit routines for the NEC VR4100 series.
+ *
+ * Copyright (C) 2001-2003 MontaVista Software Inc.
+ * Author: Yoichi Yuasa <source@mvista.com>
+ * Copyright (C) 2004-2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
+ */
+/*
+ * Changes:
+ * MontaVista Software Inc. <source@mvista.com>
+ * - New creation, NEC VR4122 and VR4131 are supported.
+ */
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include <asm/cpu.h>
+#include <asm/io.h>
+#include <asm/vr41xx/pci.h>
+#include <asm/vr41xx/vr41xx.h>
+
+#include "pci-vr41xx.h"
+
+extern struct pci_ops vr41xx_pci_ops;
+
+static void __iomem *pciu_base;
+
+#define pciu_read(offset) readl(pciu_base + (offset))
+#define pciu_write(offset, value) writel((value), pciu_base + (offset))
+
+static struct pci_master_address_conversion pci_master_memory1 = {
+ .bus_base_address = PCI_MASTER_MEM1_BUS_BASE_ADDRESS,
+ .address_mask = PCI_MASTER_MEM1_ADDRESS_MASK,
+ .pci_base_address = PCI_MASTER_MEM1_PCI_BASE_ADDRESS,
+};
+
+static struct pci_target_address_conversion pci_target_memory1 = {
+ .address_mask = PCI_TARGET_MEM1_ADDRESS_MASK,
+ .bus_base_address = PCI_TARGET_MEM1_BUS_BASE_ADDRESS,
+};
+
+static struct pci_master_address_conversion pci_master_io = {
+ .bus_base_address = PCI_MASTER_IO_BUS_BASE_ADDRESS,
+ .address_mask = PCI_MASTER_IO_ADDRESS_MASK,
+ .pci_base_address = PCI_MASTER_IO_PCI_BASE_ADDRESS,
+};
+
+static struct pci_mailbox_address pci_mailbox = {
+ .base_address = PCI_MAILBOX_BASE_ADDRESS,
+};
+
+static struct pci_target_address_window pci_target_window1 = {
+ .base_address = PCI_TARGET_WINDOW1_BASE_ADDRESS,
+};
+
+static struct resource pci_mem_resource = {
+ .name = "PCI Memory resources",
+ .start = PCI_MEM_RESOURCE_START,
+ .end = PCI_MEM_RESOURCE_END,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource pci_io_resource = {
+ .name = "PCI I/O resources",
+ .start = PCI_IO_RESOURCE_START,
+ .end = PCI_IO_RESOURCE_END,
+ .flags = IORESOURCE_IO,
+};
+
+static struct pci_controller_unit_setup vr41xx_pci_controller_unit_setup = {
+ .master_memory1 = &pci_master_memory1,
+ .target_memory1 = &pci_target_memory1,
+ .master_io = &pci_master_io,
+ .exclusive_access = CANNOT_LOCK_FROM_DEVICE,
+ .wait_time_limit_from_irdy_to_trdy = 0,
+ .mailbox = &pci_mailbox,
+ .target_window1 = &pci_target_window1,
+ .master_latency_timer = 0x80,
+ .retry_limit = 0,
+ .arbiter_priority_control = PCI_ARBITRATION_MODE_FAIR,
+ .take_away_gnt_mode = PCI_TAKE_AWAY_GNT_DISABLE,
+};
+
+static struct pci_controller vr41xx_pci_controller = {
+ .pci_ops = &vr41xx_pci_ops,
+ .mem_resource = &pci_mem_resource,
+ .io_resource = &pci_io_resource,
+};
+
+void __init vr41xx_pciu_setup(struct pci_controller_unit_setup *setup)
+{
+ vr41xx_pci_controller_unit_setup = *setup;
+}
+
+static int __init vr41xx_pciu_init(void)
+{
+ struct pci_controller_unit_setup *setup;
+ struct pci_master_address_conversion *master;
+ struct pci_target_address_conversion *target;
+ struct pci_mailbox_address *mailbox;
+ struct pci_target_address_window *window;
+ unsigned long vtclock, pci_clock_max;
+ uint32_t val;
+
+ setup = &vr41xx_pci_controller_unit_setup;
+
+ if (request_mem_region(PCIU_BASE, PCIU_SIZE, "PCIU") == NULL)
+ return -EBUSY;
+
+ pciu_base = ioremap(PCIU_BASE, PCIU_SIZE);
+ if (pciu_base == NULL) {
+ release_mem_region(PCIU_BASE, PCIU_SIZE);
+ return -EBUSY;
+ }
+
+ /* Disable PCI interrupt */
+ vr41xx_disable_pciint();
+
+ /* Supply VTClock to PCIU */
+ vr41xx_supply_clock(PCIU_CLOCK);
+
+ /* Dummy write, waiting for supply of VTClock. */
+ vr41xx_disable_pciint();
+
+ /* Select PCI clock */
+ if (setup->pci_clock_max != 0)
+ pci_clock_max = setup->pci_clock_max;
+ else
+ pci_clock_max = PCI_CLOCK_MAX;
+ vtclock = vr41xx_get_vtclock_frequency();
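+ /* pick the smallest divisor that keeps the PCI clock below the limit */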
+ if (vtclock < pci_clock_max)
+ pciu_write(PCICLKSELREG, EQUAL_VTCLOCK);
+ else if ((vtclock / 2) < pci_clock_max)
+ pciu_write(PCICLKSELREG, HALF_VTCLOCK);
+ else if (current_cpu_data.processor_id >= PRID_VR4131_REV2_1 &&
+ (vtclock / 3) < pci_clock_max)
+ pciu_write(PCICLKSELREG, ONE_THIRD_VTCLOCK);
+ else if ((vtclock / 4) < pci_clock_max)
+ pciu_write(PCICLKSELREG, QUARTER_VTCLOCK);
+ else {
+ printk(KERN_ERR "PCI Clock is over 33MHz.\n");
+ iounmap(pciu_base);
+ release_mem_region(PCIU_BASE, PCIU_SIZE);
+ return -EINVAL;
+ }
+
+ /* Supply PCI clock by PCI bus */
+ vr41xx_supply_clock(PCI_CLOCK);
+
+ if (setup->master_memory1 != NULL) {
+ master = setup->master_memory1;
+ val = IBA(master->bus_base_address) |
+ MASTER_MSK(master->address_mask) |
+ WINEN |
+ PCIA(master->pci_base_address);
+ pciu_write(PCIMMAW1REG, val);
+ } else {
+ val = pciu_read(PCIMMAW1REG);
+ val &= ~WINEN;
+ pciu_write(PCIMMAW1REG, val);
+ }
+
+ if (setup->master_memory2 != NULL) {
+ master = setup->master_memory2;
+ val = IBA(master->bus_base_address) |
+ MASTER_MSK(master->address_mask) |
+ WINEN |
+ PCIA(master->pci_base_address);
+ pciu_write(PCIMMAW2REG, val);
+ } else {
+ val = pciu_read(PCIMMAW2REG);
+ val &= ~WINEN;
+ pciu_write(PCIMMAW2REG, val);
+ }
+
+ if (setup->target_memory1 != NULL) {
+ target = setup->target_memory1;
+ val = TARGET_MSK(target->address_mask) |
+ WINEN |
+ ITA(target->bus_base_address);
+ pciu_write(PCITAW1REG, val);
+ } else {
+ val = pciu_read(PCITAW1REG);
+ val &= ~WINEN;
+ pciu_write(PCITAW1REG, val);
+ }
+
+ if (setup->target_memory2 != NULL) {
+ target = setup->target_memory2;
+ val = TARGET_MSK(target->address_mask) |
+ WINEN |
+ ITA(target->bus_base_address);
+ pciu_write(PCITAW2REG, val);
+ } else {
+ val = pciu_read(PCITAW2REG);
+ val &= ~WINEN;
+ pciu_write(PCITAW2REG, val);
+ }
+
+ if (setup->master_io != NULL) {
+ master = setup->master_io;
+ val = IBA(master->bus_base_address) |
+ MASTER_MSK(master->address_mask) |
+ WINEN |
+ PCIIA(master->pci_base_address);
+ pciu_write(PCIMIOAWREG, val);
+ } else {
+ val = pciu_read(PCIMIOAWREG);
+ val &= ~WINEN;
+ pciu_write(PCIMIOAWREG, val);
+ }
+
+ if (setup->exclusive_access == CANNOT_LOCK_FROM_DEVICE)
+ pciu_write(PCIEXACCREG, UNLOCK);
+ else
+ pciu_write(PCIEXACCREG, 0);
+
+ if (current_cpu_type() == CPU_VR4122)
+ pciu_write(PCITRDYVREG, TRDYV(setup->wait_time_limit_from_irdy_to_trdy));
+
+ pciu_write(LATTIMEREG, MLTIM(setup->master_latency_timer));
+
+ if (setup->mailbox != NULL) {
+ mailbox = setup->mailbox;
+ val = MBADD(mailbox->base_address) | TYPE_32BITSPACE |
+ MSI_MEMORY | PREF_APPROVAL;
+ pciu_write(MAILBAREG, val);
+ }
+
+ if (setup->target_window1) {
+ window = setup->target_window1;
+ val = PMBA(window->base_address) | TYPE_32BITSPACE |
+ MSI_MEMORY | PREF_APPROVAL;
+ pciu_write(PCIMBA1REG, val);
+ }
+
+ if (setup->target_window2) {
+ window = setup->target_window2;
+ val = PMBA(window->base_address) | TYPE_32BITSPACE |
+ MSI_MEMORY | PREF_APPROVAL;
+ pciu_write(PCIMBA2REG, val);
+ }
+
+ val = pciu_read(RETVALREG);
+ val &= ~RTYVAL_MASK;
+ val |= RTYVAL(setup->retry_limit);
+ pciu_write(RETVALREG, val);
+
+ val = pciu_read(PCIAPCNTREG);
+ val &= ~(TKYGNT | PAPC);
+
+ switch (setup->arbiter_priority_control) {
+ case PCI_ARBITRATION_MODE_ALTERNATE_0:
+ val |= PAPC_ALTERNATE_0;
+ break;
+ case PCI_ARBITRATION_MODE_ALTERNATE_B:
+ val |= PAPC_ALTERNATE_B;
+ break;
+ default:
+ val |= PAPC_FAIR;
+ break;
+ }
+
+ if (setup->take_away_gnt_mode == PCI_TAKE_AWAY_GNT_ENABLE)
+ val |= TKYGNT_ENABLE;
+
+ pciu_write(PCIAPCNTREG, val);
+
+ pciu_write(COMMANDREG, PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER | PCI_COMMAND_PARITY |
+ PCI_COMMAND_SERR);
+
+ /* Clear bus error */
+ pciu_read(BUSERRADREG);
+
+ pciu_write(PCIENREG, PCIU_CONFIG_DONE);
+
+ if (setup->mem_resource != NULL)
+ vr41xx_pci_controller.mem_resource = setup->mem_resource;
+
+ if (setup->io_resource != NULL) {
+ vr41xx_pci_controller.io_resource = setup->io_resource;
+ } else {
+ set_io_port_base(IO_PORT_BASE);
+ ioport_resource.start = IO_PORT_RESOURCE_START;
+ ioport_resource.end = IO_PORT_RESOURCE_END;
+ }
+
+ if (setup->master_io) {
+ void __iomem *io_map_base;
+ struct resource *res = vr41xx_pci_controller.io_resource;
+ master = setup->master_io;
+ io_map_base = ioremap(master->bus_base_address,
+ resource_size(res));
+ if (!io_map_base)
+ return -EBUSY;
+
+ vr41xx_pci_controller.io_map_base = (unsigned long)io_map_base;
+ }
+
+ register_pci_controller(&vr41xx_pci_controller);
+
+ return 0;
+}
+
+arch_initcall(vr41xx_pciu_init);
diff --git a/arch/mips/pci/pci-vr41xx.h b/arch/mips/pci/pci-vr41xx.h
new file mode 100644
index 000000000..5595e4a39
--- /dev/null
+++ b/arch/mips/pci/pci-vr41xx.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * pci-vr41xx.h, Include file for PCI Control Unit of the NEC VR4100 series.
+ *
+ * Copyright (C) 2002 MontaVista Software Inc.
+ * Author: Yoichi Yuasa <source@mvista.com>
+ * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#ifndef __PCI_VR41XX_H
+#define __PCI_VR41XX_H
+
+#define PCIU_BASE 0x0f000c00UL
+#define PCIU_SIZE 0x200UL
+
+#define PCIMMAW1REG 0x00
+#define PCIMMAW2REG 0x04
+#define PCITAW1REG 0x08
+#define PCITAW2REG 0x0c
+#define PCIMIOAWREG 0x10
+ #define IBA(addr) ((addr) & 0xff000000U)
+ #define MASTER_MSK(mask) (((mask) >> 11) & 0x000fe000U)
+ #define PCIA(addr) (((addr) >> 24) & 0x000000ffU)
+ #define TARGET_MSK(mask) (((mask) >> 8) & 0x000fe000U)
+ #define ITA(addr) (((addr) >> 24) & 0x000000ffU)
+ #define PCIIA(addr) (((addr) >> 24) & 0x000000ffU)
+ #define WINEN 0x1000U
+#define PCICONFDREG 0x14
+#define PCICONFAREG 0x18
+#define PCIMAILREG 0x1c
+#define BUSERRADREG 0x24
+ #define EA(reg) ((reg) & 0xfffffffc)
+
+#define INTCNTSTAREG 0x28
+ #define MABTCLR 0x80000000U
+ #define TRDYCLR 0x40000000U
+ #define PARCLR 0x20000000U
+ #define MBCLR 0x10000000U
+ #define SERRCLR 0x08000000U
+ #define RTYCLR 0x04000000U
+ #define MABCLR 0x02000000U
+ #define TABCLR 0x01000000U
+ /* RFU */
+ #define MABTMSK 0x00008000U
+ #define TRDYMSK 0x00004000U
+ #define PARMSK 0x00002000U
+ #define MBMSK 0x00001000U
+ #define SERRMSK 0x00000800U
+ #define RTYMSK 0x00000400U
+ #define MABMSK 0x00000200U
+ #define TABMSK 0x00000100U
+ #define IBAMABT 0x00000080U
+ #define TRDYRCH 0x00000040U
+ #define PAR 0x00000020U
+ #define MB 0x00000010U
+ #define PCISERR 0x00000008U
+ #define RTYRCH 0x00000004U
+ #define MABORT 0x00000002U
+ #define TABORT 0x00000001U
+
+#define PCIEXACCREG 0x2c
+ #define UNLOCK 0x2U
+ #define EAREQ 0x1U
+#define PCIRECONTREG 0x30
+ #define RTRYCNT(reg) ((reg) & 0x000000ffU)
+#define PCIENREG 0x34
+ #define PCIU_CONFIG_DONE 0x4U
+#define PCICLKSELREG 0x38
+ #define EQUAL_VTCLOCK 0x2U
+ #define HALF_VTCLOCK 0x0U
+ #define ONE_THIRD_VTCLOCK 0x3U
+ #define QUARTER_VTCLOCK 0x1U
+#define PCITRDYVREG 0x3c
+ #define TRDYV(val) ((uint32_t)(val) & 0xffU)
+#define PCICLKRUNREG 0x60
+
+#define VENDORIDREG 0x100
+#define DEVICEIDREG 0x100
+#define COMMANDREG 0x104
+#define STATUSREG 0x104
+#define REVIDREG 0x108
+#define CLASSREG 0x108
+#define CACHELSREG 0x10c
+#define LATTIMEREG 0x10c
+ #define MLTIM(val) (((uint32_t)(val) << 7) & 0xff00U)
+#define MAILBAREG 0x110
+#define PCIMBA1REG 0x114
+#define PCIMBA2REG 0x118
+ #define MBADD(base) ((base) & 0xfffff800U)
+ #define PMBA(base) ((base) & 0xffe00000U)
+ #define PREF 0x8U
+ #define PREF_APPROVAL 0x8U
+ #define PREF_DISAPPROVAL 0x0U
+ #define TYPE 0x6U
+ #define TYPE_32BITSPACE 0x0U
+ #define MSI 0x1U
+ #define MSI_MEMORY 0x0U
+#define INTLINEREG 0x13c
+#define INTPINREG 0x13c
+#define RETVALREG 0x140
+#define PCIAPCNTREG 0x140
+ #define TKYGNT 0x04000000U
+ #define TKYGNT_ENABLE 0x04000000U
+ #define TKYGNT_DISABLE 0x00000000U
+ #define PAPC 0x03000000U
+ #define PAPC_ALTERNATE_B 0x02000000U
+ #define PAPC_ALTERNATE_0 0x01000000U
+ #define PAPC_FAIR 0x00000000U
+ #define RTYVAL(val) (((uint32_t)(val) << 7) & 0xff00U)
+ #define RTYVAL_MASK 0xff00U
+
+#define PCI_CLOCK_MAX 33333333U
+
+/*
+ * Default setup
+ */
+#define PCI_MASTER_MEM1_BUS_BASE_ADDRESS 0x10000000U
+#define PCI_MASTER_MEM1_ADDRESS_MASK 0x7c000000U
+#define PCI_MASTER_MEM1_PCI_BASE_ADDRESS 0x10000000U
+
+#define PCI_TARGET_MEM1_ADDRESS_MASK 0x08000000U
+#define PCI_TARGET_MEM1_BUS_BASE_ADDRESS 0x00000000U
+
+#define PCI_MASTER_IO_BUS_BASE_ADDRESS 0x16000000U
+#define PCI_MASTER_IO_ADDRESS_MASK 0x7e000000U
+#define PCI_MASTER_IO_PCI_BASE_ADDRESS 0x00000000U
+
+#define PCI_MAILBOX_BASE_ADDRESS 0x00000000U
+
+#define PCI_TARGET_WINDOW1_BASE_ADDRESS 0x00000000U
+
+#define IO_PORT_BASE KSEG1ADDR(PCI_MASTER_IO_BUS_BASE_ADDRESS)
+#define IO_PORT_RESOURCE_START PCI_MASTER_IO_PCI_BASE_ADDRESS
+#define IO_PORT_RESOURCE_END (~PCI_MASTER_IO_ADDRESS_MASK & PCI_MASTER_ADDRESS_MASK)
+
+#define PCI_IO_RESOURCE_START 0x01000000UL
+#define PCI_IO_RESOURCE_END 0x01ffffffUL
+
+#define PCI_MEM_RESOURCE_START 0x11000000UL
+#define PCI_MEM_RESOURCE_END 0x13ffffffUL
+
+#endif /* __PCI_VR41XX_H */
diff --git a/arch/mips/pci/pci-xlp.c b/arch/mips/pci/pci-xlp.c
new file mode 100644
index 000000000..9eff9137f
--- /dev/null
+++ b/arch/mips/pci/pci-xlp.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/mm.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/console.h>
+
+#include <asm/io.h>
+
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/mips-extns.h>
+
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#include <asm/netlogic/xlp-hal/pcibus.h>
+#include <asm/netlogic/xlp-hal/bridge.h>
+
+static void *pci_config_base;
+
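+/* ECAM-style layout: bus in bits 27:20, devfn in bits 19:12, register offset in bits 11:0 */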
+#define pci_cfg_addr(bus, devfn, off) (((bus) << 20) | ((devfn) << 12) | (off))
+
+/* PCI ops */
+static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ u32 data;
+ u32 *cfgaddr;
+
+ where &= ~3;
+ if (cpu_is_xlp9xx()) {
+ /* be very careful on SoC buses */
+ if (bus->number == 0) {
+ /* Scan only existing nodes - uboot bug? */
+ if (PCI_SLOT(devfn) != 0 ||
+ !nlm_node_present(PCI_FUNC(devfn)))
+ return 0xffffffff;
+ } else if (bus->parent->number == 0) { /* SoC bus */
+ if (PCI_SLOT(devfn) == 0) /* b.0.0 hangs */
+ return 0xffffffff;
+ if (devfn == 44) /* b.5.4 hangs */
+ return 0xffffffff;
+ }
+ } else if (bus->number == 0 && PCI_SLOT(devfn) == 1 && where == 0x954) {
+ return 0xffffffff;
+ }
+ cfgaddr = (u32 *)(pci_config_base +
+ pci_cfg_addr(bus->number, devfn, where));
+ data = *cfgaddr;
+ return data;
+}
+
+static inline void pci_cfg_write_32bit(struct pci_bus *bus, unsigned int devfn,
+ int where, u32 data)
+{
+ u32 *cfgaddr;
+
+ cfgaddr = (u32 *)(pci_config_base +
+ pci_cfg_addr(bus->number, devfn, where & ~3));
+ *cfgaddr = data;
+}
+
+static int nlm_pcibios_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ u32 data;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ data = pci_cfg_read_32bit(bus, devfn, where);
+
+ if (size == 1)
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ else if (size == 2)
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ else
+ *val = data;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+
+static int nlm_pcibios_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ u32 data;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ data = pci_cfg_read_32bit(bus, devfn, where);
+
+ if (size == 1)
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else if (size == 2)
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else
+ data = val;
+
+ pci_cfg_write_32bit(bus, devfn, where, data);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops nlm_pci_ops = {
+ .read = nlm_pcibios_read,
+ .write = nlm_pcibios_write
+};
+
+static struct resource nlm_pci_mem_resource = {
+ .name = "XLP PCI MEM",
+ .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */
+ .end = 0xdfffffffUL,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource nlm_pci_io_resource = {
+ .name = "XLP IO MEM",
+ .start = 0x14000000UL, /* 64MB PCI IO @ 0x1400_0000 */
+ .end = 0x17ffffffUL,
+ .flags = IORESOURCE_IO,
+};
+
+struct pci_controller nlm_pci_controller = {
+ .index = 0,
+ .pci_ops = &nlm_pci_ops,
+ .mem_resource = &nlm_pci_mem_resource,
+ .mem_offset = 0x00000000UL,
+ .io_resource = &nlm_pci_io_resource,
+ .io_offset = 0x00000000UL,
+};
+
+struct pci_dev *xlp_get_pcie_link(const struct pci_dev *dev)
+{
+ struct pci_bus *bus, *p;
+
+ bus = dev->bus;
+
+ if (cpu_is_xlp9xx()) {
+ /* find bus with grand parent number == 0 */
+ for (p = bus->parent; p && p->parent && p->parent->number != 0;
+ p = p->parent)
+ bus = p;
+ return (p && p->parent) ? bus->self : NULL;
+ } else {
+ /* Find the bridge on bus 0 */
+ for (p = bus->parent; p && p->number != 0; p = p->parent)
+ bus = p;
+
+ return p ? bus->self : NULL;
+ }
+}
+
+int xlp_socdev_to_node(const struct pci_dev *lnkdev)
+{
+ if (cpu_is_xlp9xx())
+ return PCI_FUNC(lnkdev->bus->self->devfn);
+ else
+ return PCI_SLOT(lnkdev->devfn) / 8;
+}
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct pci_dev *lnkdev;
+ int lnkfunc, node;
+
+ /*
+ * For XLP PCIe there is an IRQ per link; find out which
+ * link the device is on to assign its interrupt.
+ */
+ lnkdev = xlp_get_pcie_link(dev);
+ if (lnkdev == NULL)
+ return 0;
+
+ lnkfunc = PCI_FUNC(lnkdev->devfn);
+ node = xlp_socdev_to_node(lnkdev);
+
+ return nlm_irq_to_xirq(node, PIC_PCIE_LINK_LEGACY_IRQ(lnkfunc));
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
+
+/*
+ * If big-endian, enable hardware byteswap on the PCIe bridges.
+ * This will make both the SoC and PCIe devices behave consistently with
+ * readl/writel.
+ */
+#ifdef __BIG_ENDIAN
+static void xlp_config_pci_bswap(int node, int link)
+{
+ uint64_t nbubase, lnkbase;
+ u32 reg;
+
+ nbubase = nlm_get_bridge_regbase(node);
+ lnkbase = nlm_get_pcie_base(node, link);
+
+ /*
+ * Enable byte swap in hardware. Program each link's PCIe SWAP regions
+ * from the link's address ranges.
+ */
+ if (cpu_is_xlp9xx()) {
+ reg = nlm_read_bridge_reg(nbubase,
+ BRIDGE_9XX_PCIEMEM_BASE0 + link);
+ nlm_write_pci_reg(lnkbase, PCIE_9XX_BYTE_SWAP_MEM_BASE, reg);
+
+ reg = nlm_read_bridge_reg(nbubase,
+ BRIDGE_9XX_PCIEMEM_LIMIT0 + link);
+ nlm_write_pci_reg(lnkbase,
+ PCIE_9XX_BYTE_SWAP_MEM_LIM, reg | 0xfff);
+
+ reg = nlm_read_bridge_reg(nbubase,
+ BRIDGE_9XX_PCIEIO_BASE0 + link);
+ nlm_write_pci_reg(lnkbase, PCIE_9XX_BYTE_SWAP_IO_BASE, reg);
+
+ reg = nlm_read_bridge_reg(nbubase,
+ BRIDGE_9XX_PCIEIO_LIMIT0 + link);
+ nlm_write_pci_reg(lnkbase,
+ PCIE_9XX_BYTE_SWAP_IO_LIM, reg | 0xfff);
+ } else {
+ reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEMEM_BASE0 + link);
+ nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_MEM_BASE, reg);
+
+ reg = nlm_read_bridge_reg(nbubase,
+ BRIDGE_PCIEMEM_LIMIT0 + link);
+ nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_MEM_LIM, reg | 0xfff);
+
+ reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEIO_BASE0 + link);
+ nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_IO_BASE, reg);
+
+ reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEIO_LIMIT0 + link);
+ nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_IO_LIM, reg | 0xfff);
+ }
+}
+#else
+/* Swap configuration not needed in little-endian mode */
+static inline void xlp_config_pci_bswap(int node, int link) {}
+#endif /* __BIG_ENDIAN */
+
+static int __init pcibios_init(void)
+{
+ uint64_t pciebase;
+ int link, n;
+ u32 reg;
+
+ /* Firmware assigns PCI resources */
+ pci_set_flags(PCI_PROBE_ONLY);
+ pci_config_base = ioremap(XLP_DEFAULT_PCI_ECFG_BASE, 64 << 20);
+
+ /* Extend IO port for memory mapped io */
+ ioport_resource.start = 0;
+ ioport_resource.end = ~0;
+
+ for (n = 0; n < NLM_NR_NODES; n++) {
+ if (!nlm_node_present(n))
+ continue;
+
+ for (link = 0; link < PCIE_NLINKS; link++) {
+ pciebase = nlm_get_pcie_base(n, link);
+ if (nlm_read_pci_reg(pciebase, 0) == 0xffffffff)
+ continue;
+ xlp_config_pci_bswap(n, link);
+ xlp_init_node_msi_irqs(n, link);
+
+ /* put in intpin and irq - u-boot does not */
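+ /*
+ * config dword 0xf is offset 0x3c: INTERRUPT_LINE in bits 7:0,
+ * INTERRUPT_PIN in bits 15:8, so set the pin to INTA and the
+ * line to the link's legacy IRQ.
+ */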
+ reg = nlm_read_pci_reg(pciebase, 0xf);
+ reg &= ~0x1ffu;
+ reg |= (1 << 8) | PIC_PCIE_LINK_LEGACY_IRQ(link);
+ nlm_write_pci_reg(pciebase, 0xf, reg);
+ pr_info("XLP PCIe: Link %d-%d initialized.\n", n, link);
+ }
+ }
+
+ set_io_port_base(CKSEG1);
+ nlm_pci_controller.io_map_base = CKSEG1;
+
+ register_pci_controller(&nlm_pci_controller);
+ pr_info("XLP PCIe Controller %pR%pR.\n", &nlm_pci_io_resource,
+ &nlm_pci_mem_resource);
+
+ return 0;
+}
+arch_initcall(pcibios_init);
diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c
new file mode 100644
index 000000000..2a1c81a12
--- /dev/null
+++ b/arch/mips/pci/pci-xlr.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/mm.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/console.h>
+#include <linux/pci_regs.h>
+
+#include <asm/io.h>
+
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#include <asm/netlogic/xlr/msidef.h>
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/xlr.h>
+
+static void *pci_config_base;
+
+#define pci_cfg_addr(bus, devfn, off) (((bus) << 16) | ((devfn) << 8) | (off))
+
+/* PCI ops */
+static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ u32 data;
+ u32 *cfgaddr;
+
+ cfgaddr = (u32 *)(pci_config_base +
+ pci_cfg_addr(bus->number, devfn, where & ~3));
+ data = *cfgaddr;
+ return cpu_to_le32(data);
+}
+
+static inline void pci_cfg_write_32bit(struct pci_bus *bus, unsigned int devfn,
+ int where, u32 data)
+{
+ u32 *cfgaddr;
+
+ cfgaddr = (u32 *)(pci_config_base +
+ pci_cfg_addr(bus->number, devfn, where & ~3));
+ *cfgaddr = cpu_to_le32(data);
+}
+
+static int nlm_pcibios_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ u32 data;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ data = pci_cfg_read_32bit(bus, devfn, where);
+
+ if (size == 1)
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ else if (size == 2)
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ else
+ *val = data;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+
+static int nlm_pcibios_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ u32 data;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ data = pci_cfg_read_32bit(bus, devfn, where);
+
+ if (size == 1)
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else if (size == 2)
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else
+ data = val;
+
+ pci_cfg_write_32bit(bus, devfn, where, data);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops nlm_pci_ops = {
+ .read = nlm_pcibios_read,
+ .write = nlm_pcibios_write
+};
+
+static struct resource nlm_pci_mem_resource = {
+ .name = "XLR PCI MEM",
+ .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */
+ .end = 0xdfffffffUL,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource nlm_pci_io_resource = {
+ .name = "XLR IO MEM",
+ .start = 0x10000000UL, /* 1MB PCI IO @ 0x1000_0000 */
+ .end = 0x100fffffUL,
+ .flags = IORESOURCE_IO,
+};
+
+struct pci_controller nlm_pci_controller = {
+ .index = 0,
+ .pci_ops = &nlm_pci_ops,
+ .mem_resource = &nlm_pci_mem_resource,
+ .mem_offset = 0x00000000UL,
+ .io_resource = &nlm_pci_io_resource,
+ .io_offset = 0x00000000UL,
+};
+
+/*
+ * The top level PCIe links on the XLS PCIe controller appear as
+ * bridges. Given a device, this function finds which link it is
+ * on.
+ */
+static struct pci_dev *xls_get_pcie_link(const struct pci_dev *dev)
+{
+ struct pci_bus *bus, *p;
+
+ /* Find the bridge on bus 0 */
+ bus = dev->bus;
+ for (p = bus->parent; p && p->number != 0; p = p->parent)
+ bus = p;
+
+ return p ? bus->self : NULL;
+}
+
+static int nlm_pci_link_to_irq(int link)
+{
+ switch (link) {
+ case 0:
+ return PIC_PCIE_LINK0_IRQ;
+ case 1:
+ return PIC_PCIE_LINK1_IRQ;
+ case 2:
+ if (nlm_chip_is_xls_b())
+ return PIC_PCIE_XLSB0_LINK2_IRQ;
+ else
+ return PIC_PCIE_LINK2_IRQ;
+ case 3:
+ if (nlm_chip_is_xls_b())
+ return PIC_PCIE_XLSB0_LINK3_IRQ;
+ else
+ return PIC_PCIE_LINK3_IRQ;
+ }
+ WARN(1, "Unexpected link %d\n", link);
+ return 0;
+}
+
+static int get_irq_vector(const struct pci_dev *dev)
+{
+ struct pci_dev *lnk;
+ int link;
+
+ if (!nlm_chip_is_xls())
+ return PIC_PCIX_IRQ; /* for XLR just one IRQ */
+
+ lnk = xls_get_pcie_link(dev);
+ if (lnk == NULL)
+ return 0;
+
+ link = PCI_SLOT(lnk->devfn);
+ return nlm_pci_link_to_irq(link);
+}
+
+#ifdef CONFIG_PCI_MSI
+void arch_teardown_msi_irq(unsigned int irq)
+{
+}
+
+int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
+{
+ struct msi_msg msg;
+ struct pci_dev *lnk;
+ int irq, ret;
+ u16 val;
+
+ /* MSI not supported on XLR */
+ if (!nlm_chip_is_xls())
+ return 1;
+
+ /*
+ * Enable MSI on the XLS PCIe controller bridge, which was disabled
+ * at enumeration; the bridge MSI capability is at 0x50.
+ */
+ lnk = xls_get_pcie_link(dev);
+ if (lnk == NULL)
+ return 1;
+
+ pci_read_config_word(lnk, 0x50 + PCI_MSI_FLAGS, &val);
+ if ((val & PCI_MSI_FLAGS_ENABLE) == 0) {
+ val |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(lnk, 0x50 + PCI_MSI_FLAGS, val);
+ }
+
+ irq = get_irq_vector(dev);
+ if (irq <= 0)
+ return 1;
+
+ msg.address_hi = MSI_ADDR_BASE_HI;
+ msg.address_lo = MSI_ADDR_BASE_LO |
+ MSI_ADDR_DEST_MODE_PHYSICAL |
+ MSI_ADDR_REDIRECTION_CPU;
+
+ msg.data = MSI_DATA_TRIGGER_EDGE |
+ MSI_DATA_LEVEL_ASSERT |
+ MSI_DATA_DELIVERY_FIXED;
+
+ ret = irq_set_msi_desc(irq, desc);
+ if (ret < 0)
+ return ret;
+
+ pci_write_msi_msg(irq, &msg);
+ return 0;
+}
+#endif
+
+/* Extra ACK needed for XLR on chip PCI controller */
+static void xlr_pci_ack(struct irq_data *d)
+{
+ uint64_t pcibase = nlm_mmio_base(NETLOGIC_IO_PCIX_OFFSET);
+
+ nlm_read_reg(pcibase, (0x140 >> 2));
+}
+
+/* Extra ACK needed for XLS on chip PCIe controller */
+static void xls_pcie_ack(struct irq_data *d)
+{
+ uint64_t pciebase_le = nlm_mmio_base(NETLOGIC_IO_PCIE_1_OFFSET);
+
+ switch (d->irq) {
+ case PIC_PCIE_LINK0_IRQ:
+ nlm_write_reg(pciebase_le, (0x90 >> 2), 0xffffffff);
+ break;
+ case PIC_PCIE_LINK1_IRQ:
+ nlm_write_reg(pciebase_le, (0x94 >> 2), 0xffffffff);
+ break;
+ case PIC_PCIE_LINK2_IRQ:
+ nlm_write_reg(pciebase_le, (0x190 >> 2), 0xffffffff);
+ break;
+ case PIC_PCIE_LINK3_IRQ:
+ nlm_write_reg(pciebase_le, (0x194 >> 2), 0xffffffff);
+ break;
+ }
+}
+
+/* For XLS B silicon, the 3,4 PCI interrupts are different */
+static void xls_pcie_ack_b(struct irq_data *d)
+{
+ uint64_t pciebase_le = nlm_mmio_base(NETLOGIC_IO_PCIE_1_OFFSET);
+
+ switch (d->irq) {
+ case PIC_PCIE_LINK0_IRQ:
+ nlm_write_reg(pciebase_le, (0x90 >> 2), 0xffffffff);
+ break;
+ case PIC_PCIE_LINK1_IRQ:
+ nlm_write_reg(pciebase_le, (0x94 >> 2), 0xffffffff);
+ break;
+ case PIC_PCIE_XLSB0_LINK2_IRQ:
+ nlm_write_reg(pciebase_le, (0x190 >> 2), 0xffffffff);
+ break;
+ case PIC_PCIE_XLSB0_LINK3_IRQ:
+ nlm_write_reg(pciebase_le, (0x194 >> 2), 0xffffffff);
+ break;
+ }
+}
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return get_irq_vector(dev);
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
+
+static int __init pcibios_init(void)
+{
+ void (*extra_ack)(struct irq_data *);
+ int link, irq;
+
+ /* PSB assigns PCI resources */
+ pci_set_flags(PCI_PROBE_ONLY);
+ pci_config_base = ioremap(DEFAULT_PCI_CONFIG_BASE, 16 << 20);
+
+ /* Extend IO port for memory mapped io */
+ ioport_resource.start = 0;
+ ioport_resource.end = ~0;
+
+ set_io_port_base(CKSEG1);
+ nlm_pci_controller.io_map_base = CKSEG1;
+
+ pr_info("Registering XLR/XLS PCIX/PCIE Controller.\n");
+ register_pci_controller(&nlm_pci_controller);
+
+ /*
+ * For PCI interrupts, we need to ack the PCI controller too, overload
+ * irq handler data to do this
+ */
+ if (!nlm_chip_is_xls()) {
+ /* XLR PCI controller ACK */
+ nlm_set_pic_extra_ack(0, PIC_PCIX_IRQ, xlr_pci_ack);
+ } else {
+ if (nlm_chip_is_xls_b())
+ extra_ack = xls_pcie_ack_b;
+ else
+ extra_ack = xls_pcie_ack;
+ for (link = 0; link < 4; link++) {
+ irq = nlm_pci_link_to_irq(link);
+ nlm_set_pic_extra_ack(0, irq, extra_ack);
+ }
+ }
+ return 0;
+}
+
+arch_initcall(pcibios_init);
diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c
new file mode 100644
index 000000000..50f7d42cc
--- /dev/null
+++ b/arch/mips/pci/pci-xtalk-bridge.c
@@ -0,0 +1,759 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2003 Christoph Hellwig (hch@lst.de)
+ * Copyright (C) 1999, 2000, 04 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/dma-direct.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/xtalk-bridge.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/crc16.h>
+
+#include <asm/pci/bridge.h>
+#include <asm/paccess.h>
+#include <asm/sn/irq_alloc.h>
+#include <asm/sn/ioc3.h>
+
+#define CRC16_INIT 0
+#define CRC16_VALID 0xb001
+
+/*
+ * Common phys<->dma mapping for platforms using pci xtalk bridge
+ */
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus);
+
+ return bc->baddr + paddr;
+}
+
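+/*
+ * phys_to_dma() above places the master widget id and PCI-64 attribute
+ * bits (bc->baddr) in the top byte of the DMA address; mask that byte
+ * off again to recover the CPU physical address.
+ */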
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr & ~(0xffUL << 56);
+}
+
+/*
+ * Most of the IOC3 PCI config registers aren't present, so we
+ * emulate what is needed for a normal PCI enumeration.
+ */
+static int ioc3_cfg_rd(void *addr, int where, int size, u32 *value, u32 sid)
+{
+ u32 cf, shift, mask;
+
+ switch (where & ~3) {
+ case 0x00 ... 0x10:
+ case 0x40 ... 0x44:
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ break;
+ case 0x2c:
+ cf = sid;
+ break;
+ case 0x3c:
+ /* emulate sane interrupt pin value */
+ cf = 0x00000100;
+ break;
+ default:
+ cf = 0;
+ break;
+ }
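+ /*
+ * Extract the requested byte(s) from the 32-bit word read or emulated
+ * above, e.g. a 2-byte read at offset 2 yields shift = 16 and
+ * mask = 0xffff, i.e. the upper halfword.
+ */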
+ shift = (where & 3) << 3;
+ mask = 0xffffffffU >> ((4 - size) << 3);
+ *value = (cf >> shift) & mask;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int ioc3_cfg_wr(void *addr, int where, int size, u32 value)
+{
+ u32 cf, shift, mask, smask;
+
+ if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
+ return PCIBIOS_SUCCESSFUL;
+
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
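+ /* Read-modify-write: merge the new byte(s) into the existing 32-bit word. */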
+ shift = ((where & 3) << 3);
+ mask = (0xffffffffU >> ((4 - size) << 3));
+ smask = mask << shift;
+
+ cf = (cf & ~smask) | ((value & mask) << shift);
+ if (put_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static void bridge_disable_swapping(struct pci_dev *dev)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
+ int slot = PCI_SLOT(dev->devfn);
+
+ /* Turn off byte swapping */
+ bridge_clr(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR);
+ bridge_read(bc, b_widget.w_tflush); /* Flush */
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
+ bridge_disable_swapping);
+
+
+/*
+ * The Bridge ASIC supports both type 0 and type 1 access. Type 1 is
+ * not really documented, so right now I can't write code which uses it.
+ * Therefore we use type 0 accesses for now even though they won't work
+ * correctly for PCI-to-PCI bridges.
+ *
+ * The function is complicated by the ultimate brokenness of the IOC3 chip
+ * which is used in SGI systems. The IOC3 can only handle 32-bit PCI
+ * accesses and only decodes parts of its address space.
+ */
+static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+ struct bridge_regs *bridge = bc->base;
+ int slot = PCI_SLOT(devfn);
+ int fn = PCI_FUNC(devfn);
+ void *addr;
+ u32 cf;
+ int res;
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * IOC3 is broken beyond belief ... Don't even give the
+ * generic PCI code a chance to look at it for real ...
+ */
+ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) {
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
+ return ioc3_cfg_rd(addr, where, size, value,
+ bc->ioc3_sid[slot]);
+ }
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
+
+ if (size == 1)
+ res = get_dbe(*value, (u8 *)addr);
+ else if (size == 2)
+ res = get_dbe(*value, (u16 *)addr);
+ else
+ res = get_dbe(*value, (u32 *)addr);
+
+ return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+ struct bridge_regs *bridge = bc->base;
+ int busno = bus->number;
+ int slot = PCI_SLOT(devfn);
+ int fn = PCI_FUNC(devfn);
+ void *addr;
+ u32 cf;
+ int res;
+
+ bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
+ addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * IOC3 is broken beyond belief ... Don't even give the
+ * generic PCI code a chance to look at it for real ...
+ */
+ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) {
+ addr = &bridge->b_type1_cfg.c[(fn << 8) | (where & ~3)];
+ return ioc3_cfg_rd(addr, where, size, value,
+ bc->ioc3_sid[slot]);
+ }
+
+ addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
+
+ if (size == 1)
+ res = get_dbe(*value, (u8 *)addr);
+ else if (size == 2)
+ res = get_dbe(*value, (u16 *)addr);
+ else
+ res = get_dbe(*value, (u32 *)addr);
+
+ return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ if (!pci_is_root_bus(bus))
+ return pci_conf1_read_config(bus, devfn, where, size, value);
+
+ return pci_conf0_read_config(bus, devfn, where, size, value);
+}
+
+static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+ struct bridge_regs *bridge = bc->base;
+ int slot = PCI_SLOT(devfn);
+ int fn = PCI_FUNC(devfn);
+ void *addr;
+ u32 cf;
+ int res;
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * IOC3 is broken beyond belief ... Don't even give the
+ * generic PCI code a chance to look at it for real ...
+ */
+ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) {
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
+ return ioc3_cfg_wr(addr, where, size, value);
+ }
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
+
+ if (size == 1)
+ res = put_dbe(value, (u8 *)addr);
+ else if (size == 2)
+ res = put_dbe(value, (u16 *)addr);
+ else
+ res = put_dbe(value, (u32 *)addr);
+
+ if (res)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+ struct bridge_regs *bridge = bc->base;
+ int slot = PCI_SLOT(devfn);
+ int fn = PCI_FUNC(devfn);
+ int busno = bus->number;
+ void *addr;
+ u32 cf;
+ int res;
+
+ bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
+ addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * IOC3 is broken beyond belief ... Don't even give the
+ * generic PCI code a chance to look at it for real ...
+ */
+ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) {
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
+ return ioc3_cfg_wr(addr, where, size, value);
+ }
+
+ addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
+
+ if (size == 1)
+ res = put_dbe(value, (u8 *)addr);
+ else if (size == 2)
+ res = put_dbe(value, (u16 *)addr);
+ else
+ res = put_dbe(value, (u32 *)addr);
+
+ if (res)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ if (!pci_is_root_bus(bus))
+ return pci_conf1_write_config(bus, devfn, where, size, value);
+
+ return pci_conf0_write_config(bus, devfn, where, size, value);
+}
+
+static struct pci_ops bridge_pci_ops = {
+ .read = pci_read_config,
+ .write = pci_write_config,
+};
+
+struct bridge_irq_chip_data {
+ struct bridge_controller *bc;
+ nasid_t nasid;
+};
+
+static int bridge_set_affinity(struct irq_data *d, const struct cpumask *mask,
+ bool force)
+{
+#ifdef CONFIG_NUMA
+ struct bridge_irq_chip_data *data = d->chip_data;
+ int bit = d->parent_data->hwirq;
+ int pin = d->hwirq;
+ int ret, cpu;
+
+ ret = irq_chip_set_affinity_parent(d, mask, force);
+ if (ret >= 0) {
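+ /*
+ * Retarget the bridge interrupt to the hub (nasid) of the first
+ * online CPU in the new affinity mask.
+ */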
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+ data->nasid = cpu_to_node(cpu);
+ bridge_write(data->bc, b_int_addr[pin].addr,
+ (((data->bc->intr_addr >> 30) & 0x30000) |
+ bit | (data->nasid << 8)));
+ bridge_read(data->bc, b_wid_tflush);
+ }
+ return ret;
+#else
+ return irq_chip_set_affinity_parent(d, mask, force);
+#endif
+}
+
+struct irq_chip bridge_irq_chip = {
+ .name = "BRIDGE",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_affinity = bridge_set_affinity
+};
+
+static int bridge_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct bridge_irq_chip_data *data;
+ struct irq_alloc_info *info = arg;
+ int ret;
+
+ if (nr_irqs > 1 || !info)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+ if (ret >= 0) {
+ data->bc = info->ctrl;
+ data->nasid = info->nasid;
+ irq_domain_set_info(domain, virq, info->pin, &bridge_irq_chip,
+ data, handle_level_irq, NULL, NULL);
+ } else {
+ kfree(data);
+ }
+
+ return ret;
+}
+
+static void bridge_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
+
+ if (nr_irqs)
+ return;
+
+ kfree(irqd->chip_data);
+ irq_domain_free_irqs_top(domain, virq, nr_irqs);
+}
+
+static int bridge_domain_activate(struct irq_domain *domain,
+ struct irq_data *irqd, bool reserve)
+{
+ struct bridge_irq_chip_data *data = irqd->chip_data;
+ struct bridge_controller *bc = data->bc;
+ int bit = irqd->parent_data->hwirq;
+ int pin = irqd->hwirq;
+ u32 device;
+
+ bridge_write(bc, b_int_addr[pin].addr,
+ (((bc->intr_addr >> 30) & 0x30000) |
+ bit | (data->nasid << 8)));
+ bridge_set(bc, b_int_enable, (1 << pin));
+ bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */
+
+ /*
+ * Enable sending of an interrupt clear packet to the hub on a high to
+ * low transition of the interrupt pin.
+ *
+ * IRIX sets additional bits in the address which are documented as
+ * reserved in the bridge docs.
+ */
+ bridge_set(bc, b_int_mode, (1UL << pin));
+
+ /*
+ * We assume the bridge has a 1:1 mapping between devices
+ * (slots) and interrupt pins.
+ */
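+ /* b_int_device holds a 3-bit device number for each interrupt pin. */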
+ device = bridge_read(bc, b_int_device);
+ device &= ~(7 << (pin*3));
+ device |= (pin << (pin*3));
+ bridge_write(bc, b_int_device, device);
+
+ bridge_read(bc, b_wid_tflush);
+ return 0;
+}
+
+static void bridge_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *irqd)
+{
+ struct bridge_irq_chip_data *data = irqd->chip_data;
+
+ bridge_clr(data->bc, b_int_enable, (1 << irqd->hwirq));
+ bridge_read(data->bc, b_wid_tflush);
+}
+
+static const struct irq_domain_ops bridge_domain_ops = {
+ .alloc = bridge_domain_alloc,
+ .free = bridge_domain_free,
+ .activate = bridge_domain_activate,
+ .deactivate = bridge_domain_deactivate
+};
+
+/*
+ * All observed requests have pin == 1. We could have a global here that
+ * gets incremented and returned every time - unfortunately, pci_map_irq
+ * may be called on the same device over and over and needs to return the
+ * same value. On O2000, pin can be 0 or 1, and PCI slots can be [0..7].
+ *
+ * A given PCI device, in general, should be able to interrupt any of the
+ * CPUs on any one of the hubs connected to its xbow.
+ */
+static int bridge_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
+ struct irq_alloc_info info;
+ int irq;
+
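+ /*
+ * The bridge only distinguishes two interrupt pins per slot, so fold
+ * INTA/INTC onto pin 0 and INTB/INTD onto pin 1.
+ */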
+ switch (pin) {
+ case PCI_INTERRUPT_UNKNOWN:
+ case PCI_INTERRUPT_INTA:
+ case PCI_INTERRUPT_INTC:
+ pin = 0;
+ break;
+ case PCI_INTERRUPT_INTB:
+ case PCI_INTERRUPT_INTD:
+ pin = 1;
+ }
+
+ irq = bc->pci_int[slot][pin];
+ if (irq == -1) {
+ info.ctrl = bc;
+ info.nasid = bc->nasid;
+ info.pin = bc->int_mapping[slot][pin];
+
+ irq = irq_domain_alloc_irqs(bc->domain, 1, bc->nasid, &info);
+ if (irq < 0)
+ return irq;
+
+ bc->pci_int[slot][pin] = irq;
+ }
+ return irq;
+}
+
+#define IOC3_SID(sid) (PCI_VENDOR_ID_SGI | ((sid) << 16))
+
+static void bridge_setup_ip27_baseio6g(struct bridge_controller *bc)
+{
+ bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP27_BASEIO6G);
+ bc->ioc3_sid[6] = IOC3_SID(IOC3_SUBSYS_IP27_MIO);
+ bc->int_mapping[2][1] = 4;
+ bc->int_mapping[6][1] = 6;
+}
+
+static void bridge_setup_ip27_baseio(struct bridge_controller *bc)
+{
+ bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP27_BASEIO);
+ bc->int_mapping[2][1] = 4;
+}
+
+static void bridge_setup_ip29_baseio(struct bridge_controller *bc)
+{
+ bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP29_SYSBOARD);
+ bc->int_mapping[2][1] = 3;
+}
+
+static void bridge_setup_ip30_sysboard(struct bridge_controller *bc)
+{
+ bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_IP30_SYSBOARD);
+ bc->int_mapping[2][1] = 4;
+}
+
+static void bridge_setup_menet(struct bridge_controller *bc)
+{
+ bc->ioc3_sid[0] = IOC3_SID(IOC3_SUBSYS_MENET);
+ bc->ioc3_sid[1] = IOC3_SID(IOC3_SUBSYS_MENET);
+ bc->ioc3_sid[2] = IOC3_SID(IOC3_SUBSYS_MENET);
+ bc->ioc3_sid[3] = IOC3_SID(IOC3_SUBSYS_MENET4);
+}
+
+static void bridge_setup_io7(struct bridge_controller *bc)
+{
+ bc->ioc3_sid[4] = IOC3_SID(IOC3_SUBSYS_IO7);
+}
+
+static void bridge_setup_io8(struct bridge_controller *bc)
+{
+ bc->ioc3_sid[4] = IOC3_SID(IOC3_SUBSYS_IO8);
+}
+
+static void bridge_setup_io9(struct bridge_controller *bc)
+{
+ bc->ioc3_sid[1] = IOC3_SID(IOC3_SUBSYS_IO9);
+}
+
+static void bridge_setup_ip34_fuel_sysboard(struct bridge_controller *bc)
+{
+ bc->ioc3_sid[4] = IOC3_SID(IOC3_SUBSYS_IP34_SYSBOARD);
+}
+
+#define BRIDGE_BOARD_SETUP(_partno, _setup) \
+ { .match = _partno, .setup = _setup }
+
+static const struct {
+ char *match;
+ void (*setup)(struct bridge_controller *bc);
+} bridge_ioc3_devid[] = {
+ BRIDGE_BOARD_SETUP("030-0734-", bridge_setup_ip27_baseio6g),
+ BRIDGE_BOARD_SETUP("030-0880-", bridge_setup_ip27_baseio6g),
+ BRIDGE_BOARD_SETUP("030-1023-", bridge_setup_ip27_baseio),
+ BRIDGE_BOARD_SETUP("030-1124-", bridge_setup_ip27_baseio),
+ BRIDGE_BOARD_SETUP("030-1025-", bridge_setup_ip29_baseio),
+ BRIDGE_BOARD_SETUP("030-1244-", bridge_setup_ip29_baseio),
+ BRIDGE_BOARD_SETUP("030-1389-", bridge_setup_ip29_baseio),
+ BRIDGE_BOARD_SETUP("030-0887-", bridge_setup_ip30_sysboard),
+ BRIDGE_BOARD_SETUP("030-1467-", bridge_setup_ip30_sysboard),
+ BRIDGE_BOARD_SETUP("030-0873-", bridge_setup_menet),
+ BRIDGE_BOARD_SETUP("030-1557-", bridge_setup_io7),
+ BRIDGE_BOARD_SETUP("030-1673-", bridge_setup_io8),
+ BRIDGE_BOARD_SETUP("030-1771-", bridge_setup_io9),
+ BRIDGE_BOARD_SETUP("030-1707-", bridge_setup_ip34_fuel_sysboard),
+};
+
+static void bridge_setup_board(struct bridge_controller *bc, char *partnum)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bridge_ioc3_devid); i++)
+ if (!strncmp(partnum, bridge_ioc3_devid[i].match,
+ strlen(bridge_ioc3_devid[i].match))) {
+ bridge_ioc3_devid[i].setup(bc);
+ }
+}
+
+static int bridge_nvmem_match(struct device *dev, const void *data)
+{
+ const char *name = dev_name(dev);
+ const char *prefix = data;
+
+ if (strlen(name) < strlen(prefix))
+ return 0;
+
+ return memcmp(prefix, dev_name(dev), strlen(prefix)) == 0;
+}
+
+static int bridge_get_partnum(u64 baddr, char *partnum)
+{
+ struct nvmem_device *nvmem;
+ char prefix[24];
+ u8 prom[64];
+ int i, j;
+ int ret;
+
+ snprintf(prefix, sizeof(prefix), "bridge-%012llx-0b-", baddr);
+
+ nvmem = nvmem_device_find(prefix, bridge_nvmem_match);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ ret = nvmem_device_read(nvmem, 0, 64, prom);
+ nvmem_device_put(nvmem);
+
+ if (ret != 64)
+ return ret;
+
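+ /*
+ * The PROM contents are protected by two CRC16 checksums, one per
+ * 32-byte half; both have to check out before we trust the data.
+ */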
+ if (crc16(CRC16_INIT, prom, 32) != CRC16_VALID ||
+ crc16(CRC16_INIT, prom + 32, 32) != CRC16_VALID)
+ return -EINVAL;
+
+ /* Assemble part number */
+ j = 0;
+ for (i = 0; i < 19; i++)
+ if (prom[i + 11] != ' ')
+ partnum[j++] = prom[i + 11];
+
+ for (i = 0; i < 6; i++)
+ if (prom[i + 32] != ' ')
+ partnum[j++] = prom[i + 32];
+
+ partnum[j] = 0;
+
+ return 0;
+}
+
+static int bridge_probe(struct platform_device *pdev)
+{
+ struct xtalk_bridge_platform_data *bd = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct bridge_controller *bc;
+ struct pci_host_bridge *host;
+ struct irq_domain *domain, *parent;
+ struct fwnode_handle *fn;
+ char partnum[26];
+ int slot;
+ int err;
+
+ /* get part number from the one-wire PROM */
+ if (bridge_get_partnum(virt_to_phys((void *)bd->bridge_addr), partnum))
+ return -EPROBE_DEFER; /* not available yet */
+
+ parent = irq_get_default_host();
+ if (!parent)
+ return -ENODEV;
+ fn = irq_domain_alloc_named_fwnode("BRIDGE");
+ if (!fn)
+ return -ENOMEM;
+ domain = irq_domain_create_hierarchy(parent, 0, 8, fn,
+ &bridge_domain_ops, NULL);
+ if (!domain) {
+ irq_domain_free_fwnode(fn);
+ return -ENOMEM;
+ }
+
+ pci_set_flags(PCI_PROBE_ONLY);
+
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*bc));
+ if (!host) {
+ err = -ENOMEM;
+ goto err_remove_domain;
+ }
+
+ bc = pci_host_bridge_priv(host);
+
+ bc->busn.name = "Bridge PCI busn";
+ bc->busn.start = 0;
+ bc->busn.end = 0xff;
+ bc->busn.flags = IORESOURCE_BUS;
+
+ bc->domain = domain;
+
+ pci_add_resource_offset(&host->windows, &bd->mem, bd->mem_offset);
+ pci_add_resource_offset(&host->windows, &bd->io, bd->io_offset);
+ pci_add_resource(&host->windows, &bc->busn);
+
+ err = devm_request_pci_bus_resources(dev, &host->windows);
+ if (err < 0)
+ goto err_free_resource;
+
+ bc->nasid = bd->nasid;
+
+ bc->baddr = (u64)bd->masterwid << 60 | PCI64_ATTR_BAR;
+ bc->base = (struct bridge_regs *)bd->bridge_addr;
+ bc->intr_addr = bd->intr_addr;
+
+ /*
+ * Clear all pending interrupts.
+ */
+ bridge_write(bc, b_int_rst_stat, BRIDGE_IRR_ALL_CLR);
+
+ /*
+ * Until otherwise set up, assume all interrupts are from slot 0
+ */
+ bridge_write(bc, b_int_device, 0x0);
+
+ /*
+ * disable swapping for big windows
+ */
+ bridge_clr(bc, b_wid_control,
+ BRIDGE_CTRL_IO_SWAP | BRIDGE_CTRL_MEM_SWAP);
+#ifdef CONFIG_PAGE_SIZE_4KB
+ bridge_clr(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
+#else /* 16kB or larger */
+ bridge_set(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
+#endif
+
+ /*
+ * Hmm... IRIX sets additional bits in the address which
+ * are documented as reserved in the bridge docs.
+ */
+ bridge_write(bc, b_wid_int_upper,
+ ((bc->intr_addr >> 32) & 0xffff) | (bd->masterwid << 16));
+ bridge_write(bc, b_wid_int_lower, bc->intr_addr & 0xffffffff);
+ bridge_write(bc, b_dir_map, (bd->masterwid << 20)); /* DMA */
+ bridge_write(bc, b_int_enable, 0);
+
+ for (slot = 0; slot < 8; slot++) {
+ bridge_set(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR);
+ bc->pci_int[slot][0] = -1;
+ bc->pci_int[slot][1] = -1;
+ /* default interrupt pin mapping */
+ bc->int_mapping[slot][0] = slot;
+ bc->int_mapping[slot][1] = slot ^ 4;
+ }
+ bridge_read(bc, b_wid_tflush); /* wait until Bridge PIO complete */
+
+ bridge_setup_board(bc, partnum);
+
+ host->dev.parent = dev;
+ host->sysdata = bc;
+ host->busnr = 0;
+ host->ops = &bridge_pci_ops;
+ host->map_irq = bridge_map_irq;
+ host->swizzle_irq = pci_common_swizzle;
+
+ err = pci_scan_root_bus_bridge(host);
+ if (err < 0)
+ goto err_free_resource;
+
+ pci_bus_claim_resources(host->bus);
+ pci_bus_add_devices(host->bus);
+
+ platform_set_drvdata(pdev, host->bus);
+
+ return 0;
+
+err_free_resource:
+ pci_free_resource_list(&host->windows);
+err_remove_domain:
+ irq_domain_remove(domain);
+ irq_domain_free_fwnode(fn);
+ return err;
+}
+
+static int bridge_remove(struct platform_device *pdev)
+{
+ struct pci_bus *bus = platform_get_drvdata(pdev);
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+ struct fwnode_handle *fn = bc->domain->fwnode;
+
+ irq_domain_remove(bc->domain);
+ irq_domain_free_fwnode(fn);
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(bus);
+ pci_remove_root_bus(bus);
+ pci_unlock_rescan_remove();
+
+ return 0;
+}
+
+static struct platform_driver bridge_driver = {
+ .probe = bridge_probe,
+ .remove = bridge_remove,
+ .driver = {
+ .name = "xtalk-bridge",
+ }
+};
+
+builtin_platform_driver(bridge_driver);
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
new file mode 100644
index 000000000..feebc6eee
--- /dev/null
+++ b/arch/mips/pci/pci.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *
+ * Copyright (C) 2003, 04, 11 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2011 Wind River Systems,
+ * written by Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/of_address.h>
+
+#include <asm/cpu-info.h>
+
+unsigned long PCIBIOS_MIN_IO;
+EXPORT_SYMBOL(PCIBIOS_MIN_IO);
+
+unsigned long PCIBIOS_MIN_MEM;
+EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
+
+static int __init pcibios_set_cache_line_size(void)
+{
+ unsigned int lsize;
+
+ /*
+ * Set PCI cacheline size to that of the highest level in the
+ * cache hierarchy.
+ */
+ lsize = cpu_dcache_line_size();
+ lsize = cpu_scache_line_size() ? : lsize;
+ lsize = cpu_tcache_line_size() ? : lsize;
+
+ BUG_ON(!lsize);
+
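+ /* pci_dfl_cache_line_size is in units of 32-bit dwords, hence lsize >> 2 */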
+ pci_dfl_cache_line_size = lsize >> 2;
+
+ pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
+ return 0;
+}
+arch_initcall(pcibios_set_cache_line_size);
+
+void pci_resource_to_user(const struct pci_dev *dev, int bar,
+ const struct resource *rsrc, resource_size_t *start,
+ resource_size_t *end)
+{
+ phys_addr_t size = resource_size(rsrc);
+
+ *start = fixup_bigphys_addr(rsrc->start, size);
+ *end = rsrc->start + size - 1;
+}
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
new file mode 100644
index 000000000..d919a0d81
--- /dev/null
+++ b/arch/mips/pci/pcie-octeon.c
@@ -0,0 +1,2088 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007, 2008, 2009, 2010, 2011 Cavium Networks
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-npei-defs.h>
+#include <asm/octeon/cvmx-pciercx-defs.h>
+#include <asm/octeon/cvmx-pescx-defs.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include <asm/octeon/cvmx-pemx-defs.h>
+#include <asm/octeon/cvmx-dpi-defs.h>
+#include <asm/octeon/cvmx-sli-defs.h>
+#include <asm/octeon/cvmx-sriox-defs.h>
+#include <asm/octeon/cvmx-helper-errata.h>
+#include <asm/octeon/pci-octeon.h>
+
+#define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */
+#define MPS_CN5XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */
+#define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */
+#define MPS_CN6XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */
+
+/* Module parameter to disable PCI probing */
+static int pcie_disable;
+module_param(pcie_disable, int, S_IRUGO);
+
+static int enable_pcie_14459_war;
+static int enable_pcie_bus_num_war[2];
+
+union cvmx_pcie_address {
+ uint64_t u64;
+ struct {
+ uint64_t upper:2; /* Normally 2 for XKPHYS */
+ uint64_t reserved_49_61:13; /* Must be zero */
+ uint64_t io:1; /* 1 for IO space access */
+ uint64_t did:5; /* PCIe DID = 3 */
+ uint64_t subdid:3; /* PCIe SubDID = 1 */
+ uint64_t reserved_36_39:4; /* Must be zero */
+ uint64_t es:2; /* Endian swap = 1 */
+ uint64_t port:2; /* PCIe port 0,1 */
+ uint64_t reserved_29_31:3; /* Must be zero */
+ /*
+ * Selects the type of the configuration request (0 = type 0,
+ * 1 = type 1).
+ */
+ uint64_t ty:1;
+ /* Target bus number sent in the ID in the request. */
+ uint64_t bus:8;
+ /*
+ * Target device number sent in the ID in the
+ * request. Note that Dev must be zero for type 0
+ * configuration requests.
+ */
+ uint64_t dev:5;
+ /* Target function number sent in the ID in the request. */
+ uint64_t func:3;
+ /*
+ * Selects a register in the configuration space of
+ * the target.
+ */
+ uint64_t reg:12;
+ } config;
+ struct {
+ uint64_t upper:2; /* Normally 2 for XKPHYS */
+ uint64_t reserved_49_61:13; /* Must be zero */
+ uint64_t io:1; /* 1 for IO space access */
+ uint64_t did:5; /* PCIe DID = 3 */
+ uint64_t subdid:3; /* PCIe SubDID = 2 */
+ uint64_t reserved_36_39:4; /* Must be zero */
+ uint64_t es:2; /* Endian swap = 1 */
+ uint64_t port:2; /* PCIe port 0,1 */
+ uint64_t address:32; /* PCIe IO address */
+ } io;
+ struct {
+ uint64_t upper:2; /* Normally 2 for XKPHYS */
+ uint64_t reserved_49_61:13; /* Must be zero */
+ uint64_t io:1; /* 1 for IO space access */
+ uint64_t did:5; /* PCIe DID = 3 */
+ uint64_t subdid:3; /* PCIe SubDID = 3-6 */
+ uint64_t reserved_36_39:4; /* Must be zero */
+ uint64_t address:36; /* PCIe Mem address */
+ } mem;
+};
+
+static int cvmx_pcie_rc_initialize(int pcie_port);
+
+/**
+ * Return the Core virtual base address for PCIe IO access. IOs are
+ * read/written as an offset from this address.
+ *
+ * @pcie_port: PCIe port the IO is for
+ *
+ * Returns 64bit Octeon IO base address for read/write
+ */
+static inline uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
+{
+ union cvmx_pcie_address pcie_addr;
+ pcie_addr.u64 = 0;
+ pcie_addr.io.upper = 0;
+ pcie_addr.io.io = 1;
+ pcie_addr.io.did = 3;
+ pcie_addr.io.subdid = 2;
+ pcie_addr.io.es = 1;
+ pcie_addr.io.port = pcie_port;
+ return pcie_addr.u64;
+}
+
+/**
+ * Size of the IO address region returned at address
+ * cvmx_pcie_get_io_base_address()
+ *
+ * @pcie_port: PCIe port the IO is for
+ *
+ * Returns Size of the IO window
+ */
+static inline uint64_t cvmx_pcie_get_io_size(int pcie_port)
+{
+ return 1ull << 32;
+}
+
+/**
+ * Return the Core virtual base address for PCIe MEM access. Memory is
+ * read/written as an offset from this address.
+ *
+ * @pcie_port: PCIe port the IO is for
+ *
+ * Returns 64bit Octeon IO base address for read/write
+ */
+static inline uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
+{
+ union cvmx_pcie_address pcie_addr;
+ pcie_addr.u64 = 0;
+ pcie_addr.mem.upper = 0;
+ pcie_addr.mem.io = 1;
+ pcie_addr.mem.did = 3;
+ pcie_addr.mem.subdid = 3 + pcie_port;
+ return pcie_addr.u64;
+}
+
+/**
+ * Size of the Mem address region returned at address
+ * cvmx_pcie_get_mem_base_address()
+ *
+ * @pcie_port: PCIe port the IO is for
+ *
+ * Returns Size of the Mem window
+ */
+static inline uint64_t cvmx_pcie_get_mem_size(int pcie_port)
+{
+ return 1ull << 36;
+}
+
+/**
+ * Read a PCIe config space register indirectly. This is used for
+ * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
+ *
+ * @pcie_port: PCIe port to read from
+ * @cfg_offset: Address to read
+ *
+ * Returns Value read
+ */
+static uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
+ union cvmx_pescx_cfg_rd pescx_cfg_rd;
+ pescx_cfg_rd.u64 = 0;
+ pescx_cfg_rd.s.addr = cfg_offset;
+ cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
+ pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
+ return pescx_cfg_rd.s.data;
+ } else {
+ union cvmx_pemx_cfg_rd pemx_cfg_rd;
+ pemx_cfg_rd.u64 = 0;
+ pemx_cfg_rd.s.addr = cfg_offset;
+ cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64);
+ pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port));
+ return pemx_cfg_rd.s.data;
+ }
+}
+
+/**
+ * Write a PCIe config space register indirectly. This is used for
+ * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
+ *
+ * @pcie_port: PCIe port to write to
+ * @cfg_offset: Address to write
+ * @val: Value to write
+ */
+static void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset,
+ uint32_t val)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
+ union cvmx_pescx_cfg_wr pescx_cfg_wr;
+ pescx_cfg_wr.u64 = 0;
+ pescx_cfg_wr.s.addr = cfg_offset;
+ pescx_cfg_wr.s.data = val;
+ cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
+ } else {
+ union cvmx_pemx_cfg_wr pemx_cfg_wr;
+ pemx_cfg_wr.u64 = 0;
+ pemx_cfg_wr.s.addr = cfg_offset;
+ pemx_cfg_wr.s.data = val;
+ cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64);
+ }
+}
+
+/**
+ * Build a PCIe config space request address for a device
+ *
+ * @pcie_port: PCIe port to access
+ * @bus: Sub bus
+ * @dev: Device ID
+ * @fn: Device sub function
+ * @reg: Register to access
+ *
+ * Returns 64bit Octeon IO address
+ */
+static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus,
+ int dev, int fn, int reg)
+{
+ union cvmx_pcie_address pcie_addr;
+ union cvmx_pciercx_cfg006 pciercx_cfg006;
+
+ pciercx_cfg006.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
+ if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
+ return 0;
+
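+ /*
+ * Buses at or below the primary bus number are reached with type 0
+ * cycles and only expose device 0; anything further out needs a
+ * type 1 cycle, selected via the 'ty' bit below.
+ */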
+ pcie_addr.u64 = 0;
+ pcie_addr.config.upper = 2;
+ pcie_addr.config.io = 1;
+ pcie_addr.config.did = 3;
+ pcie_addr.config.subdid = 1;
+ pcie_addr.config.es = 1;
+ pcie_addr.config.port = pcie_port;
+ pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
+ pcie_addr.config.bus = bus;
+ pcie_addr.config.dev = dev;
+ pcie_addr.config.func = fn;
+ pcie_addr.config.reg = reg;
+ return pcie_addr.u64;
+}
+
+/**
+ * Read 8bits from a Device's config space
+ *
+ * @pcie_port: PCIe port the device is on
+ * @bus: Sub bus
+ * @dev: Device ID
+ * @fn: Device sub function
+ * @reg: Register to access
+ *
+ * Returns Result of the read
+ */
+static uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev,
+ int fn, int reg)
+{
+ uint64_t address =
+ __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ return cvmx_read64_uint8(address);
+ else
+ return 0xff;
+}
+
+/**
+ * Read 16bits from a Device's config space
+ *
+ * @pcie_port: PCIe port the device is on
+ * @bus: Sub bus
+ * @dev: Device ID
+ * @fn: Device sub function
+ * @reg: Register to access
+ *
+ * Returns Result of the read
+ */
+static uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev,
+ int fn, int reg)
+{
+ uint64_t address =
+ __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ return le16_to_cpu(cvmx_read64_uint16(address));
+ else
+ return 0xffff;
+}
+
+/**
+ * Read 32bits from a Device's config space
+ *
+ * @pcie_port: PCIe port the device is on
+ * @bus: Sub bus
+ * @dev: Device ID
+ * @fn: Device sub function
+ * @reg: Register to access
+ *
+ * Returns Result of the read
+ */
+static uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev,
+ int fn, int reg)
+{
+ uint64_t address =
+ __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ return le32_to_cpu(cvmx_read64_uint32(address));
+ else
+ return 0xffffffff;
+}
+
+/**
+ * Write 8bits to a Device's config space
+ *
+ * @pcie_port: PCIe port the device is on
+ * @bus: Sub bus
+ * @dev: Device ID
+ * @fn: Device sub function
+ * @reg: Register to access
+ * @val: Value to write
+ */
+static void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn,
+ int reg, uint8_t val)
+{
+ uint64_t address =
+ __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ cvmx_write64_uint8(address, val);
+}
+
+/**
+ * Write 16bits to a Device's config space
+ *
+ * @pcie_port: PCIe port the device is on
+ * @bus: Sub bus
+ * @dev: Device ID
+ * @fn: Device sub function
+ * @reg: Register to access
+ * @val: Value to write
+ */
+static void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn,
+ int reg, uint16_t val)
+{
+ uint64_t address =
+ __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ cvmx_write64_uint16(address, cpu_to_le16(val));
+}
+
+/**
+ * Write 32bits to a Device's config space
+ *
+ * @pcie_port: PCIe port the device is on
+ * @bus: Sub bus
+ * @dev: Device ID
+ * @fn: Device sub function
+ * @reg: Register to access
+ * @val: Value to write
+ */
+static void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn,
+ int reg, uint32_t val)
+{
+ uint64_t address =
+ __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ cvmx_write64_uint32(address, cpu_to_le32(val));
+}
+
+/**
+ * Initialize the RC config space CSRs
+ *
+ * @pcie_port: PCIe port to initialize
+ */
+static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
+{
+ union cvmx_pciercx_cfg030 pciercx_cfg030;
+ union cvmx_pciercx_cfg070 pciercx_cfg070;
+ union cvmx_pciercx_cfg001 pciercx_cfg001;
+ union cvmx_pciercx_cfg032 pciercx_cfg032;
+ union cvmx_pciercx_cfg006 pciercx_cfg006;
+ union cvmx_pciercx_cfg008 pciercx_cfg008;
+ union cvmx_pciercx_cfg009 pciercx_cfg009;
+ union cvmx_pciercx_cfg010 pciercx_cfg010;
+ union cvmx_pciercx_cfg011 pciercx_cfg011;
+ union cvmx_pciercx_cfg035 pciercx_cfg035;
+ union cvmx_pciercx_cfg075 pciercx_cfg075;
+ union cvmx_pciercx_cfg034 pciercx_cfg034;
+
+ /* Max Payload Size (PCIE*_CFG030[MPS]) */
+ /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
+ /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
+ /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
+
+ pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
+ pciercx_cfg030.s.mps = MPS_CN5XXX;
+ pciercx_cfg030.s.mrrs = MRRS_CN5XXX;
+ } else {
+ pciercx_cfg030.s.mps = MPS_CN6XXX;
+ pciercx_cfg030.s.mrrs = MRRS_CN6XXX;
+ }
+ /*
+ * Enable relaxed order processing. This will allow devices to
+ * affect read response ordering.
+ */
+ pciercx_cfg030.s.ro_en = 1;
+ /* Enable no snoop processing. Not used by Octeon */
+ pciercx_cfg030.s.ns_en = 1;
+ /* Correctable error reporting enable. */
+ pciercx_cfg030.s.ce_en = 1;
+ /* Non-fatal error reporting enable. */
+ pciercx_cfg030.s.nfe_en = 1;
+ /* Fatal error reporting enable. */
+ pciercx_cfg030.s.fe_en = 1;
+ /* Unsupported request reporting enable. */
+ pciercx_cfg030.s.ur_en = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
+
+
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
+ union cvmx_npei_ctl_status2 npei_ctl_status2;
+ /*
+ * Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match
+ * PCIE*_CFG030[MPS]. Max Read Request Size
+ * (NPEI_CTL_STATUS2[MRRS]) must not exceed
+ * PCIE*_CFG030[MRRS]
+ */
+ npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
+ /* Max payload size = 128 bytes for best Octeon DMA performance */
+ npei_ctl_status2.s.mps = MPS_CN5XXX;
+ /* Max read request size = 128 bytes for best Octeon DMA performance */
+ npei_ctl_status2.s.mrrs = MRRS_CN5XXX;
+ if (pcie_port)
+ npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */
+ else
+ npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */
+
+ cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
+ } else {
+ /*
+ * Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match
+ * PCIE*_CFG030[MPS]. Max Read Request Size
+ * (DPI_SLI_PRTX_CFG[MRRS]) must not exceed
+ * PCIE*_CFG030[MRRS].
+ */
+ union cvmx_dpi_sli_prtx_cfg prt_cfg;
+ union cvmx_sli_s2m_portx_ctl sli_s2m_portx_ctl;
+ prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
+ prt_cfg.s.mps = MPS_CN6XXX;
+ prt_cfg.s.mrrs = MRRS_CN6XXX;
+ /* Max outstanding load request. */
+ prt_cfg.s.molr = 32;
+ cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);
+
+ sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
+ sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
+ cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
+ }
+
+ /* ECRC Generation (PCIE*_CFG070[GE,CE]) */
+ pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
+ pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */
+ pciercx_cfg070.s.ce = 1; /* ECRC check enable. */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32);
+
+ /*
+ * Access Enables (PCIE*_CFG001[MSAE,ME])
+ * ME and MSAE should always be set.
+ * Interrupt Disable (PCIE*_CFG001[I_DIS])
+ * System Error Message Enable (PCIE*_CFG001[SEE])
+ */
+ pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
+ pciercx_cfg001.s.msae = 1; /* Memory space enable. */
+ pciercx_cfg001.s.me = 1; /* Bus master enable. */
+ pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */
+ pciercx_cfg001.s.see = 1; /* SERR# enable */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32);
+
+ /* Advanced Error Recovery Message Enables */
+ /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
+ /* Use CVMX_PCIERCX_CFG067 hardware default */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);
+
+
+ /* Active State Power Management (PCIE*_CFG032[ASLPC]) */
+ pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);
+
+ /*
+ * Link Width Mode (PCIERCn_CFG452[LME]) - Set during
+ * cvmx_pcie_rc_initialize_link()
+ *
+ * Primary Bus Number (PCIERCn_CFG006[PBNUM])
+ *
+ * We set the primary bus number to 1 so IDT bridges are
+ * happy. They don't like zero.
+ */
+ pciercx_cfg006.u32 = 0;
+ pciercx_cfg006.s.pbnum = 1;
+ pciercx_cfg006.s.sbnum = 1;
+ pciercx_cfg006.s.subbnum = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32);
+
+
+ /*
+ * Memory-mapped I/O BAR (PCIERCn_CFG008)
+ * Most applications should disable the memory-mapped I/O BAR by
+ * setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR]
+ */
+ pciercx_cfg008.u32 = 0;
+ pciercx_cfg008.s.mb_addr = 0x100;
+ pciercx_cfg008.s.ml_addr = 0;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32);
+
+
+ /*
+ * Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011)
+ * Most applications should disable the prefetchable BAR by setting
+ * PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] <
+ * PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE]
+ */
+ pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
+ pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
+ pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
+ pciercx_cfg009.s.lmem_base = 0x100;
+ pciercx_cfg009.s.lmem_limit = 0;
+ pciercx_cfg010.s.umem_base = 0x100;
+ pciercx_cfg011.s.umem_limit = 0;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32);
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32);
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32);
+
+ /*
+ * System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE])
+ * PME Interrupt Enables (PCIERCn_CFG035[PMEIE])
+ */
+ pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
+ pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */
+ pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */
+ pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */
+ pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32);
+
+ /*
+ * Advanced Error Recovery Interrupt Enables
+ * (PCIERCn_CFG075[CERE,NFERE,FERE])
+ */
+ pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
+ pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */
+ pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */
+ pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32);
+
+ /*
+ * HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN],
+ * PCIERCn_CFG034[DLLS_EN,CCINT_EN])
+ */
+ pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
+ pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */
+ pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */
+ pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32);
+}
+
+/**
+ * Initialize a host mode PCIe gen 1 link. This function takes a PCIe
+ * port from reset to a link up state. Software can then begin
+ * configuring the rest of the link.
+ *
+ * @pcie_port: PCIe port to initialize
+ *
+ * Returns Zero on success
+ */
+static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port)
+{
+ uint64_t start_cycle;
+ union cvmx_pescx_ctl_status pescx_ctl_status;
+ union cvmx_pciercx_cfg452 pciercx_cfg452;
+ union cvmx_pciercx_cfg032 pciercx_cfg032;
+ union cvmx_pciercx_cfg448 pciercx_cfg448;
+
+ /* Set the lane width */
+ pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
+ pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
+ if (pescx_ctl_status.s.qlm_cfg == 0)
+ /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
+ pciercx_cfg452.s.lme = 0xf;
+ else
+ /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
+ pciercx_cfg452.s.lme = 0x7;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);
+
+ /*
+ * CN52XX pass 1.x has an errata where length mismatches on UR
+ * responses can cause bus errors on 64bit memory
+ * reads. Turning off length error checking fixes this.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
+ union cvmx_pciercx_cfg455 pciercx_cfg455;
+ pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
+ pciercx_cfg455.s.m_cpl_len_err = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
+ }
+
+ /* Lane swap needs to be manually enabled for CN52XX */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1)) {
+ pescx_ctl_status.s.lane_swp = 1;
+ cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);
+ }
+
+ /* Bring up the link */
+ pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
+ pescx_ctl_status.s.lnk_enb = 1;
+ cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);
+
+ /*
+ * CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to
+ * be disabled.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
+ __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);
+
+ /* Wait for the link to come up */
+ start_cycle = cvmx_get_cycle();
+ do {
+ if (cvmx_get_cycle() - start_cycle > 2 * octeon_get_clock_rate()) {
+ cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
+ return -1;
+ }
+ __delay(10000);
+ pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ } while (pciercx_cfg032.s.dlla == 0);
+
+ /* Clear all pending errors */
+ cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM));
+
+ /*
+ * Update the Replay Time Limit. Empirically, some PCIe
+ * devices take a little longer to respond than expected under
+ * load. As a workaround for this we configure the Replay Time
+ * Limit to the value expected for a 512 byte MPS instead of
+ * our actual 256 byte MPS. The numbers below are directly
+ * from the PCIe spec table 3-4.
+ */
+ pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
+ switch (pciercx_cfg032.s.nlw) {
+ case 1: /* 1 lane */
+ pciercx_cfg448.s.rtl = 1677;
+ break;
+ case 2: /* 2 lanes */
+ pciercx_cfg448.s.rtl = 867;
+ break;
+ case 4: /* 4 lanes */
+ pciercx_cfg448.s.rtl = 462;
+ break;
+ case 8: /* 8 lanes */
+ pciercx_cfg448.s.rtl = 258;
+ break;
+ }
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
+
+ return 0;
+}
+
+static void __cvmx_increment_ba(union cvmx_sli_mem_access_subidx *pmas)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ pmas->cn68xx.ba++;
+ else
+ pmas->s.ba++;
+}
+
+/**
+ * Initialize a PCIe gen 1 port for use in host (RC) mode. It doesn't
+ * enumerate the bus.
+ *
+ * @pcie_port: PCIe port to initialize
+ *
+ * Returns Zero on success
+ */
+static int __cvmx_pcie_rc_initialize_gen1(int pcie_port)
+{
+ int i;
+ int base;
+ u64 addr_swizzle;
+ union cvmx_ciu_soft_prst ciu_soft_prst;
+ union cvmx_pescx_bist_status pescx_bist_status;
+ union cvmx_pescx_bist_status2 pescx_bist_status2;
+ union cvmx_npei_ctl_status npei_ctl_status;
+ union cvmx_npei_mem_access_ctl npei_mem_access_ctl;
+ union cvmx_npei_mem_access_subidx mem_access_subid;
+ union cvmx_npei_dbg_data npei_dbg_data;
+ union cvmx_pescx_ctl_status2 pescx_ctl_status2;
+ union cvmx_pciercx_cfg032 pciercx_cfg032;
+ union cvmx_npei_bar1_indexx bar1_index;
+
+retry:
+ /*
+ * Make sure we aren't trying to set up a target mode interface
+ * in host mode.
+ */
+ npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
+ if ((pcie_port == 0) && !npei_ctl_status.s.host_mode) {
+ cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port);
+ return -1;
+ }
+
+ /*
+ * Make sure a CN52XX isn't trying to bring up port 1 when it
+ * is disabled.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
+ if ((pcie_port == 1) && npei_dbg_data.cn52xx.qlm0_link_width) {
+ cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
+ return -1;
+ }
+ }
+
+ /*
+ * PCIe switch arbitration mode. '0' == fixed priority NPEI,
+ * PCIe0, then PCIe1. '1' == round robin.
+ */
+ npei_ctl_status.s.arb = 1;
+ /* Allow up to 0x20 config retries */
+ npei_ctl_status.s.cfg_rtry = 0x20;
+ /*
+ * CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS
+ * don't reset.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
+ npei_ctl_status.s.p0_ntags = 0x20;
+ npei_ctl_status.s.p1_ntags = 0x20;
+ }
+ cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);
+
+ /* Bring the PCIe out of reset */
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) {
+ /*
+ * The EBH5200 board swapped the PCIe reset lines on
+ * the board. As a workaround for this bug, we bring
+ * both PCIe ports out of reset at the same time
+ * instead of on separate calls. So for port 0, we
+ * bring both out of reset and do nothing on port 1
+ */
+ if (pcie_port == 0) {
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ /*
+ * After a chip reset the PCIe will also be in
+ * reset. If it isn't, most likely someone is
+ * trying to init it again without a proper
+ * PCIe reset.
+ */
+ if (ciu_soft_prst.s.soft_prst == 0) {
+ /* Reset the ports */
+ ciu_soft_prst.s.soft_prst = 1;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ ciu_soft_prst.s.soft_prst = 1;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ /* Wait until pcie resets the ports. */
+ udelay(2000);
+ }
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ }
+ } else {
+ /*
+ * The normal case: The PCIe ports are completely
+ * separate and can be brought out of reset
+ * independently.
+ */
+ if (pcie_port)
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ else
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ /*
+ * After a chip reset the PCIe will also be in
+ * reset. If it isn't, most likely someone is trying
+ * to init it again without a proper PCIe reset.
+ */
+ if (ciu_soft_prst.s.soft_prst == 0) {
+ /* Reset the port */
+ ciu_soft_prst.s.soft_prst = 1;
+ if (pcie_port)
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ else
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ /* Wait until pcie resets the ports. */
+ udelay(2000);
+ }
+ if (pcie_port) {
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ } else {
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ }
+ }
+
+ /*
+ * Wait for PCIe reset to complete. Due to errata PCIE-700, we
+ * don't poll PESCX_CTL_STATUS2[PCIERST], but simply wait a
+ * fixed number of cycles.
+ */
+ __delay(400000);
+
+ /*
+ * PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of
+ * CN56XX and CN52XX, so we only probe it on newer chips
+ */
+ if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
+ /* Clear PCLK_RUN so we can check if the clock is running */
+ pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
+ pescx_ctl_status2.s.pclk_run = 1;
+ cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
+ /* Now that we cleared PCLK_RUN, wait for it to be set
+ * again, telling us the clock is running
+ */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
+ union cvmx_pescx_ctl_status2, pclk_run, ==, 1, 10000)) {
+ cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
+ return -1;
+ }
+ }
+
+ /*
+ * Check and make sure PCIe came out of reset. If it doesn't
+ * the board probably hasn't wired the clocks up and the
+ * interface should be skipped.
+ */
+ pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
+ if (pescx_ctl_status2.s.pcierst) {
+ cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
+ return -1;
+ }
+
+ /*
+ * Check BIST2 status. If any bits are set skip this
+ * interface. This is an attempt to catch PCIE-813 on pass 1
+ * parts.
+ */
+ pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
+ if (pescx_bist_status2.u64) {
+ cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n",
+ pcie_port);
+ return -1;
+ }
+
+ /* Check BIST status */
+ pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
+ if (pescx_bist_status.u64)
+ cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n",
+ pcie_port, CAST64(pescx_bist_status.u64));
+
+ /* Initialize the config space CSRs */
+ __cvmx_pcie_rc_initialize_config_space(pcie_port);
+
+ /* Bring the link up */
+ if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port)) {
+ cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n",
+ pcie_port);
+ return -1;
+ }
+
+ /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
+ npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
+ npei_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
+ npei_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
+ cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
+
+ /* Setup Mem access SubDIDs */
+ mem_access_subid.u64 = 0;
+ mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
+ mem_access_subid.s.nmerge = 1; /* Due to an errata on pass 1 chips, no merging is allowed. */
+ mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
+ mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
+ mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
+ mem_access_subid.s.nsw = 0; /* Enable Snoop for Writes. */
+ mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Reads. */
+ mem_access_subid.s.row = 0; /* Disable Relaxed Ordering for Writes. */
+ mem_access_subid.s.ba = 0; /* PCIe Address Bits <63:34>. */
+
+ /*
+ * Setup mem access 12-15 for port 0, 16-19 for port 1,
+ * supplying 36 bits of address space.
+ */
+ for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++) {
+ cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
+ mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
+ }
+
+ /*
+ * Disable the peer to peer forwarding register. This must be
+ * setup by the OS after it enumerates the bus and assigns
+ * addresses to the PCIe busses.
+ */
+ for (i = 0; i < 4; i++) {
+ cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
+ cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
+ }
+
+ /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
+ cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);
+
+ /* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */
+ cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
+
+ bar1_index.u32 = 0;
+ bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
+ bar1_index.s.ca = 1; /* Not Cached */
+ bar1_index.s.end_swp = 1; /* Endian Swap mode */
+ bar1_index.s.addr_v = 1; /* Valid entry */
+
+ base = pcie_port ? 16 : 0;
+
+ /* Big endian swizzle for 32-bit PEXP_NCB register. */
+#ifdef __MIPSEB__
+ addr_swizzle = 4;
+#else
+ addr_swizzle = 0;
+#endif
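+ /*
+ * On a big-endian kernel the 32-bit BAR1 index registers sit at a
+ * 4-byte offset within their 64-bit CSR slot, hence the address
+ * swizzle applied below.
+ */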
+ for (i = 0; i < 16; i++) {
+ cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle),
+ bar1_index.u32);
+ base++;
+ /* 256MB / 16 >> 22 == 4 */
+ bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
+ }
+
+ /*
+ * Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take
+ * precedence where they overlap. It also overlaps with the
+ * device addresses, so make sure the peer to peer forwarding
+ * is set right.
+ */
+ cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);
+
+ /*
+ * Setup BAR2 attributes
+ *
+ * Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM])
+ * - PTLP_RO,CTLP_RO should normally be set (except for debug).
+ * - WAIT_COM=0 will likely work for all applications.
+ *
+ * Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]).
+ */
+ if (pcie_port) {
+ union cvmx_npei_ctl_port1 npei_ctl_port;
+ npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
+ npei_ctl_port.s.bar2_enb = 1;
+ npei_ctl_port.s.bar2_esx = 1;
+ npei_ctl_port.s.bar2_cax = 0;
+ npei_ctl_port.s.ptlp_ro = 1;
+ npei_ctl_port.s.ctlp_ro = 1;
+ npei_ctl_port.s.wait_com = 0;
+ npei_ctl_port.s.waitl_com = 0;
+ cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
+ } else {
+ union cvmx_npei_ctl_port0 npei_ctl_port;
+ npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
+ npei_ctl_port.s.bar2_enb = 1;
+ npei_ctl_port.s.bar2_esx = 1;
+ npei_ctl_port.s.bar2_cax = 0;
+ npei_ctl_port.s.ptlp_ro = 1;
+ npei_ctl_port.s.ctlp_ro = 1;
+ npei_ctl_port.s.wait_com = 0;
+ npei_ctl_port.s.waitl_com = 0;
+ cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
+ }
+
+ /*
+ * Both pass 1 and pass 2 of CN52XX and CN56XX have an errata
+ * that causes TLP ordering to not be preserved after multiple
+ * PCIe port resets. This code detects this fault and corrects
+ * it by aligning the TLP counters properly. Another link
+ * reset is then performed. See PCIE-13340
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
+ union cvmx_npei_dbg_data dbg_data;
+ int old_in_fif_p_count;
+ int in_fif_p_count;
+ int out_p_count;
+ int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1;
+ int i;
+
+ /*
+ * Choose a write address of 1MB. It should be
+ * harmless as all bars haven't been setup.
+ */
+ uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);
+
+ /*
+ * Make sure at least in_p_offset writes have been executed before we
+ * try to read in_fif_p_count.
+ */
+ i = in_p_offset;
+ while (i--) {
+ cvmx_write64_uint32(write_address, 0);
+ __delay(10000);
+ }
+
+ /*
+ * Read the IN_FIF_P_COUNT from the debug
+ * select. IN_FIF_P_COUNT can be unstable sometimes, so
+ * read it twice with a write between the reads. This
+ * way we can tell the value is good, as it will
+ * increment by one due to the write.
+ */
+ cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc);
+ cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
+ do {
+ dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
+ old_in_fif_p_count = dbg_data.s.data & 0xff;
+ cvmx_write64_uint32(write_address, 0);
+ __delay(10000);
+ dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
+ in_fif_p_count = dbg_data.s.data & 0xff;
+ } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
+
+ /* Update in_fif_p_count for its offset with respect to out_p_count */
+ in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;
+
+ /* Read the OUT_P_COUNT from the debug select */
+ cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f);
+ cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
+ dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
+ out_p_count = (dbg_data.s.data>>1) & 0xff;
+
+ /* Check that the two counters are aligned */
+ if (out_p_count != in_fif_p_count) {
+ cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
+ while (in_fif_p_count != 0) {
+ cvmx_write64_uint32(write_address, 0);
+ __delay(10000);
+ in_fif_p_count = (in_fif_p_count + 1) & 0xff;
+ }
+ /*
+ * The EBH5200 board has the PCIe reset
+ * lines swapped. This means we must
+ * bring both links down and up, which will
+ * cause PCIe0 to need alignment
+ * again. Lots of messages will be displayed,
+ * but everything should work.
+ */
+ if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) &&
+ (pcie_port == 1))
+ cvmx_pcie_rc_initialize(0);
+ /* Retry bringing this port up */
+ goto retry;
+ }
+ }
+
+ /* Display the link status */
+ pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);
+
+ return 0;
+}
+
+/**
+ * Initialize a host mode PCIe gen 2 link. This function takes a PCIe
+ * port from reset to a link up state. Software can then begin
+ * configuring the rest of the link.
+ *
+ * @pcie_port: PCIe port to initialize
+ *
+ * Returns Zero on success.
+ */
+static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port)
+{
+ uint64_t start_cycle;
+ union cvmx_pemx_ctl_status pem_ctl_status;
+ union cvmx_pciercx_cfg032 pciercx_cfg032;
+ union cvmx_pciercx_cfg448 pciercx_cfg448;
+
+ /* Bring up the link */
+ pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
+ pem_ctl_status.s.lnk_enb = 1;
+ cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64);
+
+ /* Wait for the link to come up */
+ start_cycle = cvmx_get_cycle();
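+ /* Timeout after one second's worth of core clock cycles */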
+ do {
+ if (cvmx_get_cycle() - start_cycle > octeon_get_clock_rate())
+ return -1;
+ __delay(10000);
+ pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ } while ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1));
+
+ /*
+ * Update the Replay Time Limit. Empirically, some PCIe
+ * devices take a little longer to respond than expected under
+ * load. As a workaround for this we configure the Replay Time
+ * Limit to the value expected for a 512 byte MPS instead of
+ * our actual 256 byte MPS. The numbers below are directly
+ * from the PCIe spec table 3-4
+ */
+ pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
+ switch (pciercx_cfg032.s.nlw) {
+ case 1: /* 1 lane */
+ pciercx_cfg448.s.rtl = 1677;
+ break;
+ case 2: /* 2 lanes */
+ pciercx_cfg448.s.rtl = 867;
+ break;
+ case 4: /* 4 lanes */
+ pciercx_cfg448.s.rtl = 462;
+ break;
+ case 8: /* 8 lanes */
+ pciercx_cfg448.s.rtl = 258;
+ break;
+ }
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
+
+ return 0;
+}
+
+
+/**
+ * Initialize a PCIe gen 2 port for use in host (RC) mode. It doesn't enumerate
+ * the bus.
+ *
+ * @pcie_port: PCIe port to initialize
+ *
+ * Returns Zero on success.
+ */
+static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
+{
+ int i;
+ union cvmx_ciu_soft_prst ciu_soft_prst;
+ union cvmx_mio_rst_ctlx mio_rst_ctl;
+ union cvmx_pemx_bar_ctl pemx_bar_ctl;
+ union cvmx_pemx_ctl_status pemx_ctl_status;
+ union cvmx_pemx_bist_status pemx_bist_status;
+ union cvmx_pemx_bist_status2 pemx_bist_status2;
+ union cvmx_pciercx_cfg032 pciercx_cfg032;
+ union cvmx_pciercx_cfg515 pciercx_cfg515;
+ union cvmx_sli_ctl_portx sli_ctl_portx;
+ union cvmx_sli_mem_access_ctl sli_mem_access_ctl;
+ union cvmx_sli_mem_access_subidx mem_access_subid;
+ union cvmx_sriox_status_reg sriox_status_reg;
+ union cvmx_pemx_bar1_indexx bar1_index;
+
+ if (octeon_has_feature(OCTEON_FEATURE_SRIO)) {
+ /* Make sure this interface isn't SRIO */
+ if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
+ /*
+ * The CN66XX requires reading the
+ * MIO_QLMX_CFG register to figure out the
+ * port type.
+ */
+ union cvmx_mio_qlmx_cfg qlmx_cfg;
+ qlmx_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(pcie_port));
+
+ if (qlmx_cfg.s.qlm_spd == 15) {
+ pr_notice("PCIe: Port %d is disabled, skipping.\n", pcie_port);
+ return -1;
+ }
+
+ switch (qlmx_cfg.s.qlm_spd) {
+ case 0x1: /* SRIO 1x4 short */
+ case 0x3: /* SRIO 1x4 long */
+ case 0x4: /* SRIO 2x2 short */
+ case 0x6: /* SRIO 2x2 long */
+ pr_notice("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
+ return -1;
+ case 0x9: /* SGMII */
+ pr_notice("PCIe: Port %d is SGMII, skipping.\n", pcie_port);
+ return -1;
+ case 0xb: /* XAUI */
+ pr_notice("PCIe: Port %d is XAUI, skipping.\n", pcie_port);
+ return -1;
+ case 0x0: /* PCIE gen2 */
+ case 0x8: /* PCIE gen2 (alias) */
+ case 0x2: /* PCIE gen1 */
+ case 0xa: /* PCIE gen1 (alias) */
+ break;
+ default:
+ pr_notice("PCIe: Port %d is unknown, skipping.\n", pcie_port);
+ return -1;
+ }
+ } else {
+ sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(pcie_port));
+ if (sriox_status_reg.s.srio) {
+ pr_notice("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
+ return -1;
+ }
+ }
+ }
+
+#if 0
+ /* This code is so that the PCIe analyzer is able to see 63XX traffic */
+ pr_notice("PCIE : init for pcie analyzer.\n");
+ cvmx_helper_qlm_jtag_init();
+ cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
+ cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
+ cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
+ cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
+ cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
+ cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
+ cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
+ cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
+ cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
+ cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
+ cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
+ cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
+ cvmx_helper_qlm_jtag_update(pcie_port);
+#endif
+
+ /* Make sure we aren't trying to set up a target mode interface in host mode */
+ mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
+ if (!mio_rst_ctl.s.host_mode) {
+ pr_notice("PCIe: Port %d in endpoint mode.\n", pcie_port);
+ return -1;
+ }
+
+ /* CN63XX Pass 1.0 erratum G-14395 requires that the QLM de-emphasis be programmed */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) {
+ if (pcie_port) {
+ union cvmx_ciu_qlm ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 5;
+ ciu_qlm.s.txmargin = 0x17;
+ cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
+ } else {
+ union cvmx_ciu_qlm ciu_qlm;
+ ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 5;
+ ciu_qlm.s.txmargin = 0x17;
+ cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
+ }
+ }
+ /* Bring the PCIe out of reset */
+ if (pcie_port)
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ else
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ /*
+ * After a chip reset the PCIe will also be in reset. If it
+ * isn't, most likely someone is trying to init it again
+ * without a proper PCIe reset
+ */
+ if (ciu_soft_prst.s.soft_prst == 0) {
+ /* Reset the port */
+ ciu_soft_prst.s.soft_prst = 1;
+ if (pcie_port)
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ else
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ /* Wait until pcie resets the ports. */
+ udelay(2000);
+ }
+ if (pcie_port) {
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ } else {
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ }
+
+ /* Wait for PCIe reset to complete */
+ udelay(1000);
+
+ /*
+ * Check and make sure PCIe came out of reset. If it hasn't,
+ * the board probably hasn't wired the clocks up and the
+ * interface should be skipped.
+ */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_RST_CTLX(pcie_port), union cvmx_mio_rst_ctlx, rst_done, ==, 1, 10000)) {
+ pr_notice("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
+ return -1;
+ }
+
+ /* Check BIST status */
+ pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port));
+ if (pemx_bist_status.u64)
+ pr_notice("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64));
+ pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port));
+ /* Errata PCIE-14766 may cause the lower 6 bits to be randomly set on CN63XXp1 */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+ pemx_bist_status2.u64 &= ~0x3full;
+ if (pemx_bist_status2.u64)
+ pr_notice("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64));
+
+ /* Initialize the config space CSRs */
+ __cvmx_pcie_rc_initialize_config_space(pcie_port);
+
+ /* Enable gen2 speed selection */
+ pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port));
+ pciercx_cfg515.s.dsc = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32);
+
+ /* Bring the link up */
+ if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port)) {
+ /*
+ * Some gen1 devices don't handle the gen 2 training
+ * correctly. Disable gen2 and try again with only
+ * gen1
+ */
+ union cvmx_pciercx_cfg031 pciercx_cfg031;
+ pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port));
+ pciercx_cfg031.s.mls = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg031.u32);
+ if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port)) {
+ pr_notice("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port);
+ return -1;
+ }
+ }
+
+ /* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
+ sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL);
+ sli_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
+ sli_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
+ cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64);
+
+ /* Setup Mem access SubDIDs */
+ mem_access_subid.u64 = 0;
+ mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
+ mem_access_subid.s.nmerge = 0; /* Allow merging as it works on CN6XXX. */
+ mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
+ mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
+ mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
+ mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
+ /* PCIe Address Bits <63:34>. */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ mem_access_subid.cn68xx.ba = 0;
+ else
+ mem_access_subid.s.ba = 0;
+
+ /*
+ * Setup mem access 12-15 for port 0, 16-19 for port 1,
+ * supplying 36 bits of address space.
+ */
+ for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++) {
+ cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
+ /* Set each SUBID to extend the addressable range */
+ __cvmx_increment_ba(&mem_access_subid);
+ }
+
+ /*
+ * Disable the peer to peer forwarding register. This must be
+ * set up by the OS after it enumerates the bus and assigns
+ * addresses to the PCIe buses.
+ */
+ for (i = 0; i < 4; i++) {
+ cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1);
+ cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1);
+ }
+
+ /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
+ cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0);
+
+ /*
+ * Set Octeon's BAR2 to decode 0-2^41. Bar0 and Bar1 take
+ * precedence where they overlap. It also overlaps with the
+ * device addresses, so make sure the peer to peer forwarding
+ * is set right.
+ */
+ cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0);
+
+ /*
+ * Setup BAR2 attributes
+ * Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM])
+ * - PTLP_RO,CTLP_RO should normally be set (except for debug).
+ * - WAIT_COM=0 will likely work for all applications.
+ * Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM])
+ */
+ pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port));
+ pemx_bar_ctl.s.bar1_siz = 3; /* 256MB BAR1*/
+ pemx_bar_ctl.s.bar2_enb = 1;
+ pemx_bar_ctl.s.bar2_esx = 1;
+ pemx_bar_ctl.s.bar2_cax = 0;
+ cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64);
+ sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port));
+ sli_ctl_portx.s.ptlp_ro = 1;
+ sli_ctl_portx.s.ctlp_ro = 1;
+ sli_ctl_portx.s.wait_com = 0;
+ sli_ctl_portx.s.waitl_com = 0;
+ cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64);
+
+ /* BAR1 follows BAR2 */
+ cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
+
+ bar1_index.u64 = 0;
+ bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
+ bar1_index.s.ca = 1; /* Not Cached */
+ bar1_index.s.end_swp = 1; /* Endian Swap mode */
+ bar1_index.s.addr_v = 1; /* Valid entry */
+
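+ /* Fill all 16 BAR1 index entries; each entry maps a 16MB slice of the 256MB BAR1 window */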
+ for (i = 0; i < 16; i++) {
+ cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64);
+ /* 256MB / 16 >> 22 == 4 */
+ bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
+ }
+
+ /*
+ * Allow config retries for 250ms. Count is based on the 5 GHz
+ * SERDES clock.
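+ * (250ms * 5,000,000 cycles/ms) / 0x10000 cycles per count is roughly 19073.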
+ */
+ pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
+ pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000;
+ cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64);
+
+ /* Display the link status */
+ pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ pr_notice("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls);
+
+ return 0;
+}
+
+/**
+ * Initialize a PCIe port for use in host (RC) mode. It doesn't enumerate the bus.
+ *
+ * @pcie_port: PCIe port to initialize
+ *
+ * Returns Zero on success
+ */
+static int cvmx_pcie_rc_initialize(int pcie_port)
+{
+ int result;
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI))
+ result = __cvmx_pcie_rc_initialize_gen1(pcie_port);
+ else
+ result = __cvmx_pcie_rc_initialize_gen2(pcie_port);
+ return result;
+}
+
+/* Above was cvmx-pcie.c, below original pcie.c */
+
+/**
+ * Map a PCI device to the appropriate interrupt line
+ *
+ * @dev: The Linux PCI device structure for the device to map
+ * @slot: The slot number for this device on __BUS 0__. Linux
+ * enumerates through all the bridges and figures out the
+ * slot on Bus 0 where this device eventually hooks to.
+ * @pin: The PCI interrupt pin read from the device, then swizzled
+ * as it goes through each bridge.
+ * Returns Interrupt number for the device
+ */
+int octeon_pcie_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ /*
+ * The EBH5600 board with the PCI to PCIe bridge mistakenly
+ * wires the first slot for both device id 2 and interrupt
+ * A. According to the PCI spec, device id 2 should be C. The
+ * following kludge attempts to fix this.
+ */
+ if (strstr(octeon_board_type_string(), "EBH5600") &&
+ dev->bus && dev->bus->parent) {
+ /*
+ * Iterate all the way up the device chain and find
+ * the root bus.
+ */
+ while (dev->bus && dev->bus->parent)
+ dev = to_pci_dev(dev->bus->bridge);
+ /*
+ * If the root bus is bus number 1 and the PEX 8114 is the
+ * root, assume we are behind the miswired bus. We
+ * need to correct the swizzle level by two. Yuck.
+ */
+ if ((dev->bus->number == 1) &&
+ (dev->vendor == 0x10b5) && (dev->device == 0x8114)) {
+ /*
+ * The pin field is one based, not zero. We
+ * need to swizzle it by minus two.
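+ * For example, INTA (1) becomes INTC (3) and INTB (2) becomes INTD (4).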
+ */
+ pin = ((pin - 3) & 3) + 1;
+ }
+ }
+ /*
+ * The -1 is because pin starts with one, not zero. It might
+ * be that this equation needs to include the slot number, but
+ * I don't have hardware to check that against.
+ */
+ return pin - 1 + OCTEON_IRQ_PCI_INT0;
+}
+
+static void set_cfg_read_retry(u32 retry_cnt)
+{
+ union cvmx_pemx_ctl_status pemx_ctl;
+ pemx_ctl.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(1));
+ pemx_ctl.s.cfg_rtry = retry_cnt;
+ cvmx_write_csr(CVMX_PEMX_CTL_STATUS(1), pemx_ctl.u64);
+}
+
+
+static u32 disable_cfg_read_retry(void)
+{
+ u32 retry_cnt;
+
+ union cvmx_pemx_ctl_status pemx_ctl;
+ pemx_ctl.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(1));
+ retry_cnt = pemx_ctl.s.cfg_rtry;
+ pemx_ctl.s.cfg_rtry = 0;
+ cvmx_write_csr(CVMX_PEMX_CTL_STATUS(1), pemx_ctl.u64);
+ return retry_cnt;
+}
+
+static int is_cfg_retry(void)
+{
+ union cvmx_pemx_int_sum pemx_int_sum;
+ pemx_int_sum.u64 = cvmx_read_csr(CVMX_PEMX_INT_SUM(1));
+ if (pemx_int_sum.s.crs_dr)
+ return 1;
+ return 0;
+}
+
+/*
+ * Read a value from configuration space
+ *
+ */
+static int octeon_pcie_read_config(unsigned int pcie_port, struct pci_bus *bus,
+ unsigned int devfn, int reg, int size,
+ u32 *val)
+{
+ union octeon_cvmemctl cvmmemctl;
+ union octeon_cvmemctl cvmmemctl_save;
+ int bus_number = bus->number;
+ int cfg_retry = 0;
+ int retry_cnt = 0;
+ int max_retry_cnt = 10;
+ u32 cfg_retry_cnt = 0;
+
+ cvmmemctl_save.u64 = 0;
+ BUG_ON(pcie_port >= ARRAY_SIZE(enable_pcie_bus_num_war));
+ /*
+ * For the top level bus make sure our hardware bus number
+ * matches the software one
+ */
+ if (bus->parent == NULL) {
+ if (enable_pcie_bus_num_war[pcie_port])
+ bus_number = 0;
+ else {
+ union cvmx_pciercx_cfg006 pciercx_cfg006;
+ pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port,
+ CVMX_PCIERCX_CFG006(pcie_port));
+ if (pciercx_cfg006.s.pbnum != bus_number) {
+ pciercx_cfg006.s.pbnum = bus_number;
+ pciercx_cfg006.s.sbnum = bus_number;
+ pciercx_cfg006.s.subbnum = bus_number;
+ cvmx_pcie_cfgx_write(pcie_port,
+ CVMX_PCIERCX_CFG006(pcie_port),
+ pciercx_cfg006.u32);
+ }
+ }
+ }
+
+ /*
+ * PCIe only has a single device connected to Octeon. It is
+ * always device ID 0. Don't bother doing reads for other
+ * device IDs on the first segment.
+ */
+ if ((bus->parent == NULL) && (devfn >> 3 != 0))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+
+ /*
+ * The following is a workaround for the CN57XX, CN56XX,
+ * CN55XX, and CN54XX errata with PCIe config reads from
+ * nonexistent devices. These chips will hang the PCIe link if a
+ * config read is performed that causes a UR response.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) ||
+ OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1)) {
+ /*
+ * For our EBH5600 board, port 0 has a bridge with two
+ * PCI-X slots. We need special checks to make
+ * sure we only probe valid devices. The PCIe->PCI-X
+ * bridge only responds to device ID 0, functions
+ * 0-1.
+ */
+ if ((bus->parent == NULL) && (devfn >= 2))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ /*
+ * The PCI-X slots are device ID 2,3. Choose one of
+ * the below "if" blocks based on what is plugged into
+ * the board.
+ */
+#if 1
+ /* Use this option if you aren't using either slot */
+ if (bus_number == 2)
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+#elif 0
+ /*
+ * Use this option if you are using the first slot but
+ * not the second.
+ */
+ if ((bus_number == 2) && (devfn >> 3 != 2))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+#elif 0
+ /*
+ * Use this option if you are using the second slot
+ * but not the first.
+ */
+ if ((bus_number == 2) && (devfn >> 3 != 3))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+#elif 0
+ /* Use this option if you are using both slots */
+ if ((bus_number == 2) &&
+ !((devfn == (2 << 3)) || (devfn == (3 << 3))))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+#endif
+
+ /* The following #if gives a more complicated example. These are
+ the checks required for running a Nitrox CN16XX-NHBX in the
+ slot of the EBH5600. This card has a PLX PCIe bridge with
+ four Nitrox PLX parts behind it */
+#if 0
+ /* PLX bridge with 4 ports */
+ if ((bus_number == 4) &&
+ !((devfn >> 3 >= 1) && (devfn >> 3 <= 4)))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ /* Nitrox behind PLX 1 */
+ if ((bus_number == 5) && (devfn >> 3 != 0))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ /* Nitrox behind PLX 2 */
+ if ((bus_number == 6) && (devfn >> 3 != 0))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ /* Nitrox behind PLX 3 */
+ if ((bus_number == 7) && (devfn >> 3 != 0))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ /* Nitrox behind PLX 4 */
+ if ((bus_number == 8) && (devfn >> 3 != 0))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+#endif
+
+ /*
+ * Shorten the DID timeout so bus errors for PCIe
+ * config reads from nonexistent devices happen
+ * faster. This allows us to continue booting even if
+ * the above "if" checks are wrong. Once one of these
+ * errors happens, the PCIe port is dead.
+ */
+ cvmmemctl_save.u64 = __read_64bit_c0_register($11, 7);
+ cvmmemctl.u64 = cvmmemctl_save.u64;
+ cvmmemctl.s.didtto = 2;
+ __write_64bit_c0_register($11, 7, cvmmemctl.u64);
+ }
+
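+ /* PCIE-14459 workaround: turn off hardware config-read retries and retry in software in the loop below */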
+ if ((OCTEON_IS_MODEL(OCTEON_CN63XX)) && (enable_pcie_14459_war))
+ cfg_retry_cnt = disable_cfg_read_retry();
+
+ pr_debug("pcie_cfg_rd port=%d b=%d devfn=0x%03x reg=0x%03x"
+ " size=%d ", pcie_port, bus_number, devfn, reg, size);
+ do {
+ switch (size) {
+ case 4:
+ *val = cvmx_pcie_config_read32(pcie_port, bus_number,
+ devfn >> 3, devfn & 0x7, reg);
+ break;
+ case 2:
+ *val = cvmx_pcie_config_read16(pcie_port, bus_number,
+ devfn >> 3, devfn & 0x7, reg);
+ break;
+ case 1:
+ *val = cvmx_pcie_config_read8(pcie_port, bus_number,
+ devfn >> 3, devfn & 0x7, reg);
+ break;
+ default:
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ set_cfg_read_retry(cfg_retry_cnt);
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ }
+ if ((OCTEON_IS_MODEL(OCTEON_CN63XX)) &&
+ (enable_pcie_14459_war)) {
+ cfg_retry = is_cfg_retry();
+ retry_cnt++;
+ if (retry_cnt > max_retry_cnt) {
+ pr_err(" pcie cfg_read retries failed. retry_cnt=%d\n",
+ retry_cnt);
+ cfg_retry = 0;
+ }
+ }
+ } while (cfg_retry);
+
+ if ((OCTEON_IS_MODEL(OCTEON_CN63XX)) && (enable_pcie_14459_war))
+ set_cfg_read_retry(cfg_retry_cnt);
+ pr_debug("val=%08x : tries=%02d\n", *val, retry_cnt);
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) ||
+ OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1))
+ write_c0_cvmmemctl(cvmmemctl_save.u64);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int octeon_pcie0_read_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 *val)
+{
+ return octeon_pcie_read_config(0, bus, devfn, reg, size, val);
+}
+
+static int octeon_pcie1_read_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 *val)
+{
+ return octeon_pcie_read_config(1, bus, devfn, reg, size, val);
+}
+
+static int octeon_dummy_read_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 *val)
+{
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+/*
+ * Write a value to PCI configuration space
+ */
+static int octeon_pcie_write_config(unsigned int pcie_port, struct pci_bus *bus,
+ unsigned int devfn, int reg,
+ int size, u32 val)
+{
+ int bus_number = bus->number;
+
+ BUG_ON(pcie_port >= ARRAY_SIZE(enable_pcie_bus_num_war));
+
+ if ((bus->parent == NULL) && (enable_pcie_bus_num_war[pcie_port]))
+ bus_number = 0;
+
+ pr_debug("pcie_cfg_wr port=%d b=%d devfn=0x%03x"
+ " reg=0x%03x size=%d val=%08x\n", pcie_port, bus_number, devfn,
+ reg, size, val);
+
+
+ switch (size) {
+ case 4:
+ cvmx_pcie_config_write32(pcie_port, bus_number, devfn >> 3,
+ devfn & 0x7, reg, val);
+ break;
+ case 2:
+ cvmx_pcie_config_write16(pcie_port, bus_number, devfn >> 3,
+ devfn & 0x7, reg, val);
+ break;
+ case 1:
+ cvmx_pcie_config_write8(pcie_port, bus_number, devfn >> 3,
+ devfn & 0x7, reg, val);
+ break;
+ default:
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int octeon_pcie0_write_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 val)
+{
+ return octeon_pcie_write_config(0, bus, devfn, reg, size, val);
+}
+
+static int octeon_pcie1_write_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 val)
+{
+ return octeon_pcie_write_config(1, bus, devfn, reg, size, val);
+}
+
+static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 val)
+{
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+static struct pci_ops octeon_pcie0_ops = {
+ .read = octeon_pcie0_read_config,
+ .write = octeon_pcie0_write_config,
+};
+
+static struct resource octeon_pcie0_mem_resource = {
+ .name = "Octeon PCIe0 MEM",
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource octeon_pcie0_io_resource = {
+ .name = "Octeon PCIe0 IO",
+ .flags = IORESOURCE_IO,
+};
+
+static struct pci_controller octeon_pcie0_controller = {
+ .pci_ops = &octeon_pcie0_ops,
+ .mem_resource = &octeon_pcie0_mem_resource,
+ .io_resource = &octeon_pcie0_io_resource,
+};
+
+static struct pci_ops octeon_pcie1_ops = {
+ .read = octeon_pcie1_read_config,
+ .write = octeon_pcie1_write_config,
+};
+
+static struct resource octeon_pcie1_mem_resource = {
+ .name = "Octeon PCIe1 MEM",
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource octeon_pcie1_io_resource = {
+ .name = "Octeon PCIe1 IO",
+ .flags = IORESOURCE_IO,
+};
+
+static struct pci_controller octeon_pcie1_controller = {
+ .pci_ops = &octeon_pcie1_ops,
+ .mem_resource = &octeon_pcie1_mem_resource,
+ .io_resource = &octeon_pcie1_io_resource,
+};
+
+static struct pci_ops octeon_dummy_ops = {
+ .read = octeon_dummy_read_config,
+ .write = octeon_dummy_write_config,
+};
+
+static struct resource octeon_dummy_mem_resource = {
+ .name = "Virtual PCIe MEM",
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource octeon_dummy_io_resource = {
+ .name = "Virtual PCIe IO",
+ .flags = IORESOURCE_IO,
+};
+
+static struct pci_controller octeon_dummy_controller = {
+ .pci_ops = &octeon_dummy_ops,
+ .mem_resource = &octeon_dummy_mem_resource,
+ .io_resource = &octeon_dummy_io_resource,
+};
+
+static int device_needs_bus_num_war(uint32_t deviceid)
+{
+#define IDT_VENDOR_ID 0x111d
+
+ if ((deviceid & 0xffff) == IDT_VENDOR_ID)
+ return 1;
+ return 0;
+}
+
+/**
+ * Initialize the Octeon PCIe controllers
+ *
+ * Returns Zero.
+ */
+static int __init octeon_pcie_setup(void)
+{
+ int result;
+ int host_mode;
+ int srio_war15205 = 0, port;
+ union cvmx_sli_ctl_portx sli_ctl_portx;
+ union cvmx_sriox_status_reg sriox_status_reg;
+
+ /* These chips don't have PCIe */
+ if (!octeon_has_feature(OCTEON_FEATURE_PCIE))
+ return 0;
+
+ /* No PCIe simulation */
+ if (octeon_is_simulation())
+ return 0;
+
+ /* Disable PCI if instructed on the command line */
+ if (pcie_disable)
+ return 0;
+
+ /* Point pcibios_map_irq() to the PCIe version of it */
+ octeon_pcibios_map_irq = octeon_pcie_pcibios_map_irq;
+
+ /*
+ * PCIe I/O range. It is based on port 0 but includes up until
+ * port 1's end.
+ */
+ set_io_port_base(CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(0)));
+ ioport_resource.start = 0;
+ ioport_resource.end =
+ cvmx_pcie_get_io_base_address(1) -
+ cvmx_pcie_get_io_base_address(0) + cvmx_pcie_get_io_size(1) - 1;
+
+ /*
+ * Create a dummy PCIe controller to swallow up bus 0. IDT bridges
+ * don't work if the primary bus number is zero. Here we add a fake
+ * PCIe controller that the kernel will give bus 0. This allows
+ * us to leave the normal kernel bus enumeration unchanged.
+ */
+ octeon_dummy_controller.io_map_base = -1;
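+ /* Give the dummy controller a degenerate window far above any real PCIe address so nothing is assigned behind it */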
+ octeon_dummy_controller.mem_resource->start = (1ull<<48);
+ octeon_dummy_controller.mem_resource->end = (1ull<<48);
+ register_pci_controller(&octeon_dummy_controller);
+
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
+ union cvmx_npei_ctl_status npei_ctl_status;
+ npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
+ host_mode = npei_ctl_status.s.host_mode;
+ octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_PCIE;
+ } else {
+ union cvmx_mio_rst_ctlx mio_rst_ctl;
+ mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(0));
+ host_mode = mio_rst_ctl.s.host_mode;
+ octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_PCIE2;
+ }
+
+ if (host_mode) {
+ pr_notice("PCIe: Initializing port 0\n");
+ /* CN63XX pass 1_x/2.0 errata PCIe-15205 */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
+ sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(0));
+ if (sriox_status_reg.s.srio) {
+ srio_war15205 += 1; /* Port is SRIO */
+ port = 0;
+ }
+ }
+ result = cvmx_pcie_rc_initialize(0);
+ if (result == 0) {
+ uint32_t device0;
+ /* Memory offsets are physical addresses */
+ octeon_pcie0_controller.mem_offset =
+ cvmx_pcie_get_mem_base_address(0);
+ /* IO offsets are Mips virtual addresses */
+ octeon_pcie0_controller.io_map_base =
+ CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address
+ (0));
+ octeon_pcie0_controller.io_offset = 0;
+ /*
+ * To keep things similar to PCI, we start
+ * device addresses at the same place as PCI
+ * using big bar support. This normally
+ * translates to 4GB-256MB, which is the same
+ * as most x86 PCs.
+ */
+ octeon_pcie0_controller.mem_resource->start =
+ cvmx_pcie_get_mem_base_address(0) +
+ (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20);
+ octeon_pcie0_controller.mem_resource->end =
+ cvmx_pcie_get_mem_base_address(0) +
+ cvmx_pcie_get_mem_size(0) - 1;
+ /*
+ * Ports must be above 16KB for the ISA bus
+ * filtering in the PCI-X to PCI bridge.
+ */
+ octeon_pcie0_controller.io_resource->start = 4 << 10;
+ octeon_pcie0_controller.io_resource->end =
+ cvmx_pcie_get_io_size(0) - 1;
+ msleep(100); /* Some devices need extra time */
+ register_pci_controller(&octeon_pcie0_controller);
+ device0 = cvmx_pcie_config_read32(0, 0, 0, 0, 0);
+ enable_pcie_bus_num_war[0] =
+ device_needs_bus_num_war(device0);
+ }
+ } else {
+ pr_notice("PCIe: Port 0 in endpoint mode, skipping.\n");
+ /* CN63XX pass 1_x/2.0 errata PCIe-15205 */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
+ srio_war15205 += 1;
+ port = 0;
+ }
+ }
+
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
+ host_mode = 1;
+ /* Skip the 2nd port on CN52XX if port 0 is in 4 lane mode */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ union cvmx_npei_dbg_data dbg_data;
+ dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
+ if (dbg_data.cn52xx.qlm0_link_width)
+ host_mode = 0;
+ }
+ } else {
+ union cvmx_mio_rst_ctlx mio_rst_ctl;
+ mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(1));
+ host_mode = mio_rst_ctl.s.host_mode;
+ }
+
+ if (host_mode) {
+ pr_notice("PCIe: Initializing port 1\n");
+ /* CN63XX pass 1_x/2.0 errata PCIe-15205 */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
+ sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(1));
+ if (sriox_status_reg.s.srio) {
+ srio_war15205 += 1; /* Port is SRIO */
+ port = 1;
+ }
+ }
+ result = cvmx_pcie_rc_initialize(1);
+ if (result == 0) {
+ uint32_t device0;
+ /* Memory offsets are physical addresses */
+ octeon_pcie1_controller.mem_offset =
+ cvmx_pcie_get_mem_base_address(1);
+ /*
+ * To calculate the address for accessing the 2nd PCIe device,
+ * either the 'io_map_base' (pci_iomap()) or the 'mips_io_port_base'
+ * (ioport_map()) value is added to
+ * pci_resource_start(dev, bar). The 'mips_io_port_base' is set
+ * only once, based on the first PCIe port, so 'io_map_base' is
+ * also set from the first port's value so that both routines
+ * work properly.
+ */
+ octeon_pcie1_controller.io_map_base =
+ CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(0));
+ /* IO offsets are Mips virtual addresses */
+ octeon_pcie1_controller.io_offset =
+ cvmx_pcie_get_io_base_address(1) -
+ cvmx_pcie_get_io_base_address(0);
+ /*
+ * To keep things similar to PCI, we start device
+ * addresses at the same place as PCI using big bar
+ * support. This normally translates to 4GB-256MB,
+ * which is the same as most x86 PCs.
+ */
+ octeon_pcie1_controller.mem_resource->start =
+ cvmx_pcie_get_mem_base_address(1) + (4ul << 30) -
+ (OCTEON_PCI_BAR1_HOLE_SIZE << 20);
+ octeon_pcie1_controller.mem_resource->end =
+ cvmx_pcie_get_mem_base_address(1) +
+ cvmx_pcie_get_mem_size(1) - 1;
+ /*
+ * Ports must be above 16KB for the ISA bus filtering
+ * in the PCI-X to PCI bridge.
+ */
+ octeon_pcie1_controller.io_resource->start =
+ cvmx_pcie_get_io_base_address(1) -
+ cvmx_pcie_get_io_base_address(0);
+ octeon_pcie1_controller.io_resource->end =
+ octeon_pcie1_controller.io_resource->start +
+ cvmx_pcie_get_io_size(1) - 1;
+ msleep(100); /* Some devices need extra time */
+ register_pci_controller(&octeon_pcie1_controller);
+ device0 = cvmx_pcie_config_read32(1, 0, 0, 0, 0);
+ enable_pcie_bus_num_war[1] =
+ device_needs_bus_num_war(device0);
+ }
+ } else {
+ pr_notice("PCIe: Port 1 not in root complex mode, skipping.\n");
+ /* CN63XX pass 1_x/2.0 errata PCIe-15205 */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
+ srio_war15205 += 1;
+ port = 1;
+ }
+ }
+
+ /*
+ * CN63XX pass 1_x/2.0 erratum PCIe-15205 requires setting all
+ * of the SRIO MACs' SLI_CTL_PORT*[INT*_MAP] to the same value and
+ * all of the PCIe MACs' SLI_CTL_PORT*[INT*_MAP] to a value
+ * different from the previously set one.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
+ if (srio_war15205 == 1) {
+ sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(port));
+ sli_ctl_portx.s.inta_map = 1;
+ sli_ctl_portx.s.intb_map = 1;
+ sli_ctl_portx.s.intc_map = 1;
+ sli_ctl_portx.s.intd_map = 1;
+ cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(port), sli_ctl_portx.u64);
+
+ sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(!port));
+ sli_ctl_portx.s.inta_map = 0;
+ sli_ctl_portx.s.intb_map = 0;
+ sli_ctl_portx.s.intc_map = 0;
+ sli_ctl_portx.s.intd_map = 0;
+ cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(!port), sli_ctl_portx.u64);
+ }
+ }
+
+ octeon_pci_dma_init();
+
+ return 0;
+}
+arch_initcall(octeon_pcie_setup);
diff --git a/arch/mips/pic32/Kconfig b/arch/mips/pic32/Kconfig
new file mode 100644
index 000000000..7acbb50c1
--- /dev/null
+++ b/arch/mips/pic32/Kconfig
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0
+if MACH_PIC32
+
+choice
+ prompt "Machine Type"
+
+config PIC32MZDA
+ bool "Microchip PIC32MZDA Platform"
+ select BOOT_ELF32
+ select BOOT_RAW
+ select CEVT_R4K
+ select CSRC_R4K
+ select DMA_NONCOHERENT
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select GPIOLIB
+ select COMMON_CLK
+ select CLKDEV_LOOKUP
+ select LIBFDT
+ select USE_OF
+ select PINCTRL
+ select PIC32_EVIC
+ help
+ Support for the Microchip PIC32MZDA microcontroller.
+
+ This is a 32-bit microcontroller with support for external or
+ internally packaged DDR2 memory up to 128MB.
+
+ For more information, see <http://www.microchip.com/>.
+
+endchoice
+
+choice
+ prompt "Devicetree selection"
+ default DTB_PIC32_NONE
+ help
+ Select the devicetree.
+
+config DTB_PIC32_NONE
+ bool "None"
+
+config DTB_PIC32_MZDA_SK
+ bool "PIC32MZDA Starter Kit"
+ depends on PIC32MZDA
+ select BUILTIN_DTB
+
+endchoice
+
+endif # MACH_PIC32
diff --git a/arch/mips/pic32/Makefile b/arch/mips/pic32/Makefile
new file mode 100644
index 000000000..6183e4a46
--- /dev/null
+++ b/arch/mips/pic32/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Joshua Henderson, <joshua.henderson@microchip.com>
+# Copyright (C) 2015 Microchip Technology, Inc. All rights reserved.
+#
+obj-$(CONFIG_MACH_PIC32) += common/
+obj-$(CONFIG_PIC32MZDA) += pic32mzda/
diff --git a/arch/mips/pic32/Platform b/arch/mips/pic32/Platform
new file mode 100644
index 000000000..1e92e52a1
--- /dev/null
+++ b/arch/mips/pic32/Platform
@@ -0,0 +1,6 @@
+#
+# PIC32MZDA
+#
+cflags-$(CONFIG_PIC32MZDA) += -I$(srctree)/arch/mips/include/asm/mach-pic32
+load-$(CONFIG_PIC32MZDA) += 0xffffffff88000000
+all-$(CONFIG_PIC32MZDA) := $(COMPRESSION_FNAME).bin
diff --git a/arch/mips/pic32/common/Makefile b/arch/mips/pic32/common/Makefile
new file mode 100644
index 000000000..a60750ab7
--- /dev/null
+++ b/arch/mips/pic32/common/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Joshua Henderson, <joshua.henderson@microchip.com>
+# Copyright (C) 2015 Microchip Technology, Inc. All rights reserved.
+#
+obj-y = reset.o irq.o
diff --git a/arch/mips/pic32/common/irq.c b/arch/mips/pic32/common/irq.c
new file mode 100644
index 000000000..fb00b797b
--- /dev/null
+++ b/arch/mips/pic32/common/irq.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/irqchip.h>
+#include <asm/irq.h>
+
+void __init arch_init_irq(void)
+{
+ irqchip_init();
+}
diff --git a/arch/mips/pic32/common/reset.c b/arch/mips/pic32/common/reset.c
new file mode 100644
index 000000000..a5fd7a8e2
--- /dev/null
+++ b/arch/mips/pic32/common/reset.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/pm.h>
+#include <asm/reboot.h>
+#include <asm/mach-pic32/pic32.h>
+
+#define PIC32_RSWRST 0x10
+
+static void pic32_halt(void)
+{
+ while (1) {
+ __asm__(".set push;\n"
+ ".set arch=r4000;\n"
+ "wait;\n"
+ ".set pop;\n"
+ );
+ }
+}
+
+static void pic32_machine_restart(char *command)
+{
+ void __iomem *reg =
+ ioremap(PIC32_BASE_RESET + PIC32_RSWRST, sizeof(u32));
+
+ pic32_syskey_unlock();
+
+ /* magic write/read */
+ __raw_writel(1, reg);
+ (void)__raw_readl(reg);
+
+ pic32_halt();
+}
+
+static void pic32_machine_halt(void)
+{
+ local_irq_disable();
+
+ pic32_halt();
+}
+
+static int __init mips_reboot_setup(void)
+{
+ _machine_restart = pic32_machine_restart;
+ _machine_halt = pic32_machine_halt;
+ pm_power_off = pic32_machine_halt;
+
+ return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/pic32/pic32mzda/Makefile b/arch/mips/pic32/pic32mzda/Makefile
new file mode 100644
index 000000000..3b505142b
--- /dev/null
+++ b/arch/mips/pic32/pic32mzda/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Joshua Henderson, <joshua.henderson@microchip.com>
+# Copyright (C) 2015 Microchip Technology, Inc. All rights reserved.
+#
+obj-y := config.o early_clk.o init.o time.o
+
+obj-$(CONFIG_EARLY_PRINTK) += early_console.o \
+ early_pin.o
diff --git a/arch/mips/pic32/pic32mzda/config.c b/arch/mips/pic32/pic32mzda/config.c
new file mode 100644
index 000000000..36afe1b5b
--- /dev/null
+++ b/arch/mips/pic32/pic32mzda/config.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Purna Chandra Mandal, purna.mandal@microchip.com
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+
+#include <asm/mach-pic32/pic32.h>
+
+#include "pic32mzda.h"
+
+#define PIC32_CFGCON 0x0000
+#define PIC32_DEVID 0x0020
+#define PIC32_SYSKEY 0x0030
+#define PIC32_CFGEBIA 0x00c0
+#define PIC32_CFGEBIC 0x00d0
+#define PIC32_CFGCON2 0x00f0
+#define PIC32_RCON 0x1240
+
+static void __iomem *pic32_conf_base;
+static DEFINE_SPINLOCK(config_lock);
+static u32 pic32_reset_status;
+
+static u32 pic32_conf_get_reg_field(u32 offset, u32 rshift, u32 mask)
+{
+ u32 v;
+
+ v = readl(pic32_conf_base + offset);
+ v >>= rshift;
+ v &= mask;
+
+ return v;
+}
+
+static u32 pic32_conf_modify_atomic(u32 offset, u32 mask, u32 set)
+{
+ u32 v;
+ unsigned long flags;
+
+ spin_lock_irqsave(&config_lock, flags);
+ v = readl(pic32_conf_base + offset);
+ v &= ~mask;
+ v |= (set & mask);
+ writel(v, pic32_conf_base + offset);
+ spin_unlock_irqrestore(&config_lock, flags);
+
+ return 0;
+}
+
+int pic32_enable_lcd(void)
+{
+ return pic32_conf_modify_atomic(PIC32_CFGCON2, BIT(31), BIT(31));
+}
+
+int pic32_disable_lcd(void)
+{
+ return pic32_conf_modify_atomic(PIC32_CFGCON2, BIT(31), 0);
+}
+
+int pic32_set_lcd_mode(int mode)
+{
+ u32 mask = mode ? BIT(30) : 0;
+
+ return pic32_conf_modify_atomic(PIC32_CFGCON2, BIT(30), mask);
+}
+
+int pic32_set_sdhci_adma_fifo_threshold(u32 rthrsh, u32 wthrsh)
+{
+ u32 clr, set;
+
+ clr = (0x3ff << 4) | (0x3ff << 16);
+ set = (rthrsh << 4) | (wthrsh << 16);
+ return pic32_conf_modify_atomic(PIC32_CFGCON2, clr, set);
+}
+
+void pic32_syskey_unlock_debug(const char *func, const ulong line)
+{
+ void __iomem *syskey = pic32_conf_base + PIC32_SYSKEY;
+
+ pr_debug("%s: called from %s:%lu\n", __func__, func, line);
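+ /* Documented PIC32 SYSKEY unlock sequence: clear SYSKEY, then write the two unlock keys */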
+ writel(0x00000000, syskey);
+ writel(0xAA996655, syskey);
+ writel(0x556699AA, syskey);
+}
+
+static u32 pic32_get_device_id(void)
+{
+ return pic32_conf_get_reg_field(PIC32_DEVID, 0, 0x0fffffff);
+}
+
+static u32 pic32_get_device_version(void)
+{
+ return pic32_conf_get_reg_field(PIC32_DEVID, 28, 0xf);
+}
+
+u32 pic32_get_boot_status(void)
+{
+ return pic32_reset_status;
+}
+EXPORT_SYMBOL(pic32_get_boot_status);
+
+void __init pic32_config_init(void)
+{
+ pic32_conf_base = ioremap(PIC32_BASE_CONFIG, 0x110);
+ if (!pic32_conf_base)
+ panic("pic32: config base not mapped");
+
+ /* Boot Status */
+ pic32_reset_status = readl(pic32_conf_base + PIC32_RCON);
+ writel(-1, PIC32_CLR(pic32_conf_base + PIC32_RCON));
+
+ /* Device Information */
+ pr_info("Device Id: 0x%08x, Device Ver: 0x%04x\n",
+ pic32_get_device_id(),
+ pic32_get_device_version());
+}
diff --git a/arch/mips/pic32/pic32mzda/early_clk.c b/arch/mips/pic32/pic32mzda/early_clk.c
new file mode 100644
index 000000000..6001e507d
--- /dev/null
+++ b/arch/mips/pic32/pic32mzda/early_clk.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ */
+#include <asm/mach-pic32/pic32.h>
+
+#include "pic32mzda.h"
+
+/* Oscillators, PLL & clocks */
+#define ICLK_MASK 0x00000080
+#define PLLDIV_MASK 0x00000007
+#define CUROSC_MASK 0x00000007
+#define PLLMUL_MASK 0x0000007F
+#define PB_MASK 0x00000007
+#define FRC1 0
+#define FRC2 7
+#define SPLL 1
+#define POSC 2
+#define FRC_CLK 8000000
+
+#define PIC32_POSC_FREQ 24000000
+
+#define OSCCON 0x0000
+#define SPLLCON 0x0020
+#define PB1DIV 0x0140
+
+u32 pic32_get_sysclk(void)
+{
+ u32 osc_freq = 0;
+ u32 pllclk;
+ u32 frcdivn;
+ u32 osccon;
+ u32 spllcon;
+ int curr_osc;
+
+ u32 plliclk;
+ u32 pllidiv;
+ u32 pllodiv;
+ u32 pllmult;
+ u32 frcdiv;
+
+ void __iomem *osc_base = ioremap(PIC32_BASE_OSC, 0x200);
+
+ osccon = __raw_readl(osc_base + OSCCON);
+ spllcon = __raw_readl(osc_base + SPLLCON);
+
+ plliclk = (spllcon & ICLK_MASK);
+ pllidiv = ((spllcon >> 8) & PLLDIV_MASK) + 1;
+ pllodiv = ((spllcon >> 24) & PLLDIV_MASK);
+ pllmult = ((spllcon >> 16) & PLLMUL_MASK) + 1;
+ frcdiv = ((osccon >> 24) & PLLDIV_MASK);
+
+ pllclk = plliclk ? FRC_CLK : PIC32_POSC_FREQ;
+ frcdivn = ((1 << frcdiv) + 1) + (128 * (frcdiv == 7));
+
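+ /* Map the PLLODIV field to the output divisor: 0-1 => /2, 2 => /4, 3 => /8, 4 => /16, 5 and up => /32 */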
+ if (pllodiv < 2)
+ pllodiv = 2;
+ else if (pllodiv < 5)
+ pllodiv = (1 << pllodiv);
+ else
+ pllodiv = 32;
+
+ curr_osc = (int)((osccon >> 12) & CUROSC_MASK);
+
+ switch (curr_osc) {
+ case FRC1:
+ case FRC2:
+ osc_freq = FRC_CLK / frcdivn;
+ break;
+ case SPLL:
+ osc_freq = ((pllclk / pllidiv) * pllmult) / pllodiv;
+ break;
+ case POSC:
+ osc_freq = PIC32_POSC_FREQ;
+ break;
+ default:
+ break;
+ }
+
+ iounmap(osc_base);
+
+ return osc_freq;
+}
+
+u32 pic32_get_pbclk(int bus)
+{
+ u32 clk_freq;
+ void __iomem *osc_base = ioremap(PIC32_BASE_OSC, 0x200);
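+ /* PBxDIV registers are spaced 0x10 apart, starting at PB1DIV for bus 1 */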
+ u32 pbxdiv = PB1DIV + ((bus - 1) * 0x10);
+ u32 pbdiv = (__raw_readl(osc_base + pbxdiv) & PB_MASK) + 1;
+
+ iounmap(osc_base);
+
+ clk_freq = pic32_get_sysclk();
+
+ return clk_freq / pbdiv;
+}
diff --git a/arch/mips/pic32/pic32mzda/early_console.c b/arch/mips/pic32/pic32mzda/early_console.c
new file mode 100644
index 000000000..3cd1b408f
--- /dev/null
+++ b/arch/mips/pic32/pic32mzda/early_console.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ */
+#include <asm/mach-pic32/pic32.h>
+#include <asm/fw/fw.h>
+#include <asm/setup.h>
+
+#include "pic32mzda.h"
+#include "early_pin.h"
+
+/* Default early console parameters */
+#define EARLY_CONSOLE_PORT 1
+#define EARLY_CONSOLE_BAUDRATE 115200
+
+#define UART_ENABLE BIT(15)
+#define UART_ENABLE_RX BIT(12)
+#define UART_ENABLE_TX BIT(10)
+#define UART_TX_FULL BIT(9)
+
+/* UART1(x == 0) - UART6(x == 5) */
+#define UART_BASE(x) ((x) * 0x0200)
+#define U_MODE(x) UART_BASE(x)
+#define U_STA(x) (UART_BASE(x) + 0x10)
+#define U_TXR(x) (UART_BASE(x) + 0x20)
+#define U_BRG(x) (UART_BASE(x) + 0x40)
+
+static void __iomem *uart_base;
+static int console_port = -1;
+
+static int __init configure_uart_pins(int port)
+{
+ switch (port) {
+ case 1:
+ pic32_pps_input(IN_FUNC_U2RX, IN_RPB0);
+ pic32_pps_output(OUT_FUNC_U2TX, OUT_RPG9);
+ break;
+ case 5:
+ pic32_pps_input(IN_FUNC_U6RX, IN_RPD0);
+ pic32_pps_output(OUT_FUNC_U6TX, OUT_RPB8);
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static void __init configure_uart(int port, int baud)
+{
+ u32 pbclk;
+
+ pbclk = pic32_get_pbclk(2);
+
+ __raw_writel(0, uart_base + U_MODE(port));
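+ /* Standard 16x oversampling divisor: U_BRG = PBCLK / (16 * baud) - 1 */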
+ __raw_writel(((pbclk / baud) / 16) - 1, uart_base + U_BRG(port));
+ __raw_writel(UART_ENABLE, uart_base + U_MODE(port));
+ __raw_writel(UART_ENABLE_TX | UART_ENABLE_RX,
+ uart_base + PIC32_SET(U_STA(port)));
+}
+
+static void __init setup_early_console(int port, int baud)
+{
+ if (configure_uart_pins(port))
+ return;
+
+ console_port = port;
+ configure_uart(console_port, baud);
+}
+
+static char * __init pic32_getcmdline(void)
+{
+ /*
+ * arch_mem_init() has not been called yet, so we don't have a real
+ * command line setup if using CONFIG_CMDLINE_BOOL.
+ */
+#ifdef CONFIG_CMDLINE_OVERRIDE
+ return CONFIG_CMDLINE;
+#else
+ return fw_getcmdline();
+#endif
+}
+
+static int __init get_port_from_cmdline(char *arch_cmdline)
+{
+ char *s;
+ int port = -1;
+
+ if (!arch_cmdline || *arch_cmdline == '\0')
+ goto _out;
+
+ s = strstr(arch_cmdline, "earlyprintk=");
+ if (s) {
+ s = strstr(s, "ttyS");
+ if (s)
+ s += 4;
+ else
+ goto _out;
+
+ port = (*s) - '0';
+ }
+
+_out:
+ return port;
+}
+
+static int __init get_baud_from_cmdline(char *arch_cmdline)
+{
+ char *s;
+ int baud = -1;
+
+ if (!arch_cmdline || *arch_cmdline == '\0')
+ goto _out;
+
+ s = strstr(arch_cmdline, "earlyprintk=");
+ if (s) {
+ s = strstr(s, "ttyS");
+ if (s)
+ s += 6;
+ else
+ goto _out;
+
+ baud = 0;
+ while (*s >= '0' && *s <= '9')
+ baud = baud * 10 + *s++ - '0';
+ }
+
+_out:
+ return baud;
+}
+
+void __init fw_init_early_console(void)
+{
+ char *arch_cmdline = pic32_getcmdline();
+ int baud, port;
+
+ uart_base = ioremap(PIC32_BASE_UART, 0xc00);
+
+ baud = get_baud_from_cmdline(arch_cmdline);
+ port = get_port_from_cmdline(arch_cmdline);
+
+ if (port == -1)
+ port = EARLY_CONSOLE_PORT;
+
+ if (baud == -1)
+ baud = EARLY_CONSOLE_BAUDRATE;
+
+ setup_early_console(port, baud);
+}
+
+void prom_putchar(char c)
+{
+ if (console_port >= 0) {
+ while (__raw_readl(
+ uart_base + U_STA(console_port)) & UART_TX_FULL)
+ ;
+
+ __raw_writel(c, uart_base + U_TXR(console_port));
+ }
+}
diff --git a/arch/mips/pic32/pic32mzda/early_pin.c b/arch/mips/pic32/pic32mzda/early_pin.c
new file mode 100644
index 000000000..f2822632b
--- /dev/null
+++ b/arch/mips/pic32/pic32mzda/early_pin.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ */
+#include <asm/io.h>
+
+#include "early_pin.h"
+
+#define PPS_BASE 0x1f800000
+
+/* Input PPS Registers */
+#define INT1R 0x1404
+#define INT2R 0x1408
+#define INT3R 0x140C
+#define INT4R 0x1410
+#define T2CKR 0x1418
+#define T3CKR 0x141C
+#define T4CKR 0x1420
+#define T5CKR 0x1424
+#define T6CKR 0x1428
+#define T7CKR 0x142C
+#define T8CKR 0x1430
+#define T9CKR 0x1434
+#define IC1R 0x1438
+#define IC2R 0x143C
+#define IC3R 0x1440
+#define IC4R 0x1444
+#define IC5R 0x1448
+#define IC6R 0x144C
+#define IC7R 0x1450
+#define IC8R 0x1454
+#define IC9R 0x1458
+#define OCFAR 0x1460
+#define U1RXR 0x1468
+#define U1CTSR 0x146C
+#define U2RXR 0x1470
+#define U2CTSR 0x1474
+#define U3RXR 0x1478
+#define U3CTSR 0x147C
+#define U4RXR 0x1480
+#define U4CTSR 0x1484
+#define U5RXR 0x1488
+#define U5CTSR 0x148C
+#define U6RXR 0x1490
+#define U6CTSR 0x1494
+#define SDI1R 0x149C
+#define SS1R 0x14A0
+#define SDI2R 0x14A8
+#define SS2R 0x14AC
+#define SDI3R 0x14B4
+#define SS3R 0x14B8
+#define SDI4R 0x14C0
+#define SS4R 0x14C4
+#define SDI5R 0x14CC
+#define SS5R 0x14D0
+#define SDI6R 0x14D8
+#define SS6R 0x14DC
+#define C1RXR 0x14E0
+#define C2RXR 0x14E4
+#define REFCLKI1R 0x14E8
+#define REFCLKI3R 0x14F0
+#define REFCLKI4R 0x14F4
+
+static const struct
+{
+ int function;
+ int reg;
+} input_pin_reg[] = {
+ { IN_FUNC_INT3, INT3R },
+ { IN_FUNC_T2CK, T2CKR },
+ { IN_FUNC_T6CK, T6CKR },
+ { IN_FUNC_IC3, IC3R },
+ { IN_FUNC_IC7, IC7R },
+ { IN_FUNC_U1RX, U1RXR },
+ { IN_FUNC_U2CTS, U2CTSR },
+ { IN_FUNC_U5RX, U5RXR },
+ { IN_FUNC_U6CTS, U6CTSR },
+ { IN_FUNC_SDI1, SDI1R },
+ { IN_FUNC_SDI3, SDI3R },
+ { IN_FUNC_SDI5, SDI5R },
+ { IN_FUNC_SS6, SS6R },
+ { IN_FUNC_REFCLKI1, REFCLKI1R },
+ { IN_FUNC_INT4, INT4R },
+ { IN_FUNC_T5CK, T5CKR },
+ { IN_FUNC_T7CK, T7CKR },
+ { IN_FUNC_IC4, IC4R },
+ { IN_FUNC_IC8, IC8R },
+ { IN_FUNC_U3RX, U3RXR },
+ { IN_FUNC_U4CTS, U4CTSR },
+ { IN_FUNC_SDI2, SDI2R },
+ { IN_FUNC_SDI4, SDI4R },
+ { IN_FUNC_C1RX, C1RXR },
+ { IN_FUNC_REFCLKI4, REFCLKI4R },
+ { IN_FUNC_INT2, INT2R },
+ { IN_FUNC_T3CK, T3CKR },
+ { IN_FUNC_T8CK, T8CKR },
+ { IN_FUNC_IC2, IC2R },
+ { IN_FUNC_IC5, IC5R },
+ { IN_FUNC_IC9, IC9R },
+ { IN_FUNC_U1CTS, U1CTSR },
+ { IN_FUNC_U2RX, U2RXR },
+ { IN_FUNC_U5CTS, U5CTSR },
+ { IN_FUNC_SS1, SS1R },
+ { IN_FUNC_SS3, SS3R },
+ { IN_FUNC_SS4, SS4R },
+ { IN_FUNC_SS5, SS5R },
+ { IN_FUNC_C2RX, C2RXR },
+ { IN_FUNC_INT1, INT1R },
+ { IN_FUNC_T4CK, T4CKR },
+ { IN_FUNC_T9CK, T9CKR },
+ { IN_FUNC_IC1, IC1R },
+ { IN_FUNC_IC6, IC6R },
+ { IN_FUNC_U3CTS, U3CTSR },
+ { IN_FUNC_U4RX, U4RXR },
+ { IN_FUNC_U6RX, U6RXR },
+ { IN_FUNC_SS2, SS2R },
+ { IN_FUNC_SDI6, SDI6R },
+ { IN_FUNC_OCFA, OCFAR },
+ { IN_FUNC_REFCLKI3, REFCLKI3R },
+};
+
+void pic32_pps_input(int function, int pin)
+{
+ void __iomem *pps_base = ioremap(PPS_BASE, 0xF4);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(input_pin_reg); i++) {
+ if (input_pin_reg[i].function == function) {
+ __raw_writel(pin, pps_base + input_pin_reg[i].reg);
+ break;
+ }
+ }
+
+ iounmap(pps_base);
+}
+
+/* Output PPS Registers */
+#define RPA14R 0x1538
+#define RPA15R 0x153C
+#define RPB0R 0x1540
+#define RPB1R 0x1544
+#define RPB2R 0x1548
+#define RPB3R 0x154C
+#define RPB5R 0x1554
+#define RPB6R 0x1558
+#define RPB7R 0x155C
+#define RPB8R 0x1560
+#define RPB9R 0x1564
+#define RPB10R 0x1568
+#define RPB14R 0x1578
+#define RPB15R 0x157C
+#define RPC1R 0x1584
+#define RPC2R 0x1588
+#define RPC3R 0x158C
+#define RPC4R 0x1590
+#define RPC13R 0x15B4
+#define RPC14R 0x15B8
+#define RPD0R 0x15C0
+#define RPD1R 0x15C4
+#define RPD2R 0x15C8
+#define RPD3R 0x15CC
+#define RPD4R 0x15D0
+#define RPD5R 0x15D4
+#define RPD6R 0x15D8
+#define RPD7R 0x15DC
+#define RPD9R 0x15E4
+#define RPD10R 0x15E8
+#define RPD11R 0x15EC
+#define RPD12R 0x15F0
+#define RPD14R 0x15F8
+#define RPD15R 0x15FC
+#define RPE3R 0x160C
+#define RPE5R 0x1614
+#define RPE8R 0x1620
+#define RPE9R 0x1624
+#define RPF0R 0x1640
+#define RPF1R 0x1644
+#define RPF2R 0x1648
+#define RPF3R 0x164C
+#define RPF4R 0x1650
+#define RPF5R 0x1654
+#define RPF8R 0x1660
+#define RPF12R 0x1670
+#define RPF13R 0x1674
+#define RPG0R 0x1680
+#define RPG1R 0x1684
+#define RPG6R 0x1698
+#define RPG7R 0x169C
+#define RPG8R 0x16A0
+#define RPG9R 0x16A4
+
+static const struct
+{
+ int pin;
+ int reg;
+} output_pin_reg[] = {
+ { OUT_RPD2, RPD2R },
+ { OUT_RPG8, RPG8R },
+ { OUT_RPF4, RPF4R },
+ { OUT_RPD10, RPD10R },
+ { OUT_RPF1, RPF1R },
+ { OUT_RPB9, RPB9R },
+ { OUT_RPB10, RPB10R },
+ { OUT_RPC14, RPC14R },
+ { OUT_RPB5, RPB5R },
+ { OUT_RPC1, RPC1R },
+ { OUT_RPD14, RPD14R },
+ { OUT_RPG1, RPG1R },
+ { OUT_RPA14, RPA14R },
+ { OUT_RPD6, RPD6R },
+ { OUT_RPD3, RPD3R },
+ { OUT_RPG7, RPG7R },
+ { OUT_RPF5, RPF5R },
+ { OUT_RPD11, RPD11R },
+ { OUT_RPF0, RPF0R },
+ { OUT_RPB1, RPB1R },
+ { OUT_RPE5, RPE5R },
+ { OUT_RPC13, RPC13R },
+ { OUT_RPB3, RPB3R },
+ { OUT_RPC4, RPC4R },
+ { OUT_RPD15, RPD15R },
+ { OUT_RPG0, RPG0R },
+ { OUT_RPA15, RPA15R },
+ { OUT_RPD7, RPD7R },
+ { OUT_RPD9, RPD9R },
+ { OUT_RPG6, RPG6R },
+ { OUT_RPB8, RPB8R },
+ { OUT_RPB15, RPB15R },
+ { OUT_RPD4, RPD4R },
+ { OUT_RPB0, RPB0R },
+ { OUT_RPE3, RPE3R },
+ { OUT_RPB7, RPB7R },
+ { OUT_RPF12, RPF12R },
+ { OUT_RPD12, RPD12R },
+ { OUT_RPF8, RPF8R },
+ { OUT_RPC3, RPC3R },
+ { OUT_RPE9, RPE9R },
+ { OUT_RPD1, RPD1R },
+ { OUT_RPG9, RPG9R },
+ { OUT_RPB14, RPB14R },
+ { OUT_RPD0, RPD0R },
+ { OUT_RPB6, RPB6R },
+ { OUT_RPD5, RPD5R },
+ { OUT_RPB2, RPB2R },
+ { OUT_RPF3, RPF3R },
+ { OUT_RPF13, RPF13R },
+ { OUT_RPC2, RPC2R },
+ { OUT_RPE8, RPE8R },
+ { OUT_RPF2, RPF2R },
+};
+
+void pic32_pps_output(int function, int pin)
+{
+ void __iomem *pps_base = ioremap(PPS_BASE, 0x170);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(output_pin_reg); i++) {
+ if (output_pin_reg[i].pin == pin) {
+ __raw_writel(function,
+ pps_base + output_pin_reg[i].reg);
+ break;
+ }
+ }
+
+ iounmap(pps_base);
+}
diff --git a/arch/mips/pic32/pic32mzda/early_pin.h b/arch/mips/pic32/pic32mzda/early_pin.h
new file mode 100644
index 000000000..60e9c3210
--- /dev/null
+++ b/arch/mips/pic32/pic32mzda/early_pin.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ */
+#ifndef _PIC32MZDA_EARLY_PIN_H
+#define _PIC32MZDA_EARLY_PIN_H
+
+/*
+ * This is a complete, yet overly simplistic and unoptimized, PIC32MZDA PPS
+ * configuration only useful before we have full pinctrl initialized.
+ */
+
+/* Input PPS Functions */
+enum {
+ IN_FUNC_INT3,
+ IN_FUNC_T2CK,
+ IN_FUNC_T6CK,
+ IN_FUNC_IC3,
+ IN_FUNC_IC7,
+ IN_FUNC_U1RX,
+ IN_FUNC_U2CTS,
+ IN_FUNC_U5RX,
+ IN_FUNC_U6CTS,
+ IN_FUNC_SDI1,
+ IN_FUNC_SDI3,
+ IN_FUNC_SDI5,
+ IN_FUNC_SS6,
+ IN_FUNC_REFCLKI1,
+ IN_FUNC_INT4,
+ IN_FUNC_T5CK,
+ IN_FUNC_T7CK,
+ IN_FUNC_IC4,
+ IN_FUNC_IC8,
+ IN_FUNC_U3RX,
+ IN_FUNC_U4CTS,
+ IN_FUNC_SDI2,
+ IN_FUNC_SDI4,
+ IN_FUNC_C1RX,
+ IN_FUNC_REFCLKI4,
+ IN_FUNC_INT2,
+ IN_FUNC_T3CK,
+ IN_FUNC_T8CK,
+ IN_FUNC_IC2,
+ IN_FUNC_IC5,
+ IN_FUNC_IC9,
+ IN_FUNC_U1CTS,
+ IN_FUNC_U2RX,
+ IN_FUNC_U5CTS,
+ IN_FUNC_SS1,
+ IN_FUNC_SS3,
+ IN_FUNC_SS4,
+ IN_FUNC_SS5,
+ IN_FUNC_C2RX,
+ IN_FUNC_INT1,
+ IN_FUNC_T4CK,
+ IN_FUNC_T9CK,
+ IN_FUNC_IC1,
+ IN_FUNC_IC6,
+ IN_FUNC_U3CTS,
+ IN_FUNC_U4RX,
+ IN_FUNC_U6RX,
+ IN_FUNC_SS2,
+ IN_FUNC_SDI6,
+ IN_FUNC_OCFA,
+ IN_FUNC_REFCLKI3,
+};
+
+/* Input PPS Pins */
+#define IN_RPD2 0x00
+#define IN_RPG8 0x01
+#define IN_RPF4 0x02
+#define IN_RPD10 0x03
+#define IN_RPF1 0x04
+#define IN_RPB9 0x05
+#define IN_RPB10 0x06
+#define IN_RPC14 0x07
+#define IN_RPB5 0x08
+#define IN_RPC1 0x0A
+#define IN_RPD14 0x0B
+#define IN_RPG1 0x0C
+#define IN_RPA14 0x0D
+#define IN_RPD6 0x0E
+#define IN_RPD3 0x00
+#define IN_RPG7 0x01
+#define IN_RPF5 0x02
+#define IN_RPD11 0x03
+#define IN_RPF0 0x04
+#define IN_RPB1 0x05
+#define IN_RPE5 0x06
+#define IN_RPC13 0x07
+#define IN_RPB3 0x08
+#define IN_RPC4 0x0A
+#define IN_RPD15 0x0B
+#define IN_RPG0 0x0C
+#define IN_RPA15 0x0D
+#define IN_RPD7 0x0E
+#define IN_RPD9 0x00
+#define IN_RPG6 0x01
+#define IN_RPB8 0x02
+#define IN_RPB15 0x03
+#define IN_RPD4 0x04
+#define IN_RPB0 0x05
+#define IN_RPE3 0x06
+#define IN_RPB7 0x07
+#define IN_RPF12 0x09
+#define IN_RPD12 0x0A
+#define IN_RPF8 0x0B
+#define IN_RPC3 0x0C
+#define IN_RPE9 0x0D
+#define IN_RPD1 0x00
+#define IN_RPG9 0x01
+#define IN_RPB14 0x02
+#define IN_RPD0 0x03
+#define IN_RPB6 0x05
+#define IN_RPD5 0x06
+#define IN_RPB2 0x07
+#define IN_RPF3 0x08
+#define IN_RPF13 0x09
+#define IN_RPF2 0x0B
+#define IN_RPC2 0x0C
+#define IN_RPE8 0x0D
+
+/* Output PPS Pins */
+enum {
+ OUT_RPD2,
+ OUT_RPG8,
+ OUT_RPF4,
+ OUT_RPD10,
+ OUT_RPF1,
+ OUT_RPB9,
+ OUT_RPB10,
+ OUT_RPC14,
+ OUT_RPB5,
+ OUT_RPC1,
+ OUT_RPD14,
+ OUT_RPG1,
+ OUT_RPA14,
+ OUT_RPD6,
+ OUT_RPD3,
+ OUT_RPG7,
+ OUT_RPF5,
+ OUT_RPD11,
+ OUT_RPF0,
+ OUT_RPB1,
+ OUT_RPE5,
+ OUT_RPC13,
+ OUT_RPB3,
+ OUT_RPC4,
+ OUT_RPD15,
+ OUT_RPG0,
+ OUT_RPA15,
+ OUT_RPD7,
+ OUT_RPD9,
+ OUT_RPG6,
+ OUT_RPB8,
+ OUT_RPB15,
+ OUT_RPD4,
+ OUT_RPB0,
+ OUT_RPE3,
+ OUT_RPB7,
+ OUT_RPF12,
+ OUT_RPD12,
+ OUT_RPF8,
+ OUT_RPC3,
+ OUT_RPE9,
+ OUT_RPD1,
+ OUT_RPG9,
+ OUT_RPB14,
+ OUT_RPD0,
+ OUT_RPB6,
+ OUT_RPD5,
+ OUT_RPB2,
+ OUT_RPF3,
+ OUT_RPF13,
+ OUT_RPC2,
+ OUT_RPE8,
+ OUT_RPF2,
+};
+
+/* Output PPS Functions */
+#define OUT_FUNC_U3TX 0x01
+#define OUT_FUNC_U4RTS 0x02
+#define OUT_FUNC_SDO1 0x05
+#define OUT_FUNC_SDO2 0x06
+#define OUT_FUNC_SDO3 0x07
+#define OUT_FUNC_SDO5 0x09
+#define OUT_FUNC_SS6 0x0A
+#define OUT_FUNC_OC3 0x0B
+#define OUT_FUNC_OC6 0x0C
+#define OUT_FUNC_REFCLKO4 0x0D
+#define OUT_FUNC_C2OUT 0x0E
+#define OUT_FUNC_C1TX 0x0F
+#define OUT_FUNC_U1TX 0x01
+#define OUT_FUNC_U2RTS 0x02
+#define OUT_FUNC_U5TX 0x03
+#define OUT_FUNC_U6RTS 0x04
+#define OUT_FUNC_SDO1 0x05
+#define OUT_FUNC_SDO2 0x06
+#define OUT_FUNC_SDO3 0x07
+#define OUT_FUNC_SDO4 0x08
+#define OUT_FUNC_SDO5 0x09
+#define OUT_FUNC_OC4 0x0B
+#define OUT_FUNC_OC7 0x0C
+#define OUT_FUNC_REFCLKO1 0x0F
+#define OUT_FUNC_U3RTS 0x01
+#define OUT_FUNC_U4TX 0x02
+#define OUT_FUNC_U6TX 0x04
+#define OUT_FUNC_SS1 0x05
+#define OUT_FUNC_SS3 0x07
+#define OUT_FUNC_SS4 0x08
+#define OUT_FUNC_SS5 0x09
+#define OUT_FUNC_SDO6 0x0A
+#define OUT_FUNC_OC5 0x0B
+#define OUT_FUNC_OC8 0x0C
+#define OUT_FUNC_C1OUT 0x0E
+#define OUT_FUNC_REFCLKO3 0x0F
+#define OUT_FUNC_U1RTS 0x01
+#define OUT_FUNC_U2TX 0x02
+#define OUT_FUNC_U5RTS 0x03
+#define OUT_FUNC_U6TX 0x04
+#define OUT_FUNC_SS2 0x06
+#define OUT_FUNC_SDO4 0x08
+#define OUT_FUNC_SDO6 0x0A
+#define OUT_FUNC_OC2 0x0B
+#define OUT_FUNC_OC1 0x0C
+#define OUT_FUNC_OC9 0x0D
+#define OUT_FUNC_C2TX 0x0F
+
+void pic32_pps_input(int function, int pin);
+void pic32_pps_output(int function, int pin);
+
+#endif
diff --git a/arch/mips/pic32/pic32mzda/init.c b/arch/mips/pic32/pic32mzda/init.c
new file mode 100644
index 000000000..488c0bee7
--- /dev/null
+++ b/arch/mips/pic32/pic32mzda/init.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Joshua Henderson, joshua.henderson@microchip.com
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/platform_data/sdhci-pic32.h>
+
+#include <asm/fw/fw.h>
+#include <asm/mips-boards/generic.h>
+#include <asm/prom.h>
+
+#include "pic32mzda.h"
+
+const char *get_system_type(void)
+{
+ return "PIC32MZDA";
+}
+
+static ulong get_fdtaddr(void)
+{
+ ulong ftaddr = 0;
+
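+ /* Prefer a DTB handed over by the bootloader; fall back to the builtin one. */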
+ if (fw_passed_dtb && !fw_arg2 && !fw_arg3)
+ return (ulong)fw_passed_dtb;
+
+ if (&__dtb_start < &__dtb_end)
+ ftaddr = (ulong)__dtb_start;
+
+ return ftaddr;
+}
+
+void __init plat_mem_setup(void)
+{
+ void *dtb;
+
+ dtb = (void *)get_fdtaddr();
+ if (!dtb) {
+ pr_err("pic32: no DTB found.\n");
+ return;
+ }
+
+ /*
+ * Load the device tree. This causes the chosen node to be parsed,
+ * which makes our memory regions available.
+ */
+ __dt_setup_arch(dtb);
+
+ pr_info("Found following command lines\n");
+ pr_info(" boot_command_line: %s\n", boot_command_line);
+ pr_info(" arcs_cmdline : %s\n", arcs_cmdline);
+#ifdef CONFIG_CMDLINE_BOOL
+ pr_info(" builtin_cmdline : %s\n", CONFIG_CMDLINE);
+#endif
+ if (dtb != __dtb_start)
+ strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+
+#ifdef CONFIG_EARLY_PRINTK
+ fw_init_early_console();
+#endif
+ pic32_config_init();
+}
+
+static __init void pic32_init_cmdline(int argc, char *argv[])
+{
+ unsigned int count = COMMAND_LINE_SIZE - 1;
+ int i;
+ char *dst = &(arcs_cmdline[0]);
+ char *src;
+
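+ /* Concatenate the firmware-provided arguments, separated by spaces. */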
+ for (i = 1; i < argc && count; ++i) {
+ src = argv[i];
+ while (*src && count) {
+ *dst++ = *src++;
+ --count;
+ }
+ *dst++ = ' ';
+ }
+ if (i > 1)
+ --dst;
+
+ *dst = 0;
+}
+
+void __init prom_init(void)
+{
+ pic32_init_cmdline((int)fw_arg0, (char **)fw_arg1);
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
+
+void __init device_tree_init(void)
+{
+ if (!initial_boot_params)
+ return;
+
+ unflatten_and_copy_device_tree();
+}
+
+static struct pic32_sdhci_platform_data sdhci_data = {
+ .setup_dma = pic32_set_sdhci_adma_fifo_threshold,
+};
+
+static struct of_dev_auxdata pic32_auxdata_lookup[] __initdata = {
+ OF_DEV_AUXDATA("microchip,pic32mzda-sdhci", 0, "sdhci", &sdhci_data),
+ { /* sentinel */}
+};
+
+static int __init pic32_of_prepare_platform_data(struct of_dev_auxdata *lookup)
+{
+ struct device_node *root, *np;
+ struct resource res;
+
+ root = of_find_node_by_path("/");
+
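+ /* Resolve each auxdata entry's name and base address from the device tree. */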
+ for (; lookup->compatible; lookup++) {
+ np = of_find_compatible_node(NULL, NULL, lookup->compatible);
+ if (np) {
+ lookup->name = (char *)np->name;
+ if (lookup->phys_addr)
+ continue;
+ if (!of_address_to_resource(np, 0, &res))
+ lookup->phys_addr = res.start;
+ }
+ }
+
+ return 0;
+}
+
+static int __init plat_of_setup(void)
+{
+ if (!of_have_populated_dt())
+ panic("Device tree not present");
+
+ pic32_of_prepare_platform_data(pic32_auxdata_lookup);
+ if (of_platform_default_populate(NULL, pic32_auxdata_lookup, NULL))
+ panic("Failed to populate DT");
+
+ return 0;
+}
+arch_initcall(plat_of_setup);
diff --git a/arch/mips/pic32/pic32mzda/pic32mzda.h b/arch/mips/pic32/pic32mzda/pic32mzda.h
new file mode 100644
index 000000000..b7a93d8fe
--- /dev/null
+++ b/arch/mips/pic32/pic32mzda/pic32mzda.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ */
+#ifndef PIC32MZDA_COMMON_H
+#define PIC32MZDA_COMMON_H
+
+/* early clock */
+u32 pic32_get_pbclk(int bus);
+u32 pic32_get_sysclk(void);
+
+/* Device configuration */
+void __init pic32_config_init(void);
+int pic32_set_lcd_mode(int mode);
+int pic32_set_sdhci_adma_fifo_threshold(u32 rthrs, u32 wthrs);
+u32 pic32_get_boot_status(void);
+int pic32_disable_lcd(void);
+int pic32_enable_lcd(void);
+
+#endif
diff --git a/arch/mips/pic32/pic32mzda/time.c b/arch/mips/pic32/pic32mzda/time.c
new file mode 100644
index 000000000..7174e9abb
--- /dev/null
+++ b/arch/mips/pic32/pic32mzda/time.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_clk.h>
+#include <linux/of_irq.h>
+
+#include <asm/time.h>
+
+#include "pic32mzda.h"
+
+static const struct of_device_id pic32_infra_match[] = {
+ { .compatible = "microchip,pic32mzda-infra", },
+ { },
+};
+
+#define DEFAULT_CORE_TIMER_INTERRUPT 0
+
+static unsigned int pic32_xlate_core_timer_irq(void)
+{
+ struct device_node *node;
+ unsigned int irq;
+
+ node = of_find_matching_node(NULL, pic32_infra_match);
+
+ if (WARN_ON(!node))
+ goto default_map;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq)
+ goto default_map;
+
+ return irq;
+
+default_map:
+
+ return irq_create_mapping(NULL, DEFAULT_CORE_TIMER_INTERRUPT);
+}
+
+unsigned int get_c0_compare_int(void)
+{
+ return pic32_xlate_core_timer_irq();
+}
+
+void __init plat_time_init(void)
+{
+ unsigned long rate = pic32_get_pbclk(7);
+
+ of_clk_init(NULL);
+
+ pr_info("CPU Clock: %ldMHz\n", rate / 1000000);
+ mips_hpt_frequency = rate / 2;
+
+ timer_probe();
+}
diff --git a/arch/mips/pistachio/Kconfig b/arch/mips/pistachio/Kconfig
new file mode 100644
index 000000000..9a0e06c95
--- /dev/null
+++ b/arch/mips/pistachio/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+config PISTACHIO_GPTIMER_CLKSRC
+ bool "Enable General Purpose Timer based clocksource"
+ depends on MACH_PISTACHIO
+ select CLKSRC_PISTACHIO
+ select MIPS_EXTERNAL_TIMER
+ help
+ This option enables a clocksource driver based on a Pistachio
+ SoC General Purpose external timer.
+
+ If you want to use CPUFreq, you need to enable this option;
+ otherwise it can be left disabled.
diff --git a/arch/mips/pistachio/Makefile b/arch/mips/pistachio/Makefile
new file mode 100644
index 000000000..66f4af17f
--- /dev/null
+++ b/arch/mips/pistachio/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += init.o irq.o time.o
diff --git a/arch/mips/pistachio/Platform b/arch/mips/pistachio/Platform
new file mode 100644
index 000000000..f73a1a929
--- /dev/null
+++ b/arch/mips/pistachio/Platform
@@ -0,0 +1,8 @@
+#
+# IMG Pistachio SoC
+#
+cflags-$(CONFIG_MACH_PISTACHIO) += \
+ -I$(srctree)/arch/mips/include/asm/mach-pistachio
+load-$(CONFIG_MACH_PISTACHIO) += 0xffffffff80400000
+zload-$(CONFIG_MACH_PISTACHIO) += 0xffffffff81000000
+all-$(CONFIG_MACH_PISTACHIO) := uImage.gz
diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c
new file mode 100644
index 000000000..558995ed6
--- /dev/null
+++ b/arch/mips/pistachio/init.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Pistachio platform setup
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * Copyright (C) 2016 Imagination Technologies
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+
+#include <asm/cacheflush.h>
+#include <asm/dma-coherence.h>
+#include <asm/fw/fw.h>
+#include <asm/mips-boards/generic.h>
+#include <asm/mips-cps.h>
+#include <asm/prom.h>
+#include <asm/smp-ops.h>
+#include <asm/traps.h>
+
+/*
+ * Core revision register decoding
+ * Bits 23 to 20: Major rev
+ * Bits 15 to 8: Minor rev
+ * Bits 7 to 0: Maintenance rev
+ */
+#define PISTACHIO_CORE_REV_REG 0xB81483D0
+#define PISTACHIO_CORE_REV_A1 0x00100006
+#define PISTACHIO_CORE_REV_B0 0x00100106
+
+const char *get_system_type(void)
+{
+ u32 core_rev;
+ const char *sys_type;
+
+ core_rev = __raw_readl((const void *)PISTACHIO_CORE_REV_REG);
+
+ switch (core_rev) {
+ case PISTACHIO_CORE_REV_B0:
+ sys_type = "IMG Pistachio SoC (B0)";
+ break;
+
+ case PISTACHIO_CORE_REV_A1:
+ sys_type = "IMG Pistachio SoC (A1)";
+ break;
+
+ default:
+ sys_type = "IMG Pistachio SoC";
+ break;
+ }
+
+ return sys_type;
+}
+
+void __init *plat_get_fdt(void)
+{
+ if (fw_arg0 != -2)
+ panic("Device-tree not present");
+ return (void *)fw_arg1;
+}
+
+void __init plat_mem_setup(void)
+{
+ __dt_setup_arch(plat_get_fdt());
+}
+
+#define DEFAULT_CPC_BASE_ADDR 0x1bde0000
+#define DEFAULT_CDMM_BASE_ADDR 0x1bdd0000
+
+phys_addr_t mips_cpc_default_phys_base(void)
+{
+ return DEFAULT_CPC_BASE_ADDR;
+}
+
+phys_addr_t mips_cdmm_phys_base(void)
+{
+ return DEFAULT_CDMM_BASE_ADDR;
+}
+
+static void __init mips_nmi_setup(void)
+{
+ void *base;
+ extern char except_vec_nmi[];
+
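+ /* Install the NMI exception vector at the EIC or legacy vector offset. */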
+ base = cpu_has_veic ?
+ (void *)(CAC_BASE + 0xa80) :
+ (void *)(CAC_BASE + 0x380);
+ memcpy(base, except_vec_nmi, 0x80);
+ flush_icache_range((unsigned long)base,
+ (unsigned long)base + 0x80);
+}
+
+static void __init mips_ejtag_setup(void)
+{
+ void *base;
+ extern char except_vec_ejtag_debug[];
+
+ base = cpu_has_veic ?
+ (void *)(CAC_BASE + 0xa00) :
+ (void *)(CAC_BASE + 0x300);
+ memcpy(base, except_vec_ejtag_debug, 0x80);
+ flush_icache_range((unsigned long)base,
+ (unsigned long)base + 0x80);
+}
+
+void __init prom_init(void)
+{
+ board_nmi_handler_setup = mips_nmi_setup;
+ board_ejtag_handler_setup = mips_ejtag_setup;
+
+ mips_cm_probe();
+ mips_cpc_probe();
+ register_cps_smp_ops();
+
+ pr_info("SoC Type: %s\n", get_system_type());
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
+
+void __init device_tree_init(void)
+{
+ if (!initial_boot_params)
+ return;
+
+ unflatten_and_copy_device_tree();
+}
diff --git a/arch/mips/pistachio/irq.c b/arch/mips/pistachio/irq.c
new file mode 100644
index 000000000..437c3101a
--- /dev/null
+++ b/arch/mips/pistachio/irq.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Pistachio IRQ setup
+ *
+ * Copyright (C) 2014 Google, Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/irqchip.h>
+#include <linux/kernel.h>
+
+#include <asm/cpu-features.h>
+#include <asm/irq_cpu.h>
+
+void __init arch_init_irq(void)
+{
+ pr_info("EIC is %s\n", cpu_has_veic ? "on" : "off");
+ pr_info("VINT is %s\n", cpu_has_vint ? "on" : "off");
+
+ if (!cpu_has_veic)
+ mips_cpu_irq_init();
+
+ irqchip_init();
+}
diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
new file mode 100644
index 000000000..de64751de
--- /dev/null
+++ b/arch/mips/pistachio/time.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Pistachio clocksource/timer setup
+ *
+ * Copyright (C) 2014 Google, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_clk.h>
+
+#include <asm/mips-cps.h>
+#include <asm/time.h>
+
+unsigned int get_c0_compare_int(void)
+{
+ return gic_get_c0_compare_int();
+}
+
+int get_c0_perfcount_int(void)
+{
+ return gic_get_c0_perfcount_int();
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+
+int get_c0_fdc_int(void)
+{
+ return gic_get_c0_fdc_int();
+}
+
+void __init plat_time_init(void)
+{
+ struct device_node *np;
+ struct clk *clk;
+
+ of_clk_init(NULL);
+ timer_probe();
+
+ np = of_get_cpu_node(0, NULL);
+ if (!np) {
+ pr_err("Failed to get CPU node\n");
+ return;
+ }
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk));
+ return;
+ }
+
+ mips_hpt_frequency = clk_get_rate(clk) / 2;
+ clk_put(clk);
+}
diff --git a/arch/mips/power/Makefile b/arch/mips/power/Makefile
new file mode 100644
index 000000000..14b7d9ee6
--- /dev/null
+++ b/arch/mips/power/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_HIBERNATION) += cpu.o hibernate.o hibernate_asm.o
diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c
new file mode 100644
index 000000000..a15e29dfc
--- /dev/null
+++ b/arch/mips/power/cpu.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Suspend support specific for mips.
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Hu Hongbing <huhb@lemote.com>
+ * Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+#include <asm/sections.h>
+#include <asm/fpu.h>
+#include <asm/dsp.h>
+
+static u32 saved_status;
+struct pt_regs saved_regs;
+
+void save_processor_state(void)
+{
+ saved_status = read_c0_status();
+
+ if (is_fpu_owner())
+ save_fp(current);
+
+ save_dsp(current);
+}
+
+void restore_processor_state(void)
+{
+ write_c0_status(saved_status);
+
+ if (is_fpu_owner())
+ restore_fp(current);
+
+ restore_dsp(current);
+}
+
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
+ unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end));
+
+ return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
+}
diff --git a/arch/mips/power/hibernate.c b/arch/mips/power/hibernate.c
new file mode 100644
index 000000000..94ab17c3c
--- /dev/null
+++ b/arch/mips/power/hibernate.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <asm/tlbflush.h>
+
+extern int restore_image(void);
+
+int swsusp_arch_resume(void)
+{
+ /* Avoid TLB mismatch during and after kernel resume */
+ local_flush_tlb_all();
+ return restore_image();
+}
diff --git a/arch/mips/power/hibernate_asm.S b/arch/mips/power/hibernate_asm.S
new file mode 100644
index 000000000..e62538734
--- /dev/null
+++ b/arch/mips/power/hibernate_asm.S
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Hibernation support specific for mips - temporary page tables
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Hu Hongbing <huhb@lemote.com>
+ * Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+#include <asm/asm.h>
+
+.text
+LEAF(swsusp_arch_suspend)
+ PTR_LA t0, saved_regs
+ PTR_S ra, PT_R31(t0)
+ PTR_S sp, PT_R29(t0)
+ PTR_S fp, PT_R30(t0)
+ PTR_S gp, PT_R28(t0)
+ PTR_S s0, PT_R16(t0)
+ PTR_S s1, PT_R17(t0)
+ PTR_S s2, PT_R18(t0)
+ PTR_S s3, PT_R19(t0)
+ PTR_S s4, PT_R20(t0)
+ PTR_S s5, PT_R21(t0)
+ PTR_S s6, PT_R22(t0)
+ PTR_S s7, PT_R23(t0)
+ j swsusp_save
+END(swsusp_arch_suspend)
+
+LEAF(restore_image)
+ PTR_L t0, restore_pblist
+0:
+ PTR_L t1, PBE_ADDRESS(t0) /* source */
+ PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */
+ PTR_ADDU t3, t1, _PAGE_SIZE
+1:
+ REG_L t8, (t1)
+ REG_S t8, (t2)
+ PTR_ADDIU t1, t1, SZREG
+ PTR_ADDIU t2, t2, SZREG
+ bne t1, t3, 1b
+ PTR_L t0, PBE_NEXT(t0)
+ bnez t0, 0b
+ PTR_LA t0, saved_regs
+ PTR_L ra, PT_R31(t0)
+ PTR_L sp, PT_R29(t0)
+ PTR_L fp, PT_R30(t0)
+ PTR_L gp, PT_R28(t0)
+ PTR_L s0, PT_R16(t0)
+ PTR_L s1, PT_R17(t0)
+ PTR_L s2, PT_R18(t0)
+ PTR_L s3, PT_R19(t0)
+ PTR_L s4, PT_R20(t0)
+ PTR_L s5, PT_R21(t0)
+ PTR_L s6, PT_R22(t0)
+ PTR_L s7, PT_R23(t0)
+ PTR_LI v0, 0x0
+ jr ra
+END(restore_image)
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
new file mode 100644
index 000000000..c10d8b233
--- /dev/null
+++ b/arch/mips/ralink/Kconfig
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: GPL-2.0
+if RALINK
+
+config CLKEVT_RT3352
+ bool
+ depends on SOC_RT305X || SOC_MT7620
+ default y
+ select TIMER_OF
+ select CLKSRC_MMIO
+
+config RALINK_ILL_ACC
+ bool
+ depends on SOC_RT305X
+ default y
+
+config IRQ_INTC
+ bool
+ default y
+ depends on !SOC_MT7621
+
+choice
+ prompt "Ralink SoC selection"
+ default SOC_RT305X
+ help
+ Select Ralink MIPS SoC type.
+
+ config SOC_RT288X
+ bool "RT288x"
+ select MIPS_L1_CACHE_SHIFT_4
+ select HAVE_LEGACY_CLK
+ select HAVE_PCI
+
+ config SOC_RT305X
+ bool "RT305x"
+ select HAVE_LEGACY_CLK
+
+ config SOC_RT3883
+ bool "RT3883"
+ select HAVE_LEGACY_CLK
+ select HAVE_PCI
+
+ config SOC_MT7620
+ bool "MT7620/8"
+ select CPU_MIPSR2_IRQ_VI
+ select HAVE_LEGACY_CLK
+ select HAVE_PCI
+
+ config SOC_MT7621
+ bool "MT7621"
+ select MIPS_CPU_SCACHE
+ select SYS_SUPPORTS_MULTITHREADING
+ select SYS_SUPPORTS_SMP
+ select SYS_SUPPORTS_MIPS_CPS
+ select SYS_SUPPORTS_HIGHMEM
+ select MIPS_GIC
+ select COMMON_CLK
+ select CLKSRC_MIPS_GIC
+ select HAVE_PCI if PCI_MT7621
+ select SOC_BUS
+endchoice
+
+choice
+ prompt "Devicetree selection"
+ default DTB_RT_NONE
+ help
+ Select the devicetree.
+
+ config DTB_RT_NONE
+ bool "None"
+
+ config DTB_RT2880_EVAL
+ bool "RT2880 eval kit"
+ depends on SOC_RT288X
+ select BUILTIN_DTB
+
+ config DTB_RT305X_EVAL
+ bool "RT305x eval kit"
+ depends on SOC_RT305X
+ select BUILTIN_DTB
+
+ config DTB_RT3883_EVAL
+ bool "RT3883 eval kit"
+ depends on SOC_RT3883
+ select BUILTIN_DTB
+
+ config DTB_MT7620A_EVAL
+ bool "MT7620A eval kit"
+ depends on SOC_MT7620
+ select BUILTIN_DTB
+
+ config DTB_OMEGA2P
+ bool "Onion Omega2+"
+ depends on SOC_MT7620
+ select BUILTIN_DTB
+
+ config DTB_VOCORE2
+ bool "VoCore2"
+ depends on SOC_MT7620
+ select BUILTIN_DTB
+
+endchoice
+
+endif
diff --git a/arch/mips/ralink/Makefile b/arch/mips/ralink/Makefile
new file mode 100644
index 000000000..26fabbdea
--- /dev/null
+++ b/arch/mips/ralink/Makefile
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Makefile for the Ralink common stuff
+#
+# Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+# Copyright (C) 2013 John Crispin <john@phrozen.org>
+
+obj-y := prom.o of.o reset.o
+
+ifndef CONFIG_MIPS_GIC
+ obj-y += clk.o timer.o
+endif
+
+obj-$(CONFIG_CLKEVT_RT3352) += cevt-rt3352.o
+
+obj-$(CONFIG_RALINK_ILL_ACC) += ill_acc.o
+
+obj-$(CONFIG_IRQ_INTC) += irq.o
+obj-$(CONFIG_MIPS_GIC) += irq-gic.o timer-gic.o
+
+obj-$(CONFIG_SOC_RT288X) += rt288x.o
+obj-$(CONFIG_SOC_RT305X) += rt305x.o
+obj-$(CONFIG_SOC_RT3883) += rt3883.o
+obj-$(CONFIG_SOC_MT7620) += mt7620.o
+obj-$(CONFIG_SOC_MT7621) += mt7621.o
+
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+obj-$(CONFIG_DEBUG_FS) += bootrom.o
diff --git a/arch/mips/ralink/Platform b/arch/mips/ralink/Platform
new file mode 100644
index 000000000..02ee07914
--- /dev/null
+++ b/arch/mips/ralink/Platform
@@ -0,0 +1,33 @@
+#
+# Ralink SoC common stuff
+#
+cflags-$(CONFIG_RALINK) += -I$(srctree)/arch/mips/include/asm/mach-ralink
+
+#
+# Ralink RT288x
+#
+load-$(CONFIG_SOC_RT288X) += 0xffffffff88000000
+cflags-$(CONFIG_SOC_RT288X) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt288x
+
+#
+# Ralink RT305x
+#
+load-$(CONFIG_SOC_RT305X) += 0xffffffff80000000
+cflags-$(CONFIG_SOC_RT305X) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt305x
+
+#
+# Ralink RT3883
+#
+load-$(CONFIG_SOC_RT3883) += 0xffffffff80000000
+cflags-$(CONFIG_SOC_RT3883) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt3883
+
+#
+# Ralink MT7620
+#
+load-$(CONFIG_SOC_MT7620) += 0xffffffff80000000
+cflags-$(CONFIG_SOC_MT7620) += -I$(srctree)/arch/mips/include/asm/mach-ralink/mt7620
+
+#
+# Ralink MT7621
+#
+load-$(CONFIG_SOC_MT7621) += 0xffffffff80001000
+cflags-$(CONFIG_SOC_MT7621) += -I$(srctree)/arch/mips/include/asm/mach-ralink/mt7621
diff --git a/arch/mips/ralink/bootrom.c b/arch/mips/ralink/bootrom.c
new file mode 100644
index 000000000..94ca8379b
--- /dev/null
+++ b/arch/mips/ralink/bootrom.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define BOOTROM_OFFSET 0x10118000
+#define BOOTROM_SIZE 0x8000
+
+static void __iomem *membase = (void __iomem *) KSEG1ADDR(BOOTROM_OFFSET);
+
+static int bootrom_show(struct seq_file *s, void *unused)
+{
+ seq_write(s, membase, BOOTROM_SIZE);
+
+ return 0;
+}
+
+static int bootrom_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, bootrom_show, NULL);
+}
+
+static const struct file_operations bootrom_file_ops = {
+ .open = bootrom_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init bootrom_setup(void)
+{
+ debugfs_create_file("bootrom", 0444, NULL, NULL, &bootrom_file_ops);
+ return 0;
+}
+
+postcore_initcall(bootrom_setup);
diff --git a/arch/mips/ralink/cevt-rt3352.c b/arch/mips/ralink/cevt-rt3352.c
new file mode 100644
index 000000000..269d4877d
--- /dev/null
+++ b/arch/mips/ralink/cevt-rt3352.c
@@ -0,0 +1,153 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 by John Crispin <john@phrozen.org>
+ */
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
+
+#define SYSTICK_FREQ (50 * 1000)
+
+#define SYSTICK_CONFIG 0x00
+#define SYSTICK_COMPARE 0x04
+#define SYSTICK_COUNT 0x08
+
+/* route systick irq to mips irq 7 instead of the r4k-timer */
+#define CFG_EXT_STK_EN 0x2
+/* enable the counter */
+#define CFG_CNT_EN 0x1
+
+struct systick_device {
+ void __iomem *membase;
+ struct clock_event_device dev;
+ int irq_requested;
+ int freq_scale;
+};
+
+static int systick_set_oneshot(struct clock_event_device *evt);
+static int systick_shutdown(struct clock_event_device *evt);
+
+static int systick_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ struct systick_device *sdev;
+ u32 count;
+
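+ /* Program the compare register 'delta' ticks ahead of the free-running count. */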
+ sdev = container_of(evt, struct systick_device, dev);
+ count = ioread32(sdev->membase + SYSTICK_COUNT);
+ count = (count + delta) % SYSTICK_FREQ;
+ iowrite32(count, sdev->membase + SYSTICK_COMPARE);
+
+ return 0;
+}
+
+static void systick_event_handler(struct clock_event_device *dev)
+{
+ /* nothing to do here */
+}
+
+static irqreturn_t systick_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *dev = (struct clock_event_device *) dev_id;
+
+ dev->event_handler(dev);
+
+ return IRQ_HANDLED;
+}
+
+static struct systick_device systick = {
+ .dev = {
+ /*
+ * cevt-r4k uses 300, make sure systick
+ * gets used if available
+ */
+ .rating = 310,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = systick_next_event,
+ .set_state_shutdown = systick_shutdown,
+ .set_state_oneshot = systick_set_oneshot,
+ .event_handler = systick_event_handler,
+ },
+};
+
+static int systick_shutdown(struct clock_event_device *evt)
+{
+ struct systick_device *sdev;
+
+ sdev = container_of(evt, struct systick_device, dev);
+
+ if (sdev->irq_requested)
+ free_irq(systick.dev.irq, &systick.dev);
+ sdev->irq_requested = 0;
+ iowrite32(0, systick.membase + SYSTICK_CONFIG);
+
+ return 0;
+}
+
+static int systick_set_oneshot(struct clock_event_device *evt)
+{
+ const char *name = systick.dev.name;
+ struct systick_device *sdev;
+ int irq = systick.dev.irq;
+
+ sdev = container_of(evt, struct systick_device, dev);
+
+ if (!sdev->irq_requested) {
+ if (request_irq(irq, systick_interrupt,
+ IRQF_PERCPU | IRQF_TIMER, name, &systick.dev))
+ pr_err("Failed to request irq %d (%s)\n", irq, name);
+ }
+ sdev->irq_requested = 1;
+ iowrite32(CFG_EXT_STK_EN | CFG_CNT_EN,
+ systick.membase + SYSTICK_CONFIG);
+
+ return 0;
+}
+
+static int __init ralink_systick_init(struct device_node *np)
+{
+ int ret;
+
+ systick.membase = of_iomap(np, 0);
+ if (!systick.membase)
+ return -ENXIO;
+
+ systick.dev.name = np->name;
+ clockevents_calc_mult_shift(&systick.dev, SYSTICK_FREQ, 60);
+ systick.dev.max_delta_ns = clockevent_delta2ns(0x7fff, &systick.dev);
+ systick.dev.max_delta_ticks = 0x7fff;
+ systick.dev.min_delta_ns = clockevent_delta2ns(0x3, &systick.dev);
+ systick.dev.min_delta_ticks = 0x3;
+ systick.dev.irq = irq_of_parse_and_map(np, 0);
+ if (!systick.dev.irq) {
+ pr_err("%pOFn: request_irq failed", np);
+ return -EINVAL;
+ }
+
+ ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
+ SYSTICK_FREQ, 301, 16,
+ clocksource_mmio_readl_up);
+ if (ret)
+ return ret;
+
+ clockevents_register_device(&systick.dev);
+
+ pr_info("%pOFn: running - mult: %d, shift: %d\n",
+ np, systick.dev.mult, systick.dev.shift);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c
new file mode 100644
index 000000000..2f9d5acb3
--- /dev/null
+++ b/arch/mips/ralink/clk.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/clkdev.h>
+#include <linux/clk.h>
+
+#include <asm/time.h>
+
+#include "common.h"
+
+struct clk {
+ struct clk_lookup cl;
+ unsigned long rate;
+};
+
+void ralink_clk_add(const char *dev, unsigned long rate)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ if (!clk)
+ panic("failed to add clock");
+
+ clk->cl.dev_id = dev;
+ clk->cl.clk = clk;
+
+ clk->rate = rate;
+
+ clkdev_add(&clk->cl);
+}
+
+/*
+ * Linux clock API
+ */
+int clk_enable(struct clk *clk)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+}
+EXPORT_SYMBOL_GPL(clk_disable);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ if (!clk)
+ return 0;
+
+ return clk->rate;
+}
+EXPORT_SYMBOL_GPL(clk_get_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ return -1;
+}
+EXPORT_SYMBOL_GPL(clk_set_rate);
+
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ return -1;
+}
+EXPORT_SYMBOL_GPL(clk_round_rate);
+
+void __init plat_time_init(void)
+{
+ struct clk *clk;
+
+ ralink_of_remap();
+
+ ralink_clk_init();
+ clk = clk_get_sys("cpu", NULL);
+ if (IS_ERR(clk))
+ panic("unable to get CPU clock, err=%ld", PTR_ERR(clk));
+ pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
+ mips_hpt_frequency = clk_get_rate(clk) / 2;
+ clk_put(clk);
+ timer_probe();
+}
diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h
new file mode 100644
index 000000000..4bc65b7a3
--- /dev/null
+++ b/arch/mips/ralink/common.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+
+#ifndef _RALINK_COMMON_H__
+#define _RALINK_COMMON_H__
+
+#define RAMIPS_SYS_TYPE_LEN 32
+
+struct ralink_soc_info {
+ unsigned char sys_type[RAMIPS_SYS_TYPE_LEN];
+ unsigned char *compatible;
+
+ unsigned long mem_base;
+ unsigned long mem_size;
+ unsigned long mem_size_min;
+ unsigned long mem_size_max;
+};
+extern struct ralink_soc_info soc_info;
+
+extern void ralink_of_remap(void);
+
+extern void ralink_clk_init(void);
+extern void ralink_clk_add(const char *dev, unsigned long rate);
+
+extern void ralink_rst_init(void);
+
+extern void prom_soc_init(struct ralink_soc_info *soc_info);
+
+__iomem void *plat_of_remap_node(const char *node);
+
+#endif /* _RALINK_COMMON_H__ */
diff --git a/arch/mips/ralink/early_printk.c b/arch/mips/ralink/early_printk.c
new file mode 100644
index 000000000..eb4fac25e
--- /dev/null
+++ b/arch/mips/ralink/early_printk.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/serial_reg.h>
+
+#include <asm/addrspace.h>
+#include <asm/setup.h>
+
+#ifdef CONFIG_SOC_RT288X
+#define EARLY_UART_BASE 0x300c00
+#define CHIPID_BASE 0x300004
+#elif defined(CONFIG_SOC_MT7621)
+#define EARLY_UART_BASE 0x1E000c00
+#define CHIPID_BASE 0x1E000004
+#else
+#define EARLY_UART_BASE 0x10000c00
+#define CHIPID_BASE 0x10000004
+#endif
+
+#define MT7628_CHIP_NAME1 0x20203832
+
+#define UART_REG_TX 0x04
+#define UART_REG_LCR 0x0c
+#define UART_REG_LSR 0x14
+#define UART_REG_LSR_RT2880 0x1c
+
+static __iomem void *uart_membase = (__iomem void *) KSEG1ADDR(EARLY_UART_BASE);
+static __iomem void *chipid_membase = (__iomem void *) KSEG1ADDR(CHIPID_BASE);
+static int init_complete;
+
+static inline void uart_w32(u32 val, unsigned reg)
+{
+ __raw_writel(val, uart_membase + reg);
+}
+
+static inline u32 uart_r32(unsigned reg)
+{
+ return __raw_readl(uart_membase + reg);
+}
+
+static inline int soc_is_mt7628(void)
+{
+ return IS_ENABLED(CONFIG_SOC_MT7620) &&
+ (__raw_readl(chipid_membase) == MT7628_CHIP_NAME1);
+}
+
+static void find_uart_base(void)
+{
+ int i;
+
+ if (!soc_is_mt7628())
+ return;
+
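+ /* Probe the three MT7628 UART blocks and use the first one whose LCR is programmed. */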
+ for (i = 0; i < 3; i++) {
+ u32 reg = uart_r32(UART_REG_LCR + (0x100 * i));
+
+ if (!reg)
+ continue;
+
+ uart_membase = (__iomem void *) KSEG1ADDR(EARLY_UART_BASE +
+ (0x100 * i));
+ break;
+ }
+}
+
+void prom_putchar(char ch)
+{
+ if (!init_complete) {
+ find_uart_base();
+ init_complete = 1;
+ }
+
+ if (IS_ENABLED(CONFIG_SOC_MT7621) || soc_is_mt7628()) {
+ uart_w32((unsigned char)ch, UART_TX);
+ while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0)
+ ;
+ } else {
+ while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0)
+ ;
+ uart_w32((unsigned char)ch, UART_REG_TX);
+ while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0)
+ ;
+ }
+}
diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c
new file mode 100644
index 000000000..bea857c9d
--- /dev/null
+++ b/arch/mips/ralink/ill_acc.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
+
+#define REG_ILL_ACC_ADDR 0x10
+#define REG_ILL_ACC_TYPE 0x14
+
+#define ILL_INT_STATUS BIT(31)
+#define ILL_ACC_WRITE BIT(30)
+#define ILL_ACC_LEN_M 0xff
+#define ILL_ACC_OFF_M 0xf
+#define ILL_ACC_OFF_S 16
+#define ILL_ACC_ID_M 0x7
+#define ILL_ACC_ID_S 8
+
+#define DRV_NAME "ill_acc"
+
+static const char * const ill_acc_ids[] = {
+ "cpu", "dma", "ppe", "pdma rx", "pdma tx", "pci/e", "wmac", "usb",
+};
+
+static irqreturn_t ill_acc_irq_handler(int irq, void *_priv)
+{
+ struct device *dev = (struct device *) _priv;
+ u32 addr = rt_memc_r32(REG_ILL_ACC_ADDR);
+ u32 type = rt_memc_r32(REG_ILL_ACC_TYPE);
+
+ dev_err(dev, "illegal %s access from %s - addr:0x%08x offset:%d len:%d\n",
+ (type & ILL_ACC_WRITE) ? ("write") : ("read"),
+ ill_acc_ids[(type >> ILL_ACC_ID_S) & ILL_ACC_ID_M],
+ addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M,
+ type & ILL_ACC_LEN_M);
+
+ rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE);
+
+ return IRQ_HANDLED;
+}
+
+static int __init ill_acc_of_setup(void)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ int irq;
+
+ /* somehow this driver breaks on RT5350 */
+ if (of_machine_is_compatible("ralink,rt5350-soc"))
+ return -EINVAL;
+
+ np = of_find_compatible_node(NULL, NULL, "ralink,rt3050-memc");
+ if (!np)
+ return -EINVAL;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ pr_err("%pOFn: failed to lookup pdev\n", np);
+ of_node_put(np);
+ return -EINVAL;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ put_device(&pdev->dev);
+ return -EINVAL;
+ }
+
+ if (request_irq(irq, ill_acc_irq_handler, 0, "ill_acc", &pdev->dev)) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ put_device(&pdev->dev);
+ return -EINVAL;
+ }
+
+ rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE);
+
+ dev_info(&pdev->dev, "irq registered\n");
+
+ return 0;
+}
+
+arch_initcall(ill_acc_of_setup);
diff --git a/arch/mips/ralink/irq-gic.c b/arch/mips/ralink/irq-gic.c
new file mode 100644
index 000000000..3bab51a5f
--- /dev/null
+++ b/arch/mips/ralink/irq-gic.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2015 Nikolay Martynov <mar.kolya@gmail.com>
+ * Copyright (C) 2015 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/init.h>
+
+#include <linux/of.h>
+#include <linux/irqchip.h>
+#include <asm/mips-cps.h>
+
+int get_c0_perfcount_int(void)
+{
+ return gic_get_c0_perfcount_int();
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+
+void __init arch_init_irq(void)
+{
+ irqchip_init();
+}
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
new file mode 100644
index 000000000..220ca0cd7
--- /dev/null
+++ b/arch/mips/ralink/irq.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+
+#include "common.h"
+
+#define INTC_INT_GLOBAL BIT(31)
+
+#define RALINK_CPU_IRQ_INTC (MIPS_CPU_IRQ_BASE + 2)
+#define RALINK_CPU_IRQ_PCI (MIPS_CPU_IRQ_BASE + 4)
+#define RALINK_CPU_IRQ_FE (MIPS_CPU_IRQ_BASE + 5)
+#define RALINK_CPU_IRQ_WIFI (MIPS_CPU_IRQ_BASE + 6)
+#define RALINK_CPU_IRQ_COUNTER (MIPS_CPU_IRQ_BASE + 7)
+
+/* we have a cascade of 8 irqs */
+#define RALINK_INTC_IRQ_BASE 8
+
+/* we have 32 SoC irqs */
+#define RALINK_INTC_IRQ_COUNT 32
+
+#define RALINK_INTC_IRQ_PERFC (RALINK_INTC_IRQ_BASE + 9)
+
+enum rt_intc_regs_enum {
+ INTC_REG_STATUS0 = 0,
+ INTC_REG_STATUS1,
+ INTC_REG_TYPE,
+ INTC_REG_RAW_STATUS,
+ INTC_REG_ENABLE,
+ INTC_REG_DISABLE,
+};
+
+static u32 rt_intc_regs[] = {
+ [INTC_REG_STATUS0] = 0x00,
+ [INTC_REG_STATUS1] = 0x04,
+ [INTC_REG_TYPE] = 0x20,
+ [INTC_REG_RAW_STATUS] = 0x30,
+ [INTC_REG_ENABLE] = 0x34,
+ [INTC_REG_DISABLE] = 0x38,
+};
+
+static void __iomem *rt_intc_membase;
+
+static int rt_perfcount_irq;
+
+static inline void rt_intc_w32(u32 val, unsigned reg)
+{
+ __raw_writel(val, rt_intc_membase + rt_intc_regs[reg]);
+}
+
+static inline u32 rt_intc_r32(unsigned reg)
+{
+ return __raw_readl(rt_intc_membase + rt_intc_regs[reg]);
+}
+
+static void ralink_intc_irq_unmask(struct irq_data *d)
+{
+ rt_intc_w32(BIT(d->hwirq), INTC_REG_ENABLE);
+}
+
+static void ralink_intc_irq_mask(struct irq_data *d)
+{
+ rt_intc_w32(BIT(d->hwirq), INTC_REG_DISABLE);
+}
+
+static struct irq_chip ralink_intc_irq_chip = {
+ .name = "INTC",
+ .irq_unmask = ralink_intc_irq_unmask,
+ .irq_mask = ralink_intc_irq_mask,
+ .irq_mask_ack = ralink_intc_irq_mask,
+};
+
+int get_c0_perfcount_int(void)
+{
+ return rt_perfcount_irq;
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+
+unsigned int get_c0_compare_int(void)
+{
+ return CP0_LEGACY_COMPARE_IRQ;
+}
+
+static void ralink_intc_irq_handler(struct irq_desc *desc)
+{
+ u32 pending = rt_intc_r32(INTC_REG_STATUS0);
+
+ if (pending) {
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ generic_handle_irq(irq_find_mapping(domain, __ffs(pending)));
+ } else {
+ spurious_interrupt();
+ }
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned long pending;
+
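+ /* Dispatch pending CPU interrupt lines in fixed priority order, timer first. */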
+ pending = read_c0_status() & read_c0_cause() & ST0_IM;
+
+ if (pending & STATUSF_IP7)
+ do_IRQ(RALINK_CPU_IRQ_COUNTER);
+
+ else if (pending & STATUSF_IP5)
+ do_IRQ(RALINK_CPU_IRQ_FE);
+
+ else if (pending & STATUSF_IP6)
+ do_IRQ(RALINK_CPU_IRQ_WIFI);
+
+ else if (pending & STATUSF_IP4)
+ do_IRQ(RALINK_CPU_IRQ_PCI);
+
+ else if (pending & STATUSF_IP2)
+ do_IRQ(RALINK_CPU_IRQ_INTC);
+
+ else
+ spurious_interrupt();
+}
+
+static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &ralink_intc_irq_chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = intc_map,
+};
+
+static int __init intc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct resource res;
+ struct irq_domain *domain;
+ int irq;
+
+ if (!of_property_read_u32_array(node, "ralink,intc-registers",
+ rt_intc_regs, 6))
+ pr_info("intc: using register map from devicetree\n");
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq)
+ panic("Failed to get INTC IRQ");
+
+ if (of_address_to_resource(node, 0, &res))
+ panic("Failed to get intc memory range");
+
+ if (!request_mem_region(res.start, resource_size(&res),
+ res.name))
+ pr_err("Failed to request intc memory");
+
+ rt_intc_membase = ioremap(res.start,
+ resource_size(&res));
+ if (!rt_intc_membase)
+ panic("Failed to remap intc memory");
+
+ /* disable all interrupts */
+ rt_intc_w32(~0, INTC_REG_DISABLE);
+
+ /* route all INTC interrupts to MIPS HW0 interrupt */
+ rt_intc_w32(0, INTC_REG_TYPE);
+
+ domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT,
+ RALINK_INTC_IRQ_BASE, 0, &irq_domain_ops, NULL);
+ if (!domain)
+ panic("Failed to add irqdomain");
+
+ rt_intc_w32(INTC_INT_GLOBAL, INTC_REG_ENABLE);
+
+ irq_set_chained_handler_and_data(irq, ralink_intc_irq_handler, domain);
+
+ /* tell the kernel which irq is used for performance monitoring */
+ rt_perfcount_irq = irq_create_mapping(domain, 9);
+
+ return 0;
+}
+
+static struct of_device_id __initdata of_irq_ids[] = {
+ { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_irq_of_init },
+ { .compatible = "ralink,rt2880-intc", .data = intc_of_init },
+ {},
+};
+
+void __init arch_init_irq(void)
+{
+ of_irq_init(of_irq_ids);
+}
+
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
new file mode 100644
index 000000000..fcf010038
--- /dev/null
+++ b/arch/mips/ralink/mt7620.c
@@ -0,0 +1,718 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/mt7620.h>
+#include <asm/mach-ralink/pinmux.h>
+
+#include "common.h"
+
+/* analog */
+#define PMU0_CFG 0x88
+#define PMU_SW_SET BIT(28)
+#define A_DCDC_EN BIT(24)
+#define A_SSC_PERI BIT(19)
+#define A_SSC_GEN BIT(18)
+#define A_SSC_M 0x3
+#define A_SSC_S 16
+#define A_DLY_M 0x7
+#define A_DLY_S 8
+#define A_VTUNE_M 0xff
+
+/* digital */
+#define PMU1_CFG 0x8C
+#define DIG_SW_SEL BIT(25)
+
+/* clock scaling */
+#define CLKCFG_FDIV_MASK 0x1f00
+#define CLKCFG_FDIV_USB_VAL 0x0300
+#define CLKCFG_FFRAC_MASK 0x001f
+#define CLKCFG_FFRAC_USB_VAL 0x0003
+
+/* EFUSE bits */
+#define EFUSE_MT7688 0x100000
+
+/* DRAM type bit */
+#define DRAM_TYPE_MT7628_MASK 0x1
+
+/* does the board have SDRAM or DDR RAM */
+static int dram_type;
+
+static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) };
+static struct rt2880_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) };
+static struct rt2880_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) };
+static struct rt2880_pmx_func mdio_grp[] = {
+ FUNC("mdio", MT7620_GPIO_MODE_MDIO, 22, 2),
+ FUNC("refclk", MT7620_GPIO_MODE_MDIO_REFCLK, 22, 2),
+};
+static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 24, 12) };
+static struct rt2880_pmx_func refclk_grp[] = { FUNC("spi refclk", 0, 37, 3) };
+static struct rt2880_pmx_func ephy_grp[] = { FUNC("ephy", 0, 40, 5) };
+static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 60, 12) };
+static struct rt2880_pmx_func wled_grp[] = { FUNC("wled", 0, 72, 1) };
+static struct rt2880_pmx_func pa_grp[] = { FUNC("pa", 0, 18, 4) };
+static struct rt2880_pmx_func uartf_grp[] = {
+ FUNC("uartf", MT7620_GPIO_MODE_UARTF, 7, 8),
+ FUNC("pcm uartf", MT7620_GPIO_MODE_PCM_UARTF, 7, 8),
+ FUNC("pcm i2s", MT7620_GPIO_MODE_PCM_I2S, 7, 8),
+ FUNC("i2s uartf", MT7620_GPIO_MODE_I2S_UARTF, 7, 8),
+ FUNC("pcm gpio", MT7620_GPIO_MODE_PCM_GPIO, 11, 4),
+ FUNC("gpio uartf", MT7620_GPIO_MODE_GPIO_UARTF, 7, 4),
+ FUNC("gpio i2s", MT7620_GPIO_MODE_GPIO_I2S, 7, 4),
+};
+static struct rt2880_pmx_func wdt_grp[] = {
+ FUNC("wdt rst", 0, 17, 1),
+ FUNC("wdt refclk", 0, 17, 1),
+ };
+static struct rt2880_pmx_func pcie_rst_grp[] = {
+ FUNC("pcie rst", MT7620_GPIO_MODE_PCIE_RST, 36, 1),
+ FUNC("pcie refclk", MT7620_GPIO_MODE_PCIE_REF, 36, 1)
+};
+static struct rt2880_pmx_func nd_sd_grp[] = {
+ FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15),
+ FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13)
+};
+
+static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
+ GRP("i2c", i2c_grp, 1, MT7620_GPIO_MODE_I2C),
+ GRP("uartf", uartf_grp, MT7620_GPIO_MODE_UART0_MASK,
+ MT7620_GPIO_MODE_UART0_SHIFT),
+ GRP("spi", spi_grp, 1, MT7620_GPIO_MODE_SPI),
+ GRP("uartlite", uartlite_grp, 1, MT7620_GPIO_MODE_UART1),
+ GRP_G("wdt", wdt_grp, MT7620_GPIO_MODE_WDT_MASK,
+ MT7620_GPIO_MODE_WDT_GPIO, MT7620_GPIO_MODE_WDT_SHIFT),
+ GRP_G("mdio", mdio_grp, MT7620_GPIO_MODE_MDIO_MASK,
+ MT7620_GPIO_MODE_MDIO_GPIO, MT7620_GPIO_MODE_MDIO_SHIFT),
+ GRP("rgmii1", rgmii1_grp, 1, MT7620_GPIO_MODE_RGMII1),
+ GRP("spi refclk", refclk_grp, 1, MT7620_GPIO_MODE_SPI_REF_CLK),
+ GRP_G("pcie", pcie_rst_grp, MT7620_GPIO_MODE_PCIE_MASK,
+ MT7620_GPIO_MODE_PCIE_GPIO, MT7620_GPIO_MODE_PCIE_SHIFT),
+ GRP_G("nd_sd", nd_sd_grp, MT7620_GPIO_MODE_ND_SD_MASK,
+ MT7620_GPIO_MODE_ND_SD_GPIO, MT7620_GPIO_MODE_ND_SD_SHIFT),
+ GRP("rgmii2", rgmii2_grp, 1, MT7620_GPIO_MODE_RGMII2),
+ GRP("wled", wled_grp, 1, MT7620_GPIO_MODE_WLED),
+ GRP("ephy", ephy_grp, 1, MT7620_GPIO_MODE_EPHY),
+ GRP("pa", pa_grp, 1, MT7620_GPIO_MODE_PA),
+ { 0 }
+};
+
+static struct rt2880_pmx_func pwm1_grp_mt7628[] = {
+ FUNC("sdxc d6", 3, 19, 1),
+ FUNC("utif", 2, 19, 1),
+ FUNC("gpio", 1, 19, 1),
+ FUNC("pwm1", 0, 19, 1),
+};
+
+static struct rt2880_pmx_func pwm0_grp_mt7628[] = {
+ FUNC("sdxc d7", 3, 18, 1),
+ FUNC("utif", 2, 18, 1),
+ FUNC("gpio", 1, 18, 1),
+ FUNC("pwm0", 0, 18, 1),
+};
+
+static struct rt2880_pmx_func uart2_grp_mt7628[] = {
+ FUNC("sdxc d5 d4", 3, 20, 2),
+ FUNC("pwm", 2, 20, 2),
+ FUNC("gpio", 1, 20, 2),
+ FUNC("uart2", 0, 20, 2),
+};
+
+static struct rt2880_pmx_func uart1_grp_mt7628[] = {
+ FUNC("sw_r", 3, 45, 2),
+ FUNC("pwm", 2, 45, 2),
+ FUNC("gpio", 1, 45, 2),
+ FUNC("uart1", 0, 45, 2),
+};
+
+static struct rt2880_pmx_func i2c_grp_mt7628[] = {
+ FUNC("-", 3, 4, 2),
+ FUNC("debug", 2, 4, 2),
+ FUNC("gpio", 1, 4, 2),
+ FUNC("i2c", 0, 4, 2),
+};
+
+static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) };
+static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) };
+static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
+static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
+
+static struct rt2880_pmx_func sd_mode_grp_mt7628[] = {
+ FUNC("jtag", 3, 22, 8),
+ FUNC("utif", 2, 22, 8),
+ FUNC("gpio", 1, 22, 8),
+ FUNC("sdxc", 0, 22, 8),
+};
+
+static struct rt2880_pmx_func uart0_grp_mt7628[] = {
+ FUNC("-", 3, 12, 2),
+ FUNC("-", 2, 12, 2),
+ FUNC("gpio", 1, 12, 2),
+ FUNC("uart0", 0, 12, 2),
+};
+
+static struct rt2880_pmx_func i2s_grp_mt7628[] = {
+ FUNC("antenna", 3, 0, 4),
+ FUNC("pcm", 2, 0, 4),
+ FUNC("gpio", 1, 0, 4),
+ FUNC("i2s", 0, 0, 4),
+};
+
+static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = {
+ FUNC("-", 3, 6, 1),
+ FUNC("refclk", 2, 6, 1),
+ FUNC("gpio", 1, 6, 1),
+ FUNC("spi cs1", 0, 6, 1),
+};
+
+static struct rt2880_pmx_func spis_grp_mt7628[] = {
+ FUNC("pwm_uart2", 3, 14, 4),
+ FUNC("utif", 2, 14, 4),
+ FUNC("gpio", 1, 14, 4),
+ FUNC("spis", 0, 14, 4),
+};
+
+static struct rt2880_pmx_func gpio_grp_mt7628[] = {
+ FUNC("pcie", 3, 11, 1),
+ FUNC("refclk", 2, 11, 1),
+ FUNC("gpio", 1, 11, 1),
+ FUNC("gpio", 0, 11, 1),
+};
+
+static struct rt2880_pmx_func p4led_kn_grp_mt7628[] = {
+ FUNC("jtag", 3, 30, 1),
+ FUNC("utif", 2, 30, 1),
+ FUNC("gpio", 1, 30, 1),
+ FUNC("p4led_kn", 0, 30, 1),
+};
+
+static struct rt2880_pmx_func p3led_kn_grp_mt7628[] = {
+ FUNC("jtag", 3, 31, 1),
+ FUNC("utif", 2, 31, 1),
+ FUNC("gpio", 1, 31, 1),
+ FUNC("p3led_kn", 0, 31, 1),
+};
+
+static struct rt2880_pmx_func p2led_kn_grp_mt7628[] = {
+ FUNC("jtag", 3, 32, 1),
+ FUNC("utif", 2, 32, 1),
+ FUNC("gpio", 1, 32, 1),
+ FUNC("p2led_kn", 0, 32, 1),
+};
+
+static struct rt2880_pmx_func p1led_kn_grp_mt7628[] = {
+ FUNC("jtag", 3, 33, 1),
+ FUNC("utif", 2, 33, 1),
+ FUNC("gpio", 1, 33, 1),
+ FUNC("p1led_kn", 0, 33, 1),
+};
+
+static struct rt2880_pmx_func p0led_kn_grp_mt7628[] = {
+ FUNC("jtag", 3, 34, 1),
+ FUNC("rsvd", 2, 34, 1),
+ FUNC("gpio", 1, 34, 1),
+ FUNC("p0led_kn", 0, 34, 1),
+};
+
+static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
+ FUNC("rsvd", 3, 35, 1),
+ FUNC("rsvd", 2, 35, 1),
+ FUNC("gpio", 1, 35, 1),
+ FUNC("wled_kn", 0, 35, 1),
+};
+
+static struct rt2880_pmx_func p4led_an_grp_mt7628[] = {
+ FUNC("jtag", 3, 39, 1),
+ FUNC("utif", 2, 39, 1),
+ FUNC("gpio", 1, 39, 1),
+ FUNC("p4led_an", 0, 39, 1),
+};
+
+static struct rt2880_pmx_func p3led_an_grp_mt7628[] = {
+ FUNC("jtag", 3, 40, 1),
+ FUNC("utif", 2, 40, 1),
+ FUNC("gpio", 1, 40, 1),
+ FUNC("p3led_an", 0, 40, 1),
+};
+
+static struct rt2880_pmx_func p2led_an_grp_mt7628[] = {
+ FUNC("jtag", 3, 41, 1),
+ FUNC("utif", 2, 41, 1),
+ FUNC("gpio", 1, 41, 1),
+ FUNC("p2led_an", 0, 41, 1),
+};
+
+static struct rt2880_pmx_func p1led_an_grp_mt7628[] = {
+ FUNC("jtag", 3, 42, 1),
+ FUNC("utif", 2, 42, 1),
+ FUNC("gpio", 1, 42, 1),
+ FUNC("p1led_an", 0, 42, 1),
+};
+
+static struct rt2880_pmx_func p0led_an_grp_mt7628[] = {
+ FUNC("jtag", 3, 43, 1),
+ FUNC("rsvd", 2, 43, 1),
+ FUNC("gpio", 1, 43, 1),
+ FUNC("p0led_an", 0, 43, 1),
+};
+
+static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
+ FUNC("rsvd", 3, 44, 1),
+ FUNC("rsvd", 2, 44, 1),
+ FUNC("gpio", 1, 44, 1),
+ FUNC("wled_an", 0, 44, 1),
+};
+
+#define MT7628_GPIO_MODE_MASK 0x3
+
+#define MT7628_GPIO_MODE_P4LED_KN 58
+#define MT7628_GPIO_MODE_P3LED_KN 56
+#define MT7628_GPIO_MODE_P2LED_KN 54
+#define MT7628_GPIO_MODE_P1LED_KN 52
+#define MT7628_GPIO_MODE_P0LED_KN 50
+#define MT7628_GPIO_MODE_WLED_KN 48
+#define MT7628_GPIO_MODE_P4LED_AN 42
+#define MT7628_GPIO_MODE_P3LED_AN 40
+#define MT7628_GPIO_MODE_P2LED_AN 38
+#define MT7628_GPIO_MODE_P1LED_AN 36
+#define MT7628_GPIO_MODE_P0LED_AN 34
+#define MT7628_GPIO_MODE_WLED_AN 32
+#define MT7628_GPIO_MODE_PWM1 30
+#define MT7628_GPIO_MODE_PWM0 28
+#define MT7628_GPIO_MODE_UART2 26
+#define MT7628_GPIO_MODE_UART1 24
+#define MT7628_GPIO_MODE_I2C 20
+#define MT7628_GPIO_MODE_REFCLK 18
+#define MT7628_GPIO_MODE_PERST 16
+#define MT7628_GPIO_MODE_WDT 14
+#define MT7628_GPIO_MODE_SPI 12
+#define MT7628_GPIO_MODE_SDMODE 10
+#define MT7628_GPIO_MODE_UART0 8
+#define MT7628_GPIO_MODE_I2S 6
+#define MT7628_GPIO_MODE_CS1 4
+#define MT7628_GPIO_MODE_SPIS 2
+#define MT7628_GPIO_MODE_GPIO 0
+
+static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
+ GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_PWM1),
+ GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_PWM0),
+ GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_UART2),
+ GRP_G("uart1", uart1_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_UART1),
+ GRP_G("i2c", i2c_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_I2C),
+ GRP("refclk", refclk_grp_mt7628, 1, MT7628_GPIO_MODE_REFCLK),
+ GRP("perst", perst_grp_mt7628, 1, MT7628_GPIO_MODE_PERST),
+ GRP("wdt", wdt_grp_mt7628, 1, MT7628_GPIO_MODE_WDT),
+ GRP("spi", spi_grp_mt7628, 1, MT7628_GPIO_MODE_SPI),
+ GRP_G("sdmode", sd_mode_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_SDMODE),
+ GRP_G("uart0", uart0_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_UART0),
+ GRP_G("i2s", i2s_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_I2S),
+ GRP_G("spi cs1", spi_cs1_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_CS1),
+ GRP_G("spis", spis_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_SPIS),
+ GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_GPIO),
+ GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_WLED_AN),
+ GRP_G("p0led_an", p0led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P0LED_AN),
+ GRP_G("p1led_an", p1led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P1LED_AN),
+ GRP_G("p2led_an", p2led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P2LED_AN),
+ GRP_G("p3led_an", p3led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P3LED_AN),
+ GRP_G("p4led_an", p4led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P4LED_AN),
+ GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_WLED_KN),
+ GRP_G("p0led_kn", p0led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P0LED_KN),
+ GRP_G("p1led_kn", p1led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P1LED_KN),
+ GRP_G("p2led_kn", p2led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P2LED_KN),
+ GRP_G("p3led_kn", p3led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P3LED_KN),
+ GRP_G("p4led_kn", p4led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P4LED_KN),
+ { 0 }
+};
+
+static inline int is_mt76x8(void)
+{
+ return ralink_soc == MT762X_SOC_MT7628AN ||
+ ralink_soc == MT762X_SOC_MT7688;
+}
+
+static __init u32
+mt7620_calc_rate(u32 ref_rate, u32 mul, u32 div)
+{
+ u64 t;
+
+ t = ref_rate;
+ t *= mul;
+ do_div(t, div);
+
+ return t;
+}
+
+#define MHZ(x) ((x) * 1000 * 1000)
+
+static __init unsigned long
+mt7620_get_xtal_rate(void)
+{
+ u32 reg;
+
+ reg = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG0);
+ if (reg & SYSCFG0_XTAL_FREQ_SEL)
+ return MHZ(40);
+
+ return MHZ(20);
+}
+
+static __init unsigned long
+mt7620_get_periph_rate(unsigned long xtal_rate)
+{
+ u32 reg;
+
+ reg = rt_sysc_r32(SYSC_REG_CLKCFG0);
+ if (reg & CLKCFG0_PERI_CLK_SEL)
+ return xtal_rate;
+
+ return MHZ(40);
+}
+
+static const u32 mt7620_clk_divider[] __initconst = { 2, 3, 4, 8 };
+
+static __init unsigned long
+mt7620_get_cpu_pll_rate(unsigned long xtal_rate)
+{
+ u32 reg;
+ u32 mul;
+ u32 div;
+
+ reg = rt_sysc_r32(SYSC_REG_CPLL_CONFIG0);
+ if (reg & CPLL_CFG0_BYPASS_REF_CLK)
+ return xtal_rate;
+
+ if ((reg & CPLL_CFG0_SW_CFG) == 0)
+ return MHZ(600);
+
+ mul = (reg >> CPLL_CFG0_PLL_MULT_RATIO_SHIFT) &
+ CPLL_CFG0_PLL_MULT_RATIO_MASK;
+ mul += 24;
+ if (reg & CPLL_CFG0_LC_CURFCK)
+ mul *= 2;
+
+ div = (reg >> CPLL_CFG0_PLL_DIV_RATIO_SHIFT) &
+ CPLL_CFG0_PLL_DIV_RATIO_MASK;
+
+ WARN_ON(div >= ARRAY_SIZE(mt7620_clk_divider));
+
+ return mt7620_calc_rate(xtal_rate, mul, mt7620_clk_divider[div]);
+}
+
+static __init unsigned long
+mt7620_get_pll_rate(unsigned long xtal_rate, unsigned long cpu_pll_rate)
+{
+ u32 reg;
+
+ reg = rt_sysc_r32(SYSC_REG_CPLL_CONFIG1);
+ if (reg & CPLL_CFG1_CPU_AUX1)
+ return xtal_rate;
+
+ if (reg & CPLL_CFG1_CPU_AUX0)
+ return MHZ(480);
+
+ return cpu_pll_rate;
+}
+
+static __init unsigned long
+mt7620_get_cpu_rate(unsigned long pll_rate)
+{
+ u32 reg;
+ u32 mul;
+ u32 div;
+
+ reg = rt_sysc_r32(SYSC_REG_CPU_SYS_CLKCFG);
+
+ mul = reg & CPU_SYS_CLKCFG_CPU_FFRAC_MASK;
+ div = (reg >> CPU_SYS_CLKCFG_CPU_FDIV_SHIFT) &
+ CPU_SYS_CLKCFG_CPU_FDIV_MASK;
+
+ return mt7620_calc_rate(pll_rate, mul, div);
+}
+
+static const u32 mt7620_ocp_dividers[16] __initconst = {
+ [CPU_SYS_CLKCFG_OCP_RATIO_2] = 2,
+ [CPU_SYS_CLKCFG_OCP_RATIO_3] = 3,
+ [CPU_SYS_CLKCFG_OCP_RATIO_4] = 4,
+ [CPU_SYS_CLKCFG_OCP_RATIO_5] = 5,
+ [CPU_SYS_CLKCFG_OCP_RATIO_10] = 10,
+};
+
+static __init unsigned long
+mt7620_get_dram_rate(unsigned long pll_rate)
+{
+ if (dram_type == SYSCFG0_DRAM_TYPE_SDRAM)
+ return pll_rate / 4;
+
+ return pll_rate / 3;
+}
+
+static __init unsigned long
+mt7620_get_sys_rate(unsigned long cpu_rate)
+{
+ u32 reg;
+ u32 ocp_ratio;
+ u32 div;
+
+ reg = rt_sysc_r32(SYSC_REG_CPU_SYS_CLKCFG);
+
+ ocp_ratio = (reg >> CPU_SYS_CLKCFG_OCP_RATIO_SHIFT) &
+ CPU_SYS_CLKCFG_OCP_RATIO_MASK;
+
+ if (WARN_ON(ocp_ratio >= ARRAY_SIZE(mt7620_ocp_dividers)))
+ return cpu_rate;
+
+ div = mt7620_ocp_dividers[ocp_ratio];
+ if (WARN(!div, "invalid divider for OCP ratio %u", ocp_ratio))
+ return cpu_rate;
+
+ return cpu_rate / div;
+}
+
+void __init ralink_clk_init(void)
+{
+ unsigned long xtal_rate;
+ unsigned long cpu_pll_rate;
+ unsigned long pll_rate;
+ unsigned long cpu_rate;
+ unsigned long sys_rate;
+ unsigned long dram_rate;
+ unsigned long periph_rate;
+ unsigned long pcmi2s_rate;
+
+ xtal_rate = mt7620_get_xtal_rate();
+
+#define RFMT(label) label ":%lu.%03luMHz "
+#define RINT(x) ((x) / 1000000)
+#define RFRAC(x) (((x) / 1000) % 1000)
+
+ if (is_mt76x8()) {
+ if (xtal_rate == MHZ(40))
+ cpu_rate = MHZ(580);
+ else
+ cpu_rate = MHZ(575);
+ dram_rate = sys_rate = cpu_rate / 3;
+ periph_rate = MHZ(40);
+ pcmi2s_rate = MHZ(480);
+
+ ralink_clk_add("10000d00.uartlite", periph_rate);
+ ralink_clk_add("10000e00.uartlite", periph_rate);
+ } else {
+ cpu_pll_rate = mt7620_get_cpu_pll_rate(xtal_rate);
+ pll_rate = mt7620_get_pll_rate(xtal_rate, cpu_pll_rate);
+
+ cpu_rate = mt7620_get_cpu_rate(pll_rate);
+ dram_rate = mt7620_get_dram_rate(pll_rate);
+ sys_rate = mt7620_get_sys_rate(cpu_rate);
+ periph_rate = mt7620_get_periph_rate(xtal_rate);
+ pcmi2s_rate = periph_rate;
+
+ pr_debug(RFMT("XTAL") RFMT("CPU_PLL") RFMT("PLL"),
+ RINT(xtal_rate), RFRAC(xtal_rate),
+ RINT(cpu_pll_rate), RFRAC(cpu_pll_rate),
+ RINT(pll_rate), RFRAC(pll_rate));
+
+ ralink_clk_add("10000500.uart", periph_rate);
+ }
+
+ pr_debug(RFMT("CPU") RFMT("DRAM") RFMT("SYS") RFMT("PERIPH"),
+ RINT(cpu_rate), RFRAC(cpu_rate),
+ RINT(dram_rate), RFRAC(dram_rate),
+ RINT(sys_rate), RFRAC(sys_rate),
+ RINT(periph_rate), RFRAC(periph_rate));
+#undef RFRAC
+#undef RINT
+#undef RFMT
+
+ ralink_clk_add("cpu", cpu_rate);
+ ralink_clk_add("10000100.timer", periph_rate);
+ ralink_clk_add("10000120.watchdog", periph_rate);
+ ralink_clk_add("10000900.i2c", periph_rate);
+ ralink_clk_add("10000a00.i2s", pcmi2s_rate);
+ ralink_clk_add("10000b00.spi", sys_rate);
+ ralink_clk_add("10000b40.spi", sys_rate);
+ ralink_clk_add("10000c00.uartlite", periph_rate);
+ ralink_clk_add("10000d00.uart1", periph_rate);
+ ralink_clk_add("10000e00.uart2", periph_rate);
+ ralink_clk_add("10180000.wmac", xtal_rate);
+
+ if (IS_ENABLED(CONFIG_USB) && !is_mt76x8()) {
+ /*
+ * When the CPU goes into sleep mode, the BUS clock will be
+ * too low for USB to function properly. Adjust the bus's
+ * fractional divider to fix this.
+ */
+ u32 val = rt_sysc_r32(SYSC_REG_CPU_SYS_CLKCFG);
+
+ val &= ~(CLKCFG_FDIV_MASK | CLKCFG_FFRAC_MASK);
+ val |= CLKCFG_FDIV_USB_VAL | CLKCFG_FFRAC_USB_VAL;
+
+ rt_sysc_w32(val, SYSC_REG_CPU_SYS_CLKCFG);
+ }
+}
+
+void __init ralink_of_remap(void)
+{
+ rt_sysc_membase = plat_of_remap_node("ralink,mt7620a-sysc");
+ rt_memc_membase = plat_of_remap_node("ralink,mt7620a-memc");
+
+ if (!rt_sysc_membase || !rt_memc_membase)
+ panic("Failed to remap core resources");
+}
+
+static __init void
+mt7620_dram_init(struct ralink_soc_info *soc_info)
+{
+ switch (dram_type) {
+ case SYSCFG0_DRAM_TYPE_SDRAM:
+ pr_info("Board has SDRAM\n");
+ soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN;
+ soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX;
+ break;
+
+ case SYSCFG0_DRAM_TYPE_DDR1:
+ pr_info("Board has DDR1\n");
+ soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
+ soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
+ break;
+
+ case SYSCFG0_DRAM_TYPE_DDR2:
+ pr_info("Board has DDR2\n");
+ soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
+ soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
+ break;
+ default:
+ BUG();
+ }
+}
+
+static __init void
+mt7628_dram_init(struct ralink_soc_info *soc_info)
+{
+ switch (dram_type) {
+ case SYSCFG0_DRAM_TYPE_DDR1_MT7628:
+ pr_info("Board has DDR1\n");
+ soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
+ soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
+ break;
+
+ case SYSCFG0_DRAM_TYPE_DDR2_MT7628:
+ pr_info("Board has DDR2\n");
+ soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
+ soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
+ break;
+ default:
+ BUG();
+ }
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+ void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE);
+ unsigned char *name = NULL;
+ u32 n0;
+ u32 n1;
+ u32 rev;
+ u32 cfg0;
+ u32 pmu0;
+ u32 pmu1;
+ u32 bga;
+
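+ /* Identify the SoC variant from the chip name and revision registers. */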
+ n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+ n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+ rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
+ bga = (rev >> CHIP_REV_PKG_SHIFT) & CHIP_REV_PKG_MASK;
+
+ if (n0 == MT7620_CHIP_NAME0 && n1 == MT7620_CHIP_NAME1) {
+ if (bga) {
+ ralink_soc = MT762X_SOC_MT7620A;
+ name = "MT7620A";
+ soc_info->compatible = "ralink,mt7620a-soc";
+ } else {
+ ralink_soc = MT762X_SOC_MT7620N;
+ name = "MT7620N";
+ soc_info->compatible = "ralink,mt7620n-soc";
+ }
+ } else if (n0 == MT7620_CHIP_NAME0 && n1 == MT7628_CHIP_NAME1) {
+ u32 efuse = __raw_readl(sysc + SYSC_REG_EFUSE_CFG);
+
+ if (efuse & EFUSE_MT7688) {
+ ralink_soc = MT762X_SOC_MT7688;
+ name = "MT7688";
+ } else {
+ ralink_soc = MT762X_SOC_MT7628AN;
+ name = "MT7628AN";
+ }
+ soc_info->compatible = "ralink,mt7628an-soc";
+ } else {
+ panic("mt762x: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+ }
+
+ snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+ "MediaTek %s ver:%u eco:%u",
+ name,
+ (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK,
+ (rev & CHIP_REV_ECO_MASK));
+
+ cfg0 = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG0);
+ if (is_mt76x8()) {
+ dram_type = cfg0 & DRAM_TYPE_MT7628_MASK;
+ } else {
+ dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) &
+ SYSCFG0_DRAM_TYPE_MASK;
+ if (dram_type == SYSCFG0_DRAM_TYPE_UNKNOWN)
+ dram_type = SYSCFG0_DRAM_TYPE_SDRAM;
+ }
+
+ soc_info->mem_base = MT7620_DRAM_BASE;
+ if (is_mt76x8())
+ mt7628_dram_init(soc_info);
+ else
+ mt7620_dram_init(soc_info);
+
+ pmu0 = __raw_readl(sysc + PMU0_CFG);
+ pmu1 = __raw_readl(sysc + PMU1_CFG);
+
+ pr_info("Analog PMU set to %s control\n",
+ (pmu0 & PMU_SW_SET) ? ("sw") : ("hw"));
+ pr_info("Digital PMU set to %s control\n",
+ (pmu1 & DIG_SW_SEL) ? ("sw") : ("hw"));
+
+ if (is_mt76x8())
+ rt2880_pinmux_data = mt7628an_pinmux_data;
+ else
+ rt2880_pinmux_data = mt7620a_pinmux_data;
+}
diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c
new file mode 100644
index 000000000..ca0ac607b
--- /dev/null
+++ b/arch/mips/ralink/mt7621.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2015 Nikolay Martynov <mar.kolya@gmail.com>
+ * Copyright (C) 2015 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#include <asm/mipsregs.h>
+#include <asm/smp-ops.h>
+#include <asm/mips-cps.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/mt7621.h>
+
+#include <pinmux.h>
+
+#include "common.h"
+
+#define MT7621_GPIO_MODE_UART1 1
+#define MT7621_GPIO_MODE_I2C 2
+#define MT7621_GPIO_MODE_UART3_MASK 0x3
+#define MT7621_GPIO_MODE_UART3_SHIFT 3
+#define MT7621_GPIO_MODE_UART3_GPIO 1
+#define MT7621_GPIO_MODE_UART2_MASK 0x3
+#define MT7621_GPIO_MODE_UART2_SHIFT 5
+#define MT7621_GPIO_MODE_UART2_GPIO 1
+#define MT7621_GPIO_MODE_JTAG 7
+#define MT7621_GPIO_MODE_WDT_MASK 0x3
+#define MT7621_GPIO_MODE_WDT_SHIFT 8
+#define MT7621_GPIO_MODE_WDT_GPIO 1
+#define MT7621_GPIO_MODE_PCIE_RST 0
+#define MT7621_GPIO_MODE_PCIE_REF 2
+#define MT7621_GPIO_MODE_PCIE_MASK 0x3
+#define MT7621_GPIO_MODE_PCIE_SHIFT 10
+#define MT7621_GPIO_MODE_PCIE_GPIO 1
+#define MT7621_GPIO_MODE_MDIO_MASK 0x3
+#define MT7621_GPIO_MODE_MDIO_SHIFT 12
+#define MT7621_GPIO_MODE_MDIO_GPIO 1
+#define MT7621_GPIO_MODE_RGMII1 14
+#define MT7621_GPIO_MODE_RGMII2 15
+#define MT7621_GPIO_MODE_SPI_MASK 0x3
+#define MT7621_GPIO_MODE_SPI_SHIFT 16
+#define MT7621_GPIO_MODE_SPI_GPIO 1
+#define MT7621_GPIO_MODE_SDHCI_MASK 0x3
+#define MT7621_GPIO_MODE_SDHCI_SHIFT 18
+#define MT7621_GPIO_MODE_SDHCI_GPIO 1
+
+static struct rt2880_pmx_func uart1_grp[] = { FUNC("uart1", 0, 1, 2) };
+static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 3, 2) };
+static struct rt2880_pmx_func uart3_grp[] = {
+ FUNC("uart3", 0, 5, 4),
+ FUNC("i2s", 2, 5, 4),
+ FUNC("spdif3", 3, 5, 4),
+};
+static struct rt2880_pmx_func uart2_grp[] = {
+ FUNC("uart2", 0, 9, 4),
+ FUNC("pcm", 2, 9, 4),
+ FUNC("spdif2", 3, 9, 4),
+};
+static struct rt2880_pmx_func jtag_grp[] = { FUNC("jtag", 0, 13, 5) };
+static struct rt2880_pmx_func wdt_grp[] = {
+ FUNC("wdt rst", 0, 18, 1),
+ FUNC("wdt refclk", 2, 18, 1),
+};
+static struct rt2880_pmx_func pcie_rst_grp[] = {
+ FUNC("pcie rst", MT7621_GPIO_MODE_PCIE_RST, 19, 1),
+ FUNC("pcie refclk", MT7621_GPIO_MODE_PCIE_REF, 19, 1)
+};
+static struct rt2880_pmx_func mdio_grp[] = { FUNC("mdio", 0, 20, 2) };
+static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 22, 12) };
+static struct rt2880_pmx_func spi_grp[] = {
+ FUNC("spi", 0, 34, 7),
+ FUNC("nand1", 2, 34, 7),
+};
+static struct rt2880_pmx_func sdhci_grp[] = {
+ FUNC("sdhci", 0, 41, 8),
+ FUNC("nand2", 2, 41, 8),
+};
+static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 49, 12) };
+
+static struct rt2880_pmx_group mt7621_pinmux_data[] = {
+ GRP("uart1", uart1_grp, 1, MT7621_GPIO_MODE_UART1),
+ GRP("i2c", i2c_grp, 1, MT7621_GPIO_MODE_I2C),
+ GRP_G("uart3", uart3_grp, MT7621_GPIO_MODE_UART3_MASK,
+ MT7621_GPIO_MODE_UART3_GPIO, MT7621_GPIO_MODE_UART3_SHIFT),
+ GRP_G("uart2", uart2_grp, MT7621_GPIO_MODE_UART2_MASK,
+ MT7621_GPIO_MODE_UART2_GPIO, MT7621_GPIO_MODE_UART2_SHIFT),
+ GRP("jtag", jtag_grp, 1, MT7621_GPIO_MODE_JTAG),
+ GRP_G("wdt", wdt_grp, MT7621_GPIO_MODE_WDT_MASK,
+ MT7621_GPIO_MODE_WDT_GPIO, MT7621_GPIO_MODE_WDT_SHIFT),
+ GRP_G("pcie", pcie_rst_grp, MT7621_GPIO_MODE_PCIE_MASK,
+ MT7621_GPIO_MODE_PCIE_GPIO, MT7621_GPIO_MODE_PCIE_SHIFT),
+ GRP_G("mdio", mdio_grp, MT7621_GPIO_MODE_MDIO_MASK,
+ MT7621_GPIO_MODE_MDIO_GPIO, MT7621_GPIO_MODE_MDIO_SHIFT),
+ GRP("rgmii2", rgmii2_grp, 1, MT7621_GPIO_MODE_RGMII2),
+ GRP_G("spi", spi_grp, MT7621_GPIO_MODE_SPI_MASK,
+ MT7621_GPIO_MODE_SPI_GPIO, MT7621_GPIO_MODE_SPI_SHIFT),
+ GRP_G("sdhci", sdhci_grp, MT7621_GPIO_MODE_SDHCI_MASK,
+ MT7621_GPIO_MODE_SDHCI_GPIO, MT7621_GPIO_MODE_SDHCI_SHIFT),
+ GRP("rgmii1", rgmii1_grp, 1, MT7621_GPIO_MODE_RGMII1),
+ { 0 }
+};
+
+phys_addr_t mips_cpc_default_phys_base(void)
+{
+ panic("Cannot detect cpc address");
+}
+
+void __init ralink_of_remap(void)
+{
+ rt_sysc_membase = plat_of_remap_node("mtk,mt7621-sysc");
+ rt_memc_membase = plat_of_remap_node("mtk,mt7621-memc");
+
+ if (!rt_sysc_membase || !rt_memc_membase)
+ panic("Failed to remap core resources");
+}
+
+static void soc_dev_init(struct ralink_soc_info *soc_info, u32 rev)
+{
+ struct soc_device *soc_dev;
+ struct soc_device_attribute *soc_dev_attr;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return;
+
+ soc_dev_attr->soc_id = "mt7621";
+ soc_dev_attr->family = "Ralink";
+
+ if (((rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK) == 1 &&
+ (rev & CHIP_REV_ECO_MASK) == 1)
+ soc_dev_attr->revision = "E2";
+ else
+ soc_dev_attr->revision = "E1";
+
+ soc_dev_attr->data = soc_info;
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr);
+ return;
+ }
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+ void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE);
+	const char *name = NULL;
+ u32 n0;
+ u32 n1;
+ u32 rev;
+
+ /* Early detection of CMP support */
+ mips_cm_probe();
+ mips_cpc_probe();
+
+ if (mips_cps_numiocu(0)) {
+ /*
+ * mips_cm_probe() wipes out bootloader
+ * config for CM regions and we have to configure them
+		 * again. This SoC cannot talk to palmbus devices
+		 * without a proper IOCU region set up.
+ *
+ * FIXME: it would be better to do this with values
+ * from DT, but we need this very early because
+ * without this we cannot talk to pretty much anything
+ * including serial.
+ */
+ write_gcr_reg0_base(MT7621_PALMBUS_BASE);
+ write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE |
+ CM_GCR_REGn_MASK_CMTGT_IOCU0);
+ __sync();
+ }
+
+ n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+ n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+
+ if (n0 == MT7621_CHIP_NAME0 && n1 == MT7621_CHIP_NAME1) {
+ name = "MT7621";
+ soc_info->compatible = "mtk,mt7621-soc";
+ } else {
+ panic("mt7621: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+ }
+ ralink_soc = MT762X_SOC_MT7621AT;
+ rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
+
+ snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+ "MediaTek %s ver:%u eco:%u",
+ name,
+ (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK,
+ (rev & CHIP_REV_ECO_MASK));
+
+ soc_info->mem_size_min = MT7621_DDR2_SIZE_MIN;
+ soc_info->mem_size_max = MT7621_DDR2_SIZE_MAX;
+ soc_info->mem_base = MT7621_DRAM_BASE;
+
+ rt2880_pinmux_data = mt7621_pinmux_data;
+
+ soc_dev_init(soc_info, rev);
+
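+	/* prefer CPS SMP ops; fall back to CMP, then VSMP */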
+ if (!register_cps_smp_ops())
+ return;
+ if (!register_cmp_smp_ops())
+ return;
+ if (!register_vsmp_smp_ops())
+ return;
+}
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
new file mode 100644
index 000000000..3017263ac
--- /dev/null
+++ b/arch/mips/ralink/of.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/sizes.h>
+#include <linux/of_fdt.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+#include <asm/reboot.h>
+#include <asm/bootinfo.h>
+#include <asm/addrspace.h>
+#include <asm/prom.h>
+
+#include "common.h"
+
+__iomem void *rt_sysc_membase;
+__iomem void *rt_memc_membase;
+EXPORT_SYMBOL_GPL(rt_sysc_membase);
+
+__iomem void *plat_of_remap_node(const char *node)
+{
+ struct resource res;
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, node);
+ if (!np)
+ panic("Failed to find %s node", node);
+
+ if (of_address_to_resource(np, 0, &res))
+ panic("Failed to get resource for %s", node);
+
+ if (!request_mem_region(res.start,
+ resource_size(&res),
+ res.name))
+ panic("Failed to request resources for %s", node);
+
+ return ioremap(res.start, resource_size(&res));
+}
+
+void __init device_tree_init(void)
+{
+ unflatten_and_copy_device_tree();
+}
+
+static int memory_dtb;
+
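+/* note whether the device tree carries its own /memory@0 node */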
+static int __init early_init_dt_find_memory(unsigned long node,
+ const char *uname, int depth, void *data)
+{
+ if (depth == 1 && !strcmp(uname, "memory@0"))
+ memory_dtb = 1;
+
+ return 0;
+}
+
+void __init plat_mem_setup(void)
+{
+ void *dtb = NULL;
+
+ set_io_port_base(KSEG1);
+
+ /*
+ * Load the builtin devicetree. This causes the chosen node to be
+ * parsed resulting in our memory appearing. fw_passed_dtb is used
+ * by CONFIG_MIPS_APPENDED_RAW_DTB as well.
+ */
+ if (fw_passed_dtb)
+ dtb = (void *)fw_passed_dtb;
+ else if (&__dtb_start != &__dtb_end)
+ dtb = (void *)__dtb_start;
+
+ __dt_setup_arch(dtb);
+
+ of_scan_flat_dt(early_init_dt_find_memory, NULL);
+ if (memory_dtb)
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+ else if (soc_info.mem_size)
+ memblock_add(soc_info.mem_base, soc_info.mem_size * SZ_1M);
+ else
+ detect_memory_region(soc_info.mem_base,
+ soc_info.mem_size_min * SZ_1M,
+ soc_info.mem_size_max * SZ_1M);
+}
+
+static int __init plat_of_setup(void)
+{
+ __dt_register_buses(soc_info.compatible, "palmbus");
+
+ /* make sure that the reset controller is setup early */
+ ralink_rst_init();
+
+ return 0;
+}
+
+arch_initcall(plat_of_setup);
diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
new file mode 100644
index 000000000..02e7878dc
--- /dev/null
+++ b/arch/mips/ralink/prom.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Joonas Lahtinen <joonas.lahtinen@gmail.com>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/string.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+
+#include <asm/bootinfo.h>
+#include <asm/addrspace.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
+
+#include "common.h"
+
+struct ralink_soc_info soc_info;
+struct rt2880_pmx_group *rt2880_pinmux_data = NULL;
+
+enum ralink_soc_type ralink_soc;
+EXPORT_SYMBOL_GPL(ralink_soc);
+
+const char *get_system_type(void)
+{
+ return soc_info.sys_type;
+}
+
+static __init void prom_init_cmdline(void)
+{
+ int argc;
+ char **argv;
+ int i;
+
+ pr_debug("prom: fw_arg0=%08x fw_arg1=%08x fw_arg2=%08x fw_arg3=%08x\n",
+ (unsigned int)fw_arg0, (unsigned int)fw_arg1,
+ (unsigned int)fw_arg2, (unsigned int)fw_arg3);
+
+ argc = fw_arg0;
+ argv = (char **) KSEG1ADDR(fw_arg1);
+
+ if (!argv) {
+ pr_debug("argv=%p is invalid, skipping\n",
+ argv);
+ return;
+ }
+
+ for (i = 0; i < argc; i++) {
+ char *p = (char *) KSEG1ADDR(argv[i]);
+
+ if (CPHYSADDR(p) && *p) {
+ pr_debug("argv[%d]: %s\n", i, p);
+ strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
+ strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
+ }
+ }
+}
+
+void __init prom_init(void)
+{
+ prom_soc_init(&soc_info);
+
+ pr_info("SoC Type: %s\n", get_system_type());
+
+ prom_init_cmdline();
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/ralink/reset.c b/arch/mips/ralink/reset.c
new file mode 100644
index 000000000..8126f1260
--- /dev/null
+++ b/arch/mips/ralink/reset.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/reset-controller.h>
+
+#include <asm/reboot.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
+
+/* Reset Control */
+#define SYSC_REG_RESET_CTRL 0x034
+
+#define RSTCTL_RESET_PCI BIT(26)
+#define RSTCTL_RESET_SYSTEM BIT(0)
+
+static int ralink_assert_device(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ u32 val;
+
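+	/* refuse the low ids: bit 0 is the whole-system reset (RSTCTL_RESET_SYSTEM) */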
+ if (id < 8)
+ return -1;
+
+ val = rt_sysc_r32(SYSC_REG_RESET_CTRL);
+ val |= BIT(id);
+ rt_sysc_w32(val, SYSC_REG_RESET_CTRL);
+
+ return 0;
+}
+
+static int ralink_deassert_device(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ u32 val;
+
+ if (id < 8)
+ return -1;
+
+ val = rt_sysc_r32(SYSC_REG_RESET_CTRL);
+ val &= ~BIT(id);
+ rt_sysc_w32(val, SYSC_REG_RESET_CTRL);
+
+ return 0;
+}
+
+static int ralink_reset_device(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
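+	/* pulse the reset line: assert, then immediately deassert */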
+ ralink_assert_device(rcdev, id);
+ return ralink_deassert_device(rcdev, id);
+}
+
+static const struct reset_control_ops reset_ops = {
+ .reset = ralink_reset_device,
+ .assert = ralink_assert_device,
+ .deassert = ralink_deassert_device,
+};
+
+static struct reset_controller_dev reset_dev = {
+ .ops = &reset_ops,
+ .owner = THIS_MODULE,
+ .nr_resets = 32,
+ .of_reset_n_cells = 1,
+};
+
+void ralink_rst_init(void)
+{
+ reset_dev.of_node = of_find_compatible_node(NULL, NULL,
+ "ralink,rt2880-reset");
+ if (!reset_dev.of_node)
+		pr_err("Failed to find reset controller node\n");
+ else
+ reset_controller_register(&reset_dev);
+}
+
+static void ralink_restart(char *command)
+{
+ if (IS_ENABLED(CONFIG_PCI)) {
+ rt_sysc_m32(0, RSTCTL_RESET_PCI, SYSC_REG_RESET_CTRL);
+ mdelay(50);
+ }
+
+ local_irq_disable();
+ rt_sysc_w32(RSTCTL_RESET_SYSTEM, SYSC_REG_RESET_CTRL);
+ unreachable();
+}
+
+static int __init mips_reboot_setup(void)
+{
+ _machine_restart = ralink_restart;
+
+ return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
new file mode 100644
index 000000000..3f0968978
--- /dev/null
+++ b/arch/mips/ralink/rt288x.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt288x.h>
+#include <asm/mach-ralink/pinmux.h>
+
+#include "common.h"
+
+static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
+static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
+static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 8) };
+static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
+static struct rt2880_pmx_func pci_func[] = { FUNC("pci", 0, 40, 32) };
+
+static struct rt2880_pmx_group rt2880_pinmux_data_act[] = {
+ GRP("i2c", i2c_func, 1, RT2880_GPIO_MODE_I2C),
+ GRP("spi", spi_func, 1, RT2880_GPIO_MODE_SPI),
+ GRP("uartlite", uartlite_func, 1, RT2880_GPIO_MODE_UART0),
+ GRP("jtag", jtag_func, 1, RT2880_GPIO_MODE_JTAG),
+ GRP("mdio", mdio_func, 1, RT2880_GPIO_MODE_MDIO),
+ GRP("sdram", sdram_func, 1, RT2880_GPIO_MODE_SDRAM),
+ GRP("pci", pci_func, 1, RT2880_GPIO_MODE_PCI),
+ { 0 }
+};
+
+void __init ralink_clk_init(void)
+{
+ unsigned long cpu_rate, wmac_rate = 40000000;
+ u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
+ t = ((t >> SYSTEM_CONFIG_CPUCLK_SHIFT) & SYSTEM_CONFIG_CPUCLK_MASK);
+
+ switch (t) {
+ case SYSTEM_CONFIG_CPUCLK_250:
+ cpu_rate = 250000000;
+ break;
+ case SYSTEM_CONFIG_CPUCLK_266:
+ cpu_rate = 266666667;
+ break;
+ case SYSTEM_CONFIG_CPUCLK_280:
+ cpu_rate = 280000000;
+ break;
+ case SYSTEM_CONFIG_CPUCLK_300:
+ cpu_rate = 300000000;
+ break;
+ }
+
+ ralink_clk_add("cpu", cpu_rate);
+ ralink_clk_add("300100.timer", cpu_rate / 2);
+ ralink_clk_add("300120.watchdog", cpu_rate / 2);
+ ralink_clk_add("300500.uart", cpu_rate / 2);
+ ralink_clk_add("300900.i2c", cpu_rate / 2);
+ ralink_clk_add("300c00.uartlite", cpu_rate / 2);
+ ralink_clk_add("400000.ethernet", cpu_rate / 2);
+ ralink_clk_add("480000.wmac", wmac_rate);
+}
+
+void __init ralink_of_remap(void)
+{
+ rt_sysc_membase = plat_of_remap_node("ralink,rt2880-sysc");
+ rt_memc_membase = plat_of_remap_node("ralink,rt2880-memc");
+
+ if (!rt_sysc_membase || !rt_memc_membase)
+ panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+ void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT2880_SYSC_BASE);
+ const char *name;
+ u32 n0;
+ u32 n1;
+ u32 id;
+
+ n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+ n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+ id = __raw_readl(sysc + SYSC_REG_CHIP_ID);
+
+ if (n0 == RT2880_CHIP_NAME0 && n1 == RT2880_CHIP_NAME1) {
+ soc_info->compatible = "ralink,r2880-soc";
+ name = "RT2880";
+ } else {
+ panic("rt288x: unknown SoC, n0:%08x n1:%08x", n0, n1);
+ }
+
+ snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+ "Ralink %s id:%u rev:%u",
+ name,
+ (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
+ (id & CHIP_ID_REV_MASK));
+
+ soc_info->mem_base = RT2880_SDRAM_BASE;
+ soc_info->mem_size_min = RT2880_MEM_SIZE_MIN;
+ soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
+
+ rt2880_pinmux_data = rt2880_pinmux_data_act;
+ ralink_soc = RT2880_SOC;
+}
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
new file mode 100644
index 000000000..496f966c0
--- /dev/null
+++ b/arch/mips/ralink/rt305x.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+
+#include <asm/io.h>
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt305x.h>
+#include <asm/mach-ralink/pinmux.h>
+
+#include "common.h"
+
+static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
+static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
+static struct rt2880_pmx_func uartf_func[] = {
+ FUNC("uartf", RT305X_GPIO_MODE_UARTF, 7, 8),
+ FUNC("pcm uartf", RT305X_GPIO_MODE_PCM_UARTF, 7, 8),
+ FUNC("pcm i2s", RT305X_GPIO_MODE_PCM_I2S, 7, 8),
+ FUNC("i2s uartf", RT305X_GPIO_MODE_I2S_UARTF, 7, 8),
+ FUNC("pcm gpio", RT305X_GPIO_MODE_PCM_GPIO, 11, 4),
+ FUNC("gpio uartf", RT305X_GPIO_MODE_GPIO_UARTF, 7, 4),
+ FUNC("gpio i2s", RT305X_GPIO_MODE_GPIO_I2S, 7, 4),
+};
+static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
+static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+static struct rt2880_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) };
+static struct rt2880_pmx_func rt5350_cs1_func[] = {
+ FUNC("spi_cs1", 0, 27, 1),
+ FUNC("wdg_cs1", 1, 27, 1),
+};
+static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
+static struct rt2880_pmx_func rt3352_rgmii_func[] = {
+ FUNC("rgmii", 0, 24, 12)
+};
+static struct rt2880_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) };
+static struct rt2880_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) };
+static struct rt2880_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) };
+static struct rt2880_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) };
+static struct rt2880_pmx_func rt3352_cs1_func[] = {
+ FUNC("spi_cs1", 0, 45, 1),
+ FUNC("wdg_cs1", 1, 45, 1),
+};
+
+static struct rt2880_pmx_group rt3050_pinmux_data[] = {
+ GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
+ GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
+ GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
+ RT305X_GPIO_MODE_UART0_SHIFT),
+ GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
+ GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
+ GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO),
+ GRP("rgmii", rgmii_func, 1, RT305X_GPIO_MODE_RGMII),
+ GRP("sdram", sdram_func, 1, RT305X_GPIO_MODE_SDRAM),
+ { 0 }
+};
+
+static struct rt2880_pmx_group rt3352_pinmux_data[] = {
+ GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
+ GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
+ GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
+ RT305X_GPIO_MODE_UART0_SHIFT),
+ GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
+ GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
+ GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO),
+ GRP("rgmii", rt3352_rgmii_func, 1, RT305X_GPIO_MODE_RGMII),
+ GRP("lna", rt3352_lna_func, 1, RT3352_GPIO_MODE_LNA),
+ GRP("pa", rt3352_pa_func, 1, RT3352_GPIO_MODE_PA),
+ GRP("led", rt3352_led_func, 1, RT5350_GPIO_MODE_PHY_LED),
+ GRP("spi_cs1", rt3352_cs1_func, 2, RT5350_GPIO_MODE_SPI_CS1),
+ { 0 }
+};
+
+static struct rt2880_pmx_group rt5350_pinmux_data[] = {
+ GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
+ GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
+ GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
+ RT305X_GPIO_MODE_UART0_SHIFT),
+ GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
+ GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
+ GRP("led", rt5350_led_func, 1, RT5350_GPIO_MODE_PHY_LED),
+ GRP("spi_cs1", rt5350_cs1_func, 2, RT5350_GPIO_MODE_SPI_CS1),
+ { 0 }
+};
+
+static unsigned long rt5350_get_mem_size(void)
+{
+ void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
+ unsigned long ret;
+ u32 t;
+
+ t = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG);
+ t = (t >> RT5350_SYSCFG0_DRAM_SIZE_SHIFT) &
+ RT5350_SYSCFG0_DRAM_SIZE_MASK;
+
+ switch (t) {
+ case RT5350_SYSCFG0_DRAM_SIZE_2M:
+ ret = 2;
+ break;
+ case RT5350_SYSCFG0_DRAM_SIZE_8M:
+ ret = 8;
+ break;
+ case RT5350_SYSCFG0_DRAM_SIZE_16M:
+ ret = 16;
+ break;
+ case RT5350_SYSCFG0_DRAM_SIZE_32M:
+ ret = 32;
+ break;
+ case RT5350_SYSCFG0_DRAM_SIZE_64M:
+ ret = 64;
+ break;
+ default:
+ panic("rt5350: invalid DRAM size: %u", t);
+ break;
+ }
+
+ return ret;
+}
+
+void __init ralink_clk_init(void)
+{
+ unsigned long cpu_rate, sys_rate, wdt_rate, uart_rate;
+ unsigned long wmac_rate = 40000000;
+
+ u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
+
+ if (soc_is_rt305x() || soc_is_rt3350()) {
+ t = (t >> RT305X_SYSCFG_CPUCLK_SHIFT) &
+ RT305X_SYSCFG_CPUCLK_MASK;
+ switch (t) {
+ case RT305X_SYSCFG_CPUCLK_LOW:
+ cpu_rate = 320000000;
+ break;
+ case RT305X_SYSCFG_CPUCLK_HIGH:
+ cpu_rate = 384000000;
+ break;
+ }
+ sys_rate = uart_rate = wdt_rate = cpu_rate / 3;
+ } else if (soc_is_rt3352()) {
+ t = (t >> RT3352_SYSCFG0_CPUCLK_SHIFT) &
+ RT3352_SYSCFG0_CPUCLK_MASK;
+ switch (t) {
+ case RT3352_SYSCFG0_CPUCLK_LOW:
+ cpu_rate = 384000000;
+ break;
+ case RT3352_SYSCFG0_CPUCLK_HIGH:
+ cpu_rate = 400000000;
+ break;
+ }
+ sys_rate = wdt_rate = cpu_rate / 3;
+ uart_rate = 40000000;
+ } else if (soc_is_rt5350()) {
+ t = (t >> RT5350_SYSCFG0_CPUCLK_SHIFT) &
+ RT5350_SYSCFG0_CPUCLK_MASK;
+ switch (t) {
+ case RT5350_SYSCFG0_CPUCLK_360:
+ cpu_rate = 360000000;
+ sys_rate = cpu_rate / 3;
+ break;
+ case RT5350_SYSCFG0_CPUCLK_320:
+ cpu_rate = 320000000;
+ sys_rate = cpu_rate / 4;
+ break;
+ case RT5350_SYSCFG0_CPUCLK_300:
+ cpu_rate = 300000000;
+ sys_rate = cpu_rate / 3;
+ break;
+ default:
+ BUG();
+ }
+ uart_rate = 40000000;
+ wdt_rate = sys_rate;
+ } else {
+ BUG();
+ }
+
+ if (soc_is_rt3352() || soc_is_rt5350()) {
+ u32 val = rt_sysc_r32(RT3352_SYSC_REG_SYSCFG0);
+
+ if (!(val & RT3352_CLKCFG0_XTAL_SEL))
+ wmac_rate = 20000000;
+ }
+
+ ralink_clk_add("cpu", cpu_rate);
+ ralink_clk_add("sys", sys_rate);
+ ralink_clk_add("10000900.i2c", uart_rate);
+ ralink_clk_add("10000a00.i2s", uart_rate);
+ ralink_clk_add("10000b00.spi", sys_rate);
+ ralink_clk_add("10000b40.spi", sys_rate);
+ ralink_clk_add("10000100.timer", wdt_rate);
+ ralink_clk_add("10000120.watchdog", wdt_rate);
+ ralink_clk_add("10000500.uart", uart_rate);
+ ralink_clk_add("10000c00.uartlite", uart_rate);
+ ralink_clk_add("10100000.ethernet", sys_rate);
+ ralink_clk_add("10180000.wmac", wmac_rate);
+}
+
+void __init ralink_of_remap(void)
+{
+ rt_sysc_membase = plat_of_remap_node("ralink,rt3050-sysc");
+ rt_memc_membase = plat_of_remap_node("ralink,rt3050-memc");
+
+ if (!rt_sysc_membase || !rt_memc_membase)
+ panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+ void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
+	const char *name;
+ u32 n0;
+ u32 n1;
+ u32 id;
+
+ n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+ n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+
+ if (n0 == RT3052_CHIP_NAME0 && n1 == RT3052_CHIP_NAME1) {
+ unsigned long icache_sets;
+
+ icache_sets = (read_c0_config1() >> 22) & 7;
+ if (icache_sets == 1) {
+ ralink_soc = RT305X_SOC_RT3050;
+ name = "RT3050";
+ soc_info->compatible = "ralink,rt3050-soc";
+ } else {
+ ralink_soc = RT305X_SOC_RT3052;
+ name = "RT3052";
+ soc_info->compatible = "ralink,rt3052-soc";
+ }
+ } else if (n0 == RT3350_CHIP_NAME0 && n1 == RT3350_CHIP_NAME1) {
+ ralink_soc = RT305X_SOC_RT3350;
+ name = "RT3350";
+ soc_info->compatible = "ralink,rt3350-soc";
+ } else if (n0 == RT3352_CHIP_NAME0 && n1 == RT3352_CHIP_NAME1) {
+ ralink_soc = RT305X_SOC_RT3352;
+ name = "RT3352";
+ soc_info->compatible = "ralink,rt3352-soc";
+ } else if (n0 == RT5350_CHIP_NAME0 && n1 == RT5350_CHIP_NAME1) {
+ ralink_soc = RT305X_SOC_RT5350;
+ name = "RT5350";
+ soc_info->compatible = "ralink,rt5350-soc";
+ } else {
+ panic("rt305x: unknown SoC, n0:%08x n1:%08x", n0, n1);
+ }
+
+ id = __raw_readl(sysc + SYSC_REG_CHIP_ID);
+
+ snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+ "Ralink %s id:%u rev:%u",
+ name,
+ (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
+ (id & CHIP_ID_REV_MASK));
+
+ soc_info->mem_base = RT305X_SDRAM_BASE;
+ if (soc_is_rt5350()) {
+ soc_info->mem_size = rt5350_get_mem_size();
+ rt2880_pinmux_data = rt5350_pinmux_data;
+ } else if (soc_is_rt305x() || soc_is_rt3350()) {
+ soc_info->mem_size_min = RT305X_MEM_SIZE_MIN;
+ soc_info->mem_size_max = RT305X_MEM_SIZE_MAX;
+ rt2880_pinmux_data = rt3050_pinmux_data;
+ } else if (soc_is_rt3352()) {
+ soc_info->mem_size_min = RT3352_MEM_SIZE_MIN;
+ soc_info->mem_size_max = RT3352_MEM_SIZE_MAX;
+ rt2880_pinmux_data = rt3352_pinmux_data;
+ }
+}
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
new file mode 100644
index 000000000..8f3fe3106
--- /dev/null
+++ b/arch/mips/ralink/rt3883.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt3883.h>
+#include <asm/mach-ralink/pinmux.h>
+
+#include "common.h"
+
+static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
+static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
+static struct rt2880_pmx_func uartf_func[] = {
+ FUNC("uartf", RT3883_GPIO_MODE_UARTF, 7, 8),
+ FUNC("pcm uartf", RT3883_GPIO_MODE_PCM_UARTF, 7, 8),
+ FUNC("pcm i2s", RT3883_GPIO_MODE_PCM_I2S, 7, 8),
+ FUNC("i2s uartf", RT3883_GPIO_MODE_I2S_UARTF, 7, 8),
+ FUNC("pcm gpio", RT3883_GPIO_MODE_PCM_GPIO, 11, 4),
+ FUNC("gpio uartf", RT3883_GPIO_MODE_GPIO_UARTF, 7, 4),
+ FUNC("gpio i2s", RT3883_GPIO_MODE_GPIO_I2S, 7, 4),
+};
+static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
+static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
+static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
+static struct rt2880_pmx_func pci_func[] = {
+ FUNC("pci-dev", 0, 40, 32),
+ FUNC("pci-host2", 1, 40, 32),
+ FUNC("pci-host1", 2, 40, 32),
+ FUNC("pci-fnc", 3, 40, 32)
+};
+static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
+static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
+
+static struct rt2880_pmx_group rt3883_pinmux_data[] = {
+ GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
+ GRP("spi", spi_func, 1, RT3883_GPIO_MODE_SPI),
+ GRP("uartf", uartf_func, RT3883_GPIO_MODE_UART0_MASK,
+ RT3883_GPIO_MODE_UART0_SHIFT),
+ GRP("uartlite", uartlite_func, 1, RT3883_GPIO_MODE_UART1),
+ GRP("jtag", jtag_func, 1, RT3883_GPIO_MODE_JTAG),
+ GRP("mdio", mdio_func, 1, RT3883_GPIO_MODE_MDIO),
+ GRP("lna a", lna_a_func, 1, RT3883_GPIO_MODE_LNA_A),
+ GRP("lna g", lna_g_func, 1, RT3883_GPIO_MODE_LNA_G),
+ GRP("pci", pci_func, RT3883_GPIO_MODE_PCI_MASK,
+ RT3883_GPIO_MODE_PCI_SHIFT),
+ GRP("ge1", ge1_func, 1, RT3883_GPIO_MODE_GE1),
+ GRP("ge2", ge2_func, 1, RT3883_GPIO_MODE_GE2),
+ { 0 }
+};
+
+void __init ralink_clk_init(void)
+{
+ unsigned long cpu_rate, sys_rate;
+ u32 syscfg0;
+ u32 clksel;
+ u32 ddr2;
+
+ syscfg0 = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG0);
+ clksel = ((syscfg0 >> RT3883_SYSCFG0_CPUCLK_SHIFT) &
+ RT3883_SYSCFG0_CPUCLK_MASK);
+ ddr2 = syscfg0 & RT3883_SYSCFG0_DRAM_TYPE_DDR2;
+
+ switch (clksel) {
+ case RT3883_SYSCFG0_CPUCLK_250:
+ cpu_rate = 250000000;
+ sys_rate = (ddr2) ? 125000000 : 83000000;
+ break;
+ case RT3883_SYSCFG0_CPUCLK_384:
+ cpu_rate = 384000000;
+ sys_rate = (ddr2) ? 128000000 : 96000000;
+ break;
+ case RT3883_SYSCFG0_CPUCLK_480:
+ cpu_rate = 480000000;
+ sys_rate = (ddr2) ? 160000000 : 120000000;
+ break;
+ case RT3883_SYSCFG0_CPUCLK_500:
+ cpu_rate = 500000000;
+ sys_rate = (ddr2) ? 166000000 : 125000000;
+ break;
+ }
+
+ ralink_clk_add("cpu", cpu_rate);
+ ralink_clk_add("10000100.timer", sys_rate);
+ ralink_clk_add("10000120.watchdog", sys_rate);
+ ralink_clk_add("10000500.uart", 40000000);
+ ralink_clk_add("10000900.i2c", 40000000);
+ ralink_clk_add("10000a00.i2s", 40000000);
+ ralink_clk_add("10000b00.spi", sys_rate);
+ ralink_clk_add("10000b40.spi", sys_rate);
+ ralink_clk_add("10000c00.uartlite", 40000000);
+ ralink_clk_add("10100000.ethernet", sys_rate);
+ ralink_clk_add("10180000.wmac", 40000000);
+}
+
+void __init ralink_of_remap(void)
+{
+ rt_sysc_membase = plat_of_remap_node("ralink,rt3883-sysc");
+ rt_memc_membase = plat_of_remap_node("ralink,rt3883-memc");
+
+ if (!rt_sysc_membase || !rt_memc_membase)
+ panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+ void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT3883_SYSC_BASE);
+ const char *name;
+ u32 n0;
+ u32 n1;
+ u32 id;
+
+ n0 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID0_3);
+ n1 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID4_7);
+ id = __raw_readl(sysc + RT3883_SYSC_REG_REVID);
+
+ if (n0 == RT3883_CHIP_NAME0 && n1 == RT3883_CHIP_NAME1) {
+ soc_info->compatible = "ralink,rt3883-soc";
+ name = "RT3883";
+ } else {
+ panic("rt3883: unknown SoC, n0:%08x n1:%08x", n0, n1);
+ }
+
+ snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+ "Ralink %s ver:%u eco:%u",
+ name,
+ (id >> RT3883_REVID_VER_ID_SHIFT) & RT3883_REVID_VER_ID_MASK,
+ (id & RT3883_REVID_ECO_ID_MASK));
+
+ soc_info->mem_base = RT3883_SDRAM_BASE;
+ soc_info->mem_size_min = RT3883_MEM_SIZE_MIN;
+ soc_info->mem_size_max = RT3883_MEM_SIZE_MAX;
+
+ rt2880_pinmux_data = rt3883_pinmux_data;
+
+ ralink_soc = RT3883_SOC;
+}
diff --git a/arch/mips/ralink/timer-gic.c b/arch/mips/ralink/timer-gic.c
new file mode 100644
index 000000000..dcf2a44ac
--- /dev/null
+++ b/arch/mips/ralink/timer-gic.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2015 Nikolay Martynov <mar.kolya@gmail.com>
+ * Copyright (C) 2015 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/init.h>
+
+#include <linux/of.h>
+#include <linux/of_clk.h>
+#include <linux/clocksource.h>
+
+#include "common.h"
+
+void __init plat_time_init(void)
+{
+ ralink_of_remap();
+
+ of_clk_init(NULL);
+ timer_probe();
+}
diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
new file mode 100644
index 000000000..652424d8e
--- /dev/null
+++ b/arch/mips/ralink/timer.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Ralink RT2880 timer
+ * Author: John Crispin
+ *
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+*/
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/of_gpio.h>
+#include <linux/clk.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
+
+#define TIMER_REG_TMRSTAT 0x00
+#define TIMER_REG_TMR0LOAD 0x10
+#define TIMER_REG_TMR0CTL 0x18
+
+#define TMRSTAT_TMR0INT BIT(0)
+
+#define TMR0CTL_ENABLE BIT(7)
+#define TMR0CTL_MODE_PERIODIC BIT(4)
+#define TMR0CTL_PRESCALER 1
+#define TMR0CTL_PRESCALE_VAL (0xf - TMR0CTL_PRESCALER)
+#define TMR0CTL_PRESCALE_DIV (65536 / BIT(TMR0CTL_PRESCALER))
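+/* with TMR0CTL_PRESCALER == 1 this divider evaluates to 65536 / 2 = 32768 */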
+
+struct rt_timer {
+ struct device *dev;
+ void __iomem *membase;
+ int irq;
+ unsigned long timer_freq;
+ unsigned long timer_div;
+};
+
+static inline void rt_timer_w32(struct rt_timer *rt, u8 reg, u32 val)
+{
+ __raw_writel(val, rt->membase + reg);
+}
+
+static inline u32 rt_timer_r32(struct rt_timer *rt, u8 reg)
+{
+ return __raw_readl(rt->membase + reg);
+}
+
+static irqreturn_t rt_timer_irq(int irq, void *_rt)
+{
+ struct rt_timer *rt = (struct rt_timer *) _rt;
+
+ rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div);
+ rt_timer_w32(rt, TIMER_REG_TMRSTAT, TMRSTAT_TMR0INT);
+
+ return IRQ_HANDLED;
+}
+
+
+static int rt_timer_request(struct rt_timer *rt)
+{
+ int err = request_irq(rt->irq, rt_timer_irq, 0,
+ dev_name(rt->dev), rt);
+ if (err) {
+ dev_err(rt->dev, "failed to request irq\n");
+ } else {
+ u32 t = TMR0CTL_MODE_PERIODIC | TMR0CTL_PRESCALE_VAL;
+ rt_timer_w32(rt, TIMER_REG_TMR0CTL, t);
+ }
+ return err;
+}
+
+static int rt_timer_config(struct rt_timer *rt, unsigned long divisor)
+{
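+	/* clamp the divisor so the reload value timer_freq / timer_div stays >= 1 */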
+ if (rt->timer_freq < divisor)
+ rt->timer_div = rt->timer_freq;
+ else
+ rt->timer_div = divisor;
+
+ rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div);
+
+ return 0;
+}
+
+static int rt_timer_enable(struct rt_timer *rt)
+{
+ u32 t;
+
+ rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div);
+
+ t = rt_timer_r32(rt, TIMER_REG_TMR0CTL);
+ t |= TMR0CTL_ENABLE;
+ rt_timer_w32(rt, TIMER_REG_TMR0CTL, t);
+
+ return 0;
+}
+
+static int rt_timer_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct rt_timer *rt;
+ struct clk *clk;
+
+ rt = devm_kzalloc(&pdev->dev, sizeof(*rt), GFP_KERNEL);
+ if (!rt) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ rt->irq = platform_get_irq(pdev, 0);
+ if (rt->irq < 0)
+ return rt->irq;
+
+ rt->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rt->membase))
+ return PTR_ERR(rt->membase);
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "failed to get clock rate\n");
+ return PTR_ERR(clk);
+ }
+
+ rt->timer_freq = clk_get_rate(clk) / TMR0CTL_PRESCALE_DIV;
+ if (!rt->timer_freq)
+ return -EINVAL;
+
+ rt->dev = &pdev->dev;
+ platform_set_drvdata(pdev, rt);
+
+ rt_timer_request(rt);
+ rt_timer_config(rt, 2);
+ rt_timer_enable(rt);
+
+ dev_info(&pdev->dev, "maximum frequency is %luHz\n", rt->timer_freq);
+
+ return 0;
+}
+
+static const struct of_device_id rt_timer_match[] = {
+ { .compatible = "ralink,rt2880-timer" },
+ {},
+};
+
+static struct platform_driver rt_timer_driver = {
+ .probe = rt_timer_probe,
+ .driver = {
+ .name = "rt-timer",
+ .of_match_table = rt_timer_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(rt_timer_driver);
diff --git a/arch/mips/rb532/Makefile b/arch/mips/rb532/Makefile
new file mode 100644
index 000000000..fb4b4bf83
--- /dev/null
+++ b/arch/mips/rb532/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the RB532 board specific parts of the kernel
+#
+
+obj-$(CONFIG_SERIAL_8250_CONSOLE) += serial.o
+
+obj-y += irq.o time.o setup.o prom.o gpio.o devices.o
diff --git a/arch/mips/rb532/Platform b/arch/mips/rb532/Platform
new file mode 100644
index 000000000..12eaa8790
--- /dev/null
+++ b/arch/mips/rb532/Platform
@@ -0,0 +1,6 @@
+#
+# Routerboard 532
+#
+cflags-$(CONFIG_MIKROTIK_RB532) += \
+ -I$(srctree)/arch/mips/include/asm/mach-rc32434
+load-$(CONFIG_MIKROTIK_RB532) += 0xffffffff80101000
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
new file mode 100644
index 000000000..0e3c8d761
--- /dev/null
+++ b/arch/mips/rb532/devices.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RouterBoard 500 Platform devices
+ *
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2007 Florian Fainelli <florian@openwrt.org>
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/platnand.h>
+#include <linux/mtd/mtd.h>
+#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/serial_8250.h>
+
+#include <asm/bootinfo.h>
+
+#include <asm/mach-rc32434/rc32434.h>
+#include <asm/mach-rc32434/dma.h>
+#include <asm/mach-rc32434/dma_v.h>
+#include <asm/mach-rc32434/eth.h>
+#include <asm/mach-rc32434/rb.h>
+#include <asm/mach-rc32434/integ.h>
+#include <asm/mach-rc32434/gpio.h>
+#include <asm/mach-rc32434/irq.h>
+
+#define ETH0_RX_DMA_ADDR (DMA0_BASE_ADDR + 0 * DMA_CHAN_OFFSET)
+#define ETH0_TX_DMA_ADDR (DMA0_BASE_ADDR + 1 * DMA_CHAN_OFFSET)
+
+extern unsigned int idt_cpu_freq;
+
+static struct mpmc_device dev3;
+
+void set_latch_u5(unsigned char or_mask, unsigned char nand_mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev3.lock, flags);
+
+ dev3.state = (dev3.state | or_mask) & ~nand_mask;
+ writeb(dev3.state, dev3.base);
+
+ spin_unlock_irqrestore(&dev3.lock, flags);
+}
+EXPORT_SYMBOL(set_latch_u5);
+
+unsigned char get_latch_u5(void)
+{
+ return dev3.state;
+}
+EXPORT_SYMBOL(get_latch_u5);
+
+static struct resource korina_dev0_res[] = {
+ {
+ .name = "korina_regs",
+ .start = ETH0_BASE_ADDR,
+ .end = ETH0_BASE_ADDR + sizeof(struct eth_regs),
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "korina_rx",
+ .start = ETH0_DMA_RX_IRQ,
+ .end = ETH0_DMA_RX_IRQ,
+ .flags = IORESOURCE_IRQ
+ }, {
+ .name = "korina_tx",
+ .start = ETH0_DMA_TX_IRQ,
+ .end = ETH0_DMA_TX_IRQ,
+ .flags = IORESOURCE_IRQ
+ }, {
+ .name = "korina_ovr",
+ .start = ETH0_RX_OVR_IRQ,
+ .end = ETH0_RX_OVR_IRQ,
+ .flags = IORESOURCE_IRQ
+ }, {
+ .name = "korina_und",
+ .start = ETH0_TX_UND_IRQ,
+ .end = ETH0_TX_UND_IRQ,
+ .flags = IORESOURCE_IRQ
+ }, {
+ .name = "korina_dma_rx",
+ .start = ETH0_RX_DMA_ADDR,
+ .end = ETH0_RX_DMA_ADDR + DMA_CHAN_OFFSET - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "korina_dma_tx",
+ .start = ETH0_TX_DMA_ADDR,
+ .end = ETH0_TX_DMA_ADDR + DMA_CHAN_OFFSET - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct korina_device korina_dev0_data = {
+ .name = "korina0",
+ .mac = {0xde, 0xca, 0xff, 0xc0, 0xff, 0xee}
+};
+
+static struct platform_device korina_dev0 = {
+ .id = -1,
+ .name = "korina",
+ .resource = korina_dev0_res,
+ .num_resources = ARRAY_SIZE(korina_dev0_res),
+};
+
+static struct resource cf_slot0_res[] = {
+ {
+ .name = "cf_membase",
+ .flags = IORESOURCE_MEM
+ }, {
+ .name = "cf_irq",
+ .start = (8 + 4 * 32 + CF_GPIO_NUM), /* 149 */
+ .end = (8 + 4 * 32 + CF_GPIO_NUM),
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct gpiod_lookup_table cf_slot0_gpio_table = {
+ .dev_id = "pata-rb532-cf",
+ .table = {
+ GPIO_LOOKUP("gpio0", CF_GPIO_NUM,
+ NULL, GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
+static struct platform_device cf_slot0 = {
+ .id = -1,
+ .name = "pata-rb532-cf",
+ .resource = cf_slot0_res,
+ .num_resources = ARRAY_SIZE(cf_slot0_res),
+};
+
+/* Resources and device for NAND */
+static int rb532_dev_ready(struct nand_chip *chip)
+{
+ return gpio_get_value(GPIO_RDY);
+}
+
+static void rb532_cmd_ctrl(struct nand_chip *chip, int cmd, unsigned int ctrl)
+{
+ unsigned char orbits, nandbits;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
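+		/* map CLE/ALE onto latch U5 bits: orbits are set, nandbits cleared */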
+ orbits = (ctrl & NAND_CLE) << 1;
+ orbits |= (ctrl & NAND_ALE) >> 1;
+
+ nandbits = (~ctrl & NAND_CLE) << 1;
+ nandbits |= (~ctrl & NAND_ALE) >> 1;
+
+ set_latch_u5(orbits, nandbits);
+ }
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, chip->legacy.IO_ADDR_W);
+}
+
+static struct resource nand_slot0_res[] = {
+ [0] = {
+ .name = "nand_membase",
+ .flags = IORESOURCE_MEM
+ }
+};
+
+static struct platform_nand_data rb532_nand_data = {
+ .ctrl.dev_ready = rb532_dev_ready,
+ .ctrl.cmd_ctrl = rb532_cmd_ctrl,
+};
+
+static struct platform_device nand_slot0 = {
+ .name = "gen_nand",
+ .id = -1,
+ .resource = nand_slot0_res,
+ .num_resources = ARRAY_SIZE(nand_slot0_res),
+ .dev.platform_data = &rb532_nand_data,
+};
+
+static struct mtd_partition rb532_partition_info[] = {
+ {
+ .name = "Routerboard NAND boot",
+ .offset = 0,
+ .size = 4 * 1024 * 1024,
+ }, {
+ .name = "rootfs",
+ .offset = MTDPART_OFS_NXTBLK,
+ .size = MTDPART_SIZ_FULL,
+ }
+};
+
+static struct platform_device rb532_led = {
+ .name = "rb532-led",
+ .id = -1,
+};
+
+static struct platform_device rb532_button = {
+ .name = "rb532-button",
+ .id = -1,
+};
+
+static struct resource rb532_wdt_res[] = {
+ {
+ .name = "rb532_wdt_res",
+ .start = INTEG0_BASE_ADDR,
+ .end = INTEG0_BASE_ADDR + sizeof(struct integ),
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct platform_device rb532_wdt = {
+ .name = "rc32434_wdt",
+ .id = -1,
+ .resource = rb532_wdt_res,
+ .num_resources = ARRAY_SIZE(rb532_wdt_res),
+};
+
+static struct plat_serial8250_port rb532_uart_res[] = {
+ {
+ .type = PORT_16550A,
+ .membase = (char *)KSEG1ADDR(REGBASE + UART0BASE),
+ .irq = UART0_IRQ,
+ .regshift = 2,
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF,
+ },
+ {
+ .flags = 0,
+ }
+};
+
+static struct platform_device rb532_uart = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev.platform_data = &rb532_uart_res,
+};
+
+static struct platform_device *rb532_devs[] = {
+ &korina_dev0,
+ &nand_slot0,
+ &cf_slot0,
+ &rb532_led,
+ &rb532_button,
+ &rb532_uart,
+ &rb532_wdt
+};
+
+/* NAND definitions */
+#define NAND_CHIP_DELAY 25
+
+static void __init rb532_nand_setup(void)
+{
+ switch (mips_machtype) {
+ case MACH_MIKROTIK_RB532A:
+ set_latch_u5(LO_FOFF | LO_CEX,
+ LO_ULED | LO_ALE | LO_CLE | LO_WPX);
+ break;
+ default:
+ set_latch_u5(LO_WPX | LO_FOFF | LO_CEX,
+ LO_ULED | LO_ALE | LO_CLE);
+ break;
+ }
+
+ /* Setup NAND specific settings */
+ rb532_nand_data.chip.nr_chips = 1;
+ rb532_nand_data.chip.nr_partitions = ARRAY_SIZE(rb532_partition_info);
+ rb532_nand_data.chip.partitions = rb532_partition_info;
+ rb532_nand_data.chip.chip_delay = NAND_CHIP_DELAY;
+}
+
+
+static int __init plat_setup_devices(void)
+{
+ /* Look for the CF card reader */
+ if (!readl(IDT434_REG_BASE + DEV1MASK))
+ rb532_devs[2] = NULL; /* disable cf_slot0 at index 2 */
+ else {
+ cf_slot0_res[0].start =
+ readl(IDT434_REG_BASE + DEV1BASE);
+ cf_slot0_res[0].end = cf_slot0_res[0].start + 0x1000;
+ }
+
+ /* Read the NAND resources from the device controller */
+ nand_slot0_res[0].start = readl(IDT434_REG_BASE + DEV2BASE);
+ nand_slot0_res[0].end = nand_slot0_res[0].start + 0x1000;
+
+ /* Read and map device controller 3 */
+ dev3.base = ioremap(readl(IDT434_REG_BASE + DEV3BASE), 1);
+
+ if (!dev3.base) {
+ printk(KERN_ERR "rb532: cannot remap device controller 3\n");
+ return -ENXIO;
+ }
+
+ /* Initialise the NAND device */
+ rb532_nand_setup();
+
+ /* set the uart clock to the current cpu frequency */
+ rb532_uart_res[0].uartclk = idt_cpu_freq;
+
+ dev_set_drvdata(&korina_dev0.dev, &korina_dev0_data);
+
+ gpiod_add_lookup_table(&cf_slot0_gpio_table);
+ return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs));
+}
+
+#ifdef CONFIG_NET
+
+static int __init setup_kmac(char *s)
+{
+ printk(KERN_INFO "korina mac = %s\n", s);
+ if (!mac_pton(s, korina_dev0_data.mac))
+ printk(KERN_ERR "Invalid mac\n");
+ return 1;
+}
+
+__setup("kmac=", setup_kmac);
+
+#endif /* CONFIG_NET */
+
+arch_initcall(plat_setup_devices);
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c
new file mode 100644
index 000000000..94f02ada4
--- /dev/null
+++ b/arch/mips/rb532/gpio.c
@@ -0,0 +1,207 @@
+/*
+ * Miscellaneous functions for IDT EB434 board
+ *
+ * Copyright 2004 IDT Inc. (rischelp@idt.com)
+ * Copyright 2006 Phil Sutter <n0-1@freewrt.org>
+ * Copyright 2007 Florian Fainelli <florian@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/driver.h>
+
+#include <asm/mach-rc32434/rb.h>
+#include <asm/mach-rc32434/gpio.h>
+
+struct rb532_gpio_chip {
+ struct gpio_chip chip;
+ void __iomem *regbase;
+};
+
+static struct resource rb532_gpio_reg0_res[] = {
+ {
+ .name = "gpio_reg0",
+ .start = REGBASE + GPIOBASE,
+ .end = REGBASE + GPIOBASE + sizeof(struct rb532_gpio_reg) - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+/* rb532_set_bit - sanely set a bit
+ *
+ * bitval: new value for the bit
+ * offset: bit index in the 4 byte address range
+ * ioaddr: 4 byte aligned address being altered
+ */
+static inline void rb532_set_bit(unsigned bitval,
+ unsigned offset, void __iomem *ioaddr)
+{
+ unsigned long flags;
+ u32 val;
+
+ local_irq_save(flags);
+
+ val = readl(ioaddr);
+ val &= ~(!bitval << offset); /* unset bit if bitval == 0 */
+ val |= (!!bitval << offset); /* set bit if bitval == 1 */
+ writel(val, ioaddr);
+
+ local_irq_restore(flags);
+}
+
+/* rb532_get_bit - read a bit
+ *
+ * returns the boolean state of the bit, which may be > 1
+ */
+static inline int rb532_get_bit(unsigned offset, void __iomem *ioaddr)
+{
+ return readl(ioaddr) & (1 << offset);
+}
+
+/*
+ * Return GPIO level
+ */
+static int rb532_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct rb532_gpio_chip *gpch;
+
+ gpch = gpiochip_get_data(chip);
+ return !!rb532_get_bit(offset, gpch->regbase + GPIOD);
+}
+
+/*
+ * Set output GPIO level
+ */
+static void rb532_gpio_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct rb532_gpio_chip *gpch;
+
+ gpch = gpiochip_get_data(chip);
+ rb532_set_bit(value, offset, gpch->regbase + GPIOD);
+}
+
+/*
+ * Set GPIO direction to input
+ */
+static int rb532_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct rb532_gpio_chip *gpch;
+
+ gpch = gpiochip_get_data(chip);
+
+ /* disable alternate function in case it's set */
+ rb532_set_bit(0, offset, gpch->regbase + GPIOFUNC);
+
+ rb532_set_bit(0, offset, gpch->regbase + GPIOCFG);
+ return 0;
+}
+
+/*
+ * Set GPIO direction to output
+ */
+static int rb532_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct rb532_gpio_chip *gpch;
+
+ gpch = gpiochip_get_data(chip);
+
+ /* disable alternate function in case it's set */
+ rb532_set_bit(0, offset, gpch->regbase + GPIOFUNC);
+
+ /* set the initial output value */
+ rb532_set_bit(value, offset, gpch->regbase + GPIOD);
+
+ rb532_set_bit(1, offset, gpch->regbase + GPIOCFG);
+ return 0;
+}
+
+static int rb532_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+ return 8 + 4 * 32 + gpio;
+}
+
+static struct rb532_gpio_chip rb532_gpio_chip[] = {
+ [0] = {
+ .chip = {
+ .label = "gpio0",
+ .direction_input = rb532_gpio_direction_input,
+ .direction_output = rb532_gpio_direction_output,
+ .get = rb532_gpio_get,
+ .set = rb532_gpio_set,
+ .to_irq = rb532_gpio_to_irq,
+ .base = 0,
+ .ngpio = 32,
+ },
+ },
+};
+
+/*
+ * Set GPIO interrupt level
+ */
+void rb532_gpio_set_ilevel(int bit, unsigned gpio)
+{
+ rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOILEVEL);
+}
+EXPORT_SYMBOL(rb532_gpio_set_ilevel);
+
+/*
+ * Set GPIO interrupt status
+ */
+void rb532_gpio_set_istat(int bit, unsigned gpio)
+{
+ rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOISTAT);
+}
+EXPORT_SYMBOL(rb532_gpio_set_istat);
+
+/*
+ * Configure GPIO alternate function
+ */
+void rb532_gpio_set_func(unsigned gpio)
+{
+ rb532_set_bit(1, gpio, rb532_gpio_chip->regbase + GPIOFUNC);
+}
+EXPORT_SYMBOL(rb532_gpio_set_func);
+
+int __init rb532_gpio_init(void)
+{
+ struct resource *r;
+
+ r = rb532_gpio_reg0_res;
+ rb532_gpio_chip->regbase = ioremap(r->start, resource_size(r));
+
+ if (!rb532_gpio_chip->regbase) {
+ printk(KERN_ERR "rb532: cannot remap GPIO register 0\n");
+ return -ENXIO;
+ }
+
+ /* Register our GPIO chip */
+ gpiochip_add_data(&rb532_gpio_chip->chip, rb532_gpio_chip);
+
+ return 0;
+}
+arch_initcall(rb532_gpio_init);
diff --git a/arch/mips/rb532/irq.c b/arch/mips/rb532/irq.c
new file mode 100644
index 000000000..25cc250f2
--- /dev/null
+++ b/arch/mips/rb532/irq.c
@@ -0,0 +1,234 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * stevel@mvista.com or source@mvista.com
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/timex.h>
+#include <linux/random.h>
+#include <linux/delay.h>
+
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+#include <asm/mipsregs.h>
+
+#include <asm/mach-rc32434/irq.h>
+#include <asm/mach-rc32434/gpio.h>
+
+struct intr_group {
+ u32 mask; /* mask of valid bits in pending/mask registers */
+ volatile u32 *base_addr;
+};
+
+#define RC32434_NR_IRQS (GROUP4_IRQ_BASE + 32)
+
+#if (NR_IRQS < RC32434_NR_IRQS)
+#error Too few IRQs defined. Did you override <asm/irq.h>?
+#endif
+
+static const struct intr_group intr_group[NUM_INTR_GROUPS] = {
+ {
+ .mask = 0x0000efff,
+ .base_addr = (u32 *) KSEG1ADDR(IC_GROUP0_PEND + 0 * IC_GROUP_OFFSET)},
+ {
+ .mask = 0x00001fff,
+ .base_addr = (u32 *) KSEG1ADDR(IC_GROUP0_PEND + 1 * IC_GROUP_OFFSET)},
+ {
+ .mask = 0x00000007,
+ .base_addr = (u32 *) KSEG1ADDR(IC_GROUP0_PEND + 2 * IC_GROUP_OFFSET)},
+ {
+ .mask = 0x0003ffff,
+ .base_addr = (u32 *) KSEG1ADDR(IC_GROUP0_PEND + 3 * IC_GROUP_OFFSET)},
+ {
+ .mask = 0xffffffff,
+ .base_addr = (u32 *) KSEG1ADDR(IC_GROUP0_PEND + 4 * IC_GROUP_OFFSET)}
+};
+
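+/* the group mask register sits two 32-bit words after the pending register */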
+#define READ_PEND(base) (*(base))
+#define READ_MASK(base) (*(base + 2))
+#define WRITE_MASK(base, val) (*(base + 2) = (val))
+
+static inline int irq_to_group(unsigned int irq_nr)
+{
+ return (irq_nr - GROUP0_IRQ_BASE) >> 5;
+}
+
+static inline int group_to_ip(unsigned int group)
+{
+ return group + 2;
+}
+
+static inline void enable_local_irq(unsigned int ip)
+{
+ int ipnum = 0x100 << ip;
+
+ set_c0_status(ipnum);
+}
+
+static inline void disable_local_irq(unsigned int ip)
+{
+ int ipnum = 0x100 << ip;
+
+ clear_c0_status(ipnum);
+}
+
+static inline void ack_local_irq(unsigned int ip)
+{
+ int ipnum = 0x100 << ip;
+
+ clear_c0_cause(ipnum);
+}
+
+static void rb532_enable_irq(struct irq_data *d)
+{
+ unsigned int group, intr_bit, irq_nr = d->irq;
+ int ip = irq_nr - GROUP0_IRQ_BASE;
+ volatile unsigned int *addr;
+
+ if (ip < 0)
+ enable_local_irq(irq_nr);
+ else {
+ group = ip >> 5;
+
+ ip &= (1 << 5) - 1;
+ intr_bit = 1 << ip;
+
+ enable_local_irq(group_to_ip(group));
+
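+		/* a cleared bit in the group mask register unmasks the line */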
+ addr = intr_group[group].base_addr;
+ WRITE_MASK(addr, READ_MASK(addr) & ~intr_bit);
+ }
+}
+
+static void rb532_disable_irq(struct irq_data *d)
+{
+ unsigned int group, intr_bit, mask, irq_nr = d->irq;
+ int ip = irq_nr - GROUP0_IRQ_BASE;
+ volatile unsigned int *addr;
+
+ if (ip < 0) {
+ disable_local_irq(irq_nr);
+ } else {
+ group = ip >> 5;
+
+ ip &= (1 << 5) - 1;
+ intr_bit = 1 << ip;
+ addr = intr_group[group].base_addr;
+ mask = READ_MASK(addr);
+ mask |= intr_bit;
+ WRITE_MASK(addr, mask);
+
+ /* There is a maximum of 14 GPIO interrupts */
+ if (group == GPIO_MAPPED_IRQ_GROUP && irq_nr <= (GROUP4_IRQ_BASE + 13))
+ rb532_gpio_set_istat(0, irq_nr - GPIO_MAPPED_IRQ_BASE);
+
+ /*
+ * if there are no more interrupts enabled in this
+ * group, disable corresponding IP
+ */
+ if (mask == intr_group[group].mask)
+ disable_local_irq(group_to_ip(group));
+ }
+}
+
+static void rb532_mask_and_ack_irq(struct irq_data *d)
+{
+ rb532_disable_irq(d);
+ ack_local_irq(group_to_ip(irq_to_group(d->irq)));
+}
+
+static int rb532_set_type(struct irq_data *d, unsigned type)
+{
+ int gpio = d->irq - GPIO_MAPPED_IRQ_BASE;
+ int group = irq_to_group(d->irq);
+
+ if (group != GPIO_MAPPED_IRQ_GROUP || d->irq > (GROUP4_IRQ_BASE + 13))
+ return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ rb532_gpio_set_ilevel(1, gpio);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ rb532_gpio_set_ilevel(0, gpio);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct irq_chip rc32434_irq_type = {
+ .name = "RB532",
+ .irq_ack = rb532_disable_irq,
+ .irq_mask = rb532_disable_irq,
+ .irq_mask_ack = rb532_mask_and_ack_irq,
+ .irq_unmask = rb532_enable_irq,
+ .irq_set_type = rb532_set_type,
+};
+
+void __init arch_init_irq(void)
+{
+ int i;
+
+	pr_info("Initializing IRQs: %d out of %d\n", RC32434_NR_IRQS, NR_IRQS);
+
+ for (i = 0; i < RC32434_NR_IRQS; i++)
+ irq_set_chip_and_handler(i, &rc32434_irq_type,
+ handle_level_irq);
+}
+
+/* Main Interrupt dispatcher */
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned int ip, pend, group;
+ volatile unsigned int *addr;
+ unsigned int cp0_cause = read_c0_cause() & read_c0_status();
+
+ if (cp0_cause & CAUSEF_IP7) {
+ do_IRQ(7);
+ } else {
+ ip = (cp0_cause & 0x7c00);
+ if (ip) {
+ group = 21 + (fls(ip) - 32);
+
+ addr = intr_group[group].base_addr;
+
+ pend = READ_PEND(addr);
+ pend &= ~READ_MASK(addr); /* only unmasked interrupts */
+ pend = 39 + (fls(pend) - 32);
+ do_IRQ((group << 5) + pend);
+ }
+ }
+}
diff --git a/arch/mips/rb532/prom.c b/arch/mips/rb532/prom.c
new file mode 100644
index 000000000..a9d1f2019
--- /dev/null
+++ b/arch/mips/rb532/prom.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RouterBoard 500 specific prom routines
+ *
+ * Copyright (C) 2003, Peter Sadik <peter.sadik@idt.com>
+ * Copyright (C) 2005-2006, P.Christeas <p_christ@hol.gr>
+ * Copyright (C) 2007, Gabor Juhos <juhosg@openwrt.org>
+ * Felix Fietkau <nbd@openwrt.org>
+ * Florian Fainelli <florian@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/console.h>
+#include <linux/memblock.h>
+#include <linux/ioport.h>
+#include <linux/blkdev.h>
+
+#include <asm/bootinfo.h>
+#include <asm/mach-rc32434/ddr.h>
+#include <asm/mach-rc32434/prom.h>
+
+unsigned int idt_cpu_freq = 132000000;
+EXPORT_SYMBOL(idt_cpu_freq);
+
+static struct resource ddr_reg[] = {
+ {
+ .name = "ddr-reg",
+ .start = DDR0_PHYS_ADDR,
+ .end = DDR0_PHYS_ADDR + sizeof(struct ddr_ram),
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+void __init prom_free_prom_memory(void)
+{
+ /* No prom memory to free */
+}
+
+static inline int match_tag(char *arg, const char *tag)
+{
+ return strncmp(arg, tag, strlen(tag)) == 0;
+}
+
+static inline unsigned long tag2ul(char *arg, const char *tag)
+{
+ char *num;
+
+ num = arg + strlen(tag);
+ return simple_strtoul(num, 0, 10);
+}
+
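+/*
+ * Rebuild the kernel command line from the bootloader's argv: the frequency
+ * and board tags are picked out and interpreted, everything else is copied
+ * verbatim, and whatever ARCS already put into arcs_cmdline is appended.
+ */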
+void __init prom_setup_cmdline(void)
+{
+ static char cmd_line[COMMAND_LINE_SIZE] __initdata;
+ char *cp, *board;
+ int prom_argc;
+ char **prom_argv;
+ int i;
+
+ prom_argc = fw_arg0;
+ prom_argv = (char **) fw_arg1;
+
+ cp = cmd_line;
+ /* Note: it is common that parameters start
+ * at argv[1] and not argv[0],
+ * however, our elf loader starts at [0] */
+ for (i = 0; i < prom_argc; i++) {
+ if (match_tag(prom_argv[i], FREQ_TAG)) {
+ idt_cpu_freq = tag2ul(prom_argv[i], FREQ_TAG);
+ continue;
+ }
+#ifdef IGNORE_CMDLINE_MEM
+ /* parses out the "mem=xx" arg */
+ if (match_tag(prom_argv[i], MEM_TAG))
+ continue;
+#endif
+ if (i > 0)
+ *(cp++) = ' ';
+ if (match_tag(prom_argv[i], BOARD_TAG)) {
+ board = prom_argv[i] + strlen(BOARD_TAG);
+
+ if (match_tag(board, BOARD_RB532A))
+ mips_machtype = MACH_MIKROTIK_RB532A;
+ else
+ mips_machtype = MACH_MIKROTIK_RB532;
+ }
+
+ strcpy(cp, prom_argv[i]);
+ cp += strlen(prom_argv[i]);
+ }
+ *(cp++) = ' ';
+
+ i = strlen(arcs_cmdline);
+ if (i > 0) {
+ *(cp++) = ' ';
+ strcpy(cp, arcs_cmdline);
+ cp += strlen(arcs_cmdline);
+ }
+ cmd_line[COMMAND_LINE_SIZE - 1] = '\0';
+
+ strcpy(arcs_cmdline, cmd_line);
+}
+
+void __init prom_init(void)
+{
+ struct ddr_ram __iomem *ddr;
+ phys_addr_t memsize;
+ phys_addr_t ddrbase;
+
+ ddr = ioremap(ddr_reg[0].start,
+ ddr_reg[0].end - ddr_reg[0].start);
+
+ if (!ddr) {
+ printk(KERN_ERR "Unable to remap DDR register\n");
+ return;
+ }
+
+ ddrbase = (phys_addr_t)&ddr->ddrbase;
+ memsize = (phys_addr_t)&ddr->ddrmask;
+ memsize = 0 - memsize;
+
+ prom_setup_cmdline();
+
+ /* give all RAM to boot allocator,
+ * except for the first 0x400 and the last 0x200 bytes */
+ memblock_add(ddrbase + 0x400, memsize - 0x600);
+}
diff --git a/arch/mips/rb532/serial.c b/arch/mips/rb532/serial.c
new file mode 100644
index 000000000..70482540b
--- /dev/null
+++ b/arch/mips/rb532/serial.c
@@ -0,0 +1,54 @@
+/*
+ * BRIEF MODULE DESCRIPTION
+ * Serial port initialisation.
+ *
+ * Copyright 2004 IDT Inc. (rischelp@idt.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/serial_core.h>
+#include <linux/serial_8250.h>
+#include <linux/irq.h>
+
+#include <asm/serial.h>
+#include <asm/mach-rc32434/rb.h>
+
+extern unsigned int idt_cpu_freq;
+
+static struct uart_port rb532_uart = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 0,
+ .irq = UART0_IRQ,
+ .iotype = UPIO_MEM,
+ .membase = (char *)KSEG1ADDR(REGBASE + UART0BASE),
+ .regshift = 2
+};
+
+int __init setup_serial_port(void)
+{
+ rb532_uart.uartclk = idt_cpu_freq;
+
+ return early_serial_setup(&rb532_uart);
+}
+arch_initcall(setup_serial_port);
diff --git a/arch/mips/rb532/setup.c b/arch/mips/rb532/setup.c
new file mode 100644
index 000000000..51af9d374
--- /dev/null
+++ b/arch/mips/rb532/setup.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * setup.c - boot time setup code
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+
+#include <asm/bootinfo.h>
+#include <asm/reboot.h>
+#include <asm/time.h>
+#include <linux/ioport.h>
+
+#include <asm/mach-rc32434/rb.h>
+#include <asm/mach-rc32434/pci.h>
+
+struct pci_reg __iomem *pci_reg;
+EXPORT_SYMBOL(pci_reg);
+
+static struct resource pci0_res[] = {
+ {
+ .name = "pci_reg0",
+ .start = PCI0_BASE_ADDR,
+ .end = PCI0_BASE_ADDR + sizeof(struct pci_reg),
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static void rb_machine_restart(char *command)
+{
+ /* just jump to the reset vector */
+ writel(0x80000001, IDT434_REG_BASE + RST);
+ ((void (*)(void)) KSEG1ADDR(0x1FC00000u))();
+}
+
+static void rb_machine_halt(void)
+{
+ for (;;)
+ continue;
+}
+
+void __init plat_mem_setup(void)
+{
+ u32 val;
+
+ _machine_restart = rb_machine_restart;
+ _machine_halt = rb_machine_halt;
+ pm_power_off = rb_machine_halt;
+
+ set_io_port_base(KSEG1);
+
+ pci_reg = ioremap(pci0_res[0].start,
+ pci0_res[0].end - pci0_res[0].start);
+ if (!pci_reg) {
+ printk(KERN_ERR "Could not remap PCI registers\n");
+ return;
+ }
+
+ val = __raw_readl(&pci_reg->pcic);
+ val &= 0xFFFFFF7;
+ __raw_writel(val, (void *)&pci_reg->pcic);
+
+#ifdef CONFIG_PCI
+ /* Enable PCI interrupts in EPLD Mask register */
+ *epld_mask = 0x0;
+ *(epld_mask + 1) = 0x0;
+#endif
+ write_c0_wired(0);
+}
+
+const char *get_system_type(void)
+{
+ switch (mips_machtype) {
+ case MACH_MIKROTIK_RB532A:
+ return "Mikrotik RB532A";
+ break;
+ default:
+ return "Mikrotik RB532";
+ break;
+ }
+}
diff --git a/arch/mips/rb532/time.c b/arch/mips/rb532/time.c
new file mode 100644
index 000000000..68713dd32
--- /dev/null
+++ b/arch/mips/rb532/time.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Setting up the clock on the MIPS boards.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mc146818rtc.h>
+#include <linux/irq.h>
+#include <linux/timex.h>
+
+#include <asm/mipsregs.h>
+#include <asm/time.h>
+#include <asm/mach-rc32434/rc32434.h>
+
+extern unsigned int idt_cpu_freq;
+
+/*
+ * Figure out the r4k offset, the amount to increment the compare
+ * register for each time tick. There is no RTC available.
+ *
+ * The RC32434 counts at half the CPU *core* speed.
+ */
+static unsigned long __init cal_r4koff(void)
+{
+ mips_hpt_frequency = idt_cpu_freq * IDT_CLOCK_MULT / 2;
+
+ return mips_hpt_frequency / HZ;
+}
+
+void __init plat_time_init(void)
+{
+ unsigned int est_freq;
+ unsigned long flags, r4k_offset;
+
+ local_irq_save(flags);
+
+ printk(KERN_INFO "calculating r4koff... ");
+ r4k_offset = cal_r4koff();
+ printk("%08lx(%d)\n", r4k_offset, (int) r4k_offset);
+
+ est_freq = 2 * r4k_offset * HZ;
+ est_freq += 5000; /* round */
+ est_freq -= est_freq % 10000;
+ printk(KERN_INFO "CPU frequency %d.%02d MHz\n", est_freq / 1000000,
+ (est_freq % 1000000) * 100 / 1000000);
+ local_irq_restore(flags);
+}
diff --git a/arch/mips/sgi-ip22/Makefile b/arch/mips/sgi-ip22/Makefile
new file mode 100644
index 000000000..45f42fa08
--- /dev/null
+++ b/arch/mips/sgi-ip22/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the SGI specific kernel interface routines
+# under Linux.
+#
+
+obj-y += ip22-mc.o ip22-hpc.o ip22-int.o ip22-time.o ip22-nvram.o \
+ ip22-platform.o ip22-reset.o ip22-setup.o ip22-gio.o
+
+obj-$(CONFIG_SGI_IP22) += ip22-berr.o
+obj-$(CONFIG_SGI_IP28) += ip28-berr.o
+obj-$(CONFIG_EISA) += ip22-eisa.o
diff --git a/arch/mips/sgi-ip22/Platform b/arch/mips/sgi-ip22/Platform
new file mode 100644
index 000000000..62fa30bb9
--- /dev/null
+++ b/arch/mips/sgi-ip22/Platform
@@ -0,0 +1,32 @@
+#
+# SGI IP22 (Indy/Indigo2)
+#
+# Set the load address to >= 0xffffffff88069000 if you want to leave space for
+# symmon, 0xffffffff80002000 for production kernels. Note that the value must
+# be aligned to a multiple of the kernel stack size or the handling of the
+# current variable will break, so for 64-bit kernels we have to raise the
+# start address by 8kb.
+#
+cflags-$(CONFIG_SGI_IP22) += -I$(srctree)/arch/mips/include/asm/mach-ip22
+ifdef CONFIG_32BIT
+load-$(CONFIG_SGI_IP22) += 0xffffffff88002000
+endif
+ifdef CONFIG_64BIT
+load-$(CONFIG_SGI_IP22) += 0xffffffff88004000
+endif
+
+#
+# SGI IP28 (Indigo2 R10k)
+#
+# Set the load address to >= 0xa800000020080000 if you want to leave space for
+# symmon, 0xa800000020004000 for production kernels? Note that the value must
+# be 16kb aligned or the handling of the current variable will break.
+# Simplified: what IP22 does at 128MB+ in ksegN, IP28 does at 512MB+ in xkphys
+#
+ifdef CONFIG_SGI_IP28
+ ifeq ($(call cc-option-yn,-march=r10000 -mr10k-cache-barrier=store), n)
+ $(error gcc doesn't support needed option -mr10k-cache-barrier=store)
+ endif
+endif
+cflags-$(CONFIG_SGI_IP28) += -mr10k-cache-barrier=store -I$(srctree)/arch/mips/include/asm/mach-ip28
+load-$(CONFIG_SGI_IP28) += 0xa800000020004000
diff --git a/arch/mips/sgi-ip22/ip22-berr.c b/arch/mips/sgi-ip22/ip22-berr.c
new file mode 100644
index 000000000..dc0110a60
--- /dev/null
+++ b/arch/mips/sgi-ip22/ip22-berr.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip22-berr.c: Bus error handling.
+ *
+ * Copyright (C) 2002, 2003 Ladislav Michl (ladis@linux-mips.org)
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+
+#include <asm/addrspace.h>
+#include <asm/traps.h>
+#include <asm/branch.h>
+#include <asm/irq_regs.h>
+#include <asm/sgi/mc.h>
+#include <asm/sgi/hpc3.h>
+#include <asm/sgi/ioc.h>
+#include <asm/sgi/ip22.h>
+
+
+static unsigned int cpu_err_stat; /* Status reg for CPU */
+static unsigned int gio_err_stat; /* Status reg for GIO */
+static unsigned int cpu_err_addr; /* Error address reg for CPU */
+static unsigned int gio_err_addr; /* Error address reg for GIO */
+static unsigned int extio_stat;
+static unsigned int hpc3_berr_stat; /* Bus error interrupt status */
+
+static void save_and_clear_buserr(void)
+{
+ /* save status registers */
+ cpu_err_addr = sgimc->cerr;
+ cpu_err_stat = sgimc->cstat;
+ gio_err_addr = sgimc->gerr;
+ gio_err_stat = sgimc->gstat;
+ extio_stat = ip22_is_fullhouse() ? sgioc->extio : (sgint->errstat << 4);
+ hpc3_berr_stat = hpc3c0->bestat;
+
+ sgimc->cstat = sgimc->gstat = 0;
+}
+
+#define GIO_ERRMASK 0xff00
+#define CPU_ERRMASK 0x3f00
+
+static void print_buserr(void)
+{
+ if (extio_stat & EXTIO_MC_BUSERR)
+ printk(KERN_ERR "MC Bus Error\n");
+ if (extio_stat & EXTIO_HPC3_BUSERR)
+ printk(KERN_ERR "HPC3 Bus Error 0x%x:<id=0x%x,%s,lane=0x%x>\n",
+ hpc3_berr_stat,
+ (hpc3_berr_stat & HPC3_BESTAT_PIDMASK) >>
+ HPC3_BESTAT_PIDSHIFT,
+ (hpc3_berr_stat & HPC3_BESTAT_CTYPE) ? "PIO" : "DMA",
+ hpc3_berr_stat & HPC3_BESTAT_BLMASK);
+ if (extio_stat & EXTIO_EISA_BUSERR)
+ printk(KERN_ERR "EISA Bus Error\n");
+ if (cpu_err_stat & CPU_ERRMASK)
+ printk(KERN_ERR "CPU error 0x%x<%s%s%s%s%s%s> @ 0x%08x\n",
+ cpu_err_stat,
+ cpu_err_stat & SGIMC_CSTAT_RD ? "RD " : "",
+ cpu_err_stat & SGIMC_CSTAT_PAR ? "PAR " : "",
+ cpu_err_stat & SGIMC_CSTAT_ADDR ? "ADDR " : "",
+ cpu_err_stat & SGIMC_CSTAT_SYSAD_PAR ? "SYSAD " : "",
+ cpu_err_stat & SGIMC_CSTAT_SYSCMD_PAR ? "SYSCMD " : "",
+ cpu_err_stat & SGIMC_CSTAT_BAD_DATA ? "BAD_DATA " : "",
+ cpu_err_addr);
+ if (gio_err_stat & GIO_ERRMASK)
+ printk(KERN_ERR "GIO error 0x%x:<%s%s%s%s%s%s%s%s> @ 0x%08x\n",
+ gio_err_stat,
+ gio_err_stat & SGIMC_GSTAT_RD ? "RD " : "",
+ gio_err_stat & SGIMC_GSTAT_WR ? "WR " : "",
+ gio_err_stat & SGIMC_GSTAT_TIME ? "TIME " : "",
+ gio_err_stat & SGIMC_GSTAT_PROM ? "PROM " : "",
+ gio_err_stat & SGIMC_GSTAT_ADDR ? "ADDR " : "",
+ gio_err_stat & SGIMC_GSTAT_BC ? "BC " : "",
+ gio_err_stat & SGIMC_GSTAT_PIO_RD ? "PIO_RD " : "",
+ gio_err_stat & SGIMC_GSTAT_PIO_WR ? "PIO_WR " : "",
+ gio_err_addr);
+}
+
+/*
+ * MC sends an interrupt whenever bus or parity errors occur. In addition,
+ * if the error happened during a CPU read, it also asserts the bus error
+ * pin on the R4K. The bus error handler code saves the MC bus error
+ * registers and then clears the interrupt when this happens.
+ */
+
+void ip22_be_interrupt(int irq)
+{
+ const int field = 2 * sizeof(unsigned long);
+ struct pt_regs *regs = get_irq_regs();
+
+ save_and_clear_buserr();
+ print_buserr();
+ printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
+ (regs->cp0_cause & 4) ? "Data" : "Instruction",
+ field, regs->cp0_epc, field, regs->regs[31]);
+ /* Assume it would be too dangerous to continue ... */
+ die_if_kernel("Oops", regs);
+ force_sig(SIGBUS);
+}
+
+static int ip22_be_handler(struct pt_regs *regs, int is_fixup)
+{
+ save_and_clear_buserr();
+ if (is_fixup)
+ return MIPS_BE_FIXUP;
+ print_buserr();
+ return MIPS_BE_FATAL;
+}
+
+void __init ip22_be_init(void)
+{
+ board_be_handler = ip22_be_handler;
+}
diff --git a/arch/mips/sgi-ip22/ip22-eisa.c b/arch/mips/sgi-ip22/ip22-eisa.c
new file mode 100644
index 000000000..f3b0e90e0
--- /dev/null
+++ b/arch/mips/sgi-ip22/ip22-eisa.c
@@ -0,0 +1,139 @@
+/*
+ * Basic EISA bus support for the SGI Indigo-2.
+ *
+ * (C) 2002 Pascal Dameme <netinet@freesurf.fr>
+ * and Marc Zyngier <mzyngier@freesurf.fr>
+ *
+ * This code is released under both the GPL version 2 and BSD
+ * licenses. Either license may be used.
+ *
+ * This code offers a very basic support for this EISA bus present in
+ * the SGI Indigo-2. It currently only supports PIO (forget about DMA
+ * for the time being). This is enough for a low-end ethernet card,
+ * but forget about your favorite SCSI card...
+ *
+ * TODO :
+ * - Fix bugs...
+ * - Add ISA support
+ * - Add DMA (yeah, right...).
+ * - Fix more bugs.
+ */
+
+#include <linux/eisa.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mipsregs.h>
+#include <asm/addrspace.h>
+#include <asm/processor.h>
+#include <asm/sgi/ioc.h>
+#include <asm/sgi/mc.h>
+#include <asm/sgi/ip22.h>
+#include <asm/i8259.h>
+
+/* I2 has four EISA slots. */
+#define IP22_EISA_MAX_SLOTS 4
+#define EISA_MAX_IRQ 16
+
+#define EIU_MODE_REG 0x0001ffc0
+#define EIU_STAT_REG 0x0001ffc4
+#define EIU_PREMPT_REG 0x0001ffc8
+#define EIU_QUIET_REG 0x0001ffcc
+#define EIU_INTRPT_ACK 0x00010004
+
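+/*
+ * EISA IDs pack a three-letter vendor code into 15 bits (5 bits per letter,
+ * 'A' encoded as 1) followed by a 16-bit product/revision word; a set top
+ * bit in the first ID byte means no card is present in the slot.
+ */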
+static char __init *decode_eisa_sig(unsigned long addr)
+{
+ static char sig_str[EISA_SIG_LEN] __initdata;
+ u8 sig[4];
+ u16 rev;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ sig[i] = inb(addr + i);
+
+ if (!i && (sig[0] & 0x80))
+ return NULL;
+ }
+
+ sig_str[0] = ((sig[0] >> 2) & 0x1f) + ('A' - 1);
+ sig_str[1] = (((sig[0] & 3) << 3) | (sig[1] >> 5)) + ('A' - 1);
+ sig_str[2] = (sig[1] & 0x1f) + ('A' - 1);
+ rev = (sig[2] << 8) | sig[3];
+ sprintf(sig_str + 3, "%04X", rev);
+
+ return sig_str;
+}
+
+static irqreturn_t ip22_eisa_intr(int irq, void *dev_id)
+{
+ u8 eisa_irq = inb(EIU_INTRPT_ACK);
+
+ inb(EISA_DMA1_STATUS);
+ inb(EISA_DMA2_STATUS);
+
+ if (eisa_irq < EISA_MAX_IRQ) {
+ do_IRQ(eisa_irq);
+ return IRQ_HANDLED;
+ }
+
+ /* Oops, Bad Stuff Happened... */
+	printk(KERN_ERR "eisa_irq %d out of bounds\n", eisa_irq);
+
+ outb(0x20, EISA_INT2_CTRL);
+ outb(0x20, EISA_INT1_CTRL);
+
+ return IRQ_NONE;
+}
+
+int __init ip22_eisa_init(void)
+{
+ int i, c;
+ char *str;
+
+ if (!(sgimc->systemid & SGIMC_SYSID_EPRESENT)) {
+ printk(KERN_INFO "EISA: bus not present.\n");
+ return 1;
+ }
+
+ printk(KERN_INFO "EISA: Probing bus...\n");
+ for (c = 0, i = 1; i <= IP22_EISA_MAX_SLOTS; i++) {
+ if ((str = decode_eisa_sig(0x1000 * i + EISA_VENDOR_ID_OFFSET))) {
+ printk(KERN_INFO "EISA: slot %d : %s detected.\n",
+ i, str);
+ c++;
+ }
+ }
+ printk(KERN_INFO "EISA: Detected %d card%s.\n", c, c < 2 ? "" : "s");
+#ifdef CONFIG_ISA
+ printk(KERN_INFO "ISA support compiled in.\n");
+#endif
+
+ /* Warning : BlackMagicAhead(tm).
+ Please wave your favorite dead chicken over the busses */
+
+ /* First say hello to the EIU */
+ outl(0x0000FFFF, EIU_PREMPT_REG);
+ outl(1, EIU_QUIET_REG);
+ outl(0x40f3c07F, EIU_MODE_REG);
+
+ /* Now be nice to the EISA chipset */
+ outb(1, EISA_EXT_NMI_RESET_CTRL);
+ udelay(50); /* Wait long enough for the dust to settle */
+ outb(0, EISA_EXT_NMI_RESET_CTRL);
+ outb(0, EISA_DMA2_WRITE_SINGLE);
+
+ init_i8259_irqs();
+
+ if (request_irq(SGI_EISA_IRQ, ip22_eisa_intr, 0, "EISA", NULL))
+ pr_err("Failed to request irq %d (EISA)\n", SGI_EISA_IRQ);
+
+ EISA_bus = 1;
+ return 0;
+}
diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c
new file mode 100644
index 000000000..de0768a49
--- /dev/null
+++ b/arch/mips/sgi-ip22/ip22-gio.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#include <asm/addrspace.h>
+#include <asm/paccess.h>
+#include <asm/gio_device.h>
+#include <asm/sgi/gio.h>
+#include <asm/sgi/hpc3.h>
+#include <asm/sgi/mc.h>
+#include <asm/sgi/ip22.h>
+
+static struct bus_type gio_bus_type;
+
+static struct {
+ const char *name;
+ __u8 id;
+} gio_name_table[] = {
+ { .name = "SGI Impact", .id = 0x10 },
+ { .name = "Phobos G160", .id = 0x35 },
+ { .name = "Phobos G130", .id = 0x36 },
+ { .name = "Phobos G100", .id = 0x37 },
+ { .name = "Set Engineering GFE", .id = 0x38 },
+ /* fake IDs */
+ { .name = "SGI Newport", .id = 0x7e },
+ { .name = "SGI GR2/GR3", .id = 0x7f },
+};
+
+static void gio_bus_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static struct device gio_bus = {
+ .init_name = "gio",
+ .release = &gio_bus_release,
+};
+
+/**
+ * gio_match_device - Tell if a gio_device structure has a matching
+ *                    gio_device_id structure
+ * @match: array of gio device id structures to search in
+ * @dev: the gio device structure to match against
+ *
+ * Used by a driver to check whether a gio_device present in the
+ * system is in its list of supported devices.
+ */
+static const struct gio_device_id *
+gio_match_device(const struct gio_device_id *match,
+ const struct gio_device *dev)
+{
+ const struct gio_device_id *ids;
+
+ for (ids = match; ids->id != 0xff; ids++)
+ if (ids->id == dev->id.id)
+ return ids;
+
+ return NULL;
+}
+
+struct gio_device *gio_dev_get(struct gio_device *dev)
+{
+ struct device *tmp;
+
+ if (!dev)
+ return NULL;
+ tmp = get_device(&dev->dev);
+ if (tmp)
+ return to_gio_device(tmp);
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(gio_dev_get);
+
+void gio_dev_put(struct gio_device *dev)
+{
+ if (dev)
+ put_device(&dev->dev);
+}
+EXPORT_SYMBOL_GPL(gio_dev_put);
+
+/**
+ * gio_release_dev - free a gio device structure when all users of it are finished.
+ * @dev: device that's been disconnected
+ *
+ * Will be called only by the device core when all users of this gio device are
+ * done.
+ */
+void gio_release_dev(struct device *dev)
+{
+ struct gio_device *giodev;
+
+ giodev = to_gio_device(dev);
+ kfree(giodev);
+}
+EXPORT_SYMBOL_GPL(gio_release_dev);
+
+int gio_device_register(struct gio_device *giodev)
+{
+ giodev->dev.bus = &gio_bus_type;
+ giodev->dev.parent = &gio_bus;
+ return device_register(&giodev->dev);
+}
+EXPORT_SYMBOL_GPL(gio_device_register);
+
+void gio_device_unregister(struct gio_device *giodev)
+{
+ device_unregister(&giodev->dev);
+}
+EXPORT_SYMBOL_GPL(gio_device_unregister);
+
+static int gio_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct gio_device *gio_dev = to_gio_device(dev);
+ struct gio_driver *gio_drv = to_gio_driver(drv);
+
+ return gio_match_device(gio_drv->id_table, gio_dev) != NULL;
+}
+
+static int gio_device_probe(struct device *dev)
+{
+ int error = -ENODEV;
+ struct gio_driver *drv;
+ struct gio_device *gio_dev;
+ const struct gio_device_id *match;
+
+ drv = to_gio_driver(dev->driver);
+ gio_dev = to_gio_device(dev);
+
+ if (!drv->probe)
+ return error;
+
+ gio_dev_get(gio_dev);
+
+ match = gio_match_device(drv->id_table, gio_dev);
+ if (match)
+ error = drv->probe(gio_dev, match);
+ if (error)
+ gio_dev_put(gio_dev);
+
+ return error;
+}
+
+static int gio_device_remove(struct device *dev)
+{
+ struct gio_device *gio_dev = to_gio_device(dev);
+ struct gio_driver *drv = to_gio_driver(dev->driver);
+
+ if (dev->driver && drv->remove)
+ drv->remove(gio_dev);
+ return 0;
+}
+
+static void gio_device_shutdown(struct device *dev)
+{
+ struct gio_device *gio_dev = to_gio_device(dev);
+ struct gio_driver *drv = to_gio_driver(dev->driver);
+
+ if (dev->driver && drv->shutdown)
+ drv->shutdown(gio_dev);
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct gio_device *gio_dev = to_gio_device(dev);
+ int len = snprintf(buf, PAGE_SIZE, "gio:%x\n", gio_dev->id.id);
+
+ return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gio_device *giodev;
+
+ giodev = to_gio_device(dev);
+ return sprintf(buf, "%s", giodev->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gio_device *giodev;
+
+ giodev = to_gio_device(dev);
+ return sprintf(buf, "%x", giodev->id.id);
+}
+static DEVICE_ATTR_RO(id);
+
+static struct attribute *gio_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ &dev_attr_name.attr,
+ &dev_attr_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(gio_dev);
+
+static int gio_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct gio_device *gio_dev = to_gio_device(dev);
+
+ add_uevent_var(env, "MODALIAS=gio:%x", gio_dev->id.id);
+ return 0;
+}
+
+int gio_register_driver(struct gio_driver *drv)
+{
+ /* initialize common driver fields */
+ if (!drv->driver.name)
+ drv->driver.name = drv->name;
+ if (!drv->driver.owner)
+ drv->driver.owner = drv->owner;
+ drv->driver.bus = &gio_bus_type;
+
+ /* register with core */
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(gio_register_driver);
+
+void gio_unregister_driver(struct gio_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(gio_unregister_driver);
+
+void gio_set_master(struct gio_device *dev)
+{
+ u32 tmp = sgimc->giopar;
+
+ switch (dev->slotno) {
+ case 0:
+ tmp |= SGIMC_GIOPAR_MASTERGFX;
+ break;
+ case 1:
+ tmp |= SGIMC_GIOPAR_MASTEREXP0;
+ break;
+ case 2:
+ tmp |= SGIMC_GIOPAR_MASTEREXP1;
+ break;
+ }
+ sgimc->giopar = tmp;
+}
+EXPORT_SYMBOL_GPL(gio_set_master);
+
+void ip22_gio_set_64bit(int slotno)
+{
+ u32 tmp = sgimc->giopar;
+
+ switch (slotno) {
+ case 0:
+ tmp |= SGIMC_GIOPAR_GFX64;
+ break;
+ case 1:
+ tmp |= SGIMC_GIOPAR_EXP064;
+ break;
+ case 2:
+ tmp |= SGIMC_GIOPAR_EXP164;
+ break;
+ }
+ sgimc->giopar = tmp;
+}
+
+static int ip22_gio_id(unsigned long addr, u32 *res)
+{
+ u8 tmp8;
+ u8 tmp16;
+	u16 tmp16;
+ u8 *ptr8;
+ u16 *ptr16;
+ u32 *ptr32;
+
+ ptr32 = (void *)CKSEG1ADDR(addr);
+ if (!get_dbe(tmp32, ptr32)) {
+ /*
+ * We got no DBE, but this doesn't mean anything.
+ * If GIO is pipelined (which can't be disabled
+ * for GFX slot) we don't get a DBE, but we see
+ * the transfer size as data. So we do an 8bit
+ * and a 16bit access and check whether the common
+ * data matches
+ */
+ ptr8 = (void *)CKSEG1ADDR(addr + 3);
+ if (get_dbe(tmp8, ptr8)) {
+ /*
+ * 32bit access worked, but 8bit doesn't
+ * so we don't see phantom reads on
+ * a pipelined bus, but a real card which
+ * doesn't support 8 bit reads
+ */
+ *res = tmp32;
+ return 1;
+ }
+ ptr16 = (void *)CKSEG1ADDR(addr + 2);
+ get_dbe(tmp16, ptr16);
+ if (tmp8 == (tmp16 & 0xff) &&
+ tmp8 == (tmp32 & 0xff) &&
+ tmp16 == (tmp32 & 0xffff)) {
+ *res = tmp32;
+ return 1;
+ }
+ }
+ return 0; /* nothing here */
+}
+
+#define HQ2_MYSTERY_OFFS 0x6A07C
+#define NEWPORT_USTATUS_OFFS 0xF133C
+
+static int ip22_is_gr2(unsigned long addr)
+{
+ u32 tmp;
+ u32 *ptr;
+
+ /* HQ2 only allows 32bit accesses */
+ ptr = (void *)CKSEG1ADDR(addr + HQ2_MYSTERY_OFFS);
+ if (!get_dbe(tmp, ptr)) {
+ if (tmp == 0xdeadbeef)
+ return 1;
+ }
+ return 0;
+}
+
+
+static void ip22_check_gio(int slotno, unsigned long addr, int irq)
+{
+ const char *name = "Unknown";
+ struct gio_device *gio_dev;
+ u32 tmp;
+ __u8 id;
+ int i;
+
+ /* first look for GR2/GR3 by checking mystery register */
+ if (ip22_is_gr2(addr))
+ tmp = 0x7f;
+ else {
+ if (!ip22_gio_id(addr, &tmp)) {
+ /*
+ * no GIO signature at start address of slot
+ * since Newport doesn't have one, we check if
+ * user status register is readable
+ */
+ if (ip22_gio_id(addr + NEWPORT_USTATUS_OFFS, &tmp))
+ tmp = 0x7e;
+ else
+ tmp = 0;
+ }
+ }
+ if (tmp) {
+ id = GIO_ID(tmp);
+ if (tmp & GIO_32BIT_ID) {
+ if (tmp & GIO_64BIT_IFACE)
+ ip22_gio_set_64bit(slotno);
+ }
+ for (i = 0; i < ARRAY_SIZE(gio_name_table); i++) {
+ if (id == gio_name_table[i].id) {
+ name = gio_name_table[i].name;
+ break;
+ }
+ }
+ printk(KERN_INFO "GIO: slot %d : %s (id %x)\n",
+ slotno, name, id);
+ gio_dev = kzalloc(sizeof *gio_dev, GFP_KERNEL);
+ gio_dev->name = name;
+ gio_dev->slotno = slotno;
+ gio_dev->id.id = id;
+ gio_dev->resource.start = addr;
+ gio_dev->resource.end = addr + 0x3fffff;
+ gio_dev->resource.flags = IORESOURCE_MEM;
+ gio_dev->irq = irq;
+ dev_set_name(&gio_dev->dev, "%d", slotno);
+ gio_device_register(gio_dev);
+ } else
+ printk(KERN_INFO "GIO: slot %d : Empty\n", slotno);
+}
+
+static struct bus_type gio_bus_type = {
+ .name = "gio",
+ .dev_groups = gio_dev_groups,
+ .match = gio_bus_match,
+ .probe = gio_device_probe,
+ .remove = gio_device_remove,
+ .shutdown = gio_device_shutdown,
+ .uevent = gio_device_uevent,
+};
+
+static struct resource gio_bus_resource = {
+ .start = GIO_SLOT_GFX_BASE,
+ .end = GIO_SLOT_GFX_BASE + 0x9fffff,
+ .name = "GIO Bus",
+ .flags = IORESOURCE_MEM,
+};
+
+int __init ip22_gio_init(void)
+{
+ unsigned int pbdma __maybe_unused;
+ int ret;
+
+ ret = device_register(&gio_bus);
+ if (ret) {
+ put_device(&gio_bus);
+ return ret;
+ }
+
+ ret = bus_register(&gio_bus_type);
+ if (!ret) {
+ request_resource(&iomem_resource, &gio_bus_resource);
+ printk(KERN_INFO "GIO: Probing bus...\n");
+
+ if (ip22_is_fullhouse()) {
+ /* Indigo2 */
+ ip22_check_gio(0, GIO_SLOT_GFX_BASE, SGI_GIO_1_IRQ);
+ ip22_check_gio(1, GIO_SLOT_EXP0_BASE, SGI_GIO_1_IRQ);
+ } else {
+ /* Indy/Challenge S */
+ if (get_dbe(pbdma, (unsigned int *)&hpc3c1->pbdma[1]))
+ ip22_check_gio(0, GIO_SLOT_GFX_BASE,
+ SGI_GIO_0_IRQ);
+ ip22_check_gio(1, GIO_SLOT_EXP0_BASE, SGI_GIOEXP0_IRQ);
+ ip22_check_gio(2, GIO_SLOT_EXP1_BASE, SGI_GIOEXP1_IRQ);
+ }
+ } else
+ device_unregister(&gio_bus);
+
+ return ret;
+}
+
+subsys_initcall(ip22_gio_init);
diff --git a/arch/mips/sgi-ip22/ip22-hpc.c b/arch/mips/sgi-ip22/ip22-hpc.c
new file mode 100644
index 000000000..49922e86c
--- /dev/null
+++ b/arch/mips/sgi-ip22/ip22-hpc.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip22-hpc.c: Routines for generic manipulation of the HPC controllers.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1998 Ralf Baechle
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#include <asm/sgi/hpc3.h>
+#include <asm/sgi/ioc.h>
+#include <asm/sgi/ip22.h>
+
+struct hpc3_regs *hpc3c0, *hpc3c1;
+
+EXPORT_SYMBOL(hpc3c0);
+EXPORT_SYMBOL(hpc3c1);
+
+struct sgioc_regs *sgioc;
+
+EXPORT_SYMBOL(sgioc);
+
+/* We need software copies of these because they are write only. */
+u8 sgi_ioc_reset, sgi_ioc_write;
+
+extern char *system_type;
+
+void __init sgihpc_init(void)
+{
+ /* ioremap can't fail */
+ hpc3c0 = (struct hpc3_regs *)
+ ioremap(HPC3_CHIP0_BASE, sizeof(struct hpc3_regs));
+ hpc3c1 = (struct hpc3_regs *)
+ ioremap(HPC3_CHIP1_BASE, sizeof(struct hpc3_regs));
+ /* IOC lives in PBUS PIO channel 6 */
+ sgioc = (struct sgioc_regs *)hpc3c0->pbus_extregs[6];
+
+ hpc3c0->pbus_piocfg[6][0] |= HPC3_PIOCFG_DS16;
+ if (ip22_is_fullhouse()) {
+ /* Full House comes with INT2 which lives in PBUS PIO
+ * channel 4 */
+ sgint = (struct sgint_regs *)hpc3c0->pbus_extregs[4];
+ system_type = "SGI Indigo2";
+ } else {
+ /* Guiness comes with INT3 which is part of IOC */
+ sgint = &sgioc->int3;
+ system_type = "SGI Indy";
+ }
+
+ sgi_ioc_reset = (SGIOC_RESET_PPORT | SGIOC_RESET_KBDMOUSE |
+ SGIOC_RESET_EISA | SGIOC_RESET_ISDN |
+ SGIOC_RESET_LC0OFF);
+
+ sgi_ioc_write = (SGIOC_WRITE_EASEL | SGIOC_WRITE_NTHRESH |
+ SGIOC_WRITE_TPSPEED | SGIOC_WRITE_EPSEL |
+ SGIOC_WRITE_U0AMODE | SGIOC_WRITE_U1AMODE);
+
+ sgioc->reset = sgi_ioc_reset;
+ sgioc->write = sgi_ioc_write;
+}
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
new file mode 100644
index 000000000..96798a4ab
--- /dev/null
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip22-int.c: Routines for generic manipulation of the INT[23] ASIC
+ * found on INDY and Indigo2 workstations.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1997, 1998 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999 Andrew R. Baker (andrewb@uab.edu)
+ * - Indigo2 changes
+ * - Interrupt handling fixes
+ * Copyright (C) 2001, 2003 Ladislav Michl (ladis@linux-mips.org)
+ */
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+#include <linux/ftrace.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/sgi/hpc3.h>
+#include <asm/sgi/ip22.h>
+
+/* So far nothing hangs here */
+#undef USE_LIO3_IRQ
+
+struct sgint_regs *sgint;
+
+static char lc0msk_to_irqnr[256];
+static char lc1msk_to_irqnr[256];
+static char lc2msk_to_irqnr[256];
+static char lc3msk_to_irqnr[256];
+
+extern int ip22_eisa_init(void);
+
+static void enable_local0_irq(struct irq_data *d)
+{
+ /* don't allow mappable interrupt to be enabled from setup_irq,
+ * we have our own way to do so */
+ if (d->irq != SGI_MAP_0_IRQ)
+ sgint->imask0 |= (1 << (d->irq - SGINT_LOCAL0));
+}
+
+static void disable_local0_irq(struct irq_data *d)
+{
+ sgint->imask0 &= ~(1 << (d->irq - SGINT_LOCAL0));
+}
+
+static struct irq_chip ip22_local0_irq_type = {
+ .name = "IP22 local 0",
+ .irq_mask = disable_local0_irq,
+ .irq_unmask = enable_local0_irq,
+};
+
+static void enable_local1_irq(struct irq_data *d)
+{
+ /* don't allow mappable interrupt to be enabled from setup_irq,
+ * we have our own way to do so */
+ if (d->irq != SGI_MAP_1_IRQ)
+ sgint->imask1 |= (1 << (d->irq - SGINT_LOCAL1));
+}
+
+static void disable_local1_irq(struct irq_data *d)
+{
+ sgint->imask1 &= ~(1 << (d->irq - SGINT_LOCAL1));
+}
+
+static struct irq_chip ip22_local1_irq_type = {
+ .name = "IP22 local 1",
+ .irq_mask = disable_local1_irq,
+ .irq_unmask = enable_local1_irq,
+};
+
+static void enable_local2_irq(struct irq_data *d)
+{
+ sgint->imask0 |= (1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0));
+ sgint->cmeimask0 |= (1 << (d->irq - SGINT_LOCAL2));
+}
+
+static void disable_local2_irq(struct irq_data *d)
+{
+ sgint->cmeimask0 &= ~(1 << (d->irq - SGINT_LOCAL2));
+ if (!sgint->cmeimask0)
+ sgint->imask0 &= ~(1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0));
+}
+
+static struct irq_chip ip22_local2_irq_type = {
+ .name = "IP22 local 2",
+ .irq_mask = disable_local2_irq,
+ .irq_unmask = enable_local2_irq,
+};
+
+static void enable_local3_irq(struct irq_data *d)
+{
+ sgint->imask1 |= (1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1));
+ sgint->cmeimask1 |= (1 << (d->irq - SGINT_LOCAL3));
+}
+
+static void disable_local3_irq(struct irq_data *d)
+{
+ sgint->cmeimask1 &= ~(1 << (d->irq - SGINT_LOCAL3));
+ if (!sgint->cmeimask1)
+ sgint->imask1 &= ~(1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1));
+}
+
+static struct irq_chip ip22_local3_irq_type = {
+ .name = "IP22 local 3",
+ .irq_mask = disable_local3_irq,
+ .irq_unmask = enable_local3_irq,
+};
+
+static void indy_local0_irqdispatch(void)
+{
+ u8 mask = sgint->istat0 & sgint->imask0;
+ u8 mask2;
+ int irq;
+
+ if (mask & SGINT_ISTAT0_LIO2) {
+ mask2 = sgint->vmeistat & sgint->cmeimask0;
+ irq = lc2msk_to_irqnr[mask2];
+ } else
+ irq = lc0msk_to_irqnr[mask];
+
+ /*
+ * workaround for INT2 bug; if irq == 0, INT2 has seen a fifo full
+ * irq, but failed to latch it into status register
+ */
+ if (irq)
+ do_IRQ(irq);
+ else
+ do_IRQ(SGINT_LOCAL0 + 0);
+}
+
+static void indy_local1_irqdispatch(void)
+{
+ u8 mask = sgint->istat1 & sgint->imask1;
+ u8 mask2;
+ int irq;
+
+ if (mask & SGINT_ISTAT1_LIO3) {
+ mask2 = sgint->vmeistat & sgint->cmeimask1;
+ irq = lc3msk_to_irqnr[mask2];
+ } else
+ irq = lc1msk_to_irqnr[mask];
+
+ /* if irq == 0, then the interrupt has already been cleared */
+ if (irq)
+ do_IRQ(irq);
+}
+
+extern void ip22_be_interrupt(int irq);
+
+static void __irq_entry indy_buserror_irq(void)
+{
+ int irq = SGI_BUSERR_IRQ;
+
+ irq_enter();
+ kstat_incr_irq_this_cpu(irq);
+ ip22_be_interrupt(irq);
+ irq_exit();
+}
+
+#ifdef USE_LIO3_IRQ
+#define SGI_INTERRUPTS SGINT_END
+#else
+#define SGI_INTERRUPTS SGINT_LOCAL3
+#endif
+
+extern void indy_8254timer_irq(void);
+
+/*
+ * IRQs on the INDY look basically (barring software IRQs which we don't use
+ * at all) like:
+ *
+ * MIPS IRQ Source
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 Local IRQ level zero
+ * 3 Local IRQ level one
+ * 4 8254 Timer zero
+ * 5 8254 Timer one
+ * 6 Bus Error
+ * 7 R4k timer (what we use)
+ *
+ * We handle the IRQ according to _our_ priority which is:
+ *
+ * Highest ---- R4k Timer
+ * Local IRQ zero
+ * Local IRQ one
+ * Bus Error
+ * 8254 Timer zero
+ * Lowest ---- 8254 Timer one
+ *
+ * then we just return, if multiple IRQs are pending then we will just take
+ * another exception, big deal.
+ */
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned int pending = read_c0_status() & read_c0_cause();
+
+ /*
+ * First we check for r4k counter/timer IRQ.
+ */
+ if (pending & CAUSEF_IP7)
+ do_IRQ(SGI_TIMER_IRQ);
+ else if (pending & CAUSEF_IP2)
+ indy_local0_irqdispatch();
+ else if (pending & CAUSEF_IP3)
+ indy_local1_irqdispatch();
+ else if (pending & CAUSEF_IP6)
+ indy_buserror_irq();
+ else if (pending & (CAUSEF_IP4 | CAUSEF_IP5))
+ indy_8254timer_irq();
+}
+
+void __init arch_init_irq(void)
+{
+ int i;
+
+ /* Init local mask --> irq tables. */
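+	/* Each table entry maps a status byte to the IRQ number of its most
+	 * significant set bit, i.e. the highest-priority pending source. */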
+ for (i = 0; i < 256; i++) {
+ if (i & 0x80) {
+ lc0msk_to_irqnr[i] = SGINT_LOCAL0 + 7;
+ lc1msk_to_irqnr[i] = SGINT_LOCAL1 + 7;
+ lc2msk_to_irqnr[i] = SGINT_LOCAL2 + 7;
+ lc3msk_to_irqnr[i] = SGINT_LOCAL3 + 7;
+ } else if (i & 0x40) {
+ lc0msk_to_irqnr[i] = SGINT_LOCAL0 + 6;
+ lc1msk_to_irqnr[i] = SGINT_LOCAL1 + 6;
+ lc2msk_to_irqnr[i] = SGINT_LOCAL2 + 6;
+ lc3msk_to_irqnr[i] = SGINT_LOCAL3 + 6;
+ } else if (i & 0x20) {
+ lc0msk_to_irqnr[i] = SGINT_LOCAL0 + 5;
+ lc1msk_to_irqnr[i] = SGINT_LOCAL1 + 5;
+ lc2msk_to_irqnr[i] = SGINT_LOCAL2 + 5;
+ lc3msk_to_irqnr[i] = SGINT_LOCAL3 + 5;
+ } else if (i & 0x10) {
+ lc0msk_to_irqnr[i] = SGINT_LOCAL0 + 4;
+ lc1msk_to_irqnr[i] = SGINT_LOCAL1 + 4;
+ lc2msk_to_irqnr[i] = SGINT_LOCAL2 + 4;
+ lc3msk_to_irqnr[i] = SGINT_LOCAL3 + 4;
+ } else if (i & 0x08) {
+ lc0msk_to_irqnr[i] = SGINT_LOCAL0 + 3;
+ lc1msk_to_irqnr[i] = SGINT_LOCAL1 + 3;
+ lc2msk_to_irqnr[i] = SGINT_LOCAL2 + 3;
+ lc3msk_to_irqnr[i] = SGINT_LOCAL3 + 3;
+ } else if (i & 0x04) {
+ lc0msk_to_irqnr[i] = SGINT_LOCAL0 + 2;
+ lc1msk_to_irqnr[i] = SGINT_LOCAL1 + 2;
+ lc2msk_to_irqnr[i] = SGINT_LOCAL2 + 2;
+ lc3msk_to_irqnr[i] = SGINT_LOCAL3 + 2;
+ } else if (i & 0x02) {
+ lc0msk_to_irqnr[i] = SGINT_LOCAL0 + 1;
+ lc1msk_to_irqnr[i] = SGINT_LOCAL1 + 1;
+ lc2msk_to_irqnr[i] = SGINT_LOCAL2 + 1;
+ lc3msk_to_irqnr[i] = SGINT_LOCAL3 + 1;
+ } else if (i & 0x01) {
+ lc0msk_to_irqnr[i] = SGINT_LOCAL0 + 0;
+ lc1msk_to_irqnr[i] = SGINT_LOCAL1 + 0;
+ lc2msk_to_irqnr[i] = SGINT_LOCAL2 + 0;
+ lc3msk_to_irqnr[i] = SGINT_LOCAL3 + 0;
+ } else {
+ lc0msk_to_irqnr[i] = 0;
+ lc1msk_to_irqnr[i] = 0;
+ lc2msk_to_irqnr[i] = 0;
+ lc3msk_to_irqnr[i] = 0;
+ }
+ }
+
+ /* Mask out all interrupts. */
+ sgint->imask0 = 0;
+ sgint->imask1 = 0;
+ sgint->cmeimask0 = 0;
+ sgint->cmeimask1 = 0;
+
+ /* init CPU irqs */
+ mips_cpu_irq_init();
+
+ for (i = SGINT_LOCAL0; i < SGI_INTERRUPTS; i++) {
+ struct irq_chip *handler;
+
+ if (i < SGINT_LOCAL1)
+ handler = &ip22_local0_irq_type;
+ else if (i < SGINT_LOCAL2)
+ handler = &ip22_local1_irq_type;
+ else if (i < SGINT_LOCAL3)
+ handler = &ip22_local2_irq_type;
+ else
+ handler = &ip22_local3_irq_type;
+
+ irq_set_chip_and_handler(i, handler, handle_level_irq);
+ }
+
+	/* vector handlers. these register the IRQs as non-sharable */
+ if (request_irq(SGI_LOCAL_0_IRQ, no_action, IRQF_NO_THREAD,
+ "local0 cascade", NULL))
+ pr_err("Failed to register local0 cascade interrupt\n");
+ if (request_irq(SGI_LOCAL_1_IRQ, no_action, IRQF_NO_THREAD,
+ "local1 cascade", NULL))
+ pr_err("Failed to register local1 cascade interrupt\n");
+ if (request_irq(SGI_BUSERR_IRQ, no_action, IRQF_NO_THREAD,
+ "Bus Error", NULL))
+ pr_err("Failed to register Bus Error interrupt\n");
+
+ /* cascade in cascade. i love Indy ;-) */
+ if (request_irq(SGI_MAP_0_IRQ, no_action, IRQF_NO_THREAD,
+ "mapable0 cascade", NULL))
+ pr_err("Failed to register mapable0 cascade interrupt\n");
+#ifdef USE_LIO3_IRQ
+ if (request_irq(SGI_MAP_1_IRQ, no_action, IRQF_NO_THREAD,
+ "mapable1 cascade", NULL))
+ pr_err("Failed to register mapable1 cascade interrupt\n");
+#endif
+
+#ifdef CONFIG_EISA
+ if (ip22_is_fullhouse()) /* Only Indigo-2 has EISA stuff */
+ ip22_eisa_init();
+#endif
+}
diff --git a/arch/mips/sgi-ip22/ip22-mc.c b/arch/mips/sgi-ip22/ip22-mc.c
new file mode 100644
index 000000000..74e5b9e27
--- /dev/null
+++ b/arch/mips/sgi-ip22/ip22-mc.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip22-mc.c: Routines for manipulating SGI Memory Controller.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1999 Andrew R. Baker (andrewb@uab.edu) - Indigo2 changes
+ * Copyright (C) 2003 Ladislav Michl (ladis@linux-mips.org)
+ * Copyright (C) 2004 Peter Fuerst (pf@net.alphadv.de) - IP28
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/spinlock.h>
+
+#include <asm/io.h>
+#include <asm/bootinfo.h>
+#include <asm/sgialib.h>
+#include <asm/sgi/mc.h>
+#include <asm/sgi/hpc3.h>
+#include <asm/sgi/ip22.h>
+
+struct sgimc_regs *sgimc;
+
+EXPORT_SYMBOL(sgimc);
+
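+/*
+ * Bank base address and size are encoded with coarser granularity on MC
+ * revision 5 and later, hence the revision-dependent shifts in the helpers
+ * below.
+ */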
+static inline unsigned long get_bank_addr(unsigned int memconfig)
+{
+ return (memconfig & SGIMC_MCONFIG_BASEADDR) << ((sgimc->systemid & SGIMC_SYSID_MASKREV) >= 5 ? 24 : 22);
+}
+
+static inline unsigned long get_bank_size(unsigned int memconfig)
+{
+ return ((memconfig & SGIMC_MCONFIG_RMASK) + 0x0100) << ((sgimc->systemid & SGIMC_SYSID_MASKREV) >= 5 ? 16 : 14);
+}
+
+static inline unsigned int get_bank_config(int bank)
+{
+ unsigned int res = bank > 1 ? sgimc->mconfig1 : sgimc->mconfig0;
+ return bank % 2 ? res & 0xffff : res >> 16;
+}
+
+#if defined(CONFIG_SGI_IP28) || defined(CONFIG_32BIT)
+static void __init probe_memory(void)
+{
+ /* prom detects all usable memory */
+}
+#else
+/*
+ * Detect installed memory, which PROM misses
+ */
+static void __init probe_memory(void)
+{
+ unsigned long addr, size;
+ int i;
+
+ printk(KERN_INFO "MC: Probing memory configuration:\n");
+ for (i = 0; i < 4; i++) {
+ unsigned int tmp = get_bank_config(i);
+ if (!(tmp & SGIMC_MCONFIG_BVALID))
+ continue;
+
+ size = get_bank_size(tmp);
+ addr = get_bank_addr(tmp);
+ printk(KERN_INFO " bank%d: %3ldM @ %08lx\n",
+ i, size / 1024 / 1024, addr);
+
+ if (addr >= SGIMC_SEG1_BADDR)
+ memblock_add(addr, size);
+ }
+}
+#endif
+
+void __init sgimc_init(void)
+{
+ u32 tmp;
+
+ /* ioremap can't fail */
+ sgimc = (struct sgimc_regs *)
+ ioremap(SGIMC_BASE, sizeof(struct sgimc_regs));
+
+ printk(KERN_INFO "MC: SGI memory controller Revision %d\n",
+ (int) sgimc->systemid & SGIMC_SYSID_MASKREV);
+
+ /* Place the MC into a known state. This must be done before
+ * interrupts are first enabled etc.
+ */
+
+ /* Step 0: Make sure we turn off the watchdog in case it's
+ * still running (which might be the case after a
+ * soft reboot).
+ */
+ tmp = sgimc->cpuctrl0;
+ tmp &= ~SGIMC_CCTRL0_WDOG;
+ sgimc->cpuctrl0 = tmp;
+
+ /* Step 1: The CPU/GIO error status registers will not latch
+ * up a new error status until the register has been
+ * cleared by the cpu. These status registers are
+ * cleared by writing any value to them.
+ */
+ sgimc->cstat = sgimc->gstat = 0;
+
+ /* Step 2: Enable all parity checking in cpu control register
+ * zero.
+ */
+ /* don't touch parity settings for IP28 */
+ tmp = sgimc->cpuctrl0;
+#ifndef CONFIG_SGI_IP28
+ tmp |= SGIMC_CCTRL0_EPERRGIO | SGIMC_CCTRL0_EPERRMEM;
+#endif
+ tmp |= SGIMC_CCTRL0_R4KNOCHKPARR;
+ sgimc->cpuctrl0 = tmp;
+
+ /* Step 3: Setup the MC write buffer depth, this is controlled
+ * in cpu control register 1 in the lower 4 bits.
+ */
+ tmp = sgimc->cpuctrl1;
+ tmp &= ~0xf;
+ tmp |= 0xd;
+ sgimc->cpuctrl1 = tmp;
+
+ /* Step 4: Initialize the RPSS divider register to run as fast
+ * as it can correctly operate. The register is laid
+ * out as follows:
+ *
+ * ----------------------------------------
+ * | RESERVED | INCREMENT | DIVIDER |
+ * ----------------------------------------
+ * 31 16 15 8 7 0
+ *
+ * DIVIDER determines how often a 'tick' happens,
+	 * INCREMENT determines by how much the RPSS increment
+	 * register's value increases at each 'tick'. Thus,
+ * for IP22 we get INCREMENT=1, DIVIDER=1 == 0x101
+ */
+ sgimc->divider = 0x101;
+
+ /* Step 5: Initialize GIO64 arbitrator configuration register.
+ *
+ * NOTE: HPC init code in sgihpc_init() must run before us because
+ * we need to know Guiness vs. FullHouse and the board
+ * revision on this machine. You have been warned.
+ */
+
+ /* First the basic invariants across all GIO64 implementations. */
+ tmp = sgimc->giopar & SGIMC_GIOPAR_GFX64; /* keep gfx 64bit settings */
+ tmp |= SGIMC_GIOPAR_HPC64; /* All 1st HPC's interface at 64bits */
+ tmp |= SGIMC_GIOPAR_ONEBUS; /* Only one physical GIO bus exists */
+
+ if (ip22_is_fullhouse()) {
+ /* Fullhouse specific settings. */
+ if (SGIOC_SYSID_BOARDREV(sgioc->sysid) < 2) {
+ tmp |= SGIMC_GIOPAR_HPC264; /* 2nd HPC at 64bits */
+ tmp |= SGIMC_GIOPAR_PLINEEXP0; /* exp0 pipelines */
+ tmp |= SGIMC_GIOPAR_MASTEREXP1; /* exp1 masters */
+ tmp |= SGIMC_GIOPAR_RTIMEEXP0; /* exp0 is realtime */
+ } else {
+ tmp |= SGIMC_GIOPAR_HPC264; /* 2nd HPC 64bits */
+ tmp |= SGIMC_GIOPAR_PLINEEXP0; /* exp[01] pipelined */
+ tmp |= SGIMC_GIOPAR_PLINEEXP1;
+ tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA masters */
+ }
+ } else {
+ /* Guiness specific settings. */
+ tmp |= SGIMC_GIOPAR_EISA64; /* MC talks to EISA at 64bits */
+ tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA bus can act as master */
+ }
+ sgimc->giopar = tmp; /* poof */
+
+ probe_memory();
+}
+
+#ifdef CONFIG_SGI_IP28
+void __init prom_cleanup(void)
+{
+ u32 mconfig1;
+ unsigned long flags;
+	static DEFINE_SPINLOCK(lock);
+
+ /*
+ * because ARCS accesses memory uncached we wait until ARCS
+ * isn't needed any longer, before we switch from slow to
+ * normal mode
+ */
+ spin_lock_irqsave(&lock, flags);
+ mconfig1 = sgimc->mconfig1;
+ /* map ECC register */
+ sgimc->mconfig1 = (mconfig1 & 0xffff0000) | 0x2060;
+ iob();
+ /* switch to normal mode */
+ *(unsigned long *)PHYS_TO_XKSEG_UNCACHED(0x60000000) = 0;
+ iob();
+ /* reduce WR_COL */
+ sgimc->cmacc = (sgimc->cmacc & ~0xf) | 4;
+ iob();
+ /* restore old config */
+ sgimc->mconfig1 = mconfig1;
+ iob();
+ spin_unlock_irqrestore(&lock, flags);
+}
+#endif
diff --git a/arch/mips/sgi-ip22/ip22-nvram.c b/arch/mips/sgi-ip22/ip22-nvram.c
new file mode 100644
index 000000000..e727ef519
--- /dev/null
+++ b/arch/mips/sgi-ip22/ip22-nvram.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip22-nvram.c: NVRAM and serial EEPROM handling.
+ *
+ * Copyright (C) 2003 Ladislav Michl (ladis@linux-mips.org)
+ */
+#include <linux/export.h>
+
+#include <asm/sgi/hpc3.h>
+#include <asm/sgi/ip22.h>
+
+/* Control opcode for serial eeprom */
+#define EEPROM_READ 0xc000 /* serial memory read */
+#define EEPROM_WEN 0x9800 /* write enable before prog modes */
+#define EEPROM_WRITE 0xa000 /* serial memory write */
+#define EEPROM_WRALL 0x8800 /* write all registers */
+#define EEPROM_WDS 0x8000 /* disable all programming */
+#define EEPROM_PRREAD 0xc000 /* read protect register */
+#define EEPROM_PREN 0x9800 /* enable protect register mode */
+#define EEPROM_PRCLEAR 0xffff /* clear protect register */
+#define EEPROM_PRWRITE 0xa000 /* write protect register */
+#define EEPROM_PRDS 0x8000 /* disable protect register, forever */
+
+#define EEPROM_EPROT 0x01 /* Protect register enable */
+#define EEPROM_CSEL 0x02 /* Chip select */
+#define EEPROM_ECLK 0x04 /* EEPROM clock */
+#define EEPROM_DATO 0x08 /* Data out */
+#define EEPROM_DATI 0x10 /* Data in */
+
+/* We need to use these functions early... */
+#define delay() ({ \
+ int x; \
+ for (x=0; x<100000; x++) __asm__ __volatile__(""); })
+
+#define eeprom_cs_on(ptr) ({ \
+ __raw_writel(__raw_readl(ptr) & ~EEPROM_DATO, ptr); \
+ __raw_writel(__raw_readl(ptr) & ~EEPROM_ECLK, ptr); \
+ __raw_writel(__raw_readl(ptr) & ~EEPROM_EPROT, ptr); \
+ delay(); \
+ __raw_writel(__raw_readl(ptr) | EEPROM_CSEL, ptr); \
+ __raw_writel(__raw_readl(ptr) | EEPROM_ECLK, ptr); })
+
+
+#define eeprom_cs_off(ptr) ({ \
+ __raw_writel(__raw_readl(ptr) & ~EEPROM_ECLK, ptr); \
+ __raw_writel(__raw_readl(ptr) & ~EEPROM_CSEL, ptr); \
+ __raw_writel(__raw_readl(ptr) | EEPROM_EPROT, ptr); \
+ __raw_writel(__raw_readl(ptr) | EEPROM_ECLK, ptr); })
+
+#define BITS_IN_COMMAND 11
+/*
+ * clock in the nvram command and the register number. For the
+ * national semiconductor nv ram chip the op code is 3 bits and
+ * the address is 6/8 bits.
+ */
+static inline void eeprom_cmd(unsigned int *ctrl, unsigned cmd, unsigned reg)
+{
+ unsigned short ser_cmd;
+ int i;
+
+ ser_cmd = cmd | (reg << (16 - BITS_IN_COMMAND));
+ for (i = 0; i < BITS_IN_COMMAND; i++) {
+ if (ser_cmd & (1<<15)) /* if high order bit set */
+ __raw_writel(__raw_readl(ctrl) | EEPROM_DATO, ctrl);
+ else
+ __raw_writel(__raw_readl(ctrl) & ~EEPROM_DATO, ctrl);
+ __raw_writel(__raw_readl(ctrl) & ~EEPROM_ECLK, ctrl);
+ delay();
+ __raw_writel(__raw_readl(ctrl) | EEPROM_ECLK, ctrl);
+ delay();
+ ser_cmd <<= 1;
+ }
+ /* see data sheet timing diagram */
+ __raw_writel(__raw_readl(ctrl) & ~EEPROM_DATO, ctrl);
+}
+
+unsigned short ip22_eeprom_read(unsigned int *ctrl, int reg)
+{
+ unsigned short res = 0;
+ int i;
+
+ __raw_writel(__raw_readl(ctrl) & ~EEPROM_EPROT, ctrl);
+ eeprom_cs_on(ctrl);
+ eeprom_cmd(ctrl, EEPROM_READ, reg);
+
+	/* clock the data out of serial mem */
+ for (i = 0; i < 16; i++) {
+ __raw_writel(__raw_readl(ctrl) & ~EEPROM_ECLK, ctrl);
+ delay();
+ __raw_writel(__raw_readl(ctrl) | EEPROM_ECLK, ctrl);
+ delay();
+ res <<= 1;
+ if (__raw_readl(ctrl) & EEPROM_DATI)
+ res |= 1;
+ }
+
+ eeprom_cs_off(ctrl);
+
+ return res;
+}
+
+EXPORT_SYMBOL(ip22_eeprom_read);
+
+/*
+ * Read specified register from main NVRAM
+ */
+unsigned short ip22_nvram_read(int reg)
+{
+ if (ip22_is_fullhouse())
+ /* IP22 (Indigo2 aka FullHouse) stores env variables into
+ * 93CS56 Microwire Bus EEPROM 2048 Bit (128x16) */
+ return ip22_eeprom_read(&hpc3c0->eeprom, reg);
+ else {
+ unsigned short tmp;
+ /* IP24 (Indy aka Guiness) uses DS1386 8K version */
+ reg <<= 1;
+ tmp = hpc3c0->bbram[reg++] & 0xff;
+ return (tmp << 8) | (hpc3c0->bbram[reg] & 0xff);
+ }
+}
+
+EXPORT_SYMBOL(ip22_nvram_read);
diff --git a/arch/mips/sgi-ip22/ip22-platform.c b/arch/mips/sgi-ip22/ip22-platform.c
new file mode 100644
index 000000000..0b2002e02
--- /dev/null
+++ b/arch/mips/sgi-ip22/ip22-platform.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/paccess.h>
+#include <asm/sgi/ip22.h>
+#include <asm/sgi/hpc3.h>
+#include <asm/sgi/mc.h>
+#include <asm/sgi/seeq.h>
+#include <asm/sgi/wd.h>
+
+static struct resource sgiwd93_0_resources[] = {
+ {
+ .name = "eth0 irq",
+ .start = SGI_WD93_0_IRQ,
+ .end = SGI_WD93_0_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct sgiwd93_platform_data sgiwd93_0_pd = {
+ .unit = 0,
+ .irq = SGI_WD93_0_IRQ,
+};
+
+static u64 sgiwd93_0_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device sgiwd93_0_device = {
+ .name = "sgiwd93",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(sgiwd93_0_resources),
+ .resource = sgiwd93_0_resources,
+ .dev = {
+ .platform_data = &sgiwd93_0_pd,
+ .dma_mask = &sgiwd93_0_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static struct resource sgiwd93_1_resources[] = {
+ {
+ .name = "eth0 irq",
+ .start = SGI_WD93_1_IRQ,
+ .end = SGI_WD93_1_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct sgiwd93_platform_data sgiwd93_1_pd = {
+ .unit = 1,
+ .irq = SGI_WD93_1_IRQ,
+};
+
+static u64 sgiwd93_1_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device sgiwd93_1_device = {
+ .name = "sgiwd93",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(sgiwd93_1_resources),
+ .resource = sgiwd93_1_resources,
+ .dev = {
+ .platform_data = &sgiwd93_1_pd,
+ .dma_mask = &sgiwd93_1_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+/*
+ * Create platform devices for the WD33C93 SCSI controllers hanging off
+ * the HPC3 ASIC.
+ */
+static int __init sgiwd93_devinit(void)
+{
+ int res;
+
+ sgiwd93_0_pd.hregs = &hpc3c0->scsi_chan0;
+ sgiwd93_0_pd.wdregs = (unsigned char *) hpc3c0->scsi0_ext;
+
+ res = platform_device_register(&sgiwd93_0_device);
+ if (res)
+ return res;
+
+ if (!ip22_is_fullhouse())
+ return 0;
+
+ sgiwd93_1_pd.hregs = &hpc3c0->scsi_chan1;
+ sgiwd93_1_pd.wdregs = (unsigned char *) hpc3c0->scsi1_ext;
+
+ return platform_device_register(&sgiwd93_1_device);
+}
+
+device_initcall(sgiwd93_devinit);
+
+static struct resource sgiseeq_0_resources[] = {
+ {
+ .name = "eth0 irq",
+ .start = SGI_ENET_IRQ,
+ .end = SGI_ENET_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct sgiseeq_platform_data eth0_pd;
+
+static u64 sgiseeq_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device eth0_device = {
+ .name = "sgiseeq",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(sgiseeq_0_resources),
+ .resource = sgiseeq_0_resources,
+ .dev = {
+ .platform_data = &eth0_pd,
+ .dma_mask = &sgiseeq_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static struct resource sgiseeq_1_resources[] = {
+ {
+ .name = "eth1 irq",
+ .start = SGI_GIO_0_IRQ,
+ .end = SGI_GIO_0_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct sgiseeq_platform_data eth1_pd;
+
+static struct platform_device eth1_device = {
+ .name = "sgiseeq",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(sgiseeq_1_resources),
+ .resource = sgiseeq_1_resources,
+ .dev = {
+ .platform_data = &eth1_pd,
+ },
+};
+
+/*
+ * Create platform devices for the SEEQ ethernet controllers attached to
+ * the HPC3 ASICs.
+ */
+static int __init sgiseeq_devinit(void)
+{
+ unsigned int pbdma __maybe_unused;
+ int res, i;
+
+ eth0_pd.hpc = hpc3c0;
+ eth0_pd.irq = SGI_ENET_IRQ;
+#define EADDR_NVOFS 250
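+	/* The ethernet address is stored as three 16-bit words in NVRAM,
+	 * high byte first, starting at byte offset EADDR_NVOFS. */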
+ for (i = 0; i < 3; i++) {
+ unsigned short tmp = ip22_nvram_read(EADDR_NVOFS / 2 + i);
+
+ eth0_pd.mac[2 * i] = tmp >> 8;
+ eth0_pd.mac[2 * i + 1] = tmp & 0xff;
+ }
+
+ res = platform_device_register(&eth0_device);
+ if (res)
+ return res;
+
+ /* Second HPC is missing? */
+ if (ip22_is_fullhouse() ||
+ get_dbe(pbdma, (unsigned int *)&hpc3c1->pbdma[1]))
+ return 0;
+
+ sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 |
+ SGIMC_GIOPAR_HPC264;
+ hpc3c1->pbus_piocfg[0][0] = 0x3ffff;
+ /* interrupt/config register on Challenge S Mezz board */
+ hpc3c1->pbus_extregs[0][0] = 0x30;
+
+ eth1_pd.hpc = hpc3c1;
+ eth1_pd.irq = SGI_GIO_0_IRQ;
+#define EADDR_NVOFS 250
+ for (i = 0; i < 3; i++) {
+ unsigned short tmp = ip22_eeprom_read(&hpc3c1->eeprom,
+ EADDR_NVOFS / 2 + i);
+
+ eth1_pd.mac[2 * i] = tmp >> 8;
+ eth1_pd.mac[2 * i + 1] = tmp & 0xff;
+ }
+
+ return platform_device_register(&eth1_device);
+}
+
+device_initcall(sgiseeq_devinit);
+
+static int __init sgi_hal2_devinit(void)
+{
+ return IS_ERR(platform_device_register_simple("sgihal2", 0, NULL, 0));
+}
+
+device_initcall(sgi_hal2_devinit);
+
+static int __init sgi_button_devinit(void)
+{
+ if (ip22_is_fullhouse())
+ return 0; /* full house has no volume buttons */
+
+ return IS_ERR(platform_device_register_simple("sgibtns", -1, NULL, 0));
+}
+
+device_initcall(sgi_button_devinit);
+
+static int __init sgi_ds1286_devinit(void)
+{
+ struct resource res;
+
+ memset(&res, 0, sizeof(res));
+ res.start = HPC3_CHIP0_BASE + offsetof(struct hpc3_regs, rtcregs);
+ res.end = res.start + sizeof(hpc3c0->rtcregs) - 1;
+ res.flags = IORESOURCE_MEM;
+
+ return IS_ERR(platform_device_register_simple("rtc-ds1286", -1,
+ &res, 1));
+}
+
+device_initcall(sgi_ds1286_devinit);
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
new file mode 100644
index 000000000..c374f3cee
--- /dev/null
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -0,0 +1,203 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1997, 1998, 2001, 03, 05, 06 by Ralf Baechle
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/rtc/ds1286.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/notifier.h>
+#include <linux/pm.h>
+#include <linux/timer.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/reboot.h>
+#include <asm/sgialib.h>
+#include <asm/sgi/ioc.h>
+#include <asm/sgi/hpc3.h>
+#include <asm/sgi/mc.h>
+#include <asm/sgi/ip22.h>
+
+/*
+ * Just power down if init hasn't finished after POWERDOWN_TIMEOUT seconds.
+ * I'm not sure if this feature is a good idea; for now it's here just to
+ * make the power button behave just like under IRIX.
+ */
+#define POWERDOWN_TIMEOUT 120
+
+/*
+ * Blink frequency during reboot grace period and when panicked.
+ */
+#define POWERDOWN_FREQ (HZ / 4)
+#define PANIC_FREQ (HZ / 8)
+
+static struct timer_list power_timer, blink_timer, debounce_timer;
+static unsigned long blink_timer_timeout;
+
+#define MACHINE_PANICED 1
+#define MACHINE_SHUTTING_DOWN 2
+
+static int machine_state;
+
+static void __noreturn sgi_machine_power_off(void)
+{
+ unsigned int tmp;
+
+ local_irq_disable();
+
+ /* Disable watchdog */
+ tmp = hpc3c0->rtcregs[RTC_CMD] & 0xff;
+ hpc3c0->rtcregs[RTC_CMD] = tmp | RTC_WAM;
+ hpc3c0->rtcregs[RTC_WSEC] = 0;
+ hpc3c0->rtcregs[RTC_WHSEC] = 0;
+
+ while (1) {
+ sgioc->panel = ~SGIOC_PANEL_POWERON;
+ /* Good bye cruel world ... */
+
+ /* If we're still running, we probably got sent an alarm
+ interrupt. Read the flag to clear it. */
+ tmp = hpc3c0->rtcregs[RTC_HOURS_ALARM];
+ }
+}
+
+static void __noreturn sgi_machine_restart(char *command)
+{
+ if (machine_state & MACHINE_SHUTTING_DOWN)
+ sgi_machine_power_off();
+ sgimc->cpuctrl0 |= SGIMC_CCTRL0_SYSINIT;
+ while (1);
+}
+
+static void __noreturn sgi_machine_halt(void)
+{
+ if (machine_state & MACHINE_SHUTTING_DOWN)
+ sgi_machine_power_off();
+ ArcEnterInteractiveMode();
+}
+
+static void power_timeout(struct timer_list *unused)
+{
+ sgi_machine_power_off();
+}
+
+static void blink_timeout(struct timer_list *unused)
+{
+ /* XXX fix this for fullhouse */
+ sgi_ioc_reset ^= (SGIOC_RESET_LC0OFF|SGIOC_RESET_LC1OFF);
+ sgioc->reset = sgi_ioc_reset;
+
+ mod_timer(&blink_timer, jiffies + blink_timer_timeout);
+}
+
+static void debounce(struct timer_list *unused)
+{
+ del_timer(&debounce_timer);
+ if (sgint->istat1 & SGINT_ISTAT1_PWR) {
+ /* Interrupt still being sent. */
+ debounce_timer.expires = jiffies + (HZ / 20); /* 0.05s */
+ add_timer(&debounce_timer);
+
+ sgioc->panel = SGIOC_PANEL_POWERON | SGIOC_PANEL_POWERINTR |
+ SGIOC_PANEL_VOLDNINTR | SGIOC_PANEL_VOLDNHOLD |
+ SGIOC_PANEL_VOLUPINTR | SGIOC_PANEL_VOLUPHOLD;
+
+ return;
+ }
+
+ if (machine_state & MACHINE_PANICED)
+ sgimc->cpuctrl0 |= SGIMC_CCTRL0_SYSINIT;
+
+ enable_irq(SGI_PANEL_IRQ);
+}
+
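+/*
+ * First press asks init to shut down (via kill_cad_pid); a second press,
+ * or the absence of an init process, powers the machine off immediately.
+ */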
+static inline void power_button(void)
+{
+ if (machine_state & MACHINE_PANICED)
+ return;
+
+ if ((machine_state & MACHINE_SHUTTING_DOWN) ||
+ kill_cad_pid(SIGINT, 1)) {
+ /* No init process or button pressed twice. */
+ sgi_machine_power_off();
+ }
+
+ machine_state |= MACHINE_SHUTTING_DOWN;
+ blink_timer_timeout = POWERDOWN_FREQ;
+ blink_timeout(&blink_timer);
+
+ timer_setup(&power_timer, power_timeout, 0);
+ power_timer.expires = jiffies + POWERDOWN_TIMEOUT * HZ;
+ add_timer(&power_timer);
+}
+
+static irqreturn_t panel_int(int irq, void *dev_id)
+{
+ unsigned int buttons;
+
+ buttons = sgioc->panel;
+ sgioc->panel = SGIOC_PANEL_POWERON | SGIOC_PANEL_POWERINTR;
+
+ if (sgint->istat1 & SGINT_ISTAT1_PWR) {
+ /* Wait until interrupt goes away */
+ disable_irq_nosync(SGI_PANEL_IRQ);
+ timer_setup(&debounce_timer, debounce, 0);
+ debounce_timer.expires = jiffies + 5;
+ add_timer(&debounce_timer);
+ }
+
+ /* Power button was pressed
+ * ioc.ps page 22: "The Panel Register is called Power Control by Full
+ * House. Only lowest 2 bits are used. Guiness uses upper four bits
+ * for volume control". This is not true, all bits are pulled high
+ * on fullhouse */
+ if (!(buttons & SGIOC_PANEL_POWERINTR))
+ power_button();
+
+ return IRQ_HANDLED;
+}
+
+static int panic_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ if (machine_state & MACHINE_PANICED)
+ return NOTIFY_DONE;
+ machine_state |= MACHINE_PANICED;
+
+ blink_timer_timeout = PANIC_FREQ;
+ blink_timeout(&blink_timer);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_block = {
+ .notifier_call = panic_event,
+};
+
+static int __init reboot_setup(void)
+{
+ int res;
+
+ _machine_restart = sgi_machine_restart;
+ _machine_halt = sgi_machine_halt;
+ pm_power_off = sgi_machine_power_off;
+
+ res = request_irq(SGI_PANEL_IRQ, panel_int, 0, "Front Panel", NULL);
+ if (res) {
+ printk(KERN_ERR "Allocation of front panel IRQ failed\n");
+ return res;
+ }
+
+ timer_setup(&blink_timer, blink_timeout, 0);
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
+ return 0;
+}
+
+subsys_initcall(reboot_setup);
diff --git a/arch/mips/sgi-ip22/ip22-setup.c b/arch/mips/sgi-ip22/ip22-setup.c
new file mode 100644
index 000000000..b69daa024
--- /dev/null
+++ b/arch/mips/sgi-ip22/ip22-setup.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip22-setup.c: SGI specific setup, including init of the feature struct.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1997, 1998 Ralf Baechle (ralf@gnu.org)
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kdev_t.h>
+#include <linux/types.h>
+#include <linux/console.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+
+#include <asm/addrspace.h>
+#include <asm/bcache.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/reboot.h>
+#include <asm/time.h>
+#include <asm/io.h>
+#include <asm/traps.h>
+#include <asm/sgialib.h>
+#include <asm/sgi/mc.h>
+#include <asm/sgi/hpc3.h>
+#include <asm/sgi/ip22.h>
+
+extern void ip22_be_init(void) __init;
+
+void __init plat_mem_setup(void)
+{
+ char *ctype;
+ char *cserial;
+
+ board_be_init = ip22_be_init;
+
+ /* Init the INDY HPC I/O controller. Need to call this before
+ * touching the memory controller because it needs to know the
+ * boardID and whether this is a Guiness or a FullHouse machine.
+ */
+ sgihpc_init();
+
+ /* Init INDY memory controller. */
+ sgimc_init();
+
+#ifdef CONFIG_BOARD_SCACHE
+ /* Now enable boardcaches, if any. */
+ indy_sc_init();
+#endif
+
+ /* Set EISA IO port base for Indigo2
+ * ioremap cannot fail */
+ set_io_port_base((unsigned long)ioremap(0x00080000,
+ 0x1fffffff - 0x00080000));
+ /* ARCS console environment variable is set to "g?" for
+ * graphics console, it is set to "d" for the first serial
+ * line and "d2" for the second serial line.
+ *
+ * Need to check if the case is 'g' but no keyboard:
+ * (ConsoleIn/Out = serial)
+ */
+ ctype = ArcGetEnvironmentVariable("console");
+ cserial = ArcGetEnvironmentVariable("ConsoleOut");
+
+ if ((ctype && *ctype == 'd') || (cserial && *cserial == 's')) {
+ static char options[8] __initdata;
+ char *baud = ArcGetEnvironmentVariable("dbaud");
+ if (baud)
+ strcpy(options, baud);
+ add_preferred_console("ttyS", *(ctype + 1) == '2' ? 1 : 0,
+ baud ? options : NULL);
+ } else if (!ctype || *ctype != 'g') {
+ /* Use ARC if we don't want serial ('d') or graphics ('g'). */
+ prom_flags |= PROM_FLAG_USE_AS_CONSOLE;
+ add_preferred_console("arc", 0, NULL);
+ }
+}
diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c
new file mode 100644
index 000000000..045aa89f2
--- /dev/null
+++ b/arch/mips/sgi-ip22/ip22-time.c
@@ -0,0 +1,131 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Time operations for IP22 machines. Original code may come from
+ * Ralf Baechle or David S. Miller (sorry guys, I'm really not sure)
+ *
+ * Copyright (C) 2001 by Ladislav Michl
+ * Copyright (C) 2003, 06 Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/bcd.h>
+#include <linux/i8253.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/time.h>
+#include <linux/ftrace.h>
+
+#include <asm/cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/time.h>
+#include <asm/sgialib.h>
+#include <asm/sgi/ioc.h>
+#include <asm/sgi/hpc3.h>
+#include <asm/sgi/ip22.h>
+
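+/*
+ * Program 8254 timer 2 with SGINT_TCSAMP_COUNTER and measure how many CP0
+ * Count ticks elapse until the counter's top byte reaches zero.
+ */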
+static unsigned long dosample(void)
+{
+ u32 ct0, ct1;
+ u8 msb;
+
+ /* Start the counter. */
+ sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL |
+ SGINT_TCWORD_MRGEN);
+ sgint->tcnt2 = SGINT_TCSAMP_COUNTER & 0xff;
+ sgint->tcnt2 = SGINT_TCSAMP_COUNTER >> 8;
+
+ /* Get initial counter invariant */
+ ct0 = read_c0_count();
+
+ /* Latch and spin until top byte of counter2 is zero */
+ do {
+ writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword);
+ (void) readb(&sgint->tcnt2);
+ msb = readb(&sgint->tcnt2);
+ ct1 = read_c0_count();
+ } while (msb);
+
+ /* Stop the counter. */
+ writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | SGINT_TCWORD_MSWST,
+ &sgint->tcword);
+ /*
+ * Return the difference; this is how far the r4k counter increments
+ * every 1/HZ seconds. We round to the nearest 1 MHz of the master
+ * clock (= 1000000 / HZ / 2).
+ */
+
+ return (ct1 - ct0) / (500000/HZ) * (500000/HZ);
+}
+
+/*
+ * Here we need to calibrate the cycle counter to at least be close.
+ */
+__init void plat_time_init(void)
+{
+ unsigned long r4k_ticks[3];
+ unsigned long r4k_tick;
+
+ /*
+ * Figure out the r4k offset. The algorithm is very simple and works in
+ * _all_ cases as long as the 8254 counter register itself works ok (as
+ * an interrupt-driving timer it does not, because of a bug; this is why
+ * we are using the on-chip r4k counter/compare register to serve this
+ * purpose, but for the r4k_offset calculation it works fine for us).
+ * There are other very complicated ways of performing this calculation
+ * but this one works just fine so I am not going to futz around. ;-)
+ */
+ printk(KERN_INFO "Calibrating system timer... ");
+ dosample(); /* Prime cache. */
+ dosample(); /* Prime cache. */
+ /* Zero is NOT an option. */
+ do {
+ r4k_ticks[0] = dosample();
+ } while (!r4k_ticks[0]);
+ do {
+ r4k_ticks[1] = dosample();
+ } while (!r4k_ticks[1]);
+
+ if (r4k_ticks[0] != r4k_ticks[1]) {
+ printk("warning: timer counts differ, retrying... ");
+ r4k_ticks[2] = dosample();
+ if (r4k_ticks[2] == r4k_ticks[0]
+ || r4k_ticks[2] == r4k_ticks[1])
+ r4k_tick = r4k_ticks[2];
+ else {
+ printk("disagreement, using average... ");
+ r4k_tick = (r4k_ticks[0] + r4k_ticks[1]
+ + r4k_ticks[2]) / 3;
+ }
+ } else
+ r4k_tick = r4k_ticks[0];
+
+ printk("%d [%d.%04d MHz CPU]\n", (int) r4k_tick,
+ (int) (r4k_tick / (500000 / HZ)),
+ (int) (r4k_tick % (500000 / HZ)));
+
+ mips_hpt_frequency = r4k_tick * HZ;
+
+ if (ip22_is_fullhouse())
+ setup_pit_timer();
+}
+
+/* Generic SGI handler for (spurious) 8254 interrupts */
+void __irq_entry indy_8254timer_irq(void)
+{
+ int irq = SGI_8254_0_IRQ;
+ ULONG cnt;
+ char c;
+
+ irq_enter();
+ kstat_incr_irq_this_cpu(irq);
+ printk(KERN_ALERT "Oops, got 8254 interrupt.\n");
+ ArcRead(0, &c, 1, &cnt);
+ ArcEnterInteractiveMode();
+ irq_exit();
+}
diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c
new file mode 100644
index 000000000..c61362d9e
--- /dev/null
+++ b/arch/mips/sgi-ip22/ip28-berr.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip28-berr.c: Bus error handling.
+ *
+ * Copyright (C) 2002, 2003 Ladislav Michl (ladis@linux-mips.org)
+ * Copyright (C) 2005 Peter Fuerst (pf@net.alphadv.de) - IP28
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
+#include <linux/seq_file.h>
+
+#include <asm/addrspace.h>
+#include <asm/traps.h>
+#include <asm/branch.h>
+#include <asm/irq_regs.h>
+#include <asm/sgi/mc.h>
+#include <asm/sgi/hpc3.h>
+#include <asm/sgi/ioc.h>
+#include <asm/sgi/ip22.h>
+#include <asm/r4kcache.h>
+#include <linux/uaccess.h>
+#include <asm/bootinfo.h>
+
+static unsigned int count_be_is_fixup;
+static unsigned int count_be_handler;
+static unsigned int count_be_interrupt;
+static int debug_be_interrupt;
+
+static unsigned int cpu_err_stat; /* Status reg for CPU */
+static unsigned int gio_err_stat; /* Status reg for GIO */
+static unsigned int cpu_err_addr; /* Error address reg for CPU */
+static unsigned int gio_err_addr; /* Error address reg for GIO */
+static unsigned int extio_stat;
+static unsigned int hpc3_berr_stat; /* Bus error interrupt status */
+
+struct hpc3_stat {
+ unsigned long addr;
+ unsigned int ctrl;
+ unsigned int cbp;
+ unsigned int ndptr;
+};
+
+static struct {
+ struct hpc3_stat pbdma[8];
+ struct hpc3_stat scsi[2];
+ struct hpc3_stat ethrx, ethtx;
+} hpc3;
+
+static struct {
+ unsigned long err_addr;
+ struct {
+ u32 lo;
+ u32 hi;
+ } tags[1][2], tagd[4][2], tagi[4][2]; /* Way 0/1 */
+} cache_tags;
+
+static inline void save_cache_tags(unsigned busaddr)
+{
+ unsigned long addr = CAC_BASE | busaddr;
+ int i;
+ cache_tags.err_addr = addr;
+
+ /*
+ * Starting with a bus-address, save secondary cache (indexed by
+ * PA[23..18:7..6]) tags first.
+ */
+ addr &= ~1L;
+#define tag cache_tags.tags[0]
+ cache_op(Index_Load_Tag_S, addr);
+ tag[0].lo = read_c0_taglo(); /* PA[35:18], VA[13:12] */
+ tag[0].hi = read_c0_taghi(); /* PA[39:36] */
+ cache_op(Index_Load_Tag_S, addr | 1L);
+ tag[1].lo = read_c0_taglo(); /* PA[35:18], VA[13:12] */
+ tag[1].hi = read_c0_taghi(); /* PA[39:36] */
+#undef tag
+
+ /*
+ * Save all primary data cache (indexed by VA[13:5]) tags which
+ * might fit to this bus-address, knowing that VA[11:0] == PA[11:0].
+ * Saving all tags and evaluating them later is easier and safer
+ * than relying on VA[13:12] from the secondary cache tags to pick
+ * matching primary tags here already.
+ */
+ addr &= (0xffL << 56) | ((1 << 12) - 1);
+#define tag cache_tags.tagd[i]
+ for (i = 0; i < 4; ++i, addr += (1 << 12)) {
+ cache_op(Index_Load_Tag_D, addr);
+ tag[0].lo = read_c0_taglo(); /* PA[35:12] */
+ tag[0].hi = read_c0_taghi(); /* PA[39:36] */
+ cache_op(Index_Load_Tag_D, addr | 1L);
+ tag[1].lo = read_c0_taglo(); /* PA[35:12] */
+ tag[1].hi = read_c0_taghi(); /* PA[39:36] */
+ }
+#undef tag
+
+ /*
+ * Save primary instruction cache (indexed by VA[13:6]) tags
+ * the same way.
+ */
+ addr &= (0xffL << 56) | ((1 << 12) - 1);
+#define tag cache_tags.tagi[i]
+ for (i = 0; i < 4; ++i, addr += (1 << 12)) {
+ cache_op(Index_Load_Tag_I, addr);
+ tag[0].lo = read_c0_taglo(); /* PA[35:12] */
+ tag[0].hi = read_c0_taghi(); /* PA[39:36] */
+ cache_op(Index_Load_Tag_I, addr | 1L);
+ tag[1].lo = read_c0_taglo(); /* PA[35:12] */
+ tag[1].hi = read_c0_taghi(); /* PA[39:36] */
+ }
+#undef tag
+}
+
+#define GIO_ERRMASK 0xff00
+#define CPU_ERRMASK 0x3f00
+
+static void save_and_clear_buserr(void)
+{
+ int i;
+
+ /* save status registers */
+ cpu_err_addr = sgimc->cerr;
+ cpu_err_stat = sgimc->cstat;
+ gio_err_addr = sgimc->gerr;
+ gio_err_stat = sgimc->gstat;
+ extio_stat = sgioc->extio;
+ hpc3_berr_stat = hpc3c0->bestat;
+
+ hpc3.scsi[0].addr = (unsigned long)&hpc3c0->scsi_chan0;
+ hpc3.scsi[0].ctrl = hpc3c0->scsi_chan0.ctrl; /* HPC3_SCTRL_ACTIVE ? */
+ hpc3.scsi[0].cbp = hpc3c0->scsi_chan0.cbptr;
+ hpc3.scsi[0].ndptr = hpc3c0->scsi_chan0.ndptr;
+
+ hpc3.scsi[1].addr = (unsigned long)&hpc3c0->scsi_chan1;
+ hpc3.scsi[1].ctrl = hpc3c0->scsi_chan1.ctrl; /* HPC3_SCTRL_ACTIVE ? */
+ hpc3.scsi[1].cbp = hpc3c0->scsi_chan1.cbptr;
+ hpc3.scsi[1].ndptr = hpc3c0->scsi_chan1.ndptr;
+
+ hpc3.ethrx.addr = (unsigned long)&hpc3c0->ethregs.rx_cbptr;
+ hpc3.ethrx.ctrl = hpc3c0->ethregs.rx_ctrl; /* HPC3_ERXCTRL_ACTIVE ? */
+ hpc3.ethrx.cbp = hpc3c0->ethregs.rx_cbptr;
+ hpc3.ethrx.ndptr = hpc3c0->ethregs.rx_ndptr;
+
+ hpc3.ethtx.addr = (unsigned long)&hpc3c0->ethregs.tx_cbptr;
+ hpc3.ethtx.ctrl = hpc3c0->ethregs.tx_ctrl; /* HPC3_ETXCTRL_ACTIVE ? */
+ hpc3.ethtx.cbp = hpc3c0->ethregs.tx_cbptr;
+ hpc3.ethtx.ndptr = hpc3c0->ethregs.tx_ndptr;
+
+ for (i = 0; i < 8; ++i) {
+ /* HPC3_PDMACTRL_ISACT ? */
+ hpc3.pbdma[i].addr = (unsigned long)&hpc3c0->pbdma[i];
+ hpc3.pbdma[i].ctrl = hpc3c0->pbdma[i].pbdma_ctrl;
+ hpc3.pbdma[i].cbp = hpc3c0->pbdma[i].pbdma_bptr;
+ hpc3.pbdma[i].ndptr = hpc3c0->pbdma[i].pbdma_dptr;
+ }
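+ /* Snapshot cache tags for the faulting address; the CPU error
+  * address takes precedence over the GIO one. */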
+ i = 0;
+ if (gio_err_stat & CPU_ERRMASK)
+ i = gio_err_addr;
+ if (cpu_err_stat & CPU_ERRMASK)
+ i = cpu_err_addr;
+ save_cache_tags(i);
+
+ sgimc->cstat = sgimc->gstat = 0;
+}
+
+static void print_cache_tags(void)
+{
+ u32 scb, scw;
+ int i;
+
+ printk(KERN_ERR "Cache tags @ %08x:\n", (unsigned)cache_tags.err_addr);
+
+ /* PA[31:12] shifted to PTag0 (PA[35:12]) format */
+ scw = (cache_tags.err_addr >> 4) & 0x0fffff00;
+
+ scb = cache_tags.err_addr & ((1 << 12) - 1) & ~((1 << 5) - 1);
+ for (i = 0; i < 4; ++i) { /* for each possible VA[13:12] value */
+ if ((cache_tags.tagd[i][0].lo & 0x0fffff00) != scw &&
+ (cache_tags.tagd[i][1].lo & 0x0fffff00) != scw)
+ continue;
+ printk(KERN_ERR
+ "D: 0: %08x %08x, 1: %08x %08x (VA[13:5] %04x)\n",
+ cache_tags.tagd[i][0].hi, cache_tags.tagd[i][0].lo,
+ cache_tags.tagd[i][1].hi, cache_tags.tagd[i][1].lo,
+ scb | (1 << 12)*i);
+ }
+ scb = cache_tags.err_addr & ((1 << 12) - 1) & ~((1 << 6) - 1);
+ for (i = 0; i < 4; ++i) { /* for each possible VA[13:12] value */
+ if ((cache_tags.tagi[i][0].lo & 0x0fffff00) != scw &&
+ (cache_tags.tagi[i][1].lo & 0x0fffff00) != scw)
+ continue;
+ printk(KERN_ERR
+ "I: 0: %08x %08x, 1: %08x %08x (VA[13:6] %04x)\n",
+ cache_tags.tagi[i][0].hi, cache_tags.tagi[i][0].lo,
+ cache_tags.tagi[i][1].hi, cache_tags.tagi[i][1].lo,
+ scb | (1 << 12)*i);
+ }
+ i = read_c0_config();
+ scb = i & (1 << 13) ? 7:6; /* scblksize = 2^[7..6] */
+ scw = ((i >> 16) & 7) + 19 - 1; /* scwaysize = 2^[24..19] / 2 */
+
+ i = ((1 << scw) - 1) & ~((1 << scb) - 1);
+ printk(KERN_ERR "S: 0: %08x %08x, 1: %08x %08x (PA[%u:%u] %05x)\n",
+ cache_tags.tags[0][0].hi, cache_tags.tags[0][0].lo,
+ cache_tags.tags[0][1].hi, cache_tags.tags[0][1].lo,
+ scw-1, scb, i & (unsigned)cache_tags.err_addr);
+}
+
+static inline const char *cause_excode_text(int cause)
+{
+ static const char *txt[32] =
+ { "Interrupt",
+ "TLB modification",
+ "TLB (load or instruction fetch)",
+ "TLB (store)",
+ "Address error (load or instruction fetch)",
+ "Address error (store)",
+ "Bus error (instruction fetch)",
+ "Bus error (data: load or store)",
+ "Syscall",
+ "Breakpoint",
+ "Reserved instruction",
+ "Coprocessor unusable",
+ "Arithmetic Overflow",
+ "Trap",
+ "14",
+ "Floating-Point",
+ "16", "17", "18", "19", "20", "21", "22",
+ "Watch Hi/Lo",
+ "24", "25", "26", "27", "28", "29", "30", "31",
+ };
+ return txt[(cause & 0x7c) >> 2];
+}
+
+static void print_buserr(const struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+ int error = 0;
+
+ if (extio_stat & EXTIO_MC_BUSERR) {
+ printk(KERN_ERR "MC Bus Error\n");
+ error |= 1;
+ }
+ if (extio_stat & EXTIO_HPC3_BUSERR) {
+ printk(KERN_ERR "HPC3 Bus Error 0x%x:<id=0x%x,%s,lane=0x%x>\n",
+ hpc3_berr_stat,
+ (hpc3_berr_stat & HPC3_BESTAT_PIDMASK) >>
+ HPC3_BESTAT_PIDSHIFT,
+ (hpc3_berr_stat & HPC3_BESTAT_CTYPE) ? "PIO" : "DMA",
+ hpc3_berr_stat & HPC3_BESTAT_BLMASK);
+ error |= 2;
+ }
+ if (extio_stat & EXTIO_EISA_BUSERR) {
+ printk(KERN_ERR "EISA Bus Error\n");
+ error |= 4;
+ }
+ if (cpu_err_stat & CPU_ERRMASK) {
+ printk(KERN_ERR "CPU error 0x%x<%s%s%s%s%s%s> @ 0x%08x\n",
+ cpu_err_stat,
+ cpu_err_stat & SGIMC_CSTAT_RD ? "RD " : "",
+ cpu_err_stat & SGIMC_CSTAT_PAR ? "PAR " : "",
+ cpu_err_stat & SGIMC_CSTAT_ADDR ? "ADDR " : "",
+ cpu_err_stat & SGIMC_CSTAT_SYSAD_PAR ? "SYSAD " : "",
+ cpu_err_stat & SGIMC_CSTAT_SYSCMD_PAR ? "SYSCMD " : "",
+ cpu_err_stat & SGIMC_CSTAT_BAD_DATA ? "BAD_DATA " : "",
+ cpu_err_addr);
+ error |= 8;
+ }
+ if (gio_err_stat & GIO_ERRMASK) {
+ printk(KERN_ERR "GIO error 0x%x:<%s%s%s%s%s%s%s%s> @ 0x%08x\n",
+ gio_err_stat,
+ gio_err_stat & SGIMC_GSTAT_RD ? "RD " : "",
+ gio_err_stat & SGIMC_GSTAT_WR ? "WR " : "",
+ gio_err_stat & SGIMC_GSTAT_TIME ? "TIME " : "",
+ gio_err_stat & SGIMC_GSTAT_PROM ? "PROM " : "",
+ gio_err_stat & SGIMC_GSTAT_ADDR ? "ADDR " : "",
+ gio_err_stat & SGIMC_GSTAT_BC ? "BC " : "",
+ gio_err_stat & SGIMC_GSTAT_PIO_RD ? "PIO_RD " : "",
+ gio_err_stat & SGIMC_GSTAT_PIO_WR ? "PIO_WR " : "",
+ gio_err_addr);
+ error |= 16;
+ }
+ if (!error)
+ printk(KERN_ERR "MC: Hmm, didn't find any error condition.\n");
+ else {
+ printk(KERN_ERR "CP0: config %08x, "
+ "MC: cpuctrl0/1: %08x/%05x, giopar: %04x\n"
+ "MC: cpu/gio_memacc: %08x/%05x, memcfg0/1: %08x/%08x\n",
+ read_c0_config(),
+ sgimc->cpuctrl0, sgimc->cpuctrl1, sgimc->giopar,
+ sgimc->cmacc, sgimc->gmacc,
+ sgimc->mconfig0, sgimc->mconfig1);
+ print_cache_tags();
+ }
+ printk(KERN_ALERT "%s, epc == %0*lx, ra == %0*lx\n",
+ cause_excode_text(regs->cp0_cause),
+ field, regs->cp0_epc, field, regs->regs[31]);
+}
+
+static int check_microtlb(u32 hi, u32 lo, unsigned long vaddr)
+{
+ /* This is likely rather similar to correct code ;-) */
+
+ vaddr &= 0x7fffffff; /* Doc. states that top bit is ignored */
+
+ /* If tlb-entry is valid and VPN-high (bits [30:21] ?) matches... */
+ if ((lo & 2) && (vaddr >> 21) == ((hi<<1) >> 22)) {
+ u32 ctl = sgimc->dma_ctrl;
+ if (ctl & 1) {
+ unsigned int pgsz = (ctl & 2) ? 14:12; /* 16k:4k */
+ /* PTEIndex is VPN-low (bits [22:14]/[20:12] ?) */
+ unsigned long pte = (lo >> 6) << 12; /* PTEBase */
+ pte += 8*((vaddr >> pgsz) & 0x1ff);
+ if (page_is_ram(PFN_DOWN(pte))) {
+ /*
+ * Note: Since DMA hardware does look up
+ * translation on its own, this PTE *must*
+ * match the TLB/EntryLo-register format !
+ */
+ unsigned long a = *(unsigned long *)
+ PHYS_TO_XKSEG_UNCACHED(pte);
+ a = (a & 0x3f) << 6; /* PFN */
+ a += vaddr & ((1 << pgsz) - 1);
+ return cpu_err_addr == a;
+ }
+ }
+ }
+ return 0;
+}
+
+static int check_vdma_memaddr(void)
+{
+ if (cpu_err_stat & CPU_ERRMASK) {
+ u32 a = sgimc->maddronly;
+
+ if (!(sgimc->dma_ctrl & 0x100)) /* Xlate-bit clear ? */
+ return cpu_err_addr == a;
+
+ if (check_microtlb(sgimc->dtlb_hi0, sgimc->dtlb_lo0, a) ||
+ check_microtlb(sgimc->dtlb_hi1, sgimc->dtlb_lo1, a) ||
+ check_microtlb(sgimc->dtlb_hi2, sgimc->dtlb_lo2, a) ||
+ check_microtlb(sgimc->dtlb_hi3, sgimc->dtlb_lo3, a))
+ return 1;
+ }
+ return 0;
+}
+
+static int check_vdma_gioaddr(void)
+{
+ if (gio_err_stat & GIO_ERRMASK) {
+ u32 a = sgimc->gio_dma_trans;
+ a = (sgimc->gmaddronly & ~a) | (sgimc->gio_dma_sbits & a);
+ return gio_err_addr == a;
+ }
+ return 0;
+}
+
+/*
+ * MC sends an interrupt whenever bus or parity errors occur. In addition,
+ * if the error happened during a CPU read, it also asserts the bus error
+ * pin on the R4K. When this happens, the bus error handler saves the MC bus
+ * error registers and then clears the interrupt.
+ */
+
+static int ip28_be_interrupt(const struct pt_regs *regs)
+{
+ int i;
+
+ save_and_clear_buserr();
+ /*
+ * Try to find out, whether we got here by a mispredicted speculative
+ * load/store operation. If so, it's not fatal, we can go on.
+ */
+ /* Any cause other than "Interrupt" (ExcCode 0) is fatal. */
+ if (regs->cp0_cause & CAUSEF_EXCCODE)
+ goto mips_be_fatal;
+
+ /* Any cause other than "Bus error interrupt" (IP6) is weird. */
+ if ((regs->cp0_cause & CAUSEF_IP6) != CAUSEF_IP6)
+ goto mips_be_fatal;
+
+ if (extio_stat & (EXTIO_HPC3_BUSERR | EXTIO_EISA_BUSERR))
+ goto mips_be_fatal;
+
+ /* Any state other than "Memory bus error" is fatal. */
+ if (cpu_err_stat & CPU_ERRMASK & ~SGIMC_CSTAT_ADDR)
+ goto mips_be_fatal;
+
+ /* GIO errors other than timeouts are fatal */
+ if (gio_err_stat & GIO_ERRMASK & ~SGIMC_GSTAT_TIME)
+ goto mips_be_fatal;
+
+ /*
+ * Now we have an asynchronous bus error, speculatively or DMA caused.
+ * Need to search all DMA descriptors for the error address.
+ */
+ for (i = 0; i < sizeof(hpc3)/sizeof(struct hpc3_stat); ++i) {
+ struct hpc3_stat *hp = (struct hpc3_stat *)&hpc3 + i;
+ if ((cpu_err_stat & CPU_ERRMASK) &&
+ (cpu_err_addr == hp->ndptr || cpu_err_addr == hp->cbp))
+ break;
+ if ((gio_err_stat & GIO_ERRMASK) &&
+ (gio_err_addr == hp->ndptr || gio_err_addr == hp->cbp))
+ break;
+ }
+ if (i < sizeof(hpc3)/sizeof(struct hpc3_stat)) {
+ struct hpc3_stat *hp = (struct hpc3_stat *)&hpc3 + i;
+ printk(KERN_ERR "at DMA addresses: HPC3 @ %08lx:"
+ " ctl %08x, ndp %08x, cbp %08x\n",
+ CPHYSADDR(hp->addr), hp->ctrl, hp->ndptr, hp->cbp);
+ goto mips_be_fatal;
+ }
+ /* Check MC's virtual DMA stuff. */
+ if (check_vdma_memaddr()) {
+ printk(KERN_ERR "at GIO DMA: mem address 0x%08x.\n",
+ sgimc->maddronly);
+ goto mips_be_fatal;
+ }
+ if (check_vdma_gioaddr()) {
+ printk(KERN_ERR "at GIO DMA: gio address 0x%08x.\n",
+ sgimc->gmaddronly);
+ goto mips_be_fatal;
+ }
+ /* A speculative bus error... */
+ if (debug_be_interrupt) {
+ print_buserr(regs);
+ printk(KERN_ERR "discarded!\n");
+ }
+ return MIPS_BE_DISCARD;
+
+mips_be_fatal:
+ print_buserr(regs);
+ return MIPS_BE_FATAL;
+}
+
+void ip22_be_interrupt(int irq)
+{
+ struct pt_regs *regs = get_irq_regs();
+
+ count_be_interrupt++;
+
+ if (ip28_be_interrupt(regs) != MIPS_BE_DISCARD) {
+ /* Assume it would be too dangerous to continue ... */
+ die_if_kernel("Oops", regs);
+ force_sig(SIGBUS);
+ } else if (debug_be_interrupt)
+ show_regs(regs);
+}
+
+static int ip28_be_handler(struct pt_regs *regs, int is_fixup)
+{
+ /*
+ * We arrive here only in the unusual case of do_be() invocation,
+ * i.e. by a bus error exception without a bus error interrupt.
+ */
+ if (is_fixup) {
+ count_be_is_fixup++;
+ save_and_clear_buserr();
+ return MIPS_BE_FIXUP;
+ }
+ count_be_handler++;
+ return ip28_be_interrupt(regs);
+}
+
+void __init ip22_be_init(void)
+{
+ board_be_handler = ip28_be_handler;
+}
+
+int ip28_show_be_info(struct seq_file *m)
+{
+ seq_printf(m, "IP28 be fixups\t\t: %u\n", count_be_is_fixup);
+ seq_printf(m, "IP28 be interrupts\t: %u\n", count_be_interrupt);
+ seq_printf(m, "IP28 be handler\t\t: %u\n", count_be_handler);
+
+ return 0;
+}
+
+static int __init debug_be_setup(char *str)
+{
+ debug_be_interrupt++;
+ return 1;
+}
+__setup("ip28_debug_be", debug_be_setup);
diff --git a/arch/mips/sgi-ip27/Kconfig b/arch/mips/sgi-ip27/Kconfig
new file mode 100644
index 000000000..e5b6cadbe
--- /dev/null
+++ b/arch/mips/sgi-ip27/Kconfig
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+choice
+ prompt "Node addressing mode"
+ depends on SGI_IP27
+ default SGI_SN_M_MODE
+
+config SGI_SN_M_MODE
+ bool "IP27 M-Mode"
+ help
+ The nodes of Origin, Onyx, Fuel and Tezro systems can be configured
+ in either N-Mode, which allows for more nodes, or M-Mode, which
+ allows for more memory. Your hardware is almost certainly running
+ in M-Mode, so choose M-Mode here.
+
+config SGI_SN_N_MODE
+ bool "IP27 N-Mode"
+ help
+ The nodes of Origin, Onyx, Fuel and Tezro systems can be configured
+ in either N-Mode, which allows for more nodes, or M-Mode, which
+ allows for more memory. Your hardware is almost certainly running
+ in M-Mode, so choose M-Mode here.
+
+endchoice
+
+config MAPPED_KERNEL
+ bool "Mapped kernel support"
+ depends on SGI_IP27
+ help
+ Change the way a Linux kernel is loaded into memory on a MIPS64
+ machine. This is required in order to support text replication on
+ NUMA. If you need to understand it, read the source code.
+
+config REPLICATE_KTEXT
+ bool "Kernel text replication support"
+ depends on SGI_IP27
+ select MAPPED_KERNEL
+ help
+ Say Y here to enable replicating the kernel text across multiple
+ nodes in a NUMA cluster. This trades memory for speed.
+
diff --git a/arch/mips/sgi-ip27/Makefile b/arch/mips/sgi-ip27/Makefile
new file mode 100644
index 000000000..27c14ede1
--- /dev/null
+++ b/arch/mips/sgi-ip27/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the IP27 specific kernel interface routines under Linux.
+#
+
+obj-y := ip27-berr.o ip27-irq.o ip27-init.o ip27-klconfig.o \
+ ip27-klnuma.o ip27-memory.o ip27-nmi.o ip27-reset.o ip27-timer.o \
+ ip27-hubio.o ip27-xtalk.o
+
+obj-$(CONFIG_EARLY_PRINTK) += ip27-console.o
+obj-$(CONFIG_SMP) += ip27-smp.o
diff --git a/arch/mips/sgi-ip27/Platform b/arch/mips/sgi-ip27/Platform
new file mode 100644
index 000000000..e734ee6ab
--- /dev/null
+++ b/arch/mips/sgi-ip27/Platform
@@ -0,0 +1,16 @@
+#
+# SGI-IP27 (Origin200/2000)
+#
+# Set the load address to >= 0xc000000000300000 if you want to leave space for
+# symmon, 0xc00000000001c000 for production kernels. Note that the value must
+# be 16kb aligned or the handling of the current variable will break.
+#
+cflags-$(CONFIG_SGI_IP27) += -I$(srctree)/arch/mips/include/asm/mach-ip27
+ifdef CONFIG_MAPPED_KERNEL
+load-$(CONFIG_SGI_IP27) += 0xc00000004001c000
+OBJCOPYFLAGS := --change-addresses=0x3fffffff80000000
+dataoffset-$(CONFIG_SGI_IP27) += 0x01000000
+else
+load-$(CONFIG_SGI_IP27) += 0xa80000000001c000
+OBJCOPYFLAGS := --change-addresses=0x57ffffff80000000
+endif
diff --git a/arch/mips/sgi-ip27/TODO b/arch/mips/sgi-ip27/TODO
new file mode 100644
index 000000000..160857ff1
--- /dev/null
+++ b/arch/mips/sgi-ip27/TODO
@@ -0,0 +1,19 @@
+1. Need to figure out why PCI writes to the IOC3 hang, and if it is okay
+not to write to the IOC3 ever.
+2. Need to figure out RRB allocation in bridge_startup().
+3. Need to figure out why address swizzling is needed in inw/outw for
+Qlogic scsi controllers.
+4. Need to integrate ip27-klconfig.c:find_lboard and
+ip27-init.c:find_lboard_real. DONE
+5. Is it okay to set calias space on all nodes as 0, instead of 8k as
+in irix?
+6. Investigate why things do not work without the setup_test() call
+being invoked on all nodes in ip27-memory.c.
+8. Too many do_page_faults invoked - investigate.
+9. start_thread must turn off UX64 ... and define tlb_refill_debug.
+10. Need a bad pmd table, bad pte table. __bad_pmd_table/__bad_pagetable
+does not agree with pgd_bad/pmd_bad.
+11. All intrs (ip27_do_irq handlers) are targeted at cpu A on the node.
+This might need to change later. Only the timer intr is set up to be
+received on both Cpu A and B. (ip27_do_irq()/bridge_startup())
+13. Cache flushing (especially the SMP version) has to be investigated.
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
new file mode 100644
index 000000000..5a38ae6bd
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -0,0 +1,96 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1999, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 by Silicon Graphics
+ * Copyright (C) 2002 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/signal.h> /* for SIGBUS */
+#include <linux/sched.h> /* show_regs(), force_sig() */
+#include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
+
+#include <asm/ptrace.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/arch.h>
+#include <asm/tlbdebug.h>
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+
+static void dump_hub_information(unsigned long errst0, unsigned long errst1)
+{
+ static char *err_type[2][8] = {
+ { NULL, "Uncached Partial Read PRERR", "DERR", "Read Timeout",
+ NULL, NULL, NULL, NULL },
+ { "WERR", "Uncached Partial Write", "PWERR", "Write Timeout",
+ NULL, NULL, NULL, NULL }
+ };
+ union pi_err_stat0 st0;
+ union pi_err_stat1 st1;
+
+ st0.pi_stat0_word = errst0;
+ st1.pi_stat1_word = errst1;
+
+ if (!st0.pi_stat0_fmt.s0_valid) {
+ pr_info("Hub does not contain valid error information\n");
+ return;
+ }
+
+ pr_info("Hub has valid error information:\n");
+ if (st0.pi_stat0_fmt.s0_ovr_run)
+ pr_info("Overrun is set. Error stack may contain additional "
+ "information.\n");
+ pr_info("Hub error address is %08lx\n",
+ (unsigned long)st0.pi_stat0_fmt.s0_addr);
+ pr_info("Incoming message command 0x%lx\n",
+ (unsigned long)st0.pi_stat0_fmt.s0_cmd);
+ pr_info("Supplemental field of incoming message is 0x%lx\n",
+ (unsigned long)st0.pi_stat0_fmt.s0_supl);
+ pr_info("T5 Rn (for RRB only) is 0x%lx\n",
+ (unsigned long)st0.pi_stat0_fmt.s0_t5_req);
+ pr_info("Error type is %s\n", err_type[st1.pi_stat1_fmt.s1_rw_rb]
+ [st0.pi_stat0_fmt.s0_err_type] ? : "invalid");
+}
+
+int ip27_be_handler(struct pt_regs *regs, int is_fixup)
+{
+ unsigned long errst0, errst1;
+ int data = regs->cp0_cause & 4;
+ int cpu = LOCAL_HUB_L(PI_CPU_NUM);
+
+ if (is_fixup)
+ return MIPS_BE_FIXUP;
+
+ printk("Slice %c got %cbe at 0x%lx\n", 'A' + cpu, data ? 'd' : 'i',
+ regs->cp0_epc);
+ printk("Hub information:\n");
+ printk("ERR_INT_PEND = 0x%06llx\n", LOCAL_HUB_L(PI_ERR_INT_PEND));
+ errst0 = LOCAL_HUB_L(cpu ? PI_ERR_STATUS0_B : PI_ERR_STATUS0_A);
+ errst1 = LOCAL_HUB_L(cpu ? PI_ERR_STATUS1_B : PI_ERR_STATUS1_A);
+ dump_hub_information(errst0, errst1);
+ show_regs(regs);
+ dump_tlb_all();
+ while(1);
+ force_sig(SIGBUS);
+}
+
+void __init ip27_be_init(void)
+{
+ /* XXX Initialize all the Hub & Bridge error handling here. */
+ int cpu = LOCAL_HUB_L(PI_CPU_NUM);
+ int cpuoff = cpu << 8;
+
+ board_be_handler = ip27_be_handler;
+
+ LOCAL_HUB_S(PI_ERR_INT_PEND,
+ cpu ? PI_ERR_CLEAR_ALL_B : PI_ERR_CLEAR_ALL_A);
+ LOCAL_HUB_S(PI_ERR_INT_MASK_A + cpuoff, 0);
+ LOCAL_HUB_S(PI_ERR_STACK_ADDR_A + cpuoff, 0);
+ LOCAL_HUB_S(PI_ERR_STACK_SIZE, 0); /* Disable error stack */
+ LOCAL_HUB_S(PI_SYSAD_ERRCHK_EN, PI_SYSAD_CHECK_ALL);
+}
diff --git a/arch/mips/sgi-ip27/ip27-common.h b/arch/mips/sgi-ip27/ip27-common.h
new file mode 100644
index 000000000..ed008a084
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-common.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __IP27_COMMON_H
+#define __IP27_COMMON_H
+
+extern nasid_t master_nasid;
+
+extern void cpu_node_probe(void);
+extern void hub_rt_clock_event_init(void);
+extern void hub_rtc_init(nasid_t nasid);
+extern void install_cpu_nmi_handler(int slice);
+extern void install_ipi(void);
+extern void ip27_reboot_setup(void);
+extern const struct plat_smp_ops ip27_smp_ops;
+extern unsigned long node_getfirstfree(nasid_t nasid);
+extern void per_cpu_init(void);
+extern void replicate_kernel_text(void);
+extern void setup_replication_mask(void);
+
+#endif /* __IP27_COMMON_H */
diff --git a/arch/mips/sgi-ip27/ip27-console.c b/arch/mips/sgi-ip27/ip27-console.c
new file mode 100644
index 000000000..7737a88c6
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-console.c
@@ -0,0 +1,42 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2002 Ralf Baechle
+ */
+
+#include <asm/page.h>
+#include <asm/setup.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/ioc3.h>
+
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+
+#include "ip27-common.h"
+
+#define IOC3_CLK (22000000 / 3)
+#define IOC3_FLAGS (0)
+
+static inline struct ioc3_uartregs *console_uart(void)
+{
+ struct ioc3 *ioc3;
+ nasid_t nasid;
+
+ nasid = (master_nasid == INVALID_NASID) ? get_nasid() : master_nasid;
+ ioc3 = (struct ioc3 *)KL_CONFIG_CH_CONS_INFO(nasid)->memory_base;
+
+ return &ioc3->sregs.uarta;
+}
+
+void prom_putchar(char c)
+{
+ struct ioc3_uartregs *uart = console_uart();
+
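+ /* spin until the transmit holding register is empty (LSR bit 5) */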
+ while ((readb(&uart->iu_lsr) & 0x20) == 0)
+ ;
+ writeb(c, &uart->iu_thr);
+}
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
new file mode 100644
index 000000000..8352eb640
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-hubio.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.
+ * Copyright (C) 2004 Christoph Hellwig.
+ *
+ * Support functions for the HUB ASIC - mostly PIO mapping related.
+ */
+
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/mmzone.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/io.h>
+#include <asm/xtalk/xtalk.h>
+
+
+static int force_fire_and_forget = 1;
+
+/**
+ * hub_pio_map - establish a HUB PIO mapping
+ *
+ * @hub: hub to perform PIO mapping on
+ * @widget: widget ID to perform PIO mapping for
+ * @xtalk_addr: xtalk_address that needs to be mapped
+ * @size: size of the PIO mapping
+ *
+ **/
+unsigned long hub_pio_map(nasid_t nasid, xwidgetnum_t widget,
+ unsigned long xtalk_addr, size_t size)
+{
+ unsigned i;
+
+ /* use small-window mapping if possible */
+ if ((xtalk_addr % SWIN_SIZE) + size <= SWIN_SIZE)
+ return NODE_SWIN_BASE(nasid, widget) + (xtalk_addr % SWIN_SIZE);
+
+ if ((xtalk_addr % BWIN_SIZE) + size > BWIN_SIZE) {
+ printk(KERN_WARNING "PIO mapping at hub %d widget %d addr 0x%lx"
+ " too big (%ld)\n",
+ nasid, widget, xtalk_addr, size);
+ return 0;
+ }
+
+ xtalk_addr &= ~(BWIN_SIZE-1);
+ for (i = 0; i < HUB_NUM_BIG_WINDOW; i++) {
+ if (test_and_set_bit(i, hub_data(nasid)->h_bigwin_used))
+ continue;
+
+ /*
+ * The code below does a PIO write to setup an ITTE entry.
+ *
+ * We need to prevent other CPUs from seeing our updated
+ * memory shadow of the ITTE (in the piomap) until the ITTE
+ * entry is actually set up; otherwise, another CPU might
+ * attempt a PIO prematurely.
+ *
+ * Also, the only way we can know that an entry has been
+ * received by the hub and can be used by future PIO reads/
+ * writes is by reading back the ITTE entry after writing it.
+ *
+ * For these two reasons, we PIO read back the ITTE entry
+ * after we write it.
+ */
+ IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
+ __raw_readq(IIO_ITTE_GET(nasid, i));
+
+ return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE);
+ }
+
+ printk(KERN_WARNING "unable to establish PIO mapping for at"
+ " hub %d widget %d addr 0x%lx\n",
+ nasid, widget, xtalk_addr);
+ return 0;
+}
+
+
+/*
+ * hub_setup_prb(nasid, prbnum, credits)
+ *
+ * Put a PRB into fire-and-forget mode if force_fire_and_forget is set.
+ * Otherwise, put it into conveyor belt mode with the specified number of
+ * credits.
+ */
+static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
+{
+ union iprb_u prb;
+ int prb_offset;
+
+ /*
+ * Get the current register value.
+ */
+ prb_offset = IIO_IOPRB(prbnum);
+ prb.iprb_regval = REMOTE_HUB_L(nasid, prb_offset);
+
+ /*
+ * Clear out some fields.
+ */
+ prb.iprb_ovflow = 1;
+ prb.iprb_bnakctr = 0;
+ prb.iprb_anakctr = 0;
+
+ /*
+ * Enable or disable fire-and-forget mode.
+ */
+ prb.iprb_ff = force_fire_and_forget ? 1 : 0;
+
+ /*
+ * Set the appropriate number of PIO credits for the widget.
+ */
+ prb.iprb_xtalkctr = credits;
+
+ /*
+ * Store the new value to the register.
+ */
+ REMOTE_HUB_S(nasid, prb_offset, prb.iprb_regval);
+}
+
+/**
+ * hub_set_piomode - set pio mode for a given hub
+ *
+ * @nasid: physical node ID for the hub in question
+ *
+ * Put the hub into either "PIO conveyor belt" mode or "fire-and-forget" mode.
+ * To do this, we have to make absolutely sure that no PIOs are in progress
+ * so we turn off access to all widgets for the duration of the function.
+ *
+ * XXX - This code should really check what kind of widget we're talking
+ * to. Bridges can only handle three requests, but XG will do more.
+ * How many can crossbow handle to widget 0? We're assuming 1.
+ *
+ * XXX - There is a bug in the crossbow that link reset PIOs do not
+ * return write responses. The easiest solution to this problem is to
+ * leave widget 0 (xbow) in fire-and-forget mode at all times. This
+ * only affects pio's to xbow registers, which should be rare.
+ **/
+static void hub_set_piomode(nasid_t nasid)
+{
+ u64 ii_iowa;
+ union hubii_wcr_u ii_wcr;
+ unsigned i;
+
+ ii_iowa = REMOTE_HUB_L(nasid, IIO_OUTWIDGET_ACCESS);
+ REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, 0);
+
+ ii_wcr.wcr_reg_value = REMOTE_HUB_L(nasid, IIO_WCR);
+
+ if (ii_wcr.iwcr_dir_con) {
+ /*
+ * Assume a bridge here.
+ */
+ hub_setup_prb(nasid, 0, 3);
+ } else {
+ /*
+ * Assume a crossbow here.
+ */
+ hub_setup_prb(nasid, 0, 1);
+ }
+
+ /*
+ * XXX - Here's where we should take the widget type into
+ * account when assigning credits.
+ */
+ for (i = HUB_WIDGET_ID_MIN; i <= HUB_WIDGET_ID_MAX; i++)
+ hub_setup_prb(nasid, i, 3);
+
+ REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, ii_iowa);
+}
+
+/*
+ * hub_pio_init - PIO-related hub initialization
+ *
+ * @hub: hubinfo structure for our hub
+ */
+void hub_pio_init(nasid_t nasid)
+{
+ unsigned i;
+
+ /* initialize big window piomaps for this hub */
+ bitmap_zero(hub_data(nasid)->h_bigwin_used, HUB_NUM_BIG_WINDOW);
+ for (i = 0; i < HUB_NUM_BIG_WINDOW; i++)
+ IIO_ITTE_DISABLE(nasid, i);
+
+ hub_set_piomode(nasid);
+}
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
new file mode 100644
index 000000000..a4daf8ccd
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -0,0 +1,147 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
+ * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/export.h>
+#include <linux/cpumask.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/io.h>
+#include <asm/sgialib.h>
+#include <asm/time.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/types.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/ioc3.h>
+#include <asm/mipsregs.h>
+#include <asm/sn/gda.h>
+#include <asm/sn/intr.h>
+#include <asm/current.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/sn/launch.h>
+#include <asm/sn/mapped_kernel.h>
+
+#include "ip27-common.h"
+
+#define CPU_NONE (cpuid_t)-1
+
+static DECLARE_BITMAP(hub_init_mask, MAX_NUMNODES);
+nasid_t master_nasid = INVALID_NASID;
+
+struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
+EXPORT_SYMBOL_GPL(sn_cpu_info);
+
+static void per_hub_init(nasid_t nasid)
+{
+ struct hub_data *hub = hub_data(nasid);
+
+ cpumask_set_cpu(smp_processor_id(), &hub->h_cpus);
+
+ if (test_and_set_bit(nasid, hub_init_mask))
+ return;
+ /*
+ * Set CRB timeout at 5ms (< PI timeout of 10ms)
+ */
+ REMOTE_HUB_S(nasid, IIO_ICTP, 0x800);
+ REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
+
+ hub_rtc_init(nasid);
+
+ if (nasid) {
+ /* copy exception handlers from first node to current node */
+ memcpy((void *)NODE_OFFSET_TO_K0(nasid, 0),
+ (void *)CKSEG0, 0x200);
+ __flush_cache_all();
+ /* switch to node local exception handlers */
+ REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
+ }
+}
+
+void per_cpu_init(void)
+{
+ int cpu = smp_processor_id();
+ nasid_t nasid = get_nasid();
+
+ clear_c0_status(ST0_IM);
+
+ per_hub_init(nasid);
+
+ pr_info("CPU %d clock is %dMHz.\n", cpu, sn_cpu_info[cpu].p_speed);
+
+ install_ipi();
+
+ /* Install our NMI handler if symmon hasn't installed one. */
+ install_cpu_nmi_handler(cputoslice(cpu));
+
+ enable_percpu_irq(IP27_HUB_PEND0_IRQ, IRQ_TYPE_NONE);
+ enable_percpu_irq(IP27_HUB_PEND1_IRQ, IRQ_TYPE_NONE);
+}
+
+void __init plat_mem_setup(void)
+{
+ u64 p, e, n_mode;
+ nasid_t nid;
+
+ register_smp_ops(&ip27_smp_ops);
+
+ ip27_reboot_setup();
+
+ /*
+ * hub_rtc init and cpu clock intr enabled for later calibrate_delay.
+ */
+ nid = get_nasid();
+ printk("IP27: Running on node %d.\n", nid);
+
+ p = LOCAL_HUB_L(PI_CPU_PRESENT_A) & 1;
+ e = LOCAL_HUB_L(PI_CPU_ENABLE_A) & 1;
+ printk("Node %d has %s primary CPU%s.\n", nid,
+ p ? "a" : "no",
+ e ? ", CPU is running" : "");
+
+ p = LOCAL_HUB_L(PI_CPU_PRESENT_B) & 1;
+ e = LOCAL_HUB_L(PI_CPU_ENABLE_B) & 1;
+ printk("Node %d has %s secondary CPU%s.\n", nid,
+ p ? "a" : "no",
+ e ? ", CPU is running" : "");
+
+ /*
+ * Try to catch kernel misconfigurations and give the user an
+ * indication of which option to select.
+ */
+ n_mode = LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_MORENODES_MASK;
+ printk("Machine is in %c mode.\n", n_mode ? 'N' : 'M');
+#ifdef CONFIG_SGI_SN_N_MODE
+ if (!n_mode)
+ panic("Kernel compiled for M mode.");
+#else
+ if (n_mode)
+ panic("Kernel compiled for N mode.");
+#endif
+
+ ioport_resource.start = 0;
+ ioport_resource.end = ~0UL;
+ set_io_port_base(IO_BASE);
+}
+
+const char *get_system_type(void)
+{
+ return "SGI Origin";
+}
+
+void __init prom_init(void)
+{
+ prom_init_cmdline(fw_arg0, (LONG *)fw_arg1);
+ prom_meminit();
+}
+
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
new file mode 100644
index 000000000..42df9fafa
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip27-irq.c: Highlevel interrupt handling for IP27 architecture.
+ *
+ * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 1999 - 2001 Kanoj Sarcar
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/sched.h>
+
+#include <asm/io.h>
+#include <asm/irq_cpu.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/irq_alloc.h>
+
+struct hub_irq_data {
+ u64 *irq_mask[2];
+ cpuid_t cpu;
+};
+
+static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT);
+
+static DEFINE_PER_CPU(unsigned long [2], irq_enable_mask);
+
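+/*
+ * Grab a free software level from the hub interrupt map.  test_and_set_bit()
+ * catches a concurrent allocation of the same bit, in which case we retry.
+ */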
+static inline int alloc_level(void)
+{
+ int level;
+
+again:
+ level = find_first_zero_bit(hub_irq_map, IP27_HUB_IRQ_COUNT);
+ if (level >= IP27_HUB_IRQ_COUNT)
+ return -ENOSPC;
+
+ if (test_and_set_bit(level, hub_irq_map))
+ goto again;
+
+ return level;
+}
+
+static void enable_hub_irq(struct irq_data *d)
+{
+ struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
+ unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
+
+ set_bit(d->hwirq, mask);
+ __raw_writeq(mask[0], hd->irq_mask[0]);
+ __raw_writeq(mask[1], hd->irq_mask[1]);
+}
+
+static void disable_hub_irq(struct irq_data *d)
+{
+ struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
+ unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
+
+ clear_bit(d->hwirq, mask);
+ __raw_writeq(mask[0], hd->irq_mask[0]);
+ __raw_writeq(mask[1], hd->irq_mask[1]);
+}
+
+static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
+{
+ nasid_t nasid;
+ int cpu;
+
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_any(cpu_online_mask);
+
+ nasid = cpu_to_node(cpu);
+ hd->cpu = cpu;
+ if (!cputoslice(cpu)) {
+ hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A);
+ hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A);
+ } else {
+ hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
+ hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
+ }
+}
+
+static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
+ bool force)
+{
+ struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
+
+ if (!hd)
+ return -EINVAL;
+
+ if (irqd_is_started(d))
+ disable_hub_irq(d);
+
+ setup_hub_mask(hd, mask);
+
+ if (irqd_is_started(d))
+ enable_hub_irq(d);
+
+ irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));
+
+ return 0;
+}
+
+static struct irq_chip hub_irq_type = {
+ .name = "HUB",
+ .irq_mask = disable_hub_irq,
+ .irq_unmask = enable_hub_irq,
+ .irq_set_affinity = set_affinity_hub_irq,
+};
+
+static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct irq_alloc_info *info = arg;
+ struct hub_irq_data *hd;
+ struct hub_data *hub;
+ struct irq_desc *desc;
+ int swlevel;
+
+ if (nr_irqs > 1 || !info)
+ return -EINVAL;
+
+ hd = kzalloc(sizeof(*hd), GFP_KERNEL);
+ if (!hd)
+ return -ENOMEM;
+
+ swlevel = alloc_level();
+ if (unlikely(swlevel < 0)) {
+ kfree(hd);
+ return -EAGAIN;
+ }
+ irq_domain_set_info(domain, virq, swlevel, &hub_irq_type, hd,
+ handle_level_irq, NULL, NULL);
+
+ /* use CPU connected to nearest hub */
+ hub = hub_data(info->nasid);
+ setup_hub_mask(hd, &hub->h_cpus);
+ info->nasid = cpu_to_node(hd->cpu);
+
+ /* Make sure it's not already pending when we connect it. */
+ REMOTE_HUB_CLR_INTR(info->nasid, swlevel);
+
+ desc = irq_to_desc(virq);
+ desc->irq_common_data.node = info->nasid;
+ cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus);
+
+ return 0;
+}
+
+static void hub_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *irqd;
+
+ if (nr_irqs > 1)
+ return;
+
+ irqd = irq_domain_get_irq_data(domain, virq);
+ if (irqd && irqd->chip_data)
+ kfree(irqd->chip_data);
+}
+
+static const struct irq_domain_ops hub_domain_ops = {
+ .alloc = hub_domain_alloc,
+ .free = hub_domain_free,
+};
+
+/*
+ * This code is unnecessarily complex, because we do
+ * intr enabling. Basically, once we grab the set of intrs we need
+ * to service, we must mask _all_ these interrupts; firstly, to make
+ * sure the same intr does not intr again, causing recursion that
+ * can lead to stack overflow. Secondly, we cannot just mask the
+ * one intr we are do_IRQing, because the non-masked intrs in the
+ * first set might intr again, causing multiple servicings of the
+ * same intr. This effect is mostly seen for intercpu intrs.
+ * Kanoj 05.13.00
+ */
+
+static void ip27_do_irq_mask0(struct irq_desc *desc)
+{
+ cpuid_t cpu = smp_processor_id();
+ unsigned long *mask = per_cpu(irq_enable_mask, cpu);
+ struct irq_domain *domain;
+ u64 pend0;
+ int irq;
+
+ /* copied from Irix intpend0() */
+ pend0 = LOCAL_HUB_L(PI_INT_PEND0);
+
+ pend0 &= mask[0]; /* Pick intrs we should look at */
+ if (!pend0)
+ return;
+
+#ifdef CONFIG_SMP
+ if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
+ LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
+ scheduler_ipi();
+ } else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
+ LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
+ scheduler_ipi();
+ } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
+ LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
+ generic_smp_call_function_interrupt();
+ } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
+ LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
+ generic_smp_call_function_interrupt();
+ } else
+#endif
+ {
+ domain = irq_desc_get_handler_data(desc);
+ irq = irq_linear_revmap(domain, __ffs(pend0));
+ if (irq)
+ generic_handle_irq(irq);
+ else
+ spurious_interrupt();
+ }
+
+ LOCAL_HUB_L(PI_INT_PEND0);
+}
+
+static void ip27_do_irq_mask1(struct irq_desc *desc)
+{
+ cpuid_t cpu = smp_processor_id();
+ unsigned long *mask = per_cpu(irq_enable_mask, cpu);
+ struct irq_domain *domain;
+ u64 pend1;
+ int irq;
+
+ /* copied from Irix intpend0() */
+ pend1 = LOCAL_HUB_L(PI_INT_PEND1);
+
+ pend1 &= mask[1]; /* Pick intrs we should look at */
+ if (!pend1)
+ return;
+
+ domain = irq_desc_get_handler_data(desc);
+ irq = irq_linear_revmap(domain, __ffs(pend1) + 64);
+ if (irq)
+ generic_handle_irq(irq);
+ else
+ spurious_interrupt();
+
+ LOCAL_HUB_L(PI_INT_PEND1);
+}
+
+void install_ipi(void)
+{
+ int cpu = smp_processor_id();
+ unsigned long *mask = per_cpu(irq_enable_mask, cpu);
+ int slice = LOCAL_HUB_L(PI_CPU_NUM);
+ int resched, call;
+
+ resched = CPU_RESCHED_A_IRQ + slice;
+ set_bit(resched, mask);
+ LOCAL_HUB_CLR_INTR(resched);
+
+ call = CPU_CALL_A_IRQ + slice;
+ set_bit(call, mask);
+ LOCAL_HUB_CLR_INTR(call);
+
+ if (slice == 0) {
+ LOCAL_HUB_S(PI_INT_MASK0_A, mask[0]);
+ LOCAL_HUB_S(PI_INT_MASK1_A, mask[1]);
+ } else {
+ LOCAL_HUB_S(PI_INT_MASK0_B, mask[0]);
+ LOCAL_HUB_S(PI_INT_MASK1_B, mask[1]);
+ }
+}
+
+void __init arch_init_irq(void)
+{
+ struct irq_domain *domain;
+ struct fwnode_handle *fn;
+ int i;
+
+ mips_cpu_irq_init();
+
+ /*
+ * Some interrupts are reserved by hardware or by software convention.
+ * Mark these as reserved right away so they won't be used accidentally
+ * later.
+ */
+ for (i = 0; i <= CPU_CALL_B_IRQ; i++)
+ set_bit(i, hub_irq_map);
+
+ for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
+ set_bit(i, hub_irq_map);
+
+ fn = irq_domain_alloc_named_fwnode("HUB");
+ WARN_ON(fn == NULL);
+ if (!fn)
+ return;
+ domain = irq_domain_create_linear(fn, IP27_HUB_IRQ_COUNT,
+ &hub_domain_ops, NULL);
+ WARN_ON(domain == NULL);
+ if (!domain)
+ return;
+
+ irq_set_default_host(domain);
+
+ irq_set_percpu_devid(IP27_HUB_PEND0_IRQ);
+ irq_set_chained_handler_and_data(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0,
+ domain);
+ irq_set_percpu_devid(IP27_HUB_PEND1_IRQ);
+ irq_set_chained_handler_and_data(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1,
+ domain);
+}
diff --git a/arch/mips/sgi-ip27/ip27-klconfig.c b/arch/mips/sgi-ip27/ip27-klconfig.c
new file mode 100644
index 000000000..81a1646e6
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-klconfig.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/param.h>
+#include <linux/timex.h>
+#include <linux/mm.h>
+
+#include <asm/sn/klconfig.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/gda.h>
+
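+/*
+ * Return the next component of type struct_type on the board, searching
+ * from the component after kli (or from the start when kli is NULL).
+ */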
+klinfo_t *find_component(lboard_t *brd, klinfo_t *kli, unsigned char struct_type)
+{
+ int index, j;
+
+ if (kli == (klinfo_t *)NULL) {
+ index = 0;
+ } else {
+ for (j = 0; j < KLCF_NUM_COMPS(brd); j++)
+ if (kli == KLCF_COMP(brd, j))
+ break;
+ index = j;
+ if (index == KLCF_NUM_COMPS(brd)) {
+ printk("find_component: Bad pointer: 0x%p\n", kli);
+ return (klinfo_t *)NULL;
+ }
+ index++; /* next component */
+ }
+
+ for (; index < KLCF_NUM_COMPS(brd); index++) {
+ kli = KLCF_COMP(brd, index);
+ if (KLCF_COMP_TYPE(kli) == struct_type)
+ return kli;
+ }
+
+ /* Didn't find it. */
+ return (klinfo_t *)NULL;
+}
+
+klinfo_t *find_first_component(lboard_t *brd, unsigned char struct_type)
+{
+ return find_component(brd, (klinfo_t *)NULL, struct_type);
+}
+
+lboard_t *find_lboard(lboard_t *start, unsigned char brd_type)
+{
+ /* Search all boards stored on this node. */
+ while (start) {
+ if (start->brd_type == brd_type)
+ return start;
+ start = KLCF_NEXT(start);
+ }
+ /* Didn't find it. */
+ return (lboard_t *)NULL;
+}
+
+lboard_t *find_lboard_class(lboard_t *start, unsigned char brd_type)
+{
+ /* Search all boards stored on this node. */
+ while (start) {
+ if (KLCLASS(start->brd_type) == KLCLASS(brd_type))
+ return start;
+ start = KLCF_NEXT(start);
+ }
+
+ /* Didn't find it. */
+ return (lboard_t *)NULL;
+}
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
new file mode 100644
index 000000000..abd7a84df
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ported from IRIX to Linux by Kanoj Sarcar, 06/08/00.
+ * Copyright 2000 - 2001 Silicon Graphics, Inc.
+ * Copyright 2000 - 2001 Kanoj Sarcar (kanoj@sgi.com)
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/kernel.h>
+#include <linux/nodemask.h>
+#include <linux/string.h>
+
+#include <asm/page.h>
+#include <asm/sections.h>
+#include <asm/sn/types.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/gda.h>
+#include <asm/sn/mapped_kernel.h>
+
+#include "ip27-common.h"
+
+static nodemask_t ktext_repmask;
+
+/*
+ * XXX - This needs to be much smarter about where it puts copies of the
+ * kernel. For example, we should never put a copy on a headless node,
+ * and we should respect the topology of the machine.
+ */
+void __init setup_replication_mask(void)
+{
+ /* Set only the master cnode's bit. The master cnode is always 0. */
+ nodes_clear(ktext_repmask);
+ node_set(0, ktext_repmask);
+
+#ifdef CONFIG_REPLICATE_KTEXT
+#ifndef CONFIG_MAPPED_KERNEL
+#error Kernel replication works with mapped kernel support. No calias support.
+#endif
+ {
+ nasid_t nasid;
+
+ for_each_online_node(nasid) {
+ if (nasid == 0)
+ continue;
+ /* Advertise that we have a copy of the kernel */
+ node_set(nasid, ktext_repmask);
+ }
+ }
+#endif
+ /* Set up a GDA pointer to the replication mask. */
+ GDA->g_ktext_repmask = &ktext_repmask;
+}
+
+
+static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid)
+{
+ kern_vars_t *kvp;
+
+ kvp = &hub_data(client_nasid)->kern_vars;
+
+ KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp;
+
+ kvp->kv_magic = KV_MAGIC;
+ kvp->kv_ro_nasid = server_nasid;
+ kvp->kv_rw_nasid = master_nasid;
+ kvp->kv_ro_baseaddr = NODE_CAC_BASE(server_nasid);
+ kvp->kv_rw_baseaddr = NODE_CAC_BASE(master_nasid);
+ printk("REPLICATION: ON nasid %d, ktext from nasid %d, kdata from nasid %d\n", client_nasid, server_nasid, master_nasid);
+}
+
+/* XXX - When the BTE works, we should use it instead of this. */
+static __init void copy_kernel(nasid_t dest_nasid)
+{
+ unsigned long dest_kern_start, source_start, source_end, kern_size;
+
+ source_start = (unsigned long) _stext;
+ source_end = (unsigned long) _etext;
+ kern_size = source_end - source_start;
+
+ dest_kern_start = CHANGE_ADDR_NASID(MAPPED_KERN_RO_TO_K0(source_start),
+ dest_nasid);
+ memcpy((void *)dest_kern_start, (void *)source_start, kern_size);
+}
+
+void __init replicate_kernel_text(void)
+{
+ nasid_t client_nasid;
+ nasid_t server_nasid;
+
+ server_nasid = master_nasid;
+
+ /* Record where the master node should get its kernel text */
+ set_ktext_source(master_nasid, master_nasid);
+
+ for_each_online_node(client_nasid) {
+ if (client_nasid == 0)
+ continue;
+
+ /* Check if this node should get a copy of the kernel */
+ if (node_isset(client_nasid, ktext_repmask)) {
+ server_nasid = client_nasid;
+ copy_kernel(server_nasid);
+ }
+
+ /* Record where this node should get its kernel text */
+ set_ktext_source(client_nasid, server_nasid);
+ }
+}
+
+/*
+ * Return pfn of first free page of memory on a node. PROM may allocate
+ * data structures on the first couple of pages of the first slot of each
+ * node. If this is the case, getfirstfree(node) > getslotstart(node, 0).
+ */
+unsigned long node_getfirstfree(nasid_t nasid)
+{
+ unsigned long loadbase = REP_BASE;
+ unsigned long offset;
+
+#ifdef CONFIG_MAPPED_KERNEL
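+ /* mapped kernels keep data 16 MB above the load address (see Platform) */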
+ loadbase += 16777216;
+#endif
+ offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase;
+ if ((nasid == 0) || (node_isset(nasid, ktext_repmask)))
+ return TO_NODE(nasid, offset) >> PAGE_SHIFT;
+ else
+ return KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >> PAGE_SHIFT;
+}
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
new file mode 100644
index 000000000..d411e0a90
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -0,0 +1,429 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000, 05 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2000 by Silicon Graphics, Inc.
+ * Copyright (C) 2004 by Christoph Hellwig
+ *
+ * On SGI IP27 the ARC memory configuration data is completely bogus, but
+ * alternate, easier-to-use mechanisms are available.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/export.h>
+#include <linux/nodemask.h>
+#include <linux/swap.h>
+#include <linux/pfn.h>
+#include <linux/highmem.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
+
+#include <asm/sn/arch.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/klconfig.h>
+
+#include "ip27-common.h"
+
+#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
+#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
+
+struct node_data *__node_data[MAX_NUMNODES];
+
+EXPORT_SYMBOL(__node_data);
+
+static u64 gen_region_mask(void)
+{
+ int region_shift;
+ u64 region_mask;
+ nasid_t nasid;
+
+ region_shift = get_region_shift();
+ region_mask = 0;
+ for_each_online_node(nasid)
+ region_mask |= BIT_ULL(nasid >> region_shift);
+
+ return region_mask;
+}
+
+#define rou_rflag rou_flags
+
+static int router_distance;
+
+static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
+{
+ klrou_t *router;
+ lboard_t *brd;
+ int port;
+
+ if (router_a->rou_rflag == 1)
+ return;
+
+ if (depth >= router_distance)
+ return;
+
+ router_a->rou_rflag = 1;
+
+ for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
+ if (router_a->rou_port[port].port_nasid == INVALID_NASID)
+ continue;
+
+ brd = (lboard_t *)NODE_OFFSET_TO_K0(
+ router_a->rou_port[port].port_nasid,
+ router_a->rou_port[port].port_offset);
+
+ if (brd->brd_type == KLTYPE_ROUTER) {
+ router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
+ if (router == router_b) {
+ if (depth < router_distance)
+ router_distance = depth;
+ }
+ else
+ router_recurse(router, router_b, depth + 1);
+ }
+ }
+
+ router_a->rou_rflag = 0;
+}
+
+unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
+EXPORT_SYMBOL(__node_distances);
+
+static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
+{
+ klrou_t *router, *router_a = NULL, *router_b = NULL;
+ lboard_t *brd, *dest_brd;
+ nasid_t nasid;
+ int port;
+
+ /* Figure out which routers the nodes in question are connected to */
+ for_each_online_node(nasid) {
+ brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
+ KLTYPE_ROUTER);
+
+ if (!brd)
+ continue;
+
+ do {
+ if (brd->brd_flags & DUPLICATE_BOARD)
+ continue;
+
+ router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
+ router->rou_rflag = 0;
+
+ for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
+ if (router->rou_port[port].port_nasid == INVALID_NASID)
+ continue;
+
+ dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
+ router->rou_port[port].port_nasid,
+ router->rou_port[port].port_offset);
+
+ if (dest_brd->brd_type == KLTYPE_IP27) {
+ if (dest_brd->brd_nasid == nasid_a)
+ router_a = router;
+ if (dest_brd->brd_nasid == nasid_b)
+ router_b = router;
+ }
+ }
+
+ } while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
+ }
+
+ if (nasid_a == nasid_b)
+ return LOCAL_DISTANCE;
+
+ if (router_a == router_b)
+ return LOCAL_DISTANCE + 1;
+
+ if (router_a == NULL) {
+ pr_info("node_distance: router_a NULL\n");
+ return 255;
+ }
+ if (router_b == NULL) {
+ pr_info("node_distance: router_b NULL\n");
+ return 255;
+ }
+
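+ /*
+ * Depth-first search starting from router_a; router_distance ends up as
+ * the smallest router hop count found on any path to router_b (100 acts
+ * as "no path found").
+ */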
+ router_distance = 100;
+ router_recurse(router_a, router_b, 2);
+
+ return LOCAL_DISTANCE + router_distance;
+}
+
+static void __init init_topology_matrix(void)
+{
+ nasid_t row, col;
+
+ for (row = 0; row < MAX_NUMNODES; row++)
+ for (col = 0; col < MAX_NUMNODES; col++)
+ __node_distances[row][col] = -1;
+
+ for_each_online_node(row) {
+ for_each_online_node(col) {
+ __node_distances[row][col] =
+ compute_node_distance(row, col);
+ }
+ }
+}
+
+static void __init dump_topology(void)
+{
+ nasid_t nasid;
+ lboard_t *brd, *dest_brd;
+ int port;
+ int router_num = 0;
+ klrou_t *router;
+ nasid_t row, col;
+
+ pr_info("************** Topology ********************\n");
+
+ pr_info(" ");
+ for_each_online_node(col)
+ pr_cont("%02d ", col);
+ pr_cont("\n");
+ for_each_online_node(row) {
+ pr_info("%02d ", row);
+ for_each_online_node(col)
+ pr_cont("%2d ", node_distance(row, col));
+ pr_cont("\n");
+ }
+
+ for_each_online_node(nasid) {
+ brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
+ KLTYPE_ROUTER);
+
+ if (!brd)
+ continue;
+
+ do {
+ if (brd->brd_flags & DUPLICATE_BOARD)
+ continue;
+ pr_cont("Router %d:", router_num);
+ router_num++;
+
+ router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
+
+ for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
+ if (router->rou_port[port].port_nasid == INVALID_NASID)
+ continue;
+
+ dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
+ router->rou_port[port].port_nasid,
+ router->rou_port[port].port_offset);
+
+ if (dest_brd->brd_type == KLTYPE_IP27)
+ pr_cont(" %d", dest_brd->brd_nasid);
+ if (dest_brd->brd_type == KLTYPE_ROUTER)
+ pr_cont(" r");
+ }
+ pr_cont("\n");
+
+ } while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
+ }
+}
+
+static unsigned long __init slot_getbasepfn(nasid_t nasid, int slot)
+{
+ return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
+}
+
+static unsigned long __init slot_psize_compute(nasid_t nasid, int slot)
+{
+ lboard_t *brd;
+ klmembnk_t *banks;
+ unsigned long size;
+
+ /* Find the node board */
+ brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
+ if (!brd)
+ return 0;
+
+ /* Get the memory bank structure */
+ banks = (klmembnk_t *) find_first_component(brd, KLSTRUCT_MEMBNK);
+ if (!banks)
+ return 0;
+
+ /* Size in _Megabytes_ */
+ size = (unsigned long)banks->membnk_bnksz[slot/4];
+
+ /* hack for 128 dimm banks */
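+ /*
+ * Each membnk_bnksz[] entry covers a group of four slots: sizes of
+ * 128 MB or less are credited entirely to the first slot of the group,
+ * larger sizes are split evenly across all four slots.
+ */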
+ if (size <= 128) {
+ if (slot % 4 == 0) {
+ size <<= 20; /* size in bytes */
+ return size >> PAGE_SHIFT;
+ } else
+ return 0;
+ } else {
+ size /= 4;
+ size <<= 20;
+ return size >> PAGE_SHIFT;
+ }
+}
+
+static void __init mlreset(void)
+{
+ u64 region_mask;
+ nasid_t nasid;
+
+ master_nasid = get_nasid();
+
+ /*
+ * Probe for all CPUs - this creates the cpumask and sets up the
+ * mapping tables. We need to do this as early as possible.
+ */
+#ifdef CONFIG_SMP
+ cpu_node_probe();
+#endif
+
+ init_topology_matrix();
+ dump_topology();
+
+ region_mask = gen_region_mask();
+
+ setup_replication_mask();
+
+ /*
+ * Set all nodes' calias sizes to 8k
+ */
+ for_each_online_node(nasid) {
+ /*
+ * Always have node 0 in the region mask, otherwise
+ * CALIAS accesses get exceptions since the hub
+ * thinks it is a node 0 address.
+ */
+ REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
+ REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
+
+#ifdef LATER
+ /*
+ * Set up all hubs to have a big window pointing at
+ * widget 0. Memory mode, widget 0, offset 0
+ */
+ REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
+ ((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
+ (0 << IIO_ITTE_WIDGET_SHIFT)));
+#endif
+ }
+}
+
+static void __init szmem(void)
+{
+ unsigned long slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */
+ int slot;
+ nasid_t node;
+
+ for_each_online_node(node) {
+ nodebytes = 0;
+ for (slot = 0; slot < MAX_MEM_SLOTS; slot++) {
+ slot_psize = slot_psize_compute(node, slot);
+ if (slot == 0)
+ slot0sz = slot_psize;
+ /*
+ * We need to refine the hack when we have replicated
+ * kernel text.
+ */
+ nodebytes += (1LL << SLOT_SHIFT);
+
+ if (!slot_psize)
+ continue;
+
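+ /*
+ * Stop adding slots once the struct page array for the memory
+ * seen so far would no longer fit into slot 0.
+ */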
+ if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
+ (slot0sz << PAGE_SHIFT)) {
+ pr_info("Ignoring slot %d onwards on node %d\n",
+ slot, node);
+ slot = MAX_MEM_SLOTS;
+ continue;
+ }
+ memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)),
+ PFN_PHYS(slot_psize), node);
+ }
+ }
+}
+
+static void __init node_mem_init(nasid_t node)
+{
+ unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
+ unsigned long slot_freepfn = node_getfirstfree(node);
+ unsigned long start_pfn, end_pfn;
+
+ get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
+
+ /*
+ * Allocate the node data structures on the node first.
+ */
+ __node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
+ memset(__node_data[node], 0, PAGE_SIZE);
+
+ NODE_DATA(node)->node_start_pfn = start_pfn;
+ NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
+
+ cpumask_clear(&hub_data(node)->h_cpus);
+
+ slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
+ sizeof(struct hub_data));
+
+ memblock_reserve(slot_firstpfn << PAGE_SHIFT,
+ ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT));
+}
+
+/*
+ * A node with nothing. We use it to avoid any special casing in
+ * cpumask_of_node.
+ */
+static struct node_data null_node = {
+ .hub = {
+ .h_cpus = CPU_MASK_NONE
+ }
+};
+
+/*
+ * Currently, the intranode memory hole support assumes that each slot
+ * contains at least 32 MBytes of memory. We assume all bootmem data
+ * fits on the first slot.
+ */
+void __init prom_meminit(void)
+{
+ nasid_t node;
+
+ mlreset();
+ szmem();
+ max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+
+ for (node = 0; node < MAX_NUMNODES; node++) {
+ if (node_online(node)) {
+ node_mem_init(node);
+ continue;
+ }
+ __node_data[node] = &null_node;
+ }
+}
+
+void __init prom_free_prom_memory(void)
+{
+ /* We got nothing to free here ... */
+}
+
+extern void setup_zero_pages(void);
+
+void __init paging_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES] = {0, };
+
+ pagetable_init();
+ zones_size[ZONE_NORMAL] = max_low_pfn;
+ free_area_init(zones_size);
+}
+
+void __init mem_init(void)
+{
+ high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
+ memblock_free_all();
+ setup_zero_pages(); /* This comes from node 0 */
+ mem_init_print_info(NULL);
+}
diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
new file mode 100644
index 000000000..84889b57d
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-nmi.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/mmzone.h>
+#include <linux/nodemask.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/atomic.h>
+#include <asm/sn/types.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/nmi.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/agent.h>
+
+#if 0
+#define NODE_NUM_CPUS(n) CNODE_NUM_CPUS(n)
+#else
+#define NODE_NUM_CPUS(n) CPUS_PER_NODE
+#endif
+
+#define SEND_NMI(_nasid, _slice) \
+ REMOTE_HUB_S((_nasid), (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)
+
+typedef unsigned long machreg_t;
+
+static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+/*
+ * Let's see what else we need to do here. Set up sp, gp?
+ */
+void nmi_dump(void)
+{
+ void cont_nmi_dump(void);
+
+ cont_nmi_dump();
+}
+
+void install_cpu_nmi_handler(int slice)
+{
+ nmi_t *nmi_addr;
+
+ nmi_addr = (nmi_t *)NMI_ADDR(get_nasid(), slice);
+ if (nmi_addr->call_addr)
+ return;
+ nmi_addr->magic = NMI_MAGIC;
+ nmi_addr->call_addr = (void *)nmi_dump;
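+ /*
+ * call_addr_c holds the bitwise complement of call_addr, presumably so
+ * the PROM can sanity-check the handler pointer before jumping to it.
+ */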
+ nmi_addr->call_addr_c =
+ (void *)(~((unsigned long)(nmi_addr->call_addr)));
+ nmi_addr->call_parm = 0;
+}
+
+/*
+ * Copy the cpu registers which have been saved in the IP27prom format
+ * into the eframe format for the node under consideration.
+ */
+
+void nmi_cpu_eframe_save(nasid_t nasid, int slice)
+{
+ struct reg_struct *nr;
+ int i;
+
+ /* Get the pointer to the current cpu's register set. */
+ nr = (struct reg_struct *)
+ (TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) +
+ slice * IP27_NMI_KREGS_CPU_SIZE);
+
+ pr_emerg("NMI nasid %d: slice %d\n", nasid, slice);
+
+ /*
+ * Saved main processor registers
+ */
+ for (i = 0; i < 32; ) {
+ if ((i % 4) == 0)
+ pr_emerg("$%2d :", i);
+ pr_cont(" %016lx", nr->gpr[i]);
+
+ i++;
+ if ((i % 4) == 0)
+ pr_cont("\n");
+ }
+
+ pr_emerg("Hi : (value lost)\n");
+ pr_emerg("Lo : (value lost)\n");
+
+ /*
+ * Saved cp0 registers
+ */
+ pr_emerg("epc : %016lx %pS\n", nr->epc, (void *)nr->epc);
+ pr_emerg("%s\n", print_tainted());
+ pr_emerg("ErrEPC: %016lx %pS\n", nr->error_epc, (void *)nr->error_epc);
+ pr_emerg("ra : %016lx %pS\n", nr->gpr[31], (void *)nr->gpr[31]);
+ pr_emerg("Status: %08lx ", nr->sr);
+
+ if (nr->sr & ST0_KX)
+ pr_cont("KX ");
+ if (nr->sr & ST0_SX)
+ pr_cont("SX ");
+ if (nr->sr & ST0_UX)
+ pr_cont("UX ");
+
+ switch (nr->sr & ST0_KSU) {
+ case KSU_USER:
+ pr_cont("USER ");
+ break;
+ case KSU_SUPERVISOR:
+ pr_cont("SUPERVISOR ");
+ break;
+ case KSU_KERNEL:
+ pr_cont("KERNEL ");
+ break;
+ default:
+ pr_cont("BAD_MODE ");
+ break;
+ }
+
+ if (nr->sr & ST0_ERL)
+ pr_cont("ERL ");
+ if (nr->sr & ST0_EXL)
+ pr_cont("EXL ");
+ if (nr->sr & ST0_IE)
+ pr_cont("IE ");
+ pr_cont("\n");
+
+ pr_emerg("Cause : %08lx\n", nr->cause);
+ pr_emerg("PrId : %08x\n", read_c0_prid());
+ pr_emerg("BadVA : %016lx\n", nr->badva);
+ pr_emerg("CErr : %016lx\n", nr->cache_err);
+ pr_emerg("NMI_SR: %016lx\n", nr->nmi_sr);
+
+ pr_emerg("\n");
+}
+
+void nmi_dump_hub_irq(nasid_t nasid, int slice)
+{
+ u64 mask0, mask1, pend0, pend1;
+
+ if (slice == 0) { /* Slice A */
+ mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A);
+ mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_A);
+ } else { /* Slice B */
+ mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_B);
+ mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_B);
+ }
+
+ pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0);
+ pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1);
+
+ pr_emerg("PI_INT_MASK0: %16llx PI_INT_MASK1: %16llx\n", mask0, mask1);
+ pr_emerg("PI_INT_PEND0: %16llx PI_INT_PEND1: %16llx\n", pend0, pend1);
+ pr_emerg("\n\n");
+}
+
+/*
+ * Copy the cpu registers which have been saved in the IP27prom format
+ * into the eframe format for the node under consideration.
+ */
+void nmi_node_eframe_save(nasid_t nasid)
+{
+ int slice;
+
+ if (nasid == INVALID_NASID)
+ return;
+
+ /* Save the registers into eframe for each cpu */
+ for (slice = 0; slice < NODE_NUM_CPUS(slice); slice++) {
+ nmi_cpu_eframe_save(nasid, slice);
+ nmi_dump_hub_irq(nasid, slice);
+ }
+}
+
+/*
+ * Save the nmi cpu registers for all cpus in the system.
+ */
+void
+nmi_eframes_save(void)
+{
+ nasid_t nasid;
+
+ for_each_online_node(nasid)
+ nmi_node_eframe_save(nasid);
+}
+
+void
+cont_nmi_dump(void)
+{
+#ifndef REAL_NMI_SIGNAL
+ static atomic_t nmied_cpus = ATOMIC_INIT(0);
+
+ atomic_inc(&nmied_cpus);
+#endif
+ /*
+ * Only allow 1 cpu to proceed
+ */
+ arch_spin_lock(&nmi_lock);
+
+#ifdef REAL_NMI_SIGNAL
+ /*
+ * Wait up to 15 seconds for the other cpus to respond to the NMI.
+ * If a cpu has not responded after 10 sec, send it 1 additional NMI.
+ * This is for 2 reasons:
+ * - sometimes the MMSC fails to NMI all cpus.
+ * - on 512p SN0 system, the MMSC will only send NMIs to
+ * half the cpus. Unfortunately, we don't know which cpus may be
+ * NMIed - it depends on how the site chooses to configure it.
+ *
+ * Note: it has been measured that it takes the MMSC up to 2.3 secs to
+ * send NMIs to all cpus on a 256p system.
+ */
+ for (i=0; i < 1500; i++) {
+ for_each_online_node(node)
+ if (NODEPDA(node)->dump_count == 0)
+ break;
+ if (node == MAX_NUMNODES)
+ break;
+ if (i == 1000) {
+ for_each_online_node(node)
+ if (NODEPDA(node)->dump_count == 0) {
+ cpu = cpumask_first(cpumask_of_node(node));
+ for (n=0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
+ CPUMASK_SETB(nmied_cpus, cpu);
+ /*
+ * cputonasid, cputoslice
+ * needs kernel cpuid
+ */
+ SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
+ }
+ }
+
+ }
+ udelay(10000);
+ }
+#else
+ while (atomic_read(&nmied_cpus) != num_online_cpus());
+#endif
+
+ /*
+ * Save the nmi cpu registers for all cpu in the eframe format.
+ */
+ nmi_eframes_save();
+ LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
+}
diff --git a/arch/mips/sgi-ip27/ip27-reset.c b/arch/mips/sgi-ip27/ip27-reset.c
new file mode 100644
index 000000000..5ac5ad638
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-reset.c
@@ -0,0 +1,81 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Reset an IP27.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000, 06 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/smp.h>
+#include <linux/mmzone.h>
+#include <linux/nodemask.h>
+#include <linux/pm.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/reboot.h>
+#include <asm/sgialib.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/gda.h>
+
+#include "ip27-common.h"
+
+void machine_restart(char *command) __noreturn;
+void machine_halt(void) __noreturn;
+void machine_power_off(void) __noreturn;
+
+#define noreturn while(1); /* Silence gcc. */
+
+/* XXX How to pass the reboot command to the firmware??? */
+static void ip27_machine_restart(char *command)
+{
+#if 0
+ int i;
+#endif
+
+ printk("Reboot started from CPU %d\n", smp_processor_id());
+#ifdef CONFIG_SMP
+ smp_send_stop();
+#endif
+#if 0
+ for_each_online_node(i)
+ REMOTE_HUB_S(i, PROMOP_REG, PROMOP_REBOOT);
+#else
+ LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
+#endif
+ noreturn;
+}
+
+static void ip27_machine_halt(void)
+{
+ int i;
+
+#ifdef CONFIG_SMP
+ smp_send_stop();
+#endif
+ for_each_online_node(i)
+ REMOTE_HUB_S(i, PROMOP_REG, PROMOP_RESTART);
+ LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
+ noreturn;
+}
+
+static void ip27_machine_power_off(void)
+{
+ /* To do ... */
+ noreturn;
+}
+
+void ip27_reboot_setup(void)
+{
+ _machine_restart = ip27_machine_restart;
+ _machine_halt = ip27_machine_halt;
+ pm_power_off = ip27_machine_power_off;
+}
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
new file mode 100644
index 000000000..5d2652a1d
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -0,0 +1,189 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
+ * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
+ */
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/topology.h>
+#include <linux/nodemask.h>
+
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/gda.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/launch.h>
+#include <asm/sn/mapped_kernel.h>
+#include <asm/sn/types.h>
+
+#include "ip27-common.h"
+
+static int node_scan_cpus(nasid_t nasid, int highest)
+{
+ static int cpus_found;
+ lboard_t *brd;
+ klcpu_t *acpu;
+ cpuid_t cpuid;
+
+ brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
+
+ do {
+ acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU);
+ while (acpu) {
+ cpuid = acpu->cpu_info.virtid;
+ /* Only let it join in if it's marked enabled */
+ if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
+ (cpus_found != NR_CPUS)) {
+ if (cpuid > highest)
+ highest = cpuid;
+ set_cpu_possible(cpuid, true);
+ cputonasid(cpus_found) = nasid;
+ cputoslice(cpus_found) = acpu->cpu_info.physid;
+ sn_cpu_info[cpus_found].p_speed =
+ acpu->cpu_speed;
+ cpus_found++;
+ }
+ acpu = (klcpu_t *)find_component(brd, (klinfo_t *)acpu,
+ KLSTRUCT_CPU);
+ }
+ brd = KLCF_NEXT(brd);
+ if (!brd)
+ break;
+
+ brd = find_lboard(brd, KLTYPE_IP27);
+ } while (brd);
+
+ return highest;
+}
+
+void cpu_node_probe(void)
+{
+ int i, highest = 0;
+ gda_t *gdap = GDA;
+
+ nodes_clear(node_online_map);
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ nasid_t nasid = gdap->g_nasidtable[i];
+ if (nasid == INVALID_NASID)
+ break;
+ node_set_online(nasid);
+ highest = node_scan_cpus(nasid, highest);
+ }
+
+ printk("Discovered %d cpus on %d nodes\n", highest + 1, num_online_nodes());
+}
+
+static __init void intr_clear_all(nasid_t nasid)
+{
+ int i;
+
+ REMOTE_HUB_S(nasid, PI_INT_MASK0_A, 0);
+ REMOTE_HUB_S(nasid, PI_INT_MASK0_B, 0);
+ REMOTE_HUB_S(nasid, PI_INT_MASK1_A, 0);
+ REMOTE_HUB_S(nasid, PI_INT_MASK1_B, 0);
+
+ for (i = 0; i < 128; i++)
+ REMOTE_HUB_CLR_INTR(nasid, i);
+}
+
+static void ip27_send_ipi_single(int destid, unsigned int action)
+{
+ int irq;
+
+ switch (action) {
+ case SMP_RESCHEDULE_YOURSELF:
+ irq = CPU_RESCHED_A_IRQ;
+ break;
+ case SMP_CALL_FUNCTION:
+ irq = CPU_CALL_A_IRQ;
+ break;
+ default:
+ panic("sendintr");
+ }
+
+ irq += cputoslice(destid);
+
+ /*
+ * Set the interrupt bit associated with the CPU we want to
+ * send the interrupt to.
+ */
+ REMOTE_HUB_SEND_INTR(cpu_to_node(destid), irq);
+}
+
+static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ ip27_send_ipi_single(i, action);
+}
+
+static void ip27_init_cpu(void)
+{
+ per_cpu_init();
+}
+
+static void ip27_smp_finish(void)
+{
+ hub_rt_clock_event_init();
+ local_irq_enable();
+}
+
+/*
+ * Launch a slave into smp_bootstrap(). It doesn't take an argument, and we
+ * set sp to the kernel stack of the newly created idle process and gp to its
+ * thread_info so that current_thread_info() will work.
+ */
+static int ip27_boot_secondary(int cpu, struct task_struct *idle)
+{
+ unsigned long gp = (unsigned long)task_thread_info(idle);
+ unsigned long sp = __KSTK_TOS(idle);
+
+ LAUNCH_SLAVE(cputonasid(cpu), cputoslice(cpu),
+ (launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap),
+ 0, (void *) sp, (void *) gp);
+ return 0;
+}
+
+static void __init ip27_smp_setup(void)
+{
+ nasid_t nasid;
+
+ for_each_online_node(nasid) {
+ if (nasid == 0)
+ continue;
+ intr_clear_all(nasid);
+ }
+
+ replicate_kernel_text();
+
+ /*
+ * PROM sets up the system so that the boot cpu is always the first CPU on nasid 0
+ */
+ cputonasid(0) = 0;
+ cputoslice(0) = LOCAL_HUB_L(PI_CPU_NUM);
+}
+
+static void __init ip27_prepare_cpus(unsigned int max_cpus)
+{
+ /* We already did everything necessary earlier */
+}
+
+const struct plat_smp_ops ip27_smp_ops = {
+ .send_ipi_single = ip27_send_ipi_single,
+ .send_ipi_mask = ip27_send_ipi_mask,
+ .init_secondary = ip27_init_cpu,
+ .smp_finish = ip27_smp_finish,
+ .boot_secondary = ip27_boot_secondary,
+ .smp_setup = ip27_smp_setup,
+ .prepare_cpus = ip27_prepare_cpus,
+ .prepare_boot_cpu = ip27_init_cpu,
+};
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
new file mode 100644
index 000000000..79c434fec
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 1999, 2000, 05, 06 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/bcd.h>
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched_clock.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/param.h>
+#include <linux/smp.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+
+#include <asm/time.h>
+#include <asm/sgialib.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/agent.h>
+
+#include "ip27-common.h"
+
+static int rt_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+ int slice = cputoslice(cpu);
+ unsigned long cnt;
+
+ cnt = LOCAL_HUB_L(PI_RT_COUNT);
+ cnt += delta;
+ LOCAL_HUB_S(PI_RT_COMPARE_A + PI_COUNT_OFFSET * slice, cnt);
+
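+ /* If the counter already passed the new compare value the event was missed. */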
+ return LOCAL_HUB_L(PI_RT_COUNT) >= cnt ? -ETIME : 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
+static DEFINE_PER_CPU(char [11], hub_rt_name);
+
+static irqreturn_t hub_rt_counter_handler(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
+ int slice = cputoslice(cpu);
+
+ /*
+ * Ack
+ */
+ LOCAL_HUB_S(PI_RT_PEND_A + PI_COUNT_OFFSET * slice, 0);
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+struct irqaction hub_rt_irqaction = {
+ .handler = hub_rt_counter_handler,
+ .percpu_dev_id = &hub_rt_clockevent,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
+ .name = "hub-rt",
+};
+
+/*
+ * This is a hack; we really need to figure these values out dynamically
+ *
+ * Since 800 ns works very well with various HUB frequencies, such as
+ * 360, 380, 390 and 400 MHz, we use 800 ns rtc cycle time.
+ *
+ * Ralf: which clock rate is used to feed the counter?
+ */
+#define NSEC_PER_CYCLE 800
+#define CYCLES_PER_SEC (NSEC_PER_SEC / NSEC_PER_CYCLE)
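+/* 1,000,000,000 ns / 800 ns per cycle = 1,250,000 counter cycles per second. */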
+
+void hub_rt_clock_event_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
+ unsigned char *name = per_cpu(hub_rt_name, cpu);
+
+ sprintf(name, "hub-rt %d", cpu);
+ cd->name = name;
+ cd->features = CLOCK_EVT_FEAT_ONESHOT;
+ clockevent_set_clock(cd, CYCLES_PER_SEC);
+ cd->max_delta_ns = clockevent_delta2ns(0xfffffffffffff, cd);
+ cd->max_delta_ticks = 0xfffffffffffff;
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+ cd->min_delta_ticks = 0x300;
+ cd->rating = 200;
+ cd->irq = IP27_RT_TIMER_IRQ;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = rt_next_event;
+ clockevents_register_device(cd);
+
+ enable_percpu_irq(IP27_RT_TIMER_IRQ, IRQ_TYPE_NONE);
+}
+
+static void __init hub_rt_clock_event_global_init(void)
+{
+ irq_set_handler(IP27_RT_TIMER_IRQ, handle_percpu_devid_irq);
+ irq_set_percpu_devid(IP27_RT_TIMER_IRQ);
+ setup_percpu_irq(IP27_RT_TIMER_IRQ, &hub_rt_irqaction);
+}
+
+static u64 hub_rt_read(struct clocksource *cs)
+{
+ return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
+}
+
+struct clocksource hub_rt_clocksource = {
+ .name = "HUB-RT",
+ .rating = 200,
+ .read = hub_rt_read,
+ .mask = CLOCKSOURCE_MASK(52),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u64 notrace hub_rt_read_sched_clock(void)
+{
+ return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
+}
+
+static void __init hub_rt_clocksource_init(void)
+{
+ struct clocksource *cs = &hub_rt_clocksource;
+
+ clocksource_register_hz(cs, CYCLES_PER_SEC);
+
+ sched_clock_register(hub_rt_read_sched_clock, 52, CYCLES_PER_SEC);
+}
+
+void __init plat_time_init(void)
+{
+ hub_rt_clocksource_init();
+ hub_rt_clock_event_global_init();
+ hub_rt_clock_event_init();
+}
+
+void hub_rtc_init(nasid_t nasid)
+{
+
+ /*
+ * We only need to initialize the current node.
+ * If this is not the current node then it is a cpuless
+ * node and timeouts will not happen there.
+ */
+ if (get_nasid() == nasid) {
+ LOCAL_HUB_S(PI_RT_EN_A, 1);
+ LOCAL_HUB_S(PI_RT_EN_B, 1);
+ LOCAL_HUB_S(PI_PROF_EN_A, 0);
+ LOCAL_HUB_S(PI_PROF_EN_B, 0);
+ LOCAL_HUB_S(PI_RT_COUNT, 0);
+ LOCAL_HUB_S(PI_RT_PEND_A, 0);
+ LOCAL_HUB_S(PI_RT_PEND_B, 0);
+ }
+}
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
new file mode 100644
index 000000000..5143d1cf8
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2004 Christoph Hellwig.
+ *
+ * Generic XTALK initialization code
+ */
+
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/sgi-w1.h>
+#include <linux/platform_data/xtalk-bridge.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/types.h>
+#include <asm/sn/klconfig.h>
+#include <asm/pci/bridge.h>
+#include <asm/xtalk/xtalk.h>
+
+
+#define XBOW_WIDGET_PART_NUM 0x0
+#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbow in Xbridge */
+#define BASE_XBOW_PORT 8 /* Lowest external port */
+
+static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
+{
+ struct xtalk_bridge_platform_data *bd;
+ struct sgi_w1_platform_data *wd;
+ struct platform_device *pdev_wd;
+ struct platform_device *pdev_bd;
+ struct resource w1_res;
+ unsigned long offset;
+
+ offset = NODE_OFFSET(nasid);
+
+ wd = kzalloc(sizeof(*wd), GFP_KERNEL);
+ if (!wd) {
+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
+ return;
+ }
+
+ snprintf(wd->dev_id, sizeof(wd->dev_id), "bridge-%012lx",
+ offset + (widget << SWIN_SIZE_BITS));
+
+ memset(&w1_res, 0, sizeof(w1_res));
+ w1_res.start = offset + (widget << SWIN_SIZE_BITS) +
+ offsetof(struct bridge_regs, b_nic);
+ w1_res.end = w1_res.start + 3;
+ w1_res.flags = IORESOURCE_MEM;
+
+ pdev_wd = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
+ if (!pdev_wd) {
+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
+ goto err_kfree_wd;
+ }
+ if (platform_device_add_resources(pdev_wd, &w1_res, 1)) {
+ pr_warn("xtalk:n%d/%x bridge failed to add platform resources.\n", nasid, widget);
+ goto err_put_pdev_wd;
+ }
+ if (platform_device_add_data(pdev_wd, wd, sizeof(*wd))) {
+ pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget);
+ goto err_put_pdev_wd;
+ }
+ if (platform_device_add(pdev_wd)) {
+ pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget);
+ goto err_put_pdev_wd;
+ }
+ /* platform_device_add_data() duplicates the data */
+ kfree(wd);
+
+ bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd) {
+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
+ goto err_unregister_pdev_wd;
+ }
+ pdev_bd = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
+ if (!pdev_bd) {
+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
+ goto err_kfree_bd;
+ }
+
+
+ bd->bridge_addr = RAW_NODE_SWIN_BASE(nasid, widget);
+ bd->intr_addr = BIT_ULL(47) + 0x01800000 + PI_INT_PEND_MOD;
+ bd->nasid = nasid;
+ bd->masterwid = masterwid;
+
+ bd->mem.name = "Bridge PCI MEM";
+ bd->mem.start = offset + (widget << SWIN_SIZE_BITS) + BRIDGE_DEVIO0;
+ bd->mem.end = offset + (widget << SWIN_SIZE_BITS) + SWIN_SIZE - 1;
+ bd->mem.flags = IORESOURCE_MEM;
+ bd->mem_offset = offset;
+
+ bd->io.name = "Bridge PCI IO";
+ bd->io.start = offset + (widget << SWIN_SIZE_BITS) + BRIDGE_DEVIO0;
+ bd->io.end = offset + (widget << SWIN_SIZE_BITS) + SWIN_SIZE - 1;
+ bd->io.flags = IORESOURCE_IO;
+ bd->io_offset = offset;
+
+ if (platform_device_add_data(pdev_bd, bd, sizeof(*bd))) {
+ pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget);
+ goto err_put_pdev_bd;
+ }
+ if (platform_device_add(pdev_bd)) {
+ pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget);
+ goto err_put_pdev_bd;
+ }
+ /* platform_device_add_data() duplicates the data */
+ kfree(bd);
+ pr_info("xtalk:n%d/%x bridge widget\n", nasid, widget);
+ return;
+
+err_put_pdev_bd:
+ platform_device_put(pdev_bd);
+err_kfree_bd:
+ kfree(bd);
+err_unregister_pdev_wd:
+ platform_device_unregister(pdev_wd);
+ return;
+err_put_pdev_wd:
+ platform_device_put(pdev_wd);
+err_kfree_wd:
+ kfree(wd);
+ return;
+}
+
+static int probe_one_port(nasid_t nasid, int widget, int masterwid)
+{
+ widgetreg_t widget_id;
+ xwidget_part_num_t partnum;
+
+ widget_id = *(volatile widgetreg_t *)
+ (RAW_NODE_SWIN_BASE(nasid, widget) + WIDGET_ID);
+ partnum = XWIDGET_PART_NUM(widget_id);
+
+ switch (partnum) {
+ case BRIDGE_WIDGET_PART_NUM:
+ case XBRIDGE_WIDGET_PART_NUM:
+ bridge_platform_create(nasid, widget, masterwid);
+ break;
+ default:
+ pr_info("xtalk:n%d/%d unknown widget (0x%x)\n",
+ nasid, widget, partnum);
+ break;
+ }
+
+ return 0;
+}
+
+static int xbow_probe(nasid_t nasid)
+{
+ lboard_t *brd;
+ klxbow_t *xbow_p;
+ unsigned masterwid, i;
+
+ /*
+ * found xbow, so may have multiple bridges
+ * need to probe xbow
+ */
+ brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_MIDPLANE8);
+ if (!brd)
+ return -ENODEV;
+
+ xbow_p = (klxbow_t *)find_component(brd, NULL, KLSTRUCT_XBOW);
+ if (!xbow_p)
+ return -ENODEV;
+
+ /*
+ * Okay, here's a xbow. Let's arbitrate and find
+ * out if we should initialize it. Set enabled
+ * hub connected at highest or lowest widget as
+ * master.
+ */
+#ifdef WIDGET_A
+ i = HUB_WIDGET_ID_MAX + 1;
+ do {
+ i--;
+ } while ((!XBOW_PORT_TYPE_HUB(xbow_p, i)) ||
+ (!XBOW_PORT_IS_ENABLED(xbow_p, i)));
+#else
+ i = HUB_WIDGET_ID_MIN - 1;
+ do {
+ i++;
+ } while ((!XBOW_PORT_TYPE_HUB(xbow_p, i)) ||
+ (!XBOW_PORT_IS_ENABLED(xbow_p, i)));
+#endif
+
+ masterwid = i;
+ if (nasid != XBOW_PORT_NASID(xbow_p, i))
+ return 1;
+
+ for (i = HUB_WIDGET_ID_MIN; i <= HUB_WIDGET_ID_MAX; i++) {
+ if (XBOW_PORT_IS_ENABLED(xbow_p, i) &&
+ XBOW_PORT_TYPE_IO(xbow_p, i))
+ probe_one_port(nasid, i, masterwid);
+ }
+
+ return 0;
+}
+
+static void xtalk_probe_node(nasid_t nasid)
+{
+ volatile u64 hubreg;
+ xwidget_part_num_t partnum;
+ widgetreg_t widget_id;
+
+ hubreg = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
+
+ /* check whether the link is up */
+ if (!(hubreg & IIO_LLP_CSR_IS_UP))
+ return;
+
+ widget_id = *(volatile widgetreg_t *)
+ (RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
+ partnum = XWIDGET_PART_NUM(widget_id);
+
+ switch (partnum) {
+ case BRIDGE_WIDGET_PART_NUM:
+ bridge_platform_create(nasid, 0x8, 0xa);
+ break;
+ case XBOW_WIDGET_PART_NUM:
+ case XXBOW_WIDGET_PART_NUM:
+ pr_info("xtalk:n%d/0 xbow widget\n", nasid);
+ xbow_probe(nasid);
+ break;
+ default:
+ pr_info("xtalk:n%d/0 unknown widget (0x%x)\n", nasid, partnum);
+ break;
+ }
+}
+
+static int __init xtalk_init(void)
+{
+ nasid_t nasid;
+
+ for_each_online_node(nasid)
+ xtalk_probe_node(nasid);
+
+ return 0;
+}
+arch_initcall(xtalk_init);
diff --git a/arch/mips/sgi-ip30/Makefile b/arch/mips/sgi-ip30/Makefile
new file mode 100644
index 000000000..18cf561b3
--- /dev/null
+++ b/arch/mips/sgi-ip30/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the IP30 specific kernel interface routines under Linux.
+#
+
+obj-y := ip30-irq.o ip30-power.o ip30-setup.o ip30-timer.o ip30-xtalk.o
+
+obj-$(CONFIG_EARLY_PRINTK) += ip30-console.o
+obj-$(CONFIG_SMP) += ip30-smp.o
diff --git a/arch/mips/sgi-ip30/Platform b/arch/mips/sgi-ip30/Platform
new file mode 100644
index 000000000..f6f11517e
--- /dev/null
+++ b/arch/mips/sgi-ip30/Platform
@@ -0,0 +1,5 @@
+#
+# SGI-IP30 (Octane/Octane2)
+#
+cflags-$(CONFIG_SGI_IP30) += -I$(srctree)/arch/mips/include/asm/mach-ip30
+load-$(CONFIG_SGI_IP30) += 0xa800000020004000
diff --git a/arch/mips/sgi-ip30/ip30-common.h b/arch/mips/sgi-ip30/ip30-common.h
new file mode 100644
index 000000000..7b5db24b6
--- /dev/null
+++ b/arch/mips/sgi-ip30/ip30-common.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __IP30_COMMON_H
+#define __IP30_COMMON_H
+
+/*
+ * Power Switch is wired via BaseIO BRIDGE slot #6.
+ *
+ * ACFail is wired via BaseIO BRIDGE slot #7.
+ */
+#define IP30_POWER_IRQ HEART_L2_INT_POWER_BTN
+
+#define IP30_HEART_L0_IRQ (MIPS_CPU_IRQ_BASE + 2)
+#define IP30_HEART_L1_IRQ (MIPS_CPU_IRQ_BASE + 3)
+#define IP30_HEART_L2_IRQ (MIPS_CPU_IRQ_BASE + 4)
+#define IP30_HEART_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 5)
+#define IP30_HEART_ERR_IRQ (MIPS_CPU_IRQ_BASE + 6)
+
+extern void __init ip30_install_ipi(void);
+extern struct plat_smp_ops ip30_smp_ops;
+extern void __init ip30_per_cpu_init(void);
+
+#endif /* __IP30_COMMON_H */
diff --git a/arch/mips/sgi-ip30/ip30-console.c b/arch/mips/sgi-ip30/ip30-console.c
new file mode 100644
index 000000000..b91f8c4fd
--- /dev/null
+++ b/arch/mips/sgi-ip30/ip30-console.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/io.h>
+
+#include <asm/sn/ioc3.h>
+
+static inline struct ioc3_uartregs *console_uart(void)
+{
+ struct ioc3 *ioc3;
+
+ ioc3 = (struct ioc3 *)((void *)(0x900000001f600000));
+ return &ioc3->sregs.uarta;
+}
+
+void prom_putchar(char c)
+{
+ struct ioc3_uartregs *uart = console_uart();
+
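+ /* Wait for the 16550-style THRE bit (0x20) in the line status register. */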
+ while ((readb(&uart->iu_lsr) & 0x20) == 0)
+ cpu_relax();
+
+ writeb(c, &uart->iu_thr);
+}
diff --git a/arch/mips/sgi-ip30/ip30-irq.c b/arch/mips/sgi-ip30/ip30-irq.c
new file mode 100644
index 000000000..e8374e4c7
--- /dev/null
+++ b/arch/mips/sgi-ip30/ip30-irq.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip30-irq.c: Highlevel interrupt handling for IP30 architecture.
+ */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+#include <linux/tick.h>
+#include <linux/types.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/sgi/heart.h>
+
+#include "ip30-common.h"
+
+struct heart_irq_data {
+ u64 *irq_mask;
+ int cpu;
+};
+
+static DECLARE_BITMAP(heart_irq_map, HEART_NUM_IRQS);
+
+static DEFINE_PER_CPU(unsigned long, irq_enable_mask);
+
+static inline int heart_alloc_int(void)
+{
+ int bit;
+
+again:
+ bit = find_first_zero_bit(heart_irq_map, HEART_NUM_IRQS);
+ if (bit >= HEART_NUM_IRQS)
+ return -ENOSPC;
+
+ if (test_and_set_bit(bit, heart_irq_map))
+ goto again;
+
+ return bit;
+}
+
+static void ip30_error_irq(struct irq_desc *desc)
+{
+ u64 pending, mask, cause, error_irqs, err_reg;
+ int cpu = smp_processor_id();
+ int i;
+
+ pending = heart_read(&heart_regs->isr);
+ mask = heart_read(&heart_regs->imr[cpu]);
+ cause = heart_read(&heart_regs->cause);
+ error_irqs = (pending & HEART_L4_INT_MASK & mask);
+
+ /* Bail if there's nothing to process (how did we get here, then?) */
+ if (unlikely(!error_irqs))
+ return;
+
+ /* Prevent any of the error IRQs from firing again. */
+ heart_write(mask & ~(pending), &heart_regs->imr[cpu]);
+
+ /* Ack all error IRQs. */
+ heart_write(HEART_L4_INT_MASK, &heart_regs->clear_isr);
+
+ /*
+ * If we also have a cause value, then something happened, so loop
+ * through the error IRQs and report a "heart attack" for each one
+ * and print the value of the HEART cause register. This is really
+ * primitive right now, but it should hopefully work until a more
+ * robust error handling routine can be put together.
+ *
+ * Refer to heart.h for the HC_* macros to work out the cause
+ * that got us here.
+ */
+ if (cause) {
+ pr_alert("IP30: CPU%d: HEART ATTACK! ISR = 0x%.16llx, IMR = 0x%.16llx, CAUSE = 0x%.16llx\n",
+ cpu, pending, mask, cause);
+
+ if (cause & HC_COR_MEM_ERR) {
+ err_reg = heart_read(&heart_regs->mem_err_addr);
+ pr_alert(" HEART_MEMERR_ADDR = 0x%.16llx\n", err_reg);
+ }
+
+ /* i = 63; i >= 51; i-- */
+ for (i = HEART_ERR_MASK_END; i >= HEART_ERR_MASK_START; i--)
+ if ((pending >> i) & 1)
+ pr_alert(" HEART Error IRQ #%d\n", i);
+
+ /* XXX: Seems possible to loop forever here, so panic(). */
+ panic("IP30: Fatal Error !\n");
+ }
+
+ /* Unmask the error IRQs. */
+ heart_write(mask, &heart_regs->imr[cpu]);
+}
+
+static void ip30_normal_irq(struct irq_desc *desc)
+{
+ int cpu = smp_processor_id();
+ struct irq_domain *domain;
+ u64 pend, mask;
+ int irq;
+
+ pend = heart_read(&heart_regs->isr);
+ mask = (heart_read(&heart_regs->imr[cpu]) &
+ (HEART_L0_INT_MASK | HEART_L1_INT_MASK | HEART_L2_INT_MASK));
+
+ pend &= mask;
+ if (unlikely(!pend))
+ return;
+
+#ifdef CONFIG_SMP
+ if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_0)) {
+ heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0),
+ &heart_regs->clear_isr);
+ scheduler_ipi();
+ } else if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_1)) {
+ heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_1),
+ &heart_regs->clear_isr);
+ scheduler_ipi();
+ } else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_0)) {
+ heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0),
+ &heart_regs->clear_isr);
+ generic_smp_call_function_interrupt();
+ } else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_1)) {
+ heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_1),
+ &heart_regs->clear_isr);
+ generic_smp_call_function_interrupt();
+ } else
+#endif
+ {
+ domain = irq_desc_get_handler_data(desc);
+ irq = irq_linear_revmap(domain, __ffs(pend));
+ if (irq)
+ generic_handle_irq(irq);
+ else
+ spurious_interrupt();
+ }
+}
+
+static void ip30_ack_heart_irq(struct irq_data *d)
+{
+ heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
+}
+
+static void ip30_mask_heart_irq(struct irq_data *d)
+{
+ struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
+ unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);
+
+ clear_bit(d->hwirq, mask);
+ heart_write(*mask, &heart_regs->imr[hd->cpu]);
+}
+
+static void ip30_mask_and_ack_heart_irq(struct irq_data *d)
+{
+ struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
+ unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);
+
+ clear_bit(d->hwirq, mask);
+ heart_write(*mask, &heart_regs->imr[hd->cpu]);
+ heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
+}
+
+static void ip30_unmask_heart_irq(struct irq_data *d)
+{
+ struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
+ unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);
+
+ set_bit(d->hwirq, mask);
+ heart_write(*mask, &heart_regs->imr[hd->cpu]);
+}
+
+static int ip30_set_heart_irq_affinity(struct irq_data *d,
+ const struct cpumask *mask, bool force)
+{
+ struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
+
+ if (!hd)
+ return -EINVAL;
+
+ if (irqd_is_started(d))
+ ip30_mask_and_ack_heart_irq(d);
+
+ hd->cpu = cpumask_first_and(mask, cpu_online_mask);
+
+ if (irqd_is_started(d))
+ ip30_unmask_heart_irq(d);
+
+ irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));
+
+ return 0;
+}
+
+static struct irq_chip heart_irq_chip = {
+ .name = "HEART",
+ .irq_ack = ip30_ack_heart_irq,
+ .irq_mask = ip30_mask_heart_irq,
+ .irq_mask_ack = ip30_mask_and_ack_heart_irq,
+ .irq_unmask = ip30_unmask_heart_irq,
+ .irq_set_affinity = ip30_set_heart_irq_affinity,
+};
+
+static int heart_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct irq_alloc_info *info = arg;
+ struct heart_irq_data *hd;
+ int hwirq;
+
+ if (nr_irqs > 1 || !info)
+ return -EINVAL;
+
+ hd = kzalloc(sizeof(*hd), GFP_KERNEL);
+ if (!hd)
+ return -ENOMEM;
+
+ hwirq = heart_alloc_int();
+ if (hwirq < 0) {
+ kfree(hd);
+ return -EAGAIN;
+ }
+ irq_domain_set_info(domain, virq, hwirq, &heart_irq_chip, hd,
+ handle_level_irq, NULL, NULL);
+
+ return 0;
+}
+
+static void heart_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *irqd;
+
+ if (nr_irqs > 1)
+ return;
+
+ irqd = irq_domain_get_irq_data(domain, virq);
+ if (irqd) {
+ clear_bit(irqd->hwirq, heart_irq_map);
+ kfree(irqd->chip_data);
+ }
+}
+
+static const struct irq_domain_ops heart_domain_ops = {
+ .alloc = heart_domain_alloc,
+ .free = heart_domain_free,
+};
+
+void __init ip30_install_ipi(void)
+{
+ int cpu = smp_processor_id();
+ unsigned long *mask = &per_cpu(irq_enable_mask, cpu);
+
+ set_bit(HEART_L2_INT_RESCHED_CPU_0 + cpu, mask);
+ heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0 + cpu),
+ &heart_regs->clear_isr);
+ set_bit(HEART_L2_INT_CALL_CPU_0 + cpu, mask);
+ heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0 + cpu),
+ &heart_regs->clear_isr);
+
+ heart_write(*mask, &heart_regs->imr[cpu]);
+}
+
+void __init arch_init_irq(void)
+{
+ struct irq_domain *domain;
+ struct fwnode_handle *fn;
+ unsigned long *mask;
+ int i;
+
+ mips_cpu_irq_init();
+
+ /* Mask all IRQs. */
+ heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[0]);
+ heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[1]);
+ heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[2]);
+ heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[3]);
+
+ /* Ack everything. */
+ heart_write(HEART_ACK_ALL_MASK, &heart_regs->clear_isr);
+
+ /* Enable specific HEART error IRQs for each CPU. */
+ mask = &per_cpu(irq_enable_mask, 0);
+ *mask |= HEART_CPU0_ERR_MASK;
+ heart_write(*mask, &heart_regs->imr[0]);
+ mask = &per_cpu(irq_enable_mask, 1);
+ *mask |= HEART_CPU1_ERR_MASK;
+ heart_write(*mask, &heart_regs->imr[1]);
+
+ /*
+ * Some HEART bits are reserved by hardware or by software convention.
+ * Mark these as reserved right away so they won't be accidentally
+ * used later.
+ */
+ set_bit(HEART_L0_INT_GENERIC, heart_irq_map);
+ set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_0, heart_irq_map);
+ set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_1, heart_irq_map);
+ set_bit(HEART_L2_INT_RESCHED_CPU_0, heart_irq_map);
+ set_bit(HEART_L2_INT_RESCHED_CPU_1, heart_irq_map);
+ set_bit(HEART_L2_INT_CALL_CPU_0, heart_irq_map);
+ set_bit(HEART_L2_INT_CALL_CPU_1, heart_irq_map);
+ set_bit(HEART_L3_INT_TIMER, heart_irq_map);
+
+ /* Reserve the error interrupts (#51 to #63). */
+ for (i = HEART_L4_INT_XWID_ERR_9; i <= HEART_L4_INT_HEART_EXCP; i++)
+ set_bit(i, heart_irq_map);
+
+ fn = irq_domain_alloc_named_fwnode("HEART");
+ WARN_ON(fn == NULL);
+ if (!fn)
+ return;
+ domain = irq_domain_create_linear(fn, HEART_NUM_IRQS,
+ &heart_domain_ops, NULL);
+ WARN_ON(domain == NULL);
+ if (!domain)
+ return;
+
+ irq_set_default_host(domain);
+
+ irq_set_percpu_devid(IP30_HEART_L0_IRQ);
+ irq_set_chained_handler_and_data(IP30_HEART_L0_IRQ, ip30_normal_irq,
+ domain);
+ irq_set_percpu_devid(IP30_HEART_L1_IRQ);
+ irq_set_chained_handler_and_data(IP30_HEART_L1_IRQ, ip30_normal_irq,
+ domain);
+ irq_set_percpu_devid(IP30_HEART_L2_IRQ);
+ irq_set_chained_handler_and_data(IP30_HEART_L2_IRQ, ip30_normal_irq,
+ domain);
+ irq_set_percpu_devid(IP30_HEART_ERR_IRQ);
+ irq_set_chained_handler_and_data(IP30_HEART_ERR_IRQ, ip30_error_irq,
+ domain);
+}
diff --git a/arch/mips/sgi-ip30/ip30-power.c b/arch/mips/sgi-ip30/ip30-power.c
new file mode 100644
index 000000000..120b3f3d5
--- /dev/null
+++ b/arch/mips/sgi-ip30/ip30-power.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip30-power.c: Software powerdown and reset handling for IP30 architecture.
+ *
+ * Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
+ * 2014 Joshua Kinard <kumba@gentoo.org>
+ * 2009 Johannes Dickgreber <tanzy@gmx.de>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/notifier.h>
+#include <linux/delay.h>
+#include <linux/rtc/ds1685.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+
+#include <asm/reboot.h>
+#include <asm/sgi/heart.h>
+
+static void __noreturn ip30_machine_restart(char *cmd)
+{
+ /*
+ * Execute HEART cold reset
+ * Yes, it's cold-HEARTed!
+ */
+ heart_write((heart_read(&heart_regs->mode) | HM_COLD_RST),
+ &heart_regs->mode);
+ unreachable();
+}
+
+static int __init ip30_reboot_setup(void)
+{
+ _machine_restart = ip30_machine_restart;
+
+ return 0;
+}
+
+subsys_initcall(ip30_reboot_setup);
diff --git a/arch/mips/sgi-ip30/ip30-setup.c b/arch/mips/sgi-ip30/ip30-setup.c
new file mode 100644
index 000000000..44b1607e9
--- /dev/null
+++ b/arch/mips/sgi-ip30/ip30-setup.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SGI IP30 miscellaneous setup bits.
+ *
+ * Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
+ * 2007 Joshua Kinard <kumba@gentoo.org>
+ * 2009 Johannes Dickgreber <tanzy@gmx.de>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <linux/memblock.h>
+
+#include <asm/smp-ops.h>
+#include <asm/sgialib.h>
+#include <asm/time.h>
+#include <asm/sgi/heart.h>
+
+#include "ip30-common.h"
+
+/* Structure of accessible HEART registers located in XKPHYS space. */
+struct ip30_heart_regs __iomem *heart_regs = HEART_XKPHYS_BASE;
+
+/*
+ * ARCS will report up to the first 1GB of
+ * memory if queried. Anything beyond that
+ * is marked as reserved.
+ */
+#define IP30_MAX_PROM_MEMORY _AC(0x40000000, UL)
+
+/*
+ * Memory in the Octane starts at 512MB
+ */
+#define IP30_MEMORY_BASE _AC(0x20000000, UL)
+
+/*
+ * If using ARCS to probe for memory, then
+ * remaining memory will start at this offset.
+ */
+#define IP30_REAL_MEMORY_START (IP30_MEMORY_BASE + IP30_MAX_PROM_MEMORY)
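+/* i.e. 0x20000000 + 0x40000000 = 0x60000000, the first byte beyond what ARCS reports. */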
+
+#define MEM_SHIFT(x) ((x) >> 20)
+
+static void __init ip30_mem_init(void)
+{
+ unsigned long total_mem;
+ phys_addr_t addr;
+ phys_addr_t size;
+ u32 memcfg;
+ int i;
+
+ total_mem = 0;
+ for (i = 0; i < HEART_MEMORY_BANKS; i++) {
+ memcfg = __raw_readl(&heart_regs->mem_cfg.l[i]);
+ if (!(memcfg & HEART_MEMCFG_VALID))
+ continue;
+
+ addr = memcfg & HEART_MEMCFG_ADDR_MASK;
+ addr <<= HEART_MEMCFG_UNIT_SHIFT;
+ addr += IP30_MEMORY_BASE;
+ size = memcfg & HEART_MEMCFG_SIZE_MASK;
+ size >>= HEART_MEMCFG_SIZE_SHIFT;
+ size += 1;
+ size <<= HEART_MEMCFG_UNIT_SHIFT;
+
+ total_mem += size;
+
+ if (addr >= IP30_REAL_MEMORY_START)
+ memblock_free(addr, size);
+ else if ((addr + size) > IP30_REAL_MEMORY_START)
+ memblock_free(IP30_REAL_MEMORY_START,
+ size - IP30_MAX_PROM_MEMORY);
+ }
+ pr_info("Detected %luMB of physical memory.\n", MEM_SHIFT(total_mem));
+}
+
+/**
+ * ip30_cpu_time_init - platform time initialization.
+ */
+static void __init ip30_cpu_time_init(void)
+{
+ int cpu = smp_processor_id();
+ u64 heart_compare;
+ unsigned int start, end;
+ int time_diff;
+
+ heart_compare = (heart_read(&heart_regs->count) +
+ (HEART_CYCLES_PER_SEC / 10));
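+ /*
+ * Let the HEART counter advance for roughly a tenth of a second and
+ * measure how far the CP0 count moves in that window; scaling the
+ * delta by 10 gives the CP0 count rate in ticks per second.
+ */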
+ start = read_c0_count();
+ while ((heart_read(&heart_regs->count) - heart_compare) & 0x800000)
+ cpu_relax();
+
+ end = read_c0_count();
+ time_diff = (int)end - (int)start;
+ mips_hpt_frequency = time_diff * 10;
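+ /*
+ * The CPU clock is reported as twice the CP0 count rate, presumably
+ * because the count register ticks once every two CPU cycles here.
+ */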
+ pr_info("IP30: CPU%d: %d MHz CPU detected.\n", cpu,
+ (mips_hpt_frequency * 2) / 1000000);
+}
+
+void __init ip30_per_cpu_init(void)
+{
+ /* Disable all interrupts. */
+ clear_c0_status(ST0_IM);
+
+ ip30_cpu_time_init();
+#ifdef CONFIG_SMP
+ ip30_install_ipi();
+#endif
+
+ enable_percpu_irq(IP30_HEART_L0_IRQ, IRQ_TYPE_NONE);
+ enable_percpu_irq(IP30_HEART_L1_IRQ, IRQ_TYPE_NONE);
+ enable_percpu_irq(IP30_HEART_L2_IRQ, IRQ_TYPE_NONE);
+ enable_percpu_irq(IP30_HEART_ERR_IRQ, IRQ_TYPE_NONE);
+}
+
+/**
+ * plat_mem_setup - despite the name, misc setup happens here.
+ */
+void __init plat_mem_setup(void)
+{
+ ip30_mem_init();
+
+ /* XXX: Hard lock on /sbin/init if this flag isn't specified. */
+ prom_flags |= PROM_FLAG_DONT_FREE_TEMP;
+
+#ifdef CONFIG_SMP
+ register_smp_ops(&ip30_smp_ops);
+#else
+ ip30_per_cpu_init();
+#endif
+
+ ioport_resource.start = 0;
+ ioport_resource.end = ~0UL;
+ set_io_port_base(IO_BASE);
+}
diff --git a/arch/mips/sgi-ip30/ip30-smp.c b/arch/mips/sgi-ip30/ip30-smp.c
new file mode 100644
index 000000000..4bfe65460
--- /dev/null
+++ b/arch/mips/sgi-ip30/ip30-smp.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip30-smp.c: SMP on IP30 architecture.
+ * Based on the original IP30 SMP code, with inspiration from ip27-smp.c
+ * and smp-bmips.c.
+ *
+ * Copyright (C) 2005-2007 Stanislaw Skowronek <skylark@unaligned.org>
+ * 2006-2007, 2014-2015 Joshua Kinard <kumba@gentoo.org>
+ * 2009 Johannes Dickgreber <tanzy@gmx.de>
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/time.h>
+#include <asm/sgi/heart.h>
+
+#include "ip30-common.h"
+
+#define MPCONF_MAGIC 0xbaddeed2
+#define MPCONF_ADDR 0xa800000000000600L
+#define MPCONF_SIZE 0x80
+#define MPCONF(x) (MPCONF_ADDR + (x) * MPCONF_SIZE)
+
+/* HEART can theoretically do 4 CPUs, but only 2 are physically possible */
+#define MP_NCPU 2
+
+struct mpconf {
+ u32 magic;
+ u32 prid;
+ u32 physid;
+ u32 virtid;
+ u32 scachesz;
+ u16 fanloads;
+ u16 res;
+ void *launch;
+ void *rendezvous;
+ u64 res2[3];
+ void *stackaddr;
+ void *lnch_parm;
+ void *rndv_parm;
+ u32 idleflag;
+};
+
+static void ip30_smp_send_ipi_single(int cpu, u32 action)
+{
+ int irq;
+
+ switch (action) {
+ case SMP_RESCHEDULE_YOURSELF:
+ irq = HEART_L2_INT_RESCHED_CPU_0;
+ break;
+ case SMP_CALL_FUNCTION:
+ irq = HEART_L2_INT_CALL_CPU_0;
+ break;
+ default:
+ panic("IP30: Unknown action value in %s!\n", __func__);
+ }
+
+ irq += cpu;
+
+ /* Poke the other CPU -- it's got mail! */
+ heart_write(BIT_ULL(irq), &heart_regs->set_isr);
+}
+
+static void ip30_smp_send_ipi_mask(const struct cpumask *mask, u32 action)
+{
+ u32 i;
+
+ for_each_cpu(i, mask)
+ ip30_smp_send_ipi_single(i, action);
+}
+
+static void __init ip30_smp_setup(void)
+{
+ int i;
+ int ncpu = 0;
+ struct mpconf *mpc;
+
+ init_cpu_possible(cpumask_of(0));
+
+ /* Scan the MPCONF structure and enumerate available CPUs. */
+ for (i = 0; i < MP_NCPU; i++) {
+ mpc = (struct mpconf *)MPCONF(i);
+ if (mpc->magic == MPCONF_MAGIC) {
+ set_cpu_possible(i, true);
+ __cpu_number_map[i] = ++ncpu;
+ __cpu_logical_map[ncpu] = i;
+ pr_info("IP30: Slot: %d, PrID: %.8x, PhyID: %d, VirtID: %d\n",
+ i, mpc->prid, mpc->physid, mpc->virtid);
+ }
+ }
+ pr_info("IP30: Detected %d CPU(s) present.\n", ncpu);
+
+ /*
+ * Set the coherency algorithm to '5' (cacheable coherent
+ * exclusive on write). This is needed on IP30 SMP, especially
+ * for R14000 CPUs; otherwise, instruction bus errors will
+ * occur upon reaching userland.
+ */
+ change_c0_config(CONF_CM_CMASK, CONF_CM_CACHABLE_COW);
+}
+
+static void __init ip30_smp_prepare_cpus(unsigned int max_cpus)
+{
+ /* nothing to do here */
+}
+
+static int __init ip30_smp_boot_secondary(int cpu, struct task_struct *idle)
+{
+ struct mpconf *mpc = (struct mpconf *)MPCONF(cpu);
+
+ /* Stack pointer (sp). */
+ mpc->stackaddr = (void *)__KSTK_TOS(idle);
+
+ /* Global pointer (gp). */
+ mpc->lnch_parm = task_thread_info(idle);
+
+ mb(); /* make sure stack and lparm are written */
+
+ /* Boot CPUx. */
+ mpc->launch = smp_bootstrap;
+
+ /* CPUx now executes smp_bootstrap, then ip30_smp_finish */
+ return 0;
+}
+
+static void __init ip30_smp_init_cpu(void)
+{
+ ip30_per_cpu_init();
+}
+
+static void __init ip30_smp_finish(void)
+{
+ enable_percpu_irq(get_c0_compare_int(), IRQ_TYPE_NONE);
+ local_irq_enable();
+}
+
+struct plat_smp_ops __read_mostly ip30_smp_ops = {
+ .send_ipi_single = ip30_smp_send_ipi_single,
+ .send_ipi_mask = ip30_smp_send_ipi_mask,
+ .smp_setup = ip30_smp_setup,
+ .prepare_cpus = ip30_smp_prepare_cpus,
+ .boot_secondary = ip30_smp_boot_secondary,
+ .init_secondary = ip30_smp_init_cpu,
+ .smp_finish = ip30_smp_finish,
+ .prepare_boot_cpu = ip30_smp_init_cpu,
+};
diff --git a/arch/mips/sgi-ip30/ip30-timer.c b/arch/mips/sgi-ip30/ip30-timer.c
new file mode 100644
index 000000000..d13e10547
--- /dev/null
+++ b/arch/mips/sgi-ip30/ip30-timer.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip30-timer.c: Clocksource/clockevent support for the
+ * HEART chip in SGI Octane (IP30) systems.
+ *
+ * Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
+ * Copyright (C) 2009 Johannes Dickgreber <tanzy@gmx.de>
+ * Copyright (C) 2011 Joshua Kinard <kumba@gentoo.org>
+ */
+
+#include <linux/clocksource.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/sched_clock.h>
+
+#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+#include <asm/sgi/heart.h>
+
+static u64 ip30_heart_counter_read(struct clocksource *cs)
+{
+ return heart_read(&heart_regs->count);
+}
+
+struct clocksource ip30_heart_clocksource = {
+ .name = "HEART",
+ .rating = 400,
+ .read = ip30_heart_counter_read,
+ .mask = CLOCKSOURCE_MASK(52),
+ .flags = (CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_VALID_FOR_HRES),
+};
+
+static u64 notrace ip30_heart_read_sched_clock(void)
+{
+ return heart_read(&heart_regs->count);
+}
+
+static void __init ip30_heart_clocksource_init(void)
+{
+ struct clocksource *cs = &ip30_heart_clocksource;
+
+ clocksource_register_hz(cs, HEART_CYCLES_PER_SEC);
+
+ sched_clock_register(ip30_heart_read_sched_clock, 52,
+ HEART_CYCLES_PER_SEC);
+}
+
+void __init plat_time_init(void)
+{
+ int irq = get_c0_compare_int();
+
+ cp0_timer_irq_installed = 1;
+ c0_compare_irqaction.percpu_dev_id = &mips_clockevent_device;
+ c0_compare_irqaction.flags &= ~IRQF_SHARED;
+ irq_set_handler(irq, handle_percpu_devid_irq);
+ irq_set_percpu_devid(irq);
+ setup_percpu_irq(irq, &c0_compare_irqaction);
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+
+ ip30_heart_clocksource_init();
+}
diff --git a/arch/mips/sgi-ip30/ip30-xtalk.c b/arch/mips/sgi-ip30/ip30-xtalk.c
new file mode 100644
index 000000000..8a2894645
--- /dev/null
+++ b/arch/mips/sgi-ip30/ip30-xtalk.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ip30-xtalk.c - Very basic Crosstalk (XIO) detection support.
+ * Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
+ * Copyright (C) 2009 Johannes Dickgreber <tanzy@gmx.de>
+ * Copyright (C) 2007, 2014-2016 Joshua Kinard <kumba@gentoo.org>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/sgi-w1.h>
+#include <linux/platform_data/xtalk-bridge.h>
+
+#include <asm/xtalk/xwidget.h>
+#include <asm/pci/bridge.h>
+
+#define IP30_SWIN_BASE(widget) \
+ (0x0000000010000000 | (((unsigned long)(widget)) << 24))
+
+#define IP30_RAW_SWIN_BASE(widget) (IO_BASE + IP30_SWIN_BASE(widget))
+
+#define IP30_SWIN_SIZE (1 << 24)
+
+#define IP30_WIDGET_XBOW _AC(0x0, UL) /* XBow is always 0 */
+#define IP30_WIDGET_HEART _AC(0x8, UL) /* HEART is always 8 */
+#define IP30_WIDGET_PCI_BASE _AC(0xf, UL) /* BaseIO PCI is always 15 */
+
+#define XTALK_NODEV 0xffffffff
+
+#define XBOW_REG_LINK_STAT_0 0x114
+#define XBOW_REG_LINK_BLK_SIZE 0x40
+#define XBOW_REG_LINK_ALIVE 0x80000000
+
+#define HEART_INTR_ADDR 0x00000080
+
+#define xtalk_read __raw_readl
+
+static void bridge_platform_create(int widget, int masterwid)
+{
+ struct xtalk_bridge_platform_data *bd;
+ struct sgi_w1_platform_data *wd;
+ struct platform_device *pdev;
+ struct resource w1_res;
+
+ wd = kzalloc(sizeof(*wd), GFP_KERNEL);
+ if (!wd)
+ goto no_mem;
+
+ snprintf(wd->dev_id, sizeof(wd->dev_id), "bridge-%012lx",
+ IP30_SWIN_BASE(widget));
+
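+	/*
+	 * The bridge's b_nic register provides a one-wire interface; register
+	 * it as an sgi_w1 bus master device.
+	 */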
+ memset(&w1_res, 0, sizeof(w1_res));
+ w1_res.start = IP30_SWIN_BASE(widget) +
+ offsetof(struct bridge_regs, b_nic);
+ w1_res.end = w1_res.start + 3;
+ w1_res.flags = IORESOURCE_MEM;
+
+ pdev = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
+ if (!pdev) {
+ kfree(wd);
+ goto no_mem;
+ }
+ platform_device_add_resources(pdev, &w1_res, 1);
+ platform_device_add_data(pdev, wd, sizeof(*wd));
+ platform_device_add(pdev);
+
+ bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd)
+ goto no_mem;
+ pdev = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
+ if (!pdev) {
+ kfree(bd);
+ goto no_mem;
+ }
+
+ bd->bridge_addr = IP30_RAW_SWIN_BASE(widget);
+ bd->intr_addr = HEART_INTR_ADDR;
+ bd->nasid = 0;
+ bd->masterwid = masterwid;
+
+ bd->mem.name = "Bridge PCI MEM";
+ bd->mem.start = IP30_SWIN_BASE(widget) + BRIDGE_DEVIO0;
+ bd->mem.end = IP30_SWIN_BASE(widget) + IP30_SWIN_SIZE - 1;
+ bd->mem.flags = IORESOURCE_MEM;
+ bd->mem_offset = IP30_SWIN_BASE(widget);
+
+ bd->io.name = "Bridge PCI IO";
+ bd->io.start = IP30_SWIN_BASE(widget) + BRIDGE_DEVIO0;
+ bd->io.end = IP30_SWIN_BASE(widget) + IP30_SWIN_SIZE - 1;
+ bd->io.flags = IORESOURCE_IO;
+ bd->io_offset = IP30_SWIN_BASE(widget);
+
+ platform_device_add_data(pdev, bd, sizeof(*bd));
+ platform_device_add(pdev);
+ pr_info("xtalk:%x bridge widget\n", widget);
+ return;
+
+no_mem:
+ pr_warn("xtalk:%x bridge create out of memory\n", widget);
+}
+
+static unsigned int __init xbow_widget_active(s8 wid)
+{
+ unsigned int link_stat;
+
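+	/*
+	 * Per-port link status registers start at offset 0x114 in XBow space
+	 * and are 0x40 bytes apart for ports 8..0xf.
+	 */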
+ link_stat = xtalk_read((void *)(IP30_RAW_SWIN_BASE(IP30_WIDGET_XBOW) +
+ XBOW_REG_LINK_STAT_0 +
+ XBOW_REG_LINK_BLK_SIZE *
+ (wid - 8)));
+
+ return (link_stat & XBOW_REG_LINK_ALIVE) ? 1 : 0;
+}
+
+static void __init xtalk_init_widget(s8 wid, s8 masterwid)
+{
+ xwidget_part_num_t partnum;
+ widgetreg_t widget_id;
+
+ if (!xbow_widget_active(wid))
+ return;
+
+ widget_id = xtalk_read((void *)(IP30_RAW_SWIN_BASE(wid) + WIDGET_ID));
+
+ partnum = XWIDGET_PART_NUM(widget_id);
+
+ switch (partnum) {
+ case BRIDGE_WIDGET_PART_NUM:
+ case XBRIDGE_WIDGET_PART_NUM:
+ bridge_platform_create(wid, masterwid);
+ break;
+ default:
+ pr_info("xtalk:%x unknown widget (0x%x)\n", wid, partnum);
+ break;
+ }
+}
+
+static int __init ip30_xtalk_init(void)
+{
+ int i;
+
+ /*
+ * Walk widget IDs backwards so that BaseIO is probed first. This
+ * ensures that the BaseIO IOC3 is always detected as eth0.
+ */
+ for (i = IP30_WIDGET_PCI_BASE; i > IP30_WIDGET_HEART; i--)
+ xtalk_init_widget(i, IP30_WIDGET_HEART);
+
+ return 0;
+}
+
+arch_initcall(ip30_xtalk_init);
diff --git a/arch/mips/sgi-ip32/Makefile b/arch/mips/sgi-ip32/Makefile
new file mode 100644
index 000000000..de0222466
--- /dev/null
+++ b/arch/mips/sgi-ip32/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the SGI specific kernel interface routines
+# under Linux.
+#
+
+obj-y += ip32-berr.o ip32-irq.o ip32-platform.o ip32-setup.o ip32-reset.o \
+ crime.o ip32-memory.o ip32-dma.o
diff --git a/arch/mips/sgi-ip32/Platform b/arch/mips/sgi-ip32/Platform
new file mode 100644
index 000000000..f58a7a02b
--- /dev/null
+++ b/arch/mips/sgi-ip32/Platform
@@ -0,0 +1,10 @@
+#
+# SGI-IP32 (O2)
+#
+# Set the load address to >= 0xffffffff80069000 if you want to leave space for
+# symmon, or to 0xffffffff80004000 for production kernels. Note that the value
+# must be a multiple of the kernel stack size, or handling of the "current"
+# task pointer will break.
+#
+cflags-$(CONFIG_SGI_IP32) += -I$(srctree)/arch/mips/include/asm/mach-ip32
+load-$(CONFIG_SGI_IP32) += 0xffffffff80004000
diff --git a/arch/mips/sgi-ip32/crime.c b/arch/mips/sgi-ip32/crime.c
new file mode 100644
index 000000000..a8e0c776c
--- /dev/null
+++ b/arch/mips/sgi-ip32/crime.c
@@ -0,0 +1,103 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2003 Keith M Wesolowski
+ * Copyright (C) 2005 Ilya A. Volynets <ilya@total-knowledge.com>
+ */
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/export.h>
+#include <asm/bootinfo.h>
+#include <asm/io.h>
+#include <asm/mipsregs.h>
+#include <asm/page.h>
+#include <asm/ip32/crime.h>
+#include <asm/ip32/mace.h>
+
+struct sgi_crime __iomem *crime;
+struct sgi_mace __iomem *mace;
+
+EXPORT_SYMBOL_GPL(mace);
+
+void __init crime_init(void)
+{
+ unsigned int id, rev;
+ const int field = 2 * sizeof(unsigned long);
+
+ set_io_port_base((unsigned long) ioremap(MACEPCI_LOW_IO, 0x2000000));
+ crime = ioremap(CRIME_BASE, sizeof(struct sgi_crime));
+ mace = ioremap(MACE_BASE, sizeof(struct sgi_mace));
+
+ id = crime->id;
+ rev = id & CRIME_ID_REV;
+ id = (id & CRIME_ID_IDBITS) >> 4;
+ printk(KERN_INFO "CRIME id %1x rev %d at 0x%0*lx\n",
+ id, rev, field, (unsigned long) CRIME_BASE);
+}
+
+irqreturn_t crime_memerr_intr(int irq, void *dev_id)
+{
+ unsigned long stat, addr;
+ int fatal = 0;
+
+ stat = crime->mem_error_stat & CRIME_MEM_ERROR_STAT_MASK;
+ addr = crime->mem_error_addr & CRIME_MEM_ERROR_ADDR_MASK;
+
+ printk("CRIME memory error at 0x%08lx ST 0x%08lx<", addr, stat);
+
+ if (stat & CRIME_MEM_ERROR_INV)
+ printk("INV,");
+ if (stat & CRIME_MEM_ERROR_ECC) {
+ unsigned long ecc_syn =
+ crime->mem_ecc_syn & CRIME_MEM_ERROR_ECC_SYN_MASK;
+ unsigned long ecc_gen =
+ crime->mem_ecc_chk & CRIME_MEM_ERROR_ECC_CHK_MASK;
+ printk("ECC,SYN=0x%08lx,GEN=0x%08lx,", ecc_syn, ecc_gen);
+ }
+ if (stat & CRIME_MEM_ERROR_MULTIPLE) {
+ fatal = 1;
+ printk("MULTIPLE,");
+ }
+ if (stat & CRIME_MEM_ERROR_HARD_ERR) {
+ fatal = 1;
+ printk("HARD,");
+ }
+ if (stat & CRIME_MEM_ERROR_SOFT_ERR)
+ printk("SOFT,");
+ if (stat & CRIME_MEM_ERROR_CPU_ACCESS)
+ printk("CPU,");
+ if (stat & CRIME_MEM_ERROR_VICE_ACCESS)
+ printk("VICE,");
+ if (stat & CRIME_MEM_ERROR_GBE_ACCESS)
+ printk("GBE,");
+ if (stat & CRIME_MEM_ERROR_RE_ACCESS)
+ printk("RE,REID=0x%02lx,", (stat & CRIME_MEM_ERROR_RE_ID)>>8);
+ if (stat & CRIME_MEM_ERROR_MACE_ACCESS)
+ printk("MACE,MACEID=0x%02lx,", stat & CRIME_MEM_ERROR_MACE_ID);
+
+ crime->mem_error_stat = 0;
+
+ if (fatal) {
+ printk("FATAL>\n");
+ panic("Fatal memory error.");
+ } else
+ printk("NONFATAL>\n");
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t crime_cpuerr_intr(int irq, void *dev_id)
+{
+ unsigned long stat = crime->cpu_error_stat & CRIME_CPU_ERROR_MASK;
+ unsigned long addr = crime->cpu_error_addr & CRIME_CPU_ERROR_ADDR_MASK;
+
+ addr <<= 2;
+ printk("CRIME CPU error at 0x%09lx status 0x%08lx\n", addr, stat);
+ crime->cpu_error_stat = 0;
+
+ return IRQ_HANDLED;
+}
diff --git a/arch/mips/sgi-ip32/ip32-berr.c b/arch/mips/sgi-ip32/ip32-berr.c
new file mode 100644
index 000000000..c860f95ab
--- /dev/null
+++ b/arch/mips/sgi-ip32/ip32-berr.c
@@ -0,0 +1,38 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1999, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 by Silicon Graphics
+ * Copyright (C) 2002 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+#include <asm/addrspace.h>
+#include <asm/ptrace.h>
+#include <asm/tlbdebug.h>
+
+static int ip32_be_handler(struct pt_regs *regs, int is_fixup)
+{
+ int data = regs->cp0_cause & 4;
+
+ if (is_fixup)
+ return MIPS_BE_FIXUP;
+
+ printk("Got %cbe at 0x%lx\n", data ? 'd' : 'i', regs->cp0_epc);
+ show_regs(regs);
+ dump_tlb_all();
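+	/*
+	 * Spin here so the dump stays on the console; the force_sig() below
+	 * is never reached as written.
+	 */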
+ while(1);
+ force_sig(SIGBUS);
+}
+
+void __init ip32_be_init(void)
+{
+ board_be_handler = ip32_be_handler;
+}
diff --git a/arch/mips/sgi-ip32/ip32-dma.c b/arch/mips/sgi-ip32/ip32-dma.c
new file mode 100644
index 000000000..20c6da9d7
--- /dev/null
+++ b/arch/mips/sgi-ip32/ip32-dma.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/dma-direct.h>
+#include <asm/ip32/crime.h>
+
+/*
+ * A few notes.
+ * 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M
+ * 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for
+ * native-endian)
+ * 3. All other devices see memory as one big chunk at 0x40000000
+ * 4. Non-PCI devices will pass NULL as struct device*
+ *
+ * Thus we translate differently, depending on device.
+ */
+
+#define RAM_OFFSET_MASK 0x3fffffffUL
+
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ dma_addr_t dma_addr = paddr & RAM_OFFSET_MASK;
+
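+	/*
+	 * Non-PCI devices (NULL dev) see RAM only at 0x40000000, so add the
+	 * CRIME high-memory offset (notes 3 and 4 above).
+	 */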
+ if (!dev)
+ dma_addr += CRIME_HI_MEM_BASE;
+ return dma_addr;
+}
+
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ phys_addr_t paddr = dma_addr & RAM_OFFSET_MASK;
+
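+	/*
+	 * Addresses above the first 256 MB belong to the second memory chunk
+	 * at CRIME_HI_MEM_BASE (note 1 above).
+	 */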
+ if (dma_addr >= 256*1024*1024)
+ paddr += CRIME_HI_MEM_BASE;
+ return paddr;
+}
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c
new file mode 100644
index 000000000..1bbd5bfb5
--- /dev/null
+++ b/arch/mips/sgi-ip32/ip32-irq.c
@@ -0,0 +1,499 @@
+/*
+ * Code to handle IP32 IRQs
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Harald Koerfgen
+ * Copyright (C) 2001 Keith M Wesolowski
+ */
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/signal.h>
+#include <asm/time.h>
+#include <asm/ip32/crime.h>
+#include <asm/ip32/mace.h>
+#include <asm/ip32/ip32_ints.h>
+
+/* issue a PIO read to make sure no PIO writes are pending */
+static inline void flush_crime_bus(void)
+{
+ crime->control;
+}
+
+static inline void flush_mace_bus(void)
+{
+ mace->perif.ctrl.misc;
+}
+
+/*
+ * O2 irq map
+ *
+ * IP0 -> software (ignored)
+ * IP1 -> software (ignored)
+ * IP2 -> (irq0) C crime 1.1 all interrupts; crime 1.5 ???
+ * IP3 -> (irq1) X unknown
+ * IP4 -> (irq2) X unknown
+ * IP5 -> (irq3) X unknown
+ * IP6 -> (irq4) X unknown
+ * IP7 -> (irq5) 7 CPU count/compare timer (system timer)
+ *
+ * crime: (C)
+ *
+ * CRIME_INT_STAT 31:0:
+ *
+ * 0 -> 8 Video in 1
+ * 1 -> 9 Video in 2
+ * 2 -> 10 Video out
+ * 3 -> 11 Mace ethernet
+ * 4 -> S SuperIO sub-interrupt
+ * 5 -> M Miscellaneous sub-interrupt
+ * 6 -> A Audio sub-interrupt
+ * 7 -> 15 PCI bridge errors
+ * 8 -> 16 PCI SCSI aic7xxx 0
+ * 9 -> 17 PCI SCSI aic7xxx 1
+ * 10 -> 18 PCI slot 0
+ * 11 -> 19 unused (PCI slot 1)
+ * 12 -> 20 unused (PCI slot 2)
+ * 13 -> 21 unused (PCI shared 0)
+ * 14 -> 22 unused (PCI shared 1)
+ * 15 -> 23 unused (PCI shared 2)
+ * 16 -> 24 GBE0 (E)
+ * 17 -> 25 GBE1 (E)
+ * 18 -> 26 GBE2 (E)
+ * 19 -> 27 GBE3 (E)
+ * 20 -> 28 CPU errors
+ * 21 -> 29 Memory errors
+ * 22 -> 30 RE empty edge (E)
+ * 23 -> 31 RE full edge (E)
+ * 24 -> 32 RE idle edge (E)
+ * 25 -> 33 RE empty level
+ * 26 -> 34 RE full level
+ * 27 -> 35 RE idle level
+ * 28 -> 36 unused (software 0) (E)
+ * 29 -> 37 unused (software 1) (E)
+ * 30 -> 38 unused (software 2) - crime 1.5 CPU SysCorError (E)
+ * 31 -> 39 VICE
+ *
+ * S, M, A: Use the MACE ISA interrupt register
+ * MACE_ISA_INT_STAT 31:0
+ *
+ * 0-7 -> 40-47 Audio
+ * 8 -> 48 RTC
+ * 9 -> 49 Keyboard
+ * 10 -> X Keyboard polled
+ * 11 -> 51 Mouse
+ * 12 -> X Mouse polled
+ * 13-15 -> 53-55 Count/compare timers
+ * 16-19 -> 56-59 Parallel (16 E)
+ * 20-25 -> 60-65 Serial 1 (22 E)
+ * 26-31 -> 66-71 Serial 2 (28 E)
+ *
+ * Note that this means IRQs 12-14, 50, and 52 do not exist. This is a
+ * different IRQ map than IRIX uses, but that's OK as Linux irq handling
+ * is quite different anyway.
+ */
+
+/* Some initial interrupts to set up */
+extern irqreturn_t crime_memerr_intr(int irq, void *dev_id);
+extern irqreturn_t crime_cpuerr_intr(int irq, void *dev_id);
+
+/*
+ * This is for pure CRIME interrupts, i.e. not MACE. The advantage?
+ * We get to split the register in half and do faster lookups.
+ */
+
+static uint64_t crime_mask;
+
+static inline void crime_enable_irq(struct irq_data *d)
+{
+ unsigned int bit = d->irq - CRIME_IRQ_BASE;
+
+ crime_mask |= 1 << bit;
+ crime->imask = crime_mask;
+}
+
+static inline void crime_disable_irq(struct irq_data *d)
+{
+ unsigned int bit = d->irq - CRIME_IRQ_BASE;
+
+ crime_mask &= ~(1 << bit);
+ crime->imask = crime_mask;
+ flush_crime_bus();
+}
+
+static struct irq_chip crime_level_interrupt = {
+ .name = "IP32 CRIME",
+ .irq_mask = crime_disable_irq,
+ .irq_unmask = crime_enable_irq,
+};
+
+static void crime_edge_mask_and_ack_irq(struct irq_data *d)
+{
+ unsigned int bit = d->irq - CRIME_IRQ_BASE;
+ uint64_t crime_int;
+
+ /* Edge triggered interrupts must be cleared. */
+ crime_int = crime->hard_int;
+ crime_int &= ~(1 << bit);
+ crime->hard_int = crime_int;
+
+ crime_disable_irq(d);
+}
+
+static struct irq_chip crime_edge_interrupt = {
+ .name = "IP32 CRIME",
+ .irq_ack = crime_edge_mask_and_ack_irq,
+ .irq_mask = crime_disable_irq,
+ .irq_mask_ack = crime_edge_mask_and_ack_irq,
+ .irq_unmask = crime_enable_irq,
+};
+
+/*
+ * This is for MACE PCI interrupts. We can decrease bus traffic by masking
+ * as close to the source as possible. This also means we can take the
+ * next chunk of the CRIME register in one piece.
+ */
+
+static unsigned long macepci_mask;
+
+static void enable_macepci_irq(struct irq_data *d)
+{
+ macepci_mask |= MACEPCI_CONTROL_INT(d->irq - MACEPCI_SCSI0_IRQ);
+ mace->pci.control = macepci_mask;
+ crime_mask |= 1 << (d->irq - CRIME_IRQ_BASE);
+ crime->imask = crime_mask;
+}
+
+static void disable_macepci_irq(struct irq_data *d)
+{
+ crime_mask &= ~(1 << (d->irq - CRIME_IRQ_BASE));
+ crime->imask = crime_mask;
+ flush_crime_bus();
+ macepci_mask &= ~MACEPCI_CONTROL_INT(d->irq - MACEPCI_SCSI0_IRQ);
+ mace->pci.control = macepci_mask;
+ flush_mace_bus();
+}
+
+static struct irq_chip ip32_macepci_interrupt = {
+ .name = "IP32 MACE PCI",
+ .irq_mask = disable_macepci_irq,
+ .irq_unmask = enable_macepci_irq,
+};
+
+/* This is used for MACE ISA interrupts. That means bits 4-6 in the
+ * CRIME register.
+ */
+
+#define MACEISA_AUDIO_INT (MACEISA_AUDIO_SW_INT | \
+ MACEISA_AUDIO_SC_INT | \
+ MACEISA_AUDIO1_DMAT_INT | \
+ MACEISA_AUDIO1_OF_INT | \
+ MACEISA_AUDIO2_DMAT_INT | \
+ MACEISA_AUDIO2_MERR_INT | \
+ MACEISA_AUDIO3_DMAT_INT | \
+ MACEISA_AUDIO3_MERR_INT)
+#define MACEISA_MISC_INT (MACEISA_RTC_INT | \
+ MACEISA_KEYB_INT | \
+ MACEISA_KEYB_POLL_INT | \
+ MACEISA_MOUSE_INT | \
+ MACEISA_MOUSE_POLL_INT | \
+ MACEISA_TIMER0_INT | \
+ MACEISA_TIMER1_INT | \
+ MACEISA_TIMER2_INT)
+#define MACEISA_SUPERIO_INT (MACEISA_PARALLEL_INT | \
+ MACEISA_PAR_CTXA_INT | \
+ MACEISA_PAR_CTXB_INT | \
+ MACEISA_PAR_MERR_INT | \
+ MACEISA_SERIAL1_INT | \
+ MACEISA_SERIAL1_TDMAT_INT | \
+ MACEISA_SERIAL1_TDMAPR_INT | \
+ MACEISA_SERIAL1_TDMAME_INT | \
+ MACEISA_SERIAL1_RDMAT_INT | \
+ MACEISA_SERIAL1_RDMAOR_INT | \
+ MACEISA_SERIAL2_INT | \
+ MACEISA_SERIAL2_TDMAT_INT | \
+ MACEISA_SERIAL2_TDMAPR_INT | \
+ MACEISA_SERIAL2_TDMAME_INT | \
+ MACEISA_SERIAL2_RDMAT_INT | \
+ MACEISA_SERIAL2_RDMAOR_INT)
+
+static unsigned long maceisa_mask;
+
+static void enable_maceisa_irq(struct irq_data *d)
+{
+ unsigned int crime_int = 0;
+
+ pr_debug("maceisa enable: %u\n", d->irq);
+
+ switch (d->irq) {
+ case MACEISA_AUDIO_SW_IRQ ... MACEISA_AUDIO3_MERR_IRQ:
+ crime_int = MACE_AUDIO_INT;
+ break;
+ case MACEISA_RTC_IRQ ... MACEISA_TIMER2_IRQ:
+ crime_int = MACE_MISC_INT;
+ break;
+ case MACEISA_PARALLEL_IRQ ... MACEISA_SERIAL2_RDMAOR_IRQ:
+ crime_int = MACE_SUPERIO_INT;
+ break;
+ }
+ pr_debug("crime_int %08x enabled\n", crime_int);
+ crime_mask |= crime_int;
+ crime->imask = crime_mask;
+ maceisa_mask |= 1 << (d->irq - MACEISA_AUDIO_SW_IRQ);
+ mace->perif.ctrl.imask = maceisa_mask;
+}
+
+static void disable_maceisa_irq(struct irq_data *d)
+{
+ unsigned int crime_int = 0;
+
+ maceisa_mask &= ~(1 << (d->irq - MACEISA_AUDIO_SW_IRQ));
+ if (!(maceisa_mask & MACEISA_AUDIO_INT))
+ crime_int |= MACE_AUDIO_INT;
+ if (!(maceisa_mask & MACEISA_MISC_INT))
+ crime_int |= MACE_MISC_INT;
+ if (!(maceisa_mask & MACEISA_SUPERIO_INT))
+ crime_int |= MACE_SUPERIO_INT;
+ crime_mask &= ~crime_int;
+ crime->imask = crime_mask;
+ flush_crime_bus();
+ mace->perif.ctrl.imask = maceisa_mask;
+ flush_mace_bus();
+}
+
+static void mask_and_ack_maceisa_irq(struct irq_data *d)
+{
+ unsigned long mace_int;
+
+ /* edge triggered */
+ mace_int = mace->perif.ctrl.istat;
+ mace_int &= ~(1 << (d->irq - MACEISA_AUDIO_SW_IRQ));
+ mace->perif.ctrl.istat = mace_int;
+
+ disable_maceisa_irq(d);
+}
+
+static struct irq_chip ip32_maceisa_level_interrupt = {
+ .name = "IP32 MACE ISA",
+ .irq_mask = disable_maceisa_irq,
+ .irq_unmask = enable_maceisa_irq,
+};
+
+static struct irq_chip ip32_maceisa_edge_interrupt = {
+ .name = "IP32 MACE ISA",
+ .irq_ack = mask_and_ack_maceisa_irq,
+ .irq_mask = disable_maceisa_irq,
+ .irq_mask_ack = mask_and_ack_maceisa_irq,
+ .irq_unmask = enable_maceisa_irq,
+};
+
+/* This is used for regular non-ISA, non-PCI MACE interrupts. That means
+ * bits 0-3 and 7 in the CRIME register.
+ */
+
+static void enable_mace_irq(struct irq_data *d)
+{
+ unsigned int bit = d->irq - CRIME_IRQ_BASE;
+
+ crime_mask |= (1 << bit);
+ crime->imask = crime_mask;
+}
+
+static void disable_mace_irq(struct irq_data *d)
+{
+ unsigned int bit = d->irq - CRIME_IRQ_BASE;
+
+ crime_mask &= ~(1 << bit);
+ crime->imask = crime_mask;
+ flush_crime_bus();
+}
+
+static struct irq_chip ip32_mace_interrupt = {
+ .name = "IP32 MACE",
+ .irq_mask = disable_mace_irq,
+ .irq_unmask = enable_mace_irq,
+};
+
+static void ip32_unknown_interrupt(void)
+{
+ printk("Unknown interrupt occurred!\n");
+ printk("cp0_status: %08x\n", read_c0_status());
+ printk("cp0_cause: %08x\n", read_c0_cause());
+ printk("CRIME intr mask: %016lx\n", crime->imask);
+ printk("CRIME intr status: %016lx\n", crime->istat);
+ printk("CRIME hardware intr register: %016lx\n", crime->hard_int);
+ printk("MACE ISA intr mask: %08lx\n", mace->perif.ctrl.imask);
+ printk("MACE ISA intr status: %08lx\n", mace->perif.ctrl.istat);
+ printk("MACE PCI control register: %08x\n", mace->pci.control);
+
+ printk("Register dump:\n");
+ show_regs(get_irq_regs());
+
+ printk("Please mail this report to linux-mips@linux-mips.org\n");
+ printk("Spinning...");
+ while(1) ;
+}
+
+/* CRIME 1.1 appears to deliver all interrupts to this one pin. */
+/* TODO: change this to loop over all edge-triggered irqs, except masked-out ones */
+static void ip32_irq0(void)
+{
+ uint64_t crime_int;
+ int irq = 0;
+
+ /*
+ * Sanity check interrupt numbering enum.
+ * MACE has 32 interrupts, and there are 32 MACE ISA interrupts daisy
+ * chained behind it.
+ */
+ BUILD_BUG_ON(CRIME_VICE_IRQ - MACE_VID_IN1_IRQ != 31);
+ BUILD_BUG_ON(MACEISA_SERIAL2_RDMAOR_IRQ - MACEISA_AUDIO_SW_IRQ != 31);
+
+ crime_int = crime->istat & crime_mask;
+
+ /* CRIME sometimes delivers spurious interrupts, ignore them */
+ if (unlikely(crime_int == 0))
+ return;
+
+ irq = MACE_VID_IN1_IRQ + __ffs(crime_int);
+
+ if (crime_int & CRIME_MACEISA_INT_MASK) {
+ unsigned long mace_int = mace->perif.ctrl.istat;
+ irq = __ffs(mace_int & maceisa_mask) + MACEISA_AUDIO_SW_IRQ;
+ }
+
+ pr_debug("*irq %u*\n", irq);
+ do_IRQ(irq);
+}
+
+static void ip32_irq1(void)
+{
+ ip32_unknown_interrupt();
+}
+
+static void ip32_irq2(void)
+{
+ ip32_unknown_interrupt();
+}
+
+static void ip32_irq3(void)
+{
+ ip32_unknown_interrupt();
+}
+
+static void ip32_irq4(void)
+{
+ ip32_unknown_interrupt();
+}
+
+static void ip32_irq5(void)
+{
+ do_IRQ(MIPS_CPU_IRQ_BASE + 7);
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned int pending = read_c0_status() & read_c0_cause();
+
+ if (likely(pending & IE_IRQ0))
+ ip32_irq0();
+ else if (unlikely(pending & IE_IRQ1))
+ ip32_irq1();
+ else if (unlikely(pending & IE_IRQ2))
+ ip32_irq2();
+ else if (unlikely(pending & IE_IRQ3))
+ ip32_irq3();
+ else if (unlikely(pending & IE_IRQ4))
+ ip32_irq4();
+ else if (likely(pending & IE_IRQ5))
+ ip32_irq5();
+}
+
+void __init arch_init_irq(void)
+{
+ unsigned int irq;
+
+ /* Install our interrupt handler, then clear and disable all
+ * CRIME and MACE interrupts. */
+ crime->imask = 0;
+ crime->hard_int = 0;
+ crime->soft_int = 0;
+ mace->perif.ctrl.istat = 0;
+ mace->perif.ctrl.imask = 0;
+
+ mips_cpu_irq_init();
+ for (irq = CRIME_IRQ_BASE; irq <= IP32_IRQ_MAX; irq++) {
+ switch (irq) {
+ case MACE_VID_IN1_IRQ ... MACE_PCI_BRIDGE_IRQ:
+ irq_set_chip_and_handler_name(irq,
+ &ip32_mace_interrupt,
+ handle_level_irq,
+ "level");
+ break;
+
+ case MACEPCI_SCSI0_IRQ ... MACEPCI_SHARED2_IRQ:
+ irq_set_chip_and_handler_name(irq,
+ &ip32_macepci_interrupt,
+ handle_level_irq,
+ "level");
+ break;
+
+ case CRIME_CPUERR_IRQ:
+ case CRIME_MEMERR_IRQ:
+ irq_set_chip_and_handler_name(irq,
+ &crime_level_interrupt,
+ handle_level_irq,
+ "level");
+ break;
+
+ case CRIME_GBE0_IRQ ... CRIME_GBE3_IRQ:
+ case CRIME_RE_EMPTY_E_IRQ ... CRIME_RE_IDLE_E_IRQ:
+ case CRIME_SOFT0_IRQ ... CRIME_SOFT2_IRQ:
+ case CRIME_VICE_IRQ:
+ irq_set_chip_and_handler_name(irq,
+ &crime_edge_interrupt,
+ handle_edge_irq,
+ "edge");
+ break;
+
+ case MACEISA_PARALLEL_IRQ:
+ case MACEISA_SERIAL1_TDMAPR_IRQ:
+ case MACEISA_SERIAL2_TDMAPR_IRQ:
+ irq_set_chip_and_handler_name(irq,
+ &ip32_maceisa_edge_interrupt,
+ handle_edge_irq,
+ "edge");
+ break;
+
+ default:
+ irq_set_chip_and_handler_name(irq,
+ &ip32_maceisa_level_interrupt,
+ handle_level_irq,
+ "level");
+ break;
+ }
+ }
+ if (request_irq(CRIME_MEMERR_IRQ, crime_memerr_intr, 0,
+ "CRIME memory error", NULL))
+ pr_err("Failed to register CRIME memory error interrupt\n");
+ if (request_irq(CRIME_CPUERR_IRQ, crime_cpuerr_intr, 0,
+ "CRIME CPU error", NULL))
+ pr_err("Failed to register CRIME CPU error interrupt\n");
+
+#define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5)
+ change_c0_status(ST0_IM, ALLINTS);
+}
diff --git a/arch/mips/sgi-ip32/ip32-memory.c b/arch/mips/sgi-ip32/ip32-memory.c
new file mode 100644
index 000000000..0f53fed39
--- /dev/null
+++ b/arch/mips/sgi-ip32/ip32-memory.c
@@ -0,0 +1,47 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Keith M Wesolowski
+ * Copyright (C) 2005 Ilya A. Volynets (Total Knowledge)
+ */
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/mm.h>
+
+#include <asm/ip32/crime.h>
+#include <asm/bootinfo.h>
+#include <asm/page.h>
+
+extern void crime_init(void);
+
+void __init prom_meminit(void)
+{
+ u64 base, size;
+ int bank;
+
+ crime_init();
+
+ for (bank=0; bank < CRIME_MAXBANKS; bank++) {
+ u64 bankctl = crime->bank_ctrl[bank];
+ base = (bankctl & CRIME_MEM_BANK_CONTROL_ADDR) << 25;
+ if (bank != 0 && base == 0)
+ continue;
+ size = (bankctl & CRIME_MEM_BANK_CONTROL_SDRAM_SIZE) ? 128 : 32;
+ size <<= 20;
+ if (base + size > (256 << 20))
+ base += CRIME_HI_MEM_BASE;
+
+ printk("CRIME MC: bank %u base 0x%016Lx size %LuMiB\n",
+ bank, base, size >> 20);
+ memblock_add(base, size);
+ }
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/sgi-ip32/ip32-platform.c b/arch/mips/sgi-ip32/ip32-platform.c
new file mode 100644
index 000000000..c3909bd8d
--- /dev/null
+++ b/arch/mips/sgi-ip32/ip32-platform.c
@@ -0,0 +1,138 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+#include <linux/rtc/ds1685.h>
+
+#include <asm/ip32/mace.h>
+#include <asm/ip32/ip32_ints.h>
+
+extern void ip32_prepare_poweroff(void);
+
+#define MACEISA_SERIAL1_OFFS offsetof(struct sgi_mace, isa.serial1)
+#define MACEISA_SERIAL2_OFFS offsetof(struct sgi_mace, isa.serial2)
+
+#define MACE_PORT(offset,_irq) \
+{ \
+ .mapbase = MACE_BASE + offset, \
+ .irq = _irq, \
+ .uartclk = 1843200, \
+ .iotype = UPIO_MEM, \
+ .flags = UPF_SKIP_TEST|UPF_IOREMAP, \
+ .regshift = 8, \
+}
+
+static struct plat_serial8250_port uart8250_data[] = {
+ MACE_PORT(MACEISA_SERIAL1_OFFS, MACEISA_SERIAL1_IRQ),
+ MACE_PORT(MACEISA_SERIAL2_OFFS, MACEISA_SERIAL2_IRQ),
+ { },
+};
+
+static struct platform_device uart8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = uart8250_data,
+ },
+};
+
+static int __init uart8250_init(void)
+{
+ return platform_device_register(&uart8250_device);
+}
+
+device_initcall(uart8250_init);
+
+static __init int meth_devinit(void)
+{
+ struct platform_device *pd;
+ int ret;
+
+ pd = platform_device_alloc("meth", -1);
+ if (!pd)
+ return -ENOMEM;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ platform_device_put(pd);
+
+ return ret;
+}
+
+device_initcall(meth_devinit);
+
+static __init int sgio2audio_devinit(void)
+{
+ struct platform_device *pd;
+ int ret;
+
+ pd = platform_device_alloc("sgio2audio", -1);
+ if (!pd)
+ return -ENOMEM;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ platform_device_put(pd);
+
+ return ret;
+}
+
+device_initcall(sgio2audio_devinit);
+
+static __init int sgio2btns_devinit(void)
+{
+ return IS_ERR(platform_device_register_simple("sgibtns", -1, NULL, 0));
+}
+
+device_initcall(sgio2btns_devinit);
+
+#define MACE_RTC_RES_START (MACE_BASE + offsetof(struct sgi_mace, isa.rtc))
+#define MACE_RTC_RES_END (MACE_RTC_RES_START + 32767)
+
+static struct resource ip32_rtc_resources[] = {
+ {
+ .start = MACEISA_RTC_IRQ,
+ .end = MACEISA_RTC_IRQ,
+ .flags = IORESOURCE_IRQ
+ }, {
+ .start = MACE_RTC_RES_START,
+ .end = MACE_RTC_RES_END,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+/* RTC registers on IP32 are each padded by 256 bytes (0x100). */
+static struct ds1685_rtc_platform_data
+ip32_rtc_platform_data[] = {
+ {
+ .regstep = 0x100,
+ .bcd_mode = true,
+ .no_irq = false,
+ .uie_unsupported = false,
+ .access_type = ds1685_reg_direct,
+ .plat_prepare_poweroff = ip32_prepare_poweroff,
+ },
+};
+
+struct platform_device ip32_rtc_device = {
+ .name = "rtc-ds1685",
+ .id = -1,
+ .dev = {
+ .platform_data = ip32_rtc_platform_data,
+ },
+ .num_resources = ARRAY_SIZE(ip32_rtc_resources),
+ .resource = ip32_rtc_resources,
+};
+
+static __init int sgio2_rtc_devinit(void)
+{
+ return platform_device_register(&ip32_rtc_device);
+}
+
+device_initcall(sgio2_rtc_devinit);
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
new file mode 100644
index 000000000..20d863734
--- /dev/null
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -0,0 +1,152 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 Keith M Wesolowski
+ * Copyright (C) 2001 Paul Mundt
+ * Copyright (C) 2003 Guido Guenther <agx@sigxcpu.org>
+ */
+
+#include <linux/compiler.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/notifier.h>
+#include <linux/delay.h>
+#include <linux/rtc/ds1685.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+
+#include <asm/addrspace.h>
+#include <asm/irq.h>
+#include <asm/reboot.h>
+#include <asm/wbflush.h>
+#include <asm/ip32/mace.h>
+#include <asm/ip32/crime.h>
+#include <asm/ip32/ip32_ints.h>
+
+#define POWERDOWN_TIMEOUT 120
+/*
+ * Blink frequency during reboot grace period and when panicked.
+ */
+#define POWERDOWN_FREQ (HZ / 4)
+#define PANIC_FREQ (HZ / 8)
+
+extern struct platform_device ip32_rtc_device;
+
+static struct timer_list power_timer, blink_timer;
+static unsigned long blink_timer_timeout;
+static int has_panicked, shutting_down;
+
+static __noreturn void ip32_poweroff(void *data)
+{
+ void (*poweroff_func)(struct platform_device *) =
+ symbol_get(ds1685_rtc_poweroff);
+
+#ifdef CONFIG_MODULES
+ /* If the first symbol_get() failed, the rtc-ds1685 module wasn't loaded. */
+ if (!poweroff_func) {
+ request_module("rtc-ds1685");
+ poweroff_func = symbol_get(ds1685_rtc_poweroff);
+ }
+#endif
+
+ if (!poweroff_func)
+ pr_emerg("RTC not available for power-off. Spinning forever ...\n");
+ else {
+ (*poweroff_func)((struct platform_device *)data);
+ symbol_put(ds1685_rtc_poweroff);
+ }
+
+ unreachable();
+}
+
+static void ip32_machine_restart(char *cmd) __noreturn;
+static void ip32_machine_restart(char *cmd)
+{
+ msleep(20);
+ crime->control = CRIME_CONTROL_HARD_RESET;
+ unreachable();
+}
+
+static void blink_timeout(struct timer_list *unused)
+{
+ unsigned long led = mace->perif.ctrl.misc ^ MACEISA_LED_RED;
+ mace->perif.ctrl.misc = led;
+ mod_timer(&blink_timer, jiffies + blink_timer_timeout);
+}
+
+static void ip32_machine_halt(void)
+{
+ ip32_poweroff(&ip32_rtc_device);
+}
+
+static void power_timeout(struct timer_list *unused)
+{
+ ip32_poweroff(&ip32_rtc_device);
+}
+
+void ip32_prepare_poweroff(void)
+{
+ if (has_panicked)
+ return;
+
+ if (shutting_down || kill_cad_pid(SIGINT, 1)) {
+ /* No init process or button pressed twice. */
+ ip32_poweroff(&ip32_rtc_device);
+ }
+
+ shutting_down = 1;
+ blink_timer_timeout = POWERDOWN_FREQ;
+ blink_timeout(&blink_timer);
+
+ timer_setup(&power_timer, power_timeout, 0);
+ power_timer.expires = jiffies + POWERDOWN_TIMEOUT * HZ;
+ add_timer(&power_timer);
+}
+
+static int panic_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ unsigned long led;
+
+ if (has_panicked)
+ return NOTIFY_DONE;
+ has_panicked = 1;
+
+ /* turn off the green LED */
+ led = mace->perif.ctrl.misc | MACEISA_LED_GREEN;
+ mace->perif.ctrl.misc = led;
+
+ blink_timer_timeout = PANIC_FREQ;
+ blink_timeout(&blink_timer);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_block = {
+ .notifier_call = panic_event,
+};
+
+static __init int ip32_reboot_setup(void)
+{
+ /* turn on the green led only */
+ unsigned long led = mace->perif.ctrl.misc;
+ led |= MACEISA_LED_RED;
+ led &= ~MACEISA_LED_GREEN;
+ mace->perif.ctrl.misc = led;
+
+ _machine_restart = ip32_machine_restart;
+ _machine_halt = ip32_machine_halt;
+ pm_power_off = ip32_machine_halt;
+
+ timer_setup(&blink_timer, blink_timeout, 0);
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
+ return 0;
+}
+
+subsys_initcall(ip32_reboot_setup);
diff --git a/arch/mips/sgi-ip32/ip32-setup.c b/arch/mips/sgi-ip32/ip32-setup.c
new file mode 100644
index 000000000..8019dae17
--- /dev/null
+++ b/arch/mips/sgi-ip32/ip32-setup.c
@@ -0,0 +1,100 @@
+/*
+ * IP32 basic setup
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Harald Koerfgen
+ * Copyright (C) 2002, 2003, 2005 Ilya A. Volynets
+ * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/param.h>
+#include <linux/sched.h>
+
+#include <asm/bootinfo.h>
+#include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
+#include <asm/sgialib.h>
+#include <asm/time.h>
+#include <asm/traps.h>
+#include <asm/io.h>
+#include <asm/ip32/crime.h>
+#include <asm/ip32/mace.h>
+#include <asm/ip32/ip32_ints.h>
+
+extern void ip32_be_init(void);
+extern void crime_init(void);
+
+#ifdef CONFIG_SGI_O2MACE_ETH
+/*
+ * The MAC address is fetched here because calling the ARC firmware later
+ * in the boot process is problematic.
+ */
+extern char o2meth_eaddr[8];
+static inline unsigned char str2hexnum(unsigned char c)
+{
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ return 0; /* not a hex digit */
+}
+
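+/*
+ * Parse a colon-separated MAC address string ("aa:bb:cc:dd:ee:ff") taken
+ * from the ARC "eaddr" environment variable.
+ */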
+static inline void str2eaddr(unsigned char *ea, unsigned char *str)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ unsigned char num;
+
+ if(*str == ':')
+ str++;
+ num = str2hexnum(*str++) << 4;
+ num |= (str2hexnum(*str++));
+ ea[i] = num;
+ }
+}
+#endif
+
+/* An arbitrary time; this can be decreased if reliability looks good */
+#define WAIT_MS 10
+
+void __init plat_time_init(void)
+{
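+	/*
+	 * Run the CP0 counter against the fixed-rate CRIME timer for WAIT_MS
+	 * to derive the counter frequency; the counter is assumed to tick at
+	 * half the CPU clock, hence the *2 when the CPU speed is printed.
+	 */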
+ printk(KERN_INFO "Calibrating system timer... ");
+ write_c0_count(0);
+ crime->timer = 0;
+ while (crime->timer < CRIME_MASTER_FREQ * WAIT_MS / 1000) ;
+ mips_hpt_frequency = read_c0_count() * 1000 / WAIT_MS;
+ printk("%d MHz CPU detected\n", mips_hpt_frequency * 2 / 1000000);
+}
+
+void __init plat_mem_setup(void)
+{
+ board_be_init = ip32_be_init;
+
+#ifdef CONFIG_SGI_O2MACE_ETH
+ {
+ char *mac = ArcGetEnvironmentVariable("eaddr");
+ str2eaddr(o2meth_eaddr, mac);
+ }
+#endif
+
+#if defined(CONFIG_SERIAL_CORE_CONSOLE)
+ {
+ char* con = ArcGetEnvironmentVariable("console");
+ if (con && *con == 'd') {
+ static char options[8] __initdata;
+ char *baud = ArcGetEnvironmentVariable("dbaud");
+ if (baud)
+ strcpy(options, baud);
+ add_preferred_console("ttyS", *(con + 1) == '2' ? 1 : 0,
+ baud ? options : NULL);
+ }
+ }
+#endif
+}
diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig
new file mode 100644
index 000000000..470d46183
--- /dev/null
+++ b/arch/mips/sibyte/Kconfig
@@ -0,0 +1,167 @@
+# SPDX-License-Identifier: GPL-2.0
+config SIBYTE_SB1250
+ bool
+ select CEVT_SB1250
+ select CSRC_SB1250
+ select HAVE_PCI
+ select IRQ_MIPS_CPU
+ select SIBYTE_ENABLE_LDT_IF_PCI
+ select SIBYTE_HAS_ZBUS_PROFILING
+ select SIBYTE_SB1xxx_SOC
+ select SYS_SUPPORTS_SMP
+
+config SIBYTE_BCM1120
+ bool
+ select CEVT_SB1250
+ select CSRC_SB1250
+ select IRQ_MIPS_CPU
+ select SIBYTE_BCM112X
+ select SIBYTE_HAS_ZBUS_PROFILING
+ select SIBYTE_SB1xxx_SOC
+
+config SIBYTE_BCM1125
+ bool
+ select CEVT_SB1250
+ select CSRC_SB1250
+ select HAVE_PCI
+ select IRQ_MIPS_CPU
+ select SIBYTE_BCM112X
+ select SIBYTE_HAS_ZBUS_PROFILING
+ select SIBYTE_SB1xxx_SOC
+
+config SIBYTE_BCM1125H
+ bool
+ select CEVT_SB1250
+ select CSRC_SB1250
+ select HAVE_PCI
+ select IRQ_MIPS_CPU
+ select SIBYTE_BCM112X
+ select SIBYTE_ENABLE_LDT_IF_PCI
+ select SIBYTE_HAS_ZBUS_PROFILING
+ select SIBYTE_SB1xxx_SOC
+
+config SIBYTE_BCM112X
+ bool
+ select CEVT_SB1250
+ select CSRC_SB1250
+ select IRQ_MIPS_CPU
+ select SIBYTE_SB1xxx_SOC
+ select SIBYTE_HAS_ZBUS_PROFILING
+
+config SIBYTE_BCM1x80
+ bool
+ select CEVT_BCM1480
+ select CSRC_BCM1480
+ select HAVE_PCI
+ select IRQ_MIPS_CPU
+ select SIBYTE_HAS_ZBUS_PROFILING
+ select SIBYTE_SB1xxx_SOC
+ select SYS_SUPPORTS_SMP
+
+config SIBYTE_BCM1x55
+ bool
+ select CEVT_BCM1480
+ select CSRC_BCM1480
+ select HAVE_PCI
+ select IRQ_MIPS_CPU
+ select SIBYTE_SB1xxx_SOC
+ select SIBYTE_HAS_ZBUS_PROFILING
+ select SYS_SUPPORTS_SMP
+
+config SIBYTE_SB1xxx_SOC
+ bool
+ select IRQ_MIPS_CPU
+ select SWAP_IO_SPACE
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select FW_CFE
+ select SYS_HAS_EARLY_PRINTK
+
+choice
+ prompt "SiByte SOC Stepping"
+ depends on SIBYTE_SB1xxx_SOC
+
+config CPU_SB1_PASS_2_1250
+ bool "1250 An"
+ depends on SIBYTE_SB1250
+ select CPU_SB1_PASS_2
+ help
+ Also called BCM1250 Pass 2
+
+config CPU_SB1_PASS_2_2
+ bool "1250 Bn"
+ depends on SIBYTE_SB1250
+ select CPU_HAS_PREFETCH
+ help
+ Also called BCM1250 Pass 2.2
+
+config CPU_SB1_PASS_4
+ bool "1250 Cn"
+ depends on SIBYTE_SB1250
+ select CPU_HAS_PREFETCH
+ help
+ Also called BCM1250 Pass 3
+
+config CPU_SB1_PASS_2_112x
+ bool "112x Hybrid"
+ depends on SIBYTE_BCM112X
+ select CPU_SB1_PASS_2
+
+config CPU_SB1_PASS_3
+ bool "112x An"
+ depends on SIBYTE_BCM112X
+ select CPU_HAS_PREFETCH
+
+endchoice
+
+config CPU_SB1_PASS_2
+ bool
+
+config SIBYTE_HAS_LDT
+ bool
+
+config SIBYTE_ENABLE_LDT_IF_PCI
+ bool
+ select SIBYTE_HAS_LDT if PCI
+
+config SB1_CEX_ALWAYS_FATAL
+ bool "All cache exceptions considered fatal (no recovery attempted)"
+ depends on SIBYTE_SB1xxx_SOC
+
+config SB1_CERR_STALL
+ bool "Stall (rather than panic) on fatal cache error"
+ depends on SIBYTE_SB1xxx_SOC
+
+config SIBYTE_CFE_CONSOLE
+ bool "Use firmware console"
+ depends on SIBYTE_SB1xxx_SOC
+ help
+ Use the CFE API's console write routines during boot. Other console
+ options (VT console, sb1250 duart console, etc.) should not be
+ configured.
+
+config SIBYTE_BUS_WATCHER
+ bool "Support for Bus Watcher statistics"
+ depends on SIBYTE_SB1xxx_SOC && \
+ (SIBYTE_BCM112X || SIBYTE_SB1250 || \
+ SIBYTE_BCM1x55 || SIBYTE_BCM1x80)
+ help
+ Handle and keep statistics on the bus error interrupts (COR_ECC,
+ BAD_ECC, IO_BUS).
+
+config SIBYTE_BW_TRACE
+ bool "Capture bus trace before bus error"
+ depends on SIBYTE_BUS_WATCHER
+ help
+ Run a continuous bus trace, dumping the raw data as soon as
+ a ZBbus error is detected. Cannot work if ZBbus profiling
+ is turned on, and also will interfere with JTAG-based trace
+ buffer activity. Raw buffer data is dumped to console, and
+ must be processed off-line.
+
+config SIBYTE_TBPROF
+ tristate "Support for ZBbus profiling"
+ depends on SIBYTE_HAS_ZBUS_PROFILING
+
+config SIBYTE_HAS_ZBUS_PROFILING
+ bool
diff --git a/arch/mips/sibyte/Makefile b/arch/mips/sibyte/Makefile
new file mode 100644
index 000000000..d015c4d79
--- /dev/null
+++ b/arch/mips/sibyte/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Sibyte SB1250 / BCM1480 family of SOCs
+#
+obj-$(CONFIG_SIBYTE_BCM112X) += sb1250/
+obj-$(CONFIG_SIBYTE_BCM112X) += common/
+obj-$(CONFIG_SIBYTE_SB1250) += sb1250/
+obj-$(CONFIG_SIBYTE_SB1250) += common/
+obj-$(CONFIG_SIBYTE_BCM1x55) += bcm1480/
+obj-$(CONFIG_SIBYTE_BCM1x55) += common/
+obj-$(CONFIG_SIBYTE_BCM1x80) += bcm1480/
+obj-$(CONFIG_SIBYTE_BCM1x80) += common/
+
+#
+# Sibyte BCM91120x (Carmel) board
+# Sibyte BCM91120C (CRhine) board
+# Sibyte BCM91125C (CRhone) board
+# Sibyte BCM91125E (Rhone) board
+# Sibyte SWARM board
+# Sibyte BCM91x80 (BigSur) board
+#
+obj-$(CONFIG_SIBYTE_CARMEL) += swarm/
+obj-$(CONFIG_SIBYTE_CRHINE) += swarm/
+obj-$(CONFIG_SIBYTE_CRHONE) += swarm/
+obj-$(CONFIG_SIBYTE_RHONE) += swarm/
+obj-$(CONFIG_SIBYTE_SENTOSA) += swarm/
+obj-$(CONFIG_SIBYTE_SWARM) += swarm/
+obj-$(CONFIG_SIBYTE_BIGSUR) += swarm/
+obj-$(CONFIG_SIBYTE_LITTLESUR) += swarm/
diff --git a/arch/mips/sibyte/Platform b/arch/mips/sibyte/Platform
new file mode 100644
index 000000000..65b2225b7
--- /dev/null
+++ b/arch/mips/sibyte/Platform
@@ -0,0 +1,40 @@
+#
+# These are all rather similar so we consider them a single platform
+#
+
+#
+# Sibyte SB1250 / BCM1480 family of SOCs
+#
+cflags-$(CONFIG_SIBYTE_BCM112X) += \
+ -I$(srctree)/arch/mips/include/asm/mach-sibyte \
+ -DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1250_112x_ALL
+
+cflags-$(CONFIG_SIBYTE_SB1250) += \
+ -I$(srctree)/arch/mips/include/asm/mach-sibyte \
+ -DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1250_112x_ALL
+
+cflags-$(CONFIG_SIBYTE_BCM1x55) += \
+ -I$(srctree)/arch/mips/include/asm/mach-sibyte \
+ -DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1480_ALL
+
+cflags-$(CONFIG_SIBYTE_BCM1x80) += \
+ -I$(srctree)/arch/mips/include/asm/mach-sibyte \
+ -DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1480_ALL
+
+#
+# Sibyte BCM91120x (Carmel) board
+# Sibyte BCM91120C (CRhine) board
+# Sibyte BCM91125C (CRhone) board
+# Sibyte BCM91125E (Rhone) board
+# Sibyte BCM91250A (SWARM) board
+# Sibyte BCM91250C2 (LittleSur) board
+# Sibyte BCM91x80 (BigSur) board
+#
+load-$(CONFIG_SIBYTE_CARMEL) := 0xffffffff80100000
+load-$(CONFIG_SIBYTE_CRHINE) := 0xffffffff80100000
+load-$(CONFIG_SIBYTE_CRHONE) := 0xffffffff80100000
+load-$(CONFIG_SIBYTE_RHONE) := 0xffffffff80100000
+load-$(CONFIG_SIBYTE_SENTOSA) := 0xffffffff80100000
+load-$(CONFIG_SIBYTE_SWARM) := 0xffffffff80100000
+load-$(CONFIG_SIBYTE_BIGSUR) := 0xffffffff80100000
+load-$(CONFIG_SIBYTE_LITTLESUR) := 0xffffffff80100000
diff --git a/arch/mips/sibyte/bcm1480/Makefile b/arch/mips/sibyte/bcm1480/Makefile
new file mode 100644
index 000000000..cf1327d3f
--- /dev/null
+++ b/arch/mips/sibyte/bcm1480/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y := setup.o irq.o time.o
+
+obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
new file mode 100644
index 000000000..7929bee91
--- /dev/null
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000,2001,2002,2003,2004 Broadcom Corporation
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/errno.h>
+#include <asm/irq_regs.h>
+#include <asm/signal.h>
+#include <asm/io.h>
+
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+#include <asm/sibyte/bcm1480_scd.h>
+
+#include <asm/sibyte/sb1250_uart.h>
+#include <asm/sibyte/sb1250.h>
+
+/*
+ * These are the routines that handle all the low level interrupt stuff.
+ * Actions handled here are: initialization of the interrupt map, requesting of
+ * interrupt lines by handlers, dispatching if interrupts to handlers, probing
+ * for interrupt lines
+ */
+
+#ifdef CONFIG_PCI
+extern unsigned long ht_eoi_space;
+#endif
+
+/* Store the CPU id (not the logical number) */
+int bcm1480_irq_owner[BCM1480_NR_IRQS];
+
+static DEFINE_RAW_SPINLOCK(bcm1480_imr_lock);
+
+void bcm1480_mask_irq(int cpu, int irq)
+{
+ unsigned long flags, hl_spacing;
+ u64 cur_ints;
+
+ raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);
+ hl_spacing = 0;
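+	/*
+	 * IRQs in the upper half are controlled by the _L mask register,
+	 * which sits BCM1480_IMR_HL_SPACING bytes past the _H register.
+	 */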
+ if ((irq >= BCM1480_NR_IRQS_HALF) && (irq <= BCM1480_NR_IRQS)) {
+ hl_spacing = BCM1480_IMR_HL_SPACING;
+ irq -= BCM1480_NR_IRQS_HALF;
+ }
+ cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
+ cur_ints |= (((u64) 1) << irq);
+ ____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
+ raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
+}
+
+void bcm1480_unmask_irq(int cpu, int irq)
+{
+ unsigned long flags, hl_spacing;
+ u64 cur_ints;
+
+ raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);
+ hl_spacing = 0;
+ if ((irq >= BCM1480_NR_IRQS_HALF) && (irq <= BCM1480_NR_IRQS)) {
+ hl_spacing = BCM1480_IMR_HL_SPACING;
+ irq -= BCM1480_NR_IRQS_HALF;
+ }
+ cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
+ cur_ints &= ~(((u64) 1) << irq);
+ ____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
+ raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
+}
+
+#ifdef CONFIG_SMP
+static int bcm1480_set_affinity(struct irq_data *d, const struct cpumask *mask,
+ bool force)
+{
+ unsigned int irq_dirty, irq = d->irq;
+ int i = 0, old_cpu, cpu, int_on, k;
+ u64 cur_ints;
+ unsigned long flags;
+
+ i = cpumask_first_and(mask, cpu_online_mask);
+
+ /* Convert logical CPU to physical CPU */
+ cpu = cpu_logical_map(i);
+
+ /* Protect against other affinity changers and IMR manipulation */
+ raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);
+
+ /* Swizzle each CPU's IMR (but leave the IP selection alone) */
+ old_cpu = bcm1480_irq_owner[irq];
+ irq_dirty = irq;
+ if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
+ irq_dirty -= BCM1480_NR_IRQS_HALF;
+ }
+
+ for (k=0; k<2; k++) { /* Loop through high and low interrupt mask register */
+ cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
+ int_on = !(cur_ints & (((u64) 1) << irq_dirty));
+ if (int_on) {
+ /* If it was on, mask it */
+ cur_ints |= (((u64) 1) << irq_dirty);
+ ____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
+ }
+ bcm1480_irq_owner[irq] = cpu;
+ if (int_on) {
+ /* unmask for the new CPU */
+ cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
+ cur_ints &= ~(((u64) 1) << irq_dirty);
+ ____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
+ }
+ }
+ raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
+
+ return 0;
+}
+#endif
+
+
+/*****************************************************************************/
+
+static void disable_bcm1480_irq(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+
+ bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
+}
+
+static void enable_bcm1480_irq(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+
+ bcm1480_unmask_irq(bcm1480_irq_owner[irq], irq);
+}
+
+
+static void ack_bcm1480_irq(struct irq_data *d)
+{
+ unsigned int irq_dirty, irq = d->irq;
+ u64 pending;
+ int k;
+
+ /*
+ * If the interrupt was an HT interrupt, now is the time to
+ * clear it. NOTE: we assume the HT bridge was set up to
+ * deliver the interrupts to all CPUs (which makes affinity
+ * changing easier for us)
+ */
+ irq_dirty = irq;
+ if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
+ irq_dirty -= BCM1480_NR_IRQS_HALF;
+ }
+ for (k=0; k<2; k++) { /* Loop through high and low LDT interrupts */
+ pending = __raw_readq(IOADDR(A_BCM1480_IMR_REGISTER(bcm1480_irq_owner[irq],
+ R_BCM1480_IMR_LDT_INTERRUPT_H + (k*BCM1480_IMR_HL_SPACING))));
+ pending &= ((u64)1 << (irq_dirty));
+ if (pending) {
+#ifdef CONFIG_SMP
+ int i;
+ for (i=0; i<NR_CPUS; i++) {
+ /*
+ * Clear for all CPUs so an affinity switch
+ * doesn't find an old status
+ */
+ __raw_writeq(pending, IOADDR(A_BCM1480_IMR_REGISTER(cpu_logical_map(i),
+ R_BCM1480_IMR_LDT_INTERRUPT_CLR_H + (k*BCM1480_IMR_HL_SPACING))));
+ }
+#else
+ __raw_writeq(pending, IOADDR(A_BCM1480_IMR_REGISTER(0, R_BCM1480_IMR_LDT_INTERRUPT_CLR_H + (k*BCM1480_IMR_HL_SPACING))));
+#endif
+
+ /*
+ * Generate EOI. For Pass 1 parts, EOI is a nop. For
+ * Pass 2, the LDT world may be edge-triggered, but
+ * this EOI shouldn't hurt. If they are
+ * level-sensitive, the EOI is required.
+ */
+#ifdef CONFIG_PCI
+ if (ht_eoi_space)
+ *(uint32_t *)(ht_eoi_space+(irq<<16)+(7<<2)) = 0;
+#endif
+ }
+ }
+ bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
+}
+
+static struct irq_chip bcm1480_irq_type = {
+ .name = "BCM1480-IMR",
+ .irq_mask_ack = ack_bcm1480_irq,
+ .irq_mask = disable_bcm1480_irq,
+ .irq_unmask = enable_bcm1480_irq,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = bcm1480_set_affinity
+#endif
+};
+
+void __init init_bcm1480_irqs(void)
+{
+ int i;
+
+ for (i = 0; i < BCM1480_NR_IRQS; i++) {
+ irq_set_chip_and_handler(i, &bcm1480_irq_type,
+ handle_level_irq);
+ bcm1480_irq_owner[i] = 0;
+ }
+}
+
+/*
+ * init_IRQ is called early in the boot sequence from init/main.c. It
+ * is responsible for setting up the interrupt mapper and installing the
+ * handler that will be responsible for dispatching interrupts to the
+ * "right" place.
+ */
+/*
+ * For now, map all interrupts to IP[2]. We could save
+ * some cycles by parceling out system interrupts to different
+ * IP lines, but keep it simple for bringup. We'll also direct
+ * all interrupts to a single CPU; we should probably route
+ * PCI and LDT to one cpu and everything else to the other
+ * to balance the load a bit.
+ *
+ * On the second cpu, everything is set to IP5, which is
+ * ignored, EXCEPT the mailbox interrupt. That one is
+ * set to IP[2] so it is handled. This is needed so we
+ * can do cross-cpu function calls, as required by SMP
+ */
+
+#define IMR_IP2_VAL K_BCM1480_INT_MAP_I0
+#define IMR_IP3_VAL K_BCM1480_INT_MAP_I1
+#define IMR_IP4_VAL K_BCM1480_INT_MAP_I2
+#define IMR_IP5_VAL K_BCM1480_INT_MAP_I3
+#define IMR_IP6_VAL K_BCM1480_INT_MAP_I4
+
+void __init arch_init_irq(void)
+{
+ unsigned int i, cpu;
+ u64 tmp;
+ unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
+ STATUSF_IP1 | STATUSF_IP0;
+
+ /* Default everything to IP2 */
+ /* Start with the _high registers, which have no bit 0 interrupt source */
+ for (i = 1; i < BCM1480_NR_IRQS_HALF; i++) { /* was I0 */
+ for (cpu = 0; cpu < 4; cpu++) {
+ __raw_writeq(IMR_IP2_VAL,
+ IOADDR(A_BCM1480_IMR_REGISTER(cpu,
+ R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) + (i << 3)));
+ }
+ }
+
+ /* Now do _low registers */
+ for (i = 0; i < BCM1480_NR_IRQS_HALF; i++) {
+ for (cpu = 0; cpu < 4; cpu++) {
+ __raw_writeq(IMR_IP2_VAL,
+ IOADDR(A_BCM1480_IMR_REGISTER(cpu,
+ R_BCM1480_IMR_INTERRUPT_MAP_BASE_L) + (i << 3)));
+ }
+ }
+
+ init_bcm1480_irqs();
+
+ /*
+ * Map the high 16 bits of mailbox_0 registers to IP[3], for
+ * inter-cpu messages
+ */
+ /* Was I1 */
+ for (cpu = 0; cpu < 4; cpu++) {
+ __raw_writeq(IMR_IP3_VAL, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) +
+ (K_BCM1480_INT_MBOX_0_0 << 3)));
+ }
+
+ /* Clear the mailboxes. The firmware may leave them dirty */
+ for (cpu = 0; cpu < 4; cpu++) {
+ __raw_writeq(0xffffffffffffffffULL,
+ IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_0_CLR_CPU)));
+ __raw_writeq(0xffffffffffffffffULL,
+ IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_1_CLR_CPU)));
+ }
+
+ /* Mask everything except the high 16 bits of mailbox_0 registers for all cpus */
+ tmp = ~((u64) 0) ^ ( (((u64) 1) << K_BCM1480_INT_MBOX_0_0));
+ for (cpu = 0; cpu < 4; cpu++) {
+ __raw_writeq(tmp, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MASK_H)));
+ }
+ tmp = ~((u64) 0);
+ for (cpu = 0; cpu < 4; cpu++) {
+ __raw_writeq(tmp, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MASK_L)));
+ }
+
+ /*
+ * Note that the timer interrupts are also mapped, but this is
+ * done in bcm1480_time_init(). Also, the profiling driver
+ * does its own management of IP7.
+ */
+
+ /* Enable necessary IPs, disable the rest */
+ change_c0_status(ST0_IM, imask);
+}
+
+extern void bcm1480_mailbox_interrupt(void);
+
+static inline void dispatch_ip2(void)
+{
+ unsigned long long mask_h, mask_l;
+ unsigned int cpu = smp_processor_id();
+ unsigned long base;
+
+ /*
+ * Default...we've hit an IP[2] interrupt, which means we've got to
+ * check the 1480 interrupt registers to figure out what to do. Need
+ * to detect which CPU we're on, now that smp_affinity is supported.
+ */
+ base = A_BCM1480_IMR_MAPPER(cpu);
+ mask_h = __raw_readq(
+ IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H));
+ mask_l = __raw_readq(
+ IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L));
+
+ if (mask_h) {
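+	/*
+	 * Bit 0 of the high status word has no interrupt source; if it is
+	 * the only bit set, fall back to the low status word.
+	 */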
+ if (mask_h ^ 1)
+ do_IRQ(fls64(mask_h) - 1);
+ else if (mask_l)
+ do_IRQ(63 + fls64(mask_l));
+ }
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned int pending;
+
+ pending = read_c0_cause() & read_c0_status();
+
+ if (pending & CAUSEF_IP4)
+ do_IRQ(K_BCM1480_INT_TIMER_0 + cpu);
+#ifdef CONFIG_SMP
+ else if (pending & CAUSEF_IP3)
+ bcm1480_mailbox_interrupt();
+#endif
+
+ else if (pending & CAUSEF_IP2)
+ dispatch_ip2();
+}
diff --git a/arch/mips/sibyte/bcm1480/setup.c b/arch/mips/sibyte/bcm1480/setup.c
new file mode 100644
index 000000000..6f34b871b
--- /dev/null
+++ b/arch/mips/sibyte/bcm1480/setup.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000,2001,2002,2003,2004 Broadcom Corporation
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/reboot.h>
+#include <linux/string.h>
+
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/io.h>
+#include <asm/sibyte/sb1250.h>
+
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/bcm1480_scd.h>
+#include <asm/sibyte/sb1250_scd.h>
+
+unsigned int sb1_pass;
+unsigned int soc_pass;
+unsigned int soc_type;
+EXPORT_SYMBOL(soc_type);
+unsigned int periph_rev;
+EXPORT_SYMBOL_GPL(periph_rev);
+unsigned int zbbus_mhz;
+EXPORT_SYMBOL(zbbus_mhz);
+
+static unsigned int part_type;
+
+static char *soc_str;
+static char *pass_str;
+
+static int __init setup_bcm1x80_bcm1x55(void)
+{
+ int ret = 0;
+
+ switch (soc_pass) {
+ case K_SYS_REVISION_BCM1480_S0:
+ periph_rev = 1;
+ pass_str = "S0 (pass1)";
+ break;
+ case K_SYS_REVISION_BCM1480_A1:
+ periph_rev = 1;
+ pass_str = "A1 (pass1)";
+ break;
+ case K_SYS_REVISION_BCM1480_A2:
+ periph_rev = 1;
+ pass_str = "A2 (pass1)";
+ break;
+ case K_SYS_REVISION_BCM1480_A3:
+ periph_rev = 1;
+ pass_str = "A3 (pass1)";
+ break;
+ case K_SYS_REVISION_BCM1480_B0:
+ periph_rev = 1;
+ pass_str = "B0 (pass2)";
+ break;
+ default:
+ printk("Unknown %s rev %x\n", soc_str, soc_pass);
+ periph_rev = 1;
+ pass_str = "Unknown Revision";
+ break;
+ }
+
+ return ret;
+}
+
+/* Setup code likely to be common to all SiByte platforms */
+
+static int __init sys_rev_decode(void)
+{
+ int ret = 0;
+
+ switch (soc_type) {
+ case K_SYS_SOC_TYPE_BCM1x80:
+ if (part_type == K_SYS_PART_BCM1480)
+ soc_str = "BCM1480";
+ else if (part_type == K_SYS_PART_BCM1280)
+ soc_str = "BCM1280";
+ else
+ soc_str = "BCM1x80";
+ ret = setup_bcm1x80_bcm1x55();
+ break;
+
+ case K_SYS_SOC_TYPE_BCM1x55:
+ if (part_type == K_SYS_PART_BCM1455)
+ soc_str = "BCM1455";
+ else if (part_type == K_SYS_PART_BCM1255)
+ soc_str = "BCM1255";
+ else
+ soc_str = "BCM1x55";
+ ret = setup_bcm1x80_bcm1x55();
+ break;
+
+ default:
+ printk("Unknown part type %x\n", part_type);
+ ret = 1;
+ break;
+ }
+
+ return ret;
+}
+
+void __init bcm1480_setup(void)
+{
+ uint64_t sys_rev;
+ int plldiv;
+
+ sb1_pass = read_c0_prid() & PRID_REV_MASK;
+ sys_rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION));
+ soc_type = SYS_SOC_TYPE(sys_rev);
+ part_type = G_SYS_PART(sys_rev);
+ soc_pass = G_SYS_REVISION(sys_rev);
+
+ if (sys_rev_decode()) {
+ printk("Restart after failure to identify SiByte chip\n");
+ machine_restart(NULL);
+ }
+
+ plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
+ zbbus_mhz = ((plldiv >> 1) * 50) + ((plldiv & 1) * 25);
+
+ printk("Broadcom SiByte %s %s @ %d MHz (SB-1A rev %d)\n",
+ soc_str, pass_str, zbbus_mhz * 2, sb1_pass);
+ printk("Board type: %s\n", get_system_type());
+}
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
new file mode 100644
index 000000000..5861e5025
--- /dev/null
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2001,2002,2004 Broadcom Corporation
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/kernel_stat.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/fw/cfe/cfe_api.h>
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+
+/*
+ * These are routines for dealing with the bcm1480 smp capabilities
+ * independent of board/firmware
+ */
+
+static void *mailbox_0_set_regs[] = {
+ IOADDR(A_BCM1480_IMR_CPU0_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU),
+ IOADDR(A_BCM1480_IMR_CPU1_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU),
+ IOADDR(A_BCM1480_IMR_CPU2_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU),
+ IOADDR(A_BCM1480_IMR_CPU3_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU),
+};
+
+static void *mailbox_0_clear_regs[] = {
+ IOADDR(A_BCM1480_IMR_CPU0_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU),
+ IOADDR(A_BCM1480_IMR_CPU1_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU),
+ IOADDR(A_BCM1480_IMR_CPU2_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU),
+ IOADDR(A_BCM1480_IMR_CPU3_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU),
+};
+
+static void *mailbox_0_regs[] = {
+ IOADDR(A_BCM1480_IMR_CPU0_BASE + R_BCM1480_IMR_MAILBOX_0_CPU),
+ IOADDR(A_BCM1480_IMR_CPU1_BASE + R_BCM1480_IMR_MAILBOX_0_CPU),
+ IOADDR(A_BCM1480_IMR_CPU2_BASE + R_BCM1480_IMR_MAILBOX_0_CPU),
+ IOADDR(A_BCM1480_IMR_CPU3_BASE + R_BCM1480_IMR_MAILBOX_0_CPU),
+};
+
+/*
+ * SMP init and finish on secondary CPUs
+ */
+void bcm1480_smp_init(void)
+{
+ unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
+ STATUSF_IP1 | STATUSF_IP0;
+
+ /* Set interrupt mask, but don't enable */
+ change_c0_status(ST0_IM, imask);
+}
+
+/*
+ * Simple enough; everything is set up, so just poke the appropriate mailbox
+ * register, and we should be set
+ */
+static void bcm1480_send_ipi_single(int cpu, unsigned int action)
+{
+ __raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]);
+}
+
+static void bcm1480_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ bcm1480_send_ipi_single(i, action);
+}
+
+/*
+ * Code to run on secondary just after probing the CPU
+ */
+static void bcm1480_init_secondary(void)
+{
+ extern void bcm1480_smp_init(void);
+
+ bcm1480_smp_init();
+}
+
+/*
+ * Do any tidying up before marking online and running the idle
+ * loop
+ */
+static void bcm1480_smp_finish(void)
+{
+ extern void sb1480_clockevent_init(void);
+
+ sb1480_clockevent_init();
+ local_irq_enable();
+}
+
+/*
+ * Setup the PC, SP, and GP of a secondary processor and start it
+ * running!
+ */
+static int bcm1480_boot_secondary(int cpu, struct task_struct *idle)
+{
+ int retval;
+
+ retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
+ __KSTK_TOS(idle),
+ (unsigned long)task_thread_info(idle), 0);
+ if (retval != 0)
+ printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
+ return retval;
+}
+
+/*
+ * Use CFE to find out how many CPUs are available, setting up
+ * cpu_possible_mask and the logical/physical mappings.
+ * XXXKW will the boot CPU ever not be physical 0?
+ *
+ * Common setup before any secondaries are started
+ */
+static void __init bcm1480_smp_setup(void)
+{
+ int i, num;
+
+ init_cpu_possible(cpumask_of(0));
+ __cpu_number_map[0] = 0;
+ __cpu_logical_map[0] = 0;
+
+ for (i = 1, num = 0; i < NR_CPUS; i++) {
+ if (cfe_cpu_stop(i) == 0) {
+ set_cpu_possible(i, true);
+ __cpu_number_map[i] = ++num;
+ __cpu_logical_map[num] = i;
+ }
+ }
+ printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
+}
+
+static void __init bcm1480_prepare_cpus(unsigned int max_cpus)
+{
+}
+
+const struct plat_smp_ops bcm1480_smp_ops = {
+ .send_ipi_single = bcm1480_send_ipi_single,
+ .send_ipi_mask = bcm1480_send_ipi_mask,
+ .init_secondary = bcm1480_init_secondary,
+ .smp_finish = bcm1480_smp_finish,
+ .boot_secondary = bcm1480_boot_secondary,
+ .smp_setup = bcm1480_smp_setup,
+ .prepare_cpus = bcm1480_prepare_cpus,
+};
+
+void bcm1480_mailbox_interrupt(void)
+{
+ int cpu = smp_processor_id();
+ int irq = K_BCM1480_INT_MBOX_0_0;
+ unsigned int action;
+
+ kstat_incr_irq_this_cpu(irq);
+ /* Load the mailbox register to figure out what we're supposed to do */
+ action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff;
+
+ /* Clear the mailbox to clear the interrupt */
+ __raw_writeq(((u64)action)<<48, mailbox_0_clear_regs[cpu]);
+
+ if (action & SMP_RESCHEDULE_YOURSELF)
+ scheduler_ipi();
+
+ if (action & SMP_CALL_FUNCTION) {
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
+ }
+}
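
The IPI path packs the action bitmask into the top 16 bits of the 64-bit mailbox word: the sender writes action << 48 to the set register, and the receiver reads the mailbox, shifts the value down, and writes the same bits to the clear register to acknowledge. A minimal host-side sketch of that encoding follows; plain variables stand in for the memory-mapped registers and the SMP_* bit values are illustrative.

#include <stdint.h>
#include <stdio.h>

#define SMP_RESCHEDULE_YOURSELF	0x1	/* illustrative bit values */
#define SMP_CALL_FUNCTION	0x2

int main(void)
{
	uint64_t mailbox = 0;
	unsigned int action = SMP_CALL_FUNCTION;

	/* Sender: set the action bits in the high 16 bits of the mailbox word. */
	mailbox |= (uint64_t)action << 48;

	/* Receiver: extract the pending actions and clear what was handled. */
	unsigned int pending = (mailbox >> 48) & 0xffff;
	mailbox &= ~((uint64_t)pending << 48);

	printf("pending=%#x, mailbox after clear=%#llx\n",
	       pending, (unsigned long long)mailbox);
	return 0;
}
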
diff --git a/arch/mips/sibyte/bcm1480/time.c b/arch/mips/sibyte/bcm1480/time.c
new file mode 100644
index 000000000..e6450d79f
--- /dev/null
+++ b/arch/mips/sibyte/bcm1480/time.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000,2001,2004 Broadcom Corporation
+ */
+#include <linux/init.h>
+
+extern void sb1480_clockevent_init(void);
+extern void sb1480_clocksource_init(void);
+
+void __init plat_time_init(void)
+{
+ sb1480_clocksource_init();
+ sb1480_clockevent_init();
+}
diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
new file mode 100644
index 000000000..57f670aa1
--- /dev/null
+++ b/arch/mips/sibyte/common/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y := cfe.o
+obj-$(CONFIG_SWIOTLB) += dma.o
+obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o
+obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o
+obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o
diff --git a/arch/mips/sibyte/common/bus_watcher.c b/arch/mips/sibyte/common/bus_watcher.c
new file mode 100644
index 000000000..d43291473
--- /dev/null
+++ b/arch/mips/sibyte/common/bus_watcher.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2002,2003 Broadcom Corporation
+ */
+
+/*
+ * The Bus Watcher monitors internal bus transactions and maintains
+ * counts of transactions with error status, logging details and
+ * causing one of several interrupts. This driver provides a handler
+ * for those interrupts which aggregates the counts (to avoid
+ * saturating the 8-bit counters) and provides a presence in
+ * /proc/bus_watcher if PROC_FS is on.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/io.h>
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+#include <asm/sibyte/sb1250_scd.h>
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#include <asm/sibyte/bcm1480_regs.h>
+#endif
+
+
+struct bw_stats_struct {
+ uint64_t status;
+ uint32_t l2_err;
+ uint32_t memio_err;
+ int status_printed;
+ unsigned long l2_cor_d;
+ unsigned long l2_bad_d;
+ unsigned long l2_cor_t;
+ unsigned long l2_bad_t;
+ unsigned long mem_cor_d;
+ unsigned long mem_bad_d;
+ unsigned long bus_error;
+} bw_stats;
+
+
+static void print_summary(uint32_t status, uint32_t l2_err,
+ uint32_t memio_err)
+{
+ printk("Bus watcher error counters: %08x %08x\n", l2_err, memio_err);
+ printk("\nLast recorded signature:\n");
+ printk("Request %02x from %d, answered by %d with Dcode %d\n",
+ (unsigned int)(G_SCD_BERR_TID(status) & 0x3f),
+ (int)(G_SCD_BERR_TID(status) >> 6),
+ (int)G_SCD_BERR_RID(status),
+ (int)G_SCD_BERR_DCODE(status));
+}
+
+/*
+ * check_bus_watcher is exported for use in situations where we want
+ * to see the most recent status of the bus watcher, which might have
+ * already been destructively read out of the registers.
+ *
+ * notes: this is currently used by the cache error handler
+ * should provide locking against the interrupt handler
+ */
+void check_bus_watcher(void)
+{
+ u32 status, l2_err, memio_err;
+
+#if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250)
+ /* Use non-destructive register */
+ status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS_DEBUG));
+#elif defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+ /* Use non-destructive register */
+ /* Same as 1250 except BUS_ERR_STATUS_DEBUG is in a different place. */
+ status = csr_in32(IOADDR(A_BCM1480_BUS_ERR_STATUS_DEBUG));
+#else
+#error bus watcher being built for unknown Sibyte SOC!
+#endif
+ if (!(status & 0x7fffffff)) {
+ printk("Using last values reaped by bus watcher driver\n");
+ status = bw_stats.status;
+ l2_err = bw_stats.l2_err;
+ memio_err = bw_stats.memio_err;
+ } else {
+ l2_err = csr_in32(IOADDR(A_BUS_L2_ERRORS));
+ memio_err = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS));
+ }
+ if (status & ~(1UL << 31))
+ print_summary(status, l2_err, memio_err);
+ else
+ printk("Bus watcher indicates no error\n");
+}
+
+#ifdef CONFIG_PROC_FS
+
+/* For simplicity, I want to assume a single read is required each
+ time */
+static int bw_proc_show(struct seq_file *m, void *v)
+{
+ struct bw_stats_struct *stats = m->private;
+
+ seq_puts(m, "SiByte Bus Watcher statistics\n");
+ seq_puts(m, "-----------------------------\n");
+ seq_printf(m, "L2-d-cor %8ld\nL2-d-bad %8ld\n",
+ stats->l2_cor_d, stats->l2_bad_d);
+ seq_printf(m, "L2-t-cor %8ld\nL2-t-bad %8ld\n",
+ stats->l2_cor_t, stats->l2_bad_t);
+ seq_printf(m, "MC-d-cor %8ld\nMC-d-bad %8ld\n",
+ stats->mem_cor_d, stats->mem_bad_d);
+ seq_printf(m, "IO-err %8ld\n", stats->bus_error);
+ seq_puts(m, "\nLast recorded signature:\n");
+ seq_printf(m, "Request %02x from %d, answered by %d with Dcode %d\n",
+ (unsigned int)(G_SCD_BERR_TID(stats->status) & 0x3f),
+ (int)(G_SCD_BERR_TID(stats->status) >> 6),
+ (int)G_SCD_BERR_RID(stats->status),
+ (int)G_SCD_BERR_DCODE(stats->status));
+ /* XXXKW indicate multiple errors between printings, or stats
+ collection (or both)? */
+ if (stats->status & M_SCD_BERR_MULTERRS)
+ seq_puts(m, "Multiple errors observed since last check.\n");
+ if (stats->status_printed) {
+ seq_puts(m, "(no change since last printing)\n");
+ } else {
+ stats->status_printed = 1;
+ }
+
+ return 0;
+}
+
+static void create_proc_decoder(struct bw_stats_struct *stats)
+{
+ struct proc_dir_entry *ent;
+
+ ent = proc_create_single_data("bus_watcher", S_IWUSR | S_IRUGO, NULL,
+ bw_proc_show, stats);
+ if (!ent) {
+ printk(KERN_INFO "Unable to initialize bus_watcher /proc entry\n");
+ return;
+ }
+}
+
+#endif /* CONFIG_PROC_FS */
+
+/*
+ * sibyte_bw_int - handle bus watcher interrupts and accumulate counts
+ *
+ * notes: possible re-entry due to multiple sources
+ * should check/indicate saturation
+ */
+static irqreturn_t sibyte_bw_int(int irq, void *data)
+{
+ struct bw_stats_struct *stats = data;
+ unsigned long cntr;
+#ifdef CONFIG_SIBYTE_BW_TRACE
+ int i;
+#endif
+
+#ifdef CONFIG_SIBYTE_BW_TRACE
+ csr_out32(M_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG));
+ csr_out32(M_SCD_TRACE_CFG_START_READ, IOADDR(A_SCD_TRACE_CFG));
+
+ for (i=0; i<256*6; i++)
+ printk("%016llx\n",
+ (long long)__raw_readq(IOADDR(A_SCD_TRACE_READ)));
+
+ csr_out32(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
+ csr_out32(M_SCD_TRACE_CFG_START, IOADDR(A_SCD_TRACE_CFG));
+#endif
+
+ /* Destructive read, clears register and interrupt */
+ stats->status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS));
+ stats->status_printed = 0;
+
+ stats->l2_err = cntr = csr_in32(IOADDR(A_BUS_L2_ERRORS));
+ stats->l2_cor_d += G_SCD_L2ECC_CORR_D(cntr);
+ stats->l2_bad_d += G_SCD_L2ECC_BAD_D(cntr);
+ stats->l2_cor_t += G_SCD_L2ECC_CORR_T(cntr);
+ stats->l2_bad_t += G_SCD_L2ECC_BAD_T(cntr);
+ csr_out32(0, IOADDR(A_BUS_L2_ERRORS));
+
+ stats->memio_err = cntr = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS));
+ stats->mem_cor_d += G_SCD_MEM_ECC_CORR(cntr);
+ stats->mem_bad_d += G_SCD_MEM_ECC_BAD(cntr);
+ stats->bus_error += G_SCD_MEM_BUSERR(cntr);
+ csr_out32(0, IOADDR(A_BUS_MEM_IO_ERRORS));
+
+ return IRQ_HANDLED;
+}
+
+int __init sibyte_bus_watcher(void)
+{
+ memset(&bw_stats, 0, sizeof(struct bw_stats_struct));
+ bw_stats.status_printed = 1;
+
+ if (request_irq(K_INT_BAD_ECC, sibyte_bw_int, 0, "Bus watcher", &bw_stats)) {
+ printk("Failed to register bus watcher BAD_ECC irq\n");
+ return -1;
+ }
+ if (request_irq(K_INT_COR_ECC, sibyte_bw_int, 0, "Bus watcher", &bw_stats)) {
+ free_irq(K_INT_BAD_ECC, &bw_stats);
+ printk("Failed to register bus watcher COR_ECC irq\n");
+ return -1;
+ }
+ if (request_irq(K_INT_IO_BUS, sibyte_bw_int, 0, "Bus watcher", &bw_stats)) {
+ free_irq(K_INT_BAD_ECC, &bw_stats);
+ free_irq(K_INT_COR_ECC, &bw_stats);
+ printk("Failed to register bus watcher IO_BUS irq\n");
+ return -1;
+ }
+
+#ifdef CONFIG_PROC_FS
+ create_proc_decoder(&bw_stats);
+#endif
+
+#ifdef CONFIG_SIBYTE_BW_TRACE
+ csr_out32((M_SCD_TRSEQ_ASAMPLE | M_SCD_TRSEQ_DSAMPLE |
+ K_SCD_TRSEQ_TRIGGER_ALL),
+ IOADDR(A_SCD_TRACE_SEQUENCE_0));
+ csr_out32(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
+ csr_out32(M_SCD_TRACE_CFG_START, IOADDR(A_SCD_TRACE_CFG));
+#endif
+
+ return 0;
+}
+
+device_initcall(sibyte_bus_watcher);
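
With CONFIG_PROC_FS enabled, the accumulated counters and the last error signature are exposed at /proc/bus_watcher, as the driver's opening comment notes. A userspace check can be as simple as the sketch below (error handling kept minimal):

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/bus_watcher", "r");

	if (!f) {
		perror("/proc/bus_watcher");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* counters plus last recorded signature */
	fclose(f);
	return 0;
}
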
diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c
new file mode 100644
index 000000000..89f7fca45
--- /dev/null
+++ b/arch/mips/sibyte/common/cfe.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/mm.h>
+#include <linux/blkdev.h>
+#include <linux/memblock.h>
+#include <linux/pm.h>
+#include <linux/smp.h>
+
+#include <asm/bootinfo.h>
+#include <asm/reboot.h>
+#include <asm/setup.h>
+#include <asm/sibyte/board.h>
+#include <asm/smp-ops.h>
+
+#include <asm/fw/cfe/cfe_api.h>
+#include <asm/fw/cfe/cfe_error.h>
+
+/* Max ram addressable in 32-bit segments */
+#ifdef CONFIG_64BIT
+#define MAX_RAM_SIZE (~0ULL)
+#else
+#ifdef CONFIG_HIGHMEM
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define MAX_RAM_SIZE (~0ULL)
+#else
+#define MAX_RAM_SIZE (0xffffffffULL)
+#endif
+#else
+#define MAX_RAM_SIZE (0x1fffffffULL)
+#endif
+#endif
+
+#define SIBYTE_MAX_MEM_REGIONS 8
+phys_addr_t board_mem_region_addrs[SIBYTE_MAX_MEM_REGIONS];
+phys_addr_t board_mem_region_sizes[SIBYTE_MAX_MEM_REGIONS];
+unsigned int board_mem_region_count;
+
+int cfe_cons_handle;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+extern unsigned long initrd_start, initrd_end;
+#endif
+
+static void __noreturn cfe_linux_exit(void *arg)
+{
+ int warm = *(int *)arg;
+
+ if (smp_processor_id()) {
+ static int reboot_smp;
+
+ /* Don't repeat the process from another CPU */
+ if (!reboot_smp) {
+ /* Get CPU 0 to do the cfe_exit */
+ reboot_smp = 1;
+ smp_call_function(cfe_linux_exit, arg, 0);
+ }
+ } else {
+ printk("Passing control back to CFE...\n");
+ cfe_exit(warm, 0);
+ printk("cfe_exit returned??\n");
+ }
+ while (1);
+}
+
+static void __noreturn cfe_linux_restart(char *command)
+{
+ static const int zero;
+
+ cfe_linux_exit((void *)&zero);
+}
+
+static void __noreturn cfe_linux_halt(void)
+{
+ static const int one = 1;
+
+ cfe_linux_exit((void *)&one);
+}
+
+static __init void prom_meminit(void)
+{
+ u64 addr, size, type; /* regardless of PHYS_ADDR_T_64BIT */
+ int mem_flags = 0;
+ unsigned int idx;
+ int rd_flag;
+#ifdef CONFIG_BLK_DEV_INITRD
+ unsigned long initrd_pstart;
+ unsigned long initrd_pend;
+
+ initrd_pstart = CPHYSADDR(initrd_start);
+ initrd_pend = CPHYSADDR(initrd_end);
+ if (initrd_start &&
+ ((initrd_pstart > MAX_RAM_SIZE)
+ || (initrd_pend > MAX_RAM_SIZE))) {
+ panic("initrd out of addressable memory");
+ }
+
+#endif /* INITRD */
+
+ for (idx = 0; cfe_enummem(idx, mem_flags, &addr, &size, &type) != CFE_ERR_NOMORE;
+ idx++) {
+ rd_flag = 0;
+ if (type == CFE_MI_AVAILABLE) {
+ /*
+ * See if this block contains (any portion of) the
+ * ramdisk
+ */
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start) {
+ if ((initrd_pstart > addr) &&
+ (initrd_pstart < (addr + size))) {
+ memblock_add(addr,
+ initrd_pstart - addr);
+ rd_flag = 1;
+ }
+ if ((initrd_pend > addr) &&
+ (initrd_pend < (addr + size))) {
+ memblock_add(initrd_pend,
+ (addr + size) - initrd_pend);
+ rd_flag = 1;
+ }
+ }
+#endif
+ if (!rd_flag) {
+ if (addr > MAX_RAM_SIZE)
+ continue;
+ if (addr+size > MAX_RAM_SIZE)
+ size = MAX_RAM_SIZE - addr + 1;
+ /*
+ * memcpy/__copy_user prefetch, which
+ * will cause a bus error for
+ * KSEG/KUSEG addrs not backed by RAM.
+ * Hence, reserve some padding for the
+ * prefetch distance.
+ */
+ if (size > 512)
+ size -= 512;
+ memblock_add(addr, size);
+ }
+ board_mem_region_addrs[board_mem_region_count] = addr;
+ board_mem_region_sizes[board_mem_region_count] = size;
+ board_mem_region_count++;
+ if (board_mem_region_count ==
+ SIBYTE_MAX_MEM_REGIONS) {
+ /*
+ * Too many regions. Need to configure more
+ */
+ while(1);
+ }
+ }
+ }
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start) {
+ memblock_add(initrd_pstart, initrd_pend - initrd_pstart);
+ memblock_reserve(initrd_pstart, initrd_pend - initrd_pstart);
+ }
+#endif
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+static int __init initrd_setup(char *str)
+{
+ char rdarg[64];
+ int idx;
+ char *tmp, *endptr;
+ unsigned long initrd_size;
+
+ /* Make a copy of the initrd argument so we can smash it up here */
+ for (idx = 0; idx < sizeof(rdarg)-1; idx++) {
+ if (!str[idx] || (str[idx] == ' ')) break;
+ rdarg[idx] = str[idx];
+ }
+
+ rdarg[idx] = 0;
+ str = rdarg;
+
+ /*
+ * Initrd location comes in the form "<hex size of ramdisk in bytes>@<location in memory>"
+ * e.g. initrd=3abfd@80010000. This is set up by the loader.
+ */
+ for (tmp = str; *tmp != '@'; tmp++) {
+ if (!*tmp) {
+ goto fail;
+ }
+ }
+ *tmp = 0;
+ tmp++;
+ if (!*tmp) {
+ goto fail;
+ }
+ initrd_size = simple_strtoul(str, &endptr, 16);
+ if (*endptr) {
+ *(tmp-1) = '@';
+ goto fail;
+ }
+ *(tmp-1) = '@';
+ initrd_start = simple_strtoul(tmp, &endptr, 16);
+ if (*endptr) {
+ goto fail;
+ }
+ initrd_end = initrd_start + initrd_size;
+ printk("Found initrd of %lx@%lx\n", initrd_size, initrd_start);
+ return 1;
+ fail:
+ printk("Bad initrd argument. Disabling initrd\n");
+ initrd_start = 0;
+ initrd_end = 0;
+ return 1;
+}
+
+#endif
+
+extern const struct plat_smp_ops sb_smp_ops;
+extern const struct plat_smp_ops bcm1480_smp_ops;
+
+/*
+ * prom_init is called just after the cpu type is determined, from setup_arch()
+ */
+void __init prom_init(void)
+{
+ uint64_t cfe_ept, cfe_handle;
+ unsigned int cfe_eptseal;
+ int argc = fw_arg0;
+ char **envp = (char **) fw_arg2;
+ int *prom_vec = (int *) fw_arg3;
+
+ _machine_restart = cfe_linux_restart;
+ _machine_halt = cfe_linux_halt;
+ pm_power_off = cfe_linux_halt;
+
+ /*
+ * Check if a loader was used; if NOT, the 4 arguments are
+ * what CFE gives us (handle, 0, EPT and EPTSEAL)
+ */
+ if (argc < 0) {
+ cfe_handle = (uint64_t)(long)argc;
+ cfe_ept = (long)envp;
+ cfe_eptseal = (uint32_t)(unsigned long)prom_vec;
+ } else {
+ if ((int32_t)(long)prom_vec < 0) {
+ /*
+ * Old loader; all it gives us is the handle,
+ * so use the "known" entrypoint and assume
+ * the seal.
+ */
+ cfe_handle = (uint64_t)(long)prom_vec;
+ cfe_ept = (uint64_t)((int32_t)0x9fc00500);
+ cfe_eptseal = CFE_EPTSEAL;
+ } else {
+ /*
+ * Newer loaders bundle the handle/ept/eptseal
+ * Note: prom_vec is in the loader's useg
+ * which is still alive in the TLB.
+ */
+ cfe_handle = (uint64_t)((int32_t *)prom_vec)[0];
+ cfe_ept = (uint64_t)((int32_t *)prom_vec)[2];
+ cfe_eptseal = (unsigned int)((uint32_t *)prom_vec)[3];
+ }
+ }
+ if (cfe_eptseal != CFE_EPTSEAL) {
+ /* too early for panic to do any good */
+ printk("CFE's entrypoint seal doesn't match. Spinning.");
+ while (1) ;
+ }
+ cfe_init(cfe_handle, cfe_ept);
+ /*
+ * Get the handle for (at least) prom_putchar, possibly for
+ * boot console
+ */
+ cfe_cons_handle = cfe_getstdhandle(CFE_STDHANDLE_CONSOLE);
+ if (cfe_getenv("LINUX_CMDLINE", arcs_cmdline, COMMAND_LINE_SIZE) < 0) {
+ if (argc >= 0) {
+ /* The loader should have set the command line */
+ /* too early for panic to do any good */
+ printk("LINUX_CMDLINE not defined in cfe.");
+ while (1) ;
+ }
+ }
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ {
+ char *ptr;
+ /* Need to find out early whether we've got an initrd. So scan
+ the list looking now */
+ for (ptr = arcs_cmdline; *ptr; ptr++) {
+ while (*ptr == ' ') {
+ ptr++;
+ }
+ if (!strncmp(ptr, "initrd=", 7)) {
+ initrd_setup(ptr+7);
+ break;
+ } else {
+ while (*ptr && (*ptr != ' ')) {
+ ptr++;
+ }
+ }
+ }
+ }
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+ /* Not sure this is needed, but it's the safe way. */
+ arcs_cmdline[COMMAND_LINE_SIZE-1] = 0;
+
+ prom_meminit();
+
+#if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250)
+ register_smp_ops(&sb_smp_ops);
+#endif
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+ register_smp_ops(&bcm1480_smp_ops);
+#endif
+}
+
+void __init prom_free_prom_memory(void)
+{
+ /* Not sure what I'm supposed to do here. Nothing, I think */
+}
+
+void prom_putchar(char c)
+{
+ int ret;
+
+ while ((ret = cfe_write(cfe_cons_handle, &c, 1)) == 0)
+ ;
+}
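
initrd_setup() above expects the loader to pass "initrd=<hex size>@<hex physical address>", e.g. initrd=3abfd@80010000. A small userspace sketch of the same split-and-parse, using strtoul where the kernel helper uses simple_strtoul:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "<hex size>@<hex address>"; returns 0 on success. */
static int parse_initrd_arg(const char *arg, unsigned long *size,
			    unsigned long *start)
{
	char buf[64], *at, *end;

	strncpy(buf, arg, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	at = strchr(buf, '@');
	if (!at || !at[1])
		return -1;
	*at = '\0';

	*size = strtoul(buf, &end, 16);
	if (*end)
		return -1;
	*start = strtoul(at + 1, &end, 16);
	if (*end)
		return -1;
	return 0;
}

int main(void)
{
	unsigned long size, start;

	if (parse_initrd_arg("3abfd@80010000", &size, &start) == 0)
		printf("initrd of %lx bytes at %lx\n", size, start);
	return 0;
}
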
diff --git a/arch/mips/sibyte/common/cfe_console.c b/arch/mips/sibyte/common/cfe_console.c
new file mode 100644
index 000000000..8af7b41f7
--- /dev/null
+++ b/arch/mips/sibyte/common/cfe_console.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/console.h>
+
+#include <asm/sibyte/board.h>
+
+#include <asm/fw/cfe/cfe_api.h>
+#include <asm/fw/cfe/cfe_error.h>
+
+extern int cfe_cons_handle;
+
+static void cfe_console_write(struct console *cons, const char *str,
+ unsigned int count)
+{
+ int i, last, written;
+
+ for (i=0, last=0; i<count; i++) {
+ if (!str[i])
+ /* XXXKW can/should this ever happen? */
+ return;
+ if (str[i] == '\n') {
+ do {
+ written = cfe_write(cfe_cons_handle, &str[last], i-last);
+ if (written < 0)
+ ;
+ last += written;
+ } while (last < i);
+ while (cfe_write(cfe_cons_handle, "\r", 1) <= 0)
+ ;
+ }
+ }
+ if (last != count) {
+ do {
+ written = cfe_write(cfe_cons_handle, &str[last], count-last);
+ if (written < 0)
+ ;
+ last += written;
+ } while (last < count);
+ }
+
+}
+
+static int cfe_console_setup(struct console *cons, char *str)
+{
+ char consdev[32];
+ /* XXXKW think about interaction with 'console=' cmdline arg */
+ /* If none of the console options are configured, the build will break. */
+ if (cfe_getenv("BOOT_CONSOLE", consdev, 32) >= 0) {
+#ifdef CONFIG_SERIAL_SB1250_DUART
+ if (!strcmp(consdev, "uart0")) {
+ setleds("u0cn");
+ } else if (!strcmp(consdev, "uart1")) {
+ setleds("u1cn");
+ } else
+#endif
+#ifdef CONFIG_VGA_CONSOLE
+ if (!strcmp(consdev, "pcconsole0")) {
+ setleds("pccn");
+ } else
+#endif
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static struct console sb1250_cfe_cons = {
+ .name = "cfe",
+ .write = cfe_console_write,
+ .setup = cfe_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+static int __init sb1250_cfe_console_init(void)
+{
+ register_console(&sb1250_cfe_cons);
+ return 0;
+}
+
+console_initcall(sb1250_cfe_console_init);
diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
new file mode 100644
index 000000000..eb47a94f3
--- /dev/null
+++ b/arch/mips/sibyte/common/dma.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DMA support for Broadcom SiByte platforms.
+ *
+ * Copyright (c) 2018 Maciej W. Rozycki
+ */
+
+#include <linux/swiotlb.h>
+#include <asm/bootinfo.h>
+
+void __init plat_swiotlb_setup(void)
+{
+ swiotlb_init(1);
+}
diff --git a/arch/mips/sibyte/common/sb_tbprof.c b/arch/mips/sibyte/common/sb_tbprof.c
new file mode 100644
index 000000000..f80d7a710
--- /dev/null
+++ b/arch/mips/sibyte/common/sb_tbprof.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *
+ * Copyright (C) 2001, 2002, 2003 Broadcom Corporation
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+
+#undef DEBUG
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <asm/io.h>
+#include <asm/sibyte/sb1250.h>
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/bcm1480_scd.h>
+#include <asm/sibyte/bcm1480_int.h>
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_scd.h>
+#include <asm/sibyte/sb1250_int.h>
+#else
+#error invalid SiByte UART configuration
+#endif
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#undef K_INT_TRACE_FREEZE
+#define K_INT_TRACE_FREEZE K_BCM1480_INT_TRACE_FREEZE
+#undef K_INT_PERF_CNT
+#define K_INT_PERF_CNT K_BCM1480_INT_PERF_CNT
+#endif
+
+#include <linux/uaccess.h>
+
+#define SBPROF_TB_MAJOR 240
+
+typedef u64 tb_sample_t[6*256];
+
+enum open_status {
+ SB_CLOSED,
+ SB_OPENING,
+ SB_OPEN
+};
+
+struct sbprof_tb {
+ wait_queue_head_t tb_sync;
+ wait_queue_head_t tb_read;
+ struct mutex lock;
+ enum open_status open;
+ tb_sample_t *sbprof_tbbuf;
+ int next_tb_sample;
+
+ volatile int tb_enable;
+ volatile int tb_armed;
+
+};
+
+static struct sbprof_tb sbp;
+
+#define MAX_SAMPLE_BYTES (24*1024*1024)
+#define MAX_TBSAMPLE_BYTES (12*1024*1024)
+
+#define MAX_SAMPLES (MAX_SAMPLE_BYTES/sizeof(u_int32_t))
+#define TB_SAMPLE_SIZE (sizeof(tb_sample_t))
+#define MAX_TB_SAMPLES (MAX_TBSAMPLE_BYTES/TB_SAMPLE_SIZE)
+
+/* ioctls */
+#define SBPROF_ZBSTART _IOW('s', 0, int)
+#define SBPROF_ZBSTOP _IOW('s', 1, int)
+#define SBPROF_ZBWAITFULL _IOW('s', 2, int)
+
+/*
+ * Routines for using 40-bit SCD cycle counter
+ *
+ * Client responsible for either handling interrupts or making sure
+ * the cycles counter never saturates, e.g., by doing
+ * zclk_timer_init(0) at least every 2^40 - 1 ZCLKs.
+ */
+
+/*
+ * Configures SCD counter 0 to count ZCLKs starting from val;
+ * Configures SCD counters 1,2,3 to count nothing.
+ * Must not be called while gathering ZBbus profiles.
+ */
+
+#define zclk_timer_init(val) \
+ __asm__ __volatile__ (".set push;" \
+ ".set mips64;" \
+ "la $8, 0xb00204c0;" /* SCD perf_cnt_cfg */ \
+ "sd %0, 0x10($8);" /* write val to counter0 */ \
+ "sd %1, 0($8);" /* config counter0 for zclks*/ \
+ ".set pop" \
+ : /* no outputs */ \
+ /* enable, counter0 */ \
+ : /* inputs */ "r"(val), "r" ((1ULL << 33) | 1ULL) \
+ : /* modifies */ "$8" )
+
+
+/* Reads SCD counter 0 and puts result in value
+ unsigned long long val; */
+#define zclk_get(val) \
+ __asm__ __volatile__ (".set push;" \
+ ".set mips64;" \
+ "la $8, 0xb00204c0;" /* SCD perf_cnt_cfg */ \
+ "ld %0, 0x10($8);" /* write val to counter0 */ \
+ ".set pop" \
+ : /* outputs */ "=r"(val) \
+ : /* inputs */ \
+ : /* modifies */ "$8" )
+
+#define DEVNAME "sb_tbprof"
+
+#define TB_FULL (sbp.next_tb_sample == MAX_TB_SAMPLES)
+
+/*
+ * Support for ZBbus sampling using the trace buffer
+ *
+ * We use the SCD performance counter interrupt, caused by a Zclk counter
+ * overflow, to trigger the start of tracing.
+ *
+ * We set the trace buffer to sample everything and freeze on
+ * overflow.
+ *
+ * We map the interrupt for trace_buffer_freeze to handle it on CPU 0.
+ *
+ */
+
+static u64 tb_period;
+
+static void arm_tb(void)
+{
+ u64 scdperfcnt;
+ u64 next = (1ULL << 40) - tb_period;
+ u64 tb_options = M_SCD_TRACE_CFG_FREEZE_FULL;
+
+ /*
+ * Generate an SCD_PERFCNT interrupt in TB_PERIOD Zclks to
+ * trigger start of trace. XXX vary sampling period
+ */
+ __raw_writeq(0, IOADDR(A_SCD_PERF_CNT_1));
+ scdperfcnt = __raw_readq(IOADDR(A_SCD_PERF_CNT_CFG));
+
+ /*
+ * Unfortunately, in Pass 2 we must clear all counters to knock down
+ * a previous interrupt request. This means that bus profiling
+ * requires ALL of the SCD perf counters.
+ */
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+ __raw_writeq((scdperfcnt & ~M_SPC_CFG_SRC1) |
+ /* keep counters 0,2,3,4,5,6,7 as is */
+ V_SPC_CFG_SRC1(1), /* counter 1 counts cycles */
+ IOADDR(A_BCM1480_SCD_PERF_CNT_CFG0));
+ __raw_writeq(
+ M_SPC_CFG_ENABLE | /* enable counting */
+ M_SPC_CFG_CLEAR | /* clear all counters */
+ V_SPC_CFG_SRC1(1), /* counter 1 counts cycles */
+ IOADDR(A_BCM1480_SCD_PERF_CNT_CFG1));
+#else
+ __raw_writeq((scdperfcnt & ~M_SPC_CFG_SRC1) |
+ /* keep counters 0,2,3 as is */
+ M_SPC_CFG_ENABLE | /* enable counting */
+ M_SPC_CFG_CLEAR | /* clear all counters */
+ V_SPC_CFG_SRC1(1), /* counter 1 counts cycles */
+ IOADDR(A_SCD_PERF_CNT_CFG));
+#endif
+ __raw_writeq(next, IOADDR(A_SCD_PERF_CNT_1));
+ /* Reset the trace buffer */
+ __raw_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
+#if 0 && defined(M_SCD_TRACE_CFG_FORCECNT)
+ /* XXXKW may want to expose control to the data-collector */
+ tb_options |= M_SCD_TRACE_CFG_FORCECNT;
+#endif
+ __raw_writeq(tb_options, IOADDR(A_SCD_TRACE_CFG));
+ sbp.tb_armed = 1;
+}
+
+static irqreturn_t sbprof_tb_intr(int irq, void *dev_id)
+{
+ int i;
+
+ pr_debug(DEVNAME ": tb_intr\n");
+
+ if (sbp.next_tb_sample < MAX_TB_SAMPLES) {
+ /* XXX should use XKPHYS to make writes bypass L2 */
+ u64 *p = sbp.sbprof_tbbuf[sbp.next_tb_sample++];
+ /* Read out trace */
+ __raw_writeq(M_SCD_TRACE_CFG_START_READ,
+ IOADDR(A_SCD_TRACE_CFG));
+ __asm__ __volatile__ ("sync" : : : "memory");
+ /* Loop runs backwards because bundles are read out in reverse order */
+ for (i = 256 * 6; i > 0; i -= 6) {
+ /* Subscripts decrease to put bundle in the order */
+ /* t0 lo, t0 hi, t1 lo, t1 hi, t2 lo, t2 hi */
+ p[i - 1] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
+ /* read t2 hi */
+ p[i - 2] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
+ /* read t2 lo */
+ p[i - 3] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
+ /* read t1 hi */
+ p[i - 4] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
+ /* read t1 lo */
+ p[i - 5] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
+ /* read t0 hi */
+ p[i - 6] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
+ /* read t0 lo */
+ }
+ if (!sbp.tb_enable) {
+ pr_debug(DEVNAME ": tb_intr shutdown\n");
+ __raw_writeq(M_SCD_TRACE_CFG_RESET,
+ IOADDR(A_SCD_TRACE_CFG));
+ sbp.tb_armed = 0;
+ wake_up_interruptible(&sbp.tb_sync);
+ } else {
+ /* knock down current interrupt and get another one later */
+ arm_tb();
+ }
+ } else {
+ /* No more trace buffer samples */
+ pr_debug(DEVNAME ": tb_intr full\n");
+ __raw_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
+ sbp.tb_armed = 0;
+ if (!sbp.tb_enable)
+ wake_up_interruptible(&sbp.tb_sync);
+ wake_up_interruptible(&sbp.tb_read);
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sbprof_pc_intr(int irq, void *dev_id)
+{
+ printk(DEVNAME ": unexpected pc_intr");
+ return IRQ_NONE;
+}
+
+/*
+ * Requires: Already called zclk_timer_init with a value that won't
+ * saturate 40 bits. No subsequent use of SCD performance counters
+ * or trace buffer.
+ */
+
+static int sbprof_zbprof_start(struct file *filp)
+{
+ u64 scdperfcnt;
+ int err;
+
+ if (xchg(&sbp.tb_enable, 1))
+ return -EBUSY;
+
+ pr_debug(DEVNAME ": starting\n");
+
+ sbp.next_tb_sample = 0;
+ filp->f_pos = 0;
+
+ err = request_irq(K_INT_TRACE_FREEZE, sbprof_tb_intr, 0,
+ DEVNAME " trace freeze", &sbp);
+ if (err)
+ return -EBUSY;
+
+ /* Make sure there isn't a perf-cnt interrupt waiting */
+ scdperfcnt = __raw_readq(IOADDR(A_SCD_PERF_CNT_CFG));
+ /* Disable and clear counters, override SRC_1 */
+ __raw_writeq((scdperfcnt & ~(M_SPC_CFG_SRC1 | M_SPC_CFG_ENABLE)) |
+ M_SPC_CFG_ENABLE | M_SPC_CFG_CLEAR | V_SPC_CFG_SRC1(1),
+ IOADDR(A_SCD_PERF_CNT_CFG));
+
+ /*
+ * We grab this interrupt to prevent others from trying to use
+ * it, even though we don't want to service the interrupts
+ * (they only feed into the trace-on-interrupt mechanism)
+ */
+ if (request_irq(K_INT_PERF_CNT, sbprof_pc_intr, 0, DEVNAME " scd perfcnt", &sbp)) {
+ free_irq(K_INT_TRACE_FREEZE, &sbp);
+ return -EBUSY;
+ }
+
+ /*
+ * I need the core to mask these, but the interrupt mapper to
+ * pass them through. I am exploiting my knowledge that
+ * cp0_status masks out IP[5]. krw
+ */
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+ __raw_writeq(K_BCM1480_INT_MAP_I3,
+ IOADDR(A_BCM1480_IMR_REGISTER(0, R_BCM1480_IMR_INTERRUPT_MAP_BASE_L) +
+ ((K_BCM1480_INT_PERF_CNT & 0x3f) << 3)));
+#else
+ __raw_writeq(K_INT_MAP_I3,
+ IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) +
+ (K_INT_PERF_CNT << 3)));
+#endif
+
+ /* Initialize address traps */
+ __raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_0));
+ __raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_1));
+ __raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_2));
+ __raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_3));
+
+ __raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_0));
+ __raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_1));
+ __raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_2));
+ __raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_3));
+
+ __raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_0));
+ __raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_1));
+ __raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_2));
+ __raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_3));
+
+ /* Initialize Trace Event 0-7 */
+ /* when interrupt */
+ __raw_writeq(M_SCD_TREVT_INTERRUPT, IOADDR(A_SCD_TRACE_EVENT_0));
+ __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_1));
+ __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_2));
+ __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_3));
+ __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_4));
+ __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_5));
+ __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_6));
+ __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_7));
+
+ /* Initialize Trace Sequence 0-7 */
+ /* Start on event 0 (interrupt) */
+ __raw_writeq(V_SCD_TRSEQ_FUNC_START | 0x0fff,
+ IOADDR(A_SCD_TRACE_SEQUENCE_0));
+ /* dsamp when d used | asamp when a used */
+ __raw_writeq(M_SCD_TRSEQ_ASAMPLE | M_SCD_TRSEQ_DSAMPLE |
+ K_SCD_TRSEQ_TRIGGER_ALL,
+ IOADDR(A_SCD_TRACE_SEQUENCE_1));
+ __raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_2));
+ __raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_3));
+ __raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_4));
+ __raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_5));
+ __raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_6));
+ __raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_7));
+
+ /* Now indicate the PERF_CNT interrupt as a trace-relevant interrupt */
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+ __raw_writeq(1ULL << (K_BCM1480_INT_PERF_CNT & 0x3f),
+ IOADDR(A_BCM1480_IMR_REGISTER(0, R_BCM1480_IMR_INTERRUPT_TRACE_L)));
+#else
+ __raw_writeq(1ULL << K_INT_PERF_CNT,
+ IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_TRACE)));
+#endif
+ arm_tb();
+
+ pr_debug(DEVNAME ": done starting\n");
+
+ return 0;
+}
+
+static int sbprof_zbprof_stop(void)
+{
+ int err = 0;
+
+ pr_debug(DEVNAME ": stopping\n");
+
+ if (sbp.tb_enable) {
+ /*
+ * XXXKW there is a window here where the intr handler may run,
+ * see the disable, and do the wake_up before this sleep
+ * happens.
+ */
+ pr_debug(DEVNAME ": wait for disarm\n");
+ err = wait_event_interruptible(sbp.tb_sync, !sbp.tb_armed);
+ pr_debug(DEVNAME ": disarm complete, stat %d\n", err);
+
+ if (err)
+ return err;
+
+ sbp.tb_enable = 0;
+ free_irq(K_INT_TRACE_FREEZE, &sbp);
+ free_irq(K_INT_PERF_CNT, &sbp);
+ }
+
+ pr_debug(DEVNAME ": done stopping\n");
+
+ return err;
+}
+
+static int sbprof_tb_open(struct inode *inode, struct file *filp)
+{
+ int minor;
+
+ minor = iminor(inode);
+ if (minor != 0)
+ return -ENODEV;
+
+ if (xchg(&sbp.open, SB_OPENING) != SB_CLOSED)
+ return -EBUSY;
+
+ memset(&sbp, 0, sizeof(struct sbprof_tb));
+ sbp.sbprof_tbbuf = vzalloc(MAX_TBSAMPLE_BYTES);
+ if (!sbp.sbprof_tbbuf) {
+ sbp.open = SB_CLOSED;
+ wmb();
+ return -ENOMEM;
+ }
+
+ init_waitqueue_head(&sbp.tb_sync);
+ init_waitqueue_head(&sbp.tb_read);
+ mutex_init(&sbp.lock);
+
+ sbp.open = SB_OPEN;
+ wmb();
+
+ return 0;
+}
+
+static int sbprof_tb_release(struct inode *inode, struct file *filp)
+{
+ int minor;
+
+ minor = iminor(inode);
+ if (minor != 0 || sbp.open != SB_OPEN)
+ return -ENODEV;
+
+ mutex_lock(&sbp.lock);
+
+ if (sbp.tb_armed || sbp.tb_enable)
+ sbprof_zbprof_stop();
+
+ vfree(sbp.sbprof_tbbuf);
+ sbp.open = SB_CLOSED;
+ wmb();
+
+ mutex_unlock(&sbp.lock);
+
+ return 0;
+}
+
+static ssize_t sbprof_tb_read(struct file *filp, char *buf,
+ size_t size, loff_t *offp)
+{
+ int cur_sample, sample_off, cur_count, sample_left;
+ char *src;
+ int count = 0;
+ char *dest = buf;
+ long cur_off = *offp;
+
+ if (!access_ok(buf, size))
+ return -EFAULT;
+
+ mutex_lock(&sbp.lock);
+
+ count = 0;
+ cur_sample = cur_off / TB_SAMPLE_SIZE;
+ sample_off = cur_off % TB_SAMPLE_SIZE;
+ sample_left = TB_SAMPLE_SIZE - sample_off;
+
+ while (size && (cur_sample < sbp.next_tb_sample)) {
+ int err;
+
+ cur_count = size < sample_left ? size : sample_left;
+ src = (char *)(((long)sbp.sbprof_tbbuf[cur_sample])+sample_off);
+ err = __copy_to_user(dest, src, cur_count);
+ if (err) {
+ *offp = cur_off + cur_count - err;
+ mutex_unlock(&sbp.lock);
+ return err;
+ }
+ pr_debug(DEVNAME ": read from sample %d, %d bytes\n",
+ cur_sample, cur_count);
+ size -= cur_count;
+ sample_left -= cur_count;
+ if (!sample_left) {
+ cur_sample++;
+ sample_off = 0;
+ sample_left = TB_SAMPLE_SIZE;
+ } else {
+ sample_off += cur_count;
+ }
+ cur_off += cur_count;
+ dest += cur_count;
+ count += cur_count;
+ }
+ *offp = cur_off;
+ mutex_unlock(&sbp.lock);
+
+ return count;
+}
+
+static long sbprof_tb_ioctl(struct file *filp,
+ unsigned int command,
+ unsigned long arg)
+{
+ int err = 0;
+
+ switch (command) {
+ case SBPROF_ZBSTART:
+ mutex_lock(&sbp.lock);
+ err = sbprof_zbprof_start(filp);
+ mutex_unlock(&sbp.lock);
+ break;
+
+ case SBPROF_ZBSTOP:
+ mutex_lock(&sbp.lock);
+ err = sbprof_zbprof_stop();
+ mutex_unlock(&sbp.lock);
+ break;
+
+ case SBPROF_ZBWAITFULL: {
+ err = wait_event_interruptible(sbp.tb_read, TB_FULL);
+ if (err)
+ break;
+
+ err = put_user(TB_FULL, (int *) arg);
+ break;
+ }
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static const struct file_operations sbprof_tb_fops = {
+ .owner = THIS_MODULE,
+ .open = sbprof_tb_open,
+ .release = sbprof_tb_release,
+ .read = sbprof_tb_read,
+ .unlocked_ioctl = sbprof_tb_ioctl,
+ .compat_ioctl = sbprof_tb_ioctl,
+ .mmap = NULL,
+ .llseek = default_llseek,
+};
+
+static struct class *tb_class;
+static struct device *tb_dev;
+
+static int __init sbprof_tb_init(void)
+{
+ struct device *dev;
+ struct class *tbc;
+ int err;
+
+ if (register_chrdev(SBPROF_TB_MAJOR, DEVNAME, &sbprof_tb_fops)) {
+ printk(KERN_WARNING DEVNAME ": initialization failed (dev %d)\n",
+ SBPROF_TB_MAJOR);
+ return -EIO;
+ }
+
+ tbc = class_create(THIS_MODULE, "sb_tracebuffer");
+ if (IS_ERR(tbc)) {
+ err = PTR_ERR(tbc);
+ goto out_chrdev;
+ }
+
+ tb_class = tbc;
+
+ dev = device_create(tbc, NULL, MKDEV(SBPROF_TB_MAJOR, 0), NULL, "tb");
+ if (IS_ERR(dev)) {
+ err = PTR_ERR(dev);
+ goto out_class;
+ }
+ tb_dev = dev;
+
+ sbp.open = SB_CLOSED;
+ wmb();
+ tb_period = zbbus_mhz * 10000LL;
+ pr_info(DEVNAME ": initialized - tb_period = %lld\n",
+ (long long) tb_period);
+ return 0;
+
+out_class:
+ class_destroy(tb_class);
+out_chrdev:
+ unregister_chrdev(SBPROF_TB_MAJOR, DEVNAME);
+
+ return err;
+}
+
+static void __exit sbprof_tb_cleanup(void)
+{
+ device_destroy(tb_class, MKDEV(SBPROF_TB_MAJOR, 0));
+ unregister_chrdev(SBPROF_TB_MAJOR, DEVNAME);
+ class_destroy(tb_class);
+}
+
+module_init(sbprof_tb_init);
+module_exit(sbprof_tb_cleanup);
+
+MODULE_ALIAS_CHARDEV_MAJOR(SBPROF_TB_MAJOR);
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
+MODULE_LICENSE("GPL");
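
From userspace the profiler is driven through the character device registered above (major 240, device name "tb"): start sampling with SBPROF_ZBSTART, optionally block until the trace buffer fills with SBPROF_ZBWAITFULL, read out the samples, then SBPROF_ZBSTOP. The sketch below is hedged: it assumes the node exists at /dev/tb and re-declares the ioctl numbers from the driver.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Same ioctl encoding as the driver above. */
#define SBPROF_ZBSTART		_IOW('s', 0, int)
#define SBPROF_ZBSTOP		_IOW('s', 1, int)
#define SBPROF_ZBWAITFULL	_IOW('s', 2, int)

int main(void)
{
	char buf[64 * 1024];
	ssize_t n;
	int full = 0;
	int fd = open("/dev/tb", O_RDONLY);	/* assumed device node path */

	if (fd < 0) {
		perror("/dev/tb");
		return 1;
	}
	if (ioctl(fd, SBPROF_ZBSTART, 0) < 0)
		perror("SBPROF_ZBSTART");
	if (ioctl(fd, SBPROF_ZBWAITFULL, &full) < 0)
		perror("SBPROF_ZBWAITFULL");
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);	/* raw trace samples */
	if (ioctl(fd, SBPROF_ZBSTOP, 0) < 0)
		perror("SBPROF_ZBSTOP");
	close(fd);
	return 0;
}
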
diff --git a/arch/mips/sibyte/sb1250/Makefile b/arch/mips/sibyte/sb1250/Makefile
new file mode 100644
index 000000000..cf1327d3f
--- /dev/null
+++ b/arch/mips/sibyte/sb1250/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y := setup.o irq.o time.o
+
+obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
new file mode 100644
index 000000000..86f49c48f
--- /dev/null
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/errno.h>
+#include <asm/signal.h>
+#include <asm/time.h>
+#include <asm/io.h>
+
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+#include <asm/sibyte/sb1250_uart.h>
+#include <asm/sibyte/sb1250_scd.h>
+#include <asm/sibyte/sb1250.h>
+
+/*
+ * These are the routines that handle all the low level interrupt stuff.
+ * Actions handled here are: initialization of the interrupt map, requesting of
+ * interrupt lines by handlers, dispatching of interrupts to handlers, probing
+ * for interrupt lines
+ */
+
+#ifdef CONFIG_SIBYTE_HAS_LDT
+extern unsigned long ldt_eoi_space;
+#endif
+
+/* Store the CPU id (not the logical number) */
+int sb1250_irq_owner[SB1250_NR_IRQS];
+
+static DEFINE_RAW_SPINLOCK(sb1250_imr_lock);
+
+void sb1250_mask_irq(int cpu, int irq)
+{
+ unsigned long flags;
+ u64 cur_ints;
+
+ raw_spin_lock_irqsave(&sb1250_imr_lock, flags);
+ cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
+ R_IMR_INTERRUPT_MASK));
+ cur_ints |= (((u64) 1) << irq);
+ ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
+ R_IMR_INTERRUPT_MASK));
+ raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags);
+}
+
+void sb1250_unmask_irq(int cpu, int irq)
+{
+ unsigned long flags;
+ u64 cur_ints;
+
+ raw_spin_lock_irqsave(&sb1250_imr_lock, flags);
+ cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
+ R_IMR_INTERRUPT_MASK));
+ cur_ints &= ~(((u64) 1) << irq);
+ ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
+ R_IMR_INTERRUPT_MASK));
+ raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags);
+}
+
+#ifdef CONFIG_SMP
+static int sb1250_set_affinity(struct irq_data *d, const struct cpumask *mask,
+ bool force)
+{
+ int i = 0, old_cpu, cpu, int_on;
+ unsigned int irq = d->irq;
+ u64 cur_ints;
+ unsigned long flags;
+
+ i = cpumask_first_and(mask, cpu_online_mask);
+
+ /* Convert logical CPU to physical CPU */
+ cpu = cpu_logical_map(i);
+
+ /* Protect against other affinity changers and IMR manipulation */
+ raw_spin_lock_irqsave(&sb1250_imr_lock, flags);
+
+ /* Swizzle each CPU's IMR (but leave the IP selection alone) */
+ old_cpu = sb1250_irq_owner[irq];
+ cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(old_cpu) +
+ R_IMR_INTERRUPT_MASK));
+ int_on = !(cur_ints & (((u64) 1) << irq));
+ if (int_on) {
+ /* If it was on, mask it */
+ cur_ints |= (((u64) 1) << irq);
+ ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(old_cpu) +
+ R_IMR_INTERRUPT_MASK));
+ }
+ sb1250_irq_owner[irq] = cpu;
+ if (int_on) {
+ /* unmask for the new CPU */
+ cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
+ R_IMR_INTERRUPT_MASK));
+ cur_ints &= ~(((u64) 1) << irq);
+ ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
+ R_IMR_INTERRUPT_MASK));
+ }
+ raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags);
+
+ return 0;
+}
+#endif
+
+static void disable_sb1250_irq(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+
+ sb1250_mask_irq(sb1250_irq_owner[irq], irq);
+}
+
+static void enable_sb1250_irq(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+
+ sb1250_unmask_irq(sb1250_irq_owner[irq], irq);
+}
+
+
+static void ack_sb1250_irq(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+#ifdef CONFIG_SIBYTE_HAS_LDT
+ u64 pending;
+
+ /*
+ * If the interrupt was an HT interrupt, now is the time to
+ * clear it. NOTE: we assume the HT bridge was set up to
+ * deliver the interrupts to all CPUs (which makes affinity
+ * changing easier for us)
+ */
+ pending = __raw_readq(IOADDR(A_IMR_REGISTER(sb1250_irq_owner[irq],
+ R_IMR_LDT_INTERRUPT)));
+ pending &= ((u64)1 << (irq));
+ if (pending) {
+ int i;
+ for (i=0; i<NR_CPUS; i++) {
+ int cpu;
+#ifdef CONFIG_SMP
+ cpu = cpu_logical_map(i);
+#else
+ cpu = i;
+#endif
+ /*
+ * Clear for all CPUs so an affinity switch
+ * doesn't find an old status
+ */
+ __raw_writeq(pending,
+ IOADDR(A_IMR_REGISTER(cpu,
+ R_IMR_LDT_INTERRUPT_CLR)));
+ }
+
+ /*
+ * Generate EOI. For Pass 1 parts, EOI is a nop. For
+ * Pass 2, the LDT world may be edge-triggered, but
+ * this EOI shouldn't hurt. If they are
+ * level-sensitive, the EOI is required.
+ */
+ *(uint32_t *)(ldt_eoi_space+(irq<<16)+(7<<2)) = 0;
+ }
+#endif
+ sb1250_mask_irq(sb1250_irq_owner[irq], irq);
+}
+
+static struct irq_chip sb1250_irq_type = {
+ .name = "SB1250-IMR",
+ .irq_mask_ack = ack_sb1250_irq,
+ .irq_unmask = enable_sb1250_irq,
+ .irq_mask = disable_sb1250_irq,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = sb1250_set_affinity
+#endif
+};
+
+void __init init_sb1250_irqs(void)
+{
+ int i;
+
+ for (i = 0; i < SB1250_NR_IRQS; i++) {
+ irq_set_chip_and_handler(i, &sb1250_irq_type,
+ handle_level_irq);
+ sb1250_irq_owner[i] = 0;
+ }
+}
+
+
+/*
+ * arch_init_irq is called early in the boot sequence from init/main.c via
+ * init_IRQ. It is responsible for setting up the interrupt mapper and
+ * installing the handler that will be responsible for dispatching interrupts
+ * to the "right" place.
+ */
+/*
+ * For now, map all interrupts to IP[2]. We could save
+ * some cycles by parceling out system interrupts to different
+ * IP lines, but keep it simple for bringup. We'll also direct
+ * all interrupts to a single CPU; we should probably route
+ * PCI and LDT to one cpu and everything else to the other
+ * to balance the load a bit.
+ *
+ * On the second cpu, everything is set to IP5, which is
+ * ignored, EXCEPT the mailbox interrupt. That one is
+ * set to IP[2] so it is handled. This is needed so we
+ * can do cross-cpu function calls, as required by SMP
+ */
+
+#define IMR_IP2_VAL K_INT_MAP_I0
+#define IMR_IP3_VAL K_INT_MAP_I1
+#define IMR_IP4_VAL K_INT_MAP_I2
+#define IMR_IP5_VAL K_INT_MAP_I3
+#define IMR_IP6_VAL K_INT_MAP_I4
+
+void __init arch_init_irq(void)
+{
+
+ unsigned int i;
+ u64 tmp;
+ unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
+ STATUSF_IP1 | STATUSF_IP0;
+
+ /* Default everything to IP2 */
+ for (i = 0; i < SB1250_NR_IRQS; i++) { /* was I0 */
+ __raw_writeq(IMR_IP2_VAL,
+ IOADDR(A_IMR_REGISTER(0,
+ R_IMR_INTERRUPT_MAP_BASE) +
+ (i << 3)));
+ __raw_writeq(IMR_IP2_VAL,
+ IOADDR(A_IMR_REGISTER(1,
+ R_IMR_INTERRUPT_MAP_BASE) +
+ (i << 3)));
+ }
+
+ init_sb1250_irqs();
+
+ /*
+ * Map the high 16 bits of the mailbox registers to IP[3], for
+ * inter-cpu messages
+ */
+ /* Was I1 */
+ __raw_writeq(IMR_IP3_VAL,
+ IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) +
+ (K_INT_MBOX_0 << 3)));
+ __raw_writeq(IMR_IP3_VAL,
+ IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MAP_BASE) +
+ (K_INT_MBOX_0 << 3)));
+
+ /* Clear the mailboxes. The firmware may leave them dirty */
+ __raw_writeq(0xffffffffffffffffULL,
+ IOADDR(A_IMR_REGISTER(0, R_IMR_MAILBOX_CLR_CPU)));
+ __raw_writeq(0xffffffffffffffffULL,
+ IOADDR(A_IMR_REGISTER(1, R_IMR_MAILBOX_CLR_CPU)));
+
+ /* Mask everything except the mailbox registers for both cpus */
+ tmp = ~((u64) 0) ^ (((u64) 1) << K_INT_MBOX_0);
+ __raw_writeq(tmp, IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MASK)));
+ __raw_writeq(tmp, IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MASK)));
+
+ /*
+ * Note that the timer interrupts are also mapped, but this is
+ * done in sb1250_time_init(). Also, the profiling driver
+ * does its own management of IP7.
+ */
+
+ /* Enable necessary IPs, disable the rest */
+ change_c0_status(ST0_IM, imask);
+}
+
+extern void sb1250_mailbox_interrupt(void);
+
+static inline void dispatch_ip2(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long long mask;
+
+ /*
+ * Default...we've hit an IP[2] interrupt, which means we've got to
+ * check the 1250 interrupt registers to figure out what to do. Need
+ * to detect which CPU we're on, now that smp_affinity is supported.
+ */
+ mask = __raw_readq(IOADDR(A_IMR_REGISTER(cpu,
+ R_IMR_INTERRUPT_STATUS_BASE)));
+ if (mask)
+ do_IRQ(fls64(mask) - 1);
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned int pending;
+
+ /*
+ * What a pain. We have to be really careful saving the upper 32 bits
+ * of any register across function calls if we don't want them
+ * trashed--since we're running in -o32, the calling routine never saves
+ * the full 64 bits of a register across a function call. Being the
+ * interrupt handler, we're guaranteed that interrupts are disabled
+ * during this code so we don't have to worry about random interrupts
+ * blasting the high 32 bits.
+ */
+
+ pending = read_c0_cause() & read_c0_status() & ST0_IM;
+
+ if (pending & CAUSEF_IP7) /* CPU performance counter interrupt */
+ do_IRQ(MIPS_CPU_IRQ_BASE + 7);
+ else if (pending & CAUSEF_IP4)
+ do_IRQ(K_INT_TIMER_0 + cpu); /* sb1250_timer_interrupt() */
+
+#ifdef CONFIG_SMP
+ else if (pending & CAUSEF_IP3)
+ sb1250_mailbox_interrupt();
+#endif
+
+ else if (pending & CAUSEF_IP2)
+ dispatch_ip2();
+ else
+ spurious_interrupt();
+}
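
dispatch_ip2() above turns the 64-bit interrupt status word into a single IRQ number by taking the highest set bit, fls64(mask) - 1, so higher-numbered sources win when several are pending at once. A small standalone illustration of that priority rule, using GCC's __builtin_clzll in place of the kernel's fls64:

#include <stdint.h>
#include <stdio.h>

/* Highest set bit, numbered from 0 -- equivalent to the kernel's fls64(x) - 1. */
static int highest_pending(uint64_t mask)
{
	if (!mask)
		return -1;
	return 63 - __builtin_clzll(mask);
}

int main(void)
{
	/* Pretend sources 3, 17 and 42 are pending at once. */
	uint64_t status = (1ULL << 3) | (1ULL << 17) | (1ULL << 42);

	printf("dispatching IRQ %d first\n", highest_pending(status));
	return 0;
}
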
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
new file mode 100644
index 000000000..644b19038
--- /dev/null
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
+ */
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/string.h>
+
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/io.h>
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_scd.h>
+
+unsigned int sb1_pass;
+unsigned int soc_pass;
+unsigned int soc_type;
+EXPORT_SYMBOL(soc_type);
+unsigned int periph_rev;
+EXPORT_SYMBOL_GPL(periph_rev);
+unsigned int zbbus_mhz;
+EXPORT_SYMBOL(zbbus_mhz);
+
+static char *soc_str;
+static char *pass_str;
+static unsigned int war_pass; /* XXXKW don't overload PASS defines? */
+
+static int __init setup_bcm1250(void)
+{
+ int ret = 0;
+
+ switch (soc_pass) {
+ case K_SYS_REVISION_BCM1250_PASS1:
+ periph_rev = 1;
+ pass_str = "Pass 1";
+ break;
+ case K_SYS_REVISION_BCM1250_A10:
+ periph_rev = 2;
+ pass_str = "A8/A10";
+ /* XXXKW different war_pass? */
+ war_pass = K_SYS_REVISION_BCM1250_PASS2;
+ break;
+ case K_SYS_REVISION_BCM1250_PASS2_2:
+ periph_rev = 2;
+ pass_str = "B1";
+ break;
+ case K_SYS_REVISION_BCM1250_B2:
+ periph_rev = 2;
+ pass_str = "B2";
+ war_pass = K_SYS_REVISION_BCM1250_PASS2_2;
+ break;
+ case K_SYS_REVISION_BCM1250_PASS3:
+ periph_rev = 3;
+ pass_str = "C0";
+ break;
+ case K_SYS_REVISION_BCM1250_C1:
+ periph_rev = 3;
+ pass_str = "C1";
+ break;
+ default:
+ if (soc_pass < K_SYS_REVISION_BCM1250_PASS2_2) {
+ periph_rev = 2;
+ pass_str = "A0-A6";
+ war_pass = K_SYS_REVISION_BCM1250_PASS2;
+ } else {
+ printk("Unknown BCM1250 rev %x\n", soc_pass);
+ ret = 1;
+ }
+ break;
+ }
+
+ return ret;
+}
+
+int sb1250_m3_workaround_needed(void)
+{
+ switch (soc_type) {
+ case K_SYS_SOC_TYPE_BCM1250:
+ case K_SYS_SOC_TYPE_BCM1250_ALT:
+ case K_SYS_SOC_TYPE_BCM1250_ALT2:
+ case K_SYS_SOC_TYPE_BCM1125:
+ case K_SYS_SOC_TYPE_BCM1125H:
+ return soc_pass < K_SYS_REVISION_BCM1250_C0;
+
+ default:
+ return 0;
+ }
+}
+
+static int __init setup_bcm112x(void)
+{
+ int ret = 0;
+
+ switch (soc_pass) {
+ case 0:
+ /* Early build didn't have revid set */
+ periph_rev = 3;
+ pass_str = "A1";
+ war_pass = K_SYS_REVISION_BCM112x_A1;
+ break;
+ case K_SYS_REVISION_BCM112x_A1:
+ periph_rev = 3;
+ pass_str = "A1";
+ break;
+ case K_SYS_REVISION_BCM112x_A2:
+ periph_rev = 3;
+ pass_str = "A2";
+ break;
+ case K_SYS_REVISION_BCM112x_A3:
+ periph_rev = 3;
+ pass_str = "A3";
+ break;
+ case K_SYS_REVISION_BCM112x_A4:
+ periph_rev = 3;
+ pass_str = "A4";
+ break;
+ case K_SYS_REVISION_BCM112x_B0:
+ periph_rev = 3;
+ pass_str = "B0";
+ break;
+ default:
+ printk("Unknown %s rev %x\n", soc_str, soc_pass);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/* Setup code likely to be common to all SiByte platforms */
+
+static int __init sys_rev_decode(void)
+{
+ int ret = 0;
+
+ war_pass = soc_pass;
+ switch (soc_type) {
+ case K_SYS_SOC_TYPE_BCM1250:
+ case K_SYS_SOC_TYPE_BCM1250_ALT:
+ case K_SYS_SOC_TYPE_BCM1250_ALT2:
+ soc_str = "BCM1250";
+ ret = setup_bcm1250();
+ break;
+ case K_SYS_SOC_TYPE_BCM1120:
+ soc_str = "BCM1120";
+ ret = setup_bcm112x();
+ break;
+ case K_SYS_SOC_TYPE_BCM1125:
+ soc_str = "BCM1125";
+ ret = setup_bcm112x();
+ break;
+ case K_SYS_SOC_TYPE_BCM1125H:
+ soc_str = "BCM1125H";
+ ret = setup_bcm112x();
+ break;
+ default:
+ printk("Unknown SOC type %x\n", soc_type);
+ ret = 1;
+ break;
+ }
+
+ return ret;
+}
+
+void __init sb1250_setup(void)
+{
+ uint64_t sys_rev;
+ int plldiv;
+ int bad_config = 0;
+
+ sb1_pass = read_c0_prid() & PRID_REV_MASK;
+ sys_rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION));
+ soc_type = SYS_SOC_TYPE(sys_rev);
+ soc_pass = G_SYS_REVISION(sys_rev);
+
+ if (sys_rev_decode()) {
+ printk("Restart after failure to identify SiByte chip\n");
+ machine_restart(NULL);
+ }
+
+ plldiv = G_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
+ zbbus_mhz = ((plldiv >> 1) * 50) + ((plldiv & 1) * 25);
+
+ printk("Broadcom SiByte %s %s @ %d MHz (SB1 rev %d)\n",
+ soc_str, pass_str, zbbus_mhz * 2, sb1_pass);
+ printk("Board type: %s\n", get_system_type());
+
+ switch (war_pass) {
+ case K_SYS_REVISION_BCM1250_PASS1:
+ printk("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, "
+ "and the kernel doesn't have the proper "
+ "workarounds compiled in. @@@@\n");
+ bad_config = 1;
+ break;
+ case K_SYS_REVISION_BCM1250_PASS2:
+ /* Pass 2 - easiest as default for now - so many numbers */
+#if !defined(CONFIG_SB1_PASS_2_WORKAROUNDS) || \
+ !defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS)
+ printk("@@@@ This is a BCM1250 A3-A10 board, and the "
+ "kernel doesn't have the proper workarounds "
+ "compiled in. @@@@\n");
+ bad_config = 1;
+#endif
+#ifdef CONFIG_CPU_HAS_PREFETCH
+ printk("@@@@ Prefetches may be enabled in this kernel, "
+ "but are buggy on this board. @@@@\n");
+ bad_config = 1;
+#endif
+ break;
+ case K_SYS_REVISION_BCM1250_PASS2_2:
+#ifndef CONFIG_SB1_PASS_2_WORKAROUNDS
+ printk("@@@@ This is a BCM1250 B1/B2. board, and the "
+ "kernel doesn't have the proper workarounds "
+ "compiled in. @@@@\n");
+ bad_config = 1;
+#endif
+#if defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS) || \
+ !defined(CONFIG_CPU_HAS_PREFETCH)
+ printk("@@@@ This is a BCM1250 B1/B2, but the kernel is "
+ "conservatively configured for an 'A' stepping. "
+ "@@@@\n");
+#endif
+ break;
+ default:
+ break;
+ }
+ if (bad_config) {
+ printk("Invalid configuration for this chip.\n");
+ machine_restart(NULL);
+ }
+}
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
new file mode 100644
index 000000000..7a794234e
--- /dev/null
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2001, 2002, 2003 Broadcom Corporation
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel_stat.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/fw/cfe/cfe_api.h>
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+
+static void *mailbox_set_regs[] = {
+ IOADDR(A_IMR_CPU0_BASE + R_IMR_MAILBOX_SET_CPU),
+ IOADDR(A_IMR_CPU1_BASE + R_IMR_MAILBOX_SET_CPU)
+};
+
+static void *mailbox_clear_regs[] = {
+ IOADDR(A_IMR_CPU0_BASE + R_IMR_MAILBOX_CLR_CPU),
+ IOADDR(A_IMR_CPU1_BASE + R_IMR_MAILBOX_CLR_CPU)
+};
+
+static void *mailbox_regs[] = {
+ IOADDR(A_IMR_CPU0_BASE + R_IMR_MAILBOX_CPU),
+ IOADDR(A_IMR_CPU1_BASE + R_IMR_MAILBOX_CPU)
+};
+
+/*
+ * SMP init and finish on secondary CPUs
+ */
+void sb1250_smp_init(void)
+{
+ unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
+ STATUSF_IP1 | STATUSF_IP0;
+
+ /* Set interrupt mask, but don't enable */
+ change_c0_status(ST0_IM, imask);
+}
+
+/*
+ * These are routines for dealing with the sb1250 smp capabilities
+ * independent of board/firmware
+ */
+
+/*
+ * Simple enough; everything is set up, so just poke the appropriate mailbox
+ * register, and we should be set
+ */
+static void sb1250_send_ipi_single(int cpu, unsigned int action)
+{
+ __raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
+}
+
+static inline void sb1250_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ sb1250_send_ipi_single(i, action);
+}
+
+/*
+ * Code to run on secondary just after probing the CPU
+ */
+static void sb1250_init_secondary(void)
+{
+ extern void sb1250_smp_init(void);
+
+ sb1250_smp_init();
+}
+
+/*
+ * Do any tidying up before marking online and running the idle
+ * loop
+ */
+static void sb1250_smp_finish(void)
+{
+ extern void sb1250_clockevent_init(void);
+
+ sb1250_clockevent_init();
+ local_irq_enable();
+}
+
+/*
+ * Setup the PC, SP, and GP of a secondary processor and start it
+ * running!
+ */
+static int sb1250_boot_secondary(int cpu, struct task_struct *idle)
+{
+ int retval;
+
+ retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
+ __KSTK_TOS(idle),
+ (unsigned long)task_thread_info(idle), 0);
+ if (retval != 0)
+ printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
+ return retval;
+}
+
+/*
+ * Use CFE to find out how many CPUs are available, setting up
+ * cpu_possible_mask and the logical/physical mappings.
+ * XXXKW will the boot CPU ever not be physical 0?
+ *
+ * Common setup before any secondaries are started
+ */
+static void __init sb1250_smp_setup(void)
+{
+ int i, num;
+
+ init_cpu_possible(cpumask_of(0));
+ __cpu_number_map[0] = 0;
+ __cpu_logical_map[0] = 0;
+
+ for (i = 1, num = 0; i < NR_CPUS; i++) {
+ if (cfe_cpu_stop(i) == 0) {
+ set_cpu_possible(i, true);
+ __cpu_number_map[i] = ++num;
+ __cpu_logical_map[num] = i;
+ }
+ }
+ printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
+}
+
+static void __init sb1250_prepare_cpus(unsigned int max_cpus)
+{
+}
+
+const struct plat_smp_ops sb_smp_ops = {
+ .send_ipi_single = sb1250_send_ipi_single,
+ .send_ipi_mask = sb1250_send_ipi_mask,
+ .init_secondary = sb1250_init_secondary,
+ .smp_finish = sb1250_smp_finish,
+ .boot_secondary = sb1250_boot_secondary,
+ .smp_setup = sb1250_smp_setup,
+ .prepare_cpus = sb1250_prepare_cpus,
+};
+
+void sb1250_mailbox_interrupt(void)
+{
+ int cpu = smp_processor_id();
+ int irq = K_INT_MBOX_0;
+ unsigned int action;
+
+ kstat_incr_irq_this_cpu(irq);
+ /* Load the mailbox register to figure out what we're supposed to do */
+ action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff;
+
+ /* Clear the mailbox to clear the interrupt */
+ ____raw_writeq(((u64)action) << 48, mailbox_clear_regs[cpu]);
+
+ if (action & SMP_RESCHEDULE_YOURSELF)
+ scheduler_ipi();
+
+ if (action & SMP_CALL_FUNCTION) {
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
+ }
+}
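
The IPI path above packs the action bits into the top 16 bits of the 64-bit per-CPU mailbox register (hence the << 48 on send and the >> 48 in the mailbox interrupt handler). A rough, stand-alone sketch of that encoding, assuming nothing about the SB1250 register layout beyond those shifts (the helper names are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* Pack an IPI action bitmask into bits 63:48, as the send path above does. */
static uint64_t mailbox_encode(unsigned int action)
{
	return (uint64_t)(action & 0xffff) << 48;
}

/* Recover the action bitmask from a mailbox value, as the handler does. */
static unsigned int mailbox_decode(uint64_t mbox)
{
	return (mbox >> 48) & 0xffff;
}

int main(void)
{
	unsigned int action = 0x3;	/* e.g. reschedule + call-function bits */
	uint64_t mbox = mailbox_encode(action);

	printf("mailbox word: %#llx, decoded action: %#x\n",
	       (unsigned long long)mbox, mailbox_decode(mbox));
	return 0;
}

Pending action bits accumulate in the mailbox until the handler writes them back to the clear register, which is why the handler reads, clears, and only then dispatches.
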
diff --git a/arch/mips/sibyte/sb1250/time.c b/arch/mips/sibyte/sb1250/time.c
new file mode 100644
index 000000000..8b63000a4
--- /dev/null
+++ b/arch/mips/sibyte/sb1250/time.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000, 2001 Broadcom Corporation
+ */
+#include <linux/init.h>
+
+extern void sb1250_clocksource_init(void);
+extern void sb1250_clockevent_init(void);
+
+void __init plat_time_init(void)
+{
+ sb1250_clocksource_init();
+ sb1250_clockevent_init();
+}
diff --git a/arch/mips/sibyte/swarm/Makefile b/arch/mips/sibyte/swarm/Makefile
new file mode 100644
index 000000000..96b41a28f
--- /dev/null
+++ b/arch/mips/sibyte/swarm/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y := platform.o setup.o rtc_xicor1241.o \
+ rtc_m41t81.o
+
+obj-$(CONFIG_I2C_BOARDINFO) += swarm-i2c.o
diff --git a/arch/mips/sibyte/swarm/platform.c b/arch/mips/sibyte/swarm/platform.c
new file mode 100644
index 000000000..484969db7
--- /dev/null
+++ b/arch/mips/sibyte/swarm/platform.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
+
+#include <asm/sibyte/board.h>
+#include <asm/sibyte/sb1250_genbus.h>
+#include <asm/sibyte/sb1250_regs.h>
+
+#if defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_LITTLESUR)
+
+#define DRV_NAME "pata-swarm"
+
+#define SWARM_IDE_SHIFT 5
+#define SWARM_IDE_BASE 0x1f0
+#define SWARM_IDE_CTRL 0x3f6
+
+static struct resource swarm_pata_resource[] = {
+ {
+ .name = "Swarm GenBus IDE",
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "Swarm GenBus IDE",
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "Swarm GenBus IDE",
+ .flags = IORESOURCE_IRQ,
+ .start = K_INT_GB_IDE,
+ .end = K_INT_GB_IDE,
+ },
+};
+
+static struct pata_platform_info pata_platform_data = {
+ .ioport_shift = SWARM_IDE_SHIFT,
+};
+
+static struct platform_device swarm_pata_device = {
+ .name = "pata_platform",
+ .id = -1,
+ .resource = swarm_pata_resource,
+ .num_resources = ARRAY_SIZE(swarm_pata_resource),
+ .dev = {
+ .platform_data = &pata_platform_data,
+ .coherent_dma_mask = ~0, /* grumble */
+ },
+};
+
+static int __init swarm_pata_init(void)
+{
+ u8 __iomem *base;
+ phys_addr_t offset, size;
+ struct resource *r;
+
+ if (!SIBYTE_HAVE_IDE)
+ return -ENODEV;
+
+ base = ioremap(A_IO_EXT_BASE, 0x800);
+ offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS));
+ size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS));
+ iounmap(base);
+
+ offset = G_IO_START_ADDR(offset) << S_IO_ADDRBASE;
+ size = (G_IO_MULT_SIZE(size) + 1) << S_IO_REGSIZE;
+ if (offset < A_PHYS_GENBUS || offset >= A_PHYS_GENBUS_END) {
+ pr_info(DRV_NAME ": PATA interface at GenBus disabled\n");
+
+ return -EBUSY;
+ }
+
+ pr_info(DRV_NAME ": PATA interface at GenBus slot %i\n", IDE_CS);
+
+ r = swarm_pata_resource;
+ r[0].start = offset + (SWARM_IDE_BASE << SWARM_IDE_SHIFT);
+ r[0].end = offset + ((SWARM_IDE_BASE + 8) << SWARM_IDE_SHIFT) - 1;
+ r[1].start = offset + (SWARM_IDE_CTRL << SWARM_IDE_SHIFT);
+ r[1].end = offset + ((SWARM_IDE_CTRL + 1) << SWARM_IDE_SHIFT) - 1;
+
+ return platform_device_register(&swarm_pata_device);
+}
+
+device_initcall(swarm_pata_init);
+
+#endif /* defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_LITTLESUR) */
+
+#define sb1250_dev_struct(num) \
+ static struct resource sb1250_res##num = { \
+ .name = "SB1250 MAC " __stringify(num), \
+ .flags = IORESOURCE_MEM, \
+ .start = A_MAC_CHANNEL_BASE(num), \
+ .end = A_MAC_CHANNEL_BASE(num + 1) - 1, \
+ };\
+ static struct platform_device sb1250_dev##num = { \
+ .name = "sb1250-mac", \
+ .id = num, \
+ .resource = &sb1250_res##num, \
+ .num_resources = 1, \
+ }
+
+sb1250_dev_struct(0);
+sb1250_dev_struct(1);
+sb1250_dev_struct(2);
+sb1250_dev_struct(3);
+
+static struct platform_device *sb1250_devs[] __initdata = {
+ &sb1250_dev0,
+ &sb1250_dev1,
+ &sb1250_dev2,
+ &sb1250_dev3,
+};
+
+static int __init sb1250_device_init(void)
+{
+ int ret;
+
+ /* Set the number of available units based on the SOC type. */
+ switch (soc_type) {
+ case K_SYS_SOC_TYPE_BCM1250:
+ case K_SYS_SOC_TYPE_BCM1250_ALT:
+ ret = platform_add_devices(sb1250_devs, 3);
+ break;
+ case K_SYS_SOC_TYPE_BCM1120:
+ case K_SYS_SOC_TYPE_BCM1125:
+ case K_SYS_SOC_TYPE_BCM1125H:
+ case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
+ ret = platform_add_devices(sb1250_devs, 2);
+ break;
+ case K_SYS_SOC_TYPE_BCM1x55:
+ case K_SYS_SOC_TYPE_BCM1x80:
+ ret = platform_add_devices(sb1250_devs, 4);
+ break;
+ default:
+ ret = -ENODEV;
+ break;
+ }
+ return ret;
+}
+device_initcall(sb1250_device_init);
diff --git a/arch/mips/sibyte/swarm/rtc_m41t81.c b/arch/mips/sibyte/swarm/rtc_m41t81.c
new file mode 100644
index 000000000..afe1e3460
--- /dev/null
+++ b/arch/mips/sibyte/swarm/rtc_m41t81.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000, 2001 Broadcom Corporation
+ *
+ * Copyright (C) 2002 MontaVista Software Inc.
+ * Author: jsun@mvista.com or jsun@junsun.net
+ */
+#include <linux/bcd.h>
+#include <linux/types.h>
+#include <linux/time.h>
+
+#include <asm/time.h>
+#include <asm/addrspace.h>
+#include <asm/io.h>
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_smbus.h>
+
+
+/* M41T81 definitions */
+
+/*
+ * Register bits
+ */
+
+#define M41T81REG_SC_ST 0x80 /* stop bit */
+#define M41T81REG_HR_CB 0x40 /* century bit */
+#define M41T81REG_HR_CEB 0x80 /* century enable bit */
+#define M41T81REG_CTL_S 0x20 /* sign bit */
+#define M41T81REG_CTL_FT 0x40 /* frequency test bit */
+#define M41T81REG_CTL_OUT 0x80 /* output level */
+#define M41T81REG_WD_RB0 0x01 /* watchdog resolution bit 0 */
+#define M41T81REG_WD_RB1 0x02 /* watchdog resolution bit 1 */
+#define M41T81REG_WD_BMB0 0x04 /* watchdog multiplier bit 0 */
+#define M41T81REG_WD_BMB1 0x08 /* watchdog multiplier bit 1 */
+#define M41T81REG_WD_BMB2 0x10 /* watchdog multiplier bit 2 */
+#define M41T81REG_WD_BMB3 0x20 /* watchdog multiplier bit 3 */
+#define M41T81REG_WD_BMB4 0x40 /* watchdog multiplier bit 4 */
+#define M41T81REG_AMO_ABE 0x20 /* alarm in "battery back-up mode" enable bit */
+#define M41T81REG_AMO_SQWE 0x40 /* square wave enable */
+#define M41T81REG_AMO_AFE 0x80 /* alarm flag enable flag */
+#define M41T81REG_ADT_RPT5 0x40 /* alarm repeat mode bit 5 */
+#define M41T81REG_ADT_RPT4 0x80 /* alarm repeat mode bit 4 */
+#define M41T81REG_AHR_RPT3 0x80 /* alarm repeat mode bit 3 */
+#define M41T81REG_AHR_HT 0x40 /* halt update bit */
+#define M41T81REG_AMN_RPT2 0x80 /* alarm repeat mode bit 2 */
+#define M41T81REG_ASC_RPT1 0x80 /* alarm repeat mode bit 1 */
+#define M41T81REG_FLG_AF 0x40 /* alarm flag (read only) */
+#define M41T81REG_FLG_WDF 0x80 /* watchdog flag (read only) */
+#define M41T81REG_SQW_RS0 0x10 /* sqw frequency bit 0 */
+#define M41T81REG_SQW_RS1 0x20 /* sqw frequency bit 1 */
+#define M41T81REG_SQW_RS2 0x40 /* sqw frequency bit 2 */
+#define M41T81REG_SQW_RS3 0x80 /* sqw frequency bit 3 */
+
+
+/*
+ * Register numbers
+ */
+
+#define M41T81REG_TSC 0x00 /* tenths/hundredths of second */
+#define M41T81REG_SC 0x01 /* seconds */
+#define M41T81REG_MN 0x02 /* minute */
+#define M41T81REG_HR 0x03 /* hour/century */
+#define M41T81REG_DY 0x04 /* day of week */
+#define M41T81REG_DT 0x05 /* date of month */
+#define M41T81REG_MO 0x06 /* month */
+#define M41T81REG_YR 0x07 /* year */
+#define M41T81REG_CTL 0x08 /* control */
+#define M41T81REG_WD 0x09 /* watchdog */
+#define M41T81REG_AMO 0x0A /* alarm: month */
+#define M41T81REG_ADT 0x0B /* alarm: date */
+#define M41T81REG_AHR 0x0C /* alarm: hour */
+#define M41T81REG_AMN 0x0D /* alarm: minute */
+#define M41T81REG_ASC 0x0E /* alarm: second */
+#define M41T81REG_FLG 0x0F /* flags */
+#define M41T81REG_SQW 0x13 /* square wave register */
+
+#define M41T81_CCR_ADDRESS 0x68
+
+#define SMB_CSR(reg) IOADDR(A_SMB_REGISTER(1, reg))
+
+static int m41t81_read(uint8_t addr)
+{
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
+
+ __raw_writeq(addr & 0xff, SMB_CSR(R_SMB_CMD));
+ __raw_writeq(V_SMB_ADDR(M41T81_CCR_ADDRESS) | V_SMB_TT_WR1BYTE,
+ SMB_CSR(R_SMB_START));
+
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
+
+ __raw_writeq(V_SMB_ADDR(M41T81_CCR_ADDRESS) | V_SMB_TT_RD1BYTE,
+ SMB_CSR(R_SMB_START));
+
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
+
+ if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
+ /* Clear error bit by writing a 1 */
+ __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
+ return -1;
+ }
+
+ return __raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff;
+}
+
+static int m41t81_write(uint8_t addr, int b)
+{
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
+
+ __raw_writeq(addr & 0xff, SMB_CSR(R_SMB_CMD));
+ __raw_writeq(b & 0xff, SMB_CSR(R_SMB_DATA));
+ __raw_writeq(V_SMB_ADDR(M41T81_CCR_ADDRESS) | V_SMB_TT_WR2BYTE,
+ SMB_CSR(R_SMB_START));
+
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
+
+ if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
+ /* Clear error bit by writing a 1 */
+ __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
+ return -1;
+ }
+
+ /* read the same byte again to make sure it is written */
+ __raw_writeq(V_SMB_ADDR(M41T81_CCR_ADDRESS) | V_SMB_TT_RD1BYTE,
+ SMB_CSR(R_SMB_START));
+
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
+
+ return 0;
+}
+
+int m41t81_set_time(time64_t t)
+{
+ struct rtc_time tm;
+ unsigned long flags;
+
+ /* Note we don't care about the century */
+ rtc_time64_to_tm(t, &tm);
+
+ /*
+ * Note that the write order matters, as it ensures correctness.
+ * Writing the seconds register clears the tenths-of-a-second field,
+ * so it is reasonable to assume the minutes write completes within
+ * that same second.
+ */
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ tm.tm_sec = bin2bcd(tm.tm_sec);
+ m41t81_write(M41T81REG_SC, tm.tm_sec);
+
+ tm.tm_min = bin2bcd(tm.tm_min);
+ m41t81_write(M41T81REG_MN, tm.tm_min);
+
+ tm.tm_hour = bin2bcd(tm.tm_hour);
+ tm.tm_hour = (tm.tm_hour & 0x3f) | (m41t81_read(M41T81REG_HR) & 0xc0);
+ m41t81_write(M41T81REG_HR, tm.tm_hour);
+
+ /* tm_wday runs from 0 to 6; the chip expects 1 to 7, with Sunday as 7 */
+ if (tm.tm_wday == 0) tm.tm_wday = 7;
+ tm.tm_wday = bin2bcd(tm.tm_wday);
+ m41t81_write(M41T81REG_DY, tm.tm_wday);
+
+ tm.tm_mday = bin2bcd(tm.tm_mday);
+ m41t81_write(M41T81REG_DT, tm.tm_mday);
+
+ /* tm_mon starts from 0, *ick* */
+ tm.tm_mon++;
+ tm.tm_mon = bin2bcd(tm.tm_mon);
+ m41t81_write(M41T81REG_MO, tm.tm_mon);
+
+ /* we don't do century, everything is beyond 2000 */
+ tm.tm_year %= 100;
+ tm.tm_year = bin2bcd(tm.tm_year);
+ m41t81_write(M41T81REG_YR, tm.tm_year);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ return 0;
+}
+
+time64_t m41t81_get_time(void)
+{
+ unsigned int year, mon, day, hour, min, sec;
+ unsigned long flags;
+
+ /*
+ * min is valid if two reads of sec are the same.
+ */
+ for (;;) {
+ spin_lock_irqsave(&rtc_lock, flags);
+ sec = m41t81_read(M41T81REG_SC);
+ min = m41t81_read(M41T81REG_MN);
+ if (sec == m41t81_read(M41T81REG_SC)) break;
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ }
+ hour = m41t81_read(M41T81REG_HR) & 0x3f;
+ day = m41t81_read(M41T81REG_DT);
+ mon = m41t81_read(M41T81REG_MO);
+ year = m41t81_read(M41T81REG_YR);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ sec = bcd2bin(sec);
+ min = bcd2bin(min);
+ hour = bcd2bin(hour);
+ day = bcd2bin(day);
+ mon = bcd2bin(mon);
+ year = bcd2bin(year);
+
+ year += 2000;
+
+ return mktime64(year, mon, day, hour, min, sec);
+}
+
+int m41t81_probe(void)
+{
+ unsigned int tmp;
+
+ /* enable chip if it is not enabled yet */
+ tmp = m41t81_read(M41T81REG_SC);
+ m41t81_write(M41T81REG_SC, tmp & 0x7f);
+
+ return m41t81_read(M41T81REG_SC) != -1;
+}
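
Both on-board RTC drivers store the time fields in packed BCD via bin2bcd()/bcd2bin() from <linux/bcd.h>. A stand-alone sketch of those conversions, reimplemented locally purely for illustration (these are not the kernel helpers themselves, just the same arithmetic):

#include <stdio.h>

/* Convert a binary value 0..99 to packed BCD, as bin2bcd() does. */
static unsigned int to_bcd(unsigned int bin)
{
	return ((bin / 10) << 4) | (bin % 10);
}

/* Convert packed BCD back to binary, as bcd2bin() does. */
static unsigned int from_bcd(unsigned int bcd)
{
	return (bcd >> 4) * 10 + (bcd & 0x0f);
}

int main(void)
{
	unsigned int sec = 59;
	unsigned int bcd = to_bcd(sec);

	/* 59 encodes as 0x59 in BCD and round-trips back to 59. */
	printf("%u -> %#x -> %u\n", sec, bcd, from_bcd(bcd));
	return 0;
}
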
diff --git a/arch/mips/sibyte/swarm/rtc_xicor1241.c b/arch/mips/sibyte/swarm/rtc_xicor1241.c
new file mode 100644
index 000000000..e2164200c
--- /dev/null
+++ b/arch/mips/sibyte/swarm/rtc_xicor1241.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000, 2001 Broadcom Corporation
+ *
+ * Copyright (C) 2002 MontaVista Software Inc.
+ * Author: jsun@mvista.com or jsun@junsun.net
+ */
+#include <linux/bcd.h>
+#include <linux/types.h>
+#include <linux/time.h>
+
+#include <asm/time.h>
+#include <asm/addrspace.h>
+#include <asm/io.h>
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_smbus.h>
+
+
+/* Xicor 1241 definitions */
+
+/*
+ * Register bits
+ */
+
+#define X1241REG_SR_BAT 0x80 /* currently on battery power */
+#define X1241REG_SR_RWEL 0x04 /* r/w latch is enabled, can write RTC */
+#define X1241REG_SR_WEL 0x02 /* r/w latch is unlocked, can enable r/w now */
+#define X1241REG_SR_RTCF 0x01 /* clock failed */
+#define X1241REG_BL_BP2 0x80 /* block protect 2 */
+#define X1241REG_BL_BP1 0x40 /* block protect 1 */
+#define X1241REG_BL_BP0 0x20 /* block protect 0 */
+#define X1241REG_BL_WD1 0x10
+#define X1241REG_BL_WD0 0x08
+#define X1241REG_HR_MIL 0x80 /* military time format */
+
+/*
+ * Register numbers
+ */
+
+#define X1241REG_BL 0x10 /* block protect bits */
+#define X1241REG_INT 0x11 /* */
+#define X1241REG_SC 0x30 /* Seconds */
+#define X1241REG_MN 0x31 /* Minutes */
+#define X1241REG_HR 0x32 /* Hours */
+#define X1241REG_DT 0x33 /* Day of month */
+#define X1241REG_MO 0x34 /* Month */
+#define X1241REG_YR 0x35 /* Year */
+#define X1241REG_DW 0x36 /* Day of Week */
+#define X1241REG_Y2K 0x37 /* Year 2K */
+#define X1241REG_SR 0x3F /* Status register */
+
+#define X1241_CCR_ADDRESS 0x6F
+
+#define SMB_CSR(reg) IOADDR(A_SMB_REGISTER(1, reg))
+
+static int xicor_read(uint8_t addr)
+{
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
+
+ __raw_writeq((addr >> 8) & 0x7, SMB_CSR(R_SMB_CMD));
+ __raw_writeq(addr & 0xff, SMB_CSR(R_SMB_DATA));
+ __raw_writeq(V_SMB_ADDR(X1241_CCR_ADDRESS) | V_SMB_TT_WR2BYTE,
+ SMB_CSR(R_SMB_START));
+
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
+
+ __raw_writeq(V_SMB_ADDR(X1241_CCR_ADDRESS) | V_SMB_TT_RD1BYTE,
+ SMB_CSR(R_SMB_START));
+
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
+
+ if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
+ /* Clear error bit by writing a 1 */
+ __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
+ return -1;
+ }
+
+ return __raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff;
+}
+
+static int xicor_write(uint8_t addr, int b)
+{
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
+
+ __raw_writeq(addr, SMB_CSR(R_SMB_CMD));
+ __raw_writeq((addr & 0xff) | ((b & 0xff) << 8), SMB_CSR(R_SMB_DATA));
+ __raw_writeq(V_SMB_ADDR(X1241_CCR_ADDRESS) | V_SMB_TT_WR3BYTE,
+ SMB_CSR(R_SMB_START));
+
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
+
+ if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
+ /* Clear error bit by writing a 1 */
+ __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+int xicor_set_time(time64_t t)
+{
+ struct rtc_time tm;
+ int tmp;
+ unsigned long flags;
+
+ rtc_time64_to_tm(t, &tm);
+ tm.tm_year += 1900;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ /* unlock writes to the CCR */
+ xicor_write(X1241REG_SR, X1241REG_SR_WEL);
+ xicor_write(X1241REG_SR, X1241REG_SR_WEL | X1241REG_SR_RWEL);
+
+ /* trivial ones */
+ tm.tm_sec = bin2bcd(tm.tm_sec);
+ xicor_write(X1241REG_SC, tm.tm_sec);
+
+ tm.tm_min = bin2bcd(tm.tm_min);
+ xicor_write(X1241REG_MN, tm.tm_min);
+
+ tm.tm_mday = bin2bcd(tm.tm_mday);
+ xicor_write(X1241REG_DT, tm.tm_mday);
+
+ /* tm_mon starts from 0, *ick* */
+ tm.tm_mon++;
+ tm.tm_mon = bin2bcd(tm.tm_mon);
+ xicor_write(X1241REG_MO, tm.tm_mon);
+
+ /* year is split */
+ tmp = tm.tm_year / 100;
+ tm.tm_year %= 100;
+ xicor_write(X1241REG_YR, tm.tm_year);
+ xicor_write(X1241REG_Y2K, tmp);
+
+ /* hour is the most tricky one */
+ tmp = xicor_read(X1241REG_HR);
+ if (tmp & X1241REG_HR_MIL) {
+ /* 24 hour format */
+ tm.tm_hour = bin2bcd(tm.tm_hour);
+ tmp = (tmp & ~0x3f) | (tm.tm_hour & 0x3f);
+ } else {
+ /* 12 hour format, with 0x20 for PM */
+ tmp = tmp & ~0x3f;
+ if (tm.tm_hour >= 12) {
+ tmp |= 0x20;
+ tm.tm_hour -= 12;
+ }
+ tm.tm_hour = bin2bcd(tm.tm_hour);
+ tmp |= tm.tm_hour;
+ }
+ xicor_write(X1241REG_HR, tmp);
+
+ xicor_write(X1241REG_SR, 0);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ return 0;
+}
+
+time64_t xicor_get_time(void)
+{
+ unsigned int year, mon, day, hour, min, sec, y2k;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ sec = xicor_read(X1241REG_SC);
+ min = xicor_read(X1241REG_MN);
+ hour = xicor_read(X1241REG_HR);
+
+ if (hour & X1241REG_HR_MIL) {
+ hour &= 0x3f;
+ } else {
+ if (hour & 0x20)
+ hour = (hour & 0xf) + 0x12;
+ }
+
+ day = xicor_read(X1241REG_DT);
+ mon = xicor_read(X1241REG_MO);
+ year = xicor_read(X1241REG_YR);
+ y2k = xicor_read(X1241REG_Y2K);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ sec = bcd2bin(sec);
+ min = bcd2bin(min);
+ hour = bcd2bin(hour);
+ day = bcd2bin(day);
+ mon = bcd2bin(mon);
+ year = bcd2bin(year);
+ y2k = bcd2bin(y2k);
+
+ year += (y2k * 100);
+
+ return mktime64(year, mon, day, hour, min, sec);
+}
+
+int xicor_probe(void)
+{
+ return xicor_read(X1241REG_SC) != -1;
+}
diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c
new file mode 100644
index 000000000..538a2791b
--- /dev/null
+++ b/arch/mips/sibyte/swarm/setup.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000, 2001, 2002, 2003, 2004 Broadcom Corporation
+ * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
+ */
+
+/*
+ * Setup code for the SWARM board
+ */
+
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/screen_info.h>
+#include <linux/initrd.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/bootinfo.h>
+#include <asm/mipsregs.h>
+#include <asm/reboot.h>
+#include <asm/time.h>
+#include <asm/traps.h>
+#include <asm/sibyte/sb1250.h>
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#include <asm/sibyte/bcm1480_regs.h>
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+#include <asm/sibyte/sb1250_regs.h>
+#else
+#error invalid SiByte board configuration
+#endif
+#include <asm/sibyte/sb1250_genbus.h>
+#include <asm/sibyte/board.h>
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+extern void bcm1480_setup(void);
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+extern void sb1250_setup(void);
+#else
+#error invalid SiByte board configuration
+#endif
+
+extern int xicor_probe(void);
+extern int xicor_set_time(time64_t);
+extern time64_t xicor_get_time(void);
+
+extern int m41t81_probe(void);
+extern int m41t81_set_time(time64_t);
+extern time64_t m41t81_get_time(void);
+
+const char *get_system_type(void)
+{
+ return "SiByte " SIBYTE_BOARD_NAME;
+}
+
+int swarm_be_handler(struct pt_regs *regs, int is_fixup)
+{
+ if (!is_fixup && (regs->cp0_cause & 4)) {
+ /* Data bus error - print PA */
+ printk("DBE physical address: %010Lx\n",
+ __read_64bit_c0_register($26, 1));
+ }
+ return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
+}
+
+enum swarm_rtc_type {
+ RTC_NONE,
+ RTC_XICOR,
+ RTC_M41T81,
+};
+
+enum swarm_rtc_type swarm_rtc_type;
+
+void read_persistent_clock64(struct timespec64 *ts)
+{
+ time64_t sec;
+
+ switch (swarm_rtc_type) {
+ case RTC_XICOR:
+ sec = xicor_get_time();
+ break;
+
+ case RTC_M41T81:
+ sec = m41t81_get_time();
+ break;
+
+ case RTC_NONE:
+ default:
+ sec = mktime64(2000, 1, 1, 0, 0, 0);
+ break;
+ }
+ ts->tv_sec = sec;
+ ts->tv_nsec = 0;
+}
+
+int update_persistent_clock64(struct timespec64 now)
+{
+ time64_t sec = now.tv_sec;
+
+ switch (swarm_rtc_type) {
+ case RTC_XICOR:
+ return xicor_set_time(sec);
+
+ case RTC_M41T81:
+ return m41t81_set_time(sec);
+
+ case RTC_NONE:
+ default:
+ return -1;
+ }
+}
+
+void __init plat_mem_setup(void)
+{
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+ bcm1480_setup();
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+ sb1250_setup();
+#else
+#error invalid SiByte board configuration
+#endif
+
+ board_be_handler = swarm_be_handler;
+
+ if (xicor_probe())
+ swarm_rtc_type = RTC_XICOR;
+ if (m41t81_probe())
+ swarm_rtc_type = RTC_M41T81;
+
+#ifdef CONFIG_VT
+ screen_info = (struct screen_info) {
+ .orig_video_page = 52,
+ .orig_video_mode = 3,
+ .orig_video_cols = 80,
+ .flags = 12,
+ .orig_video_ega_bx = 3,
+ .orig_video_lines = 25,
+ .orig_video_isVGA = 0x22,
+ .orig_video_points = 16,
+ };
+ /* XXXKW for CFE, get lines/cols from environment */
+#endif
+}
+
+#ifdef LEDS_PHYS
+
+#ifdef CONFIG_SIBYTE_CARMEL
+/* XXXKW need to detect Monterey/LittleSur/etc */
+#undef LEDS_PHYS
+#define LEDS_PHYS MLEDS_PHYS
+#endif
+
+void setleds(char *str)
+{
+ void *reg;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ reg = IOADDR(LEDS_PHYS) + 0x20 + ((3 - i) << 3);
+
+ if (!str[i])
+ writeb(' ', reg);
+ else
+ writeb(str[i], reg);
+ }
+}
+
+#endif /* LEDS_PHYS */
diff --git a/arch/mips/sibyte/swarm/swarm-i2c.c b/arch/mips/sibyte/swarm/swarm-i2c.c
new file mode 100644
index 000000000..1ed2dc96d
--- /dev/null
+++ b/arch/mips/sibyte/swarm/swarm-i2c.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Broadcom BCM91250A (SWARM), etc. I2C platform setup.
+ *
+ * Copyright (c) 2008 Maciej W. Rozycki
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+
+static struct i2c_board_info swarm_i2c_info1[] __initdata = {
+ {
+ I2C_BOARD_INFO("m41t81", 0x68),
+ },
+};
+
+static int __init swarm_i2c_init(void)
+{
+ int err;
+
+ err = i2c_register_board_info(1, swarm_i2c_info1,
+ ARRAY_SIZE(swarm_i2c_info1));
+ if (err < 0)
+ printk(KERN_ERR
+ "swarm-i2c: cannot register board I2C devices\n");
+ return err;
+}
+
+arch_initcall(swarm_i2c_init);
diff --git a/arch/mips/sni/Makefile b/arch/mips/sni/Makefile
new file mode 100644
index 000000000..6d97c3e96
--- /dev/null
+++ b/arch/mips/sni/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the SNI specific part of the kernel
+#
+
+obj-y += irq.o reset.o setup.o a20r.o rm200.o pcimt.o pcit.o time.o
+obj-$(CONFIG_EISA) += eisa.o
diff --git a/arch/mips/sni/Platform b/arch/mips/sni/Platform
new file mode 100644
index 000000000..b0b3dde0b
--- /dev/null
+++ b/arch/mips/sni/Platform
@@ -0,0 +1,10 @@
+#
+# SNI RM
+#
+cflags-$(CONFIG_SNI_RM) += -I$(srctree)/arch/mips/include/asm/mach-rm
+ifdef CONFIG_CPU_LITTLE_ENDIAN
+load-$(CONFIG_SNI_RM) += 0xffffffff80600000
+else
+load-$(CONFIG_SNI_RM) += 0xffffffff80030000
+endif
+all-$(CONFIG_SNI_RM) := $(COMPRESSION_FNAME).ecoff
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
new file mode 100644
index 000000000..eeeec18c4
--- /dev/null
+++ b/arch/mips/sni/a20r.c
@@ -0,0 +1,256 @@
+/*
+ * A20R specific code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+
+#include <asm/sni.h>
+#include <asm/time.h>
+
+#define PORT(_base,_irq) \
+ { \
+ .iobase = _base, \
+ .irq = _irq, \
+ .uartclk = 1843200, \
+ .iotype = UPIO_PORT, \
+ .flags = UPF_BOOT_AUTOCONF, \
+ }
+
+static struct plat_serial8250_port a20r_data[] = {
+ PORT(0x3f8, 4),
+ PORT(0x2f8, 3),
+ { },
+};
+
+static struct platform_device a20r_serial8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = a20r_data,
+ },
+};
+
+static struct resource a20r_ds1216_rsrc[] = {
+ {
+ .start = 0x1c081ffc,
+ .end = 0x1c081fff,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+static struct platform_device a20r_ds1216_device = {
+ .name = "rtc-ds1216",
+ .num_resources = ARRAY_SIZE(a20r_ds1216_rsrc),
+ .resource = a20r_ds1216_rsrc
+};
+
+static struct resource snirm_82596_rsrc[] = {
+ {
+ .start = 0x18000000,
+ .end = 0x18000004,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .start = 0x18010000,
+ .end = 0x18010004,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .start = 0x1ff00000,
+ .end = 0x1ff00020,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .start = 22,
+ .end = 22,
+ .flags = IORESOURCE_IRQ
+ },
+ {
+ .flags = 0x01 /* 16bit mpu port access */
+ }
+};
+
+static struct platform_device snirm_82596_pdev = {
+ .name = "snirm_82596",
+ .num_resources = ARRAY_SIZE(snirm_82596_rsrc),
+ .resource = snirm_82596_rsrc
+};
+
+static struct resource snirm_53c710_rsrc[] = {
+ {
+ .start = 0x19000000,
+ .end = 0x190fffff,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .start = 19,
+ .end = 19,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct platform_device snirm_53c710_pdev = {
+ .name = "snirm_53c710",
+ .num_resources = ARRAY_SIZE(snirm_53c710_rsrc),
+ .resource = snirm_53c710_rsrc
+};
+
+static struct resource sc26xx_rsrc[] = {
+ {
+ .start = 0x1c070000,
+ .end = 0x1c0700ff,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .start = 20,
+ .end = 20,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+#include <linux/platform_data/serial-sccnxp.h>
+
+static struct sccnxp_pdata sccnxp_data = {
+ .reg_shift = 2,
+ .mctrl_cfg[0] = MCTRL_SIG(DTR_OP, LINE_OP7) |
+ MCTRL_SIG(RTS_OP, LINE_OP3) |
+ MCTRL_SIG(DSR_IP, LINE_IP5) |
+ MCTRL_SIG(DCD_IP, LINE_IP6),
+ .mctrl_cfg[1] = MCTRL_SIG(DTR_OP, LINE_OP2) |
+ MCTRL_SIG(RTS_OP, LINE_OP1) |
+ MCTRL_SIG(DSR_IP, LINE_IP0) |
+ MCTRL_SIG(CTS_IP, LINE_IP1) |
+ MCTRL_SIG(DCD_IP, LINE_IP2) |
+ MCTRL_SIG(RNG_IP, LINE_IP3),
+};
+
+static struct platform_device sc26xx_pdev = {
+ .name = "sc2681",
+ .resource = sc26xx_rsrc,
+ .num_resources = ARRAY_SIZE(sc26xx_rsrc),
+ .dev = {
+ .platform_data = &sccnxp_data,
+ },
+};
+
+/*
+ * Trigger chipset to update CPU's CAUSE IP field
+ */
+static u32 a20r_update_cause_ip(void)
+{
+ u32 status = read_c0_status();
+
+ write_c0_status(status | 0x00010000);
+ asm volatile(
+ " .set push \n"
+ " .set noat \n"
+ " .set noreorder \n"
+ " lw $1, 0(%0) \n"
+ " sb $0, 0(%1) \n"
+ " sync \n"
+ " lb %1, 0(%1) \n"
+ " b 1f \n"
+ " ori %1, $1, 2 \n"
+ " .align 8 \n"
+ "1: \n"
+ " nop \n"
+ " sw %1, 0(%0) \n"
+ " sync \n"
+ " li %1, 0x20 \n"
+ "2: \n"
+ " nop \n"
+ " bnez %1,2b \n"
+ " addiu %1, -1 \n"
+ " sw $1, 0(%0) \n"
+ " sync \n"
+ ".set pop \n"
+ :
+ : "Jr" (PCIMT_UCONF), "Jr" (0xbc000000));
+ write_c0_status(status);
+
+ return status;
+}
+
+static inline void unmask_a20r_irq(struct irq_data *d)
+{
+ set_c0_status(0x100 << (d->irq - SNI_A20R_IRQ_BASE));
+ irq_enable_hazard();
+}
+
+static inline void mask_a20r_irq(struct irq_data *d)
+{
+ clear_c0_status(0x100 << (d->irq - SNI_A20R_IRQ_BASE));
+ irq_disable_hazard();
+}
+
+static struct irq_chip a20r_irq_type = {
+ .name = "A20R",
+ .irq_mask = mask_a20r_irq,
+ .irq_unmask = unmask_a20r_irq,
+};
+
+/*
+ * hwint 0 receives all interrupts
+ */
+static void a20r_hwint(void)
+{
+ u32 cause, status;
+ int irq;
+
+ clear_c0_status(IE_IRQ0);
+ status = a20r_update_cause_ip();
+ cause = read_c0_cause();
+
+ irq = ffs(((cause & status) >> 8) & 0xf8);
+ if (likely(irq > 0))
+ do_IRQ(SNI_A20R_IRQ_BASE + irq - 1);
+
+ a20r_update_cause_ip();
+ set_c0_status(IE_IRQ0);
+}
+
+void __init sni_a20r_irq_init(void)
+{
+ int i;
+
+ for (i = SNI_A20R_IRQ_BASE + 2 ; i < SNI_A20R_IRQ_BASE + 8; i++)
+ irq_set_chip_and_handler(i, &a20r_irq_type, handle_level_irq);
+ sni_hwint = a20r_hwint;
+ change_c0_status(ST0_IM, IE_IRQ0);
+ if (request_irq(SNI_A20R_IRQ_BASE + 3, sni_isa_irq_handler,
+ IRQF_SHARED, "ISA", sni_isa_irq_handler))
+ pr_err("Failed to register ISA interrupt\n");
+}
+
+void sni_a20r_init(void)
+{
+ /* FIXME, remove if not needed */
+}
+
+static int __init snirm_a20r_setup_devinit(void)
+{
+ switch (sni_brd_type) {
+ case SNI_BRD_TOWER_OASIC:
+ case SNI_BRD_MINITOWER:
+ platform_device_register(&snirm_82596_pdev);
+ platform_device_register(&snirm_53c710_pdev);
+ platform_device_register(&sc26xx_pdev);
+ platform_device_register(&a20r_serial8250_device);
+ platform_device_register(&a20r_ds1216_device);
+ sni_eisa_root_init();
+ break;
+ }
+ return 0;
+}
+
+device_initcall(snirm_a20r_setup_devinit);
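
The dispatch path above masks the CAUSE IP bits with the enabled sources and uses ffs() to pick the lowest-numbered pending interrupt. A minimal user-space sketch of that decode, with DEMO_IRQ_BASE standing in for SNI_A20R_IRQ_BASE (an assumption for illustration only):

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define DEMO_IRQ_BASE 24	/* illustrative stand-in for SNI_A20R_IRQ_BASE */

/* Decode a pending-bit mask into an IRQ number the way a20r_hwint() does:
 * ffs() returns the 1-based position of the lowest set bit, so a non-zero
 * result maps to DEMO_IRQ_BASE + position - 1, and 0 means nothing pending.
 */
static int decode_pending(unsigned int pending)
{
	int bit = ffs(pending);

	return bit > 0 ? DEMO_IRQ_BASE + bit - 1 : -1;
}

int main(void)
{
	/* Bits 3 and 5 pending: the lower one (bit 3) is dispatched first. */
	printf("dispatch irq %d\n", decode_pending((1u << 3) | (1u << 5)));
	return 0;
}
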
diff --git a/arch/mips/sni/eisa.c b/arch/mips/sni/eisa.c
new file mode 100644
index 000000000..483a43d07
--- /dev/null
+++ b/arch/mips/sni/eisa.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Virtual EISA root driver.
+ * Acts as a placeholder if we don't have a proper EISA bridge.
+ *
+ * (C) 2003 Marc Zyngier <maz@wild-wind.fr.eu.org>
+ * modified for SNI usage by Thomas Bogendoerfer
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/eisa.h>
+#include <linux/init.h>
+
+/* The default EISA device parent (virtual root device).
+ * Now use a platform device, since that's the obvious choice. */
+
+static struct platform_device eisa_root_dev = {
+ .name = "eisa",
+ .id = 0,
+};
+
+static struct eisa_root_device eisa_bus_root = {
+ .dev = &eisa_root_dev.dev,
+ .bus_base_addr = 0,
+ .res = &ioport_resource,
+ .slots = EISA_MAX_SLOTS,
+ .dma_mask = 0xffffffff,
+ .force_probe = 1,
+};
+
+int __init sni_eisa_root_init(void)
+{
+ int r;
+
+ r = platform_device_register(&eisa_root_dev);
+ if (r)
+ return r;
+
+ dev_set_drvdata(&eisa_root_dev.dev, &eisa_bus_root);
+
+ if (eisa_root_register(&eisa_bus_root)) {
+ /* A real bridge may have been registered before
+ * us. So quietly unregister. */
+ platform_device_unregister(&eisa_root_dev);
+ return -1;
+ }
+ return 0;
+}
diff --git a/arch/mips/sni/irq.c b/arch/mips/sni/irq.c
new file mode 100644
index 000000000..dec89afc9
--- /dev/null
+++ b/arch/mips/sni/irq.c
@@ -0,0 +1,76 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ * Copyright (C) 2006 Thomas Bogendoerfer
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+
+#include <asm/i8259.h>
+#include <asm/io.h>
+#include <asm/sni.h>
+#include <asm/irq.h>
+#include <asm/irq_cpu.h>
+
+void (*sni_hwint)(void);
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ sni_hwint();
+}
+
+/* ISA irq handler */
+irqreturn_t sni_isa_irq_handler(int dummy, void *p)
+{
+ int irq;
+
+ irq = i8259_irq();
+ if (unlikely(irq < 0))
+ return IRQ_NONE;
+
+ generic_handle_irq(irq);
+ return IRQ_HANDLED;
+}
+
+/*
+ * On systems with i8259-style interrupt controllers we assume, for
+ * driver compatibility reasons, interrupts 0 - 15 to be the i8259
+ * interrupts even if the hardware uses a different interrupt numbering.
+ */
+void __init arch_init_irq(void)
+{
+ init_i8259_irqs(); /* Integrated i8259 */
+ switch (sni_brd_type) {
+ case SNI_BRD_10:
+ case SNI_BRD_10NEW:
+ case SNI_BRD_TOWER_OASIC:
+ case SNI_BRD_MINITOWER:
+ sni_a20r_irq_init();
+ break;
+
+ case SNI_BRD_PCI_TOWER:
+ sni_pcit_irq_init();
+ break;
+
+ case SNI_BRD_PCI_TOWER_CPLUS:
+ sni_pcit_cplus_irq_init();
+ break;
+
+ case SNI_BRD_RM200:
+ sni_rm200_irq_init();
+ break;
+
+ case SNI_BRD_PCI_MTOWER:
+ case SNI_BRD_PCI_DESKTOP:
+ case SNI_BRD_PCI_MTOWER_CPLUS:
+ sni_pcimt_irq_init();
+ break;
+ }
+}
diff --git a/arch/mips/sni/pcimt.c b/arch/mips/sni/pcimt.c
new file mode 100644
index 000000000..12336c2a6
--- /dev/null
+++ b/arch/mips/sni/pcimt.c
@@ -0,0 +1,332 @@
+/*
+ * PCIMT specific code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 97, 98, 2000, 03, 04, 06 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2006,2007 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/serial_8250.h>
+
+#include <asm/sni.h>
+#include <asm/time.h>
+#include <asm/i8259.h>
+#include <asm/irq_cpu.h>
+
+#define cacheconf (*(volatile unsigned int *)PCIMT_CACHECONF)
+#define invspace (*(volatile unsigned int *)PCIMT_INVSPACE)
+
+static void __init sni_pcimt_sc_init(void)
+{
+ unsigned int scsiz, sc_size;
+
+ scsiz = cacheconf & 7;
+ if (scsiz == 0) {
+ printk("Second level cache is deactivated.\n");
+ return;
+ }
+ if (scsiz >= 6) {
+ printk("Invalid second level cache size configured, "
+ "deactivating second level cache.\n");
+ cacheconf = 0;
+ return;
+ }
+
+ sc_size = 128 << scsiz;
+ printk("%dkb second level cache detected, deactivating.\n", sc_size);
+ cacheconf = 0;
+}
+
+
+/*
+ * A bit more gossip about the iron we're running on ...
+ */
+static inline void sni_pcimt_detect(void)
+{
+ char boardtype[80];
+ unsigned char csmsr;
+ char *p = boardtype;
+ unsigned int asic;
+
+ csmsr = *(volatile unsigned char *)PCIMT_CSMSR;
+
+ p += sprintf(p, "%s PCI", (csmsr & 0x80) ? "RM200" : "RM300");
+ if ((csmsr & 0x80) == 0)
+ p += sprintf(p, ", board revision %s",
+ (csmsr & 0x20) ? "D" : "C");
+ asic = csmsr & 0x80;
+ asic = (csmsr & 0x08) ? asic : !asic;
+ p += sprintf(p, ", ASIC PCI Rev %s", asic ? "1.0" : "1.1");
+ printk("%s.\n", boardtype);
+}
+
+#define PORT(_base,_irq) \
+ { \
+ .iobase = _base, \
+ .irq = _irq, \
+ .uartclk = 1843200, \
+ .iotype = UPIO_PORT, \
+ .flags = UPF_BOOT_AUTOCONF, \
+ }
+
+static struct plat_serial8250_port pcimt_data[] = {
+ PORT(0x3f8, 4),
+ PORT(0x2f8, 3),
+ { },
+};
+
+static struct platform_device pcimt_serial8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = pcimt_data,
+ },
+};
+
+static struct resource pcimt_cmos_rsrc[] = {
+ {
+ .start = 0x70,
+ .end = 0x71,
+ .flags = IORESOURCE_IO
+ },
+ {
+ .start = 8,
+ .end = 8,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct platform_device pcimt_cmos_device = {
+ .name = "rtc_cmos",
+ .num_resources = ARRAY_SIZE(pcimt_cmos_rsrc),
+ .resource = pcimt_cmos_rsrc
+};
+
+
+static struct resource sni_io_resource = {
+ .start = 0x00000000UL,
+ .end = 0x03bfffffUL,
+ .name = "PCIMT IO MEM",
+ .flags = IORESOURCE_IO,
+};
+
+static struct resource pcimt_io_resources[] = {
+ {
+ .start = 0x00,
+ .end = 0x1f,
+ .name = "dma1",
+ .flags = IORESOURCE_BUSY
+ }, {
+ .start = 0x40,
+ .end = 0x5f,
+ .name = "timer",
+ .flags = IORESOURCE_BUSY
+ }, {
+ .start = 0x60,
+ .end = 0x6f,
+ .name = "keyboard",
+ .flags = IORESOURCE_BUSY
+ }, {
+ .start = 0x80,
+ .end = 0x8f,
+ .name = "dma page reg",
+ .flags = IORESOURCE_BUSY
+ }, {
+ .start = 0xc0,
+ .end = 0xdf,
+ .name = "dma2",
+ .flags = IORESOURCE_BUSY
+ }, {
+ .start = 0xcfc,
+ .end = 0xcff,
+ .name = "PCI config data",
+ .flags = IORESOURCE_BUSY
+ }
+};
+
+static struct resource pcimt_mem_resources[] = {
+ {
+ /*
+ * this region should only be 4 bytes long,
+ * but it's 16MB on all RM300C I've checked
+ */
+ .start = 0x1a000000,
+ .end = 0x1affffff,
+ .name = "PCI INT ACK",
+ .flags = IORESOURCE_BUSY
+ }
+};
+
+static struct resource sni_mem_resource = {
+ .start = 0x18000000UL,
+ .end = 0x1fbfffffUL,
+ .name = "PCIMT PCI MEM",
+ .flags = IORESOURCE_MEM
+};
+
+static void __init sni_pcimt_resource_init(void)
+{
+ int i;
+
+ /* request I/O space for devices used on all i[345]86 PCs */
+ for (i = 0; i < ARRAY_SIZE(pcimt_io_resources); i++)
+ request_resource(&sni_io_resource, pcimt_io_resources + i);
+ /* request MEM space for devices used on all i[345]86 PCs */
+ for (i = 0; i < ARRAY_SIZE(pcimt_mem_resources); i++)
+ request_resource(&sni_mem_resource, pcimt_mem_resources + i);
+}
+
+extern struct pci_ops sni_pcimt_ops;
+
+#ifdef CONFIG_PCI
+static struct pci_controller sni_controller = {
+ .pci_ops = &sni_pcimt_ops,
+ .mem_resource = &sni_mem_resource,
+ .mem_offset = 0x00000000UL,
+ .io_resource = &sni_io_resource,
+ .io_offset = 0x00000000UL,
+ .io_map_base = SNI_PORT_BASE
+};
+#endif
+
+static void enable_pcimt_irq(struct irq_data *d)
+{
+ unsigned int mask = 1 << (d->irq - PCIMT_IRQ_INT2);
+
+ *(volatile u8 *) PCIMT_IRQSEL |= mask;
+}
+
+void disable_pcimt_irq(struct irq_data *d)
+{
+ unsigned int mask = ~(1 << (d->irq - PCIMT_IRQ_INT2));
+
+ *(volatile u8 *) PCIMT_IRQSEL &= mask;
+}
+
+static struct irq_chip pcimt_irq_type = {
+ .name = "PCIMT",
+ .irq_mask = disable_pcimt_irq,
+ .irq_unmask = enable_pcimt_irq,
+};
+
+/*
+ * hwint0 should deal with MP agent, ASIC PCI, EISA NMI and debug
+ * button interrupts. Later ...
+ */
+static void pcimt_hwint0(void)
+{
+ panic("Received int0 but no handler yet ...");
+}
+
+/*
+ * hwint 1 deals with EISA and SCSI interrupts.
+ *
+ * The EISA_INT bit in CSITPEND is high active, all others are low active.
+ */
+static void pcimt_hwint1(void)
+{
+ u8 pend = *(volatile char *)PCIMT_CSITPEND;
+ unsigned long flags;
+
+ if (pend & IT_EISA) {
+ int irq;
+ /*
+ * Note: ASIC PCI's builtin interrupt acknowledge feature is
+ * broken. Using it may result in loss of some or all i8259
+ * interrupts, so don't use PCIMT_INT_ACKNOWLEDGE ...
+ */
+ irq = i8259_irq();
+ if (unlikely(irq < 0))
+ return;
+
+ do_IRQ(irq);
+ }
+
+ if (!(pend & IT_SCSI)) {
+ flags = read_c0_status();
+ clear_c0_status(ST0_IM);
+ do_IRQ(PCIMT_IRQ_SCSI);
+ write_c0_status(flags);
+ }
+}
+
+/*
+ * hwint 3 should deal with the PCI A - D interrupts.
+ */
+static void pcimt_hwint3(void)
+{
+ u8 pend = *(volatile char *)PCIMT_CSITPEND;
+ int irq;
+
+ pend &= (IT_INTA | IT_INTB | IT_INTC | IT_INTD);
+ pend ^= (IT_INTA | IT_INTB | IT_INTC | IT_INTD);
+ clear_c0_status(IE_IRQ3);
+ irq = PCIMT_IRQ_INT2 + ffs(pend) - 1;
+ do_IRQ(irq);
+ set_c0_status(IE_IRQ3);
+}
+
+static void sni_pcimt_hwint(void)
+{
+ u32 pending = read_c0_cause() & read_c0_status();
+
+ if (pending & C_IRQ5)
+ do_IRQ(MIPS_CPU_IRQ_BASE + 7);
+ else if (pending & C_IRQ4)
+ do_IRQ(MIPS_CPU_IRQ_BASE + 6);
+ else if (pending & C_IRQ3)
+ pcimt_hwint3();
+ else if (pending & C_IRQ1)
+ pcimt_hwint1();
+ else if (pending & C_IRQ0) {
+ pcimt_hwint0();
+ }
+}
+
+void __init sni_pcimt_irq_init(void)
+{
+ int i;
+
+ *(volatile u8 *) PCIMT_IRQSEL = IT_ETH | IT_EISA;
+ mips_cpu_irq_init();
+ /* Actually we've got more interrupts to handle ... */
+ for (i = PCIMT_IRQ_INT2; i <= PCIMT_IRQ_SCSI; i++)
+ irq_set_chip_and_handler(i, &pcimt_irq_type, handle_level_irq);
+ sni_hwint = sni_pcimt_hwint;
+ change_c0_status(ST0_IM, IE_IRQ1|IE_IRQ3);
+}
+
+void __init sni_pcimt_init(void)
+{
+ sni_pcimt_detect();
+ sni_pcimt_sc_init();
+ ioport_resource.end = sni_io_resource.end;
+#ifdef CONFIG_PCI
+ PCIBIOS_MIN_IO = 0x9000;
+ register_pci_controller(&sni_controller);
+#endif
+ sni_pcimt_resource_init();
+}
+
+static int __init snirm_pcimt_setup_devinit(void)
+{
+ switch (sni_brd_type) {
+ case SNI_BRD_PCI_MTOWER:
+ case SNI_BRD_PCI_DESKTOP:
+ case SNI_BRD_PCI_MTOWER_CPLUS:
+ platform_device_register(&pcimt_serial8250_device);
+ platform_device_register(&pcimt_cmos_device);
+ break;
+ }
+
+ return 0;
+}
+
+device_initcall(snirm_pcimt_setup_devinit);
diff --git a/arch/mips/sni/pcit.c b/arch/mips/sni/pcit.c
new file mode 100644
index 000000000..b331fe22c
--- /dev/null
+++ b/arch/mips/sni/pcit.c
@@ -0,0 +1,295 @@
+/*
+ * PCI Tower specific code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/serial_8250.h>
+
+#include <asm/sni.h>
+#include <asm/time.h>
+#include <asm/irq_cpu.h>
+
+
+#define PORT(_base,_irq) \
+ { \
+ .iobase = _base, \
+ .irq = _irq, \
+ .uartclk = 1843200, \
+ .iotype = UPIO_PORT, \
+ .flags = UPF_BOOT_AUTOCONF, \
+ }
+
+static struct plat_serial8250_port pcit_data[] = {
+ PORT(0x3f8, 0),
+ PORT(0x2f8, 3),
+ { },
+};
+
+static struct platform_device pcit_serial8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = pcit_data,
+ },
+};
+
+static struct plat_serial8250_port pcit_cplus_data[] = {
+ PORT(0x3f8, 0),
+ PORT(0x2f8, 3),
+ PORT(0x3e8, 4),
+ PORT(0x2e8, 3),
+ { },
+};
+
+static struct platform_device pcit_cplus_serial8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = pcit_cplus_data,
+ },
+};
+
+static struct resource pcit_cmos_rsrc[] = {
+ {
+ .start = 0x70,
+ .end = 0x71,
+ .flags = IORESOURCE_IO
+ },
+ {
+ .start = 8,
+ .end = 8,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct platform_device pcit_cmos_device = {
+ .name = "rtc_cmos",
+ .num_resources = ARRAY_SIZE(pcit_cmos_rsrc),
+ .resource = pcit_cmos_rsrc
+};
+
+static struct platform_device pcit_pcspeaker_pdev = {
+ .name = "pcspkr",
+ .id = -1,
+};
+
+static struct resource sni_io_resource = {
+ .start = 0x00000000UL,
+ .end = 0x03bfffffUL,
+ .name = "PCIT IO",
+ .flags = IORESOURCE_IO,
+};
+
+static struct resource pcit_io_resources[] = {
+ {
+ .start = 0x00,
+ .end = 0x1f,
+ .name = "dma1",
+ .flags = IORESOURCE_BUSY
+ }, {
+ .start = 0x40,
+ .end = 0x5f,
+ .name = "timer",
+ .flags = IORESOURCE_BUSY
+ }, {
+ .start = 0x60,
+ .end = 0x6f,
+ .name = "keyboard",
+ .flags = IORESOURCE_BUSY
+ }, {
+ .start = 0x80,
+ .end = 0x8f,
+ .name = "dma page reg",
+ .flags = IORESOURCE_BUSY
+ }, {
+ .start = 0xc0,
+ .end = 0xdf,
+ .name = "dma2",
+ .flags = IORESOURCE_BUSY
+ }, {
+ .start = 0xcf8,
+ .end = 0xcfb,
+ .name = "PCI config addr",
+ .flags = IORESOURCE_BUSY
+ }, {
+ .start = 0xcfc,
+ .end = 0xcff,
+ .name = "PCI config data",
+ .flags = IORESOURCE_BUSY
+ }
+};
+
+static void __init sni_pcit_resource_init(void)
+{
+ int i;
+
+ /* request I/O space for devices used on all i[345]86 PCs */
+ for (i = 0; i < ARRAY_SIZE(pcit_io_resources); i++)
+ request_resource(&sni_io_resource, pcit_io_resources + i);
+}
+
+
+extern struct pci_ops sni_pcit_ops;
+
+#ifdef CONFIG_PCI
+static struct resource sni_mem_resource = {
+ .start = 0x18000000UL,
+ .end = 0x1fbfffffUL,
+ .name = "PCIT PCI MEM",
+ .flags = IORESOURCE_MEM
+};
+
+static struct pci_controller sni_pcit_controller = {
+ .pci_ops = &sni_pcit_ops,
+ .mem_resource = &sni_mem_resource,
+ .mem_offset = 0x00000000UL,
+ .io_resource = &sni_io_resource,
+ .io_offset = 0x00000000UL,
+ .io_map_base = SNI_PORT_BASE
+};
+#endif /* CONFIG_PCI */
+
+static void enable_pcit_irq(struct irq_data *d)
+{
+ u32 mask = 1 << (d->irq - SNI_PCIT_INT_START + 24);
+
+ *(volatile u32 *)SNI_PCIT_INT_REG |= mask;
+}
+
+void disable_pcit_irq(struct irq_data *d)
+{
+ u32 mask = 1 << (d->irq - SNI_PCIT_INT_START + 24);
+
+ *(volatile u32 *)SNI_PCIT_INT_REG &= ~mask;
+}
+
+static struct irq_chip pcit_irq_type = {
+ .name = "PCIT",
+ .irq_mask = disable_pcit_irq,
+ .irq_unmask = enable_pcit_irq,
+};
+
+static void pcit_hwint1(void)
+{
+ u32 pending = *(volatile u32 *)SNI_PCIT_INT_REG;
+ int irq;
+
+ clear_c0_status(IE_IRQ1);
+ irq = ffs((pending >> 16) & 0x7f);
+
+ if (likely(irq > 0))
+ do_IRQ(irq + SNI_PCIT_INT_START - 1);
+ set_c0_status(IE_IRQ1);
+}
+
+static void pcit_hwint0(void)
+{
+ u32 pending = *(volatile u32 *)SNI_PCIT_INT_REG;
+ int irq;
+
+ clear_c0_status(IE_IRQ0);
+ irq = ffs((pending >> 16) & 0x3f);
+
+ if (likely(irq > 0))
+ do_IRQ(irq + SNI_PCIT_INT_START - 1);
+ set_c0_status(IE_IRQ0);
+}
+
+static void sni_pcit_hwint(void)
+{
+ u32 pending = read_c0_cause() & read_c0_status();
+
+ if (pending & C_IRQ1)
+ pcit_hwint1();
+ else if (pending & C_IRQ2)
+ do_IRQ(MIPS_CPU_IRQ_BASE + 4);
+ else if (pending & C_IRQ3)
+ do_IRQ(MIPS_CPU_IRQ_BASE + 5);
+ else if (pending & C_IRQ5)
+ do_IRQ(MIPS_CPU_IRQ_BASE + 7);
+}
+
+static void sni_pcit_hwint_cplus(void)
+{
+ u32 pending = read_c0_cause() & read_c0_status();
+
+ if (pending & C_IRQ0)
+ pcit_hwint0();
+ else if (pending & C_IRQ1)
+ do_IRQ(MIPS_CPU_IRQ_BASE + 3);
+ else if (pending & C_IRQ2)
+ do_IRQ(MIPS_CPU_IRQ_BASE + 4);
+ else if (pending & C_IRQ3)
+ do_IRQ(MIPS_CPU_IRQ_BASE + 5);
+ else if (pending & C_IRQ5)
+ do_IRQ(MIPS_CPU_IRQ_BASE + 7);
+}
+
+void __init sni_pcit_irq_init(void)
+{
+ int i;
+
+ mips_cpu_irq_init();
+ for (i = SNI_PCIT_INT_START; i <= SNI_PCIT_INT_END; i++)
+ irq_set_chip_and_handler(i, &pcit_irq_type, handle_level_irq);
+ *(volatile u32 *)SNI_PCIT_INT_REG = 0;
+ sni_hwint = sni_pcit_hwint;
+ change_c0_status(ST0_IM, IE_IRQ1);
+ if (request_irq(SNI_PCIT_INT_START + 6, sni_isa_irq_handler, 0, "ISA",
+ NULL))
+ pr_err("Failed to register ISA interrupt\n");
+}
+
+void __init sni_pcit_cplus_irq_init(void)
+{
+ int i;
+
+ mips_cpu_irq_init();
+ for (i = SNI_PCIT_INT_START; i <= SNI_PCIT_INT_END; i++)
+ irq_set_chip_and_handler(i, &pcit_irq_type, handle_level_irq);
+ *(volatile u32 *)SNI_PCIT_INT_REG = 0x40000000;
+ sni_hwint = sni_pcit_hwint_cplus;
+ change_c0_status(ST0_IM, IE_IRQ0);
+ if (request_irq(MIPS_CPU_IRQ_BASE + 3, sni_isa_irq_handler, 0, "ISA",
+ NULL))
+ pr_err("Failed to register ISA interrupt\n");
+}
+
+void __init sni_pcit_init(void)
+{
+ ioport_resource.end = sni_io_resource.end;
+#ifdef CONFIG_PCI
+ PCIBIOS_MIN_IO = 0x9000;
+ register_pci_controller(&sni_pcit_controller);
+#endif
+ sni_pcit_resource_init();
+}
+
+static int __init snirm_pcit_setup_devinit(void)
+{
+ switch (sni_brd_type) {
+ case SNI_BRD_PCI_TOWER:
+ platform_device_register(&pcit_serial8250_device);
+ platform_device_register(&pcit_cmos_device);
+ platform_device_register(&pcit_pcspeaker_pdev);
+ break;
+
+ case SNI_BRD_PCI_TOWER_CPLUS:
+ platform_device_register(&pcit_cplus_serial8250_device);
+ platform_device_register(&pcit_cmos_device);
+ platform_device_register(&pcit_pcspeaker_pdev);
+ break;
+ }
+ return 0;
+}
+
+device_initcall(snirm_pcit_setup_devinit);
diff --git a/arch/mips/sni/reset.c b/arch/mips/sni/reset.c
new file mode 100644
index 000000000..66f963d8d
--- /dev/null
+++ b/arch/mips/sni/reset.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/arch/mips/sni/reset.c
+ *
+ * Reset a SNI machine.
+ */
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/reboot.h>
+#include <asm/sni.h>
+
+/*
+ * This routine reboots the machine by asking the keyboard
+ * controller to pulse the reset-line low. We try that for a while,
+ * and if it doesn't work, we do some other stupid things.
+ */
+static inline void kb_wait(void)
+{
+ int i;
+
+ for (i = 0; i < 0x10000; i++)
+ if ((inb_p(0x64) & 0x02) == 0)
+ break;
+}
+
+/* XXX This ends up at the ARC firmware prompt ... */
+void sni_machine_restart(char *command)
+{
+ int i;
+
+ /* This does a normal reset via the keyboard controller, like a PC.
+ We could do that more simply ... */
+ local_irq_disable();
+ for (;;) {
+ for (i = 0; i < 100; i++) {
+ kb_wait();
+ udelay(50);
+ outb_p(0xfe, 0x64); /* pulse reset low */
+ udelay(50);
+ }
+ }
+}
+
+void sni_machine_power_off(void)
+{
+ *(volatile unsigned char *)PCIMT_CSWCSM = 0xfd;
+}
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
new file mode 100644
index 000000000..d84744ca8
--- /dev/null
+++ b/arch/mips/sni/rm200.c
@@ -0,0 +1,485 @@
+/*
+ * RM200 specific code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006,2007 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ *
+ * i8259 parts ripped out of arch/mips/kernel/i8259.c
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+#include <linux/io.h>
+
+#include <asm/sni.h>
+#include <asm/time.h>
+#include <asm/irq_cpu.h>
+
+#define RM200_I8259A_IRQ_BASE 32
+
+#define MEMPORT(_base,_irq) \
+ { \
+ .mapbase = _base, \
+ .irq = _irq, \
+ .uartclk = 1843200, \
+ .iotype = UPIO_MEM, \
+ .flags = UPF_BOOT_AUTOCONF|UPF_IOREMAP, \
+ }
+
+static struct plat_serial8250_port rm200_data[] = {
+ MEMPORT(0x160003f8, RM200_I8259A_IRQ_BASE + 4),
+ MEMPORT(0x160002f8, RM200_I8259A_IRQ_BASE + 3),
+ { },
+};
+
+static struct platform_device rm200_serial8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = rm200_data,
+ },
+};
+
+static struct resource rm200_ds1216_rsrc[] = {
+ {
+ .start = 0x1cd41ffc,
+ .end = 0x1cd41fff,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+static struct platform_device rm200_ds1216_device = {
+ .name = "rtc-ds1216",
+ .num_resources = ARRAY_SIZE(rm200_ds1216_rsrc),
+ .resource = rm200_ds1216_rsrc
+};
+
+static struct resource snirm_82596_rm200_rsrc[] = {
+ {
+ .start = 0x18000000,
+ .end = 0x180fffff,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .start = 0x1b000000,
+ .end = 0x1b000004,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .start = 0x1ff00000,
+ .end = 0x1ff00020,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .start = 27,
+ .end = 27,
+ .flags = IORESOURCE_IRQ
+ },
+ {
+ .flags = 0x00
+ }
+};
+
+static struct platform_device snirm_82596_rm200_pdev = {
+ .name = "snirm_82596",
+ .num_resources = ARRAY_SIZE(snirm_82596_rm200_rsrc),
+ .resource = snirm_82596_rm200_rsrc
+};
+
+static struct resource snirm_53c710_rm200_rsrc[] = {
+ {
+ .start = 0x19000000,
+ .end = 0x190fffff,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .start = 26,
+ .end = 26,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct platform_device snirm_53c710_rm200_pdev = {
+ .name = "snirm_53c710",
+ .num_resources = ARRAY_SIZE(snirm_53c710_rm200_rsrc),
+ .resource = snirm_53c710_rm200_rsrc
+};
+
+static int __init snirm_setup_devinit(void)
+{
+ if (sni_brd_type == SNI_BRD_RM200) {
+ platform_device_register(&rm200_serial8250_device);
+ platform_device_register(&rm200_ds1216_device);
+ platform_device_register(&snirm_82596_rm200_pdev);
+ platform_device_register(&snirm_53c710_rm200_pdev);
+ sni_eisa_root_init();
+ }
+ return 0;
+}
+
+device_initcall(snirm_setup_devinit);
+
+/*
+ * RM200 has an ISA and an EISA bus. The ISA bus is only used
+ * for onboard devices and also has two i8259 PICs. Since these
+ * PICs are not accessible via inb/outb, the following code uses
+ * readb/writeb to access them.
+ */
+
+static DEFINE_RAW_SPINLOCK(sni_rm200_i8259A_lock);
+#define PIC_CMD 0x00
+#define PIC_IMR 0x01
+#define PIC_ISR PIC_CMD
+#define PIC_POLL PIC_ISR
+#define PIC_OCW3 PIC_ISR
+
+/* i8259A PIC related value */
+#define PIC_CASCADE_IR 2
+#define MASTER_ICW4_DEFAULT 0x01
+#define SLAVE_ICW4_DEFAULT 0x01
+
+/*
+ * This contains the irq mask for both 8259A irq controllers,
+ */
+static unsigned int rm200_cached_irq_mask = 0xffff;
+static __iomem u8 *rm200_pic_master;
+static __iomem u8 *rm200_pic_slave;
+
+#define cached_master_mask (rm200_cached_irq_mask)
+#define cached_slave_mask (rm200_cached_irq_mask >> 8)
+
+static void sni_rm200_disable_8259A_irq(struct irq_data *d)
+{
+ unsigned int mask, irq = d->irq - RM200_I8259A_IRQ_BASE;
+ unsigned long flags;
+
+ mask = 1 << irq;
+ raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);
+ rm200_cached_irq_mask |= mask;
+ if (irq & 8)
+ writeb(cached_slave_mask, rm200_pic_slave + PIC_IMR);
+ else
+ writeb(cached_master_mask, rm200_pic_master + PIC_IMR);
+ raw_spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
+}
+
+static void sni_rm200_enable_8259A_irq(struct irq_data *d)
+{
+ unsigned int mask, irq = d->irq - RM200_I8259A_IRQ_BASE;
+ unsigned long flags;
+
+ mask = ~(1 << irq);
+ raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);
+ rm200_cached_irq_mask &= mask;
+ if (irq & 8)
+ writeb(cached_slave_mask, rm200_pic_slave + PIC_IMR);
+ else
+ writeb(cached_master_mask, rm200_pic_master + PIC_IMR);
+ raw_spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
+}
+
+static inline int sni_rm200_i8259A_irq_real(unsigned int irq)
+{
+ int value;
+ int irqmask = 1 << irq;
+
+ if (irq < 8) {
+ writeb(0x0B, rm200_pic_master + PIC_CMD);
+ value = readb(rm200_pic_master + PIC_CMD) & irqmask;
+ writeb(0x0A, rm200_pic_master + PIC_CMD);
+ return value;
+ }
+ writeb(0x0B, rm200_pic_slave + PIC_CMD); /* ISR register */
+ value = readb(rm200_pic_slave + PIC_CMD) & (irqmask >> 8);
+ writeb(0x0A, rm200_pic_slave + PIC_CMD);
+ return value;
+}
+
+/*
+ * Careful! The 8259A is a fragile beast, it pretty
+ * much _has_ to be done exactly like this (mask it
+ * first, _then_ send the EOI, and the order of EOI
+ * to the two 8259s is important!)
+ */
+void sni_rm200_mask_and_ack_8259A(struct irq_data *d)
+{
+ unsigned int irqmask, irq = d->irq - RM200_I8259A_IRQ_BASE;
+ unsigned long flags;
+
+ irqmask = 1 << irq;
+ raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);
+ /*
+ * Lightweight spurious IRQ detection. We do not want
+ * to overdo spurious IRQ handling - it's usually a sign
+ * of hardware problems, so we only do the checks we can
+ * do without slowing down good hardware unnecessarily.
+ *
+ * Note that IRQ7 and IRQ15 (the two spurious IRQs
+ * usually resulting from the 8259A-1|2 PICs) occur
+ * even if the IRQ is masked in the 8259A. Thus we
+ * can check spurious 8259A IRQs without doing the
+ * quite slow i8259A_irq_real() call for every IRQ.
+ * This does not cover 100% of spurious interrupts,
+ * but should be enough to warn the user that there
+ * is something bad going on ...
+ */
+ if (rm200_cached_irq_mask & irqmask)
+ goto spurious_8259A_irq;
+ rm200_cached_irq_mask |= irqmask;
+
+handle_real_irq:
+ if (irq & 8) {
+ readb(rm200_pic_slave + PIC_IMR);
+ writeb(cached_slave_mask, rm200_pic_slave + PIC_IMR);
+ writeb(0x60+(irq & 7), rm200_pic_slave + PIC_CMD);
+ writeb(0x60+PIC_CASCADE_IR, rm200_pic_master + PIC_CMD);
+ } else {
+ readb(rm200_pic_master + PIC_IMR);
+ writeb(cached_master_mask, rm200_pic_master + PIC_IMR);
+ writeb(0x60+irq, rm200_pic_master + PIC_CMD);
+ }
+ raw_spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
+ return;
+
+spurious_8259A_irq:
+ /*
+ * this is the slow path - should happen rarely.
+ */
+ if (sni_rm200_i8259A_irq_real(irq))
+ /*
+ * oops, the IRQ _is_ in service according to the
+ * 8259A - not spurious, go handle it.
+ */
+ goto handle_real_irq;
+
+ {
+ static int spurious_irq_mask;
+ /*
+ * At this point we can be sure the IRQ is spurious,
+ * let's ACK and report it. [once per IRQ]
+ */
+ if (!(spurious_irq_mask & irqmask)) {
+ printk(KERN_DEBUG
+ "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
+ spurious_irq_mask |= irqmask;
+ }
+ atomic_inc(&irq_err_count);
+ /*
+ * Theoretically we do not have to handle this IRQ,
+ * but in Linux this does not cause problems and is
+ * simpler for us.
+ */
+ goto handle_real_irq;
+ }
+}
+
+static struct irq_chip sni_rm200_i8259A_chip = {
+ .name = "RM200-XT-PIC",
+ .irq_mask = sni_rm200_disable_8259A_irq,
+ .irq_unmask = sni_rm200_enable_8259A_irq,
+ .irq_mask_ack = sni_rm200_mask_and_ack_8259A,
+};
+
+/*
+ * Do the traditional i8259 interrupt polling thing. This is for the few
+ * cases where no better interrupt acknowledge method is available and we
+ * absolutely must touch the i8259.
+ */
+static inline int sni_rm200_i8259_irq(void)
+{
+ int irq;
+
+ raw_spin_lock(&sni_rm200_i8259A_lock);
+
+ /* Perform an interrupt acknowledge cycle on controller 1. */
+ writeb(0x0C, rm200_pic_master + PIC_CMD); /* prepare for poll */
+ irq = readb(rm200_pic_master + PIC_CMD) & 7;
+ if (irq == PIC_CASCADE_IR) {
+ /*
+ * Interrupt is cascaded so perform interrupt
+ * acknowledge on controller 2.
+ */
+ writeb(0x0C, rm200_pic_slave + PIC_CMD); /* prepare for poll */
+ irq = (readb(rm200_pic_slave + PIC_CMD) & 7) + 8;
+ }
+
+ if (unlikely(irq == 7)) {
+ /*
+ * This may be a spurious interrupt.
+ *
+ * Read the interrupt status register (ISR). If the most
+ * significant bit is not set then there is no valid
+ * interrupt.
+ */
+ writeb(0x0B, rm200_pic_master + PIC_ISR); /* ISR register */
+ if (~readb(rm200_pic_master + PIC_ISR) & 0x80)
+ irq = -1;
+ }
+
+ raw_spin_unlock(&sni_rm200_i8259A_lock);
+
+ return likely(irq >= 0) ? irq + RM200_I8259A_IRQ_BASE : irq;
+}
+
+void sni_rm200_init_8259A(void)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);
+
+ writeb(0xff, rm200_pic_master + PIC_IMR);
+ writeb(0xff, rm200_pic_slave + PIC_IMR);
+
+ writeb(0x11, rm200_pic_master + PIC_CMD);
+ writeb(0, rm200_pic_master + PIC_IMR);
+ writeb(1U << PIC_CASCADE_IR, rm200_pic_master + PIC_IMR);
+ writeb(MASTER_ICW4_DEFAULT, rm200_pic_master + PIC_IMR);
+ writeb(0x11, rm200_pic_slave + PIC_CMD);
+ writeb(8, rm200_pic_slave + PIC_IMR);
+ writeb(PIC_CASCADE_IR, rm200_pic_slave + PIC_IMR);
+ writeb(SLAVE_ICW4_DEFAULT, rm200_pic_slave + PIC_IMR);
+ udelay(100); /* wait for 8259A to initialize */
+
+ writeb(cached_master_mask, rm200_pic_master + PIC_IMR);
+ writeb(cached_slave_mask, rm200_pic_slave + PIC_IMR);
+
+ raw_spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
+}
+
+/*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+
+static struct resource sni_rm200_pic1_resource = {
+ .name = "onboard ISA pic1",
+ .start = 0x16000020,
+ .end = 0x16000023,
+ .flags = IORESOURCE_BUSY
+};
+
+static struct resource sni_rm200_pic2_resource = {
+ .name = "onboard ISA pic2",
+ .start = 0x160000a0,
+ .end = 0x160000a3,
+ .flags = IORESOURCE_BUSY
+};
+
+/* ISA irq handler */
+static irqreturn_t sni_rm200_i8259A_irq_handler(int dummy, void *p)
+{
+ int irq;
+
+ irq = sni_rm200_i8259_irq();
+ if (unlikely(irq < 0))
+ return IRQ_NONE;
+
+ do_IRQ(irq);
+ return IRQ_HANDLED;
+}
+
+void __init sni_rm200_i8259_irqs(void)
+{
+ int i;
+
+ rm200_pic_master = ioremap(0x16000020, 4);
+ if (!rm200_pic_master)
+ return;
+ rm200_pic_slave = ioremap(0x160000a0, 4);
+ if (!rm200_pic_slave) {
+ iounmap(rm200_pic_master);
+ return;
+ }
+
+ insert_resource(&iomem_resource, &sni_rm200_pic1_resource);
+ insert_resource(&iomem_resource, &sni_rm200_pic2_resource);
+
+ sni_rm200_init_8259A();
+
+ for (i = RM200_I8259A_IRQ_BASE; i < RM200_I8259A_IRQ_BASE + 16; i++)
+ irq_set_chip_and_handler(i, &sni_rm200_i8259A_chip,
+ handle_level_irq);
+
+ if (request_irq(RM200_I8259A_IRQ_BASE + PIC_CASCADE_IR, no_action,
+ IRQF_NO_THREAD, "cascade", NULL))
+ pr_err("Failed to register cascade interrupt\n");
+}
+
+
+#define SNI_RM200_INT_STAT_REG CKSEG1ADDR(0xbc000000)
+#define SNI_RM200_INT_ENA_REG CKSEG1ADDR(0xbc080000)
+
+#define SNI_RM200_INT_START 24
+#define SNI_RM200_INT_END 28
+
+static void enable_rm200_irq(struct irq_data *d)
+{
+ unsigned int mask = 1 << (d->irq - SNI_RM200_INT_START);
+
+ *(volatile u8 *)SNI_RM200_INT_ENA_REG &= ~mask;
+}
+
+void disable_rm200_irq(struct irq_data *d)
+{
+ unsigned int mask = 1 << (d->irq - SNI_RM200_INT_START);
+
+ *(volatile u8 *)SNI_RM200_INT_ENA_REG |= mask;
+}
+
+static struct irq_chip rm200_irq_type = {
+ .name = "RM200",
+ .irq_mask = disable_rm200_irq,
+ .irq_unmask = enable_rm200_irq,
+};
+
+static void sni_rm200_hwint(void)
+{
+ u32 pending = read_c0_cause() & read_c0_status();
+ u8 mask;
+ u8 stat;
+ int irq;
+
+ if (pending & C_IRQ5)
+ do_IRQ(MIPS_CPU_IRQ_BASE + 7);
+ else if (pending & C_IRQ0) {
+ clear_c0_status(IE_IRQ0);
+ mask = *(volatile u8 *)SNI_RM200_INT_ENA_REG ^ 0x1f;
+ stat = *(volatile u8 *)SNI_RM200_INT_STAT_REG ^ 0x14;
+ irq = ffs(stat & mask & 0x1f);
+
+ if (likely(irq > 0))
+ do_IRQ(irq + SNI_RM200_INT_START - 1);
+ set_c0_status(IE_IRQ0);
+ }
+}
+
+void __init sni_rm200_irq_init(void)
+{
+ int i;
+
+ * (volatile u8 *)SNI_RM200_INT_ENA_REG = 0x1f;
+
+ sni_rm200_i8259_irqs();
+ mips_cpu_irq_init();
+ /* Actually we've got more interrupts to handle ... */
+ for (i = SNI_RM200_INT_START; i <= SNI_RM200_INT_END; i++)
+ irq_set_chip_and_handler(i, &rm200_irq_type, handle_level_irq);
+ sni_hwint = sni_rm200_hwint;
+ change_c0_status(ST0_IM, IE_IRQ0);
+ if (request_irq(SNI_RM200_INT_START + 0, sni_rm200_i8259A_irq_handler,
+ 0, "onboard ISA", NULL))
+ pr_err("Failed to register onboard ISA interrupt\n");
+ if (request_irq(SNI_RM200_INT_START + 1, sni_isa_irq_handler, 0, "ISA",
+ NULL))
+ pr_err("Failed to register ISA interrupt\n");
+}
+
+void __init sni_rm200_init(void)
+{
+}
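
A minimal userspace sketch of the demultiplexing arithmetic used in sni_rm200_hwint() above: the enable register is active low and two status bits are inverted, hence the two XOR constants. The register values below are invented sample data, not anything read from RM200 hardware.

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned char ena  = 0x1e;		/* sample enable register: bit 0 clear = source 0 enabled */
	unsigned char stat = 0x15;		/* sample status register */
	unsigned char mask = ena  ^ 0x1f;	/* 0x01: enabled sources as 1-bits */
	unsigned char pend = stat ^ 0x14;	/* 0x01: pending sources as 1-bits */
	int irq = ffs(pend & mask & 0x1f);	/* 1-based bit number, 0 if nothing pending */

	if (irq > 0)	/* with SNI_RM200_INT_START == 24 this would dispatch IRQ 24 */
		printf("do_IRQ(%d)\n", irq + 24 - 1);
	return 0;
}
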
diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
new file mode 100644
index 000000000..efad85c8c
--- /dev/null
+++ b/arch/mips/sni/setup.c
@@ -0,0 +1,263 @@
+/*
+ * Setup pointers to hardware-dependent routines.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 97, 98, 2000, 03, 04, 06 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2006,2007 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ */
+#include <linux/eisa.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/console.h>
+#include <linux/fb.h>
+#include <linux/screen_info.h>
+
+#ifdef CONFIG_FW_ARC
+#include <asm/fw/arc/types.h>
+#include <asm/sgialib.h>
+#endif
+
+#ifdef CONFIG_FW_SNIPROM
+#include <asm/mipsprom.h>
+#endif
+
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/io.h>
+#include <asm/reboot.h>
+#include <asm/sni.h>
+
+unsigned int sni_brd_type;
+EXPORT_SYMBOL(sni_brd_type);
+
+extern void sni_machine_restart(char *command);
+extern void sni_machine_power_off(void);
+
+static void __init sni_display_setup(void)
+{
+#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) && defined(CONFIG_FW_ARC)
+ struct screen_info *si = &screen_info;
+ DISPLAY_STATUS *di;
+
+ di = ArcGetDisplayStatus(1);
+
+ if (di) {
+ si->orig_x = di->CursorXPosition;
+ si->orig_y = di->CursorYPosition;
+ si->orig_video_cols = di->CursorMaxXPosition;
+ si->orig_video_lines = di->CursorMaxYPosition;
+ si->orig_video_isVGA = VIDEO_TYPE_VGAC;
+ si->orig_video_points = 16;
+ }
+#endif
+}
+
+static void __init sni_console_setup(void)
+{
+#ifndef CONFIG_FW_ARC
+ char *ctype;
+ char *cdev;
+ char *baud;
+ int port;
+ static char options[8] __initdata;
+
+ cdev = prom_getenv("console_dev");
+ if (strncmp(cdev, "tty", 3) == 0) {
+ ctype = prom_getenv("console");
+ switch (*ctype) {
+ default:
+ case 'l':
+ port = 0;
+ baud = prom_getenv("lbaud");
+ break;
+ case 'r':
+ port = 1;
+ baud = prom_getenv("rbaud");
+ break;
+ }
+ if (baud)
+ strcpy(options, baud);
+ if (strncmp(cdev, "tty552", 6) == 0)
+ add_preferred_console("ttyS", port,
+ baud ? options : NULL);
+ else
+ add_preferred_console("ttySC", port,
+ baud ? options : NULL);
+ }
+#endif
+}
+
+#ifdef DEBUG
+static void __init sni_idprom_dump(void)
+{
+ int i;
+
+ pr_debug("SNI IDProm dump:\n");
+ for (i = 0; i < 256; i++) {
+ if (i%16 == 0)
+ pr_debug("%04x ", i);
+
+ printk("%02x ", *(unsigned char *) (SNI_IDPROM_BASE + i));
+
+ if (i % 16 == 15)
+ printk("\n");
+ }
+}
+#endif
+
+void __init plat_mem_setup(void)
+{
+ int cputype;
+
+ set_io_port_base(SNI_PORT_BASE);
+// ioport_resource.end = sni_io_resource.end;
+
+ /*
+ * Setup (E)ISA I/O memory access stuff
+ */
+#ifdef CONFIG_EISA
+ EISA_bus = 1;
+#endif
+
+ sni_brd_type = *(unsigned char *)SNI_IDPROM_BRDTYPE;
+ cputype = *(unsigned char *)SNI_IDPROM_CPUTYPE;
+ switch (sni_brd_type) {
+ case SNI_BRD_TOWER_OASIC:
+ switch (cputype) {
+ case SNI_CPU_M8030:
+ system_type = "RM400-330";
+ break;
+ case SNI_CPU_M8031:
+ system_type = "RM400-430";
+ break;
+ case SNI_CPU_M8037:
+ system_type = "RM400-530";
+ break;
+ case SNI_CPU_M8034:
+ system_type = "RM400-730";
+ break;
+ default:
+ system_type = "RM400-xxx";
+ break;
+ }
+ break;
+ case SNI_BRD_MINITOWER:
+ switch (cputype) {
+ case SNI_CPU_M8021:
+ case SNI_CPU_M8043:
+ system_type = "RM400-120";
+ break;
+ case SNI_CPU_M8040:
+ system_type = "RM400-220";
+ break;
+ case SNI_CPU_M8053:
+ system_type = "RM400-225";
+ break;
+ case SNI_CPU_M8050:
+ system_type = "RM400-420";
+ break;
+ default:
+ system_type = "RM400-xxx";
+ break;
+ }
+ break;
+ case SNI_BRD_PCI_TOWER:
+ system_type = "RM400-Cxx";
+ break;
+ case SNI_BRD_RM200:
+ system_type = "RM200-xxx";
+ break;
+ case SNI_BRD_PCI_MTOWER:
+ system_type = "RM300-Cxx";
+ break;
+ case SNI_BRD_PCI_DESKTOP:
+ switch (read_c0_prid() & PRID_IMP_MASK) {
+ case PRID_IMP_R4600:
+ case PRID_IMP_R4700:
+ system_type = "RM200-C20";
+ break;
+ case PRID_IMP_R5000:
+ system_type = "RM200-C40";
+ break;
+ default:
+ system_type = "RM200-Cxx";
+ break;
+ }
+ break;
+ case SNI_BRD_PCI_TOWER_CPLUS:
+ system_type = "RM400-Exx";
+ break;
+ case SNI_BRD_PCI_MTOWER_CPLUS:
+ system_type = "RM300-Exx";
+ break;
+ }
+ pr_debug("Found SNI brdtype %02x name %s\n", sni_brd_type, system_type);
+
+#ifdef DEBUG
+ sni_idprom_dump();
+#endif
+
+ switch (sni_brd_type) {
+ case SNI_BRD_10:
+ case SNI_BRD_10NEW:
+ case SNI_BRD_TOWER_OASIC:
+ case SNI_BRD_MINITOWER:
+ sni_a20r_init();
+ break;
+
+ case SNI_BRD_PCI_TOWER:
+ case SNI_BRD_PCI_TOWER_CPLUS:
+ sni_pcit_init();
+ break;
+
+ case SNI_BRD_RM200:
+ sni_rm200_init();
+ break;
+
+ case SNI_BRD_PCI_MTOWER:
+ case SNI_BRD_PCI_DESKTOP:
+ case SNI_BRD_PCI_MTOWER_CPLUS:
+ sni_pcimt_init();
+ break;
+ }
+
+ _machine_restart = sni_machine_restart;
+ pm_power_off = sni_machine_power_off;
+
+ sni_display_setup();
+ sni_console_setup();
+}
+
+#ifdef CONFIG_PCI
+
+#include <linux/pci.h>
+#include <video/vga.h>
+#include <video/cirrus.h>
+
+static void quirk_cirrus_ram_size(struct pci_dev *dev)
+{
+ u16 cmd;
+
+ /*
+	 * The firmware doesn't set the RAM size correctly, so we
+	 * need to do it here, otherwise we get screen corruption
+	 * on older Cirrus chips.
+ */
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if ((cmd & (PCI_COMMAND_IO|PCI_COMMAND_MEMORY))
+ == (PCI_COMMAND_IO|PCI_COMMAND_MEMORY)) {
+ vga_wseq(NULL, CL_SEQR6, 0x12); /* unlock all extension registers */
+ vga_wseq(NULL, CL_SEQRF, 0x18);
+ }
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5434_8,
+ quirk_cirrus_ram_size);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5436,
+ quirk_cirrus_ram_size);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446,
+ quirk_cirrus_ram_size);
+#endif
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
new file mode 100644
index 000000000..ff3ba7e77
--- /dev/null
+++ b/arch/mips/sni/time.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/types.h>
+#include <linux/i8253.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/time.h>
+#include <linux/clockchips.h>
+
+#include <asm/sni.h>
+#include <asm/time.h>
+
+#define SNI_CLOCK_TICK_RATE 3686400
+#define SNI_COUNTER2_DIV 64
+#define SNI_COUNTER0_DIV ((SNI_CLOCK_TICK_RATE / SNI_COUNTER2_DIV) / HZ)
+
+static int a20r_set_periodic(struct clock_event_device *evt)
+{
+ *(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0x34;
+ wmb();
+ *(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV & 0xff;
+ wmb();
+ *(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV >> 8;
+ wmb();
+
+ *(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0xb4;
+ wmb();
+ *(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV & 0xff;
+ wmb();
+ *(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV >> 8;
+ wmb();
+ return 0;
+}
+
+static struct clock_event_device a20r_clockevent_device = {
+ .name = "a20r-timer",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+
+ /* .mult, .shift, .max_delta_ns and .min_delta_ns left uninitialized */
+
+ .rating = 300,
+ .irq = SNI_A20R_IRQ_TIMER,
+ .set_state_periodic = a20r_set_periodic,
+};
+
+static irqreturn_t a20r_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = dev_id;
+
+ *(volatile u8 *)A20R_PT_TIM0_ACK = 0;
+ wmb();
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * a20r platform uses 2 counters to divide the input frequency.
+ * Counter 2 output is connected to Counter 0 & 1 input.
+ */
+static void __init sni_a20r_timer_setup(void)
+{
+ struct clock_event_device *cd = &a20r_clockevent_device;
+ unsigned int cpu = smp_processor_id();
+
+ cd->cpumask = cpumask_of(cpu);
+ clockevents_register_device(cd);
+ if (request_irq(SNI_A20R_IRQ_TIMER, a20r_interrupt,
+ IRQF_PERCPU | IRQF_TIMER, "a20r-timer", cd))
+ pr_err("Failed to register a20r-timer interrupt\n");
+}
+
+#define SNI_8254_TICK_RATE 1193182UL
+
+#define SNI_8254_TCSAMP_COUNTER ((SNI_8254_TICK_RATE / HZ) + 255)
+
+static __init unsigned long dosample(void)
+{
+ u32 ct0, ct1;
+ volatile u8 msb;
+
+ /* Start the counter. */
+ outb_p(0x34, 0x43);
+ outb_p(SNI_8254_TCSAMP_COUNTER & 0xff, 0x40);
+ outb(SNI_8254_TCSAMP_COUNTER >> 8, 0x40);
+
+ /* Get initial counter invariant */
+ ct0 = read_c0_count();
+
+ /* Latch and spin until top byte of counter0 is zero */
+ do {
+ outb(0x00, 0x43);
+ (void) inb(0x40);
+ msb = inb(0x40);
+ ct1 = read_c0_count();
+ } while (msb);
+
+ /* Stop the counter. */
+ outb(0x38, 0x43);
+ /*
+	 * Return the difference; this is how far the r4k counter increments
+	 * for every 1/HZ seconds. We round off to the nearest 1 MHz of master
+	 * clock (= 1000000 / HZ / 2).
+ */
+ /*return (ct1 - ct0 + (500000/HZ/2)) / (500000/HZ) * (500000/HZ);*/
+ return (ct1 - ct0) / (500000/HZ) * (500000/HZ);
+}
+
+/*
+ * Here we need to calibrate the cycle counter to at least be close.
+ */
+void __init plat_time_init(void)
+{
+ unsigned long r4k_ticks[3];
+ unsigned long r4k_tick;
+
+ /*
+	 * Figure out the r4k offset; the algorithm is very simple and works in
+	 * _all_ cases as long as the 8254 counter register itself works ok (as
+	 * an interrupt-driving timer it does not, because of a bug, which is
+	 * why we use the on-chip r4k counter/compare register instead, but for
+	 * the r4k_offset calculation it will work fine for us).
+ * There are other very complicated ways of performing this calculation
+ * but this one works just fine so I am not going to futz around. ;-)
+ */
+ printk(KERN_INFO "Calibrating system timer... ");
+ dosample(); /* Prime cache. */
+ dosample(); /* Prime cache. */
+ /* Zero is NOT an option. */
+ do {
+ r4k_ticks[0] = dosample();
+ } while (!r4k_ticks[0]);
+ do {
+ r4k_ticks[1] = dosample();
+ } while (!r4k_ticks[1]);
+
+ if (r4k_ticks[0] != r4k_ticks[1]) {
+ printk("warning: timer counts differ, retrying... ");
+ r4k_ticks[2] = dosample();
+ if (r4k_ticks[2] == r4k_ticks[0]
+ || r4k_ticks[2] == r4k_ticks[1])
+ r4k_tick = r4k_ticks[2];
+ else {
+ printk("disagreement, using average... ");
+ r4k_tick = (r4k_ticks[0] + r4k_ticks[1]
+ + r4k_ticks[2]) / 3;
+ }
+ } else
+ r4k_tick = r4k_ticks[0];
+
+ printk("%d [%d.%04d MHz CPU]\n", (int) r4k_tick,
+ (int) (r4k_tick / (500000 / HZ)),
+ (int) (r4k_tick % (500000 / HZ)));
+
+ mips_hpt_frequency = r4k_tick * HZ;
+
+ switch (sni_brd_type) {
+ case SNI_BRD_10:
+ case SNI_BRD_10NEW:
+ case SNI_BRD_TOWER_OASIC:
+ case SNI_BRD_MINITOWER:
+ sni_a20r_timer_setup();
+ break;
+ }
+ setup_pit_timer();
+}
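
A small sketch of the arithmetic behind the calibration printout in plat_time_init() above, assuming HZ = 100 and a hypothetical 150 MHz CPU whose count register runs at half the CPU clock (which is why the printk scales by 500000/HZ). The measured tick value is invented for illustration.

#include <stdio.h>

#define HZ 100

int main(void)
{
	unsigned long r4k_tick = 750000;	/* counter increments measured per 1/HZ s */

	/* same format as the kernel's printk: prints "750000 [150.0000 MHz CPU]" */
	printf("%lu [%lu.%04lu MHz CPU]\n", r4k_tick,
	       r4k_tick / (500000 / HZ), r4k_tick % (500000 / HZ));

	/* mips_hpt_frequency ends up as 75000000, i.e. a 75 MHz count register */
	printf("mips_hpt_frequency = %lu\n", r4k_tick * HZ);
	return 0;
}
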
diff --git a/arch/mips/tools/.gitignore b/arch/mips/tools/.gitignore
new file mode 100644
index 000000000..794817dfb
--- /dev/null
+++ b/arch/mips/tools/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+elf-entry
+loongson3-llsc-check
diff --git a/arch/mips/tools/Makefile b/arch/mips/tools/Makefile
new file mode 100644
index 000000000..b851e5dcc
--- /dev/null
+++ b/arch/mips/tools/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+hostprogs := elf-entry
+PHONY += elf-entry
+elf-entry: $(obj)/elf-entry
+ @:
+
+hostprogs += loongson3-llsc-check
+PHONY += loongson3-llsc-check
+loongson3-llsc-check: $(obj)/loongson3-llsc-check
+ @:
diff --git a/arch/mips/tools/elf-entry.c b/arch/mips/tools/elf-entry.c
new file mode 100644
index 000000000..dbd14ff05
--- /dev/null
+++ b/arch/mips/tools/elf-entry.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <byteswap.h>
+#include <elf.h>
+#include <endian.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef be32toh
+/* If libc provides [bl]e{32,64}toh() then we'll use them */
+#elif BYTE_ORDER == LITTLE_ENDIAN
+# define be32toh(x) bswap_32(x)
+# define le32toh(x) (x)
+# define be64toh(x) bswap_64(x)
+# define le64toh(x) (x)
+#elif BYTE_ORDER == BIG_ENDIAN
+# define be32toh(x) (x)
+# define le32toh(x) bswap_32(x)
+# define be64toh(x) (x)
+# define le64toh(x) bswap_64(x)
+#endif
+
+__attribute__((noreturn))
+static void die(const char *msg)
+{
+ fputs(msg, stderr);
+ exit(EXIT_FAILURE);
+}
+
+int main(int argc, const char *argv[])
+{
+ uint64_t entry;
+ size_t nread;
+ FILE *file;
+ union {
+ Elf32_Ehdr ehdr32;
+ Elf64_Ehdr ehdr64;
+ } hdr;
+
+ if (argc != 2)
+ die("Usage: elf-entry <elf-file>\n");
+
+ file = fopen(argv[1], "r");
+ if (!file) {
+ perror("Unable to open input file");
+ return EXIT_FAILURE;
+ }
+
+ nread = fread(&hdr, 1, sizeof(hdr), file);
+ if (nread != sizeof(hdr)) {
+ perror("Unable to read input file");
+ fclose(file);
+ return EXIT_FAILURE;
+ }
+
+ if (memcmp(hdr.ehdr32.e_ident, ELFMAG, SELFMAG)) {
+ fclose(file);
+ die("Input is not an ELF\n");
+ }
+
+ switch (hdr.ehdr32.e_ident[EI_CLASS]) {
+ case ELFCLASS32:
+ switch (hdr.ehdr32.e_ident[EI_DATA]) {
+ case ELFDATA2LSB:
+ entry = le32toh(hdr.ehdr32.e_entry);
+ break;
+ case ELFDATA2MSB:
+ entry = be32toh(hdr.ehdr32.e_entry);
+ break;
+ default:
+ fclose(file);
+ die("Invalid ELF encoding\n");
+ }
+
+ /* Sign extend to form a canonical address */
+ entry = (int64_t)(int32_t)entry;
+ break;
+
+ case ELFCLASS64:
+ switch (hdr.ehdr32.e_ident[EI_DATA]) {
+ case ELFDATA2LSB:
+ entry = le64toh(hdr.ehdr64.e_entry);
+ break;
+ case ELFDATA2MSB:
+ entry = be64toh(hdr.ehdr64.e_entry);
+ break;
+ default:
+ fclose(file);
+ die("Invalid ELF encoding\n");
+ }
+ break;
+
+ default:
+ fclose(file);
+ die("Invalid ELF class\n");
+ }
+
+ printf("0x%016" PRIx64 "\n", entry);
+ fclose(file);
+ return EXIT_SUCCESS;
+}
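
The ELFCLASS32 sign extension above is what turns a 32-bit KSEG0 entry point into a canonical 64-bit address before it is printed. A standalone sketch with an invented entry value:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t entry = 0x80100000;		/* hypothetical 32-bit e_entry */

	entry = (int64_t)(int32_t)entry;	/* sign extend -> 0xffffffff80100000 */
	printf("0x%016" PRIx64 "\n", entry);
	return 0;
}
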
diff --git a/arch/mips/tools/generic-board-config.sh b/arch/mips/tools/generic-board-config.sh
new file mode 100755
index 000000000..dfa5f31f5
--- /dev/null
+++ b/arch/mips/tools/generic-board-config.sh
@@ -0,0 +1,84 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2017 Imagination Technologies
+# Author: Paul Burton <paul.burton@mips.com>
+#
+# This script merges configuration fragments for boards supported by the
+# generic MIPS kernel. It checks each for requirements specified using
+# formatted comments, and then calls merge_config.sh to merge those
+# fragments which have no unmet requirements.
+#
+# An example of requirements in your board config fragment might be:
+#
+# # require CONFIG_CPU_MIPS32_R2=y
+# # require CONFIG_CPU_LITTLE_ENDIAN=y
+#
+# This would mean that your board is only included in kernels which are
+# configured for little endian MIPS32r2 CPUs, and not for example in kernels
+# configured for 64 bit or big endian systems.
+#
+
+srctree="$1"
+objtree="$2"
+ref_cfg="$3"
+cfg="$4"
+boards_origin="$5"
+shift 5
+
+# Only print Skipping... lines if the user explicitly specified BOARDS=. In the
+# general case it only serves to obscure the useful output about what actually
+# was included.
+case ${boards_origin} in
+"command line")
+ print_skipped=1
+ ;;
+environment*)
+ print_skipped=1
+ ;;
+*)
+ print_skipped=0
+ ;;
+esac
+
+for board in $@; do
+ board_cfg="${srctree}/arch/mips/configs/generic/board-${board}.config"
+ if [ ! -f "${board_cfg}" ]; then
+ echo "WARNING: Board config '${board_cfg}' not found"
+ continue
+ fi
+
+ # For each line beginning with # require, cut out the field following
+ # it & search for that in the reference config file. If the requirement
+ # is not found then the subshell will exit with code 1, and we'll
+ # continue on to the next board.
+ grep -E '^# require ' "${board_cfg}" | \
+ cut -d' ' -f 3- | \
+ while read req; do
+ case ${req} in
+ *=y)
+ # If we require something =y then we check that a line
+ # containing it is present in the reference config.
+ grep -Eq "^${req}\$" "${ref_cfg}" && continue
+ ;;
+ *=n)
+ # If we require something =n then we just invert that
+ # check, considering the requirement met if there isn't
+ # a line containing the value =y in the reference
+ # config.
+ grep -Eq "^${req/%=n/=y}\$" "${ref_cfg}" || continue
+ ;;
+ *)
+ echo "WARNING: Unhandled requirement '${req}'"
+ ;;
+ esac
+
+ [ ${print_skipped} -eq 1 ] && echo "Skipping ${board_cfg}"
+ exit 1
+ done || continue
+
+ # Merge this board config fragment into our final config file
+ ${srctree}/scripts/kconfig/merge_config.sh \
+ -m -O ${objtree} ${cfg} ${board_cfg} \
+ | grep -Ev '^(#|Using)'
+done
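
As the header comment describes, a hypothetical fragment such as arch/mips/configs/generic/board-foo.config containing

# require CONFIG_64BIT=y
CONFIG_FOO_DRIVER=y

would be merged only into reference configurations that already contain CONFIG_64BIT=y and skipped otherwise (with a "Skipping..." line only when BOARDS= was given explicitly); the file name and symbols here are invented purely for illustration.
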
diff --git a/arch/mips/tools/loongson3-llsc-check.c b/arch/mips/tools/loongson3-llsc-check.c
new file mode 100644
index 000000000..bdbc7b432
--- /dev/null
+++ b/arch/mips/tools/loongson3-llsc-check.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <byteswap.h>
+#include <elf.h>
+#include <endian.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#ifdef be32toh
+/* If libc provides le{16,32,64}toh() then we'll use them */
+#elif BYTE_ORDER == LITTLE_ENDIAN
+# define le16toh(x) (x)
+# define le32toh(x) (x)
+# define le64toh(x) (x)
+#elif BYTE_ORDER == BIG_ENDIAN
+# define le16toh(x) bswap_16(x)
+# define le32toh(x) bswap_32(x)
+# define le64toh(x) bswap_64(x)
+#endif
+
+/* MIPS opcodes, in bits 31:26 of an instruction */
+#define OP_SPECIAL 0x00
+#define OP_REGIMM 0x01
+#define OP_BEQ 0x04
+#define OP_BNE 0x05
+#define OP_BLEZ 0x06
+#define OP_BGTZ 0x07
+#define OP_BEQL 0x14
+#define OP_BNEL 0x15
+#define OP_BLEZL 0x16
+#define OP_BGTZL 0x17
+#define OP_LL 0x30
+#define OP_LLD 0x34
+#define OP_SC 0x38
+#define OP_SCD 0x3c
+
+/* Bits 20:16 of OP_REGIMM instructions */
+#define REGIMM_BLTZ 0x00
+#define REGIMM_BGEZ 0x01
+#define REGIMM_BLTZL 0x02
+#define REGIMM_BGEZL 0x03
+#define REGIMM_BLTZAL 0x10
+#define REGIMM_BGEZAL 0x11
+#define REGIMM_BLTZALL 0x12
+#define REGIMM_BGEZALL 0x13
+
+/* Bits 5:0 of OP_SPECIAL instructions */
+#define SPECIAL_SYNC 0x0f
+
+static void usage(FILE *f)
+{
+ fprintf(f, "Usage: loongson3-llsc-check /path/to/vmlinux\n");
+}
+
+static int se16(uint16_t x)
+{
+ return (int16_t)x;
+}
+
+static bool is_ll(uint32_t insn)
+{
+ switch (insn >> 26) {
+ case OP_LL:
+ case OP_LLD:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static bool is_sc(uint32_t insn)
+{
+ switch (insn >> 26) {
+ case OP_SC:
+ case OP_SCD:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static bool is_sync(uint32_t insn)
+{
+ /* Bits 31:11 should all be zeroes */
+ if (insn >> 11)
+ return false;
+
+ /* Bits 5:0 specify the SYNC special encoding */
+ if ((insn & 0x3f) != SPECIAL_SYNC)
+ return false;
+
+ return true;
+}
+
+static bool is_branch(uint32_t insn, int *off)
+{
+ switch (insn >> 26) {
+ case OP_BEQ:
+ case OP_BEQL:
+ case OP_BNE:
+ case OP_BNEL:
+ case OP_BGTZ:
+ case OP_BGTZL:
+ case OP_BLEZ:
+ case OP_BLEZL:
+ *off = se16(insn) + 1;
+ return true;
+
+ case OP_REGIMM:
+ switch ((insn >> 16) & 0x1f) {
+ case REGIMM_BGEZ:
+ case REGIMM_BGEZL:
+ case REGIMM_BGEZAL:
+ case REGIMM_BGEZALL:
+ case REGIMM_BLTZ:
+ case REGIMM_BLTZL:
+ case REGIMM_BLTZAL:
+ case REGIMM_BLTZALL:
+ *off = se16(insn) + 1;
+ return true;
+
+ default:
+ return false;
+ }
+
+ default:
+ return false;
+ }
+}
+
+static int check_ll(uint64_t pc, uint32_t *code, size_t sz)
+{
+ ssize_t i, max, sc_pos;
+ int off;
+
+ /*
+ * Every LL must be preceded by a sync instruction in order to ensure
+ * that instruction reordering doesn't allow a prior memory access to
+ * execute after the LL & cause erroneous results.
+ */
+ if (!is_sync(le32toh(code[-1]))) {
+ fprintf(stderr, "%" PRIx64 ": LL not preceded by sync\n", pc);
+ return -EINVAL;
+ }
+
+ /* Find the matching SC instruction */
+ max = sz / 4;
+ for (sc_pos = 0; sc_pos < max; sc_pos++) {
+ if (is_sc(le32toh(code[sc_pos])))
+ break;
+ }
+ if (sc_pos >= max) {
+ fprintf(stderr, "%" PRIx64 ": LL has no matching SC\n", pc);
+ return -EINVAL;
+ }
+
+ /*
+	 * Check that branches within the LL/SC loop either stay inside the
+	 * loop or target sync instructions, ensuring that speculative
+	 * execution can't generate memory accesses due to instructions
+	 * outside of the loop.
+ */
+ for (i = 0; i < sc_pos; i++) {
+ if (!is_branch(le32toh(code[i]), &off))
+ continue;
+
+ /*
+ * If the branch target is within the LL/SC loop then we don't
+ * need to worry about it.
+ */
+ if ((off >= -i) && (off <= sc_pos))
+ continue;
+
+ /* If the branch targets a sync instruction we're all good... */
+ if (is_sync(le32toh(code[i + off])))
+ continue;
+
+ /* ...but if not, we have a problem */
+ fprintf(stderr, "%" PRIx64 ": Branch target not a sync\n",
+ pc + (i * 4));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_code(uint64_t pc, uint32_t *code, size_t sz)
+{
+ int err = 0;
+
+ if (sz % 4) {
+ fprintf(stderr, "%" PRIx64 ": Section size not a multiple of 4\n",
+ pc);
+ err = -EINVAL;
+ sz -= (sz % 4);
+ }
+
+ if (is_ll(le32toh(code[0]))) {
+ fprintf(stderr, "%" PRIx64 ": First instruction in section is an LL\n",
+ pc);
+ err = -EINVAL;
+ }
+
+#define advance() ( \
+ code++, \
+ pc += 4, \
+ sz -= 4 \
+)
+
+ /*
+	 * Skip the first instruction, allowing check_ll to look backwards
+ * unconditionally.
+ */
+ advance();
+
+ /* Now scan through the code looking for LL instructions */
+ for (; sz; advance()) {
+ if (is_ll(le32toh(code[0])))
+ err |= check_ll(pc, code, sz);
+ }
+
+ return err;
+}
+
+int main(int argc, char *argv[])
+{
+ int vmlinux_fd, status, err, i;
+ const char *vmlinux_path;
+ struct stat st;
+ Elf64_Ehdr *eh;
+ Elf64_Shdr *sh;
+ void *vmlinux;
+
+ status = EXIT_FAILURE;
+
+ if (argc < 2) {
+ usage(stderr);
+ goto out_ret;
+ }
+
+ vmlinux_path = argv[1];
+ vmlinux_fd = open(vmlinux_path, O_RDONLY);
+ if (vmlinux_fd == -1) {
+ perror("Unable to open vmlinux");
+ goto out_ret;
+ }
+
+ err = fstat(vmlinux_fd, &st);
+ if (err) {
+ perror("Unable to stat vmlinux");
+ goto out_close;
+ }
+
+ vmlinux = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, vmlinux_fd, 0);
+ if (vmlinux == MAP_FAILED) {
+ perror("Unable to mmap vmlinux");
+ goto out_close;
+ }
+
+ eh = vmlinux;
+ if (memcmp(eh->e_ident, ELFMAG, SELFMAG)) {
+ fprintf(stderr, "vmlinux is not an ELF?\n");
+ goto out_munmap;
+ }
+
+ if (eh->e_ident[EI_CLASS] != ELFCLASS64) {
+ fprintf(stderr, "vmlinux is not 64b?\n");
+ goto out_munmap;
+ }
+
+ if (eh->e_ident[EI_DATA] != ELFDATA2LSB) {
+ fprintf(stderr, "vmlinux is not little endian?\n");
+ goto out_munmap;
+ }
+
+ for (i = 0; i < le16toh(eh->e_shnum); i++) {
+ sh = vmlinux + le64toh(eh->e_shoff) + (i * le16toh(eh->e_shentsize));
+
+ if (sh->sh_type != SHT_PROGBITS)
+ continue;
+ if (!(sh->sh_flags & SHF_EXECINSTR))
+ continue;
+
+ err = check_code(le64toh(sh->sh_addr),
+ vmlinux + le64toh(sh->sh_offset),
+ le64toh(sh->sh_size));
+ if (err)
+ goto out_munmap;
+ }
+
+ status = EXIT_SUCCESS;
+out_munmap:
+ munmap(vmlinux, st.st_size);
+out_close:
+ close(vmlinux_fd);
+out_ret:
+ fprintf(stdout, "loongson3-llsc-check returns %s\n",
+ status ? "failure" : "success");
+ return status;
+}
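
A small sketch of how check_ll() above classifies one branch word, using the tool's convention that the branch target index is the branch's own index plus off, where off is the sign-extended immediate plus one. The encoding and the positions are made up for illustration.

#include <stdint.h>
#include <stdio.h>

static int se16(uint16_t x) { return (int16_t)x; }

int main(void)
{
	/* beq $v0, $zero, -4: opcode BEQ (0x04), rs = $v0 (2), rt = $zero, imm = -4 */
	uint32_t insn = (0x04u << 26) | (2u << 21) | 0xfffc;
	int off = se16(insn & 0xffff) + 1;	/* -3 */
	int i = 3, sc_pos = 4;			/* pretend: branch 3 words after the LL, SC at word 4 */

	if (off >= -i && off <= sc_pos)
		printf("target index %d stays inside the LL/SC loop\n", i + off);
	else
		printf("target leaves the loop and must hit a sync instruction\n");
	return 0;
}
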
diff --git a/arch/mips/txx9/Kconfig b/arch/mips/txx9/Kconfig
new file mode 100644
index 000000000..85c4c121c
--- /dev/null
+++ b/arch/mips/txx9/Kconfig
@@ -0,0 +1,128 @@
+# SPDX-License-Identifier: GPL-2.0
+config MACH_TX39XX
+ bool
+ select MACH_TXX9
+ select SYS_HAS_CPU_TX39XX
+
+config MACH_TX49XX
+ bool
+ select MACH_TXX9
+ select CEVT_R4K
+ select CSRC_R4K
+ select IRQ_MIPS_CPU
+ select SYS_HAS_CPU_TX49XX
+ select SYS_SUPPORTS_64BIT_KERNEL
+
+config MACH_TXX9
+ bool
+ select DMA_NONCOHERENT
+ select SWAP_IO_SPACE
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select COMMON_CLK
+
+config TOSHIBA_JMR3927
+ bool "Toshiba JMR-TX3927 board"
+ depends on MACH_TX39XX
+ select SOC_TX3927
+
+config TOSHIBA_RBTX4927
+ bool "Toshiba RBTX49[23]7 board"
+ depends on MACH_TX49XX
+ select SOC_TX4927
+ # TX4937 is subset of TX4938
+ select SOC_TX4938
+ help
+ This Toshiba board is based on the TX4927 processor. Say Y here to
+ support this machine type
+
+config TOSHIBA_RBTX4938
+ bool "Toshiba RBTX4938 board"
+ depends on MACH_TX49XX
+ select SOC_TX4938
+ help
+ This Toshiba board is based on the TX4938 processor. Say Y here to
+ support this machine type
+
+config TOSHIBA_RBTX4939
+ bool "Toshiba RBTX4939 board"
+ depends on MACH_TX49XX
+ select SOC_TX4939
+ select TXX9_7SEGLED
+ help
+ This Toshiba board is based on the TX4939 processor. Say Y here to
+ support this machine type
+
+config SOC_TX3927
+ bool
+ select CEVT_TXX9
+ imply HAS_TXX9_SERIAL
+ select HAVE_PCI
+ select IRQ_TXX9
+ select GPIO_TXX9
+
+config SOC_TX4927
+ bool
+ select CEVT_TXX9
+ imply HAS_TXX9_SERIAL
+ select HAVE_PCI
+ select IRQ_TXX9
+ select PCI_TX4927
+ select GPIO_TXX9
+ imply HAS_TXX9_ACLC
+
+config SOC_TX4938
+ bool
+ select CEVT_TXX9
+ imply HAS_TXX9_SERIAL
+ select HAVE_PCI
+ select IRQ_TXX9
+ select PCI_TX4927
+ select GPIO_TXX9
+ imply HAS_TXX9_ACLC
+
+config SOC_TX4939
+ bool
+ select CEVT_TXX9
+ imply HAS_TXX9_SERIAL
+ select HAVE_PCI
+ select PCI_TX4927
+ imply HAS_TXX9_ACLC
+
+config TXX9_7SEGLED
+ bool
+
+config TOSHIBA_FPCIB0
+ bool "FPCIB0 Backplane Support"
+ depends on PCI && MACH_TXX9
+ select I8259
+
+config PICMG_PCI_BACKPLANE_DEFAULT
+ bool "Support for PICMG PCI Backplane"
+ depends on PCI && MACH_TXX9
+ default y if !TOSHIBA_FPCIB0
+
+if TOSHIBA_RBTX4938
+
+comment "Multiplex Pin Select"
+choice
+ prompt "PIO[58:61]"
+ default TOSHIBA_RBTX4938_MPLEX_PIO58_61
+
+config TOSHIBA_RBTX4938_MPLEX_PIO58_61
+ bool "PIO"
+config TOSHIBA_RBTX4938_MPLEX_NAND
+ bool "NAND"
+config TOSHIBA_RBTX4938_MPLEX_ATA
+ bool "ATA"
+config TOSHIBA_RBTX4938_MPLEX_KEEP
+ bool "Keep firmware settings"
+
+endchoice
+
+endif
+
+config PCI_TX4927
+ bool
diff --git a/arch/mips/txx9/Makefile b/arch/mips/txx9/Makefile
new file mode 100644
index 000000000..195295937
--- /dev/null
+++ b/arch/mips/txx9/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Common TXx9
+#
+obj-$(CONFIG_MACH_TX39XX) += generic/
+obj-$(CONFIG_MACH_TX49XX) += generic/
+
+#
+# Toshiba JMR-TX3927 board
+#
+obj-$(CONFIG_TOSHIBA_JMR3927) += jmr3927/
+
+#
+# Toshiba RBTX49XX boards
+#
+obj-$(CONFIG_TOSHIBA_RBTX4927) += rbtx4927/
+obj-$(CONFIG_TOSHIBA_RBTX4938) += rbtx4938/
+obj-$(CONFIG_TOSHIBA_RBTX4939) += rbtx4939/
diff --git a/arch/mips/txx9/Platform b/arch/mips/txx9/Platform
new file mode 100644
index 000000000..7f4429ba2
--- /dev/null
+++ b/arch/mips/txx9/Platform
@@ -0,0 +1,7 @@
+cflags-$(CONFIG_MACH_TX39XX) += \
+ -I$(srctree)/arch/mips/include/asm/mach-tx39xx
+cflags-$(CONFIG_MACH_TX49XX) += \
+ -I$(srctree)/arch/mips/include/asm/mach-tx49xx
+
+load-$(CONFIG_MACH_TX39XX) += 0xffffffff80050000
+load-$(CONFIG_MACH_TX49XX) += 0xffffffff80100000
diff --git a/arch/mips/txx9/generic/7segled.c b/arch/mips/txx9/generic/7segled.c
new file mode 100644
index 000000000..2203c2548
--- /dev/null
+++ b/arch/mips/txx9/generic/7segled.c
@@ -0,0 +1,123 @@
+/*
+ * 7 Segment LED routines
+ * Based on RBTX49xx patch from CELF patch archive.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) Copyright TOSHIBA CORPORATION 2005-2007
+ * All Rights Reserved.
+ */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/map_to_7segment.h>
+#include <asm/txx9/generic.h>
+
+static unsigned int tx_7segled_num;
+static void (*tx_7segled_putc)(unsigned int pos, unsigned char val);
+
+void __init txx9_7segled_init(unsigned int num,
+ void (*putc)(unsigned int pos, unsigned char val))
+{
+ tx_7segled_num = num;
+ tx_7segled_putc = putc;
+}
+
+static SEG7_CONVERSION_MAP(txx9_seg7map, MAP_ASCII7SEG_ALPHANUM_LC);
+
+int txx9_7segled_putc(unsigned int pos, char c)
+{
+ if (pos >= tx_7segled_num)
+ return -EINVAL;
+ c = map_to_seg7(&txx9_seg7map, c);
+ if (c < 0)
+ return c;
+ tx_7segled_putc(pos, c);
+ return 0;
+}
+
+static ssize_t ascii_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned int ch = dev->id;
+ txx9_7segled_putc(ch, buf[0]);
+ return size;
+}
+
+static ssize_t raw_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned int ch = dev->id;
+ tx_7segled_putc(ch, buf[0]);
+ return size;
+}
+
+static DEVICE_ATTR_WO(ascii);
+static DEVICE_ATTR_WO(raw);
+
+static ssize_t map_seg7_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ memcpy(buf, &txx9_seg7map, sizeof(txx9_seg7map));
+ return sizeof(txx9_seg7map);
+}
+
+static ssize_t map_seg7_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ if (size != sizeof(txx9_seg7map))
+ return -EINVAL;
+ memcpy(&txx9_seg7map, buf, size);
+ return size;
+}
+
+static DEVICE_ATTR(map_seg7, 0600, map_seg7_show, map_seg7_store);
+
+static struct bus_type tx_7segled_subsys = {
+ .name = "7segled",
+ .dev_name = "7segled",
+};
+
+static void tx_7segled_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static int __init tx_7segled_init_sysfs(void)
+{
+ int error, i;
+ if (!tx_7segled_num)
+ return -ENODEV;
+ error = subsys_system_register(&tx_7segled_subsys, NULL);
+ if (error)
+ return error;
+ error = device_create_file(tx_7segled_subsys.dev_root, &dev_attr_map_seg7);
+ if (error)
+ return error;
+ for (i = 0; i < tx_7segled_num; i++) {
+ struct device *dev;
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ error = -ENODEV;
+ break;
+ }
+ dev->id = i;
+ dev->bus = &tx_7segled_subsys;
+ dev->release = &tx_7segled_release;
+ error = device_register(dev);
+ if (error) {
+ put_device(dev);
+ return error;
+ }
+ device_create_file(dev, &dev_attr_ascii);
+ device_create_file(dev, &dev_attr_raw);
+ }
+ return error;
+}
+
+device_initcall(tx_7segled_init_sysfs);
diff --git a/arch/mips/txx9/generic/Makefile b/arch/mips/txx9/generic/Makefile
new file mode 100644
index 000000000..6d00580fc
--- /dev/null
+++ b/arch/mips/txx9/generic/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for common code for TXx9 based systems
+#
+
+obj-y += setup.o
+obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_SOC_TX3927) += setup_tx3927.o irq_tx3927.o
+obj-$(CONFIG_SOC_TX4927) += mem_tx4927.o setup_tx4927.o irq_tx4927.o
+obj-$(CONFIG_SOC_TX4938) += mem_tx4927.o setup_tx4938.o irq_tx4938.o
+obj-$(CONFIG_SOC_TX4939) += setup_tx4939.o irq_tx4939.o
+obj-$(CONFIG_TOSHIBA_FPCIB0) += smsc_fdc37m81x.o
+obj-$(CONFIG_SPI) += spi_eeprom.o
+obj-$(CONFIG_TXX9_7SEGLED) += 7segled.o
diff --git a/arch/mips/txx9/generic/irq_tx3927.c b/arch/mips/txx9/generic/irq_tx3927.c
new file mode 100644
index 000000000..c683f593e
--- /dev/null
+++ b/arch/mips/txx9/generic/irq_tx3927.c
@@ -0,0 +1,25 @@
+/*
+ * Common tx3927 irq handler
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ */
+#include <linux/init.h>
+#include <asm/txx9irq.h>
+#include <asm/txx9/tx3927.h>
+
+void __init tx3927_irq_init(void)
+{
+ int i;
+
+ txx9_irq_init(TX3927_IRC_REG);
+ /* raise priority for timers, sio */
+ for (i = 0; i < TX3927_NR_TMR; i++)
+ txx9_irq_set_pri(TX3927_IR_TMR(i), 6);
+ for (i = 0; i < TX3927_NR_SIO; i++)
+ txx9_irq_set_pri(TX3927_IR_SIO(i), 7);
+}
diff --git a/arch/mips/txx9/generic/irq_tx4927.c b/arch/mips/txx9/generic/irq_tx4927.c
new file mode 100644
index 000000000..ed8e702d4
--- /dev/null
+++ b/arch/mips/txx9/generic/irq_tx4927.c
@@ -0,0 +1,49 @@
+/*
+ * Common tx4927 irq handler
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/irq_cpu.h>
+#include <asm/txx9/tx4927.h>
+
+void __init tx4927_irq_init(void)
+{
+ int i;
+
+ mips_cpu_irq_init();
+ txx9_irq_init(TX4927_IRC_REG & 0xfffffffffULL);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4927_IRC_INT,
+ handle_simple_irq);
+ /* raise priority for errors, timers, SIO */
+ txx9_irq_set_pri(TX4927_IR_ECCERR, 7);
+ txx9_irq_set_pri(TX4927_IR_WTOERR, 7);
+ txx9_irq_set_pri(TX4927_IR_PCIERR, 7);
+ txx9_irq_set_pri(TX4927_IR_PCIPME, 7);
+ for (i = 0; i < TX4927_NUM_IR_TMR; i++)
+ txx9_irq_set_pri(TX4927_IR_TMR(i), 6);
+ for (i = 0; i < TX4927_NUM_IR_SIO; i++)
+ txx9_irq_set_pri(TX4927_IR_SIO(i), 5);
+}
diff --git a/arch/mips/txx9/generic/irq_tx4938.c b/arch/mips/txx9/generic/irq_tx4938.c
new file mode 100644
index 000000000..aace85653
--- /dev/null
+++ b/arch/mips/txx9/generic/irq_tx4938.c
@@ -0,0 +1,37 @@
+/*
+ * linux/arch/mips/tx4938/common/irq.c
+ *
+ * Common tx4938 irq handler
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/irq_cpu.h>
+#include <asm/txx9/tx4938.h>
+
+void __init tx4938_irq_init(void)
+{
+ int i;
+
+ mips_cpu_irq_init();
+ txx9_irq_init(TX4938_IRC_REG & 0xfffffffffULL);
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4938_IRC_INT,
+ handle_simple_irq);
+ /* raise priority for errors, timers, SIO */
+ txx9_irq_set_pri(TX4938_IR_ECCERR, 7);
+ txx9_irq_set_pri(TX4938_IR_WTOERR, 7);
+ txx9_irq_set_pri(TX4938_IR_PCIERR, 7);
+ txx9_irq_set_pri(TX4938_IR_PCIPME, 7);
+ for (i = 0; i < TX4938_NUM_IR_TMR; i++)
+ txx9_irq_set_pri(TX4938_IR_TMR(i), 6);
+ for (i = 0; i < TX4938_NUM_IR_SIO; i++)
+ txx9_irq_set_pri(TX4938_IR_SIO(i), 5);
+}
diff --git a/arch/mips/txx9/generic/irq_tx4939.c b/arch/mips/txx9/generic/irq_tx4939.c
new file mode 100644
index 000000000..0d7267e81
--- /dev/null
+++ b/arch/mips/txx9/generic/irq_tx4939.c
@@ -0,0 +1,216 @@
+/*
+ * TX4939 irq routines
+ * Based on linux/arch/mips/kernel/irq_txx9.c,
+ * and RBTX49xx patch from CELF patch archive.
+ *
+ * Copyright 2001, 2003-2005 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ahennessy@mvista.com
+ * source@mvista.com
+ * Copyright (C) 2000-2001,2005-2007 Toshiba Corporation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+/*
+ * TX4939 defines 64 IRQs.
+ * Similar to irq_txx9.c, but with different register layouts.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/types.h>
+#include <asm/irq_cpu.h>
+#include <asm/txx9irq.h>
+#include <asm/txx9/tx4939.h>
+
+/* IRCER : Int. Control Enable */
+#define TXx9_IRCER_ICE 0x00000001
+
+/* IRCR : Int. Control */
+#define TXx9_IRCR_LOW 0x00000000
+#define TXx9_IRCR_HIGH 0x00000001
+#define TXx9_IRCR_DOWN 0x00000002
+#define TXx9_IRCR_UP 0x00000003
+#define TXx9_IRCR_EDGE(cr) ((cr) & 0x00000002)
+
+/* IRSCR : Int. Status Control */
+#define TXx9_IRSCR_EIClrE 0x00000100
+#define TXx9_IRSCR_EIClr_MASK 0x0000000f
+
+/* IRCSR : Int. Current Status */
+#define TXx9_IRCSR_IF 0x00010000
+
+#define irc_dlevel 0
+#define irc_elevel 1
+
+static struct {
+ unsigned char level;
+ unsigned char mode;
+} tx4939irq[TX4939_NUM_IR] __read_mostly;
+
+static void tx4939_irq_unmask(struct irq_data *d)
+{
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
+ u32 __iomem *lvlp;
+ int ofs;
+ if (irq_nr < 32) {
+ irq_nr--;
+ lvlp = &tx4939_ircptr->lvl[(irq_nr % 16) / 2].r;
+ } else {
+ irq_nr -= 32;
+ lvlp = &tx4939_ircptr->lvl[8 + (irq_nr % 16) / 2].r;
+ }
+ ofs = (irq_nr & 16) + (irq_nr & 1) * 8;
+ __raw_writel((__raw_readl(lvlp) & ~(0xff << ofs))
+ | (tx4939irq[irq_nr].level << ofs),
+ lvlp);
+}
+
+static inline void tx4939_irq_mask(struct irq_data *d)
+{
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
+ u32 __iomem *lvlp;
+ int ofs;
+ if (irq_nr < 32) {
+ irq_nr--;
+ lvlp = &tx4939_ircptr->lvl[(irq_nr % 16) / 2].r;
+ } else {
+ irq_nr -= 32;
+ lvlp = &tx4939_ircptr->lvl[8 + (irq_nr % 16) / 2].r;
+ }
+ ofs = (irq_nr & 16) + (irq_nr & 1) * 8;
+ __raw_writel((__raw_readl(lvlp) & ~(0xff << ofs))
+ | (irc_dlevel << ofs),
+ lvlp);
+ mmiowb();
+}
+
+static void tx4939_irq_mask_ack(struct irq_data *d)
+{
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
+
+ tx4939_irq_mask(d);
+ if (TXx9_IRCR_EDGE(tx4939irq[irq_nr].mode)) {
+ irq_nr--;
+ /* clear edge detection */
+ __raw_writel((TXx9_IRSCR_EIClrE | (irq_nr & 0xf))
+ << (irq_nr & 0x10),
+ &tx4939_ircptr->edc.r);
+ }
+}
+
+static int tx4939_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
+ u32 cr;
+ u32 __iomem *crp;
+ int ofs;
+ int mode;
+
+ if (flow_type & IRQF_TRIGGER_PROBE)
+ return 0;
+ switch (flow_type & IRQF_TRIGGER_MASK) {
+ case IRQF_TRIGGER_RISING:
+ mode = TXx9_IRCR_UP;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ mode = TXx9_IRCR_DOWN;
+ break;
+ case IRQF_TRIGGER_HIGH:
+ mode = TXx9_IRCR_HIGH;
+ break;
+ case IRQF_TRIGGER_LOW:
+ mode = TXx9_IRCR_LOW;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (irq_nr < 32) {
+ irq_nr--;
+ crp = &tx4939_ircptr->dm[(irq_nr & 8) >> 3].r;
+ } else {
+ irq_nr -= 32;
+ crp = &tx4939_ircptr->dm2[((irq_nr & 8) >> 3)].r;
+ }
+ ofs = (((irq_nr & 16) >> 1) | (irq_nr & (8 - 1))) * 2;
+ cr = __raw_readl(crp);
+ cr &= ~(0x3 << ofs);
+ cr |= (mode & 0x3) << ofs;
+ __raw_writel(cr, crp);
+ tx4939irq[irq_nr].mode = mode;
+ return 0;
+}
+
+static struct irq_chip tx4939_irq_chip = {
+ .name = "TX4939",
+ .irq_ack = tx4939_irq_mask_ack,
+ .irq_mask = tx4939_irq_mask,
+ .irq_mask_ack = tx4939_irq_mask_ack,
+ .irq_unmask = tx4939_irq_unmask,
+ .irq_set_type = tx4939_irq_set_type,
+};
+
+static int tx4939_irq_set_pri(int irc_irq, int new_pri)
+{
+ int old_pri;
+
+ if ((unsigned int)irc_irq >= TX4939_NUM_IR)
+ return 0;
+ old_pri = tx4939irq[irc_irq].level;
+ tx4939irq[irc_irq].level = new_pri;
+ return old_pri;
+}
+
+void __init tx4939_irq_init(void)
+{
+ int i;
+
+ mips_cpu_irq_init();
+ /* disable interrupt control */
+ __raw_writel(0, &tx4939_ircptr->den.r);
+ __raw_writel(0, &tx4939_ircptr->maskint.r);
+ __raw_writel(0, &tx4939_ircptr->maskext.r);
+ /* irq_base + 0 is not used */
+ for (i = 1; i < TX4939_NUM_IR; i++) {
+ tx4939irq[i].level = 4; /* middle level */
+ tx4939irq[i].mode = TXx9_IRCR_LOW;
+ irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &tx4939_irq_chip,
+ handle_level_irq);
+ }
+
+ /* mask all IRC interrupts */
+ __raw_writel(0, &tx4939_ircptr->msk.r);
+ for (i = 0; i < 16; i++)
+ __raw_writel(0, &tx4939_ircptr->lvl[i].r);
+ /* setup IRC interrupt mode (Low Active) */
+ for (i = 0; i < 2; i++)
+ __raw_writel(0, &tx4939_ircptr->dm[i].r);
+ for (i = 0; i < 2; i++)
+ __raw_writel(0, &tx4939_ircptr->dm2[i].r);
+ /* enable interrupt control */
+ __raw_writel(TXx9_IRCER_ICE, &tx4939_ircptr->den.r);
+ __raw_writel(irc_elevel, &tx4939_ircptr->msk.r);
+
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4939_IRC_INT,
+ handle_simple_irq);
+
+ /* raise priority for errors, timers, sio */
+ tx4939_irq_set_pri(TX4939_IR_WTOERR, 7);
+ tx4939_irq_set_pri(TX4939_IR_PCIERR, 7);
+ tx4939_irq_set_pri(TX4939_IR_PCIPME, 7);
+ for (i = 0; i < TX4939_NUM_IR_TMR; i++)
+ tx4939_irq_set_pri(TX4939_IR_TMR(i), 6);
+ for (i = 0; i < TX4939_NUM_IR_SIO; i++)
+ tx4939_irq_set_pri(TX4939_IR_SIO(i), 5);
+}
+
+int tx4939_irq(void)
+{
+ u32 csr = __raw_readl(&tx4939_ircptr->cs.r);
+
+ if (likely(!(csr & TXx9_IRCSR_IF)))
+ return TXX9_IRQ_BASE + (csr & (TX4939_NUM_IR - 1));
+ return -1;
+}
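
A sketch of the index arithmetic shared by tx4939_irq_mask() and tx4939_irq_unmask() above, showing which interrupt level register byte a sample IRC source lands in; the source number is arbitrary.

#include <stdio.h>

int main(void)
{
	unsigned int irq_nr = 21;	/* hypothetical d->irq - TXX9_IRQ_BASE */
	unsigned int reg, ofs;

	if (irq_nr < 32) {
		irq_nr--;			/* 20 */
		reg = (irq_nr % 16) / 2;	/* lvl[2] */
	} else {
		irq_nr -= 32;
		reg = 8 + (irq_nr % 16) / 2;
	}
	ofs = (irq_nr & 16) + (irq_nr & 1) * 8;	/* 16: the level byte sits at bits 23:16 */

	printf("level field: lvl[%u], bits %u:%u\n", reg, ofs + 7, ofs);
	return 0;
}
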
diff --git a/arch/mips/txx9/generic/mem_tx4927.c b/arch/mips/txx9/generic/mem_tx4927.c
new file mode 100644
index 000000000..deea2ceae
--- /dev/null
+++ b/arch/mips/txx9/generic/mem_tx4927.c
@@ -0,0 +1,75 @@
+/*
+ * common tx4927 memory interface
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * Copyright 2001-2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <asm/txx9/tx4927.h>
+
+static unsigned int __init tx4927_process_sdccr(u64 __iomem *addr)
+{
+ u64 val;
+ unsigned int sdccr_ce;
+ unsigned int sdccr_bs;
+ unsigned int sdccr_rs;
+ unsigned int sdccr_cs;
+ unsigned int sdccr_mw;
+ unsigned int bs = 0;
+ unsigned int rs = 0;
+ unsigned int cs = 0;
+ unsigned int mw = 0;
+
+ val = __raw_readq(addr);
+
+	/* MVMCP -- need #defines for these bit masks */
+ sdccr_ce = ((val & (1 << 10)) >> 10);
+ sdccr_bs = ((val & (1 << 8)) >> 8);
+ sdccr_rs = ((val & (3 << 5)) >> 5);
+ sdccr_cs = ((val & (7 << 2)) >> 2);
+ sdccr_mw = ((val & (1 << 0)) >> 0);
+
+ if (sdccr_ce) {
+ bs = 2 << sdccr_bs;
+ rs = 2048 << sdccr_rs;
+ cs = 256 << sdccr_cs;
+ mw = 8 >> sdccr_mw;
+ }
+
+ return rs * cs * mw * bs;
+}
+
+unsigned int __init tx4927_get_mem_size(void)
+{
+ unsigned int total = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tx4927_sdramcptr->cr); i++)
+ total += tx4927_process_sdccr(&tx4927_sdramcptr->cr[i]);
+ return total;
+}
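
A worked example of the size computation in tx4927_process_sdccr() above, with invented field values (channel enabled, 4 banks, 8192 rows, 1024 columns, 8 bytes wide):

#include <stdio.h>

int main(void)
{
	unsigned int sdccr_ce = 1, sdccr_bs = 1, sdccr_rs = 2, sdccr_cs = 2, sdccr_mw = 0;
	unsigned int size = 0;

	if (sdccr_ce)
		size = (2048 << sdccr_rs) * (256 << sdccr_cs) *
		       (8 >> sdccr_mw) * (2 << sdccr_bs);

	printf("%u bytes (%u MiB)\n", size, size >> 20);	/* 268435456 bytes = 256 MiB */
	return 0;
}
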
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
new file mode 100644
index 000000000..fb998726b
--- /dev/null
+++ b/arch/mips/txx9/generic/pci.c
@@ -0,0 +1,432 @@
+/*
+ * linux/arch/mips/txx9/pci.c
+ *
+ * Based on linux/arch/mips/txx9/rbtx4927/setup.c,
+ * linux/arch/mips/txx9/rbtx4938/setup.c,
+ * and RBTX49xx patch from CELF patch archive.
+ *
+ * Copyright 2001-2005 MontaVista Software Inc.
+ * Copyright (C) 1996, 97, 2001, 04 Ralf Baechle (ralf@linux-mips.org)
+ * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/pci.h>
+#ifdef CONFIG_TOSHIBA_FPCIB0
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <asm/i8259.h>
+#include <asm/txx9/smsc_fdc37m81x.h>
+#endif
+
+static int __init
+early_read_config_word(struct pci_controller *hose,
+ int top_bus, int bus, int devfn, int offset, u16 *value)
+{
+ struct pci_bus fake_bus;
+
+ fake_bus.number = bus;
+ fake_bus.sysdata = hose;
+ fake_bus.ops = hose->pci_ops;
+
+ if (bus != top_bus)
+ /* Fake a parent bus structure. */
+ fake_bus.parent = &fake_bus;
+ else
+ fake_bus.parent = NULL;
+
+ return pci_bus_read_config_word(&fake_bus, devfn, offset, value);
+}
+
+int __init txx9_pci66_check(struct pci_controller *hose, int top_bus,
+ int current_bus)
+{
+ u32 pci_devfn;
+ unsigned short vid;
+ int cap66 = -1;
+ u16 stat;
+
+ /* It seems SLC90E66 needs some time after PCI reset... */
+ mdelay(80);
+
+ pr_info("PCI: Checking 66MHz capabilities...\n");
+
+ for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) {
+ if (PCI_FUNC(pci_devfn))
+ continue;
+ if (early_read_config_word(hose, top_bus, current_bus,
+ pci_devfn, PCI_VENDOR_ID, &vid) !=
+ PCIBIOS_SUCCESSFUL)
+ continue;
+ if (vid == 0xffff)
+ continue;
+
+ /* check 66MHz capability */
+ if (cap66 < 0)
+ cap66 = 1;
+ if (cap66) {
+ early_read_config_word(hose, top_bus, current_bus,
+ pci_devfn, PCI_STATUS, &stat);
+ if (!(stat & PCI_STATUS_66MHZ)) {
+ pr_debug("PCI: %02x:%02x not 66MHz capable.\n",
+ current_bus, pci_devfn);
+ cap66 = 0;
+ break;
+ }
+ }
+ }
+ return cap66 > 0;
+}
+
+static struct resource primary_pci_mem_res[2] = {
+ { .name = "PCI MEM" },
+ { .name = "PCI MMIO" },
+};
+static struct resource primary_pci_io_res = { .name = "PCI IO" };
+struct pci_controller txx9_primary_pcic = {
+ .mem_resource = &primary_pci_mem_res[0],
+ .io_resource = &primary_pci_io_res,
+};
+
+#ifdef CONFIG_64BIT
+int txx9_pci_mem_high __initdata = 1;
+#else
+int txx9_pci_mem_high __initdata;
+#endif
+
+/*
+ * allocate pci_controller and resources.
+ * mem_base, io_base: physical address. 0 for auto assignment.
+ * mem_size and io_size mean the max size on auto assignment.
+ * pcic must be &txx9_primary_pcic or NULL.
+ */
+struct pci_controller *__init
+txx9_alloc_pci_controller(struct pci_controller *pcic,
+ unsigned long mem_base, unsigned long mem_size,
+ unsigned long io_base, unsigned long io_size)
+{
+ struct pcic {
+ struct pci_controller c;
+ struct resource r_mem[2];
+ struct resource r_io;
+ } *new = NULL;
+ int min_size = 0x10000;
+
+ if (!pcic) {
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+ new->r_mem[0].name = "PCI mem";
+ new->r_mem[1].name = "PCI mmio";
+ new->r_io.name = "PCI io";
+ new->c.mem_resource = new->r_mem;
+ new->c.io_resource = &new->r_io;
+ pcic = &new->c;
+ } else
+ BUG_ON(pcic != &txx9_primary_pcic);
+ pcic->io_resource->flags = IORESOURCE_IO;
+
+ /*
+ * for auto assignment, first search a (big) region for PCI
+ * MEM, then search a region for PCI IO.
+ */
+ if (mem_base) {
+ pcic->mem_resource[0].start = mem_base;
+ pcic->mem_resource[0].end = mem_base + mem_size - 1;
+ if (request_resource(&iomem_resource, &pcic->mem_resource[0]))
+ goto free_and_exit;
+ } else {
+ unsigned long min = 0, max = 0x20000000; /* low 512MB */
+ if (!mem_size) {
+ /* default size for auto assignment */
+ if (txx9_pci_mem_high)
+ mem_size = 0x20000000; /* mem:512M(max) */
+ else
+ mem_size = 0x08000000; /* mem:128M(max) */
+ }
+ if (txx9_pci_mem_high) {
+ min = 0x20000000;
+ max = 0xe0000000;
+ }
+ /* search free region for PCI MEM */
+ for (; mem_size >= min_size; mem_size /= 2) {
+ if (allocate_resource(&iomem_resource,
+ &pcic->mem_resource[0],
+ mem_size, min, max,
+ mem_size, NULL, NULL) == 0)
+ break;
+ }
+ if (mem_size < min_size)
+ goto free_and_exit;
+ }
+
+ pcic->mem_resource[1].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (io_base) {
+ pcic->mem_resource[1].start = io_base;
+ pcic->mem_resource[1].end = io_base + io_size - 1;
+ if (request_resource(&iomem_resource, &pcic->mem_resource[1]))
+ goto release_and_exit;
+ } else {
+ if (!io_size)
+ /* default size for auto assignment */
+ io_size = 0x01000000; /* io:16M(max) */
+ /* search free region for PCI IO in low 512MB */
+ for (; io_size >= min_size; io_size /= 2) {
+ if (allocate_resource(&iomem_resource,
+ &pcic->mem_resource[1],
+ io_size, 0, 0x20000000,
+ io_size, NULL, NULL) == 0)
+ break;
+ }
+ if (io_size < min_size)
+ goto release_and_exit;
+ io_base = pcic->mem_resource[1].start;
+ }
+
+ pcic->mem_resource[0].flags = IORESOURCE_MEM;
+ if (pcic == &txx9_primary_pcic &&
+ mips_io_port_base == (unsigned long)-1) {
+ /* map ioport 0 to PCI I/O space address 0 */
+ set_io_port_base(IO_BASE + pcic->mem_resource[1].start);
+ pcic->io_resource->start = 0;
+ pcic->io_offset = 0; /* busaddr == ioaddr */
+ pcic->io_map_base = IO_BASE + pcic->mem_resource[1].start;
+ } else {
+ /* physaddr to ioaddr */
+ pcic->io_resource->start =
+ io_base - (mips_io_port_base - IO_BASE);
+ pcic->io_offset = io_base - (mips_io_port_base - IO_BASE);
+ pcic->io_map_base = mips_io_port_base;
+ }
+ pcic->io_resource->end = pcic->io_resource->start + io_size - 1;
+
+ pcic->mem_offset = 0; /* busaddr == physaddr */
+
+ pr_info("PCI: IO %pR MEM %pR\n", &pcic->mem_resource[1],
+ &pcic->mem_resource[0]);
+
+ /* register_pci_controller() will request MEM resource */
+ release_resource(&pcic->mem_resource[0]);
+ return pcic;
+ release_and_exit:
+ release_resource(&pcic->mem_resource[0]);
+ free_and_exit:
+ kfree(new);
+ pr_err("PCI: Failed to allocate resources.\n");
+ return NULL;
+}
+
+static int __init
+txx9_arch_pci_init(void)
+{
+	PCIBIOS_MIN_IO = 0x8000;	/* reserve legacy I/O space */
+ return 0;
+}
+arch_initcall(txx9_arch_pci_init);
+
+/* IRQ/IDSEL mapping */
+int txx9_pci_option =
+#ifdef CONFIG_PICMG_PCI_BACKPLANE_DEFAULT
+ TXX9_PCI_OPT_PICMG |
+#endif
+ TXX9_PCI_OPT_CLK_AUTO;
+
+enum txx9_pci_err_action txx9_pci_err_action = TXX9_PCI_ERR_REPORT;
+
+#ifdef CONFIG_TOSHIBA_FPCIB0
+static irqreturn_t i8259_interrupt(int irq, void *dev_id)
+{
+ int isairq;
+
+ isairq = i8259_irq();
+ if (unlikely(isairq <= I8259A_IRQ_BASE))
+ return IRQ_NONE;
+ generic_handle_irq(isairq);
+ return IRQ_HANDLED;
+}
+
+static int txx9_i8259_irq_setup(int irq)
+{
+ int err;
+
+ init_i8259_irqs();
+ err = request_irq(irq, &i8259_interrupt, IRQF_SHARED,
+ "cascade(i8259)", (void *)(long)irq);
+ if (!err)
+ pr_info("PCI-ISA bridge PIC (irq %d)\n", irq);
+ return err;
+}
+
+static void __ref quirk_slc90e66_bridge(struct pci_dev *dev)
+{
+ int irq; /* PCI/ISA Bridge interrupt */
+ u8 reg_64;
+ u32 reg_b0;
+ u8 reg_e1;
+ irq = pcibios_map_irq(dev, PCI_SLOT(dev->devfn), 1); /* INTA */
+ if (!irq)
+ return;
+ txx9_i8259_irq_setup(irq);
+ pci_read_config_byte(dev, 0x64, &reg_64);
+ pci_read_config_dword(dev, 0xb0, &reg_b0);
+ pci_read_config_byte(dev, 0xe1, &reg_e1);
+ /* serial irq control */
+ reg_64 = 0xd0;
+ /* serial irq pin */
+ reg_b0 |= 0x00010000;
+ /* ide irq on isa14 */
+ reg_e1 &= 0xf0;
+ reg_e1 |= 0x0d;
+ pci_write_config_byte(dev, 0x64, reg_64);
+ pci_write_config_dword(dev, 0xb0, reg_b0);
+ pci_write_config_byte(dev, 0xe1, reg_e1);
+
+ smsc_fdc37m81x_init(0x3f0);
+ smsc_fdc37m81x_config_beg();
+ smsc_fdc37m81x_config_set(SMSC_FDC37M81X_DNUM,
+ SMSC_FDC37M81X_KBD);
+ smsc_fdc37m81x_config_set(SMSC_FDC37M81X_INT, 1);
+ smsc_fdc37m81x_config_set(SMSC_FDC37M81X_INT2, 12);
+ smsc_fdc37m81x_config_set(SMSC_FDC37M81X_ACTIVE,
+ 1);
+ smsc_fdc37m81x_config_end();
+}
+
+static void quirk_slc90e66_ide(struct pci_dev *dev)
+{
+ unsigned char dat;
+ int regs[2] = {0x41, 0x43};
+ int i;
+
+ /* SMSC SLC90E66 IDE uses irq 14, 15 (default) */
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 14);
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &dat);
+ pr_info("PCI: %s: IRQ %02x", pci_name(dev), dat);
+ /* enable SMSC SLC90E66 IDE */
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ pci_read_config_byte(dev, regs[i], &dat);
+ pci_write_config_byte(dev, regs[i], dat | 0x80);
+ pci_read_config_byte(dev, regs[i], &dat);
+ pr_cont(" IDETIM%d %02x", i, dat);
+ }
+ pci_read_config_byte(dev, 0x5c, &dat);
+ /*
+ * !!! DO NOT REMOVE THIS COMMENT IT IS REQUIRED BY SMSC !!!
+ *
+ * This line of code is intended to provide the user with a work
+ * around solution to the anomalies cited in SMSC's anomaly sheet
+ * entitled, "SLC90E66 Functional Rev.J_0.1 Anomalies"".
+ *
+ * !!! DO NOT REMOVE THIS COMMENT IT IS REQUIRED BY SMSC !!!
+ */
+ dat |= 0x01;
+ pci_write_config_byte(dev, 0x5c, dat);
+ pci_read_config_byte(dev, 0x5c, &dat);
+ pr_cont(" REG5C %02x\n", dat);
+}
+#endif /* CONFIG_TOSHIBA_FPCIB0 */
+
+static void tc35815_fixup(struct pci_dev *dev)
+{
+	/* This device may have PM registers but they are not supported. */
+ if (dev->pm_cap) {
+ dev_info(&dev->dev, "PM disabled\n");
+ dev->pm_cap = 0;
+ }
+}
+
+static void final_fixup(struct pci_dev *dev)
+{
+ unsigned char bist;
+
+	/* Do built-in self test */
+ if (pci_read_config_byte(dev, PCI_BIST, &bist) == PCIBIOS_SUCCESSFUL &&
+ (bist & PCI_BIST_CAPABLE)) {
+ unsigned long timeout;
+ pci_set_power_state(dev, PCI_D0);
+ pr_info("PCI: %s BIST...", pci_name(dev));
+ pci_write_config_byte(dev, PCI_BIST, PCI_BIST_START);
+ timeout = jiffies + HZ * 2; /* timeout after 2 sec */
+ do {
+ pci_read_config_byte(dev, PCI_BIST, &bist);
+ if (time_after(jiffies, timeout))
+ break;
+ } while (bist & PCI_BIST_START);
+ if (bist & (PCI_BIST_CODE_MASK | PCI_BIST_START))
+ pr_cont("failed. (0x%x)\n", bist);
+ else
+ pr_cont("OK.\n");
+ }
+}
+
+#ifdef CONFIG_TOSHIBA_FPCIB0
+#define PCI_DEVICE_ID_EFAR_SLC90E66_0 0x9460
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_0,
+ quirk_slc90e66_bridge);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_1,
+ quirk_slc90e66_ide);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_1,
+ quirk_slc90e66_ide);
+#endif
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TOSHIBA_2,
+ PCI_DEVICE_ID_TOSHIBA_TC35815_NWU, tc35815_fixup);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TOSHIBA_2,
+ PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939, tc35815_fixup);
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, final_fixup);
+DECLARE_PCI_FIXUP_RESUME(PCI_ANY_ID, PCI_ANY_ID, final_fixup);
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return txx9_board_vec->pci_map_irq(dev, slot, pin);
+}
+
+char * (*txx9_board_pcibios_setup)(char *str) __initdata;
+
+char *__init txx9_pcibios_setup(char *str)
+{
+ if (txx9_board_pcibios_setup && !txx9_board_pcibios_setup(str))
+ return NULL;
+ if (!strcmp(str, "picmg")) {
+ /* PICMG compliant backplane (TOSHIBA JMB-PICMG-ATX
+ (5V or 3.3V), JMB-PICMG-L2 (5V only), etc.) */
+ txx9_pci_option |= TXX9_PCI_OPT_PICMG;
+ return NULL;
+ } else if (!strcmp(str, "nopicmg")) {
+ /* non-PICMG compliant backplane (TOSHIBA
+ RBHBK4100,RBHBK4200, Interface PCM-PCM05, etc.) */
+ txx9_pci_option &= ~TXX9_PCI_OPT_PICMG;
+ return NULL;
+ } else if (!strncmp(str, "clk=", 4)) {
+ char *val = str + 4;
+ txx9_pci_option &= ~TXX9_PCI_OPT_CLK_MASK;
+ if (strcmp(val, "33") == 0)
+ txx9_pci_option |= TXX9_PCI_OPT_CLK_33;
+ else if (strcmp(val, "66") == 0)
+ txx9_pci_option |= TXX9_PCI_OPT_CLK_66;
+ else /* "auto" */
+ txx9_pci_option |= TXX9_PCI_OPT_CLK_AUTO;
+ return NULL;
+ } else if (!strncmp(str, "err=", 4)) {
+ if (!strcmp(str + 4, "panic"))
+ txx9_pci_err_action = TXX9_PCI_ERR_PANIC;
+ else if (!strcmp(str + 4, "ignore"))
+ txx9_pci_err_action = TXX9_PCI_ERR_IGNORE;
+ return NULL;
+ }
+
+ return str;
+}
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
new file mode 100644
index 000000000..6d0fd0e05
--- /dev/null
+++ b/arch/mips/txx9/generic/setup.c
@@ -0,0 +1,965 @@
+/*
+ * Based on linux/arch/mips/txx9/rbtx4938/setup.c,
+ * and RBTX49xx patch from CELF patch archive.
+ *
+ * 2003-2005 (c) MontaVista Software, Inc.
+ * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/txx9/ndfmc.h>
+#include <linux/serial_core.h>
+#include <linux/mtd/physmap.h>
+#include <linux/leds.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/bootinfo.h>
+#include <asm/idle.h>
+#include <asm/time.h>
+#include <asm/reboot.h>
+#include <asm/r4kcache.h>
+#include <asm/setup.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/pci.h>
+#include <asm/txx9tmr.h>
+#include <asm/txx9/dmac.h>
+#ifdef CONFIG_CPU_TX49XX
+#include <asm/txx9/tx4938.h>
+#endif
+
+/* EBUSC settings of TX4927, etc. */
+struct resource txx9_ce_res[8];
+static char txx9_ce_res_name[8][4]; /* "CEn" */
+
+/* pcode, internal register */
+unsigned int txx9_pcode;
+char txx9_pcode_str[8];
+static struct resource txx9_reg_res = {
+ .name = txx9_pcode_str,
+ .flags = IORESOURCE_MEM,
+};
+void __init
+txx9_reg_res_init(unsigned int pcode, unsigned long base, unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(txx9_ce_res); i++) {
+ sprintf(txx9_ce_res_name[i], "CE%d", i);
+ txx9_ce_res[i].flags = IORESOURCE_MEM;
+ txx9_ce_res[i].name = txx9_ce_res_name[i];
+ }
+
+ txx9_pcode = pcode;
+ sprintf(txx9_pcode_str, "TX%x", pcode);
+ if (base) {
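+ /* keep only the 36-bit physical address part of the (possibly sign-extended) base */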
+ txx9_reg_res.start = base & 0xfffffffffULL;
+ txx9_reg_res.end = (base & 0xfffffffffULL) + (size - 1);
+ request_resource(&iomem_resource, &txx9_reg_res);
+ }
+}
+
+/* clocks */
+unsigned int txx9_master_clock;
+unsigned int txx9_cpu_clock;
+unsigned int txx9_gbus_clock;
+
+#ifdef CONFIG_CPU_TX39XX
+/* don't enable by default - see errata */
+int txx9_ccfg_toeon __initdata;
+#else
+int txx9_ccfg_toeon __initdata = 1;
+#endif
+
+#define BOARD_VEC(board) extern struct txx9_board_vec board;
+#include <asm/txx9/boards.h>
+#undef BOARD_VEC
+
+struct txx9_board_vec *txx9_board_vec __initdata;
+static char txx9_system_type[32];
+
+static struct txx9_board_vec *board_vecs[] __initdata = {
+#define BOARD_VEC(board) &board,
+#include <asm/txx9/boards.h>
+#undef BOARD_VEC
+};
+
+static struct txx9_board_vec *__init find_board_byname(const char *name)
+{
+ int i;
+
+ /* search board_vecs table */
+ for (i = 0; i < ARRAY_SIZE(board_vecs); i++) {
+ if (strstr(board_vecs[i]->system, name))
+ return board_vecs[i];
+ }
+ return NULL;
+}
+
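+/* rebuild arcs_cmdline from the (argc, argv) pair handed over by the firmware */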
+static void __init prom_init_cmdline(void)
+{
+ int argc;
+ int *argv32;
+ int i; /* Always ignore the "-c" at argv[0] */
+
+ if (fw_arg0 >= CKSEG0 || fw_arg1 < CKSEG0) {
+ /*
+ * argc is not a valid number, or argv32 is not a valid
+ * pointer
+ */
+ argc = 0;
+ argv32 = NULL;
+ } else {
+ argc = (int)fw_arg0;
+ argv32 = (int *)fw_arg1;
+ }
+
+ arcs_cmdline[0] = '\0';
+
+ for (i = 1; i < argc; i++) {
+ char *str = (char *)(long)argv32[i];
+ if (i != 1)
+ strcat(arcs_cmdline, " ");
+ if (strchr(str, ' ')) {
+ strcat(arcs_cmdline, "\"");
+ strcat(arcs_cmdline, str);
+ strcat(arcs_cmdline, "\"");
+ } else
+ strcat(arcs_cmdline, str);
+ }
+}
+
+static int txx9_ic_disable __initdata;
+static int txx9_dc_disable __initdata;
+
+#if defined(CONFIG_CPU_TX49XX)
+/* flush the D-cache at a very early stage (before r4k_cache_init) */
+static void __init early_flush_dcache(void)
+{
+ unsigned int conf = read_c0_config();
+ unsigned int dc_size = 1 << (12 + ((conf & CONF_DC) >> 6));
+ unsigned int linesz = 32;
+ unsigned long addr, end;
+
+ end = INDEX_BASE + dc_size / 4;
+ /* 4way, waybit=0 */
+ for (addr = INDEX_BASE; addr < end; addr += linesz) {
+ cache_op(Index_Writeback_Inv_D, addr | 0);
+ cache_op(Index_Writeback_Inv_D, addr | 1);
+ cache_op(Index_Writeback_Inv_D, addr | 2);
+ cache_op(Index_Writeback_Inv_D, addr | 3);
+ }
+}
+
+static void __init txx9_cache_fixup(void)
+{
+ unsigned int conf;
+
+ conf = read_c0_config();
+ /* flush and disable */
+ if (txx9_ic_disable) {
+ conf |= TX49_CONF_IC;
+ write_c0_config(conf);
+ }
+ if (txx9_dc_disable) {
+ early_flush_dcache();
+ conf |= TX49_CONF_DC;
+ write_c0_config(conf);
+ }
+
+ /* enable cache */
+ conf = read_c0_config();
+ if (!txx9_ic_disable)
+ conf &= ~TX49_CONF_IC;
+ if (!txx9_dc_disable)
+ conf &= ~TX49_CONF_DC;
+ write_c0_config(conf);
+
+ if (conf & TX49_CONF_IC)
+ pr_info("TX49XX I-Cache disabled.\n");
+ if (conf & TX49_CONF_DC)
+ pr_info("TX49XX D-Cache disabled.\n");
+}
+#elif defined(CONFIG_CPU_TX39XX)
+/* flush the D-cache at a very early stage (before tx39_cache_init) */
+static void __init early_flush_dcache(void)
+{
+ unsigned int conf = read_c0_config();
+ unsigned int dc_size = 1 << (10 + ((conf & TX39_CONF_DCS_MASK) >>
+ TX39_CONF_DCS_SHIFT));
+ unsigned int linesz = 16;
+ unsigned long addr, end;
+
+ end = INDEX_BASE + dc_size / 2;
+ /* 2way, waybit=0 */
+ for (addr = INDEX_BASE; addr < end; addr += linesz) {
+ cache_op(Index_Writeback_Inv_D, addr | 0);
+ cache_op(Index_Writeback_Inv_D, addr | 1);
+ }
+}
+
+static void __init txx9_cache_fixup(void)
+{
+ unsigned int conf;
+
+ conf = read_c0_config();
+ /* flush and disable */
+ if (txx9_ic_disable) {
+ conf &= ~TX39_CONF_ICE;
+ write_c0_config(conf);
+ }
+ if (txx9_dc_disable) {
+ early_flush_dcache();
+ conf &= ~TX39_CONF_DCE;
+ write_c0_config(conf);
+ }
+
+ /* enable cache */
+ conf = read_c0_config();
+ if (!txx9_ic_disable)
+ conf |= TX39_CONF_ICE;
+ if (!txx9_dc_disable)
+ conf |= TX39_CONF_DCE;
+ write_c0_config(conf);
+
+ if (!(conf & TX39_CONF_ICE))
+ pr_info("TX39XX I-Cache disabled.\n");
+ if (!(conf & TX39_CONF_DCE))
+ pr_info("TX39XX D-Cache disabled.\n");
+}
+#else
+static inline void txx9_cache_fixup(void)
+{
+}
+#endif
+
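+/* pick off txx9-specific options; everything else is kept in arcs_cmdline */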
+static void __init preprocess_cmdline(void)
+{
+ static char cmdline[COMMAND_LINE_SIZE] __initdata;
+ char *s;
+
+ strcpy(cmdline, arcs_cmdline);
+ s = cmdline;
+ arcs_cmdline[0] = '\0';
+ while (s && *s) {
+ char *str = strsep(&s, " ");
+ if (strncmp(str, "board=", 6) == 0) {
+ txx9_board_vec = find_board_byname(str + 6);
+ continue;
+ } else if (strncmp(str, "masterclk=", 10) == 0) {
+ unsigned int val;
+ if (kstrtouint(str + 10, 10, &val) == 0)
+ txx9_master_clock = val;
+ continue;
+ } else if (strcmp(str, "icdisable") == 0) {
+ txx9_ic_disable = 1;
+ continue;
+ } else if (strcmp(str, "dcdisable") == 0) {
+ txx9_dc_disable = 1;
+ continue;
+ } else if (strcmp(str, "toeoff") == 0) {
+ txx9_ccfg_toeon = 0;
+ continue;
+ } else if (strcmp(str, "toeon") == 0) {
+ txx9_ccfg_toeon = 1;
+ continue;
+ }
+ if (arcs_cmdline[0])
+ strcat(arcs_cmdline, " ");
+ strcat(arcs_cmdline, str);
+ }
+
+ txx9_cache_fixup();
+}
+
+static void __init select_board(void)
+{
+ const char *envstr;
+
+ /* first, determine by "board=" argument in preprocess_cmdline() */
+ if (txx9_board_vec)
+ return;
+ /* next, determine by "board" envvar */
+ envstr = prom_getenv("board");
+ if (envstr) {
+ txx9_board_vec = find_board_byname(envstr);
+ if (txx9_board_vec)
+ return;
+ }
+
+ /* select "default" board */
+#ifdef CONFIG_TOSHIBA_JMR3927
+ txx9_board_vec = &jmr3927_vec;
+#endif
+#ifdef CONFIG_CPU_TX49XX
+ switch (TX4938_REV_PCODE()) {
+#ifdef CONFIG_TOSHIBA_RBTX4927
+ case 0x4927:
+ txx9_board_vec = &rbtx4927_vec;
+ break;
+ case 0x4937:
+ txx9_board_vec = &rbtx4937_vec;
+ break;
+#endif
+#ifdef CONFIG_TOSHIBA_RBTX4938
+ case 0x4938:
+ txx9_board_vec = &rbtx4938_vec;
+ break;
+#endif
+#ifdef CONFIG_TOSHIBA_RBTX4939
+ case 0x4939:
+ txx9_board_vec = &rbtx4939_vec;
+ break;
+#endif
+ }
+#endif
+}
+
+void __init prom_init(void)
+{
+ prom_init_cmdline();
+ preprocess_cmdline();
+ select_board();
+
+ strcpy(txx9_system_type, txx9_board_vec->system);
+
+ txx9_board_vec->prom_init();
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
+
+const char *get_system_type(void)
+{
+ return txx9_system_type;
+}
+
+const char *__init prom_getenv(const char *name)
+{
+ const s32 *str;
+
+ if (fw_arg2 < CKSEG0)
+ return NULL;
+
+ str = (const s32 *)fw_arg2;
+ /* YAMON style ("name", "value" pairs) */
+ while (str[0] && str[1]) {
+ if (!strcmp((const char *)(unsigned long)str[0], name))
+ return (const char *)(unsigned long)str[1];
+ str += 2;
+ }
+ return NULL;
+}
+
+static void __noreturn txx9_machine_halt(void)
+{
+ local_irq_disable();
+ clear_c0_status(ST0_IM);
+ while (1) {
+ if (cpu_wait) {
+ (*cpu_wait)();
+ if (cpu_has_counter) {
+ /*
+ * Clear the counter interrupt, since it
+ * would break the WAIT instruction even
+ * when masked.
+ */
+ write_c0_compare(0);
+ }
+ }
+ }
+}
+
+/* Watchdog support */
+void __init txx9_wdt_init(unsigned long base)
+{
+ struct resource res = {
+ .start = base,
+ .end = base + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ };
+ platform_device_register_simple("txx9wdt", -1, &res, 1);
+}
+
+void txx9_wdt_now(unsigned long base)
+{
+ struct txx9_tmr_reg __iomem *tmrptr =
+ ioremap(base, sizeof(struct txx9_tmr_reg));
+ /* disable watchdog timer */
+ __raw_writel(TXx9_TMWTMR_WDIS | TXx9_TMWTMR_TWC, &tmrptr->wtmr);
+ __raw_writel(0, &tmrptr->tcr);
+ /* kick watchdog */
+ __raw_writel(TXx9_TMWTMR_TWIE, &tmrptr->wtmr);
+ __raw_writel(1, &tmrptr->cpra); /* immediate */
+ __raw_writel(TXx9_TMTCR_TCE | TXx9_TMTCR_CCDE | TXx9_TMTCR_TMODE_WDOG,
+ &tmrptr->tcr);
+}
+
+/* SPI support */
+void __init txx9_spi_init(int busid, unsigned long base, int irq)
+{
+ struct resource res[] = {
+ {
+ .start = base,
+ .end = base + 0x20 - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = irq,
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+ platform_device_register_simple("spi_txx9", busid,
+ res, ARRAY_SIZE(res));
+}
+
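+/* hand the MAC address to the tc35815 ethernet driver as platform data */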
+void __init txx9_ethaddr_init(unsigned int id, unsigned char *ethaddr)
+{
+ struct platform_device *pdev =
+ platform_device_alloc("tc35815-mac", id);
+ if (!pdev ||
+ platform_device_add_data(pdev, ethaddr, 6) ||
+ platform_device_add(pdev))
+ platform_device_put(pdev);
+}
+
+void __init txx9_sio_init(unsigned long baseaddr, int irq,
+ unsigned int line, unsigned int sclk, int nocts)
+{
+#ifdef CONFIG_SERIAL_TXX9
+ struct uart_port req;
+
+ memset(&req, 0, sizeof(req));
+ req.line = line;
+ req.iotype = UPIO_MEM;
+ req.membase = ioremap(baseaddr, 0x24);
+ req.mapbase = baseaddr;
+ req.irq = irq;
+ if (!nocts)
+ req.flags |= UPF_BUGGY_UART /*HAVE_CTS_LINE*/;
+ if (sclk) {
+ req.flags |= UPF_MAGIC_MULTIPLIER /*USE_SCLK*/;
+ req.uartclk = sclk;
+ } else
+ req.uartclk = TXX9_IMCLK;
+ early_serial_txx9_setup(&req);
+#endif /* CONFIG_SERIAL_TXX9 */
+}
+
+#ifdef CONFIG_EARLY_PRINTK
+static void null_prom_putchar(char c)
+{
+}
+void (*txx9_prom_putchar)(char c) = null_prom_putchar;
+
+void prom_putchar(char c)
+{
+ txx9_prom_putchar(c);
+}
+
+static void __iomem *early_txx9_sio_port;
+
+static void early_txx9_sio_putchar(char c)
+{
+#define TXX9_SICISR 0x0c
+#define TXX9_SITFIFO 0x1c
+#define TXX9_SICISR_TXALS 0x00000002
+ while (!(__raw_readl(early_txx9_sio_port + TXX9_SICISR) &
+ TXX9_SICISR_TXALS))
+ ;
+ __raw_writel(c, early_txx9_sio_port + TXX9_SITFIFO);
+}
+
+void __init txx9_sio_putchar_init(unsigned long baseaddr)
+{
+ early_txx9_sio_port = ioremap(baseaddr, 0x24);
+ txx9_prom_putchar = early_txx9_sio_putchar;
+}
+#endif /* CONFIG_EARLY_PRINTK */
+
+/* wrappers */
+void __init plat_mem_setup(void)
+{
+ ioport_resource.start = 0;
+ ioport_resource.end = ~0UL; /* no limit */
+ iomem_resource.start = 0;
+ iomem_resource.end = ~0UL; /* no limit */
+
+ /* fallback restart/halt routines */
+ _machine_restart = (void (*)(char *))txx9_machine_halt;
+ _machine_halt = txx9_machine_halt;
+ pm_power_off = txx9_machine_halt;
+
+#ifdef CONFIG_PCI
+ pcibios_plat_setup = txx9_pcibios_setup;
+#endif
+ txx9_board_vec->mem_setup();
+}
+
+void __init arch_init_irq(void)
+{
+ txx9_board_vec->irq_setup();
+}
+
+void __init plat_time_init(void)
+{
+#ifdef CONFIG_CPU_TX49XX
+ mips_hpt_frequency = txx9_cpu_clock / 2;
+#endif
+ txx9_board_vec->time_init();
+}
+
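+/* register the fixed-rate gbus clock and the derived imbus (gbus/2) clock */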
+static void __init txx9_clk_init(void)
+{
+ struct clk_hw *hw;
+ int error;
+
+ hw = clk_hw_register_fixed_rate(NULL, "gbus", NULL, 0, txx9_gbus_clock);
+ if (IS_ERR(hw)) {
+ error = PTR_ERR(hw);
+ goto fail;
+ }
+
+ hw = clk_hw_register_fixed_factor(NULL, "imbus", "gbus", 0, 1, 2);
+ error = clk_hw_register_clkdev(hw, "imbus_clk", NULL);
+ if (error)
+ goto fail;
+
+#ifdef CONFIG_CPU_TX49XX
+ if (TX4938_REV_PCODE() == 0x4938) {
+ hw = clk_hw_register_fixed_factor(NULL, "spi", "gbus", 0, 1, 4);
+ error = clk_hw_register_clkdev(hw, "spi-baseclk", NULL);
+ if (error)
+ goto fail;
+ }
+#endif
+
+ return;
+
+fail:
+ pr_err("Failed to register clocks: %d\n", error);
+}
+
+static int __init _txx9_arch_init(void)
+{
+ txx9_clk_init();
+
+ if (txx9_board_vec->arch_init)
+ txx9_board_vec->arch_init();
+ return 0;
+}
+arch_initcall(_txx9_arch_init);
+
+static int __init _txx9_device_init(void)
+{
+ if (txx9_board_vec->device_init)
+ txx9_board_vec->device_init();
+ return 0;
+}
+device_initcall(_txx9_device_init);
+
+int (*txx9_irq_dispatch)(int pending);
+asmlinkage void plat_irq_dispatch(void)
+{
+ int pending = read_c0_status() & read_c0_cause() & ST0_IM;
+ int irq = txx9_irq_dispatch(pending);
+
+ if (likely(irq >= 0))
+ do_IRQ(irq);
+ else
+ spurious_interrupt();
+}
+
+/* see include/asm-mips/mach-tx39xx/mangle-port.h, for example. */
+#ifdef NEEDS_TXX9_SWIZZLE_ADDR_B
+static unsigned long __swizzle_addr_none(unsigned long port)
+{
+ return port;
+}
+unsigned long (*__swizzle_addr_b)(unsigned long port) = __swizzle_addr_none;
+EXPORT_SYMBOL(__swizzle_addr_b);
+#endif
+
+#ifdef NEEDS_TXX9_IOSWABW
+static u16 ioswabw_default(volatile u16 *a, u16 x)
+{
+ return le16_to_cpu(x);
+}
+static u16 __mem_ioswabw_default(volatile u16 *a, u16 x)
+{
+ return x;
+}
+u16 (*ioswabw)(volatile u16 *a, u16 x) = ioswabw_default;
+EXPORT_SYMBOL(ioswabw);
+u16 (*__mem_ioswabw)(volatile u16 *a, u16 x) = __mem_ioswabw_default;
+EXPORT_SYMBOL(__mem_ioswabw);
+#endif
+
+void __init txx9_physmap_flash_init(int no, unsigned long addr,
+ unsigned long size,
+ const struct physmap_flash_data *pdata)
+{
+#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
+ struct resource res = {
+ .start = addr,
+ .end = addr + size - 1,
+ .flags = IORESOURCE_MEM,
+ };
+ struct platform_device *pdev;
+ static struct mtd_partition parts[2];
+ struct physmap_flash_data pdata_part;
+
+ /* If this area contains the boot area, make it a separate partition */
+ if (pdata->nr_parts == 0 && !pdata->parts &&
+ addr < 0x1fc00000 && addr + size > 0x1fc00000 &&
+ !parts[0].name) {
+ parts[0].name = "boot";
+ parts[0].offset = 0x1fc00000 - addr;
+ parts[0].size = addr + size - 0x1fc00000;
+ parts[1].name = "user";
+ parts[1].offset = 0;
+ parts[1].size = 0x1fc00000 - addr;
+ pdata_part = *pdata;
+ pdata_part.nr_parts = ARRAY_SIZE(parts);
+ pdata_part.parts = parts;
+ pdata = &pdata_part;
+ }
+
+ pdev = platform_device_alloc("physmap-flash", no);
+ if (!pdev ||
+ platform_device_add_resources(pdev, &res, 1) ||
+ platform_device_add_data(pdev, pdata, sizeof(*pdata)) ||
+ platform_device_add(pdev))
+ platform_device_put(pdev);
+#endif
+}
+
+void __init txx9_ndfmc_init(unsigned long baseaddr,
+ const struct txx9ndfmc_platform_data *pdata)
+{
+#if IS_ENABLED(CONFIG_MTD_NAND_TXX9NDFMC)
+ struct resource res = {
+ .start = baseaddr,
+ .end = baseaddr + 0x1000 - 1,
+ .flags = IORESOURCE_MEM,
+ };
+ struct platform_device *pdev = platform_device_alloc("txx9ndfmc", -1);
+
+ if (!pdev ||
+ platform_device_add_resources(pdev, &res, 1) ||
+ platform_device_add_data(pdev, pdata, sizeof(*pdata)) ||
+ platform_device_add(pdev))
+ platform_device_put(pdev);
+#endif
+}
+
+#if IS_ENABLED(CONFIG_LEDS_GPIO)
+static DEFINE_SPINLOCK(txx9_iocled_lock);
+
+#define TXX9_IOCLED_MAXLEDS 8
+
+struct txx9_iocled_data {
+ struct gpio_chip chip;
+ u8 cur_val;
+ void __iomem *mmioaddr;
+ struct gpio_led_platform_data pdata;
+ struct gpio_led leds[TXX9_IOCLED_MAXLEDS];
+ char names[TXX9_IOCLED_MAXLEDS][32];
+};
+
+static int txx9_iocled_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct txx9_iocled_data *data = gpiochip_get_data(chip);
+ return !!(data->cur_val & (1 << offset));
+}
+
+static void txx9_iocled_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct txx9_iocled_data *data = gpiochip_get_data(chip);
+ unsigned long flags;
+ spin_lock_irqsave(&txx9_iocled_lock, flags);
+ if (value)
+ data->cur_val |= 1 << offset;
+ else
+ data->cur_val &= ~(1 << offset);
+ writeb(data->cur_val, data->mmioaddr);
+ mmiowb();
+ spin_unlock_irqrestore(&txx9_iocled_lock, flags);
+}
+
+static int txx9_iocled_dir_in(struct gpio_chip *chip, unsigned int offset)
+{
+ return 0;
+}
+
+static int txx9_iocled_dir_out(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ txx9_iocled_set(chip, offset, value);
+ return 0;
+}
+
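+/* register a memory-mapped LED port as a gpiochip plus a leds-gpio device */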
+void __init txx9_iocled_init(unsigned long baseaddr,
+ int basenum, unsigned int num, int lowactive,
+ const char *color, char **deftriggers)
+{
+ struct txx9_iocled_data *iocled;
+ struct platform_device *pdev;
+ int i;
+ static char *default_triggers[] __initdata = {
+ "heartbeat",
+ "disk-activity",
+ "nand-disk",
+ NULL,
+ };
+
+ if (!deftriggers)
+ deftriggers = default_triggers;
+ iocled = kzalloc(sizeof(*iocled), GFP_KERNEL);
+ if (!iocled)
+ return;
+ iocled->mmioaddr = ioremap(baseaddr, 1);
+ if (!iocled->mmioaddr)
+ goto out_free;
+ iocled->chip.get = txx9_iocled_get;
+ iocled->chip.set = txx9_iocled_set;
+ iocled->chip.direction_input = txx9_iocled_dir_in;
+ iocled->chip.direction_output = txx9_iocled_dir_out;
+ iocled->chip.label = "iocled";
+ iocled->chip.base = basenum;
+ iocled->chip.ngpio = num;
+ if (gpiochip_add_data(&iocled->chip, iocled))
+ goto out_unmap;
+ if (basenum < 0)
+ basenum = iocled->chip.base;
+
+ pdev = platform_device_alloc("leds-gpio", basenum);
+ if (!pdev)
+ goto out_gpio;
+ iocled->pdata.num_leds = num;
+ iocled->pdata.leds = iocled->leds;
+ for (i = 0; i < num; i++) {
+ struct gpio_led *led = &iocled->leds[i];
+ snprintf(iocled->names[i], sizeof(iocled->names[i]),
+ "iocled:%s:%u", color, i);
+ led->name = iocled->names[i];
+ led->gpio = basenum + i;
+ led->active_low = lowactive;
+ if (deftriggers && *deftriggers)
+ led->default_trigger = *deftriggers++;
+ }
+ pdev->dev.platform_data = &iocled->pdata;
+ if (platform_device_add(pdev))
+ goto out_pdev;
+ return;
+
+out_pdev:
+ platform_device_put(pdev);
+out_gpio:
+ gpiochip_remove(&iocled->chip);
+out_unmap:
+ iounmap(iocled->mmioaddr);
+out_free:
+ kfree(iocled);
+}
+#else /* CONFIG_LEDS_GPIO */
+void __init txx9_iocled_init(unsigned long baseaddr,
+ int basenum, unsigned int num, int lowactive,
+ const char *color, char **deftriggers)
+{
+}
+#endif /* CONFIG_LEDS_GPIO */
+
+void __init txx9_dmac_init(int id, unsigned long baseaddr, int irq,
+ const struct txx9dmac_platform_data *pdata)
+{
+#if IS_ENABLED(CONFIG_TXX9_DMAC)
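+ /*
+ * On TX49 each DMA channel has its own IRQ, so the IRQ resources are
+ * attached to the per-channel devices below instead of the controller.
+ */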
+ struct resource res[] = {
+ {
+ .start = baseaddr,
+ .end = baseaddr + 0x800 - 1,
+ .flags = IORESOURCE_MEM,
+#ifndef CONFIG_MACH_TX49XX
+ }, {
+ .start = irq,
+ .flags = IORESOURCE_IRQ,
+#endif
+ }
+ };
+#ifdef CONFIG_MACH_TX49XX
+ struct resource chan_res[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ }
+ };
+#endif
+ struct platform_device *pdev = platform_device_alloc("txx9dmac", id);
+ struct txx9dmac_chan_platform_data cpdata;
+ int i;
+
+ if (!pdev ||
+ platform_device_add_resources(pdev, res, ARRAY_SIZE(res)) ||
+ platform_device_add_data(pdev, pdata, sizeof(*pdata)) ||
+ platform_device_add(pdev)) {
+ platform_device_put(pdev);
+ return;
+ }
+ memset(&cpdata, 0, sizeof(cpdata));
+ cpdata.dmac_dev = pdev;
+ for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
+#ifdef CONFIG_MACH_TX49XX
+ chan_res[0].start = irq + i;
+#endif
+ pdev = platform_device_alloc("txx9dmac-chan",
+ id * TXX9_DMA_MAX_NR_CHANNELS + i);
+ if (!pdev ||
+#ifdef CONFIG_MACH_TX49XX
+ platform_device_add_resources(pdev, chan_res,
+ ARRAY_SIZE(chan_res)) ||
+#endif
+ platform_device_add_data(pdev, &cpdata, sizeof(cpdata)) ||
+ platform_device_add(pdev))
+ platform_device_put(pdev);
+ }
+#endif
+}
+
+void __init txx9_aclc_init(unsigned long baseaddr, int irq,
+ unsigned int dmac_id,
+ unsigned int dma_chan_out,
+ unsigned int dma_chan_in)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_TXX9ACLC)
+ unsigned int dma_base = dmac_id * TXX9_DMA_MAX_NR_CHANNELS;
+ struct resource res[] = {
+ {
+ .start = baseaddr,
+ .end = baseaddr + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = irq,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .name = "txx9dmac-chan",
+ .start = dma_base + dma_chan_out,
+ .flags = IORESOURCE_DMA,
+ }, {
+ .name = "txx9dmac-chan",
+ .start = dma_base + dma_chan_in,
+ .flags = IORESOURCE_DMA,
+ }
+ };
+ struct platform_device *pdev =
+ platform_device_alloc("txx9aclc-ac97", -1);
+
+ if (!pdev ||
+ platform_device_add_resources(pdev, res, ARRAY_SIZE(res)) ||
+ platform_device_add(pdev))
+ platform_device_put(pdev);
+#endif
+}
+
+static struct bus_type txx9_sramc_subsys = {
+ .name = "txx9_sram",
+ .dev_name = "txx9_sram",
+};
+
+struct txx9_sramc_dev {
+ struct device dev;
+ struct bin_attribute bindata_attr;
+ void __iomem *base;
+};
+
+static ssize_t txx9_sram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
+{
+ struct txx9_sramc_dev *dev = bin_attr->private;
+ size_t ramsize = bin_attr->size;
+
+ if (pos >= ramsize)
+ return 0;
+ if (pos + size > ramsize)
+ size = ramsize - pos;
+ memcpy_fromio(buf, dev->base + pos, size);
+ return size;
+}
+
+static ssize_t txx9_sram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
+{
+ struct txx9_sramc_dev *dev = bin_attr->private;
+ size_t ramsize = bin_attr->size;
+
+ if (pos >= ramsize)
+ return 0;
+ if (pos + size > ramsize)
+ size = ramsize - pos;
+ memcpy_toio(dev->base + pos, buf, size);
+ return size;
+}
+
+static void txx9_device_release(struct device *dev)
+{
+ struct txx9_sramc_dev *tdev;
+
+ tdev = container_of(dev, struct txx9_sramc_dev, dev);
+ kfree(tdev);
+}
+
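+/* expose the on-chip SRAM through a sysfs "bindata" binary attribute */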
+void __init txx9_sramc_init(struct resource *r)
+{
+ struct txx9_sramc_dev *dev;
+ size_t size;
+ int err;
+
+ err = subsys_system_register(&txx9_sramc_subsys, NULL);
+ if (err)
+ return;
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return;
+ size = resource_size(r);
+ dev->base = ioremap(r->start, size);
+ if (!dev->base) {
+ kfree(dev);
+ return;
+ }
+ dev->dev.release = &txx9_device_release;
+ dev->dev.bus = &txx9_sramc_subsys;
+ sysfs_bin_attr_init(&dev->bindata_attr);
+ dev->bindata_attr.attr.name = "bindata";
+ dev->bindata_attr.attr.mode = S_IRUSR | S_IWUSR;
+ dev->bindata_attr.read = txx9_sram_read;
+ dev->bindata_attr.write = txx9_sram_write;
+ dev->bindata_attr.size = size;
+ dev->bindata_attr.private = dev;
+ err = device_register(&dev->dev);
+ if (err)
+ goto exit_put;
+ err = sysfs_create_bin_file(&dev->dev.kobj, &dev->bindata_attr);
+ if (err) {
+ iounmap(dev->base);
+ device_unregister(&dev->dev);
+ }
+ return;
+exit_put:
+ iounmap(dev->base);
+ put_device(&dev->dev);
+}
diff --git a/arch/mips/txx9/generic/setup_tx3927.c b/arch/mips/txx9/generic/setup_tx3927.c
new file mode 100644
index 000000000..33f7a7253
--- /dev/null
+++ b/arch/mips/txx9/generic/setup_tx3927.c
@@ -0,0 +1,136 @@
+/*
+ * TX3927 setup routines
+ * Based on linux/arch/mips/txx9/jmr3927/setup.c
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/param.h>
+#include <linux/io.h>
+#include <linux/mtd/physmap.h>
+#include <asm/mipsregs.h>
+#include <asm/txx9irq.h>
+#include <asm/txx9tmr.h>
+#include <asm/txx9pio.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/tx3927.h>
+
+void __init tx3927_wdt_init(void)
+{
+ txx9_wdt_init(TX3927_TMR_REG(2));
+}
+
+void __init tx3927_setup(void)
+{
+ int i;
+ unsigned int conf;
+
+ txx9_reg_res_init(TX3927_REV_PCODE(), TX3927_REG_BASE,
+ TX3927_REG_SIZE);
+
+ /* SDRAMC,ROMC are configured by PROM */
+ for (i = 0; i < 8; i++) {
+ if (!(tx3927_romcptr->cr[i] & 0x8))
+ continue; /* disabled */
+ txx9_ce_res[i].start = (unsigned long)TX3927_ROMC_BA(i);
+ txx9_ce_res[i].end =
+ txx9_ce_res[i].start + TX3927_ROMC_SIZE(i) - 1;
+ request_resource(&iomem_resource, &txx9_ce_res[i]);
+ }
+
+ /* clocks */
+ txx9_gbus_clock = txx9_cpu_clock / 2;
+ /* adjust loops_per_jiffy so that udelay/mdelay take a reasonable time */
+ loops_per_jiffy = txx9_cpu_clock / HZ / 2;
+
+ /* CCFG */
+ /* enable Timeout BusError */
+ if (txx9_ccfg_toeon)
+ tx3927_ccfgptr->ccfg |= TX3927_CCFG_TOE;
+
+ /* clear BusErrorOnWrite flag */
+ tx3927_ccfgptr->ccfg &= ~TX3927_CCFG_BEOW;
+ if (read_c0_conf() & TX39_CONF_WBON)
+ /* Disable PCI snoop */
+ tx3927_ccfgptr->ccfg &= ~TX3927_CCFG_PSNP;
+ else
+ /* Enable PCI SNOOP - with write through only */
+ tx3927_ccfgptr->ccfg |= TX3927_CCFG_PSNP;
+ /* do reset on watchdog */
+ tx3927_ccfgptr->ccfg |= TX3927_CCFG_WR;
+
+ pr_info("TX3927 -- CRIR:%08lx CCFG:%08lx PCFG:%08lx\n",
+ tx3927_ccfgptr->crir, tx3927_ccfgptr->ccfg,
+ tx3927_ccfgptr->pcfg);
+
+ /* TMR */
+ for (i = 0; i < TX3927_NR_TMR; i++)
+ txx9_tmr_init(TX3927_TMR_REG(i));
+
+ /* DMA */
+ tx3927_dmaptr->mcr = 0;
+ for (i = 0; i < ARRAY_SIZE(tx3927_dmaptr->ch); i++) {
+ /* reset channel */
+ tx3927_dmaptr->ch[i].ccr = TX3927_DMA_CCR_CHRST;
+ tx3927_dmaptr->ch[i].ccr = 0;
+ }
+ /* enable DMA */
+#ifdef __BIG_ENDIAN
+ tx3927_dmaptr->mcr = TX3927_DMA_MCR_MSTEN;
+#else
+ tx3927_dmaptr->mcr = TX3927_DMA_MCR_MSTEN | TX3927_DMA_MCR_LE;
+#endif
+
+ /* PIO */
+ __raw_writel(0, &tx3927_pioptr->maskcpu);
+ __raw_writel(0, &tx3927_pioptr->maskext);
+
+ conf = read_c0_conf();
+ if (conf & TX39_CONF_DCE) {
+ if (!(conf & TX39_CONF_WBON))
+ pr_info("TX3927 D-Cache WriteThrough.\n");
+ else if (!(conf & TX39_CONF_CWFON))
+ pr_info("TX3927 D-Cache WriteBack.\n");
+ else
+ pr_info("TX3927 D-Cache WriteBack (CWF).\n");
+ }
+}
+
+void __init tx3927_time_init(unsigned int evt_tmrnr, unsigned int src_tmrnr)
+{
+ txx9_clockevent_init(TX3927_TMR_REG(evt_tmrnr),
+ TXX9_IRQ_BASE + TX3927_IR_TMR(evt_tmrnr),
+ TXX9_IMCLK);
+ txx9_clocksource_init(TX3927_TMR_REG(src_tmrnr), TXX9_IMCLK);
+}
+
+void __init tx3927_sio_init(unsigned int sclk, unsigned int cts_mask)
+{
+ int i;
+
+ for (i = 0; i < 2; i++)
+ txx9_sio_init(TX3927_SIO_REG(i),
+ TXX9_IRQ_BASE + TX3927_IR_SIO(i),
+ i, sclk, (1 << i) & cts_mask);
+}
+
+void __init tx3927_mtd_init(int ch)
+{
+ struct physmap_flash_data pdata = {
+ .width = TX3927_ROMC_WIDTH(ch) / 8,
+ };
+ unsigned long start = txx9_ce_res[ch].start;
+ unsigned long size = txx9_ce_res[ch].end - start + 1;
+
+ if (!(tx3927_romcptr->cr[ch] & 0x8))
+ return; /* disabled */
+ txx9_physmap_flash_init(ch, start, size, &pdata);
+}
diff --git a/arch/mips/txx9/generic/setup_tx4927.c b/arch/mips/txx9/generic/setup_tx4927.c
new file mode 100644
index 000000000..46e9c4101
--- /dev/null
+++ b/arch/mips/txx9/generic/setup_tx4927.c
@@ -0,0 +1,337 @@
+/*
+ * TX4927 setup routines
+ * Based on linux/arch/mips/txx9/rbtx4938/setup.c,
+ * and RBTX49xx patch from CELF patch archive.
+ *
+ * 2003-2005 (c) MontaVista Software, Inc.
+ * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/param.h>
+#include <linux/ptrace.h>
+#include <linux/mtd/physmap.h>
+#include <asm/reboot.h>
+#include <asm/traps.h>
+#include <asm/txx9irq.h>
+#include <asm/txx9tmr.h>
+#include <asm/txx9pio.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/dmac.h>
+#include <asm/txx9/tx4927.h>
+
+static void __init tx4927_wdr_init(void)
+{
+ /* report watchdog reset status */
+ if (____raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_WDRST)
+ pr_warn("Watchdog reset detected at 0x%lx\n",
+ read_c0_errorepc());
+ /* clear WatchDogReset (W1C) */
+ tx4927_ccfg_set(TX4927_CCFG_WDRST);
+ /* do reset on watchdog */
+ tx4927_ccfg_set(TX4927_CCFG_WR);
+}
+
+void __init tx4927_wdt_init(void)
+{
+ txx9_wdt_init(TX4927_TMR_REG(2) & 0xfffffffffULL);
+}
+
+static void tx4927_machine_restart(char *command)
+{
+ local_irq_disable();
+ pr_emerg("Rebooting (with %s watchdog reset)...\n",
+ (____raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_WDREXEN) ?
+ "external" : "internal");
+ /* clear watchdog status */
+ tx4927_ccfg_set(TX4927_CCFG_WDRST); /* W1C */
+ txx9_wdt_now(TX4927_TMR_REG(2) & 0xfffffffffULL);
+ while (!(____raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_WDRST))
+ ;
+ mdelay(10);
+ if (____raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_WDREXEN) {
+ pr_emerg("Rebooting (with internal watchdog reset)...\n");
+ /* External WDRST failed. Do internal watchdog reset */
+ tx4927_ccfg_clear(TX4927_CCFG_WDREXEN);
+ }
+ /* fallback */
+ (*_machine_halt)();
+}
+
+void show_registers(struct pt_regs *regs);
+static int tx4927_be_handler(struct pt_regs *regs, int is_fixup)
+{
+ int data = regs->cp0_cause & 4;
+ console_verbose();
+ pr_err("%cBE exception at %#lx\n", data ? 'D' : 'I', regs->cp0_epc);
+ pr_err("ccfg:%llx, toea:%llx\n",
+ (unsigned long long)____raw_readq(&tx4927_ccfgptr->ccfg),
+ (unsigned long long)____raw_readq(&tx4927_ccfgptr->toea));
+#ifdef CONFIG_PCI
+ tx4927_report_pcic_status();
+#endif
+ show_registers(regs);
+ panic("BusError!");
+}
+static void __init tx4927_be_init(void)
+{
+ board_be_handler = tx4927_be_handler;
+}
+
+static struct resource tx4927_sdram_resource[4];
+
+void __init tx4927_setup(void)
+{
+ int i;
+ __u32 divmode;
+ unsigned int cpuclk = 0;
+ u64 ccfg;
+
+ txx9_reg_res_init(TX4927_REV_PCODE(), TX4927_REG_BASE,
+ TX4927_REG_SIZE);
+ set_c0_config(TX49_CONF_CWFON);
+
+ /* SDRAMC,EBUSC are configured by PROM */
+ for (i = 0; i < 8; i++) {
+ if (!(TX4927_EBUSC_CR(i) & 0x8))
+ continue; /* disabled */
+ txx9_ce_res[i].start = (unsigned long)TX4927_EBUSC_BA(i);
+ txx9_ce_res[i].end =
+ txx9_ce_res[i].start + TX4927_EBUSC_SIZE(i) - 1;
+ request_resource(&iomem_resource, &txx9_ce_res[i]);
+ }
+
+ /* clocks */
+ ccfg = ____raw_readq(&tx4927_ccfgptr->ccfg);
+ if (txx9_master_clock) {
+ /* calculate gbus_clock and cpu_clock from master_clock */
+ divmode = (__u32)ccfg & TX4927_CCFG_DIVMODE_MASK;
+ switch (divmode) {
+ case TX4927_CCFG_DIVMODE_8:
+ case TX4927_CCFG_DIVMODE_10:
+ case TX4927_CCFG_DIVMODE_12:
+ case TX4927_CCFG_DIVMODE_16:
+ txx9_gbus_clock = txx9_master_clock * 4; break;
+ default:
+ txx9_gbus_clock = txx9_master_clock;
+ }
+ switch (divmode) {
+ case TX4927_CCFG_DIVMODE_2:
+ case TX4927_CCFG_DIVMODE_8:
+ cpuclk = txx9_gbus_clock * 2; break;
+ case TX4927_CCFG_DIVMODE_2_5:
+ case TX4927_CCFG_DIVMODE_10:
+ cpuclk = txx9_gbus_clock * 5 / 2; break;
+ case TX4927_CCFG_DIVMODE_3:
+ case TX4927_CCFG_DIVMODE_12:
+ cpuclk = txx9_gbus_clock * 3; break;
+ case TX4927_CCFG_DIVMODE_4:
+ case TX4927_CCFG_DIVMODE_16:
+ cpuclk = txx9_gbus_clock * 4; break;
+ }
+ txx9_cpu_clock = cpuclk;
+ } else {
+ if (txx9_cpu_clock == 0)
+ txx9_cpu_clock = 200000000; /* 200MHz */
+ /* calculate gbus_clock and master_clock from cpu_clock */
+ cpuclk = txx9_cpu_clock;
+ divmode = (__u32)ccfg & TX4927_CCFG_DIVMODE_MASK;
+ switch (divmode) {
+ case TX4927_CCFG_DIVMODE_2:
+ case TX4927_CCFG_DIVMODE_8:
+ txx9_gbus_clock = cpuclk / 2; break;
+ case TX4927_CCFG_DIVMODE_2_5:
+ case TX4927_CCFG_DIVMODE_10:
+ txx9_gbus_clock = cpuclk * 2 / 5; break;
+ case TX4927_CCFG_DIVMODE_3:
+ case TX4927_CCFG_DIVMODE_12:
+ txx9_gbus_clock = cpuclk / 3; break;
+ case TX4927_CCFG_DIVMODE_4:
+ case TX4927_CCFG_DIVMODE_16:
+ txx9_gbus_clock = cpuclk / 4; break;
+ }
+ switch (divmode) {
+ case TX4927_CCFG_DIVMODE_8:
+ case TX4927_CCFG_DIVMODE_10:
+ case TX4927_CCFG_DIVMODE_12:
+ case TX4927_CCFG_DIVMODE_16:
+ txx9_master_clock = txx9_gbus_clock / 4; break;
+ default:
+ txx9_master_clock = txx9_gbus_clock;
+ }
+ }
+ /* adjust loops_per_jiffy so that udelay/mdelay take a reasonable time */
+ loops_per_jiffy = txx9_cpu_clock / HZ / 2;
+
+ /* CCFG */
+ tx4927_wdr_init();
+ /* clear BusErrorOnWrite flag (W1C) */
+ tx4927_ccfg_set(TX4927_CCFG_BEOW);
+ /* enable Timeout BusError */
+ if (txx9_ccfg_toeon)
+ tx4927_ccfg_set(TX4927_CCFG_TOE);
+
+ /* DMA selection */
+ txx9_clear64(&tx4927_ccfgptr->pcfg, TX4927_PCFG_DMASEL_ALL);
+
+ /* Use external clock for external arbiter */
+ if (!(____raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCIARB))
+ txx9_clear64(&tx4927_ccfgptr->pcfg, TX4927_PCFG_PCICLKEN_ALL);
+
+ pr_info("%s -- %dMHz(M%dMHz) CRIR:%08x CCFG:%llx PCFG:%llx\n",
+ txx9_pcode_str, (cpuclk + 500000) / 1000000,
+ (txx9_master_clock + 500000) / 1000000,
+ (__u32)____raw_readq(&tx4927_ccfgptr->crir),
+ ____raw_readq(&tx4927_ccfgptr->ccfg),
+ ____raw_readq(&tx4927_ccfgptr->pcfg));
+
+ pr_info("%s SDRAMC --", txx9_pcode_str);
+ for (i = 0; i < 4; i++) {
+ __u64 cr = TX4927_SDRAMC_CR(i);
+ unsigned long base, size;
+ if (!((__u32)cr & 0x00000400))
+ continue; /* disabled */
+ base = (unsigned long)(cr >> 49) << 21;
+ size = (((unsigned long)(cr >> 33) & 0x7fff) + 1) << 21;
+ pr_cont(" CR%d:%016llx", i, cr);
+ tx4927_sdram_resource[i].name = "SDRAM";
+ tx4927_sdram_resource[i].start = base;
+ tx4927_sdram_resource[i].end = base + size - 1;
+ tx4927_sdram_resource[i].flags = IORESOURCE_MEM;
+ request_resource(&iomem_resource, &tx4927_sdram_resource[i]);
+ }
+ pr_cont(" TR:%09llx\n", ____raw_readq(&tx4927_sdramcptr->tr));
+
+ /* TMR */
+ /* disable all timers */
+ for (i = 0; i < TX4927_NR_TMR; i++)
+ txx9_tmr_init(TX4927_TMR_REG(i) & 0xfffffffffULL);
+
+ /* PIO */
+ __raw_writel(0, &tx4927_pioptr->maskcpu);
+ __raw_writel(0, &tx4927_pioptr->maskext);
+
+ _machine_restart = tx4927_machine_restart;
+ board_be_init = tx4927_be_init;
+}
+
+void __init tx4927_time_init(unsigned int tmrnr)
+{
+ if (____raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_TINTDIS)
+ txx9_clockevent_init(TX4927_TMR_REG(tmrnr) & 0xfffffffffULL,
+ TXX9_IRQ_BASE + TX4927_IR_TMR(tmrnr),
+ TXX9_IMCLK);
+}
+
+void __init tx4927_sio_init(unsigned int sclk, unsigned int cts_mask)
+{
+ int i;
+
+ for (i = 0; i < 2; i++)
+ txx9_sio_init(TX4927_SIO_REG(i) & 0xfffffffffULL,
+ TXX9_IRQ_BASE + TX4927_IR_SIO(i),
+ i, sclk, (1 << i) & cts_mask);
+}
+
+void __init tx4927_mtd_init(int ch)
+{
+ struct physmap_flash_data pdata = {
+ .width = TX4927_EBUSC_WIDTH(ch) / 8,
+ };
+ unsigned long start = txx9_ce_res[ch].start;
+ unsigned long size = txx9_ce_res[ch].end - start + 1;
+
+ if (!(TX4927_EBUSC_CR(ch) & 0x8))
+ return; /* disabled */
+ txx9_physmap_flash_init(ch, start, size, &pdata);
+}
+
+void __init tx4927_dmac_init(int memcpy_chan)
+{
+ struct txx9dmac_platform_data plat_data = {
+ .memcpy_chan = memcpy_chan,
+ .have_64bit_regs = true,
+ };
+
+ txx9_dmac_init(0, TX4927_DMA_REG & 0xfffffffffULL,
+ TXX9_IRQ_BASE + TX4927_IR_DMA(0), &plat_data);
+}
+
+void __init tx4927_aclc_init(unsigned int dma_chan_out,
+ unsigned int dma_chan_in)
+{
+ u64 pcfg = __raw_readq(&tx4927_ccfgptr->pcfg);
+ __u64 dmasel_mask = 0, dmasel = 0;
+ unsigned long flags;
+
+ if (!(pcfg & TX4927_PCFG_SEL2))
+ return;
+ /* setup DMASEL (playback:ACLC ch0, capture:ACLC ch1) */
+ switch (dma_chan_out) {
+ case 0:
+ dmasel_mask |= TX4927_PCFG_DMASEL0_MASK;
+ dmasel |= TX4927_PCFG_DMASEL0_ACL0;
+ break;
+ case 2:
+ dmasel_mask |= TX4927_PCFG_DMASEL2_MASK;
+ dmasel |= TX4927_PCFG_DMASEL2_ACL0;
+ break;
+ default:
+ return;
+ }
+ switch (dma_chan_in) {
+ case 1:
+ dmasel_mask |= TX4927_PCFG_DMASEL1_MASK;
+ dmasel |= TX4927_PCFG_DMASEL1_ACL1;
+ break;
+ case 3:
+ dmasel_mask |= TX4927_PCFG_DMASEL3_MASK;
+ dmasel |= TX4927_PCFG_DMASEL3_ACL1;
+ break;
+ default:
+ return;
+ }
+ local_irq_save(flags);
+ txx9_clear64(&tx4927_ccfgptr->pcfg, dmasel_mask);
+ txx9_set64(&tx4927_ccfgptr->pcfg, dmasel);
+ local_irq_restore(flags);
+ txx9_aclc_init(TX4927_ACLC_REG & 0xfffffffffULL,
+ TXX9_IRQ_BASE + TX4927_IR_ACLC,
+ 0, dma_chan_out, dma_chan_in);
+}
+
+static void __init tx4927_stop_unused_modules(void)
+{
+ __u64 pcfg, rst = 0, ckd = 0;
+ char buf[128];
+
+ buf[0] = '\0';
+ local_irq_disable();
+ pcfg = ____raw_readq(&tx4927_ccfgptr->pcfg);
+ if (!(pcfg & TX4927_PCFG_SEL2)) {
+ rst |= TX4927_CLKCTR_ACLRST;
+ ckd |= TX4927_CLKCTR_ACLCKD;
+ strcat(buf, " ACLC");
+ }
+ if (rst | ckd) {
+ txx9_set64(&tx4927_ccfgptr->clkctr, rst);
+ txx9_set64(&tx4927_ccfgptr->clkctr, ckd);
+ }
+ local_irq_enable();
+ if (buf[0])
+ pr_info("%s: stop%s\n", txx9_pcode_str, buf);
+}
+
+static int __init tx4927_late_init(void)
+{
+ if (txx9_pcode != 0x4927)
+ return -ENODEV;
+ tx4927_stop_unused_modules();
+ return 0;
+}
+late_initcall(tx4927_late_init);
diff --git a/arch/mips/txx9/generic/setup_tx4938.c b/arch/mips/txx9/generic/setup_tx4938.c
new file mode 100644
index 000000000..17395d5d1
--- /dev/null
+++ b/arch/mips/txx9/generic/setup_tx4938.c
@@ -0,0 +1,485 @@
+/*
+ * TX4938/4937 setup routines
+ * Based on linux/arch/mips/txx9/rbtx4938/setup.c,
+ * and RBTX49xx patch from CELF patch archive.
+ *
+ * 2003-2005 (c) MontaVista Software, Inc.
+ * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/param.h>
+#include <linux/ptrace.h>
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/txx9/ndfmc.h>
+#include <asm/reboot.h>
+#include <asm/traps.h>
+#include <asm/txx9irq.h>
+#include <asm/txx9tmr.h>
+#include <asm/txx9pio.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/dmac.h>
+#include <asm/txx9/tx4938.h>
+
+static void __init tx4938_wdr_init(void)
+{
+ /* report watchdog reset status */
+ if (____raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_WDRST)
+ pr_warn("Watchdog reset detected at 0x%lx\n",
+ read_c0_errorepc());
+ /* clear WatchDogReset (W1C) */
+ tx4938_ccfg_set(TX4938_CCFG_WDRST);
+ /* do reset on watchdog */
+ tx4938_ccfg_set(TX4938_CCFG_WR);
+}
+
+void __init tx4938_wdt_init(void)
+{
+ txx9_wdt_init(TX4938_TMR_REG(2) & 0xfffffffffULL);
+}
+
+static void tx4938_machine_restart(char *command)
+{
+ local_irq_disable();
+ pr_emerg("Rebooting (with %s watchdog reset)...\n",
+ (____raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_WDREXEN) ?
+ "external" : "internal");
+ /* clear watchdog status */
+ tx4938_ccfg_set(TX4938_CCFG_WDRST); /* W1C */
+ txx9_wdt_now(TX4938_TMR_REG(2) & 0xfffffffffULL);
+ while (!(____raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_WDRST))
+ ;
+ mdelay(10);
+ if (____raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_WDREXEN) {
+ pr_emerg("Rebooting (with internal watchdog reset)...\n");
+ /* External WDRST failed. Do internal watchdog reset */
+ tx4938_ccfg_clear(TX4938_CCFG_WDREXEN);
+ }
+ /* fallback */
+ (*_machine_halt)();
+}
+
+void show_registers(struct pt_regs *regs);
+static int tx4938_be_handler(struct pt_regs *regs, int is_fixup)
+{
+ int data = regs->cp0_cause & 4;
+ console_verbose();
+ pr_err("%cBE exception at %#lx\n", data ? 'D' : 'I', regs->cp0_epc);
+ pr_err("ccfg:%llx, toea:%llx\n",
+ (unsigned long long)____raw_readq(&tx4938_ccfgptr->ccfg),
+ (unsigned long long)____raw_readq(&tx4938_ccfgptr->toea));
+#ifdef CONFIG_PCI
+ tx4927_report_pcic_status();
+#endif
+ show_registers(regs);
+ panic("BusError!");
+}
+static void __init tx4938_be_init(void)
+{
+ board_be_handler = tx4938_be_handler;
+}
+
+static struct resource tx4938_sdram_resource[4];
+static struct resource tx4938_sram_resource;
+
+#define TX4938_SRAM_SIZE 0x800
+
+void __init tx4938_setup(void)
+{
+ int i;
+ __u32 divmode;
+ unsigned int cpuclk = 0;
+ u64 ccfg;
+
+ txx9_reg_res_init(TX4938_REV_PCODE(), TX4938_REG_BASE,
+ TX4938_REG_SIZE);
+ set_c0_config(TX49_CONF_CWFON);
+
+ /* SDRAMC,EBUSC are configured by PROM */
+ for (i = 0; i < 8; i++) {
+ if (!(TX4938_EBUSC_CR(i) & 0x8))
+ continue; /* disabled */
+ txx9_ce_res[i].start = (unsigned long)TX4938_EBUSC_BA(i);
+ txx9_ce_res[i].end =
+ txx9_ce_res[i].start + TX4938_EBUSC_SIZE(i) - 1;
+ request_resource(&iomem_resource, &txx9_ce_res[i]);
+ }
+
+ /* clocks */
+ ccfg = ____raw_readq(&tx4938_ccfgptr->ccfg);
+ if (txx9_master_clock) {
+ /* calculate gbus_clock and cpu_clock from master_clock */
+ divmode = (__u32)ccfg & TX4938_CCFG_DIVMODE_MASK;
+ switch (divmode) {
+ case TX4938_CCFG_DIVMODE_8:
+ case TX4938_CCFG_DIVMODE_10:
+ case TX4938_CCFG_DIVMODE_12:
+ case TX4938_CCFG_DIVMODE_16:
+ case TX4938_CCFG_DIVMODE_18:
+ txx9_gbus_clock = txx9_master_clock * 4; break;
+ default:
+ txx9_gbus_clock = txx9_master_clock;
+ }
+ switch (divmode) {
+ case TX4938_CCFG_DIVMODE_2:
+ case TX4938_CCFG_DIVMODE_8:
+ cpuclk = txx9_gbus_clock * 2; break;
+ case TX4938_CCFG_DIVMODE_2_5:
+ case TX4938_CCFG_DIVMODE_10:
+ cpuclk = txx9_gbus_clock * 5 / 2; break;
+ case TX4938_CCFG_DIVMODE_3:
+ case TX4938_CCFG_DIVMODE_12:
+ cpuclk = txx9_gbus_clock * 3; break;
+ case TX4938_CCFG_DIVMODE_4:
+ case TX4938_CCFG_DIVMODE_16:
+ cpuclk = txx9_gbus_clock * 4; break;
+ case TX4938_CCFG_DIVMODE_4_5:
+ case TX4938_CCFG_DIVMODE_18:
+ cpuclk = txx9_gbus_clock * 9 / 2; break;
+ }
+ txx9_cpu_clock = cpuclk;
+ } else {
+ if (txx9_cpu_clock == 0)
+ txx9_cpu_clock = 300000000; /* 300MHz */
+ /* calculate gbus_clock and master_clock from cpu_clock */
+ cpuclk = txx9_cpu_clock;
+ divmode = (__u32)ccfg & TX4938_CCFG_DIVMODE_MASK;
+ switch (divmode) {
+ case TX4938_CCFG_DIVMODE_2:
+ case TX4938_CCFG_DIVMODE_8:
+ txx9_gbus_clock = cpuclk / 2; break;
+ case TX4938_CCFG_DIVMODE_2_5:
+ case TX4938_CCFG_DIVMODE_10:
+ txx9_gbus_clock = cpuclk * 2 / 5; break;
+ case TX4938_CCFG_DIVMODE_3:
+ case TX4938_CCFG_DIVMODE_12:
+ txx9_gbus_clock = cpuclk / 3; break;
+ case TX4938_CCFG_DIVMODE_4:
+ case TX4938_CCFG_DIVMODE_16:
+ txx9_gbus_clock = cpuclk / 4; break;
+ case TX4938_CCFG_DIVMODE_4_5:
+ case TX4938_CCFG_DIVMODE_18:
+ txx9_gbus_clock = cpuclk * 2 / 9; break;
+ }
+ switch (divmode) {
+ case TX4938_CCFG_DIVMODE_8:
+ case TX4938_CCFG_DIVMODE_10:
+ case TX4938_CCFG_DIVMODE_12:
+ case TX4938_CCFG_DIVMODE_16:
+ case TX4938_CCFG_DIVMODE_18:
+ txx9_master_clock = txx9_gbus_clock / 4; break;
+ default:
+ txx9_master_clock = txx9_gbus_clock;
+ }
+ }
+ /* adjust loops_per_jiffy so that udelay/mdelay take a reasonable time */
+ loops_per_jiffy = txx9_cpu_clock / HZ / 2;
+
+ /* CCFG */
+ tx4938_wdr_init();
+ /* clear BusErrorOnWrite flag (W1C) */
+ tx4938_ccfg_set(TX4938_CCFG_BEOW);
+ /* enable Timeout BusError */
+ if (txx9_ccfg_toeon)
+ tx4938_ccfg_set(TX4938_CCFG_TOE);
+
+ /* DMA selection */
+ txx9_clear64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_DMASEL_ALL);
+
+ /* Use external clock for external arbiter */
+ if (!(____raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCIARB))
+ txx9_clear64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_PCICLKEN_ALL);
+
+ pr_info("%s -- %dMHz(M%dMHz) CRIR:%08x CCFG:%llx PCFG:%llx\n",
+ txx9_pcode_str, (cpuclk + 500000) / 1000000,
+ (txx9_master_clock + 500000) / 1000000,
+ (__u32)____raw_readq(&tx4938_ccfgptr->crir),
+ ____raw_readq(&tx4938_ccfgptr->ccfg),
+ ____raw_readq(&tx4938_ccfgptr->pcfg));
+
+ pr_info("%s SDRAMC --", txx9_pcode_str);
+ for (i = 0; i < 4; i++) {
+ __u64 cr = TX4938_SDRAMC_CR(i);
+ unsigned long base, size;
+ if (!((__u32)cr & 0x00000400))
+ continue; /* disabled */
+ base = (unsigned long)(cr >> 49) << 21;
+ size = (((unsigned long)(cr >> 33) & 0x7fff) + 1) << 21;
+ pr_cont(" CR%d:%016llx", i, cr);
+ tx4938_sdram_resource[i].name = "SDRAM";
+ tx4938_sdram_resource[i].start = base;
+ tx4938_sdram_resource[i].end = base + size - 1;
+ tx4938_sdram_resource[i].flags = IORESOURCE_MEM;
+ request_resource(&iomem_resource, &tx4938_sdram_resource[i]);
+ }
+ pr_cont(" TR:%09llx\n", ____raw_readq(&tx4938_sdramcptr->tr));
+
+ /* SRAM */
+ if (txx9_pcode == 0x4938 && ____raw_readq(&tx4938_sramcptr->cr) & 1) {
+ unsigned int size = TX4938_SRAM_SIZE;
+ tx4938_sram_resource.name = "SRAM";
+ tx4938_sram_resource.start =
+ (____raw_readq(&tx4938_sramcptr->cr) >> (39-11))
+ & ~(size - 1);
+ tx4938_sram_resource.end =
+ tx4938_sram_resource.start + TX4938_SRAM_SIZE - 1;
+ tx4938_sram_resource.flags = IORESOURCE_MEM;
+ request_resource(&iomem_resource, &tx4938_sram_resource);
+ }
+
+ /* TMR */
+ /* disable all timers */
+ for (i = 0; i < TX4938_NR_TMR; i++)
+ txx9_tmr_init(TX4938_TMR_REG(i) & 0xfffffffffULL);
+
+ /* PIO */
+ __raw_writel(0, &tx4938_pioptr->maskcpu);
+ __raw_writel(0, &tx4938_pioptr->maskext);
+
+ if (txx9_pcode == 0x4938) {
+ __u64 pcfg = ____raw_readq(&tx4938_ccfgptr->pcfg);
+ /* set PCIC1 reset */
+ txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIC1RST);
+ if (pcfg & (TX4938_PCFG_ETH0_SEL | TX4938_PCFG_ETH1_SEL)) {
+ mdelay(1); /* at least 128 cpu clock */
+ /* clear PCIC1 reset */
+ txx9_clear64(&tx4938_ccfgptr->clkctr,
+ TX4938_CLKCTR_PCIC1RST);
+ } else {
+ pr_info("%s: stop PCIC1\n", txx9_pcode_str);
+ /* stop PCIC1 */
+ txx9_set64(&tx4938_ccfgptr->clkctr,
+ TX4938_CLKCTR_PCIC1CKD);
+ }
+ if (!(pcfg & TX4938_PCFG_ETH0_SEL)) {
+ pr_info("%s: stop ETH0\n", txx9_pcode_str);
+ txx9_set64(&tx4938_ccfgptr->clkctr,
+ TX4938_CLKCTR_ETH0RST);
+ txx9_set64(&tx4938_ccfgptr->clkctr,
+ TX4938_CLKCTR_ETH0CKD);
+ }
+ if (!(pcfg & TX4938_PCFG_ETH1_SEL)) {
+ pr_info("%s: stop ETH1\n", txx9_pcode_str);
+ txx9_set64(&tx4938_ccfgptr->clkctr,
+ TX4938_CLKCTR_ETH1RST);
+ txx9_set64(&tx4938_ccfgptr->clkctr,
+ TX4938_CLKCTR_ETH1CKD);
+ }
+ }
+
+ _machine_restart = tx4938_machine_restart;
+ board_be_init = tx4938_be_init;
+}
+
+void __init tx4938_time_init(unsigned int tmrnr)
+{
+ if (____raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_TINTDIS)
+ txx9_clockevent_init(TX4938_TMR_REG(tmrnr) & 0xfffffffffULL,
+ TXX9_IRQ_BASE + TX4938_IR_TMR(tmrnr),
+ TXX9_IMCLK);
+}
+
+void __init tx4938_sio_init(unsigned int sclk, unsigned int cts_mask)
+{
+ int i;
+ unsigned int ch_mask = 0;
+
+ if (__raw_readq(&tx4938_ccfgptr->pcfg) & TX4938_PCFG_ETH0_SEL)
+ ch_mask |= 1 << 1; /* disable SIO1 by PCFG setting */
+ for (i = 0; i < 2; i++) {
+ if ((1 << i) & ch_mask)
+ continue;
+ txx9_sio_init(TX4938_SIO_REG(i) & 0xfffffffffULL,
+ TXX9_IRQ_BASE + TX4938_IR_SIO(i),
+ i, sclk, (1 << i) & cts_mask);
+ }
+}
+
+void __init tx4938_spi_init(int busid)
+{
+ txx9_spi_init(busid, TX4938_SPI_REG & 0xfffffffffULL,
+ TXX9_IRQ_BASE + TX4938_IR_SPI);
+}
+
+void __init tx4938_ethaddr_init(unsigned char *addr0, unsigned char *addr1)
+{
+ u64 pcfg = __raw_readq(&tx4938_ccfgptr->pcfg);
+
+ if (addr0 && (pcfg & TX4938_PCFG_ETH0_SEL))
+ txx9_ethaddr_init(TXX9_IRQ_BASE + TX4938_IR_ETH0, addr0);
+ if (addr1 && (pcfg & TX4938_PCFG_ETH1_SEL))
+ txx9_ethaddr_init(TXX9_IRQ_BASE + TX4938_IR_ETH1, addr1);
+}
+
+void __init tx4938_mtd_init(int ch)
+{
+ struct physmap_flash_data pdata = {
+ .width = TX4938_EBUSC_WIDTH(ch) / 8,
+ };
+ unsigned long start = txx9_ce_res[ch].start;
+ unsigned long size = txx9_ce_res[ch].end - start + 1;
+
+ if (!(TX4938_EBUSC_CR(ch) & 0x8))
+ return; /* disabled */
+ txx9_physmap_flash_init(ch, start, size, &pdata);
+}
+
+void __init tx4938_ata_init(unsigned int irq, unsigned int shift, int tune)
+{
+ struct platform_device *pdev;
+ struct resource res[] = {
+ {
+ /* .start and .end are filled in later */
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = irq,
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+ struct tx4938ide_platform_info pdata = {
+ .ioport_shift = shift,
+ /*
+ * The IDE driver should not change bus timings if other ISA
+ * devices are present.
+ */
+ .gbus_clock = tune ? txx9_gbus_clock : 0,
+ };
+ u64 ebccr;
+ int i;
+
+ if ((__raw_readq(&tx4938_ccfgptr->pcfg) &
+ (TX4938_PCFG_ATA_SEL | TX4938_PCFG_NDF_SEL))
+ != TX4938_PCFG_ATA_SEL)
+ return;
+ for (i = 0; i < 8; i++) {
+ /* check EBCCRn.ISA, EBCCRn.BSZ, EBCCRn.ME */
+ ebccr = __raw_readq(&tx4938_ebuscptr->cr[i]);
+ if ((ebccr & 0x00f00008) == 0x00e00008)
+ break;
+ }
+ if (i == 8)
+ return;
+ pdata.ebus_ch = i;
+ res[0].start = ((ebccr >> 48) << 20) + 0x10000;
+ res[0].end = res[0].start + 0x20000 - 1;
+ pdev = platform_device_alloc("tx4938ide", -1);
+ if (!pdev ||
+ platform_device_add_resources(pdev, res, ARRAY_SIZE(res)) ||
+ platform_device_add_data(pdev, &pdata, sizeof(pdata)) ||
+ platform_device_add(pdev))
+ platform_device_put(pdev);
+}
+
+void __init tx4938_ndfmc_init(unsigned int hold, unsigned int spw)
+{
+ struct txx9ndfmc_platform_data plat_data = {
+ .shift = 1,
+ .gbus_clock = txx9_gbus_clock,
+ .hold = hold,
+ .spw = spw,
+ .ch_mask = 1,
+ };
+ unsigned long baseaddr = TX4938_NDFMC_REG & 0xfffffffffULL;
+
+#ifdef __BIG_ENDIAN
+ baseaddr += 4;
+#endif
+ if ((__raw_readq(&tx4938_ccfgptr->pcfg) &
+ (TX4938_PCFG_ATA_SEL|TX4938_PCFG_ISA_SEL|TX4938_PCFG_NDF_SEL)) ==
+ TX4938_PCFG_NDF_SEL)
+ txx9_ndfmc_init(baseaddr, &plat_data);
+}
+
+void __init tx4938_dmac_init(int memcpy_chan0, int memcpy_chan1)
+{
+ struct txx9dmac_platform_data plat_data = {
+ .have_64bit_regs = true,
+ };
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ plat_data.memcpy_chan = i ? memcpy_chan1 : memcpy_chan0;
+ txx9_dmac_init(i, TX4938_DMA_REG(i) & 0xfffffffffULL,
+ TXX9_IRQ_BASE + TX4938_IR_DMA(i, 0),
+ &plat_data);
+ }
+}
+
+void __init tx4938_aclc_init(void)
+{
+ u64 pcfg = __raw_readq(&tx4938_ccfgptr->pcfg);
+
+ if ((pcfg & TX4938_PCFG_SEL2) &&
+ !(pcfg & TX4938_PCFG_ETH0_SEL))
+ txx9_aclc_init(TX4938_ACLC_REG & 0xfffffffffULL,
+ TXX9_IRQ_BASE + TX4938_IR_ACLC,
+ 1, 0, 1);
+}
+
+void __init tx4938_sramc_init(void)
+{
+ if (tx4938_sram_resource.start)
+ txx9_sramc_init(&tx4938_sram_resource);
+}
+
+static void __init tx4938_stop_unused_modules(void)
+{
+ __u64 pcfg, rst = 0, ckd = 0;
+ char buf[128];
+
+ buf[0] = '\0';
+ local_irq_disable();
+ pcfg = ____raw_readq(&tx4938_ccfgptr->pcfg);
+ switch (txx9_pcode) {
+ case 0x4937:
+ if (!(pcfg & TX4938_PCFG_SEL2)) {
+ rst |= TX4938_CLKCTR_ACLRST;
+ ckd |= TX4938_CLKCTR_ACLCKD;
+ strcat(buf, " ACLC");
+ }
+ break;
+ case 0x4938:
+ if (!(pcfg & TX4938_PCFG_SEL2) ||
+ (pcfg & TX4938_PCFG_ETH0_SEL)) {
+ rst |= TX4938_CLKCTR_ACLRST;
+ ckd |= TX4938_CLKCTR_ACLCKD;
+ strcat(buf, " ACLC");
+ }
+ if ((pcfg &
+ (TX4938_PCFG_ATA_SEL | TX4938_PCFG_ISA_SEL |
+ TX4938_PCFG_NDF_SEL))
+ != TX4938_PCFG_NDF_SEL) {
+ rst |= TX4938_CLKCTR_NDFRST;
+ ckd |= TX4938_CLKCTR_NDFCKD;
+ strcat(buf, " NDFMC");
+ }
+ if (!(pcfg & TX4938_PCFG_SPI_SEL)) {
+ rst |= TX4938_CLKCTR_SPIRST;
+ ckd |= TX4938_CLKCTR_SPICKD;
+ strcat(buf, " SPI");
+ }
+ break;
+ }
+ if (rst | ckd) {
+ txx9_set64(&tx4938_ccfgptr->clkctr, rst);
+ txx9_set64(&tx4938_ccfgptr->clkctr, ckd);
+ }
+ local_irq_enable();
+ if (buf[0])
+ pr_info("%s: stop%s\n", txx9_pcode_str, buf);
+}
+
+static int __init tx4938_late_init(void)
+{
+ if (txx9_pcode != 0x4937 && txx9_pcode != 0x4938)
+ return -ENODEV;
+ tx4938_stop_unused_modules();
+ return 0;
+}
+late_initcall(tx4938_late_init);
diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c
new file mode 100644
index 000000000..bf8a3cdab
--- /dev/null
+++ b/arch/mips/txx9/generic/setup_tx4939.c
@@ -0,0 +1,568 @@
+/*
+ * TX4939 setup routines
+ * Based on linux/arch/mips/txx9/generic/setup_tx4938.c,
+ * and RBTX49xx patch from CELF patch archive.
+ *
+ * 2003-2005 (c) MontaVista Software, Inc.
+ * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/param.h>
+#include <linux/ptrace.h>
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/txx9/ndfmc.h>
+#include <asm/reboot.h>
+#include <asm/traps.h>
+#include <asm/txx9irq.h>
+#include <asm/txx9tmr.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/dmac.h>
+#include <asm/txx9/tx4939.h>
+
+static void __init tx4939_wdr_init(void)
+{
+ /* report watchdog reset status */
+ if (____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_WDRST)
+ pr_warn("Watchdog reset detected at 0x%lx\n",
+ read_c0_errorepc());
+ /* clear WatchDogReset (W1C) */
+ tx4939_ccfg_set(TX4939_CCFG_WDRST);
+ /* do reset on watchdog */
+ tx4939_ccfg_set(TX4939_CCFG_WR);
+}
+
+void __init tx4939_wdt_init(void)
+{
+ txx9_wdt_init(TX4939_TMR_REG(2) & 0xfffffffffULL);
+}
+
+static void tx4939_machine_restart(char *command)
+{
+ local_irq_disable();
+ pr_emerg("Rebooting (with %s watchdog reset)...\n",
+ (____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_WDREXEN) ?
+ "external" : "internal");
+ /* clear watchdog status */
+ tx4939_ccfg_set(TX4939_CCFG_WDRST); /* W1C */
+ txx9_wdt_now(TX4939_TMR_REG(2) & 0xfffffffffULL);
+ while (!(____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_WDRST))
+ ;
+ mdelay(10);
+ if (____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_WDREXEN) {
+ pr_emerg("Rebooting (with internal watchdog reset)...\n");
+ /* External WDRST failed. Do internal watchdog reset */
+ tx4939_ccfg_clear(TX4939_CCFG_WDREXEN);
+ }
+ /* fallback */
+ (*_machine_halt)();
+}
+
+void show_registers(struct pt_regs *regs);
+static int tx4939_be_handler(struct pt_regs *regs, int is_fixup)
+{
+ int data = regs->cp0_cause & 4;
+ console_verbose();
+ pr_err("%cBE exception at %#lx\n",
+ data ? 'D' : 'I', regs->cp0_epc);
+ pr_err("ccfg:%llx, toea:%llx\n",
+ (unsigned long long)____raw_readq(&tx4939_ccfgptr->ccfg),
+ (unsigned long long)____raw_readq(&tx4939_ccfgptr->toea));
+#ifdef CONFIG_PCI
+ tx4927_report_pcic_status();
+#endif
+ show_registers(regs);
+ panic("BusError!");
+}
+static void __init tx4939_be_init(void)
+{
+ board_be_handler = tx4939_be_handler;
+}
+
+static struct resource tx4939_sdram_resource[4];
+static struct resource tx4939_sram_resource;
+#define TX4939_SRAM_SIZE 0x800
+
+void __init tx4939_setup(void)
+{
+ int i;
+ __u32 divmode;
+ __u64 pcfg;
+ unsigned int cpuclk = 0;
+
+ txx9_reg_res_init(TX4939_REV_PCODE(), TX4939_REG_BASE,
+ TX4939_REG_SIZE);
+ set_c0_config(TX49_CONF_CWFON);
+
+ /* SDRAMC,EBUSC are configured by PROM */
+ for (i = 0; i < 4; i++) {
+ if (!(TX4939_EBUSC_CR(i) & 0x8))
+ continue; /* disabled */
+ txx9_ce_res[i].start = (unsigned long)TX4939_EBUSC_BA(i);
+ txx9_ce_res[i].end =
+ txx9_ce_res[i].start + TX4939_EBUSC_SIZE(i) - 1;
+ request_resource(&iomem_resource, &txx9_ce_res[i]);
+ }
+
+ /* clocks */
+ if (txx9_master_clock) {
+ /* calculate cpu_clock from master_clock */
+ divmode = (__u32)____raw_readq(&tx4939_ccfgptr->ccfg) &
+ TX4939_CCFG_MULCLK_MASK;
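+ /* cpuclk = master_clock * 10 * N / 6 for MULCLK_N (N = 8..15): start from master_clock * 20 / 2 and scale by N/6 in each case below */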
+ cpuclk = txx9_master_clock * 20 / 2;
+ switch (divmode) {
+ case TX4939_CCFG_MULCLK_8:
+ cpuclk = cpuclk / 3 * 4 /* / 6 * 8 */; break;
+ case TX4939_CCFG_MULCLK_9:
+ cpuclk = cpuclk / 2 * 3 /* / 6 * 9 */; break;
+ case TX4939_CCFG_MULCLK_10:
+ cpuclk = cpuclk / 3 * 5 /* / 6 * 10 */; break;
+ case TX4939_CCFG_MULCLK_11:
+ cpuclk = cpuclk / 6 * 11; break;
+ case TX4939_CCFG_MULCLK_12:
+ cpuclk = cpuclk * 2 /* / 6 * 12 */; break;
+ case TX4939_CCFG_MULCLK_13:
+ cpuclk = cpuclk / 6 * 13; break;
+ case TX4939_CCFG_MULCLK_14:
+ cpuclk = cpuclk / 3 * 7 /* / 6 * 14 */; break;
+ case TX4939_CCFG_MULCLK_15:
+ cpuclk = cpuclk / 2 * 5 /* / 6 * 15 */; break;
+ }
+ txx9_cpu_clock = cpuclk;
+ } else {
+ if (txx9_cpu_clock == 0)
+ txx9_cpu_clock = 400000000; /* 400MHz */
+ /* calculate master_clock from cpu_clock */
+ cpuclk = txx9_cpu_clock;
+ divmode = (__u32)____raw_readq(&tx4939_ccfgptr->ccfg) &
+ TX4939_CCFG_MULCLK_MASK;
+ switch (divmode) {
+ case TX4939_CCFG_MULCLK_8:
+ txx9_master_clock = cpuclk * 6 / 8; break;
+ case TX4939_CCFG_MULCLK_9:
+ txx9_master_clock = cpuclk * 6 / 9; break;
+ case TX4939_CCFG_MULCLK_10:
+ txx9_master_clock = cpuclk * 6 / 10; break;
+ case TX4939_CCFG_MULCLK_11:
+ txx9_master_clock = cpuclk * 6 / 11; break;
+ case TX4939_CCFG_MULCLK_12:
+ txx9_master_clock = cpuclk * 6 / 12; break;
+ case TX4939_CCFG_MULCLK_13:
+ txx9_master_clock = cpuclk * 6 / 13; break;
+ case TX4939_CCFG_MULCLK_14:
+ txx9_master_clock = cpuclk * 6 / 14; break;
+ case TX4939_CCFG_MULCLK_15:
+ txx9_master_clock = cpuclk * 6 / 15; break;
+ }
+ txx9_master_clock /= 10; /* * 2 / 20 */
+ }
+ /* calculate gbus_clock from cpu_clock */
+ divmode = (__u32)____raw_readq(&tx4939_ccfgptr->ccfg) &
+ TX4939_CCFG_YDIVMODE_MASK;
+ txx9_gbus_clock = txx9_cpu_clock;
+ switch (divmode) {
+ case TX4939_CCFG_YDIVMODE_2:
+ txx9_gbus_clock /= 2; break;
+ case TX4939_CCFG_YDIVMODE_3:
+ txx9_gbus_clock /= 3; break;
+ case TX4939_CCFG_YDIVMODE_5:
+ txx9_gbus_clock /= 5; break;
+ case TX4939_CCFG_YDIVMODE_6:
+ txx9_gbus_clock /= 6; break;
+ }
+ /* change the default value so that udelay/mdelay take a reasonable time */
+ loops_per_jiffy = txx9_cpu_clock / HZ / 2;
+
+ /* CCFG */
+ tx4939_wdr_init();
+ /* clear BusErrorOnWrite flag (W1C) */
+ tx4939_ccfg_set(TX4939_CCFG_WDRST | TX4939_CCFG_BEOW);
+ /* enable Timeout BusError */
+ if (txx9_ccfg_toeon)
+ tx4939_ccfg_set(TX4939_CCFG_TOE);
+
+ /* DMA selection */
+ txx9_clear64(&tx4939_ccfgptr->pcfg, TX4939_PCFG_DMASEL_ALL);
+
+ /* Use external clock for external arbiter */
+ if (!(____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_PCIARB))
+ txx9_clear64(&tx4939_ccfgptr->pcfg, TX4939_PCFG_PCICLKEN_ALL);
+
+ pr_info("%s -- %dMHz(M%dMHz,G%dMHz) CRIR:%08x CCFG:%llx PCFG:%llx\n",
+ txx9_pcode_str,
+ (cpuclk + 500000) / 1000000,
+ (txx9_master_clock + 500000) / 1000000,
+ (txx9_gbus_clock + 500000) / 1000000,
+ (__u32)____raw_readq(&tx4939_ccfgptr->crir),
+ ____raw_readq(&tx4939_ccfgptr->ccfg),
+ ____raw_readq(&tx4939_ccfgptr->pcfg));
+
+ pr_info("%s DDRC -- EN:%08x", txx9_pcode_str,
+ (__u32)____raw_readq(&tx4939_ddrcptr->winen));
+ for (i = 0; i < 4; i++) {
+ __u64 win = ____raw_readq(&tx4939_ddrcptr->win[i]);
+ if (!((__u32)____raw_readq(&tx4939_ddrcptr->winen) & (1 << i)))
+ continue; /* disabled */
+ pr_cont(" #%d:%016llx", i, win);
+ tx4939_sdram_resource[i].name = "DDR SDRAM";
+ tx4939_sdram_resource[i].start =
+ (unsigned long)(win >> 48) << 20;
+ tx4939_sdram_resource[i].end =
+ ((((unsigned long)(win >> 32) & 0xffff) + 1) <<
+ 20) - 1;
+ tx4939_sdram_resource[i].flags = IORESOURCE_MEM;
+ request_resource(&iomem_resource, &tx4939_sdram_resource[i]);
+ }
+ pr_cont("\n");
+
+ /* SRAM */
+ if (____raw_readq(&tx4939_sramcptr->cr) & 1) {
+ unsigned int size = TX4939_SRAM_SIZE;
+ tx4939_sram_resource.name = "SRAM";
+ tx4939_sram_resource.start =
+ (____raw_readq(&tx4939_sramcptr->cr) >> (39-11))
+ & ~(size - 1);
+ tx4939_sram_resource.end =
+ tx4939_sram_resource.start + TX4939_SRAM_SIZE - 1;
+ tx4939_sram_resource.flags = IORESOURCE_MEM;
+ request_resource(&iomem_resource, &tx4939_sram_resource);
+ }
+
+ /* TMR */
+ /* disable all timers */
+ for (i = 0; i < TX4939_NR_TMR; i++)
+ txx9_tmr_init(TX4939_TMR_REG(i) & 0xfffffffffULL);
+
+ /* set PCIC1 reset (required to prevent hangup on BIST) */
+ txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_PCI1RST);
+ pcfg = ____raw_readq(&tx4939_ccfgptr->pcfg);
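+ /* release PCIC1 from reset only if at least one on-chip Ethernet port is enabled; otherwise keep PCIC1 clock-gated */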
+ if (pcfg & (TX4939_PCFG_ET0MODE | TX4939_PCFG_ET1MODE)) {
+ mdelay(1); /* at least 128 cpu clock */
+ /* clear PCIC1 reset */
+ txx9_clear64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_PCI1RST);
+ } else {
+ pr_info("%s: stop PCIC1\n", txx9_pcode_str);
+ /* stop PCIC1 */
+ txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_PCI1CKD);
+ }
+ if (!(pcfg & TX4939_PCFG_ET0MODE)) {
+ pr_info("%s: stop ETH0\n", txx9_pcode_str);
+ txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_ETH0RST);
+ txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_ETH0CKD);
+ }
+ if (!(pcfg & TX4939_PCFG_ET1MODE)) {
+ pr_info("%s: stop ETH1\n", txx9_pcode_str);
+ txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_ETH1RST);
+ txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_ETH1CKD);
+ }
+
+ _machine_restart = tx4939_machine_restart;
+ board_be_init = tx4939_be_init;
+}
+
+void __init tx4939_time_init(unsigned int tmrnr)
+{
+ if (____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_TINTDIS)
+ txx9_clockevent_init(TX4939_TMR_REG(tmrnr) & 0xfffffffffULL,
+ TXX9_IRQ_BASE + TX4939_IR_TMR(tmrnr),
+ TXX9_IMCLK);
+}
+
+void __init tx4939_sio_init(unsigned int sclk, unsigned int cts_mask)
+{
+ int i;
+ unsigned int ch_mask = 0;
+ __u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg);
+
+ cts_mask |= ~1; /* only SIO0 has RTS/CTS */
+ if ((pcfg & TX4939_PCFG_SIO2MODE_MASK) != TX4939_PCFG_SIO2MODE_SIO0)
+ cts_mask |= 1 << 0; /* disable SIO0 RTS/CTS by PCFG setting */
+ if ((pcfg & TX4939_PCFG_SIO2MODE_MASK) != TX4939_PCFG_SIO2MODE_SIO2)
+ ch_mask |= 1 << 2; /* disable SIO2 by PCFG setting */
+ if (pcfg & TX4939_PCFG_SIO3MODE)
+ ch_mask |= 1 << 3; /* disable SIO3 by PCFG setting */
+ for (i = 0; i < 4; i++) {
+ if ((1 << i) & ch_mask)
+ continue;
+ txx9_sio_init(TX4939_SIO_REG(i) & 0xfffffffffULL,
+ TXX9_IRQ_BASE + TX4939_IR_SIO(i),
+ i, sclk, (1 << i) & cts_mask);
+ }
+}
+
+#if IS_ENABLED(CONFIG_TC35815)
+static u32 tx4939_get_eth_speed(struct net_device *dev)
+{
+ struct ethtool_link_ksettings cmd;
+
+ if (__ethtool_get_link_ksettings(dev, &cmd))
+ return 100; /* default 100Mbps */
+
+ return cmd.base.speed;
+}
+
+static int tx4939_netdev_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (event == NETDEV_CHANGE && netif_carrier_ok(dev)) {
+ __u64 bit = 0;
+ if (dev->irq == TXX9_IRQ_BASE + TX4939_IR_ETH(0))
+ bit = TX4939_PCFG_SPEED0;
+ else if (dev->irq == TXX9_IRQ_BASE + TX4939_IR_ETH(1))
+ bit = TX4939_PCFG_SPEED1;
+ if (bit) {
+ if (tx4939_get_eth_speed(dev) == 100)
+ txx9_set64(&tx4939_ccfgptr->pcfg, bit);
+ else
+ txx9_clear64(&tx4939_ccfgptr->pcfg, bit);
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block tx4939_netdev_notifier = {
+ .notifier_call = tx4939_netdev_event,
+ .priority = 1,
+};
+
+void __init tx4939_ethaddr_init(unsigned char *addr0, unsigned char *addr1)
+{
+ u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg);
+
+ if (addr0 && (pcfg & TX4939_PCFG_ET0MODE))
+ txx9_ethaddr_init(TXX9_IRQ_BASE + TX4939_IR_ETH(0), addr0);
+ if (addr1 && (pcfg & TX4939_PCFG_ET1MODE))
+ txx9_ethaddr_init(TXX9_IRQ_BASE + TX4939_IR_ETH(1), addr1);
+ register_netdevice_notifier(&tx4939_netdev_notifier);
+}
+#else
+void __init tx4939_ethaddr_init(unsigned char *addr0, unsigned char *addr1)
+{
+}
+#endif
+
+void __init tx4939_mtd_init(int ch)
+{
+ struct physmap_flash_data pdata = {
+ .width = TX4939_EBUSC_WIDTH(ch) / 8,
+ };
+ unsigned long start = txx9_ce_res[ch].start;
+ unsigned long size = txx9_ce_res[ch].end - start + 1;
+
+ if (!(TX4939_EBUSC_CR(ch) & 0x8))
+ return; /* disabled */
+ txx9_physmap_flash_init(ch, start, size, &pdata);
+}
+
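+ /* the TX4939 register macros carry 64-bit addresses; masking with 0xfffffffffULL keeps the 36-bit physical address used for the MMIO resources below */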
+#define TX4939_ATA_REG_PHYS(ch) (TX4939_ATA_REG(ch) & 0xfffffffffULL)
+void __init tx4939_ata_init(void)
+{
+ static struct resource ata0_res[] = {
+ {
+ .start = TX4939_ATA_REG_PHYS(0),
+ .end = TX4939_ATA_REG_PHYS(0) + 0x1000 - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = TXX9_IRQ_BASE + TX4939_IR_ATA(0),
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+ static struct resource ata1_res[] = {
+ {
+ .start = TX4939_ATA_REG_PHYS(1),
+ .end = TX4939_ATA_REG_PHYS(1) + 0x1000 - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = TXX9_IRQ_BASE + TX4939_IR_ATA(1),
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+ static struct platform_device ata0_dev = {
+ .name = "tx4939ide",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(ata0_res),
+ .resource = ata0_res,
+ };
+ static struct platform_device ata1_dev = {
+ .name = "tx4939ide",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(ata1_res),
+ .resource = ata1_res,
+ };
+ __u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg);
+
+ if (pcfg & TX4939_PCFG_ATA0MODE)
+ platform_device_register(&ata0_dev);
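+ /* register ATA1 only when ATA1MODE is set and both Ethernet ports are disabled */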
+ if ((pcfg & (TX4939_PCFG_ATA1MODE |
+ TX4939_PCFG_ET1MODE |
+ TX4939_PCFG_ET0MODE)) == TX4939_PCFG_ATA1MODE)
+ platform_device_register(&ata1_dev);
+}
+
+void __init tx4939_rtc_init(void)
+{
+ static struct resource res[] = {
+ {
+ .start = TX4939_RTC_REG & 0xfffffffffULL,
+ .end = (TX4939_RTC_REG & 0xfffffffffULL) + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = TXX9_IRQ_BASE + TX4939_IR_RTC,
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+ static struct platform_device rtc_dev = {
+ .name = "tx4939rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(res),
+ .resource = res,
+ };
+
+ platform_device_register(&rtc_dev);
+}
+
+void __init tx4939_ndfmc_init(unsigned int hold, unsigned int spw,
+ unsigned char ch_mask, unsigned char wide_mask)
+{
+ struct txx9ndfmc_platform_data plat_data = {
+ .shift = 1,
+ .gbus_clock = txx9_gbus_clock,
+ .hold = hold,
+ .spw = spw,
+ .flags = NDFMC_PLAT_FLAG_NO_RSTR | NDFMC_PLAT_FLAG_HOLDADD |
+ NDFMC_PLAT_FLAG_DUMMYWRITE,
+ .ch_mask = ch_mask,
+ .wide_mask = wide_mask,
+ };
+ txx9_ndfmc_init(TX4939_NDFMC_REG & 0xfffffffffULL, &plat_data);
+}
+
+void __init tx4939_dmac_init(int memcpy_chan0, int memcpy_chan1)
+{
+ struct txx9dmac_platform_data plat_data = {
+ .have_64bit_regs = true,
+ };
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ plat_data.memcpy_chan = i ? memcpy_chan1 : memcpy_chan0;
+ txx9_dmac_init(i, TX4939_DMA_REG(i) & 0xfffffffffULL,
+ TXX9_IRQ_BASE + TX4939_IR_DMA(i, 0),
+ &plat_data);
+ }
+}
+
+void __init tx4939_aclc_init(void)
+{
+ u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg);
+
+ if ((pcfg & TX4939_PCFG_I2SMODE_MASK) == TX4939_PCFG_I2SMODE_ACLC)
+ txx9_aclc_init(TX4939_ACLC_REG & 0xfffffffffULL,
+ TXX9_IRQ_BASE + TX4939_IR_ACLC, 1, 0, 1);
+}
+
+void __init tx4939_sramc_init(void)
+{
+ if (tx4939_sram_resource.start)
+ txx9_sramc_init(&tx4939_sram_resource);
+}
+
+void __init tx4939_rng_init(void)
+{
+ static struct resource res = {
+ .start = TX4939_RNG_REG & 0xfffffffffULL,
+ .end = (TX4939_RNG_REG & 0xfffffffffULL) + 0x30 - 1,
+ .flags = IORESOURCE_MEM,
+ };
+ static struct platform_device pdev = {
+ .name = "tx4939-rng",
+ .id = -1,
+ .num_resources = 1,
+ .resource = &res,
+ };
+
+ platform_device_register(&pdev);
+}
+
+static void __init tx4939_stop_unused_modules(void)
+{
+ __u64 pcfg, rst = 0, ckd = 0;
+ char buf[128];
+
+ buf[0] = '\0';
+ local_irq_disable();
+ pcfg = ____raw_readq(&tx4939_ccfgptr->pcfg);
+ if ((pcfg & TX4939_PCFG_I2SMODE_MASK) !=
+ TX4939_PCFG_I2SMODE_ACLC) {
+ rst |= TX4939_CLKCTR_ACLRST;
+ ckd |= TX4939_CLKCTR_ACLCKD;
+ strcat(buf, " ACLC");
+ }
+ if ((pcfg & TX4939_PCFG_I2SMODE_MASK) !=
+ TX4939_PCFG_I2SMODE_I2S &&
+ (pcfg & TX4939_PCFG_I2SMODE_MASK) !=
+ TX4939_PCFG_I2SMODE_I2S_ALT) {
+ rst |= TX4939_CLKCTR_I2SRST;
+ ckd |= TX4939_CLKCTR_I2SCKD;
+ strcat(buf, " I2S");
+ }
+ if (!(pcfg & TX4939_PCFG_ATA0MODE)) {
+ rst |= TX4939_CLKCTR_ATA0RST;
+ ckd |= TX4939_CLKCTR_ATA0CKD;
+ strcat(buf, " ATA0");
+ }
+ if (!(pcfg & TX4939_PCFG_ATA1MODE)) {
+ rst |= TX4939_CLKCTR_ATA1RST;
+ ckd |= TX4939_CLKCTR_ATA1CKD;
+ strcat(buf, " ATA1");
+ }
+ if (pcfg & TX4939_PCFG_SPIMODE) {
+ rst |= TX4939_CLKCTR_SPIRST;
+ ckd |= TX4939_CLKCTR_SPICKD;
+ strcat(buf, " SPI");
+ }
+ if (!(pcfg & (TX4939_PCFG_VSSMODE | TX4939_PCFG_VPSMODE))) {
+ rst |= TX4939_CLKCTR_VPCRST;
+ ckd |= TX4939_CLKCTR_VPCCKD;
+ strcat(buf, " VPC");
+ }
+ if ((pcfg & TX4939_PCFG_SIO2MODE_MASK) != TX4939_PCFG_SIO2MODE_SIO2) {
+ rst |= TX4939_CLKCTR_SIO2RST;
+ ckd |= TX4939_CLKCTR_SIO2CKD;
+ strcat(buf, " SIO2");
+ }
+ if (pcfg & TX4939_PCFG_SIO3MODE) {
+ rst |= TX4939_CLKCTR_SIO3RST;
+ ckd |= TX4939_CLKCTR_SIO3CKD;
+ strcat(buf, " SIO3");
+ }
+ if (rst | ckd) {
+ txx9_set64(&tx4939_ccfgptr->clkctr, rst);
+ txx9_set64(&tx4939_ccfgptr->clkctr, ckd);
+ }
+ local_irq_enable();
+ if (buf[0])
+ pr_info("%s: stop%s\n", txx9_pcode_str, buf);
+}
+
+static int __init tx4939_late_init(void)
+{
+ if (txx9_pcode != 0x4939)
+ return -ENODEV;
+ tx4939_stop_unused_modules();
+ return 0;
+}
+late_initcall(tx4939_late_init);
diff --git a/arch/mips/txx9/generic/smsc_fdc37m81x.c b/arch/mips/txx9/generic/smsc_fdc37m81x.c
new file mode 100644
index 000000000..40f4098d3
--- /dev/null
+++ b/arch/mips/txx9/generic/smsc_fdc37m81x.c
@@ -0,0 +1,169 @@
+/*
+ * Interface for the SMSC FDC37M81x Super I/O chip
+ *
+ * Author: MontaVista Software, Inc. source@mvista.com
+ *
+ * 2001-2003 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Copyright 2004 (c) MontaVista Software, Inc.
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/txx9/smsc_fdc37m81x.h>
+
+/* Common Registers */
+#define SMSC_FDC37M81X_CONFIG_INDEX 0x00
+#define SMSC_FDC37M81X_CONFIG_DATA 0x01
+#define SMSC_FDC37M81X_CONF 0x02
+#define SMSC_FDC37M81X_INDEX 0x03
+#define SMSC_FDC37M81X_DNUM 0x07
+#define SMSC_FDC37M81X_DID 0x20
+#define SMSC_FDC37M81X_DREV 0x21
+#define SMSC_FDC37M81X_PCNT 0x22
+#define SMSC_FDC37M81X_PMGT 0x23
+#define SMSC_FDC37M81X_OSC 0x24
+#define SMSC_FDC37M81X_CONFPA0 0x26
+#define SMSC_FDC37M81X_CONFPA1 0x27
+#define SMSC_FDC37M81X_TEST4 0x2B
+#define SMSC_FDC37M81X_TEST5 0x2C
+#define SMSC_FDC37M81X_TEST1 0x2D
+#define SMSC_FDC37M81X_TEST2 0x2E
+#define SMSC_FDC37M81X_TEST3 0x2F
+
+/* Logical device numbers */
+#define SMSC_FDC37M81X_FDD 0x00
+#define SMSC_FDC37M81X_SERIAL1 0x04
+#define SMSC_FDC37M81X_SERIAL2 0x05
+#define SMSC_FDC37M81X_KBD 0x07
+
+/* Logical device Config Registers */
+#define SMSC_FDC37M81X_ACTIVE 0x30
+#define SMSC_FDC37M81X_BASEADDR0 0x60
+#define SMSC_FDC37M81X_BASEADDR1 0x61
+#define SMSC_FDC37M81X_INT 0x70
+#define SMSC_FDC37M81X_INT2 0x72
+#define SMSC_FDC37M81X_MODE 0xF0
+
+/* Chip Config Values */
+#define SMSC_FDC37M81X_CONFIG_ENTER 0x55
+#define SMSC_FDC37M81X_CONFIG_EXIT 0xaa
+#define SMSC_FDC37M81X_CHIP_ID 0x4d
+
+static unsigned long g_smsc_fdc37m81x_base;
+
+static inline unsigned char smsc_fdc37m81x_rd(unsigned char index)
+{
+ outb(index, g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_INDEX);
+
+ return inb(g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_DATA);
+}
+
+static inline void smsc_dc37m81x_wr(unsigned char index, unsigned char data)
+{
+ outb(index, g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_INDEX);
+ outb(data, g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_DATA);
+}
+
+void smsc_fdc37m81x_config_beg(void)
+{
+ if (g_smsc_fdc37m81x_base) {
+ outb(SMSC_FDC37M81X_CONFIG_ENTER,
+ g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_INDEX);
+ }
+}
+
+void smsc_fdc37m81x_config_end(void)
+{
+ if (g_smsc_fdc37m81x_base)
+ outb(SMSC_FDC37M81X_CONFIG_EXIT,
+ g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_INDEX);
+}
+
+u8 smsc_fdc37m81x_config_get(u8 reg)
+{
+ u8 val = 0;
+
+ if (g_smsc_fdc37m81x_base)
+ val = smsc_fdc37m81x_rd(reg);
+
+ return val;
+}
+
+void smsc_fdc37m81x_config_set(u8 reg, u8 val)
+{
+ if (g_smsc_fdc37m81x_base)
+ smsc_dc37m81x_wr(reg, val);
+}
+
+unsigned long __init smsc_fdc37m81x_init(unsigned long port)
+{
+ const int field = sizeof(unsigned long) * 2;
+ u8 chip_id;
+
+ if (g_smsc_fdc37m81x_base)
+ pr_warn("%s: stepping on old base=0x%0*lx\n", __func__, field,
+ g_smsc_fdc37m81x_base);
+
+ g_smsc_fdc37m81x_base = port;
+
+ smsc_fdc37m81x_config_beg();
+
+ chip_id = smsc_fdc37m81x_rd(SMSC_FDC37M81X_DID);
+ if (chip_id == SMSC_FDC37M81X_CHIP_ID)
+ smsc_fdc37m81x_config_end();
+ else {
+ pr_warn("%s: unknown chip id 0x%02x\n", __func__, chip_id);
+ g_smsc_fdc37m81x_base = 0;
+ }
+
+ return g_smsc_fdc37m81x_base;
+}
+
+#ifdef DEBUG
+static void smsc_fdc37m81x_config_dump_one(const char *key, u8 dev, u8 reg)
+{
+ pr_info("%s: dev=0x%02x reg=0x%02x val=0x%02x\n", key, dev, reg,
+ smsc_fdc37m81x_rd(reg));
+}
+
+void smsc_fdc37m81x_config_dump(void)
+{
+ u8 orig;
+ const char *fname = __func__;
+
+ smsc_fdc37m81x_config_beg();
+
+ orig = smsc_fdc37m81x_rd(SMSC_FDC37M81X_DNUM);
+
+ pr_info("%s: common\n", fname);
+ smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE,
+ SMSC_FDC37M81X_DNUM);
+ smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE,
+ SMSC_FDC37M81X_DID);
+ smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE,
+ SMSC_FDC37M81X_DREV);
+ smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE,
+ SMSC_FDC37M81X_PCNT);
+ smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE,
+ SMSC_FDC37M81X_PMGT);
+
+ pr_info("%s: keyboard\n", fname);
+ smsc_dc37m81x_wr(SMSC_FDC37M81X_DNUM, SMSC_FDC37M81X_KBD);
+ smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_KBD,
+ SMSC_FDC37M81X_ACTIVE);
+ smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_KBD,
+ SMSC_FDC37M81X_INT);
+ smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_KBD,
+ SMSC_FDC37M81X_INT2);
+ smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_KBD,
+ SMSC_FDC37M81X_LDCR_F0);
+
+ smsc_dc37m81x_wr(SMSC_FDC37M81X_DNUM, orig);
+
+ smsc_fdc37m81x_config_end();
+}
+#endif
diff --git a/arch/mips/txx9/generic/spi_eeprom.c b/arch/mips/txx9/generic/spi_eeprom.c
new file mode 100644
index 000000000..d833dd2c9
--- /dev/null
+++ b/arch/mips/txx9/generic/spi_eeprom.c
@@ -0,0 +1,104 @@
+/*
+ * spi_eeprom.c
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
+ */
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/eeprom.h>
+#include <asm/txx9/spi.h>
+
+#define AT250X0_PAGE_SIZE 8
+
+/* register board information for at25 driver */
+int __init spi_eeprom_register(int busid, int chipid, int size)
+{
+ struct spi_board_info info = {
+ .modalias = "at25",
+ .max_speed_hz = 1500000, /* 1.5Mbps */
+ .bus_num = busid,
+ .chip_select = chipid,
+ /* Mode 0: High-Active, Sample-Then-Shift */
+ };
+ struct spi_eeprom *eeprom;
+ eeprom = kzalloc(sizeof(*eeprom), GFP_KERNEL);
+ if (!eeprom)
+ return -ENOMEM;
+ strcpy(eeprom->name, "at250x0");
+ eeprom->byte_len = size;
+ eeprom->page_size = AT250X0_PAGE_SIZE;
+ eeprom->flags = EE_ADDR1;
+ info.platform_data = eeprom;
+ return spi_register_board_info(&info, 1);
+}
+
+/* simple temporary spi driver to provide early access to seeprom. */
+
+static struct read_param {
+ int busid;
+ int chipid;
+ int address;
+ unsigned char *buf;
+ int len;
+} *read_param;
+
+static int __init early_seeprom_probe(struct spi_device *spi)
+{
+ int stat = 0;
+ u8 cmd[2];
+ int len = read_param->len;
+ char *buf = read_param->buf;
+ int address = read_param->address;
+
+ dev_info(&spi->dev, "spiclk %u KHz.\n",
+ (spi->max_speed_hz + 500) / 1000);
+ if (read_param->busid != spi->master->bus_num ||
+ read_param->chipid != spi->chip_select)
+ return -ENODEV;
+ while (len > 0) {
+ /* spi_write_then_read() can only handle small chunks */
+ int c = len < AT250X0_PAGE_SIZE ? len : AT250X0_PAGE_SIZE;
+ cmd[0] = 0x03; /* AT25_READ */
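+ /* a single address byte follows the opcode, matching the EE_ADDR1 flag set in spi_eeprom_register() */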
+ cmd[1] = address;
+ stat = spi_write_then_read(spi, cmd, sizeof(cmd), buf, c);
+ buf += c;
+ len -= c;
+ address += c;
+ }
+ return stat;
+}
+
+static struct spi_driver early_seeprom_driver __initdata = {
+ .driver = {
+ .name = "at25",
+ },
+ .probe = early_seeprom_probe,
+};
+
+int __init spi_eeprom_read(int busid, int chipid, int address,
+ unsigned char *buf, int len)
+{
+ int ret;
+ struct read_param param = {
+ .busid = busid,
+ .chipid = chipid,
+ .address = address,
+ .buf = buf,
+ .len = len
+ };
+
+ read_param = &param;
+ ret = spi_register_driver(&early_seeprom_driver);
+ if (!ret)
+ spi_unregister_driver(&early_seeprom_driver);
+ return ret;
+}
diff --git a/arch/mips/txx9/jmr3927/Makefile b/arch/mips/txx9/jmr3927/Makefile
new file mode 100644
index 000000000..4bda0615d
--- /dev/null
+++ b/arch/mips/txx9/jmr3927/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for TOSHIBA JMR-TX3927 board
+#
+
+obj-y += prom.o irq.o setup.o
diff --git a/arch/mips/txx9/jmr3927/irq.c b/arch/mips/txx9/jmr3927/irq.c
new file mode 100644
index 000000000..c22c859a2
--- /dev/null
+++ b/arch/mips/txx9/jmr3927/irq.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ahennessy@mvista.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include <asm/io.h>
+#include <asm/mipsregs.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/jmr3927.h>
+
+#if JMR3927_IRQ_END > NR_IRQS
+#error JMR3927_IRQ_END > NR_IRQS
+#endif
+
+/*
+ * CP0_STATUS is a thread's resource (saved/restored on context switch).
+ * So disable_irq/enable_irq MUST handle IOC/IRC registers.
+ */
+static void mask_irq_ioc(struct irq_data *d)
+{
+ /* 0: mask */
+ unsigned int irq_nr = d->irq - JMR3927_IRQ_IOC;
+ unsigned char imask = jmr3927_ioc_reg_in(JMR3927_IOC_INTM_ADDR);
+ unsigned int bit = 1 << irq_nr;
+ jmr3927_ioc_reg_out(imask & ~bit, JMR3927_IOC_INTM_ADDR);
+ /* flush write buffer */
+ (void)jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR);
+}
+static void unmask_irq_ioc(struct irq_data *d)
+{
+ /* 0: mask */
+ unsigned int irq_nr = d->irq - JMR3927_IRQ_IOC;
+ unsigned char imask = jmr3927_ioc_reg_in(JMR3927_IOC_INTM_ADDR);
+ unsigned int bit = 1 << irq_nr;
+ jmr3927_ioc_reg_out(imask | bit, JMR3927_IOC_INTM_ADDR);
+ /* flush write buffer */
+ (void)jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR);
+}
+
+static int jmr3927_ioc_irqroute(void)
+{
+ unsigned char istat = jmr3927_ioc_reg_in(JMR3927_IOC_INTS2_ADDR);
+ int i;
+
+ for (i = 0; i < JMR3927_NR_IRQ_IOC; i++) {
+ if (istat & (1 << i))
+ return JMR3927_IRQ_IOC + i;
+ }
+ return -1;
+}
+
+static int jmr3927_irq_dispatch(int pending)
+{
+ int irq;
+
+ if ((pending & CAUSEF_IP7) == 0)
+ return -1;
+ irq = (pending >> CAUSEB_IP2) & 0x0f;
+ irq += JMR3927_IRQ_IRC;
+ if (irq == JMR3927_IRQ_IOCINT)
+ irq = jmr3927_ioc_irqroute();
+ return irq;
+}
+
+static struct irq_chip jmr3927_irq_ioc = {
+ .name = "jmr3927_ioc",
+ .irq_mask = mask_irq_ioc,
+ .irq_unmask = unmask_irq_ioc,
+};
+
+void __init jmr3927_irq_setup(void)
+{
+ int i;
+
+ txx9_irq_dispatch = jmr3927_irq_dispatch;
+ /* Now, interrupt control disabled, */
+ /* all IRC interrupts are masked, */
+ /* all IRC interrupt modes are Low Active. */
+
+ /* mask all IOC interrupts */
+ jmr3927_ioc_reg_out(0, JMR3927_IOC_INTM_ADDR);
+ /* setup IOC interrupt mode (SOFT:High Active, Others:Low Active) */
+ jmr3927_ioc_reg_out(JMR3927_IOC_INTF_SOFT, JMR3927_IOC_INTP_ADDR);
+
+ /* clear PCI Soft interrupts */
+ jmr3927_ioc_reg_out(0, JMR3927_IOC_INTS1_ADDR);
+ /* clear PCI Reset interrupts */
+ jmr3927_ioc_reg_out(0, JMR3927_IOC_RESET_ADDR);
+
+ tx3927_irq_init();
+ for (i = JMR3927_IRQ_IOC; i < JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC; i++)
+ irq_set_chip_and_handler(i, &jmr3927_irq_ioc,
+ handle_level_irq);
+
+ /* setup IOC interrupt 1 (PCI, MODEM) */
+ irq_set_chained_handler(JMR3927_IRQ_IOCINT, handle_simple_irq);
+}
diff --git a/arch/mips/txx9/jmr3927/prom.c b/arch/mips/txx9/jmr3927/prom.c
new file mode 100644
index 000000000..53c68de54
--- /dev/null
+++ b/arch/mips/txx9/jmr3927/prom.c
@@ -0,0 +1,52 @@
+/*
+ * BRIEF MODULE DESCRIPTION
+ * PROM library initialisation code, assuming a version of
+ * pmon is the boot code.
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ahennessy@mvista.com
+ *
+ * Based on arch/mips/au1000/common/prom.c
+ *
+ * This file was derived from Carsten Langgaard's
+ * arch/mips/mips-boards/xx files.
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/jmr3927.h>
+
+void __init jmr3927_prom_init(void)
+{
+ /* CCFG */
+ if ((tx3927_ccfgptr->ccfg & TX3927_CCFG_TLBOFF) == 0)
+ pr_err("TX3927 TLB off\n");
+
+ memblock_add(0, JMR3927_SDRAM_SIZE);
+ txx9_sio_putchar_init(TX3927_SIO_REG(1));
+}
diff --git a/arch/mips/txx9/jmr3927/setup.c b/arch/mips/txx9/jmr3927/setup.c
new file mode 100644
index 000000000..613943886
--- /dev/null
+++ b/arch/mips/txx9/jmr3927/setup.c
@@ -0,0 +1,223 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ahennessy@mvista.com
+ *
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <asm/reboot.h>
+#include <asm/txx9pio.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/pci.h>
+#include <asm/txx9/jmr3927.h>
+#include <asm/mipsregs.h>
+
+static void jmr3927_machine_restart(char *command)
+{
+ local_irq_disable();
+#if 1 /* Resetting PCI bus */
+ jmr3927_ioc_reg_out(0, JMR3927_IOC_RESET_ADDR);
+ jmr3927_ioc_reg_out(JMR3927_IOC_RESET_PCI, JMR3927_IOC_RESET_ADDR);
+ (void)jmr3927_ioc_reg_in(JMR3927_IOC_RESET_ADDR); /* flush WB */
+ mdelay(1);
+ jmr3927_ioc_reg_out(0, JMR3927_IOC_RESET_ADDR);
+#endif
+ jmr3927_ioc_reg_out(JMR3927_IOC_RESET_CPU, JMR3927_IOC_RESET_ADDR);
+ /* fallback */
+ (*_machine_halt)();
+}
+
+static void __init jmr3927_time_init(void)
+{
+ tx3927_time_init(0, 1);
+}
+
+#define DO_WRITE_THROUGH
+
+static void jmr3927_board_init(void);
+
+static void __init jmr3927_mem_setup(void)
+{
+ set_io_port_base(JMR3927_PORT_BASE + JMR3927_PCIIO);
+
+ _machine_restart = jmr3927_machine_restart;
+
+ /* cache setup */
+ {
+ unsigned int conf;
+#ifdef DO_WRITE_THROUGH
+ int mips_config_cwfon = 0;
+ int mips_config_wbon = 0;
+#else
+ int mips_config_cwfon = 1;
+ int mips_config_wbon = 1;
+#endif
+
+ conf = read_c0_conf();
+ conf &= ~(TX39_CONF_WBON | TX39_CONF_CWFON);
+ conf |= mips_config_wbon ? TX39_CONF_WBON : 0;
+ conf |= mips_config_cwfon ? TX39_CONF_CWFON : 0;
+
+ write_c0_conf(conf);
+ write_c0_cache(0);
+ }
+
+ /* initialize board */
+ jmr3927_board_init();
+
+ tx3927_sio_init(0, 1 << 1); /* ch1: noCTS */
+}
+
+static void __init jmr3927_pci_setup(void)
+{
+#ifdef CONFIG_PCI
+ int extarb = !(tx3927_ccfgptr->ccfg & TX3927_CCFG_PCIXARB);
+ struct pci_controller *c;
+
+ c = txx9_alloc_pci_controller(&txx9_primary_pcic,
+ JMR3927_PCIMEM, JMR3927_PCIMEM_SIZE,
+ JMR3927_PCIIO, JMR3927_PCIIO_SIZE);
+ register_pci_controller(c);
+ if (!extarb) {
+ /* Reset PCI Bus */
+ jmr3927_ioc_reg_out(0, JMR3927_IOC_RESET_ADDR);
+ udelay(100);
+ jmr3927_ioc_reg_out(JMR3927_IOC_RESET_PCI,
+ JMR3927_IOC_RESET_ADDR);
+ udelay(100);
+ jmr3927_ioc_reg_out(0, JMR3927_IOC_RESET_ADDR);
+ }
+ tx3927_pcic_setup(c, JMR3927_SDRAM_SIZE, extarb);
+ tx3927_setup_pcierr_irq();
+#endif /* CONFIG_PCI */
+}
+
+static void __init jmr3927_board_init(void)
+{
+ txx9_cpu_clock = JMR3927_CORECLK;
+ /* SDRAMC are configured by PROM */
+
+ /* ROMC */
+ tx3927_romcptr->cr[1] = JMR3927_ROMCE1 | 0x00030048;
+ tx3927_romcptr->cr[2] = JMR3927_ROMCE2 | 0x000064c8;
+ tx3927_romcptr->cr[3] = JMR3927_ROMCE3 | 0x0003f698;
+ tx3927_romcptr->cr[5] = JMR3927_ROMCE5 | 0x0000f218;
+
+ /* Pin selection */
+ tx3927_ccfgptr->pcfg &= ~TX3927_PCFG_SELALL;
+ tx3927_ccfgptr->pcfg |=
+ TX3927_PCFG_SELSIOC(0) | TX3927_PCFG_SELSIO_ALL |
+ (TX3927_PCFG_SELDMA_ALL & ~TX3927_PCFG_SELDMA(1));
+
+ tx3927_setup();
+
+ /* PIO[15:12] connected to LEDs */
+ __raw_writel(0x0000f000, &tx3927_pioptr->dir);
+
+ jmr3927_pci_setup();
+
+ /* SIO0 DTR on */
+ jmr3927_ioc_reg_out(0, JMR3927_IOC_DTR_ADDR);
+
+ jmr3927_led_set(0);
+
+ pr_info("JMR-TX3927 (Rev %d) --- IOC(Rev %d) DIPSW:%d,%d,%d,%d\n",
+ jmr3927_ioc_reg_in(JMR3927_IOC_BREV_ADDR) & JMR3927_REV_MASK,
+ jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR) & JMR3927_REV_MASK,
+ jmr3927_dipsw1(), jmr3927_dipsw2(),
+ jmr3927_dipsw3(), jmr3927_dipsw4());
+}
+
+/* This trick makes the rtc-ds1742 driver usable as-is. */
+static unsigned long jmr3927_swizzle_addr_b(unsigned long port)
+{
+ if ((port & 0xffff0000) != JMR3927_IOC_NVRAMB_ADDR)
+ return port;
+ port = (port & 0xffff0000) | (port & 0x7fff << 1);
+#ifdef __BIG_ENDIAN
+ return port;
+#else
+ return port | 1;
+#endif
+}
+
+static void __init jmr3927_rtc_init(void)
+{
+ static struct resource __initdata res = {
+ .start = JMR3927_IOC_NVRAMB_ADDR - IO_BASE,
+ .end = JMR3927_IOC_NVRAMB_ADDR - IO_BASE + 0x800 - 1,
+ .flags = IORESOURCE_MEM,
+ };
+ platform_device_register_simple("rtc-ds1742", -1, &res, 1);
+}
+
+static void __init jmr3927_mtd_init(void)
+{
+ int i;
+
+ for (i = 0; i < 2; i++)
+ tx3927_mtd_init(i);
+}
+
+static void __init jmr3927_device_init(void)
+{
+ unsigned long iocled_base = JMR3927_IOC_LED_ADDR - IO_BASE;
+#ifdef __LITTLE_ENDIAN
+ iocled_base |= 1;
+#endif
+ __swizzle_addr_b = jmr3927_swizzle_addr_b;
+ jmr3927_rtc_init();
+ tx3927_wdt_init();
+ jmr3927_mtd_init();
+ txx9_iocled_init(iocled_base, -1, 8, 1, "green", NULL);
+}
+
+static void __init jmr3927_arch_init(void)
+{
+ txx9_gpio_init(TX3927_PIO_REG, 0, 16);
+
+ gpio_request(11, "dipsw1");
+ gpio_request(10, "dipsw2");
+}
+
+struct txx9_board_vec jmr3927_vec __initdata = {
+ .system = "Toshiba JMR_TX3927",
+ .prom_init = jmr3927_prom_init,
+ .mem_setup = jmr3927_mem_setup,
+ .irq_setup = jmr3927_irq_setup,
+ .time_init = jmr3927_time_init,
+ .device_init = jmr3927_device_init,
+ .arch_init = jmr3927_arch_init,
+#ifdef CONFIG_PCI
+ .pci_map_irq = jmr3927_pci_map_irq,
+#endif
+};
diff --git a/arch/mips/txx9/rbtx4927/Makefile b/arch/mips/txx9/rbtx4927/Makefile
new file mode 100644
index 000000000..08a02aebd
--- /dev/null
+++ b/arch/mips/txx9/rbtx4927/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += prom.o setup.o irq.o
diff --git a/arch/mips/txx9/rbtx4927/irq.c b/arch/mips/txx9/rbtx4927/irq.c
new file mode 100644
index 000000000..3f48292c9
--- /dev/null
+++ b/arch/mips/txx9/rbtx4927/irq.c
@@ -0,0 +1,198 @@
+/*
+ * Toshiba RBTX4927 specific interrupt handlers
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * Copyright 2001-2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+/*
+ * I8259A_IRQ_BASE+00
+ * I8259A_IRQ_BASE+01 PS2/Keyboard
+ * I8259A_IRQ_BASE+02 Cascade RBTX4927-ISA (irqs 8-15)
+ * I8259A_IRQ_BASE+03
+ * I8259A_IRQ_BASE+04
+ * I8259A_IRQ_BASE+05
+ * I8259A_IRQ_BASE+06
+ * I8259A_IRQ_BASE+07
+ * I8259A_IRQ_BASE+08
+ * I8259A_IRQ_BASE+09
+ * I8259A_IRQ_BASE+10
+ * I8259A_IRQ_BASE+11
+ * I8259A_IRQ_BASE+12 PS2/Mouse (not supported at this time)
+ * I8259A_IRQ_BASE+13
+ * I8259A_IRQ_BASE+14 IDE
+ * I8259A_IRQ_BASE+15
+ *
+ * MIPS_CPU_IRQ_BASE+00 Software 0
+ * MIPS_CPU_IRQ_BASE+01 Software 1
+ * MIPS_CPU_IRQ_BASE+02 Cascade TX4927-CP0
+ * MIPS_CPU_IRQ_BASE+03 Multiplexed -- do not use
+ * MIPS_CPU_IRQ_BASE+04 Multiplexed -- do not use
+ * MIPS_CPU_IRQ_BASE+05 Multiplexed -- do not use
+ * MIPS_CPU_IRQ_BASE+06 Multiplexed -- do not use
+ * MIPS_CPU_IRQ_BASE+07 CPU TIMER
+ *
+ * TXX9_IRQ_BASE+00
+ * TXX9_IRQ_BASE+01
+ * TXX9_IRQ_BASE+02
+ * TXX9_IRQ_BASE+03 Cascade RBTX4927-IOC
+ * TXX9_IRQ_BASE+04
+ * TXX9_IRQ_BASE+05 RBTX4927 RTL-8019AS ethernet
+ * TXX9_IRQ_BASE+06
+ * TXX9_IRQ_BASE+07
+ * TXX9_IRQ_BASE+08 TX4927 SerialIO Channel 0
+ * TXX9_IRQ_BASE+09 TX4927 SerialIO Channel 1
+ * TXX9_IRQ_BASE+10
+ * TXX9_IRQ_BASE+11
+ * TXX9_IRQ_BASE+12
+ * TXX9_IRQ_BASE+13
+ * TXX9_IRQ_BASE+14
+ * TXX9_IRQ_BASE+15
+ * TXX9_IRQ_BASE+16 TX4927 PCI PCI-C
+ * TXX9_IRQ_BASE+17
+ * TXX9_IRQ_BASE+18
+ * TXX9_IRQ_BASE+19
+ * TXX9_IRQ_BASE+20
+ * TXX9_IRQ_BASE+21
+ * TXX9_IRQ_BASE+22 TX4927 PCI PCI-ERR
+ * TXX9_IRQ_BASE+23 TX4927 PCI PCI-PMA (not used)
+ * TXX9_IRQ_BASE+24
+ * TXX9_IRQ_BASE+25
+ * TXX9_IRQ_BASE+26
+ * TXX9_IRQ_BASE+27
+ * TXX9_IRQ_BASE+28
+ * TXX9_IRQ_BASE+29
+ * TXX9_IRQ_BASE+30
+ * TXX9_IRQ_BASE+31
+ *
+ * RBTX4927_IRQ_IOC+00 FPCIB0 PCI-D (SouthBridge)
+ * RBTX4927_IRQ_IOC+01 FPCIB0 PCI-C (SouthBridge)
+ * RBTX4927_IRQ_IOC+02 FPCIB0 PCI-B (SouthBridge/IDE/pin=1,INTR)
+ * RBTX4927_IRQ_IOC+03 FPCIB0 PCI-A (SouthBridge/USB/pin=4)
+ * RBTX4927_IRQ_IOC+04
+ * RBTX4927_IRQ_IOC+05
+ * RBTX4927_IRQ_IOC+06
+ * RBTX4927_IRQ_IOC+07
+ *
+ * NOTES:
+ * SouthBridge/INTR is mapped to SouthBridge/A=PCI-B/#58
+ * SouthBridge/ISA/pin=0 no pci irq used by this device
+ * SouthBridge/IDE/pin=1 no pci irq used by this device, using INTR
+ * via ISA IRQ14
+ * SouthBridge/USB/pin=4 using pci irq SouthBridge/D=PCI-A=#59
+ * SouthBridge/PMC/pin=0 no pci irq used by this device
+ * SuperIO/PS2/Keyboard, using INTR via ISA IRQ1
+ * SuperIO/PS2/Mouse, using INTR via ISA IRQ12 (mouse not currently supported)
+ * JP7 is not bus master -- do NOT use -- only 4 pci bus master's
+ * allowed -- SouthBridge, JP4, JP5, JP6
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/io.h>
+#include <asm/mipsregs.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/rbtx4927.h>
+
+static int toshiba_rbtx4927_irq_nested(int sw_irq)
+{
+ u8 level3;
+
+ level3 = readb(rbtx4927_imstat_addr) & 0x1f;
+ if (unlikely(!level3))
+ return -1;
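+ /* __fls8() picks the highest-numbered pending IOC bit first */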
+ return RBTX4927_IRQ_IOC + __fls8(level3);
+}
+
+static void toshiba_rbtx4927_irq_ioc_enable(struct irq_data *d)
+{
+ unsigned char v;
+
+ v = readb(rbtx4927_imask_addr);
+ v |= (1 << (d->irq - RBTX4927_IRQ_IOC));
+ writeb(v, rbtx4927_imask_addr);
+}
+
+static void toshiba_rbtx4927_irq_ioc_disable(struct irq_data *d)
+{
+ unsigned char v;
+
+ v = readb(rbtx4927_imask_addr);
+ v &= ~(1 << (d->irq - RBTX4927_IRQ_IOC));
+ writeb(v, rbtx4927_imask_addr);
+ mmiowb();
+}
+
+#define TOSHIBA_RBTX4927_IOC_NAME "RBTX4927-IOC"
+static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
+ .name = TOSHIBA_RBTX4927_IOC_NAME,
+ .irq_mask = toshiba_rbtx4927_irq_ioc_disable,
+ .irq_unmask = toshiba_rbtx4927_irq_ioc_enable,
+};
+
+static void __init toshiba_rbtx4927_irq_ioc_init(void)
+{
+ int i;
+
+ /* mask all IOC interrupts */
+ writeb(0, rbtx4927_imask_addr);
+ /* clear SoftInt interrupts */
+ writeb(0, rbtx4927_softint_addr);
+
+ for (i = RBTX4927_IRQ_IOC;
+ i < RBTX4927_IRQ_IOC + RBTX4927_NR_IRQ_IOC; i++)
+ irq_set_chip_and_handler(i, &toshiba_rbtx4927_irq_ioc_type,
+ handle_level_irq);
+ irq_set_chained_handler(RBTX4927_IRQ_IOCINT, handle_simple_irq);
+}
+
+static int rbtx4927_irq_dispatch(int pending)
+{
+ int irq;
+
+ if (pending & STATUSF_IP7) /* cpu timer */
+ irq = MIPS_CPU_IRQ_BASE + 7;
+ else if (pending & STATUSF_IP2) { /* tx4927 pic */
+ irq = txx9_irq();
+ if (irq == RBTX4927_IRQ_IOCINT)
+ irq = toshiba_rbtx4927_irq_nested(irq);
+ } else if (pending & STATUSF_IP0) /* user line 0 */
+ irq = MIPS_CPU_IRQ_BASE + 0;
+ else if (pending & STATUSF_IP1) /* user line 1 */
+ irq = MIPS_CPU_IRQ_BASE + 1;
+ else
+ irq = -1;
+ return irq;
+}
+
+void __init rbtx4927_irq_setup(void)
+{
+ txx9_irq_dispatch = rbtx4927_irq_dispatch;
+ tx4927_irq_init();
+ toshiba_rbtx4927_irq_ioc_init();
+ /* Onboard 10M Ether: High Active */
+ irq_set_irq_type(RBTX4927_RTL_8019_IRQ, IRQF_TRIGGER_HIGH);
+}
diff --git a/arch/mips/txx9/rbtx4927/prom.c b/arch/mips/txx9/rbtx4927/prom.c
new file mode 100644
index 000000000..9b4acff82
--- /dev/null
+++ b/arch/mips/txx9/rbtx4927/prom.c
@@ -0,0 +1,42 @@
+/*
+ * rbtx4927 specific prom routines
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * Copyright 2001-2002 MontaVista Software Inc.
+ *
+ * Copyright (C) 2004 MontaVista Software Inc.
+ * Author: Manish Lachwani, mlachwani@mvista.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/rbtx4927.h>
+
+void __init rbtx4927_prom_init(void)
+{
+ memblock_add(0, tx4927_get_mem_size());
+ txx9_sio_putchar_init(TX4927_SIO_REG(0) & 0xfffffffffULL);
+}
diff --git a/arch/mips/txx9/rbtx4927/setup.c b/arch/mips/txx9/rbtx4927/setup.c
new file mode 100644
index 000000000..31955c1d5
--- /dev/null
+++ b/arch/mips/txx9/rbtx4927/setup.c
@@ -0,0 +1,380 @@
+/*
+ * Toshiba rbtx4927 specific setup
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * Copyright 2001-2002 MontaVista Software Inc.
+ *
+ * Copyright (C) 1996, 97, 2001, 04 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2000 RidgeRun, Inc.
+ * Author: RidgeRun, Inc.
+ * glonnon@ridgerun.com, skranz@ridgerun.com, stevej@ridgerun.com
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: jsun@mvista.com or jsun@junsun.net
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ * Author: Michael Pruznick, michael_pruznick@mvista.com
+ *
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * Copyright (C) 2004 MontaVista Software Inc.
+ * Author: Manish Lachwani, mlachwani@mvista.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/leds.h>
+#include <asm/io.h>
+#include <asm/reboot.h>
+#include <asm/txx9pio.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/pci.h>
+#include <asm/txx9/rbtx4927.h>
+#include <asm/txx9/tx4938.h> /* for TX4937 */
+
+#ifdef CONFIG_PCI
+static void __init tx4927_pci_setup(void)
+{
+ int extarb = !(__raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCIARB);
+ struct pci_controller *c = &txx9_primary_pcic;
+
+ register_pci_controller(c);
+
+ if (__raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCI66)
+ txx9_pci_option =
+ (txx9_pci_option & ~TXX9_PCI_OPT_CLK_MASK) |
+ TXX9_PCI_OPT_CLK_66; /* already configured */
+
+ /* Reset PCI Bus */
+ writeb(1, rbtx4927_pcireset_addr);
+ /* Reset PCIC */
+ txx9_set64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST);
+ if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
+ TXX9_PCI_OPT_CLK_66)
+ tx4927_pciclk66_setup();
+ mdelay(10);
+ /* clear PCIC reset */
+ txx9_clear64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST);
+ writeb(0, rbtx4927_pcireset_addr);
+ iob();
+
+ tx4927_report_pciclk();
+ tx4927_pcic_setup(tx4927_pcicptr, c, extarb);
+ if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
+ TXX9_PCI_OPT_CLK_AUTO &&
+ txx9_pci66_check(c, 0, 0)) {
+ /* Reset PCI Bus */
+ writeb(1, rbtx4927_pcireset_addr);
+ /* Reset PCIC */
+ txx9_set64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST);
+ tx4927_pciclk66_setup();
+ mdelay(10);
+ /* clear PCIC reset */
+ txx9_clear64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST);
+ writeb(0, rbtx4927_pcireset_addr);
+ iob();
+ /* Reinitialize PCIC */
+ tx4927_report_pciclk();
+ tx4927_pcic_setup(tx4927_pcicptr, c, extarb);
+ }
+ tx4927_setup_pcierr_irq();
+}
+
+static void __init tx4937_pci_setup(void)
+{
+ int extarb = !(__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCIARB);
+ struct pci_controller *c = &txx9_primary_pcic;
+
+ register_pci_controller(c);
+
+ if (__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCI66)
+ txx9_pci_option =
+ (txx9_pci_option & ~TXX9_PCI_OPT_CLK_MASK) |
+ TXX9_PCI_OPT_CLK_66; /* already configured */
+
+ /* Reset PCI Bus */
+ writeb(1, rbtx4927_pcireset_addr);
+ /* Reset PCIC */
+ txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
+ if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
+ TXX9_PCI_OPT_CLK_66)
+ tx4938_pciclk66_setup();
+ mdelay(10);
+ /* clear PCIC reset */
+ txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
+ writeb(0, rbtx4927_pcireset_addr);
+ iob();
+
+ tx4938_report_pciclk();
+ tx4927_pcic_setup(tx4938_pcicptr, c, extarb);
+ if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
+ TXX9_PCI_OPT_CLK_AUTO &&
+ txx9_pci66_check(c, 0, 0)) {
+ /* Reset PCI Bus */
+ writeb(1, rbtx4927_pcireset_addr);
+ /* Reset PCIC */
+ txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
+ tx4938_pciclk66_setup();
+ mdelay(10);
+ /* clear PCIC reset */
+ txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
+ writeb(0, rbtx4927_pcireset_addr);
+ iob();
+ /* Reinitialize PCIC */
+ tx4938_report_pciclk();
+ tx4927_pcic_setup(tx4938_pcicptr, c, extarb);
+ }
+ tx4938_setup_pcierr_irq();
+}
+#else
+static inline void tx4927_pci_setup(void) {}
+static inline void tx4937_pci_setup(void) {}
+#endif /* CONFIG_PCI */
+
+static void __init rbtx4927_gpio_init(void)
+{
+ /* TX4927-SIO DTR on (PIO[15]) */
+ gpio_request(15, "sio-dtr");
+ gpio_direction_output(15, 1);
+
+ tx4927_sio_init(0, 0);
+}
+
+static void __init rbtx4927_arch_init(void)
+{
+ txx9_gpio_init(TX4927_PIO_REG & 0xfffffffffULL, 0, TX4927_NUM_PIO);
+
+ rbtx4927_gpio_init();
+
+ tx4927_pci_setup();
+}
+
+static void __init rbtx4937_arch_init(void)
+{
+ txx9_gpio_init(TX4938_PIO_REG & 0xfffffffffULL, 0, TX4938_NUM_PIO);
+
+ rbtx4927_gpio_init();
+
+ tx4937_pci_setup();
+}
+
+static void toshiba_rbtx4927_restart(char *command)
+{
+ /* enable the s/w reset register */
+ writeb(1, rbtx4927_softresetlock_addr);
+
+ /* wait for enable to be seen */
+ while (!(readb(rbtx4927_softresetlock_addr) & 1))
+ ;
+
+ /* do a s/w reset */
+ writeb(1, rbtx4927_softreset_addr);
+
+ /* fallback */
+ (*_machine_halt)();
+}
+
+static void __init rbtx4927_clock_init(void);
+static void __init rbtx4937_clock_init(void);
+
+static void __init rbtx4927_mem_setup(void)
+{
+ if (TX4927_REV_PCODE() == 0x4927) {
+ rbtx4927_clock_init();
+ tx4927_setup();
+ } else {
+ rbtx4937_clock_init();
+ tx4938_setup();
+ }
+
+ _machine_restart = toshiba_rbtx4927_restart;
+
+#ifdef CONFIG_PCI
+ txx9_alloc_pci_controller(&txx9_primary_pcic,
+ RBTX4927_PCIMEM, RBTX4927_PCIMEM_SIZE,
+ RBTX4927_PCIIO, RBTX4927_PCIIO_SIZE);
+ txx9_board_pcibios_setup = tx4927_pcibios_setup;
+#else
+ set_io_port_base(KSEG1 + RBTX4927_ISA_IO_OFFSET);
+#endif
+}
+
+static void __init rbtx4927_clock_init(void)
+{
+ /*
+ * ASSUMPTION: PCIDIVMODE is configured for PCI 33MHz or 66MHz.
+ *
+ * For TX4927:
+ * PCIDIVMODE[12:11]'s initial value is given by S9[4:3] (ON:0, OFF:1).
+ * CPU 166MHz: PCI 66MHz : PCIDIVMODE: 00 (1/2.5)
+ * CPU 200MHz: PCI 66MHz : PCIDIVMODE: 01 (1/3)
+ * CPU 166MHz: PCI 33MHz : PCIDIVMODE: 10 (1/5)
+ * CPU 200MHz: PCI 33MHz : PCIDIVMODE: 11 (1/6)
+ * i.e. S9[3]: ON (83MHz), OFF (100MHz)
+ */
+ switch ((unsigned long)__raw_readq(&tx4927_ccfgptr->ccfg) &
+ TX4927_CCFG_PCIDIVMODE_MASK) {
+ case TX4927_CCFG_PCIDIVMODE_2_5:
+ case TX4927_CCFG_PCIDIVMODE_5:
+ txx9_cpu_clock = 166666666; /* 166MHz */
+ break;
+ default:
+ txx9_cpu_clock = 200000000; /* 200MHz */
+ }
+}
+
+static void __init rbtx4937_clock_init(void)
+{
+ /*
+ * ASSUMPTION: PCIDIVMODE is configured for PCI 33MHz or 66MHz.
+ *
+ * For TX4937:
+ * PCIDIVMODE[12:11]'s initial value is given by S1[5:4] (ON:0, OFF:1)
+ * PCIDIVMODE[10] is 0.
+ * CPU 266MHz: PCI 33MHz : PCIDIVMODE: 000 (1/8)
+ * CPU 266MHz: PCI 66MHz : PCIDIVMODE: 001 (1/4)
+ * CPU 300MHz: PCI 33MHz : PCIDIVMODE: 010 (1/9)
+ * CPU 300MHz: PCI 66MHz : PCIDIVMODE: 011 (1/4.5)
+ * CPU 333MHz: PCI 33MHz : PCIDIVMODE: 100 (1/10)
+ * CPU 333MHz: PCI 66MHz : PCIDIVMODE: 101 (1/5)
+ */
+ switch ((unsigned long)__raw_readq(&tx4938_ccfgptr->ccfg) &
+ TX4938_CCFG_PCIDIVMODE_MASK) {
+ case TX4938_CCFG_PCIDIVMODE_8:
+ case TX4938_CCFG_PCIDIVMODE_4:
+ txx9_cpu_clock = 266666666; /* 266MHz */
+ break;
+ case TX4938_CCFG_PCIDIVMODE_9:
+ case TX4938_CCFG_PCIDIVMODE_4_5:
+ txx9_cpu_clock = 300000000; /* 300MHz */
+ break;
+ default:
+ txx9_cpu_clock = 333333333; /* 333MHz */
+ }
+}
+
+static void __init rbtx4927_time_init(void)
+{
+ tx4927_time_init(0);
+}
+
+static void __init toshiba_rbtx4927_rtc_init(void)
+{
+ struct resource res = {
+ .start = RBTX4927_BRAMRTC_BASE - IO_BASE,
+ .end = RBTX4927_BRAMRTC_BASE - IO_BASE + 0x800 - 1,
+ .flags = IORESOURCE_MEM,
+ };
+ platform_device_register_simple("rtc-ds1742", -1, &res, 1);
+}
+
+static void __init rbtx4927_ne_init(void)
+{
+ struct resource res[] = {
+ {
+ .start = RBTX4927_RTL_8019_BASE,
+ .end = RBTX4927_RTL_8019_BASE + 0x20 - 1,
+ .flags = IORESOURCE_IO,
+ }, {
+ .start = RBTX4927_RTL_8019_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+ };
+ platform_device_register_simple("ne", -1, res, ARRAY_SIZE(res));
+}
+
+static void __init rbtx4927_mtd_init(void)
+{
+ int i;
+
+ for (i = 0; i < 2; i++)
+ tx4927_mtd_init(i);
+}
+
+static void __init rbtx4927_gpioled_init(void)
+{
+ static const struct gpio_led leds[] = {
+ { .name = "gpioled:green:0", .gpio = 0, .active_low = 1, },
+ { .name = "gpioled:green:1", .gpio = 1, .active_low = 1, },
+ };
+ static struct gpio_led_platform_data pdata = {
+ .num_leds = ARRAY_SIZE(leds),
+ .leds = leds,
+ };
+ struct platform_device *pdev = platform_device_alloc("leds-gpio", 0);
+
+ if (!pdev)
+ return;
+ pdev->dev.platform_data = &pdata;
+ if (platform_device_add(pdev))
+ platform_device_put(pdev);
+}
+
+static void __init rbtx4927_device_init(void)
+{
+ toshiba_rbtx4927_rtc_init();
+ rbtx4927_ne_init();
+ tx4927_wdt_init();
+ rbtx4927_mtd_init();
+ if (TX4927_REV_PCODE() == 0x4927) {
+ tx4927_dmac_init(2);
+ tx4927_aclc_init(0, 1);
+ } else {
+ tx4938_dmac_init(0, 2);
+ tx4938_aclc_init();
+ }
+ platform_device_register_simple("txx9aclc-generic", -1, NULL, 0);
+ txx9_iocled_init(RBTX4927_LED_ADDR - IO_BASE, -1, 3, 1, "green", NULL);
+ rbtx4927_gpioled_init();
+}
+
+struct txx9_board_vec rbtx4927_vec __initdata = {
+ .system = "Toshiba RBTX4927",
+ .prom_init = rbtx4927_prom_init,
+ .mem_setup = rbtx4927_mem_setup,
+ .irq_setup = rbtx4927_irq_setup,
+ .time_init = rbtx4927_time_init,
+ .device_init = rbtx4927_device_init,
+ .arch_init = rbtx4927_arch_init,
+#ifdef CONFIG_PCI
+ .pci_map_irq = rbtx4927_pci_map_irq,
+#endif
+};
+struct txx9_board_vec rbtx4937_vec __initdata = {
+ .system = "Toshiba RBTX4937",
+ .prom_init = rbtx4927_prom_init,
+ .mem_setup = rbtx4927_mem_setup,
+ .irq_setup = rbtx4927_irq_setup,
+ .time_init = rbtx4927_time_init,
+ .device_init = rbtx4927_device_init,
+ .arch_init = rbtx4937_arch_init,
+#ifdef CONFIG_PCI
+ .pci_map_irq = rbtx4927_pci_map_irq,
+#endif
+};
diff --git a/arch/mips/txx9/rbtx4938/Makefile b/arch/mips/txx9/rbtx4938/Makefile
new file mode 100644
index 000000000..08a02aebd
--- /dev/null
+++ b/arch/mips/txx9/rbtx4938/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += prom.o setup.o irq.o
diff --git a/arch/mips/txx9/rbtx4938/irq.c b/arch/mips/txx9/rbtx4938/irq.c
new file mode 100644
index 000000000..58cd7a927
--- /dev/null
+++ b/arch/mips/txx9/rbtx4938/irq.c
@@ -0,0 +1,157 @@
+/*
+ * Toshiba RBTX4938 specific interrupt handlers
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
+ */
+
+/*
+ * MIPS_CPU_IRQ_BASE+00 Software 0
+ * MIPS_CPU_IRQ_BASE+01 Software 1
+ * MIPS_CPU_IRQ_BASE+02 Cascade TX4938-CP0
+ * MIPS_CPU_IRQ_BASE+03 Multiplexed -- do not use
+ * MIPS_CPU_IRQ_BASE+04 Multiplexed -- do not use
+ * MIPS_CPU_IRQ_BASE+05 Multiplexed -- do not use
+ * MIPS_CPU_IRQ_BASE+06 Multiplexed -- do not use
+ * MIPS_CPU_IRQ_BASE+07 CPU TIMER
+ *
+ * TXX9_IRQ_BASE+00
+ * TXX9_IRQ_BASE+01
+ * TXX9_IRQ_BASE+02 Cascade RBTX4938-IOC
+ * TXX9_IRQ_BASE+03 RBTX4938 RTL-8019AS Ethernet
+ * TXX9_IRQ_BASE+04
+ * TXX9_IRQ_BASE+05 TX4938 ETH1
+ * TXX9_IRQ_BASE+06 TX4938 ETH0
+ * TXX9_IRQ_BASE+07
+ * TXX9_IRQ_BASE+08 TX4938 SIO 0
+ * TXX9_IRQ_BASE+09 TX4938 SIO 1
+ * TXX9_IRQ_BASE+10 TX4938 DMA0
+ * TXX9_IRQ_BASE+11 TX4938 DMA1
+ * TXX9_IRQ_BASE+12 TX4938 DMA2
+ * TXX9_IRQ_BASE+13 TX4938 DMA3
+ * TXX9_IRQ_BASE+14
+ * TXX9_IRQ_BASE+15
+ * TXX9_IRQ_BASE+16 TX4938 PCIC
+ * TXX9_IRQ_BASE+17 TX4938 TMR0
+ * TXX9_IRQ_BASE+18 TX4938 TMR1
+ * TXX9_IRQ_BASE+19 TX4938 TMR2
+ * TXX9_IRQ_BASE+20
+ * TXX9_IRQ_BASE+21
+ * TXX9_IRQ_BASE+22 TX4938 PCIERR
+ * TXX9_IRQ_BASE+23
+ * TXX9_IRQ_BASE+24
+ * TXX9_IRQ_BASE+25
+ * TXX9_IRQ_BASE+26
+ * TXX9_IRQ_BASE+27
+ * TXX9_IRQ_BASE+28
+ * TXX9_IRQ_BASE+29
+ * TXX9_IRQ_BASE+30
+ * TXX9_IRQ_BASE+31 TX4938 SPI
+ *
+ * RBTX4938_IRQ_IOC+00 PCI-D
+ * RBTX4938_IRQ_IOC+01 PCI-C
+ * RBTX4938_IRQ_IOC+02 PCI-B
+ * RBTX4938_IRQ_IOC+03 PCI-A
+ * RBTX4938_IRQ_IOC+04 RTC
+ * RBTX4938_IRQ_IOC+05 ATA
+ * RBTX4938_IRQ_IOC+06 MODEM
+ * RBTX4938_IRQ_IOC+07 SWINT
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/mipsregs.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/rbtx4938.h>
+
+static int toshiba_rbtx4938_irq_nested(int sw_irq)
+{
+ u8 level3;
+
+ level3 = readb(rbtx4938_imstat_addr);
+ if (unlikely(!level3))
+ return -1;
+ /* must use fls so onboard ATA has priority */
+ return RBTX4938_IRQ_IOC + __fls8(level3);
+}
+
+static void toshiba_rbtx4938_irq_ioc_enable(struct irq_data *d)
+{
+ unsigned char v;
+
+ v = readb(rbtx4938_imask_addr);
+ v |= (1 << (d->irq - RBTX4938_IRQ_IOC));
+ writeb(v, rbtx4938_imask_addr);
+ mmiowb();
+}
+
+static void toshiba_rbtx4938_irq_ioc_disable(struct irq_data *d)
+{
+ unsigned char v;
+
+ v = readb(rbtx4938_imask_addr);
+ v &= ~(1 << (d->irq - RBTX4938_IRQ_IOC));
+ writeb(v, rbtx4938_imask_addr);
+ mmiowb();
+}
+
+#define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC"
+static struct irq_chip toshiba_rbtx4938_irq_ioc_type = {
+ .name = TOSHIBA_RBTX4938_IOC_NAME,
+ .irq_mask = toshiba_rbtx4938_irq_ioc_disable,
+ .irq_unmask = toshiba_rbtx4938_irq_ioc_enable,
+};
+
+static int rbtx4938_irq_dispatch(int pending)
+{
+ int irq;
+
+ if (pending & STATUSF_IP7)
+ irq = MIPS_CPU_IRQ_BASE + 7;
+ else if (pending & STATUSF_IP2) {
+ irq = txx9_irq();
+ if (irq == RBTX4938_IRQ_IOCINT)
+ irq = toshiba_rbtx4938_irq_nested(irq);
+ } else if (pending & STATUSF_IP1)
+ irq = MIPS_CPU_IRQ_BASE + 1;
+ else if (pending & STATUSF_IP0)
+ irq = MIPS_CPU_IRQ_BASE + 0;
+ else
+ irq = -1;
+ return irq;
+}
+
+static void __init toshiba_rbtx4938_irq_ioc_init(void)
+{
+ int i;
+
+ for (i = RBTX4938_IRQ_IOC;
+ i < RBTX4938_IRQ_IOC + RBTX4938_NR_IRQ_IOC; i++)
+ irq_set_chip_and_handler(i, &toshiba_rbtx4938_irq_ioc_type,
+ handle_level_irq);
+
+ irq_set_chained_handler(RBTX4938_IRQ_IOCINT, handle_simple_irq);
+}
+
+void __init rbtx4938_irq_setup(void)
+{
+ txx9_irq_dispatch = rbtx4938_irq_dispatch;
+ /* At this point interrupt control is disabled, */
+ /* all IRC interrupts are masked, */
+ /* and all IRC interrupt modes are Low Active. */
+
+ /* mask all IOC interrupts */
+ writeb(0, rbtx4938_imask_addr);
+
+ /* clear SoftInt interrupts */
+ writeb(0, rbtx4938_softint_addr);
+ tx4938_irq_init();
+ toshiba_rbtx4938_irq_ioc_init();
+ /* Onboard 10M Ether: High Active */
+ irq_set_irq_type(RBTX4938_IRQ_ETHER, IRQF_TRIGGER_HIGH);
+}
diff --git a/arch/mips/txx9/rbtx4938/prom.c b/arch/mips/txx9/rbtx4938/prom.c
new file mode 100644
index 000000000..0de84716a
--- /dev/null
+++ b/arch/mips/txx9/rbtx4938/prom.c
@@ -0,0 +1,22 @@
+/*
+ * rbtx4938 specific prom routines
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
+ */
+
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/rbtx4938.h>
+
+void __init rbtx4938_prom_init(void)
+{
+ memblock_add(0, tx4938_get_mem_size());
+ txx9_sio_putchar_init(TX4938_SIO_REG(0) & 0xfffffffffULL);
+}
diff --git a/arch/mips/txx9/rbtx4938/setup.c b/arch/mips/txx9/rbtx4938/setup.c
new file mode 100644
index 000000000..e68eb2e7c
--- /dev/null
+++ b/arch/mips/txx9/rbtx4938/setup.c
@@ -0,0 +1,372 @@
+/*
+ * Setup pointers to hardware-dependent routines.
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio.h>
+#include <linux/mtd/physmap.h>
+
+#include <asm/reboot.h>
+#include <asm/io.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/pci.h>
+#include <asm/txx9/rbtx4938.h>
+#include <linux/spi/spi.h>
+#include <asm/txx9/spi.h>
+#include <asm/txx9pio.h>
+
+static void rbtx4938_machine_restart(char *command)
+{
+ local_irq_disable();
+ writeb(1, rbtx4938_softresetlock_addr);
+ writeb(1, rbtx4938_sfvol_addr);
+ writeb(1, rbtx4938_softreset_addr);
+ /* fallback */
+ (*_machine_halt)();
+}
+
+static void __init rbtx4938_pci_setup(void)
+{
+#ifdef CONFIG_PCI
+ int extarb = !(__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCIARB);
+ struct pci_controller *c = &txx9_primary_pcic;
+
+ register_pci_controller(c);
+
+ if (__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCI66)
+ txx9_pci_option =
+ (txx9_pci_option & ~TXX9_PCI_OPT_CLK_MASK) |
+ TXX9_PCI_OPT_CLK_66; /* already configured */
+
+ /* Reset PCI Bus */
+ writeb(0, rbtx4938_pcireset_addr);
+ /* Reset PCIC */
+ txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
+ if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
+ TXX9_PCI_OPT_CLK_66)
+ tx4938_pciclk66_setup();
+ mdelay(10);
+ /* clear PCIC reset */
+ txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
+ writeb(1, rbtx4938_pcireset_addr);
+ iob();
+
+ tx4938_report_pciclk();
+ tx4927_pcic_setup(tx4938_pcicptr, c, extarb);
+ if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
+ TXX9_PCI_OPT_CLK_AUTO &&
+ txx9_pci66_check(c, 0, 0)) {
+ /* Reset PCI Bus */
+ writeb(0, rbtx4938_pcireset_addr);
+ /* Reset PCIC */
+ txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
+ tx4938_pciclk66_setup();
+ mdelay(10);
+ /* clear PCIC reset */
+ txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
+ writeb(1, rbtx4938_pcireset_addr);
+ iob();
+ /* Reinitialize PCIC */
+ tx4938_report_pciclk();
+ tx4927_pcic_setup(tx4938_pcicptr, c, extarb);
+ }
+
+ if (__raw_readq(&tx4938_ccfgptr->pcfg) &
+ (TX4938_PCFG_ETH0_SEL|TX4938_PCFG_ETH1_SEL)) {
+ /* Reset PCIC1 */
+ txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIC1RST);
+ /* PCI1DMD==0 => PCI1CLK==GBUSCLK/2 => PCI66 */
+ if (!(__raw_readq(&tx4938_ccfgptr->ccfg)
+ & TX4938_CCFG_PCI1DMD))
+ tx4938_ccfg_set(TX4938_CCFG_PCI1_66);
+ mdelay(10);
+ /* clear PCIC1 reset */
+ txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIC1RST);
+ tx4938_report_pci1clk();
+
+ /* mem:64K(max), io:64K(max) (enough for ETH0,ETH1) */
+ c = txx9_alloc_pci_controller(NULL, 0, 0x10000, 0, 0x10000);
+ register_pci_controller(c);
+ tx4927_pcic_setup(tx4938_pcic1ptr, c, 0);
+ }
+ tx4938_setup_pcierr_irq();
+#endif /* CONFIG_PCI */
+}
+
+/* SPI support */
+
+/* chip select for SPI devices */
+#define SEEPROM1_CS 7 /* PIO7 */
+#define SEEPROM2_CS 0 /* IOC */
+#define SEEPROM3_CS 1 /* IOC */
+#define SRTC_CS 2 /* IOC */
+#define SPI_BUSNO 0
+
+static int __init rbtx4938_ethaddr_init(void)
+{
+#ifdef CONFIG_PCI
+ unsigned char dat[17];
+ unsigned char sum;
+ int i;
+
+ /* 0-3: "MAC\0", 4-9:eth0, 10-15:eth1, 16:sum */
+ if (spi_eeprom_read(SPI_BUSNO, SEEPROM1_CS, 0, dat, sizeof(dat))) {
+ pr_err("seeprom: read error.\n");
+ return -ENODEV;
+ } else {
+ if (strcmp(dat, "MAC") != 0)
+ pr_warn("seeprom: bad signature.\n");
+ for (i = 0, sum = 0; i < sizeof(dat); i++)
+ sum += dat[i];
+ if (sum)
+ pr_warn("seeprom: bad checksum.\n");
+ }
+ tx4938_ethaddr_init(&dat[4], &dat[4 + 6]);
+#endif /* CONFIG_PCI */
+ return 0;
+}
+
+static void __init rbtx4938_spi_setup(void)
+{
+ /* set SPI_SEL */
+ txx9_set64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_SPI_SEL);
+}
+
+static struct resource rbtx4938_fpga_resource;
+
+static void __init rbtx4938_time_init(void)
+{
+ tx4938_time_init(0);
+}
+
+static void __init rbtx4938_mem_setup(void)
+{
+ unsigned long long pcfg;
+
+ if (txx9_master_clock == 0)
+ txx9_master_clock = 25000000; /* 25MHz */
+
+ tx4938_setup();
+
+#ifdef CONFIG_PCI
+ txx9_alloc_pci_controller(&txx9_primary_pcic, 0, 0, 0, 0);
+ txx9_board_pcibios_setup = tx4927_pcibios_setup;
+#else
+ set_io_port_base(RBTX4938_ETHER_BASE);
+#endif
+
+ tx4938_sio_init(7372800, 0);
+
+#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_PIO58_61
+ pr_info("PIOSEL: disabling both ATA and NAND selection\n");
+ txx9_clear64(&tx4938_ccfgptr->pcfg,
+ TX4938_PCFG_NDF_SEL | TX4938_PCFG_ATA_SEL);
+#endif
+
+#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_NAND
+ pr_info("PIOSEL: enabling NAND selection\n");
+ txx9_set64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_NDF_SEL);
+ txx9_clear64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_ATA_SEL);
+#endif
+
+#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_ATA
+ pr_info("PIOSEL: enabling ATA selection\n");
+ txx9_set64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_ATA_SEL);
+ txx9_clear64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_NDF_SEL);
+#endif
+
+#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_KEEP
+ pcfg = ____raw_readq(&tx4938_ccfgptr->pcfg);
+ pr_info("PIOSEL: NAND %s, ATA %s\n",
+ (pcfg & TX4938_PCFG_NDF_SEL) ? "enabled" : "disabled",
+ (pcfg & TX4938_PCFG_ATA_SEL) ? "enabled" : "disabled");
+#endif
+
+ rbtx4938_spi_setup();
+ pcfg = ____raw_readq(&tx4938_ccfgptr->pcfg); /* updated */
+ /* fixup piosel */
+ if ((pcfg & (TX4938_PCFG_ATA_SEL | TX4938_PCFG_NDF_SEL)) ==
+ TX4938_PCFG_ATA_SEL)
+ writeb((readb(rbtx4938_piosel_addr) & 0x03) | 0x04,
+ rbtx4938_piosel_addr);
+ else if ((pcfg & (TX4938_PCFG_ATA_SEL | TX4938_PCFG_NDF_SEL)) ==
+ TX4938_PCFG_NDF_SEL)
+ writeb((readb(rbtx4938_piosel_addr) & 0x03) | 0x08,
+ rbtx4938_piosel_addr);
+ else
+ writeb(readb(rbtx4938_piosel_addr) & ~(0x08 | 0x04),
+ rbtx4938_piosel_addr);
+
+ rbtx4938_fpga_resource.name = "FPGA Registers";
+ rbtx4938_fpga_resource.start = CPHYSADDR(RBTX4938_FPGA_REG_ADDR);
+ rbtx4938_fpga_resource.end = CPHYSADDR(RBTX4938_FPGA_REG_ADDR) + 0xffff;
+ rbtx4938_fpga_resource.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&txx9_ce_res[2], &rbtx4938_fpga_resource))
+ pr_err("request resource for fpga failed\n");
+
+ _machine_restart = rbtx4938_machine_restart;
+
+ writeb(0xff, rbtx4938_led_addr);
+ pr_info("RBTX4938 --- FPGA(Rev %02x) DIPSW:%02x,%02x\n",
+ readb(rbtx4938_fpga_rev_addr),
+ readb(rbtx4938_dipsw_addr), readb(rbtx4938_bdipsw_addr));
+}
+
+static void __init rbtx4938_ne_init(void)
+{
+ struct resource res[] = {
+ {
+ .start = RBTX4938_RTL_8019_BASE,
+ .end = RBTX4938_RTL_8019_BASE + 0x20 - 1,
+ .flags = IORESOURCE_IO,
+ }, {
+ .start = RBTX4938_RTL_8019_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+ };
+ platform_device_register_simple("ne", -1, res, ARRAY_SIZE(res));
+}
+
+static DEFINE_SPINLOCK(rbtx4938_spi_gpio_lock);
+
+static void rbtx4938_spi_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ u8 val;
+ unsigned long flags;
+ spin_lock_irqsave(&rbtx4938_spi_gpio_lock, flags);
+ val = readb(rbtx4938_spics_addr);
+ if (value)
+ val |= 1 << offset;
+ else
+ val &= ~(1 << offset);
+ writeb(val, rbtx4938_spics_addr);
+ mmiowb();
+ spin_unlock_irqrestore(&rbtx4938_spi_gpio_lock, flags);
+}
+
+static int rbtx4938_spi_gpio_dir_out(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ rbtx4938_spi_gpio_set(chip, offset, value);
+ return 0;
+}
+
+static struct gpio_chip rbtx4938_spi_gpio_chip = {
+ .set = rbtx4938_spi_gpio_set,
+ .direction_output = rbtx4938_spi_gpio_dir_out,
+ .label = "RBTX4938-SPICS",
+ .base = 16,
+ .ngpio = 3,
+};
+
+static int __init rbtx4938_spi_init(void)
+{
+ struct spi_board_info srtc_info = {
+ .modalias = "rtc-rs5c348",
+ .max_speed_hz = 1000000, /* 1.0Mbps @ Vdd 2.0V */
+ .bus_num = 0,
+ .chip_select = 16 + SRTC_CS,
+ /* Mode 1 (High-Active, Shift-Then-Sample), High Active CS */
+ .mode = SPI_MODE_1 | SPI_CS_HIGH,
+ };
+ spi_register_board_info(&srtc_info, 1);
+ spi_eeprom_register(SPI_BUSNO, SEEPROM1_CS, 128);
+ spi_eeprom_register(SPI_BUSNO, 16 + SEEPROM2_CS, 128);
+ spi_eeprom_register(SPI_BUSNO, 16 + SEEPROM3_CS, 128);
+ gpio_request(16 + SRTC_CS, "rtc-rs5c348");
+ gpio_direction_output(16 + SRTC_CS, 0);
+ gpio_request(SEEPROM1_CS, "seeprom1");
+ gpio_direction_output(SEEPROM1_CS, 1);
+ gpio_request(16 + SEEPROM2_CS, "seeprom2");
+ gpio_direction_output(16 + SEEPROM2_CS, 1);
+ gpio_request(16 + SEEPROM3_CS, "seeprom3");
+ gpio_direction_output(16 + SEEPROM3_CS, 1);
+ tx4938_spi_init(SPI_BUSNO);
+ return 0;
+}
+
+static void __init rbtx4938_mtd_init(void)
+{
+ struct physmap_flash_data pdata = {
+ .width = 4,
+ };
+
+ switch (readb(rbtx4938_bdipsw_addr) & 7) {
+ case 0:
+ /* Boot */
+ txx9_physmap_flash_init(0, 0x1fc00000, 0x400000, &pdata);
+ /* System */
+ txx9_physmap_flash_init(1, 0x1e000000, 0x1000000, &pdata);
+ break;
+ case 1:
+ /* System */
+ txx9_physmap_flash_init(0, 0x1f000000, 0x1000000, &pdata);
+ /* Boot */
+ txx9_physmap_flash_init(1, 0x1ec00000, 0x400000, &pdata);
+ break;
+ case 2:
+ /* Ext */
+ txx9_physmap_flash_init(0, 0x1f000000, 0x1000000, &pdata);
+ /* System */
+ txx9_physmap_flash_init(1, 0x1e000000, 0x1000000, &pdata);
+ /* Boot */
+ txx9_physmap_flash_init(2, 0x1dc00000, 0x400000, &pdata);
+ break;
+ case 3:
+ /* Boot */
+ txx9_physmap_flash_init(1, 0x1bc00000, 0x400000, &pdata);
+ /* System */
+ txx9_physmap_flash_init(2, 0x1a000000, 0x1000000, &pdata);
+ break;
+ }
+}
+
+static void __init rbtx4938_arch_init(void)
+{
+ txx9_gpio_init(TX4938_PIO_REG & 0xfffffffffULL, 0, TX4938_NUM_PIO);
+ gpiochip_add_data(&rbtx4938_spi_gpio_chip, NULL);
+ rbtx4938_pci_setup();
+ rbtx4938_spi_init();
+}
+
+static void __init rbtx4938_device_init(void)
+{
+ rbtx4938_ethaddr_init();
+ rbtx4938_ne_init();
+ tx4938_wdt_init();
+ rbtx4938_mtd_init();
+ /* TC58DVM82A1FT: tDH=10ns, tWP=tRP=tREADID=35ns */
+ tx4938_ndfmc_init(10, 35);
+ tx4938_ata_init(RBTX4938_IRQ_IOC_ATA, 0, 1);
+ tx4938_dmac_init(0, 2);
+ tx4938_aclc_init();
+ platform_device_register_simple("txx9aclc-generic", -1, NULL, 0);
+ tx4938_sramc_init();
+ txx9_iocled_init(RBTX4938_LED_ADDR - IO_BASE, -1, 8, 1, "green", NULL);
+}
+
+struct txx9_board_vec rbtx4938_vec __initdata = {
+ .system = "Toshiba RBTX4938",
+ .prom_init = rbtx4938_prom_init,
+ .mem_setup = rbtx4938_mem_setup,
+ .irq_setup = rbtx4938_irq_setup,
+ .time_init = rbtx4938_time_init,
+ .device_init = rbtx4938_device_init,
+ .arch_init = rbtx4938_arch_init,
+#ifdef CONFIG_PCI
+ .pci_map_irq = rbtx4938_pci_map_irq,
+#endif
+};
diff --git a/arch/mips/txx9/rbtx4939/Makefile b/arch/mips/txx9/rbtx4939/Makefile
new file mode 100644
index 000000000..840496e7a
--- /dev/null
+++ b/arch/mips/txx9/rbtx4939/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += irq.o setup.o prom.o
diff --git a/arch/mips/txx9/rbtx4939/irq.c b/arch/mips/txx9/rbtx4939/irq.c
new file mode 100644
index 000000000..69a80616f
--- /dev/null
+++ b/arch/mips/txx9/rbtx4939/irq.c
@@ -0,0 +1,95 @@
+/*
+ * Toshiba RBTX4939 interrupt routines
+ * Based on linux/arch/mips/txx9/rbtx4938/irq.c,
+ * and RBTX49xx patch from CELF patch archive.
+ *
+ * Copyright (C) 2000-2001,2005-2006 Toshiba Corporation
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/mipsregs.h>
+#include <asm/txx9/rbtx4939.h>
+
+/*
+ * RBTX4939 IOC controller definition
+ */
+
+static void rbtx4939_ioc_irq_unmask(struct irq_data *d)
+{
+ int ioc_nr = d->irq - RBTX4939_IRQ_IOC;
+
+ writeb(readb(rbtx4939_ien_addr) | (1 << ioc_nr), rbtx4939_ien_addr);
+}
+
+static void rbtx4939_ioc_irq_mask(struct irq_data *d)
+{
+ int ioc_nr = d->irq - RBTX4939_IRQ_IOC;
+
+ writeb(readb(rbtx4939_ien_addr) & ~(1 << ioc_nr), rbtx4939_ien_addr);
+ mmiowb();
+}
+
+static struct irq_chip rbtx4939_ioc_irq_chip = {
+ .name = "IOC",
+ .irq_mask = rbtx4939_ioc_irq_mask,
+ .irq_unmask = rbtx4939_ioc_irq_unmask,
+};
+
+
+static inline int rbtx4939_ioc_irqroute(void)
+{
+ unsigned char istat = readb(rbtx4939_ifac2_addr);
+
+ if (unlikely(istat == 0))
+ return -1;
+ return RBTX4939_IRQ_IOC + __fls8(istat);
+}
+
+static int rbtx4939_irq_dispatch(int pending)
+{
+ int irq;
+
+ if (pending & CAUSEF_IP7)
+ return MIPS_CPU_IRQ_BASE + 7;
+ irq = tx4939_irq();
+ if (likely(irq >= 0)) {
+ /* redirect IOC interrupts */
+ switch (irq) {
+ case RBTX4939_IRQ_IOCINT:
+ irq = rbtx4939_ioc_irqroute();
+ break;
+ }
+ } else if (pending & CAUSEF_IP0)
+ irq = MIPS_CPU_IRQ_BASE + 0;
+ else if (pending & CAUSEF_IP1)
+ irq = MIPS_CPU_IRQ_BASE + 1;
+ else
+ irq = -1;
+ return irq;
+}
+
+void __init rbtx4939_irq_setup(void)
+{
+ int i;
+
+ /* mask all IOC interrupts */
+ writeb(0, rbtx4939_ien_addr);
+
+ /* clear SoftInt interrupts */
+ writeb(0, rbtx4939_softint_addr);
+
+ txx9_irq_dispatch = rbtx4939_irq_dispatch;
+
+ tx4939_irq_init();
+ for (i = RBTX4939_IRQ_IOC;
+ i < RBTX4939_IRQ_IOC + RBTX4939_NR_IRQ_IOC; i++)
+ irq_set_chip_and_handler(i, &rbtx4939_ioc_irq_chip,
+ handle_level_irq);
+
+ irq_set_chained_handler(RBTX4939_IRQ_IOCINT, handle_simple_irq);
+}
diff --git a/arch/mips/txx9/rbtx4939/prom.c b/arch/mips/txx9/rbtx4939/prom.c
new file mode 100644
index 000000000..ba25ba1bd
--- /dev/null
+++ b/arch/mips/txx9/rbtx4939/prom.c
@@ -0,0 +1,29 @@
+/*
+ * rbtx4939 specific prom routines
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/rbtx4939.h>
+
+void __init rbtx4939_prom_init(void)
+{
+ unsigned long start, size;
+ u64 win;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (!((__u32)____raw_readq(&tx4939_ddrcptr->winen) & (1 << i)))
+ continue;
+ win = ____raw_readq(&tx4939_ddrcptr->win[i]);
+ start = (unsigned long)(win >> 48);
+ size = (((unsigned long)(win >> 32) & 0xffff) + 1) - start;
+ memblock_add(start << 20, size << 20);
+ }
+ txx9_sio_putchar_init(TX4939_SIO_REG(0) & 0xfffffffffULL);
+}
diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
new file mode 100644
index 000000000..ef29a9c2f
--- /dev/null
+++ b/arch/mips/txx9/rbtx4939/setup.c
@@ -0,0 +1,554 @@
+/*
+ * Toshiba RBTX4939 setup routines.
+ * Based on linux/arch/mips/txx9/rbtx4938/setup.c,
+ * and RBTX49xx patch from CELF patch archive.
+ *
+ * Copyright (C) 2000-2001,2005-2007 Toshiba Corporation
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/interrupt.h>
+#include <linux/smc91x.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/map.h>
+#include <asm/reboot.h>
+#include <asm/txx9/generic.h>
+#include <asm/txx9/pci.h>
+#include <asm/txx9/rbtx4939.h>
+
+static void rbtx4939_machine_restart(char *command)
+{
+ local_irq_disable();
+ writeb(1, rbtx4939_reseten_addr);
+ writeb(1, rbtx4939_softreset_addr);
+ while (1)
+ ;
+}
+
+static void __init rbtx4939_time_init(void)
+{
+ tx4939_time_init(0);
+}
+
+#if defined(__BIG_ENDIAN) && IS_ENABLED(CONFIG_SMC91X)
+#define HAVE_RBTX4939_IOSWAB
+#define IS_CE1_ADDR(addr) \
+ ((((unsigned long)(addr) - IO_BASE) & 0xfff00000) == TXX9_CE(1))
+static u16 rbtx4939_ioswabw(volatile u16 *a, u16 x)
+{
+ return IS_CE1_ADDR(a) ? x : le16_to_cpu(x);
+}
+static u16 rbtx4939_mem_ioswabw(volatile u16 *a, u16 x)
+{
+ return !IS_CE1_ADDR(a) ? x : le16_to_cpu(x);
+}
+#endif /* __BIG_ENDIAN && CONFIG_SMC91X */
+
+static void __init rbtx4939_pci_setup(void)
+{
+#ifdef CONFIG_PCI
+ int extarb = !(__raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_PCIARB);
+ struct pci_controller *c = &txx9_primary_pcic;
+
+ register_pci_controller(c);
+
+ tx4939_report_pciclk();
+ tx4927_pcic_setup(tx4939_pcicptr, c, extarb);
+ if (!(__raw_readq(&tx4939_ccfgptr->pcfg) & TX4939_PCFG_ATA1MODE) &&
+ (__raw_readq(&tx4939_ccfgptr->pcfg) &
+ (TX4939_PCFG_ET0MODE | TX4939_PCFG_ET1MODE))) {
+ tx4939_report_pci1clk();
+
+ /* mem:64K(max), io:64K(max) (enough for ETH0,ETH1) */
+ c = txx9_alloc_pci_controller(NULL, 0, 0x10000, 0, 0x10000);
+ register_pci_controller(c);
+ tx4927_pcic_setup(tx4939_pcic1ptr, c, 0);
+ }
+
+ tx4939_setup_pcierr_irq();
+#endif /* CONFIG_PCI */
+}
+
+static unsigned long long default_ebccr[] __initdata = {
+ 0x01c0000000007608ULL, /* 64M ROM */
+ 0x017f000000007049ULL, /* 1M IOC */
+ 0x0180000000408608ULL, /* ISA */
+ 0,
+};
+
+static void __init rbtx4939_ebusc_setup(void)
+{
+ int i;
+ unsigned int sp;
+
+ /* use user-configured speed */
+ sp = TX4939_EBUSC_CR(0) & 0x30;
+ default_ebccr[0] |= sp;
+ default_ebccr[1] |= sp;
+ default_ebccr[2] |= sp;
+ /* initialise the EBUSC registers ourselves */
+ for (i = 0; i < ARRAY_SIZE(default_ebccr); i++) {
+ if (default_ebccr[i])
+ ____raw_writeq(default_ebccr[i],
+ &tx4939_ebuscptr->cr[i]);
+ else
+ ____raw_writeq(____raw_readq(&tx4939_ebuscptr->cr[i])
+ & ~8,
+ &tx4939_ebuscptr->cr[i]);
+ }
+}
+
+static void __init rbtx4939_update_ioc_pen(void)
+{
+ __u64 pcfg = ____raw_readq(&tx4939_ccfgptr->pcfg);
+ __u64 ccfg = ____raw_readq(&tx4939_ccfgptr->ccfg);
+ __u8 pe1 = readb(rbtx4939_pe1_addr);
+ __u8 pe2 = readb(rbtx4939_pe2_addr);
+ __u8 pe3 = readb(rbtx4939_pe3_addr);
+ if (pcfg & TX4939_PCFG_ATA0MODE)
+ pe1 |= RBTX4939_PE1_ATA(0);
+ else
+ pe1 &= ~RBTX4939_PE1_ATA(0);
+ if (pcfg & TX4939_PCFG_ATA1MODE) {
+ pe1 |= RBTX4939_PE1_ATA(1);
+ pe1 &= ~(RBTX4939_PE1_RMII(0) | RBTX4939_PE1_RMII(1));
+ } else {
+ pe1 &= ~RBTX4939_PE1_ATA(1);
+ if (pcfg & TX4939_PCFG_ET0MODE)
+ pe1 |= RBTX4939_PE1_RMII(0);
+ else
+ pe1 &= ~RBTX4939_PE1_RMII(0);
+ if (pcfg & TX4939_PCFG_ET1MODE)
+ pe1 |= RBTX4939_PE1_RMII(1);
+ else
+ pe1 &= ~RBTX4939_PE1_RMII(1);
+ }
+ if (ccfg & TX4939_CCFG_PTSEL)
+ pe3 &= ~(RBTX4939_PE3_VP | RBTX4939_PE3_VP_P |
+ RBTX4939_PE3_VP_S);
+ else {
+ __u64 vmode = pcfg &
+ (TX4939_PCFG_VSSMODE | TX4939_PCFG_VPSMODE);
+ if (vmode == 0)
+ pe3 &= ~(RBTX4939_PE3_VP | RBTX4939_PE3_VP_P |
+ RBTX4939_PE3_VP_S);
+ else if (vmode == TX4939_PCFG_VPSMODE) {
+ pe3 |= RBTX4939_PE3_VP_P;
+ pe3 &= ~(RBTX4939_PE3_VP | RBTX4939_PE3_VP_S);
+ } else if (vmode == TX4939_PCFG_VSSMODE) {
+ pe3 |= RBTX4939_PE3_VP | RBTX4939_PE3_VP_S;
+ pe3 &= ~RBTX4939_PE3_VP_P;
+ } else {
+ pe3 |= RBTX4939_PE3_VP | RBTX4939_PE3_VP_P;
+ pe3 &= ~RBTX4939_PE3_VP_S;
+ }
+ }
+ if (pcfg & TX4939_PCFG_SPIMODE) {
+ if (pcfg & TX4939_PCFG_SIO2MODE_GPIO)
+ pe2 &= ~(RBTX4939_PE2_SIO2 | RBTX4939_PE2_SIO0);
+ else {
+ if (pcfg & TX4939_PCFG_SIO2MODE_SIO2) {
+ pe2 |= RBTX4939_PE2_SIO2;
+ pe2 &= ~RBTX4939_PE2_SIO0;
+ } else {
+ pe2 |= RBTX4939_PE2_SIO0;
+ pe2 &= ~RBTX4939_PE2_SIO2;
+ }
+ }
+ if (pcfg & TX4939_PCFG_SIO3MODE)
+ pe2 |= RBTX4939_PE2_SIO3;
+ else
+ pe2 &= ~RBTX4939_PE2_SIO3;
+ pe2 &= ~RBTX4939_PE2_SPI;
+ } else {
+ pe2 |= RBTX4939_PE2_SPI;
+ pe2 &= ~(RBTX4939_PE2_SIO3 | RBTX4939_PE2_SIO2 |
+ RBTX4939_PE2_SIO0);
+ }
+ if ((pcfg & TX4939_PCFG_I2SMODE_MASK) == TX4939_PCFG_I2SMODE_GPIO)
+ pe2 |= RBTX4939_PE2_GPIO;
+ else
+ pe2 &= ~RBTX4939_PE2_GPIO;
+ writeb(pe1, rbtx4939_pe1_addr);
+ writeb(pe2, rbtx4939_pe2_addr);
+ writeb(pe3, rbtx4939_pe3_addr);
+}
+
+#define RBTX4939_MAX_7SEGLEDS 8
+
+#if IS_BUILTIN(CONFIG_LEDS_CLASS)
+static u8 led_val[RBTX4939_MAX_7SEGLEDS];
+struct rbtx4939_led_data {
+ struct led_classdev cdev;
+ char name[32];
+ unsigned int num;
+};
+
+/* Use "dot" in 7seg LEDs */
+static void rbtx4939_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct rbtx4939_led_data *led_dat =
+ container_of(led_cdev, struct rbtx4939_led_data, cdev);
+ unsigned int num = led_dat->num;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ led_val[num] = (led_val[num] & 0x7f) | (value ? 0x80 : 0);
+ writeb(led_val[num], rbtx4939_7seg_addr(num / 4, num % 4));
+ local_irq_restore(flags);
+}
+
+static int __init rbtx4939_led_probe(struct platform_device *pdev)
+{
+ struct rbtx4939_led_data *leds_data;
+ int i;
+ static char *default_triggers[] __initdata = {
+ "heartbeat",
+ "disk-activity",
+ "nand-disk",
+ };
+
+ leds_data = kcalloc(RBTX4939_MAX_7SEGLEDS, sizeof(*leds_data),
+ GFP_KERNEL);
+ if (!leds_data)
+ return -ENOMEM;
+ for (i = 0; i < RBTX4939_MAX_7SEGLEDS; i++) {
+ int rc;
+ struct rbtx4939_led_data *led_dat = &leds_data[i];
+
+ led_dat->num = i;
+ led_dat->cdev.brightness_set = rbtx4939_led_brightness_set;
+ sprintf(led_dat->name, "rbtx4939:amber:%u", i);
+ led_dat->cdev.name = led_dat->name;
+ if (i < ARRAY_SIZE(default_triggers))
+ led_dat->cdev.default_trigger = default_triggers[i];
+ rc = led_classdev_register(&pdev->dev, &led_dat->cdev);
+ if (rc < 0)
+ return rc;
+ led_dat->cdev.brightness_set(&led_dat->cdev, 0);
+ }
+ return 0;
+
+}
+
+static struct platform_driver rbtx4939_led_driver = {
+ .driver = {
+ .name = "rbtx4939-led",
+ },
+};
+
+static void __init rbtx4939_led_setup(void)
+{
+ platform_device_register_simple("rbtx4939-led", -1, NULL, 0);
+ platform_driver_probe(&rbtx4939_led_driver, rbtx4939_led_probe);
+}
+#else
+static inline void rbtx4939_led_setup(void)
+{
+}
+#endif
+
+static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val)
+{
+#if IS_BUILTIN(CONFIG_LEDS_CLASS)
+ unsigned long flags;
+ local_irq_save(flags);
+ /* bit7: reserved for LED class */
+ led_val[pos] = (led_val[pos] & 0x80) | (val & 0x7f);
+ val = led_val[pos];
+ local_irq_restore(flags);
+#endif
+ writeb(val, rbtx4939_7seg_addr(pos / 4, pos % 4));
+}
+
+static void rbtx4939_7segled_putc(unsigned int pos, unsigned char val)
+{
+ /* convert from map_to_seg7() notation */
+ val = (val & 0x88) |
+ ((val & 0x40) >> 6) |
+ ((val & 0x20) >> 4) |
+ ((val & 0x10) >> 2) |
+ ((val & 0x04) << 2) |
+ ((val & 0x02) << 4) |
+ ((val & 0x01) << 6);
+ __rbtx4939_7segled_putc(pos, val);
+}
+
+#if IS_ENABLED(CONFIG_MTD_RBTX4939)
+/* special mapping for boot rom */
+static unsigned long rbtx4939_flash_fixup_ofs(unsigned long ofs)
+{
+ u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f;
+ unsigned char shift;
+
+ if (bdipsw & 8) {
+ /* BOOT Mode: USER ROM1 / USER ROM2 */
+ shift = bdipsw & 3;
+ /* rotate A[23:22] */
+ return (ofs & ~0xc00000) | ((((ofs >> 22) + shift) & 3) << 22);
+ }
+#ifdef __BIG_ENDIAN
+ if (bdipsw == 0)
+ /* BOOT Mode: Monitor ROM */
+ ofs ^= 0x400000; /* swap A[22] */
+#endif
+ return ofs;
+}
+
+static map_word rbtx4939_flash_read16(struct map_info *map, unsigned long ofs)
+{
+ map_word r;
+
+ ofs = rbtx4939_flash_fixup_ofs(ofs);
+ r.x[0] = __raw_readw(map->virt + ofs);
+ return r;
+}
+
+static void rbtx4939_flash_write16(struct map_info *map, const map_word datum,
+ unsigned long ofs)
+{
+ ofs = rbtx4939_flash_fixup_ofs(ofs);
+ __raw_writew(datum.x[0], map->virt + ofs);
+ mb(); /* see inline_map_write() in mtd/map.h */
+}
+
+static void rbtx4939_flash_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
+{
+ u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f;
+ unsigned char shift;
+ ssize_t curlen;
+
+ from += (unsigned long)map->virt;
+ if (bdipsw & 8) {
+ /* BOOT Mode: USER ROM1 / USER ROM2 */
+ shift = bdipsw & 3;
+ while (len) {
+ curlen = min_t(unsigned long, len,
+ 0x400000 - (from & (0x400000 - 1)));
+ memcpy(to,
+ (void *)((from & ~0xc00000) |
+ ((((from >> 22) + shift) & 3) << 22)),
+ curlen);
+ len -= curlen;
+ from += curlen;
+ to += curlen;
+ }
+ return;
+ }
+#ifdef __BIG_ENDIAN
+ if (bdipsw == 0) {
+ /* BOOT Mode: Monitor ROM */
+ while (len) {
+ curlen = min_t(unsigned long, len,
+ 0x400000 - (from & (0x400000 - 1)));
+ memcpy(to, (void *)(from ^ 0x400000), curlen);
+ len -= curlen;
+ from += curlen;
+ to += curlen;
+ }
+ return;
+ }
+#endif
+ memcpy(to, (void *)from, len);
+}
+
+static void rbtx4939_flash_map_init(struct map_info *map)
+{
+ map->read = rbtx4939_flash_read16;
+ map->write = rbtx4939_flash_write16;
+ map->copy_from = rbtx4939_flash_copy_from;
+}
+
+static void __init rbtx4939_mtd_init(void)
+{
+ static struct {
+ struct platform_device dev;
+ struct resource res;
+ struct rbtx4939_flash_data data;
+ } pdevs[4];
+ int i;
+ static char names[4][8];
+ static struct mtd_partition parts[4];
+ struct rbtx4939_flash_data *boot_pdata = &pdevs[0].data;
+ u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f;
+
+ if (bdipsw & 8) {
+ /* BOOT Mode: USER ROM1 / USER ROM2 */
+ boot_pdata->nr_parts = 4;
+ for (i = 0; i < boot_pdata->nr_parts; i++) {
+ sprintf(names[i], "img%d", 4 - i);
+ parts[i].name = names[i];
+ parts[i].size = 0x400000;
+ parts[i].offset = MTDPART_OFS_NXTBLK;
+ }
+ } else if (bdipsw == 0) {
+ /* BOOT Mode: Monitor ROM */
+ boot_pdata->nr_parts = 2;
+ strcpy(names[0], "big");
+ strcpy(names[1], "little");
+ for (i = 0; i < boot_pdata->nr_parts; i++) {
+ parts[i].name = names[i];
+ parts[i].size = 0x400000;
+ parts[i].offset = MTDPART_OFS_NXTBLK;
+ }
+ } else {
+ /* BOOT Mode: ROM Emulator */
+ boot_pdata->nr_parts = 2;
+ parts[0].name = "boot";
+ parts[0].offset = 0xc00000;
+ parts[0].size = 0x400000;
+ parts[1].name = "user";
+ parts[1].offset = 0;
+ parts[1].size = 0xc00000;
+ }
+ boot_pdata->parts = parts;
+ boot_pdata->map_init = rbtx4939_flash_map_init;
+
+ for (i = 0; i < ARRAY_SIZE(pdevs); i++) {
+ struct resource *r = &pdevs[i].res;
+ struct platform_device *dev = &pdevs[i].dev;
+
+ r->start = 0x1f000000 - i * 0x1000000;
+ r->end = r->start + 0x1000000 - 1;
+ r->flags = IORESOURCE_MEM;
+ pdevs[i].data.width = 2;
+ dev->num_resources = 1;
+ dev->resource = r;
+ dev->id = i;
+ dev->name = "rbtx4939-flash";
+ dev->dev.platform_data = &pdevs[i].data;
+ platform_device_register(dev);
+ }
+}
+#else
+static void __init rbtx4939_mtd_init(void)
+{
+}
+#endif
+
+static void __init rbtx4939_arch_init(void)
+{
+ rbtx4939_pci_setup();
+}
+
+static void __init rbtx4939_device_init(void)
+{
+ unsigned long smc_addr = RBTX4939_ETHER_ADDR - IO_BASE;
+ struct resource smc_res[] = {
+ {
+ .start = smc_addr,
+ .end = smc_addr + 0x10 - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = RBTX4939_IRQ_ETHER,
+ /* override default irq flag defined in smc91x.h */
+ .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
+ },
+ };
+ struct smc91x_platdata smc_pdata = {
+ .flags = SMC91X_USE_16BIT,
+ };
+ struct platform_device *pdev;
+#if IS_ENABLED(CONFIG_TC35815)
+ int i, j;
+ unsigned char ethaddr[2][6];
+ u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f;
+
+ for (i = 0; i < 2; i++) {
+ unsigned long area = CKSEG1 + 0x1fff0000 + (i * 0x10);
+ if (bdipsw == 0)
+ memcpy(ethaddr[i], (void *)area, 6);
+ else {
+ u16 buf[3];
+ if (bdipsw & 8)
+ area -= 0x03000000;
+ else
+ area -= 0x01000000;
+ for (j = 0; j < 3; j++)
+ buf[j] = le16_to_cpup((u16 *)(area + j * 2));
+ memcpy(ethaddr[i], buf, 6);
+ }
+ }
+ tx4939_ethaddr_init(ethaddr[0], ethaddr[1]);
+#endif
+ pdev = platform_device_alloc("smc91x", -1);
+ if (!pdev ||
+ platform_device_add_resources(pdev, smc_res, ARRAY_SIZE(smc_res)) ||
+ platform_device_add_data(pdev, &smc_pdata, sizeof(smc_pdata)) ||
+ platform_device_add(pdev))
+ platform_device_put(pdev);
+ rbtx4939_mtd_init();
+ /* TC58DVM82A1FT: tDH=10ns, tWP=tRP=tREADID=35ns */
+ tx4939_ndfmc_init(10, 35,
+ (1 << 1) | (1 << 2),
+ (1 << 2)); /* ch1:8bit, ch2:16bit */
+ rbtx4939_led_setup();
+ tx4939_wdt_init();
+ tx4939_ata_init();
+ tx4939_rtc_init();
+ tx4939_dmac_init(0, 2);
+ tx4939_aclc_init();
+ platform_device_register_simple("txx9aclc-generic", -1, NULL, 0);
+ tx4939_sramc_init();
+ tx4939_rng_init();
+}
+
+static void __init rbtx4939_setup(void)
+{
+ int i;
+
+ rbtx4939_ebusc_setup();
+ /* always enable ATA0 */
+ txx9_set64(&tx4939_ccfgptr->pcfg, TX4939_PCFG_ATA0MODE);
+ if (txx9_master_clock == 0)
+ txx9_master_clock = 20000000;
+ tx4939_setup();
+ rbtx4939_update_ioc_pen();
+#ifdef HAVE_RBTX4939_IOSWAB
+ ioswabw = rbtx4939_ioswabw;
+ __mem_ioswabw = rbtx4939_mem_ioswabw;
+#endif
+
+ _machine_restart = rbtx4939_machine_restart;
+
+ txx9_7segled_init(RBTX4939_MAX_7SEGLEDS, rbtx4939_7segled_putc);
+ for (i = 0; i < RBTX4939_MAX_7SEGLEDS; i++)
+ txx9_7segled_putc(i, '-');
+ pr_info("RBTX4939 (Rev %02x) --- FPGA(Rev %02x) DIPSW:%02x,%02x\n",
+ readb(rbtx4939_board_rev_addr), readb(rbtx4939_ioc_rev_addr),
+ readb(rbtx4939_udipsw_addr), readb(rbtx4939_bdipsw_addr));
+
+#ifdef CONFIG_PCI
+ txx9_alloc_pci_controller(&txx9_primary_pcic, 0, 0, 0, 0);
+ txx9_board_pcibios_setup = tx4927_pcibios_setup;
+#else
+ set_io_port_base(RBTX4939_ETHER_BASE);
+#endif
+
+ tx4939_sio_init(TX4939_SCLK0(txx9_master_clock), 0);
+}
+
+struct txx9_board_vec rbtx4939_vec __initdata = {
+ .system = "Toshiba RBTX4939",
+ .prom_init = rbtx4939_prom_init,
+ .mem_setup = rbtx4939_setup,
+ .irq_setup = rbtx4939_irq_setup,
+ .time_init = rbtx4939_time_init,
+ .device_init = rbtx4939_device_init,
+ .arch_init = rbtx4939_arch_init,
+#ifdef CONFIG_PCI
+ .pci_map_irq = tx4939_pci_map_irq,
+#endif
+};
diff --git a/arch/mips/vdso/.gitignore b/arch/mips/vdso/.gitignore
new file mode 100644
index 000000000..1f43f6dd8
--- /dev/null
+++ b/arch/mips/vdso/.gitignore
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+*.so*
+vdso-*image.c
+genvdso
+vdso*.lds
diff --git a/arch/mips/vdso/Kconfig b/arch/mips/vdso/Kconfig
new file mode 100644
index 000000000..7aec72139
--- /dev/null
+++ b/arch/mips/vdso/Kconfig
@@ -0,0 +1,18 @@
+# For the pre-R6 code in arch/mips/vdso/vdso.h that locates
+# the base address of the VDSO, the linker will emit an R_MIPS_PC32
+# relocation; binutils > 2.25 handles this, but older versions fail
+# because that relocation is not supported for that symbol. As a result,
+# we are forced to disable the VDSO symbols when building with
+# < 2.25 binutils on pre-R6 kernels. For more background on why we
+# can't use other methods to get the base address of the VDSO, please
+# refer to the comments in that file.
+#
+# GCC (at least up to version 9.2) appears to emit function calls that make use
+# of the GOT when targeting microMIPS, which we can't use in the VDSO due to
+# the lack of relocations. As such, we disable the VDSO for microMIPS builds.
+
+config MIPS_LD_CAN_LINK_VDSO
+ def_bool LD_VERSION >= 225000000 || LD_IS_LLD
+
+config MIPS_DISABLE_VDSO
+ def_bool CPU_MICROMIPS || (!CPU_MIPSR6 && !MIPS_LD_CAN_LINK_VDSO)
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
new file mode 100644
index 000000000..2131d3fd7
--- /dev/null
+++ b/arch/mips/vdso/Makefile
@@ -0,0 +1,208 @@
+# SPDX-License-Identifier: GPL-2.0
+# Objects to go into the VDSO.
+
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of generic Makefile.
+ARCH_REL_TYPE_ABS := R_MIPS_JUMP_SLOT|R_MIPS_GLOB_DAT
+include $(srctree)/lib/vdso/Makefile
+
+obj-vdso-y := elf.o vgettimeofday.o sigreturn.o
+
+# Common compiler flags between ABIs.
+ccflags-vdso := \
+ $(filter -I%,$(KBUILD_CFLAGS)) \
+ $(filter -E%,$(KBUILD_CFLAGS)) \
+ $(filter -mmicromips,$(KBUILD_CFLAGS)) \
+ $(filter -march=%,$(KBUILD_CFLAGS)) \
+ $(filter -m%-float,$(KBUILD_CFLAGS)) \
+ $(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \
+ $(CLANG_FLAGS) \
+ -D__VDSO__
+
+ifndef CONFIG_64BIT
+ccflags-vdso += -DBUILD_VDSO32
+endif
+
+#
+# The -fno-jump-tables flag only prevents the compiler from generating
+# jump tables but does not prevent the compiler from emitting absolute
+# offsets.
+cflags-vdso := $(ccflags-vdso) \
+ $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
+ -O3 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
+ -mrelax-pic-calls $(call cc-option, -mexplicit-relocs) \
+ -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
+ $(call cc-option, -fno-asynchronous-unwind-tables)
+aflags-vdso := $(ccflags-vdso) \
+ -D__ASSEMBLY__ -Wa,-gdwarf-2
+
+ifneq ($(c-gettimeofday-y),)
+CFLAGS_vgettimeofday.o = -include $(c-gettimeofday-y)
+
+# config-n32-o32-env.c prepares the environment to build a 32bit vDSO
+# library on a 64bit kernel.
+# Note: Needs to be included before the generic library.
+CFLAGS_vgettimeofday-o32.o = -include $(srctree)/$(src)/config-n32-o32-env.c -include $(c-gettimeofday-y)
+CFLAGS_vgettimeofday-n32.o = -include $(srctree)/$(src)/config-n32-o32-env.c -include $(c-gettimeofday-y)
+endif
+
+CFLAGS_REMOVE_vgettimeofday.o = -pg
+
+ifdef CONFIG_MIPS_DISABLE_VDSO
+ ifndef CONFIG_MIPS_LD_CAN_LINK_VDSO
+ $(warning MIPS VDSO requires binutils >= 2.25)
+ endif
+ obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y))
+endif
+
+# VDSO linker flags.
+ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
+ $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \
+ -G 0 --eh-frame-hdr --hash-style=sysv --build-id=sha1 -T
+
+CFLAGS_REMOVE_vdso.o = -pg
+
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
+KCOV_INSTRUMENT := n
+
+# Check that we don't have PIC 'jalr t9' calls left
+quiet_cmd_vdso_mips_check = VDSOCHK $@
+ cmd_vdso_mips_check = if $(OBJDUMP) --disassemble $@ | egrep -h "jalr.*t9" > /dev/null; \
+ then (echo >&2 "$@: PIC 'jalr t9' calls are not supported"; \
+ rm -f $@; /bin/false); fi
+
+#
+# Shared build commands.
+#
+
+quiet_cmd_vdsold_and_vdso_check = LD $@
+ cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check); $(cmd_vdso_mips_check)
+
+quiet_cmd_vdsoas_o_S = AS $@
+ cmd_vdsoas_o_S = $(CC) $(a_flags) -c -o $@ $<
+
+# Strip rule for the raw .so files
+$(obj)/%.so.raw: OBJCOPYFLAGS := -S
+$(obj)/%.so.raw: $(obj)/%.so.dbg.raw FORCE
+ $(call if_changed,objcopy)
+
+hostprogs := genvdso
+
+quiet_cmd_genvdso = GENVDSO $@
+define cmd_genvdso
+ $(foreach file,$(filter %.raw,$^),cp $(file) $(file:%.raw=%) &&) \
+ $(obj)/genvdso $(<:%.raw=%) $(<:%.dbg.raw=%) $@ $(VDSO_NAME)
+endef
+
+#
+# Build native VDSO.
+#
+
+native-abi := $(filter -mabi=%,$(KBUILD_CFLAGS))
+
+targets += $(obj-vdso-y)
+targets += vdso.lds
+targets += vdso.so.dbg.raw vdso.so.raw
+targets += vdso.so.dbg vdso.so
+targets += vdso-image.c
+
+obj-vdso := $(obj-vdso-y:%.o=$(obj)/%.o)
+
+$(obj-vdso): KBUILD_CFLAGS := $(cflags-vdso) $(native-abi)
+$(obj-vdso): KBUILD_AFLAGS := $(aflags-vdso) $(native-abi)
+
+$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) $(native-abi)
+
+$(obj)/vdso.so.dbg.raw: $(obj)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold_and_vdso_check)
+
+$(obj)/vdso-image.c: $(obj)/vdso.so.dbg.raw $(obj)/vdso.so.raw \
+ $(obj)/genvdso FORCE
+ $(call if_changed,genvdso)
+
+obj-y += vdso-image.o
+
+#
+# Build O32 VDSO.
+#
+
+# Define these outside the ifdef to ensure they are picked up by clean.
+targets += $(obj-vdso-y:%.o=%-o32.o)
+targets += vdso-o32.lds
+targets += vdso-o32.so.dbg.raw vdso-o32.so.raw
+targets += vdso-o32.so.dbg vdso-o32.so
+targets += vdso-o32-image.c
+
+ifdef CONFIG_MIPS32_O32
+
+obj-vdso-o32 := $(obj-vdso-y:%.o=$(obj)/%-o32.o)
+
+$(obj-vdso-o32): KBUILD_CFLAGS := $(cflags-vdso) -mabi=32
+$(obj-vdso-o32): KBUILD_AFLAGS := $(aflags-vdso) -mabi=32
+
+$(obj)/%-o32.o: $(src)/%.S FORCE
+ $(call if_changed_dep,vdsoas_o_S)
+
+$(obj)/%-o32.o: $(src)/%.c FORCE
+ $(call cmd,force_checksrc)
+ $(call if_changed_rule,cc_o_c)
+
+$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
+$(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
+ $(call if_changed_dep,cpp_lds_S)
+
+$(obj)/vdso-o32.so.dbg.raw: $(obj)/vdso-o32.lds $(obj-vdso-o32) FORCE
+ $(call if_changed,vdsold_and_vdso_check)
+
+$(obj)/vdso-o32-image.c: VDSO_NAME := o32
+$(obj)/vdso-o32-image.c: $(obj)/vdso-o32.so.dbg.raw $(obj)/vdso-o32.so.raw \
+ $(obj)/genvdso FORCE
+ $(call if_changed,genvdso)
+
+obj-y += vdso-o32-image.o
+
+endif
+
+#
+# Build N32 VDSO.
+#
+
+targets += $(obj-vdso-y:%.o=%-n32.o)
+targets += vdso-n32.lds
+targets += vdso-n32.so.dbg.raw vdso-n32.so.raw
+targets += vdso-n32.so.dbg vdso-n32.so
+targets += vdso-n32-image.c
+
+ifdef CONFIG_MIPS32_N32
+
+obj-vdso-n32 := $(obj-vdso-y:%.o=$(obj)/%-n32.o)
+
+$(obj-vdso-n32): KBUILD_CFLAGS := $(cflags-vdso) -mabi=n32
+$(obj-vdso-n32): KBUILD_AFLAGS := $(aflags-vdso) -mabi=n32
+
+$(obj)/%-n32.o: $(src)/%.S FORCE
+ $(call if_changed_dep,vdsoas_o_S)
+
+$(obj)/%-n32.o: $(src)/%.c FORCE
+ $(call cmd,force_checksrc)
+ $(call if_changed_rule,cc_o_c)
+
+$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
+$(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
+ $(call if_changed_dep,cpp_lds_S)
+
+$(obj)/vdso-n32.so.dbg.raw: $(obj)/vdso-n32.lds $(obj-vdso-n32) FORCE
+ $(call if_changed,vdsold_and_vdso_check)
+
+$(obj)/vdso-n32-image.c: VDSO_NAME := n32
+$(obj)/vdso-n32-image.c: $(obj)/vdso-n32.so.dbg.raw $(obj)/vdso-n32.so.raw \
+ $(obj)/genvdso FORCE
+ $(call if_changed,genvdso)
+
+obj-y += vdso-n32-image.o
+
+endif
+
+# FIXME: Need install rule for debug.
+# Needs to deal with dependency for generation of dbg by cmd_genvdso...
diff --git a/arch/mips/vdso/config-n32-o32-env.c b/arch/mips/vdso/config-n32-o32-env.c
new file mode 100644
index 000000000..0011a632a
--- /dev/null
+++ b/arch/mips/vdso/config-n32-o32-env.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Configuration file for O32 and N32 binaries.
+ * Note: To be included before lib/vdso/gettimeofday.c
+ */
+#if defined(CONFIG_MIPS32_O32) || defined(CONFIG_MIPS32_N32)
+/*
+ * When building a 32-bit VDSO for a 64-bit kernel, fake a 32-bit kernel
+ * configuration.
+ */
+#undef CONFIG_64BIT
+
+#define BUILD_VDSO32
+#define CONFIG_32BIT 1
+#define CONFIG_GENERIC_ATOMIC64 1
+#define BUILD_VDSO32_64
+
+#endif
+
diff --git a/arch/mips/vdso/elf.S b/arch/mips/vdso/elf.S
new file mode 100644
index 000000000..a25cb147f
--- /dev/null
+++ b/arch/mips/vdso/elf.S
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <asm/vdso/vdso.h>
+
+#include <asm/isa-rev.h>
+
+#include <linux/elfnote.h>
+#include <linux/version.h>
+
+ELFNOTE_START(Linux, 0, "a")
+ .long LINUX_VERSION_CODE
+ELFNOTE_END
+
+/*
+ * The .MIPS.abiflags section must be defined with the FP ABI flags set
+ * to 'any' to be able to link with both old and new libraries.
+ * Newer toolchains are capable of automatically generating this, but we want
+ * to work with older toolchains as well. Therefore, we define the contents of
+ * this section here (under different names), and then genvdso will patch
+ * it to have the correct name and type.
+ *
+ * We base the .MIPS.abiflags section on preprocessor definitions rather than
+ * CONFIG_* because we need to match the particular ABI we are building the
+ * VDSO for.
+ *
+ * See https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking
+ * for the .MIPS.abiflags section description.
+ */
+
+ .section .mips_abiflags, "a"
+ .align 3
+__mips_abiflags:
+ .hword 0 /* version */
+ .byte __mips /* isa_level */
+
+ /* isa_rev */
+ .byte MIPS_ISA_REV
+
+ /* gpr_size */
+#ifdef __mips64
+ .byte 2 /* AFL_REG_64 */
+#else
+ .byte 1 /* AFL_REG_32 */
+#endif
+
+ /* cpr1_size */
+#if (MIPS_ISA_REV >= 6) || defined(__mips64)
+ .byte 2 /* AFL_REG_64 */
+#else
+ .byte 1 /* AFL_REG_32 */
+#endif
+
+ .byte 0 /* cpr2_size (AFL_REG_NONE) */
+ .byte 0 /* fp_abi (Val_GNU_MIPS_ABI_FP_ANY) */
+ .word 0 /* isa_ext */
+ .word 0 /* ases */
+ .word 0 /* flags1 */
+ .word 0 /* flags2 */
diff --git a/arch/mips/vdso/genvdso.c b/arch/mips/vdso/genvdso.c
new file mode 100644
index 000000000..abb06ae04
--- /dev/null
+++ b/arch/mips/vdso/genvdso.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+/*
+ * This tool is used to generate the real VDSO images from the raw image. It
+ * first patches up the MIPS ABI flags and GNU attributes sections defined in
+ * elf.S to have the correct name and type. It then generates a C source file
+ * to be compiled into the kernel containing the VDSO image data and a
+ * mips_vdso_image struct for it, including symbol offsets extracted from the
+ * image.
+ *
+ * We need to be passed both a stripped and unstripped VDSO image. The stripped
+ * image is compiled into the kernel, but we must also patch up the unstripped
+ * image's ABI flags sections so that it can be installed and used for
+ * debugging.
+ */
+
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <byteswap.h>
+#include <elf.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+/* Define these in case the system elf.h is not new enough to have them. */
+#ifndef SHT_GNU_ATTRIBUTES
+# define SHT_GNU_ATTRIBUTES 0x6ffffff5
+#endif
+#ifndef SHT_MIPS_ABIFLAGS
+# define SHT_MIPS_ABIFLAGS 0x7000002a
+#endif
+
+enum {
+ ABI_O32 = (1 << 0),
+ ABI_N32 = (1 << 1),
+ ABI_N64 = (1 << 2),
+
+ ABI_ALL = ABI_O32 | ABI_N32 | ABI_N64,
+};
+
+/* Symbols the kernel requires offsets for. */
+static struct {
+ const char *name;
+ const char *offset_name;
+ unsigned int abis;
+} vdso_symbols[] = {
+ { "__vdso_sigreturn", "off_sigreturn", ABI_O32 },
+ { "__vdso_rt_sigreturn", "off_rt_sigreturn", ABI_ALL },
+ {}
+};
+
+static const char *program_name;
+static const char *vdso_name;
+static unsigned char elf_class;
+static unsigned int elf_abi;
+static bool need_swap;
+static FILE *out_file;
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+# define HOST_ORDER ELFDATA2LSB
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+# define HOST_ORDER ELFDATA2MSB
+#endif
+
+#define BUILD_SWAP(bits) \
+ static uint##bits##_t swap_uint##bits(uint##bits##_t val) \
+ { \
+ return need_swap ? bswap_##bits(val) : val; \
+ }
+
+BUILD_SWAP(16)
+BUILD_SWAP(32)
+BUILD_SWAP(64)
+
+#define __FUNC(name, bits) name##bits
+#define _FUNC(name, bits) __FUNC(name, bits)
+#define FUNC(name) _FUNC(name, ELF_BITS)
+
+#define __ELF(x, bits) Elf##bits##_##x
+#define _ELF(x, bits) __ELF(x, bits)
+#define ELF(x) _ELF(x, ELF_BITS)
+
+/*
+ * Include genvdso.h twice with ELF_BITS defined differently to get functions
+ * for both ELF32 and ELF64.
+ */
+
+#define ELF_BITS 64
+#include "genvdso.h"
+#undef ELF_BITS
+
+#define ELF_BITS 32
+#include "genvdso.h"
+#undef ELF_BITS
+
+static void *map_vdso(const char *path, size_t *_size)
+{
+ int fd;
+ struct stat stat;
+ void *addr;
+ const Elf32_Ehdr *ehdr;
+
+ fd = open(path, O_RDWR);
+ if (fd < 0) {
+ fprintf(stderr, "%s: Failed to open '%s': %s\n", program_name,
+ path, strerror(errno));
+ return NULL;
+ }
+
+ if (fstat(fd, &stat) != 0) {
+ fprintf(stderr, "%s: Failed to stat '%s': %s\n", program_name,
+ path, strerror(errno));
+ close(fd);
+ return NULL;
+ }
+
+ addr = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
+ 0);
+ if (addr == MAP_FAILED) {
+ fprintf(stderr, "%s: Failed to map '%s': %s\n", program_name,
+ path, strerror(errno));
+ close(fd);
+ return NULL;
+ }
+
+ /* ELF32/64 header formats are the same for the bits we're checking. */
+ ehdr = addr;
+
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
+ fprintf(stderr, "%s: '%s' is not an ELF file\n", program_name,
+ path);
+ close(fd);
+ return NULL;
+ }
+
+ elf_class = ehdr->e_ident[EI_CLASS];
+ switch (elf_class) {
+ case ELFCLASS32:
+ case ELFCLASS64:
+ break;
+ default:
+ fprintf(stderr, "%s: '%s' has invalid ELF class\n",
+ program_name, path);
+ close(fd);
+ return NULL;
+ }
+
+ switch (ehdr->e_ident[EI_DATA]) {
+ case ELFDATA2LSB:
+ case ELFDATA2MSB:
+ need_swap = ehdr->e_ident[EI_DATA] != HOST_ORDER;
+ break;
+ default:
+ fprintf(stderr, "%s: '%s' has invalid ELF data order\n",
+ program_name, path);
+ close(fd);
+ return NULL;
+ }
+
+ if (swap_uint16(ehdr->e_machine) != EM_MIPS) {
+ fprintf(stderr,
+ "%s: '%s' has invalid ELF machine (expected EM_MIPS)\n",
+ program_name, path);
+ close(fd);
+ return NULL;
+ } else if (swap_uint16(ehdr->e_type) != ET_DYN) {
+ fprintf(stderr,
+ "%s: '%s' has invalid ELF type (expected ET_DYN)\n",
+ program_name, path);
+ close(fd);
+ return NULL;
+ }
+
+ *_size = stat.st_size;
+ close(fd);
+ return addr;
+}
+
+static bool patch_vdso(const char *path, void *vdso)
+{
+ if (elf_class == ELFCLASS64)
+ return patch_vdso64(path, vdso);
+ else
+ return patch_vdso32(path, vdso);
+}
+
+static bool get_symbols(const char *path, void *vdso)
+{
+ if (elf_class == ELFCLASS64)
+ return get_symbols64(path, vdso);
+ else
+ return get_symbols32(path, vdso);
+}
+
+int main(int argc, char **argv)
+{
+ const char *dbg_vdso_path, *vdso_path, *out_path;
+ void *dbg_vdso, *vdso;
+ size_t dbg_vdso_size, vdso_size, i;
+
+ program_name = argv[0];
+
+ if (argc < 4 || argc > 5) {
+ fprintf(stderr,
+ "Usage: %s <debug VDSO> <stripped VDSO> <output file> [<name>]\n",
+ program_name);
+ return EXIT_FAILURE;
+ }
+
+ dbg_vdso_path = argv[1];
+ vdso_path = argv[2];
+ out_path = argv[3];
+ vdso_name = (argc > 4) ? argv[4] : "";
+
+ dbg_vdso = map_vdso(dbg_vdso_path, &dbg_vdso_size);
+ if (!dbg_vdso)
+ return EXIT_FAILURE;
+
+ vdso = map_vdso(vdso_path, &vdso_size);
+ if (!vdso)
+ return EXIT_FAILURE;
+
+ /* Patch both the VDSOs' ABI flags sections. */
+ if (!patch_vdso(dbg_vdso_path, dbg_vdso))
+ return EXIT_FAILURE;
+ if (!patch_vdso(vdso_path, vdso))
+ return EXIT_FAILURE;
+
+ if (msync(dbg_vdso, dbg_vdso_size, MS_SYNC) != 0) {
+ fprintf(stderr, "%s: Failed to sync '%s': %s\n", program_name,
+ dbg_vdso_path, strerror(errno));
+ return EXIT_FAILURE;
+ } else if (msync(vdso, vdso_size, MS_SYNC) != 0) {
+ fprintf(stderr, "%s: Failed to sync '%s': %s\n", program_name,
+ vdso_path, strerror(errno));
+ return EXIT_FAILURE;
+ }
+
+ out_file = fopen(out_path, "w");
+ if (!out_file) {
+ fprintf(stderr, "%s: Failed to open '%s': %s\n", program_name,
+ out_path, strerror(errno));
+ return EXIT_FAILURE;
+ }
+
+ fprintf(out_file, "/* Automatically generated - do not edit */\n");
+ fprintf(out_file, "#include <linux/linkage.h>\n");
+ fprintf(out_file, "#include <linux/mm.h>\n");
+ fprintf(out_file, "#include <asm/vdso.h>\n");
+ fprintf(out_file, "static int vdso_mremap(\n");
+ fprintf(out_file, " const struct vm_special_mapping *sm,\n");
+ fprintf(out_file, " struct vm_area_struct *new_vma)\n");
+ fprintf(out_file, "{\n");
+ fprintf(out_file, " unsigned long new_size =\n");
+ fprintf(out_file, " new_vma->vm_end - new_vma->vm_start;\n");
+ fprintf(out_file, " if (vdso_image.size != new_size)\n");
+ fprintf(out_file, " return -EINVAL;\n");
+ fprintf(out_file, " current->mm->context.vdso =\n");
+ fprintf(out_file, " (void *)(new_vma->vm_start);\n");
+ fprintf(out_file, " return 0;\n");
+ fprintf(out_file, "}\n");
+
+ /* Write out the stripped VDSO data. */
+ fprintf(out_file,
+ "static unsigned char vdso_data[PAGE_ALIGN(%zu)] __page_aligned_data = {\n\t",
+ vdso_size);
+ for (i = 0; i < vdso_size; i++) {
+ if (!(i % 10))
+ fprintf(out_file, "\n\t");
+ fprintf(out_file, "0x%02x, ", ((unsigned char *)vdso)[i]);
+ }
+ fprintf(out_file, "\n};\n");
+
+ /* Preallocate a page array. */
+ fprintf(out_file,
+ "static struct page *vdso_pages[PAGE_ALIGN(%zu) / PAGE_SIZE];\n",
+ vdso_size);
+
+ fprintf(out_file, "struct mips_vdso_image vdso_image%s%s = {\n",
+ (vdso_name[0]) ? "_" : "", vdso_name);
+ fprintf(out_file, "\t.data = vdso_data,\n");
+ fprintf(out_file, "\t.size = PAGE_ALIGN(%zu),\n", vdso_size);
+ fprintf(out_file, "\t.mapping = {\n");
+ fprintf(out_file, "\t\t.name = \"[vdso]\",\n");
+ fprintf(out_file, "\t\t.pages = vdso_pages,\n");
+ fprintf(out_file, "\t\t.mremap = vdso_mremap,\n");
+ fprintf(out_file, "\t},\n");
+
+ /* Calculate and write symbol offsets to <output file> */
+ if (!get_symbols(dbg_vdso_path, dbg_vdso)) {
+ unlink(out_path);
+ fclose(out_file);
+ return EXIT_FAILURE;
+ }
+
+ fprintf(out_file, "};\n");
+ fclose(out_file);
+
+ return EXIT_SUCCESS;
+}
diff --git a/arch/mips/vdso/genvdso.h b/arch/mips/vdso/genvdso.h
new file mode 100644
index 000000000..9bfb87403
--- /dev/null
+++ b/arch/mips/vdso/genvdso.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+static inline bool FUNC(patch_vdso)(const char *path, void *vdso)
+{
+ const ELF(Ehdr) *ehdr = vdso;
+ void *shdrs;
+ ELF(Shdr) *shdr;
+ char *shstrtab, *name;
+ uint16_t sh_count, sh_entsize, i;
+
+ shdrs = vdso + FUNC(swap_uint)(ehdr->e_shoff);
+ sh_count = swap_uint16(ehdr->e_shnum);
+ sh_entsize = swap_uint16(ehdr->e_shentsize);
+
+ shdr = shdrs + (sh_entsize * swap_uint16(ehdr->e_shstrndx));
+ shstrtab = vdso + FUNC(swap_uint)(shdr->sh_offset);
+
+ for (i = 0; i < sh_count; i++) {
+ shdr = shdrs + (i * sh_entsize);
+ name = shstrtab + swap_uint32(shdr->sh_name);
+
+ /*
+ * Ensure there are no relocation sections - ld.so does not
+ * relocate the VDSO, so if there are relocations things will
+ * break.
+ */
+ switch (swap_uint32(shdr->sh_type)) {
+ case SHT_REL:
+ case SHT_RELA:
+ fprintf(stderr,
+ "%s: '%s' contains relocation sections\n",
+ program_name, path);
+ return false;
+ }
+
+ /* Check for existing sections. */
+ if (strcmp(name, ".MIPS.abiflags") == 0) {
+ fprintf(stderr,
+ "%s: '%s' already contains a '.MIPS.abiflags' section\n",
+ program_name, path);
+ return false;
+ }
+
+ if (strcmp(name, ".mips_abiflags") == 0) {
+ strcpy(name, ".MIPS.abiflags");
+ shdr->sh_type = swap_uint32(SHT_MIPS_ABIFLAGS);
+ shdr->sh_entsize = shdr->sh_size;
+ }
+ }
+
+ return true;
+}
+
+static inline bool FUNC(get_symbols)(const char *path, void *vdso)
+{
+ const ELF(Ehdr) *ehdr = vdso;
+ void *shdrs, *symtab;
+ ELF(Shdr) *shdr;
+ const ELF(Sym) *sym;
+ char *strtab, *name;
+ uint16_t sh_count, sh_entsize, st_count, st_entsize, i, j;
+ uint64_t offset;
+ uint32_t flags;
+
+ shdrs = vdso + FUNC(swap_uint)(ehdr->e_shoff);
+ sh_count = swap_uint16(ehdr->e_shnum);
+ sh_entsize = swap_uint16(ehdr->e_shentsize);
+
+ for (i = 0; i < sh_count; i++) {
+ shdr = shdrs + (i * sh_entsize);
+
+ if (swap_uint32(shdr->sh_type) == SHT_SYMTAB)
+ break;
+ }
+
+ if (i == sh_count) {
+ fprintf(stderr, "%s: '%s' has no symbol table\n", program_name,
+ path);
+ return false;
+ }
+
+ /* Get flags */
+ flags = swap_uint32(ehdr->e_flags);
+ if (elf_class == ELFCLASS64)
+ elf_abi = ABI_N64;
+ else if (flags & EF_MIPS_ABI2)
+ elf_abi = ABI_N32;
+ else
+ elf_abi = ABI_O32;
+
+ /* Get symbol table. */
+ symtab = vdso + FUNC(swap_uint)(shdr->sh_offset);
+ st_entsize = FUNC(swap_uint)(shdr->sh_entsize);
+ st_count = FUNC(swap_uint)(shdr->sh_size) / st_entsize;
+
+ /* Get string table. */
+ shdr = shdrs + (swap_uint32(shdr->sh_link) * sh_entsize);
+ strtab = vdso + FUNC(swap_uint)(shdr->sh_offset);
+
+ /* Write offsets for symbols needed by the kernel. */
+ for (i = 0; vdso_symbols[i].name; i++) {
+ if (!(vdso_symbols[i].abis & elf_abi))
+ continue;
+
+ for (j = 0; j < st_count; j++) {
+ sym = symtab + (j * st_entsize);
+ name = strtab + swap_uint32(sym->st_name);
+
+ if (!strcmp(name, vdso_symbols[i].name)) {
+ offset = FUNC(swap_uint)(sym->st_value);
+
+ fprintf(out_file,
+ "\t.%s = 0x%" PRIx64 ",\n",
+ vdso_symbols[i].offset_name, offset);
+ break;
+ }
+ }
+
+ if (j == st_count) {
+ fprintf(stderr,
+ "%s: '%s' is missing required symbol '%s'\n",
+ program_name, path, vdso_symbols[i].name);
+ return false;
+ }
+ }
+
+ return true;
+}
diff --git a/arch/mips/vdso/sigreturn.S b/arch/mips/vdso/sigreturn.S
new file mode 100644
index 000000000..e5c0ab98a
--- /dev/null
+++ b/arch/mips/vdso/sigreturn.S
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <asm/vdso/vdso.h>
+
+#include <uapi/asm/unistd.h>
+
+#include <asm/regdef.h>
+#include <asm/asm.h>
+
+ .section .text
+ .cfi_sections .debug_frame
+
+LEAF(__vdso_rt_sigreturn)
+ .cfi_signal_frame
+
+ li v0, __NR_rt_sigreturn
+ syscall
+
+ END(__vdso_rt_sigreturn)
+
+#if _MIPS_SIM == _MIPS_SIM_ABI32
+
+LEAF(__vdso_sigreturn)
+ .cfi_signal_frame
+
+ li v0, __NR_sigreturn
+ syscall
+
+ END(__vdso_sigreturn)
+
+#endif
diff --git a/arch/mips/vdso/vdso.lds.S b/arch/mips/vdso/vdso.lds.S
new file mode 100644
index 000000000..d90b65724
--- /dev/null
+++ b/arch/mips/vdso/vdso.lds.S
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <asm/sgidefs.h>
+
+#if _MIPS_SIM == _MIPS_SIM_ABI64
+OUTPUT_FORMAT("elf64-tradlittlemips", "elf64-tradbigmips", "elf64-tradlittlemips")
+#elif _MIPS_SIM == _MIPS_SIM_NABI32
+OUTPUT_FORMAT("elf32-ntradlittlemips", "elf32-ntradbigmips", "elf32-ntradlittlemips")
+#else
+OUTPUT_FORMAT("elf32-tradlittlemips", "elf32-tradbigmips", "elf32-tradlittlemips")
+#endif
+
+OUTPUT_ARCH(mips)
+
+SECTIONS
+{
+ PROVIDE(_start = .);
+ . = SIZEOF_HEADERS;
+
+ /*
+ * In order to retain compatibility with older toolchains, we provide the
+ * ABI flags section ourselves. Newer assemblers will automatically
+ * generate .MIPS.abiflags sections so we discard such input sections,
+ * and then manually define our own section here. genvdso will patch
+ * this section to have the correct name/type.
+ */
+ .mips_abiflags : { *(.mips_abiflags) } :text :abiflags
+
+ .reginfo : { *(.reginfo) } :text :reginfo
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+
+ .text : { *(.text*) } :text
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .rodata : { *(.rodata*) } :text
+
+ _end = .;
+ PROVIDE(end = .);
+
+ /DISCARD/ : {
+ *(.MIPS.abiflags)
+ *(.gnu.attributes)
+ *(.note.GNU-stack)
+ *(.data .data.* .gnu.linkonce.d.* .sdata*)
+ *(.bss .sbss .dynbss .dynsbss)
+ }
+}
+
+PHDRS
+{
+ /*
+ * Provide a PT_MIPS_ABIFLAGS header to assign the ABI flags section
+ * to. We can specify the header type directly here so no modification
+ * is needed later on.
+ */
+ abiflags 0x70000003;
+
+ /*
+ * The ABI flags header must exist directly after the PT_INTERP header,
+ * so we must explicitly place the PT_MIPS_REGINFO header after it to
+ * stop the linker putting one in at the start.
+ */
+ reginfo 0x70000000;
+
+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+VERSION
+{
+ LINUX_2.6 {
+#ifndef CONFIG_MIPS_DISABLE_VDSO
+ global:
+ __vdso_clock_gettime;
+ __vdso_gettimeofday;
+ __vdso_clock_getres;
+#if _MIPS_SIM != _MIPS_SIM_ABI64
+ __vdso_clock_gettime64;
+#endif
+#endif
+ local: *;
+ };
+}
diff --git a/arch/mips/vdso/vgettimeofday.c b/arch/mips/vdso/vgettimeofday.c
new file mode 100644
index 000000000..6b83b6376
--- /dev/null
+++ b/arch/mips/vdso/vgettimeofday.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MIPS64 and compat userspace implementations of gettimeofday()
+ * and similar.
+ *
+ * Copyright (C) 2015 Imagination Technologies
+ * Copyright (C) 2018 ARM Limited
+ *
+ */
+#include <linux/time.h>
+#include <linux/types.h>
+
+#if _MIPS_SIM != _MIPS_SIM_ABI64
+int __vdso_clock_gettime(clockid_t clock,
+ struct old_timespec32 *ts)
+{
+ return __cvdso_clock_gettime32(clock, ts);
+}
+
+#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
+
+/*
+ * This is behind the ifdef so that we don't provide the symbol when there's no
+ * possibility of there being a usable clocksource, because there's nothing we
+ * can do without it. When libc fails the symbol lookup it should fall back on
+ * the standard syscall path.
+ */
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+{
+ return __cvdso_gettimeofday(tv, tz);
+}
+
+#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
+
+int __vdso_clock_getres(clockid_t clock_id,
+ struct old_timespec32 *res)
+{
+ return __cvdso_clock_getres_time32(clock_id, res);
+}
+
+int __vdso_clock_gettime64(clockid_t clock,
+ struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime(clock, ts);
+}
+
+#else
+
+int __vdso_clock_gettime(clockid_t clock,
+ struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime(clock, ts);
+}
+
+#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
+
+/*
+ * This is behind the ifdef so that we don't provide the symbol when there's no
+ * possibility of there being a usable clocksource, because there's nothing we
+ * can do without it. When libc fails the symbol lookup it should fall back on
+ * the standard syscall path.
+ */
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+{
+ return __cvdso_gettimeofday(tv, tz);
+}
+
+#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
+
+int __vdso_clock_getres(clockid_t clock_id,
+ struct __kernel_timespec *res)
+{
+ return __cvdso_clock_getres(clock_id, res);
+}
+
+#endif
diff --git a/arch/mips/vr41xx/Kconfig b/arch/mips/vr41xx/Kconfig
new file mode 100644
index 000000000..e0b651db3
--- /dev/null
+++ b/arch/mips/vr41xx/Kconfig
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: GPL-2.0
+choice
+ prompt "Machine type"
+ depends on MACH_VR41XX
+ default TANBAC_TB022X
+
+config CASIO_E55
+ bool "CASIO CASSIOPEIA E-10/15/55/65"
+ select CEVT_R4K
+ select CSRC_R4K
+ select DMA_NONCOHERENT
+ select IRQ_MIPS_CPU
+ select ISA
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+
+config IBM_WORKPAD
+ bool "IBM WorkPad z50"
+ select CEVT_R4K
+ select CSRC_R4K
+ select DMA_NONCOHERENT
+ select IRQ_MIPS_CPU
+ select ISA
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+
+config TANBAC_TB022X
+ bool "TANBAC VR4131 multichip module and TANBAC VR4131DIMM"
+ select CEVT_R4K
+ select CSRC_R4K
+ select DMA_NONCOHERENT
+ select IRQ_MIPS_CPU
+ select HAVE_PCI
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ help
+ The TANBAC VR4131 multichip module (TB0225) and
+ the TANBAC VR4131DIMM (TB0229) are MIPS-based platforms
+ manufactured by TANBAC.
+ Please refer to <http://www.tanbac.co.jp/> for details of the
+ VR4131 multichip module and the VR4131DIMM.
+
+config VICTOR_MPC30X
+ bool "Victor MP-C303/304"
+ select CEVT_R4K
+ select CSRC_R4K
+ select DMA_NONCOHERENT
+ select IRQ_MIPS_CPU
+ select HAVE_PCI
+ select PCI_VR41XX
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+
+config ZAO_CAPCELLA
+ bool "ZAO Networks Capcella"
+ select CEVT_R4K
+ select CSRC_R4K
+ select DMA_NONCOHERENT
+ select IRQ_MIPS_CPU
+ select HAVE_PCI
+ select PCI_VR41XX
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+
+endchoice
+
+choice
+ prompt "Base board type"
+ depends on TANBAC_TB022X
+ default TANBAC_TB0287
+
+config TANBAC_TB0219
+ bool "TANBAC DIMM Evaluation Kit(TB0219)"
+ select GPIO_VR41XX
+ select PCI_VR41XX
+ help
+ The TANBAC DIMM Evaluation Kit (TB0219) is a MIPS-based platform
+ manufactured by TANBAC.
+ Please refer to <http://www.tanbac.co.jp/> for details of the DIMM Evaluation Kit.
+
+config TANBAC_TB0226
+ bool "TANBAC Mbase(TB0226)"
+ select GPIO_VR41XX
+ select PCI_VR41XX
+ help
+ The TANBAC Mbase (TB0226) is a MIPS-based platform
+ manufactured by TANBAC.
+ Please refer to <http://www.tanbac.co.jp/> for details of the Mbase.
+
+config TANBAC_TB0287
+ bool "TANBAC Mini-ITX DIMM base(TB0287)"
+ select PCI_VR41XX
+ help
+ The TANBAC Mini-ITX DIMM base (TB0287) is a MIPS-based platform
+ manufactured by TANBAC.
+ Please refer to <http://www.tanbac.co.jp/> for details of the Mini-ITX DIMM base.
+
+endchoice
+
+config PCI_VR41XX
+ bool "Add PCI control unit support of NEC VR4100 series"
+ depends on MACH_VR41XX && HAVE_PCI
+ default y
+ select PCI
diff --git a/arch/mips/vr41xx/Makefile b/arch/mips/vr41xx/Makefile
new file mode 100644
index 000000000..765020d5e
--- /dev/null
+++ b/arch/mips/vr41xx/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_MACH_VR41XX) += common/
+obj-$(CONFIG_CASIO_E55) += casio-e55/
+obj-$(CONFIG_IBM_WORKPAD) += ibm-workpad/
diff --git a/arch/mips/vr41xx/Platform b/arch/mips/vr41xx/Platform
new file mode 100644
index 000000000..3f593a3e5
--- /dev/null
+++ b/arch/mips/vr41xx/Platform
@@ -0,0 +1,29 @@
+#
+# NEC VR4100 series based machines
+#
+cflags-$(CONFIG_MACH_VR41XX) += -I$(srctree)/arch/mips/include/asm/mach-vr41xx
+
+#
+# CASIO CASSIOPEIA E-55/65 (VR4111)
+#
+load-$(CONFIG_CASIO_E55) += 0xffffffff80004000
+
+#
+# IBM WorkPad z50 (VR4121)
+#
+load-$(CONFIG_IBM_WORKPAD) += 0xffffffff80004000
+
+#
+# TANBAC VR4131 multichip module(TB0225) and TANBAC VR4131DIMM(TB0229) (VR4131)
+#
+load-$(CONFIG_TANBAC_TB022X) += 0xffffffff80000000
+
+#
+# Victor MP-C303/304 (VR4122)
+#
+load-$(CONFIG_VICTOR_MPC30X) += 0xffffffff80001000
+
+#
+# ZAO Networks Capcella (VR4131)
+#
+load-$(CONFIG_ZAO_CAPCELLA) += 0xffffffff80000000
diff --git a/arch/mips/vr41xx/casio-e55/Makefile b/arch/mips/vr41xx/casio-e55/Makefile
new file mode 100644
index 000000000..65d30d7c8
--- /dev/null
+++ b/arch/mips/vr41xx/casio-e55/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the CASIO CASSIOPEIA E-55/65 specific parts of the kernel
+#
+
+obj-y += setup.o
diff --git a/arch/mips/vr41xx/casio-e55/setup.c b/arch/mips/vr41xx/casio-e55/setup.c
new file mode 100644
index 000000000..25ea7f19f
--- /dev/null
+++ b/arch/mips/vr41xx/casio-e55/setup.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * setup.c, Setup for the CASIO CASSIOPEIA E-11/15/55/65.
+ *
+ * Copyright (C) 2002-2006 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/ioport.h>
+
+#include <asm/io.h>
+
+#define E55_ISA_IO_BASE 0x1400c000
+#define E55_ISA_IO_SIZE 0x03ff4000
+#define E55_ISA_IO_START 0
+#define E55_ISA_IO_END (E55_ISA_IO_SIZE - 1)
+#define E55_IO_PORT_BASE KSEG1ADDR(E55_ISA_IO_BASE)
+
+static int __init casio_e55_setup(void)
+{
+ set_io_port_base(E55_IO_PORT_BASE);
+ ioport_resource.start = E55_ISA_IO_START;
+ ioport_resource.end = E55_ISA_IO_END;
+
+ return 0;
+}
+
+arch_initcall(casio_e55_setup);
diff --git a/arch/mips/vr41xx/common/Makefile b/arch/mips/vr41xx/common/Makefile
new file mode 100644
index 000000000..57d3eee29
--- /dev/null
+++ b/arch/mips/vr41xx/common/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for common code of the NEC VR4100 series.
+#
+
+obj-y += bcu.o cmu.o giu.o icu.o init.o irq.o pmu.o rtc.o siu.o type.o
diff --git a/arch/mips/vr41xx/common/bcu.c b/arch/mips/vr41xx/common/bcu.c
new file mode 100644
index 000000000..0677d1779
--- /dev/null
+++ b/arch/mips/vr41xx/common/bcu.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * bcu.c, Bus Control Unit routines for the NEC VR4100 series.
+ *
+ * Copyright (C) 2002 MontaVista Software Inc.
+ * Author: Yoichi Yuasa <source@mvista.com>
+ * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+/*
+ * Changes:
+ * MontaVista Software Inc. <source@mvista.com>
+ * - New creation, NEC VR4122 and VR4131 are supported.
+ * - Added support for NEC VR4111 and VR4121.
+ *
+ * Yoichi Yuasa <yuasa@linux-mips.org>
+ * - Added support for NEC VR4133.
+ */
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+
+#include <asm/cpu-type.h>
+#include <asm/cpu.h>
+#include <asm/io.h>
+
+#define CLKSPEEDREG_TYPE1 (void __iomem *)KSEG1ADDR(0x0b000014)
+#define CLKSPEEDREG_TYPE2 (void __iomem *)KSEG1ADDR(0x0f000014)
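+/* Bit fields of the CLKSPEED register, used below to derive PClock, VTClock and TClock. */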
+ #define CLKSP(x) ((x) & 0x001f)
+ #define CLKSP_VR4133(x) ((x) & 0x0007)
+
+ #define DIV2B 0x8000
+ #define DIV3B 0x4000
+ #define DIV4B 0x2000
+
+ #define DIVT(x) (((x) & 0xf000) >> 12)
+ #define DIVVT(x) (((x) & 0x0f00) >> 8)
+
+ #define TDIVMODE(x) (2 << (((x) & 0x1000) >> 12))
+ #define VTDIVMODE(x) (((x) & 0x0700) >> 8)
+
+static unsigned long vr41xx_vtclock;
+static unsigned long vr41xx_tclock;
+
+unsigned long vr41xx_get_vtclock_frequency(void)
+{
+ return vr41xx_vtclock;
+}
+
+EXPORT_SYMBOL_GPL(vr41xx_get_vtclock_frequency);
+
+unsigned long vr41xx_get_tclock_frequency(void)
+{
+ return vr41xx_tclock;
+}
+
+EXPORT_SYMBOL_GPL(vr41xx_get_tclock_frequency);
+
+static inline uint16_t read_clkspeed(void)
+{
+ switch (current_cpu_type()) {
+ case CPU_VR4111:
+ case CPU_VR4121: return readw(CLKSPEEDREG_TYPE1);
+ case CPU_VR4122:
+ case CPU_VR4131:
+ case CPU_VR4133: return readw(CLKSPEEDREG_TYPE2);
+ default:
+ printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n");
+ break;
+ }
+
+ return 0;
+}
+
+static inline unsigned long calculate_pclock(uint16_t clkspeed)
+{
+ unsigned long pclock = 0;
+
+ switch (current_cpu_type()) {
+ case CPU_VR4111:
+ case CPU_VR4121:
+ pclock = 18432000 * 64;
+ pclock /= CLKSP(clkspeed);
+ break;
+ case CPU_VR4122:
+ pclock = 18432000 * 98;
+ pclock /= CLKSP(clkspeed);
+ break;
+ case CPU_VR4131:
+ pclock = 18432000 * 108;
+ pclock /= CLKSP(clkspeed);
+ break;
+ case CPU_VR4133:
+ switch (CLKSP_VR4133(clkspeed)) {
+ case 0:
+ pclock = 133000000;
+ break;
+ case 1:
+ pclock = 149000000;
+ break;
+ case 2:
+ pclock = 165900000;
+ break;
+ case 3:
+ pclock = 199100000;
+ break;
+ case 4:
+ pclock = 265900000;
+ break;
+ default:
+ printk(KERN_INFO "Unknown PClock speed for NEC VR4133\n");
+ break;
+ }
+ break;
+ default:
+ printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n");
+ break;
+ }
+
+ printk(KERN_INFO "PClock: %ldHz\n", pclock);
+
+ return pclock;
+}
+
+static inline unsigned long calculate_vtclock(uint16_t clkspeed, unsigned long pclock)
+{
+ unsigned long vtclock = 0;
+
+ switch (current_cpu_type()) {
+ case CPU_VR4111:
+ /* The NEC VR4111 doesn't have the VTClock. */
+ break;
+ case CPU_VR4121:
+ vtclock = pclock;
+ /* DIVVT == 9: divide by 1.5, i.e. VTClock = (PClock * 6) / 9 */
+ if (DIVVT(clkspeed) == 9)
+ vtclock = pclock * 6;
+ /* DIVVT == 10: divide by 2.5, i.e. VTClock = (PClock * 4) / 10 */
+ else if (DIVVT(clkspeed) == 10)
+ vtclock = pclock * 4;
+ vtclock /= DIVVT(clkspeed);
+ printk(KERN_INFO "VTClock: %ldHz\n", vtclock);
+ break;
+ case CPU_VR4122:
+ if (VTDIVMODE(clkspeed) == 7)
+ vtclock = pclock / 1;
+ else if (VTDIVMODE(clkspeed) == 1)
+ vtclock = pclock / 2;
+ else
+ vtclock = pclock / VTDIVMODE(clkspeed);
+ printk(KERN_INFO "VTClock: %ldHz\n", vtclock);
+ break;
+ case CPU_VR4131:
+ case CPU_VR4133:
+ vtclock = pclock / VTDIVMODE(clkspeed);
+ printk(KERN_INFO "VTClock: %ldHz\n", vtclock);
+ break;
+ default:
+ printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n");
+ break;
+ }
+
+ return vtclock;
+}
+
+static inline unsigned long calculate_tclock(uint16_t clkspeed, unsigned long pclock,
+ unsigned long vtclock)
+{
+ unsigned long tclock = 0;
+
+ switch (current_cpu_type()) {
+ case CPU_VR4111:
+ if (!(clkspeed & DIV2B))
+ tclock = pclock / 2;
+ else if (!(clkspeed & DIV3B))
+ tclock = pclock / 3;
+ else if (!(clkspeed & DIV4B))
+ tclock = pclock / 4;
+ break;
+ case CPU_VR4121:
+ tclock = pclock / DIVT(clkspeed);
+ break;
+ case CPU_VR4122:
+ case CPU_VR4131:
+ case CPU_VR4133:
+ tclock = vtclock / TDIVMODE(clkspeed);
+ break;
+ default:
+ printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n");
+ break;
+ }
+
+ printk(KERN_INFO "TClock: %ldHz\n", tclock);
+
+ return tclock;
+}
+
+void vr41xx_calculate_clock_frequency(void)
+{
+ unsigned long pclock;
+ uint16_t clkspeed;
+
+ clkspeed = read_clkspeed();
+
+ pclock = calculate_pclock(clkspeed);
+ vr41xx_vtclock = calculate_vtclock(clkspeed, pclock);
+ vr41xx_tclock = calculate_tclock(clkspeed, pclock, vr41xx_vtclock);
+}
+
+EXPORT_SYMBOL_GPL(vr41xx_calculate_clock_frequency);
diff --git a/arch/mips/vr41xx/common/cmu.c b/arch/mips/vr41xx/common/cmu.c
new file mode 100644
index 000000000..b59ee5479
--- /dev/null
+++ b/arch/mips/vr41xx/common/cmu.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * cmu.c, Clock Mask Unit routines for the NEC VR4100 series.
+ *
+ * Copyright (C) 2001-2002 MontaVista Software Inc.
+ * Author: Yoichi Yuasa <source@mvista.com>
+ * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+/*
+ * Changes:
+ * MontaVista Software Inc. <source@mvista.com>
+ * - New creation, NEC VR4122 and VR4131 are supported.
+ * - Added support for NEC VR4111 and VR4121.
+ *
+ * Yoichi Yuasa <yuasa@linux-mips.org>
+ * - Added support for NEC VR4133.
+ */
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/cpu.h>
+#include <asm/io.h>
+#include <asm/vr41xx/vr41xx.h>
+
+#define CMU_TYPE1_BASE 0x0b000060UL
+#define CMU_TYPE1_SIZE 0x4
+
+#define CMU_TYPE2_BASE 0x0f000060UL
+#define CMU_TYPE2_SIZE 0x4
+
+#define CMU_TYPE3_BASE 0x0f000060UL
+#define CMU_TYPE3_SIZE 0x8
+
+#define CMUCLKMSK 0x0
+ #define MSKPIU 0x0001
+ #define MSKSIU 0x0002
+ #define MSKAIU 0x0004
+ #define MSKKIU 0x0008
+ #define MSKFIR 0x0010
+ #define MSKDSIU 0x0820
+ #define MSKCSI 0x0040
+ #define MSKPCIU 0x0080
+ #define MSKSSIU 0x0100
+ #define MSKSHSP 0x0200
+ #define MSKFFIR 0x0400
+ #define MSKSCSI 0x1000
+ #define MSKPPCIU 0x2000
+#define CMUCLKMSK2 0x4
+ #define MSKCEU 0x0001
+ #define MSKMAC0 0x0002
+ #define MSKMAC1 0x0004
+
+static void __iomem *cmu_base;
+static uint16_t cmuclkmsk, cmuclkmsk2;
+static DEFINE_SPINLOCK(cmu_lock);
+
+#define cmu_read(offset) readw(cmu_base + (offset))
+#define cmu_write(offset, value) writew((value), cmu_base + (offset))
+
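+/* Set the unit's bit(s) in CMUCLKMSK/CMUCLKMSK2 so that its clock is supplied. */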
+void vr41xx_supply_clock(vr41xx_clock_t clock)
+{
+ spin_lock_irq(&cmu_lock);
+
+ switch (clock) {
+ case PIU_CLOCK:
+ cmuclkmsk |= MSKPIU;
+ break;
+ case SIU_CLOCK:
+ cmuclkmsk |= MSKSIU | MSKSSIU;
+ break;
+ case AIU_CLOCK:
+ cmuclkmsk |= MSKAIU;
+ break;
+ case KIU_CLOCK:
+ cmuclkmsk |= MSKKIU;
+ break;
+ case FIR_CLOCK:
+ cmuclkmsk |= MSKFIR | MSKFFIR;
+ break;
+ case DSIU_CLOCK:
+ if (current_cpu_type() == CPU_VR4111 ||
+ current_cpu_type() == CPU_VR4121)
+ cmuclkmsk |= MSKDSIU;
+ else
+ cmuclkmsk |= MSKSIU | MSKDSIU;
+ break;
+ case CSI_CLOCK:
+ cmuclkmsk |= MSKCSI | MSKSCSI;
+ break;
+ case PCIU_CLOCK:
+ cmuclkmsk |= MSKPCIU;
+ break;
+ case HSP_CLOCK:
+ cmuclkmsk |= MSKSHSP;
+ break;
+ case PCI_CLOCK:
+ cmuclkmsk |= MSKPPCIU;
+ break;
+ case CEU_CLOCK:
+ cmuclkmsk2 |= MSKCEU;
+ break;
+ case ETHER0_CLOCK:
+ cmuclkmsk2 |= MSKMAC0;
+ break;
+ case ETHER1_CLOCK:
+ cmuclkmsk2 |= MSKMAC1;
+ break;
+ default:
+ break;
+ }
+
+ if (clock == CEU_CLOCK || clock == ETHER0_CLOCK ||
+ clock == ETHER1_CLOCK)
+ cmu_write(CMUCLKMSK2, cmuclkmsk2);
+ else
+ cmu_write(CMUCLKMSK, cmuclkmsk);
+
+ spin_unlock_irq(&cmu_lock);
+}
+
+EXPORT_SYMBOL_GPL(vr41xx_supply_clock);
+
+void vr41xx_mask_clock(vr41xx_clock_t clock)
+{
+ spin_lock_irq(&cmu_lock);
+
+ switch (clock) {
+ case PIU_CLOCK:
+ cmuclkmsk &= ~MSKPIU;
+ break;
+ case SIU_CLOCK:
+ if (current_cpu_type() == CPU_VR4111 ||
+ current_cpu_type() == CPU_VR4121) {
+ cmuclkmsk &= ~(MSKSIU | MSKSSIU);
+ } else {
+ if (cmuclkmsk & MSKDSIU)
+ cmuclkmsk &= ~MSKSSIU;
+ else
+ cmuclkmsk &= ~(MSKSIU | MSKSSIU);
+ }
+ break;
+ case AIU_CLOCK:
+ cmuclkmsk &= ~MSKAIU;
+ break;
+ case KIU_CLOCK:
+ cmuclkmsk &= ~MSKKIU;
+ break;
+ case FIR_CLOCK:
+ cmuclkmsk &= ~(MSKFIR | MSKFFIR);
+ break;
+ case DSIU_CLOCK:
+ if (current_cpu_type() == CPU_VR4111 ||
+ current_cpu_type() == CPU_VR4121) {
+ cmuclkmsk &= ~MSKDSIU;
+ } else {
+ if (cmuclkmsk & MSKSSIU)
+ cmuclkmsk &= ~MSKDSIU;
+ else
+ cmuclkmsk &= ~(MSKSIU | MSKDSIU);
+ }
+ break;
+ case CSI_CLOCK:
+ cmuclkmsk &= ~(MSKCSI | MSKSCSI);
+ break;
+ case PCIU_CLOCK:
+ cmuclkmsk &= ~MSKPCIU;
+ break;
+ case HSP_CLOCK:
+ cmuclkmsk &= ~MSKSHSP;
+ break;
+ case PCI_CLOCK:
+ cmuclkmsk &= ~MSKPPCIU;
+ break;
+ case CEU_CLOCK:
+ cmuclkmsk2 &= ~MSKCEU;
+ break;
+ case ETHER0_CLOCK:
+ cmuclkmsk2 &= ~MSKMAC0;
+ break;
+ case ETHER1_CLOCK:
+ cmuclkmsk2 &= ~MSKMAC1;
+ break;
+ default:
+ break;
+ }
+
+ if (clock == CEU_CLOCK || clock == ETHER0_CLOCK ||
+ clock == ETHER1_CLOCK)
+ cmu_write(CMUCLKMSK2, cmuclkmsk2);
+ else
+ cmu_write(CMUCLKMSK, cmuclkmsk);
+
+ spin_unlock_irq(&cmu_lock);
+}
+
+EXPORT_SYMBOL_GPL(vr41xx_mask_clock);
+
+static int __init vr41xx_cmu_init(void)
+{
+ unsigned long start, size;
+
+ switch (current_cpu_type()) {
+ case CPU_VR4111:
+ case CPU_VR4121:
+ start = CMU_TYPE1_BASE;
+ size = CMU_TYPE1_SIZE;
+ break;
+ case CPU_VR4122:
+ case CPU_VR4131:
+ start = CMU_TYPE2_BASE;
+ size = CMU_TYPE2_SIZE;
+ break;
+ case CPU_VR4133:
+ start = CMU_TYPE3_BASE;
+ size = CMU_TYPE3_SIZE;
+ break;
+ default:
+ panic("Unexpected CPU of NEC VR4100 series");
+ break;
+ }
+
+ if (request_mem_region(start, size, "CMU") == NULL)
+ return -EBUSY;
+
+ cmu_base = ioremap(start, size);
+ if (cmu_base == NULL) {
+ release_mem_region(start, size);
+ return -EBUSY;
+ }
+
+ cmuclkmsk = cmu_read(CMUCLKMSK);
+ if (current_cpu_type() == CPU_VR4133)
+ cmuclkmsk2 = cmu_read(CMUCLKMSK2);
+
+ spin_lock_init(&cmu_lock);
+
+ return 0;
+}
+
+core_initcall(vr41xx_cmu_init);
diff --git a/arch/mips/vr41xx/common/giu.c b/arch/mips/vr41xx/common/giu.c
new file mode 100644
index 000000000..398c62641
--- /dev/null
+++ b/arch/mips/vr41xx/common/giu.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NEC VR4100 series GIU platform device.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+
+#include <asm/cpu.h>
+#include <asm/vr41xx/giu.h>
+#include <asm/vr41xx/irq.h>
+
+static struct resource giu_50pins_pullupdown_resource[] __initdata = {
+ {
+ .start = 0x0b000100,
+ .end = 0x0b00011f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0x0b0002e0,
+ .end = 0x0b0002e3,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = GIUINT_IRQ,
+ .end = GIUINT_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource giu_36pins_resource[] __initdata = {
+ {
+ .start = 0x0f000140,
+ .end = 0x0f00015f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = GIUINT_IRQ,
+ .end = GIUINT_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource giu_48pins_resource[] __initdata = {
+ {
+ .start = 0x0f000140,
+ .end = 0x0f000167,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = GIUINT_IRQ,
+ .end = GIUINT_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static int __init vr41xx_giu_add(void)
+{
+ struct platform_device *pdev;
+ struct resource *res;
+ unsigned int num;
+ int retval;
+
+ pdev = platform_device_alloc("GIU", -1);
+ if (!pdev)
+ return -ENOMEM;
+
+ switch (current_cpu_type()) {
+ case CPU_VR4111:
+ case CPU_VR4121:
+ pdev->id = GPIO_50PINS_PULLUPDOWN;
+ res = giu_50pins_pullupdown_resource;
+ num = ARRAY_SIZE(giu_50pins_pullupdown_resource);
+ break;
+ case CPU_VR4122:
+ case CPU_VR4131:
+ pdev->id = GPIO_36PINS;
+ res = giu_36pins_resource;
+ num = ARRAY_SIZE(giu_36pins_resource);
+ break;
+ case CPU_VR4133:
+ pdev->id = GPIO_48PINS_EDGE_SELECT;
+ res = giu_48pins_resource;
+ num = ARRAY_SIZE(giu_48pins_resource);
+ break;
+ default:
+ retval = -ENODEV;
+ goto err_free_device;
+ }
+
+ retval = platform_device_add_resources(pdev, res, num);
+ if (retval)
+ goto err_free_device;
+
+ retval = platform_device_add(pdev);
+ if (retval)
+ goto err_free_device;
+
+ return 0;
+
+err_free_device:
+ platform_device_put(pdev);
+
+ return retval;
+}
+device_initcall(vr41xx_giu_add);
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
new file mode 100644
index 000000000..9240bcdbe
--- /dev/null
+++ b/arch/mips/vr41xx/common/icu.c
@@ -0,0 +1,714 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * icu.c, Interrupt Control Unit routines for the NEC VR4100 series.
+ *
+ * Copyright (C) 2001-2002 MontaVista Software Inc.
+ * Author: Yoichi Yuasa <source@mvista.com>
+ * Copyright (C) 2003-2006 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+/*
+ * Changes:
+ * MontaVista Software Inc. <source@mvista.com>
+ * - New creation, NEC VR4122 and VR4131 are supported.
+ * - Added support for NEC VR4111 and VR4121.
+ *
+ * Yoichi Yuasa <yuasa@linux-mips.org>
+ * - Added handling for INTASSIGN of NEC VR4133.
+ */
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+
+#include <asm/cpu.h>
+#include <asm/io.h>
+#include <asm/vr41xx/irq.h>
+#include <asm/vr41xx/vr41xx.h>
+
+static void __iomem *icu1_base;
+static void __iomem *icu2_base;
+
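+/* Per-pin Int0-Int4 routing for SYSINT1/SYSINT2; updated via vr41xx_set_intassign() on the VR4133. */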
+static unsigned char sysint1_assign[16] = {
+ 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+static unsigned char sysint2_assign[16] = {
+ 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+#define ICU1_TYPE1_BASE 0x0b000080UL
+#define ICU2_TYPE1_BASE 0x0b000200UL
+
+#define ICU1_TYPE2_BASE 0x0f000080UL
+#define ICU2_TYPE2_BASE 0x0f0000a0UL
+
+#define ICU1_SIZE 0x20
+#define ICU2_SIZE 0x1c
+
+#define SYSINT1REG 0x00
+#define PIUINTREG 0x02
+#define INTASSIGN0 0x04
+#define INTASSIGN1 0x06
+#define GIUINTLREG 0x08
+#define DSIUINTREG 0x0a
+#define MSYSINT1REG 0x0c
+#define MPIUINTREG 0x0e
+#define MAIUINTREG 0x10
+#define MKIUINTREG 0x12
+#define MMACINTREG 0x12
+#define MGIUINTLREG 0x14
+#define MDSIUINTREG 0x16
+#define NMIREG 0x18
+#define SOFTREG 0x1a
+#define INTASSIGN2 0x1c
+#define INTASSIGN3 0x1e
+
+#define SYSINT2REG 0x00
+#define GIUINTHREG 0x02
+#define FIRINTREG 0x04
+#define MSYSINT2REG 0x06
+#define MGIUINTHREG 0x08
+#define MFIRINTREG 0x0a
+#define PCIINTREG 0x0c
+ #define PCIINT0 0x0001
+#define SCUINTREG 0x0e
+ #define SCUINT0 0x0001
+#define CSIINTREG 0x10
+#define MPCIINTREG 0x12
+#define MSCUINTREG 0x14
+#define MCSIINTREG 0x16
+#define BCUINTREG 0x18
+ #define BCUINTR 0x0001
+#define MBCUINTREG 0x1a
+
+#define SYSINT1_IRQ_TO_PIN(x) ((x) - SYSINT1_IRQ_BASE) /* Pin 0-15 */
+#define SYSINT2_IRQ_TO_PIN(x) ((x) - SYSINT2_IRQ_BASE) /* Pin 0-15 */
+
+#define INT_TO_IRQ(x) ((x) + 2) /* Int0-4 -> IRQ2-6 */
+
+#define icu1_read(offset) readw(icu1_base + (offset))
+#define icu1_write(offset, value) writew((value), icu1_base + (offset))
+
+#define icu2_read(offset) readw(icu2_base + (offset))
+#define icu2_write(offset, value) writew((value), icu2_base + (offset))
+
+#define INTASSIGN_MAX 4
+#define INTASSIGN_MASK 0x0007
+
+static inline uint16_t icu1_set(uint8_t offset, uint16_t set)
+{
+ uint16_t data;
+
+ data = icu1_read(offset);
+ data |= set;
+ icu1_write(offset, data);
+
+ return data;
+}
+
+static inline uint16_t icu1_clear(uint8_t offset, uint16_t clear)
+{
+ uint16_t data;
+
+ data = icu1_read(offset);
+ data &= ~clear;
+ icu1_write(offset, data);
+
+ return data;
+}
+
+static inline uint16_t icu2_set(uint8_t offset, uint16_t set)
+{
+ uint16_t data;
+
+ data = icu2_read(offset);
+ data |= set;
+ icu2_write(offset, data);
+
+ return data;
+}
+
+static inline uint16_t icu2_clear(uint8_t offset, uint16_t clear)
+{
+ uint16_t data;
+
+ data = icu2_read(offset);
+ data &= ~clear;
+ icu2_write(offset, data);
+
+ return data;
+}
+
+void vr41xx_enable_piuint(uint16_t mask)
+{
+ struct irq_desc *desc = irq_to_desc(PIU_IRQ);
+ unsigned long flags;
+
+ if (current_cpu_type() == CPU_VR4111 ||
+ current_cpu_type() == CPU_VR4121) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu1_set(MPIUINTREG, mask);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+
+EXPORT_SYMBOL(vr41xx_enable_piuint);
+
+void vr41xx_disable_piuint(uint16_t mask)
+{
+ struct irq_desc *desc = irq_to_desc(PIU_IRQ);
+ unsigned long flags;
+
+ if (current_cpu_type() == CPU_VR4111 ||
+ current_cpu_type() == CPU_VR4121) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu1_clear(MPIUINTREG, mask);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+
+EXPORT_SYMBOL(vr41xx_disable_piuint);
+
+void vr41xx_enable_aiuint(uint16_t mask)
+{
+ struct irq_desc *desc = irq_to_desc(AIU_IRQ);
+ unsigned long flags;
+
+ if (current_cpu_type() == CPU_VR4111 ||
+ current_cpu_type() == CPU_VR4121) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu1_set(MAIUINTREG, mask);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+
+EXPORT_SYMBOL(vr41xx_enable_aiuint);
+
+void vr41xx_disable_aiuint(uint16_t mask)
+{
+ struct irq_desc *desc = irq_to_desc(AIU_IRQ);
+ unsigned long flags;
+
+ if (current_cpu_type() == CPU_VR4111 ||
+ current_cpu_type() == CPU_VR4121) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu1_clear(MAIUINTREG, mask);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+
+EXPORT_SYMBOL(vr41xx_disable_aiuint);
+
+void vr41xx_enable_kiuint(uint16_t mask)
+{
+ struct irq_desc *desc = irq_to_desc(KIU_IRQ);
+ unsigned long flags;
+
+ if (current_cpu_type() == CPU_VR4111 ||
+ current_cpu_type() == CPU_VR4121) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu1_set(MKIUINTREG, mask);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+
+EXPORT_SYMBOL(vr41xx_enable_kiuint);
+
+void vr41xx_disable_kiuint(uint16_t mask)
+{
+ struct irq_desc *desc = irq_to_desc(KIU_IRQ);
+ unsigned long flags;
+
+ if (current_cpu_type() == CPU_VR4111 ||
+ current_cpu_type() == CPU_VR4121) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu1_clear(MKIUINTREG, mask);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+
+EXPORT_SYMBOL(vr41xx_disable_kiuint);
+
+void vr41xx_enable_macint(uint16_t mask)
+{
+ struct irq_desc *desc = irq_to_desc(ETHERNET_IRQ);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu1_set(MMACINTREG, mask);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+EXPORT_SYMBOL(vr41xx_enable_macint);
+
+void vr41xx_disable_macint(uint16_t mask)
+{
+ struct irq_desc *desc = irq_to_desc(ETHERNET_IRQ);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu1_clear(MMACINTREG, mask);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+EXPORT_SYMBOL(vr41xx_disable_macint);
+
+void vr41xx_enable_dsiuint(uint16_t mask)
+{
+ struct irq_desc *desc = irq_to_desc(DSIU_IRQ);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu1_set(MDSIUINTREG, mask);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+EXPORT_SYMBOL(vr41xx_enable_dsiuint);
+
+void vr41xx_disable_dsiuint(uint16_t mask)
+{
+ struct irq_desc *desc = irq_to_desc(DSIU_IRQ);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu1_clear(MDSIUINTREG, mask);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+EXPORT_SYMBOL(vr41xx_disable_dsiuint);
+
+void vr41xx_enable_firint(uint16_t mask)
+{
+ struct irq_desc *desc = irq_to_desc(FIR_IRQ);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu2_set(MFIRINTREG, mask);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+EXPORT_SYMBOL(vr41xx_enable_firint);
+
+void vr41xx_disable_firint(uint16_t mask)
+{
+ struct irq_desc *desc = irq_to_desc(FIR_IRQ);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu2_clear(MFIRINTREG, mask);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+EXPORT_SYMBOL(vr41xx_disable_firint);
+
+void vr41xx_enable_pciint(void)
+{
+ struct irq_desc *desc = irq_to_desc(PCI_IRQ);
+ unsigned long flags;
+
+ if (current_cpu_type() == CPU_VR4122 ||
+ current_cpu_type() == CPU_VR4131 ||
+ current_cpu_type() == CPU_VR4133) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu2_write(MPCIINTREG, PCIINT0);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+
+EXPORT_SYMBOL(vr41xx_enable_pciint);
+
+void vr41xx_disable_pciint(void)
+{
+ struct irq_desc *desc = irq_to_desc(PCI_IRQ);
+ unsigned long flags;
+
+ if (current_cpu_type() == CPU_VR4122 ||
+ current_cpu_type() == CPU_VR4131 ||
+ current_cpu_type() == CPU_VR4133) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu2_write(MPCIINTREG, 0);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+
+EXPORT_SYMBOL(vr41xx_disable_pciint);
+
+void vr41xx_enable_scuint(void)
+{
+ struct irq_desc *desc = irq_to_desc(SCU_IRQ);
+ unsigned long flags;
+
+ if (current_cpu_type() == CPU_VR4122 ||
+ current_cpu_type() == CPU_VR4131 ||
+ current_cpu_type() == CPU_VR4133) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu2_write(MSCUINTREG, SCUINT0);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+
+EXPORT_SYMBOL(vr41xx_enable_scuint);
+
+void vr41xx_disable_scuint(void)
+{
+ struct irq_desc *desc = irq_to_desc(SCU_IRQ);
+ unsigned long flags;
+
+ if (current_cpu_type() == CPU_VR4122 ||
+ current_cpu_type() == CPU_VR4131 ||
+ current_cpu_type() == CPU_VR4133) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu2_write(MSCUINTREG, 0);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+
+EXPORT_SYMBOL(vr41xx_disable_scuint);
+
+void vr41xx_enable_csiint(uint16_t mask)
+{
+ struct irq_desc *desc = irq_to_desc(CSI_IRQ);
+ unsigned long flags;
+
+ if (current_cpu_type() == CPU_VR4122 ||
+ current_cpu_type() == CPU_VR4131 ||
+ current_cpu_type() == CPU_VR4133) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu2_set(MCSIINTREG, mask);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+
+EXPORT_SYMBOL(vr41xx_enable_csiint);
+
+void vr41xx_disable_csiint(uint16_t mask)
+{
+ struct irq_desc *desc = irq_to_desc(CSI_IRQ);
+ unsigned long flags;
+
+ if (current_cpu_type() == CPU_VR4122 ||
+ current_cpu_type() == CPU_VR4131 ||
+ current_cpu_type() == CPU_VR4133) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu2_clear(MCSIINTREG, mask);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+
+EXPORT_SYMBOL(vr41xx_disable_csiint);
+
+void vr41xx_enable_bcuint(void)
+{
+ struct irq_desc *desc = irq_to_desc(BCU_IRQ);
+ unsigned long flags;
+
+ if (current_cpu_type() == CPU_VR4122 ||
+ current_cpu_type() == CPU_VR4131 ||
+ current_cpu_type() == CPU_VR4133) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu2_write(MBCUINTREG, BCUINTR);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+
+EXPORT_SYMBOL(vr41xx_enable_bcuint);
+
+void vr41xx_disable_bcuint(void)
+{
+ struct irq_desc *desc = irq_to_desc(BCU_IRQ);
+ unsigned long flags;
+
+ if (current_cpu_type() == CPU_VR4122 ||
+ current_cpu_type() == CPU_VR4131 ||
+ current_cpu_type() == CPU_VR4133) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ icu2_write(MBCUINTREG, 0);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+
+EXPORT_SYMBOL(vr41xx_disable_bcuint);
+
+static void disable_sysint1_irq(struct irq_data *d)
+{
+ icu1_clear(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(d->irq));
+}
+
+static void enable_sysint1_irq(struct irq_data *d)
+{
+ icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(d->irq));
+}
+
+static struct irq_chip sysint1_irq_type = {
+ .name = "SYSINT1",
+ .irq_mask = disable_sysint1_irq,
+ .irq_unmask = enable_sysint1_irq,
+};
+
+static void disable_sysint2_irq(struct irq_data *d)
+{
+ icu2_clear(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(d->irq));
+}
+
+static void enable_sysint2_irq(struct irq_data *d)
+{
+ icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(d->irq));
+}
+
+static struct irq_chip sysint2_irq_type = {
+ .name = "SYSINT2",
+ .irq_mask = disable_sysint2_irq,
+ .irq_unmask = enable_sysint2_irq,
+};
+
+static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ uint16_t intassign0, intassign1;
+ unsigned int pin;
+
+ pin = SYSINT1_IRQ_TO_PIN(irq);
+
+ raw_spin_lock_irq(&desc->lock);
+
+ intassign0 = icu1_read(INTASSIGN0);
+ intassign1 = icu1_read(INTASSIGN1);
+
+ switch (pin) {
+ case 0:
+ intassign0 &= ~INTASSIGN_MASK;
+ intassign0 |= (uint16_t)assign;
+ break;
+ case 1:
+ intassign0 &= ~(INTASSIGN_MASK << 3);
+ intassign0 |= (uint16_t)assign << 3;
+ break;
+ case 2:
+ intassign0 &= ~(INTASSIGN_MASK << 6);
+ intassign0 |= (uint16_t)assign << 6;
+ break;
+ case 3:
+ intassign0 &= ~(INTASSIGN_MASK << 9);
+ intassign0 |= (uint16_t)assign << 9;
+ break;
+ case 8:
+ intassign0 &= ~(INTASSIGN_MASK << 12);
+ intassign0 |= (uint16_t)assign << 12;
+ break;
+ case 9:
+ intassign1 &= ~INTASSIGN_MASK;
+ intassign1 |= (uint16_t)assign;
+ break;
+ case 11:
+ intassign1 &= ~(INTASSIGN_MASK << 6);
+ intassign1 |= (uint16_t)assign << 6;
+ break;
+ case 12:
+ intassign1 &= ~(INTASSIGN_MASK << 9);
+ intassign1 |= (uint16_t)assign << 9;
+ break;
+ default:
+ raw_spin_unlock_irq(&desc->lock);
+ return -EINVAL;
+ }
+
+ sysint1_assign[pin] = assign;
+ icu1_write(INTASSIGN0, intassign0);
+ icu1_write(INTASSIGN1, intassign1);
+
+ raw_spin_unlock_irq(&desc->lock);
+
+ return 0;
+}
+
+static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ uint16_t intassign2, intassign3;
+ unsigned int pin;
+
+ pin = SYSINT2_IRQ_TO_PIN(irq);
+
+ raw_spin_lock_irq(&desc->lock);
+
+ intassign2 = icu1_read(INTASSIGN2);
+ intassign3 = icu1_read(INTASSIGN3);
+
+ switch (pin) {
+ case 0:
+ intassign2 &= ~INTASSIGN_MASK;
+ intassign2 |= (uint16_t)assign;
+ break;
+ case 1:
+ intassign2 &= ~(INTASSIGN_MASK << 3);
+ intassign2 |= (uint16_t)assign << 3;
+ break;
+ case 3:
+ intassign2 &= ~(INTASSIGN_MASK << 6);
+ intassign2 |= (uint16_t)assign << 6;
+ break;
+ case 4:
+ intassign2 &= ~(INTASSIGN_MASK << 9);
+ intassign2 |= (uint16_t)assign << 9;
+ break;
+ case 5:
+ intassign2 &= ~(INTASSIGN_MASK << 12);
+ intassign2 |= (uint16_t)assign << 12;
+ break;
+ case 6:
+ intassign3 &= ~INTASSIGN_MASK;
+ intassign3 |= (uint16_t)assign;
+ break;
+ case 7:
+ intassign3 &= ~(INTASSIGN_MASK << 3);
+ intassign3 |= (uint16_t)assign << 3;
+ break;
+ case 8:
+ intassign3 &= ~(INTASSIGN_MASK << 6);
+ intassign3 |= (uint16_t)assign << 6;
+ break;
+ case 9:
+ intassign3 &= ~(INTASSIGN_MASK << 9);
+ intassign3 |= (uint16_t)assign << 9;
+ break;
+ case 10:
+ intassign3 &= ~(INTASSIGN_MASK << 12);
+ intassign3 |= (uint16_t)assign << 12;
+ break;
+ default:
+ raw_spin_unlock_irq(&desc->lock);
+ return -EINVAL;
+ }
+
+ sysint2_assign[pin] = assign;
+ icu1_write(INTASSIGN2, intassign2);
+ icu1_write(INTASSIGN3, intassign3);
+
+ raw_spin_unlock_irq(&desc->lock);
+
+ return 0;
+}
+
+int vr41xx_set_intassign(unsigned int irq, unsigned char intassign)
+{
+ int retval = -EINVAL;
+
+ if (current_cpu_type() != CPU_VR4133)
+ return -EINVAL;
+
+ if (intassign > INTASSIGN_MAX)
+ return -EINVAL;
+
+ if (irq >= SYSINT1_IRQ_BASE && irq <= SYSINT1_IRQ_LAST)
+ retval = set_sysint1_assign(irq, intassign);
+ else if (irq >= SYSINT2_IRQ_BASE && irq <= SYSINT2_IRQ_LAST)
+ retval = set_sysint2_assign(irq, intassign);
+
+ return retval;
+}
+
+EXPORT_SYMBOL(vr41xx_set_intassign);
+
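+/* Find the pending, enabled SYSINT1/SYSINT2 source routed to the given CPU INT line. */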
+static int icu_get_irq(unsigned int irq)
+{
+ uint16_t pend1, pend2;
+ uint16_t mask1, mask2;
+ int i;
+
+ pend1 = icu1_read(SYSINT1REG);
+ mask1 = icu1_read(MSYSINT1REG);
+
+ pend2 = icu2_read(SYSINT2REG);
+ mask2 = icu2_read(MSYSINT2REG);
+
+ mask1 &= pend1;
+ mask2 &= pend2;
+
+ if (mask1) {
+ for (i = 0; i < 16; i++) {
+ if (irq == INT_TO_IRQ(sysint1_assign[i]) && (mask1 & (1 << i)))
+ return SYSINT1_IRQ(i);
+ }
+ }
+
+ if (mask2) {
+ for (i = 0; i < 16; i++) {
+ if (irq == INT_TO_IRQ(sysint2_assign[i]) && (mask2 & (1 << i)))
+ return SYSINT2_IRQ(i);
+ }
+ }
+
+ printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
+
+ return -1;
+}
+
+static int __init vr41xx_icu_init(void)
+{
+ unsigned long icu1_start, icu2_start;
+ int i;
+
+ switch (current_cpu_type()) {
+ case CPU_VR4111:
+ case CPU_VR4121:
+ icu1_start = ICU1_TYPE1_BASE;
+ icu2_start = ICU2_TYPE1_BASE;
+ break;
+ case CPU_VR4122:
+ case CPU_VR4131:
+ case CPU_VR4133:
+ icu1_start = ICU1_TYPE2_BASE;
+ icu2_start = ICU2_TYPE2_BASE;
+ break;
+ default:
+ printk(KERN_ERR "ICU: Unexpected CPU of NEC VR4100 series\n");
+ return -ENODEV;
+ }
+
+ if (request_mem_region(icu1_start, ICU1_SIZE, "ICU") == NULL)
+ return -EBUSY;
+
+ if (request_mem_region(icu2_start, ICU2_SIZE, "ICU") == NULL) {
+ release_mem_region(icu1_start, ICU1_SIZE);
+ return -EBUSY;
+ }
+
+ icu1_base = ioremap(icu1_start, ICU1_SIZE);
+ if (icu1_base == NULL) {
+ release_mem_region(icu1_start, ICU1_SIZE);
+ release_mem_region(icu2_start, ICU2_SIZE);
+ return -ENOMEM;
+ }
+
+ icu2_base = ioremap(icu2_start, ICU2_SIZE);
+ if (icu2_base == NULL) {
+ iounmap(icu1_base);
+ release_mem_region(icu1_start, ICU1_SIZE);
+ release_mem_region(icu2_start, ICU2_SIZE);
+ return -ENOMEM;
+ }
+
+ icu1_write(MSYSINT1REG, 0);
+ icu1_write(MGIUINTLREG, 0xffff);
+
+ icu2_write(MSYSINT2REG, 0);
+ icu2_write(MGIUINTHREG, 0xffff);
+
+ for (i = SYSINT1_IRQ_BASE; i <= SYSINT1_IRQ_LAST; i++)
+ irq_set_chip_and_handler(i, &sysint1_irq_type,
+ handle_level_irq);
+
+ for (i = SYSINT2_IRQ_BASE; i <= SYSINT2_IRQ_LAST; i++)
+ irq_set_chip_and_handler(i, &sysint2_irq_type,
+ handle_level_irq);
+
+ cascade_irq(INT0_IRQ, icu_get_irq);
+ cascade_irq(INT1_IRQ, icu_get_irq);
+ cascade_irq(INT2_IRQ, icu_get_irq);
+ cascade_irq(INT3_IRQ, icu_get_irq);
+ cascade_irq(INT4_IRQ, icu_get_irq);
+
+ return 0;
+}
+
+core_initcall(vr41xx_icu_init);
diff --git a/arch/mips/vr41xx/common/init.c b/arch/mips/vr41xx/common/init.c
new file mode 100644
index 000000000..ca53ac306
--- /dev/null
+++ b/arch/mips/vr41xx/common/init.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * init.c, Common initialization routines for NEC VR4100 series.
+ *
+ * Copyright (C) 2003-2009 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/string.h>
+
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+#include <asm/vr41xx/irq.h>
+#include <asm/vr41xx/vr41xx.h>
+
+#define IO_MEM_RESOURCE_START 0UL
+#define IO_MEM_RESOURCE_END 0x1fffffffUL
+
+static void __init iomem_resource_init(void)
+{
+ iomem_resource.start = IO_MEM_RESOURCE_START;
+ iomem_resource.end = IO_MEM_RESOURCE_END;
+}
+
+void __init plat_time_init(void)
+{
+ unsigned long tclock;
+
+ vr41xx_calculate_clock_frequency();
+
+ tclock = vr41xx_get_tclock_frequency();
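+ /* The CP0 Count register ticks at TClock/2 on VR4131 rev 2.0/2.1 and at TClock/4 on other VR41xx parts. */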
+ if (current_cpu_data.processor_id == PRID_VR4131_REV2_0 ||
+ current_cpu_data.processor_id == PRID_VR4131_REV2_1)
+ mips_hpt_frequency = tclock / 2;
+ else
+ mips_hpt_frequency = tclock / 4;
+}
+
+void __init plat_mem_setup(void)
+{
+ iomem_resource_init();
+
+ vr41xx_siu_setup();
+}
+
+void __init prom_init(void)
+{
+ int argc, i;
+ char **argv;
+
+ argc = fw_arg0;
+ argv = (char **)fw_arg1;
+
+ for (i = 1; i < argc; i++) {
+ strlcat(arcs_cmdline, argv[i], COMMAND_LINE_SIZE);
+ if (i < (argc - 1))
+ strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
+ }
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
new file mode 100644
index 000000000..8f68446ff
--- /dev/null
+++ b/arch/mips/vr41xx/common/irq.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Interrupt handling routines for the NEC VR4100 series.
+ *
+ * Copyright (C) 2005-2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/vr41xx/irq.h>
+
+typedef struct irq_cascade {
+ int (*get_irq)(unsigned int);
+} irq_cascade_t;
+
+static irq_cascade_t irq_cascade[NR_IRQS] __cacheline_aligned;
+
+int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int))
+{
+ int retval = 0;
+
+ if (irq >= NR_IRQS)
+ return -EINVAL;
+
+ if (irq_cascade[irq].get_irq != NULL)
+ free_irq(irq, NULL);
+
+ irq_cascade[irq].get_irq = get_irq;
+
+ if (get_irq != NULL) {
+ retval = request_irq(irq, no_action, IRQF_NO_THREAD,
+ "cascade", NULL);
+ if (retval < 0)
+ irq_cascade[irq].get_irq = NULL;
+ }
+
+ return retval;
+}
+
+EXPORT_SYMBOL_GPL(cascade_irq);
+
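+/* Dispatch an IRQ; if a cascade handler is registered, mask/ack the parent line, resolve and dispatch the child IRQ, then unmask. */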
+static void irq_dispatch(unsigned int irq)
+{
+ irq_cascade_t *cascade;
+
+ if (irq >= NR_IRQS) {
+ atomic_inc(&irq_err_count);
+ return;
+ }
+
+ cascade = irq_cascade + irq;
+ if (cascade->get_irq != NULL) {
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_data *idata = irq_desc_get_irq_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int ret;
+
+ if (chip->irq_mask_ack)
+ chip->irq_mask_ack(idata);
+ else {
+ chip->irq_mask(idata);
+ chip->irq_ack(idata);
+ }
+ ret = cascade->get_irq(irq);
+ irq = ret;
+ if (ret < 0)
+ atomic_inc(&irq_err_count);
+ else
+ irq_dispatch(irq);
+ if (!irqd_irq_disabled(idata) && chip->irq_unmask)
+ chip->irq_unmask(idata);
+ } else
+ do_IRQ(irq);
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
+
+ if (pending & CAUSEF_IP7)
+ do_IRQ(TIMER_IRQ);
+ else if (pending & 0x7800) {
+ if (pending & CAUSEF_IP3)
+ irq_dispatch(INT1_IRQ);
+ else if (pending & CAUSEF_IP4)
+ irq_dispatch(INT2_IRQ);
+ else if (pending & CAUSEF_IP5)
+ irq_dispatch(INT3_IRQ);
+ else if (pending & CAUSEF_IP6)
+ irq_dispatch(INT4_IRQ);
+ } else if (pending & CAUSEF_IP2)
+ irq_dispatch(INT0_IRQ);
+ else if (pending & CAUSEF_IP0)
+ do_IRQ(MIPS_SOFTINT0_IRQ);
+ else if (pending & CAUSEF_IP1)
+ do_IRQ(MIPS_SOFTINT1_IRQ);
+ else
+ spurious_interrupt();
+}
+
+void __init arch_init_irq(void)
+{
+ mips_cpu_irq_init();
+}
diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c
new file mode 100644
index 000000000..93cc7e0b3
--- /dev/null
+++ b/arch/mips/vr41xx/common/pmu.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * pmu.c, Power Management Unit routines for NEC VR4100 series.
+ *
+ * Copyright (C) 2003-2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/pm.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/idle.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/reboot.h>
+
+#define PMU_TYPE1_BASE 0x0b0000a0UL
+#define PMU_TYPE1_SIZE 0x0eUL
+
+#define PMU_TYPE2_BASE 0x0f0000c0UL
+#define PMU_TYPE2_SIZE 0x10UL
+
+#define PMUCNT2REG 0x06
+ #define SOFTRST 0x0010
+
+static void __iomem *pmu_base;
+
+#define pmu_read(offset) readw(pmu_base + (offset))
+#define pmu_write(offset, value) writew((value), pmu_base + (offset))
+
+static void __cpuidle vr41xx_cpu_wait(void)
+{
+ local_irq_disable();
+ if (!need_resched())
+ /*
+ * "standby" sets IE bit of the CP0_STATUS to 1.
+ */
+ __asm__("standby;\n");
+ else
+ local_irq_enable();
+}
+
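+/* VR4122/VR4131/VR4133 expose a PMU software-reset bit; older parts are reset by jumping back to the boot vector. */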
+static inline void software_reset(void)
+{
+ uint16_t pmucnt2;
+
+ switch (current_cpu_type()) {
+ case CPU_VR4122:
+ case CPU_VR4131:
+ case CPU_VR4133:
+ pmucnt2 = pmu_read(PMUCNT2REG);
+ pmucnt2 |= SOFTRST;
+ pmu_write(PMUCNT2REG, pmucnt2);
+ break;
+ default:
+ set_c0_status(ST0_BEV | ST0_ERL);
+ change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
+ __flush_cache_all();
+ write_c0_wired(0);
+ __asm__("jr %0"::"r"(0xbfc00000));
+ break;
+ }
+}
+
+static void vr41xx_restart(char *command)
+{
+ local_irq_disable();
+ software_reset();
+ while (1) ;
+}
+
+static void vr41xx_halt(void)
+{
+ local_irq_disable();
+ printk(KERN_NOTICE "\nYou can turn off the power supply\n");
+ __asm__("hibernate;\n");
+}
+
+static int __init vr41xx_pmu_init(void)
+{
+ unsigned long start, size;
+
+ switch (current_cpu_type()) {
+ case CPU_VR4111:
+ case CPU_VR4121:
+ start = PMU_TYPE1_BASE;
+ size = PMU_TYPE1_SIZE;
+ break;
+ case CPU_VR4122:
+ case CPU_VR4131:
+ case CPU_VR4133:
+ start = PMU_TYPE2_BASE;
+ size = PMU_TYPE2_SIZE;
+ break;
+ default:
+ printk("Unexpected CPU of NEC VR4100 series\n");
+ return -ENODEV;
+ }
+
+ if (request_mem_region(start, size, "PMU") == NULL)
+ return -EBUSY;
+
+ pmu_base = ioremap(start, size);
+ if (pmu_base == NULL) {
+ release_mem_region(start, size);
+ return -EBUSY;
+ }
+
+ cpu_wait = vr41xx_cpu_wait;
+ _machine_restart = vr41xx_restart;
+ _machine_halt = vr41xx_halt;
+ pm_power_off = vr41xx_halt;
+
+ return 0;
+}
+
+core_initcall(vr41xx_pmu_init);
diff --git a/arch/mips/vr41xx/common/rtc.c b/arch/mips/vr41xx/common/rtc.c
new file mode 100644
index 000000000..5ce668317
--- /dev/null
+++ b/arch/mips/vr41xx/common/rtc.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NEC VR4100 series RTC platform device.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+
+#include <asm/cpu.h>
+#include <asm/vr41xx/irq.h>
+
+static struct resource rtc_type1_resource[] __initdata = {
+ {
+ .start = 0x0b0000c0,
+ .end = 0x0b0000df,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0x0b0001c0,
+ .end = 0x0b0001df,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = ELAPSEDTIME_IRQ,
+ .end = ELAPSEDTIME_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = RTCLONG1_IRQ,
+ .end = RTCLONG1_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource rtc_type2_resource[] __initdata = {
+ {
+ .start = 0x0f000100,
+ .end = 0x0f00011f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0x0f000120,
+ .end = 0x0f00013f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = ELAPSEDTIME_IRQ,
+ .end = ELAPSEDTIME_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = RTCLONG1_IRQ,
+ .end = RTCLONG1_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static int __init vr41xx_rtc_add(void)
+{
+ struct platform_device *pdev;
+ struct resource *res;
+ unsigned int num;
+ int retval;
+
+ pdev = platform_device_alloc("RTC", -1);
+ if (!pdev)
+ return -ENOMEM;
+
+ switch (current_cpu_type()) {
+ case CPU_VR4111:
+ case CPU_VR4121:
+ res = rtc_type1_resource;
+ num = ARRAY_SIZE(rtc_type1_resource);
+ break;
+ case CPU_VR4122:
+ case CPU_VR4131:
+ case CPU_VR4133:
+ res = rtc_type2_resource;
+ num = ARRAY_SIZE(rtc_type2_resource);
+ break;
+ default:
+ retval = -ENODEV;
+ goto err_free_device;
+ }
+
+ retval = platform_device_add_resources(pdev, res, num);
+ if (retval)
+ goto err_free_device;
+
+ retval = platform_device_add(pdev);
+ if (retval)
+ goto err_free_device;
+
+ return 0;
+
+err_free_device:
+ platform_device_put(pdev);
+
+ return retval;
+}
+device_initcall(vr41xx_rtc_add);
diff --git a/arch/mips/vr41xx/common/siu.c b/arch/mips/vr41xx/common/siu.c
new file mode 100644
index 000000000..b37a79154
--- /dev/null
+++ b/arch/mips/vr41xx/common/siu.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NEC VR4100 series SIU platform device.
+ *
+ * Copyright (C) 2007-2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/irq.h>
+
+#include <asm/cpu.h>
+#include <asm/vr41xx/siu.h>
+
+static unsigned int siu_type1_ports[SIU_PORTS_MAX] __initdata = {
+ PORT_VR41XX_SIU,
+ PORT_UNKNOWN,
+};
+
+static struct resource siu_type1_resource[] __initdata = {
+ {
+ .start = 0x0c000000,
+ .end = 0x0c00000a,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = SIU_IRQ,
+ .end = SIU_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static unsigned int siu_type2_ports[SIU_PORTS_MAX] __initdata = {
+ PORT_VR41XX_SIU,
+ PORT_VR41XX_DSIU,
+};
+
+static struct resource siu_type2_resource[] __initdata = {
+ {
+ .start = 0x0f000800,
+ .end = 0x0f00080a,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0x0f000820,
+ .end = 0x0f000829,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = SIU_IRQ,
+ .end = SIU_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = DSIU_IRQ,
+ .end = DSIU_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static int __init vr41xx_siu_add(void)
+{
+ struct platform_device *pdev;
+ struct resource *res;
+ unsigned int num;
+ int retval;
+
+ pdev = platform_device_alloc("SIU", -1);
+ if (!pdev)
+ return -ENOMEM;
+
+ switch (current_cpu_type()) {
+ case CPU_VR4111:
+ case CPU_VR4121:
+ pdev->dev.platform_data = siu_type1_ports;
+ res = siu_type1_resource;
+ num = ARRAY_SIZE(siu_type1_resource);
+ break;
+ case CPU_VR4122:
+ case CPU_VR4131:
+ case CPU_VR4133:
+ pdev->dev.platform_data = siu_type2_ports;
+ res = siu_type2_resource;
+ num = ARRAY_SIZE(siu_type2_resource);
+ break;
+ default:
+ retval = -ENODEV;
+ goto err_free_device;
+ }
+
+ retval = platform_device_add_resources(pdev, res, num);
+ if (retval)
+ goto err_free_device;
+
+ retval = platform_device_add(pdev);
+ if (retval)
+ goto err_free_device;
+
+ return 0;
+
+err_free_device:
+ platform_device_put(pdev);
+
+ return retval;
+}
+device_initcall(vr41xx_siu_add);
+
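+/* Describe the on-chip serial ports to the SIU driver early, before the platform devices are registered. */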
+void __init vr41xx_siu_setup(void)
+{
+ struct uart_port port;
+ struct resource *res;
+ unsigned int *type;
+ int i;
+
+ switch (current_cpu_type()) {
+ case CPU_VR4111:
+ case CPU_VR4121:
+ type = siu_type1_ports;
+ res = siu_type1_resource;
+ break;
+ case CPU_VR4122:
+ case CPU_VR4131:
+ case CPU_VR4133:
+ type = siu_type2_ports;
+ res = siu_type2_resource;
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < SIU_PORTS_MAX; i++) {
+ port.line = i;
+ port.type = type[i];
+ if (port.type == PORT_UNKNOWN)
+ break;
+ port.mapbase = res[i].start;
+ port.membase = (unsigned char __iomem *)KSEG1ADDR(res[i].start);
+ vr41xx_siu_early_setup(&port);
+ }
+}
diff --git a/arch/mips/vr41xx/common/type.c b/arch/mips/vr41xx/common/type.c
new file mode 100644
index 000000000..dddcf1eaa
--- /dev/null
+++ b/arch/mips/vr41xx/common/type.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * type.c, System type for NEC VR4100 series.
+ *
+ * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+
+const char *get_system_type(void)
+{
+ return "NEC VR4100 series";
+}
diff --git a/arch/mips/vr41xx/ibm-workpad/Makefile b/arch/mips/vr41xx/ibm-workpad/Makefile
new file mode 100644
index 000000000..c7be704e7
--- /dev/null
+++ b/arch/mips/vr41xx/ibm-workpad/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the IBM WorkPad z50 specific parts of the kernel
+#
+
+obj-y += setup.o
diff --git a/arch/mips/vr41xx/ibm-workpad/setup.c b/arch/mips/vr41xx/ibm-workpad/setup.c
new file mode 100644
index 000000000..7e14d65c5
--- /dev/null
+++ b/arch/mips/vr41xx/ibm-workpad/setup.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * setup.c, Setup for the IBM WorkPad z50.
+ *
+ * Copyright (C) 2002-2006 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/ioport.h>
+
+#include <asm/io.h>
+
+#define WORKPAD_ISA_IO_BASE 0x15000000
+#define WORKPAD_ISA_IO_SIZE 0x03000000
+#define WORKPAD_ISA_IO_START 0
+#define WORKPAD_ISA_IO_END (WORKPAD_ISA_IO_SIZE - 1)
+#define WORKPAD_IO_PORT_BASE KSEG1ADDR(WORKPAD_ISA_IO_BASE)
+
+static int __init ibm_workpad_setup(void)
+{
+ set_io_port_base(WORKPAD_IO_PORT_BASE);
+ ioport_resource.start = WORKPAD_ISA_IO_START;
+ ioport_resource.end = WORKPAD_ISA_IO_END;
+
+ return 0;
+}
+
+arch_initcall(ibm_workpad_setup);